/*
   md.c : Multiple Devices driver for Linux
          Copyright (C) 1998, 1999, 2000 Ingo Molnar

     completely rewritten, based on the MD driver code from Marc Zyngier

   Changes:

   - RAID-1/RAID-5 extensions by Miguel de Icaza, Gadi Oxman, Ingo Molnar
   - boot support for linear and striped mode by Harald Hoyer <HarryH@Royal.Net>
   - kerneld support by Boris Tobotras <boris@xtalk.msk.su>
   - kmod support by: Cyrus Durgin
   - RAID0 bugfixes: Mark Anthony Lisher <markal@iname.com>
   - Devfs support by Richard Gooch <rgooch@atnf.csiro.au>

   - lots of fixes and improvements to the RAID1/RAID5 and generic
     RAID code (such as request based resynchronization):

     Neil Brown <neilb@cse.unsw.edu.au>.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 2, or (at your option)
   any later version.

   You should have received a copy of the GNU General Public License
   (for example /usr/src/linux/COPYING); if not, write to the Free
   Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/

#include <linux/module.h>
#include <linux/config.h>
#include <linux/linkage.h>
#include <linux/raid/md.h>
#include <linux/sysctl.h>
#include <linux/devfs_fs_kernel.h>
#include <linux/buffer_head.h> /* for invalidate_bdev */
#include <linux/suspend.h>

#include <linux/init.h>

#ifdef CONFIG_KMOD
#include <linux/kmod.h>
#endif

#define __KERNEL_SYSCALLS__
#include <linux/unistd.h>

#include <asm/unaligned.h>

#define MAJOR_NR MD_MAJOR
#define MD_DRIVER
#define DEVICE_NR(device) (minor(device))

#define DEBUG 0
#define dprintk(x...) ((void)(DEBUG && printk(x)))


#ifndef MODULE
static void autostart_arrays (void);
#endif

static mdk_personality_t *pers[MAX_PERSONALITY];
static spinlock_t pers_lock = SPIN_LOCK_UNLOCKED;

/*
 * Current RAID-1,4,5 parallel reconstruction 'guaranteed speed limit'
 * is 1000 KB/sec, so the extra system load does not show up that much.
 * Increase it if you want to have more _guaranteed_ speed. Note that
 * the RAID driver will use the maximum available bandwidth if the IO
 * subsystem is idle. There is also an 'absolute maximum' reconstruction
 * speed limit - in case reconstruction slows down your system despite
 * idle IO detection.
 *
 * You can change it via /proc/sys/dev/raid/speed_limit_min and _max.
 */
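
/*
 * Usage sketch (illustrative, not part of the driver): once the sysctl
 * tables below are registered, both limits can be inspected and tuned
 * at run time from user space, in KB/sec per device; the values shown
 * here are example numbers only:
 *
 *	# cat /proc/sys/dev/raid/speed_limit_min
 *	1000
 *	# echo 5000  > /proc/sys/dev/raid/speed_limit_min
 *	# echo 50000 > /proc/sys/dev/raid/speed_limit_max
 */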

static int sysctl_speed_limit_min = 1000;
static int sysctl_speed_limit_max = 200000;

static struct ctl_table_header *raid_table_header;

static ctl_table raid_table[] = {
        {
                .ctl_name       = DEV_RAID_SPEED_LIMIT_MIN,
                .procname       = "speed_limit_min",
                .data           = &sysctl_speed_limit_min,
                .maxlen         = sizeof(int),
                .mode           = 0644,
                .proc_handler   = &proc_dointvec,
        },
        {
                .ctl_name       = DEV_RAID_SPEED_LIMIT_MAX,
                .procname       = "speed_limit_max",
                .data           = &sysctl_speed_limit_max,
                .maxlen         = sizeof(int),
                .mode           = 0644,
                .proc_handler   = &proc_dointvec,
        },
        { .ctl_name = 0 }
};

static ctl_table raid_dir_table[] = {
        {
                .ctl_name       = DEV_RAID,
                .procname       = "raid",
                .maxlen         = 0,
                .mode           = 0555,
                .child          = raid_table,
        },
        { .ctl_name = 0 }
};

static ctl_table raid_root_table[] = {
        {
                .ctl_name       = CTL_DEV,
                .procname       = "dev",
                .maxlen         = 0,
                .mode           = 0555,
                .child          = raid_dir_table,
        },
        { .ctl_name = 0 }
};

static struct block_device_operations md_fops;

static struct gendisk *disks[MAX_MD_DEVS];

/*
 * Enables iteration over all existing md arrays.
 * all_mddevs_lock protects this list as well as mddev_map.
 */
static LIST_HEAD(all_mddevs);
static spinlock_t all_mddevs_lock = SPIN_LOCK_UNLOCKED;


/*
 * Iterates through all used mddevs in the system.
 * We take care to grab the all_mddevs_lock whenever navigating
 * the list, and to always hold a refcount when unlocked.
 * Any code which breaks out of this loop while holding a reference
 * to the current mddev must mddev_put it.
 */
#define ITERATE_MDDEV(mddev,tmp)                                        \
                                                                        \
        for (({ spin_lock(&all_mddevs_lock);                            \
                tmp = all_mddevs.next;                                  \
                mddev = NULL;});                                        \
             ({ if (tmp != &all_mddevs)                                 \
                        mddev_get(list_entry(tmp, mddev_t, all_mddevs));\
                spin_unlock(&all_mddevs_lock);                          \
                if (mddev) mddev_put(mddev);                            \
                mddev = list_entry(tmp, mddev_t, all_mddevs);           \
                tmp != &all_mddevs;});                                  \
             ({ spin_lock(&all_mddevs_lock);                            \
                tmp = tmp->next;})                                      \
                )
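
/*
 * Typical use of ITERATE_MDDEV (illustrative sketch; wants_mddev() is
 * a made-up predicate): the macro takes and drops references itself,
 * so a loop that runs to completion needs no extra refcounting, while
 * code that breaks out still owns a reference on the current mddev:
 *
 *	mddev_t *mddev;
 *	struct list_head *tmp;
 *
 *	ITERATE_MDDEV(mddev,tmp) {
 *		if (wants_mddev(mddev))
 *			break;	(caller now holds a reference and
 *				 must eventually mddev_put(mddev))
 *	}
 */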

static mddev_t *mddev_map[MAX_MD_DEVS];

static int md_fail_request (request_queue_t *q, struct bio *bio)
{
        bio_io_error(bio, bio->bi_size);
        return 0;
}

static inline mddev_t *mddev_get(mddev_t *mddev)
{
        atomic_inc(&mddev->active);
        return mddev;
}

static void mddev_put(mddev_t *mddev)
{
        if (!atomic_dec_and_lock(&mddev->active, &all_mddevs_lock))
                return;
        if (!mddev->raid_disks && list_empty(&mddev->disks)) {
                list_del(&mddev->all_mddevs);
                mddev_map[mdidx(mddev)] = NULL;
                blk_put_queue(mddev->queue);
                kfree(mddev);
                MOD_DEC_USE_COUNT;
        }
        spin_unlock(&all_mddevs_lock);
}

static mddev_t * mddev_find(int unit)
{
        mddev_t *mddev, *new = NULL;

 retry:
        spin_lock(&all_mddevs_lock);
        if (mddev_map[unit]) {
                mddev = mddev_get(mddev_map[unit]);
                spin_unlock(&all_mddevs_lock);
                if (new)
                        kfree(new);
                return mddev;
        }
        if (new) {
                mddev_map[unit] = new;
                list_add(&new->all_mddevs, &all_mddevs);
                spin_unlock(&all_mddevs_lock);
                MOD_INC_USE_COUNT;
                return new;
        }
        spin_unlock(&all_mddevs_lock);

        new = (mddev_t *) kmalloc(sizeof(*new), GFP_KERNEL);
        if (!new)
                return NULL;

        memset(new, 0, sizeof(*new));

        new->__minor = unit;
        init_MUTEX(&new->reconfig_sem);
        INIT_LIST_HEAD(&new->disks);
        INIT_LIST_HEAD(&new->all_mddevs);
        init_timer(&new->safemode_timer);
        atomic_set(&new->active, 1);

        new->queue = blk_alloc_queue(GFP_KERNEL);
        if (!new->queue) {
                kfree(new);
                return NULL;
        }

        blk_queue_make_request(new->queue, md_fail_request);

        goto retry;
}

static inline int mddev_lock(mddev_t * mddev)
{
        return down_interruptible(&mddev->reconfig_sem);
}

static inline void mddev_lock_uninterruptible(mddev_t * mddev)
{
        down(&mddev->reconfig_sem);
}

static inline int mddev_trylock(mddev_t * mddev)
{
        return down_trylock(&mddev->reconfig_sem);
}

static inline void mddev_unlock(mddev_t * mddev)
{
        up(&mddev->reconfig_sem);
}
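
/*
 * Sketch of the intended use of the helpers above (illustrative, not a
 * verbatim excerpt): reconfiguration paths serialize on reconfig_sem
 * for the duration of the change:
 *
 *	if (mddev_lock(mddev))
 *		return -EINTR;	(interrupted while waiting)
 *	... modify mddev state / its disk list ...
 *	mddev_unlock(mddev);
 */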

mdk_rdev_t * find_rdev_nr(mddev_t *mddev, int nr)
{
        mdk_rdev_t * rdev;
        struct list_head *tmp;

        ITERATE_RDEV(mddev,rdev,tmp) {
                if (rdev->desc_nr == nr)
                        return rdev;
        }
        return NULL;
}

static mdk_rdev_t * find_rdev(mddev_t * mddev, dev_t dev)
{
        struct list_head *tmp;
        mdk_rdev_t *rdev;

        ITERATE_RDEV(mddev,rdev,tmp) {
                if (rdev->bdev->bd_dev == dev)
                        return rdev;
        }
        return NULL;
}

static inline sector_t calc_dev_sboffset(struct block_device *bdev)
{
        sector_t size = bdev->bd_inode->i_size >> BLOCK_SIZE_BITS;
        return MD_NEW_SIZE_BLOCKS(size);
}

static sector_t calc_dev_size(mdk_rdev_t *rdev, unsigned chunk_size)
{
        sector_t size;

        size = rdev->sb_offset;

        if (chunk_size)
                size &= ~((sector_t)chunk_size/1024 - 1);
        return size;
}

static int alloc_disk_sb(mdk_rdev_t * rdev)
{
        if (rdev->sb_page)
                MD_BUG();

        rdev->sb_page = alloc_page(GFP_KERNEL);
        if (!rdev->sb_page) {
                printk(KERN_ALERT "md: out of memory.\n");
                return -EINVAL;
        }

        return 0;
}

static void free_disk_sb(mdk_rdev_t * rdev)
{
        if (rdev->sb_page) {
                page_cache_release(rdev->sb_page);
                rdev->sb_loaded = 0;
                rdev->sb_page = NULL;
                rdev->sb_offset = 0;
                rdev->size = 0;
        }
}


static int bi_complete(struct bio *bio, unsigned int bytes_done, int error)
{
        if (bio->bi_size)
                return 1;

        complete((struct completion*)bio->bi_private);
        return 0;
}

static int sync_page_io(struct block_device *bdev, sector_t sector, int size,
                   struct page *page, int rw)
{
        struct bio bio;
        struct bio_vec vec;
        struct completion event;

        bio_init(&bio);
        bio.bi_io_vec = &vec;
        vec.bv_page = page;
        vec.bv_len = size;
        vec.bv_offset = 0;
        bio.bi_vcnt = 1;
        bio.bi_idx = 0;
        bio.bi_size = size;
        bio.bi_bdev = bdev;
        bio.bi_sector = sector;
        init_completion(&event);
        bio.bi_private = &event;
        bio.bi_end_io = bi_complete;
        submit_bio(rw, &bio);
        blk_run_queues();
        wait_for_completion(&event);

        return test_bit(BIO_UPTODATE, &bio.bi_flags);
}
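
/*
 * Usage sketch for sync_page_io() (read_disk_sb() below is the real
 * caller): the sector argument is in 512-byte units, so an offset kept
 * in K, like rdev->sb_offset, must be shifted left by 1:
 *
 *	if (!sync_page_io(rdev->bdev, rdev->sb_offset<<1, MD_SB_BYTES,
 *			  rdev->sb_page, READ))
 *		... superblock read failed ...
 */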

static int read_disk_sb(mdk_rdev_t * rdev)
{
        char b[BDEVNAME_SIZE];
        if (!rdev->sb_page) {
                MD_BUG();
                return -EINVAL;
        }
        if (rdev->sb_loaded)
                return 0;


        if (!sync_page_io(rdev->bdev, rdev->sb_offset<<1, MD_SB_BYTES, rdev->sb_page, READ))
                goto fail;
        rdev->sb_loaded = 1;
        return 0;

fail:
        printk(KERN_ERR "md: disabled device %s, could not read superblock.\n",
                bdevname(rdev->bdev,b));
        return -EINVAL;
}

static int uuid_equal(mdp_super_t *sb1, mdp_super_t *sb2)
{
        if (    (sb1->set_uuid0 == sb2->set_uuid0) &&
                (sb1->set_uuid1 == sb2->set_uuid1) &&
                (sb1->set_uuid2 == sb2->set_uuid2) &&
                (sb1->set_uuid3 == sb2->set_uuid3))

                return 1;

        return 0;
}


static int sb_equal(mdp_super_t *sb1, mdp_super_t *sb2)
{
        int ret;
        mdp_super_t *tmp1, *tmp2;

        tmp1 = kmalloc(sizeof(*tmp1),GFP_KERNEL);
        tmp2 = kmalloc(sizeof(*tmp2),GFP_KERNEL);

        if (!tmp1 || !tmp2) {
                ret = 0;
                printk(KERN_INFO "md.c: sb_equal: out of memory!\n");
                goto abort;
        }

        *tmp1 = *sb1;
        *tmp2 = *sb2;

        /*
         * nr_disks is not constant
         */
        tmp1->nr_disks = 0;
        tmp2->nr_disks = 0;

        if (memcmp(tmp1, tmp2, MD_SB_GENERIC_CONSTANT_WORDS * 4))
                ret = 0;
        else
                ret = 1;

abort:
        if (tmp1)
                kfree(tmp1);
        if (tmp2)
                kfree(tmp2);

        return ret;
}

static unsigned int calc_sb_csum(mdp_super_t * sb)
{
        unsigned int disk_csum, csum;

        disk_csum = sb->sb_csum;
        sb->sb_csum = 0;
        csum = csum_partial((void *)sb, MD_SB_BYTES, 0);
        sb->sb_csum = disk_csum;
        return csum;
}

/*
 * Handle superblock details.
 * We want to be able to handle multiple superblock formats
 * so we have a common interface to them all, and an array of
 * different handlers.
 * We rely on user-space to write the initial superblock, and support
 * reading and updating of superblocks.
 * Interface methods are:
 *   int load_super(mdk_rdev_t *dev, mdk_rdev_t *refdev, int minor_version)
 *      loads and validates a superblock on dev.
 *      if refdev != NULL, compare superblocks on both devices
 *    Return:
 *      0 - dev has a superblock that is compatible with refdev
 *      1 - dev has a superblock that is compatible and newer than refdev
 *          so dev should be used as the refdev in future
 *     -EINVAL superblock incompatible or invalid
 *     -othererror e.g. -EIO
 *
 *   int validate_super(mddev_t *mddev, mdk_rdev_t *dev)
 *      Verify that dev is acceptable into mddev.
 *       The first time, mddev->raid_disks will be 0, and data from
 *       dev should be merged in.  Subsequent calls check that dev
 *       is new enough.  Return 0 or -EINVAL
 *
 *   void sync_super(mddev_t *mddev, mdk_rdev_t *dev)
 *     Update the superblock for rdev with data in mddev
 *     This does not write to disc.
 *
 */
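
/*
 * Illustrative caller sketch (the real call sites are sync_sbs() and
 * analyze_sbs() further down): the handlers are never called directly;
 * they are always dispatched through the super_types table defined
 * below, indexed by the array's major superblock version:
 *
 *	err = super_types[mddev->major_version].
 *		load_super(rdev, refdev, mddev->minor_version);
 *	super_types[mddev->major_version].sync_super(mddev, rdev);
 */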

struct super_type {
        char            *name;
        struct module   *owner;
        int             (*load_super)(mdk_rdev_t *rdev, mdk_rdev_t *refdev, int minor_version);
        int             (*validate_super)(mddev_t *mddev, mdk_rdev_t *rdev);
        void            (*sync_super)(mddev_t *mddev, mdk_rdev_t *rdev);
};

/*
 * load_super for 0.90.0
 */
static int super_90_load(mdk_rdev_t *rdev, mdk_rdev_t *refdev, int minor_version)
{
        char b[BDEVNAME_SIZE], b2[BDEVNAME_SIZE];
        mdp_super_t *sb;
        int ret;
        sector_t sb_offset;

        /*
         * Calculate the position of the superblock,
         * it's at the end of the disk.
         *
         * It also happens to be a multiple of 4Kb.
         */
        sb_offset = calc_dev_sboffset(rdev->bdev);
        rdev->sb_offset = sb_offset;

        ret = read_disk_sb(rdev);
        if (ret) return ret;

        ret = -EINVAL;

        bdevname(rdev->bdev, b);
        sb = (mdp_super_t*)page_address(rdev->sb_page);

        if (sb->md_magic != MD_SB_MAGIC) {
                printk(KERN_ERR "md: invalid raid superblock magic on %s\n",
                       b);
                goto abort;
        }

        if (sb->major_version != 0 ||
            sb->minor_version != 90) {
                printk(KERN_WARNING "Bad version number %d.%d on %s\n",
                        sb->major_version, sb->minor_version,
                        b);
                goto abort;
        }

        if (sb->md_minor >= MAX_MD_DEVS) {
                printk(KERN_ERR "md: %s: invalid raid minor (%x)\n",
                        b, sb->md_minor);
                goto abort;
        }
        if (sb->raid_disks <= 0)
                goto abort;

        if (calc_sb_csum(sb) != sb->sb_csum) {
                printk(KERN_WARNING "md: invalid superblock checksum on %s\n",
                        b);
                goto abort;
        }

        rdev->preferred_minor = sb->md_minor;
        rdev->data_offset = 0;

        if (sb->level == MULTIPATH)
                rdev->desc_nr = -1;
        else
                rdev->desc_nr = sb->this_disk.number;

        if (refdev == 0)
                ret = 1;
        else {
                __u64 ev1, ev2;
                mdp_super_t *refsb = (mdp_super_t*)page_address(refdev->sb_page);
                if (!uuid_equal(refsb, sb)) {
                        printk(KERN_WARNING "md: %s has different UUID to %s\n",
                                b, bdevname(refdev->bdev,b2));
                        goto abort;
                }
                if (!sb_equal(refsb, sb)) {
                        printk(KERN_WARNING "md: %s has same UUID"
                               " but different superblock to %s\n",
                               b, bdevname(refdev->bdev, b2));
                        goto abort;
                }
                ev1 = md_event(sb);
                ev2 = md_event(refsb);
                if (ev1 > ev2)
                        ret = 1;
                else
                        ret = 0;
        }
        rdev->size = calc_dev_size(rdev, sb->chunk_size);

 abort:
        return ret;
}

/*
 * validate_super for 0.90.0
 */
static int super_90_validate(mddev_t *mddev, mdk_rdev_t *rdev)
{
        mdp_disk_t *desc;
        mdp_super_t *sb = (mdp_super_t *)page_address(rdev->sb_page);

        if (mddev->raid_disks == 0) {
                mddev->major_version = 0;
                mddev->minor_version = sb->minor_version;
                mddev->patch_version = sb->patch_version;
                mddev->persistent = ! sb->not_persistent;
                mddev->chunk_size = sb->chunk_size;
                mddev->ctime = sb->ctime;
                mddev->utime = sb->utime;
                mddev->level = sb->level;
                mddev->layout = sb->layout;
                mddev->raid_disks = sb->raid_disks;
                mddev->size = sb->size;
                mddev->events = md_event(sb);

                if (sb->state & (1<<MD_SB_CLEAN))
                        mddev->recovery_cp = MaxSector;
                else {
                        if (sb->events_hi == sb->cp_events_hi &&
                                sb->events_lo == sb->cp_events_lo) {
                                mddev->recovery_cp = sb->recovery_cp;
                        } else
                                mddev->recovery_cp = 0;
                }

                memcpy(mddev->uuid+0, &sb->set_uuid0, 4);
                memcpy(mddev->uuid+4, &sb->set_uuid1, 4);
                memcpy(mddev->uuid+8, &sb->set_uuid2, 4);
                memcpy(mddev->uuid+12,&sb->set_uuid3, 4);

                mddev->max_disks = MD_SB_DISKS;
        } else {
                __u64 ev1;
                ev1 = md_event(sb);
                ++ev1;
                if (ev1 < mddev->events)
                        return -EINVAL;
        }
        if (mddev->level != LEVEL_MULTIPATH) {
                rdev->raid_disk = -1;
                rdev->in_sync = rdev->faulty = 0;
                desc = sb->disks + rdev->desc_nr;

                if (desc->state & (1<<MD_DISK_FAULTY))
                        rdev->faulty = 1;
                else if (desc->state & (1<<MD_DISK_SYNC) &&
                         desc->raid_disk < mddev->raid_disks) {
                        rdev->in_sync = 1;
                        rdev->raid_disk = desc->raid_disk;
                }
        }
        return 0;
}

/*
 * sync_super for 0.90.0
 */
static void super_90_sync(mddev_t *mddev, mdk_rdev_t *rdev)
{
        mdp_super_t *sb;
        struct list_head *tmp;
        mdk_rdev_t *rdev2;
        int next_spare = mddev->raid_disks;

        /* make rdev->sb match mddev data..
         *
         * 1/ zero out disks
         * 2/ Add info for each disk, keeping track of highest desc_nr
         * 3/ any empty disks < highest become removed
         *
         * disks[0] gets initialised to REMOVED because
         * we cannot be sure from other fields if it has
         * been initialised or not.
         */
        int highest = 0;
        int i;
        int active=0, working=0,failed=0,spare=0,nr_disks=0;

        sb = (mdp_super_t*)page_address(rdev->sb_page);

        memset(sb, 0, sizeof(*sb));

        sb->md_magic = MD_SB_MAGIC;
        sb->major_version = mddev->major_version;
        sb->minor_version = mddev->minor_version;
        sb->patch_version = mddev->patch_version;
        sb->gvalid_words  = 0; /* ignored */
        memcpy(&sb->set_uuid0, mddev->uuid+0, 4);
        memcpy(&sb->set_uuid1, mddev->uuid+4, 4);
        memcpy(&sb->set_uuid2, mddev->uuid+8, 4);
        memcpy(&sb->set_uuid3, mddev->uuid+12,4);

        sb->ctime = mddev->ctime;
        sb->level = mddev->level;
        sb->size  = mddev->size;
        sb->raid_disks = mddev->raid_disks;
        sb->md_minor = mddev->__minor;
        sb->not_persistent = !mddev->persistent;
        sb->utime = mddev->utime;
        sb->state = 0;
        sb->events_hi = (mddev->events>>32);
        sb->events_lo = (u32)mddev->events;

        if (mddev->in_sync)
        {
                sb->recovery_cp = mddev->recovery_cp;
                sb->cp_events_hi = (mddev->events>>32);
                sb->cp_events_lo = (u32)mddev->events;
                if (mddev->recovery_cp == MaxSector)
                        sb->state = (1<< MD_SB_CLEAN);
        } else
                sb->recovery_cp = 0;

        sb->layout = mddev->layout;
        sb->chunk_size = mddev->chunk_size;

        sb->disks[0].state = (1<<MD_DISK_REMOVED);
        ITERATE_RDEV(mddev,rdev2,tmp) {
                mdp_disk_t *d;
                if (rdev2->raid_disk >= 0 && rdev2->in_sync && !rdev2->faulty)
                        rdev2->desc_nr = rdev2->raid_disk;
                else
                        rdev2->desc_nr = next_spare++;
                d = &sb->disks[rdev2->desc_nr];
                nr_disks++;
                d->number = rdev2->desc_nr;
                d->major = MAJOR(rdev2->bdev->bd_dev);
                d->minor = MINOR(rdev2->bdev->bd_dev);
                if (rdev2->raid_disk >= 0 && rdev2->in_sync && !rdev2->faulty)
                        d->raid_disk = rdev2->raid_disk;
                else
                        d->raid_disk = rdev2->desc_nr; /* compatibility */
                if (rdev2->faulty) {
                        d->state = (1<<MD_DISK_FAULTY);
                        failed++;
                } else if (rdev2->in_sync) {
                        d->state = (1<<MD_DISK_ACTIVE);
                        d->state |= (1<<MD_DISK_SYNC);
                        active++;
                        working++;
                } else {
                        d->state = 0;
                        spare++;
                        working++;
                }
                if (rdev2->desc_nr > highest)
                        highest = rdev2->desc_nr;
        }

        /* now set the "removed" bit on any non-trailing holes */
        for (i=0; i<highest; i++) {
                mdp_disk_t *d = &sb->disks[i];
                if (d->state == 0 && d->number == 0) {
                        d->number = i;
                        d->raid_disk = i;
                        d->state = (1<<MD_DISK_REMOVED);
                }
        }
        sb->nr_disks = nr_disks;
        sb->active_disks = active;
        sb->working_disks = working;
        sb->failed_disks = failed;
        sb->spare_disks = spare;

        sb->this_disk = sb->disks[rdev->desc_nr];
        sb->sb_csum = calc_sb_csum(sb);
}

/*
 * version 1 superblock
 */

static unsigned int calc_sb_1_csum(struct mdp_superblock_1 * sb)
{
        unsigned int disk_csum, csum;
        int size = 256 + sb->max_dev*2;

        disk_csum = sb->sb_csum;
        sb->sb_csum = 0;
        csum = csum_partial((void *)sb, size, 0);
        sb->sb_csum = disk_csum;
        return csum;
}

static int super_1_load(mdk_rdev_t *rdev, mdk_rdev_t *refdev, int minor_version)
{
        struct mdp_superblock_1 *sb;
        int ret;
        sector_t sb_offset;
        char b[BDEVNAME_SIZE], b2[BDEVNAME_SIZE];

        /*
         * Calculate the position of the superblock.
         * It is always aligned to a 4K boundary and
         * depending on minor_version, it can be:
         * 0: At least 8K, but less than 12K, from end of device
         * 1: At start of device
         * 2: 4K from start of device.
         */
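        /*
         * Worked example for minor_version 0 (illustrative numbers):
         * on a device of 1000005 512-byte sectors,
         *
         *	sb_offset  = 1000005		(size in sectors)
         *	sb_offset -= 8*2		-> 999989 (step back >= 8K)
         *	sb_offset &= ~(4*2 - 1)		-> 999984 (align down to 4K)
         *	sb_offset /= 2			-> 499992 (now in K units)
         *
         * which leaves the superblock 21 sectors (10.5K) from the end,
         * i.e. at least 8K but less than 12K as stated above.
         */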
        switch(minor_version) {
        case 0:
                sb_offset = rdev->bdev->bd_inode->i_size >> 9;
                sb_offset -= 8*2;
                sb_offset &= ~(4*2 - 1);
                /* convert from sectors to K */
                sb_offset /= 2;
                break;
        case 1:
                sb_offset = 0;
                break;
        case 2:
                sb_offset = 4;
                break;
        default:
                return -EINVAL;
        }
        rdev->sb_offset = sb_offset;

        ret = read_disk_sb(rdev);
        if (ret) return ret;


        sb = (struct mdp_superblock_1*)page_address(rdev->sb_page);

        if (sb->magic != cpu_to_le32(MD_SB_MAGIC) ||
            sb->major_version != cpu_to_le32(1) ||
            le32_to_cpu(sb->max_dev) > (4096-256)/2 ||
            le64_to_cpu(sb->super_offset) != (rdev->sb_offset<<1) ||
            sb->feature_map != 0)
                return -EINVAL;

        if (calc_sb_1_csum(sb) != sb->sb_csum) {
                printk("md: invalid superblock checksum on %s\n",
                        bdevname(rdev->bdev,b));
                return -EINVAL;
        }
        rdev->preferred_minor = 0xffff;
        rdev->data_offset = le64_to_cpu(sb->data_offset);

        if (refdev == 0)
                return 1;
        else {
                __u64 ev1, ev2;
                struct mdp_superblock_1 *refsb =
                        (struct mdp_superblock_1*)page_address(refdev->sb_page);

                if (memcmp(sb->set_uuid, refsb->set_uuid, 16) != 0 ||
                    sb->level != refsb->level ||
                    sb->layout != refsb->layout ||
                    sb->chunksize != refsb->chunksize) {
                        printk(KERN_WARNING "md: %s has strangely different"
                                " superblock to %s\n",
                                bdevname(rdev->bdev,b),
                                bdevname(refdev->bdev,b2));
                        return -EINVAL;
                }
                ev1 = le64_to_cpu(sb->events);
                ev2 = le64_to_cpu(refsb->events);

                if (ev1 > ev2)
                        return 1;
        }
        if (minor_version)
                rdev->size = ((rdev->bdev->bd_inode->i_size>>9) - le64_to_cpu(sb->data_offset)) / 2;
        else
                rdev->size = rdev->sb_offset;
        if (rdev->size < le64_to_cpu(sb->data_size)/2)
                return -EINVAL;
        rdev->size = le64_to_cpu(sb->data_size)/2;
        if (le32_to_cpu(sb->chunksize))
                rdev->size &= ~((sector_t)le32_to_cpu(sb->chunksize)/2 - 1);
        return 0;
}

static int super_1_validate(mddev_t *mddev, mdk_rdev_t *rdev)
{
        struct mdp_superblock_1 *sb = (struct mdp_superblock_1*)page_address(rdev->sb_page);

        if (mddev->raid_disks == 0) {
                mddev->major_version = 1;
                mddev->minor_version = 0;
                mddev->patch_version = 0;
                mddev->persistent = 1;
                mddev->chunk_size = le32_to_cpu(sb->chunksize) << 9;
                mddev->ctime = le64_to_cpu(sb->ctime) & ((1ULL << 32)-1);
                mddev->utime = le64_to_cpu(sb->utime) & ((1ULL << 32)-1);
                mddev->level = le32_to_cpu(sb->level);
                mddev->layout = le32_to_cpu(sb->layout);
                mddev->raid_disks = le32_to_cpu(sb->raid_disks);
                mddev->size = (u32)le64_to_cpu(sb->size);
                mddev->events = le64_to_cpu(sb->events);

                mddev->recovery_cp = le64_to_cpu(sb->resync_offset);
                memcpy(mddev->uuid, sb->set_uuid, 16);

                mddev->max_disks = (4096-256)/2;
        } else {
                __u64 ev1;
                ev1 = le64_to_cpu(sb->events);
                ++ev1;
                if (ev1 < mddev->events)
                        return -EINVAL;
        }

        if (mddev->level != LEVEL_MULTIPATH) {
                int role;
                rdev->desc_nr = le32_to_cpu(sb->dev_number);
                role = le16_to_cpu(sb->dev_roles[rdev->desc_nr]);
                switch(role) {
                case 0xffff: /* spare */
                        rdev->in_sync = 0;
                        rdev->faulty = 0;
                        rdev->raid_disk = -1;
                        break;
                case 0xfffe: /* faulty */
                        rdev->in_sync = 0;
                        rdev->faulty = 1;
                        rdev->raid_disk = -1;
                        break;
                default:
                        rdev->in_sync = 1;
                        rdev->faulty = 0;
                        rdev->raid_disk = role;
                        break;
                }
        }
        return 0;
}

static void super_1_sync(mddev_t *mddev, mdk_rdev_t *rdev)
{
        struct mdp_superblock_1 *sb;
        struct list_head *tmp;
        mdk_rdev_t *rdev2;
        int max_dev, i;
        /* make rdev->sb match mddev and rdev data. */

        sb = (struct mdp_superblock_1*)page_address(rdev->sb_page);

        sb->feature_map = 0;
        sb->pad0 = 0;
        memset(sb->pad1, 0, sizeof(sb->pad1));
        memset(sb->pad2, 0, sizeof(sb->pad2));
        memset(sb->pad3, 0, sizeof(sb->pad3));

        sb->utime = cpu_to_le64((__u64)mddev->utime);
        sb->events = cpu_to_le64(mddev->events);
        if (mddev->in_sync)
                sb->resync_offset = cpu_to_le64(mddev->recovery_cp);
        else
                sb->resync_offset = cpu_to_le64(0);

        max_dev = 0;
        ITERATE_RDEV(mddev,rdev2,tmp)
                if (rdev2->desc_nr > max_dev)
                        max_dev = rdev2->desc_nr;

        sb->max_dev = max_dev;
        for (i=0; i<max_dev;i++)
                sb->dev_roles[i] = cpu_to_le16(0xfffe);

        ITERATE_RDEV(mddev,rdev2,tmp) {
                i = rdev2->desc_nr;
                if (rdev2->faulty)
                        sb->dev_roles[i] = cpu_to_le16(0xfffe);
                else if (rdev2->in_sync)
                        sb->dev_roles[i] = cpu_to_le16(rdev2->raid_disk);
                else
                        sb->dev_roles[i] = cpu_to_le16(0xffff);
        }

        sb->recovery_offset = cpu_to_le64(0); /* not supported yet */
}


struct super_type super_types[] = {
        [0] = {
                .name   = "0.90.0",
                .owner  = THIS_MODULE,
                .load_super     = super_90_load,
                .validate_super = super_90_validate,
                .sync_super     = super_90_sync,
        },
        [1] = {
                .name   = "md-1",
                .owner  = THIS_MODULE,
                .load_super     = super_1_load,
                .validate_super = super_1_validate,
                .sync_super     = super_1_sync,
        },
};

static mdk_rdev_t * match_dev_unit(mddev_t *mddev, mdk_rdev_t *dev)
{
        struct list_head *tmp;
        mdk_rdev_t *rdev;

        ITERATE_RDEV(mddev,rdev,tmp)
                if (rdev->bdev->bd_contains == dev->bdev->bd_contains)
                        return rdev;

        return NULL;
}

static int match_mddev_units(mddev_t *mddev1, mddev_t *mddev2)
{
        struct list_head *tmp;
        mdk_rdev_t *rdev;

        ITERATE_RDEV(mddev1,rdev,tmp)
                if (match_dev_unit(mddev2, rdev))
                        return 1;

        return 0;
}

static LIST_HEAD(pending_raid_disks);

static int bind_rdev_to_array(mdk_rdev_t * rdev, mddev_t * mddev)
{
        mdk_rdev_t *same_pdev;
        char b[BDEVNAME_SIZE], b2[BDEVNAME_SIZE];

        if (rdev->mddev) {
                MD_BUG();
                return -EINVAL;
        }
        same_pdev = match_dev_unit(mddev, rdev);
        if (same_pdev)
                printk(KERN_WARNING
                        "md%d: WARNING: %s appears to be on the same physical"
                        " disk as %s. True\n     protection against single-disk"
                        " failure might be compromised.\n",
                        mdidx(mddev), bdevname(rdev->bdev,b),
                        bdevname(same_pdev->bdev,b2));

        /* Verify rdev->desc_nr is unique.
         * If it is -1, assign a free number, else
         * check number is not in use
         */
        if (rdev->desc_nr < 0) {
                int choice = 0;
                if (mddev->pers) choice = mddev->raid_disks;
                while (find_rdev_nr(mddev, choice))
                        choice++;
                rdev->desc_nr = choice;
        } else {
                if (find_rdev_nr(mddev, rdev->desc_nr))
                        return -EBUSY;
        }

        list_add(&rdev->same_set, &mddev->disks);
        rdev->mddev = mddev;
        printk(KERN_INFO "md: bind<%s>\n", bdevname(rdev->bdev,b));
        return 0;
}

static void unbind_rdev_from_array(mdk_rdev_t * rdev)
{
        char b[BDEVNAME_SIZE];
        if (!rdev->mddev) {
                MD_BUG();
                return;
        }
        list_del_init(&rdev->same_set);
        printk(KERN_INFO "md: unbind<%s>\n", bdevname(rdev->bdev,b));
        rdev->mddev = NULL;
}

/*
 * prevent the device from being mounted, repartitioned or
 * otherwise reused by a RAID array (or any other kernel
 * subsystem), by opening the device. [simply getting an
 * inode is not enough, the SCSI module usage code needs
 * an explicit open() on the device]
 */
static int lock_rdev(mdk_rdev_t *rdev, dev_t dev)
{
        int err = 0;
        struct block_device *bdev;

        bdev = open_by_devnum(dev, FMODE_READ|FMODE_WRITE, BDEV_RAW);
        if (IS_ERR(bdev))
                return PTR_ERR(bdev);
        err = bd_claim(bdev, rdev);
        if (err) {
                blkdev_put(bdev, BDEV_RAW);
                return err;
        }
        rdev->bdev = bdev;
        return err;
}

static void unlock_rdev(mdk_rdev_t *rdev)
{
        struct block_device *bdev = rdev->bdev;
        rdev->bdev = NULL;
        if (!bdev)
                MD_BUG();
        bd_release(bdev);
        blkdev_put(bdev, BDEV_RAW);
}

void md_autodetect_dev(dev_t dev);

static void export_rdev(mdk_rdev_t * rdev)
{
        char b[BDEVNAME_SIZE];
        printk(KERN_INFO "md: export_rdev(%s)\n",
                bdevname(rdev->bdev,b));
        if (rdev->mddev)
                MD_BUG();
        free_disk_sb(rdev);
        list_del_init(&rdev->same_set);
#ifndef MODULE
        md_autodetect_dev(rdev->bdev->bd_dev);
#endif
        unlock_rdev(rdev);
        kfree(rdev);
}

static void kick_rdev_from_array(mdk_rdev_t * rdev)
{
        unbind_rdev_from_array(rdev);
        export_rdev(rdev);
}

static void export_array(mddev_t *mddev)
{
        struct list_head *tmp;
        mdk_rdev_t *rdev;

        ITERATE_RDEV(mddev,rdev,tmp) {
                if (!rdev->mddev) {
                        MD_BUG();
                        continue;
                }
                kick_rdev_from_array(rdev);
        }
        if (!list_empty(&mddev->disks))
                MD_BUG();
        mddev->raid_disks = 0;
        mddev->major_version = 0;
}

static void print_desc(mdp_disk_t *desc)
{
        char b[BDEVNAME_SIZE];

        printk(" DISK<N:%d,%s(%d,%d),R:%d,S:%d>\n", desc->number,
                __bdevname(MKDEV(desc->major, desc->minor), b),
                desc->major,desc->minor,desc->raid_disk,desc->state);
}

static void print_sb(mdp_super_t *sb)
{
        int i;

        printk(KERN_INFO
                "md:  SB: (V:%d.%d.%d) ID:<%08x.%08x.%08x.%08x> CT:%08x\n",
                sb->major_version, sb->minor_version, sb->patch_version,
                sb->set_uuid0, sb->set_uuid1, sb->set_uuid2, sb->set_uuid3,
                sb->ctime);
        printk(KERN_INFO "md:     L%d S%08d ND:%d RD:%d md%d LO:%d CS:%d\n",
                sb->level, sb->size, sb->nr_disks, sb->raid_disks,
                sb->md_minor, sb->layout, sb->chunk_size);
        printk(KERN_INFO "md:     UT:%08x ST:%d AD:%d WD:%d"
                " FD:%d SD:%d CSUM:%08x E:%08lx\n",
                sb->utime, sb->state, sb->active_disks, sb->working_disks,
                sb->failed_disks, sb->spare_disks,
                sb->sb_csum, (unsigned long)sb->events_lo);

        printk(KERN_INFO);
        for (i = 0; i < MD_SB_DISKS; i++) {
                mdp_disk_t *desc;

                desc = sb->disks + i;
                if (desc->number || desc->major || desc->minor ||
                    desc->raid_disk || (desc->state && (desc->state != 4))) {
                        printk("     D %2d: ", i);
                        print_desc(desc);
                }
        }
        printk(KERN_INFO "md:     THIS: ");
        print_desc(&sb->this_disk);

}

static void print_rdev(mdk_rdev_t *rdev)
{
        char b[BDEVNAME_SIZE];
        printk(KERN_INFO "md: rdev %s, SZ:%08llu F:%d S:%d DN:%u\n",
                bdevname(rdev->bdev,b), (unsigned long long)rdev->size,
                rdev->faulty, rdev->in_sync, rdev->desc_nr);
        if (rdev->sb_loaded) {
                printk(KERN_INFO "md: rdev superblock:\n");
                print_sb((mdp_super_t*)page_address(rdev->sb_page));
        } else
                printk(KERN_INFO "md: no rdev superblock!\n");
}

void md_print_devices(void)
{
        struct list_head *tmp, *tmp2;
        mdk_rdev_t *rdev;
        mddev_t *mddev;
        char b[BDEVNAME_SIZE];

        printk("\n");
        printk("md:     **********************************\n");
        printk("md:     * <COMPLETE RAID STATE PRINTOUT> *\n");
        printk("md:     **********************************\n");
        ITERATE_MDDEV(mddev,tmp) {
                printk("md%d: ", mdidx(mddev));

                ITERATE_RDEV(mddev,rdev,tmp2)
                        printk("<%s>", bdevname(rdev->bdev,b));
                printk("\n");

                ITERATE_RDEV(mddev,rdev,tmp2)
                        print_rdev(rdev);
        }
        printk("md:     **********************************\n");
        printk("\n");
}


static int write_disk_sb(mdk_rdev_t * rdev)
{
        char b[BDEVNAME_SIZE];
        if (!rdev->sb_loaded) {
                MD_BUG();
                return 1;
        }
        if (rdev->faulty) {
                MD_BUG();
                return 1;
        }

        dprintk(KERN_INFO "(write) %s's sb offset: %llu\n",
                bdevname(rdev->bdev,b),
               (unsigned long long)rdev->sb_offset);

        if (sync_page_io(rdev->bdev, rdev->sb_offset<<1, MD_SB_BYTES, rdev->sb_page, WRITE))
                return 0;

        printk("md: write_disk_sb failed for device %s\n",
                bdevname(rdev->bdev,b));
        return 1;
}

static void sync_sbs(mddev_t * mddev)
{
        mdk_rdev_t *rdev;
        struct list_head *tmp;

        ITERATE_RDEV(mddev,rdev,tmp) {
                super_types[mddev->major_version].
                        sync_super(mddev, rdev);
                rdev->sb_loaded = 1;
        }
}

static void md_update_sb(mddev_t * mddev)
{
        int err, count = 100;
        struct list_head *tmp;
        mdk_rdev_t *rdev;

        mddev->sb_dirty = 0;
repeat:
        mddev->utime = get_seconds();
        mddev->events ++;

        if (!mddev->events) {
                /*
                 * oops, this 64-bit counter should never wrap.
                 * Either we are in around ~1 trillion A.C., assuming
                 * 1 reboot per second, or we have a bug:
                 */
                MD_BUG();
                mddev->events --;
        }
        sync_sbs(mddev);

        /*
         * do not write anything to disk if using
         * nonpersistent superblocks
         */
        if (!mddev->persistent)
                return;

        dprintk(KERN_INFO
                "md: updating md%d RAID superblock on device (in sync %d)\n",
                mdidx(mddev),mddev->in_sync);

        err = 0;
        ITERATE_RDEV(mddev,rdev,tmp) {
                char b[BDEVNAME_SIZE];
                dprintk(KERN_INFO "md: ");
                if (rdev->faulty)
                        dprintk("(skipping faulty ");

                dprintk("%s ", bdevname(rdev->bdev,b));
                if (!rdev->faulty) {
                        err += write_disk_sb(rdev);
                } else
                        dprintk(")\n");
                if (!err && mddev->level == LEVEL_MULTIPATH)
                        /* only need to write one superblock... */
                        break;
        }
        if (err) {
                if (--count) {
                        printk(KERN_ERR "md: errors occurred during superblock"
                                " update, repeating\n");
                        goto repeat;
                }
                printk(KERN_ERR
                        "md: excessive errors occurred during superblock update, exiting\n");
        }
}

/*
 * Import a device. If 'super_format' >= 0, then sanity check the superblock
 *
 * mark the device faulty if:
 *
 *   - the device is nonexistent (zero size)
 *   - the device has no valid superblock
 *
 * a faulty rdev _never_ has rdev->sb set.
 */
static mdk_rdev_t *md_import_device(dev_t newdev, int super_format, int super_minor)
{
        char b[BDEVNAME_SIZE];
        int err;
        mdk_rdev_t *rdev;
        sector_t size;

        rdev = (mdk_rdev_t *) kmalloc(sizeof(*rdev), GFP_KERNEL);
        if (!rdev) {
                printk(KERN_ERR "md: could not alloc mem for %s!\n",
                        __bdevname(newdev, b));
                return ERR_PTR(-ENOMEM);
        }
        memset(rdev, 0, sizeof(*rdev));

        if ((err = alloc_disk_sb(rdev)))
                goto abort_free;

        err = lock_rdev(rdev, newdev);
        if (err) {
                printk(KERN_ERR "md: could not lock %s.\n",
                        __bdevname(newdev, b));
                goto abort_free;
        }
        rdev->desc_nr = -1;
        rdev->faulty = 0;
        rdev->in_sync = 0;
        rdev->data_offset = 0;
        atomic_set(&rdev->nr_pending, 0);

        size = rdev->bdev->bd_inode->i_size >> BLOCK_SIZE_BITS;
        if (!size) {
                printk(KERN_WARNING
                        "md: %s has zero or unknown size, marking faulty!\n",
                        bdevname(rdev->bdev,b));
                err = -EINVAL;
                goto abort_free;
        }

        if (super_format >= 0) {
                err = super_types[super_format].
                        load_super(rdev, NULL, super_minor);
                if (err == -EINVAL) {
                        printk(KERN_WARNING
                                "md: %s has invalid sb, not importing!\n",
                                bdevname(rdev->bdev,b));
                        goto abort_free;
                }
                if (err < 0) {
                        printk(KERN_WARNING
                                "md: could not read %s's sb, not importing!\n",
                                bdevname(rdev->bdev,b));
                        goto abort_free;
                }
        }
        INIT_LIST_HEAD(&rdev->same_set);

        return rdev;

abort_free:
        if (rdev->sb_page) {
                if (rdev->bdev)
                        unlock_rdev(rdev);
                free_disk_sb(rdev);
        }
        kfree(rdev);
        return ERR_PTR(err);
}

/*
 * Check a full RAID array for plausibility
 */


static int analyze_sbs(mddev_t * mddev)
{
        int i;
        struct list_head *tmp;
        mdk_rdev_t *rdev, *freshest;
        char b[BDEVNAME_SIZE];

        freshest = NULL;
        ITERATE_RDEV(mddev,rdev,tmp)
                switch (super_types[mddev->major_version].
                        load_super(rdev, freshest, mddev->minor_version)) {
                case 1:
                        freshest = rdev;
                        break;
                case 0:
                        break;
                default:
                        printk(KERN_ERR
                                "md: fatal superblock inconsistency in %s"
                                " -- removing from array\n",
                                bdevname(rdev->bdev,b));
                        kick_rdev_from_array(rdev);
                }


        super_types[mddev->major_version].
                validate_super(mddev, freshest);

        i = 0;
        ITERATE_RDEV(mddev,rdev,tmp) {
                if (rdev != freshest)
                        if (super_types[mddev->major_version].
                            validate_super(mddev, rdev)) {
                                printk(KERN_WARNING "md: kicking non-fresh %s"
                                        " from array!\n",
                                        bdevname(rdev->bdev,b));
                                kick_rdev_from_array(rdev);
                                continue;
                        }
                if (mddev->level == LEVEL_MULTIPATH) {
                        rdev->desc_nr = i++;
                        rdev->raid_disk = rdev->desc_nr;
                        rdev->in_sync = 1;
                }
        }


        /*
         * Check if we can support this RAID array
         */
        if (mddev->major_version != MD_MAJOR_VERSION ||
                        mddev->minor_version > MD_MINOR_VERSION) {
                printk(KERN_ALERT
                        "md: md%d: unsupported raid array version %d.%d.%d\n",
                        mdidx(mddev), mddev->major_version,
                        mddev->minor_version, mddev->patch_version);
                goto abort;
        }

        if ((mddev->recovery_cp != MaxSector) && ((mddev->level == 1) ||
                        (mddev->level == 4) || (mddev->level == 5)))
                printk(KERN_ERR "md: md%d: raid array is not clean"
                        " -- starting background reconstruction\n",
                        mdidx(mddev));

        return 0;
abort:
        return 1;
}


static struct kobject *md_probe(dev_t dev, int *part, void *data)
{
        static DECLARE_MUTEX(disks_sem);
        int unit = MINOR(dev);
        mddev_t *mddev = mddev_find(unit);
        struct gendisk *disk;

        if (!mddev)
                return NULL;

        down(&disks_sem);
        if (disks[unit]) {
                up(&disks_sem);
                mddev_put(mddev);
                return NULL;
        }
        disk = alloc_disk(1);
        if (!disk) {
                up(&disks_sem);
                mddev_put(mddev);
                return NULL;
        }
        disk->major = MD_MAJOR;
        disk->first_minor = mdidx(mddev);
        sprintf(disk->disk_name, "md%d", mdidx(mddev));
        disk->fops = &md_fops;
        disk->private_data = mddev;
        disk->queue = mddev->queue;
        add_disk(disk);
        disks[mdidx(mddev)] = disk;
        up(&disks_sem);
        return NULL;
}

void md_wakeup_thread(mdk_thread_t *thread);

static void md_safemode_timeout(unsigned long data)
{
        mddev_t *mddev = (mddev_t *) data;

        mddev->safemode = 1;
        md_wakeup_thread(mddev->thread);
}


static int do_md_run(mddev_t * mddev)
{
        int pnum, err;
        int chunk_size;
        struct list_head *tmp;
        mdk_rdev_t *rdev;
        struct gendisk *disk;
        char b[BDEVNAME_SIZE];

        if (list_empty(&mddev->disks)) {
                MD_BUG();
                return -EINVAL;
        }

        if (mddev->pers)
                return -EBUSY;

        /*
         * Analyze all RAID superblock(s)
         */
        if (!mddev->raid_disks && analyze_sbs(mddev)) {
                MD_BUG();
                return -EINVAL;
        }

        chunk_size = mddev->chunk_size;
        pnum = level_to_pers(mddev->level);

        if ((pnum != MULTIPATH) && (pnum != RAID1)) {
                if (!chunk_size) {
                        /*
                         * 'default chunksize' in the old md code used to
                         * be PAGE_SIZE, baaad.
                         * we abort here to be on the safe side. We don't
                         * want to continue the bad practice.
                         */
                        printk(KERN_ERR
                                "no chunksize specified, see 'man raidtab'\n");
                        return -EINVAL;
                }
                if (chunk_size > MAX_CHUNK_SIZE) {
                        printk(KERN_ERR "too big chunk_size: %d > %d\n",
                                chunk_size, MAX_CHUNK_SIZE);
                        return -EINVAL;
                }
                /*
                 * chunk-size has to be a power of 2 and a multiple of PAGE_SIZE
                 */
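                /*
                 * Sketch of why the test below works: ffz(~x) is the
                 * index of the lowest set bit of x, so (1 << ffz(~x))
                 * equals x exactly when x has a single bit set.
                 * E.g. x = 65536: ffz(~x) = 16, 1<<16 == 65536, ok;
                 * x = 65537: ffz(~x) = 0, 1<<0 == 1 != 65537, rejected.
                 */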
1547                 if ( (1 << ffz(~chunk_size)) != chunk_size) {
1548                         MD_BUG();
1549                         return -EINVAL;
1550                 }
1551                 if (chunk_size < PAGE_SIZE) {
1552                         printk(KERN_ERR "too small chunk_size: %d < %ld\n",
1553                                 chunk_size, PAGE_SIZE);
1554                         return -EINVAL;
1555                 }
1556
1557                 /* devices must have minimum size of one chunk */
1558                 ITERATE_RDEV(mddev,rdev,tmp) {
1559                         if (rdev->faulty)
1560                                 continue;
1561                         if (rdev->size < chunk_size / 1024) {
1562                                 printk(KERN_WARNING
1563                                         "md: Dev %s smaller than chunk_size:"
1564                                         " %lluk < %dk\n",
1565                                         bdevname(rdev->bdev,b),
1566                                         (unsigned long long)rdev->size,
1567                                         chunk_size / 1024);
1568                                 return -EINVAL;
1569                         }
1570                 }
1571         }
1572
1573         if (pnum >= MAX_PERSONALITY) {
1574                 MD_BUG();
1575                 return -EINVAL;
1576         }
1577
1578 #ifdef CONFIG_KMOD
1579         if (!pers[pnum])
1580         {
1581                 request_module("md-personality-%d", pnum);
1582         }
1583 #endif
1584
1585         /*
1586          * Drop all container device buffers, from now on
1587          * the only valid external interface is through the md
1588          * device.
1589          * Also find largest hardsector size
1590          */
1591         ITERATE_RDEV(mddev,rdev,tmp) {
1592                 if (rdev->faulty)
1593                         continue;
1594                 sync_blockdev(rdev->bdev);
1595                 invalidate_bdev(rdev->bdev, 0);
1596         }
1597
1598         md_probe(mdidx(mddev), NULL, NULL);
1599         disk = disks[mdidx(mddev)];
1600         if (!disk)
1601                 return -ENOMEM;
1602
1603         spin_lock(&pers_lock);
1604         if (!pers[pnum] || !try_module_get(pers[pnum]->owner)) {
1605                 spin_unlock(&pers_lock);
1606                 printk(KERN_ERR "md: personality %d is not loaded!\n",
1607                        pnum);
1608                 return -EINVAL;
1609         }
1610
1611         mddev->pers = pers[pnum];
1612         spin_unlock(&pers_lock);
1613
1614         blk_queue_make_request(mddev->queue, mddev->pers->make_request);
	printk(KERN_INFO "%s: setting max_sectors to %d, segment boundary to %d\n",
1616                 disk->disk_name,
1617                 chunk_size >> 9,
1618                 (chunk_size>>1)-1);
1619         blk_queue_max_sectors(mddev->queue, chunk_size >> 9);
1620         blk_queue_segment_boundary(mddev->queue, (chunk_size>>1) - 1);
1621         mddev->queue->queuedata = mddev;
1622
1623         err = mddev->pers->run(mddev);
1624         if (err) {
1625                 printk(KERN_ERR "md: pers->run() failed ...\n");
1626                 module_put(mddev->pers->owner);
1627                 mddev->pers = NULL;
1628                 return -EINVAL;
1629         }
1630         atomic_set(&mddev->writes_pending,0);
1631         mddev->safemode = 0;
1632         mddev->safemode_timer.function = md_safemode_timeout;
1633         mddev->safemode_timer.data = (unsigned long) mddev;
1634         mddev->safemode_delay = (20 * HZ)/1000 +1; /* 20 msec delay */
1635         mddev->in_sync = 1;
1636         
1637         set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
1638         md_wakeup_thread(mddev->thread);
1639         set_capacity(disk, mddev->array_size<<1);
1640         return 0;
1641 }
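
/*
 * Worked example (illustrative numbers only) for the power-of-two
 * chunk-size check in do_md_run() above:
 *
 *   chunk_size = 64K = 0x10000:
 *     ffz(~0x10000) == 16, and (1 << 16) == 0x10000 == chunk_size,
 *     so 64K is accepted.
 *
 *   chunk_size = 96K = 0x18000 (bits 15 and 16 set):
 *     ffz(~0x18000) == 15, and (1 << 15) == 0x8000 != 0x18000,
 *     so 96K is rejected.
 */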
1642
1643 static int restart_array(mddev_t *mddev)
1644 {
1645         struct gendisk *disk = disks[mdidx(mddev)];
1646         int err;
1647
1648         /*
1649          * Complain if it has no devices
1650          */
1651         err = -ENXIO;
1652         if (list_empty(&mddev->disks))
1653                 goto out;
1654
1655         if (mddev->pers) {
1656                 err = -EBUSY;
1657                 if (!mddev->ro)
1658                         goto out;
1659
1660                 mddev->safemode = 0;
1661                 mddev->ro = 0;
1662                 set_disk_ro(disk, 0);
1663
1664                 printk(KERN_INFO "md: md%d switched to read-write mode.\n",
1665                         mdidx(mddev));
1666                 /*
1667                  * Kick recovery or resync if necessary
1668                  */
1669                 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
1670                 md_wakeup_thread(mddev->thread);
1671                 err = 0;
1672         } else {
1673                 printk(KERN_ERR "md: md%d has no personality assigned.\n",
1674                         mdidx(mddev));
1675                 err = -EINVAL;
1676         }
1677
1678 out:
1679         return err;
1680 }
1681
1682 static int do_md_stop(mddev_t * mddev, int ro)
1683 {
1684         int err = 0;
1685         struct gendisk *disk = disks[mdidx(mddev)];
1686
1687         if (mddev->pers) {
1688                 if (atomic_read(&mddev->active)>2) {
1689                         printk("md: md%d still in use.\n",mdidx(mddev));
1690                         return -EBUSY;
1691                 }
1692
1693                 if (mddev->sync_thread) {
1694                         set_bit(MD_RECOVERY_INTR, &mddev->recovery);
1695                         md_unregister_thread(mddev->sync_thread);
1696                         mddev->sync_thread = NULL;
1697                 }
1698
1699                 del_timer_sync(&mddev->safemode_timer);
1700
1701                 invalidate_partition(disk, 0);
1702
1703                 if (ro) {
1704                         err  = -ENXIO;
1705                         if (mddev->ro)
1706                                 goto out;
1707                         mddev->ro = 1;
1708                 } else {
1709                         if (mddev->ro)
1710                                 set_disk_ro(disk, 0);
1711                         if (mddev->pers->stop(mddev)) {
1712                                 err = -EBUSY;
1713                                 if (mddev->ro)
1714                                         set_disk_ro(disk, 1);
1715                                 goto out;
1716                         }
1717                         module_put(mddev->pers->owner);
1718                         mddev->pers = NULL;
1719                         if (mddev->ro)
1720                                 mddev->ro = 0;
1721                 }
1722                 if (mddev->raid_disks) {
1723                         /* mark array as shutdown cleanly */
1724                         mddev->in_sync = 1;
1725                         md_update_sb(mddev);
1726                 }
1727                 if (ro)
1728                         set_disk_ro(disk, 1);
1729         }
1730         /*
1731          * Free resources if final stop
1732          */
1733         if (!ro) {
1734                 struct gendisk *disk;
1735                 printk(KERN_INFO "md: md%d stopped.\n", mdidx(mddev));
1736
1737                 export_array(mddev);
1738
1739                 mddev->array_size = 0;
1740                 disk = disks[mdidx(mddev)];
1741                 if (disk)
1742                         set_capacity(disk, 0);
1743         } else
1744                 printk(KERN_INFO "md: md%d switched to read-only mode.\n",
1745                         mdidx(mddev));
1746         err = 0;
1747 out:
1748         return err;
1749 }
1750
1751 static void autorun_array(mddev_t *mddev)
1752 {
1753         mdk_rdev_t *rdev;
1754         struct list_head *tmp;
1755         int err;
1756
1757         if (list_empty(&mddev->disks)) {
1758                 MD_BUG();
1759                 return;
1760         }
1761
1762         printk(KERN_INFO "md: running: ");
1763
1764         ITERATE_RDEV(mddev,rdev,tmp) {
1765                 char b[BDEVNAME_SIZE];
1766                 printk("<%s>", bdevname(rdev->bdev,b));
1767         }
1768         printk("\n");
1769
1770         err = do_md_run (mddev);
1771         if (err) {
		printk(KERN_WARNING "md: do_md_run() returned %d\n", err);
1773                 do_md_stop (mddev, 0);
1774         }
1775 }
1776
/*
 * Let's try to run arrays based on all disks that have arrived
 * so far (those are in pending_raid_disks).
 *
 * The method: pick the first pending disk, collect all disks with
 * the same UUID, remove them all from the pending list and put them
 * into the 'same_array' list.  Then order this list based on superblock
 * update time (freshest comes first), kick out 'old' disks and
 * compare superblocks.  If everything's fine, then run the array.
 *
 * mddev_find() takes a reference on the mddev it returns; we drop
 * it with mddev_put() when we are done with the unit.
 */
1789 static void autorun_devices(void)
1790 {
1791         struct list_head candidates;
1792         struct list_head *tmp;
1793         mdk_rdev_t *rdev0, *rdev;
1794         mddev_t *mddev;
1795         char b[BDEVNAME_SIZE];
1796
1797         printk(KERN_INFO "md: autorun ...\n");
1798         while (!list_empty(&pending_raid_disks)) {
1799                 rdev0 = list_entry(pending_raid_disks.next,
1800                                          mdk_rdev_t, same_set);
1801
1802                 printk(KERN_INFO "md: considering %s ...\n",
1803                         bdevname(rdev0->bdev,b));
1804                 INIT_LIST_HEAD(&candidates);
1805                 ITERATE_RDEV_PENDING(rdev,tmp)
1806                         if (super_90_load(rdev, rdev0, 0) >= 0) {
1807                                 printk(KERN_INFO "md:  adding %s ...\n",
1808                                         bdevname(rdev->bdev,b));
1809                                 list_move(&rdev->same_set, &candidates);
1810                         }
1811                 /*
1812                  * now we have a set of devices, with all of them having
1813                  * mostly sane superblocks. It's time to allocate the
1814                  * mddev.
1815                  */
1816
1817                 mddev = mddev_find(rdev0->preferred_minor);
1818                 if (!mddev) {
1819                         printk(KERN_ERR 
1820                                 "md: cannot allocate memory for md drive.\n");
1821                         break;
1822                 }
1823                 if (mddev_lock(mddev)) 
1824                         printk(KERN_WARNING "md: md%d locked, cannot run\n",
1825                                mdidx(mddev));
1826                 else if (mddev->raid_disks || mddev->major_version
1827                          || !list_empty(&mddev->disks)) {
1828                         printk(KERN_WARNING 
1829                                 "md: md%d already running, cannot run %s\n",
1830                                 mdidx(mddev), bdevname(rdev0->bdev,b));
1831                         mddev_unlock(mddev);
1832                 } else {
1833                         printk(KERN_INFO "md: created md%d\n", mdidx(mddev));
1834                         ITERATE_RDEV_GENERIC(candidates,rdev,tmp) {
1835                                 list_del_init(&rdev->same_set);
1836                                 if (bind_rdev_to_array(rdev, mddev))
1837                                         export_rdev(rdev);
1838                         }
1839                         autorun_array(mddev);
1840                         mddev_unlock(mddev);
1841                 }
1842                 /* on success, candidates will be empty, on error
1843                  * it won't...
1844                  */
1845                 ITERATE_RDEV_GENERIC(candidates,rdev,tmp)
1846                         export_rdev(rdev);
1847                 mddev_put(mddev);
1848         }
1849         printk(KERN_INFO "md: ... autorun DONE.\n");
1850 }
1851
1852 /*
1853  * import RAID devices based on one partition
1854  * if possible, the array gets run as well.
1855  */
1856
1857 static int autostart_array(dev_t startdev)
1858 {
1859         char b[BDEVNAME_SIZE];
1860         int err = -EINVAL, i;
1861         mdp_super_t *sb = NULL;
1862         mdk_rdev_t *start_rdev = NULL, *rdev;
1863
1864         start_rdev = md_import_device(startdev, 0, 0);
1865         if (IS_ERR(start_rdev)) {
1866                 printk(KERN_WARNING "md: could not import %s!\n",
1867                         __bdevname(startdev, b));
1868                 return err;
1869         }
1870
1871         /* NOTE: this can only work for 0.90.0 superblocks */
1872         sb = (mdp_super_t*)page_address(start_rdev->sb_page);
1873         if (sb->major_version != 0 ||
1874             sb->minor_version != 90 ) {
1875                 printk(KERN_WARNING "md: can only autostart 0.90.0 arrays\n");
1876                 export_rdev(start_rdev);
1877                 return err;
1878         }
1879
1880         if (start_rdev->faulty) {
1881                 printk(KERN_WARNING 
1882                         "md: can not autostart based on faulty %s!\n",
1883                         bdevname(start_rdev->bdev,b));
1884                 export_rdev(start_rdev);
1885                 return err;
1886         }
1887         list_add(&start_rdev->same_set, &pending_raid_disks);
1888
1889         for (i = 0; i < MD_SB_DISKS; i++) {
1890                 mdp_disk_t *desc;
1891                 dev_t dev;
1892
1893                 desc = sb->disks + i;
1894                 dev = MKDEV(desc->major, desc->minor);
1895
1896                 if (!dev)
1897                         continue;
1898                 if (dev == startdev)
1899                         continue;
1900                 rdev = md_import_device(dev, 0, 0);
1901                 if (IS_ERR(rdev)) {
1902                         printk(KERN_WARNING "md: could not import %s,"
1903                                 " trying to run array nevertheless.\n",
1904                                 __bdevname(dev, b));
1905                         continue;
1906                 }
1907                 list_add(&rdev->same_set, &pending_raid_disks);
1908         }
1909
	/*
	 * possibly propagate return codes from autorun_devices() here
	 */
1913         autorun_devices();
1914         return 0;
1915
1916 }
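
/*
 * Hypothetical userspace sketch (not part of the driver): autostarting
 * an array from one member partition via the START_ARRAY ioctl, which
 * ends up in autostart_array() above.  The device path and device
 * numbers are illustrative assumptions.
 */
#if 0
#include <fcntl.h>
#include <sys/ioctl.h>
#include <sys/sysmacros.h>
#include <unistd.h>
#include <linux/raid/md_u.h>

int autostart_from(const char *md_dev, unsigned int maj, unsigned int min)
{
	int fd = open(md_dev, O_RDONLY);
	int err;

	if (fd < 0)
		return -1;
	/* arg is the dev_t of one component device, e.g. /dev/sda1 */
	err = ioctl(fd, START_ARRAY, (unsigned long)makedev(maj, min));
	close(fd);
	return err;
}
#endif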
1917
1918
1919 static int get_version(void * arg)
1920 {
1921         mdu_version_t ver;
1922
1923         ver.major = MD_MAJOR_VERSION;
1924         ver.minor = MD_MINOR_VERSION;
1925         ver.patchlevel = MD_PATCHLEVEL_VERSION;
1926
1927         if (copy_to_user(arg, &ver, sizeof(ver)))
1928                 return -EFAULT;
1929
1930         return 0;
1931 }
1932
1933 static int get_array_info(mddev_t * mddev, void * arg)
1934 {
1935         mdu_array_info_t info;
1936         int nr,working,active,failed,spare;
1937         mdk_rdev_t *rdev;
1938         struct list_head *tmp;
1939
1940         nr=working=active=failed=spare=0;
1941         ITERATE_RDEV(mddev,rdev,tmp) {
1942                 nr++;
1943                 if (rdev->faulty)
1944                         failed++;
1945                 else {
1946                         working++;
1947                         if (rdev->in_sync)
1948                                 active++;       
1949                         else
1950                                 spare++;
1951                 }
1952         }
1953
1954         info.major_version = mddev->major_version;
1955         info.minor_version = mddev->minor_version;
1956         info.patch_version = 1;
1957         info.ctime         = mddev->ctime;
1958         info.level         = mddev->level;
1959         info.size          = mddev->size;
1960         info.nr_disks      = nr;
1961         info.raid_disks    = mddev->raid_disks;
1962         info.md_minor      = mddev->__minor;
1963         info.not_persistent= !mddev->persistent;
1964
1965         info.utime         = mddev->utime;
1966         info.state         = 0;
1967         if (mddev->in_sync)
1968                 info.state = (1<<MD_SB_CLEAN);
1969         info.active_disks  = active;
1970         info.working_disks = working;
1971         info.failed_disks  = failed;
1972         info.spare_disks   = spare;
1973
1974         info.layout        = mddev->layout;
1975         info.chunk_size    = mddev->chunk_size;
1976
1977         if (copy_to_user(arg, &info, sizeof(info)))
1978                 return -EFAULT;
1979
1980         return 0;
1981 }
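
/*
 * Hypothetical userspace sketch (not part of the driver): how a tool
 * might query an array through the GET_ARRAY_INFO ioctl serviced by
 * get_array_info() above.  The device path and error handling are
 * illustrative assumptions.
 */
#if 0
#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/raid/md_u.h>

int print_array_info(void)
{
	mdu_array_info_t info;
	int fd = open("/dev/md0", O_RDONLY);

	if (fd < 0)
		return -1;
	if (ioctl(fd, GET_ARRAY_INFO, &info) == 0)
		printf("level %d, %d raid disks, %d active\n",
		       info.level, info.raid_disks, info.active_disks);
	close(fd);
	return 0;
}
#endif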
1982
1983 static int get_disk_info(mddev_t * mddev, void * arg)
1984 {
1985         mdu_disk_info_t info;
1986         unsigned int nr;
1987         mdk_rdev_t *rdev;
1988
1989         if (copy_from_user(&info, arg, sizeof(info)))
1990                 return -EFAULT;
1991
1992         nr = info.number;
1993
1994         rdev = find_rdev_nr(mddev, nr);
1995         if (rdev) {
1996                 info.major = MAJOR(rdev->bdev->bd_dev);
1997                 info.minor = MINOR(rdev->bdev->bd_dev);
1998                 info.raid_disk = rdev->raid_disk;
1999                 info.state = 0;
2000                 if (rdev->faulty)
2001                         info.state |= (1<<MD_DISK_FAULTY);
2002                 else if (rdev->in_sync) {
2003                         info.state |= (1<<MD_DISK_ACTIVE);
2004                         info.state |= (1<<MD_DISK_SYNC);
2005                 }
2006         } else {
2007                 info.major = info.minor = 0;
2008                 info.raid_disk = -1;
2009                 info.state = (1<<MD_DISK_REMOVED);
2010         }
2011
2012         if (copy_to_user(arg, &info, sizeof(info)))
2013                 return -EFAULT;
2014
2015         return 0;
2016 }
2017
2018 static int add_new_disk(mddev_t * mddev, mdu_disk_info_t *info)
2019 {
2020         char b[BDEVNAME_SIZE], b2[BDEVNAME_SIZE];
2021         mdk_rdev_t *rdev;
2022         dev_t dev;
2023         dev = MKDEV(info->major,info->minor);
2024         if (!mddev->raid_disks) {
2025                 int err;
2026                 /* expecting a device which has a superblock */
2027                 rdev = md_import_device(dev, mddev->major_version, mddev->minor_version);
2028                 if (IS_ERR(rdev)) {
2029                         printk(KERN_WARNING 
2030                                 "md: md_import_device returned %ld\n",
2031                                 PTR_ERR(rdev));
2032                         return PTR_ERR(rdev);
2033                 }
2034                 if (!list_empty(&mddev->disks)) {
2035                         mdk_rdev_t *rdev0 = list_entry(mddev->disks.next,
2036                                                         mdk_rdev_t, same_set);
2037                         int err = super_types[mddev->major_version]
2038                                 .load_super(rdev, rdev0, mddev->minor_version);
2039                         if (err < 0) {
2040                                 printk(KERN_WARNING 
2041                                         "md: %s has different UUID to %s\n",
2042                                         bdevname(rdev->bdev,b), 
2043                                         bdevname(rdev0->bdev,b2));
2044                                 export_rdev(rdev);
2045                                 return -EINVAL;
2046                         }
2047                 }
2048                 err = bind_rdev_to_array(rdev, mddev);
2049                 if (err)
2050                         export_rdev(rdev);
2051                 return err;
2052         }
2053
	/*
	 * add_new_disk can be used once the array is assembled
	 * to add "hot spares".  They must already have a superblock
	 * written (see the hypothetical userspace sketch after this
	 * function).
	 */
2059         if (mddev->pers) {
2060                 int err;
2061                 if (!mddev->pers->hot_add_disk) {
2062                         printk(KERN_WARNING 
2063                                 "md%d: personality does not support diskops!\n",
2064                                mdidx(mddev));
2065                         return -EINVAL;
2066                 }
2067                 rdev = md_import_device(dev, mddev->major_version,
2068                                         mddev->minor_version);
2069                 if (IS_ERR(rdev)) {
2070                         printk(KERN_WARNING 
2071                                 "md: md_import_device returned %ld\n",
2072                                 PTR_ERR(rdev));
2073                         return PTR_ERR(rdev);
2074                 }
2075                 rdev->in_sync = 0; /* just to be sure */
2076                 rdev->raid_disk = -1;
2077                 err = bind_rdev_to_array(rdev, mddev);
2078                 if (err)
2079                         export_rdev(rdev);
2080                 if (mddev->thread)
2081                         md_wakeup_thread(mddev->thread);
2082                 return err;
2083         }
2084
2085         /* otherwise, add_new_disk is only allowed
2086          * for major_version==0 superblocks
2087          */
2088         if (mddev->major_version != 0) {
2089                 printk(KERN_WARNING "md%d: ADD_NEW_DISK not supported\n",
2090                        mdidx(mddev));
2091                 return -EINVAL;
2092         }
2093
2094         if (!(info->state & (1<<MD_DISK_FAULTY))) {
2095                 int err;
2096                 rdev = md_import_device (dev, -1, 0);
2097                 if (IS_ERR(rdev)) {
2098                         printk(KERN_WARNING 
2099                                 "md: error, md_import_device() returned %ld\n",
2100                                 PTR_ERR(rdev));
2101                         return PTR_ERR(rdev);
2102                 }
2103                 rdev->desc_nr = info->number;
2104                 if (info->raid_disk < mddev->raid_disks)
2105                         rdev->raid_disk = info->raid_disk;
2106                 else
2107                         rdev->raid_disk = -1;
2108
2109                 rdev->faulty = 0;
2110                 if (rdev->raid_disk < mddev->raid_disks)
2111                         rdev->in_sync = (info->state & (1<<MD_DISK_SYNC));
2112                 else
2113                         rdev->in_sync = 0;
2114
2115                 err = bind_rdev_to_array(rdev, mddev);
2116                 if (err) {
2117                         export_rdev(rdev);
2118                         return err;
2119                 }
2120
2121                 if (!mddev->persistent) {
2122                         printk(KERN_INFO "md: nonpersistent superblock ...\n");
2123                         rdev->sb_offset = rdev->bdev->bd_inode->i_size >> BLOCK_SIZE_BITS;
2124                 } else 
2125                         rdev->sb_offset = calc_dev_sboffset(rdev->bdev);
2126                 rdev->size = calc_dev_size(rdev, mddev->chunk_size);
2127
2128                 if (!mddev->size || (mddev->size > rdev->size))
2129                         mddev->size = rdev->size;
2130         }
2131
2132         return 0;
2133 }
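
/*
 * Hypothetical userspace sketch (not part of the driver): hot-adding a
 * spare to a running array through the ADD_NEW_DISK path above.  The
 * field values and device numbers are illustrative assumptions.
 */
#if 0
#include <string.h>
#include <sys/ioctl.h>
#include <linux/raid/md_u.h>

int add_spare(int md_fd, int major, int minor)
{
	mdu_disk_info_t info;

	memset(&info, 0, sizeof(info));
	info.major = major;		/* component device, e.g. 8:17 */
	info.minor = minor;
	info.raid_disk = -1;		/* spare: no slot assigned yet */
	info.state = 0;			/* neither FAULTY nor SYNC */
	return ioctl(md_fd, ADD_NEW_DISK, &info);
}
#endif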
2134
2135 static int hot_generate_error(mddev_t * mddev, dev_t dev)
2136 {
2137         char b[BDEVNAME_SIZE];
2138         struct request_queue *q;
2139         mdk_rdev_t *rdev;
2140
2141         if (!mddev->pers)
2142                 return -ENODEV;
2143
2144         printk(KERN_INFO "md: trying to generate %s error in md%d ... \n",
2145                 __bdevname(dev, b), mdidx(mddev));
2146
2147         rdev = find_rdev(mddev, dev);
2148         if (!rdev) {
2149                 MD_BUG();
2150                 return -ENXIO;
2151         }
2152
2153         if (rdev->desc_nr == -1) {
2154                 MD_BUG();
2155                 return -EINVAL;
2156         }
2157         if (!rdev->in_sync)
2158                 return -ENODEV;
2159
2160         q = bdev_get_queue(rdev->bdev);
2161         if (!q) {
2162                 MD_BUG();
2163                 return -ENODEV;
2164         }
2165         printk(KERN_INFO "md: okay, generating error!\n");
2166 //      q->oneshot_error = 1; // disabled for now
2167
2168         return 0;
2169 }
2170
2171 static int hot_remove_disk(mddev_t * mddev, dev_t dev)
2172 {
2173         char b[BDEVNAME_SIZE];
2174         mdk_rdev_t *rdev;
2175
2176         if (!mddev->pers)
2177                 return -ENODEV;
2178
2179         printk(KERN_INFO "md: trying to remove %s from md%d ... \n",
2180                 __bdevname(dev, b), mdidx(mddev));
2181
2182         rdev = find_rdev(mddev, dev);
2183         if (!rdev)
2184                 return -ENXIO;
2185
2186         if (rdev->raid_disk >= 0)
2187                 goto busy;
2188
2189         kick_rdev_from_array(rdev);
2190         md_update_sb(mddev);
2191
2192         return 0;
2193 busy:
2194         printk(KERN_WARNING "md: cannot remove active disk %s from md%d ... \n",
2195                 bdevname(rdev->bdev,b), mdidx(mddev));
2196         return -EBUSY;
2197 }
2198
2199 static int hot_add_disk(mddev_t * mddev, dev_t dev)
2200 {
2201         char b[BDEVNAME_SIZE];
2202         int err;
2203         unsigned int size;
2204         mdk_rdev_t *rdev;
2205
2206         if (!mddev->pers)
2207                 return -ENODEV;
2208
2209         printk(KERN_INFO "md: trying to hot-add %s to md%d ... \n",
2210                 __bdevname(dev, b), mdidx(mddev));
2211
2212         if (mddev->major_version != 0) {
2213                 printk(KERN_WARNING "md%d: HOT_ADD may only be used with"
2214                         " version-0 superblocks.\n",
2215                         mdidx(mddev));
2216                 return -EINVAL;
2217         }
2218         if (!mddev->pers->hot_add_disk) {
2219                 printk(KERN_WARNING 
2220                         "md%d: personality does not support diskops!\n",
2221                         mdidx(mddev));
2222                 return -EINVAL;
2223         }
2224
2225         rdev = md_import_device (dev, -1, 0);
2226         if (IS_ERR(rdev)) {
2227                 printk(KERN_WARNING 
2228                         "md: error, md_import_device() returned %ld\n",
2229                         PTR_ERR(rdev));
2230                 return -EINVAL;
2231         }
2232
2233         rdev->sb_offset = calc_dev_sboffset(rdev->bdev);
2234         size = calc_dev_size(rdev, mddev->chunk_size);
2235         rdev->size = size;
2236
2237         if (size < mddev->size) {
2238                 printk(KERN_WARNING 
2239                         "md%d: disk size %llu blocks < array size %llu\n",
2240                         mdidx(mddev), (unsigned long long)size,
2241                         (unsigned long long)mddev->size);
2242                 err = -ENOSPC;
2243                 goto abort_export;
2244         }
2245
2246         if (rdev->faulty) {
2247                 printk(KERN_WARNING 
2248                         "md: can not hot-add faulty %s disk to md%d!\n",
2249                         bdevname(rdev->bdev,b), mdidx(mddev));
2250                 err = -EINVAL;
2251                 goto abort_export;
2252         }
2253         rdev->in_sync = 0;
2254         rdev->desc_nr = -1;
2255         bind_rdev_to_array(rdev, mddev);
2256
2257         /*
2258          * The rest should better be atomic, we can have disk failures
2259          * noticed in interrupt contexts ...
2260          */
2261
2262         if (rdev->desc_nr == mddev->max_disks) {
2263                 printk(KERN_WARNING "md%d: can not hot-add to full array!\n",
2264                         mdidx(mddev));
2265                 err = -EBUSY;
2266                 goto abort_unbind_export;
2267         }
2268
2269         rdev->raid_disk = -1;
2270
2271         md_update_sb(mddev);
2272
2273         /*
2274          * Kick recovery, maybe this spare has to be added to the
2275          * array immediately.
2276          */
2277         set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
2278         md_wakeup_thread(mddev->thread);
2279
2280         return 0;
2281
2282 abort_unbind_export:
2283         unbind_rdev_from_array(rdev);
2284
2285 abort_export:
2286         export_rdev(rdev);
2287         return err;
2288 }
2289
/*
 * set_array_info is used in two different ways.
 * The original usage is when creating a new array.
 *  In this usage, raid_disks is > 0 and it together with
 *  level, size, not_persistent, layout and chunksize determines the
 *  shape of the array.
 *  This will always create an array with a type-0.90.0 superblock.
 * The newer usage is when assembling an array.
 *  In this case raid_disks will be 0, and the major_version field is
 *  used to determine which style of superblocks are to be found on the
 *  devices.  The minor and patch _version numbers are also kept in case
 *  the superblock handler wishes to interpret them.
 * (See the hypothetical userspace sketch after this function.)
 */
2303 static int set_array_info(mddev_t * mddev, mdu_array_info_t *info)
2304 {
2305
2306         if (info->raid_disks == 0) {
2307                 /* just setting version number for superblock loading */
2308                 if (info->major_version < 0 ||
2309                     info->major_version >= sizeof(super_types)/sizeof(super_types[0]) ||
2310                     super_types[info->major_version].name == NULL) {
2311                         /* maybe try to auto-load a module? */
2312                         printk(KERN_INFO 
2313                                 "md: superblock version %d not known\n",
2314                                 info->major_version);
2315                         return -EINVAL;
2316                 }
2317                 mddev->major_version = info->major_version;
2318                 mddev->minor_version = info->minor_version;
2319                 mddev->patch_version = info->patch_version;
2320                 return 0;
2321         }
2322         mddev->major_version = MD_MAJOR_VERSION;
2323         mddev->minor_version = MD_MINOR_VERSION;
2324         mddev->patch_version = MD_PATCHLEVEL_VERSION;
2325         mddev->ctime         = get_seconds();
2326
2327         mddev->level         = info->level;
2328         mddev->size          = info->size;
2329         mddev->raid_disks    = info->raid_disks;
	/* don't set __minor, it is determined by which /dev/md* was
	 * opened
	 */
2333         if (info->state & (1<<MD_SB_CLEAN))
2334                 mddev->recovery_cp = MaxSector;
2335         else
2336                 mddev->recovery_cp = 0;
2337         mddev->persistent    = ! info->not_persistent;
2338
2339         mddev->layout        = info->layout;
2340         mddev->chunk_size    = info->chunk_size;
2341
2342         mddev->max_disks     = MD_SB_DISKS;
2343
2344
2345         /*
2346          * Generate a 128 bit UUID
2347          */
2348         get_random_bytes(mddev->uuid, 16);
2349
2350         return 0;
2351 }
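
/*
 * Hypothetical userspace sketch (not part of the driver) of the two
 * set_array_info() usages described above.  All field values are
 * illustrative assumptions.
 */
#if 0
#include <string.h>
#include <sys/ioctl.h>
#include <linux/raid/md_u.h>

/* usage 1: creating a new array (raid_disks > 0) */
int create_raid1(int md_fd)
{
	mdu_array_info_t info;

	memset(&info, 0, sizeof(info));
	info.level = 1;
	info.raid_disks = 2;
	info.size = 0;			/* 0: derive from component size */
	info.chunk_size = 64 * 1024;
	return ioctl(md_fd, SET_ARRAY_INFO, &info);
}

/* usage 2: assembling an existing array (raid_disks == 0) */
int assemble_v090(int md_fd)
{
	mdu_array_info_t info;

	memset(&info, 0, sizeof(info));
	info.major_version = 0;		/* look for 0.90.0 superblocks */
	info.minor_version = 90;
	return ioctl(md_fd, SET_ARRAY_INFO, &info);
}
#endif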
2352
2353 static int set_disk_faulty(mddev_t *mddev, dev_t dev)
2354 {
2355         mdk_rdev_t *rdev;
2356
2357         rdev = find_rdev(mddev, dev);
2358         if (!rdev)
2359                 return 0;
2360
2361         md_error(mddev, rdev);
2362         return 1;
2363 }
2364
2365 static int md_ioctl(struct inode *inode, struct file *file,
2366                         unsigned int cmd, unsigned long arg)
2367 {
2368         char b[BDEVNAME_SIZE];
2369         unsigned int minor;
2370         int err = 0;
2371         struct hd_geometry *loc = (struct hd_geometry *) arg;
2372         mddev_t *mddev = NULL;
2373         kdev_t dev;
2374
2375         if (!capable(CAP_SYS_ADMIN))
2376                 return -EACCES;
2377
2378         dev = inode->i_rdev;
2379         minor = minor(dev);
2380         if (minor >= MAX_MD_DEVS) {
2381                 MD_BUG();
2382                 return -EINVAL;
2383         }
2384
2385         /*
2386          * Commands dealing with the RAID driver but not any
2387          * particular array:
2388          */
2389         switch (cmd)
2390         {
2391                 case RAID_VERSION:
2392                         err = get_version((void *)arg);
2393                         goto done;
2394
2395                 case PRINT_RAID_DEBUG:
2396                         err = 0;
2397                         md_print_devices();
2398                         goto done;
2399
2400 #ifndef MODULE
2401                 case RAID_AUTORUN:
2402                         err = 0;
2403                         autostart_arrays();
2404                         goto done;
2405 #endif
2406                 default:;
2407         }
2408
2409         /*
2410          * Commands creating/starting a new array:
2411          */
2412
2413         mddev = inode->i_bdev->bd_inode->u.generic_ip;
2414
2415         if (!mddev) {
2416                 BUG();
2417                 goto abort;
2418         }
2419
2420
2421         if (cmd == START_ARRAY) {
2422                 /* START_ARRAY doesn't need to lock the array as autostart_array
2423                  * does the locking, and it could even be a different array
2424                  */
2425                 err = autostart_array(arg);
2426                 if (err) {
2427                         printk(KERN_WARNING "md: autostart %s failed!\n",
2428                                 __bdevname(arg, b));
2429                         goto abort;
2430                 }
2431                 goto done;
2432         }
2433
2434         err = mddev_lock(mddev);
2435         if (err) {
2436                 printk(KERN_INFO 
2437                         "md: ioctl lock interrupted, reason %d, cmd %d\n",
2438                         err, cmd);
2439                 goto abort;
2440         }
2441
2442         switch (cmd)
2443         {
2444                 case SET_ARRAY_INFO:
2445
2446                         if (!list_empty(&mddev->disks)) {
2447                                 printk(KERN_WARNING 
2448                                         "md: array md%d already has disks!\n",
2449                                         mdidx(mddev));
2450                                 err = -EBUSY;
2451                                 goto abort_unlock;
2452                         }
2453                         if (mddev->raid_disks) {
2454                                 printk(KERN_WARNING 
2455                                         "md: array md%d already initialised!\n",
2456                                         mdidx(mddev));
2457                                 err = -EBUSY;
2458                                 goto abort_unlock;
2459                         }
2460                         {
2461                                 mdu_array_info_t info;
2462                                 if (!arg)
2463                                         memset(&info, 0, sizeof(info));
2464                                 else if (copy_from_user(&info, (void*)arg, sizeof(info))) {
2465                                         err = -EFAULT;
2466                                         goto abort_unlock;
2467                                 }
2468                                 err = set_array_info(mddev, &info);
2469                                 if (err) {
2470                                         printk(KERN_WARNING "md: couldn't set"
2471                                                 " array info. %d\n", err);
2472                                         goto abort_unlock;
2473                                 }
2474                         }
2475                         goto done_unlock;
2476
2477                 default:;
2478         }
2479
2480         /*
2481          * Commands querying/configuring an existing array:
2482          */
	/* if we are not initialised yet, only ADD_NEW_DISK, STOP_ARRAY or RUN_ARRAY is allowed */
2484         if (!mddev->raid_disks && cmd != ADD_NEW_DISK && cmd != STOP_ARRAY && cmd != RUN_ARRAY) {
2485                 err = -ENODEV;
2486                 goto abort_unlock;
2487         }
2488
2489         /*
2490          * Commands even a read-only array can execute:
2491          */
2492         switch (cmd)
2493         {
2494                 case GET_ARRAY_INFO:
2495                         err = get_array_info(mddev, (void *)arg);
2496                         goto done_unlock;
2497
2498                 case GET_DISK_INFO:
2499                         err = get_disk_info(mddev, (void *)arg);
2500                         goto done_unlock;
2501
2502                 case RESTART_ARRAY_RW:
2503                         err = restart_array(mddev);
2504                         goto done_unlock;
2505
2506                 case STOP_ARRAY:
2507                         err = do_md_stop (mddev, 0);
2508                         goto done_unlock;
2509
2510                 case STOP_ARRAY_RO:
2511                         err = do_md_stop (mddev, 1);
2512                         goto done_unlock;
2513
	/*
	 * We have a problem here: there is no easy way to give a CHS
	 * virtual geometry.  We currently pretend that we have a 2 head,
	 * 4 sector geometry (with a BIG number of cylinders...), so the
	 * cylinder count below is capacity / (2 * 4) = capacity / 8.
	 * This drives dosfs just mad... ;-)
	 */
2520                 case HDIO_GETGEO:
2521                         if (!loc) {
2522                                 err = -EINVAL;
2523                                 goto abort_unlock;
2524                         }
2525                         err = put_user (2, (char *) &loc->heads);
2526                         if (err)
2527                                 goto abort_unlock;
2528                         err = put_user (4, (char *) &loc->sectors);
2529                         if (err)
2530                                 goto abort_unlock;
2531                         err = put_user(get_capacity(disks[mdidx(mddev)])/8,
2532                                                 (short *) &loc->cylinders);
2533                         if (err)
2534                                 goto abort_unlock;
2535                         err = put_user (get_start_sect(inode->i_bdev),
2536                                                 (long *) &loc->start);
2537                         goto done_unlock;
2538         }
2539
2540         /*
2541          * The remaining ioctls are changing the state of the
2542          * superblock, so we do not allow read-only arrays
2543          * here:
2544          */
2545         if (mddev->ro) {
2546                 err = -EROFS;
2547                 goto abort_unlock;
2548         }
2549
2550         switch (cmd)
2551         {
2552                 case ADD_NEW_DISK:
2553                 {
2554                         mdu_disk_info_t info;
2555                         if (copy_from_user(&info, (void*)arg, sizeof(info)))
2556                                 err = -EFAULT;
2557                         else
2558                                 err = add_new_disk(mddev, &info);
2559                         goto done_unlock;
2560                 }
2561                 case HOT_GENERATE_ERROR:
2562                         err = hot_generate_error(mddev, arg);
2563                         goto done_unlock;
2564                 case HOT_REMOVE_DISK:
2565                         err = hot_remove_disk(mddev, arg);
2566                         goto done_unlock;
2567
2568                 case HOT_ADD_DISK:
2569                         err = hot_add_disk(mddev, arg);
2570                         goto done_unlock;
2571
2572                 case SET_DISK_FAULTY:
2573                         err = set_disk_faulty(mddev, arg);
2574                         goto done_unlock;
2575
2576                 case RUN_ARRAY:
2577                 {
2578                         err = do_md_run (mddev);
2579                         /*
2580                          * we have to clean up the mess if
2581                          * the array cannot be run for some
2582                          * reason ...
			 * ->pers will not be set, so the superblock
			 * will not be updated.
2585                          */
2586                         if (err)
2587                                 do_md_stop (mddev, 0);
2588                         goto done_unlock;
2589                 }
2590
2591                 default:
2592                         if (_IOC_TYPE(cmd) == MD_MAJOR)
2593                                 printk(KERN_WARNING "md: %s(pid %d) used"
2594                                         " obsolete MD ioctl, upgrade your"
					" software to use new ioctls.\n",
2596                                         current->comm, current->pid);
2597                         err = -EINVAL;
2598                         goto abort_unlock;
2599         }
2600
2601 done_unlock:
2602 abort_unlock:
2603         mddev_unlock(mddev);
2604
2605         return err;
2606 done:
2607         if (err)
2608                 MD_BUG();
2609 abort:
2610         return err;
2611 }
2612
2613 static int md_open(struct inode *inode, struct file *file)
2614 {
2615         /*
2616          * Succeed if we can find or allocate a mddev structure.
2617          */
2618         mddev_t *mddev = mddev_find(minor(inode->i_rdev));
2619         int err = -ENOMEM;
2620
2621         if (!mddev)
2622                 goto out;
2623
2624         if ((err = mddev_lock(mddev)))
2625                 goto put;
2626
2627         err = 0;
2628         mddev_unlock(mddev);
2629         inode->i_bdev->bd_inode->u.generic_ip = mddev_get(mddev);
2630  put:
2631         mddev_put(mddev);
2632  out:
2633         return err;
2634 }
2635
2636 static int md_release(struct inode *inode, struct file * file)
2637 {
2638         mddev_t *mddev = inode->i_bdev->bd_inode->u.generic_ip;
2639
2640         if (!mddev)
2641                 BUG();
2642         mddev_put(mddev);
2643
2644         return 0;
2645 }
2646
2647 static struct block_device_operations md_fops =
2648 {
2649         .owner          = THIS_MODULE,
2650         .open           = md_open,
2651         .release        = md_release,
2652         .ioctl          = md_ioctl,
2653 };
2654
2655 int md_thread(void * arg)
2656 {
2657         mdk_thread_t *thread = arg;
2658
2659         lock_kernel();
2660
2661         /*
2662          * Detach thread
2663          */
2664
2665         daemonize(thread->name, mdidx(thread->mddev));
2666
2667         current->exit_signal = SIGCHLD;
2668         allow_signal(SIGKILL);
2669         thread->tsk = current;
2670
	/*
	 * md_thread is a 'system-thread', its priority should be very
	 * high.  We avoid resource deadlocks individually in each
	 * raid personality (RAID5 does preallocation).  We also use RR
	 * and the very same RT priority as kswapd, thus we will never
	 * get into a priority inversion deadlock.
	 *
	 * We definitely have to have equal or higher priority than
	 * bdflush, otherwise bdflush will deadlock if there are too
	 * many dirty RAID5 blocks.
	 */
2682         unlock_kernel();
2683
2684         complete(thread->event);
2685         while (thread->run) {
2686                 void (*run)(mddev_t *);
2687
2688                 wait_event_interruptible(thread->wqueue,
2689                                          test_bit(THREAD_WAKEUP, &thread->flags));
2690                 if (current->flags & PF_FREEZE)
2691                         refrigerator(PF_IOTHREAD);
2692
2693                 clear_bit(THREAD_WAKEUP, &thread->flags);
2694
2695                 run = thread->run;
2696                 if (run) {
2697                         run(thread->mddev);
2698                         blk_run_queues();
2699                 }
2700                 if (signal_pending(current))
2701                         flush_signals(current);
2702         }
2703         complete(thread->event);
2704         return 0;
2705 }
2706
2707 void md_wakeup_thread(mdk_thread_t *thread)
2708 {
2709         if (thread) {
2710                 dprintk("md: waking up MD thread %p.\n", thread);
2711                 set_bit(THREAD_WAKEUP, &thread->flags);
2712                 wake_up(&thread->wqueue);
2713         }
2714 }
2715
2716 mdk_thread_t *md_register_thread(void (*run) (mddev_t *), mddev_t *mddev,
2717                                  const char *name)
2718 {
2719         mdk_thread_t *thread;
2720         int ret;
2721         struct completion event;
2722
	thread = kmalloc(sizeof(mdk_thread_t), GFP_KERNEL);
2725         if (!thread)
2726                 return NULL;
2727
2728         memset(thread, 0, sizeof(mdk_thread_t));
2729         init_waitqueue_head(&thread->wqueue);
2730
2731         init_completion(&event);
2732         thread->event = &event;
2733         thread->run = run;
2734         thread->mddev = mddev;
2735         thread->name = name;
2736         ret = kernel_thread(md_thread, thread, 0);
2737         if (ret < 0) {
2738                 kfree(thread);
2739                 return NULL;
2740         }
2741         wait_for_completion(&event);
2742         return thread;
2743 }
2744
2745 void md_interrupt_thread(mdk_thread_t *thread)
2746 {
2747         if (!thread->tsk) {
2748                 MD_BUG();
2749                 return;
2750         }
2751         dprintk("interrupting MD-thread pid %d\n", thread->tsk->pid);
2752         send_sig(SIGKILL, thread->tsk, 1);
2753 }
2754
2755 void md_unregister_thread(mdk_thread_t *thread)
2756 {
2757         struct completion event;
2758
2759         init_completion(&event);
2760
2761         thread->event = &event;
2762         thread->run = NULL;
2763         thread->name = NULL;
2764         md_interrupt_thread(thread);
2765         wait_for_completion(&event);
2766         kfree(thread);
2767 }
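
/*
 * Illustrative sketch (not used by the driver): the typical lifecycle
 * of the md thread helpers above, as a personality might drive them.
 * The function names here are hypothetical.
 */
#if 0
static void example_run(mddev_t *mddev)
{
	/* one unit of background work for the array goes here */
}

static void example_lifecycle(mddev_t *mddev)
{
	mdk_thread_t *t;

	/* blocks until the new thread has completed its setup */
	t = md_register_thread(example_run, mddev, "md_example");
	if (!t)
		return;
	md_wakeup_thread(t);		/* requests a call of example_run() */
	md_unregister_thread(t);	/* blocks until the thread has exited */
}
#endif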
2768
2769 void md_error(mddev_t *mddev, mdk_rdev_t *rdev)
2770 {
2771         dprintk("md_error dev:(%d:%d), rdev:(%d:%d), (caller: %p,%p,%p,%p).\n",
2772                 MD_MAJOR,mdidx(mddev),
2773                 MAJOR(rdev->bdev->bd_dev), MINOR(rdev->bdev->bd_dev),
2774                 __builtin_return_address(0),__builtin_return_address(1),
2775                 __builtin_return_address(2),__builtin_return_address(3));
2776
2777         if (!mddev) {
2778                 MD_BUG();
2779                 return;
2780         }
2781
2782         if (!rdev || rdev->faulty)
2783                 return;
2784         if (!mddev->pers->error_handler)
2785                 return;
2786         mddev->pers->error_handler(mddev,rdev);
2787         set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
2788         md_wakeup_thread(mddev->thread);
2789 }
2790
2791 /* seq_file implementation /proc/mdstat */
2792
2793 static void status_unused(struct seq_file *seq)
2794 {
2795         int i = 0;
2796         mdk_rdev_t *rdev;
2797         struct list_head *tmp;
2798
2799         seq_printf(seq, "unused devices: ");
2800
2801         ITERATE_RDEV_PENDING(rdev,tmp) {
2802                 char b[BDEVNAME_SIZE];
2803                 i++;
2804                 seq_printf(seq, "%s ",
2805                               bdevname(rdev->bdev,b));
2806         }
2807         if (!i)
2808                 seq_printf(seq, "<none>");
2809
2810         seq_printf(seq, "\n");
2811 }
2812
2813
2814 static void status_resync(struct seq_file *seq, mddev_t * mddev)
2815 {
2816         unsigned long max_blocks, resync, res, dt, db, rt;
2817
2818         resync = (mddev->curr_resync - atomic_read(&mddev->recovery_active))/2;
2819         max_blocks = mddev->size;
2820
2821         /*
2822          * Should not happen.
2823          */
2824         if (!max_blocks) {
2825                 MD_BUG();
2826                 return;
2827         }
2828         res = (resync/1024)*1000/(max_blocks/1024 + 1);
2829         {
2830                 int i, x = res/50, y = 20-x;
2831                 seq_printf(seq, "[");
2832                 for (i = 0; i < x; i++)
2833                         seq_printf(seq, "=");
2834                 seq_printf(seq, ">");
2835                 for (i = 0; i < y; i++)
2836                         seq_printf(seq, ".");
2837                 seq_printf(seq, "] ");
2838         }
2839         seq_printf(seq, " %s =%3lu.%lu%% (%lu/%lu)",
2840                       (test_bit(MD_RECOVERY_SYNC, &mddev->recovery) ?
2841                        "resync" : "recovery"),
2842                       res/10, res % 10, resync, max_blocks);
2843
	/*
	 * We do not want to overflow, so the order of operands and
	 * the * 100 / 100 trick are important.  We do a +1 to be
	 * safe against division by zero.  We only estimate anyway;
	 * see the worked example after this function.
	 *
	 * dt: time from mark until now
	 * db: blocks written from mark until now
	 * rt: remaining time
	 */
2853         dt = ((jiffies - mddev->resync_mark) / HZ);
2854         if (!dt) dt++;
2855         db = resync - (mddev->resync_mark_cnt/2);
2856         rt = (dt * ((max_blocks-resync) / (db/100+1)))/100;
2857
2858         seq_printf(seq, " finish=%lu.%lumin", rt / 60, (rt % 60)/6);
2859
2860         seq_printf(seq, " speed=%ldK/sec", db/dt);
2861 }
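
/*
 * Worked example for status_resync() above (the numbers are
 * hypothetical): with max_blocks = 1000000 and resync = 250000,
 * res = (250000/1024)*1000/(1000000/1024 + 1) = 244000/977 = 249,
 * so the bar shows x = res/50 = 4 of 20 steps and " 24.9%".
 * If db = 3000 blocks were written over the last dt = 30 seconds,
 * rt = (30 * ((1000000-250000) / (3000/100+1)))/100 = 7257 seconds,
 * printed as "finish=120.9min", and speed = db/dt = 100K/sec.
 */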
2862
2863 static void *md_seq_start(struct seq_file *seq, loff_t *pos)
2864 {
2865         struct list_head *tmp;
2866         loff_t l = *pos;
2867         mddev_t *mddev;
2868
2869         if (l >= 0x10000)
2870                 return NULL;
2871         if (!l--)
2872                 /* header */
2873                 return (void*)1;
2874
2875         spin_lock(&all_mddevs_lock);
2876         list_for_each(tmp,&all_mddevs)
2877                 if (!l--) {
2878                         mddev = list_entry(tmp, mddev_t, all_mddevs);
2879                         mddev_get(mddev);
2880                         spin_unlock(&all_mddevs_lock);
2881                         return mddev;
2882                 }
2883         spin_unlock(&all_mddevs_lock);
2884         if (!l--)
2885                 return (void*)2;/* tail */
2886         return NULL;
2887 }
2888
2889 static void *md_seq_next(struct seq_file *seq, void *v, loff_t *pos)
2890 {
2891         struct list_head *tmp;
2892         mddev_t *next_mddev, *mddev = v;
2893         
2894         ++*pos;
2895         if (v == (void*)2)
2896                 return NULL;
2897
2898         spin_lock(&all_mddevs_lock);
2899         if (v == (void*)1)
2900                 tmp = all_mddevs.next;
2901         else
2902                 tmp = mddev->all_mddevs.next;
2903         if (tmp != &all_mddevs)
2904                 next_mddev = mddev_get(list_entry(tmp,mddev_t,all_mddevs));
2905         else {
2906                 next_mddev = (void*)2;
2907                 *pos = 0x10000;
2908         }               
2909         spin_unlock(&all_mddevs_lock);
2910
2911         if (v != (void*)1)
2912                 mddev_put(mddev);
2913         return next_mddev;
2914
2915 }
2916
2917 static void md_seq_stop(struct seq_file *seq, void *v)
2918 {
2919         mddev_t *mddev = v;
2920
2921         if (mddev && v != (void*)1 && v != (void*)2)
2922                 mddev_put(mddev);
2923 }
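
/*
 * Note on the iterator above: md_seq_start()/md_seq_next() hand
 * md_seq_show() three kinds of cookies: (void*)1 for the header line
 * ("Personalities : ..."), a real mddev_t pointer (with a reference
 * held) for each array, and (void*)2 for the trailing "unused devices"
 * line.  md_seq_stop() drops the reference only for real mddev cookies.
 */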
2924
2925 static int md_seq_show(struct seq_file *seq, void *v)
2926 {
2927         mddev_t *mddev = v;
2928         sector_t size;
2929         struct list_head *tmp2;
2930         mdk_rdev_t *rdev;
2931         int i;
2932
2933         if (v == (void*)1) {
2934                 seq_printf(seq, "Personalities : ");
2935                 spin_lock(&pers_lock);
2936                 for (i = 0; i < MAX_PERSONALITY; i++)
2937                         if (pers[i])
2938                                 seq_printf(seq, "[%s] ", pers[i]->name);
2939
2940                 spin_unlock(&pers_lock);
2941                 seq_printf(seq, "\n");
2942                 return 0;
2943         }
2944         if (v == (void*)2) {
2945                 status_unused(seq);
2946                 return 0;
2947         }
2948
2949         if (mddev_lock(mddev)!=0) 
2950                 return -EINTR;
2951         if (mddev->pers || mddev->raid_disks || !list_empty(&mddev->disks)) {
2952                 seq_printf(seq, "md%d : %sactive", mdidx(mddev),
2953                                                 mddev->pers ? "" : "in");
2954                 if (mddev->pers) {
2955                         if (mddev->ro)
2956                                 seq_printf(seq, " (read-only)");
2957                         seq_printf(seq, " %s", mddev->pers->name);
2958                 }
2959
2960                 size = 0;
2961                 ITERATE_RDEV(mddev,rdev,tmp2) {
2962                         char b[BDEVNAME_SIZE];
2963                         seq_printf(seq, " %s[%d]",
2964                                 bdevname(rdev->bdev,b), rdev->desc_nr);
2965                         if (rdev->faulty) {
2966                                 seq_printf(seq, "(F)");
2967                                 continue;
2968                         }
2969                         size += rdev->size;
2970                 }
2971
2972                 if (!list_empty(&mddev->disks)) {
2973                         if (mddev->pers)
2974                                 seq_printf(seq, "\n      %llu blocks",
2975                                         (unsigned long long)mddev->array_size);
2976                         else
2977                                 seq_printf(seq, "\n      %llu blocks",
2978                                         (unsigned long long)size);
2979                 }
2980
2981                 if (mddev->pers) {
2982                         mddev->pers->status (seq, mddev);
2983                         seq_printf(seq, "\n      ");
2984                         if (mddev->curr_resync > 2)
2985                                 status_resync (seq, mddev);
2986                         else if (mddev->curr_resync == 1 || mddev->curr_resync == 2)
2987                                 seq_printf(seq, "       resync=DELAYED");
2988                 }
2989
2990                 seq_printf(seq, "\n");
2991         }
2992         mddev_unlock(mddev);
2993         
2994         return 0;
2995 }
2996
2997 static struct seq_operations md_seq_ops = {
2998         .start  = md_seq_start,
2999         .next   = md_seq_next,
3000         .stop   = md_seq_stop,
3001         .show   = md_seq_show,
3002 };
3003
3004 static int md_seq_open(struct inode *inode, struct file *file)
3005 {
3006         int error;
3007
3008         error = seq_open(file, &md_seq_ops);
3009         return error;
3010 }
3011
3012 static struct file_operations md_seq_fops = {
3013         .open           = md_seq_open,
3014         .read           = seq_read,
3015         .llseek         = seq_lseek,
3016         .release        = seq_release,
3017 };
3018
3019 int register_md_personality(int pnum, mdk_personality_t *p)
3020 {
3021         if (pnum >= MAX_PERSONALITY) {
3022                 MD_BUG();
3023                 return -EINVAL;
3024         }
3025
3026         spin_lock(&pers_lock);
3027         if (pers[pnum]) {
3028                 spin_unlock(&pers_lock);
3029                 MD_BUG();
3030                 return -EBUSY;
3031         }
3032
3033         pers[pnum] = p;
3034         printk(KERN_INFO "md: %s personality registered as nr %d\n", p->name, pnum);
3035         spin_unlock(&pers_lock);
3036         return 0;
3037 }
3038
3039 int unregister_md_personality(int pnum)
3040 {
3041         if (pnum >= MAX_PERSONALITY) {
3042                 MD_BUG();
3043                 return -EINVAL;
3044         }
3045
3046         printk(KERN_INFO "md: %s personality unregistered\n", pers[pnum]->name);
3047         spin_lock(&pers_lock);
3048         pers[pnum] = NULL;
3049         spin_unlock(&pers_lock);
3050         return 0;
3051 }
3052
3053 void md_sync_acct(mdk_rdev_t *rdev, unsigned long nr_sectors)
3054 {
3055         rdev->bdev->bd_contains->bd_disk->sync_io += nr_sectors;
3056 }
3057
3058 static int is_mddev_idle(mddev_t *mddev)
3059 {
3060         mdk_rdev_t * rdev;
3061         struct list_head *tmp;
3062         int idle;
3063         unsigned long curr_events;
3064
3065         idle = 1;
3066         ITERATE_RDEV(mddev,rdev,tmp) {
3067                 struct gendisk *disk = rdev->bdev->bd_contains->bd_disk;
3068                 curr_events = disk_stat_read(disk, read_sectors) + 
3069                                 disk_stat_read(disk, write_sectors) - 
3070                                 disk->sync_io;
3071                 if ((curr_events - rdev->last_events) > 32) {
3072                         rdev->last_events = curr_events;
3073                         idle = 0;
3074                 }
3075         }
3076         return idle;
3077 }
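
/*
 * Worked example for is_mddev_idle() above (numbers are hypothetical):
 * sync_io counts the sectors submitted by the sync thread itself, so
 * curr_events is the non-resync I/O seen by the disk.  If a disk has
 * done 5000 sectors of total I/O of which 4990 were resync, curr_events
 * is 10; with rdev->last_events == 0 the delta is 10 <= 32, so the
 * array still counts as idle.  A delta above 32 sectors since the last
 * check marks the array busy, which the sync code uses to decide
 * whether to throttle reconstruction.
 */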
3078
3079 void md_done_sync(mddev_t *mddev, int blocks, int ok)
3080 {
3081         /* another "blocks" (512byte) blocks have been synced */
3082         atomic_sub(blocks, &mddev->recovery_active);
3083         wake_up(&mddev->recovery_wait);
3084         if (!ok) {
3085                 set_bit(MD_RECOVERY_ERR, &mddev->recovery);
3086                 md_wakeup_thread(mddev->thread);
3087                 // stop recovery, signal do_sync ....
3088         }
3089 }
3090
3091
3092 void md_write_start(mddev_t *mddev)
3093 {
3094         if (!atomic_read(&mddev->writes_pending)) {
3095                 mddev_lock_uninterruptible(mddev);
3096                 if (mddev->in_sync) {
3097                         mddev->in_sync = 0;
3098                         del_timer(&mddev->safemode_timer);
3099                         md_update_sb(mddev);
3100                 }
3101                 atomic_inc(&mddev->writes_pending);
3102                 mddev_unlock(mddev);
3103         } else
3104                 atomic_inc(&mddev->writes_pending);
3105 }
3106
3107 void md_write_end(mddev_t *mddev)
3108 {
3109         if (atomic_dec_and_test(&mddev->writes_pending)) {
3110                 if (mddev->safemode == 2)
3111                         md_wakeup_thread(mddev->thread);
3112                 else
3113                         mod_timer(&mddev->safemode_timer, jiffies + mddev->safemode_delay);
3114         }
3115 }
3116
static inline void md_enter_safemode(mddev_t *mddev)
{
	mddev_lock_uninterruptible(mddev);
	if (mddev->safemode && !atomic_read(&mddev->writes_pending) &&
	    !mddev->in_sync && mddev->recovery_cp == MaxSector) {
		mddev->in_sync = 1;
		md_update_sb(mddev);
	}
	mddev_unlock(mddev);

	if (mddev->safemode == 1)
		mddev->safemode = 0;
}

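/*
 * Called from the per-array threads.  A pending signal is interpreted
 * as a request for immediate safe mode (safemode == 2: mark the array
 * clean as soon as the last write drains, without waiting for the
 * timer).
 */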
void md_handle_safemode(mddev_t *mddev)
{
	if (signal_pending(current)) {
		printk(KERN_INFO "md: md%d in immediate safe mode\n",
			mdidx(mddev));
		mddev->safemode = 2;
		flush_signals(current);
	}
	if (mddev->safemode)
		md_enter_safemode(mddev);
}


DECLARE_WAIT_QUEUE_HEAD(resync_wait);

#define SYNC_MARKS	10
#define SYNC_MARK_STEP	(3*HZ)
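/*
 * md_do_sync() is the body of the per-array resync thread started by
 * md_check_recovery().  It first serializes against resyncs of other
 * arrays that share physical units, then repeatedly calls
 * ->sync_request(), throttling itself so that the recent average rate
 *
 *	currspeed = (j - resync_mark_cnt)/2 / ((jiffies - resync_mark)/HZ + 1)
 *
 * in KB/sec (j counts 512-byte sectors), measured over a window of
 * SYNC_MARKS marks stepped every SYNC_MARK_STEP, never exceeds
 * sysctl_speed_limit_max and drops towards sysctl_speed_limit_min
 * whenever the array is seeing other I/O.
 */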
static void md_do_sync(mddev_t *mddev)
{
	mddev_t *mddev2;
	unsigned int max_sectors, currspeed = 0,
		j, window;
	unsigned long mark[SYNC_MARKS];
	unsigned long mark_cnt[SYNC_MARKS];
	int last_mark, m;
	struct list_head *tmp;
	unsigned long last_check;

	/* just in case the thread restarts... */
	if (test_bit(MD_RECOVERY_DONE, &mddev->recovery))
		return;

	/* we overload curr_resync somewhat here.
	 * 0 == not engaged in resync at all
	 * 2 == checking that there is no conflict with another sync
	 * 1 == like 2, but have yielded to allow a conflicting resync to
	 *		commence
	 * other == active in resync - this many blocks
	 */
	do {
		mddev->curr_resync = 2;

		ITERATE_MDDEV(mddev2,tmp) {
			if (mddev2 == mddev)
				continue;
			if (mddev2->curr_resync &&
			    match_mddev_units(mddev,mddev2)) {
				printk(KERN_INFO "md: delaying resync of md%d"
					" until md%d has finished resync (they"
					" share one or more physical units)\n",
				       mdidx(mddev), mdidx(mddev2));
				if (mddev < mddev2) { /* arbitrarily yield */
					mddev->curr_resync = 1;
					wake_up(&resync_wait);
				}
				if (wait_event_interruptible(resync_wait,
					     mddev2->curr_resync < mddev->curr_resync)) {
					flush_signals(current);
					mddev_put(mddev2);
					goto skip;
				}
			}
			if (mddev->curr_resync == 1) {
				mddev_put(mddev2);
				break;
			}
		}
	} while (mddev->curr_resync < 2);

	max_sectors = mddev->size << 1;

	printk(KERN_INFO "md: syncing RAID array md%d\n", mdidx(mddev));
	printk(KERN_INFO "md: minimum _guaranteed_ reconstruction speed:"
		" %d KB/sec/disc.\n", sysctl_speed_limit_min);
	printk(KERN_INFO "md: using maximum available idle IO bandwidth "
	       "(but not more than %d KB/sec) for reconstruction.\n",
	       sysctl_speed_limit_max);

	is_mddev_idle(mddev); /* this also initializes IO event counters */
	if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery))
		j = mddev->recovery_cp;
	else
		j = 0;
	for (m = 0; m < SYNC_MARKS; m++) {
		mark[m] = jiffies;
		mark_cnt[m] = j;
	}
	last_mark = 0;
	mddev->resync_mark = mark[last_mark];
	mddev->resync_mark_cnt = mark_cnt[last_mark];

	/*
	 * Tune reconstruction:
	 */
	window = 32*(PAGE_SIZE/512);
	printk(KERN_INFO "md: using %dk window, over a total of %d blocks.\n",
		window/2, max_sectors/2);

	atomic_set(&mddev->recovery_active, 0);
	init_waitqueue_head(&mddev->recovery_wait);
	last_check = 0;

	if (j)
		printk(KERN_INFO
			"md: resuming recovery of md%d from checkpoint.\n",
			mdidx(mddev));

	while (j < max_sectors) {
		int sectors;

		sectors = mddev->pers->sync_request(mddev, j, currspeed < sysctl_speed_limit_min);
		if (sectors < 0) {
			set_bit(MD_RECOVERY_ERR, &mddev->recovery);
			goto out;
		}
		atomic_add(sectors, &mddev->recovery_active);
		j += sectors;
		if (j > 1)
			mddev->curr_resync = j;

		if (last_check + window > j)
			continue;

		last_check = j;

		if (test_bit(MD_RECOVERY_INTR, &mddev->recovery) ||
		    test_bit(MD_RECOVERY_ERR, &mddev->recovery))
			break;

		blk_run_queues();

	repeat:
		if (jiffies >= mark[last_mark] + SYNC_MARK_STEP) {
			/* step marks */
			int next = (last_mark+1) % SYNC_MARKS;

			mddev->resync_mark = mark[next];
			mddev->resync_mark_cnt = mark_cnt[next];
			mark[next] = jiffies;
			mark_cnt[next] = j - atomic_read(&mddev->recovery_active);
			last_mark = next;
		}

		if (signal_pending(current)) {
			/*
			 * got a signal, exit.
			 */
			printk(KERN_INFO
				"md: md_do_sync() got signal ... exiting\n");
			flush_signals(current);
			set_bit(MD_RECOVERY_INTR, &mddev->recovery);
			goto out;
		}

		/*
		 * this loop exits only when we are slower than the 'hard'
		 * speed limit, or when the system was IO-idle for a jiffy.
		 * the system might be non-idle CPU-wise, but we only care
		 * about not overloading the IO subsystem. (things like an
		 * e2fsck being done on the RAID array should execute fast)
		 */
		cond_resched();

		currspeed = (j-mddev->resync_mark_cnt)/2/((jiffies-mddev->resync_mark)/HZ +1) +1;

		if (currspeed > sysctl_speed_limit_min) {
			if ((currspeed > sysctl_speed_limit_max) ||
					!is_mddev_idle(mddev)) {
				current->state = TASK_INTERRUPTIBLE;
				schedule_timeout(HZ/4);
				goto repeat;
			}
		}
	}
	printk(KERN_INFO "md: md%d: sync done.\n", mdidx(mddev));
	/*
	 * this also signals 'finished resyncing' to md_stop
	 */
 out:
	wait_event(mddev->recovery_wait, !atomic_read(&mddev->recovery_active));

	/* tell personality that we are finished */
	mddev->pers->sync_request(mddev, max_sectors, 1);

	if (!test_bit(MD_RECOVERY_ERR, &mddev->recovery) &&
	    mddev->curr_resync > 2 &&
	    mddev->curr_resync > mddev->recovery_cp) {
		if (test_bit(MD_RECOVERY_INTR, &mddev->recovery)) {
			printk(KERN_INFO
				"md: checkpointing recovery of md%d.\n",
				mdidx(mddev));
			mddev->recovery_cp = mddev->curr_resync;
		} else
			mddev->recovery_cp = MaxSector;
	}

	if (mddev->safemode)
		md_enter_safemode(mddev);
 skip:
	mddev->curr_resync = 0;
	set_bit(MD_RECOVERY_DONE, &mddev->recovery);
	md_wakeup_thread(mddev->thread);
}

/*
 * This routine is regularly called by all per-raid-array threads to
 * deal with generic issues like resync and super-block update.
 * Raid personalities that don't have a thread (linear/raid0) do not
 * need this as they never do any recovery or update the superblock.
 *
 * It does not do any resync itself, but rather "forks" off other threads
 * to do that as needed.
 * When it is determined that resync is needed, we set MD_RECOVERY_RUNNING in
 * "->recovery" and create a thread at ->sync_thread.
 * When the thread finishes it sets MD_RECOVERY_DONE (and might set MD_RECOVERY_ERR)
 * and wakes up this thread, which will reap the thread and finish up.
 * This thread also removes any faulty devices (with nr_pending == 0).
 *
 * The overall approach is:
 *  1/ if the superblock needs updating, update it.
 *  2/ If a recovery thread is running, don't do anything else.
 *  3/ If recovery has finished, clean up, possibly marking spares active.
 *  4/ If there are any faulty devices, remove them.
 *  5/ If the array is degraded, try to add spare devices.
 *  6/ If the array has spares or is not in-sync, start a resync thread.
 */
void md_check_recovery(mddev_t *mddev)
{
	mdk_rdev_t *rdev;
	struct list_head *rtmp;

	dprintk(KERN_INFO "md: recovery thread got woken up ...\n");

	if (mddev->ro)
		return;
	if (!(mddev->sb_dirty ||
	      test_bit(MD_RECOVERY_NEEDED, &mddev->recovery) ||
	      test_bit(MD_RECOVERY_DONE, &mddev->recovery)))
		return;
	if (mddev_trylock(mddev)==0) {
		int spares = 0;
		if (mddev->sb_dirty)
			md_update_sb(mddev);
		if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) &&
		    !test_bit(MD_RECOVERY_DONE, &mddev->recovery))
			/* resync/recovery still happening */
			goto unlock;
		if (mddev->sync_thread) {
			/* resync has finished, collect result */
			md_unregister_thread(mddev->sync_thread);
			mddev->sync_thread = NULL;
			if (!test_bit(MD_RECOVERY_ERR, &mddev->recovery)) {
				/* success...*/
				/* activate any spares */
				mddev->pers->spare_active(mddev);
			}
			md_update_sb(mddev);
			mddev->recovery = 0;
			wake_up(&resync_wait);
			goto unlock;
		}
		if (mddev->recovery) {
			/* that's odd.. */
			mddev->recovery = 0;
			wake_up(&resync_wait);
		}

		/* no recovery is running.
		 * remove any failed drives, then
		 * add spares if possible
		 */
		ITERATE_RDEV(mddev,rdev,rtmp) {
			if (rdev->raid_disk >= 0 &&
			    rdev->faulty &&
			    atomic_read(&rdev->nr_pending)==0) {
				mddev->pers->hot_remove_disk(mddev, rdev->raid_disk);
				rdev->raid_disk = -1;
			}
			if (!rdev->faulty && rdev->raid_disk >= 0 && !rdev->in_sync)
				spares++;
		}
		if (mddev->degraded) {
			ITERATE_RDEV(mddev,rdev,rtmp)
				if (rdev->raid_disk < 0
				    && !rdev->faulty) {
					if (mddev->pers->hot_add_disk(mddev,rdev))
						spares++;
					else
						break;
				}
		}

		if (!spares && (mddev->recovery_cp == MaxSector)) {
			/* nothing we can do ... */
			goto unlock;
		}
		if (mddev->pers->sync_request) {
			set_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
			if (!spares)
				set_bit(MD_RECOVERY_SYNC, &mddev->recovery);
			mddev->sync_thread = md_register_thread(md_do_sync,
								mddev,
								"md%d_resync");
			if (!mddev->sync_thread) {
				printk(KERN_ERR "md%d: could not start resync"
					" thread...\n",
					mdidx(mddev));
				/* leave the spares where they are, it shouldn't hurt */
				mddev->recovery = 0;
			} else {
				md_wakeup_thread(mddev->sync_thread);
			}
		}
	unlock:
		mddev_unlock(mddev);
	}
}

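/*
 * Reboot notifier: on shutdown, halt or power-off, quiesce every md
 * array that can be locked, so the superblocks are consistent before
 * the machine goes down.
 */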
int md_notify_reboot(struct notifier_block *this,
		     unsigned long code, void *x)
{
	struct list_head *tmp;
	mddev_t *mddev;

	if ((code == SYS_DOWN) || (code == SYS_HALT) || (code == SYS_POWER_OFF)) {

		printk(KERN_INFO "md: stopping all md devices.\n");

		ITERATE_MDDEV(mddev,tmp)
			if (mddev_trylock(mddev)==0)
				do_md_stop(mddev, 1);
		/*
		 * certain more exotic SCSI devices are known to be
		 * volatile wrt too early system reboots. While the
		 * right place to handle this issue is the given
		 * driver, we do want to have a safe RAID driver ...
		 */
		mdelay(1000);
	}
	return NOTIFY_DONE;
}

struct notifier_block md_notifier = {
	.notifier_call	= md_notify_reboot,
	.next		= NULL,
	.priority	= INT_MAX, /* before any real devices */
};

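/* Create /proc/mdstat (when procfs is configured in). */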
static void md_geninit(void)
{
	struct proc_dir_entry *p;

	dprintk("md: sizeof(mdp_super_t) = %d\n", (int)sizeof(mdp_super_t));

#ifdef CONFIG_PROC_FS
	p = create_proc_entry("mdstat", S_IRUGO, NULL);
	if (p)
		p->proc_fops = &md_seq_fops;
#endif
}

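/*
 * Module/boot initialization: claim the md block major, create the
 * devfs nodes for all potential minors, and register the reboot
 * notifier and the /proc/sys/dev/raid sysctls.
 */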
int __init md_init(void)
{
	int minor;

	printk(KERN_INFO "md: md driver %d.%d.%d MAX_MD_DEVS=%d,"
			" MD_SB_DISKS=%d\n",
			MD_MAJOR_VERSION, MD_MINOR_VERSION,
			MD_PATCHLEVEL_VERSION, MAX_MD_DEVS, MD_SB_DISKS);

	if (register_blkdev(MAJOR_NR, "md"))
		return -1;

	devfs_mk_dir("md");
	blk_register_region(MKDEV(MAJOR_NR, 0), MAX_MD_DEVS, THIS_MODULE,
				md_probe, NULL, NULL);

	for (minor = 0; minor < MAX_MD_DEVS; ++minor) {
		devfs_mk_bdev(MKDEV(MAJOR_NR, minor),
				S_IFBLK|S_IRUSR|S_IWUSR,
				"md/%d", minor);
	}

	register_reboot_notifier(&md_notifier);
	raid_table_header = register_sysctl_table(raid_root_table, 1);

	md_geninit();
	return 0;
}

#ifndef MODULE

/*
 * Searches all registered partitions for autorun RAID arrays
 * at boot time.
 */
static dev_t detected_devices[128];
static int dev_cnt;

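/*
 * Remember a device found at boot time (e.g. a partition of type
 * "linux raid autodetect") so that autostart_arrays() can scan it
 * later.  Devices beyond the table size are silently dropped.
 */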
void md_autodetect_dev(dev_t dev)
{
	if (dev_cnt >= 0 && dev_cnt < 127)
		detected_devices[dev_cnt++] = dev;
}

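/*
 * Import every device remembered by md_autodetect_dev() and hand the
 * whole batch to autorun_devices(), which assembles them into arrays
 * according to their superblocks.
 */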
static void autostart_arrays(void)
{
	char b[BDEVNAME_SIZE];
	mdk_rdev_t *rdev;
	int i;

	printk(KERN_INFO "md: Autodetecting RAID arrays.\n");

	for (i = 0; i < dev_cnt; i++) {
		dev_t dev = detected_devices[i];

		rdev = md_import_device(dev, 0, 0);
		if (IS_ERR(rdev)) {
			printk(KERN_ALERT "md: could not import %s!\n",
				__bdevname(dev, b));
			continue;
		}
		if (rdev->faulty) {
			MD_BUG();
			continue;
		}
		list_add(&rdev->same_set, &pending_raid_disks);
	}
	dev_cnt = 0;

	autorun_devices();
}

#endif

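/*
 * Module teardown: undo everything md_init() set up and release the
 * gendisk and mddev reference of every array that was ever created.
 */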
static __exit void md_exit(void)
{
	int i;
	blk_unregister_region(MKDEV(MAJOR_NR, 0), MAX_MD_DEVS);
	for (i = 0; i < MAX_MD_DEVS; i++)
		devfs_remove("md/%d", i);
	devfs_remove("md");

	unregister_blkdev(MAJOR_NR, "md");
	unregister_reboot_notifier(&md_notifier);
	unregister_sysctl_table(raid_table_header);
#ifdef CONFIG_PROC_FS
	remove_proc_entry("mdstat", NULL);
#endif
	for (i = 0; i < MAX_MD_DEVS; i++) {
		struct gendisk *disk = disks[i];
		mddev_t *mddev;
		if (!disks[i])
			continue;
		mddev = disk->private_data;
		del_gendisk(disk);
		put_disk(disk);
		mddev_put(mddev);
	}
}

module_init(md_init)
module_exit(md_exit)

EXPORT_SYMBOL(register_md_personality);
EXPORT_SYMBOL(unregister_md_personality);
EXPORT_SYMBOL(md_error);
EXPORT_SYMBOL(md_sync_acct);
EXPORT_SYMBOL(md_done_sync);
EXPORT_SYMBOL(md_write_start);
EXPORT_SYMBOL(md_write_end);
EXPORT_SYMBOL(md_handle_safemode);
EXPORT_SYMBOL(md_register_thread);
EXPORT_SYMBOL(md_unregister_thread);
EXPORT_SYMBOL(md_wakeup_thread);
EXPORT_SYMBOL(md_print_devices);
EXPORT_SYMBOL(md_interrupt_thread);
EXPORT_SYMBOL(md_check_recovery);
MODULE_LICENSE("GPL");