2 md.c : Multiple Devices driver for Linux
3 Copyright (C) 1998, 1999, 2000 Ingo Molnar
5 completely rewritten, based on the MD driver code from Marc Zyngier
9 - RAID-1/RAID-5 extensions by Miguel de Icaza, Gadi Oxman, Ingo Molnar
10 - boot support for linear and striped mode by Harald Hoyer <HarryH@Royal.Net>
11 - kerneld support by Boris Tobotras <boris@xtalk.msk.su>
12 - kmod support by: Cyrus Durgin
13 - RAID0 bugfixes: Mark Anthony Lisher <markal@iname.com>
14 - Devfs support by Richard Gooch <rgooch@atnf.csiro.au>
16 - lots of fixes and improvements to the RAID1/RAID5 and generic
17 RAID code (such as request based resynchronization):
19 Neil Brown <neilb@cse.unsw.edu.au>.
21 This program is free software; you can redistribute it and/or modify
22 it under the terms of the GNU General Public License as published by
23 the Free Software Foundation; either version 2, or (at your option)
26 You should have received a copy of the GNU General Public License
27 (for example /usr/src/linux/COPYING); if not, write to the Free
28 Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
31 #include <linux/module.h>
32 #include <linux/config.h>
33 #include <linux/linkage.h>
34 #include <linux/raid/md.h>
35 #include <linux/sysctl.h>
36 #include <linux/devfs_fs_kernel.h>
37 #include <linux/buffer_head.h> /* for invalidate_bdev */
38 #include <linux/suspend.h>
40 #include <linux/init.h>
43 #include <linux/kmod.h>
46 #define __KERNEL_SYSCALLS__
47 #include <linux/unistd.h>
49 #include <asm/unaligned.h>
51 #define MAJOR_NR MD_MAJOR
53 #define DEVICE_NR(device) (minor(device))
56 #define dprintk(x...) ((void)(DEBUG && printk(x)))
60 static void autostart_arrays (void);
63 static mdk_personality_t *pers[MAX_PERSONALITY];
64 static spinlock_t pers_lock = SPIN_LOCK_UNLOCKED;
67 * Current RAID-1,4,5 parallel reconstruction 'guaranteed speed limit'
68 * is 1000 KB/sec, so the extra system load does not show up that much.
69 * Increase it if you want to have more _guaranteed_ speed. Note that
 * the RAID driver will use the maximum available bandwidth if the IO
71 * subsystem is idle. There is also an 'absolute maximum' reconstruction
72 * speed limit - in case reconstruction slows down your system despite
75 * you can change it via /proc/sys/dev/raid/speed_limit_min and _max.
78 static int sysctl_speed_limit_min = 1000;
79 static int sysctl_speed_limit_max = 200000;
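/*
 * Tuning sketch (illustrative, not part of the original code): both
 * limits are plain integers in KB/sec, so e.g.
 *
 *	echo 10000  > /proc/sys/dev/raid/speed_limit_min
 *	echo 400000 > /proc/sys/dev/raid/speed_limit_max
 *
 * would raise the guaranteed floor to 10 MB/sec and the ceiling to
 * 400 MB/sec (the values here are made up for the example).
 */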
81 static struct ctl_table_header *raid_table_header;
83 static ctl_table raid_table[] = {
85 .ctl_name = DEV_RAID_SPEED_LIMIT_MIN,
86 .procname = "speed_limit_min",
87 .data = &sysctl_speed_limit_min,
88 .maxlen = sizeof(int),
90 .proc_handler = &proc_dointvec,
93 .ctl_name = DEV_RAID_SPEED_LIMIT_MAX,
94 .procname = "speed_limit_max",
95 .data = &sysctl_speed_limit_max,
96 .maxlen = sizeof(int),
98 .proc_handler = &proc_dointvec,
103 static ctl_table raid_dir_table[] = {
105 .ctl_name = DEV_RAID,
114 static ctl_table raid_root_table[] = {
120 .child = raid_dir_table,
125 static struct block_device_operations md_fops;
127 static struct gendisk *disks[MAX_MD_DEVS];
 * Lets us iterate over all existing md arrays.
131 * all_mddevs_lock protects this list as well as mddev_map.
133 static LIST_HEAD(all_mddevs);
134 static spinlock_t all_mddevs_lock = SPIN_LOCK_UNLOCKED;
138 * iterates through all used mddevs in the system.
139 * We take care to grab the all_mddevs_lock whenever navigating
140 * the list, and to always hold a refcount when unlocked.
 * Any code which breaks out of this loop while owning
 * a reference to the current mddev must mddev_put it.
#define ITERATE_MDDEV(mddev,tmp)					\
									\
	for (({ spin_lock(&all_mddevs_lock);				\
		tmp = all_mddevs.next;					\
		mddev = NULL;});					\
	     ({ if (tmp != &all_mddevs)					\
			mddev_get(list_entry(tmp, mddev_t, all_mddevs));\
		spin_unlock(&all_mddevs_lock);				\
		if (mddev) mddev_put(mddev);				\
		mddev = list_entry(tmp, mddev_t, all_mddevs);		\
		tmp != &all_mddevs;});					\
	     ({ spin_lock(&all_mddevs_lock);				\
		tmp = tmp->next;})					\
		)
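/*
 * Usage sketch (illustrative only), mirroring callers such as
 * md_print_devices() and md_do_sync() below; the macro does the
 * refcounting, so mddev_put() is only needed when leaving early:
 *
 *	mddev_t *mddev;
 *	struct list_head *tmp;
 *
 *	ITERATE_MDDEV(mddev,tmp)
 *		printk(KERN_INFO "md%d\n", mdidx(mddev));
 */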
159 static mddev_t *mddev_map[MAX_MD_DEVS];
161 static int md_fail_request (request_queue_t *q, struct bio *bio)
163 bio_io_error(bio, bio->bi_size);
167 static inline mddev_t *mddev_get(mddev_t *mddev)
169 atomic_inc(&mddev->active);
173 static void mddev_put(mddev_t *mddev)
175 if (!atomic_dec_and_lock(&mddev->active, &all_mddevs_lock))
177 if (!mddev->raid_disks && list_empty(&mddev->disks)) {
178 list_del(&mddev->all_mddevs);
179 mddev_map[mdidx(mddev)] = NULL;
180 blk_put_queue(mddev->queue);
184 spin_unlock(&all_mddevs_lock);
187 static mddev_t * mddev_find(int unit)
189 mddev_t *mddev, *new = NULL;
192 spin_lock(&all_mddevs_lock);
193 if (mddev_map[unit]) {
194 mddev = mddev_get(mddev_map[unit]);
195 spin_unlock(&all_mddevs_lock);
201 mddev_map[unit] = new;
202 list_add(&new->all_mddevs, &all_mddevs);
203 spin_unlock(&all_mddevs_lock);
207 spin_unlock(&all_mddevs_lock);
209 new = (mddev_t *) kmalloc(sizeof(*new), GFP_KERNEL);
213 memset(new, 0, sizeof(*new));
216 init_MUTEX(&new->reconfig_sem);
217 INIT_LIST_HEAD(&new->disks);
218 INIT_LIST_HEAD(&new->all_mddevs);
219 init_timer(&new->safemode_timer);
220 atomic_set(&new->active, 1);
222 new->queue = blk_alloc_queue(GFP_KERNEL);
228 blk_queue_make_request(new->queue, md_fail_request);
233 static inline int mddev_lock(mddev_t * mddev)
235 return down_interruptible(&mddev->reconfig_sem);
238 static inline void mddev_lock_uninterruptible(mddev_t * mddev)
240 down(&mddev->reconfig_sem);
243 static inline int mddev_trylock(mddev_t * mddev)
245 return down_trylock(&mddev->reconfig_sem);
248 static inline void mddev_unlock(mddev_t * mddev)
250 up(&mddev->reconfig_sem);
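/*
 * Locking sketch (illustrative): reconfiguration paths such as
 * md_ioctl() bracket their work like this, bailing out if the
 * semaphore wait is interrupted:
 *
 *	if (mddev_lock(mddev))
 *		return -EINTR;
 *	... reconfigure mddev ...
 *	mddev_unlock(mddev);
 */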
253 mdk_rdev_t * find_rdev_nr(mddev_t *mddev, int nr)
256 struct list_head *tmp;
258 ITERATE_RDEV(mddev,rdev,tmp) {
259 if (rdev->desc_nr == nr)
265 static mdk_rdev_t * find_rdev(mddev_t * mddev, dev_t dev)
267 struct list_head *tmp;
270 ITERATE_RDEV(mddev,rdev,tmp) {
271 if (rdev->bdev->bd_dev == dev)
static inline sector_t calc_dev_sboffset(struct block_device *bdev)
279 sector_t size = bdev->bd_inode->i_size >> BLOCK_SIZE_BITS;
280 return MD_NEW_SIZE_BLOCKS(size);
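/*
 * Worked example (assuming the usual 64KB reservation of the 0.90
 * format): a 10000KB device rounds down to a 64KB multiple (9984KB),
 * minus one 64KB reserved block, so the superblock lands at 9920KB
 * from the start of the device.
 */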
283 static sector_t calc_dev_size(mdk_rdev_t *rdev, unsigned chunk_size)
287 size = rdev->sb_offset;
	if (chunk_size)
		size &= ~((sector_t)chunk_size/1024 - 1);
	return size;
}
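/*
 * Worked example (illustrative): with a 64KB chunk the mask is
 * ~(64 - 1) in 1KB blocks, so an sb_offset of 9999 blocks rounds
 * down to 9984, a whole number of chunks.
 */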
294 static int alloc_disk_sb(mdk_rdev_t * rdev)
299 rdev->sb_page = alloc_page(GFP_KERNEL);
300 if (!rdev->sb_page) {
301 printk(KERN_ALERT "md: out of memory.\n");
308 static void free_disk_sb(mdk_rdev_t * rdev)
311 page_cache_release(rdev->sb_page);
313 rdev->sb_page = NULL;
320 static int bi_complete(struct bio *bio, unsigned int bytes_done, int error)
325 complete((struct completion*)bio->bi_private);
329 static int sync_page_io(struct block_device *bdev, sector_t sector, int size,
330 struct page *page, int rw)
334 struct completion event;
337 bio.bi_io_vec = &vec;
345 bio.bi_sector = sector;
346 init_completion(&event);
347 bio.bi_private = &event;
348 bio.bi_end_io = bi_complete;
349 submit_bio(rw, &bio);
351 wait_for_completion(&event);
353 return test_bit(BIO_UPTODATE, &bio.bi_flags);
356 static int read_disk_sb(mdk_rdev_t * rdev)
358 char b[BDEVNAME_SIZE];
359 if (!rdev->sb_page) {
367 if (!sync_page_io(rdev->bdev, rdev->sb_offset<<1, MD_SB_BYTES, rdev->sb_page, READ))
373 printk(KERN_ERR "md: disabled device %s, could not read superblock.\n",
374 bdevname(rdev->bdev,b));
378 static int uuid_equal(mdp_super_t *sb1, mdp_super_t *sb2)
380 if ( (sb1->set_uuid0 == sb2->set_uuid0) &&
381 (sb1->set_uuid1 == sb2->set_uuid1) &&
382 (sb1->set_uuid2 == sb2->set_uuid2) &&
383 (sb1->set_uuid3 == sb2->set_uuid3))
391 static int sb_equal(mdp_super_t *sb1, mdp_super_t *sb2)
394 mdp_super_t *tmp1, *tmp2;
396 tmp1 = kmalloc(sizeof(*tmp1),GFP_KERNEL);
397 tmp2 = kmalloc(sizeof(*tmp2),GFP_KERNEL);
399 if (!tmp1 || !tmp2) {
401 printk(KERN_INFO "md.c: sb1 is not equal to sb2!\n");
409 * nr_disks is not constant
414 if (memcmp(tmp1, tmp2, MD_SB_GENERIC_CONSTANT_WORDS * 4))
428 static unsigned int calc_sb_csum(mdp_super_t * sb)
430 unsigned int disk_csum, csum;
	disk_csum = sb->sb_csum;
	sb->sb_csum = 0;
	csum = csum_partial((void *)sb, MD_SB_BYTES, 0);
	sb->sb_csum = disk_csum;
	return csum;
}
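/*
 * Verification sketch (as the load routines below do it): a stored
 * superblock is accepted only if recomputing the checksum reproduces
 * the stored value:
 *
 *	if (calc_sb_csum(sb) != sb->sb_csum)
 *		... reject the superblock ...
 */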
440 * Handle superblock details.
441 * We want to be able to handle multiple superblock formats
442 * so we have a common interface to them all, and an array of
443 * different handlers.
444 * We rely on user-space to write the initial superblock, and support
445 * reading and updating of superblocks.
446 * Interface methods are:
447 * int load_super(mdk_rdev_t *dev, mdk_rdev_t *refdev, int minor_version)
448 * loads and validates a superblock on dev.
449 * if refdev != NULL, compare superblocks on both devices
451 * 0 - dev has a superblock that is compatible with refdev
452 * 1 - dev has a superblock that is compatible and newer than refdev
453 * so dev should be used as the refdev in future
454 * -EINVAL superblock incompatible or invalid
455 * -othererror e.g. -EIO
457 * int validate_super(mddev_t *mddev, mdk_rdev_t *dev)
458 * Verify that dev is acceptable into mddev.
459 * The first time, mddev->raid_disks will be 0, and data from
460 * dev should be merged in. Subsequent calls check that dev
461 * is new enough. Return 0 or -EINVAL
463 * void sync_super(mddev_t *mddev, mdk_rdev_t *dev)
464 * Update the superblock for rdev with data in mddev
465 * This does not write to disc.
struct super_type {
	char		*name;
	struct module	*owner;
	int		(*load_super)(mdk_rdev_t *rdev, mdk_rdev_t *refdev, int minor_version);
	int		(*validate_super)(mddev_t *mddev, mdk_rdev_t *rdev);
	void		(*sync_super)(mddev_t *mddev, mdk_rdev_t *rdev);
};
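/*
 * Dispatch sketch (mirrors analyze_sbs() and add_new_disk() below):
 * the handlers are always called through this table, indexed by the
 * array's major version:
 *
 *	err = super_types[mddev->major_version].
 *		load_super(rdev, refdev, mddev->minor_version);
 */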
478 * load_super for 0.90.0
480 static int super_90_load(mdk_rdev_t *rdev, mdk_rdev_t *refdev, int minor_version)
482 char b[BDEVNAME_SIZE], b2[BDEVNAME_SIZE];
488 * Calculate the position of the superblock,
489 * it's at the end of the disk.
491 * It also happens to be a multiple of 4Kb.
493 sb_offset = calc_dev_sboffset(rdev->bdev);
494 rdev->sb_offset = sb_offset;
496 ret = read_disk_sb(rdev);
501 bdevname(rdev->bdev, b);
502 sb = (mdp_super_t*)page_address(rdev->sb_page);
504 if (sb->md_magic != MD_SB_MAGIC) {
505 printk(KERN_ERR "md: invalid raid superblock magic on %s\n",
510 if (sb->major_version != 0 ||
511 sb->minor_version != 90) {
512 printk(KERN_WARNING "Bad version number %d.%d on %s\n",
513 sb->major_version, sb->minor_version,
518 if (sb->md_minor >= MAX_MD_DEVS) {
519 printk(KERN_ERR "md: %s: invalid raid minor (%x)\n",
523 if (sb->raid_disks <= 0)
526 if (calc_sb_csum(sb) != sb->sb_csum) {
527 printk(KERN_WARNING "md: invalid superblock checksum on %s\n",
532 rdev->preferred_minor = sb->md_minor;
533 rdev->data_offset = 0;
535 if (sb->level == MULTIPATH)
538 rdev->desc_nr = sb->this_disk.number;
544 mdp_super_t *refsb = (mdp_super_t*)page_address(refdev->sb_page);
545 if (!uuid_equal(refsb, sb)) {
546 printk(KERN_WARNING "md: %s has different UUID to %s\n",
547 b, bdevname(refdev->bdev,b2));
550 if (!sb_equal(refsb, sb)) {
551 printk(KERN_WARNING "md: %s has same UUID"
552 " but different superblock to %s\n",
553 b, bdevname(refdev->bdev, b2));
557 ev2 = md_event(refsb);
563 rdev->size = calc_dev_size(rdev, sb->chunk_size);
570 * validate_super for 0.90.0
572 static int super_90_validate(mddev_t *mddev, mdk_rdev_t *rdev)
575 mdp_super_t *sb = (mdp_super_t *)page_address(rdev->sb_page);
577 if (mddev->raid_disks == 0) {
578 mddev->major_version = 0;
579 mddev->minor_version = sb->minor_version;
580 mddev->patch_version = sb->patch_version;
581 mddev->persistent = ! sb->not_persistent;
582 mddev->chunk_size = sb->chunk_size;
583 mddev->ctime = sb->ctime;
584 mddev->utime = sb->utime;
585 mddev->level = sb->level;
586 mddev->layout = sb->layout;
587 mddev->raid_disks = sb->raid_disks;
588 mddev->size = sb->size;
589 mddev->events = md_event(sb);
591 if (sb->state & (1<<MD_SB_CLEAN))
592 mddev->recovery_cp = MaxSector;
594 if (sb->events_hi == sb->cp_events_hi &&
595 sb->events_lo == sb->cp_events_lo) {
596 mddev->recovery_cp = sb->recovery_cp;
598 mddev->recovery_cp = 0;
601 memcpy(mddev->uuid+0, &sb->set_uuid0, 4);
602 memcpy(mddev->uuid+4, &sb->set_uuid1, 4);
603 memcpy(mddev->uuid+8, &sb->set_uuid2, 4);
604 memcpy(mddev->uuid+12,&sb->set_uuid3, 4);
606 mddev->max_disks = MD_SB_DISKS;
611 if (ev1 < mddev->events)
614 if (mddev->level != LEVEL_MULTIPATH) {
615 rdev->raid_disk = -1;
616 rdev->in_sync = rdev->faulty = 0;
617 desc = sb->disks + rdev->desc_nr;
619 if (desc->state & (1<<MD_DISK_FAULTY))
621 else if (desc->state & (1<<MD_DISK_SYNC) &&
622 desc->raid_disk < mddev->raid_disks) {
624 rdev->raid_disk = desc->raid_disk;
631 * sync_super for 0.90.0
633 static void super_90_sync(mddev_t *mddev, mdk_rdev_t *rdev)
636 struct list_head *tmp;
638 int next_spare = mddev->raid_disks;
640 /* make rdev->sb match mddev data..
643 * 2/ Add info for each disk, keeping track of highest desc_nr
644 * 3/ any empty disks < highest become removed
646 * disks[0] gets initialised to REMOVED because
647 * we cannot be sure from other fields if it has
648 * been initialised or not.
652 int active=0, working=0,failed=0,spare=0,nr_disks=0;
654 sb = (mdp_super_t*)page_address(rdev->sb_page);
656 memset(sb, 0, sizeof(*sb));
658 sb->md_magic = MD_SB_MAGIC;
659 sb->major_version = mddev->major_version;
660 sb->minor_version = mddev->minor_version;
661 sb->patch_version = mddev->patch_version;
662 sb->gvalid_words = 0; /* ignored */
663 memcpy(&sb->set_uuid0, mddev->uuid+0, 4);
664 memcpy(&sb->set_uuid1, mddev->uuid+4, 4);
665 memcpy(&sb->set_uuid2, mddev->uuid+8, 4);
666 memcpy(&sb->set_uuid3, mddev->uuid+12,4);
668 sb->ctime = mddev->ctime;
669 sb->level = mddev->level;
670 sb->size = mddev->size;
671 sb->raid_disks = mddev->raid_disks;
672 sb->md_minor = mddev->__minor;
673 sb->not_persistent = !mddev->persistent;
674 sb->utime = mddev->utime;
676 sb->events_hi = (mddev->events>>32);
677 sb->events_lo = (u32)mddev->events;
681 sb->recovery_cp = mddev->recovery_cp;
682 sb->cp_events_hi = (mddev->events>>32);
683 sb->cp_events_lo = (u32)mddev->events;
684 if (mddev->recovery_cp == MaxSector)
685 sb->state = (1<< MD_SB_CLEAN);
689 sb->layout = mddev->layout;
690 sb->chunk_size = mddev->chunk_size;
692 sb->disks[0].state = (1<<MD_DISK_REMOVED);
693 ITERATE_RDEV(mddev,rdev2,tmp) {
695 if (rdev2->raid_disk >= 0 && rdev2->in_sync && !rdev2->faulty)
696 rdev2->desc_nr = rdev2->raid_disk;
698 rdev2->desc_nr = next_spare++;
699 d = &sb->disks[rdev2->desc_nr];
701 d->number = rdev2->desc_nr;
702 d->major = MAJOR(rdev2->bdev->bd_dev);
703 d->minor = MINOR(rdev2->bdev->bd_dev);
		if (rdev2->raid_disk >= 0 && rdev2->in_sync && !rdev2->faulty)
705 d->raid_disk = rdev2->raid_disk;
707 d->raid_disk = rdev2->desc_nr; /* compatibility */
709 d->state = (1<<MD_DISK_FAULTY);
711 } else if (rdev2->in_sync) {
712 d->state = (1<<MD_DISK_ACTIVE);
713 d->state |= (1<<MD_DISK_SYNC);
721 if (rdev2->desc_nr > highest)
722 highest = rdev2->desc_nr;
725 /* now set the "removed" bit on any non-trailing holes */
726 for (i=0; i<highest; i++) {
727 mdp_disk_t *d = &sb->disks[i];
728 if (d->state == 0 && d->number == 0) {
731 d->state = (1<<MD_DISK_REMOVED);
734 sb->nr_disks = nr_disks;
735 sb->active_disks = active;
736 sb->working_disks = working;
737 sb->failed_disks = failed;
738 sb->spare_disks = spare;
740 sb->this_disk = sb->disks[rdev->desc_nr];
741 sb->sb_csum = calc_sb_csum(sb);
745 * version 1 superblock
748 static unsigned int calc_sb_1_csum(struct mdp_superblock_1 * sb)
750 unsigned int disk_csum, csum;
	int size = 256 + le32_to_cpu(sb->max_dev)*2;
753 disk_csum = sb->sb_csum;
755 csum = csum_partial((void *)sb, size, 0);
756 sb->sb_csum = disk_csum;
760 static int super_1_load(mdk_rdev_t *rdev, mdk_rdev_t *refdev, int minor_version)
762 struct mdp_superblock_1 *sb;
765 char b[BDEVNAME_SIZE], b2[BDEVNAME_SIZE];
768 * Calculate the position of the superblock.
769 * It is always aligned to a 4K boundary and
 * depending on minor_version, it can be:
771 * 0: At least 8K, but less than 12K, from end of device
772 * 1: At start of device
773 * 2: 4K from start of device.
775 switch(minor_version) {
777 sb_offset = rdev->bdev->bd_inode->i_size >> 9;
780 /* convert from sectors to K */
792 rdev->sb_offset = sb_offset;
794 ret = read_disk_sb(rdev);
798 sb = (struct mdp_superblock_1*)page_address(rdev->sb_page);
800 if (sb->magic != cpu_to_le32(MD_SB_MAGIC) ||
801 sb->major_version != cpu_to_le32(1) ||
802 le32_to_cpu(sb->max_dev) > (4096-256)/2 ||
803 le64_to_cpu(sb->super_offset) != (rdev->sb_offset<<1) ||
804 sb->feature_map != 0)
807 if (calc_sb_1_csum(sb) != sb->sb_csum) {
808 printk("md: invalid superblock checksum on %s\n",
809 bdevname(rdev->bdev,b));
812 rdev->preferred_minor = 0xffff;
813 rdev->data_offset = le64_to_cpu(sb->data_offset);
819 struct mdp_superblock_1 *refsb =
820 (struct mdp_superblock_1*)page_address(refdev->sb_page);
822 if (memcmp(sb->set_uuid, refsb->set_uuid, 16) != 0 ||
823 sb->level != refsb->level ||
824 sb->layout != refsb->layout ||
825 sb->chunksize != refsb->chunksize) {
826 printk(KERN_WARNING "md: %s has strangely different"
827 " superblock to %s\n",
828 bdevname(rdev->bdev,b),
829 bdevname(refdev->bdev,b2));
832 ev1 = le64_to_cpu(sb->events);
833 ev2 = le64_to_cpu(refsb->events);
839 rdev->size = ((rdev->bdev->bd_inode->i_size>>9) - le64_to_cpu(sb->data_offset)) / 2;
841 rdev->size = rdev->sb_offset;
842 if (rdev->size < le64_to_cpu(sb->data_size)/2)
844 rdev->size = le64_to_cpu(sb->data_size)/2;
845 if (le32_to_cpu(sb->chunksize))
846 rdev->size &= ~((sector_t)le32_to_cpu(sb->chunksize)/2 - 1);
850 static int super_1_validate(mddev_t *mddev, mdk_rdev_t *rdev)
852 struct mdp_superblock_1 *sb = (struct mdp_superblock_1*)page_address(rdev->sb_page);
854 if (mddev->raid_disks == 0) {
855 mddev->major_version = 1;
856 mddev->minor_version = 0;
857 mddev->patch_version = 0;
858 mddev->persistent = 1;
859 mddev->chunk_size = le32_to_cpu(sb->chunksize) << 9;
860 mddev->ctime = le64_to_cpu(sb->ctime) & ((1ULL << 32)-1);
861 mddev->utime = le64_to_cpu(sb->utime) & ((1ULL << 32)-1);
862 mddev->level = le32_to_cpu(sb->level);
863 mddev->layout = le32_to_cpu(sb->layout);
864 mddev->raid_disks = le32_to_cpu(sb->raid_disks);
865 mddev->size = (u32)le64_to_cpu(sb->size);
866 mddev->events = le64_to_cpu(sb->events);
868 mddev->recovery_cp = le64_to_cpu(sb->resync_offset);
869 memcpy(mddev->uuid, sb->set_uuid, 16);
871 mddev->max_disks = (4096-256)/2;
874 ev1 = le64_to_cpu(sb->events);
876 if (ev1 < mddev->events)
880 if (mddev->level != LEVEL_MULTIPATH) {
882 rdev->desc_nr = le32_to_cpu(sb->dev_number);
883 role = le16_to_cpu(sb->dev_roles[rdev->desc_nr]);
885 case 0xffff: /* spare */
888 rdev->raid_disk = -1;
890 case 0xfffe: /* faulty */
893 rdev->raid_disk = -1;
898 rdev->raid_disk = role;
905 static void super_1_sync(mddev_t *mddev, mdk_rdev_t *rdev)
907 struct mdp_superblock_1 *sb;
908 struct list_head *tmp;
911 /* make rdev->sb match mddev and rdev data. */
913 sb = (struct mdp_superblock_1*)page_address(rdev->sb_page);
917 memset(sb->pad1, 0, sizeof(sb->pad1));
918 memset(sb->pad2, 0, sizeof(sb->pad2));
919 memset(sb->pad3, 0, sizeof(sb->pad3));
921 sb->utime = cpu_to_le64((__u64)mddev->utime);
922 sb->events = cpu_to_le64(mddev->events);
924 sb->resync_offset = cpu_to_le64(mddev->recovery_cp);
926 sb->resync_offset = cpu_to_le64(0);
929 ITERATE_RDEV(mddev,rdev2,tmp)
930 if (rdev2->desc_nr > max_dev)
931 max_dev = rdev2->desc_nr;
933 sb->max_dev = max_dev;
934 for (i=0; i<max_dev;i++)
		sb->dev_roles[i] = cpu_to_le16(0xfffe);
937 ITERATE_RDEV(mddev,rdev2,tmp) {
940 sb->dev_roles[i] = cpu_to_le16(0xfffe);
941 else if (rdev2->in_sync)
942 sb->dev_roles[i] = cpu_to_le16(rdev2->raid_disk);
944 sb->dev_roles[i] = cpu_to_le16(0xffff);
947 sb->recovery_offset = cpu_to_le64(0); /* not supported yet */
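/*
 * Role encoding recap (from the load/sync routines above): a slot in
 * dev_roles[] holds the raid_disk number for an in-sync device,
 * 0xffff for a spare and 0xfffe for a faulty device.
 */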
951 struct super_type super_types[] = {
954 .owner = THIS_MODULE,
955 .load_super = super_90_load,
956 .validate_super = super_90_validate,
957 .sync_super = super_90_sync,
961 .owner = THIS_MODULE,
962 .load_super = super_1_load,
963 .validate_super = super_1_validate,
964 .sync_super = super_1_sync,
968 static mdk_rdev_t * match_dev_unit(mddev_t *mddev, mdk_rdev_t *dev)
970 struct list_head *tmp;
973 ITERATE_RDEV(mddev,rdev,tmp)
974 if (rdev->bdev->bd_contains == dev->bdev->bd_contains)
980 static int match_mddev_units(mddev_t *mddev1, mddev_t *mddev2)
982 struct list_head *tmp;
985 ITERATE_RDEV(mddev1,rdev,tmp)
986 if (match_dev_unit(mddev2, rdev))
992 static LIST_HEAD(pending_raid_disks);
994 static int bind_rdev_to_array(mdk_rdev_t * rdev, mddev_t * mddev)
996 mdk_rdev_t *same_pdev;
997 char b[BDEVNAME_SIZE], b2[BDEVNAME_SIZE];
1003 same_pdev = match_dev_unit(mddev, rdev);
			"md%d: WARNING: %s appears to be on the same physical"
			" disk as %s. True protection against single-disk"
			" failure might be compromised.\n",
1009 mdidx(mddev), bdevname(rdev->bdev,b),
1010 bdevname(same_pdev->bdev,b2));
1012 /* Verify rdev->desc_nr is unique.
1013 * If it is -1, assign a free number, else
1014 * check number is not in use
1016 if (rdev->desc_nr < 0) {
1018 if (mddev->pers) choice = mddev->raid_disks;
1019 while (find_rdev_nr(mddev, choice))
1021 rdev->desc_nr = choice;
1023 if (find_rdev_nr(mddev, rdev->desc_nr))
1027 list_add(&rdev->same_set, &mddev->disks);
1028 rdev->mddev = mddev;
1029 printk(KERN_INFO "md: bind<%s>\n", bdevname(rdev->bdev,b));
1033 static void unbind_rdev_from_array(mdk_rdev_t * rdev)
1035 char b[BDEVNAME_SIZE];
1040 list_del_init(&rdev->same_set);
1041 printk(KERN_INFO "md: unbind<%s>\n", bdevname(rdev->bdev,b));
1046 * prevent the device from being mounted, repartitioned or
1047 * otherwise reused by a RAID array (or any other kernel
1048 * subsystem), by opening the device. [simply getting an
1049 * inode is not enough, the SCSI module usage code needs
1050 * an explicit open() on the device]
1052 static int lock_rdev(mdk_rdev_t *rdev, dev_t dev)
1055 struct block_device *bdev;
1057 bdev = open_by_devnum(dev, FMODE_READ|FMODE_WRITE, BDEV_RAW);
1059 return PTR_ERR(bdev);
1060 err = bd_claim(bdev, rdev);
1062 blkdev_put(bdev, BDEV_RAW);
1069 static void unlock_rdev(mdk_rdev_t *rdev)
1071 struct block_device *bdev = rdev->bdev;
1076 blkdev_put(bdev, BDEV_RAW);
1079 void md_autodetect_dev(dev_t dev);
1081 static void export_rdev(mdk_rdev_t * rdev)
1083 char b[BDEVNAME_SIZE];
1084 printk(KERN_INFO "md: export_rdev(%s)\n",
1085 bdevname(rdev->bdev,b));
1089 list_del_init(&rdev->same_set);
1091 md_autodetect_dev(rdev->bdev->bd_dev);
1097 static void kick_rdev_from_array(mdk_rdev_t * rdev)
1099 unbind_rdev_from_array(rdev);
1103 static void export_array(mddev_t *mddev)
1105 struct list_head *tmp;
1108 ITERATE_RDEV(mddev,rdev,tmp) {
1113 kick_rdev_from_array(rdev);
1115 if (!list_empty(&mddev->disks))
1117 mddev->raid_disks = 0;
1118 mddev->major_version = 0;
1121 static void print_desc(mdp_disk_t *desc)
1123 char b[BDEVNAME_SIZE];
1125 printk(" DISK<N:%d,%s(%d,%d),R:%d,S:%d>\n", desc->number,
1126 __bdevname(MKDEV(desc->major, desc->minor), b),
1127 desc->major,desc->minor,desc->raid_disk,desc->state);
1130 static void print_sb(mdp_super_t *sb)
1135 "md: SB: (V:%d.%d.%d) ID:<%08x.%08x.%08x.%08x> CT:%08x\n",
1136 sb->major_version, sb->minor_version, sb->patch_version,
1137 sb->set_uuid0, sb->set_uuid1, sb->set_uuid2, sb->set_uuid3,
1139 printk(KERN_INFO "md: L%d S%08d ND:%d RD:%d md%d LO:%d CS:%d\n",
1140 sb->level, sb->size, sb->nr_disks, sb->raid_disks,
1141 sb->md_minor, sb->layout, sb->chunk_size);
1142 printk(KERN_INFO "md: UT:%08x ST:%d AD:%d WD:%d"
1143 " FD:%d SD:%d CSUM:%08x E:%08lx\n",
1144 sb->utime, sb->state, sb->active_disks, sb->working_disks,
1145 sb->failed_disks, sb->spare_disks,
1146 sb->sb_csum, (unsigned long)sb->events_lo);
1149 for (i = 0; i < MD_SB_DISKS; i++) {
1152 desc = sb->disks + i;
1153 if (desc->number || desc->major || desc->minor ||
1154 desc->raid_disk || (desc->state && (desc->state != 4))) {
1155 printk(" D %2d: ", i);
1159 printk(KERN_INFO "md: THIS: ");
1160 print_desc(&sb->this_disk);
1164 static void print_rdev(mdk_rdev_t *rdev)
1166 char b[BDEVNAME_SIZE];
1167 printk(KERN_INFO "md: rdev %s, SZ:%08llu F:%d S:%d DN:%u\n",
1168 bdevname(rdev->bdev,b), (unsigned long long)rdev->size,
1169 rdev->faulty, rdev->in_sync, rdev->desc_nr);
1170 if (rdev->sb_loaded) {
1171 printk(KERN_INFO "md: rdev superblock:\n");
1172 print_sb((mdp_super_t*)page_address(rdev->sb_page));
1174 printk(KERN_INFO "md: no rdev superblock!\n");
1177 void md_print_devices(void)
1179 struct list_head *tmp, *tmp2;
1182 char b[BDEVNAME_SIZE];
1185 printk("md: **********************************\n");
1186 printk("md: * <COMPLETE RAID STATE PRINTOUT> *\n");
1187 printk("md: **********************************\n");
1188 ITERATE_MDDEV(mddev,tmp) {
1189 printk("md%d: ", mdidx(mddev));
1191 ITERATE_RDEV(mddev,rdev,tmp2)
1192 printk("<%s>", bdevname(rdev->bdev,b));
1195 ITERATE_RDEV(mddev,rdev,tmp2)
1198 printk("md: **********************************\n");
1203 static int write_disk_sb(mdk_rdev_t * rdev)
1205 char b[BDEVNAME_SIZE];
1206 if (!rdev->sb_loaded) {
1215 dprintk(KERN_INFO "(write) %s's sb offset: %llu\n",
1216 bdevname(rdev->bdev,b),
1217 (unsigned long long)rdev->sb_offset);
1219 if (sync_page_io(rdev->bdev, rdev->sb_offset<<1, MD_SB_BYTES, rdev->sb_page, WRITE))
1222 printk("md: write_disk_sb failed for device %s\n",
1223 bdevname(rdev->bdev,b));
1227 static void sync_sbs(mddev_t * mddev)
1230 struct list_head *tmp;
1232 ITERATE_RDEV(mddev,rdev,tmp) {
1233 super_types[mddev->major_version].
1234 sync_super(mddev, rdev);
1235 rdev->sb_loaded = 1;
1239 static void md_update_sb(mddev_t * mddev)
1241 int err, count = 100;
1242 struct list_head *tmp;
1245 mddev->sb_dirty = 0;
1247 mddev->utime = get_seconds();
1250 if (!mddev->events) {
1252 * oops, this 64-bit counter should never wrap.
1253 * Either we are in around ~1 trillion A.C., assuming
1254 * 1 reboot per second, or we have a bug:
1262 * do not write anything to disk if using
1263 * nonpersistent superblocks
1265 if (!mddev->persistent)
1269 "md: updating md%d RAID superblock on device (in sync %d)\n",
1270 mdidx(mddev),mddev->in_sync);
1273 ITERATE_RDEV(mddev,rdev,tmp) {
1274 char b[BDEVNAME_SIZE];
1275 dprintk(KERN_INFO "md: ");
1277 dprintk("(skipping faulty ");
1279 dprintk("%s ", bdevname(rdev->bdev,b));
1280 if (!rdev->faulty) {
1281 err += write_disk_sb(rdev);
1284 if (!err && mddev->level == LEVEL_MULTIPATH)
1285 /* only need to write one superblock... */
1290 printk(KERN_ERR "md: errors occurred during superblock"
1291 " update, repeating\n");
1295 "md: excessive errors occurred during superblock update, exiting\n");
1300 * Import a device. If 'super_format' >= 0, then sanity check the superblock
1302 * mark the device faulty if:
1304 * - the device is nonexistent (zero size)
1305 * - the device has no valid superblock
1307 * a faulty rdev _never_ has rdev->sb set.
1309 static mdk_rdev_t *md_import_device(dev_t newdev, int super_format, int super_minor)
1311 char b[BDEVNAME_SIZE];
1316 rdev = (mdk_rdev_t *) kmalloc(sizeof(*rdev), GFP_KERNEL);
1318 printk(KERN_ERR "md: could not alloc mem for %s!\n",
1319 __bdevname(newdev, b));
1320 return ERR_PTR(-ENOMEM);
1322 memset(rdev, 0, sizeof(*rdev));
1324 if ((err = alloc_disk_sb(rdev)))
1327 err = lock_rdev(rdev, newdev);
1329 printk(KERN_ERR "md: could not lock %s.\n",
1330 __bdevname(newdev, b));
1336 rdev->data_offset = 0;
1337 atomic_set(&rdev->nr_pending, 0);
1339 size = rdev->bdev->bd_inode->i_size >> BLOCK_SIZE_BITS;
1342 "md: %s has zero or unknown size, marking faulty!\n",
1343 bdevname(rdev->bdev,b));
1348 if (super_format >= 0) {
1349 err = super_types[super_format].
1350 load_super(rdev, NULL, super_minor);
1351 if (err == -EINVAL) {
1353 "md: %s has invalid sb, not importing!\n",
1354 bdevname(rdev->bdev,b));
1359 "md: could not read %s's sb, not importing!\n",
1360 bdevname(rdev->bdev,b));
1364 INIT_LIST_HEAD(&rdev->same_set);
1369 if (rdev->sb_page) {
1375 return ERR_PTR(err);
1379 * Check a full RAID array for plausibility
1383 static int analyze_sbs(mddev_t * mddev)
1386 struct list_head *tmp;
1387 mdk_rdev_t *rdev, *freshest;
1388 char b[BDEVNAME_SIZE];
1391 ITERATE_RDEV(mddev,rdev,tmp)
1392 switch (super_types[mddev->major_version].
1393 load_super(rdev, freshest, mddev->minor_version)) {
1401 "md: fatal superblock inconsistency in %s"
1402 " -- removing from array\n",
1403 bdevname(rdev->bdev,b));
1404 kick_rdev_from_array(rdev);
1408 super_types[mddev->major_version].
1409 validate_super(mddev, freshest);
1412 ITERATE_RDEV(mddev,rdev,tmp) {
1413 if (rdev != freshest)
1414 if (super_types[mddev->major_version].
1415 validate_super(mddev, rdev)) {
1416 printk(KERN_WARNING "md: kicking non-fresh %s"
1418 bdevname(rdev->bdev,b));
1419 kick_rdev_from_array(rdev);
1422 if (mddev->level == LEVEL_MULTIPATH) {
1423 rdev->desc_nr = i++;
1424 rdev->raid_disk = rdev->desc_nr;
1431 * Check if we can support this RAID array
1433 if (mddev->major_version != MD_MAJOR_VERSION ||
1434 mddev->minor_version > MD_MINOR_VERSION) {
1436 "md: md%d: unsupported raid array version %d.%d.%d\n",
1437 mdidx(mddev), mddev->major_version,
1438 mddev->minor_version, mddev->patch_version);
1442 if ((mddev->recovery_cp != MaxSector) && ((mddev->level == 1) ||
1443 (mddev->level == 4) || (mddev->level == 5)))
1444 printk(KERN_ERR "md: md%d: raid array is not clean"
1445 " -- starting background reconstruction\n",
1454 static struct kobject *md_probe(dev_t dev, int *part, void *data)
1456 static DECLARE_MUTEX(disks_sem);
1457 int unit = MINOR(dev);
1458 mddev_t *mddev = mddev_find(unit);
1459 struct gendisk *disk;
1470 disk = alloc_disk(1);
1476 disk->major = MD_MAJOR;
1477 disk->first_minor = mdidx(mddev);
1478 sprintf(disk->disk_name, "md%d", mdidx(mddev));
1479 disk->fops = &md_fops;
1480 disk->private_data = mddev;
1481 disk->queue = mddev->queue;
1483 disks[mdidx(mddev)] = disk;
1488 void md_wakeup_thread(mdk_thread_t *thread);
1490 static void md_safemode_timeout(unsigned long data)
1492 mddev_t *mddev = (mddev_t *) data;
1494 mddev->safemode = 1;
1495 md_wakeup_thread(mddev->thread);
1499 static int do_md_run(mddev_t * mddev)
1503 struct list_head *tmp;
1505 struct gendisk *disk;
1506 char b[BDEVNAME_SIZE];
1508 if (list_empty(&mddev->disks)) {
1517 * Analyze all RAID superblock(s)
1519 if (!mddev->raid_disks && analyze_sbs(mddev)) {
1524 chunk_size = mddev->chunk_size;
1525 pnum = level_to_pers(mddev->level);
1527 if ((pnum != MULTIPATH) && (pnum != RAID1)) {
1530 * 'default chunksize' in the old md code used to
1531 * be PAGE_SIZE, baaad.
1532 * we abort here to be on the safe side. We don't
1533 * want to continue the bad practice.
1536 "no chunksize specified, see 'man raidtab'\n");
1539 if (chunk_size > MAX_CHUNK_SIZE) {
1540 printk(KERN_ERR "too big chunk_size: %d > %d\n",
1541 chunk_size, MAX_CHUNK_SIZE);
1545 * chunk-size has to be a power of 2 and multiples of PAGE_SIZE
1547 if ( (1 << ffz(~chunk_size)) != chunk_size) {
1551 if (chunk_size < PAGE_SIZE) {
1552 printk(KERN_ERR "too small chunk_size: %d < %ld\n",
1553 chunk_size, PAGE_SIZE);
1557 /* devices must have minimum size of one chunk */
1558 ITERATE_RDEV(mddev,rdev,tmp) {
1561 if (rdev->size < chunk_size / 1024) {
1563 "md: Dev %s smaller than chunk_size:"
1565 bdevname(rdev->bdev,b),
1566 (unsigned long long)rdev->size,
1573 if (pnum >= MAX_PERSONALITY) {
1581 request_module("md-personality-%d", pnum);
1586 * Drop all container device buffers, from now on
1587 * the only valid external interface is through the md
1589 * Also find largest hardsector size
1591 ITERATE_RDEV(mddev,rdev,tmp) {
1594 sync_blockdev(rdev->bdev);
1595 invalidate_bdev(rdev->bdev, 0);
1598 md_probe(mdidx(mddev), NULL, NULL);
1599 disk = disks[mdidx(mddev)];
1603 spin_lock(&pers_lock);
1604 if (!pers[pnum] || !try_module_get(pers[pnum]->owner)) {
1605 spin_unlock(&pers_lock);
1606 printk(KERN_ERR "md: personality %d is not loaded!\n",
1611 mddev->pers = pers[pnum];
1612 spin_unlock(&pers_lock);
1614 blk_queue_make_request(mddev->queue, mddev->pers->make_request);
1615 printk("%s: setting max_sectors to %d, segment boundary to %d\n",
1619 blk_queue_max_sectors(mddev->queue, chunk_size >> 9);
1620 blk_queue_segment_boundary(mddev->queue, (chunk_size>>1) - 1);
1621 mddev->queue->queuedata = mddev;
1623 err = mddev->pers->run(mddev);
1625 printk(KERN_ERR "md: pers->run() failed ...\n");
1626 module_put(mddev->pers->owner);
1630 atomic_set(&mddev->writes_pending,0);
1631 mddev->safemode = 0;
1632 mddev->safemode_timer.function = md_safemode_timeout;
1633 mddev->safemode_timer.data = (unsigned long) mddev;
1634 mddev->safemode_delay = (20 * HZ)/1000 +1; /* 20 msec delay */
1637 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
1638 md_wakeup_thread(mddev->thread);
1639 set_capacity(disk, mddev->array_size<<1);
1643 static int restart_array(mddev_t *mddev)
1645 struct gendisk *disk = disks[mdidx(mddev)];
1649 * Complain if it has no devices
1652 if (list_empty(&mddev->disks))
1660 mddev->safemode = 0;
1662 set_disk_ro(disk, 0);
1664 printk(KERN_INFO "md: md%d switched to read-write mode.\n",
1667 * Kick recovery or resync if necessary
1669 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
1670 md_wakeup_thread(mddev->thread);
1673 printk(KERN_ERR "md: md%d has no personality assigned.\n",
1682 static int do_md_stop(mddev_t * mddev, int ro)
1685 struct gendisk *disk = disks[mdidx(mddev)];
1688 if (atomic_read(&mddev->active)>2) {
1689 printk("md: md%d still in use.\n",mdidx(mddev));
1693 if (mddev->sync_thread) {
1694 set_bit(MD_RECOVERY_INTR, &mddev->recovery);
1695 md_unregister_thread(mddev->sync_thread);
1696 mddev->sync_thread = NULL;
1699 del_timer_sync(&mddev->safemode_timer);
1701 invalidate_partition(disk, 0);
1710 set_disk_ro(disk, 0);
1711 if (mddev->pers->stop(mddev)) {
1714 set_disk_ro(disk, 1);
1717 module_put(mddev->pers->owner);
1722 if (mddev->raid_disks) {
1723 /* mark array as shutdown cleanly */
1725 md_update_sb(mddev);
1728 set_disk_ro(disk, 1);
1731 * Free resources if final stop
1734 struct gendisk *disk;
1735 printk(KERN_INFO "md: md%d stopped.\n", mdidx(mddev));
1737 export_array(mddev);
1739 mddev->array_size = 0;
1740 disk = disks[mdidx(mddev)];
1742 set_capacity(disk, 0);
1744 printk(KERN_INFO "md: md%d switched to read-only mode.\n",
1751 static void autorun_array(mddev_t *mddev)
1754 struct list_head *tmp;
1757 if (list_empty(&mddev->disks)) {
1762 printk(KERN_INFO "md: running: ");
1764 ITERATE_RDEV(mddev,rdev,tmp) {
1765 char b[BDEVNAME_SIZE];
1766 printk("<%s>", bdevname(rdev->bdev,b));
1770 err = do_md_run (mddev);
		printk(KERN_WARNING "md: do_md_run() returned %d\n", err);
1773 do_md_stop (mddev, 0);
 * let's try to run arrays based on all disks that have arrived
1779 * until now. (those are in pending_raid_disks)
1781 * the method: pick the first pending disk, collect all disks with
1782 * the same UUID, remove all from the pending list and put them into
1783 * the 'same_array' list. Then order this list based on superblock
1784 * update time (freshest comes first), kick out 'old' disks and
1785 * compare superblocks. If everything's fine then run it.
1787 * If "unit" is allocated, then bump its reference count
1789 static void autorun_devices(void)
1791 struct list_head candidates;
1792 struct list_head *tmp;
1793 mdk_rdev_t *rdev0, *rdev;
1795 char b[BDEVNAME_SIZE];
1797 printk(KERN_INFO "md: autorun ...\n");
1798 while (!list_empty(&pending_raid_disks)) {
1799 rdev0 = list_entry(pending_raid_disks.next,
1800 mdk_rdev_t, same_set);
1802 printk(KERN_INFO "md: considering %s ...\n",
1803 bdevname(rdev0->bdev,b));
1804 INIT_LIST_HEAD(&candidates);
1805 ITERATE_RDEV_PENDING(rdev,tmp)
1806 if (super_90_load(rdev, rdev0, 0) >= 0) {
1807 printk(KERN_INFO "md: adding %s ...\n",
1808 bdevname(rdev->bdev,b));
1809 list_move(&rdev->same_set, &candidates);
1812 * now we have a set of devices, with all of them having
1813 * mostly sane superblocks. It's time to allocate the
1817 mddev = mddev_find(rdev0->preferred_minor);
1820 "md: cannot allocate memory for md drive.\n");
1823 if (mddev_lock(mddev))
1824 printk(KERN_WARNING "md: md%d locked, cannot run\n",
1826 else if (mddev->raid_disks || mddev->major_version
1827 || !list_empty(&mddev->disks)) {
1829 "md: md%d already running, cannot run %s\n",
1830 mdidx(mddev), bdevname(rdev0->bdev,b));
1831 mddev_unlock(mddev);
1833 printk(KERN_INFO "md: created md%d\n", mdidx(mddev));
1834 ITERATE_RDEV_GENERIC(candidates,rdev,tmp) {
1835 list_del_init(&rdev->same_set);
1836 if (bind_rdev_to_array(rdev, mddev))
1839 autorun_array(mddev);
1840 mddev_unlock(mddev);
1842 /* on success, candidates will be empty, on error
1845 ITERATE_RDEV_GENERIC(candidates,rdev,tmp)
1849 printk(KERN_INFO "md: ... autorun DONE.\n");
1853 * import RAID devices based on one partition
1854 * if possible, the array gets run as well.
1857 static int autostart_array(dev_t startdev)
1859 char b[BDEVNAME_SIZE];
1860 int err = -EINVAL, i;
1861 mdp_super_t *sb = NULL;
1862 mdk_rdev_t *start_rdev = NULL, *rdev;
1864 start_rdev = md_import_device(startdev, 0, 0);
1865 if (IS_ERR(start_rdev)) {
1866 printk(KERN_WARNING "md: could not import %s!\n",
1867 __bdevname(startdev, b));
1871 /* NOTE: this can only work for 0.90.0 superblocks */
1872 sb = (mdp_super_t*)page_address(start_rdev->sb_page);
1873 if (sb->major_version != 0 ||
1874 sb->minor_version != 90 ) {
1875 printk(KERN_WARNING "md: can only autostart 0.90.0 arrays\n");
1876 export_rdev(start_rdev);
1880 if (start_rdev->faulty) {
1882 "md: can not autostart based on faulty %s!\n",
1883 bdevname(start_rdev->bdev,b));
1884 export_rdev(start_rdev);
1887 list_add(&start_rdev->same_set, &pending_raid_disks);
1889 for (i = 0; i < MD_SB_DISKS; i++) {
1893 desc = sb->disks + i;
1894 dev = MKDEV(desc->major, desc->minor);
1898 if (dev == startdev)
1900 rdev = md_import_device(dev, 0, 0);
1902 printk(KERN_WARNING "md: could not import %s,"
1903 " trying to run array nevertheless.\n",
1904 __bdevname(dev, b));
1907 list_add(&rdev->same_set, &pending_raid_disks);
 * possible return codes:
1919 static int get_version(void * arg)
1923 ver.major = MD_MAJOR_VERSION;
1924 ver.minor = MD_MINOR_VERSION;
1925 ver.patchlevel = MD_PATCHLEVEL_VERSION;
1927 if (copy_to_user(arg, &ver, sizeof(ver)))
1933 static int get_array_info(mddev_t * mddev, void * arg)
1935 mdu_array_info_t info;
1936 int nr,working,active,failed,spare;
1938 struct list_head *tmp;
1940 nr=working=active=failed=spare=0;
1941 ITERATE_RDEV(mddev,rdev,tmp) {
1954 info.major_version = mddev->major_version;
1955 info.minor_version = mddev->minor_version;
1956 info.patch_version = 1;
1957 info.ctime = mddev->ctime;
1958 info.level = mddev->level;
1959 info.size = mddev->size;
1961 info.raid_disks = mddev->raid_disks;
1962 info.md_minor = mddev->__minor;
1963 info.not_persistent= !mddev->persistent;
1965 info.utime = mddev->utime;
1968 info.state = (1<<MD_SB_CLEAN);
1969 info.active_disks = active;
1970 info.working_disks = working;
1971 info.failed_disks = failed;
1972 info.spare_disks = spare;
1974 info.layout = mddev->layout;
1975 info.chunk_size = mddev->chunk_size;
1977 if (copy_to_user(arg, &info, sizeof(info)))
1983 static int get_disk_info(mddev_t * mddev, void * arg)
1985 mdu_disk_info_t info;
1989 if (copy_from_user(&info, arg, sizeof(info)))
1994 rdev = find_rdev_nr(mddev, nr);
1996 info.major = MAJOR(rdev->bdev->bd_dev);
1997 info.minor = MINOR(rdev->bdev->bd_dev);
1998 info.raid_disk = rdev->raid_disk;
2001 info.state |= (1<<MD_DISK_FAULTY);
2002 else if (rdev->in_sync) {
2003 info.state |= (1<<MD_DISK_ACTIVE);
2004 info.state |= (1<<MD_DISK_SYNC);
2007 info.major = info.minor = 0;
2008 info.raid_disk = -1;
2009 info.state = (1<<MD_DISK_REMOVED);
2012 if (copy_to_user(arg, &info, sizeof(info)))
2018 static int add_new_disk(mddev_t * mddev, mdu_disk_info_t *info)
2020 char b[BDEVNAME_SIZE], b2[BDEVNAME_SIZE];
2023 dev = MKDEV(info->major,info->minor);
2024 if (!mddev->raid_disks) {
2026 /* expecting a device which has a superblock */
2027 rdev = md_import_device(dev, mddev->major_version, mddev->minor_version);
2030 "md: md_import_device returned %ld\n",
2032 return PTR_ERR(rdev);
2034 if (!list_empty(&mddev->disks)) {
2035 mdk_rdev_t *rdev0 = list_entry(mddev->disks.next,
2036 mdk_rdev_t, same_set);
2037 int err = super_types[mddev->major_version]
2038 .load_super(rdev, rdev0, mddev->minor_version);
2041 "md: %s has different UUID to %s\n",
2042 bdevname(rdev->bdev,b),
2043 bdevname(rdev0->bdev,b2));
2048 err = bind_rdev_to_array(rdev, mddev);
2055 * add_new_disk can be used once the array is assembled
2056 * to add "hot spares". They must already have a superblock
2061 if (!mddev->pers->hot_add_disk) {
2063 "md%d: personality does not support diskops!\n",
2067 rdev = md_import_device(dev, mddev->major_version,
2068 mddev->minor_version);
2071 "md: md_import_device returned %ld\n",
2073 return PTR_ERR(rdev);
2075 rdev->in_sync = 0; /* just to be sure */
2076 rdev->raid_disk = -1;
2077 err = bind_rdev_to_array(rdev, mddev);
2081 md_wakeup_thread(mddev->thread);
2085 /* otherwise, add_new_disk is only allowed
2086 * for major_version==0 superblocks
2088 if (mddev->major_version != 0) {
2089 printk(KERN_WARNING "md%d: ADD_NEW_DISK not supported\n",
2094 if (!(info->state & (1<<MD_DISK_FAULTY))) {
2096 rdev = md_import_device (dev, -1, 0);
2099 "md: error, md_import_device() returned %ld\n",
2101 return PTR_ERR(rdev);
2103 rdev->desc_nr = info->number;
2104 if (info->raid_disk < mddev->raid_disks)
2105 rdev->raid_disk = info->raid_disk;
2107 rdev->raid_disk = -1;
2110 if (rdev->raid_disk < mddev->raid_disks)
2111 rdev->in_sync = (info->state & (1<<MD_DISK_SYNC));
2115 err = bind_rdev_to_array(rdev, mddev);
2121 if (!mddev->persistent) {
2122 printk(KERN_INFO "md: nonpersistent superblock ...\n");
2123 rdev->sb_offset = rdev->bdev->bd_inode->i_size >> BLOCK_SIZE_BITS;
2125 rdev->sb_offset = calc_dev_sboffset(rdev->bdev);
2126 rdev->size = calc_dev_size(rdev, mddev->chunk_size);
2128 if (!mddev->size || (mddev->size > rdev->size))
2129 mddev->size = rdev->size;
2135 static int hot_generate_error(mddev_t * mddev, dev_t dev)
2137 char b[BDEVNAME_SIZE];
2138 struct request_queue *q;
2144 printk(KERN_INFO "md: trying to generate %s error in md%d ... \n",
2145 __bdevname(dev, b), mdidx(mddev));
2147 rdev = find_rdev(mddev, dev);
2153 if (rdev->desc_nr == -1) {
2160 q = bdev_get_queue(rdev->bdev);
2165 printk(KERN_INFO "md: okay, generating error!\n");
2166 // q->oneshot_error = 1; // disabled for now
2171 static int hot_remove_disk(mddev_t * mddev, dev_t dev)
2173 char b[BDEVNAME_SIZE];
2179 printk(KERN_INFO "md: trying to remove %s from md%d ... \n",
2180 __bdevname(dev, b), mdidx(mddev));
2182 rdev = find_rdev(mddev, dev);
2186 if (rdev->raid_disk >= 0)
2189 kick_rdev_from_array(rdev);
2190 md_update_sb(mddev);
2194 printk(KERN_WARNING "md: cannot remove active disk %s from md%d ... \n",
2195 bdevname(rdev->bdev,b), mdidx(mddev));
2199 static int hot_add_disk(mddev_t * mddev, dev_t dev)
2201 char b[BDEVNAME_SIZE];
2209 printk(KERN_INFO "md: trying to hot-add %s to md%d ... \n",
2210 __bdevname(dev, b), mdidx(mddev));
2212 if (mddev->major_version != 0) {
2213 printk(KERN_WARNING "md%d: HOT_ADD may only be used with"
2214 " version-0 superblocks.\n",
2218 if (!mddev->pers->hot_add_disk) {
2220 "md%d: personality does not support diskops!\n",
2225 rdev = md_import_device (dev, -1, 0);
2228 "md: error, md_import_device() returned %ld\n",
2233 rdev->sb_offset = calc_dev_sboffset(rdev->bdev);
2234 size = calc_dev_size(rdev, mddev->chunk_size);
2237 if (size < mddev->size) {
2239 "md%d: disk size %llu blocks < array size %llu\n",
2240 mdidx(mddev), (unsigned long long)size,
2241 (unsigned long long)mddev->size);
2248 "md: can not hot-add faulty %s disk to md%d!\n",
2249 bdevname(rdev->bdev,b), mdidx(mddev));
2255 bind_rdev_to_array(rdev, mddev);
2258 * The rest should better be atomic, we can have disk failures
2259 * noticed in interrupt contexts ...
2262 if (rdev->desc_nr == mddev->max_disks) {
2263 printk(KERN_WARNING "md%d: can not hot-add to full array!\n",
2266 goto abort_unbind_export;
2269 rdev->raid_disk = -1;
2271 md_update_sb(mddev);
2274 * Kick recovery, maybe this spare has to be added to the
2275 * array immediately.
2277 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
2278 md_wakeup_thread(mddev->thread);
2282 abort_unbind_export:
2283 unbind_rdev_from_array(rdev);
2291 * set_array_info is used two different ways
2292 * The original usage is when creating a new array.
 * In this usage, raid_disks is > 0, and together with
 * level, size, not_persistent, layout and chunksize it determines the
2295 * shape of the array.
2296 * This will always create an array with a type-0.90.0 superblock.
2297 * The newer usage is when assembling an array.
2298 * In this case raid_disks will be 0, and the major_version field is
 * used to determine which style super-blocks are to be found on the devices.
 * The minor and patch _version numbers are also kept in case the
2301 * super_block handler wishes to interpret them.
2303 static int set_array_info(mddev_t * mddev, mdu_array_info_t *info)
2306 if (info->raid_disks == 0) {
2307 /* just setting version number for superblock loading */
2308 if (info->major_version < 0 ||
2309 info->major_version >= sizeof(super_types)/sizeof(super_types[0]) ||
2310 super_types[info->major_version].name == NULL) {
2311 /* maybe try to auto-load a module? */
2313 "md: superblock version %d not known\n",
2314 info->major_version);
2317 mddev->major_version = info->major_version;
2318 mddev->minor_version = info->minor_version;
2319 mddev->patch_version = info->patch_version;
2322 mddev->major_version = MD_MAJOR_VERSION;
2323 mddev->minor_version = MD_MINOR_VERSION;
2324 mddev->patch_version = MD_PATCHLEVEL_VERSION;
2325 mddev->ctime = get_seconds();
2327 mddev->level = info->level;
2328 mddev->size = info->size;
2329 mddev->raid_disks = info->raid_disks;
		/* don't set __minor, it is determined by which /dev/md* was
		 * opened
		 */
2333 if (info->state & (1<<MD_SB_CLEAN))
2334 mddev->recovery_cp = MaxSector;
2336 mddev->recovery_cp = 0;
2337 mddev->persistent = ! info->not_persistent;
2339 mddev->layout = info->layout;
2340 mddev->chunk_size = info->chunk_size;
2342 mddev->max_disks = MD_SB_DISKS;
2346 * Generate a 128 bit UUID
2348 get_random_bytes(mddev->uuid, 16);
2353 static int set_disk_faulty(mddev_t *mddev, dev_t dev)
2357 rdev = find_rdev(mddev, dev);
2361 md_error(mddev, rdev);
2365 static int md_ioctl(struct inode *inode, struct file *file,
2366 unsigned int cmd, unsigned long arg)
2368 char b[BDEVNAME_SIZE];
2371 struct hd_geometry *loc = (struct hd_geometry *) arg;
2372 mddev_t *mddev = NULL;
2375 if (!capable(CAP_SYS_ADMIN))
2378 dev = inode->i_rdev;
2380 if (minor >= MAX_MD_DEVS) {
2386 * Commands dealing with the RAID driver but not any
2392 err = get_version((void *)arg);
2395 case PRINT_RAID_DEBUG:
2410 * Commands creating/starting a new array:
2413 mddev = inode->i_bdev->bd_inode->u.generic_ip;
2421 if (cmd == START_ARRAY) {
2422 /* START_ARRAY doesn't need to lock the array as autostart_array
2423 * does the locking, and it could even be a different array
2425 err = autostart_array(arg);
2427 printk(KERN_WARNING "md: autostart %s failed!\n",
2428 __bdevname(arg, b));
2434 err = mddev_lock(mddev);
2437 "md: ioctl lock interrupted, reason %d, cmd %d\n",
2444 case SET_ARRAY_INFO:
2446 if (!list_empty(&mddev->disks)) {
2448 "md: array md%d already has disks!\n",
2453 if (mddev->raid_disks) {
2455 "md: array md%d already initialised!\n",
2461 mdu_array_info_t info;
2463 memset(&info, 0, sizeof(info));
2464 else if (copy_from_user(&info, (void*)arg, sizeof(info))) {
2468 err = set_array_info(mddev, &info);
2470 printk(KERN_WARNING "md: couldn't set"
2471 " array info. %d\n", err);
2481 * Commands querying/configuring an existing array:
	/* if we are not yet initialised, only ADD_NEW_DISK, STOP_ARRAY or RUN_ARRAY is allowed */
2484 if (!mddev->raid_disks && cmd != ADD_NEW_DISK && cmd != STOP_ARRAY && cmd != RUN_ARRAY) {
2490 * Commands even a read-only array can execute:
2494 case GET_ARRAY_INFO:
2495 err = get_array_info(mddev, (void *)arg);
2499 err = get_disk_info(mddev, (void *)arg);
2502 case RESTART_ARRAY_RW:
2503 err = restart_array(mddev);
2507 err = do_md_stop (mddev, 0);
2511 err = do_md_stop (mddev, 1);
2515 * We have a problem here : there is no easy way to give a CHS
2516 * virtual geometry. We currently pretend that we have a 2 heads
2517 * 4 sectors (with a BIG number of cylinders...). This drives
2518 * dosfs just mad... ;-)
2525 err = put_user (2, (char *) &loc->heads);
2528 err = put_user (4, (char *) &loc->sectors);
2531 err = put_user(get_capacity(disks[mdidx(mddev)])/8,
2532 (short *) &loc->cylinders);
2535 err = put_user (get_start_sect(inode->i_bdev),
2536 (long *) &loc->start);
2541 * The remaining ioctls are changing the state of the
2542 * superblock, so we do not allow read-only arrays
2554 mdu_disk_info_t info;
2555 if (copy_from_user(&info, (void*)arg, sizeof(info)))
2558 err = add_new_disk(mddev, &info);
2561 case HOT_GENERATE_ERROR:
2562 err = hot_generate_error(mddev, arg);
2564 case HOT_REMOVE_DISK:
2565 err = hot_remove_disk(mddev, arg);
2569 err = hot_add_disk(mddev, arg);
2572 case SET_DISK_FAULTY:
2573 err = set_disk_faulty(mddev, arg);
2578 err = do_md_run (mddev);
2580 * we have to clean up the mess if
2581 * the array cannot be run for some
 * ->pers will not be set, so the superblock will not be updated.
2587 do_md_stop (mddev, 0);
2592 if (_IOC_TYPE(cmd) == MD_MAJOR)
2593 printk(KERN_WARNING "md: %s(pid %d) used"
2594 " obsolete MD ioctl, upgrade your"
			       " software to use new ioctls.\n",
2596 current->comm, current->pid);
2603 mddev_unlock(mddev);
2613 static int md_open(struct inode *inode, struct file *file)
2616 * Succeed if we can find or allocate a mddev structure.
2618 mddev_t *mddev = mddev_find(minor(inode->i_rdev));
2624 if ((err = mddev_lock(mddev)))
2628 mddev_unlock(mddev);
2629 inode->i_bdev->bd_inode->u.generic_ip = mddev_get(mddev);
2636 static int md_release(struct inode *inode, struct file * file)
2638 mddev_t *mddev = inode->i_bdev->bd_inode->u.generic_ip;
2647 static struct block_device_operations md_fops =
2649 .owner = THIS_MODULE,
2651 .release = md_release,
2655 int md_thread(void * arg)
2657 mdk_thread_t *thread = arg;
2665 daemonize(thread->name, mdidx(thread->mddev));
2667 current->exit_signal = SIGCHLD;
2668 allow_signal(SIGKILL);
2669 thread->tsk = current;
 * md_thread is a 'system-thread', its priority should be very
2673 * high. We avoid resource deadlocks individually in each
2674 * raid personality. (RAID5 does preallocation) We also use RR and
2675 * the very same RT priority as kswapd, thus we will never get
2676 * into a priority inversion deadlock.
2678 * we definitely have to have equal or higher priority than
2679 * bdflush, otherwise bdflush will deadlock if there are too
2680 * many dirty RAID5 blocks.
2684 complete(thread->event);
2685 while (thread->run) {
2686 void (*run)(mddev_t *);
2688 wait_event_interruptible(thread->wqueue,
2689 test_bit(THREAD_WAKEUP, &thread->flags));
2690 if (current->flags & PF_FREEZE)
2691 refrigerator(PF_IOTHREAD);
2693 clear_bit(THREAD_WAKEUP, &thread->flags);
2700 if (signal_pending(current))
2701 flush_signals(current);
2703 complete(thread->event);
2707 void md_wakeup_thread(mdk_thread_t *thread)
2710 dprintk("md: waking up MD thread %p.\n", thread);
2711 set_bit(THREAD_WAKEUP, &thread->flags);
2712 wake_up(&thread->wqueue);
2716 mdk_thread_t *md_register_thread(void (*run) (mddev_t *), mddev_t *mddev,
2719 mdk_thread_t *thread;
2721 struct completion event;
2723 thread = (mdk_thread_t *) kmalloc
2724 (sizeof(mdk_thread_t), GFP_KERNEL);
2728 memset(thread, 0, sizeof(mdk_thread_t));
2729 init_waitqueue_head(&thread->wqueue);
2731 init_completion(&event);
2732 thread->event = &event;
2734 thread->mddev = mddev;
2735 thread->name = name;
2736 ret = kernel_thread(md_thread, thread, 0);
2741 wait_for_completion(&event);
2745 void md_interrupt_thread(mdk_thread_t *thread)
2751 dprintk("interrupting MD-thread pid %d\n", thread->tsk->pid);
2752 send_sig(SIGKILL, thread->tsk, 1);
2755 void md_unregister_thread(mdk_thread_t *thread)
2757 struct completion event;
2759 init_completion(&event);
2761 thread->event = &event;
2763 thread->name = NULL;
2764 md_interrupt_thread(thread);
2765 wait_for_completion(&event);
2769 void md_error(mddev_t *mddev, mdk_rdev_t *rdev)
2771 dprintk("md_error dev:(%d:%d), rdev:(%d:%d), (caller: %p,%p,%p,%p).\n",
2772 MD_MAJOR,mdidx(mddev),
2773 MAJOR(rdev->bdev->bd_dev), MINOR(rdev->bdev->bd_dev),
2774 __builtin_return_address(0),__builtin_return_address(1),
2775 __builtin_return_address(2),__builtin_return_address(3));
2782 if (!rdev || rdev->faulty)
2784 if (!mddev->pers->error_handler)
2786 mddev->pers->error_handler(mddev,rdev);
2787 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
2788 md_wakeup_thread(mddev->thread);
2791 /* seq_file implementation /proc/mdstat */
2793 static void status_unused(struct seq_file *seq)
2797 struct list_head *tmp;
2799 seq_printf(seq, "unused devices: ");
2801 ITERATE_RDEV_PENDING(rdev,tmp) {
2802 char b[BDEVNAME_SIZE];
2804 seq_printf(seq, "%s ",
2805 bdevname(rdev->bdev,b));
2808 seq_printf(seq, "<none>");
2810 seq_printf(seq, "\n");
2814 static void status_resync(struct seq_file *seq, mddev_t * mddev)
2816 unsigned long max_blocks, resync, res, dt, db, rt;
2818 resync = (mddev->curr_resync - atomic_read(&mddev->recovery_active))/2;
2819 max_blocks = mddev->size;
2822 * Should not happen.
2828 res = (resync/1024)*1000/(max_blocks/1024 + 1);
2830 int i, x = res/50, y = 20-x;
2831 seq_printf(seq, "[");
2832 for (i = 0; i < x; i++)
2833 seq_printf(seq, "=");
2834 seq_printf(seq, ">");
2835 for (i = 0; i < y; i++)
2836 seq_printf(seq, ".");
2837 seq_printf(seq, "] ");
2839 seq_printf(seq, " %s =%3lu.%lu%% (%lu/%lu)",
2840 (test_bit(MD_RECOVERY_SYNC, &mddev->recovery) ?
2841 "resync" : "recovery"),
2842 res/10, res % 10, resync, max_blocks);
2845 * We do not want to overflow, so the order of operands and
2846 * the * 100 / 100 trick are important. We do a +1 to be
2847 * safe against division by zero. We only estimate anyway.
2849 * dt: time from mark until now
2850 * db: blocks written from mark until now
2851 * rt: remaining time
2853 dt = ((jiffies - mddev->resync_mark) / HZ);
2855 db = resync - (mddev->resync_mark_cnt/2);
2856 rt = (dt * ((max_blocks-resync) / (db/100+1)))/100;
2858 seq_printf(seq, " finish=%lu.%lumin", rt / 60, (rt % 60)/6);
2860 seq_printf(seq, " speed=%ldK/sec", db/dt);
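/*
 * Worked example (made-up numbers): with dt = 10 seconds and
 * db = 5000 blocks written since the mark, speed prints as 500K/sec;
 * 100000 blocks left then estimate as rt = 196 seconds, shown as
 * finish=3.2min.
 */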
2863 static void *md_seq_start(struct seq_file *seq, loff_t *pos)
2865 struct list_head *tmp;
2875 spin_lock(&all_mddevs_lock);
2876 list_for_each(tmp,&all_mddevs)
2878 mddev = list_entry(tmp, mddev_t, all_mddevs);
2880 spin_unlock(&all_mddevs_lock);
2883 spin_unlock(&all_mddevs_lock);
2885 return (void*)2;/* tail */
2889 static void *md_seq_next(struct seq_file *seq, void *v, loff_t *pos)
2891 struct list_head *tmp;
2892 mddev_t *next_mddev, *mddev = v;
2898 spin_lock(&all_mddevs_lock);
2900 tmp = all_mddevs.next;
2902 tmp = mddev->all_mddevs.next;
2903 if (tmp != &all_mddevs)
2904 next_mddev = mddev_get(list_entry(tmp,mddev_t,all_mddevs));
2906 next_mddev = (void*)2;
2909 spin_unlock(&all_mddevs_lock);
2917 static void md_seq_stop(struct seq_file *seq, void *v)
2921 if (mddev && v != (void*)1 && v != (void*)2)
2925 static int md_seq_show(struct seq_file *seq, void *v)
2929 struct list_head *tmp2;
2933 if (v == (void*)1) {
2934 seq_printf(seq, "Personalities : ");
2935 spin_lock(&pers_lock);
2936 for (i = 0; i < MAX_PERSONALITY; i++)
2938 seq_printf(seq, "[%s] ", pers[i]->name);
2940 spin_unlock(&pers_lock);
2941 seq_printf(seq, "\n");
2944 if (v == (void*)2) {
2949 if (mddev_lock(mddev)!=0)
2951 if (mddev->pers || mddev->raid_disks || !list_empty(&mddev->disks)) {
2952 seq_printf(seq, "md%d : %sactive", mdidx(mddev),
2953 mddev->pers ? "" : "in");
2956 seq_printf(seq, " (read-only)");
2957 seq_printf(seq, " %s", mddev->pers->name);
2961 ITERATE_RDEV(mddev,rdev,tmp2) {
2962 char b[BDEVNAME_SIZE];
2963 seq_printf(seq, " %s[%d]",
2964 bdevname(rdev->bdev,b), rdev->desc_nr);
2966 seq_printf(seq, "(F)");
2972 if (!list_empty(&mddev->disks)) {
2974 seq_printf(seq, "\n %llu blocks",
2975 (unsigned long long)mddev->array_size);
2977 seq_printf(seq, "\n %llu blocks",
2978 (unsigned long long)size);
2982 mddev->pers->status (seq, mddev);
2983 seq_printf(seq, "\n ");
2984 if (mddev->curr_resync > 2)
2985 status_resync (seq, mddev);
2986 else if (mddev->curr_resync == 1 || mddev->curr_resync == 2)
2987 seq_printf(seq, " resync=DELAYED");
2990 seq_printf(seq, "\n");
2992 mddev_unlock(mddev);
2997 static struct seq_operations md_seq_ops = {
2998 .start = md_seq_start,
2999 .next = md_seq_next,
3000 .stop = md_seq_stop,
3001 .show = md_seq_show,
static int md_seq_open(struct inode *inode, struct file *file)
{
	int error;

	error = seq_open(file, &md_seq_ops);
	return error;
}
static struct file_operations md_seq_fops = {
	.open           = md_seq_open,
	.read           = seq_read,
	.llseek         = seq_lseek,
	.release	= seq_release,
};
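
/*
 * How the pieces above fit together (simplified sketch of the seq_file
 * core in fs/seq_file.c, not code from this driver): a read of
 * /proc/mdstat drives
 *
 *	pos = 0;
 *	v = md_seq_start(seq, &pos);		// (void*)1: header token
 *	while (v) {
 *		md_seq_show(seq, v);		// personalities / one array
 *		v = md_seq_next(seq, v, &pos);	// (void*)2: trailer token
 *	}
 *	md_seq_stop(seq, v);			// drops the mddev refcount
 *
 * The start/next refcounting lets the walk drop all_mddevs_lock between
 * iterations while keeping each mddev alive via mddev_get().
 */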
int register_md_personality(int pnum, mdk_personality_t *p)
{
	if (pnum >= MAX_PERSONALITY) {
		MD_BUG();
		return -EINVAL;
	}

	spin_lock(&pers_lock);
	if (pers[pnum]) {
		spin_unlock(&pers_lock);
		MD_BUG();
		return -EBUSY;
	}

	pers[pnum] = p;
	printk(KERN_INFO "md: %s personality registered as nr %d\n", p->name, pnum);
	spin_unlock(&pers_lock);
	return 0;
}
int unregister_md_personality(int pnum)
{
	if (pnum >= MAX_PERSONALITY) {
		MD_BUG();
		return -EINVAL;
	}

	printk(KERN_INFO "md: %s personality unregistered\n", pers[pnum]->name);
	spin_lock(&pers_lock);
	pers[pnum] = NULL;
	spin_unlock(&pers_lock);
	return 0;
}
void md_sync_acct(mdk_rdev_t *rdev, unsigned long nr_sectors)
{
	rdev->bdev->bd_contains->bd_disk->sync_io += nr_sectors;
}
static int is_mddev_idle(mddev_t *mddev)
{
	mdk_rdev_t * rdev;
	struct list_head *tmp;
	int idle;
	unsigned long curr_events;

	idle = 1;
	ITERATE_RDEV(mddev,rdev,tmp) {
		struct gendisk *disk = rdev->bdev->bd_contains->bd_disk;
		curr_events = disk_stat_read(disk, read_sectors) +
				disk_stat_read(disk, write_sectors) -
				disk->sync_io;
		if ((curr_events - rdev->last_events) > 32) {
			rdev->last_events = curr_events;
			idle = 0;
		}
	}
	return idle;
}
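
/*
 * Worked example for the heuristic above (figures invented): if a member
 * disk completed 1000 new sectors of I/O since the last check, of which
 * 980 were issued by the resync itself (and therefore credited to
 * ->sync_io via md_sync_acct), the delta seen here is 1000 - 980 = 20
 * sectors.  That is within the 32-sector slack, so the disk still counts
 * as idle; the slack absorbs bookkeeping skew between the gendisk stat
 * counters and ->sync_io rather than demanding an exact zero.
 */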
void md_done_sync(mddev_t *mddev, int blocks, int ok)
{
	/* another "blocks" (512-byte) blocks have been synced */
	atomic_sub(blocks, &mddev->recovery_active);
	wake_up(&mddev->recovery_wait);
	if (!ok) {
		set_bit(MD_RECOVERY_ERR, &mddev->recovery);
		md_wakeup_thread(mddev->thread);
		// stop recovery, signal do_sync ....
	}
}
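
/*
 * Hedged usage sketch (modelled on a personality's resync completion
 * path; names illustrative): the end_io handler of a resync request
 * reports its sector count back so the accounting above can wake
 * md_do_sync, or abort the recovery on failure:
 *
 *	static void example_end_sync_io(struct bio *bio, mddev_t *mddev,
 *					int nr_sectors)
 *	{
 *		int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
 *		md_done_sync(mddev, nr_sectors, uptodate);
 *	}
 */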
void md_write_start(mddev_t *mddev)
{
	if (!atomic_read(&mddev->writes_pending)) {
		mddev_lock_uninterruptible(mddev);
		if (mddev->in_sync) {
			mddev->in_sync = 0;
			del_timer(&mddev->safemode_timer);
			md_update_sb(mddev);
		}
		atomic_inc(&mddev->writes_pending);
		mddev_unlock(mddev);
	} else
		atomic_inc(&mddev->writes_pending);
}
void md_write_end(mddev_t *mddev)
{
	if (atomic_dec_and_test(&mddev->writes_pending)) {
		if (mddev->safemode == 2)
			md_wakeup_thread(mddev->thread);
		else
			mod_timer(&mddev->safemode_timer, jiffies + mddev->safemode_delay);
	}
}
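
/*
 * Hedged usage sketch (modelled on what a write-capable personality such
 * as raid1 does; simplified): every write is bracketed by the two calls
 * above, so the superblock is marked dirty before the first write reaches
 * disk and the safemode timer can mark the array clean once writes drain:
 *
 *	md_write_start(mddev);		// in make_request, before submitting
 *	generic_make_request(bio);	// the actual mirrored write(s)
 *	...
 *	md_write_end(mddev);		// in the request completion path
 */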
static inline void md_enter_safemode(mddev_t *mddev)
{
	mddev_lock_uninterruptible(mddev);
	if (mddev->safemode && !atomic_read(&mddev->writes_pending) &&
	    !mddev->in_sync && mddev->recovery_cp == MaxSector) {
		mddev->in_sync = 1;
		md_update_sb(mddev);
	}
	mddev_unlock(mddev);

	if (mddev->safemode == 1)
		mddev->safemode = 0;
}
void md_handle_safemode(mddev_t *mddev)
{
	if (signal_pending(current)) {
		printk(KERN_INFO "md: md%d in immediate safe mode\n",
		       mdidx(mddev));
		mddev->safemode = 2;
		flush_signals(current);
	}
	if (mddev->safemode)
		md_enter_safemode(mddev);
}
DECLARE_WAIT_QUEUE_HEAD(resync_wait);

#define SYNC_MARKS	10
#define SYNC_MARK_STEP	(3*HZ)
static void md_do_sync(mddev_t *mddev)
{
	mddev_t *mddev2;
	unsigned int max_sectors, currspeed = 0,
		 j, window;
	unsigned long mark[SYNC_MARKS];
	unsigned long mark_cnt[SYNC_MARKS];
	int last_mark, m;
	struct list_head *tmp;
	unsigned long last_check;

	/* just in case the thread restarts... */
	if (test_bit(MD_RECOVERY_DONE, &mddev->recovery))
		return;

	/* we overload curr_resync somewhat here.
	 * 0 == not engaged in resync at all
	 * 2 == checking that there is no conflict with another sync
	 * 1 == like 2, but have yielded to allow conflicting resync to
	 *	commence
	 * other == active in resync - this many blocks
	 */
	do {
		mddev->curr_resync = 2;

		ITERATE_MDDEV(mddev2,tmp) {
			if (mddev2 == mddev)
				continue;
			if (mddev2->curr_resync &&
			    match_mddev_units(mddev,mddev2)) {
				printk(KERN_INFO "md: delaying resync of md%d"
				       " until md%d has finished resync (they"
				       " share one or more physical units)\n",
				       mdidx(mddev), mdidx(mddev2));
				if (mddev < mddev2) {/* arbitrarily yield */
					mddev->curr_resync = 1;
					wake_up(&resync_wait);
				}
				if (wait_event_interruptible(resync_wait,
					mddev2->curr_resync < mddev->curr_resync)) {
					flush_signals(current);
					mddev_put(mddev2);
					goto skip;
				}
			}
			if (mddev->curr_resync == 1) {
				mddev_put(mddev2);
				break;
			}
		}
	} while (mddev->curr_resync < 2);

	max_sectors = mddev->size << 1;

	printk(KERN_INFO "md: syncing RAID array md%d\n", mdidx(mddev));
	printk(KERN_INFO "md: minimum _guaranteed_ reconstruction speed:"
	       " %d KB/sec/disc.\n", sysctl_speed_limit_min);
	printk(KERN_INFO "md: using maximum available idle IO bandwidth "
	       "(but not more than %d KB/sec) for reconstruction.\n",
	       sysctl_speed_limit_max);

	is_mddev_idle(mddev); /* this also initializes IO event counters */
	if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery))
		j = mddev->recovery_cp;
	else
		j = 0;
	for (m = 0; m < SYNC_MARKS; m++) {
		mark[m] = jiffies;
		mark_cnt[m] = j;
	}
	last_mark = 0;
	mddev->resync_mark = mark[last_mark];
	mddev->resync_mark_cnt = mark_cnt[last_mark];

	/*
	 * Tune reconstruction:
	 */
	window = 32*(PAGE_SIZE/512);
	printk(KERN_INFO "md: using %dk window, over a total of %d blocks.\n",
	       window/2, max_sectors/2);

	atomic_set(&mddev->recovery_active, 0);
	init_waitqueue_head(&mddev->recovery_wait);
	last_check = 0;

	if (j)
		printk(KERN_INFO
		       "md: resuming recovery of md%d from checkpoint.\n",
		       mdidx(mddev));

	while (j < max_sectors) {
		int sectors;

		sectors = mddev->pers->sync_request(mddev, j, currspeed < sysctl_speed_limit_min);
		if (sectors < 0) {
			set_bit(MD_RECOVERY_ERR, &mddev->recovery);
			goto out;
		}
		atomic_add(sectors, &mddev->recovery_active);
		j += sectors;
		if (j>1) mddev->curr_resync = j;

		if (last_check + window > j)
			continue;

		last_check = j;

		if (test_bit(MD_RECOVERY_INTR, &mddev->recovery) ||
		    test_bit(MD_RECOVERY_ERR, &mddev->recovery))
			break;

	repeat:
		if (jiffies >= mark[last_mark] + SYNC_MARK_STEP ) {
			/* step marks */
			int next = (last_mark+1) % SYNC_MARKS;

			mddev->resync_mark = mark[next];
			mddev->resync_mark_cnt = mark_cnt[next];
			mark[next] = jiffies;
			mark_cnt[next] = j - atomic_read(&mddev->recovery_active);
			last_mark = next;
		}

		if (signal_pending(current)) {
			/*
			 * got a signal, exit.
			 */
			printk(KERN_INFO
			       "md: md_do_sync() got signal ... exiting\n");
			flush_signals(current);
			set_bit(MD_RECOVERY_INTR, &mddev->recovery);
			goto out;
		}

		/*
		 * this loop exits only if we are slower than the 'hard'
		 * speed limit, or the system was IO-idle for a jiffy;
		 * the system might be non-idle CPU-wise, but we only care
		 * about not overloading the IO subsystem. (things like an
		 * e2fsck being done on the RAID array should execute fast)
		 */
		cond_resched();

		currspeed = (j-mddev->resync_mark_cnt)/2/((jiffies-mddev->resync_mark)/HZ +1) +1;

		if (currspeed > sysctl_speed_limit_min) {
			if ((currspeed > sysctl_speed_limit_max) ||
			    !is_mddev_idle(mddev)) {
				current->state = TASK_INTERRUPTIBLE;
				schedule_timeout(HZ/4);
				goto repeat;
			}
		}
	}
	printk(KERN_INFO "md: md%d: sync done.\n",mdidx(mddev));
	/*
	 * this also signals 'finished resyncing' to md_stop
	 */
out:
	wait_event(mddev->recovery_wait, !atomic_read(&mddev->recovery_active));

	/* tell personality that we are finished */
	mddev->pers->sync_request(mddev, max_sectors, 1);

	if (!test_bit(MD_RECOVERY_ERR, &mddev->recovery) &&
	    mddev->curr_resync > 2 &&
	    mddev->curr_resync > mddev->recovery_cp) {
		if (test_bit(MD_RECOVERY_INTR, &mddev->recovery)) {
			printk(KERN_INFO
			       "md: checkpointing recovery of md%d.\n",
			       mdidx(mddev));
			mddev->recovery_cp = mddev->curr_resync;
		} else
			mddev->recovery_cp = MaxSector;
	}

	if (mddev->safemode)
		md_enter_safemode(mddev);
skip:
	mddev->curr_resync = 0;
	set_bit(MD_RECOVERY_DONE, &mddev->recovery);
	md_wakeup_thread(mddev->thread);
}
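
/*
 * Worked example of the throttle above (figures invented): with
 * j - resync_mark_cnt = 60000 sectors (30000K) completed since the
 * oldest mark, and that mark taken 30 seconds ago,
 *
 *	currspeed = 60000/2 / (30 + 1) + 1 = 968 KB/sec
 *
 * which is below the default sysctl_speed_limit_min of 1000, so
 * sync_request() keeps being called with the "go faster" flag and the
 * loop never sleeps.  Once above speed_limit_min, the loop backs off
 * for HZ/4 whenever it exceeds speed_limit_max or other I/O is in
 * flight on the member disks.
 */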
/*
 * This routine is regularly called by all per-raid-array threads to
 * deal with generic issues like resync and super-block update.
 * Raid personalities that don't have a thread (linear/raid0) do not
 * need this as they never do any recovery or update the superblock.
 *
 * It does not do any resync itself, but rather "forks" off other threads
 * to do that as needed.
 * When it is determined that resync is needed, we set MD_RECOVERY_RUNNING in
 * "->recovery" and create a thread at ->sync_thread.
 * When the thread finishes it sets MD_RECOVERY_DONE (and might set
 * MD_RECOVERY_ERR) and wakes up this thread, which will reap the thread
 * and finish up.
 * This thread also removes any faulty devices (with nr_pending == 0).
 *
 * The overall approach is:
 *  1/ If the superblock needs updating, update it.
 *  2/ If a recovery thread is running, don't do anything else.
 *  3/ If recovery has finished, clean up, possibly marking spares active.
 *  4/ If there are any faulty devices, remove them.
 *  5/ If the array is degraded, try to add spare devices.
 *  6/ If the array has spares or is not in-sync, start a resync thread.
 */
void md_check_recovery(mddev_t *mddev)
{
	mdk_rdev_t *rdev;
	struct list_head *rtmp;


	dprintk(KERN_INFO "md: recovery thread got woken up ...\n");

	if (mddev->ro)
		return;
	if ( ! (
		mddev->sb_dirty ||
		test_bit(MD_RECOVERY_NEEDED, &mddev->recovery) ||
		test_bit(MD_RECOVERY_DONE, &mddev->recovery)
		))
		return;
	if (mddev_trylock(mddev)==0) {
		int spares = 0;

		if (mddev->sb_dirty)
			md_update_sb(mddev);
		if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) &&
		    !test_bit(MD_RECOVERY_DONE, &mddev->recovery))
			/* resync/recovery still happening */
			goto unlock;
		if (mddev->sync_thread) {
			/* resync has finished, collect result */
			md_unregister_thread(mddev->sync_thread);
			mddev->sync_thread = NULL;
			if (!test_bit(MD_RECOVERY_ERR, &mddev->recovery)) {
				/* success... activate any spares */
				mddev->pers->spare_active(mddev);
			}
			md_update_sb(mddev);
			mddev->recovery = 0;
			wake_up(&resync_wait);
			goto unlock;
		}
		if (mddev->recovery) {
			/* probably just the RECOVERY_NEEDED flag */
			mddev->recovery = 0;
			wake_up(&resync_wait);
		}

		/* no recovery is running.
		 * remove any failed drives, then
		 * add spares if possible
		 */
		ITERATE_RDEV(mddev,rdev,rtmp) {
			if (rdev->raid_disk >= 0 &&
			    rdev->faulty &&
			    atomic_read(&rdev->nr_pending)==0) {
				mddev->pers->hot_remove_disk(mddev, rdev->raid_disk);
				rdev->raid_disk = -1;
			}
			if (!rdev->faulty && rdev->raid_disk >= 0 && !rdev->in_sync)
				spares++;
		}
		if (mddev->degraded) {
			ITERATE_RDEV(mddev,rdev,rtmp)
				if (rdev->raid_disk < 0
				    && !rdev->faulty) {
					if (mddev->pers->hot_add_disk(mddev,rdev))
						spares++;
					else
						break;
				}
		}

		if (!spares && (mddev->recovery_cp == MaxSector )) {
			/* nothing we can do ... */
			goto unlock;
		}
		if (mddev->pers->sync_request) {
			set_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
			if (!spares)
				set_bit(MD_RECOVERY_SYNC, &mddev->recovery);
			mddev->sync_thread = md_register_thread(md_do_sync,
								mddev,
								"md_resync");
			if (!mddev->sync_thread) {
				printk(KERN_ERR "md%d: could not start resync"
				       " thread...\n",
				       mdidx(mddev));
				/* leave the spares where they are, it shouldn't hurt */
				mddev->recovery = 0;
			} else
				md_wakeup_thread(mddev->sync_thread);
		}
	unlock:
		mddev_unlock(mddev);
	}
}
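
/*
 * Hedged usage sketch: a personality's per-array thread is expected to
 * call the routine above on every wakeup before doing its own work,
 * e.g. (modelled on raid1d; simplified, names illustrative):
 *
 *	static void example_raidd(mddev_t *mddev)
 *	{
 *		md_check_recovery(mddev);	// superblock + resync housekeeping
 *		... service personality-specific retry/resync queues ...
 *	}
 */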
int md_notify_reboot(struct notifier_block *this,
		     unsigned long code, void *x)
{
	struct list_head *tmp;
	mddev_t *mddev;

	if ((code == SYS_DOWN) || (code == SYS_HALT) || (code == SYS_POWER_OFF)) {

		printk(KERN_INFO "md: stopping all md devices.\n");

		ITERATE_MDDEV(mddev,tmp)
			if (mddev_trylock(mddev)==0)
				do_md_stop (mddev, 1);
		/*
		 * certain more exotic SCSI devices are known to be
		 * volatile wrt too early system reboots. While the
		 * right place to handle this issue is the given
		 * driver, we do want to have a safe RAID driver ...
		 */
		mdelay(1000*1);
	}
	return NOTIFY_DONE;
}
struct notifier_block md_notifier = {
	.notifier_call	= md_notify_reboot,
	.next		= NULL,
	.priority	= INT_MAX, /* before any real devices */
};
static void md_geninit(void)
{
	struct proc_dir_entry *p;

	dprintk("md: sizeof(mdp_super_t) = %d\n", (int)sizeof(mdp_super_t));

#ifdef CONFIG_PROC_FS
	p = create_proc_entry("mdstat", S_IRUGO, NULL);
	if (p)
		p->proc_fops = &md_seq_fops;
#endif
}
int __init md_init(void)
{
	int minor;

	printk(KERN_INFO "md: md driver %d.%d.%d MAX_MD_DEVS=%d,"
			" MD_SB_DISKS=%d\n",
			MD_MAJOR_VERSION, MD_MINOR_VERSION,
			MD_PATCHLEVEL_VERSION, MAX_MD_DEVS, MD_SB_DISKS);

	if (register_blkdev(MAJOR_NR, "md"))
		return -1;

	devfs_mk_dir("md");
	blk_register_region(MKDEV(MAJOR_NR, 0), MAX_MD_DEVS, THIS_MODULE,
				md_probe, NULL, NULL);
	for (minor=0; minor < MAX_MD_DEVS; ++minor) {
		devfs_mk_bdev(MKDEV(MAJOR_NR, minor),
				S_IFBLK|S_IRUSR|S_IWUSR,
				"md/%d", minor);
	}

	register_reboot_notifier(&md_notifier);
	raid_table_header = register_sysctl_table(raid_root_table, 1);

	md_geninit();
	return 0;
}
#ifndef MODULE

/*
 * Searches all registered partitions for autorun RAID arrays
 * at boot time.
 */
static dev_t detected_devices[128];
static int dev_cnt;

void md_autodetect_dev(dev_t dev)
{
	if (dev_cnt >= 0 && dev_cnt < 128)	/* bound matches the array size */
		detected_devices[dev_cnt++] = dev;
}
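
/*
 * Hedged note on the caller side: the boot-time partition scanning code
 * invokes md_autodetect_dev() for each partition whose type is
 * LINUX_RAID_PARTITION (0xfd), roughly:
 *
 *	if (SYS_IND(p) == LINUX_RAID_PARTITION)
 *		md_autodetect_dev(part_dev);	// the partition's dev_t
 *
 * (illustrative; see fs/partitions/).  autostart_arrays() below then
 * imports every queued device and runs autorun on the result.
 */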
static void autostart_arrays(void)
{
	char b[BDEVNAME_SIZE];
	mdk_rdev_t *rdev;
	int i;

	printk(KERN_INFO "md: Autodetecting RAID arrays.\n");

	for (i = 0; i < dev_cnt; i++) {
		dev_t dev = detected_devices[i];

		rdev = md_import_device(dev,0, 0);
		if (IS_ERR(rdev)) {
			printk(KERN_ALERT "md: could not import %s!\n",
			       __bdevname(dev, b));
			continue;
		}
		if (rdev->faulty) {
			MD_BUG();
			continue;
		}
		list_add(&rdev->same_set, &pending_raid_disks);
	}
	dev_cnt = 0;

	autorun_devices();
}

#endif /* !MODULE */
static __exit void md_exit(void)
{
	int i;

	blk_unregister_region(MKDEV(MAJOR_NR,0), MAX_MD_DEVS);
	for (i=0; i < MAX_MD_DEVS; i++)
		devfs_remove("md/%d", i);
	devfs_remove("md");

	unregister_blkdev(MAJOR_NR,"md");
	unregister_reboot_notifier(&md_notifier);
	unregister_sysctl_table(raid_table_header);
#ifdef CONFIG_PROC_FS
	remove_proc_entry("mdstat", NULL);
#endif
	for (i = 0; i < MAX_MD_DEVS; i++) {
		struct gendisk *disk = disks[i];
		mddev_t *mddev;
		if (!disks[i])
			continue;
		mddev = disk->private_data;
		del_gendisk(disk);
		put_disk(disk);
		mddev_put(mddev);
	}
}
module_init(md_init)
module_exit(md_exit)

EXPORT_SYMBOL(register_md_personality);
EXPORT_SYMBOL(unregister_md_personality);
EXPORT_SYMBOL(md_error);
EXPORT_SYMBOL(md_sync_acct);
EXPORT_SYMBOL(md_done_sync);
EXPORT_SYMBOL(md_write_start);
EXPORT_SYMBOL(md_write_end);
EXPORT_SYMBOL(md_handle_safemode);
EXPORT_SYMBOL(md_register_thread);
EXPORT_SYMBOL(md_unregister_thread);
EXPORT_SYMBOL(md_wakeup_thread);
EXPORT_SYMBOL(md_print_devices);
EXPORT_SYMBOL(md_interrupt_thread);
EXPORT_SYMBOL(md_check_recovery);
MODULE_LICENSE("GPL");