drivers/md/raid1.c  (linux-flexiantxendom0-3.2.10.git)
1 /*
2  * raid1.c : Multiple Devices driver for Linux
3  *
4  * Copyright (C) 1999, 2000, 2001 Ingo Molnar, Red Hat
5  *
6  * Copyright (C) 1996, 1997, 1998 Ingo Molnar, Miguel de Icaza, Gadi Oxman
7  *
8  * RAID-1 management functions.
9  *
10  * Better read-balancing code written by Mika Kuoppala <miku@iki.fi>, 2000
11  *
12  * Fixes to reconstruction by Jakob Østergaard <jakob@ostenfeld.dk>
13  * Various fixes by Neil Brown <neilb@cse.unsw.edu.au>
14  *
15  * This program is free software; you can redistribute it and/or modify
16  * it under the terms of the GNU General Public License as published by
17  * the Free Software Foundation; either version 2, or (at your option)
18  * any later version.
19  *
20  * You should have received a copy of the GNU General Public License
21  * (for example /usr/src/linux/COPYING); if not, write to the Free
22  * Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
23  */
24
25 #include <linux/raid/raid1.h>
26
27 #define MAJOR_NR MD_MAJOR
28 #define MD_DRIVER
29 #define MD_PERSONALITY
30
31 /*
32  * Number of guaranteed r1bios in case of extreme VM load:
33  */
34 #define NR_RAID1_BIOS 256
35
36 static mdk_personality_t raid1_personality;
37 static spinlock_t retry_list_lock = SPIN_LOCK_UNLOCKED;
38 static LIST_HEAD(retry_list_head);
39
40 static void * r1bio_pool_alloc(int gfp_flags, void *data)
41 {
42         mddev_t *mddev = data;
43         r1bio_t *r1_bio;
44
45         /* allocate an r1bio with room for raid_disks entries in the write_bios array */
46         r1_bio = kmalloc(sizeof(r1bio_t) + sizeof(struct bio*)*mddev->raid_disks,
47                          gfp_flags);
48         if (r1_bio)
49                 memset(r1_bio, 0, sizeof(*r1_bio) + sizeof(struct bio*)*mddev->raid_disks);
50
51         return r1_bio;
52 }
53
54 static void r1bio_pool_free(void *r1_bio, void *data)
55 {
56         kfree(r1_bio);
57 }
58
59 //#define RESYNC_BLOCK_SIZE (64*1024)
60 #define RESYNC_BLOCK_SIZE PAGE_SIZE
61 #define RESYNC_SECTORS (RESYNC_BLOCK_SIZE >> 9)
62 #define RESYNC_PAGES ((RESYNC_BLOCK_SIZE + PAGE_SIZE-1) / PAGE_SIZE)
63 #define RESYNC_WINDOW (2048*1024)
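/*
 * With RESYNC_BLOCK_SIZE equal to PAGE_SIZE, each resync buffer covers one
 * page: on a 4K-page machine that is RESYNC_SECTORS = 8 sectors of 512 bytes
 * and RESYNC_PAGES = 1.  init_resync() below sizes the buffer pool as
 * RESYNC_WINDOW / RESYNC_BLOCK_SIZE buffers (512 with these values), so
 * roughly 2MB of resync IO can be in flight at once.
 */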
64
65 static void * r1buf_pool_alloc(int gfp_flags, void *data)
66 {
67         conf_t *conf = data;
68         struct page *page;
69         r1bio_t *r1_bio;
70         struct bio *bio;
71         int i, j;
72
73         r1_bio = r1bio_pool_alloc(gfp_flags, conf->mddev);
74         if (!r1_bio)
75                 return NULL;
76         bio = bio_alloc(gfp_flags, RESYNC_PAGES);
77         if (!bio)
78                 goto out_free_r1_bio;
79
80         for (i = 0; i < RESYNC_PAGES; i++) {
81                 page = alloc_page(gfp_flags);
82                 if (unlikely(!page))
83                         goto out_free_pages;
84
85                 bio->bi_io_vec[i].bv_page = page;
86                 bio->bi_io_vec[i].bv_len = PAGE_SIZE;
87                 bio->bi_io_vec[i].bv_offset = 0;
88         }
89
90         /*
91          * Pages were allocated above; set up the bio to describe them.
92          */
93         bio->bi_vcnt = RESYNC_PAGES;
94         bio->bi_idx = 0;
95         bio->bi_size = RESYNC_BLOCK_SIZE;
96         bio->bi_end_io = NULL;
97         atomic_set(&bio->bi_cnt, 1);
98
99         r1_bio->master_bio = bio;
100
101         return r1_bio;
102
103 out_free_pages:
104         for (j = 0; j < i; j++)
105                 __free_page(bio->bi_io_vec[j].bv_page);
106         bio_put(bio);
107 out_free_r1_bio:
108         r1bio_pool_free(r1_bio, conf->mddev);
109         return NULL;
110 }
111
112 static void r1buf_pool_free(void *__r1_bio, void *data)
113 {
114         int i;
115         conf_t *conf = data;
116         r1bio_t *r1bio = __r1_bio;
117         struct bio *bio = r1bio->master_bio;
118
119         if (atomic_read(&bio->bi_cnt) != 1)
120                 BUG();
121         for (i = 0; i < RESYNC_PAGES; i++) {
122                 __free_page(bio->bi_io_vec[i].bv_page);
123                 bio->bi_io_vec[i].bv_page = NULL;
124         }
125         if (atomic_read(&bio->bi_cnt) != 1)
126                 BUG();
127         bio_put(bio);
128         r1bio_pool_free(r1bio, conf->mddev);
129 }
130
131 static void put_all_bios(conf_t *conf, r1bio_t *r1_bio)
132 {
133         int i;
134
135         if (r1_bio->read_bio) {
136                 if (atomic_read(&r1_bio->read_bio->bi_cnt) != 1)
137                         BUG();
138                 bio_put(r1_bio->read_bio);
139                 r1_bio->read_bio = NULL;
140         }
141         for (i = 0; i < conf->raid_disks; i++) {
142                 struct bio **bio = r1_bio->write_bios + i;
143                 if (*bio) {
144                         if (atomic_read(&(*bio)->bi_cnt) != 1)
145                                 BUG();
146                         bio_put(*bio);
147                 }
148                 *bio = NULL;
149         }
150 }
151
152 static inline void free_r1bio(r1bio_t *r1_bio)
153 {
154         unsigned long flags;
155
156         conf_t *conf = mddev_to_conf(r1_bio->mddev);
157
158         /*
159          * Wake up any possible resync thread that waits for the device
160          * to go idle.
161          */
162         spin_lock_irqsave(&conf->resync_lock, flags);
163         if (!--conf->nr_pending) {
164                 wake_up(&conf->wait_idle);
165                 wake_up(&conf->wait_resume);
166         }
167         spin_unlock_irqrestore(&conf->resync_lock, flags);
168
169         put_all_bios(conf, r1_bio);
170         mempool_free(r1_bio, conf->r1bio_pool);
171 }
172
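/*
 * Return a resync buffer to the r1buf pool, undoing any partial-page fixup
 * done in sync_request(), and drop both the barrier and the pending count
 * held by the resync path, waking any waiters.
 */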
173 static inline void put_buf(r1bio_t *r1_bio)
174 {
175         conf_t *conf = mddev_to_conf(r1_bio->mddev);
176         struct bio *bio = r1_bio->master_bio;
177         unsigned long flags;
178
179         /*
180          * undo any possible partial request fixup magic:
181          */
182         if (bio->bi_size != RESYNC_BLOCK_SIZE)
183                 bio->bi_io_vec[bio->bi_vcnt-1].bv_len = PAGE_SIZE;
184         put_all_bios(conf, r1_bio);
185         mempool_free(r1_bio, conf->r1buf_pool);
186
187         spin_lock_irqsave(&conf->resync_lock, flags);
188         if (!conf->barrier)
189                 BUG();
190         --conf->barrier;
191         wake_up(&conf->wait_resume);
192         wake_up(&conf->wait_idle);
193
194         if (!--conf->nr_pending) {
195                 wake_up(&conf->wait_idle);
196                 wake_up(&conf->wait_resume);
197         }
198         spin_unlock_irqrestore(&conf->resync_lock, flags);
199 }
200
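/*
 * map(): pick the first operational (in_sync) device in the array, take a
 * reference on it via nr_pending, and hand it back through *rdevp.
 * Returns 0 on success, -1 if no working device is left.
 */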
201 static int map(mddev_t *mddev, mdk_rdev_t **rdevp)
202 {
203         conf_t *conf = mddev_to_conf(mddev);
204         int i, disks = conf->raid_disks;
205
206         /*
207          * Later we will do read balancing on the read side;
208          * for now we use the first available disk.
209          */
210
211         spin_lock_irq(&conf->device_lock);
212         for (i = 0; i < disks; i++) {
213                 mdk_rdev_t *rdev = conf->mirrors[i].rdev;
214                 if (rdev && rdev->in_sync) {
215                         *rdevp = rdev;
216                         atomic_inc(&rdev->nr_pending);
217                         spin_unlock_irq(&conf->device_lock);
218                         return 0;
219                 }
220         }
221         spin_unlock_irq(&conf->device_lock);
222
223         printk(KERN_ERR "raid1_map(): huh, no more operational devices?\n");
224         return -1;
225 }
226
227 static void reschedule_retry(r1bio_t *r1_bio)
228 {
229         unsigned long flags;
230         mddev_t *mddev = r1_bio->mddev;
231
232         spin_lock_irqsave(&retry_list_lock, flags);
233         list_add(&r1_bio->retry_list, &retry_list_head);
234         spin_unlock_irqrestore(&retry_list_lock, flags);
235
236         md_wakeup_thread(mddev->thread);
237 }
238
239 /*
240  * raid_end_bio_io() is called when we have finished servicing a mirrored
241  * operation and are ready to return a success/failure code to the buffer
242  * cache layer.
243  */
244 static void raid_end_bio_io(r1bio_t *r1_bio)
245 {
246         struct bio *bio = r1_bio->master_bio;
247
248         bio_endio(bio, bio->bi_size,
249                 test_bit(R1BIO_Uptodate, &r1_bio->state) ? 0 : -EIO);
250         free_r1bio(r1_bio);
251 }
252
253 /*
254  * Update disk head position estimator based on IRQ completion info.
255  */
256 static inline void update_head_pos(int disk, r1bio_t *r1_bio)
257 {
258         conf_t *conf = mddev_to_conf(r1_bio->mddev);
259
260         conf->mirrors[disk].head_position =
261                 r1_bio->sector + (r1_bio->master_bio->bi_size >> 9);
262 }
263
264 static int raid1_end_request(struct bio *bio, unsigned int bytes_done, int error)
265 {
266         int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
267         r1bio_t * r1_bio = (r1bio_t *)(bio->bi_private);
268         int mirror;
269         conf_t *conf = mddev_to_conf(r1_bio->mddev);
270
271         if (bio->bi_size)
272                 return 1;
273         
274         if (r1_bio->cmd == READ || r1_bio->cmd == READA)
275                 mirror = r1_bio->read_disk;
276         else {
277                 for (mirror = 0; mirror < conf->raid_disks; mirror++)
278                         if (r1_bio->write_bios[mirror] == bio)
279                                 break;
280         }
281         /*
282          * this branch is our 'one mirror IO has finished' event handler:
283          */
284         if (!uptodate)
285                 md_error(r1_bio->mddev, conf->mirrors[mirror].rdev);
286         else
287                 /*
288                  * Set R1BIO_Uptodate in our master bio, so that
289                  * we will return a good error code to the higher
290                  * levels even if IO on some other mirrored buffer fails.
291                  *
292                  * The 'master' represents the composite IO operation to
293                  * user-side. So if something waits for IO, then it will
294                  * wait for the 'master' bio.
295                  */
296                 set_bit(R1BIO_Uptodate, &r1_bio->state);
297
298         update_head_pos(mirror, r1_bio);
299         if ((r1_bio->cmd == READ) || (r1_bio->cmd == READA)) {
300                 if (!r1_bio->read_bio)
301                         BUG();
302                 /*
303                  * we have only one bio on the read side
304                  */
305                 if (uptodate)
306                         raid_end_bio_io(r1_bio);
307                 else {
308                         /*
309                          * oops, read error:
310                          */
311                         char b[BDEVNAME_SIZE];
312                         printk(KERN_ERR "raid1: %s: rescheduling sector %llu\n",
313                                 bdevname(conf->mirrors[mirror].rdev->bdev,b), (unsigned long long)r1_bio->sector);
314                         reschedule_retry(r1_bio);
315                 }
316         } else {
317
318                 if (r1_bio->read_bio)
319                         BUG();
320                 /*
321                  * WRITE:
322                  *
323                  * Let's see if all mirrored write operations have finished
324                  * already.
325                  */
326                 if (atomic_dec_and_test(&r1_bio->remaining)) {
327                         md_write_end(r1_bio->mddev);
328                         raid_end_bio_io(r1_bio);
329                 }       
330         }
331         atomic_dec(&conf->mirrors[mirror].rdev->nr_pending);
332         return 0;
333 }
334
335 /*
336  * This routine returns the disk from which the requested read should
337  * be done. There is a per-array 'next expected sequential IO' sector
338  * number - if this matches on the next IO then we use the last disk.
339  * There is also a per-disk 'last known head position' sector that is
340  * maintained from IRQ contexts, both the normal and the resync IO
341  * completion handlers update this position correctly. If there is no
342  * perfect sequential match then we pick the disk whose head is closest.
343  *
344  * If there are 2 mirrors in the same 2 devices, performance degrades
345  * because position is mirror, not device based.
346  *
347  * The rdev for the device selected will have nr_pending incremented.
348  */
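/*
 * Example of the distance metric used below: with the head of mirror 0 last
 * seen at sector 1000 and mirror 1 at sector 5000, a read at sector 1024
 * gives distances of 24 and 3976, so mirror 0 is chosen (a mirror with no
 * pending IO at all is preferred outright).
 */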
349 static int read_balance(conf_t *conf, struct bio *bio, r1bio_t *r1_bio)
350 {
351         const unsigned long this_sector = r1_bio->sector;
352         int new_disk = conf->last_used, disk = new_disk;
353         const int sectors = bio->bi_size >> 9;
354         sector_t new_distance, current_distance;
355
356         spin_lock_irq(&conf->device_lock);
357         /*
358          * Check if we can balance. We can balance on the whole
359          * device if no resync is going on, or below the resync window.
360          * We take the first readable disk when above the resync window.
361          */
362         if (!conf->mddev->in_sync && (this_sector + sectors >= conf->next_resync)) {
363                 /* make sure that disk is operational */
364                 new_disk = 0;
365
366                 while (!conf->mirrors[new_disk].rdev ||
367                        !conf->mirrors[new_disk].rdev->in_sync) {
368                         new_disk++;
369                         if (new_disk == conf->raid_disks) {
370                                 new_disk = 0;
371                                 break;
372                         }
373                 }
374                 goto rb_out;
375         }
376
377
378         /* make sure the disk is operational */
379         while (!conf->mirrors[new_disk].rdev ||
380                !conf->mirrors[new_disk].rdev->in_sync) {
381                 if (new_disk <= 0)
382                         new_disk = conf->raid_disks;
383                 new_disk--;
384                 if (new_disk == disk) {
385                         new_disk = conf->last_used;
386                         goto rb_out;
387                 }
388         }
389         disk = new_disk;
390         /* now disk == new_disk == starting point for search */
391
392         /*
393          * Don't change to another disk for sequential reads:
394          */
395         if (conf->next_seq_sect == this_sector)
396                 goto rb_out;
397         if (this_sector == conf->mirrors[new_disk].head_position)
398                 goto rb_out;
399
400         current_distance = abs(this_sector - conf->mirrors[disk].head_position);
401
402         /* Find the disk whose head is closest */
403
404         do {
405                 if (disk <= 0)
406                         disk = conf->raid_disks;
407                 disk--;
408
409                 if (!conf->mirrors[disk].rdev ||
410                     !conf->mirrors[disk].rdev->in_sync)
411                         continue;
412
413                 if (!atomic_read(&conf->mirrors[disk].rdev->nr_pending)) {
414                         new_disk = disk;
415                         break;
416                 }
417                 new_distance = abs(this_sector - conf->mirrors[disk].head_position);
418                 if (new_distance < current_distance) {
419                         current_distance = new_distance;
420                         new_disk = disk;
421                 }
422         } while (disk != conf->last_used);
423
424 rb_out:
425         r1_bio->read_disk = new_disk;
426         conf->next_seq_sect = this_sector + sectors;
427
428         conf->last_used = new_disk;
429
430         if (conf->mirrors[new_disk].rdev)
431                 atomic_inc(&conf->mirrors[new_disk].rdev->nr_pending);
432         spin_unlock_irq(&conf->device_lock);
433
434         return new_disk;
435 }
436
437 /*
438  * Throttle resync depth, so that we can both get proper overlapping of
439  * requests, but are still able to handle normal requests quickly.
440  */
441 #define RESYNC_DEPTH 32
442
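/*
 * device_barrier(): raise the resync barrier.  The first caller waits for
 * all pending normal IO to drain; further callers are throttled so that at
 * most RESYNC_DEPTH barriers are outstanding at once.  next_resync records
 * where the current resync window starts.
 */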
443 static void device_barrier(conf_t *conf, sector_t sect)
444 {
445         spin_lock_irq(&conf->resync_lock);
446         wait_event_lock_irq(conf->wait_idle, !waitqueue_active(&conf->wait_resume), conf->resync_lock);
447         
448         if (!conf->barrier++) {
449                 wait_event_lock_irq(conf->wait_idle, !conf->nr_pending, conf->resync_lock);
450                 if (conf->nr_pending)
451                         BUG();
452         }
453         wait_event_lock_irq(conf->wait_resume, conf->barrier < RESYNC_DEPTH, conf->resync_lock);
454         conf->next_resync = sect;
455         spin_unlock_irq(&conf->resync_lock);
456 }
457
458 static int make_request(request_queue_t *q, struct bio * bio)
459 {
460         mddev_t *mddev = q->queuedata;
461         conf_t *conf = mddev_to_conf(mddev);
462         mirror_info_t *mirror;
463         r1bio_t *r1_bio;
464         struct bio *read_bio;
465         int i, disks = conf->raid_disks;
466
467         /*
468          * Register the new request and wait if the reconstruction
469          * thread has put up a bar for new requests.
470          * Continue immediately if no resync is active currently.
471          */
472         spin_lock_irq(&conf->resync_lock);
473         wait_event_lock_irq(conf->wait_resume, !conf->barrier, conf->resync_lock);
474         conf->nr_pending++;
475         spin_unlock_irq(&conf->resync_lock);
476
477         if (bio_data_dir(bio)==WRITE) {
478                 disk_stat_inc(mddev->gendisk, writes);
479                 disk_stat_add(mddev->gendisk, write_sectors, bio_sectors(bio));
480         } else {
481                 disk_stat_inc(mddev->gendisk, reads);
482                 disk_stat_add(mddev->gendisk, read_sectors, bio_sectors(bio));
483         }
484
485         /*
486          * make_request() can abort the operation when READA is being
487          * used and no empty request is available.
488          *
489          */
490         r1_bio = mempool_alloc(conf->r1bio_pool, GFP_NOIO);
491
492         r1_bio->master_bio = bio;
493
494         r1_bio->mddev = mddev;
495         r1_bio->sector = bio->bi_sector;
496         r1_bio->cmd = bio_data_dir(bio);
497
498         if (r1_bio->cmd == READ) {
499                 /*
500                  * read balancing logic:
501                  */
502                 mirror = conf->mirrors + read_balance(conf, bio, r1_bio);
503
504                 read_bio = bio_clone(bio, GFP_NOIO);
505                 if (r1_bio->read_bio)
506                         BUG();
507                 r1_bio->read_bio = read_bio;
508
509                 read_bio->bi_sector = r1_bio->sector + mirror->rdev->data_offset;
510                 read_bio->bi_bdev = mirror->rdev->bdev;
511                 read_bio->bi_end_io = raid1_end_request;
512                 read_bio->bi_rw = r1_bio->cmd;
513                 read_bio->bi_private = r1_bio;
514
515                 generic_make_request(read_bio);
516                 return 0;
517         }
518
519         /*
520          * WRITE:
521          */
522         /* first select target devices under spinlock and
523          * inc refcount on their rdev.  Record them by setting
524          * write_bios[x] to bio
525          */
526         spin_lock_irq(&conf->device_lock);
527         for (i = 0;  i < disks; i++) {
528                 if (conf->mirrors[i].rdev &&
529                     !conf->mirrors[i].rdev->faulty) {
530                         atomic_inc(&conf->mirrors[i].rdev->nr_pending);
531                         r1_bio->write_bios[i] = bio;
532                 } else
533                         r1_bio->write_bios[i] = NULL;
534         }
535         spin_unlock_irq(&conf->device_lock);
536
537         atomic_set(&r1_bio->remaining, 1);
538         md_write_start(mddev);
539         for (i = 0; i < disks; i++) {
540                 struct bio *mbio;
541                 if (!r1_bio->write_bios[i])
542                         continue;
543
544                 mbio = bio_clone(bio, GFP_NOIO);
545                 r1_bio->write_bios[i] = mbio;
546
547                 mbio->bi_sector = r1_bio->sector + conf->mirrors[i].rdev->data_offset;
548                 mbio->bi_bdev = conf->mirrors[i].rdev->bdev;
549                 mbio->bi_end_io = raid1_end_request;
550                 mbio->bi_rw = r1_bio->cmd;
551                 mbio->bi_private = r1_bio;
552
553                 atomic_inc(&r1_bio->remaining);
554                 generic_make_request(mbio);
555         }
556
557         if (atomic_dec_and_test(&r1_bio->remaining)) {
558                 md_write_end(mddev);
559                 raid_end_bio_io(r1_bio);
560         }
561
562         return 0;
563 }
564
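/*
 * status(): append the usual md status summary for this array, e.g.
 * " [2/1] [U_]" for a two-disk set with only the first mirror in sync.
 */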
565 static void status(struct seq_file *seq, mddev_t *mddev)
566 {
567         conf_t *conf = mddev_to_conf(mddev);
568         int i;
569
570         seq_printf(seq, " [%d/%d] [", conf->raid_disks,
571                                                 conf->working_disks);
572         for (i = 0; i < conf->raid_disks; i++)
573                 seq_printf(seq, "%s",
574                               conf->mirrors[i].rdev &&
575                               conf->mirrors[i].rdev->in_sync ? "U" : "_");
576         seq_printf(seq, "]");
577 }
578
579
580 static void error(mddev_t *mddev, mdk_rdev_t *rdev)
581 {
582         char b[BDEVNAME_SIZE];
583         conf_t *conf = mddev_to_conf(mddev);
584
585         /*
586          * If it is not operational, then we have already marked it as dead
587          * else if it is the last working disk, ignore the error, let the
588          * next level up know.
589          * else mark the drive as failed
590          */
591         if (rdev->in_sync
592             && conf->working_disks == 1)
593                 /*
594                  * Don't fail the drive, act as though we were just a
595                  * normal single drive
596                  */
597                 return;
598         if (rdev->in_sync) {
599                 mddev->degraded++;
600                 conf->working_disks--;
601                 /*
602                  * if recovery is running, make sure it aborts.
603                  */
604                 set_bit(MD_RECOVERY_ERR, &mddev->recovery);
605         }
606         rdev->in_sync = 0;
607         rdev->faulty = 1;
608         mddev->sb_dirty = 1;
609         printk(KERN_ALERT "raid1: Disk failure on %s, disabling device. \n"
610                 "       Operation continuing on %d devices\n",
611                 bdevname(rdev->bdev,b), conf->working_disks);
612 }
613
614 static void print_conf(conf_t *conf)
615 {
616         int i;
617         mirror_info_t *tmp;
618
619         printk("RAID1 conf printout:\n");
620         if (!conf) {
621                 printk("(!conf)\n");
622                 return;
623         }
624         printk(" --- wd:%d rd:%d\n", conf->working_disks,
625                 conf->raid_disks);
626
627         for (i = 0; i < conf->raid_disks; i++) {
628                 char b[BDEVNAME_SIZE];
629                 tmp = conf->mirrors + i;
630                 if (tmp->rdev)
631                         printk(" disk %d, wo:%d, o:%d, dev:%s\n",
632                                 i, !tmp->rdev->in_sync, !tmp->rdev->faulty,
633                                 bdevname(tmp->rdev->bdev,b));
634         }
635 }
636
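/*
 * close_sync(): wait for the resync barrier to drop completely, then tear
 * down the resync buffer pool.  Called once sync_request() has passed the
 * end of the device.
 */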
637 static void close_sync(conf_t *conf)
638 {
639         spin_lock_irq(&conf->resync_lock);
640         wait_event_lock_irq(conf->wait_resume, !conf->barrier, conf->resync_lock);
641         spin_unlock_irq(&conf->resync_lock);
642
643         if (conf->barrier) BUG();
644         if (waitqueue_active(&conf->wait_idle)) BUG();
645
646         mempool_destroy(conf->r1buf_pool);
647         conf->r1buf_pool = NULL;
648 }
649
650 static int raid1_spare_active(mddev_t *mddev)
651 {
652         int i;
653         conf_t *conf = mddev->private;
654         mirror_info_t *tmp;
655
656         spin_lock_irq(&conf->device_lock);
657         /*
658          * Find any recovered disks (present, not faulty, not yet in_sync)
659          * and mark them in_sync again.
660          */
661         for (i = 0; i < conf->raid_disks; i++) {
662                 tmp = conf->mirrors + i;
663                 if (tmp->rdev 
664                     && !tmp->rdev->faulty
665                     && !tmp->rdev->in_sync) {
666                         conf->working_disks++;
667                         mddev->degraded--;
668                         tmp->rdev->in_sync = 1;
669                 }
670         }
671         spin_unlock_irq(&conf->device_lock);
672
673         print_conf(conf);
674         return 0;
675 }
676
677
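/*
 * raid1_add_disk(): hot-add a spare into the first free mirror slot,
 * propagating the member device's queue limits to the array.  Returns 1 if
 * a slot was found, 0 otherwise.
 */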
678 static int raid1_add_disk(mddev_t *mddev, mdk_rdev_t *rdev)
679 {
680         conf_t *conf = mddev->private;
681         int found = 0;
682         int mirror;
683         mirror_info_t *p;
684
685         spin_lock_irq(&conf->device_lock);
686         for (mirror=0; mirror < mddev->raid_disks; mirror++)
687                 if ( !(p=conf->mirrors+mirror)->rdev) {
688                         p->rdev = rdev;
689
690                         blk_queue_stack_limits(mddev->queue,
691                                                rdev->bdev->bd_disk->queue);
692                         /* as we don't honour merge_bvec_fn, we must never risk
693                          * violating it, so limit ->max_sectors to one PAGE, as
694                          * a one page request is never in violation.
695                          */
696                         if (rdev->bdev->bd_disk->queue->merge_bvec_fn &&
697                             mddev->queue->max_sectors > (PAGE_SIZE>>9))
698                                 mddev->queue->max_sectors = (PAGE_SIZE>>9);
699
700                         p->head_position = 0;
701                         rdev->raid_disk = mirror;
702                         found = 1;
703                         break;
704                 }
705         spin_unlock_irq(&conf->device_lock);
706
707         print_conf(conf);
708         return found;
709 }
710
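/*
 * raid1_remove_disk(): detach a member device, but only if it is no longer
 * in_sync and has no IO pending against it; otherwise return -EBUSY.
 */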
711 static int raid1_remove_disk(mddev_t *mddev, int number)
712 {
713         conf_t *conf = mddev->private;
714         int err = 1;
715         mirror_info_t *p = conf->mirrors+ number;
716
717         print_conf(conf);
718         spin_lock_irq(&conf->device_lock);
719         if (p->rdev) {
720                 if (p->rdev->in_sync ||
721                     atomic_read(&p->rdev->nr_pending)) {
722                         err = -EBUSY;
723                         goto abort;
724                 }
725                 p->rdev = NULL;
726                 err = 0;
727         }
728         if (err)
729                 MD_BUG();
730 abort:
731         spin_unlock_irq(&conf->device_lock);
732
733         print_conf(conf);
734         return err;
735 }
736
737
738 static int end_sync_read(struct bio *bio, unsigned int bytes_done, int error)
739 {
740         int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
741         r1bio_t * r1_bio = (r1bio_t *)(bio->bi_private);
742         conf_t *conf = mddev_to_conf(r1_bio->mddev);
743
744         if (bio->bi_size)
745                 return 1;
746
747         if (r1_bio->read_bio != bio)
748                 BUG();
749         update_head_pos(r1_bio->read_disk, r1_bio);
750         /*
751          * we have read a block, now it needs to be re-written,
752          * or re-read if the read failed.
753          * We don't do much here, just schedule handling by raid1d
754          */
755         if (!uptodate)
756                 md_error(r1_bio->mddev,
757                          conf->mirrors[r1_bio->read_disk].rdev);
758         else
759                 set_bit(R1BIO_Uptodate, &r1_bio->state);
760         atomic_dec(&conf->mirrors[r1_bio->read_disk].rdev->nr_pending);
761         reschedule_retry(r1_bio);
762         return 0;
763 }
764
765 static int end_sync_write(struct bio *bio, unsigned int bytes_done, int error)
766 {
767         int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
768         r1bio_t * r1_bio = (r1bio_t *)(bio->bi_private);
769         mddev_t *mddev = r1_bio->mddev;
770         conf_t *conf = mddev_to_conf(mddev);
771         int i;
772         int mirror=0;
773
774         if (bio->bi_size)
775                 return 1;
776
777         for (i = 0; i < conf->raid_disks; i++)
778                 if (r1_bio->write_bios[i] == bio) {
779                         mirror = i;
780                         break;
781                 }
782         if (!uptodate)
783                 md_error(mddev, conf->mirrors[mirror].rdev);
784         update_head_pos(mirror, r1_bio);
785
786         if (atomic_dec_and_test(&r1_bio->remaining)) {
787                 md_done_sync(mddev, r1_bio->master_bio->bi_size >> 9, uptodate);
788                 put_buf(r1_bio);
789         }
790         atomic_dec(&conf->mirrors[mirror].rdev->nr_pending);
791         return 0;
792 }
793
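/*
 * sync_request_write(): after a resync block has been read (see
 * end_sync_read), schedule writes of that block to every other mirror that
 * still needs it, skipping the device we read from and, during a plain
 * rebuild, devices that are already in sync below recovery_cp.
 */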
794 static void sync_request_write(mddev_t *mddev, r1bio_t *r1_bio)
795 {
796         conf_t *conf = mddev_to_conf(mddev);
797         int i;
798         int disks = conf->raid_disks;
799         struct bio *bio, *mbio;
800
801         bio = r1_bio->master_bio;
802
803         /*
804          * have to allocate lots of bio structures and
805          * schedule writes
806          */
807         if (!test_bit(R1BIO_Uptodate, &r1_bio->state)) {
808                 /*
809                  * There is no point trying a read-for-reconstruct as
810                  * reconstruct is about to be aborted
811                  */
812                 char b[BDEVNAME_SIZE];
813                 printk(KERN_ALERT "raid1: %s: unrecoverable I/O read error"
814                         " for block %llu\n",
815                         bdevname(bio->bi_bdev,b), 
816                         (unsigned long long)r1_bio->sector);
817                 md_done_sync(mddev, r1_bio->master_bio->bi_size >> 9, 0);
818                 put_buf(r1_bio);
819                 return;
820         }
821
822         spin_lock_irq(&conf->device_lock);
823         for (i = 0; i < disks ; i++) {
824                 r1_bio->write_bios[i] = NULL;
825                 if (!conf->mirrors[i].rdev || 
826                     conf->mirrors[i].rdev->faulty)
827                         continue;
828                 if (conf->mirrors[i].rdev->bdev == bio->bi_bdev)
829                         /*
830                          * we read from here, no need to write
831                          */
832                         continue;
833                 if (conf->mirrors[i].rdev->in_sync && 
834                         r1_bio->sector + (bio->bi_size>>9) <= mddev->recovery_cp)
835                         /*
836                          * don't need to write this, we are just rebuilding
837                          */
838                         continue;
839                 atomic_inc(&conf->mirrors[i].rdev->nr_pending);
840                 r1_bio->write_bios[i] = bio;
841         }
842         spin_unlock_irq(&conf->device_lock);
843
844         atomic_set(&r1_bio->remaining, 1);
845         for (i = disks; i-- ; ) {
846                 if (!r1_bio->write_bios[i])
847                         continue;
848                 mbio = bio_clone(bio, GFP_NOIO);
849                 r1_bio->write_bios[i] = mbio;
850                 mbio->bi_bdev = conf->mirrors[i].rdev->bdev;
851                 mbio->bi_sector = r1_bio->sector + conf->mirrors[i].rdev->data_offset;
852                 mbio->bi_end_io = end_sync_write;
853                 mbio->bi_rw = WRITE;
854                 mbio->bi_private = r1_bio;
855
856                 atomic_inc(&r1_bio->remaining);
857                 md_sync_acct(conf->mirrors[i].rdev, mbio->bi_size >> 9);
858                 generic_make_request(mbio);
859         }
860
861         if (atomic_dec_and_test(&r1_bio->remaining)) {
862                 md_done_sync(mddev, r1_bio->master_bio->bi_size >> 9, 1);
863                 put_buf(r1_bio);
864         }
865 }
866
867 /*
868  * This is a kernel thread which:
869  *
870  *      1.      Retries failed read operations on working mirrors.
871  *      2.      Updates the raid superblock when problems are encountered.
872  *      3.      Performs writes following reads for array synchronising.
873  */
874
875 static void raid1d(mddev_t *mddev)
876 {
877         struct list_head *head = &retry_list_head;
878         r1bio_t *r1_bio;
879         struct bio *bio;
880         unsigned long flags;
881         conf_t *conf = mddev_to_conf(mddev);
882         mdk_rdev_t *rdev;
883
884         md_check_recovery(mddev);
885         md_handle_safemode(mddev);
886         
887         for (;;) {
888                 char b[BDEVNAME_SIZE];
889                 spin_lock_irqsave(&retry_list_lock, flags);
890                 if (list_empty(head))
891                         break;
892                 r1_bio = list_entry(head->prev, r1bio_t, retry_list);
893                 list_del(head->prev);
894                 spin_unlock_irqrestore(&retry_list_lock, flags);
895
896                 mddev = r1_bio->mddev;
897                 conf = mddev_to_conf(mddev);
898                 bio = r1_bio->master_bio;
899                 switch(r1_bio->cmd) {
900                 case SPECIAL:
901                         sync_request_write(mddev, r1_bio);
902                         break;
903                 case READ:
904                 case READA:
905                         if (map(mddev, &rdev) == -1) {
906                                 printk(KERN_ALERT "raid1: %s: unrecoverable I/O"
907                                 " read error for block %llu\n",
908                                 bdevname(bio->bi_bdev,b),
909                                 (unsigned long long)r1_bio->sector);
910                                 raid_end_bio_io(r1_bio);
911                                 break;
912                         }
913                         printk(KERN_ERR "raid1: %s: redirecting sector %llu to"
914                                 " another mirror\n",
915                                 bdevname(rdev->bdev,b),
916                                 (unsigned long long)r1_bio->sector);
917                         bio->bi_bdev = rdev->bdev;
918                         bio->bi_sector = r1_bio->sector + rdev->data_offset;
919                         bio->bi_rw = r1_bio->cmd;
920
921                         generic_make_request(bio);
922                         break;
923                 }
924         }
925         spin_unlock_irqrestore(&retry_list_lock, flags);
926 }
927
928
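/*
 * init_resync(): create the pool of resync buffers, enough to cover
 * RESYNC_WINDOW worth of IO (RESYNC_WINDOW / RESYNC_BLOCK_SIZE buffers).
 */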
929 static int init_resync(conf_t *conf)
930 {
931         int buffs;
932
933         buffs = RESYNC_WINDOW / RESYNC_BLOCK_SIZE;
934         if (conf->r1buf_pool)
935                 BUG();
936         conf->r1buf_pool = mempool_create(buffs, r1buf_pool_alloc, r1buf_pool_free, conf);
937         if (!conf->r1buf_pool)
938                 return -ENOMEM;
939         conf->next_resync = 0;
940         return 0;
941 }
942
943 /*
944  * perform a "sync" on one "block"
945  *
946  * We need to make sure that no normal I/O request - particularly write
947  * requests - conflict with active sync requests.
948  *
949  * This is achieved by tracking pending requests and a 'barrier' concept
950  * that can be installed to exclude normal IO requests.
951  */
952
953 static int sync_request(mddev_t *mddev, sector_t sector_nr, int go_faster)
954 {
955         conf_t *conf = mddev_to_conf(mddev);
956         mirror_info_t *mirror;
957         r1bio_t *r1_bio;
958         struct bio *read_bio, *bio;
959         sector_t max_sector, nr_sectors;
960         int disk, partial;
961
962         if (!conf->r1buf_pool)
963                 if (init_resync(conf))
964                         return -ENOMEM;
965
966         max_sector = mddev->size << 1;
967         if (sector_nr >= max_sector) {
968                 close_sync(conf);
969                 return 0;
970         }
971
972         /*
973          * If there is non-resync activity waiting for us then
974          * put in a delay to throttle resync.
975          */
976         if (!go_faster && waitqueue_active(&conf->wait_resume))
977                 schedule_timeout(HZ);
978         device_barrier(conf, sector_nr + RESYNC_SECTORS);
979
980         /*
981          * If reconstructing, and >1 working disk, we could
982          * dedicate one to rebuild and others to service
983          * read requests ...
984          */
985         disk = conf->last_used;
986         /* make sure disk is operational */
987         spin_lock_irq(&conf->device_lock);
988         while (conf->mirrors[disk].rdev == NULL ||
989                !conf->mirrors[disk].rdev->in_sync) {
990                 if (disk <= 0)
991                         disk = conf->raid_disks;
992                 disk--;
993                 if (disk == conf->last_used)
994                         break;
995         }
996         conf->last_used = disk;
997         atomic_inc(&conf->mirrors[disk].rdev->nr_pending);
998         spin_unlock_irq(&conf->device_lock);
999
1000         mirror = conf->mirrors + disk;
1001
1002         r1_bio = mempool_alloc(conf->r1buf_pool, GFP_NOIO);
1003
1004         spin_lock_irq(&conf->resync_lock);
1005         conf->nr_pending++;
1006         spin_unlock_irq(&conf->resync_lock);
1007
1008         r1_bio->mddev = mddev;
1009         r1_bio->sector = sector_nr;
1010         r1_bio->cmd = SPECIAL;
1011         r1_bio->read_disk = disk;
1012
1013         bio = r1_bio->master_bio;
1014         nr_sectors = RESYNC_BLOCK_SIZE >> 9;
1015         if (max_sector - sector_nr < nr_sectors)
1016                 nr_sectors = max_sector - sector_nr;
1017         bio->bi_size = nr_sectors << 9;
1018         bio->bi_vcnt = (bio->bi_size + PAGE_SIZE-1) / PAGE_SIZE;
1019         /*
1020          * Is there a partial page at the end of the request?
1021          */
1022         partial = bio->bi_size % PAGE_SIZE;
1023         if (partial)
1024                 bio->bi_io_vec[bio->bi_vcnt-1].bv_len = partial;
1025
1026
1027         read_bio = bio_clone(r1_bio->master_bio, GFP_NOIO);
1028
1029         read_bio->bi_sector = sector_nr + mirror->rdev->data_offset;
1030         read_bio->bi_bdev = mirror->rdev->bdev;
1031         read_bio->bi_end_io = end_sync_read;
1032         read_bio->bi_rw = READ;
1033         read_bio->bi_private = r1_bio;
1034
1035         if (r1_bio->read_bio)
1036                 BUG();
1037         r1_bio->read_bio = read_bio;
1038
1039         md_sync_acct(mirror->rdev, nr_sectors);
1040
1041         generic_make_request(read_bio);
1042
1043         return nr_sectors;
1044 }
1045
1046 static int run(mddev_t *mddev)
1047 {
1048         conf_t *conf;
1049         int i, j, disk_idx;
1050         mirror_info_t *disk;
1051         mdk_rdev_t *rdev;
1052         struct list_head *tmp;
1053
1054         if (mddev->level != 1) {
1055                 printk("raid1: %s: raid level not set to mirroring (%d)\n",
1056                        mdname(mddev), mddev->level);
1057                 goto out;
1058         }
1059         /*
1060          * copy the already verified devices into our private RAID1
1061          * bookkeeping area. [whatever we allocate in run(),
1062          * should be freed in stop()]
1063          */
1064         conf = kmalloc(sizeof(conf_t), GFP_KERNEL);
1065         mddev->private = conf;
1066         if (!conf) {
1067                 printk(KERN_ERR "raid1: couldn't allocate memory for %s\n",
1068                         mdname(mddev));
1069                 goto out;
1070         }
1071         memset(conf, 0, sizeof(*conf));
1072         conf->mirrors = kmalloc(sizeof(struct mirror_info)*mddev->raid_disks, 
1073                                  GFP_KERNEL);
1074         if (!conf->mirrors) {
1075                 printk(KERN_ERR "raid1: couldn't allocate memory for %s\n",
1076                        mdname(mddev));
1077                 goto out_free_conf;
1078         }
1079         memset(conf->mirrors, 0, sizeof(struct mirror_info)*mddev->raid_disks);
1080
1081         conf->r1bio_pool = mempool_create(NR_RAID1_BIOS, r1bio_pool_alloc,
1082                                                 r1bio_pool_free, mddev);
1083         if (!conf->r1bio_pool) {
1084                 printk(KERN_ERR "raid1: couldn't allocate memory for %s\n", 
1085                         mdname(mddev));
1086                 goto out_free_conf;
1087         }
1088
1089
1090         ITERATE_RDEV(mddev, rdev, tmp) {
1091                 disk_idx = rdev->raid_disk;
1092                 if (disk_idx >= mddev->raid_disks
1093                     || disk_idx < 0)
1094                         continue;
1095                 disk = conf->mirrors + disk_idx;
1096
1097                 disk->rdev = rdev;
1098
1099                 blk_queue_stack_limits(mddev->queue,
1100                                        rdev->bdev->bd_disk->queue);
1101                 /* as we don't honour merge_bvec_fn, we must never risk
1102                  * violating it, so limit ->max_sectors to one PAGE, as
1103                  * a one page request is never in violation.
1104                  */
1105                 if (rdev->bdev->bd_disk->queue->merge_bvec_fn &&
1106                     mddev->queue->max_sectors > (PAGE_SIZE>>9))
1107                         mddev->queue->max_sectors = (PAGE_SIZE>>9);
1108
1109                 disk->head_position = 0;
1110                 if (!rdev->faulty && rdev->in_sync)
1111                         conf->working_disks++;
1112         }
1113         conf->raid_disks = mddev->raid_disks;
1114         conf->mddev = mddev;
1115         conf->device_lock = SPIN_LOCK_UNLOCKED;
1116         if (conf->working_disks == 1)
1117                 mddev->recovery_cp = MaxSector;
1118
1119         conf->resync_lock = SPIN_LOCK_UNLOCKED;
1120         init_waitqueue_head(&conf->wait_idle);
1121         init_waitqueue_head(&conf->wait_resume);
1122
1123         if (!conf->working_disks) {
1124                 printk(KERN_ERR "raid1: no operational mirrors for %s\n",
1125                         mdname(mddev));
1126                 goto out_free_conf;
1127         }
1128
1129         mddev->degraded = 0;
1130         for (i = 0; i < conf->raid_disks; i++) {
1131
1132                 disk = conf->mirrors + i;
1133
1134                 if (!disk->rdev) {
1135                         disk->head_position = 0;
1136                         mddev->degraded++;
1137                 }
1138         }
1139
1140         /*
1141          * find the first working one and use it as a starting point
1142          * for read balancing.
1143          */
1144         for (j = 0; j < conf->raid_disks &&
1145                      (!conf->mirrors[j].rdev ||
1146                       !conf->mirrors[j].rdev->in_sync) ; j++)
1147                 /* nothing */;
1148         conf->last_used = j;
1149
1150
1151
1152         {
1153                 mddev->thread = md_register_thread(raid1d, mddev, "%s_raid1");
1154                 if (!mddev->thread) {
1155                         printk(KERN_ERR 
1156                                 "raid1: couldn't allocate thread for %s\n", 
1157                                 mdname(mddev));
1158                         goto out_free_conf;
1159                 }
1160         }
1161         printk(KERN_INFO 
1162                 "raid1: raid set %s active with %d out of %d mirrors\n",
1163                 mdname(mddev), mddev->raid_disks - mddev->degraded, 
1164                 mddev->raid_disks);
1165         /*
1166          * Ok, everything is just fine now
1167          */
1168         mddev->array_size = mddev->size;
1169
1170         return 0;
1171
1172 out_free_conf:
1173         if (conf->r1bio_pool)
1174                 mempool_destroy(conf->r1bio_pool);
1175         if (conf->mirrors)
1176                 kfree(conf->mirrors);
1177         kfree(conf);
1178         mddev->private = NULL;
1179 out:
1180         return -EIO;
1181 }
1182
1183 static int stop(mddev_t *mddev)
1184 {
1185         conf_t *conf = mddev_to_conf(mddev);
1186
1187         md_unregister_thread(mddev->thread);
1188         mddev->thread = NULL;
1189         if (conf->r1bio_pool)
1190                 mempool_destroy(conf->r1bio_pool);
1191         if (conf->mirrors)
1192                 kfree(conf->mirrors);
1193         kfree(conf);
1194         mddev->private = NULL;
1195         return 0;
1196 }
1197
1198 static mdk_personality_t raid1_personality =
1199 {
1200         .name           = "raid1",
1201         .owner          = THIS_MODULE,
1202         .make_request   = make_request,
1203         .run            = run,
1204         .stop           = stop,
1205         .status         = status,
1206         .error_handler  = error,
1207         .hot_add_disk   = raid1_add_disk,
1208         .hot_remove_disk= raid1_remove_disk,
1209         .spare_active   = raid1_spare_active,
1210         .sync_request   = sync_request,
1211 };
1212
1213 static int __init raid_init(void)
1214 {
1215         return register_md_personality(RAID1, &raid1_personality);
1216 }
1217
1218 static void raid_exit(void)
1219 {
1220         unregister_md_personality(RAID1);
1221 }
1222
1223 module_init(raid_init);
1224 module_exit(raid_exit);
1225 MODULE_LICENSE("GPL");
1226 MODULE_ALIAS("md-personality-3"); /* RAID1 */