drivers/md/raid6main.c
1 /*
2  * raid6main.c : Multiple Devices driver for Linux
3  *         Copyright (C) 1996, 1997 Ingo Molnar, Miguel de Icaza, Gadi Oxman
4  *         Copyright (C) 1999, 2000 Ingo Molnar
5  *         Copyright (C) 2002, 2003 H. Peter Anvin
6  *
7  * RAID-6 management functions.  This code is derived from raid5.c.
8  * Last merge from raid5.c bkcvs version 1.79 (kernel 2.6.1).
9  *
10  * Thanks to Penguin Computing for making the RAID-6 development possible
11  * by donating a test server!
12  *
13  * This program is free software; you can redistribute it and/or modify
14  * it under the terms of the GNU General Public License as published by
15  * the Free Software Foundation; either version 2, or (at your option)
16  * any later version.
17  *
18  * You should have received a copy of the GNU General Public License
19  * (for example /usr/src/linux/COPYING); if not, write to the Free
20  * Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
21  */
22
23
24 #include <linux/config.h>
25 #include <linux/module.h>
26 #include <linux/slab.h>
27 #include <linux/highmem.h>
28 #include <asm/bitops.h>
29 #include <asm/atomic.h>
30 #include "raid6.h"
31
32 /*
33  * Stripe cache
34  */
35
36 #define NR_STRIPES              256
37 #define STRIPE_SIZE             PAGE_SIZE
38 #define STRIPE_SHIFT            (PAGE_SHIFT - 9)
39 #define STRIPE_SECTORS          (STRIPE_SIZE>>9)
40 #define IO_THRESHOLD            1
41 #define HASH_PAGES              1
42 #define HASH_PAGES_ORDER        0
43 #define NR_HASH                 (HASH_PAGES * PAGE_SIZE / sizeof(struct stripe_head *))
44 #define HASH_MASK               (NR_HASH - 1)
45
46 #define stripe_hash(conf, sect) ((conf)->stripe_hashtbl[((sect) >> STRIPE_SHIFT) & HASH_MASK])
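/*
 * The hash bucket for a stripe is derived from its stripe number: the
 * sector is shifted down by STRIPE_SHIFT (so all sectors of one
 * STRIPE_SIZE page map to the same value) and masked into the NR_HASH
 * buckets held in the single-page hash table.
 */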
47
48 /* bio's attached to a stripe+device for I/O are linked together in bi_sector
49  * order without overlap.  There may be several bio's per stripe+device, and
50  * a bio could span several devices.
51  * When walking this list for a particular stripe+device, we must never proceed
52  * beyond a bio that extends past this device, as the next bio might no longer
53  * be valid.
54  * This macro is used to determine the 'next' bio in the list, given the sector
55  * of the current stripe+device
56  */
57 #define r5_next_bio(bio, sect) ( ( bio->bi_sector + (bio->bi_size>>9) < sect + STRIPE_SECTORS) ? bio->bi_next : NULL)
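/*
 * Example, assuming 4K pages (STRIPE_SECTORS == 8): a bio at 'sect' with
 * bi_size 2K ends 4 sectors in, before sect + STRIPE_SECTORS, so the walk
 * may continue to bi_next; a 4K bio ends exactly at the stripe boundary
 * and the walk stops.
 */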
58 /*
59  * The following can be used to debug the driver
60  */
61 #define RAID6_DEBUG     0       /* Extremely verbose printk */
62 #define RAID6_PARANOIA  1       /* Check spinlocks */
63 #define RAID6_DUMPSTATE 0       /* Include stripe cache state in /proc/mdstat */
64 #if RAID6_PARANOIA && CONFIG_SMP
65 # define CHECK_DEVLOCK() if (!spin_is_locked(&conf->device_lock)) BUG()
66 #else
67 # define CHECK_DEVLOCK()
68 #endif
69
70 #define PRINTK(x...) ((void)(RAID6_DEBUG && printk(KERN_DEBUG x)))
71 #if RAID6_DEBUG
72 #undef inline
73 #undef __inline__
74 #define inline
75 #define __inline__
76 #endif
77
78 #if !RAID6_USE_EMPTY_ZERO_PAGE
79 /* In .bss so it's zeroed */
80 const char raid6_empty_zero_page[PAGE_SIZE] __attribute__((aligned(256)));
81 #endif
82
83 static inline int raid6_next_disk(int disk, int raid_disks)
84 {
85         disk++;
86         return (disk < raid_disks) ? disk : 0;
87 }
88
89 static void print_raid6_conf (raid6_conf_t *conf);
90
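/*
 * Dropping the last reference to a stripe either queues it for more work
 * (handle_list, or delayed_list when STRIPE_DELAYED is set) and wakes the
 * raid6 thread, or puts it back on the inactive_list and possibly wakes
 * waiters in get_active_stripe().  Caller must hold device_lock.
 */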
91 static inline void __release_stripe(raid6_conf_t *conf, struct stripe_head *sh)
92 {
93         if (atomic_dec_and_test(&sh->count)) {
94                 if (!list_empty(&sh->lru))
95                         BUG();
96                 if (atomic_read(&conf->active_stripes)==0)
97                         BUG();
98                 if (test_bit(STRIPE_HANDLE, &sh->state)) {
99                         if (test_bit(STRIPE_DELAYED, &sh->state))
100                                 list_add_tail(&sh->lru, &conf->delayed_list);
101                         else
102                                 list_add_tail(&sh->lru, &conf->handle_list);
103                         md_wakeup_thread(conf->mddev->thread);
104                 } else {
105                         if (test_and_clear_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) {
106                                 atomic_dec(&conf->preread_active_stripes);
107                                 if (atomic_read(&conf->preread_active_stripes) < IO_THRESHOLD)
108                                         md_wakeup_thread(conf->mddev->thread);
109                         }
110                         list_add_tail(&sh->lru, &conf->inactive_list);
111                         atomic_dec(&conf->active_stripes);
112                         if (!conf->inactive_blocked ||
113                             atomic_read(&conf->active_stripes) < (NR_STRIPES*3/4))
114                                 wake_up(&conf->wait_for_stripe);
115                 }
116         }
117 }
118 static void release_stripe(struct stripe_head *sh)
119 {
120         raid6_conf_t *conf = sh->raid_conf;
121         unsigned long flags;
122
123         spin_lock_irqsave(&conf->device_lock, flags);
124         __release_stripe(conf, sh);
125         spin_unlock_irqrestore(&conf->device_lock, flags);
126 }
127
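/*
 * Hash chains are singly linked; hash_pprev points back at whatever
 * pointer currently references this stripe (the table slot or the
 * previous entry's hash_next), so a stripe can unlink itself without
 * walking the chain.  A NULL hash_pprev means the stripe is unhashed.
 */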
128 static void remove_hash(struct stripe_head *sh)
129 {
130         PRINTK("remove_hash(), stripe %llu\n", (unsigned long long)sh->sector);
131
132         if (sh->hash_pprev) {
133                 if (sh->hash_next)
134                         sh->hash_next->hash_pprev = sh->hash_pprev;
135                 *sh->hash_pprev = sh->hash_next;
136                 sh->hash_pprev = NULL;
137         }
138 }
139
140 static __inline__ void insert_hash(raid6_conf_t *conf, struct stripe_head *sh)
141 {
142         struct stripe_head **shp = &stripe_hash(conf, sh->sector);
143
144         PRINTK("insert_hash(), stripe %llu\n", (unsigned long long)sh->sector);
145
146         CHECK_DEVLOCK();
147         if ((sh->hash_next = *shp) != NULL)
148                 (*shp)->hash_pprev = &sh->hash_next;
149         *shp = sh;
150         sh->hash_pprev = shp;
151 }
152
153
154 /* find an idle stripe, make sure it is unhashed, and return it. */
155 static struct stripe_head *get_free_stripe(raid6_conf_t *conf)
156 {
157         struct stripe_head *sh = NULL;
158         struct list_head *first;
159
160         CHECK_DEVLOCK();
161         if (list_empty(&conf->inactive_list))
162                 goto out;
163         first = conf->inactive_list.next;
164         sh = list_entry(first, struct stripe_head, lru);
165         list_del_init(first);
166         remove_hash(sh);
167         atomic_inc(&conf->active_stripes);
168 out:
169         return sh;
170 }
171
172 static void shrink_buffers(struct stripe_head *sh, int num)
173 {
174         struct page *p;
175         int i;
176
177         for (i=0; i<num ; i++) {
178                 p = sh->dev[i].page;
179                 if (!p)
180                         continue;
181                 sh->dev[i].page = NULL;
182                 page_cache_release(p);
183         }
184 }
185
186 static int grow_buffers(struct stripe_head *sh, int num)
187 {
188         int i;
189
190         for (i=0; i<num; i++) {
191                 struct page *page;
192
193                 if (!(page = alloc_page(GFP_KERNEL))) {
194                         return 1;
195                 }
196                 sh->dev[i].page = page;
197         }
198         return 0;
199 }
200
201 static void raid6_build_block (struct stripe_head *sh, int i);
202
203 static inline void init_stripe(struct stripe_head *sh, unsigned long sector, int pd_idx)
204 {
205         raid6_conf_t *conf = sh->raid_conf;
206         int disks = conf->raid_disks, i;
207
208         if (atomic_read(&sh->count) != 0)
209                 BUG();
210         if (test_bit(STRIPE_HANDLE, &sh->state))
211                 BUG();
212
213         CHECK_DEVLOCK();
214         PRINTK("init_stripe called, stripe %llu\n",
215                 (unsigned long long)sh->sector);
216
217         remove_hash(sh);
218
219         sh->sector = sector;
220         sh->pd_idx = pd_idx;
221         sh->state = 0;
222
223         for (i=disks; i--; ) {
224                 struct r5dev *dev = &sh->dev[i];
225
226                 if (dev->toread || dev->towrite || dev->written ||
227                     test_bit(R5_LOCKED, &dev->flags)) {
228                         PRINTK("sector=%llx i=%d %p %p %p %d\n",
229                                (unsigned long long)sh->sector, i, dev->toread,
230                                dev->towrite, dev->written,
231                                test_bit(R5_LOCKED, &dev->flags));
232                         BUG();
233                 }
234                 dev->flags = 0;
235                 raid6_build_block(sh, i);
236         }
237         insert_hash(conf, sh);
238 }
239
240 static struct stripe_head *__find_stripe(raid6_conf_t *conf, unsigned long sector)
241 {
242         struct stripe_head *sh;
243
244         CHECK_DEVLOCK();
245         PRINTK("__find_stripe, sector %lu\n", sector);
246         for (sh = stripe_hash(conf, sector); sh; sh = sh->hash_next)
247                 if (sh->sector == sector)
248                         return sh;
249         PRINTK("__stripe %lu not in cache\n", sector);
250         return NULL;
251 }
252
253 static struct stripe_head *get_active_stripe(raid6_conf_t *conf, unsigned long sector,
254                                              int pd_idx, int noblock)
255 {
256         struct stripe_head *sh;
257
258         PRINTK("get_stripe, sector %lu\n", sector);
259
260         spin_lock_irq(&conf->device_lock);
261
262         do {
263                 sh = __find_stripe(conf, sector);
264                 if (!sh) {
265                         if (!conf->inactive_blocked)
266                                 sh = get_free_stripe(conf);
267                         if (noblock && sh == NULL)
268                                 break;
269                         if (!sh) {
270                                 conf->inactive_blocked = 1;
271                                 wait_event_lock_irq(conf->wait_for_stripe,
272                                                     !list_empty(&conf->inactive_list) &&
273                                                     (atomic_read(&conf->active_stripes) < (NR_STRIPES *3/4)
274                                                      || !conf->inactive_blocked),
275                                                     conf->device_lock);
276                                 conf->inactive_blocked = 0;
277                         } else
278                                 init_stripe(sh, sector, pd_idx);
279                 } else {
280                         if (atomic_read(&sh->count)) {
281                                 if (!list_empty(&sh->lru))
282                                         BUG();
283                         } else {
284                                 if (!test_bit(STRIPE_HANDLE, &sh->state))
285                                         atomic_inc(&conf->active_stripes);
286                                 if (list_empty(&sh->lru))
287                                         BUG();
288                                 list_del_init(&sh->lru);
289                         }
290                 }
291         } while (sh == NULL);
292
293         if (sh)
294                 atomic_inc(&sh->count);
295
296         spin_unlock_irq(&conf->device_lock);
297         return sh;
298 }
299
300 static int grow_stripes(raid6_conf_t *conf, int num)
301 {
302         struct stripe_head *sh;
303         kmem_cache_t *sc;
304         int devs = conf->raid_disks;
305
306         sprintf(conf->cache_name, "raid6/%s", mdname(conf->mddev));
307
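        /*
         * struct stripe_head declares a one-element dev[] array; size the
         * slab objects so there is room for one struct r5dev per member
         * disk of this array.
         */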
308         sc = kmem_cache_create(conf->cache_name,
309                                sizeof(struct stripe_head)+(devs-1)*sizeof(struct r5dev),
310                                0, 0, NULL, NULL);
311         if (!sc)
312                 return 1;
313         conf->slab_cache = sc;
314         while (num--) {
315                 sh = kmem_cache_alloc(sc, GFP_KERNEL);
316                 if (!sh)
317                         return 1;
318                 memset(sh, 0, sizeof(*sh) + (devs-1)*sizeof(struct r5dev));
319                 sh->raid_conf = conf;
320                 sh->lock = SPIN_LOCK_UNLOCKED;
321
322                 if (grow_buffers(sh, conf->raid_disks)) {
323                         shrink_buffers(sh, conf->raid_disks);
324                         kmem_cache_free(sc, sh);
325                         return 1;
326                 }
327                 /* we just created an active stripe so... */
328                 atomic_set(&sh->count, 1);
329                 atomic_inc(&conf->active_stripes);
330                 INIT_LIST_HEAD(&sh->lru);
331                 release_stripe(sh);
332         }
333         return 0;
334 }
335
336 static void shrink_stripes(raid6_conf_t *conf)
337 {
338         struct stripe_head *sh;
339
340         while (1) {
341                 spin_lock_irq(&conf->device_lock);
342                 sh = get_free_stripe(conf);
343                 spin_unlock_irq(&conf->device_lock);
344                 if (!sh)
345                         break;
346                 if (atomic_read(&sh->count))
347                         BUG();
348                 shrink_buffers(sh, conf->raid_disks);
349                 kmem_cache_free(conf->slab_cache, sh);
350                 atomic_dec(&conf->active_stripes);
351         }
352         kmem_cache_destroy(conf->slab_cache);
353         conf->slab_cache = NULL;
354 }
355
356 static int raid6_end_read_request (struct bio * bi, unsigned int bytes_done,
357                                    int error)
358 {
359         struct stripe_head *sh = bi->bi_private;
360         raid6_conf_t *conf = sh->raid_conf;
361         int disks = conf->raid_disks, i;
362         int uptodate = test_bit(BIO_UPTODATE, &bi->bi_flags);
363
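        /*
         * A non-zero bi_size means this bio has not fully completed yet
         * (the 2.6 bi_end_io convention allows partial completions), so
         * defer all handling to the final call.
         */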
364         if (bi->bi_size)
365                 return 1;
366
367         for (i=0 ; i<disks; i++)
368                 if (bi == &sh->dev[i].req)
369                         break;
370
371         PRINTK("end_read_request %llu/%d, count: %d, uptodate %d.\n",
372                 (unsigned long long)sh->sector, i, atomic_read(&sh->count),
373                 uptodate);
374         if (i == disks) {
375                 BUG();
376                 return 0;
377         }
378
379         if (uptodate) {
380 #if 0
381                 struct bio *bio;
382                 unsigned long flags;
383                 spin_lock_irqsave(&conf->device_lock, flags);
384                 /* we can return a buffer if we bypassed the cache or
385                  * if the top buffer is not in highmem.  If there are
386                  * multiple buffers, leave the extra work to
387                  * handle_stripe
388                  */
389                 buffer = sh->bh_read[i];
390                 if (buffer &&
391                     (!PageHighMem(buffer->b_page)
392                      || buffer->b_page == bh->b_page )
393                         ) {
394                         sh->bh_read[i] = buffer->b_reqnext;
395                         buffer->b_reqnext = NULL;
396                 } else
397                         buffer = NULL;
398                 spin_unlock_irqrestore(&conf->device_lock, flags);
399                 if (sh->bh_page[i]==bh->b_page)
400                         set_buffer_uptodate(bh);
401                 if (buffer) {
402                         if (buffer->b_page != bh->b_page)
403                                 memcpy(buffer->b_data, bh->b_data, bh->b_size);
404                         buffer->b_end_io(buffer, 1);
405                 }
406 #else
407                 set_bit(R5_UPTODATE, &sh->dev[i].flags);
408 #endif
409         } else {
410                 md_error(conf->mddev, conf->disks[i].rdev);
411                 clear_bit(R5_UPTODATE, &sh->dev[i].flags);
412         }
413         atomic_dec(&conf->disks[i].rdev->nr_pending);
414 #if 0
415         /* must restore b_page before unlocking buffer... */
416         if (sh->bh_page[i] != bh->b_page) {
417                 bh->b_page = sh->bh_page[i];
418                 bh->b_data = page_address(bh->b_page);
419                 clear_buffer_uptodate(bh);
420         }
421 #endif
422         clear_bit(R5_LOCKED, &sh->dev[i].flags);
423         set_bit(STRIPE_HANDLE, &sh->state);
424         release_stripe(sh);
425         return 0;
426 }
427
428 static int raid6_end_write_request (struct bio *bi, unsigned int bytes_done,
429                                     int error)
430 {
431         struct stripe_head *sh = bi->bi_private;
432         raid6_conf_t *conf = sh->raid_conf;
433         int disks = conf->raid_disks, i;
434         unsigned long flags;
435         int uptodate = test_bit(BIO_UPTODATE, &bi->bi_flags);
436
437         if (bi->bi_size)
438                 return 1;
439
440         for (i=0 ; i<disks; i++)
441                 if (bi == &sh->dev[i].req)
442                         break;
443
444         PRINTK("end_write_request %llu/%d, count %d, uptodate: %d.\n",
445                 (unsigned long long)sh->sector, i, atomic_read(&sh->count),
446                 uptodate);
447         if (i == disks) {
448                 BUG();
449                 return 0;
450         }
451
452         spin_lock_irqsave(&conf->device_lock, flags);
453         if (!uptodate)
454                 md_error(conf->mddev, conf->disks[i].rdev);
455
456         atomic_dec(&conf->disks[i].rdev->nr_pending);
457
458         clear_bit(R5_LOCKED, &sh->dev[i].flags);
459         set_bit(STRIPE_HANDLE, &sh->state);
460         __release_stripe(conf, sh);
461         spin_unlock_irqrestore(&conf->device_lock, flags);
462         return 0;
463 }
464
465
466 static sector_t compute_blocknr(struct stripe_head *sh, int i);
467
468 static void raid6_build_block (struct stripe_head *sh, int i)
469 {
470         struct r5dev *dev = &sh->dev[i];
471         int pd_idx = sh->pd_idx;
472         int qd_idx = raid6_next_disk(pd_idx, sh->raid_conf->raid_disks);
473
474         bio_init(&dev->req);
475         dev->req.bi_io_vec = &dev->vec;
476         dev->req.bi_vcnt++;
477         dev->vec.bv_page = dev->page;
478         dev->vec.bv_len = STRIPE_SIZE;
479         dev->vec.bv_offset = 0;
480
481         dev->req.bi_sector = sh->sector;
482         dev->req.bi_private = sh;
483
484         dev->flags = 0;
485         if (i != pd_idx && i != qd_idx)
486                 dev->sector = compute_blocknr(sh, i);
487 }
488
489 static void error(mddev_t *mddev, mdk_rdev_t *rdev)
490 {
491         char b[BDEVNAME_SIZE];
492         raid6_conf_t *conf = (raid6_conf_t *) mddev->private;
493         PRINTK("raid6: error called\n");
494
495         if (!rdev->faulty) {
496                 mddev->sb_dirty = 1;
497                 conf->working_disks--;
498                 if (rdev->in_sync) {
499                         mddev->degraded++;
500                         conf->failed_disks++;
501                         rdev->in_sync = 0;
502                         /*
503                          * if recovery was running, make sure it aborts.
504                          */
505                         set_bit(MD_RECOVERY_ERR, &mddev->recovery);
506                 }
507                 rdev->faulty = 1;
508                 printk (KERN_ALERT
509                         "raid6: Disk failure on %s, disabling device."
510                         " Operation continuing on %d devices\n",
511                         bdevname(rdev->bdev,b), conf->working_disks);
512         }
513 }
514
515 /*
516  * Input: a 'big' sector number,
517  * Output: index of the data and parity disk, and the sector # in them.
518  */
519 static unsigned long raid6_compute_sector(sector_t r_sector, unsigned int raid_disks,
520                         unsigned int data_disks, unsigned int * dd_idx,
521                         unsigned int * pd_idx, raid6_conf_t *conf)
522 {
523         long stripe;
524         unsigned long chunk_number;
525         unsigned int chunk_offset;
526         sector_t new_sector;
527         int sectors_per_chunk = conf->chunk_size >> 9;
528
529         /* First compute the information on this sector */
530
531         /*
532          * Compute the chunk number and the sector offset inside the chunk
533          */
534         chunk_offset = sector_div(r_sector, sectors_per_chunk);
535         chunk_number = r_sector;
536         if ( r_sector != chunk_number ) {
537                 printk(KERN_CRIT "raid6: ERROR: r_sector = %llu, chunk_number = %lu\n",
538                        (unsigned long long)r_sector, (unsigned long)chunk_number);
539                 BUG();
540         }
541
542         /*
543          * Compute the stripe number
544          */
545         stripe = chunk_number / data_disks;
546
547         /*
548          * Compute the data disk and parity disk indexes inside the stripe
549          */
550         *dd_idx = chunk_number % data_disks;
551
552         /*
553          * Select the parity disk based on the user selected algorithm.
554          */
555
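        /*
         * Example: with raid_disks == 5 and ALGORITHM_LEFT_ASYMMETRIC,
         * stripe 0 gets pd_idx = 4 (P on the last disk, Q on disk 0:
         * Q D D D P), while stripe 2 gets pd_idx = 2 (D D P Q D) and a
         * logical dd_idx of 2 is pushed past P and Q to physical disk 4.
         */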
556         /**** FIX THIS ****/
557         switch (conf->algorithm) {
558         case ALGORITHM_LEFT_ASYMMETRIC:
559                 *pd_idx = raid_disks - 1 - (stripe % raid_disks);
560                 if (*pd_idx == raid_disks-1)
561                         (*dd_idx)++;    /* Q D D D P */
562                 else if (*dd_idx >= *pd_idx)
563                         (*dd_idx) += 2; /* D D P Q D */
564                 break;
565         case ALGORITHM_RIGHT_ASYMMETRIC:
566                 *pd_idx = stripe % raid_disks;
567                 if (*pd_idx == raid_disks-1)
568                         (*dd_idx)++;    /* Q D D D P */
569                 else if (*dd_idx >= *pd_idx)
570                         (*dd_idx) += 2; /* D D P Q D */
571                 break;
572         case ALGORITHM_LEFT_SYMMETRIC:
573                 *pd_idx = raid_disks - 1 - (stripe % raid_disks);
574                 *dd_idx = (*pd_idx + 2 + *dd_idx) % raid_disks;
575                 break;
576         case ALGORITHM_RIGHT_SYMMETRIC:
577                 *pd_idx = stripe % raid_disks;
578                 *dd_idx = (*pd_idx + 2 + *dd_idx) % raid_disks;
579                 break;
580         default:
581                 printk (KERN_CRIT "raid6: unsupported algorithm %d\n",
582                         conf->algorithm);
583         }
584
585         PRINTK("raid6: chunk_number = %lu, pd_idx = %u, dd_idx = %u\n",
586                chunk_number, *pd_idx, *dd_idx);
587
588         /*
589          * Finally, compute the new sector number
590          */
591         new_sector = stripe * sectors_per_chunk + chunk_offset;
592         return new_sector;
593 }
594
595
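/*
 * compute_blocknr() is the inverse of raid6_compute_sector(): given a
 * stripe and a device index it undoes the parity rotation to recover the
 * array ('big') sector, then maps that sector forward again as a sanity
 * check.
 */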
596 static sector_t compute_blocknr(struct stripe_head *sh, int i)
597 {
598         raid6_conf_t *conf = sh->raid_conf;
599         int raid_disks = conf->raid_disks, data_disks = raid_disks - 2;
600         sector_t new_sector = sh->sector, check;
601         int sectors_per_chunk = conf->chunk_size >> 9;
602         long stripe;
603         int chunk_offset;
604         int chunk_number, dummy1, dummy2, dd_idx = i;
605         sector_t r_sector;
606         int i0 = i;
607
608         chunk_offset = sector_div(new_sector, sectors_per_chunk);
609         stripe = new_sector;
610         if ( new_sector != stripe ) {
611                 printk(KERN_CRIT "raid6: ERROR: new_sector = %llu, stripe = %lu\n",
612                        (unsigned long long)new_sector, (unsigned long)stripe);
613                 BUG();
614         }
615
616         switch (conf->algorithm) {
617                 case ALGORITHM_LEFT_ASYMMETRIC:
618                 case ALGORITHM_RIGHT_ASYMMETRIC:
619                         if (sh->pd_idx == raid_disks-1)
620                                 i--;    /* Q D D D P */
621                         else if (i > sh->pd_idx)
622                                 i -= 2; /* D D P Q D */
623                         break;
624                 case ALGORITHM_LEFT_SYMMETRIC:
625                 case ALGORITHM_RIGHT_SYMMETRIC:
626                         if (sh->pd_idx == raid_disks-1)
627                                 i--; /* Q D D D P */
628                         else {
629                                 /* D D P Q D */
630                                 if (i < sh->pd_idx)
631                                         i += raid_disks;
632                                 i -= (sh->pd_idx + 2);
633                         }
634                         break;
635                 default:
636                         printk (KERN_CRIT "raid6: unsupported algorithm %d\n",
637                                 conf->algorithm);
638         }
639
640         PRINTK("raid6: compute_blocknr: pd_idx = %u, i0 = %u, i = %u\n", sh->pd_idx, i0, i);
641
642         chunk_number = stripe * data_disks + i;
643         r_sector = (sector_t)chunk_number * sectors_per_chunk + chunk_offset;
644
645         check = raid6_compute_sector (r_sector, raid_disks, data_disks, &dummy1, &dummy2, conf);
646         if (check != sh->sector || dummy1 != dd_idx || dummy2 != sh->pd_idx) {
647                 printk(KERN_CRIT "raid6: compute_blocknr: map not correct\n");
648                 return 0;
649         }
650         return r_sector;
651 }
652
653
654
655 /*
656  * Copy data between a page in the stripe cache, and one or more bion
657  * The page could align with the middle of the bio, or there could be
658  * several bion, each with several bio_vecs, which cover part of the page
659  * Multiple bion are linked together on bi_next.  There may be extras
660  * at the end of this list.  We ignore them.
661  */
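/*
 * Below, page_offset is the byte offset of the current bio segment within
 * the stripe page; it starts out negative when the bio begins before this
 * stripe's sector, and b_offset then skips the leading bytes that belong
 * to an earlier stripe.
 */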
662 static void copy_data(int frombio, struct bio *bio,
663                      struct page *page,
664                      sector_t sector)
665 {
666         char *pa = page_address(page);
667         struct bio_vec *bvl;
668         int i;
669
670         for (;bio && bio->bi_sector < sector+STRIPE_SECTORS;
671               bio = r5_next_bio(bio, sector) ) {
672                 int page_offset;
673                 if (bio->bi_sector >= sector)
674                         page_offset = (signed)(bio->bi_sector - sector) * 512;
675                 else
676                         page_offset = (signed)(sector - bio->bi_sector) * -512;
677                 bio_for_each_segment(bvl, bio, i) {
678                         int len = bio_iovec_idx(bio,i)->bv_len;
679                         int clen;
680                         int b_offset = 0;
681
682                         if (page_offset < 0) {
683                                 b_offset = -page_offset;
684                                 page_offset += b_offset;
685                                 len -= b_offset;
686                         }
687
688                         if (len > 0 && page_offset + len > STRIPE_SIZE)
689                                 clen = STRIPE_SIZE - page_offset;
690                         else clen = len;
691
692                         if (clen > 0) {
693                                 char *ba = __bio_kmap_atomic(bio, i, KM_USER0);
694                                 if (frombio)
695                                         memcpy(pa+page_offset, ba+b_offset, clen);
696                                 else
697                                         memcpy(ba+b_offset, pa+page_offset, clen);
698                                 __bio_kunmap_atomic(ba, KM_USER0);
699                         }
700                         if (clen < len) /* hit end of page */
701                                 break;
702                         page_offset +=  len;
703                 }
704         }
705 }
706
707 #define check_xor()     do {                                            \
708                            if (count == MAX_XOR_BLOCKS) {               \
709                                 xor_block(count, STRIPE_SIZE, ptr);     \
710                                 count = 1;                              \
711                            }                                            \
712                         } while(0)
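/*
 * xor_block() folds at most MAX_XOR_BLOCKS pages at a time and leaves the
 * running result in ptr[0], so after flushing a full batch the count is
 * reset to 1 and further source pages are appended behind the result.
 */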
713
714 /* Compute P and Q syndromes */
715 static void compute_parity(struct stripe_head *sh, int method)
716 {
717         raid6_conf_t *conf = sh->raid_conf;
718         int i, pd_idx = sh->pd_idx, qd_idx, d0_idx, disks = conf->raid_disks, count;
719         struct bio *chosen;
720         /**** FIX THIS: This could be very bad if disks is close to 256 ****/
721         void *ptrs[disks];
722
723         qd_idx = raid6_next_disk(pd_idx, disks);
724         d0_idx = raid6_next_disk(qd_idx, disks);
725
726         PRINTK("compute_parity, stripe %llu, method %d\n",
727                 (unsigned long long)sh->sector, method);
728
729         switch(method) {
730         case READ_MODIFY_WRITE:
731                 BUG();          /* READ_MODIFY_WRITE N/A for RAID-6 */
732         case RECONSTRUCT_WRITE:
733         case UPDATE_PARITY:     /* Is this right? */
734                 for (i= disks; i-- ;)
735                         if ( i != pd_idx && i != qd_idx && sh->dev[i].towrite ) {
736                                 chosen = sh->dev[i].towrite;
737                                 sh->dev[i].towrite = NULL;
738                                 if (sh->dev[i].written) BUG();
739                                 sh->dev[i].written = chosen;
740                         }
741                 break;
742         case CHECK_PARITY:
743                 BUG();          /* Not implemented yet */
744         }
745
746         for (i = disks; i--;)
747                 if (sh->dev[i].written) {
748                         sector_t sector = sh->dev[i].sector;
749                         struct bio *wbi = sh->dev[i].written;
750                         while (wbi && wbi->bi_sector < sector + STRIPE_SECTORS) {
751                                 copy_data(1, wbi, sh->dev[i].page, sector);
752                                 wbi = r5_next_bio(wbi, sector);
753                         }
754
755                         set_bit(R5_LOCKED, &sh->dev[i].flags);
756                         set_bit(R5_UPTODATE, &sh->dev[i].flags);
757                 }
758
759 //      switch(method) {
760 //      case RECONSTRUCT_WRITE:
761 //      case CHECK_PARITY:
762 //      case UPDATE_PARITY:
763                 /* Note that unlike RAID-5, the ordering of the disks matters greatly. */
764                 /* FIX: Is this ordering of drives even remotely optimal? */
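                /*
                 * Walking the ring from d0_idx visits the data disks in
                 * order and ends with pd_idx then qd_idx, so ptrs[] matches
                 * the gen_syndrome() convention: data blocks first, P and Q
                 * as the last two pointers.
                 */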
765                 count = 0;
766                 i = d0_idx;
767                 do {
768                         ptrs[count++] = page_address(sh->dev[i].page);
769
770                         i = raid6_next_disk(i, disks);
771                 } while ( i != d0_idx );
772 //              break;
773 //      }
774
775         raid6_call.gen_syndrome(disks, STRIPE_SIZE, ptrs);
776
777         switch(method) {
778         case RECONSTRUCT_WRITE:
779                 set_bit(R5_UPTODATE, &sh->dev[pd_idx].flags);
780                 set_bit(R5_UPTODATE, &sh->dev[qd_idx].flags);
781                 set_bit(R5_LOCKED,   &sh->dev[pd_idx].flags);
782                 set_bit(R5_LOCKED,   &sh->dev[qd_idx].flags);
783                 break;
784         case UPDATE_PARITY:
785                 set_bit(R5_UPTODATE, &sh->dev[pd_idx].flags);
786                 set_bit(R5_UPTODATE, &sh->dev[qd_idx].flags);
787                 break;
788         }
789 }
790
791 /* Compute one missing block */
792 static void compute_block_1(struct stripe_head *sh, int dd_idx)
793 {
794         raid6_conf_t *conf = sh->raid_conf;
795         int i, count, disks = conf->raid_disks;
796         void *ptr[MAX_XOR_BLOCKS], *p;
797         int pd_idx = sh->pd_idx;
798         int qd_idx = raid6_next_disk(pd_idx, disks);
799
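        /*
         * Only a missing Q needs the full syndrome recomputation; a missing
         * data block (or P) is rebuilt RAID-5 style by XOR-ing the
         * remaining non-Q blocks together.
         */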
800         PRINTK("compute_block_1, stripe %llu, idx %d\n",
801                 (unsigned long long)sh->sector, dd_idx);
802
803         if ( dd_idx == qd_idx ) {
804                 /* We're actually computing the Q drive */
805                 compute_parity(sh, UPDATE_PARITY);
806         } else {
807                 ptr[0] = page_address(sh->dev[dd_idx].page);
808                 memset(ptr[0], 0, STRIPE_SIZE);
809                 count = 1;
810                 for (i = disks ; i--; ) {
811                         if (i == dd_idx || i == qd_idx)
812                                 continue;
813                         p = page_address(sh->dev[i].page);
814                         if (test_bit(R5_UPTODATE, &sh->dev[i].flags))
815                                 ptr[count++] = p;
816                         else
817                                 PRINTK("compute_block() %d, stripe %llu, %d"
818                                        " not present\n", dd_idx,
819                                        (unsigned long long)sh->sector, i);
820
821                         check_xor();
822                 }
823                 if (count != 1)
824                         xor_block(count, STRIPE_SIZE, ptr);
825                 set_bit(R5_UPTODATE, &sh->dev[dd_idx].flags);
826         }
827 }
828
829 /* Compute two missing blocks */
830 static void compute_block_2(struct stripe_head *sh, int dd_idx1, int dd_idx2)
831 {
832         raid6_conf_t *conf = sh->raid_conf;
833         int i, count, disks = conf->raid_disks;
834         int pd_idx = sh->pd_idx;
835         int qd_idx = raid6_next_disk(pd_idx, disks);
836         int d0_idx = raid6_next_disk(qd_idx, disks);
837         int faila, failb;
838
839         /* faila and failb are disk numbers relative to d0_idx */
840         /* pd_idx become disks-2 and qd_idx become disks-1 */
841         faila = (dd_idx1 < d0_idx) ? dd_idx1+(disks-d0_idx) : dd_idx1-d0_idx;
842         failb = (dd_idx2 < d0_idx) ? dd_idx2+(disks-d0_idx) : dd_idx2-d0_idx;
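        /* E.g. with disks == 5, pd_idx == 2, qd_idx == 3, d0_idx == 4:
         * data disks 4,0,1 renumber to 0,1,2, P becomes 3 (disks-2) and Q
         * becomes 4 (disks-1), which is the layout the recovery routines
         * expect.
         */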
843
844         BUG_ON(faila == failb);
845         if ( failb < faila ) { int tmp = faila; faila = failb; failb = tmp; }
846
847         PRINTK("compute_block_2, stripe %llu, idx %d,%d (%d,%d)\n",
848                (unsigned long long)sh->sector, dd_idx1, dd_idx2, faila, failb);
849
850         if ( failb == disks-1 ) {
851                 /* Q disk is one of the missing disks */
852                 if ( faila == disks-2 ) {
853                         /* Missing P+Q, just recompute */
854                         compute_parity(sh, UPDATE_PARITY);
855                         return;
856                 } else {
857                         /* We're missing D+Q; recompute D from P */
858                         compute_block_1(sh, (dd_idx1 == qd_idx) ? dd_idx2 : dd_idx1);
859                         compute_parity(sh, UPDATE_PARITY); /* Is this necessary? */
860                         return;
861                 }
862         }
863
864         /* We're missing D+P or D+D; build pointer table */
865         {
866                 /**** FIX THIS: This could be very bad if disks is close to 256 ****/
867                 void *ptrs[disks];
868
869                 count = 0;
870                 i = d0_idx;
871                 do {
872                         ptrs[count++] = page_address(sh->dev[i].page);
873                         i = raid6_next_disk(i, disks);
874                 } while ( i != d0_idx );
875
876                 if ( failb == disks-2 ) {
877                         /* We're missing D+P. */
878                         raid6_datap_recov(disks, STRIPE_SIZE, faila, ptrs);
879                 } else {
880                         /* We're missing D+D. */
881                         raid6_2data_recov(disks, STRIPE_SIZE, faila, failb, ptrs);
882                 }
883
884                 /* Both the above update both missing blocks */
885                 set_bit(R5_UPTODATE, &sh->dev[dd_idx1].flags);
886                 set_bit(R5_UPTODATE, &sh->dev[dd_idx2].flags);
887         }
888 }
889
890
891 /*
892  * Each stripe/dev can have one or more bion attached.
893  * toread/towrite point to the first in a chain.
894  * The bi_next chain must be in order.
895  */
896 static void add_stripe_bio (struct stripe_head *sh, struct bio *bi, int dd_idx, int forwrite)
897 {
898         struct bio **bip;
899         raid6_conf_t *conf = sh->raid_conf;
900
901         PRINTK("adding bh b#%llu to stripe s#%llu\n",
902                 (unsigned long long)bi->bi_sector,
903                 (unsigned long long)sh->sector);
904
905
906         spin_lock(&sh->lock);
907         spin_lock_irq(&conf->device_lock);
908         if (forwrite)
909                 bip = &sh->dev[dd_idx].towrite;
910         else
911                 bip = &sh->dev[dd_idx].toread;
912         while (*bip && (*bip)->bi_sector < bi->bi_sector) {
913                 BUG_ON((*bip)->bi_sector + ((*bip)->bi_size >> 9) > bi->bi_sector);
914                 bip = & (*bip)->bi_next;
915         }
916 /* FIXME do I need to worry about overlapping bion */
917         if (*bip && bi->bi_next && (*bip) != bi->bi_next)
918                 BUG();
919         if (*bip)
920                 bi->bi_next = *bip;
921         *bip = bi;
922         bi->bi_phys_segments ++;
923         spin_unlock_irq(&conf->device_lock);
924         spin_unlock(&sh->lock);
925
926         PRINTK("added bi b#%llu to stripe s#%llu, disk %d.\n",
927                 (unsigned long long)bi->bi_sector,
928                 (unsigned long long)sh->sector, dd_idx);
929
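        /*
         * For a write, walk the sorted, non-overlapping towrite chain to
         * see whether the queued bios cover every sector of this
         * stripe+device; if they do, the old contents never need to be
         * read and R5_OVERWRITE is set.
         */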
930         if (forwrite) {
931                 /* check if page is covered */
932                 sector_t sector = sh->dev[dd_idx].sector;
933                 for (bi=sh->dev[dd_idx].towrite;
934                      sector < sh->dev[dd_idx].sector + STRIPE_SECTORS &&
935                              bi && bi->bi_sector <= sector;
936                      bi = r5_next_bio(bi, sh->dev[dd_idx].sector)) {
937                         if (bi->bi_sector + (bi->bi_size>>9) >= sector)
938                                 sector = bi->bi_sector + (bi->bi_size>>9);
939                 }
940                 if (sector >= sh->dev[dd_idx].sector + STRIPE_SECTORS)
941                         set_bit(R5_OVERWRITE, &sh->dev[dd_idx].flags);
942         }
943 }
944
945
946 /*
947  * handle_stripe - do things to a stripe.
948  *
949  * We lock the stripe and then examine the state of various bits
950  * to see what needs to be done.
951  * Possible results:
952  *    return some read requests which now have data
953  *    return some write requests which are safely on disc
954  *    schedule a read on some buffers
955  *    schedule a write of some buffers
956  *    return confirmation of parity correctness
957  *
958  * Parity calculations are done inside the stripe lock;
959  * buffers are taken off read_list or write_list, and bh_cache buffers
960  * get BH_Lock set before the stripe lock is released.
961  *
962  */
963
964 static void handle_stripe(struct stripe_head *sh)
965 {
966         raid6_conf_t *conf = sh->raid_conf;
967         int disks = conf->raid_disks;
968         struct bio *return_bi= NULL;
969         struct bio *bi;
970         int i;
971         int syncing;
972         int locked=0, uptodate=0, to_read=0, to_write=0, failed=0, written=0;
973         int non_overwrite = 0;
974         int failed_num[2] = {0, 0};
975         struct r5dev *dev, *pdev, *qdev;
976         int pd_idx = sh->pd_idx;
977         int qd_idx = raid6_next_disk(pd_idx, disks);
978         int p_failed, q_failed;
979
980         PRINTK("handling stripe %llu, state=%#lx cnt=%d, pd_idx=%d, qd_idx=%d\n",
981                (unsigned long long)sh->sector, sh->state, atomic_read(&sh->count),
982                pd_idx, qd_idx);
983
984         spin_lock(&sh->lock);
985         clear_bit(STRIPE_HANDLE, &sh->state);
986         clear_bit(STRIPE_DELAYED, &sh->state);
987
988         syncing = test_bit(STRIPE_SYNCING, &sh->state);
989         /* Now to look around and see what can be done */
990
991         for (i=disks; i--; ) {
992                 mdk_rdev_t *rdev;
993                 dev = &sh->dev[i];
994                 clear_bit(R5_Insync, &dev->flags);
995                 clear_bit(R5_Syncio, &dev->flags);
996
997                 PRINTK("check %d: state 0x%lx read %p write %p written %p\n",
998                         i, dev->flags, dev->toread, dev->towrite, dev->written);
999                 /* maybe we can reply to a read */
1000                 if (test_bit(R5_UPTODATE, &dev->flags) && dev->toread) {
1001                         struct bio *rbi, *rbi2;
1002                         PRINTK("Return read for disc %d\n", i);
1003                         spin_lock_irq(&conf->device_lock);
1004                         rbi = dev->toread;
1005                         dev->toread = NULL;
1006                         spin_unlock_irq(&conf->device_lock);
1007                         while (rbi && rbi->bi_sector < dev->sector + STRIPE_SECTORS) {
1008                                 copy_data(0, rbi, dev->page, dev->sector);
1009                                 rbi2 = r5_next_bio(rbi, dev->sector);
1010                                 spin_lock_irq(&conf->device_lock);
1011                                 if (--rbi->bi_phys_segments == 0) {
1012                                         rbi->bi_next = return_bi;
1013                                         return_bi = rbi;
1014                                 }
1015                                 spin_unlock_irq(&conf->device_lock);
1016                                 rbi = rbi2;
1017                         }
1018                 }
1019
1020                 /* now count some things */
1021                 if (test_bit(R5_LOCKED, &dev->flags)) locked++;
1022                 if (test_bit(R5_UPTODATE, &dev->flags)) uptodate++;
1023
1024
1025                 if (dev->toread) to_read++;
1026                 if (dev->towrite) {
1027                         to_write++;
1028                         if (!test_bit(R5_OVERWRITE, &dev->flags))
1029                                 non_overwrite++;
1030                 }
1031                 if (dev->written) written++;
1032                 rdev = conf->disks[i].rdev; /* FIXME, should I be looking at rdev */
1033                 if (!rdev || !rdev->in_sync) {
1034                         if ( failed < 2 )
1035                                 failed_num[failed] = i;
1036                         failed++;
1037                 } else
1038                         set_bit(R5_Insync, &dev->flags);
1039         }
1040         PRINTK("locked=%d uptodate=%d to_read=%d"
1041                " to_write=%d failed=%d failed_num=%d,%d\n",
1042                locked, uptodate, to_read, to_write, failed,
1043                failed_num[0], failed_num[1]);
1044         /* check if the array has lost >2 devices and, if so, some requests might
1045          * need to be failed
1046          */
1047         if (failed > 2 && to_read+to_write+written) {
1048                 spin_lock_irq(&conf->device_lock);
1049                 for (i=disks; i--; ) {
1050                         /* fail all writes first */
1051                         bi = sh->dev[i].towrite;
1052                         sh->dev[i].towrite = NULL;
1053                         if (bi) to_write--;
1054
1055                         while (bi && bi->bi_sector < sh->dev[i].sector + STRIPE_SECTORS){
1056                                 struct bio *nextbi = r5_next_bio(bi, sh->dev[i].sector);
1057                                 clear_bit(BIO_UPTODATE, &bi->bi_flags);
1058                                 if (--bi->bi_phys_segments == 0) {
1059                                         md_write_end(conf->mddev);
1060                                         bi->bi_next = return_bi;
1061                                         return_bi = bi;
1062                                 }
1063                                 bi = nextbi;
1064                         }
1065                         /* and fail all 'written' */
1066                         bi = sh->dev[i].written;
1067                         sh->dev[i].written = NULL;
1068                         while (bi && bi->bi_sector < sh->dev[i].sector + STRIPE_SECTORS) {
1069                                 struct bio *bi2 = r5_next_bio(bi, sh->dev[i].sector);
1070                                 clear_bit(BIO_UPTODATE, &bi->bi_flags);
1071                                 if (--bi->bi_phys_segments == 0) {
1072                                         md_write_end(conf->mddev);
1073                                         bi->bi_next = return_bi;
1074                                         return_bi = bi;
1075                                 }
1076                                 bi = bi2;
1077                         }
1078
1079                         /* fail any reads if this device is non-operational */
1080                         if (!test_bit(R5_Insync, &sh->dev[i].flags)) {
1081                                 bi = sh->dev[i].toread;
1082                                 sh->dev[i].toread = NULL;
1083                                 if (bi) to_read--;
1084                                 while (bi && bi->bi_sector < sh->dev[i].sector + STRIPE_SECTORS){
1085                                         struct bio *nextbi = r5_next_bio(bi, sh->dev[i].sector);
1086                                         clear_bit(BIO_UPTODATE, &bi->bi_flags);
1087                                         if (--bi->bi_phys_segments == 0) {
1088                                                 bi->bi_next = return_bi;
1089                                                 return_bi = bi;
1090                                         }
1091                                         bi = nextbi;
1092                                 }
1093                         }
1094                 }
1095                 spin_unlock_irq(&conf->device_lock);
1096         }
1097         if (failed > 2 && syncing) {
1098                 md_done_sync(conf->mddev, STRIPE_SECTORS,0);
1099                 clear_bit(STRIPE_SYNCING, &sh->state);
1100                 syncing = 0;
1101         }
1102
1103         /*
1104          * might be able to return some write requests if the parity blocks
1105          * are safe, or on a failed drive
1106          */
1107         pdev = &sh->dev[pd_idx];
1108         p_failed = (failed >= 1 && failed_num[0] == pd_idx)
1109                 || (failed >= 2 && failed_num[1] == pd_idx);
1110         qdev = &sh->dev[qd_idx];
1111         q_failed = (failed >= 1 && failed_num[0] == qd_idx)
1112                 || (failed >= 2 && failed_num[1] == qd_idx);
1113
1114         if ( written &&
1115              ( p_failed || ((test_bit(R5_Insync, &pdev->flags)
1116                              && !test_bit(R5_LOCKED, &pdev->flags)
1117                              && test_bit(R5_UPTODATE, &pdev->flags))) ) &&
1118              ( q_failed || ((test_bit(R5_Insync, &qdev->flags)
1119                              && !test_bit(R5_LOCKED, &qdev->flags)
1120                              && test_bit(R5_UPTODATE, &qdev->flags))) ) ) {
1121                 /* any written block on an uptodate or failed drive can be
1122                  * returned.  Note that if we 'wrote' to a failed drive,
1123                  * it will be UPTODATE, but never LOCKED, so we don't need
1124                  * to test 'failed' directly.
1125                  */
1126                 for (i=disks; i--; )
1127                         if (sh->dev[i].written) {
1128                                 dev = &sh->dev[i];
1129                                 if (!test_bit(R5_LOCKED, &dev->flags) &&
1130                                     test_bit(R5_UPTODATE, &dev->flags) ) {
1131                                         /* We can return any write requests */
1132                                         struct bio *wbi, *wbi2;
1133                                         PRINTK("Return write for stripe %llu disc %d\n",
1134                                                (unsigned long long)sh->sector, i);
1135                                         spin_lock_irq(&conf->device_lock);
1136                                         wbi = dev->written;
1137                                         dev->written = NULL;
1138                                         while (wbi && wbi->bi_sector < dev->sector + STRIPE_SECTORS) {
1139                                                 wbi2 = r5_next_bio(wbi, dev->sector);
1140                                                 if (--wbi->bi_phys_segments == 0) {
1141                                                         md_write_end(conf->mddev);
1142                                                         wbi->bi_next = return_bi;
1143                                                         return_bi = wbi;
1144                                                 }
1145                                                 wbi = wbi2;
1146                                         }
1147                                         spin_unlock_irq(&conf->device_lock);
1148                                 }
1149                         }
1150         }
1151
1152         /* Now we might consider reading some blocks, either to check/generate
1153          * parity, or to satisfy requests
1154          * or to load a block that is being partially written.
1155          */
1156         if (to_read || non_overwrite || (syncing && (uptodate+failed < disks))) {
1157                 for (i=disks; i--;) {
1158                         dev = &sh->dev[i];
1159                         if (!test_bit(R5_LOCKED, &dev->flags) && !test_bit(R5_UPTODATE, &dev->flags) &&
1160                             (dev->toread ||
1161                              (dev->towrite && !test_bit(R5_OVERWRITE, &dev->flags)) ||
1162                              syncing ||
1163                              (failed >= 1 && (sh->dev[failed_num[0]].toread ||
1164                                          (sh->dev[failed_num[0]].towrite && !test_bit(R5_OVERWRITE, &sh->dev[failed_num[0]].flags)))) ||
1165                              (failed >= 2 && (sh->dev[failed_num[1]].toread ||
1166                                          (sh->dev[failed_num[1]].towrite && !test_bit(R5_OVERWRITE, &sh->dev[failed_num[1]].flags))))
1167                                     )
1168                                 ) {
1169                                 /* we would like to get this block, possibly
1170                                  * by computing it, but we might not be able to
1171                                  */
1172                                 if (uptodate == disks-1) {
1173                                         PRINTK("Computing stripe %llu block %d\n",
1174                                                (unsigned long long)sh->sector, i);
1175                                         compute_block_1(sh, i);
1176                                         uptodate++;
1177                                 } else if ( uptodate == disks-2 && failed >= 2 ) {
1178                                         /* Computing 2-failure is *very* expensive; only do it if failed >= 2 */
1179                                         int other;
1180                                         for (other=disks; other--;) {
1181                                                 if ( other == i )
1182                                                         continue;
1183                                                 if ( !test_bit(R5_UPTODATE, &sh->dev[other].flags) )
1184                                                         break;
1185                                         }
1186                                         BUG_ON(other < 0);
1187                                         PRINTK("Computing stripe %llu blocks %d,%d\n",
1188                                                (unsigned long long)sh->sector, i, other);
1189                                         compute_block_2(sh, i, other);
1190                                         uptodate += 2;
1191                                 } else if (test_bit(R5_Insync, &dev->flags)) {
1192                                         set_bit(R5_LOCKED, &dev->flags);
1193                                         set_bit(R5_Wantread, &dev->flags);
1194 #if 0
1195                                         /* if I am just reading this block and we don't have
1196                                            a failed drive, or any pending writes then sidestep the cache */
1197                                         if (sh->bh_read[i] && !sh->bh_read[i]->b_reqnext &&
1198                                             ! syncing && !failed && !to_write) {
1199                                                 sh->bh_cache[i]->b_page =  sh->bh_read[i]->b_page;
1200                                                 sh->bh_cache[i]->b_data =  sh->bh_read[i]->b_data;
1201                                         }
1202 #endif
1203                                         locked++;
1204                                         PRINTK("Reading block %d (sync=%d)\n",
1205                                                 i, syncing);
1206                                         if (syncing)
1207                                                 md_sync_acct(conf->disks[i].rdev, STRIPE_SECTORS);
1208                                 }
1209                         }
1210                 }
1211                 set_bit(STRIPE_HANDLE, &sh->state);
1212         }
1213
1214         /* now to consider writing and what else, if anything should be read */
1215         if (to_write) {
1216                 int rcw=0, must_compute=0;
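                /*
                 * RAID-6 always reconstruct-writes (compute_parity() BUGs
                 * on READ_MODIFY_WRITE), so every data block of the stripe
                 * must become uptodate: rcw counts blocks still to be read
                 * from in-sync devices, must_compute counts blocks that can
                 * only be rebuilt from redundancy.
                 */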
1217                 for (i=disks ; i--;) {
1218                         dev = &sh->dev[i];
1219                         /* Would I have to read this buffer for reconstruct_write */
1220                         if (!test_bit(R5_OVERWRITE, &dev->flags)
1221                             && i != pd_idx && i != qd_idx
1222                             && (!test_bit(R5_LOCKED, &dev->flags)
1223 #if 0
1224                                 || sh->bh_page[i] != bh->b_page
1225 #endif
1226                                     ) &&
1227                             !test_bit(R5_UPTODATE, &dev->flags)) {
1228                                 if (test_bit(R5_Insync, &dev->flags)) rcw++;
1229                                 else {
1230                                         PRINTK("raid6: must_compute: disk %d flags=%#lx\n", i, dev->flags);
1231                                         must_compute++;
1232                                 }
1233                         }
1234                 }
1235                 PRINTK("for sector %llu, rcw=%d, must_compute=%d\n",
1236                        (unsigned long long)sh->sector, rcw, must_compute);
1237                 set_bit(STRIPE_HANDLE, &sh->state);
1238
1239                 if (rcw > 0)
1240                         /* want reconstruct write, but need to get some data */
1241                         for (i=disks; i--;) {
1242                                 dev = &sh->dev[i];
1243                                 if (!test_bit(R5_OVERWRITE, &dev->flags)
1244                                     && !(failed == 0 && (i == pd_idx || i == qd_idx))
1245                                     && !test_bit(R5_LOCKED, &dev->flags) && !test_bit(R5_UPTODATE, &dev->flags) &&
1246                                     test_bit(R5_Insync, &dev->flags)) {
1247                                         if (test_bit(STRIPE_PREREAD_ACTIVE, &sh->state))
1248                                         {
1249                                                 PRINTK("Read_old stripe %llu block %d for Reconstruct\n",
1250                                                        (unsigned long long)sh->sector, i);
1251                                                 set_bit(R5_LOCKED, &dev->flags);
1252                                                 set_bit(R5_Wantread, &dev->flags);
1253                                                 locked++;
1254                                         } else {
1255                                                 PRINTK("Request delayed stripe %llu block %d for Reconstruct\n",
1256                                                        (unsigned long long)sh->sector, i);
1257                                                 set_bit(STRIPE_DELAYED, &sh->state);
1258                                                 set_bit(STRIPE_HANDLE, &sh->state);
1259                                         }
1260                                 }
1261                         }
1262                 /* now if nothing is locked, and if we have enough data, we can start a write request */
1263                 if (locked == 0 && rcw == 0) {
1264                         if ( must_compute > 0 ) {
1265                                 /* We have failed blocks and need to compute them */
1266                                 switch ( failed ) {
1267                                 case 0: BUG();
1268                                 case 1: compute_block_1(sh, failed_num[0]); break;
1269                                 case 2: compute_block_2(sh, failed_num[0], failed_num[1]); break;
1270                                 default: BUG(); /* This request should have been failed? */
1271                                 }
1272                         }
1273
1274                         PRINTK("Computing parity for stripe %llu\n", (unsigned long long)sh->sector);
1275                         compute_parity(sh, RECONSTRUCT_WRITE);
1276                         /* now every locked buffer is ready to be written */
1277                         for (i=disks; i--;)
1278                                 if (test_bit(R5_LOCKED, &sh->dev[i].flags)) {
1279                                         PRINTK("Writing stripe %llu block %d\n",
1280                                                (unsigned long long)sh->sector, i);
1281                                         locked++;
1282                                         set_bit(R5_Wantwrite, &sh->dev[i].flags);
1283 #if 0 /**** FIX: I don't understand the logic here... ****/
1284                                         if (!test_bit(R5_Insync, &sh->dev[i].flags)
1285                                             || ((i==pd_idx || i==qd_idx) && failed == 0)) /* FIX? */
1286                                                 set_bit(STRIPE_INSYNC, &sh->state);
1287 #endif
1288                                 }
1289                         if (test_and_clear_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) {
1290                                 atomic_dec(&conf->preread_active_stripes);
1291                                 if (atomic_read(&conf->preread_active_stripes) < IO_THRESHOLD)
1292                                         md_wakeup_thread(conf->mddev->thread);
1293                         }
1294                 }
1295         }
1296
1297         /* maybe we need to check and possibly fix the parity for this stripe
1298          * Any reads will already have been scheduled, so we just see if enough data
1299          * is available
1300          */
1301         if (syncing && locked == 0 &&
1302             !test_bit(STRIPE_INSYNC, &sh->state) && failed <= 2) {
1303                 set_bit(STRIPE_HANDLE, &sh->state);
1304 #if 0 /* RAID-6: Don't support CHECK PARITY yet */
1305                 if (failed == 0) {
1306                         char *pagea;
1307                         if (uptodate != disks)
1308                                 BUG();
1309                         compute_parity(sh, CHECK_PARITY);
1310                         uptodate--;
1311                         pagea = page_address(sh->dev[pd_idx].page);
1312                         if ((*(u32*)pagea) == 0 &&
1313                             !memcmp(pagea, pagea+4, STRIPE_SIZE-4)) {
1314                                 /* parity is correct (on disc, not in buffer any more) */
1315                                 set_bit(STRIPE_INSYNC, &sh->state);
1316                         }
1317                 }
1318 #endif
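		/* Resync/recovery path: recompute the contents of up to two
		 * "failed" slots and write them back.  When fewer than two
		 * devices have actually failed, the remaining slot(s) default
		 * to the P and Q blocks below, so a plain resync ends up
		 * rewriting the parity blocks.
		 */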
1319                 if (!test_bit(STRIPE_INSYNC, &sh->state)) {
1320                         int failed_needupdate[2];
1321                         struct r5dev *adev, *bdev;
1322
1323                         if ( failed < 1 )
1324                                 failed_num[0] = pd_idx;
1325                         if ( failed < 2 )
1326                                 failed_num[1] = (failed_num[0] == qd_idx) ? pd_idx : qd_idx;
1327
1328                         failed_needupdate[0] = !test_bit(R5_UPTODATE, &sh->dev[failed_num[0]].flags);
1329                         failed_needupdate[1] = !test_bit(R5_UPTODATE, &sh->dev[failed_num[1]].flags);
1330
1331                         PRINTK("sync: failed=%d num=%d,%d fnu=%u%u\n",
1332                                failed, failed_num[0], failed_num[1], failed_needupdate[0], failed_needupdate[1]);
1333
1334 #if 0  /* RAID-6: This code seems to require that CHECK_PARITY destroys the uptodateness of the parity */
1335                         /* should be able to compute the missing block(s) and write to spare */
1336                         if ( failed_needupdate[0] ^ failed_needupdate[1] ) {
1337                                 if (uptodate+1 != disks)
1338                                         BUG();
1339                                 compute_block_1(sh, failed_needupdate[0] ? failed_num[0] : failed_num[1]);
1340                                 uptodate++;
1341                         } else if ( failed_needupdate[0] & failed_needupdate[1] ) {
1342                                 if (uptodate+2 != disks)
1343                                         BUG();
1344                                 compute_block_2(sh, failed_num[0], failed_num[1]);
1345                                 uptodate += 2;
1346                         }
1347 #else
1348                         compute_block_2(sh, failed_num[0], failed_num[1]);
1349                         uptodate += failed_needupdate[0] + failed_needupdate[1];
1350 #endif
1351
1352                         if (uptodate != disks)
1353                                 BUG();
1354
1355                         PRINTK("Marking for sync stripe %llu blocks %d,%d\n",
1356                                (unsigned long long)sh->sector, failed_num[0], failed_num[1]);
1357
1358                         /**** FIX: Should we really do both of these unconditionally? ****/
1359                         adev = &sh->dev[failed_num[0]];
1360                         locked += !test_bit(R5_LOCKED, &adev->flags);
1361                         set_bit(R5_LOCKED, &adev->flags);
1362                         set_bit(R5_Wantwrite, &adev->flags);
1363                         bdev = &sh->dev[failed_num[1]];
1364                         locked += !test_bit(R5_LOCKED, &bdev->flags);
1365                         set_bit(R5_LOCKED, &bdev->flags);
1366                         set_bit(R5_Wantwrite, &bdev->flags);
1367
1368                         set_bit(STRIPE_INSYNC, &sh->state);
1369                         set_bit(R5_Syncio, &adev->flags);
1370                         set_bit(R5_Syncio, &bdev->flags);
1371                 }
1372         }
1373         if (syncing && locked == 0 && test_bit(STRIPE_INSYNC, &sh->state)) {
1374                 md_done_sync(conf->mddev, STRIPE_SECTORS,1);
1375                 clear_bit(STRIPE_SYNCING, &sh->state);
1376         }
1377
1378         spin_unlock(&sh->lock);
1379
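	/* With the stripe lock dropped, complete the bios collected on
	 * return_bi earlier in handle_stripe: restore the byte count and
	 * call each bio's end_io.
	 */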
1380         while ((bi=return_bi)) {
1381                 int bytes = bi->bi_size;
1382
1383                 return_bi = bi->bi_next;
1384                 bi->bi_next = NULL;
1385                 bi->bi_size = 0;
1386                 bi->bi_end_io(bi, bytes, 0);
1387         }
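	/* Finally issue the I/O decided on above: for every device marked
	 * Wantread or Wantwrite, fill in the stripe's embedded per-device bio
	 * and submit it with generic_make_request().  The rdev's pending count
	 * is taken under device_lock so the device cannot go away while the
	 * request is in flight; if the device is missing or faulty the block
	 * is simply unlocked and the stripe re-queued for handling.
	 */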
1388         for (i=disks; i-- ;) {
1389                 int rw;
1390                 struct bio *bi;
1391                 mdk_rdev_t *rdev;
1392                 if (test_and_clear_bit(R5_Wantwrite, &sh->dev[i].flags))
1393                         rw = 1;
1394                 else if (test_and_clear_bit(R5_Wantread, &sh->dev[i].flags))
1395                         rw = 0;
1396                 else
1397                         continue;
1398
1399                 bi = &sh->dev[i].req;
1400
1401                 bi->bi_rw = rw;
1402                 if (rw)
1403                         bi->bi_end_io = raid6_end_write_request;
1404                 else
1405                         bi->bi_end_io = raid6_end_read_request;
1406
1407                 spin_lock_irq(&conf->device_lock);
1408                 rdev = conf->disks[i].rdev;
1409                 if (rdev && rdev->faulty)
1410                         rdev = NULL;
1411                 if (rdev)
1412                         atomic_inc(&rdev->nr_pending);
1413                 spin_unlock_irq(&conf->device_lock);
1414
1415                 if (rdev) {
1416                         if (test_bit(R5_Syncio, &sh->dev[i].flags))
1417                                 md_sync_acct(rdev, STRIPE_SECTORS);
1418
1419                         bi->bi_bdev = rdev->bdev;
1420                         PRINTK("for %llu schedule op %ld on disc %d\n",
1421                                 (unsigned long long)sh->sector, bi->bi_rw, i);
1422                         atomic_inc(&sh->count);
1423                         bi->bi_sector = sh->sector + rdev->data_offset;
1424                         bi->bi_flags = 1 << BIO_UPTODATE;
1425                         bi->bi_vcnt = 1;
1426                         bi->bi_idx = 0;
1427                         bi->bi_io_vec = &sh->dev[i].vec;
1428                         bi->bi_io_vec[0].bv_len = STRIPE_SIZE;
1429                         bi->bi_io_vec[0].bv_offset = 0;
1430                         bi->bi_size = STRIPE_SIZE;
1431                         bi->bi_next = NULL;
1432                         generic_make_request(bi);
1433                 } else {
1434                         PRINTK("skip op %ld on disc %d for sector %llu\n",
1435                                 bi->bi_rw, i, (unsigned long long)sh->sector);
1436                         clear_bit(R5_LOCKED, &sh->dev[i].flags);
1437                         set_bit(STRIPE_HANDLE, &sh->state);
1438                 }
1439         }
1440 }
1441
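/* Move stripes that were deferred (STRIPE_DELAYED) back onto handle_list once
 * the number of active preread stripes has dropped below IO_THRESHOLD.
 * Called with conf->device_lock held.
 */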
1442 static inline void raid6_activate_delayed(raid6_conf_t *conf)
1443 {
1444         if (atomic_read(&conf->preread_active_stripes) < IO_THRESHOLD) {
1445                 while (!list_empty(&conf->delayed_list)) {
1446                         struct list_head *l = conf->delayed_list.next;
1447                         struct stripe_head *sh;
1448                         sh = list_entry(l, struct stripe_head, lru);
1449                         list_del_init(l);
1450                         clear_bit(STRIPE_DELAYED, &sh->state);
1451                         if (!test_and_set_bit(STRIPE_PREREAD_ACTIVE, &sh->state))
1452                                 atomic_inc(&conf->preread_active_stripes);
1453                         list_add_tail(&sh->lru, &conf->handle_list);
1454                 }
1455         }
1456 }
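
/* Block-layer unplug callback for the md queue: when the plug is removed,
 * release any delayed stripes and wake the raid6d thread to process them.
 */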
1457 static void raid6_unplug_device(void *data)
1458 {
1459         request_queue_t *q = data;
1460         mddev_t *mddev = q->queuedata;
1461         raid6_conf_t *conf = mddev_to_conf(mddev);
1462         unsigned long flags;
1463
1464         spin_lock_irqsave(&conf->device_lock, flags);
1465
1466         if (blk_remove_plug(q))
1467                 raid6_activate_delayed(conf);
1468         md_wakeup_thread(mddev->thread);
1469
1470         spin_unlock_irqrestore(&conf->device_lock, flags);
1471 }
1472
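/* Plug the md queue so that incoming writes have a chance to accumulate into
 * full-stripe writes before raid6d starts processing them.
 */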
1473 static inline void raid6_plug_device(raid6_conf_t *conf)
1474 {
1475         spin_lock_irq(&conf->device_lock);
1476         blk_plug_device(conf->mddev->queue);
1477         spin_unlock_irq(&conf->device_lock);
1478 }
1479
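/* Entry point for bios submitted to the array.  The bio is walked in
 * STRIPE_SECTORS-sized pieces; each piece is mapped to its stripe and device
 * by raid6_compute_sector(), attached to the stripe, and handled.
 * bi_phys_segments is overloaded as a count of outstanding pieces so the bio
 * is only completed once every stripe it touches has finished with it.
 */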
1480 static int make_request (request_queue_t *q, struct bio * bi)
1481 {
1482         mddev_t *mddev = q->queuedata;
1483         raid6_conf_t *conf = mddev_to_conf(mddev);
1484         const unsigned int raid_disks = conf->raid_disks;
1485         const unsigned int data_disks = raid_disks - 2;
1486         unsigned int dd_idx, pd_idx;
1487         sector_t new_sector;
1488         sector_t logical_sector, last_sector;
1489         struct stripe_head *sh;
1490
1491         if (bio_data_dir(bi)==WRITE) {
1492                 disk_stat_inc(mddev->gendisk, writes);
1493                 disk_stat_add(mddev->gendisk, write_sectors, bio_sectors(bi));
1494         } else {
1495                 disk_stat_inc(mddev->gendisk, reads);
1496                 disk_stat_add(mddev->gendisk, read_sectors, bio_sectors(bi));
1497         }
1498
1499         logical_sector = bi->bi_sector & ~(STRIPE_SECTORS-1);
1500         last_sector = bi->bi_sector + (bi->bi_size>>9);
1501
1502         bi->bi_next = NULL;
1503         bi->bi_phys_segments = 1;       /* over-loaded to count active stripes */
1504         if ( bio_data_dir(bi) == WRITE )
1505                 md_write_start(mddev);
1506         for (;logical_sector < last_sector; logical_sector += STRIPE_SECTORS) {
1507
1508                 new_sector = raid6_compute_sector(logical_sector,
1509                                                   raid_disks, data_disks, &dd_idx, &pd_idx, conf);
1510
1511                 PRINTK("raid6: make_request, sector %Lu logical %Lu\n",
1512                        (unsigned long long)new_sector,
1513                        (unsigned long long)logical_sector);
1514
1515                 sh = get_active_stripe(conf, new_sector, pd_idx, (bi->bi_rw&RWA_MASK));
1516                 if (sh) {
1517
1518                         add_stripe_bio(sh, bi, dd_idx, (bi->bi_rw&RW_MASK));
1519
1520                         raid6_plug_device(conf);
1521                         handle_stripe(sh);
1522                         release_stripe(sh);
1523                 } else {
1524                         /* cannot get a stripe for read-ahead, just give up */
1525                         clear_bit(BIO_UPTODATE, &bi->bi_flags);
1526                         break;
1527                 }
1528
1529         }
1530         spin_lock_irq(&conf->device_lock);
1531         if (--bi->bi_phys_segments == 0) {
1532                 int bytes = bi->bi_size;
1533
1534                 if ( bio_data_dir(bi) == WRITE )
1535                         md_write_end(mddev);
1536                 bi->bi_size = 0;
1537                 bi->bi_end_io(bi, bytes, 0);
1538         }
1539         spin_unlock_irq(&conf->device_lock);
1540         return 0;
1541 }
1542
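/* Resync/recovery is driven one stripe at a time.  sector_nr (a per-device
 * sector) is split into a stripe number and an offset within the chunk; the
 * corresponding stripe is fetched, marked STRIPE_SYNCING, and handle_stripe()
 * does the actual work.  Returns the number of sectors handled
 * (STRIPE_SECTORS).
 */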
1543 /* FIXME go_faster isn't used */
1544 static int sync_request (mddev_t *mddev, sector_t sector_nr, int go_faster)
1545 {
1546         raid6_conf_t *conf = (raid6_conf_t *) mddev->private;
1547         struct stripe_head *sh;
1548         int sectors_per_chunk = conf->chunk_size >> 9;
1549         sector_t x;
1550         unsigned long stripe;
1551         int chunk_offset;
1552         int dd_idx, pd_idx;
1553         unsigned long first_sector;
1554         int raid_disks = conf->raid_disks;
1555         int data_disks = raid_disks - 2;
1556
1557         if (sector_nr >= mddev->size <<1)
1558                 /* just being told to finish up .. nothing to do */
1559                 return 0;
1560
1561         x = sector_nr;
1562         chunk_offset = sector_div(x, sectors_per_chunk);
1563         stripe = x;
1564         BUG_ON(x != stripe);
1565
1566         first_sector = raid6_compute_sector(stripe*data_disks*sectors_per_chunk
1567                 + chunk_offset, raid_disks, data_disks, &dd_idx, &pd_idx, conf);
1568         sh = get_active_stripe(conf, sector_nr, pd_idx, 1);
1569         if (sh == NULL) {
1570                 sh = get_active_stripe(conf, sector_nr, pd_idx, 0);
1571                 /* make sure we don't swamp the stripe cache if someone else
1572                  * is trying to get access
1573                  */
1574                 yield();
1575         }
1576         spin_lock(&sh->lock);
1577         set_bit(STRIPE_SYNCING, &sh->state);
1578         clear_bit(STRIPE_INSYNC, &sh->state);
1579         spin_unlock(&sh->lock);
1580
1581         handle_stripe(sh);
1582         release_stripe(sh);
1583
1584         return STRIPE_SECTORS;
1585 }
1586
1587 /*
1588  * This is our raid6 kernel thread.
1589  *
1590  * We take stripes off conf->handle_list and handle each one in turn.
1591  * Stripes completed by the interrupt handler are put back on that list
1592  * for us, so that they will not have to wait for our next wakeup.
1593  */
1594 static void raid6d (mddev_t *mddev)
1595 {
1596         struct stripe_head *sh;
1597         raid6_conf_t *conf = mddev_to_conf(mddev);
1598         int handled;
1599
1600         PRINTK("+++ raid6d active\n");
1601
1602         md_check_recovery(mddev);
1603         md_handle_safemode(mddev);
1604
1605         handled = 0;
1606         spin_lock_irq(&conf->device_lock);
1607         while (1) {
1608                 struct list_head *first;
1609
1610                 if (list_empty(&conf->handle_list) &&
1611                     atomic_read(&conf->preread_active_stripes) < IO_THRESHOLD &&
1612                     !blk_queue_plugged(mddev->queue) &&
1613                     !list_empty(&conf->delayed_list))
1614                         raid6_activate_delayed(conf);
1615
1616                 if (list_empty(&conf->handle_list))
1617                         break;
1618
1619                 first = conf->handle_list.next;
1620                 sh = list_entry(first, struct stripe_head, lru);
1621
1622                 list_del_init(first);
1623                 atomic_inc(&sh->count);
1624                 if (atomic_read(&sh->count)!= 1)
1625                         BUG();
1626                 spin_unlock_irq(&conf->device_lock);
1627
1628                 handled++;
1629                 handle_stripe(sh);
1630                 release_stripe(sh);
1631
1632                 spin_lock_irq(&conf->device_lock);
1633         }
1634         PRINTK("%d stripes handled\n", handled);
1635
1636         spin_unlock_irq(&conf->device_lock);
1637
1638         PRINTK("--- raid6d inactive\n");
1639 }
1640
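/* Start the raid6 personality on an array: allocate and initialise the
 * per-array raid6_conf_t and the stripe hash table, note which member devices
 * are operational, sanity-check the geometry (at least 4 devices, valid chunk
 * size and layout algorithm, at most 2 failed devices), register the raid6d
 * thread and grow the stripe cache.
 */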
1641 static int run (mddev_t *mddev)
1642 {
1643         raid6_conf_t *conf;
1644         int raid_disk, memory;
1645         mdk_rdev_t *rdev;
1646         struct disk_info *disk;
1647         struct list_head *tmp;
1648
1649         if (mddev->level != 6) {
1650                 PRINTK("raid6: %s: raid level not set to 6 (%d)\n", mdname(mddev), mddev->level);
1651                 return -EIO;
1652         }
1653
1654         mddev->private = kmalloc (sizeof (raid6_conf_t)
1655                                   + mddev->raid_disks * sizeof(struct disk_info),
1656                                   GFP_KERNEL);
1657         if ((conf = mddev->private) == NULL)
1658                 goto abort;
1659         memset (conf, 0, sizeof (*conf) + mddev->raid_disks * sizeof(struct disk_info) );
1660         conf->mddev = mddev;
1661
1662         if ((conf->stripe_hashtbl = (struct stripe_head **) __get_free_pages(GFP_ATOMIC, HASH_PAGES_ORDER)) == NULL)
1663                 goto abort;
1664         memset(conf->stripe_hashtbl, 0, HASH_PAGES * PAGE_SIZE);
1665
1666         conf->device_lock = SPIN_LOCK_UNLOCKED;
1667         init_waitqueue_head(&conf->wait_for_stripe);
1668         INIT_LIST_HEAD(&conf->handle_list);
1669         INIT_LIST_HEAD(&conf->delayed_list);
1670         INIT_LIST_HEAD(&conf->inactive_list);
1671         atomic_set(&conf->active_stripes, 0);
1672         atomic_set(&conf->preread_active_stripes, 0);
1673
1674         mddev->queue->unplug_fn = raid6_unplug_device;
1675
1676         PRINTK("raid6: run(%s) called.\n", mdname(mddev));
1677
1678         ITERATE_RDEV(mddev,rdev,tmp) {
1679                 raid_disk = rdev->raid_disk;
1680                 if (raid_disk >= mddev->raid_disks
1681                     || raid_disk < 0)
1682                         continue;
1683                 disk = conf->disks + raid_disk;
1684
1685                 disk->rdev = rdev;
1686
1687                 if (rdev->in_sync) {
1688                         char b[BDEVNAME_SIZE];
1689                         printk(KERN_INFO "raid6: device %s operational as raid"
1690                                " disk %d\n", bdevname(rdev->bdev,b),
1691                                raid_disk);
1692                         conf->working_disks++;
1693                 }
1694         }
1695
1696         conf->raid_disks = mddev->raid_disks;
1697
1698         /*
1699          * 0 for a fully functional array, 1 or 2 for a degraded array.
1700          */
1701         mddev->degraded = conf->failed_disks = conf->raid_disks - conf->working_disks;
1702         conf->mddev = mddev;
1703         conf->chunk_size = mddev->chunk_size;
1704         conf->level = mddev->level;
1705         conf->algorithm = mddev->layout;
1706         conf->max_nr_stripes = NR_STRIPES;
1707
1708         if (conf->raid_disks < 4) {
1709                 printk(KERN_ERR "raid6: not enough configured devices for %s (%d, minimum 4)\n",
1710                        mdname(mddev), conf->raid_disks);
1711                 goto abort;
1712         }
1713         if (!conf->chunk_size || conf->chunk_size % 4) {
1714                 printk(KERN_ERR "raid6: invalid chunk size %d for %s\n",
1715                        conf->chunk_size, mdname(mddev));
1716                 goto abort;
1717         }
1718         if (conf->algorithm > ALGORITHM_RIGHT_SYMMETRIC) {
1719                 printk(KERN_ERR
1720                        "raid6: unsupported parity algorithm %d for %s\n",
1721                        conf->algorithm, mdname(mddev));
1722                 goto abort;
1723         }
1724         if (mddev->degraded > 2) {
1725                 printk(KERN_ERR "raid6: not enough operational devices for %s"
1726                        " (%d/%d failed)\n",
1727                        mdname(mddev), conf->failed_disks, conf->raid_disks);
1728                 goto abort;
1729         }
1730
1731 #if 0                           /* FIX: For now */
1732         if (mddev->degraded > 0 &&
1733             mddev->recovery_cp != MaxSector) {
1734                 printk(KERN_ERR "raid6: cannot start dirty degraded array for %s\n", mdname(mddev));
1735                 goto abort;
1736         }
1737 #endif
1738
1739         {
1740                 mddev->thread = md_register_thread(raid6d, mddev, "%s_raid6");
1741                 if (!mddev->thread) {
1742                         printk(KERN_ERR
1743                                "raid6: couldn't allocate thread for %s\n",
1744                                mdname(mddev));
1745                         goto abort;
1746                 }
1747         }
1748
1749         memory = conf->max_nr_stripes * (sizeof(struct stripe_head) +
1750                  conf->raid_disks * ((sizeof(struct bio) + PAGE_SIZE))) / 1024;
1751         if (grow_stripes(conf, conf->max_nr_stripes)) {
1752                 printk(KERN_ERR
1753                        "raid6: couldn't allocate %dkB for buffers\n", memory);
1754                 shrink_stripes(conf);
1755                 md_unregister_thread(mddev->thread);
1756                 goto abort;
1757         } else
1758                 printk(KERN_INFO "raid6: allocated %dkB for %s\n",
1759                        memory, mdname(mddev));
1760
1761         if (mddev->degraded == 0)
1762                 printk(KERN_INFO "raid6: raid level %d set %s active with %d out of %d"
1763                        " devices, algorithm %d\n", conf->level, mdname(mddev),
1764                        mddev->raid_disks-mddev->degraded, mddev->raid_disks,
1765                        conf->algorithm);
1766         else
1767                 printk(KERN_ALERT "raid6: raid level %d set %s active with %d"
1768                        " out of %d devices, algorithm %d\n", conf->level,
1769                        mdname(mddev), mddev->raid_disks - mddev->degraded,
1770                        mddev->raid_disks, conf->algorithm);
1771
1772         print_raid6_conf(conf);
1773
1774         /* read-ahead size must cover a whole stripe, which is
1775          * (n-2) * chunksize where 'n' is the number of raid devices
1776          */
1777         {
1778                 int stripe = (mddev->raid_disks-2) * mddev->chunk_size
1779                         / PAGE_CACHE_SIZE;
1780                 if (mddev->queue->backing_dev_info.ra_pages < stripe)
1781                         mddev->queue->backing_dev_info.ra_pages = stripe;
1782         }
1783
1784         /* Ok, everything is just fine now */
1785         mddev->array_size =  mddev->size * (mddev->raid_disks - 2);
1786         return 0;
1787 abort:
1788         if (conf) {
1789                 print_raid6_conf(conf);
1790                 if (conf->stripe_hashtbl)
1791                         free_pages((unsigned long) conf->stripe_hashtbl,
1792                                                         HASH_PAGES_ORDER);
1793                 kfree(conf);
1794         }
1795         mddev->private = NULL;
1796         printk(KERN_ALERT "raid6: failed to run raid set %s\n", mdname(mddev));
1797         return -EIO;
1798 }
1799
1800
1801
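/* Tear down what run() set up: stop the raid6d thread, free the stripe cache
 * and the hash table, and release the conf structure.
 */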
1802 static int stop (mddev_t *mddev)
1803 {
1804         raid6_conf_t *conf = (raid6_conf_t *) mddev->private;
1805
1806         md_unregister_thread(mddev->thread);
1807         mddev->thread = NULL;
1808         shrink_stripes(conf);
1809         free_pages((unsigned long) conf->stripe_hashtbl, HASH_PAGES_ORDER);
1810         kfree(conf);
1811         mddev->private = NULL;
1812         return 0;
1813 }
1814
1815 #if RAID6_DUMPSTATE
1816 static void print_sh (struct seq_file *seq, struct stripe_head *sh)
1817 {
1818         int i;
1819
1820         seq_printf(seq, "sh %llu, pd_idx %d, state %ld.\n",
1821                    (unsigned long long)sh->sector, sh->pd_idx, sh->state);
1822         seq_printf(seq, "sh %llu,  count %d.\n",
1823                    (unsigned long long)sh->sector, atomic_read(&sh->count));
1824         seq_printf(seq, "sh %llu, ", (unsigned long long)sh->sector);
1825         for (i = 0; i < sh->raid_conf->raid_disks; i++) {
1826                 seq_printf(seq, "(cache%d: %p %ld) ",
1827                            i, sh->dev[i].page, sh->dev[i].flags);
1828         }
1829         seq_printf(seq, "\n");
1830 }
1831
1832 static void printall (struct seq_file *seq, raid6_conf_t *conf)
1833 {
1834         struct stripe_head *sh;
1835         int i;
1836
1837         spin_lock_irq(&conf->device_lock);
1838         for (i = 0; i < NR_HASH; i++) {
1839                 sh = conf->stripe_hashtbl[i];
1840                 for (; sh; sh = sh->hash_next) {
1841                         if (sh->raid_conf != conf)
1842                                 continue;
1843                         print_sh(seq, sh);
1844                 }
1845         }
1846         spin_unlock_irq(&conf->device_lock);
1847 }
1848 #endif
1849
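/* Emit this array's portion of /proc/mdstat: chunk size, layout algorithm and
 * a [UU_U]-style map of which member devices are in sync.
 */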
1850 static void status (struct seq_file *seq, mddev_t *mddev)
1851 {
1852         raid6_conf_t *conf = (raid6_conf_t *) mddev->private;
1853         int i;
1854
1855         seq_printf (seq, " level %d, %dk chunk, algorithm %d", mddev->level, mddev->chunk_size >> 10, mddev->layout);
1856         seq_printf (seq, " [%d/%d] [", conf->raid_disks, conf->working_disks);
1857         for (i = 0; i < conf->raid_disks; i++)
1858                 seq_printf (seq, "%s",
1859                             conf->disks[i].rdev &&
1860                             conf->disks[i].rdev->in_sync ? "U" : "_");
1861         seq_printf (seq, "]");
1862 #if RAID6_DUMPSTATE
1863         seq_printf (seq, "\n");
1864         printall(seq, conf);
1865 #endif
1866 }
1867
1868 static void print_raid6_conf (raid6_conf_t *conf)
1869 {
1870         int i;
1871         struct disk_info *tmp;
1872
1873         printk("RAID6 conf printout:\n");
1874         if (!conf) {
1875                 printk("(conf==NULL)\n");
1876                 return;
1877         }
1878         printk(" --- rd:%d wd:%d fd:%d\n", conf->raid_disks,
1879                  conf->working_disks, conf->failed_disks);
1880
1881         for (i = 0; i < conf->raid_disks; i++) {
1882                 char b[BDEVNAME_SIZE];
1883                 tmp = conf->disks + i;
1884                 if (tmp->rdev)
1885                         printk(" disk %d, o:%d, dev:%s\n",
1886                                i, !tmp->rdev->faulty,
1887                                bdevname(tmp->rdev->bdev,b));
1888         }
1889 }
1890
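/* Called when recovery completes: any device that is present, not faulty and
 * not yet in_sync has just been rebuilt, so mark it in_sync and update the
 * degraded/working/failed counts accordingly.
 */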
1891 static int raid6_spare_active(mddev_t *mddev)
1892 {
1893         int i;
1894         raid6_conf_t *conf = mddev->private;
1895         struct disk_info *tmp;
1896
1897         spin_lock_irq(&conf->device_lock);
1898         for (i = 0; i < conf->raid_disks; i++) {
1899                 tmp = conf->disks + i;
1900                 if (tmp->rdev
1901                     && !tmp->rdev->faulty
1902                     && !tmp->rdev->in_sync) {
1903                         mddev->degraded--;
1904                         conf->failed_disks--;
1905                         conf->working_disks++;
1906                         tmp->rdev->in_sync = 1;
1907                 }
1908         }
1909         spin_unlock_irq(&conf->device_lock);
1910         print_raid6_conf(conf);
1911         return 0;
1912 }
1913
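/* Hot-remove a member device.  The removal is refused (-EBUSY) while the
 * device is still in_sync or has I/O pending against it.
 */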
1914 static int raid6_remove_disk(mddev_t *mddev, int number)
1915 {
1916         raid6_conf_t *conf = mddev->private;
1917         int err = 1;
1918         struct disk_info *p = conf->disks + number;
1919
1920         print_raid6_conf(conf);
1921         spin_lock_irq(&conf->device_lock);
1922
1923         if (p->rdev) {
1924                 if (p->rdev->in_sync ||
1925                     atomic_read(&p->rdev->nr_pending)) {
1926                         err = -EBUSY;
1927                         goto abort;
1928                 }
1929                 p->rdev = NULL;
1930                 err = 0;
1931         }
1932         if (err)
1933                 MD_BUG();
1934 abort:
1935         spin_unlock_irq(&conf->device_lock);
1936         print_raid6_conf(conf);
1937         return err;
1938 }
1939
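/* Hot-add a spare: place the new rdev in the first free slot, marked not
 * in_sync so that recovery will rebuild it.  Returns 1 if a slot was found.
 */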
1940 static int raid6_add_disk(mddev_t *mddev, mdk_rdev_t *rdev)
1941 {
1942         raid6_conf_t *conf = mddev->private;
1943         int found = 0;
1944         int disk;
1945         struct disk_info *p;
1946
1947         spin_lock_irq(&conf->device_lock);
1948         /*
1949          * find the disk ...
1950          */
1951         for (disk=0; disk < mddev->raid_disks; disk++)
1952                 if ((p=conf->disks + disk)->rdev == NULL) {
1953                         p->rdev = rdev;
1954                         rdev->in_sync = 0;
1955                         rdev->raid_disk = disk;
1956                         found = 1;
1957                         break;
1958                 }
1959         spin_unlock_irq(&conf->device_lock);
1960         print_raid6_conf(conf);
1961         return found;
1962 }
1963
1964 static mdk_personality_t raid6_personality=
1965 {
1966         .name           = "raid6",
1967         .owner          = THIS_MODULE,
1968         .make_request   = make_request,
1969         .run            = run,
1970         .stop           = stop,
1971         .status         = status,
1972         .error_handler  = error,
1973         .hot_add_disk   = raid6_add_disk,
1974         .hot_remove_disk= raid6_remove_disk,
1975         .spare_active   = raid6_spare_active,
1976         .sync_request   = sync_request,
1977 };
1978
1979 static int __init raid6_init (void)
1980 {
1981         int e;
1982
1983         e = raid6_select_algo();
1984         if ( e )
1985                 return e;
1986
1987         return register_md_personality (RAID6, &raid6_personality);
1988 }
1989
1990 static void raid6_exit (void)
1991 {
1992         unregister_md_personality (RAID6);
1993 }
1994
1995 module_init(raid6_init);
1996 module_exit(raid6_exit);
1997 MODULE_LICENSE("GPL");
1998 MODULE_ALIAS("md-personality-8"); /* RAID6 */