md/raid10: fix transcription error in calc_sectors conversion.
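
Among the changes folded into this diff is write-queue back-pressure: raid10_congested() reports congestion and make_request() blocks once conf->pending_count reaches max_queued_requests, while flush_pending_writes() zeroes the counter and wakes waiters on conf->wait_barrier. The snippet below is a minimal user-space sketch of that throttle/wake pairing, using hypothetical pthread-based names (queue_lock, queue_drained) rather than the md/raid10 structures; it illustrates the idea and is not code from the patch.

/* Minimal sketch of the back-pressure idea (illustrative names, not the
 * kernel implementation): writers block once too many requests are
 * queued, and the flushing side wakes them after draining the queue.
 */
#include <pthread.h>

#define MAX_QUEUED_REQUESTS 1024

static pthread_mutex_t queue_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t queue_drained = PTHREAD_COND_INITIALIZER;
static int pending_count;

/* Writer side: mirrors the wait_event() added to make_request(). */
static void queue_one_write(void)
{
        pthread_mutex_lock(&queue_lock);
        while (pending_count >= MAX_QUEUED_REQUESTS)
                pthread_cond_wait(&queue_drained, &queue_lock);
        pending_count++;        /* request is now on the pending list */
        pthread_mutex_unlock(&queue_lock);
}

/* Flush side: mirrors flush_pending_writes() zeroing pending_count and
 * calling wake_up(&conf->wait_barrier).
 */
static void flush_pending(void)
{
        pthread_mutex_lock(&queue_lock);
        pending_count = 0;
        pthread_cond_broadcast(&queue_drained);
        pthread_mutex_unlock(&queue_lock);
}

In the patch itself the wake-up on conf->wait_barrier serves both this throttle and the existing resync barrier, so a single wait queue covers both cases.
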
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index 5f03558..3f91c2e 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -21,6 +21,7 @@
 #include <linux/slab.h>
 #include <linux/delay.h>
 #include <linux/blkdev.h>
+#include <linux/module.h>
 #include <linux/seq_file.h>
 #include <linux/ratelimit.h>
 #include "md.h"
  */
 #define        NR_RAID10_BIOS 256
 
-static void allow_barrier(conf_t *conf);
-static void lower_barrier(conf_t *conf);
+/* When there are this many requests queued to be written by
+ * the raid10 thread, we become 'congested' to provide back-pressure
+ * for writeback.
+ */
+static int max_queued_requests = 1024;
+
+static void allow_barrier(struct r10conf *conf);
+static void lower_barrier(struct r10conf *conf);
+static int enough(struct r10conf *conf, int ignore);
 
 static void * r10bio_pool_alloc(gfp_t gfp_flags, void *data)
 {
-       conf_t *conf = data;
-       int size = offsetof(struct r10bio_s, devs[conf->copies]);
+       struct r10conf *conf = data;
+       int size = offsetof(struct r10bio, devs[conf->copies]);
 
-       /* allocate a r10bio with room for raid_disks entries in the bios array */
+       /* allocate a r10bio with room for raid_disks entries in the
+        * bios array */
        return kzalloc(size, gfp_flags);
 }
 
@@ -92,9 +101,9 @@ static void r10bio_pool_free(void *r10_bio, void *data)
  */
 static void * r10buf_pool_alloc(gfp_t gfp_flags, void *data)
 {
-       conf_t *conf = data;
+       struct r10conf *conf = data;
        struct page *page;
-       r10bio_t *r10_bio;
+       struct r10bio *r10_bio;
        struct bio *bio;
        int i, j;
        int nalloc;
@@ -116,12 +125,19 @@ static void * r10buf_pool_alloc(gfp_t gfp_flags, void *data)
                if (!bio)
                        goto out_free_bio;
                r10_bio->devs[j].bio = bio;
+               if (!conf->have_replacement)
+                       continue;
+               bio = bio_kmalloc(gfp_flags, RESYNC_PAGES);
+               if (!bio)
+                       goto out_free_bio;
+               r10_bio->devs[j].repl_bio = bio;
        }
        /*
         * Allocate RESYNC_PAGES data pages and attach them
         * where needed.
         */
        for (j = 0 ; j < nalloc; j++) {
+               struct bio *rbio = r10_bio->devs[j].repl_bio;
                bio = r10_bio->devs[j].bio;
                for (i = 0; i < RESYNC_PAGES; i++) {
                        if (j == 1 && !test_bit(MD_RECOVERY_SYNC,
@@ -136,6 +152,8 @@ static void * r10buf_pool_alloc(gfp_t gfp_flags, void *data)
                                goto out_free_pages;
 
                        bio->bi_io_vec[i].bv_page = page;
+                       if (rbio)
+                               rbio->bi_io_vec[i].bv_page = page;
                }
        }
 
@@ -149,8 +167,11 @@ out_free_pages:
                        safe_put_page(r10_bio->devs[j].bio->bi_io_vec[i].bv_page);
        j = -1;
 out_free_bio:
-       while ( ++j < nalloc )
+       while (++j < nalloc) {
                bio_put(r10_bio->devs[j].bio);
+               if (r10_bio->devs[j].repl_bio)
+                       bio_put(r10_bio->devs[j].repl_bio);
+       }
        r10bio_pool_free(r10_bio, conf);
        return NULL;
 }
@@ -158,8 +179,8 @@ out_free_bio:
 static void r10buf_pool_free(void *__r10_bio, void *data)
 {
        int i;
-       conf_t *conf = data;
-       r10bio_t *r10bio = __r10_bio;
+       struct r10conf *conf = data;
+       struct r10bio *r10bio = __r10_bio;
        int j;
 
        for (j=0; j < conf->copies; j++) {
@@ -171,44 +192,51 @@ static void r10buf_pool_free(void *__r10_bio, void *data)
                        }
                        bio_put(bio);
                }
+               bio = r10bio->devs[j].repl_bio;
+               if (bio)
+                       bio_put(bio);
        }
        r10bio_pool_free(r10bio, conf);
 }
 
-static void put_all_bios(conf_t *conf, r10bio_t *r10_bio)
+static void put_all_bios(struct r10conf *conf, struct r10bio *r10_bio)
 {
        int i;
 
        for (i = 0; i < conf->copies; i++) {
                struct bio **bio = & r10_bio->devs[i].bio;
-               if (*bio && *bio != IO_BLOCKED)
+               if (!BIO_SPECIAL(*bio))
+                       bio_put(*bio);
+               *bio = NULL;
+               bio = &r10_bio->devs[i].repl_bio;
+               if (r10_bio->read_slot < 0 && !BIO_SPECIAL(*bio))
                        bio_put(*bio);
                *bio = NULL;
        }
 }
 
-static void free_r10bio(r10bio_t *r10_bio)
+static void free_r10bio(struct r10bio *r10_bio)
 {
-       conf_t *conf = r10_bio->mddev->private;
+       struct r10conf *conf = r10_bio->mddev->private;
 
        put_all_bios(conf, r10_bio);
        mempool_free(r10_bio, conf->r10bio_pool);
 }
 
-static void put_buf(r10bio_t *r10_bio)
+static void put_buf(struct r10bio *r10_bio)
 {
-       conf_t *conf = r10_bio->mddev->private;
+       struct r10conf *conf = r10_bio->mddev->private;
 
        mempool_free(r10_bio, conf->r10buf_pool);
 
        lower_barrier(conf);
 }
 
-static void reschedule_retry(r10bio_t *r10_bio)
+static void reschedule_retry(struct r10bio *r10_bio)
 {
        unsigned long flags;
-       mddev_t *mddev = r10_bio->mddev;
-       conf_t *conf = mddev->private;
+       struct mddev *mddev = r10_bio->mddev;
+       struct r10conf *conf = mddev->private;
 
        spin_lock_irqsave(&conf->device_lock, flags);
        list_add(&r10_bio->retry_list, &conf->retry_list);
@@ -226,11 +254,11 @@ static void reschedule_retry(r10bio_t *r10_bio)
  * operation and are ready to return a success/failure code to the buffer
  * cache layer.
  */
-static void raid_end_bio_io(r10bio_t *r10_bio)
+static void raid_end_bio_io(struct r10bio *r10_bio)
 {
        struct bio *bio = r10_bio->master_bio;
        int done;
-       conf_t *conf = r10_bio->mddev->private;
+       struct r10conf *conf = r10_bio->mddev->private;
 
        if (bio->bi_phys_segments) {
                unsigned long flags;
@@ -256,9 +284,9 @@ static void raid_end_bio_io(r10bio_t *r10_bio)
 /*
  * Update disk head position estimator based on IRQ completion info.
  */
-static inline void update_head_pos(int slot, r10bio_t *r10_bio)
+static inline void update_head_pos(int slot, struct r10bio *r10_bio)
 {
-       conf_t *conf = r10_bio->mddev->private;
+       struct r10conf *conf = r10_bio->mddev->private;
 
        conf->mirrors[r10_bio->devs[slot].devnum].head_position =
                r10_bio->devs[slot].addr + (r10_bio->sectors);
@@ -267,30 +295,43 @@ static inline void update_head_pos(int slot, r10bio_t *r10_bio)
 /*
  * Find the disk number which triggered given bio
  */
-static int find_bio_disk(conf_t *conf, r10bio_t *r10_bio, struct bio *bio)
+static int find_bio_disk(struct r10conf *conf, struct r10bio *r10_bio,
+                        struct bio *bio, int *slotp, int *replp)
 {
        int slot;
+       int repl = 0;
 
-       for (slot = 0; slot < conf->copies; slot++)
+       for (slot = 0; slot < conf->copies; slot++) {
                if (r10_bio->devs[slot].bio == bio)
                        break;
+               if (r10_bio->devs[slot].repl_bio == bio) {
+                       repl = 1;
+                       break;
+               }
+       }
 
        BUG_ON(slot == conf->copies);
        update_head_pos(slot, r10_bio);
 
+       if (slotp)
+               *slotp = slot;
+       if (replp)
+               *replp = repl;
        return r10_bio->devs[slot].devnum;
 }
 
 static void raid10_end_read_request(struct bio *bio, int error)
 {
        int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
-       r10bio_t *r10_bio = bio->bi_private;
+       struct r10bio *r10_bio = bio->bi_private;
        int slot, dev;
-       conf_t *conf = r10_bio->mddev->private;
+       struct md_rdev *rdev;
+       struct r10conf *conf = r10_bio->mddev->private;
 
 
        slot = r10_bio->read_slot;
        dev = r10_bio->devs[slot].devnum;
+       rdev = r10_bio->devs[slot].rdev;
        /*
         * this branch is our 'one mirror IO has finished' event handler:
         */
@@ -307,8 +348,21 @@ static void raid10_end_read_request(struct bio *bio, int error)
                 * wait for the 'master' bio.
                 */
                set_bit(R10BIO_Uptodate, &r10_bio->state);
+       } else {
+               /* If all other devices that store this block have
+                * failed, we want to return the error upwards rather
+                * than fail the last device.  Here we redefine
+                * "uptodate" to mean "Don't want to retry"
+                */
+               unsigned long flags;
+               spin_lock_irqsave(&conf->device_lock, flags);
+               if (!enough(conf, rdev->raid_disk))
+                       uptodate = 1;
+               spin_unlock_irqrestore(&conf->device_lock, flags);
+       }
+       if (uptodate) {
                raid_end_bio_io(r10_bio);
-               rdev_dec_pending(conf->mirrors[dev].rdev, conf->mddev);
+               rdev_dec_pending(rdev, conf->mddev);
        } else {
                /*
                 * oops, read error - keep the refcount on the rdev
@@ -317,30 +371,75 @@ static void raid10_end_read_request(struct bio *bio, int error)
                printk_ratelimited(KERN_ERR
                                   "md/raid10:%s: %s: rescheduling sector %llu\n",
                                   mdname(conf->mddev),
-                                  bdevname(conf->mirrors[dev].rdev->bdev, b),
+                                  bdevname(rdev->bdev, b),
                                   (unsigned long long)r10_bio->sector);
                set_bit(R10BIO_ReadError, &r10_bio->state);
                reschedule_retry(r10_bio);
        }
 }
 
+static void close_write(struct r10bio *r10_bio)
+{
+       /* clear the bitmap if all writes complete successfully */
+       bitmap_endwrite(r10_bio->mddev->bitmap, r10_bio->sector,
+                       r10_bio->sectors,
+                       !test_bit(R10BIO_Degraded, &r10_bio->state),
+                       0);
+       md_write_end(r10_bio->mddev);
+}
+
+static void one_write_done(struct r10bio *r10_bio)
+{
+       if (atomic_dec_and_test(&r10_bio->remaining)) {
+               if (test_bit(R10BIO_WriteError, &r10_bio->state))
+                       reschedule_retry(r10_bio);
+               else {
+                       close_write(r10_bio);
+                       if (test_bit(R10BIO_MadeGood, &r10_bio->state))
+                               reschedule_retry(r10_bio);
+                       else
+                               raid_end_bio_io(r10_bio);
+               }
+       }
+}
+
 static void raid10_end_write_request(struct bio *bio, int error)
 {
        int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
-       r10bio_t *r10_bio = bio->bi_private;
+       struct r10bio *r10_bio = bio->bi_private;
        int dev;
-       conf_t *conf = r10_bio->mddev->private;
-
-       dev = find_bio_disk(conf, r10_bio, bio);
-
+       int dec_rdev = 1;
+       struct r10conf *conf = r10_bio->mddev->private;
+       int slot, repl;
+       struct md_rdev *rdev = NULL;
+
+       dev = find_bio_disk(conf, r10_bio, bio, &slot, &repl);
+
+       if (repl)
+               rdev = conf->mirrors[dev].replacement;
+       if (!rdev) {
+               smp_rmb();
+               repl = 0;
+               rdev = conf->mirrors[dev].rdev;
+       }
        /*
         * this branch is our 'one mirror IO has finished' event handler:
         */
        if (!uptodate) {
-               md_error(r10_bio->mddev, conf->mirrors[dev].rdev);
-               /* an I/O failed, we can't clear the bitmap */
-               set_bit(R10BIO_Degraded, &r10_bio->state);
-       } else
+               if (repl)
+                       /* Never record new bad blocks to replacement,
+                        * just fail it.
+                        */
+                       md_error(rdev->mddev, rdev);
+               else {
+                       set_bit(WriteErrorSeen, &rdev->flags);
+                       if (!test_and_set_bit(WantReplacement, &rdev->flags))
+                               set_bit(MD_RECOVERY_NEEDED,
+                                       &rdev->mddev->recovery);
+                       set_bit(R10BIO_WriteError, &r10_bio->state);
+                       dec_rdev = 0;
+               }
+       } else {
                /*
                 * Set R10BIO_Uptodate in our master bio, so that
                  * we will return a good error code to the higher
@@ -350,27 +449,36 @@ static void raid10_end_write_request(struct bio *bio, int error)
                 * user-side. So if something waits for IO, then it will
                 * wait for the 'master' bio.
                 */
+               sector_t first_bad;
+               int bad_sectors;
+
                set_bit(R10BIO_Uptodate, &r10_bio->state);
 
+               /* Maybe we can clear some bad blocks. */
+               if (is_badblock(rdev,
+                               r10_bio->devs[slot].addr,
+                               r10_bio->sectors,
+                               &first_bad, &bad_sectors)) {
+                       bio_put(bio);
+                       if (repl)
+                               r10_bio->devs[slot].repl_bio = IO_MADE_GOOD;
+                       else
+                               r10_bio->devs[slot].bio = IO_MADE_GOOD;
+                       dec_rdev = 0;
+                       set_bit(R10BIO_MadeGood, &r10_bio->state);
+               }
+       }
+
        /*
         *
         * Let's see if all mirrored write operations have finished
         * already.
         */
-       if (atomic_dec_and_test(&r10_bio->remaining)) {
-               /* clear the bitmap if all writes complete successfully */
-               bitmap_endwrite(r10_bio->mddev->bitmap, r10_bio->sector,
-                               r10_bio->sectors,
-                               !test_bit(R10BIO_Degraded, &r10_bio->state),
-                               0);
-               md_write_end(r10_bio->mddev);
-               raid_end_bio_io(r10_bio);
-       }
-
-       rdev_dec_pending(conf->mirrors[dev].rdev, conf->mddev);
+       one_write_done(r10_bio);
+       if (dec_rdev)
+               rdev_dec_pending(conf->mirrors[dev].rdev, conf->mddev);
 }
 
-
 /*
  * RAID10 layout manager
  * As well as the chunksize and raid_disks count, there are two
@@ -396,7 +504,7 @@ static void raid10_end_write_request(struct bio *bio, int error)
  * sector offset to a virtual address
  */
 
-static void raid10_find_phys(conf_t *conf, r10bio_t *r10bio)
+static void raid10_find_phys(struct r10conf *conf, struct r10bio *r10bio)
 {
        int n,f;
        sector_t sector;
@@ -444,7 +552,7 @@ static void raid10_find_phys(conf_t *conf, r10bio_t *r10bio)
        BUG_ON(slot != conf->copies);
 }
 
-static sector_t raid10_find_virt(conf_t *conf, sector_t sector, int dev)
+static sector_t raid10_find_virt(struct r10conf *conf, sector_t sector, int dev)
 {
        sector_t offset, chunk, vchunk;
 
@@ -478,25 +586,68 @@ static sector_t raid10_find_virt(conf_t *conf, sector_t sector, int dev)
  *     @biovec: the request that could be merged to it.
  *
  *     Return amount of bytes we can accept at this offset
- *      If near_copies == raid_disk, there are no striping issues,
- *      but in that case, the function isn't called at all.
+ *     This requires checking for end-of-chunk if near_copies != raid_disks,
+ *     and for subordinate merge_bvec_fns if merge_check_needed.
  */
 static int raid10_mergeable_bvec(struct request_queue *q,
                                 struct bvec_merge_data *bvm,
                                 struct bio_vec *biovec)
 {
-       mddev_t *mddev = q->queuedata;
+       struct mddev *mddev = q->queuedata;
+       struct r10conf *conf = mddev->private;
        sector_t sector = bvm->bi_sector + get_start_sect(bvm->bi_bdev);
        int max;
        unsigned int chunk_sectors = mddev->chunk_sectors;
        unsigned int bio_sectors = bvm->bi_size >> 9;
 
-       max =  (chunk_sectors - ((sector & (chunk_sectors - 1)) + bio_sectors)) << 9;
-       if (max < 0) max = 0; /* bio_add cannot handle a negative return */
-       if (max <= biovec->bv_len && bio_sectors == 0)
-               return biovec->bv_len;
-       else
-               return max;
+       if (conf->near_copies < conf->raid_disks) {
+               max = (chunk_sectors - ((sector & (chunk_sectors - 1))
+                                       + bio_sectors)) << 9;
+               if (max < 0)
+                       /* bio_add cannot handle a negative return */
+                       max = 0;
+               if (max <= biovec->bv_len && bio_sectors == 0)
+                       return biovec->bv_len;
+       } else
+               max = biovec->bv_len;
+
+       if (mddev->merge_check_needed) {
+               struct r10bio r10_bio;
+               int s;
+               r10_bio.sector = sector;
+               raid10_find_phys(conf, &r10_bio);
+               rcu_read_lock();
+               for (s = 0; s < conf->copies; s++) {
+                       int disk = r10_bio.devs[s].devnum;
+                       struct md_rdev *rdev = rcu_dereference(
+                               conf->mirrors[disk].rdev);
+                       if (rdev && !test_bit(Faulty, &rdev->flags)) {
+                               struct request_queue *q =
+                                       bdev_get_queue(rdev->bdev);
+                               if (q->merge_bvec_fn) {
+                                       bvm->bi_sector = r10_bio.devs[s].addr
+                                               + rdev->data_offset;
+                                       bvm->bi_bdev = rdev->bdev;
+                                       max = min(max, q->merge_bvec_fn(
+                                                         q, bvm, biovec));
+                               }
+                       }
+                       rdev = rcu_dereference(conf->mirrors[disk].replacement);
+                       if (rdev && !test_bit(Faulty, &rdev->flags)) {
+                               struct request_queue *q =
+                                       bdev_get_queue(rdev->bdev);
+                               if (q->merge_bvec_fn) {
+                                       bvm->bi_sector = r10_bio.devs[s].addr
+                                               + rdev->data_offset;
+                                       bvm->bi_bdev = rdev->bdev;
+                                       max = min(max, q->merge_bvec_fn(
+                                                         q, bvm, biovec));
+                               }
+                       }
+               }
+               rcu_read_unlock();
+       }
+       return max;
 }
 
 /*
@@ -518,14 +669,16 @@ static int raid10_mergeable_bvec(struct request_queue *q,
  * FIXME: possibly should rethink readbalancing and do it differently
  * depending on near_copies / far_copies geometry.
  */
-static int read_balance(conf_t *conf, r10bio_t *r10_bio, int *max_sectors)
+static struct md_rdev *read_balance(struct r10conf *conf,
+                                   struct r10bio *r10_bio,
+                                   int *max_sectors)
 {
        const sector_t this_sector = r10_bio->sector;
        int disk, slot;
        int sectors = r10_bio->sectors;
        int best_good_sectors;
        sector_t new_distance, best_dist;
-       mdk_rdev_t *rdev;
+       struct md_rdev *rdev, *best_rdev;
        int do_balance;
        int best_slot;
 
@@ -534,6 +687,7 @@ static int read_balance(conf_t *conf, r10bio_t *r10_bio, int *max_sectors)
 retry:
        sectors = r10_bio->sectors;
        best_slot = -1;
+       best_rdev = NULL;
        best_dist = MaxSector;
        best_good_sectors = 0;
        do_balance = 1;
@@ -555,10 +709,17 @@ retry:
                if (r10_bio->devs[slot].bio == IO_BLOCKED)
                        continue;
                disk = r10_bio->devs[slot].devnum;
-               rdev = rcu_dereference(conf->mirrors[disk].rdev);
-               if (rdev == NULL)
+               rdev = rcu_dereference(conf->mirrors[disk].replacement);
+               if (rdev == NULL || test_bit(Faulty, &rdev->flags) ||
+                   test_bit(Unmerged, &rdev->flags) ||
+                   r10_bio->devs[slot].addr + sectors > rdev->recovery_offset)
+                       rdev = rcu_dereference(conf->mirrors[disk].rdev);
+               if (rdev == NULL ||
+                   test_bit(Faulty, &rdev->flags) ||
+                   test_bit(Unmerged, &rdev->flags))
                        continue;
-               if (!test_bit(In_sync, &rdev->flags))
+               if (!test_bit(In_sync, &rdev->flags) &&
+                   r10_bio->devs[slot].addr + sectors > rdev->recovery_offset)
                        continue;
 
                dev_sector = r10_bio->devs[slot].addr;
@@ -583,6 +744,7 @@ retry:
                                if (good_sectors > best_good_sectors) {
                                        best_good_sectors = good_sectors;
                                        best_slot = slot;
+                                       best_rdev = rdev;
                                }
                                if (!do_balance)
                                        /* Must read from here */
@@ -611,16 +773,15 @@ retry:
                if (new_distance < best_dist) {
                        best_dist = new_distance;
                        best_slot = slot;
+                       best_rdev = rdev;
                }
        }
-       if (slot == conf->copies)
+       if (slot >= conf->copies) {
                slot = best_slot;
+               rdev = best_rdev;
+       }
 
        if (slot >= 0) {
-               disk = r10_bio->devs[slot].devnum;
-               rdev = rcu_dereference(conf->mirrors[disk].rdev);
-               if (!rdev)
-                       goto retry;
                atomic_inc(&rdev->nr_pending);
                if (test_bit(Faulty, &rdev->flags)) {
                        /* Cannot risk returning a device that failed
@@ -631,24 +792,28 @@ retry:
                }
                r10_bio->read_slot = slot;
        } else
-               disk = -1;
+               rdev = NULL;
        rcu_read_unlock();
        *max_sectors = best_good_sectors;
 
-       return disk;
+       return rdev;
 }
 
 static int raid10_congested(void *data, int bits)
 {
-       mddev_t *mddev = data;
-       conf_t *conf = mddev->private;
+       struct mddev *mddev = data;
+       struct r10conf *conf = mddev->private;
        int i, ret = 0;
 
+       if ((bits & (1 << BDI_async_congested)) &&
+           conf->pending_count >= max_queued_requests)
+               return 1;
+
        if (mddev_congested(mddev, bits))
                return 1;
        rcu_read_lock();
        for (i = 0; i < conf->raid_disks && ret == 0; i++) {
-               mdk_rdev_t *rdev = rcu_dereference(conf->mirrors[i].rdev);
+               struct md_rdev *rdev = rcu_dereference(conf->mirrors[i].rdev);
                if (rdev && !test_bit(Faulty, &rdev->flags)) {
                        struct request_queue *q = bdev_get_queue(rdev->bdev);
 
@@ -659,7 +824,7 @@ static int raid10_congested(void *data, int bits)
        return ret;
 }
 
-static void flush_pending_writes(conf_t *conf)
+static void flush_pending_writes(struct r10conf *conf)
 {
        /* Any writes that have been queued but are awaiting
         * bitmap updates get flushed here.
@@ -669,10 +834,12 @@ static void flush_pending_writes(conf_t *conf)
        if (conf->pending_bio_list.head) {
                struct bio *bio;
                bio = bio_list_get(&conf->pending_bio_list);
+               conf->pending_count = 0;
                spin_unlock_irq(&conf->device_lock);
                /* flush any pending bitmap writes to disk
                 * before proceeding w/ I/O */
                bitmap_unplug(conf->mddev->bitmap);
+               wake_up(&conf->wait_barrier);
 
                while (bio) { /* submit pending writes */
                        struct bio *next = bio->bi_next;
@@ -706,7 +873,7 @@ static void flush_pending_writes(conf_t *conf)
  *    lower_barrier when the particular background IO completes.
  */
 
-static void raise_barrier(conf_t *conf, int force)
+static void raise_barrier(struct r10conf *conf, int force)
 {
        BUG_ON(force && !conf->barrier);
        spin_lock_irq(&conf->resync_lock);
@@ -726,7 +893,7 @@ static void raise_barrier(conf_t *conf, int force)
        spin_unlock_irq(&conf->resync_lock);
 }
 
-static void lower_barrier(conf_t *conf)
+static void lower_barrier(struct r10conf *conf)
 {
        unsigned long flags;
        spin_lock_irqsave(&conf->resync_lock, flags);
@@ -735,21 +902,34 @@ static void lower_barrier(conf_t *conf)
        wake_up(&conf->wait_barrier);
 }
 
-static void wait_barrier(conf_t *conf)
+static void wait_barrier(struct r10conf *conf)
 {
        spin_lock_irq(&conf->resync_lock);
        if (conf->barrier) {
                conf->nr_waiting++;
-               wait_event_lock_irq(conf->wait_barrier, !conf->barrier,
+               /* Wait for the barrier to drop.
+                * However if there are already pending
+                * requests (preventing the barrier from
+                * rising completely), and the
+                * pre-process bio queue isn't empty,
+                * then don't wait, as we need to empty
+                * that queue to get the nr_pending
+                * count down.
+                */
+               wait_event_lock_irq(conf->wait_barrier,
+                                   !conf->barrier ||
+                                   (conf->nr_pending &&
+                                    current->bio_list &&
+                                    !bio_list_empty(current->bio_list)),
                                    conf->resync_lock,
-                                   );
+                       );
                conf->nr_waiting--;
        }
        conf->nr_pending++;
        spin_unlock_irq(&conf->resync_lock);
 }
 
-static void allow_barrier(conf_t *conf)
+static void allow_barrier(struct r10conf *conf)
 {
        unsigned long flags;
        spin_lock_irqsave(&conf->resync_lock, flags);
@@ -758,7 +938,7 @@ static void allow_barrier(conf_t *conf)
        wake_up(&conf->wait_barrier);
 }
 
-static void freeze_array(conf_t *conf)
+static void freeze_array(struct r10conf *conf)
 {
        /* stop syncio and normal IO and wait for everything to
         * go quiet.
@@ -783,7 +963,7 @@ static void freeze_array(conf_t *conf)
        spin_unlock_irq(&conf->resync_lock);
 }
 
-static void unfreeze_array(conf_t *conf)
+static void unfreeze_array(struct r10conf *conf)
 {
        /* reverse the effect of the freeze */
        spin_lock_irq(&conf->resync_lock);
@@ -793,11 +973,10 @@ static void unfreeze_array(conf_t *conf)
        spin_unlock_irq(&conf->resync_lock);
 }
 
-static int make_request(mddev_t *mddev, struct bio * bio)
+static void make_request(struct mddev *mddev, struct bio * bio)
 {
-       conf_t *conf = mddev->private;
-       mirror_info_t *mirror;
-       r10bio_t *r10_bio;
+       struct r10conf *conf = mddev->private;
+       struct r10bio *r10_bio;
        struct bio *read_bio;
        int i;
        int chunk_sects = conf->chunk_mask + 1;
@@ -805,12 +984,14 @@ static int make_request(mddev_t *mddev, struct bio * bio)
        const unsigned long do_sync = (bio->bi_rw & REQ_SYNC);
        const unsigned long do_fua = (bio->bi_rw & REQ_FUA);
        unsigned long flags;
-       mdk_rdev_t *blocked_rdev;
+       struct md_rdev *blocked_rdev;
        int plugged;
+       int sectors_handled;
+       int max_sectors;
 
        if (unlikely(bio->bi_rw & REQ_FLUSH)) {
                md_flush_request(mddev, bio);
-               return 0;
+               return;
        }
 
        /* If this request crosses a chunk boundary, we need to
@@ -842,10 +1023,8 @@ static int make_request(mddev_t *mddev, struct bio * bio)
                conf->nr_waiting++;
                spin_unlock_irq(&conf->resync_lock);
 
-               if (make_request(mddev, &bp->bio1))
-                       generic_make_request(&bp->bio1);
-               if (make_request(mddev, &bp->bio2))
-                       generic_make_request(&bp->bio2);
+               make_request(mddev, &bp->bio1);
+               make_request(mddev, &bp->bio2);
 
                spin_lock_irq(&conf->resync_lock);
                conf->nr_waiting--;
@@ -853,14 +1032,14 @@ static int make_request(mddev_t *mddev, struct bio * bio)
                spin_unlock_irq(&conf->resync_lock);
 
                bio_pair_release(bp);
-               return 0;
+               return;
        bad_map:
                printk("md/raid10:%s: make_request bug: can't convert block across chunks"
                       " or bigger than %dk %llu %d\n", mdname(mddev), chunk_sects/2,
                       (unsigned long long)bio->bi_sector, bio->bi_size >> 10);
 
                bio_io_error(bio);
-               return 0;
+               return;
        }
 
        md_write_start(mddev, bio);
@@ -895,28 +1074,27 @@ static int make_request(mddev_t *mddev, struct bio * bio)
                /*
                 * read balancing logic:
                 */
-               int max_sectors;
-               int disk;
+               struct md_rdev *rdev;
                int slot;
 
 read_again:
-               disk = read_balance(conf, r10_bio, &max_sectors);
-               slot = r10_bio->read_slot;
-               if (disk < 0) {
+               rdev = read_balance(conf, r10_bio, &max_sectors);
+               if (!rdev) {
                        raid_end_bio_io(r10_bio);
-                       return 0;
+                       return;
                }
-               mirror = conf->mirrors + disk;
+               slot = r10_bio->read_slot;
 
                read_bio = bio_clone_mddev(bio, GFP_NOIO, mddev);
                md_trim_bio(read_bio, r10_bio->sector - bio->bi_sector,
                            max_sectors);
 
                r10_bio->devs[slot].bio = read_bio;
+               r10_bio->devs[slot].rdev = rdev;
 
                read_bio->bi_sector = r10_bio->devs[slot].addr +
-                       mirror->rdev->data_offset;
-               read_bio->bi_bdev = mirror->rdev->bdev;
+                       rdev->data_offset;
+               read_bio->bi_bdev = rdev->bdev;
                read_bio->bi_end_io = raid10_end_read_request;
                read_bio->bi_rw = READ | do_sync;
                read_bio->bi_private = r10_bio;
@@ -925,8 +1103,6 @@ read_again:
                        /* Could not read all from this device, so we will
                         * need another r10_bio.
                         */
-                       int sectors_handled;
-
                        sectors_handled = (r10_bio->sectors + max_sectors
                                           - bio->bi_sector);
                        r10_bio->sectors = max_sectors;
@@ -954,36 +1130,112 @@ read_again:
                        goto read_again;
                } else
                        generic_make_request(read_bio);
-               return 0;
+               return;
        }
 
        /*
         * WRITE:
         */
+       if (conf->pending_count >= max_queued_requests) {
+               md_wakeup_thread(mddev->thread);
+               wait_event(conf->wait_barrier,
+                          conf->pending_count < max_queued_requests);
+       }
        /* first select target devices under rcu_lock and
         * inc refcount on their rdev.  Record them by setting
         * bios[x] to bio
+        * If there are known/acknowledged bad blocks on any device
+        * on which we have seen a write error, we want to avoid
+        * writing to those blocks.  This potentially requires several
+        * writes to write around the bad blocks.  Each set of writes
+        * gets its own r10_bio with a set of bios attached.  The number
+        * of r10_bios is recorded in bio->bi_phys_segments just as with
+        * the read case.
         */
        plugged = mddev_check_plugged(mddev);
 
+       r10_bio->read_slot = -1; /* make sure repl_bio gets freed */
        raid10_find_phys(conf, r10_bio);
- retry_write:
+retry_write:
        blocked_rdev = NULL;
        rcu_read_lock();
+       max_sectors = r10_bio->sectors;
+
        for (i = 0;  i < conf->copies; i++) {
                int d = r10_bio->devs[i].devnum;
-               mdk_rdev_t *rdev = rcu_dereference(conf->mirrors[d].rdev);
+               struct md_rdev *rdev = rcu_dereference(conf->mirrors[d].rdev);
+               struct md_rdev *rrdev = rcu_dereference(
+                       conf->mirrors[d].replacement);
+               if (rdev == rrdev)
+                       rrdev = NULL;
                if (rdev && unlikely(test_bit(Blocked, &rdev->flags))) {
                        atomic_inc(&rdev->nr_pending);
                        blocked_rdev = rdev;
                        break;
                }
-               if (rdev && !test_bit(Faulty, &rdev->flags)) {
-                       atomic_inc(&rdev->nr_pending);
-                       r10_bio->devs[i].bio = bio;
-               } else {
-                       r10_bio->devs[i].bio = NULL;
+               if (rrdev && unlikely(test_bit(Blocked, &rrdev->flags))) {
+                       atomic_inc(&rrdev->nr_pending);
+                       blocked_rdev = rrdev;
+                       break;
+               }
+               if (rrdev && (test_bit(Faulty, &rrdev->flags)
+                             || test_bit(Unmerged, &rrdev->flags)))
+                       rrdev = NULL;
+
+               r10_bio->devs[i].bio = NULL;
+               r10_bio->devs[i].repl_bio = NULL;
+               if (!rdev || test_bit(Faulty, &rdev->flags) ||
+                   test_bit(Unmerged, &rdev->flags)) {
                        set_bit(R10BIO_Degraded, &r10_bio->state);
+                       continue;
+               }
+               if (test_bit(WriteErrorSeen, &rdev->flags)) {
+                       sector_t first_bad;
+                       sector_t dev_sector = r10_bio->devs[i].addr;
+                       int bad_sectors;
+                       int is_bad;
+
+                       is_bad = is_badblock(rdev, dev_sector,
+                                            max_sectors,
+                                            &first_bad, &bad_sectors);
+                       if (is_bad < 0) {
+                               /* Mustn't write here until the bad block
+                                * is acknowledged
+                                */
+                               atomic_inc(&rdev->nr_pending);
+                               set_bit(BlockedBadBlocks, &rdev->flags);
+                               blocked_rdev = rdev;
+                               break;
+                       }
+                       if (is_bad && first_bad <= dev_sector) {
+                               /* Cannot write here at all */
+                               bad_sectors -= (dev_sector - first_bad);
+                               if (bad_sectors < max_sectors)
+                                       /* Mustn't write more than bad_sectors
+                                        * to other devices yet
+                                        */
+                                       max_sectors = bad_sectors;
+                               /* We don't set R10BIO_Degraded as that
+                                * only applies if the disk is missing,
+                                * so it might be re-added, and we want to
+                                * know to recover this chunk.
+                                * In this case the device is here, and the
+                                * fact that this chunk is not in-sync is
+                                * recorded in the bad block log.
+                                */
+                               continue;
+                       }
+                       if (is_bad) {
+                               int good_sectors = first_bad - dev_sector;
+                               if (good_sectors < max_sectors)
+                                       max_sectors = good_sectors;
+                       }
+               }
+               r10_bio->devs[i].bio = bio;
+               atomic_inc(&rdev->nr_pending);
+               if (rrdev) {
+                       r10_bio->devs[i].repl_bio = bio;
+                       atomic_inc(&rrdev->nr_pending);
                }
        }
        rcu_read_unlock();
@@ -993,19 +1245,45 @@ read_again:
                int j;
                int d;
 
-               for (j = 0; j < i; j++)
+               for (j = 0; j < i; j++) {
                        if (r10_bio->devs[j].bio) {
                                d = r10_bio->devs[j].devnum;
                                rdev_dec_pending(conf->mirrors[d].rdev, mddev);
                        }
+                       if (r10_bio->devs[j].repl_bio) {
+                               struct md_rdev *rdev;
+                               d = r10_bio->devs[j].devnum;
+                               rdev = conf->mirrors[d].replacement;
+                               if (!rdev) {
+                                       /* Race with remove_disk */
+                                       smp_mb();
+                                       rdev = conf->mirrors[d].rdev;
+                               }
+                               rdev_dec_pending(rdev, mddev);
+                       }
+               }
                allow_barrier(conf);
                md_wait_for_blocked_rdev(blocked_rdev, mddev);
                wait_barrier(conf);
                goto retry_write;
        }
 
+       if (max_sectors < r10_bio->sectors) {
+               /* We are splitting this into multiple parts, so
+                * we need to prepare for allocating another r10_bio.
+                */
+               r10_bio->sectors = max_sectors;
+               spin_lock_irq(&conf->device_lock);
+               if (bio->bi_phys_segments == 0)
+                       bio->bi_phys_segments = 2;
+               else
+                       bio->bi_phys_segments++;
+               spin_unlock_irq(&conf->device_lock);
+       }
+       sectors_handled = r10_bio->sector + max_sectors - bio->bi_sector;
+
        atomic_set(&r10_bio->remaining, 1);
-       bitmap_startwrite(mddev->bitmap, bio->bi_sector, r10_bio->sectors, 0);
+       bitmap_startwrite(mddev->bitmap, r10_bio->sector, r10_bio->sectors, 0);
 
        for (i = 0; i < conf->copies; i++) {
                struct bio *mbio;
@@ -1014,10 +1292,12 @@ read_again:
                        continue;
 
                mbio = bio_clone_mddev(bio, GFP_NOIO, mddev);
+               md_trim_bio(mbio, r10_bio->sector - bio->bi_sector,
+                           max_sectors);
                r10_bio->devs[i].bio = mbio;
 
-               mbio->bi_sector = r10_bio->devs[i].addr+
-                       conf->mirrors[d].rdev->data_offset;
+               mbio->bi_sector = (r10_bio->devs[i].addr+
+                                  conf->mirrors[d].rdev->data_offset);
                mbio->bi_bdev = conf->mirrors[d].rdev->bdev;
                mbio->bi_end_io = raid10_end_write_request;
                mbio->bi_rw = WRITE | do_sync | do_fua;
@@ -1026,30 +1306,66 @@ read_again:
                atomic_inc(&r10_bio->remaining);
                spin_lock_irqsave(&conf->device_lock, flags);
                bio_list_add(&conf->pending_bio_list, mbio);
+               conf->pending_count++;
+               spin_unlock_irqrestore(&conf->device_lock, flags);
+
+               if (!r10_bio->devs[i].repl_bio)
+                       continue;
+
+               mbio = bio_clone_mddev(bio, GFP_NOIO, mddev);
+               md_trim_bio(mbio, r10_bio->sector - bio->bi_sector,
+                           max_sectors);
+               r10_bio->devs[i].repl_bio = mbio;
+
+               /* We are actively writing to the original device
+                * so it cannot disappear, so the replacement cannot
+                * become NULL here
+                */
+               mbio->bi_sector = (r10_bio->devs[i].addr+
+                                  conf->mirrors[d].replacement->data_offset);
+               mbio->bi_bdev = conf->mirrors[d].replacement->bdev;
+               mbio->bi_end_io = raid10_end_write_request;
+               mbio->bi_rw = WRITE | do_sync | do_fua;
+               mbio->bi_private = r10_bio;
+
+               atomic_inc(&r10_bio->remaining);
+               spin_lock_irqsave(&conf->device_lock, flags);
+               bio_list_add(&conf->pending_bio_list, mbio);
+               conf->pending_count++;
                spin_unlock_irqrestore(&conf->device_lock, flags);
        }
 
-       if (atomic_dec_and_test(&r10_bio->remaining)) {
-               /* This matches the end of raid10_end_write_request() */
-               bitmap_endwrite(r10_bio->mddev->bitmap, r10_bio->sector,
-                               r10_bio->sectors,
-                               !test_bit(R10BIO_Degraded, &r10_bio->state),
-                               0);
-               md_write_end(mddev);
-               raid_end_bio_io(r10_bio);
+       /* Don't remove the bias on 'remaining' (one_write_done) until
+        * after checking if we need to go around again.
+        */
+
+       if (sectors_handled < (bio->bi_size >> 9)) {
+               one_write_done(r10_bio);
+               /* We need another r10_bio.  It has already been counted
+                * in bio->bi_phys_segments.
+                */
+               r10_bio = mempool_alloc(conf->r10bio_pool, GFP_NOIO);
+
+               r10_bio->master_bio = bio;
+               r10_bio->sectors = (bio->bi_size >> 9) - sectors_handled;
+
+               r10_bio->mddev = mddev;
+               r10_bio->sector = bio->bi_sector + sectors_handled;
+               r10_bio->state = 0;
+               goto retry_write;
        }
+       one_write_done(r10_bio);
 
        /* In case raid10d snuck in to freeze_array */
        wake_up(&conf->wait_barrier);
 
        if (do_sync || !mddev->bitmap || !plugged)
                md_wakeup_thread(mddev->thread);
-       return 0;
 }
 
-static void status(struct seq_file *seq, mddev_t *mddev)
+static void status(struct seq_file *seq, struct mddev *mddev)
 {
-       conf_t *conf = mddev->private;
+       struct r10conf *conf = mddev->private;
        int i;
 
        if (conf->near_copies < conf->raid_disks)
@@ -1076,7 +1392,7 @@ static void status(struct seq_file *seq, mddev_t *mddev)
  * Don't consider the device numbered 'ignore'
  * as we might be about to remove it.
  */
-static int enough(conf_t *conf, int ignore)
+static int enough(struct r10conf *conf, int ignore)
 {
        int first = 0;
 
@@ -1095,10 +1411,10 @@ static int enough(conf_t *conf, int ignore)
        return 1;
 }
 
-static void error(mddev_t *mddev, mdk_rdev_t *rdev)
+static void error(struct mddev *mddev, struct md_rdev *rdev)
 {
        char b[BDEVNAME_SIZE];
-       conf_t *conf = mddev->private;
+       struct r10conf *conf = mddev->private;
 
        /*
         * If it is not operational, then we have already marked it as dead
@@ -1132,10 +1448,10 @@ static void error(mddev_t *mddev, mdk_rdev_t *rdev)
               mdname(mddev), conf->raid_disks - mddev->degraded);
 }
 
-static void print_conf(conf_t *conf)
+static void print_conf(struct r10conf *conf)
 {
        int i;
-       mirror_info_t *tmp;
+       struct mirror_info *tmp;
 
        printk(KERN_DEBUG "RAID10 conf printout:\n");
        if (!conf) {
@@ -1156,7 +1472,7 @@ static void print_conf(conf_t *conf)
        }
 }
 
-static void close_sync(conf_t *conf)
+static void close_sync(struct r10conf *conf)
 {
        wait_barrier(conf);
        allow_barrier(conf);
@@ -1165,11 +1481,11 @@ static void close_sync(conf_t *conf)
        conf->r10buf_pool = NULL;
 }
 
-static int raid10_spare_active(mddev_t *mddev)
+static int raid10_spare_active(struct mddev *mddev)
 {
        int i;
-       conf_t *conf = mddev->private;
-       mirror_info_t *tmp;
+       struct r10conf *conf = mddev->private;
+       struct mirror_info *tmp;
        int count = 0;
        unsigned long flags;
 
@@ -1179,9 +1495,27 @@ static int raid10_spare_active(mddev_t *mddev)
         */
        for (i = 0; i < conf->raid_disks; i++) {
                tmp = conf->mirrors + i;
-               if (tmp->rdev
-                   && !test_bit(Faulty, &tmp->rdev->flags)
-                   && !test_and_set_bit(In_sync, &tmp->rdev->flags)) {
+               if (tmp->replacement
+                   && tmp->replacement->recovery_offset == MaxSector
+                   && !test_bit(Faulty, &tmp->replacement->flags)
+                   && !test_and_set_bit(In_sync, &tmp->replacement->flags)) {
+                       /* Replacement has just become active */
+                       if (!tmp->rdev
+                           || !test_and_clear_bit(In_sync, &tmp->rdev->flags))
+                               count++;
+                       if (tmp->rdev) {
+                               /* Replaced device not technically faulty,
+                                * but we need to be sure it gets removed
+                                * and never re-added.
+                                */
+                               set_bit(Faulty, &tmp->rdev->flags);
+                               sysfs_notify_dirent_safe(
+                                       tmp->rdev->sysfs_state);
+                       }
+                       sysfs_notify_dirent_safe(tmp->replacement->sysfs_state);
+               } else if (tmp->rdev
+                          && !test_bit(Faulty, &tmp->rdev->flags)
+                          && !test_and_set_bit(In_sync, &tmp->rdev->flags)) {
                        count++;
                        sysfs_notify_dirent(tmp->rdev->sysfs_state);
                }
@@ -1195,52 +1529,60 @@ static int raid10_spare_active(mddev_t *mddev)
 }
 
 
-static int raid10_add_disk(mddev_t *mddev, mdk_rdev_t *rdev)
+static int raid10_add_disk(struct mddev *mddev, struct md_rdev *rdev)
 {
-       conf_t *conf = mddev->private;
+       struct r10conf *conf = mddev->private;
        int err = -EEXIST;
        int mirror;
        int first = 0;
        int last = conf->raid_disks - 1;
+       struct request_queue *q = bdev_get_queue(rdev->bdev);
 
        if (mddev->recovery_cp < MaxSector)
                /* only hot-add to in-sync arrays, as recovery is
                 * very different from resync
                 */
                return -EBUSY;
-       if (!enough(conf, -1))
+       if (rdev->saved_raid_disk < 0 && !enough(conf, -1))
                return -EINVAL;
 
        if (rdev->raid_disk >= 0)
                first = last = rdev->raid_disk;
 
+       if (q->merge_bvec_fn) {
+               set_bit(Unmerged, &rdev->flags);
+               mddev->merge_check_needed = 1;
+       }
+
        if (rdev->saved_raid_disk >= first &&
            conf->mirrors[rdev->saved_raid_disk].rdev == NULL)
                mirror = rdev->saved_raid_disk;
        else
                mirror = first;
        for ( ; mirror <= last ; mirror++) {
-               mirror_info_t *p = &conf->mirrors[mirror];
+               struct mirror_info *p = &conf->mirrors[mirror];
                if (p->recovery_disabled == mddev->recovery_disabled)
                        continue;
-               if (!p->rdev)
-                       continue;
+               if (p->rdev) {
+                       if (!test_bit(WantReplacement, &p->rdev->flags) ||
+                           p->replacement != NULL)
+                               continue;
+                       clear_bit(In_sync, &rdev->flags);
+                       set_bit(Replacement, &rdev->flags);
+                       rdev->raid_disk = mirror;
+                       err = 0;
+                       disk_stack_limits(mddev->gendisk, rdev->bdev,
+                                         rdev->data_offset << 9);
+                       conf->fullsync = 1;
+                       rcu_assign_pointer(p->replacement, rdev);
+                       break;
+               }
 
                disk_stack_limits(mddev->gendisk, rdev->bdev,
                                  rdev->data_offset << 9);
-               /* as we don't honour merge_bvec_fn, we must
-                * never risk violating it, so limit
-                * ->max_segments to one lying with a single
-                * page, as a one page request is never in
-                * violation.
-                */
-               if (rdev->bdev->bd_disk->queue->merge_bvec_fn) {
-                       blk_queue_max_segments(mddev->queue, 1);
-                       blk_queue_segment_boundary(mddev->queue,
-                                                  PAGE_CACHE_SIZE - 1);
-               }
 
                p->head_position = 0;
+               p->recovery_disabled = mddev->recovery_disabled - 1;
                rdev->raid_disk = mirror;
                err = 0;
                if (rdev->saved_raid_disk != mirror)
@@ -1248,46 +1590,79 @@ static int raid10_add_disk(mddev_t *mddev, mdk_rdev_t *rdev)
                rcu_assign_pointer(p->rdev, rdev);
                break;
        }
-
+       if (err == 0 && test_bit(Unmerged, &rdev->flags)) {
+               /* Some requests might not have seen this new
+                * merge_bvec_fn.  We must wait for them to complete
+                * before merging the device fully.
+                * First we make sure any code which has tested
+                * our function has submitted the request, then
+                * we wait for all outstanding requests to complete.
+                */
+               synchronize_sched();
+               raise_barrier(conf, 0);
+               lower_barrier(conf);
+               clear_bit(Unmerged, &rdev->flags);
+       }
        md_integrity_add_rdev(rdev, mddev);
        print_conf(conf);
        return err;
 }
 
-static int raid10_remove_disk(mddev_t *mddev, int number)
+static int raid10_remove_disk(struct mddev *mddev, struct md_rdev *rdev)
 {
-       conf_t *conf = mddev->private;
+       struct r10conf *conf = mddev->private;
        int err = 0;
-       mdk_rdev_t *rdev;
-       mirror_info_t *p = conf->mirrors+ number;
+       int number = rdev->raid_disk;
+       struct md_rdev **rdevp;
+       struct mirror_info *p = conf->mirrors + number;
 
        print_conf(conf);
-       rdev = p->rdev;
-       if (rdev) {
-               if (test_bit(In_sync, &rdev->flags) ||
-                   atomic_read(&rdev->nr_pending)) {
-                       err = -EBUSY;
-                       goto abort;
-               }
-               /* Only remove faulty devices in recovery
-                * is not possible.
-                */
-               if (!test_bit(Faulty, &rdev->flags) &&
-                   mddev->recovery_disabled != p->recovery_disabled &&
-                   enough(conf, -1)) {
-                       err = -EBUSY;
-                       goto abort;
-               }
-               p->rdev = NULL;
-               synchronize_rcu();
-               if (atomic_read(&rdev->nr_pending)) {
-                       /* lost the race, try later */
-                       err = -EBUSY;
-                       p->rdev = rdev;
-                       goto abort;
-               }
-               err = md_integrity_register(mddev);
+       if (rdev == p->rdev)
+               rdevp = &p->rdev;
+       else if (rdev == p->replacement)
+               rdevp = &p->replacement;
+       else
+               return 0;
+
+       if (test_bit(In_sync, &rdev->flags) ||
+           atomic_read(&rdev->nr_pending)) {
+               err = -EBUSY;
+               goto abort;
        }
+       /* Only remove faulty devices if recovery
+        * is not possible.
+        */
+       if (!test_bit(Faulty, &rdev->flags) &&
+           mddev->recovery_disabled != p->recovery_disabled &&
+           (!p->replacement || p->replacement == rdev) &&
+           enough(conf, -1)) {
+               err = -EBUSY;
+               goto abort;
+       }
+       *rdevp = NULL;
+       synchronize_rcu();
+       if (atomic_read(&rdev->nr_pending)) {
+               /* lost the race, try later */
+               err = -EBUSY;
+               *rdevp = rdev;
+               goto abort;
+       } else if (p->replacement) {
+               /* We must have just cleared 'rdev' */
+               p->rdev = p->replacement;
+               clear_bit(Replacement, &p->replacement->flags);
+               smp_mb(); /* Make sure other CPUs may see both as identical
+                          * but will never see neither -- if they are careful.
+                          */
+               p->replacement = NULL;
+               clear_bit(WantReplacement, &rdev->flags);
+       } else
+               /* We might have just removed the Replacement as faulty.
+                * Clear the flag just in case.
+                */
+               clear_bit(WantReplacement, &rdev->flags);
+
+       err = md_integrity_register(mddev);
+
 abort:
 
        print_conf(conf);
@@ -1297,21 +1672,20 @@ abort:
 
 static void end_sync_read(struct bio *bio, int error)
 {
-       r10bio_t *r10_bio = bio->bi_private;
-       conf_t *conf = r10_bio->mddev->private;
+       struct r10bio *r10_bio = bio->bi_private;
+       struct r10conf *conf = r10_bio->mddev->private;
        int d;
 
-       d = find_bio_disk(conf, r10_bio, bio);
+       d = find_bio_disk(conf, r10_bio, bio, NULL, NULL);
 
        if (test_bit(BIO_UPTODATE, &bio->bi_flags))
                set_bit(R10BIO_Uptodate, &r10_bio->state);
-       else {
+       else
+               /* The write handler will notice the lack of
+                * R10BIO_Uptodate and record any errors etc
+                */
                atomic_add(r10_bio->sectors,
                           &conf->mirrors[d].rdev->corrected_errors);
-               if (!test_bit(MD_RECOVERY_SYNC, &conf->mddev->recovery))
-                       md_error(r10_bio->mddev,
-                                conf->mirrors[d].rdev);
-       }
 
        /* for reconstruct, we always reschedule after a read.
         * for resync, only after all reads
@@ -1326,35 +1700,73 @@ static void end_sync_read(struct bio *bio, int error)
        }
 }
 
-static void end_sync_write(struct bio *bio, int error)
+static void end_sync_request(struct r10bio *r10_bio)
 {
-       int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
-       r10bio_t *r10_bio = bio->bi_private;
-       mddev_t *mddev = r10_bio->mddev;
-       conf_t *conf = mddev->private;
-       int d;
-
-       d = find_bio_disk(conf, r10_bio, bio);
-
-       if (!uptodate)
-               md_error(mddev, conf->mirrors[d].rdev);
+       struct mddev *mddev = r10_bio->mddev;
 
-       rdev_dec_pending(conf->mirrors[d].rdev, mddev);
        while (atomic_dec_and_test(&r10_bio->remaining)) {
                if (r10_bio->master_bio == NULL) {
                        /* the primary of several recovery bios */
                        sector_t s = r10_bio->sectors;
-                       put_buf(r10_bio);
+                       if (test_bit(R10BIO_MadeGood, &r10_bio->state) ||
+                           test_bit(R10BIO_WriteError, &r10_bio->state))
+                               reschedule_retry(r10_bio);
+                       else
+                               put_buf(r10_bio);
                        md_done_sync(mddev, s, 1);
                        break;
                } else {
-                       r10bio_t *r10_bio2 = (r10bio_t *)r10_bio->master_bio;
-                       put_buf(r10_bio);
+                       struct r10bio *r10_bio2 = (struct r10bio *)r10_bio->master_bio;
+                       if (test_bit(R10BIO_MadeGood, &r10_bio->state) ||
+                           test_bit(R10BIO_WriteError, &r10_bio->state))
+                               reschedule_retry(r10_bio);
+                       else
+                               put_buf(r10_bio);
                        r10_bio = r10_bio2;
                }
        }
 }
 
+static void end_sync_write(struct bio *bio, int error)
+{
+       int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
+       struct r10bio *r10_bio = bio->bi_private;
+       struct mddev *mddev = r10_bio->mddev;
+       struct r10conf *conf = mddev->private;
+       int d;
+       sector_t first_bad;
+       int bad_sectors;
+       int slot;
+       int repl;
+       struct md_rdev *rdev = NULL;
+
+       d = find_bio_disk(conf, r10_bio, bio, &slot, &repl);
+       if (repl)
+               rdev = conf->mirrors[d].replacement;
+       else
+               rdev = conf->mirrors[d].rdev;
+
+       if (!uptodate) {
+               if (repl)
+                       md_error(mddev, rdev);
+               else {
+                       set_bit(WriteErrorSeen, &rdev->flags);
+                       if (!test_and_set_bit(WantReplacement, &rdev->flags))
+                               set_bit(MD_RECOVERY_NEEDED,
+                                       &rdev->mddev->recovery);
+                       set_bit(R10BIO_WriteError, &r10_bio->state);
+               }
+       } else if (is_badblock(rdev,
+                            r10_bio->devs[slot].addr,
+                            r10_bio->sectors,
+                            &first_bad, &bad_sectors))
+               set_bit(R10BIO_MadeGood, &r10_bio->state);
+
+       rdev_dec_pending(rdev, mddev);
+
+       end_sync_request(r10_bio);
+}
+
 /*
  * Note: sync and recover are handled very differently for raid10
  * This code is for resync.
@@ -1371,11 +1783,12 @@ static void end_sync_write(struct bio *bio, int error)
  * We check if all blocks are in-sync and only write to blocks that
  * aren't in sync
  */
-static void sync_request_write(mddev_t *mddev, r10bio_t *r10_bio)
+static void sync_request_write(struct mddev *mddev, struct r10bio *r10_bio)
 {
-       conf_t *conf = mddev->private;
+       struct r10conf *conf = mddev->private;
        int i, first;
        struct bio *tbio, *fbio;
+       int vcnt;
 
        atomic_set(&r10_bio->remaining, 1);
 
@@ -1390,10 +1803,10 @@ static void sync_request_write(mddev_t *mddev, r10bio_t *r10_bio)
        first = i;
        fbio = r10_bio->devs[i].bio;
 
+       vcnt = (r10_bio->sectors + (PAGE_SIZE >> 9) - 1) >> (PAGE_SHIFT - 9);
        /* now find blocks with errors */
        for (i=0 ; i < conf->copies ; i++) {
                int  j, d;
-               int vcnt = r10_bio->sectors >> (PAGE_SHIFT-9);
 
                tbio = r10_bio->devs[i].bio;
 
@@ -1409,16 +1822,17 @@ static void sync_request_write(mddev_t *mddev, r10bio_t *r10_bio)
                        for (j = 0; j < vcnt; j++)
                                if (memcmp(page_address(fbio->bi_io_vec[j].bv_page),
                                           page_address(tbio->bi_io_vec[j].bv_page),
-                                          PAGE_SIZE))
+                                          fbio->bi_io_vec[j].bv_len))
                                        break;
                        if (j == vcnt)
                                continue;
                        mddev->resync_mismatches += r10_bio->sectors;
+                       if (test_bit(MD_RECOVERY_CHECK, &mddev->recovery))
+                               /* Don't fix anything. */
+                               continue;
                }
-               if (test_bit(MD_RECOVERY_CHECK, &mddev->recovery))
-                       /* Don't fix anything. */
-                       continue;
-               /* Ok, we need to write this bio
+               /* Ok, we need to write this bio, either to correct an
+                * inconsistency or to correct an unreadable block.
                 * First we need to fixup bv_offset, bv_len and
                 * bi_vecs, as the read request might have corrupted these
                 */
@@ -1453,6 +1867,28 @@ static void sync_request_write(mddev_t *mddev, r10bio_t *r10_bio)
                generic_make_request(tbio);
        }
 
+       /* Now write out to any replacement devices
+        * that are active
+        */
+       for (i = 0; i < conf->copies; i++) {
+               int j, d;
+
+               tbio = r10_bio->devs[i].repl_bio;
+               if (!tbio || !tbio->bi_end_io)
+                       continue;
+               if (r10_bio->devs[i].bio->bi_end_io != end_sync_write
+                   && r10_bio->devs[i].bio != fbio)
+                       for (j = 0; j < vcnt; j++)
+                               memcpy(page_address(tbio->bi_io_vec[j].bv_page),
+                                      page_address(fbio->bi_io_vec[j].bv_page),
+                                      PAGE_SIZE);
+               d = r10_bio->devs[i].devnum;
+               atomic_inc(&r10_bio->remaining);
+               md_sync_acct(conf->mirrors[d].replacement->bdev,
+                            tbio->bi_size >> 9);
+               generic_make_request(tbio);
+       }
+
 done:
        if (atomic_dec_and_test(&r10_bio->remaining)) {
                md_done_sync(mddev, r10_bio->sectors, 1);
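
The vcnt calculation above rounds the resync range up to whole pages, so a request that ends mid-page still has its final, partial page compared (and the memcmp now checks only bv_len bytes of that last page rather than a full PAGE_SIZE). A minimal userspace sketch of the same round-up, assuming 4 KiB pages so that PAGE_SIZE >> 9 is 8 sectors:

#include <stdio.h>

#define SECTORS_PER_PAGE 8	/* PAGE_SIZE >> 9 for 4 KiB pages (assumption) */

/* Same arithmetic as the vcnt line above: round sectors up to pages. */
static int pages_for(int sectors)
{
	return (sectors + SECTORS_PER_PAGE - 1) / SECTORS_PER_PAGE;
}

int main(void)
{
	/* 20 sectors (10 KiB) span 3 pages; the old "sectors >> 3"
	 * reported 2 and the tail page was never checked.
	 */
	printf("%d pages\n", pages_for(20));
	return 0;
}
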
@@ -1470,31 +1906,119 @@ done:
  * The second for writing.
  *
  */
+static void fix_recovery_read_error(struct r10bio *r10_bio)
+{
+       /* We got a read error during recovery.
+        * We repeat the read in smaller page-sized sections.
+        * If a read succeeds, write it to the new device or record
+        * a bad block if we cannot.
+        * If a read fails, record a bad block on both old and
+        * new devices.
+        */
+       struct mddev *mddev = r10_bio->mddev;
+       struct r10conf *conf = mddev->private;
+       struct bio *bio = r10_bio->devs[0].bio;
+       sector_t sect = 0;
+       int sectors = r10_bio->sectors;
+       int idx = 0;
+       int dr = r10_bio->devs[0].devnum;
+       int dw = r10_bio->devs[1].devnum;
+
+       while (sectors) {
+               int s = sectors;
+               struct md_rdev *rdev;
+               sector_t addr;
+               int ok;
 
-static void recovery_request_write(mddev_t *mddev, r10bio_t *r10_bio)
+               if (s > (PAGE_SIZE>>9))
+                       s = PAGE_SIZE >> 9;
+
+               rdev = conf->mirrors[dr].rdev;
+               addr = r10_bio->devs[0].addr + sect;
+               ok = sync_page_io(rdev,
+                                 addr,
+                                 s << 9,
+                                 bio->bi_io_vec[idx].bv_page,
+                                 READ, false);
+               if (ok) {
+                       rdev = conf->mirrors[dw].rdev;
+                       addr = r10_bio->devs[1].addr + sect;
+                       ok = sync_page_io(rdev,
+                                         addr,
+                                         s << 9,
+                                         bio->bi_io_vec[idx].bv_page,
+                                         WRITE, false);
+                       if (!ok) {
+                               set_bit(WriteErrorSeen, &rdev->flags);
+                               if (!test_and_set_bit(WantReplacement,
+                                                     &rdev->flags))
+                                       set_bit(MD_RECOVERY_NEEDED,
+                                               &rdev->mddev->recovery);
+                       }
+               }
+               if (!ok) {
+                       /* We don't worry if we cannot set a bad block -
+                        * it really is bad so there is no loss in not
+                        * recording it yet
+                        */
+                       rdev_set_badblocks(rdev, addr, s, 0);
+
+                       if (rdev != conf->mirrors[dw].rdev) {
+                               /* need bad block on destination too */
+                               struct md_rdev *rdev2 = conf->mirrors[dw].rdev;
+                               addr = r10_bio->devs[1].addr + sect;
+                               ok = rdev_set_badblocks(rdev2, addr, s, 0);
+                               if (!ok) {
+                                       /* just abort the recovery */
+                                       printk(KERN_NOTICE
+                                              "md/raid10:%s: recovery aborted"
+                                              " due to read error\n",
+                                              mdname(mddev));
+
+                                       conf->mirrors[dw].recovery_disabled
+                                               = mddev->recovery_disabled;
+                                       set_bit(MD_RECOVERY_INTR,
+                                               &mddev->recovery);
+                                       break;
+                               }
+                       }
+               }
+
+               sectors -= s;
+               sect += s;
+               idx++;
+       }
+}
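
A small sketch of the chunking used above: the failed range is retried in steps of at most one page (PAGE_SIZE >> 9 sectors), advancing one r10_bio page per step, so a 20-sector range is retried as 8 + 8 + 4 sectors. The numbers below are illustrative and assume 4 KiB pages; the actual reads, writes and bad-block recording are not modelled:

#include <stdio.h>

#define SECTORS_PER_PAGE 8	/* PAGE_SIZE >> 9, assuming 4 KiB pages */

int main(void)
{
	int sectors = 20;	/* r10_bio->sectors in this example */
	int sect = 0, idx = 0;

	while (sectors) {
		int s = sectors;

		if (s > SECTORS_PER_PAGE)
			s = SECTORS_PER_PAGE;
		printf("retry %d sectors at offset %d using page %d\n",
		       s, sect, idx);
		sectors -= s;
		sect += s;
		idx++;
	}
	return 0;
}
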
+
+static void recovery_request_write(struct mddev *mddev, struct r10bio *r10_bio)
 {
-       conf_t *conf = mddev->private;
+       struct r10conf *conf = mddev->private;
        int d;
-       struct bio *wbio;
+       struct bio *wbio, *wbio2;
+
+       if (!test_bit(R10BIO_Uptodate, &r10_bio->state)) {
+               fix_recovery_read_error(r10_bio);
+               end_sync_request(r10_bio);
+               return;
+       }
 
        /*
         * share the pages with the first bio
         * and submit the write request
         */
-       wbio = r10_bio->devs[1].bio;
        d = r10_bio->devs[1].devnum;
-
-       atomic_inc(&conf->mirrors[d].rdev->nr_pending);
-       md_sync_acct(conf->mirrors[d].rdev->bdev, wbio->bi_size >> 9);
-       if (test_bit(R10BIO_Uptodate, &r10_bio->state))
+       wbio = r10_bio->devs[1].bio;
+       wbio2 = r10_bio->devs[1].repl_bio;
+       if (wbio->bi_end_io) {
+               atomic_inc(&conf->mirrors[d].rdev->nr_pending);
+               md_sync_acct(conf->mirrors[d].rdev->bdev, wbio->bi_size >> 9);
                generic_make_request(wbio);
-       else {
-               printk(KERN_NOTICE
-                      "md/raid10:%s: recovery aborted due to read error\n",
-                      mdname(mddev));
-               conf->mirrors[d].recovery_disabled = mddev->recovery_disabled;
-               set_bit(MD_RECOVERY_INTR, &mddev->recovery);
-               bio_endio(wbio, 0);
+       }
+       if (wbio2 && wbio2->bi_end_io) {
+               atomic_inc(&conf->mirrors[d].replacement->nr_pending);
+               md_sync_acct(conf->mirrors[d].replacement->bdev,
+                            wbio2->bi_size >> 9);
+               generic_make_request(wbio2);
        }
 }
 
@@ -1505,7 +2029,7 @@ static void recovery_request_write(mddev_t *mddev, r10bio_t *r10_bio)
  * since the last recorded read error.
  *
  */
-static void check_decay_read_errors(mddev_t *mddev, mdk_rdev_t *rdev)
+static void check_decay_read_errors(struct mddev *mddev, struct md_rdev *rdev)
 {
        struct timespec cur_time_mon;
        unsigned long hours_since_last;
@@ -1536,6 +2060,30 @@ static void check_decay_read_errors(mddev_t *mddev, mdk_rdev_t *rdev)
                atomic_set(&rdev->read_errors, read_errors >> hours_since_last);
 }
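
The decay above halves the accumulated read-error count once per hour of error-free operation, so stale errors stop counting against the max_corr_read_errors limit checked in fix_read_error(). For example, 40 recorded errors followed by three quiet hours decay to 40 >> 3 = 5; a tiny sketch of just that shift:

#include <stdio.h>

int main(void)
{
	unsigned int read_errors = 40;
	unsigned int hours_since_last = 3;

	/* Same decay as check_decay_read_errors(): halve once per hour. */
	printf("%u -> %u\n", read_errors, read_errors >> hours_since_last);
	return 0;
}
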
 
+static int r10_sync_page_io(struct md_rdev *rdev, sector_t sector,
+                           int sectors, struct page *page, int rw)
+{
+       sector_t first_bad;
+       int bad_sectors;
+
+       if (is_badblock(rdev, sector, sectors, &first_bad, &bad_sectors)
+           && (rw == READ || test_bit(WriteErrorSeen, &rdev->flags)))
+               return -1;
+       if (sync_page_io(rdev, sector, sectors << 9, page, rw, false))
+               /* success */
+               return 1;
+       if (rw == WRITE) {
+               set_bit(WriteErrorSeen, &rdev->flags);
+               if (!test_and_set_bit(WantReplacement, &rdev->flags))
+                       set_bit(MD_RECOVERY_NEEDED,
+                               &rdev->mddev->recovery);
+       }
+       /* need to record an error - either for the block or the device */
+       if (!rdev_set_badblocks(rdev, sector, sectors, 0))
+               md_error(rdev->mddev, rdev);
+       return 0;
+}
+
 /*
  * This is a kernel thread which:
  *
@@ -1544,11 +2092,11 @@ static void check_decay_read_errors(mddev_t *mddev, mdk_rdev_t *rdev)
  *     3.      Performs writes following reads for array synchronising.
  */
 
-static void fix_read_error(conf_t *conf, mddev_t *mddev, r10bio_t *r10_bio)
+static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10bio *r10_bio)
 {
        int sect = 0; /* Offset from r10_bio->sector */
        int sectors = r10_bio->sectors;
-       mdk_rdev_t*rdev;
+       struct md_rdev*rdev;
        int max_read_errors = atomic_read(&mddev->max_corr_read_errors);
        int d = r10_bio->devs[r10_bio->read_slot].devnum;
 
@@ -1577,6 +2125,7 @@ static void fix_read_error(conf_t *conf, mddev_t *mddev, r10bio_t *r10_bio)
                       "md/raid10:%s: %s: Failing raid device\n",
                       mdname(mddev), b);
                md_error(mddev, conf->mirrors[d].rdev);
+               r10_bio->devs[r10_bio->read_slot].bio = IO_BLOCKED;
                return;
        }
 
@@ -1597,6 +2146,7 @@ static void fix_read_error(conf_t *conf, mddev_t *mddev, r10bio_t *r10_bio)
                        d = r10_bio->devs[sl].devnum;
                        rdev = rcu_dereference(conf->mirrors[d].rdev);
                        if (rdev &&
+                           !test_bit(Unmerged, &rdev->flags) &&
                            test_bit(In_sync, &rdev->flags) &&
                            is_badblock(rdev, r10_bio->devs[sl].addr + sect, s,
                                        &first_bad, &bad_sectors) == 0) {
@@ -1619,9 +2169,22 @@ static void fix_read_error(conf_t *conf, mddev_t *mddev, r10bio_t *r10_bio)
                rcu_read_unlock();
 
                if (!success) {
-                       /* Cannot read from anywhere -- bye bye array */
+                       /* Cannot read from anywhere, just mark the block
+                        * as bad on the first device to discourage future
+                        * reads.
+                        */
                        int dn = r10_bio->devs[r10_bio->read_slot].devnum;
-                       md_error(mddev, conf->mirrors[dn].rdev);
+                       rdev = conf->mirrors[dn].rdev;
+
+                       if (!rdev_set_badblocks(
+                                   rdev,
+                                   r10_bio->devs[r10_bio->read_slot].addr
+                                   + sect,
+                                   s, 0)) {
+                               md_error(mddev, rdev);
+                               r10_bio->devs[r10_bio->read_slot].bio
+                                       = IO_BLOCKED;
+                       }
                        break;
                }
 
@@ -1637,15 +2200,16 @@ static void fix_read_error(conf_t *conf, mddev_t *mddev, r10bio_t *r10_bio)
                        d = r10_bio->devs[sl].devnum;
                        rdev = rcu_dereference(conf->mirrors[d].rdev);
                        if (!rdev ||
+                           test_bit(Unmerged, &rdev->flags) ||
                            !test_bit(In_sync, &rdev->flags))
                                continue;
 
                        atomic_inc(&rdev->nr_pending);
                        rcu_read_unlock();
-                       if (sync_page_io(rdev,
-                                        r10_bio->devs[sl].addr +
-                                        sect,
-                                        s<<9, conf->tmppage, WRITE, false)
+                       if (r10_sync_page_io(rdev,
+                                            r10_bio->devs[sl].addr +
+                                            sect,
+                                            s<<9, conf->tmppage, WRITE)
                            == 0) {
                                /* Well, this device is dead */
                                printk(KERN_NOTICE
@@ -1660,7 +2224,6 @@ static void fix_read_error(conf_t *conf, mddev_t *mddev, r10bio_t *r10_bio)
                                       "drive\n",
                                       mdname(mddev),
                                       bdevname(rdev->bdev, b));
-                               md_error(mddev, rdev);
                        }
                        rdev_dec_pending(rdev, mddev);
                        rcu_read_lock();
@@ -1680,11 +2243,12 @@ static void fix_read_error(conf_t *conf, mddev_t *mddev, r10bio_t *r10_bio)
 
                        atomic_inc(&rdev->nr_pending);
                        rcu_read_unlock();
-                       if (sync_page_io(rdev,
-                                        r10_bio->devs[sl].addr +
-                                        sect,
-                                        s<<9, conf->tmppage,
-                                        READ, false) == 0) {
+                       switch (r10_sync_page_io(rdev,
+                                            r10_bio->devs[sl].addr +
+                                            sect,
+                                            s<<9, conf->tmppage,
+                                                READ)) {
+                       case 0:
                                /* Well, this device is dead */
                                printk(KERN_NOTICE
                                       "md/raid10:%s: unable to read back "
@@ -1698,9 +2262,8 @@ static void fix_read_error(conf_t *conf, mddev_t *mddev, r10bio_t *r10_bio)
                                       "drive\n",
                                       mdname(mddev),
                                       bdevname(rdev->bdev, b));
-
-                               md_error(mddev, rdev);
-                       } else {
+                               break;
+                       case 1:
                                printk(KERN_INFO
                                       "md/raid10:%s: read error corrected"
                                       " (%d sectors at %llu on %s)\n",
@@ -1721,13 +2284,88 @@ static void fix_read_error(conf_t *conf, mddev_t *mddev, r10bio_t *r10_bio)
        }
 }
 
-static void handle_read_error(mddev_t *mddev, r10bio_t *r10_bio)
+static void bi_complete(struct bio *bio, int error)
+{
+       complete((struct completion *)bio->bi_private);
+}
+
+static int submit_bio_wait(int rw, struct bio *bio)
+{
+       struct completion event;
+       rw |= REQ_SYNC;
+
+       init_completion(&event);
+       bio->bi_private = &event;
+       bio->bi_end_io = bi_complete;
+       submit_bio(rw, bio);
+       wait_for_completion(&event);
+
+       return test_bit(BIO_UPTODATE, &bio->bi_flags);
+}
+
+static int narrow_write_error(struct r10bio *r10_bio, int i)
+{
+       struct bio *bio = r10_bio->master_bio;
+       struct mddev *mddev = r10_bio->mddev;
+       struct r10conf *conf = mddev->private;
+       struct md_rdev *rdev = conf->mirrors[r10_bio->devs[i].devnum].rdev;
+       /* bio has the data to be written to slot 'i' where
+        * we just recently had a write error.
+        * We repeatedly clone the bio and trim down to one block,
+        * then try the write.  Where the write fails we record
+        * a bad block.
+        * It is conceivable that the bio doesn't exactly align with
+        * blocks.  We must handle this.
+        *
+        * We currently own a reference to the rdev.
+        */
+
+       int block_sectors;
+       sector_t sector;
+       int sectors;
+       int sect_to_write = r10_bio->sectors;
+       int ok = 1;
+
+       if (rdev->badblocks.shift < 0)
+               return 0;
+
+       block_sectors = 1 << rdev->badblocks.shift;
+       sector = r10_bio->sector;
+       sectors = ((r10_bio->sector + block_sectors)
+                  & ~(sector_t)(block_sectors - 1))
+               - sector;
+
+       while (sect_to_write) {
+               struct bio *wbio;
+               if (sectors > sect_to_write)
+                       sectors = sect_to_write;
+               /* Write at 'sector' for 'sectors' */
+               wbio = bio_clone_mddev(bio, GFP_NOIO, mddev);
+               md_trim_bio(wbio, sector - bio->bi_sector, sectors);
+               wbio->bi_sector = (r10_bio->devs[i].addr+
+                                  rdev->data_offset+
+                                  (sector - r10_bio->sector));
+               wbio->bi_bdev = rdev->bdev;
+               if (submit_bio_wait(WRITE, wbio) == 0)
+                       /* Failure! */
+                       ok = rdev_set_badblocks(rdev, sector,
+                                               sectors, 0)
+                               && ok;
+
+               bio_put(wbio);
+               sect_to_write -= sectors;
+               sector += sectors;
+               sectors = block_sectors;
+       }
+       return ok;
+}
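
The first chunk written by narrow_write_error() is deliberately shortened so that every subsequent chunk starts on a badblocks-granularity boundary. A sketch of that alignment arithmetic with hypothetical numbers (badblocks.shift of 3, i.e. 8-sector granularity, and a 21-sector write starting at sector 1003, which splits as 5 + 8 + 8):

#include <stdio.h>

int main(void)
{
	int block_sectors = 1 << 3;		/* hypothetical badblocks.shift == 3 */
	unsigned long long sector = 1003;	/* r10_bio->sector in this example */
	int sect_to_write = 21;			/* r10_bio->sectors */
	int sectors = ((sector + block_sectors) &
		       ~(unsigned long long)(block_sectors - 1)) - sector;

	while (sect_to_write) {
		if (sectors > sect_to_write)
			sectors = sect_to_write;
		/* each pass would clone, trim and submit one chunk */
		printf("write %d sectors at %llu\n", sectors, sector);
		sect_to_write -= sectors;
		sector += sectors;
		sectors = block_sectors;	/* later chunks are aligned */
	}
	return 0;
}
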
+
+static void handle_read_error(struct mddev *mddev, struct r10bio *r10_bio)
 {
        int slot = r10_bio->read_slot;
-       int mirror = r10_bio->devs[slot].devnum;
        struct bio *bio;
-       conf_t *conf = mddev->private;
-       mdk_rdev_t *rdev;
+       struct r10conf *conf = mddev->private;
+       struct md_rdev *rdev = r10_bio->devs[slot].rdev;
        char b[BDEVNAME_SIZE];
        unsigned long do_sync;
        int max_sectors;
@@ -1740,34 +2378,33 @@ static void handle_read_error(mddev_t *mddev, r10bio_t *r10_bio)
         * This is all done synchronously while the array is
         * frozen.
         */
+       bio = r10_bio->devs[slot].bio;
+       bdevname(bio->bi_bdev, b);
+       bio_put(bio);
+       r10_bio->devs[slot].bio = NULL;
+
        if (mddev->ro == 0) {
                freeze_array(conf);
                fix_read_error(conf, mddev, r10_bio);
                unfreeze_array(conf);
-       }
-       rdev_dec_pending(conf->mirrors[mirror].rdev, mddev);
+       } else
+               r10_bio->devs[slot].bio = IO_BLOCKED;
+
+       rdev_dec_pending(rdev, mddev);
 
-       bio = r10_bio->devs[slot].bio;
-       bdevname(bio->bi_bdev, b);
-       r10_bio->devs[slot].bio =
-               mddev->ro ? IO_BLOCKED : NULL;
 read_more:
-       mirror = read_balance(conf, r10_bio, &max_sectors);
-       if (mirror == -1) {
+       rdev = read_balance(conf, r10_bio, &max_sectors);
+       if (rdev == NULL) {
                printk(KERN_ALERT "md/raid10:%s: %s: unrecoverable I/O"
                       " read error for block %llu\n",
                       mdname(mddev), b,
                       (unsigned long long)r10_bio->sector);
                raid_end_bio_io(r10_bio);
-               bio_put(bio);
                return;
        }
 
        do_sync = (r10_bio->master_bio->bi_rw & REQ_SYNC);
-       if (bio)
-               bio_put(bio);
        slot = r10_bio->read_slot;
-       rdev = conf->mirrors[mirror].rdev;
        printk_ratelimited(
                KERN_ERR
                "md/raid10:%s: %s: redirecting"
@@ -1781,6 +2418,7 @@ read_more:
                    r10_bio->sector - bio->bi_sector,
                    max_sectors);
        r10_bio->devs[slot].bio = bio;
+       r10_bio->devs[slot].rdev = rdev;
        bio->bi_sector = r10_bio->devs[slot].addr
                + rdev->data_offset;
        bio->bi_bdev = rdev->bdev;
@@ -1801,7 +2439,6 @@ read_more:
                        mbio->bi_phys_segments++;
                spin_unlock_irq(&conf->device_lock);
                generic_make_request(bio);
-               bio = NULL;
 
                r10_bio = mempool_alloc(conf->r10bio_pool,
                                        GFP_NOIO);
@@ -1820,11 +2457,97 @@ read_more:
                generic_make_request(bio);
 }
 
-static void raid10d(mddev_t *mddev)
+static void handle_write_completed(struct r10conf *conf, struct r10bio *r10_bio)
+{
+       /* Some sort of write request has finished and it
+        * succeeded in writing where we thought there was a
+        * bad block.  So forget the bad block.
+        * Or possibly it failed and we need to record
+        * a bad block.
+        */
+       int m;
+       struct md_rdev *rdev;
+
+       if (test_bit(R10BIO_IsSync, &r10_bio->state) ||
+           test_bit(R10BIO_IsRecover, &r10_bio->state)) {
+               for (m = 0; m < conf->copies; m++) {
+                       int dev = r10_bio->devs[m].devnum;
+                       rdev = conf->mirrors[dev].rdev;
+                       if (r10_bio->devs[m].bio == NULL)
+                               continue;
+                       if (test_bit(BIO_UPTODATE,
+                                    &r10_bio->devs[m].bio->bi_flags)) {
+                               rdev_clear_badblocks(
+                                       rdev,
+                                       r10_bio->devs[m].addr,
+                                       r10_bio->sectors);
+                       } else {
+                               if (!rdev_set_badblocks(
+                                           rdev,
+                                           r10_bio->devs[m].addr,
+                                           r10_bio->sectors, 0))
+                                       md_error(conf->mddev, rdev);
+                       }
+                       rdev = conf->mirrors[dev].replacement;
+                       if (r10_bio->devs[m].repl_bio == NULL)
+                               continue;
+                       if (test_bit(BIO_UPTODATE,
+                                    &r10_bio->devs[m].repl_bio->bi_flags)) {
+                               rdev_clear_badblocks(
+                                       rdev,
+                                       r10_bio->devs[m].addr,
+                                       r10_bio->sectors);
+                       } else {
+                               if (!rdev_set_badblocks(
+                                           rdev,
+                                           r10_bio->devs[m].addr,
+                                           r10_bio->sectors, 0))
+                                       md_error(conf->mddev, rdev);
+                       }
+               }
+               put_buf(r10_bio);
+       } else {
+               for (m = 0; m < conf->copies; m++) {
+                       int dev = r10_bio->devs[m].devnum;
+                       struct bio *bio = r10_bio->devs[m].bio;
+                       rdev = conf->mirrors[dev].rdev;
+                       if (bio == IO_MADE_GOOD) {
+                               rdev_clear_badblocks(
+                                       rdev,
+                                       r10_bio->devs[m].addr,
+                                       r10_bio->sectors);
+                               rdev_dec_pending(rdev, conf->mddev);
+                       } else if (bio != NULL &&
+                                  !test_bit(BIO_UPTODATE, &bio->bi_flags)) {
+                               if (!narrow_write_error(r10_bio, m)) {
+                                       md_error(conf->mddev, rdev);
+                                       set_bit(R10BIO_Degraded,
+                                               &r10_bio->state);
+                               }
+                               rdev_dec_pending(rdev, conf->mddev);
+                       }
+                       bio = r10_bio->devs[m].repl_bio;
+                       rdev = conf->mirrors[dev].replacement;
+                       if (rdev && bio == IO_MADE_GOOD) {
+                               rdev_clear_badblocks(
+                                       rdev,
+                                       r10_bio->devs[m].addr,
+                                       r10_bio->sectors);
+                               rdev_dec_pending(rdev, conf->mddev);
+                       }
+               }
+               if (test_bit(R10BIO_WriteError,
+                            &r10_bio->state))
+                       close_write(r10_bio);
+               raid_end_bio_io(r10_bio);
+       }
+}
+
+static void raid10d(struct mddev *mddev)
 {
-       r10bio_t *r10_bio;
+       struct r10bio *r10_bio;
        unsigned long flags;
-       conf_t *conf = mddev->private;
+       struct r10conf *conf = mddev->private;
        struct list_head *head = &conf->retry_list;
        struct blk_plug plug;
 
@@ -1840,14 +2563,17 @@ static void raid10d(mddev_t *mddev)
                        spin_unlock_irqrestore(&conf->device_lock, flags);
                        break;
                }
-               r10_bio = list_entry(head->prev, r10bio_t, retry_list);
+               r10_bio = list_entry(head->prev, struct r10bio, retry_list);
                list_del(head->prev);
                conf->nr_queued--;
                spin_unlock_irqrestore(&conf->device_lock, flags);
 
                mddev = r10_bio->mddev;
                conf = mddev->private;
-               if (test_bit(R10BIO_IsSync, &r10_bio->state))
+               if (test_bit(R10BIO_MadeGood, &r10_bio->state) ||
+                   test_bit(R10BIO_WriteError, &r10_bio->state))
+                       handle_write_completed(conf, r10_bio);
+               else if (test_bit(R10BIO_IsSync, &r10_bio->state))
                        sync_request_write(mddev, r10_bio);
                else if (test_bit(R10BIO_IsRecover, &r10_bio->state))
                        recovery_request_write(mddev, r10_bio);
@@ -1869,12 +2595,17 @@ static void raid10d(mddev_t *mddev)
 }
 
 
-static int init_resync(conf_t *conf)
+static int init_resync(struct r10conf *conf)
 {
        int buffs;
+       int i;
 
        buffs = RESYNC_WINDOW / RESYNC_BLOCK_SIZE;
        BUG_ON(conf->r10buf_pool);
+       conf->have_replacement = 0;
+       for (i = 0; i < conf->raid_disks; i++)
+               if (conf->mirrors[i].replacement)
+                       conf->have_replacement = 1;
        conf->r10buf_pool = mempool_create(buffs, r10buf_pool_alloc, r10buf_pool_free, conf);
        if (!conf->r10buf_pool)
                return -ENOMEM;
@@ -1914,11 +2645,11 @@ static int init_resync(conf_t *conf)
  *
  */
 
-static sector_t sync_request(mddev_t *mddev, sector_t sector_nr,
+static sector_t sync_request(struct mddev *mddev, sector_t sector_nr,
                             int *skipped, int go_faster)
 {
-       conf_t *conf = mddev->private;
-       r10bio_t *r10_bio;
+       struct r10conf *conf = mddev->private;
+       struct r10bio *r10_bio;
        struct bio *biolist = NULL, *bio;
        sector_t max_sector, nr_sectors;
        int i;
@@ -1955,9 +2686,22 @@ static sector_t sync_request(mddev_t *mddev, sector_t sector_nr,
                                bitmap_end_sync(mddev->bitmap, sect,
                                                &sync_blocks, 1);
                        }
-               } else /* completed sync */
+               } else {
+                       /* completed sync */
+                       if ((!mddev->bitmap || conf->fullsync)
+                           && conf->have_replacement
+                           && test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
+                               /* Completed a full sync so the replacements
+                                * are now fully recovered.
+                                */
+                               for (i = 0; i < conf->raid_disks; i++)
+                                       if (conf->mirrors[i].replacement)
+                                               conf->mirrors[i].replacement
+                                                       ->recovery_offset
+                                                       = MaxSector;
+                       }
                        conf->fullsync = 0;
-
+               }
                bitmap_close_sync(mddev->bitmap);
                close_sync(conf);
                *skipped = 1;
@@ -2005,31 +2749,39 @@ static sector_t sync_request(mddev_t *mddev, sector_t sector_nr,
        max_sync = RESYNC_PAGES << (PAGE_SHIFT-9);
        if (!test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
                /* recovery... the complicated one */
-               int j, k;
+               int j;
                r10_bio = NULL;
 
                for (i=0 ; i<conf->raid_disks; i++) {
                        int still_degraded;
-                       r10bio_t *rb2;
+                       struct r10bio *rb2;
                        sector_t sect;
                        int must_sync;
-
-                       if (conf->mirrors[i].rdev == NULL ||
-                           test_bit(In_sync, &conf->mirrors[i].rdev->flags)) 
+                       int any_working;
+                       struct mirror_info *mirror = &conf->mirrors[i];
+
+                       if ((mirror->rdev == NULL ||
+                            test_bit(In_sync, &mirror->rdev->flags))
+                           &&
+                           (mirror->replacement == NULL ||
+                            test_bit(Faulty,
+                                     &mirror->replacement->flags)))
                                continue;
 
                        still_degraded = 0;
                        /* want to reconstruct this device */
                        rb2 = r10_bio;
                        sect = raid10_find_virt(conf, sector_nr, i);
-                       /* Unless we are doing a full sync, we only need
-                        * to recover the block if it is set in the bitmap
+                       /* Unless we are doing a full sync, or a replacement,
+                        * we only need to recover the block if it is set in
+                        * the bitmap
                         */
                        must_sync = bitmap_start_sync(mddev->bitmap, sect,
                                                      &sync_blocks, 1);
                        if (sync_blocks < max_sync)
                                max_sync = sync_blocks;
                        if (!must_sync &&
+                           mirror->replacement == NULL &&
                            !conf->fullsync) {
                                /* yep, skip the sync_blocks here, but don't assume
                                 * that there will never be anything to do here
@@ -2064,15 +2816,19 @@ static sector_t sync_request(mddev_t *mddev, sector_t sector_nr,
                        must_sync = bitmap_start_sync(mddev->bitmap, sect,
                                                      &sync_blocks, still_degraded);
 
+                       any_working = 0;
                        for (j=0; j<conf->copies;j++) {
+                               int k;
                                int d = r10_bio->devs[j].devnum;
-                               mdk_rdev_t *rdev;
+                               sector_t from_addr, to_addr;
+                               struct md_rdev *rdev;
                                sector_t sector, first_bad;
                                int bad_sectors;
                                if (!conf->mirrors[d].rdev ||
                                    !test_bit(In_sync, &conf->mirrors[d].rdev->flags))
                                        continue;
                                /* This is where we read from */
+                               any_working = 1;
                                rdev = conf->mirrors[d].rdev;
                                sector = r10_bio->devs[j].addr;
 
@@ -2094,50 +2850,108 @@ static sector_t sync_request(mddev_t *mddev, sector_t sector_nr,
                                bio->bi_private = r10_bio;
                                bio->bi_end_io = end_sync_read;
                                bio->bi_rw = READ;
-                               bio->bi_sector = r10_bio->devs[j].addr +
-                                       conf->mirrors[d].rdev->data_offset;
-                               bio->bi_bdev = conf->mirrors[d].rdev->bdev;
-                               atomic_inc(&conf->mirrors[d].rdev->nr_pending);
-                               atomic_inc(&r10_bio->remaining);
-                               /* and we write to 'i' */
+                               from_addr = r10_bio->devs[j].addr;
+                               bio->bi_sector = from_addr + rdev->data_offset;
+                               bio->bi_bdev = rdev->bdev;
+                               atomic_inc(&rdev->nr_pending);
+                               /* and we write to 'i' (if not in_sync) */
 
                                for (k=0; k<conf->copies; k++)
                                        if (r10_bio->devs[k].devnum == i)
                                                break;
                                BUG_ON(k == conf->copies);
-                               bio = r10_bio->devs[1].bio;
+                               to_addr = r10_bio->devs[k].addr;
+                               r10_bio->devs[0].devnum = d;
+                               r10_bio->devs[0].addr = from_addr;
+                               r10_bio->devs[1].devnum = i;
+                               r10_bio->devs[1].addr = to_addr;
+
+                               rdev = mirror->rdev;
+                               if (!test_bit(In_sync, &rdev->flags)) {
+                                       bio = r10_bio->devs[1].bio;
+                                       bio->bi_next = biolist;
+                                       biolist = bio;
+                                       bio->bi_private = r10_bio;
+                                       bio->bi_end_io = end_sync_write;
+                                       bio->bi_rw = WRITE;
+                                       bio->bi_sector = to_addr
+                                               + rdev->data_offset;
+                                       bio->bi_bdev = rdev->bdev;
+                                       atomic_inc(&r10_bio->remaining);
+                               } else
+                                       r10_bio->devs[1].bio->bi_end_io = NULL;
+
+                               /* and maybe write to replacement */
+                               bio = r10_bio->devs[1].repl_bio;
+                               if (bio)
+                                       bio->bi_end_io = NULL;
+                               rdev = mirror->replacement;
+                               /* Note: if rdev != NULL, then bio
+                                * cannot be NULL as r10buf_pool_alloc will
+                                * have allocated it.
+                                * So the second test here is pointless.
+                                * But it keeps semantic-checkers happy, and
+                                * this comment keeps human reviewers
+                                * happy.
+                                */
+                               if (rdev == NULL || bio == NULL ||
+                                   test_bit(Faulty, &rdev->flags))
+                                       break;
                                bio->bi_next = biolist;
                                biolist = bio;
                                bio->bi_private = r10_bio;
                                bio->bi_end_io = end_sync_write;
                                bio->bi_rw = WRITE;
-                               bio->bi_sector = r10_bio->devs[k].addr +
-                                       conf->mirrors[i].rdev->data_offset;
-                               bio->bi_bdev = conf->mirrors[i].rdev->bdev;
-
-                               r10_bio->devs[0].devnum = d;
-                               r10_bio->devs[1].devnum = i;
-
+                               bio->bi_sector = to_addr + rdev->data_offset;
+                               bio->bi_bdev = rdev->bdev;
+                               atomic_inc(&r10_bio->remaining);
                                break;
                        }
                        if (j == conf->copies) {
-                               /* Cannot recover, so abort the recovery */
+                               /* Cannot recover, so abort the recovery or
+                                * record a bad block */
                                put_buf(r10_bio);
                                if (rb2)
                                        atomic_dec(&rb2->remaining);
                                r10_bio = rb2;
-                               if (!test_and_set_bit(MD_RECOVERY_INTR,
-                                                     &mddev->recovery))
-                                       printk(KERN_INFO "md/raid10:%s: insufficient "
-                                              "working devices for recovery.\n",
-                                              mdname(mddev));
+                               if (any_working) {
+                                       /* problem is that there are bad blocks
+                                        * on other device(s)
+                                        */
+                                       int k;
+                                       for (k = 0; k < conf->copies; k++)
+                                               if (r10_bio->devs[k].devnum == i)
+                                                       break;
+                                       if (!test_bit(In_sync,
+                                                     &mirror->rdev->flags)
+                                           && !rdev_set_badblocks(
+                                                   mirror->rdev,
+                                                   r10_bio->devs[k].addr,
+                                                   max_sync, 0))
+                                               any_working = 0;
+                                       if (mirror->replacement &&
+                                           !rdev_set_badblocks(
+                                                   mirror->replacement,
+                                                   r10_bio->devs[k].addr,
+                                                   max_sync, 0))
+                                               any_working = 0;
+                               }
+                               if (!any_working)  {
+                                       if (!test_and_set_bit(MD_RECOVERY_INTR,
+                                                             &mddev->recovery))
+                                               printk(KERN_INFO "md/raid10:%s: insufficient "
+                                                      "working devices for recovery.\n",
+                                                      mdname(mddev));
+                                       mirror->recovery_disabled
+                                               = mddev->recovery_disabled;
+                               }
                                break;
                        }
                }
                if (biolist == NULL) {
                        while (r10_bio) {
-                               r10bio_t *rb2 = r10_bio;
-                               r10_bio = (r10bio_t*) rb2->master_bio;
+                               struct r10bio *rb2 = r10_bio;
+                               r10_bio = (struct r10bio*) rb2->master_bio;
                                rb2->master_bio = NULL;
                                put_buf(rb2);
                        }
@@ -2177,6 +2991,9 @@ static sector_t sync_request(mddev_t *mddev, sector_t sector_nr,
                        sector_t first_bad, sector;
                        int bad_sectors;
 
+                       if (r10_bio->devs[i].repl_bio)
+                               r10_bio->devs[i].repl_bio->bi_end_io = NULL;
+
                        bio = r10_bio->devs[i].bio;
                        bio->bi_end_io = NULL;
                        clear_bit(BIO_UPTODATE, &bio->bi_flags);
@@ -2207,6 +3024,27 @@ static sector_t sync_request(mddev_t *mddev, sector_t sector_nr,
                                conf->mirrors[d].rdev->data_offset;
                        bio->bi_bdev = conf->mirrors[d].rdev->bdev;
                        count++;
+
+                       if (conf->mirrors[d].replacement == NULL ||
+                           test_bit(Faulty,
+                                    &conf->mirrors[d].replacement->flags))
+                               continue;
+
+                       /* Need to set up for writing to the replacement */
+                       bio = r10_bio->devs[i].repl_bio;
+                       clear_bit(BIO_UPTODATE, &bio->bi_flags);
+
+                       sector = r10_bio->devs[i].addr;
+                       atomic_inc(&conf->mirrors[d].rdev->nr_pending);
+                       bio->bi_next = biolist;
+                       biolist = bio;
+                       bio->bi_private = r10_bio;
+                       bio->bi_end_io = end_sync_write;
+                       bio->bi_rw = WRITE;
+                       bio->bi_sector = sector +
+                               conf->mirrors[d].replacement->data_offset;
+                       bio->bi_bdev = conf->mirrors[d].replacement->bdev;
+                       count++;
                }
 
                if (count < 2) {
@@ -2215,6 +3053,11 @@ static sector_t sync_request(mddev_t *mddev, sector_t sector_nr,
                                if (r10_bio->devs[i].bio->bi_end_io)
                                        rdev_dec_pending(conf->mirrors[d].rdev,
                                                         mddev);
+                               if (r10_bio->devs[i].repl_bio &&
+                                   r10_bio->devs[i].repl_bio->bi_end_io)
+                                       rdev_dec_pending(
+                                               conf->mirrors[d].replacement,
+                                               mddev);
                        }
                        put_buf(r10_bio);
                        biolist = NULL;
@@ -2290,7 +3133,8 @@ static sector_t sync_request(mddev_t *mddev, sector_t sector_nr,
        return sectors_skipped + nr_sectors;
  giveup:
        /* There is nowhere to write, so all non-sync
-        * drives must be failed, so try the next chunk...
+        * drives must be failed or in resync, or all drives
+        * have a bad block, so try the next chunk...
         */
        if (sector_nr + max_sync < max_sector)
                max_sector = sector_nr + max_sync;
@@ -2302,10 +3146,10 @@ static sector_t sync_request(mddev_t *mddev, sector_t sector_nr,
 }
 
 static sector_t
-raid10_size(mddev_t *mddev, sector_t sectors, int raid_disks)
+raid10_size(struct mddev *mddev, sector_t sectors, int raid_disks)
 {
        sector_t size;
-       conf_t *conf = mddev->private;
+       struct r10conf *conf = mddev->private;
 
        if (!raid_disks)
                raid_disks = conf->raid_disks;
@@ -2320,12 +3164,40 @@ raid10_size(mddev_t *mddev, sector_t sectors, int raid_disks)
        return size << conf->chunk_shift;
 }
 
+static void calc_sectors(struct r10conf *conf, sector_t size)
+{
+       /* Calculate the number of sectors-per-device that will
+        * actually be used, and set conf->dev_sectors and
+        * conf->stride
+        */
+
+       size = size >> conf->chunk_shift;
+       sector_div(size, conf->far_copies);
+       size = size * conf->raid_disks;
+       sector_div(size, conf->near_copies);
+       /* 'size' is now the number of chunks in the array */
+       /* calculate "used chunks per device" */
+       size = size * conf->copies;
+
+       /* We need to round up when dividing by raid_disks to
+        * get the stride size.
+        */
+       size = DIV_ROUND_UP_SECTOR_T(size, conf->raid_disks);
+
+       conf->dev_sectors = size << conf->chunk_shift;
 
-static conf_t *setup_conf(mddev_t *mddev)
+       if (conf->far_offset)
+               conf->stride = 1 << conf->chunk_shift;
+       else {
+               sector_div(size, conf->far_copies);
+               conf->stride = size << conf->chunk_shift;
+       }
+}
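
A worked example of calc_sectors(), using illustrative numbers: a 4-disk near-2 layout (near_copies = 2, far_copies = 1, copies = 2), 64 KiB chunks (chunk_shift = 7) and 1,048,576 sectors per device gives 8192 chunks per device, 16384 distinct chunks in the array, 32768 chunk copies, and therefore 8192 used chunks per device again, so dev_sectors stays 1,048,576 and stride equals dev_sectors because far_offset is 0. The same steps in plain C:

#include <stdio.h>

int main(void)
{
	/* Illustrative configuration, not taken from a real array. */
	unsigned long long size = 1048576;	/* usable sectors per device */
	int chunk_shift = 7;			/* 64 KiB chunks = 128 sectors */
	int raid_disks = 4, near_copies = 2, far_copies = 1, copies = 2;
	int far_offset = 0;
	unsigned long long dev_sectors, stride;

	size >>= chunk_shift;			/* chunks per device */
	size /= far_copies;
	size = size * raid_disks / near_copies;	/* chunks in the array */
	size *= copies;				/* chunk copies in the array */
	size = (size + raid_disks - 1) / raid_disks; /* used chunks per device */

	dev_sectors = size << chunk_shift;
	if (far_offset)
		stride = 1ULL << chunk_shift;
	else
		stride = (size / far_copies) << chunk_shift;

	printf("dev_sectors=%llu stride=%llu\n", dev_sectors, stride);
	return 0;
}
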
+
+static struct r10conf *setup_conf(struct mddev *mddev)
 {
-       conf_t *conf = NULL;
+       struct r10conf *conf = NULL;
        int nc, fc, fo;
-       sector_t stride, size;
        int err = -EINVAL;
 
        if (mddev->new_chunk_sectors < (PAGE_SIZE >> 9) ||
@@ -2348,7 +3220,7 @@ static conf_t *setup_conf(mddev_t *mddev)
        }
 
        err = -ENOMEM;
-       conf = kzalloc(sizeof(conf_t), GFP_KERNEL);
+       conf = kzalloc(sizeof(struct r10conf), GFP_KERNEL);
        if (!conf)
                goto out;
 
@@ -2375,28 +3247,7 @@ static conf_t *setup_conf(mddev_t *mddev)
        if (!conf->r10bio_pool)
                goto out;
 
-       size = mddev->dev_sectors >> conf->chunk_shift;
-       sector_div(size, fc);
-       size = size * conf->raid_disks;
-       sector_div(size, nc);
-       /* 'size' is now the number of chunks in the array */
-       /* calculate "used chunks per device" in 'stride' */
-       stride = size * conf->copies;
-
-       /* We need to round up when dividing by raid_disks to
-        * get the stride size.
-        */
-       stride += conf->raid_disks - 1;
-       sector_div(stride, conf->raid_disks);
-
-       conf->dev_sectors = stride << conf->chunk_shift;
-
-       if (fo)
-               stride = 1;
-       else
-               sector_div(stride, fc);
-       conf->stride = stride << conf->chunk_shift;
-
+       calc_sectors(conf, mddev->dev_sectors);
 
        spin_lock_init(&conf->device_lock);
        INIT_LIST_HEAD(&conf->retry_list);
@@ -2424,12 +3275,12 @@ static conf_t *setup_conf(mddev_t *mddev)
        return ERR_PTR(err);
 }
 
-static int run(mddev_t *mddev)
+static int run(struct mddev *mddev)
 {
-       conf_t *conf;
+       struct r10conf *conf;
        int i, disk_idx, chunk_size;
-       mirror_info_t *disk;
-       mdk_rdev_t *rdev;
+       struct mirror_info *disk;
+       struct md_rdev *rdev;
        sector_t size;
 
        /*
@@ -2459,7 +3310,7 @@ static int run(mddev_t *mddev)
                blk_queue_io_opt(mddev->queue, chunk_size *
                                 (conf->raid_disks / conf->near_copies));
 
-       list_for_each_entry(rdev, &mddev->disks, same_set) {
+       rdev_for_each(rdev, mddev) {
 
                disk_idx = rdev->raid_disk;
                if (disk_idx >= conf->raid_disks
@@ -2467,18 +3318,18 @@ static int run(mddev_t *mddev)
                        continue;
                disk = conf->mirrors + disk_idx;
 
-               disk->rdev = rdev;
+               if (test_bit(Replacement, &rdev->flags)) {
+                       if (disk->replacement)
+                               goto out_free_conf;
+                       disk->replacement = rdev;
+               } else {
+                       if (disk->rdev)
+                               goto out_free_conf;
+                       disk->rdev = rdev;
+               }
+
                disk_stack_limits(mddev->gendisk, rdev->bdev,
                                  rdev->data_offset << 9);
-               /* as we don't honour merge_bvec_fn, we must never risk
-                * violating it, so limit max_segments to 1 lying
-                * within a single page.
-                */
-               if (rdev->bdev->bd_disk->queue->merge_bvec_fn) {
-                       blk_queue_max_segments(mddev->queue, 1);
-                       blk_queue_segment_boundary(mddev->queue,
-                                                  PAGE_CACHE_SIZE - 1);
-               }
 
                disk->head_position = 0;
        }
@@ -2494,6 +3345,13 @@ static int run(mddev_t *mddev)
 
                disk = conf->mirrors + i;
 
+               if (!disk->rdev && disk->replacement) {
+                       /* The replacement is all we have - use it */
+                       disk->rdev = disk->replacement;
+                       disk->replacement = NULL;
+                       clear_bit(Replacement, &disk->rdev->flags);
+               }
+
                if (!disk->rdev ||
                    !test_bit(In_sync, &disk->rdev->flags)) {
                        disk->head_position = 0;
@@ -2501,6 +3359,7 @@ static int run(mddev_t *mddev)
                        if (disk->rdev)
                                conf->fullsync = 1;
                }
+               disk->recovery_disabled = mddev->recovery_disabled - 1;
        }
 
        if (mddev->recovery_cp != MaxSector)
@@ -2534,8 +3393,7 @@ static int run(mddev_t *mddev)
                        mddev->queue->backing_dev_info.ra_pages = 2* stripe;
        }
 
-       if (conf->near_copies < conf->raid_disks)
-               blk_queue_merge_bvec(mddev->queue, raid10_mergeable_bvec);
+       blk_queue_merge_bvec(mddev->queue, raid10_mergeable_bvec);
 
        if (md_integrity_register(mddev))
                goto out_free_conf;
@@ -2543,7 +3401,7 @@ static int run(mddev_t *mddev)
        return 0;
 
 out_free_conf:
-       md_unregister_thread(mddev->thread);
+       md_unregister_thread(&mddev->thread);
        if (conf->r10bio_pool)
                mempool_destroy(conf->r10bio_pool);
        safe_put_page(conf->tmppage);
@@ -2554,15 +3412,14 @@ out:
        return -EIO;
 }
 
-static int stop(mddev_t *mddev)
+static int stop(struct mddev *mddev)
 {
-       conf_t *conf = mddev->private;
+       struct r10conf *conf = mddev->private;
 
        raise_barrier(conf, 0);
        lower_barrier(conf);
 
-       md_unregister_thread(mddev->thread);
-       mddev->thread = NULL;
+       md_unregister_thread(&mddev->thread);
        blk_sync_queue(mddev->queue); /* the unplug fn references 'conf'*/
        if (conf->r10bio_pool)
                mempool_destroy(conf->r10bio_pool);
@@ -2572,9 +3429,9 @@ static int stop(mddev_t *mddev)
        return 0;
 }
 
-static void raid10_quiesce(mddev_t *mddev, int state)
+static void raid10_quiesce(struct mddev *mddev, int state)
 {
-       conf_t *conf = mddev->private;
+       struct r10conf *conf = mddev->private;
 
        switch(state) {
        case 1:
@@ -2586,10 +3443,48 @@ static void raid10_quiesce(mddev_t *mddev, int state)
        }
 }
 
-static void *raid10_takeover_raid0(mddev_t *mddev)
+static int raid10_resize(struct mddev *mddev, sector_t sectors)
 {
-       mdk_rdev_t *rdev;
-       conf_t *conf;
+       /* Resize of 'far' arrays is not supported.
+        * For 'near' and 'offset' arrays we can set the
+        * number of sectors used to be an appropriate multiple
+        * of the chunk size.
+        * For 'offset', this is far_copies*chunksize.
+        * For 'near' the multiplier is the LCM of
+        * near_copies and raid_disks.
+        * So if far_copies > 1 && !far_offset, fail.
+        * Else find LCM(raid_disks, near_copies)*far_copies and
+        * multiply by chunk_size.  Then round to this number.
+        * This is mostly done by raid10_size()
+        */
+       struct r10conf *conf = mddev->private;
+       sector_t oldsize, size;
+
+       if (conf->far_copies > 1 && !conf->far_offset)
+               return -EINVAL;
+
+       oldsize = raid10_size(mddev, 0, 0);
+       size = raid10_size(mddev, sectors, 0);
+       md_set_array_sectors(mddev, size);
+       if (mddev->array_sectors > size)
+               return -EINVAL;
+       set_capacity(mddev->gendisk, mddev->array_sectors);
+       revalidate_disk(mddev->gendisk);
+       if (sectors > mddev->dev_sectors &&
+           mddev->recovery_cp > oldsize) {
+               mddev->recovery_cp = oldsize;
+               set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
+       }
+       calc_sectors(conf, sectors);
+       mddev->dev_sectors = conf->dev_sectors;
+       mddev->resync_max_sectors = size;
+       return 0;
+}
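
Following the comment above, for a 'near' layout the usable size is rounded to a multiple of LCM(raid_disks, near_copies) * far_copies chunks. A hedged sketch with hypothetical numbers (3 disks, near_copies = 2, far_copies = 1, 128-sector chunks, giving a 768-sector granularity); the LCM helper here is illustrative, and the kernel derives the equivalent rounding inside raid10_size():

#include <stdio.h>

static unsigned long long gcd_ull(unsigned long long a, unsigned long long b)
{
	while (b) {
		unsigned long long t = a % b;
		a = b;
		b = t;
	}
	return a;
}

int main(void)
{
	/* Hypothetical near-2 layout on 3 disks, 64 KiB (128-sector) chunks. */
	unsigned long long raid_disks = 3, near_copies = 2, far_copies = 1;
	unsigned long long chunk_sectors = 128;
	unsigned long long requested = 1000000;	/* sectors asked for */

	unsigned long long lcm = raid_disks / gcd_ull(raid_disks, near_copies)
				 * near_copies;
	unsigned long long unit = lcm * far_copies * chunk_sectors;
	unsigned long long usable = requested / unit * unit;

	printf("granularity %llu sectors, %llu -> %llu\n",
	       unit, requested, usable);
	return 0;
}
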
+
+static void *raid10_takeover_raid0(struct mddev *mddev)
+{
+       struct md_rdev *rdev;
+       struct r10conf *conf;
 
        if (mddev->degraded > 0) {
                printk(KERN_ERR "md/raid10:%s: Error: degraded raid0!\n",
@@ -2609,7 +3504,7 @@ static void *raid10_takeover_raid0(mddev_t *mddev)
 
        conf = setup_conf(mddev);
        if (!IS_ERR(conf)) {
-               list_for_each_entry(rdev, &mddev->disks, same_set)
+               rdev_for_each(rdev, mddev)
                        if (rdev->raid_disk >= 0)
                                rdev->new_raid_disk = rdev->raid_disk * 2;
                conf->barrier = 1;
@@ -2618,17 +3513,17 @@ static void *raid10_takeover_raid0(mddev_t *mddev)
        return conf;
 }
 
-static void *raid10_takeover(mddev_t *mddev)
+static void *raid10_takeover(struct mddev *mddev)
 {
-       struct raid0_private_data *raid0_priv;
+       struct r0conf *raid0_conf;
 
        /* raid10 can take over:
         *  raid0 - providing it has only two drives
         */
        if (mddev->level == 0) {
                /* for raid0 takeover only one zone is supported */
-               raid0_priv = mddev->private;
-               if (raid0_priv->nr_strip_zones > 1) {
+               raid0_conf = mddev->private;
+               if (raid0_conf->nr_strip_zones > 1) {
                        printk(KERN_ERR "md/raid10:%s: cannot takeover raid 0"
                               " with more than one zone.\n",
                               mdname(mddev));
@@ -2639,7 +3534,7 @@ static void *raid10_takeover(mddev_t *mddev)
        return ERR_PTR(-EINVAL);
 }
 
-static struct mdk_personality raid10_personality =
+static struct md_personality raid10_personality =
 {
        .name           = "raid10",
        .level          = 10,
@@ -2655,6 +3550,7 @@ static struct mdk_personality raid10_personality =
        .sync_request   = sync_request,
        .quiesce        = raid10_quiesce,
        .size           = raid10_size,
+       .resize         = raid10_resize,
        .takeover       = raid10_takeover,
 };
 
@@ -2675,3 +3571,5 @@ MODULE_DESCRIPTION("RAID10 (striped mirror) personality for MD");
 MODULE_ALIAS("md-personality-9"); /* RAID10 */
 MODULE_ALIAS("md-raid10");
 MODULE_ALIAS("md-level-10");
+
+module_param(max_queued_requests, int, S_IRUGO|S_IWUSR);