Merge tag 'md-3.4-fixes' of git://neil.brown.name/md

diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index 52bb37d..3f91c2e 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -586,25 +586,68 @@ static sector_t raid10_find_virt(struct r10conf *conf, sector_t sector, int dev)
  *     @biovec: the request that could be merged to it.
  *
  *     Return amount of bytes we can accept at this offset
- *      If near_copies == raid_disk, there are no striping issues,
- *      but in that case, the function isn't called at all.
+ *     This requires checking for end-of-chunk if near_copies != raid_disks,
+ *     and for subordinate merge_bvec_fns if merge_check_needed.
  */
 static int raid10_mergeable_bvec(struct request_queue *q,
                                 struct bvec_merge_data *bvm,
                                 struct bio_vec *biovec)
 {
        struct mddev *mddev = q->queuedata;
+       struct r10conf *conf = mddev->private;
        sector_t sector = bvm->bi_sector + get_start_sect(bvm->bi_bdev);
        int max;
        unsigned int chunk_sectors = mddev->chunk_sectors;
        unsigned int bio_sectors = bvm->bi_size >> 9;
 
-       max =  (chunk_sectors - ((sector & (chunk_sectors - 1)) + bio_sectors)) << 9;
-       if (max < 0) max = 0; /* bio_add cannot handle a negative return */
-       if (max <= biovec->bv_len && bio_sectors == 0)
-               return biovec->bv_len;
-       else
-               return max;
+       if (conf->near_copies < conf->raid_disks) {
+               max = (chunk_sectors - ((sector & (chunk_sectors - 1))
+                                       + bio_sectors)) << 9;
+               if (max < 0)
+                       /* bio_add cannot handle a negative return */
+                       max = 0;
+               if (max <= biovec->bv_len && bio_sectors == 0)
+                       return biovec->bv_len;
+       } else
+               max = biovec->bv_len;
+
+       if (mddev->merge_check_needed) {
+               struct r10bio r10_bio;
+               int s;
+               r10_bio.sector = sector;
+               raid10_find_phys(conf, &r10_bio);
+               rcu_read_lock();
+               for (s = 0; s < conf->copies; s++) {
+                       int disk = r10_bio.devs[s].devnum;
+                       struct md_rdev *rdev = rcu_dereference(
+                               conf->mirrors[disk].rdev);
+                       if (rdev && !test_bit(Faulty, &rdev->flags)) {
+                               struct request_queue *q =
+                                       bdev_get_queue(rdev->bdev);
+                               if (q->merge_bvec_fn) {
+                                       bvm->bi_sector = r10_bio.devs[s].addr
+                                               + rdev->data_offset;
+                                       bvm->bi_bdev = rdev->bdev;
+                                       max = min(max, q->merge_bvec_fn(
+                                                         q, bvm, biovec));
+                               }
+                       }
+                       rdev = rcu_dereference(conf->mirrors[disk].replacement);
+                       if (rdev && !test_bit(Faulty, &rdev->flags)) {
+                               struct request_queue *q =
+                                       bdev_get_queue(rdev->bdev);
+                               if (q->merge_bvec_fn) {
+                                       bvm->bi_sector = r10_bio.devs[s].addr
+                                               + rdev->data_offset;
+                                       bvm->bi_bdev = rdev->bdev;
+                                       max = min(max, q->merge_bvec_fn(
+                                                         q, bvm, biovec));
+                               }
+                       }
+               }
+               rcu_read_unlock();
+       }
+       return max;
 }
 
 /*
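
For reference: the clamp above keeps a merged request inside a single chunk. raid10 only accepts power-of-two chunk sizes (setup_conf() rejects anything else), so masking the sector with chunk_sectors - 1 gives the offset within the chunk, and a request that would spill past the chunk boundary is trimmed to zero. When near_copies == raid_disks every disk carries every block, so there is no boundary to respect and max starts at the full bv_len. A standalone sketch of that arithmetic, using a hypothetical helper name that is not part of the patch:

    #include <stdio.h>

    /* Mirrors the end-of-chunk clamp in raid10_mergeable_bvec();
     * assumes chunk_sectors is a power of two, which raid10's
     * setup_conf() enforces. */
    static int bytes_to_chunk_end(unsigned int chunk_sectors,
                                  unsigned long long sector,
                                  unsigned int bio_sectors)
    {
            /* offset of this request within its chunk, in sectors */
            long long in_chunk = (long long)(sector & (chunk_sectors - 1));
            /* sectors left in the chunk after the bio built so far */
            long long remaining = (long long)chunk_sectors
                                  - (in_chunk + bio_sectors);

            /* bio_add cannot handle a negative return */
            return remaining < 0 ? 0 : (int)(remaining * 512);
    }

    int main(void)
    {
            /* 64KiB chunks = 128 sectors; bio begins 8 sectors in */
            printf("%d\n", bytes_to_chunk_end(128, 8, 0));    /* 61440 */
            /* 120 sectors in plus a 16-sector bio crosses: 0 */
            printf("%d\n", bytes_to_chunk_end(128, 120, 16)); /* 0 */
            return 0;
    }

When mddev->merge_check_needed is set, the loop then takes the min of this value and whatever each copy's (and each replacement's) underlying queue will accept via its own merge_bvec_fn, so the array never builds a bio that a member device would have to reject.
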
@@ -668,11 +711,12 @@ retry:
                disk = r10_bio->devs[slot].devnum;
                rdev = rcu_dereference(conf->mirrors[disk].replacement);
                if (rdev == NULL || test_bit(Faulty, &rdev->flags) ||
+                   test_bit(Unmerged, &rdev->flags) ||
                    r10_bio->devs[slot].addr + sectors > rdev->recovery_offset)
                        rdev = rcu_dereference(conf->mirrors[disk].rdev);
-               if (rdev == NULL)
-                       continue;
-               if (test_bit(Faulty, &rdev->flags))
+               if (rdev == NULL ||
+                   test_bit(Faulty, &rdev->flags) ||
+                   test_bit(Unmerged, &rdev->flags))
                        continue;
                if (!test_bit(In_sync, &rdev->flags) &&
                    r10_bio->devs[slot].addr + sectors > rdev->recovery_offset)
@@ -1134,12 +1178,14 @@ retry_write:
                        blocked_rdev = rrdev;
                        break;
                }
-               if (rrdev && test_bit(Faulty, &rrdev->flags))
+               if (rrdev && (test_bit(Faulty, &rrdev->flags)
+                             || test_bit(Unmerged, &rrdev->flags)))
                        rrdev = NULL;
 
                r10_bio->devs[i].bio = NULL;
                r10_bio->devs[i].repl_bio = NULL;
-               if (!rdev || test_bit(Faulty, &rdev->flags)) {
+               if (!rdev || test_bit(Faulty, &rdev->flags) ||
+                   test_bit(Unmerged, &rdev->flags)) {
                        set_bit(R10BIO_Degraded, &r10_bio->state);
                        continue;
                }
@@ -1490,6 +1536,7 @@ static int raid10_add_disk(struct mddev *mddev, struct md_rdev *rdev)
        int mirror;
        int first = 0;
        int last = conf->raid_disks - 1;
+       struct request_queue *q = bdev_get_queue(rdev->bdev);
 
        if (mddev->recovery_cp < MaxSector)
                /* only hot-add to in-sync arrays, as recovery is
@@ -1502,6 +1549,11 @@ static int raid10_add_disk(struct mddev *mddev, struct md_rdev *rdev)
        if (rdev->raid_disk >= 0)
                first = last = rdev->raid_disk;
 
+       if (q->merge_bvec_fn) {
+               set_bit(Unmerged, &rdev->flags);
+               mddev->merge_check_needed = 1;
+       }
+
        if (rdev->saved_raid_disk >= first &&
            conf->mirrors[rdev->saved_raid_disk].rdev == NULL)
                mirror = rdev->saved_raid_disk;
@@ -1521,11 +1573,6 @@ static int raid10_add_disk(struct mddev *mddev, struct md_rdev *rdev)
                        err = 0;
                        disk_stack_limits(mddev->gendisk, rdev->bdev,
                                          rdev->data_offset << 9);
-                       if (rdev->bdev->bd_disk->queue->merge_bvec_fn) {
-                               blk_queue_max_segments(mddev->queue, 1);
-                               blk_queue_segment_boundary(mddev->queue,
-                                                          PAGE_CACHE_SIZE - 1);
-                       }
                        conf->fullsync = 1;
                        rcu_assign_pointer(p->replacement, rdev);
                        break;
@@ -1533,17 +1580,6 @@ static int raid10_add_disk(struct mddev *mddev, struct md_rdev *rdev)
 
                disk_stack_limits(mddev->gendisk, rdev->bdev,
                                  rdev->data_offset << 9);
-               /* as we don't honour merge_bvec_fn, we must
-                * never risk violating it, so limit
-                * ->max_segments to one lying with a single
-                * page, as a one page request is never in
-                * violation.
-                */
-               if (rdev->bdev->bd_disk->queue->merge_bvec_fn) {
-                       blk_queue_max_segments(mddev->queue, 1);
-                       blk_queue_segment_boundary(mddev->queue,
-                                                  PAGE_CACHE_SIZE - 1);
-               }
 
                p->head_position = 0;
                p->recovery_disabled = mddev->recovery_disabled - 1;
@@ -1554,7 +1590,19 @@ static int raid10_add_disk(struct mddev *mddev, struct md_rdev *rdev)
                rcu_assign_pointer(p->rdev, rdev);
                break;
        }
-
+       if (err == 0 && test_bit(Unmerged, &rdev->flags)) {
+               /* Some requests might not have seen this new
+                * merge_bvec_fn.  We must wait for them to complete
+                * before the device is fully merged into the array.
+                * First make sure any code which has tested our merge
+                * function has finished submitting its request, then
+                * wait for all outstanding requests to complete.
+                */
+               synchronize_sched();
+               raise_barrier(conf, 0);
+               lower_barrier(conf);
+               clear_bit(Unmerged, &rdev->flags);
+       }
        md_integrity_add_rdev(rdev, mddev);
        print_conf(conf);
        return err;
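
The Unmerged handshake above is a publish-then-drain pattern: the flag is set before the rdev is published, synchronize_sched() lets any CPU that sampled the old state finish submitting its request, and the raise_barrier()/lower_barrier() pair drains requests built before the member's merge_bvec_fn could be consulted; only then is the flag cleared. A rough userspace analogue, purely illustrative (it does not model the kernel primitives exactly):

    #include <pthread.h>
    #include <stdatomic.h>

    static atomic_int unmerged = 1;   /* new device not yet safe to use */
    static pthread_rwlock_t inflight = PTHREAD_RWLOCK_INITIALIZER;

    /* I/O path: hold a read lock for the life of a request and skip
     * the device while the flag is set, as the Unmerged tests do. */
    static void submit_request(void)
    {
            pthread_rwlock_rdlock(&inflight);
            if (!atomic_load(&unmerged)) {
                    /* ...request may be routed to the new device... */
            }
            pthread_rwlock_unlock(&inflight);
    }

    /* hot-add path: wait out every in-flight request, then publish */
    static void finish_hot_add(void)
    {
            pthread_rwlock_wrlock(&inflight); /* ~ sync + barrier */
            pthread_rwlock_unlock(&inflight);
            atomic_store(&unmerged, 0);       /* ~ clear_bit(Unmerged) */
    }

    int main(void)
    {
            finish_hot_add();
            submit_request(); /* now sees the device as fully merged */
            return 0;
    }
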
@@ -1740,6 +1788,7 @@ static void sync_request_write(struct mddev *mddev, struct r10bio *r10_bio)
        struct r10conf *conf = mddev->private;
        int i, first;
        struct bio *tbio, *fbio;
+       int vcnt;
 
        atomic_set(&r10_bio->remaining, 1);
 
@@ -1754,10 +1803,10 @@ static void sync_request_write(struct mddev *mddev, struct r10bio *r10_bio)
        first = i;
        fbio = r10_bio->devs[i].bio;
 
+       vcnt = (r10_bio->sectors + (PAGE_SIZE >> 9) - 1) >> (PAGE_SHIFT - 9);
        /* now find blocks with errors */
        for (i=0 ; i < conf->copies ; i++) {
                int  j, d;
-               int vcnt = r10_bio->sectors >> (PAGE_SHIFT-9);
 
                tbio = r10_bio->devs[i].bio;
 
@@ -1773,7 +1822,7 @@ static void sync_request_write(struct mddev *mddev, struct r10bio *r10_bio)
                        for (j = 0; j < vcnt; j++)
                                if (memcmp(page_address(fbio->bi_io_vec[j].bv_page),
                                           page_address(tbio->bi_io_vec[j].bv_page),
-                                          PAGE_SIZE))
+                                          fbio->bi_io_vec[j].bv_len))
                                        break;
                        if (j == vcnt)
                                continue;
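
The two hunks above cooperate: vcnt is now rounded up, so a resync range that is not a whole number of pages still has its tail page compared, and the memcmp() is bounded by bv_len instead of PAGE_SIZE so the partial last page is only compared as far as data was actually read. With hypothetical numbers:

    #include <stdio.h>

    int main(void)
    {
            /* 4KiB pages hold 8 sectors; consider a 9-sector range */
            int sectors = 9;
            int shift = 12 - 9;                         /* PAGE_SHIFT - 9 */
            int old_vcnt = sectors >> shift;            /* 1: tail lost */
            int new_vcnt = (sectors + (1 << shift) - 1) >> shift;  /* 2 */

            printf("old=%d new=%d\n", old_vcnt, new_vcnt);
            /* the second bio_vec holds only 512 bytes, so the compare
             * must stop at bv_len rather than PAGE_SIZE */
            return 0;
    }
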
@@ -1823,7 +1872,6 @@ static void sync_request_write(struct mddev *mddev, struct r10bio *r10_bio)
         */
        for (i = 0; i < conf->copies; i++) {
                int j, d;
-               int vcnt = r10_bio->sectors >> (PAGE_SHIFT-9);
 
                tbio = r10_bio->devs[i].repl_bio;
                if (!tbio || !tbio->bi_end_io)
@@ -2098,6 +2146,7 @@ static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10
                        d = r10_bio->devs[sl].devnum;
                        rdev = rcu_dereference(conf->mirrors[d].rdev);
                        if (rdev &&
+                           !test_bit(Unmerged, &rdev->flags) &&
                            test_bit(In_sync, &rdev->flags) &&
                            is_badblock(rdev, r10_bio->devs[sl].addr + sect, s,
                                        &first_bad, &bad_sectors) == 0) {
@@ -2151,6 +2200,7 @@ static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10
                        d = r10_bio->devs[sl].devnum;
                        rdev = rcu_dereference(conf->mirrors[d].rdev);
                        if (!rdev ||
+                           test_bit(Unmerged, &rdev->flags) ||
                            !test_bit(In_sync, &rdev->flags))
                                continue;
 
@@ -3114,12 +3164,40 @@ raid10_size(struct mddev *mddev, sector_t sectors, int raid_disks)
        return size << conf->chunk_shift;
 }
 
+static void calc_sectors(struct r10conf *conf, sector_t size)
+{
+       /* Calculate the number of sectors-per-device that will
+        * actually be used, and set conf->dev_sectors and
+        * conf->stride
+        */
+
+       size = size >> conf->chunk_shift;
+       sector_div(size, conf->far_copies);
+       size = size * conf->raid_disks;
+       sector_div(size, conf->near_copies);
+       /* 'size' is now the number of chunks in the array */
+       /* calculate "used chunks per device" */
+       size = size * conf->copies;
+
+       /* We need to round up when dividing by raid_disks to
+        * get the stride size.
+        */
+       size = DIV_ROUND_UP_SECTOR_T(size, conf->raid_disks);
+
+       conf->dev_sectors = size << conf->chunk_shift;
+
+       if (conf->far_offset)
+               conf->stride = 1 << conf->chunk_shift;
+       else {
+               sector_div(size, conf->far_copies);
+               conf->stride = size << conf->chunk_shift;
+       }
+}
 
 static struct r10conf *setup_conf(struct mddev *mddev)
 {
        struct r10conf *conf = NULL;
        int nc, fc, fo;
-       sector_t stride, size;
        int err = -EINVAL;
 
        if (mddev->new_chunk_sectors < (PAGE_SIZE >> 9) ||
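
calc_sectors() is the sizing arithmetic that used to live inline in setup_conf() (removed in the next hunk), factored out so raid10_resize() can reuse it. A userspace rendering for experimenting with the rounding; sector_div() and DIV_ROUND_UP_SECTOR_T() become plain 64-bit arithmetic, struct geo is a hypothetical stand-in for the r10conf fields, and copies is assumed to be near_copies * far_copies as setup_conf() computes it:

    #include <stdio.h>
    #include <stdint.h>

    struct geo {
            int raid_disks, near_copies, far_copies, far_offset;
            int chunk_shift;           /* log2(chunk size in sectors) */
            uint64_t dev_sectors, stride;
    };

    static void calc(struct geo *g, uint64_t size)
    {
            int copies = g->near_copies * g->far_copies;

            size >>= g->chunk_shift;               /* chunks per device */
            size /= g->far_copies;
            size = size * g->raid_disks / g->near_copies;
            /* 'size' is now the number of data chunks in the array;
             * multiply out the copies, then round up per device */
            size *= copies;
            size = (size + g->raid_disks - 1) / g->raid_disks;

            g->dev_sectors = size << g->chunk_shift;
            if (g->far_offset)
                    g->stride = (uint64_t)1 << g->chunk_shift;
            else
                    g->stride = (size / g->far_copies) << g->chunk_shift;
    }

    int main(void)
    {
            /* 4 disks, classic near=2 raid10, 512KiB chunks (shift 10),
             * 1TiB (2^31 sector) members */
            struct geo g = { 4, 2, 1, 0, 10, 0, 0 };

            calc(&g, (uint64_t)1 << 31);
            printf("dev_sectors=%llu stride=%llu\n",
                   (unsigned long long)g.dev_sectors,
                   (unsigned long long)g.stride);
            return 0;
    }
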
@@ -3169,28 +3247,7 @@ static struct r10conf *setup_conf(struct mddev *mddev)
        if (!conf->r10bio_pool)
                goto out;
 
-       size = mddev->dev_sectors >> conf->chunk_shift;
-       sector_div(size, fc);
-       size = size * conf->raid_disks;
-       sector_div(size, nc);
-       /* 'size' is now the number of chunks in the array */
-       /* calculate "used chunks per device" in 'stride' */
-       stride = size * conf->copies;
-
-       /* We need to round up when dividing by raid_disks to
-        * get the stride size.
-        */
-       stride += conf->raid_disks - 1;
-       sector_div(stride, conf->raid_disks);
-
-       conf->dev_sectors = stride << conf->chunk_shift;
-
-       if (fo)
-               stride = 1;
-       else
-               sector_div(stride, fc);
-       conf->stride = stride << conf->chunk_shift;
-
+       calc_sectors(conf, mddev->dev_sectors);
 
        spin_lock_init(&conf->device_lock);
        INIT_LIST_HEAD(&conf->retry_list);
@@ -3273,15 +3330,6 @@ static int run(struct mddev *mddev)
 
                disk_stack_limits(mddev->gendisk, rdev->bdev,
                                  rdev->data_offset << 9);
-               /* as we don't honour merge_bvec_fn, we must never risk
-                * violating it, so limit max_segments to 1 lying
-                * within a single page.
-                */
-               if (rdev->bdev->bd_disk->queue->merge_bvec_fn) {
-                       blk_queue_max_segments(mddev->queue, 1);
-                       blk_queue_segment_boundary(mddev->queue,
-                                                  PAGE_CACHE_SIZE - 1);
-               }
 
                disk->head_position = 0;
        }
@@ -3345,8 +3393,7 @@ static int run(struct mddev *mddev)
                        mddev->queue->backing_dev_info.ra_pages = 2* stripe;
        }
 
-       if (conf->near_copies < conf->raid_disks)
-               blk_queue_merge_bvec(mddev->queue, raid10_mergeable_bvec);
+       blk_queue_merge_bvec(mddev->queue, raid10_mergeable_bvec);
 
        if (md_integrity_register(mddev))
                goto out_free_conf;
@@ -3396,6 +3443,44 @@ static void raid10_quiesce(struct mddev *mddev, int state)
        }
 }
 
+static int raid10_resize(struct mddev *mddev, sector_t sectors)
+{
+       /* Resize of 'far' arrays is not supported.
+        * For 'near' and 'offset' arrays we can set the
+        * number of sectors used to be an appropriate multiple
+        * of the chunk size.
+        * For 'offset', this is far_copies*chunksize.
+        * For 'near' the multiplier is the LCM of
+        * near_copies and raid_disks.
+        * So if far_copies > 1 && !far_offset, fail.
+        * Else find LCM(raid_disks, near_copies)*far_copies and
+        * multiply by chunk_size.  Then round to this number.
+        * This is mostly done by raid10_size()
+        */
+       struct r10conf *conf = mddev->private;
+       sector_t oldsize, size;
+
+       if (conf->far_copies > 1 && !conf->far_offset)
+               return -EINVAL;
+
+       oldsize = raid10_size(mddev, 0, 0);
+       size = raid10_size(mddev, sectors, 0);
+       md_set_array_sectors(mddev, size);
+       if (mddev->array_sectors > size)
+               return -EINVAL;
+       set_capacity(mddev->gendisk, mddev->array_sectors);
+       revalidate_disk(mddev->gendisk);
+       if (sectors > mddev->dev_sectors &&
+           mddev->recovery_cp > oldsize) {
+               mddev->recovery_cp = oldsize;
+               set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
+       }
+       calc_sectors(conf, sectors);
+       mddev->dev_sectors = conf->dev_sectors;
+       mddev->resync_max_sectors = size;
+       return 0;
+}
+
 static void *raid10_takeover_raid0(struct mddev *mddev)
 {
        struct md_rdev *rdev;
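
The rounding rule stated in the raid10_resize() comment, LCM(raid_disks, near_copies) * far_copies chunks, can be made concrete with a hypothetical helper (in the kernel the real rounding is done by raid10_size()):

    #include <stdint.h>
    #include <stdio.h>

    static uint64_t gcd64(uint64_t a, uint64_t b)
    {
            while (b) {
                    uint64_t t = a % b;
                    a = b;
                    b = t;
            }
            return a;
    }

    /* Quantum, in chunks, that the usable size is rounded to for the
     * 'near' and 'offset' layouts; mirrors the comment above, not any
     * function in md. */
    static uint64_t size_quantum_chunks(uint64_t raid_disks,
                                        uint64_t near_copies,
                                        uint64_t far_copies)
    {
            uint64_t lcm = raid_disks / gcd64(raid_disks, near_copies)
                           * near_copies;
            return lcm * far_copies;
    }

    int main(void)
    {
            /* classic 4-disk near=2 layout: LCM(4,2) * 1 = 4 chunks,
             * i.e. one full stripe */
            printf("%llu\n",
                   (unsigned long long)size_quantum_chunks(4, 2, 1));
            return 0;
    }
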
@@ -3465,6 +3550,7 @@ static struct md_personality raid10_personality =
        .sync_request   = sync_request,
        .quiesce        = raid10_quiesce,
        .size           = raid10_size,
+       .resize         = raid10_resize,
        .takeover       = raid10_takeover,
 };