drivers/md/bitmap.c
1 /*
2  * bitmap.c two-level bitmap (C) Peter T. Breuer (ptb@ot.uc3m.es) 2003
3  *
4  * bitmap_create  - sets up the bitmap structure
5  * bitmap_destroy - destroys the bitmap structure
6  *
7  * additions, Copyright (C) 2003-2004, Paul Clements, SteelEye Technology, Inc.:
8  * - added disk storage for bitmap
9  * - changes to allow various bitmap chunk sizes
10  */
11
12 /*
13  * Still to do:
14  *
15  * flush after percent set rather than just time based. (maybe both).
16  */
17
18 #include <linux/blkdev.h>
19 #include <linux/module.h>
20 #include <linux/errno.h>
21 #include <linux/slab.h>
22 #include <linux/init.h>
23 #include <linux/timer.h>
24 #include <linux/sched.h>
25 #include <linux/list.h>
26 #include <linux/file.h>
27 #include <linux/mount.h>
28 #include <linux/buffer_head.h>
29 #include <linux/seq_file.h>
30 #include "md.h"
31 #include "bitmap.h"
32
33 static inline char *bmname(struct bitmap *bitmap)
34 {
35         return bitmap->mddev ? mdname(bitmap->mddev) : "mdX";
36 }
37
38 /*
39  * check a page and, if necessary, allocate it (or hijack it if the alloc fails)
40  *
41  * 1) check to see if this page is allocated, if it's not then try to alloc
42  * 2) if the alloc fails, set the page's hijacked flag so we'll use the
43  *    page pointer directly as a counter
44  *
45  * if we find our page, we increment the page's refcount so that it stays
46  * allocated while we're using it
47  */
48 static int bitmap_checkpage(struct bitmap *bitmap,
49                             unsigned long page, int create)
50 __releases(bitmap->lock)
51 __acquires(bitmap->lock)
52 {
53         unsigned char *mappage;
54
55         if (page >= bitmap->pages) {
56                 /* This can happen if bitmap_start_sync goes beyond
57                  * end-of-device while looking for a whole page.
58                  * It is harmless.
59                  */
60                 return -EINVAL;
61         }
62
63         if (bitmap->bp[page].hijacked) /* it's hijacked, don't try to alloc */
64                 return 0;
65
66         if (bitmap->bp[page].map) /* page is already allocated, just return */
67                 return 0;
68
69         if (!create)
70                 return -ENOENT;
71
72         /* this page has not been allocated yet */
73
74         spin_unlock_irq(&bitmap->lock);
75         mappage = kzalloc(PAGE_SIZE, GFP_NOIO);
76         spin_lock_irq(&bitmap->lock);
77
78         if (mappage == NULL) {
79                 pr_debug("%s: bitmap map page allocation failed, hijacking\n",
80                          bmname(bitmap));
81                 /* failed - set the hijacked flag so that we can use the
82                  * pointer as a counter */
83                 if (!bitmap->bp[page].map)
84                         bitmap->bp[page].hijacked = 1;
85         } else if (bitmap->bp[page].map ||
86                    bitmap->bp[page].hijacked) {
87                 /* somebody beat us to getting the page */
88                 kfree(mappage);
89                 return 0;
90         } else {
91
92                 /* no page was in place and we have one, so install it */
93
94                 bitmap->bp[page].map = mappage;
95                 bitmap->missing_pages--;
96         }
97         return 0;
98 }
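/*
 * A note on the "hijacked" fallback used above: when kzalloc() fails, the
 * bp[page].map pointer field is itself reused as raw storage for two
 * counters (see bitmap_get_counter() below), one for each half of the
 * range the page would have covered.  An allocation failure therefore
 * costs counter resolution rather than failing the write path.
 * Rough layout, assuming bitmap_counter_t is 16 bits:
 *   (bitmap_counter_t *)&bp[page].map  ->  [ counter 0 | counter 1 ]
 */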
99
100 /* if page is completely empty, put it back on the free list, or dealloc it */
101 /* if page was hijacked, unmark the flag so it might get allocated next time */
102 /* Note: lock should be held when calling this */
103 static void bitmap_checkfree(struct bitmap *bitmap, unsigned long page)
104 {
105         char *ptr;
106
107         if (bitmap->bp[page].count) /* page is still busy */
108                 return;
109
110         /* page is no longer in use, it can be released */
111
112         if (bitmap->bp[page].hijacked) { /* page was hijacked, undo this now */
113                 bitmap->bp[page].hijacked = 0;
114                 bitmap->bp[page].map = NULL;
115         } else {
116                 /* normal case, free the page */
117                 ptr = bitmap->bp[page].map;
118                 bitmap->bp[page].map = NULL;
119                 bitmap->missing_pages++;
120                 kfree(ptr);
121         }
122 }
123
124 /*
125  * bitmap file handling - read and write the bitmap file and its superblock
126  */
127
128 /*
129  * basic page I/O operations
130  */
131
132 /* IO operations when bitmap is stored near all superblocks */
133 static struct page *read_sb_page(struct mddev *mddev, loff_t offset,
134                                  struct page *page,
135                                  unsigned long index, int size)
136 {
137         /* choose a good rdev and read the page from there */
138
139         struct md_rdev *rdev;
140         sector_t target;
141         int did_alloc = 0;
142
143         if (!page) {
144                 page = alloc_page(GFP_KERNEL);
145                 if (!page)
146                         return ERR_PTR(-ENOMEM);
147                 did_alloc = 1;
148         }
149
150         rdev_for_each(rdev, mddev) {
151                 if (! test_bit(In_sync, &rdev->flags)
152                     || test_bit(Faulty, &rdev->flags))
153                         continue;
154
155                 target = offset + index * (PAGE_SIZE/512);
156
157                 if (sync_page_io(rdev, target,
158                                  roundup(size, bdev_logical_block_size(rdev->bdev)),
159                                  page, READ, true)) {
160                         page->index = index;
161                         attach_page_buffers(page, NULL); /* so that free_buffer will
162                                                           * quietly no-op */
163                         return page;
164                 }
165         }
166         if (did_alloc)
167                 put_page(page);
168         return ERR_PTR(-EIO);
169
170 }
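/*
 * The read target above is in 512-byte sectors:
 *   target = offset + index * (PAGE_SIZE / 512)
 * For illustration only, with 4K pages and offset == 8 the page with
 * index 2 is read starting at sector 8 + 2*8 = 24.  The length is
 * rounded up to the device's logical block size so the read never
 * covers a partial block.
 */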
171
172 static struct md_rdev *next_active_rdev(struct md_rdev *rdev, struct mddev *mddev)
173 {
174         /* Iterate the disks of an mddev, using rcu to protect access to the
175          * linked list, and raising the refcount of devices we return to ensure
176          * they don't disappear while in use.
177          * As devices are only added or removed when raid_disk is < 0 and
178          * nr_pending is 0 and In_sync is clear, the entries we return will
179          * still be in the same position on the list when we re-enter
180          * list_for_each_continue_rcu.
181          */
182         struct list_head *pos;
183         rcu_read_lock();
184         if (rdev == NULL)
185                 /* start at the beginning */
186                 pos = &mddev->disks;
187         else {
188                 /* release the previous rdev and start from there. */
189                 rdev_dec_pending(rdev, mddev);
190                 pos = &rdev->same_set;
191         }
192         list_for_each_continue_rcu(pos, &mddev->disks) {
193                 rdev = list_entry(pos, struct md_rdev, same_set);
194                 if (rdev->raid_disk >= 0 &&
195                     !test_bit(Faulty, &rdev->flags)) {
196                         /* this is a usable device */
197                         atomic_inc(&rdev->nr_pending);
198                         rcu_read_unlock();
199                         return rdev;
200                 }
201         }
202         rcu_read_unlock();
203         return NULL;
204 }
205
206 static int write_sb_page(struct bitmap *bitmap, struct page *page, int wait)
207 {
208         struct md_rdev *rdev = NULL;
209         struct block_device *bdev;
210         struct mddev *mddev = bitmap->mddev;
211
212         while ((rdev = next_active_rdev(rdev, mddev)) != NULL) {
213                 int size = PAGE_SIZE;
214                 loff_t offset = mddev->bitmap_info.offset;
215
216                 bdev = (rdev->meta_bdev) ? rdev->meta_bdev : rdev->bdev;
217
218                 if (page->index == bitmap->file_pages-1)
219                         size = roundup(bitmap->last_page_size,
220                                        bdev_logical_block_size(bdev));
221                 /* Just make sure we aren't corrupting data or
222                  * metadata
223                  */
224                 if (mddev->external) {
225                         /* Bitmap could be anywhere. */
226                         if (rdev->sb_start + offset + (page->index
227                                                        * (PAGE_SIZE/512))
228                             > rdev->data_offset
229                             &&
230                             rdev->sb_start + offset
231                             < (rdev->data_offset + mddev->dev_sectors
232                              + (PAGE_SIZE/512)))
233                                 goto bad_alignment;
234                 } else if (offset < 0) {
235                         /* DATA  BITMAP METADATA  */
236                         if (offset
237                             + (long)(page->index * (PAGE_SIZE/512))
238                             + size/512 > 0)
239                                 /* bitmap runs into metadata */
240                                 goto bad_alignment;
241                         if (rdev->data_offset + mddev->dev_sectors
242                             > rdev->sb_start + offset)
243                             /* data runs into bitmap */
244                                 goto bad_alignment;
245                 } else if (rdev->sb_start < rdev->data_offset) {
246                         /* METADATA BITMAP DATA */
247                         if (rdev->sb_start
248                             + offset
249                             + page->index*(PAGE_SIZE/512) + size/512
250                             > rdev->data_offset)
251                                 /* bitmap runs into data */
252                                 goto bad_alignment;
253                 } else {
254                         /* DATA METADATA BITMAP - no problems */
255                 }
256                 md_super_write(mddev, rdev,
257                                rdev->sb_start + offset
258                                + page->index * (PAGE_SIZE/512),
259                                size,
260                                page);
261         }
262
263         if (wait)
264                 md_super_wait(mddev);
265         return 0;
266
267  bad_alignment:
268         return -EINVAL;
269 }
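/*
 * The alignment checks above cover the possible on-disk layouts:
 *   external metadata:         bitmap may sit anywhere, so check both ends
 *   offset < 0:                DATA ... BITMAP METADATA (bitmap before the sb)
 *   sb_start < data_offset:    METADATA BITMAP DATA     (bitmap after the sb)
 *   otherwise:                 DATA METADATA BITMAP     (nothing to overrun)
 * Any write that would spill into data or metadata is refused with -EINVAL.
 */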
270
271 static void bitmap_file_kick(struct bitmap *bitmap);
272 /*
273  * write out a page to a file
274  */
275 static void write_page(struct bitmap *bitmap, struct page *page, int wait)
276 {
277         struct buffer_head *bh;
278
279         if (bitmap->file == NULL) {
280                 switch (write_sb_page(bitmap, page, wait)) {
281                 case -EINVAL:
282                         bitmap->flags |= BITMAP_WRITE_ERROR;
283                 }
284         } else {
285
286                 bh = page_buffers(page);
287
288                 while (bh && bh->b_blocknr) {
289                         atomic_inc(&bitmap->pending_writes);
290                         set_buffer_locked(bh);
291                         set_buffer_mapped(bh);
292                         submit_bh(WRITE | REQ_SYNC, bh);
293                         bh = bh->b_this_page;
294                 }
295
296                 if (wait)
297                         wait_event(bitmap->write_wait,
298                                    atomic_read(&bitmap->pending_writes)==0);
299         }
300         if (bitmap->flags & BITMAP_WRITE_ERROR)
301                 bitmap_file_kick(bitmap);
302 }
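/*
 * write_page() has two back ends: a file-backed bitmap submits the page's
 * buffer_heads directly, using the b_blocknr values that read_page()
 * (below) recorded via bmap(), so the filesystem is bypassed; an internal
 * bitmap goes through write_sb_page() to every active member device.
 * Failures on either path end up setting BITMAP_WRITE_ERROR (see
 * end_bitmap_write() below for the file case), which gets the bitmap
 * kicked out of the array.
 */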
303
304 static void end_bitmap_write(struct buffer_head *bh, int uptodate)
305 {
306         struct bitmap *bitmap = bh->b_private;
307         unsigned long flags;
308
309         if (!uptodate) {
310                 spin_lock_irqsave(&bitmap->lock, flags);
311                 bitmap->flags |= BITMAP_WRITE_ERROR;
312                 spin_unlock_irqrestore(&bitmap->lock, flags);
313         }
314         if (atomic_dec_and_test(&bitmap->pending_writes))
315                 wake_up(&bitmap->write_wait);
316 }
317
318 /* copied from buffer.c */
319 static void
320 __clear_page_buffers(struct page *page)
321 {
322         ClearPagePrivate(page);
323         set_page_private(page, 0);
324         page_cache_release(page);
325 }
326 static void free_buffers(struct page *page)
327 {
328         struct buffer_head *bh = page_buffers(page);
329
330         while (bh) {
331                 struct buffer_head *next = bh->b_this_page;
332                 free_buffer_head(bh);
333                 bh = next;
334         }
335         __clear_page_buffers(page);
336         put_page(page);
337 }
338
339 /* read a page from a file.
340  * We both read the page, and attach buffers to the page to record the
341  * address of each block (using bmap).  These addresses will be used
342  * to write the block later, completely bypassing the filesystem.
343  * This usage is similar to how swap files are handled, and allows us
344  * to write to a file without worrying about memory allocation failing.
345  */
346 static struct page *read_page(struct file *file, unsigned long index,
347                               struct bitmap *bitmap,
348                               unsigned long count)
349 {
350         struct page *page = NULL;
351         struct inode *inode = file->f_path.dentry->d_inode;
352         struct buffer_head *bh;
353         sector_t block;
354
355         pr_debug("read bitmap file (%dB @ %llu)\n", (int)PAGE_SIZE,
356                  (unsigned long long)index << PAGE_SHIFT);
357
358         page = alloc_page(GFP_KERNEL);
359         if (!page)
360                 page = ERR_PTR(-ENOMEM);
361         if (IS_ERR(page))
362                 goto out;
363
364         bh = alloc_page_buffers(page, 1<<inode->i_blkbits, 0);
365         if (!bh) {
366                 put_page(page);
367                 page = ERR_PTR(-ENOMEM);
368                 goto out;
369         }
370         attach_page_buffers(page, bh);
371         block = index << (PAGE_SHIFT - inode->i_blkbits);
372         while (bh) {
373                 if (count == 0)
374                         bh->b_blocknr = 0;
375                 else {
376                         bh->b_blocknr = bmap(inode, block);
377                         if (bh->b_blocknr == 0) {
378                                 /* Cannot use this file! */
379                                 free_buffers(page);
380                                 page = ERR_PTR(-EINVAL);
381                                 goto out;
382                         }
383                         bh->b_bdev = inode->i_sb->s_bdev;
384                         if (count < (1<<inode->i_blkbits))
385                                 count = 0;
386                         else
387                                 count -= (1<<inode->i_blkbits);
388
389                         bh->b_end_io = end_bitmap_write;
390                         bh->b_private = bitmap;
391                         atomic_inc(&bitmap->pending_writes);
392                         set_buffer_locked(bh);
393                         set_buffer_mapped(bh);
394                         submit_bh(READ, bh);
395                 }
396                 block++;
397                 bh = bh->b_this_page;
398         }
399         page->index = index;
400
401         wait_event(bitmap->write_wait,
402                    atomic_read(&bitmap->pending_writes)==0);
403         if (bitmap->flags & BITMAP_WRITE_ERROR) {
404                 free_buffers(page);
405                 page = ERR_PTR(-EIO);
406         }
407 out:
408         if (IS_ERR(page))
409                 printk(KERN_ALERT "md: bitmap read error: (%dB @ %llu): %ld\n",
410                         (int)PAGE_SIZE,
411                         (unsigned long long)index << PAGE_SHIFT,
412                         PTR_ERR(page));
413         return page;
414 }
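/*
 * The block number given to each buffer_head above comes from bmap(),
 * indexed as  block = index << (PAGE_SHIFT - i_blkbits).  For
 * illustration only, with 4K pages on a 1K-block filesystem, file page 3
 * maps to file blocks 12..15.  A bmap() result of 0 means a hole
 * (unallocated block), so such a file cannot be used as a bitmap.
 */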
415
416 /*
417  * bitmap file superblock operations
418  */
419
420 /* update the event counter and sync the superblock to disk */
421 void bitmap_update_sb(struct bitmap *bitmap)
422 {
423         bitmap_super_t *sb;
424
425         if (!bitmap || !bitmap->mddev) /* no bitmap for this array */
426                 return;
427         if (bitmap->mddev->bitmap_info.external)
428                 return;
429         if (!bitmap->sb_page) /* no superblock */
430                 return;
431         sb = kmap_atomic(bitmap->sb_page);
432         sb->events = cpu_to_le64(bitmap->mddev->events);
433         if (bitmap->mddev->events < bitmap->events_cleared)
434                 /* rocking back to read-only */
435                 bitmap->events_cleared = bitmap->mddev->events;
436         sb->events_cleared = cpu_to_le64(bitmap->events_cleared);
437         sb->state = cpu_to_le32(bitmap->flags);
438         /* Just in case these have been changed via sysfs: */
439         sb->daemon_sleep = cpu_to_le32(bitmap->mddev->bitmap_info.daemon_sleep/HZ);
440         sb->write_behind = cpu_to_le32(bitmap->mddev->bitmap_info.max_write_behind);
441         kunmap_atomic(sb);
442         write_page(bitmap, bitmap->sb_page, 1);
443 }
444
445 /* print out the bitmap file superblock */
446 void bitmap_print_sb(struct bitmap *bitmap)
447 {
448         bitmap_super_t *sb;
449
450         if (!bitmap || !bitmap->sb_page)
451                 return;
452         sb = kmap_atomic(bitmap->sb_page);
453         printk(KERN_DEBUG "%s: bitmap file superblock:\n", bmname(bitmap));
454         printk(KERN_DEBUG "         magic: %08x\n", le32_to_cpu(sb->magic));
455         printk(KERN_DEBUG "       version: %d\n", le32_to_cpu(sb->version));
456         printk(KERN_DEBUG "          uuid: %08x.%08x.%08x.%08x\n",
457                                         *(__u32 *)(sb->uuid+0),
458                                         *(__u32 *)(sb->uuid+4),
459                                         *(__u32 *)(sb->uuid+8),
460                                         *(__u32 *)(sb->uuid+12));
461         printk(KERN_DEBUG "        events: %llu\n",
462                         (unsigned long long) le64_to_cpu(sb->events));
463         printk(KERN_DEBUG "events cleared: %llu\n",
464                         (unsigned long long) le64_to_cpu(sb->events_cleared));
465         printk(KERN_DEBUG "         state: %08x\n", le32_to_cpu(sb->state));
466         printk(KERN_DEBUG "     chunksize: %d B\n", le32_to_cpu(sb->chunksize));
467         printk(KERN_DEBUG "  daemon sleep: %ds\n", le32_to_cpu(sb->daemon_sleep));
468         printk(KERN_DEBUG "     sync size: %llu KB\n",
469                         (unsigned long long)le64_to_cpu(sb->sync_size)/2);
470         printk(KERN_DEBUG "max write behind: %d\n", le32_to_cpu(sb->write_behind));
471         kunmap_atomic(sb);
472 }
473
474 /*
475  * bitmap_new_disk_sb
476  * @bitmap
477  *
478  * This function is somewhat the reverse of bitmap_read_sb.  bitmap_read_sb
479  * reads and verifies the on-disk bitmap superblock and populates bitmap_info.
480  * This function verifies 'bitmap_info' and populates the on-disk bitmap
481  * structure, which is to be written to disk.
482  *
483  * Returns: 0 on success, -Exxx on error
484  */
485 static int bitmap_new_disk_sb(struct bitmap *bitmap)
486 {
487         bitmap_super_t *sb;
488         unsigned long chunksize, daemon_sleep, write_behind;
489         int err = -EINVAL;
490
491         bitmap->sb_page = alloc_page(GFP_KERNEL);
492         if (IS_ERR(bitmap->sb_page)) {
493                 err = PTR_ERR(bitmap->sb_page);
494                 bitmap->sb_page = NULL;
495                 return err;
496         }
497         bitmap->sb_page->index = 0;
498
499         sb = kmap_atomic(bitmap->sb_page);
500
501         sb->magic = cpu_to_le32(BITMAP_MAGIC);
502         sb->version = cpu_to_le32(BITMAP_MAJOR_HI);
503
504         chunksize = bitmap->mddev->bitmap_info.chunksize;
505         BUG_ON(!chunksize);
506         if (!is_power_of_2(chunksize)) {
507                 kunmap_atomic(sb);
508                 printk(KERN_ERR "bitmap chunksize not a power of 2\n");
509                 return -EINVAL;
510         }
511         sb->chunksize = cpu_to_le32(chunksize);
512
513         daemon_sleep = bitmap->mddev->bitmap_info.daemon_sleep;
514         if (!daemon_sleep ||
515             (daemon_sleep < 1) || (daemon_sleep > MAX_SCHEDULE_TIMEOUT)) {
516                 printk(KERN_INFO "Choosing daemon_sleep default (5 sec)\n");
517                 daemon_sleep = 5 * HZ;
518         }
519         sb->daemon_sleep = cpu_to_le32(daemon_sleep);
520         bitmap->mddev->bitmap_info.daemon_sleep = daemon_sleep;
521
522         /*
523          * FIXME: write_behind for RAID1.  If not specified, what
524          * is a good choice?  We choose COUNTER_MAX / 2 arbitrarily.
525          */
526         write_behind = bitmap->mddev->bitmap_info.max_write_behind;
527         if (write_behind > COUNTER_MAX)
528                 write_behind = COUNTER_MAX / 2;
529         sb->write_behind = cpu_to_le32(write_behind);
530         bitmap->mddev->bitmap_info.max_write_behind = write_behind;
531
532         /* keep the array size field of the bitmap superblock up to date */
533         sb->sync_size = cpu_to_le64(bitmap->mddev->resync_max_sectors);
534
535         memcpy(sb->uuid, bitmap->mddev->uuid, 16);
536
537         bitmap->flags |= BITMAP_STALE;
538         sb->state |= cpu_to_le32(BITMAP_STALE);
539         bitmap->events_cleared = bitmap->mddev->events;
540         sb->events_cleared = cpu_to_le64(bitmap->mddev->events);
541
542         bitmap->flags |= BITMAP_HOSTENDIAN;
543         sb->version = cpu_to_le32(BITMAP_MAJOR_HOSTENDIAN);
544
545         kunmap_atomic(sb);
546
547         return 0;
548 }
549
550 /* read the superblock from the bitmap file and initialize some bitmap fields */
551 static int bitmap_read_sb(struct bitmap *bitmap)
552 {
553         char *reason = NULL;
554         bitmap_super_t *sb;
555         unsigned long chunksize, daemon_sleep, write_behind;
556         unsigned long long events;
557         int err = -EINVAL;
558
559         /* page 0 is the superblock, read it... */
560         if (bitmap->file) {
561                 loff_t isize = i_size_read(bitmap->file->f_mapping->host);
562                 int bytes = isize > PAGE_SIZE ? PAGE_SIZE : isize;
563
564                 bitmap->sb_page = read_page(bitmap->file, 0, bitmap, bytes);
565         } else {
566                 bitmap->sb_page = read_sb_page(bitmap->mddev,
567                                                bitmap->mddev->bitmap_info.offset,
568                                                NULL,
569                                                0, sizeof(bitmap_super_t));
570         }
571         if (IS_ERR(bitmap->sb_page)) {
572                 err = PTR_ERR(bitmap->sb_page);
573                 bitmap->sb_page = NULL;
574                 return err;
575         }
576
577         sb = kmap_atomic(bitmap->sb_page);
578
579         chunksize = le32_to_cpu(sb->chunksize);
580         daemon_sleep = le32_to_cpu(sb->daemon_sleep) * HZ;
581         write_behind = le32_to_cpu(sb->write_behind);
582
583         /* verify that the bitmap-specific fields are valid */
584         if (sb->magic != cpu_to_le32(BITMAP_MAGIC))
585                 reason = "bad magic";
586         else if (le32_to_cpu(sb->version) < BITMAP_MAJOR_LO ||
587                  le32_to_cpu(sb->version) > BITMAP_MAJOR_HI)
588                 reason = "unrecognized superblock version";
589         else if (chunksize < 512)
590                 reason = "bitmap chunksize too small";
591         else if (!is_power_of_2(chunksize))
592                 reason = "bitmap chunksize not a power of 2";
593         else if (daemon_sleep < 1 || daemon_sleep > MAX_SCHEDULE_TIMEOUT)
594                 reason = "daemon sleep period out of range";
595         else if (write_behind > COUNTER_MAX)
596                 reason = "write-behind limit out of range (0 - 16383)";
597         if (reason) {
598                 printk(KERN_INFO "%s: invalid bitmap file superblock: %s\n",
599                         bmname(bitmap), reason);
600                 goto out;
601         }
602
603         /* keep the array size field of the bitmap superblock up to date */
604         sb->sync_size = cpu_to_le64(bitmap->mddev->resync_max_sectors);
605
606         if (bitmap->mddev->persistent) {
607                 /*
608                  * We have a persistent array superblock, so compare the
609                  * bitmap's UUID and event counter to the mddev's
610                  */
611                 if (memcmp(sb->uuid, bitmap->mddev->uuid, 16)) {
612                         printk(KERN_INFO
613                                "%s: bitmap superblock UUID mismatch\n",
614                                bmname(bitmap));
615                         goto out;
616                 }
617                 events = le64_to_cpu(sb->events);
618                 if (events < bitmap->mddev->events) {
619                         printk(KERN_INFO
620                                "%s: bitmap file is out of date (%llu < %llu) "
621                                "-- forcing full recovery\n",
622                                bmname(bitmap), events,
623                                (unsigned long long) bitmap->mddev->events);
624                         sb->state |= cpu_to_le32(BITMAP_STALE);
625                 }
626         }
627
628         /* assign fields using values from superblock */
629         bitmap->mddev->bitmap_info.chunksize = chunksize;
630         bitmap->mddev->bitmap_info.daemon_sleep = daemon_sleep;
631         bitmap->mddev->bitmap_info.max_write_behind = write_behind;
632         bitmap->flags |= le32_to_cpu(sb->state);
633         if (le32_to_cpu(sb->version) == BITMAP_MAJOR_HOSTENDIAN)
634                 bitmap->flags |= BITMAP_HOSTENDIAN;
635         bitmap->events_cleared = le64_to_cpu(sb->events_cleared);
636         if (bitmap->flags & BITMAP_STALE)
637                 bitmap->events_cleared = bitmap->mddev->events;
638         err = 0;
639 out:
640         kunmap_atomic(sb);
641         if (err)
642                 bitmap_print_sb(bitmap);
643         return err;
644 }
645
646 enum bitmap_mask_op {
647         MASK_SET,
648         MASK_UNSET
649 };
650
651 /* record the state of the bitmap in the superblock.  Return the old value */
652 static int bitmap_mask_state(struct bitmap *bitmap, enum bitmap_state bits,
653                              enum bitmap_mask_op op)
654 {
655         bitmap_super_t *sb;
656         int old;
657
658         if (!bitmap->sb_page) /* can't set the state */
659                 return 0;
660         sb = kmap_atomic(bitmap->sb_page);
661         old = le32_to_cpu(sb->state) & bits;
662         switch (op) {
663         case MASK_SET:
664                 sb->state |= cpu_to_le32(bits);
665                 bitmap->flags |= bits;
666                 break;
667         case MASK_UNSET:
668                 sb->state &= cpu_to_le32(~bits);
669                 bitmap->flags &= ~bits;
670                 break;
671         default:
672                 BUG();
673         }
674         kunmap_atomic(sb);
675         return old;
676 }
677
678 /*
679  * general bitmap file operations
680  */
681
682 /*
683  * on-disk bitmap:
684  *
685  * Use one bit per "chunk" (block set). We do the disk I/O on the bitmap
686  * file a page at a time. There's a superblock at the start of the file.
687  */
688 /* calculate the index of the page that contains this bit */
689 static inline unsigned long file_page_index(struct bitmap *bitmap, unsigned long chunk)
690 {
691         if (!bitmap->mddev->bitmap_info.external)
692                 chunk += sizeof(bitmap_super_t) << 3;
693         return chunk >> PAGE_BIT_SHIFT;
694 }
695
696 /* calculate the (bit) offset of this bit within a page */
697 static inline unsigned long file_page_offset(struct bitmap *bitmap, unsigned long chunk)
698 {
699         if (!bitmap->mddev->bitmap_info.external)
700                 chunk += sizeof(bitmap_super_t) << 3;
701         return chunk & (PAGE_BITS - 1);
702 }
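/*
 * Worked example for the two helpers above (illustration only, assuming
 * 4K pages, so PAGE_BITS == 32768, and a 256-byte internal superblock,
 * i.e. 2048 bits): chunk 0 is bit 2048 of the file, which is page 0 at
 * offset 2048, while chunk 30720 is bit 32768, i.e. page 1 at offset 0.
 * An external bitmap has no embedded superblock, so chunk n is simply
 * bit n of the file.
 */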
703
704 /*
705  * return a pointer to the page in the filemap that contains the given bit
706  *
707  * this lookup is complicated by the fact that the bitmap sb might be exactly
708  * 1 page (e.g., x86) or less than 1 page -- so the bitmap might start on page
709  * 0 or page 1
710  */
711 static inline struct page *filemap_get_page(struct bitmap *bitmap,
712                                             unsigned long chunk)
713 {
714         if (file_page_index(bitmap, chunk) >= bitmap->file_pages)
715                 return NULL;
716         return bitmap->filemap[file_page_index(bitmap, chunk)
717                                - file_page_index(bitmap, 0)];
718 }
719
720 static void bitmap_file_unmap(struct bitmap *bitmap)
721 {
722         struct page **map, *sb_page;
723         unsigned long *attr;
724         int pages;
725         unsigned long flags;
726
727         spin_lock_irqsave(&bitmap->lock, flags);
728         map = bitmap->filemap;
729         bitmap->filemap = NULL;
730         attr = bitmap->filemap_attr;
731         bitmap->filemap_attr = NULL;
732         pages = bitmap->file_pages;
733         bitmap->file_pages = 0;
734         sb_page = bitmap->sb_page;
735         bitmap->sb_page = NULL;
736         spin_unlock_irqrestore(&bitmap->lock, flags);
737
738         while (pages--)
739                 if (map[pages] != sb_page) /* 0 is sb_page, release it below */
740                         free_buffers(map[pages]);
741         kfree(map);
742         kfree(attr);
743
744         if (sb_page)
745                 free_buffers(sb_page);
746 }
747
748 static void bitmap_file_put(struct bitmap *bitmap)
749 {
750         struct file *file;
751         unsigned long flags;
752
753         spin_lock_irqsave(&bitmap->lock, flags);
754         file = bitmap->file;
755         bitmap->file = NULL;
756         spin_unlock_irqrestore(&bitmap->lock, flags);
757
758         if (file)
759                 wait_event(bitmap->write_wait,
760                            atomic_read(&bitmap->pending_writes)==0);
761         bitmap_file_unmap(bitmap);
762
763         if (file) {
764                 struct inode *inode = file->f_path.dentry->d_inode;
765                 invalidate_mapping_pages(inode->i_mapping, 0, -1);
766                 fput(file);
767         }
768 }
769
770 /*
771  * bitmap_file_kick - if an error occurs while manipulating the bitmap file
772  * then it is no longer reliable, so we stop using it and we mark the file
773  * as failed in the superblock
774  */
775 static void bitmap_file_kick(struct bitmap *bitmap)
776 {
777         char *path, *ptr = NULL;
778
779         if (bitmap_mask_state(bitmap, BITMAP_STALE, MASK_SET) == 0) {
780                 bitmap_update_sb(bitmap);
781
782                 if (bitmap->file) {
783                         path = kmalloc(PAGE_SIZE, GFP_KERNEL);
784                         if (path)
785                                 ptr = d_path(&bitmap->file->f_path, path,
786                                              PAGE_SIZE);
787
788                         printk(KERN_ALERT
789                               "%s: kicking failed bitmap file %s from array!\n",
790                               bmname(bitmap), IS_ERR(ptr) ? "" : ptr);
791
792                         kfree(path);
793                 } else
794                         printk(KERN_ALERT
795                                "%s: disabling internal bitmap due to errors\n",
796                                bmname(bitmap));
797         }
798
799         bitmap_file_put(bitmap);
800
801         return;
802 }
803
804 enum bitmap_page_attr {
805         BITMAP_PAGE_DIRTY = 0,     /* there are set bits that need to be synced */
806         BITMAP_PAGE_PENDING = 1,   /* there are bits that are being cleaned.
807                                     * i.e. counter is 1 or 2. */
808         BITMAP_PAGE_NEEDWRITE = 2, /* there are cleared bits that need to be synced */
809 };
810
811 static inline void set_page_attr(struct bitmap *bitmap, struct page *page,
812                                 enum bitmap_page_attr attr)
813 {
814         __set_bit((page->index<<2) + attr, bitmap->filemap_attr);
815 }
816
817 static inline void clear_page_attr(struct bitmap *bitmap, struct page *page,
818                                 enum bitmap_page_attr attr)
819 {
820         __clear_bit((page->index<<2) + attr, bitmap->filemap_attr);
821 }
822
823 static inline unsigned long test_page_attr(struct bitmap *bitmap, struct page *page,
824                                            enum bitmap_page_attr attr)
825 {
826         return test_bit((page->index<<2) + attr, bitmap->filemap_attr);
827 }
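/*
 * filemap_attr is a flat bit array holding four attribute bits per
 * filemap page, hence the (page->index << 2) + attr indexing above;
 * e.g. the NEEDWRITE flag of page 5 lives at bit 5*4 + 2 = 22.
 */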
828
829 /*
830  * bitmap_file_set_bit -- called before performing a write to the md device
831  * to set (and eventually sync) a particular bit in the bitmap file
832  *
833  * we set the bit immediately, then we record the page number so that
834  * when an unplug occurs, we can flush the dirty pages out to disk
835  */
836 static void bitmap_file_set_bit(struct bitmap *bitmap, sector_t block)
837 {
838         unsigned long bit;
839         struct page *page;
840         void *kaddr;
841         unsigned long chunk = block >> bitmap->chunkshift;
842
843         if (!bitmap->filemap)
844                 return;
845
846         page = filemap_get_page(bitmap, chunk);
847         if (!page)
848                 return;
849         bit = file_page_offset(bitmap, chunk);
850
851         /* set the bit */
852         kaddr = kmap_atomic(page);
853         if (bitmap->flags & BITMAP_HOSTENDIAN)
854                 set_bit(bit, kaddr);
855         else
856                 __set_bit_le(bit, kaddr);
857         kunmap_atomic(kaddr);
858         pr_debug("set file bit %lu page %lu\n", bit, page->index);
859         /* record page number so it gets flushed to disk when unplug occurs */
860         set_page_attr(bitmap, page, BITMAP_PAGE_DIRTY);
861 }
862
863 /* this gets called when the md device is ready to unplug its underlying
864  * (slave) device queues -- before we let any writes go down, we need to
865  * sync the dirty pages of the bitmap file to disk */
866 void bitmap_unplug(struct bitmap *bitmap)
867 {
868         unsigned long i, flags;
869         int dirty, need_write;
870         struct page *page;
871         int wait = 0;
872
873         if (!bitmap)
874                 return;
875
876         /* look at each page to see if there are any set bits that need to be
877          * flushed out to disk */
878         for (i = 0; i < bitmap->file_pages; i++) {
879                 spin_lock_irqsave(&bitmap->lock, flags);
880                 if (!bitmap->filemap) {
881                         spin_unlock_irqrestore(&bitmap->lock, flags);
882                         return;
883                 }
884                 page = bitmap->filemap[i];
885                 dirty = test_page_attr(bitmap, page, BITMAP_PAGE_DIRTY);
886                 need_write = test_page_attr(bitmap, page, BITMAP_PAGE_NEEDWRITE);
887                 clear_page_attr(bitmap, page, BITMAP_PAGE_DIRTY);
888                 clear_page_attr(bitmap, page, BITMAP_PAGE_NEEDWRITE);
889                 if (dirty)
890                         wait = 1;
891                 spin_unlock_irqrestore(&bitmap->lock, flags);
892
893                 if (dirty || need_write)
894                         write_page(bitmap, page, 0);
895         }
896         if (wait) { /* if any writes were performed, we need to wait on them */
897                 if (bitmap->file)
898                         wait_event(bitmap->write_wait,
899                                    atomic_read(&bitmap->pending_writes)==0);
900                 else
901                         md_super_wait(bitmap->mddev);
902         }
903         if (bitmap->flags & BITMAP_WRITE_ERROR)
904                 bitmap_file_kick(bitmap);
905 }
906 EXPORT_SYMBOL(bitmap_unplug);
907
908 static void bitmap_set_memory_bits(struct bitmap *bitmap, sector_t offset, int needed);
909 /* bitmap_init_from_disk -- called at bitmap_create time to initialize
910  * the in-memory bitmap from the on-disk bitmap -- also, sets up the
911  * memory mapping of the bitmap file
912  * Special cases:
913  *   if there's no bitmap file, or if the bitmap file had been
914  *   previously kicked from the array, we mark all the bits as
915  *   1's in order to cause a full resync.
916  *
917  * We ignore all bits for sectors that end earlier than 'start'.
918  * This is used when reading an out-of-date bitmap...
919  */
920 static int bitmap_init_from_disk(struct bitmap *bitmap, sector_t start)
921 {
922         unsigned long i, chunks, index, oldindex, bit;
923         struct page *page = NULL, *oldpage = NULL;
924         unsigned long num_pages, bit_cnt = 0;
925         struct file *file;
926         unsigned long bytes, offset;
927         int outofdate;
928         int ret = -ENOSPC;
929         void *paddr;
930
931         chunks = bitmap->chunks;
932         file = bitmap->file;
933
934         BUG_ON(!file && !bitmap->mddev->bitmap_info.offset);
935
936         outofdate = bitmap->flags & BITMAP_STALE;
937         if (outofdate)
938                 printk(KERN_INFO "%s: bitmap file is out of date, doing full "
939                         "recovery\n", bmname(bitmap));
940
941         bytes = DIV_ROUND_UP(bitmap->chunks, 8);
942         if (!bitmap->mddev->bitmap_info.external)
943                 bytes += sizeof(bitmap_super_t);
944
945         num_pages = DIV_ROUND_UP(bytes, PAGE_SIZE);
946
947         if (file && i_size_read(file->f_mapping->host) < bytes) {
948                 printk(KERN_INFO "%s: bitmap file too short %lu < %lu\n",
949                         bmname(bitmap),
950                         (unsigned long) i_size_read(file->f_mapping->host),
951                         bytes);
952                 goto err;
953         }
954
955         ret = -ENOMEM;
956
957         bitmap->filemap = kmalloc(sizeof(struct page *) * num_pages, GFP_KERNEL);
958         if (!bitmap->filemap)
959                 goto err;
960
961         /* We need 4 bits per page, rounded up to a multiple of sizeof(unsigned long) */
962         bitmap->filemap_attr = kzalloc(
963                 roundup(DIV_ROUND_UP(num_pages*4, 8), sizeof(unsigned long)),
964                 GFP_KERNEL);
965         if (!bitmap->filemap_attr)
966                 goto err;
967
968         oldindex = ~0L;
969
970         for (i = 0; i < chunks; i++) {
971                 int b;
972                 index = file_page_index(bitmap, i);
973                 bit = file_page_offset(bitmap, i);
974                 if (index != oldindex) { /* this is a new page, read it in */
975                         int count;
976                         /* unmap the old page, we're done with it */
977                         if (index == num_pages-1)
978                                 count = bytes - index * PAGE_SIZE;
979                         else
980                                 count = PAGE_SIZE;
981                         if (index == 0 && bitmap->sb_page) {
982                                 /*
983                                  * if we're here then the superblock page
984                                  * contains some bits (PAGE_SIZE != sizeof sb)
985                                  * we've already read it in, so just use it
986                                  */
987                                 page = bitmap->sb_page;
988                                 offset = sizeof(bitmap_super_t);
989                                 if (!file)
990                                         page = read_sb_page(
991                                                 bitmap->mddev,
992                                                 bitmap->mddev->bitmap_info.offset,
993                                                 page,
994                                                 index, count);
995                         } else if (file) {
996                                 page = read_page(file, index, bitmap, count);
997                                 offset = 0;
998                         } else {
999                                 page = read_sb_page(bitmap->mddev,
1000                                                     bitmap->mddev->bitmap_info.offset,
1001                                                     NULL,
1002                                                     index, count);
1003                                 offset = 0;
1004                         }
1005                         if (IS_ERR(page)) { /* read error */
1006                                 ret = PTR_ERR(page);
1007                                 goto err;
1008                         }
1009
1010                         oldindex = index;
1011                         oldpage = page;
1012
1013                         bitmap->filemap[bitmap->file_pages++] = page;
1014                         bitmap->last_page_size = count;
1015
1016                         if (outofdate) {
1017                                 /*
1018                                  * if bitmap is out of date, dirty the
1019                                  * whole page and write it out
1020                                  */
1021                                 paddr = kmap_atomic(page);
1022                                 memset(paddr + offset, 0xff,
1023                                        PAGE_SIZE - offset);
1024                                 kunmap_atomic(paddr);
1025                                 write_page(bitmap, page, 1);
1026
1027                                 ret = -EIO;
1028                                 if (bitmap->flags & BITMAP_WRITE_ERROR)
1029                                         goto err;
1030                         }
1031                 }
1032                 paddr = kmap_atomic(page);
1033                 if (bitmap->flags & BITMAP_HOSTENDIAN)
1034                         b = test_bit(bit, paddr);
1035                 else
1036                         b = test_bit_le(bit, paddr);
1037                 kunmap_atomic(paddr);
1038                 if (b) {
1039                         /* if the disk bit is set, set the memory bit */
1040                         int needed = ((sector_t)(i+1) << bitmap->chunkshift
1041                                       >= start);
1042                         bitmap_set_memory_bits(bitmap,
1043                                                (sector_t)i << bitmap->chunkshift,
1044                                                needed);
1045                         bit_cnt++;
1046                 }
1047         }
1048
1049         /* everything went OK */
1050         ret = 0;
1051         bitmap_mask_state(bitmap, BITMAP_STALE, MASK_UNSET);
1052
1053         if (bit_cnt) { /* Kick recovery if any bits were set */
1054                 set_bit(MD_RECOVERY_NEEDED, &bitmap->mddev->recovery);
1055                 md_wakeup_thread(bitmap->mddev->thread);
1056         }
1057
1058         printk(KERN_INFO "%s: bitmap initialized from disk: "
1059                "read %lu/%lu pages, set %lu of %lu bits\n",
1060                bmname(bitmap), bitmap->file_pages, num_pages, bit_cnt, chunks);
1061
1062         return 0;
1063
1064  err:
1065         printk(KERN_INFO "%s: bitmap initialisation failed: %d\n",
1066                bmname(bitmap), ret);
1067         return ret;
1068 }
1069
1070 void bitmap_write_all(struct bitmap *bitmap)
1071 {
1072         /* We don't actually write all bitmap blocks here,
1073          * just flag them as needing to be written
1074          */
1075         int i;
1076
1077         spin_lock_irq(&bitmap->lock);
1078         for (i = 0; i < bitmap->file_pages; i++)
1079                 set_page_attr(bitmap, bitmap->filemap[i],
1080                               BITMAP_PAGE_NEEDWRITE);
1081         bitmap->allclean = 0;
1082         spin_unlock_irq(&bitmap->lock);
1083 }
1084
1085 static void bitmap_count_page(struct bitmap *bitmap, sector_t offset, int inc)
1086 {
1087         sector_t chunk = offset >> bitmap->chunkshift;
1088         unsigned long page = chunk >> PAGE_COUNTER_SHIFT;
1089         bitmap->bp[page].count += inc;
1090         bitmap_checkfree(bitmap, page);
1091 }
1092 static bitmap_counter_t *bitmap_get_counter(struct bitmap *bitmap,
1093                                             sector_t offset, sector_t *blocks,
1094                                             int create);
1095
1096 /*
1097  * bitmap daemon -- periodically wakes up to clean bits and flush pages
1098  *                      out to disk
1099  */
1100
1101 void bitmap_daemon_work(struct mddev *mddev)
1102 {
1103         struct bitmap *bitmap;
1104         unsigned long j;
1105         unsigned long flags;
1106         struct page *page = NULL, *lastpage = NULL;
1107         sector_t blocks;
1108         void *paddr;
1109
1110         /* Use a mutex to guard daemon_work against
1111          * bitmap_destroy.
1112          */
1113         mutex_lock(&mddev->bitmap_info.mutex);
1114         bitmap = mddev->bitmap;
1115         if (bitmap == NULL) {
1116                 mutex_unlock(&mddev->bitmap_info.mutex);
1117                 return;
1118         }
1119         if (time_before(jiffies, bitmap->daemon_lastrun
1120                         + mddev->bitmap_info.daemon_sleep))
1121                 goto done;
1122
1123         bitmap->daemon_lastrun = jiffies;
1124         if (bitmap->allclean) {
1125                 mddev->thread->timeout = MAX_SCHEDULE_TIMEOUT;
1126                 goto done;
1127         }
1128         bitmap->allclean = 1;
1129
1130         spin_lock_irqsave(&bitmap->lock, flags);
1131         for (j = 0; j < bitmap->chunks; j++) {
1132                 bitmap_counter_t *bmc;
1133                 if (!bitmap->filemap)
1134                         /* error or shutdown */
1135                         break;
1136
1137                 page = filemap_get_page(bitmap, j);
1138
1139                 if (page != lastpage) {
1140                         /* skip this page unless it's marked as needing cleaning */
1141                         if (!test_page_attr(bitmap, page, BITMAP_PAGE_PENDING)) {
1142                                 int need_write = test_page_attr(bitmap, page,
1143                                                                 BITMAP_PAGE_NEEDWRITE);
1144                                 if (need_write)
1145                                         clear_page_attr(bitmap, page, BITMAP_PAGE_NEEDWRITE);
1146
1147                                 spin_unlock_irqrestore(&bitmap->lock, flags);
1148                                 if (need_write)
1149                                         write_page(bitmap, page, 0);
1150                                 spin_lock_irqsave(&bitmap->lock, flags);
1151                                 j |= (PAGE_BITS - 1);
1152                                 continue;
1153                         }
1154
1155                         /* grab the new page, sync and release the old */
1156                         if (lastpage != NULL) {
1157                                 if (test_page_attr(bitmap, lastpage,
1158                                                    BITMAP_PAGE_NEEDWRITE)) {
1159                                         clear_page_attr(bitmap, lastpage,
1160                                                         BITMAP_PAGE_NEEDWRITE);
1161                                         spin_unlock_irqrestore(&bitmap->lock, flags);
1162                                         write_page(bitmap, lastpage, 0);
1163                                 } else {
1164                                         set_page_attr(bitmap, lastpage,
1165                                                       BITMAP_PAGE_NEEDWRITE);
1166                                         bitmap->allclean = 0;
1167                                         spin_unlock_irqrestore(&bitmap->lock, flags);
1168                                 }
1169                         } else
1170                                 spin_unlock_irqrestore(&bitmap->lock, flags);
1171                         lastpage = page;
1172
1173                         /* We are possibly going to clear some bits, so make
1174                          * sure that events_cleared is up-to-date.
1175                          */
1176                         if (bitmap->need_sync &&
1177                             mddev->bitmap_info.external == 0) {
1178                                 bitmap_super_t *sb;
1179                                 bitmap->need_sync = 0;
1180                                 sb = kmap_atomic(bitmap->sb_page);
1181                                 sb->events_cleared =
1182                                         cpu_to_le64(bitmap->events_cleared);
1183                                 kunmap_atomic(sb);
1184                                 write_page(bitmap, bitmap->sb_page, 1);
1185                         }
1186                         spin_lock_irqsave(&bitmap->lock, flags);
1187                         if (!bitmap->need_sync)
1188                                 clear_page_attr(bitmap, page, BITMAP_PAGE_PENDING);
1189                         else
1190                                 bitmap->allclean = 0;
1191                 }
1192                 bmc = bitmap_get_counter(bitmap,
1193                                          (sector_t)j << bitmap->chunkshift,
1194                                          &blocks, 0);
1195                 if (!bmc)
1196                         j |= PAGE_COUNTER_MASK;
1197                 else if (*bmc) {
1198                         if (*bmc == 1 && !bitmap->need_sync) {
1199                                 /* we can clear the bit */
1200                                 *bmc = 0;
1201                                 bitmap_count_page(bitmap,
1202                                                   (sector_t)j << bitmap->chunkshift,
1203                                                   -1);
1204
1205                                 /* clear the bit */
1206                                 paddr = kmap_atomic(page);
1207                                 if (bitmap->flags & BITMAP_HOSTENDIAN)
1208                                         clear_bit(file_page_offset(bitmap, j),
1209                                                   paddr);
1210                                 else
1211                                         __clear_bit_le(
1212                                                 file_page_offset(bitmap,
1213                                                                  j),
1214                                                 paddr);
1215                                 kunmap_atomic(paddr);
1216                         } else if (*bmc <= 2) {
1217                                 *bmc = 1; /* maybe clear the bit next time */
1218                                 set_page_attr(bitmap, page, BITMAP_PAGE_PENDING);
1219                                 bitmap->allclean = 0;
1220                         }
1221                 }
1222         }
1223         spin_unlock_irqrestore(&bitmap->lock, flags);
1224
1225         /* now sync the final page */
1226         if (lastpage != NULL) {
1227                 spin_lock_irqsave(&bitmap->lock, flags);
1228                 if (test_page_attr(bitmap, lastpage, BITMAP_PAGE_NEEDWRITE)) {
1229                         clear_page_attr(bitmap, lastpage, BITMAP_PAGE_NEEDWRITE);
1230                         spin_unlock_irqrestore(&bitmap->lock, flags);
1231                         write_page(bitmap, lastpage, 0);
1232                 } else {
1233                         set_page_attr(bitmap, lastpage, BITMAP_PAGE_NEEDWRITE);
1234                         bitmap->allclean = 0;
1235                         spin_unlock_irqrestore(&bitmap->lock, flags);
1236                 }
1237         }
1238
1239  done:
1240         if (bitmap->allclean == 0)
1241                 mddev->thread->timeout =
1242                         mddev->bitmap_info.daemon_sleep;
1243         mutex_unlock(&mddev->bitmap_info.mutex);
1244 }
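/*
 * Bit clearing is deliberately lazy: bitmap_endwrite() only marks a page
 * BITMAP_PAGE_PENDING once a counter drops to 2 or below.  The daemon
 * above then takes counters from 2 to 1 on one pass and from 1 to 0
 * (clearing the bit in the bitmap page) on a later pass, flagging the
 * affected pages BITMAP_PAGE_NEEDWRITE so a following pass writes them
 * out.  A crash in between can only leave stale bits set, which costs
 * no more than some extra resync work.
 */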
1245
1246 static bitmap_counter_t *bitmap_get_counter(struct bitmap *bitmap,
1247                                             sector_t offset, sector_t *blocks,
1248                                             int create)
1249 __releases(bitmap->lock)
1250 __acquires(bitmap->lock)
1251 {
1252         /* If 'create', we might release the lock and reclaim it.
1253          * The lock must have been taken with interrupts enabled.
1254          * If !create, we don't release the lock.
1255          */
1256         sector_t chunk = offset >> bitmap->chunkshift;
1257         unsigned long page = chunk >> PAGE_COUNTER_SHIFT;
1258         unsigned long pageoff = (chunk & PAGE_COUNTER_MASK) << COUNTER_BYTE_SHIFT;
1259         sector_t csize;
1260         int err;
1261
1262         err = bitmap_checkpage(bitmap, page, create);
1263
1264         if (bitmap->bp[page].hijacked ||
1265             bitmap->bp[page].map == NULL)
1266                 csize = ((sector_t)1) << (bitmap->chunkshift +
1267                                           PAGE_COUNTER_SHIFT - 1);
1268         else
1269                 csize = ((sector_t)1) << bitmap->chunkshift;
1270         *blocks = csize - (offset & (csize - 1));
1271
1272         if (err < 0)
1273                 return NULL;
1274
1275         /* now locked ... */
1276
1277         if (bitmap->bp[page].hijacked) { /* hijacked pointer */
1278                 /* should we use the first or second counter field
1279                  * of the hijacked pointer? */
1280                 int hi = (pageoff > PAGE_COUNTER_MASK);
1281                 return  &((bitmap_counter_t *)
1282                           &bitmap->bp[page].map)[hi];
1283         } else /* page is allocated */
1284                 return (bitmap_counter_t *)
1285                         &(bitmap->bp[page].map[pageoff]);
1286 }
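/*
 * Each chunk's counter (bitmap_counter_t, normally 16 bits wide) packs
 * two flags and a count: the top bit is NEEDED (chunk must be resynced),
 * the next is RESYNC (resync in progress), and the low bits hold the use
 * count, which is why write-behind is capped at 16383 earlier in this
 * file.  For a hijacked page only two counters exist, so *blocks is
 * reported in correspondingly larger units.
 */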
1287
1288 int bitmap_startwrite(struct bitmap *bitmap, sector_t offset, unsigned long sectors, int behind)
1289 {
1290         if (!bitmap)
1291                 return 0;
1292
1293         if (behind) {
1294                 int bw;
1295                 atomic_inc(&bitmap->behind_writes);
1296                 bw = atomic_read(&bitmap->behind_writes);
1297                 if (bw > bitmap->behind_writes_used)
1298                         bitmap->behind_writes_used = bw;
1299
1300                 pr_debug("inc write-behind count %d/%lu\n",
1301                          bw, bitmap->mddev->bitmap_info.max_write_behind);
1302         }
1303
1304         while (sectors) {
1305                 sector_t blocks;
1306                 bitmap_counter_t *bmc;
1307
1308                 spin_lock_irq(&bitmap->lock);
1309                 bmc = bitmap_get_counter(bitmap, offset, &blocks, 1);
1310                 if (!bmc) {
1311                         spin_unlock_irq(&bitmap->lock);
1312                         return 0;
1313                 }
1314
1315                 if (unlikely(COUNTER(*bmc) == COUNTER_MAX)) {
1316                         DEFINE_WAIT(__wait);
1317                         /* note that it is safe to do the prepare_to_wait
1318                          * after the test as long as we do it before dropping
1319                          * the spinlock.
1320                          */
1321                         prepare_to_wait(&bitmap->overflow_wait, &__wait,
1322                                         TASK_UNINTERRUPTIBLE);
1323                         spin_unlock_irq(&bitmap->lock);
1324                         io_schedule();
1325                         finish_wait(&bitmap->overflow_wait, &__wait);
1326                         continue;
1327                 }
1328
1329                 switch (*bmc) {
1330                 case 0:
1331                         bitmap_file_set_bit(bitmap, offset);
1332                         bitmap_count_page(bitmap, offset, 1);
1333                         /* fall through */
1334                 case 1:
1335                         *bmc = 2;
1336                 }
1337
1338                 (*bmc)++;
1339
1340                 spin_unlock_irq(&bitmap->lock);
1341
1342                 offset += blocks;
1343                 if (sectors > blocks)
1344                         sectors -= blocks;
1345                 else
1346                         sectors = 0;
1347         }
1348         return 0;
1349 }
1350 EXPORT_SYMBOL(bitmap_startwrite);
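/*
 * Counter values as used by bitmap_startwrite()/bitmap_endwrite():
 *   0      chunk clean, bitmap bit clear
 *   1      chunk about to be cleaned by the daemon
 *   2      chunk dirty, no write in flight
 *   2 + n  chunk dirty with n writes in flight
 * so the switch above first raises a clean or nearly-clean chunk to 2
 * (setting the corresponding file bit in the 0 case) before the
 * per-write increment.
 */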
1351
1352 void bitmap_endwrite(struct bitmap *bitmap, sector_t offset, unsigned long sectors,
1353                      int success, int behind)
1354 {
1355         if (!bitmap)
1356                 return;
1357         if (behind) {
1358                 if (atomic_dec_and_test(&bitmap->behind_writes))
1359                         wake_up(&bitmap->behind_wait);
1360                 pr_debug("dec write-behind count %d/%lu\n",
1361                          atomic_read(&bitmap->behind_writes),
1362                          bitmap->mddev->bitmap_info.max_write_behind);
1363         }
1364
1365         while (sectors) {
1366                 sector_t blocks;
1367                 unsigned long flags;
1368                 bitmap_counter_t *bmc;
1369
1370                 spin_lock_irqsave(&bitmap->lock, flags);
1371                 bmc = bitmap_get_counter(bitmap, offset, &blocks, 0);
1372                 if (!bmc) {
1373                         spin_unlock_irqrestore(&bitmap->lock, flags);
1374                         return;
1375                 }
1376
1377                 if (success && !bitmap->mddev->degraded &&
1378                     bitmap->events_cleared < bitmap->mddev->events) {
1379                         bitmap->events_cleared = bitmap->mddev->events;
1380                         bitmap->need_sync = 1;
1381                         sysfs_notify_dirent_safe(bitmap->sysfs_can_clear);
1382                 }
1383
1384                 if (!success && !NEEDED(*bmc))
1385                         *bmc |= NEEDED_MASK;
1386
1387                 if (COUNTER(*bmc) == COUNTER_MAX)
1388                         wake_up(&bitmap->overflow_wait);
1389
1390                 (*bmc)--;
1391                 if (*bmc <= 2) {
1392                         set_page_attr(bitmap,
1393                                       filemap_get_page(
1394                                               bitmap,
1395                                               offset >> bitmap->chunkshift),
1396                                       BITMAP_PAGE_PENDING);
1397                         bitmap->allclean = 0;
1398                 }
1399                 spin_unlock_irqrestore(&bitmap->lock, flags);
1400                 offset += blocks;
1401                 if (sectors > blocks)
1402                         sectors -= blocks;
1403                 else
1404                         sectors = 0;
1405         }
1406 }
1407 EXPORT_SYMBOL(bitmap_endwrite);
1408
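/*
 * __bitmap_start_sync - report whether the chunk containing 'offset' needs
 * to be resynced.  Returns 1 if it does and, unless the array is degraded,
 * moves the chunk from NEEDED to RESYNC state.  '*blocks' is set to the
 * number of blocks the answer covers.
 */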
1409 static int __bitmap_start_sync(struct bitmap *bitmap, sector_t offset, sector_t *blocks,
1410                                int degraded)
1411 {
1412         bitmap_counter_t *bmc;
1413         int rv;
1414         if (bitmap == NULL) {/* FIXME or bitmap set as 'failed' */
1415                 *blocks = 1024;
1416                 return 1; /* always resync if no bitmap */
1417         }
1418         spin_lock_irq(&bitmap->lock);
1419         bmc = bitmap_get_counter(bitmap, offset, blocks, 0);
1420         rv = 0;
1421         if (bmc) {
1422                 /* locked */
1423                 if (RESYNC(*bmc))
1424                         rv = 1;
1425                 else if (NEEDED(*bmc)) {
1426                         rv = 1;
1427                         if (!degraded) { /* don't set/clear bits if degraded */
1428                                 *bmc |= RESYNC_MASK;
1429                                 *bmc &= ~NEEDED_MASK;
1430                         }
1431                 }
1432         }
1433         spin_unlock_irq(&bitmap->lock);
1434         return rv;
1435 }
1436
1437 int bitmap_start_sync(struct bitmap *bitmap, sector_t offset, sector_t *blocks,
1438                       int degraded)
1439 {
1440         /* bitmap_start_sync must always report on multiples of whole
1441          * pages, otherwise resync (which is very PAGE_SIZE based) will
1442          * get confused.
1443          * So call __bitmap_start_sync repeatedly (if needed) until
1444          * at least PAGE_SIZE>>9 blocks are covered.
1445          * Return the 'or' of the result.
1446          */
1447         int rv = 0;
1448         sector_t blocks1;
1449
1450         *blocks = 0;
1451         while (*blocks < (PAGE_SIZE>>9)) {
1452                 rv |= __bitmap_start_sync(bitmap, offset,
1453                                           &blocks1, degraded);
1454                 offset += blocks1;
1455                 *blocks += blocks1;
1456         }
1457         return rv;
1458 }
1459 EXPORT_SYMBOL(bitmap_start_sync);
1460
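/*
 * bitmap_end_sync - resync of the region at 'offset' has finished.
 * If 'aborted', the NEEDED flag is put back so the chunk will be retried;
 * otherwise the chunk may start being cleaned.
 */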
1461 void bitmap_end_sync(struct bitmap *bitmap, sector_t offset, sector_t *blocks, int aborted)
1462 {
1463         bitmap_counter_t *bmc;
1464         unsigned long flags;
1465
1466         if (bitmap == NULL) {
1467                 *blocks = 1024;
1468                 return;
1469         }
1470         spin_lock_irqsave(&bitmap->lock, flags);
1471         bmc = bitmap_get_counter(bitmap, offset, blocks, 0);
1472         if (bmc == NULL)
1473                 goto unlock;
1474         /* locked */
1475         if (RESYNC(*bmc)) {
1476                 *bmc &= ~RESYNC_MASK;
1477
1478                 if (!NEEDED(*bmc) && aborted)
1479                         *bmc |= NEEDED_MASK;
1480                 else {
1481                         if (*bmc <= 2) {
1482                                 set_page_attr(bitmap,
1483                                               filemap_get_page(bitmap, offset >> bitmap->chunkshift),
1484                                               BITMAP_PAGE_PENDING);
1485                                 bitmap->allclean = 0;
1486                         }
1487                 }
1488         }
1489  unlock:
1490         spin_unlock_irqrestore(&bitmap->lock, flags);
1491 }
1492 EXPORT_SYMBOL(bitmap_end_sync);
1493
1494 void bitmap_close_sync(struct bitmap *bitmap)
1495 {
1496         /* Sync has finished, and any bitmap chunks that weren't synced
1497          * properly have been aborted.  It remains to us to clear the
1498          * RESYNC bit wherever it is still on
1499          */
1500         sector_t sector = 0;
1501         sector_t blocks;
1502         if (!bitmap)
1503                 return;
1504         while (sector < bitmap->mddev->resync_max_sectors) {
1505                 bitmap_end_sync(bitmap, sector, &blocks, 0);
1506                 sector += blocks;
1507         }
1508 }
1509 EXPORT_SYMBOL(bitmap_close_sync);
1510
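/*
 * bitmap_cond_end_sync - periodically commit resync progress.
 * Called with the current resync position; at most once per daemon_sleep
 * it waits for outstanding resync requests, records 'sector' as completed
 * and ends the sync for all earlier chunks so their bits can be cleared
 * while the resync is still running.
 */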
1511 void bitmap_cond_end_sync(struct bitmap *bitmap, sector_t sector)
1512 {
1513         sector_t s = 0;
1514         sector_t blocks;
1515
1516         if (!bitmap)
1517                 return;
1518         if (sector == 0) {
1519                 bitmap->last_end_sync = jiffies;
1520                 return;
1521         }
1522         if (time_before(jiffies, (bitmap->last_end_sync
1523                                   + bitmap->mddev->bitmap_info.daemon_sleep)))
1524                 return;
1525         wait_event(bitmap->mddev->recovery_wait,
1526                    atomic_read(&bitmap->mddev->recovery_active) == 0);
1527
1528         bitmap->mddev->curr_resync_completed = sector;
1529         set_bit(MD_CHANGE_CLEAN, &bitmap->mddev->flags);
1530         sector &= ~((1ULL << bitmap->chunkshift) - 1);
1531         s = 0;
1532         while (s < sector && s < bitmap->mddev->resync_max_sectors) {
1533                 bitmap_end_sync(bitmap, s, &blocks, 0);
1534                 s += blocks;
1535         }
1536         bitmap->last_end_sync = jiffies;
1537         sysfs_notify(&bitmap->mddev->kobj, NULL, "sync_completed");
1538 }
1539 EXPORT_SYMBOL(bitmap_cond_end_sync);
1540
1541 static void bitmap_set_memory_bits(struct bitmap *bitmap, sector_t offset, int needed)
1542 {
1543         /* For each chunk covered by any of these sectors, set the
1544          * counter to 2 and set resync_needed.  They should all
1545          * be 0 at this point
1546          */
1547
1548         sector_t secs;
1549         bitmap_counter_t *bmc;
1550         spin_lock_irq(&bitmap->lock);
1551         bmc = bitmap_get_counter(bitmap, offset, &secs, 1);
1552         if (!bmc) {
1553                 spin_unlock_irq(&bitmap->lock);
1554                 return;
1555         }
1556         if (!*bmc) {
1557                 struct page *page;
1558                 *bmc = 2 | (needed ? NEEDED_MASK : 0);
1559                 bitmap_count_page(bitmap, offset, 1);
1560                 page = filemap_get_page(bitmap, offset >> bitmap->chunkshift);
1561                 set_page_attr(bitmap, page, BITMAP_PAGE_PENDING);
1562                 bitmap->allclean = 0;
1563         }
1564         spin_unlock_irq(&bitmap->lock);
1565 }
1566
1567 /* dirty the memory and file bits for bitmap chunks "s" to "e" */
1568 void bitmap_dirty_bits(struct bitmap *bitmap, unsigned long s, unsigned long e)
1569 {
1570         unsigned long chunk;
1571
1572         for (chunk = s; chunk <= e; chunk++) {
1573                 sector_t sec = (sector_t)chunk << bitmap->chunkshift;
1574                 bitmap_set_memory_bits(bitmap, sec, 1);
1575                 spin_lock_irq(&bitmap->lock);
1576                 bitmap_file_set_bit(bitmap, sec);
1577                 spin_unlock_irq(&bitmap->lock);
1578                 if (sec < bitmap->mddev->recovery_cp)
1579                         /* We are asserting that the array is dirty,
1580                          * so move the recovery_cp address back so
1581                          * that it is obvious that it is dirty
1582                          */
1583                         bitmap->mddev->recovery_cp = sec;
1584         }
1585 }
1586
1587 /*
1588  * flush out any pending updates
1589  */
1590 void bitmap_flush(struct mddev *mddev)
1591 {
1592         struct bitmap *bitmap = mddev->bitmap;
1593         long sleep;
1594
1595         if (!bitmap) /* there was no bitmap */
1596                 return;
1597
1598         /* run daemon_work three times to ensure that everything
1599          * that can be flushed is flushed
1600          */
1601         sleep = mddev->bitmap_info.daemon_sleep * 2;
1602         bitmap->daemon_lastrun -= sleep;
1603         bitmap_daemon_work(mddev);
1604         bitmap->daemon_lastrun -= sleep;
1605         bitmap_daemon_work(mddev);
1606         bitmap->daemon_lastrun -= sleep;
1607         bitmap_daemon_work(mddev);
1608         bitmap_update_sb(bitmap);
1609 }
1610
1611 /*
1612  * free memory that was allocated
1613  */
1614 static void bitmap_free(struct bitmap *bitmap)
1615 {
1616         unsigned long k, pages;
1617         struct bitmap_page *bp;
1618
1619         if (!bitmap) /* there was no bitmap */
1620                 return;
1621
1622         /* release the bitmap file and kill the daemon */
1623         bitmap_file_put(bitmap);
1624
1625         bp = bitmap->bp;
1626         pages = bitmap->pages;
1627
1628         /* free all allocated memory */
1629
1630         if (bp) /* deallocate the page memory */
1631                 for (k = 0; k < pages; k++)
1632                         if (bp[k].map && !bp[k].hijacked)
1633                                 kfree(bp[k].map);
1634         kfree(bp);
1635         kfree(bitmap);
1636 }
1637
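/*
 * bitmap_destroy - detach the bitmap from the mddev and free it.
 * Safe to call even if no bitmap is attached.
 */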
1638 void bitmap_destroy(struct mddev *mddev)
1639 {
1640         struct bitmap *bitmap = mddev->bitmap;
1641
1642         if (!bitmap) /* there was no bitmap */
1643                 return;
1644
1645         mutex_lock(&mddev->bitmap_info.mutex);
1646         mddev->bitmap = NULL; /* disconnect from the md device */
1647         mutex_unlock(&mddev->bitmap_info.mutex);
1648         if (mddev->thread)
1649                 mddev->thread->timeout = MAX_SCHEDULE_TIMEOUT;
1650
1651         if (bitmap->sysfs_can_clear)
1652                 sysfs_put(bitmap->sysfs_can_clear);
1653
1654         bitmap_free(bitmap);
1655 }
1656
1657 /*
1658  * initialize the bitmap structure
1659  * if this returns an error, bitmap_destroy must be called to do clean up
1660  */
1661 int bitmap_create(struct mddev *mddev)
1662 {
1663         struct bitmap *bitmap;
1664         sector_t blocks = mddev->resync_max_sectors;
1665         unsigned long chunks;
1666         unsigned long pages;
1667         struct file *file = mddev->bitmap_info.file;
1668         int err;
1669         struct sysfs_dirent *bm = NULL;
1670
1671         BUILD_BUG_ON(sizeof(bitmap_super_t) != 256);
1672
1673         if (!file
1674             && !mddev->bitmap_info.offset) /* bitmap disabled, nothing to do */
1675                 return 0;
1676
1677         BUG_ON(file && mddev->bitmap_info.offset);
1678
1679         bitmap = kzalloc(sizeof(*bitmap), GFP_KERNEL);
1680         if (!bitmap)
1681                 return -ENOMEM;
1682
1683         spin_lock_init(&bitmap->lock);
1684         atomic_set(&bitmap->pending_writes, 0);
1685         init_waitqueue_head(&bitmap->write_wait);
1686         init_waitqueue_head(&bitmap->overflow_wait);
1687         init_waitqueue_head(&bitmap->behind_wait);
1688
1689         bitmap->mddev = mddev;
1690
1691         if (mddev->kobj.sd)
1692                 bm = sysfs_get_dirent(mddev->kobj.sd, NULL, "bitmap");
1693         if (bm) {
1694                 bitmap->sysfs_can_clear = sysfs_get_dirent(bm, NULL, "can_clear");
1695                 sysfs_put(bm);
1696         } else
1697                 bitmap->sysfs_can_clear = NULL;
1698
1699         bitmap->file = file;
1700         if (file) {
1701                 get_file(file);
1702                 /* As future accesses to this file will use bmap,
1703                  * and bypass the page cache, we must sync the file
1704                  * first.
1705                  */
1706                 vfs_fsync(file, 1);
1707         }
1708         /* read superblock from bitmap file (this sets mddev->bitmap_info.chunksize) */
1709         if (!mddev->bitmap_info.external) {
1710                 /*
1711                  * If 'MD_ARRAY_FIRST_USE' is set, then device-mapper is
1712                  * instructing us to create a new on-disk bitmap instance.
1713                  */
1714                 if (test_and_clear_bit(MD_ARRAY_FIRST_USE, &mddev->flags))
1715                         err = bitmap_new_disk_sb(bitmap);
1716                 else
1717                         err = bitmap_read_sb(bitmap);
1718         } else {
1719                 err = 0;
1720                 if (mddev->bitmap_info.chunksize == 0 ||
1721                     mddev->bitmap_info.daemon_sleep == 0)
1722                         /* chunksize and time_base need to be
1723                          * set first. */
1724                         err = -EINVAL;
1725         }
1726         if (err)
1727                 goto error;
1728
1729         bitmap->daemon_lastrun = jiffies;
1730         bitmap->chunkshift = (ffz(~mddev->bitmap_info.chunksize)
1731                               - BITMAP_BLOCK_SHIFT);
1732
1733         /* round the size up to a whole number of chunks */
1734         chunks = (blocks + ((sector_t)1 << bitmap->chunkshift) - 1) >>
1735                         bitmap->chunkshift;
1736         pages = (chunks + PAGE_COUNTER_RATIO - 1) / PAGE_COUNTER_RATIO;
1737
1738         BUG_ON(!pages);
1739
1740         bitmap->chunks = chunks;
1741         bitmap->pages = pages;
1742         bitmap->missing_pages = pages;
1743
1744         bitmap->bp = kzalloc(pages * sizeof(*bitmap->bp), GFP_KERNEL);
1745
1746         err = -ENOMEM;
1747         if (!bitmap->bp)
1748                 goto error;
1749
1750         printk(KERN_INFO "created bitmap (%lu pages) for device %s\n",
1751                 pages, bmname(bitmap));
1752
1753         mddev->bitmap = bitmap;
1754
1755
1756         return (bitmap->flags & BITMAP_WRITE_ERROR) ? -EIO : 0;
1757
1758  error:
1759         bitmap_free(bitmap);
1760         return err;
1761 }
1762
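/*
 * bitmap_load - read the existing bitmap state into memory after
 * bitmap_create, start the daemon timer and write out the superblock.
 * Any stale in-memory state is cleared first.
 */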
1763 int bitmap_load(struct mddev *mddev)
1764 {
1765         int err = 0;
1766         sector_t start = 0;
1767         sector_t sector = 0;
1768         struct bitmap *bitmap = mddev->bitmap;
1769
1770         if (!bitmap)
1771                 goto out;
1772
1773         /* Clear out old bitmap info first:  Either there is none, or we
1774          * are resuming after someone else has possibly changed things,
1775          * so we should forget old cached info.
1776          * All chunks should be clean, but some might need_sync.
1777          */
1778         while (sector < mddev->resync_max_sectors) {
1779                 sector_t blocks;
1780                 bitmap_start_sync(bitmap, sector, &blocks, 0);
1781                 sector += blocks;
1782         }
1783         bitmap_close_sync(bitmap);
1784
1785         if (mddev->degraded == 0
1786             || bitmap->events_cleared == mddev->events)
1787                 /* no need to keep dirty bits to optimise a
1788                  * re-add of a missing device */
1789                 start = mddev->recovery_cp;
1790
1791         err = bitmap_init_from_disk(bitmap, start);
1792
1793         if (err)
1794                 goto out;
1795
1796         mddev->thread->timeout = mddev->bitmap_info.daemon_sleep;
1797         md_wakeup_thread(mddev->thread);
1798
1799         bitmap_update_sb(bitmap);
1800
1801         if (bitmap->flags & BITMAP_WRITE_ERROR)
1802                 err = -EIO;
1803 out:
1804         return err;
1805 }
1806 EXPORT_SYMBOL_GPL(bitmap_load);
1807
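/*
 * bitmap_status - emit a one-line summary of the bitmap for /proc/mdstat,
 * e.g. (illustrative values only):
 *   bitmap: 3/117 pages [12KB], 65536KB chunk, file: /bitmaps/md0.bmp
 */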
1808 void bitmap_status(struct seq_file *seq, struct bitmap *bitmap)
1809 {
1810         unsigned long chunk_kb;
1811         unsigned long flags;
1812
1813         if (!bitmap)
1814                 return;
1815
1816         spin_lock_irqsave(&bitmap->lock, flags);
1817         chunk_kb = bitmap->mddev->bitmap_info.chunksize >> 10;
1818         seq_printf(seq, "bitmap: %lu/%lu pages [%luKB], "
1819                    "%lu%s chunk",
1820                    bitmap->pages - bitmap->missing_pages,
1821                    bitmap->pages,
1822                    (bitmap->pages - bitmap->missing_pages)
1823                    << (PAGE_SHIFT - 10),
1824                    chunk_kb ? chunk_kb : bitmap->mddev->bitmap_info.chunksize,
1825                    chunk_kb ? "KB" : "B");
1826         if (bitmap->file) {
1827                 seq_printf(seq, ", file: ");
1828                 seq_path(seq, &bitmap->file->f_path, " \t\n");
1829         }
1830
1831         seq_printf(seq, "\n");
1832         spin_unlock_irqrestore(&bitmap->lock, flags);
1833 }
1834
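/*
 * The handlers below back the md/bitmap/ sysfs directory of the array
 * (typically /sys/block/mdX/md/bitmap/): location, time_base, backlog,
 * chunksize, metadata, can_clear and max_backlog_used.
 * e.g. (illustrative):  echo 5 > /sys/block/md0/md/bitmap/time_base
 */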
1835 static ssize_t
1836 location_show(struct mddev *mddev, char *page)
1837 {
1838         ssize_t len;
1839         if (mddev->bitmap_info.file)
1840                 len = sprintf(page, "file");
1841         else if (mddev->bitmap_info.offset)
1842                 len = sprintf(page, "%+lld", (long long)mddev->bitmap_info.offset);
1843         else
1844                 len = sprintf(page, "none");
1845         len += sprintf(page+len, "\n");
1846         return len;
1847 }
1848
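/*
 * Accepted values: "none" removes an existing bitmap; a signed decimal
 * value selects an internal bitmap (normally interpreted as an offset,
 * in sectors, from the superblock); "file:..." is recognised but not
 * yet supported.
 */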
1849 static ssize_t
1850 location_store(struct mddev *mddev, const char *buf, size_t len)
1851 {
1852
1853         if (mddev->pers) {
1854                 if (!mddev->pers->quiesce)
1855                         return -EBUSY;
1856                 if (mddev->recovery || mddev->sync_thread)
1857                         return -EBUSY;
1858         }
1859
1860         if (mddev->bitmap || mddev->bitmap_info.file ||
1861             mddev->bitmap_info.offset) {
1862                 /* bitmap already configured.  Only option is to clear it */
1863                 if (strncmp(buf, "none", 4) != 0)
1864                         return -EBUSY;
1865                 if (mddev->pers) {
1866                         mddev->pers->quiesce(mddev, 1);
1867                         bitmap_destroy(mddev);
1868                         mddev->pers->quiesce(mddev, 0);
1869                 }
1870                 mddev->bitmap_info.offset = 0;
1871                 if (mddev->bitmap_info.file) {
1872                         struct file *f = mddev->bitmap_info.file;
1873                         mddev->bitmap_info.file = NULL;
1874                         restore_bitmap_write_access(f);
1875                         fput(f);
1876                 }
1877         } else {
1878                 /* No bitmap, OK to set a location */
1879                 long long offset;
1880                 if (strncmp(buf, "none", 4) == 0)
1881                         /* nothing to be done */;
1882                 else if (strncmp(buf, "file:", 5) == 0) {
1883                         /* Not supported yet */
1884                         return -EINVAL;
1885                 } else {
1886                         int rv;
1887                         if (buf[0] == '+')
1888                                 rv = strict_strtoll(buf+1, 10, &offset);
1889                         else
1890                                 rv = strict_strtoll(buf, 10, &offset);
1891                         if (rv)
1892                                 return rv;
1893                         if (offset == 0)
1894                                 return -EINVAL;
1895                         if (mddev->bitmap_info.external == 0 &&
1896                             mddev->major_version == 0 &&
1897                             offset != mddev->bitmap_info.default_offset)
1898                                 return -EINVAL;
1899                         mddev->bitmap_info.offset = offset;
1900                         if (mddev->pers) {
1901                                 mddev->pers->quiesce(mddev, 1);
1902                                 rv = bitmap_create(mddev);
1903                                 if (!rv)
1904                                         rv = bitmap_load(mddev);
1905                                 if (rv) {
1906                                         bitmap_destroy(mddev);
1907                                         mddev->bitmap_info.offset = 0;
1908                                 }
1909                                 mddev->pers->quiesce(mddev, 0);
1910                                 if (rv)
1911                                         return rv;
1912                         }
1913                 }
1914         }
1915         if (!mddev->external) {
1916                 /* Ensure new bitmap info is stored in
1917                  * metadata promptly.
1918                  */
1919                 set_bit(MD_CHANGE_DEVS, &mddev->flags);
1920                 md_wakeup_thread(mddev->thread);
1921         }
1922         return len;
1923 }
1924
1925 static struct md_sysfs_entry bitmap_location =
1926 __ATTR(location, S_IRUGO|S_IWUSR, location_show, location_store);
1927
1928 static ssize_t
1929 timeout_show(struct mddev *mddev, char *page)
1930 {
1931         ssize_t len;
1932         unsigned long secs = mddev->bitmap_info.daemon_sleep / HZ;
1933         unsigned long jifs = mddev->bitmap_info.daemon_sleep % HZ;
1934
1935         len = sprintf(page, "%lu", secs);
1936         if (jifs)
1937                 len += sprintf(page+len, ".%03u", jiffies_to_msecs(jifs));
1938         len += sprintf(page+len, "\n");
1939         return len;
1940 }
1941
1942 static ssize_t
1943 timeout_store(struct mddev *mddev, const char *buf, size_t len)
1944 {
1945         /* timeout can be set at any time */
1946         unsigned long timeout;
1947         int rv = strict_strtoul_scaled(buf, &timeout, 4);
1948         if (rv)
1949                 return rv;
1950
1951         /* just to make sure we don't overflow... */
1952         if (timeout >= LONG_MAX / HZ)
1953                 return -EINVAL;
1954
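        /* strict_strtoul_scaled(..., 4) returned seconds scaled by 10^4,
         * so this converts the value to jiffies.
         */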
1955         timeout = timeout * HZ / 10000;
1956
1957         if (timeout >= MAX_SCHEDULE_TIMEOUT)
1958                 timeout = MAX_SCHEDULE_TIMEOUT-1;
1959         if (timeout < 1)
1960                 timeout = 1;
1961         mddev->bitmap_info.daemon_sleep = timeout;
1962         if (mddev->thread) {
1963                 /* if thread->timeout is MAX_SCHEDULE_TIMEOUT, then
1964                  * the bitmap is all clean and we don't need to
1965                  * adjust the timeout right now
1966                  */
1967                 if (mddev->thread->timeout < MAX_SCHEDULE_TIMEOUT) {
1968                         mddev->thread->timeout = timeout;
1969                         md_wakeup_thread(mddev->thread);
1970                 }
1971         }
1972         return len;
1973 }
1974
1975 static struct md_sysfs_entry bitmap_timeout =
1976 __ATTR(time_base, S_IRUGO|S_IWUSR, timeout_show, timeout_store);
1977
1978 static ssize_t
1979 backlog_show(struct mddev *mddev, char *page)
1980 {
1981         return sprintf(page, "%lu\n", mddev->bitmap_info.max_write_behind);
1982 }
1983
1984 static ssize_t
1985 backlog_store(struct mddev *mddev, const char *buf, size_t len)
1986 {
1987         unsigned long backlog;
1988         int rv = strict_strtoul(buf, 10, &backlog);
1989         if (rv)
1990                 return rv;
1991         if (backlog > COUNTER_MAX)
1992                 return -EINVAL;
1993         mddev->bitmap_info.max_write_behind = backlog;
1994         return len;
1995 }
1996
1997 static struct md_sysfs_entry bitmap_backlog =
1998 __ATTR(backlog, S_IRUGO|S_IWUSR, backlog_show, backlog_store);
1999
2000 static ssize_t
2001 chunksize_show(struct mddev *mddev, char *page)
2002 {
2003         return sprintf(page, "%lu\n", mddev->bitmap_info.chunksize);
2004 }
2005
2006 static ssize_t
2007 chunksize_store(struct mddev *mddev, const char *buf, size_t len)
2008 {
2009         /* Can only be changed when no bitmap is active */
2010         int rv;
2011         unsigned long csize;
2012         if (mddev->bitmap)
2013                 return -EBUSY;
2014         rv = strict_strtoul(buf, 10, &csize);
2015         if (rv)
2016                 return rv;
2017         if (csize < 512 ||
2018             !is_power_of_2(csize))
2019                 return -EINVAL;
2020         mddev->bitmap_info.chunksize = csize;
2021         return len;
2022 }
2023
2024 static struct md_sysfs_entry bitmap_chunksize =
2025 __ATTR(chunksize, S_IRUGO|S_IWUSR, chunksize_show, chunksize_store);
2026
2027 static ssize_t metadata_show(struct mddev *mddev, char *page)
2028 {
2029         return sprintf(page, "%s\n", (mddev->bitmap_info.external
2030                                       ? "external" : "internal"));
2031 }
2032
2033 static ssize_t metadata_store(struct mddev *mddev, const char *buf, size_t len)
2034 {
2035         if (mddev->bitmap ||
2036             mddev->bitmap_info.file ||
2037             mddev->bitmap_info.offset)
2038                 return -EBUSY;
2039         if (strncmp(buf, "external", 8) == 0)
2040                 mddev->bitmap_info.external = 1;
2041         else if (strncmp(buf, "internal", 8) == 0)
2042                 mddev->bitmap_info.external = 0;
2043         else
2044                 return -EINVAL;
2045         return len;
2046 }
2047
2048 static struct md_sysfs_entry bitmap_metadata =
2049 __ATTR(metadata, S_IRUGO|S_IWUSR, metadata_show, metadata_store);
2050
2051 static ssize_t can_clear_show(struct mddev *mddev, char *page)
2052 {
2053         int len;
2054         if (mddev->bitmap)
2055                 len = sprintf(page, "%s\n", (mddev->bitmap->need_sync ?
2056                                              "false" : "true"));
2057         else
2058                 len = sprintf(page, "\n");
2059         return len;
2060 }
2061
2062 static ssize_t can_clear_store(struct mddev *mddev, const char *buf, size_t len)
2063 {
2064         if (mddev->bitmap == NULL)
2065                 return -ENOENT;
2066         if (strncmp(buf, "false", 5) == 0)
2067                 mddev->bitmap->need_sync = 1;
2068         else if (strncmp(buf, "true", 4) == 0) {
2069                 if (mddev->degraded)
2070                         return -EBUSY;
2071                 mddev->bitmap->need_sync = 0;
2072         } else
2073                 return -EINVAL;
2074         return len;
2075 }
2076
2077 static struct md_sysfs_entry bitmap_can_clear =
2078 __ATTR(can_clear, S_IRUGO|S_IWUSR, can_clear_show, can_clear_store);
2079
2080 static ssize_t
2081 behind_writes_used_show(struct mddev *mddev, char *page)
2082 {
2083         if (mddev->bitmap == NULL)
2084                 return sprintf(page, "0\n");
2085         return sprintf(page, "%lu\n",
2086                        mddev->bitmap->behind_writes_used);
2087 }
2088
2089 static ssize_t
2090 behind_writes_used_reset(struct mddev *mddev, const char *buf, size_t len)
2091 {
2092         if (mddev->bitmap)
2093                 mddev->bitmap->behind_writes_used = 0;
2094         return len;
2095 }
2096
2097 static struct md_sysfs_entry max_backlog_used =
2098 __ATTR(max_backlog_used, S_IRUGO | S_IWUSR,
2099        behind_writes_used_show, behind_writes_used_reset);
2100
2101 static struct attribute *md_bitmap_attrs[] = {
2102         &bitmap_location.attr,
2103         &bitmap_timeout.attr,
2104         &bitmap_backlog.attr,
2105         &bitmap_chunksize.attr,
2106         &bitmap_metadata.attr,
2107         &bitmap_can_clear.attr,
2108         &max_backlog_used.attr,
2109         NULL
2110 };
2111 struct attribute_group md_bitmap_group = {
2112         .name = "bitmap",
2113         .attrs = md_bitmap_attrs,
2114 };
2115