fs/btrfs/disk-io.c (Linux 3.3)
/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */

#include <linux/fs.h>
#include <linux/blkdev.h>
#include <linux/scatterlist.h>
#include <linux/swap.h>
#include <linux/radix-tree.h>
#include <linux/writeback.h>
#include <linux/buffer_head.h>
#include <linux/workqueue.h>
#include <linux/kthread.h>
#include <linux/freezer.h>
#include <linux/crc32c.h>
#include <linux/slab.h>
#include <linux/migrate.h>
#include <linux/ratelimit.h>
#include <asm/unaligned.h>
#include "compat.h"
#include "ctree.h"
#include "disk-io.h"
#include "transaction.h"
#include "btrfs_inode.h"
#include "volumes.h"
#include "print-tree.h"
#include "async-thread.h"
#include "locking.h"
#include "tree-log.h"
#include "free-space-cache.h"
#include "inode-map.h"
#include "check-integrity.h"

static struct extent_io_ops btree_extent_io_ops;
static void end_workqueue_fn(struct btrfs_work *work);
static void free_fs_root(struct btrfs_root *root);
static void btrfs_check_super_valid(struct btrfs_fs_info *fs_info,
                                    int read_only);
static int btrfs_destroy_ordered_operations(struct btrfs_root *root);
static int btrfs_destroy_ordered_extents(struct btrfs_root *root);
static int btrfs_destroy_delayed_refs(struct btrfs_transaction *trans,
                                      struct btrfs_root *root);
static int btrfs_destroy_pending_snapshots(struct btrfs_transaction *t);
static int btrfs_destroy_delalloc_inodes(struct btrfs_root *root);
static int btrfs_destroy_marked_extents(struct btrfs_root *root,
                                        struct extent_io_tree *dirty_pages,
                                        int mark);
static int btrfs_destroy_pinned_extent(struct btrfs_root *root,
                                       struct extent_io_tree *pinned_extents);
static int btrfs_cleanup_transaction(struct btrfs_root *root);

/*
 * end_io_wq structs are used to do processing in task context when an IO is
 * complete.  This is used during reads to verify checksums, and it is used
 * by writes to insert metadata for new file extents after IO is complete.
 */
struct end_io_wq {
        struct bio *bio;
        bio_end_io_t *end_io;
        void *private;
        struct btrfs_fs_info *info;
        int error;
        int metadata;
        struct list_head list;
        struct btrfs_work work;
};

/*
 * async submit bios are used to offload expensive checksumming
 * onto the worker threads.  They checksum file and metadata bios
 * just before they are sent down the IO stack.
 */
struct async_submit_bio {
        struct inode *inode;
        struct bio *bio;
        struct list_head list;
        extent_submit_bio_hook_t *submit_bio_start;
        extent_submit_bio_hook_t *submit_bio_done;
        int rw;
        int mirror_num;
        unsigned long bio_flags;
        /*
         * bio_offset is optional, can be used if the pages in the bio
         * can't tell us where in the file the bio should go
         */
        u64 bio_offset;
        struct btrfs_work work;
};

/*
 * Lockdep class keys for extent_buffer->lock's in this root.  For a given
 * eb, the lockdep key is determined by the btrfs_root it belongs to and
 * the level the eb occupies in the tree.
 *
 * Different roots are used for different purposes and may nest inside each
 * other, so they require separate keysets.  As lockdep keys should be
 * static, assign keysets according to the purpose of the root as indicated
 * by btrfs_root->objectid.  This ensures that all special purpose roots
 * have separate keysets.
 *
 * Lock-nesting across peer nodes is always done with the immediate parent
 * node locked thus preventing deadlock.  As lockdep doesn't know this, use
 * subclass to avoid triggering lockdep warning in such cases.
 *
 * The key is set by the readpage_end_io_hook after the buffer has passed
 * csum validation but before the pages are unlocked.  It is also set by
 * btrfs_init_new_buffer on freshly allocated blocks.
 *
 * We also add a check to make sure the highest level of the tree is the
 * same as our lockdep setup here.  If BTRFS_MAX_LEVEL changes, this code
 * needs update as well.
 */
#ifdef CONFIG_DEBUG_LOCK_ALLOC
# if BTRFS_MAX_LEVEL != 8
#  error
# endif

static struct btrfs_lockdep_keyset {
        u64                     id;             /* root objectid */
        const char              *name_stem;     /* lock name stem */
        char                    names[BTRFS_MAX_LEVEL + 1][20];
        struct lock_class_key   keys[BTRFS_MAX_LEVEL + 1];
} btrfs_lockdep_keysets[] = {
        { .id = BTRFS_ROOT_TREE_OBJECTID,       .name_stem = "root"     },
        { .id = BTRFS_EXTENT_TREE_OBJECTID,     .name_stem = "extent"   },
        { .id = BTRFS_CHUNK_TREE_OBJECTID,      .name_stem = "chunk"    },
        { .id = BTRFS_DEV_TREE_OBJECTID,        .name_stem = "dev"      },
        { .id = BTRFS_FS_TREE_OBJECTID,         .name_stem = "fs"       },
        { .id = BTRFS_CSUM_TREE_OBJECTID,       .name_stem = "csum"     },
        { .id = BTRFS_ORPHAN_OBJECTID,          .name_stem = "orphan"   },
        { .id = BTRFS_TREE_LOG_OBJECTID,        .name_stem = "log"      },
        { .id = BTRFS_TREE_RELOC_OBJECTID,      .name_stem = "treloc"   },
        { .id = BTRFS_DATA_RELOC_TREE_OBJECTID, .name_stem = "dreloc"   },
        { .id = 0,                              .name_stem = "tree"     },
};

void __init btrfs_init_lockdep(void)
{
        int i, j;

        /* initialize lockdep class names */
        for (i = 0; i < ARRAY_SIZE(btrfs_lockdep_keysets); i++) {
                struct btrfs_lockdep_keyset *ks = &btrfs_lockdep_keysets[i];

                for (j = 0; j < ARRAY_SIZE(ks->names); j++)
                        snprintf(ks->names[j], sizeof(ks->names[j]),
                                 "btrfs-%s-%02d", ks->name_stem, j);
        }
}

void btrfs_set_buffer_lockdep_class(u64 objectid, struct extent_buffer *eb,
                                    int level)
{
        struct btrfs_lockdep_keyset *ks;

        BUG_ON(level >= ARRAY_SIZE(ks->keys));

        /* find the matching keyset, id 0 is the default entry */
        for (ks = btrfs_lockdep_keysets; ks->id; ks++)
                if (ks->id == objectid)
                        break;

        lockdep_set_class_and_name(&eb->lock,
                                   &ks->keys[level], ks->names[level]);
}

#endif
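
/*
 * Example (for illustration only): callers pick the class from the
 * owner and level recorded in a validated buffer's header, e.g.
 *
 *      btrfs_set_buffer_lockdep_class(btrfs_header_owner(eb),
 *                                     eb, btrfs_header_level(eb));
 *
 * which is how btree_readpage_end_io_hook() below uses it.
 */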

/*
 * extents on the btree inode are pretty simple, there's one extent
 * that covers the entire device
 */
static struct extent_map *btree_get_extent(struct inode *inode,
                struct page *page, size_t pg_offset, u64 start, u64 len,
                int create)
{
        struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
        struct extent_map *em;
        int ret;

        read_lock(&em_tree->lock);
        em = lookup_extent_mapping(em_tree, start, len);
        if (em) {
                em->bdev =
                        BTRFS_I(inode)->root->fs_info->fs_devices->latest_bdev;
                read_unlock(&em_tree->lock);
                goto out;
        }
        read_unlock(&em_tree->lock);

        em = alloc_extent_map();
        if (!em) {
                em = ERR_PTR(-ENOMEM);
                goto out;
        }
        em->start = 0;
        em->len = (u64)-1;
        em->block_len = (u64)-1;
        em->block_start = 0;
        em->bdev = BTRFS_I(inode)->root->fs_info->fs_devices->latest_bdev;

        write_lock(&em_tree->lock);
        ret = add_extent_mapping(em_tree, em);
        if (ret == -EEXIST) {
                u64 failed_start = em->start;
                u64 failed_len = em->len;

                free_extent_map(em);
                em = lookup_extent_mapping(em_tree, start, len);
                if (em) {
                        ret = 0;
                } else {
                        em = lookup_extent_mapping(em_tree, failed_start,
                                                   failed_len);
                        ret = -EIO;
                }
        } else if (ret) {
                free_extent_map(em);
                em = NULL;
        }
        write_unlock(&em_tree->lock);

        if (ret)
                em = ERR_PTR(ret);
out:
        return em;
}

u32 btrfs_csum_data(struct btrfs_root *root, char *data, u32 seed, size_t len)
{
        return crc32c(seed, data, len);
}

void btrfs_csum_final(u32 crc, char *result)
{
        put_unaligned_le32(~crc, result);
}
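
/*
 * Example helper (illustrative sketch, not used elsewhere in this
 * file): the two helpers above combine as seed -> btrfs_csum_data()
 * -> btrfs_csum_final(), the same pattern csum_tree_block() follows
 * below.  'out' must have room for the 4-byte crc32c result.
 */
static inline void example_csum_buffer(struct btrfs_root *root, char *data,
                                       size_t len, char *out)
{
        u32 crc = ~(u32)0;                      /* standard crc32c seed */

        crc = btrfs_csum_data(root, data, crc, len);
        btrfs_csum_final(crc, out);             /* stores ~crc as le32 */
}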

/*
 * compute the csum for a btree block, and either verify it or write it
 * into the csum field of the block.
 */
static int csum_tree_block(struct btrfs_root *root, struct extent_buffer *buf,
                           int verify)
{
        u16 csum_size = btrfs_super_csum_size(root->fs_info->super_copy);
        char *result = NULL;
        unsigned long len;
        unsigned long cur_len;
        unsigned long offset = BTRFS_CSUM_SIZE;
        char *kaddr;
        unsigned long map_start;
        unsigned long map_len;
        int err;
        u32 crc = ~(u32)0;
        unsigned long inline_result;

        len = buf->len - offset;
        while (len > 0) {
                err = map_private_extent_buffer(buf, offset, 32,
                                        &kaddr, &map_start, &map_len);
                if (err)
                        return 1;
                cur_len = min(len, map_len - (offset - map_start));
                crc = btrfs_csum_data(root, kaddr + offset - map_start,
                                      crc, cur_len);
                len -= cur_len;
                offset += cur_len;
        }
        if (csum_size > sizeof(inline_result)) {
                result = kzalloc(csum_size * sizeof(char), GFP_NOFS);
                if (!result)
                        return 1;
        } else {
                result = (char *)&inline_result;
        }

        btrfs_csum_final(crc, result);

        if (verify) {
                if (memcmp_extent_buffer(buf, result, 0, csum_size)) {
                        u32 val;
                        u32 found = 0;
                        memcpy(&found, result, csum_size);

                        read_extent_buffer(buf, &val, 0, csum_size);
                        printk_ratelimited(KERN_INFO "btrfs: %s checksum verify "
                                       "failed on %llu wanted %X found %X "
                                       "level %d\n",
                                       root->fs_info->sb->s_id,
                                       (unsigned long long)buf->start, val, found,
                                       btrfs_header_level(buf));
                        if (result != (char *)&inline_result)
                                kfree(result);
                        return 1;
                }
        } else {
                write_extent_buffer(buf, result, 0, csum_size);
        }
        if (result != (char *)&inline_result)
                kfree(result);
        return 0;
}

/*
 * we can't consider a given block up to date unless the transid of the
 * block matches the transid in the parent node's pointer.  This is how we
 * detect blocks that either didn't get written at all or got written
 * in the wrong place.
 */
static int verify_parent_transid(struct extent_io_tree *io_tree,
                                 struct extent_buffer *eb, u64 parent_transid)
{
        struct extent_state *cached_state = NULL;
        int ret;

        if (!parent_transid || btrfs_header_generation(eb) == parent_transid)
                return 0;

        lock_extent_bits(io_tree, eb->start, eb->start + eb->len - 1,
                         0, &cached_state, GFP_NOFS);
        if (extent_buffer_uptodate(io_tree, eb, cached_state) &&
            btrfs_header_generation(eb) == parent_transid) {
                ret = 0;
                goto out;
        }
        printk_ratelimited("parent transid verify failed on %llu wanted %llu "
                       "found %llu\n",
                       (unsigned long long)eb->start,
                       (unsigned long long)parent_transid,
                       (unsigned long long)btrfs_header_generation(eb));
        ret = 1;
        clear_extent_buffer_uptodate(io_tree, eb, &cached_state);
out:
        unlock_extent_cached(io_tree, eb->start, eb->start + eb->len - 1,
                             &cached_state, GFP_NOFS);
        return ret;
}

/*
 * helper to read a given tree block, doing retries as required when
 * the checksums don't match and we have alternate mirrors to try.
 */
static int btree_read_extent_buffer_pages(struct btrfs_root *root,
                                          struct extent_buffer *eb,
                                          u64 start, u64 parent_transid)
{
        struct extent_io_tree *io_tree;
        int ret;
        int num_copies = 0;
        int mirror_num = 0;

        clear_bit(EXTENT_BUFFER_CORRUPT, &eb->bflags);
        io_tree = &BTRFS_I(root->fs_info->btree_inode)->io_tree;
        while (1) {
                ret = read_extent_buffer_pages(io_tree, eb, start,
                                               WAIT_COMPLETE,
                                               btree_get_extent, mirror_num);
                if (!ret &&
                    !verify_parent_transid(io_tree, eb, parent_transid))
                        return ret;

                /*
                 * This buffer's crc is fine, but its contents are corrupted, so
                 * there is no reason to read the other copies, they won't be
                 * any less wrong.
                 */
                if (test_bit(EXTENT_BUFFER_CORRUPT, &eb->bflags))
                        return ret;

                num_copies = btrfs_num_copies(&root->fs_info->mapping_tree,
                                              eb->start, eb->len);
                if (num_copies == 1)
                        return ret;

                mirror_num++;
                if (mirror_num > num_copies)
                        return ret;
        }
        return -EIO;
}

/*
 * checksum a dirty tree block before IO.  This has extra checks to make sure
 * we only fill in the checksum field in the first page of a multi-page block
 */
static int csum_dirty_buffer(struct btrfs_root *root, struct page *page)
{
        struct extent_io_tree *tree;
        u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
        u64 found_start;
        unsigned long len;
        struct extent_buffer *eb;
        int ret;

        tree = &BTRFS_I(page->mapping->host)->io_tree;

        if (page->private == EXTENT_PAGE_PRIVATE) {
                WARN_ON(1);
                goto out;
        }
        if (!page->private) {
                WARN_ON(1);
                goto out;
        }
        len = page->private >> 2;
        WARN_ON(len == 0);

        eb = alloc_extent_buffer(tree, start, len, page);
        if (eb == NULL) {
                WARN_ON(1);
                goto out;
        }
        ret = btree_read_extent_buffer_pages(root, eb, start + PAGE_CACHE_SIZE,
                                             btrfs_header_generation(eb));
        BUG_ON(ret);
        WARN_ON(!btrfs_header_flag(eb, BTRFS_HEADER_FLAG_WRITTEN));

        found_start = btrfs_header_bytenr(eb);
        if (found_start != start) {
                WARN_ON(1);
                goto err;
        }
        if (eb->first_page != page) {
                WARN_ON(1);
                goto err;
        }
        if (!PageUptodate(page)) {
                WARN_ON(1);
                goto err;
        }
        csum_tree_block(root, eb, 0);
err:
        free_extent_buffer(eb);
out:
        return 0;
}

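/*
 * Verify that the fsid in a tree block's header matches this filesystem
 * or one of its seed device sets.  Returns 0 on a match, 1 otherwise.
 */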
static int check_tree_block_fsid(struct btrfs_root *root,
                                 struct extent_buffer *eb)
{
        struct btrfs_fs_devices *fs_devices = root->fs_info->fs_devices;
        u8 fsid[BTRFS_UUID_SIZE];
        int ret = 1;

        read_extent_buffer(eb, fsid, (unsigned long)btrfs_header_fsid(eb),
                           BTRFS_FSID_SIZE);
        while (fs_devices) {
                if (!memcmp(fsid, fs_devices->fsid, BTRFS_FSID_SIZE)) {
                        ret = 0;
                        break;
                }
                fs_devices = fs_devices->seed;
        }
        return ret;
}

#define CORRUPT(reason, eb, root, slot)                         \
        printk(KERN_CRIT "btrfs: corrupt leaf, %s: block=%llu," \
               "root=%llu, slot=%d\n", reason,                  \
               (unsigned long long)btrfs_header_bytenr(eb),     \
               (unsigned long long)root->objectid, slot)

static noinline int check_leaf(struct btrfs_root *root,
                               struct extent_buffer *leaf)
{
        struct btrfs_key key;
        struct btrfs_key leaf_key;
        u32 nritems = btrfs_header_nritems(leaf);
        int slot;

        if (nritems == 0)
                return 0;

        /* Check the 0 item */
        if (btrfs_item_offset_nr(leaf, 0) + btrfs_item_size_nr(leaf, 0) !=
            BTRFS_LEAF_DATA_SIZE(root)) {
                CORRUPT("invalid item offset size pair", leaf, root, 0);
                return -EIO;
        }

        /*
         * Check to make sure each item's keys are in the correct order and
         * their offsets make sense.  We only have to loop through nritems-1
         * because we check the current slot against the next slot, which
         * verifies that the next slot's offset+size makes sense and that
         * the current slot's offset is correct.
         */
        for (slot = 0; slot < nritems - 1; slot++) {
                btrfs_item_key_to_cpu(leaf, &leaf_key, slot);
                btrfs_item_key_to_cpu(leaf, &key, slot + 1);

                /* Make sure the keys are in the right order */
                if (btrfs_comp_cpu_keys(&leaf_key, &key) >= 0) {
                        CORRUPT("bad key order", leaf, root, slot);
                        return -EIO;
                }

                /*
                 * Make sure the offset and ends are right, remember that the
                 * item data starts at the end of the leaf and grows towards the
                 * front.
                 */
                if (btrfs_item_offset_nr(leaf, slot) !=
                        btrfs_item_end_nr(leaf, slot + 1)) {
                        CORRUPT("slot offset bad", leaf, root, slot);
                        return -EIO;
                }

                /*
                 * Check to make sure that we don't point outside of the
                 * leaf, just in case all the items are consistent with
                 * each other but all point outside of the leaf.
                 */
                if (btrfs_item_end_nr(leaf, slot) >
                    BTRFS_LEAF_DATA_SIZE(root)) {
                        CORRUPT("slot end outside of leaf", leaf, root, slot);
                        return -EIO;
                }
        }

        return 0;
}

static int btree_readpage_end_io_hook(struct page *page, u64 start, u64 end,
                               struct extent_state *state)
{
        struct extent_io_tree *tree;
        u64 found_start;
        int found_level;
        unsigned long len;
        struct extent_buffer *eb;
        struct btrfs_root *root = BTRFS_I(page->mapping->host)->root;
        int ret = 0;

        tree = &BTRFS_I(page->mapping->host)->io_tree;
        if (page->private == EXTENT_PAGE_PRIVATE)
                goto out;
        if (!page->private)
                goto out;

        len = page->private >> 2;
        WARN_ON(len == 0);

        eb = alloc_extent_buffer(tree, start, len, page);
        if (eb == NULL) {
                ret = -EIO;
                goto out;
        }

        found_start = btrfs_header_bytenr(eb);
        if (found_start != start) {
                printk_ratelimited(KERN_INFO "btrfs bad tree block start "
                               "%llu %llu\n",
                               (unsigned long long)found_start,
                               (unsigned long long)eb->start);
                ret = -EIO;
                goto err;
        }
        if (eb->first_page != page) {
                printk(KERN_INFO "btrfs bad first page %lu %lu\n",
                       eb->first_page->index, page->index);
                WARN_ON(1);
                ret = -EIO;
                goto err;
        }
        if (check_tree_block_fsid(root, eb)) {
                printk_ratelimited(KERN_INFO "btrfs bad fsid on block %llu\n",
                               (unsigned long long)eb->start);
                ret = -EIO;
                goto err;
        }
        found_level = btrfs_header_level(eb);

        btrfs_set_buffer_lockdep_class(btrfs_header_owner(eb),
                                       eb, found_level);

        ret = csum_tree_block(root, eb, 1);
        if (ret) {
                ret = -EIO;
                goto err;
        }

        /*
         * If this is a leaf block and it is corrupt, set the corrupt bit so
         * that we don't try and read the other copies of this block, just
         * return -EIO.
         */
        if (found_level == 0 && check_leaf(root, eb)) {
                set_bit(EXTENT_BUFFER_CORRUPT, &eb->bflags);
                ret = -EIO;
        }

        end = min_t(u64, eb->len, PAGE_CACHE_SIZE);
        end = eb->start + end - 1;
err:
        if (test_bit(EXTENT_BUFFER_READAHEAD, &eb->bflags)) {
                clear_bit(EXTENT_BUFFER_READAHEAD, &eb->bflags);
                btree_readahead_hook(root, eb, eb->start, ret);
        }

        free_extent_buffer(eb);
out:
        return ret;
}

static int btree_io_failed_hook(struct bio *failed_bio,
                         struct page *page, u64 start, u64 end,
                         int mirror_num, struct extent_state *state)
{
        struct extent_io_tree *tree;
        unsigned long len;
        struct extent_buffer *eb;
        struct btrfs_root *root = BTRFS_I(page->mapping->host)->root;

        tree = &BTRFS_I(page->mapping->host)->io_tree;
        if (page->private == EXTENT_PAGE_PRIVATE)
                goto out;
        if (!page->private)
                goto out;

        len = page->private >> 2;
        WARN_ON(len == 0);

        eb = alloc_extent_buffer(tree, start, len, page);
        if (eb == NULL)
                goto out;

        if (test_bit(EXTENT_BUFFER_READAHEAD, &eb->bflags)) {
                clear_bit(EXTENT_BUFFER_READAHEAD, &eb->bflags);
                btree_readahead_hook(root, eb, eb->start, -EIO);
        }
        free_extent_buffer(eb);

out:
        return -EIO;    /* we fixed nothing */
}

static void end_workqueue_bio(struct bio *bio, int err)
{
        struct end_io_wq *end_io_wq = bio->bi_private;
        struct btrfs_fs_info *fs_info;

        fs_info = end_io_wq->info;
        end_io_wq->error = err;
        end_io_wq->work.func = end_workqueue_fn;
        end_io_wq->work.flags = 0;

        if (bio->bi_rw & REQ_WRITE) {
                if (end_io_wq->metadata == 1)
                        btrfs_queue_worker(&fs_info->endio_meta_write_workers,
                                           &end_io_wq->work);
                else if (end_io_wq->metadata == 2)
                        btrfs_queue_worker(&fs_info->endio_freespace_worker,
                                           &end_io_wq->work);
                else
                        btrfs_queue_worker(&fs_info->endio_write_workers,
                                           &end_io_wq->work);
        } else {
                if (end_io_wq->metadata)
                        btrfs_queue_worker(&fs_info->endio_meta_workers,
                                           &end_io_wq->work);
                else
                        btrfs_queue_worker(&fs_info->endio_workers,
                                           &end_io_wq->work);
        }
}

/*
 * For the metadata arg you want
 *
 * 0 - if data
 * 1 - if normal metadata
 * 2 - if writing to the free space cache area
 */
int btrfs_bio_wq_end_io(struct btrfs_fs_info *info, struct bio *bio,
                        int metadata)
{
        struct end_io_wq *end_io_wq;
        end_io_wq = kmalloc(sizeof(*end_io_wq), GFP_NOFS);
        if (!end_io_wq)
                return -ENOMEM;

        end_io_wq->private = bio->bi_private;
        end_io_wq->end_io = bio->bi_end_io;
        end_io_wq->info = info;
        end_io_wq->error = 0;
        end_io_wq->bio = bio;
        end_io_wq->metadata = metadata;

        bio->bi_private = end_io_wq;
        bio->bi_end_io = end_workqueue_bio;
        return 0;
}

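/*
 * Limit on in-flight async submit bios: 256 for each of
 * min(worker thread count, open devices).
 */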
unsigned long btrfs_async_submit_limit(struct btrfs_fs_info *info)
{
        unsigned long limit = min_t(unsigned long,
                                    info->workers.max_workers,
                                    info->fs_devices->open_devices);
        return 256 * limit;
}

static void run_one_async_start(struct btrfs_work *work)
{
        struct async_submit_bio *async;

        async = container_of(work, struct async_submit_bio, work);
        async->submit_bio_start(async->inode, async->rw, async->bio,
                               async->mirror_num, async->bio_flags,
                               async->bio_offset);
}

static void run_one_async_done(struct btrfs_work *work)
{
        struct btrfs_fs_info *fs_info;
        struct async_submit_bio *async;
        int limit;

        async = container_of(work, struct async_submit_bio, work);
        fs_info = BTRFS_I(async->inode)->root->fs_info;

        limit = btrfs_async_submit_limit(fs_info);
        limit = limit * 2 / 3;

        atomic_dec(&fs_info->nr_async_submits);

        if (atomic_read(&fs_info->nr_async_submits) < limit &&
            waitqueue_active(&fs_info->async_submit_wait))
                wake_up(&fs_info->async_submit_wait);

        async->submit_bio_done(async->inode, async->rw, async->bio,
                               async->mirror_num, async->bio_flags,
                               async->bio_offset);
}

static void run_one_async_free(struct btrfs_work *work)
{
        struct async_submit_bio *async;

        async = container_of(work, struct async_submit_bio, work);
        kfree(async);
}

int btrfs_wq_submit_bio(struct btrfs_fs_info *fs_info, struct inode *inode,
                        int rw, struct bio *bio, int mirror_num,
                        unsigned long bio_flags,
                        u64 bio_offset,
                        extent_submit_bio_hook_t *submit_bio_start,
                        extent_submit_bio_hook_t *submit_bio_done)
{
        struct async_submit_bio *async;

        async = kmalloc(sizeof(*async), GFP_NOFS);
        if (!async)
                return -ENOMEM;

        async->inode = inode;
        async->rw = rw;
        async->bio = bio;
        async->mirror_num = mirror_num;
        async->submit_bio_start = submit_bio_start;
        async->submit_bio_done = submit_bio_done;

        async->work.func = run_one_async_start;
        async->work.ordered_func = run_one_async_done;
        async->work.ordered_free = run_one_async_free;

        async->work.flags = 0;
        async->bio_flags = bio_flags;
        async->bio_offset = bio_offset;

        atomic_inc(&fs_info->nr_async_submits);

        if (rw & REQ_SYNC)
                btrfs_set_work_high_prio(&async->work);

        btrfs_queue_worker(&fs_info->workers, &async->work);

        while (atomic_read(&fs_info->async_submit_draining) &&
              atomic_read(&fs_info->nr_async_submits)) {
                wait_event(fs_info->async_submit_wait,
                           (atomic_read(&fs_info->nr_async_submits) == 0));
        }

        return 0;
}

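/*
 * csum each dirty tree block page in this bio before it is handed to
 * the block layer.
 */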
static int btree_csum_one_bio(struct bio *bio)
{
        struct bio_vec *bvec = bio->bi_io_vec;
        int bio_index = 0;
        struct btrfs_root *root;

        WARN_ON(bio->bi_vcnt <= 0);
        while (bio_index < bio->bi_vcnt) {
                root = BTRFS_I(bvec->bv_page->mapping->host)->root;
                csum_dirty_buffer(root, bvec->bv_page);
                bio_index++;
                bvec++;
        }
        return 0;
}

static int __btree_submit_bio_start(struct inode *inode, int rw,
                                    struct bio *bio, int mirror_num,
                                    unsigned long bio_flags,
                                    u64 bio_offset)
{
        /*
         * when we're called for a write, we're already in the async
         * submission context.  Just csum the bio here; the jump into
         * btrfs_map_bio happens in __btree_submit_bio_done
         */
        btree_csum_one_bio(bio);
        return 0;
}

static int __btree_submit_bio_done(struct inode *inode, int rw, struct bio *bio,
                                 int mirror_num, unsigned long bio_flags,
                                 u64 bio_offset)
{
        /*
         * when we're called for a write, we're already in the async
         * submission context.  Just jump into btrfs_map_bio
         */
        return btrfs_map_bio(BTRFS_I(inode)->root, rw, bio, mirror_num, 1);
}

static int btree_submit_bio_hook(struct inode *inode, int rw, struct bio *bio,
                                 int mirror_num, unsigned long bio_flags,
                                 u64 bio_offset)
{
        int ret;

        ret = btrfs_bio_wq_end_io(BTRFS_I(inode)->root->fs_info,
                                          bio, 1);
        BUG_ON(ret);

        if (!(rw & REQ_WRITE)) {
                /*
                 * called for a read, do the setup so that checksum validation
                 * can happen in the async kernel threads
                 */
                return btrfs_map_bio(BTRFS_I(inode)->root, rw, bio,
                                     mirror_num, 0);
        }

        /*
         * kthread helpers are used to submit writes so that checksumming
         * can happen in parallel across all CPUs
         */
        return btrfs_wq_submit_bio(BTRFS_I(inode)->root->fs_info,
                                   inode, rw, bio, mirror_num, 0,
                                   bio_offset,
                                   __btree_submit_bio_start,
                                   __btree_submit_bio_done);
}

#ifdef CONFIG_MIGRATION
static int btree_migratepage(struct address_space *mapping,
                        struct page *newpage, struct page *page,
                        enum migrate_mode mode)
{
        /*
         * we can't safely write a btree page from here,
         * we haven't done the locking hook
         */
        if (PageDirty(page))
                return -EAGAIN;
        /*
         * Buffers may be managed in a filesystem specific way.
         * We must have no buffers or drop them.
         */
        if (page_has_private(page) &&
            !try_to_release_page(page, GFP_KERNEL))
                return -EAGAIN;
        return migrate_page(mapping, newpage, page, mode);
}
#endif

static int btree_writepage(struct page *page, struct writeback_control *wbc)
{
        struct extent_io_tree *tree;
        struct btrfs_root *root = BTRFS_I(page->mapping->host)->root;
        struct extent_buffer *eb;
        int was_dirty;

        tree = &BTRFS_I(page->mapping->host)->io_tree;
        if (!(current->flags & PF_MEMALLOC)) {
                return extent_write_full_page(tree, page,
                                              btree_get_extent, wbc);
        }

        redirty_page_for_writepage(wbc, page);
        eb = btrfs_find_tree_block(root, page_offset(page), PAGE_CACHE_SIZE);
        WARN_ON(!eb);

        was_dirty = test_and_set_bit(EXTENT_BUFFER_DIRTY, &eb->bflags);
        if (!was_dirty) {
                spin_lock(&root->fs_info->delalloc_lock);
                root->fs_info->dirty_metadata_bytes += PAGE_CACHE_SIZE;
                spin_unlock(&root->fs_info->delalloc_lock);
        }
        free_extent_buffer(eb);

        unlock_page(page);
        return 0;
}

static int btree_writepages(struct address_space *mapping,
                            struct writeback_control *wbc)
{
        struct extent_io_tree *tree;
        tree = &BTRFS_I(mapping->host)->io_tree;
        if (wbc->sync_mode == WB_SYNC_NONE) {
                struct btrfs_root *root = BTRFS_I(mapping->host)->root;
                u64 num_dirty;
                unsigned long thresh = 32 * 1024 * 1024;

                if (wbc->for_kupdate)
                        return 0;

                /* this is a bit racy, but that's ok */
                num_dirty = root->fs_info->dirty_metadata_bytes;
                if (num_dirty < thresh)
                        return 0;
        }
        return extent_writepages(tree, mapping, btree_get_extent, wbc);
}

static int btree_readpage(struct file *file, struct page *page)
{
        struct extent_io_tree *tree;
        tree = &BTRFS_I(page->mapping->host)->io_tree;
        return extent_read_full_page(tree, page, btree_get_extent, 0);
}

static int btree_releasepage(struct page *page, gfp_t gfp_flags)
{
        struct extent_io_tree *tree;
        struct extent_map_tree *map;
        int ret;

        if (PageWriteback(page) || PageDirty(page))
                return 0;

        tree = &BTRFS_I(page->mapping->host)->io_tree;
        map = &BTRFS_I(page->mapping->host)->extent_tree;

        /*
         * We need to mask out eg. __GFP_HIGHMEM and __GFP_DMA32 as we're doing
         * slab allocation from alloc_extent_state down the callchain where
         * it'd hit a BUG_ON as those flags are not allowed.
         */
        gfp_flags &= ~GFP_SLAB_BUG_MASK;

        ret = try_release_extent_state(map, tree, page, gfp_flags);
        if (!ret)
                return 0;

        ret = try_release_extent_buffer(tree, page);
        if (ret == 1) {
                ClearPagePrivate(page);
                set_page_private(page, 0);
                page_cache_release(page);
        }

        return ret;
}

static void btree_invalidatepage(struct page *page, unsigned long offset)
{
        struct extent_io_tree *tree;
        tree = &BTRFS_I(page->mapping->host)->io_tree;
        extent_invalidatepage(tree, page, offset);
        btree_releasepage(page, GFP_NOFS);
        if (PagePrivate(page)) {
                printk(KERN_WARNING "btrfs warning page private not zero "
                       "on page %llu\n", (unsigned long long)page_offset(page));
                ClearPagePrivate(page);
                set_page_private(page, 0);
                page_cache_release(page);
        }
}

static const struct address_space_operations btree_aops = {
        .readpage       = btree_readpage,
        .writepage      = btree_writepage,
        .writepages     = btree_writepages,
        .releasepage    = btree_releasepage,
        .invalidatepage = btree_invalidatepage,
#ifdef CONFIG_MIGRATION
        .migratepage    = btree_migratepage,
#endif
};

int readahead_tree_block(struct btrfs_root *root, u64 bytenr, u32 blocksize,
                         u64 parent_transid)
{
        struct extent_buffer *buf = NULL;
        struct inode *btree_inode = root->fs_info->btree_inode;
        int ret = 0;

        buf = btrfs_find_create_tree_block(root, bytenr, blocksize);
        if (!buf)
                return 0;
        read_extent_buffer_pages(&BTRFS_I(btree_inode)->io_tree,
                                 buf, 0, WAIT_NONE, btree_get_extent, 0);
        free_extent_buffer(buf);
        return ret;
}

int reada_tree_block_flagged(struct btrfs_root *root, u64 bytenr, u32 blocksize,
                         int mirror_num, struct extent_buffer **eb)
{
        struct extent_buffer *buf = NULL;
        struct inode *btree_inode = root->fs_info->btree_inode;
        struct extent_io_tree *io_tree = &BTRFS_I(btree_inode)->io_tree;
        int ret;

        buf = btrfs_find_create_tree_block(root, bytenr, blocksize);
        if (!buf)
                return 0;

        set_bit(EXTENT_BUFFER_READAHEAD, &buf->bflags);

        ret = read_extent_buffer_pages(io_tree, buf, 0, WAIT_PAGE_LOCK,
                                       btree_get_extent, mirror_num);
        if (ret) {
                free_extent_buffer(buf);
                return ret;
        }

        if (test_bit(EXTENT_BUFFER_CORRUPT, &buf->bflags)) {
                free_extent_buffer(buf);
                return -EIO;
        } else if (extent_buffer_uptodate(io_tree, buf, NULL)) {
                *eb = buf;
        } else {
                free_extent_buffer(buf);
        }
        return 0;
}

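/*
 * Find an already-cached extent buffer for this tree block without
 * reading or allocating anything new.
 */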
struct extent_buffer *btrfs_find_tree_block(struct btrfs_root *root,
                                            u64 bytenr, u32 blocksize)
{
        struct inode *btree_inode = root->fs_info->btree_inode;
        struct extent_buffer *eb;
        eb = find_extent_buffer(&BTRFS_I(btree_inode)->io_tree,
                                bytenr, blocksize);
        return eb;
}

struct extent_buffer *btrfs_find_create_tree_block(struct btrfs_root *root,
                                                 u64 bytenr, u32 blocksize)
{
        struct inode *btree_inode = root->fs_info->btree_inode;
        struct extent_buffer *eb;

        eb = alloc_extent_buffer(&BTRFS_I(btree_inode)->io_tree,
                                 bytenr, blocksize, NULL);
        return eb;
}

int btrfs_write_tree_block(struct extent_buffer *buf)
{
        return filemap_fdatawrite_range(buf->first_page->mapping, buf->start,
                                        buf->start + buf->len - 1);
}

int btrfs_wait_tree_block_writeback(struct extent_buffer *buf)
{
        return filemap_fdatawait_range(buf->first_page->mapping,
                                       buf->start, buf->start + buf->len - 1);
}

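/*
 * Read the tree block at bytenr, retrying other mirrors as needed, and
 * verify it against the transid recorded in the parent's pointer.  The
 * buffer is only marked uptodate if both checks pass.
 */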
struct extent_buffer *read_tree_block(struct btrfs_root *root, u64 bytenr,
                                      u32 blocksize, u64 parent_transid)
{
        struct extent_buffer *buf = NULL;
        int ret;

        buf = btrfs_find_create_tree_block(root, bytenr, blocksize);
        if (!buf)
                return NULL;

        ret = btree_read_extent_buffer_pages(root, buf, 0, parent_transid);

        if (ret == 0)
                set_bit(EXTENT_BUFFER_UPTODATE, &buf->bflags);
        return buf;
}

int clean_tree_block(struct btrfs_trans_handle *trans, struct btrfs_root *root,
                     struct extent_buffer *buf)
{
        struct inode *btree_inode = root->fs_info->btree_inode;
        if (btrfs_header_generation(buf) ==
            root->fs_info->running_transaction->transid) {
                btrfs_assert_tree_locked(buf);

                if (test_and_clear_bit(EXTENT_BUFFER_DIRTY, &buf->bflags)) {
                        spin_lock(&root->fs_info->delalloc_lock);
                        if (root->fs_info->dirty_metadata_bytes >= buf->len)
                                root->fs_info->dirty_metadata_bytes -= buf->len;
                        else
                                WARN_ON(1);
                        spin_unlock(&root->fs_info->delalloc_lock);
                }

                /* ugh, clear_extent_buffer_dirty needs to lock the page */
                btrfs_set_lock_blocking(buf);
                clear_extent_buffer_dirty(&BTRFS_I(btree_inode)->io_tree,
                                          buf);
        }
        return 0;
}

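/*
 * Initialize the fields of a btrfs_root that are common to all trees.
 */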
static int __setup_root(u32 nodesize, u32 leafsize, u32 sectorsize,
                        u32 stripesize, struct btrfs_root *root,
                        struct btrfs_fs_info *fs_info,
                        u64 objectid)
{
        root->node = NULL;
        root->commit_root = NULL;
        root->sectorsize = sectorsize;
        root->nodesize = nodesize;
        root->leafsize = leafsize;
        root->stripesize = stripesize;
        root->ref_cows = 0;
        root->track_dirty = 0;
        root->in_radix = 0;
        root->orphan_item_inserted = 0;
        root->orphan_cleanup_state = 0;

        root->objectid = objectid;
        root->last_trans = 0;
        root->highest_objectid = 0;
        root->name = NULL;
        root->inode_tree = RB_ROOT;
        INIT_RADIX_TREE(&root->delayed_nodes_tree, GFP_ATOMIC);
        root->block_rsv = NULL;
        root->orphan_block_rsv = NULL;

        INIT_LIST_HEAD(&root->dirty_list);
        INIT_LIST_HEAD(&root->orphan_list);
        INIT_LIST_HEAD(&root->root_list);
        spin_lock_init(&root->orphan_lock);
        spin_lock_init(&root->inode_lock);
        spin_lock_init(&root->accounting_lock);
        mutex_init(&root->objectid_mutex);
        mutex_init(&root->log_mutex);
        init_waitqueue_head(&root->log_writer_wait);
        init_waitqueue_head(&root->log_commit_wait[0]);
        init_waitqueue_head(&root->log_commit_wait[1]);
        atomic_set(&root->log_commit[0], 0);
        atomic_set(&root->log_commit[1], 0);
        atomic_set(&root->log_writers, 0);
        root->log_batch = 0;
        root->log_transid = 0;
        root->last_log_commit = 0;
        extent_io_tree_init(&root->dirty_log_pages,
                             fs_info->btree_inode->i_mapping);

        memset(&root->root_key, 0, sizeof(root->root_key));
        memset(&root->root_item, 0, sizeof(root->root_item));
        memset(&root->defrag_progress, 0, sizeof(root->defrag_progress));
        memset(&root->root_kobj, 0, sizeof(root->root_kobj));
        root->defrag_trans_start = fs_info->generation;
        init_completion(&root->kobj_unregister);
        root->defrag_running = 0;
        root->root_key.objectid = objectid;
        root->anon_dev = 0;
        return 0;
}

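/*
 * Read an existing root item from the root tree and load the root's
 * node and commit_root from disk.
 */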
static int find_and_setup_root(struct btrfs_root *tree_root,
                               struct btrfs_fs_info *fs_info,
                               u64 objectid,
                               struct btrfs_root *root)
{
        int ret;
        u32 blocksize;
        u64 generation;

        __setup_root(tree_root->nodesize, tree_root->leafsize,
                     tree_root->sectorsize, tree_root->stripesize,
                     root, fs_info, objectid);
        ret = btrfs_find_last_root(tree_root, objectid,
                                   &root->root_item, &root->root_key);
        if (ret > 0)
                return -ENOENT;
        BUG_ON(ret);

        generation = btrfs_root_generation(&root->root_item);
        blocksize = btrfs_level_size(root, btrfs_root_level(&root->root_item));
        root->commit_root = NULL;
        root->node = read_tree_block(root, btrfs_root_bytenr(&root->root_item),
                                     blocksize, generation);
        if (!root->node || !btrfs_buffer_uptodate(root->node, generation)) {
                free_extent_buffer(root->node);
                root->node = NULL;
                return -EIO;
        }
        root->commit_root = btrfs_root_node(root);
        return 0;
}

static struct btrfs_root *btrfs_alloc_root(struct btrfs_fs_info *fs_info)
{
        struct btrfs_root *root = kzalloc(sizeof(*root), GFP_NOFS);
        if (root)
                root->fs_info = fs_info;
        return root;
}

static struct btrfs_root *alloc_log_tree(struct btrfs_trans_handle *trans,
                                         struct btrfs_fs_info *fs_info)
{
        struct btrfs_root *root;
        struct btrfs_root *tree_root = fs_info->tree_root;
        struct extent_buffer *leaf;

        root = btrfs_alloc_root(fs_info);
        if (!root)
                return ERR_PTR(-ENOMEM);

        __setup_root(tree_root->nodesize, tree_root->leafsize,
                     tree_root->sectorsize, tree_root->stripesize,
                     root, fs_info, BTRFS_TREE_LOG_OBJECTID);

        root->root_key.objectid = BTRFS_TREE_LOG_OBJECTID;
        root->root_key.type = BTRFS_ROOT_ITEM_KEY;
        root->root_key.offset = BTRFS_TREE_LOG_OBJECTID;
        /*
         * log trees do not get reference counted because they go away
         * before a real commit is actually done.  They do store pointers
         * to file data extents, and those reference counts still get
         * updated (along with back refs to the log tree).
         */
        root->ref_cows = 0;

        leaf = btrfs_alloc_free_block(trans, root, root->leafsize, 0,
                                      BTRFS_TREE_LOG_OBJECTID, NULL,
                                      0, 0, 0, 0);
        if (IS_ERR(leaf)) {
                kfree(root);
                return ERR_CAST(leaf);
        }

        memset_extent_buffer(leaf, 0, 0, sizeof(struct btrfs_header));
        btrfs_set_header_bytenr(leaf, leaf->start);
        btrfs_set_header_generation(leaf, trans->transid);
        btrfs_set_header_backref_rev(leaf, BTRFS_MIXED_BACKREF_REV);
        btrfs_set_header_owner(leaf, BTRFS_TREE_LOG_OBJECTID);
        root->node = leaf;

        write_extent_buffer(root->node, root->fs_info->fsid,
                            (unsigned long)btrfs_header_fsid(root->node),
                            BTRFS_FSID_SIZE);
        btrfs_mark_buffer_dirty(root->node);
        btrfs_tree_unlock(root->node);
        return root;
}

int btrfs_init_log_root_tree(struct btrfs_trans_handle *trans,
                             struct btrfs_fs_info *fs_info)
{
        struct btrfs_root *log_root;

        log_root = alloc_log_tree(trans, fs_info);
        if (IS_ERR(log_root))
                return PTR_ERR(log_root);
        WARN_ON(fs_info->log_root_tree);
        fs_info->log_root_tree = log_root;
        return 0;
}

int btrfs_add_log_tree(struct btrfs_trans_handle *trans,
                       struct btrfs_root *root)
{
        struct btrfs_root *log_root;
        struct btrfs_inode_item *inode_item;

        log_root = alloc_log_tree(trans, root->fs_info);
        if (IS_ERR(log_root))
                return PTR_ERR(log_root);

        log_root->last_trans = trans->transid;
        log_root->root_key.offset = root->root_key.objectid;

        inode_item = &log_root->root_item.inode;
        inode_item->generation = cpu_to_le64(1);
        inode_item->size = cpu_to_le64(3);
        inode_item->nlink = cpu_to_le32(1);
        inode_item->nbytes = cpu_to_le64(root->leafsize);
        inode_item->mode = cpu_to_le32(S_IFDIR | 0755);

        btrfs_set_root_node(&log_root->root_item, log_root->node);

        WARN_ON(root->log_root);
        root->log_root = log_root;
        root->log_transid = 0;
        root->last_log_commit = 0;
        return 0;
}

struct btrfs_root *btrfs_read_fs_root_no_radix(struct btrfs_root *tree_root,
                                               struct btrfs_key *location)
{
        struct btrfs_root *root;
        struct btrfs_fs_info *fs_info = tree_root->fs_info;
        struct btrfs_path *path;
        struct extent_buffer *l;
        u64 generation;
        u32 blocksize;
        int ret = 0;

        root = btrfs_alloc_root(fs_info);
        if (!root)
                return ERR_PTR(-ENOMEM);
        if (location->offset == (u64)-1) {
                ret = find_and_setup_root(tree_root, fs_info,
                                          location->objectid, root);
                if (ret) {
                        kfree(root);
                        return ERR_PTR(ret);
                }
                goto out;
        }

        __setup_root(tree_root->nodesize, tree_root->leafsize,
                     tree_root->sectorsize, tree_root->stripesize,
                     root, fs_info, location->objectid);

        path = btrfs_alloc_path();
        if (!path) {
                kfree(root);
                return ERR_PTR(-ENOMEM);
        }
        ret = btrfs_search_slot(NULL, tree_root, location, path, 0, 0);
        if (ret == 0) {
                l = path->nodes[0];
                read_extent_buffer(l, &root->root_item,
                                btrfs_item_ptr_offset(l, path->slots[0]),
                                sizeof(root->root_item));
                memcpy(&root->root_key, location, sizeof(*location));
        }
        btrfs_free_path(path);
        if (ret) {
                kfree(root);
                if (ret > 0)
                        ret = -ENOENT;
                return ERR_PTR(ret);
        }

        generation = btrfs_root_generation(&root->root_item);
        blocksize = btrfs_level_size(root, btrfs_root_level(&root->root_item));
        root->node = read_tree_block(root, btrfs_root_bytenr(&root->root_item),
                                     blocksize, generation);
        root->commit_root = btrfs_root_node(root);
        BUG_ON(!root->node);
out:
        if (location->objectid != BTRFS_TREE_LOG_OBJECTID) {
                root->ref_cows = 1;
                btrfs_check_and_init_root_item(&root->root_item);
        }

        return root;
}

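/*
 * Look up a subvolume root for the given key.  The well-known trees are
 * returned directly; everything else goes through the fs_roots_radix
 * cache and is read from disk on a miss.
 */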
1390 struct btrfs_root *btrfs_read_fs_root_no_name(struct btrfs_fs_info *fs_info,
1391                                               struct btrfs_key *location)
1392 {
1393         struct btrfs_root *root;
1394         int ret;
1395
1396         if (location->objectid == BTRFS_ROOT_TREE_OBJECTID)
1397                 return fs_info->tree_root;
1398         if (location->objectid == BTRFS_EXTENT_TREE_OBJECTID)
1399                 return fs_info->extent_root;
1400         if (location->objectid == BTRFS_CHUNK_TREE_OBJECTID)
1401                 return fs_info->chunk_root;
1402         if (location->objectid == BTRFS_DEV_TREE_OBJECTID)
1403                 return fs_info->dev_root;
1404         if (location->objectid == BTRFS_CSUM_TREE_OBJECTID)
1405                 return fs_info->csum_root;
1406 again:
1407         spin_lock(&fs_info->fs_roots_radix_lock);
1408         root = radix_tree_lookup(&fs_info->fs_roots_radix,
1409                                  (unsigned long)location->objectid);
1410         spin_unlock(&fs_info->fs_roots_radix_lock);
1411         if (root)
1412                 return root;
1413
1414         root = btrfs_read_fs_root_no_radix(fs_info->tree_root, location);
1415         if (IS_ERR(root))
1416                 return root;
1417
1418         root->free_ino_ctl = kzalloc(sizeof(*root->free_ino_ctl), GFP_NOFS);
1419         root->free_ino_pinned = kzalloc(sizeof(*root->free_ino_pinned),
1420                                         GFP_NOFS);
1421         if (!root->free_ino_pinned || !root->free_ino_ctl) {
1422                 ret = -ENOMEM;
1423                 goto fail;
1424         }
1425
1426         btrfs_init_free_ino_ctl(root);
1427         mutex_init(&root->fs_commit_mutex);
1428         spin_lock_init(&root->cache_lock);
1429         init_waitqueue_head(&root->cache_wait);
1430
1431         ret = get_anon_bdev(&root->anon_dev);
1432         if (ret)
1433                 goto fail;
1434
1435         if (btrfs_root_refs(&root->root_item) == 0) {
1436                 ret = -ENOENT;
1437                 goto fail;
1438         }
1439
1440         ret = btrfs_find_orphan_item(fs_info->tree_root, location->objectid);
1441         if (ret < 0)
1442                 goto fail;
1443         if (ret == 0)
1444                 root->orphan_item_inserted = 1;
1445
1446         ret = radix_tree_preload(GFP_NOFS & ~__GFP_HIGHMEM);
1447         if (ret)
1448                 goto fail;
1449
1450         spin_lock(&fs_info->fs_roots_radix_lock);
1451         ret = radix_tree_insert(&fs_info->fs_roots_radix,
1452                                 (unsigned long)root->root_key.objectid,
1453                                 root);
1454         if (ret == 0)
1455                 root->in_radix = 1;
1456
1457         spin_unlock(&fs_info->fs_roots_radix_lock);
1458         radix_tree_preload_end();
1459         if (ret) {
1460                 if (ret == -EEXIST) {
1461                         free_fs_root(root);
1462                         goto again;
1463                 }
1464                 goto fail;
1465         }
1466
1467         ret = btrfs_find_dead_roots(fs_info->tree_root,
1468                                     root->root_key.objectid);
1469         WARN_ON(ret);
1470         return root;
1471 fail:
1472         free_fs_root(root);
1473         return ERR_PTR(ret);
1474 }
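/*
 * The radix tree handling above is the standard preload pattern:
 * radix_tree_preload() reserves memory for the insertion while we can
 * still sleep, the insert itself runs under fs_roots_radix_lock, and
 * radix_tree_preload_end() drops the preload state.  -EEXIST from the
 * insert just means another thread populated the radix tree while we
 * were reading the root from disk, so we free our copy and retry the
 * lookup from the top.
 */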
1475
1476 static int btrfs_congested_fn(void *congested_data, int bdi_bits)
1477 {
1478         struct btrfs_fs_info *info = (struct btrfs_fs_info *)congested_data;
1479         int ret = 0;
1480         struct btrfs_device *device;
1481         struct backing_dev_info *bdi;
1482
1483         rcu_read_lock();
1484         list_for_each_entry_rcu(device, &info->fs_devices->devices, dev_list) {
1485                 if (!device->bdev)
1486                         continue;
1487                 bdi = blk_get_backing_dev_info(device->bdev);
1488                 if (bdi && bdi_congested(bdi, bdi_bits)) {
1489                         ret = 1;
1490                         break;
1491                 }
1492         }
1493         rcu_read_unlock();
1494         return ret;
1495 }
1496
1497 /*
1498  * If this fails, caller must call bdi_destroy() to get rid of the
1499  * bdi again.
1500  */
1501 static int setup_bdi(struct btrfs_fs_info *info, struct backing_dev_info *bdi)
1502 {
1503         int err;
1504
1505         bdi->capabilities = BDI_CAP_MAP_COPY;
1506         err = bdi_setup_and_register(bdi, "btrfs", BDI_CAP_MAP_COPY);
1507         if (err)
1508                 return err;
1509
1510         bdi->ra_pages   = default_backing_dev_info.ra_pages;
1511         bdi->congested_fn       = btrfs_congested_fn;
1512         bdi->congested_data     = info;
1513         return 0;
1514 }
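/*
 * The cleanup obligation documented above is honoured by open_ctree()
 * further down: once setup_bdi() succeeds, every later error path
 * unwinds through the fail_bdi label, which calls bdi_destroy().
 */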
1515
1516 static int bio_ready_for_csum(struct bio *bio)
1517 {
1518         u64 length = 0;
1519         u64 buf_len = 0;
1520         u64 start = 0;
1521         struct page *page;
1522         struct extent_io_tree *io_tree = NULL;
1523         struct bio_vec *bvec;
1524         int i;
1525         int ret;
1526
1527         bio_for_each_segment(bvec, bio, i) {
1528                 page = bvec->bv_page;
1529                 if (page->private == EXTENT_PAGE_PRIVATE) {
1530                         length += bvec->bv_len;
1531                         continue;
1532                 }
1533                 if (!page->private) {
1534                         length += bvec->bv_len;
1535                         continue;
1536                 }
1537                 length = bvec->bv_len;
1538                 buf_len = page->private >> 2;
1539                 start = page_offset(page) + bvec->bv_offset;
1540                 io_tree = &BTRFS_I(page->mapping->host)->io_tree;
1541         }
1542         /* are we fully contained in this bio? */
1543         if (buf_len <= length)
1544                 return 1;
1545
1546         ret = extent_range_uptodate(io_tree, start + length,
1547                                     start + buf_len - 1);
1548         return ret;
1549 }
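/*
 * Worked example for the check above (sizes are illustrative): with a
 * 16K tree block and 4K pages, the completed bio may cover only one
 * page of the block.  buf_len would be 16K but length only 4K, so we
 * fall through to extent_range_uptodate() to ask whether the rest of
 * the block already reached the page cache via earlier bios.
 */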
1550
1551 /*
1552  * called by the kthread helper functions to finally call the bio end_io
1553  * functions.  This is where read checksum verification actually happens
1554  */
1555 static void end_workqueue_fn(struct btrfs_work *work)
1556 {
1557         struct bio *bio;
1558         struct end_io_wq *end_io_wq;
1559         struct btrfs_fs_info *fs_info;
1560         int error;
1561
1562         end_io_wq = container_of(work, struct end_io_wq, work);
1563         bio = end_io_wq->bio;
1564         fs_info = end_io_wq->info;
1565
1566         /* metadata bio reads are special because the whole tree block must
1567          * be checksummed at once.  This makes sure the entire block is in
1568          * ram and up to date before trying to verify things.  For
1569          * RAM and up to date before trying to verify things.  For
1570          * blocksize <= pagesize, it is basically a no-op
1571         if (!(bio->bi_rw & REQ_WRITE) && end_io_wq->metadata &&
1572             !bio_ready_for_csum(bio)) {
1573                 btrfs_queue_worker(&fs_info->endio_meta_workers,
1574                                    &end_io_wq->work);
1575                 return;
1576         }
1577         error = end_io_wq->error;
1578         bio->bi_private = end_io_wq->private;
1579         bio->bi_end_io = end_io_wq->end_io;
1580         kfree(end_io_wq);
1581         bio_endio(bio, error);
1582 }
1583
1584 static int cleaner_kthread(void *arg)
1585 {
1586         struct btrfs_root *root = arg;
1587
1588         do {
1589                 vfs_check_frozen(root->fs_info->sb, SB_FREEZE_WRITE);
1590
1591                 if (!(root->fs_info->sb->s_flags & MS_RDONLY) &&
1592                     mutex_trylock(&root->fs_info->cleaner_mutex)) {
1593                         btrfs_run_delayed_iputs(root);
1594                         btrfs_clean_old_snapshots(root);
1595                         mutex_unlock(&root->fs_info->cleaner_mutex);
1596                         btrfs_run_defrag_inodes(root->fs_info);
1597                 }
1598
1599                 if (!try_to_freeze()) {
1600                         set_current_state(TASK_INTERRUPTIBLE);
1601                         if (!kthread_should_stop())
1602                                 schedule();
1603                         __set_current_state(TASK_RUNNING);
1604                 }
1605         } while (!kthread_should_stop());
1606         return 0;
1607 }
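/*
 * The sleep sequence above is the usual kthread idiom: mark the task
 * TASK_INTERRUPTIBLE before re-checking kthread_should_stop(), so a
 * wakeup from kthread_stop() cannot race with the check and be lost;
 * schedule() then either sleeps or returns right away.
 */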
1608
1609 static int transaction_kthread(void *arg)
1610 {
1611         struct btrfs_root *root = arg;
1612         struct btrfs_trans_handle *trans;
1613         struct btrfs_transaction *cur;
1614         u64 transid;
1615         unsigned long now;
1616         unsigned long delay;
1617         int ret;
1618
1619         do {
1620                 delay = HZ * 30;
1621                 vfs_check_frozen(root->fs_info->sb, SB_FREEZE_WRITE);
1622                 mutex_lock(&root->fs_info->transaction_kthread_mutex);
1623
1624                 spin_lock(&root->fs_info->trans_lock);
1625                 cur = root->fs_info->running_transaction;
1626                 if (!cur) {
1627                         spin_unlock(&root->fs_info->trans_lock);
1628                         goto sleep;
1629                 }
1630
1631                 now = get_seconds();
1632                 if (!cur->blocked &&
1633                     (now < cur->start_time || now - cur->start_time < 30)) {
1634                         spin_unlock(&root->fs_info->trans_lock);
1635                         delay = HZ * 5;
1636                         goto sleep;
1637                 }
1638                 transid = cur->transid;
1639                 spin_unlock(&root->fs_info->trans_lock);
1640
1641                 trans = btrfs_join_transaction(root);
1642                 BUG_ON(IS_ERR(trans));
1643                 if (transid == trans->transid) {
1644                         ret = btrfs_commit_transaction(trans, root);
1645                         BUG_ON(ret);
1646                 } else {
1647                         btrfs_end_transaction(trans, root);
1648                 }
1649 sleep:
1650                 wake_up_process(root->fs_info->cleaner_kthread);
1651                 mutex_unlock(&root->fs_info->transaction_kthread_mutex);
1652
1653                 if (!try_to_freeze()) {
1654                         set_current_state(TASK_INTERRUPTIBLE);
1655                         if (!kthread_should_stop() &&
1656                             !btrfs_transaction_blocked(root->fs_info))
1657                                 schedule_timeout(delay);
1658                         __set_current_state(TASK_RUNNING);
1659                 }
1660         } while (!kthread_should_stop());
1661         return 0;
1662 }
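/*
 * Timing summary for the loop above: commits are attempted roughly
 * every 30 seconds (delay = HZ * 30).  If the running transaction is
 * younger than 30 seconds and not blocked, we back off and re-check
 * in 5 seconds (delay = HZ * 5) instead of forcing an early commit.
 */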
1663
1664 /*
1665  * this will find the highest generation in the array of
1666  * root backups.  The index of the newest entry is returned,
1667  * or -1 if we can't find anything.
1668  *
1669  * We check to make sure the array is valid by comparing the
1670  * generation of the latest root in the array with the generation
1671  * in the super block.  If they don't match we pitch it.
1672  */
1673 static int find_newest_super_backup(struct btrfs_fs_info *info, u64 newest_gen)
1674 {
1675         u64 cur;
1676         int newest_index = -1;
1677         struct btrfs_root_backup *root_backup;
1678         int i;
1679
1680         for (i = 0; i < BTRFS_NUM_BACKUP_ROOTS; i++) {
1681                 root_backup = info->super_copy->super_roots + i;
1682                 cur = btrfs_backup_tree_root_gen(root_backup);
1683                 if (cur == newest_gen)
1684                         newest_index = i;
1685         }
1686
1687         /* check to see if we actually wrapped around */
1688         if (newest_index == BTRFS_NUM_BACKUP_ROOTS - 1) {
1689                 root_backup = info->super_copy->super_roots;
1690                 cur = btrfs_backup_tree_root_gen(root_backup);
1691                 if (cur == newest_gen)
1692                         newest_index = 0;
1693         }
1694         return newest_index;
1695 }
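/*
 * Example of the wrap handling above, assuming BTRFS_NUM_BACKUP_ROOTS
 * is 4: if the generations stored in slots 0..3 are 8, 5, 6, 8 (the
 * same generation can be written twice at umount, see
 * backup_super_roots() below), the scan leaves newest_index at 3, and
 * the extra check notices the ring wrapped and moves it back to 0.
 */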
1696
1697
1698 /*
1699  * find the oldest backup so we know where to store new entries
1700  * in the backup array.  This will set the backup_root_index
1701  * field in the fs_info struct
1702  */
1703 static void find_oldest_super_backup(struct btrfs_fs_info *info,
1704                                      u64 newest_gen)
1705 {
1706         int newest_index = -1;
1707
1708         newest_index = find_newest_super_backup(info, newest_gen);
1709         /* if there was garbage in there, just move along */
1710         if (newest_index == -1) {
1711                 info->backup_root_index = 0;
1712         } else {
1713                 info->backup_root_index = (newest_index + 1) % BTRFS_NUM_BACKUP_ROOTS;
1714         }
1715 }
1716
1717 /*
1718  * copy all the root pointers into the super backup array.
1719  * this will bump the backup pointer by one when it is
1720  * done
1721  */
1722 static void backup_super_roots(struct btrfs_fs_info *info)
1723 {
1724         int next_backup;
1725         struct btrfs_root_backup *root_backup;
1726         int last_backup;
1727
1728         next_backup = info->backup_root_index;
1729         last_backup = (next_backup + BTRFS_NUM_BACKUP_ROOTS - 1) %
1730                 BTRFS_NUM_BACKUP_ROOTS;
1731
1732         /*
1733          * just overwrite the last backup if we're at the same generation;
1734          * this happens only at umount
1735          */
1736         root_backup = info->super_for_commit->super_roots + last_backup;
1737         if (btrfs_backup_tree_root_gen(root_backup) ==
1738             btrfs_header_generation(info->tree_root->node))
1739                 next_backup = last_backup;
1740
1741         root_backup = info->super_for_commit->super_roots + next_backup;
1742
1743         /*
1744          * make sure all of our padding and empty slots get zero filled
1745          * regardless of which ones we use today
1746          */
1747         memset(root_backup, 0, sizeof(*root_backup));
1748
1749         info->backup_root_index = (next_backup + 1) % BTRFS_NUM_BACKUP_ROOTS;
1750
1751         btrfs_set_backup_tree_root(root_backup, info->tree_root->node->start);
1752         btrfs_set_backup_tree_root_gen(root_backup,
1753                                btrfs_header_generation(info->tree_root->node));
1754
1755         btrfs_set_backup_tree_root_level(root_backup,
1756                                btrfs_header_level(info->tree_root->node));
1757
1758         btrfs_set_backup_chunk_root(root_backup, info->chunk_root->node->start);
1759         btrfs_set_backup_chunk_root_gen(root_backup,
1760                                btrfs_header_generation(info->chunk_root->node));
1761         btrfs_set_backup_chunk_root_level(root_backup,
1762                                btrfs_header_level(info->chunk_root->node));
1763
1764         btrfs_set_backup_extent_root(root_backup, info->extent_root->node->start);
1765         btrfs_set_backup_extent_root_gen(root_backup,
1766                                btrfs_header_generation(info->extent_root->node));
1767         btrfs_set_backup_extent_root_level(root_backup,
1768                                btrfs_header_level(info->extent_root->node));
1769
1770         /*
1771          * we might commit during log recovery, which happens before we set
1772          * the fs_root.  Make sure it is valid before we fill it in.
1773          */
1774         if (info->fs_root && info->fs_root->node) {
1775                 btrfs_set_backup_fs_root(root_backup,
1776                                          info->fs_root->node->start);
1777                 btrfs_set_backup_fs_root_gen(root_backup,
1778                                btrfs_header_generation(info->fs_root->node));
1779                 btrfs_set_backup_fs_root_level(root_backup,
1780                                btrfs_header_level(info->fs_root->node));
1781         }
1782
1783         btrfs_set_backup_dev_root(root_backup, info->dev_root->node->start);
1784         btrfs_set_backup_dev_root_gen(root_backup,
1785                                btrfs_header_generation(info->dev_root->node));
1786         btrfs_set_backup_dev_root_level(root_backup,
1787                                        btrfs_header_level(info->dev_root->node));
1788
1789         btrfs_set_backup_csum_root(root_backup, info->csum_root->node->start);
1790         btrfs_set_backup_csum_root_gen(root_backup,
1791                                btrfs_header_generation(info->csum_root->node));
1792         btrfs_set_backup_csum_root_level(root_backup,
1793                                btrfs_header_level(info->csum_root->node));
1794
1795         btrfs_set_backup_total_bytes(root_backup,
1796                              btrfs_super_total_bytes(info->super_copy));
1797         btrfs_set_backup_bytes_used(root_backup,
1798                              btrfs_super_bytes_used(info->super_copy));
1799         btrfs_set_backup_num_devices(root_backup,
1800                              btrfs_super_num_devices(info->super_copy));
1801
1802         /*
1803          * if we don't copy this out to the super_copy, it won't get remembered
1804          * for the next commit
1805          */
1806         memcpy(&info->super_copy->super_roots,
1807                &info->super_for_commit->super_roots,
1808                sizeof(*root_backup) * BTRFS_NUM_BACKUP_ROOTS);
1809 }
1810
1811 /*
1812  * this copies info out of the root backup array and back into
1813  * the in-memory super block.  It is meant to help iterate through
1814  * the array, so you send it the number of backups you've already
1815  * tried and the last backup index you used.
1816  *
1817  * this returns -1 when it has tried all the backups
1818  */
1819 static noinline int next_root_backup(struct btrfs_fs_info *info,
1820                                      struct btrfs_super_block *super,
1821                                      int *num_backups_tried, int *backup_index)
1822 {
1823         struct btrfs_root_backup *root_backup;
1824         int newest = *backup_index;
1825
1826         if (*num_backups_tried == 0) {
1827                 u64 gen = btrfs_super_generation(super);
1828
1829                 newest = find_newest_super_backup(info, gen);
1830                 if (newest == -1)
1831                         return -1;
1832
1833                 *backup_index = newest;
1834                 *num_backups_tried = 1;
1835         } else if (*num_backups_tried == BTRFS_NUM_BACKUP_ROOTS) {
1836                 /* we've tried all the backups, all done */
1837                 return -1;
1838         } else {
1839                 /* jump to the next oldest backup */
1840                 newest = (*backup_index + BTRFS_NUM_BACKUP_ROOTS - 1) %
1841                         BTRFS_NUM_BACKUP_ROOTS;
1842                 *backup_index = newest;
1843                 *num_backups_tried += 1;
1844         }
1845         root_backup = super->super_roots + newest;
1846
1847         btrfs_set_super_generation(super,
1848                                    btrfs_backup_tree_root_gen(root_backup));
1849         btrfs_set_super_root(super, btrfs_backup_tree_root(root_backup));
1850         btrfs_set_super_root_level(super,
1851                                    btrfs_backup_tree_root_level(root_backup));
1852         btrfs_set_super_bytes_used(super, btrfs_backup_bytes_used(root_backup));
1853
1854         /*
1855          * fixme: the total bytes and num_devices need to match, or we
1856          * should require an fsck
1857          */
1858         btrfs_set_super_total_bytes(super, btrfs_backup_total_bytes(root_backup));
1859         btrfs_set_super_num_devices(super, btrfs_backup_num_devices(root_backup));
1860         return 0;
1861 }
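/*
 * The intended calling pattern for next_root_backup(), condensed from
 * the retry_root_backup loop in open_ctree() below (sketch only):
 *
 *	num_backups_tried = 0;
 *	backup_index = 0;
 *	while (reading the tree root keeps failing) {
 *		if (next_root_backup(fs_info, fs_info->super_copy,
 *				     &num_backups_tried, &backup_index))
 *			give up and fail the mount;
 *		re-read using the values patched into the super block;
 *	}
 */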
1862
1863 /* helper to cleanup tree roots */
1864 static void free_root_pointers(struct btrfs_fs_info *info, int chunk_root)
1865 {
1866         free_extent_buffer(info->tree_root->node);
1867         free_extent_buffer(info->tree_root->commit_root);
1868         free_extent_buffer(info->dev_root->node);
1869         free_extent_buffer(info->dev_root->commit_root);
1870         free_extent_buffer(info->extent_root->node);
1871         free_extent_buffer(info->extent_root->commit_root);
1872         free_extent_buffer(info->csum_root->node);
1873         free_extent_buffer(info->csum_root->commit_root);
1874
1875         info->tree_root->node = NULL;
1876         info->tree_root->commit_root = NULL;
1877         info->dev_root->node = NULL;
1878         info->dev_root->commit_root = NULL;
1879         info->extent_root->node = NULL;
1880         info->extent_root->commit_root = NULL;
1881         info->csum_root->node = NULL;
1882         info->csum_root->commit_root = NULL;
1883
1884         if (chunk_root) {
1885                 free_extent_buffer(info->chunk_root->node);
1886                 free_extent_buffer(info->chunk_root->commit_root);
1887                 info->chunk_root->node = NULL;
1888                 info->chunk_root->commit_root = NULL;
1889         }
1890 }
1891
1892
1893 int open_ctree(struct super_block *sb,
1894                struct btrfs_fs_devices *fs_devices,
1895                char *options)
1896 {
1897         u32 sectorsize;
1898         u32 nodesize;
1899         u32 leafsize;
1900         u32 blocksize;
1901         u32 stripesize;
1902         u64 generation;
1903         u64 features;
1904         struct btrfs_key location;
1905         struct buffer_head *bh;
1906         struct btrfs_super_block *disk_super;
1907         struct btrfs_fs_info *fs_info = btrfs_sb(sb);
1908         struct btrfs_root *tree_root;
1909         struct btrfs_root *extent_root;
1910         struct btrfs_root *csum_root;
1911         struct btrfs_root *chunk_root;
1912         struct btrfs_root *dev_root;
1913         struct btrfs_root *log_tree_root;
1914         int ret;
1915         int err = -EINVAL;
1916         int num_backups_tried = 0;
1917         int backup_index = 0;
1918
1919         tree_root = fs_info->tree_root = btrfs_alloc_root(fs_info);
1920         extent_root = fs_info->extent_root = btrfs_alloc_root(fs_info);
1921         csum_root = fs_info->csum_root = btrfs_alloc_root(fs_info);
1922         chunk_root = fs_info->chunk_root = btrfs_alloc_root(fs_info);
1923         dev_root = fs_info->dev_root = btrfs_alloc_root(fs_info);
1924
1925         if (!tree_root || !extent_root || !csum_root ||
1926             !chunk_root || !dev_root) {
1927                 err = -ENOMEM;
1928                 goto fail;
1929         }
1930
1931         ret = init_srcu_struct(&fs_info->subvol_srcu);
1932         if (ret) {
1933                 err = ret;
1934                 goto fail;
1935         }
1936
1937         ret = setup_bdi(fs_info, &fs_info->bdi);
1938         if (ret) {
1939                 err = ret;
1940                 goto fail_srcu;
1941         }
1942
1943         fs_info->btree_inode = new_inode(sb);
1944         if (!fs_info->btree_inode) {
1945                 err = -ENOMEM;
1946                 goto fail_bdi;
1947         }
1948
1949         mapping_set_gfp_mask(fs_info->btree_inode->i_mapping, GFP_NOFS);
1950
1951         INIT_RADIX_TREE(&fs_info->fs_roots_radix, GFP_ATOMIC);
1952         INIT_LIST_HEAD(&fs_info->trans_list);
1953         INIT_LIST_HEAD(&fs_info->dead_roots);
1954         INIT_LIST_HEAD(&fs_info->delayed_iputs);
1955         INIT_LIST_HEAD(&fs_info->hashers);
1956         INIT_LIST_HEAD(&fs_info->delalloc_inodes);
1957         INIT_LIST_HEAD(&fs_info->ordered_operations);
1958         INIT_LIST_HEAD(&fs_info->caching_block_groups);
1959         spin_lock_init(&fs_info->delalloc_lock);
1960         spin_lock_init(&fs_info->trans_lock);
1961         spin_lock_init(&fs_info->ref_cache_lock);
1962         spin_lock_init(&fs_info->fs_roots_radix_lock);
1963         spin_lock_init(&fs_info->delayed_iput_lock);
1964         spin_lock_init(&fs_info->defrag_inodes_lock);
1965         spin_lock_init(&fs_info->free_chunk_lock);
1966         mutex_init(&fs_info->reloc_mutex);
1967
1968         init_completion(&fs_info->kobj_unregister);
1969         INIT_LIST_HEAD(&fs_info->dirty_cowonly_roots);
1970         INIT_LIST_HEAD(&fs_info->space_info);
1971         btrfs_mapping_init(&fs_info->mapping_tree);
1972         btrfs_init_block_rsv(&fs_info->global_block_rsv);
1973         btrfs_init_block_rsv(&fs_info->delalloc_block_rsv);
1974         btrfs_init_block_rsv(&fs_info->trans_block_rsv);
1975         btrfs_init_block_rsv(&fs_info->chunk_block_rsv);
1976         btrfs_init_block_rsv(&fs_info->empty_block_rsv);
1977         btrfs_init_block_rsv(&fs_info->delayed_block_rsv);
1978         atomic_set(&fs_info->nr_async_submits, 0);
1979         atomic_set(&fs_info->async_delalloc_pages, 0);
1980         atomic_set(&fs_info->async_submit_draining, 0);
1981         atomic_set(&fs_info->nr_async_bios, 0);
1982         atomic_set(&fs_info->defrag_running, 0);
1983         fs_info->sb = sb;
1984         fs_info->max_inline = 8192 * 1024;
1985         fs_info->metadata_ratio = 0;
1986         fs_info->defrag_inodes = RB_ROOT;
1987         fs_info->trans_no_join = 0;
1988         fs_info->free_chunk_space = 0;
1989
1990         /* readahead state */
1991         INIT_RADIX_TREE(&fs_info->reada_tree, GFP_NOFS & ~__GFP_WAIT);
1992         spin_lock_init(&fs_info->reada_lock);
1993
1994         fs_info->thread_pool_size = min_t(unsigned long,
1995                                           num_online_cpus() + 2, 8);
1996
1997         INIT_LIST_HEAD(&fs_info->ordered_extents);
1998         spin_lock_init(&fs_info->ordered_extent_lock);
1999         fs_info->delayed_root = kmalloc(sizeof(struct btrfs_delayed_root),
2000                                         GFP_NOFS);
2001         if (!fs_info->delayed_root) {
2002                 err = -ENOMEM;
2003                 goto fail_iput;
2004         }
2005         btrfs_init_delayed_root(fs_info->delayed_root);
2006
2007         mutex_init(&fs_info->scrub_lock);
2008         atomic_set(&fs_info->scrubs_running, 0);
2009         atomic_set(&fs_info->scrub_pause_req, 0);
2010         atomic_set(&fs_info->scrubs_paused, 0);
2011         atomic_set(&fs_info->scrub_cancel_req, 0);
2012         init_waitqueue_head(&fs_info->scrub_pause_wait);
2013         init_rwsem(&fs_info->scrub_super_lock);
2014         fs_info->scrub_workers_refcnt = 0;
2015 #ifdef CONFIG_BTRFS_FS_CHECK_INTEGRITY
2016         fs_info->check_integrity_print_mask = 0;
2017 #endif
2018
2019         spin_lock_init(&fs_info->balance_lock);
2020         mutex_init(&fs_info->balance_mutex);
2021         atomic_set(&fs_info->balance_running, 0);
2022         atomic_set(&fs_info->balance_pause_req, 0);
2023         atomic_set(&fs_info->balance_cancel_req, 0);
2024         fs_info->balance_ctl = NULL;
2025         init_waitqueue_head(&fs_info->balance_wait_q);
2026
2027         sb->s_blocksize = 4096;
2028         sb->s_blocksize_bits = blksize_bits(4096);
2029         sb->s_bdi = &fs_info->bdi;
2030
2031         fs_info->btree_inode->i_ino = BTRFS_BTREE_INODE_OBJECTID;
2032         set_nlink(fs_info->btree_inode, 1);
2033         /*
2034          * we set the i_size on the btree inode to the largest possible offset.
2035          * the real end of the address space is determined by all of
2036          * the devices in the system
2037          */
2038         fs_info->btree_inode->i_size = OFFSET_MAX;
2039         fs_info->btree_inode->i_mapping->a_ops = &btree_aops;
2040         fs_info->btree_inode->i_mapping->backing_dev_info = &fs_info->bdi;
2041
2042         RB_CLEAR_NODE(&BTRFS_I(fs_info->btree_inode)->rb_node);
2043         extent_io_tree_init(&BTRFS_I(fs_info->btree_inode)->io_tree,
2044                              fs_info->btree_inode->i_mapping);
2045         extent_map_tree_init(&BTRFS_I(fs_info->btree_inode)->extent_tree);
2046
2047         BTRFS_I(fs_info->btree_inode)->io_tree.ops = &btree_extent_io_ops;
2048
2049         BTRFS_I(fs_info->btree_inode)->root = tree_root;
2050         memset(&BTRFS_I(fs_info->btree_inode)->location, 0,
2051                sizeof(struct btrfs_key));
2052         BTRFS_I(fs_info->btree_inode)->dummy_inode = 1;
2053         insert_inode_hash(fs_info->btree_inode);
2054
2055         spin_lock_init(&fs_info->block_group_cache_lock);
2056         fs_info->block_group_cache_tree = RB_ROOT;
2057
2058         extent_io_tree_init(&fs_info->freed_extents[0],
2059                              fs_info->btree_inode->i_mapping);
2060         extent_io_tree_init(&fs_info->freed_extents[1],
2061                              fs_info->btree_inode->i_mapping);
2062         fs_info->pinned_extents = &fs_info->freed_extents[0];
2063         fs_info->do_barriers = 1;
2064
2065
2066         mutex_init(&fs_info->ordered_operations_mutex);
2067         mutex_init(&fs_info->tree_log_mutex);
2068         mutex_init(&fs_info->chunk_mutex);
2069         mutex_init(&fs_info->transaction_kthread_mutex);
2070         mutex_init(&fs_info->cleaner_mutex);
2071         mutex_init(&fs_info->volume_mutex);
2072         init_rwsem(&fs_info->extent_commit_sem);
2073         init_rwsem(&fs_info->cleanup_work_sem);
2074         init_rwsem(&fs_info->subvol_sem);
2075
2076         btrfs_init_free_cluster(&fs_info->meta_alloc_cluster);
2077         btrfs_init_free_cluster(&fs_info->data_alloc_cluster);
2078
2079         init_waitqueue_head(&fs_info->transaction_throttle);
2080         init_waitqueue_head(&fs_info->transaction_wait);
2081         init_waitqueue_head(&fs_info->transaction_blocked_wait);
2082         init_waitqueue_head(&fs_info->async_submit_wait);
2083
2084         __setup_root(4096, 4096, 4096, 4096, tree_root,
2085                      fs_info, BTRFS_ROOT_TREE_OBJECTID);
2086
2087         bh = btrfs_read_dev_super(fs_devices->latest_bdev);
2088         if (!bh) {
2089                 err = -EINVAL;
2090                 goto fail_alloc;
2091         }
2092
2093         memcpy(fs_info->super_copy, bh->b_data, sizeof(*fs_info->super_copy));
2094         memcpy(fs_info->super_for_commit, fs_info->super_copy,
2095                sizeof(*fs_info->super_for_commit));
2096         brelse(bh);
2097
2098         memcpy(fs_info->fsid, fs_info->super_copy->fsid, BTRFS_FSID_SIZE);
2099
2100         disk_super = fs_info->super_copy;
2101         if (!btrfs_super_root(disk_super))
2102                 goto fail_alloc;
2103
2104         /* check FS state, whether FS is broken. */
2105         fs_info->fs_state |= btrfs_super_flags(disk_super);
2106
2107         btrfs_check_super_valid(fs_info, sb->s_flags & MS_RDONLY);
2108
2109         /*
2110          * run through our array of backup supers and set up
2111          * our ring pointer to the oldest one
2112          */
2113         generation = btrfs_super_generation(disk_super);
2114         find_oldest_super_backup(fs_info, generation);
2115
2116         /*
2117          * In the long term, we'll store the compression type in the super
2118          * block, and it'll be used for per-file compression control.
2119          */
2120         fs_info->compress_type = BTRFS_COMPRESS_ZLIB;
2121
2122         ret = btrfs_parse_options(tree_root, options);
2123         if (ret) {
2124                 err = ret;
2125                 goto fail_alloc;
2126         }
2127
2128         features = btrfs_super_incompat_flags(disk_super) &
2129                 ~BTRFS_FEATURE_INCOMPAT_SUPP;
2130         if (features) {
2131                 printk(KERN_ERR "BTRFS: couldn't mount because of "
2132                        "unsupported optional features (%Lx).\n",
2133                        (unsigned long long)features);
2134                 err = -EINVAL;
2135                 goto fail_alloc;
2136         }
2137
2138         features = btrfs_super_incompat_flags(disk_super);
2139         features |= BTRFS_FEATURE_INCOMPAT_MIXED_BACKREF;
2140         if (tree_root->fs_info->compress_type & BTRFS_COMPRESS_LZO)
2141                 features |= BTRFS_FEATURE_INCOMPAT_COMPRESS_LZO;
2142         btrfs_set_super_incompat_flags(disk_super, features);
2143
2144         features = btrfs_super_compat_ro_flags(disk_super) &
2145                 ~BTRFS_FEATURE_COMPAT_RO_SUPP;
2146         if (!(sb->s_flags & MS_RDONLY) && features) {
2147                 printk(KERN_ERR "BTRFS: couldn't mount RDWR because of "
2148                        "unsupported optional features (%Lx).\n",
2149                        (unsigned long long)features);
2150                 err = -EINVAL;
2151                 goto fail_alloc;
2152         }
2153
2154         btrfs_init_workers(&fs_info->generic_worker,
2155                            "genwork", 1, NULL);
2156
2157         btrfs_init_workers(&fs_info->workers, "worker",
2158                            fs_info->thread_pool_size,
2159                            &fs_info->generic_worker);
2160
2161         btrfs_init_workers(&fs_info->delalloc_workers, "delalloc",
2162                            fs_info->thread_pool_size,
2163                            &fs_info->generic_worker);
2164
2165         btrfs_init_workers(&fs_info->submit_workers, "submit",
2166                            min_t(u64, fs_devices->num_devices,
2167                            fs_info->thread_pool_size),
2168                            &fs_info->generic_worker);
2169
2170         btrfs_init_workers(&fs_info->caching_workers, "cache",
2171                            2, &fs_info->generic_worker);
2172
2173         /* a higher idle thresh on the submit workers makes it much more
2174          * likely that bios will be sent down in a sane order to the
2175          * devices
2176          */
2177         fs_info->submit_workers.idle_thresh = 64;
2178
2179         fs_info->workers.idle_thresh = 16;
2180         fs_info->workers.ordered = 1;
2181
2182         fs_info->delalloc_workers.idle_thresh = 2;
2183         fs_info->delalloc_workers.ordered = 1;
2184
2185         btrfs_init_workers(&fs_info->fixup_workers, "fixup", 1,
2186                            &fs_info->generic_worker);
2187         btrfs_init_workers(&fs_info->endio_workers, "endio",
2188                            fs_info->thread_pool_size,
2189                            &fs_info->generic_worker);
2190         btrfs_init_workers(&fs_info->endio_meta_workers, "endio-meta",
2191                            fs_info->thread_pool_size,
2192                            &fs_info->generic_worker);
2193         btrfs_init_workers(&fs_info->endio_meta_write_workers,
2194                            "endio-meta-write", fs_info->thread_pool_size,
2195                            &fs_info->generic_worker);
2196         btrfs_init_workers(&fs_info->endio_write_workers, "endio-write",
2197                            fs_info->thread_pool_size,
2198                            &fs_info->generic_worker);
2199         btrfs_init_workers(&fs_info->endio_freespace_worker, "freespace-write",
2200                            1, &fs_info->generic_worker);
2201         btrfs_init_workers(&fs_info->delayed_workers, "delayed-meta",
2202                            fs_info->thread_pool_size,
2203                            &fs_info->generic_worker);
2204         btrfs_init_workers(&fs_info->readahead_workers, "readahead",
2205                            fs_info->thread_pool_size,
2206                            &fs_info->generic_worker);
2207
2208         /*
2209          * endios are largely parallel and should have a very
2210          * low idle thresh
2211          */
2212         fs_info->endio_workers.idle_thresh = 4;
2213         fs_info->endio_meta_workers.idle_thresh = 4;
2214
2215         fs_info->endio_write_workers.idle_thresh = 2;
2216         fs_info->endio_meta_write_workers.idle_thresh = 2;
2217         fs_info->readahead_workers.idle_thresh = 2;
2218
2219         /*
2220          * btrfs_start_workers can really only fail because of ENOMEM so just
2221          * return -ENOMEM if any of these fail.
2222          */
2223         ret = btrfs_start_workers(&fs_info->workers);
2224         ret |= btrfs_start_workers(&fs_info->generic_worker);
2225         ret |= btrfs_start_workers(&fs_info->submit_workers);
2226         ret |= btrfs_start_workers(&fs_info->delalloc_workers);
2227         ret |= btrfs_start_workers(&fs_info->fixup_workers);
2228         ret |= btrfs_start_workers(&fs_info->endio_workers);
2229         ret |= btrfs_start_workers(&fs_info->endio_meta_workers);
2230         ret |= btrfs_start_workers(&fs_info->endio_meta_write_workers);
2231         ret |= btrfs_start_workers(&fs_info->endio_write_workers);
2232         ret |= btrfs_start_workers(&fs_info->endio_freespace_worker);
2233         ret |= btrfs_start_workers(&fs_info->delayed_workers);
2234         ret |= btrfs_start_workers(&fs_info->caching_workers);
2235         ret |= btrfs_start_workers(&fs_info->readahead_workers);
2236         if (ret) {
2237                 ret = -ENOMEM;
2238                 goto fail_sb_buffer;
2239         }
2240
2241         fs_info->bdi.ra_pages *= btrfs_super_num_devices(disk_super);
2242         fs_info->bdi.ra_pages = max(fs_info->bdi.ra_pages,
2243                                     4 * 1024 * 1024 / PAGE_CACHE_SIZE);
2244
2245         nodesize = btrfs_super_nodesize(disk_super);
2246         leafsize = btrfs_super_leafsize(disk_super);
2247         sectorsize = btrfs_super_sectorsize(disk_super);
2248         stripesize = btrfs_super_stripesize(disk_super);
2249         tree_root->nodesize = nodesize;
2250         tree_root->leafsize = leafsize;
2251         tree_root->sectorsize = sectorsize;
2252         tree_root->stripesize = stripesize;
2253
2254         sb->s_blocksize = sectorsize;
2255         sb->s_blocksize_bits = blksize_bits(sectorsize);
2256
2257         if (strncmp((char *)(&disk_super->magic), BTRFS_MAGIC,
2258                     sizeof(disk_super->magic))) {
2259                 printk(KERN_INFO "btrfs: valid FS not found on %s\n", sb->s_id);
2260                 goto fail_sb_buffer;
2261         }
2262
2263         if (sectorsize < PAGE_SIZE) {
2264                 printk(KERN_WARNING "btrfs: Incompatible sector size "
2265                        "found on %s\n", sb->s_id);
2266                 goto fail_sb_buffer;
2267         }
2268
2269         mutex_lock(&fs_info->chunk_mutex);
2270         ret = btrfs_read_sys_array(tree_root);
2271         mutex_unlock(&fs_info->chunk_mutex);
2272         if (ret) {
2273                 printk(KERN_WARNING "btrfs: failed to read the system "
2274                        "array on %s\n", sb->s_id);
2275                 goto fail_sb_buffer;
2276         }
2277
2278         blocksize = btrfs_level_size(tree_root,
2279                                      btrfs_super_chunk_root_level(disk_super));
2280         generation = btrfs_super_chunk_root_generation(disk_super);
2281
2282         __setup_root(nodesize, leafsize, sectorsize, stripesize,
2283                      chunk_root, fs_info, BTRFS_CHUNK_TREE_OBJECTID);
2284
2285         chunk_root->node = read_tree_block(chunk_root,
2286                                            btrfs_super_chunk_root(disk_super),
2287                                            blocksize, generation);
2288         BUG_ON(!chunk_root->node);
2289         if (!test_bit(EXTENT_BUFFER_UPTODATE, &chunk_root->node->bflags)) {
2290                 printk(KERN_WARNING "btrfs: failed to read chunk root on %s\n",
2291                        sb->s_id);
2292                 goto fail_tree_roots;
2293         }
2294         btrfs_set_root_node(&chunk_root->root_item, chunk_root->node);
2295         chunk_root->commit_root = btrfs_root_node(chunk_root);
2296
2297         read_extent_buffer(chunk_root->node, fs_info->chunk_tree_uuid,
2298            (unsigned long)btrfs_header_chunk_tree_uuid(chunk_root->node),
2299            BTRFS_UUID_SIZE);
2300
2301         ret = btrfs_read_chunk_tree(chunk_root);
2302         if (ret) {
2303                 printk(KERN_WARNING "btrfs: failed to read chunk tree on %s\n",
2304                        sb->s_id);
2305                 goto fail_tree_roots;
2306         }
2307
2308         btrfs_close_extra_devices(fs_devices);
2309
2310         if (!fs_devices->latest_bdev) {
2311                 printk(KERN_CRIT "btrfs: failed to read devices on %s\n",
2312                        sb->s_id);
2313                 goto fail_tree_roots;
2314         }
2315
2316 retry_root_backup:
2317         blocksize = btrfs_level_size(tree_root,
2318                                      btrfs_super_root_level(disk_super));
2319         generation = btrfs_super_generation(disk_super);
2320
2321         tree_root->node = read_tree_block(tree_root,
2322                                           btrfs_super_root(disk_super),
2323                                           blocksize, generation);
2324         if (!tree_root->node ||
2325             !test_bit(EXTENT_BUFFER_UPTODATE, &tree_root->node->bflags)) {
2326                 printk(KERN_WARNING "btrfs: failed to read tree root on %s\n",
2327                        sb->s_id);
2328
2329                 goto recovery_tree_root;
2330         }
2331
2332         btrfs_set_root_node(&tree_root->root_item, tree_root->node);
2333         tree_root->commit_root = btrfs_root_node(tree_root);
2334
2335         ret = find_and_setup_root(tree_root, fs_info,
2336                                   BTRFS_EXTENT_TREE_OBJECTID, extent_root);
2337         if (ret)
2338                 goto recovery_tree_root;
2339         extent_root->track_dirty = 1;
2340
2341         ret = find_and_setup_root(tree_root, fs_info,
2342                                   BTRFS_DEV_TREE_OBJECTID, dev_root);
2343         if (ret)
2344                 goto recovery_tree_root;
2345         dev_root->track_dirty = 1;
2346
2347         ret = find_and_setup_root(tree_root, fs_info,
2348                                   BTRFS_CSUM_TREE_OBJECTID, csum_root);
2349         if (ret)
2350                 goto recovery_tree_root;
2351
2352         csum_root->track_dirty = 1;
2353
2354         fs_info->generation = generation;
2355         fs_info->last_trans_committed = generation;
2356
2357         ret = btrfs_init_space_info(fs_info);
2358         if (ret) {
2359                 printk(KERN_ERR "Failed to initialize space info: %d\n", ret);
2360                 goto fail_block_groups;
2361         }
2362
2363         ret = btrfs_read_block_groups(extent_root);
2364         if (ret) {
2365                 printk(KERN_ERR "Failed to read block groups: %d\n", ret);
2366                 goto fail_block_groups;
2367         }
2368
2369         fs_info->cleaner_kthread = kthread_run(cleaner_kthread, tree_root,
2370                                                "btrfs-cleaner");
2371         if (IS_ERR(fs_info->cleaner_kthread))
2372                 goto fail_block_groups;
2373
2374         fs_info->transaction_kthread = kthread_run(transaction_kthread,
2375                                                    tree_root,
2376                                                    "btrfs-transaction");
2377         if (IS_ERR(fs_info->transaction_kthread))
2378                 goto fail_cleaner;
2379
2380         if (!btrfs_test_opt(tree_root, SSD) &&
2381             !btrfs_test_opt(tree_root, NOSSD) &&
2382             !fs_info->fs_devices->rotating) {
2383                 printk(KERN_INFO "Btrfs detected SSD devices, enabling SSD "
2384                        "mode\n");
2385                 btrfs_set_opt(fs_info->mount_opt, SSD);
2386         }
2387
2388 #ifdef CONFIG_BTRFS_FS_CHECK_INTEGRITY
2389         if (btrfs_test_opt(tree_root, CHECK_INTEGRITY)) {
2390                 ret = btrfsic_mount(tree_root, fs_devices,
2391                                     btrfs_test_opt(tree_root,
2392                                         CHECK_INTEGRITY_INCLUDING_EXTENT_DATA) ?
2393                                     1 : 0,
2394                                     fs_info->check_integrity_print_mask);
2395                 if (ret)
2396                         printk(KERN_WARNING "btrfs: failed to initialize"
2397                                " integrity check module %s\n", sb->s_id);
2398         }
2399 #endif
2400
2401         /* do not make disk changes in a broken FS */
2402         if (btrfs_super_log_root(disk_super) != 0 &&
2403             !(fs_info->fs_state & BTRFS_SUPER_FLAG_ERROR)) {
2404                 u64 bytenr = btrfs_super_log_root(disk_super);
2405
2406                 if (fs_devices->rw_devices == 0) {
2407                         printk(KERN_WARNING "Btrfs log replay required "
2408                                "on RO media\n");
2409                         err = -EIO;
2410                         goto fail_trans_kthread;
2411                 }
2412                 blocksize =
2413                      btrfs_level_size(tree_root,
2414                                       btrfs_super_log_root_level(disk_super));
2415
2416                 log_tree_root = btrfs_alloc_root(fs_info);
2417                 if (!log_tree_root) {
2418                         err = -ENOMEM;
2419                         goto fail_trans_kthread;
2420                 }
2421
2422                 __setup_root(nodesize, leafsize, sectorsize, stripesize,
2423                              log_tree_root, fs_info, BTRFS_TREE_LOG_OBJECTID);
2424
2425                 log_tree_root->node = read_tree_block(tree_root, bytenr,
2426                                                       blocksize,
2427                                                       generation + 1);
2428                 ret = btrfs_recover_log_trees(log_tree_root);
2429                 BUG_ON(ret);
2430
2431                 if (sb->s_flags & MS_RDONLY) {
2432                         ret =  btrfs_commit_super(tree_root);
2433                         BUG_ON(ret);
2434                 }
2435         }
2436
2437         ret = btrfs_find_orphan_roots(tree_root);
2438         BUG_ON(ret);
2439
2440         if (!(sb->s_flags & MS_RDONLY)) {
2441                 ret = btrfs_cleanup_fs_roots(fs_info);
2442                 BUG_ON(ret);
2443
2444                 ret = btrfs_recover_relocation(tree_root);
2445                 if (ret < 0) {
2446                         printk(KERN_WARNING
2447                                "btrfs: failed to recover relocation\n");
2448                         err = -EINVAL;
2449                         goto fail_trans_kthread;
2450                 }
2451         }
2452
2453         location.objectid = BTRFS_FS_TREE_OBJECTID;
2454         location.type = BTRFS_ROOT_ITEM_KEY;
2455         location.offset = (u64)-1;
2456
2457         fs_info->fs_root = btrfs_read_fs_root_no_name(fs_info, &location);
2458         if (!fs_info->fs_root)
2459                 goto fail_trans_kthread;
2460         if (IS_ERR(fs_info->fs_root)) {
2461                 err = PTR_ERR(fs_info->fs_root);
2462                 goto fail_trans_kthread;
2463         }
2464
2465         if (!(sb->s_flags & MS_RDONLY)) {
2466                 down_read(&fs_info->cleanup_work_sem);
2467                 err = btrfs_orphan_cleanup(fs_info->fs_root);
2468                 if (!err)
2469                         err = btrfs_orphan_cleanup(fs_info->tree_root);
2470                 up_read(&fs_info->cleanup_work_sem);
2471
2472                 if (!err)
2473                         err = btrfs_recover_balance(fs_info->tree_root);
2474
2475                 if (err) {
2476                         close_ctree(tree_root);
2477                         return err;
2478                 }
2479         }
2480
2481         return 0;
2482
2483 fail_trans_kthread:
2484         kthread_stop(fs_info->transaction_kthread);
2485 fail_cleaner:
2486         kthread_stop(fs_info->cleaner_kthread);
2487
2488         /*
2489          * make sure we're done with the btree inode before we stop our
2490          * kthreads
2491          */
2492         filemap_write_and_wait(fs_info->btree_inode->i_mapping);
2493         invalidate_inode_pages2(fs_info->btree_inode->i_mapping);
2494
2495 fail_block_groups:
2496         btrfs_free_block_groups(fs_info);
2497
2498 fail_tree_roots:
2499         free_root_pointers(fs_info, 1);
2500
2501 fail_sb_buffer:
2502         btrfs_stop_workers(&fs_info->generic_worker);
2503         btrfs_stop_workers(&fs_info->readahead_workers);
2504         btrfs_stop_workers(&fs_info->fixup_workers);
2505         btrfs_stop_workers(&fs_info->delalloc_workers);
2506         btrfs_stop_workers(&fs_info->workers);
2507         btrfs_stop_workers(&fs_info->endio_workers);
2508         btrfs_stop_workers(&fs_info->endio_meta_workers);
2509         btrfs_stop_workers(&fs_info->endio_meta_write_workers);
2510         btrfs_stop_workers(&fs_info->endio_write_workers);
2511         btrfs_stop_workers(&fs_info->endio_freespace_worker);
2512         btrfs_stop_workers(&fs_info->submit_workers);
2513         btrfs_stop_workers(&fs_info->delayed_workers);
2514         btrfs_stop_workers(&fs_info->caching_workers);
2515 fail_alloc:
2516 fail_iput:
2517         btrfs_mapping_tree_free(&fs_info->mapping_tree);
2518
2519         invalidate_inode_pages2(fs_info->btree_inode->i_mapping);
2520         iput(fs_info->btree_inode);
2521 fail_bdi:
2522         bdi_destroy(&fs_info->bdi);
2523 fail_srcu:
2524         cleanup_srcu_struct(&fs_info->subvol_srcu);
2525 fail:
2526         btrfs_close_devices(fs_info->fs_devices);
2527         return err;
2528
2529 recovery_tree_root:
2530         if (!btrfs_test_opt(tree_root, RECOVERY))
2531                 goto fail_tree_roots;
2532
2533         free_root_pointers(fs_info, 0);
2534
2535         /* don't use the log in recovery mode, it won't be valid */
2536         btrfs_set_super_log_root(disk_super, 0);
2537
2538         /* we can't trust the free space cache either */
2539         btrfs_set_opt(fs_info->mount_opt, CLEAR_CACHE);
2540
2541         ret = next_root_backup(fs_info, fs_info->super_copy,
2542                                &num_backups_tried, &backup_index);
2543         if (ret == -1)
2544                 goto fail_block_groups;
2545         goto retry_root_backup;
2546 }
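/*
 * Error handling note for open_ctree() above: the fail_* labels are
 * ordered so that a goto unwinds exactly the state built before the
 * failure point, with later labels falling through into earlier ones.
 * For example, fail_sb_buffer stops the worker threads and then falls
 * through to fail_iput (release the btree inode), fail_bdi, fail_srcu
 * and finally fail, which closes the devices.
 */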
2547
2548 static void btrfs_end_buffer_write_sync(struct buffer_head *bh, int uptodate)
2549 {
2550         char b[BDEVNAME_SIZE];
2551
2552         if (uptodate) {
2553                 set_buffer_uptodate(bh);
2554         } else {
2555                 printk_ratelimited(KERN_WARNING "lost page write due to "
2556                                         "I/O error on %s\n",
2557                                        bdevname(bh->b_bdev, b));
2558                 /* note, we don't set_buffer_write_io_error because we have
2559                  * our own ways of dealing with the IO errors
2560                  */
2561                 clear_buffer_uptodate(bh);
2562         }
2563         unlock_buffer(bh);
2564         put_bh(bh);
2565 }
2566
2567 struct buffer_head *btrfs_read_dev_super(struct block_device *bdev)
2568 {
2569         struct buffer_head *bh;
2570         struct buffer_head *latest = NULL;
2571         struct btrfs_super_block *super;
2572         int i;
2573         u64 transid = 0;
2574         u64 bytenr;
2575
2576         /* we would like to check all the supers, but that would make
2577          * a btrfs mount succeed after a mkfs from a different FS.
2578          * So, we would need to add a special mount option to scan for
2579          * the later supers, using BTRFS_SUPER_MIRROR_MAX instead
2580          */
2581         for (i = 0; i < 1; i++) {
2582                 bytenr = btrfs_sb_offset(i);
2583                 if (bytenr + 4096 >= i_size_read(bdev->bd_inode))
2584                         break;
2585                 bh = __bread(bdev, bytenr / 4096, 4096);
2586                 if (!bh)
2587                         continue;
2588
2589                 super = (struct btrfs_super_block *)bh->b_data;
2590                 if (btrfs_super_bytenr(super) != bytenr ||
2591                     strncmp((char *)(&super->magic), BTRFS_MAGIC,
2592                             sizeof(super->magic))) {
2593                         brelse(bh);
2594                         continue;
2595                 }
2596
2597                 if (!latest || btrfs_super_generation(super) > transid) {
2598                         brelse(latest);
2599                         latest = bh;
2600                         transid = btrfs_super_generation(super);
2601                 } else {
2602                         brelse(bh);
2603                 }
2604         }
2605         return latest;
2606 }
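/*
 * For reference, btrfs_sb_offset(0) is the primary super block at 64K.
 * The additional mirrors the comment above talks about live at 64M and
 * 256G, so smaller devices simply carry fewer copies; the
 * i_size_read() check in the loop is what enforces that bound.
 */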
2607
2608 /*
2609  * this should be called twice, once with wait == 0 and
2610  * once with wait == 1.  When wait == 0 is done, all the buffer heads
2611  * we write are pinned.
2612  *
2613  * They are released when wait == 1 is done.
2614  * max_mirrors must be the same for both runs, and it indicates how
2615  * many supers on this one device should be written.
2616  *
2617  * max_mirrors == 0 means to write them all.
2618  */
2619 static int write_dev_supers(struct btrfs_device *device,
2620                             struct btrfs_super_block *sb,
2621                             int do_barriers, int wait, int max_mirrors)
2622 {
2623         struct buffer_head *bh;
2624         int i;
2625         int ret;
2626         int errors = 0;
2627         u32 crc;
2628         u64 bytenr;
2629
2630         if (max_mirrors == 0)
2631                 max_mirrors = BTRFS_SUPER_MIRROR_MAX;
2632
2633         for (i = 0; i < max_mirrors; i++) {
2634                 bytenr = btrfs_sb_offset(i);
2635                 if (bytenr + BTRFS_SUPER_INFO_SIZE >= device->total_bytes)
2636                         break;
2637
2638                 if (wait) {
2639                         bh = __find_get_block(device->bdev, bytenr / 4096,
2640                                               BTRFS_SUPER_INFO_SIZE);
2641                         BUG_ON(!bh);
2642                         wait_on_buffer(bh);
2643                         if (!buffer_uptodate(bh))
2644                                 errors++;
2645
2646                         /* drop our reference */
2647                         brelse(bh);
2648
2649                         /* drop the reference from the wait == 0 run */
2650                         brelse(bh);
2651                         continue;
2652                 } else {
2653                         btrfs_set_super_bytenr(sb, bytenr);
2654
2655                         crc = ~(u32)0;
2656                         crc = btrfs_csum_data(NULL, (char *)sb +
2657                                               BTRFS_CSUM_SIZE, crc,
2658                                               BTRFS_SUPER_INFO_SIZE -
2659                                               BTRFS_CSUM_SIZE);
2660                         btrfs_csum_final(crc, sb->csum);
2661
2662                         /*
2663                          * one reference for us, and we leave it for the
2664                          * caller
2665                          */
2666                         bh = __getblk(device->bdev, bytenr / 4096,
2667                                       BTRFS_SUPER_INFO_SIZE);
2668                         memcpy(bh->b_data, sb, BTRFS_SUPER_INFO_SIZE);
2669
2670                         /* one reference for submit_bh */
2671                         get_bh(bh);
2672
2673                         set_buffer_uptodate(bh);
2674                         lock_buffer(bh);
2675                         bh->b_end_io = btrfs_end_buffer_write_sync;
2676                 }
2677
2678                 /*
2679                  * we FUA the first super.  The others we allow
2680                  * to go down lazily.
2681                  */
2682                 ret = btrfsic_submit_bh(WRITE_FUA, bh);
2683                 if (ret)
2684                         errors++;
2685         }
2686         return errors < i ? 0 : -1;
2687 }
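/*
 * Condensed sketch of the two-pass protocol described above, for one
 * device (write_all_supers() below drives the wait == 0 pass; the
 * matching wait == 1 pass runs once all devices have been submitted):
 *
 *	write_dev_supers(dev, sb, do_barriers, 0, max_mirrors);
 *	...
 *	write_dev_supers(dev, sb, do_barriers, 1, max_mirrors);
 *
 * The wait == 0 pass leaves an extra reference on each buffer_head so
 * the wait == 1 pass can find it again with __find_get_block().
 */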
2688
2689 /*
2690  * endio for write_dev_flush; this will wake anyone waiting
2691  * for the barrier when it is done
2692  */
2693 static void btrfs_end_empty_barrier(struct bio *bio, int err)
2694 {
2695         if (err) {
2696                 if (err == -EOPNOTSUPP)
2697                         set_bit(BIO_EOPNOTSUPP, &bio->bi_flags);
2698                 clear_bit(BIO_UPTODATE, &bio->bi_flags);
2699         }
2700         if (bio->bi_private)
2701                 complete(bio->bi_private);
2702         bio_put(bio);
2703 }
2704
2705 /*
2706  * trigger flushes for one of the devices.  If you pass wait == 0, the flushes are
2707  * sent down.  With wait == 1, it waits for the previous flush.
2708  *
2709  * any device where the flush fails with EOPNOTSUPP is flagged as not
2710  * barrier-capable
2711  */
2712 static int write_dev_flush(struct btrfs_device *device, int wait)
2713 {
2714         struct bio *bio;
2715         int ret = 0;
2716
2717         if (device->nobarriers)
2718                 return 0;
2719
2720         if (wait) {
2721                 bio = device->flush_bio;
2722                 if (!bio)
2723                         return 0;
2724
2725                 wait_for_completion(&device->flush_wait);
2726
2727                 if (bio_flagged(bio, BIO_EOPNOTSUPP)) {
2728                         printk(KERN_WARNING "btrfs: disabling barriers on dev %s\n",
2729                                device->name);
2730                         device->nobarriers = 1;
2731                 }
2732                 if (!bio_flagged(bio, BIO_UPTODATE)) {
2733                         ret = -EIO;
2734                 }
2735
2736                 /* drop the reference from the wait == 0 run */
2737                 bio_put(bio);
2738                 device->flush_bio = NULL;
2739
2740                 return ret;
2741         }
2742
2743         /*
2744          * one reference for us, and we leave it for the
2745          * caller
2746          */
2747         device->flush_bio = NULL;
2748         bio = bio_alloc(GFP_NOFS, 0);
2749         if (!bio)
2750                 return -ENOMEM;
2751
2752         bio->bi_end_io = btrfs_end_empty_barrier;
2753         bio->bi_bdev = device->bdev;
2754         init_completion(&device->flush_wait);
2755         bio->bi_private = &device->flush_wait;
2756         device->flush_bio = bio;
2757
2758         bio_get(bio);
2759         btrfsic_submit_bio(WRITE_FLUSH, bio);
2760
2761         return 0;
2762 }

/*
 * send an empty flush down to each device in parallel,
 * then wait for them
 */
static int barrier_all_devices(struct btrfs_fs_info *info)
{
        struct list_head *head;
        struct btrfs_device *dev;
        int errors = 0;
        int ret;

        /* send down all the barriers */
        head = &info->fs_devices->devices;
        list_for_each_entry_rcu(dev, head, dev_list) {
                if (!dev->bdev) {
                        errors++;
                        continue;
                }
                if (!dev->in_fs_metadata || !dev->writeable)
                        continue;

                ret = write_dev_flush(dev, 0);
                if (ret)
                        errors++;
        }

        /* wait for all the barriers */
        list_for_each_entry_rcu(dev, head, dev_list) {
                if (!dev->bdev) {
                        errors++;
                        continue;
                }
                if (!dev->in_fs_metadata || !dev->writeable)
                        continue;

                ret = write_dev_flush(dev, 1);
                if (ret)
                        errors++;
        }
        if (errors)
                return -EIO;
        return 0;
}
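
/*
 * a device with no bdev counts as an error in both passes, so a single
 * missing device is enough to fail the whole barrier pass with -EIO
 */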

int write_all_supers(struct btrfs_root *root, int max_mirrors)
{
        struct list_head *head;
        struct btrfs_device *dev;
        struct btrfs_super_block *sb;
        struct btrfs_dev_item *dev_item;
        int ret;
        int do_barriers;
        int max_errors;
        int total_errors = 0;
        u64 flags;

        max_errors = btrfs_super_num_devices(root->fs_info->super_copy) - 1;
        do_barriers = !btrfs_test_opt(root, NOBARRIER);
        backup_super_roots(root->fs_info);

        sb = root->fs_info->super_for_commit;
        dev_item = &sb->dev_item;

        mutex_lock(&root->fs_info->fs_devices->device_list_mutex);
        head = &root->fs_info->fs_devices->devices;

        if (do_barriers)
                barrier_all_devices(root->fs_info);

        list_for_each_entry_rcu(dev, head, dev_list) {
                if (!dev->bdev) {
                        total_errors++;
                        continue;
                }
                if (!dev->in_fs_metadata || !dev->writeable)
                        continue;

                btrfs_set_stack_device_generation(dev_item, 0);
                btrfs_set_stack_device_type(dev_item, dev->type);
                btrfs_set_stack_device_id(dev_item, dev->devid);
                btrfs_set_stack_device_total_bytes(dev_item, dev->total_bytes);
                btrfs_set_stack_device_bytes_used(dev_item, dev->bytes_used);
                btrfs_set_stack_device_io_align(dev_item, dev->io_align);
                btrfs_set_stack_device_io_width(dev_item, dev->io_width);
                btrfs_set_stack_device_sector_size(dev_item, dev->sector_size);
                memcpy(dev_item->uuid, dev->uuid, BTRFS_UUID_SIZE);
                memcpy(dev_item->fsid, dev->fs_devices->fsid, BTRFS_UUID_SIZE);

                flags = btrfs_super_flags(sb);
                btrfs_set_super_flags(sb, flags | BTRFS_HEADER_FLAG_WRITTEN);

                ret = write_dev_supers(dev, sb, do_barriers, 0, max_mirrors);
                if (ret)
                        total_errors++;
        }
        if (total_errors > max_errors) {
                printk(KERN_ERR "btrfs: %d errors while writing supers\n",
                       total_errors);
                BUG();
        }

        total_errors = 0;
        list_for_each_entry_rcu(dev, head, dev_list) {
                if (!dev->bdev)
                        continue;
                if (!dev->in_fs_metadata || !dev->writeable)
                        continue;

                ret = write_dev_supers(dev, sb, do_barriers, 1, max_mirrors);
                if (ret)
                        total_errors++;
        }
        mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);
        if (total_errors > max_errors) {
                printk(KERN_ERR "btrfs: %d errors while writing supers\n",
                       total_errors);
                BUG();
        }
        return 0;
}
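
/*
 * the tolerance above is num_devices - 1 failed super writes: with four
 * devices, max_errors is 3, so the supers count as written as long as
 * at least one device took them; anything worse trips the BUG()
 */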

int write_ctree_super(struct btrfs_trans_handle *trans,
                      struct btrfs_root *root, int max_mirrors)
{
        return write_all_supers(root, max_mirrors);
}

int btrfs_free_fs_root(struct btrfs_fs_info *fs_info, struct btrfs_root *root)
{
        spin_lock(&fs_info->fs_roots_radix_lock);
        radix_tree_delete(&fs_info->fs_roots_radix,
                          (unsigned long)root->root_key.objectid);
        spin_unlock(&fs_info->fs_roots_radix_lock);

        if (btrfs_root_refs(&root->root_item) == 0)
                synchronize_srcu(&fs_info->subvol_srcu);

        __btrfs_remove_free_space_cache(root->free_ino_pinned);
        __btrfs_remove_free_space_cache(root->free_ino_ctl);
        free_fs_root(root);
        return 0;
}
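
/*
 * when the root has no more references, the synchronize_srcu() above is
 * presumably what lets in-flight subvol_srcu readers drain before the
 * structures they might still dereference are freed below
 */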

static void free_fs_root(struct btrfs_root *root)
{
        iput(root->cache_inode);
        WARN_ON(!RB_EMPTY_ROOT(&root->inode_tree));
        if (root->anon_dev)
                free_anon_bdev(root->anon_dev);
        free_extent_buffer(root->node);
        free_extent_buffer(root->commit_root);
        kfree(root->free_ino_ctl);
        kfree(root->free_ino_pinned);
        kfree(root->name);
        kfree(root);
}

static int del_fs_roots(struct btrfs_fs_info *fs_info)
{
        int ret;
        struct btrfs_root *gang[8];
        int i;

        while (!list_empty(&fs_info->dead_roots)) {
                gang[0] = list_entry(fs_info->dead_roots.next,
                                     struct btrfs_root, root_list);
                list_del(&gang[0]->root_list);

                if (gang[0]->in_radix) {
                        btrfs_free_fs_root(fs_info, gang[0]);
                } else {
                        free_extent_buffer(gang[0]->node);
                        free_extent_buffer(gang[0]->commit_root);
                        kfree(gang[0]);
                }
        }

        while (1) {
                ret = radix_tree_gang_lookup(&fs_info->fs_roots_radix,
                                             (void **)gang, 0,
                                             ARRAY_SIZE(gang));
                if (!ret)
                        break;
                for (i = 0; i < ret; i++)
                        btrfs_free_fs_root(fs_info, gang[i]);
        }
        return 0;
}

int btrfs_cleanup_fs_roots(struct btrfs_fs_info *fs_info)
{
        u64 root_objectid = 0;
        struct btrfs_root *gang[8];
        int i;
        int ret;

        while (1) {
                ret = radix_tree_gang_lookup(&fs_info->fs_roots_radix,
                                             (void **)gang, root_objectid,
                                             ARRAY_SIZE(gang));
                if (!ret)
                        break;

                /* overwritten below; the loop always runs when ret > 0 */
                root_objectid = gang[ret - 1]->root_key.objectid + 1;
                for (i = 0; i < ret; i++) {
                        int err;

                        root_objectid = gang[i]->root_key.objectid;
                        err = btrfs_orphan_cleanup(gang[i]);
                        if (err)
                                return err;
                }
                root_objectid++;
        }
        return 0;
}
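
/*
 * the gang lookups above paginate the radix tree by objectid: each pass
 * resumes just past the last root handled, so a full gang of 8 leaves
 * root_objectid pointing one beyond the highest root found, ready for
 * the next radix_tree_gang_lookup()
 */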

int btrfs_commit_super(struct btrfs_root *root)
{
        struct btrfs_trans_handle *trans;
        int ret;

        mutex_lock(&root->fs_info->cleaner_mutex);
        btrfs_run_delayed_iputs(root);
        btrfs_clean_old_snapshots(root);
        mutex_unlock(&root->fs_info->cleaner_mutex);

        /* wait until ongoing cleanup work is done */
        down_write(&root->fs_info->cleanup_work_sem);
        up_write(&root->fs_info->cleanup_work_sem);

        trans = btrfs_join_transaction(root);
        if (IS_ERR(trans))
                return PTR_ERR(trans);
        ret = btrfs_commit_transaction(trans, root);
        BUG_ON(ret);
        /* run commit again to drop the original snapshot */
        trans = btrfs_join_transaction(root);
        if (IS_ERR(trans))
                return PTR_ERR(trans);
        btrfs_commit_transaction(trans, root);
        ret = btrfs_write_and_wait_transaction(NULL, root);
        BUG_ON(ret);

        ret = write_ctree_super(NULL, root, 0);
        return ret;
}

int close_ctree(struct btrfs_root *root)
{
        struct btrfs_fs_info *fs_info = root->fs_info;
        int ret;

        fs_info->closing = 1;
        smp_mb();

        /* pause restriper - we want to resume it on mount */
        btrfs_pause_balance(root->fs_info);

        btrfs_scrub_cancel(root);

        /* wait for any defraggers to finish */
        wait_event(fs_info->transaction_wait,
                   (atomic_read(&fs_info->defrag_running) == 0));

        /* clear out the rbtree of defraggable inodes */
        btrfs_run_defrag_inodes(fs_info);

        /*
         * There are two situations to consider when btrfs is broken and
         * has flipped read-only:
         *
         * 1. If btrfs flipped read-only somewhere else before
         *    btrfs_commit_super, sb->s_flags has the MS_RDONLY flag set
         *    and btrfs skips writing the sb directly, so the ERROR
         *    state stays on disk.
         *
         * 2. If btrfs flips read-only inside btrfs_commit_super itself,
         *    it cannot write the sb via btrfs_commit_super; since
         *    fs_state has the BTRFS_SUPER_FLAG_ERROR flag set, btrfs
         *    cleans up all FS resources first and writes the sb then.
         */
        if (!(fs_info->sb->s_flags & MS_RDONLY)) {
                ret = btrfs_commit_super(root);
                if (ret)
                        printk(KERN_ERR "btrfs: commit super ret %d\n", ret);
        }

        if (fs_info->fs_state & BTRFS_SUPER_FLAG_ERROR) {
                ret = btrfs_error_commit_super(root);
                if (ret)
                        printk(KERN_ERR "btrfs: commit super ret %d\n", ret);
        }

        btrfs_put_block_group_cache(fs_info);

        kthread_stop(fs_info->transaction_kthread);
        kthread_stop(fs_info->cleaner_kthread);

        fs_info->closing = 2;
        smp_mb();

        if (fs_info->delalloc_bytes) {
                printk(KERN_INFO "btrfs: at unmount delalloc count %llu\n",
                       (unsigned long long)fs_info->delalloc_bytes);
        }
        if (fs_info->total_ref_cache_size) {
                printk(KERN_INFO "btrfs: at unmount reference cache size %llu\n",
                       (unsigned long long)fs_info->total_ref_cache_size);
        }

        free_extent_buffer(fs_info->extent_root->node);
        free_extent_buffer(fs_info->extent_root->commit_root);
        free_extent_buffer(fs_info->tree_root->node);
        free_extent_buffer(fs_info->tree_root->commit_root);
        free_extent_buffer(fs_info->chunk_root->node);
        free_extent_buffer(fs_info->chunk_root->commit_root);
        free_extent_buffer(fs_info->dev_root->node);
        free_extent_buffer(fs_info->dev_root->commit_root);
        free_extent_buffer(fs_info->csum_root->node);
        free_extent_buffer(fs_info->csum_root->commit_root);

        btrfs_free_block_groups(fs_info);

        del_fs_roots(fs_info);

        iput(fs_info->btree_inode);

        btrfs_stop_workers(&fs_info->generic_worker);
        btrfs_stop_workers(&fs_info->fixup_workers);
        btrfs_stop_workers(&fs_info->delalloc_workers);
        btrfs_stop_workers(&fs_info->workers);
        btrfs_stop_workers(&fs_info->endio_workers);
        btrfs_stop_workers(&fs_info->endio_meta_workers);
        btrfs_stop_workers(&fs_info->endio_meta_write_workers);
        btrfs_stop_workers(&fs_info->endio_write_workers);
        btrfs_stop_workers(&fs_info->endio_freespace_worker);
        btrfs_stop_workers(&fs_info->submit_workers);
        btrfs_stop_workers(&fs_info->delayed_workers);
        btrfs_stop_workers(&fs_info->caching_workers);
        btrfs_stop_workers(&fs_info->readahead_workers);

#ifdef CONFIG_BTRFS_FS_CHECK_INTEGRITY
        if (btrfs_test_opt(root, CHECK_INTEGRITY))
                btrfsic_unmount(root, fs_info->fs_devices);
#endif

        btrfs_close_devices(fs_info->fs_devices);
        btrfs_mapping_tree_free(&fs_info->mapping_tree);

        bdi_destroy(&fs_info->bdi);
        cleanup_srcu_struct(&fs_info->subvol_srcu);

        return 0;
}

int btrfs_buffer_uptodate(struct extent_buffer *buf, u64 parent_transid)
{
        int ret;
        struct inode *btree_inode = buf->first_page->mapping->host;

        ret = extent_buffer_uptodate(&BTRFS_I(btree_inode)->io_tree, buf,
                                     NULL);
        if (!ret)
                return ret;

        ret = verify_parent_transid(&BTRFS_I(btree_inode)->io_tree, buf,
                                    parent_transid);
        return !ret;
}
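
/*
 * note the inverted conventions above: extent_buffer_uptodate() returns
 * nonzero when the buffer is uptodate, while verify_parent_transid()
 * returns 0 on a matching generation, so the final !ret yields 1 only
 * when the buffer is both uptodate and from the expected transid
 */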

int btrfs_set_buffer_uptodate(struct extent_buffer *buf)
{
        struct inode *btree_inode = buf->first_page->mapping->host;
        return set_extent_buffer_uptodate(&BTRFS_I(btree_inode)->io_tree,
                                          buf);
}

void btrfs_mark_buffer_dirty(struct extent_buffer *buf)
{
        struct btrfs_root *root = BTRFS_I(buf->first_page->mapping->host)->root;
        u64 transid = btrfs_header_generation(buf);
        struct inode *btree_inode = root->fs_info->btree_inode;
        int was_dirty;

        btrfs_assert_tree_locked(buf);
        if (transid != root->fs_info->generation) {
                printk(KERN_CRIT "btrfs transid mismatch buffer %llu, "
                       "found %llu running %llu\n",
                        (unsigned long long)buf->start,
                        (unsigned long long)transid,
                        (unsigned long long)root->fs_info->generation);
                WARN_ON(1);
        }
        was_dirty = set_extent_buffer_dirty(&BTRFS_I(btree_inode)->io_tree,
                                            buf);
        if (!was_dirty) {
                spin_lock(&root->fs_info->delalloc_lock);
                root->fs_info->dirty_metadata_bytes += buf->len;
                spin_unlock(&root->fs_info->delalloc_lock);
        }
}

void btrfs_btree_balance_dirty(struct btrfs_root *root, unsigned long nr)
{
        /*
         * it looks as though older kernels can get into trouble with
         * this code; they end up stuck in balance_dirty_pages forever
         */
        u64 num_dirty;
        unsigned long thresh = 32 * 1024 * 1024;

        if (current->flags & PF_MEMALLOC)
                return;

        btrfs_balance_delayed_items(root);

        num_dirty = root->fs_info->dirty_metadata_bytes;

        if (num_dirty > thresh) {
                balance_dirty_pages_ratelimited_nr(
                                   root->fs_info->btree_inode->i_mapping, 1);
        }
}

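/*
 * __btrfs_btree_balance_dirty below is identical to
 * btrfs_btree_balance_dirty above except that it skips
 * btrfs_balance_delayed_items(), presumably for callers that must not
 * recurse into delayed item processing
 */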
void __btrfs_btree_balance_dirty(struct btrfs_root *root, unsigned long nr)
{
        /*
         * it looks as though older kernels can get into trouble with
         * this code; they end up stuck in balance_dirty_pages forever
         */
        u64 num_dirty;
        unsigned long thresh = 32 * 1024 * 1024;

        if (current->flags & PF_MEMALLOC)
                return;

        num_dirty = root->fs_info->dirty_metadata_bytes;

        if (num_dirty > thresh) {
                balance_dirty_pages_ratelimited_nr(
                                   root->fs_info->btree_inode->i_mapping, 1);
        }
}

int btrfs_read_buffer(struct extent_buffer *buf, u64 parent_transid)
{
        struct btrfs_root *root = BTRFS_I(buf->first_page->mapping->host)->root;
        int ret;
        ret = btree_read_extent_buffer_pages(root, buf, 0, parent_transid);
        if (ret == 0)
                set_bit(EXTENT_BUFFER_UPTODATE, &buf->bflags);
        return ret;
}

static int btree_lock_page_hook(struct page *page, void *data,
                                void (*flush_fn)(void *))
{
        struct inode *inode = page->mapping->host;
        struct btrfs_root *root = BTRFS_I(inode)->root;
        struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
        struct extent_buffer *eb;
        unsigned long len;
        u64 bytenr = page_offset(page);

        if (page->private == EXTENT_PAGE_PRIVATE)
                goto out;

        len = page->private >> 2;
        eb = find_extent_buffer(io_tree, bytenr, len);
        if (!eb)
                goto out;

        if (!btrfs_try_tree_write_lock(eb)) {
                flush_fn(data);
                btrfs_tree_lock(eb);
        }
        btrfs_set_header_flag(eb, BTRFS_HEADER_FLAG_WRITTEN);

        if (test_and_clear_bit(EXTENT_BUFFER_DIRTY, &eb->bflags)) {
                spin_lock(&root->fs_info->delalloc_lock);
                if (root->fs_info->dirty_metadata_bytes >= eb->len)
                        root->fs_info->dirty_metadata_bytes -= eb->len;
                else
                        WARN_ON(1);
                spin_unlock(&root->fs_info->delalloc_lock);
        }

        btrfs_tree_unlock(eb);
        free_extent_buffer(eb);
out:
        if (!trylock_page(page)) {
                flush_fn(data);
                lock_page(page);
        }
        return 0;
}
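
/*
 * two notes on the hook above: the dirty_metadata_bytes decrement pairs
 * with the increment in btrfs_mark_buffer_dirty(), keeping the global
 * counter balanced as buffers go down to disk; and len = page->private >> 2
 * relies on the assumed encoding where the first page of an extent
 * buffer stores the buffer length shifted left past two low tag bits,
 * while plain EXTENT_PAGE_PRIVATE pages carry no buffer at all
 */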

static void btrfs_check_super_valid(struct btrfs_fs_info *fs_info,
                              int read_only)
{
        if (read_only)
                return;

        if (fs_info->fs_state & BTRFS_SUPER_FLAG_ERROR)
                printk(KERN_WARNING "warning: mounting fs with errors, "
                       "running btrfsck is recommended\n");
}

int btrfs_error_commit_super(struct btrfs_root *root)
{
        int ret;

        mutex_lock(&root->fs_info->cleaner_mutex);
        btrfs_run_delayed_iputs(root);
        mutex_unlock(&root->fs_info->cleaner_mutex);

        down_write(&root->fs_info->cleanup_work_sem);
        up_write(&root->fs_info->cleanup_work_sem);

        /* cleanup FS via transaction */
        btrfs_cleanup_transaction(root);

        ret = write_ctree_super(NULL, root, 0);

        return ret;
}

static int btrfs_destroy_ordered_operations(struct btrfs_root *root)
{
        struct btrfs_inode *btrfs_inode;
        struct list_head splice;

        INIT_LIST_HEAD(&splice);

        mutex_lock(&root->fs_info->ordered_operations_mutex);
        spin_lock(&root->fs_info->ordered_extent_lock);

        list_splice_init(&root->fs_info->ordered_operations, &splice);
        while (!list_empty(&splice)) {
                btrfs_inode = list_entry(splice.next, struct btrfs_inode,
                                         ordered_operations);

                list_del_init(&btrfs_inode->ordered_operations);

                btrfs_invalidate_inodes(btrfs_inode->root);
        }

        spin_unlock(&root->fs_info->ordered_extent_lock);
        mutex_unlock(&root->fs_info->ordered_operations_mutex);

        return 0;
}

static int btrfs_destroy_ordered_extents(struct btrfs_root *root)
{
        struct list_head splice;
        struct btrfs_ordered_extent *ordered;
        struct inode *inode;

        INIT_LIST_HEAD(&splice);

        spin_lock(&root->fs_info->ordered_extent_lock);

        list_splice_init(&root->fs_info->ordered_extents, &splice);
        while (!list_empty(&splice)) {
                ordered = list_entry(splice.next, struct btrfs_ordered_extent,
                                     root_extent_list);

                list_del_init(&ordered->root_extent_list);
                atomic_inc(&ordered->refs);

                /* the inode may be getting freed (in sys_unlink path). */
                inode = igrab(ordered->inode);

                spin_unlock(&root->fs_info->ordered_extent_lock);
                if (inode)
                        iput(inode);

                atomic_set(&ordered->refs, 1);
                btrfs_put_ordered_extent(ordered);

                spin_lock(&root->fs_info->ordered_extent_lock);
        }

        spin_unlock(&root->fs_info->ordered_extent_lock);

        return 0;
}
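
/*
 * forcing ordered->refs to 1 before the final put frees the ordered
 * extent no matter how many references were outstanding; that is only
 * tolerable here because this is the error-teardown path, where no
 * other users are expected to still be making progress
 */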

static int btrfs_destroy_delayed_refs(struct btrfs_transaction *trans,
                                      struct btrfs_root *root)
{
        struct rb_node *node;
        struct btrfs_delayed_ref_root *delayed_refs;
        struct btrfs_delayed_ref_node *ref;
        int ret = 0;

        delayed_refs = &trans->delayed_refs;

        spin_lock(&delayed_refs->lock);
        if (delayed_refs->num_entries == 0) {
                spin_unlock(&delayed_refs->lock);
                printk(KERN_INFO "btrfs: delayed_refs has NO entry\n");
                return ret;
        }

        node = rb_first(&delayed_refs->root);
        while (node) {
                ref = rb_entry(node, struct btrfs_delayed_ref_node, rb_node);
                node = rb_next(node);

                ref->in_tree = 0;
                rb_erase(&ref->rb_node, &delayed_refs->root);
                delayed_refs->num_entries--;

                atomic_set(&ref->refs, 1);
                if (btrfs_delayed_ref_is_head(ref)) {
                        struct btrfs_delayed_ref_head *head;

                        head = btrfs_delayed_node_to_head(ref);
                        mutex_lock(&head->mutex);
                        kfree(head->extent_op);
                        delayed_refs->num_heads--;
                        if (list_empty(&head->cluster))
                                delayed_refs->num_heads_ready--;
                        list_del_init(&head->cluster);
                        mutex_unlock(&head->mutex);
                }

                spin_unlock(&delayed_refs->lock);
                btrfs_put_delayed_ref(ref);

                cond_resched();
                spin_lock(&delayed_refs->lock);
        }

        spin_unlock(&delayed_refs->lock);

        return ret;
}

static int btrfs_destroy_pending_snapshots(struct btrfs_transaction *t)
{
        struct btrfs_pending_snapshot *snapshot;
        struct list_head splice;

        INIT_LIST_HEAD(&splice);

        list_splice_init(&t->pending_snapshots, &splice);

        while (!list_empty(&splice)) {
                snapshot = list_entry(splice.next,
                                      struct btrfs_pending_snapshot,
                                      list);

                list_del_init(&snapshot->list);

                kfree(snapshot);
        }

        return 0;
}

static int btrfs_destroy_delalloc_inodes(struct btrfs_root *root)
{
        struct btrfs_inode *btrfs_inode;
        struct list_head splice;

        INIT_LIST_HEAD(&splice);

        spin_lock(&root->fs_info->delalloc_lock);
        list_splice_init(&root->fs_info->delalloc_inodes, &splice);

        while (!list_empty(&splice)) {
                btrfs_inode = list_entry(splice.next, struct btrfs_inode,
                                    delalloc_inodes);

                list_del_init(&btrfs_inode->delalloc_inodes);

                btrfs_invalidate_inodes(btrfs_inode->root);
        }

        spin_unlock(&root->fs_info->delalloc_lock);

        return 0;
}

static int btrfs_destroy_marked_extents(struct btrfs_root *root,
                                        struct extent_io_tree *dirty_pages,
                                        int mark)
{
        int ret;
        struct page *page;
        struct inode *btree_inode = root->fs_info->btree_inode;
        struct extent_buffer *eb;
        u64 start = 0;
        u64 end;
        u64 offset;
        unsigned long index;

        while (1) {
                ret = find_first_extent_bit(dirty_pages, start, &start, &end,
                                            mark);
                if (ret)
                        break;

                clear_extent_bits(dirty_pages, start, end, mark, GFP_NOFS);
                while (start <= end) {
                        index = start >> PAGE_CACHE_SHIFT;
                        start = (u64)(index + 1) << PAGE_CACHE_SHIFT;
                        page = find_get_page(btree_inode->i_mapping, index);
                        if (!page)
                                continue;
                        offset = page_offset(page);

                        spin_lock(&dirty_pages->buffer_lock);
                        eb = radix_tree_lookup(
                             &(&BTRFS_I(page->mapping->host)->io_tree)->buffer,
                                               offset >> PAGE_CACHE_SHIFT);
                        spin_unlock(&dirty_pages->buffer_lock);
                        if (eb) {
                                ret = test_and_clear_bit(EXTENT_BUFFER_DIRTY,
                                                         &eb->bflags);
                                atomic_set(&eb->refs, 1);
                        }
                        if (PageWriteback(page))
                                end_page_writeback(page);

                        lock_page(page);
                        if (PageDirty(page)) {
                                clear_page_dirty_for_io(page);
                                spin_lock_irq(&page->mapping->tree_lock);
                                radix_tree_tag_clear(&page->mapping->page_tree,
                                                        page_index(page),
                                                        PAGECACHE_TAG_DIRTY);
                                spin_unlock_irq(&page->mapping->tree_lock);
                        }

                        page->mapping->a_ops->invalidatepage(page, 0);
                        unlock_page(page);
                }
        }

        return ret;
}
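
/*
 * worked example of the paging math above, assuming 4K pages: a start
 * of 8192 gives index = 8192 >> 12 = 2, and the next start becomes
 * (2 + 1) << 12 = 12288, stepping the inner loop one page at a time
 */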

static int btrfs_destroy_pinned_extent(struct btrfs_root *root,
                                       struct extent_io_tree *pinned_extents)
{
        struct extent_io_tree *unpin;
        u64 start;
        u64 end;
        int ret;

        unpin = pinned_extents;
        while (1) {
                ret = find_first_extent_bit(unpin, 0, &start, &end,
                                            EXTENT_DIRTY);
                if (ret)
                        break;

                /* discard the range if we mounted with -o discard */
                if (btrfs_test_opt(root, DISCARD))
                        ret = btrfs_error_discard_extent(root, start,
                                                         end + 1 - start,
                                                         NULL);

                clear_extent_dirty(unpin, start, end, GFP_NOFS);
                btrfs_error_unpin_extent_range(root, start, end);
                cond_resched();
        }

        return 0;
}

static int btrfs_cleanup_transaction(struct btrfs_root *root)
{
        struct btrfs_transaction *t;
        LIST_HEAD(list);

        WARN_ON(1);

        mutex_lock(&root->fs_info->transaction_kthread_mutex);

        spin_lock(&root->fs_info->trans_lock);
        list_splice_init(&root->fs_info->trans_list, &list);
        root->fs_info->trans_no_join = 1;
        spin_unlock(&root->fs_info->trans_lock);

        while (!list_empty(&list)) {
                t = list_entry(list.next, struct btrfs_transaction, list);
                if (!t)
                        break;

                btrfs_destroy_ordered_operations(root);

                btrfs_destroy_ordered_extents(root);

                btrfs_destroy_delayed_refs(t, root);

                btrfs_block_rsv_release(root,
                                        &root->fs_info->trans_block_rsv,
                                        t->dirty_pages.dirty_bytes);

                /* FIXME: cleanup wait for commit */
                t->in_commit = 1;
                t->blocked = 1;
                if (waitqueue_active(&root->fs_info->transaction_blocked_wait))
                        wake_up(&root->fs_info->transaction_blocked_wait);

                t->blocked = 0;
                if (waitqueue_active(&root->fs_info->transaction_wait))
                        wake_up(&root->fs_info->transaction_wait);

                t->commit_done = 1;
                if (waitqueue_active(&t->commit_wait))
                        wake_up(&t->commit_wait);

                btrfs_destroy_pending_snapshots(t);

                btrfs_destroy_delalloc_inodes(root);

                spin_lock(&root->fs_info->trans_lock);
                root->fs_info->running_transaction = NULL;
                spin_unlock(&root->fs_info->trans_lock);

                btrfs_destroy_marked_extents(root, &t->dirty_pages,
                                             EXTENT_DIRTY);

                btrfs_destroy_pinned_extent(root,
                                            root->fs_info->pinned_extents);

                atomic_set(&t->use_count, 0);
                list_del_init(&t->list);
                memset(t, 0, sizeof(*t));
                kmem_cache_free(btrfs_transaction_cachep, t);
        }

        spin_lock(&root->fs_info->trans_lock);
        root->fs_info->trans_no_join = 0;
        spin_unlock(&root->fs_info->trans_lock);
        mutex_unlock(&root->fs_info->transaction_kthread_mutex);

        return 0;
}
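
/*
 * the in_commit/blocked/commit_done transitions above walk each dead
 * transaction through the same states a normal commit would, so any
 * task sleeping on those waitqueues in the commit path is woken and can
 * observe that the transaction will never complete normally
 */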

static struct extent_io_ops btree_extent_io_ops = {
        .write_cache_pages_lock_hook = btree_lock_page_hook,
        .readpage_end_io_hook = btree_readpage_end_io_hook,
        .readpage_io_failed_hook = btree_io_failed_hook,
        .submit_bio_hook = btree_submit_bio_hook,
        /* note we're sharing with inode.c for the merge bio hook */
        .merge_bio_hook = btrfs_merge_bio_hook,
};