/*
 * Copyright (c) 2003-2006, Cluster File Systems, Inc, info@clusterfs.com
 * Written by Alex Tomas <alex@clusterfs.com>
 *
 * Architecture independence:
 *   Copyright (c) 2005, Bull S.A.
 *   Written by Pierre Peiffer <pierre.peiffer@bull.net>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
 */

/*
 * Extents support for EXT4
 *
 * TODO:
 *   - ext4*_error() should be used in some situations
 *   - analyze all BUG()/BUG_ON(), use -EIO where appropriate
 *   - smart tree reduction
 */

#include <linux/module.h>
#include <linux/fs.h>
#include <linux/time.h>
#include <linux/jbd2.h>
#include <linux/highuid.h>
#include <linux/pagemap.h>
#include <linux/quotaops.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/falloc.h>
#include <asm/uaccess.h>
#include <linux/fiemap.h>
#include "ext4_jbd2.h"
#include "ext4_extents.h"


/*
 * ext_pblock:
 * combine low and high parts of physical block number into ext4_fsblk_t
 */
static ext4_fsblk_t ext_pblock(struct ext4_extent *ex)
{
        ext4_fsblk_t block;

        block = le32_to_cpu(ex->ee_start_lo);
        block |= ((ext4_fsblk_t) le16_to_cpu(ex->ee_start_hi) << 31) << 1;
        return block;
}

/*
 * idx_pblock:
 * combine low and high parts of a leaf physical block number into ext4_fsblk_t
 */
ext4_fsblk_t idx_pblock(struct ext4_extent_idx *ix)
{
        ext4_fsblk_t block;

        block = le32_to_cpu(ix->ei_leaf_lo);
        block |= ((ext4_fsblk_t) le16_to_cpu(ix->ei_leaf_hi) << 31) << 1;
        return block;
}

/*
 * ext4_ext_store_pblock:
 * stores a large physical block number into an extent struct,
 * breaking it into parts
 */
void ext4_ext_store_pblock(struct ext4_extent *ex, ext4_fsblk_t pb)
{
        ex->ee_start_lo = cpu_to_le32((unsigned long) (pb & 0xffffffff));
        ex->ee_start_hi = cpu_to_le16((unsigned long) ((pb >> 31) >> 1) & 0xffff);
}

/*
 * ext4_idx_store_pblock:
 * stores a large physical block number into an index struct,
 * breaking it into parts
 */
static void ext4_idx_store_pblock(struct ext4_extent_idx *ix, ext4_fsblk_t pb)
{
        ix->ei_leaf_lo = cpu_to_le32((unsigned long) (pb & 0xffffffff));
        ix->ei_leaf_hi = cpu_to_le16((unsigned long) ((pb >> 31) >> 1) & 0xffff);
}
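
/*
 * Note (added): the helpers above pack a 48-bit physical block number
 * into a 32-bit low part and a 16-bit high part.  The double shift
 * "(pb >> 31) >> 1" is equivalent to "pb >> 32"; it appears to be
 * written this way so the shift stays well-defined even when
 * ext4_fsblk_t is a 32-bit type.  Illustrative example (values
 * assumed, not from the original source):
 *   pb = 0x000123456789  ->  ee_start_lo = 0x23456789
 *                            ee_start_hi = 0x0001
 */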

static int ext4_ext_journal_restart(handle_t *handle, int needed)
{
        int err;

        if (handle->h_buffer_credits > needed)
                return 0;
        err = ext4_journal_extend(handle, needed);
        if (err <= 0)
                return err;
        return ext4_journal_restart(handle, needed);
}
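
/*
 * Note (added): ext4_journal_extend() returns 0 when the current
 * handle was extended in place and a negative errno on failure, so
 * "err <= 0" covers both cases; a positive return means the handle
 * could not be extended and the transaction has to be restarted via
 * ext4_journal_restart() instead.
 */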

/*
 * could return:
 *  - EROFS
 *  - ENOMEM
 */
static int ext4_ext_get_access(handle_t *handle, struct inode *inode,
                                struct ext4_ext_path *path)
{
        if (path->p_bh) {
                /* path points to block */
                return ext4_journal_get_write_access(handle, path->p_bh);
        }
        /* path points to leaf/index in inode body */
        /* we use in-core data, no need to protect them */
        return 0;
}

/*
 * could return:
 *  - EROFS
 *  - ENOMEM
 *  - EIO
 */
static int ext4_ext_dirty(handle_t *handle, struct inode *inode,
                                struct ext4_ext_path *path)
{
        int err;
        if (path->p_bh) {
                /* path points to block */
                err = ext4_journal_dirty_metadata(handle, path->p_bh);
        } else {
                /* path points to leaf/index in inode body */
                err = ext4_mark_inode_dirty(handle, inode);
        }
        return err;
}

static ext4_fsblk_t ext4_ext_find_goal(struct inode *inode,
                              struct ext4_ext_path *path,
                              ext4_lblk_t block)
{
        struct ext4_inode_info *ei = EXT4_I(inode);
        ext4_fsblk_t bg_start;
        ext4_fsblk_t last_block;
        ext4_grpblk_t colour;
        int depth;

        if (path) {
                struct ext4_extent *ex;
                depth = path->p_depth;

                /* try to predict block placement */
                ex = path[depth].p_ext;
                if (ex)
                        return ext_pblock(ex)+(block-le32_to_cpu(ex->ee_block));

                /* it looks like index is empty;
                 * try to find starting block from index itself */
                if (path[depth].p_bh)
                        return path[depth].p_bh->b_blocknr;
        }

        /* OK. use inode's group */
        bg_start = (ei->i_block_group * EXT4_BLOCKS_PER_GROUP(inode->i_sb)) +
                le32_to_cpu(EXT4_SB(inode->i_sb)->s_es->s_first_data_block);
        last_block = ext4_blocks_count(EXT4_SB(inode->i_sb)->s_es) - 1;

        if (bg_start + EXT4_BLOCKS_PER_GROUP(inode->i_sb) <= last_block)
                colour = (current->pid % 16) *
                        (EXT4_BLOCKS_PER_GROUP(inode->i_sb) / 16);
        else
                colour = (current->pid % 16) * ((last_block - bg_start) / 16);
        return bg_start + colour + block;
}
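
/*
 * Note (added): the "colour" term above spreads concurrent allocators
 * across a block group by PID.  Illustrative example (values assumed,
 * not from the original source): with EXT4_BLOCKS_PER_GROUP = 32768
 * and current->pid % 16 == 2, colour = 2 * (32768 / 16) = 4096, so
 * the goal lands 4096 blocks past the start of the inode's group.
 */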

/*
 * Allocation for a metadata block
 */
static ext4_fsblk_t
ext4_ext_new_meta_block(handle_t *handle, struct inode *inode,
                        struct ext4_ext_path *path,
                        struct ext4_extent *ex, int *err)
{
        ext4_fsblk_t goal, newblock;

        goal = ext4_ext_find_goal(inode, path, le32_to_cpu(ex->ee_block));
        newblock = ext4_new_meta_block(handle, inode, goal, err);
        return newblock;
}

static int ext4_ext_space_block(struct inode *inode)
{
        int size;

        size = (inode->i_sb->s_blocksize - sizeof(struct ext4_extent_header))
                        / sizeof(struct ext4_extent);
#ifdef AGGRESSIVE_TEST
        if (size > 6)
                size = 6;
#endif
        return size;
}

static int ext4_ext_space_block_idx(struct inode *inode)
{
        int size;

        size = (inode->i_sb->s_blocksize - sizeof(struct ext4_extent_header))
                        / sizeof(struct ext4_extent_idx);
#ifdef AGGRESSIVE_TEST
        if (size > 5)
                size = 5;
#endif
        return size;
}

static int ext4_ext_space_root(struct inode *inode)
{
        int size;

        size = sizeof(EXT4_I(inode)->i_data);
        size -= sizeof(struct ext4_extent_header);
        size /= sizeof(struct ext4_extent);
#ifdef AGGRESSIVE_TEST
        if (size > 3)
                size = 3;
#endif
        return size;
}

static int ext4_ext_space_root_idx(struct inode *inode)
{
        int size;

        size = sizeof(EXT4_I(inode)->i_data);
        size -= sizeof(struct ext4_extent_header);
        size /= sizeof(struct ext4_extent_idx);
#ifdef AGGRESSIVE_TEST
        if (size > 4)
                size = 4;
#endif
        return size;
}
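
/*
 * Note (added): illustrative capacities, assuming a 4 KiB block size.
 * The extent header, an extent entry and an index entry are 12 bytes
 * each, so a full block holds (4096 - 12) / 12 = 340 entries, while
 * the in-inode root (60 bytes of i_data) holds (60 - 12) / 12 = 4.
 */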

/*
 * Calculate the number of metadata blocks needed
 * to allocate @blocks
 * Worst case is one block per extent
 */
int ext4_ext_calc_metadata_amount(struct inode *inode, int blocks)
{
        int lcap, icap, rcap, leafs, idxs, num;
        int newextents = blocks;

        rcap = ext4_ext_space_root_idx(inode);
        lcap = ext4_ext_space_block(inode);
        icap = ext4_ext_space_block_idx(inode);

        /* number of new leaf blocks needed */
        num = leafs = (newextents + lcap - 1) / lcap;

        /*
         * Worst case, we need separate index block(s)
         * to link all new leaf blocks
         */
        idxs = (leafs + icap - 1) / icap;
        do {
                num += idxs;
                idxs = (idxs + icap - 1) / icap;
        } while (idxs > rcap);

        return num;
}
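
/*
 * Note (added): worked example with assumed values, not from the
 * original source.  For blocks = 1000 on a 4 KiB filesystem
 * (lcap = icap = 340, rcap = 4): leafs = ceil(1000 / 340) = 3 leaf
 * blocks, idxs = ceil(3 / 340) = 1 index block, and 1 <= rcap already
 * fits under the root, so num = 3 + 1 = 4 metadata blocks.
 */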

static int
ext4_ext_max_entries(struct inode *inode, int depth)
{
        int max;

        if (depth == ext_depth(inode)) {
                if (depth == 0)
                        max = ext4_ext_space_root(inode);
                else
                        max = ext4_ext_space_root_idx(inode);
        } else {
                if (depth == 0)
                        max = ext4_ext_space_block(inode);
                else
                        max = ext4_ext_space_block_idx(inode);
        }

        return max;
}

static int __ext4_ext_check_header(const char *function, struct inode *inode,
                                        struct ext4_extent_header *eh,
                                        int depth)
{
        const char *error_msg;
        int max = 0;

        if (unlikely(eh->eh_magic != EXT4_EXT_MAGIC)) {
                error_msg = "invalid magic";
                goto corrupted;
        }
        if (unlikely(le16_to_cpu(eh->eh_depth) != depth)) {
                error_msg = "unexpected eh_depth";
                goto corrupted;
        }
        if (unlikely(eh->eh_max == 0)) {
                error_msg = "invalid eh_max";
                goto corrupted;
        }
        max = ext4_ext_max_entries(inode, depth);
        if (unlikely(le16_to_cpu(eh->eh_max) > max)) {
                error_msg = "too large eh_max";
                goto corrupted;
        }
        if (unlikely(le16_to_cpu(eh->eh_entries) > le16_to_cpu(eh->eh_max))) {
                error_msg = "invalid eh_entries";
                goto corrupted;
        }
        return 0;

corrupted:
        ext4_error(inode->i_sb, function,
                        "bad header in inode #%lu: %s - magic %x, "
                        "entries %u, max %u(%u), depth %u(%u)",
                        inode->i_ino, error_msg, le16_to_cpu(eh->eh_magic),
                        le16_to_cpu(eh->eh_entries), le16_to_cpu(eh->eh_max),
                        max, le16_to_cpu(eh->eh_depth), depth);

        return -EIO;
}

#define ext4_ext_check_header(inode, eh, depth) \
        __ext4_ext_check_header(__func__, inode, eh, depth)

#ifdef EXT_DEBUG
static void ext4_ext_show_path(struct inode *inode, struct ext4_ext_path *path)
{
        int k, l = path->p_depth;

        ext_debug("path:");
        for (k = 0; k <= l; k++, path++) {
                if (path->p_idx) {
                        ext_debug("  %d->%llu", le32_to_cpu(path->p_idx->ei_block),
                                  idx_pblock(path->p_idx));
                } else if (path->p_ext) {
                        ext_debug("  %d:%d:%llu ",
                                  le32_to_cpu(path->p_ext->ee_block),
                                  ext4_ext_get_actual_len(path->p_ext),
                                  ext_pblock(path->p_ext));
                } else
                        ext_debug("  []");
        }
        ext_debug("\n");
}

static void ext4_ext_show_leaf(struct inode *inode, struct ext4_ext_path *path)
{
        int depth = ext_depth(inode);
        struct ext4_extent_header *eh;
        struct ext4_extent *ex;
        int i;

        if (!path)
                return;

        eh = path[depth].p_hdr;
        ex = EXT_FIRST_EXTENT(eh);

        for (i = 0; i < le16_to_cpu(eh->eh_entries); i++, ex++) {
                ext_debug("%d:%d:%llu ", le32_to_cpu(ex->ee_block),
                          ext4_ext_get_actual_len(ex), ext_pblock(ex));
        }
        ext_debug("\n");
}
#else
#define ext4_ext_show_path(inode, path)
#define ext4_ext_show_leaf(inode, path)
#endif

void ext4_ext_drop_refs(struct ext4_ext_path *path)
{
        int depth = path->p_depth;
        int i;

        for (i = 0; i <= depth; i++, path++)
                if (path->p_bh) {
                        brelse(path->p_bh);
                        path->p_bh = NULL;
                }
}

/*
 * ext4_ext_binsearch_idx:
 * binary search for the closest index of the given block
 * the header must be checked before calling this
 */
static void
ext4_ext_binsearch_idx(struct inode *inode,
                        struct ext4_ext_path *path, ext4_lblk_t block)
{
        struct ext4_extent_header *eh = path->p_hdr;
        struct ext4_extent_idx *r, *l, *m;


        ext_debug("binsearch for %u(idx):  ", block);

        l = EXT_FIRST_INDEX(eh) + 1;
        r = EXT_LAST_INDEX(eh);
        while (l <= r) {
                m = l + (r - l) / 2;
                if (block < le32_to_cpu(m->ei_block))
                        r = m - 1;
                else
                        l = m + 1;
                ext_debug("%p(%u):%p(%u):%p(%u) ", l, le32_to_cpu(l->ei_block),
                                m, le32_to_cpu(m->ei_block),
                                r, le32_to_cpu(r->ei_block));
        }

        path->p_idx = l - 1;
        ext_debug("  -> %d->%lld ", le32_to_cpu(path->p_idx->ei_block),
                  idx_pblock(path->p_idx));

#ifdef CHECK_BINSEARCH
        {
                struct ext4_extent_idx *chix, *ix;
                int k;

                chix = ix = EXT_FIRST_INDEX(eh);
                for (k = 0; k < le16_to_cpu(eh->eh_entries); k++, ix++) {
                        if (k != 0 &&
                            le32_to_cpu(ix->ei_block) <= le32_to_cpu(ix[-1].ei_block)) {
                                printk(KERN_DEBUG "k=%d, ix=0x%p, "
                                       "first=0x%p\n", k,
                                       ix, EXT_FIRST_INDEX(eh));
                                printk(KERN_DEBUG "%u <= %u\n",
                                       le32_to_cpu(ix->ei_block),
                                       le32_to_cpu(ix[-1].ei_block));
                        }
                        BUG_ON(k && le32_to_cpu(ix->ei_block)
                                           <= le32_to_cpu(ix[-1].ei_block));
                        if (block < le32_to_cpu(ix->ei_block))
                                break;
                        chix = ix;
                }
                BUG_ON(chix != path->p_idx);
        }
#endif

}

/*
 * ext4_ext_binsearch:
 * binary search for closest extent of the given block
 * the header must be checked before calling this
 */
static void
ext4_ext_binsearch(struct inode *inode,
                struct ext4_ext_path *path, ext4_lblk_t block)
{
        struct ext4_extent_header *eh = path->p_hdr;
        struct ext4_extent *r, *l, *m;

        if (eh->eh_entries == 0) {
                /*
                 * this leaf is empty:
                 * we get such a leaf in split/add case
                 */
                return;
        }

        ext_debug("binsearch for %u:  ", block);

        l = EXT_FIRST_EXTENT(eh) + 1;
        r = EXT_LAST_EXTENT(eh);

        while (l <= r) {
                m = l + (r - l) / 2;
                if (block < le32_to_cpu(m->ee_block))
                        r = m - 1;
                else
                        l = m + 1;
                ext_debug("%p(%u):%p(%u):%p(%u) ", l, le32_to_cpu(l->ee_block),
                                m, le32_to_cpu(m->ee_block),
                                r, le32_to_cpu(r->ee_block));
        }

        path->p_ext = l - 1;
        ext_debug("  -> %d:%llu:%d ",
                        le32_to_cpu(path->p_ext->ee_block),
                        ext_pblock(path->p_ext),
                        ext4_ext_get_actual_len(path->p_ext));

#ifdef CHECK_BINSEARCH
        {
                struct ext4_extent *chex, *ex;
                int k;

                chex = ex = EXT_FIRST_EXTENT(eh);
                for (k = 0; k < le16_to_cpu(eh->eh_entries); k++, ex++) {
                        BUG_ON(k && le32_to_cpu(ex->ee_block)
                                          <= le32_to_cpu(ex[-1].ee_block));
                        if (block < le32_to_cpu(ex->ee_block))
                                break;
                        chex = ex;
                }
                BUG_ON(chex != path->p_ext);
        }
#endif

}
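
/*
 * Note (added): both binary searches start at FIRST + 1 and store
 * "l - 1", i.e. the last entry whose start block is <= the target
 * block.  Illustrative example (values assumed): for extents starting
 * at logical blocks {0, 100, 200} and a target of 150, path->p_ext is
 * left pointing at the extent that starts at block 100.
 */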

int ext4_ext_tree_init(handle_t *handle, struct inode *inode)
{
        struct ext4_extent_header *eh;

        eh = ext_inode_hdr(inode);
        eh->eh_depth = 0;
        eh->eh_entries = 0;
        eh->eh_magic = EXT4_EXT_MAGIC;
        eh->eh_max = cpu_to_le16(ext4_ext_space_root(inode));
        ext4_mark_inode_dirty(handle, inode);
        ext4_ext_invalidate_cache(inode);
        return 0;
}

struct ext4_ext_path *
ext4_ext_find_extent(struct inode *inode, ext4_lblk_t block,
                                        struct ext4_ext_path *path)
{
        struct ext4_extent_header *eh;
        struct buffer_head *bh;
        short int depth, i, ppos = 0, alloc = 0;

        eh = ext_inode_hdr(inode);
        depth = ext_depth(inode);
        if (ext4_ext_check_header(inode, eh, depth))
                return ERR_PTR(-EIO);


        /* account possible depth increase */
        if (!path) {
                path = kzalloc(sizeof(struct ext4_ext_path) * (depth + 2),
                                GFP_NOFS);
                if (!path)
                        return ERR_PTR(-ENOMEM);
                alloc = 1;
        }
        path[0].p_hdr = eh;
        path[0].p_bh = NULL;

        i = depth;
        /* walk through the tree */
        while (i) {
                ext_debug("depth %d: num %d, max %d\n",
                          ppos, le16_to_cpu(eh->eh_entries), le16_to_cpu(eh->eh_max));

                ext4_ext_binsearch_idx(inode, path + ppos, block);
                path[ppos].p_block = idx_pblock(path[ppos].p_idx);
                path[ppos].p_depth = i;
                path[ppos].p_ext = NULL;

                bh = sb_bread(inode->i_sb, path[ppos].p_block);
                if (!bh)
                        goto err;

                eh = ext_block_hdr(bh);
                ppos++;
                BUG_ON(ppos > depth);
                path[ppos].p_bh = bh;
                path[ppos].p_hdr = eh;
                i--;

                if (ext4_ext_check_header(inode, eh, i))
                        goto err;
        }

        path[ppos].p_depth = i;
        path[ppos].p_ext = NULL;
        path[ppos].p_idx = NULL;

        /* find extent */
        ext4_ext_binsearch(inode, path + ppos, block);
        /* if not an empty leaf */
        if (path[ppos].p_ext)
                path[ppos].p_block = ext_pblock(path[ppos].p_ext);

        ext4_ext_show_path(inode, path);

        return path;

err:
        ext4_ext_drop_refs(path);
        if (alloc)
                kfree(path);
        return ERR_PTR(-EIO);
}
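
/*
 * Note (added): a usage sketch, not from the original source.  On
 * success the caller owns the returned path array and the buffer_head
 * references it holds, so the usual teardown mirrors the callers in
 * this file:
 *
 *	path = ext4_ext_find_extent(inode, block, NULL);
 *	if (IS_ERR(path))
 *		return PTR_ERR(path);
 *	...
 *	ext4_ext_drop_refs(path);
 *	kfree(path);
 */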

/*
 * ext4_ext_insert_index:
 * insert new index [@logical;@ptr] into the block at @curp;
 * check where to insert: before @curp or after @curp
 */
static int ext4_ext_insert_index(handle_t *handle, struct inode *inode,
                                struct ext4_ext_path *curp,
                                int logical, ext4_fsblk_t ptr)
{
        struct ext4_extent_idx *ix;
        int len, err;

        err = ext4_ext_get_access(handle, inode, curp);
        if (err)
                return err;

        BUG_ON(logical == le32_to_cpu(curp->p_idx->ei_block));
        len = EXT_MAX_INDEX(curp->p_hdr) - curp->p_idx;
        if (logical > le32_to_cpu(curp->p_idx->ei_block)) {
                /* insert after */
                if (curp->p_idx != EXT_LAST_INDEX(curp->p_hdr)) {
                        len = (len - 1) * sizeof(struct ext4_extent_idx);
                        len = len < 0 ? 0 : len;
                        ext_debug("insert new index %d after: %llu. "
                                        "move %d from 0x%p to 0x%p\n",
                                        logical, ptr, len,
                                        (curp->p_idx + 1), (curp->p_idx + 2));
                        memmove(curp->p_idx + 2, curp->p_idx + 1, len);
                }
                ix = curp->p_idx + 1;
        } else {
                /* insert before */
                len = len * sizeof(struct ext4_extent_idx);
                len = len < 0 ? 0 : len;
                ext_debug("insert new index %d before: %llu. "
                                "move %d from 0x%p to 0x%p\n",
                                logical, ptr, len,
                                curp->p_idx, (curp->p_idx + 1));
                memmove(curp->p_idx + 1, curp->p_idx, len);
                ix = curp->p_idx;
        }

        ix->ei_block = cpu_to_le32(logical);
        ext4_idx_store_pblock(ix, ptr);
        le16_add_cpu(&curp->p_hdr->eh_entries, 1);

        BUG_ON(le16_to_cpu(curp->p_hdr->eh_entries)
                             > le16_to_cpu(curp->p_hdr->eh_max));
        BUG_ON(ix > EXT_LAST_INDEX(curp->p_hdr));

        err = ext4_ext_dirty(handle, inode, curp);
        ext4_std_error(inode->i_sb, err);

        return err;
}

/*
 * ext4_ext_split:
 * inserts new subtree into the path, using free index entry
 * at depth @at:
 * - allocates all needed blocks (new leaf and all intermediate index blocks)
 * - makes decision where to split
 * - moves remaining extents and index entries (right to the split point)
 *   into the newly allocated blocks
 * - initializes subtree
 */
static int ext4_ext_split(handle_t *handle, struct inode *inode,
                                struct ext4_ext_path *path,
                                struct ext4_extent *newext, int at)
{
        struct buffer_head *bh = NULL;
        int depth = ext_depth(inode);
        struct ext4_extent_header *neh;
        struct ext4_extent_idx *fidx;
        struct ext4_extent *ex;
        int i = at, k, m, a;
        ext4_fsblk_t newblock, oldblock;
        __le32 border;
        ext4_fsblk_t *ablocks = NULL; /* array of allocated blocks */
        int err = 0;

        /* make decision: where to split? */
        /* FIXME: now decision is simplest: at current extent */

        /* if current leaf will be split, then we should use
         * border from split point */
        BUG_ON(path[depth].p_ext > EXT_MAX_EXTENT(path[depth].p_hdr));
        if (path[depth].p_ext != EXT_MAX_EXTENT(path[depth].p_hdr)) {
                border = path[depth].p_ext[1].ee_block;
                ext_debug("leaf will be split."
                                " next leaf starts at %d\n",
                                  le32_to_cpu(border));
        } else {
                border = newext->ee_block;
                ext_debug("leaf will be added."
                                " next leaf starts at %d\n",
                                le32_to_cpu(border));
        }

        /*
         * If an error occurs, we stop processing and mark the
         * filesystem read-only. The index won't be inserted and the
         * tree will remain consistent. The next mount will repair
         * buffers too.
         */

        /*
         * Get array to track all allocated blocks.
         * We need this to handle errors and free these
         * blocks upon failure.
         */
        ablocks = kzalloc(sizeof(ext4_fsblk_t) * depth, GFP_NOFS);
        if (!ablocks)
                return -ENOMEM;

        /* allocate all needed blocks */
        ext_debug("allocate %d blocks for indexes/leaf\n", depth - at);
        for (a = 0; a < depth - at; a++) {
                newblock = ext4_ext_new_meta_block(handle, inode, path,
                                                   newext, &err);
                if (newblock == 0)
                        goto cleanup;
                ablocks[a] = newblock;
        }

        /* initialize new leaf */
        newblock = ablocks[--a];
        BUG_ON(newblock == 0);
        bh = sb_getblk(inode->i_sb, newblock);
        if (!bh) {
                err = -EIO;
                goto cleanup;
        }
        lock_buffer(bh);

        err = ext4_journal_get_create_access(handle, bh);
        if (err)
                goto cleanup;

        neh = ext_block_hdr(bh);
        neh->eh_entries = 0;
        neh->eh_max = cpu_to_le16(ext4_ext_space_block(inode));
        neh->eh_magic = EXT4_EXT_MAGIC;
        neh->eh_depth = 0;
        ex = EXT_FIRST_EXTENT(neh);

        /* move remainder of path[depth] to the new leaf */
        BUG_ON(path[depth].p_hdr->eh_entries != path[depth].p_hdr->eh_max);
        /* start copy from next extent */
        /* TODO: we could do it by single memmove */
        m = 0;
        path[depth].p_ext++;
        while (path[depth].p_ext <=
                        EXT_MAX_EXTENT(path[depth].p_hdr)) {
                ext_debug("move %d:%llu:%d in new leaf %llu\n",
                                le32_to_cpu(path[depth].p_ext->ee_block),
                                ext_pblock(path[depth].p_ext),
                                ext4_ext_get_actual_len(path[depth].p_ext),
                                newblock);
                /*memmove(ex++, path[depth].p_ext++,
                                sizeof(struct ext4_extent));
                neh->eh_entries++;*/
                path[depth].p_ext++;
                m++;
        }
        if (m) {
                memmove(ex, path[depth].p_ext-m, sizeof(struct ext4_extent)*m);
                le16_add_cpu(&neh->eh_entries, m);
        }

        set_buffer_uptodate(bh);
        unlock_buffer(bh);

        err = ext4_journal_dirty_metadata(handle, bh);
        if (err)
                goto cleanup;
        brelse(bh);
        bh = NULL;

        /* correct old leaf */
        if (m) {
                err = ext4_ext_get_access(handle, inode, path + depth);
                if (err)
                        goto cleanup;
                le16_add_cpu(&path[depth].p_hdr->eh_entries, -m);
                err = ext4_ext_dirty(handle, inode, path + depth);
                if (err)
                        goto cleanup;

        }

        /* create intermediate indexes */
        k = depth - at - 1;
        BUG_ON(k < 0);
        if (k)
                ext_debug("create %d intermediate indices\n", k);
        /* insert new index into current index block */
        /* current depth stored in i var */
        i = depth - 1;
        while (k--) {
                oldblock = newblock;
                newblock = ablocks[--a];
                bh = sb_getblk(inode->i_sb, newblock);
                if (!bh) {
                        err = -EIO;
                        goto cleanup;
                }
                lock_buffer(bh);

                err = ext4_journal_get_create_access(handle, bh);
                if (err)
                        goto cleanup;

                neh = ext_block_hdr(bh);
                neh->eh_entries = cpu_to_le16(1);
                neh->eh_magic = EXT4_EXT_MAGIC;
                neh->eh_max = cpu_to_le16(ext4_ext_space_block_idx(inode));
                neh->eh_depth = cpu_to_le16(depth - i);
                fidx = EXT_FIRST_INDEX(neh);
                fidx->ei_block = border;
                ext4_idx_store_pblock(fidx, oldblock);

                ext_debug("int.index at %d (block %llu): %u -> %llu\n",
                                i, newblock, le32_to_cpu(border), oldblock);
                /* copy indexes */
                m = 0;
                path[i].p_idx++;

                ext_debug("cur 0x%p, last 0x%p\n", path[i].p_idx,
                                EXT_MAX_INDEX(path[i].p_hdr));
                BUG_ON(EXT_MAX_INDEX(path[i].p_hdr) !=
                                EXT_LAST_INDEX(path[i].p_hdr));
                while (path[i].p_idx <= EXT_MAX_INDEX(path[i].p_hdr)) {
                        ext_debug("%d: move %d:%llu in new index %llu\n", i,
                                        le32_to_cpu(path[i].p_idx->ei_block),
                                        idx_pblock(path[i].p_idx),
                                        newblock);
                        /*memmove(++fidx, path[i].p_idx++,
                                        sizeof(struct ext4_extent_idx));
                        neh->eh_entries++;
                        BUG_ON(neh->eh_entries > neh->eh_max);*/
                        path[i].p_idx++;
                        m++;
                }
                if (m) {
                        memmove(++fidx, path[i].p_idx - m,
                                sizeof(struct ext4_extent_idx) * m);
                        le16_add_cpu(&neh->eh_entries, m);
                }
                set_buffer_uptodate(bh);
                unlock_buffer(bh);

                err = ext4_journal_dirty_metadata(handle, bh);
                if (err)
                        goto cleanup;
                brelse(bh);
                bh = NULL;

                /* correct old index */
                if (m) {
                        err = ext4_ext_get_access(handle, inode, path + i);
                        if (err)
                                goto cleanup;
                        le16_add_cpu(&path[i].p_hdr->eh_entries, -m);
                        err = ext4_ext_dirty(handle, inode, path + i);
                        if (err)
                                goto cleanup;
                }

                i--;
        }

        /* insert new index */
        err = ext4_ext_insert_index(handle, inode, path + at,
                                    le32_to_cpu(border), newblock);

cleanup:
        if (bh) {
                if (buffer_locked(bh))
                        unlock_buffer(bh);
                brelse(bh);
        }

        if (err) {
                /* free all allocated blocks in error case */
                for (i = 0; i < depth; i++) {
                        if (!ablocks[i])
                                continue;
                        ext4_free_blocks(handle, inode, ablocks[i], 1, 1);
                }
        }
        kfree(ablocks);

        return err;
}
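
/*
 * Note (added): the cleanup path is the reason ablocks[] exists.  All
 * metadata blocks for the split are reserved up front; if any later
 * step fails before the new index is linked in, the loop above frees
 * every block recorded in ablocks[], so a failed split does not leak
 * blocks.
 */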

/*
 * ext4_ext_grow_indepth:
 * implements tree growing procedure:
 * - allocates new block
 * - moves top-level data (index block or leaf) into the new block
 * - initializes new top-level, creating index that points to the
 *   just created block
 */
static int ext4_ext_grow_indepth(handle_t *handle, struct inode *inode,
                                        struct ext4_ext_path *path,
                                        struct ext4_extent *newext)
{
        struct ext4_ext_path *curp = path;
        struct ext4_extent_header *neh;
        struct ext4_extent_idx *fidx;
        struct buffer_head *bh;
        ext4_fsblk_t newblock;
        int err = 0;

        newblock = ext4_ext_new_meta_block(handle, inode, path, newext, &err);
        if (newblock == 0)
                return err;

        bh = sb_getblk(inode->i_sb, newblock);
        if (!bh) {
                err = -EIO;
                ext4_std_error(inode->i_sb, err);
                return err;
        }
        lock_buffer(bh);

        err = ext4_journal_get_create_access(handle, bh);
        if (err) {
                unlock_buffer(bh);
                goto out;
        }

        /* move top-level index/leaf into new block */
        memmove(bh->b_data, curp->p_hdr, sizeof(EXT4_I(inode)->i_data));

        /* set size of new block */
        neh = ext_block_hdr(bh);
        /* old root could have indexes or leaves
         * so calculate eh_max the right way */
        if (ext_depth(inode))
                neh->eh_max = cpu_to_le16(ext4_ext_space_block_idx(inode));
        else
                neh->eh_max = cpu_to_le16(ext4_ext_space_block(inode));
        neh->eh_magic = EXT4_EXT_MAGIC;
        set_buffer_uptodate(bh);
        unlock_buffer(bh);

        err = ext4_journal_dirty_metadata(handle, bh);
        if (err)
                goto out;

        /* create index in new top-level index: num,max,pointer */
        err = ext4_ext_get_access(handle, inode, curp);
        if (err)
                goto out;

        curp->p_hdr->eh_magic = EXT4_EXT_MAGIC;
        curp->p_hdr->eh_max = cpu_to_le16(ext4_ext_space_root_idx(inode));
        curp->p_hdr->eh_entries = cpu_to_le16(1);
        curp->p_idx = EXT_FIRST_INDEX(curp->p_hdr);

        if (path[0].p_hdr->eh_depth)
                curp->p_idx->ei_block =
                        EXT_FIRST_INDEX(path[0].p_hdr)->ei_block;
        else
                curp->p_idx->ei_block =
                        EXT_FIRST_EXTENT(path[0].p_hdr)->ee_block;
        ext4_idx_store_pblock(curp->p_idx, newblock);

        neh = ext_inode_hdr(inode);
        fidx = EXT_FIRST_INDEX(neh);
        ext_debug("new root: num %d(%d), lblock %d, ptr %llu\n",
                  le16_to_cpu(neh->eh_entries), le16_to_cpu(neh->eh_max),
                  le32_to_cpu(fidx->ei_block), idx_pblock(fidx));

        neh->eh_depth = cpu_to_le16(path->p_depth + 1);
        err = ext4_ext_dirty(handle, inode, curp);
out:
        brelse(bh);

        return err;
}

/*
 * ext4_ext_create_new_leaf:
 * finds empty index and adds new leaf.
 * if no free index is found, then it requests in-depth growing.
 */
static int ext4_ext_create_new_leaf(handle_t *handle, struct inode *inode,
                                        struct ext4_ext_path *path,
                                        struct ext4_extent *newext)
{
        struct ext4_ext_path *curp;
        int depth, i, err = 0;

repeat:
        i = depth = ext_depth(inode);

        /* walk up the tree and look for a free index entry */
        curp = path + depth;
        while (i > 0 && !EXT_HAS_FREE_INDEX(curp)) {
                i--;
                curp--;
        }

        /* we use already allocated block for index block,
         * so subsequent data blocks should be contiguous */
        if (EXT_HAS_FREE_INDEX(curp)) {
                /* if we found index with free entry, then use that
                 * entry: create all needed subtree and add new leaf */
                err = ext4_ext_split(handle, inode, path, newext, i);
                if (err)
                        goto out;

                /* refill path */
                ext4_ext_drop_refs(path);
                path = ext4_ext_find_extent(inode,
                                    (ext4_lblk_t)le32_to_cpu(newext->ee_block),
                                    path);
                if (IS_ERR(path))
                        err = PTR_ERR(path);
        } else {
                /* tree is full, time to grow in depth */
                err = ext4_ext_grow_indepth(handle, inode, path, newext);
                if (err)
                        goto out;

                /* refill path */
                ext4_ext_drop_refs(path);
                path = ext4_ext_find_extent(inode,
                                   (ext4_lblk_t)le32_to_cpu(newext->ee_block),
                                    path);
                if (IS_ERR(path)) {
                        err = PTR_ERR(path);
                        goto out;
                }

                /*
                 * only first (depth 0 -> 1) produces free space;
                 * in all other cases we have to split the grown tree
                 */
                depth = ext_depth(inode);
                if (path[depth].p_hdr->eh_entries == path[depth].p_hdr->eh_max) {
                        /* now we need to split */
                        goto repeat;
                }
        }

out:
        return err;
}

/*
 * search the closest allocated block to the left for *logical
 * and returns it at @logical + its physical address at @phys
 * if *logical is the smallest allocated block, the function
 * returns 0 at @phys
 * return value contains 0 (success) or error code
 */
int
ext4_ext_search_left(struct inode *inode, struct ext4_ext_path *path,
                        ext4_lblk_t *logical, ext4_fsblk_t *phys)
{
        struct ext4_extent_idx *ix;
        struct ext4_extent *ex;
        int depth, ee_len;

        BUG_ON(path == NULL);
        depth = path->p_depth;
        *phys = 0;

        if (depth == 0 && path->p_ext == NULL)
                return 0;

        /* usually extent in the path covers blocks smaller
         * than *logical, but it can be that extent is the
         * first one in the file */

        ex = path[depth].p_ext;
        ee_len = ext4_ext_get_actual_len(ex);
        if (*logical < le32_to_cpu(ex->ee_block)) {
                BUG_ON(EXT_FIRST_EXTENT(path[depth].p_hdr) != ex);
                while (--depth >= 0) {
                        ix = path[depth].p_idx;
                        BUG_ON(ix != EXT_FIRST_INDEX(path[depth].p_hdr));
                }
                return 0;
        }

        BUG_ON(*logical < (le32_to_cpu(ex->ee_block) + ee_len));

        *logical = le32_to_cpu(ex->ee_block) + ee_len - 1;
        *phys = ext_pblock(ex) + ee_len - 1;
        return 0;
}

/*
 * search the closest allocated block to the right for *logical
 * and returns it at @logical + its physical address at @phys
 * if *logical is the largest allocated block, the function
 * returns 0 at @phys
 * return value contains 0 (success) or error code
 */
int
ext4_ext_search_right(struct inode *inode, struct ext4_ext_path *path,
                        ext4_lblk_t *logical, ext4_fsblk_t *phys)
{
        struct buffer_head *bh = NULL;
        struct ext4_extent_header *eh;
        struct ext4_extent_idx *ix;
        struct ext4_extent *ex;
        ext4_fsblk_t block;
        int depth, ee_len;

        BUG_ON(path == NULL);
        depth = path->p_depth;
        *phys = 0;

        if (depth == 0 && path->p_ext == NULL)
                return 0;

        /* usually extent in the path covers blocks smaller
         * than *logical, but it can be that extent is the
         * first one in the file */

        ex = path[depth].p_ext;
        ee_len = ext4_ext_get_actual_len(ex);
        if (*logical < le32_to_cpu(ex->ee_block)) {
                BUG_ON(EXT_FIRST_EXTENT(path[depth].p_hdr) != ex);
                while (--depth >= 0) {
                        ix = path[depth].p_idx;
                        BUG_ON(ix != EXT_FIRST_INDEX(path[depth].p_hdr));
                }
                *logical = le32_to_cpu(ex->ee_block);
                *phys = ext_pblock(ex);
                return 0;
        }

        BUG_ON(*logical < (le32_to_cpu(ex->ee_block) + ee_len));

        if (ex != EXT_LAST_EXTENT(path[depth].p_hdr)) {
                /* next allocated block in this leaf */
                ex++;
                *logical = le32_to_cpu(ex->ee_block);
                *phys = ext_pblock(ex);
                return 0;
        }

        /* go up and search for index to the right */
        while (--depth >= 0) {
                ix = path[depth].p_idx;
                if (ix != EXT_LAST_INDEX(path[depth].p_hdr))
                        break;
        }

        if (depth < 0) {
                /* we've gone up to the root and
                 * found no index to the right */
                return 0;
        }

        /* we've found index to the right, let's
         * follow it and find the closest allocated
         * block to the right */
        ix++;
        block = idx_pblock(ix);
        while (++depth < path->p_depth) {
                bh = sb_bread(inode->i_sb, block);
                if (bh == NULL)
                        return -EIO;
                eh = ext_block_hdr(bh);
                if (ext4_ext_check_header(inode, eh, path->p_depth - depth)) {
                        put_bh(bh);
                        return -EIO;
                }
                ix = EXT_FIRST_INDEX(eh);
                block = idx_pblock(ix);
                put_bh(bh);
        }

        bh = sb_bread(inode->i_sb, block);
        if (bh == NULL)
                return -EIO;
        eh = ext_block_hdr(bh);
        if (ext4_ext_check_header(inode, eh, path->p_depth - depth)) {
                put_bh(bh);
                return -EIO;
        }
        ex = EXT_FIRST_EXTENT(eh);
        *logical = le32_to_cpu(ex->ee_block);
        *phys = ext_pblock(ex);
        put_bh(bh);
        return 0;

}

/*
 * ext4_ext_next_allocated_block:
 * returns allocated block in subsequent extent or EXT_MAX_BLOCK.
 * NOTE: it considers block number from index entry as
 * allocated block. Thus, index entries have to be consistent
 * with leaves.
 */
static ext4_lblk_t
ext4_ext_next_allocated_block(struct ext4_ext_path *path)
{
        int depth;

        BUG_ON(path == NULL);
        depth = path->p_depth;

        if (depth == 0 && path->p_ext == NULL)
                return EXT_MAX_BLOCK;

        while (depth >= 0) {
                if (depth == path->p_depth) {
                        /* leaf */
                        if (path[depth].p_ext !=
                                        EXT_LAST_EXTENT(path[depth].p_hdr))
                                return le32_to_cpu(path[depth].p_ext[1].ee_block);
                } else {
                        /* index */
                        if (path[depth].p_idx !=
                                        EXT_LAST_INDEX(path[depth].p_hdr))
                                return le32_to_cpu(path[depth].p_idx[1].ei_block);
                }
                depth--;
        }

        return EXT_MAX_BLOCK;
}
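
/*
 * Note (added): the walk starts at the leaf and climbs toward the
 * root, returning the start block of the first right-hand sibling it
 * finds.  Illustrative example (tree shape assumed): if path points
 * at the last extent of its leaf but the parent index block has a
 * following entry starting at logical block 500, this returns 500.
 */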

/*
 * ext4_ext_next_leaf_block:
 * returns first allocated block from next leaf or EXT_MAX_BLOCK
 */
static ext4_lblk_t ext4_ext_next_leaf_block(struct inode *inode,
                                        struct ext4_ext_path *path)
{
        int depth;

        BUG_ON(path == NULL);
        depth = path->p_depth;

        /* zero-tree has no leaf blocks at all */
        if (depth == 0)
                return EXT_MAX_BLOCK;

        /* go to index block */
        depth--;

        while (depth >= 0) {
                if (path[depth].p_idx !=
                                EXT_LAST_INDEX(path[depth].p_hdr))
                        return (ext4_lblk_t)
                                le32_to_cpu(path[depth].p_idx[1].ei_block);
                depth--;
        }

        return EXT_MAX_BLOCK;
}

/*
 * ext4_ext_correct_indexes:
 * if leaf gets modified and modified extent is first in the leaf,
 * then we have to correct all indexes above.
 * TODO: do we need to correct tree in all cases?
 */
static int ext4_ext_correct_indexes(handle_t *handle, struct inode *inode,
                                struct ext4_ext_path *path)
{
        struct ext4_extent_header *eh;
        int depth = ext_depth(inode);
        struct ext4_extent *ex;
        __le32 border;
        int k, err = 0;

        eh = path[depth].p_hdr;
        ex = path[depth].p_ext;
        BUG_ON(ex == NULL);
        BUG_ON(eh == NULL);

        if (depth == 0) {
                /* there is no tree at all */
                return 0;
        }

        if (ex != EXT_FIRST_EXTENT(eh)) {
                /* we correct tree if first leaf got modified only */
                return 0;
        }

        /*
         * TODO: we need correction if border is smaller than current one
         */
        k = depth - 1;
        border = path[depth].p_ext->ee_block;
        err = ext4_ext_get_access(handle, inode, path + k);
        if (err)
                return err;
        path[k].p_idx->ei_block = border;
        err = ext4_ext_dirty(handle, inode, path + k);
        if (err)
                return err;

        while (k--) {
                /* change all left-side indexes */
                if (path[k+1].p_idx != EXT_FIRST_INDEX(path[k+1].p_hdr))
                        break;
                err = ext4_ext_get_access(handle, inode, path + k);
                if (err)
                        break;
                path[k].p_idx->ei_block = border;
                err = ext4_ext_dirty(handle, inode, path + k);
                if (err)
                        break;
        }

        return err;
}

static int
ext4_can_extents_be_merged(struct inode *inode, struct ext4_extent *ex1,
                                struct ext4_extent *ex2)
{
        unsigned short ext1_ee_len, ext2_ee_len, max_len;

        /*
         * Make sure that either both extents are uninitialized, or
         * both are _not_.
         */
        if (ext4_ext_is_uninitialized(ex1) ^ ext4_ext_is_uninitialized(ex2))
                return 0;

        if (ext4_ext_is_uninitialized(ex1))
                max_len = EXT_UNINIT_MAX_LEN;
        else
                max_len = EXT_INIT_MAX_LEN;

        ext1_ee_len = ext4_ext_get_actual_len(ex1);
        ext2_ee_len = ext4_ext_get_actual_len(ex2);

        if (le32_to_cpu(ex1->ee_block) + ext1_ee_len !=
                        le32_to_cpu(ex2->ee_block))
                return 0;

        /*
         * To allow future support for preallocated extents to be added
         * as an RO_COMPAT feature, refuse to merge two extents if
         * this can result in the top bit of ee_len being set.
         */
        if (ext1_ee_len + ext2_ee_len > max_len)
                return 0;
#ifdef AGGRESSIVE_TEST
        if (ext1_ee_len >= 4)
                return 0;
#endif

        if (ext_pblock(ex1) + ext1_ee_len == ext_pblock(ex2))
                return 1;
        return 0;
}
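
/*
 * Note (added): illustrative example with assumed values.  ex1 at
 * logical block 100, length 8, physical block 500, and ex2 at logical
 * block 108, physical block 508 are contiguous in both spaces, so
 * they are mergeable (as long as the combined length fits max_len);
 * if ex2 instead started at physical block 600, the logical check
 * would pass but the physical check would reject the merge.
 */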

/*
 * This function tries to merge the "ex" extent to the next extent in the tree.
 * It always tries to merge towards right. If you want to merge towards
 * left, pass "ex - 1" as argument instead of "ex".
 * Returns 0 if the extents (ex and ex+1) were _not_ merged and returns
 * 1 if they got merged.
 */
int ext4_ext_try_to_merge(struct inode *inode,
                          struct ext4_ext_path *path,
                          struct ext4_extent *ex)
{
        struct ext4_extent_header *eh;
        unsigned int depth, len;
        int merge_done = 0;
        int uninitialized = 0;

        depth = ext_depth(inode);
        BUG_ON(path[depth].p_hdr == NULL);
        eh = path[depth].p_hdr;

        while (ex < EXT_LAST_EXTENT(eh)) {
                if (!ext4_can_extents_be_merged(inode, ex, ex + 1))
                        break;
                /* merge with next extent! */
                if (ext4_ext_is_uninitialized(ex))
                        uninitialized = 1;
                ex->ee_len = cpu_to_le16(ext4_ext_get_actual_len(ex)
                                + ext4_ext_get_actual_len(ex + 1));
                if (uninitialized)
                        ext4_ext_mark_uninitialized(ex);

                if (ex + 1 < EXT_LAST_EXTENT(eh)) {
                        len = (EXT_LAST_EXTENT(eh) - ex - 1)
                                * sizeof(struct ext4_extent);
                        memmove(ex + 1, ex + 2, len);
                }
                le16_add_cpu(&eh->eh_entries, -1);
                merge_done = 1;
                WARN_ON(eh->eh_entries == 0);
                if (!eh->eh_entries)
                        ext4_error(inode->i_sb, "ext4_ext_try_to_merge",
                           "inode#%lu, eh->eh_entries = 0!", inode->i_ino);
        }

        return merge_done;
}

/*
 * check if a portion of the "newext" extent overlaps with an
 * existing extent.
 *
 * If there is an overlap discovered, it updates the length of the newext
 * such that there will be no overlap, and then returns 1.
 * If there is no overlap found, it returns 0.
 */
unsigned int ext4_ext_check_overlap(struct inode *inode,
                                    struct ext4_extent *newext,
                                    struct ext4_ext_path *path)
{
        ext4_lblk_t b1, b2;
        unsigned int depth, len1;
        unsigned int ret = 0;

        b1 = le32_to_cpu(newext->ee_block);
        len1 = ext4_ext_get_actual_len(newext);
        depth = ext_depth(inode);
        if (!path[depth].p_ext)
                goto out;
        b2 = le32_to_cpu(path[depth].p_ext->ee_block);

        /*
         * get the next allocated block if the extent in the path
         * is before the requested block(s)
         */
        if (b2 < b1) {
                b2 = ext4_ext_next_allocated_block(path);
                if (b2 == EXT_MAX_BLOCK)
                        goto out;
        }

        /* check for wrap through zero on extent logical start block*/
        if (b1 + len1 < b1) {
                len1 = EXT_MAX_BLOCK - b1;
                newext->ee_len = cpu_to_le16(len1);
                ret = 1;
        }

        /* check for overlap */
        if (b1 + len1 > b2) {
                newext->ee_len = cpu_to_le16(b2 - b1);
                ret = 1;
        }
out:
        return ret;
}
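
/*
 * Note (added): illustrative example with assumed values.  If newext
 * starts at logical block b1 = 100 with len1 = 50 and the next
 * allocated extent starts at b2 = 120, then b1 + len1 = 150 > 120, so
 * newext is trimmed to ee_len = 120 - 100 = 20 and 1 is returned.
 */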
1469
1470 /*
1471  * ext4_ext_insert_extent:
1472  * tries to merge requsted extent into the existing extent or
1473  * inserts requested extent as new one into the tree,
1474  * creating new leaf in the no-space case.
1475  */
1476 int ext4_ext_insert_extent(handle_t *handle, struct inode *inode,
1477                                 struct ext4_ext_path *path,
1478                                 struct ext4_extent *newext)
1479 {
1480         struct ext4_extent_header *eh;
1481         struct ext4_extent *ex, *fex;
1482         struct ext4_extent *nearex; /* nearest extent */
1483         struct ext4_ext_path *npath = NULL;
1484         int depth, len, err;
1485         ext4_lblk_t next;
1486         unsigned uninitialized = 0;
1487
1488         BUG_ON(ext4_ext_get_actual_len(newext) == 0);
1489         depth = ext_depth(inode);
1490         ex = path[depth].p_ext;
1491         BUG_ON(path[depth].p_hdr == NULL);
1492
1493         /* try to insert block into found extent and return */
1494         if (ex && ext4_can_extents_be_merged(inode, ex, newext)) {
1495                 ext_debug("append %d block to %d:%d (from %llu)\n",
1496                                 ext4_ext_get_actual_len(newext),
1497                                 le32_to_cpu(ex->ee_block),
1498                                 ext4_ext_get_actual_len(ex), ext_pblock(ex));
1499                 err = ext4_ext_get_access(handle, inode, path + depth);
1500                 if (err)
1501                         return err;
1502
1503                 /*
1504                  * ext4_can_extents_be_merged should have checked that either
1505                  * both extents are uninitialized, or both aren't. Thus we
1506                  * need to check only one of them here.
1507                  */
1508                 if (ext4_ext_is_uninitialized(ex))
1509                         uninitialized = 1;
1510                 ex->ee_len = cpu_to_le16(ext4_ext_get_actual_len(ex)
1511                                         + ext4_ext_get_actual_len(newext));
1512                 if (uninitialized)
1513                         ext4_ext_mark_uninitialized(ex);
1514                 eh = path[depth].p_hdr;
1515                 nearex = ex;
1516                 goto merge;
1517         }
1518
1519 repeat:
1520         depth = ext_depth(inode);
1521         eh = path[depth].p_hdr;
1522         if (le16_to_cpu(eh->eh_entries) < le16_to_cpu(eh->eh_max))
1523                 goto has_space;
1524
1525         /* probably next leaf has space for us? */
1526         fex = EXT_LAST_EXTENT(eh);
1527         next = ext4_ext_next_leaf_block(inode, path);
1528         if (le32_to_cpu(newext->ee_block) > le32_to_cpu(fex->ee_block)
1529             && next != EXT_MAX_BLOCK) {
1530                 ext_debug("next leaf block - %d\n", next);
1531                 BUG_ON(npath != NULL);
1532                 npath = ext4_ext_find_extent(inode, next, NULL);
1533                 if (IS_ERR(npath))
1534                         return PTR_ERR(npath);
1535                 BUG_ON(npath->p_depth != path->p_depth);
1536                 eh = npath[depth].p_hdr;
1537                 if (le16_to_cpu(eh->eh_entries) < le16_to_cpu(eh->eh_max)) {
1538                         ext_debug("next leaf isnt full(%d)\n",
1539                                   le16_to_cpu(eh->eh_entries));
1540                         path = npath;
1541                         goto repeat;
1542                 }
1543                 ext_debug("next leaf has no free space(%d,%d)\n",
1544                           le16_to_cpu(eh->eh_entries), le16_to_cpu(eh->eh_max));
1545         }
1546
1547         /*
1548          * There is no free space in the found leaf.
1549          * We are going to add a new leaf to the tree.
1550          */
1551         err = ext4_ext_create_new_leaf(handle, inode, path, newext);
1552         if (err)
1553                 goto cleanup;
1554         depth = ext_depth(inode);
1555         eh = path[depth].p_hdr;
1556
1557 has_space:
1558         nearex = path[depth].p_ext;
1559
1560         err = ext4_ext_get_access(handle, inode, path + depth);
1561         if (err)
1562                 goto cleanup;
1563
1564         if (!nearex) {
1565                 /* there is no extent in this leaf, create first one */
1566                 ext_debug("first extent in the leaf: %d:%llu:%d\n",
1567                                 le32_to_cpu(newext->ee_block),
1568                                 ext_pblock(newext),
1569                                 ext4_ext_get_actual_len(newext));
1570                 path[depth].p_ext = EXT_FIRST_EXTENT(eh);
1571         } else if (le32_to_cpu(newext->ee_block)
1572                            > le32_to_cpu(nearex->ee_block)) {
1573 /*              BUG_ON(newext->ee_block == nearex->ee_block); */
1574                 if (nearex != EXT_LAST_EXTENT(eh)) {
1575                         len = EXT_MAX_EXTENT(eh) - nearex;
1576                         len = (len - 1) * sizeof(struct ext4_extent);
1577                         len = len < 0 ? 0 : len;
1578                         ext_debug("insert %d:%llu:%d after: nearest 0x%p, "
1579                                         "move %d from 0x%p to 0x%p\n",
1580                                         le32_to_cpu(newext->ee_block),
1581                                         ext_pblock(newext),
1582                                         ext4_ext_get_actual_len(newext),
1583                                         nearex, len, nearex + 1, nearex + 2);
1584                         memmove(nearex + 2, nearex + 1, len);
1585                 }
1586                 path[depth].p_ext = nearex + 1;
1587         } else {
1588                 BUG_ON(newext->ee_block == nearex->ee_block);
1589                 len = (EXT_MAX_EXTENT(eh) - nearex) * sizeof(struct ext4_extent);
1590                 len = len < 0 ? 0 : len;
1591                 ext_debug("insert %d:%llu:%d before: nearest 0x%p, "
1592                                 "move %d from 0x%p to 0x%p\n",
1593                                 le32_to_cpu(newext->ee_block),
1594                                 ext_pblock(newext),
1595                                 ext4_ext_get_actual_len(newext),
1596                                 nearex, len, nearex + 1, nearex + 2);
1597                 memmove(nearex + 1, nearex, len);
1598                 path[depth].p_ext = nearex;
1599         }
1600
1601         le16_add_cpu(&eh->eh_entries, 1);
1602         nearex = path[depth].p_ext;
1603         nearex->ee_block = newext->ee_block;
1604         ext4_ext_store_pblock(nearex, ext_pblock(newext));
1605         nearex->ee_len = newext->ee_len;
1606
1607 merge:
1608         /* try to merge extents to the right */
1609         ext4_ext_try_to_merge(inode, path, nearex);
1610
1611         /* try to merge extents to the left */
1612
1613         /* time to correct all indexes above */
1614         err = ext4_ext_correct_indexes(handle, inode, path);
1615         if (err)
1616                 goto cleanup;
1617
1618         err = ext4_ext_dirty(handle, inode, path + depth);
1619
1620 cleanup:
1621         if (npath) {
1622                 ext4_ext_drop_refs(npath);
1623                 kfree(npath);
1624         }
1625         ext4_ext_tree_changed(inode);
1626         ext4_ext_invalidate_cache(inode);
1627         return err;
1628 }
1629
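/*
 * Illustrative sketch (not part of the original file): how a caller
 * typically builds an extent and hands it to ext4_ext_insert_extent().
 * The helper name and its arguments are assumptions made for the
 * example, and error handling is kept to the minimum.
 */
static int __maybe_unused example_insert_one_extent(handle_t *handle,
                                                    struct inode *inode,
                                                    ext4_lblk_t lblk,
                                                    ext4_fsblk_t pblk,
                                                    unsigned short len)
{
        struct ext4_ext_path *path;
        struct ext4_extent newex;
        int err;

        /* describe the mapping: lblk -> pblk, len blocks long */
        newex.ee_block = cpu_to_le32(lblk);
        newex.ee_len   = cpu_to_le16(len);
        ext4_ext_store_pblock(&newex, pblk);

        /* find the leaf this extent belongs to */
        path = ext4_ext_find_extent(inode, lblk, NULL);
        if (IS_ERR(path))
                return PTR_ERR(path);

        /* merge into a neighbour or insert as a new entry */
        err = ext4_ext_insert_extent(handle, inode, path, &newex);

        ext4_ext_drop_refs(path);
        kfree(path);
        return err;
}
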
1630 int ext4_ext_walk_space(struct inode *inode, ext4_lblk_t block,
1631                         ext4_lblk_t num, ext_prepare_callback func,
1632                         void *cbdata)
1633 {
1634         struct ext4_ext_path *path = NULL;
1635         struct ext4_ext_cache cbex;
1636         struct ext4_extent *ex;
1637         ext4_lblk_t next, start = 0, end = 0;
1638         ext4_lblk_t last = block + num;
1639         int depth, exists, err = 0;
1640
1641         BUG_ON(func == NULL);
1642         BUG_ON(inode == NULL);
1643
1644         while (block < last && block != EXT_MAX_BLOCK) {
1645                 num = last - block;
1646                 /* find extent for this block */
1647                 path = ext4_ext_find_extent(inode, block, path);
1648                 if (IS_ERR(path)) {
1649                         err = PTR_ERR(path);
1650                         path = NULL;
1651                         break;
1652                 }
1653
1654                 depth = ext_depth(inode);
1655                 BUG_ON(path[depth].p_hdr == NULL);
1656                 ex = path[depth].p_ext;
1657                 next = ext4_ext_next_allocated_block(path);
1658
1659                 exists = 0;
1660                 if (!ex) {
1661                         /* there is no extent yet, so try to allocate
1662                          * all requested space */
1663                         start = block;
1664                         end = block + num;
1665                 } else if (le32_to_cpu(ex->ee_block) > block) {
1666                         /* need to allocate space before found extent */
1667                         start = block;
1668                         end = le32_to_cpu(ex->ee_block);
1669                         if (block + num < end)
1670                                 end = block + num;
1671                 } else if (block >= le32_to_cpu(ex->ee_block)
1672                                         + ext4_ext_get_actual_len(ex)) {
1673                         /* need to allocate space after found extent */
1674                         start = block;
1675                         end = block + num;
1676                         if (end >= next)
1677                                 end = next;
1678                 } else if (block >= le32_to_cpu(ex->ee_block)) {
1679                         /*
1680                          * some part of requested space is covered
1681                          * by found extent
1682                          */
1683                         start = block;
1684                         end = le32_to_cpu(ex->ee_block)
1685                                 + ext4_ext_get_actual_len(ex);
1686                         if (block + num < end)
1687                                 end = block + num;
1688                         exists = 1;
1689                 } else {
1690                         BUG();
1691                 }
1692                 BUG_ON(end <= start);
1693
1694                 if (!exists) {
1695                         cbex.ec_block = start;
1696                         cbex.ec_len = end - start;
1697                         cbex.ec_start = 0;
1698                         cbex.ec_type = EXT4_EXT_CACHE_GAP;
1699                 } else {
1700                         cbex.ec_block = le32_to_cpu(ex->ee_block);
1701                         cbex.ec_len = ext4_ext_get_actual_len(ex);
1702                         cbex.ec_start = ext_pblock(ex);
1703                         cbex.ec_type = EXT4_EXT_CACHE_EXTENT;
1704                 }
1705
1706                 BUG_ON(cbex.ec_len == 0);
1707                 err = func(inode, path, &cbex, ex, cbdata);
1708                 ext4_ext_drop_refs(path);
1709
1710                 if (err < 0)
1711                         break;
1712
1713                 if (err == EXT_REPEAT)
1714                         continue;
1715                 else if (err == EXT_BREAK) {
1716                         err = 0;
1717                         break;
1718                 }
1719
1720                 if (ext_depth(inode) != depth) {
1721                         /* depth was changed. we have to realloc path */
1722                         kfree(path);
1723                         path = NULL;
1724                 }
1725
1726                 block = cbex.ec_block + cbex.ec_len;
1727         }
1728
1729         if (path) {
1730                 ext4_ext_drop_refs(path);
1731                 kfree(path);
1732         }
1733
1734         return err;
1735 }
1736
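/*
 * Illustrative sketch (not from the original source): a minimal
 * ext_prepare_callback that counts mapped blocks while walking the
 * tree.  example_count_cb and the usage note below are assumptions
 * made for the example; EXT_CONTINUE and the EXT4_EXT_CACHE_* types
 * come from ext4_extents.h.
 */
static int __maybe_unused example_count_cb(struct inode *inode,
                                           struct ext4_ext_path *path,
                                           struct ext4_ext_cache *cbex,
                                           struct ext4_extent *ex,
                                           void *cbdata)
{
        ext4_lblk_t *count = cbdata;

        /* gaps (EXT4_EXT_CACHE_GAP) are reported too; skip them */
        if (cbex->ec_type == EXT4_EXT_CACHE_EXTENT)
                *count += cbex->ec_len;
        return EXT_CONTINUE;
}

/* usage: ext4_ext_walk_space(inode, 0, EXT_MAX_BLOCK,
 *                            example_count_cb, &count); */
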
1737 static void
1738 ext4_ext_put_in_cache(struct inode *inode, ext4_lblk_t block,
1739                         __u32 len, ext4_fsblk_t start, int type)
1740 {
1741         struct ext4_ext_cache *cex;
1742         BUG_ON(len == 0);
1743         cex = &EXT4_I(inode)->i_cached_extent;
1744         cex->ec_type = type;
1745         cex->ec_block = block;
1746         cex->ec_len = len;
1747         cex->ec_start = start;
1748 }
1749
1750 /*
1751  * ext4_ext_put_gap_in_cache:
1752  * calculate boundaries of the gap that the requested block fits into
1753  * and cache this gap
1754  */
1755 static void
1756 ext4_ext_put_gap_in_cache(struct inode *inode, struct ext4_ext_path *path,
1757                                 ext4_lblk_t block)
1758 {
1759         int depth = ext_depth(inode);
1760         unsigned long len;
1761         ext4_lblk_t lblock;
1762         struct ext4_extent *ex;
1763
1764         ex = path[depth].p_ext;
1765         if (ex == NULL) {
1766                 /* there is no extent yet, so gap is [0;-] */
1767                 lblock = 0;
1768                 len = EXT_MAX_BLOCK;
1769                 ext_debug("cache gap(whole file):");
1770         } else if (block < le32_to_cpu(ex->ee_block)) {
1771                 lblock = block;
1772                 len = le32_to_cpu(ex->ee_block) - block;
1773                 ext_debug("cache gap(before): %u [%u:%u]",
1774                                 block,
1775                                 le32_to_cpu(ex->ee_block),
1776                                  ext4_ext_get_actual_len(ex));
1777         } else if (block >= le32_to_cpu(ex->ee_block)
1778                         + ext4_ext_get_actual_len(ex)) {
1779                 ext4_lblk_t next;
1780                 lblock = le32_to_cpu(ex->ee_block)
1781                         + ext4_ext_get_actual_len(ex);
1782
1783                 next = ext4_ext_next_allocated_block(path);
1784                 ext_debug("cache gap(after): [%u:%u] %u",
1785                                 le32_to_cpu(ex->ee_block),
1786                                 ext4_ext_get_actual_len(ex),
1787                                 block);
1788                 BUG_ON(next == lblock);
1789                 len = next - lblock;
1790         } else {
1791                 lblock = len = 0;
1792                 BUG();
1793         }
1794
1795         ext_debug(" -> %u:%lu\n", lblock, len);
1796         ext4_ext_put_in_cache(inode, lblock, len, 0, EXT4_EXT_CACHE_GAP);
1797 }
1798
1799 static int
1800 ext4_ext_in_cache(struct inode *inode, ext4_lblk_t block,
1801                         struct ext4_extent *ex)
1802 {
1803         struct ext4_ext_cache *cex;
1804
1805         cex = &EXT4_I(inode)->i_cached_extent;
1806
1807         /* does the cache hold valid data? */
1808         if (cex->ec_type == EXT4_EXT_CACHE_NO)
1809                 return EXT4_EXT_CACHE_NO;
1810
1811         BUG_ON(cex->ec_type != EXT4_EXT_CACHE_GAP &&
1812                         cex->ec_type != EXT4_EXT_CACHE_EXTENT);
1813         if (block >= cex->ec_block && block < cex->ec_block + cex->ec_len) {
1814                 ex->ee_block = cpu_to_le32(cex->ec_block);
1815                 ext4_ext_store_pblock(ex, cex->ec_start);
1816                 ex->ee_len = cpu_to_le16(cex->ec_len);
1817                 ext_debug("%u cached by %u:%u:%llu\n",
1818                                 block,
1819                                 cex->ec_block, cex->ec_len, cex->ec_start);
1820                 return cex->ec_type;
1821         }
1822
1823         /* not in cache */
1824         return EXT4_EXT_CACHE_NO;
1825 }
1826
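/*
 * Illustrative sketch (not part of the original file): the typical
 * lookup pattern around the single-entry extent cache.  The empty
 * case bodies are placeholders for a real caller's logic.
 */
static void __maybe_unused example_cache_lookup(struct inode *inode,
                                                ext4_lblk_t block)
{
        struct ext4_extent ex;

        switch (ext4_ext_in_cache(inode, block, &ex)) {
        case EXT4_EXT_CACHE_EXTENT:
                /* block is mapped; translate within the cached extent */
                break;
        case EXT4_EXT_CACHE_GAP:
                /* block is known to be a hole */
                break;
        case EXT4_EXT_CACHE_NO:
                /* cache miss: fall back to a tree lookup */
                break;
        }
}
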
1827 /*
1828  * ext4_ext_rm_idx:
1829  * removes index from the index block.
1830  * It is used in the truncate case only; thus all requests are for the
1831  * last index in the block only.
1832  */
1833 static int ext4_ext_rm_idx(handle_t *handle, struct inode *inode,
1834                         struct ext4_ext_path *path)
1835 {
1836         struct buffer_head *bh;
1837         int err;
1838         ext4_fsblk_t leaf;
1839
1840         /* free index block */
1841         path--;
1842         leaf = idx_pblock(path->p_idx);
1843         BUG_ON(path->p_hdr->eh_entries == 0);
1844         err = ext4_ext_get_access(handle, inode, path);
1845         if (err)
1846                 return err;
1847         le16_add_cpu(&path->p_hdr->eh_entries, -1);
1848         err = ext4_ext_dirty(handle, inode, path);
1849         if (err)
1850                 return err;
1851         ext_debug("index is empty, remove it, free block %llu\n", leaf);
1852         bh = sb_find_get_block(inode->i_sb, leaf);
1853         ext4_forget(handle, 1, inode, bh, leaf);
1854         ext4_free_blocks(handle, inode, leaf, 1, 1);
1855         return err;
1856 }
1857
1858 /*
1859  * ext4_ext_calc_credits_for_single_extent:
1860  * This routine returns the maximum credits needed to insert an extent
1861  * into the extent tree.
1862  * When passing the actual path, the caller should calculate the credits
1863  * under i_data_sem.
1864  */
1865 int ext4_ext_calc_credits_for_single_extent(struct inode *inode, int nrblocks,
1866                                                 struct ext4_ext_path *path)
1867 {
1868         if (path) {
1869                 int depth = ext_depth(inode);
1870                 int ret = 0;
1871
1872                 /* perhaps there is space in the leaf? */
1873                 if (le16_to_cpu(path[depth].p_hdr->eh_entries)
1874                                 < le16_to_cpu(path[depth].p_hdr->eh_max)) {
1875
1876                         /*
1877                          *  There is some space left in the leaf; no
1878                          *  need to account for the leaf block credit.
1879                          *
1880                          *  Bitmaps, block group descriptor blocks
1881                          *  and other metadata blocks still need to be
1882                          *  accounted for.
1883                          */
1884                         /* 1 bitmap, 1 block group descriptor */
1885                         ret = 2 + EXT4_META_TRANS_BLOCKS(inode->i_sb);
1886                 }
1887         }
1888
1889         return ext4_chunk_trans_blocks(inode, nrblocks);
1890 }
1891
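/*
 * Illustrative sketch (not from the original source): reserving
 * journal credits before inserting an extent.  Passing a NULL path
 * asks for the worst-case estimate; the helper name is an assumption
 * made for the example.
 */
static handle_t * __maybe_unused
example_start_extent_handle(struct inode *inode, int nrblocks)
{
        int credits = ext4_ext_calc_credits_for_single_extent(inode,
                                                              nrblocks, NULL);

        return ext4_journal_start(inode, credits);
}
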
1892 /*
1893  * How many index/leaf blocks need to be changed/allocated to modify nrblocks?
1894  *
1895  * If nrblocks fit in a single extent (chunk flag is 1), then
1896  * in the worst case each tree level's index/leaf needs to be changed;
1897  * if the tree splits due to inserting a new extent, then the old tree's
1898  * index/leaf blocks need to be updated too.
1899  *
1900  * If the nrblocks are discontiguous, they could cause
1901  * the whole tree to split more than once, but this is really rare.
1902  */
1903 int ext4_ext_index_trans_blocks(struct inode *inode, int nrblocks, int chunk)
1904 {
1905         int index;
1906         int depth = ext_depth(inode);
1907
1908         if (chunk)
1909                 index = depth * 2;
1910         else
1911                 index = depth * 3;
1912
1913         return index;
1914 }
1915
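/*
 * Worked example (illustrative numbers only): for a tree of depth 2,
 * a contiguous chunk accounts index = 2 * 2 = 4 index/leaf blocks,
 * while a discontiguous request accounts index = 2 * 3 = 6, covering
 * the extra split paths described above.
 */
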
1916 static int ext4_remove_blocks(handle_t *handle, struct inode *inode,
1917                                 struct ext4_extent *ex,
1918                                 ext4_lblk_t from, ext4_lblk_t to)
1919 {
1920         struct buffer_head *bh;
1921         unsigned short ee_len =  ext4_ext_get_actual_len(ex);
1922         int i, metadata = 0;
1923
1924         if (S_ISDIR(inode->i_mode) || S_ISLNK(inode->i_mode))
1925                 metadata = 1;
1926 #ifdef EXTENTS_STATS
1927         {
1928                 struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
1929                 spin_lock(&sbi->s_ext_stats_lock);
1930                 sbi->s_ext_blocks += ee_len;
1931                 sbi->s_ext_extents++;
1932                 if (ee_len < sbi->s_ext_min)
1933                         sbi->s_ext_min = ee_len;
1934                 if (ee_len > sbi->s_ext_max)
1935                         sbi->s_ext_max = ee_len;
1936                 if (ext_depth(inode) > sbi->s_depth_max)
1937                         sbi->s_depth_max = ext_depth(inode);
1938                 spin_unlock(&sbi->s_ext_stats_lock);
1939         }
1940 #endif
1941         if (from >= le32_to_cpu(ex->ee_block)
1942             && to == le32_to_cpu(ex->ee_block) + ee_len - 1) {
1943                 /* tail removal */
1944                 ext4_lblk_t num;
1945                 ext4_fsblk_t start;
1946
1947                 num = le32_to_cpu(ex->ee_block) + ee_len - from;
1948                 start = ext_pblock(ex) + ee_len - num;
1949                 ext_debug("free last %u blocks starting %llu\n", num, start);
1950                 for (i = 0; i < num; i++) {
1951                         bh = sb_find_get_block(inode->i_sb, start + i);
1952                         ext4_forget(handle, 0, inode, bh, start + i);
1953                 }
1954                 ext4_free_blocks(handle, inode, start, num, metadata);
1955         } else if (from == le32_to_cpu(ex->ee_block)
1956                    && to <= le32_to_cpu(ex->ee_block) + ee_len - 1) {
1957                 printk(KERN_INFO "strange request: removal %u-%u from %u:%u\n",
1958                         from, to, le32_to_cpu(ex->ee_block), ee_len);
1959         } else {
1960                 printk(KERN_INFO "strange request: removal(2) "
1961                                 "%u-%u from %u:%u\n",
1962                                 from, to, le32_to_cpu(ex->ee_block), ee_len);
1963         }
1964         return 0;
1965 }
1966
1967 static int
1968 ext4_ext_rm_leaf(handle_t *handle, struct inode *inode,
1969                 struct ext4_ext_path *path, ext4_lblk_t start)
1970 {
1971         int err = 0, correct_index = 0;
1972         int depth = ext_depth(inode), credits;
1973         struct ext4_extent_header *eh;
1974         ext4_lblk_t a, b, block;
1975         unsigned num;
1976         ext4_lblk_t ex_ee_block;
1977         unsigned short ex_ee_len;
1978         unsigned uninitialized = 0;
1979         struct ext4_extent *ex;
1980
1981         /* the header must be checked already in ext4_ext_remove_space() */
1982         ext_debug("truncate since %u in leaf\n", start);
1983         if (!path[depth].p_hdr)
1984                 path[depth].p_hdr = ext_block_hdr(path[depth].p_bh);
1985         eh = path[depth].p_hdr;
1986         BUG_ON(eh == NULL);
1987
1988         /* find where to start removing */
1989         ex = EXT_LAST_EXTENT(eh);
1990
1991         ex_ee_block = le32_to_cpu(ex->ee_block);
1992         if (ext4_ext_is_uninitialized(ex))
1993                 uninitialized = 1;
1994         ex_ee_len = ext4_ext_get_actual_len(ex);
1995
1996         while (ex >= EXT_FIRST_EXTENT(eh) &&
1997                         ex_ee_block + ex_ee_len > start) {
1998                 ext_debug("remove ext %lu:%u\n", ex_ee_block, ex_ee_len);
1999                 path[depth].p_ext = ex;
2000
2001                 a = ex_ee_block > start ? ex_ee_block : start;
2002                 b = ex_ee_block + ex_ee_len - 1 < EXT_MAX_BLOCK ?
2003                         ex_ee_block + ex_ee_len - 1 : EXT_MAX_BLOCK;
2004
2005                 ext_debug("  border %u:%u\n", a, b);
2006
2007                 if (a != ex_ee_block && b != ex_ee_block + ex_ee_len - 1) {
2008                         block = 0;
2009                         num = 0;
2010                         BUG();
2011                 } else if (a != ex_ee_block) {
2012                         /* remove tail of the extent */
2013                         block = ex_ee_block;
2014                         num = a - block;
2015                 } else if (b != ex_ee_block + ex_ee_len - 1) {
2016                         /* remove head of the extent */
2017                         block = a;
2018                         num = b - a;
2019                         /* there is no "make a hole" API yet */
2020                         BUG();
2021                 } else {
2022                         /* remove whole extent: excellent! */
2023                         block = ex_ee_block;
2024                         num = 0;
2025                         BUG_ON(a != ex_ee_block);
2026                         BUG_ON(b != ex_ee_block + ex_ee_len - 1);
2027                 }
2028
2029                 /*
2030                  * 3 for leaf, sb, and inode plus 2 (bmap and group
2031                  * descriptor) for each block group; assume two block
2032                  * groups plus ex_ee_len/blocks_per_block_group for
2033                  * the worst case
2034                  */
2035                 credits = 7 + 2*(ex_ee_len/EXT4_BLOCKS_PER_GROUP(inode->i_sb));
2036                 if (ex == EXT_FIRST_EXTENT(eh)) {
2037                         correct_index = 1;
2038                         credits += (ext_depth(inode)) + 1;
2039                 }
2040                 credits += 2 * EXT4_QUOTA_TRANS_BLOCKS(inode->i_sb);
2041
2042                 err = ext4_ext_journal_restart(handle, credits);
2043                 if (err)
2044                         goto out;
2045
2046                 err = ext4_ext_get_access(handle, inode, path + depth);
2047                 if (err)
2048                         goto out;
2049
2050                 err = ext4_remove_blocks(handle, inode, ex, a, b);
2051                 if (err)
2052                         goto out;
2053
2054                 if (num == 0) {
2055                         /* this extent is removed; mark slot entirely unused */
2056                         ext4_ext_store_pblock(ex, 0);
2057                         le16_add_cpu(&eh->eh_entries, -1);
2058                 }
2059
2060                 ex->ee_block = cpu_to_le32(block);
2061                 ex->ee_len = cpu_to_le16(num);
2062                 /*
2063                  * Do not mark uninitialized if all the blocks in the
2064                  * extent have been removed.
2065                  */
2066                 if (uninitialized && num)
2067                         ext4_ext_mark_uninitialized(ex);
2068
2069                 err = ext4_ext_dirty(handle, inode, path + depth);
2070                 if (err)
2071                         goto out;
2072
2073                 ext_debug("new extent: %u:%u:%llu\n", block, num,
2074                                 ext_pblock(ex));
2075                 ex--;
2076                 ex_ee_block = le32_to_cpu(ex->ee_block);
2077                 ex_ee_len = ext4_ext_get_actual_len(ex);
2078         }
2079
2080         if (correct_index && eh->eh_entries)
2081                 err = ext4_ext_correct_indexes(handle, inode, path);
2082
2083         /* if this leaf is free, then we should
2084          * remove it from index block above */
2085         if (err == 0 && eh->eh_entries == 0 && path[depth].p_bh != NULL)
2086                 err = ext4_ext_rm_idx(handle, inode, path + depth);
2087
2088 out:
2089         return err;
2090 }
2091
2092 /*
2093  * ext4_ext_more_to_rm:
2094  * returns 1 if current index has to be freed (even partial)
2095  */
2096 static int
2097 ext4_ext_more_to_rm(struct ext4_ext_path *path)
2098 {
2099         BUG_ON(path->p_idx == NULL);
2100
2101         if (path->p_idx < EXT_FIRST_INDEX(path->p_hdr))
2102                 return 0;
2103
2104         /*
2105          * if truncation at a deeper level happened, it was not partial,
2106          * so we have to consider the current index for truncation
2107          */
2108         if (le16_to_cpu(path->p_hdr->eh_entries) == path->p_block)
2109                 return 0;
2110         return 1;
2111 }
2112
2113 static int ext4_ext_remove_space(struct inode *inode, ext4_lblk_t start)
2114 {
2115         struct super_block *sb = inode->i_sb;
2116         int depth = ext_depth(inode);
2117         struct ext4_ext_path *path;
2118         handle_t *handle;
2119         int i = 0, err = 0;
2120
2121         ext_debug("truncate since %u\n", start);
2122
2123         /* probably the first extent we are going to free will be the last in the block */
2124         handle = ext4_journal_start(inode, depth + 1);
2125         if (IS_ERR(handle))
2126                 return PTR_ERR(handle);
2127
2128         ext4_ext_invalidate_cache(inode);
2129
2130         /*
2131          * We start scanning from right side, freeing all the blocks
2132          * after i_size and walking into the tree depth-wise.
2133          */
2134         path = kzalloc(sizeof(struct ext4_ext_path) * (depth + 1), GFP_NOFS);
2135         if (path == NULL) {
2136                 ext4_journal_stop(handle);
2137                 return -ENOMEM;
2138         }
2139         path[0].p_hdr = ext_inode_hdr(inode);
2140         if (ext4_ext_check_header(inode, path[0].p_hdr, depth)) {
2141                 err = -EIO;
2142                 goto out;
2143         }
2144         path[0].p_depth = depth;
2145
2146         while (i >= 0 && err == 0) {
2147                 if (i == depth) {
2148                         /* this is leaf block */
2149                         err = ext4_ext_rm_leaf(handle, inode, path, start);
2150                         /* root level has p_bh == NULL, brelse() eats this */
2151                         brelse(path[i].p_bh);
2152                         path[i].p_bh = NULL;
2153                         i--;
2154                         continue;
2155                 }
2156
2157                 /* this is index block */
2158                 if (!path[i].p_hdr) {
2159                         ext_debug("initialize header\n");
2160                         path[i].p_hdr = ext_block_hdr(path[i].p_bh);
2161                 }
2162
2163                 if (!path[i].p_idx) {
2164                         /* this level hasn't been touched yet */
2165                         path[i].p_idx = EXT_LAST_INDEX(path[i].p_hdr);
2166                         path[i].p_block = le16_to_cpu(path[i].p_hdr->eh_entries)+1;
2167                         ext_debug("init index ptr: hdr 0x%p, num %d\n",
2168                                   path[i].p_hdr,
2169                                   le16_to_cpu(path[i].p_hdr->eh_entries));
2170                 } else {
2171                         /* we were already here, look at the next index */
2172                         path[i].p_idx--;
2173                 }
2174
2175                 ext_debug("level %d - index, first 0x%p, cur 0x%p\n",
2176                                 i, EXT_FIRST_INDEX(path[i].p_hdr),
2177                                 path[i].p_idx);
2178                 if (ext4_ext_more_to_rm(path + i)) {
2179                         struct buffer_head *bh;
2180                         /* go to the next level */
2181                         ext_debug("move to level %d (block %llu)\n",
2182                                   i + 1, idx_pblock(path[i].p_idx));
2183                         memset(path + i + 1, 0, sizeof(*path));
2184                         bh = sb_bread(sb, idx_pblock(path[i].p_idx));
2185                         if (!bh) {
2186                                 /* should we reset i_size? */
2187                                 err = -EIO;
2188                                 break;
2189                         }
2190                         if (WARN_ON(i + 1 > depth)) {
2191                                 err = -EIO;
2192                                 break;
2193                         }
2194                         if (ext4_ext_check_header(inode, ext_block_hdr(bh),
2195                                                         depth - i - 1)) {
2196                                 err = -EIO;
2197                                 break;
2198                         }
2199                         path[i + 1].p_bh = bh;
2200
2201                         /* save actual number of indexes since this
2202                          * number is changed at the next iteration */
2203                         path[i].p_block = le16_to_cpu(path[i].p_hdr->eh_entries);
2204                         i++;
2205                 } else {
2206                         /* we finished processing this index, go up */
2207                         if (path[i].p_hdr->eh_entries == 0 && i > 0) {
2208                                 /* index is empty, remove it;
2209                                  * handle must be already prepared by
2210                                  * ext4_ext_rm_leaf() */
2211                                 err = ext4_ext_rm_idx(handle, inode, path + i);
2212                         }
2213                         /* root level has p_bh == NULL, brelse() eats this */
2214                         brelse(path[i].p_bh);
2215                         path[i].p_bh = NULL;
2216                         i--;
2217                         ext_debug("return to level %d\n", i);
2218                 }
2219         }
2220
2221         /* TODO: flexible tree reduction should be here */
2222         if (path->p_hdr->eh_entries == 0) {
2223                 /*
2224                  * truncate to zero freed all the tree,
2225                  * so we need to correct eh_depth
2226                  */
2227                 err = ext4_ext_get_access(handle, inode, path);
2228                 if (err == 0) {
2229                         ext_inode_hdr(inode)->eh_depth = 0;
2230                         ext_inode_hdr(inode)->eh_max =
2231                                 cpu_to_le16(ext4_ext_space_root(inode));
2232                         err = ext4_ext_dirty(handle, inode, path);
2233                 }
2234         }
2235 out:
2236         ext4_ext_tree_changed(inode);
2237         ext4_ext_drop_refs(path);
2238         kfree(path);
2239         ext4_journal_stop(handle);
2240
2241         return err;
2242 }
2243
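/*
 * Illustrative sketch (not part of the original file): how a truncate
 * path would compute the first block to remove before calling
 * ext4_ext_remove_space().  The rounding mirrors the usual
 * i_size-to-block conversion; treat this helper as an example, not as
 * the actual ext4 truncate implementation.
 */
static int __maybe_unused example_remove_tail(struct inode *inode)
{
        struct super_block *sb = inode->i_sb;
        ext4_lblk_t last_block;

        /* first block at or beyond i_size, rounded up */
        last_block = (i_size_read(inode) + sb->s_blocksize - 1)
                        >> EXT4_BLOCK_SIZE_BITS(sb);
        return ext4_ext_remove_space(inode, last_block);
}
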
2244 /*
2245  * called at mount time
2246  */
2247 void ext4_ext_init(struct super_block *sb)
2248 {
2249         /*
2250          * possible initialization would be here
2251          */
2252
2253         if (test_opt(sb, EXTENTS)) {
2254                 printk(KERN_INFO "EXT4-fs: file extents enabled");
2255 #ifdef AGGRESSIVE_TEST
2256                 printk(", aggressive tests");
2257 #endif
2258 #ifdef CHECK_BINSEARCH
2259                 printk(", check binsearch");
2260 #endif
2261 #ifdef EXTENTS_STATS
2262                 printk(", stats");
2263 #endif
2264                 printk("\n");
2265 #ifdef EXTENTS_STATS
2266                 spin_lock_init(&EXT4_SB(sb)->s_ext_stats_lock);
2267                 EXT4_SB(sb)->s_ext_min = 1 << 30;
2268                 EXT4_SB(sb)->s_ext_max = 0;
2269 #endif
2270         }
2271 }
2272
2273 /*
2274  * called at umount time
2275  */
2276 void ext4_ext_release(struct super_block *sb)
2277 {
2278         if (!test_opt(sb, EXTENTS))
2279                 return;
2280
2281 #ifdef EXTENTS_STATS
2282         if (EXT4_SB(sb)->s_ext_blocks && EXT4_SB(sb)->s_ext_extents) {
2283                 struct ext4_sb_info *sbi = EXT4_SB(sb);
2284                 printk(KERN_ERR "EXT4-fs: %lu blocks in %lu extents (%lu ave)\n",
2285                         sbi->s_ext_blocks, sbi->s_ext_extents,
2286                         sbi->s_ext_blocks / sbi->s_ext_extents);
2287                 printk(KERN_ERR "EXT4-fs: extents: %lu min, %lu max, max depth %lu\n",
2288                         sbi->s_ext_min, sbi->s_ext_max, sbi->s_depth_max);
2289         }
2290 #endif
2291 }
2292
2293 static void bi_complete(struct bio *bio, int error)
2294 {
2295         complete((struct completion *)bio->bi_private);
2296 }
2297
2298 /* FIXME!! we need to try to merge to left or right after zero-out  */
2299 static int ext4_ext_zeroout(struct inode *inode, struct ext4_extent *ex)
2300 {
2301         int ret = -EIO;
2302         struct bio *bio;
2303         int blkbits, blocksize;
2304         sector_t ee_pblock;
2305         struct completion event;
2306         unsigned int ee_len, len, done, offset;
2307
2308
2309         blkbits   = inode->i_blkbits;
2310         blocksize = inode->i_sb->s_blocksize;
2311         ee_len    = ext4_ext_get_actual_len(ex);
2312         ee_pblock = ext_pblock(ex);
2313
2314         /* convert ee_pblock to 512 byte sectors */
2315         ee_pblock = ee_pblock << (blkbits - 9);
2316
2317         while (ee_len > 0) {
2318
2319                 if (ee_len > BIO_MAX_PAGES)
2320                         len = BIO_MAX_PAGES;
2321                 else
2322                         len = ee_len;
2323
2324                 bio = bio_alloc(GFP_NOIO, len);
2325                 if (!bio)
2326                         return -ENOMEM;
2327                 bio->bi_sector = ee_pblock;
2328                 bio->bi_bdev   = inode->i_sb->s_bdev;
2329
2330                 done = 0;
2331                 offset = 0;
2332                 while (done < len) {
2333                         ret = bio_add_page(bio, ZERO_PAGE(0),
2334                                                         blocksize, offset);
2335                         if (ret != blocksize) {
2336                                 /*
2337                                  * We can't add any more pages because of
2338                                  * hardware limitations.  Start a new bio.
2339                                  */
2340                                 break;
2341                         }
2342                         done++;
2343                         offset += blocksize;
2344                         if (offset >= PAGE_CACHE_SIZE)
2345                                 offset = 0;
2346                 }
2347
2348                 init_completion(&event);
2349                 bio->bi_private = &event;
2350                 bio->bi_end_io = bi_complete;
2351                 submit_bio(WRITE, bio);
2352                 wait_for_completion(&event);
2353
2354                 if (test_bit(BIO_UPTODATE, &bio->bi_flags))
2355                         ret = 0;
2356                 else {
2357                         ret = -EIO;
2358                         break;
2359                 }
2360                 bio_put(bio);
2361                 ee_len    -= done;
2362                 ee_pblock += done  << (blkbits - 9);
2363         }
2364         return ret;
2365 }
2366
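/*
 * Illustrative sketch (not from the original source): zeroing out a
 * small extent before treating it as initialized.  The block values
 * are example placeholders.
 */
static int __maybe_unused example_zeroout(struct inode *inode,
                                          ext4_lblk_t lblk,
                                          ext4_fsblk_t pblk,
                                          unsigned short len)
{
        struct ext4_extent ex;

        ex.ee_block = cpu_to_le32(lblk);
        ex.ee_len   = cpu_to_le16(len);
        ext4_ext_store_pblock(&ex, pblk);

        /* synchronously writes zero pages over the whole extent */
        return ext4_ext_zeroout(inode, &ex);
}
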
2367 #define EXT4_EXT_ZERO_LEN 7
2368
2369 /*
2370  * This function is called by ext4_ext_get_blocks() if someone tries to write
2371  * to an uninitialized extent. It may result in splitting the uninitialized
2372  * extent into multiple extents (up to three - one initialized and two
2373  * uninitialized).
2374  * There are three possibilities:
2375  *   a> There is no split required: Entire extent should be initialized
2376  *   b> Splits in two extents: Write is happening at either end of the extent
2377  *   c> Splits in three extents: Someone is writing in the middle of the extent
2378  */
2379 static int ext4_ext_convert_to_initialized(handle_t *handle,
2380                                                 struct inode *inode,
2381                                                 struct ext4_ext_path *path,
2382                                                 ext4_lblk_t iblock,
2383                                                 unsigned long max_blocks)
2384 {
2385         struct ext4_extent *ex, newex, orig_ex;
2386         struct ext4_extent *ex1 = NULL;
2387         struct ext4_extent *ex2 = NULL;
2388         struct ext4_extent *ex3 = NULL;
2389         struct ext4_extent_header *eh;
2390         ext4_lblk_t ee_block;
2391         unsigned int allocated, ee_len, depth;
2392         ext4_fsblk_t newblock;
2393         int err = 0;
2394         int ret = 0;
2395
2396         depth = ext_depth(inode);
2397         eh = path[depth].p_hdr;
2398         ex = path[depth].p_ext;
2399         ee_block = le32_to_cpu(ex->ee_block);
2400         ee_len = ext4_ext_get_actual_len(ex);
2401         allocated = ee_len - (iblock - ee_block);
2402         newblock = iblock - ee_block + ext_pblock(ex);
2403         ex2 = ex;
2404         orig_ex.ee_block = ex->ee_block;
2405         orig_ex.ee_len   = cpu_to_le16(ee_len);
2406         ext4_ext_store_pblock(&orig_ex, ext_pblock(ex));
2407
2408         err = ext4_ext_get_access(handle, inode, path + depth);
2409         if (err)
2410                 goto out;
2411         /* If the extent is no longer than 2*EXT4_EXT_ZERO_LEN blocks, zero it out directly */
2412         if (ee_len <= 2*EXT4_EXT_ZERO_LEN) {
2413                 err =  ext4_ext_zeroout(inode, &orig_ex);
2414                 if (err)
2415                         goto fix_extent_len;
2416                 /* update the extent length and mark as initialized */
2417                 ex->ee_block = orig_ex.ee_block;
2418                 ex->ee_len   = orig_ex.ee_len;
2419                 ext4_ext_store_pblock(ex, ext_pblock(&orig_ex));
2420                 ext4_ext_dirty(handle, inode, path + depth);
2421                 /* zeroed the full extent */
2422                 return allocated;
2423         }
2424
2425         /* ex1: ee_block to iblock - 1 : uninitialized */
2426         if (iblock > ee_block) {
2427                 ex1 = ex;
2428                 ex1->ee_len = cpu_to_le16(iblock - ee_block);
2429                 ext4_ext_mark_uninitialized(ex1);
2430                 ex2 = &newex;
2431         }
2432         /*
2433          * for sanity, update the length of the ex2 extent before
2434          * we insert ex3, if ex1 is NULL. This is to avoid temporary
2435          * overlap of blocks.
2436          */
2437         if (!ex1 && allocated > max_blocks)
2438                 ex2->ee_len = cpu_to_le16(max_blocks);
2439         /* ex3: to ee_block + ee_len : uninitialized */
2440         if (allocated > max_blocks) {
2441                 unsigned int newdepth;
2442                 /* If the extent is no longer than EXT4_EXT_ZERO_LEN blocks, zero it out directly */
2443                 if (allocated <= EXT4_EXT_ZERO_LEN) {
2444                         /*
2445                          * iblock == ee_block is handled by the zeroout
2446                          * at the beginning.
2447                          * Mark first half uninitialized.
2448                          * Mark second half initialized and zero out the
2449                          * initialized extent
2450                          */
2451                         ex->ee_block = orig_ex.ee_block;
2452                         ex->ee_len   = cpu_to_le16(ee_len - allocated);
2453                         ext4_ext_mark_uninitialized(ex);
2454                         ext4_ext_store_pblock(ex, ext_pblock(&orig_ex));
2455                         ext4_ext_dirty(handle, inode, path + depth);
2456
2457                         ex3 = &newex;
2458                         ex3->ee_block = cpu_to_le32(iblock);
2459                         ext4_ext_store_pblock(ex3, newblock);
2460                         ex3->ee_len = cpu_to_le16(allocated);
2461                         err = ext4_ext_insert_extent(handle, inode, path, ex3);
2462                         if (err == -ENOSPC) {
2463                                 err =  ext4_ext_zeroout(inode, &orig_ex);
2464                                 if (err)
2465                                         goto fix_extent_len;
2466                                 ex->ee_block = orig_ex.ee_block;
2467                                 ex->ee_len   = orig_ex.ee_len;
2468                                 ext4_ext_store_pblock(ex, ext_pblock(&orig_ex));
2469                                 ext4_ext_dirty(handle, inode, path + depth);
2470                                 /* blocks available from iblock */
2471                                 return allocated;
2472
2473                         } else if (err)
2474                                 goto fix_extent_len;
2475
2476                         /*
2477                          * We need to zero out the second half because
2478                          * a fallocate request can update the file size and
2479                          * converting the second half to an initialized extent
2480                          * implies that we could leak some junk data to user
2481                          * space.
2482                          */
2483                         err =  ext4_ext_zeroout(inode, ex3);
2484                         if (err) {
2485                                 /*
2486                                  * We should actually mark the
2487                                  * second half as uninit and return error
2488                                  * Insert would have changed the extent
2489                                  */
2490                                 depth = ext_depth(inode);
2491                                 ext4_ext_drop_refs(path);
2492                                 path = ext4_ext_find_extent(inode,
2493                                                                 iblock, path);
2494                                 if (IS_ERR(path)) {
2495                                         err = PTR_ERR(path);
2496                                         return err;
2497                                 }
2498                                 /* get the second half extent details */
2499                                 ex = path[depth].p_ext;
2500                                 err = ext4_ext_get_access(handle, inode,
2501                                                                 path + depth);
2502                                 if (err)
2503                                         return err;
2504                                 ext4_ext_mark_uninitialized(ex);
2505                                 ext4_ext_dirty(handle, inode, path + depth);
2506                                 return err;
2507                         }
2508
2509                         /* zeroed the second half */
2510                         return allocated;
2511                 }
2512                 ex3 = &newex;
2513                 ex3->ee_block = cpu_to_le32(iblock + max_blocks);
2514                 ext4_ext_store_pblock(ex3, newblock + max_blocks);
2515                 ex3->ee_len = cpu_to_le16(allocated - max_blocks);
2516                 ext4_ext_mark_uninitialized(ex3);
2517                 err = ext4_ext_insert_extent(handle, inode, path, ex3);
2518                 if (err == -ENOSPC) {
2519                         err =  ext4_ext_zeroout(inode, &orig_ex);
2520                         if (err)
2521                                 goto fix_extent_len;
2522                         /* update the extent length and mark as initialized */
2523                         ex->ee_block = orig_ex.ee_block;
2524                         ex->ee_len   = orig_ex.ee_len;
2525                         ext4_ext_store_pblock(ex, ext_pblock(&orig_ex));
2526                         ext4_ext_dirty(handle, inode, path + depth);
2527                         /* zeroed the full extent */
2528                         /* blocks available from iblock */
2529                         return allocated;
2530
2531                 } else if (err)
2532                         goto fix_extent_len;
2533                 /*
2534                  * The depth, and hence eh & ex might change
2535                  * as part of the insert above.
2536                  */
2537                 newdepth = ext_depth(inode);
2538                 /*
2539                  * update the extent length after successful insert of the
2540                  * split extent
2541                  */
2542                 orig_ex.ee_len = cpu_to_le16(ee_len -
2543                                                 ext4_ext_get_actual_len(ex3));
2544                 depth = newdepth;
2545                 ext4_ext_drop_refs(path);
2546                 path = ext4_ext_find_extent(inode, iblock, path);
2547                 if (IS_ERR(path)) {
2548                         err = PTR_ERR(path);
2549                         goto out;
2550                 }
2551                 eh = path[depth].p_hdr;
2552                 ex = path[depth].p_ext;
2553                 if (ex2 != &newex)
2554                         ex2 = ex;
2555
2556                 err = ext4_ext_get_access(handle, inode, path + depth);
2557                 if (err)
2558                         goto out;
2559
2560                 allocated = max_blocks;
2561
2562                 /* If the extent is no longer than EXT4_EXT_ZERO_LEN blocks and we
2563                  * are trying to insert an extent in the middle, zero it out directly;
2564                  * otherwise give the extent a chance to merge to the left.
2565                  */
2566                 if (le16_to_cpu(orig_ex.ee_len) <= EXT4_EXT_ZERO_LEN &&
2567                                                         iblock != ee_block) {
2568                         err =  ext4_ext_zeroout(inode, &orig_ex);
2569                         if (err)
2570                                 goto fix_extent_len;
2571                         /* update the extent length and mark as initialized */
2572                         ex->ee_block = orig_ex.ee_block;
2573                         ex->ee_len   = orig_ex.ee_len;
2574                         ext4_ext_store_pblock(ex, ext_pblock(&orig_ex));
2575                         ext4_ext_dirty(handle, inode, path + depth);
2576                         /* zero out the first half */
2577                         /* blocks available from iblock */
2578                         return allocated;
2579                 }
2580         }
2581         /*
2582          * If there was a change of depth as part of the
2583          * insertion of ex3 above, we need to update the length
2584          * of the ex1 extent again here
2585          */
2586         if (ex1 && ex1 != ex) {
2587                 ex1 = ex;
2588                 ex1->ee_len = cpu_to_le16(iblock - ee_block);
2589                 ext4_ext_mark_uninitialized(ex1);
2590                 ex2 = &newex;
2591         }
2592         /* ex2: iblock to iblock + max_blocks - 1 : initialized */
2593         ex2->ee_block = cpu_to_le32(iblock);
2594         ext4_ext_store_pblock(ex2, newblock);
2595         ex2->ee_len = cpu_to_le16(allocated);
2596         if (ex2 != ex)
2597                 goto insert;
2598         /*
2599          * New (initialized) extent starts from the first block
2600          * in the current extent. i.e., ex2 == ex
2601          * We have to see if it can be merged with the extent
2602          * on the left.
2603          */
2604         if (ex2 > EXT_FIRST_EXTENT(eh)) {
2605                 /*
2606                  * To merge left, pass "ex2 - 1" to try_to_merge(),
2607                  * since it merges towards right _only_.
2608                  */
2609                 ret = ext4_ext_try_to_merge(inode, path, ex2 - 1);
2610                 if (ret) {
2611                         err = ext4_ext_correct_indexes(handle, inode, path);
2612                         if (err)
2613                                 goto out;
2614                         depth = ext_depth(inode);
2615                         ex2--;
2616                 }
2617         }
2618         /*
2619          * Try to merge towards the right. This might be required
2620          * only when the whole extent is being written to.
2621          * i.e. ex2 == ex and ex3 == NULL.
2622          */
2623         if (!ex3) {
2624                 ret = ext4_ext_try_to_merge(inode, path, ex2);
2625                 if (ret) {
2626                         err = ext4_ext_correct_indexes(handle, inode, path);
2627                         if (err)
2628                                 goto out;
2629                 }
2630         }
2631         /* Mark modified extent as dirty */
2632         err = ext4_ext_dirty(handle, inode, path + depth);
2633         goto out;
2634 insert:
2635         err = ext4_ext_insert_extent(handle, inode, path, &newex);
2636         if (err == -ENOSPC) {
2637                 err =  ext4_ext_zeroout(inode, &orig_ex);
2638                 if (err)
2639                         goto fix_extent_len;
2640                 /* update the extent length and mark as initialized */
2641                 ex->ee_block = orig_ex.ee_block;
2642                 ex->ee_len   = orig_ex.ee_len;
2643                 ext4_ext_store_pblock(ex, ext_pblock(&orig_ex));
2644                 ext4_ext_dirty(handle, inode, path + depth);
2645                 /* zero out the first half */
2646                 return allocated;
2647         } else if (err)
2648                 goto fix_extent_len;
2649 out:
2650         return err ? err : allocated;
2651
2652 fix_extent_len:
2653         ex->ee_block = orig_ex.ee_block;
2654         ex->ee_len   = orig_ex.ee_len;
2655         ext4_ext_store_pblock(ex, ext_pblock(&orig_ex));
2656         ext4_ext_mark_uninitialized(ex);
2657         ext4_ext_dirty(handle, inode, path + depth);
2658         return err;
2659 }
2660
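/*
 * Worked example (illustrative numbers only) for case c> above:
 * writing max_blocks = 4 blocks at iblock = 108 into a 20-block
 * uninitialized extent starting at logical block 100 splits it as
 *
 *   ex1: 100..107  uninitialized  (len 8)
 *   ex2: 108..111  initialized    (len 4)
 *   ex3: 112..119  uninitialized  (len 8)
 */
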
2661 /*
2662  * Block allocation/map/preallocation routine for extents-based files
2663  *
2664  *
2665  * Needs to be called with
2666  * down_read(&EXT4_I(inode)->i_data_sem) if not allocating file system blocks
2667  * (i.e., create is zero). Otherwise down_write(&EXT4_I(inode)->i_data_sem)
2668  *
2669  * return > 0, number of blocks already mapped/allocated
2670  *          if create == 0 and these are pre-allocated blocks
2671  *              buffer head is unmapped
2672  *          otherwise blocks are mapped
2673  *
2674  * return = 0, if plain look up failed (blocks have not been allocated)
2675  *          buffer head is unmapped
2676  *
2677  * return < 0, error case.
2678  */
2679 int ext4_ext_get_blocks(handle_t *handle, struct inode *inode,
2680                         ext4_lblk_t iblock,
2681                         unsigned long max_blocks, struct buffer_head *bh_result,
2682                         int create, int extend_disksize)
2683 {
2684         struct ext4_ext_path *path = NULL;
2685         struct ext4_extent_header *eh;
2686         struct ext4_extent newex, *ex;
2687         ext4_fsblk_t goal, newblock;
2688         int err = 0, depth, ret;
2689         unsigned long allocated = 0;
2690         struct ext4_allocation_request ar;
2691         loff_t disksize;
2692
2693         __clear_bit(BH_New, &bh_result->b_state);
2694         ext_debug("blocks %u/%lu requested for inode %u\n",
2695                         iblock, max_blocks, inode->i_ino);
2696
2697         /* check in cache */
2698         goal = ext4_ext_in_cache(inode, iblock, &newex);
2699         if (goal) {
2700                 if (goal == EXT4_EXT_CACHE_GAP) {
2701                         if (!create) {
2702                                 /*
2703                                  * block isn't allocated yet and
2704                                  * user doesn't want to allocate it
2705                                  */
2706                                 goto out2;
2707                         }
2708                         /* we should allocate requested block */
2709                 } else if (goal == EXT4_EXT_CACHE_EXTENT) {
2710                         /* block is already allocated */
2711                         newblock = iblock
2712                                    - le32_to_cpu(newex.ee_block)
2713                                    + ext_pblock(&newex);
2714                         /* number of remaining blocks in the extent */
2715                         allocated = ext4_ext_get_actual_len(&newex) -
2716                                         (iblock - le32_to_cpu(newex.ee_block));
2717                         goto out;
2718                 } else {
2719                         BUG();
2720                 }
2721         }
2722
2723         /* find extent for this block */
2724         path = ext4_ext_find_extent(inode, iblock, NULL);
2725         if (IS_ERR(path)) {
2726                 err = PTR_ERR(path);
2727                 path = NULL;
2728                 goto out2;
2729         }
2730
2731         depth = ext_depth(inode);
2732
2733         /*
2734          * consistent leaf must not be empty;
2735          * this situation is possible, though, _during_ tree modification;
2736          * this is why assert can't be put in ext4_ext_find_extent()
2737          */
2738         BUG_ON(path[depth].p_ext == NULL && depth != 0);
2739         eh = path[depth].p_hdr;
2740
2741         ex = path[depth].p_ext;
2742         if (ex) {
2743                 ext4_lblk_t ee_block = le32_to_cpu(ex->ee_block);
2744                 ext4_fsblk_t ee_start = ext_pblock(ex);
2745                 unsigned short ee_len;
2746
2747                 /*
2748                  * Uninitialized extents are treated as holes, except that
2749                  * we split out initialized portions during a write.
2750                  */
2751                 ee_len = ext4_ext_get_actual_len(ex);
2752                 /* if found extent covers block, simply return it */
2753                 if (iblock >= ee_block && iblock < ee_block + ee_len) {
2754                         newblock = iblock - ee_block + ee_start;
2755                         /* number of remaining blocks in the extent */
2756                         allocated = ee_len - (iblock - ee_block);
2757                         ext_debug("%u fit into %lu:%d -> %llu\n", iblock,
2758                                         ee_block, ee_len, newblock);
2759
2760                         /* Do not put uninitialized extent in the cache */
2761                         if (!ext4_ext_is_uninitialized(ex)) {
2762                                 ext4_ext_put_in_cache(inode, ee_block,
2763                                                         ee_len, ee_start,
2764                                                         EXT4_EXT_CACHE_EXTENT);
2765                                 goto out;
2766                         }
2767                         if (create == EXT4_CREATE_UNINITIALIZED_EXT)
2768                                 goto out;
2769                         if (!create) {
2770                                 /*
2771                                  * We have blocks reserved already.  We
2772                                  * return allocated blocks so that delalloc
2773                                  * won't do block reservation for us.  But
2774                                  * the buffer head will be unmapped so that
2775                                  * a read from the block returns 0s.
2776                                  */
2777                                 if (allocated > max_blocks)
2778                                         allocated = max_blocks;
2779                                 set_buffer_unwritten(bh_result);
2780                                 goto out2;
2781                         }
2782
2783                         ret = ext4_ext_convert_to_initialized(handle, inode,
2784                                                                 path, iblock,
2785                                                                 max_blocks);
2786                         if (ret <= 0) {
2787                                 err = ret;
2788                                 goto out2;
2789                         } else
2790                                 allocated = ret;
2791                         goto outnew;
2792                 }
2793         }
2794
2795         /*
2796          * requested block isn't allocated yet;
2797          * we must not try to create the block if the create flag is zero
2798          */
2799         if (!create) {
2800                 /*
2801                  * put just found gap into cache to speed up
2802                  * subsequent requests
2803                  */
2804                 ext4_ext_put_gap_in_cache(inode, path, iblock);
2805                 goto out2;
2806         }
2807         /*
2808          * Okay, we need to do block allocation.
2809          */
2810
2811         /* find neighbour allocated blocks */
2812         ar.lleft = iblock;
2813         err = ext4_ext_search_left(inode, path, &ar.lleft, &ar.pleft);
2814         if (err)
2815                 goto out2;
2816         ar.lright = iblock;
2817         err = ext4_ext_search_right(inode, path, &ar.lright, &ar.pright);
2818         if (err)
2819                 goto out2;
2820
2821         /*
2822          * See if request is beyond maximum number of blocks we can have in
2823          * a single extent. For an initialized extent this limit is
2824          * EXT_INIT_MAX_LEN and for an uninitialized extent this limit is
2825          * EXT_UNINIT_MAX_LEN.
2826          */
2827         if (max_blocks > EXT_INIT_MAX_LEN &&
2828             create != EXT4_CREATE_UNINITIALIZED_EXT)
2829                 max_blocks = EXT_INIT_MAX_LEN;
2830         else if (max_blocks > EXT_UNINIT_MAX_LEN &&
2831                  create == EXT4_CREATE_UNINITIALIZED_EXT)
2832                 max_blocks = EXT_UNINIT_MAX_LEN;
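             /*
              * These limits come from the on-disk format: ee_len is 16 bits
              * and its high bit flags the extent as uninitialized, so an
              * initialized extent can cover 32768 blocks (EXT_INIT_MAX_LEN)
              * while an uninitialized one is capped at 32767
              * (EXT_UNINIT_MAX_LEN).
              */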
2833
2834         /* Check if we can really insert (iblock)::(iblock+max_blocks) extent */
2835         newex.ee_block = cpu_to_le32(iblock);
2836         newex.ee_len = cpu_to_le16(max_blocks);
2837         err = ext4_ext_check_overlap(inode, &newex, path);
2838         if (err)
2839                 allocated = ext4_ext_get_actual_len(&newex);
2840         else
2841                 allocated = max_blocks;
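             /*
              * A non-zero return from ext4_ext_check_overlap means the request
              * ran into an existing extent; it has already trimmed newex.ee_len,
              * so we allocate only up to the start of that extent.
              */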
2842
2843         /* allocate new block */
2844         ar.inode = inode;
2845         ar.goal = ext4_ext_find_goal(inode, path, iblock);
2846         ar.logical = iblock;
2847         ar.len = allocated;
2848         if (S_ISREG(inode->i_mode))
2849                 ar.flags = EXT4_MB_HINT_DATA;
2850         else
2851                 /* disable in-core preallocation for non-regular files */
2852                 ar.flags = 0;
2853         newblock = ext4_mb_new_blocks(handle, &ar, &err);
2854         if (!newblock)
2855                 goto out2;
2856         ext_debug("allocate new block: goal %llu, found %llu/%lu\n",
2857                         ar.goal, newblock, allocated);
2858
2859         /* try to insert new extent into found leaf and return */
2860         ext4_ext_store_pblock(&newex, newblock);
2861         newex.ee_len = cpu_to_le16(ar.len);
2862         if (create == EXT4_CREATE_UNINITIALIZED_EXT)  /* Mark uninitialized */
2863                 ext4_ext_mark_uninitialized(&newex);
2864         err = ext4_ext_insert_extent(handle, inode, path, &newex);
2865         if (err) {
2866                 /* free data blocks we just allocated */
2867                 /* not a good idea to call discard here directly,
2868                  * but otherwise we'd need to call it on every free() */
2869                 ext4_discard_preallocations(inode);
2870                 ext4_free_blocks(handle, inode, ext_pblock(&newex),
2871                                         ext4_ext_get_actual_len(&newex), 0);
2872                 goto out2;
2873         }
2874
2875         /* previous routine could have used the block we allocated */
2876         newblock = ext_pblock(&newex);
2877         allocated = ext4_ext_get_actual_len(&newex);
2878 outnew:
2879         if (extend_disksize) {
2880                 disksize = ((loff_t) iblock + ar.len) << inode->i_blkbits;
2881                 if (disksize > i_size_read(inode))
2882                         disksize = i_size_read(inode);
2883                 if (disksize > EXT4_I(inode)->i_disksize)
2884                         EXT4_I(inode)->i_disksize = disksize;
2885         }
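             /*
              * i_disksize is the size recorded in the on-disk inode.  It is
              * clamped to i_size above so that, after a crash, the inode never
              * claims bytes beyond what the user was shown.
              */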
2886
2887         set_buffer_new(bh_result);
2888
2889         /* Cache only when it is _not_ an uninitialized extent */
2890         if (create != EXT4_CREATE_UNINITIALIZED_EXT)
2891                 ext4_ext_put_in_cache(inode, iblock, allocated, newblock,
2892                                                 EXT4_EXT_CACHE_EXTENT);
2893 out:
2894         if (allocated > max_blocks)
2895                 allocated = max_blocks;
2896         ext4_ext_show_leaf(inode, path);
2897         set_buffer_mapped(bh_result);
2898         bh_result->b_bdev = inode->i_sb->s_bdev;
2899         bh_result->b_blocknr = newblock;
2900 out2:
2901         if (path) {
2902                 ext4_ext_drop_refs(path);
2903                 kfree(path);
2904         }
2905         return err ? err : allocated;
2906 }
2907
2908 void ext4_ext_truncate(struct inode *inode)
2909 {
2910         struct address_space *mapping = inode->i_mapping;
2911         struct super_block *sb = inode->i_sb;
2912         ext4_lblk_t last_block;
2913         handle_t *handle;
2914         int err = 0;
2915
2916         /*
2917          * probably the first extent we free will be the last one in its block
2918          */
2919         err = ext4_writepage_trans_blocks(inode);
2920         handle = ext4_journal_start(inode, err);
2921         if (IS_ERR(handle))
2922                 return;
2923
2924         if (inode->i_size & (sb->s_blocksize - 1))
2925                 ext4_block_truncate_page(handle, mapping, inode->i_size);
2926
2927         if (ext4_orphan_add(handle, inode))
2928                 goto out_stop;
2929
2930         down_write(&EXT4_I(inode)->i_data_sem);
2931         ext4_ext_invalidate_cache(inode);
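             /*
              * The extent cache is dropped under i_data_sem, so a racing block
              * lookup cannot keep serving mappings that are about to be freed.
              */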
2932
2933         ext4_discard_preallocations(inode);
2934
2935         /*
2936          * TODO: optimization is possible here.
2937          * Probably we need not scan at all,
2938          * because page truncation is enough.
2939          */
2940
2941         /* we have to know where to truncate from in the crash case */
2942         EXT4_I(inode)->i_disksize = inode->i_size;
2943         ext4_mark_inode_dirty(handle, inode);
2944
2945         last_block = (inode->i_size + sb->s_blocksize - 1)
2946                         >> EXT4_BLOCK_SIZE_BITS(sb);
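             /*
              * last_block is the count of blocks the file still needs, i.e.
              * the first logical block to remove.  With 4096-byte blocks and
              * i_size = 10000, (10000 + 4095) >> 12 == 3: blocks 0-2 survive
              * and everything from block 3 onward is freed.
              */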
2947         err = ext4_ext_remove_space(inode, last_block);
2948
2949         /* In a multi-transaction truncate, we only make the final
2950          * transaction synchronous.
2951          */
2952         if (IS_SYNC(inode))
2953                 handle->h_sync = 1;
2954
2955 out_stop:
2956         up_write(&EXT4_I(inode)->i_data_sem);
2957         /*
2958          * If this was a simple ftruncate() and the file will remain alive,
2959          * then we need to clear up the orphan record which we created above.
2960          * However, if this was a real unlink then we were called by
2961          * ext4_delete_inode(), and we allow that function to clean up the
2962          * orphan info for us.
2963          */
2964         if (inode->i_nlink)
2965                 ext4_orphan_del(handle, inode);
2966
2967         inode->i_mtime = inode->i_ctime = ext4_current_time(inode);
2968         ext4_mark_inode_dirty(handle, inode);
2969         ext4_journal_stop(handle);
2970 }
2971
2972 static void ext4_falloc_update_inode(struct inode *inode,
2973                                 int mode, loff_t new_size, int update_ctime)
2974 {
2975         struct timespec now;
2976
2977         if (update_ctime) {
2978                 now = current_fs_time(inode->i_sb);
2979                 if (!timespec_equal(&inode->i_ctime, &now))
2980                         inode->i_ctime = now;
2981         }
2982         /*
2983          * Update only when preallocation was requested beyond
2984          * the file size.
2985          */
2986         if (!(mode & FALLOC_FL_KEEP_SIZE)) {
2987                 if (new_size > i_size_read(inode))
2988                         i_size_write(inode, new_size);
2989                 if (new_size > EXT4_I(inode)->i_disksize)
2990                         ext4_update_i_disksize(inode, new_size);
2991         }
2992
2993 }
2994
2995 /*
2996  * preallocate space for a file. This implements ext4's fallocate inode
2997  * operation, which is called from the sys_fallocate system call.
2998  * For block-mapped files, posix_fallocate should fall back to the method
2999  * of writing zeroes to the required new blocks (the same behavior that is
3000  * expected of file systems which do not support the fallocate() system call).
3001  */
3002 long ext4_fallocate(struct inode *inode, int mode, loff_t offset, loff_t len)
3003 {
3004         handle_t *handle;
3005         ext4_lblk_t block;
3006         loff_t new_size;
3007         unsigned long max_blocks;
3008         int ret = 0;
3009         int ret2 = 0;
3010         int retries = 0;
3011         struct buffer_head map_bh;
3012         unsigned int credits, blkbits = inode->i_blkbits;
3013
3014         /*
3015          * currently supporting (pre)allocate mode for extent-based
3016          * files _only_
3017          */
3018         if (!(EXT4_I(inode)->i_flags & EXT4_EXTENTS_FL))
3019                 return -EOPNOTSUPP;
3020
3021         /* preallocation to directories is currently not supported */
3022         if (S_ISDIR(inode->i_mode))
3023                 return -ENODEV;
3024
3025         block = offset >> blkbits;
3026         /*
3027          * We can't just convert len to max_blocks: with blocksize = 4096,
3028          * offset = 3072 and len = 2048, len >> blkbits is 0 but two blocks are needed.
3029          */
3030         max_blocks = (EXT4_BLOCK_ALIGN(len + offset, blkbits) >> blkbits)
3031                                                         - block;
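             /*
              * Worked example with blkbits = 12: block = 3072 >> 12 = 0, and
              * EXT4_BLOCK_ALIGN(5120, 12) >> 12 = 8192 >> 12 = 2, so
              * max_blocks = 2 - 0 = 2 even though len >> blkbits would give 0.
              */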
3032         /*
3033          * credits to insert 1 extent into extent tree
3034          */
3035         credits = ext4_chunk_trans_blocks(inode, max_blocks);
3036         mutex_lock(&inode->i_mutex);
3037 retry:
3038         while (ret >= 0 && ret < max_blocks) {
3039                 block = block + ret;
3040                 max_blocks = max_blocks - ret;
3041                 handle = ext4_journal_start(inode, credits);
3042                 if (IS_ERR(handle)) {
3043                         ret = PTR_ERR(handle);
3044                         break;
3045                 }
3046                 ret = ext4_get_blocks_wrap(handle, inode, block,
3047                                           max_blocks, &map_bh,
3048                                           EXT4_CREATE_UNINITIALIZED_EXT, 0, 0);
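                     /*
                      * EXT4_CREATE_UNINITIALIZED_EXT allocates blocks but marks
                      * the extent uninitialized, so fallocate never has to write
                      * zeroes: reads of the range return zeroes until it is
                      * actually written.
                      */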
3049                 if (ret <= 0) {
3050 #ifdef EXT4FS_DEBUG
3051                         WARN_ON(ret <= 0);
3052                         printk(KERN_ERR "%s: ext4_ext_get_blocks "
3053                                     "returned error inode#%lu, block=%u, "
3054                                     "max_blocks=%lu", __func__,
3055                                     inode->i_ino, block, max_blocks);
3056 #endif
3057                         ext4_mark_inode_dirty(handle, inode);
3058                         ret2 = ext4_journal_stop(handle);
3059                         break;
3060                 }
3061                 if ((block + ret) >= (EXT4_BLOCK_ALIGN(offset + len,
3062                                                 blkbits) >> blkbits))
3063                         new_size = offset + len;
3064                 else
3065                         new_size = ((loff_t)block + ret) << blkbits;
3066
3067                 ext4_falloc_update_inode(inode, mode, new_size,
3068                                                 buffer_new(&map_bh));
3069                 ext4_mark_inode_dirty(handle, inode);
3070                 ret2 = ext4_journal_stop(handle);
3071                 if (ret2)
3072                         break;
3073         }
3074         if (ret == -ENOSPC &&
3075                         ext4_should_retry_alloc(inode->i_sb, &retries)) {
3076                 ret = 0;
3077                 goto retry;
3078         }
3079         mutex_unlock(&inode->i_mutex);
3080         return ret > 0 ? ret2 : ret;
3081 }
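
/*
 * Illustrative only, not part of the original file: a minimal userspace
 * sketch of how the path above is reached.  posix_fallocate(3) is backed
 * by the fallocate(2) system call on filesystems that support it:
 *
 *	int fd = open("data", O_RDWR | O_CREAT, 0644);
 *	if (fd >= 0)
 *		posix_fallocate(fd, 0, 16 * 4096);	// reserve 64KiB,
 *							// assuming 4KiB blocks
 *
 * On success the range is reserved as uninitialized extents, and reads of
 * it return zeroes until it is written.
 */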
3082
3083 /*
3084  * Callback function called for each extent to gather FIEMAP information.
3085  */
3086 int ext4_ext_fiemap_cb(struct inode *inode, struct ext4_ext_path *path,
3087                        struct ext4_ext_cache *newex, struct ext4_extent *ex,
3088                        void *data)
3089 {
3090         struct fiemap_extent_info *fieinfo = data;
3091         unsigned long blksize_bits = inode->i_sb->s_blocksize_bits;
3092         __u64   logical;
3093         __u64   physical;
3094         __u64   length;
3095         __u32   flags = 0;
3096         int     error;
3097
3098         logical =  (__u64)newex->ec_block << blksize_bits;
3099
3100         if (newex->ec_type == EXT4_EXT_CACHE_GAP) {
3101                 pgoff_t offset;
3102                 struct page *page;
3103                 struct buffer_head *bh = NULL;
3104
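                     /*
                      * A cached "gap" may still be dirty delayed-allocation
                      * data that only lives in the page cache.  Peek at the
                      * page's buffers: if the delay bit is set, report the
                      * range as FIEMAP_EXTENT_DELALLOC instead of skipping it.
                      */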
3105                 offset = logical >> PAGE_SHIFT;
3106                 page = find_get_page(inode->i_mapping, offset);
3107                 if (!page)
                             return EXT_CONTINUE;
                     if (!page_has_buffers(page)) {
                             page_cache_release(page);
                             return EXT_CONTINUE;
                     }
3109
3110                 bh = page_buffers(page);
3111
3112                 if (!bh) {
                             page_cache_release(page);
                             return EXT_CONTINUE;
                     }
3114
3115                 if (buffer_delay(bh)) {
3116                         flags |= FIEMAP_EXTENT_DELALLOC;
3117                         page_cache_release(page);
3118                 } else {
3119                         page_cache_release(page);
3120                         return EXT_CONTINUE;
3121                 }
3122         }
3123
3124         physical = (__u64)newex->ec_start << blksize_bits;
3125         length =   (__u64)newex->ec_len << blksize_bits;
3126
3127         if (ex && ext4_ext_is_uninitialized(ex))
3128                 flags |= FIEMAP_EXTENT_UNWRITTEN;
3129
3130         /*
3131          * If this extent reaches EXT_MAX_BLOCK, it must be last.
3132          *
3133          * Or if ext4_ext_next_allocated_block is EXT_MAX_BLOCK,
3134          * this also indicates no more allocated blocks.
3135          *
3136          * XXX this might miss a single-block extent at EXT_MAX_BLOCK
3137          */
3138         if (logical + length - 1 == EXT_MAX_BLOCK ||
3139             ext4_ext_next_allocated_block(path) == EXT_MAX_BLOCK)
3140                 flags |= FIEMAP_EXTENT_LAST;
3141
3142         error = fiemap_fill_next_extent(fieinfo, logical, physical,
3143                                         length, flags);
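             /*
              * fiemap_fill_next_extent returns 1 once the user-supplied extent
              * array is full (or the last extent was just emitted), which we
              * translate into EXT_BREAK to stop the tree walk.
              */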
3144         if (error < 0)
3145                 return error;
3146         if (error == 1)
3147                 return EXT_BREAK;
3148
3149         return EXT_CONTINUE;
3150 }
3151
3152 /* fiemap flags we can handle specified here */
3153 #define EXT4_FIEMAP_FLAGS       (FIEMAP_FLAG_SYNC|FIEMAP_FLAG_XATTR)
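/*
 * FIEMAP_FLAG_SYNC asks for dirty data to be flushed before mapping;
 * FIEMAP_FLAG_XATTR asks for the extended-attribute block to be mapped
 * instead of file data.  Any other flag makes fiemap_check_flags() return
 * -EBADR.
 */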
3154
3155 int ext4_xattr_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo)
3156 {
3157         __u64 physical = 0;
3158         __u64 length;
3159         __u32 flags = FIEMAP_EXTENT_LAST;
3160         int blockbits = inode->i_sb->s_blocksize_bits;
3161         int error = 0;
3162
3163         /* in-inode? */
3164         if (EXT4_I(inode)->i_state & EXT4_STATE_XATTR) {
3165                 struct ext4_iloc iloc;
3166                 int offset;     /* offset of xattr in inode */
3167
3168                 error = ext4_get_inode_loc(inode, &iloc);
3169                 if (error)
3170                         return error;
3171                 physical = (__u64)iloc.bh->b_blocknr << blockbits;
3172                 offset = EXT4_GOOD_OLD_INODE_SIZE +
3173                                 EXT4_I(inode)->i_extra_isize;
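                     /*
                      * In-inode xattrs live after the 128-byte original inode
                      * (EXT4_GOOD_OLD_INODE_SIZE) plus the i_extra_isize bytes
                      * of newer fields, and run to the end of the on-disk inode.
                      */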
3174                 physical += offset;
3175                 length = EXT4_SB(inode->i_sb)->s_inode_size - offset;
                     brelse(iloc.bh);
3176                 flags |= FIEMAP_EXTENT_DATA_INLINE;
3177         } else { /* external block */
3178                 physical = EXT4_I(inode)->i_file_acl << blockbits;
3179                 length = inode->i_sb->s_blocksize;
3180         }
3181
3182         if (physical)
3183                 error = fiemap_fill_next_extent(fieinfo, 0, physical,
3184                                                 length, flags);
3185         return (error < 0 ? error : 0);
3186 }
3187
3188 int ext4_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
3189                 __u64 start, __u64 len)
3190 {
3191         ext4_lblk_t start_blk;
3192         ext4_lblk_t len_blks;
3193         int error = 0;
3194
3195         /* fall back to the generic mapping code if the inode is not extent-based */
3196         if (!(EXT4_I(inode)->i_flags & EXT4_EXTENTS_FL))
3197                 return generic_block_fiemap(inode, fieinfo, start, len,
3198                         ext4_get_block);
3199
3200         if (fiemap_check_flags(fieinfo, EXT4_FIEMAP_FLAGS))
3201                 return -EBADR;
3202
3203         if (fieinfo->fi_flags & FIEMAP_FLAG_XATTR) {
3204                 error = ext4_xattr_fiemap(inode, fieinfo);
3205         } else {
3206                 start_blk = start >> inode->i_sb->s_blocksize_bits;
3207                 len_blks = len >> inode->i_sb->s_blocksize_bits;
3208
3209                 /*
3210                  * Walk the extent tree gathering extent information.
3211                  * ext4_ext_fiemap_cb will push extents back to user.
3212                  */
3213                 down_write(&EXT4_I(inode)->i_data_sem);
3214                 error = ext4_ext_walk_space(inode, start_blk, len_blks,
3215                                           ext4_ext_fiemap_cb, fieinfo);
3216                 up_write(&EXT4_I(inode)->i_data_sem);
3217         }
3218
3219         return error;
3220 }
3221