/*
 *  linux/fs/ext3/inode.c
 *
 * Copyright (C) 1992, 1993, 1994, 1995
 * Remy Card (card@masi.ibp.fr)
 * Laboratoire MASI - Institut Blaise Pascal
 * Universite Pierre et Marie Curie (Paris VI)
 *
 *  from
 *
 *  linux/fs/minix/inode.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 *  Goal-directed block allocation by Stephen Tweedie
 *      (sct@redhat.com), 1993, 1998
 *  Big-endian to little-endian byte-swapping/bitmaps by
 *        David S. Miller (davem@caip.rutgers.edu), 1995
 *  64-bit file support on 64-bit platforms by Jakub Jelinek
 *      (jj@sunsite.ms.mff.cuni.cz)
 *
 *  Assorted race fixes, rewrite of ext3_get_block() by Al Viro, 2000
 */

#include <linux/module.h>
#include <linux/fs.h>
#include <linux/time.h>
#include <linux/ext3_jbd.h>
#include <linux/jbd.h>
#include <linux/smp_lock.h>
#include <linux/highuid.h>
#include <linux/pagemap.h>
#include <linux/quotaops.h>
#include <linux/string.h>
#include <linux/buffer_head.h>
#include <linux/writeback.h>
#include <linux/mpage.h>
#include <linux/uio.h>
#include "xattr.h"
#include "acl.h"

static int ext3_writepage_trans_blocks(struct inode *inode);

/*
 * Test whether an inode is a fast symlink.
 */
static inline int ext3_inode_is_fast_symlink(struct inode *inode)
{
        /* i_blocks counts 512-byte sectors, so an external xattr block
         * accounts for (blocksize >> 9) of them. */
        int ea_blocks = EXT3_I(inode)->i_file_acl ?
                (inode->i_sb->s_blocksize >> 9) : 0;

        return (S_ISLNK(inode->i_mode) &&
                inode->i_blocks - ea_blocks == 0);
}

/* The ext3 forget function must perform a revoke if we are freeing data
 * which has been journaled.  Metadata (e.g. indirect blocks) must be
 * revoked in all cases.
 *
 * "bh" may be NULL: a metadata block may have been freed from memory
 * but there may still be a record of it in the journal, and that record
 * still needs to be revoked.
 */

int ext3_forget(handle_t *handle, int is_metadata,
                       struct inode *inode, struct buffer_head *bh,
                       int blocknr)
{
        int err;

        might_sleep();

        BUFFER_TRACE(bh, "enter");

        jbd_debug(4, "forgetting bh %p: is_metadata = %d, mode %o, "
                  "data mode %lx\n",
                  bh, is_metadata, inode->i_mode,
                  test_opt(inode->i_sb, DATA_FLAGS));

        /* Never use the revoke function if we are doing full data
         * journaling: there is no need to, and a V1 superblock won't
         * support it.  Otherwise, only skip the revoke on un-journaled
         * data blocks. */

        if (test_opt(inode->i_sb, DATA_FLAGS) == EXT3_MOUNT_JOURNAL_DATA ||
            (!is_metadata && !ext3_should_journal_data(inode))) {
                if (bh) {
                        BUFFER_TRACE(bh, "call journal_forget");
                        return ext3_journal_forget(handle, bh);
                }
                return 0;
        }

        /*
         * data!=journal && (is_metadata || should_journal_data(inode))
         */
        BUFFER_TRACE(bh, "call ext3_journal_revoke");
        err = ext3_journal_revoke(handle, blocknr, bh);
        if (err)
                ext3_abort(inode->i_sb, __FUNCTION__,
                           "error %d when attempting revoke", err);
        BUFFER_TRACE(bh, "exit");
        return err;
}

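/*
 * A quick reference for the cases the function above distinguishes
 * (derived from the logic above, not normative):
 *
 *   data=journal mount, any block              -> journal_forget()
 *   data!=journal, un-journaled data block     -> journal_forget()
 *   data!=journal, metadata block              -> journal_revoke()
 *   data!=journal, inode journals its data     -> journal_revoke()
 */
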
/*
 * Work out how many blocks we need to progress with the next chunk of a
 * truncate transaction.
 */

static unsigned long blocks_for_truncate(struct inode *inode)
{
        unsigned long needed;

        needed = inode->i_blocks >> (inode->i_sb->s_blocksize_bits - 9);

        /* Give ourselves just enough room to cope with inodes in which
         * i_blocks is corrupt: we've seen disk corruptions in the past
         * which resulted in random data in an inode which looked enough
         * like a regular file for ext3 to try to delete it.  Things
         * will go a bit crazy if that happens, but at least we should
         * try not to panic the whole kernel. */
        if (needed < 2)
                needed = 2;

        /* But we need to bound the transaction so we don't overflow the
         * journal. */
        if (needed > EXT3_MAX_TRANS_DATA)
                needed = EXT3_MAX_TRANS_DATA;

        return EXT3_DATA_TRANS_BLOCKS + needed;
}

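/*
 * A worked example (a sketch, assuming 4KB blocks, i.e. s_blocksize_bits
 * == 12): a 1GB regular file has i_blocks == 2097152 512-byte sectors,
 * so needed == 2097152 >> 3 == 262144, which is then clamped to
 * EXT3_MAX_TRANS_DATA; a tiny file where the shift yields 0 or 1 is
 * bumped up to 2.
 */
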
/*
 * Truncate transactions can be complex and absolutely huge.  So we need to
 * be able to restart the transaction at a convenient checkpoint to make
 * sure we don't overflow the journal.
 *
 * start_transaction gets us a new handle for a truncate transaction,
 * and extend_transaction tries to extend the existing one a bit.  If
 * extend fails, we need to propagate the failure up and restart the
 * transaction in the top-level truncate loop. --sct
 */

static handle_t *start_transaction(struct inode *inode)
{
        handle_t *result;

        result = ext3_journal_start(inode, blocks_for_truncate(inode));
        if (!IS_ERR(result))
                return result;

        ext3_std_error(inode->i_sb, PTR_ERR(result));
        return result;
}

/*
 * Try to extend this transaction for the purposes of truncation.
 *
 * Returns 0 if we managed to create more room.  If we can't create more
 * room, the transaction must be restarted, and we return 1.
 */
static int try_to_extend_transaction(handle_t *handle, struct inode *inode)
{
        if (handle->h_buffer_credits > EXT3_RESERVE_TRANS_BLOCKS)
                return 0;
        if (!ext3_journal_extend(handle, blocks_for_truncate(inode)))
                return 0;
        return 1;
}

/*
 * Restart the transaction associated with *handle.  This does a commit,
 * so before we call here everything must be consistently dirtied against
 * this transaction.
 */
static int ext3_journal_test_restart(handle_t *handle, struct inode *inode)
{
        jbd_debug(2, "restarting handle %p\n", handle);
        return ext3_journal_restart(handle, blocks_for_truncate(inode));
}

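/*
 * The intended usage of the three helpers above, as a sketch of the
 * pattern in the truncate path (not a verbatim copy of any caller):
 *
 *      handle = start_transaction(inode);
 *      ...
 *      if (try_to_extend_transaction(handle, inode)) {
 *              (dirty everything consistently first, then...)
 *              ext3_mark_inode_dirty(handle, inode);
 *              ext3_journal_test_restart(handle, inode);
 *      }
 */
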
/*
 * Called at the last iput() if i_nlink is zero.
 */
void ext3_delete_inode (struct inode * inode)
{
        handle_t *handle;

        if (is_bad_inode(inode))
                goto no_delete;

        handle = start_transaction(inode);
        if (IS_ERR(handle)) {
                /* If we're going to skip the normal cleanup, we still
                 * need to make sure that the in-core orphan linked list
                 * is properly cleaned up. */
                ext3_orphan_del(NULL, inode);
                goto no_delete;
        }

        if (IS_SYNC(inode))
                handle->h_sync = 1;
        inode->i_size = 0;
        if (inode->i_blocks)
                ext3_truncate(inode);
        /*
         * Kill off the orphan record which ext3_truncate created.
         * AKPM: I think this can be inside the above `if'.
         * Note that ext3_orphan_del() has to be able to cope with the
         * deletion of a non-existent orphan - this is because we don't
         * know if ext3_truncate() actually created an orphan record.
         * (Well, we could do this if we need to, but heck - it works)
         */
        ext3_orphan_del(handle, inode);
        EXT3_I(inode)->i_dtime  = get_seconds();

        /*
         * One subtle ordering requirement: if anything has gone wrong
         * (transaction abort, IO errors, whatever), then we can still
         * do these next steps (the fs will already have been marked as
         * having errors), but we can't free the inode if the mark_dirty
         * fails.
         */
        if (ext3_mark_inode_dirty(handle, inode))
                /* If that failed, just do the required in-core inode clear. */
                clear_inode(inode);
        else
                ext3_free_inode(handle, inode);
        ext3_journal_stop(handle);
        return;
no_delete:
        clear_inode(inode);     /* We must guarantee clearing of inode... */
}

static int ext3_alloc_block (handle_t *handle,
                        struct inode * inode, unsigned long goal, int *err)
{
        unsigned long result;

        result = ext3_new_block(handle, inode, goal, err);
        return result;
}


typedef struct {
        __le32  *p;
        __le32  key;
        struct buffer_head *bh;
} Indirect;

static inline void add_chain(Indirect *p, struct buffer_head *bh, __le32 *v)
{
        p->key = *(p->p = v);
        p->bh = bh;
}

static inline int verify_chain(Indirect *from, Indirect *to)
{
        while (from <= to && from->key == *from->p)
                from++;
        return (from > to);
}

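/*
 * add_chain()/verify_chain() implement an optimistic, lockless read of
 * the indirect tree: each triple caches at ->key the value that was seen
 * at ->p, and verify_chain() re-checks that none of the cached keys have
 * changed underneath us (e.g. due to a concurrent truncate).  Readers
 * that fail the check back out with -EAGAIN and retry; see
 * ext3_get_branch() and ext3_get_block_handle() below.
 */
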
/**
 *      ext3_block_to_path - parse the block number into array of offsets
 *      @inode: inode in question (we are only interested in its superblock)
 *      @i_block: block number to be parsed
 *      @offsets: array to store the offsets in
 *      @boundary: set this non-zero if the referred-to block is likely to be
 *             followed (on disk) by an indirect block.
 *
 *      To store the locations of a file's data ext3 uses a data structure
 *      common for UNIX filesystems - a tree of pointers anchored in the
 *      inode, with data blocks at leaves and indirect blocks in intermediate
 *      nodes.  This function translates the block number into a path in that
 *      tree - the return value is the path length and @offsets[n] is the
 *      offset of the pointer to the (n+1)th node in the nth one. If @i_block
 *      is out of range (negative or too large) a warning is printed and zero
 *      is returned.
 *
 *      Note: function doesn't find node addresses, so no IO is needed. All
 *      we need to know is the capacity of indirect blocks (taken from the
 *      inode->i_sb).
 */

/*
 * Portability note: the last comparison (check that we fit into triple
 * indirect block) is spelled differently, because otherwise on an
 * architecture with 32-bit longs and 8Kb pages we might get into trouble
 * if our filesystem had 8Kb blocks. We might use long long, but that would
 * kill us on x86. Oh, well, at least the sign propagation does not matter -
 * i_block would have to be negative in the very beginning, so we would not
 * get there at all.
 */

static int ext3_block_to_path(struct inode *inode,
                        long i_block, int offsets[4], int *boundary)
{
        int ptrs = EXT3_ADDR_PER_BLOCK(inode->i_sb);
        int ptrs_bits = EXT3_ADDR_PER_BLOCK_BITS(inode->i_sb);
        const long direct_blocks = EXT3_NDIR_BLOCKS,
                indirect_blocks = ptrs,
                double_blocks = (1 << (ptrs_bits * 2));
        int n = 0;
        int final = 0;

        if (i_block < 0) {
                ext3_warning (inode->i_sb, "ext3_block_to_path", "block < 0");
        } else if (i_block < direct_blocks) {
                offsets[n++] = i_block;
                final = direct_blocks;
        } else if ( (i_block -= direct_blocks) < indirect_blocks) {
                offsets[n++] = EXT3_IND_BLOCK;
                offsets[n++] = i_block;
                final = ptrs;
        } else if ((i_block -= indirect_blocks) < double_blocks) {
                offsets[n++] = EXT3_DIND_BLOCK;
                offsets[n++] = i_block >> ptrs_bits;
                offsets[n++] = i_block & (ptrs - 1);
                final = ptrs;
        } else if (((i_block -= double_blocks) >> (ptrs_bits * 2)) < ptrs) {
                offsets[n++] = EXT3_TIND_BLOCK;
                offsets[n++] = i_block >> (ptrs_bits * 2);
                offsets[n++] = (i_block >> ptrs_bits) & (ptrs - 1);
                offsets[n++] = i_block & (ptrs - 1);
                final = ptrs;
        } else {
                ext3_warning (inode->i_sb, "ext3_block_to_path", "block > big");
        }
        if (boundary)
                *boundary = (i_block & (ptrs - 1)) == (final - 1);
        return n;
}

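/*
 * A worked example (a sketch, assuming 4KB blocks, so ptrs == 1024,
 * ptrs_bits == 10 and EXT3_NDIR_BLOCKS == 12):
 *
 *      i_block = 5    -> offsets = { 5 },                       depth 1
 *      i_block = 500  -> offsets = { EXT3_IND_BLOCK, 488 },     depth 2
 *      i_block = 5000 -> 5000 - 12 - 1024 = 3964, so
 *                        offsets = { EXT3_DIND_BLOCK, 3, 892 }, depth 3
 *
 * since 3964 >> 10 == 3 and 3964 & 1023 == 892.
 */
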
/**
 *      ext3_get_branch - read the chain of indirect blocks leading to data
 *      @inode: inode in question
 *      @depth: depth of the chain (1 - direct pointer, etc.)
 *      @offsets: offsets of pointers in inode/indirect blocks
 *      @chain: place to store the result
 *      @err: here we store the error value
 *
 *      Function fills the array of triples <key, p, bh> and returns %NULL
 *      if everything went OK or the pointer to the last filled triple
 *      (incomplete one) otherwise. Upon the return chain[i].key contains
 *      the number of (i+1)-th block in the chain (as it is stored in memory,
 *      i.e. little-endian 32-bit), chain[i].p contains the address of that
 *      number (it points into struct inode for i==0 and into the bh->b_data
 *      for i>0) and chain[i].bh points to the buffer_head of i-th indirect
 *      block for i>0 and NULL for i==0. In other words, it holds the block
 *      numbers of the chain, addresses they were taken from (and where we can
 *      verify that chain did not change) and buffer_heads hosting these
 *      numbers.
 *
 *      Function stops when it stumbles upon zero pointer (absent block)
 *              (pointer to last triple returned, *@err == 0)
 *      or when it gets an IO error reading an indirect block
 *              (ditto, *@err == -EIO)
 *      or when it notices that chain had been changed while it was reading
 *              (ditto, *@err == -EAGAIN)
 *      or when it reads all @depth-1 indirect blocks successfully and finds
 *      the whole chain, all the way to the data (returns %NULL, *err == 0).
 */
static Indirect *ext3_get_branch(struct inode *inode, int depth, int *offsets,
                                 Indirect chain[4], int *err)
{
        struct super_block *sb = inode->i_sb;
        Indirect *p = chain;
        struct buffer_head *bh;

        *err = 0;
        /* i_data is not going away, no lock needed */
        add_chain (chain, NULL, EXT3_I(inode)->i_data + *offsets);
        if (!p->key)
                goto no_block;
        while (--depth) {
                bh = sb_bread(sb, le32_to_cpu(p->key));
                if (!bh)
                        goto failure;
                /* Reader: pointers */
                if (!verify_chain(chain, p))
                        goto changed;
                add_chain(++p, bh, (__le32*)bh->b_data + *++offsets);
                /* Reader: end */
                if (!p->key)
                        goto no_block;
        }
        return NULL;

changed:
        brelse(bh);
        *err = -EAGAIN;
        goto no_block;
failure:
        *err = -EIO;
no_block:
        return p;
}

/**
 *      ext3_find_near - find a place for allocation with sufficient locality
 *      @inode: owner
 *      @ind: descriptor of indirect block.
 *
 *      This function returns the preferred place for block allocation.
 *      It is used when the heuristic for sequential allocation fails.
 *      Rules are:
 *        + if there is a block to the left of our position - allocate near it.
 *        + if pointer will live in indirect block - allocate near that block.
 *        + if pointer will live in inode - allocate in the same
 *          cylinder group.
 *
 * In the latter case we colour the starting block by the caller's PID to
 * prevent it from clashing with concurrent allocations for a different inode
 * in the same block group.   The PID is used here so that functionally related
 * files will be close-by on-disk.
 *
 *      Caller must make sure that @ind is valid and will stay that way.
 */

static unsigned long ext3_find_near(struct inode *inode, Indirect *ind)
{
        struct ext3_inode_info *ei = EXT3_I(inode);
        __le32 *start = ind->bh ? (__le32*) ind->bh->b_data : ei->i_data;
        __le32 *p;
        unsigned long bg_start;
        unsigned long colour;

        /* Try to find previous block */
        for (p = ind->p - 1; p >= start; p--)
                if (*p)
                        return le32_to_cpu(*p);

        /* No such thing, so let's try location of indirect block */
        if (ind->bh)
                return ind->bh->b_blocknr;

        /*
         * Is it going to be referred to from the inode itself? OK, just
         * put it into the same cylinder group then.
         */
        bg_start = (ei->i_block_group * EXT3_BLOCKS_PER_GROUP(inode->i_sb)) +
                le32_to_cpu(EXT3_SB(inode->i_sb)->s_es->s_first_data_block);
        colour = (current->pid % 16) *
                        (EXT3_BLOCKS_PER_GROUP(inode->i_sb) / 16);
        return bg_start + colour;
}

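/*
 * Example of the colouring above (a sketch, assuming 4KB blocks and so
 * EXT3_BLOCKS_PER_GROUP == 32768): a task with pid 4242 gets
 * colour = (4242 % 16) * (32768 / 16) = 2 * 2048 = 4096, so its
 * allocations start 4096 blocks into the group; writers whose pids
 * differ modulo 16 thus start in distinct 1/16th slices of the group.
 */
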
/**
 *      ext3_find_goal - find a preferred place for allocation.
 *      @inode: owner
 *      @block:  block we want
 *      @chain:  chain of indirect blocks
 *      @partial: pointer to the last triple within a chain
 *      @goal:  place to store the result.
 *
 *      Normally this function finds the preferred place for block allocation,
 *      stores it in *@goal and returns zero. If the branch had been changed
 *      under us we return -EAGAIN.
 */

static int ext3_find_goal(struct inode *inode, long block, Indirect chain[4],
                          Indirect *partial, unsigned long *goal)
{
        struct ext3_block_alloc_info *block_i = EXT3_I(inode)->i_block_alloc_info;

        /*
         * try the heuristic for sequential allocation,
         * failing that at least try to get decent locality.
         */
        if (block_i && (block == block_i->last_alloc_logical_block + 1)
                && (block_i->last_alloc_physical_block != 0)) {
                *goal = block_i->last_alloc_physical_block + 1;
                return 0;
        }

        if (verify_chain(chain, partial)) {
                *goal = ext3_find_near(inode, partial);
                return 0;
        }
        return -EAGAIN;
}

/**
 *      ext3_alloc_branch - allocate and set up a chain of blocks.
 *      @inode: owner
 *      @num: depth of the chain (number of blocks to allocate)
 *      @offsets: offsets (in the blocks) to store the pointers to next.
 *      @branch: place to store the chain in.
 *
 *      This function allocates @num blocks, zeroes out all but the last one,
 *      links them into a chain and (if we are synchronous) writes them to
 *      disk.  In other words, it prepares a branch that can be spliced onto
 *      the inode. It stores the information about that chain in the branch[],
 *      in the same format as ext3_get_branch() would do. We are calling it
 *      after we had read the existing part of the chain and partial points to
 *      the last triple of that (one with zero ->key). Upon exit we have the
 *      same picture as after a successful ext3_get_block(), except that in
 *      one place the chain is disconnected - *branch->p is still zero (we
 *      did not set the last link), but branch->key contains the number that
 *      should be placed into *branch->p to fill that gap.
 *
 *      If allocation fails we free all blocks we've allocated (and forget
 *      their buffer_heads) and return the error value from the failed
 *      ext3_alloc_block() (normally -ENOSPC). Otherwise we set the chain
 *      as described above and return 0.
 */

static int ext3_alloc_branch(handle_t *handle, struct inode *inode,
                             int num,
                             unsigned long goal,
                             int *offsets,
                             Indirect *branch)
{
        int blocksize = inode->i_sb->s_blocksize;
        int n = 0, keys = 0;
        int err = 0;
        int i;
        int parent = ext3_alloc_block(handle, inode, goal, &err);

        branch[0].key = cpu_to_le32(parent);
        if (parent) {
                for (n = 1; n < num; n++) {
                        struct buffer_head *bh;
                        /* Allocate the next block */
                        int nr = ext3_alloc_block(handle, inode, parent, &err);
                        if (!nr)
                                break;
                        branch[n].key = cpu_to_le32(nr);
                        keys = n+1;

                        /*
                         * Get buffer_head for parent block, zero it out
                         * and set the pointer to new one, then send
                         * parent to disk.
                         */
                        bh = sb_getblk(inode->i_sb, parent);
                        branch[n].bh = bh;
                        lock_buffer(bh);
                        BUFFER_TRACE(bh, "call get_create_access");
                        err = ext3_journal_get_create_access(handle, bh);
                        if (err) {
                                unlock_buffer(bh);
                                brelse(bh);
                                break;
                        }

                        memset(bh->b_data, 0, blocksize);
                        branch[n].p = (__le32*) bh->b_data + offsets[n];
                        *branch[n].p = branch[n].key;
                        BUFFER_TRACE(bh, "marking uptodate");
                        set_buffer_uptodate(bh);
                        unlock_buffer(bh);

                        BUFFER_TRACE(bh, "call ext3_journal_dirty_metadata");
                        err = ext3_journal_dirty_metadata(handle, bh);
                        if (err)
                                break;

                        parent = nr;
                }
        }
        if (n == num)
                return 0;

        /* Allocation failed, free what we already allocated */
        for (i = 1; i < keys; i++) {
                BUFFER_TRACE(branch[i].bh, "call journal_forget");
                ext3_journal_forget(handle, branch[i].bh);
        }
        for (i = 0; i < keys; i++)
                ext3_free_blocks(handle, inode, le32_to_cpu(branch[i].key), 1);
        return err;
}

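/*
 * To illustrate, after a successful ext3_alloc_branch() with num == 3
 * the picture is (a sketch):
 *
 *      branch[0].key = nr of the first new block (not yet linked in)
 *      branch[1].bh  = buffer of branch[0]'s block, zero-filled except
 *                      for branch[1].key stored at offsets[1]
 *      branch[2].bh  = buffer of branch[1]'s block, zero-filled except
 *                      for branch[2].key stored at offsets[2]
 *
 * Only *branch[0].p remains unset; ext3_splice_branch() below fills it.
 */
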
/**
 *      ext3_splice_branch - splice the allocated branch onto inode.
 *      @inode: owner
 *      @block: (logical) number of block we are adding
 *      @chain: chain of indirect blocks (with a missing link - see
 *              ext3_alloc_branch)
 *      @where: location of missing link
 *      @num:   number of blocks we are adding
 *
 *      This function verifies that chain (up to the missing link) had not
 *      changed, fills the missing link and does all housekeeping needed in
 *      inode (->i_blocks, etc.). In case of success we end up with the full
 *      chain to new block and return 0. Otherwise (== chain had been changed)
 *      we free the new blocks (forgetting their buffer_heads, indeed) and
 *      return -EAGAIN.
 */

static int ext3_splice_branch(handle_t *handle, struct inode *inode, long block,
                              Indirect chain[4], Indirect *where, int num)
{
        int i;
        int err = 0;
        struct ext3_block_alloc_info *block_i = EXT3_I(inode)->i_block_alloc_info;

        /*
         * If we're splicing into a [td]indirect block (as opposed to the
         * inode) then we need to get write access to the [td]indirect block
         * before the splice.
         */
        if (where->bh) {
                BUFFER_TRACE(where->bh, "get_write_access");
                err = ext3_journal_get_write_access(handle, where->bh);
                if (err)
                        goto err_out;
        }
        /* Verify that place we are splicing to is still there and vacant */

        if (!verify_chain(chain, where-1) || *where->p)
                /* Writer: end */
                goto changed;

        /* That's it */

        *where->p = where->key;

        /*
         * update the most recently allocated logical & physical block
         * in i_block_alloc_info, to assist finding the proper goal block
         * for the next allocation
         */
        if (block_i) {
                block_i->last_alloc_logical_block = block;
                block_i->last_alloc_physical_block = le32_to_cpu(where[num-1].key);
        }

        /* We are done with atomic stuff, now do the rest of housekeeping */

        inode->i_ctime = CURRENT_TIME_SEC;
        ext3_mark_inode_dirty(handle, inode);

        /* had we spliced it onto indirect block? */
        if (where->bh) {
                /*
                 * akpm: If we spliced it onto an indirect block, we haven't
                 * altered the inode.  Note however that if it is being spliced
                 * onto an indirect block at the very end of the file (the
                 * file is growing) then we *will* alter the inode to reflect
                 * the new i_size.  But that is not done here - it is done in
                 * generic_commit_write->__mark_inode_dirty->ext3_dirty_inode.
                 */
                jbd_debug(5, "splicing indirect only\n");
                BUFFER_TRACE(where->bh, "call ext3_journal_dirty_metadata");
                err = ext3_journal_dirty_metadata(handle, where->bh);
                if (err)
                        goto err_out;
        } else {
                /*
                 * OK, we spliced it into the inode itself on a direct block.
                 * Inode was dirtied above.
                 */
                jbd_debug(5, "splicing direct\n");
        }
        return err;

changed:
        /*
         * AKPM: if where[i].bh isn't part of the current updating
         * transaction then we explode nastily.  Test this code path.
         */
        jbd_debug(1, "the chain changed: try again\n");
        err = -EAGAIN;

err_out:
        for (i = 1; i < num; i++) {
                BUFFER_TRACE(where[i].bh, "call journal_forget");
                ext3_journal_forget(handle, where[i].bh);
        }
        /* For the normal collision cleanup case, we free up the blocks.
         * On genuine filesystem errors we don't even think about doing
         * that. */
        if (err == -EAGAIN)
                for (i = 0; i < num; i++)
                        ext3_free_blocks(handle, inode,
                                         le32_to_cpu(where[i].key), 1);
        return err;
}

/*
 * Allocation strategy is simple: if we have to allocate something, we will
 * have to go the whole way to leaf. So let's do it before attaching anything
 * to the tree, set linkage between the newborn blocks, write them if sync is
 * required, recheck the path, free and repeat if check fails, otherwise
 * set the last missing link (that will protect us from any truncate-generated
 * removals - all blocks on the path are immune now) and possibly force the
 * write on the parent block.
 * That has a nice additional property: no special recovery from the failed
 * allocations is needed - we simply release blocks and do not touch anything
 * reachable from inode.
 *
 * akpm: `handle' can be NULL if create == 0.
 *
 * The BKL may not be held on entry here.  Be sure to take it early.
 */

static int
ext3_get_block_handle(handle_t *handle, struct inode *inode, sector_t iblock,
                struct buffer_head *bh_result, int create, int extend_disksize)
{
        int err = -EIO;
        int offsets[4];
        Indirect chain[4];
        Indirect *partial;
        unsigned long goal;
        int left;
        int boundary = 0;
        int depth = ext3_block_to_path(inode, iblock, offsets, &boundary);
        struct ext3_inode_info *ei = EXT3_I(inode);

        J_ASSERT(handle != NULL || create == 0);

        if (depth == 0)
                goto out;

reread:
        partial = ext3_get_branch(inode, depth, offsets, chain, &err);

        /* Simplest case - block found, no allocation needed */
        if (!partial) {
                clear_buffer_new(bh_result);
got_it:
                map_bh(bh_result, inode->i_sb, le32_to_cpu(chain[depth-1].key));
                if (boundary)
                        set_buffer_boundary(bh_result);
                /* Clean up and exit */
                partial = chain+depth-1; /* the whole chain */
                goto cleanup;
        }

        /* Next simple case - plain lookup or failed read of indirect block */
        if (!create || err == -EIO) {
cleanup:
                while (partial > chain) {
                        BUFFER_TRACE(partial->bh, "call brelse");
                        brelse(partial->bh);
                        partial--;
                }
                BUFFER_TRACE(bh_result, "returned");
out:
                return err;
        }

        /*
         * Indirect block might be removed by truncate while we were
         * reading it. Handling of that case (forget what we've got and
         * reread) is taken out of the main path.
         */
        if (err == -EAGAIN)
                goto changed;

        goal = 0;
        down(&ei->truncate_sem);

        /* lazy initialize the block allocation info here if necessary */
        if (S_ISREG(inode->i_mode) && (!ei->i_block_alloc_info)) {
                ext3_init_block_alloc_info(inode);
        }

        if (ext3_find_goal(inode, iblock, chain, partial, &goal) < 0) {
                up(&ei->truncate_sem);
                goto changed;
        }

        left = (chain + depth) - partial;

        /*
         * Block out ext3_truncate while we alter the tree
         */
        err = ext3_alloc_branch(handle, inode, left, goal,
                                        offsets+(partial-chain), partial);

        /* The ext3_splice_branch call will free and forget any buffers
         * on the new chain if there is a failure, but that risks using
         * up transaction credits, especially for bitmaps where the
         * credits cannot be returned.  Can we handle this somehow?  We
         * may need to return -EAGAIN upwards in the worst case.  --sct */
        if (!err)
                err = ext3_splice_branch(handle, inode, iblock, chain,
                                         partial, left);
        /* i_disksize growing is protected by truncate_sem
         * don't forget to protect it if you're about to implement
         * concurrent ext3_get_block() -bzzz */
        if (!err && extend_disksize && inode->i_size > ei->i_disksize)
                ei->i_disksize = inode->i_size;
        up(&ei->truncate_sem);
        if (err == -EAGAIN)
                goto changed;
        if (err)
                goto cleanup;

        set_buffer_new(bh_result);
        goto got_it;

changed:
        while (partial > chain) {
                jbd_debug(1, "buffer chain changed, retrying\n");
                BUFFER_TRACE(partial->bh, "brelsing");
                brelse(partial->bh);
                partial--;
        }
        goto reread;
}

static int ext3_get_block(struct inode *inode, sector_t iblock,
                        struct buffer_head *bh_result, int create)
{
        handle_t *handle = NULL;
        int ret;

        if (create) {
                handle = ext3_journal_current_handle();
                J_ASSERT(handle != 0);
        }
        ret = ext3_get_block_handle(handle, inode, iblock,
                                bh_result, create, 1);
        return ret;
}

#define DIO_CREDITS (EXT3_RESERVE_TRANS_BLOCKS + 32)

static int
ext3_direct_io_get_blocks(struct inode *inode, sector_t iblock,
                unsigned long max_blocks, struct buffer_head *bh_result,
                int create)
{
        handle_t *handle = journal_current_handle();
        int ret = 0;

        if (!handle)
                goto get_block;         /* A read */

        if (handle->h_transaction->t_state == T_LOCKED) {
                /*
                 * Huge direct-io writes can hold off commits for long
                 * periods of time.  Let this commit run.
                 */
                ext3_journal_stop(handle);
                handle = ext3_journal_start(inode, DIO_CREDITS);
                if (IS_ERR(handle))
                        ret = PTR_ERR(handle);
                goto get_block;
        }

        if (handle->h_buffer_credits <= EXT3_RESERVE_TRANS_BLOCKS) {
                /*
                 * Getting low on buffer credits...
                 */
                ret = ext3_journal_extend(handle, DIO_CREDITS);
                if (ret > 0) {
                        /*
                         * Couldn't extend the transaction.  Start a new one.
                         */
                        ret = ext3_journal_restart(handle, DIO_CREDITS);
                }
        }

get_block:
        if (ret == 0)
                ret = ext3_get_block_handle(handle, inode, iblock,
                                        bh_result, create, 0);
        bh_result->b_size = (1 << inode->i_blkbits);
        return ret;
}

static int ext3_writepages_get_block(struct inode *inode, sector_t iblock,
                        struct buffer_head *bh, int create)
{
        return ext3_direct_io_get_blocks(inode, iblock, 1, bh, create);
}

/*
 * `handle' can be NULL if create is zero
 */
struct buffer_head *ext3_getblk(handle_t *handle, struct inode * inode,
                                long block, int create, int * errp)
{
        struct buffer_head dummy;
        int fatal = 0, err;

        J_ASSERT(handle != NULL || create == 0);

        dummy.b_state = 0;
        dummy.b_blocknr = -1000;
        buffer_trace_init(&dummy.b_history);
        *errp = ext3_get_block_handle(handle, inode, block, &dummy, create, 1);
        if (!*errp && buffer_mapped(&dummy)) {
                struct buffer_head *bh;
                bh = sb_getblk(inode->i_sb, dummy.b_blocknr);
                if (buffer_new(&dummy)) {
                        J_ASSERT(create != 0);
                        J_ASSERT(handle != 0);

                        /* Now that we do not always journal data, we
                           should keep in mind whether this should
                           always journal the new buffer as metadata.
                           For now, regular file writes use
                           ext3_get_block instead, so it's not a
                           problem. */
                        lock_buffer(bh);
                        BUFFER_TRACE(bh, "call get_create_access");
                        fatal = ext3_journal_get_create_access(handle, bh);
                        if (!fatal && !buffer_uptodate(bh)) {
                                memset(bh->b_data, 0, inode->i_sb->s_blocksize);
                                set_buffer_uptodate(bh);
                        }
                        unlock_buffer(bh);
                        BUFFER_TRACE(bh, "call ext3_journal_dirty_metadata");
                        err = ext3_journal_dirty_metadata(handle, bh);
                        if (!fatal)
                                fatal = err;
                } else {
                        BUFFER_TRACE(bh, "not a new buffer");
                }
                if (fatal) {
                        *errp = fatal;
                        brelse(bh);
                        bh = NULL;
                }
                return bh;
        }
        return NULL;
}

struct buffer_head *ext3_bread(handle_t *handle, struct inode * inode,
                               int block, int create, int *err)
{
        struct buffer_head * bh;

        bh = ext3_getblk(handle, inode, block, create, err);
        if (!bh)
                return bh;
        if (buffer_uptodate(bh))
                return bh;
        ll_rw_block(READ, 1, &bh);
        wait_on_buffer(bh);
        if (buffer_uptodate(bh))
                return bh;
        put_bh(bh);
        *err = -EIO;
        return NULL;
}

static int walk_page_buffers(   handle_t *handle,
                                struct buffer_head *head,
                                unsigned from,
                                unsigned to,
                                int *partial,
                                int (*fn)(      handle_t *handle,
                                                struct buffer_head *bh))
{
        struct buffer_head *bh;
        unsigned block_start, block_end;
        unsigned blocksize = head->b_size;
        int err, ret = 0;
        struct buffer_head *next;

        for (   bh = head, block_start = 0;
                ret == 0 && (bh != head || !block_start);
                block_start = block_end, bh = next)
        {
                next = bh->b_this_page;
                block_end = block_start + blocksize;
                if (block_end <= from || block_start >= to) {
                        if (partial && !buffer_uptodate(bh))
                                *partial = 1;
                        continue;
                }
                err = (*fn)(handle, bh);
                if (!ret)
                        ret = err;
        }
        return ret;
}

/*
 * To preserve ordering, it is essential that the hole instantiation and
 * the data write be encapsulated in a single transaction.  We cannot
 * close off a transaction and start a new one between the ext3_get_block()
 * and the commit_write().  So doing the journal_start at the start of
 * prepare_write() is the right place.
 *
 * Also, this function can nest inside ext3_writepage() ->
 * block_write_full_page(). In that case, we *know* that ext3_writepage()
 * has generated enough buffer credits to do the whole page.  So we won't
 * block on the journal in that case, which is good, because the caller may
 * be PF_MEMALLOC.
 *
 * By accident, ext3 can be reentered when a transaction is open via
 * quota file writes.  If we were to commit the transaction while thus
 * reentered, there can be a deadlock - we would be holding a quota
 * lock, and the commit would never complete if another thread had a
 * transaction open and was blocking on the quota lock - a ranking
 * violation.
 *
 * So what we do is to rely on the fact that journal_stop/journal_start
 * will _not_ run commit under these circumstances because handle->h_ref
 * is elevated.  We'll still have enough credits for the tiny quotafile
 * write.
 */

static int do_journal_get_write_access(handle_t *handle,
                                       struct buffer_head *bh)
{
        if (!buffer_mapped(bh) || buffer_freed(bh))
                return 0;
        return ext3_journal_get_write_access(handle, bh);
}

static int ext3_prepare_write(struct file *file, struct page *page,
                              unsigned from, unsigned to)
{
        struct inode *inode = page->mapping->host;
        int ret, needed_blocks = ext3_writepage_trans_blocks(inode);
        handle_t *handle;
        int retries = 0;

retry:
        handle = ext3_journal_start(inode, needed_blocks);
        if (IS_ERR(handle)) {
                ret = PTR_ERR(handle);
                goto out;
        }
        if (test_opt(inode->i_sb, NOBH))
                ret = nobh_prepare_write(page, from, to, ext3_get_block);
        else
                ret = block_prepare_write(page, from, to, ext3_get_block);
        if (ret)
                goto prepare_write_failed;

        if (ext3_should_journal_data(inode)) {
                ret = walk_page_buffers(handle, page_buffers(page),
                                from, to, NULL, do_journal_get_write_access);
        }
prepare_write_failed:
        if (ret)
                ext3_journal_stop(handle);
        if (ret == -ENOSPC && ext3_should_retry_alloc(inode->i_sb, &retries))
                goto retry;
out:
        return ret;
}

int
ext3_journal_dirty_data(handle_t *handle, struct buffer_head *bh)
{
        int err = journal_dirty_data(handle, bh);
        if (err)
                ext3_journal_abort_handle(__FUNCTION__, __FUNCTION__,
                                                bh, handle, err);
        return err;
}

/* For commit_write() in data=journal mode */
static int commit_write_fn(handle_t *handle, struct buffer_head *bh)
{
        if (!buffer_mapped(bh) || buffer_freed(bh))
                return 0;
        set_buffer_uptodate(bh);
        return ext3_journal_dirty_metadata(handle, bh);
}

/*
 * We need to pick up the new inode size which generic_commit_write gave us.
 * `file' can be NULL - eg, when called from page_symlink().
 *
 * ext3 never places buffers on inode->i_mapping->private_list; metadata
 * buffers are managed internally.
 */

static int ext3_ordered_commit_write(struct file *file, struct page *page,
                             unsigned from, unsigned to)
{
        handle_t *handle = ext3_journal_current_handle();
        struct inode *inode = page->mapping->host;
        int ret = 0, ret2;

        ret = walk_page_buffers(handle, page_buffers(page),
                from, to, NULL, ext3_journal_dirty_data);

        if (ret == 0) {
                /*
                 * generic_commit_write() will run mark_inode_dirty() if i_size
                 * changes.  So let's piggyback the i_disksize mark_inode_dirty
                 * into that.
                 */
                loff_t new_i_size;

                new_i_size = ((loff_t)page->index << PAGE_CACHE_SHIFT) + to;
                if (new_i_size > EXT3_I(inode)->i_disksize)
                        EXT3_I(inode)->i_disksize = new_i_size;
                ret = generic_commit_write(file, page, from, to);
        }
        ret2 = ext3_journal_stop(handle);
        if (!ret)
                ret = ret2;
        return ret;
}

static int ext3_writeback_commit_write(struct file *file, struct page *page,
                             unsigned from, unsigned to)
{
        handle_t *handle = ext3_journal_current_handle();
        struct inode *inode = page->mapping->host;
        int ret = 0, ret2;
        loff_t new_i_size;

        new_i_size = ((loff_t)page->index << PAGE_CACHE_SHIFT) + to;
        if (new_i_size > EXT3_I(inode)->i_disksize)
                EXT3_I(inode)->i_disksize = new_i_size;

        if (test_opt(inode->i_sb, NOBH))
                ret = nobh_commit_write(file, page, from, to);
        else
                ret = generic_commit_write(file, page, from, to);

        ret2 = ext3_journal_stop(handle);
        if (!ret)
                ret = ret2;
        return ret;
}

static int ext3_journalled_commit_write(struct file *file,
                        struct page *page, unsigned from, unsigned to)
{
        handle_t *handle = ext3_journal_current_handle();
        struct inode *inode = page->mapping->host;
        int ret = 0, ret2;
        int partial = 0;
        loff_t pos;

        /*
         * Here we duplicate the generic_commit_write() functionality
         */
        pos = ((loff_t)page->index << PAGE_CACHE_SHIFT) + to;

        ret = walk_page_buffers(handle, page_buffers(page), from,
                                to, &partial, commit_write_fn);
        if (!partial)
                SetPageUptodate(page);
        if (pos > inode->i_size)
                i_size_write(inode, pos);
        EXT3_I(inode)->i_state |= EXT3_STATE_JDATA;
        if (inode->i_size > EXT3_I(inode)->i_disksize) {
                EXT3_I(inode)->i_disksize = inode->i_size;
                ret2 = ext3_mark_inode_dirty(handle, inode);
                if (!ret)
                        ret = ret2;
        }
        ret2 = ext3_journal_stop(handle);
        if (!ret)
                ret = ret2;
        return ret;
}

/*
 * bmap() is special.  It gets used by applications such as lilo and by
 * the swapper to find the on-disk block of a specific piece of data.
 *
 * Naturally, this is dangerous if the block concerned is still in the
 * journal.  If somebody makes a swapfile on an ext3 data-journaling
 * filesystem and enables swap, then they may get a nasty shock when the
 * data getting swapped to that swapfile suddenly gets overwritten by
 * the original zeros written out previously to the journal and
 * awaiting writeback in the kernel's buffer cache.
 *
 * So, if we see any bmap calls here on a modified, data-journaled file,
 * take extra steps to flush any blocks which might be in the cache.
 */
static sector_t ext3_bmap(struct address_space *mapping, sector_t block)
{
        struct inode *inode = mapping->host;
        journal_t *journal;
        int err;

        if (EXT3_I(inode)->i_state & EXT3_STATE_JDATA) {
                /*
                 * This is a REALLY heavyweight approach, but the use of
                 * bmap on dirty files is expected to be extremely rare:
                 * only if we run lilo or swapon on a freshly made file
                 * do we expect this to happen.
                 *
                 * (bmap requires CAP_SYS_RAWIO so this does not
                 * represent an unprivileged user DOS attack --- we'd be
                 * in trouble if mortal users could trigger this path at
                 * will.)
                 *
                 * NB. EXT3_STATE_JDATA is not set on files other than
                 * regular files.  If somebody wants to bmap a directory
                 * or symlink and gets confused because the buffer
                 * hasn't yet been flushed to disk, they deserve
                 * everything they get.
                 */

                EXT3_I(inode)->i_state &= ~EXT3_STATE_JDATA;
                journal = EXT3_JOURNAL(inode);
                journal_lock_updates(journal);
                err = journal_flush(journal);
                journal_unlock_updates(journal);

                if (err)
                        return 0;
        }

        return generic_block_bmap(mapping, block, ext3_get_block);
}

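/*
 * For reference, the usual userspace route into this path is the FIBMAP
 * ioctl (this is what lilo-style tools do).  A minimal sketch of such a
 * caller - run with CAP_SYS_RAWIO, since FIBMAP requires it:
 *
 *      int fd = open("/boot/vmlinuz", O_RDONLY);
 *      int blk = 0;                    (logical block number in)
 *      ioctl(fd, FIBMAP, &blk);        (physical block number out)
 */
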
static int bget_one(handle_t *handle, struct buffer_head *bh)
{
        get_bh(bh);
        return 0;
}

static int bput_one(handle_t *handle, struct buffer_head *bh)
{
        put_bh(bh);
        return 0;
}

static int journal_dirty_data_fn(handle_t *handle, struct buffer_head *bh)
{
        if (buffer_mapped(bh))
                return ext3_journal_dirty_data(handle, bh);
        return 0;
}

/*
 * Note that we always start a transaction even if we're not journalling
 * data.  This is to preserve ordering: any hole instantiation within
 * __block_write_full_page -> ext3_get_block() should be journalled
 * along with the data so we don't crash and then get metadata which
 * refers to old data.
 *
 * In all journalling modes block_write_full_page() will start the I/O.
 *
 * Problem:
 *
 *      ext3_writepage() -> kmalloc() -> __alloc_pages() -> page_launder() ->
 *              ext3_writepage()
 *
 * Similar for:
 *
 *      ext3_file_write() -> generic_file_write() -> __alloc_pages() -> ...
 *
 * Same applies to ext3_get_block().  We will deadlock on various things like
 * lock_journal and i_truncate_sem.
 *
 * Setting PF_MEMALLOC here doesn't work - too many internal memory
 * allocations fail.
 *
 * 16May01: If we're reentered then journal_current_handle() will be
 *          non-zero. We simply *return*.
 *
 * 1 July 2001: @@@ FIXME:
 *   In journalled data mode, a data buffer may be metadata against the
 *   current transaction.  But the same file is part of a shared mapping
 *   and someone does a writepage() on it.
 *
 *   We will move the buffer onto the async_data list, but *after* it has
 *   been dirtied. So there's a small window where we have dirty data on
 *   BJ_Metadata.
 *
 *   Note that this only applies to the last partial page in the file.  The
 *   bit which block_write_full_page() uses prepare/commit for.  (That's
 *   broken code anyway: it's wrong for msync()).
 *
 *   It's a rare case: it affects the final partial page, for journalled
 *   data, where the file is subject to both write() and writepage() in the
 *   same transaction.  To fix it we'll need a custom block_write_full_page().
 *   We'll probably need that anyway for journalling writepage() output.
 *
 * We don't honour synchronous mounts for writepage().  That would be
 * disastrous.  Any write() or metadata operation will sync the fs for
 * us.
 *
 * AKPM2: if all the page's buffers are mapped to disk and !data=journal,
 * we don't need to open a transaction here.
 */
static int ext3_ordered_writepage(struct page *page,
                        struct writeback_control *wbc)
{
        struct inode *inode = page->mapping->host;
        struct buffer_head *page_bufs;
        handle_t *handle = NULL;
        int ret = 0;
        int err;

        J_ASSERT(PageLocked(page));

        /*
         * We give up here if we're reentered, because it might be for a
         * different filesystem.
         */
        if (ext3_journal_current_handle())
                goto out_fail;

        handle = ext3_journal_start(inode, ext3_writepage_trans_blocks(inode));

        if (IS_ERR(handle)) {
                ret = PTR_ERR(handle);
                goto out_fail;
        }

        if (!page_has_buffers(page)) {
                create_empty_buffers(page, inode->i_sb->s_blocksize,
                                (1 << BH_Dirty)|(1 << BH_Uptodate));
        }
        page_bufs = page_buffers(page);
        walk_page_buffers(handle, page_bufs, 0,
                        PAGE_CACHE_SIZE, NULL, bget_one);

        ret = block_write_full_page(page, ext3_get_block, wbc);

        /*
         * The page can become unlocked at any point now, and
         * truncate can then come in and change things.  So we
         * can't touch *page from now on.  But *page_bufs is
         * safe due to elevated refcount.
         */

        /*
         * And attach them to the current transaction.  But only if
         * block_write_full_page() succeeded.  Otherwise they are unmapped,
         * and generally junk.
         */
        if (ret == 0) {
                err = walk_page_buffers(handle, page_bufs, 0, PAGE_CACHE_SIZE,
                                        NULL, journal_dirty_data_fn);
                if (!ret)
                        ret = err;
        }
        walk_page_buffers(handle, page_bufs, 0,
                        PAGE_CACHE_SIZE, NULL, bput_one);
        err = ext3_journal_stop(handle);
        if (!ret)
                ret = err;
        return ret;

out_fail:
        redirty_page_for_writepage(wbc, page);
        unlock_page(page);
        return ret;
}

static int
ext3_writeback_writepage_helper(struct page *page,
                                struct writeback_control *wbc)
{
        return block_write_full_page(page, ext3_get_block, wbc);
}

static int
ext3_writeback_writepages(struct address_space *mapping,
                                struct writeback_control *wbc)
{
        struct inode *inode = mapping->host;
        handle_t *handle = NULL;
        int err, ret = 0;

        if (!mapping_tagged(mapping, PAGECACHE_TAG_DIRTY))
                return ret;

        handle = ext3_journal_start(inode, ext3_writepage_trans_blocks(inode));
        if (IS_ERR(handle)) {
                ret = PTR_ERR(handle);
                return ret;
        }

        ret = __mpage_writepages(mapping, wbc, ext3_writepages_get_block,
                                        ext3_writeback_writepage_helper);

        /*
         * Need to reacquire the handle since ext3_writepages_get_block()
         * can restart the handle
         */
        handle = journal_current_handle();

        err = ext3_journal_stop(handle);
        if (!ret)
                ret = err;
        return ret;
}

static int ext3_writeback_writepage(struct page *page,
                                struct writeback_control *wbc)
{
        struct inode *inode = page->mapping->host;
        handle_t *handle = NULL;
        int ret = 0;
        int err;

        if (ext3_journal_current_handle())
                goto out_fail;

        handle = ext3_journal_start(inode, ext3_writepage_trans_blocks(inode));
        if (IS_ERR(handle)) {
                ret = PTR_ERR(handle);
                goto out_fail;
        }

        if (test_opt(inode->i_sb, NOBH))
                ret = nobh_writepage(page, ext3_get_block, wbc);
        else
                ret = block_write_full_page(page, ext3_get_block, wbc);

        err = ext3_journal_stop(handle);
        if (!ret)
                ret = err;
        return ret;

out_fail:
        redirty_page_for_writepage(wbc, page);
        unlock_page(page);
        return ret;
}

1420 static int ext3_journalled_writepage(struct page *page,
1421                                 struct writeback_control *wbc)
1422 {
1423         struct inode *inode = page->mapping->host;
1424         handle_t *handle = NULL;
1425         int ret = 0;
1426         int err;
1427
1428         if (ext3_journal_current_handle())
1429                 goto no_write;
1430
1431         handle = ext3_journal_start(inode, ext3_writepage_trans_blocks(inode));
1432         if (IS_ERR(handle)) {
1433                 ret = PTR_ERR(handle);
1434                 goto no_write;
1435         }
1436
1437         if (!page_has_buffers(page) || PageChecked(page)) {
1438                 /*
1439                  * It's mmapped pagecache.  Add buffers and journal it.  There
1440                  * doesn't seem much point in redirtying the page here.
1441                  */
1442                 ClearPageChecked(page);
1443                 ret = block_prepare_write(page, 0, PAGE_CACHE_SIZE,
1444                                         ext3_get_block);
1445                 if (ret != 0)
1446                         goto out_unlock;
1447                 ret = walk_page_buffers(handle, page_buffers(page), 0,
1448                         PAGE_CACHE_SIZE, NULL, do_journal_get_write_access);
1449
1450                 err = walk_page_buffers(handle, page_buffers(page), 0,
1451                                 PAGE_CACHE_SIZE, NULL, commit_write_fn);
1452                 if (ret == 0)
1453                         ret = err;
1454                 EXT3_I(inode)->i_state |= EXT3_STATE_JDATA;
1455                 unlock_page(page);
1456         } else {
1457                 /*
1458                  * It may be a page full of checkpoint-mode buffers.  We don't
1459                  * really know unless we go poke around in the buffer_heads.
1460                  * But block_write_full_page will do the right thing.
1461                  */
1462                 ret = block_write_full_page(page, ext3_get_block, wbc);
1463         }
1464         err = ext3_journal_stop(handle);
1465         if (!ret)
1466                 ret = err;
1467 out:
1468         return ret;
1469
1470 no_write:
1471         redirty_page_for_writepage(wbc, page);
1472 out_unlock:
1473         unlock_page(page);
1474         goto out;
1475 }
1476
1477 static int ext3_readpage(struct file *file, struct page *page)
1478 {
1479         return mpage_readpage(page, ext3_get_block);
1480 }
1481
1482 static int
1483 ext3_readpages(struct file *file, struct address_space *mapping,
1484                 struct list_head *pages, unsigned nr_pages)
1485 {
1486         return mpage_readpages(mapping, pages, nr_pages, ext3_get_block);
1487 }
1488
1489 static int ext3_invalidatepage(struct page *page, unsigned long offset)
1490 {
1491         journal_t *journal = EXT3_JOURNAL(page->mapping->host);
1492
1493         /*
1494          * If it's a full truncate we just forget about the pending dirtying
1495          */
1496         if (offset == 0)
1497                 ClearPageChecked(page);
1498
1499         return journal_invalidatepage(journal, page, offset);
1500 }
1501
1502 static int ext3_releasepage(struct page *page, int wait)
1503 {
1504         journal_t *journal = EXT3_JOURNAL(page->mapping->host);
1505
1506         WARN_ON(PageChecked(page));
1507         if (!page_has_buffers(page))
1508                 return 0;
1509         return journal_try_to_free_buffers(journal, page, wait);
1510 }
1511
1512 /*
1513  * If the O_DIRECT write will extend the file then add this inode to the
1514  * orphan list, so that recovery will truncate it back to the original size
1515  * if the machine crashes during the write.
1516  *
1517  * If the O_DIRECT write is instantiating holes inside i_size and the machine
1518  * crashes then stale disk data _may_ be exposed inside the file.
1519  */
1520 static ssize_t ext3_direct_IO(int rw, struct kiocb *iocb,
1521                         const struct iovec *iov, loff_t offset,
1522                         unsigned long nr_segs)
1523 {
1524         struct file *file = iocb->ki_filp;
1525         struct inode *inode = file->f_mapping->host;
1526         struct ext3_inode_info *ei = EXT3_I(inode);
1527         handle_t *handle = NULL;
1528         ssize_t ret;
1529         int orphan = 0;
1530         size_t count = iov_length(iov, nr_segs);
1531
1532         if (rw == WRITE) {
1533                 loff_t final_size = offset + count;
1534
1535                 handle = ext3_journal_start(inode, DIO_CREDITS);
1536                 if (IS_ERR(handle)) {
1537                         ret = PTR_ERR(handle);
1538                         goto out;
1539                 }
1540                 if (final_size > inode->i_size) {
1541                         ret = ext3_orphan_add(handle, inode);
1542                         if (ret)
1543                                 goto out_stop;
1544                         orphan = 1;
1545                         ei->i_disksize = inode->i_size;
1546                 }
1547         }
1548
1549         ret = blockdev_direct_IO(rw, iocb, inode, inode->i_sb->s_bdev, iov, 
1550                                  offset, nr_segs,
1551                                  ext3_direct_io_get_blocks, NULL);
1552
1553         /*
1554          * Reacquire the handle: ext3_direct_io_get_blocks() can restart the
1555          * transaction
1556          */
1557         handle = journal_current_handle();
1558
1559 out_stop:
1560         if (handle) {
1561                 int err;
1562
1563                 if (orphan && inode->i_nlink)
1564                         ext3_orphan_del(handle, inode);
1565                 if (orphan && ret > 0) {
1566                         loff_t end = offset + ret;
1567                         if (end > inode->i_size) {
1568                                 ei->i_disksize = end;
1569                                 i_size_write(inode, end);
1570                                 /*
1571                                  * We're going to return a positive `ret'
1572                                  * here due to non-zero-length I/O, so there's
1573                                  * no way of reporting error returns from
1574                                  * ext3_mark_inode_dirty() to userspace.  So
1575                                  * ignore it.
1576                                  */
1577                                 ext3_mark_inode_dirty(handle, inode);
1578                         }
1579                 }
1580                 err = ext3_journal_stop(handle);
1581                 if (ret == 0)
1582                         ret = err;
1583         }
1584 out:
1585         return ret;
1586 }
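
/*
 * Illustrative timeline of the orphan-list protocol used above for a
 * size-extending O_DIRECT write (a sketch of the steps already in
 * ext3_direct_IO(), not additional behaviour):
 *
 *   ext3_orphan_add()      - inode recorded on the on-disk orphan list
 *   blockdev_direct_IO()   - blocks instantiated beyond the old i_size
 *   i_size_write()         - new size made visible, inode marked dirty
 *   ext3_orphan_del()      - orphan record dropped once the size is safe
 *
 * A crash before the final step leaves the inode on the orphan list, and
 * recovery truncates it back to the original size.
 */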
1587
1588 /*
1589  * Pages can be marked dirty completely asynchronously from ext3's journalling
1590  * activity.  By filemap_sync_pte(), try_to_unmap_one(), etc.  We cannot do
1591  * much here because ->set_page_dirty is called under VFS locks.  The page is
1592  * not necessarily locked.
1593  *
1594  * We cannot just dirty the page and leave attached buffers clean, because the
1595  * buffers' dirty state is "definitive".  We cannot just set the buffers dirty
1596  * or jbddirty because all the journalling code will explode.
1597  *
1598  * So what we do is to mark the page "pending dirty" and next time writepage
1599  * is called, propagate that into the buffers appropriately.
1600  */
1601 static int ext3_journalled_set_page_dirty(struct page *page)
1602 {
1603         SetPageChecked(page);
1604         return __set_page_dirty_nobuffers(page);
1605 }
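
/*
 * Note: the PageChecked flag set here is consumed in two places above:
 * ext3_journalled_writepage() treats a checked page as "mmapped
 * pagecache", re-attaching buffers and journalling the whole page, and
 * ext3_invalidatepage() clears the pending dirtying on a full truncate.
 */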
1606
1607 static struct address_space_operations ext3_ordered_aops = {
1608         .readpage       = ext3_readpage,
1609         .readpages      = ext3_readpages,
1610         .writepage      = ext3_ordered_writepage,
1611         .sync_page      = block_sync_page,
1612         .prepare_write  = ext3_prepare_write,
1613         .commit_write   = ext3_ordered_commit_write,
1614         .bmap           = ext3_bmap,
1615         .invalidatepage = ext3_invalidatepage,
1616         .releasepage    = ext3_releasepage,
1617         .direct_IO      = ext3_direct_IO,
1618 };
1619
1620 static struct address_space_operations ext3_writeback_aops = {
1621         .readpage       = ext3_readpage,
1622         .readpages      = ext3_readpages,
1623         .writepage      = ext3_writeback_writepage,
1624         .writepages     = ext3_writeback_writepages,
1625         .sync_page      = block_sync_page,
1626         .prepare_write  = ext3_prepare_write,
1627         .commit_write   = ext3_writeback_commit_write,
1628         .bmap           = ext3_bmap,
1629         .invalidatepage = ext3_invalidatepage,
1630         .releasepage    = ext3_releasepage,
1631         .direct_IO      = ext3_direct_IO,
1632 };
1633
1634 static struct address_space_operations ext3_journalled_aops = {
1635         .readpage       = ext3_readpage,
1636         .readpages      = ext3_readpages,
1637         .writepage      = ext3_journalled_writepage,
1638         .sync_page      = block_sync_page,
1639         .prepare_write  = ext3_prepare_write,
1640         .commit_write   = ext3_journalled_commit_write,
1641         .set_page_dirty = ext3_journalled_set_page_dirty,
1642         .bmap           = ext3_bmap,
1643         .invalidatepage = ext3_invalidatepage,
1644         .releasepage    = ext3_releasepage,
1645 };
1646
1647 void ext3_set_aops(struct inode *inode)
1648 {
1649         if (ext3_should_order_data(inode))
1650                 inode->i_mapping->a_ops = &ext3_ordered_aops;
1651         else if (ext3_should_writeback_data(inode))
1652                 inode->i_mapping->a_ops = &ext3_writeback_aops;
1653         else
1654                 inode->i_mapping->a_ops = &ext3_journalled_aops;
1655 }
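
/*
 * The three address_space_operations sets above correspond to the data=
 * journalling modes (a summary, assuming the usual mount-option names):
 *
 *   data=ordered   -> ext3_ordered_aops    (the default)
 *   data=writeback -> ext3_writeback_aops
 *   data=journal   -> ext3_journalled_aops
 */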
1656
1657 /*
1658  * ext3_block_truncate_page() zeroes out a mapping from file offset `from'
1659  * up to the end of the block which corresponds to `from'.
1660  * This is required during truncate. We need to physically zero the tail end
1661  * of that block so it doesn't yield old data if the file is later grown.
1662  */
1663 static int ext3_block_truncate_page(handle_t *handle, struct page *page,
1664                 struct address_space *mapping, loff_t from)
1665 {
1666         unsigned long index = from >> PAGE_CACHE_SHIFT;
1667         unsigned offset = from & (PAGE_CACHE_SIZE-1);
1668         unsigned blocksize, iblock, length, pos;
1669         struct inode *inode = mapping->host;
1670         struct buffer_head *bh;
1671         int err = 0;
1672         void *kaddr;
1673
1674         blocksize = inode->i_sb->s_blocksize;
1675         length = blocksize - (offset & (blocksize - 1));
1676         iblock = index << (PAGE_CACHE_SHIFT - inode->i_sb->s_blocksize_bits);
1677
1678         /*
1679          * For the "nobh" option, we can only work if we don't need to
1680          * read in the page - otherwise we create buffers to do the IO.
1681          */
1682         if (!page_has_buffers(page) && test_opt(inode->i_sb, NOBH)) {
1683                 if (PageUptodate(page)) {
1684                         kaddr = kmap_atomic(page, KM_USER0);
1685                         memset(kaddr + offset, 0, length);
1686                         flush_dcache_page(page);
1687                         kunmap_atomic(kaddr, KM_USER0);
1688                         set_page_dirty(page);
1689                         goto unlock;
1690                 }
1691         }
1692
1693         if (!page_has_buffers(page))
1694                 create_empty_buffers(page, blocksize, 0);
1695
1696         /* Find the buffer that contains "offset" */
1697         bh = page_buffers(page);
1698         pos = blocksize;
1699         while (offset >= pos) {
1700                 bh = bh->b_this_page;
1701                 iblock++;
1702                 pos += blocksize;
1703         }
1704
1705         err = 0;
1706         if (buffer_freed(bh)) {
1707                 BUFFER_TRACE(bh, "freed: skip");
1708                 goto unlock;
1709         }
1710
1711         if (!buffer_mapped(bh)) {
1712                 BUFFER_TRACE(bh, "unmapped");
1713                 ext3_get_block(inode, iblock, bh, 0);
1714                 /* unmapped? It's a hole - nothing to do */
1715                 if (!buffer_mapped(bh)) {
1716                         BUFFER_TRACE(bh, "still unmapped");
1717                         goto unlock;
1718                 }
1719         }
1720
1721         /* Ok, it's mapped. Make sure it's up-to-date */
1722         if (PageUptodate(page))
1723                 set_buffer_uptodate(bh);
1724
1725         if (!buffer_uptodate(bh)) {
1726                 err = -EIO;
1727                 ll_rw_block(READ, 1, &bh);
1728                 wait_on_buffer(bh);
1729                 /* Uhhuh. Read error. Complain and punt. */
1730                 if (!buffer_uptodate(bh))
1731                         goto unlock;
1732         }
1733
1734         if (ext3_should_journal_data(inode)) {
1735                 BUFFER_TRACE(bh, "get write access");
1736                 err = ext3_journal_get_write_access(handle, bh);
1737                 if (err)
1738                         goto unlock;
1739         }
1740
1741         kaddr = kmap_atomic(page, KM_USER0);
1742         memset(kaddr + offset, 0, length);
1743         flush_dcache_page(page);
1744         kunmap_atomic(kaddr, KM_USER0);
1745
1746         BUFFER_TRACE(bh, "zeroed end of block");
1747
1748         err = 0;
1749         if (ext3_should_journal_data(inode)) {
1750                 err = ext3_journal_dirty_metadata(handle, bh);
1751         } else {
1752                 if (ext3_should_order_data(inode))
1753                         err = ext3_journal_dirty_data(handle, bh);
1754                 mark_buffer_dirty(bh);
1755         }
1756
1757 unlock:
1758         unlock_page(page);
1759         page_cache_release(page);
1760         return err;
1761 }
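
/*
 * Worked example for the arithmetic above, assuming 4K blocks and 4K
 * pages: zeroing from file offset 5000 gives
 *
 *   index  = 5000 >> 12           = 1     (page index)
 *   offset = 5000 & 4095          = 904   (offset within the page)
 *   length = 4096 - (904 & 4095)  = 3192  (bytes zeroed, to end of block)
 *   iblock = 1 << (12 - 12)       = 1     (first block of that page)
 */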
1762
1763 /*
1764  * Probably it should be a library function... search for first non-zero word
1765  * or memcmp with zero_page, whatever is better for particular architecture.
1766  * Linus?
1767  */
1768 static inline int all_zeroes(__le32 *p, __le32 *q)
1769 {
1770         while (p < q)
1771                 if (*p++)
1772                         return 0;
1773         return 1;
1774 }
1775
1776 /**
1777  *      ext3_find_shared - find the indirect blocks for partial truncation.
1778  *      @inode:   inode in question
1779  *      @depth:   depth of the affected branch
1780  *      @offsets: offsets of pointers in that branch (see ext3_block_to_path)
1781  *      @chain:   place to store the pointers to partial indirect blocks
1782  *      @top:     place to the (detached) top of branch
1783  *
1784  *      This is a helper function used by ext3_truncate().
1785  *
1786  *      When we do truncate() we may have to clean the ends of several
1787  *      indirect blocks but leave the blocks themselves alive. A block is
1788  *      partially truncated if some data below the new i_size is referred
1789  *      to from it (and it is on the path to the first completely truncated
1790  *      data block, indeed).  We have to free the top of that path along
1791  *      with everything to the right of the path. Since no allocation
1792  *      past the truncation point is possible until ext3_truncate()
1793  *      finishes, we may safely do the latter, but the top of the branch may
1794  *      require special attention - pageout below the truncation point
1795  *      might try to populate it.
1796  *
1797  *      We atomically detach the top of branch from the tree, store the
1798  *      block number of its root in *@top, pointers to buffer_heads of
1799  *      partially truncated blocks - in @chain[].bh and pointers to
1800  *      their last elements that should not be removed - in
1801  *      @chain[].p. Return value is the pointer to last filled element
1802  *      of @chain.
1803  *
1804  *      It is left to the caller to do the actual freeing of subtrees:
1805  *              a) free the subtree starting from *@top
1806  *              b) free the subtrees whose roots are stored in
1807  *                      (@chain[i].p+1 .. end of @chain[i].bh->b_data)
1808  *              c) free the subtrees growing from the inode past the @chain[0].
1809  *                      (no partially truncated stuff there).  */
1810
1811 static Indirect *ext3_find_shared(struct inode *inode,
1812                                 int depth,
1813                                 int offsets[4],
1814                                 Indirect chain[4],
1815                                 __le32 *top)
1816 {
1817         Indirect *partial, *p;
1818         int k, err;
1819
1820         *top = 0;
1821         /* Make k index the deepest non-null offset + 1 */
1822         for (k = depth; k > 1 && !offsets[k-1]; k--)
1823                 ;
1824         partial = ext3_get_branch(inode, k, offsets, chain, &err);
1825         /* Writer: pointers */
1826         if (!partial)
1827                 partial = chain + k-1;
1828         /*
1829          * If the branch acquired a continuation after we looked at it -
1830          * fine, it should all survive and the (new) top doesn't belong to us.
1831          */
1832         if (!partial->key && *partial->p)
1833                 /* Writer: end */
1834                 goto no_top;
1835         for (p=partial; p>chain && all_zeroes((__le32*)p->bh->b_data,p->p); p--)
1836                 ;
1837         /*
1838          * OK, we've found the last block that must survive. The rest of our
1839          * branch should be detached before unlocking. However, if that rest
1840          * of branch is all ours and does not grow immediately from the inode
1841          * it's easier to cheat and just decrement partial->p.
1842          */
1843         if (p == chain + k - 1 && p > chain) {
1844                 p->p--;
1845         } else {
1846                 *top = *p->p;
1847                 /* Nope, don't do this in ext3.  Must leave the tree intact */
1848 #if 0
1849                 *p->p = 0;
1850 #endif
1851         }
1852         /* Writer: end */
1853
1854         while (partial > p) {
1855                 brelse(partial->bh);
1856                 partial--;
1857         }
1858
1859 no_top:
1860         return partial;
1861 }
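
/*
 * Illustration (assuming 4K blocks, i.e. 1024 pointers per indirect
 * block): if the truncation boundary has path offsets {EXT3_DIND_BLOCK,
 * 3, 892}, the surviving branch runs inode -> DIND slot 3 -> IND slot
 * 892.  Pointers 4..1023 of the double-indirect block and 893..1023 of
 * the indirect block then head subtrees that are freed whole - case (b)
 * in the comment above ext3_find_shared().
 */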
1862
1863 /*
1864  * Zero a number of block pointers in either an inode or an indirect block.
1865  * If we restart the transaction we must again get write access to the
1866  * indirect block for further modification.
1867  *
1868  * We release `count' blocks on disk, but (last - first) may be greater
1869  * than `count' because there can be holes in there.
1870  */
1871 static void
1872 ext3_clear_blocks(handle_t *handle, struct inode *inode, struct buffer_head *bh,
1873                 unsigned long block_to_free, unsigned long count,
1874                 __le32 *first, __le32 *last)
1875 {
1876         __le32 *p;
1877         if (try_to_extend_transaction(handle, inode)) {
1878                 if (bh) {
1879                         BUFFER_TRACE(bh, "call ext3_journal_dirty_metadata");
1880                         ext3_journal_dirty_metadata(handle, bh);
1881                 }
1882                 ext3_mark_inode_dirty(handle, inode);
1883                 ext3_journal_test_restart(handle, inode);
1884                 if (bh) {
1885                         BUFFER_TRACE(bh, "retaking write access");
1886                         ext3_journal_get_write_access(handle, bh);
1887                 }
1888         }
1889
1890         /*
1891          * Any buffers which are on the journal will be in memory. We find
1892          * them on the hash table so journal_revoke() will run journal_forget()
1893          * on them.  We've already detached each block from the file, so
1894          * bforget() in journal_forget() should be safe.
1895          *
1896          * AKPM: turn on bforget in journal_forget()!!!
1897          */
1898         for (p = first; p < last; p++) {
1899                 u32 nr = le32_to_cpu(*p);
1900                 if (nr) {
1901                         struct buffer_head *bh;
1902
1903                         *p = 0;
1904                         bh = sb_find_get_block(inode->i_sb, nr);
1905                         ext3_forget(handle, 0, inode, bh, nr);
1906                 }
1907         }
1908
1909         ext3_free_blocks(handle, inode, block_to_free, count);
1910 }
1911
1912 /**
1913  * ext3_free_data - free a list of data blocks
1914  * @handle:     handle for this transaction
1915  * @inode:      inode we are dealing with
1916  * @this_bh:    indirect buffer_head which contains *@first and *@last
1917  * @first:      array of block numbers
1918  * @last:       points immediately past the end of array
1919  *
1920  * We are freeing all blocks referred to from that array (numbers are stored as
1921  * little-endian 32-bit) and updating @inode->i_blocks appropriately.
1922  *
1923  * We accumulate contiguous runs of blocks to free.  Conveniently, if these
1924  * blocks are contiguous then releasing them at one time will only affect one
1925  * or two bitmap blocks (+ group descriptor(s) and superblock) and we won't
1926  * actually use a lot of journal space.
1927  *
1928  * @this_bh will be %NULL if @first and @last point into the inode's direct
1929  * block pointers.
1930  */
1931 static void ext3_free_data(handle_t *handle, struct inode *inode,
1932                            struct buffer_head *this_bh,
1933                            __le32 *first, __le32 *last)
1934 {
1935         unsigned long block_to_free = 0;    /* Starting block # of a run */
1936         unsigned long count = 0;            /* Number of blocks in the run */ 
1937         __le32 *block_to_free_p = NULL;     /* Pointer into inode/ind
1938                                                corresponding to
1939                                                block_to_free */
1940         unsigned long nr;                   /* Current block # */
1941         __le32 *p;                          /* Pointer into inode/ind
1942                                                for current block */
1943         int err;
1944
1945         if (this_bh) {                          /* For indirect block */
1946                 BUFFER_TRACE(this_bh, "get_write_access");
1947                 err = ext3_journal_get_write_access(handle, this_bh);
1948                 /* Important: if we can't update the indirect pointers
1949                  * to the blocks, we can't free them. */
1950                 if (err)
1951                         return;
1952         }
1953
1954         for (p = first; p < last; p++) {
1955                 nr = le32_to_cpu(*p);
1956                 if (nr) {
1957                         /* accumulate blocks to free if they're contiguous */
1958                         if (count == 0) {
1959                                 block_to_free = nr;
1960                                 block_to_free_p = p;
1961                                 count = 1;
1962                         } else if (nr == block_to_free + count) {
1963                                 count++;
1964                         } else {
1965                                 ext3_clear_blocks(handle, inode, this_bh, 
1966                                                   block_to_free,
1967                                                   count, block_to_free_p, p);
1968                                 block_to_free = nr;
1969                                 block_to_free_p = p;
1970                                 count = 1;
1971                         }
1972                 }
1973         }
1974
1975         if (count > 0)
1976                 ext3_clear_blocks(handle, inode, this_bh, block_to_free,
1977                                   count, block_to_free_p, p);
1978
1979         if (this_bh) {
1980                 BUFFER_TRACE(this_bh, "call ext3_journal_dirty_metadata");
1981                 ext3_journal_dirty_metadata(handle, this_bh);
1982         }
1983 }
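
/*
 * Example of the run accumulation above: for a pointer array holding
 * blocks {100, 101, 102, 0, 200} the loop calls
 * ext3_clear_blocks(..., 100, 3, ...) when the run breaks at 200, and
 * the final ext3_clear_blocks(..., 200, 1, ...) happens after the loop;
 * the hole (0) is skipped without terminating the run.
 */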
1984
1985 /**
1986  *      ext3_free_branches - free an array of branches
1987  *      @handle: JBD handle for this transaction
1988  *      @inode: inode we are dealing with
1989  *      @parent_bh: the buffer_head which contains *@first and *@last
1990  *      @first: array of block numbers
1991  *      @last:  pointer immediately past the end of array
1992  *      @depth: depth of the branches to free
1993  *
1994  *      We are freeing all blocks referred to from these branches (numbers are
1995  *      stored as little-endian 32-bit) and updating @inode->i_blocks
1996  *      appropriately.
1997  */
1998 static void ext3_free_branches(handle_t *handle, struct inode *inode,
1999                                struct buffer_head *parent_bh,
2000                                __le32 *first, __le32 *last, int depth)
2001 {
2002         unsigned long nr;
2003         __le32 *p;
2004
2005         if (is_handle_aborted(handle))
2006                 return;
2007
2008         if (depth--) {
2009                 struct buffer_head *bh;
2010                 int addr_per_block = EXT3_ADDR_PER_BLOCK(inode->i_sb);
2011                 p = last;
2012                 while (--p >= first) {
2013                         nr = le32_to_cpu(*p);
2014                         if (!nr)
2015                                 continue;               /* A hole */
2016
2017                         /* Go read the buffer for the next level down */
2018                         bh = sb_bread(inode->i_sb, nr);
2019
2020                         /*
2021                          * A read failure? Report error and clear slot
2022                          * (should be rare).
2023                          */
2024                         if (!bh) {
2025                                 ext3_error(inode->i_sb, "ext3_free_branches",
2026                                            "Read failure, inode=%lu, block=%lu",
2027                                            inode->i_ino, nr);
2028                                 continue;
2029                         }
2030
2031                         /* This zaps the entire block.  Bottom up. */
2032                         BUFFER_TRACE(bh, "free child branches");
2033                         ext3_free_branches(handle, inode, bh,
2034                                            (__le32*)bh->b_data,
2035                                            (__le32*)bh->b_data + addr_per_block,
2036                                            depth);
2037
2038                         /*
2039                          * We've probably journalled the indirect block several
2040                          * times during the truncate.  But it's no longer
2041                          * needed and we now drop it from the transaction via
2042                          * journal_revoke().
2043                          *
2044                          * That's easy if it's exclusively part of this
2045                          * transaction.  But if it's part of the committing
2046                          * transaction then journal_forget() will simply
2047                          * brelse() it.  That means that if the underlying
2048                          * block is reallocated in ext3_get_block(),
2049                          * unmap_underlying_metadata() will find this block
2050                          * and will try to get rid of it.  damn, damn.
2051                          *
2052                          * If this block has already been committed to the
2053                          * journal, a revoke record will be written.  And
2054                          * revoke records must be emitted *before* clearing
2055                          * this block's bit in the bitmaps.
2056                          */
2057                         ext3_forget(handle, 1, inode, bh, bh->b_blocknr);
2058
2059                         /*
2060                          * Everything below this pointer has been
2061                          * released.  Now let this top-of-subtree go.
2062                          *
2063                          * We want the freeing of this indirect block to be
2064                          * atomic in the journal with the updating of the
2065                          * bitmap block which owns it.  So make some room in
2066                          * the journal.
2067                          *
2068                          * We zero the parent pointer *after* freeing its
2069                          * pointee in the bitmaps, so if extend_transaction()
2070                          * for some reason fails to put the bitmap changes and
2071                          * the release into the same transaction, recovery
2072                          * will merely complain about releasing a free block,
2073                          * rather than leaking blocks.
2074                          */
2075                         if (is_handle_aborted(handle))
2076                                 return;
2077                         if (try_to_extend_transaction(handle, inode)) {
2078                                 ext3_mark_inode_dirty(handle, inode);
2079                                 ext3_journal_test_restart(handle, inode);
2080                         }
2081
2082                         ext3_free_blocks(handle, inode, nr, 1);
2083
2084                         if (parent_bh) {
2085                                 /*
2086                                  * The block which we have just freed is
2087                                  * pointed to by an indirect block: journal it
2088                                  */
2089                                 BUFFER_TRACE(parent_bh, "get_write_access");
2090                                 if (!ext3_journal_get_write_access(handle,
2091                                                                    parent_bh)){
2092                                         *p = 0;
2093                                         BUFFER_TRACE(parent_bh,
2094                                         "call ext3_journal_dirty_metadata");
2095                                         ext3_journal_dirty_metadata(handle, 
2096                                                                     parent_bh);
2097                                 }
2098                         }
2099                 }
2100         } else {
2101                 /* We have reached the bottom of the tree. */
2102                 BUFFER_TRACE(parent_bh, "free data blocks");
2103                 ext3_free_data(handle, inode, parent_bh, first, last);
2104         }
2105 }
2106
2107 /*
2108  * ext3_truncate()
2109  *
2110  * We block out ext3_get_block() block instantiations across the entire
2111  * transaction, and VFS/VM ensures that ext3_truncate() cannot run
2112  * simultaneously on behalf of the same inode.
2113  *
2114  * As we work through the truncate and commit bits of it to the journal there
2115  * is one core guiding principle: the file's tree must always be consistent on
2116  * disk.  We must be able to restart the truncate after a crash.
2117  *
2118  * The file's tree may be transiently inconsistent in memory (although it
2119  * probably isn't), but whenever we close off and commit a journal transaction,
2120  * the contents of (the filesystem + the journal) must be consistent and
2121  * restartable.  It's pretty simple, really: bottom up, right to left (although
2122  * left-to-right works OK too).
2123  *
2124  * Note that at recovery time, journal replay occurs *before* the restart of
2125  * truncate against the orphan inode list.
2126  *
2127  * The committed inode has the new, desired i_size (which is the same as
2128  * i_disksize in this case).  After a crash, ext3_orphan_cleanup() will see
2129  * that this inode's truncate did not complete and it will again call
2130  * ext3_truncate() to have another go.  So there will be instantiated blocks
2131  * to the right of the truncation point in a crashed ext3 filesystem.  But
2132  * that's fine - as long as they are linked from the inode, the post-crash
2133  * ext3_truncate() run will find them and release them.
2134  */
2135
2136 void ext3_truncate(struct inode * inode)
2137 {
2138         handle_t *handle;
2139         struct ext3_inode_info *ei = EXT3_I(inode);
2140         __le32 *i_data = ei->i_data;
2141         int addr_per_block = EXT3_ADDR_PER_BLOCK(inode->i_sb);
2142         struct address_space *mapping = inode->i_mapping;
2143         int offsets[4];
2144         Indirect chain[4];
2145         Indirect *partial;
2146         __le32 nr = 0;
2147         int n;
2148         long last_block;
2149         unsigned blocksize = inode->i_sb->s_blocksize;
2150         struct page *page;
2151
2152         if (!(S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
2153             S_ISLNK(inode->i_mode)))
2154                 return;
2155         if (ext3_inode_is_fast_symlink(inode))
2156                 return;
2157         if (IS_APPEND(inode) || IS_IMMUTABLE(inode))
2158                 return;
2159
2160         /*
2161          * We have to lock the EOF page here, because lock_page() nests
2162          * outside journal_start().
2163          */
2164         if ((inode->i_size & (blocksize - 1)) == 0) {
2165                 /* Block boundary? Nothing to do */
2166                 page = NULL;
2167         } else {
2168                 page = grab_cache_page(mapping,
2169                                 inode->i_size >> PAGE_CACHE_SHIFT);
2170                 if (!page)
2171                         return;
2172         }
2173
2174         handle = start_transaction(inode);
2175         if (IS_ERR(handle)) {
2176                 if (page) {
2177                         clear_highpage(page);
2178                         flush_dcache_page(page);
2179                         unlock_page(page);
2180                         page_cache_release(page);
2181                 }
2182                 return;         /* AKPM: return what? */
2183         }
2184
2185         last_block = (inode->i_size + blocksize-1)
2186                                         >> EXT3_BLOCK_SIZE_BITS(inode->i_sb);
2187
2188         if (page)
2189                 ext3_block_truncate_page(handle, page, mapping, inode->i_size);
2190
2191         n = ext3_block_to_path(inode, last_block, offsets, NULL);
2192         if (n == 0)
2193                 goto out_stop;  /* error */
2194
2195         /*
2196          * OK.  This truncate is going to happen.  We add the inode to the
2197          * orphan list, so that if this truncate spans multiple transactions,
2198          * and we crash, we will resume the truncate when the filesystem
2199          * recovers.  It also marks the inode dirty, to catch the new size.
2200          *
2201          * Implication: the file must always be in a sane, consistent
2202          * truncatable state while each transaction commits.
2203          */
2204         if (ext3_orphan_add(handle, inode))
2205                 goto out_stop;
2206
2207         /*
2208          * The orphan list entry will now protect us from any crash which
2209          * occurs before the truncate completes, so it is now safe to propagate
2210          * the new, shorter inode size (held for now in i_size) into the
2211          * on-disk inode. We do this via i_disksize, which is the value which
2212          * ext3 *really* writes onto the disk inode.
2213          */
2214         ei->i_disksize = inode->i_size;
2215
2216         /*
2217          * From here we block out all ext3_get_block() callers who want to
2218          * modify the block allocation tree.
2219          */
2220         down(&ei->truncate_sem);
2221
2222         if (n == 1) {           /* direct blocks */
2223                 ext3_free_data(handle, inode, NULL, i_data+offsets[0],
2224                                i_data + EXT3_NDIR_BLOCKS);
2225                 goto do_indirects;
2226         }
2227
2228         partial = ext3_find_shared(inode, n, offsets, chain, &nr);
2229         /* Kill the top of shared branch (not detached) */
2230         if (nr) {
2231                 if (partial == chain) {
2232                         /* Shared branch grows from the inode */
2233                         ext3_free_branches(handle, inode, NULL,
2234                                            &nr, &nr+1, (chain+n-1) - partial);
2235                         *partial->p = 0;
2236                         /*
2237                          * We mark the inode dirty prior to restart,
2238                          * and prior to stop.  No need for it here.
2239                          */
2240                 } else {
2241                         /* Shared branch grows from an indirect block */
2242                         BUFFER_TRACE(partial->bh, "get_write_access");
2243                         ext3_free_branches(handle, inode, partial->bh,
2244                                         partial->p,
2245                                         partial->p+1, (chain+n-1) - partial);
2246                 }
2247         }
2248         /* Clear the ends of indirect blocks on the shared branch */
2249         while (partial > chain) {
2250                 ext3_free_branches(handle, inode, partial->bh, partial->p + 1,
2251                                    (__le32*)partial->bh->b_data+addr_per_block,
2252                                    (chain+n-1) - partial);
2253                 BUFFER_TRACE(partial->bh, "call brelse");
2254                 brelse (partial->bh);
2255                 partial--;
2256         }
2257 do_indirects:
2258         /* Kill the remaining (whole) subtrees */
2259         switch (offsets[0]) {
2260                 default:
2261                         nr = i_data[EXT3_IND_BLOCK];
2262                         if (nr) {
2263                                 ext3_free_branches(handle, inode, NULL,
2264                                                    &nr, &nr+1, 1);
2265                                 i_data[EXT3_IND_BLOCK] = 0;
2266                         }
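                        /* fall through */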
2267                 case EXT3_IND_BLOCK:
2268                         nr = i_data[EXT3_DIND_BLOCK];
2269                         if (nr) {
2270                                 ext3_free_branches(handle, inode, NULL,
2271                                                    &nr, &nr+1, 2);
2272                                 i_data[EXT3_DIND_BLOCK] = 0;
2273                         }
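                        /* fall through */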
2274                 case EXT3_DIND_BLOCK:
2275                         nr = i_data[EXT3_TIND_BLOCK];
2276                         if (nr) {
2277                                 ext3_free_branches(handle, inode, NULL,
2278                                                    &nr, &nr+1, 3);
2279                                 i_data[EXT3_TIND_BLOCK] = 0;
2280                         }
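                        /* fall through */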
2281                 case EXT3_TIND_BLOCK:
2282                         ;
2283         }
2284
2285         ext3_discard_reservation(inode);
2286
2287         up(&ei->truncate_sem);
2288         inode->i_mtime = inode->i_ctime = CURRENT_TIME_SEC;
2289         ext3_mark_inode_dirty(handle, inode);
2290
2291         /* In a multi-transaction truncate, we only make the final
2292          * transaction synchronous */
2293         if (IS_SYNC(inode))
2294                 handle->h_sync = 1;
2295 out_stop:
2296         /*
2297          * If this was a simple ftruncate(), and the file will remain alive
2298          * then we need to clear up the orphan record which we created above.
2299          * However, if this was a real unlink then we were called by
2300          * ext3_delete_inode(), and we allow that function to clean up the
2301          * orphan info for us.
2302          */
2303         if (inode->i_nlink)
2304                 ext3_orphan_del(handle, inode);
2305
2306         ext3_journal_stop(handle);
2307 }
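
/*
 * Worked example, assuming 4K blocks (12 direct pointers, 1024 pointers
 * per indirect block): for last_block = 5000, ext3_block_to_path()
 * returns n = 3 with offsets {EXT3_DIND_BLOCK, 3, 892}, because
 * 5000 - 12 - 1024 = 3964 = 3 * 1024 + 892.  ext3_find_shared() walks
 * that path, and the switch at do_indirects: starts at case
 * EXT3_DIND_BLOCK, so only the triple-indirect subtree is freed whole.
 */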
2308
2309 static unsigned long ext3_get_inode_block(struct super_block *sb,
2310                 unsigned long ino, struct ext3_iloc *iloc)
2311 {
2312         unsigned long desc, group_desc, block_group;
2313         unsigned long offset, block;
2314         struct buffer_head *bh;
2315         struct ext3_group_desc * gdp;
2316
2317
2318         if ((ino != EXT3_ROOT_INO &&
2319                 ino != EXT3_JOURNAL_INO &&
2320                 ino != EXT3_RESIZE_INO &&
2321                 ino < EXT3_FIRST_INO(sb)) ||
2322                 ino > le32_to_cpu(
2323                         EXT3_SB(sb)->s_es->s_inodes_count)) {
2324                 ext3_error (sb, "ext3_get_inode_block",
2325                             "bad inode number: %lu", ino);
2326                 return 0;
2327         }
2328         block_group = (ino - 1) / EXT3_INODES_PER_GROUP(sb);
2329         if (block_group >= EXT3_SB(sb)->s_groups_count) {
2330                 ext3_error (sb, "ext3_get_inode_block",
2331                             "group >= groups count");
2332                 return 0;
2333         }
2334         smp_rmb();
2335         group_desc = block_group >> EXT3_DESC_PER_BLOCK_BITS(sb);
2336         desc = block_group & (EXT3_DESC_PER_BLOCK(sb) - 1);
2337         bh = EXT3_SB(sb)->s_group_desc[group_desc];
2338         if (!bh) {
2339                 ext3_error (sb, "ext3_get_inode_block",
2340                             "Descriptor not loaded");
2341                 return 0;
2342         }
2343
2344         gdp = (struct ext3_group_desc *) bh->b_data;
2345         /*
2346          * Figure out the offset within the block group inode table
2347          */
2348         offset = ((ino - 1) % EXT3_INODES_PER_GROUP(sb)) *
2349                 EXT3_INODE_SIZE(sb);
2350         block = le32_to_cpu(gdp[desc].bg_inode_table) +
2351                 (offset >> EXT3_BLOCK_SIZE_BITS(sb));
2352
2353         iloc->block_group = block_group;
2354         iloc->offset = offset & (EXT3_BLOCK_SIZE(sb) - 1);
2355         return block;
2356 }
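
/*
 * Worked example, assuming 4K blocks, 128-byte inodes and 16384 inodes
 * per group: for ino = 50000,
 *
 *   block_group  = 49999 / 16384         = 3
 *   offset       = (49999 % 16384) * 128 = 847 * 128 = 108416
 *   block        = bg_inode_table + (108416 >> 12) = bg_inode_table + 26
 *   iloc->offset = 108416 & 4095         = 1920
 *
 * i.e. the raw inode lives 1920 bytes into the 27th block of group 3's
 * inode table.
 */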
2357
2358 /*
2359  * ext3_get_inode_loc returns with an extra refcount against the inode's
2360  * underlying buffer_head on success. If 'in_mem' is true, we have all
2361  * data in memory that is needed to recreate the on-disk version of this
2362  * inode.
2363  */
2364 static int __ext3_get_inode_loc(struct inode *inode,
2365                                 struct ext3_iloc *iloc, int in_mem)
2366 {
2367         unsigned long block;
2368         struct buffer_head *bh;
2369
2370         block = ext3_get_inode_block(inode->i_sb, inode->i_ino, iloc);
2371         if (!block)
2372                 return -EIO;
2373
2374         bh = sb_getblk(inode->i_sb, block);
2375         if (!bh) {
2376                 ext3_error (inode->i_sb, "ext3_get_inode_loc",
2377                                 "unable to read inode block - "
2378                                 "inode=%lu, block=%lu", inode->i_ino, block);
2379                 return -EIO;
2380         }
2381         if (!buffer_uptodate(bh)) {
2382                 lock_buffer(bh);
2383                 if (buffer_uptodate(bh)) {
2384                         /* someone brought it uptodate while we waited */
2385                         unlock_buffer(bh);
2386                         goto has_buffer;
2387                 }
2388
2389                 /*
2390                  * If we have all of the inode's information in memory and this
2391                  * is the only valid inode in the block, we need not read the
2392                  * block.
2393                  */
2394                 if (in_mem) {
2395                         struct buffer_head *bitmap_bh;
2396                         struct ext3_group_desc *desc;
2397                         int inodes_per_buffer;
2398                         int inode_offset, i;
2399                         int block_group;
2400                         int start;
2401
2402                         block_group = (inode->i_ino - 1) /
2403                                         EXT3_INODES_PER_GROUP(inode->i_sb);
2404                         inodes_per_buffer = bh->b_size /
2405                                 EXT3_INODE_SIZE(inode->i_sb);
2406                         inode_offset = ((inode->i_ino - 1) %
2407                                         EXT3_INODES_PER_GROUP(inode->i_sb));
2408                         start = inode_offset & ~(inodes_per_buffer - 1);
2409
2410                         /* Is the inode bitmap in cache? */
2411                         desc = ext3_get_group_desc(inode->i_sb,
2412                                                 block_group, NULL);
2413                         if (!desc)
2414                                 goto make_io;
2415
2416                         bitmap_bh = sb_getblk(inode->i_sb,
2417                                         le32_to_cpu(desc->bg_inode_bitmap));
2418                         if (!bitmap_bh)
2419                                 goto make_io;
2420
2421                         /*
2422                          * If the inode bitmap isn't in cache then the
2423                          * optimisation may end up performing two reads instead
2424                          * of one, so skip it.
2425                          */
2426                         if (!buffer_uptodate(bitmap_bh)) {
2427                                 brelse(bitmap_bh);
2428                                 goto make_io;
2429                         }
2430                         for (i = start; i < start + inodes_per_buffer; i++) {
2431                                 if (i == inode_offset)
2432                                         continue;
2433                                 if (ext3_test_bit(i, bitmap_bh->b_data))
2434                                         break;
2435                         }
2436                         brelse(bitmap_bh);
2437                         if (i == start + inodes_per_buffer) {
2438                                 /* all other inodes are free, so skip I/O */
2439                                 memset(bh->b_data, 0, bh->b_size);
2440                                 set_buffer_uptodate(bh);
2441                                 unlock_buffer(bh);
2442                                 goto has_buffer;
2443                         }
2444                 }
2445
2446 make_io:
2447                 /*
2448                  * There are other valid inodes in the buffer, this inode
2449                  * has in-inode xattrs, or we don't have this inode in memory.
2450                  * Read the block from disk.
2451                  */
2452                 get_bh(bh);
2453                 bh->b_end_io = end_buffer_read_sync;
2454                 submit_bh(READ, bh);
2455                 wait_on_buffer(bh);
2456                 if (!buffer_uptodate(bh)) {
2457                         ext3_error(inode->i_sb, "ext3_get_inode_loc",
2458                                         "unable to read inode block - "
2459                                         "inode=%lu, block=%lu",
2460                                         inode->i_ino, block);
2461                         brelse(bh);
2462                         return -EIO;
2463                 }
2464         }
2465 has_buffer:
2466         iloc->bh = bh;
2467         return 0;
2468 }
2469
2470 int ext3_get_inode_loc(struct inode *inode, struct ext3_iloc *iloc)
2471 {
2472         /* We have all inode data except xattrs in memory here. */
2473         return __ext3_get_inode_loc(inode, iloc,
2474                 !(EXT3_I(inode)->i_state & EXT3_STATE_XATTR));
2475 }
2476
2477 void ext3_set_inode_flags(struct inode *inode)
2478 {
2479         unsigned int flags = EXT3_I(inode)->i_flags;
2480
2481         inode->i_flags &= ~(S_SYNC|S_APPEND|S_IMMUTABLE|S_NOATIME|S_DIRSYNC);
2482         if (flags & EXT3_SYNC_FL)
2483                 inode->i_flags |= S_SYNC;
2484         if (flags & EXT3_APPEND_FL)
2485                 inode->i_flags |= S_APPEND;
2486         if (flags & EXT3_IMMUTABLE_FL)
2487                 inode->i_flags |= S_IMMUTABLE;
2488         if (flags & EXT3_NOATIME_FL)
2489                 inode->i_flags |= S_NOATIME;
2490         if (flags & EXT3_DIRSYNC_FL)
2491                 inode->i_flags |= S_DIRSYNC;
2492 }
2493
2494 void ext3_read_inode(struct inode * inode)
2495 {
2496         struct ext3_iloc iloc;
2497         struct ext3_inode *raw_inode;
2498         struct ext3_inode_info *ei = EXT3_I(inode);
2499         struct buffer_head *bh;
2500         int block;
2501
2502 #ifdef CONFIG_EXT3_FS_POSIX_ACL
2503         ei->i_acl = EXT3_ACL_NOT_CACHED;
2504         ei->i_default_acl = EXT3_ACL_NOT_CACHED;
2505 #endif
2506         ei->i_block_alloc_info = NULL;
2507
2508         if (__ext3_get_inode_loc(inode, &iloc, 0))
2509                 goto bad_inode;
2510         bh = iloc.bh;
2511         raw_inode = ext3_raw_inode(&iloc);
2512         inode->i_mode = le16_to_cpu(raw_inode->i_mode);
2513         inode->i_uid = (uid_t)le16_to_cpu(raw_inode->i_uid_low);
2514         inode->i_gid = (gid_t)le16_to_cpu(raw_inode->i_gid_low);
2515         if(!(test_opt (inode->i_sb, NO_UID32))) {
2516                 inode->i_uid |= le16_to_cpu(raw_inode->i_uid_high) << 16;
2517                 inode->i_gid |= le16_to_cpu(raw_inode->i_gid_high) << 16;
2518         }
2519         inode->i_nlink = le16_to_cpu(raw_inode->i_links_count);
2520         inode->i_size = le32_to_cpu(raw_inode->i_size);
2521         inode->i_atime.tv_sec = le32_to_cpu(raw_inode->i_atime);
2522         inode->i_ctime.tv_sec = le32_to_cpu(raw_inode->i_ctime);
2523         inode->i_mtime.tv_sec = le32_to_cpu(raw_inode->i_mtime);
2524         inode->i_atime.tv_nsec = inode->i_ctime.tv_nsec = inode->i_mtime.tv_nsec = 0;
2525
2526         ei->i_state = 0;
2527         ei->i_dir_start_lookup = 0;
2528         ei->i_dtime = le32_to_cpu(raw_inode->i_dtime);
2529         /* We now have enough fields to check if the inode was active or not.
2530          * This is needed because nfsd might try to access dead inodes
2531          * This is needed because nfsd might try to access dead inodes;
2532          * the test is the same one that e2fsck uses.
2533          */
2534         if (inode->i_nlink == 0) {
2535                 if (inode->i_mode == 0 ||
2536                     !(EXT3_SB(inode->i_sb)->s_mount_state & EXT3_ORPHAN_FS)) {
2537                         /* this inode is deleted */
2538                         brelse (bh);
2539                         goto bad_inode;
2540                 }
2541                 /* The only unlinked inodes we let through here have
2542                  * valid i_mode and are being read by the orphan
2543                  * recovery code: that's fine, we're about to complete
2544                  * the process of deleting those. */
2545         }
2546         inode->i_blksize = PAGE_SIZE;   /* This is the optimal IO size
2547                                          * (for stat), not the fs block
2548                                          * size */  
2549         inode->i_blocks = le32_to_cpu(raw_inode->i_blocks);
2550         ei->i_flags = le32_to_cpu(raw_inode->i_flags);
2551 #ifdef EXT3_FRAGMENTS
2552         ei->i_faddr = le32_to_cpu(raw_inode->i_faddr);
2553         ei->i_frag_no = raw_inode->i_frag;
2554         ei->i_frag_size = raw_inode->i_fsize;
2555 #endif
2556         ei->i_file_acl = le32_to_cpu(raw_inode->i_file_acl);
2557         if (!S_ISREG(inode->i_mode)) {
2558                 ei->i_dir_acl = le32_to_cpu(raw_inode->i_dir_acl);
2559         } else {
2560                 inode->i_size |=
2561                         ((__u64)le32_to_cpu(raw_inode->i_size_high)) << 32;
2562         }
2563         ei->i_disksize = inode->i_size;
2564         inode->i_generation = le32_to_cpu(raw_inode->i_generation);
2565         ei->i_block_group = iloc.block_group;
2566         /*
2567          * NOTE! The in-memory inode i_data array is in little-endian order
2568          * even on big-endian machines: we do NOT byteswap the block numbers!
2569          */
2570         for (block = 0; block < EXT3_N_BLOCKS; block++)
2571                 ei->i_data[block] = raw_inode->i_block[block];
2572         INIT_LIST_HEAD(&ei->i_orphan);
2573
2574         if (inode->i_ino >= EXT3_FIRST_INO(inode->i_sb) + 1 &&
2575             EXT3_INODE_SIZE(inode->i_sb) > EXT3_GOOD_OLD_INODE_SIZE) {
2576                 /*
2577                  * When mke2fs creates big inodes it does not zero out
2578                  * the unused bytes above EXT3_GOOD_OLD_INODE_SIZE,
2579                  * so ignore those first few inodes.
2580                  */
2581                 ei->i_extra_isize = le16_to_cpu(raw_inode->i_extra_isize);
2582                 if (EXT3_GOOD_OLD_INODE_SIZE + ei->i_extra_isize >
2583                     EXT3_INODE_SIZE(inode->i_sb))
2584                         goto bad_inode;
2585                 if (ei->i_extra_isize == 0) {
2586                         /* The extra space is currently unused. Use it. */
2587                         ei->i_extra_isize = sizeof(struct ext3_inode) -
2588                                             EXT3_GOOD_OLD_INODE_SIZE;
2589                 } else {
2590                         __le32 *magic = (void *)raw_inode +
2591                                         EXT3_GOOD_OLD_INODE_SIZE +
2592                                         ei->i_extra_isize;
2593                         if (*magic == cpu_to_le32(EXT3_XATTR_MAGIC))
2594                                  ei->i_state |= EXT3_STATE_XATTR;
2595                 }
2596         } else
2597                 ei->i_extra_isize = 0;
2598
2599         if (S_ISREG(inode->i_mode)) {
2600                 inode->i_op = &ext3_file_inode_operations;
2601                 inode->i_fop = &ext3_file_operations;
2602                 ext3_set_aops(inode);
2603         } else if (S_ISDIR(inode->i_mode)) {
2604                 inode->i_op = &ext3_dir_inode_operations;
2605                 inode->i_fop = &ext3_dir_operations;
2606         } else if (S_ISLNK(inode->i_mode)) {
2607                 if (ext3_inode_is_fast_symlink(inode))
2608                         inode->i_op = &ext3_fast_symlink_inode_operations;
2609                 else {
2610                         inode->i_op = &ext3_symlink_inode_operations;
2611                         ext3_set_aops(inode);
2612                 }
2613         } else {
2614                 inode->i_op = &ext3_special_inode_operations;
2615                 if (raw_inode->i_block[0])
2616                         init_special_inode(inode, inode->i_mode,
2617                            old_decode_dev(le32_to_cpu(raw_inode->i_block[0])));
2618                 else 
2619                         init_special_inode(inode, inode->i_mode,
2620                            new_decode_dev(le32_to_cpu(raw_inode->i_block[1])));
2621         }
2622         brelse (iloc.bh);
2623         ext3_set_inode_flags(inode);
2624         return;
2625
2626 bad_inode:
2627         make_bad_inode(inode);
2628         return;
2629 }
2630
2631 /*
2632  * Post the struct inode info into an on-disk inode location in the
2633  * buffer-cache.  This gobbles the caller's reference to the
2634  * buffer_head in the inode location struct.
2635  *
2636  * The caller must have write access to iloc->bh.
2637  */
2638 static int ext3_do_update_inode(handle_t *handle, 
2639                                 struct inode *inode, 
2640                                 struct ext3_iloc *iloc)
2641 {
2642         struct ext3_inode *raw_inode = ext3_raw_inode(iloc);
2643         struct ext3_inode_info *ei = EXT3_I(inode);
2644         struct buffer_head *bh = iloc->bh;
2645         int err = 0, rc, block;
2646
2647         /* For fields not tracked in the in-memory inode,
2648          * initialise them to zero for new inodes. */
2649         if (ei->i_state & EXT3_STATE_NEW)
2650                 memset(raw_inode, 0, EXT3_SB(inode->i_sb)->s_inode_size);
2651
2652         raw_inode->i_mode = cpu_to_le16(inode->i_mode);
2653         if(!(test_opt(inode->i_sb, NO_UID32))) {
2654                 raw_inode->i_uid_low = cpu_to_le16(low_16_bits(inode->i_uid));
2655                 raw_inode->i_gid_low = cpu_to_le16(low_16_bits(inode->i_gid));
2656 /*
2657  * Fix up interoperability with old kernels. Otherwise, old inodes get
2658  * re-used with the upper 16 bits of the uid/gid intact
2659  */
2660                 if (!ei->i_dtime) {
2661                         raw_inode->i_uid_high =
2662                                 cpu_to_le16(high_16_bits(inode->i_uid));
2663                         raw_inode->i_gid_high =
2664                                 cpu_to_le16(high_16_bits(inode->i_gid));
2665                 } else {
2666                         raw_inode->i_uid_high = 0;
2667                         raw_inode->i_gid_high = 0;
2668                 }
2669         } else {
2670                 raw_inode->i_uid_low =
2671                         cpu_to_le16(fs_high2lowuid(inode->i_uid));
2672                 raw_inode->i_gid_low =
2673                         cpu_to_le16(fs_high2lowgid(inode->i_gid));
2674                 raw_inode->i_uid_high = 0;
2675                 raw_inode->i_gid_high = 0;
2676         }
2677         raw_inode->i_links_count = cpu_to_le16(inode->i_nlink);
2678         raw_inode->i_size = cpu_to_le32(ei->i_disksize);
2679         raw_inode->i_atime = cpu_to_le32(inode->i_atime.tv_sec);
2680         raw_inode->i_ctime = cpu_to_le32(inode->i_ctime.tv_sec);
2681         raw_inode->i_mtime = cpu_to_le32(inode->i_mtime.tv_sec);
2682         raw_inode->i_blocks = cpu_to_le32(inode->i_blocks);
2683         raw_inode->i_dtime = cpu_to_le32(ei->i_dtime);
2684         raw_inode->i_flags = cpu_to_le32(ei->i_flags);
2685 #ifdef EXT3_FRAGMENTS
2686         raw_inode->i_faddr = cpu_to_le32(ei->i_faddr);
2687         raw_inode->i_frag = ei->i_frag_no;
2688         raw_inode->i_fsize = ei->i_frag_size;
2689 #endif
2690         raw_inode->i_file_acl = cpu_to_le32(ei->i_file_acl);
2691         if (!S_ISREG(inode->i_mode)) {
2692                 raw_inode->i_dir_acl = cpu_to_le32(ei->i_dir_acl);
2693         } else {
2694                 raw_inode->i_size_high =
2695                         cpu_to_le32(ei->i_disksize >> 32);
2696                 if (ei->i_disksize > 0x7fffffffULL) {
2697                         struct super_block *sb = inode->i_sb;
2698                         if (!EXT3_HAS_RO_COMPAT_FEATURE(sb,
2699                                         EXT3_FEATURE_RO_COMPAT_LARGE_FILE) ||
2700                             EXT3_SB(sb)->s_es->s_rev_level ==
2701                                         cpu_to_le32(EXT3_GOOD_OLD_REV)) {
2702                                /* If this is the first large file
2703                                 * created, add a flag to the superblock.
2704                                 */
2705                                 err = ext3_journal_get_write_access(handle,
2706                                                 EXT3_SB(sb)->s_sbh);
2707                                 if (err)
2708                                         goto out_brelse;
2709                                 ext3_update_dynamic_rev(sb);
2710                                 EXT3_SET_RO_COMPAT_FEATURE(sb,
2711                                         EXT3_FEATURE_RO_COMPAT_LARGE_FILE);
2712                                 sb->s_dirt = 1;
2713                                 handle->h_sync = 1;
2714                                 err = ext3_journal_dirty_metadata(handle,
2715                                                 EXT3_SB(sb)->s_sbh);
2716                         }
2717                 }
2718         }
2719         raw_inode->i_generation = cpu_to_le32(inode->i_generation);
2720         if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode)) {
2721                 if (old_valid_dev(inode->i_rdev)) {
2722                         raw_inode->i_block[0] =
2723                                 cpu_to_le32(old_encode_dev(inode->i_rdev));
2724                         raw_inode->i_block[1] = 0;
2725                 } else {
2726                         raw_inode->i_block[0] = 0;
2727                         raw_inode->i_block[1] =
2728                                 cpu_to_le32(new_encode_dev(inode->i_rdev));
2729                         raw_inode->i_block[2] = 0;
2730                 }
2731         } else for (block = 0; block < EXT3_N_BLOCKS; block++)
2732                 raw_inode->i_block[block] = ei->i_data[block];
2733
2734         if (EXT3_INODE_SIZE(inode->i_sb) > EXT3_GOOD_OLD_INODE_SIZE)
2735                 raw_inode->i_extra_isize = cpu_to_le16(ei->i_extra_isize);
2736
2737         BUFFER_TRACE(bh, "call ext3_journal_dirty_metadata");
2738         rc = ext3_journal_dirty_metadata(handle, bh);
2739         if (!err)
2740                 err = rc;
2741         ei->i_state &= ~EXT3_STATE_NEW;
2742
2743 out_brelse:
2744         brelse(bh);
2745         ext3_std_error(inode->i_sb, err);
2746         return err;
2747 }
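
/*
 * Worked example (illustrative, not from the original source): for a
 * regular file with i_disksize == 0x123456789 (~4.6GB), the code above
 * stores i_size = cpu_to_le32(0x23456789) and i_size_high =
 * cpu_to_le32(0x1); because the size exceeds 0x7fffffff, the first such
 * file also sets EXT3_FEATURE_RO_COMPAT_LARGE_FILE so that kernels
 * which cannot handle large files will only mount read-only.
 */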
2748
2749 /*
2750  * ext3_write_inode()
2751  *
2752  * We are called from a few places:
2753  *
2754  * - Within generic_file_write() for O_SYNC files.
2755  *   Here, there will be no transaction running. We wait for any running
2756  *   transaction to commit.
2757  *
2758  * - Within sys_sync(), kupdate and such.
2759  *   We wait on commit, if told to.
2760  *
2761  * - Within prune_icache() (PF_MEMALLOC == true)
2762  *   Here we simply return.  We can't afford to block kswapd on the
2763  *   journal commit.
2764  *
2765  * In all cases it is actually safe for us to return without doing anything,
2766  * because the inode has been copied into a raw inode buffer in
2767  * ext3_mark_inode_dirty().  This is a correctness thing for O_SYNC and for
2768  * knfsd.
2769  *
2770  * Note that we are absolutely dependent upon all inode dirtiers doing the
2771  * right thing: they *must* call mark_inode_dirty() after dirtying info in
2772  * which we are interested.
2773  *
2774  * It would be a bug for them to not do this.  The code:
2775  *
2776  *      mark_inode_dirty(inode)
2777  *      stuff();
2778  *      inode->i_size = expr;
2779  *
2780  * is in error because a kswapd-driven write_inode() could occur while
2781  * `stuff()' is running, and the new i_size will be lost.  Plus the inode
2782  * will no longer be on the superblock's dirty inode list.
2783  */
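
/*
 * For illustration (a sketch, not from the original comment), the safe
 * ordering dirties the inode only after every field is updated:
 *
 *      stuff();
 *      inode->i_size = expr;
 *      mark_inode_dirty(inode);
 */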
2784 int ext3_write_inode(struct inode *inode, int wait)
2785 {
2786         if (current->flags & PF_MEMALLOC)
2787                 return 0;
2788
2789         if (ext3_journal_current_handle()) {
2790                 jbd_debug(0, "called recursively, non-PF_MEMALLOC!\n");
2791                 dump_stack();
2792                 return -EIO;
2793         }
2794
2795         if (!wait)
2796                 return 0;
2797
2798         return ext3_force_commit(inode->i_sb);
2799 }
2800
2801 /*
2802  * ext3_setattr()
2803  *
2804  * Called from notify_change.
2805  *
2806  * We want to trap VFS attempts to truncate the file as soon as
2807  * possible.  In particular, we want to make sure that when the VFS
2808  * shrinks i_size, we put the inode on the orphan list and modify
2809  * i_disksize immediately, so that during the subsequent flushing of
2810  * dirty pages and freeing of disk blocks, we can guarantee that any
2811  * commit will leave the blocks being flushed in an unused state on
2812  * disk.  (On recovery, the inode will get truncated and the blocks will
2813  * be freed, so we have a strong guarantee that no future commit will
2814  * leave these blocks visible to the user.)  
2815  *
2816  * Called with inode->sem down.
2817  */
2818 int ext3_setattr(struct dentry *dentry, struct iattr *attr)
2819 {
2820         struct inode *inode = dentry->d_inode;
2821         int error, rc = 0;
2822         const unsigned int ia_valid = attr->ia_valid;
2823
2824         error = inode_change_ok(inode, attr);
2825         if (error)
2826                 return error;
2827
2828         if ((ia_valid & ATTR_UID && attr->ia_uid != inode->i_uid) ||
2829                 (ia_valid & ATTR_GID && attr->ia_gid != inode->i_gid)) {
2830                 handle_t *handle;
2831
2832                 /* (user+group)*(old+new) structure, inode write (sb,
2833                  * inode block, ? - but truncate inode update has it) */
2834                 handle = ext3_journal_start(inode, 4*EXT3_QUOTA_INIT_BLOCKS+3);
2835                 if (IS_ERR(handle)) {
2836                         error = PTR_ERR(handle);
2837                         goto err_out;
2838                 }
2839                 error = DQUOT_TRANSFER(inode, attr) ? -EDQUOT : 0;
2840                 if (error) {
2841                         ext3_journal_stop(handle);
2842                         return error;
2843                 }
2844                 /* Update corresponding info in inode so that everything is in
2845                  * one transaction */
2846                 if (attr->ia_valid & ATTR_UID)
2847                         inode->i_uid = attr->ia_uid;
2848                 if (attr->ia_valid & ATTR_GID)
2849                         inode->i_gid = attr->ia_gid;
2850                 error = ext3_mark_inode_dirty(handle, inode);
2851                 ext3_journal_stop(handle);
2852         }
2853
2854         if (S_ISREG(inode->i_mode) &&
2855             attr->ia_valid & ATTR_SIZE && attr->ia_size < inode->i_size) {
2856                 handle_t *handle;
2857
2858                 handle = ext3_journal_start(inode, 3);
2859                 if (IS_ERR(handle)) {
2860                         error = PTR_ERR(handle);
2861                         goto err_out;
2862                 }
2863
2864                 error = ext3_orphan_add(handle, inode);
2865                 EXT3_I(inode)->i_disksize = attr->ia_size;
2866                 rc = ext3_mark_inode_dirty(handle, inode);
2867                 if (!error)
2868                         error = rc;
2869                 ext3_journal_stop(handle);
2870         }
2871
2872         rc = inode_setattr(inode, attr);
2873
2874         /* If inode_setattr's call to ext3_truncate failed to get a
2875          * transaction handle at all, we need to clean up the in-core
2876          * orphan list manually. */
2877         if (inode->i_nlink)
2878                 ext3_orphan_del(NULL, inode);
2879
2880         if (!rc && (ia_valid & ATTR_MODE))
2881                 rc = ext3_acl_chmod(inode);
2882
2883 err_out:
2884         ext3_std_error(inode->i_sb, error);
2885         if (!error)
2886                 error = rc;
2887         return error;
2888 }
2889
2890
2891 /*
2892  * akpm: how many blocks doth make a writepage()?
2893  *
2894  * With N blocks per page, it may be:
2895  * N data blocks
2896  * 2 indirect blocks
2897  * 2 dindirect blocks
2898  * 1 tindirect
2899  * N+5 bitmap blocks (from the above)
2900  * N+5 group descriptor summary blocks
2901  * 1 inode block
2902  * 1 superblock.
2903  * 2 * EXT3_SINGLEDATA_TRANS_BLOCKS for the quota files
2904  *
2905  * 3 * (N + 5) + 2 + 2 * EXT3_SINGLEDATA_TRANS_BLOCKS
2906  *
2907  * With ordered or writeback data it's the same, less the N data blocks.
2908  *
2909  * If the inode's direct blocks can hold an integral number of pages then a
2910  * page cannot straddle two indirect blocks, and we can only touch one indirect
2911  * and dindirect block, and the "5" above becomes "3".
2912  *
2913  * This still overestimates under most circumstances.  If we were to pass the
2914  * start and end offsets in here as well we could do block_to_path() on each
2915  * block and work out the exact number of indirects which are touched.  Pah.
2916  */
2917
2918 static int ext3_writepage_trans_blocks(struct inode *inode)
2919 {
2920         int bpp = ext3_journal_blocks_per_page(inode);
2921         int indirects = (EXT3_NDIR_BLOCKS % bpp) ? 5 : 3;
2922         int ret;
2923
2924         if (ext3_should_journal_data(inode))
2925                 ret = 3 * (bpp + indirects) + 2;
2926         else
2927                 ret = 2 * (bpp + indirects) + 2;
2928
2929 #ifdef CONFIG_QUOTA
2930         /* We know that structure was already allocated during DQUOT_INIT so
2931          * we will be updating only the data blocks + inodes */
2932         ret += 2*EXT3_QUOTA_TRANS_BLOCKS;
2933 #endif
2934
2935         return ret;
2936 }
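
/*
 * Worked example (illustrative): with 4K pages and 4K blocks, bpp == 1
 * and EXT3_NDIR_BLOCKS (12) % 1 == 0, so indirects == 3.  Full data
 * journaling reserves 3 * (1 + 3) + 2 = 14 credits; ordered or
 * writeback mode reserves 2 * (1 + 3) + 2 = 10, plus the quota credits
 * when CONFIG_QUOTA is enabled.
 */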
2937
2938 /*
2939  * The caller must have previously called ext3_reserve_inode_write().
2940  * Given this, we know that the caller already has write access to iloc->bh.
2941  */
2942 int ext3_mark_iloc_dirty(handle_t *handle,
2943                 struct inode *inode, struct ext3_iloc *iloc)
2944 {
2945         int err = 0;
2946
2947         /* the do_update_inode consumes one bh->b_count */
2948         get_bh(iloc->bh);
2949
2950         /* ext3_do_update_inode() does journal_dirty_metadata */
2951         err = ext3_do_update_inode(handle, inode, iloc);
2952         put_bh(iloc->bh);
2953         return err;
2954 }
2955
2956 /* 
2957  * On success, we end up with an outstanding reference count against
2958  * iloc->bh.  This _must_ be cleaned up later. 
2959  */
2960
2961 int
2962 ext3_reserve_inode_write(handle_t *handle, struct inode *inode, 
2963                          struct ext3_iloc *iloc)
2964 {
2965         int err = 0;
2966         if (handle) {
2967                 err = ext3_get_inode_loc(inode, iloc);
2968                 if (!err) {
2969                         BUFFER_TRACE(iloc->bh, "get_write_access");
2970                         err = ext3_journal_get_write_access(handle, iloc->bh);
2971                         if (err) {
2972                                 brelse(iloc->bh);
2973                                 iloc->bh = NULL;
2974                         }
2975                 }
2976         }
2977         ext3_std_error(inode->i_sb, err);
2978         return err;
2979 }
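
/*
 * Typical usage (a sketch of the common pattern, not a new API): the
 * caller reserves write access, modifies the inode, then marks the iloc
 * dirty, which consumes the bh reference taken here:
 *
 *      struct ext3_iloc iloc;
 *      err = ext3_reserve_inode_write(handle, inode, &iloc);
 *      if (!err) {
 *              ... update EXT3_I(inode) / inode fields ...
 *              err = ext3_mark_iloc_dirty(handle, inode, &iloc);
 *      }
 */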
2980
2981 /*
2982  * akpm: What we do here is to mark the in-core inode as clean
2983  * with respect to inode dirtiness (it may still be data-dirty).
2984  * This means that the in-core inode may be reaped by prune_icache
2985  * without having to perform any I/O.  This is a very good thing,
2986  * because *any* task may call prune_icache - even ones which
2987  * have a transaction open against a different journal.
2988  *
2989  * Is this cheating?  Not really.  Sure, we haven't written the
2990  * inode out, but prune_icache isn't a user-visible syncing function.
2991  * Whenever the user wants stuff synced (sys_sync, sys_msync, sys_fsync)
2992  * we start and wait on commits.
2993  *
2994  * Is this efficient/effective?  Well, we're being nice to the system
2995  * by cleaning up our inodes proactively so they can be reaped
2996  * without I/O.  But we are potentially leaving up to five seconds'
2997  * worth of inodes floating about which prune_icache wants us to
2998  * write out.  One way to fix that would be to get prune_icache()
2999  * to do a write_super() to free up some memory.  It has the desired
3000  * effect.
3001  */
3002 int ext3_mark_inode_dirty(handle_t *handle, struct inode *inode)
3003 {
3004         struct ext3_iloc iloc;
3005         int err;
3006
3007         might_sleep();
3008         err = ext3_reserve_inode_write(handle, inode, &iloc);
3009         if (!err)
3010                 err = ext3_mark_iloc_dirty(handle, inode, &iloc);
3011         return err;
3012 }
3013
3014 /*
3015  * akpm: ext3_dirty_inode() is called from __mark_inode_dirty()
3016  *
3017  * We're really interested in the case where a file is being extended.
3018  * i_size has been changed by generic_commit_write() and we thus need
3019  * to include the updated inode in the current transaction.
3020  *
3021  * Also, DQUOT_ALLOC_SPACE() will always dirty the inode when blocks
3022  * are allocated to the file.
3023  *
3024  * If the inode is marked synchronous, we don't honour that here - doing
3025  * so would cause a commit on atime updates, which we don't bother doing.
3026  * We handle synchronous inodes at the highest possible level.
3027  */
3028 void ext3_dirty_inode(struct inode *inode)
3029 {
3030         handle_t *current_handle = ext3_journal_current_handle();
3031         handle_t *handle;
3032
3033         handle = ext3_journal_start(inode, 2);
3034         if (IS_ERR(handle))
3035                 goto out;
3036         if (current_handle &&
3037                 current_handle->h_transaction != handle->h_transaction) {
3038                 /* This task has a transaction open against a different fs */
3039                 printk(KERN_EMERG "%s: transactions do not match!\n",
3040                        __FUNCTION__);
3041         } else {
3042                 jbd_debug(5, "marking dirty.  outer handle=%p\n",
3043                                 current_handle);
3044                 ext3_mark_inode_dirty(handle, inode);
3045         }
3046         ext3_journal_stop(handle);
3047 out:
3048         return;
3049 }
3050
3051 #ifdef AKPM
3052 /* 
3053  * Bind an inode's backing buffer_head into this transaction, to prevent
3054  * it from being flushed to disk early.  Unlike
3055  * ext3_reserve_inode_write, this leaves behind no bh reference and
3056  * returns no iloc structure, so the caller needs to repeat the iloc
3057  * lookup to mark the inode dirty later.
3058  */
3059 static inline int
3060 ext3_pin_inode(handle_t *handle, struct inode *inode)
3061 {
3062         struct ext3_iloc iloc;
3063
3064         int err = 0;
3065         if (handle) {
3066                 err = ext3_get_inode_loc(inode, &iloc);
3067                 if (!err) {
3068                         BUFFER_TRACE(iloc.bh, "get_write_access");
3069                         err = journal_get_write_access(handle, iloc.bh);
3070                         if (!err)
3071                                 err = ext3_journal_dirty_metadata(handle, 
3072                                                                   iloc.bh);
3073                         brelse(iloc.bh);
3074                 }
3075         }
3076         ext3_std_error(inode->i_sb, err);
3077         return err;
3078 }
3079 #endif
3080
3081 int ext3_change_inode_journal_flag(struct inode *inode, int val)
3082 {
3083         journal_t *journal;
3084         handle_t *handle;
3085         int err;
3086
3087         /*
3088          * We have to be very careful here: changing a data block's
3089          * journaling status dynamically is dangerous.  If we write a
3090          * data block to the journal, change the status and then delete
3091          * that block, we risk forgetting to revoke the old log record
3092          * from the journal and so a subsequent replay can corrupt data.
3093          * So, first we make sure that the journal is empty and that
3094          * nobody is changing anything.
3095          */
3096
3097         journal = EXT3_JOURNAL(inode);
3098         if (is_journal_aborted(journal) || IS_RDONLY(inode))
3099                 return -EROFS;
3100
3101         journal_lock_updates(journal);
3102         journal_flush(journal);
3103
3104         /*
3105          * OK, there are no updates running now, and all cached data is
3106          * synced to disk.  We are now in a completely consistent state
3107          * which doesn't have anything in the journal, and we know that
3108          * no filesystem updates are running, so it is safe to modify
3109          * the inode's in-core data-journaling state flag now.
3110          */
3111
3112         if (val)
3113                 EXT3_I(inode)->i_flags |= EXT3_JOURNAL_DATA_FL;
3114         else
3115                 EXT3_I(inode)->i_flags &= ~EXT3_JOURNAL_DATA_FL;
3116         ext3_set_aops(inode);
3117
3118         journal_unlock_updates(journal);
3119
3120         /* Finally we can mark the inode as dirty. */
3121
3122         handle = ext3_journal_start(inode, 1);
3123         if (IS_ERR(handle))
3124                 return PTR_ERR(handle);
3125
3126         err = ext3_mark_inode_dirty(handle, inode);
3127         handle->h_sync = 1;
3128         ext3_journal_stop(handle);
3129         ext3_std_error(inode->i_sb, err);
3130
3131         return err;
3132 }
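
/*
 * Illustrative caller (a hedged sketch; the real code lives in the
 * EXT3_IOC_SETFLAGS ioctl handler): toggling per-inode data journaling
 * looks roughly like:
 *
 *      if ((jflag ^ oldflags) & EXT3_JOURNAL_DATA_FL)
 *              err = ext3_change_inode_journal_flag(inode,
 *                              jflag & EXT3_JOURNAL_DATA_FL);
 */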