/*
** Write ahead logging implementation copyright Chris Mason 2000
**
** The background commits make this code very interrelated, and
** overly complex. I need to rethink things a bit.... The major players:
**
** journal_begin -- call with the number of blocks you expect to log.
**                  If the current transaction is too
**                  old, it will block until the current transaction is
**                  finished, and then start a new one.
**                  Usually, your transaction will get joined in with
**                  previous ones for speed.
**
** journal_join -- same as journal_begin, but won't block on the current
**                 transaction regardless of age. Don't ever call
**                 this. Ever. There are only two places it should be
**                 called from, and they are both inside this file.
**
** journal_mark_dirty -- adds blocks into this transaction. clears any flags
**                       that might make them get sent to disk
**                       and then marks them BH_JDirty. Puts the buffer head
**                       into the current transaction hash.
**
** journal_end -- if the current transaction is batchable, it does nothing
**                otherwise, it could do an async/synchronous commit, or
**                a full flush of all log and real blocks in the
**                transaction.
**
** flush_old_commits -- if the current transaction is too old, it is ended and
**                      commit blocks are sent to disk. Forces commit blocks
**                      to disk for all backgrounded commits that have been
**                      around too long.
**                   -- Note, if you call this as an immediate flush from
**                      within kupdate, it will ignore the immediate flag
**
** The commit thread -- a writer process for async commits. It allows
**                      a process to request a log flush on a task queue.
**                      The commit will happen once the commit thread wakes up.
**                      The benefit here is the writer (with whatever
**                      related locks it has) doesn't have to wait for the
**                      log blocks to hit disk if it doesn't want to.
*/
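/*
** A rough sketch of the typical call sequence described above (error
** handling elided; nblocks is the caller's estimate of blocks to log):
**
**   struct reiserfs_transaction_handle th ;
**   journal_begin(&th, sb, nblocks) ;
**   reiserfs_prepare_for_journal(sb, bh, 1) ;
**   ... modify bh ...
**   journal_mark_dirty(&th, sb, bh) ;
**   journal_end(&th, sb, nblocks) ;
*/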
#include <linux/config.h>
#include <asm/uaccess.h>
#include <asm/system.h>
#include <linux/time.h>
#include <asm/semaphore.h>
#include <linux/vmalloc.h>
#include <linux/reiserfs_fs.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/fcntl.h>
#include <linux/stat.h>
#include <linux/string.h>
#include <linux/smp_lock.h>
#include <linux/suspend.h>
#include <linux/buffer_head.h>
#include <linux/workqueue.h>
/* the number of mounted filesystems. This is used to decide when to
** start and kill the commit workqueue
*/
static int reiserfs_mounted_fs_count;

static struct workqueue_struct *commit_wq;

#define JOURNAL_TRANS_HALF 1018   /* must be correct to keep the desc and commit
                                     structs at 4k */
#define BUFNR 64 /* read ahead */

/* cnode stat bits. Move these into reiserfs_fs.h */

#define BLOCK_FREED 2           /* this block was freed, and can't be written. */
#define BLOCK_FREED_HOLDER 3    /* this block was freed during this transaction, and can't be written */

#define BLOCK_NEEDS_FLUSH 4     /* used in flush_journal_list */

/* flags for do_journal_end */
#define FLUSH_ALL 1   /* flush commit and real blocks */
#define COMMIT_NOW 2  /* end and commit this transaction */
#define WAIT 4        /* wait for the log blocks to hit the disk */
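/* callers OR these flags together; do_journal_release below, for
** example, ends the final transactions with FLUSH_ALL so both commit
** and real blocks are forced out before the unmount completes
*/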
/* state bits for the journal */
#define WRITERS_BLOCKED 1      /* set when new writers not allowed */

static int do_journal_end(struct reiserfs_transaction_handle *, struct super_block *, unsigned long nblocks, int flags) ;
static int flush_journal_list(struct super_block *s, struct reiserfs_journal_list *jl, int flushall) ;
static int flush_commit_list(struct super_block *s, struct reiserfs_journal_list *jl, int flushall) ;
static int can_dirty(struct reiserfs_journal_cnode *cn) ;
static int journal_join(struct reiserfs_transaction_handle *th, struct super_block *p_s_sb, unsigned long nblocks);
static int release_journal_dev( struct super_block *super,
                                struct reiserfs_journal *journal );

static void init_journal_hash(struct super_block *p_s_sb) {
  memset(SB_JOURNAL(p_s_sb)->j_hash_table, 0, JOURNAL_HASH_SIZE * sizeof(struct reiserfs_journal_cnode *)) ;
}

/*
** clears BH_Dirty and sticks the buffer on the clean list. Called because I can't allow refile_buffer to
** make schedule happen after I've freed a block. Look at remove_from_transaction and journal_mark_freed for
** more details.
*/
static int reiserfs_clean_and_file_buffer(struct buffer_head *bh) {
  if (bh) {
    clear_buffer_dirty(bh);
  }
  return 0 ;
}

static struct reiserfs_bitmap_node *
allocate_bitmap_node(struct super_block *p_s_sb) {
  struct reiserfs_bitmap_node *bn ;

  bn = reiserfs_kmalloc(sizeof(struct reiserfs_bitmap_node), GFP_NOFS, p_s_sb) ;
  if (!bn) {
    return NULL ;
  }
  bn->data = reiserfs_kmalloc(p_s_sb->s_blocksize, GFP_NOFS, p_s_sb) ;
  if (!bn->data) {
    reiserfs_kfree(bn, sizeof(struct reiserfs_bitmap_node), p_s_sb) ;
    return NULL ;
  }
  memset(bn->data, 0, p_s_sb->s_blocksize) ;
  INIT_LIST_HEAD(&bn->list) ;
  return bn ;
}

static struct reiserfs_bitmap_node *
get_bitmap_node(struct super_block *p_s_sb) {
  struct reiserfs_bitmap_node *bn = NULL;
  struct list_head *entry = SB_JOURNAL(p_s_sb)->j_bitmap_nodes.next ;

  SB_JOURNAL(p_s_sb)->j_used_bitmap_nodes++ ;

  if (entry != &SB_JOURNAL(p_s_sb)->j_bitmap_nodes) {
    bn = list_entry(entry, struct reiserfs_bitmap_node, list) ;
    list_del(entry) ;
    memset(bn->data, 0, p_s_sb->s_blocksize) ;
    SB_JOURNAL(p_s_sb)->j_free_bitmap_nodes-- ;
    return bn ;
  }
  bn = allocate_bitmap_node(p_s_sb) ;
  return bn ;
}

static inline void free_bitmap_node(struct super_block *p_s_sb,
                                    struct reiserfs_bitmap_node *bn) {
  SB_JOURNAL(p_s_sb)->j_used_bitmap_nodes-- ;
  if (SB_JOURNAL(p_s_sb)->j_free_bitmap_nodes > REISERFS_MAX_BITMAP_NODES) {
    reiserfs_kfree(bn->data, p_s_sb->s_blocksize, p_s_sb) ;
    reiserfs_kfree(bn, sizeof(struct reiserfs_bitmap_node), p_s_sb) ;
  } else {
    list_add(&bn->list, &SB_JOURNAL(p_s_sb)->j_bitmap_nodes) ;
    SB_JOURNAL(p_s_sb)->j_free_bitmap_nodes++ ;
  }
}

static void allocate_bitmap_nodes(struct super_block *p_s_sb) {
  int i ;
  struct reiserfs_bitmap_node *bn = NULL ;
  for (i = 0 ; i < REISERFS_MIN_BITMAP_NODES ; i++) {
    bn = allocate_bitmap_node(p_s_sb) ;
    if (bn) {
      list_add(&bn->list, &SB_JOURNAL(p_s_sb)->j_bitmap_nodes) ;
      SB_JOURNAL(p_s_sb)->j_free_bitmap_nodes++ ;
    } else {
      break ; // this is ok, we'll try again when more are needed
    }
  }
}

static int set_bit_in_list_bitmap(struct super_block *p_s_sb, int block,
                                  struct reiserfs_list_bitmap *jb) {
  int bmap_nr = block / (p_s_sb->s_blocksize << 3) ;
  int bit_nr = block % (p_s_sb->s_blocksize << 3) ;
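  /* a worked example, assuming a 4096 byte blocksize: each bitmap
  ** block covers 4096 << 3 == 32768 bits, so block 40000 maps to
  ** bmap_nr 1, bit_nr 7232
  */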
  if (!jb->bitmaps[bmap_nr]) {
    jb->bitmaps[bmap_nr] = get_bitmap_node(p_s_sb) ;
  }
  set_bit(bit_nr, (unsigned long *)jb->bitmaps[bmap_nr]->data) ;
  return 0 ;
}

static void cleanup_bitmap_list(struct super_block *p_s_sb,
                                struct reiserfs_list_bitmap *jb) {
  int i ;
  for (i = 0 ; i < SB_BMAP_NR(p_s_sb) ; i++) {
    if (jb->bitmaps[i]) {
      free_bitmap_node(p_s_sb, jb->bitmaps[i]) ;
      jb->bitmaps[i] = NULL ;
    }
  }
}

/*
** only call this on FS unmount.
*/
static int free_list_bitmaps(struct super_block *p_s_sb,
                             struct reiserfs_list_bitmap *jb_array) {
  int i ;
  struct reiserfs_list_bitmap *jb ;
  for (i = 0 ; i < JOURNAL_NUM_BITMAPS ; i++) {
    jb = jb_array + i ;
    jb->journal_list = NULL ;
    cleanup_bitmap_list(p_s_sb, jb) ;
    vfree(jb->bitmaps) ;
    jb->bitmaps = NULL ;
  }
  return 0 ;
}

static int free_bitmap_nodes(struct super_block *p_s_sb) {
  struct list_head *next = SB_JOURNAL(p_s_sb)->j_bitmap_nodes.next ;
  struct reiserfs_bitmap_node *bn ;

  while(next != &SB_JOURNAL(p_s_sb)->j_bitmap_nodes) {
    bn = list_entry(next, struct reiserfs_bitmap_node, list) ;
    list_del(next) ;
    reiserfs_kfree(bn->data, p_s_sb->s_blocksize, p_s_sb) ;
    reiserfs_kfree(bn, sizeof(struct reiserfs_bitmap_node), p_s_sb) ;
    next = SB_JOURNAL(p_s_sb)->j_bitmap_nodes.next ;
    SB_JOURNAL(p_s_sb)->j_free_bitmap_nodes-- ;
  }

  return 0 ;
}

/*
** get memory for JOURNAL_NUM_BITMAPS worth of bitmaps.
** jb_array is the array to be filled in.
*/
int reiserfs_allocate_list_bitmaps(struct super_block *p_s_sb,
                                   struct reiserfs_list_bitmap *jb_array,
                                   int bmap_nr) {
  int i ;
  int failed = 0 ;
  struct reiserfs_list_bitmap *jb ;
  int mem = bmap_nr * sizeof(struct reiserfs_bitmap_node *) ;

  for (i = 0 ; i < JOURNAL_NUM_BITMAPS ; i++) {
    jb = jb_array + i ;
    jb->journal_list = NULL ;
    jb->bitmaps = vmalloc( mem ) ;
    if (!jb->bitmaps) {
      reiserfs_warning("clm-2000, unable to allocate bitmaps for journal lists\n") ;
      failed = 1 ;
      break ;
    }
    memset(jb->bitmaps, 0, mem) ;
  }
  if (failed) {
    free_list_bitmaps(p_s_sb, jb_array) ;
    return -1 ;
  }
  return 0 ;
}

/*
** find an available list bitmap. If you can't find one, flush a commit list
** and try again
*/
static struct reiserfs_list_bitmap *
get_list_bitmap(struct super_block *p_s_sb, struct reiserfs_journal_list *jl) {
  int i, j ;
  struct reiserfs_list_bitmap *jb = NULL ;

  for (j = 0 ; j < (JOURNAL_NUM_BITMAPS * 3) ; j++) {
    i = SB_JOURNAL(p_s_sb)->j_list_bitmap_index ;
    SB_JOURNAL(p_s_sb)->j_list_bitmap_index = (i + 1) % JOURNAL_NUM_BITMAPS ;
    jb = SB_JOURNAL(p_s_sb)->j_list_bitmap + i ;
    if (SB_JOURNAL(p_s_sb)->j_list_bitmap[i].journal_list) {
      flush_commit_list(p_s_sb, SB_JOURNAL(p_s_sb)->j_list_bitmap[i].journal_list, 1) ;
      if (!SB_JOURNAL(p_s_sb)->j_list_bitmap[i].journal_list) {
        break ;
      }
    } else {
      break ;
    }
  }
  if (jb->journal_list) { /* double check to make sure it really flushed */
    return NULL ;
  }
  jb->journal_list = jl ;
  return jb ;
}

/*
** allocates a new chunk of X nodes, and links them all together as a list.
** Uses the cnode->next and cnode->prev pointers
** returns NULL on failure
*/
static struct reiserfs_journal_cnode *allocate_cnodes(int num_cnodes) {
  struct reiserfs_journal_cnode *head ;
  int i ;
  if (num_cnodes <= 0) {
    return NULL ;
  }
  head = vmalloc(num_cnodes * sizeof(struct reiserfs_journal_cnode)) ;
  if (!head) {
    return NULL ;
  }
  memset(head, 0, num_cnodes * sizeof(struct reiserfs_journal_cnode)) ;
  head[0].prev = NULL ;
  head[0].next = head + 1 ;
  for (i = 1 ; i < num_cnodes; i++) {
    head[i].prev = head + (i - 1) ;
    head[i].next = head + (i + 1) ; /* if last one, overwrite it after the if */
  }
  head[num_cnodes - 1].next = NULL ;
  return head ;
}
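/* the resulting chain is NULL terminated at both ends:
**
**   NULL <- head[0] <-> head[1] <-> ... <-> head[num_cnodes - 1] -> NULL
*/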
/*
** pulls a cnode off the free list, or returns NULL on failure
*/
static struct reiserfs_journal_cnode *get_cnode(struct super_block *p_s_sb) {
  struct reiserfs_journal_cnode *cn ;

  reiserfs_check_lock_depth("get_cnode") ;

  if (SB_JOURNAL(p_s_sb)->j_cnode_free <= 0) {
    return NULL ;
  }
  SB_JOURNAL(p_s_sb)->j_cnode_used++ ;
  SB_JOURNAL(p_s_sb)->j_cnode_free-- ;
  cn = SB_JOURNAL(p_s_sb)->j_cnode_free_list ;
  if (!cn) {
    return cn ;
  }
  if (cn->next) {
    cn->next->prev = NULL ;
  }
  SB_JOURNAL(p_s_sb)->j_cnode_free_list = cn->next ;
  memset(cn, 0, sizeof(struct reiserfs_journal_cnode)) ;
  return cn ;
}

/*
** returns a cnode to the free list
*/
static void free_cnode(struct super_block *p_s_sb, struct reiserfs_journal_cnode *cn) {

  reiserfs_check_lock_depth("free_cnode") ;

  SB_JOURNAL(p_s_sb)->j_cnode_used-- ;
  SB_JOURNAL(p_s_sb)->j_cnode_free++ ;
  /* memset(cn, 0, sizeof(struct reiserfs_journal_cnode)) ; */
  cn->next = SB_JOURNAL(p_s_sb)->j_cnode_free_list ;
  if (SB_JOURNAL(p_s_sb)->j_cnode_free_list) {
    SB_JOURNAL(p_s_sb)->j_cnode_free_list->prev = cn ;
  }
  cn->prev = NULL ; /* not needed with the memset, but I might kill the memset, and forget to do this */
  SB_JOURNAL(p_s_sb)->j_cnode_free_list = cn ;
}

static int clear_prepared_bits(struct buffer_head *bh) {
  clear_bit(BH_JPrepared, &bh->b_state) ;
  return 0 ;
}

/* buffer is in current transaction */
inline int buffer_journaled(const struct buffer_head *bh) {
  if (bh)
    return test_bit(BH_JDirty, &bh->b_state) ;
  else
    return 0 ;
}

/* disk block was taken off free list before being in a finished transaction, or written to disk
** journal_new blocks can be reused immediately, for any purpose
*/
inline int buffer_journal_new(const struct buffer_head *bh) {
  if (bh)
    return test_bit(BH_JNew, &bh->b_state) ;
  else
    return 0 ;
}

inline int mark_buffer_journal_new(struct buffer_head *bh) {
  if (bh) {
    set_bit(BH_JNew, &bh->b_state) ;
  }
  return 0 ;
}

inline int mark_buffer_not_journaled(struct buffer_head *bh) {
  if (bh) {
    clear_bit(BH_JDirty, &bh->b_state) ;
  }
  return 0 ;
}

/* utility function to force a BUG if it is called without the big
** kernel lock held. caller is the string printed just before calling BUG()
*/
void reiserfs_check_lock_depth(char *caller) {
  if (current->lock_depth < 0) {
    printk("%s called without kernel lock held\n", caller) ;
    show_reiserfs_locks() ;
    BUG() ;
  }
}

/* return a cnode with same dev, block number and size in table, or null if not found */
static inline struct reiserfs_journal_cnode *
get_journal_hash_dev(struct super_block *sb,
                     struct reiserfs_journal_cnode **table,
                     long bl)
{
  struct reiserfs_journal_cnode *cn ;
  cn = journal_hash(table, sb, bl) ;
  while(cn) {
    if (cn->blocknr == bl && cn->sb == sb)
      return cn ;
    cn = cn->hnext ;
  }
  return (struct reiserfs_journal_cnode *)0 ;
}

/* returns a cnode with same size, block number and dev as bh in the current transaction hash. NULL if not found */
static inline struct reiserfs_journal_cnode *get_journal_hash(struct super_block *p_s_sb, struct buffer_head *bh) {
  struct reiserfs_journal_cnode *cn ;
  if (bh) {
    cn = get_journal_hash_dev(p_s_sb, SB_JOURNAL(p_s_sb)->j_hash_table, bh->b_blocknr);
  } else {
    return (struct reiserfs_journal_cnode *)0 ;
  }
  return cn ;
}

/* once upon a time, the journal would deadlock. a lot. Now, when
** CONFIG_REISERFS_CHECK is defined, anytime someone enters a
** transaction, it pushes itself into this ugly static list, and pops
** itself off before calling journal_end. I made a SysRq key to dump
** the list, and tell me what the writers are when I'm deadlocked. */

/* are you depending on the compiler
   to optimize this function away
   everywhere it is called? It is not
   obvious how this works, but I
   suppose debugging code need not be
   clean. */
static char *journal_writers[512] ;
int push_journal_writer(char *s) {
#ifdef CONFIG_REISERFS_CHECK
  int i ;
  for (i = 0 ; i < 512 ; i++) {
    if (!journal_writers[i]) {
      journal_writers[i] = s ;
      return i ;
    }
  }
  return -1 ;
#else
  return 0 ;
#endif
}

int pop_journal_writer(int index) {
#ifdef CONFIG_REISERFS_CHECK
  if (index >= 0) {
    journal_writers[index] = NULL ;
  }
#endif
  return 0 ;
}

int dump_journal_writers(void) {
  int i ;
  for (i = 0 ; i < 512 ; i++) {
    if (journal_writers[i]) {
      printk("%d: %s\n", i, journal_writers[i]) ;
    }
  }
  return 0 ;
}
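/* the intended pairing, roughly (the bookkeeping is only compiled in
** when CONFIG_REISERFS_CHECK is defined):
**
**   int windex = push_journal_writer("journal_begin") ;
**   ... run the transaction ...
**   pop_journal_writer(windex) ;
*/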
/*
** this actually means 'can this block be reallocated yet?'. If you set search_all, a block can only be allocated
** if it is not in the current transaction, was not freed by the current transaction, and has no chance of ever
** being overwritten by a replay after crashing.
**
** If you don't set search_all, a block can only be allocated if it is not in the current transaction. Since deleting
** a block removes it from the current transaction, this case should never happen. If you don't set search_all, make
** sure you never write the block without logging it.
**
** next_zero_bit is a suggestion about the next block to try for find_forward.
** when bl is rejected because it is set in a journal list bitmap, we search
** for the next zero bit in the bitmap that rejected bl. Then, we return that
** through next_zero_bit for find_forward to try.
**
** Just because we return something in next_zero_bit does not mean we won't
** reject it on the next call to reiserfs_in_journal
*/
int reiserfs_in_journal(struct super_block *p_s_sb,
                        int bmap_nr, int bit_nr, int search_all,
                        b_blocknr_t *next_zero_bit) {
  struct reiserfs_journal_cnode *cn ;
  struct reiserfs_list_bitmap *jb ;
  int i ;
  unsigned long bl ;

  *next_zero_bit = 0 ; /* always start this at zero. */

  /* we aren't logging, all blocks are safe for reuse */
  if (reiserfs_dont_log(p_s_sb)) {
    return 0 ;
  }

  PROC_INFO_INC( p_s_sb, journal.in_journal );
  /* If we aren't doing a search_all, this is a metablock, and it will be logged before use.
  ** if we crash before the transaction that freed it commits, this transaction won't
  ** have committed either, and the block will never be written
  */
  if (search_all) {
    for (i = 0 ; i < JOURNAL_NUM_BITMAPS ; i++) {
      PROC_INFO_INC( p_s_sb, journal.in_journal_bitmap );
      jb = SB_JOURNAL(p_s_sb)->j_list_bitmap + i ;
      if (jb->journal_list && jb->bitmaps[bmap_nr] &&
          test_bit(bit_nr, (unsigned long *)jb->bitmaps[bmap_nr]->data)) {
        *next_zero_bit = find_next_zero_bit((unsigned long *)
                                            (jb->bitmaps[bmap_nr]->data),
                                            p_s_sb->s_blocksize << 3, bit_nr+1) ;
        return 1 ;
      }
    }
  }

  bl = bmap_nr * (p_s_sb->s_blocksize << 3) + bit_nr;
  /* is it in any old transactions? */
  if (search_all && (cn = get_journal_hash_dev(p_s_sb, SB_JOURNAL(p_s_sb)->j_list_hash_table, bl))) {
    return 1;
  }

  /* is it in the current transaction. This should never happen */
  if ((cn = get_journal_hash_dev(p_s_sb, SB_JOURNAL(p_s_sb)->j_hash_table, bl))) {
    return 1;
  }

  PROC_INFO_INC( p_s_sb, journal.in_journal_reusable );
  /* safe for reuse */
  return 0 ;
}

/* insert cn into table
*/
inline void insert_journal_hash(struct reiserfs_journal_cnode **table, struct reiserfs_journal_cnode *cn) {
  struct reiserfs_journal_cnode *cn_orig ;

  cn_orig = journal_hash(table, cn->sb, cn->blocknr) ;
  cn->hnext = cn_orig ;
  cn->hprev = NULL ;
  if (cn_orig) {
    cn_orig->hprev = cn ;
  }
  journal_hash(table, cn->sb, cn->blocknr) = cn ;
}

/* lock the current transaction */
inline static void lock_journal(struct super_block *p_s_sb) {
  PROC_INFO_INC( p_s_sb, journal.lock_journal );
  while(atomic_read(&(SB_JOURNAL(p_s_sb)->j_wlock)) > 0) {
    PROC_INFO_INC( p_s_sb, journal.lock_journal_wait );
    sleep_on(&(SB_JOURNAL(p_s_sb)->j_wait)) ;
  }
  atomic_set(&(SB_JOURNAL(p_s_sb)->j_wlock), 1) ;
}

/* unlock the current transaction */
inline static void unlock_journal(struct super_block *p_s_sb) {
  atomic_dec(&(SB_JOURNAL(p_s_sb)->j_wlock)) ;
  wake_up(&(SB_JOURNAL(p_s_sb)->j_wait)) ;
}

/*
** this used to be much more involved, and I'm keeping it just in case things get ugly again.
** it gets called by flush_commit_list, and cleans up any data stored about blocks freed during a
** transaction.
*/
static void cleanup_freed_for_journal_list(struct super_block *p_s_sb, struct reiserfs_journal_list *jl) {

  struct reiserfs_list_bitmap *jb = jl->j_list_bitmap ;
  if (jb) {
    cleanup_bitmap_list(p_s_sb, jb) ;
  }
  jl->j_list_bitmap->journal_list = NULL ;
  jl->j_list_bitmap = NULL ;
}

/*
** if this journal list still has commit blocks unflushed, send them to disk.
**
** log areas must be flushed in order (transaction 2 can't commit before transaction 1)
** Before the commit block can be written, every other log block must be safely on disk
**
*/
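/* on disk, a transaction is a contiguous (wrapping) run of log blocks:
** the description block at j_start, then j_len logged blocks, then the
** commit block. That is why the commit block lives at
** (j_start + j_len + 1) % SB_ONDISK_JOURNAL_SIZE(s), and the next
** transaction begins at offset (j_start + j_len + 2)
*/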
static int flush_commit_list(struct super_block *s, struct reiserfs_journal_list *jl, int flushall) {
  int i, count ;
  int index = 0 ;
  int bn ;
  int retry_count = 0 ;
  int orig_commit_left = 0 ;
  struct buffer_head *tbh = NULL ;
  struct reiserfs_journal_list *other_jl ;

  reiserfs_check_lock_depth("flush_commit_list") ;

  if (atomic_read(&jl->j_older_commits_done)) {
    return 0 ;
  }

  /* before we can put our commit blocks on disk, we have to make sure everyone older than
  ** us is on disk too
  */
  if (jl->j_len <= 0) {
    return 0 ;
  }
  if (flushall) {
    /* we _must_ make sure the transactions are committed in order. Start with the
    ** index after this one, wrap all the way around
    */
    index = (jl - SB_JOURNAL_LIST(s)) + 1 ;
    for (i = 0 ; i < JOURNAL_LIST_COUNT ; i++) {
      other_jl = SB_JOURNAL_LIST(s) + ( (index + i) % JOURNAL_LIST_COUNT) ;
      if (other_jl && other_jl != jl && other_jl->j_len > 0 && other_jl->j_trans_id > 0 &&
          other_jl->j_trans_id <= jl->j_trans_id && (atomic_read(&(jl->j_older_commits_done)) == 0)) {
        flush_commit_list(s, other_jl, 0) ;
      }
    }
  }

  /* don't flush the commit list for the current transaction */
  if (jl == ((SB_JOURNAL_LIST(s) + SB_JOURNAL_LIST_INDEX(s)))) {
    return 0 ;
  }

  /* make sure nobody is trying to flush this one at the same time */
  if (atomic_read(&(jl->j_commit_flushing))) {
    sleep_on(&(jl->j_commit_wait)) ;
    if (flushall) {
      atomic_set(&(jl->j_older_commits_done), 1) ;
    }
    return 0 ;
  }

  /* this commit is done, exit */
  if (atomic_read(&(jl->j_commit_left)) <= 0) {
    if (flushall) {
      atomic_set(&(jl->j_older_commits_done), 1) ;
    }
    return 0 ;
  }

  /* keeps others from flushing while we are flushing */
  atomic_set(&(jl->j_commit_flushing), 1) ;

  if (jl->j_len > SB_JOURNAL_TRANS_MAX(s)) {
    reiserfs_panic(s, "journal-512: flush_commit_list: length is %lu, list number %d\n", jl->j_len, jl - SB_JOURNAL_LIST(s)) ;
    return 0 ;
  }

  orig_commit_left = atomic_read(&(jl->j_commit_left)) ;

  /* start by checking all the commit blocks in this transaction.
  ** Add anyone not on disk into tbh. Stop checking once commit_left <= 1, because that means we
  ** only have the commit block left
  */
retry:
  count = 0 ;
  for (i = 0 ; atomic_read(&(jl->j_commit_left)) > 1 && i < (jl->j_len + 1) ; i++) { /* everything but commit_bh */
    bn = SB_ONDISK_JOURNAL_1st_BLOCK(s) + (jl->j_start+i) % SB_ONDISK_JOURNAL_SIZE(s);
    tbh = journal_find_get_block(s, bn) ;

    /* kill this sanity check */
    if (count > (orig_commit_left + 2)) {
      reiserfs_panic(s, "journal-539: flush_commit_list: BAD count(%d) > orig_commit_left(%d)!\n", count, orig_commit_left) ;
    }
    if (buffer_locked(tbh)) { /* wait on it, redo it just to make sure */
      wait_on_buffer(tbh) ;
      if (!buffer_uptodate(tbh)) {
        reiserfs_panic(s, "journal-584, buffer write failed\n") ;
      }
    }
    if (buffer_dirty(tbh)) {
      printk("journal-569: flush_commit_list, block already dirty!\n") ;
    } else {
      mark_buffer_dirty(tbh) ;
    }
    ll_rw_block(WRITE, 1, &tbh) ;
    count++ ;
    put_bh(tbh) ; /* once for our get_hash */
  }

  /* wait on everyone in tbh before writing commit block*/
  for (i = 0 ; atomic_read(&(jl->j_commit_left)) > 1 &&
       i < (jl->j_len + 1) ; i++) { /* everything but commit_bh */
    bn = SB_ONDISK_JOURNAL_1st_BLOCK(s) + (jl->j_start + i) % SB_ONDISK_JOURNAL_SIZE(s) ;
    tbh = journal_find_get_block(s, bn) ;

    wait_on_buffer(tbh) ;
    if (!buffer_uptodate(tbh)) {
      reiserfs_panic(s, "journal-601, buffer write failed\n") ;
    }
    put_bh(tbh) ; /* once for our get_hash */
    bforget(tbh) ; /* once due to original getblk in do_journal_end */
    atomic_dec(&(jl->j_commit_left)) ;
  }

  if (atomic_read(&(jl->j_commit_left)) != 1) { /* just the commit_bh left, flush it without calling getblk for everyone */
    if (retry_count < 2) {
      printk("journal-582: flush_commit_list, not all log blocks on disk yet, trying again\n") ;
      retry_count++ ;
      goto retry ;
    }
    reiserfs_panic(s, "journal-563: flush_commit_list: BAD, j_commit_left is %u, should be 1\n",
                   atomic_read(&(jl->j_commit_left)));
  }

  mark_buffer_dirty(jl->j_commit_bh) ;
  sync_dirty_buffer(jl->j_commit_bh) ;
  if (!buffer_uptodate(jl->j_commit_bh)) {
    reiserfs_panic(s, "journal-615: buffer write failed\n") ;
  }
  atomic_dec(&(jl->j_commit_left)) ;
  bforget(jl->j_commit_bh) ;

  /* now, every commit block is on the disk. It is safe to allow blocks freed during this transaction to be reallocated */
  cleanup_freed_for_journal_list(s, jl) ;

  if (flushall) {
    atomic_set(&(jl->j_older_commits_done), 1) ;
  }
  atomic_set(&(jl->j_commit_flushing), 0) ;
  wake_up(&(jl->j_commit_wait)) ;
  return 0 ;
}
/*
** flush_journal_list frequently needs to find a newer transaction for a given block. This does that, or
** returns NULL if it can't find anything
*/
static struct reiserfs_journal_list *find_newer_jl_for_cn(struct reiserfs_journal_cnode *cn) {
  struct super_block *sb = cn->sb;
  b_blocknr_t blocknr = cn->blocknr ;

  cn = cn->hprev ;
  while(cn) {
    if (cn->sb == sb && cn->blocknr == blocknr && cn->jlist) {
      return cn->jlist ;
    }
    cn = cn->hprev ;
  }
  return NULL ;
}

void remove_journal_hash(struct super_block *, struct reiserfs_journal_cnode **,
                         struct reiserfs_journal_list *, unsigned long, int);

/*
** once all the real blocks have been flushed, it is safe to remove them from the
** journal list for this transaction. Aside from freeing the cnode, this also allows the
** block to be reallocated for data blocks if it had been deleted.
*/
static void remove_all_from_journal_list(struct super_block *p_s_sb, struct reiserfs_journal_list *jl, int debug) {
  struct reiserfs_journal_cnode *cn, *last ;
  cn = jl->j_realblock ;

  /* which is better, to lock once around the whole loop, or
  ** to lock for each call to remove_journal_hash?
  */
  while(cn) {
    if (cn->blocknr != 0) {
      if (debug) {
        printk("block %u, bh is %d, state %ld\n", cn->blocknr, cn->bh ? 1: 0,
               cn->state) ;
      }
      cn->state = 0 ;
      remove_journal_hash(p_s_sb, SB_JOURNAL(p_s_sb)->j_list_hash_table, jl, cn->blocknr, 1) ;
    }
    last = cn ;
    cn = cn->next ;
    free_cnode(p_s_sb, last) ;
  }
  jl->j_realblock = NULL ;
}

/*
** if this timestamp is greater than the timestamp we wrote last to the header block, write it to the header block.
** once this is done, I can safely say the log area for this transaction won't ever be replayed, and I can start
** releasing blocks in this transaction for reuse as data blocks.
** called by flush_journal_list, before it calls remove_all_from_journal_list
*/
static int _update_journal_header_block(struct super_block *p_s_sb, unsigned long offset, unsigned long trans_id) {
  struct reiserfs_journal_header *jh ;
  if (trans_id >= SB_JOURNAL(p_s_sb)->j_last_flush_trans_id) {
    if (buffer_locked((SB_JOURNAL(p_s_sb)->j_header_bh))) {
      wait_on_buffer((SB_JOURNAL(p_s_sb)->j_header_bh)) ;
      if (!buffer_uptodate(SB_JOURNAL(p_s_sb)->j_header_bh)) {
        reiserfs_panic(p_s_sb, "journal-699: buffer write failed\n") ;
      }
    }
    SB_JOURNAL(p_s_sb)->j_last_flush_trans_id = trans_id ;
    SB_JOURNAL(p_s_sb)->j_first_unflushed_offset = offset ;
    jh = (struct reiserfs_journal_header *)(SB_JOURNAL(p_s_sb)->j_header_bh->b_data) ;
    jh->j_last_flush_trans_id = cpu_to_le32(trans_id) ;
    jh->j_first_unflushed_offset = cpu_to_le32(offset) ;
    jh->j_mount_id = cpu_to_le32(SB_JOURNAL(p_s_sb)->j_mount_id) ;
    set_buffer_dirty(SB_JOURNAL(p_s_sb)->j_header_bh) ;
    sync_dirty_buffer(SB_JOURNAL(p_s_sb)->j_header_bh) ;
    if (!buffer_uptodate(SB_JOURNAL(p_s_sb)->j_header_bh)) {
      printk( "reiserfs: journal-837: IO error during journal replay\n" );
      return -EIO ;
    }
  }
  return 0 ;
}

static int update_journal_header_block(struct super_block *p_s_sb,
                                       unsigned long offset,
                                       unsigned long trans_id) {
  if (_update_journal_header_block(p_s_sb, offset, trans_id)) {
    reiserfs_panic(p_s_sb, "journal-712: buffer write failed\n") ;
  }
  return 0 ;
}

/*
** flush any and all journal lists older than you are
** can only be called from flush_journal_list
*/
static int flush_older_journal_lists(struct super_block *p_s_sb, struct reiserfs_journal_list *jl, unsigned long trans_id) {
  int i, index ;
  struct reiserfs_journal_list *other_jl ;

  index = jl - SB_JOURNAL_LIST(p_s_sb) ;
  for (i = 0 ; i < JOURNAL_LIST_COUNT ; i++) {
    other_jl = SB_JOURNAL_LIST(p_s_sb) + ((index + i) % JOURNAL_LIST_COUNT) ;
    if (other_jl && other_jl->j_len > 0 &&
        other_jl->j_trans_id > 0 &&
        other_jl->j_trans_id < trans_id &&
        other_jl != jl) {
      /* do not flush all */
      flush_journal_list(p_s_sb, other_jl, 0) ;
    }
  }
  return 0 ;
}
static void reiserfs_end_buffer_io_sync(struct buffer_head *bh, int uptodate) {
  char b[BDEVNAME_SIZE];

  if (buffer_journaled(bh)) {
    reiserfs_warning("clm-2084: pinned buffer %lu:%s sent to disk\n",
                     bh->b_blocknr, bdevname(bh->b_bdev, b)) ;
  }
  if (uptodate)
    set_buffer_uptodate(bh) ;
  else
    clear_buffer_uptodate(bh) ;
  unlock_buffer(bh) ;
  put_bh(bh) ;
}

static void submit_logged_buffer(struct buffer_head *bh) {
  get_bh(bh) ;
  bh->b_end_io = reiserfs_end_buffer_io_sync ;
  mark_buffer_notjournal_new(bh) ;
  clear_buffer_dirty(bh) ;
  submit_bh(WRITE, bh) ;
}

/* flush a journal list, both commit and real blocks
**
** always set flushall to 1, unless you are calling from inside
** flush_journal_list
**
** IMPORTANT. This can only be called while there are no journal writers,
** and the journal is locked. That means it can only be called from
** do_journal_end, or by journal_release
*/
static int flush_journal_list(struct super_block *s,
                              struct reiserfs_journal_list *jl, int flushall) {
  struct reiserfs_journal_list *pjl ;
  struct reiserfs_journal_cnode *cn, *last ;
  int count ;
  int was_jwait = 0 ;
  int was_dirty = 0 ;
  struct buffer_head *saved_bh ;
  unsigned long j_len_saved = jl->j_len ;

  if (j_len_saved <= 0) {
    return 0 ;
  }

  if (atomic_read(&SB_JOURNAL(s)->j_wcount) != 0) {
    reiserfs_warning("clm-2048: flush_journal_list called with wcount %d\n",
                     atomic_read(&SB_JOURNAL(s)->j_wcount)) ;
  }
  /* if someone is getting the commit list, we must wait for them */
  while (atomic_read(&(jl->j_commit_flushing))) {
    sleep_on(&(jl->j_commit_wait)) ;
  }
  /* if someone is flushing this list, we must wait for them */
  while (atomic_read(&(jl->j_flushing))) {
    sleep_on(&(jl->j_flush_wait)) ;
  }

  /* this list is now ours, we can change anything we want */
  atomic_set(&(jl->j_flushing), 1) ;

  count = 0 ;
  if (j_len_saved > SB_JOURNAL_TRANS_MAX(s)) {
    reiserfs_panic(s, "journal-715: flush_journal_list, length is %lu, list number %d\n", j_len_saved, jl - SB_JOURNAL_LIST(s)) ;
    atomic_dec(&(jl->j_flushing)) ;
    return 0 ;
  }

  /* if all the work is already done, get out of here */
  if (atomic_read(&(jl->j_nonzerolen)) <= 0 &&
      atomic_read(&(jl->j_commit_left)) <= 0) {
    goto flush_older_and_return ;
  }

  /* start by putting the commit list on disk. This will also flush
  ** the commit lists of any older transactions
  */
  flush_commit_list(s, jl, 1) ;

  /* are we done now? */
  if (atomic_read(&(jl->j_nonzerolen)) <= 0 &&
      atomic_read(&(jl->j_commit_left)) <= 0) {
    goto flush_older_and_return ;
  }

  /* loop through each cnode, see if we need to write it,
  ** or wait on a more recent transaction, or just ignore it
  */
  if (atomic_read(&(SB_JOURNAL(s)->j_wcount)) != 0) {
    reiserfs_panic(s, "journal-844: panic journal list is flushing, wcount is not 0\n") ;
  }
  cn = jl->j_realblock ;
  while(cn) {
    was_jwait = 0 ;
    was_dirty = 0 ;
    saved_bh = NULL ;
    /* blocknr of 0 is no longer in the hash, ignore it */
    if (cn->blocknr == 0) {
      goto free_cnode ;
    }
    pjl = find_newer_jl_for_cn(cn) ;
    /* the order is important here. We check pjl to make sure we
    ** don't clear BH_JDirty_wait if we aren't the one writing this
    ** block to disk
    */
    if (!pjl && cn->bh) {
      saved_bh = cn->bh ;

      /* we do this to make sure nobody releases the buffer while
      ** we are working with it
      */
      get_bh(saved_bh) ;

      if (buffer_journal_dirty(saved_bh)) {
        was_jwait = 1 ;
        mark_buffer_notjournal_dirty(saved_bh) ;
        /* undo the inc from journal_mark_dirty */
        put_bh(saved_bh) ;
      }
      if (can_dirty(cn)) {
        was_dirty = 1 ;
      }
    }

    /* if someone has this block in a newer transaction, just make
    ** sure they are committed, and don't try writing it to disk
    */
    if (pjl) {
      flush_commit_list(s, pjl, 1) ;
      goto free_cnode ;
    }

    /* bh == NULL when the block got to disk on its own, OR,
    ** the block got freed in a future transaction
    */
    if (saved_bh == NULL) {
      goto free_cnode ;
    }

    /* this should never happen. kupdate_one_transaction has this list
    ** locked while it works, so we should never see a buffer here that
    ** is not marked JDirty_wait
    */
    if ((!was_jwait) && !buffer_locked(saved_bh)) {
      printk("journal-813: BAD! buffer %llu %cdirty %cjwait, not in a newer transaction\n", (unsigned long long)saved_bh->b_blocknr,
             was_dirty ? ' ' : '!', was_jwait ? ' ' : '!') ;
    }
    /* kupdate_one_transaction waits on the buffers it is writing, so we
    ** should never see locked buffers here
    */
    if (buffer_locked(saved_bh)) {
      printk("clm-2083: locked buffer %llu in flush_journal_list\n",
             (unsigned long long)saved_bh->b_blocknr) ;
      wait_on_buffer(saved_bh) ;
      if (!buffer_uptodate(saved_bh)) {
        reiserfs_panic(s, "journal-923: buffer write failed\n") ;
      }
    }
    if (was_dirty) {
      /* we inc again because saved_bh gets decremented at free_cnode */
      get_bh(saved_bh) ;
      set_bit(BLOCK_NEEDS_FLUSH, &cn->state) ;
      submit_logged_buffer(saved_bh) ;
      count++ ;
    } else {
      printk("clm-2082: Unable to flush buffer %llu in flush_journal_list\n",
             (unsigned long long)saved_bh->b_blocknr) ;
    }
free_cnode:
    last = cn ;
    cn = cn->next ;
    if (saved_bh) {
      /* we incremented this to keep others from taking the buffer head away */
      put_bh(saved_bh) ;
      if (atomic_read(&(saved_bh->b_count)) < 0) {
        printk("journal-945: saved_bh->b_count < 0") ;
      }
    }
  }
  if (count > 0) {
    cn = jl->j_realblock ;
    while(cn) {
      if (test_bit(BLOCK_NEEDS_FLUSH, &cn->state)) {
        if (!cn->bh) {
          reiserfs_panic(s, "journal-1011: cn->bh is NULL\n") ;
        }
        wait_on_buffer(cn->bh) ;
        if (!cn->bh) {
          reiserfs_panic(s, "journal-1012: cn->bh is NULL\n") ;
        }
        if (!buffer_uptodate(cn->bh)) {
          reiserfs_panic(s, "journal-949: buffer write failed\n") ;
        }
        brelse(cn->bh) ;
      }
      cn = cn->next ;
    }
  }

flush_older_and_return:
  /* before we can update the journal header block, we _must_ flush all
  ** real blocks from all older transactions to disk. This is because
  ** once the header block is updated, this transaction will not be
  ** replayed after a crash
  */
  if (flushall) {
    flush_older_journal_lists(s, jl, jl->j_trans_id) ;
  }

  /* before we can remove everything from the hash tables for this
  ** transaction, we must make sure it can never be replayed
  **
  ** since we are only called from do_journal_end, we know for sure there
  ** are no allocations going on while we are flushing journal lists. So,
  ** we only need to update the journal header block for the last list
  ** being flushed
  */
  if (flushall) {
    update_journal_header_block(s, (jl->j_start + jl->j_len + 2) % SB_ONDISK_JOURNAL_SIZE(s), jl->j_trans_id) ;
  }
  remove_all_from_journal_list(s, jl, 0) ;
  jl->j_len = 0 ;
  atomic_set(&(jl->j_nonzerolen), 0) ;
  jl->j_start = 0 ;
  jl->j_realblock = NULL ;
  jl->j_commit_bh = NULL ;
  jl->j_trans_id = 0 ;
  atomic_dec(&(jl->j_flushing)) ;
  wake_up(&(jl->j_flush_wait)) ;
  return 0 ;
}
static int kupdate_one_transaction(struct super_block *s,
                                   struct reiserfs_journal_list *jl)
{
  struct reiserfs_journal_list *pjl ; /* previous list for this cn */
  struct reiserfs_journal_cnode *cn, *walk_cn ;
  b_blocknr_t blocknr ;
  int run = 0 ;
  int orig_trans_id = jl->j_trans_id ;
  struct buffer_head *saved_bh ;
  int ret = 0 ;

  /* if someone is getting the commit list, we must wait for them */
  while (atomic_read(&(jl->j_commit_flushing))) {
    sleep_on(&(jl->j_commit_wait)) ;
  }
  /* if someone is flushing this list, we must wait for them */
  while (atomic_read(&(jl->j_flushing))) {
    sleep_on(&(jl->j_flush_wait)) ;
  }
  /* was it flushed while we slept? */
  if (jl->j_len <= 0 || jl->j_trans_id != orig_trans_id) {
    goto done ;
  }

  /* this list is now ours, we can change anything we want */
  atomic_set(&(jl->j_flushing), 1) ;

loop_start:
  cn = jl->j_realblock ;
  while(cn) {
    saved_bh = NULL ;
    /* if the blocknr == 0, this has been cleared from the hash,
    ** skip it
    */
    if (cn->blocknr == 0) {
      goto next ;
    }
    /* look for a more recent transaction that logged this
    ** buffer. Only the most recent transaction with a buffer in
    ** it is allowed to send that buffer to disk
    */
    pjl = find_newer_jl_for_cn(cn) ;
    if (run == 0 && !pjl && cn->bh && buffer_journal_dirty(cn->bh) &&
        can_dirty(cn)) {
      if (!test_bit(BH_JPrepared, &cn->bh->b_state)) {
        set_bit(BLOCK_NEEDS_FLUSH, &cn->state) ;
        submit_logged_buffer(cn->bh) ;
      } else {
        /* someone else is using this buffer. We can't
        ** send it to disk right now because they might
        ** be changing/logging it.
        */
        ret = 1 ;
      }
    } else if (test_bit(BLOCK_NEEDS_FLUSH, &cn->state)) {
      clear_bit(BLOCK_NEEDS_FLUSH, &cn->state) ;
      if (!pjl && cn->bh) {
        wait_on_buffer(cn->bh) ;
      }
      /* check again, someone could have logged while we scheduled */
      pjl = find_newer_jl_for_cn(cn) ;

      /* before the JDirty_wait bit is set, the
      ** buffer is added to the hash list. So, if we are
      ** run in the middle of a do_journal_end, we will notice
      ** if this buffer was logged and added from the latest
      ** transaction. In this case, we don't want to decrement
      ** b_count
      */
      if (!pjl && cn->bh && buffer_journal_dirty(cn->bh)) {
        blocknr = cn->blocknr ;
        walk_cn = cn ;
        saved_bh = cn->bh ;
        /* update all older transactions to show this block
        ** was flushed
        */
        mark_buffer_notjournal_dirty(cn->bh) ;
        while(walk_cn) {
          if (walk_cn->bh && walk_cn->blocknr == blocknr &&
              walk_cn->sb == cn->sb) {
            if (walk_cn->jlist) {
              atomic_dec(&(walk_cn->jlist->j_nonzerolen)) ;
            }
            walk_cn->bh = NULL ;
          }
          walk_cn = walk_cn->hnext ;
        }
        if (atomic_read(&saved_bh->b_count) < 1) {
          reiserfs_warning("clm-2081: bad count on %lu\n",
                           saved_bh->b_blocknr) ;
        }
        brelse(saved_bh) ;
      }
      /*
      ** if the more recent transaction is committed to the log,
      ** this buffer can be considered flushed. Decrement our
      ** counters to reflect one less buffer that needs writing.
      **
      ** note, this relies on all of the above code being
      ** schedule free once pjl comes back non-null.
      */
      if (pjl && cn->bh && atomic_read(&pjl->j_commit_left) == 0) {
        atomic_dec(&cn->jlist->j_nonzerolen) ;
        cn->bh = NULL ;
      }
    }
next:
    cn = cn->next ;
  }
  /* the first run through the loop sends all the dirty buffers to
  ** disk, the second run through the loop does all the accounting
  */
  if (run++ == 0) {
    goto loop_start ;
  }

  atomic_set(&(jl->j_flushing), 0) ;
  wake_up(&(jl->j_flush_wait)) ;
done:
  return ret ;
}

/* since we never give dirty buffers to bdflush/kupdate, we have to
** flush them ourselves. This runs through the journal lists, finds
** old metadata in need of flushing and sends it to disk.
** this does not end transactions, commit anything, or free
** cnodes.
**
** returns the highest transaction id that was flushed last time
*/
static unsigned long reiserfs_journal_kupdate(struct super_block *s) {
  struct reiserfs_journal_list *jl ;
  int i ;
  int start ;
  time_t age ;
  int ret = 0 ;

  start = SB_JOURNAL_LIST_INDEX(s) ;

  /* safety check to prevent flush attempts during a mount */
  if (start < 0) {
    return 0 ;
  }
  i = (start + 1) % JOURNAL_LIST_COUNT ;
  while(i != start) {
    jl = SB_JOURNAL_LIST(s) + i ;
    age = get_seconds() - jl->j_timestamp ;
    if (jl->j_len > 0 && // age >= (JOURNAL_MAX_COMMIT_AGE * 2) &&
        atomic_read(&(jl->j_nonzerolen)) > 0 &&
        atomic_read(&(jl->j_commit_left)) == 0) {

      if (jl->j_trans_id == SB_JOURNAL(s)->j_trans_id) {
        break ;
      }
      /* if ret was already 1, we want to preserve that */
      ret |= kupdate_one_transaction(s, jl) ;
    }
    if (atomic_read(&(jl->j_nonzerolen)) > 0) {
      ret |= 1 ;
    }
    i = (i + 1) % JOURNAL_LIST_COUNT ;
  }
  return ret ;
}
/*
** removes any nodes in table with name block and dev as bh.
** only touches the hnext and hprev pointers.
*/
void remove_journal_hash(struct super_block *sb,
                         struct reiserfs_journal_cnode **table,
                         struct reiserfs_journal_list *jl,
                         unsigned long block, int remove_freed)
{
  struct reiserfs_journal_cnode *cur ;
  struct reiserfs_journal_cnode **head ;

  head = &(journal_hash(table, sb, block)) ;
  if (!head) {
    return ;
  }
  cur = *head ;
  while(cur) {
    if (cur->blocknr == block && cur->sb == sb && (jl == NULL || jl == cur->jlist) &&
        (!test_bit(BLOCK_FREED, &cur->state) || remove_freed)) {
      if (cur->hnext) {
        cur->hnext->hprev = cur->hprev ;
      }
      if (cur->hprev) {
        cur->hprev->hnext = cur->hnext ;
      } else {
        *head = cur->hnext ;
      }
      cur->blocknr = 0 ;
      cur->sb = NULL ;
      cur->state = 0 ;
      if (cur->bh && cur->jlist) /* anybody who clears the cur->bh will also dec the nonzerolen */
        atomic_dec(&(cur->jlist->j_nonzerolen)) ;
      cur->bh = NULL ;
      cur->jlist = NULL ;
    }
    cur = cur->hnext ;
  }
}

static void free_journal_ram(struct super_block *p_s_sb) {
  vfree(SB_JOURNAL(p_s_sb)->j_cnode_free_orig) ;
  free_list_bitmaps(p_s_sb, SB_JOURNAL(p_s_sb)->j_list_bitmap) ;
  free_bitmap_nodes(p_s_sb) ; /* must be after free_list_bitmaps */
  if (SB_JOURNAL(p_s_sb)->j_header_bh) {
    brelse(SB_JOURNAL(p_s_sb)->j_header_bh) ;
  }
  /* j_header_bh is on the journal dev, make sure not to release the journal
   * dev until we brelse j_header_bh
   */
  release_journal_dev(p_s_sb, SB_JOURNAL(p_s_sb));
  vfree(SB_JOURNAL(p_s_sb)) ;
}

/*
** call on unmount. Only set error to 1 if you haven't made your way out
** of read_super() yet. Any other caller must keep error at 0.
*/
static int do_journal_release(struct reiserfs_transaction_handle *th, struct super_block *p_s_sb, int error) {
  struct reiserfs_transaction_handle myth ;

  /* we only want to flush out transactions if we were called with error == 0
  */
  if (!error && !(p_s_sb->s_flags & MS_RDONLY)) {
    /* end the current trans */
    do_journal_end(th, p_s_sb, 10, FLUSH_ALL) ;

    /* make sure something gets logged to force our way into the flush code */
    journal_join(&myth, p_s_sb, 1) ;
    reiserfs_prepare_for_journal(p_s_sb, SB_BUFFER_WITH_SB(p_s_sb), 1) ;
    journal_mark_dirty(&myth, p_s_sb, SB_BUFFER_WITH_SB(p_s_sb)) ;
    do_journal_end(&myth, p_s_sb, 1, FLUSH_ALL) ;
  }

  reiserfs_mounted_fs_count-- ;
  /* wait for all commits to finish */
  flush_workqueue(commit_wq);
  if (!reiserfs_mounted_fs_count) {
    destroy_workqueue(commit_wq);
    commit_wq = NULL;
  }

  free_journal_ram(p_s_sb) ;

  return 0 ;
}

/*
** call on unmount. flush all journal trans, release all alloc'd ram
*/
int journal_release(struct reiserfs_transaction_handle *th, struct super_block *p_s_sb) {
  return do_journal_release(th, p_s_sb, 0) ;
}

/*
** only call from an error condition inside reiserfs_read_super!
*/
int journal_release_error(struct reiserfs_transaction_handle *th, struct super_block *p_s_sb) {
  return do_journal_release(th, p_s_sb, 1) ;
}
/* compares description block with commit block. returns 1 if they differ, 0 if they are the same */
static int journal_compare_desc_commit(struct super_block *p_s_sb, struct reiserfs_journal_desc *desc,
                                       struct reiserfs_journal_commit *commit) {
  if (get_commit_trans_id (commit) != get_desc_trans_id (desc) ||
      get_commit_trans_len (commit) != get_desc_trans_len (desc) ||
      get_commit_trans_len (commit) > SB_JOURNAL_TRANS_MAX(p_s_sb) ||
      get_commit_trans_len (commit) <= 0
  ) {
    return 1 ;
  }
  return 0 ;
}

/* returns 0 if it did not find a description block
** returns -1 if it found a corrupt commit block
** returns 1 if both desc and commit were valid
*/
static int journal_transaction_is_valid(struct super_block *p_s_sb, struct buffer_head *d_bh, unsigned long *oldest_invalid_trans_id, unsigned long *newest_mount_id) {
  struct reiserfs_journal_desc *desc ;
  struct reiserfs_journal_commit *commit ;
  struct buffer_head *c_bh ;
  unsigned long offset ;

  if (!d_bh)
    return 0 ;

  desc = (struct reiserfs_journal_desc *)d_bh->b_data ;
  if (get_desc_trans_len(desc) > 0 && !memcmp(get_journal_desc_magic (d_bh), JOURNAL_DESC_MAGIC, 8)) {
    if (oldest_invalid_trans_id && *oldest_invalid_trans_id && get_desc_trans_id(desc) > *oldest_invalid_trans_id) {
      reiserfs_debug(p_s_sb, REISERFS_DEBUG_CODE, "journal-986: transaction "
                     "is valid returning because trans_id %d is greater than "
                     "oldest_invalid %lu\n", get_desc_trans_id(desc),
                     *oldest_invalid_trans_id);
      return 0 ;
    }
    if (newest_mount_id && *newest_mount_id > get_desc_mount_id (desc)) {
      reiserfs_debug(p_s_sb, REISERFS_DEBUG_CODE, "journal-1087: transaction "
                     "is valid returning because mount_id %d is less than "
                     "newest_mount_id %lu\n", get_desc_mount_id (desc),
                     *newest_mount_id) ;
      return -1 ;
    }
    offset = d_bh->b_blocknr - SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb) ;

    /* ok, we have a journal description block, lets see if the transaction was valid */
    c_bh = journal_bread(p_s_sb, SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb) +
                         ((offset + get_desc_trans_len(desc) + 1) % SB_ONDISK_JOURNAL_SIZE(p_s_sb))) ;
    if (!c_bh)
      return 0 ;
    commit = (struct reiserfs_journal_commit *)c_bh->b_data ;
    if (journal_compare_desc_commit(p_s_sb, desc, commit)) {
      reiserfs_debug(p_s_sb, REISERFS_DEBUG_CODE,
                     "journal_transaction_is_valid, commit offset %ld had bad "
                     "time %d or length %d\n",
                     c_bh->b_blocknr - SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb),
                     get_commit_trans_id (commit),
                     get_commit_trans_len(commit));
      brelse(c_bh) ;
      if (oldest_invalid_trans_id) {
        *oldest_invalid_trans_id = get_desc_trans_id(desc) ;
        reiserfs_debug(p_s_sb, REISERFS_DEBUG_CODE, "journal-1004: "
                       "transaction_is_valid setting oldest invalid trans_id "
                       "to %d\n", get_desc_trans_id(desc)) ;
      }
      return -1 ;
    }
    brelse(c_bh) ;
    reiserfs_debug(p_s_sb, REISERFS_DEBUG_CODE, "journal-1006: found valid "
                   "transaction start offset %lu, len %d id %d\n",
                   d_bh->b_blocknr - SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb),
                   get_desc_trans_len(desc), get_desc_trans_id(desc)) ;
    return 1 ;
  }
  return 0 ;
}

static void brelse_array(struct buffer_head **heads, int num) {
  int i ;
  for (i = 0 ; i < num ; i++) {
    brelse(heads[i]) ;
  }
}

/*
** given the start, and values for the oldest acceptable transactions,
** this either reads in and replays a transaction, or returns because the transaction
** is invalid, or too old.
*/
static int journal_read_transaction(struct super_block *p_s_sb, unsigned long cur_dblock, unsigned long oldest_start,
                                    unsigned long oldest_trans_id, unsigned long newest_mount_id) {
  struct reiserfs_journal_desc *desc ;
  struct reiserfs_journal_commit *commit ;
  unsigned long trans_id = 0 ;
  struct buffer_head *c_bh ;
  struct buffer_head *d_bh ;
  struct buffer_head **log_blocks = NULL ;
  struct buffer_head **real_blocks = NULL ;
  unsigned long trans_offset ;
  int i;
  int trans_half;

  d_bh = journal_bread(p_s_sb, cur_dblock) ;
  if (!d_bh)
    return 1 ;
  desc = (struct reiserfs_journal_desc *)d_bh->b_data ;
  trans_offset = d_bh->b_blocknr - SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb) ;
  reiserfs_debug(p_s_sb, REISERFS_DEBUG_CODE, "journal-1037: "
                 "journal_read_transaction, offset %lu, len %d mount_id %d\n",
                 d_bh->b_blocknr - SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb),
                 get_desc_trans_len(desc), get_desc_mount_id(desc)) ;
  if (get_desc_trans_id(desc) < oldest_trans_id) {
    reiserfs_debug(p_s_sb, REISERFS_DEBUG_CODE, "journal-1039: "
                   "journal_read_trans skipping because %lu is too old\n",
                   cur_dblock - SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb)) ;
    brelse(d_bh) ;
    return 1 ;
  }
  if (get_desc_mount_id(desc) != newest_mount_id) {
    reiserfs_debug(p_s_sb, REISERFS_DEBUG_CODE, "journal-1146: "
                   "journal_read_trans skipping because %d is != "
                   "newest_mount_id %lu\n", get_desc_mount_id(desc),
                   newest_mount_id) ;
    brelse(d_bh) ;
    return 1 ;
  }
  c_bh = journal_bread(p_s_sb, SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb) +
                       ((trans_offset + get_desc_trans_len(desc) + 1) %
                        SB_ONDISK_JOURNAL_SIZE(p_s_sb))) ;
  if (!c_bh) {
    brelse(d_bh) ;
    return 1 ;
  }
  commit = (struct reiserfs_journal_commit *)c_bh->b_data ;
  if (journal_compare_desc_commit(p_s_sb, desc, commit)) {
    reiserfs_debug(p_s_sb, REISERFS_DEBUG_CODE, "journal_read_transaction, "
                   "commit offset %ld had bad time %d or length %d\n",
                   c_bh->b_blocknr - SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb),
                   get_commit_trans_id(commit), get_commit_trans_len(commit));
    brelse(c_bh) ;
    brelse(d_bh) ;
    return 1 ;
  }
  trans_id = get_desc_trans_id(desc) ;
  /* now we know we've got a good transaction, and it was inside the valid time ranges */
  log_blocks = reiserfs_kmalloc(get_desc_trans_len(desc) * sizeof(struct buffer_head *), GFP_NOFS, p_s_sb) ;
  real_blocks = reiserfs_kmalloc(get_desc_trans_len(desc) * sizeof(struct buffer_head *), GFP_NOFS, p_s_sb) ;
  if (!log_blocks || !real_blocks) {
    brelse(c_bh) ;
    brelse(d_bh) ;
    reiserfs_kfree(log_blocks, get_desc_trans_len(desc) * sizeof(struct buffer_head *), p_s_sb) ;
    reiserfs_kfree(real_blocks, get_desc_trans_len(desc) * sizeof(struct buffer_head *), p_s_sb) ;
    reiserfs_warning("journal-1169: kmalloc failed, unable to mount FS\n") ;
    return -1 ;
  }
  /* get all the buffer heads */
  trans_half = journal_trans_half (p_s_sb->s_blocksize) ;
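  /* the first trans_half real block numbers live in the desc block's
  ** j_realblock array; anything past trans_half spills into the commit
  ** block, which is why the lookup below splits on trans_half
  */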
  for(i = 0 ; i < get_desc_trans_len(desc) ; i++) {
    log_blocks[i] = journal_getblk(p_s_sb, SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb) + (trans_offset + 1 + i) % SB_ONDISK_JOURNAL_SIZE(p_s_sb));
    if (i < trans_half) {
      real_blocks[i] = sb_getblk(p_s_sb, le32_to_cpu(desc->j_realblock[i])) ;
    } else {
      real_blocks[i] = sb_getblk(p_s_sb, le32_to_cpu(commit->j_realblock[i - trans_half])) ;
    }
    /* make sure we don't try to replay onto log or reserved area */
    if (is_block_in_log_or_reserved_area(p_s_sb, real_blocks[i]->b_blocknr)) {
      reiserfs_warning("journal-1204: REPLAY FAILURE fsck required! Trying to replay onto a log block\n") ;
      brelse_array(log_blocks, i) ;
      brelse_array(real_blocks, i) ;
      brelse(c_bh) ;
      brelse(d_bh) ;
      reiserfs_kfree(log_blocks, get_desc_trans_len(desc) * sizeof(struct buffer_head *), p_s_sb) ;
      reiserfs_kfree(real_blocks, get_desc_trans_len(desc) * sizeof(struct buffer_head *), p_s_sb) ;
      return -1 ;
    }
  }
  /* read in the log blocks, memcpy to the corresponding real block */
  ll_rw_block(READ, get_desc_trans_len(desc), log_blocks) ;
  for (i = 0 ; i < get_desc_trans_len(desc) ; i++) {
    wait_on_buffer(log_blocks[i]) ;
    if (!buffer_uptodate(log_blocks[i])) {
      reiserfs_warning("journal-1212: REPLAY FAILURE fsck required! buffer read failed\n") ;
      brelse_array(log_blocks + i, get_desc_trans_len(desc) - i) ;
      brelse_array(real_blocks, get_desc_trans_len(desc)) ;
      brelse(c_bh) ;
      brelse(d_bh) ;
      reiserfs_kfree(log_blocks, get_desc_trans_len(desc) * sizeof(struct buffer_head *), p_s_sb) ;
      reiserfs_kfree(real_blocks, get_desc_trans_len(desc) * sizeof(struct buffer_head *), p_s_sb) ;
      return -1 ;
    }
    memcpy(real_blocks[i]->b_data, log_blocks[i]->b_data, real_blocks[i]->b_size) ;
    set_buffer_uptodate(real_blocks[i]) ;
    brelse(log_blocks[i]) ;
  }
  /* flush out the real blocks */
  for (i = 0 ; i < get_desc_trans_len(desc) ; i++) {
    set_buffer_dirty(real_blocks[i]) ;
    ll_rw_block(WRITE, 1, real_blocks + i) ;
  }
  for (i = 0 ; i < get_desc_trans_len(desc) ; i++) {
    wait_on_buffer(real_blocks[i]) ;
    if (!buffer_uptodate(real_blocks[i])) {
      reiserfs_warning("journal-1226: REPLAY FAILURE, fsck required! buffer write failed\n") ;
      brelse_array(real_blocks + i, get_desc_trans_len(desc) - i) ;
      brelse(c_bh) ;
      brelse(d_bh) ;
      reiserfs_kfree(log_blocks, get_desc_trans_len(desc) * sizeof(struct buffer_head *), p_s_sb) ;
      reiserfs_kfree(real_blocks, get_desc_trans_len(desc) * sizeof(struct buffer_head *), p_s_sb) ;
      return -1 ;
    }
    brelse(real_blocks[i]) ;
  }
  cur_dblock = SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb) + ((trans_offset + get_desc_trans_len(desc) + 2) % SB_ONDISK_JOURNAL_SIZE(p_s_sb)) ;
  reiserfs_debug(p_s_sb, REISERFS_DEBUG_CODE, "journal-1095: setting journal "
                 "start to offset %ld\n",
                 cur_dblock - SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb)) ;

  /* init starting values for the first transaction, in case this is the last transaction to be replayed. */
  SB_JOURNAL(p_s_sb)->j_start = cur_dblock - SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb) ;
  SB_JOURNAL(p_s_sb)->j_last_flush_trans_id = trans_id ;
  SB_JOURNAL(p_s_sb)->j_trans_id = trans_id + 1;
  brelse(c_bh) ;
  brelse(d_bh) ;
  reiserfs_kfree(log_blocks, le32_to_cpu(desc->j_len) * sizeof(struct buffer_head *), p_s_sb) ;
  reiserfs_kfree(real_blocks, le32_to_cpu(desc->j_len) * sizeof(struct buffer_head *), p_s_sb) ;
  return 0 ;
}
/* This function reads blocks starting from block, up to max_block, of bufsize
   size (but no more than BUFNR blocks at a time). This proved to improve
   mounting speed on self-rebuilding raid5 arrays at least.
   Right now it is only used from journal code. But later we might use it
   from other places.
   Note: Do not use journal_getblk/sb_getblk functions here! */
struct buffer_head * reiserfs_breada (struct block_device *dev, int block, int bufsize,
                                      unsigned int max_block)
{
  struct buffer_head * bhlist[BUFNR];
  unsigned int blocks = BUFNR;
  struct buffer_head * bh;
  int i, j;

  bh = __getblk (dev, block, bufsize );
  if (buffer_uptodate (bh))
    return (bh);

  if (block + BUFNR > max_block) {
    blocks = max_block - block;
  }
  bhlist[0] = bh;
  j = 1;
  for (i = 1; i < blocks; i++) {
    bh = __getblk (dev, block + i, bufsize);
    if (buffer_uptodate (bh)) {
      brelse (bh);
      break;
    }
    else bhlist[j++] = bh;
  }
  ll_rw_block (READ, j, bhlist);
  for(i = 1; i < j; i++)
    brelse (bhlist[i]);
  bh = bhlist[0];
  wait_on_buffer (bh);
  if (buffer_uptodate (bh))
    return bh;
  brelse (bh);
  return NULL;
}

/*
** read and replay the log
** on a clean unmount, the journal header's next unflushed pointer will be to an invalid
** transaction. This tests that before finding all the transactions in the log, which makes normal mount times fast.
**
** After a crash, this starts with the next unflushed transaction, and replays until it finds one too old, or invalid.
**
** On exit, it sets things up so the first transaction will work correctly.
*/
static int journal_read(struct super_block *p_s_sb) {
  struct reiserfs_journal_desc *desc ;
  unsigned long oldest_trans_id = 0;
  unsigned long oldest_invalid_trans_id = 0 ;
  time_t start ;
  unsigned long oldest_start = 0;
  unsigned long cur_dblock = 0 ;
  unsigned long newest_mount_id = 9 ;
  struct buffer_head *d_bh ;
  struct reiserfs_journal_header *jh ;
  int valid_journal_header = 0 ;
  int replay_count = 0 ;
  int continue_replay = 1 ;
  int ret ;
  char b[BDEVNAME_SIZE];

  cur_dblock = SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb) ;
  printk("reiserfs: checking transaction log (%s) for (%s)\n",
         bdevname(SB_JOURNAL(p_s_sb)->j_dev_bd, b),
         reiserfs_bdevname(p_s_sb));
  start = get_seconds() ;

  /* step 1, read in the journal header block. Check the transaction it says
  ** is the first unflushed, and if that transaction is not valid,
  ** replay is done
  */
  SB_JOURNAL(p_s_sb)->j_header_bh = journal_bread(p_s_sb,
                                                  SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb) +
                                                  SB_ONDISK_JOURNAL_SIZE(p_s_sb));
  if (!SB_JOURNAL(p_s_sb)->j_header_bh) {
    return 1 ;
  }
  jh = (struct reiserfs_journal_header *)(SB_JOURNAL(p_s_sb)->j_header_bh->b_data) ;
  if (le32_to_cpu(jh->j_first_unflushed_offset) >= 0 &&
      le32_to_cpu(jh->j_first_unflushed_offset) < SB_ONDISK_JOURNAL_SIZE(p_s_sb) &&
      le32_to_cpu(jh->j_last_flush_trans_id) > 0) {
    oldest_start = SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb) +
                   le32_to_cpu(jh->j_first_unflushed_offset) ;
    oldest_trans_id = le32_to_cpu(jh->j_last_flush_trans_id) + 1;
    newest_mount_id = le32_to_cpu(jh->j_mount_id);
    reiserfs_debug(p_s_sb, REISERFS_DEBUG_CODE, "journal-1153: found in "
                   "header: first_unflushed_offset %d, last_flushed_trans_id "
                   "%lu\n", le32_to_cpu(jh->j_first_unflushed_offset),
                   le32_to_cpu(jh->j_last_flush_trans_id)) ;
    valid_journal_header = 1 ;

    /* now, we try to read the first unflushed offset. If it is not valid,
    ** there is nothing more we can do, and it makes no sense to read
    ** through the whole log.
    */
    d_bh = journal_bread(p_s_sb, SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb) + le32_to_cpu(jh->j_first_unflushed_offset)) ;
    ret = journal_transaction_is_valid(p_s_sb, d_bh, NULL, NULL) ;
    if (!ret) {
      continue_replay = 0 ;
    }
    brelse(d_bh) ;
    goto start_log_replay;
  }

  if (continue_replay && bdev_read_only(p_s_sb->s_bdev)) {
    printk("clm-2076: device is readonly, unable to replay log\n") ;
    return -1 ;
  }

  /* ok, there are transactions that need to be replayed. start with the first log block, find
  ** all the valid transactions, and pick out the oldest.
  */
  while(continue_replay && cur_dblock < (SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb) + SB_ONDISK_JOURNAL_SIZE(p_s_sb))) {
    /* Note that it is required for blocksize of primary fs device and journal
       device to be the same */
    d_bh = reiserfs_breada(SB_JOURNAL(p_s_sb)->j_dev_bd, cur_dblock, p_s_sb->s_blocksize,
                           SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb) + SB_ONDISK_JOURNAL_SIZE(p_s_sb)) ;
    ret = journal_transaction_is_valid(p_s_sb, d_bh, &oldest_invalid_trans_id, &newest_mount_id) ;
    if (ret == 1) {
      desc = (struct reiserfs_journal_desc *)d_bh->b_data ;
      if (oldest_start == 0) { /* init all oldest_ values */
        oldest_trans_id = get_desc_trans_id(desc) ;
        oldest_start = d_bh->b_blocknr ;
        newest_mount_id = get_desc_mount_id(desc) ;
        reiserfs_debug(p_s_sb, REISERFS_DEBUG_CODE, "journal-1179: Setting "
                       "oldest_start to offset %lu, trans_id %lu\n",
                       oldest_start - SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb),
                       oldest_trans_id) ;
      } else if (oldest_trans_id > get_desc_trans_id(desc)) {
        /* one we just read was older */
        oldest_trans_id = get_desc_trans_id(desc) ;
        oldest_start = d_bh->b_blocknr ;
        reiserfs_debug(p_s_sb, REISERFS_DEBUG_CODE, "journal-1180: Resetting "
                       "oldest_start to offset %lu, trans_id %lu\n",
                       oldest_start - SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb),
                       oldest_trans_id) ;
      }
      if (newest_mount_id < get_desc_mount_id(desc)) {
        newest_mount_id = get_desc_mount_id(desc) ;
        reiserfs_debug(p_s_sb, REISERFS_DEBUG_CODE, "journal-1299: Setting "
                       "newest_mount_id to %d\n", get_desc_mount_id(desc));
      }
      cur_dblock += get_desc_trans_len(desc) + 2 ;
    } else {
      cur_dblock++ ;
    }
    brelse(d_bh) ;
  }

start_log_replay:
  cur_dblock = oldest_start ;
  if (oldest_trans_id) {
    reiserfs_debug(p_s_sb, REISERFS_DEBUG_CODE, "journal-1206: Starting replay "
                   "from offset %lu, trans_id %lu\n",
                   cur_dblock - SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb),
                   oldest_trans_id) ;
  }
  replay_count = 0 ;
  while(continue_replay && oldest_trans_id > 0) {
    ret = journal_read_transaction(p_s_sb, cur_dblock, oldest_start, oldest_trans_id, newest_mount_id) ;
    if (ret < 0) {
      return ret ;
    } else if (ret != 0) {
      break ;
    }
    cur_dblock = SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb) + SB_JOURNAL(p_s_sb)->j_start ;
    replay_count++ ;
    if (cur_dblock == oldest_start)
      break;
  }

  if (oldest_trans_id == 0) {
    reiserfs_debug(p_s_sb, REISERFS_DEBUG_CODE, "journal-1225: No valid "
                   "transactions found\n") ;
  }
  /* j_start does not get set correctly if we don't replay any transactions.
  ** if we had a valid journal_header, set j_start to the first unflushed transaction value,
  ** copy the trans_id from the header
  */
  if (valid_journal_header && replay_count == 0) {
    SB_JOURNAL(p_s_sb)->j_start = le32_to_cpu(jh->j_first_unflushed_offset) ;
    SB_JOURNAL(p_s_sb)->j_trans_id = le32_to_cpu(jh->j_last_flush_trans_id) + 1;
    SB_JOURNAL(p_s_sb)->j_last_flush_trans_id = le32_to_cpu(jh->j_last_flush_trans_id) ;
    SB_JOURNAL(p_s_sb)->j_mount_id = le32_to_cpu(jh->j_mount_id) + 1;
  } else {
    SB_JOURNAL(p_s_sb)->j_mount_id = newest_mount_id + 1 ;
  }
  reiserfs_debug(p_s_sb, REISERFS_DEBUG_CODE, "journal-1299: Setting "
                 "newest_mount_id to %lu\n", SB_JOURNAL(p_s_sb)->j_mount_id) ;
  SB_JOURNAL(p_s_sb)->j_first_unflushed_offset = SB_JOURNAL(p_s_sb)->j_start ;
  if (replay_count > 0) {
    printk("reiserfs: replayed %d transactions in %lu seconds\n", replay_count,
           get_seconds() - start) ;
  }
  if (!bdev_read_only(p_s_sb->s_bdev) &&
      _update_journal_header_block(p_s_sb, SB_JOURNAL(p_s_sb)->j_start,
                                   SB_JOURNAL(p_s_sb)->j_last_flush_trans_id))
  {
    /* replay failed, caller must call free_journal_ram and abort
    ** the mount
    */
    return -1 ;
  }
  return 0 ;
}
1808 struct reiserfs_journal_commit_task {
1809 struct super_block *p_s_sb ;
1811 int wake_on_finish ; /* if this is one, we wake the task_done queue, if it
1812 ** is zero, we free the whole struct on finish
1814 struct reiserfs_journal_commit_task *self ;
1815 struct work_struct work;
1818 static void reiserfs_journal_commit_task_func(void *__ct) {
1819 struct reiserfs_journal_commit_task *ct = __ct;
1820 struct reiserfs_journal_list *jl ;
1822 reiserfs_write_lock(ct->p_s_sb);
1824 jl = SB_JOURNAL_LIST(ct->p_s_sb) + ct->jindex ;
1826 flush_commit_list(ct->p_s_sb, SB_JOURNAL_LIST(ct->p_s_sb) + ct->jindex, 1) ;
1828 if (jl->j_len > 0 && atomic_read(&(jl->j_nonzerolen)) > 0 &&
1829 atomic_read(&(jl->j_commit_left)) == 0) {
1830 kupdate_one_transaction(ct->p_s_sb, jl) ;
1832 reiserfs_kfree(ct->self, sizeof(struct reiserfs_journal_commit_task), ct->p_s_sb) ;
1833 reiserfs_write_unlock(ct->p_s_sb);
1836 static void setup_commit_task_arg(struct reiserfs_journal_commit_task *ct,
1837 struct super_block *p_s_sb,
1840 reiserfs_panic(NULL, "journal-1360: setup_commit_task_arg called with NULL struct\n") ;
1842 ct->p_s_sb = p_s_sb ;
1843 ct->jindex = jindex ;
1844 INIT_WORK(&ct->work, reiserfs_journal_commit_task_func, ct);
1848 static void commit_flush_async(struct super_block *p_s_sb, int jindex) {
1849 struct reiserfs_journal_commit_task *ct ;
1850 /* use GFP_NOFS here: GFP_KERNEL could try to flush inodes, which would
1851 ** try to start/join a transaction and deadlock
1853 ct = reiserfs_kmalloc(sizeof(struct reiserfs_journal_commit_task), GFP_NOFS, p_s_sb) ;
1855 setup_commit_task_arg(ct, p_s_sb, jindex) ;
1856 queue_work(commit_wq, &ct->work) ;
1858 #ifdef CONFIG_REISERFS_CHECK
1859 reiserfs_warning("journal-1540: kmalloc failed, doing sync commit\n") ;
1861 flush_commit_list(p_s_sb, SB_JOURNAL_LIST(p_s_sb) + jindex, 1) ;
1865 static void journal_list_init(struct super_block *p_s_sb) {
1867 for (i = 0 ; i < JOURNAL_LIST_COUNT ; i++) {
1868 init_waitqueue_head(&(SB_JOURNAL_LIST(p_s_sb)[i].j_commit_wait)) ;
1869 init_waitqueue_head(&(SB_JOURNAL_LIST(p_s_sb)[i].j_flush_wait)) ;
1873 static int release_journal_dev( struct super_block *super,
1874 struct reiserfs_journal *journal )
1880 if( journal -> j_dev_file != NULL ) {
1881 result = filp_close( journal -> j_dev_file, NULL );
1882 journal -> j_dev_file = NULL;
1883 journal -> j_dev_bd = NULL;
1884 } else if( journal -> j_dev_bd != NULL ) {
1885 result = blkdev_put( journal -> j_dev_bd, BDEV_FS );
1886 journal -> j_dev_bd = NULL;
1890 reiserfs_warning("sh-457: release_journal_dev: Cannot release journal device: %i\n", result );
1895 static int journal_init_dev( struct super_block *super,
1896 struct reiserfs_journal *journal,
1897 const char *jdev_name )
1901 int blkdev_mode = FMODE_READ | FMODE_WRITE;
1902 char b[BDEVNAME_SIZE];
1906 journal -> j_dev_bd = NULL;
1907 journal -> j_dev_file = NULL;
1908 jdev = SB_ONDISK_JOURNAL_DEVICE( super ) ?
1909 SB_ONDISK_JOURNAL_DEVICE( super ) : super->s_dev;
1911 if (bdev_read_only(super->s_bdev))
1912 blkdev_mode = FMODE_READ;
1914 /* no "jdev" mount option was given: open the journal device (which may still be a separate device recorded in the super block) by device number */
1915 if( ( !jdev_name || !jdev_name[ 0 ] ) ) {
1916 journal->j_dev_bd = open_by_devnum(jdev, blkdev_mode, BDEV_FS);
1917 if (IS_ERR(journal->j_dev_bd)) {
1918 result = PTR_ERR(journal->j_dev_bd);
1919 journal->j_dev_bd = NULL;
1920 printk( "sh-458: journal_init_dev: cannot init journal device '%s': %i\n",
1921 __bdevname(jdev, b), result );
1923 } else if (jdev != super->s_dev)
1924 set_blocksize(journal->j_dev_bd, super->s_blocksize);
1928 journal -> j_dev_file = filp_open( jdev_name, 0, 0 );
1929 if( !IS_ERR( journal -> j_dev_file ) ) {
1930 struct inode *jdev_inode;
1932 jdev_inode = journal -> j_dev_file -> f_dentry -> d_inode;
1933 journal -> j_dev_bd = jdev_inode -> i_bdev;
1934 if( !S_ISBLK( jdev_inode -> i_mode ) ) {
1935 printk( "journal_init_dev: '%s' is not a block device\n", jdev_name );
1937 } else if( jdev_inode -> i_bdev == NULL ) {
1938 printk( "journal_init_dev: bdev uninitialized for '%s'\n", jdev_name );
1942 jdev = jdev_inode -> i_bdev -> bd_dev;
1943 set_blocksize(journal->j_dev_bd, super->s_blocksize);
1946 result = PTR_ERR( journal -> j_dev_file );
1947 journal -> j_dev_file = NULL;
1948 printk( "journal_init_dev: Cannot open '%s': %i\n", jdev_name, result );
1951 release_journal_dev( super, journal );
1953 printk( "journal_init_dev: journal device: %s\n",
1954 bdevname(journal->j_dev_bd, b));
1959 ** must be called once on fs mount. calls journal_read for you
1961 int journal_init(struct super_block *p_s_sb, const char * j_dev_name, int old_format) {
1962 int num_cnodes = SB_ONDISK_JOURNAL_SIZE(p_s_sb) * 2 ;
1963 struct buffer_head *bhjh;
1964 struct reiserfs_super_block * rs;
1965 struct reiserfs_journal_header *jh;
1966 struct reiserfs_journal *journal;
1967 char b[BDEVNAME_SIZE];
1969 journal = SB_JOURNAL(p_s_sb) = vmalloc(sizeof (struct reiserfs_journal)) ;
1971 printk("journal-1256: unable to get memory for journal structure\n") ;
1974 memset(journal, 0, sizeof(struct reiserfs_journal)) ;
1975 INIT_LIST_HEAD(&SB_JOURNAL(p_s_sb)->j_bitmap_nodes) ;
1976 INIT_LIST_HEAD (&SB_JOURNAL(p_s_sb)->j_prealloc_list);
1977 reiserfs_allocate_list_bitmaps(p_s_sb, SB_JOURNAL(p_s_sb)->j_list_bitmap,
1978 SB_BMAP_NR(p_s_sb)) ;
1979 allocate_bitmap_nodes(p_s_sb) ;
1981 /* reserved for journal area support */
1982 SB_JOURNAL_1st_RESERVED_BLOCK(p_s_sb) = (old_format ?
1983 REISERFS_OLD_DISK_OFFSET_IN_BYTES / p_s_sb->s_blocksize +
1984 SB_BMAP_NR(p_s_sb) + 1 :
1985 REISERFS_DISK_OFFSET_IN_BYTES / p_s_sb->s_blocksize + 2);
1987 /* Sanity check to see if the standard journal fits within the first bitmap
1988 block (relevant for small block sizes) */
1989 if ( !SB_ONDISK_JOURNAL_DEVICE( p_s_sb ) &&
1990 (SB_JOURNAL_1st_RESERVED_BLOCK(p_s_sb) + SB_ONDISK_JOURNAL_SIZE(p_s_sb) > p_s_sb->s_blocksize * 8) ) {
1991 printk("journal-1393: journal does not fit into the area addressed by the first bitmap block. "
1992 "It starts at %u and its size is %u. Block size %ld\n",
1993 SB_JOURNAL_1st_RESERVED_BLOCK(p_s_sb), SB_ONDISK_JOURNAL_SIZE(p_s_sb), p_s_sb->s_blocksize) ;
1994 goto free_and_return;
1997 if( journal_init_dev( p_s_sb, journal, j_dev_name ) != 0 ) {
1998 printk( "sh-462: unable to initialize journal device\n");
1999 goto free_and_return;
2002 rs = SB_DISK_SUPER_BLOCK(p_s_sb);
2004 /* read journal header */
2005 bhjh = journal_bread(p_s_sb,
2006 SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb) + SB_ONDISK_JOURNAL_SIZE(p_s_sb));
2008 printk("sh-459: unable to read journal header\n") ;
2009 goto free_and_return;
2011 jh = (struct reiserfs_journal_header *)(bhjh->b_data);
2013 /* make sure the journal matches the super block */
2014 if (is_reiserfs_jr(rs) && (jh->jh_journal.jp_journal_magic != sb_jp_journal_magic(rs))) {
2015 printk("sh-460: journal header magic %x (device %s) does not match "
2016 "the magic found in super block %x (device %s)\n",
2017 jh->jh_journal.jp_journal_magic,
2018 bdevname( SB_JOURNAL(p_s_sb)->j_dev_bd, b),
2019 sb_jp_journal_magic(rs), reiserfs_bdevname (p_s_sb));
2021 goto free_and_return;
2024 SB_JOURNAL_TRANS_MAX(p_s_sb) = le32_to_cpu (jh->jh_journal.jp_journal_trans_max);
2025 SB_JOURNAL_MAX_BATCH(p_s_sb) = le32_to_cpu (jh->jh_journal.jp_journal_max_batch);
2026 SB_JOURNAL_MAX_COMMIT_AGE(p_s_sb) = le32_to_cpu (jh->jh_journal.jp_journal_max_commit_age);
2027 SB_JOURNAL_MAX_TRANS_AGE(p_s_sb) = JOURNAL_MAX_TRANS_AGE;
2029 if (SB_JOURNAL_TRANS_MAX(p_s_sb)) {
2030 /* make sure these parameters are sane; clamp them if they are not */
2031 __u32 initial = SB_JOURNAL_TRANS_MAX(p_s_sb);
2034 if (p_s_sb->s_blocksize < 4096)
2035 ratio = 4096 / p_s_sb->s_blocksize;
2037 if (SB_ONDISK_JOURNAL_SIZE(p_s_sb)/SB_JOURNAL_TRANS_MAX(p_s_sb) < JOURNAL_MIN_RATIO)
2038 SB_JOURNAL_TRANS_MAX(p_s_sb) = SB_ONDISK_JOURNAL_SIZE(p_s_sb) / JOURNAL_MIN_RATIO;
2039 if (SB_JOURNAL_TRANS_MAX(p_s_sb) > JOURNAL_TRANS_MAX_DEFAULT / ratio)
2040 SB_JOURNAL_TRANS_MAX(p_s_sb) = JOURNAL_TRANS_MAX_DEFAULT / ratio;
2041 if (SB_JOURNAL_TRANS_MAX(p_s_sb) < JOURNAL_TRANS_MIN_DEFAULT / ratio)
2042 SB_JOURNAL_TRANS_MAX(p_s_sb) = JOURNAL_TRANS_MIN_DEFAULT / ratio;
2044 if (SB_JOURNAL_TRANS_MAX(p_s_sb) != initial)
2045 printk ("sh-461: journal_init: wrong transaction max size (%u). Changed to %u\n",
2046 initial, SB_JOURNAL_TRANS_MAX(p_s_sb));
2048 SB_JOURNAL_MAX_BATCH(p_s_sb) = SB_JOURNAL_TRANS_MAX(p_s_sb)*
2049 JOURNAL_MAX_BATCH_DEFAULT/JOURNAL_TRANS_MAX_DEFAULT;
2052 if (!SB_JOURNAL_TRANS_MAX(p_s_sb)) {
2053 /* the file system was created by an old version of mkreiserfs,
2054 so this field contains zero */
2055 SB_JOURNAL_TRANS_MAX(p_s_sb) = JOURNAL_TRANS_MAX_DEFAULT ;
2056 SB_JOURNAL_MAX_BATCH(p_s_sb) = JOURNAL_MAX_BATCH_DEFAULT ;
2057 SB_JOURNAL_MAX_COMMIT_AGE(p_s_sb) = JOURNAL_MAX_COMMIT_AGE ;
2059 /* for blocksize >= 4096, the max transaction size is 1024 blocks. For smaller
2060 block sizes, the max transaction size is decreased proportionally */
2061 if (p_s_sb->s_blocksize < 4096) {
2062 SB_JOURNAL_TRANS_MAX(p_s_sb) /= (4096 / p_s_sb->s_blocksize) ;
2063 SB_JOURNAL_MAX_BATCH(p_s_sb) = (SB_JOURNAL_TRANS_MAX(p_s_sb)) * 9 / 10 ;
2066 printk ("Reiserfs journal params: device %s, size %u, "
2067 "journal first block %u, max trans len %u, max batch %u, "
2068 "max commit age %u, max trans age %u\n",
2069 bdevname( SB_JOURNAL(p_s_sb)->j_dev_bd, b),
2070 SB_ONDISK_JOURNAL_SIZE(p_s_sb),
2071 SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb),
2072 SB_JOURNAL_TRANS_MAX(p_s_sb),
2073 SB_JOURNAL_MAX_BATCH(p_s_sb),
2074 SB_JOURNAL_MAX_COMMIT_AGE(p_s_sb),
2075 SB_JOURNAL_MAX_TRANS_AGE(p_s_sb));
2079 SB_JOURNAL(p_s_sb)->j_list_bitmap_index = 0 ;
2080 SB_JOURNAL_LIST_INDEX(p_s_sb) = -10000 ; /* make sure flush_old_commits does not try to flush a list while replay is on */
2082 /* clear out the journal list array */
2083 memset(SB_JOURNAL_LIST(p_s_sb), 0, sizeof(struct reiserfs_journal_list) * JOURNAL_LIST_COUNT) ;
2084 journal_list_init(p_s_sb) ;
2086 memset(SB_JOURNAL(p_s_sb)->j_list_hash_table, 0, JOURNAL_HASH_SIZE * sizeof(struct reiserfs_journal_cnode *)) ;
2087 memset(journal_writers, 0, sizeof(char *) * 512) ; /* debug code */
2089 INIT_LIST_HEAD(&SB_JOURNAL(p_s_sb)->j_dirty_buffers) ;
2090 spin_lock_init(&SB_JOURNAL(p_s_sb)->j_dirty_buffers_lock) ;
2092 SB_JOURNAL(p_s_sb)->j_start = 0 ;
2093 SB_JOURNAL(p_s_sb)->j_len = 0 ;
2094 SB_JOURNAL(p_s_sb)->j_len_alloc = 0 ;
2095 atomic_set(&(SB_JOURNAL(p_s_sb)->j_wcount), 0) ;
2096 SB_JOURNAL(p_s_sb)->j_bcount = 0 ;
2097 SB_JOURNAL(p_s_sb)->j_trans_start_time = 0 ;
2098 SB_JOURNAL(p_s_sb)->j_last = NULL ;
2099 SB_JOURNAL(p_s_sb)->j_first = NULL ;
2100 init_waitqueue_head(&(SB_JOURNAL(p_s_sb)->j_join_wait)) ;
2101 init_waitqueue_head(&(SB_JOURNAL(p_s_sb)->j_wait)) ;
2103 SB_JOURNAL(p_s_sb)->j_trans_id = 10 ;
2104 SB_JOURNAL(p_s_sb)->j_mount_id = 10 ;
2105 SB_JOURNAL(p_s_sb)->j_state = 0 ;
2106 atomic_set(&(SB_JOURNAL(p_s_sb)->j_jlock), 0) ;
2107 atomic_set(&(SB_JOURNAL(p_s_sb)->j_wlock), 0) ;
2108 SB_JOURNAL(p_s_sb)->j_cnode_free_list = allocate_cnodes(num_cnodes) ;
2109 SB_JOURNAL(p_s_sb)->j_cnode_free_orig = SB_JOURNAL(p_s_sb)->j_cnode_free_list ;
2110 SB_JOURNAL(p_s_sb)->j_cnode_free = SB_JOURNAL(p_s_sb)->j_cnode_free_list ? num_cnodes : 0 ;
2111 SB_JOURNAL(p_s_sb)->j_cnode_used = 0 ;
2112 SB_JOURNAL(p_s_sb)->j_must_wait = 0 ;
2114 init_journal_hash(p_s_sb) ;
2115 SB_JOURNAL_LIST(p_s_sb)[0].j_list_bitmap = get_list_bitmap(p_s_sb, SB_JOURNAL_LIST(p_s_sb)) ;
2116 if (!(SB_JOURNAL_LIST(p_s_sb)[0].j_list_bitmap)) {
2117 reiserfs_warning("journal-2005: get_list_bitmap failed for journal list 0\n") ;
2118 goto free_and_return;
2120 if (journal_read(p_s_sb) < 0) {
2121 reiserfs_warning("Replay Failure, unable to mount\n") ;
2122 goto free_and_return;
2124 SB_JOURNAL_LIST_INDEX(p_s_sb) = 0 ; /* once the read is done, we can set this where it belongs */
2127 if (reiserfs_dont_log (p_s_sb))
2130 reiserfs_mounted_fs_count++ ;
2131 if (reiserfs_mounted_fs_count <= 1)
2132 commit_wq = create_workqueue("reiserfs");
2136 free_journal_ram(p_s_sb);
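/*
** Illustrative sketch (not part of the original file): how a mount path
** might drive journal_init. The caller name and error handling here are
** assumptions modeled on the fill-super path in super.c, not quotes of it.
*/
static int example_mount_journal(struct super_block *s, const char *jdev_name, int old_format) {
  /* journal_init allocates the journal, opens the journal device, and
  ** replays committed transactions via journal_read
  */
  if (journal_init(s, jdev_name, old_format)) {
    /* journal_init frees its RAM on failure; the mount is simply refused */
    return -EINVAL ;
  }
  return 0 ;
}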
2141 ** test for a polite end of the current transaction. Used by file_write, and should
2142 ** be used by delete to make sure they don't write more than can fit inside a single transaction
2145 int journal_transaction_should_end(struct reiserfs_transaction_handle *th, int new_alloc) {
2146 time_t now = get_seconds() ;
2147 if (reiserfs_dont_log(th->t_super))
2149 if ( SB_JOURNAL(th->t_super)->j_must_wait > 0 ||
2150 (SB_JOURNAL(th->t_super)->j_len_alloc + new_alloc) >= SB_JOURNAL_MAX_BATCH(th->t_super) ||
2151 atomic_read(&(SB_JOURNAL(th->t_super)->j_jlock)) ||
2152 (now - SB_JOURNAL(th->t_super)->j_trans_start_time) > SB_JOURNAL_MAX_TRANS_AGE(th->t_super) ||
2153 SB_JOURNAL(th->t_super)->j_cnode_free < (SB_JOURNAL_TRANS_MAX(th->t_super) * 3)) {
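/*
** Illustrative sketch (not from the original file) of the polite-restart
** pattern described above. The chunk loop and the block estimate of 10 are
** hypothetical; only the journal_* calls are real entry points in this file.
*/
static int example_polite_writer(struct super_block *s, int chunks) {
  struct reiserfs_transaction_handle th ;
  int i ;
  journal_begin(&th, s, 10) ;
  for (i = 0 ; i < chunks ; i++) {
    /* ... prepare buffers and journal_mark_dirty() them here ... */
    /* if the running transaction is full or too old, end it politely and
    ** start a fresh one instead of overflowing it
    */
    if (journal_transaction_should_end(&th, 10)) {
      journal_end(&th, s, 10) ;
      journal_begin(&th, s, 10) ;
    }
  }
  return journal_end(&th, s, 10) ;
}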
2159 /* this must be called inside a transaction, and requires the
2160 ** kernel_lock to be held
2162 void reiserfs_block_writes(struct reiserfs_transaction_handle *th) {
2163 struct super_block *s = th->t_super ;
2164 SB_JOURNAL(s)->j_must_wait = 1 ;
2165 set_bit(WRITERS_BLOCKED, &SB_JOURNAL(s)->j_state) ;
2169 /* this must be called without a transaction started, and does not require the BKL */
2172 void reiserfs_allow_writes(struct super_block *s) {
2173 clear_bit(WRITERS_BLOCKED, &SB_JOURNAL(s)->j_state) ;
2174 wake_up(&SB_JOURNAL(s)->j_join_wait) ;
2177 /* this must be called without a transaction started, and does not require the BKL */
2180 void reiserfs_wait_on_write_block(struct super_block *s) {
2181 wait_event(SB_JOURNAL(s)->j_join_wait,
2182 !test_bit(WRITERS_BLOCKED, &SB_JOURNAL(s)->j_state)) ;
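/*
** Illustrative sketch (not from the original file) of how the three helpers
** above cooperate. The quiescing caller is hypothetical; resize/umount style
** paths are the intended users.
*/
static void example_quiesce(struct super_block *s, struct reiserfs_transaction_handle *th) {
  reiserfs_block_writes(th) ;  /* new journal_begin callers now block */
  /* ... work that must not race with new transactions ... */
  reiserfs_allow_writes(s) ;   /* clear WRITERS_BLOCKED and wake the waiters */
}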
2185 /* join == true if you must join an existing transaction.
2186 ** join == false if you can deal with waiting for others to finish
2188 ** this will block until the transaction is joinable. Pass the number of blocks you
2189 ** expect to use in nblocks.
2191 static int do_journal_begin_r(struct reiserfs_transaction_handle *th, struct super_block * p_s_sb,unsigned long nblocks,int join) {
2192 time_t now = get_seconds() ;
2195 reiserfs_check_lock_depth("journal_begin") ;
2196 RFALSE( p_s_sb->s_flags & MS_RDONLY,
2197 "clm-2078: calling journal_begin on readonly FS") ;
2199 if (reiserfs_dont_log(p_s_sb)) {
2200 th->t_super = p_s_sb ; /* others will check this for the don't log flag */
2203 PROC_INFO_INC( p_s_sb, journal.journal_being );
2206 lock_journal(p_s_sb) ;
2208 if (test_bit(WRITERS_BLOCKED, &SB_JOURNAL(p_s_sb)->j_state)) {
2209 unlock_journal(p_s_sb) ;
2210 reiserfs_wait_on_write_block(p_s_sb) ;
2211 PROC_INFO_INC( p_s_sb, journal.journal_relock_writers );
2215 /* if there is no room in the journal OR
2216 ** if this transaction is too old and we weren't called joinable, wait for the current
2217 ** transaction to finish before beginning. We don't sleep if there are no other writers.
2220 if ( (!join && SB_JOURNAL(p_s_sb)->j_must_wait > 0) ||
2221 ( !join && (SB_JOURNAL(p_s_sb)->j_len_alloc + nblocks + 2) >= SB_JOURNAL_MAX_BATCH(p_s_sb)) ||
2222 (!join && atomic_read(&(SB_JOURNAL(p_s_sb)->j_wcount)) > 0 && SB_JOURNAL(p_s_sb)->j_trans_start_time > 0 &&
2223 (now - SB_JOURNAL(p_s_sb)->j_trans_start_time) > SB_JOURNAL_MAX_TRANS_AGE(p_s_sb)) ||
2224 (!join && atomic_read(&(SB_JOURNAL(p_s_sb)->j_jlock)) ) ||
2225 (!join && SB_JOURNAL(p_s_sb)->j_cnode_free < (SB_JOURNAL_TRANS_MAX(p_s_sb) * 3))) {
2227 unlock_journal(p_s_sb) ; /* allow others to finish this transaction */
2229 /* if writer count is 0, we can just force this transaction to end, and start
2230 ** a new one afterwards.
2232 if (atomic_read(&(SB_JOURNAL(p_s_sb)->j_wcount)) <= 0) {
2233 struct reiserfs_transaction_handle myth ;
2234 journal_join(&myth, p_s_sb, 1) ;
2235 reiserfs_prepare_for_journal(p_s_sb, SB_BUFFER_WITH_SB(p_s_sb), 1) ;
2236 journal_mark_dirty(&myth, p_s_sb, SB_BUFFER_WITH_SB(p_s_sb)) ;
2237 do_journal_end(&myth, p_s_sb,1,COMMIT_NOW) ;
2239 /* but if the writer count isn't zero, we have to wait for the current writers to finish.
2240 ** They won't batch on transaction end once we set j_jlock
2242 atomic_set(&(SB_JOURNAL(p_s_sb)->j_jlock), 1) ;
2243 old_trans_id = SB_JOURNAL(p_s_sb)->j_trans_id ;
2244 while(atomic_read(&(SB_JOURNAL(p_s_sb)->j_jlock)) &&
2245 SB_JOURNAL(p_s_sb)->j_trans_id == old_trans_id) {
2246 sleep_on(&(SB_JOURNAL(p_s_sb)->j_join_wait)) ;
2249 PROC_INFO_INC( p_s_sb, journal.journal_relock_wcount );
2253 if (SB_JOURNAL(p_s_sb)->j_trans_start_time == 0) { /* we are the first writer, set trans_id */
2254 SB_JOURNAL(p_s_sb)->j_trans_start_time = now ;
2256 atomic_inc(&(SB_JOURNAL(p_s_sb)->j_wcount)) ;
2257 SB_JOURNAL(p_s_sb)->j_len_alloc += nblocks ;
2258 th->t_blocks_logged = 0 ;
2259 th->t_blocks_allocated = nblocks ;
2260 th->t_super = p_s_sb ;
2261 th->t_trans_id = SB_JOURNAL(p_s_sb)->j_trans_id ;
2262 th->t_caller = "Unknown" ;
2263 unlock_journal(p_s_sb) ;
2269 static int journal_join(struct reiserfs_transaction_handle *th, struct super_block *p_s_sb, unsigned long nblocks) {
2270 return do_journal_begin_r(th, p_s_sb, nblocks, 1) ;
2273 int journal_begin(struct reiserfs_transaction_handle *th, struct super_block * p_s_sb, unsigned long nblocks) {
2274 return do_journal_begin_r(th, p_s_sb, nblocks, 0) ;
2277 /* not used at all */
2278 int journal_prepare(struct super_block * p_s_sb, struct buffer_head *bh) {
2283 ** puts bh into the current transaction. If it was already there, reordering removes the
2284 ** old pointers from the hash, and puts new ones in (to make sure replay happens in the right order).
2286 ** if it was dirty, cleans it and files it onto the clean list. I can't let it be dirty again until the
2287 ** transaction is committed.
2289 ** if j_len is bigger than j_len_alloc, it pushes j_len_alloc to 10 + j_len.
2291 int journal_mark_dirty(struct reiserfs_transaction_handle *th, struct super_block *p_s_sb, struct buffer_head *bh) {
2292 struct reiserfs_journal_cnode *cn = NULL;
2293 int count_already_incd = 0 ;
2296 PROC_INFO_INC( p_s_sb, journal.mark_dirty );
2297 if (reiserfs_dont_log(th->t_super)) {
2298 mark_buffer_dirty(bh) ;
2302 if (th->t_trans_id != SB_JOURNAL(p_s_sb)->j_trans_id) {
2303 reiserfs_panic(th->t_super, "journal-1577: handle trans id %ld != current trans id %ld\n",
2304 th->t_trans_id, SB_JOURNAL(p_s_sb)->j_trans_id);
2306 p_s_sb->s_dirt = 1 ;
2308 prepared = test_and_clear_bit(BH_JPrepared, &bh->b_state) ;
2309 /* already in this transaction, we are done */
2310 if (buffer_journaled(bh)) {
2311 PROC_INFO_INC( p_s_sb, journal.mark_dirty_already );
2315 /* this must be turned into a panic instead of a warning. We can't allow
2316 ** a dirty or journal_dirty or locked buffer to be logged, as some changes
2317 ** could get to disk too early. NOT GOOD.
2319 if (!prepared || buffer_locked(bh)) {
2320 printk("journal-1777: buffer %llu bad state %cPREPARED %cLOCKED %cDIRTY %cJDIRTY_WAIT\n", (unsigned long long)bh->b_blocknr, prepared ? ' ' : '!',
2321 buffer_locked(bh) ? ' ' : '!',
2322 buffer_dirty(bh) ? ' ' : '!',
2323 buffer_journal_dirty(bh) ? ' ' : '!') ;
2324 show_reiserfs_locks() ;
2326 count_already_incd = clear_prepared_bits(bh) ;
2328 if (atomic_read(&(SB_JOURNAL(p_s_sb)->j_wcount)) <= 0) {
2329 printk("journal-1409: journal_mark_dirty returning because j_wcount was %d\n", atomic_read(&(SB_JOURNAL(p_s_sb)->j_wcount))) ;
2332 /* this error means I've screwed up, and we've overflowed the transaction.
2333 ** Nothing can be done here, except make the FS readonly or panic.
2335 if (SB_JOURNAL(p_s_sb)->j_len >= SB_JOURNAL_TRANS_MAX(p_s_sb)) {
2336 reiserfs_panic(th->t_super, "journal-1413: journal_mark_dirty: j_len (%lu) is too big\n", SB_JOURNAL(p_s_sb)->j_len) ;
2339 if (buffer_journal_dirty(bh)) {
2340 count_already_incd = 1 ;
2341 PROC_INFO_INC( p_s_sb, journal.mark_dirty_notjournal );
2342 mark_buffer_notjournal_dirty(bh) ;
2345 if (buffer_dirty(bh)) {
2346 clear_buffer_dirty(bh) ;
2349 if (buffer_journaled(bh)) { /* must double check after getting lock */
2353 if (SB_JOURNAL(p_s_sb)->j_len > SB_JOURNAL(p_s_sb)->j_len_alloc) {
2354 SB_JOURNAL(p_s_sb)->j_len_alloc = SB_JOURNAL(p_s_sb)->j_len + JOURNAL_PER_BALANCE_CNT ;
2357 set_bit(BH_JDirty, &bh->b_state) ;
2359 /* now put this guy on the end */
2361 cn = get_cnode(p_s_sb) ;
2363 reiserfs_panic(p_s_sb, "get_cnode failed!\n");
2366 if (th->t_blocks_logged == th->t_blocks_allocated) {
2367 th->t_blocks_allocated += JOURNAL_PER_BALANCE_CNT ;
2368 SB_JOURNAL(p_s_sb)->j_len_alloc += JOURNAL_PER_BALANCE_CNT ;
2370 th->t_blocks_logged++ ;
2371 SB_JOURNAL(p_s_sb)->j_len++ ;
2374 cn->blocknr = bh->b_blocknr ;
2377 insert_journal_hash(SB_JOURNAL(p_s_sb)->j_hash_table, cn) ;
2378 if (!count_already_incd) {
2383 cn->prev = SB_JOURNAL(p_s_sb)->j_last ;
2385 if (SB_JOURNAL(p_s_sb)->j_last) {
2386 SB_JOURNAL(p_s_sb)->j_last->next = cn ;
2387 SB_JOURNAL(p_s_sb)->j_last = cn ;
2389 SB_JOURNAL(p_s_sb)->j_first = cn ;
2390 SB_JOURNAL(p_s_sb)->j_last = cn ;
2397 ** if the buffer is already in the current transaction, do a journal_mark_dirty;
2398 ** otherwise, just mark it dirty and move on. Used for writes to meta blocks
2399 ** that don't need journaling
2401 int journal_mark_dirty_nolog(struct reiserfs_transaction_handle *th, struct super_block *p_s_sb, struct buffer_head *bh) {
2402 if (reiserfs_dont_log(th->t_super) || buffer_journaled(bh) ||
2403 buffer_journal_dirty(bh)) {
2404 return journal_mark_dirty(th, p_s_sb, bh) ;
2406 if (get_journal_hash_dev(p_s_sb, SB_JOURNAL(p_s_sb)->j_list_hash_table, bh->b_blocknr)) {
2407 return journal_mark_dirty(th, p_s_sb, bh) ;
2409 mark_buffer_dirty(bh) ;
2413 int journal_end(struct reiserfs_transaction_handle *th, struct super_block *p_s_sb, unsigned long nblocks) {
2414 return do_journal_end(th, p_s_sb, nblocks, 0) ;
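/*
** Illustrative sketch (not from the original file): the canonical caller
** pattern for the begin/prepare/mark_dirty/end API, as used elsewhere in
** this file with SB_BUFFER_WITH_SB. The buffer edit in the middle is elided.
*/
static int example_log_one_buffer(struct super_block *s, struct buffer_head *bh) {
  struct reiserfs_transaction_handle th ;
  journal_begin(&th, s, 1) ;
  /* make sure the buffer can't be written back while we change it */
  reiserfs_prepare_for_journal(s, bh, 1) ;
  /* ... modify bh->b_data ... */
  journal_mark_dirty(&th, s, bh) ;  /* add it to the running transaction */
  return journal_end(&th, s, 1) ;
}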
2417 /* removes a buffer from the current transaction, releasing it and decrementing any counters.
2418 ** also files the removed buffer directly onto the clean list
2420 ** called by journal_mark_freed when a block has been deleted
2422 ** returns 1 if it cleaned and released the buffer, 0 otherwise
2424 static int remove_from_transaction(struct super_block *p_s_sb, b_blocknr_t blocknr, int already_cleaned) {
2425 struct buffer_head *bh ;
2426 struct reiserfs_journal_cnode *cn ;
2429 cn = get_journal_hash_dev(p_s_sb, SB_JOURNAL(p_s_sb)->j_hash_table, blocknr) ;
2430 if (!cn || !cn->bh) {
2435 cn->prev->next = cn->next ;
2438 cn->next->prev = cn->prev ;
2440 if (cn == SB_JOURNAL(p_s_sb)->j_first) {
2441 SB_JOURNAL(p_s_sb)->j_first = cn->next ;
2443 if (cn == SB_JOURNAL(p_s_sb)->j_last) {
2444 SB_JOURNAL(p_s_sb)->j_last = cn->prev ;
2447 remove_journal_hash(p_s_sb, SB_JOURNAL(p_s_sb)->j_hash_table, NULL, bh->b_blocknr, 0) ;
2448 mark_buffer_not_journaled(bh) ; /* don't log this one */
2450 if (!already_cleaned) {
2451 mark_buffer_notjournal_dirty(bh) ;
2453 if (atomic_read(&(bh->b_count)) < 0) {
2454 printk("journal-1752: remove from trans, b_count < 0\n") ;
2456 if (!buffer_locked(bh)) reiserfs_clean_and_file_buffer(bh) ;
2459 SB_JOURNAL(p_s_sb)->j_len-- ;
2460 SB_JOURNAL(p_s_sb)->j_len_alloc-- ;
2461 free_cnode(p_s_sb, cn) ;
2466 ** for any cnode in a journal list, it can only be dirtied if all the
2467 ** transactions that include it are committed to disk.
2468 ** this checks through each transaction, and returns 1 if you are allowed to dirty,
2469 ** and 0 if you aren't
2471 ** it is called by dirty_journal_list, which is called after flush_commit_list has gotten all the log
2472 ** blocks for a given transaction on disk
2475 static int can_dirty(struct reiserfs_journal_cnode *cn) {
2476 struct super_block *sb = cn->sb;
2477 b_blocknr_t blocknr = cn->blocknr ;
2478 struct reiserfs_journal_cnode *cur = cn->hprev ;
2481 /* first test hprev. These are all newer than cn, so any node here
2482 ** with the same block number and dev means this node can't be sent
2483 ** to disk right now.
2485 while(cur && can_dirty) {
2486 if (cur->jlist && cur->bh && cur->blocknr && cur->sb == sb &&
2487 cur->blocknr == blocknr) {
2492 /* then test hnext. These are all older than cn. As long as they
2493 ** are committed to the log, it is safe to write cn to disk
2496 while(cur && can_dirty) {
2497 if (cur->jlist && cur->jlist->j_len > 0 &&
2498 atomic_read(&(cur->jlist->j_commit_left)) > 0 && cur->bh &&
2499 cur->blocknr && cur->sb == sb && cur->blocknr == blocknr) {
2507 /* syncs the commit blocks, but does not force the real buffers to disk.
2508 ** will wait until the current transaction is done/committed before returning
2510 int journal_end_sync(struct reiserfs_transaction_handle *th, struct super_block *p_s_sb, unsigned long nblocks) {
2512 if (SB_JOURNAL(p_s_sb)->j_len == 0) {
2513 reiserfs_prepare_for_journal(p_s_sb, SB_BUFFER_WITH_SB(p_s_sb), 1) ;
2514 journal_mark_dirty(th, p_s_sb, SB_BUFFER_WITH_SB(p_s_sb)) ;
2516 return do_journal_end(th, p_s_sb, nblocks, COMMIT_NOW | WAIT) ;
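/*
** Illustrative sketch (not from the original file): forcing a synchronous
** commit, e.g. for a sync(2)-style request. journal_end_sync dirties the
** super block buffer when the transaction is otherwise empty, so there is
** always something to commit and wait on.
*/
static int example_sync_fs(struct super_block *s) {
  struct reiserfs_transaction_handle th ;
  journal_begin(&th, s, 1) ;
  return journal_end_sync(&th, s, 1) ;
}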
2519 int show_reiserfs_locks(void) {
2521 dump_journal_writers() ;
2526 ** used to get memory back from async commits that are floating around
2527 ** and to reclaim any blocks deleted but unusable because their commits
2528 ** haven't hit disk yet. called from bitmap.c
2530 ** if it starts flushing things, it ORs SCHEDULE_OCCURRED into repeat.
2531 ** note, this is just if schedule has a chance of occurring. I need to
2532 ** change flush_commit_lists to have a repeat parameter too.
2535 void flush_async_commits(struct super_block *p_s_sb) {
2538 for (i = 0 ; i < JOURNAL_LIST_COUNT ; i++) {
2539 if (i != SB_JOURNAL_LIST_INDEX(p_s_sb)) {
2540 flush_commit_list(p_s_sb, SB_JOURNAL_LIST(p_s_sb) + i, 1) ;
2546 ** flushes any old transactions to disk
2547 ** ends the current transaction if it is too old
2549 ** also calls flush_journal_list with old_only == 1, which allows me to reclaim
2550 ** memory and such from the journal lists whose real blocks are all on disk.
2552 ** called by sync_dev_journal from buffer.c
2554 int flush_old_commits(struct super_block *p_s_sb, int immediate) {
2559 struct reiserfs_transaction_handle th ;
2561 start = SB_JOURNAL_LIST_INDEX(p_s_sb) ;
2562 now = get_seconds() ;
2564 /* safety check so we don't flush while we are replaying the log during mount */
2565 if (SB_JOURNAL_LIST_INDEX(p_s_sb) < 0) {
2568 /* starting with oldest, loop until we get to the start */
2569 i = (SB_JOURNAL_LIST_INDEX(p_s_sb) + 1) % JOURNAL_LIST_COUNT ;
2571 if (SB_JOURNAL_LIST(p_s_sb)[i].j_len > 0 && ((now - SB_JOURNAL_LIST(p_s_sb)[i].j_timestamp) > SB_JOURNAL_MAX_COMMIT_AGE(p_s_sb) ||
2573 /* we have to check again to be sure the current transaction did not change */
2574 if (i != SB_JOURNAL_LIST_INDEX(p_s_sb)) {
2575 flush_commit_list(p_s_sb, SB_JOURNAL_LIST(p_s_sb) + i, 1) ;
2578 i = (i + 1) % JOURNAL_LIST_COUNT ;
2581 /* now, check the current transaction. If there are no writers, and it is too old, finish it, and
2582 ** force the commit blocks to disk
2584 if (!immediate && atomic_read(&(SB_JOURNAL(p_s_sb)->j_wcount)) <= 0 &&
2585 SB_JOURNAL(p_s_sb)->j_trans_start_time > 0 &&
2586 SB_JOURNAL(p_s_sb)->j_len > 0 &&
2587 (now - SB_JOURNAL(p_s_sb)->j_trans_start_time) > SB_JOURNAL_MAX_TRANS_AGE(p_s_sb)) {
2588 journal_join(&th, p_s_sb, 1) ;
2589 reiserfs_prepare_for_journal(p_s_sb, SB_BUFFER_WITH_SB(p_s_sb), 1) ;
2590 journal_mark_dirty(&th, p_s_sb, SB_BUFFER_WITH_SB(p_s_sb)) ;
2591 do_journal_end(&th, p_s_sb,1, COMMIT_NOW) ;
2592 } else if (immediate) { /* belongs above, but I wanted this to be very explicit as a special case. If they say to
2593 flush, we must be sure old transactions hit the disk too. */
2594 journal_join(&th, p_s_sb, 1) ;
2595 reiserfs_prepare_for_journal(p_s_sb, SB_BUFFER_WITH_SB(p_s_sb), 1) ;
2596 journal_mark_dirty(&th, p_s_sb, SB_BUFFER_WITH_SB(p_s_sb)) ;
2597 do_journal_end(&th, p_s_sb,1, COMMIT_NOW | WAIT) ;
2599 reiserfs_journal_kupdate(p_s_sb) ;
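/*
** Illustrative sketch (not from the original file) of a periodic caller in
** the spirit of the sync path mentioned above; the exact caller in
** buffer.c/super.c is assumed, not quoted.
*/
static void example_periodic_sync(struct super_block *s) {
  if (!(s->s_flags & MS_RDONLY))
    flush_old_commits(s, 0) ;  /* non-immediate: only flush what is old */
}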
2604 ** returns 0 if do_journal_end should return right away, returns 1 if do_journal_end should finish the commit
2606 ** if the current transaction is too old, but still has writers, this will wait on j_join_wait until all
2607 ** the writers are done. By the time it wakes up, the transaction it was called on has already ended, so it just
2608 ** flushes the commit list and returns 0.
2610 ** Won't batch when flush or commit_now is set. Also won't batch when others are waiting on j_join_wait.
2612 ** Note, we can't allow the journal_end to proceed while there are still writers in the log.
2614 static int check_journal_end(struct reiserfs_transaction_handle *th, struct super_block * p_s_sb,
2615 unsigned long nblocks, int flags) {
2618 int flush = flags & FLUSH_ALL ;
2619 int commit_now = flags & COMMIT_NOW ;
2620 int wait_on_commit = flags & WAIT ;
2622 if (th->t_trans_id != SB_JOURNAL(p_s_sb)->j_trans_id) {
2623 reiserfs_panic(th->t_super, "journal-1577: handle trans id %ld != current trans id %ld\n",
2624 th->t_trans_id, SB_JOURNAL(p_s_sb)->j_trans_id);
2627 SB_JOURNAL(p_s_sb)->j_len_alloc -= (th->t_blocks_allocated - th->t_blocks_logged) ;
2628 if (atomic_read(&(SB_JOURNAL(p_s_sb)->j_wcount)) > 0) { /* <= 0 is allowed. unmounting might not call begin */
2629 atomic_dec(&(SB_JOURNAL(p_s_sb)->j_wcount)) ;
2632 /* BUG: handle the case where j_len is 0 but blocks previously freed still need to be released.
2633 ** this will be dealt with by the next transaction that actually writes something, but should be
2634 ** taken care of in this trans
2636 if (SB_JOURNAL(p_s_sb)->j_len == 0) {
2637 int wcount = atomic_read(&(SB_JOURNAL(p_s_sb)->j_wcount)) ;
2638 unlock_journal(p_s_sb) ;
2639 if (atomic_read(&(SB_JOURNAL(p_s_sb)->j_jlock)) > 0 && wcount <= 0) {
2640 atomic_dec(&(SB_JOURNAL(p_s_sb)->j_jlock)) ;
2641 wake_up(&(SB_JOURNAL(p_s_sb)->j_join_wait)) ;
2645 /* if wcount > 0, and we are called with flush or commit_now,
2646 ** we wait on j_join_wait. We will wake up when the last writer has
2647 ** finished the transaction, and started it on its way to the disk.
2648 ** Then, we flush the commit or journal list, and just return 0
2649 ** because the rest of journal end was already done for this transaction.
2651 if (atomic_read(&(SB_JOURNAL(p_s_sb)->j_wcount)) > 0) {
2652 if (flush || commit_now) {
2653 int orig_jindex = SB_JOURNAL_LIST_INDEX(p_s_sb) ;
2654 atomic_set(&(SB_JOURNAL(p_s_sb)->j_jlock), 1) ;
2656 SB_JOURNAL(p_s_sb)->j_next_full_flush = 1 ;
2658 unlock_journal(p_s_sb) ;
2659 /* sleep while the current transaction is still j_jlocked */
2660 while(atomic_read(&(SB_JOURNAL(p_s_sb)->j_jlock)) &&
2661 SB_JOURNAL(p_s_sb)->j_trans_id == th->t_trans_id) {
2662 sleep_on(&(SB_JOURNAL(p_s_sb)->j_join_wait)) ;
2665 if (wait_on_commit) {
2666 flush_commit_list(p_s_sb, SB_JOURNAL_LIST(p_s_sb) + orig_jindex, 1) ;
2668 commit_flush_async(p_s_sb, orig_jindex) ;
2673 unlock_journal(p_s_sb) ;
2677 /* deal with old transactions where we are the last writers */
2678 now = get_seconds() ;
2679 if ((now - SB_JOURNAL(p_s_sb)->j_trans_start_time) > SB_JOURNAL_MAX_TRANS_AGE(p_s_sb)) {
2681 SB_JOURNAL(p_s_sb)->j_next_async_flush = 1 ;
2683 /* don't batch when someone is waiting on j_join_wait */
2684 /* don't batch when syncing the commit or flushing the whole trans */
2685 if (!(SB_JOURNAL(p_s_sb)->j_must_wait > 0) && !(atomic_read(&(SB_JOURNAL(p_s_sb)->j_jlock))) && !flush && !commit_now &&
2686 (SB_JOURNAL(p_s_sb)->j_len < SB_JOURNAL_MAX_BATCH(p_s_sb)) &&
2687 SB_JOURNAL(p_s_sb)->j_len_alloc < SB_JOURNAL_MAX_BATCH(p_s_sb) && SB_JOURNAL(p_s_sb)->j_cnode_free > (SB_JOURNAL_TRANS_MAX(p_s_sb) * 3)) {
2688 SB_JOURNAL(p_s_sb)->j_bcount++ ;
2689 unlock_journal(p_s_sb) ;
2693 if (SB_JOURNAL(p_s_sb)->j_start > SB_ONDISK_JOURNAL_SIZE(p_s_sb)) {
2694 reiserfs_panic(p_s_sb, "journal-003: journal_end: j_start (%ld) is too high\n", SB_JOURNAL(p_s_sb)->j_start) ;
2700 ** Does all the work that makes deleting blocks safe.
2701 ** when deleting a block marked BH_JNew, just remove it from the current transaction, clean its buffer_head and move on.
2704 ** set a bit for the block in the journal bitmap. That will prevent it from being allocated for unformatted nodes
2705 ** before this transaction has finished.
2707 ** mark any cnodes for this block as BLOCK_FREED, and clear their bh pointers. That will prevent any old transactions with
2708 ** this block from trying to flush to the real location. Since we aren't removing the cnode from the journal_list_hash,
2709 ** the block can't be reallocated yet.
2711 ** Then remove it from the current transaction, decrementing any counters and filing it on the clean list.
2713 int journal_mark_freed(struct reiserfs_transaction_handle *th, struct super_block *p_s_sb, b_blocknr_t blocknr) {
2714 struct reiserfs_journal_cnode *cn = NULL ;
2715 struct buffer_head *bh = NULL ;
2716 struct reiserfs_list_bitmap *jb = NULL ;
2719 if (reiserfs_dont_log(th->t_super)) {
2720 bh = sb_find_get_block(p_s_sb, blocknr) ;
2721 if (bh && buffer_dirty (bh)) {
2722 printk ("journal_mark_freed(dont_log): dirty buffer on hash list: %lx %d\n", bh->b_state, blocknr);
2728 bh = sb_find_get_block(p_s_sb, blocknr) ;
2729 /* if it is journal new, we just remove it from this transaction */
2730 if (bh && buffer_journal_new(bh)) {
2731 mark_buffer_notjournal_new(bh) ;
2732 clear_prepared_bits(bh) ;
2733 cleaned = remove_from_transaction(p_s_sb, blocknr, cleaned) ;
2735 /* set the bit for this block in the journal bitmap for this transaction */
2736 jb = SB_JOURNAL_LIST(p_s_sb)[SB_JOURNAL_LIST_INDEX(p_s_sb)].j_list_bitmap ;
2738 reiserfs_panic(p_s_sb, "journal-1702: journal_mark_freed, journal_list_bitmap is NULL\n") ;
2740 set_bit_in_list_bitmap(p_s_sb, blocknr, jb) ;
2742 /* Note, the entire while loop is not allowed to schedule. */
2745 clear_prepared_bits(bh) ;
2747 cleaned = remove_from_transaction(p_s_sb, blocknr, cleaned) ;
2749 /* find all older transactions with this block, make sure they don't try to write it out */
2750 cn = get_journal_hash_dev(p_s_sb,SB_JOURNAL(p_s_sb)->j_list_hash_table, blocknr) ;
2752 if (p_s_sb == cn->sb && blocknr == cn->blocknr) {
2753 set_bit(BLOCK_FREED, &cn->state) ;
2756 /* remove_from_transaction will brelse the buffer if it was
2757 ** in the current trans
2759 mark_buffer_notjournal_dirty(cn->bh) ;
2762 if (atomic_read(&(cn->bh->b_count)) < 0) {
2763 printk("journal-2138: cn->bh->b_count < 0\n") ;
2766 if (cn->jlist) { /* since we are clearing the bh, we MUST dec nonzerolen */
2767 atomic_dec(&(cn->jlist->j_nonzerolen)) ;
2777 reiserfs_clean_and_file_buffer(bh) ;
2778 put_bh(bh) ; /* get_hash grabs the buffer */
2779 if (atomic_read(&(bh->b_count)) < 0) {
2780 printk("journal-2165: bh->b_count < 0\n") ;
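/*
** Illustrative sketch (not from the original file) of the deletion path
** described above, loosely modeled on the block freeing code in bitmap.c.
** The bitmap update is only summarized; this is not reiserfs_free_block().
*/
static void example_free_block(struct reiserfs_transaction_handle *th,
                               struct super_block *s, b_blocknr_t blocknr) {
  /* step 1 (elided): clear the block's bit in the on-disk bitmap inside the
  ** same transaction: reiserfs_prepare_for_journal, clear the bit,
  ** journal_mark_dirty
  */
  /* step 2: make reuse of the block safe against older, not yet flushed
  ** transactions
  */
  journal_mark_freed(th, s, blocknr) ;
}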
2786 void reiserfs_update_inode_transaction(struct inode *inode) {
2788 REISERFS_I(inode)->i_trans_index = SB_JOURNAL_LIST_INDEX(inode->i_sb);
2790 REISERFS_I(inode)->i_trans_id = SB_JOURNAL(inode->i_sb)->j_trans_id ;
2793 static int reiserfs_inode_in_this_transaction(struct inode *inode) {
2794 if (REISERFS_I(inode)->i_trans_id == SB_JOURNAL(inode->i_sb)->j_trans_id ||
2795 REISERFS_I(inode)->i_trans_id == 0) {
2801 void reiserfs_commit_for_inode(struct inode *inode) {
2802 struct reiserfs_journal_list *jl ;
2803 struct reiserfs_transaction_handle th ;
2804 struct super_block *sb = inode->i_sb ;
2806 jl = SB_JOURNAL_LIST(sb) + REISERFS_I(inode)->i_trans_index ;
2808 /* is it from the current transaction, or from an unknown transaction? */
2809 if (reiserfs_inode_in_this_transaction(inode)) {
2810 journal_join(&th, sb, 1) ;
2811 reiserfs_update_inode_transaction(inode) ;
2812 journal_end_sync(&th, sb, 1) ;
2813 } else if (jl->j_trans_id == REISERFS_I(inode)->i_trans_id) {
2814 flush_commit_list(sb, jl, 1) ;
2816 /* if the transaction id does not match, this list is long since flushed
2817 ** and we don't have to do anything here
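/*
** Illustrative sketch (not from the original file) of the intended pairing:
** the write path tags the inode with the transaction that touched it, and a
** hypothetical fsync wrapper later commits exactly that transaction.
*/
static int example_fsync_commit(struct inode *inode) {
  /* at write time, inside the transaction: */
  /*   reiserfs_update_inode_transaction(inode) ; */
  /* at fsync time, with no transaction held: */
  reiserfs_commit_for_inode(inode) ;
  return 0 ;
}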
2821 void reiserfs_restore_prepared_buffer(struct super_block *p_s_sb,
2822 struct buffer_head *bh) {
2823 PROC_INFO_INC( p_s_sb, journal.restore_prepared );
2824 if (reiserfs_dont_log (p_s_sb))
2830 clear_bit(BH_JPrepared, &bh->b_state) ;
2833 extern struct tree_balance *cur_tb ;
2835 ** before we can change a metadata block, we have to make sure it won't
2836 ** be written to disk while we are altering it. So, we must: clean it, then wait on it.
2841 void reiserfs_prepare_for_journal(struct super_block *p_s_sb,
2842 struct buffer_head *bh, int wait) {
2843 int retry_count = 0 ;
2845 PROC_INFO_INC( p_s_sb, journal.prepare );
2846 if (reiserfs_dont_log (p_s_sb))
2849 while(!test_bit(BH_JPrepared, &bh->b_state) ||
2850 (wait && buffer_locked(bh))) {
2851 if (buffer_journaled(bh)) {
2852 set_bit(BH_JPrepared, &bh->b_state) ;
2855 set_bit(BH_JPrepared, &bh->b_state) ;
2857 RFALSE( buffer_locked(bh) && cur_tb != NULL,
2858 "waiting while do_balance was running\n") ;
2859 wait_on_buffer(bh) ;
2861 PROC_INFO_INC( p_s_sb, journal.prepare_retry );
2867 ** long and ugly. If flush, will not return until all commit
2868 ** blocks and all real buffers in the trans are on disk.
2869 ** If no_async, won't return until all commit blocks are on disk.
2871 ** keep reading, there are comments as you go along
2873 static int do_journal_end(struct reiserfs_transaction_handle *th, struct super_block * p_s_sb, unsigned long nblocks,
2875 struct reiserfs_journal_cnode *cn, *next, *jl_cn;
2876 struct reiserfs_journal_cnode *last_cn = NULL;
2877 struct reiserfs_journal_desc *desc ;
2878 struct reiserfs_journal_commit *commit ;
2879 struct buffer_head *c_bh ; /* commit bh */
2880 struct buffer_head *d_bh ; /* desc bh */
2881 int cur_write_start = 0 ; /* start index of current log write */
2882 int cur_blocks_left = 0 ; /* number of journal blocks left to write */
2887 int flush = flags & FLUSH_ALL ;
2888 int commit_now = flags & COMMIT_NOW ;
2889 int wait_on_commit = flags & WAIT ;
2890 struct reiserfs_super_block *rs ;
2893 if (reiserfs_dont_log(th->t_super)) {
2897 lock_journal(p_s_sb) ;
2898 if (SB_JOURNAL(p_s_sb)->j_next_full_flush) {
2899 flags |= FLUSH_ALL ;
2902 if (SB_JOURNAL(p_s_sb)->j_next_async_flush) {
2903 flags |= COMMIT_NOW ;
2907 /* check_journal_end locks the journal, and unlocks if it does not return 1.
2908 ** it tells us if we should continue with the journal_end, or just return
2910 if (!check_journal_end(th, p_s_sb, nblocks, flags)) {
2914 /* check_journal_end might set these, check again */
2915 if (SB_JOURNAL(p_s_sb)->j_next_full_flush) {
2918 if (SB_JOURNAL(p_s_sb)->j_next_async_flush) {
2922 ** j_must_wait means we have to flush the log blocks, and the real blocks for this transaction
2925 if (SB_JOURNAL(p_s_sb)->j_must_wait > 0) {
2929 #ifdef REISERFS_PREALLOCATE
2930 reiserfs_discard_all_prealloc(th); /* this should not bring new blocks into
2931 * the transaction */
2934 rs = SB_DISK_SUPER_BLOCK(p_s_sb) ;
2935 /* setup description block */
2936 d_bh = journal_getblk(p_s_sb, SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb) + SB_JOURNAL(p_s_sb)->j_start) ;
2937 set_buffer_uptodate(d_bh) ;
2938 desc = (struct reiserfs_journal_desc *)(d_bh)->b_data ;
2939 memset(d_bh->b_data, 0, d_bh->b_size) ;
2940 memcpy(get_journal_desc_magic (d_bh), JOURNAL_DESC_MAGIC, 8) ;
2941 set_desc_trans_id(desc, SB_JOURNAL(p_s_sb)->j_trans_id) ;
2943 /* setup commit block. Don't write this one (keep it clean, too) until after everyone else is written */
2944 c_bh = journal_getblk(p_s_sb, SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb) +
2945 ((SB_JOURNAL(p_s_sb)->j_start + SB_JOURNAL(p_s_sb)->j_len + 1) % SB_ONDISK_JOURNAL_SIZE(p_s_sb))) ;
2946 commit = (struct reiserfs_journal_commit *)c_bh->b_data ;
2947 memset(c_bh->b_data, 0, c_bh->b_size) ;
2948 set_commit_trans_id(commit, SB_JOURNAL(p_s_sb)->j_trans_id) ;
2949 set_buffer_uptodate(c_bh) ;
2951 /* init this journal list */
2952 atomic_set(&(SB_JOURNAL_LIST(p_s_sb)[SB_JOURNAL_LIST_INDEX(p_s_sb)].j_older_commits_done), 0) ;
2953 SB_JOURNAL_LIST(p_s_sb)[SB_JOURNAL_LIST_INDEX(p_s_sb)].j_trans_id = SB_JOURNAL(p_s_sb)->j_trans_id ;
2954 SB_JOURNAL_LIST(p_s_sb)[SB_JOURNAL_LIST_INDEX(p_s_sb)].j_timestamp = SB_JOURNAL(p_s_sb)->j_trans_start_time ;
2955 SB_JOURNAL_LIST(p_s_sb)[SB_JOURNAL_LIST_INDEX(p_s_sb)].j_commit_bh = c_bh ;
2956 SB_JOURNAL_LIST(p_s_sb)[SB_JOURNAL_LIST_INDEX(p_s_sb)].j_start = SB_JOURNAL(p_s_sb)->j_start ;
2957 SB_JOURNAL_LIST(p_s_sb)[SB_JOURNAL_LIST_INDEX(p_s_sb)].j_len = SB_JOURNAL(p_s_sb)->j_len ;
2958 atomic_set(&(SB_JOURNAL_LIST(p_s_sb)[SB_JOURNAL_LIST_INDEX(p_s_sb)].j_nonzerolen), SB_JOURNAL(p_s_sb)->j_len) ;
2959 atomic_set(&(SB_JOURNAL_LIST(p_s_sb)[SB_JOURNAL_LIST_INDEX(p_s_sb)].j_commit_left), SB_JOURNAL(p_s_sb)->j_len + 2);
2960 SB_JOURNAL_LIST(p_s_sb)[SB_JOURNAL_LIST_INDEX(p_s_sb)].j_realblock = NULL ;
2961 atomic_set(&(SB_JOURNAL_LIST(p_s_sb)[SB_JOURNAL_LIST_INDEX(p_s_sb)].j_commit_flushing), 1) ;
2962 atomic_set(&(SB_JOURNAL_LIST(p_s_sb)[SB_JOURNAL_LIST_INDEX(p_s_sb)].j_flushing), 1) ;
2964 /* which is faster, locking/unlocking at the start and end of the for loop,
2965 ** or locking once per iteration around the insert_journal_hash?
2966 ** either way, we are write locking insert_journal_hash. The ENTIRE FOR
2967 ** LOOP must NOT cause schedule to occur.
2970 /* for each real block, add it to the journal list hash,
2971 ** and copy it into the real block index array in the commit or desc block
2973 trans_half = journal_trans_half(p_s_sb->s_blocksize) ;
2974 for (i = 0, cn = SB_JOURNAL(p_s_sb)->j_first ; cn ; cn = cn->next, i++) {
2975 if (test_bit(BH_JDirty, &cn->bh->b_state) ) {
2976 jl_cn = get_cnode(p_s_sb) ;
2978 reiserfs_panic(p_s_sb, "journal-1676, get_cnode returned NULL\n") ;
2981 SB_JOURNAL_LIST(p_s_sb)[SB_JOURNAL_LIST_INDEX(p_s_sb)].j_realblock = jl_cn ;
2983 jl_cn->prev = last_cn ;
2984 jl_cn->next = NULL ;
2986 last_cn->next = jl_cn ;
2989 /* make sure the block we are trying to log is not a block
2990 of the journal or reserved area */
2992 if (is_block_in_log_or_reserved_area(p_s_sb, cn->bh->b_blocknr)) {
2993 reiserfs_panic(p_s_sb, "journal-2332: Trying to log block %lu, which is a log block\n", cn->bh->b_blocknr) ;
2995 jl_cn->blocknr = cn->bh->b_blocknr ;
2997 jl_cn->sb = p_s_sb ;
2998 jl_cn->bh = cn->bh ;
2999 jl_cn->jlist = SB_JOURNAL_LIST(p_s_sb) + SB_JOURNAL_LIST_INDEX(p_s_sb) ;
3000 insert_journal_hash(SB_JOURNAL(p_s_sb)->j_list_hash_table, jl_cn) ;
3001 if (i < trans_half) {
3002 desc->j_realblock[i] = cpu_to_le32(cn->bh->b_blocknr) ;
3004 commit->j_realblock[i - trans_half] = cpu_to_le32(cn->bh->b_blocknr) ;
3011 set_desc_trans_len(desc, SB_JOURNAL(p_s_sb)->j_len) ;
3012 set_desc_mount_id(desc, SB_JOURNAL(p_s_sb)->j_mount_id) ;
3013 set_desc_trans_id(desc, SB_JOURNAL(p_s_sb)->j_trans_id) ;
3014 set_commit_trans_len(commit, SB_JOURNAL(p_s_sb)->j_len);
3016 /* special check in case all buffers in the journal were marked for not logging */
3017 if (SB_JOURNAL(p_s_sb)->j_len == 0) {
3020 unlock_journal(p_s_sb) ;
3021 printk("journal-2020: do_journal_end: BAD desc->j_len is ZERO\n") ;
3022 atomic_set(&(SB_JOURNAL(p_s_sb)->j_jlock), 0) ;
3023 wake_up(&(SB_JOURNAL(p_s_sb)->j_join_wait)) ;
3027 /* first data block is j_start + 1, so add one to cur_write_start wherever you use it */
3028 cur_write_start = SB_JOURNAL(p_s_sb)->j_start ;
3029 cur_blocks_left = SB_JOURNAL(p_s_sb)->j_len ;
3030 cn = SB_JOURNAL(p_s_sb)->j_first ;
3031 jindex = 1 ; /* start at one so we don't get the desc again */
3032 while(cur_blocks_left > 0) {
3033 /* copy all the real blocks into the log area. dirty the log blocks */
3034 if (test_bit(BH_JDirty, &cn->bh->b_state)) {
3035 struct buffer_head *tmp_bh ;
3036 tmp_bh = journal_getblk(p_s_sb, SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb) +
3037 ((cur_write_start + jindex) % SB_ONDISK_JOURNAL_SIZE(p_s_sb))) ;
3038 set_buffer_uptodate(tmp_bh) ;
3039 memcpy(tmp_bh->b_data, cn->bh->b_data, cn->bh->b_size) ;
3042 /* JDirty cleared sometime during transaction. don't log this one */
3043 printk("journal-2048: do_journal_end: BAD, buffer in journal hash, but not JDirty!\n") ;
3049 /* we are done with both the c_bh and d_bh, but
3050 ** c_bh must be written after all other commit blocks,
3051 ** so we dirty/relse c_bh in flush_commit_list, with commit_left <= 1.
3054 /* now loop through and mark all buffers from this transaction as JDirty_wait;
3055 ** clear the JDirty bit, and clear BH_JNew too.
3056 ** if they weren't JDirty, they weren't logged; just release them and move on
3058 cn = SB_JOURNAL(p_s_sb)->j_first ;
3060 clear_bit(BH_JNew, &(cn->bh->b_state)) ;
3061 if (test_bit(BH_JDirty, &(cn->bh->b_state))) {
3062 set_bit(BH_JDirty_wait, &(cn->bh->b_state)) ;
3063 clear_bit(BH_JDirty, &(cn->bh->b_state)) ;
3068 free_cnode(p_s_sb, cn) ;
3072 /* unlock the journal list for committing and flushing */
3073 atomic_set(&(SB_JOURNAL_LIST(p_s_sb)[SB_JOURNAL_LIST_INDEX(p_s_sb)].j_commit_flushing), 0) ;
3074 atomic_set(&(SB_JOURNAL_LIST(p_s_sb)[SB_JOURNAL_LIST_INDEX(p_s_sb)].j_flushing), 0) ;
3076 orig_jindex = SB_JOURNAL_LIST_INDEX(p_s_sb) ;
3077 jindex = (SB_JOURNAL_LIST_INDEX(p_s_sb) + 1) % JOURNAL_LIST_COUNT ;
3078 SB_JOURNAL_LIST_INDEX(p_s_sb) = jindex ;
3080 /* write any buffers that must hit disk before this commit is done */
3081 fsync_buffers_list(&(SB_JOURNAL(p_s_sb)->j_dirty_buffers_lock),
3082 &(SB_JOURNAL(p_s_sb)->j_dirty_buffers)) ;
3084 /* honor the flush and async wishes from the caller */
3087 flush_commit_list(p_s_sb, SB_JOURNAL_LIST(p_s_sb) + orig_jindex, 1) ;
3088 flush_journal_list(p_s_sb, SB_JOURNAL_LIST(p_s_sb) + orig_jindex , 1) ;
3089 } else if (commit_now) {
3090 if (wait_on_commit) {
3091 flush_commit_list(p_s_sb, SB_JOURNAL_LIST(p_s_sb) + orig_jindex, 1) ;
3093 commit_flush_async(p_s_sb, orig_jindex) ;
3097 /* reset journal values for the next transaction */
3098 old_start = SB_JOURNAL(p_s_sb)->j_start ;
3099 SB_JOURNAL(p_s_sb)->j_start = (SB_JOURNAL(p_s_sb)->j_start + SB_JOURNAL(p_s_sb)->j_len + 2) % SB_ONDISK_JOURNAL_SIZE(p_s_sb);
3100 atomic_set(&(SB_JOURNAL(p_s_sb)->j_wcount), 0) ;
3101 SB_JOURNAL(p_s_sb)->j_bcount = 0 ;
3102 SB_JOURNAL(p_s_sb)->j_last = NULL ;
3103 SB_JOURNAL(p_s_sb)->j_first = NULL ;
3104 SB_JOURNAL(p_s_sb)->j_len = 0 ;
3105 SB_JOURNAL(p_s_sb)->j_trans_start_time = 0 ;
3106 SB_JOURNAL(p_s_sb)->j_trans_id++ ;
3107 SB_JOURNAL(p_s_sb)->j_must_wait = 0 ;
3108 SB_JOURNAL(p_s_sb)->j_len_alloc = 0 ;
3109 SB_JOURNAL(p_s_sb)->j_next_full_flush = 0 ;
3110 SB_JOURNAL(p_s_sb)->j_next_async_flush = 0 ;
3111 init_journal_hash(p_s_sb) ;
3113 /* if the next transaction has any chance of wrapping, flush
3114 ** transactions that might get overwritten. If any journal lists are very
3115 ** old, flush them as well.
3117 for (i = 0 ; i < JOURNAL_LIST_COUNT ; i++) {
3119 if (SB_JOURNAL_LIST(p_s_sb)[jindex].j_len > 0 && SB_JOURNAL(p_s_sb)->j_start <= SB_JOURNAL_LIST(p_s_sb)[jindex].j_start) {
3120 if ((SB_JOURNAL(p_s_sb)->j_start + SB_JOURNAL_TRANS_MAX(p_s_sb) + 1) >= SB_JOURNAL_LIST(p_s_sb)[jindex].j_start) {
3121 flush_journal_list(p_s_sb, SB_JOURNAL_LIST(p_s_sb) + jindex, 1) ;
3123 } else if (SB_JOURNAL_LIST(p_s_sb)[jindex].j_len > 0 &&
3124 (SB_JOURNAL(p_s_sb)->j_start + SB_JOURNAL_TRANS_MAX(p_s_sb) + 1) > SB_ONDISK_JOURNAL_SIZE(p_s_sb)) {
3125 if (((SB_JOURNAL(p_s_sb)->j_start + SB_JOURNAL_TRANS_MAX(p_s_sb) + 1) % SB_ONDISK_JOURNAL_SIZE(p_s_sb)) >=
3126 SB_JOURNAL_LIST(p_s_sb)[jindex].j_start) {
3127 flush_journal_list(p_s_sb, SB_JOURNAL_LIST(p_s_sb) + jindex, 1 ) ;
3130 /* this check should always be run, to send old lists to disk */
3131 if (SB_JOURNAL_LIST(p_s_sb)[jindex].j_len > 0 &&
3132 SB_JOURNAL_LIST(p_s_sb)[jindex].j_timestamp <
3133 (get_seconds() - (SB_JOURNAL_MAX_TRANS_AGE(p_s_sb) * 4))) {
3134 flush_journal_list(p_s_sb, SB_JOURNAL_LIST(p_s_sb) + jindex, 1 ) ;
3138 /* if the next journal_list is still in use, flush it */
3139 if (SB_JOURNAL_LIST(p_s_sb)[SB_JOURNAL_LIST_INDEX(p_s_sb)].j_len != 0) {
3140 flush_journal_list(p_s_sb, SB_JOURNAL_LIST(p_s_sb) + SB_JOURNAL_LIST_INDEX(p_s_sb), 1) ;
3143 /* we don't want anyone flushing the new transaction's list */
3144 atomic_set(&(SB_JOURNAL_LIST(p_s_sb)[SB_JOURNAL_LIST_INDEX(p_s_sb)].j_commit_flushing), 1) ;
3145 atomic_set(&(SB_JOURNAL_LIST(p_s_sb)[SB_JOURNAL_LIST_INDEX(p_s_sb)].j_flushing), 1) ;
3146 SB_JOURNAL_LIST(p_s_sb)[SB_JOURNAL_LIST_INDEX(p_s_sb)].j_list_bitmap = get_list_bitmap(p_s_sb, SB_JOURNAL_LIST(p_s_sb) +
3147 SB_JOURNAL_LIST_INDEX(p_s_sb)) ;
3149 if (!(SB_JOURNAL_LIST(p_s_sb)[SB_JOURNAL_LIST_INDEX(p_s_sb)].j_list_bitmap)) {
3150 reiserfs_panic(p_s_sb, "journal-1996: do_journal_end, could not get a list bitmap\n") ;
3152 unlock_journal(p_s_sb) ;
3153 atomic_set(&(SB_JOURNAL(p_s_sb)->j_jlock), 0) ;
3154 /* wake up anybody waiting to join. */
3155 wake_up(&(SB_JOURNAL(p_s_sb)->j_join_wait)) ;