 * Copyright (c) International Business Machines Corp., 2000-2003
 * Portions Copyright (c) Christoph Hellwig, 2001-2002
 *
 * This program is free software;  you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY;  without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See
 * the GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program;  if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 * jfs_txnmgr.c: transaction manager
 *
 * transaction starts with txBegin() and ends with txCommit()
 *
 * tlock is acquired at the time of update;
 * (obviates scan at commit time for xtree and dtree)
 * tlock and mp point to each other;
 * (no hashlist for mp -> tlock).
 *
 * tlock on in-memory inode:
 * in-place tlock in the in-memory inode itself;
 * converted to page lock by iWrite() at commit time.
 *
 * tlock during write()/mmap() under anonymous transaction (tid = 0):
 * transferred (?) to transaction at commit time.
 *
 * use the page itself to update allocation maps
 * (obviates intermediate replication of allocation/deallocation data)
 * hold on to mp+lock through update of maps
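 *
 * A hedged orientation sketch (hypothetical caller; sb/ip/mp are
 * placeholders, the routine names are the ones defined below):
 *
 *	tid = txBegin(sb, 0);
 *	tlck = txLock(tid, ip, mp, tlckXTREE | tlckGROW);
 *	(update the page under the tlock)
 *	txCommit(tid, 1, &ip, 0);
 *	txEnd(tid);
 */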
#include <linux/vmalloc.h>
#include <linux/smp_lock.h>
#include <linux/completion.h>
#include <linux/suspend.h>
#include "jfs_incore.h"
#include "jfs_filsys.h"
#include "jfs_metapage.h"
#include "jfs_dinode.h"
#include "jfs_superblock.h"
#include "jfs_debug.h"
/*
 * transaction management structures
 */
static struct {
	int freetid;		/* index of a free tid structure */
	wait_queue_head_t freewait;	/* eventlist of free tblock */
	int freelock;		/* index of first free lock word */
	wait_queue_head_t freelockwait;	/* eventlist of free tlock */
	wait_queue_head_t lowlockwait;	/* eventlist of ample tlocks */
	int tlocksInUse;	/* number of tlocks in use */
	int TlocksLow;		/* indicates low number of available tlocks */
	spinlock_t LazyLock;	/* synchronize sync_queue & unlock_queue */
/*	struct tblock *sync_queue;	 * transactions waiting for data sync */
	struct tblock *unlock_queue;	/* txns waiting to be released */
	struct tblock *unlock_tail;	/* tail of unlock_queue */
	struct list_head anon_list;	/* inodes having anonymous txns */
	struct list_head anon_list2;	/* inodes having anonymous txns
					   that couldn't be sync'ed */
} TxAnchor;
#ifdef CONFIG_JFS_STATISTICS
struct {
	uint txBegin_lockslow;
	uint txBeginAnon_barrier;
	uint txBeginAnon_lockslow;
	uint txLockAlloc_freelock;
} TxStat;
#endif
static int nTxBlock = 512;	/* number of transaction blocks */
struct tblock *TxBlock;		/* transaction block table */

static int nTxLock = 4096;	/* number of transaction locks */
static int TxLockLWM = 4096*.4;	/* Low water mark for number of txLocks used */
static int TxLockHWM = 4096*.8;	/* High water mark for number of txLocks used */
struct tlock *TxLock;		/* transaction lock table */
/*
 * transaction management lock
 */
static spinlock_t jfsTxnLock = SPIN_LOCK_UNLOCKED;

#define TXN_LOCK()	spin_lock(&jfsTxnLock)
#define TXN_UNLOCK()	spin_unlock(&jfsTxnLock)

#define LAZY_LOCK_INIT()	spin_lock_init(&TxAnchor.LazyLock);
#define LAZY_LOCK(flags)	spin_lock_irqsave(&TxAnchor.LazyLock, flags)
#define LAZY_UNLOCK(flags)	spin_unlock_irqrestore(&TxAnchor.LazyLock, flags)

DECLARE_WAIT_QUEUE_HEAD(jfs_sync_thread_wait);
DECLARE_WAIT_QUEUE_HEAD(jfs_commit_thread_wait);
/*
 * Retry logic exists outside these macros to protect against spurious wakeups.
 */
static inline void TXN_SLEEP_DROP_LOCK(wait_queue_head_t * event)
{
	DECLARE_WAITQUEUE(wait, current);

	add_wait_queue(event, &wait);
	set_current_state(TASK_UNINTERRUPTIBLE);
	TXN_UNLOCK();
	schedule();
	current->state = TASK_RUNNING;
	remove_wait_queue(event, &wait);
}

#define TXN_SLEEP(event)\
{\
	TXN_SLEEP_DROP_LOCK(event);\
	TXN_LOCK();\
}

#define TXN_WAKEUP(event) wake_up_all(event)
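
/*
 * Usage note: wakeups may be spurious, so callers re-check their condition
 * in a loop around TXN_SLEEP(); e.g. (taken from txLockAlloc() below):
 *
 *	while (!(lid = TxAnchor.freelock))
 *		TXN_SLEEP(&TxAnchor.freelockwait);
 */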
struct {
	tid_t maxtid;		/* 4: biggest tid ever used */
	lid_t maxlid;		/* 4: biggest lid ever used */
	int ntid;		/* 4: # of transactions performed */
	int nlid;		/* 4: # of tlocks acquired */
	int waitlock;		/* 4: # of tlock waits */
} stattx;
/*
 * external references
 */
extern int lmGroupCommit(struct jfs_log *, struct tblock *);
extern void lmSync(struct jfs_log *);
extern int jfs_commit_inode(struct inode *, int);
extern int jfs_stop_threads;

struct task_struct *jfsCommitTask;
extern struct completion jfsIOwait;
int diLog(struct jfs_log * log, struct tblock * tblk, struct lrd * lrd,
	  struct tlock * tlck, struct commit * cd);
int dataLog(struct jfs_log * log, struct tblock * tblk, struct lrd * lrd,
	    struct tlock * tlck);
void dtLog(struct jfs_log * log, struct tblock * tblk, struct lrd * lrd,
	   struct tlock * tlck);
void inlineLog(struct jfs_log * log, struct tblock * tblk, struct lrd * lrd,
	       struct tlock * tlck);
void mapLog(struct jfs_log * log, struct tblock * tblk, struct lrd * lrd,
	    struct tlock * tlck);
void txAbortCommit(struct commit * cd, int exval);
static void txAllocPMap(struct inode *ip, struct maplock * maplock,
			struct tblock * tblk);
void txForce(struct tblock * tblk);
static int txLog(struct jfs_log * log, struct tblock * tblk, struct commit * cd);
int txMoreLock(void);
static void txUpdateMap(struct tblock * tblk);
static void txRelease(struct tblock * tblk);
void xtLog(struct jfs_log * log, struct tblock * tblk, struct lrd * lrd,
	   struct tlock * tlck);
static void LogSyncRelease(struct metapage * mp);
/*
 *	transaction block/lock management
 *	---------------------------------
 */

/*
 * Get a transaction lock from the free list.  If the number in use is
 * greater than the high water mark, wake up the sync daemon.  This should
 * free some anonymous transaction locks.  (TXN_LOCK must be held.)
 */
static lid_t txLockAlloc(void)
{
	lid_t lid;

	INCREMENT(TxStat.txLockAlloc);
	if (!TxAnchor.freelock) {
		INCREMENT(TxStat.txLockAlloc_freelock);
	}
	while (!(lid = TxAnchor.freelock))
		TXN_SLEEP(&TxAnchor.freelockwait);
	TxAnchor.freelock = TxLock[lid].next;
	HIGHWATERMARK(stattx.maxlid, lid);
	if ((++TxAnchor.tlocksInUse > TxLockHWM) && (TxAnchor.TlocksLow == 0)) {
		jfs_info("txLockAlloc TlocksLow");
		TxAnchor.TlocksLow = 1;
		wake_up(&jfs_sync_thread_wait);
	}

	return lid;
}
static void txLockFree(lid_t lid)
{
	TxLock[lid].next = TxAnchor.freelock;
	TxAnchor.freelock = lid;
	TxAnchor.tlocksInUse--;
	if (TxAnchor.TlocksLow && (TxAnchor.tlocksInUse < TxLockLWM)) {
		jfs_info("txLockFree TlocksLow no more");
		TxAnchor.TlocksLow = 0;
		TXN_WAKEUP(&TxAnchor.lowlockwait);
	}
	TXN_WAKEUP(&TxAnchor.freelockwait);
}
/*
 * FUNCTION:	initialize transaction management structures
 *
 * serialization: single thread at jfs_init()
 */
int txInit(void)
{
	int k, size;

	/*
	 * initialize transaction block (tblock) table
	 *
	 * transaction id (tid) = tblock index
	 * tid = 0 is reserved.
	 */
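	/*
	 * Orientation note (assumption, consistent with usage throughout
	 * this file): tid_to_tblock() and lid_to_tlock() are plain array
	 * lookups, i.e. tid_to_tblock(tid) yields &TxBlock[tid] and
	 * lid_to_tlock(lid) yields &TxLock[lid]; index 0 is reserved so
	 * that it can terminate the embedded freelists.
	 */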
	size = sizeof(struct tblock) * nTxBlock;
	TxBlock = (struct tblock *) vmalloc(size);
	if (TxBlock == NULL)
		return -ENOMEM;

	for (k = 1; k < nTxBlock - 1; k++) {
		TxBlock[k].next = k + 1;
		init_waitqueue_head(&TxBlock[k].gcwait);
		init_waitqueue_head(&TxBlock[k].waitor);
	}
	TxBlock[k].next = 0;
	init_waitqueue_head(&TxBlock[k].gcwait);
	init_waitqueue_head(&TxBlock[k].waitor);

	TxAnchor.freetid = 1;
	init_waitqueue_head(&TxAnchor.freewait);

	stattx.maxtid = 1;	/* statistics */

	/*
	 * initialize transaction lock (tlock) table
	 *
	 * transaction lock id = tlock index
	 * tlock id = 0 is reserved.
	 */
	size = sizeof(struct tlock) * nTxLock;
	TxLock = (struct tlock *) vmalloc(size);
	if (TxLock == NULL) {
		vfree(TxBlock);
		return -ENOMEM;
	}

	/* initialize tlock table */
	for (k = 1; k < nTxLock - 1; k++)
		TxLock[k].next = k + 1;
	TxLock[k].next = 0;

	init_waitqueue_head(&TxAnchor.freelockwait);
	init_waitqueue_head(&TxAnchor.lowlockwait);

	TxAnchor.freelock = 1;
	TxAnchor.tlocksInUse = 0;
	INIT_LIST_HEAD(&TxAnchor.anon_list);
	INIT_LIST_HEAD(&TxAnchor.anon_list2);

	stattx.maxlid = 1;	/* statistics */

	return 0;
}
/*
 * FUNCTION:	clean up when module is unloaded
 */
/*
 * NAME:	txBegin()
 *
 * FUNCTION:	start a transaction.
 *
 * PARAMETER:	sb	- superblock
 *		flag	- force for nested tx;
 *
 * RETURN:	tid	- transaction id
 *
 * note: flag force allows starting a tx for a nested tx
 * to prevent deadlock on logsync barrier;
 */
tid_t txBegin(struct super_block *sb, int flag)
{
	tid_t t;
	struct tblock *tblk;
	struct jfs_log *log;

	jfs_info("txBegin: flag = 0x%x", flag);
	log = JFS_SBI(sb)->log;

	TXN_LOCK();
	INCREMENT(TxStat.txBegin);

      retry:
	if (!(flag & COMMIT_FORCE)) {
		/*
		 * synchronize with logsync barrier
		 */
		if (test_bit(log_SYNCBARRIER, &log->flag) ||
		    test_bit(log_QUIESCE, &log->flag)) {
			INCREMENT(TxStat.txBegin_barrier);
			TXN_SLEEP(&log->syncwait);
			goto retry;
		}
	}
	/*
	 * Don't begin transaction if we're getting starved for tlocks
	 * unless COMMIT_FORCE or COMMIT_INODE (which may ultimately
	 * free tlocks)
	 */
	if (TxAnchor.TlocksLow) {
		INCREMENT(TxStat.txBegin_lockslow);
		TXN_SLEEP(&TxAnchor.lowlockwait);
		goto retry;
	}

	/*
	 * allocate transaction id/block
	 */
	if ((t = TxAnchor.freetid) == 0) {
		jfs_info("txBegin: waiting for free tid");
		INCREMENT(TxStat.txBegin_freetid);
		TXN_SLEEP(&TxAnchor.freewait);
		goto retry;
	}

	tblk = tid_to_tblock(t);

	if ((tblk->next == 0) && !(flag & COMMIT_FORCE)) {
		/* Don't let a non-forced transaction take the last tblk */
		jfs_info("txBegin: waiting for free tid");
		INCREMENT(TxStat.txBegin_freetid);
		TXN_SLEEP(&TxAnchor.freewait);
		goto retry;
	}

	TxAnchor.freetid = tblk->next;

	/*
	 * initialize transaction
	 */

	/*
	 * We can't zero the whole thing or we screw up another thread being
	 * awakened after sleeping on tblk->waitor
	 *
	 * memset(tblk, 0, sizeof(struct tblock));
	 */
	tblk->next = tblk->last = tblk->xflag = tblk->flag = tblk->lsn = 0;

	tblk->logtid = log->logtid;

	HIGHWATERMARK(stattx.maxtid, t);	/* statistics */
	INCREMENT(stattx.ntid);	/* statistics */

	TXN_UNLOCK();

	jfs_info("txBegin: returning tid = %d", t);
	return t;
}
/*
 * NAME:	txBeginAnon()
 *
 * FUNCTION:	start an anonymous transaction.
 *		Blocks if logsync or available tlocks are low to prevent
 *		anonymous tlocks from depleting supply.
 *
 * PARAMETER:	sb	- superblock
 */
void txBeginAnon(struct super_block *sb)
{
	struct jfs_log *log;

	log = JFS_SBI(sb)->log;

	TXN_LOCK();
	INCREMENT(TxStat.txBeginAnon);

	/*
	 * synchronize with logsync barrier
	 */
	if (test_bit(log_SYNCBARRIER, &log->flag) ||
	    test_bit(log_QUIESCE, &log->flag)) {
		INCREMENT(TxStat.txBeginAnon_barrier);
		TXN_SLEEP(&log->syncwait);
	}

	/*
	 * Don't begin transaction if we're getting starved for tlocks
	 */
	if (TxAnchor.TlocksLow) {
		INCREMENT(TxStat.txBeginAnon_lockslow);
		TXN_SLEEP(&TxAnchor.lowlockwait);
	}
	TXN_UNLOCK();
}
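
/*
 * Hedged usage sketch (not a verbatim caller): a write()-path helper
 * would call txBeginAnon(ip->i_sb) and then acquire its tlocks with
 * tid = 0, e.g. txLock(0, ip, mp, tlckXTREE | tlckGROW); such anonymous
 * tlocks sit on the inode's anonymous tlock list until a later
 * txCommit() inherits them (see txCommit() below).
 */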
/*
 * NAME:	txEnd()
 *
 * FUNCTION:	free specified transaction block.
 *
 *	logsync barrier processing:
 */
void txEnd(tid_t tid)
{
	struct tblock *tblk = tid_to_tblock(tid);
	struct jfs_log *log;

	jfs_info("txEnd: tid = %d", tid);
	TXN_LOCK();

	/*
	 * wakeup transactions waiting on the page locked
	 * by the current transaction
	 */
	TXN_WAKEUP(&tblk->waitor);

	log = JFS_SBI(tblk->sb)->log;

	/*
	 * Lazy commit thread can't free this guy until we mark it UNLOCKED,
	 * otherwise, we would be left with a transaction that may have been
	 * reused.
	 *
	 * Lazy commit thread will turn off tblkGC_LAZY before calling this
	 * routine.
	 */
	if (tblk->flag & tblkGC_LAZY) {
		jfs_info("txEnd called w/lazy tid: %d, tblk = 0x%p", tid, tblk);
		TXN_UNLOCK();

		spin_lock_irq(&log->gclock);	// LOGGC_LOCK
		tblk->flag |= tblkGC_UNLOCKED;
		spin_unlock_irq(&log->gclock);	// LOGGC_UNLOCK
		return;
	}

	jfs_info("txEnd: tid: %d, tblk = 0x%p", tid, tblk);

	assert(tblk->next == 0);

	/*
	 * insert tblock back on freelist
	 */
	tblk->next = TxAnchor.freetid;
	TxAnchor.freetid = tid;

	/*
	 * mark the tblock not active
	 */
	if (--log->active == 0) {
		clear_bit(log_FLUSH, &log->flag);

		/*
		 * synchronize with logsync barrier
		 */
		if (test_bit(log_SYNCBARRIER, &log->flag)) {
			/* forward log syncpt */

			jfs_info("log barrier off: 0x%x", log->lsn);

			/* enable new transactions start */
			clear_bit(log_SYNCBARRIER, &log->flag);

			/* wakeup all waitors for logsync barrier */
			TXN_WAKEUP(&log->syncwait);
		}
	}

	/*
	 * wakeup all waitors for a free tblock
	 */
	TXN_WAKEUP(&TxAnchor.freewait);

	TXN_UNLOCK();
}
/*
 * NAME:	txLock()
 *
 * FUNCTION:	acquire a transaction lock on the specified <mp>
 *
 * return:	transaction lock id
 */
struct tlock *txLock(tid_t tid, struct inode *ip, struct metapage * mp,
		     int type)
{
	struct jfs_inode_info *jfs_ip = JFS_IP(ip);
	int dir_xtree = 0;
	lid_t lid;
	tid_t xtid;
	struct tlock *tlck;
	struct xtlock *xtlck;
	struct linelock *linelock;
	xtpage_t *p;
	struct tblock *tblk;

	TXN_LOCK();

	if (S_ISDIR(ip->i_mode) && (type & tlckXTREE) &&
	    !(mp->xflag & COMMIT_PAGE)) {
		/*
		 * Directory inode is special. It can have both an xtree tlock
		 * and a dtree tlock associated with it.
		 */
		dir_xtree = 1;
		lid = jfs_ip->xtlid;
	} else
		lid = mp->lid;

	/* is page not locked by a transaction ? */
	if (lid == 0)
		goto allocateLock;

	jfs_info("txLock: tid:%d ip:0x%p mp:0x%p lid:%d", tid, ip, mp, lid);

	/* is page locked by the requester transaction ? */
	tlck = lid_to_tlock(lid);
	if ((xtid = tlck->tid) == tid)
		goto grantLock;
	/*
	 * is page locked by anonymous transaction/lock ?
	 *
	 * (page update without transaction (i.e., file write) is
	 * locked under anonymous transaction tid = 0:
	 * anonymous tlocks maintained on anonymous tlock list of
	 * the inode of the page and available to all anonymous
	 * transactions until txCommit() time at which point
	 * they are transferred to the transaction tlock list of
	 * the committing transaction of the inode)
	 */
	if (xtid == 0) {
		tlck->tid = tid;
		tblk = tid_to_tblock(tid);
		/*
		 * The order of the tlocks in the transaction is important
		 * (during truncate, child xtree pages must be freed before
		 * parent's tlocks change the working map).
		 * Take tlock off anonymous list and add to tail of
		 * transaction list
		 *
		 * Note:  We really need to get rid of the tid & lid and use
		 * list_head's.  This code is getting UGLY!
		 */
		if (jfs_ip->atlhead == lid) {
			if (jfs_ip->atltail == lid) {
				/* only anonymous txn.
				 * Remove from anon_list
				 */
				list_del_init(&jfs_ip->anon_inode_list);
			}
			jfs_ip->atlhead = tlck->next;
		} else {
			lid_t last;
			for (last = jfs_ip->atlhead;
			     lid_to_tlock(last)->next != lid;
			     last = lid_to_tlock(last)->next) {
				assert(last);
			}
			lid_to_tlock(last)->next = tlck->next;
			if (jfs_ip->atltail == lid)
				jfs_ip->atltail = last;
		}

		/* insert the tlock at tail of transaction tlock list */
		if (tblk->next)
			lid_to_tlock(tblk->last)->next = lid;
		else
			tblk->next = lid;
		tlck->next = 0;
		tblk->last = lid;

		goto grantLock;
	}
	/*
	 * allocate a tlock
	 */
      allocateLock:
	lid = txLockAlloc();
	tlck = lid_to_tlock(lid);

	/*
	 * initialize tlock
	 */
	tlck->tid = tid;

	/* mark tlock for meta-data page */
	if (mp->xflag & COMMIT_PAGE) {

		tlck->flag = tlckPAGELOCK;

		/* mark the page dirty and nohomeok */
		mark_metapage_dirty(mp);
		atomic_inc(&mp->nohomeok);

		jfs_info("locking mp = 0x%p, nohomeok = %d tid = %d tlck = 0x%p",
			 mp, atomic_read(&mp->nohomeok), tid, tlck);

		/* if anonymous transaction, and buffer is on the group
		 * commit synclist, mark inode to show this.  This will
		 * prevent the buffer from being marked nohomeok for too
		 * long a time.
		 */
		if ((tid == 0) && mp->lsn)
			set_cflag(COMMIT_Synclist, ip);
	}
	/* mark tlock for in-memory inode */
	else
		tlck->flag = tlckINODELOCK;

	/* bind the tlock and the page */
	tlck->ip = ip;
	tlck->mp = mp;
	if (dir_xtree)
		jfs_ip->xtlid = lid;
	else
		mp->lid = lid;

	/*
	 * enqueue transaction lock to transaction/inode
	 */
	/* insert the tlock at tail of transaction tlock list */
	if (tid) {
		tblk = tid_to_tblock(tid);
		if (tblk->next)
			lid_to_tlock(tblk->last)->next = lid;
		else
			tblk->next = lid;
		tlck->next = 0;
		tblk->last = lid;
	}
	/* anonymous transaction:
	 * insert the tlock at head of inode anonymous tlock list
	 */
	else {
		tlck->next = jfs_ip->atlhead;
		jfs_ip->atlhead = lid;
		if (tlck->next == 0) {
			/* This inode's first anonymous transaction */
			jfs_ip->atltail = lid;
			list_add_tail(&jfs_ip->anon_inode_list,
				      &TxAnchor.anon_list);
		}
	}
	/* initialize type dependent area for linelock */
	linelock = (struct linelock *) & tlck->lock;
	linelock->flag = tlckLINELOCK;
	linelock->maxcnt = TLOCKSHORT;
	linelock->index = 0;

	switch (type & tlckTYPE) {
	case tlckDTREE:
		linelock->l2linesize = L2DTSLOTSIZE;
		break;

	case tlckXTREE:
		linelock->l2linesize = L2XTSLOTSIZE;

		xtlck = (struct xtlock *) linelock;
		xtlck->header.offset = 0;
		xtlck->header.length = 2;

		if (type & tlckNEW) {
			xtlck->lwm.offset = XTENTRYSTART;
		} else {
			if (mp->xflag & COMMIT_PAGE)
				p = (xtpage_t *) mp->data;
			else
				p = &jfs_ip->i_xtroot;
			xtlck->lwm.offset =
			    le16_to_cpu(p->header.nextindex);
		}
		xtlck->lwm.length = 0;	/* ! */
		xtlck->twm.offset = 0;
		xtlck->hwm.offset = 0;
		break;

	case tlckINODE:
		linelock->l2linesize = L2INODESLOTSIZE;
		break;

	case tlckDATA:
		linelock->l2linesize = L2DATASLOTSIZE;
		break;

	default:
		jfs_err("UFO tlock:0x%p", tlck);
	}

	/*
	 * update tlock vector
	 */
      grantLock:
	tlck->type |= type;
	TXN_UNLOCK();

	return tlck;
	/*
	 * page is being locked by another transaction:
	 */
      waitLock:
	/* Only locks on ipimap or ipaimap should reach here */
	/* assert(jfs_ip->fileset == AGGREGATE_I); */
	if (jfs_ip->fileset != AGGREGATE_I) {
		jfs_err("txLock: trying to lock locked page!");
		dump_mem("ip", ip, sizeof(struct inode));
		dump_mem("mp", mp, sizeof(struct metapage));
		dump_mem("Locker's tblk", tid_to_tblock(tid),
			 sizeof(struct tblock));
		dump_mem("Tlock", tlck, sizeof(struct tlock));
		BUG();
	}
	INCREMENT(stattx.waitlock);	/* statistics */
	release_metapage(mp);

	jfs_info("txLock: in waitLock, tid = %d, xtid = %d, lid = %d",
		 tid, xtid, lid);
	TXN_SLEEP_DROP_LOCK(&tid_to_tblock(xtid)->waitor);
	jfs_info("txLock: awakened tid = %d, lid = %d", tid, lid);

	return NULL;
}
/*
 * NAME:	txRelease()
 *
 * FUNCTION:	Release buffers associated with transaction locks, but don't
 *		mark homeok yet.  This allows other transactions to modify
 *		buffers, but won't let them go to disk until the commit record
 *		actually gets written.
 *
 * RETURN:	Errors from subroutines.
 */
static void txRelease(struct tblock * tblk)
{
	struct metapage *mp;
	lid_t lid;
	struct tlock *tlck;

	for (lid = tblk->next; lid; lid = tlck->next) {
		tlck = lid_to_tlock(lid);
		if ((mp = tlck->mp) != NULL &&
		    (tlck->type & tlckBTROOT) == 0) {
			assert(mp->xflag & COMMIT_PAGE);
			mp->lid = 0;
		}
	}

	/*
	 * wakeup transactions waiting on a page locked
	 * by the current transaction
	 */
	TXN_WAKEUP(&tblk->waitor);
}
/*
 * NAME:	txUnlock()
 *
 * FUNCTION:	Initiates pageout of pages modified by tid in journalled
 *		objects and frees their lockwords.
 */
static void txUnlock(struct tblock * tblk)
{
	struct tlock *tlck;
	struct linelock *linelock;
	lid_t lid, next, llid, k;
	struct metapage *mp;
	struct jfs_log *log;
	int difft, diffp;

	jfs_info("txUnlock: tblk = 0x%p", tblk);
	log = JFS_SBI(tblk->sb)->log;

	/*
	 * mark page under tlock homeok (its log has been written):
	 */
	for (lid = tblk->next; lid; lid = next) {
		tlck = lid_to_tlock(lid);
		next = tlck->next;

		jfs_info("unlocking lid = %d, tlck = 0x%p", lid, tlck);

		/* unbind page from tlock */
		if ((mp = tlck->mp) != NULL &&
		    (tlck->type & tlckBTROOT) == 0) {
			assert(mp->xflag & COMMIT_PAGE);

			/*
			 * It's possible that someone else has the metapage.
			 * The only things we're changing are nohomeok, which
			 * is handled atomically, and clsn which is protected
			 * by the LOGSYNC_LOCK.
			 */
			hold_metapage(mp, 1);

			assert(atomic_read(&mp->nohomeok) > 0);
			atomic_dec(&mp->nohomeok);

			/* inherit younger/larger clsn */
			if (mp->clsn) {
				logdiff(difft, tblk->clsn, log);
				logdiff(diffp, mp->clsn, log);
				if (difft > diffp)
					mp->clsn = tblk->clsn;
			} else
				mp->clsn = tblk->clsn;

			assert(!(tlck->flag & tlckFREEPAGE));

			if (tlck->flag & tlckWRITEPAGE) {
				write_metapage(mp);
			} else {
				/* release page which has been forced */
				release_metapage(mp);
			}
		}

		/* insert tlock, and linelock(s) of the tlock if any,
		 * at head of freelist
		 */
		llid = ((struct linelock *) & tlck->lock)->next;
		while (llid) {
			linelock = (struct linelock *) lid_to_tlock(llid);
			k = linelock->next;
			txLockFree(llid);
			llid = k;
		}
		txLockFree(lid);
	}
	tblk->next = tblk->last = 0;

	/*
	 * remove tblock from logsynclist
	 * (allocation map pages inherited lsn of tblk and
	 * have been inserted in logsync list at txUpdateMap())
	 */
	if (tblk->lsn) {
		list_del(&tblk->synclist);
	}
}
/*
 * NAME:	txMaplock()
 *
 * FUNCTION:	allocate a transaction lock for freed page/entry;
 *		for freed page, maplock is used as xtlock/dtlock type;
 */
struct tlock *txMaplock(tid_t tid, struct inode *ip, int type)
{
	struct jfs_inode_info *jfs_ip = JFS_IP(ip);
	lid_t lid;
	struct tblock *tblk;
	struct tlock *tlck;
	struct maplock *maplock;

	TXN_LOCK();

	/*
	 * allocate a tlock
	 */
	lid = txLockAlloc();
	tlck = lid_to_tlock(lid);

	/*
	 * initialize tlock
	 */
	tlck->tid = tid;

	/* bind the tlock and the object */
	tlck->flag = tlckINODELOCK;
	tlck->ip = ip;
	tlck->mp = NULL;

	tlck->type = type;

	/*
	 * enqueue transaction lock to transaction/inode
	 */
	/* insert the tlock at tail of transaction tlock list */
	if (tid) {
		tblk = tid_to_tblock(tid);
		if (tblk->next)
			lid_to_tlock(tblk->last)->next = lid;
		else
			tblk->next = lid;
		tlck->next = 0;
		tblk->last = lid;
	}
	/* anonymous transaction:
	 * insert the tlock at head of inode anonymous tlock list
	 */
	else {
		tlck->next = jfs_ip->atlhead;
		jfs_ip->atlhead = lid;
		if (tlck->next == 0) {
			/* This inode's first anonymous transaction */
			jfs_ip->atltail = lid;
			list_add_tail(&jfs_ip->anon_inode_list,
				      &TxAnchor.anon_list);
		}
	}

	TXN_UNLOCK();

	/* initialize type dependent area for maplock */
	maplock = (struct maplock *) & tlck->lock;
	maplock->next = 0;
	maplock->maxcnt = 0;
	maplock->index = 0;

	return tlck;
}
/*
 * NAME:	txLinelock()
 *
 * FUNCTION:	allocate a transaction lock for log vector list
 */
struct linelock *txLinelock(struct linelock * tlock)
{
	lid_t lid;
	struct tlock *tlck;
	struct linelock *linelock;

	/* allocate a TxLock structure */
	lid = txLockAlloc();
	tlck = lid_to_tlock(lid);

	/* initialize linelock */
	linelock = (struct linelock *) tlck;
	linelock->flag = tlckLINELOCK;
	linelock->maxcnt = TLOCKLONG;
	linelock->index = 0;

	/* append linelock after tlock */
	linelock->next = tlock->next;
	tlock->next = lid;

	return linelock;
}
/*
 *	transaction commit management
 *	-----------------------------
 */

/*
 * NAME:	txCommit()
 *
 * FUNCTION:	commit the changes to the objects specified in
 *		clist.  For journalled segments only the
 *		changes of the caller are committed, ie by tid.
 *		for non-journalled segments the data are flushed to
 *		disk and then the change to the disk inode and indirect
 *		blocks committed (so blocks newly allocated to the
 *		segment will be made a part of the segment atomically).
 *
 *		all of the segments specified in clist must be in
 *		one file system.  no more than 6 segments are needed
 *		to handle all unix svcs.
 *
 *		if the i_nlink field (i.e. disk inode link count)
 *		is zero, and the type of inode is a regular file or
 *		directory, or symbolic link, the inode is truncated
 *		to zero length.  the truncation is committed but the
 *		VM resources are unaffected until it is closed (see
 *		iput()).
 *
 *		on entry the inode lock on each segment is assumed
 *		to be held.
 */
int txCommit(tid_t tid,		/* transaction identifier */
	     int nip,		/* number of inodes to commit */
	     struct inode **iplist,	/* list of inode to commit */
	     int flag)
{
	int rc = 0;
	struct commit cd;
	struct jfs_log *log;
	struct tblock *tblk;
	struct lrd *lrd;
	int lsn;
	struct inode *ip;
	struct jfs_inode_info *jfs_ip;
	int k, n;
	ino_t top;
	struct super_block *sb;

	jfs_info("txCommit, tid = %d, flag = %d", tid, flag);
	/* is read-only file system ? */
	if (isReadOnly(iplist[0])) {
		rc = -EROFS;
		goto TheEnd;
	}

	sb = cd.sb = iplist[0]->i_sb;
	cd.tid = tid;

	if (tid == 0)
		tid = txBegin(sb, 0);
	tblk = tid_to_tblock(tid);

	/*
	 * initialize commit structure
	 */
	log = JFS_SBI(sb)->log;

	/* initialize log record descriptor in commit */
	lrd = &cd.lrd;
	lrd->logtid = cpu_to_le32(tblk->logtid);

	tblk->xflag |= flag;

	if ((flag & (COMMIT_FORCE | COMMIT_SYNC)) == 0)
		tblk->xflag |= COMMIT_LAZY;
	/*
	 * prepare non-journaled objects for commit
	 *
	 * flush data pages of non-journaled file
	 * to prevent the file getting non-initialized disk blocks
	 */
	cd.iplist = iplist;
	cd.nip = nip;

	/*
	 * acquire transaction lock on (on-disk) inodes
	 *
	 * update on-disk inode from in-memory inode
	 * acquiring transaction locks for AFTER records
	 * on the on-disk inode of file object
	 *
	 * sort the inodes array by inode number in descending order
	 * to prevent deadlock when acquiring transaction lock
	 * of on-disk inodes on multiple on-disk inode pages by
	 * multiple concurrent transactions
	 */
	for (k = 0; k < cd.nip; k++) {
		top = (cd.iplist[k])->i_ino;
		for (n = k + 1; n < cd.nip; n++) {
			ip = cd.iplist[n];
			if (ip->i_ino > top) {
				top = ip->i_ino;
				cd.iplist[n] = cd.iplist[k];
				cd.iplist[k] = ip;
			}
		}

		ip = cd.iplist[k];
		jfs_ip = JFS_IP(ip);

		/*
		 * BUGBUG - This code has temporarily been removed.  The
		 * intent is to ensure that any file data is written before
		 * the metadata is committed to the journal.  This prevents
		 * uninitialized data from appearing in a file after the
		 * journal has been replayed.  (The uninitialized data
		 * could be sensitive data removed by another user.)
		 *
		 * The problem now is that we are holding the IWRITELOCK
		 * on the inode, and calling filemap_fdatawrite on an
		 * unmapped page will cause a deadlock in jfs_get_block.
		 *
		 * The long term solution is to pare down the use of
		 * IWRITELOCK.  We are currently holding it too long.
		 * We could also be smarter about which data pages need
		 * to be written before the transaction is committed and
		 * when we don't need to worry about it at all.
		 *
		 * if ((!S_ISDIR(ip->i_mode))
		 *     && (tblk->flag & COMMIT_DELETE) == 0) {
		 *	filemap_fdatawrite(ip->i_mapping);
		 *	filemap_fdatawait(ip->i_mapping);
		 * }
		 */

		/*
		 * Mark inode as not dirty.  It will still be on the dirty
		 * inode list, but we'll know not to commit it again unless
		 * it gets marked dirty again
		 */
		clear_cflag(COMMIT_Dirty, ip);
		/* inherit anonymous tlock(s) of inode */
		if (jfs_ip->atlhead) {
			lid_to_tlock(jfs_ip->atltail)->next = tblk->next;
			tblk->next = jfs_ip->atlhead;
			if (!tblk->last)
				tblk->last = jfs_ip->atltail;
			jfs_ip->atlhead = jfs_ip->atltail = 0;
			TXN_LOCK();
			list_del_init(&jfs_ip->anon_inode_list);
			TXN_UNLOCK();
		}

		/*
		 * acquire transaction lock on on-disk inode page
		 * (become first tlock of the tblk's tlock list)
		 */
		if ((rc = diWrite(tid, ip)))
			goto out;
	}

	/*
	 * write log records from transaction locks
	 *
	 * txUpdateMap() resets XAD_NEW in XAD.
	 */
	if ((rc = txLog(log, tblk, &cd)))
		goto TheEnd;

	/*
	 * Ensure that inode isn't reused before
	 * lazy commit thread finishes processing
	 */
	if (tblk->xflag & (COMMIT_CREATE | COMMIT_DELETE)) {
		atomic_inc(&tblk->ip->i_count);
		/*
		 * Avoid a rare deadlock
		 *
		 * If the inode is locked, we may be blocked in
		 * jfs_commit_inode.  If so, we don't want the
		 * lazy_commit thread doing the last iput() on the inode
		 * since that may block on the locked inode.  Instead,
		 * commit the transaction synchronously, so the last iput
		 * will be done by the calling thread (or later)
		 */
		if (tblk->ip->i_state & I_LOCK)
			tblk->xflag &= ~COMMIT_LAZY;
	}

	ASSERT((!(tblk->xflag & COMMIT_DELETE)) ||
	       ((tblk->ip->i_nlink == 0) &&
		!test_cflag(COMMIT_Nolink, tblk->ip)));

	/*
	 * write COMMIT log record
	 */
	lrd->type = cpu_to_le16(LOG_COMMIT);
	lsn = lmLog(log, tblk, lrd, NULL);

	lmGroupCommit(log, tblk);
	/*
	 * - transaction is now committed -
	 */

	/*
	 * force pages in careful update
	 * (imap addressing structure update)
	 */
	if (flag & COMMIT_FORCE)
		txForce(tblk);

	/*
	 * update allocation map.
	 *
	 * update inode allocation map and inode:
	 * free pager lock on memory object of inode if any.
	 * update block allocation map.
	 *
	 * txUpdateMap() resets XAD_NEW in XAD.
	 */
	if (tblk->xflag & COMMIT_FORCE)
		txUpdateMap(tblk);

	/*
	 * free transaction locks and pageout/free pages
	 */
	txRelease(tblk);

	if ((tblk->flag & tblkGC_LAZY) == 0)
		txUnlock(tblk);

	/*
	 * reset in-memory object state
	 */
	for (k = 0; k < cd.nip; k++) {
		ip = cd.iplist[k];
		jfs_ip = JFS_IP(ip);

		/*
		 * reset in-memory inode state
		 */
		jfs_ip->bxflag = 0;
		jfs_ip->blid = 0;
	}

      out:
	if (rc != 0)
		txAbortCommit(&cd, rc);

      TheEnd:
	jfs_info("txCommit: tid = %d, returning %d", tid, rc);
	return rc;
}
/*
 * NAME:	txLog()
 *
 * FUNCTION:	Writes AFTER log records for all lines modified
 *		by tid for segments specified by inodes in comdata.
 *		Code assumes only WRITELOCKS are recorded in lockwords.
 */
static int txLog(struct jfs_log * log, struct tblock * tblk, struct commit * cd)
{
	int rc = 0;
	struct inode *ip;
	lid_t lid;
	struct tlock *tlck;
	struct lrd *lrd = &cd->lrd;

	/*
	 * write log record(s) for each tlock of transaction,
	 */
	for (lid = tblk->next; lid; lid = tlck->next) {
		tlck = lid_to_tlock(lid);

		tlck->flag |= tlckLOG;

		/* initialize lrd common */
		ip = tlck->ip;
		lrd->aggregate = cpu_to_le32(ip->i_sb->s_bdev->bd_dev);
		lrd->log.redopage.fileset = cpu_to_le32(JFS_IP(ip)->fileset);
		lrd->log.redopage.inode = cpu_to_le32(ip->i_ino);

		hold_metapage(tlck->mp, 0);

		/* write log record of page from the tlock */
		switch (tlck->type & tlckTYPE) {
		case tlckXTREE:
			xtLog(log, tblk, lrd, tlck);
			break;

		case tlckDTREE:
			dtLog(log, tblk, lrd, tlck);
			break;

		case tlckINODE:
			diLog(log, tblk, lrd, tlck, cd);
			break;

		case tlckMAP:
			mapLog(log, tblk, lrd, tlck);
			break;

		case tlckDATA:
			dataLog(log, tblk, lrd, tlck);
			break;

		default:
			jfs_err("UFO tlock:0x%p", tlck);
		}
		release_metapage(tlck->mp);
	}

	return rc;
}
/*
 * NAME:	diLog()
 *
 * FUNCTION:	log inode tlock and format maplock to update bmap;
 */
int diLog(struct jfs_log * log, struct tblock * tblk, struct lrd * lrd,
	  struct tlock * tlck, struct commit * cd)
{
	int rc = 0;
	struct metapage *mp;
	pxd_t *pxd;
	struct pxd_lock *pxdlock;

	mp = tlck->mp;

	/* initialize as REDOPAGE record format */
	lrd->log.redopage.type = cpu_to_le16(LOG_INODE);
	lrd->log.redopage.l2linesize = cpu_to_le16(L2INODESLOTSIZE);

	pxd = &lrd->log.redopage.pxd;

	/*
	 * inode after image
	 */
	if (tlck->type & tlckENTRY) {
		/* log after-image for logredo(): */
		lrd->type = cpu_to_le16(LOG_REDOPAGE);
		// *pxd = mp->cm_pxd;
		PXDaddress(pxd, mp->index);
		PXDlength(pxd,
			  mp->logical_size >> tblk->sb->s_blocksize_bits);
		lrd->backchain = cpu_to_le32(lmLog(log, tblk, lrd, tlck));

		/* mark page as homeward bound */
		tlck->flag |= tlckWRITEPAGE;
	} else if (tlck->type & tlckFREE) {
		/*
		 * free inode extent
		 *
		 * (pages of the freed inode extent have been invalidated and
		 * a maplock for free of the extent has been formatted at
		 * txLock() time);
		 *
		 * the tlock had been acquired on the inode allocation map page
		 * (iag) that specifies the freed extent, even though the map
		 * page is not itself logged, to prevent pageout of the map
		 * page before the log;
		 */
		assert(tlck->type & tlckFREE);

		/* log LOG_NOREDOINOEXT of the freed inode extent for
		 * logredo() to start NoRedoPage filters, and to update
		 * imap and bmap for free of the extent;
		 */
		lrd->type = cpu_to_le16(LOG_NOREDOINOEXT);
		/*
		 * For the LOG_NOREDOINOEXT record, we need
		 * to pass the IAG number and inode extent
		 * index (within that IAG) from which the
		 * extent is being released.  These have been
		 * passed to us in the iplist[1] and iplist[2].
		 */
		lrd->log.noredoinoext.iagnum =
		    cpu_to_le32((u32) (size_t) cd->iplist[1]);
		lrd->log.noredoinoext.inoext_idx =
		    cpu_to_le32((u32) (size_t) cd->iplist[2]);

		pxdlock = (struct pxd_lock *) & tlck->lock;
		*pxd = pxdlock->pxd;
		lrd->backchain = cpu_to_le32(lmLog(log, tblk, lrd, NULL));

		/* update bmap */
		tlck->flag |= tlckUPDATEMAP;

		/* mark page as homeward bound */
		tlck->flag |= tlckWRITEPAGE;
	} else
		jfs_err("diLog: UFO type tlck:0x%p", tlck);

#ifdef _JFS_WIP
	/*
	 * alloc/free external EA extent
	 *
	 * a maplock for txUpdateMap() to update bPWMAP for alloc/free
	 * of the extent has been formatted at txLock() time;
	 */
	else {
		assert(tlck->type & tlckEA);

		/* log LOG_UPDATEMAP for logredo() to update bmap for
		 * alloc of new (and free of old) external EA extent;
		 */
		lrd->type = cpu_to_le16(LOG_UPDATEMAP);
		pxdlock = (struct pxd_lock *) & tlck->lock;
		nlock = pxdlock->index;
		for (i = 0; i < nlock; i++, pxdlock++) {
			if (pxdlock->flag & mlckALLOCPXD)
				lrd->log.updatemap.type =
				    cpu_to_le16(LOG_ALLOCPXD);
			else
				lrd->log.updatemap.type =
				    cpu_to_le16(LOG_FREEPXD);
			lrd->log.updatemap.nxd = cpu_to_le16(1);
			lrd->log.updatemap.pxd = pxdlock->pxd;
			lrd->backchain =
			    cpu_to_le32(lmLog(log, tblk, lrd, NULL));
		}

		/* update bmap */
		tlck->flag |= tlckUPDATEMAP;
	}
#endif				/* _JFS_WIP */

	return rc;
}
/*
 * NAME:	dataLog()
 *
 * FUNCTION:	log data tlock
 */
int dataLog(struct jfs_log * log, struct tblock * tblk, struct lrd * lrd,
	    struct tlock * tlck)
{
	struct metapage *mp;
	pxd_t *pxd;

	mp = tlck->mp;

	/* initialize as REDOPAGE record format */
	lrd->log.redopage.type = cpu_to_le16(LOG_DATA);
	lrd->log.redopage.l2linesize = cpu_to_le16(L2DATASLOTSIZE);

	pxd = &lrd->log.redopage.pxd;

	/* log after-image for logredo(): */
	lrd->type = cpu_to_le16(LOG_REDOPAGE);

	if (JFS_IP(tlck->ip)->next_index < MAX_INLINE_DIRTABLE_ENTRY) {
		/*
		 * The table has been truncated, we must have deleted
		 * the last entry, so don't bother logging this.
		 */
		mp->lid = 0;
		atomic_dec(&mp->nohomeok);
		discard_metapage(mp);
		tlck->mp = NULL;
		return 0;
	}

	PXDaddress(pxd, mp->index);
	PXDlength(pxd, mp->logical_size >> tblk->sb->s_blocksize_bits);

	lrd->backchain = cpu_to_le32(lmLog(log, tblk, lrd, tlck));

	/* mark page as homeward bound */
	tlck->flag |= tlckWRITEPAGE;

	return 0;
}
/*
 * NAME:	dtLog()
 *
 * FUNCTION:	log dtree tlock and format maplock to update bmap;
 */
void dtLog(struct jfs_log * log, struct tblock * tblk, struct lrd * lrd,
	   struct tlock * tlck)
{
	struct metapage *mp;
	struct pxd_lock *pxdlock;
	pxd_t *pxd;

	mp = tlck->mp;

	/* initialize as REDOPAGE/NOREDOPAGE record format */
	lrd->log.redopage.type = cpu_to_le16(LOG_DTREE);
	lrd->log.redopage.l2linesize = cpu_to_le16(L2DTSLOTSIZE);

	pxd = &lrd->log.redopage.pxd;

	if (tlck->type & tlckBTROOT)
		lrd->log.redopage.type |= cpu_to_le16(LOG_BTROOT);

	/*
	 * page extension via relocation: entry insertion;
	 * page extension in-place: entry insertion;
	 * new right page from page split, reinitialized in-line
	 * root from root page split: entry insertion;
	 */
	if (tlck->type & (tlckNEW | tlckEXTEND)) {
		/* log after-image of the new page for logredo():
		 * mark log (LOG_NEW) for logredo() to initialize
		 * freelist and update bmap for alloc of the new page;
		 */
		lrd->type = cpu_to_le16(LOG_REDOPAGE);
		if (tlck->type & tlckEXTEND)
			lrd->log.redopage.type |= cpu_to_le16(LOG_EXTEND);
		else
			lrd->log.redopage.type |= cpu_to_le16(LOG_NEW);
		// *pxd = mp->cm_pxd;
		PXDaddress(pxd, mp->index);
		PXDlength(pxd,
			  mp->logical_size >> tblk->sb->s_blocksize_bits);
		lrd->backchain = cpu_to_le32(lmLog(log, tblk, lrd, tlck));

		/* format a maplock for txUpdateMap() to update bPMAP for
		 * alloc of the new page;
		 */
		if (tlck->type & tlckBTROOT)
			return;
		tlck->flag |= tlckUPDATEMAP;
		pxdlock = (struct pxd_lock *) & tlck->lock;
		pxdlock->flag = mlckALLOCPXD;
		pxdlock->pxd = *pxd;

		/* mark page as homeward bound */
		tlck->flag |= tlckWRITEPAGE;
		return;
	}

	/*
	 * entry insertion/deletion,
	 * sibling page link update (old right page before split);
	 */
	if (tlck->type & (tlckENTRY | tlckRELINK)) {
		/* log after-image for logredo(): */
		lrd->type = cpu_to_le16(LOG_REDOPAGE);
		PXDaddress(pxd, mp->index);
		PXDlength(pxd,
			  mp->logical_size >> tblk->sb->s_blocksize_bits);
		lrd->backchain = cpu_to_le32(lmLog(log, tblk, lrd, tlck));

		/* mark page as homeward bound */
		tlck->flag |= tlckWRITEPAGE;
		return;
	}

	/*
	 * page deletion: page has been invalidated
	 * page relocation: source extent
	 *
	 * (a maplock for free of the page has been formatted
	 * at txLock() time);
	 */
	if (tlck->type & (tlckFREE | tlckRELOCATE)) {
		/* log LOG_NOREDOPAGE of the deleted page for logredo()
		 * to start NoRedoPage filter and to update bmap for free
		 * of the deleted page
		 */
		lrd->type = cpu_to_le16(LOG_NOREDOPAGE);
		pxdlock = (struct pxd_lock *) & tlck->lock;
		*pxd = pxdlock->pxd;
		lrd->backchain = cpu_to_le32(lmLog(log, tblk, lrd, NULL));

		/* a maplock for txUpdateMap() for free of the page
		 * has been formatted at txLock() time;
		 */
		tlck->flag |= tlckUPDATEMAP;
	}
	return;
}
/*
 * NAME:	xtLog()
 *
 * FUNCTION:	log xtree tlock and format maplock to update bmap;
 */
void xtLog(struct jfs_log * log, struct tblock * tblk, struct lrd * lrd,
	   struct tlock * tlck)
{
	struct inode *ip;
	struct metapage *mp;
	xtpage_t *p;
	struct xtlock *xtlck;
	struct maplock *maplock;
	struct xdlistlock *xadlock;
	struct pxd_lock *pxdlock;
	pxd_t *pxd;
	int next, lwm, hwm;

	ip = tlck->ip;
	mp = tlck->mp;

	/* initialize as REDOPAGE/NOREDOPAGE record format */
	lrd->log.redopage.type = cpu_to_le16(LOG_XTREE);
	lrd->log.redopage.l2linesize = cpu_to_le16(L2XTSLOTSIZE);

	pxd = &lrd->log.redopage.pxd;

	if (tlck->type & tlckBTROOT) {
		lrd->log.redopage.type |= cpu_to_le16(LOG_BTROOT);
		p = &JFS_IP(ip)->i_xtroot;
		if (S_ISDIR(ip->i_mode))
			lrd->log.redopage.type |=
			    cpu_to_le16(LOG_DIR_XTREE);
	} else
		p = (xtpage_t *) mp->data;
	next = le16_to_cpu(p->header.nextindex);

	xtlck = (struct xtlock *) & tlck->lock;

	maplock = (struct maplock *) & tlck->lock;
	xadlock = (struct xdlistlock *) maplock;

	/*
	 * entry insertion/extension;
	 * sibling page link update (old right page before split);
	 */
	if (tlck->type & (tlckNEW | tlckGROW | tlckRELINK)) {
		/* log after-image for logredo():
		 * logredo() will update bmap for alloc of new/extended
		 * extents (XAD_NEW|XAD_EXTEND) of XAD[lwm:next) from
		 * after-image of XADlist;
		 * logredo() resets (XAD_NEW|XAD_EXTEND) flag when
		 * applying the after-image to the meta-data page.
		 */
		lrd->type = cpu_to_le16(LOG_REDOPAGE);
		// *pxd = mp->cm_pxd;
		PXDaddress(pxd, mp->index);
		PXDlength(pxd,
			  mp->logical_size >> tblk->sb->s_blocksize_bits);
		lrd->backchain = cpu_to_le32(lmLog(log, tblk, lrd, tlck));

		/* format a maplock for txUpdateMap() to update bPMAP
		 * for alloc of new/extended extents of XAD[lwm:next)
		 * from the page itself;
		 * txUpdateMap() resets (XAD_NEW|XAD_EXTEND) flag.
		 */
		lwm = xtlck->lwm.offset;
		if (lwm == 0)
			lwm = XTPAGEMAXSLOT;

		if (lwm == next)
			goto out;
		tlck->flag |= tlckUPDATEMAP;
		xadlock->flag = mlckALLOCXADLIST;
		xadlock->count = next - lwm;
		if ((xadlock->count <= 2) && (tblk->xflag & COMMIT_LAZY)) {
			int i;
			/*
			 * Lazy commit may allow xtree to be modified before
			 * txUpdateMap runs.  Copy xad into linelock to
			 * preserve correct data.
			 */
			xadlock->xdlist = &xtlck->pxdlock;
			memcpy(xadlock->xdlist, &p->xad[lwm],
			       sizeof(xad_t) * xadlock->count);

			for (i = 0; i < xadlock->count; i++)
				p->xad[lwm + i].flag &=
				    ~(XAD_NEW | XAD_EXTENDED);
		} else {
			/*
			 * xdlist will point into the inode's xtree, ensure
			 * that transaction is not committed lazily.
			 */
			xadlock->xdlist = &p->xad[lwm];
			tblk->xflag &= ~COMMIT_LAZY;
		}
		jfs_info("xtLog: alloc ip:0x%p mp:0x%p tlck:0x%p lwm:%d "
			 "count:%d", tlck->ip, mp, tlck, lwm, xadlock->count);

	      out:
		/* mark page as homeward bound */
		tlck->flag |= tlckWRITEPAGE;

		return;
	}
	/*
	 * page deletion: file deletion/truncation (ref. xtTruncate())
	 *
	 * (page will be invalidated after log is written and bmap
	 * is updated from the page);
	 */
	if (tlck->type & tlckFREE) {
		/* LOG_NOREDOPAGE log for NoRedoPage filter:
		 * if page free from file delete, NoRedoFile filter from
		 * inode image of zero link count will subsume NoRedoPage
		 * filters for each page;
		 * if page free from file truncation, write NoRedoPage
		 * filter;
		 *
		 * update of block allocation map for the page itself:
		 * if page free from deletion and truncation, LOG_UPDATEMAP
		 * log for the page itself is generated from processing
		 * its parent page xad entries;
		 */
		/* if page free from file truncation, log LOG_NOREDOPAGE
		 * of the deleted page for logredo() to start NoRedoPage
		 * filter for the page;
		 */
		if (tblk->xflag & COMMIT_TRUNCATE) {
			/* write NOREDOPAGE for the page */
			lrd->type = cpu_to_le16(LOG_NOREDOPAGE);
			PXDaddress(pxd, mp->index);
			PXDlength(pxd,
				  mp->logical_size >> tblk->sb->
				  s_blocksize_bits);
			lrd->backchain =
			    cpu_to_le32(lmLog(log, tblk, lrd, NULL));

			if (tlck->type & tlckBTROOT) {
				/* Empty xtree must be logged */
				lrd->type = cpu_to_le16(LOG_REDOPAGE);
				lrd->backchain =
				    cpu_to_le32(lmLog(log, tblk, lrd, tlck));
			}
		}

		/* init LOG_UPDATEMAP of the freed extents
		 * XAD[XTENTRYSTART:hwm) from the deleted page itself
		 * for logredo() to update bmap;
		 */
		lrd->type = cpu_to_le16(LOG_UPDATEMAP);
		lrd->log.updatemap.type = cpu_to_le16(LOG_FREEXADLIST);
		xtlck = (struct xtlock *) & tlck->lock;
		hwm = xtlck->hwm.offset;
		lrd->log.updatemap.nxd =
		    cpu_to_le16(hwm - XTENTRYSTART + 1);
		/* reformat linelock for lmLog() */
		xtlck->header.offset = XTENTRYSTART;
		xtlck->header.length = hwm - XTENTRYSTART + 1;
		lrd->backchain = cpu_to_le32(lmLog(log, tblk, lrd, tlck));

		/* format a maplock for txUpdateMap() to update bmap
		 * to free extents of XAD[XTENTRYSTART:hwm) from the
		 * deleted page itself;
		 */
		tlck->flag |= tlckUPDATEMAP;
		xadlock->flag = mlckFREEXADLIST;
		xadlock->count = hwm - XTENTRYSTART + 1;
		if ((xadlock->count <= 2) && (tblk->xflag & COMMIT_LAZY)) {
			/*
			 * Lazy commit may allow xtree to be modified before
			 * txUpdateMap runs.  Copy xad into linelock to
			 * preserve correct data.
			 */
			xadlock->xdlist = &xtlck->pxdlock;
			memcpy(xadlock->xdlist, &p->xad[XTENTRYSTART],
			       sizeof(xad_t) * xadlock->count);
		} else {
			/*
			 * xdlist will point into the inode's xtree, ensure
			 * that transaction is not committed lazily.
			 */
			xadlock->xdlist = &p->xad[XTENTRYSTART];
			tblk->xflag &= ~COMMIT_LAZY;
		}
		jfs_info("xtLog: free ip:0x%p mp:0x%p count:%d lwm:2",
			 tlck->ip, mp, xadlock->count);

		/* mark page as invalid */
		if (((tblk->xflag & COMMIT_PWMAP) || S_ISDIR(ip->i_mode))
		    && !(tlck->type & tlckBTROOT))
			tlck->flag |= tlckFREEPAGE;
		/*
		   else (tblk->xflag & COMMIT_PMAP)
		   ? release the page;
		 */
		return;
	}
	/*
	 * page/entry truncation: file truncation (ref. xtTruncate())
	 *
	 *	|----------+------+------+---------------|
	 *	|          |      |
	 *	|          |      hwm - hwm before truncation
	 *	|          next - truncation point
	 *	lwm - lwm before truncation
	 */
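	/*
	 * Worked example (assumed values, for orientation): if lwm = 3,
	 * next = 5, hwm = 7, then XAD[3:5) are newly allocated entries
	 * logged as after-image, XAD[4] (= next - 1) may carry a truncated
	 * delta extent when twm == next - 1, and XAD[5:7] are freed and
	 * logged via LOG_UPDATEMAP for logredo() to update the bmap.
	 */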
	if (tlck->type & tlckTRUNCATE) {
		pxd_t tpxd;	/* truncated extent of xad */
		int twm;

		/*
		 * For truncation the entire linelock may be used, so it would
		 * be difficult to store xad list in linelock itself.
		 * Therefore, we'll just force transaction to be committed
		 * synchronously, so that xtree pages won't be changed before
		 * txUpdateMap runs.
		 */
		tblk->xflag &= ~COMMIT_LAZY;
		lwm = xtlck->lwm.offset;
		if (lwm == 0)
			lwm = XTPAGEMAXSLOT;
		hwm = xtlck->hwm.offset;
		twm = xtlck->twm.offset;

		/*
		 * write log records
		 */
		/*
		 * allocate entries XAD[lwm:next]:
		 */
		if (lwm < next) {
			/* log after-image for logredo():
			 * logredo() will update bmap for alloc of new/extended
			 * extents (XAD_NEW|XAD_EXTEND) of XAD[lwm:next) from
			 * after-image of XADlist;
			 * logredo() resets (XAD_NEW|XAD_EXTEND) flag when
			 * applying the after-image to the meta-data page.
			 */
			lrd->type = cpu_to_le16(LOG_REDOPAGE);
			PXDaddress(pxd, mp->index);
			PXDlength(pxd,
				  mp->logical_size >> tblk->sb->
				  s_blocksize_bits);
			lrd->backchain =
			    cpu_to_le32(lmLog(log, tblk, lrd, tlck));
		}

		/*
		 * truncate entry XAD[twm == next - 1]:
		 */
		if (twm == next - 1) {
			/* init LOG_UPDATEMAP for logredo() to update bmap for
			 * free of truncated delta extent of the truncated
			 * entry XAD[next - 1]:
			 * (xtlck->pxdlock = truncated delta extent);
			 */
			pxdlock = (struct pxd_lock *) & xtlck->pxdlock;
			/* assert(pxdlock->type & tlckTRUNCATE); */
			lrd->type = cpu_to_le16(LOG_UPDATEMAP);
			lrd->log.updatemap.type = cpu_to_le16(LOG_FREEPXD);
			lrd->log.updatemap.nxd = cpu_to_le16(1);
			lrd->log.updatemap.pxd = pxdlock->pxd;
			tpxd = pxdlock->pxd;	/* save to format maplock */
			lrd->backchain =
			    cpu_to_le32(lmLog(log, tblk, lrd, NULL));
		}

		/*
		 * free entries XAD[next:hwm]:
		 */
		if (hwm >= next) {
			/* init LOG_UPDATEMAP of the freed extents
			 * XAD[next:hwm] from the deleted page itself
			 * for logredo() to update bmap;
			 */
			lrd->type = cpu_to_le16(LOG_UPDATEMAP);
			lrd->log.updatemap.type =
			    cpu_to_le16(LOG_FREEXADLIST);
			xtlck = (struct xtlock *) & tlck->lock;
			hwm = xtlck->hwm.offset;
			lrd->log.updatemap.nxd =
			    cpu_to_le16(hwm - next + 1);
			/* reformat linelock for lmLog() */
			xtlck->header.offset = next;
			xtlck->header.length = hwm - next + 1;
			lrd->backchain =
			    cpu_to_le32(lmLog(log, tblk, lrd, tlck));
		}

		/*
		 * format maplock(s) for txUpdateMap() to update bmap
		 */
		/*
		 * allocate entries XAD[lwm:next):
		 */
		if (lwm < next) {
			/* format a maplock for txUpdateMap() to update bPMAP
			 * for alloc of new/extended extents of XAD[lwm:next)
			 * from the page itself;
			 * txUpdateMap() resets (XAD_NEW|XAD_EXTEND) flag.
			 */
			tlck->flag |= tlckUPDATEMAP;
			xadlock->flag = mlckALLOCXADLIST;
			xadlock->count = next - lwm;
			xadlock->xdlist = &p->xad[lwm];

			jfs_info("xtLog: alloc ip:0x%p mp:0x%p count:%d "
				 "lwm:%d next:%d",
				 tlck->ip, mp, xadlock->count, lwm, next);
		}

		/*
		 * truncate entry XAD[twm == next - 1]:
		 */
		if (twm == next - 1) {
			struct pxd_lock *pxdlock;

			/* format a maplock for txUpdateMap() to update bmap
			 * to free truncated delta extent of the truncated
			 * entry XAD[next - 1];
			 * (xtlck->pxdlock = truncated delta extent);
			 */
			tlck->flag |= tlckUPDATEMAP;
			pxdlock = (struct pxd_lock *) xadlock;
			pxdlock->flag = mlckFREEPXD;
			pxdlock->pxd = tpxd;

			jfs_info("xtLog: truncate ip:0x%p mp:0x%p count:%d "
				 "hwm:%d", ip, mp, pxdlock->count, hwm);
		}

		/*
		 * free entries XAD[next:hwm]:
		 */
		if (hwm >= next) {
			/* format a maplock for txUpdateMap() to update bmap
			 * to free extents of XAD[next:hwm] from the deleted
			 * page itself;
			 */
			tlck->flag |= tlckUPDATEMAP;
			xadlock->flag = mlckFREEXADLIST;
			xadlock->count = hwm - next + 1;
			xadlock->xdlist = &p->xad[next];

			jfs_info("xtLog: free ip:0x%p mp:0x%p count:%d "
				 "next:%d hwm:%d",
				 tlck->ip, mp, xadlock->count, next, hwm);
		}

		/* mark page as homeward bound */
		tlck->flag |= tlckWRITEPAGE;
	}
	return;
}
/*
 * NAME:	mapLog()
 *
 * FUNCTION:	log from maplock of freed data extents;
 */
void mapLog(struct jfs_log * log, struct tblock * tblk, struct lrd * lrd,
	    struct tlock * tlck)
{
	struct pxd_lock *pxdlock;
	int i, nlock;
	pxd_t *pxd;

	/*
	 * page relocation: free the source page extent
	 *
	 * a maplock for txUpdateMap() for free of the page
	 * has been formatted at txLock() time saving the src
	 * relocated page address;
	 */
	if (tlck->type & tlckRELOCATE) {
		/* log LOG_NOREDOPAGE of the old relocated page
		 * for logredo() to start NoRedoPage filter;
		 */
		lrd->type = cpu_to_le16(LOG_NOREDOPAGE);
		pxdlock = (struct pxd_lock *) & tlck->lock;
		pxd = &lrd->log.redopage.pxd;
		*pxd = pxdlock->pxd;
		lrd->backchain = cpu_to_le32(lmLog(log, tblk, lrd, NULL));

		/* (N.B. currently, logredo() does NOT update bmap
		 * for free of the page itself for (LOG_XTREE|LOG_NOREDOPAGE);
		 * if page free from relocation, LOG_UPDATEMAP log is
		 * specifically generated now for logredo()
		 * to update bmap for free of src relocated page;
		 * (new flag LOG_RELOCATE may be introduced which will
		 * inform logredo() to start NORedoPage filter and also
		 * update block allocation map at the same time, thus
		 * avoiding an extra log write);
		 */
		lrd->type = cpu_to_le16(LOG_UPDATEMAP);
		lrd->log.updatemap.type = cpu_to_le16(LOG_FREEPXD);
		lrd->log.updatemap.nxd = cpu_to_le16(1);
		lrd->log.updatemap.pxd = pxdlock->pxd;
		lrd->backchain = cpu_to_le32(lmLog(log, tblk, lrd, NULL));

		/* a maplock for txUpdateMap() for free of the page
		 * has been formatted at txLock() time;
		 */
		tlck->flag |= tlckUPDATEMAP;
	}
	/*
	 * Otherwise it's not a relocate request
	 */
	else {
		/* log LOG_UPDATEMAP for logredo() to update bmap for
		 * free of truncated/relocated delta extent of the data;
		 * e.g.: external EA extent, relocated/truncated extent
		 * from xtTailgate();
		 */
		lrd->type = cpu_to_le16(LOG_UPDATEMAP);
		pxdlock = (struct pxd_lock *) & tlck->lock;
		nlock = pxdlock->index;
		for (i = 0; i < nlock; i++, pxdlock++) {
			if (pxdlock->flag & mlckALLOCPXD)
				lrd->log.updatemap.type =
				    cpu_to_le16(LOG_ALLOCPXD);
			else
				lrd->log.updatemap.type =
				    cpu_to_le16(LOG_FREEPXD);
			lrd->log.updatemap.nxd = cpu_to_le16(1);
			lrd->log.updatemap.pxd = pxdlock->pxd;
			lrd->backchain =
			    cpu_to_le32(lmLog(log, tblk, lrd, NULL));
			jfs_info("mapLog: xaddr:0x%lx xlen:0x%x",
				 (ulong) addressPXD(&pxdlock->pxd),
				 lengthPXD(&pxdlock->pxd));
		}

		/* update bmap */
		tlck->flag |= tlckUPDATEMAP;
	}
}
/*
 * NAME:	txEA()
 *
 * FUNCTION:	acquire maplock for EA/ACL extents or
 *		set COMMIT_INLINE flag;
 */
void txEA(tid_t tid, struct inode *ip, dxd_t * oldea, dxd_t * newea)
{
	struct tlock *tlck = NULL;
	struct pxd_lock *maplock = NULL, *pxdlock = NULL;

	/*
	 * format maplock for alloc of new EA extent
	 */
	if (newea) {
		/* Since the newea could be a completely zeroed entry we need to
		 * check for the two flags which indicate we should actually
		 * commit new EA data
		 */
		if (newea->flag & DXD_EXTENT) {
			tlck = txMaplock(tid, ip, tlckMAP);
			maplock = (struct pxd_lock *) & tlck->lock;
			pxdlock = (struct pxd_lock *) maplock;
			pxdlock->flag = mlckALLOCPXD;
			PXDaddress(&pxdlock->pxd, addressDXD(newea));
			PXDlength(&pxdlock->pxd, lengthDXD(newea));
		} else if (newea->flag & DXD_INLINE) {
			tlck = NULL;

			set_cflag(COMMIT_Inlineea, ip);
		}
	}

	/*
	 * format maplock for free of old EA extent
	 */
	if (!test_cflag(COMMIT_Nolink, ip) && oldea->flag & DXD_EXTENT) {
		if (tlck == NULL) {	/* no maplock for new ea */
			tlck = txMaplock(tid, ip, tlckMAP);
			maplock = (struct pxd_lock *) & tlck->lock;
			pxdlock = (struct pxd_lock *) maplock;
		}
		pxdlock->flag = mlckFREEPXD;
		PXDaddress(&pxdlock->pxd, addressDXD(oldea));
		PXDlength(&pxdlock->pxd, lengthDXD(oldea));
	}
}
/*
 * NAME:	txForce()
 *
 * FUNCTION:	synchronously write pages locked by transaction
 *		after txLog() but before txUpdateMap();
 */
void txForce(struct tblock * tblk)
{
	struct tlock *tlck;
	lid_t lid, next;
	struct metapage *mp;

	/*
	 * reverse the order of transaction tlocks in
	 * careful update order of address index pages
	 * (right to left, bottom up)
	 */
	tlck = lid_to_tlock(tblk->next);
	lid = tlck->next;
	tlck->next = 0;
	while (lid) {
		tlck = lid_to_tlock(lid);
		next = tlck->next;
		tlck->next = tblk->next;
		tblk->next = lid;
		lid = next;
	}

	/*
	 * synchronously write the page, and
	 * hold the page for txUpdateMap();
	 */
	for (lid = tblk->next; lid; lid = next) {
		tlck = lid_to_tlock(lid);
		next = tlck->next;

		if ((mp = tlck->mp) != NULL &&
		    (tlck->type & tlckBTROOT) == 0) {
			assert(mp->xflag & COMMIT_PAGE);

			if (tlck->flag & tlckWRITEPAGE) {
				tlck->flag &= ~tlckWRITEPAGE;

				/* do not release page to freelist */
				/*
				 * The "right" thing to do here is to
				 * synchronously write the metadata.
				 * With the current implementation this
				 * is hard since write_metapage requires
				 * us to kunmap & remap the page.  If we
				 * have tlocks pointing into the metadata
				 * pages, we don't want to do this.  I think
				 * we can get by with synchronously writing
				 * the pages when they are released.
				 */
				assert(atomic_read(&mp->nohomeok));
				set_bit(META_dirty, &mp->flag);
				set_bit(META_sync, &mp->flag);
			}
		}
	}
}
/*
 * NAME:	txUpdateMap()
 *
 * FUNCTION:	update persistent allocation map (and working map
 *		if appropriate);
 */
static void txUpdateMap(struct tblock * tblk)
{
	struct inode *ip;
	struct inode *ipimap;
	lid_t lid;
	struct tlock *tlck;
	struct maplock *maplock;
	struct pxd_lock pxdlock;
	int maptype;
	int k, nlock;
	struct metapage *mp = NULL;

	ipimap = JFS_SBI(tblk->sb)->ipimap;

	maptype = (tblk->xflag & COMMIT_PMAP) ? COMMIT_PMAP : COMMIT_PWMAP;

	/*
	 * update block allocation map
	 *
	 * update allocation state in pmap (and wmap) and
	 * update lsn of the pmap page;
	 */
	/*
	 * scan each tlock/page of transaction for block allocation/free:
	 *
	 * for each tlock/page of transaction, update map.
	 *  ? are there tlocks for pmap and pwmap at the same time ?
	 */
	for (lid = tblk->next; lid; lid = tlck->next) {
		tlck = lid_to_tlock(lid);

		if ((tlck->flag & tlckUPDATEMAP) == 0)
			continue;

		if (tlck->flag & tlckFREEPAGE) {
			/*
			 * Another thread may attempt to reuse freed space
			 * immediately, so we want to get rid of the metapage
			 * before anyone else has a chance to get it.
			 * Lock metapage, update maps, then invalidate
			 * the metapage.
			 */
			mp = tlck->mp;
			ASSERT(mp->xflag & COMMIT_PAGE);
			hold_metapage(mp, 0);
		}

		/*
		 * extent list:
		 * . in-line PXD list:
		 * . out-of-line XAD list:
		 */
		maplock = (struct maplock *) & tlck->lock;
		nlock = maplock->index;

		for (k = 0; k < nlock; k++, maplock++) {
			/*
			 * allocate blocks in persistent map:
			 *
			 * blocks have been allocated from wmap at alloc time;
			 */
			if (maplock->flag & mlckALLOC) {
				txAllocPMap(ipimap, maplock, tblk);
			}
			/*
			 * free blocks in persistent and working map:
			 * blocks will be freed in pmap and then in wmap;
			 *
			 * ? tblock specifies the PMAP/PWMAP based upon
			 * transaction
			 *
			 * free blocks in persistent map:
			 * blocks will be freed from wmap at last reference
			 * release of the object for regular files;
			 *
			 * Always free blocks from both persistent & working
			 * maps for directories
			 */
			else {	/* (maplock->flag & mlckFREE) */

				if (S_ISDIR(tlck->ip->i_mode))
					txFreeMap(ipimap, maplock,
						  tblk, COMMIT_PWMAP);
				else
					txFreeMap(ipimap, maplock,
						  tblk, maptype);
			}
		}
		if (tlck->flag & tlckFREEPAGE) {
			if (!(tblk->flag & tblkGC_LAZY)) {
				/* This is equivalent to txRelease */
				ASSERT(mp->lid == lid);
				tlck->mp->lid = 0;
			}
			assert(atomic_read(&mp->nohomeok) == 1);
			atomic_dec(&mp->nohomeok);
			discard_metapage(mp);
			tlck->mp = NULL;
		}
	}

	/*
	 * update inode allocation map
	 *
	 * update allocation state in pmap and
	 * update lsn of the pmap page;
	 * update in-memory inode flag/state
	 *
	 * unlock mapper/write lock
	 */
	if (tblk->xflag & COMMIT_CREATE) {
		ip = tblk->ip;

		ASSERT(test_cflag(COMMIT_New, ip));
		clear_cflag(COMMIT_New, ip);

		diUpdatePMap(ipimap, ip->i_ino, FALSE, tblk);
		ipimap->i_state |= I_DIRTY;
		/* update persistent block allocation map
		 * for the allocation of inode extent;
		 */
		pxdlock.flag = mlckALLOCPXD;
		pxdlock.pxd = JFS_IP(ip)->ixpxd;
		pxdlock.index = 1;
		txAllocPMap(ip, (struct maplock *) & pxdlock, tblk);
		iput(ip);
	} else if (tblk->xflag & COMMIT_DELETE) {
		ip = tblk->ip;
		diUpdatePMap(ipimap, ip->i_ino, TRUE, tblk);
		ipimap->i_state |= I_DIRTY;
		iput(ip);
	}
}
/*
 * NAME:	txAllocPMap()
 *
 * FUNCTION:	allocate from persistent map;
 *
 * PARAMETER:
 *	maplock	-
 *		allocate from persistent map;
 *		free from persistent map;
 *		(e.g., tmp file - free from working map at release
 *		 of last reference);
 *		free from persistent and working map;
 *
 *	lsn	- log sequence number;
 */
static void txAllocPMap(struct inode *ip, struct maplock * maplock,
			struct tblock * tblk)
{
	struct inode *ipbmap = JFS_SBI(ip->i_sb)->ipbmap;
	struct xdlistlock *xadlistlock;
	xad_t *xad;
	s64 xaddr;
	int xlen;
	struct pxd_lock *pxdlock;
	struct xdlistlock *pxdlistlock;
	pxd_t *pxd;
	int n;

	/*
	 * allocate from persistent map;
	 */
	if (maplock->flag & mlckALLOCXADLIST) {
		xadlistlock = (struct xdlistlock *) maplock;
		xad = xadlistlock->xdlist;
		for (n = 0; n < xadlistlock->count; n++, xad++) {
			if (xad->flag & (XAD_NEW | XAD_EXTENDED)) {
				xaddr = addressXAD(xad);
				xlen = lengthXAD(xad);
				dbUpdatePMap(ipbmap, FALSE, xaddr,
					     (s64) xlen, tblk);
				xad->flag &= ~(XAD_NEW | XAD_EXTENDED);
				jfs_info("allocPMap: xaddr:0x%lx xlen:%d",
					 (ulong) xaddr, xlen);
			}
		}
	} else if (maplock->flag & mlckALLOCPXD) {
		pxdlock = (struct pxd_lock *) maplock;
		xaddr = addressPXD(&pxdlock->pxd);
		xlen = lengthPXD(&pxdlock->pxd);
		dbUpdatePMap(ipbmap, FALSE, xaddr, (s64) xlen, tblk);
		jfs_info("allocPMap: xaddr:0x%lx xlen:%d", (ulong) xaddr, xlen);
	} else {		/* (maplock->flag & mlckALLOCPXDLIST) */

		pxdlistlock = (struct xdlistlock *) maplock;
		pxd = pxdlistlock->xdlist;
		for (n = 0; n < pxdlistlock->count; n++, pxd++) {
			xaddr = addressPXD(pxd);
			xlen = lengthPXD(pxd);
			dbUpdatePMap(ipbmap, FALSE, xaddr, (s64) xlen,
				     tblk);
			jfs_info("allocPMap: xaddr:0x%lx xlen:%d",
				 (ulong) xaddr, xlen);
		}
	}
}
/*
 * NAME:	txFreeMap()
 *
 * FUNCTION:	free from persistent and/or working map;
 *
 * todo: optimization
 */
void txFreeMap(struct inode *ip,
	       struct maplock * maplock, struct tblock * tblk, int maptype)
{
	struct inode *ipbmap = JFS_SBI(ip->i_sb)->ipbmap;
	struct xdlistlock *xadlistlock;
	xad_t *xad;
	s64 xaddr;
	int xlen;
	struct pxd_lock *pxdlock;
	struct xdlistlock *pxdlistlock;
	pxd_t *pxd;
	int n;

	jfs_info("txFreeMap: tblk:0x%p maplock:0x%p maptype:0x%x",
		 tblk, maplock, maptype);

	/*
	 * free from persistent map;
	 */
	if (maptype == COMMIT_PMAP || maptype == COMMIT_PWMAP) {
		if (maplock->flag & mlckFREEXADLIST) {
			xadlistlock = (struct xdlistlock *) maplock;
			xad = xadlistlock->xdlist;
			for (n = 0; n < xadlistlock->count; n++, xad++) {
				if (!(xad->flag & XAD_NEW)) {
					xaddr = addressXAD(xad);
					xlen = lengthXAD(xad);
					dbUpdatePMap(ipbmap, TRUE, xaddr,
						     (s64) xlen, tblk);
					jfs_info("freePMap: xaddr:0x%lx "
						 "xlen:%d",
						 (ulong) xaddr, xlen);
				}
			}
		} else if (maplock->flag & mlckFREEPXD) {
			pxdlock = (struct pxd_lock *) maplock;
			xaddr = addressPXD(&pxdlock->pxd);
			xlen = lengthPXD(&pxdlock->pxd);
			dbUpdatePMap(ipbmap, TRUE, xaddr, (s64) xlen,
				     tblk);
			jfs_info("freePMap: xaddr:0x%lx xlen:%d",
				 (ulong) xaddr, xlen);
		} else {	/* (maplock->flag & mlckFREEPXDLIST) */

			pxdlistlock = (struct xdlistlock *) maplock;
			pxd = pxdlistlock->xdlist;
			for (n = 0; n < pxdlistlock->count; n++, pxd++) {
				xaddr = addressPXD(pxd);
				xlen = lengthPXD(pxd);
				dbUpdatePMap(ipbmap, TRUE, xaddr,
					     (s64) xlen, tblk);
				jfs_info("freePMap: xaddr:0x%lx xlen:%d",
					 (ulong) xaddr, xlen);
			}
		}
	}

	/*
	 * free from working map;
	 */
	if (maptype == COMMIT_PWMAP || maptype == COMMIT_WMAP) {
		if (maplock->flag & mlckFREEXADLIST) {
			xadlistlock = (struct xdlistlock *) maplock;
			xad = xadlistlock->xdlist;
			for (n = 0; n < xadlistlock->count; n++, xad++) {
				xaddr = addressXAD(xad);
				xlen = lengthXAD(xad);
				dbFree(ip, xaddr, (s64) xlen);
				jfs_info("freeWMap: xaddr:0x%lx xlen:%d",
					 (ulong) xaddr, xlen);
			}
		} else if (maplock->flag & mlckFREEPXD) {
			pxdlock = (struct pxd_lock *) maplock;
			xaddr = addressPXD(&pxdlock->pxd);
			xlen = lengthPXD(&pxdlock->pxd);
			dbFree(ip, xaddr, (s64) xlen);
			jfs_info("freeWMap: xaddr:0x%lx xlen:%d",
				 (ulong) xaddr, xlen);
		} else {	/* (maplock->flag & mlckFREEPXDLIST) */

			pxdlistlock = (struct xdlistlock *) maplock;
			pxd = pxdlistlock->xdlist;
			for (n = 0; n < pxdlistlock->count; n++, pxd++) {
				xaddr = addressPXD(pxd);
				xlen = lengthPXD(pxd);
				dbFree(ip, xaddr, (s64) xlen);
				jfs_info("freeWMap: xaddr:0x%lx xlen:%d",
					 (ulong) xaddr, xlen);
			}
		}
	}
}
2565 * function: remove tlock from inode anonymous locklist
void txFreelock(struct inode *ip)
{
	struct jfs_inode_info *jfs_ip = JFS_IP(ip);
	struct tlock *xtlck, *tlck;
	lid_t xlid = 0, lid;

	if (!jfs_ip->atlhead)
		return;

	xtlck = (struct tlock *) &jfs_ip->atlhead;

	while ((lid = xtlck->next)) {
		tlck = lid_to_tlock(lid);
		if (tlck->flag & tlckFREELOCK) {
			xtlck->next = tlck->next;
			txLockFree(lid);
		} else {
			xtlck = tlck;
			xlid = lid;
		}
	}

	if (jfs_ip->atlhead)
		jfs_ip->atltail = xlid;
	else {
		jfs_ip->atltail = 0;
		/*
		 * If inode was on anon_list, remove it
		 */
		TXN_LOCK();
		list_del_init(&jfs_ip->anon_inode_list);
		TXN_UNLOCK();
	}
}
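/*
 * Editor's note (not in the original source): the cast of
 * &jfs_ip->atlhead to (struct tlock *) works because 'next' is the
 * first member of struct tlock, so the list head doubles as a
 * degenerate predecessor node and unlinking the first element needs
 * no special case.  The walk above is the classic singly-linked
 * deletion idiom:
 *
 *	prev = (struct tlock *) &head;
 *	while ((lid = prev->next)) {
 *		cur = lid_to_tlock(lid);
 *		if (cur->flag & tlckFREELOCK)
 *			prev->next = cur->next;		(unlink cur)
 *		else
 *			prev = cur;			(keep cur)
 *	}
 */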
/*
 * txAbort()
 *
 * function: abort tx before commit;
 *
 * frees line-locks and segment locks for all
 * segments in comdata structure.
 * Optionally sets state of file-system to FM_DIRTY in super-block.
 * The log age of in-memory page frames held by the caller is reset
 * to 0 (to avoid log wrap).
 */
void txAbort(tid_t tid, int dirty)
{
	lid_t lid, next;
	struct metapage *mp;
	struct tblock *tblk = tid_to_tblock(tid);

	jfs_warn("txAbort: tid:%d dirty:0x%x", tid, dirty);

	/*
	 * free tlocks of the transaction
	 */
	for (lid = tblk->next; lid; lid = next) {
		next = lid_to_tlock(lid)->next;

		mp = lid_to_tlock(lid)->mp;

		if (mp) {
			mp->lid = 0;

			/*
			 * reset lsn of page to avoid log wrap:
			 *
			 * (page may have been previously committed by another
			 * transaction(s) but has not been paged, i.e.,
			 * it may be on logsync list even though it has not
			 * been logged for the current tx.)
			 */
			if (mp->xflag & COMMIT_PAGE && mp->lsn)
				LogSyncRelease(mp);
		}

		/* insert tlock at head of freelist */
		TXN_LOCK();
		txLockFree(lid);
		TXN_UNLOCK();
	}

	/* caller will free the transaction block */

	tblk->next = tblk->last = 0;

	/*
	 * mark filesystem dirty
	 */
	if (dirty)
		updateSuper(tblk->sb, FM_DIRTY);
}
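/*
 * Error-path sketch (editor's illustration, not in the original
 * source): callers that fail mid-transaction abort before releasing
 * the tid, e.g. a hypothetical operation that could not log its
 * changes:
 *
 *	tid = txBegin(sb, 0);
 *	rc = do_logged_update(tid);	(hypothetical helper)
 *	if (rc)
 *		txAbort(tid, 1);	(1 = also mark FS dirty)
 *	else
 *		rc = txCommit(tid, 1, &ip, 0);
 *	txEnd(tid);
 *
 * txAbort() only frees the tlocks; releasing the tblock itself is
 * still the caller's job via txEnd().
 */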
/*
 * txAbortCommit()
 *
 * function: abort commit.
 *
 * frees tlocks of transaction; line-locks and segment locks for all
 * segments in comdata structure; frees malloc storage;
 * sets state of file-system to FM_DIRTY in super-block;
 * The log age of in-memory page frames held by the caller is reset
 * to 0 (to avoid log wrap).
 */
void txAbortCommit(struct commit * cd, int exval)
{
	struct tblock *tblk;
	tid_t tid;
	lid_t lid, next;
	struct metapage *mp;

	assert(exval == EIO || exval == ENOMEM);
	jfs_warn("txAbortCommit: cd:0x%p", cd);

	/*
	 * free tlocks of the transaction
	 */
	tid = cd->tid;
	tblk = tid_to_tblock(tid);
	for (lid = tblk->next; lid; lid = next) {
		next = lid_to_tlock(lid)->next;

		mp = lid_to_tlock(lid)->mp;
		if (mp) {
			mp->lid = 0;

			/*
			 * reset lsn of page to avoid log wrap;
			 */
			if (mp->xflag & COMMIT_PAGE)
				LogSyncRelease(mp);
		}

		/* insert tlock at head of freelist */
		TXN_LOCK();
		txLockFree(lid);
		TXN_UNLOCK();
	}

	tblk->next = tblk->last = 0;

	/* free the transaction block */
	txEnd(tid);

	/*
	 * mark filesystem dirty
	 */
	updateSuper(cd->sb, FM_DIRTY);
}
/*
 * txLazyCommit(void)
 *
 *	All transactions except those changing ipimap (COMMIT_FORCE) are
 *	processed by this routine.  This ensures that the inode and block
 *	allocation maps are updated in order.  For synchronous transactions,
 *	let the user thread finish processing after txUpdateMap() is called.
 */
void txLazyCommit(struct tblock * tblk)
{
	struct jfs_log *log;

	while (((tblk->flag & tblkGC_READY) == 0) &&
	       ((tblk->flag & tblkGC_UNLOCKED) == 0)) {
		/* We must have gotten ahead of the user thread
		 */
		jfs_info("jfs_lazycommit: tblk 0x%p not unlocked", tblk);
		yield();
	}

	jfs_info("txLazyCommit: processing tblk 0x%p", tblk);

	txUpdateMap(tblk);

	log = (struct jfs_log *) JFS_SBI(tblk->sb)->log;

	spin_lock_irq(&log->gclock);	// LOGGC_LOCK

	tblk->flag |= tblkGC_COMMITTED;

	if (tblk->flag & tblkGC_READY)
		log->gcrtc--;

	wake_up_all(&tblk->gcwait);	// LOGGC_WAKEUP

	/*
	 * Can't release log->gclock until we've tested tblk->flag
	 */
	if (tblk->flag & tblkGC_LAZY) {
		spin_unlock_irq(&log->gclock);	// LOGGC_UNLOCK
		txUnlock(tblk);
		tblk->flag &= ~tblkGC_LAZY;
		txEnd(tblk - TxBlock);	/* Convert back to tid */
	} else
		spin_unlock_irq(&log->gclock);	// LOGGC_UNLOCK

	jfs_info("txLazyCommit: done: tblk = 0x%p", tblk);
}
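/*
 * Editor's note (not in the original source): the tblkGC_* bits form,
 * roughly, a handshake between the committing thread and this daemon:
 *
 *	tblkGC_LAZY	caller returned early; the daemon owns
 *			txUnlock()/txEnd() for this tblock
 *	tblkGC_READY	a synchronous committer is sleeping on gcwait
 *			(log->gcrtc appears to count these)
 *	tblkGC_COMMITTED set here once the maps are updated
 *	tblkGC_UNLOCKED	the user thread already ran txUnlock() itself
 *
 * Only the lazy case unlocks and frees the tid here; otherwise the
 * woken user thread finishes its own processing.
 */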
/*
 * jfs_lazycommit(void)
 *
 *	To be run as a kernel daemon.  If lbmIODone is called in an
 *	interrupt context, or where blocking is not wanted, this routine
 *	will process committed transactions from the unlock queue.
 */
int jfs_lazycommit(void *arg)
{
	int WorkDone;
	struct tblock *tblk;
	unsigned long flags;

	daemonize("jfsCommit");

	jfsCommitTask = current;

	LAZY_LOCK_INIT();
	TxAnchor.unlock_queue = TxAnchor.unlock_tail = 0;

	complete(&jfsIOwait);

	do {
		LAZY_LOCK(flags);
restart:
		WorkDone = 0;
		while ((tblk = TxAnchor.unlock_queue)) {
			/*
			 * We can't get ahead of the user thread.  Spinning
			 * is simpler than blocking/waking.  We shouldn't
			 * spin very long, since the user thread shouldn't
			 * be blocking between lmGroupCommit & txEnd.
			 */
			WorkDone = 1;

			/*
			 * Remove first transaction from queue
			 */
			TxAnchor.unlock_queue = tblk->cqnext;
			tblk->cqnext = 0;
			if (TxAnchor.unlock_tail == tblk)
				TxAnchor.unlock_tail = 0;

			LAZY_UNLOCK(flags);
			txLazyCommit(tblk);

			/*
			 * We can be running indefinitely if other
			 * processors are adding transactions to this list
			 */
			yield();
			LAZY_LOCK(flags);
		}

		if (WorkDone)
			goto restart;

		LAZY_UNLOCK(flags);
		if (current->flags & PF_FREEZE) {
			refrigerator(PF_IOTHREAD);
		} else {
			DECLARE_WAITQUEUE(wq, current);

			add_wait_queue(&jfs_commit_thread_wait, &wq);
			set_current_state(TASK_INTERRUPTIBLE);
			schedule();
			current->state = TASK_RUNNING;
			remove_wait_queue(&jfs_commit_thread_wait, &wq);
		}
	} while (!jfs_stop_threads);

	if (TxAnchor.unlock_queue)
		jfs_err("jfs_lazycommit being killed w/pending transactions!");
	else
		jfs_info("jfs_lazycommit being killed\n");
	complete(&jfsIOwait);
	return 0;
}
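/*
 * Editor's note (not in the original source): the WorkDone/restart
 * pair makes the daemon re-scan the queue under LAZY_LOCK after any
 * pass in which it dropped the lock around txLazyCommit(), catching
 * transactions that txLazyUnlock() queued in the meantime, before the
 * daemon decides to sleep.
 */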
void txLazyUnlock(struct tblock * tblk)
{
	unsigned long flags;

	LAZY_LOCK(flags);

	if (TxAnchor.unlock_tail)
		TxAnchor.unlock_tail->cqnext = tblk;
	else
		TxAnchor.unlock_queue = tblk;
	TxAnchor.unlock_tail = tblk;
	tblk->cqnext = 0;
	LAZY_UNLOCK(flags);
	wake_up(&jfs_commit_thread_wait);
}
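/*
 * Editor's note (not in the original source): unlock_queue/unlock_tail
 * implement a singly-linked FIFO threaded through tblock->cqnext, with
 * txLazyUnlock() as producer and jfs_lazycommit() as the sole
 * consumer; both ends are serialized by LAZY_LOCK.  Invariants: an
 * empty queue has unlock_queue == unlock_tail == 0, and the tail
 * element always has cqnext == 0.
 */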
static void LogSyncRelease(struct metapage * mp)
{
	struct jfs_log *log = mp->log;

	assert(atomic_read(&mp->nohomeok));
	assert(log);
	atomic_dec(&mp->nohomeok);

	if (atomic_read(&mp->nohomeok))
		return;

	hold_metapage(mp, 0);

	LOGSYNC_LOCK(log);
	mp->log = NULL;
	mp->lsn = 0;
	mp->clsn = 0;
	log->count--;
	list_del_init(&mp->synclist);
	LOGSYNC_UNLOCK(log);

	release_metapage(mp);
}
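/*
 * Editor's note (not in the original source): nohomeok counts the
 * transactions that still forbid writing this metapage home.  Only
 * the last release (the count reaching zero) detaches the page from
 * the log's sync list and clears its lsn, which is what lets the log
 * wrap past it; the hold/release pair keeps the metapage pinned
 * across the list manipulation.
 */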
/*
 * txQuiesce()
 *
 *	Block all new transactions and push anonymous transactions to
 *	completion
 *
 *	This does almost the same thing as jfs_sync below.  We don't
 *	worry about deadlocking when TlocksLow is set, since we would
 *	expect jfs_sync to get us out of that jam.
 */
void txQuiesce(struct super_block *sb)
{
	struct inode *ip;
	struct jfs_inode_info *jfs_ip;
	struct jfs_log *log = JFS_SBI(sb)->log;
	tid_t tid;

	set_bit(log_QUIESCE, &log->flag);

	TXN_LOCK();
restart:
	while (!list_empty(&TxAnchor.anon_list)) {
		jfs_ip = list_entry(TxAnchor.anon_list.next,
				    struct jfs_inode_info,
				    anon_inode_list);
		ip = &jfs_ip->vfs_inode;

		/*
		 * inode will be removed from anonymous list
		 * when it is committed
		 */
		TXN_UNLOCK();
		tid = txBegin(ip->i_sb, COMMIT_INODE | COMMIT_FORCE);
		down(&jfs_ip->commit_sem);
		txCommit(tid, 1, &ip, 0);
		txEnd(tid);
		up(&jfs_ip->commit_sem);
		/*
		 * Just to be safe.  I don't know how
		 * long we can run without blocking
		 */
		yield();
		TXN_LOCK();
	}

	/*
	 * If jfs_sync is running in parallel, there could be some inodes
	 * on anon_list2.  Let's check.
	 */
	if (!list_empty(&TxAnchor.anon_list2)) {
		list_splice(&TxAnchor.anon_list2, &TxAnchor.anon_list);
		INIT_LIST_HEAD(&TxAnchor.anon_list2);
		goto restart;
	}
	TXN_UNLOCK();

	/*
	 * We may need to kick off the group commit
	 */
	jfs_flush_journal(log, 0);
}
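/*
 * Editor's note (not in the original source): the list_splice() +
 * INIT_LIST_HEAD() pair above is equivalent to the list_splice_init()
 * used in the jfs_sync path below.  anon_list2 holds inodes whose
 * commit_sem was contended; folding them back into anon_list and
 * jumping to restart retries them until both lists drain.
 */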
/*
 * txResume()
 *
 *	Allows transactions to start again following txQuiesce
 */
void txResume(struct super_block *sb)
{
	struct jfs_log *log = JFS_SBI(sb)->log;

	clear_bit(log_QUIESCE, &log->flag);
	TXN_WAKEUP(&log->syncwait);
}
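/*
 * Usage sketch (editor's illustration, not in the original source):
 * txQuiesce()/txResume() bracket operations that need the on-disk
 * state stable, e.g. an online resize might do:
 *
 *	txQuiesce(sb);
 *	rc = grow_filesystem(sb, newsize);	(hypothetical helper)
 *	txResume(sb);
 *
 * While log_QUIESCE is set, new transactions block in txBegin() on
 * log->syncwait; TXN_WAKEUP releases them.
 */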
/*
 * jfs_sync(void)
 *
 *	To be run as a kernel daemon.  This is awakened when tlocks run low.
 *	We write any inodes that have anonymous tlocks so they will become
 *	available.
 */
int jfs_sync(void *arg)
{
	struct inode *ip;
	struct jfs_inode_info *jfs_ip;
	int rc;
	tid_t tid;

	daemonize("jfsSync");

	complete(&jfsIOwait);

	do {
		/*
		 * write each inode on the anonymous inode list
		 */
		TXN_LOCK();
		while (TxAnchor.TlocksLow && !list_empty(&TxAnchor.anon_list)) {
			jfs_ip = list_entry(TxAnchor.anon_list.next,
					    struct jfs_inode_info,
					    anon_inode_list);
			ip = &jfs_ip->vfs_inode;

			/*
			 * down_trylock returns 0 on success.  This is
			 * inconsistent with spin_trylock.
			 */
			if (! down_trylock(&jfs_ip->commit_sem)) {
				/*
				 * inode will be removed from anonymous list
				 * when it is committed
				 */
				TXN_UNLOCK();
				tid = txBegin(ip->i_sb, COMMIT_INODE);
				rc = txCommit(tid, 1, &ip, 0);
				txEnd(tid);
				up(&jfs_ip->commit_sem);
				/*
				 * Just to be safe.  I don't know how
				 * long we can run without blocking
				 */
				yield();
				TXN_LOCK();
			} else {
				/* We can't get the commit semaphore.  It may
				 * be held by a thread waiting for tlocks,
				 * so let's not block here.  Save it to
				 * put back on the anon_list.
				 */

				/* Take off anon_list */
				list_del(&jfs_ip->anon_inode_list);

				/* Put on anon_list2 */
				list_add(&jfs_ip->anon_inode_list,
					 &TxAnchor.anon_list2);
			}
		}
		/* Add anon_list2 back to anon_list */
		list_splice_init(&TxAnchor.anon_list2, &TxAnchor.anon_list);

		if (current->flags & PF_FREEZE) {
			TXN_UNLOCK();
			refrigerator(PF_IOTHREAD);
		} else {
			DECLARE_WAITQUEUE(wq, current);

			add_wait_queue(&jfs_sync_thread_wait, &wq);
			set_current_state(TASK_INTERRUPTIBLE);
			TXN_UNLOCK();
			schedule();
			current->state = TASK_RUNNING;
			remove_wait_queue(&jfs_sync_thread_wait, &wq);
		}
	} while (!jfs_stop_threads);

	jfs_info("jfs_sync being killed");
	complete(&jfsIOwait);
	return 0;
}
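/*
 * Editor's note (not in the original source): unlike jfs_lazycommit,
 * this daemon holds TXN_LOCK across the scan and only drops it around
 * each commit, so it can move contended inodes to anon_list2 without
 * racing other TXN_LOCK holders.  Parking them on a second list keeps
 * the while loop from spinning forever on an inode whose commit_sem
 * it can never take.
 */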
#if defined(CONFIG_PROC_FS) && defined(CONFIG_JFS_DEBUG)
int jfs_txanchor_read(char *buffer, char **start, off_t offset, int length,
		      int *eof, void *data)
{
	int len = 0;
	off_t begin;
	char *freewait;
	char *freelockwait;
	char *lowlockwait;

	freewait =
	    waitqueue_active(&TxAnchor.freewait) ? "active" : "empty";
	freelockwait =
	    waitqueue_active(&TxAnchor.freelockwait) ? "active" : "empty";
	lowlockwait =
	    waitqueue_active(&TxAnchor.lowlockwait) ? "active" : "empty";

	len += sprintf(buffer,
		       "JFS TxAnchor\n"
		       "============\n"
		       "freetid = %d\n"
		       "freewait = %s\n"
		       "freelock = %d\n"
		       "freelockwait = %s\n"
		       "lowlockwait = %s\n"
		       "tlocksInUse = %d\n"
		       "TlocksLow = %d\n"
		       "unlock_queue = 0x%p\n"
		       "unlock_tail = 0x%p\n",
		       TxAnchor.freetid,
		       freewait,
		       TxAnchor.freelock,
		       freelockwait,
		       lowlockwait,
		       TxAnchor.tlocksInUse,
		       TxAnchor.TlocksLow,
		       TxAnchor.unlock_queue,
		       TxAnchor.unlock_tail);

	begin = offset;
	*start = buffer + begin;
	len -= begin;

	if (len > length)
		len = length;
	else
		*eof = 1;

	if (len < 0)
		len = 0;

	return len;
}
#endif
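/*
 * Editor's illustration (not in the original source): reading the proc
 * entry this handler backs would yield output shaped like (values
 * invented):
 *
 *	JFS TxAnchor
 *	============
 *	freetid = 3
 *	freewait = empty
 *	freelock = 41
 *	freelockwait = empty
 *	lowlockwait = empty
 *	tlocksInUse = 7
 *	TlocksLow = 0
 *	unlock_queue = 0x00000000
 *	unlock_tail = 0x00000000
 */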
#if defined(CONFIG_PROC_FS) && defined(CONFIG_JFS_STATISTICS)
int jfs_txstats_read(char *buffer, char **start, off_t offset, int length,
		     int *eof, void *data)
{
	int len = 0;
	off_t begin;

	len += sprintf(buffer,
		       "JFS TxStats\n"
		       "===========\n"
		       "calls to txBegin = %d\n"
		       "txBegin blocked by sync barrier = %d\n"
		       "txBegin blocked by tlocks low = %d\n"
		       "txBegin blocked by no free tid = %d\n"
		       "calls to txBeginAnon = %d\n"
		       "txBeginAnon blocked by sync barrier = %d\n"
		       "txBeginAnon blocked by tlocks low = %d\n"
		       "calls to txLockAlloc = %d\n"
		       "txLockAlloc blocked by no free lock = %d\n",
		       TxStat.txBegin,
		       TxStat.txBegin_barrier,
		       TxStat.txBegin_lockslow,
		       TxStat.txBegin_freetid,
		       TxStat.txBeginAnon,
		       TxStat.txBeginAnon_barrier,
		       TxStat.txBeginAnon_lockslow,
		       TxStat.txLockAlloc,
		       TxStat.txLockAlloc_freelock);

	begin = offset;
	*start = buffer + begin;
	len -= begin;

	if (len > length)
		len = length;
	else
		*eof = 1;

	if (len < 0)
		len = 0;

	return len;
}
#endif