 * Copyright (C) 1991, 1992, 2002  Linus Torvalds
 * Start bdflush() with kernel_thread not syscall - Paul Gortmaker, 12/95
 * Removed a lot of unnecessary code and simplified things now that
 * the buffer cache isn't our primary cache - Andrew Tridgell 12/96
 * Speed up hash, lru, and free list operations.  Use gfp() for allocating
 * hash table, use SLAB cache for buffer heads. SMP threading.  -DaveM
 * Added 32k buffer block sizes - these are required on older ARM systems. - RMK
 * async buffer flushing, 1999 Andrea Arcangeli <andrea@suse.de>
#include <linux/kernel.h>
#include <linux/syscalls.h>
#include <linux/percpu.h>
#include <linux/slab.h>
#include <linux/capability.h>
#include <linux/blkdev.h>
#include <linux/file.h>
#include <linux/quotaops.h>
#include <linux/highmem.h>
#include <linux/module.h>
#include <linux/writeback.h>
#include <linux/hash.h>
#include <linux/suspend.h>
#include <linux/buffer_head.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/bio.h>
#include <linux/notifier.h>
#include <linux/cpu.h>
#include <linux/bitops.h>
#include <linux/mpage.h>
#include <linux/bit_spinlock.h>
#include <linux/precache.h>

static int fsync_buffers_list(spinlock_t *lock, struct list_head *list);

#define BH_ENTRY(list) list_entry((list), struct buffer_head, b_assoc_buffers)

inline void
init_buffer(struct buffer_head *bh, bh_end_io_t *handler, void *private)
	bh->b_end_io = handler;
	bh->b_private = private;
EXPORT_SYMBOL(init_buffer);
static int sync_buffer(void *word)
	struct block_device *bd;
	struct buffer_head *bh
		= container_of(word, struct buffer_head, b_state);

	blk_run_address_space(bd->bd_inode->i_mapping);

void __lock_buffer(struct buffer_head *bh)
	wait_on_bit_lock(&bh->b_state, BH_Lock, sync_buffer,
							TASK_UNINTERRUPTIBLE);
EXPORT_SYMBOL(__lock_buffer);

void unlock_buffer(struct buffer_head *bh)
	clear_bit_unlock(BH_Lock, &bh->b_state);
	smp_mb__after_clear_bit();
	wake_up_bit(&bh->b_state, BH_Lock);
EXPORT_SYMBOL(unlock_buffer);
/*
 * Block until a buffer comes unlocked.  This doesn't stop it
 * from becoming locked again - you have to lock it yourself
 * if you want to preserve its state.
 */
void __wait_on_buffer(struct buffer_head *bh)
	wait_on_bit(&bh->b_state, BH_Lock, sync_buffer, TASK_UNINTERRUPTIBLE);
EXPORT_SYMBOL(__wait_on_buffer);
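
/*
 * Illustrative sketch (not part of the original file): a caller that
 * needs the buffer contents to stay stable must take the lock itself;
 * __wait_on_buffer() alone only waits out the current lock holder.
 * The example_* name is hypothetical.
 */
static void __maybe_unused example_stable_access(struct buffer_head *bh)
{
	lock_buffer(bh);	/* sleeps in __lock_buffer() if contended */
	/* ... examine or modify the buffer while it cannot change ... */
	unlock_buffer(bh);	/* wakes any waiters via wake_up_bit() */
}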
static void
__clear_page_buffers(struct page *page)
	ClearPagePrivate(page);
	set_page_private(page, 0);
	page_cache_release(page);

static int quiet_error(struct buffer_head *bh)
	if (!test_bit(BH_Quiet, &bh->b_state) && printk_ratelimit())

static void buffer_io_error(struct buffer_head *bh)
	char b[BDEVNAME_SIZE];
	printk(KERN_ERR "Buffer I/O error on device %s, logical block %Lu\n",
			bdevname(bh->b_bdev, b),
			(unsigned long long)bh->b_blocknr);
/*
 * End-of-IO handler helper function which does not touch the bh after
 * unlocking it.
 * Note: unlock_buffer() sort-of does touch the bh after unlocking it, but
 * a race there is benign: unlock_buffer() only uses the bh's address for
 * hashing after unlocking the buffer, so it doesn't actually touch the bh
 * itself.
 */
static void __end_buffer_read_notouch(struct buffer_head *bh, int uptodate)
	set_buffer_uptodate(bh);
	/* This happens, due to failed READA attempts. */
	clear_buffer_uptodate(bh);

/*
 * Default synchronous end-of-IO handler.  Just mark it up-to-date and
 * unlock the buffer.  This is what ll_rw_block uses too.
 */
void end_buffer_read_sync(struct buffer_head *bh, int uptodate)
	__end_buffer_read_notouch(bh, uptodate);
EXPORT_SYMBOL(end_buffer_read_sync);
void end_buffer_write_sync(struct buffer_head *bh, int uptodate)
	char b[BDEVNAME_SIZE];

	set_buffer_uptodate(bh);
	if (!quiet_error(bh)) {
		printk(KERN_WARNING "lost page write due to "
		       bdevname(bh->b_bdev, b));
	set_buffer_write_io_error(bh);
	clear_buffer_uptodate(bh);
EXPORT_SYMBOL(end_buffer_write_sync);
/*
 * Various filesystems appear to want __find_get_block to be non-blocking.
 * But it's the page lock which protects the buffers.  To get around this,
 * we get exclusion from try_to_free_buffers with the blockdev mapping's
 * private_lock.
 *
 * Hack idea: for the blockdev mapping, i_bufferlist_lock contention
 * may be quite high.  This code could TryLock the page, and if that
 * succeeds, there is no need to take private_lock. (But if
 * private_lock is contended then so is mapping->tree_lock).
 */
static struct buffer_head *
__find_get_block_slow(struct block_device *bdev, sector_t block)
	struct inode *bd_inode = bdev->bd_inode;
	struct address_space *bd_mapping = bd_inode->i_mapping;
	struct buffer_head *ret = NULL;
	struct buffer_head *bh;
	struct buffer_head *head;

	index = block >> (PAGE_CACHE_SHIFT - bd_inode->i_blkbits);
	page = find_get_page(bd_mapping, index);

	spin_lock(&bd_mapping->private_lock);
	if (!page_has_buffers(page))
	head = page_buffers(page);
		if (!buffer_mapped(bh))
		else if (bh->b_blocknr == block) {
		bh = bh->b_this_page;
	} while (bh != head);

	/* we might be here because some of the buffers on this page are
	 * not mapped.  This is due to various races between
	 * file I/O on the block device and getblk.  It gets dealt with
	 * elsewhere, don't buffer_error if we had some unmapped buffers
	 */
	printk("__find_get_block_slow() failed. "
		"block=%llu, b_blocknr=%llu\n",
		(unsigned long long)block,
		(unsigned long long)bh->b_blocknr);
	printk("b_state=0x%08lx, b_size=%zu\n",
		bh->b_state, bh->b_size);
	printk("device blocksize: %d\n", 1 << bd_inode->i_blkbits);

	spin_unlock(&bd_mapping->private_lock);
	page_cache_release(page);
/* If invalidate_buffers() will trash dirty buffers, it means some kind
   of fs corruption is going on. Trashing dirty data always implies losing
   information that was supposed to be just stored on the physical layer
   by the user.

   Thus invalidate_buffers in general usage is not allowed to trash
   dirty buffers. For example ioctl(BLKFLSBUF) expects dirty data to
   be preserved.  These buffers are simply skipped.

   We also skip buffers which are still in use.  For example this can
   happen if a userspace program is reading the block device.

   NOTE: in the case where the user removed a removable-media disk even
   though there was still dirty data not synced to disk (due to a bug in
   the device driver or to a user error), by not destroying the dirty
   buffers we could generate corruption on the next media inserted as
   well; a parameter is therefore necessary to handle this case in the
   safest way possible (trying not to corrupt the newly inserted disk
   with the data belonging to the old, now corrupted, one). For the
   ramdisk, on the other hand, the natural way to release its memory is
   precisely to destroy the dirty buffers.

   These are two special cases.  Normal usage implies that the device
   driver issues a sync on the device (without waiting for I/O completion)
   and then an invalidate_buffers call that doesn't trash dirty buffers.

   For handling cache coherency with the blkdev pagecache the 'update' case
   has been introduced.  It is needed to re-read from disk any pinned
   buffer.  NOTE: re-reading from disk is destructive so we can do it only
   when we assume nobody is changing the buffercache under our I/O and when
   we think the disk contains more recent information than the buffercache.
   The update == 1 pass marks the buffers we need to update, the update == 2
   pass does the actual I/O. */
void invalidate_bdev(struct block_device *bdev)
	struct address_space *mapping = bdev->bd_inode->i_mapping;

	if (mapping->nrpages == 0)

	invalidate_bh_lrus();
	lru_add_drain_all();	/* make sure all lru add caches are flushed */
	invalidate_mapping_pages(mapping, 0, -1);

	/* 99% of the time, we don't need to flush the precache on the bdev.
	 * But, for the strange corners, let's be cautious
	 */
	precache_flush_inode(mapping);
EXPORT_SYMBOL(invalidate_bdev);
/*
 * Kick the writeback threads then try to free up some ZONE_NORMAL memory.
 */
static void free_more_memory(void)
	wakeup_flusher_threads(1024);

	for_each_online_node(nid) {
		(void)first_zones_zonelist(node_zonelist(nid, GFP_NOFS),
						gfp_zone(GFP_NOFS), NULL,
		try_to_free_pages(node_zonelist(nid, GFP_NOFS), 0,

/*
 * I/O completion handler for block_read_full_page() - pages
 * which come unlocked at the end of I/O.
 */
static void end_buffer_async_read(struct buffer_head *bh, int uptodate)
	struct buffer_head *first;
	struct buffer_head *tmp;
	int page_uptodate = 1;

	BUG_ON(!buffer_async_read(bh));

	set_buffer_uptodate(bh);
	clear_buffer_uptodate(bh);
	if (!quiet_error(bh))

	/*
	 * Be _very_ careful from here on. Bad things can happen if
	 * two buffer heads end IO at almost the same time and both
	 * decide that the page is now completely done.
	 */
	first = page_buffers(page);
	local_irq_save(flags);
	bit_spin_lock(BH_Uptodate_Lock, &first->b_state);
	clear_buffer_async_read(bh);
		if (!buffer_uptodate(tmp))
		if (buffer_async_read(tmp)) {
			BUG_ON(!buffer_locked(tmp));
		tmp = tmp->b_this_page;
	bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
	local_irq_restore(flags);

	/*
	 * If none of the buffers had errors and they are all
	 * uptodate then we can set the page uptodate.
	 */
	if (page_uptodate && !PageError(page))
		SetPageUptodate(page);

	bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
	local_irq_restore(flags);

/*
 * Completion handler for block_write_full_page() - pages which are unlocked
 * during I/O, and which have PageWriteback cleared upon I/O completion.
 */
void end_buffer_async_write(struct buffer_head *bh, int uptodate)
	char b[BDEVNAME_SIZE];
	struct buffer_head *first;
	struct buffer_head *tmp;

	BUG_ON(!buffer_async_write(bh));

	set_buffer_uptodate(bh);
	if (!quiet_error(bh)) {
		printk(KERN_WARNING "lost page write due to "
		       bdevname(bh->b_bdev, b));
	set_bit(AS_EIO, &page->mapping->flags);
	set_buffer_write_io_error(bh);
	clear_buffer_uptodate(bh);

	first = page_buffers(page);
	local_irq_save(flags);
	bit_spin_lock(BH_Uptodate_Lock, &first->b_state);
	clear_buffer_async_write(bh);
	tmp = bh->b_this_page;
		if (buffer_async_write(tmp)) {
			BUG_ON(!buffer_locked(tmp));
		tmp = tmp->b_this_page;
	bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
	local_irq_restore(flags);
	end_page_writeback(page);

	bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
	local_irq_restore(flags);
EXPORT_SYMBOL(end_buffer_async_write);
/*
 * If a page's buffers are under async read-in (end_buffer_async_read
 * completion) then there is a possibility that another thread of
 * control could lock one of the buffers after it has completed
 * but while some of the other buffers have not completed.  This
 * locked buffer would confuse end_buffer_async_read() into not unlocking
 * the page.  So the absence of BH_Async_Read tells end_buffer_async_read()
 * that this buffer is not under async I/O.
 *
 * The page comes unlocked when it has no locked buffer_async buffers
 * in it.
 *
 * PageLocked prevents anyone from starting new async I/O reads against
 * any of the page's buffers.
 *
 * PageWriteback is used to prevent simultaneous writeout of the same
 * page.
 *
 * PageLocked prevents anyone from starting writeback of a page which is
 * under read I/O (PageWriteback is only ever set against a locked page).
 */
static void mark_buffer_async_read(struct buffer_head *bh)
	bh->b_end_io = end_buffer_async_read;
	set_buffer_async_read(bh);

static void mark_buffer_async_write_endio(struct buffer_head *bh,
					  bh_end_io_t *handler)
	bh->b_end_io = handler;
	set_buffer_async_write(bh);

void mark_buffer_async_write(struct buffer_head *bh)
	mark_buffer_async_write_endio(bh, end_buffer_async_write);
EXPORT_SYMBOL(mark_buffer_async_write);
/*
 * fs/buffer.c contains helper functions for buffer-backed address spaces'
 * fsync functions.  A common requirement for buffer-based filesystems is
 * that certain data from the backing blockdev needs to be written out for
 * a successful fsync().  For example, ext2 indirect blocks need to be
 * written back and waited upon before fsync() returns.
 *
 * The functions mark_buffer_inode_dirty(), fsync_inode_buffers(),
 * inode_has_buffers() and invalidate_inode_buffers() are provided for the
 * management of a list of dependent buffers at ->i_mapping->private_list.
 *
 * Locking is a little subtle: try_to_free_buffers() will remove buffers
 * from their controlling inode's queue when they are being freed.  But
 * try_to_free_buffers() will be operating against the *blockdev* mapping
 * at the time, not against the S_ISREG file which depends on those buffers.
 * So the locking for private_list is via the private_lock in the address_space
 * which backs the buffers.  Which is different from the address_space
 * against which the buffers are listed.  So for a particular address_space,
 * mapping->private_lock does *not* protect mapping->private_list!  In fact,
 * mapping->private_list will always be protected by the backing blockdev's
 * ->private_lock.
 *
 * Which introduces a requirement: all buffers on an address_space's
 * ->private_list must be from the same address_space: the blockdev's.
 *
 * address_spaces which do not place buffers at ->private_list via these
 * utility functions are free to use private_lock and private_list for
 * whatever they want.  The only requirement is that list_empty(private_list)
 * be true at clear_inode() time.
 *
 * FIXME: clear_inode should not call invalidate_inode_buffers().  The
 * filesystems should do that.  invalidate_inode_buffers() should just go
 * BUG_ON(!list_empty).
 *
 * FIXME: mark_buffer_dirty_inode() is a data-plane operation.  It should
 * take an address_space, not an inode.  And it should be called
 * mark_buffer_dirty_fsync() to clearly define why those buffers are being
 * dirtied.
 *
 * FIXME: mark_buffer_dirty_inode() doesn't need to add the buffer to the
 * list if it is already on a list.  Because if the buffer is on a list,
 * it *must* already be on the right one.  If not, the filesystem is being
 * silly.  This will save a ton of locking.  But first we have to ensure
 * that buffers are taken *off* the old inode's list when they are freed
 * (presumably in truncate).  That requires careful auditing of all
 * filesystems (do it inside bforget()).  It could also be done by bringing
 * b_inode back.
 */

/*
 * The buffer's backing address_space's private_lock must be held.
 */
static void __remove_assoc_queue(struct buffer_head *bh)
	list_del_init(&bh->b_assoc_buffers);
	WARN_ON(!bh->b_assoc_map);
	if (buffer_write_io_error(bh))
		set_bit(AS_EIO, &bh->b_assoc_map->flags);
	bh->b_assoc_map = NULL;

int inode_has_buffers(struct inode *inode)
	return !list_empty(&inode->i_data.private_list);

/*
 * osync is designed to support O_SYNC I/O.  It waits synchronously for
 * all already-submitted IO to complete, but does not queue any new
 * writes to the disk.
 *
 * To do O_SYNC writes, just queue the buffer writes with ll_rw_block as
 * you dirty the buffers, and then use osync_inode_buffers to wait for
 * completion.  Any other dirty buffers which are not yet queued for
 * write will not be flushed to disk by the osync.
 */
static int osync_buffers_list(spinlock_t *lock, struct list_head *list)
	struct buffer_head *bh;

	list_for_each_prev(p, list) {
		if (buffer_locked(bh)) {
		if (!buffer_uptodate(bh))

static void do_thaw_one(struct super_block *sb, void *unused)
	char b[BDEVNAME_SIZE];
	while (sb->s_bdev && !thaw_bdev(sb->s_bdev, sb))
		printk(KERN_WARNING "Emergency Thaw on %s\n",
		       bdevname(sb->s_bdev, b));

static void do_thaw_all(struct work_struct *work)
	iterate_supers(do_thaw_one, NULL);
	printk(KERN_WARNING "Emergency Thaw complete\n");

/**
 * emergency_thaw_all -- forcibly thaw every frozen filesystem
 *
 * Used for emergency unfreeze of all filesystems via SysRq
 */
void emergency_thaw_all(void)
	struct work_struct *work;

	work = kmalloc(sizeof(*work), GFP_ATOMIC);
	INIT_WORK(work, do_thaw_all);

/**
 * sync_mapping_buffers - write out & wait upon a mapping's "associated" buffers
 * @mapping: the mapping which wants those buffers written
 *
 * Starts I/O against the buffers at mapping->private_list, and waits upon
 * that I/O.
 *
 * Basically, this is a convenience function for fsync().
 * @mapping is a file or directory which needs those buffers to be written for
 * a successful fsync().
 */
int sync_mapping_buffers(struct address_space *mapping)
	struct address_space *buffer_mapping = mapping->assoc_mapping;

	if (buffer_mapping == NULL || list_empty(&mapping->private_list))

	return fsync_buffers_list(&buffer_mapping->private_lock,
					&mapping->private_list);
EXPORT_SYMBOL(sync_mapping_buffers);
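
/*
 * Illustrative sketch (hypothetical example_* name; the two-argument
 * ->fsync prototype is assumed for this kernel era): a buffer-backed
 * filesystem dirties its dependent metadata buffers with
 * mark_buffer_dirty_inode() at modification time, then writes them out
 * and waits on them from its ->fsync, roughly:
 */
static int __maybe_unused example_fsync(struct file *file, int datasync)
{
	struct inode *inode = file->f_mapping->host;

	/* write out & wait upon the buffers on ->i_mapping->private_list */
	return sync_mapping_buffers(inode->i_mapping);
}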
/*
 * Called when we've recently written block `bblock', and it is known that
 * `bblock' was for a buffer_boundary() buffer.  This means that the block at
 * `bblock + 1' is probably a dirty indirect block.  Hunt it down and, if it's
 * dirty, schedule it for IO.  So that indirects merge nicely with their data.
 */
void write_boundary_block(struct block_device *bdev,
			sector_t bblock, unsigned blocksize)
	struct buffer_head *bh = __find_get_block(bdev, bblock + 1, blocksize);
	if (buffer_dirty(bh))
		ll_rw_block(WRITE, 1, &bh);

void mark_buffer_dirty_inode(struct buffer_head *bh, struct inode *inode)
	struct address_space *mapping = inode->i_mapping;
	struct address_space *buffer_mapping = bh->b_page->mapping;

	mark_buffer_dirty(bh);
	if (!mapping->assoc_mapping) {
		mapping->assoc_mapping = buffer_mapping;
		BUG_ON(mapping->assoc_mapping != buffer_mapping);
	if (!bh->b_assoc_map) {
		spin_lock(&buffer_mapping->private_lock);
		list_move_tail(&bh->b_assoc_buffers,
				&mapping->private_list);
		bh->b_assoc_map = mapping;
		spin_unlock(&buffer_mapping->private_lock);
EXPORT_SYMBOL(mark_buffer_dirty_inode);
/*
 * Mark the page dirty, and set it dirty in the radix tree, and mark the inode
 * dirty.
 *
 * If warn is true, then emit a warning if the page is not uptodate and has
 * not been truncated.
 */
static void __set_page_dirty(struct page *page,
		struct address_space *mapping, int warn)
	spin_lock_irq(&mapping->tree_lock);
	if (page->mapping) {	/* Race with truncate? */
		WARN_ON_ONCE(warn && !PageUptodate(page));
		account_page_dirtied(page, mapping);
		radix_tree_tag_set(&mapping->page_tree,
				page_index(page), PAGECACHE_TAG_DIRTY);
	spin_unlock_irq(&mapping->tree_lock);
	__mark_inode_dirty(mapping->host, I_DIRTY_PAGES);

/*
 * Add a page to the dirty page list.
 *
 * It is a sad fact of life that this function is called from several places
 * deeply under spinlocking.  It may not sleep.
 *
 * If the page has buffers, the uptodate buffers are set dirty, to preserve
 * dirty-state coherency between the page and the buffers.  If the page does
 * not have buffers then when they are later attached they will all be set
 * dirty.
 *
 * The buffers are dirtied before the page is dirtied.  There's a small race
 * window in which a writepage caller may see the page cleanness but not the
 * buffer dirtiness.  That's fine.  If this code were to set the page dirty
 * before the buffers, a concurrent writepage caller could clear the page dirty
 * bit, see a bunch of clean buffers and we'd end up with dirty buffers/clean
 * page on the dirty page list.
 *
 * We use private_lock to lock against try_to_free_buffers while using the
 * page's buffer list.  Also use this to protect against clean buffers being
 * added to the page after it was set dirty.
 *
 * FIXME: may need to call ->reservepage here as well.  That's rather up to the
 * address_space though.
 */
int __set_page_dirty_buffers(struct page *page)
	struct address_space *mapping = page_mapping(page);

	if (unlikely(!mapping))
		return !TestSetPageDirty(page);

	spin_lock(&mapping->private_lock);
	if (page_has_buffers(page)) {
		struct buffer_head *head = page_buffers(page);
		struct buffer_head *bh = head;

			set_buffer_dirty(bh);
			bh = bh->b_this_page;
		} while (bh != head);
	newly_dirty = !TestSetPageDirty(page);
	spin_unlock(&mapping->private_lock);

		__set_page_dirty(page, mapping, 1);
EXPORT_SYMBOL(__set_page_dirty_buffers);
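
/*
 * Illustrative sketch: block-backed filesystems normally do not open-code
 * page dirtying; they plug this helper straight into their
 * address_space_operations (the example_aops name is hypothetical):
 *
 *	static const struct address_space_operations example_aops = {
 *		.set_page_dirty	= __set_page_dirty_buffers,
 *		...
 *	};
 */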
/*
 * Write out and wait upon a list of buffers.
 *
 * We have conflicting pressures: we want to make sure that all
 * initially dirty buffers get waited on, but that any subsequently
 * dirtied buffers don't.  After all, we don't want fsync to last
 * forever if somebody is actively writing to the file.
 *
 * Do this in two main stages: first we copy dirty buffers to a
 * temporary inode list, queueing the writes as we go.  Then we clean
 * up, waiting for those writes to complete.
 *
 * During this second stage, any subsequent updates to the file may end
 * up refiling the buffer on the original inode's dirty list again, so
 * there is a chance we will end up with a buffer queued for write but
 * not yet completed on that list.  So, as a final cleanup we go through
 * the osync code to catch these locked, dirty buffers without requeuing
 * any newly dirty buffers for write.
 */
static int fsync_buffers_list(spinlock_t *lock, struct list_head *list)
	struct buffer_head *bh;
	struct list_head tmp;
	struct address_space *mapping, *prev_mapping = NULL;

	INIT_LIST_HEAD(&tmp);

	while (!list_empty(list)) {
		bh = BH_ENTRY(list->next);
		mapping = bh->b_assoc_map;
		__remove_assoc_queue(bh);
		/* Avoid race with mark_buffer_dirty_inode() which does
		 * a lockless check and we rely on seeing the dirty bit */
		if (buffer_dirty(bh) || buffer_locked(bh)) {
			list_add(&bh->b_assoc_buffers, &tmp);
			bh->b_assoc_map = mapping;
			if (buffer_dirty(bh)) {
				/*
				 * Ensure any pending I/O completes so that
				 * write_dirty_buffer() actually writes the
				 * current contents - it is a noop if I/O is
				 * still in flight on potentially older
				 * contents.
				 */
				write_dirty_buffer(bh, WRITE_SYNC_PLUG);

				/*
				 * Kick off IO for the previous mapping. Note
				 * that we will not run the very last mapping,
				 * wait_on_buffer() will do that for us
				 * through sync_buffer().
				 */
				if (prev_mapping && prev_mapping != mapping)
					blk_run_address_space(prev_mapping);
				prev_mapping = mapping;

	while (!list_empty(&tmp)) {
		bh = BH_ENTRY(tmp.prev);
		mapping = bh->b_assoc_map;
		__remove_assoc_queue(bh);
		/* Avoid race with mark_buffer_dirty_inode() which does
		 * a lockless check and we rely on seeing the dirty bit */
		if (buffer_dirty(bh)) {
			list_add(&bh->b_assoc_buffers,
				 &mapping->private_list);
			bh->b_assoc_map = mapping;
		if (!buffer_uptodate(bh))

	err2 = osync_buffers_list(lock, list);

/*
 * Invalidate any and all dirty buffers on a given inode.  We are
 * probably unmounting the fs, but that doesn't mean we have already
 * done a sync().  Just drop the buffers from the inode list.
 *
 * NOTE: we take the inode's blockdev's mapping's private_lock.  Which
 * assumes that all the buffers are against the blockdev.  Not true
 * for reiserfs.
 */
void invalidate_inode_buffers(struct inode *inode)
	if (inode_has_buffers(inode)) {
		struct address_space *mapping = &inode->i_data;
		struct list_head *list = &mapping->private_list;
		struct address_space *buffer_mapping = mapping->assoc_mapping;

		spin_lock(&buffer_mapping->private_lock);
		while (!list_empty(list))
			__remove_assoc_queue(BH_ENTRY(list->next));
		spin_unlock(&buffer_mapping->private_lock);
EXPORT_SYMBOL(invalidate_inode_buffers);
/*
 * Remove any clean buffers from the inode's buffer list.  This is called
 * when we're trying to free the inode itself.  Those buffers can pin it.
 *
 * Returns true if all buffers were removed.
 */
int remove_inode_buffers(struct inode *inode)
	if (inode_has_buffers(inode)) {
		struct address_space *mapping = &inode->i_data;
		struct list_head *list = &mapping->private_list;
		struct address_space *buffer_mapping = mapping->assoc_mapping;

		spin_lock(&buffer_mapping->private_lock);
		while (!list_empty(list)) {
			struct buffer_head *bh = BH_ENTRY(list->next);
			if (buffer_dirty(bh)) {
			__remove_assoc_queue(bh);
		spin_unlock(&buffer_mapping->private_lock);

/*
 * Create the appropriate buffers when given a page for the data area and
 * the size of each buffer.  Use the bh->b_this_page linked list to
 * follow the buffers created.  Return NULL if unable to create more
 * buffers.
 *
 * The retry flag is used to differentiate async IO (paging, swapping)
 * which may not fail from ordinary buffer allocations.
 */
struct buffer_head *alloc_page_buffers(struct page *page, unsigned long size,
		int retry)
	struct buffer_head *bh, *head;

	while ((offset -= size) >= 0) {
		bh = alloc_buffer_head(GFP_NOFS);

		bh->b_this_page = head;
		atomic_set(&bh->b_count, 0);

		/* Link the buffer to its page */
		set_bh_page(bh, page, offset);

		init_buffer(bh, NULL, NULL);
/*
 * In case anything failed, we just free everything we got.
 */
		head = head->b_this_page;
		free_buffer_head(bh);

	/*
	 * Return failure for non-async IO requests.  Async IO requests
	 * are not allowed to fail, so we have to wait until buffer heads
	 * become available.  But we don't want tasks sleeping with
	 * partially complete buffers, so all were released above.
	 */

	/* We're _really_ low on memory. Now we just
	 * wait for old buffer heads to become free due to
	 * finishing IO.  Since this is an async request and
	 * the reserve list is empty, we're sure there are
	 * async buffer heads in use.
	 */
EXPORT_SYMBOL_GPL(alloc_page_buffers);
static void
link_dev_buffers(struct page *page, struct buffer_head *head)
	struct buffer_head *bh, *tail;

		bh = bh->b_this_page;
	tail->b_this_page = head;
	attach_page_buffers(page, head);

/*
 * Initialise the state of a blockdev page's buffers.
 */
static void
init_page_buffers(struct page *page, struct block_device *bdev,
			sector_t block, int size)
	struct buffer_head *head = page_buffers(page);
	struct buffer_head *bh = head;
	int uptodate = PageUptodate(page);

		if (!buffer_mapped(bh)) {
			init_buffer(bh, NULL, NULL);
			bh->b_blocknr = block;
				set_buffer_uptodate(bh);
			set_buffer_mapped(bh);
		bh = bh->b_this_page;
	} while (bh != head);
/*
 * Create the page-cache page that contains the requested block.
 *
 * This is used purely for blockdev mappings.
 */
static struct page *
grow_dev_page(struct block_device *bdev, sector_t block,
		pgoff_t index, int size)
	struct inode *inode = bdev->bd_inode;
	struct buffer_head *bh;

	page = find_or_create_page(inode->i_mapping, index,
		(mapping_gfp_mask(inode->i_mapping) & ~__GFP_FS)|__GFP_MOVABLE);

	BUG_ON(!PageLocked(page));

	if (page_has_buffers(page)) {
		bh = page_buffers(page);
		if (bh->b_size == size) {
			init_page_buffers(page, bdev, block, size);
		if (!try_to_free_buffers(page))

	/*
	 * Allocate some buffers for this page
	 */
	bh = alloc_page_buffers(page, size, 0);

	/*
	 * Link the page to the buffers and initialise them.  Take the
	 * lock to be atomic wrt __find_get_block(), which does not
	 * run under the page lock.
	 */
	spin_lock(&inode->i_mapping->private_lock);
	link_dev_buffers(page, bh);
	init_page_buffers(page, bdev, block, size);
	spin_unlock(&inode->i_mapping->private_lock);

	page_cache_release(page);
/*
 * Create buffers for the specified block device block's page.  If
 * that page was dirty, the buffers are set dirty also.
 */
static int
grow_buffers(struct block_device *bdev, sector_t block, int size)
	} while ((size << sizebits) < PAGE_SIZE);

	index = block >> sizebits;

	/*
	 * Check for a block which wants to lie outside our maximum possible
	 * pagecache index.  (this comparison is done using sector_t types).
	 */
	if (unlikely(index != block >> sizebits)) {
		char b[BDEVNAME_SIZE];

		printk(KERN_ERR "%s: requested out-of-range block %llu for "
			__func__, (unsigned long long)block,

	block = index << sizebits;
	/* Create a page with the proper size buffers. */
	page = grow_dev_page(bdev, block, index, size);

	page_cache_release(page);

static struct buffer_head *
__getblk_slow(struct block_device *bdev, sector_t block, int size)
	/* Size must be a multiple of the hard sector size */
	if (unlikely(size & (bdev_logical_block_size(bdev)-1) ||
			(size < 512 || size > PAGE_SIZE))) {
		printk(KERN_ERR "getblk(): invalid block size %d requested\n",
		printk(KERN_ERR "logical block size: %d\n",
			bdev_logical_block_size(bdev));

		struct buffer_head *bh;

		bh = __find_get_block(bdev, block, size);

		ret = grow_buffers(bdev, block, size);
/*
 * The relationship between dirty buffers and dirty pages:
 *
 * Whenever a page has any dirty buffers, the page's dirty bit is set, and
 * the page is tagged dirty in its radix tree.
 *
 * At all times, the dirtiness of the buffers represents the dirtiness of
 * subsections of the page.  If the page has buffers, the page dirty bit is
 * merely a hint about the true dirty state.
 *
 * When a page is set dirty in its entirety, all its buffers are marked dirty
 * (if the page has buffers).
 *
 * When a buffer is marked dirty, its page is dirtied, but the page's other
 * buffers are not.
 *
 * Also.  When blockdev buffers are explicitly read with bread(), they
 * individually become uptodate.  But their backing page remains not
 * uptodate - even if all of its buffers are uptodate.  A subsequent
 * block_read_full_page() against that page will discover all the uptodate
 * buffers, will set the page uptodate and will perform no I/O.
 */

/**
 * mark_buffer_dirty - mark a buffer_head as needing writeout
 * @bh: the buffer_head to mark dirty
 *
 * mark_buffer_dirty() will set the dirty bit against the buffer, then set its
 * backing page dirty, then tag the page as dirty in its address_space's radix
 * tree and then attach the address_space's inode to its superblock's dirty
 * inode list.
 *
 * mark_buffer_dirty() is atomic.  It takes bh->b_page->mapping->private_lock,
 * mapping->tree_lock and the global inode_lock.
 */
void mark_buffer_dirty(struct buffer_head *bh)
	WARN_ON_ONCE(!buffer_uptodate(bh));

	/*
	 * Very *carefully* optimize the it-is-already-dirty case.
	 *
	 * Don't let the final "is it dirty" escape to before we
	 * perhaps modified the buffer.
	 */
	if (buffer_dirty(bh)) {
		if (buffer_dirty(bh))

	if (!test_set_buffer_dirty(bh)) {
		struct page *page = bh->b_page;
		if (!TestSetPageDirty(page)) {
			struct address_space *mapping = page_mapping(page);
				__set_page_dirty(page, mapping, 0);
EXPORT_SYMBOL(mark_buffer_dirty);
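
/*
 * Illustrative sketch (hypothetical example_* name): the canonical way
 * to update an on-disk metadata block through the buffer cache - read
 * it, modify the cached data, then mark the buffer dirty so writeback
 * will pick it up.
 */
static int __maybe_unused example_update_byte(struct super_block *sb,
					      sector_t block, unsigned offset, u8 val)
{
	struct buffer_head *bh = sb_bread(sb, block);	/* read + reference */

	if (!bh)
		return -EIO;
	((u8 *)bh->b_data)[offset] = val;	/* modify the cached copy */
	mark_buffer_dirty(bh);			/* schedule it for writeout */
	brelse(bh);				/* drop our reference */
	return 0;
}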
/*
 * Decrement a buffer_head's reference count.  If all buffers against a page
 * have zero reference count, are clean and unlocked, and if the page is clean
 * and unlocked then try_to_free_buffers() may strip the buffers from the page
 * in preparation for freeing it (sometimes, rarely, buffers are removed from
 * a page but it ends up not being freed, and buffers may later be reattached).
 */
void __brelse(struct buffer_head *buf)
	if (atomic_read(&buf->b_count)) {

	WARN(1, KERN_ERR "VFS: brelse: Trying to free free buffer\n");
EXPORT_SYMBOL(__brelse);

/*
 * bforget() is like brelse(), except it discards any
 * potentially dirty data.
 */
void __bforget(struct buffer_head *bh)
	clear_buffer_dirty(bh);
	if (bh->b_assoc_map) {
		struct address_space *buffer_mapping = bh->b_page->mapping;

		spin_lock(&buffer_mapping->private_lock);
		list_del_init(&bh->b_assoc_buffers);
		bh->b_assoc_map = NULL;
		spin_unlock(&buffer_mapping->private_lock);
EXPORT_SYMBOL(__bforget);

static struct buffer_head *__bread_slow(struct buffer_head *bh)
	if (buffer_uptodate(bh)) {

	bh->b_end_io = end_buffer_read_sync;
	submit_bh(READ, bh);
	if (buffer_uptodate(bh))

/*
 * Per-cpu buffer LRU implementation.  To reduce the cost of __find_get_block().
 * The bhs[] array is sorted - newest buffer is at bhs[0].  Buffers have their
 * refcount elevated by one when they're in an LRU.  A buffer can only appear
 * once in a particular CPU's LRU.  A single buffer can be present in multiple
 * CPU's LRUs at the same time.
 *
 * This is a transparent caching front-end to sb_bread(), sb_getblk() and
 * sb_find_get_block().
 *
 * The LRUs themselves only need locking against invalidate_bh_lrus.  We use
 * a local interrupt disable for that.
 */

#define BH_LRU_SIZE	8

	struct buffer_head *bhs[BH_LRU_SIZE];

static DEFINE_PER_CPU(struct bh_lru, bh_lrus) = {{ NULL }};

#define bh_lru_lock()	local_irq_disable()
#define bh_lru_unlock()	local_irq_enable()
#define bh_lru_lock()	preempt_disable()
#define bh_lru_unlock()	preempt_enable()

static inline void check_irqs_on(void)
#ifdef irqs_disabled
	BUG_ON(irqs_disabled());

/*
 * The LRU management algorithm is dopey-but-simple.  Sorry.
 */
static void bh_lru_install(struct buffer_head *bh)
	struct buffer_head *evictee = NULL;

	lru = &__get_cpu_var(bh_lrus);
	if (lru->bhs[0] != bh) {
		struct buffer_head *bhs[BH_LRU_SIZE];

		for (in = 0; in < BH_LRU_SIZE; in++) {
			struct buffer_head *bh2 = lru->bhs[in];

			if (out >= BH_LRU_SIZE) {
				BUG_ON(evictee != NULL);
		while (out < BH_LRU_SIZE)
		memcpy(lru->bhs, bhs, sizeof(bhs));
1350 * Perform a pagecache lookup for the matching buffer. If it's there, refresh
1351 * it in the LRU and mark it as accessed. If it is not present then return
1354 struct buffer_head *
1355 __find_get_block(struct block_device *bdev, sector_t block, unsigned size)
1357 struct buffer_head *bh = lookup_bh_lru(bdev, block, size);
1360 bh = __find_get_block_slow(bdev, block);
1368 EXPORT_SYMBOL(__find_get_block);
1371 * __getblk will locate (and, if necessary, create) the buffer_head
1372 * which corresponds to the passed block_device, block and size. The
1373 * returned buffer has its reference count incremented.
1375 * __getblk() cannot fail - it just keeps trying. If you pass it an
1376 * illegal block number, __getblk() will happily return a buffer_head
1377 * which represents the non-existent block. Very weird.
1379 * __getblk() will lock up the machine if grow_dev_page's try_to_free_buffers()
1380 * attempt is failing. FIXME, perhaps?
1382 struct buffer_head *
1383 __getblk(struct block_device *bdev, sector_t block, unsigned size)
1385 struct buffer_head *bh = __find_get_block(bdev, block, size);
1389 bh = __getblk_slow(bdev, block, size);
1392 EXPORT_SYMBOL(__getblk);
/*
 * Do async read-ahead on a buffer.
 */
void __breadahead(struct block_device *bdev, sector_t block, unsigned size)
	struct buffer_head *bh = __getblk(bdev, block, size);

		ll_rw_block(READA, 1, &bh);
EXPORT_SYMBOL(__breadahead);

/**
 * __bread() - reads a specified block and returns the bh
 * @bdev: the block_device to read from
 * @block: number of block
 * @size: size (in bytes) to read
 *
 * Reads a specified block, and returns buffer head that contains it.
 * It returns NULL if the block was unreadable.
 */
struct buffer_head *
__bread(struct block_device *bdev, sector_t block, unsigned size)
	struct buffer_head *bh = __getblk(bdev, block, size);

	if (likely(bh) && !buffer_uptodate(bh))
		bh = __bread_slow(bh);
EXPORT_SYMBOL(__bread);
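
/*
 * Illustrative sketch (hypothetical example_* name): a plain synchronous
 * read.  On success the caller owns a reference and must brelse() it.
 */
static int __maybe_unused example_read_block(struct block_device *bdev,
					     sector_t block, unsigned size)
{
	struct buffer_head *bh = __bread(bdev, block, size);

	if (!bh)
		return -EIO;	/* the block was unreadable */
	/* ... bh->b_data is valid for bh->b_size bytes here ... */
	brelse(bh);
	return 0;
}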
/*
 * invalidate_bh_lrus() is called rarely - but not only at unmount.
 * This doesn't race because it runs in each cpu either in irq
 * or with preempt disabled.
 */
static void invalidate_bh_lru(void *arg)
	struct bh_lru *b = &get_cpu_var(bh_lrus);

	for (i = 0; i < BH_LRU_SIZE; i++) {
	put_cpu_var(bh_lrus);

void invalidate_bh_lrus(void)
	on_each_cpu(invalidate_bh_lru, NULL, 1);
EXPORT_SYMBOL_GPL(invalidate_bh_lrus);

void set_bh_page(struct buffer_head *bh,
		struct page *page, unsigned long offset)
	BUG_ON(offset >= PAGE_SIZE);
	if (PageHighMem(page))
		/*
		 * This catches illegal uses and preserves the offset:
		 */
		bh->b_data = (char *)(0 + offset);
		bh->b_data = page_address(page) + offset;
EXPORT_SYMBOL(set_bh_page);
/*
 * Called when truncating a buffer on a page completely.
 */
static void discard_buffer(struct buffer_head *bh)
	clear_buffer_dirty(bh);
	clear_buffer_mapped(bh);
	clear_buffer_req(bh);
	clear_buffer_new(bh);
	clear_buffer_delay(bh);
	clear_buffer_unwritten(bh);

/**
 * block_invalidatepage - invalidate part or all of a buffer-backed page
 *
 * @page: the page which is affected
 * @offset: the index of the truncation point
 *
 * block_invalidatepage() is called when all or part of the page has become
 * invalidated by a truncate operation.
 *
 * block_invalidatepage() does not have to release all buffers, but it must
 * ensure that no dirty buffer is left outside @offset and that no I/O
 * is underway against any of the blocks which are outside the truncation
 * point.  Because the caller is about to free (and possibly reuse) those
 * blocks on-disk.
 */
void block_invalidatepage(struct page *page, unsigned long offset)
	struct buffer_head *head, *bh, *next;
	unsigned int curr_off = 0;

	BUG_ON(!PageLocked(page));
	if (!page_has_buffers(page))

	head = page_buffers(page);
		unsigned int next_off = curr_off + bh->b_size;
		next = bh->b_this_page;

		/*
		 * is this block fully invalidated?
		 */
		if (offset <= curr_off)
		curr_off = next_off;
	} while (bh != head);

	/*
	 * We release buffers only if the entire page is being invalidated.
	 * The get_block cached value has been unconditionally invalidated,
	 * so real IO is not possible anymore.
	 */
		try_to_release_page(page, 0);
EXPORT_SYMBOL(block_invalidatepage);
/*
 * We attach and possibly dirty the buffers atomically wrt
 * __set_page_dirty_buffers() via private_lock.  try_to_free_buffers
 * is already excluded via the page lock.
 */
void create_empty_buffers(struct page *page,
			unsigned long blocksize, unsigned long b_state)
	struct buffer_head *bh, *head, *tail;

	head = alloc_page_buffers(page, blocksize, 1);
		bh->b_state |= b_state;
		bh = bh->b_this_page;
	tail->b_this_page = head;

	spin_lock(&page->mapping->private_lock);
	if (PageUptodate(page) || PageDirty(page)) {
			if (PageDirty(page))
				set_buffer_dirty(bh);
			if (PageUptodate(page))
				set_buffer_uptodate(bh);
			bh = bh->b_this_page;
		} while (bh != head);
	attach_page_buffers(page, head);
	spin_unlock(&page->mapping->private_lock);
EXPORT_SYMBOL(create_empty_buffers);
/*
 * We are taking a block for data and we don't want any output from any
 * buffer-cache aliases starting from return from that function and
 * until the moment when something will explicitly mark the buffer
 * dirty (hopefully that will not happen until we free that block ;-)
 * We don't even need to mark it not-uptodate - nobody can expect
 * anything from a newly allocated buffer anyway.  We used to use
 * unmap_buffer() for such invalidation, but that was wrong.  We definitely
 * don't want to mark the alias unmapped, for example - it would confuse
 * anyone who might pick it with bread() afterwards...
 *
 * Also..  Note that bforget() doesn't lock the buffer.  So there can
 * be writeout I/O going on against recently-freed buffers.  We don't
 * wait on that I/O in bforget() - it's more efficient to wait on the I/O
 * only if we really need to.  That happens here.
 */
void unmap_underlying_metadata(struct block_device *bdev, sector_t block)
	struct buffer_head *old_bh;

	old_bh = __find_get_block_slow(bdev, block);
		clear_buffer_dirty(old_bh);
		wait_on_buffer(old_bh);
		clear_buffer_req(old_bh);
EXPORT_SYMBOL(unmap_underlying_metadata);
/*
 * NOTE! All mapped/uptodate combinations are valid:
 *
 *	Mapped	Uptodate	Meaning
 *
 *	No	No		"unknown" - must do get_block()
 *	No	Yes		"hole" - zero-filled
 *	Yes	No		"allocated" - allocated on disk, not read in
 *	Yes	Yes		"valid" - allocated and up-to-date in memory.
 *
 * "Dirty" is valid only with the last case (mapped+uptodate).
 */

/*
 * While block_write_full_page is writing back the dirty buffers under
 * the page lock, whoever dirtied the buffers may decide to clean them
 * again at any time.  We handle that by only looking at the buffer
 * state inside lock_buffer().
 *
 * If block_write_full_page() is called for regular writeback
 * (wbc->sync_mode == WB_SYNC_NONE) then it will redirty a page which has a
 * locked buffer.  This only can happen if someone has written the buffer
 * directly, with submit_bh().  At the address_space level PageWriteback
 * prevents this contention from occurring.
 *
 * If block_write_full_page() is called with wbc->sync_mode ==
 * WB_SYNC_ALL, the writes are posted using WRITE_SYNC_PLUG; this
 * causes the writes to be flagged as synchronous writes, but the
 * block device queue will NOT be unplugged, since usually many pages
 * will be pushed out before the higher-level caller actually
 * waits for the writes to be completed.  The various wait functions,
 * such as wait_on_writeback_range() will ultimately call sync_page()
 * which will ultimately call blk_run_backing_dev(), which will end up
 * unplugging the device queue.
 */
static int __block_write_full_page(struct inode *inode, struct page *page,
			get_block_t *get_block, struct writeback_control *wbc,
			bh_end_io_t *handler)
	sector_t last_block;
	struct buffer_head *bh, *head;
	const unsigned blocksize = 1 << inode->i_blkbits;
	int nr_underway = 0;
	int write_op = (wbc->sync_mode == WB_SYNC_ALL ?
			WRITE_SYNC_PLUG : WRITE);

	BUG_ON(!PageLocked(page));

	last_block = (i_size_read(inode) - 1) >> inode->i_blkbits;

	if (!page_has_buffers(page)) {
		create_empty_buffers(page, blocksize,
					(1 << BH_Dirty)|(1 << BH_Uptodate));

	/*
	 * Be very careful.  We have no exclusion from __set_page_dirty_buffers
	 * here, and the (potentially unmapped) buffers may become dirty at
	 * any time.  If a buffer becomes dirty here after we've inspected it
	 * then we just miss that fact, and the page stays dirty.
	 *
	 * Buffers outside i_size may be dirtied by __set_page_dirty_buffers;
	 * handle that here by just cleaning them.
	 */

	block = (sector_t)page->index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
	head = page_buffers(page);

	/*
	 * Get all the dirty buffers mapped to disk addresses and
	 * handle any aliases from the underlying blockdev's mapping.
	 */
		if (block > last_block) {
			/*
			 * mapped buffers outside i_size will occur, because
			 * this page can be outside i_size when there is a
			 * truncate in progress.
			 */
			/*
			 * The buffer was zeroed by block_write_full_page()
			 */
			clear_buffer_dirty(bh);
			set_buffer_uptodate(bh);
		} else if ((!buffer_mapped(bh) || buffer_delay(bh)) &&
			WARN_ON(bh->b_size != blocksize);
			err = get_block(inode, block, bh, 1);
			clear_buffer_delay(bh);
			if (buffer_new(bh)) {
				/* blockdev mappings never come here */
				clear_buffer_new(bh);
				unmap_underlying_metadata(bh->b_bdev,
		bh = bh->b_this_page;
	} while (bh != head);

		if (!buffer_mapped(bh))
		/*
		 * If it's a fully non-blocking write attempt and we cannot
		 * lock the buffer then redirty the page.  Note that this can
		 * potentially cause a busy-wait loop from writeback threads
		 * and kswapd activity, but those code paths have their own
		 * higher-level throttling.
		 */
		if (wbc->sync_mode != WB_SYNC_NONE) {
		} else if (!trylock_buffer(bh)) {
			redirty_page_for_writepage(wbc, page);
		if (test_clear_buffer_dirty(bh)) {
			mark_buffer_async_write_endio(bh, handler);
	} while ((bh = bh->b_this_page) != head);

	/*
	 * The page and its buffers are protected by PageWriteback(), so we can
	 * drop the bh refcounts early.
	 */
	BUG_ON(PageWriteback(page));
	set_page_writeback(page);

		struct buffer_head *next = bh->b_this_page;
		if (buffer_async_write(bh)) {
			submit_bh(write_op, bh);
	} while (bh != head);

	if (nr_underway == 0) {
		/*
		 * The page was marked dirty, but the buffers were
		 * clean.  Someone wrote them back by hand with
		 * ll_rw_block/submit_bh.  A rare case.
		 */
		end_page_writeback(page);

		/*
		 * The page and buffer_heads can be released at any time from
		 * here on.
		 */

	/*
	 * ENOSPC, or some other error.  We may already have added some
	 * blocks to the file, so we need to write these out to avoid
	 * exposing stale data.
	 * The page is currently locked and not marked for writeback.
	 */
	/* Recovery: lock and submit the mapped buffers */
		if (buffer_mapped(bh) && buffer_dirty(bh) &&
		    !buffer_delay(bh)) {
			mark_buffer_async_write_endio(bh, handler);
			/*
			 * The buffer may have been set dirty during
			 * attachment to a dirty page.
			 */
			clear_buffer_dirty(bh);
	} while ((bh = bh->b_this_page) != head);
	BUG_ON(PageWriteback(page));
	mapping_set_error(page->mapping, err);
	set_page_writeback(page);
		struct buffer_head *next = bh->b_this_page;
		if (buffer_async_write(bh)) {
			clear_buffer_dirty(bh);
			submit_bh(write_op, bh);
	} while (bh != head);

/*
 * If a page has any new buffers, zero them out here, and mark them uptodate
 * and dirty so they'll be written out (in order to prevent uninitialised
 * block data from leaking).  And clear the new bit.
 */
void page_zero_new_buffers(struct page *page, unsigned from, unsigned to)
	unsigned int block_start, block_end;
	struct buffer_head *head, *bh;

	BUG_ON(!PageLocked(page));
	if (!page_has_buffers(page))

	bh = head = page_buffers(page);
		block_end = block_start + bh->b_size;

		if (buffer_new(bh)) {
			if (block_end > from && block_start < to) {
				if (!PageUptodate(page)) {
					unsigned start, size;

					start = max(from, block_start);
					size = min(to, block_end) - start;

					zero_user(page, start, size);
					set_buffer_uptodate(bh);

				clear_buffer_new(bh);
				mark_buffer_dirty(bh);

		block_start = block_end;
		bh = bh->b_this_page;
	} while (bh != head);
EXPORT_SYMBOL(page_zero_new_buffers);
int __block_write_begin(struct page *page, loff_t pos, unsigned len,
		get_block_t *get_block)
	unsigned from = pos & (PAGE_CACHE_SIZE - 1);
	unsigned to = from + len;
	struct inode *inode = page->mapping->host;
	unsigned block_start, block_end;
	unsigned blocksize, bbits;
	struct buffer_head *bh, *head, *wait[2], **wait_bh = wait;

	BUG_ON(!PageLocked(page));
	BUG_ON(from > PAGE_CACHE_SIZE);
	BUG_ON(to > PAGE_CACHE_SIZE);

	blocksize = 1 << inode->i_blkbits;
	if (!page_has_buffers(page))
		create_empty_buffers(page, blocksize, 0);
	head = page_buffers(page);

	bbits = inode->i_blkbits;
	block = (sector_t)page->index << (PAGE_CACHE_SHIFT - bbits);

	for (bh = head, block_start = 0; bh != head || !block_start;
	    block++, block_start = block_end, bh = bh->b_this_page) {
		block_end = block_start + blocksize;
		if (block_end <= from || block_start >= to) {
			if (PageUptodate(page)) {
				if (!buffer_uptodate(bh))
					set_buffer_uptodate(bh);

		clear_buffer_new(bh);
		if (!buffer_mapped(bh)) {
			WARN_ON(bh->b_size != blocksize);
			err = get_block(inode, block, bh, 1);

			if (buffer_new(bh)) {
				unmap_underlying_metadata(bh->b_bdev,
				if (PageUptodate(page)) {
					clear_buffer_new(bh);
					set_buffer_uptodate(bh);
					mark_buffer_dirty(bh);

				if (block_end > to || block_start < from)
					zero_user_segments(page,

		if (PageUptodate(page)) {
			if (!buffer_uptodate(bh))
				set_buffer_uptodate(bh);

		if (!buffer_uptodate(bh) && !buffer_delay(bh) &&
		    !buffer_unwritten(bh) &&
		    (block_start < from || block_end > to)) {
			ll_rw_block(READ, 1, &bh);

	/*
	 * If we issued read requests - let them complete.
	 */
	while (wait_bh > wait) {
		wait_on_buffer(*--wait_bh);
		if (!buffer_uptodate(*wait_bh))

	if (unlikely(err)) {
		page_zero_new_buffers(page, from, to);
		ClearPageUptodate(page);
EXPORT_SYMBOL(__block_write_begin);

static int __block_commit_write(struct inode *inode, struct page *page,
		unsigned from, unsigned to)
	unsigned block_start, block_end;
	struct buffer_head *bh, *head;

	blocksize = 1 << inode->i_blkbits;

	for (bh = head = page_buffers(page), block_start = 0;
	    bh != head || !block_start;
	    block_start = block_end, bh = bh->b_this_page) {
		block_end = block_start + blocksize;
		if (block_end <= from || block_start >= to) {
			if (!buffer_uptodate(bh))
			set_buffer_uptodate(bh);
			mark_buffer_dirty(bh);
		clear_buffer_new(bh);

	/*
	 * If this is a partial write which happened to make all buffers
	 * uptodate then we can optimize away a bogus readpage() for
	 * the next read().  Here we 'discover' whether the page went
	 * uptodate as a result of this (potentially partial) write.
	 */
		SetPageUptodate(page);

/*
 * block_write_begin takes care of the basic task of block allocation and
 * bringing partial write blocks uptodate first.
 *
 * The filesystem needs to handle block truncation upon failure.
 */
int block_write_begin(struct address_space *mapping, loff_t pos, unsigned len,
			unsigned flags, struct page **pagep, get_block_t *get_block)
	pgoff_t index = pos >> PAGE_CACHE_SHIFT;

	page = grab_cache_page_write_begin(mapping, index, flags);

	status = __block_write_begin(page, pos, len, get_block);
	if (unlikely(status)) {
		page_cache_release(page);
EXPORT_SYMBOL(block_write_begin);
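
/*
 * Illustrative sketch (hypothetical example_* names): a filesystem's
 * ->write_begin usually just wraps block_write_begin() with its own
 * block-mapping callback.  The trivial identity get_block below stands
 * in for a real allocator, in the style of a blockdev mapping.
 */
static int example_get_block(struct inode *inode, sector_t iblock,
			     struct buffer_head *bh, int create)
{
	bh->b_bdev = inode->i_sb->s_bdev;	/* assumed 1:1 disk mapping */
	bh->b_blocknr = iblock;
	set_buffer_mapped(bh);
	return 0;
}

static int __maybe_unused example_write_begin(struct file *file,
		struct address_space *mapping, loff_t pos, unsigned len,
		unsigned flags, struct page **pagep, void **fsdata)
{
	return block_write_begin(mapping, pos, len, flags, pagep,
				 example_get_block);
}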
int block_write_end(struct file *file, struct address_space *mapping,
			loff_t pos, unsigned len, unsigned copied,
			struct page *page, void *fsdata)
	struct inode *inode = mapping->host;

	start = pos & (PAGE_CACHE_SIZE - 1);

	if (unlikely(copied < len)) {
		/*
		 * The buffers that were written will now be uptodate, so we
		 * don't have to worry about a readpage reading them and
		 * overwriting a partial write.  However if we have encountered
		 * a short write and only partially written into a buffer, it
		 * will not be marked uptodate, so a readpage might come in and
		 * destroy our partial write.
		 *
		 * Do the simplest thing, and just treat any short write to a
		 * non uptodate page as a zero-length write, and force the
		 * caller to redo the whole thing.
		 */
		if (!PageUptodate(page))

		page_zero_new_buffers(page, start+copied, start+len);
	flush_dcache_page(page);

	/* This could be a short (even 0-length) commit */
	__block_commit_write(inode, page, start, start+copied);
EXPORT_SYMBOL(block_write_end);

int generic_write_end(struct file *file, struct address_space *mapping,
			loff_t pos, unsigned len, unsigned copied,
			struct page *page, void *fsdata)
	struct inode *inode = mapping->host;
	int i_size_changed = 0;

	copied = block_write_end(file, mapping, pos, len, copied, page, fsdata);

	/*
	 * No need to use i_size_read() here, the i_size
	 * cannot change under us because we hold i_mutex.
	 *
	 * But it's important to update i_size while still holding page lock:
	 * page writeout could otherwise come in and zero beyond i_size.
	 */
	if (pos+copied > inode->i_size) {
		i_size_write(inode, pos+copied);

	page_cache_release(page);

	/*
	 * Don't mark the inode dirty under page lock.  First, it unnecessarily
	 * makes the holding time of page lock longer.  Second, it forces lock
	 * ordering of page lock and transaction start for journaling
	 * filesystems.
	 */
		mark_inode_dirty(inode);
EXPORT_SYMBOL(generic_write_end);

/*
 * block_is_partially_uptodate checks whether buffers within a page are
 * uptodate or not.
 *
 * Returns true if all buffers which correspond to a file portion
 * we want to read are uptodate.
 */
int block_is_partially_uptodate(struct page *page, read_descriptor_t *desc,
			unsigned long from)
	struct inode *inode = page->mapping->host;
	unsigned block_start, block_end, blocksize;
	struct buffer_head *bh, *head;

	if (!page_has_buffers(page))

	blocksize = 1 << inode->i_blkbits;
	to = min_t(unsigned, PAGE_CACHE_SIZE - from, desc->count);
	if (from < blocksize && to > PAGE_CACHE_SIZE - blocksize)

	head = page_buffers(page);
		block_end = block_start + blocksize;
		if (block_end > from && block_start < to) {
			if (!buffer_uptodate(bh)) {
			if (block_end >= to)
		block_start = block_end;
		bh = bh->b_this_page;
	} while (bh != head);
EXPORT_SYMBOL(block_is_partially_uptodate);
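
/*
 * Illustrative sketch: filesystems expose this through their
 * address_space_operations, e.g.
 *
 *	.is_partially_uptodate = block_is_partially_uptodate,
 *
 * so the generic read path can skip locking a page whose relevant
 * buffers are already uptodate.
 */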
2112 * Generic "read page" function for block devices that have the normal
2113 * get_block functionality. This is most of the block device filesystems.
2114 * Reads the page asynchronously --- the unlock_buffer() and
2115 * set/clear_buffer_uptodate() functions propagate buffer state into the
2116 * page struct once IO has completed.
2118 int block_read_full_page(struct page *page, get_block_t *get_block)
2120 struct inode *inode = page->mapping->host;
2121 sector_t iblock, lblock;
2122 struct buffer_head *bh, *head, *arr[MAX_BUF_PER_PAGE];
2123 unsigned int blocksize;
2125 int fully_mapped = 1;
2127 BUG_ON(!PageLocked(page));
2128 blocksize = 1 << inode->i_blkbits;
2129 if (!page_has_buffers(page))
2130 create_empty_buffers(page, blocksize, 0);
2131 head = page_buffers(page);
2133 iblock = (sector_t)page->index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
2134 lblock = (i_size_read(inode)+blocksize-1) >> inode->i_blkbits;
2140 if (buffer_uptodate(bh))
2143 if (!buffer_mapped(bh)) {
2147 if (iblock < lblock) {
2148 WARN_ON(bh->b_size != blocksize);
2149 err = get_block(inode, iblock, bh, 0);
2153 if (!buffer_mapped(bh)) {
2154 zero_user(page, i * blocksize, blocksize);
2156 set_buffer_uptodate(bh);
2160 * get_block() might have updated the buffer
2163 if (buffer_uptodate(bh))
2167 } while (i++, iblock++, (bh = bh->b_this_page) != head);
2170 SetPageMappedToDisk(page);
2174 * All buffers are uptodate - we can set the page uptodate
2175 * as well. But not if get_block() returned an error.
2177 if (!PageError(page))
2178 SetPageUptodate(page);
2183 /* Stage two: lock the buffers */
2184 for (i = 0; i < nr; i++) {
2187 mark_buffer_async_read(bh);
2191 * Stage 3: start the IO. Check for uptodateness
2192 * inside the buffer lock in case another process reading
2193 * the underlying blockdev brought it uptodate (the sct fix).
2195 for (i = 0; i < nr; i++) {
2197 if (buffer_uptodate(bh))
2198 end_buffer_async_read(bh, 1);
2200 submit_bh(READ, bh);
2204 EXPORT_SYMBOL(block_read_full_page);
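
/*
 * Illustrative sketch: a minimal ->readpage for a filesystem with a
 * conventional get_block (reusing the hypothetical example_get_block
 * defined above):
 */
static int __maybe_unused example_readpage(struct file *file, struct page *page)
{
	return block_read_full_page(page, example_get_block);
}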
/* utility function for filesystems that need to do work on expanding
 * truncates.  Uses filesystem pagecache writes to allow the filesystem to
 * deal with the hole.
 */
int generic_cont_expand_simple(struct inode *inode, loff_t size)
	struct address_space *mapping = inode->i_mapping;

	err = inode_newsize_ok(inode, size);

	err = pagecache_write_begin(NULL, mapping, size, 0,
				AOP_FLAG_UNINTERRUPTIBLE|AOP_FLAG_CONT_EXPAND,

	err = pagecache_write_end(NULL, mapping, size, 0, 0, page, fsdata);
EXPORT_SYMBOL(generic_cont_expand_simple);
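
/*
 * Illustrative sketch (hypothetical example_* name): an expanding
 * truncate in a filesystem's ->setattr path can lean on the helper
 * above to zero-fill through the pagecache before the new size is
 * committed:
 */
static int __maybe_unused example_expand(struct inode *inode, loff_t newsize)
{
	if (newsize > inode->i_size)
		return generic_cont_expand_simple(inode, newsize);
	return 0;
}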
static int cont_expand_zero(struct file *file, struct address_space *mapping,
			loff_t pos, loff_t *bytes)
{
	struct inode *inode = mapping->host;
	unsigned blocksize = 1 << inode->i_blkbits;
	struct page *page;
	void *fsdata;
	pgoff_t index, curidx;
	loff_t curpos;
	unsigned zerofrom, offset, len;
	int err = 0;

	index = pos >> PAGE_CACHE_SHIFT;
	offset = pos & ~PAGE_CACHE_MASK;

	while (index > (curidx = (curpos = *bytes)>>PAGE_CACHE_SHIFT)) {
		zerofrom = curpos & ~PAGE_CACHE_MASK;
		if (zerofrom & (blocksize-1)) {
			*bytes |= (blocksize-1);
			(*bytes)++;
		}
		len = PAGE_CACHE_SIZE - zerofrom;

		err = pagecache_write_begin(file, mapping, curpos, len,
						AOP_FLAG_UNINTERRUPTIBLE,
						&page, &fsdata);
		if (err)
			goto out;
		zero_user(page, zerofrom, len);
		err = pagecache_write_end(file, mapping, curpos, len, len,
						page, fsdata);
		if (err < 0)
			goto out;
		BUG_ON(err != len);
		err = 0;

		balance_dirty_pages_ratelimited(mapping);
	}

	/* page covers the boundary, find the boundary offset */
	if (index == curidx) {
		zerofrom = curpos & ~PAGE_CACHE_MASK;
		/* if we will expand the thing last block will be filled */
		if (offset <= zerofrom) {
			goto out;
		}
		if (zerofrom & (blocksize-1)) {
			*bytes |= (blocksize-1);
			(*bytes)++;
		}
		len = offset - zerofrom;

		err = pagecache_write_begin(file, mapping, curpos, len,
						AOP_FLAG_UNINTERRUPTIBLE,
						&page, &fsdata);
		if (err)
			goto out;
		zero_user(page, zerofrom, len);
		err = pagecache_write_end(file, mapping, curpos, len, len,
						page, fsdata);
		if (err < 0)
			goto out;
		BUG_ON(err != len);
		err = 0;
	}
out:
	return err;
}
/*
 * For moronic filesystems that do not allow holes in files.
 * We may have to extend the file.
 */
int cont_write_begin(struct file *file, struct address_space *mapping,
			loff_t pos, unsigned len, unsigned flags,
			struct page **pagep, void **fsdata,
			get_block_t *get_block, loff_t *bytes)
{
	struct inode *inode = mapping->host;
	unsigned blocksize = 1 << inode->i_blkbits;
	unsigned zerofrom;
	int err;

	err = cont_expand_zero(file, mapping, pos, bytes);
	if (err)
		return err;

	zerofrom = *bytes & ~PAGE_CACHE_MASK;
	if (pos+len > *bytes && zerofrom & (blocksize-1)) {
		*bytes |= (blocksize-1);
		(*bytes)++;
	}

	return block_write_begin(mapping, pos, len, flags, pagep, get_block);
}
EXPORT_SYMBOL(cont_write_begin);
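/*
 * Sketch of how a no-holes filesystem might use cont_write_begin(). The
 * MYFS_I(inode)->mmu_private field is hypothetical: a per-inode count of
 * bytes initialised on disk, which cont_write_begin() advances as it zeroes
 * out the gap (FAT keeps a similar counter).
 *
 *	static int myfs_write_begin(struct file *file,
 *			struct address_space *mapping,
 *			loff_t pos, unsigned len, unsigned flags,
 *			struct page **pagep, void **fsdata)
 *	{
 *		return cont_write_begin(file, mapping, pos, len, flags,
 *				pagep, fsdata, myfs_get_block,
 *				&MYFS_I(mapping->host)->mmu_private);
 *	}
 */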
int block_commit_write(struct page *page, unsigned from, unsigned to)
{
	struct inode *inode = page->mapping->host;
	__block_commit_write(inode, page, from, to);
	return 0;
}
EXPORT_SYMBOL(block_commit_write);
/*
 * block_page_mkwrite() is not allowed to change the file size as it gets
 * called from a page fault handler when a page is first dirtied. Hence we must
 * be careful to check for EOF conditions here. We set the page up correctly
 * for a written page which means we get ENOSPC checking when writing into
 * holes and correct delalloc and unwritten extent mapping on filesystems that
 * support these features.
 *
 * We are not allowed to take the i_mutex here so we have to play games to
 * protect against truncate races as the page could now be beyond EOF.  Because
 * truncate writes the inode size before removing pages, once we have the
 * page lock we can determine safely if the page is beyond EOF. If it is not
 * beyond EOF, then the page is guaranteed safe against truncation until we
 * unlock the page.
 */
int
block_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf,
		   get_block_t get_block)
{
	struct page *page = vmf->page;
	struct inode *inode = vma->vm_file->f_path.dentry->d_inode;
	unsigned long end;
	loff_t size;
	int ret = VM_FAULT_NOPAGE; /* make the VM retry the fault */

	lock_page(page);
	size = i_size_read(inode);
	if ((page->mapping != inode->i_mapping) ||
	    (page_offset(page) > size)) {
		/* page got truncated out from underneath us */
		unlock_page(page);
		goto out;
	}

	/* page is wholly or partially inside EOF */
	if (((page->index + 1) << PAGE_CACHE_SHIFT) > size)
		end = size & ~PAGE_CACHE_MASK;
	else
		end = PAGE_CACHE_SIZE;

	ret = __block_write_begin(page, 0, end, get_block);
	if (!ret)
		ret = block_commit_write(page, 0, end);

	if (unlikely(ret)) {
		unlock_page(page);
		if (ret == -ENOMEM)
			ret = VM_FAULT_OOM;
		else /* -ENOSPC, -EIO, etc */
			ret = VM_FAULT_SIGBUS;
	} else
		ret = VM_FAULT_LOCKED;

out:
	return ret;
}
EXPORT_SYMBOL(block_page_mkwrite);
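/*
 * Sketch of wiring block_page_mkwrite() into a file's mmap path, assuming
 * a hypothetical myfs_get_block:
 *
 *	static int myfs_page_mkwrite(struct vm_area_struct *vma,
 *				     struct vm_fault *vmf)
 *	{
 *		return block_page_mkwrite(vma, vmf, myfs_get_block);
 *	}
 *
 *	static const struct vm_operations_struct myfs_file_vm_ops = {
 *		.fault		= filemap_fault,
 *		.page_mkwrite	= myfs_page_mkwrite,
 *	};
 */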
/*
 * nobh_write_begin()'s prereads are special: the buffer_heads are freed
 * immediately, while under the page lock.  So it needs a special end_io
 * handler which does not touch the bh after unlocking it.
 */
static void end_buffer_read_nobh(struct buffer_head *bh, int uptodate)
{
	__end_buffer_read_notouch(bh, uptodate);
}
/*
 * Attach the singly-linked list of buffers created by nobh_write_begin, to
 * the page (converting it to circular linked list and taking care of page
 * dirty races).
 */
static void attach_nobh_buffers(struct page *page, struct buffer_head *head)
{
	struct buffer_head *bh;

	BUG_ON(!PageLocked(page));

	spin_lock(&page->mapping->private_lock);
	bh = head;
	do {
		if (PageDirty(page))
			set_buffer_dirty(bh);
		if (!bh->b_this_page)
			bh->b_this_page = head;
		bh = bh->b_this_page;
	} while (bh != head);
	attach_page_buffers(page, head);
	spin_unlock(&page->mapping->private_lock);
}
/*
 * On entry, the page is fully not uptodate.
 * On exit the page is fully uptodate in the areas outside (from,to)
 * The filesystem needs to handle block truncation upon failure.
 */
int nobh_write_begin(struct address_space *mapping,
			loff_t pos, unsigned len, unsigned flags,
			struct page **pagep, void **fsdata,
			get_block_t *get_block)
{
	struct inode *inode = mapping->host;
	const unsigned blkbits = inode->i_blkbits;
	const unsigned blocksize = 1 << blkbits;
	struct buffer_head *head, *bh;
	struct page *page;
	pgoff_t index;
	unsigned from, to;
	unsigned block_in_page;
	unsigned block_start, block_end;
	sector_t block_in_file;
	int nr_reads = 0;
	int ret = 0;
	int is_mapped_to_disk = 1;

	index = pos >> PAGE_CACHE_SHIFT;
	from = pos & (PAGE_CACHE_SIZE - 1);
	to = from + len;

	page = grab_cache_page_write_begin(mapping, index, flags);
	if (!page)
		return -ENOMEM;
	*pagep = page;
	*fsdata = NULL;

	if (page_has_buffers(page)) {
		ret = __block_write_begin(page, pos, len, get_block);
		if (unlikely(ret))
			goto out_release;
		return ret;
	}

	if (PageMappedToDisk(page))
		return 0;

	/*
	 * Allocate buffers so that we can keep track of state, and potentially
	 * attach them to the page if an error occurs. In the common case of
	 * no error, they will just be freed again without ever being attached
	 * to the page (which is all OK, because we're under the page lock).
	 *
	 * Be careful: the buffer linked list is a NULL terminated one, rather
	 * than the circular one we're used to.
	 */
	head = alloc_page_buffers(page, blocksize, 0);
	if (!head) {
		ret = -ENOMEM;
		goto out_release;
	}

	block_in_file = (sector_t)page->index << (PAGE_CACHE_SHIFT - blkbits);

	/*
	 * We loop across all blocks in the page, whether or not they are
	 * part of the affected region.  This is so we can discover if the
	 * page is fully mapped-to-disk.
	 */
	for (block_start = 0, block_in_page = 0, bh = head;
		  block_start < PAGE_CACHE_SIZE;
		  block_in_page++, block_start += blocksize, bh = bh->b_this_page) {
		int create;

		block_end = block_start + blocksize;
		bh->b_state = 0;
		create = 1;
		if (block_start >= to)
			create = 0;
		ret = get_block(inode, block_in_file + block_in_page,
					bh, create);
		if (ret)
			goto failed;
		if (!buffer_mapped(bh))
			is_mapped_to_disk = 0;
		if (buffer_new(bh))
			unmap_underlying_metadata(bh->b_bdev, bh->b_blocknr);
		if (PageUptodate(page)) {
			set_buffer_uptodate(bh);
			continue;
		}
		if (buffer_new(bh) || !buffer_mapped(bh)) {
			zero_user_segments(page, block_start, from,
							to, block_end);
			continue;
		}
		if (buffer_uptodate(bh))
			continue;	/* reiserfs does this */
		if (block_start < from || block_end > to) {
			lock_buffer(bh);
			bh->b_end_io = end_buffer_read_nobh;
			submit_bh(READ, bh);
			nr_reads++;
		}
	}

	if (nr_reads) {
		/*
		 * The page is locked, so these buffers are protected from
		 * any VM or truncate activity.  Hence we don't need to care
		 * for the buffer_head refcounts.
		 */
		for (bh = head; bh; bh = bh->b_this_page) {
			wait_on_buffer(bh);
			if (!buffer_uptodate(bh))
				ret = -EIO;
		}
		if (ret)
			goto failed;
	}

	if (is_mapped_to_disk)
		SetPageMappedToDisk(page);

	*fsdata = head; /* to be released by nobh_write_end */

	return 0;

failed:
	BUG_ON(!ret);
	/*
	 * Error recovery is a bit difficult. We need to zero out blocks that
	 * were newly allocated, and dirty them to ensure they get written out.
	 * Buffers need to be attached to the page at this point, otherwise
	 * the handling of potential IO errors during writeout would be hard
	 * (could try doing synchronous writeout, but what if that fails too?)
	 */
	attach_nobh_buffers(page, head);
	page_zero_new_buffers(page, from, to);

out_release:
	unlock_page(page);
	page_cache_release(page);
	*pagep = NULL;

	return ret;
}
EXPORT_SYMBOL(nobh_write_begin);
int nobh_write_end(struct file *file, struct address_space *mapping,
			loff_t pos, unsigned len, unsigned copied,
			struct page *page, void *fsdata)
{
	struct inode *inode = page->mapping->host;
	struct buffer_head *head = fsdata;
	struct buffer_head *bh;
	BUG_ON(fsdata != NULL && page_has_buffers(page));

	if (unlikely(copied < len) && head)
		attach_nobh_buffers(page, head);
	if (page_has_buffers(page))
		return generic_write_end(file, mapping, pos, len,
					copied, page, fsdata);

	SetPageUptodate(page);
	set_page_dirty(page);
	if (pos+copied > inode->i_size) {
		i_size_write(inode, pos+copied);
		mark_inode_dirty(inode);
	}

	unlock_page(page);
	page_cache_release(page);

	while (head) {
		bh = head;
		head = head->b_this_page;
		free_buffer_head(bh);
	}

	return copied;
}
EXPORT_SYMBOL(nobh_write_end);
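/*
 * Sketch of the nobh pairing as a filesystem's nobh mode might use it
 * (ext2's nobh option followed this shape): write_begin wraps
 * nobh_write_begin() with the filesystem's get_block, and nobh_write_end()
 * is used as ->write_end directly. "myfs_*" names are hypothetical.
 *
 *	static int myfs_nobh_write_begin(struct file *file,
 *			struct address_space *mapping,
 *			loff_t pos, unsigned len, unsigned flags,
 *			struct page **pagep, void **fsdata)
 *	{
 *		return nobh_write_begin(mapping, pos, len, flags,
 *				pagep, fsdata, myfs_get_block);
 *	}
 *
 *	.write_begin	= myfs_nobh_write_begin,
 *	.write_end	= nobh_write_end,
 */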
/*
 * nobh_writepage() - based on block_write_full_page() except
 * that it tries to operate without attaching bufferheads to
 * the page.
 */
int nobh_writepage(struct page *page, get_block_t *get_block,
			struct writeback_control *wbc)
{
	struct inode * const inode = page->mapping->host;
	loff_t i_size = i_size_read(inode);
	const pgoff_t end_index = i_size >> PAGE_CACHE_SHIFT;
	unsigned offset;
	int ret;

	/* Is the page fully inside i_size? */
	if (page->index < end_index)
		goto out;

	/* Is the page fully outside i_size? (truncate in progress) */
	offset = i_size & (PAGE_CACHE_SIZE-1);
	if (page->index >= end_index+1 || !offset) {
		/*
		 * The page may have dirty, unmapped buffers. For example,
		 * they may have been added in ext3_writepage(). Make them
		 * freeable here, so the page does not leak.
		 */
#if 0
		/* Not really sure about this - do we need this? */
		if (page->mapping->a_ops->invalidatepage)
			page->mapping->a_ops->invalidatepage(page, offset);
#endif
		unlock_page(page);
		return 0; /* don't care */
	}

	/*
	 * The page straddles i_size. It must be zeroed out on each and every
	 * writepage invocation because it may be mmapped. "A file is mapped
	 * in multiples of the page size. For a file that is not a multiple of
	 * the page size, the remaining memory is zeroed when mapped, and
	 * writes to that region are not written out to the file."
	 */
	zero_user_segment(page, offset, PAGE_CACHE_SIZE);
out:
	ret = mpage_writepage(page, get_block, wbc);
	if (ret == -EAGAIN)
		ret = __block_write_full_page(inode, page, get_block, wbc,
					      end_buffer_async_write);
	return ret;
}
EXPORT_SYMBOL(nobh_writepage);
int nobh_truncate_page(struct address_space *mapping,
			loff_t from, get_block_t *get_block)
{
	pgoff_t index = from >> PAGE_CACHE_SHIFT;
	unsigned offset = from & (PAGE_CACHE_SIZE-1);
	unsigned blocksize;
	sector_t iblock;
	unsigned length, pos;
	struct inode *inode = mapping->host;
	struct page *page;
	struct buffer_head map_bh;
	int err;

	blocksize = 1 << inode->i_blkbits;
	length = offset & (blocksize - 1);

	/* Block boundary? Nothing to do */
	if (!length)
		return 0;

	length = blocksize - length;
	iblock = (sector_t)index << (PAGE_CACHE_SHIFT - inode->i_blkbits);

	page = grab_cache_page(mapping, index);
	err = -ENOMEM;
	if (!page)
		goto out;

	if (page_has_buffers(page)) {
has_buffers:
		unlock_page(page);
		page_cache_release(page);
		return block_truncate_page(mapping, from, get_block);
	}

	/* Find the buffer that contains "offset" */
	pos = blocksize;
	while (offset >= pos) {
		iblock++;
		pos += blocksize;
	}

	map_bh.b_size = blocksize;
	map_bh.b_state = 0;
	err = get_block(inode, iblock, &map_bh, 0);
	if (err)
		goto unlock;
	/* unmapped? It's a hole - nothing to do */
	if (!buffer_mapped(&map_bh))
		goto unlock;

	/* Ok, it's mapped. Make sure it's up-to-date */
	if (!PageUptodate(page)) {
		err = mapping->a_ops->readpage(NULL, page);
		if (err) {
			page_cache_release(page);
			goto out;
		}
		lock_page(page);
		if (!PageUptodate(page)) {
			err = -EIO;
			goto unlock;
		}
		if (page_has_buffers(page))
			goto has_buffers;
	}
	zero_user(page, offset, length);
	set_page_dirty(page);
	err = 0;

unlock:
	unlock_page(page);
	page_cache_release(page);
out:
	return err;
}
EXPORT_SYMBOL(nobh_truncate_page);
int block_truncate_page(struct address_space *mapping,
			loff_t from, get_block_t *get_block)
{
	pgoff_t index = from >> PAGE_CACHE_SHIFT;
	unsigned offset = from & (PAGE_CACHE_SIZE-1);
	unsigned blocksize;
	sector_t iblock;
	unsigned length, pos;
	struct inode *inode = mapping->host;
	struct page *page;
	struct buffer_head *bh;
	int err;

	blocksize = 1 << inode->i_blkbits;
	length = offset & (blocksize - 1);

	/* Block boundary? Nothing to do */
	if (!length)
		return 0;

	length = blocksize - length;
	iblock = (sector_t)index << (PAGE_CACHE_SHIFT - inode->i_blkbits);

	page = grab_cache_page(mapping, index);
	err = -ENOMEM;
	if (!page)
		goto out;

	if (!page_has_buffers(page))
		create_empty_buffers(page, blocksize, 0);

	/* Find the buffer that contains "offset" */
	bh = page_buffers(page);
	pos = blocksize;
	while (offset >= pos) {
		bh = bh->b_this_page;
		iblock++;
		pos += blocksize;
	}

	err = 0;
	if (!buffer_mapped(bh)) {
		WARN_ON(bh->b_size != blocksize);
		err = get_block(inode, iblock, bh, 0);
		if (err)
			goto unlock;
		/* unmapped? It's a hole - nothing to do */
		if (!buffer_mapped(bh))
			goto unlock;
	}

	/* Ok, it's mapped. Make sure it's up-to-date */
	if (PageUptodate(page))
		set_buffer_uptodate(bh);

	if (!buffer_uptodate(bh) && !buffer_delay(bh) && !buffer_unwritten(bh)) {
		err = -EIO;
		ll_rw_block(READ, 1, &bh);
		wait_on_buffer(bh);
		/* Uhhuh. Read error. Complain and punt. */
		if (!buffer_uptodate(bh))
			goto unlock;
	}

	zero_user(page, offset, length);
	mark_buffer_dirty(bh);
	err = 0;

unlock:
	unlock_page(page);
	page_cache_release(page);
out:
	return err;
}
EXPORT_SYMBOL(block_truncate_page);
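/*
 * Sketch of a truncate path using block_truncate_page() to zero the partial
 * block at the new EOF before the size change is committed (hypothetical
 * myfs_get_block; the block-freeing step is filesystem-specific):
 *
 *	static void myfs_truncate(struct inode *inode)
 *	{
 *		block_truncate_page(inode->i_mapping, inode->i_size,
 *				    myfs_get_block);
 *		// ... free the now-unused blocks ...
 *	}
 */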
/*
 * The generic ->writepage function for buffer-backed address_spaces.
 * This form passes in the end_io handler used to finish the IO.
 */
int block_write_full_page_endio(struct page *page, get_block_t *get_block,
			struct writeback_control *wbc, bh_end_io_t *handler)
{
	struct inode * const inode = page->mapping->host;
	loff_t i_size = i_size_read(inode);
	const pgoff_t end_index = i_size >> PAGE_CACHE_SHIFT;
	unsigned offset;

	/* Is the page fully inside i_size? */
	if (page->index < end_index)
		return __block_write_full_page(inode, page, get_block, wbc,
					       handler);

	/* Is the page fully outside i_size? (truncate in progress) */
	offset = i_size & (PAGE_CACHE_SIZE-1);
	if (page->index >= end_index+1 || !offset) {
		/*
		 * The page may have dirty, unmapped buffers. For example,
		 * they may have been added in ext3_writepage(). Make them
		 * freeable here, so the page does not leak.
		 */
		do_invalidatepage(page, 0);
		unlock_page(page);
		return 0; /* don't care */
	}

	/*
	 * The page straddles i_size. It must be zeroed out on each and every
	 * writepage invocation because it may be mmapped. "A file is mapped
	 * in multiples of the page size. For a file that is not a multiple of
	 * the page size, the remaining memory is zeroed when mapped, and
	 * writes to that region are not written out to the file."
	 */
	zero_user_segment(page, offset, PAGE_CACHE_SIZE);
	return __block_write_full_page(inode, page, get_block, wbc, handler);
}
EXPORT_SYMBOL(block_write_full_page_endio);
/*
 * The generic ->writepage function for buffer-backed address_spaces.
 */
int block_write_full_page(struct page *page, get_block_t *get_block,
			struct writeback_control *wbc)
{
	return block_write_full_page_endio(page, get_block, wbc,
					   end_buffer_async_write);
}
EXPORT_SYMBOL(block_write_full_page);
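/*
 * Sketch of the usual ->writepage wrapper around block_write_full_page()
 * (hypothetical myfs_get_block):
 *
 *	static int myfs_writepage(struct page *page,
 *				  struct writeback_control *wbc)
 *	{
 *		return block_write_full_page(page, myfs_get_block, wbc);
 *	}
 */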
sector_t generic_block_bmap(struct address_space *mapping, sector_t block,
			    get_block_t *get_block)
{
	struct buffer_head tmp;
	struct inode *inode = mapping->host;
	tmp.b_state = 0;
	tmp.b_blocknr = 0;
	tmp.b_size = 1 << inode->i_blkbits;
	get_block(inode, block, &tmp, 0);
	return tmp.b_blocknr;
}
EXPORT_SYMBOL(generic_block_bmap);
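/*
 * Sketch of a ->bmap implementation (used by the FIBMAP ioctl and by
 * swapfile setup) built on generic_block_bmap(), assuming a hypothetical
 * myfs_get_block:
 *
 *	static sector_t myfs_bmap(struct address_space *mapping,
 *				  sector_t block)
 *	{
 *		return generic_block_bmap(mapping, block, myfs_get_block);
 *	}
 */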
static void end_bio_bh_io_sync(struct bio *bio, int err)
{
	struct buffer_head *bh = bio->bi_private;

	if (err == -EOPNOTSUPP) {
		set_bit(BIO_EOPNOTSUPP, &bio->bi_flags);
	}

	if (unlikely(test_bit(BIO_QUIET, &bio->bi_flags)))
		set_bit(BH_Quiet, &bh->b_state);

	bh->b_end_io(bh, test_bit(BIO_UPTODATE, &bio->bi_flags));
	bio_put(bio);
}
int submit_bh(int rw, struct buffer_head *bh)
{
	struct bio *bio;
	int ret = 0;

	BUG_ON(!buffer_locked(bh));
	BUG_ON(!buffer_mapped(bh));
	BUG_ON(!bh->b_end_io);
	BUG_ON(buffer_delay(bh));
	BUG_ON(buffer_unwritten(bh));

	/*
	 * Only clear out a write error when rewriting
	 */
	if (test_set_buffer_req(bh) && (rw & WRITE))
		clear_buffer_write_io_error(bh);

	/*
	 * from here on down, it's all bio -- do the initial mapping,
	 * submit_bio -> generic_make_request may further map this bio around
	 */
	bio = bio_alloc(GFP_NOIO, 1);

	bio->bi_sector = bh->b_blocknr * (bh->b_size >> 9);
	bio->bi_bdev = bh->b_bdev;
	bio->bi_io_vec[0].bv_page = bh->b_page;
	bio->bi_io_vec[0].bv_len = bh->b_size;
	bio->bi_io_vec[0].bv_offset = bh_offset(bh);

	bio->bi_vcnt = 1;
	bio->bi_idx = 0;
	bio->bi_size = bh->b_size;

	bio->bi_end_io = end_bio_bh_io_sync;
	bio->bi_private = bh;

	bio_get(bio);
	submit_bio(rw, bio);

	if (bio_flagged(bio, BIO_EOPNOTSUPP))
		ret = -EOPNOTSUPP;

	bio_put(bio);
	return ret;
}
EXPORT_SYMBOL(submit_bh);
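/*
 * Sketch of a synchronous read of one mapped buffer via submit_bh(). The
 * extra reference is taken because end_buffer_read_sync() drops one:
 *
 *	lock_buffer(bh);
 *	if (!buffer_uptodate(bh)) {
 *		get_bh(bh);
 *		bh->b_end_io = end_buffer_read_sync;
 *		submit_bh(READ, bh);
 *		wait_on_buffer(bh);
 *		if (!buffer_uptodate(bh))
 *			err = -EIO;
 *	} else
 *		unlock_buffer(bh);
 */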
/**
 * ll_rw_block: low-level access to block devices (DEPRECATED)
 * @rw: whether to %READ or %WRITE or maybe %READA (readahead)
 * @nr: number of &struct buffer_heads in the array
 * @bhs: array of pointers to &struct buffer_head
 *
 * ll_rw_block() takes an array of pointers to &struct buffer_heads, and
 * requests an I/O operation on them, either a %READ or a %WRITE.  The third
 * %READA option is described in the documentation for generic_make_request()
 * which ll_rw_block() calls.
 *
 * This function drops any buffer that it cannot get a lock on (with the
 * BH_Lock state bit), any buffer that appears to be clean when doing a write
 * request, and any buffer that appears to be up-to-date when doing a read
 * request.  Further it marks as clean buffers that are processed for
 * writing (the buffer cache won't assume that they are actually clean
 * until the buffer gets unlocked).
 *
 * ll_rw_block sets b_end_io to a simple completion handler that marks
 * the buffer up-to-date (if appropriate), unlocks the buffer and wakes
 * up any waiters.
 *
 * All of the buffers must be for the same device, and must also be a
 * multiple of the current approved size for the device.
 */
void ll_rw_block(int rw, int nr, struct buffer_head *bhs[])
{
	int i;

	for (i = 0; i < nr; i++) {
		struct buffer_head *bh = bhs[i];

		if (!trylock_buffer(bh))
			continue;
		if (rw == WRITE) {
			if (test_clear_buffer_dirty(bh)) {
				bh->b_end_io = end_buffer_write_sync;
				get_bh(bh);
				submit_bh(WRITE, bh);
				continue;
			}
		} else {
			if (!buffer_uptodate(bh)) {
				bh->b_end_io = end_buffer_read_sync;
				get_bh(bh);
				submit_bh(rw, bh);
				continue;
			}
		}
		unlock_buffer(bh);
	}
}
EXPORT_SYMBOL(ll_rw_block);
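/*
 * Sketch of the classic (now discouraged) batch-read pattern built on
 * ll_rw_block(): kick off all the reads, then wait for each buffer.
 *
 *	ll_rw_block(READ, nr, bhs);
 *	for (i = 0; i < nr; i++) {
 *		wait_on_buffer(bhs[i]);
 *		if (!buffer_uptodate(bhs[i]))
 *			err = -EIO;
 *	}
 */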
void write_dirty_buffer(struct buffer_head *bh, int rw)
{
	lock_buffer(bh);
	if (!test_clear_buffer_dirty(bh)) {
		unlock_buffer(bh);
		return;
	}
	bh->b_end_io = end_buffer_write_sync;
	get_bh(bh);
	submit_bh(rw, bh);
}
EXPORT_SYMBOL(write_dirty_buffer);
/*
 * For a data-integrity writeout, we need to wait upon any in-progress I/O
 * and then start new I/O and then wait upon it.  The caller must have a ref
 * on the buffer_head.
 */
int __sync_dirty_buffer(struct buffer_head *bh, int rw)
{
	int ret = 0;

	WARN_ON(atomic_read(&bh->b_count) < 1);
	lock_buffer(bh);
	if (test_clear_buffer_dirty(bh)) {
		get_bh(bh);
		bh->b_end_io = end_buffer_write_sync;
		ret = submit_bh(rw, bh);
		wait_on_buffer(bh);
		if (!ret && !buffer_uptodate(bh))
			ret = -EIO;
	} else {
		unlock_buffer(bh);
	}
	return ret;
}
EXPORT_SYMBOL(__sync_dirty_buffer);
int sync_dirty_buffer(struct buffer_head *bh)
{
	return __sync_dirty_buffer(bh, WRITE_SYNC);
}
EXPORT_SYMBOL(sync_dirty_buffer);
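/*
 * Sketch of a synchronous metadata update using sync_dirty_buffer():
 * modify the block in memory, dirty it, then write and wait. "offset",
 * "src" and "len" are hypothetical; a write failure surfaces as -EIO.
 *
 *	memcpy(bh->b_data + offset, src, len);
 *	mark_buffer_dirty(bh);
 *	err = sync_dirty_buffer(bh);
 */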
/*
 * try_to_free_buffers() checks if all the buffers on this particular page
 * are unused, and releases them if so.
 *
 * Exclusion against try_to_free_buffers may be obtained by either
 * locking the page or by holding its mapping's private_lock.
 *
 * If the page is dirty but all the buffers are clean then we need to
 * be sure to mark the page clean as well.  This is because the page
 * may be against a block device, and a later reattachment of buffers
 * to a dirty page will set *all* buffers dirty.  Which would corrupt
 * filesystem data on the same device.
 *
 * The same applies to regular filesystem pages: if all the buffers are
 * clean then we set the page clean and proceed.  To do that, we require
 * total exclusion from __set_page_dirty_buffers().  That is obtained with
 * private_lock.
 *
 * try_to_free_buffers() is non-blocking.
 */
static inline int buffer_busy(struct buffer_head *bh)
{
	return atomic_read(&bh->b_count) |
		(bh->b_state & ((1 << BH_Dirty) | (1 << BH_Lock)));
}

static int
drop_buffers(struct page *page, struct buffer_head **buffers_to_free)
{
	struct buffer_head *head = page_buffers(page);
	struct buffer_head *bh;

	bh = head;
	do {
		if (buffer_write_io_error(bh) && page->mapping)
			set_bit(AS_EIO, &page->mapping->flags);
		if (buffer_busy(bh))
			goto failed;
		bh = bh->b_this_page;
	} while (bh != head);

	do {
		struct buffer_head *next = bh->b_this_page;

		if (bh->b_assoc_map)
			__remove_assoc_queue(bh);
		bh = next;
	} while (bh != head);
	*buffers_to_free = head;
	__clear_page_buffers(page);
	return 1;
failed:
	return 0;
}
int try_to_free_buffers(struct page *page)
{
	struct address_space * const mapping = page->mapping;
	struct buffer_head *buffers_to_free = NULL;
	int ret = 0;

	BUG_ON(!PageLocked(page));
	if (PageWriteback(page))
		return 0;

	if (mapping == NULL) {		/* can this still happen? */
		ret = drop_buffers(page, &buffers_to_free);
		goto out;
	}

	spin_lock(&mapping->private_lock);
	ret = drop_buffers(page, &buffers_to_free);

	/*
	 * If the filesystem writes its buffers by hand (eg ext3)
	 * then we can have clean buffers against a dirty page.  We
	 * clean the page here; otherwise the VM will never notice
	 * that the filesystem did any IO at all.
	 *
	 * Also, during truncate, discard_buffer will have marked all
	 * the page's buffers clean.  We discover that here and clean
	 * the page also.
	 *
	 * private_lock must be held over this entire operation in order
	 * to synchronise against __set_page_dirty_buffers and prevent the
	 * dirty bit from being lost.
	 */
	if (ret)
		cancel_dirty_page(page, PAGE_CACHE_SIZE);
	spin_unlock(&mapping->private_lock);
out:
	if (buffers_to_free) {
		struct buffer_head *bh = buffers_to_free;

		do {
			struct buffer_head *next = bh->b_this_page;
			free_buffer_head(bh);
			bh = next;
		} while (bh != buffers_to_free);
	}
	return ret;
}
EXPORT_SYMBOL(try_to_free_buffers);
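/*
 * try_to_free_buffers() is what a buffer-backed filesystem's ->releasepage
 * typically boils down to. A minimal sketch ("myfs_releasepage" is a
 * hypothetical name):
 *
 *	static int myfs_releasepage(struct page *page, gfp_t gfp_mask)
 *	{
 *		return try_to_free_buffers(page);
 *	}
 */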
void block_sync_page(struct page *page)
{
	struct address_space *mapping;

	smp_mb();
	mapping = page_mapping(page);
	if (mapping)
		blk_run_backing_dev(mapping->backing_dev_info, page);
}
EXPORT_SYMBOL(block_sync_page);
/*
 * There are no bdflush tunables left.  But distributions are
 * still running obsolete flush daemons, so we terminate them here.
 *
 * Use of bdflush() is deprecated and will be removed in a future kernel.
 * The `flush-X' kernel threads fully replace bdflush daemons and this call.
 */
SYSCALL_DEFINE2(bdflush, int, func, long, data)
{
	static int msg_count;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (msg_count < 5) {
		msg_count++;
		printk(KERN_INFO
			"warning: process `%s' used the obsolete bdflush"
			" system call\n", current->comm);
		printk(KERN_INFO "Fix your initscripts?\n");
	}

	if (func == 1)
		do_exit(0);
	return 0;
}
/*
 * Buffer-head allocation
 */
static struct kmem_cache *bh_cachep;

/*
 * Once the number of bh's in the machine exceeds this level, we start
 * stripping them in writeback.
 */
static int max_buffer_heads;

int buffer_heads_over_limit;

struct bh_accounting {
	int nr;			/* Number of live bh's */
	int ratelimit;		/* Limit cacheline bouncing */
};

static DEFINE_PER_CPU(struct bh_accounting, bh_accounting) = {0, 0};
static void recalc_bh_state(void)
{
	int i;
	int tot = 0;

	if (__get_cpu_var(bh_accounting).ratelimit++ < 4096)
		return;
	__get_cpu_var(bh_accounting).ratelimit = 0;
	for_each_online_cpu(i)
		tot += per_cpu(bh_accounting, i).nr;
	buffer_heads_over_limit = (tot > max_buffer_heads);
}

struct buffer_head *alloc_buffer_head(gfp_t gfp_flags)
{
	struct buffer_head *ret = kmem_cache_zalloc(bh_cachep, gfp_flags);
	if (ret) {
		INIT_LIST_HEAD(&ret->b_assoc_buffers);
		get_cpu_var(bh_accounting).nr++;
		recalc_bh_state();
		put_cpu_var(bh_accounting);
	}
	return ret;
}
EXPORT_SYMBOL(alloc_buffer_head);

void free_buffer_head(struct buffer_head *bh)
{
	BUG_ON(!list_empty(&bh->b_assoc_buffers));
	kmem_cache_free(bh_cachep, bh);
	get_cpu_var(bh_accounting).nr--;
	recalc_bh_state();
	put_cpu_var(bh_accounting);
}
EXPORT_SYMBOL(free_buffer_head);
static void buffer_exit_cpu(int cpu)
{
	int i;
	struct bh_lru *b = &per_cpu(bh_lrus, cpu);

	for (i = 0; i < BH_LRU_SIZE; i++) {
		brelse(b->bhs[i]);
		b->bhs[i] = NULL;
	}
	get_cpu_var(bh_accounting).nr += per_cpu(bh_accounting, cpu).nr;
	per_cpu(bh_accounting, cpu).nr = 0;
	put_cpu_var(bh_accounting);
}

static int buffer_cpu_notify(struct notifier_block *self,
			     unsigned long action, void *hcpu)
{
	if (action == CPU_DEAD || action == CPU_DEAD_FROZEN)
		buffer_exit_cpu((unsigned long)hcpu);
	return NOTIFY_OK;
}
/**
 * bh_uptodate_or_lock - Test whether the buffer is uptodate
 * @bh: struct buffer_head
 *
 * Return true if the buffer is up-to-date and false,
 * with the buffer locked, if not.
 */
int bh_uptodate_or_lock(struct buffer_head *bh)
{
	if (!buffer_uptodate(bh)) {
		lock_buffer(bh);
		if (!buffer_uptodate(bh))
			return 0;
		unlock_buffer(bh);
	}
	return 1;
}
EXPORT_SYMBOL(bh_uptodate_or_lock);
/**
 * bh_submit_read - Submit a locked buffer for reading
 * @bh: struct buffer_head
 *
 * Returns zero on success and -EIO on error.
 */
int bh_submit_read(struct buffer_head *bh)
{
	BUG_ON(!buffer_locked(bh));

	if (buffer_uptodate(bh)) {
		unlock_buffer(bh);
		return 0;
	}

	get_bh(bh);
	bh->b_end_io = end_buffer_read_sync;
	submit_bh(READ, bh);
	wait_on_buffer(bh);
	if (buffer_uptodate(bh))
		return 0;
	return -EIO;
}
EXPORT_SYMBOL(bh_submit_read);
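/*
 * Sketch of the intended pairing of bh_uptodate_or_lock() and
 * bh_submit_read(): read the block only if it is not already valid.
 * bh_submit_read() consumes the lock taken by bh_uptodate_or_lock().
 *
 *	if (!bh_uptodate_or_lock(bh)) {
 *		err = bh_submit_read(bh);
 *		if (err)
 *			goto bad;
 *	}
 */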
void __init buffer_init(void)
{
	int nrpages;

	bh_cachep = kmem_cache_create("buffer_head",
			sizeof(struct buffer_head), 0,
				(SLAB_RECLAIM_ACCOUNT|SLAB_PANIC|
				SLAB_MEM_SPREAD),
				NULL);

	/*
	 * Limit the bh occupancy to 10% of ZONE_NORMAL
	 */
	nrpages = (nr_free_buffer_pages() * 10) / 100;
	max_buffer_heads = nrpages * (PAGE_SIZE / sizeof(struct buffer_head));
	hotcpu_notifier(buffer_cpu_notify, 0);
}