Update to 3.4-final.
diff --git a/mm/page_io.c b/mm/page_io.c [linux-flexiantxendom0-3.2.10.git]
index 85bb904..651a912 100644
--- a/mm/page_io.c
+++ b/mm/page_io.c
 
 #include <linux/mm.h>
 #include <linux/kernel_stat.h>
+#include <linux/gfp.h>
 #include <linux/pagemap.h>
 #include <linux/swap.h>
-#include <linux/swapctl.h>
-
+#include <linux/bio.h>
+#include <linux/swapops.h>
+#include <linux/writeback.h>
+#include <linux/frontswap.h>
 #include <asm/pgtable.h>
 
-/*
- * Reads or writes a swap page.
- * wait=1: start I/O and wait for completion. wait=0: start asynchronous I/O.
- *
- * Important prevention of race condition: the caller *must* atomically 
- * create a unique swap cache entry for this swap page before calling
- * rw_swap_page, and must lock that page.  By ensuring that there is a
- * single page of memory reserved for the swap entry, the normal VM page
- * lock on that page also doubles as a lock on swap entries.  Having only
- * one lock to deal with per swap entry (rather than locking swap and memory
- * independently) also makes it easier to make certain swapping operations
- * atomic, which is particularly important when we are trying to ensure 
- * that shared pages stay shared while being swapped.
- */
-
-static int rw_swap_page_base(int rw, swp_entry_t entry, struct page *page)
+static struct bio *get_swap_bio(gfp_t gfp_flags,
+                               struct page *page, bio_end_io_t end_io)
 {
-       unsigned long offset;
-       sector_t zones[PAGE_SIZE/512];
-       int zones_used;
-       int block_size;
-       struct inode *swapf = 0;
-       struct block_device *bdev;
+       struct bio *bio;
 
-       if (rw == READ) {
-               ClearPageUptodate(page);
-               kstat.pswpin++;
-       } else
-               kstat.pswpout++;
+       bio = bio_alloc(gfp_flags, 1);
+       if (bio) {
+               bio->bi_sector = map_swap_page(page, &bio->bi_bdev);
+               bio->bi_sector <<= PAGE_SHIFT - 9;
+               bio->bi_io_vec[0].bv_page = page;
+               bio->bi_io_vec[0].bv_len = PAGE_SIZE;
+               bio->bi_io_vec[0].bv_offset = 0;
+               bio->bi_vcnt = 1;
+               bio->bi_idx = 0;
+               bio->bi_size = PAGE_SIZE;
+               bio->bi_end_io = end_io;
+       }
+       return bio;
+}
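
A note on the sector arithmetic above: map_swap_page() returns the slot's
offset within the swap area in page-size units, while bio->bi_sector counts
512-byte sectors, hence the shift by PAGE_SHIFT - 9. Illustration only, not
part of the patch:

	/* With 4 KiB pages, PAGE_SHIFT - 9 == 3, so each swap slot
	 * spans 8 sectors; swap offset 100 maps to sector 800. */
	sector_t sector = (sector_t)100 << (PAGE_SHIFT - 9);
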
 
-       get_swaphandle_info(entry, &offset, &swapf);
-       bdev = swapf->i_bdev;
-       if (bdev) {
-               zones[0] = offset;
-               zones_used = 1;
-               block_size = PAGE_SIZE;
-       } else {
-               int i, j;
-               unsigned int block = offset
-                       << (PAGE_SHIFT - swapf->i_sb->s_blocksize_bits);
+static void end_swap_bio_write(struct bio *bio, int err)
+{
+       const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
+       struct page *page = bio->bi_io_vec[0].bv_page;
 
-               block_size = swapf->i_sb->s_blocksize;
-               for (i=0, j=0; j< PAGE_SIZE ; i++, j += block_size)
-                       if (!(zones[i] = bmap(swapf,block++))) {
-                               printk("rw_swap_page: bad swap file\n");
-                               return 0;
-                       }
-               zones_used = i;
-               bdev = swapf->i_sb->s_bdev;
+       if (!uptodate) {
+               SetPageError(page);
+               /*
+                * We failed to write the page out to swap-space.
+                * Re-dirty the page in order to avoid it being reclaimed.
+                * Also print a dire warning that things will go BAD (tm)
+                * very quickly.
+                *
+                * Also clear PG_reclaim to avoid rotate_reclaimable_page()
+                */
+               set_page_dirty(page);
+               printk(KERN_ALERT "Write-error on swap-device (%u:%u:%Lu)\n",
+                               imajor(bio->bi_bdev->bd_inode),
+                               iminor(bio->bi_bdev->bd_inode),
+                               (unsigned long long)bio->bi_sector);
+               ClearPageReclaim(page);
        }
+       end_page_writeback(page);
+       bio_put(bio);
+}
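
A failed swap write surfaces to the rest of the VM as follows:
end_swap_bio_write() re-dirties the page (so reclaim retries it rather than
discarding data), sets PG_error, and ends writeback. A hypothetical
caller-side sketch, assuming a page already submitted through
swap_writepage():

	wait_on_page_writeback(page);
	if (PageError(page))
		pr_warn("swap write failed; page re-dirtied for retry\n");
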
 
-       /* block_size == PAGE_SIZE/zones_used */
-       brw_page(rw, page, bdev, zones, block_size);
+void end_swap_bio_read(struct bio *bio, int err)
+{
+       const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
+       struct page *page = bio->bi_io_vec[0].bv_page;
 
-       /* Note! For consistency we do all of the logic,
-        * decrementing the page count, and unlocking the page in the
-        * swap lock map - in the IO completion handler.
-        */
-       return 1;
+       if (!uptodate) {
+               SetPageError(page);
+               ClearPageUptodate(page);
+               printk(KERN_ALERT "Read-error on swap-device (%u:%u:%Lu)\n",
+                               imajor(bio->bi_bdev->bd_inode),
+                               iminor(bio->bi_bdev->bd_inode),
+                               (unsigned long long)bio->bi_sector);
+       } else {
+               SetPageUptodate(page);
+       }
+       unlock_page(page);
+       bio_put(bio);
 }
 
 /*
- * A simple wrapper so the base function doesn't need to enforce
- * that all swap pages go through the swap cache! We verify that:
- *  - the page is locked
- *  - it's marked as being swap-cache
- *  - it's associated with the swap inode
+ * We may have stale swap cache pages in memory: notice
+ * them here and get rid of the unnecessary final write.
  */
-void rw_swap_page(int rw, struct page *page)
+int swap_writepage(struct page *page, struct writeback_control *wbc)
 {
-       swp_entry_t entry;
-
-       entry.val = page->index;
+       struct bio *bio;
+       int ret = 0, rw = WRITE;
 
-       if (!PageLocked(page))
-               PAGE_BUG(page);
-       if (!PageSwapCache(page))
-               PAGE_BUG(page);
-       if (!rw_swap_page_base(rw, entry, page))
+       if (try_to_free_swap(page)) {
+               unlock_page(page);
+               goto out;
+       }
+       if (frontswap_put_page(page) == 0) {
+               set_page_writeback(page);
                unlock_page(page);
+               end_page_writeback(page);
+               goto out;
+       }
+       bio = get_swap_bio(GFP_NOIO, page, end_swap_bio_write);
+       if (bio == NULL) {
+               set_page_dirty(page);
+               unlock_page(page);
+               ret = -ENOMEM;
+               goto out;
+       }
+       if (wbc->sync_mode == WB_SYNC_ALL)
+               rw |= REQ_SYNC;
+       count_vm_event(PSWPOUT);
+       set_page_writeback(page);
+       unlock_page(page);
+       submit_bio(rw, bio);
+out:
+       return ret;
 }
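
For context, swap_writepage() is reached through the swap address space's
writepage operation. A sketch of the wiring as it appears in mm/swap_state.c
for kernels of this vintage (the exact field set may differ in this tree):

	static const struct address_space_operations swap_aops = {
		.writepage	= swap_writepage,
		.set_page_dirty	= __set_page_dirty_no_writeback,
		.migratepage	= migrate_page,
	};
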
 
-/*
- * The swap lock map insists that pages be in the page cache!
- * Therefore we can't use it.  Later when we can remove the need for the
- * lock map and we can reduce the number of functions exported.
- */
-void rw_swap_page_nolock(int rw, swp_entry_t entry, char *buf)
+int swap_readpage(struct page *page)
 {
-       struct page *page = virt_to_page(buf);
-       
-       if (!PageLocked(page))
-               PAGE_BUG(page);
-       if (page->mapping)
-               PAGE_BUG(page);
-       /* needs sync_page to wait I/O completation */
-       page->mapping = &swapper_space;
-       if (!rw_swap_page_base(rw, entry, page))
+       struct bio *bio;
+       int ret = 0;
+
+       VM_BUG_ON(!PageLocked(page));
+       VM_BUG_ON(PageUptodate(page));
+       if (frontswap_get_page(page) == 0) {
+               SetPageUptodate(page);
                unlock_page(page);
-       wait_on_page_locked(page);
-       page->mapping = NULL;
+               goto out;
+       }
+       bio = get_swap_bio(GFP_KERNEL, page, end_swap_bio_read);
+       if (bio == NULL) {
+               unlock_page(page);
+               ret = -ENOMEM;
+               goto out;
+       }
+       count_vm_event(PSWPIN);
+       submit_bio(READ, bio);
+out:
+       return ret;
 }
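
swap_readpage() is asynchronous: the caller must hold the page lock, and
end_swap_bio_read() drops it when the I/O completes, so waiting on the lock
waits for the read. A hypothetical synchronous wrapper, not part of this
tree:

	static int swap_readpage_sync(struct page *page)
	{
		int ret;

		lock_page(page);
		ret = swap_readpage(page);	/* error paths unlock internally */
		if (ret)
			return ret;
		wait_on_page_locked(page);
		return PageUptodate(page) ? 0 : -EIO;
	}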