*
* Rewritten to use page cache, (C) 1998 Stephen Tweedie
*/
-
#include <linux/mm.h>
+#include <linux/gfp.h>
#include <linux/kernel_stat.h>
#include <linux/swap.h>
+#include <linux/swapops.h>
#include <linux/init.h>
#include <linux/pagemap.h>
#include <linux/backing-dev.h>
+#include <linux/pagevec.h>
+#include <linux/migrate.h>
+#include <linux/page_cgroup.h>
#include <asm/pgtable.h>
-static struct backing_dev_info swap_backing_dev_info = {
- .ra_pages = 0, /* No readahead */
- .memory_backed = 1, /* Does not contribute to dirty memory */
+/*
+ * swapper_space is a fiction, retained to simplify the path through
+ * vmscan's shrink_page_list.
+ */
+static const struct address_space_operations swap_aops = {
+ .writepage = swap_writepage,
+ .set_page_dirty = __set_page_dirty_no_writeback,
+ .migratepage = migrate_page,
};
-extern struct address_space_operations swap_aops;
+static struct backing_dev_info swap_backing_dev_info = {
+ .name = "swap",
+ .capabilities = BDI_CAP_NO_ACCT_AND_WRITEBACK | BDI_CAP_SWAP_BACKED,
+};
struct address_space swapper_space = {
- .page_tree = RADIX_TREE_INIT(GFP_ATOMIC),
- .tree_lock = SPIN_LOCK_UNLOCKED,
- .clean_pages = LIST_HEAD_INIT(swapper_space.clean_pages),
+ .page_tree = RADIX_TREE_INIT(GFP_ATOMIC|__GFP_NOWARN),
+ .tree_lock = __SPIN_LOCK_UNLOCKED(swapper_space.tree_lock),
.a_ops = &swap_aops,
+ .i_mmap_nonlinear = LIST_HEAD_INIT(swapper_space.i_mmap_nonlinear),
.backing_dev_info = &swap_backing_dev_info,
- .i_mmap = LIST_HEAD_INIT(swapper_space.i_mmap),
- .i_mmap_shared = LIST_HEAD_INIT(swapper_space.i_mmap_shared),
- .i_shared_sem = __MUTEX_INITIALIZER(swapper_space.i_shared_sem),
- .truncate_count = ATOMIC_INIT(0),
- .private_lock = SPIN_LOCK_UNLOCKED,
- .private_list = LIST_HEAD_INIT(swapper_space.private_list),
};
#define INC_CACHE_INFO(x) do { swap_cache_info.x++; } while (0)
unsigned long del_total;
unsigned long find_success;
unsigned long find_total;
- unsigned long noent_race;
- unsigned long exist_race;
} swap_cache_info;
void show_swap_cache_info(void)
{
- printk("Swap cache: add %lu, delete %lu, find %lu/%lu, race %lu+%lu\n",
+ printk("%lu pages in swap cache\n", total_swapcache_pages);
+ printk("Swap cache stats: add %lu, delete %lu, find %lu/%lu\n",
swap_cache_info.add_total, swap_cache_info.del_total,
- swap_cache_info.find_success, swap_cache_info.find_total,
- swap_cache_info.noent_race, swap_cache_info.exist_race);
+ swap_cache_info.find_success, swap_cache_info.find_total);
+ printk("Free swap = %ldkB\n", nr_swap_pages << (PAGE_SHIFT - 10));
+ printk("Total swap = %lukB\n", total_swap_pages << (PAGE_SHIFT - 10));
}
-static int add_to_swap_cache(struct page *page, swp_entry_t entry)
+/*
+ * __add_to_swap_cache resembles add_to_page_cache_locked on swapper_space,
+ * but sets the SwapCache flag and page private instead of mapping and index.
+ */
+static int __add_to_swap_cache(struct page *page, swp_entry_t entry)
{
int error;
- if (page->mapping)
- BUG();
- if (!swap_duplicate(entry)) {
- INC_CACHE_INFO(noent_race);
- return -ENOENT;
+ VM_BUG_ON(!PageLocked(page));
+ VM_BUG_ON(PageSwapCache(page));
+ VM_BUG_ON(!PageSwapBacked(page));
+
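+ /*
+ * Pin the page, mark it SwapCache and record the swap entry in
+ * page private before it is inserted into the radix tree.
+ */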
+ page_cache_get(page);
+ SetPageSwapCache(page);
+ set_page_private(page, entry.val);
+
+ spin_lock_irq(&swapper_space.tree_lock);
+ error = radix_tree_insert(&swapper_space.page_tree, entry.val, page);
+ if (likely(!error)) {
+ total_swapcache_pages++;
+ __inc_zone_page_state(page, NR_FILE_PAGES);
+ INC_CACHE_INFO(add_total);
}
- error = add_to_page_cache(page, &swapper_space, entry.val, GFP_KERNEL);
- /*
- * Anon pages are already on the LRU, we don't run lru_cache_add here.
- */
- if (error != 0) {
- swap_free(entry);
- if (error == -EEXIST)
- INC_CACHE_INFO(exist_race);
- return error;
+ spin_unlock_irq(&swapper_space.tree_lock);
+
+ if (unlikely(error)) {
+ /*
+ * Only a context which has set the SWAP_HAS_CACHE flag
+ * would call add_to_swap_cache().
+ * So add_to_swap_cache() doesn't return -EEXIST.
+ */
+ VM_BUG_ON(error == -EEXIST);
+ set_page_private(page, 0UL);
+ ClearPageSwapCache(page);
+ page_cache_release(page);
}
- if (!PageLocked(page))
- BUG();
- if (!PageSwapCache(page))
- BUG();
- INC_CACHE_INFO(add_total);
- return 0;
+
+ return error;
+}
+
+
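+/*
+ * add_to_swap_cache() preloads the radix tree with gfp_mask (which may
+ * sleep, if gfp_mask allows it) and then lets __add_to_swap_cache() do
+ * the insertion under the irq-disabled tree_lock.
+ */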
+int add_to_swap_cache(struct page *page, swp_entry_t entry, gfp_t gfp_mask)
+{
+ int error;
+
+ error = radix_tree_preload(gfp_mask);
+ if (!error) {
+ error = __add_to_swap_cache(page, entry);
+ radix_tree_preload_end();
+ }
+ return error;
}
/*
*/
void __delete_from_swap_cache(struct page *page)
{
- BUG_ON(!PageLocked(page));
- BUG_ON(!PageSwapCache(page));
- BUG_ON(PageWriteback(page));
- __remove_from_page_cache(page);
+ VM_BUG_ON(!PageLocked(page));
+ VM_BUG_ON(!PageSwapCache(page));
+ VM_BUG_ON(PageWriteback(page));
+
+ radix_tree_delete(&swapper_space.page_tree, page_private(page));
+ set_page_private(page, 0);
+ ClearPageSwapCache(page);
+ total_swapcache_pages--;
+ __dec_zone_page_state(page, NR_FILE_PAGES);
INC_CACHE_INFO(del_total);
}
* Allocate swap space for the page and add the page to the
* swap cache. Caller needs to hold the page lock.
*/
-int add_to_swap(struct page * page)
+int add_to_swap(struct page *page)
{
swp_entry_t entry;
- int pf_flags;
int err;
- if (!PageLocked(page))
- BUG();
+ VM_BUG_ON(!PageLocked(page));
+ VM_BUG_ON(!PageUptodate(page));
- for (;;) {
- entry = get_swap_page();
- if (!entry.val)
- return 0;
+ entry = get_swap_page();
+ if (!entry.val)
+ return 0;
- /* Radix-tree node allocations are performing
- * GFP_ATOMIC allocations under PF_MEMALLOC.
- * They can completely exhaust the page allocator.
- *
- * So PF_MEMALLOC is dropped here. This causes the slab
- * allocations to fail earlier, so radix-tree nodes will
- * then be allocated from the mempool reserves.
- *
- * We're still using __GFP_HIGH for radix-tree node
- * allocations, so some of the emergency pools are available,
- * just not all of them.
- */
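+ /*
+ * A transparent huge page cannot be added to the swap cache as a
+ * whole: split it first, and give the swap slot back if that fails.
+ */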
+ if (unlikely(PageTransHuge(page)))
+ if (unlikely(split_huge_page(page))) {
+ swapcache_free(entry, NULL);
+ return 0;
+ }
- pf_flags = current->flags;
- current->flags &= ~PF_MEMALLOC;
+ /*
+ * Radix-tree node allocations from PF_MEMALLOC contexts could
+ * completely exhaust the page allocator. __GFP_NOMEMALLOC
+ * stops emergency reserves from being allocated.
+ *
+ * TODO: this could cause a theoretical memory reclaim
+ * deadlock in the swap out path.
+ */
+ /*
+ * Add it to the swap cache and mark it dirty
+ */
+ err = add_to_swap_cache(page, entry,
+ __GFP_HIGH|__GFP_NOMEMALLOC|__GFP_NOWARN);
+ if (!err) { /* Success */
+ SetPageDirty(page);
+ return 1;
+ } else { /* -ENOMEM radix-tree allocation failure */
/*
- * Add it to the swap cache and mark it dirty
+ * add_to_swap_cache() doesn't return -EEXIST, so we can safely
+ * clear the SWAP_HAS_CACHE flag.
*/
- err = add_to_page_cache(page, &swapper_space,
- entry.val, GFP_ATOMIC);
-
- if (pf_flags & PF_MEMALLOC)
- current->flags |= PF_MEMALLOC;
-
- switch (err) {
- case 0: /* Success */
- SetPageUptodate(page);
- __clear_page_dirty(page);
- set_page_dirty(page);
- INC_CACHE_INFO(add_total);
- return 1;
- case -EEXIST:
- /* Raced with "speculative" read_swap_cache_async */
- INC_CACHE_INFO(exist_race);
- swap_free(entry);
- continue;
- default:
- /* -ENOMEM radix-tree allocation failure */
- swap_free(entry);
- return 0;
- }
+ swapcache_free(entry, NULL);
+ return 0;
}
}
{
swp_entry_t entry;
- BUG_ON(!PageLocked(page));
- BUG_ON(PageWriteback(page));
- BUG_ON(PagePrivate(page));
-
- entry.val = page->index;
+ entry.val = page_private(page);
spin_lock_irq(&swapper_space.tree_lock);
__delete_from_swap_cache(page);
spin_unlock_irq(&swapper_space.tree_lock);
- swap_free(entry);
+ swapcache_free(entry, page);
page_cache_release(page);
}
-int move_to_swap_cache(struct page *page, swp_entry_t entry)
-{
- struct address_space *mapping = page->mapping;
- int err;
-
- spin_lock_irq(&swapper_space.tree_lock);
- spin_lock(&mapping->tree_lock);
-
- err = radix_tree_insert(&swapper_space.page_tree, entry.val, page);
- if (!err) {
- __remove_from_page_cache(page);
- ___add_to_page_cache(page, &swapper_space, entry.val);
- }
-
- spin_unlock(&mapping->tree_lock);
- spin_unlock_irq(&swapper_space.tree_lock);
-
- if (!err) {
- if (!swap_duplicate(entry))
- BUG();
- BUG_ON(PageDirty(page));
- set_page_dirty(page);
- INC_CACHE_INFO(add_total);
- } else if (err == -EEXIST)
- INC_CACHE_INFO(exist_race);
- return err;
-}
-
-int move_from_swap_cache(struct page *page, unsigned long index,
- struct address_space *mapping)
-{
- swp_entry_t entry;
- int err;
-
- BUG_ON(!PageLocked(page));
- BUG_ON(PageWriteback(page));
- BUG_ON(PagePrivate(page));
-
- entry.val = page->index;
-
- spin_lock_irq(&swapper_space.tree_lock);
- spin_lock(&mapping->tree_lock);
-
- err = radix_tree_insert(&mapping->page_tree, index, page);
- if (!err) {
- __delete_from_swap_cache(page);
- ___add_to_page_cache(page, mapping, index);
- }
-
- spin_unlock(&mapping->tree_lock);
- spin_unlock_irq(&swapper_space.tree_lock);
-
- if (!err) {
- swap_free(entry);
- __clear_page_dirty(page);
- set_page_dirty(page);
- }
- return err;
-}
-
-
/*
* If we are the only user, then try to free up the swap cache.
*
* Its ok to check for PageSwapCache without the page lock
- * here because we are going to recheck again inside
- * exclusive_swap_page() _with_ the lock.
+ * here because we are going to recheck again inside
+ * try_to_free_swap() _with_ the lock.
* - Marcelo
*/
static inline void free_swap_cache(struct page *page)
{
- if (PageSwapCache(page) && !TestSetPageLocked(page)) {
- remove_exclusive_swap_page(page);
+ if (PageSwapCache(page) && !page_mapped(page) && trylock_page(page)) {
+ try_to_free_swap(page);
unlock_page(page);
}
}
/*
* Perform a free_page(), also freeing any swap cache associated with
- * this page if it is the last user of the page. Can not do a lock_page,
- * as we are holding the page_table_lock spinlock.
+ * this page if it is the last user of the page.
*/
void free_page_and_swap_cache(struct page *page)
{
*/
void free_pages_and_swap_cache(struct page **pages, int nr)
{
- int chunk = 16;
struct page **pagep = pages;
lru_add_drain();
while (nr) {
- int todo = min(chunk, nr);
+ int todo = min(nr, PAGEVEC_SIZE);
int i;
for (i = 0; i < todo; i++)
*/
struct page * lookup_swap_cache(swp_entry_t entry)
{
- struct page *found;
+ struct page *page;
- found = find_get_page(&swapper_space, entry.val);
- /*
- * Unsafe to assert PageSwapCache and mapping on page found:
- * if SMP nothing prevents swapoff from deleting this page from
- * the swap cache at this moment. find_lock_page would prevent
- * that, but no need to change: we _have_ got the right page.
- */
- INC_CACHE_INFO(find_total);
- if (found)
+ page = find_get_page(&swapper_space, entry.val);
+
+ if (page)
INC_CACHE_INFO(find_success);
- return found;
+
+ INC_CACHE_INFO(find_total);
+ return page;
}
/*
* A failure return means that either the page allocation failed or that
* the swap entry is no longer in use.
*/
-struct page * read_swap_cache_async(swp_entry_t entry)
+struct page *read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
+ struct vm_area_struct *vma, unsigned long addr)
{
struct page *found_page, *new_page = NULL;
int err;
/*
* First check the swap cache. Since this is normally
* called after lookup_swap_cache() failed, re-calling
- * that would confuse statistics: use find_get_page()
- * directly.
+ * that would confuse statistics.
*/
found_page = find_get_page(&swapper_space, entry.val);
if (found_page)
* Get a new page to read into from swap.
*/
if (!new_page) {
- new_page = alloc_page(GFP_HIGHUSER);
+ new_page = alloc_page_vma(gfp_mask, vma, addr);
if (!new_page)
break; /* Out of memory */
}
/*
- * Associate the page with swap entry in the swap cache.
- * May fail (-ENOENT) if swap entry has been freed since
- * our caller observed it. May fail (-EEXIST) if there
- * is already a page associated with this entry in the
- * swap cache: added by a racing read_swap_cache_async,
- * or by try_to_swap_out (or shmem_writepage) re-using
- * the just freed swap entry for an existing page.
- * May fail (-ENOMEM) if radix-tree node allocation failed.
+ * call radix_tree_preload() while we can wait.
+ */
+ err = radix_tree_preload(gfp_mask & GFP_KERNEL);
+ if (err)
+ break;
+
+ /*
+ * Swap entry may have been freed since our caller observed it.
*/
- err = add_to_swap_cache(new_page, entry);
- if (!err) {
+ err = swapcache_prepare(entry);
+ if (err == -EEXIST) { /* raced with another swapin of this entry */
+ radix_tree_preload_end();
+ continue;
+ }
+ if (err) { /* the swap entry is no longer in use */
+ radix_tree_preload_end();
+ break;
+ }
+
+ /* May fail (-ENOMEM) if radix-tree node allocation failed. */
+ __set_page_locked(new_page);
+ SetPageSwapBacked(new_page);
+ err = __add_to_swap_cache(new_page, entry);
+ if (likely(!err)) {
+ radix_tree_preload_end();
/*
* Initiate read into locked page and return.
*/
- lru_cache_add_active(new_page);
- swap_readpage(NULL, new_page);
+ lru_cache_add_anon(new_page);
+ swap_readpage(new_page);
return new_page;
}
- } while (err != -ENOENT && err != -ENOMEM);
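+ /*
+ * The add failed: end the radix-tree preload, clear the page's
+ * swap-backed state and unlock it before giving up.
+ */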
+ radix_tree_preload_end();
+ ClearPageSwapBacked(new_page);
+ __clear_page_locked(new_page);
+ /*
+ * add_to_swap_cache() doesn't return -EEXIST, so we can safely
+ * clear the SWAP_HAS_CACHE flag.
+ */
+ swapcache_free(entry, NULL);
+ } while (err != -ENOMEM);
if (new_page)
page_cache_release(new_page);
return found_page;
}
+
+/**
+ * swapin_readahead - swap in pages in hope we need them soon
+ * @entry: swap entry of this memory
+ * @gfp_mask: memory allocation flags
+ * @vma: user vma this address belongs to
+ * @addr: target address for mempolicy
+ *
+ * Returns the struct page for entry and addr, after queueing swapin.
+ *
+ * Primitive swap readahead code. We simply read an aligned block of
+ * (1 << page_cluster) entries in the swap area. This method is chosen
+ * because it doesn't cost us any seek time. We also make sure to queue
+ * the 'original' request together with the readahead ones...
+ *
+ * This has been extended to use the NUMA policies from the mm triggering
+ * the readahead.
+ *
+ * Caller must hold down_read on the vma->vm_mm if vma is not NULL.
+ */
+struct page *swapin_readahead(swp_entry_t entry, gfp_t gfp_mask,
+ struct vm_area_struct *vma, unsigned long addr)
+{
+ struct page *page;
+ unsigned long offset = swp_offset(entry);
+ unsigned long start_offset, end_offset;
+ unsigned long mask = (1UL << page_cluster) - 1;
+
+ /* Read a page_cluster sized and aligned cluster around offset. */
+ start_offset = offset & ~mask;
+ end_offset = offset | mask;
+ if (!start_offset) /* First page is swap header. */
+ start_offset++;
+
+ for (offset = start_offset; offset <= end_offset ; offset++) {
+ /* Ok, do the async read-ahead now */
+ page = read_swap_cache_async(swp_entry(swp_type(entry), offset),
+ gfp_mask, vma, addr);
+ if (!page)
+ continue;
+ page_cache_release(page);
+ }
+ lru_add_drain(); /* Push any new pages onto the LRU now */
+ return read_swap_cache_async(entry, gfp_mask, vma, addr);
+}