*/
#include <linux/mm.h>
+#include <linux/export.h>
+#include <linux/swap.h>
+#include <linux/bio.h>
#include <linux/pagemap.h>
#include <linux/mempool.h>
#include <linux/blkdev.h>
+#include <linux/init.h>
+#include <linux/hash.h>
+#include <linux/highmem.h>
+#include <linux/kgdb.h>
+#include <asm/tlbflush.h>
-static mempool_t *page_pool, *isa_page_pool;
-static void *page_pool_alloc(int gfp_mask, void *data)
-{
- return alloc_page(gfp_mask);
-}
-
-static void page_pool_free(void *page, void *data)
-{
- __free_page(page);
-}
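+/*
+ * Per-CPU nesting index for the kmap_atomic() slot stack; the
+ * kmap_atomic_idx_push()/kmap_atomic_idx_pop() helpers in <linux/highmem.h>
+ * use it to track how deep the current CPU's atomic-kmap stack is.
+ */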
+#if defined(CONFIG_HIGHMEM) || defined(CONFIG_X86_32)
+DEFINE_PER_CPU(int, __kmap_atomic_idx);
+#endif
/*
 * Virtual_count is not a pure "count".
 *  0 means that it is not mapped, and has not been mapped
 *    since a TLB flush - it is usable.
 *  1 means that there are no users, but it has been mapped
 *    since the last TLB flush - so we can't use it.
 *  n means that there are (n-1) current users of it.
 */
#ifdef CONFIG_HIGHMEM
+
+unsigned long totalhigh_pages __read_mostly;
+EXPORT_SYMBOL(totalhigh_pages);
+
+
+EXPORT_PER_CPU_SYMBOL(__kmap_atomic_idx);
+
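+/*
+ * Total number of free pages in all highmem zones; ZONE_MOVABLE is counted
+ * as well when it is carved out of highmem (zone_movable_is_highmem()).
+ */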
+unsigned int nr_free_highpages (void)
+{
+ pg_data_t *pgdat;
+ unsigned int pages = 0;
+
+ for_each_online_pgdat(pgdat) {
+ pages += zone_page_state(&pgdat->node_zones[ZONE_HIGHMEM],
+ NR_FREE_PAGES);
+ if (zone_movable_is_highmem())
+ pages += zone_page_state(
+ &pgdat->node_zones[ZONE_MOVABLE],
+ NR_FREE_PAGES);
+ }
+
+ return pages;
+}
+
static int pkmap_count[LAST_PKMAP];
static unsigned int last_pkmap_nr;
-static spinlock_t kmap_lock = SPIN_LOCK_UNLOCKED;
+static __cacheline_aligned_in_smp DEFINE_SPINLOCK(kmap_lock);
pte_t * pkmap_page_table;
static DECLARE_WAIT_QUEUE_HEAD(pkmap_map_wait);
+/*
+ * Most architectures have no use for kmap_high_get(), so let's abstract
+ * the disabling of IRQs out of the locking in that case to avoid potentially
+ * useless overhead.
+ */
+#ifdef ARCH_NEEDS_KMAP_HIGH_GET
+#define lock_kmap() spin_lock_irq(&kmap_lock)
+#define unlock_kmap() spin_unlock_irq(&kmap_lock)
+#define lock_kmap_any(flags) spin_lock_irqsave(&kmap_lock, flags)
+#define unlock_kmap_any(flags) spin_unlock_irqrestore(&kmap_lock, flags)
+#else
+#define lock_kmap() spin_lock(&kmap_lock)
+#define unlock_kmap() spin_unlock(&kmap_lock)
+#define lock_kmap_any(flags) \
+ do { spin_lock(&kmap_lock); (void)(flags); } while (0)
+#define unlock_kmap_any(flags) \
+ do { spin_unlock(&kmap_lock); (void)(flags); } while (0)
+#endif
+
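+/*
+ * Reclaim pkmap slots that no longer have users: clear their PTEs and
+ * page->virtual bindings, then do one ranged TLB flush over the whole
+ * pkmap window.
+ */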
static void flush_all_zero_pkmaps(void)
{
int i;
+ int need_flush = 0;
- flush_cache_all();
+ flush_cache_kmaps();
for (i = 0; i < LAST_PKMAP; i++) {
struct page *page;
pkmap_count[i] = 0;
/* sanity check */
- if (pte_none(pkmap_page_table[i]))
- BUG();
+ BUG_ON(pte_none(pkmap_page_table[i]));
/*
 * Don't need an atomic fetch-and-clear op here;
 * no-one has the page mapped, and cannot get at
 * its virtual address (and hence PTE) without first
 * getting the kmap_lock (which is held here).
 * So no dangers, even with speculative execution.
*/
page = pte_page(pkmap_page_table[i]);
- pte_clear(&pkmap_page_table[i]);
+ pte_clear(&init_mm, (unsigned long)page_address(page),
+ &pkmap_page_table[i]);
- page->virtual = NULL;
+ set_page_address(page, NULL);
+ need_flush = 1;
}
- flush_tlb_all();
+ if (need_flush)
+ flush_tlb_kernel_range(PKMAP_ADDR(0), PKMAP_ADDR(LAST_PKMAP));
+}
+
+/**
+ * kmap_flush_unused - flush all unused kmap mappings in order to remove stray mappings
+ */
+void kmap_flush_unused(void)
+{
+ lock_kmap();
+ flush_all_zero_pkmaps();
+ unlock_kmap();
}
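+/*
+ * Find a free pkmap slot for @page, sleeping on pkmap_map_wait (with
+ * kmap_lock dropped) until one becomes available, then install the PTE
+ * and record the page->virtual mapping.
+ */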
static inline unsigned long map_new_virtual(struct page *page)
{
DECLARE_WAITQUEUE(wait, current);
- current->state = TASK_UNINTERRUPTIBLE;
+ __set_current_state(TASK_UNINTERRUPTIBLE);
add_wait_queue(&pkmap_map_wait, &wait);
- spin_unlock(&kmap_lock);
+ unlock_kmap();
schedule();
remove_wait_queue(&pkmap_map_wait, &wait);
- spin_lock(&kmap_lock);
+ lock_kmap();
/* Somebody else might have mapped it while we slept */
- if (page->virtual)
- return (unsigned long) page->virtual;
+ if (page_address(page))
+ return (unsigned long)page_address(page);
/* Re-start */
goto start;
}
}
vaddr = PKMAP_ADDR(last_pkmap_nr);
- set_pte(&(pkmap_page_table[last_pkmap_nr]), mk_pte(page, kmap_prot));
+ set_pte_at(&init_mm, vaddr,
+ &(pkmap_page_table[last_pkmap_nr]), mk_pte(page, kmap_prot));
pkmap_count[last_pkmap_nr] = 1;
- page->virtual = (void *) vaddr;
+ set_page_address(page, (void *)vaddr);
return vaddr;
}
+/**
+ * kmap_high - map a highmem page into memory
+ * @page: &struct page to map
+ *
+ * Returns the page's virtual memory address.
+ *
+ * We cannot call this from interrupts, as it may block.
+ */
void *kmap_high(struct page *page)
{
unsigned long vaddr;
/*
* For highmem pages, we can't trust "virtual" until
* after we have the lock.
- *
- * We cannot call this from interrupts, as it may block
*/
- spin_lock(&kmap_lock);
- vaddr = (unsigned long) page->virtual;
+ lock_kmap();
+ vaddr = (unsigned long)page_address(page);
if (!vaddr)
vaddr = map_new_virtual(page);
pkmap_count[PKMAP_NR(vaddr)]++;
- if (pkmap_count[PKMAP_NR(vaddr)] < 2)
- BUG();
- spin_unlock(&kmap_lock);
+ BUG_ON(pkmap_count[PKMAP_NR(vaddr)] < 2);
+ unlock_kmap();
return (void*) vaddr;
}
+EXPORT_SYMBOL(kmap_high);
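+/*
+ * Illustrative use (a sketch, not part of this file): callers normally go
+ * through the arch kmap()/kunmap() wrappers, which fall back to kmap_high()
+ * only for highmem pages:
+ *
+ *	void *vaddr = kmap(page);	(may sleep, so never from IRQ context)
+ *	memcpy(vaddr, buf, len);	(buf and len are hypothetical)
+ *	kunmap(page);
+ */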
+
+#ifdef ARCH_NEEDS_KMAP_HIGH_GET
+/**
+ * kmap_high_get - pin a highmem page into memory
+ * @page: &struct page to pin
+ *
+ * Returns the page's current virtual memory address, or NULL if no mapping
+ * exists. If and only if a non-NULL address is returned, a matching call
+ * to kunmap_high() is necessary.
+ *
+ * This can be called from any context.
+ */
+void *kmap_high_get(struct page *page)
+{
+ unsigned long vaddr, flags;
+
+ lock_kmap_any(flags);
+ vaddr = (unsigned long)page_address(page);
+ if (vaddr) {
+ BUG_ON(pkmap_count[PKMAP_NR(vaddr)] < 1);
+ pkmap_count[PKMAP_NR(vaddr)]++;
+ }
+ unlock_kmap_any(flags);
+ return (void*) vaddr;
+}
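+/*
+ * Illustrative caller (sketch): an atomic context that must pin an already
+ * existing mapping, e.g. an ARM DMA cache-maintenance path, would do:
+ *
+ *	void *vaddr = kmap_high_get(page);
+ *	if (vaddr) {
+ *		do_cache_maintenance(vaddr);	(hypothetical helper)
+ *		kunmap_high(page);
+ *	}
+ */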
+#endif
+
+/**
+ * kunmap_high - unmap a highmem page
+ * @page: &struct page to unmap
+ *
+ * If ARCH_NEEDS_KMAP_HIGH_GET is not defined then this may be called
+ * only from user context.
+ */
void kunmap_high(struct page *page)
{
unsigned long vaddr;
unsigned long nr;
+ unsigned long flags;
int need_wakeup;
- spin_lock(&kmap_lock);
- vaddr = (unsigned long) page->virtual;
- if (!vaddr)
- BUG();
+ lock_kmap_any(flags);
+ vaddr = (unsigned long)page_address(page);
+ BUG_ON(!vaddr);
nr = PKMAP_NR(vaddr);
/*
 * Avoid an unnecessary wake_up: only bother waking waiters if somebody
 * is actually sleeping on pkmap_map_wait.
 */
need_wakeup = waitqueue_active(&pkmap_map_wait);
}
- spin_unlock(&kmap_lock);
+ unlock_kmap_any(flags);
/* do wake-up, if needed, race-free outside of the spin lock */
if (need_wakeup)
wake_up(&pkmap_map_wait);
}
-#define POOL_SIZE 64
-
-static __init int init_emergency_pool(void)
-{
- struct sysinfo i;
- si_meminfo(&i);
- si_swapinfo(&i);
-
- if (!i.totalhigh)
- return 0;
-
- page_pool = mempool_create(POOL_SIZE, page_pool_alloc, page_pool_free, NULL);
- if (!page_pool)
- BUG();
- printk("highmem bounce pool size: %d pages\n", POOL_SIZE);
+EXPORT_SYMBOL(kunmap_high);
+#endif
- return 0;
-}
+#if defined(HASHED_PAGE_VIRTUAL)
-__initcall(init_emergency_pool);
+#define PA_HASH_ORDER 7
/*
- * highmem version, map in to vec
+ * Describes one page->virtual association
*/
-static inline void bounce_copy_vec(struct bio_vec *to, unsigned char *vfrom)
-{
- unsigned long flags;
- unsigned char *vto;
-
- local_irq_save(flags);
- vto = kmap_atomic(to->bv_page, KM_BOUNCE_READ);
- memcpy(vto + to->bv_offset, vfrom, to->bv_len);
- kunmap_atomic(vto, KM_BOUNCE_READ);
- local_irq_restore(flags);
-}
-
-#else /* CONFIG_HIGHMEM */
-
-#define bounce_copy_vec(to, vfrom) \
- memcpy(page_address((to)->bv_page) + (to)->bv_offset, vfrom, (to)->bv_len)
-
-#endif
-
-#define ISA_POOL_SIZE 16
+struct page_address_map {
+ struct page *page;
+ void *virtual;
+ struct list_head list;
+};
/*
- * gets called "every" time someone init's a queue with BLK_BOUNCE_ISA
- * as the max address, so check if the pool has already been created.
+ * page_address_map freelist, allocated from page_address_maps.
*/
-int init_emergency_isa_pool(void)
-{
- if (isa_page_pool)
- return 0;
-
- isa_page_pool = mempool_create(ISA_POOL_SIZE, page_pool_alloc, page_pool_free, NULL);
- if (!isa_page_pool)
- BUG();
-
- printk("isa bounce pool size: %d pages\n", ISA_POOL_SIZE);
- return 0;
-}
+static struct list_head page_address_pool; /* freelist */
+static spinlock_t pool_lock; /* protects page_address_pool */
/*
- * Simple bounce buffer support for highmem pages. Depending on the
- * queue gfp mask set, *to may or may not be a highmem page. kmap it
- * always, it will do the Right Thing
+ * Hash table bucket
*/
-static inline void copy_to_high_bio_irq(struct bio *to, struct bio *from)
-{
- unsigned char *vfrom;
- struct bio_vec *tovec, *fromvec;
- int i;
-
- __bio_for_each_segment(tovec, to, i, 0) {
- fromvec = &from->bi_io_vec[i];
-
- /*
- * not bounced
- */
- if (tovec->bv_page == fromvec->bv_page)
- continue;
-
- vfrom = page_address(fromvec->bv_page) + fromvec->bv_offset;
+static struct page_address_slot {
+ struct list_head lh; /* List of page_address_maps */
+ spinlock_t lock; /* Protect this bucket's list */
+} ____cacheline_aligned_in_smp page_address_htable[1<<PA_HASH_ORDER];
- bounce_copy_vec(tovec, vfrom);
- }
+static struct page_address_slot *page_slot(const struct page *page)
+{
+ return &page_address_htable[hash_ptr(page, PA_HASH_ORDER)];
}
-static inline int bounce_end_io (struct bio *bio, int nr_sectors, mempool_t *pool)
+/**
+ * page_address - get the mapped virtual address of a page
+ * @page: &struct page to get the virtual address of
+ *
+ * Returns the page's virtual address.
+ */
+void *page_address(const struct page *page)
{
- struct bio *bio_orig = bio->bi_private;
- struct bio_vec *bvec, *org_vec;
- int ret, i;
-
- if (!test_bit(BIO_UPTODATE, &bio->bi_flags))
- goto out_eio;
-
- set_bit(BIO_UPTODATE, &bio_orig->bi_flags);
-
- /*
- * free up bounce indirect pages used
- */
- __bio_for_each_segment(bvec, bio, i, 0) {
- org_vec = &bio_orig->bi_io_vec[i];
- if (bvec->bv_page == org_vec->bv_page)
- continue;
-
- mempool_free(bvec->bv_page, pool);
+ unsigned long flags;
+ void *ret;
+ struct page_address_slot *pas;
+
+ if (!PageHighMem(page))
+ return lowmem_page_address(page);
+
+ pas = page_slot(page);
+ ret = NULL;
+ spin_lock_irqsave(&pas->lock, flags);
+ if (!list_empty(&pas->lh)) {
+ struct page_address_map *pam;
+
+ list_for_each_entry(pam, &pas->lh, list) {
+ if (pam->page == page) {
+ ret = pam->virtual;
+ goto done;
+ }
+ }
}
-
-out_eio:
- ret = bio_orig->bi_end_io(bio_orig, nr_sectors);
-
- bio_put(bio);
+done:
+ spin_unlock_irqrestore(&pas->lock, flags);
return ret;
}
-static int bounce_end_io_write(struct bio *bio, int nr_sectors)
-{
- return bounce_end_io(bio, nr_sectors, page_pool);
-}
-
-static int bounce_end_io_write_isa(struct bio *bio, int nr_sectors)
-{
- return bounce_end_io(bio, nr_sectors, isa_page_pool);
-}
-
-static inline int __bounce_end_io_read(struct bio *bio, int nr_sectors,
- mempool_t *pool)
-{
- struct bio *bio_orig = bio->bi_private;
-
- if (test_bit(BIO_UPTODATE, &bio->bi_flags))
- copy_to_high_bio_irq(bio_orig, bio);
-
- return bounce_end_io(bio, nr_sectors, pool);
-}
+EXPORT_SYMBOL(page_address);
-static int bounce_end_io_read(struct bio *bio, int nr_sectors)
-{
- return __bounce_end_io_read(bio, nr_sectors, page_pool);
-}
-
-static int bounce_end_io_read_isa(struct bio *bio, int nr_sectors)
-{
- return __bounce_end_io_read(bio, nr_sectors, isa_page_pool);
-}
-
-void create_bounce(unsigned long pfn, int gfp, struct bio **bio_orig)
+/**
+ * set_page_address - set a page's virtual address
+ * @page: &struct page to set
+ * @virtual: virtual address to use
+ */
+void set_page_address(struct page *page, void *virtual)
{
- struct page *page;
- struct bio *bio = NULL;
- int i, rw = bio_data_dir(*bio_orig), bio_gfp;
- struct bio_vec *to, *from;
- mempool_t *pool;
-
- BUG_ON((*bio_orig)->bi_idx);
-
- /*
- * for non-isa bounce case, just check if the bounce pfn is equal
- * to or bigger than the highest pfn in the system -- in that case,
- * don't waste time iterating over bio segments
- */
- if (!(gfp & GFP_DMA)) {
- if (pfn >= blk_max_pfn)
- return;
-
-#ifndef CONFIG_HIGHMEM
- /*
- * should not hit for non-highmem case
- */
- BUG();
-#endif
- bio_gfp = GFP_NOHIGHIO;
- pool = page_pool;
- } else {
- BUG_ON(!isa_page_pool);
- bio_gfp = GFP_NOIO;
- pool = isa_page_pool;
- }
-
- bio_for_each_segment(from, *bio_orig, i) {
- page = from->bv_page;
-
- /*
- * is destination page below bounce pfn?
- */
- if ((page - page->zone->zone_mem_map) + (page->zone->zone_start_paddr >> PAGE_SHIFT) < pfn)
- continue;
-
- /*
- * irk, bounce it
- */
- if (!bio)
- bio = bio_alloc(bio_gfp, (*bio_orig)->bi_vcnt);
-
- to = &bio->bi_io_vec[i];
-
- to->bv_page = mempool_alloc(pool, gfp);
- to->bv_len = from->bv_len;
- to->bv_offset = from->bv_offset;
-
- if (rw & WRITE) {
- char *vto, *vfrom;
-
- vto = page_address(to->bv_page) + to->bv_offset;
- vfrom = kmap(from->bv_page) + from->bv_offset;
- memcpy(vto, vfrom, to->bv_len);
- kunmap(from->bv_page);
+ unsigned long flags;
+ struct page_address_slot *pas;
+ struct page_address_map *pam;
+
+ BUG_ON(!PageHighMem(page));
+
+ pas = page_slot(page);
+ if (virtual) { /* Add */
+ BUG_ON(list_empty(&page_address_pool));
+
+ spin_lock_irqsave(&pool_lock, flags);
+ pam = list_entry(page_address_pool.next,
+ struct page_address_map, list);
+ list_del(&pam->list);
+ spin_unlock_irqrestore(&pool_lock, flags);
+
+ pam->page = page;
+ pam->virtual = virtual;
+
+ spin_lock_irqsave(&pas->lock, flags);
+ list_add_tail(&pam->list, &pas->lh);
+ spin_unlock_irqrestore(&pas->lock, flags);
+ } else { /* Remove */
+ spin_lock_irqsave(&pas->lock, flags);
+ list_for_each_entry(pam, &pas->lh, list) {
+ if (pam->page == page) {
+ list_del(&pam->list);
+ spin_unlock_irqrestore(&pas->lock, flags);
+ spin_lock_irqsave(&pool_lock, flags);
+ list_add_tail(&pam->list, &page_address_pool);
+ spin_unlock_irqrestore(&pool_lock, flags);
+ goto done;
+ }
}
+ spin_unlock_irqrestore(&pas->lock, flags);
}
+done:
+ return;
+}
- /*
- * no pages bounced
- */
- if (!bio)
- return;
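+/*
+ * Backing store for the freelist: at most LAST_PKMAP highmem pages can be
+ * kmapped at any time, so LAST_PKMAP page->virtual entries always suffice.
+ */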
+static struct page_address_map page_address_maps[LAST_PKMAP];
- /*
- * at least one page was bounced, fill in possible non-highmem
- * pages
- */
- bio_for_each_segment(from, *bio_orig, i) {
- to = &bio->bi_io_vec[i];
- if (!to->bv_page) {
- to->bv_page = from->bv_page;
- to->bv_len = from->bv_len;
- to->bv_offset = to->bv_offset;
- }
- }
+void __init page_address_init(void)
+{
+ int i;
- bio->bi_dev = (*bio_orig)->bi_dev;
- bio->bi_sector = (*bio_orig)->bi_sector;
- bio->bi_rw = (*bio_orig)->bi_rw;
-
- bio->bi_vcnt = (*bio_orig)->bi_vcnt;
- bio->bi_idx = 0;
- bio->bi_size = (*bio_orig)->bi_size;
-
- if (pool == page_pool) {
- if (rw & WRITE)
- bio->bi_end_io = bounce_end_io_write;
- else
- bio->bi_end_io = bounce_end_io_read;
- } else {
- if (rw & WRITE)
- bio->bi_end_io = bounce_end_io_write_isa;
- else
- bio->bi_end_io = bounce_end_io_read_isa;
+ INIT_LIST_HEAD(&page_address_pool);
+ for (i = 0; i < ARRAY_SIZE(page_address_maps); i++)
+ list_add(&page_address_maps[i].list, &page_address_pool);
+ for (i = 0; i < ARRAY_SIZE(page_address_htable); i++) {
+ INIT_LIST_HEAD(&page_address_htable[i].lh);
+ spin_lock_init(&page_address_htable[i].lock);
}
-
- bio->bi_private = *bio_orig;
- *bio_orig = bio;
+ spin_lock_init(&pool_lock);
}
+
+#endif /* defined(HASHED_PAGE_VIRTUAL) */