Update to 3.4-final.
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index 1d34d75..d75f965 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -26,7 +26,7 @@
 #include <linux/rcupdate.h>
 #include <linux/pfn.h>
 #include <linux/kmemleak.h>
-#include <asm/atomic.h>
+#include <linux/atomic.h>
 #include <asm/uaccess.h>
 #include <asm/tlbflush.h>
 #include <asm/shmparam.h>
@@ -256,7 +256,7 @@ struct vmap_area {
        struct rb_node rb_node;         /* address sorted rbtree */
        struct list_head list;          /* address sorted list */
        struct list_head purge_list;    /* "lazy purge" list */
-       void *private;
+       struct vm_struct *vm;
        struct rcu_head rcu_head;
 };
 
@@ -452,13 +452,6 @@ overflow:
        return ERR_PTR(-EBUSY);
 }
 
-static void rcu_free_va(struct rcu_head *head)
-{
-       struct vmap_area *va = container_of(head, struct vmap_area, rcu_head);
-
-       kfree(va);
-}
-
 static void __free_vmap_area(struct vmap_area *va)
 {
        BUG_ON(RB_EMPTY_NODE(&va->rb_node));
@@ -491,7 +484,7 @@ static void __free_vmap_area(struct vmap_area *va)
        if (va->va_end > VMALLOC_START && va->va_end <= VMALLOC_END)
                vmap_area_pcpu_hole = max(vmap_area_pcpu_hole, va->va_end);
 
-       call_rcu(&va->rcu_head, rcu_free_va);
+       kfree_rcu(va, rcu_head);
 }
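
Note: this hunk (and the matching vmap_block hunk below) replaces an
open-coded call_rcu() callback with kfree_rcu().  A minimal sketch of the
pattern, using a hypothetical struct foo rather than the vmap types:

struct foo {
        int data;
        struct rcu_head rcu_head;       /* must be embedded in the struct */
};

static void foo_free(struct foo *p)
{
        /*
         * Before: call_rcu(&p->rcu_head, rcu_free_foo) plus a callback
         * doing container_of() and kfree().  kfree_rcu() needs no
         * callback; the rcu_head offset is computed by the macro itself.
         */
        kfree_rcu(p, rcu_head);
}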
 
 /*
@@ -732,9 +725,10 @@ static void free_unmap_vmap_area_addr(unsigned long addr)
 #define VMAP_BBMAP_BITS_MIN    (VMAP_MAX_ALLOC*2)
 #define VMAP_MIN(x, y)         ((x) < (y) ? (x) : (y)) /* can't use min() */
 #define VMAP_MAX(x, y)         ((x) > (y) ? (x) : (y)) /* can't use max() */
-#define VMAP_BBMAP_BITS                VMAP_MIN(VMAP_BBMAP_BITS_MAX,           \
-                                       VMAP_MAX(VMAP_BBMAP_BITS_MIN,   \
-                                               VMALLOC_PAGES / NR_CPUS / 16))
+#define VMAP_BBMAP_BITS                \
+               VMAP_MIN(VMAP_BBMAP_BITS_MAX,   \
+               VMAP_MAX(VMAP_BBMAP_BITS_MIN,   \
+                       VMALLOC_PAGES / roundup_pow_of_two(NR_CPUS) / 16))
 
 #define VMAP_BLOCK_SIZE                (VMAP_BBMAP_BITS * PAGE_SIZE)
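
Note: roundup_pow_of_two() keeps VMAP_BBMAP_BITS a power of two even when
NR_CPUS is not, which VMAP_BLOCK_SIZE and the per-block allocation bitmaps
rely on.  A worked example with hypothetical numbers:

/*
 * With VMALLOC_PAGES == 1 << 18 and NR_CPUS == 6:
 *
 *   old: (1 << 18) / 6 / 16                      == 2730  (not a power of 2)
 *   new: (1 << 18) / roundup_pow_of_two(6) / 16  == 2048  (== 1 << 11)
 *
 * The VMAP_MIN/VMAP_MAX clamping preserves this, since both bounds are
 * powers of two as well.
 */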
 
@@ -837,13 +831,6 @@ static struct vmap_block *new_vmap_block(gfp_t gfp_mask)
        return vb;
 }
 
-static void rcu_free_vb(struct rcu_head *head)
-{
-       struct vmap_block *vb = container_of(head, struct vmap_block, rcu_head);
-
-       kfree(vb);
-}
-
 static void free_vmap_block(struct vmap_block *vb)
 {
        struct vmap_block *tmp;
@@ -856,7 +843,7 @@ static void free_vmap_block(struct vmap_block *vb)
        BUG_ON(tmp != vb);
 
        free_vmap_area_noflush(vb->va);
-       call_rcu(&vb->rcu_head, rcu_free_vb);
+       kfree_rcu(vb, rcu_head);
 }
 
 static void purge_fragmented_blocks(int cpu)
@@ -1131,6 +1118,32 @@ void *vm_map_ram(struct page **pages, unsigned int count, int node, pgprot_t pro
 EXPORT_SYMBOL(vm_map_ram);
 
 /**
+ * vm_area_add_early - add vmap area early during boot
+ * @vm: vm_struct to add
+ *
+ * This function is used to add a fixed kernel vm area to vmlist before
+ * vmalloc_init() is called.  @vm->addr, @vm->size, and @vm->flags
+ * should contain proper values and the other fields should be zero.
+ *
+ * DO NOT USE THIS FUNCTION UNLESS YOU KNOW WHAT YOU'RE DOING.
+ */
+void __init vm_area_add_early(struct vm_struct *vm)
+{
+       struct vm_struct *tmp, **p;
+
+       BUG_ON(vmap_initialized);
+       for (p = &vmlist; (tmp = *p) != NULL; p = &tmp->next) {
+               if (tmp->addr >= vm->addr) {
+                       BUG_ON(tmp->addr < vm->addr + vm->size);
+                       break;
+               } else
+                       BUG_ON(tmp->addr + tmp->size > vm->addr);
+       }
+       vm->next = *p;
+       *p = vm;
+}
+
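
Note: vm_area_add_early() keeps vmlist sorted by address and BUG()s on any
overlap.  A sketch of a hypothetical boot-time caller (the function name,
address, and size here are invented for illustration):

static struct vm_struct early_fixmap_vm = {
        .addr  = (void *)0xfee00000,    /* hypothetical fixed VA */
        .size  = SZ_1M,
        .flags = VM_IOREMAP,
};

void __init arch_reserve_fixmap(void)
{
        /*
         * Link the area into vmlist before vmalloc_init() runs, so the
         * normal allocator never hands out this address range.
         */
        vm_area_add_early(&early_fixmap_vm);
}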
+/**
  * vm_area_register_early - register vmap area early during boot
  * @vm: vm_struct to register
  * @align: requested alignment
@@ -1152,8 +1165,7 @@ void __init vm_area_register_early(struct vm_struct *vm, size_t align)
 
        vm->addr = (void *)addr;
 
-       vm->next = vmlist;
-       vmlist = vm;
+       vm_area_add_early(vm);
 }
 
 void __init vmalloc_init(void)
@@ -1266,18 +1278,22 @@ EXPORT_SYMBOL_GPL(map_vm_area);
 DEFINE_RWLOCK(vmlist_lock);
 struct vm_struct *vmlist;
 
-static void insert_vmalloc_vm(struct vm_struct *vm, struct vmap_area *va,
+static void setup_vmalloc_vm(struct vm_struct *vm, struct vmap_area *va,
                              unsigned long flags, void *caller)
 {
-       struct vm_struct *tmp, **p;
-
        vm->flags = flags;
        vm->addr = (void *)va->va_start;
        vm->size = va->va_end - va->va_start;
        vm->caller = caller;
-       va->private = vm;
+       va->vm = vm;
        va->flags |= VM_VM_AREA;
+}
 
+static void insert_vmalloc_vmlist(struct vm_struct *vm)
+{
+       struct vm_struct *tmp, **p;
+
+       vm->flags &= ~VM_UNLIST;
        write_lock(&vmlist_lock);
        for (p = &vmlist; (tmp = *p) != NULL; p = &tmp->next) {
                if (tmp->addr >= vm->addr)
@@ -1288,11 +1304,18 @@ static void insert_vmalloc_vm(struct vm_struct *vm, struct vmap_area *va,
        write_unlock(&vmlist_lock);
 }
 
+static void insert_vmalloc_vm(struct vm_struct *vm, struct vmap_area *va,
+                             unsigned long flags, void *caller)
+{
+       setup_vmalloc_vm(vm, va, flags, caller);
+       insert_vmalloc_vmlist(vm);
+}
+
 static struct vm_struct *__get_vm_area_node(unsigned long size,
                unsigned long align, unsigned long flags, unsigned long start,
                unsigned long end, int node, gfp_t gfp_mask, void *caller)
 {
-       static struct vmap_area *va;
+       struct vmap_area *va;
        struct vm_struct *area;
 
        BUG_ON(in_interrupt());
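
Note: dropping the stray "static" here is a real fix, not a style tweak:
a static local made every caller of __get_vm_area_node() share one va
slot.  Roughly:

/*
 * With "static struct vmap_area *va;", concurrent allocations race:
 *
 *   CPU0: va = alloc_vmap_area(...);           (writes the shared slot)
 *   CPU1: va = alloc_vmap_area(...);           (overwrites it)
 *   CPU0: setup_vmalloc_vm(area, va, ...);     (uses CPU1's vmap_area)
 *
 * An ordinary automatic variable gives each call its own copy.
 */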
@@ -1326,7 +1349,18 @@ static struct vm_struct *__get_vm_area_node(unsigned long size,
                return NULL;
        }
 
-       insert_vmalloc_vm(area, va, flags, caller);
+       /*
+        * When this function is called from __vmalloc_node_range,
+        * we do not add the vm_struct to vmlist here to avoid
+        * accessing uninitialized members of vm_struct, such as the
+        * pages and nr_pages fields. They will be set later.
+        * To distinguish it from others, we use a VM_UNLIST flag.
+        */
+       if (flags & VM_UNLIST)
+               setup_vmalloc_vm(area, va, flags, caller);
+       else
+               insert_vmalloc_vm(area, va, flags, caller);
+
        return area;
 }
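
Note: the readers being protected here are vmlist walkers such as the
/proc/vmallocinfo code, which does roughly the following (a simplified
sketch, not the exact s_show() body):

read_lock(&vmlist_lock);
for (vm = vmlist; vm; vm = vm->next)
        seq_printf(m, "0x%p-0x%p %7ld pages=%d\n",
                   vm->addr, vm->addr + vm->size, vm->size, vm->nr_pages);
read_unlock(&vmlist_lock);

If a vm_struct were linked into vmlist before pages and nr_pages are
filled in, such a walker would read uninitialized memory; VM_UNLIST keeps
the area off the list until then.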
 
@@ -1374,7 +1408,7 @@ static struct vm_struct *find_vm_area(const void *addr)
 
        va = find_vmap_area((unsigned long)addr);
        if (va && va->flags & VM_VM_AREA)
-               return va->private;
+               return va->vm;
 
        return NULL;
 }
@@ -1393,18 +1427,21 @@ struct vm_struct *remove_vm_area(const void *addr)
 
        va = find_vmap_area((unsigned long)addr);
        if (va && va->flags & VM_VM_AREA) {
-               struct vm_struct *vm = va->private;
-               struct vm_struct *tmp, **p;
-               /*
-                * remove from list and disallow access to this vm_struct
-                * before unmap. (address range confliction is maintained by
-                * vmap.)
-                */
-               write_lock(&vmlist_lock);
-               for (p = &vmlist; (tmp = *p) != vm; p = &tmp->next)
-                       ;
-               *p = tmp->next;
-               write_unlock(&vmlist_lock);
+               struct vm_struct *vm = va->vm;
+
+               if (!(vm->flags & VM_UNLIST)) {
+                       struct vm_struct *tmp, **p;
+                       /*
+                        * remove from list and disallow access to
+                        * this vm_struct before unmap. (address range
+                        * conflict avoidance is handled by vmap.)
+                        */
+                       write_lock(&vmlist_lock);
+                       for (p = &vmlist; (tmp = *p) != vm; p = &tmp->next)
+                               ;
+                       *p = tmp->next;
+                       write_unlock(&vmlist_lock);
+               }
 
                vmap_debug_free_range(va->va_start, va->va_end);
                free_unmap_vmap_area(va);
@@ -1538,6 +1575,13 @@ static void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask,
        struct page **pages;
        unsigned int nr_pages, array_size, i;
        gfp_t nested_gfp = (gfp_mask & GFP_RECLAIM_MASK) | __GFP_ZERO;
+#ifdef CONFIG_XEN
+       gfp_t dma_mask = gfp_mask & (__GFP_DMA | __GFP_DMA32);
+
+       BUILD_BUG_ON((__GFP_DMA | __GFP_DMA32) != (__GFP_DMA + __GFP_DMA32));
+       if (dma_mask == (__GFP_DMA | __GFP_DMA32))
+               gfp_mask &= ~(__GFP_DMA | __GFP_DMA32);
+#endif
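
Note (Xen-specific, out-of-tree code): the BUILD_BUG_ON encodes the fact
that, for bit masks, OR equals addition exactly when no bits overlap, so
the dma_mask comparison above genuinely tests "both zone flags set":

/*
 *   0x01 | 0x04 == 0x05 == 0x01 + 0x04   (disjoint bits: assertion holds)
 *   0x03 | 0x06 == 0x07 != 0x03 + 0x06   (overlapping bits: build breaks)
 */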
 
        nr_pages = (area->size - PAGE_SIZE) >> PAGE_SHIFT;
        array_size = (nr_pages * sizeof(struct page *));
@@ -1574,6 +1618,16 @@ static void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask,
                        goto fail;
                }
                area->pages[i] = page;
+#ifdef CONFIG_XEN
+               if (dma_mask) {
+                       if (xen_limit_pages_to_max_mfn(page, 0, 32)) {
+                               area->nr_pages = i + 1;
+                               goto fail;
+                       }
+                       if (gfp_mask & __GFP_ZERO)
+                               clear_highpage(page);
+               }
+#endif
        }
 
        if (map_vm_area(area, prot, &pages))
@@ -1581,8 +1635,8 @@ static void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask,
        return area->addr;
 
 fail:
-       warn_alloc_failed(gfp_mask, order, "vmalloc: allocation failure, "
-                         "allocated %ld of %ld bytes\n",
+       warn_alloc_failed(gfp_mask, order,
+                         "vmalloc: allocation failure, allocated %ld of %ld bytes\n",
                          (area->nr_pages*PAGE_SIZE), area->size);
        vfree(area->addr);
        return NULL;
@@ -1613,15 +1667,22 @@ void *__vmalloc_node_range(unsigned long size, unsigned long align,
 
        size = PAGE_ALIGN(size);
        if (!size || (size >> PAGE_SHIFT) > totalram_pages)
-               return NULL;
-
-       area = __get_vm_area_node(size, align, VM_ALLOC, start, end, node,
-                                 gfp_mask, caller);
+               goto fail;
 
+       area = __get_vm_area_node(size, align, VM_ALLOC | VM_UNLIST,
+                                 start, end, node, gfp_mask, caller);
        if (!area)
-               return NULL;
+               goto fail;
 
        addr = __vmalloc_area_node(area, gfp_mask, prot, node, caller);
+       if (!addr)
+               return NULL;
+
+       /*
+        * In this function, the newly allocated vm_struct was not
+        * added to vmlist by __get_vm_area_node(), so it is added here.
+        */
+       insert_vmalloc_vmlist(area);
 
        /*
         * A ref_count = 3 is needed because the vm_struct and vmap_area
@@ -1631,6 +1692,12 @@ void *__vmalloc_node_range(unsigned long size, unsigned long align,
        kmemleak_alloc(addr, real_size, 3, gfp_mask);
 
        return addr;
+
+fail:
+       warn_alloc_failed(gfp_mask, 0,
+                         "vmalloc: allocation failure: %lu bytes\n",
+                         real_size);
+       return NULL;
 }
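
Note: this hunk completes the deferred-listing scheme.  The allocation
path now looks like this (simplified sketch):

area = __get_vm_area_node(size, align, VM_ALLOC | VM_UNLIST,
                          start, end, node, gfp_mask, caller);
                        /* setup_vmalloc_vm() only: not yet on vmlist */
addr = __vmalloc_area_node(area, gfp_mask, prot, node, caller);
                        /* fills area->pages[] and area->nr_pages */
insert_vmalloc_vmlist(area);
                        /* clears VM_UNLIST and links into vmlist */

The new fail: label also means a size-check failure now emits the same
warn_alloc_failed() diagnostic as an allocation failure instead of
returning NULL silently.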
 
 /**
@@ -1786,6 +1853,8 @@ void *vmalloc_exec(unsigned long size)
 #define GFP_VMALLOC32 GFP_DMA32 | GFP_KERNEL
 #elif defined(CONFIG_64BIT) && defined(CONFIG_ZONE_DMA)
 #define GFP_VMALLOC32 GFP_DMA | GFP_KERNEL
+#elif defined(CONFIG_XEN)
+#define GFP_VMALLOC32 GFP_DMA | GFP_DMA32 | GFP_KERNEL
 #else
 #define GFP_VMALLOC32 GFP_KERNEL
 #endif
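
Note: all of these definitions, including the new Xen one, are
unparenthesized, so GFP_VMALLOC32 is only safe when used as a complete
argument; a hypothetical masking expression would parse wrongly:

gfp_t x = flags & GFP_VMALLOC32;
        /* parses as: (flags & GFP_DMA) | GFP_DMA32 | GFP_KERNEL */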
@@ -1856,9 +1925,9 @@ static int aligned_vread(char *buf, char *addr, unsigned long count)
                         * we can expect USER0 is not used (see vread/vwrite's
                         * function description)
                         */
-                       void *map = kmap_atomic(p, KM_USER0);
+                       void *map = kmap_atomic(p);
                        memcpy(buf, map + offset, length);
-                       kunmap_atomic(map, KM_USER0);
+                       kunmap_atomic(map);
                } else
                        memset(buf, 0, length);
 
@@ -1895,9 +1964,9 @@ static int aligned_vwrite(char *buf, char *addr, unsigned long count)
                         * we can expect USER0 is not used (see vread/vwrite's
                         * function description)
                         */
-                       void *map = kmap_atomic(p, KM_USER0);
+                       void *map = kmap_atomic(p);
                        memcpy(map + offset, buf, length);
-                       kunmap_atomic(map, KM_USER0);
+                       kunmap_atomic(map);
                }
                addr += length;
                buf += length;
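
Note: these two hunks track the kmap_atomic() interface change merged for
3.4: the KM_* slot argument is gone and slot indices are managed
internally per task.  The conversion is mechanical:

void *map = kmap_atomic(p);     /* was: kmap_atomic(p, KM_USER0) */
memcpy(buf, map + offset, length);
kunmap_atomic(map);             /* was: kunmap_atomic(map, KM_USER0) */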
@@ -2118,23 +2187,30 @@ void  __attribute__((weak)) vmalloc_sync_all(void)
 
 static int f(pte_t *pte, pgtable_t table, unsigned long addr, void *data)
 {
-       /* apply_to_page_range() does all the hard work. */
+       pte_t ***p = data;
+
+       if (p) {
+               *(*p) = pte;
+               (*p)++;
+       }
        return 0;
 }
 
 /**
  *     alloc_vm_area - allocate a range of kernel address space
  *     @size:          size of the area
+ *     @ptes:          returns the PTEs for the address space
  *
  *     Returns:        NULL on failure, vm_struct on success
  *
  *     This function reserves a range of kernel address space, and
  *     allocates pagetables to map that range.  No actual mappings
- *     are created.  If the kernel address space is not shared
- *     between processes, it syncs the pagetable across all
- *     processes.
+ *     are created.
+ *
+ *     If @ptes is non-NULL, pointers to the PTEs (in init_mm)
+ *     allocated for the VM area are returned.
  */
-struct vm_struct *alloc_vm_area(size_t size)
+struct vm_struct *alloc_vm_area(size_t size, pte_t **ptes)
 {
        struct vm_struct *area;
 
@@ -2148,11 +2224,22 @@ struct vm_struct *alloc_vm_area(size_t size)
         * of kernel virtual address space and mapped into init_mm.
         */
        if (apply_to_page_range(&init_mm, (unsigned long)area->addr,
-                               area->size, f, NULL)) {
+                               size, f, ptes ? &ptes : NULL)) {
                free_vm_area(area);
                return NULL;
        }
 
+#ifdef CONFIG_XEN
+       /*
+        * If the allocated address space is passed to a hypercall before
+        * being used then we cannot rely on a page fault to trigger an update
+        * of the page tables.  So sync all the page tables here unless the
+        * caller is going to have the affected PTEs updated directly.
+        */
+       if (!ptes)
+               vmalloc_sync_all();
+#endif
+
        return area;
 }
 EXPORT_SYMBOL_GPL(alloc_vm_area);
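
Note: a sketch of how a caller uses the new @ptes argument (a hypothetical
caller; the contemporaneous real users are in the Xen ring-mapping code):

pte_t *ptes[1];                 /* one pte_t * per page of the area */
struct vm_struct *area = alloc_vm_area(PAGE_SIZE, ptes);

if (area) {
        /*
         * ptes[0] now points at the init_mm PTE covering area->addr; a
         * Xen backend can pass its machine address to the hypervisor
         * instead of relying on a fault to sync the mapping.
         */
}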
@@ -2321,7 +2408,7 @@ struct vm_struct **pcpu_get_vm_areas(const unsigned long *offsets,
        vms = kzalloc(sizeof(vms[0]) * nr_vms, GFP_KERNEL);
        vas = kzalloc(sizeof(vas[0]) * nr_vms, GFP_KERNEL);
        if (!vas || !vms)
-               goto err_free;
+               goto err_free2;
 
        for (area = 0; area < nr_vms; area++) {
                vas[area] = kzalloc(sizeof(struct vmap_area), GFP_KERNEL);
@@ -2419,11 +2506,10 @@ found:
 
 err_free:
        for (area = 0; area < nr_vms; area++) {
-               if (vas)
-                       kfree(vas[area]);
-               if (vms)
-                       kfree(vms[area]);
+               kfree(vas[area]);
+               kfree(vms[area]);
        }
+err_free2:
        kfree(vas);
        kfree(vms);
        return NULL;
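
Note: the label split tidies the error path.  When the vas/vms array
allocations themselves fail, the element loop is now skipped entirely,
and the per-element NULL checks become unnecessary because the arrays
are kzalloc()ed and kfree(NULL) is a no-op:

/*
 *   vas = kzalloc(...); vms = kzalloc(...);
 *   if (!vas || !vms)
 *           goto err_free2;       (arrays may be NULL: skip element loop)
 *   ...
 * err_free:
 *   kfree(vas[area]); ...         (elements are zeroed or valid)
 * err_free2:
 *   kfree(vas); kfree(vms);       (kfree(NULL) is safe here too)
 */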