Update to 3.4-final.
diff --git a/mm/sparse.c b/mm/sparse.c
index 9b6b93a..a8bc7d3 100644
--- a/mm/sparse.c
+++ b/mm/sparse.c
@@ -2,10 +2,11 @@
  * sparse memory mappings.
  */
 #include <linux/mm.h>
+#include <linux/slab.h>
 #include <linux/mmzone.h>
 #include <linux/bootmem.h>
 #include <linux/highmem.h>
-#include <linux/module.h>
+#include <linux/export.h>
 #include <linux/spinlock.h>
 #include <linux/vmalloc.h>
 #include "internal.h"
@@ -39,7 +40,7 @@ static u8 section_to_node_table[NR_MEM_SECTIONS] __cacheline_aligned;
 static u16 section_to_node_table[NR_MEM_SECTIONS] __cacheline_aligned;
 #endif
 
-int page_to_nid(struct page *page)
+int page_to_nid(const struct page *page)
 {
        return section_to_node_table[page_to_section(page)];
 }
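
This hunk const-qualifies the page_to_nid() parameter, so callers that only
hold a "const struct page *" can look up the node id without casting away the
qualifier. A minimal userland analogue, with stand-in types and a stand-in
table rather than the kernel's definitions:

    #include <stdio.h>

    struct page { unsigned long flags; };

    static unsigned char section_to_node_table[16];

    /* Stand-in for the kernel's page_to_section(): pull a section
     * index out of the flags word. */
    static unsigned long page_to_section(const struct page *page)
    {
            return page->flags & 0xf;
    }

    /* Const-qualified parameter: read-only callers can pass their
     * pointer straight through without discarding the qualifier. */
    static int page_to_nid(const struct page *page)
    {
            return section_to_node_table[page_to_section(page)];
    }

    int main(void)
    {
            const struct page pg = { .flags = 3 };

            section_to_node_table[3] = 1;
            printf("nid=%d\n", page_to_nid(&pg));
            return 0;
    }
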
@@ -352,42 +353,36 @@ static void __init sparse_early_usemaps_alloc_node(unsigned long**usemap_map,
 
        usemap = sparse_early_usemaps_alloc_pgdat_section(NODE_DATA(nodeid),
                                                                 usemap_count);
-       if (usemap) {
-               for (pnum = pnum_begin; pnum < pnum_end; pnum++) {
-                       if (!present_section_nr(pnum))
-                               continue;
-                       usemap_map[pnum] = usemap;
-                       usemap += size;
+       if (!usemap) {
+               usemap = alloc_bootmem_node(NODE_DATA(nodeid), size * usemap_count);
+               if (!usemap) {
+                       printk(KERN_WARNING "%s: allocation failed\n", __func__);
+                       return;
                }
-               return;
        }
 
-       usemap = alloc_bootmem_node(NODE_DATA(nodeid), size * usemap_count);
-       if (usemap) {
-               for (pnum = pnum_begin; pnum < pnum_end; pnum++) {
-                       if (!present_section_nr(pnum))
-                               continue;
-                       usemap_map[pnum] = usemap;
-                       usemap += size;
-                       check_usemap_section_nr(nodeid, usemap_map[pnum]);
-               }
-               return;
+       for (pnum = pnum_begin; pnum < pnum_end; pnum++) {
+               if (!present_section_nr(pnum))
+                       continue;
+               usemap_map[pnum] = usemap;
+               usemap += size;
+               check_usemap_section_nr(nodeid, usemap_map[pnum]);
        }
-
-       printk(KERN_WARNING "%s: allocation failed\n", __func__);
 }
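
The rewrite above folds two nearly identical hand-out loops into one: the
bootmem fallback is tried up front when the preferred per-section allocator
returns NULL, and a single loop then distributes the buffer. As the diff also
shows, check_usemap_section_nr() now runs on both paths, whereas the old
first loop skipped it. A compilable sketch of the try/fallback/single-loop
pattern, with illustrative names rather than the kernel's:

    #include <stdio.h>
    #include <stdlib.h>

    static void *preferred_alloc(size_t size)
    {
            (void)size;
            return NULL;    /* pretend the preferred pool is exhausted */
    }

    static void assign_chunks(size_t size, size_t count, char **slots)
    {
            char *buf = preferred_alloc(size * count);
            size_t i;

            if (!buf) {
                    buf = malloc(size * count);     /* generic fallback */
                    if (!buf) {
                            fprintf(stderr, "%s: allocation failed\n",
                                    __func__);
                            return;
                    }
            }

            /* One loop serves both allocation paths, so the per-slot
             * bookkeeping (and any sanity check) lives in one place. */
            for (i = 0; i < count; i++) {
                    slots[i] = buf;
                    buf += size;
            }
    }

    int main(void)
    {
            char *slots[4];

            assign_chunks(32, 4, slots);
            free(slots[0]);     /* all slots point into one buffer */
            return 0;
    }
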
 
 #ifndef CONFIG_SPARSEMEM_VMEMMAP
 struct page __init *sparse_mem_map_populate(unsigned long pnum, int nid)
 {
        struct page *map;
+       unsigned long size;
 
        map = alloc_remap(nid, sizeof(struct page) * PAGES_PER_SECTION);
        if (map)
                return map;
 
-       map = alloc_bootmem_pages_node(NODE_DATA(nid),
-                      PAGE_ALIGN(sizeof(struct page) * PAGES_PER_SECTION));
+       size = PAGE_ALIGN(sizeof(struct page) * PAGES_PER_SECTION);
+       map = __alloc_bootmem_node_high(NODE_DATA(nid), size,
+                                        PAGE_SIZE, __pa(MAX_DMA_ADDRESS));
        return map;
 }
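
Both this allocation and the bulk one in the next hunk switch from
alloc_bootmem_pages_node() to __alloc_bootmem_node_high(), whose extra
arguments are the alignment and an allocation "goal". If I read the bootmem
API of this era correctly, the goal is a physical address the allocator
prefers to allocate at or above, so passing __pa(MAX_DMA_ADDRESS) steers the
potentially large memmap out of the low DMA region. Annotated call shape
only; this is kernel context, not standalone code:

    map = __alloc_bootmem_node_high(
            NODE_DATA(nid),           /* node whose bootmem to use       */
            size,                     /* bytes, already PAGE_ALIGN()ed   */
            PAGE_SIZE,                /* alignment                       */
            __pa(MAX_DMA_ADDRESS));   /* goal: prefer memory above this  */
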
 void __init sparse_mem_maps_populate_node(struct page **map_map,
@@ -411,7 +406,8 @@ void __init sparse_mem_maps_populate_node(struct page **map_map,
        }
 
        size = PAGE_ALIGN(size);
-       map = alloc_bootmem_pages_node(NODE_DATA(nodeid), size * map_count);
+       map = __alloc_bootmem_node_high(NODE_DATA(nodeid), size * map_count,
+                                        PAGE_SIZE, __pa(MAX_DMA_ADDRESS));
        if (map) {
                for (pnum = pnum_begin; pnum < pnum_end; pnum++) {
                        if (!present_section_nr(pnum))
@@ -439,6 +435,7 @@ void __init sparse_mem_maps_populate_node(struct page **map_map,
 }
 #endif /* !CONFIG_SPARSEMEM_VMEMMAP */
 
+#ifdef CONFIG_SPARSEMEM_ALLOC_MEM_MAP_TOGETHER
 static void __init sparse_early_mem_maps_alloc_node(struct page **map_map,
                                 unsigned long pnum_begin,
                                 unsigned long pnum_end,
@@ -447,8 +444,7 @@ static void __init sparse_early_mem_maps_alloc_node(struct page **map_map,
        sparse_mem_maps_populate_node(map_map, pnum_begin, pnum_end,
                                         map_count, nodeid);
 }
-
-#ifndef CONFIG_SPARSEMEM_ALLOC_MEM_MAP_TOGETHER
+#else
 static struct page __init *sparse_early_mem_map_alloc(unsigned long pnum)
 {
        struct page *map;
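
The two hunks above rebalance the preprocessor structure: previously
sparse_early_mem_maps_alloc_node() was compiled unconditionally and only
sparse_early_mem_map_alloc() sat under #ifndef
CONFIG_SPARSEMEM_ALLOC_MEM_MAP_TOGETHER; now the two mutually exclusive
helpers occupy the two arms of one #ifdef/#else, so exactly one of them is
built (presumably avoiding a defined-but-unused static function when the
option is off). The shape of the change, as a tiny compilable sketch with an
illustrative macro standing in for the Kconfig option:

    #include <stdio.h>

    #define ALLOC_MAPS_TOGETHER 1   /* stand-in for the Kconfig option */

    #ifdef ALLOC_MAPS_TOGETHER
    static void alloc_mem_maps(void) { puts("batched, per node"); }
    #else
    static void alloc_mem_maps(void) { puts("one section at a time"); }
    #endif

    int main(void)
    {
            alloc_mem_maps();
            return 0;
    }
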
@@ -478,14 +474,17 @@ void __init sparse_init(void)
 {
        unsigned long pnum;
        struct page *map;
-       struct page **map_map;
        unsigned long *usemap;
        unsigned long **usemap_map;
-       int size, size2;
+       int size;
        int nodeid_begin = 0;
        unsigned long pnum_begin = 0;
        unsigned long usemap_count;
+#ifdef CONFIG_SPARSEMEM_ALLOC_MEM_MAP_TOGETHER
        unsigned long map_count;
+       int size2;
+       struct page **map_map;
+#endif
 
        /*
         * map is using big page (aka 2M in x86 64 bit)
@@ -493,7 +492,7 @@ void __init sparse_init(void)
         * so alloc 2M (with 2M align) and 24 bytes in turn will
         * make next 2M slip to one more 2M later.
         * then in big system, the memory will have a lot of holes...
-        * here try to allocate 2M pages continously.
+        * here try to allocate 2M pages continuously.
         *
         * powerpc need to call sparse_init_one_section right after each
         * sparse_early_mem_map_alloc, so allocate usemap_map at first.
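
Some assumed numbers behind that comment (x86-64-era values:
SECTION_SIZE_BITS = 27, PAGE_SHIFT = 12, a 64-byte struct page; the real
figures are config-dependent): a section's memmap is 32768 * 64 bytes, which
is exactly 2 MiB. A 2 MiB-aligned memmap followed by a ~24-byte usemap
therefore pushes the next 2 MiB-aligned request past an almost-2-MiB hole,
which is why the usemaps are allocated first and the memmaps together:

    /* Back-of-envelope check for the comment above; the constants are
     * assumptions, not the kernel's authoritative values. */
    #include <stdio.h>

    int main(void)
    {
            unsigned long pages_per_section = 1UL << (27 - 12); /* 32768 */
            unsigned long memmap_bytes = pages_per_section * 64;

            printf("memmap per section: %lu bytes (%lu MiB)\n",
                   memmap_bytes, memmap_bytes >> 20);
            return 0;
    }
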
@@ -664,10 +663,10 @@ static void __kfree_section_memmap(struct page *memmap, unsigned long nr_pages)
 static void free_map_bootmem(struct page *page, unsigned long nr_pages)
 {
        unsigned long maps_section_nr, removing_section_nr, i;
-       int magic;
+       unsigned long magic;
 
        for (i = 0; i < nr_pages; i++, page++) {
-               magic = atomic_read(&page->_mapcount);
+               magic = (unsigned long) page->lru.next;
 
                BUG_ON(magic == NODE_INFO);
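
The final hunk moves the magic tag from the page's _mapcount field to the
lru.next pointer, widening the local from int to unsigned long to hold a
pointer-width value. As far as I can tell from the hot-plug bookkeeping of
this era, the tag is stashed there as a cast pointer when the page is
registered, so it must be read back at full width; a 32-bit int on an LP64
kernel could truncate it. A userland analogue of the store/read round trip,
with stand-in types:

    #include <stdio.h>

    struct list_head { struct list_head *next, *prev; };
    struct page { struct list_head lru; };

    #define SECTION_INFO 0x2UL   /* illustrative tag value */

    /* Stash an integer tag in an otherwise-unused pointer field. */
    static void set_bootmem_type(struct page *page, unsigned long type)
    {
            page->lru.next = (struct list_head *)type;
    }

    /* Read the tag back at full pointer width. */
    static unsigned long get_bootmem_type(const struct page *page)
    {
            return (unsigned long)page->lru.next;
    }

    int main(void)
    {
            struct page pg;

            set_bootmem_type(&pg, SECTION_INFO);
            printf("magic=%lu\n", get_bootmem_type(&pg));
            return 0;
    }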