extern unsigned long *contiguous_bitmap;
-#ifndef CONFIG_XEN
static unsigned long dma_reserve __initdata;
-#endif
DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
extern unsigned long start_pfn;
pud_t *pud;
if (after_bootmem) {
- pud = pud_offset_k(pgd, start & PGDIR_MASK);
+ pud = pud_offset_k(pgd, __PAGE_OFFSET);
make_page_readonly(pud, XENFEAT_writable_page_tables);
pud_phys = __pa(pud);
} else {
set_pgd(pgd_offset_k(start), mk_kernel_pgd(pud_phys));
}
- if (!after_bootmem) {
- BUG_ON(start_pfn != table_start + (tables_space>>PAGE_SHIFT));
-
- /* Re-vector virtual addresses pointing into the initial
- mapping to the just-established permanent ones. */
- xen_start_info = __va(__pa(xen_start_info));
- xen_start_info->pt_base = (unsigned long)
- __va(__pa(xen_start_info->pt_base));
- if (!xen_feature(XENFEAT_auto_translated_physmap)) {
- phys_to_machine_mapping =
- __va(__pa(xen_start_info->mfn_list));
- xen_start_info->mfn_list = (unsigned long)
- phys_to_machine_mapping;
- }
- if (xen_start_info->mod_start)
- xen_start_info->mod_start = (unsigned long)
- __va(__pa(xen_start_info->mod_start));
-
- /* Destroy the Xen-created mappings beyond the kernel image as
- * well as the temporary mappings created above. Prevents
- * overlap with modules area (if init mapping is very big).
- */
- start = PAGE_ALIGN((unsigned long)_end);
- end = __START_KERNEL_map + (start_pfn << PAGE_SHIFT);
- for (; start < end; start += PAGE_SIZE)
- WARN_ON(HYPERVISOR_update_va_mapping(
- start, __pte_ma(0), 0));
- }
+ BUG_ON(!after_bootmem && start_pfn != table_start + (tables_space >> PAGE_SHIFT));
__flush_tlb_all();
}
free_area_init_node(0, NODE_DATA(0), zones,
__pa(PAGE_OFFSET) >> PAGE_SHIFT, holes);
- /* Switch to the real shared_info page, and clear the
- * dummy page. */
- set_fixmap(FIX_SHARED_INFO, xen_start_info->shared_info);
- HYPERVISOR_shared_info = (shared_info_t *)fix_to_virt(FIX_SHARED_INFO);
- memset(empty_zero_page, 0, sizeof(empty_zero_page));
+ if (!xen_feature(XENFEAT_auto_translated_physmap) ||
+ xen_start_info->shared_info >= xen_start_info->nr_pages) {
+ /* Switch to the real shared_info page, and clear the
+ * dummy page. */
+ set_fixmap(FIX_SHARED_INFO, xen_start_info->shared_info);
+ HYPERVISOR_shared_info =
+ (shared_info_t *)fix_to_virt(FIX_SHARED_INFO);
+ memset(empty_zero_page, 0, sizeof(empty_zero_page));
+ }
init_mm.context.pinned = 1;
/*
* Memory hotplug specific functions
+ * These are only for non-NUMA machines right now.
*/
-#if defined(CONFIG_ACPI_HOTPLUG_MEMORY) || defined(CONFIG_ACPI_HOTPLUG_MEMORY_MODULE)
+#ifdef CONFIG_MEMORY_HOTPLUG
/*
 * Memory-hotplug callback: account one newly onlined page.
 * The visible body only bumps the global managed-page counter;
 * NOTE(review): freeing the page back to the allocator (the usual
 * ClearPageReserved/__free_page sequence) is presumably elided by this
 * patch chunk or handled elsewhere — confirm against the full file.
 */
void online_page(struct page *page)
{
num_physpages++;
}
-#ifndef CONFIG_MEMORY_HOTPLUG
-/*
- * Memory Hotadd without sparsemem. The mem_maps have been allocated in advance,
- * just online the pages.
- */
-int __add_pages(struct zone *z, unsigned long start_pfn, unsigned long nr_pages)
-{
- int err = -EIO;
- unsigned long pfn;
- unsigned long total = 0, mem = 0;
- for (pfn = start_pfn; pfn < start_pfn + nr_pages; pfn++) {
- if (pfn_valid(pfn)) {
- online_page(pfn_to_page(pfn));
- err = 0;
- mem++;
- }
- total++;
- }
- if (!err) {
- z->spanned_pages += total;
- z->present_pages += mem;
- z->zone_pgdat->node_spanned_pages += total;
- z->zone_pgdat->node_present_pages += mem;
- }
- return err;
-}
-#endif
-
-/*
- * Memory is added always to NORMAL zone. This means you will never get
- * additional DMA/DMA32 memory.
- */
int add_memory(u64 start, u64 size)
{
struct pglist_data *pgdat = NODE_DATA(0);
void __init mem_init(void)
{
long codesize, reservedpages, datasize, initsize;
- unsigned long pfn;
contiguous_bitmap = alloc_bootmem_low_pages(
(end_pfn + 2*BITS_PER_LONG) >> 3);
#else
totalram_pages = free_all_bootmem();
#endif
- /* XEN: init and count pages outside initial allocation. */
- for (pfn = xen_start_info->nr_pages; pfn < max_pfn; pfn++) {
- ClearPageReserved(&mem_map[pfn]);
- set_page_count(&mem_map[pfn], 1);
- }
reservedpages = end_pfn - totalram_pages - e820_hole_size(0, end_pfn);
after_bootmem = 1;
}
#endif
-#ifndef CONFIG_XEN
/*
 * Record bootmem reservations that overlap the DMA zone so that zone
 * sizing can later subtract them (dma_reserve pages are unavailable
 * for normal allocation).
 * NOTE(review): this diff chunk may omit intermediate lines — the
 * upstream version of this function also performs the actual
 * reserve_bootmem(phys, len) call; verify against the full source.
 * NOTE(review): the phys+len comparison against MAX_DMA_PFN*PAGE_SIZE
 * counts a reservation only when it lies wholly below the DMA limit;
 * a straddling range is not partially accounted — confirm intended.
 */
void __init reserve_bootmem_generic(unsigned long phys, unsigned len)
{
/* Should check here against the e820 map to avoid double free */
if (phys+len <= MAX_DMA_PFN*PAGE_SIZE)
dma_reserve += len / PAGE_SIZE;
}
-#endif
int kern_addr_valid(unsigned long addr)
{
{
return (addr >= VSYSCALL_START) && (addr < VSYSCALL_END);
}
+
+/*
+ * Local variables:
+ * c-file-style: "linux"
+ * indent-tabs-mode: t
+ * c-indent-level: 8
+ * c-basic-offset: 8
+ * tab-width: 8
+ * End:
+ */