/*
 * Procedures for maintaining information about logical memory blocks.
 *
 * Peter Bergner, IBM Corp.	June 2001.
 * Copyright (C) 2001 Peter Bergner.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/bitops.h>
#include <linux/memblock.h>

struct memblock memblock;

static int memblock_debug;

static int __init early_memblock(char *p)
{
	if (p && strstr(p, "debug"))
		memblock_debug = 1;
	return 0;
}
early_param("memblock", early_memblock);
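/*
 * Usage note (an illustration, not code from this file): booting with
 * "memblock=debug" on the kernel command line sets memblock_debug, which
 * makes memblock_dump_all() below print the memory and reserved region
 * tables.
 */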
static void memblock_dump(struct memblock_type *region, char *name)
{
	unsigned long long base, size;
	int i;

	pr_info(" %s.cnt  = 0x%lx\n", name, region->cnt);

	for (i = 0; i < region->cnt; i++) {
		base = region->regions[i].base;
		size = region->regions[i].size;
		pr_info(" %s[0x%x]\t0x%016llx - 0x%016llx, 0x%llx bytes\n",
			name, i, base, base + size - 1, size);
	}
}

void memblock_dump_all(void)
{
	if (!memblock_debug)
		return;

	pr_info("MEMBLOCK configuration:\n");
	pr_info(" memory size = 0x%llx\n", (unsigned long long)memblock.memory_size);

	memblock_dump(&memblock.memory, "memory");
	memblock_dump(&memblock.reserved, "reserved");
}
static unsigned long memblock_addrs_overlap(phys_addr_t base1, phys_addr_t size1,
					phys_addr_t base2, phys_addr_t size2)
{
	return ((base1 < (base2 + size2)) && (base2 < (base1 + size1)));
}

static long memblock_addrs_adjacent(phys_addr_t base1, phys_addr_t size1,
					phys_addr_t base2, phys_addr_t size2)
{
	if (base2 == base1 + size1)
		return 1;
	else if (base1 == base2 + size2)
		return -1;

	return 0;
}

static long memblock_regions_adjacent(struct memblock_type *type,
					unsigned long r1, unsigned long r2)
{
	phys_addr_t base1 = type->regions[r1].base;
	phys_addr_t size1 = type->regions[r1].size;
	phys_addr_t base2 = type->regions[r2].base;
	phys_addr_t size2 = type->regions[r2].size;

	return memblock_addrs_adjacent(base1, size1, base2, size2);
}
static void memblock_remove_region(struct memblock_type *type, unsigned long r)
{
	unsigned long i;

	for (i = r; i < type->cnt - 1; i++) {
		type->regions[i].base = type->regions[i + 1].base;
		type->regions[i].size = type->regions[i + 1].size;
	}
	type->cnt--;
}

/* Assumption: base addr of region 1 < base addr of region 2 */
static void memblock_coalesce_regions(struct memblock_type *type,
					unsigned long r1, unsigned long r2)
{
	type->regions[r1].size += type->regions[r2].size;
	memblock_remove_region(type, r2);
}
void __init memblock_init(void)
{
	/* Create a dummy zero size MEMBLOCK which will get coalesced away later.
	 * This simplifies the memblock_add() code below...
	 */
	memblock.memory.regions[0].base = 0;
	memblock.memory.regions[0].size = 0;
	memblock.memory.cnt = 1;

	/* Ditto. */
	memblock.reserved.regions[0].base = 0;
	memblock.reserved.regions[0].size = 0;
	memblock.reserved.cnt = 1;

	memblock.current_limit = MEMBLOCK_ALLOC_ANYWHERE;
}

void __init memblock_analyze(void)
{
	int i;

	memblock.memory_size = 0;

	for (i = 0; i < memblock.memory.cnt; i++)
		memblock.memory_size += memblock.memory.regions[i].size;
}
static long memblock_add_region(struct memblock_type *type, phys_addr_t base, phys_addr_t size)
{
	unsigned long coalesced = 0;
	long adjacent, i;

	if ((type->cnt == 1) && (type->regions[0].size == 0)) {
		type->regions[0].base = base;
		type->regions[0].size = size;
		return 0;
	}

	/* First try and coalesce this MEMBLOCK with another. */
	for (i = 0; i < type->cnt; i++) {
		phys_addr_t rgnbase = type->regions[i].base;
		phys_addr_t rgnsize = type->regions[i].size;

		if ((rgnbase == base) && (rgnsize == size))
			/* Already have this region, so we're done */
			return 0;

		adjacent = memblock_addrs_adjacent(base, size, rgnbase, rgnsize);
		if (adjacent > 0) {
			type->regions[i].base -= size;
			type->regions[i].size += size;
			coalesced++;
			break;
		} else if (adjacent < 0) {
			type->regions[i].size += size;
			coalesced++;
			break;
		}
	}

	if ((i < type->cnt - 1) && memblock_regions_adjacent(type, i, i+1)) {
		memblock_coalesce_regions(type, i, i+1);
		coalesced++;
	}
	if (coalesced)
		return coalesced;
	if (type->cnt >= MAX_MEMBLOCK_REGIONS)
		return -1;

	/* Couldn't coalesce the MEMBLOCK, so add it to the sorted table. */
	for (i = type->cnt - 1; i >= 0; i--) {
		if (base < type->regions[i].base) {
			type->regions[i+1].base = type->regions[i].base;
			type->regions[i+1].size = type->regions[i].size;
		} else {
			type->regions[i+1].base = base;
			type->regions[i+1].size = size;
			break;
		}
	}
	if (base < type->regions[0].base) {
		type->regions[0].base = base;
		type->regions[0].size = size;
	}
	type->cnt++;

	return 0;
}
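/*
 * A small worked example (made-up addresses): starting from the dummy
 * zero-size entry, memblock_add_region(type, 0x1000, 0x1000) fills slot 0
 * with [0x1000..0x1fff].  A second call with base 0x2000 and size 0x1000
 * sits immediately above that entry (adjacent < 0), so the existing entry
 * simply grows to [0x1000..0x2fff] and type->cnt stays at 1.
 */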
long memblock_add(phys_addr_t base, phys_addr_t size)
{
	return memblock_add_region(&memblock.memory, base, size);
}
static long __memblock_remove(struct memblock_type *type, phys_addr_t base, phys_addr_t size)
{
	phys_addr_t rgnbegin, rgnend;
	phys_addr_t end = base + size;
	int i;

	rgnbegin = rgnend = 0; /* suppress gcc warnings */

	/* Find the region where (base, size) belongs to */
	for (i = 0; i < type->cnt; i++) {
		rgnbegin = type->regions[i].base;
		rgnend = rgnbegin + type->regions[i].size;

		if ((rgnbegin <= base) && (end <= rgnend))
			break;
	}

	/* Didn't find the region */
	if (i == type->cnt)
		return -1;

	/* Check to see if we are removing entire region */
	if ((rgnbegin == base) && (rgnend == end)) {
		memblock_remove_region(type, i);
		return 0;
	}

	/* Check to see if region is matching at the front */
	if (rgnbegin == base) {
		type->regions[i].base = end;
		type->regions[i].size -= size;
		return 0;
	}

	/* Check to see if the region is matching at the end */
	if (rgnend == end) {
		type->regions[i].size -= size;
		return 0;
	}

	/*
	 * We need to split the entry - adjust the current one to the
	 * beginning of the hole and add the region after the hole.
	 */
	type->regions[i].size = base - type->regions[i].base;
	return memblock_add_region(type, end, rgnend - end);
}
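/*
 * Illustrative example (made-up addresses): removing [0x2000..0x2fff]
 * from an entry covering [0x1000..0x3fff] via __memblock_remove() matches
 * neither the front nor the end, so the entry is trimmed to
 * [0x1000..0x1fff] and the tail [0x3000..0x3fff] is re-added as a
 * separate entry.
 */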
long memblock_remove(phys_addr_t base, phys_addr_t size)
{
	return __memblock_remove(&memblock.memory, base, size);
}

long __init memblock_free(phys_addr_t base, phys_addr_t size)
{
	return __memblock_remove(&memblock.reserved, base, size);
}

long __init memblock_reserve(phys_addr_t base, phys_addr_t size)
{
	struct memblock_type *_rgn = &memblock.reserved;

	BUG_ON(0 == size);

	return memblock_add_region(_rgn, base, size);
}
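/*
 * Rough usage sketch (not code from this file; the identifiers below are
 * placeholders): early architecture setup typically does something like
 *
 *	memblock_init();
 *	memblock_add(ram_base, ram_size);		// RAM reported by firmware
 *	memblock_analyze();
 *	memblock_reserve(kernel_base, kernel_size);	// keep allocs off the kernel
 *	paddr = memblock_alloc(sz, PAGE_SIZE);		// early physical allocation
 */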
long memblock_overlaps_region(struct memblock_type *type, phys_addr_t base, phys_addr_t size)
{
	unsigned long i;

	for (i = 0; i < type->cnt; i++) {
		phys_addr_t rgnbase = type->regions[i].base;
		phys_addr_t rgnsize = type->regions[i].size;
		if (memblock_addrs_overlap(base, size, rgnbase, rgnsize))
			break;
	}

	return (i < type->cnt) ? i : -1;
}

static phys_addr_t memblock_align_down(phys_addr_t addr, phys_addr_t size)
{
	return addr & ~(size - 1);
}

static phys_addr_t memblock_align_up(phys_addr_t addr, phys_addr_t size)
{
	return (addr + (size - 1)) & ~(size - 1);
}
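/*
 * The two helpers above assume a power-of-two alignment, since they work
 * by masking with ~(size - 1).  A small worked example (illustrative only):
 *
 *	memblock_align_down(0x12345, 0x1000) == 0x12000
 *	memblock_align_up(0x12345, 0x1000)   == 0x13000
 */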
static phys_addr_t __init memblock_alloc_region(phys_addr_t start, phys_addr_t end,
					phys_addr_t size, phys_addr_t align)
{
	phys_addr_t base, res_base;
	long j;

	base = memblock_align_down((end - size), align);
	while (start <= base) {
		j = memblock_overlaps_region(&memblock.reserved, base, size);
		if (j < 0) {
			/* this area isn't reserved, take it */
			if (memblock_add_region(&memblock.reserved, base, size) < 0)
				base = ~(phys_addr_t)0;
			return base;
		}
		res_base = memblock.reserved.regions[j].base;
		if (res_base < size)
			break;
		base = memblock_align_down(res_base - size, align);
	}

	return ~(phys_addr_t)0;
}
phys_addr_t __weak __init memblock_nid_range(phys_addr_t start, phys_addr_t end, int *nid)
{
	*nid = 0;
	return end;
}

static phys_addr_t __init memblock_alloc_nid_region(struct memblock_region *mp,
						phys_addr_t size,
						phys_addr_t align, int nid)
{
	phys_addr_t start, end;

	start = mp->base;
	end = start + mp->size;

	start = memblock_align_up(start, align);
	while (start < end) {
		phys_addr_t this_end;
		int this_nid;

		this_end = memblock_nid_range(start, end, &this_nid);
		if (this_nid == nid) {
			phys_addr_t ret = memblock_alloc_region(start, this_end, size, align);
			if (ret != ~(phys_addr_t)0)
				return ret;
		}
		start = this_end;
	}

	return ~(phys_addr_t)0;
}
phys_addr_t __init memblock_alloc_nid(phys_addr_t size, phys_addr_t align, int nid)
{
	struct memblock_type *mem = &memblock.memory;
	int i;

	BUG_ON(0 == size);

	/* We do a bottom-up search for a region with the right
	 * nid since that's easier considering how memblock_nid_range()
	 * works
	 */
	size = memblock_align_up(size, align);

	for (i = 0; i < mem->cnt; i++) {
		phys_addr_t ret = memblock_alloc_nid_region(&mem->regions[i],
							    size, align, nid);
		if (ret != ~(phys_addr_t)0)
			return ret;
	}

	return memblock_alloc(size, align);
}
phys_addr_t __init memblock_alloc(phys_addr_t size, phys_addr_t align)
{
	return memblock_alloc_base(size, align, MEMBLOCK_ALLOC_ACCESSIBLE);
}

phys_addr_t __init memblock_alloc_base(phys_addr_t size, phys_addr_t align, phys_addr_t max_addr)
{
	phys_addr_t alloc;

	alloc = __memblock_alloc_base(size, align, max_addr);

	if (alloc == 0)
		panic("ERROR: Failed to allocate 0x%llx bytes below 0x%llx.\n",
		      (unsigned long long) size, (unsigned long long) max_addr);

	return alloc;
}
phys_addr_t __init __memblock_alloc_base(phys_addr_t size, phys_addr_t align, phys_addr_t max_addr)
{
	long i;
	phys_addr_t base = 0;
	phys_addr_t res_base;

	BUG_ON(0 == size);

	size = memblock_align_up(size, align);

	/* Pump up max_addr */
	if (max_addr == MEMBLOCK_ALLOC_ACCESSIBLE)
		max_addr = memblock.current_limit;

	/* We do a top-down search, this tends to limit memory
	 * fragmentation by keeping early boot allocs near the
	 * top of memory
	 */
	for (i = memblock.memory.cnt - 1; i >= 0; i--) {
		phys_addr_t memblockbase = memblock.memory.regions[i].base;
		phys_addr_t memblocksize = memblock.memory.regions[i].size;

		if (memblocksize < size)
			continue;
		base = min(memblockbase + memblocksize, max_addr);
		res_base = memblock_alloc_region(memblockbase, base, size, align);
		if (res_base != ~(phys_addr_t)0)
			return res_base;
	}
	return 0;
}
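/*
 * Note on failure conventions: memblock_alloc_region() reports failure as
 * ~(phys_addr_t)0, while __memblock_alloc_base() returns 0 when nothing
 * fits; memblock_alloc_base() turns that 0 into a panic(), so its callers
 * never have to handle allocation failure.
 */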
/* You must call memblock_analyze() before this. */
phys_addr_t __init memblock_phys_mem_size(void)
{
	return memblock.memory_size;
}

phys_addr_t memblock_end_of_DRAM(void)
{
	int idx = memblock.memory.cnt - 1;

	return (memblock.memory.regions[idx].base + memblock.memory.regions[idx].size);
}
/* You must call memblock_analyze() after this. */
void __init memblock_enforce_memory_limit(phys_addr_t memory_limit)
{
	unsigned long i;
	phys_addr_t limit;
	struct memblock_region *p;

	if (!memory_limit)
		return;

	/* Truncate the memblock regions to satisfy the memory limit. */
	limit = memory_limit;
	for (i = 0; i < memblock.memory.cnt; i++) {
		if (limit > memblock.memory.regions[i].size) {
			limit -= memblock.memory.regions[i].size;
			continue;
		}
		memblock.memory.regions[i].size = limit;
		memblock.memory.cnt = i + 1;
		break;
	}

	memory_limit = memblock_end_of_DRAM();

	/* And truncate any reserves above the limit also. */
	for (i = 0; i < memblock.reserved.cnt; i++) {
		p = &memblock.reserved.regions[i];

		if (p->base > memory_limit)
			p->size = 0;
		else if ((p->base + p->size) > memory_limit)
			p->size = memory_limit - p->base;

		if (p->size == 0) {
			memblock_remove_region(&memblock.reserved, i);
			i--;
		}
	}
}
static int memblock_search(struct memblock_type *type, phys_addr_t addr)
{
	unsigned int left = 0, right = type->cnt;

	do {
		unsigned int mid = (right + left) / 2;

		if (addr < type->regions[mid].base)
			right = mid;
		else if (addr >= (type->regions[mid].base +
				  type->regions[mid].size))
			left = mid + 1;
		else
			return mid;
	} while (left < right);
	return -1;
}
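/*
 * This binary search relies on the region array being sorted by base
 * address with no overlaps.  Example (made-up table): with regions
 * [0x1000..0x1fff] and [0x8000..0x8fff], memblock_search() returns 1 for
 * addr 0x8100 and -1 for addr 0x3000.
 */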
int __init memblock_is_reserved(phys_addr_t addr)
{
	return memblock_search(&memblock.reserved, addr) != -1;
}

int memblock_is_memory(phys_addr_t addr)
{
	return memblock_search(&memblock.memory, addr) != -1;
}
int memblock_is_region_memory(phys_addr_t base, phys_addr_t size)
{
	int idx = memblock_search(&memblock.memory, base);

	if (idx == -1)
		return 0;
	return memblock.memory.regions[idx].base <= base &&
		(memblock.memory.regions[idx].base +
		 memblock.memory.regions[idx].size) >= (base + size);
}
int memblock_is_region_reserved(phys_addr_t base, phys_addr_t size)
{
	return memblock_overlaps_region(&memblock.reserved, base, size) >= 0;
}

void __init memblock_set_current_limit(phys_addr_t limit)
{
	memblock.current_limit = limit;
}