/*
 *	fs/proc/kcore.c kernel ELF core dumper
 *
 *	Modelled on fs/exec.c:aout_core_dump()
 *	Jeremy Fitzhardinge <jeremy@sw.oz.au>
 *	ELF version written by David Howells <David.Howells@nexor.co.uk>
 *	Modified and incorporated into 2.3.x by Tigran Aivazian <tigran@veritas.com>
 *	Support to dump vmalloc'd areas (ELF only), Tigran Aivazian <tigran@veritas.com>
 *	Safe accesses to vmalloc/direct-mapped discontiguous areas, Kanoj Sarcar <kanoj@sgi.com>
 */

#include <linux/mm.h>
#include <linux/proc_fs.h>
#include <linux/user.h>
#include <linux/capability.h>
#include <linux/elf.h>
#include <linux/elfcore.h>
#include <linux/vmalloc.h>
#include <linux/highmem.h>
#include <linux/bootmem.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <asm/uaccess.h>
#include <asm/io.h>
#include <linux/list.h>
#include <linux/ioport.h>
#include <linux/memory.h>
#include <asm/sections.h>

#define CORE_STR "CORE"

#ifndef ELF_CORE_EFLAGS
#define ELF_CORE_EFLAGS	0
#endif

static struct proc_dir_entry *proc_root_kcore;

#ifndef kc_vaddr_to_offset
#define kc_vaddr_to_offset(v) ((v) - PAGE_OFFSET)
#endif
#ifndef kc_offset_to_vaddr
#define kc_offset_to_vaddr(o) ((o) + PAGE_OFFSET)
#endif

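/*
 * The virtual core file puts the ELF header buffer first and the dumped
 * memory after it, so a kernel virtual address and its file offset differ
 * by a constant shift.  The elf_buflen bias is applied by the read path,
 * not by these macros:
 *
 *	file offset = elf_buflen + kc_vaddr_to_offset(vaddr)
 *	vaddr       = kc_offset_to_vaddr(file offset - elf_buflen)
 */
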
/* An ELF note in memory */
struct memelfnote
{
	const char *name;
	int type;
	unsigned int datasz;
	void *data;
};

static LIST_HEAD(kclist_head);
static DEFINE_RWLOCK(kclist_lock);
static int kcore_need_update = 1;

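/*
 * Each region to be dumped is described by a kcore_list entry on
 * kclist_head and becomes one PT_LOAD segment of the virtual core file.
 * kclist_lock protects the list; kcore_need_update flags that the
 * KCORE_RAM entries are stale (e.g. after memory hotplug) and must be
 * rebuilt on the next open().
 */
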
void
kclist_add(struct kcore_list *new, void *addr, size_t size, int type)
{
	new->addr = (unsigned long)addr;
	new->size = size;
	new->type = type;

	write_lock(&kclist_lock);
	list_add_tail(&new->list, &kclist_head);
	write_unlock(&kclist_lock);
}

static size_t get_kcore_size(int *nphdr, size_t *elf_buflen)
{
	size_t try, size;
	struct kcore_list *m;

	*nphdr = 1; /* PT_NOTE */
	size = 0;

	list_for_each_entry(m, &kclist_head, list) {
		try = kc_vaddr_to_offset((size_t)m->addr + m->size);
		if (try > size)
			size = try;
		*nphdr = *nphdr + 1;
	}
	*elf_buflen =	sizeof(struct elfhdr) +
			(*nphdr + 2)*sizeof(struct elf_phdr) +
			3 * ((sizeof(struct elf_note)) +
			     roundup(sizeof(CORE_STR), 4)) +
			roundup(sizeof(struct elf_prstatus), 4) +
			roundup(sizeof(struct elf_prpsinfo), 4) +
			roundup(sizeof(struct task_struct), 4);
	*elf_buflen = PAGE_ALIGN(*elf_buflen);
	return size + *elf_buflen;
}

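/*
 * In get_kcore_size() above, "size" ends up as the highest file offset
 * covered by any kcore_list entry, while *elf_buflen reserves a
 * page-aligned area generously sized for the ELF header, the program
 * headers and the three notes emitted by elf_kcore_store_hdr().
 */
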
static void free_kclist_ents(struct list_head *head)
{
	struct kcore_list *tmp, *pos;

	list_for_each_entry_safe(pos, tmp, head, list) {
		list_del(&pos->list);
		kfree(pos);
	}
}
/*
 * Replace all KCORE_RAM/KCORE_VMEMMAP information with the passed list.
 */
static void __kcore_update_ram(struct list_head *list)
{
	int nphdr;
	size_t size;
	struct kcore_list *tmp, *pos;
	LIST_HEAD(garbage);

	write_lock(&kclist_lock);
	if (kcore_need_update) {
		list_for_each_entry_safe(pos, tmp, &kclist_head, list) {
			if (pos->type == KCORE_RAM
				|| pos->type == KCORE_VMEMMAP)
				list_move(&pos->list, &garbage);
		}
		list_splice_tail(list, &kclist_head);
	} else
		list_splice(list, &garbage);
	kcore_need_update = 0;
	proc_root_kcore->size = get_kcore_size(&nphdr, &size);
	write_unlock(&kclist_lock);

	free_kclist_ents(&garbage);
}

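/*
 * Note the ordering above: stale entries are only unlinked while
 * kclist_lock is held and are kfree()d afterwards through
 * free_kclist_ents(), so no memory is allocated or freed under the lock.
 */
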
#if defined(CONFIG_HIGHMEM) || defined(CONFIG_XEN)
/*
 * With highmem, we simply dump [0...max_low_pfn) as one continuous range;
 * the memory holes there are not as big as in the !HIGHMEM case.
 * (HIGHMEM is special because part of memory is _invisible_ to the kernel.)
 */
static int kcore_update_ram(void)
{
	LIST_HEAD(head);
	struct kcore_list *ent;
	int ret = 0;

	ent = kmalloc(sizeof(*ent), GFP_KERNEL);
	if (!ent)
		return -ENOMEM;
	ent->addr = (unsigned long)__va(0);
#ifdef CONFIG_HIGHMEM
	ent->size = max_low_pfn << PAGE_SHIFT;
#else
	ent->size = max_pfn << PAGE_SHIFT;
#endif
	ent->type = KCORE_RAM;
	list_add(&ent->list, &head);
	__kcore_update_ram(&head);
	return ret;
}

#else /* !CONFIG_HIGHMEM */

#ifdef CONFIG_SPARSEMEM_VMEMMAP
/* calculate vmemmap's address from the given system ram pfn and register it */
int get_sparsemem_vmemmap_info(struct kcore_list *ent, struct list_head *head)
{
	unsigned long pfn = __pa(ent->addr) >> PAGE_SHIFT;
	unsigned long nr_pages = ent->size >> PAGE_SHIFT;
	unsigned long start, end;
	struct kcore_list *vmm, *tmp;

	start = ((unsigned long)pfn_to_page(pfn)) & PAGE_MASK;
	end = ((unsigned long)pfn_to_page(pfn + nr_pages)) - 1;
	end = ALIGN(end, PAGE_SIZE);
	/* overlap check (because we have to align the pages) */
	list_for_each_entry(tmp, head, list) {
		if (tmp->type != KCORE_VMEMMAP)
			continue;
		if (start < tmp->addr + tmp->size)
			if (end > tmp->addr)
				end = tmp->addr;
	}
	if (start < end) {
		vmm = kmalloc(sizeof(*vmm), GFP_KERNEL);
		if (!vmm)
			return 0;
		vmm->addr = start;
		vmm->size = end - start;
		vmm->type = KCORE_VMEMMAP;
		list_add_tail(&vmm->list, head);
	}
	return 1;
}

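/*
 * Registering the vmemmap range means the struct page array describing
 * the dumped RAM is itself readable from /proc/kcore, presumably so that
 * debugging tools can inspect the page structs of any dumped page.
 */
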
#else
int get_sparsemem_vmemmap_info(struct kcore_list *ent, struct list_head *head)
{
	return 1;
}
#endif

static int
kclist_add_private(unsigned long pfn, unsigned long nr_pages, void *arg)
{
	struct list_head *head = (struct list_head *)arg;
	struct kcore_list *ent;

	ent = kmalloc(sizeof(*ent), GFP_KERNEL);
	if (!ent)
		return -ENOMEM;
	ent->addr = (unsigned long)__va((pfn << PAGE_SHIFT));
	ent->size = nr_pages << PAGE_SHIFT;

	/* Sanity check: can happen on a 32-bit arch... maybe */
	if (ent->addr < (unsigned long) __va(0))
		goto free_out;

	/* cut the not-mapped area; taken from ppc32 code */
	if (ULONG_MAX - ent->addr < ent->size)
		ent->size = ULONG_MAX - ent->addr;

	/* cut when the vmalloc() area is higher than the direct-map area */
	if (VMALLOC_START > (unsigned long)__va(0)) {
		if (ent->addr > VMALLOC_START)
			goto free_out;
		if (VMALLOC_START - ent->addr < ent->size)
			ent->size = VMALLOC_START - ent->addr;
	}

	ent->type = KCORE_RAM;
	list_add_tail(&ent->list, head);

	if (!get_sparsemem_vmemmap_info(ent, head)) {
		list_del(&ent->list);
		goto free_out;
	}

	return 0;
free_out:
	kfree(ent);
	return 1;
}

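/*
 * Return convention for the walk_system_ram_range() callback above:
 * 0 lets the walk continue; the nonzero free_out return aborts it, and
 * kcore_update_ram() then throws away the partially built list.
 */
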
static int kcore_update_ram(void)
{
	int nid, ret;
	unsigned long end_pfn;
	LIST_HEAD(head);

	/* Not initialized... update now */
	/* find out "max pfn" */
	end_pfn = 0;
	for_each_node_state(nid, N_HIGH_MEMORY) {
		unsigned long node_end;
		node_end = NODE_DATA(nid)->node_start_pfn +
			NODE_DATA(nid)->node_spanned_pages;
		if (end_pfn < node_end)
			end_pfn = node_end;
	}
	/* scan 0 to max_pfn */
	ret = walk_system_ram_range(0, end_pfn, &head, kclist_add_private);
	if (ret) {
		free_kclist_ents(&head);
		return -ENOMEM;
	}
	__kcore_update_ram(&head);
	return ret;
}
#endif /* CONFIG_HIGHMEM */

/*****************************************************************************/
/*
 * determine size of ELF note
 */
static int notesize(struct memelfnote *en)
{
	int sz;

	sz = sizeof(struct elf_note);
	sz += roundup((strlen(en->name) + 1), 4);
	sz += roundup(en->datasz, 4);

	return sz;
} /* end notesize() */

/*****************************************************************************/
/*
 * store a note in the header buffer
 */
static char *storenote(struct memelfnote *men, char *bufp)
{
	struct elf_note en;

#define DUMP_WRITE(addr,nr) do { memcpy(bufp,addr,nr); bufp += nr; } while(0)

	en.n_namesz = strlen(men->name) + 1;
	en.n_descsz = men->datasz;
	en.n_type = men->type;

	DUMP_WRITE(&en, sizeof(en));
	DUMP_WRITE(men->name, en.n_namesz);

	/* XXX - cast from long long to long to avoid need for libgcc.a */
	bufp = (char*) roundup((unsigned long)bufp,4);
	DUMP_WRITE(men->data, men->datasz);
	bufp = (char*) roundup((unsigned long)bufp,4);

#undef DUMP_WRITE

	return bufp;
} /* end storenote() */

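/*
 * Each note emitted by storenote() has the standard ELF note layout:
 * a struct elf_note header (n_namesz, n_descsz, n_type) followed by the
 * name and then the descriptor data, each padded to a 4-byte boundary.
 */
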
/*****************************************************************************/
/*
 * store an ELF coredump header in the supplied buffer
 * nphdr is the number of elf_phdr to insert
 */
static void elf_kcore_store_hdr(char *bufp, int nphdr, int dataoff)
{
	struct elf_prstatus prstatus;	/* NT_PRSTATUS */
	struct elf_prpsinfo prpsinfo;	/* NT_PRPSINFO */
	struct elf_phdr *nhdr, *phdr;
	struct elfhdr *elf;
	struct memelfnote notes[3];
	off_t offset = 0;
	struct kcore_list *m;

	/* setup ELF header */
	elf = (struct elfhdr *) bufp;
	bufp += sizeof(struct elfhdr);
	offset += sizeof(struct elfhdr);
	memcpy(elf->e_ident, ELFMAG, SELFMAG);
	elf->e_ident[EI_CLASS]	= ELF_CLASS;
	elf->e_ident[EI_DATA]	= ELF_DATA;
	elf->e_ident[EI_VERSION]= EV_CURRENT;
	elf->e_ident[EI_OSABI]	= ELF_OSABI;
	memset(elf->e_ident+EI_PAD, 0, EI_NIDENT-EI_PAD);
	elf->e_type	= ET_CORE;
	elf->e_machine	= ELF_ARCH;
	elf->e_version	= EV_CURRENT;
	elf->e_entry	= 0;
	elf->e_phoff	= sizeof(struct elfhdr);
	elf->e_shoff	= 0;
	elf->e_flags	= ELF_CORE_EFLAGS;
	elf->e_ehsize	= sizeof(struct elfhdr);
	elf->e_phentsize= sizeof(struct elf_phdr);
	elf->e_phnum	= nphdr;
	elf->e_shentsize= 0;
	elf->e_shnum	= 0;
	elf->e_shstrndx	= 0;

	/* setup ELF PT_NOTE program header */
	nhdr = (struct elf_phdr *) bufp;
	bufp += sizeof(struct elf_phdr);
	offset += sizeof(struct elf_phdr);
	nhdr->p_type	= PT_NOTE;
	nhdr->p_offset	= 0;
	nhdr->p_vaddr	= 0;
	nhdr->p_paddr	= 0;
	nhdr->p_filesz	= 0;
	nhdr->p_memsz	= 0;
	nhdr->p_flags	= 0;
	nhdr->p_align	= 0;

	/* setup ELF PT_LOAD program header for every area */
	list_for_each_entry(m, &kclist_head, list) {
		phdr = (struct elf_phdr *) bufp;
		bufp += sizeof(struct elf_phdr);
		offset += sizeof(struct elf_phdr);

		phdr->p_type	= PT_LOAD;
		phdr->p_flags	= PF_R|PF_W|PF_X;
		phdr->p_offset	= kc_vaddr_to_offset(m->addr) + dataoff;
		phdr->p_vaddr	= (size_t)m->addr;
		phdr->p_paddr	= 0;
		phdr->p_filesz	= phdr->p_memsz	= m->size;
		phdr->p_align	= PAGE_SIZE;
	}

	/*
	 * Set up the notes in similar form to SVR4 core dumps made
	 * with info from their /proc.
	 */
	nhdr->p_offset	= offset;

	/* set up the process status */
	notes[0].name	= CORE_STR;
	notes[0].type	= NT_PRSTATUS;
	notes[0].datasz	= sizeof(struct elf_prstatus);
	notes[0].data	= &prstatus;

	memset(&prstatus, 0, sizeof(struct elf_prstatus));

	nhdr->p_filesz	= notesize(&notes[0]);
	bufp = storenote(&notes[0], bufp);

	/* set up the process info */
	notes[1].name	= CORE_STR;
	notes[1].type	= NT_PRPSINFO;
	notes[1].datasz	= sizeof(struct elf_prpsinfo);
	notes[1].data	= &prpsinfo;

	memset(&prpsinfo, 0, sizeof(struct elf_prpsinfo));
	prpsinfo.pr_state	= 0;
	prpsinfo.pr_sname	= 'R';
	prpsinfo.pr_zomb	= 0;

	strcpy(prpsinfo.pr_fname, "vmlinux");
	strncpy(prpsinfo.pr_psargs, saved_command_line, ELF_PRARGSZ);

	nhdr->p_filesz	+= notesize(&notes[1]);
	bufp = storenote(&notes[1], bufp);

	/* set up the task structure */
	notes[2].name	= CORE_STR;
	notes[2].type	= NT_TASKSTRUCT;
	notes[2].datasz	= sizeof(struct task_struct);
	notes[2].data	= current;

	nhdr->p_filesz	+= notesize(&notes[2]);
	bufp = storenote(&notes[2], bufp);

} /* end elf_kcore_store_hdr() */

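/*
 * Resulting layout of the virtual core file, where dataoff is the
 * page-aligned elf_buflen computed by get_kcore_size():
 *
 *	0		ELF header (struct elfhdr)
 *	e_phoff		PT_NOTE phdr, then one PT_LOAD phdr per kcore_list
 *	nhdr->p_offset	the three notes stored above
 *	...		zero padding up to dataoff
 *	dataoff		dumped memory; each region starts at
 *			kc_vaddr_to_offset(m->addr) + dataoff
 */
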
/*****************************************************************************/
/*
 * read from the ELF header and then kernel memory
 */
static ssize_t
read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos)
{
	ssize_t acc = 0;
	size_t size, tsz;
	size_t elf_buflen;
	int nphdr;
	unsigned long start;

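	/*
	 * Reads are served in two stages: offsets below elf_buflen come
	 * from a freshly built header buffer; everything beyond that is
	 * translated back to a kernel virtual address and copied out at
	 * most one page at a time.
	 */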
	read_lock(&kclist_lock);
	size = get_kcore_size(&nphdr, &elf_buflen);

	if (buflen == 0 || *fpos >= size) {
		read_unlock(&kclist_lock);
		return 0;
	}

	/* trim buflen to not go beyond EOF */
	if (buflen > size - *fpos)
		buflen = size - *fpos;

	/* construct an ELF core header if we'll need some of it */
	if (*fpos < elf_buflen) {
		char *elf_buf;

		tsz = elf_buflen - *fpos;
		if (buflen < tsz)
			tsz = buflen;
		elf_buf = kzalloc(elf_buflen, GFP_ATOMIC);
		if (!elf_buf) {
			read_unlock(&kclist_lock);
			return -ENOMEM;
		}
		elf_kcore_store_hdr(elf_buf, nphdr, elf_buflen);
		read_unlock(&kclist_lock);
		if (copy_to_user(buffer, elf_buf + *fpos, tsz)) {
			kfree(elf_buf);
			return -EFAULT;
		}
		kfree(elf_buf);
		buflen -= tsz;
		*fpos += tsz;
		buffer += tsz;
		acc += tsz;

		/* leave now if filled buffer already */
		if (buflen == 0)
			return acc;
	} else
		read_unlock(&kclist_lock);

	/*
	 * Check to see if our file offset matches with any of
	 * the addresses in the elf_phdr on our list.
	 */
	start = kc_offset_to_vaddr(*fpos - elf_buflen);
	if ((tsz = (PAGE_SIZE - (start & ~PAGE_MASK))) > buflen)
		tsz = buflen;

	while (buflen) {
		struct kcore_list *m;

		read_lock(&kclist_lock);
		list_for_each_entry(m, &kclist_head, list) {
			if (start >= m->addr && start < (m->addr+m->size))
				break;
		}
		read_unlock(&kclist_lock);

		if (&m->list == &kclist_head) {
			if (clear_user(buffer, tsz))
				return -EFAULT;
		} else if (is_vmalloc_or_module_addr((void *)start)) {
			char *elf_buf;

			elf_buf = kzalloc(tsz, GFP_KERNEL);
			if (!elf_buf)
				return -ENOMEM;
			vread(elf_buf, (char *)start, tsz);
			/* we have to zero-fill the user buffer even if no read */
			if (copy_to_user(buffer, elf_buf, tsz)) {
				kfree(elf_buf);
				return -EFAULT;
			}
			kfree(elf_buf);
		} else {
			if (kern_addr_valid(start)) {
				unsigned long n;

				n = copy_to_user(buffer, (char *)start, tsz);
				/*
				 * We cannot distinguish between a fault on the
				 * source and a fault on the destination. When
				 * this happens we clear too and hope it will
				 * trigger the EFAULT again.
				 */
				if (n) {
					if (clear_user(buffer + tsz - n, n))
						return -EFAULT;
				}
			} else {
				if (clear_user(buffer, tsz))
					return -EFAULT;
			}
		}
		buflen -= tsz;
		*fpos += tsz;
		buffer += tsz;
		acc += tsz;
		start += tsz;
		tsz = (buflen > PAGE_SIZE ? PAGE_SIZE : buflen);
	}

	return acc;
}

static int open_kcore(struct inode *inode, struct file *filp)
{
	if (!capable(CAP_SYS_RAWIO))
		return -EPERM;
	if (kcore_need_update)
		kcore_update_ram();
	if (i_size_read(inode) != proc_root_kcore->size) {
		mutex_lock(&inode->i_mutex);
		i_size_write(inode, proc_root_kcore->size);
		mutex_unlock(&inode->i_mutex);
	}
	return 0;
}

static const struct file_operations proc_kcore_operations = {
	.read		= read_kcore,
	.open		= open_kcore,
	.llseek		= default_llseek,
};

#ifdef CONFIG_MEMORY_HOTPLUG
/* just remember that we have to update kcore */
static int __meminit kcore_callback(struct notifier_block *self,
				    unsigned long action, void *arg)
{
	switch (action) {
	case MEM_ONLINE:
	case MEM_OFFLINE:
		write_lock(&kclist_lock);
		kcore_need_update = 1;
		write_unlock(&kclist_lock);
	}
	return NOTIFY_OK;
}
#endif

static struct kcore_list kcore_vmalloc;

#ifdef CONFIG_ARCH_PROC_KCORE_TEXT
static struct kcore_list kcore_text;
/*
 * If defined, a special segment is used for mapping the kernel text
 * instead of the direct-map area; we need to create a special TEXT entry
 * for it.
 */
static void __init proc_kcore_text_init(void)
{
	kclist_add(&kcore_text, _text, _end - _text, KCORE_TEXT);
}
#else
static void __init proc_kcore_text_init(void)
{
}
#endif

#if defined(CONFIG_MODULES) && defined(MODULES_VADDR)
/*
 * MODULES_VADDR has no intersection with VMALLOC_ADDR.
 */
struct kcore_list kcore_modules;
static void __init add_modules_range(void)
{
	kclist_add(&kcore_modules, (void *)MODULES_VADDR,
			MODULES_END - MODULES_VADDR, KCORE_VMALLOC);
}
#else
static void __init add_modules_range(void)
{
}
#endif

static int __init proc_kcore_init(void)
{
	proc_root_kcore = proc_create("kcore", S_IRUSR, NULL,
				      &proc_kcore_operations);
	if (!proc_root_kcore) {
		printk(KERN_ERR "couldn't create /proc/kcore\n");
		return 0; /* Always returns 0. */
	}
	/* Store text area if it's special */
	proc_kcore_text_init();
	/* Store vmalloc area */
	kclist_add(&kcore_vmalloc, (void *)VMALLOC_START,
		VMALLOC_END - VMALLOC_START, KCORE_VMALLOC);
	add_modules_range();
	/* Store direct-map area from physical memory map */
	kcore_update_ram();
	hotplug_memory_notifier(kcore_callback, 0);

	return 0;
}
module_init(proc_kcore_init);
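
/*
 * Typical use of /proc/kcore, assuming a vmlinux with debugging info that
 * matches the running kernel:
 *
 *	gdb /path/to/vmlinux /proc/kcore
 *	(gdb) p init_task.comm
 *
 * gdb treats the file as an ordinary ELF core dump; the PT_LOAD segments
 * built above let it translate kernel virtual addresses to file offsets.
 */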