endchoice
+config CRASH_DUMP
+ tristate "Crash dump support (EXPERIMENTAL)"
+ depends on EXPERIMENTAL
+ default n
+ ---help---
+ Say Y here to enable saving an image of system memory when a panic
+ or other error occurs. Dumps can also be forced with the SysRq+d
+ key if MAGIC_SYSRQ is enabled.
+
+config CRASH_DUMP_BLOCKDEV
+ tristate "Crash dump block device driver"
+ depends on CRASH_DUMP
+ help
+ Say Y to allow saving crash dumps directly to a disk device.
+
+config CRASH_DUMP_NETDEV
+ tristate "Crash dump network device driver"
+ depends on CRASH_DUMP
+ help
+ Say Y to allow saving crash dumps over a network device.
+
+config CRASH_DUMP_MEMDEV
+ bool "Crash dump staged memory driver"
+ depends on CRASH_DUMP
+ help
+ Say Y to allow intermediate saving crash dumps in spare
+ memory pages which would then be written out to disk
+ later.
+
+config CRASH_DUMP_SOFTBOOT
+ bool "Save crash dump across a soft reboot"
+ depends on CRASH_DUMP_MEMDEV
+ help
+ Say Y to allow a crash dump to be preserved in memory
+ pages across a soft reboot and written out to disk
+ thereafter. For this to work, CRASH_DUMP must be
+ configured as part of the kernel (not as a module).
+
+config CRASH_DUMP_COMPRESS_RLE
+ tristate "Crash dump RLE compression"
+ depends on CRASH_DUMP
+ help
+ Say Y to allow saving dumps with Run Length Encoding compression.
+
+config CRASH_DUMP_COMPRESS_GZIP
+ tristate "Crash dump GZIP compression"
+ depends on CRASH_DUMP
+ help
+ Say Y to allow saving dumps with Gnu Zip compression.
+
config DEBUG_KERNEL
bool "Kernel debugging"
help
#include <linux/config.h>
#include <linux/module.h>
-
+#include <linux/nmi.h>
#include <linux/string.h>
EXPORT_SYMBOL(memset);
EXPORT_SYMBOL(memchr);
# endif
# endif
#endif
+
+#ifdef CONFIG_CRASH_DUMP_MODULE
+EXPORT_SYMBOL(irq_affinity);
+#endif
static struct proc_dir_entry * smp_affinity_entry [NR_IRQS];
-static cpumask_t irq_affinity [NR_IRQS] = { [0 ... NR_IRQS-1] = CPU_MASK_ALL };
+#if !(defined(CONFIG_CRASH_DUMP) || defined (CONFIG_CRASH_DUMP_MODULE))
+static
+#endif
+unsigned long irq_affinity [NR_IRQS] = { [0 ... NR_IRQS-1] = ~0UL };
static char irq_redir [NR_IRQS]; // = { [0 ... NR_IRQS-1] = 1 };
#include <linux/cache.h>
#include <linux/delay.h>
#include <linux/cache.h>
-#include <linux/efi.h>
+#if defined(CONFIG_CRASH_DUMP) || defined(CONFIG_CRASH_DUMP_MODULE)
+#include <linux/dump.h>
+#endif
+
+#include <linux/efi.h>
#include <asm/atomic.h>
#include <asm/bitops.h>
#include <asm/current.h>
#define IPI_KDB_INTERRUPT 2
#endif /* CONFIG_KDB */
+#if defined(CONFIG_CRASH_DUMP) || defined(CONFIG_CRASH_DUMP_MODULE)
+#define IPI_DUMP_INTERRUPT 4
+ int (*dump_ipi_function_ptr)(struct pt_regs *) = NULL;
+#endif
+
/* This needs to be cacheline aligned because it is written to by *other* CPUs. */
static DEFINE_PER_CPU(u64, ipi_operation) ____cacheline_aligned;
-
-static void
+/* stop_this_cpu() was made non-static so the crash dump code can halt other CPUs */
+void
stop_this_cpu (void)
{
extern void cpu_halt (void);
case IPI_CPU_STOP:
stop_this_cpu();
break;
+#if defined(CONFIG_CRASH_DUMP) || defined(CONFIG_CRASH_DUMP_MODULE)
+ case IPI_DUMP_INTERRUPT:
+ if( dump_ipi_function_ptr != NULL ) {
+ if (!dump_ipi_function_ptr(regs)) {
+ printk(KERN_ERR "(*dump_ipi_function_ptr)(): rejected IPI_DUMP_INTERRUPT\n");
+ }
+ }
+ break;
+#endif
#ifdef CONFIG_KDB
case IPI_KDB_INTERRUPT:
send_IPI_allbutself(IPI_KDB_INTERRUPT);
}
#endif /* CONFIG_KDB */
+
+/*
+ * Broadcast IPI_DUMP_INTERRUPT to every CPU except the sender.  The
+ * receiving CPUs dispatch it to dump_ipi_function_ptr (see the
+ * IPI_DUMP_INTERRUPT case in the IPI handler above).
+ */
+void dump_send_ipi(void)
+{
+ send_IPI_allbutself(IPI_DUMP_INTERRUPT);
+}
#include <asm/intrinsics.h>
#include <asm/processor.h>
#include <asm/uaccess.h>
+#include <asm/nmi.h>
+#include <linux/dump.h>
/*
* fp_emulate() needs to be able to access and update all floating point registers. Those
printk("%s[%d]: %s %ld [%d]\n",
current->comm, current->pid, str, err, ++die_counter);
show_regs(regs);
+ dump((char *)str, regs);
} else
printk(KERN_ERR "Recursive die() failure, output suppressed\n");
dump-$(CONFIG_X86) += dump_i386.o
dump-$(CONFIG_ARM) += dump_arm.o
dump-$(CONFIG_PPC64) += dump_ppc64.o
+dump-$(CONFIG_IA64) += dump_ia64.o
dump-$(CONFIG_CRASH_DUMP_MEMDEV) += dump_memdev.o dump_overlay.o
dump-objs += $(dump-y)
-obj-$(CONFIG_CRASH_DUMP) += dump.o
+obj-$(CONFIG_CRASH_DUMP) += dump.o
obj-$(CONFIG_CRASH_DUMP_BLOCKDEV) += dump_blockdev.o
obj-$(CONFIG_CRASH_DUMP_NETDEV) += dump_netdev.o
obj-$(CONFIG_CRASH_DUMP_COMPRESS_RLE) += dump_rle.o
(page_to_pfn(p) < (min_low_pfn + bootmap_pages)));
}
-static inline int kernel_page(struct page *p)
+int kernel_page(struct page *p)
{
/* FIXME: Need to exclude hugetlb pages. Clue: reserved but inuse */
return (PageReserved(p) && !PageInuse(p)) || (!PageLRU(p) && PageInuse(p));
/* loc marks the beginning of a range of pages */
-int dump_filter_kernpages(int pass, unsigned long loc, unsigned long sz)
+int dump_filter_kernpages(int pass, unsigned long loc, unsigned long phy_addr,unsigned long sz)
{
struct page *page = (struct page *)loc;
/* if any of the pages is a kernel page, select this set */
while (sz) {
+#ifndef CONFIG_IA64
if (dump_low_page(page) || kernel_page(page))
+#else
+ if(IS_PINNED_ADDRESS(phy_addr))
+ return 1;
+
+ if (kernel_page(page))
+#endif
return 1;
sz -= PAGE_SIZE;
page++;
/* loc marks the beginning of a range of pages */
-int dump_filter_userpages(int pass, unsigned long loc, unsigned long sz)
+int dump_filter_userpages(int pass, unsigned long loc, unsigned long phy_addr,unsigned long sz)
{
struct page *page = (struct page *)loc;
int ret = 0;
/* select if the set has any user page, and no kernel pages */
+
while (sz) {
+#ifndef CONFIG_IA64
if (user_page(page) && !dump_low_page(page)) {
ret = 1;
} else if (kernel_page(page) || dump_low_page(page)) {
return 0;
}
+#else
+ if(IS_PINNED_ADDRESS(phy_addr))
+ return 0;
+
+ if (user_page(page)) {
+ ret = 1;
+ } else if (kernel_page(page)) {
+ return 0;
+ }
+#endif
page++;
sz -= PAGE_SIZE;
}
return ret;
}
-
-
/* loc marks the beginning of a range of pages */
-int dump_filter_unusedpages(int pass, unsigned long loc, unsigned long sz)
+int dump_filter_unusedpages(int pass, unsigned long loc,unsigned long phy_addr, unsigned long sz)
{
struct page *page = (struct page *)loc;
+
/* select if the set does not have any used pages */
while (sz) {
+#ifndef CONFIG_IA64
if (!unreferenced_page(page) || dump_low_page(page)) {
+#else
+ if(IS_PINNED_ADDRESS(phy_addr))
+ return 0;
+
+ if (!unreferenced_page(page)) {
+#endif
return 0;
}
page++;
}
/* dummy: last (non-existent) pass */
-int dump_filter_none(int pass, unsigned long loc, unsigned long sz)
+int dump_filter_none(int pass, unsigned long loc,unsigned long phy_addr, unsigned long sz)
{
return 0;
}
#include <linux/sched.h>
#include <linux/ptrace.h>
#include <linux/utsname.h>
-#include <asm/dump.h>
#include <linux/dump.h>
+#include <asm/dump.h>
#include "dump_methods.h"
/*
dump_config.dumper->header_dirty++;
return 0;
}
-
/* save register and task context */
void dump_lcrash_save_context(int cpu, const struct pt_regs *regs,
struct task_struct *tsk)
{
dump_header_asm.dha_smp_current_task[cpu] = (unsigned long)tsk;
-
__dump_save_regs(&dump_header_asm.dha_smp_regs[cpu], regs);
-
+
/* take a snapshot of the stack */
/* doing this enables us to tolerate slight drifts on this cpu */
+
if (dump_header_asm.dha_stack[cpu]) {
memcpy((void *)dump_header_asm.dha_stack[cpu],
- tsk->thread_info, THREAD_SIZE);
+ STACK_START_POSITION(tsk),
+ THREAD_SIZE);
}
dump_header_asm.dha_stack_ptr[cpu] = (unsigned long)(tsk->thread_info);
}
return !is_curr_stack_page(page, size) && !is_dump_page(page, size);
}
-void lcrash_init_pageheader(struct __dump_page *dp, struct page *page,
- unsigned long sz)
+void lcrash_init_pageheader(struct __dump_page *dp, struct page *page,
+ unsigned long phys_addr,
+ unsigned long sz,
+ unsigned long offset_in_page)
{
memset(dp, sizeof(struct __dump_page), 0);
dp->dp_flags = 0;
dp->dp_size = 0;
- if (sz > 0)
- dp->dp_address = (loff_t)page_to_pfn(page) << PAGE_SHIFT;
+ if (sz > 0){
+ dp->dp_address = phys_addr;
+ }
#if DUMP_DEBUG > 6
dp->dp_page_index = dump_header.dh_num_dump_pages;
#endif /* DUMP_DEBUG */
}
-int dump_lcrash_add_data(unsigned long loc, unsigned long len)
+int dump_lcrash_add_data(unsigned long loc, unsigned long phys_addr,unsigned long len,unsigned long offset_in_page)
{
struct page *page = (struct page *)loc;
void *addr, *buf = dump_config.dumper->curr_buf;
if (buf > dump_config.dumper->dump_buf + DUMP_BUFFER_SIZE)
return -ENOMEM;
- lcrash_init_pageheader(dp, page, len);
+ lcrash_init_pageheader(dp, page, phys_addr, len,offset_in_page);
buf += sizeof(struct __dump_page);
while (len) {
+#ifdef CONFIG_IA64
+ if (IS_PINNED_ADDRESS(phys_addr)){
+ addr=(void *)(loc);
+ } else {
+ addr = kmap_atomic(page, KM_DUMP);
+ addr += offset_in_page;
+ }
+#else
addr = kmap_atomic(page, KM_DUMP);
+#endif
size = bytes = (len > PAGE_SIZE) ? PAGE_SIZE : len;
/* check for compression */
if (dump_allow_compress(page, bytes)) {
size = bytes;
}
/* memset(buf, 'A', size); temporary: testing only !! */
- kunmap_atomic(addr, KM_DUMP);
+#ifdef CONFIG_IA64
+ if(!IS_PINNED_ADDRESS(phys_addr)){
+ kunmap_atomic(page, KM_DUMP);
+ }
+#else
+ kunmap_atomic(page, KM_DUMP);
+#endif
+
dp->dp_size += size;
buf += size;
len -= bytes;
unsigned long left;
int ret = 0;
- lcrash_init_pageheader(dp, NULL, 0);
+ lcrash_init_pageheader(dp, NULL,0, 0,0);
dp->dp_flags |= DUMP_DH_END; /* tbd: truncation test ? */
/* now update the header */
static __s32 saved_irq_count; /* saved preempt_count() flags */
+
static int
alloc_dha_stack(void)
{
void
__dump_init(uint64_t local_memory_start)
{
+ dump_mbanks = 1;
+ dump_mbank[ 0 ].start = 0;
+ dump_mbank[ 0 ].end = (((u64) max_mapnr) << PAGE_SHIFT) - 1;
+ dump_mbank[ 0 ].type = DUMP_MBANK_TYPE_CONVENTIONAL_MEMORY;
return;
}
--- /dev/null
+/*
+ * Architecture specific (ia64) functions for Linux crash dumps.
+ *
+ * Created by: Matt Robinson (yakker@sgi.com)
+ * Contributions from SGI, IBM, and others.
+ *
+ * 2.4 kernel modifications by: Matt D. Robinson (yakker@alacritech.com)
+ * ia64 kernel modifications by: Piet Delaney (piet@www.piet.net)
+ *
+ * Copyright (C) 2001 - 2002 Matt D. Robinson (yakker@alacritech.com)
+ * Copyright (C) 2002 Silicon Graphics, Inc. All rights reserved.
+ * Copyright (C) 2002 Free Software Foundation, Inc. All rights reserved.
+ *
+ * This code is released under version 2 of the GNU GPL.
+ */
+
+/*
+ * The hooks for dumping the kernel virtual memory to disk are in this
+ * file. Any time a modification is made to the virtual memory mechanism,
+ * these routines must be changed to use the new mechanisms.
+ */
+#include <linux/init.h>
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/smp.h>
+#include <linux/fs.h>
+#include <linux/vmalloc.h>
+#include <linux/dump.h>
+#include "dump_methods.h"
+#include <linux/mm.h>
+#include <asm/processor.h>
+#include <asm-ia64/dump.h>
+#include <asm/hardirq.h>
+#include <linux/irq.h>
+
+extern unsigned long irq_affinity[];
+static __s32 saved_irq_count; /* saved preempt_count() flags */
+static unsigned long saved_affinity[NR_IRQS];
+
+
+/*
+ * Allocate one THREAD_SIZE stack-snapshot buffer per online CPU and
+ * record each buffer's address in dump_header_asm.dha_stack[].
+ * Returns 0 on success (or if already allocated), -ENOMEM on failure.
+ *
+ * Fixes over the original: the NULL check now happens BEFORE the
+ * buffer is zeroed (the old code dereferenced a possibly-NULL pointer),
+ * the whole allocation is zeroed rather than only the first stack, and
+ * the failure message no longer claims vmalloc was used.
+ */
+static int alloc_dha_stack(void)
+{
+ int i;
+ void *ptr;
+
+ if (dump_header_asm.dha_stack[0])
+ {
+ return 0;
+ }
+
+ ptr = kmalloc(THREAD_SIZE * num_online_cpus(),GFP_KERNEL);
+ if (!ptr) {
+ printk("kmalloc for dha_stacks failed\n");
+ return -ENOMEM;
+ }
+ /* zero the entire allocation, not just the first THREAD_SIZE bytes */
+ bzero(ptr, THREAD_SIZE * num_online_cpus());
+
+ for (i = 0; i < num_online_cpus(); i++) {
+ dump_header_asm.dha_stack[i] = (uint64_t)((unsigned long)ptr + (i * THREAD_SIZE));
+ }
+ return 0;
+}
+
+/*
+ * Release the per-cpu stack-snapshot buffer created by
+ * alloc_dha_stack().  That buffer comes from kmalloc(), so it must be
+ * freed with kfree() — the original code wrongly used vfree(), which
+ * is only valid for vmalloc'd memory.
+ */
+static int free_dha_stack(void)
+{
+ if (dump_header_asm.dha_stack[0])
+ {
+ kfree((void*)dump_header_asm.dha_stack[0]);
+ dump_header_asm.dha_stack[0] = 0;
+ }
+ return 0;
+}
+
+/*
+ * Copy the register state captured at dump time into the caller's
+ * destination slot (typically a per-cpu entry in the dump header).
+ * A plain structure assignment copies the whole pt_regs.
+ */
+void
+__dump_save_regs(struct pt_regs *dest_regs, const struct pt_regs *regs)
+{
+ *dest_regs = *regs;
+
+ /* NOTE(review): the comment originally here discussed fixing up
+ * ss/esp, which is x86 logic; no such fixup is performed (or
+ * apparently needed) in this ia64 version. */
+
+}
+
+#ifdef CONFIG_SMP
+
+extern unsigned long irq_affinity[];
+#define irq_desc _irq_desc
+extern irq_desc_t irq_desc[];
+extern void dump_send_ipi(void);
+/*
+ * Routine to save the old irq affinities and change affinities of all irqs to
+ * the dumping cpu, so that disk/network interrupts needed for dump i/o
+ * are serviced on the CPU performing the dump.
+ */
+static void
+set_irq_affinity(void)
+{
+ int i;
+ int cpu = smp_processor_id();
+
+ /* snapshot current affinities so reset_irq_affinity() can restore them */
+ memcpy(saved_affinity, irq_affinity, NR_IRQS * sizeof(unsigned long));
+ for (i = 0; i < NR_IRQS; i++) {
+ if (irq_desc[i].handler == NULL)
+ continue;
+ /* route this irq exclusively to the dumping cpu */
+ irq_affinity[i] = 1UL << cpu;
+ if (irq_desc[i].handler->set_affinity != NULL)
+ irq_desc[i].handler->set_affinity(i, irq_affinity[i]);
+ }
+}
+
+/*
+ * Restore old irq affinities saved by set_irq_affinity().
+ */
+static void
+reset_irq_affinity(void)
+{
+ int i;
+
+ /* put the saved masks back before reprogramming the controllers */
+ memcpy(irq_affinity, saved_affinity, NR_IRQS * sizeof(unsigned long));
+ for (i = 0; i < NR_IRQS; i++) {
+ if (irq_desc[i].handler == NULL)
+ continue;
+ if (irq_desc[i].handler->set_affinity != NULL)
+ irq_desc[i].handler->set_affinity(i, saved_affinity[i]);
+ }
+}
+
+#else /* !CONFIG_SMP */
+#define set_irq_affinity() do { } while (0)
+#define reset_irq_affinity() do { } while (0)
+#define save_other_cpu_states() do { } while (0)
+#endif /* !CONFIG_SMP */
+
+#ifdef CONFIG_SMP
+static int dump_expect_ipi[NR_CPUS];
+static atomic_t waiting_for_dump_ipi;
+extern void (*dump_trace_ptr)(struct pt_regs *);
+
+
+extern void stop_this_cpu(void);
+//extern void unset_nmi_callback(void);
+
+/*
+ * Per-cpu response to the dump IPI.  Returns 0 if this CPU was not
+ * expecting a dump IPI, 1 after it has saved its state.  After saving
+ * registers the CPU is quiesced according to dump_silence_level:
+ * hard-spin until the dump completes, halt outright, or merely flag a
+ * reschedule (soft spin).  The goto re-evaluates the level if it is
+ * changed while spinning.
+ */
+static int
+dump_nmi_callback(struct pt_regs *regs, int cpu)
+{
+ if (!dump_expect_ipi[cpu])
+ return 0;
+
+ /* consume the expectation so a second IPI is ignored */
+ dump_expect_ipi[cpu] = 0;
+
+ dump_save_this_cpu(regs);
+ atomic_dec(&waiting_for_dump_ipi);
+
+ level_changed:
+ switch (dump_silence_level) {
+ case DUMP_HARD_SPIN_CPUS: /* Spin until dump is complete */
+ while (dump_oncpu) {
+ barrier(); /* paranoia */
+ if (dump_silence_level != DUMP_HARD_SPIN_CPUS)
+ goto level_changed;
+
+ cpu_relax(); /* kill time nicely */
+ }
+ break;
+
+ case DUMP_HALT_CPUS: /* Execute halt */
+ stop_this_cpu();
+ break;
+
+ case DUMP_SOFT_SPIN_CPUS:
+ /* Mark the task so it spins in schedule */
+ set_tsk_thread_flag(current, TIF_NEED_RESCHED);
+ break;
+ }
+ /* NOTE(review): no default case — an unrecognized silence level
+ * falls through and lets the CPU continue running. */
+
+ return 1;
+}
+
+/*
+ * Entry point installed as dump_ipi_function_ptr; runs on each CPU
+ * that receives IPI_DUMP_INTERRUPT.  Delegates to dump_nmi_callback()
+ * and propagates its result (0 = IPI not expected/rejected, 1 = state
+ * saved).  The original version fell off the end of this non-void
+ * function without returning — undefined behavior, and the caller in
+ * the IPI dispatch code tests the return value.
+ */
+int IPI_handler(struct pt_regs *regs)
+{
+ int cpu;
+ cpu = task_cpu(current);
+ return dump_nmi_callback(regs, cpu);
+}
+
+/* save registers on other processors:
+ * arm each online CPU (except us) to expect a dump IPI, install the
+ * IPI callback, broadcast the IPI, then wait for every CPU to check in.
+ */
+void
+__dump_save_other_cpus(void)
+{
+ int i, cpu = smp_processor_id();
+ int other_cpus = num_online_cpus()-1;
+
+ if (other_cpus > 0) {
+ atomic_set(&waiting_for_dump_ipi, other_cpus);
+
+ for (i = 0; i < NR_CPUS; i++) {
+ dump_expect_ipi[i] = (i != cpu && cpu_online(i));
+ }
+
+ /* short circuit normal NMI handling temporarily */
+ /*set_nmi_callback(dump_nmi_callback);*/
+ dump_ipi_function_ptr = IPI_handler;
+
+ /* make dump_expect_ipi[] and the callback pointer visible to the
+ * other CPUs before the IPI is sent */
+ wmb();
+
+ dump_send_ipi();
+ /* may be we dont need to wait for NMI to be processed.
+ just write out the header at the end of dumping, if
+ this IPI is not processed until then, there probably
+ is a problem and we just fail to capture state of
+ other cpus. */
+ while(atomic_read(&waiting_for_dump_ipi) > 0) {
+ cpu_relax();
+ }
+
+// unset_nmi_callback();
+ }
+}
+
+#else
+#define save_other_cpu_states()
+#endif
+/*
+ * Kludge - dump from interrupt context is unreliable (Fixme)
+ *
+ * We do this so that softirqs initiated for dump i/o
+ * get processed and we don't hang while waiting for i/o
+ * to complete or in any irq synchronization attempt.
+ *
+ * This is not quite legal of course, as it has the side
+ * effect of making all interrupts & softirqs triggered
+ * while dump is in progress complete before currently
+ * pending softirqs and the currently executing interrupt
+ * code.
+ */
+static inline void
+irq_bh_save(void)
+{
+ saved_irq_count = irq_count();
+ /* clear the hardirq/softirq counts so the dump path appears to run
+ * in process context; irq_bh_restore() puts them back */
+ preempt_count() &= ~(HARDIRQ_MASK|SOFTIRQ_MASK);
+}
+
+/* Undo irq_bh_save(): OR the saved hardirq/softirq counts back in. */
+static inline void
+irq_bh_restore(void)
+{
+ preempt_count() |= saved_irq_count;
+}
+
+/*
+ * Name: __dump_configure_header()
+ * Func: Fill in any architecture specific fields of the dump header.
+ * There is nothing to configure on ia64; always succeeds.
+ */
+int
+__dump_configure_header(const struct pt_regs *regs)
+{
+ return 0;
+}
+
+
+#define dim(x) (sizeof(x)/sizeof(*(x)))
+
+/*
+ * Name: __dump_irq_enable
+ * Func: Reset system so interrupts are enabled.
+ * This is used for dump methods that require interrupts
+ * Eventually, all methods will have interrupts disabled
+ * and this code can be removed.
+ *
+ * Change irq affinities
+ * Re-enable interrupts
+ */
+void
+__dump_irq_enable(void)
+{
+ /* route all irqs to this cpu and mask off irq/softirq counts */
+ set_irq_affinity();
+ irq_bh_save();
+ ia64_srlz_d();
+ /*
+ * reduce the task priority level
+ * to get disk interrupts
+ */
+ ia64_setreg(_IA64_REG_CR_TPR, 0);
+ ia64_srlz_d();
+ local_irq_enable();
+}
+
+/*
+ * Name: __dump_irq_restore
+ * Func: Undo __dump_irq_enable(): disable interrupts, restore the
+ * saved irq affinities, then restore the saved preempt counts.
+ */
+void
+__dump_irq_restore(void)
+{
+ local_irq_disable();
+ reset_irq_affinity();
+ irq_bh_restore();
+}
+
+/*
+ * Name: __dump_page_valid()
+ * Func: Decide whether the page frame number 'index' may be dumped.
+ * A frame is dumpable exactly when the pfn is valid.
+ */
+int
+__dump_page_valid(unsigned long index)
+{
+ return pfn_valid(index) ? 1 : 0;
+}
+
+
+/*
+ * Walk the EFI memory map and record every usable, cacheable range in
+ * dump_mbank[] / dump_mbanks so the dump iterator knows which physical
+ * ranges may be read.  Descriptors that are empty, uncacheable, or
+ * marked unusable are skipped; loader/boot-services/conventional
+ * ranges are classified as conventional memory, everything else as
+ * "other".
+ */
+void
+__init_mem_banks(void)
+{
+ void *p;
+ int i = 0;
+ void *efi_map_start = __va(ia64_boot_param->efi_memmap);
+ void *efi_map_end = efi_map_start + ia64_boot_param->efi_memmap_size;
+ u64 efi_desc_size = ia64_boot_param->efi_memdesc_size;
+
+ memset (dump_mbank, 0, sizeof dump_mbank);
+
+ for (p = efi_map_start; p < efi_map_end; p += efi_desc_size) {
+ efi_memory_desc_t *md = p;
+ u64 start = md->phys_addr;
+ u64 end = start + (md->num_pages << DUMP_EF_PAGE_SHIFT) - 1;
+ int type;
+
+ if(md->num_pages ==0 ) continue;
+ if (i >= dim(dump_mbank)) {
+ break;
+ }
+
+ /*
+ * NB: The attribute EFI_MEMORY_UC means the memory bank can support Uncached access,
+ * not that the memory is currently running with Uncached access.
+ */
+ if ( (md->attribute & (EFI_MEMORY_WB | EFI_MEMORY_WT)) == 0 ) {
+ continue;
+ }
+ switch(md->type) {
+ case EFI_UNUSABLE_MEMORY: /* Ignore it */
+ continue;
+
+ case EFI_LOADER_CODE:
+ case EFI_LOADER_DATA:
+ case EFI_BOOT_SERVICES_CODE:
+ case EFI_BOOT_SERVICES_DATA:
+ case EFI_CONVENTIONAL_MEMORY:
+ type = DUMP_MBANK_TYPE_CONVENTIONAL_MEMORY;
+ break;
+
+ default:
+ type = DUMP_MBANK_TYPE_OTHER;
+ break;
+
+ }
+ dump_mbank[ i ].type = type;
+ dump_mbank[ i ].start = start;
+ dump_mbank[ i ].end = end;
+ /* informational message: KERN_INFO (was KERN_EMERG), and print the
+ * full 64-bit phys_addr instead of truncating it through an int */
+ printk(KERN_INFO "Start:: %lx End:: %lx efi phys addr = %lx number of pages= %lx \n",
+ start, end, (unsigned long)md->phys_addr, md->num_pages);
+ i++;
+ }
+ dump_mbanks = i;
+ return;
+}
+
+/*
+ * Name: __dump_init()
+ * Func: Architecture specific initialisation of the dump mechanism:
+ * build the table of dumpable memory banks from the EFI map.
+ */
+void
+__dump_init(uint64_t local_memory_start)
+{
+ __init_mem_banks();
+}
+
+/*
+ * Name: __dump_open()
+ * Func: Prepare for dumping: allocate the per-cpu stack snapshot
+ * buffers used by the dump header.  Allocation failure is
+ * reported by alloc_dha_stack() itself.
+ */
+void
+__dump_open(void)
+{
+ alloc_dha_stack();
+}
+
+
+/*
+ * Name: __dump_cleanup()
+ * Func: Release architecture specific resources; called when the
+ * dump module is being removed.
+ */
+void
+__dump_cleanup(void)
+{
+ free_dha_stack();
+}
+
+
+
+int __dump_memcpy_mc_expected = 0; /* Doesn't help yet */
+
+/*
+ * An ia64 version of memcpy() that tries to avoid machine checks.
+ *
+ * NB:
+ * By itself __dump_memcpy_mc_expected isn't providing any
+ * protection against Machine Checks. We are looking into the
+ * possibility of adding code to the arch/ia64/kernel/mca.c function
+ * ia64_mca_ucmc_handler() to restore state so that an IA64_MCA_CORRECTED
+ * can be returned to the firmware. Currently it always returns
+ * IA64_MCA_COLD_BOOT and reboots the machine.
+ */
+/*
+void * __dump_memcpy(void * dest, const void *src, size_t count)
+{
+ void *vp;
+
+ if (__dump_memcpy_mc_expected) {
+ ia64_pal_mc_expected((u64) 1, 0);
+ }
+
+ vp = memcpy(dest, src, count);
+
+ if (__dump_memcpy_mc_expected) {
+ ia64_pal_mc_expected((u64) 0, 0);
+ }
+ return(vp);
+}
+*/
+/*
+ * Name: manual_handle_crashdump()
+ * Func: Interface for the lkcd dump command.  Captures the current
+ * register state and hands it to dump_execute().
+ */
+int
+manual_handle_crashdump(void)
+{
+ struct pt_regs regs;
+
+ get_current_regs(&regs);
+ dump_execute("manual", &regs);
+ return 0;
+}
+
unsigned long curr_pfn, curr_map, *curr_map_ptr = NULL;
map = (unsigned long *)dump_mdev->indirect_map_root;
- if (!map)
+ if (!map)
return NULL;
-
if (loc > dump_mdev->last_offset >> PAGE_SHIFT)
return NULL;
* in the chain of indirect maps
*/
for (i = 0; i + DUMP_IND_MAP_SZ < index ; i += DUMP_IND_MAP_SZ) {
- if (!(map = next_indirect_map(map)))
+ if (!(map = next_indirect_map(map)))
return NULL;
}
/* then the right direct map */
/* Temporary proof of concept hack, avoid overwriting kern pages */
+#ifdef CONFIG_KEXEC
return (kernel_page(page) || dump_low_page(page) || user_page(page));
+#else
+ return 0;
+#endif
+
}
}
page = virt_to_page(addr);
+
+#ifdef CONFIG_KEXEC
if (dump_low_page(page)) {
dump_free_mem(addr);
continue;
}
+#endif
if (dump_mem_add_space(dump_mdev, page)) {
printk("Warning: Unable to extend memdev "
/* To customise selection of pages to be dumped in a given pass/group */
struct dump_data_filter{
char name[32];
- int (*selector)(int, unsigned long, unsigned long);
+ int (*selector)(int, unsigned long, unsigned long, unsigned long);
ulong level_mask; /* dump level(s) for which this filter applies */
loff_t start[MAX_NUMNODES], end[MAX_NUMNODES]; /* location range applicable */
ulong num_mbanks; /* Number of memory banks. Greater than one for discontig memory (NUMA) */
/* ordering of passes, invoking iterator */
int (*sequencer)(void);
/* iterates over system data, selects and acts on data to dump */
- int (*iterator)(int, int (*)(unsigned long, unsigned long),
+ int (*iterator)(int, int (*)(unsigned long, unsigned long, unsigned long, unsigned long),
struct dump_data_filter *);
/* action when data is selected for dump */
- int (*save_data)(unsigned long, unsigned long);
+ int (*save_data)(unsigned long, unsigned long,unsigned long,unsigned long);
/* action when data is to be excluded from dump */
- int (*skip_data)(unsigned long, unsigned long);
+ int (*skip_data)(unsigned long,unsigned long, unsigned long,unsigned long);
/* policies for space, multiple dump devices etc */
int (*write_buffer)(void *, unsigned long);
};
struct task_struct *);
/* typically called by the save_data action */
/* add formatted data to the dump buffer */
- int (*add_data)(unsigned long, unsigned long);
+ int (*add_data)(unsigned long, unsigned long, unsigned long,unsigned long);
int (*update_end_marker)(void);
};
return dump_config.dumper->scheme->ops->sequencer();
}
-static inline int dump_iterator(int pass, int (*action)(unsigned long,
- unsigned long), struct dump_data_filter *filter)
+static inline int dump_iterator(int pass, int (*action)(unsigned long,unsigned long,
+ unsigned long,unsigned long), struct dump_data_filter *filter)
{
return dump_config.dumper->scheme->ops->iterator(pass, action, filter);
}
return dump_config.dumper->fmt->ops->update_end_marker();
}
-static inline int dump_add_data(unsigned long loc, unsigned long sz)
+static inline int dump_add_data(unsigned long loc, unsigned long phys_addr, unsigned long sz,unsigned long off_in_page)
{
- return dump_config.dumper->fmt->ops->add_data(loc, sz);
+ return dump_config.dumper->fmt->ops->add_data(loc,phys_addr, sz,off_in_page);
}
/* Compression operation */
/* Default scheme methods (dump_scheme.c) */
extern int dump_generic_sequencer(void);
-extern int dump_page_iterator(int pass, int (*action)(unsigned long, unsigned
- long), struct dump_data_filter *filter);
-extern int dump_generic_save_data(unsigned long loc, unsigned long sz);
-extern int dump_generic_skip_data(unsigned long loc, unsigned long sz);
+extern int dump_page_iterator(int pass, int (*action)(unsigned long, unsigned long, unsigned long,unsigned long), struct dump_data_filter *filter);
+extern int dump_generic_save_data(unsigned long loc, unsigned long phys_addr,unsigned long sz,unsigned long offset_in_page);
+extern int dump_generic_skip_data(unsigned long loc, unsigned long phys_addr,unsigned long sz,unsigned long offset_in_page);
extern int dump_generic_write_buffer(void *buf, unsigned long len);
extern int dump_generic_configure(unsigned long);
extern int dump_generic_unconfigure(void);
extern void dump_lcrash_save_context(int cpu, const struct pt_regs *regs,
struct task_struct *tsk);
extern int dump_generic_update_header(void);
-extern int dump_lcrash_add_data(unsigned long loc, unsigned long sz);
+extern int dump_lcrash_add_data(unsigned long loc, unsigned long phys_addr, unsigned long sz,unsigned long offset_in_page);
extern int dump_lcrash_update_end_marker(void);
/* Default format (lcrash) template */
struct dump_data_filter *filter = dump_config.dumper->filter;
struct dump_data_filter *filter2 = dumper_stage2.filter;
int pass = 0, err = 0, save = 0;
- int (*action)(unsigned long, unsigned long);
+ int (*action)(unsigned long, unsigned long,unsigned long, unsigned long);
/* Make sure gzip compression is being used */
if (dump_config.dumper->compress->compress_type != DUMP_COMPRESS_GZIP) {
break;
}
printk("\n %d overlay pages %s of %d each in pass %d\n",
- err, save ? "saved" : "skipped", DUMP_PAGE_SIZE, pass);
+ err, save ? "saved" : "skipped", (int)DUMP_PAGE_SIZE, pass);
}
return err;
* faster.
*/
int dump_saved_data_iterator(int pass, int (*action)(unsigned long,
- unsigned long), struct dump_data_filter *filter)
+ unsigned long,unsigned long, unsigned long), struct dump_data_filter *filter)
{
loff_t loc, end;
struct page *page;
int i, err = 0;
unsigned long sz;
- for (i = 0; i < filter->num_mbanks; i++) {
- loc = filter->start[i];
- end = filter->end[i];
+ for (i = 0; i < dump_mbanks; i++) {
+ loc = dump_mbank[i].start;
+ end = dump_mbank[i].end;
printk("pass %d, start off 0x%llx end offset 0x%llx\n", pass,
loc, end);
/* loc will get treated as logical offset into stage 1 */
page = dump_get_saved_page(loc);
- for (; loc < end; loc += PAGE_SIZE) {
+ for (; loc < end; loc += /*PAGE_SIZE*/DUMP_PAGE_SIZE) {
dump_config.dumper->curr_loc = loc;
if (!page) {
printk("no more saved data for pass %d\n",
sz = (loc + PAGE_SIZE > end) ? end - loc : PAGE_SIZE;
if (page && filter->selector(pass, (unsigned long)page,
- PAGE_SIZE)) {
- pr_debug("mem offset 0x%llx\n", loc);
- if ((err = action((unsigned long)page, sz)))
+ loc, PAGE_SIZE)) {
+ if ((err = action((unsigned long)page, sz,DUMP_PAGE_SIZE,loc%PAGE_SIZE)))
break;
else
count++;
return err ? err : count;
}
-static inline int dump_overlay_pages_done(struct page *page, int nr)
+static inline int dump_overlay_pages_done(unsigned long loc, unsigned long phys_addr,unsigned long len,unsigned long offset)
{
int ret=0;
+ struct page *page= (struct page *)loc;
- for (; nr ; page++, nr--) {
+ if((offset+len) == PAGE_SIZE)/*This ensures that a complete page is dumped*/
+ {
if (dump_check_and_free_page(dump_memdev, page))
ret++;
}
+
return ret;
}
-int dump_overlay_save_data(unsigned long loc, unsigned long len)
+int dump_overlay_save_data(unsigned long loc, unsigned long phys_addr,unsigned long len,unsigned long offset)
{
int err = 0;
struct page *page = (struct page *)loc;
static unsigned long cnt = 0;
- if ((err = dump_generic_save_data(loc, len)))
+ if ((err = dump_generic_save_data(loc,phys_addr,len,offset)))
return err;
- if (dump_overlay_pages_done(page, len >> PAGE_SHIFT)) {
+ if (dump_overlay_pages_done((unsigned long)page, phys_addr,len /*>> PAGE_SHIFT*/,offset)) {
cnt++;
if (!(cnt & 0x7f))
pr_debug("released page 0x%lx\n", page_to_pfn(page));
}
-int dump_overlay_skip_data(unsigned long loc, unsigned long len)
+int dump_overlay_skip_data(unsigned long loc, unsigned long phys_addr,unsigned long len,unsigned long offset_in_page)
{
struct page *page = (struct page *)loc;
- dump_overlay_pages_done(page, len >> PAGE_SHIFT);
+ dump_overlay_pages_done((unsigned long)page, phys_addr,len /*>> PAGE_SHIFT*/,offset_in_page);
return 0;
}
err = dump_activate_softboot();
#endif
- return err;
+// return err;
err = dump_switchover_stage(); /* plugs into soft boot mechanism */
dump_config.dumper = &dumper_stage1; /* set things back */
return err;
}
dump_config.dump_addr = (unsigned long)dump_saved_config;
printk("Dump config block of size %d set up at 0x%lx\n",
- sizeof(*dump_saved_config), (unsigned long)dump_saved_config);
+ (int)sizeof(*dump_saved_config), (unsigned long)dump_saved_config);
return 0;
}
/* ----- PASSTHRU FILTER ROUTINE --------- */
/* transparent - passes everything through */
-int dump_passthru_filter(int pass, unsigned long loc, unsigned long sz)
+int dump_passthru_filter(int pass, unsigned long loc, unsigned long phy_addr,unsigned long sz)
{
return 1;
}
if ((dph->dp_address & (PAGE_SIZE - 1)) || (dph->dp_flags
> DUMP_DH_COMPRESSED) || (!dph->dp_flags) ||
(dph->dp_size > PAGE_SIZE)) {
- printk("dp->address = 0x%llx, dp->size = 0x%x, dp->flag = 0x%x\n",
+ printk("dp->address = 0x%lx, dp->size = 0x%x, dp->flag = 0x%x\n",
dph->dp_address, dph->dp_size, dph->dp_flags);
return 0;
}
* TBD/Later: Consider avoiding the copy by using a scatter/gather
* vector representation for the dump buffer
*/
-int dump_passthru_add_data(unsigned long loc, unsigned long sz)
+int dump_passthru_add_data(unsigned long loc, unsigned long phys_addr,unsigned long sz,unsigned long offset_in_page)
{
struct page *page = (struct page *)loc;
void *buf = dump_config.dumper->curr_buf;
int err = 0;
+// printk(KERN_EMERG "In dump passsthrough add data \n");
+// mdelay(1000);
if ((err = dump_copy_pages(buf, page, sz))) {
printk("dump_copy_pages failed");
return err;
/* Filter that simply passes along any data within the range (transparent)*/
/* Note: The start and end ranges in the table are filled in at run-time */
-extern int dump_filter_none(int pass, unsigned long loc, unsigned long sz);
+extern int dump_filter_none(int pass, unsigned long loc, unsigned long phy_addr,unsigned long sz);
struct dump_data_filter dump_passthru_filtertable[MAX_PASSES] = {
{.name = "passkern", .selector = dump_passthru_filter,
{
struct dump_data_filter *filter = dump_config.dumper->filter;
int pass = 0, err = 0, save = 0;
- int (*action)(unsigned long, unsigned long);
+ int (*action)(unsigned long, unsigned long,unsigned long,unsigned long);
/*
* We want to save the more critical data areas first in
else
action = dump_skip_data;
- if ((err = dump_iterator(pass, action, filter)) < 0)
+ if ((err = dump_iterator(pass, action, filter)) < 0){
+ printk(KERN_EMERG "Iterator failed \n");
break;
-
- printk("\n %d dump pages %s of %d each in pass %d\n",
- err, save ? "saved" : "skipped", DUMP_PAGE_SIZE, pass);
-
+ }
}
return (err < 0) ? err : 0;
{
unsigned long page_index = loc >> PAGE_SHIFT;
-
+ unsigned long vaddr;
+#ifdef CONFIG_IA64
+ if(IS_PINNED_ADDRESS(loc))
+ {
+ vaddr = START + loc - (PHYS_START);
+ return (struct page *)(vaddr);
+ }
+#endif
/* todo: complete this to account for ia64/discontig mem */
/* todo: and to check for validity, ram page, no i/o mem etc */
/* need to use pfn/physaddr equiv of kern_addr_valid */
/* Default iterator: for singlestage and stage 1 of soft-boot dumping */
/* Iterates over range of physical memory pages in DUMP_PAGE_SIZE increments */
-int dump_page_iterator(int pass, int (*action)(unsigned long, unsigned long),
-	struct dump_data_filter *filter)
+/*
+ * Default page iterator: walk every physical memory bank recorded in
+ * dump_mbank[] (replacing the filter's own start/end range table) in
+ * DUMP_PAGE_SIZE steps, invoking 'action' (save or skip) on each page
+ * the pass selector accepts.  The action callback signature is now
+ * (page vaddr, phys addr, size, offset within PAGE_SIZE).
+ * Returns a negative error propagated from 'action', otherwise the
+ * count of pages acted upon.
+ */
+int dump_page_iterator(int pass, int (*action)(unsigned long, unsigned long, unsigned long,unsigned long),
+	struct dump_data_filter *filter)
{
	/* Todo : fix unit, type */
-	loff_t loc, start, end;
-	int i, count = 0, err = 0;
+	loff_t loc;
+	int count = 0, err = 0,i=0;
	struct page *page;
	/* Todo: Add membanks code */
	/* TBD: Check if we need to address DUMP_PAGE_SIZE < PAGE_SIZE */
-	for (i = 0; i < filter->num_mbanks; i++) {
-		start = filter->start[i];
-		end = filter->end[i];
-		for (loc = start; loc < end; loc += DUMP_PAGE_SIZE) {
+	for(i=0;i<dump_mbanks;i++)
+	{
+	//printk(KERN_EMERG "Mbank %d start %lx end %lx\n",i,dump_mbank[ i ].start,dump_mbank[ i ].end);
+	for (loc = dump_mbank[ i ].start; loc < dump_mbank[ i ].end; loc += DUMP_PAGE_SIZE/*PAGE_SIZE*/) {
		dump_config.dumper->curr_loc = loc;
		page = dump_get_page(loc);
-		if (page && filter->selector(pass,
-				(unsigned long) page, DUMP_PAGE_SIZE)) {
-			if ((err = action((unsigned long)page,
-					DUMP_PAGE_SIZE))) {
-				printk("dump_page_iterator: err %d for "
-					"loc 0x%llx, in pass %d\n",
-					err, loc, pass);
-				return err ? err : count;
+		/* NOTE(review): the selector is handed PAGE_SIZE while the
+		 * action gets DUMP_PAGE_SIZE -- confirm this asymmetry is
+		 * intentional. */
+		if (page && filter->selector(pass, (unsigned long) page,loc, PAGE_SIZE)) {
+			/* on error: stop this bank; err is returned below
+			 * (the old code returned immediately and logged) */
+			if ((err = action((unsigned long)page, loc/*+offset_in_page*/,DUMP_PAGE_SIZE,loc%PAGE_SIZE)))
+			{
+				break;
			} else
				count++;
		}
	}
	}
-
	return err ? err : count;
}
* Base function that saves the selected block of data in the dump
* Action taken when iterator decides that data needs to be saved
*/
-int dump_generic_save_data(unsigned long loc, unsigned long sz)
+int dump_generic_save_data(unsigned long loc, unsigned long phys_addr,unsigned long sz,unsigned long offset)
{
void *buf;
void *dump_buf = dump_config.dumper->dump_buf;
int left, bytes, ret;
- if ((ret = dump_add_data(loc, sz))) {
+ if ((ret = dump_add_data(loc, phys_addr,sz,offset))) {
return ret;
}
buf = dump_config.dumper->curr_buf;
/* If we've filled up the buffer write it out */
+
if ((left = buf - dump_buf) >= DUMP_BUFFER_SIZE) {
bytes = dump_write_buffer(dump_buf, DUMP_BUFFER_SIZE);
if (bytes < DUMP_BUFFER_SIZE) {
- printk("dump_write_buffer failed %d\n", bytes);
+ printk(KERN_EMERG "dump_write_buffer failed %d\n", bytes);
return bytes ? -ENOSPC : bytes;
}
/* issue warning */
return ret;
}
- printk(".");
+ printk (".");
touch_nmi_watchdog();
} else if (!(dump_config.dumper->count & 0x7)) {
/* Todo: Touch/Refresh watchdog */
/* --- Done with periodic chores -- */
-
- /*
- * extra bit of copying to simplify verification
- * in the second kernel boot based scheme
- */
- memcpy(dump_buf - DUMP_PAGE_SIZE, dump_buf +
- DUMP_BUFFER_SIZE - DUMP_PAGE_SIZE, DUMP_PAGE_SIZE);
-
- /* now adjust the leftover bits back to the top of the page */
- /* this case would not arise during stage 2 (passthru) */
memset(dump_buf, 'z', DUMP_BUFFER_SIZE);
if (left) {
memcpy(dump_buf, dump_buf + DUMP_BUFFER_SIZE, left);
return 0;
}
-int dump_generic_skip_data(unsigned long loc, unsigned long sz)
+int dump_generic_skip_data(unsigned long loc, unsigned long phys_addr,unsigned long sz,unsigned long offset_in_page)
{
/* dummy by default */
return 0;
/* make sure device is ready */
while ((ret = dump_dev_ready(NULL)) == -EAGAIN);
if (ret < 0) {
- printk("dump_dev_ready failed !err %d\n", ret);
+ printk(KERN_EMERG "dump_dev_ready failed !err %d\n", ret);
return ret;
}
-
+
while (len) {
if ((last_transfer = dump_dev_write(buf, len)) <= 0) {
ret = last_transfer;
- printk("dump_dev_write failed !err %d\n",
+ printk(KERN_EMERG "dump_dev_write failed !err %d\n",
ret);
break;
}
- /* wait till complete */
- while ((ret = dump_dev_ready(buf)) == -EAGAIN)
- cpu_relax();
+ /* wait till complete */
+ while ((ret = dump_dev_ready(buf)) == -EAGAIN){
+ // printk(KERN_EMERG "dump_dev_ready failed \n");
+ cpu_relax();
+ }
+
if (ret < 0) {
- printk("i/o failed !err %d\n", ret);
+ printk(KERN_EMERG "i/o failed !err %d\n", ret);
break;
}
/* check for space */
if ((err = dump_dev_seek(dump_config.dumper->curr_offset + len +
2*DUMP_BUFFER_SIZE)) < 0) {
- printk("dump_write_buffer: insuff space after offset 0x%llx\n",
+ printk(KERN_EMERG "dump_write_buffer: insuff space after offset 0x%llx\n",
dump_config.dumper->curr_offset);
return err;
}
written = written ? -ENOSPC : written;
else
dump_config.dumper->curr_offset += len;
-
+
return written;
}
return -ENOMEM; /* fixme: better error code */
}
- /* Initialize the rest of the fields */
+ /* Initialize the rest of the fields *
dump_config.dumper->dump_buf = buf + DUMP_PAGE_SIZE;
+ COMMENTED :: TBD : Query lkcd lists For the offsetting */
+ dump_config.dumper->dump_buf = buf;
dumper_reset();
/* Open the dump device */
printk("Closed dump device\n");
if (buf)
- dump_free_mem((buf - DUMP_PAGE_SIZE));
+ {
+ /* Commented: Query LKCD on the need for offsetting
+ * */
+ dump_free_mem((buf /*- DUMP_PAGE_SIZE*/));
+ }
dump_config.dumper->curr_buf = dump_config.dumper->dump_buf = NULL;
pr_debug("Released dump buffer\n");
#include <linux/sysctl.h>
#include <linux/nmi.h>
#include <linux/init.h>
-
#include <asm/hardirq.h>
#include <asm/uaccess.h>
+
/*
* -----------------------------------------------------------------------
* V A R I A B L E S
.page_offset = PAGE_OFFSET,
};
+int dump_mbanks; /* number of physical memory banks */
+struct __dump_mbank dump_mbank[MAXCHUNKS]; /* describes layout of physical memory */
+
/*
* -----------------------------------------------------------------------
* / P R O C T U N A B L E F U N C T I O N S
{
int state = -1;
unsigned long flags;
-
+
/* make sure we can dump */
if (!dump_okay) {
pr_info("LKCD not yet configured, can't take dump now\n");
*/
dump_oncpu = smp_processor_id() + 1;
dump_silence_level = DUMP_HARD_SPIN_CPUS;
-
- state = dump_generic_execute(panic_str, regs);
+ state = dump_generic_execute(panic_str, regs);
+ printk(KERN_EMERG "dump_generic_execute compelte\n");
dump_oncpu = 0;
spin_unlock_irqrestore(&dump_lock, flags);
if (state < 0) {
- printk("Dump Incomplete or failed!\n");
+ printk(KERN_EMERG "Dump Incomplete or failed!\n");
} else {
- printk("Dump Complete; %d dump pages saved.\n",
+ printk(KERN_EMERG "Dump Complete; %d dump pages saved.\n",
dump_header.dh_num_dump_pages);
}
}
#ifdef CONFIG_CRASH_DUMP_MEMDEV
dump_config.dumper = &dumper_stage1;
#else
+/*
printk("Requires CONFIG_CRASHDUMP_MEMDEV. Can't proceed.\n");
- return -1;
+ return -1;*/
#endif
} else {
dump_config.dumper = &dumper_singlestage;
static int
dump_ioctl(struct inode *i, struct file *f, unsigned int cmd, unsigned long arg)
{
- /* check capabilities */
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
struct pt_regs regs;
get_current_regs(®s);
+ printk("Inside panic event \n");
dump_execute((const char *)ptr, ®s);
#endif
+ printk("Returning from panic event\n");
return 0;
}
#endif
}
+extern unsigned long num_physpages;
/*
* Name: dump_init()
* Func: Initialize the dump process. This will set up any architecture
/* set the memory size */
dump_header.dh_memory_size = (u64)info.totalram;
+ bzero(&dump_header,sizeof(dump_header));
+ bzero(&dump_header_asm,sizeof(dump_header_asm));
sysctl_header = register_sysctl_table(kernel_root, 0);
dump_sysrq_register();
module_init(dump_init);
module_exit(dump_cleanup);
+
#include <linux/mm.h>
/* definitions */
+#define STACK_START_POSITION(tsk) (tsk)
#define DUMP_ASM_MAGIC_NUMBER 0xdeaddeadULL /* magic number */
#define DUMP_ASM_VERSION_NUMBER 0x3 /* version number */
--- /dev/null
+/*
+ * Kernel header file for Linux crash dumps.
+ *
+ * Created by: Matt Robinson (yakker@sgi.com)
+ *
+ * Copyright 1999 - 2002 Silicon Graphics, Inc. All rights reserved.
+ *
+ * This code is released under version 2 of the GNU GPL.
+ */
+
+/* This header file holds the architecture specific crash dump header */
+#ifndef _ASM_DUMP_H
+#define _ASM_DUMP_H
+
+/* definitions */
+#define DUMP_ASM_MAGIC_NUMBER 0xdeaddeadULL /* magic number */
+#define DUMP_ASM_VERSION_NUMBER 0x4 /* version number */
+
+/* max number of cpus */
+#define DUMP_MAX_NUM_CPUS 32
+
+#ifdef __KERNEL__
+#include <linux/efi.h>
+#include <asm/pal.h>
+#include <asm/ptrace.h>
+
+#ifdef CONFIG_SMP
+extern unsigned long irq_affinity[];
+extern int (*dump_ipi_function_ptr)(struct pt_regs *);
+extern void dump_send_ipi(void);
+#else /* !CONFIG_SMP */
+#define dump_send_ipi() do { } while(0)
+#endif
+
+#else /* !__KERNEL__ */
+/* necessary header files */
+#include <asm/ptrace.h> /* for pt_regs */
+#include <linux/threads.h>
+#endif /* __KERNEL__ */
+
+/*
+ * mkswap.c calls getpagesize() to get the system page size,
+ * which is not necessarily the same as the hardware page size.
+ *
+ * For ia64 the kernel PAGE_SIZE can be configured from 4KB ... 16KB.
+ *
+ * The physical memory is laid out in the hardware/minimal pages.
+ * This is the size we need to use for dumping physical pages.
+ *
+ * Note the hardware/minimal page size being used in
+ * arch/ia64/kernel/efi.c`efi_memmap_walk():
+ *	curr.end = curr.start + (md->num_pages << 12);
+ *
+ * Since the system page size could change between the kernel we boot
+ * on and the kernel that caused the core dump, we may want to use
+ * something more constant like the maximum system page size (see
+ * include/asm-ia64/page.h).
+ */
+#define STACK_START_POSITION(tsk) (tsk)
+#define DUMP_MIN_PAGE_SHIFT 12
+#define DUMP_MIN_PAGE_SIZE (1UL << DUMP_MIN_PAGE_SHIFT)
+#define DUMP_MIN_PAGE_MASK (~(DUMP_MIN_PAGE_SIZE - 1))
+#define DUMP_MIN_PAGE_ALIGN(addr) (((addr) + DUMP_MIN_PAGE_SIZE - 1) & DUMP_MIN_PAGE_MASK)
+
+#define DUMP_MAX_PAGE_SHIFT 16
+#define DUMP_MAX_PAGE_SIZE (1UL << DUMP_MAX_PAGE_SHIFT)
+#define DUMP_MAX_PAGE_MASK (~(DUMP_MAX_PAGE_SIZE - 1))
+#define DUMP_MAX_PAGE_ALIGN(addr) (((addr) + DUMP_MAX_PAGE_SIZE - 1) & DUMP_MAX_PAGE_MASK)
+
+
+#undef DUMP_PAGE_SHIFT /* Redefining Default for ia64 */
+#undef DUMP_PAGE_SIZE /* " " " " */
+#undef DUMP_PAGE_MASK /* " " " " */
+#undef DUMP_PAGE_ALIGN /* " " " " */
+#undef DUMP_HEADER_OFFSET /* " " " " */
+
+#define DUMP_HEADER_OFFSET PAGE_SIZE
+
+#define DUMP_EF_PAGE_SHIFT DUMP_MIN_PAGE_SHIFT
+
+/* NOTE(review): redefined here because these are already defined in the
+ * generic dump header -- confirm with the lkcd lists. */
+#define DUMP_PAGE_SHIFT DUMP_MIN_PAGE_SHIFT
+#define DUMP_PAGE_SIZE DUMP_MIN_PAGE_SIZE
+#define DUMP_PAGE_MASK DUMP_MIN_PAGE_MASK
+#define DUMP_PAGE_ALIGN(addr) DUMP_MIN_PAGE_ALIGN(addr)
+
+extern int _end,_start;
+/* Identity-mapped (pinned) virtual base of the kernel image on ia64. */
+#define START ((unsigned long) 0xa000000100000000)
+#define END ((unsigned long) &_end)
+/* NOTE(review): hard-codes the kernel's physical load address at 64MB --
+ * confirm against the platform's actual load address. */
+#define PHYS_START (64*1024*1024)
+/* True when 'loc' is a physical address inside the pinned kernel image;
+ * dump_get_page() then translates it as START + loc - PHYS_START.
+ * NOTE(review): '>' excludes the first byte at PHYS_START -- possibly
+ * should be '>='; confirm. */
+#define IS_PINNED_ADDRESS(loc) (loc > PHYS_START && \
+				loc< \
+				((PHYS_START \
+				+ (END - \
+				START))))
+
+/*
+ * Structure: dump_header_asm_t
+ * Function: This is the header for architecture-specific stuff. It
+ * follows right after the dump header.
+ */
+/*typedef struct _dump_header_asm {*/
+
+/* Written immediately after the generic dump header.  The layout is
+ * packed and read back by userland dump-analysis tools, so any field
+ * change requires bumping DUMP_ASM_VERSION_NUMBER. */
+typedef struct __dump_header_asm {
+
+	/* the dump magic number -- unique to verify dump is valid */
+	uint64_t dha_magic_number;
+
+	/* the version number of this dump */
+	uint32_t dha_version;
+
+	/* the size of this header (in case we can't read it) */
+	uint32_t dha_header_size;
+
+	/* pointer to pt_regs, (OLD: (struct pt_regs *, NEW: (uint64_t)) */
+	uint64_t dha_pt_regs;
+
+	/* the dump registers */
+	struct pt_regs dha_regs;
+
+	/* the rnat register saved after flushrs */
+	uint64_t dha_rnat;
+
+	/* the pfs register saved after flushrs */
+	uint64_t dha_pfs;
+
+	/* the bspstore register saved after flushrs */
+	uint64_t dha_bspstore;
+
+	/* smp specific: per-CPU register/task/stack snapshots, indexed by
+	 * CPU up to DUMP_MAX_NUM_CPUS; dha_dumping_cpu is the CPU that
+	 * drove the dump */
+	uint32_t dha_smp_num_cpus;
+	uint32_t dha_dumping_cpu;
+	struct pt_regs dha_smp_regs[DUMP_MAX_NUM_CPUS];
+	uint64_t dha_smp_current_task[DUMP_MAX_NUM_CPUS];
+	uint64_t dha_stack[DUMP_MAX_NUM_CPUS];
+	uint64_t dha_stack_ptr[DUMP_MAX_NUM_CPUS];
+
+} __attribute__((packed)) dump_header_asm_t;
+
+
+extern struct __dump_header_asm dump_header_asm;
+
+#ifdef __KERNEL__
+/*
+ * Capture the executing CPU's register state into *regs for the
+ * architecture-specific dump header.  Registers are read directly via
+ * inline asm.  cr.ipsr/cr.iip/cr.ifs are only sampled when the psr
+ * test below indicates an interruption context; otherwise they are
+ * filled with -1 as a "not available" marker.
+ */
+static inline void get_current_regs(struct pt_regs *regs)
+{
+	/*
+	 * REMIND: Looking at functions/Macros like:
+	 *	DO_SAVE_SWITCH_STACK
+	 *	ia64_switch_to()
+	 *	ia64_save_extra()
+	 *	switch_to()
+	 * to implement this new feature that Matt seems to have added
+	 * to panic.c; seems all platforms are now expected to provide
+	 * this function to dump the current registers into the pt_regs
+	 * structure.
+	 */
+	volatile unsigned long rsc_value;/*for storing the rsc value*/
+	volatile unsigned long ic_value;
+
+	__asm__ __volatile__("mov %0=b6;;":"=r"(regs->b6));
+	__asm__ __volatile__("mov %0=b7;;":"=r"(regs->b7));
+
+	__asm__ __volatile__("mov %0=ar.csd;;":"=r"(regs->ar_csd));
+	__asm__ __volatile__("mov %0=ar.ssd;;":"=r"(regs->ar_ssd));
+	__asm__ __volatile__("mov %0=psr;;":"=r"(ic_value));
+	/* NOTE(review): tests bit 12 (0x1000) of psr, but psr.ic is bit 13
+	 * (0x2000) -- confirm which bit is intended for "within an
+	 * interruption". */
+	if(ic_value & 0x1000)/*Within an interrupt*/
+	{
+		__asm__ __volatile__("mov %0=cr.ipsr;;":"=r"(regs->cr_ipsr));
+		__asm__ __volatile__("mov %0=cr.iip;;":"=r"(regs->cr_iip));
+		__asm__ __volatile__("mov %0=cr.ifs;;":"=r"(regs->cr_ifs));
+	}
+	else
+	{
+		/* control registers not sampled outside an interruption */
+		regs->cr_ipsr=regs->cr_iip=regs->cr_ifs=(unsigned long)-1;
+	}
+	__asm__ __volatile__("mov %0=ar.unat;;":"=r"(regs->ar_unat));
+	__asm__ __volatile__("mov %0=ar.pfs;;":"=r"(regs->ar_pfs));
+	__asm__ __volatile__("mov %0=ar.rsc;;":"=r"(rsc_value));
+	regs->ar_rsc = rsc_value;
+	/*loadrs is from 16th bit to 29th bit of rsc*/
+	regs->loadrs = rsc_value >> 16 & (unsigned long)0x3fff;
+	/*setting the rsc.mode value to 0 (rsc.mode is the last two bits of rsc)*/
+	/* mode 0 (enforced lazy) lets ar.rnat/ar.bspstore be read coherently */
+	__asm__ __volatile__("mov ar.rsc=%0;;"::"r"(rsc_value & (unsigned long)(~3)));
+	__asm__ __volatile__("mov %0=ar.rnat;;":"=r"(regs->ar_rnat));
+	__asm__ __volatile__("mov %0=ar.bspstore;;":"=r"(regs->ar_bspstore));
+	/*copying the original value back*/
+	__asm__ __volatile__("mov ar.rsc=%0;;"::"r"(rsc_value));
+	__asm__ __volatile__("mov %0=pr;;":"=r"(regs->pr));
+	__asm__ __volatile__("mov %0=ar.fpsr;;":"=r"(regs->ar_fpsr));
+	__asm__ __volatile__("mov %0=ar.ccv;;":"=r"(regs->ar_ccv));
+
+	/* general registers; note r4-r7 (preserved) are not captured here */
+	__asm__ __volatile__("mov %0=r2;;":"=r"(regs->r2));
+	__asm__ __volatile__("mov %0=r3;;":"=r"(regs->r3));
+	__asm__ __volatile__("mov %0=r8;;":"=r"(regs->r8));
+	__asm__ __volatile__("mov %0=r9;;":"=r"(regs->r9));
+	__asm__ __volatile__("mov %0=r10;;":"=r"(regs->r10));
+	__asm__ __volatile__("mov %0=r11;;":"=r"(regs->r11));
+	__asm__ __volatile__("mov %0=r12;;":"=r"(regs->r12));
+	__asm__ __volatile__("mov %0=r13;;":"=r"(regs->r13));
+	__asm__ __volatile__("mov %0=r14;;":"=r"(regs->r14));
+	__asm__ __volatile__("mov %0=r15;;":"=r"(regs->r15));
+	__asm__ __volatile__("mov %0=r16;;":"=r"(regs->r16));
+	__asm__ __volatile__("mov %0=r17;;":"=r"(regs->r17));
+	__asm__ __volatile__("mov %0=r18;;":"=r"(regs->r18));
+	__asm__ __volatile__("mov %0=r19;;":"=r"(regs->r19));
+	__asm__ __volatile__("mov %0=r20;;":"=r"(regs->r20));
+	__asm__ __volatile__("mov %0=r21;;":"=r"(regs->r21));
+	__asm__ __volatile__("mov %0=r22;;":"=r"(regs->r22));
+	__asm__ __volatile__("mov %0=r23;;":"=r"(regs->r23));
+	__asm__ __volatile__("mov %0=r24;;":"=r"(regs->r24));
+	__asm__ __volatile__("mov %0=r25;;":"=r"(regs->r25));
+	__asm__ __volatile__("mov %0=r26;;":"=r"(regs->r26));
+	__asm__ __volatile__("mov %0=r27;;":"=r"(regs->r27));
+	__asm__ __volatile__("mov %0=r28;;":"=r"(regs->r28));
+	__asm__ __volatile__("mov %0=r29;;":"=r"(regs->r29));
+	__asm__ __volatile__("mov %0=r30;;":"=r"(regs->r30));
+	__asm__ __volatile__("mov %0=r31;;":"=r"(regs->r31));
+}
+
+/* Perhaps added to Common Arch Specific Functions and moved to dump.h some day */
+extern void * __dump_memcpy(void *, const void *, size_t);
+#endif /* __KERNEL__ */
+
+#endif /* _ASM_DUMP_H */
--- /dev/null
+/*
+ * linux/include/asm-ia64/nmi.h
+ */
+#ifndef ASM_NMI_H
+#define ASM_NMI_H
+
+#include <linux/pm.h>
+
+struct pt_regs;
+
+/* NMI handler type: returns 1 if it handled the NMI. */
+typedef int (*nmi_callback_t)(struct pt_regs * regs, int cpu);
+
+/**
+ * set_nmi_callback
+ *
+ * Set a handler for an NMI.  Only one handler may be set at a time.
+ * The callback itself should return 1 if the NMI was handled.
+ */
+void set_nmi_callback(nmi_callback_t callback);
+
+/**
+ * unset_nmi_callback
+ *
+ * Remove the handler previously set.
+ */
+void unset_nmi_callback(void);
+
+#endif /* ASM_NMI_H */
* macro to __dump_memcpy() and use it's arch specific version.
*/
#define DUMP_memcpy memcpy
+#define bzero(a,b) memset(a, 0, b)
/* necessary header files */
#include <asm/dump.h> /* for architecture-specific header */
mounts-$(CONFIG_BLK_DEV_MD) += do_mounts_md.o
extra-$(CONFIG_KERNTYPES) += kerntypes.o
-CFLAGS_kerntypes.o := -gstabs
+CFLAGS_kerntypes.o := -gdwarf-2
# files to be removed upon make clean
clean-files := ../include/linux/compile.h
/* Define version type for version validation of dump and kerntypes */
LINUX_COMPILE_VERSION_ID_TYPE;
#endif
+#if defined(CONFIG_SMP) && defined(CONFIG_CRASH_DUMP)
+extern struct runqueue runqueues;
+struct runqueue rn;
+#endif
+struct new_utsname *p;
void
kerntypes_dummy(void)
{
extern int sysrq_enabled;
extern int core_uses_pid;
extern char core_pattern[];
-extern int cad_pid;
+extern int cad_pid;
extern int pid_max;
extern int sysctl_lower_zone_protection;
extern int min_free_kbytes;
int proc_dointvec(ctl_table *table, int write, struct file *filp,
	void __user *buffer, size_t *lenp)
{
+
+	/* XXX(review): debug hook -- panics the machine whenever the
+	 * cad_pid sysctl is touched, to exercise the crash-dump path.
+	 * Must not ship in a production kernel. */
+	if ( KERN_CADPID == table->ctl_name){
+		panic("From cad pid");
+	}
	return do_proc_dointvec(table,write,filp,buffer,lenp,
		    NULL,NULL);
}
return flush == Z_FINISH ? finish_done : block_done;
}
-extern int zlib_deflate_workspacesize(void)
+/* Size of the scratch workspace a deflate caller must provide.
+ * ('extern' dropped: it is spurious on a function definition.) */
+int zlib_deflate_workspacesize(void)
{
    return sizeof(deflate_workspace);
}