include/asm-parisc/offsets.h: arch/parisc/kernel/asm-offsets.s
$(call filechk,gen-asm-offsets)
-CLEAN_FILES += palo.conf lifimage include/asm-parisc/offsets.h
+CLEAN_FILES += lifimage include/asm-parisc/offsets.h
+MRPROPER_FILES += palo.conf
define archhelp
@echo '* vmlinux - Uncompressed kernel image (./vmlinux)'
head-y := head.o
head-$(CONFIG_PARISC64) := head64.o
-extra-y := init_task.o pdc_cons.o process.o
+extra-y := init_task.o pdc_cons.o process.o \
unaligned.o $(head-y)
AFLAGS_entry.o := -traditional
#define ELF_CLASS ELFCLASS32
+#define ELF_CORE_COPY_REGS(dst, pt) \
+ memset(dst, 0, sizeof(dst)); /* don't leak any "random" bits */ \
+ { int i; \
+ for (i = 0; i < 32; i++) dst[i] = (elf_greg_t) pt->gr[i]; \
+ for (i = 0; i < 8; i++) dst[32 + i] = (elf_greg_t) pt->sr[i]; \
+ } \
+ dst[40] = (elf_greg_t) pt->iaoq[0]; dst[41] = (elf_greg_t) pt->iaoq[1]; \
+ dst[42] = (elf_greg_t) pt->iasq[0]; dst[43] = (elf_greg_t) pt->iasq[1]; \
+ dst[44] = (elf_greg_t) pt->sar; dst[45] = (elf_greg_t) pt->iir; \
+ dst[46] = (elf_greg_t) pt->isr; dst[47] = (elf_greg_t) pt->ior; \
+ dst[48] = (elf_greg_t) mfctl(22); dst[49] = (elf_greg_t) mfctl(0); \
+ dst[50] = (elf_greg_t) mfctl(24); dst[51] = (elf_greg_t) mfctl(25); \
+ dst[52] = (elf_greg_t) mfctl(26); dst[53] = (elf_greg_t) mfctl(27); \
+ dst[54] = (elf_greg_t) mfctl(28); dst[55] = (elf_greg_t) mfctl(29); \
+ dst[56] = (elf_greg_t) mfctl(30); dst[57] = (elf_greg_t) mfctl(31); \
+ dst[58] = (elf_greg_t) mfctl( 8); dst[59] = (elf_greg_t) mfctl( 9); \
+ dst[60] = (elf_greg_t) mfctl(12); dst[61] = (elf_greg_t) mfctl(13); \
+ dst[62] = (elf_greg_t) mfctl(10); dst[63] = (elf_greg_t) mfctl(15);
+
+
typedef unsigned int elf_greg_t;
#include <linux/spinlock.h>
#define ELF_PLATFORM ("PARISC32\0")
-#define ELF_CORE_COPY_REGS(dst, pt) \
- memset(dst, 0, sizeof(dst)); /* don't leak any "random" bits */ \
- { int i; \
- for (i = 0; i < 32; i++) dst[i] = (elf_greg_t) pt->gr[i]; \
- for (i = 0; i < 8; i++) dst[32 + i] = (elf_greg_t) pt->sr[i]; \
- } \
- dst[40] = (elf_greg_t) pt->iaoq[0]; dst[41] = (elf_greg_t) pt->iaoq[1]; \
- dst[42] = (elf_greg_t) pt->iasq[0]; dst[43] = (elf_greg_t) pt->iasq[1]; \
- dst[44] = (elf_greg_t) pt->sar; dst[45] = (elf_greg_t) pt->iir; \
- dst[46] = (elf_greg_t) pt->isr; dst[47] = (elf_greg_t) pt->ior; \
- dst[48] = (elf_greg_t) mfctl(22); dst[49] = (elf_greg_t) mfctl(0); \
- dst[50] = (elf_greg_t) mfctl(24); dst[51] = (elf_greg_t) mfctl(25); \
- dst[52] = (elf_greg_t) mfctl(26); dst[53] = (elf_greg_t) mfctl(27); \
- dst[54] = (elf_greg_t) mfctl(28); dst[55] = (elf_greg_t) mfctl(29); \
- dst[56] = (elf_greg_t) mfctl(30); dst[57] = (elf_greg_t) mfctl(31); \
- dst[58] = (elf_greg_t) mfctl( 8); dst[59] = (elf_greg_t) mfctl( 9); \
- dst[60] = (elf_greg_t) mfctl(12); dst[61] = (elf_greg_t) mfctl(13); \
- dst[62] = (elf_greg_t) mfctl(10); dst[63] = (elf_greg_t) mfctl(15);
-
/*
* We should probably use this macro to set a flag somewhere to indicate
* this is a 32 on 64 process. We could use PER_LINUX_32BIT, or we
void __init
parisc_cache_init(void)
{
- if(pdc_cache_info(&cache_info)<0)
+ if (pdc_cache_info(&cache_info) < 0)
panic("parisc_cache_init: pdc_cache_info failed");
#if 0
split_tlb = 0;
if (cache_info.dt_conf.tc_sh == 0 || cache_info.dt_conf.tc_sh == 2) {
-
- if (cache_info.dt_conf.tc_sh == 2)
- printk(KERN_WARNING "Unexpected TLB configuration. "
+ if (cache_info.dt_conf.tc_sh == 2)
+ printk(KERN_WARNING "Unexpected TLB configuration. "
"Will flush I/D separately (could be optimized).\n");
- split_tlb = 1;
+ split_tlb = 1;
}
- dcache_stride = ( (1<<(cache_info.dc_conf.cc_block+3)) *
- cache_info.dc_conf.cc_line );
- icache_stride = ( (1<<(cache_info.ic_conf.cc_block+3)) *
- cache_info.ic_conf.cc_line );
+ dcache_stride = (1 << (cache_info.dc_conf.cc_block + 3)) *
+ cache_info.dc_conf.cc_line;
+ icache_stride = (1 << (cache_info.ic_conf.cc_block + 3)) *
+ cache_info.ic_conf.cc_line;
#ifndef CONFIG_PA20
- if(pdc_btlb_info(&btlb_info)<0) {
+ if (pdc_btlb_info(&btlb_info) < 0) {
memset(&btlb_info, 0, sizeof btlb_info);
}
#endif
- if ((boot_cpu_data.pdc.capabilities & PDC_MODEL_NVA_MASK) == PDC_MODEL_NVA_UNSUPPORTED) {
+ if ((boot_cpu_data.pdc.capabilities & PDC_MODEL_NVA_MASK) ==
+ PDC_MODEL_NVA_UNSUPPORTED) {
printk(KERN_WARNING "Only equivalent aliasing supported\n");
#ifndef CONFIG_SMP
panic("SMP kernel required to avoid non-equivalent aliasing");
void disable_sr_hashing(void)
{
- int srhash_type;
+ int srhash_type;
+
+ switch (boot_cpu_data.cpu_type) {
+ case pcx: /* We shouldn't get this far. setup.c should prevent it. */
+ BUG();
+ return;
+
+ case pcxs:
+ case pcxt:
+ case pcxt_:
+ srhash_type = SRHASH_PCXST;
+ break;
+
+ case pcxl:
+ srhash_type = SRHASH_PCXL;
+ break;
- if (boot_cpu_data.cpu_type == pcxl2)
- return; /* pcxl2 doesn't support space register hashing */
+ case pcxl2: /* pcxl2 doesn't support space register hashing */
+ return;
- switch (boot_cpu_data.cpu_type) {
+ default: /* Currently all PA2.0 machines use the same ins. sequence */
+ srhash_type = SRHASH_PA20;
+ break;
+ }
+
+ disable_sr_hashing_asm(srhash_type);
+}
- case pcx:
- BUG(); /* We shouldn't get here. code in setup.c should prevent it */
- return;
+void __flush_dcache_page(struct page *page)
+{
+ struct mm_struct *mm = current->active_mm;
+ struct list_head *l;
- case pcxs:
- case pcxt:
- case pcxt_:
- srhash_type = SRHASH_PCXST;
- break;
+ flush_kernel_dcache_page(page_address(page));
- case pcxl:
- srhash_type = SRHASH_PCXL;
- break;
+ if (!page->mapping)
+ return;
- default: /* Currently all PA2.0 machines use the same ins. sequence */
- srhash_type = SRHASH_PA20;
- break;
- }
+ list_for_each(l, &page->mapping->i_mmap_shared) {
+ struct vm_area_struct *mpnt;
+ unsigned long off;
- disable_sr_hashing_asm(srhash_type);
+ mpnt = list_entry(l, struct vm_area_struct, shared);
+
+ /*
+ * If this VMA is not in our MM, we can ignore it.
+ */
+ if (mpnt->vm_mm != mm)
+ continue;
+
+ if (page->index < mpnt->vm_pgoff)
+ continue;
+
+ off = page->index - mpnt->vm_pgoff;
+ if (off >= (mpnt->vm_end - mpnt->vm_start) >> PAGE_SHIFT)
+ continue;
+
+ flush_cache_page(mpnt, mpnt->vm_start + (off << PAGE_SHIFT));
+
+ /* All user shared mappings should be equivalently mapped,
+ * so once we've flushed one we should be ok
+ */
+ break;
+ }
}
+
.export ret_from_kernel_thread
ret_from_kernel_thread:
-#if CONFIG_PREEMPT || CONFIG_SMP
/* Call schedule_tail first though */
bl schedule_tail, %r2
nop
-#endif
LDREG TI_TASK-THREAD_SZ_ALGN(%r30), %r1
LDREG TASK_PT_GR25(%r1), %r26
/* Set the return value for the child */
child_return:
-#if CONFIG_SMP || CONFIG_PREEMPT
bl schedule_tail, %r2
nop
-#endif
LDREG TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE-FRAME_SIZE(%r30), %r1
LDREG TASK_PT_GR19(%r1),%r2
{HPHW_NPROC,0x481,0x4,0x81,"Wilbur (E25)"},
{HPHW_NPROC,0x482,0x4,0x81,"WB-80 (E35)"},
{HPHW_NPROC,0x483,0x4,0x81,"WB-96 (E45)"},
- {HPHW_NPROC,0x48,0x4,0x81,"UL Proc L-100 (811/D210,D310)"},
- {HPHW_NPROC,0x48,0x4,0x81,"UL Proc L-75 (801/D200)"},
+ {HPHW_NPROC,0x484,0x4,0x81,"UL Proc L-100 (811/D210,D310)"},
+ {HPHW_NPROC,0x485,0x4,0x81,"UL Proc L-75 (801/D200)"},
{HPHW_NPROC,0x501,0x4,0x81,"Merlin L2 132 (9000/778/B132L)"},
{HPHW_NPROC,0x502,0x4,0x81,"Merlin L2 160 (9000/778/B160L)"},
{HPHW_NPROC,0x503,0x4,0x81,"Merlin L2+ 132 (9000/778/B132L)"},
static struct fs_struct init_fs = INIT_FS;
static struct files_struct init_files = INIT_FILES;
static struct signal_struct init_signals = INIT_SIGNALS(init_signals);
+static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand);
struct mm_struct init_mm = INIT_MM(init_mm);
/*
-/* $Id: ioctl32.c,v 1.6 2002/10/21 16:13:22 varenet Exp $
+/* $Id: ioctl32.c,v 1.5 2002/10/18 00:21:43 varenet Exp $
* ioctl32.c: Conversion between 32bit and 64bit native ioctls.
*
* Copyright (C) 1997-2000 Jakub Jelinek (jakub@redhat.com)
#define __KERNEL__
#include <scsi/sg.h>
+#include <linux/raid/md_u.h>
+#include <linux/dm-ioctl.h>
+
#include <asm/types.h>
#include <asm/uaccess.h>
#include <asm/perf.h>
return err;
}
+/* Fix sizeof(sizeof()) breakage */
+#define BLKBSZGET_32 _IOR(0x12,112,int)
+#define BLKBSZSET_32 _IOW(0x12,113,int)
+#define BLKGETSIZE64_32 _IOR(0x12,114,int)
+
+static int do_blkbszget(unsigned int fd, unsigned int cmd, unsigned long arg)
+{
+ return sys_ioctl(fd, BLKBSZGET, arg);
+}
+
+static int do_blkbszset(unsigned int fd, unsigned int cmd, unsigned long arg)
+{
+ return sys_ioctl(fd, BLKBSZSET, arg);
+}
+
+static int do_blkgetsize64(unsigned int fd, unsigned int cmd,
+ unsigned long arg)
+{
+ return sys_ioctl(fd, BLKGETSIZE64, arg);
+}
+
static int ioc_settimeout(unsigned int fd, unsigned int cmd, unsigned long arg)
{
return rw_long(fd, AUTOFS_IOC_SETTIMEOUT, arg);
COMPATIBLE_IOCTL(BLKRRPART)
COMPATIBLE_IOCTL(BLKFLSBUF)
COMPATIBLE_IOCTL(BLKSECTSET)
-COMPATIBLE_IOCTL(BLKSSZGET)
-COMPATIBLE_IOCTL(BLKBSZGET)
/* RAID */
COMPATIBLE_IOCTL(RAID_VERSION)
COMPATIBLE_IOCTL(GET_ARRAY_INFO)
COMPATIBLE_IOCTL(GET_DISK_INFO)
COMPATIBLE_IOCTL(PRINT_RAID_DEBUG)
+COMPATIBLE_IOCTL(RAID_AUTORUN)
COMPATIBLE_IOCTL(CLEAR_ARRAY)
COMPATIBLE_IOCTL(ADD_NEW_DISK)
COMPATIBLE_IOCTL(HOT_REMOVE_DISK)
COMPATIBLE_IOCTL(PROTECT_ARRAY)
COMPATIBLE_IOCTL(HOT_ADD_DISK)
COMPATIBLE_IOCTL(SET_DISK_FAULTY)
+COMPATIBLE_IOCTL(HOT_GENERATE_ERROR)
COMPATIBLE_IOCTL(RUN_ARRAY)
COMPATIBLE_IOCTL(START_ARRAY)
COMPATIBLE_IOCTL(STOP_ARRAY)
COMPATIBLE_IOCTL(STOP_ARRAY_RO)
COMPATIBLE_IOCTL(RESTART_ARRAY_RW)
+/* DM */
+COMPATIBLE_IOCTL(DM_VERSION)
+COMPATIBLE_IOCTL(DM_REMOVE_ALL)
+COMPATIBLE_IOCTL(DM_DEV_CREATE)
+COMPATIBLE_IOCTL(DM_DEV_REMOVE)
+COMPATIBLE_IOCTL(DM_DEV_RELOAD)
+COMPATIBLE_IOCTL(DM_DEV_SUSPEND)
+COMPATIBLE_IOCTL(DM_DEV_RENAME)
+COMPATIBLE_IOCTL(DM_DEV_DEPS)
+COMPATIBLE_IOCTL(DM_DEV_STATUS)
+COMPATIBLE_IOCTL(DM_TARGET_STATUS)
+COMPATIBLE_IOCTL(DM_TARGET_WAIT)
+
/* Big K */
COMPATIBLE_IOCTL(PIO_FONT)
COMPATIBLE_IOCTL(GIO_FONT)
HANDLE_IOCTL(0x1260, broken_blkgetsize)
HANDLE_IOCTL(BLKSECTGET, w_long)
HANDLE_IOCTL(BLKPG, blkpg_ioctl_trans)
+/* take care of sizeof(sizeof()) breakage */
+/* block stuff */
+HANDLE_IOCTL(BLKBSZGET_32, do_blkbszget)
+HANDLE_IOCTL(BLKBSZSET_32, do_blkbszset)
+HANDLE_IOCTL(BLKGETSIZE64_32, do_blkgetsize64)
HANDLE_IOCTL(FBIOGET_FSCREENINFO, fb_ioctl_trans)
HANDLE_IOCTL(FBIOGETCMAP, fb_ioctl_trans)
#ifdef __LP64__
me->init = (void *)get_fdesc(me, (Elf_Addr)me->init);
#ifdef CONFIG_MODULE_UNLOAD
- if (me->cleanup)
- me->cleanup = (void *)get_fdesc(me, (Elf_Addr)me->cleanup);
- if (me->destroy)
- me->destroy = (void *)get_fdesc(me, (Elf_Addr)me->destroy);
+ if (me->exit)
+ me->exit = (void *)get_fdesc(me, (Elf_Addr)me->exit);
#endif
#endif
return 0;
u##size in##type (int addr) \
{ \
int b = PCI_PORT_HBA(addr); \
- u##size d = (u##size) -1; \
EISA_IN(size); \
- ASSERT(pci_port); /* make sure services are defined */ \
- ASSERT(parisc_pci_hba[b]); /* make sure ioaddr are "fixed up" */ \
- if (parisc_pci_hba[b] == NULL) { \
- printk(KERN_WARNING "\nPCI or EISA Host Bus Adapter %d not registered. in" #size "(0x%x) returning -1\n", b, addr); \
- } else { \
- d = pci_port->in##type(parisc_pci_hba[b], PCI_PORT_ADDR(addr)); \
- } \
- return d; \
+ if (!parisc_pci_hba[b]) return (u##size) -1; \
+ return pci_port->in##type(parisc_pci_hba[b], PCI_PORT_ADDR(addr)); \
}
PCI_PORT_IN(b, 8)
{ \
int b = PCI_PORT_HBA(addr); \
EISA_OUT(size); \
- ASSERT(pci_port); \
+ if (!parisc_pci_hba[b]) return; \
pci_port->out##type(parisc_pci_hba[b], PCI_PORT_ADDR(addr), d); \
}
EXPORT_SYMBOL(pcibios_resource_to_bus);
#endif
-#define MAX(val1, val2) ((val1) > (val2) ? (val1) : (val2))
-
-
/*
** pcibios align resources() is called everytime generic PCI code
** wants to generate a new address. The process of looking for
align = (res->flags & IORESOURCE_IO) ? PCIBIOS_MIN_IO : PCIBIOS_MIN_MEM;
/* Align to largest of MIN or input size */
- mask = MAX(alignment, align) - 1;
+ mask = max(alignment, align) - 1;
res->start += mask;
res->start &= ~mask;
int dump_fpu (struct pt_regs * regs, elf_fpregset_t *r)
{
+ if (regs == NULL)
+ return 0;
+
memcpy(r, regs->fr, sizeof *r);
return 1;
}
+int dump_task_fpu (struct task_struct *tsk, elf_fpregset_t *r)
+{
+ memcpy(r, tsk->thread.regs.fr, sizeof(*r));
+ return 1;
+}
+
/* Note that "fork()" is implemented in terms of clone, with
parameters (SIGCHLD, regs->gr[30], regs). */
int
#endif
#if CACHE_FLUSHING_IS_NOT_BROKEN
- flush_icache_range((unsigned long) &frame->tramp[0],
+ flush_user_icache_range((unsigned long) &frame->tramp[0],
(unsigned long) &frame->tramp[4]);
#else
/* It should *always* be cache line-aligned, but the compiler
handle_signal(unsigned long sig, siginfo_t *info, sigset_t *oldset,
struct pt_regs *regs, int in_syscall)
{
-	struct k_sigaction *ka = &current->sig->action[sig-1];
+	struct k_sigaction *ka = &current->sighand->action[sig-1];
DBG(("handle_signal(sig=%ld, ka=%p, info=%p, oldset=%p, regs=%p)\n",
sig, ka, info, oldset, regs));
oldset->sig[0], oldset->sig[1]));
- signr = get_signal_to_deliver(&info, regs);
+ signr = get_signal_to_deliver(&info, regs, NULL);
if (signr > 0) {
/* Restart a system call if necessary. */
if (in_syscall) {
break;
case -ERESTARTSYS:
-			ka = &current->sig->action[signr-1];
+			ka = &current->sighand->action[signr-1];
if (!(ka->sa.sa_flags & SA_RESTART)) {
DBG(("ERESTARTSYS: putting -EINTR\n"));
regs->gr[28] = -EINTR;
#ifndef _PARISC64_KERNEL_SYS32_H
#define _PARISC64_KERNEL_SYS32_H
+#include <linux/compat.h>
+
/* Call a kernel syscall which will use kernel space instead of user
* space for its copy_to/from_user.
*/
set_fs (old_fs); \
}
+#ifdef CONFIG_COMPAT
+
typedef __u32 __sighandler_t32;
struct sigaction32 {
};
#endif
+
+#endif
return -ENOSYS;
}
-/*
- * Set a given TLS descriptor:
- */
-asmlinkage int sys_set_thread_area(struct user_desc *u_info)
-{
- return -ENOSYS;
-}
-
-
-/*
- * Get the current Thread-Local Storage area:
- */
-
-asmlinkage int sys_get_thread_area(struct user_desc *u_info)
-{
- return -ENOSYS;
-}
-
-
asmlinkage unsigned long sys_alloc_hugepages(int key, unsigned long addr, unsigned long len, int prot, int flag)
{
return -ENOMEM;
ENTRY_SAME(rt_sigsuspend_wrapper) /* not really SAME -- see the code */
ENTRY_SAME(chown) /* 180 */
/* setsockopt() used by iptables: SO_SET_REPLACE/SO_SET_ADD_COUNTERS */
- ENTRY_DIFF(setsockopt)
+ ENTRY_COMP(setsockopt)
ENTRY_SAME(getsockopt)
- ENTRY_DIFF(sendmsg)
- ENTRY_DIFF(recvmsg)
+ ENTRY_COMP(sendmsg)
+ ENTRY_COMP(recvmsg)
ENTRY_SAME(semop) /* 185 */
ENTRY_SAME(semget)
ENTRY_DIFF(semctl_broken)
ENTRY_COMP(futex) /* 210 */
ENTRY_SAME(sched_setaffinity)
ENTRY_SAME(sched_getaffinity)
- ENTRY_SAME(set_thread_area)
- ENTRY_SAME(get_thread_area)
+ ENTRY_SAME(ni_syscall)
+ ENTRY_SAME(ni_syscall)
ENTRY_SAME(io_setup) /* 215 */
ENTRY_SAME(io_destroy)
ENTRY_SAME(io_getevents)
#include <linux/console.h>
#include <linux/kallsyms.h>
+#include <asm/assembly.h>
#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/io.h>
startstack = (unsigned long *)((unsigned long)stack & ~(THREAD_SIZE - 1));
i = 1;
+ stack = (long *)((long)(stack + 32) &~ (FRAME_SIZE-1)); /* Align */
printk("Kernel addresses on the stack:\n");
- while (stack >= startstack) {
- addr = *stack--;
+ while (stack > startstack) {
+ stack -= 16; /* Stack frames are a multiple of 16 words */
+ addr = stack[16 - RP_OFFSET / sizeof(long)];
/*
* If the address is either in the text segment of the
* kernel, or in the region which contains vmalloc'ed
#ifndef _PARISC_ASSEMBLY_H
#define _PARISC_ASSEMBLY_H
-#if defined(__LP64__) && defined(__ASSEMBLY__)
+#ifdef __LP64__
+#define LDREG ldd
+#define STREG std
+#define LDREGM ldd,mb
+#define STREGM std,ma
+#define RP_OFFSET 16
+#define FRAME_SIZE 128
+#else
+#define LDREG ldw
+#define STREG stw
+#define LDREGM ldwm
+#define STREGM stwm
+#define RP_OFFSET 20
+#define FRAME_SIZE 64
+#endif
+
+#ifdef __ASSEMBLY__
+
+#ifdef __LP64__
/* the 64-bit pa gnu assembler unfortunately defaults to .level 1.1 or 2.0 so
* work around that for now... */
.level 2.0w
ldo R%\value(\reg), \reg
.endm
-#ifdef __LP64__
-#define LDREG ldd
-#define STREG std
-#define LDREGM ldd,mb
-#define STREGM std,ma
-#define RP_OFFSET 16
-#define FRAME_SIZE 128
-#else
-#define LDREG ldw
-#define STREG stw
-#define LDREGM ldwm
-#define STREGM stwm
-#define RP_OFFSET 20
-#define FRAME_SIZE 64
-#endif
-
.macro loadgp
#ifdef __LP64__
ldil L%__gp, %r27
REST_CR (%cr22, PT_PSW (\regs))
.endm
+#endif /* __ASSEMBLY__ */
#endif
* Tell the user there is some problem.
*/
#define BUG() do { \
+ extern void dump_stack(void); \
printk("kernel BUG at %s:%d!\n", __FILE__, __LINE__); \
+ dump_stack(); \
} while (0)
#define PAGE_BUG(page) do { \
extern void flush_cache_all_local(void);
+static inline void cacheflush_h_tmp_function(void *dummy)
+{
+ flush_cache_all_local();
+}
+
static inline void flush_cache_all(void)
{
- on_each_cpu((void (*)(void *))flush_cache_all_local, NULL, 1, 1);
+ on_each_cpu(cacheflush_h_tmp_function, NULL, 1, 1);
}
/* The following value needs to be tuned and probably scaled with the
#endif
}
+extern void __flush_dcache_page(struct page *page);
+
static inline void flush_dcache_page(struct page *page)
{
if (page->mapping && list_empty(&page->mapping->i_mmap) &&
list_empty(&page->mapping->i_mmap_shared)) {
set_bit(PG_dcache_dirty, &page->flags);
} else {
- flush_kernel_dcache_page(page_address(page));
+ __flush_dcache_page(page);
}
}
typedef u32 compat_ino_t;
typedef u32 compat_dev_t;
typedef s32 compat_off_t;
+typedef s64 compat_loff_t;
typedef u16 compat_nlink_t;
typedef u16 compat_ipc_pid_t;
typedef s32 compat_daddr_t;
typedef u32 compat_old_sigset_t; /* at least 32 bits */
#define _COMPAT_NSIG 64
-#define _COMPAT_NSIG_BPW BITS_PER_LONG
+#define _COMPAT_NSIG_BPW 32
typedef u32 compat_sigset_word;
typedef double elf_fpreg_t;
typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
+extern int dump_task_fpu (struct task_struct *, elf_fpregset_t *);
+#define ELF_CORE_COPY_FPREGS(tsk, elf_fpregs) dump_task_fpu(tsk, elf_fpregs)
+
struct pt_regs; /* forward declaration... */
#define ide_check_region(from,extent) check_region((from), (extent))
#define ide_request_region(from,extent,name) request_region((from), (extent), (name))
#define ide_release_region(from,extent) release_region((from), (extent))
+/* Generic I/O and MEMIO string operations. */
+
+#define __ide_insw insw
+#define __ide_insl insl
+#define __ide_outsw outsw
+#define __ide_outsl outsl
+
+static __inline__ void __ide_mm_insw(unsigned long port, void *addr, u32 count)
+{
+ while (count--) {
+ *(u16 *)addr = readw(port);
+ addr += 2;
+ }
+}
+
+static __inline__ void __ide_mm_insl(unsigned long port, void *addr, u32 count)
+{
+ while (count--) {
+ *(u32 *)addr = readl(port);
+ addr += 4;
+ }
+}
+
+static __inline__ void __ide_mm_outsw(unsigned long port, void *addr, u32 count)
+{
+ while (count--) {
+ writew(*(u16 *)addr, port);
+ addr += 2;
+ }
+}
+
+static __inline__ void __ide_mm_outsl(unsigned long port, void *addr, u32 count)
+{
+ while (count--) {
+ writel(*(u32 *)addr, port);
+ addr += 4;
+ }
+}
#endif /* __KERNEL__ */
D(8) KM_PTE1,
D(9) KM_IRQ0,
D(10) KM_IRQ1,
-D(11) KM_TYPE_NR
+D(11) KM_SOFTIRQ0,
+D(12) KM_SOFTIRQ1,
+D(13) KM_TYPE_NR
};
#undef D
typedef unsigned int __kernel_gid_t;
typedef int __kernel_suseconds_t;
typedef int __kernel_clock_t;
+typedef int __kernel_timer_t;
+typedef int __kernel_clockid_t;
typedef int __kernel_daddr_t;
/* Note these change from narrow to wide kernels */
#ifdef __LP64__
* since we have taken branch traps too)
*/
#define PTRACE_SINGLEBLOCK 12 /* resume execution until next branch */
-#define PTRACE_GETSIGINFO 13 /* get child's siginfo structure */
-#define PTRACE_SETSIGINFO 14 /* set child's siginfo structure */
-
#ifdef __KERNEL__
/* XXX should we use iaoq[1] or iaoq[0] ? */
struct sigaction sa;
};
+#define ptrace_signal_deliver(regs, cookie) do { } while (0)
+
#include <asm/sigcontext.h>
#endif /* __KERNEL__ */