Fix arch headers.
author    Andreas Schwab <schwab@suse.de>
          Thu, 15 May 2003 13:35:22 +0000 (13:35 +0000)
committer Andreas Schwab <schwab@suse.de>
          Thu, 15 May 2003 13:35:22 +0000 (13:35 +0000)
suse-commit: 12cca2376829ee2f3ce3ed6b1b521ffd4f5ae63c

199 files changed:
Makefile
arch/alpha/kernel/module.c
arch/arm/kernel/module.c
arch/i386/kernel/module.c
arch/ia64/Kconfig
arch/ia64/Makefile
arch/ia64/dig/machvec.c
arch/ia64/hp/common/sba_iommu.c
arch/ia64/hp/sim/hpsim_console.c
arch/ia64/hp/sim/hpsim_machvec.c
arch/ia64/hp/sim/simeth.c
arch/ia64/hp/sim/simserial.c
arch/ia64/hp/zx1/Makefile
arch/ia64/hp/zx1/hpzx1_machvec.c
arch/ia64/hp/zx1/hpzx1_misc.c [deleted file]
arch/ia64/ia32/ia32_entry.S
arch/ia64/ia32/ia32_ioctl.c
arch/ia64/ia32/ia32_traps.c
arch/ia64/ia32/sys_ia32.c
arch/ia64/kernel/Makefile
arch/ia64/kernel/acpi-ext.c
arch/ia64/kernel/acpi.c
arch/ia64/kernel/brl_emu.c
arch/ia64/kernel/efi.c
arch/ia64/kernel/efivars.c
arch/ia64/kernel/fw-emu.c
arch/ia64/kernel/head.S
arch/ia64/kernel/ia64_ksyms.c
arch/ia64/kernel/iosapic.c
arch/ia64/kernel/irq.c
arch/ia64/kernel/irq_ia64.c
arch/ia64/kernel/machvec.c
arch/ia64/kernel/mca.c
arch/ia64/kernel/mca_asm.S
arch/ia64/kernel/palinfo.c
arch/ia64/kernel/perfmon.c
arch/ia64/kernel/perfmon_mckinley.h
arch/ia64/kernel/process.c
arch/ia64/kernel/ptrace.c
arch/ia64/kernel/sal.c
arch/ia64/kernel/salinfo.c
arch/ia64/kernel/setup.c
arch/ia64/kernel/signal.c
arch/ia64/kernel/smp.c
arch/ia64/kernel/smpboot.c
arch/ia64/kernel/time.c
arch/ia64/kernel/traps.c
arch/ia64/kernel/unaligned.c
arch/ia64/kernel/unwind.c
arch/ia64/lib/copy_user.S
arch/ia64/lib/do_csum.S
arch/ia64/lib/io.c
arch/ia64/lib/swiotlb.c
arch/ia64/mm/fault.c
arch/ia64/mm/init.c
arch/ia64/pci/pci.c
arch/ia64/sn/kernel/machvec.c
arch/ia64/tools/print_offsets.c
arch/parisc/kernel/module.c
arch/ppc/kernel/module.c
arch/s390/kernel/module.c
arch/sparc/kernel/module.c
arch/sparc64/kernel/module.c
arch/v850/kernel/module.c
arch/x86_64/kernel/module.c
arch/x86_64/kernel/reboot.c
drivers/acpi/osl.c
drivers/acpi/pci_irq.c
drivers/acpi/pci_root.c
drivers/char/agp/Kconfig
drivers/char/agp/backend.c
drivers/char/agp/hp-agp.c
drivers/char/agp/i460-agp.c
drivers/char/drm/drmP.h
drivers/char/drm/drm_bufs.h
drivers/char/drm/drm_drv.h
drivers/char/drm/drm_memory.h
drivers/char/drm/drm_memory_debug.h
drivers/char/drm/drm_vm.h
drivers/char/drm/gamma_dma.c
drivers/char/drm/i810_dma.c
drivers/char/drm/i830_dma.c
drivers/char/drm/mga_dma.c
drivers/char/drm/mga_drv.h
drivers/char/drm/r128_cce.c
drivers/char/drm/radeon_cp.c
drivers/char/hvc_console.c
drivers/char/mem.c
drivers/media/radio/Makefile
drivers/media/radio/dummy.c [new file with mode: 0644]
drivers/media/video/Makefile
drivers/media/video/dummy.c [new file with mode: 0644]
drivers/net/tulip/media.c
drivers/scsi/qla1280.c
drivers/scsi/scsi_ioctl.c
drivers/scsi/sym53c8xx_2/sym_glue.c
drivers/scsi/sym53c8xx_2/sym_malloc.c
drivers/serial/8250.c
drivers/serial/8250_acpi.c [new file with mode: 0644]
drivers/serial/8250_hcdp.c [new file with mode: 0644]
drivers/serial/8250_hcdp.h [new file with mode: 0644]
drivers/serial/Kconfig
drivers/serial/Makefile
drivers/serial/acpi.c [new file with mode: 0644]
fs/fcntl.c
fs/proc/base.c
fs/select.c
include/asm-alpha/agp.h
include/asm-alpha/hw_irq.h
include/asm-alpha/ptrace.h
include/asm-arm/ptrace.h
include/asm-cris/ptrace.h
include/asm-h8300/ptrace.h
include/asm-i386/agp.h
include/asm-i386/hw_irq.h
include/asm-i386/ptrace.h
include/asm-ia64/acpi-ext.h
include/asm-ia64/atomic.h
include/asm-ia64/bitops.h
include/asm-ia64/compat.h
include/asm-ia64/dma-mapping.h
include/asm-ia64/ia32.h
include/asm-ia64/intrinsics.h
include/asm-ia64/io.h
include/asm-ia64/iosapic.h
include/asm-ia64/machvec.h
include/asm-ia64/machvec_hpzx1.h
include/asm-ia64/machvec_init.h
include/asm-ia64/machvec_sn1.h
include/asm-ia64/machvec_sn2.h
include/asm-ia64/mca.h
include/asm-ia64/page.h
include/asm-ia64/pal.h
include/asm-ia64/pci.h
include/asm-ia64/percpu.h
include/asm-ia64/perfmon.h
include/asm-ia64/pgtable.h
include/asm-ia64/processor.h
include/asm-ia64/ptrace.h
include/asm-ia64/sal.h
include/asm-ia64/serial.h
include/asm-ia64/spinlock.h
include/asm-ia64/system.h
include/asm-ia64/uaccess.h
include/asm-ia64/unwind.h
include/asm-m68k/ptrace.h
include/asm-m68knommu/ptrace.h
include/asm-mips/hw_irq.h
include/asm-mips/ptrace.h
include/asm-mips64/hw_irq.h
include/asm-mips64/ptrace.h
include/asm-parisc/hw_irq.h
include/asm-parisc/ptrace.h
include/asm-ppc/hw_irq.h
include/asm-ppc/ptrace.h
include/asm-ppc64/hw_irq.h
include/asm-ppc64/ptrace.h
include/asm-s390/ptrace.h
include/asm-sh/hw_irq.h
include/asm-sh/ptrace.h
include/asm-sparc/ptrace.h
include/asm-sparc64/agp.h
include/asm-sparc64/ptrace.h
include/asm-um/hw_irq.h
include/asm-v850/hw_irq.h
include/asm-v850/ptrace.h
include/asm-x86_64/agp.h
include/asm-x86_64/hw_irq.h
include/asm-x86_64/ptrace.h
include/linux/acpi_serial.h
include/linux/efi.h
include/linux/elf.h
include/linux/highmem.h
include/linux/irq.h
include/linux/irq_cpustat.h
include/linux/jbd.h
include/linux/mm.h
include/linux/moduleloader.h
include/linux/nfs_fs.h
include/linux/pci_ids.h
include/linux/percpu.h
include/linux/serial.h
include/linux/smp.h
include/linux/sunrpc/svc.h
include/linux/sysctl.h
kernel/fork.c
kernel/ksyms.c
kernel/module.c
kernel/printk.c
kernel/softirq.c
kernel/sys.c
kernel/sysctl.c
kernel/time.c
kernel/timer.c
mm/memory.c
mm/mmap.c
sound/oss/cs4281/cs4281m.c
sound/oss/cs4281/cs4281pm-24.c
usr/Makefile

index e46eacc..e795bb6 100644 (file)
--- a/Makefile
+++ b/Makefile
@@ -182,7 +182,7 @@ AFLAGS_KERNEL       =
 NOSTDINC_FLAGS  = -nostdinc -iwithprefix include
 
 CPPFLAGS       := -D__KERNEL__ -Iinclude
-CFLAGS                 := $(CPPFLAGS) -Wall -Wstrict-prototypes -Wno-trigraphs -O2 \
+CFLAGS                 := $(CPPFLAGS) -Wall -Wstrict-prototypes -Wno-trigraphs -g -O2 \
                   -fno-strict-aliasing -fno-common
 AFLAGS         := -D__ASSEMBLY__ $(CPPFLAGS)
 
index f444a94..4259287 100644 (file)
--- a/arch/alpha/kernel/module.c
+++ b/arch/alpha/kernel/module.c
@@ -300,3 +300,8 @@ module_finalize(const Elf_Ehdr *hdr, const Elf_Shdr *sechdrs,
 {
        return 0;
 }
+
+void
+module_arch_cleanup(struct module *mod)
+{
+}
index c79822e..6d1f4a8 100644 (file)
--- a/arch/arm/kernel/module.c
+++ b/arch/arm/kernel/module.c
@@ -159,3 +159,8 @@ module_finalize(const Elf32_Ehdr *hdr, const Elf_Shdr *sechdrs,
 {
        return 0;
 }
+
+void
+module_arch_cleanup(struct module *mod)
+{
+}
index 463a2c7..e8258ad 100644 (file)
--- a/arch/i386/kernel/module.c
+++ b/arch/i386/kernel/module.c
@@ -123,3 +123,7 @@ int module_finalize(const Elf_Ehdr *hdr,
        }       
        return 0;
 }
+
+void module_arch_cleanup(struct module *mod)
+{
+}
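
All three stubs satisfy the new hook declared in include/linux/moduleloader.h and called from kernel/module.c (both in the file list above). A minimal sketch of the assumed call site; the surrounding unload code is not part of this excerpt:

        /* Sketch: generic unload path invoking the per-arch hook.
         * Architectures with no per-module state (alpha, arm, i386 above)
         * provide empty stubs; ia64 presumably frees its unwind tables here. */
        static void free_module(struct module *mod)
        {
                module_arch_cleanup(mod);
                /* ... unlink the module and free its sections ... */
        }
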
index fb55bf5..a111092 100644 (file)
--- a/arch/ia64/Kconfig
+++ b/arch/ia64/Kconfig
@@ -381,6 +381,10 @@ config HUGETLB_PAGE_SIZE_4GB
        depends on MCKINLEY
        bool "4GB"
 
+config HUGETLB_PAGE_SIZE_1GB
+       depends on MCKINLEY
+       bool "1GB"
+
 config HUGETLB_PAGE_SIZE_256MB
        bool "256MB"
 
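
The new symbol is presumably consumed next to its siblings in include/asm-ia64/page.h (also in the file list); a hedged sketch, with the shift values being the natural ones rather than quoted from the patch:

        #if defined(CONFIG_HUGETLB_PAGE_SIZE_4GB)
        # define HPAGE_SHIFT    32
        #elif defined(CONFIG_HUGETLB_PAGE_SIZE_1GB)     /* added above */
        # define HPAGE_SHIFT    30      /* 2^30 bytes = 1GB huge pages */
        #elif defined(CONFIG_HUGETLB_PAGE_SIZE_256MB)
        # define HPAGE_SHIFT    28
        #endif
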
index a5a4c6c..dd86e79 100644 (file)
--- a/arch/ia64/Makefile
+++ b/arch/ia64/Makefile
@@ -23,6 +23,7 @@ cflags-y      := -pipe $(EXTRA) -ffixed-r13 -mfixed-range=f10-f15,f32-f127 \
 CFLAGS_KERNEL  := -mconstant-gp
 
 GCC_VERSION=$(shell $(CC) -v 2>&1 | fgrep 'gcc version' | cut -f3 -d' ' | cut -f1 -d'.')
+GCC_MINOR_VERSION=$(shell $(CC) -v 2>&1 | fgrep 'gcc version' | cut -f3 -d' ' | cut -f2 -d'.')
 
 GAS_STATUS=$(shell arch/ia64/scripts/check-gas $(CC) $(OBJDUMP))
 
@@ -35,7 +36,14 @@ $(error Sorry, you need a newer version of the assembler, one that is built from
 endif
 
 ifneq ($(GCC_VERSION),2)
-       cflags-y += -frename-registers --param max-inline-insns=5000
+       cflags-$(CONFIG_ITANIUM) += -frename-registers
+endif
+
+ifeq ($(GCC_VERSION),3)
+ ifeq ($(GCC_MINOR_VERSION),4)
+       cflags-$(CONFIG_ITANIUM) += -mtune=merced
+       cflags-$(CONFIG_MCKINLEY) += -mtune=mckinley
+ endif
 endif
 
 cflags-$(CONFIG_ITANIUM_BSTEP_SPECIFIC)        += -mb-step
@@ -48,14 +56,14 @@ libs-y                              += arch/ia64/lib/
 core-y                         += arch/ia64/kernel/ arch/ia64/mm/
 core-$(CONFIG_IA32_SUPPORT)    += arch/ia64/ia32/
 core-$(CONFIG_IA64_DIG)        += arch/ia64/dig/
-core-$(CONFIG_IA64_GENERIC)    += arch/ia64/dig/ arch/ia64/hp/common/ arch/ia64/hp/zx1/ \
-                                  arch/ia64/hp/sim/
+core-$(CONFIG_IA64_GENERIC)    += arch/ia64/dig/
 core-$(CONFIG_IA64_HP_ZX1)     += arch/ia64/dig/
 core-$(CONFIG_IA64_SGI_SN)     += arch/ia64/sn/
 
 drivers-$(CONFIG_PCI)          += arch/ia64/pci/
 drivers-$(CONFIG_IA64_HP_SIM)  += arch/ia64/hp/sim/
 drivers-$(CONFIG_IA64_HP_ZX1)  += arch/ia64/hp/common/ arch/ia64/hp/zx1/
+drivers-$(CONFIG_IA64_GENERIC) += arch/ia64/hp/common/ arch/ia64/hp/zx1/ arch/ia64/hp/sim/
 
 boot := arch/ia64/boot
 tools := arch/ia64/tools
index 4d24527..0c55bda 100644 (file)
--- a/arch/ia64/dig/machvec.c
+++ b/arch/ia64/dig/machvec.c
@@ -1,2 +1,3 @@
-#define MACHVEC_PLATFORM_NAME  dig
+#define MACHVEC_PLATFORM_NAME          dig
+#define MACHVEC_PLATFORM_HEADER                <asm/machvec_dig.h>
 #include <asm/machvec_init.h>
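
Each platform file now names its own header instead of relying on machvec_init.h to guess it. A rough sketch of the pattern the init header is assumed to implement; the macro names here are assumptions, not quoted from the tree:

        /* in <asm/machvec_init.h>, roughly: */
        #include MACHVEC_PLATFORM_HEADER        /* the platform's hook declarations */
        MACHVEC_DEFINE(MACHVEC_PLATFORM_NAME)   /* emit its ia64_machine_vector */
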
index f9d86c5..668ca81 100644 (file)
--- a/arch/ia64/hp/common/sba_iommu.c
+++ b/arch/ia64/hp/common/sba_iommu.c
@@ -1,9 +1,9 @@
 /*
 **  IA64 System Bus Adapter (SBA) I/O MMU manager
 **
-**     (c) Copyright 2002 Alex Williamson
-**     (c) Copyright 2002 Grant Grundler
-**     (c) Copyright 2002 Hewlett-Packard Company
+**     (c) Copyright 2002-2003 Alex Williamson
+**     (c) Copyright 2002-2003 Grant Grundler
+**     (c) Copyright 2002-2003 Hewlett-Packard Company
 **
 **     Portions (c) 2000 Grant Grundler (from parisc I/O MMU code)
 **     Portions (c) 1999 Dave S. Miller (from sparc64 I/O MMU code)
 #include <linux/string.h>
 #include <linux/pci.h>
 #include <linux/proc_fs.h>
+#include <linux/seq_file.h>
+#include <linux/acpi.h>
 #include <linux/efi.h>
 
 #include <asm/delay.h>         /* ia64_get_itc() */
 #include <asm/io.h>
 #include <asm/page.h>          /* PAGE_OFFSET */
+#include <asm/dma.h>
+#include <asm/system.h>                /* wmb() */
 
+#include <asm/acpi-ext.h>
 
-#define DRIVER_NAME "SBA"
+#define PFX "IOC: "
 
+/*
+** This option allows cards capable of 64bit DMA to bypass the IOMMU.  If
+** not defined, all DMA will be 32bit and go through the TLB.
+*/
 #define ALLOW_IOV_BYPASS
+
+/*
+** If a device prefetches beyond the end of a valid pdir entry, it will cause
+** a hard failure, i.e. an MCA.  Version 3.0 and later of the zx1 LBA should
+** disconnect on 4k boundaries and prevent such issues.  If the device is
+** particularly aggressive, this option will keep the entire pdir valid such
+** that prefetching will hit a valid address.  This could severely impact
+** error containment, and is therefore off by default.  The page that is
+** used for spill-over is poisoned, so that should help debugging somewhat.
+*/
+#undef FULL_VALID_PDIR
+
 #define ENABLE_MARK_CLEAN
+
 /*
 ** The number of debug flags is a clue - this code is fragile.
 */
 #undef DEBUG_LARGE_SG_ENTRIES
 #undef DEBUG_BYPASS
 
+#if defined(FULL_VALID_PDIR) && defined(ASSERT_PDIR_SANITY)
+#error FULL_VALID_PDIR and ASSERT_PDIR_SANITY are mutually exclusive
+#endif
+
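
For reference, the bypass test this option enables in sba_map_single() further down, condensed from this patch:

        unsigned long pci_addr = virt_to_phys(addr);
        /* device mask covers the buffer's physical address: skip the IOMMU */
        if (dev && dev->dma_mask && (pci_addr & ~*dev->dma_mask) == 0)
                return pci_addr;
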
 #define SBA_INLINE     __inline__
 /* #define SBA_INLINE */
 
 #define ASSERT(expr)
 #endif
 
-#define KB(x) ((x) * 1024)
-#define MB(x) (KB (KB (x)))
-#define GB(x) (MB (KB (x)))
-
 /*
-** The number of pdir entries to "free" before issueing
+** The number of pdir entries to "free" before issuing
 ** a read to PCOM register to flush out PCOM writes.
 ** Interacts with allocation granularity (ie 4 or 8 entries
 ** allocated and free'd/purged at a time might make this
 */
 #define DELAYED_RESOURCE_CNT   16
 
-#define DEFAULT_DMA_HINT_REG(d)        0
-
-#define ZX1_FUNC_ID_VALUE    ((PCI_DEVICE_ID_HP_ZX1_SBA << 16) | PCI_VENDOR_ID_HP)
-#define ZX1_MC_ID    ((PCI_DEVICE_ID_HP_ZX1_MC << 16) | PCI_VENDOR_ID_HP)
+#define DEFAULT_DMA_HINT_REG   0
 
-#define SBA_FUNC_ID    0x0000  /* function id */
-#define SBA_FCLASS     0x0008  /* function class, bist, header, rev... */
+#define ZX1_IOC_ID     ((PCI_DEVICE_ID_HP_ZX1_IOC << 16) | PCI_VENDOR_ID_HP)
+#define REO_IOC_ID     ((PCI_DEVICE_ID_HP_REO_IOC << 16) | PCI_VENDOR_ID_HP)
+#define SX1000_IOC_ID  ((PCI_DEVICE_ID_HP_SX1000_IOC << 16) | PCI_VENDOR_ID_HP)
 
-#define SBA_FUNC_SIZE  0x10000   /* SBA configuration function reg set */
-
-unsigned int __initdata zx1_func_offsets[] = {0x1000, 0x4000, 0x8000,
-                                              0x9000, 0xa000, -1};
-
-#define SBA_IOC_OFFSET 0x1000
-
-#define MAX_IOC                1       /* we only have 1 for now*/
+#define ZX1_IOC_OFFSET 0x1000  /* ACPI reports SBA, we want IOC */
 
+#define IOC_FUNC_ID    0x000
+#define IOC_FCLASS     0x008   /* function class, bist, header, rev... */
 #define IOC_IBASE      0x300   /* IO TLB */
 #define IOC_IMASK      0x308
 #define IOC_PCOM       0x310
 #define IOC_TCNFG      0x318
 #define IOC_PDIR_BASE  0x320
 
-#define IOC_IOVA_SPACE_BASE    0x40000000 /* IOVA ranges start at 1GB */
+/* AGP GART driver looks for this */
+#define ZX1_SBA_IOMMU_COOKIE   0x0000badbadc0ffeeUL
 
 /*
 ** IOC supports 4/8/16/64KB page sizes (see TCNFG register)
@@ -152,7 +168,7 @@ unsigned int __initdata zx1_func_offsets[] = {0x1000, 0x4000, 0x8000,
 #define IOVP_MASK      PAGE_MASK
 
 struct ioc {
-       unsigned long   ioc_hpa;        /* I/O MMU base address */
+       void            *ioc_hpa;       /* I/O MMU base address */
        char            *res_map;       /* resource map, bit == pdir entry */
        u64             *pdir_base;     /* physical base address */
        unsigned long   ibase;          /* pdir IOV Space base */
@@ -193,37 +209,37 @@ struct ioc {
 #endif
 #endif
 
-       /* STUFF We don't need in performance path */
+       /* Stuff we don't need in performance path */
+       struct ioc      *next;          /* list of IOC's in system */
+       acpi_handle     handle;         /* for multiple IOC's */
+       const char      *name;
+       unsigned int    func_id;
+       unsigned int    rev;            /* HW revision of chip */
+       u32             iov_size;
        unsigned int    pdir_size;      /* in bytes, determined by IOV Space size */
+       struct pci_dev  *sac_only_dev;
 };
 
-struct sba_device {
-       struct sba_device       *next;  /* list of SBA's in system */
-       const char              *name;
-       unsigned long           sba_hpa; /* base address */
-       spinlock_t              sba_lock;
-       unsigned int            flags;  /* state/functionality enabled */
-       unsigned int            hw_rev;  /* HW revision of chip */
-
-       unsigned int            num_ioc;  /* number of on-board IOC's */
-       struct ioc              ioc[MAX_IOC];
-};
-
-
-static struct sba_device *sba_list;
-static int sba_count;
+static struct ioc *ioc_list;
 static int reserve_sba_gart = 1;
-static struct pci_dev sac_only_dev;
 
-#define sba_sg_address(sg) (page_address((sg)->page) + (sg)->offset)
-#define sba_sg_len(sg) (sg->length)
-#define sba_sg_iova(sg) (sg->dma_address)
-#define sba_sg_iova_len(sg) (sg->dma_length)
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0)
+#define sba_sg_address(sg)     (page_address((sg)->page) + (sg)->offset)
+#else
+#define sba_sg_address(sg)     ((sg)->address ? (sg)->address : \
+                                  page_address((sg)->page) + (sg)->offset)
+#endif
 
-/* REVISIT - fix me for multiple SBAs/IOCs */
-#define GET_IOC(dev) (sba_list->ioc)
-#define SBA_SET_AGP(sba_dev) (sba_dev->flags |= 0x1)
-#define SBA_GET_AGP(sba_dev) (sba_dev->flags & 0x1)
+#ifdef FULL_VALID_PDIR
+static u64 prefetch_spill_page;
+#endif
+
+#ifdef CONFIG_PCI
+# define GET_IOC(dev)  (((dev)->bus == &pci_bus_type)                                          \
+                        ? ((struct ioc *) PCI_CONTROLLER(to_pci_dev(dev))->iommu) : NULL)
+#else
+# define GET_IOC(dev)  NULL
+#endif
 
 /*
 ** DMA_CHUNK_SIZE is used by the SCSI mid-layer to break up
@@ -232,10 +248,7 @@ static struct pci_dev sac_only_dev;
 ** rather than the HW. I/O MMU allocation algorithms can be
 ** faster with smaller sizes (to some degree).
 */
-#define DMA_CHUNK_SIZE  (BITS_PER_LONG*IOVP_SIZE)
-
-/* Looks nice and keeps the compiler happy */
-#define SBA_DEV(d) ((struct sba_device *) (d))
+#define DMA_CHUNK_SIZE  (BITS_PER_LONG*PAGE_SIZE)
 
 #define ROUNDUP(x,y) ((x + ((y)-1)) & ~((y)-1))
 
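
ROUNDUP assumes y is a power of two; a worked value:

        /* ROUNDUP(0x2345, 0x1000) == (0x2345 + 0xFFF) & ~0xFFF == 0x3000 */
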
@@ -255,7 +268,7 @@ static struct pci_dev sac_only_dev;
  * sba_dump_tlb - debugging only - print IOMMU operating parameters
  * @hpa: base address of the IOMMU
  *
- * Print the size/location of the IO MMU Pdir.
+ * Print the size/location of the IO MMU PDIR.
  */
 static void
 sba_dump_tlb(char *hpa)
@@ -273,19 +286,19 @@ sba_dump_tlb(char *hpa)
 #ifdef ASSERT_PDIR_SANITY
 
 /**
- * sba_dump_pdir_entry - debugging only - print one IOMMU Pdir entry
+ * sba_dump_pdir_entry - debugging only - print one IOMMU PDIR entry
  * @ioc: IO MMU structure which owns the pdir we are interested in.
  * @msg: text to print ont the output line.
  * @pide: pdir index.
  *
- * Print one entry of the IO MMU Pdir in human readable form.
+ * Print one entry of the IO MMU PDIR in human readable form.
  */
 static void
 sba_dump_pdir_entry(struct ioc *ioc, char *msg, uint pide)
 {
        /* start printing from lowest pde in rval */
-       u64 *ptr = &(ioc->pdir_base[pide  & ~(BITS_PER_LONG - 1)]);
-       unsigned long *rptr = (unsigned long *) &(ioc->res_map[(pide >>3) & ~(sizeof(unsigned long) - 1)]);
+       u64 *ptr = &ioc->pdir_base[pide  & ~(BITS_PER_LONG - 1)];
+       unsigned long *rptr = (unsigned long *) &ioc->res_map[(pide >>3) & -sizeof(unsigned long)];
        uint rcnt;
 
        printk(KERN_DEBUG "SBA: %s rp %p bit %d rval 0x%lx\n",
@@ -296,7 +309,7 @@ sba_dump_pdir_entry(struct ioc *ioc, char *msg, uint pide)
                printk(KERN_DEBUG "%s %2d %p %016Lx\n",
                       (rcnt == (pide & (BITS_PER_LONG - 1)))
                       ? "    -->" : "       ",
-                      rcnt, ptr, *ptr );
+                      rcnt, ptr, (unsigned long long) *ptr );
                rcnt++;
                ptr++;
        }
@@ -359,17 +372,18 @@ sba_check_pdir(struct ioc *ioc, char *msg)
  * print the SG list so we can verify it's correct by hand.
  */
 static void
-sba_dump_sg(struct ioc *ioc, struct scatterlist *startsg, int nents)
+sba_dump_sg( struct ioc *ioc, struct scatterlist *startsg, int nents)
 {
        while (nents-- > 0) {
                printk(KERN_DEBUG " %d : DMA %08lx/%05x CPU %p\n", nents,
-                      (unsigned long) sba_sg_iova(startsg), sba_sg_iova_len(startsg),
+                      startsg->dma_address, startsg->dma_length,
                       sba_sg_address(startsg));
                startsg++;
        }
 }
+
 static void
-sba_check_sg(struct ioc *ioc, struct scatterlist *startsg, int nents)
+sba_check_sg( struct ioc *ioc, struct scatterlist *startsg, int nents)
 {
        struct scatterlist *the_sg = startsg;
        int the_nents = nents;
@@ -398,9 +412,11 @@ sba_check_sg(struct ioc *ioc, struct scatterlist *startsg, int nents)
 #define PAGES_PER_RANGE 1      /* could increase this to 4 or 8 if needed */
 
 /* Convert from IOVP to IOVA and vice versa. */
-#define SBA_IOVA(ioc,iovp,offset,hint_reg) ((ioc->ibase) | (iovp) | (offset) | ((hint_reg)<<(ioc->hint_shift_pdir)))
+#define SBA_IOVA(ioc,iovp,offset,hint_reg) ((ioc->ibase) | (iovp) | (offset) |         \
+                                           ((hint_reg)<<(ioc->hint_shift_pdir)))
 #define SBA_IOVP(ioc,iova) (((iova) & ioc->hint_mask_pdir) & ~(ioc->ibase))
 
+/* FIXME : review these macros to verify correctness and usage */
 #define PDIR_INDEX(iovp)   ((iovp)>>IOVP_SHIFT)
 
 #define RESMAP_MASK(n)    ~(~0UL << (n))
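
For orientation, worked values for the two lookup macros, assuming 4K IO pages (IOVP_SHIFT == 12):

        u32 iovp = 0x40003000;
        /* PDIR_INDEX(iovp) == 0x40003000 >> 12 == 0x40003 (pdir slot) */
        /* RESMAP_MASK(4)   == ~(~0UL << 4) == 0xF: four adjacent
         * resource-map bits, i.e. four consecutive pdir entries */
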
@@ -408,7 +424,7 @@ sba_check_sg(struct ioc *ioc, struct scatterlist *startsg, int nents)
 
 
 /**
- * sba_search_bitmap - find free space in IO Pdir resource bitmap
+ * sba_search_bitmap - find free space in IO PDIR resource bitmap
  * @ioc: IO MMU structure which owns the pdir we are interested in.
  * @bits_wanted: number of entries we need.
  *
@@ -445,7 +461,7 @@ sba_search_bitmap(struct ioc *ioc, unsigned long bits_wanted)
                ** We need the alignment to invalidate I/O TLB using
                ** SBA HW features in the unmap path.
                */
-               unsigned long o = 1UL << get_order(bits_wanted << IOVP_SHIFT);
+               unsigned long o = 1 << get_order(bits_wanted << PAGE_SHIFT);
                uint bitshiftcnt = ROUNDUP(ioc->res_bitshift, o);
                unsigned long mask;
 
@@ -491,7 +507,7 @@ sba_search_bitmap(struct ioc *ioc, unsigned long bits_wanted)
 
 
 /**
- * sba_alloc_range - find free bits and mark them in IO Pdir resource bitmap
+ * sba_alloc_range - find free bits and mark them in IO PDIR resource bitmap
  * @ioc: IO MMU structure which owns the pdir we are interested in.
  * @size: number of bytes to create a mapping for
  *
@@ -520,7 +536,8 @@ sba_alloc_range(struct ioc *ioc, size_t size)
        if (pide >= (ioc->res_size << 3)) {
                pide = sba_search_bitmap(ioc, pages_needed);
                if (pide >= (ioc->res_size << 3))
-                       panic(__FILE__ ": I/O MMU @ %lx is out of mapping resources\n", ioc->ioc_hpa);
+                       panic(__FILE__ ": I/O MMU @ %p is out of mapping resources\n",
+                             ioc->ioc_hpa);
        }
 
 #ifdef ASSERT_PDIR_SANITY
@@ -553,7 +570,7 @@ sba_alloc_range(struct ioc *ioc, size_t size)
 
 
 /**
- * sba_free_range - unmark bits in IO Pdir resource bitmap
+ * sba_free_range - unmark bits in IO PDIR resource bitmap
  * @ioc: IO MMU structure which owns the pdir we are interested in.
  * @iova: IO virtual address which was previously allocated.
  * @size: number of bytes to create a mapping for
@@ -600,14 +617,14 @@ sba_free_range(struct ioc *ioc, dma_addr_t iova, size_t size)
 
 
 /**
- * sba_io_pdir_entry - fill in one IO Pdir entry
- * @pdir_ptr:  pointer to IO Pdir entry
- * @phys_page: phys CPU address of page to map
+ * sba_io_pdir_entry - fill in one IO PDIR entry
+ * @pdir_ptr:  pointer to IO PDIR entry
+ * @vba: Virtual CPU address of buffer to map
  *
  * SBA Mapping Routine
  *
- * Given a physical address (phys_page, arg1) sba_io_pdir_entry()
- * loads the I/O Pdir entry pointed to by pdir_ptr (arg0).
+ * Given a virtual address (vba, arg1) sba_io_pdir_entry()
+ * loads the I/O PDIR entry pointed to by pdir_ptr (arg0).
  * Each IO Pdir entry consists of 8 bytes as shown below
  * (LSB == bit 0):
  *
@@ -619,12 +636,21 @@ sba_free_range(struct ioc *ioc, dma_addr_t iova, size_t size)
  *  V  == Valid Bit
  *  U  == Unused
  * PPN == Physical Page Number
+ *
+ * The physical address fields are filled with the results of virt_to_phys()
+ * on the vba.
  */
 
-#define SBA_VALID_MASK 0x80000000000000FFULL
-#define sba_io_pdir_entry(pdir_ptr, phys_page) *pdir_ptr = (phys_page | SBA_VALID_MASK)
-#define sba_io_page(pdir_ptr) (*pdir_ptr & ~SBA_VALID_MASK)
-
+#if 1
+#define sba_io_pdir_entry(pdir_ptr, vba) *pdir_ptr = ((vba & ~0xE000000000000FFFULL)   \
+                                                     | 0x8000000000000000ULL)
+#else
+void SBA_INLINE
+sba_io_pdir_entry(u64 *pdir_ptr, unsigned long vba)
+{
+       *pdir_ptr = ((vba & ~0xE000000000000FFFULL) | 0x80000000000000FFULL);
+}
+#endif
 
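
A worked value for the macro form above, assuming an identity-mapped (region 7) kernel address, where masking the region bits is what implements the virt_to_phys() mentioned in the comment:

        u64 vba = 0xe000000012345678UL;            /* illustrative kernel address */
        u64 pde = (vba & ~0xE000000000000FFFULL)   /* strip region bits + page offset */
                | 0x8000000000000000ULL;           /* set V, bit 63 */
        /* pde == 0x8000000012345000: valid entry pointing at vba's page */
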
 #ifdef ENABLE_MARK_CLEAN
 /**
@@ -640,7 +666,7 @@ mark_clean (void *addr, size_t size)
        pg_addr = PAGE_ALIGN((unsigned long) addr);
        end = (unsigned long) addr + size;
        while (pg_addr + PAGE_SIZE <= end) {
-               struct page *page = virt_to_page(pg_addr);
+               struct page *page = virt_to_page((void *)pg_addr);
                set_bit(PG_arch_1, &page->flags);
                pg_addr += PAGE_SIZE;
        }
@@ -648,12 +674,12 @@ mark_clean (void *addr, size_t size)
 #endif
 
 /**
- * sba_mark_invalid - invalidate one or more IO Pdir entries
+ * sba_mark_invalid - invalidate one or more IO PDIR entries
  * @ioc: IO MMU structure which owns the pdir we are interested in.
  * @iova:  IO Virtual Address mapped earlier
  * @byte_cnt:  number of bytes this mapping covers.
  *
- * Marking the IO Pdir entry(ies) as Invalid and invalidate
+ * Marking the IO PDIR entry(ies) as Invalid and invalidate
  * corresponding IO TLB entry. The PCOM (Purge Command Register)
  * is to purge stale entries in the IO TLB when unmapping entries.
  *
@@ -687,15 +713,24 @@ sba_mark_invalid(struct ioc *ioc, dma_addr_t iova, size_t byte_cnt)
 
                iovp |= IOVP_SHIFT;     /* set "size" field for PCOM */
 
+#ifndef FULL_VALID_PDIR
                /*
-               ** clear I/O Pdir entry "valid" bit
+               ** clear I/O PDIR entry "valid" bit
                ** Do NOT clear the rest - save it for debugging.
                ** We should only clear bits that have previously
                ** been enabled.
                */
-               ioc->pdir_base[off] &= ~SBA_VALID_MASK;
+               ioc->pdir_base[off] &= ~(0x80000000000000FFULL);
+#else
+               /*
+               ** If we want to maintain the PDIR as valid, put in
+               ** the spill page so devices prefetching won't
+               ** cause a hard fail.
+               */
+               ioc->pdir_base[off] = (0x80000000000000FFULL | prefetch_spill_page);
+#endif
        } else {
-               u32 t = get_order(byte_cnt) + IOVP_SHIFT;
+               u32 t = get_order(byte_cnt) + PAGE_SHIFT;
 
                iovp |= t;
                ASSERT(t <= 31);   /* 2GB! Max value of "size" field */
@@ -703,14 +738,18 @@ sba_mark_invalid(struct ioc *ioc, dma_addr_t iova, size_t byte_cnt)
                do {
                        /* verify this pdir entry is enabled */
                        ASSERT(ioc->pdir_base[off]  >> 63);
+#ifndef FULL_VALID_PDIR
                        /* clear I/O Pdir entry "valid" bit first */
-                       ioc->pdir_base[off] &= ~SBA_VALID_MASK;
+                       ioc->pdir_base[off] &= ~(0x80000000000000FFULL);
+#else
+                       ioc->pdir_base[off] = (0x80000000000000FFULL | prefetch_spill_page);
+#endif
                        off++;
                        byte_cnt -= IOVP_SIZE;
                } while (byte_cnt > 0);
        }
 
-       WRITE_REG(iovp, ioc->ioc_hpa+IOC_PCOM);
+       WRITE_REG(iovp | ioc->ibase, ioc->ioc_hpa+IOC_PCOM);
 }
 
 /**
@@ -718,26 +757,23 @@ sba_mark_invalid(struct ioc *ioc, dma_addr_t iova, size_t byte_cnt)
  * @dev: instance of PCI owned by the driver that's asking.
  * @addr:  driver buffer to map.
  * @size:  number of bytes to map in driver buffer.
- * @direction:  R/W or both.
+ * @dir:  R/W or both.
  *
  * See Documentation/DMA-mapping.txt
  */
 dma_addr_t
-sba_map_single(struct pci_dev *dev, void *addr, size_t size, int direction)
+sba_map_single(struct device *dev, void *addr, size_t size, int dir)
 {
        struct ioc *ioc;
-       unsigned long flags; 
+       unsigned long flags;
        dma_addr_t iovp;
        dma_addr_t offset;
        u64 *pdir_start;
        int pide;
 #ifdef ALLOW_IOV_BYPASS
-       unsigned long phys_addr = virt_to_phys(addr);
+       unsigned long pci_addr = virt_to_phys(addr);
 #endif
 
-       if (!sba_list)
-               panic("sba_map_single: no SBA found!\n");
-
        ioc = GET_IOC(dev);
        ASSERT(ioc);
 
@@ -745,7 +781,7 @@ sba_map_single(struct pci_dev *dev, void *addr, size_t size, int direction)
        /*
        ** Check if the PCI device can DMA to ptr... if so, just return ptr
        */
-       if ((phys_addr & ~dev->dma_mask) == 0) {
+       if (dev && dev->dma_mask && (pci_addr & ~*dev->dma_mask) == 0) {
                /*
                ** Device is bit capable of DMA'ing to the buffer...
                ** just return the PCI address of ptr
@@ -756,8 +792,8 @@ sba_map_single(struct pci_dev *dev, void *addr, size_t size, int direction)
                spin_unlock_irqrestore(&ioc->res_lock, flags);
 #endif
                DBG_BYPASS("sba_map_single() bypass mask/addr: 0x%lx/0x%lx\n",
-                          dev->dma_mask, phys_addr);
-               return phys_addr;
+                          *dev->dma_mask, pci_addr);
+               return pci_addr;
        }
 #endif
 
@@ -790,8 +826,7 @@ sba_map_single(struct pci_dev *dev, void *addr, size_t size, int direction)
 
        while (size > 0) {
                ASSERT(((u8 *)pdir_start)[7] == 0); /* verify availability */
-
-               sba_io_pdir_entry(pdir_start, virt_to_phys(addr));
+               sba_io_pdir_entry(pdir_start, (unsigned long) addr);
 
                DBG_RUN("     pdir 0x%p %lx\n", pdir_start, *pdir_start);
 
@@ -799,12 +834,15 @@ sba_map_single(struct pci_dev *dev, void *addr, size_t size, int direction)
                size -= IOVP_SIZE;
                pdir_start++;
        }
+       /* force pdir update */
+       wmb();
+
        /* form complete address */
 #ifdef ASSERT_PDIR_SANITY
        sba_check_pdir(ioc,"Check after sba_map_single()");
 #endif
        spin_unlock_irqrestore(&ioc->res_lock, flags);
-       return SBA_IOVA(ioc, iovp, offset, DEFAULT_DMA_HINT_REG(direction));
+       return SBA_IOVA(ioc, iovp, offset, DEFAULT_DMA_HINT_REG);
 }
 
 /**
@@ -812,23 +850,19 @@ sba_map_single(struct pci_dev *dev, void *addr, size_t size, int direction)
  * @dev: instance of PCI owned by the driver that's asking.
  * @iova:  IOVA of driver buffer previously mapped.
  * @size:  number of bytes mapped in driver buffer.
- * @direction:  R/W or both.
+ * @dir:  R/W or both.
  *
  * See Documentation/DMA-mapping.txt
  */
-void sba_unmap_single(struct pci_dev *dev, dma_addr_t iova, size_t size,
-               int direction)
+void sba_unmap_single(struct device *dev, dma_addr_t iova, size_t size, int dir)
 {
        struct ioc *ioc;
 #if DELAYED_RESOURCE_CNT > 0
        struct sba_dma_pair *d;
 #endif
-       unsigned long flags; 
+       unsigned long flags;
        dma_addr_t offset;
 
-       if (!sba_list)
-               panic("sba_map_single: no SBA found!\n");
-
        ioc = GET_IOC(dev);
        ASSERT(ioc);
 
@@ -845,7 +879,7 @@ void sba_unmap_single(struct pci_dev *dev, dma_addr_t iova, size_t size,
                DBG_BYPASS("sba_unmap_single() bypass addr: 0x%lx\n", iova);
 
 #ifdef ENABLE_MARK_CLEAN
-               if (direction == PCI_DMA_FROMDEVICE) {
+               if (dir == DMA_FROM_DEVICE) {
                        mark_clean(phys_to_virt(iova), size);
                }
 #endif
@@ -861,29 +895,6 @@ void sba_unmap_single(struct pci_dev *dev, dma_addr_t iova, size_t size,
        size += offset;
        size = ROUNDUP(size, IOVP_SIZE);
 
-#ifdef ENABLE_MARK_CLEAN
-       /*
-       ** Don't need to hold the spinlock while telling VM pages are "clean".
-       ** The pages are "busy" in the resource map until we mark them free.
-       ** But tell VM pages are clean *before* releasing the resource
-       ** in order to avoid race conditions.
-       */
-       if (direction == PCI_DMA_FROMDEVICE) {
-               u32 iovp = (u32) SBA_IOVP(ioc,iova);
-               unsigned int pide = PDIR_INDEX(iovp);
-               u64 *pdirp = &(ioc->pdir_base[pide]);
-               size_t byte_cnt = size;
-               void *addr;
-
-               do {
-                       addr = phys_to_virt(sba_io_page(pdirp));
-                       mark_clean(addr, min(byte_cnt, IOVP_SIZE));
-                       pdirp++;
-                       byte_cnt -= IOVP_SIZE;
-               } while (byte_cnt > 0);
-       }
-#endif
-
        spin_lock_irqsave(&ioc->res_lock, flags);
 #ifdef CONFIG_PROC_FS
        ioc->usingle_calls++;
@@ -909,7 +920,40 @@ void sba_unmap_single(struct pci_dev *dev, dma_addr_t iova, size_t size,
        sba_free_range(ioc, iova, size);
        READ_REG(ioc->ioc_hpa+IOC_PCOM);        /* flush purges */
 #endif /* DELAYED_RESOURCE_CNT == 0 */
+#ifdef ENABLE_MARK_CLEAN
+       if (dir == DMA_FROM_DEVICE) {
+               u32 iovp = (u32) SBA_IOVP(ioc,iova);
+               int off = PDIR_INDEX(iovp);
+               void *addr;
+
+               if (size <= IOVP_SIZE) {
+                       addr = phys_to_virt(ioc->pdir_base[off] &
+                                           ~0xE000000000000FFFULL);
+                       mark_clean(addr, size);
+               } else {
+                       size_t byte_cnt = size;
+
+                       do {
+                               addr = phys_to_virt(ioc->pdir_base[off] &
+                                                   ~0xE000000000000FFFULL);
+                               mark_clean(addr, min(byte_cnt, IOVP_SIZE));
+                               off++;
+                               byte_cnt -= IOVP_SIZE;
+
+                       } while (byte_cnt > 0);
+               }
+       }
+#endif
        spin_unlock_irqrestore(&ioc->res_lock, flags);
+
+       /* XXX REVISIT for 2.5 Linux - need syncdma for zero-copy support.
+       ** For Astro based systems this isn't a big deal WRT performance.
+       ** As long as 2.4 kernels copyin/copyout data from/to userspace,
+       ** we don't need the syncdma. The issue here is I/O MMU cachelines
+       ** are *not* coherent in all cases.  May be hwrev dependent.
+       ** Need to investigate more.
+       asm volatile("syncdma");
+       */
 }
 
 
@@ -922,29 +966,25 @@ void sba_unmap_single(struct pci_dev *dev, dma_addr_t iova, size_t size,
  * See Documentation/DMA-mapping.txt
  */
 void *
-sba_alloc_consistent(struct pci_dev *hwdev, size_t size, dma_addr_t *dma_handle)
+sba_alloc_coherent (struct device *hwdev, size_t size, dma_addr_t *dma_handle, int flags)
 {
-       void *ret;
-
-       if (!hwdev) {
-               /* only support PCI */
-               *dma_handle = 0;
-               return 0;
-       }
+       struct ioc *ioc;
+       void *addr;
 
-        ret = (void *) __get_free_pages(GFP_ATOMIC, get_order(size));
+        addr = (void *) __get_free_pages(flags, get_order(size));
+       if (!addr)
+               return NULL;
 
-       if (ret) {
-               memset(ret, 0, size);
-               /*
-                * REVISIT: if sba_map_single starts needing more
-                * than dma_mask from the device, this needs to be
-                * updated.
-                */
-               *dma_handle = sba_map_single(&sac_only_dev, ret, size, 0);
-       }
+       /*
+        * REVISIT: if sba_map_single starts needing more than dma_mask from the
+        * device, this needs to be updated.
+        */
+       ioc = GET_IOC(hwdev);
+       ASSERT(ioc);
+       *dma_handle = sba_map_single(&ioc->sac_only_dev->dev, addr, size, 0);
 
-       return ret;
+       memset(addr, 0, size);
+       return addr;
 }
 
 
@@ -957,117 +997,245 @@ sba_alloc_consistent(struct pci_dev *hwdev, size_t size, dma_addr_t *dma_handle)
  *
  * See Documentation/DMA-mapping.txt
  */
-void sba_free_consistent(struct pci_dev *hwdev, size_t size, void *vaddr,
-               dma_addr_t dma_handle)
+void sba_free_coherent (struct device *hwdev, size_t size, void *vaddr, dma_addr_t dma_handle)
 {
        sba_unmap_single(hwdev, dma_handle, size, 0);
        free_pages((unsigned long) vaddr, get_order(size));
 }
 
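
These entry points follow the 2.5 generic DMA API; a hedged usage sketch from a driver's point of view (pdev and the size are illustrative, not from this patch):

        dma_addr_t bus;
        void *cpu = dma_alloc_coherent(&pdev->dev, 4096, &bus, GFP_KERNEL);
        /* ... device DMAs via 'bus' while the CPU uses 'cpu' ... */
        dma_free_coherent(&pdev->dev, 4096, cpu, bus);
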
 
+/*
+** Since 0 is a valid pdir_base index value, can't use that
+** to determine if a value is valid or not. Use a flag to indicate
+** the SG list entry contains a valid pdir index.
+*/
+#define PIDE_FLAG 0x1UL
+
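
The flag is what ties the two passes together; condensed from the code below, sba_coalesce_chunks() parks the allocated pdir index in dma_address with bit 0 set, and sba_fill_pdir() later recognizes it as the start of a DMA stream:

        /* pass 1, sba_coalesce_chunks(): */
        dma_sg->dma_address = (dma_addr_t) (PIDE_FLAG
                | (sba_alloc_range(ioc, dma_len) << IOVP_SHIFT)
                | dma_offset);
        /* pass 2, sba_fill_pdir(): */
        if (startsg->dma_address & PIDE_FLAG) {
                u32 pide = startsg->dma_address & ~PIDE_FLAG;
                /* ... program pdir entries starting at slot pide ... */
        }
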
 #ifdef DEBUG_LARGE_SG_ENTRIES
 int dump_run_sg = 0;
 #endif
 
-#define SG_ENT_VIRT_PAGE(sg) page_address((sg)->page)
-#define SG_ENT_PHYS_PAGE(SG) virt_to_phys(SG_ENT_VIRT_PAGE(SG))
+
+/**
+ * sba_fill_pdir - write allocated SG entries into IO PDIR
+ * @ioc: IO MMU structure which owns the pdir we are interested in.
+ * @startsg:  list of IOVA/size pairs
+ * @nents: number of entries in startsg list
+ *
+ * Take preprocessed SG list and write corresponding entries
+ * in the IO PDIR.
+ */
+
+static SBA_INLINE int
+sba_fill_pdir(
+       struct ioc *ioc,
+       struct scatterlist *startsg,
+       int nents)
+{
+       struct scatterlist *dma_sg = startsg;   /* pointer to current DMA */
+       int n_mappings = 0;
+       u64 *pdirp = 0;
+       unsigned long dma_offset = 0;
+
+       dma_sg--;
+       while (nents-- > 0) {
+               int     cnt = startsg->dma_length;
+               startsg->dma_length = 0;
+
+#ifdef DEBUG_LARGE_SG_ENTRIES
+               if (dump_run_sg)
+                       printk(" %2d : %08lx/%05x %p\n",
+                               nents, startsg->dma_address, cnt,
+                               sba_sg_address(startsg));
+#else
+               DBG_RUN_SG(" %d : %08lx/%05x %p\n",
+                               nents, startsg->dma_address, cnt,
+                               sba_sg_address(startsg));
+#endif
+               /*
+               ** Look for the start of a new DMA stream
+               */
+               if (startsg->dma_address & PIDE_FLAG) {
+                       u32 pide = startsg->dma_address & ~PIDE_FLAG;
+                       dma_offset = (unsigned long) pide & ~IOVP_MASK;
+                       startsg->dma_address = 0;
+                       dma_sg++;
+                       dma_sg->dma_address = pide | ioc->ibase;
+                       pdirp = &(ioc->pdir_base[pide >> IOVP_SHIFT]);
+                       n_mappings++;
+               }
+
+               /*
+               ** Look for a VCONTIG chunk
+               */
+               if (cnt) {
+                       unsigned long vaddr = (unsigned long) sba_sg_address(startsg);
+                       ASSERT(pdirp);
+
+                       /* Since multiple Vcontig blocks could make up
+                       ** one DMA stream, *add* cnt to dma_len.
+                       */
+                       dma_sg->dma_length += cnt;
+                       cnt += dma_offset;
+                       dma_offset=0;   /* only want offset on first chunk */
+                       cnt = ROUNDUP(cnt, IOVP_SIZE);
+#ifdef CONFIG_PROC_FS
+                       ioc->msg_pages += cnt >> IOVP_SHIFT;
+#endif
+                       do {
+                               sba_io_pdir_entry(pdirp, vaddr);
+                               vaddr += IOVP_SIZE;
+                               cnt -= IOVP_SIZE;
+                               pdirp++;
+                       } while (cnt > 0);
+               }
+               startsg++;
+       }
+       /* force pdir update */
+       wmb();
+
+#ifdef DEBUG_LARGE_SG_ENTRIES
+       dump_run_sg = 0;
+#endif
+       return(n_mappings);
+}
+
+
+/*
+** Two address ranges are DMA contiguous *iff* "end of prev" and
+** "start of next" are both on a page boundry.
+**
+** (shift left is a quick trick to mask off upper bits)
+*/
+#define DMA_CONTIG(__X, __Y) \
+       (((((unsigned long) __X) | ((unsigned long) __Y)) << (BITS_PER_LONG - PAGE_SHIFT)) == 0UL)
 
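
A worked check of the shift trick, assuming 64-bit longs and 16K pages (PAGE_SHIFT == 14), so the left shift is by 50 bits and only the page-offset bits can survive it:

        unsigned long end_prev   = 0xe000000000104000UL;  /* 16K aligned */
        unsigned long start_next = 0xe000000000208000UL;  /* 16K aligned */
        /* ((end_prev | start_next) << 50) == 0, so DMA_CONTIG() holds and
         * the two chunks may share one DMA stream */
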
 
 /**
  * sba_coalesce_chunks - preprocess the SG list
  * @ioc: IO MMU structure which owns the pdir we are interested in.
- * @startsg:  input=SG list    output=DMA addr/len pairs filled in
+ * @startsg:  list of IOVA/size pairs
  * @nents: number of entries in startsg list
- * @direction: R/W or both.
  *
- * Walk the SG list and determine where the breaks are in the DMA stream.
- * Allocate IO Pdir resources and fill them in separate loop.
- * Returns the number of DMA streams used for output IOVA list.
- * Note each DMA stream can consume multiple IO Pdir entries.
+ * First pass is to walk the SG list and determine where the breaks are
+ * in the DMA stream. Allocates PDIR entries but does not fill them.
+ * Returns the number of DMA chunks.
  *
- * Code is written assuming some coalescing is possible.
+ * Doing the fill separately from the coalescing/allocation keeps the
+ * code simpler. Future enhancement could make one pass through
+ * the sglist do both.
  */
 static SBA_INLINE int
-sba_coalesce_chunks(struct ioc *ioc, struct scatterlist *startsg,
-       int nents, int direction)
+sba_coalesce_chunks( struct ioc *ioc,
+       struct scatterlist *startsg,
+       int nents)
 {
-       struct scatterlist *dma_sg = startsg;   /* return array */
+       struct scatterlist *vcontig_sg;    /* VCONTIG chunk head */
+       unsigned long vcontig_len;         /* len of VCONTIG chunk */
+       unsigned long vcontig_end;
+       struct scatterlist *dma_sg;        /* next DMA stream head */
+       unsigned long dma_offset, dma_len; /* start/len of DMA stream */
        int n_mappings = 0;
 
-       ASSERT(nents > 1);
-
-       do {
-               unsigned int dma_cnt = 1; /* number of pages in DMA stream */
-               unsigned int pide;      /* index into IO Pdir array */
-               u64 *pdirp;             /* pointer into IO Pdir array */
-               unsigned long dma_offset, dma_len; /* cumulative DMA stream */
+       while (nents > 0) {
+               unsigned long vaddr = (unsigned long) sba_sg_address(startsg);
 
                /*
                ** Prepare for first/next DMA stream
                */
-               dma_len = sba_sg_len(startsg);
-               dma_offset = (unsigned long) sba_sg_address(startsg);
-               startsg++;
-               nents--;
+               dma_sg = vcontig_sg = startsg;
+               dma_len = vcontig_len = vcontig_end = startsg->length;
+               vcontig_end +=  vaddr;
+               dma_offset = vaddr & ~IOVP_MASK;
+
+               /* PARANOID: clear entries */
+               startsg->dma_address = startsg->dma_length = 0;
 
                /*
-               ** We want to know how many entries can be coalesced
-               ** before trying to allocate IO Pdir space.
-               ** IOVAs can then be allocated "naturally" aligned
-               ** to take advantage of the block IO TLB flush.
+               ** This loop terminates one iteration "early" since
+               ** it's always looking one "ahead".
                */
-               while (nents) {
-                       unsigned long end_offset = dma_offset + dma_len;
+               while (--nents > 0) {
+                       unsigned long vaddr;    /* tmp */
 
-                       /* prev entry must end on a page boundary */
-                       if (end_offset & IOVP_MASK)
-                               break;
+                       startsg++;
 
-                       /* next entry start on a page boundary? */
-                       if (startsg->offset)
-                               break;
+                       /* PARANOID */
+                       startsg->dma_address = startsg->dma_length = 0;
+
+                       /* catch brokenness in SCSI layer */
+                       ASSERT(startsg->length <= DMA_CHUNK_SIZE);
 
                        /*
-                       ** make sure current dma stream won't exceed
-                       ** DMA_CHUNK_SIZE if coalescing entries.
+                       ** First make sure current dma stream won't
+                       ** exceed DMA_CHUNK_SIZE if we coalesce the
+                       ** next entry.
                        */
-                       if (((end_offset + startsg->length + ~IOVP_MASK)
-                                                               & IOVP_MASK)
-                                       > DMA_CHUNK_SIZE)
+                       if (((dma_len + dma_offset + startsg->length + ~IOVP_MASK) & IOVP_MASK)
+                           > DMA_CHUNK_SIZE)
                                break;
 
-                       dma_len += sba_sg_len(startsg);
-                       startsg++;
-                       nents--;
-                       dma_cnt++;
-               }
+                       /*
+                       ** Then look for virtually contiguous blocks.
+                       **
+                       ** append the next transaction?
+                       */
+                       vaddr = (unsigned long) sba_sg_address(startsg);
+                       if  (vcontig_end == vaddr)
+                       {
+                               vcontig_len += startsg->length;
+                               vcontig_end += startsg->length;
+                               dma_len     += startsg->length;
+                               continue;
+                       }
 
-               ASSERT(dma_len <= DMA_CHUNK_SIZE);
+#ifdef DEBUG_LARGE_SG_ENTRIES
+                       dump_run_sg = (vcontig_len > IOVP_SIZE);
+#endif
 
-               /* allocate IO Pdir resource.
-               ** returns index into (u64) IO Pdir array.
-               ** IOVA is formed from this.
-               */
-               pide = sba_alloc_range(ioc, dma_cnt << IOVP_SHIFT);
-               pdirp = &(ioc->pdir_base[pide]);
+                       /*
+                       ** Not virtually contiguous.
+                       ** Terminate prev chunk.
+                       ** Start a new chunk.
+                       **
+                       ** Once we start a new VCONTIG chunk, dma_offset
+                       ** can't change. And we need the offset from the first
+                       ** chunk - not the last one. Ergo successive chunks
+                       ** must start on page boundaries and dovetail
+                       ** with their predecessors.
+                       */
+                       vcontig_sg->dma_length = vcontig_len;
 
-               /* fill_pdir: write stream into IO Pdir */
-               while (dma_cnt--) {
-                       sba_io_pdir_entry(pdirp, SG_ENT_PHYS_PAGE(startsg));
-                       startsg++;
-                       pdirp++;
-               }
+                       vcontig_sg = startsg;
+                       vcontig_len = startsg->length;
 
-               /* "output" IOVA */
-               sba_sg_iova(dma_sg) = SBA_IOVA(ioc,
-                                       ((dma_addr_t) pide << IOVP_SHIFT),
-                                       dma_offset,
-                                       DEFAULT_DMA_HINT_REG(direction));
-               sba_sg_iova_len(dma_sg) = dma_len;
+                       /*
+                       ** 3) do the entries end/start on page boundaries?
+                       **    Don't update vcontig_end until we've checked.
+                       */
+                       if (DMA_CONTIG(vcontig_end, vaddr))
+                       {
+                               vcontig_end = vcontig_len + vaddr;
+                               dma_len += vcontig_len;
+                               continue;
+                       } else {
+                               break;
+                       }
+               }
 
-               dma_sg++;
+               /*
+               ** End of DMA Stream
+               ** Terminate last VCONTIG block.
+               ** Allocate space for DMA stream.
+               */
+               vcontig_sg->dma_length = vcontig_len;
+               dma_len = (dma_len + dma_offset + ~IOVP_MASK) & IOVP_MASK;
+               ASSERT(dma_len <= DMA_CHUNK_SIZE);
+               dma_sg->dma_address = (dma_addr_t) (PIDE_FLAG
+                       | (sba_alloc_range(ioc, dma_len) << IOVP_SHIFT)
+                       | dma_offset);
                n_mappings++;
-       } while (nents);
+       }
 
        return n_mappings;
 }
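
Together with sba_fill_pdir() above, this gives sba_map_sg() the two-pass shape visible in the next hunk; condensed:

        coalesced = sba_coalesce_chunks(ioc, sglist, nents); /* group + allocate */
        filled    = sba_fill_pdir(ioc, sglist, nents);       /* program the pdir */
        ASSERT(coalesced == filled);  /* each allocated stream was programmed */
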
@@ -1075,60 +1243,51 @@ sba_coalesce_chunks(struct ioc *ioc, struct scatterlist *startsg,
 
 /**
  * sba_map_sg - map Scatter/Gather list
- * @dev: instance of PCI device owned by the driver that's asking.
+ * @dev: instance of PCI owned by the driver that's asking.
  * @sglist:  array of buffer/length pairs
  * @nents:  number of entries in list
- * @direction:  R/W or both.
+ * @dir:  R/W or both.
  *
  * See Documentation/DMA-mapping.txt
  */
-int sba_map_sg(struct pci_dev *dev, struct scatterlist *sglist, int nents,
-               int direction)
+int sba_map_sg(struct device *dev, struct scatterlist *sglist, int nents, int dir)
 {
        struct ioc *ioc;
-       int filled = 0;
+       int coalesced, filled = 0;
        unsigned long flags;
 #ifdef ALLOW_IOV_BYPASS
        struct scatterlist *sg;
 #endif
 
-       DBG_RUN_SG("%s() START %d entries, 0x%p,0x%x\n", __FUNCTION__, nents,
-               sba_sg_address(sglist), sba_sg_len(sglist));
-
-       if (!sba_list)
-               panic("sba_map_single: no SBA found!\n");
-
+       DBG_RUN_SG("%s() START %d entries\n", __FUNCTION__, nents);
        ioc = GET_IOC(dev);
        ASSERT(ioc);
 
 #ifdef ALLOW_IOV_BYPASS
-       if (dev->dma_mask >= ioc->dma_mask) {
-               for (sg = sglist ; filled < nents ; filled++, sg++) {
-                       sba_sg_iova(sg) = virt_to_phys(sba_sg_address(sg));
-                       sba_sg_iova_len(sg) = sba_sg_len(sg);
+       if (dev && dev->dma_mask && (ioc->dma_mask & ~*dev->dma_mask) == 0) {
+               for (sg = sglist ; filled < nents ; filled++, sg++){
+                       sg->dma_length = sg->length;
+                       sg->dma_address = virt_to_phys(sba_sg_address(sg));
                }
 #ifdef CONFIG_PROC_FS
                spin_lock_irqsave(&ioc->res_lock, flags);
                ioc->msg_bypass++;
                spin_unlock_irqrestore(&ioc->res_lock, flags);
 #endif
-               DBG_RUN_SG("%s() DONE %d mappings bypassed\n", __FUNCTION__, filled);
                return filled;
        }
 #endif
        /* Fast path single entry scatterlists. */
        if (nents == 1) {
-               sba_sg_iova(sglist) = sba_map_single(dev,
-                                                    (void *) sba_sg_iova(sglist),
-                                                    sba_sg_len(sglist), direction);
-               sba_sg_iova_len(sglist) = sba_sg_len(sglist);
+               sglist->dma_length = sglist->length;
+               sglist->dma_address = sba_map_single(dev, sba_sg_address(sglist), sglist->length,
+                                                    dir);
 #ifdef CONFIG_PROC_FS
                /*
                ** Should probably do some stats counting, but trying to
                ** be precise quickly starts wasting CPU time.
                */
 #endif
-               DBG_RUN_SG("%s() DONE 1 mapping\n", __FUNCTION__);
                return 1;
        }
 
@@ -1145,11 +1304,26 @@ int sba_map_sg(struct pci_dev *dev, struct scatterlist *sglist, int nents,
 #ifdef CONFIG_PROC_FS
        ioc->msg_calls++;
 #endif
+
        /*
-       ** coalesce and program the I/O Pdir
+       ** First coalesce the chunks and allocate I/O pdir space
+       **
+       ** If this is one DMA stream, we can properly map using the
+       ** correct virtual address associated with each DMA page.
+       ** w/o this association, we wouldn't have coherent DMA!
+       ** Access to the virtual address is what forces a two pass algorithm.
        */
-       filled = sba_coalesce_chunks(ioc, sglist, nents, direction);
+       coalesced = sba_coalesce_chunks(ioc, sglist, nents);
+
+       /*
+       ** Program the I/O Pdir
+       **
+       ** map the virtual addresses to the I/O Pdir
+       ** o dma_address will contain the pdir index
+       ** o dma_len will contain the number of bytes to map
+       ** o address contains the virtual address.
+       */
+       filled = sba_fill_pdir(ioc, sglist, nents);
 
 #ifdef ASSERT_PDIR_SANITY
        if (sba_check_pdir(ioc,"Check after sba_map_sg()"))
@@ -1161,6 +1335,7 @@ int sba_map_sg(struct pci_dev *dev, struct scatterlist *sglist, int nents,
 
        spin_unlock_irqrestore(&ioc->res_lock, flags);
 
+       ASSERT(coalesced == filled);
        DBG_RUN_SG("%s() DONE %d mappings\n", __FUNCTION__, filled);
 
        return filled;
@@ -1172,23 +1347,19 @@ int sba_map_sg(struct pci_dev *dev, struct scatterlist *sglist, int nents,
  * @dev: instance of PCI owned by the driver that's asking.
  * @sglist:  array of buffer/length pairs
  * @nents:  number of entries in list
- * @direction:  R/W or both.
+ * @dir:  R/W or both.
  *
  * See Documentation/DMA-mapping.txt
  */
-void sba_unmap_sg(struct pci_dev *dev, struct scatterlist *sglist, int nents,
-               int direction)
+void sba_unmap_sg (struct device *dev, struct scatterlist *sglist, int nents, int dir)
 {
        struct ioc *ioc;
 #ifdef ASSERT_PDIR_SANITY
        unsigned long flags;
 #endif
 
-       DBG_RUN_SG("%s() START %d entries, 0x%p,0x%x\n",
-               __FUNCTION__, nents, sba_sg_address(sglist), sba_sg_len(sglist));
-
-       if (!sba_list)
-               panic("sba_map_single: no SBA found!\n");
+       DBG_RUN_SG("%s() START %d entries,  %p,%x\n",
+               __FUNCTION__, nents, sba_sg_address(sglist), sglist->length);
 
        ioc = GET_IOC(dev);
        ASSERT(ioc);
@@ -1203,10 +1374,9 @@ void sba_unmap_sg(struct pci_dev *dev, struct scatterlist *sglist, int nents,
        spin_unlock_irqrestore(&ioc->res_lock, flags);
 #endif
 
-       while (sba_sg_len(sglist) && nents--) {
+       while (nents && sglist->dma_length) {
 
-               sba_unmap_single(dev, (dma_addr_t)sba_sg_iova(sglist),
-                                sba_sg_iova_len(sglist), direction);
+               sba_unmap_single(dev, sglist->dma_address, sglist->dma_length, dir);
 #ifdef CONFIG_PROC_FS
                /*
                ** This leaves inconsistent data in the stats, but we can't
@@ -1214,9 +1384,11 @@ void sba_unmap_sg(struct pci_dev *dev, struct scatterlist *sglist, int nents,
                ** were coalesced to a single entry.  The stats are fun,
                ** but speed is more important.
                */
-               ioc->usg_pages += (((u64)sba_sg_iova(sglist) & ~IOVP_MASK) + sba_sg_len(sglist) + IOVP_SIZE - 1) >> IOVP_SHIFT;
+               ioc->usg_pages += ((sglist->dma_address & ~IOVP_MASK) + sglist->dma_length
+                                  + IOVP_SIZE - 1) >> PAGE_SHIFT;
 #endif
-               ++sglist;
+               sglist++;
+               nents--;
        }
 
        DBG_RUN_SG("%s() DONE (nents %d)\n", __FUNCTION__,  nents);
@@ -1229,87 +1401,76 @@ void sba_unmap_sg(struct pci_dev *dev, struct scatterlist *sglist, int nents,
 
 }
 
-unsigned long
-sba_dma_address (struct scatterlist *sg)
-{
-       return ((unsigned long)sba_sg_iova(sg));
-}
-
-int
-sba_dma_supported (struct pci_dev *dev, u64 mask)
-{
-       return 1;
-}
-
 /**************************************************************
 *
 *   Initialization and claim
 *
 ***************************************************************/
 
-
-static void
-sba_ioc_init(struct sba_device *sba_dev, struct ioc *ioc, int ioc_num)
+static void __init
+ioc_iova_init(struct ioc *ioc)
 {
-       u32 iova_space_size, iova_space_mask;
-       void * pdir_base;
-       int pdir_size, iov_order, tcnfg;
+       u32 iova_space_mask;
+       int iov_order, tcnfg;
+       int agp_found = 0;
+       struct pci_dev *device;
+#ifdef FULL_VALID_PDIR
+       unsigned long index;
+#endif
 
        /*
-       ** Firmware programs the maximum IOV space size into the imask reg
+       ** Firmware programs the base and size of a "safe IOVA space"
+       ** (one that doesn't overlap memory or LMMIO space) in the
+       ** IBASE and IMASK registers.
        */
-       iova_space_size = ~(READ_REG(ioc->ioc_hpa + IOC_IMASK) & 0xFFFFFFFFUL) + 1;
+       ioc->ibase = READ_REG(ioc->ioc_hpa + IOC_IBASE) & ~0x1UL;
+       ioc->iov_size = ~(READ_REG(ioc->ioc_hpa + IOC_IMASK) & 0xFFFFFFFFUL) + 1;
 
        /*
        ** iov_order is always based on a 1GB IOVA space since we want to
        ** turn on the other half for AGP GART.
        */
-       iov_order = get_order(iova_space_size >> (IOVP_SHIFT-PAGE_SHIFT));
-       ioc->pdir_size = pdir_size = (iova_space_size/IOVP_SIZE) * sizeof(u64);
+       iov_order = get_order(ioc->iov_size >> (IOVP_SHIFT - PAGE_SHIFT));
+       ioc->pdir_size = (ioc->iov_size / IOVP_SIZE) * sizeof(u64);
 
-       DBG_INIT("%s() hpa 0x%lx IOV %dMB (%d bits) PDIR size 0x%0x\n",
-               __FUNCTION__, ioc->ioc_hpa, iova_space_size>>20,
-               iov_order + PAGE_SHIFT, ioc->pdir_size);
+       DBG_INIT("%s() hpa %p IOV %dMB (%d bits) PDIR size 0x%x\n",
+                __FUNCTION__, ioc->ioc_hpa, ioc->iov_size >> 20,
+                iov_order + PAGE_SHIFT, ioc->pdir_size);
 
-       /* XXX DMA HINTs not used */
+       /* FIXME : DMA HINTs not used */
        ioc->hint_shift_pdir = iov_order + PAGE_SHIFT;
        ioc->hint_mask_pdir = ~(0x3 << (iov_order + PAGE_SHIFT));
 
-       ioc->pdir_base = pdir_base =
-               (void *) __get_free_pages(GFP_KERNEL, get_order(pdir_size));
-       if (NULL == pdir_base)
-       {
-               panic(__FILE__ ":%s() could not allocate I/O Page Table\n", __FUNCTION__);
-       }
-       memset(pdir_base, 0, pdir_size);
+       ioc->pdir_base = (void *) __get_free_pages(GFP_KERNEL,
+                                                  get_order(ioc->pdir_size));
+       if (!ioc->pdir_base)
+               panic(PFX "Couldn't allocate I/O Page Table\n");
+
+       memset(ioc->pdir_base, 0, ioc->pdir_size);
 
        DBG_INIT("%s() pdir %p size %x hint_shift_pdir %x hint_mask_pdir %lx\n",
-               __FUNCTION__, pdir_base, pdir_size,
+               __FUNCTION__, ioc->pdir_base, ioc->pdir_size,
                ioc->hint_shift_pdir, ioc->hint_mask_pdir);
 
-       ASSERT((((unsigned long) pdir_base) & PAGE_MASK) == (unsigned long) pdir_base);
-       WRITE_REG(virt_to_phys(pdir_base), ioc->ioc_hpa + IOC_PDIR_BASE);
+       ASSERT((((unsigned long) ioc->pdir_base) & PAGE_MASK) == (unsigned long) ioc->pdir_base);
+       WRITE_REG(virt_to_phys(ioc->pdir_base), ioc->ioc_hpa + IOC_PDIR_BASE);
 
-       DBG_INIT(" base %p\n", pdir_base);
+       DBG_INIT(" base %p\n", ioc->pdir_base);
 
        /* build IMASK for IOC and Elroy */
        iova_space_mask =  0xffffffff;
-       iova_space_mask <<= (iov_order + IOVP_SHIFT);
-
-       ioc->ibase = READ_REG(ioc->ioc_hpa + IOC_IBASE) & 0xFFFFFFFEUL;
-
-       ioc->imask = iova_space_mask;   /* save it */
+       iova_space_mask <<= (iov_order + PAGE_SHIFT);
+       ioc->imask = iova_space_mask;
 
        DBG_INIT("%s() IOV base 0x%lx mask 0x%0lx\n",
                __FUNCTION__, ioc->ibase, ioc->imask);
 
        /*
-       ** XXX DMA HINT registers are programmed with default hint
+       ** FIXME: Hint registers are programmed with default hint
        ** values during boot, so hints should be sane even if we
        ** can't reprogram them the way drivers want.
        */
-
-       WRITE_REG(ioc->imask, ioc->ioc_hpa+IOC_IMASK);
+       WRITE_REG(ioc->imask, ioc->ioc_hpa + IOC_IMASK);
 
        /*
        ** Setting the upper bits makes checking for bypass addresses
@@ -1317,34 +1478,30 @@ sba_ioc_init(struct sba_device *sba_dev, struct ioc *ioc, int ioc_num)
        */
        ioc->imask |= 0xFFFFFFFF00000000UL;
 
-       /* Set I/O Pdir page size to system page size */
-       switch (IOVP_SHIFT) {
-               case 12: /* 4K */
-                       tcnfg = 0;
-                       break;
-               case 13: /* 8K */
-                       tcnfg = 1;
-                       break;
-               case 14: /* 16K */
-                       tcnfg = 2;
-                       break;
-               case 16: /* 64K */
-                       tcnfg = 3;
+       /* Set I/O PDIR Page size to system page size */
+       switch (PAGE_SHIFT) {
+               case 12: tcnfg = 0; break;      /*  4K */
+               case 13: tcnfg = 1; break;      /*  8K */
+               case 14: tcnfg = 2; break;      /* 16K */
+               case 16: tcnfg = 3; break;      /* 64K */
+               default:
+                       panic(PFX "Unsupported system page size %d",
+                               1 << PAGE_SHIFT);
                        break;
        }
-       WRITE_REG(tcnfg, ioc->ioc_hpa+IOC_TCNFG);
+       WRITE_REG(tcnfg, ioc->ioc_hpa + IOC_TCNFG);
 
        /*
        ** Program the IOC's ibase and enable IOVA translation
        ** Bit zero == enable bit.
        */
-       WRITE_REG(ioc->ibase | 1, ioc->ioc_hpa+IOC_IBASE);
+       WRITE_REG(ioc->ibase | 1, ioc->ioc_hpa + IOC_IBASE);
 
        /*
        ** Clear I/O TLB of any possible entries.
        ** (Yes. This is a bit paranoid...but so what)
        */
-       WRITE_REG(0 | 31, ioc->ioc_hpa+IOC_PCOM);
+       WRITE_REG(ioc->ibase | (iov_order + PAGE_SHIFT), ioc->ioc_hpa + IOC_PCOM);
 
        /*
        ** If an AGP device is present, only use half of the IOV space
@@ -1354,346 +1511,468 @@ sba_ioc_init(struct sba_device *sba_dev, struct ioc *ioc, int ioc_num)
        ** We program the next pdir index after we stop w/ a key for
        ** the GART code to handshake on.
        */
-       if (SBA_GET_AGP(sba_dev)) {
-               DBG_INIT("%s() AGP Device found, reserving 512MB for GART support\n", __FUNCTION__);
+       pci_for_each_dev(device)
+               agp_found |= pci_find_capability(device, PCI_CAP_ID_AGP);
+
+       if (agp_found && reserve_sba_gart) {
+               DBG_INIT("%s: AGP device found, reserving half of IOVA for GART support\n",
+                        __FUNCTION__);
                ioc->pdir_size /= 2;
-               ((u64 *)pdir_base)[PDIR_INDEX(iova_space_size/2)] = 0x0000badbadc0ffeeULL;
+               ((u64 *)ioc->pdir_base)[PDIR_INDEX(ioc->iov_size/2)] = ZX1_SBA_IOMMU_COOKIE;
        }
+#ifdef FULL_VALID_PDIR
+       /*
+       ** Check to see if the spill page has been allocated; we don't need
+       ** more than one across multiple SBAs.
+       */
+       if (!prefetch_spill_page) {
+               char *spill_poison = "SBAIOMMU POISON";
+               int poison_size = 16;
+               void *poison_addr, *addr;
+
+               addr = (void *)__get_free_pages(GFP_KERNEL, get_order(IOVP_SIZE));
+               if (!addr)
+                       panic(PFX "Couldn't allocate PDIR spill page\n");
+
+               poison_addr = addr;
+               for ( ; (u64) poison_addr < (u64) addr + IOVP_SIZE; poison_addr += poison_size)
+                       memcpy(poison_addr, spill_poison, poison_size);
+
+               prefetch_spill_page = virt_to_phys(addr);
+
+               DBG_INIT("%s() prefetch spill addr: 0x%lx\n", __FUNCTION__, prefetch_spill_page);
+       }
+       /*
+       ** Set all the PDIR entries valid w/ the spill page as the target
+       */
+       for (index = 0 ; index < (ioc->pdir_size / sizeof(u64)) ; index++)
+               ((u64 *)ioc->pdir_base)[index] = (0x80000000000000FF | prefetch_spill_page);
+#endif
 
-       DBG_INIT("%s() DONE\n", __FUNCTION__);
 }
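/*
 * Illustrative user-space sketch (not part of this patch) of the two
 * derivations in ioc_iova_init() above: the I/O pdir size that follows
 * from the IOVA window, and the TCNFG encoding for the I/O page size.
 * The 4K page size and 1GB window are assumptions for the example.
 */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	unsigned int page_shift = 12;			/* 4K system pages */
	uint64_t iov_size = 1ULL << 30;			/* 1GB IOVA window */
	uint64_t iovp_size = 1ULL << page_shift;
	uint64_t pdir_size = (iov_size / iovp_size) * sizeof(uint64_t);
	int tcnfg;

	switch (page_shift) {				/* same encoding as the driver */
	case 12: tcnfg = 0; break;			/*  4K */
	case 13: tcnfg = 1; break;			/*  8K */
	case 14: tcnfg = 2; break;			/* 16K */
	case 16: tcnfg = 3; break;			/* 64K */
	default: return 1;				/* unsupported page size */
	}

	/* 1GB / 4K = 256K entries, 8 bytes each = 2MB of pdir */
	printf("pdir_size=%llu bytes, tcnfg=%d\n",
	       (unsigned long long) pdir_size, tcnfg);
	return 0;
}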
 
+static void __init
+ioc_resource_init(struct ioc *ioc)
+{
+       spin_lock_init(&ioc->res_lock);
 
+       /* resource map size dictated by pdir_size */
+       ioc->res_size = ioc->pdir_size / sizeof(u64); /* entries */
+       ioc->res_size >>= 3;  /* convert bit count to byte count */
+       DBG_INIT("%s() res_size 0x%x\n", __FUNCTION__, ioc->res_size);
 
-/**************************************************************************
-**
-**   SBA initialization code (HW and SW)
-**
-**   o identify SBA chip itself
-**   o FIXME: initialize DMA hints for reasonable defaults
-**
-**************************************************************************/
+       ioc->res_map = (char *) __get_free_pages(GFP_KERNEL,
+                                                get_order(ioc->res_size));
+       if (!ioc->res_map)
+               panic(PFX "Couldn't allocate resource map\n");
 
-static void
-sba_hw_init(struct sba_device *sba_dev)
-{ 
-       int i;
-       int num_ioc;
-       u64 dma_mask;
-       u32 func_id;
+       memset(ioc->res_map, 0, ioc->res_size);
+       /* next available IOVP - circular search */
+       ioc->res_hint = (unsigned long *) ioc->res_map;
 
-       /*
-       ** Identify the SBA so we can set the dma_mask.  We can make a virtual
-       ** dma_mask of the memory subsystem such that devices not implmenting
-       ** a full 64bit mask might still be able to bypass efficiently.
-       */
-       func_id = READ_REG(sba_dev->sba_hpa + SBA_FUNC_ID);
+#ifdef ASSERT_PDIR_SANITY
+       /* Mark first bit busy - ie no IOVA 0 */
+       ioc->res_map[0] = 0x1;
+       ioc->pdir_base[0] = 0x8000000000000000ULL | ZX1_SBA_IOMMU_COOKIE;
+#endif
+#ifdef FULL_VALID_PDIR
+       /* Mark the last resource used so we don't prefetch beyond IOVA space */
+       ioc->res_map[ioc->res_size - 1] |= 0x80UL; /* res_map is chars */
+       ioc->pdir_base[(ioc->pdir_size / sizeof(u64)) - 1] = (0x80000000000000FF
+                                                             | prefetch_spill_page);
+#endif
 
-       if (func_id == ZX1_FUNC_ID_VALUE) {
-               dma_mask = 0xFFFFFFFFFFUL;
-       } else {
-               dma_mask = 0xFFFFFFFFFFFFFFFFUL;
-       }
+       DBG_INIT("%s() res_map %x %p\n", __FUNCTION__,
+                ioc->res_size, (void *) ioc->res_map);
+}
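/*
 * Sketch (illustrative only) of the resource-map sizing above: the bitmap
 * carries one bit per 64-bit pdir entry, so res_size is the entry count
 * divided by eight.  The 2MB pdir value is carried over from the previous
 * example and is an assumption.
 */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t pdir_size = 2 * 1024 * 1024;	/* bytes of I/O pdir */
	uint64_t entries   = pdir_size / sizeof(uint64_t);
	uint64_t res_size  = entries >> 3;	/* bit count -> byte count */

	printf("%llu entries -> %llu-byte bitmap\n",
	       (unsigned long long) entries, (unsigned long long) res_size);
	return 0;
}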
+
+static void __init
+ioc_sac_init(struct ioc *ioc)
+{
+       struct pci_dev *sac = NULL;
+       struct pci_controller *controller = NULL;
 
-       DBG_INIT("%s(): ioc->dma_mask == 0x%lx\n", __FUNCTION__, dma_mask);
-       
        /*
-       ** Leaving in the multiple ioc code from parisc for the future,
-       ** currently there are no muli-ioc mckinley sbas
-       */
-       sba_dev->ioc[0].ioc_hpa = SBA_IOC_OFFSET;
-       num_ioc = 1;
-
-       sba_dev->num_ioc = num_ioc;
-       for (i = 0; i < num_ioc; i++) {
-               sba_dev->ioc[i].dma_mask = dma_mask;
-               sba_dev->ioc[i].ioc_hpa += sba_dev->sba_hpa;
-               sba_ioc_init(sba_dev, &(sba_dev->ioc[i]), i);
-       }
+        * pci_alloc_coherent() must return a DMA address which is
+        * SAC (single address cycle) addressable, so allocate a
+        * pseudo-device to enforce that.
+        */
+       sac = kmalloc(sizeof(*sac), GFP_KERNEL);
+       if (!sac)
+               panic(PFX "Couldn't allocate struct pci_dev");
+       memset(sac, 0, sizeof(*sac));
+
+       controller = kmalloc(sizeof(*controller), GFP_KERNEL);
+       if (!controller)
+               panic(PFX "Couldn't allocate struct pci_controller");
+       memset(controller, 0, sizeof(*controller));
+
+       controller->iommu = ioc;
+       sac->sysdata = controller;
+       sac->dma_mask = 0xFFFFFFFFUL;
+#ifdef CONFIG_PCI
+       sac->dev.bus = &pci_bus_type;
+#endif
+       ioc->sac_only_dev = sac;
 }
 
-static void
-sba_common_init(struct sba_device *sba_dev)
+static void __init
+ioc_zx1_init(struct ioc *ioc)
 {
-       int i;
+       if (ioc->rev < 0x20)
+               panic(PFX "IOC 2.0 or later required for IOMMU support\n");
 
-       /* add this one to the head of the list (order doesn't matter)
-       ** This will be useful for debugging - especially if we get coredumps
-       */
-       sba_dev->next = sba_list;
-       sba_list = sba_dev;
-       sba_count++;
+       ioc->dma_mask = 0xFFFFFFFFFFUL;
+}
 
-       for(i=0; i< sba_dev->num_ioc; i++) {
-               int res_size;
+typedef void (initfunc)(struct ioc *);
 
-               /* resource map size dictated by pdir_size */
-               res_size = sba_dev->ioc[i].pdir_size/sizeof(u64); /* entries */
-               res_size >>= 3;  /* convert bit count to byte count */
-               DBG_INIT("%s() res_size 0x%x\n",
-                       __FUNCTION__, res_size);
+struct ioc_iommu {
+       u32 func_id;
+       char *name;
+       initfunc *init;
+};
 
-               sba_dev->ioc[i].res_size = res_size;
-               sba_dev->ioc[i].res_map = (char *) __get_free_pages(GFP_KERNEL, get_order(res_size));
+static struct ioc_iommu ioc_iommu_info[] __initdata = {
+       { ZX1_IOC_ID, "zx1", ioc_zx1_init },
+       { REO_IOC_ID, "REO" },
+       { SX1000_IOC_ID, "sx1000" },
+};
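/*
 * User-space sketch of the table-driven probe that ioc_init() performs
 * with ioc_iommu_info[] above: match a hardware function ID against a
 * static table and run the optional per-chip init hook.  The IDs and
 * names here are made up, not the real ZX1/REO/sx1000 values.
 */
#include <stdio.h>
#include <stddef.h>

struct chip_info {
	unsigned int func_id;
	const char *name;
	void (*init)(void);
};

static void chip_a_init(void)
{
	printf("chip A quirks applied\n");
}

static const struct chip_info chip_table[] = {
	{ 0x1234, "chipA", chip_a_init },
	{ 0x5678, "chipB" },		/* no init hook required */
};

int main(void)
{
	unsigned int func_id = 0x1234;	/* normally read from the device */
	size_t i;

	for (i = 0; i < sizeof(chip_table) / sizeof(chip_table[0]); i++) {
		if (chip_table[i].func_id == func_id) {
			printf("found %s\n", chip_table[i].name);
			if (chip_table[i].init)
				chip_table[i].init();
		}
	}
	return 0;
}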
 
-               if (NULL == sba_dev->ioc[i].res_map)
-               {
-                       panic(__FILE__ ":%s() could not allocate resource map\n", __FUNCTION__ );
-               }
+static struct ioc * __init
+ioc_init(u64 hpa, void *handle)
+{
+       struct ioc *ioc;
+       struct ioc_iommu *info;
 
-               memset(sba_dev->ioc[i].res_map, 0, res_size);
-               /* next available IOVP - circular search */
-               if ((sba_dev->hw_rev & 0xFF) >= 0x20) {
-                       sba_dev->ioc[i].res_hint = (unsigned long *)
-                           sba_dev->ioc[i].res_map;
-               } else {
-                       u64 reserved_iov;
+       ioc = kmalloc(sizeof(*ioc), GFP_KERNEL);
+       if (!ioc)
+               return NULL;
 
-                       /* Yet another 1.x hack */
-                       printk(KERN_DEBUG "zx1 1.x: Starting resource hint offset into "
-                              "IOV space to avoid initial zero value IOVA\n");
-                       sba_dev->ioc[i].res_hint = (unsigned long *)
-                           &(sba_dev->ioc[i].res_map[L1_CACHE_BYTES]);
+       memset(ioc, 0, sizeof(*ioc));
 
-                       sba_dev->ioc[i].res_map[0] = 0x1;
-                       sba_dev->ioc[i].pdir_base[0] = 0x8000badbadc0ffeeULL;
+       ioc->next = ioc_list;
+       ioc_list = ioc;
 
-                       for (reserved_iov = 0xA0000 ; reserved_iov < 0xC0000 ; reserved_iov += IOVP_SIZE) {
-                               u64 *res_ptr = (u64 *) sba_dev->ioc[i].res_map;
-                               int index = PDIR_INDEX(reserved_iov);
-                               int res_word;
-                               u64 mask;
+       ioc->handle = handle;
+       ioc->ioc_hpa = ioremap(hpa, 0x1000);
 
-                               res_word = (int)(index / BITS_PER_LONG);
-                               mask =  0x1UL << (index - (res_word * BITS_PER_LONG));
-                               res_ptr[res_word] |= mask;
-                               sba_dev->ioc[i].pdir_base[PDIR_INDEX(reserved_iov)] = (SBA_VALID_MASK | reserved_iov);
+       ioc->func_id = READ_REG(ioc->ioc_hpa + IOC_FUNC_ID);
+       ioc->rev = READ_REG(ioc->ioc_hpa + IOC_FCLASS) & 0xFFUL;
+       ioc->dma_mask = 0xFFFFFFFFFFFFFFFFUL;   /* conservative */
 
-                       }
+       for (info = ioc_iommu_info; info < ioc_iommu_info + ARRAY_SIZE(ioc_iommu_info); info++) {
+               if (ioc->func_id == info->func_id) {
+                       ioc->name = info->name;
+                       if (info->init)
+                               (info->init)(ioc);
                }
+       }
 
-#ifdef ASSERT_PDIR_SANITY
-               /* Mark first bit busy - ie no IOVA 0 */
-               sba_dev->ioc[i].res_map[0] = 0x1;
-               sba_dev->ioc[i].pdir_base[0] = 0x8000badbadc0ffeeULL;
-#endif
-
-               DBG_INIT("%s() %d res_map %x %p\n", __FUNCTION__,
-                        i, res_size, (void *)sba_dev->ioc[i].res_map);
+       if (!ioc->name) {
+               ioc->name = kmalloc(24, GFP_KERNEL);
+               if (ioc->name)
+                       sprintf((char *) ioc->name, "Unknown (%04x:%04x)",
+                               ioc->func_id & 0xFFFF, (ioc->func_id >> 16) & 0xFFFF);
+               else
+                       ioc->name = "Unknown";
        }
 
-       sba_dev->sba_lock = SPIN_LOCK_UNLOCKED;
+       ioc_iova_init(ioc);
+       ioc_resource_init(ioc);
+       ioc_sac_init(ioc);
+
+       printk(KERN_INFO PFX
+               "%s %d.%d HPA 0x%lx IOVA space %dMb at 0x%lx\n",
+               ioc->name, (ioc->rev >> 4) & 0xF, ioc->rev & 0xF,
+               hpa, ioc->iov_size >> 20, ioc->ibase);
+
+       return ioc;
 }
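/*
 * Small sketch of the revision formatting in the printk above: the FCLASS
 * byte packs major.minor into two nibbles, so 0x21 prints as "2.1".  The
 * sample value is an assumption.
 */
#include <stdio.h>

int main(void)
{
	unsigned int rev = 0x21;	/* example FCLASS revision byte */

	printf("%d.%d\n", (rev >> 4) & 0xF, rev & 0xF);	/* prints 2.1 */
	return 0;
}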
 
+
+
+/**************************************************************************
+**
+**   SBA initialization code (HW and SW)
+**
+**   o identify SBA chip itself
+**   o FIXME: initialize DMA hints for reasonable defaults
+**
+**************************************************************************/
+
 #ifdef CONFIG_PROC_FS
-static int sba_proc_info(char *buf, char **start, off_t offset, int len)
+static void *
+ioc_start(struct seq_file *s, loff_t *pos)
 {
-       struct sba_device *sba_dev;
        struct ioc *ioc;
-       int total_pages;
-       unsigned long i = 0, avg = 0, min, max;
+       loff_t n = *pos;
 
-       for (sba_dev = sba_list; sba_dev; sba_dev = sba_dev->next) {
-               ioc = &sba_dev->ioc[0]; /* FIXME: Multi-IOC support! */
-               total_pages = (int) (ioc->res_size << 3); /* 8 bits per byte */
+       for (ioc = ioc_list; ioc; ioc = ioc->next)
+               if (!n--)
+                       return ioc;
 
-               sprintf(buf, "%s rev %d.%d\n", "Hewlett-Packard zx1 SBA",
-                       ((sba_dev->hw_rev >> 4) & 0xF), (sba_dev->hw_rev & 0xF));
-               sprintf(buf, "%sIO PDIR size    : %d bytes (%d entries)\n", buf,
-                       (int) ((ioc->res_size << 3) * sizeof(u64)), /* 8 bits/byte */ total_pages);
+       return NULL;
+}
 
-               sprintf(buf, "%sIO PDIR entries : %ld free  %ld used (%d%%)\n", buf,
-                       total_pages - ioc->used_pages, ioc->used_pages,
-                       (int) (ioc->used_pages * 100 / total_pages));
+static void *
+ioc_next(struct seq_file *s, void *v, loff_t *pos)
+{
+       struct ioc *ioc = v;
 
-               sprintf(buf, "%sResource bitmap : %d bytes (%d pages)\n", 
-                       buf, ioc->res_size, ioc->res_size << 3);   /* 8 bits per byte */
+       ++*pos;
+       return ioc->next;
+}
 
-               min = max = ioc->avg_search[0];
-               for (i = 0; i < SBA_SEARCH_SAMPLE; i++) {
-                       avg += ioc->avg_search[i];
-                       if (ioc->avg_search[i] > max) max = ioc->avg_search[i];
-                       if (ioc->avg_search[i] < min) min = ioc->avg_search[i];
-               }
-               avg /= SBA_SEARCH_SAMPLE;
-               sprintf(buf, "%s  Bitmap search : %ld/%ld/%ld (min/avg/max CPU Cycles)\n",
-                       buf, min, avg, max);
+static void
+ioc_stop(struct seq_file *s, void *v)
+{
+}
 
-               sprintf(buf, "%spci_map_single(): %12ld calls  %12ld pages (avg %d/1000)\n",
-                       buf, ioc->msingle_calls, ioc->msingle_pages,
-                       (int) ((ioc->msingle_pages * 1000)/ioc->msingle_calls));
+static int
+ioc_show(struct seq_file *s, void *v)
+{
+       struct ioc *ioc = v;
+       int total_pages = (int) (ioc->res_size << 3); /* 8 bits per byte */
+       unsigned long i = 0, avg = 0, min, max;
+
+       seq_printf(s, "Hewlett Packard %s IOC rev %d.%d\n",
+               ioc->name, ((ioc->rev >> 4) & 0xF), (ioc->rev & 0xF));
+       seq_printf(s, "IO PDIR size    : %d bytes (%d entries)\n",
+               (int) ((ioc->res_size << 3) * sizeof(u64)), /* 8 bits/byte */
+               total_pages);
+
+       seq_printf(s, "IO PDIR entries : %ld free  %ld used (%d%%)\n",
+               total_pages - ioc->used_pages, ioc->used_pages,
+               (int) (ioc->used_pages * 100 / total_pages));
+
+       seq_printf(s, "Resource bitmap : %d bytes (%d pages)\n",
+               ioc->res_size, ioc->res_size << 3);   /* 8 bits per byte */
+
+       min = max = ioc->avg_search[0];
+       for (i = 0; i < SBA_SEARCH_SAMPLE; i++) {
+               avg += ioc->avg_search[i];
+               if (ioc->avg_search[i] > max) max = ioc->avg_search[i];
+               if (ioc->avg_search[i] < min) min = ioc->avg_search[i];
+       }
+       avg /= SBA_SEARCH_SAMPLE;
+       seq_printf(s, "  Bitmap search : %ld/%ld/%ld (min/avg/max CPU Cycles)\n", min, avg, max);
+
+       seq_printf(s, "pci_map_single(): %12ld calls  %12ld pages (avg %d/1000)\n",
+                  ioc->msingle_calls, ioc->msingle_pages,
+                  (int) ((ioc->msingle_pages * 1000)/ioc->msingle_calls));
 #ifdef ALLOW_IOV_BYPASS
-               sprintf(buf, "%spci_map_single(): %12ld bypasses\n",
-                       buf, ioc->msingle_bypass);
+       seq_printf(s, "pci_map_single(): %12ld bypasses\n", ioc->msingle_bypass);
 #endif
 
-               sprintf(buf, "%spci_unmap_single: %12ld calls  %12ld pages (avg %d/1000)\n",
-                       buf, ioc->usingle_calls, ioc->usingle_pages,
-                       (int) ((ioc->usingle_pages * 1000)/ioc->usingle_calls));
+       seq_printf(s, "pci_unmap_single: %12ld calls  %12ld pages (avg %d/1000)\n",
+                  ioc->usingle_calls, ioc->usingle_pages,
+                  (int) ((ioc->usingle_pages * 1000)/ioc->usingle_calls));
 #ifdef ALLOW_IOV_BYPASS
-               sprintf(buf, "%spci_unmap_single: %12ld bypasses\n",
-                       buf, ioc->usingle_bypass);
+       seq_printf(s, "pci_unmap_single: %12ld bypasses\n", ioc->usingle_bypass);
 #endif
 
-               sprintf(buf, "%spci_map_sg()    : %12ld calls  %12ld pages (avg %d/1000)\n",
-                       buf, ioc->msg_calls, ioc->msg_pages,
-                       (int) ((ioc->msg_pages * 1000)/ioc->msg_calls));
+       seq_printf(s, "pci_map_sg()    : %12ld calls  %12ld pages (avg %d/1000)\n",
+                  ioc->msg_calls, ioc->msg_pages,
+                  (int) ((ioc->msg_pages * 1000)/ioc->msg_calls));
 #ifdef ALLOW_IOV_BYPASS
-               sprintf(buf, "%spci_map_sg()    : %12ld bypasses\n",
-                       buf, ioc->msg_bypass);
+       seq_printf(s, "pci_map_sg()    : %12ld bypasses\n", ioc->msg_bypass);
 #endif
 
-               sprintf(buf, "%spci_unmap_sg()  : %12ld calls  %12ld pages (avg %d/1000)\n",
-                       buf, ioc->usg_calls, ioc->usg_pages,
-                       (int) ((ioc->usg_pages * 1000)/ioc->usg_calls));
-       }
-       return strlen(buf);
+       seq_printf(s, "pci_unmap_sg()  : %12ld calls  %12ld pages (avg %d/1000)\n",
+                  ioc->usg_calls, ioc->usg_pages, (int) ((ioc->usg_pages * 1000)/ioc->usg_calls));
+
+       return 0;
 }
 
+static struct seq_operations ioc_seq_ops = {
+       .start = ioc_start,
+       .next  = ioc_next,
+       .stop  = ioc_stop,
+       .show  = ioc_show
+};
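/*
 * User-space sketch of the seq_file iterator trio above: start() walks
 * *pos steps down a singly linked list, next() advances one node and
 * bumps *pos, stop() has nothing to release.  The three-node list is a
 * stand-in for ioc_list.
 */
#include <stdio.h>

struct node { int val; struct node *next; };

static struct node n2 = { 2, NULL };
static struct node n1 = { 1, &n2 };
static struct node n0 = { 0, &n1 };
static struct node *list = &n0;

static struct node *it_start(long long *pos)
{
	struct node *n = list;
	long long skip = *pos;

	while (n && skip-- > 0)
		n = n->next;
	return n;
}

static struct node *it_next(struct node *n, long long *pos)
{
	++*pos;
	return n->next;
}

int main(void)
{
	long long pos = 0;
	struct node *n;

	for (n = it_start(&pos); n; n = it_next(n, &pos))
		printf("pos=%lld val=%d\n", pos, n->val);
	return 0;
}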
+
 static int
-sba_resource_map(char *buf, char **start, off_t offset, int len)
+ioc_open(struct inode *inode, struct file *file)
 {
-       struct ioc *ioc = sba_list->ioc;        /* FIXME: Multi-IOC support! */
-       unsigned int *res_ptr;
-       int i;
+       return seq_open(file, &ioc_seq_ops);
+}
 
-       if (!ioc)
-               return 0;
-
-       res_ptr = (unsigned int *)ioc->res_map;
-       buf[0] = '\0';
-       for(i = 0; i < (ioc->res_size / sizeof(unsigned int)); ++i, ++res_ptr) {
-               if ((i & 7) == 0)
-                   strcat(buf,"\n   ");
-               sprintf(buf, "%s %08x", buf, *res_ptr);
-       }
-       strcat(buf, "\n");
+static struct file_operations ioc_fops = {
+       .open    = ioc_open,
+       .read    = seq_read,
+       .llseek  = seq_lseek,
+       .release = seq_release
+};
+
+static int
+ioc_map_show(struct seq_file *s, void *v)
+{
+       struct ioc *ioc = v;
+       unsigned int i, *res_ptr = (unsigned int *)ioc->res_map;
+
+       for (i = 0; i < ioc->res_size / sizeof(unsigned int); ++i, ++res_ptr)
+               seq_printf(s, "%s%08x", (i & 7) ? " " : "\n   ", *res_ptr);
+       seq_printf(s, "\n");
 
-       return strlen(buf);
+       return 0;
 }
-#endif
 
-/*
-** Determine if sba should claim this chip (return 0) or not (return 1).
-** If so, initialize the chip and tell other partners in crime they
-** have work to do.
-*/
-void __init sba_init(void)
-{
-       struct sba_device *sba_dev;
-       u32 func_id, hw_rev;
-       u32 *func_offset = NULL;
-       int i, agp_found = 0;
-       static char sba_rev[6];
-       struct pci_dev *device = NULL;
-       u64 hpa = 0;
-
-       if (!(device = pci_find_device(PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_ZX1_SBA, NULL)))
-               return;
+static struct seq_operations ioc_map_ops = {
+       .start = ioc_start,
+       .next  = ioc_next,
+       .stop  = ioc_stop,
+       .show  = ioc_map_show
+};
 
-       for (i = 0; i < PCI_NUM_RESOURCES; i++) {
-               if (pci_resource_flags(device, i) == IORESOURCE_MEM) {
-                       hpa = (u64) ioremap(pci_resource_start(device, i),
-                                           pci_resource_len(device, i));
-                       break;
-               }
-       }
+static int
+ioc_map_open(struct inode *inode, struct file *file)
+{
+       return seq_open(file, &ioc_map_ops);
+}
 
-       func_id = READ_REG(hpa + SBA_FUNC_ID);
-       if (func_id != ZX1_FUNC_ID_VALUE)
-               return;
+static struct file_operations ioc_map_fops = {
+       .open    = ioc_map_open,
+       .read    = seq_read,
+       .llseek  = seq_lseek,
+       .release = seq_release
+};
 
-       strcpy(sba_rev, "zx1");
-       func_offset = zx1_func_offsets;
+static void __init
+ioc_proc_init(void)
+{
+       if (ioc_list) {
+               struct proc_dir_entry *dir, *entry;
 
-       /* Read HW Rev First */
-       hw_rev = READ_REG(hpa + SBA_FCLASS) & 0xFFUL;
+               dir = proc_mkdir("bus/mckinley", 0);
+               entry = create_proc_entry(ioc_list->name, 0, dir);
+               if (entry)
+                       entry->proc_fops = &ioc_fops;
 
-       /*
-        * Not all revision registers of the chipset are updated on every
-        * turn.  Must scan through all functions looking for the highest rev
-        */
-       if (func_offset) {
-               for (i = 0 ; func_offset[i] != -1 ; i++) {
-                       u32 func_rev;
-
-                       func_rev = READ_REG(hpa + SBA_FCLASS + func_offset[i]) & 0xFFUL;
-                       DBG_INIT("%s() func offset: 0x%x rev: 0x%x\n",
-                                __FUNCTION__, func_offset[i], func_rev);
-                       if (func_rev > hw_rev)
-                               hw_rev = func_rev;
-               }
+               entry = create_proc_entry("bitmap", 0, dir);
+               if (entry)
+                       entry->proc_fops = &ioc_map_fops;
        }
+}
+#endif
 
-       printk(KERN_INFO "%s found %s %d.%d at %s, HPA 0x%lx\n", DRIVER_NAME,
-              sba_rev, ((hw_rev >> 4) & 0xF), (hw_rev & 0xF),
-              device->slot_name, hpa);
+void
+sba_connect_bus(struct pci_bus *bus)
+{
+       acpi_handle handle, parent;
+       acpi_status status;
+       struct ioc *ioc;
+
+       if (!PCI_CONTROLLER(bus))
+               panic(PFX "no sysdata on bus %d!\n", bus->number);
 
-       if ((hw_rev & 0xFF) < 0x20) {
-               printk(KERN_INFO "%s: SBA rev less than 2.0 not supported", DRIVER_NAME);
+       if (PCI_CONTROLLER(bus)->iommu)
                return;
-       }
 
-       sba_dev = kmalloc(sizeof(struct sba_device), GFP_KERNEL);
-       if (NULL == sba_dev) {
-               printk(KERN_ERR DRIVER_NAME " - couldn't alloc sba_device\n");
+       handle = PCI_CONTROLLER(bus)->acpi_handle;
+       if (!handle)
                return;
-       }
 
-       memset(sba_dev, 0, sizeof(struct sba_device));
+       /*
+        * The IOC scope encloses PCI root bridges in the ACPI
+        * namespace, so work our way out until we find an IOC we
+        * claimed previously.
+        */
+       do {
+               for (ioc = ioc_list; ioc; ioc = ioc->next)
+                       if (ioc->handle == handle) {
+                               PCI_CONTROLLER(bus)->iommu = ioc;
+                               return;
+                       }
 
-       for(i=0; i<MAX_IOC; i++)
-               spin_lock_init(&(sba_dev->ioc[i].res_lock));
+               status = acpi_get_parent(handle, &parent);
+               handle = parent;
+       } while (ACPI_SUCCESS(status));
 
-       sba_dev->hw_rev = hw_rev;
-       sba_dev->sba_hpa = hpa;
+       printk(KERN_WARNING "No IOC for PCI Bus %02x:%02x in ACPI\n", PCI_SEGMENT(bus), bus->number);
+}
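/*
 * Sketch (illustrative, plain C) of the outward walk in sba_connect_bus()
 * above: starting from a node, compare each ancestor against what was
 * claimed earlier.  The little tree stands in for the ACPI namespace.
 */
#include <stdio.h>

struct ns_node { const char *name; struct ns_node *parent; };

static struct ns_node ioc    = { "IOC",             NULL };
static struct ns_node bridge = { "PCI root bridge", &ioc };
static struct ns_node dev    = { "device",          &bridge };

static struct ns_node *claimed = &ioc;	/* what ioc_init() recorded */

int main(void)
{
	struct ns_node *n;

	for (n = &dev; n; n = n->parent) {
		if (n == claimed) {
			printf("matched %s\n", n->name);
			return 0;
		}
	}
	printf("no IOC found\n");
	return 1;
}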
 
-       /*
-        * We pass this fake device from alloc_consistent to ensure
-        * we only use SAC for alloc_consistent mappings.
-        */
-       sac_only_dev.dma_mask = 0xFFFFFFFFUL;
+static int __init
+acpi_sba_ioc_add(struct acpi_device *device)
+{
+       struct ioc *ioc;
+       acpi_status status;
+       u64 hpa, length;
+       struct acpi_device_info dev_info;
+
+       status = hp_acpi_csr_space(device->handle, &hpa, &length);
+       if (ACPI_FAILURE(status))
+               return 1;
+
+       status = acpi_get_object_info(device->handle, &dev_info);
+       if (ACPI_FAILURE(status))
+               return 1;
 
        /*
-        * We need to check for an AGP device, if we find one, then only
-        * use part of the IOVA space for PCI DMA, the rest is for GART.
-        * REVISIT for multiple IOC.
+        * For HWP0001, only SBA appears in ACPI namespace.  It encloses the PCI
+        * root bridges, and its CSR space includes the IOC function.
         */
-       pci_for_each_dev(device)
-               agp_found |= pci_find_capability(device, PCI_CAP_ID_AGP);
+       if (strncmp("HWP0001", dev_info.hardware_id, 7) == 0)
+               hpa += ZX1_IOC_OFFSET;
 
-       if (agp_found && reserve_sba_gart)
-               SBA_SET_AGP(sba_dev);
+       ioc = ioc_init(hpa, device->handle);
+       if (!ioc)
+               return 1;
 
-       sba_hw_init(sba_dev);
-       sba_common_init(sba_dev);
+       return 0;
+}
 
-#ifdef CONFIG_PROC_FS
-       {
-               struct proc_dir_entry * proc_mckinley_root;
+static struct acpi_driver acpi_sba_ioc_driver = {
+       name:           "IOC IOMMU Driver",
+       ids:            "HWP0001,HWP0004",
+       ops: {
+               add:    acpi_sba_ioc_add,
+       },
+};
+
+static int __init
+sba_init(void)
+{
+       MAX_DMA_ADDRESS = ~0UL;
+
+       acpi_bus_register_driver(&acpi_sba_ioc_driver);
 
-               proc_mckinley_root = proc_mkdir("bus/mckinley",0);
-               create_proc_info_entry(sba_rev, 0, proc_mckinley_root, sba_proc_info);
-               create_proc_info_entry("bitmap", 0, proc_mckinley_root, sba_resource_map);
+#ifdef CONFIG_PCI
+       {
+               struct pci_bus *b;
+               pci_for_each_bus(b)
+                       sba_connect_bus(b);
        }
 #endif
+
+#ifdef CONFIG_PROC_FS
+       ioc_proc_init();
+#endif
+       return 0;
 }
 
+subsys_initcall(sba_init); /* must be initialized after ACPI etc., but before any drivers... */
+
 static int __init
-nosbagart (char *str)
+nosbagart(char *str)
 {
        reserve_sba_gart = 0;
        return 1;
 }
 
-__setup("nosbagart",nosbagart);
+int
+sba_dma_supported (struct device *dev, u64 mask)
+{
+       /* make sure it's at least 32bit capable */
+       return ((mask & 0xFFFFFFFFUL) == 0xFFFFFFFFUL);
+}
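/*
 * Sketch of the mask test in sba_dma_supported() above: any mask that
 * does not cover all of the low 32 bits is refused, since the IOMMU
 * hands out 32-bit IOVAs.  Runs stand-alone; values are examples.
 */
#include <stdio.h>
#include <stdint.h>

static int dma_mask_ok(uint64_t mask)
{
	return (mask & 0xFFFFFFFFULL) == 0xFFFFFFFFULL;
}

int main(void)
{
	printf("%d %d %d\n",
	       dma_mask_ok(0xFFFFFFFFULL),	/* 1: exactly 32 bits */
	       dma_mask_ok(0xFFFFFFFFFFULL),	/* 1: 40 bits */
	       dma_mask_ok(0x00FFFFFFULL));	/* 0: only 24 bits */
	return 0;
}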
+
+__setup("nosbagart", nosbagart);
 
-EXPORT_SYMBOL(sba_init);
 EXPORT_SYMBOL(sba_map_single);
 EXPORT_SYMBOL(sba_unmap_single);
 EXPORT_SYMBOL(sba_map_sg);
 EXPORT_SYMBOL(sba_unmap_sg);
-EXPORT_SYMBOL(sba_dma_address);
 EXPORT_SYMBOL(sba_dma_supported);
-EXPORT_SYMBOL(sba_alloc_consistent);
-EXPORT_SYMBOL(sba_free_consistent);
+EXPORT_SYMBOL(sba_alloc_coherent);
+EXPORT_SYMBOL(sba_free_coherent);
index 540f1d8..29c03cf 100644 (file)
@@ -59,7 +59,7 @@ simcons_write (struct console *cons, const char *buf, unsigned count)
 
 static struct tty_driver *simcons_console_device (struct console *c, int *index)
 {
-       extern struct tty_driver hp_serial_driver;
+       extern struct tty_driver hp_simserial_driver;
        *index = c->index;
-       return &hp_serial_driver;
+       return &hp_simserial_driver;
 }
index 76af3b4..c214193 100644 (file)
@@ -1,2 +1,3 @@
-#define MACHVEC_PLATFORM_NAME  hpsim
+#define MACHVEC_PLATFORM_NAME          hpsim
+#define MACHVEC_PLATFORM_HEADER                <asm/machvec_hpsim.h>
 #include <asm/machvec_init.h>
index 8520822..fef47f1 100644 (file)
@@ -55,7 +55,7 @@ static int simeth_close(struct net_device *dev);
 static int simeth_tx(struct sk_buff *skb, struct net_device *dev);
 static int simeth_rx(struct net_device *dev);
 static struct net_device_stats *simeth_get_stats(struct net_device *dev);
-static void simeth_interrupt(int irq, void *dev_id, struct pt_regs * regs);
+static irqreturn_t simeth_interrupt(int irq, void *dev_id, struct pt_regs * regs);
 static void set_multicast_list(struct net_device *dev);
 static int simeth_device_event(struct notifier_block *this,unsigned long event, void *ptr);
 
@@ -494,20 +494,21 @@ simeth_rx(struct net_device *dev)
 /*
  * Interrupt handler (Yes, we can do it too !!!)
  */
-static void
+static irqreturn_t
 simeth_interrupt(int irq, void *dev_id, struct pt_regs * regs)
 {
        struct net_device *dev = dev_id;
 
        if ( dev == NULL ) {
                printk(KERN_WARNING "simeth: irq %d for unknown device\n", irq);
-               return;
+               return IRQ_NONE;
        }
 
        /*
-        * very simple loop because we get interrupts only when receving
+        * very simple loop because we get interrupts only when receiving
         */
        while (simeth_rx(dev));
+       return IRQ_HANDLED;
 }
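/*
 * User-space sketch of the irqreturn_t convention this hunk adopts: a
 * handler reports IRQ_NONE when the interrupt was not for it and
 * IRQ_HANDLED otherwise, so the core can spot stuck interrupt lines.
 * The enum mimics the kernel's; the device payload is made up.
 */
#include <stdio.h>

typedef enum { IRQ_NONE, IRQ_HANDLED } irqreturn_t;

static irqreturn_t fake_interrupt(int irq, void *dev_id)
{
	if (dev_id == NULL) {
		printf("irq %d for unknown device\n", irq);
		return IRQ_NONE;
	}
	/* ... drain the device's receive queue here ... */
	return IRQ_HANDLED;
}

int main(void)
{
	int dev = 42;

	printf("without device: %d\n", fake_interrupt(5, NULL));
	printf("with device:    %d\n", fake_interrupt(5, &dev));
	return 0;
}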
 
 static struct net_device_stats *
index 121fb7c..0c04fb0 100644 (file)
@@ -103,7 +103,8 @@ static struct serial_uart_config uart_config[] = {
        { 0, 0}
 };
 
-static struct tty_driver hp_serial_driver, callout_driver;
+struct tty_driver hp_simserial_driver;
+static struct tty_driver callout_driver;
 static int serial_refcount;
 
 static struct async_struct *IRQ_ports[NR_IRQS];
@@ -184,7 +185,7 @@ static  void receive_chars(struct tty_struct *tty, struct pt_regs *regs)
 /*
  * This is the serial driver's interrupt routine for a single port
  */
-static void rs_interrupt_single(int irq, void *dev_id, struct pt_regs * regs)
+static irqreturn_t rs_interrupt_single(int irq, void *dev_id, struct pt_regs * regs)
 {
        struct async_struct * info;
 
@@ -195,13 +196,14 @@ static void rs_interrupt_single(int irq, void *dev_id, struct pt_regs * regs)
        info = IRQ_ports[irq];
        if (!info || !info->tty) {
                printk(KERN_INFO "simrs_interrupt_single: info|tty=0 info=%p problem\n", info);
-               return;
+               return IRQ_NONE;
        }
        /*
         * pretty simple in our case, because we only get interrupts
         * on inbound traffic
         */
        receive_chars(info->tty, regs);
+       return IRQ_HANDLED;
 }
 
 /*
@@ -768,7 +770,7 @@ startup(struct async_struct *info)
 {
        unsigned long flags;
        int     retval=0;
-       void (*handler)(int, void *, struct pt_regs *);
+       irqreturn_t (*handler)(int, void *, struct pt_regs *);
        struct serial_state *state= info->state;
        unsigned long page;
 
@@ -808,8 +810,7 @@ startup(struct async_struct *info)
                } else
                        handler = rs_interrupt_single;
 
-               retval = request_irq(state->irq, handler, IRQ_T(info),
-                                    "simserial", NULL);
+               retval = request_irq(state->irq, handler, IRQ_T(info), "simserial", NULL);
                if (retval) {
                        if (capable(CAP_SYS_ADMIN)) {
                                if (info->tty)
@@ -1028,43 +1029,43 @@ simrs_init (void)
 
        /* Initialize the tty_driver structure */
 
-       memset(&hp_serial_driver, 0, sizeof(struct tty_driver));
-       hp_serial_driver.magic = TTY_DRIVER_MAGIC;
-       hp_serial_driver.driver_name = "simserial";
-       hp_serial_driver.name = "ttyS";
-       hp_serial_driver.major = TTY_MAJOR;
-       hp_serial_driver.minor_start = 64;
-       hp_serial_driver.num = 1;
-       hp_serial_driver.type = TTY_DRIVER_TYPE_SERIAL;
-       hp_serial_driver.subtype = SERIAL_TYPE_NORMAL;
-       hp_serial_driver.init_termios = tty_std_termios;
-       hp_serial_driver.init_termios.c_cflag =
+       memset(&hp_simserial_driver, 0, sizeof(struct tty_driver));
+       hp_simserial_driver.magic = TTY_DRIVER_MAGIC;
+       hp_simserial_driver.driver_name = "simserial";
+       hp_simserial_driver.name = "ttyS";
+       hp_simserial_driver.major = TTY_MAJOR;
+       hp_simserial_driver.minor_start = 64;
+       hp_simserial_driver.num = 1;
+       hp_simserial_driver.type = TTY_DRIVER_TYPE_SERIAL;
+       hp_simserial_driver.subtype = SERIAL_TYPE_NORMAL;
+       hp_simserial_driver.init_termios = tty_std_termios;
+       hp_simserial_driver.init_termios.c_cflag =
                B9600 | CS8 | CREAD | HUPCL | CLOCAL;
-       hp_serial_driver.flags = TTY_DRIVER_REAL_RAW;
-       hp_serial_driver.refcount = &serial_refcount;
-       hp_serial_driver.table = serial_table;
-       hp_serial_driver.termios = serial_termios;
-       hp_serial_driver.termios_locked = serial_termios_locked;
-
-       hp_serial_driver.open = rs_open;
-       hp_serial_driver.close = rs_close;
-       hp_serial_driver.write = rs_write;
-       hp_serial_driver.put_char = rs_put_char;
-       hp_serial_driver.flush_chars = rs_flush_chars;
-       hp_serial_driver.write_room = rs_write_room;
-       hp_serial_driver.chars_in_buffer = rs_chars_in_buffer;
-       hp_serial_driver.flush_buffer = rs_flush_buffer;
-       hp_serial_driver.ioctl = rs_ioctl;
-       hp_serial_driver.throttle = rs_throttle;
-       hp_serial_driver.unthrottle = rs_unthrottle;
-       hp_serial_driver.send_xchar = rs_send_xchar;
-       hp_serial_driver.set_termios = rs_set_termios;
-       hp_serial_driver.stop = rs_stop;
-       hp_serial_driver.start = rs_start;
-       hp_serial_driver.hangup = rs_hangup;
-       hp_serial_driver.break_ctl = rs_break;
-       hp_serial_driver.wait_until_sent = rs_wait_until_sent;
-       hp_serial_driver.read_proc = rs_read_proc;
+       hp_simserial_driver.flags = TTY_DRIVER_REAL_RAW;
+       hp_simserial_driver.refcount = &serial_refcount;
+       hp_simserial_driver.table = serial_table;
+       hp_simserial_driver.termios = serial_termios;
+       hp_simserial_driver.termios_locked = serial_termios_locked;
+
+       hp_simserial_driver.open = rs_open;
+       hp_simserial_driver.close = rs_close;
+       hp_simserial_driver.write = rs_write;
+       hp_simserial_driver.put_char = rs_put_char;
+       hp_simserial_driver.flush_chars = rs_flush_chars;
+       hp_simserial_driver.write_room = rs_write_room;
+       hp_simserial_driver.chars_in_buffer = rs_chars_in_buffer;
+       hp_simserial_driver.flush_buffer = rs_flush_buffer;
+       hp_simserial_driver.ioctl = rs_ioctl;
+       hp_simserial_driver.throttle = rs_throttle;
+       hp_simserial_driver.unthrottle = rs_unthrottle;
+       hp_simserial_driver.send_xchar = rs_send_xchar;
+       hp_simserial_driver.set_termios = rs_set_termios;
+       hp_simserial_driver.stop = rs_stop;
+       hp_simserial_driver.start = rs_start;
+       hp_simserial_driver.hangup = rs_hangup;
+       hp_simserial_driver.break_ctl = rs_break;
+       hp_simserial_driver.wait_until_sent = rs_wait_until_sent;
+       hp_simserial_driver.read_proc = rs_read_proc;
 
        /*
         * Let's have a little bit of fun !
@@ -1087,14 +1088,14 @@ simrs_init (void)
         * The callout device is just like normal device except for
         * major number and the subtype code.
         */
-       callout_driver = hp_serial_driver;
+       callout_driver = hp_simserial_driver;
        callout_driver.name = "cua";
        callout_driver.major = TTYAUX_MAJOR;
        callout_driver.subtype = SERIAL_TYPE_CALLOUT;
        callout_driver.read_proc = 0;
        callout_driver.proc_entry = 0;
 
-       if (tty_register_driver(&hp_serial_driver))
+       if (tty_register_driver(&hp_simserial_driver))
                panic("Couldn't register simserial driver\n");
 
        if (tty_register_driver(&callout_driver))
index 4c00a07..64e39aa 100644 (file)
@@ -5,5 +5,4 @@
 # Copyright (C) Alex Williamson (alex_williamson@hp.com)
 #
 
-obj-y := hpzx1_misc.o
 obj-$(CONFIG_IA64_GENERIC) += hpzx1_machvec.o
index 53b1a13..32518b0 100644 (file)
@@ -1,2 +1,3 @@
-#define MACHVEC_PLATFORM_NAME  hpzx1
+#define MACHVEC_PLATFORM_NAME          hpzx1
+#define MACHVEC_PLATFORM_HEADER                <asm/machvec_hpzx1.h>
 #include <asm/machvec_init.h>
diff --git a/arch/ia64/hp/zx1/hpzx1_misc.c b/arch/ia64/hp/zx1/hpzx1_misc.c
deleted file mode 100644 (file)
index bf6faa9..0000000
+++ /dev/null
@@ -1,348 +0,0 @@
-/*
- * Misc. support for HP zx1 chipset support
- *
- * Copyright (C) 2002-2003 Hewlett-Packard Co
- *     Alex Williamson <alex_williamson@hp.com>
- *     Bjorn Helgaas <bjorn_helgaas@hp.com>
- */
-
-
-#include <linux/config.h>
-#include <linux/init.h>
-#include <linux/kernel.h>
-#include <linux/pci.h>
-#include <linux/acpi.h>
-#include <linux/efi.h>
-
-#include <asm/dma.h>
-#include <asm/iosapic.h>
-
-extern acpi_status acpi_evaluate_integer (acpi_handle, acpi_string, struct acpi_object_list *,
-                                         unsigned long *);
-
-#define PFX "hpzx1: "
-
-static int hpzx1_devices;
-
-struct fake_pci_dev {
-       struct fake_pci_dev *next;
-       struct pci_dev *pci_dev;
-       unsigned long csr_base;
-       unsigned long csr_size;
-       unsigned long mapped_csrs;      // ioremapped
-       int sizing;                     // in middle of BAR sizing operation?
-} *fake_pci_dev_list;
-
-static struct pci_ops *orig_pci_ops;
-
-struct fake_pci_dev *
-lookup_fake_dev (struct pci_bus *bus, unsigned int devfn)
-{
-       struct fake_pci_dev *fake_dev;
-
-       for (fake_dev = fake_pci_dev_list; fake_dev; fake_dev = fake_dev->next)
-               if (fake_dev->pci_dev->bus == bus && fake_dev->pci_dev->devfn == devfn)
-                       return fake_dev;
-       return NULL;
-}
-
-static int
-hp_cfg_read (struct pci_bus *bus, unsigned int devfn, int where, int size, u32 *value)
-{
-       struct fake_pci_dev *fake_dev = lookup_fake_dev(bus, devfn);
-
-       if (!fake_dev)
-               return (*orig_pci_ops->read)(bus, devfn, where, size, value);
-
-       if (where == PCI_BASE_ADDRESS_0) {
-               if (fake_dev->sizing)
-                       *value = ~(fake_dev->csr_size - 1);
-               else
-                       *value = ((fake_dev->csr_base & PCI_BASE_ADDRESS_MEM_MASK)
-                                 | PCI_BASE_ADDRESS_SPACE_MEMORY);
-               fake_dev->sizing = 0;
-               return PCIBIOS_SUCCESSFUL;
-       }
-       switch (size) {
-             case 1: *value = readb(fake_dev->mapped_csrs + where); break;
-             case 2: *value = readw(fake_dev->mapped_csrs + where); break;
-             case 4: *value = readl(fake_dev->mapped_csrs + where); break;
-             default:
-               printk(KERN_WARNING"hp_cfg_read: bad size = %d bytes", size);
-               break;
-       }
-       if (where == PCI_COMMAND)
-               *value |= PCI_COMMAND_MEMORY; /* SBA omits this */
-       return PCIBIOS_SUCCESSFUL;
-}
-
-static int
-hp_cfg_write (struct pci_bus *bus, unsigned int devfn, int where, int size, u32 value)
-{
-       struct fake_pci_dev *fake_dev = lookup_fake_dev(bus, devfn);
-
-       if (!fake_dev)
-               return (*orig_pci_ops->write)(bus, devfn, where, size, value);
-
-       if (where == PCI_BASE_ADDRESS_0) {
-               if (value == ((1UL << 8*size) - 1))
-                       fake_dev->sizing = 1;
-               return PCIBIOS_SUCCESSFUL;
-       }
-       switch (size) {
-             case 1: writeb(value, fake_dev->mapped_csrs + where); break;
-             case 2: writew(value, fake_dev->mapped_csrs + where); break;
-             case 4: writel(value, fake_dev->mapped_csrs + where); break;
-             default:
-               printk(KERN_WARNING"hp_cfg_write: bad size = %d bytes", size);
-               break;
-       }
-       return PCIBIOS_SUCCESSFUL;
-}
-
-static struct pci_ops hp_pci_conf = {
-       .read =         hp_cfg_read,
-       .write =        hp_cfg_write
-};
-
-static void
-hpzx1_fake_pci_dev(char *name, unsigned int busnum, unsigned long addr, unsigned int size)
-{
-       struct fake_pci_dev *fake;
-       int slot, ret;
-       struct pci_dev *dev;
-       struct pci_bus *b, *bus = NULL;
-       u8 hdr;
-
-        fake = kmalloc(sizeof(*fake), GFP_KERNEL);
-       if (!fake) {
-               printk(KERN_ERR PFX "No memory for %s (0x%p) sysdata\n", name, (void *) addr);
-               return;
-       }
-
-       memset(fake, 0, sizeof(*fake));
-       fake->csr_base = addr;
-       fake->csr_size = size;
-       fake->mapped_csrs = (unsigned long) ioremap(addr, size);
-       fake->sizing = 0;
-
-       pci_for_each_bus(b)
-               if (busnum == b->number) {
-                       bus = b;
-                       break;
-               }
-
-       if (!bus) {
-               printk(KERN_ERR PFX "No host bus 0x%02x for %s (0x%p)\n",
-                      busnum, name, (void *) addr);
-               kfree(fake);
-               return;
-       }
-
-       for (slot = 0x1e; slot; slot--)
-               if (!pci_find_slot(busnum, PCI_DEVFN(slot, 0)))
-                       break;
-
-       if (slot < 0) {
-               printk(KERN_ERR PFX "No space for %s (0x%p) on bus 0x%02x\n",
-                      name, (void *) addr, busnum);
-               kfree(fake);
-               return;
-       }
-
-        dev = kmalloc(sizeof(*dev), GFP_KERNEL);
-       if (!dev) {
-               printk(KERN_ERR PFX "No memory for %s (0x%p)\n", name, (void *) addr);
-               kfree(fake);
-               return;
-       }
-
-       bus->ops = &hp_pci_conf;        // replace pci ops for this bus
-
-       fake->pci_dev = dev;
-       fake->next = fake_pci_dev_list;
-       fake_pci_dev_list = fake;
-
-       memset(dev, 0, sizeof(*dev));
-       dev->bus = bus;
-       dev->sysdata = fake;
-       dev->dev.parent = bus->dev;
-       dev->dev.bus = &pci_bus_type;
-       dev->devfn = PCI_DEVFN(slot, 0);
-       pci_read_config_word(dev, PCI_VENDOR_ID, &dev->vendor);
-       pci_read_config_word(dev, PCI_DEVICE_ID, &dev->device);
-       pci_read_config_byte(dev, PCI_HEADER_TYPE, &hdr);
-       dev->hdr_type = hdr & 0x7f;
-
-       pci_setup_device(dev);
-
-       // pci_insert_device() without running /sbin/hotplug
-       list_add_tail(&dev->bus_list, &bus->devices);
-       list_add_tail(&dev->global_list, &pci_devices);
-
-       strcpy(dev->dev.bus_id, dev->slot_name);
-       ret = device_register(&dev->dev);
-       if (ret < 0)
-               printk(KERN_INFO PFX "fake device registration failed (%d)\n", ret);
-
-       printk(KERN_INFO PFX "%s at 0x%lx; pci dev %s\n", name, addr, dev->slot_name);
-
-       hpzx1_devices++;
-}
-
-struct acpi_hp_vendor_long {
-       u8      guid_id;
-       u8      guid[16];
-       u8      csr_base[8];
-       u8      csr_length[8];
-};
-
-#define HP_CCSR_LENGTH 0x21
-#define HP_CCSR_TYPE   0x2
-#define HP_CCSR_GUID   EFI_GUID(0x69e9adf9, 0x924f, 0xab5f,                            \
-                                0xf6, 0x4a, 0x24, 0xd2, 0x01, 0x37, 0x0e, 0xad)
-
-extern acpi_status acpi_get_crs(acpi_handle, struct acpi_buffer *);
-extern struct acpi_resource *acpi_get_crs_next(struct acpi_buffer *, int *);
-extern union acpi_resource_data *acpi_get_crs_type(struct acpi_buffer *, int *, int);
-extern void acpi_dispose_crs(struct acpi_buffer *);
-
-static acpi_status
-hp_csr_space(acpi_handle obj, u64 *csr_base, u64 *csr_length)
-{
-       int i, offset = 0;
-       acpi_status status;
-       struct acpi_buffer buf;
-       struct acpi_resource_vendor *res;
-       struct acpi_hp_vendor_long *hp_res;
-       efi_guid_t vendor_guid;
-
-       *csr_base = 0;
-       *csr_length = 0;
-
-       status = acpi_get_crs(obj, &buf);
-       if (ACPI_FAILURE(status)) {
-               printk(KERN_ERR PFX "Unable to get _CRS data on object\n");
-               return status;
-       }
-
-       res = (struct acpi_resource_vendor *)acpi_get_crs_type(&buf, &offset, ACPI_RSTYPE_VENDOR);
-       if (!res) {
-               printk(KERN_ERR PFX "Failed to find config space for device\n");
-               acpi_dispose_crs(&buf);
-               return AE_NOT_FOUND;
-       }
-
-       hp_res = (struct acpi_hp_vendor_long *)(res->reserved);
-
-       if (res->length != HP_CCSR_LENGTH || hp_res->guid_id != HP_CCSR_TYPE) {
-               printk(KERN_ERR PFX "Unknown Vendor data\n");
-               acpi_dispose_crs(&buf);
-               return AE_TYPE; /* Revisit error? */
-       }
-
-       memcpy(&vendor_guid, hp_res->guid, sizeof(efi_guid_t));
-       if (efi_guidcmp(vendor_guid, HP_CCSR_GUID) != 0) {
-               printk(KERN_ERR PFX "Vendor GUID does not match\n");
-               acpi_dispose_crs(&buf);
-               return AE_TYPE; /* Revisit error? */
-       }
-
-       for (i = 0 ; i < 8 ; i++) {
-               *csr_base |= ((u64)(hp_res->csr_base[i]) << (i * 8));
-               *csr_length |= ((u64)(hp_res->csr_length[i]) << (i * 8));
-       }
-
-       acpi_dispose_crs(&buf);
-
-       return AE_OK;
-}
-
-static acpi_status
-hpzx1_sba_probe(acpi_handle obj, u32 depth, void *context, void **ret)
-{
-       u64 csr_base = 0, csr_length = 0;
-       acpi_status status;
-       char *name = context;
-       char fullname[16];
-
-       status = hp_csr_space(obj, &csr_base, &csr_length);
-       if (ACPI_FAILURE(status))
-               return status;
-
-       /*
-        * Only SBA shows up in ACPI namespace, so its CSR space
-        * includes both SBA and IOC.  Make SBA and IOC show up
-        * separately in PCI space.
-        */
-       sprintf(fullname, "%s SBA", name);
-       hpzx1_fake_pci_dev(fullname, 0, csr_base, 0x1000);
-       sprintf(fullname, "%s IOC", name);
-       hpzx1_fake_pci_dev(fullname, 0, csr_base + 0x1000, 0x1000);
-
-       return AE_OK;
-}
-
-static acpi_status
-hpzx1_lba_probe(acpi_handle obj, u32 depth, void *context, void **ret)
-{
-       u64 csr_base = 0, csr_length = 0;
-       acpi_status status;
-       acpi_native_uint busnum;
-       char *name = context;
-       char fullname[32];
-
-       status = hp_csr_space(obj, &csr_base, &csr_length);
-       if (ACPI_FAILURE(status))
-               return status;
-
-       status = acpi_evaluate_integer(obj, METHOD_NAME__BBN, NULL, &busnum);
-       if (ACPI_FAILURE(status)) {
-               printk(KERN_WARNING PFX "evaluate _BBN fail=0x%x\n", status);
-               busnum = 0;     // no _BBN; stick it on bus 0
-       }
-
-       sprintf(fullname, "%s _BBN 0x%02x", name, (unsigned int) busnum);
-       hpzx1_fake_pci_dev(fullname, busnum, csr_base, csr_length);
-
-       return AE_OK;
-}
-
-static void
-hpzx1_acpi_dev_init(void)
-{
-       extern struct pci_ops *pci_root_ops;
-
-       orig_pci_ops = pci_root_ops;
-
-       /*
-        * Make fake PCI devices for the following hardware in the
-        * ACPI namespace.  This makes it more convenient for drivers
-        * because they can claim these devices based on PCI
-        * information, rather than needing to know about ACPI.  The
-        * 64-bit "HPA" space for this hardware is available as BAR
-        * 0/1.
-        *
-        * HWP0001: Single IOC SBA w/o IOC in namespace
-        * HWP0002: LBA device
-        * HWP0003: AGP LBA device
-        */
-       acpi_get_devices("HWP0001", hpzx1_sba_probe, "HWP0001", NULL);
-       acpi_get_devices("HWP0002", hpzx1_lba_probe, "HWP0002 PCI LBA", NULL);
-       acpi_get_devices("HWP0003", hpzx1_lba_probe, "HWP0003 AGP LBA", NULL);
-}
-
-extern void sba_init(void);
-
-static int
-hpzx1_init (void)
-{
-       /* zx1 has a hardware I/O TLB which lets us DMA from any device to any address */
-       MAX_DMA_ADDRESS = ~0UL;
-
-       hpzx1_acpi_dev_init();
-       sba_init();
-       return 0;
-}
-
-subsys_initcall(hpzx1_init);
index 6001b46..576a189 100644 (file)
@@ -273,9 +273,9 @@ ia32_syscall_table:
        data8 sys32_sigsuspend
        data8 compat_sys_sigpending
        data8 sys_sethostname
-       data8 sys32_setrlimit     /* 75 */
-       data8 sys32_old_getrlimit
-       data8 sys32_getrusage
+       data8 compat_sys_setrlimit        /* 75 */
+       data8 compat_sys_old_getrlimit
+       data8 compat_sys_getrusage
        data8 sys32_gettimeofday
        data8 sys32_settimeofday
        data8 sys32_getgroups16   /* 80 */
@@ -312,7 +312,7 @@ ia32_syscall_table:
        data8 sys_vhangup
        data8 sys32_ni_syscall          /* used to be sys_idle */
        data8 sys32_ni_syscall
-       data8 sys32_wait4
+       data8 compat_sys_wait4
        data8 sys_swapoff         /* 115 */
        data8 sys32_sysinfo
        data8 sys32_ipc
@@ -389,7 +389,7 @@ ia32_syscall_table:
        data8 sys32_ni_syscall            /* streams1 */
        data8 sys32_ni_syscall            /* streams2 */
        data8 sys32_vfork         /* 190 */
-       data8 sys32_getrlimit
+       data8 compat_sys_getrlimit
        data8 sys32_mmap2
        data8 sys32_truncate64
        data8 sys32_ftruncate64
index 0ead044..e86c863 100644 (file)
@@ -3,13 +3,16 @@
  *
  * Copyright (C) 2000 VA Linux Co
  * Copyright (C) 2000 Don Dugger <n0ano@valinux.com>
- * Copyright (C) 2001-2002 Hewlett-Packard Co
+ * Copyright (C) 2001-2003 Hewlett-Packard Co
  *     David Mosberger-Tang <davidm@hpl.hp.com>
  */
 
 #include <linux/types.h>
 #include <linux/dirent.h>
 #include <linux/fs.h>          /* argh, msdos_fs.h isn't self-contained... */
+#include <linux/signal.h>      /* argh, msdos_fs.h isn't self-contained... */
+
+#include <asm/ia32.h>
 
 #include <linux/msdos_fs.h>
 #include <linux/mtio.h>
@@ -33,8 +36,6 @@
 #define        __KERNEL__
 #include <scsi/sg.h>
 
-#include <asm/ia32.h>
-
 #include <../drivers/char/drm/drm.h>
 #include <../drivers/char/drm/mga_drm.h>
 #include <../drivers/char/drm/i810_drm.h>
index 20fea69..3090690 100644 (file)
@@ -103,7 +103,7 @@ ia32_exception (struct pt_regs *regs, unsigned long isr)
                         * C1 reg you need in case of a stack fault, 0x040 is the stack
                         * fault bit.  We should only be taking one exception at a time,
                         * so if this combination doesn't produce any single exception,
-                        * then we have a bad program that isn't syncronizing its FPU usage
+                        * then we have a bad program that isn't synchronizing its FPU usage
                         * and it will suffer the consequences since we won't be able to
                         * fully reproduce the context of the exception
                         */
index 570b039..fedff08 100644 (file)
 #include <asm/types.h>
 #include <asm/uaccess.h>
 #include <asm/semaphore.h>
+#include <asm/ia32.h>
 
 #include <net/scm.h>
 #include <net/sock.h>
-#include <asm/ia32.h>
 
 #define DEBUG  0
 
@@ -177,7 +177,7 @@ int cp_compat_stat(struct kstat *stat, struct compat_stat *ubuf)
 {
        int err;
 
-       if (stat->size > MAX_NON_LFS)
+       if ((u64) stat->size > MAX_NON_LFS)
                return -EOVERFLOW;
 
        if (clear_user(ubuf, sizeof(*ubuf)))
@@ -243,8 +243,7 @@ mmap_subpage (struct file *file, unsigned long start, unsigned long end, int pro
                return -ENOMEM;
 
        if (old_prot)
-               if (copy_from_user(page, (void *) PAGE_START(start), PAGE_SIZE))
-                       return -EFAULT;
+               copy_from_user(page, (void *) PAGE_START(start), PAGE_SIZE);
 
        down_write(&current->mm->mmap_sem);
        {
@@ -837,8 +836,9 @@ sys32_select (int n, fd_set *inp, fd_set *outp, fd_set *exp, struct compat_timev
                }
        }
 
+       size = FDS_BYTES(n);
        ret = -EINVAL;
-       if (n < 0)
+       if (n < 0 || size < n)
                goto out_nofds;
 
        if (n > current->files->max_fdset)
@@ -850,7 +850,6 @@ sys32_select (int n, fd_set *inp, fd_set *outp, fd_set *exp, struct compat_timev
         * long-words.
         */
        ret = -ENOMEM;
-       size = FDS_BYTES(n);
        bits = kmalloc(6 * size, GFP_KERNEL);
        if (!bits)
                goto out_nofds;
@@ -928,8 +927,7 @@ asmlinkage ssize_t sys_writev (unsigned long,const struct iovec *,unsigned long)
 static struct iovec *
 get_compat_iovec (struct compat_iovec *iov32, struct iovec *iov_buf, u32 count, int type)
 {
-       int i;
-       u32 buf, len;
+       u32 i, buf, len;
        struct iovec *ivp, *iov;
 
        /* Get the "struct iovec" from user memory */
@@ -1005,77 +1003,6 @@ sys32_writev (int fd, struct compat_iovec *vector, u32 count)
        return ret;
 }
 
-#define RLIM_INFINITY32        0x7fffffff
-#define RESOURCE32(x) ((x > RLIM_INFINITY32) ? RLIM_INFINITY32 : x)
-
-struct rlimit32 {
-       int     rlim_cur;
-       int     rlim_max;
-};
-
-extern asmlinkage long sys_getrlimit (unsigned int resource, struct rlimit *rlim);
-
-asmlinkage long
-sys32_old_getrlimit (unsigned int resource, struct rlimit32 *rlim)
-{
-       mm_segment_t old_fs = get_fs();
-       struct rlimit r;
-       int ret;
-
-       set_fs(KERNEL_DS);
-       ret = sys_getrlimit(resource, &r);
-       set_fs(old_fs);
-       if (!ret) {
-               ret = put_user(RESOURCE32(r.rlim_cur), &rlim->rlim_cur);
-               ret |= put_user(RESOURCE32(r.rlim_max), &rlim->rlim_max);
-       }
-       return ret;
-}
-
-asmlinkage long
-sys32_getrlimit (unsigned int resource, struct rlimit32 *rlim)
-{
-       mm_segment_t old_fs = get_fs();
-       struct rlimit r;
-       int ret;
-
-       set_fs(KERNEL_DS);
-       ret = sys_getrlimit(resource, &r);
-       set_fs(old_fs);
-       if (!ret) {
-               if (r.rlim_cur >= 0xffffffff)
-                       r.rlim_cur = 0xffffffff;
-               if (r.rlim_max >= 0xffffffff)
-                       r.rlim_max = 0xffffffff;
-               ret = put_user(r.rlim_cur, &rlim->rlim_cur);
-               ret |= put_user(r.rlim_max, &rlim->rlim_max);
-       }
-       return ret;
-}
-
-extern asmlinkage long sys_setrlimit (unsigned int resource, struct rlimit *rlim);
-
-asmlinkage long
-sys32_setrlimit (unsigned int resource, struct rlimit32 *rlim)
-{
-       struct rlimit r;
-       int ret;
-       mm_segment_t old_fs = get_fs();
-
-       if (resource >= RLIM_NLIMITS)
-               return -EINVAL;
-       if (get_user(r.rlim_cur, &rlim->rlim_cur) || get_user(r.rlim_max, &rlim->rlim_max))
-               return -EFAULT;
-       if (r.rlim_cur == RLIM_INFINITY32)
-               r.rlim_cur = RLIM_INFINITY;
-       if (r.rlim_max == RLIM_INFINITY32)
-               r.rlim_max = RLIM_INFINITY;
-       set_fs(KERNEL_DS);
-       ret = sys_setrlimit(resource, &r);
-       set_fs(old_fs);
-       return ret;
-}
-
 /*
 * sys32_ipc() is the de-multiplexer for the SysV IPC calls in 32-bit emulation.
  *
@@ -1648,19 +1575,35 @@ shmctl32 (int first, int second, void *uptr)
        return err;
 }
 
+extern int sem_ctls[];
+#define sc_semopm      (sem_ctls[2])
+
 static long
-semtimedop32(int semid, struct sembuf *tsems, int nsems,
-            const struct compat_timespec *timeout32)
+semtimedop32(int semid, struct sembuf *tsops, int nsops,
+            struct compat_timespec *timeout32)
 {
        struct timespec t;
-       if (get_user (t.tv_sec, &timeout32->tv_sec) ||
-           get_user (t.tv_nsec, &timeout32->tv_nsec))
+       mm_segment_t oldfs;
+       long ret;
+
+       /* parameter checking precedence should mirror sys_semtimedop() */
+       if (nsops < 1 || semid < 0)
+               return -EINVAL;
+       if (nsops > sc_semopm)
+               return -E2BIG;
+       if (!access_ok(VERIFY_READ, tsops, nsops * sizeof(struct sembuf)) ||
+           get_compat_timespec(&t, timeout32))
                return -EFAULT;
-       return sys_semtimedop(semid, tsems, nsems, &t);
+
+       oldfs = get_fs();
+       set_fs(KERNEL_DS);
+       ret = sys_semtimedop(semid, tsops, nsops, &t);
+       set_fs(oldfs);
+       return ret;
 }
 
 asmlinkage long
-sys32_ipc (u32 call, int first, int second, int third, u32 ptr, u32 fifth)
+sys32_ipc(u32 call, int first, int second, int third, u32 ptr, u32 fifth)
 {
        int version;
 
@@ -1668,12 +1611,15 @@ sys32_ipc (u32 call, int first, int second, int third, u32 ptr, u32 fifth)
        call &= 0xffff;
 
        switch (call) {
+             case SEMTIMEDOP:
+               if (fifth)
+                       return semtimedop32(first, (struct sembuf *)AA(ptr),
+                               second, (struct compat_timespec *)AA(fifth));
+               /* else fall through for normal semop() */
              case SEMOP:
                /* struct sembuf is the same on 32 and 64bit :)) */
-               return sys_semtimedop(first, (struct sembuf *)AA(ptr), second, NULL);
-             case SEMTIMEDOP:
-               return semtimedop32(first, (struct sembuf *)AA(ptr), second,
-                                   (const struct compat_timespec *)AA(fifth));
+               return sys_semtimedop(first, (struct sembuf *)AA(ptr), second,
+                                     NULL);
              case SEMGET:
                return sys_semget(first, second, third);
              case SEMCTL:
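The rewritten semtimedop32() copies the 32-bit timeout into a kernel struct timespec and only then widens the address limit, so sys_semtimedop() can fetch &t through its normal user-copy path while the explicit access_ok() still guards tsops, the one pointer that remains a user address. For reference, get_compat_timespec() is roughly the following (a sketch; the real helper lives in the generic compat code):

int get_compat_timespec(struct timespec *ts, struct compat_timespec *cts)
{
	return (verify_area(VERIFY_READ, cts, sizeof(*cts)) ||
		__get_user(ts->tv_sec, &cts->tv_sec) ||
		__get_user(ts->tv_nsec, &cts->tv_nsec)) ? -EFAULT : 0;
}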
@@ -1724,98 +1670,10 @@ sys32_time (int *tloc)
        return i;
 }
 
-struct rusage32 {
-       struct compat_timeval ru_utime;
-       struct compat_timeval ru_stime;
-       int    ru_maxrss;
-       int    ru_ixrss;
-       int    ru_idrss;
-       int    ru_isrss;
-       int    ru_minflt;
-       int    ru_majflt;
-       int    ru_nswap;
-       int    ru_inblock;
-       int    ru_oublock;
-       int    ru_msgsnd;
-       int    ru_msgrcv;
-       int    ru_nsignals;
-       int    ru_nvcsw;
-       int    ru_nivcsw;
-};
-
-static int
-put_rusage (struct rusage32 *ru, struct rusage *r)
-{
-       int err;
-
-       if (!access_ok(VERIFY_WRITE, ru, sizeof(*ru)))
-               return -EFAULT;
-
-       err = __put_user (r->ru_utime.tv_sec, &ru->ru_utime.tv_sec);
-       err |= __put_user (r->ru_utime.tv_usec, &ru->ru_utime.tv_usec);
-       err |= __put_user (r->ru_stime.tv_sec, &ru->ru_stime.tv_sec);
-       err |= __put_user (r->ru_stime.tv_usec, &ru->ru_stime.tv_usec);
-       err |= __put_user (r->ru_maxrss, &ru->ru_maxrss);
-       err |= __put_user (r->ru_ixrss, &ru->ru_ixrss);
-       err |= __put_user (r->ru_idrss, &ru->ru_idrss);
-       err |= __put_user (r->ru_isrss, &ru->ru_isrss);
-       err |= __put_user (r->ru_minflt, &ru->ru_minflt);
-       err |= __put_user (r->ru_majflt, &ru->ru_majflt);
-       err |= __put_user (r->ru_nswap, &ru->ru_nswap);
-       err |= __put_user (r->ru_inblock, &ru->ru_inblock);
-       err |= __put_user (r->ru_oublock, &ru->ru_oublock);
-       err |= __put_user (r->ru_msgsnd, &ru->ru_msgsnd);
-       err |= __put_user (r->ru_msgrcv, &ru->ru_msgrcv);
-       err |= __put_user (r->ru_nsignals, &ru->ru_nsignals);
-       err |= __put_user (r->ru_nvcsw, &ru->ru_nvcsw);
-       err |= __put_user (r->ru_nivcsw, &ru->ru_nivcsw);
-       return err;
-}
-
-asmlinkage long
-sys32_wait4 (int pid, unsigned int *stat_addr, int options, struct rusage32 *ru)
-{
-       if (!ru)
-               return sys_wait4(pid, stat_addr, options, NULL);
-       else {
-               struct rusage r;
-               int ret;
-               unsigned int status;
-               mm_segment_t old_fs = get_fs();
-
-               set_fs(KERNEL_DS);
-               ret = sys_wait4(pid, stat_addr ? &status : NULL, options, &r);
-               set_fs(old_fs);
-               if (put_rusage(ru, &r))
-                       return -EFAULT;
-               if (stat_addr && put_user(status, stat_addr))
-                       return -EFAULT;
-               return ret;
-       }
-}
-
 asmlinkage long
 sys32_waitpid (int pid, unsigned int *stat_addr, int options)
 {
-       return sys32_wait4(pid, stat_addr, options, NULL);
-}
-
-
-extern asmlinkage long sys_getrusage (int who, struct rusage *ru);
-
-asmlinkage long
-sys32_getrusage (int who, struct rusage32 *ru)
-{
-       struct rusage r;
-       int ret;
-       mm_segment_t old_fs = get_fs();
-
-       set_fs(KERNEL_DS);
-       ret = sys_getrusage(who, &r);
-       set_fs(old_fs);
-       if (put_rusage (ru, &r))
-               return -EFAULT;
-       return ret;
+       return compat_sys_wait4(pid, stat_addr, options, NULL);
 }
 
 static unsigned int
@@ -2211,7 +2069,7 @@ sys32_ptrace (int request, pid_t pid, unsigned int addr, unsigned int data,
                        ret = -EIO;
                        break;
                }
-               for (i = 0; i < 17*sizeof(int); i += sizeof(int) ) {
+               for (i = 0; i < (int) (17*sizeof(int)); i += sizeof(int) ) {
                        put_user(getreg(child, i), (unsigned int *) A(data));
                        data += sizeof(int);
                }
@@ -2223,7 +2081,7 @@ sys32_ptrace (int request, pid_t pid, unsigned int addr, unsigned int data,
                        ret = -EIO;
                        break;
                }
-               for (i = 0; i < 17*sizeof(int); i += sizeof(int) ) {
+               for (i = 0; i < (int) (17*sizeof(int)); i += sizeof(int) ) {
                        get_user(tmp, (unsigned int *) A(data));
                        putreg(child, i, tmp);
                        data += sizeof(int);
@@ -2299,7 +2157,7 @@ sys32_iopl (int level)
                return(-EINVAL);
        /* Trying to gain more privileges? */
        asm volatile ("mov %0=ar.eflag ;;" : "=r"(old));
-       if (level > ((old >> 12) & 3)) {
+       if ((unsigned int) level > ((old >> 12) & 3)) {
                if (!capable(CAP_SYS_RAWIO))
                        return -EPERM;
        }
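The unsigned cast closes the case of a negative level: the asm above reads the ia32 EFLAGS image from ar.eflag, and bits 12-13 of EFLAGS hold the x86 IOPL. A runnable sketch of the bit extraction:

#include <stdio.h>

int main(void)
{
	unsigned long eflag = 0x3000;		/* made-up sample: IOPL field = 3 */
	unsigned int iopl = (eflag >> 12) & 3;	/* bits 12-13 of EFLAGS hold IOPL */

	/* with level cast to unsigned, level = -1 becomes huge and fails the
	   "level > iopl" test instead of sneaking past the CAP_SYS_RAWIO check */
	printf("current IOPL: %u\n", iopl);
	return 0;
}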
index 176a973..0add23a 100644 (file)
@@ -11,6 +11,8 @@ obj-y := acpi.o entry.o efi.o efi_stub.o gate.o ia64_ksyms.o irq.o irq_ia64.o ir
 obj-$(CONFIG_EFI_VARS)         += efivars.o
 obj-$(CONFIG_FSYS)             += fsys.o
 obj-$(CONFIG_IA64_BRL_EMU)     += brl_emu.o
+obj-$(CONFIG_IA64_GENERIC)     += acpi-ext.o
+obj-$(CONFIG_IA64_HP_ZX1)      += acpi-ext.o
 obj-$(CONFIG_IA64_MCA)         += mca.o mca_asm.o
 obj-$(CONFIG_IA64_PALINFO)     += palinfo.o
 obj-$(CONFIG_IOSAPIC)          += iosapic.o
index cb1b314..42d0c1f 100644 (file)
@@ -3,69 +3,99 @@
  *
  * Copyright (C) 2003 Hewlett-Packard
  * Copyright (C) Alex Williamson
+ * Copyright (C) Bjorn Helgaas
  *
- * Vendor specific extensions to ACPI.  These are used by both
- * HP and NEC.
+ * Vendor specific extensions to ACPI.
  */
 
 #include <linux/config.h>
+#include <linux/module.h>
 #include <linux/types.h>
 #include <linux/acpi.h>
 #include <linux/efi.h>
 
 #include <asm/acpi-ext.h>
 
-/*
- * Note: Strictly speaking, this is only needed for HP and NEC machines.
- *      However, NEC machines identify themselves as DIG-compliant, so there is
- *      no easy way to #ifdef this out.
- */
+struct acpi_vendor_descriptor {
+       u8                              guid_id;
+       efi_guid_t                      guid;
+};
+
+struct acpi_vendor_info {
+       struct acpi_vendor_descriptor   *descriptor;
+       u8                              *data;
+       u32                             length;
+};
+
 acpi_status
-hp_acpi_csr_space (acpi_handle obj, u64 *csr_base, u64 *csr_length)
+acpi_vendor_resource_match(struct acpi_resource *resource, void *context)
 {
-       int i, offset = 0;
-       acpi_status status;
-       struct acpi_buffer buf;
-       struct acpi_resource_vendor *res;
-       struct acpi_hp_vendor_long *hp_res;
-       efi_guid_t vendor_guid;
-
-       *csr_base = 0;
-       *csr_length = 0;
-
-       status = acpi_get_crs(obj, &buf);
-       if (ACPI_FAILURE(status)) {
-               printk(KERN_ERR PREFIX "Unable to get _CRS data on object\n");
-               return status;
-       }
-
-       res = (struct acpi_resource_vendor *)acpi_get_crs_type(&buf, &offset, ACPI_RSTYPE_VENDOR);
-       if (!res) {
-               printk(KERN_ERR PREFIX "Failed to find config space for device\n");
-               acpi_dispose_crs(&buf);
+       struct acpi_vendor_info *info = (struct acpi_vendor_info *) context;
+       struct acpi_resource_vendor *vendor;
+       struct acpi_vendor_descriptor *descriptor;
+       u32 length;
+
+       if (resource->id != ACPI_RSTYPE_VENDOR)
+               return AE_OK;
+
+       vendor = (struct acpi_resource_vendor *) &resource->data;
+       descriptor = (struct acpi_vendor_descriptor *) vendor->reserved;
+       if (vendor->length <= sizeof(*info->descriptor) ||
+           descriptor->guid_id != info->descriptor->guid_id ||
+           efi_guidcmp(descriptor->guid, info->descriptor->guid))
+               return AE_OK;
+
+       length = vendor->length - sizeof(struct acpi_vendor_descriptor);
+       info->data = acpi_os_allocate(length);
+       if (!info->data)
+               return AE_NO_MEMORY;
+
+       memcpy(info->data, vendor->reserved + sizeof(struct acpi_vendor_descriptor), length);
+       info->length = length;
+       return AE_CTRL_TERMINATE;
+}
+
+acpi_status
+acpi_find_vendor_resource(acpi_handle obj, struct acpi_vendor_descriptor *id,
+               u8 **data, u32 *length)
+{
+       struct acpi_vendor_info info;
+
+       info.descriptor = id;
+       info.data = 0;
+
+       acpi_walk_resources(obj, METHOD_NAME__CRS, acpi_vendor_resource_match, &info);
+       if (!info.data)
                return AE_NOT_FOUND;
-       }
 
-       hp_res = (struct acpi_hp_vendor_long *)(res->reserved);
+       *data = info.data;
+       *length = info.length;
+       return AE_OK;
+}
+
+struct acpi_vendor_descriptor hp_ccsr_descriptor = {
+       .guid_id = 2,
+       .guid    = EFI_GUID(0x69e9adf9, 0x924f, 0xab5f, 0xf6, 0x4a, 0x24, 0xd2, 0x01, 0x37, 0x0e, 0xad)
+};
 
-       if (res->length != HP_CCSR_LENGTH || hp_res->guid_id != HP_CCSR_TYPE) {
-               printk(KERN_ERR PREFIX "Unknown Vendor data\n");
-               acpi_dispose_crs(&buf);
-               return AE_TYPE; /* Revisit error? */
-       }
+acpi_status
+hp_acpi_csr_space(acpi_handle obj, u64 *csr_base, u64 *csr_length)
+{
+       acpi_status status;
+       u8 *data;
+       u32 length;
+       int i;
 
-       memcpy(&vendor_guid, hp_res->guid, sizeof(efi_guid_t));
-       if (efi_guidcmp(vendor_guid, HP_CCSR_GUID) != 0) {
-               printk(KERN_ERR PREFIX "Vendor GUID does not match\n");
-               acpi_dispose_crs(&buf);
-               return AE_TYPE; /* Revisit error? */
-       }
+       status = acpi_find_vendor_resource(obj, &hp_ccsr_descriptor, &data, &length);
 
-       for (i = 0 ; i < 8 ; i++) {
-               *csr_base |= ((u64)(hp_res->csr_base[i]) << (i * 8));
-               *csr_length |= ((u64)(hp_res->csr_length[i]) << (i * 8));
-       }
+       if (ACPI_FAILURE(status) || length != 16) {
+               if (ACPI_SUCCESS(status))
+                       acpi_os_free(data);     /* unexpected size; don't leak the buffer */
+               return AE_NOT_FOUND;
+       }
+
+       memcpy(csr_base, data, sizeof(*csr_base));
+       memcpy(csr_length, data + 8, sizeof(*csr_length));
+       acpi_os_free(data);
 
-       acpi_dispose_crs(&buf);
        return AE_OK;
 }
+
+EXPORT_SYMBOL(hp_acpi_csr_space);
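With the rewrite, a consumer of some other vendor-defined _CRS payload only needs a descriptor and one call; the buffer walking is left to acpi_walk_resources(). A hedged sketch (the guid_id and GUID below are made up for illustration):

static struct acpi_vendor_descriptor my_vendor_descriptor = {
	.guid_id = 3,	/* hypothetical */
	.guid    = EFI_GUID(0x01234567, 0x89ab, 0xcdef, 0x01, 0x23, 0x45, 0x67, 0x89, 0xab, 0xcd, 0xef)
};

static acpi_status
my_get_vendor_data (acpi_handle obj)
{
	acpi_status status;
	u8 *data;
	u32 length;

	status = acpi_find_vendor_resource(obj, &my_vendor_descriptor, &data, &length);
	if (ACPI_FAILURE(status))
		return status;

	/* ... interpret the payload here ... */

	acpi_os_free(data);	/* the helper hands buffer ownership to the caller */
	return AE_OK;
}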
index b1c6734..7f888fb 100644 (file)
@@ -115,134 +115,6 @@ acpi_get_sysname (void)
 #endif
 }
 
-#ifdef CONFIG_ACPI
-
-/**
- * acpi_get_crs - Return the current resource settings for a device
- * obj: A handle for this device
- * buf: A buffer to be populated by this call.
- *
- * Pass a valid handle, typically obtained by walking the namespace and a
- * pointer to an allocated buffer, and this function will fill in the buffer
- * with a list of acpi_resource structures.
- */
-acpi_status
-acpi_get_crs (acpi_handle obj, struct acpi_buffer *buf)
-{
-       acpi_status result;
-       buf->length = 0;
-       buf->pointer = NULL;
-
-       result = acpi_get_current_resources(obj, buf);
-       if (result != AE_BUFFER_OVERFLOW)
-               return result;
-       buf->pointer = kmalloc(buf->length, GFP_KERNEL);
-       if (!buf->pointer)
-               return -ENOMEM;
-
-       return acpi_get_current_resources(obj, buf);
-}
-
-struct acpi_resource *
-acpi_get_crs_next (struct acpi_buffer *buf, int *offset)
-{
-       struct acpi_resource *res;
-
-       if (*offset >= buf->length)
-               return NULL;
-
-       res = buf->pointer + *offset;
-       *offset += res->length;
-       return res;
-}
-
-union acpi_resource_data *
-acpi_get_crs_type (struct acpi_buffer *buf, int *offset, int type)
-{
-       for (;;) {
-               struct acpi_resource *res = acpi_get_crs_next(buf, offset);
-               if (!res)
-                       return NULL;
-               if (res->id == type)
-                       return &res->data;
-       }
-}
-
-void
-acpi_dispose_crs (struct acpi_buffer *buf)
-{
-       kfree(buf->pointer);
-}
-
-void
-acpi_get_crs_addr (struct acpi_buffer *buf, int type, u64 *base, u64 *size, u64 *tra)
-{
-       int offset = 0;
-       struct acpi_resource_address16 *addr16;
-       struct acpi_resource_address32 *addr32;
-       struct acpi_resource_address64 *addr64;
-
-       for (;;) {
-               struct acpi_resource *res = acpi_get_crs_next(buf, &offset);
-               if (!res)
-                       return;
-               switch (res->id) {
-                       case ACPI_RSTYPE_ADDRESS16:
-                               addr16 = (struct acpi_resource_address16 *) &res->data;
-
-                               if (type == addr16->resource_type) {
-                                       *base = addr16->min_address_range;
-                                       *size = addr16->address_length;
-                                       *tra = addr16->address_translation_offset;
-                                       return;
-                               }
-                               break;
-                       case ACPI_RSTYPE_ADDRESS32:
-                               addr32 = (struct acpi_resource_address32 *) &res->data;
-                               if (type == addr32->resource_type) {
-                                       *base = addr32->min_address_range;
-                                       *size = addr32->address_length;
-                                       *tra = addr32->address_translation_offset;
-                                       return;
-                               }
-                               break;
-                       case ACPI_RSTYPE_ADDRESS64:
-                               addr64 = (struct acpi_resource_address64 *) &res->data;
-                               if (type == addr64->resource_type) {
-                                       *base = addr64->min_address_range;
-                                       *size = addr64->address_length;
-                                       *tra = addr64->address_translation_offset;
-                                       return;
-                               }
-                               break;
-               }
-       }
-}
-
-int
-acpi_get_addr_space(void *obj, u8 type, u64 *base, u64 *length, u64 *tra)
-{
-       acpi_status status;
-       struct acpi_buffer buf;
-
-       *base = 0;
-       *length = 0;
-       *tra = 0;
-
-       status = acpi_get_crs((acpi_handle)obj, &buf);
-       if (ACPI_FAILURE(status)) {
-               printk(KERN_ERR PREFIX "Unable to get _CRS data on object\n");
-               return status;
-       }
-
-       acpi_get_crs_addr(&buf, type, base, length, tra);
-
-       acpi_dispose_crs(&buf);
-
-       return AE_OK;
-}
-#endif /* CONFIG_ACPI */
-
 #ifdef CONFIG_ACPI_BOOT
 
 #define ACPI_MAX_PLATFORM_INTERRUPTS   256
@@ -324,7 +196,8 @@ acpi_parse_lsapic (acpi_table_entry_header *header)
                printk(" enabled");
 #ifdef CONFIG_SMP
                smp_boot_data.cpu_phys_id[total_cpus] = (lsapic->id << 8) | lsapic->eid;
-               if (hard_smp_processor_id() == smp_boot_data.cpu_phys_id[total_cpus])
+               if (hard_smp_processor_id()
+                   == (unsigned int) smp_boot_data.cpu_phys_id[total_cpus])
                        printk(" (BSP)");
 #endif
        }
@@ -918,8 +791,7 @@ acpi_register_irq (u32 gsi, u32 polarity, u32 trigger)
                return 0;
 
        /* Turn it on */
-       vector = iosapic_register_intr (gsi, polarity ? IOSAPIC_POL_HIGH : IOSAPIC_POL_LOW,
-                       trigger ? IOSAPIC_EDGE : IOSAPIC_LEVEL);
+       vector = iosapic_register_intr (gsi, polarity, trigger);
        return vector;
 }
 
index 541addc..0b286ca 100644 (file)
@@ -59,7 +59,7 @@ ia64_emulate_brl (struct pt_regs *regs, unsigned long ar_ec)
        unsigned long next_ip;
        struct siginfo siginfo;
        struct illegal_op_return rv;
-       int tmp_taken, unimplemented_address;
+       long tmp_taken, unimplemented_address;
 
        rv.fkt = (unsigned long) -1;
 
index 78c8c05..cb967c5 100644 (file)
@@ -203,16 +203,16 @@ STUB_GET_NEXT_HIGH_MONO_COUNT(virt, )
 STUB_RESET_SYSTEM(virt, )
 
 void
-efi_gettimeofday (struct timeval *tv)
+efi_gettimeofday (struct timespec *ts)
 {
        efi_time_t tm;
 
-       memset(tv, 0, sizeof(tv));
+       memset(ts, 0, sizeof(*ts));
        if ((*efi.get_time)(&tm, 0) != EFI_SUCCESS)
                return;
 
-       tv->tv_sec = mktime(tm.year, tm.month, tm.day, tm.hour, tm.minute, tm.second);
-       tv->tv_usec = tm.nanosecond / 1000;
+       ts->tv_sec = mktime(tm.year, tm.month, tm.day, tm.hour, tm.minute, tm.second);
+       ts->tv_nsec = tm.nanosecond;
 }
 
 static int
@@ -512,7 +512,7 @@ efi_init (void)
        /* Show what we know for posterity */
        c16 = __va(efi.systab->fw_vendor);
        if (c16) {
-               for (i = 0;i < sizeof(vendor) && *c16; ++i)
+               for (i = 0;i < (int) sizeof(vendor) && *c16; ++i)
                        vendor[i] = *c16++;
                vendor[i] = '\0';
        }
@@ -520,7 +520,7 @@ efi_init (void)
        printk(KERN_INFO "EFI v%u.%.02u by %s:",
               efi.systab->hdr.revision >> 16, efi.systab->hdr.revision & 0xffff, vendor);
 
-       for (i = 0; i < efi.systab->nr_tables; i++) {
+       for (i = 0; i < (int) efi.systab->nr_tables; i++) {
                if (efi_guidcmp(config_tables[i].guid, MPS_TABLE_GUID) == 0) {
                        efi.mps = __va(config_tables[i].table);
                        printk(" MPS=0x%lx", config_tables[i].table);
index b5ab1a0..920675b 100644 (file)
@@ -138,8 +138,7 @@ utf8_strlen(efi_char16_t *data, unsigned long maxlength)
 static inline unsigned long
 utf8_strsize(efi_char16_t *data, unsigned long maxlength)
 {
-       return utf8_strlen(data, maxlength/sizeof(efi_char16_t)) *
-               sizeof(efi_char16_t);
+       return utf8_strlen(data, maxlength/sizeof(efi_char16_t)) * sizeof(efi_char16_t);
 }
 
 
@@ -170,8 +169,7 @@ efivar_create_proc_entry(unsigned long variable_name_size,
                         efi_guid_t *vendor_guid)
 {
 
-       int i, short_name_size = variable_name_size /
-               sizeof(efi_char16_t) + 38;
+       int i, short_name_size = variable_name_size / sizeof(efi_char16_t) + 38;
        char *short_name;
        efivar_entry_t *new_efivar;
 
@@ -192,7 +190,7 @@ efivar_create_proc_entry(unsigned long variable_name_size,
 
        /* Convert Unicode to normal chars (assume top bits are 0),
           ala UTF-8 */
-       for (i=0; i<variable_name_size / sizeof(efi_char16_t); i++) {
+       for (i=0; i< (int) (variable_name_size / sizeof(efi_char16_t)); i++) {
                short_name[i] = variable_name[i] & 0xFF;
        }
 
index bf5f238..138203f 100644 (file)
@@ -37,7 +37,7 @@ static char fw_mem[(  sizeof(struct ia64_boot_param)
                    + NUM_MEM_DESCS*(sizeof(efi_memory_desc_t))
                    + 1024)] __attribute__ ((aligned (8)));
 
-#ifdef CONFIG_IA64_HP_SIM
+#if defined(CONFIG_IA64_HP_SIM) || defined(CONFIG_IA64_GENERIC)
 
 /* Simulator system calls: */
 
@@ -233,7 +233,7 @@ asm (
 static efi_status_t
 efi_get_time (efi_time_t *tm, efi_time_cap_t *tc)
 {
-#ifdef CONFIG_IA64_HP_SIM
+#if defined(CONFIG_IA64_HP_SIM) || defined(CONFIG_IA64_GENERIC)
        struct {
                int tv_sec;     /* must be 32bits to work */
                int tv_usec;
@@ -255,7 +255,7 @@ efi_get_time (efi_time_t *tm, efi_time_cap_t *tc)
 static void
 efi_reset_system (int reset_type, efi_status_t status, unsigned long data_size, efi_char16_t *data)
 {
-#ifdef CONFIG_IA64_HP_SIM
+#if defined(CONFIG_IA64_HP_SIM) || defined(CONFIG_IA64_GENERIC)
        ssc(status, 0, 0, 0, SSC_EXIT);
 #else
 #      error Not implemented yet...
index 9d40ff4..ff50c19 100644 (file)
@@ -733,3 +733,82 @@ SET_REG(b4);
 SET_REG(b5);
 
 #endif /* CONFIG_IA64_BRL_EMU */
+
+#ifdef CONFIG_SMP
+       /*
+        * This routine handles spinlock contention.  It uses a non-standard calling
+        * convention to avoid converting leaf routines into interior routines.  Because
+        * of this special convention, there are several restrictions:
+        *
+        * - do not use gp-relative variables; this code is called from the kernel
+        *   and from modules, so r1 is undefined.
+        * - do not use stacked registers, the caller owns them.
+        * - do not use the scratch stack space, the caller owns it.
+        * - do not use any registers other than the ones listed below
+        *
+        * Inputs:
+        *   ar.pfs - saved CFM of caller
+        *   ar.ccv - 0 (and available for use)
+        *   r28    - available for use.
+        *   r29    - available for use.
+        *   r30    - available for use.
+        *   r31    - address of lock, available for use.
+        *   b7     - return address
+        *   p14    - available for use.
+        *
+        * If you patch this code to use more registers, do not forget to update
+        * the clobber lists for spin_lock() in include/asm-ia64/spinlock.h.
+        */
+
+#if __GNUC__ < 3 || (__GNUC__ == 3 && __GNUC_MINOR__ < 4)
+
+GLOBAL_ENTRY(ia64_spinlock_contention_pre3_4)
+       .prologue
+       .save ar.pfs, r0        // this code effectively has a zero frame size
+       .save rp, r28
+       .body
+       nop 0
+       nop 0
+       .restore sp             // pop existing prologue after next insn
+       mov b6 = r28
+       .prologue
+       .save ar.pfs, r0
+       .altrp b6
+       .body
+.wait:
+       // exponential backoff, kdb, lockmeter etc. go in here
+       hint @pause
+       ld4.bias r30=[r31]
+       nop 0
+       ;;
+       cmp4.eq p14,p0=r30,r0
+(p14)  br.cond.sptk.few b6     // lock is now free, try to acquire
+       br.cond.sptk.few .wait
+END(ia64_spinlock_contention_pre3_4)
+
+#else
+
+GLOBAL_ENTRY(ia64_spinlock_contention)
+       .prologue
+       .altrp b6
+       .body
+.wait:
+       // exponential backoff, kdb, lockmeter etc. go in here
+       hint @pause
+       ld4.bias r30=[r31]
+       ;;
+       cmp4.ne p14,p0=r30,r0
+       mov r30 = 1
+(p14)  br.cond.sptk.few .wait
+       ;;
+       cmpxchg4.acq r30=[r31], r30, ar.ccv
+       ;;
+       cmp4.ne p14,p0=r0,r30
+(p14)  br.cond.sptk.few .wait
+
+       br.ret.sptk.many b6     // lock is now taken
+END(ia64_spinlock_contention)
+
+#endif
+
+#endif /* CONFIG_SMP */
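Conceptually, both entry points implement the classic test-and-test-and-set loop: spin on plain loads until the lock word looks free, then retry the atomic acquire. A C model of the same shape, using GCC atomic builtins (an illustration of the idea, not the kernel's implementation):

#include <stdio.h>

static int lock;	/* 0 = free, 1 = taken */

static void
spin_lock_contended (int *l)
{
	int expected;

	for (;;) {
		while (__atomic_load_n(l, __ATOMIC_RELAXED))
			;	/* the ld4.bias wait loop; backoff would go here */
		expected = 0;
		if (__atomic_compare_exchange_n(l, &expected, 1, 0,
						__ATOMIC_ACQUIRE, __ATOMIC_RELAXED))
			return;	/* cmpxchg4.acq won the lock */
	}
}

int main(void)
{
	spin_lock_contended(&lock);
	printf("lock acquired\n");
	return 0;
}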
index 0a06065..324f779 100644 (file)
@@ -46,6 +46,7 @@ EXPORT_SYMBOL(ip_fast_csum);
 EXPORT_SYMBOL(__ia64_memcpy_fromio);
 EXPORT_SYMBOL(__ia64_memcpy_toio);
 EXPORT_SYMBOL(__ia64_memset_c_io);
+EXPORT_SYMBOL(io_space);
 
 #include <asm/semaphore.h>
 EXPORT_SYMBOL_NOVERS(__down);
@@ -56,6 +57,12 @@ EXPORT_SYMBOL_NOVERS(__up);
 #include <asm/page.h>
 EXPORT_SYMBOL(clear_page);
 
+#ifdef CONFIG_VIRTUAL_MEM_MAP
+#include <asm/pgtable.h>
+EXPORT_SYMBOL(vmalloc_end);
+EXPORT_SYMBOL(ia64_pfn_valid);
+#endif
+
 #include <asm/processor.h>
 EXPORT_SYMBOL(cpu_info__per_cpu);
 EXPORT_SYMBOL(kernel_thread);
@@ -161,3 +168,13 @@ EXPORT_SYMBOL(unw_access_br);
 EXPORT_SYMBOL(unw_access_fr);
 EXPORT_SYMBOL(unw_access_ar);
 EXPORT_SYMBOL(unw_access_pr);
+
+#ifdef CONFIG_SMP
+#if __GNUC__ < 3 || (__GNUC__ == 3 && __GNUC_MINOR__ < 4)
+extern void ia64_spinlock_contention_pre3_4 (void);
+EXPORT_SYMBOL(ia64_spinlock_contention_pre3_4);
+#else
+extern void ia64_spinlock_contention (void);
+EXPORT_SYMBOL(ia64_spinlock_contention);
+#endif
+#endif
index 8ace5ae..cb8d2c6 100644 (file)
@@ -581,9 +581,8 @@ iosapic_override_isa_irq (unsigned int isa_irq, unsigned int gsi,
        register_intr(gsi, vector, IOSAPIC_LOWEST_PRIORITY, polarity, trigger);
 
        DBG("ISA: IRQ %u -> GSI 0x%x (%s,%s) -> CPU 0x%04x vector %d\n",
-           isa_irq, gsi,
-           polarity == IOSAPIC_POL_HIGH ? "high" : "low", trigger == IOSAPIC_EDGE ? "edge" : "level",
-           dest, vector);
+           isa_irq, gsi, polarity == IOSAPIC_POL_HIGH ? "high" : "low",
+           trigger == IOSAPIC_EDGE ? "edge" : "level", dest, vector);
 
        /* program the IOSAPIC routing table */
        set_rte(vector, dest);
@@ -635,7 +634,6 @@ iosapic_init (unsigned long phys_addr, unsigned int gsi_base)
               (ver & 0xf0) >> 4, (ver & 0x0f), phys_addr, gsi_base, gsi_base + num_rte - 1);
 
        if ((gsi_base == 0) && pcat_compat) {
-
                /*
                 * Map the legacy ISA devices into the IOSAPIC data.  Some of these may
                 * get reprogrammed later on with data from the ACPI Interrupt Source
@@ -646,20 +644,11 @@ iosapic_init (unsigned long phys_addr, unsigned int gsi_base)
        }
 }
 
-static void __init
-fixup_vector (int vector, unsigned int gsi, const char *pci_id)
+void
+iosapic_enable_intr (unsigned int vector)
 {
-       struct hw_interrupt_type *irq_type = &irq_type_iosapic_level;
-       irq_desc_t *idesc;
        unsigned int dest;
 
-       idesc = irq_desc(vector);
-       if (idesc->handler != irq_type) {
-               if (idesc->handler != &no_irq_type)
-                       printk(KERN_INFO "IOSAPIC: changing vector %d from %s to %s\n",
-                              vector, idesc->handler->typename, irq_type->typename);
-               idesc->handler = irq_type;
-       }
 #ifdef CONFIG_SMP
        /*
         * For platforms that do not support interrupt redirect via the XTP interface, we
@@ -687,10 +676,12 @@ fixup_vector (int vector, unsigned int gsi, const char *pci_id)
 #endif
        set_rte(vector, dest);
 
-       printk(KERN_INFO "IOSAPIC: %s -> GSI 0x%x -> CPU 0x%04x vector %d\n",
-              pci_id, gsi, dest, vector);
+       printk(KERN_INFO "IOSAPIC: vector %d -> CPU 0x%04x, enabled\n",
+              vector, dest);
 }
 
+#ifdef CONFIG_ACPI_PCI
+
 void __init
 iosapic_parse_prt (void)
 {
@@ -699,6 +690,8 @@ iosapic_parse_prt (void)
        unsigned int gsi;
        int vector;
        char pci_id[16];
+       struct hw_interrupt_type *irq_type = &irq_type_iosapic_level;
+       irq_desc_t *idesc;
 
        list_for_each(node, &acpi_prt.entries) {
                entry = list_entry(node, struct acpi_prt_entry, node);
@@ -711,6 +704,9 @@ iosapic_parse_prt (void)
 
                vector = gsi_to_vector(gsi);
                if (vector < 0) {
+                       if (find_iosapic(gsi) < 0)
+                               continue;
+
                        /* allocate a vector for this interrupt line */
                        if (pcat_compat && (gsi < 16))
                                vector = isa_irq_to_vector(gsi);
@@ -718,11 +714,22 @@ iosapic_parse_prt (void)
                                /* new GSI; allocate a vector for it */
                                vector = ia64_alloc_vector();
 
-                       register_intr(gsi, vector, IOSAPIC_LOWEST_PRIORITY, IOSAPIC_POL_LOW, IOSAPIC_LEVEL);
+                       register_intr(gsi, vector, IOSAPIC_LOWEST_PRIORITY, IOSAPIC_POL_LOW,
+                                     IOSAPIC_LEVEL);
                }
                snprintf(pci_id, sizeof(pci_id), "%02x:%02x:%02x[%c]",
                         entry->id.segment, entry->id.bus, entry->id.device, 'A' + entry->pin);
 
-               fixup_vector(vector, gsi, pci_id);
+               /*
+                * If vector was previously initialized to a different
+                * handler, re-initialize.
+                */
+               idesc = irq_desc(vector);
+               if (idesc->handler != irq_type)
+                       register_intr(gsi, vector, IOSAPIC_LOWEST_PRIORITY, IOSAPIC_POL_LOW,
+                                     IOSAPIC_LEVEL);
+
        }
 }
+
+#endif /* CONFIG_ACPI_PCI */
index e3b0678..6ed0e52 100644 (file)
@@ -18,7 +18,6 @@
  */
 
 #include <linux/config.h>
-#include <linux/ptrace.h>
 #include <linux/errno.h>
 #include <linux/signal.h>
 #include <linux/sched.h>
@@ -33,6 +32,7 @@
 #include <linux/irq.h>
 #include <linux/proc_fs.h>
 #include <linux/seq_file.h>
+#include <linux/kallsyms.h>
 
 #include <asm/atomic.h>
 #include <asm/io.h>
@@ -50,7 +50,7 @@
  * Linux has a controller-independent x86 interrupt architecture.
  * every controller has a 'controller-template', that is used
  * by the main code to do the right thing. Each driver-visible
- * interrupt source is transparently wired to the apropriate
+ * interrupt source is transparently wired to the appropriate
  * controller. Thus drivers need not be aware of the
  * interrupt-controller.
  *
@@ -91,7 +91,8 @@ static void register_irq_proc (unsigned int irq);
  * Special irq handlers.
  */
 
-void no_action(int cpl, void *dev_id, struct pt_regs *regs) { }
+irqreturn_t no_action(int cpl, void *dev_id, struct pt_regs *regs)
+{ return IRQ_NONE; }
 
 /*
  * Generic no controller code
@@ -141,9 +142,11 @@ struct hw_interrupt_type no_irq_type = {
 };
 
 atomic_t irq_err_count;
-#if defined(CONFIG_X86) && defined(CONFIG_X86_IO_APIC) && defined(APIC_MISMATCH_DEBUG)
+#ifdef CONFIG_X86_IO_APIC
+#ifdef APIC_MISMATCH_DEBUG
 atomic_t irq_mis_count;
 #endif
+#endif
 
 /*
  * Generic, controller-independent functions:
@@ -178,9 +181,10 @@ int show_interrupts(struct seq_file *p, void *v)
 #endif
                seq_printf(p, " %14s", idesc->handler->typename);
                seq_printf(p, "  %s", action->name);
+
                for (action=action->next; action; action = action->next)
                        seq_printf(p, ", %s", action->name);
-               
+
                seq_putc(p, '\n');
 skip:
                spin_unlock_irqrestore(&idesc->lock, flags);
@@ -190,17 +194,19 @@ skip:
                if (cpu_online(j))
                        seq_printf(p, "%10u ", nmi_count(j));
        seq_putc(p, '\n');
-#if defined(CONFIG_SMP) && defined(CONFIG_X86)
+#ifdef CONFIG_X86_LOCAL_APIC
        seq_puts(p, "LOC: ");
        for (j = 0; j < NR_CPUS; j++)
                if (cpu_online(j))
-                       seq_printf(p, "%10u ", apic_timer_irqs[j]);
+                       seq_printf(p, "%10u ", irq_stat[j].apic_timer_irqs);
        seq_putc(p, '\n');
 #endif
        seq_printf(p, "ERR: %10u\n", atomic_read(&irq_err_count));
-#if defined(CONFIG_X86) && defined(CONFIG_X86_IO_APIC) && defined(APIC_MISMATCH_DEBUG)
+#ifdef CONFIG_X86_IO_APIC
+#ifdef APIC_MISMATCH_DEBUG
        seq_printf(p, "MIS: %10u\n", atomic_read(&irq_mis_count));
 #endif
+#endif
        return 0;
 }
 
@@ -219,21 +225,46 @@ inline void synchronize_irq(unsigned int irq)
  * waste of time and is not what some drivers would
  * prefer.
  */
-int handle_IRQ_event(unsigned int irq, struct pt_regs * regs, struct irqaction * action)
+int handle_IRQ_event(unsigned int irq,
+               struct pt_regs *regs, struct irqaction *action)
 {
        int status = 1; /* Force the "do bottom halves" bit */
+       int retval = 0;
+       struct irqaction *first_action = action;
 
        if (!(action->flags & SA_INTERRUPT))
                local_irq_enable();
 
        do {
                status |= action->flags;
-               action->handler(irq, action->dev_id, regs);
+               retval |= action->handler(irq, action->dev_id, regs);
                action = action->next;
        } while (action);
        if (status & SA_SAMPLE_RANDOM)
                add_interrupt_randomness(irq);
        local_irq_disable();
+       if (retval != 1) {
+               static int count = 100;
+               if (count) {
+                       count--;
+                       if (retval) {
+                               printk("irq event %d: bogus retval mask %x\n",
+                                       irq, retval);
+                       } else {
+                               printk("irq %d: nobody cared!\n", irq);
+                       }
+                       dump_stack();
+                       printk("handlers:\n");
+                       action = first_action;
+                       do {
+                               printk("[<%p>]", action->handler);
+                               print_symbol(" (%s)",
+                                       (unsigned long)action->handler);
+                               printk("\n");
+                               action = action->next;
+                       } while (action);
+               }
+       }
 
        return status;
 }
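A handler written for the new convention reports whether its device actually raised the interrupt, which is exactly what the "nobody cared" diagnostic above keys on. A hedged sketch for a hypothetical device (struct mydev, MYDEV_STATUS and MYDEV_IRQ_PENDING are made up):

static irqreturn_t
mydev_interrupt (int irq, void *dev_id, struct pt_regs *regs)
{
	struct mydev *dev = dev_id;

	if (!(readl(dev->mmio + MYDEV_STATUS) & MYDEV_IRQ_PENDING))
		return IRQ_NONE;	/* shared line, not our device */

	/* ... service the device ... */
	return IRQ_HANDLED;
}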
@@ -455,7 +486,7 @@ unsigned int do_IRQ(unsigned long irq, struct pt_regs *regs)
  */
 
 int request_irq(unsigned int irq,
-               void (*handler)(int, void *, struct pt_regs *),
+               irqreturn_t (*handler)(int, void *, struct pt_regs *),
                unsigned long irqflags,
                const char * devname,
                void *dev_id)
@@ -482,7 +513,7 @@ int request_irq(unsigned int irq,
                return -EINVAL;
 
        action = (struct irqaction *)
-                       kmalloc(sizeof(struct irqaction), GFP_KERNEL);
+                       kmalloc(sizeof(struct irqaction), GFP_ATOMIC);
        if (!action)
                return -ENOMEM;
 
@@ -511,10 +542,7 @@ int request_irq(unsigned int irq,
  *     does not return until any executing interrupts for this IRQ
  *     have completed.
  *
- *     This function may be called from interrupt context. 
- *
- *     Bugs: Attempting to free an irq in a handler for the same irq hangs
- *           the machine.
+ *     This function must not be called from interrupt context.
  */
 
 void free_irq(unsigned int irq, void *dev_id)
@@ -545,11 +573,8 @@ void free_irq(unsigned int irq, void *dev_id)
                        }
                        spin_unlock_irqrestore(&desc->lock,flags);
 
-#ifdef CONFIG_SMP
                        /* Wait to make sure it's not being used on another CPU */
-                       while (desc->status & IRQ_INPROGRESS)
-                               synchronize_irq(irq);
-#endif
+                       synchronize_irq(irq);
                        kfree(action);
                        return;
                }
@@ -664,7 +689,6 @@ unsigned long probe_irq_on(void)
  *     only return ISA irq numbers - just so that we reset them
  *     all to a known state.
  */
-
 unsigned int probe_irq_mask(unsigned long val)
 {
        int i;
@@ -705,7 +729,7 @@ unsigned int probe_irq_mask(unsigned long val)
  *     The interrupt probe logic state is returned to its previous
  *     value.
  *
- *     BUGS: When used in a module (which arguably shouldnt happen)
+ *     BUGS: When used in a module (which arguably shouldn't happen)
  *     nothing prevents two IRQ probe callers from overlapping. The
  *     results of this are non-optimal.
  */
@@ -748,6 +772,8 @@ int setup_irq(unsigned int irq, struct irqaction * new)
        struct irqaction *old, **p;
        irq_desc_t *desc = irq_desc(irq);
 
+       if (desc->handler == &no_irq_type)
+               return -ENOSYS;
        /*
         * Some drivers like serial.c use request_irq() heavily,
         * so we have to be careful not to interfere with a
@@ -808,11 +834,11 @@ static struct proc_dir_entry * irq_dir [NR_IRQS];
 
 #define HEX_DIGITS 8
 
-static int parse_hex_value (const char *buffer, unsigned long count, unsigned long *ret)
+static unsigned int parse_hex_value (const char *buffer,
+               unsigned long count, unsigned long *ret)
 {
        unsigned char hexnum [HEX_DIGITS];
-       unsigned long value;
-       int i;
+       unsigned long value, i;
 
        if (!count)
                return -EINVAL;
@@ -950,12 +976,13 @@ static void register_irq_proc (unsigned int irq)
 #if CONFIG_SMP
        {
                struct proc_dir_entry *entry;
+
                /* create /proc/irq/1234/smp_affinity */
                entry = create_proc_entry("smp_affinity", 0600, irq_dir[irq]);
 
                if (entry) {
                        entry->nlink = 1;
-                       entry->data = (void *)(unsigned long)irq;
+                       entry->data = (void *)(long)irq;
                        entry->read_proc = irq_affinity_read_proc;
                        entry->write_proc = irq_affinity_write_proc;
                }
index d158a7c..49c6572 100644 (file)
@@ -145,7 +145,7 @@ ia64_handle_irq (ia64_vector vector, struct pt_regs *regs)
 }
 
 #ifdef CONFIG_SMP
-extern void handle_IPI (int irq, void *dev_id, struct pt_regs *regs);
+extern irqreturn_t handle_IPI (int irq, void *dev_id, struct pt_regs *regs);
 
 static struct irqaction ipi_irqaction = {
        .handler =      handle_IPI,
index 7d524be..931c69c 100644 (file)
@@ -1,12 +1,14 @@
 #include <linux/config.h>
 
+#include <asm/system.h>
+
 #ifdef CONFIG_IA64_GENERIC
 
 #include <linux/kernel.h>
 #include <linux/string.h>
 
-#include <asm/page.h>
 #include <asm/machvec.h>
+#include <asm/page.h>
 
 struct ia64_machine_vector ia64_mv;
 
@@ -43,3 +45,9 @@ void
 machvec_noop (void)
 {
 }
+
+void
+machvec_memory_fence (void)
+{
+       mb();
+}
index 5808798..88148bc 100644 (file)
@@ -3,6 +3,9 @@
  * Purpose:    Generic MCA handling layer
  *
  * Updated for latest kernel
+ * Copyright (C) 2003 Hewlett-Packard Co
+ *     David Mosberger-Tang <davidm@hpl.hp.com>
+ *
  * Copyright (C) 2002 Dell Computer Corporation
  * Copyright (C) Matt Domsch (Matt_Domsch@dell.com)
  *
@@ -18,6 +21,7 @@
  * Copyright (C) 1999 Silicon Graphics, Inc.
  * Copyright (C) Vijay Chander(vijay@engr.sgi.com)
  *
+ * 03/04/15 D. Mosberger Added INIT backtrace support.
  * 02/03/25 M. Domsch  GUID cleanups
  *
  * 02/01/04 J. Hall    Aligned MCA stack to 16 bytes, added platform vs. CPU
@@ -39,6 +43,7 @@
 #include <linux/sched.h>
 #include <linux/interrupt.h>
 #include <linux/irq.h>
+#include <linux/kallsyms.h>
 #include <linux/smp_lock.h>
 #include <linux/bootmem.h>
 #include <linux/acpi.h>
@@ -47,6 +52,7 @@
 #include <linux/kernel.h>
 #include <linux/smp.h>
 
+#include <asm/delay.h>
 #include <asm/machvec.h>
 #include <asm/page.h>
 #include <asm/ptrace.h>
@@ -139,7 +145,7 @@ ia64_mca_log_sal_error_record(int sal_info_type, int called_from_init)
 
        /* Get the MCA error record */
        if (!ia64_log_get(sal_info_type, (prfunc_t)printk))
-               return platform_err;                 // no record retrieved
+               return platform_err;            /* no record retrieved */
 
        /* TODO:
         * 1. analyze error logs to determine recoverability
@@ -166,7 +172,7 @@ mca_handler_platform (void)
 
 }
 
-void
+irqreturn_t
 ia64_mca_cpe_int_handler (int cpe_irq, void *arg, struct pt_regs *ptregs)
 {
        IA64_MCA_DEBUG("ia64_mca_cpe_int_handler: received interrupt. CPU:%d vector = %#x\n",
@@ -174,20 +180,190 @@ ia64_mca_cpe_int_handler (int cpe_irq, void *arg, struct pt_regs *ptregs)
 
        /* Get the CMC error record and log it */
        ia64_mca_log_sal_error_record(SAL_INFO_TYPE_CPE, 0);
+       return IRQ_HANDLED;
+}
+
+static void
+show_min_state (pal_min_state_area_t *minstate)
+{
+       u64 iip = minstate->pmsa_iip + ((struct ia64_psr *)(&minstate->pmsa_ipsr))->ri;
+       u64 xip = minstate->pmsa_xip + ((struct ia64_psr *)(&minstate->pmsa_xpsr))->ri;
+
+       printk("NaT bits\t%016lx\n", minstate->pmsa_nat_bits);
+       printk("pr\t\t%016lx\n", minstate->pmsa_pr);
+       printk("b0\t\t%016lx ", minstate->pmsa_br0); print_symbol("%s\n", minstate->pmsa_br0);
+       printk("ar.rsc\t\t%016lx\n", minstate->pmsa_rsc);
+       printk("cr.iip\t\t%016lx ", iip); print_symbol("%s\n", iip);
+       printk("cr.ipsr\t\t%016lx\n", minstate->pmsa_ipsr);
+       printk("cr.ifs\t\t%016lx\n", minstate->pmsa_ifs);
+       printk("xip\t\t%016lx ", xip); print_symbol("%s\n", xip);
+       printk("xpsr\t\t%016lx\n", minstate->pmsa_xpsr);
+       printk("xfs\t\t%016lx\n", minstate->pmsa_xfs);
+       printk("b1\t\t%016lx ", minstate->pmsa_br1);
+       print_symbol("%s\n", minstate->pmsa_br1);
+
+       printk("\nstatic registers r0-r15:\n");
+       printk(" r0- 3 %016lx %016lx %016lx %016lx\n",
+              0UL, minstate->pmsa_gr[0], minstate->pmsa_gr[1], minstate->pmsa_gr[2]);
+       printk(" r4- 7 %016lx %016lx %016lx %016lx\n",
+              minstate->pmsa_gr[3], minstate->pmsa_gr[4],
+              minstate->pmsa_gr[5], minstate->pmsa_gr[6]);
+       printk(" r8-11 %016lx %016lx %016lx %016lx\n",
+              minstate->pmsa_gr[7], minstate->pmsa_gr[8],
+              minstate->pmsa_gr[9], minstate->pmsa_gr[10]);
+       printk("r12-15 %016lx %016lx %016lx %016lx\n",
+              minstate->pmsa_gr[11], minstate->pmsa_gr[12],
+              minstate->pmsa_gr[13], minstate->pmsa_gr[14]);
+
+       printk("\nbank 0:\n");
+       printk("r16-19 %016lx %016lx %016lx %016lx\n",
+              minstate->pmsa_bank0_gr[0], minstate->pmsa_bank0_gr[1],
+              minstate->pmsa_bank0_gr[2], minstate->pmsa_bank0_gr[3]);
+       printk("r20-23 %016lx %016lx %016lx %016lx\n",
+              minstate->pmsa_bank0_gr[4], minstate->pmsa_bank0_gr[5],
+              minstate->pmsa_bank0_gr[6], minstate->pmsa_bank0_gr[7]);
+       printk("r24-27 %016lx %016lx %016lx %016lx\n",
+              minstate->pmsa_bank0_gr[8], minstate->pmsa_bank0_gr[9],
+              minstate->pmsa_bank0_gr[10], minstate->pmsa_bank0_gr[11]);
+       printk("r28-31 %016lx %016lx %016lx %016lx\n",
+              minstate->pmsa_bank0_gr[12], minstate->pmsa_bank0_gr[13],
+              minstate->pmsa_bank0_gr[14], minstate->pmsa_bank0_gr[15]);
+
+       printk("\nbank 1:\n");
+       printk("r16-19 %016lx %016lx %016lx %016lx\n",
+              minstate->pmsa_bank1_gr[0], minstate->pmsa_bank1_gr[1],
+              minstate->pmsa_bank1_gr[2], minstate->pmsa_bank1_gr[3]);
+       printk("r20-23 %016lx %016lx %016lx %016lx\n",
+              minstate->pmsa_bank1_gr[4], minstate->pmsa_bank1_gr[5],
+              minstate->pmsa_bank1_gr[6], minstate->pmsa_bank1_gr[7]);
+       printk("r24-27 %016lx %016lx %016lx %016lx\n",
+              minstate->pmsa_bank1_gr[8], minstate->pmsa_bank1_gr[9],
+              minstate->pmsa_bank1_gr[10], minstate->pmsa_bank1_gr[11]);
+       printk("r28-31 %016lx %016lx %016lx %016lx\n",
+              minstate->pmsa_bank1_gr[12], minstate->pmsa_bank1_gr[13],
+              minstate->pmsa_bank1_gr[14], minstate->pmsa_bank1_gr[15]);
+}
+
+static void
+fetch_min_state (pal_min_state_area_t *ms, struct pt_regs *pt, struct switch_stack *sw)
+{
+       u64 *dst_banked, *src_banked, bit, shift, nat_bits;
+       int i;
+
+       /*
+        * First, update the pt-regs and switch-stack structures with the contents stored
+        * in the min-state area:
+        */
+       if (((struct ia64_psr *) &ms->pmsa_ipsr)->ic == 0) {
+               pt->cr_ipsr = ms->pmsa_xpsr;
+               pt->cr_iip = ms->pmsa_xip;
+               pt->cr_ifs = ms->pmsa_xfs;
+       } else {
+               pt->cr_ipsr = ms->pmsa_ipsr;
+               pt->cr_iip = ms->pmsa_iip;
+               pt->cr_ifs = ms->pmsa_ifs;
+       }
+       pt->ar_rsc = ms->pmsa_rsc;
+       pt->pr = ms->pmsa_pr;
+       pt->r1 = ms->pmsa_gr[0];
+       pt->r2 = ms->pmsa_gr[1];
+       pt->r3 = ms->pmsa_gr[2];
+       sw->r4 = ms->pmsa_gr[3];
+       sw->r5 = ms->pmsa_gr[4];
+       sw->r6 = ms->pmsa_gr[5];
+       sw->r7 = ms->pmsa_gr[6];
+       pt->r8 = ms->pmsa_gr[7];
+       pt->r9 = ms->pmsa_gr[8];
+       pt->r10 = ms->pmsa_gr[9];
+       pt->r11 = ms->pmsa_gr[10];
+       pt->r12 = ms->pmsa_gr[11];
+       pt->r13 = ms->pmsa_gr[12];
+       pt->r14 = ms->pmsa_gr[13];
+       pt->r15 = ms->pmsa_gr[14];
+       dst_banked = &pt->r16;          /* r16-r31 are contiguous in struct pt_regs */
+       src_banked = ms->pmsa_bank1_gr;
+       for (i = 0; i < 16; ++i)
+               dst_banked[i] = src_banked[i];
+       pt->b0 = ms->pmsa_br0;
+       sw->b1 = ms->pmsa_br1;
+
+       /* construct the NaT bits for the pt-regs structure: */
+#      define PUT_NAT_BIT(dst, addr)                                   \
+       do {                                                            \
+               bit = nat_bits & 1; nat_bits >>= 1;                     \
+               shift = ((unsigned long) addr >> 3) & 0x3f;             \
+               dst = ((dst) & ~(1UL << shift)) | (bit << shift);       \
+       } while (0)
+
+       /* Rotate the saved NaT bits such that bit 0 corresponds to pmsa_gr[0]: */
+       shift = ((unsigned long) &ms->pmsa_gr[0] >> 3) & 0x3f;
+       nat_bits = (ms->pmsa_nat_bits >> shift) | (ms->pmsa_nat_bits << (64 - shift));
+
+       PUT_NAT_BIT(sw->caller_unat, &pt->r1);
+       PUT_NAT_BIT(sw->caller_unat, &pt->r2);
+       PUT_NAT_BIT(sw->caller_unat, &pt->r3);
+       PUT_NAT_BIT(sw->ar_unat, &sw->r4);
+       PUT_NAT_BIT(sw->ar_unat, &sw->r5);
+       PUT_NAT_BIT(sw->ar_unat, &sw->r6);
+       PUT_NAT_BIT(sw->ar_unat, &sw->r7);
+       PUT_NAT_BIT(sw->caller_unat, &pt->r8);  PUT_NAT_BIT(sw->caller_unat, &pt->r9);
+       PUT_NAT_BIT(sw->caller_unat, &pt->r10); PUT_NAT_BIT(sw->caller_unat, &pt->r11);
+       PUT_NAT_BIT(sw->caller_unat, &pt->r12); PUT_NAT_BIT(sw->caller_unat, &pt->r13);
+       PUT_NAT_BIT(sw->caller_unat, &pt->r14); PUT_NAT_BIT(sw->caller_unat, &pt->r15);
+       nat_bits >>= 16;        /* skip over bank0 NaT bits */
+       PUT_NAT_BIT(sw->caller_unat, &pt->r16); PUT_NAT_BIT(sw->caller_unat, &pt->r17);
+       PUT_NAT_BIT(sw->caller_unat, &pt->r18); PUT_NAT_BIT(sw->caller_unat, &pt->r19);
+       PUT_NAT_BIT(sw->caller_unat, &pt->r20); PUT_NAT_BIT(sw->caller_unat, &pt->r21);
+       PUT_NAT_BIT(sw->caller_unat, &pt->r22); PUT_NAT_BIT(sw->caller_unat, &pt->r23);
+       PUT_NAT_BIT(sw->caller_unat, &pt->r24); PUT_NAT_BIT(sw->caller_unat, &pt->r25);
+       PUT_NAT_BIT(sw->caller_unat, &pt->r26); PUT_NAT_BIT(sw->caller_unat, &pt->r27);
+       PUT_NAT_BIT(sw->caller_unat, &pt->r28); PUT_NAT_BIT(sw->caller_unat, &pt->r29);
+       PUT_NAT_BIT(sw->caller_unat, &pt->r30); PUT_NAT_BIT(sw->caller_unat, &pt->r31);
 }
 
-/*
- * This routine will be used to deal with platform specific handling
- * of the init, i.e. drop into the kernel debugger on server machine,
- * or if the processor is part of some parallel machine without a
- * console, then we would call the appropriate debug hooks here.
- */
 void
-init_handler_platform (struct pt_regs *regs)
+init_handler_platform (sal_log_processor_info_t *proc_ptr,
+                      struct pt_regs *pt, struct switch_stack *sw)
 {
+       struct unw_frame_info info;
+
        /* if a kernel debugger is available call it here else just dump the registers */
 
-       show_regs(regs);                /* dump the state info */
+       /*
+        * Wait for a bit.  On some machines (e.g., HP's zx2000 and zx6000), INIT can be
+        * generated via the BMC's command-line interface, but since the console is on the
+        * same serial line, the user will need some time to switch out of the BMC before
+        * the dump begins.
+        */
+       printk("Delaying for 5 seconds...\n");
+       udelay(5*1000000);
+       show_min_state(&SAL_LPI_PSI_INFO(proc_ptr)->min_state_area);
+
+       printk("Backtrace of current task (pid %d, %s)\n", current->pid, current->comm);
+       fetch_min_state(&SAL_LPI_PSI_INFO(proc_ptr)->min_state_area, pt, sw);
+       unw_init_from_interruption(&info, current, pt, sw);
+       ia64_do_show_stack(&info, NULL);
+
+#ifdef CONFIG_SMP
+       if (!tasklist_lock.write_lock)
+#endif
+               read_lock(&tasklist_lock);
+       {
+               struct task_struct *g, *t;
+               do_each_thread (g, t) {
+                       if (t == current)
+                               continue;
+
+                       printk("\nBacktrace of pid %d (%s)\n", t->pid, t->comm);
+                       show_stack(t);
+               } while_each_thread (g, t);
+       }
+#ifdef CONFIG_SMP
+       if (!tasklist_lock.write_lock)
+#endif
+               read_unlock(&tasklist_lock);
+
+       printk("\nINIT dump complete.  Please reboot now.\n");
        while (1);                      /* hang city if no debugger */
 }
 
@@ -263,7 +439,6 @@ ia64_mca_register_cpev (int cpev)
 /*
  * routine to process and prepare to dump min_state_save
  * information for debugging purposes.
- *
  */
 void
 ia64_process_min_state_save (pal_min_state_area_t *pmss)
@@ -272,8 +447,6 @@ ia64_process_min_state_save (pal_min_state_area_t *pmss)
        u64 *tpmss_ptr = (u64 *)pmss;
        u64 *return_min_state_ptr = ia64_mca_min_state_save_info;
 
-       /* dump out the min_state_area information */
-
        for (i=0;i<max;i++) {
 
                /* copy min-state register info for eventual return to PAL */
@@ -676,7 +849,7 @@ ia64_mca_wakeup_all(void)
  *  Inputs  :   None
  *  Outputs :   None
  */
-void
+irqreturn_t
 ia64_mca_rendez_int_handler(int rendez_irq, void *arg, struct pt_regs *ptregs)
 {
        unsigned long flags;
@@ -699,6 +872,7 @@ ia64_mca_rendez_int_handler(int rendez_irq, void *arg, struct pt_regs *ptregs)
 
        /* Enable all interrupts */
        local_irq_restore(flags);
+       return IRQ_HANDLED;
 }
 
 
@@ -717,10 +891,10 @@ ia64_mca_rendez_int_handler(int rendez_irq, void *arg, struct pt_regs *ptregs)
  *  Outputs :   None
  *
  */
-void
+irqreturn_t
 ia64_mca_wakeup_int_handler(int wakeup_irq, void *arg, struct pt_regs *ptregs)
 {
-
+       return IRQ_HANDLED;
 }
 
 /*
@@ -815,7 +989,7 @@ ia64_mca_ucmc_handler(void)
  * Outputs
  *     None
  */
-void
+irqreturn_t
 ia64_mca_cmc_int_handler(int cmc_irq, void *arg, struct pt_regs *ptregs)
 {
        static unsigned long    cmc_history[CMC_HISTORY_LENGTH];
@@ -832,7 +1006,7 @@ ia64_mca_cmc_int_handler(int cmc_irq, void *arg, struct pt_regs *ptregs)
        if (!cmc_polling_enabled) {
                int i, count = 1; /* we know 1 happened now */
                unsigned long now = jiffies;
-               
+
                for (i = 0; i < CMC_HISTORY_LENGTH; i++) {
                        if (now - cmc_history[i] <= HZ)
                                count++;
@@ -867,12 +1041,12 @@ ia64_mca_cmc_int_handler(int cmc_irq, void *arg, struct pt_regs *ptregs)
                         * something is generating more than we can handle.
                         */
                        printk(KERN_WARNING "ia64_mca_cmc_int_handler: WARNING: Switching to polling CMC handler, error records may be lost\n");
-                       
+
 
                        mod_timer(&cmc_poll_timer, jiffies + CMC_POLL_INTERVAL);
 
                        /* lock already released, get out now */
-                       return;
+                       return IRQ_HANDLED;
                } else {
                        cmc_history[index++] = now;
                        if (index == CMC_HISTORY_LENGTH)
@@ -880,6 +1054,7 @@ ia64_mca_cmc_int_handler(int cmc_irq, void *arg, struct pt_regs *ptregs)
                }
        }
        spin_unlock(&cmc_history_lock);
+       return IRQ_HANDLED;
 }
 
 /*
@@ -944,7 +1119,7 @@ ia64_mca_cmc_int_caller(void *dummy)
 static void
 ia64_mca_cmc_poll (unsigned long dummy)
 {
-       int start_count;
+       unsigned long start_count;
 
        start_count = IA64_LOG_COUNT(SAL_INFO_TYPE_CMC);
 
@@ -986,7 +1161,7 @@ ia64_mca_cpe_int_caller(void *dummy)
  *  ia64_mca_cpe_poll
  *
  *     Poll for Corrected Platform Errors (CPEs), dynamically adjust
- *     polling interval based on occurance of an event.
+ *     polling interval based on occurrence of an event.
  *
  * Inputs   :   dummy(unused)
  * Outputs  :   None
@@ -995,7 +1170,7 @@ ia64_mca_cpe_int_caller(void *dummy)
 static void
 ia64_mca_cpe_poll (unsigned long dummy)
 {
-       int start_count;
+       unsigned long start_count;
        static int poll_time = MAX_CPE_POLL_INTERVAL;
 
        start_count = IA64_LOG_COUNT(SAL_INFO_TYPE_CPE);
@@ -1062,7 +1237,7 @@ device_initcall(ia64_mca_late_init);
  *
  */
 void
-ia64_init_handler (struct pt_regs *regs)
+ia64_init_handler (struct pt_regs *pt, struct switch_stack *sw)
 {
        sal_log_processor_info_t *proc_ptr;
        ia64_err_rec_t *plog_ptr;
@@ -1089,7 +1264,7 @@ ia64_init_handler (struct pt_regs *regs)
        /* Clear the INIT SAL logs now that they have been saved in the OS buffer */
        ia64_sal_clear_state_info(SAL_INFO_TYPE_INIT);
 
-       init_handler_platform(regs);              /* call platform specific routines */
+       init_handler_platform(proc_ptr, pt, sw);        /* call platform specific routines */
 }
 
 /*
@@ -1112,7 +1287,8 @@ ia64_log_prt_guid (efi_guid_t *p_guid, prfunc_t prfunc)
 static void
 ia64_log_hexdump(unsigned char *p, unsigned long n_ch, prfunc_t prfunc)
 {
-       int i, j;
+       unsigned long i;
+       int j;
 
        if (!p)
                return;
@@ -1960,7 +2136,7 @@ ia64_log_processor_info_print(sal_log_record_header_t *lh, prfunc_t prfunc)
 {
        sal_log_section_hdr_t       *slsh;
        int                         n_sects;
-       int                         ercd_pos;
+       u32                         ercd_pos;
 
        if (!lh)
                return;
@@ -2022,7 +2198,7 @@ ia64_log_platform_info_print (sal_log_record_header_t *lh, prfunc_t prfunc)
 {
        sal_log_section_hdr_t   *slsh;
        int                     n_sects;
-       int                     ercd_pos;
+       u32                     ercd_pos;
        int                     platform_err = 0;
 
        if (!lh)
@@ -2139,7 +2315,8 @@ ia64_log_print(int sal_info_type, prfunc_t prfunc)
        switch(sal_info_type) {
              case SAL_INFO_TYPE_MCA:
                prfunc("+BEGIN HARDWARE ERROR STATE AT MCA\n");
-               platform_err = ia64_log_platform_info_print(IA64_LOG_CURR_BUFFER(sal_info_type), prfunc);
+               platform_err = ia64_log_platform_info_print(IA64_LOG_CURR_BUFFER(sal_info_type),
+                                                           prfunc);
                prfunc("+END HARDWARE ERROR STATE AT MCA\n");
                break;
              case SAL_INFO_TYPE_INIT:
index 8565c0c..f4ccd0c 100644 (file)
@@ -766,8 +766,6 @@ GLOBAL_ENTRY(ia64_monarch_init_handler)
        // stash the information the SAL passed to os
        SAL_TO_OS_MCA_HANDOFF_STATE_SAVE(r2)
        ;;
-
-// now we want to save information so we can dump registers
        SAVE_MIN_WITH_COVER
        ;;
        mov r8=cr.ifa
@@ -798,10 +796,12 @@ IVirtual_Switch:
        //
        // Let's call the C handler to get the rest of the state info
        //
-       alloc r14=ar.pfs,0,0,1,0                // now it's safe (must be first in insn group!)
-       ;;                                      //
+       alloc r14=ar.pfs,0,0,2,0                // now it's safe (must be first in insn group!)
+       ;;
        adds out0=16,sp                         // out0 = pointer to pt_regs
        ;;
+       DO_SAVE_SWITCH_STACK
+       adds out1=16,sp                         // out1 = pointer to switch_stack
 
        br.call.sptk.many rp=ia64_init_handler
 .ret1:
index 8feb33e..27123bb 100644 (file)
@@ -91,7 +91,7 @@ static const char *rse_hints[]={
        "eager loads and stores"
 };
 
-#define RSE_HINTS_COUNT (sizeof(rse_hints)/sizeof(const char *))
+#define RSE_HINTS_COUNT ARRAY_SIZE(rse_hints)
 
 static const char *mem_attrib[]={
        "WB",           /* 000 */
@@ -192,15 +192,15 @@ power_info(char *page)
 
        for (i=0; i < 8 ; i++ ) {
                if (halt_info[i].pal_power_mgmt_info_s.im == 1) {
-                       p += sprintf(p, "Power level %d:\n" \
-                                       "\tentry_latency       : %d cycles\n" \
-                                       "\texit_latency        : %d cycles\n" \
-                                       "\tpower consumption   : %d mW\n" \
-                                       "\tCache+TLB coherency : %s\n", i,
-                               halt_info[i].pal_power_mgmt_info_s.entry_latency,
-                               halt_info[i].pal_power_mgmt_info_s.exit_latency,
-                               halt_info[i].pal_power_mgmt_info_s.power_consumption,
-                               halt_info[i].pal_power_mgmt_info_s.co ? "Yes" : "No");
+                       p += sprintf(p, "Power level %d:\n"
+                                    "\tentry_latency       : %d cycles\n"
+                                    "\texit_latency        : %d cycles\n"
+                                    "\tpower consumption   : %d mW\n"
+                                    "\tCache+TLB coherency : %s\n", i,
+                                    halt_info[i].pal_power_mgmt_info_s.entry_latency,
+                                    halt_info[i].pal_power_mgmt_info_s.exit_latency,
+                                    halt_info[i].pal_power_mgmt_info_s.power_consumption,
+                                    halt_info[i].pal_power_mgmt_info_s.co ? "Yes" : "No");
                } else {
                        p += sprintf(p,"Power level %d: not implemented\n",i);
                }
@@ -212,9 +212,9 @@ static int
 cache_info(char *page)
 {
        char *p = page;
-       u64 levels, unique_caches;
+       u64 i, levels, unique_caches;
        pal_cache_config_info_t cci;
-       int i,j, k;
+       int j, k;
        s64 status;
 
        if ((status = ia64_pal_cache_summary(&levels, &unique_caches)) != 0) {
@@ -232,48 +232,50 @@ cache_info(char *page)
                        if ((status=ia64_pal_cache_config_info(i,j, &cci)) != 0) {
                                continue;
                        }
-                       p += sprintf(p, "%s Cache level %d:\n" \
-                                       "\tSize           : %ld bytes\n" \
-                                       "\tAttributes     : ",
-                                       cache_types[j+cci.pcci_unified], i+1,
-                                       cci.pcci_cache_size);
+                       p += sprintf(p,
+                                    "%s Cache level %lu:\n"
+                                    "\tSize           : %lu bytes\n"
+                                    "\tAttributes     : ",
+                                    cache_types[j+cci.pcci_unified], i+1,
+                                    cci.pcci_cache_size);
 
                        if (cci.pcci_unified) p += sprintf(p, "Unified ");
 
                        p += sprintf(p, "%s\n", cache_mattrib[cci.pcci_cache_attr]);
 
-                       p += sprintf(p, "\tAssociativity  : %d\n" \
-                                       "\tLine size      : %d bytes\n" \
-                                       "\tStride         : %d bytes\n",
-                                       cci.pcci_assoc,
-                                       1<<cci.pcci_line_size,
-                                       1<<cci.pcci_stride);
+                       p += sprintf(p,
+                                    "\tAssociativity  : %d\n"
+                                    "\tLine size      : %d bytes\n"
+                                    "\tStride         : %d bytes\n",
+                                    cci.pcci_assoc, 1<<cci.pcci_line_size, 1<<cci.pcci_stride);
                        if (j == 1)
                                p += sprintf(p, "\tStore latency  : N/A\n");
                        else
                                p += sprintf(p, "\tStore latency  : %d cycle(s)\n",
                                                cci.pcci_st_latency);
 
-                       p += sprintf(p, "\tLoad latency   : %d cycle(s)\n" \
-                                       "\tStore hints    : ",
-                                       cci.pcci_ld_latency);
+                       p += sprintf(p,
+                                    "\tLoad latency   : %d cycle(s)\n"
+                                    "\tStore hints    : ", cci.pcci_ld_latency);
 
                        for(k=0; k < 8; k++ ) {
-                               if ( cci.pcci_st_hints & 0x1) p += sprintf(p, "[%s]", cache_st_hints[k]);
+                               if ( cci.pcci_st_hints & 0x1)
+                                       p += sprintf(p, "[%s]", cache_st_hints[k]);
                                cci.pcci_st_hints >>=1;
                        }
                        p += sprintf(p, "\n\tLoad hints     : ");
 
                        for(k=0; k < 8; k++ ) {
-                               if ( cci.pcci_ld_hints & 0x1) p += sprintf(p, "[%s]", cache_ld_hints[k]);
+                               if (cci.pcci_ld_hints & 0x1)
+                                       p += sprintf(p, "[%s]", cache_ld_hints[k]);
                                cci.pcci_ld_hints >>=1;
                        }
-                       p += sprintf(p, "\n\tAlias boundary : %d byte(s)\n" \
-                                       "\tTag LSB        : %d\n" \
-                                       "\tTag MSB        : %d\n",
-                                       1<<cci.pcci_alias_boundary,
-                                       cci.pcci_tag_lsb,
-                                       cci.pcci_tag_msb);
+                       p += sprintf(p,
+                                    "\n\tAlias boundary : %d byte(s)\n"
+                                    "\tTag LSB        : %d\n"
+                                    "\tTag MSB        : %d\n",
+                                    1<<cci.pcci_alias_boundary, cci.pcci_tag_lsb,
+                                    cci.pcci_tag_msb);
 
                        /* when unified, data(j=2) is enough */
                        if (cci.pcci_unified) break;
@@ -303,20 +305,20 @@ vm_info(char *page)
        }
 
 
-       p += sprintf(p, "Physical Address Space         : %d bits\n" \
-                       "Virtual Address Space          : %d bits\n" \
-                       "Protection Key Registers(PKR)  : %d\n" \
-                       "Implemented bits in PKR.key    : %d\n" \
-                       "Hash Tag ID                    : 0x%x\n" \
-                       "Size of RR.rid                 : %d\n",
-                       vm_info_1.pal_vm_info_1_s.phys_add_size,
-                       vm_info_2.pal_vm_info_2_s.impl_va_msb+1,
-                       vm_info_1.pal_vm_info_1_s.max_pkr+1,
-                       vm_info_1.pal_vm_info_1_s.key_size,
-                       vm_info_1.pal_vm_info_1_s.hash_tag_id,
-                       vm_info_2.pal_vm_info_2_s.rid_size);
+       p += sprintf(p,
+                    "Physical Address Space         : %d bits\n"
+                    "Virtual Address Space          : %d bits\n"
+                    "Protection Key Registers(PKR)  : %d\n"
+                    "Implemented bits in PKR.key    : %d\n"
+                    "Hash Tag ID                    : 0x%x\n"
+                    "Size of RR.rid                 : %d\n",
+                    vm_info_1.pal_vm_info_1_s.phys_add_size,
+                    vm_info_2.pal_vm_info_2_s.impl_va_msb+1, vm_info_1.pal_vm_info_1_s.max_pkr+1,
+                    vm_info_1.pal_vm_info_1_s.key_size, vm_info_1.pal_vm_info_1_s.hash_tag_id,
+                    vm_info_2.pal_vm_info_2_s.rid_size);
 
-       if (ia64_pal_mem_attrib(&attrib) != 0) return 0;
+       if (ia64_pal_mem_attrib(&attrib) != 0)
+               return 0;
 
        p += sprintf(p, "Supported memory attributes    : ");
        sep = "";
@@ -333,13 +335,14 @@ vm_info(char *page)
                return 0;
        }
 
-       p += sprintf(p, "\nTLB walker                     : %simplemented\n" \
-                       "Number of DTR                  : %d\n" \
-                       "Number of ITR                  : %d\n" \
-                       "TLB insertable page sizes      : ",
-                       vm_info_1.pal_vm_info_1_s.vw ? "" : "not ",
-                       vm_info_1.pal_vm_info_1_s.max_dtr_entry+1,
-                       vm_info_1.pal_vm_info_1_s.max_itr_entry+1);
+       p += sprintf(p,
+                    "\nTLB walker                     : %simplemented\n"
+                    "Number of DTR                  : %d\n"
+                    "Number of ITR                  : %d\n"
+                    "TLB insertable page sizes      : ",
+                    vm_info_1.pal_vm_info_1_s.vw ? "" : "not ",
+                    vm_info_1.pal_vm_info_1_s.max_dtr_entry+1,
+                    vm_info_1.pal_vm_info_1_s.max_itr_entry+1);
 
 
        p = bitvector_process(p, tr_pages);
@@ -353,21 +356,19 @@ vm_info(char *page)
                return 0;
        }
 
-       p += sprintf(p, "\nPurge base address             : 0x%016lx\n" \
-                       "Purge outer loop count         : %d\n" \
-                       "Purge inner loop count         : %d\n" \
-                       "Purge outer loop stride        : %d\n" \
-                       "Purge inner loop stride        : %d\n",
-                       ptce.base,
-                       ptce.count[0],
-                       ptce.count[1],
-                       ptce.stride[0],
-                       ptce.stride[1]);
-
-       p += sprintf(p, "TC Levels                      : %d\n" \
-                       "Unique TC(s)                   : %d\n",
-                       vm_info_1.pal_vm_info_1_s.num_tc_levels,
-                       vm_info_1.pal_vm_info_1_s.max_unique_tcs);
+       p += sprintf(p,
+                    "\nPurge base address             : 0x%016lx\n"
+                    "Purge outer loop count         : %d\n"
+                    "Purge inner loop count         : %d\n"
+                    "Purge outer loop stride        : %d\n"
+                    "Purge inner loop stride        : %d\n",
+                    ptce.base, ptce.count[0], ptce.count[1], ptce.stride[0], ptce.stride[1]);
+
+       p += sprintf(p,
+                    "TC Levels                      : %d\n"
+                    "Unique TC(s)                   : %d\n",
+                    vm_info_1.pal_vm_info_1_s.num_tc_levels,
+                    vm_info_1.pal_vm_info_1_s.max_unique_tcs);
 
        for(i=0; i < vm_info_1.pal_vm_info_1_s.num_tc_levels; i++) {
                for (j=2; j>0 ; j--) {
@@ -379,15 +380,14 @@ vm_info(char *page)
                                continue;
                        }
 
-                       p += sprintf(p, "\n%s Translation Cache Level %d:\n" \
-                                       "\tHash sets           : %d\n" \
-                                       "\tAssociativity       : %d\n" \
-                                       "\tNumber of entries   : %d\n" \
-                                       "\tFlags               : ",
-                                       cache_types[j+tc_info.tc_unified], i+1,
-                                       tc_info.tc_num_sets,
-                                       tc_info.tc_associativity,
-                                       tc_info.tc_num_entries);
+                       p += sprintf(p,
+                                    "\n%s Translation Cache Level %d:\n"
+                                    "\tHash sets           : %d\n"
+                                    "\tAssociativity       : %d\n"
+                                    "\tNumber of entries   : %d\n"
+                                    "\tFlags               : ",
+                                    cache_types[j+tc_info.tc_unified], i+1, tc_info.tc_num_sets,
+                                    tc_info.tc_associativity, tc_info.tc_num_entries);
 
                        if (tc_info.tc_pf) p += sprintf(p, "PreferredPageSizeOptimized ");
                        if (tc_info.tc_unified) p += sprintf(p, "Unified ");
@@ -436,17 +436,18 @@ register_info(char *page)
 
        if (ia64_pal_rse_info(&phys_stacked, &hints) != 0) return 0;
 
-       p += sprintf(p, "RSE stacked physical registers   : %ld\n" \
-                       "RSE load/store hints             : %ld (%s)\n",
-                       phys_stacked,
-                       hints.ph_data,
-                       hints.ph_data < RSE_HINTS_COUNT ? rse_hints[hints.ph_data]: "(\?\?)");
+       p += sprintf(p,
+                    "RSE stacked physical registers   : %ld\n"
+                    "RSE load/store hints             : %ld (%s)\n",
+                    phys_stacked, hints.ph_data,
+                    hints.ph_data < RSE_HINTS_COUNT ? rse_hints[hints.ph_data]: "(\?\?)");
 
-       if (ia64_pal_debug_info(&iregs, &dregs)) return 0;
+       if (ia64_pal_debug_info(&iregs, &dregs))
+               return 0;
 
-       p += sprintf(p, "Instruction debug register pairs : %ld\n" \
-                       "Data debug register pairs        : %ld\n",
-                       iregs, dregs);
+       p += sprintf(p,
+                    "Instruction debug register pairs : %ld\n"
+                    "Data debug register pairs        : %ld\n", iregs, dregs);
 
        return p - page;
 }
@@ -563,26 +564,21 @@ version_info(char *page)
         */
        if (ia64_pal_version(&min_ver, &cur_ver) != 0) return 0;
 
-       p += sprintf(p, "PAL_vendor : 0x%02x (min=0x%02x)\n" \
-                       "PAL_A      : %x.%x.%x (min=%x.%x.%x)\n" \
-                       "PAL_B      : %x.%x.%x (min=%x.%x.%x)\n",
-                       cur_ver.pal_version_s.pv_pal_vendor,
-                       min_ver.pal_version_s.pv_pal_vendor,
-
-                       cur_ver.pal_version_s.pv_pal_a_model>>4,
-                       cur_ver.pal_version_s.pv_pal_a_model&0xf,
-                       cur_ver.pal_version_s.pv_pal_a_rev,
-                       min_ver.pal_version_s.pv_pal_a_model>>4,
-                       min_ver.pal_version_s.pv_pal_a_model&0xf,
-                       min_ver.pal_version_s.pv_pal_a_rev,
-
-                       cur_ver.pal_version_s.pv_pal_b_model>>4,
-                       cur_ver.pal_version_s.pv_pal_b_model&0xf,
-                       cur_ver.pal_version_s.pv_pal_b_rev,
-                       min_ver.pal_version_s.pv_pal_b_model>>4,
-                       min_ver.pal_version_s.pv_pal_b_model&0xf,
-                       min_ver.pal_version_s.pv_pal_b_rev);
-
+       p += sprintf(p,
+                    "PAL_vendor : 0x%02x (min=0x%02x)\n"
+                    "PAL_A      : %x.%x.%x (min=%x.%x.%x)\n"
+                    "PAL_B      : %x.%x.%x (min=%x.%x.%x)\n",
+                    cur_ver.pal_version_s.pv_pal_vendor, min_ver.pal_version_s.pv_pal_vendor,
+
+                    cur_ver.pal_version_s.pv_pal_a_model>>4,
+                    cur_ver.pal_version_s.pv_pal_a_model&0xf, cur_ver.pal_version_s.pv_pal_a_rev,
+                    min_ver.pal_version_s.pv_pal_a_model>>4,
+                    min_ver.pal_version_s.pv_pal_a_model&0xf, min_ver.pal_version_s.pv_pal_a_rev,
+
+                    cur_ver.pal_version_s.pv_pal_b_model>>4,
+                    cur_ver.pal_version_s.pv_pal_b_model&0xf, cur_ver.pal_version_s.pv_pal_b_rev,
+                    min_ver.pal_version_s.pv_pal_b_model>>4,
+                    min_ver.pal_version_s.pv_pal_b_model&0xf, min_ver.pal_version_s.pv_pal_b_rev);
        return p - page;
 }
 
@@ -595,26 +591,20 @@ perfmon_info(char *page)
 
        if (ia64_pal_perf_mon_info(pm_buffer, &pm_info) != 0) return 0;
 
-       p += sprintf(p, "PMC/PMD pairs                 : %d\n" \
-                       "Counter width                 : %d bits\n" \
-                       "Cycle event number            : %d\n" \
-                       "Retired event number          : %d\n" \
-                       "Implemented PMC               : ",
-                       pm_info.pal_perf_mon_info_s.generic,
-                       pm_info.pal_perf_mon_info_s.width,
-                       pm_info.pal_perf_mon_info_s.cycles,
-                       pm_info.pal_perf_mon_info_s.retired);
+       p += sprintf(p,
+                    "PMC/PMD pairs                 : %d\n"
+                    "Counter width                 : %d bits\n"
+                    "Cycle event number            : %d\n"
+                    "Retired event number          : %d\n"
+                    "Implemented PMC               : ",
+                    pm_info.pal_perf_mon_info_s.generic, pm_info.pal_perf_mon_info_s.width,
+                    pm_info.pal_perf_mon_info_s.cycles, pm_info.pal_perf_mon_info_s.retired);
 
        p = bitregister_process(p, pm_buffer, 256);
-
        p += sprintf(p, "\nImplemented PMD               : ");
-
        p = bitregister_process(p, pm_buffer+4, 256);
-
        p += sprintf(p, "\nCycles count capable          : ");
-
        p = bitregister_process(p, pm_buffer+8, 256);
-
        p += sprintf(p, "\nRetired bundles count capable : ");
 
 #ifdef CONFIG_ITANIUM
@@ -646,12 +636,11 @@ frequency_info(char *page)
 
        if (ia64_pal_freq_ratios(&proc, &bus, &itc) != 0) return 0;
 
-       p += sprintf(p, "Processor/Clock ratio   : %ld/%ld\n" \
-                       "Bus/Clock ratio         : %ld/%ld\n" \
-                       "ITC/Clock ratio         : %ld/%ld\n",
-                       proc.num, proc.den,
-                       bus.num, bus.den,
-                       itc.num, itc.den);
+       p += sprintf(p,
+                    "Processor/Clock ratio   : %ld/%ld\n"
+                    "Bus/Clock ratio         : %ld/%ld\n"
+                    "ITC/Clock ratio         : %ld/%ld\n",
+                    proc.num, proc.den, bus.num, bus.den, itc.num, itc.den);
 
        return p - page;
 }
@@ -665,7 +654,7 @@ tr_info(char *page)
        u64 tr_buffer[4];
        pal_vm_info_1_u_t vm_info_1;
        pal_vm_info_2_u_t vm_info_2;
-       int i, j;
+       u64 i, j;
        u64 max[3], pgm;
        struct ifa_reg {
                u64 valid:1;
@@ -711,7 +700,7 @@ tr_info(char *page)
 
                status = ia64_pal_tr_read(j, i, tr_buffer, &tr_valid);
                if (status != 0) {
-                       printk(KERN_ERR "palinfo: pal call failed on tr[%d:%d]=%ld\n",
+                       printk(KERN_ERR "palinfo: pal call failed on tr[%lu:%lu]=%ld\n",
                               i, j, status);
                        continue;
                }
@@ -725,34 +714,29 @@ tr_info(char *page)
                rid_reg  = (struct rid_reg *)&tr_buffer[3];
 
                pgm      = -1 << (itir_reg->ps - 12);
-               p += sprintf(p, "%cTR%d: av=%d pv=%d dv=%d mv=%d\n" \
-                               "\tppn  : 0x%lx\n" \
-                               "\tvpn  : 0x%lx\n" \
-                               "\tps   : ",
-
-                               "ID"[i],
-                               j,
-                               tr_valid.pal_tr_valid_s.access_rights_valid,
-                               tr_valid.pal_tr_valid_s.priv_level_valid,
-                               tr_valid.pal_tr_valid_s.dirty_bit_valid,
-                               tr_valid.pal_tr_valid_s.mem_attr_valid,
-                               (gr_reg->ppn & pgm)<< 12,
-                               (ifa_reg->vpn & pgm)<< 12);
+               p += sprintf(p,
+                            "%cTR%lu: av=%d pv=%d dv=%d mv=%d\n"
+                            "\tppn  : 0x%lx\n"
+                            "\tvpn  : 0x%lx\n"
+                            "\tps   : ",
+                            "ID"[i], j,
+                            tr_valid.pal_tr_valid_s.access_rights_valid,
+                            tr_valid.pal_tr_valid_s.priv_level_valid,
+                            tr_valid.pal_tr_valid_s.dirty_bit_valid,
+                            tr_valid.pal_tr_valid_s.mem_attr_valid,
+                            (gr_reg->ppn & pgm)<< 12, (ifa_reg->vpn & pgm)<< 12);
 
                p = bitvector_process(p, 1<< itir_reg->ps);
 
-               p += sprintf(p, "\n\tpl   : %d\n" \
-                               "\tar   : %d\n" \
-                               "\trid  : %x\n" \
-                               "\tp    : %d\n" \
-                               "\tma   : %d\n" \
-                               "\td    : %d\n",
-                               gr_reg->pl,
-                               gr_reg->ar,
-                               rid_reg->rid,
-                               gr_reg->p,
-                               gr_reg->ma,
-                               gr_reg->d);
+               p += sprintf(p,
+                            "\n\tpl   : %d\n"
+                            "\tar   : %d\n"
+                            "\trid  : %x\n"
+                            "\tp    : %d\n"
+                            "\tma   : %d\n"
+                            "\td    : %d\n",
+                            gr_reg->pl, gr_reg->ar, rid_reg->rid, gr_reg->p, gr_reg->ma,
+                            gr_reg->d);
                }
        }
        return p - page;
@@ -776,7 +760,7 @@ static palinfo_entry_t palinfo_entries[]={
        { "tr_info",            tr_info, }
 };
 
-#define NR_PALINFO_ENTRIES     (sizeof(palinfo_entries)/sizeof(palinfo_entry_t))
+#define NR_PALINFO_ENTRIES     (int) ARRAY_SIZE(palinfo_entries)
 
 /*
  * this array is used to keep track of the proc entries we create. This is
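
All of the reflowed blocks in this file follow the same /proc read-handler idiom: each helper appends to a page-sized buffer through sprintf()'s return value and reports the byte count at the end. A condensed sketch of the pattern (function and field names are illustrative, not from the patch):

    static int
    example_info (char *page)
    {
            char *p = page;

            /* sprintf() returns the number of characters written, so
             * advancing p accumulates output into the page buffer */
            p += sprintf(p, "Feature A : %d\n", 1);
            p += sprintf(p, "Feature B : %s\n", "yes");

            return p - page;        /* byte count handed to the /proc layer */
    }
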
index 7134dfd..fd0f90a 100644 (file)
@@ -2,7 +2,7 @@
  * This file implements the perfmon subsystem which is used
  * to program the IA-64 Performance Monitoring Unit (PMU).
  *
- * Originaly Written by Ganesh Venkitachalam, IBM Corp.
+ * Originally Written by Ganesh Venkitachalam, IBM Corp.
  * Copyright (C) 1999 Ganesh Venkitachalam <venkitac@us.ibm.com>
  *
  * Modifications by Stephane Eranian, Hewlett-Packard Co.
@@ -224,8 +224,9 @@ typedef struct {
        unsigned int protected:1;       /* allow access to creator of context only */
        unsigned int using_dbreg:1;     /* using range restrictions (debug registers) */
        unsigned int excl_idle:1;       /* exclude idle task in system wide session */
+       unsigned int unsecure:1;        /* sp = 0 for non self-monitored task */
        unsigned int trap_reason:2;     /* reason for going into pfm_block_ovfl_reset() */
-       unsigned int reserved:21;
+       unsigned int reserved:20;
 } pfm_context_flags_t;
 
 #define PFM_TRAP_REASON_NONE           0x0     /* default value */
@@ -278,6 +279,7 @@ typedef struct pfm_context {
 #define ctx_fl_using_dbreg     ctx_flags.using_dbreg
 #define ctx_fl_excl_idle       ctx_flags.excl_idle
 #define ctx_fl_trap_reason     ctx_flags.trap_reason
+#define ctx_fl_unsecure                ctx_flags.unsecure
 
 /*
  * global information about all sessions
@@ -362,8 +364,9 @@ typedef struct {
 
 #define PFM_CMD_IDX(cmd)       (cmd)
 
-#define PFM_CMD_IS_VALID(cmd)  ((PFM_CMD_IDX(cmd) >= 0) && (PFM_CMD_IDX(cmd) < PFM_CMD_COUNT) \
-                                 && pfm_cmd_tab[PFM_CMD_IDX(cmd)].cmd_func != NULL)
+#define PFM_CMD_IS_VALID(cmd)  ((PFM_CMD_IDX(cmd) >= 0)                                \
+                                && (PFM_CMD_IDX(cmd) < (int) PFM_CMD_COUNT)            \
+                                && pfm_cmd_tab[PFM_CMD_IDX(cmd)].cmd_func != NULL)
 
 #define PFM_CMD_USE_PID(cmd)   ((pfm_cmd_tab[PFM_CMD_IDX(cmd)].cmd_flags & PFM_CMD_PID) != 0)
 #define PFM_CMD_READ_ARG(cmd)  ((pfm_cmd_tab[PFM_CMD_IDX(cmd)].cmd_flags & PFM_CMD_ARG_READ) != 0)
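
The rewritten PFM_CMD_IS_VALID() makes the signed/unsigned comparison explicit: the command index is checked against the int-cast table size before the sparse dispatch table is dereferenced. A hedged sketch of the pattern being guarded (names and types are illustrative):

    typedef int (*cmd_func_t)(void *arg, int count);

    static struct { cmd_func_t cmd_func; } cmd_tab[8];  /* may contain holes */

    static int
    dispatch_cmd (int cmd, void *arg, int count)
    {
            /* bounds-check first (table size cast to int so the comparison
             * stays signed), then reject unpopulated slots */
            if (cmd < 0 || cmd >= (int) ARRAY_SIZE(cmd_tab)
                || cmd_tab[cmd].cmd_func == NULL)
                    return -EINVAL;
            return (*cmd_tab[cmd].cmd_func)(arg, count);
    }
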
@@ -646,7 +649,7 @@ pfm_vm_close(struct vm_area_struct *vma)
 
 /*
  * This function is called from pfm_destroy_context() and also from pfm_inherit()
- * to explicitely remove the sampling buffer mapping from the user level address space.
+ * to explicitly remove the sampling buffer mapping from the user level address space.
  */
 static int
 pfm_remove_smpl_mapping(struct task_struct *task)
@@ -724,8 +727,7 @@ pfm_remap_buffer(struct vm_area_struct *vma, unsigned long buf, unsigned long ad
 static unsigned long
 pfm_smpl_entry_size(unsigned long *which, unsigned long size)
 {
-       unsigned long res = 0;
-       int i;
+       unsigned long i, res = 0;
 
        for (i=0; i < size; i++, which++) res += hweight64(*which);
 
@@ -1076,10 +1078,15 @@ pfx_is_sane(struct task_struct *task, pfarg_context_t *pfx)
                 * and it must be a valid CPU
                 */
                cpu = ffz(~pfx->ctx_cpu_mask);
+#ifdef CONFIG_SMP
                if (cpu_online(cpu) == 0) {
+#else
+               if (cpu != 0) {
+#endif
                        DBprintk(("CPU%d is not online\n", cpu));
                        return -EINVAL;
                }
+
                /*
                 * check for pre-existing pinning, if conflicting reject
                 */
@@ -1225,6 +1232,7 @@ pfm_context_create(struct task_struct *task, pfm_context_t *ctx, void *req, int
        ctx->ctx_fl_block     = (ctx_flags & PFM_FL_NOTIFY_BLOCK) ? 1 : 0;
        ctx->ctx_fl_system    = (ctx_flags & PFM_FL_SYSTEM_WIDE) ? 1: 0;
        ctx->ctx_fl_excl_idle = (ctx_flags & PFM_FL_EXCL_IDLE) ? 1: 0;
+       ctx->ctx_fl_unsecure  = (ctx_flags & PFM_FL_UNSECURE) ? 1: 0;
        ctx->ctx_fl_frozen    = 0;
        ctx->ctx_fl_trap_reason = PFM_TRAP_REASON_NONE;
 
@@ -1251,9 +1259,11 @@ pfm_context_create(struct task_struct *task, pfm_context_t *ctx, void *req, int
        DBprintk(("context=%p, pid=%d notify_task=%p\n",
                        (void *)ctx, task->pid, ctx->ctx_notify_task));
 
-       DBprintk(("context=%p, pid=%d flags=0x%x inherit=%d block=%d system=%d excl_idle=%d\n", 
+       DBprintk(("context=%p, pid=%d flags=0x%x inherit=%d block=%d system=%d excl_idle=%d unsecure=%d\n", 
                        (void *)ctx, task->pid, ctx_flags, ctx->ctx_fl_inherit, 
-                       ctx->ctx_fl_block, ctx->ctx_fl_system, ctx->ctx_fl_excl_idle));
+                       ctx->ctx_fl_block, ctx->ctx_fl_system, 
+                       ctx->ctx_fl_excl_idle,
+                       ctx->ctx_fl_unsecure));
 
        /*
         * when no notification is required, we can make this visible at the last moment
@@ -1659,7 +1669,7 @@ pfm_read_pmds(struct task_struct *task, pfm_context_t *ctx, void *arg, int count
                if (!PMD_IS_IMPL(cnum)) goto abort_mission;
                /*
                 * we can only read the register that we use. That includes
-                * the one we explicitely initialize AND the one we want included
+                * the one we explicitly initialize AND the one we want included
                 * in the sampling buffer (smpl_regs).
                 *
                 * Having this restriction allows optimization in the ctxsw routine
@@ -1871,7 +1881,7 @@ pfm_restart(struct task_struct *task, pfm_context_t *ctx, void *arg, int count,
         * if blocking, then post the semaphore.
         * if non-blocking, then we ensure that the task will go into
         * pfm_overflow_must_block() before returning to user mode. 
-        * We cannot explicitely reset another task, it MUST always
+        * We cannot explicitly reset another task, it MUST always
         * be done by the task itself. This works for system wide because
         * the tool that is controlling the session is doing "self-monitoring".
         *
@@ -1882,7 +1892,10 @@ pfm_restart(struct task_struct *task, pfm_context_t *ctx, void *arg, int count,
                DBprintk(("unblocking %d \n", task->pid));
                up(sem);
        } else {
+               struct thread_info *info = (struct thread_info *) ((char *) task + IA64_TASK_SIZE);
                task->thread.pfm_ovfl_block_reset = 1;
+               ctx->ctx_fl_trap_reason = PFM_TRAP_REASON_RESET;
+               set_bit(TIF_NOTIFY_RESUME, &info->flags);
        }
 #if 0
        /*
@@ -2051,7 +2064,7 @@ pfm_protect_context(struct task_struct *task, pfm_context_t *ctx, void *arg, int
        /*
         * reinforce secure monitoring: cannot toggle psr.up
         */
-       ia64_psr(regs)->sp = 1;
+       if (ctx->ctx_fl_unsecure == 0) ia64_psr(regs)->sp = 1;
 
        return 0;
 }
@@ -2159,11 +2172,11 @@ pfm_write_ibr_dbr(int mode, struct task_struct *task, void *arg, int count, stru
                 * never leaves the current CPU and the state
                 * is shared by all processes running on it
                 */
-               for (i=0; i < pmu_conf.num_ibrs; i++) {
+               for (i=0; i < (int) pmu_conf.num_ibrs; i++) {
                        ia64_set_ibr(i, 0UL);
                }
                ia64_srlz_i();
-               for (i=0; i < pmu_conf.num_dbrs; i++) {
+               for (i=0; i < (int) pmu_conf.num_dbrs; i++) {
                        ia64_set_dbr(i, 0UL);
                }
                ia64_srlz_d();
@@ -2505,7 +2518,7 @@ static pfm_cmd_desc_t pfm_cmd_tab[]={
 /* 33 */{ pfm_write_dbrs, PFM_CMD_PID|PFM_CMD_CTX|PFM_CMD_ARG_RW, PFM_CMD_ARG_MANY, sizeof(pfarg_dbreg_t)}
 #endif
 };
-#define PFM_CMD_COUNT  (sizeof(pfm_cmd_tab)/sizeof(pfm_cmd_desc_t))
+#define PFM_CMD_COUNT  ARRAY_SIZE(pfm_cmd_tab)
 
 static int
 check_task_state(struct task_struct *task)
@@ -2732,12 +2745,13 @@ pfm_ovfl_block_reset(void)
         * again
         */
        th->pfm_ovfl_block_reset = 0;
+       clear_thread_flag(TIF_NOTIFY_RESUME);
 
        /*
         * do some sanity checks first
         */
        if (!ctx) {
-               printk(KERN_DEBUG "perfmon: [%d] has no PFM context\n", current->pid);
+               printk(KERN_ERR "perfmon: [%d] has no PFM context\n", current->pid);
                return;
        }
        /*
@@ -2899,15 +2913,18 @@ pfm_record_sample(struct task_struct *task, pfm_context_t *ctx, unsigned long ov
 
 /*
  * main overflow processing routine.
- * it can be called from the interrupt path or explicitely during the context switch code
+ * it can be called from the interrupt path or explicitly during the context switch code.
+ * Arguments:
+ *     mode: 0=coming from PMU interrupt, 1=coming from ctxsw 
+ *     
  * Return:
  *     new value of pmc[0]. if 0x0 then unfreeze, else keep frozen
  */
 static unsigned long
-pfm_overflow_handler(struct task_struct *task, pfm_context_t *ctx, u64 pmc0, struct pt_regs *regs)
+pfm_overflow_handler(int mode, struct task_struct *task, pfm_context_t *ctx, u64 pmc0, struct pt_regs *regs)
 {
-       unsigned long mask;
        struct thread_struct *t;
+       unsigned long mask;
        unsigned long old_val;
        unsigned long ovfl_notify = 0UL, ovfl_pmds = 0UL;
        int i;
@@ -2998,10 +3015,10 @@ pfm_overflow_handler(struct task_struct *task, pfm_context_t *ctx, u64 pmc0, str
        /*
         * check for sampling buffer
         *
-        * if present, record sample. We propagate notification ONLY when buffer
-        * becomes full.
+        * if present, record sample only when a 64-bit counter has overflowed.
+        * We propagate notification ONLY when buffer becomes full.
         */
-       if(CTX_HAS_SMPL(ctx)) {
+       if(CTX_HAS_SMPL(ctx) && ovfl_pmds) {
                ret = pfm_record_sample(task, ctx, ovfl_pmds, regs);
                if (ret == 1) {
                        /*
@@ -3046,12 +3063,55 @@ pfm_overflow_handler(struct task_struct *task, pfm_context_t *ctx, u64 pmc0, str
         * ctx_notify_task could already be NULL, checked in pfm_notify_user() 
         */
        if (CTX_OVFL_NOBLOCK(ctx) == 0 && ctx->ctx_notify_task != task) {
-               t->pfm_ovfl_block_reset = 1; /* will cause blocking */
                ctx->ctx_fl_trap_reason = PFM_TRAP_REASON_BLOCKSIG;
        } else {
-               t->pfm_ovfl_block_reset = 1; /* will cause blocking */
                ctx->ctx_fl_trap_reason = PFM_TRAP_REASON_SIG;
        }
+       /*
+        * we cannot block in system wide mode and we do not go
+        * through the PMU ctxsw code. Therefore we can generate
+        * the notification here. In system wide mode, the current
+        * task may be different from the task controlling the session
+        * on this CPU, therefore owner can be different from current.
+        *
+        * In per-process mode, this function gets called from 
+        * the interrupt handler or pfm_load_regs(). The mode argument
+        * tells where we are coming from. When coming from the interrupt
+        * handler, it is safe to notify (send signal) right here because
+        * we do not hold any runqueue locks needed by send_sig_info(). 
+        *
+        * However when coming from ctxsw, we cannot send the signal here.
+        * It must be deferred until we are sure we do not hold any runqueue
+        * related locks. The current task may be different from the owner
+        * only in UP mode. The deferral is implemented using the 
+        * TIF_NOTIFY_RESUME mechanism. In this case, the pending work
+        * is checked when the task is about to leave the kernel (see
+        * entry.S). As of this version of perfmon, a kernel only
+        * entry.S). As of this version of perfmon, a kernel-only
+        * when this function gets called from pfm_load_regs(), we know
+        * we have a user level task which will eventually either exit
+        * or leave the kernel, and thereby go through the checkpoint
+        * for TIF_*.
+        */
+       if (ctx->ctx_fl_system || mode == 0) {
+               pfm_notify_user(ctx);
+               ctx->ctx_fl_trap_reason = PFM_TRAP_REASON_NONE;
+       } else {
+               struct thread_info *info;
+
+               /*
+                * given that TIF_NOTIFY_RESUME is not specific to
+                * perfmon, we need to have a second level check to
+                * verify the source of the notification.
+                */
+               task->thread.pfm_ovfl_block_reset = 1;
+               /*
+                * when coming from ctxsw, current still points to the
+                * previous task, therefore we must work with task and not current.
+                */
+               info = ((struct thread_info *) ((char *) task + IA64_TASK_SIZE));
+               set_bit(TIF_NOTIFY_RESUME, &info->flags);
+       }
 
        /*
         * keep the PMU frozen until either pfm_restart() or 
@@ -3059,7 +3119,10 @@ pfm_overflow_handler(struct task_struct *task, pfm_context_t *ctx, u64 pmc0, str
         */
        ctx->ctx_fl_frozen = 1;
 
-       DBprintk_ovfl(("return pmc0=0x%x must_block=%ld reason=%d\n",
+       DBprintk_ovfl(("current [%d] owner [%d] mode=%d return pmc0=0x%x must_block=%ld reason=%d\n",
+               current->pid, 
+               PMU_OWNER() ? PMU_OWNER()->pid : -1,
+               mode,
                ctx->ctx_fl_frozen ? 0x1 : 0x0, 
                t->pfm_ovfl_block_reset,
                ctx->ctx_fl_trap_reason));
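
The comment block above is the heart of the change: when the overflow is noticed during a context switch, sending the signal immediately could deadlock on runqueue locks, so the handler only marks the task and defers the work to the kernel-exit path. A condensed sketch of the producer side, using the same layout assumption the patch itself relies on (thread_info sits IA64_TASK_SIZE past the task_struct):

    /* producer (PMU ctxsw path): flag the task, defer the signal */
    static void
    defer_ovfl_notify (struct task_struct *task)
    {
            struct thread_info *info;

            task->thread.pfm_ovfl_block_reset = 1;
            info = (struct thread_info *) ((char *) task + IA64_TASK_SIZE);
            set_bit(TIF_NOTIFY_RESUME, &info->flags);
    }

The consumer side is the TIF work-pending check on return to user mode: pfm_ovfl_block_reset() clears TIF_NOTIFY_RESUME (see the hunk further up) and uses thread.pfm_ovfl_block_reset as the second-level check that perfmon, and not some other user of the generic flag, requested the callback.
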
@@ -3068,7 +3131,7 @@ pfm_overflow_handler(struct task_struct *task, pfm_context_t *ctx, u64 pmc0, str
        return 0x1UL;
 }
 
-static void
+static irqreturn_t
 pfm_interrupt_handler(int irq, void *arg, struct pt_regs *regs)
 {
        u64 pmc0;
@@ -3083,7 +3146,7 @@ pfm_interrupt_handler(int irq, void *arg, struct pt_regs *regs)
        if (pfm_alternate_intr_handler) {
                (*pfm_alternate_intr_handler->handler)(irq, arg, regs);
                put_cpu();
-               return;
+               return IRQ_HANDLED;
        }
 
        /* 
@@ -3108,19 +3171,21 @@ pfm_interrupt_handler(int irq, void *arg, struct pt_regs *regs)
                        printk(KERN_DEBUG "perfmon: Spurious overflow interrupt: process %d has "
                               "no PFM context\n", task->pid);
                        put_cpu();
-                       return;
+                       return IRQ_HANDLED;
                }
 
                /* 
                 * assume PMC[0].fr = 1 at this point 
                 */
-               pmc0 = pfm_overflow_handler(task, ctx, pmc0, regs);
+               pmc0 = pfm_overflow_handler(0, task, ctx, pmc0, regs);
                /*
                 * we can only update pmc0 when the overflow
-                * is for the current context. In UP the current
-                * task may not be the one owning the PMU
+                * is for the current context or we are in system
+                * wide mode. In UP (per-task) the current
+                * task may not be the one owning the PMU;
+                * the same can be true in system-wide mode.
                 */
-               if (task == current) {
+               if (task == current || ctx->ctx_fl_system) {
                        /*
                         * We always clear the overflow status bits and either unfreeze
                         * or keep the PMU frozen.
@@ -3134,6 +3199,7 @@ pfm_interrupt_handler(int irq, void *arg, struct pt_regs *regs)
                pfm_stats[smp_processor_id()].pfm_spurious_ovfl_intr_count++;
        }
        put_cpu_no_resched();
+       return IRQ_HANDLED;
 }
 
 /* for debug only */
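
These hunks track the 2.5-series conversion of interrupt handlers from void to irqreturn_t: every return path must now tell the IRQ core whether the interrupt was actually serviced. The general shape of the conversion, sketched outside this driver:

    /* old style: no feedback to the IRQ core */
    static void
    old_handler (int irq, void *arg, struct pt_regs *regs)
    {
            /* ... service the device ... */
    }

    /* new style: report whether the IRQ was ours, which lets the core
     * detect stuck or screaming interrupt lines */
    static irqreturn_t
    new_handler (int irq, void *arg, struct pt_regs *regs)
    {
            /* ... service the device ... */
            return IRQ_HANDLED;     /* or IRQ_NONE if not ours */
    }
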
@@ -3387,11 +3453,11 @@ pfm_load_regs (struct task_struct *task)
         * in the next version of perfmon.
         */
        if (ctx->ctx_fl_using_dbreg) {
-               for (i=0; i < pmu_conf.num_ibrs; i++) {
+               for (i=0; i < (int) pmu_conf.num_ibrs; i++) {
                        ia64_set_ibr(i, t->ibr[i]);
                }
                ia64_srlz_i();
-               for (i=0; i < pmu_conf.num_dbrs; i++) {
+               for (i=0; i < (int) pmu_conf.num_dbrs; i++) {
                        ia64_set_dbr(i, t->dbr[i]);
                }
                ia64_srlz_d();
@@ -3402,7 +3468,7 @@ pfm_load_regs (struct task_struct *task)
         * this path cannot be used in SMP
         */
        if (owner == task) {
-               if (atomic_read(&ctx->ctx_last_cpu) != smp_processor_id())
+               if ((unsigned int) atomic_read(&ctx->ctx_last_cpu) != smp_processor_id())
                        DBprintk(("invalid last_cpu=%d for [%d]\n", 
                                atomic_read(&ctx->ctx_last_cpu), task->pid));
 
@@ -3454,7 +3520,7 @@ pfm_load_regs (struct task_struct *task)
         * Side effect on ctx_fl_frozen is possible.
         */
        if (t->pmc[0] & ~0x1) {
-               t->pmc[0] = pfm_overflow_handler(task, ctx, t->pmc[0], NULL);
+               t->pmc[0] = pfm_overflow_handler(1, task, ctx, t->pmc[0], NULL);
        }
 
        /*
@@ -3676,7 +3742,7 @@ pfm_flush_regs (struct task_struct *task)
         *
         */
 
-       if (atomic_read(&ctx->ctx_last_cpu) != smp_processor_id())
+       if ((unsigned int) atomic_read(&ctx->ctx_last_cpu) != smp_processor_id())
                printk(KERN_DEBUG "perfmon: [%d] last_cpu=%d\n",
                       task->pid, atomic_read(&ctx->ctx_last_cpu));
 
@@ -3754,16 +3820,20 @@ pfm_inherit(struct task_struct *task, struct pt_regs *regs)
 
        preempt_disable();
        /*
-        * make sure child cannot mess up the monitoring session
+        * for secure sessions, make sure child cannot mess up 
+        * the monitoring session.
         */
-        ia64_psr(regs)->sp = 1;
-        DBprintk(("enabling psr.sp for [%d]\n", task->pid));
-
+       if (ctx->ctx_fl_unsecure == 0) {
+               ia64_psr(regs)->sp = 1;
+               DBprintk(("enabling psr.sp for [%d]\n", task->pid));
+       } else {
+               DBprintk(("psr.sp=%d [%d]\n", ia64_psr(regs)->sp, task->pid));
+       }
 
        /*
         * if there was a virtual mapping for the sampling buffer
         * the mapping is NOT inherited across fork() (see VM_DONTCOPY), 
-        * so we don't have to explicitely remove it here. 
+        * so we don't have to explicitly remove it here. 
         *
         *
         * Part of the clearing of fields is also done in
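
The ctx_fl_unsecure plumbing spread across the hunks above lets a monitoring tool leave psr.sp clear in the monitored task, so a task that is not self-monitoring can still read its PMDs directly. A hypothetical user-level sketch of requesting the behavior at context creation; PFM_FL_UNSECURE is from this patch, while the call shape follows the existing perfmonctl(2) convention and the header name is an assumption:

    #include <sys/types.h>
    #include <string.h>
    #include <perfmon.h>            /* assumed: pfarg_context_t, perfmonctl() */

    int
    create_unsecure_context (pid_t pid)
    {
            pfarg_context_t ctx;

            memset(&ctx, 0, sizeof(ctx));
            ctx.ctx_flags = PFM_FL_UNSECURE;  /* keep psr.sp == 0 in the target */

            /* perfmonctl(pid, cmd, arg, narg) */
            return perfmonctl(pid, PFM_CREATE_CONTEXT, &ctx, 1);
    }
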
index c4e8a12..a02cd6c 100644 (file)
@@ -25,8 +25,8 @@ static pfm_reg_desc_t pfm_mck_pmc_desc[PMU_MAX_PMCS]={
 /* pmc5  */ { PFM_REG_COUNTING, 6, 0x0UL, 0xfffff7fUL, NULL,  pfm_mck_reserved, {RDEP(5),0UL, 0UL, 0UL}, {0UL,0UL, 0UL, 0UL}},
 /* pmc6  */ { PFM_REG_COUNTING, 6, 0x0UL, 0xfffff7fUL, NULL,  pfm_mck_reserved, {RDEP(6),0UL, 0UL, 0UL}, {0UL,0UL, 0UL, 0UL}},
 /* pmc7  */ { PFM_REG_COUNTING, 6, 0x0UL, 0xfffff7fUL, NULL,  pfm_mck_reserved, {RDEP(7),0UL, 0UL, 0UL}, {0UL,0UL, 0UL, 0UL}},
-/* pmc8  */ { PFM_REG_CONFIG  , 0, 0xffffffff3fffffffUL, 0xffffffff9fffffffUL, NULL, pfm_mck_pmc_check, {0UL,0UL, 0UL, 0UL}, {0UL,0UL, 0UL, 0UL}},
-/* pmc9  */ { PFM_REG_CONFIG  , 0, 0xffffffff3ffffffcUL, 0xffffffff9fffffffUL, NULL, pfm_mck_pmc_check, {0UL,0UL, 0UL, 0UL}, {0UL,0UL, 0UL, 0UL}},
+/* pmc8  */ { PFM_REG_CONFIG  , 0, 0xffffffff3fffffffUL, 0xffffffff3fffffffUL, NULL, pfm_mck_pmc_check, {0UL,0UL, 0UL, 0UL}, {0UL,0UL, 0UL, 0UL}},
+/* pmc9  */ { PFM_REG_CONFIG  , 0, 0xffffffff3ffffffcUL, 0xffffffff3fffffffUL, NULL, pfm_mck_pmc_check, {0UL,0UL, 0UL, 0UL}, {0UL,0UL, 0UL, 0UL}},
 /* pmc10 */ { PFM_REG_MONITOR , 4, 0x0UL, 0xffffUL, NULL, pfm_mck_reserved, {RDEP(0)|RDEP(1),0UL, 0UL, 0UL}, {0UL,0UL, 0UL, 0UL}},
 /* pmc11 */ { PFM_REG_MONITOR , 6, 0x0UL, 0x30f01cf, NULL,  pfm_mck_reserved, {RDEP(2)|RDEP(3)|RDEP(17),0UL, 0UL, 0UL}, {0UL,0UL, 0UL, 0UL}},
 /* pmc12 */ { PFM_REG_MONITOR , 6, 0x0UL, 0xffffUL, NULL,  pfm_mck_reserved, {RDEP(8)|RDEP(9)|RDEP(10)|RDEP(11)|RDEP(12)|RDEP(13)|RDEP(14)|RDEP(15)|RDEP(16),0UL, 0UL, 0UL}, {0UL,0UL, 0UL, 0UL}},
@@ -143,11 +143,8 @@ pfm_mck_pmc_check(struct task_struct *task, unsigned int cnum, unsigned long *va
                case  8: val8 = *val;
                         val13 = th->pmc[13];
                         val14 = th->pmc[14];
-                        *val |= 1UL << 2; /* bit 2 must always be 1 */
                         check_case1 = 1;
                         break;
-               case  9: *val |= 1UL << 2; /* bit 2 must always be 1 */
-                        break;
                case 13: val8  = th->pmc[8];
                         val13 = *val;
                         val14 = th->pmc[14];
index 132a4a6..2a477a5 100644 (file)
@@ -43,8 +43,8 @@
 
 #include "sigframe.h"
 
-static void
-do_show_stack (struct unw_frame_info *info, void *arg)
+void
+ia64_do_show_stack (struct unw_frame_info *info, void *arg)
 {
        unsigned long ip, sp, bsp;
        char buf[80];                   /* don't make it so big that it overflows the stack! */
@@ -57,7 +57,7 @@ do_show_stack (struct unw_frame_info *info, void *arg)
 
                unw_get_sp(info, &sp);
                unw_get_bsp(info, &bsp);
-               snprintf(buf, sizeof(buf), " [<%016lx>] %%s sp=0x%016lx bsp=0x%016lx\n",
+               snprintf(buf, sizeof(buf), " [<%016lx>] %%s\n\t\t\t\tsp=%016lx bsp=%016lx\n",
                         ip, sp, bsp);
                print_symbol(buf, ip);
        } while (unw_unwind(info) >= 0);
@@ -73,12 +73,12 @@ void
 show_stack (struct task_struct *task)
 {
        if (!task)
-               unw_init_running(do_show_stack, 0);
+               unw_init_running(ia64_do_show_stack, 0);
        else {
                struct unw_frame_info info;
 
                unw_init_from_blocked_task(&info, task);
-               do_show_stack(&info, 0);
+               ia64_do_show_stack(&info, 0);
        }
 }
 
@@ -123,8 +123,8 @@ show_regs (struct pt_regs *regs)
 
        if (user_mode(regs)) {
                /* print the stacked registers */
-               unsigned long val, sof, *bsp, ndirty;
-               int i, is_nat = 0;
+               unsigned long val, *bsp, ndirty;
+               int i, sof, is_nat = 0;
 
                sof = regs->cr_ifs & 0x7f;      /* size of frame */
                ndirty = (regs->loadrs >> 19);
@@ -135,7 +135,7 @@ show_regs (struct pt_regs *regs)
                               ((i == sof - 1) || (i % 3) == 2) ? "\n" : " ");
                }
        } else
-               show_stack(0);
+               show_stack(NULL);
 }
 
 void
@@ -379,6 +379,7 @@ copy_thread (int nr, unsigned long clone_flags,
 #      define THREAD_FLAGS_TO_SET      0
        p->thread.flags = ((current->thread.flags & ~THREAD_FLAGS_TO_CLEAR)
                           | THREAD_FLAGS_TO_SET);
+       p->thread.last_fph_cpu = -1;
 #ifdef CONFIG_IA32_SUPPORT
        /*
         * If we're cloning an IA32 task then save the IA32 extra
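
Initializing last_fph_cpu to -1 for every new thread pairs with the lazy-fph changes elsewhere in this patch (ptrace.c and signal.c below): the high floating-point partition still sitting in the CPU may be reused only if the task both owns the FPU and last used fph on this very CPU. A hedged sketch of that test; the helper name is illustrative:

    /* sketch: may we skip reloading f32-f127 for this task? */
    static inline int
    fph_still_loaded (struct task_struct *task)
    {
            return (task->thread.flags & IA64_THREAD_FPH_VALID)
                    && ia64_get_fpu_owner() == task
                    && task->thread.last_fph_cpu == (int) smp_processor_id();
    }
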
index 376d055..c8eedec 100644 (file)
@@ -202,17 +202,16 @@ static unsigned long
 get_rnat (struct pt_regs *pt, struct switch_stack *sw,
          unsigned long *krbs, unsigned long *urnat_addr)
 {
-       unsigned long rnat0 = 0, rnat1 = 0, urnat = 0, *slot0_kaddr, kmask = ~0UL;
+       unsigned long rnat0 = 0, rnat1 = 0, urnat = 0, *slot0_kaddr, umask = 0UL;
        unsigned long *kbsp, *ubspstore, *rnat0_kaddr, *rnat1_kaddr, shift;
        long num_regs;
 
        kbsp = (unsigned long *) sw->ar_bspstore;
        ubspstore = (unsigned long *) pt->ar_bspstore;
        /*
-        * First, figure out which bit number slot 0 in user-land maps
-        * to in the kernel rnat.  Do this by figuring out how many
-        * register slots we're beyond the user's backingstore and
-        * then computing the equivalent address in kernel space.
+        * First, figure out which bit number slot 0 in user-land maps to in the kernel
+        * rnat.  Do this by figuring out how many register slots we're beyond the user's
+        * backingstore and then computing the equivalent address in kernel space.
         */
        num_regs = ia64_rse_num_regs(ubspstore, urnat_addr + 1);
        slot0_kaddr = ia64_rse_skip_regs(krbs, num_regs);
@@ -222,8 +221,8 @@ get_rnat (struct pt_regs *pt, struct switch_stack *sw,
 
        if (ubspstore + 63 > urnat_addr) {
                /* some bits need to be merged in from pt->ar_rnat */
-               kmask = ~((1UL << ia64_rse_slot_num(ubspstore)) - 1);
-               urnat = (pt->ar_rnat & ~kmask);
+               umask = ((1UL << ia64_rse_slot_num(ubspstore)) - 1);
+               urnat = (pt->ar_rnat & umask);
        }
        if (rnat0_kaddr >= kbsp) {
                rnat0 = sw->ar_rnat;
@@ -235,7 +234,7 @@ get_rnat (struct pt_regs *pt, struct switch_stack *sw,
        } else if (rnat1_kaddr > krbs) {
                rnat1 = *rnat1_kaddr;
        }
-       urnat |= ((rnat1 << (63 - shift)) | (rnat0 >> shift)) & kmask;
+       urnat |= ((rnat1 << (63 - shift)) | (rnat0 >> shift)) & ~umask;
        return urnat;
 }
 
@@ -246,17 +245,19 @@ static void
 put_rnat (struct pt_regs *pt, struct switch_stack *sw,
          unsigned long *krbs, unsigned long *urnat_addr, unsigned long urnat)
 {
-       unsigned long rnat0 = 0, rnat1 = 0, rnat = 0, *slot0_kaddr, kmask = ~0UL, mask;
-       unsigned long *kbsp, *ubspstore, *rnat0_kaddr, *rnat1_kaddr, shift;
-       long num_regs;
+       unsigned long rnat0 = 0, rnat1 = 0, *slot0_kaddr, umask = 0, mask, m;
+       unsigned long *kbsp, *ubspstore, *rnat0_kaddr, *rnat1_kaddr, shift, slot, ndirty;
+       long num_regs, nbits;
+
+       ndirty = ia64_rse_num_regs(krbs, krbs + (pt->loadrs >> 19));
+       nbits = ndirty % 63;
 
        kbsp = (unsigned long *) sw->ar_bspstore;
        ubspstore = (unsigned long *) pt->ar_bspstore;
        /*
-        * First, figure out which bit number slot 0 in user-land maps
-        * to in the kernel rnat.  Do this by figuring out how many
-        * register slots we're beyond the user's backingstore and
-        * then computing the equivalent address in kernel space.
+        * First, figure out which bit number slot 0 in user-land maps to in the kernel
+        * rnat.  Do this by figuring out how many register slots we're beyond the user's
+        * backingstore and then computing the equivalent address in kernel space.
         */
        num_regs = (long) ia64_rse_num_regs(ubspstore, urnat_addr + 1);
        slot0_kaddr = ia64_rse_skip_regs(krbs, num_regs);
@@ -264,29 +265,37 @@ put_rnat (struct pt_regs *pt, struct switch_stack *sw,
        rnat1_kaddr = ia64_rse_rnat_addr(slot0_kaddr);
        rnat0_kaddr = rnat1_kaddr - 64;
 
+printk("%s: ubspstore=%p urnat_addr=%p\n", __FUNCTION__, ubspstore, urnat_addr);
        if (ubspstore + 63 > urnat_addr) {
                /* some bits need to be place in pt->ar_rnat: */
-               kmask = ~((1UL << ia64_rse_slot_num(ubspstore)) - 1);
-               pt->ar_rnat = (pt->ar_rnat & kmask) | (rnat & ~kmask);
+               slot = ia64_rse_slot_num(ubspstore);
+               umask = ((1UL << slot) - 1);
+               pt->ar_rnat = (pt->ar_rnat & ~umask) | (urnat & umask);
+               nbits -= slot;
+               if (nbits <= 0)
+                       return;
        }
+       mask = (1UL << nbits) - 1;
        /*
         * Note: Section 11.1 of the EAS guarantees that bit 63 of an
         * rnat slot is ignored. so we don't have to clear it here.
         */
        rnat0 = (urnat << shift);
-       mask = ~0UL << shift;
+       m = mask << shift;
+printk("%s: rnat0=%016lx, m=%016lx, rnat0_kaddr=%p kbsp=%p\n", __FUNCTION__, rnat0, m, rnat0_kaddr, kbsp);
        if (rnat0_kaddr >= kbsp) {
-               sw->ar_rnat = (sw->ar_rnat & ~mask) | (rnat0 & mask);
+               sw->ar_rnat = (sw->ar_rnat & ~m) | (rnat0 & m);
        } else if (rnat0_kaddr > krbs) {
-               *rnat0_kaddr = ((*rnat0_kaddr & ~mask) | (rnat0 & mask));
+               *rnat0_kaddr = ((*rnat0_kaddr & ~m) | (rnat0 & m));
        }
 
        rnat1 = (urnat >> (63 - shift));
-       mask = ~0UL >> (63 - shift);
+       m = mask >> (63 - shift);
+printk("%s: rnat1=%016lx, m=%016lx, rnat1_kaddr=%p kbsp=%p\n", __FUNCTION__, rnat1, m, rnat1_kaddr, kbsp);
        if (rnat1_kaddr >= kbsp) {
-               sw->ar_rnat = (sw->ar_rnat & ~mask) | (rnat1 & mask);
+               sw->ar_rnat = (sw->ar_rnat & ~m) | (rnat1 & m);
        } else if (rnat1_kaddr > krbs) {
-               *rnat1_kaddr = ((*rnat1_kaddr & ~mask) | (rnat1 & mask));
+               *rnat1_kaddr = ((*rnat1_kaddr & ~m) | (rnat1 & m));
        }
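
The kmask-to-umask rewrite in get_rnat()/put_rnat() makes the split explicit: RNAT bits for register slots below the user's ar.bspstore slot live in pt->ar_rnat, and everything above comes out of the kernel backing store. A small worked sketch of the merge, in get_rnat() orientation, where slot = ia64_rse_slot_num(ubspstore) as in the code above:

    /* sketch: merge user and kernel RNAT bits at slot boundary 'slot' */
    static unsigned long
    merge_rnat (unsigned long user_rnat, unsigned long kern_rnat, int slot)
    {
            unsigned long umask = (1UL << slot) - 1;  /* e.g. slot=5 -> 0x1f */

            /* slots below ar.bspstore's own slot are user state (pt->ar_rnat);
             * everything above comes from the shifted kernel RNAT words */
            return (user_rnat & umask) | (kern_rnat & ~umask);
    }
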
 }
 
@@ -589,6 +598,7 @@ ia64_flush_fph (struct task_struct *task)
                psr->mfh = 0;
                ia64_save_fpu(&task->thread.fph[0]);
                task->thread.flags |= IA64_THREAD_FPH_VALID;
+               task->thread.last_fph_cpu = smp_processor_id();
        }
 }
 
@@ -608,12 +618,11 @@ ia64_sync_fph (struct task_struct *task)
        ia64_flush_fph(task);
        if (!(task->thread.flags & IA64_THREAD_FPH_VALID)) {
                task->thread.flags |= IA64_THREAD_FPH_VALID;
+               task->thread.last_fph_cpu = -1;         /* force reload */
                memset(&task->thread.fph, 0, sizeof(task->thread.fph));
        }
-#ifndef CONFIG_SMP
        if (ia64_get_fpu_owner() == task)
                ia64_set_fpu_owner(0);
-#endif
        psr->dfh = 1;
 }
 
@@ -702,7 +711,9 @@ access_uarea (struct task_struct *child, unsigned long addr, unsigned long *data
                      case PT_R4: case PT_R5: case PT_R6: case PT_R7:
                        if (write_access) {
                                /* read NaT bit first: */
-                               ret = unw_get_gr(&info, (addr - PT_R4)/8 + 4, data, &nat);
+                               unsigned long dummy;
+
+                               ret = unw_get_gr(&info, (addr - PT_R4)/8 + 4, &dummy, &nat);
                                if (ret < 0)
                                        return ret;
                        }
index ffd5e36..7298e9a 100644 (file)
@@ -116,7 +116,7 @@ ia64_sal_init (struct ia64_sal_systab *systab)
        p = (char *) (systab + 1);
        for (i = 0; i < systab->entry_count; i++) {
                /*
-                * The first byte of each entry type contains the type desciptor.
+                * The first byte of each entry type contains the type descriptor.
                 */
                switch (*p) {
                      case SAL_DESC_ENTRY_POINT:
index 4ec8d51..04f8740 100644 (file)
@@ -38,7 +38,7 @@ static salinfo_entry_t salinfo_entries[]={
        { "itc_drift",          IA64_SAL_PLATFORM_FEATURE_ITC_DRIFT, },
 };
 
-#define NR_SALINFO_ENTRIES (sizeof(salinfo_entries)/sizeof(salinfo_entry_t))
+#define NR_SALINFO_ENTRIES ARRAY_SIZE(salinfo_entries)
 
 /*
  * One for each feature and one more for the directory entry...
index 4b70c64..8c81836 100644 (file)
@@ -59,7 +59,10 @@ unsigned long ia64_cycles_per_usec;
 struct ia64_boot_param *ia64_boot_param;
 struct screen_info screen_info;
 
+unsigned long ia64_max_cacheline_size;
 unsigned long ia64_iobase;     /* virtual address for I/O accesses */
+struct io_space io_space[MAX_IO_SPACES];
+unsigned int num_io_spaces;
 
 unsigned char aux_device_present = 0xaa;        /* XXX remove this when legacy I/O is gone */
 
@@ -412,6 +415,11 @@ setup_arch (char **cmdline_p)
        }
        ia64_iobase = (unsigned long) ioremap(phys_iobase, 0);
 
+       /* setup legacy IO port space */
+       io_space[0].mmio_base = ia64_iobase;
+       io_space[0].sparse = 1;
+       num_io_spaces = 1;
+
 #ifdef CONFIG_SMP
        cpu_physical_id(0) = hard_smp_processor_id();
 #endif
@@ -421,7 +429,7 @@ setup_arch (char **cmdline_p)
 #ifdef CONFIG_ACPI_BOOT
        acpi_boot_init();
 #endif
-#ifdef CONFIG_SERIAL_HCDP
+#ifdef CONFIG_SERIAL_8250_HCDP
        if (efi.hcdp) {
                void setup_serial_hcdp(void *);
 
@@ -494,7 +502,7 @@ show_cpuinfo (struct seq_file *m, void *v)
        memcpy(features, " standard", 10);
        cp = features;
        sep = 0;
-       for (i = 0; i < sizeof(feature_bits)/sizeof(feature_bits[0]); ++i) {
+       for (i = 0; i < (int) ARRAY_SIZE(feature_bits); ++i) {
                if (mask & feature_bits[i].mask) {
                        if (sep)
                                *cp++ = sep;
@@ -625,6 +633,40 @@ setup_per_cpu_areas (void)
        /* start_kernel() requires this... */
 }
 
+static void
+get_max_cacheline_size (void)
+{
+       unsigned long line_size, max = 1;
+       u64 l, levels, unique_caches;
+        pal_cache_config_info_t cci;
+        s64 status;
+
+        status = ia64_pal_cache_summary(&levels, &unique_caches);
+        if (status != 0) {
+                printk(KERN_ERR "%s: ia64_pal_cache_summary() failed (status=%ld)\n",
+                       __FUNCTION__, status);
+                max = SMP_CACHE_BYTES;
+               goto out;
+        }
+
+       for (l = 0; l < levels; ++l) {
+               status = ia64_pal_cache_config_info(l, /* cache_type (data_or_unified)= */ 2,
+                                                   &cci);
+               if (status != 0) {
+                       printk(KERN_ERR
+                              "%s: ia64_pal_cache_config_info(l=%lu) failed (status=%ld)\n",
+                              __FUNCTION__, l, status);
+                       max = SMP_CACHE_BYTES;
+                       max = SMP_CACHE_BYTES;
+                       goto out;
+               line_size = 1 << cci.pcci_line_size;
+               if (line_size > max)
+                       max = line_size;
+        }
+  out:
+       if (max > ia64_max_cacheline_size)
+               ia64_max_cacheline_size = max;
+}
 
 /*
  * cpu_init() initializes state that is per-CPU.  This function acts
@@ -668,6 +709,8 @@ cpu_init (void)
        cpu_info->node_data = get_node_data_ptr();
 #endif
 
+       get_max_cacheline_size();
+
        /*
         * We can't pass "local_cpu_data" to identify_cpu() because we haven't called
         * ia64_mmu_init() yet.  And we can't call ia64_mmu_init() first because it
index 3037b25..3b55d7f 100644 (file)
@@ -142,8 +142,13 @@ restore_sigcontext (struct sigcontext *sc, struct sigscratch *scr)
 
                __copy_from_user(current->thread.fph, &sc->sc_fr[32], 96*16);
                psr->mfh = 0;   /* drop signal handler's fph contents... */
-               if (!psr->dfh)
+               if (psr->dfh)
+                       current->thread.last_fph_cpu = -1;
+               else {
                        __ia64_load_fpu(current->thread.fph);
+                       ia64_set_fpu_owner(current);
+                       current->thread.last_fph_cpu = smp_processor_id();
+               }
        }
        return err;
 }
@@ -523,7 +528,7 @@ ia64_do_signal (sigset_t *oldset, struct sigscratch *scr, long in_syscall)
                        else
                                errno = -errno;
                }
-       } else if (scr->pt.r10 != -1)
+       } else if ((long) scr->pt.r10 != -1)
                /*
                 * A system calls has to be restarted only if one of the error codes
                 * ERESTARTNOHAND, ERESTARTSYS, or ERESTARTNOINTR is returned.  If r10
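
The `(long)` cast added to the r10 test matches the many other sign fixes in this commit: pt.r10 is unsigned, so the cast makes the comparison against -1 explicitly signed and keeps gcc's signed/unsigned comparison warning quiet without changing the result. A small demonstration of the semantics involved:

    #include <stdio.h>

    int main(void)
    {
        unsigned long r10 = (unsigned long) -1;  /* syscall error marker */

        /* without a cast, -1 is silently converted to ULONG_MAX here, which
         * works but draws -Wsign-compare; the cast form is equivalent and
         * warning-clean */
        if ((long) r10 != -1)
            printf("restart candidate\n");
        else
            printf("r10 == -1, no restart\n");  /* this branch runs */
        return 0;
    }
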
index 3dc304e..7d19294 100644 (file)
@@ -2,7 +2,7 @@
  * SMP Support
  *
  * Copyright (C) 1999 Walt Drummond <drummond@valinux.com>
- * Copyright (C) 1999, 2001 David Mosberger-Tang <davidm@hpl.hp.com>
+ * Copyright (C) 1999, 2001, 2003 David Mosberger-Tang <davidm@hpl.hp.com>
  *
  * Lots of stuff stolen from arch/alpha/kernel/smp.c
  *
@@ -87,7 +87,7 @@ stop_this_cpu (void)
        cpu_halt();
 }
 
-void
+irqreturn_t
 handle_IPI (int irq, void *dev_id, struct pt_regs *regs)
 {
        int this_cpu = get_cpu();
@@ -147,10 +147,11 @@ handle_IPI (int irq, void *dev_id, struct pt_regs *regs)
                mb();   /* Order data access and bit testing. */
        }
        put_cpu();
+       return IRQ_HANDLED;
 }
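
handle_IPI() now returns irqreturn_t, the handler convention introduced during 2.5: handlers report whether they actually serviced the interrupt so the core can detect screaming or misrouted IRQ lines. A self-contained mock of the convention (the constants match the kernel's IRQ_NONE/IRQ_HANDLED values):

    #include <stdio.h>

    typedef int irqreturn_t;
    #define IRQ_NONE        ((irqreturn_t) 0)  /* interrupt was not ours */
    #define IRQ_HANDLED     ((irqreturn_t) 1)  /* we serviced it */

    static irqreturn_t handle_ipi_mock(int irq, void *dev_id)
    {
        /* ... drain pending IPI operations, as handle_IPI() does ... */
        return IRQ_HANDLED;  /* the old void handlers could not report this */
    }

    int main(void)
    {
        printf("handler returned %d\n", handle_ipi_mock(0, NULL));
        return 0;
    }
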
 
 /*
- * Called with preeemption disabled 
+ * Called with preemption disabled.
  */
 static inline void
 send_IPI_single (int dest_cpu, int op)
@@ -160,12 +161,12 @@ send_IPI_single (int dest_cpu, int op)
 }
 
 /*
- * Called with preeemption disabled 
+ * Called with preemption disabled.
  */
 static inline void
 send_IPI_allbutself (int op)
 {
-       int i;
+       unsigned int i;
 
        for (i = 0; i < NR_CPUS; i++) {
                if (cpu_online(i) && i != smp_processor_id())
@@ -174,7 +175,7 @@ send_IPI_allbutself (int op)
 }
 
 /*
- * Called with preeemption disabled 
+ * Called with preemption disabled.
  */
 static inline void
 send_IPI_all (int op)
@@ -187,7 +188,7 @@ send_IPI_all (int op)
 }
 
 /*
- * Called with preeemption disabled 
+ * Called with preemption disabled.
  */
 static inline void
 send_IPI_self (int op)
@@ -196,7 +197,7 @@ send_IPI_self (int op)
 }
 
 /*
- * Called with preeemption disabled 
+ * Called with preemption disabled.
  */
 void
 smp_send_reschedule (int cpu)
index dfd8ffd..8c183cd 100644 (file)
@@ -192,6 +192,7 @@ ia64_sync_itc (unsigned int master)
 {
        long i, delta, adj, adjust_latency = 0, done = 0;
        unsigned long flags, rt, master_time_stamp, bound;
+       extern void ia64_cpu_local_tick (void);
 #if DEBUG_ITC_SYNC
        struct {
                long rt;        /* roundtrip time */
@@ -246,6 +247,16 @@ ia64_sync_itc (unsigned int master)
 
        printk(KERN_INFO "CPU %d: synchronized ITC with CPU %u (last diff %ld cycles, "
               "maxerr %lu cycles)\n", smp_processor_id(), master, delta, rt);
+
+       /*
+        * Check whether we sync'd the itc ahead of the next timer interrupt.  If so, just
+        * reset it.
+        */
+       if (time_after(ia64_get_itc(), local_cpu_data->itm_next)) {
+               Dprintk("CPU %d: oops, jumped a timer tick; resetting timer.\n",
+                       smp_processor_id());
+               ia64_cpu_local_tick();
+       }
 }
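
The new check relies on time_after(), which stays correct across counter wraparound by comparing in signed arithmetic rather than with a plain `>`. The macro below mirrors the kernel's definition; the example shows the wrap case where the naive comparison gives the wrong answer:

    #include <stdio.h>

    /* signed-difference comparison, as the kernel's time_after() does */
    #define time_after(a, b)  ((long) (b) - (long) (a) < 0)

    int main(void)
    {
        unsigned long itm_next = (unsigned long) -10;  /* just below the wrap */
        unsigned long itc = itm_next + 20;             /* wrapped past it */

        printf("plain >     : %d\n", itc > itm_next);            /* 0 -- wrong */
        printf("time_after(): %d\n", time_after(itc, itm_next)); /* 1 -- right */
        return 0;
    }
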
 
 /*
@@ -279,15 +290,6 @@ smp_callin (void)
 
        smp_setup_percpu_timer();
 
-       if (!(sal_platform_features & IA64_SAL_PLATFORM_FEATURE_ITC_DRIFT)) {
-               /*
-                * Synchronize the ITC with the BP
-                */
-               Dprintk("Going to syncup ITC with BP.\n");
-
-               ia64_sync_itc(0);
-       }
-
        /*
         * Get our bogomips.
         */
@@ -310,6 +312,18 @@ smp_callin (void)
        local_irq_enable();
        calibrate_delay();
        local_cpu_data->loops_per_jiffy = loops_per_jiffy;
+
+       if (!(sal_platform_features & IA64_SAL_PLATFORM_FEATURE_ITC_DRIFT)) {
+               /*
+                * Synchronize the ITC with the BP.  Need to do this after irqs are
+                * enabled because ia64_sync_itc() calls smp_call_function_single(), which
+                * calls spin_unlock_bh(), which calls local_bh_enable(), which bugs
+                * out if irqs are not enabled...
+                */
+               Dprintk("Going to syncup ITC with BP.\n");
+               ia64_sync_itc(0);
+       }
+
        /*
         * Allow the master to continue.
         */
@@ -394,13 +408,26 @@ do_boot_cpu (int sapicid, int cpu)
        return 0;
 }
 
-unsigned long cache_decay_ticks;       /* # of ticks an idle task is considered cache-hot */
+static int __init
+decay (char *str)
+{
+       int ticks;
+       get_option (&str, &ticks);
+       cache_decay_ticks = ticks;
+       return 1;
+}
+
+__setup("decay=", decay);
+
+/*
+ * # of ticks an idle task is considered cache-hot.  Highly application-dependent.  There
+ * are apps out there which are known to suffer significantly with values >= 4.
+ */
+unsigned long cache_decay_ticks = 10;  /* equal to MIN_TIMESLICE */
 
 static void
 smp_tune_scheduling (void)
 {
-       cache_decay_ticks = 10; /* XXX base this on PAL info and cache-bandwidth estimate */
-
        printk(KERN_INFO "task migration cache decay timeout: %ld msecs.\n",
               (cache_decay_ticks + 1) * 1000 / HZ);
 }
index abc0b4e..0f387be 100644 (file)
@@ -83,11 +83,26 @@ gettimeoffset (void)
        return (elapsed_cycles*local_cpu_data->nsec_per_cyc) >> IA64_NSEC_PER_CYC_SHIFT;
 }
 
+static inline void
+set_normalized_timespec (struct timespec *ts, time_t sec, long nsec)
+{
+       while (nsec >= NSEC_PER_SEC) {
+               nsec -= NSEC_PER_SEC;
+               ++sec;
+       }
+       while (nsec < 0) {
+               nsec += NSEC_PER_SEC;
+               --sec;
+       }
+       ts->tv_sec = sec;
+       ts->tv_nsec = nsec;
+}
+
 void
 do_settimeofday (struct timeval *tv)
 {
-       time_t sec = tv->tv_sec;
-       long nsec = tv->tv_usec * 1000;
+       time_t wtm_sec, sec = tv->tv_sec;
+       long wtm_nsec, nsec = tv->tv_usec * 1000;
 
        write_seqlock_irq(&xtime_lock);
        {
@@ -99,13 +114,12 @@ do_settimeofday (struct timeval *tv)
                 */
                nsec -= gettimeoffset();
 
-               while (nsec < 0) {
-                       nsec += 1000000000;
-                       sec--;
-               }
+               wtm_sec  = wall_to_monotonic.tv_sec + (xtime.tv_sec - sec);
+               wtm_nsec = wall_to_monotonic.tv_nsec + (xtime.tv_nsec - nsec);
+
+               set_normalized_timespec(&xtime, sec, nsec);
+               set_normalized_timespec(&wall_to_monotonic, wtm_sec, wtm_nsec);
 
-               xtime.tv_sec = sec;
-               xtime.tv_nsec = nsec;
                time_adjust = 0;                /* stop active adjtime() */
                time_status |= STA_UNSYNC;
                time_maxerror = NTP_PHASE_LIMIT;
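
The wtm_sec/wtm_nsec bookkeeping preserves the invariant that xtime + wall_to_monotonic is constant: whatever delta settimeofday() applies to the wall clock is applied in reverse to wall_to_monotonic, so the monotonic clock never steps. A self-contained check of that invariant using the same normalization helper:

    #include <stdio.h>

    #define NSEC_PER_SEC 1000000000L

    struct ts { long sec, nsec; };

    static void set_normalized(struct ts *t, long sec, long nsec)
    {
        while (nsec >= NSEC_PER_SEC) { nsec -= NSEC_PER_SEC; ++sec; }
        while (nsec < 0)             { nsec += NSEC_PER_SEC; --sec; }
        t->sec = sec;
        t->nsec = nsec;
    }

    int main(void)
    {
        struct ts xtime, wtm;
        long new_sec = 2000, new_nsec = 0;  /* value handed to settimeofday */
        long wtm_sec, wtm_nsec;

        set_normalized(&xtime, 1000, 900000000);  /* current wall time */
        set_normalized(&wtm, -1000, -900000000);  /* so xtime + wtm == 0 */

        /* mirror do_settimeofday(): shift wtm opposite to the wall delta */
        wtm_sec  = wtm.sec  + (xtime.sec  - new_sec);
        wtm_nsec = wtm.nsec + (xtime.nsec - new_nsec);

        set_normalized(&xtime, new_sec, new_nsec);
        set_normalized(&wtm, wtm_sec, wtm_nsec);

        /* still zero: the monotonic base did not move */
        printf("%ld sec %ld nsec\n", xtime.sec + wtm.sec, xtime.nsec + wtm.nsec);
        return 0;
    }
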
@@ -166,8 +180,8 @@ do_gettimeofday (struct timeval *tv)
 
        usec = (nsec + offset) / 1000;
 
-       while (unlikely(usec >= 1000000)) {
-               usec -= 1000000;
+       while (unlikely(usec >= USEC_PER_SEC)) {
+               usec -= USEC_PER_SEC;
                ++sec;
        }
 
@@ -175,8 +189,8 @@ do_gettimeofday (struct timeval *tv)
        tv->tv_usec = usec;
 }
 
-static void
-timer_interrupt(int irq, void *dev_id, struct pt_regs *regs)
+static irqreturn_t
+timer_interrupt (int irq, void *dev_id, struct pt_regs *regs)
 {
        unsigned long new_itm;
 
@@ -221,7 +235,7 @@ timer_interrupt(int irq, void *dev_id, struct pt_regs *regs)
        do {
            /*
             * If we're too close to the next clock tick for comfort, we increase the
-            * saftey margin by intentionally dropping the next tick(s).  We do NOT update
+            * safety margin by intentionally dropping the next tick(s).  We do NOT update
             * itm.next because that would force us to call do_timer() which in turn would
             * let our clock run too fast (with the potentially devastating effect of
             * losing monotony of time).
@@ -231,12 +245,13 @@ timer_interrupt(int irq, void *dev_id, struct pt_regs *regs)
            ia64_set_itm(new_itm);
            /* double check, in case we got hit by a (slow) PMI: */
        } while (time_after_eq(ia64_get_itc(), new_itm));
+       return IRQ_HANDLED;
 }
 
 /*
  * Encapsulate access to the itm structure for SMP.
  */
-void __init
+void
 ia64_cpu_local_tick (void)
 {
        int cpu = smp_processor_id();
@@ -281,7 +296,7 @@ ia64_init_itm (void)
        if (status != 0) {
                /* invent "random" values */
                printk(KERN_ERR
-                      "SAL/PAL failed to obtain frequency info---inventing reasonably values\n");
+                      "SAL/PAL failed to obtain frequency info---inventing reasonable values\n");
                platform_base_freq = 100000000;
                itc_ratio.num = 3;
                itc_ratio.den = 1;
@@ -305,8 +320,8 @@ ia64_init_itm (void)
 
        local_cpu_data->proc_freq = (platform_base_freq*proc_ratio.num)/proc_ratio.den;
        local_cpu_data->itc_freq = itc_freq;
-       local_cpu_data->cyc_per_usec = (itc_freq + 500000) / 1000000;
-       local_cpu_data->nsec_per_cyc = ((1000000000UL<<IA64_NSEC_PER_CYC_SHIFT)
+       local_cpu_data->cyc_per_usec = (itc_freq + USEC_PER_SEC/2) / USEC_PER_SEC;
+       local_cpu_data->nsec_per_cyc = ((NSEC_PER_SEC<<IA64_NSEC_PER_CYC_SHIFT)
                                        + itc_freq/2)/itc_freq;
 
        /* Setup the CPU local timer tick */
@@ -323,6 +338,12 @@ void __init
 time_init (void)
 {
        register_percpu_irq(IA64_TIMER_VECTOR, &timer_irqaction);
-       efi_gettimeofday((struct timeval *) &xtime);
+       efi_gettimeofday(&xtime);
        ia64_init_itm();
+
+       /*
+        * Initialize wall_to_monotonic such that adding it to xtime will yield zero; the
+        * tv_nsec field must be normalized (i.e., 0 <= nsec < NSEC_PER_SEC).
+        */
+       set_normalized_timespec(&wall_to_monotonic, -xtime.tv_sec, -xtime.tv_nsec);
 }
index 6d3f997..f17048b 100644 (file)
@@ -94,7 +94,7 @@ die (const char *str, struct pt_regs *regs, long err)
 {
        static struct {
                spinlock_t lock;
-               int lock_owner;
+               u32 lock_owner;
                int lock_owner_depth;
        } die = {
                .lock =                 SPIN_LOCK_UNLOCKED,
index cb366a3..4436fb8 100644 (file)
@@ -789,7 +789,7 @@ emulate_load_int (unsigned long ifa, load_store_t ld, struct pt_regs *regs)
         *
         * ldX.a (advanced load):
         *      - suppose ldX.a r1=[r3]. If we get to the unaligned trap it's because the
-        *        address doesn't match requested size alignement. This means that we would
+        *        address doesn't match requested size alignment. This means that we would
         *        possibly need more than one load to get the result.
         *
         *        The load part can be handled just like a normal load, however the difficult
index 991fc4a..9f15881 100644 (file)
@@ -682,7 +682,7 @@ finish_prologue (struct unw_state_record *sr)
         * First, resolve implicit register save locations (see Section "11.4.2.3 Rules
         * for Using Unwind Descriptors", rule 3):
         */
-       for (i = 0; i < (int) sizeof(unw.save_order)/sizeof(unw.save_order[0]); ++i) {
+       for (i = 0; i < (int) ARRAY_SIZE(unw.save_order); ++i) {
                reg = sr->curr.reg + unw.save_order[i];
                if (reg->where == UNW_WHERE_GR_SAVE) {
                        reg->where = UNW_WHERE_GR;
@@ -698,7 +698,7 @@ finish_prologue (struct unw_state_record *sr)
         */
        if (sr->imask) {
                unsigned char kind, mask = 0, *cp = sr->imask;
-               unsigned long t;
+               int t;
                static const unsigned char limit[3] = {
                        UNW_REG_F31, UNW_REG_R7, UNW_REG_B5
                };
@@ -1214,13 +1214,13 @@ script_new (unsigned long ip)
        spin_unlock(&unw.lock);
 
        /*
-        * XXX We'll deadlock here if we interrupt a thread that is
-        * holding a read lock on script->lock.  A try_write_lock()
-        * might be mighty handy here...  Alternatively, we could
-        * disable interrupts whenever we hold a read-lock, but that
-        * seems silly.
+        * We'd deadlock here if we interrupted a thread that is holding a read lock on
+        * script->lock.  Thus, if the write_trylock() fails, we simply bail out.  The
+        * alternative would be to disable interrupts whenever we hold a read-lock, but
+        * that seems silly.
         */
-       write_lock(&script->lock);
+       if (!write_trylock(&script->lock))
+               return NULL;
 
        spin_lock(&unw.lock);
        {
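
The write_trylock() conversion above fixes a classic self-deadlock: if the unwinder runs from an interrupt that landed while the interrupted code held script->lock for reading, an unconditional write_lock() would spin forever on that CPU. Trying the lock and returning NULL is safe because a script is only a cache entry. A userspace analogue of the hazard and the fix:

    #include <pthread.h>
    #include <stdio.h>

    static pthread_rwlock_t lock = PTHREAD_RWLOCK_INITIALIZER;

    int main(void)
    {
        pthread_rwlock_rdlock(&lock);  /* "interrupted" context holds a read lock */

        /* the "interrupt" path: a blocking wrlock here could never succeed;
         * the try variant fails cleanly and lets us bail out */
        if (pthread_rwlock_trywrlock(&lock) != 0)
            printf("lock busy -- bailing out instead of deadlocking\n");

        pthread_rwlock_unlock(&lock);
        return 0;
    }
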
@@ -1888,22 +1888,21 @@ unw_unwind_to_user (struct unw_frame_info *info)
        return -1;
 }
 
-void
-unw_init_frame_info (struct unw_frame_info *info, struct task_struct *t, struct switch_stack *sw)
+static void
+init_frame_info (struct unw_frame_info *info, struct task_struct *t,
+                struct switch_stack *sw, unsigned long stktop)
 {
-       unsigned long rbslimit, rbstop, stklimit, stktop, sol;
+       unsigned long rbslimit, rbstop, stklimit;
        STAT(unsigned long start, flags;)
 
        STAT(local_irq_save(flags); ++unw.stat.api.inits; start = ia64_get_itc());
 
        /*
-        * Subtle stuff here: we _could_ unwind through the
-        * switch_stack frame but we don't want to do that because it
-        * would be slow as each preserved register would have to be
-        * processed.  Instead, what we do here is zero out the frame
-        * info and start the unwind process at the function that
-        * created the switch_stack frame.  When a preserved value in
-        * switch_stack needs to be accessed, run_script() will
+        * Subtle stuff here: we _could_ unwind through the switch_stack frame but we
+        * don't want to do that because it would be slow as each preserved register would
+        * have to be processed.  Instead, what we do here is zero out the frame info and
+        * start the unwind process at the function that created the switch_stack frame.
+        * When a preserved value in switch_stack needs to be accessed, run_script() will
         * initialize the appropriate pointer on demand.
         */
        memset(info, 0, sizeof(*info));
@@ -1914,7 +1913,6 @@ unw_init_frame_info (struct unw_frame_info *info, struct task_struct *t, struct
                rbstop = rbslimit;
 
        stklimit = (unsigned long) t + IA64_STK_OFFSET;
-       stktop   = (unsigned long) sw - 16;
        if (stktop <= rbstop)
                stktop = rbstop;
 
@@ -1924,34 +1922,58 @@ unw_init_frame_info (struct unw_frame_info *info, struct task_struct *t, struct
        info->memstk.top   = stktop;
        info->task = t;
        info->sw  = sw;
-       info->sp = info->psp = (unsigned long) (sw + 1) - 16;
-       info->pt = 0;
+       info->sp = info->psp = stktop;
+       info->pr = sw->pr;
+       UNW_DPRINT(3, "unwind.%s:\n"
+                  "  task   0x%lx\n"
+                  "  rbs = [0x%lx-0x%lx)\n"
+                  "  stk = [0x%lx-0x%lx)\n"
+                  "  pr     0x%lx\n"
+                  "  sw     0x%lx\n"
+                  "  sp     0x%lx\n",
+                  __FUNCTION__, (unsigned long) t, rbslimit, rbstop, stktop, stklimit,
+                  info->pr, (unsigned long) info->sw, info->sp);
+       STAT(unw.stat.api.init_time += ia64_get_itc() - start; local_irq_restore(flags));
+}
+
+void
+unw_init_from_interruption (struct unw_frame_info *info, struct task_struct *t,
+                           struct pt_regs *pt, struct switch_stack *sw)
+{
+       unsigned long sof;
+
+       init_frame_info(info, t, sw, pt->r12);
+       info->cfm_loc = &pt->cr_ifs;
+       info->unat_loc = &pt->ar_unat;
+       info->pfs_loc = &pt->ar_pfs;
+       sof = *info->cfm_loc & 0x7f;
+       info->bsp = (unsigned long) ia64_rse_skip_regs((unsigned long *) info->regstk.top, -sof);
+       info->ip = pt->cr_iip + ia64_psr(pt)->ri;
+       info->pt = (unsigned long) pt;
+       UNW_DPRINT(3, "unwind.%s:\n"
+                  "  bsp    0x%lx\n"
+                  "  sof    0x%lx\n"
+                  "  ip     0x%lx\n",
+                  __FUNCTION__, info->bsp, sof, info->ip);
+       find_save_locs(info);
+}
+
+void
+unw_init_frame_info (struct unw_frame_info *info, struct task_struct *t, struct switch_stack *sw)
+{
+       unsigned long sol;
+
+       init_frame_info(info, t, sw, (unsigned long) (sw + 1) - 16);
        info->cfm_loc = &sw->ar_pfs;
        sol = (*info->cfm_loc >> 7) & 0x7f;
        info->bsp = (unsigned long) ia64_rse_skip_regs((unsigned long *) info->regstk.top, -sol);
        info->ip = sw->b0;
-       info->pr = sw->pr;
-       UNW_DPRINT(3,
-                       "unwind.%s\n"
-                       "  rbslimit 0x%lx\n"
-                       "  rbstop   0x%lx\n"
-                       "  stklimit 0x%lx\n"
-                       "  stktop   0x%lx\n"
-                       "  task     0x%lx\n"
-                       "  sw       0x%lx\n",
-                       __FUNCTION__, rbslimit, rbstop, stklimit, stktop,
-                       (unsigned long)(info->task),
-                       (unsigned long)(info->sw));
-       UNW_DPRINT(3,
-                       "  sp/psp   0x%lx\n"
-                       "  sol      0x%lx\n"
-                       "  bsp      0x%lx\n"
-                       "  ip       0x%lx\n"
-                       "  pr       0x%lx\n",
-                       info->sp, sol, info->bsp, info->ip, info->pr);
-
+       UNW_DPRINT(3, "unwind.%s:\n"
+                  "  bsp    0x%lx\n"
+                  "  sol    0x%lx\n"
+                  "  ip     0x%lx\n",
+                  __FUNCTION__, info->bsp, sol, info->ip);
        find_save_locs(info);
-       STAT(unw.stat.api.init_time += ia64_get_itc() - start; local_irq_restore(flags));
 }
 
 void
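
The split above hinges on which frame-marker field seeds bsp: unw_init_from_interruption() uses sof (size of frame, the low 7 bits of the CFM), while the switch_stack path keeps using sol (size of locals, the next 7 bits) -- exactly the masks visible in the code. Decoding both fields in isolation:

    #include <stdio.h>

    int main(void)
    {
        unsigned long cfm = (5UL << 7) | 8;      /* example frame: sol=5, sof=8 */
        unsigned long sof = cfm & 0x7f;          /* total frame size */
        unsigned long sol = (cfm >> 7) & 0x7f;   /* input + local registers */

        printf("sof=%lu sol=%lu outputs=%lu\n", sof, sol, sof - sol);
        return 0;
    }
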
index 2272730..c952bdc 100644 (file)
@@ -316,7 +316,7 @@ GLOBAL_ENTRY(__copy_user)
        // Beginning of long mempcy (i.e. > 16 bytes)
        //
 .long_copy_user:
-       tbit.nz p6,p7=src1,0    // odd alignement
+       tbit.nz p6,p7=src1,0    // odd alignment
        and tmp=7,tmp
        ;;
        cmp.eq p10,p8=r0,tmp
index c3bc67d..6bec2fc 100644 (file)
@@ -137,7 +137,7 @@ GLOBAL_ENTRY(do_csum)
        mov saved_pr=pr         // preserve predicates (rotation)
 (p6)   br.ret.spnt.many rp     // return if zero or negative length
 
-       mov hmask=-1            // intialize head mask
+       mov hmask=-1            // initialize head mask
        tbit.nz p15,p0=buf,0    // is buf an odd address?
        and first1=-8,buf       // 8-byte align down address of first1 element
 
index 2ff57f3..d05f92f 100644 (file)
@@ -51,84 +51,79 @@ __ia64_memset_c_io (unsigned long dst, unsigned long c, long count)
 
 #ifdef CONFIG_IA64_GENERIC
 
+#undef __ia64_inb
+#undef __ia64_inw
+#undef __ia64_inl
+#undef __ia64_outb
+#undef __ia64_outw
+#undef __ia64_outl
+#undef __ia64_readb
+#undef __ia64_readw
+#undef __ia64_readl
+#undef __ia64_readq
+#undef __ia64_writeb
+#undef __ia64_writew
+#undef __ia64_writel
+#undef __ia64_writeq
+
 unsigned int
-ia64_inb (unsigned long port)
+__ia64_inb (unsigned long port)
 {
-       return __ia64_inb(port);
+       return ___ia64_inb(port);
 }
 
 unsigned int
-ia64_inw (unsigned long port)
+__ia64_inw (unsigned long port)
 {
-       return __ia64_inw(port);
+       return ___ia64_inw(port);
 }
 
 unsigned int
-ia64_inl (unsigned long port)
+__ia64_inl (unsigned long port)
 {
-       return __ia64_inl(port);
+       return ___ia64_inl(port);
 }
 
 void
-ia64_outb (unsigned char val, unsigned long port)
+__ia64_outb (unsigned char val, unsigned long port)
 {
-       __ia64_outb(val, port);
+       ___ia64_outb(val, port);
 }
 
 void
-ia64_outw (unsigned short val, unsigned long port)
+__ia64_outw (unsigned short val, unsigned long port)
 {
-       __ia64_outw(val, port);
+       ___ia64_outw(val, port);
 }
 
 void
-ia64_outl (unsigned int val, unsigned long port)
+__ia64_outl (unsigned int val, unsigned long port)
 {
-       __ia64_outl(val, port);
+       ___ia64_outl(val, port);
 }
 
 unsigned char
-ia64_readb (void *addr)
+__ia64_readb (void *addr)
 {
-       return __ia64_readb (addr);
+       return ___ia64_readb (addr);
 }
 
 unsigned short
-ia64_readw (void *addr)
+__ia64_readw (void *addr)
 {
-       return __ia64_readw (addr);
+       return ___ia64_readw (addr);
 }
 
 unsigned int
-ia64_readl (void *addr)
+__ia64_readl (void *addr)
 {
-       return __ia64_readl (addr);
+       return ___ia64_readl (addr);
 }
 
 unsigned long
-ia64_readq (void *addr)
+__ia64_readq (void *addr)
 {
-       return __ia64_readq (addr);
+       return ___ia64_readq (addr);
 }
 
-
-/* define aliases: */
-
-asm (".global __ia64_inb, __ia64_inw, __ia64_inl");
-asm ("__ia64_inb = ia64_inb");
-asm ("__ia64_inw = ia64_inw");
-asm ("__ia64_inl = ia64_inl");
-
-asm (".global __ia64_outb, __ia64_outw, __ia64_outl");
-asm ("__ia64_outb = ia64_outb");
-asm ("__ia64_outw = ia64_outw");
-asm ("__ia64_outl = ia64_outl");
-
-asm (".global __ia64_readb, __ia64_readw, __ia64_readl, __ia64_readq");
-asm ("__ia64_readb = ia64_readb");
-asm ("__ia64_readw = ia64_readw");
-asm ("__ia64_readl = ia64_readl");
-asm ("__ia64_readq = ia64_readq");
-
-
 #endif /* CONFIG_IA64_GENERIC */
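
The rewrite above drops the fragile asm-alias trick in favor of the usual C pattern for giving macro/inline accessors real out-of-line bodies: #undef the header's short names, then define genuine functions with those names that call the always-available triple-underscore inlines. Reduced to one accessor:

    #include <stdio.h>

    /* "header" side: the inline fast path plus a macro short name */
    static inline unsigned int ___inb(unsigned long port)
    {
        return 42;  /* pretend port read */
    }
    #define __inb(port) ___inb(port)

    /* "io.c" side: take the short name back and make it a real symbol */
    #undef __inb
    unsigned int __inb(unsigned long port)
    {
        return ___inb(port);
    }

    int main(void)
    {
        unsigned int (*fn)(unsigned long) = __inb;  /* addressable now */

        printf("%u\n", fn(0x80));
        return 0;
    }
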
index 3d00798..9ba1094 100644 (file)
@@ -5,7 +5,10 @@
  * I/O TLBs (aka DMA address translation hardware).
  * Copyright (C) 2000 Asit Mallick <Asit.K.Mallick@intel.com>
  * Copyright (C) 2000 Goutham Rao <goutham.rao@intel.com>
+ * Copyright (C) 2000, 2003 Hewlett-Packard Co
+ *     David Mosberger-Tang <davidm@hpl.hp.com>
  *
+ * 03/05/07 davidm     Switch from PCI-DMA to generic device DMA API.
  * 00/12/13 davidm     Rename to swiotlb.c and add mark_clean() to avoid
  *                     unnecessary i-cache flushing.
  */
@@ -92,7 +95,7 @@ __setup("swiotlb=", setup_io_tlb_npages);
 void
 swiotlb_init (void)
 {
-       int i;
+       unsigned long i;
 
        /*
         * Get IO TLB memory from the low pages
@@ -121,7 +124,7 @@ swiotlb_init (void)
  * Allocates bounce buffer and returns its kernel virtual address.
  */
 static void *
-map_single (struct pci_dev *hwdev, char *buffer, size_t size, int direction)
+map_single (struct device *hwdev, char *buffer, size_t size, int dir)
 {
        unsigned long flags;
        char *dma_addr;
@@ -161,7 +164,7 @@ map_single (struct pci_dev *hwdev, char *buffer, size_t size, int direction)
                        if (io_tlb_list[index] >= nslots) {
                                int count = 0;
 
-                               for (i = index; i < index + nslots; i++)
+                               for (i = index; i < (int) (index + nslots); i++)
                                        io_tlb_list[i] = 0;
                                for (i = index - 1; (OFFSET(i, IO_TLB_SEGSIZE) != IO_TLB_SEGSIZE -1)
                                       && io_tlb_list[i]; i--)
@@ -195,7 +198,7 @@ map_single (struct pci_dev *hwdev, char *buffer, size_t size, int direction)
         * needed when we sync the memory.  Then we sync the buffer if needed.
         */
        io_tlb_orig_addr[index] = buffer;
-       if (direction == PCI_DMA_TODEVICE || direction == PCI_DMA_BIDIRECTIONAL)
+       if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL)
                memcpy(dma_addr, buffer, size);
 
        return dma_addr;
@@ -205,7 +208,7 @@ map_single (struct pci_dev *hwdev, char *buffer, size_t size, int direction)
  * dma_addr is the kernel virtual address of the bounce buffer to unmap.
  */
 static void
-unmap_single (struct pci_dev *hwdev, char *dma_addr, size_t size, int direction)
+unmap_single (struct device *hwdev, char *dma_addr, size_t size, int dir)
 {
        unsigned long flags;
        int i, nslots = ALIGN(size, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT;
@@ -215,7 +218,7 @@ unmap_single (struct pci_dev *hwdev, char *dma_addr, size_t size, int direction)
        /*
         * First, sync the memory before unmapping the entry
         */
-       if ((direction == PCI_DMA_FROMDEVICE) || (direction == PCI_DMA_BIDIRECTIONAL))
+       if ((dir == DMA_FROM_DEVICE) || (dir == DMA_BIDIRECTIONAL))
                /*
                 * bounce... copy the data back into the original buffer * and delete the
                 * bounce buffer.
@@ -239,7 +242,7 @@ unmap_single (struct pci_dev *hwdev, char *dma_addr, size_t size, int direction)
                for (i = index + nslots - 1; i >= index; i--)
                        io_tlb_list[i] = ++count;
                /*
-                * Step 2: merge the returned slots with the preceeding slots, if
+                * Step 2: merge the returned slots with the preceding slots, if
                 * available (non zero)
                 */
                for (i = index - 1;  (OFFSET(i, IO_TLB_SEGSIZE) != IO_TLB_SEGSIZE -1) &&
@@ -250,49 +253,46 @@ unmap_single (struct pci_dev *hwdev, char *dma_addr, size_t size, int direction)
 }
 
 static void
-sync_single (struct pci_dev *hwdev, char *dma_addr, size_t size, int direction)
+sync_single (struct device *hwdev, char *dma_addr, size_t size, int dir)
 {
        int index = (dma_addr - io_tlb_start) >> IO_TLB_SHIFT;
        char *buffer = io_tlb_orig_addr[index];
 
        /*
         * bounce... copy the data back into/from the original buffer
-        * XXX How do you handle PCI_DMA_BIDIRECTIONAL here ?
+        * XXX How do you handle DMA_BIDIRECTIONAL here ?
         */
-       if (direction == PCI_DMA_FROMDEVICE)
+       if (dir == DMA_FROM_DEVICE)
                memcpy(buffer, dma_addr, size);
-       else if (direction == PCI_DMA_TODEVICE)
+       else if (dir == DMA_TO_DEVICE)
                memcpy(dma_addr, buffer, size);
        else
                BUG();
 }
 
 void *
-swiotlb_alloc_consistent (struct pci_dev *hwdev, size_t size, dma_addr_t *dma_handle)
+swiotlb_alloc_coherent (struct device *hwdev, size_t size, dma_addr_t *dma_handle, int flags)
 {
-       unsigned long pci_addr;
-       int gfp = GFP_ATOMIC;
+       unsigned long dev_addr;
        void *ret;
 
-       /*
-        * Alloc_consistent() is defined to return memory < 4GB, no matter what the DMA
-        * mask says.
-        */
-       gfp |= GFP_DMA; /* XXX fix me: should change this to GFP_32BIT or ZONE_32BIT */
-       ret = (void *)__get_free_pages(gfp, get_order(size));
+       /* XXX fix me: the DMA API should pass us an explicit DMA mask instead: */
+       flags |= GFP_DMA;
+
+       ret = (void *)__get_free_pages(flags, get_order(size));
        if (!ret)
                return NULL;
 
        memset(ret, 0, size);
-       pci_addr = virt_to_phys(ret);
-       if (hwdev && (pci_addr & ~hwdev->dma_mask) != 0)
-               panic("swiotlb_alloc_consistent: allocated memory is out of range for PCI device");
-       *dma_handle = pci_addr;
+       dev_addr = virt_to_phys(ret);
+       if (hwdev && hwdev->dma_mask && (dev_addr & ~*hwdev->dma_mask) != 0)
+               panic("swiotlb_alloc_coherent: allocated memory is out of range for device");
+       *dma_handle = dev_addr;
        return ret;
 }
 
 void
-swiotlb_free_consistent (struct pci_dev *hwdev, size_t size, void *vaddr, dma_addr_t dma_handle)
+swiotlb_free_coherent (struct device *hwdev, size_t size, void *vaddr, dma_addr_t dma_handle)
 {
        free_pages((unsigned long) vaddr, get_order(size));
 }
@@ -305,34 +305,34 @@ swiotlb_free_consistent (struct pci_dev *hwdev, size_t size, void *vaddr, dma_ad
  * swiotlb_unmap_single or swiotlb_dma_sync_single is performed.
  */
 dma_addr_t
-swiotlb_map_single (struct pci_dev *hwdev, void *ptr, size_t size, int direction)
+swiotlb_map_single (struct device *hwdev, void *ptr, size_t size, int dir)
 {
-       unsigned long pci_addr = virt_to_phys(ptr);
+       unsigned long dev_addr = virt_to_phys(ptr);
 
-       if (direction == PCI_DMA_NONE)
+       if (dir == DMA_NONE)
                BUG();
        /*
         * Check if the PCI device can DMA to ptr... if so, just return ptr
         */
-       if ((pci_addr & ~hwdev->dma_mask) == 0)
+       if (hwdev && hwdev->dma_mask && (dev_addr & ~*hwdev->dma_mask) == 0)
                /*
                 * Device is bit capable of DMA'ing to the buffer... just return the PCI
                 * address of ptr
                 */
-               return pci_addr;
+               return dev_addr;
 
        /*
         * get a bounce buffer:
         */
-       pci_addr = virt_to_phys(map_single(hwdev, ptr, size, direction));
+       dev_addr = virt_to_phys(map_single(hwdev, ptr, size, dir));
 
        /*
         * Ensure that the address returned is DMA'ble:
         */
-       if ((pci_addr & ~hwdev->dma_mask) != 0)
+       if (hwdev && hwdev->dma_mask && (dev_addr & ~*hwdev->dma_mask) != 0)
                panic("map_single: bounce buffer is not DMA'ble");
 
-       return pci_addr;
+       return dev_addr;
 }
 
 /*
@@ -363,15 +363,15 @@ mark_clean (void *addr, size_t size)
  * device wrote there.
  */
 void
-swiotlb_unmap_single (struct pci_dev *hwdev, dma_addr_t pci_addr, size_t size, int direction)
+swiotlb_unmap_single (struct device *hwdev, dma_addr_t dev_addr, size_t size, int dir)
 {
-       char *dma_addr = phys_to_virt(pci_addr);
+       char *dma_addr = phys_to_virt(dev_addr);
 
-       if (direction == PCI_DMA_NONE)
+       if (dir == DMA_NONE)
                BUG();
        if (dma_addr >= io_tlb_start && dma_addr < io_tlb_end)
-               unmap_single(hwdev, dma_addr, size, direction);
-       else if (direction == PCI_DMA_FROMDEVICE)
+               unmap_single(hwdev, dma_addr, size, dir);
+       else if (dir == DMA_FROM_DEVICE)
                mark_clean(dma_addr, size);
 }
 
@@ -385,21 +385,21 @@ swiotlb_unmap_single (struct pci_dev *hwdev, dma_addr_t pci_addr, size_t size, i
  * again owns the buffer.
  */
 void
-swiotlb_sync_single (struct pci_dev *hwdev, dma_addr_t pci_addr, size_t size, int direction)
+swiotlb_sync_single (struct device *hwdev, dma_addr_t dev_addr, size_t size, int dir)
 {
-       char *dma_addr = phys_to_virt(pci_addr);
+       char *dma_addr = phys_to_virt(dev_addr);
 
-       if (direction == PCI_DMA_NONE)
+       if (dir == DMA_NONE)
                BUG();
        if (dma_addr >= io_tlb_start && dma_addr < io_tlb_end)
-               sync_single(hwdev, dma_addr, size, direction);
-       else if (direction == PCI_DMA_FROMDEVICE)
+               sync_single(hwdev, dma_addr, size, dir);
+       else if (dir == DMA_FROM_DEVICE)
                mark_clean(dma_addr, size);
 }
 
 /*
  * Map a set of buffers described by scatterlist in streaming mode for DMA.  This is the
- * scather-gather version of the above swiotlb_map_single interface.  Here the scatter
+ * scatter-gather version of the above swiotlb_map_single interface.  Here the scatter
  * gather list elements are each tagged with the appropriate dma address and length.  They
  * are obtained via sg_dma_{address,length}(SG).
  *
@@ -412,23 +412,22 @@ swiotlb_sync_single (struct pci_dev *hwdev, dma_addr_t pci_addr, size_t size, in
  * Device ownership issues as mentioned above for swiotlb_map_single are the same here.
  */
 int
-swiotlb_map_sg (struct pci_dev *hwdev, struct scatterlist *sg, int nelems, int direction)
+swiotlb_map_sg (struct device *hwdev, struct scatterlist *sg, int nelems, int dir)
 {
        void *addr;
-       unsigned long pci_addr;
+       unsigned long dev_addr;
        int i;
 
-       if (direction == PCI_DMA_NONE)
+       if (dir == DMA_NONE)
                BUG();
 
        for (i = 0; i < nelems; i++, sg++) {
                addr = SG_ENT_VIRT_ADDRESS(sg);
-               pci_addr = virt_to_phys(addr);
-               if ((pci_addr & ~hwdev->dma_mask) != 0)
-                       sg->dma_address = (dma_addr_t)
-                               map_single(hwdev, addr, sg->length, direction);
+               dev_addr = virt_to_phys(addr);
+               if (hwdev && hwdev->dma_mask && (dev_addr & ~*hwdev->dma_mask) != 0)
+                       sg->dma_address = (dma_addr_t) map_single(hwdev, addr, sg->length, dir);
                else
-                       sg->dma_address = pci_addr;
+                       sg->dma_address = dev_addr;
                sg->dma_length = sg->length;
        }
        return nelems;
@@ -439,17 +438,17 @@ swiotlb_map_sg (struct pci_dev *hwdev, struct scatterlist *sg, int nelems, int d
  * here are the same as for swiotlb_unmap_single() above.
  */
 void
-swiotlb_unmap_sg (struct pci_dev *hwdev, struct scatterlist *sg, int nelems, int direction)
+swiotlb_unmap_sg (struct device *hwdev, struct scatterlist *sg, int nelems, int dir)
 {
        int i;
 
-       if (direction == PCI_DMA_NONE)
+       if (dir == DMA_NONE)
                BUG();
 
        for (i = 0; i < nelems; i++, sg++)
                if (sg->dma_address != SG_ENT_PHYS_ADDRESS(sg))
-                       unmap_single(hwdev, (void *) sg->dma_address, sg->dma_length, direction);
-               else if (direction == PCI_DMA_FROMDEVICE)
+                       unmap_single(hwdev, (void *) sg->dma_address, sg->dma_length, dir);
+               else if (dir == DMA_FROM_DEVICE)
                        mark_clean(SG_ENT_VIRT_ADDRESS(sg), sg->dma_length);
 }
 
@@ -461,16 +460,16 @@ swiotlb_unmap_sg (struct pci_dev *hwdev, struct scatterlist *sg, int nelems, int
  * usage.
  */
 void
-swiotlb_sync_sg (struct pci_dev *hwdev, struct scatterlist *sg, int nelems, int direction)
+swiotlb_sync_sg (struct device *hwdev, struct scatterlist *sg, int nelems, int dir)
 {
        int i;
 
-       if (direction == PCI_DMA_NONE)
+       if (dir == DMA_NONE)
                BUG();
 
        for (i = 0; i < nelems; i++, sg++)
                if (sg->dma_address != SG_ENT_PHYS_ADDRESS(sg))
-                       sync_single(hwdev, (void *) sg->dma_address, sg->dma_length, direction);
+                       sync_single(hwdev, (void *) sg->dma_address, sg->dma_length, dir);
 }
 
 /*
@@ -479,7 +478,7 @@ swiotlb_sync_sg (struct pci_dev *hwdev, struct scatterlist *sg, int nelems, int
  * you would pass 0x00ffffff as the mask to this function.
  */
 int
-swiotlb_pci_dma_supported (struct pci_dev *hwdev, u64 mask)
+swiotlb_dma_supported (struct device *hwdev, u64 mask)
 {
        return 1;
 }
@@ -491,6 +490,6 @@ EXPORT_SYMBOL(swiotlb_map_sg);
 EXPORT_SYMBOL(swiotlb_unmap_sg);
 EXPORT_SYMBOL(swiotlb_sync_single);
 EXPORT_SYMBOL(swiotlb_sync_sg);
-EXPORT_SYMBOL(swiotlb_alloc_consistent);
-EXPORT_SYMBOL(swiotlb_free_consistent);
-EXPORT_SYMBOL(swiotlb_pci_dma_supported);
+EXPORT_SYMBOL(swiotlb_alloc_coherent);
+EXPORT_SYMBOL(swiotlb_free_coherent);
+EXPORT_SYMBOL(swiotlb_dma_supported);
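
The whole swiotlb conversion is mechanical once the correspondence between the old PCI DMA layer and the 2.5 generic device DMA API is laid out: `struct pci_dev *` becomes `struct device *` (whose dma_mask is a pointer, hence the new `*hwdev->dma_mask` dereferences), and the direction constants lose their PCI prefix. For a driver, the calls line up roughly as follows (a PCI driver passes &pdev->dev):

    pci_map_single(pdev, ...)       ->  dma_map_single(&pdev->dev, ...)
    pci_unmap_single(pdev, ...)     ->  dma_unmap_single(&pdev->dev, ...)
    pci_alloc_consistent(pdev, ...) ->  dma_alloc_coherent(&pdev->dev, ..., GFP_ATOMIC)
    PCI_DMA_TODEVICE                ->  DMA_TO_DEVICE
    PCI_DMA_FROMDEVICE              ->  DMA_FROM_DEVICE
    PCI_DMA_BIDIRECTIONAL           ->  DMA_BIDIRECTIONAL
    PCI_DMA_NONE                    ->  DMA_NONE
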
index f16f8ce..68f3b4a 100644 (file)
@@ -58,6 +58,18 @@ ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_regs *re
        if (in_atomic() || !mm)
                goto no_context;
 
+#ifdef CONFIG_VIRTUAL_MEM_MAP
+       /*
+        * If fault is in region 5 and we are in the kernel, we may already
+        * have the mmap_sem (pfn_valid macro is called during mmap). There
+        * is no vma for region 5 addresses anyway, so skip getting the semaphore
+        * and go directly to the exception handling code.
+        */
+
+       if ((REGION_NUMBER(address) == 5) && !user_mode(regs))
+               goto bad_area_no_up;
+#endif
+
        down_read(&mm->mmap_sem);
 
        vma = find_vma_prev(mm, address, &prev_vma);
@@ -139,6 +151,9 @@ ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_regs *re
 
   bad_area:
        up_read(&mm->mmap_sem);
+#ifdef CONFIG_VIRTUAL_MEM_MAP
+  bad_area_no_up:
+#endif
        if ((isr & IA64_ISR_SP)
            || ((isr & IA64_ISR_NA) && (isr & IA64_ISR_CODE_MASK) == IA64_ISR_CODE_LFETCH))
        {
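
The region-5 shortcut above works because an ia64 virtual address carries its region number in its top three bits, so REGION_NUMBER() reduces to a 61-bit shift; region 5 (addresses from 0xa000000000000000) is the page-table-mapped kernel area (vmalloc and, with this patch, the virtual mem_map), which never has a vma behind it. A sketch, assuming the shift-based definition:

    #include <stdio.h>

    /* assumption: region number is bits 63..61 of the address */
    #define REGION_NUMBER(addr) ((unsigned long) (addr) >> 61)

    int main(void)
    {
        unsigned long vmalloc_addr = 0xa000000000050000UL;

        printf("region %lu\n", REGION_NUMBER(vmalloc_addr));  /* prints 5 */
        return 0;
    }
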
index 0d38a16..aa7b7de 100644 (file)
@@ -38,6 +38,13 @@ extern void ia64_tlb_init (void);
 
 unsigned long MAX_DMA_ADDRESS = PAGE_OFFSET + 0x100000000UL;
 
+#ifdef CONFIG_VIRTUAL_MEM_MAP
+# define LARGE_GAP     0x40000000      /* Use virtual mem map if hole is larger than this */
+  unsigned long vmalloc_end = VMALLOC_END_INIT;
+  static struct page *vmem_map;
+  static unsigned long num_dma_physpages;
+#endif
+
 static int pgt_cache_water[2] = { 25, 50 };
 
 void
@@ -48,13 +55,13 @@ check_pgt_cache (void)
        low = pgt_cache_water[0];
        high = pgt_cache_water[1];
 
-       if (pgtable_cache_size > high) {
+       if (pgtable_cache_size > (u64) high) {
                do {
                        if (pgd_quicklist)
                                free_page((unsigned long)pgd_alloc_one_fast(0));
                        if (pmd_quicklist)
                                free_page((unsigned long)pmd_alloc_one_fast(0, 0));
-               } while (pgtable_cache_size > low);
+               } while (pgtable_cache_size > (u64) low);
        }
 }
 
@@ -337,6 +344,139 @@ ia64_mmu_init (void *my_cpu_data)
        ia64_tlb_init();
 }
 
+#ifdef CONFIG_VIRTUAL_MEM_MAP
+
+static int
+create_mem_map_page_table (u64 start, u64 end, void *arg)
+{
+       unsigned long address, start_page, end_page;
+       struct page *map_start, *map_end;
+       pgd_t *pgd;
+       pmd_t *pmd;
+       pte_t *pte;
+
+       map_start = vmem_map + (__pa(start) >> PAGE_SHIFT);
+       map_end   = vmem_map + (__pa(end) >> PAGE_SHIFT);
+
+       start_page = (unsigned long) map_start & PAGE_MASK;
+       end_page = PAGE_ALIGN((unsigned long) map_end);
+
+       for (address = start_page; address < end_page; address += PAGE_SIZE) {
+               pgd = pgd_offset_k(address);
+               if (pgd_none(*pgd))
+                       pgd_populate(&init_mm, pgd, alloc_bootmem_pages(PAGE_SIZE));
+               pmd = pmd_offset(pgd, address);
+
+               if (pmd_none(*pmd))
+                       pmd_populate_kernel(&init_mm, pmd, alloc_bootmem_pages(PAGE_SIZE));
+               pte = pte_offset_kernel(pmd, address);
+
+               if (pte_none(*pte))
+                       set_pte(pte, pfn_pte(__pa(alloc_bootmem_pages(PAGE_SIZE)) >> PAGE_SHIFT,
+                                            PAGE_KERNEL));
+       }
+       return 0;
+}
+
+struct memmap_init_callback_data {
+       struct page *start;
+       struct page *end;
+       int nid;
+       unsigned long zone;
+};
+
+static int
+virtual_memmap_init (u64 start, u64 end, void *arg)
+{
+       struct memmap_init_callback_data *args;
+       struct page *map_start, *map_end;
+
+       args = (struct memmap_init_callback_data *) arg;
+
+       map_start = vmem_map + (__pa(start) >> PAGE_SHIFT);
+       map_end   = vmem_map + (__pa(end) >> PAGE_SHIFT);
+
+       if (map_start < args->start)
+               map_start = args->start;
+       if (map_end > args->end)
+               map_end = args->end;
+
+       /*
+        * We have to initialize "out of bounds" struct page elements that fit completely
+        * on the same pages that were allocated for the "in bounds" elements because they
+        * may be referenced later (and found to be "reserved").
+        */
+       map_start -= ((unsigned long) map_start & (PAGE_SIZE - 1)) / sizeof(struct page);
+       map_end += ((PAGE_ALIGN((unsigned long) map_end) - (unsigned long) map_end)
+                   / sizeof(struct page));
+
+       if (map_start < map_end)
+               memmap_init_zone(map_start, (unsigned long) (map_end - map_start),
+                                args->nid, args->zone, page_to_pfn(map_start));
+       return 0;
+}
+
+void
+memmap_init (struct page *start, unsigned long size, int nid,
+            unsigned long zone, unsigned long start_pfn)
+{
+       if (!vmem_map)
+               memmap_init_zone(start, size, nid, zone, start_pfn);
+       else {
+               struct memmap_init_callback_data args;
+
+               args.start = start;
+               args.end = start + size;
+               args.nid = nid;
+               args.zone = zone;
+
+               efi_memmap_walk(virtual_memmap_init, &args);
+       }
+}
+
+int
+ia64_pfn_valid (unsigned long pfn)
+{
+       char byte;
+
+       return __get_user(byte, (char *) pfn_to_page(pfn)) == 0;
+}
+
+static int
+count_dma_pages (u64 start, u64 end, void *arg)
+{
+       unsigned long *count = arg;
+
+       if (end <= MAX_DMA_ADDRESS)
+               *count += (end - start) >> PAGE_SHIFT;
+       return 0;
+}
+
+static int
+find_largest_hole (u64 start, u64 end, void *arg)
+{
+       u64 *max_gap = arg;
+
+       static u64 last_end = PAGE_OFFSET;
+
+       /* NOTE: this algorithm assumes efi memmap table is ordered */
+
+       if (*max_gap < (start - last_end))
+               *max_gap = start - last_end;
+       last_end = end;
+       return 0;
+}
+#endif /* CONFIG_VIRTUAL_MEM_MAP */
+
+static int
+count_pages (u64 start, u64 end, void *arg)
+{
+       unsigned long *count = arg;
+
+       *count += (end - start) >> PAGE_SHIFT;
+       return 0;
+}
+
 /*
  * Set up the page tables.
  */
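
paging_init() below picks a memmap layout by measuring the largest hole in the EFI memory map: under LARGE_GAP (1 GB) a flat mem_map is cheapest, while above it most struct page entries would describe holes, so the array is instead carved from the top of vmalloc space and only the slices covering real memory get backing pages (create_mem_map_page_table), with ia64_pfn_valid() probing entries via __get_user() so unmapped stretches fault harmlessly. The decision rule in isolation, with an assumed struct page size:

    #include <stdio.h>

    #define LARGE_GAP 0x40000000UL  /* 1 GB, as in the patch */

    int main(void)
    {
        unsigned long max_gap = 0x200000000UL;      /* say, an 8 GB inter-node hole */
        unsigned long max_low_pfn = 1UL << 24;      /* highest pfn, example value */
        unsigned long map_size = max_low_pfn * 64;  /* assuming 64-byte struct page */

        if (max_gap < LARGE_GAP)
            printf("flat mem_map, no holes worth skipping\n");
        else
            printf("virtual mem_map, ~%lu MB reserved in vmalloc space\n",
                   map_size >> 20);
        return 0;
    }
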
@@ -348,18 +488,70 @@ paging_init (void)
        extern void discontig_paging_init(void);
 
        discontig_paging_init();
+       efi_memmap_walk(count_pages, &num_physpages);
 }
 #else /* !CONFIG_DISCONTIGMEM */
 void
 paging_init (void)
 {
-       unsigned long max_dma, zones_size[MAX_NR_ZONES];
+       unsigned long max_dma;
+       unsigned long zones_size[MAX_NR_ZONES];
+#  ifdef CONFIG_VIRTUAL_MEM_MAP
+       unsigned long zholes_size[MAX_NR_ZONES];
+       unsigned long max_gap;
+#  endif
 
        /* initialize mem_map[] */
 
        memset(zones_size, 0, sizeof(zones_size));
 
+       num_physpages = 0;
+       efi_memmap_walk(count_pages, &num_physpages);
+
        max_dma = virt_to_phys((void *) MAX_DMA_ADDRESS) >> PAGE_SHIFT;
+
+#  ifdef CONFIG_VIRTUAL_MEM_MAP
+       memset(zholes_size, 0, sizeof(zholes_size));
+
+       num_dma_physpages = 0;
+       efi_memmap_walk(count_dma_pages, &num_dma_physpages);
+
+       if (max_low_pfn < max_dma) {
+               zones_size[ZONE_DMA] = max_low_pfn;
+               zholes_size[ZONE_DMA] = max_low_pfn - num_dma_physpages;
+       } else {
+               zones_size[ZONE_DMA] = max_dma;
+               zholes_size[ZONE_DMA] = max_dma - num_dma_physpages;
+               if (num_physpages > num_dma_physpages) {
+                       zones_size[ZONE_NORMAL] = max_low_pfn - max_dma;
+                       zholes_size[ZONE_NORMAL] = ((max_low_pfn - max_dma)
+                                                   - (num_physpages - num_dma_physpages));
+               }
+       }
+
+       max_gap = 0;
+       efi_memmap_walk(find_largest_hole, (u64 *)&max_gap);
+       if (max_gap < LARGE_GAP) {
+               vmem_map = (struct page *) 0;
+               free_area_init_node(0, &contig_page_data, NULL, zones_size, 0, zholes_size);
+               mem_map = contig_page_data.node_mem_map;
+       }
+       else {
+               unsigned long map_size;
+
+               /* allocate virtual_mem_map */
+
+               map_size = PAGE_ALIGN(max_low_pfn * sizeof(struct page));
+               vmalloc_end -= map_size;
+               vmem_map = (struct page *) vmalloc_end;
+               efi_memmap_walk(create_mem_map_page_table, 0);
+
+               free_area_init_node(0, &contig_page_data, vmem_map, zones_size, 0, zholes_size);
+
+               mem_map = contig_page_data.node_mem_map;
+               printk("Virtual mem_map starts at 0x%p\n", mem_map);
+       }
+#  else /* !CONFIG_VIRTUAL_MEM_MAP */
        if (max_low_pfn < max_dma)
                zones_size[ZONE_DMA] = max_low_pfn;
        else {
@@ -367,19 +559,11 @@ paging_init (void)
                zones_size[ZONE_NORMAL] = max_low_pfn - max_dma;
        }
        free_area_init(zones_size);
+#  endif /* !CONFIG_VIRTUAL_MEM_MAP */
 }
 #endif /* !CONFIG_DISCONTIGMEM */
 
 static int
-count_pages (u64 start, u64 end, void *arg)
-{
-       unsigned long *count = arg;
-
-       *count += (end - start) >> PAGE_SHIFT;
-       return 0;
-}
-
-static int
 count_reserved_pages (u64 start, u64 end, void *arg)
 {
        unsigned long num_reserved = 0;
@@ -406,7 +590,7 @@ mem_init (void)
         * any drivers that may need the PCI DMA interface are initialized or bootmem has
         * been freed.
         */
-       platform_pci_dma_init();
+       platform_dma_init();
 #endif
 
 #ifndef CONFIG_DISCONTIGMEM
@@ -415,9 +599,6 @@ mem_init (void)
        max_mapnr = max_low_pfn;
 #endif
 
-       num_physpages = 0;
-       efi_memmap_walk(count_pages, &num_physpages);
-
        high_memory = __va(max_low_pfn * PAGE_SIZE);
 
        for_each_pgdat(pgdat)
@@ -445,7 +626,7 @@ mem_init (void)
        num_pgt_pages = nr_free_pages() / PTRS_PER_PGD + NUM_TASKS;
        if (num_pgt_pages > nr_free_pages() / 10)
                num_pgt_pages = nr_free_pages() / 10;
-       if (num_pgt_pages > pgt_cache_water[1])
+       if (num_pgt_pages > (u64) pgt_cache_water[1])
                pgt_cache_water[1] = num_pgt_pages;
 
        /* install the gate page in the global page table: */
index b660eff..0b002ea 100644 (file)
@@ -5,6 +5,7 @@
  *
  * Copyright (C) 2002 Hewlett-Packard Co
  *     David Mosberger-Tang <davidm@hpl.hp.com>
+ *     Bjorn Helgaas <bjorn_helgaas@hp.com>
  *
  * Note: Above list of copyright holders is incomplete...
  */
@@ -116,31 +117,10 @@ pci_acpi_init (void)
 
 subsys_initcall(pci_acpi_init);
 
-static void __init
-pcibios_fixup_resource(struct resource *res, u64 offset)
-{
-       res->start += offset;
-       res->end += offset;
-}
-
-void __init
-pcibios_fixup_device_resources(struct pci_dev *dev, struct pci_bus *bus)
-{
-       int i;
-
-       for (i = 0; i < PCI_NUM_RESOURCES; i++) {
-               if (!dev->resource[i].start)
-                       continue;
-               if (dev->resource[i].flags & IORESOURCE_MEM)
-                       pcibios_fixup_resource(&dev->resource[i],
-                                              PCI_CONTROLLER(dev)->mem_offset);
-       }
-}
-
 /* Called by ACPI when it finds a new root bus.  */
 
 static struct pci_controller *
-alloc_pci_controller(int seg)
+alloc_pci_controller (int seg)
 {
        struct pci_controller *controller;
 
@@ -153,8 +133,8 @@ alloc_pci_controller(int seg)
        return controller;
 }
 
-struct pci_bus *
-scan_root_bus(int bus, struct pci_ops *ops, void *sysdata)
+static struct pci_bus *
+scan_root_bus (int bus, struct pci_ops *ops, void *sysdata)
 {
        struct pci_bus *b;
 
@@ -184,23 +164,185 @@ scan_root_bus(int bus, struct pci_ops *ops, void *sysdata)
        return b;
 }
 
+static int
+alloc_resource (char *name, struct resource *root, unsigned long start, unsigned long end, unsigned long flags)
+{
+       struct resource *res;
+
+       res = kmalloc(sizeof(*res), GFP_KERNEL);
+       if (!res)
+               return -ENOMEM;
+
+       memset(res, 0, sizeof(*res));
+       res->name = name;
+       res->start = start;
+       res->end = end;
+       res->flags = flags;
+
+       if (request_resource(root, res))
+               return -EBUSY;
+
+       return 0;
+}
+
+static u64
+add_io_space (struct acpi_resource_address64 *addr)
+{
+       u64 offset;
+       int sparse = 0;
+       int i;
+
+       if (addr->address_translation_offset == 0)
+               return IO_SPACE_BASE(0);        /* part of legacy IO space */
+
+       if (addr->attribute.io.translation_attribute == ACPI_SPARSE_TRANSLATION)
+               sparse = 1;
+
+       offset = (u64) ioremap(addr->address_translation_offset, 0);
+       for (i = 0; i < num_io_spaces; i++)
+               if (io_space[i].mmio_base == offset &&
+                   io_space[i].sparse == sparse)
+                       return IO_SPACE_BASE(i);
+
+       if (num_io_spaces == MAX_IO_SPACES) {
+               printk("Too many IO port spaces\n");
+               return ~0;
+       }
+
+       i = num_io_spaces++;
+       io_space[i].mmio_base = offset;
+       io_space[i].sparse = sparse;
+
+       return IO_SPACE_BASE(i);
+}
+
+static acpi_status
+count_window (struct acpi_resource *resource, void *data)
+{
+       unsigned int *windows = (unsigned int *) data;
+       struct acpi_resource_address64 addr;
+       acpi_status status;
+
+       status = acpi_resource_to_address64(resource, &addr);
+       if (ACPI_SUCCESS(status))
+               if (addr.resource_type == ACPI_MEMORY_RANGE ||
+                   addr.resource_type == ACPI_IO_RANGE)
+                       (*windows)++;
+
+       return AE_OK;
+}
+
+struct pci_root_info {
+       struct pci_controller *controller;
+       char *name;
+};
+
+static acpi_status
+add_window (struct acpi_resource *res, void *data)
+{
+       struct pci_root_info *info = (struct pci_root_info *) data;
+       struct pci_window *window;
+       struct acpi_resource_address64 addr;
+       acpi_status status;
+       unsigned long flags, offset = 0;
+       struct resource *root;
+
+       status = acpi_resource_to_address64(res, &addr);
+       if (ACPI_SUCCESS(status)) {
+               if (addr.resource_type == ACPI_MEMORY_RANGE) {
+                       flags = IORESOURCE_MEM;
+                       root = &iomem_resource;
+                       offset = addr.address_translation_offset;
+               } else if (addr.resource_type == ACPI_IO_RANGE) {
+                       flags = IORESOURCE_IO;
+                       root = &ioport_resource;
+                       offset = add_io_space(&addr);
+                       if (offset == ~0)
+                               return AE_OK;
+               } else
+                       return AE_OK;
+
+               window = &info->controller->window[info->controller->windows++];
+               window->resource.flags |= flags;
+               window->resource.start  = addr.min_address_range;
+               window->resource.end    = addr.max_address_range;
+               window->offset          = offset;
+
+               if (alloc_resource(info->name, root, addr.min_address_range + offset,
+                       addr.max_address_range + offset, flags))
+                       printk(KERN_ERR "alloc 0x%lx-0x%lx from %s for %s failed\n",
+                               addr.min_address_range + offset, addr.max_address_range + offset,
+                               root->name, info->name);
+       }
+
+       return AE_OK;
+}
+
 struct pci_bus *
-pcibios_scan_root(void *handle, int seg, int bus)
+pcibios_scan_root (void *handle, int seg, int bus)
 {
+       struct pci_root_info info;
        struct pci_controller *controller;
-       u64 base, size, offset;
+       unsigned int windows = 0;
+       char *name;
 
        printk("PCI: Probing PCI hardware on bus (%02x:%02x)\n", seg, bus);
        controller = alloc_pci_controller(seg);
        if (!controller)
-               return NULL;
+               goto out1;
 
        controller->acpi_handle = handle;
 
-       acpi_get_addr_space(handle, ACPI_MEMORY_RANGE, &base, &size, &offset);
-       controller->mem_offset = offset;
+       acpi_walk_resources(handle, METHOD_NAME__CRS, count_window, &windows);
+       controller->window = kmalloc(sizeof(*controller->window) * windows, GFP_KERNEL);
+       if (!controller->window)
+               goto out2;
+
+       name = kmalloc(16, GFP_KERNEL);
+       if (!name)
+               goto out3;
+
+       sprintf(name, "PCI Bus %02x:%02x", seg, bus);
+       info.controller = controller;
+       info.name = name;
+       acpi_walk_resources(handle, METHOD_NAME__CRS, add_window, &info);
 
        return scan_root_bus(bus, pci_root_ops, controller);
+
+out3:
+       kfree(controller->window);
+out2:
+       kfree(controller);
+out1:
+       return NULL;
+}
+
+void __init
+pcibios_fixup_device_resources (struct pci_dev *dev, struct pci_bus *bus)
+{
+       struct pci_controller *controller = PCI_CONTROLLER(dev);
+       struct pci_window *window;
+       int i, j;
+
+       for (i = 0; i < PCI_NUM_RESOURCES; i++) {
+               if (!dev->resource[i].start)
+                       continue;
+
+#define contains(win, res)     ((res)->start >= (win)->start && \
+                                (res)->end   <= (win)->end)
+
+               for (j = 0; j < controller->windows; j++) {
+                       window = &controller->window[j];
+                       if (((dev->resource[i].flags & IORESOURCE_MEM &&
+                             window->resource.flags & IORESOURCE_MEM) ||
+                            (dev->resource[i].flags & IORESOURCE_IO &&
+                             window->resource.flags & IORESOURCE_IO)) &&
+                           contains(&window->resource, &dev->resource[i])) {
+                               dev->resource[i].start += window->offset;
+                               dev->resource[i].end   += window->offset;
+                       }
+               }
+       }
 }
 
 /*
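
pcibios_fixup_device_resources() now translates each BAR from bus-relative to CPU-relative by locating the host-bridge window that contains it and adding that window's offset, replacing the old single mem_offset. The contains-and-translate step in miniature (window values are hypothetical):

    #include <stdio.h>

    struct window { unsigned long start, end, offset; };

    #define contains(w, s, e) ((s) >= (w)->start && (e) <= (w)->end)

    int main(void)
    {
        /* hypothetical 64 MB MMIO window, CPU view shifted by 0x80000000 */
        struct window w = { 0x00000000UL, 0x03ffffffUL, 0x80000000UL };
        unsigned long bar_start = 0x00100000UL, bar_end = 0x001fffffUL;

        if (contains(&w, bar_start, bar_end)) {
            bar_start += w.offset;  /* now a CPU physical address */
            bar_end   += w.offset;
        }
        printf("0x%lx-0x%lx\n", bar_start, bar_end);  /* 0x80100000-0x801fffff */
        return 0;
    }
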
index e9f7071..72a81f6 100644 (file)
 #include <linux/config.h>
 
 #ifdef CONFIG_IA64_SGI_SN1
-#define MACHVEC_PLATFORM_NAME  sn1
+#define MACHVEC_PLATFORM_NAME          sn1
+#define MACHVEC_PLATFORM_HEADER                <asm/machvec_sn1.h>
 #elif defined(CONFIG_IA64_SGI_SN2)
-#define MACHVEC_PLATFORM_NAME  sn2
+#define MACHVEC_PLATFORM_NAME          sn2
+#define MACHVEC_PLATFORM_HEADER                <asm/machvec_sn2.h>
 #else
 #error "unknown platform"
 #endif
index 5547bb7..8ce471f 100644 (file)
@@ -193,7 +193,7 @@ main (int argc, char **argv)
   printf ("/*\n * DO NOT MODIFY\n *\n * This file was generated by "
          "arch/ia64/tools/print_offsets.\n *\n */\n\n");
 
-  for (i = 0; i < sizeof (tab) / sizeof (tab[0]); ++i)
+  for (i = 0; i < (int) (sizeof (tab) / sizeof (tab[0])); ++i)
     {
       if (tab[i].name[0] == '\0')
        printf ("\n");
index 703991d..936eba6 100644 (file)
@@ -568,3 +568,7 @@ int module_finalize(const Elf_Ehdr *hdr,
 #endif
        return 0;
 }
+
+void module_arch_cleanup(struct module *mod)
+{
+}
index e9e586c..d263eac 100644 (file)
@@ -269,3 +269,7 @@ int module_finalize(const Elf_Ehdr *hdr,
 {
        return 0;
 }
+
+void module_arch_cleanup(struct module *mod)
+{
+}
index d79e734..913a216 100644 (file)
@@ -386,3 +386,7 @@ int module_finalize(const Elf_Ehdr *hdr,
                kfree(me->arch.syminfo);
        return 0;
 }
+
+void module_arch_cleanup(struct module *mod)
+{
+}
index 99183d7..1bde20c 100644 (file)
@@ -145,3 +145,7 @@ int module_finalize(const Elf_Ehdr *hdr,
 {
        return 0;
 }
+
+void module_arch_cleanup(struct module *mod)
+{
+}
index 9918b76..c24ee5a 100644 (file)
@@ -273,3 +273,7 @@ int module_finalize(const Elf_Ehdr *hdr,
 {
        return 0;
 }
+
+void module_arch_cleanup(struct module *mod)
+{
+}
index eedced8..64aeb3e 100644 (file)
@@ -230,3 +230,8 @@ int apply_relocate_add (Elf32_Shdr *sechdrs, const char *strtab,
 
        return 0;
 }
+
+void
+module_arch_cleanup(struct module *mod)
+{
+}
index 9236b4b..a82e568 100644 (file)
@@ -231,3 +231,7 @@ int module_finalize(const Elf_Ehdr *hdr,
 {
        return 0;
 }
+
+void module_arch_cleanup(struct module *mod)
+{
+}
index a56ae47..fa14619 100644 (file)
@@ -5,10 +5,10 @@
 #include <linux/kernel.h>
 #include <linux/ctype.h>
 #include <linux/string.h>
+#include <linux/irq.h>
 #include <asm/io.h>
 #include <asm/kdebug.h>
 #include <asm/delay.h>
-#include <asm/hw_irq.h>
 
 
 /*
index c78ea7e..85aa0e4 100644 (file)
@@ -251,7 +251,14 @@ acpi_os_install_interrupt_handler(u32 irq, OSD_HANDLER handler, void *context)
        irq = acpi_fadt.sci_int;
 
 #ifdef CONFIG_IA64
-       irq = gsi_to_vector(irq);
+       int vector;
+
+       vector = acpi_irq_to_vector(irq);
+       if (vector < 0) {
+               printk(KERN_ERR PREFIX "SCI (IRQ%d) not registered\n", irq);
+               return AE_OK;
+       }
+       irq = vector;
 #endif
        acpi_irq_irq = irq;
        acpi_irq_handler = handler;
@@ -269,7 +276,7 @@ acpi_os_remove_interrupt_handler(u32 irq, OSD_HANDLER handler)
 {
        if (acpi_irq_handler) {
 #ifdef CONFIG_IA64
-               irq = gsi_to_vector(irq);
+               irq = acpi_irq_to_vector(irq);
 #endif
                free_irq(irq, acpi_irq);
                acpi_irq_handler = NULL;
@@ -454,6 +461,9 @@ acpi_os_read_pci_configuration (
        int                     result = 0;
        int                     size = 0;
        struct pci_bus          bus;
+#ifdef CONFIG_IA64
+       struct pci_controller   ctrl;
+#endif
 
        if (!value)
                return AE_BAD_PARAMETER;
@@ -473,6 +483,10 @@ acpi_os_read_pci_configuration (
        }
 
        bus.number = pci_id->bus;
+#ifdef CONFIG_IA64
+       ctrl.segment = pci_id->segment;
+       bus.sysdata = &ctrl;
+#endif
        result = pci_root_ops->read(&bus, PCI_DEVFN(pci_id->device,
                                                    pci_id->function),
                                    reg, size, value);
@@ -490,6 +504,9 @@ acpi_os_write_pci_configuration (
        int                     result = 0;
        int                     size = 0;
        struct pci_bus          bus;
+#ifdef CONFIG_IA64
+       struct pci_controller   ctrl;
+#endif
 
        switch (width) {
        case 8:
@@ -506,6 +523,10 @@ acpi_os_write_pci_configuration (
        }
 
        bus.number = pci_id->bus;
+#ifdef CONFIG_IA64
+       ctrl.segment = pci_id->segment;
+       bus.sysdata = &ctrl;
+#endif
        result = pci_root_ops->write(&bus, PCI_DEVFN(pci_id->device,
                                                     pci_id->function),
                                     reg, size, value);
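
Both config-space helpers fake up just enough of a pci_bus for pci_root_ops to work with; on ia64 the segment travels via bus.sysdata. A hedged sketch of how the read side is assumed to recover it (the real ia64 pci_read() lives in arch/ia64/pci/pci.c and may differ in detail):

	static int pci_read (struct pci_bus *bus, unsigned int devfn,
			     int where, int size, u32 *value)
	{
		struct pci_controller *ctrl = bus->sysdata;	/* set by the caller */

		/* issue the SAL config read for (ctrl->segment, bus->number,
		 * devfn, where) and store the result in *value */
		return 0;
	}
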
index 7160d98..c94dabc 100644 (file)
@@ -24,6 +24,8 @@
  * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
  */
 
+#include <linux/config.h>
+
 #include <linux/kernel.h>
 #include <linux/module.h>
 #include <linux/init.h>
 #ifdef CONFIG_X86_IO_APIC
 #include <asm/mpspec.h>
 #endif
+#ifdef CONFIG_IOSAPIC
+# include <asm/iosapic.h>
+#endif
 #include <acpi/acpi_bus.h>
 #include <acpi/acpi_drivers.h>
 
+#ifdef CONFIG_X86
+# define PCI_SEGMENT(x)        0       /* XXX fix me */
+#endif
+
 
 #define _COMPONENT             ACPI_PCI_COMPONENT
 ACPI_MODULE_NAME               ("pci_irq")
@@ -250,6 +259,8 @@ acpi_pci_irq_lookup (
                return_VALUE(0);
        }
 
+       entry->irq = entry->link.index;
+
        if (!entry->irq && entry->link.handle) {
                entry->irq = acpi_pci_link_get_irq(entry->link.handle, entry->link.index);
                if (!entry->irq) {
@@ -288,7 +299,7 @@ acpi_pci_irq_derive (
        while (!irq && bridge->bus->self) {
                pin = (pin + PCI_SLOT(bridge->devfn)) % 4;
                bridge = bridge->bus->self;
-               irq = acpi_pci_irq_lookup(0, bridge->bus->number, PCI_SLOT(bridge->devfn), pin);
+               irq = acpi_pci_irq_lookup(PCI_SEGMENT(bridge), bridge->bus->number, PCI_SLOT(bridge->devfn), pin);
        }
 
        if (!irq) {
@@ -331,7 +342,7 @@ acpi_pci_irq_enable (
         * First we check the PCI IRQ routing table (PRT) for an IRQ.  PRT
         * values override any BIOS-assigned IRQs set during boot.
         */
-       irq = acpi_pci_irq_lookup(0, dev->bus->number, PCI_SLOT(dev->devfn), pin);
+       irq = acpi_pci_irq_lookup(PCI_SEGMENT(dev), dev->bus->number, PCI_SLOT(dev->devfn), pin);
  
        /*
         * If no PRT entry was found, we'll try to derive an IRQ from the
@@ -357,7 +368,11 @@ acpi_pci_irq_enable (
                }
        }
 
+#ifdef CONFIG_IA64
+       dev->irq = gsi_to_irq(irq);
+#else
        dev->irq = irq;
+#endif
 
        ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Device %s using IRQ %d\n", dev->slot_name, dev->irq));
 
@@ -371,6 +386,10 @@ acpi_pci_irq_enable (
                eisa_set_level_irq(dev->irq);
        }
 #endif
+#ifdef CONFIG_IOSAPIC
+       if (acpi_irq_model == ACPI_IRQ_MODEL_IOSAPIC)
+               iosapic_enable_intr(dev->irq);
+#endif
 
        return_VALUE(dev->irq);
 }
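
The only genuinely platform-specific step left in acpi_pci_irq_enable() is the translation from GSI to kernel IRQ number; a hedged reduction of the split the #ifdef introduces (gsi_to_irq() is assumed to exist on ia64):

	/* Illustrative helper, not part of the patch: */
	static inline int pci_gsi_to_irq (int gsi)
	{
	#ifdef CONFIG_IA64
		return gsi_to_irq(gsi);	/* GSI and irq spaces differ on ia64 */
	#else
		return gsi;		/* identity mapping elsewhere */
	#endif
	}
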
index 210ef7f..0625085 100644 (file)
@@ -23,6 +23,8 @@
  * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
  */
 
+#include <linux/config.h>
+
 #include <linux/kernel.h>
 #include <linux/module.h>
 #include <linux/init.h>
@@ -202,8 +204,6 @@ acpi_pci_root_add (
        switch (status) {
        case AE_OK:
                root->id.segment = (u16) value;
-               printk("_SEG exists! Unsupported. Abort.\n");
-               BUG();
                break;
        case AE_NOT_FOUND:
                ACPI_DEBUG_PRINT((ACPI_DB_INFO, 
@@ -265,7 +265,12 @@ acpi_pci_root_add (
         * PCI namespace does not get created until this call is made (and 
         * thus the root bridge's pci_dev does not exist).
         */
+#ifdef CONFIG_X86
        root->bus = pcibios_scan_root(root->id.bus);
+#else
+       root->bus = pcibios_scan_root(root->handle,
+                                     root->id.segment, root->id.bus);
+#endif
        if (!root->bus) {
                ACPI_DEBUG_PRINT((ACPI_DB_ERROR, 
                        "Bus %02x:%02x not present in PCI namespace\n", 
index 9774d10..f22dcdb 100644 (file)
@@ -31,7 +31,7 @@ config AGP_GART
 
 config AGP_INTEL
        tristate "Intel 440LX/BX/GX, I8xx and E7x05 support"
-       depends on AGP && !X86_64
+       depends on AGP && !X86_64 && !IA64
        help
          This option gives you AGP support for the GLX component of the
          XFree86 4.x on Intel 440LX/BX/GX, 815, 820, 830, 840, 845, 850, 860
@@ -44,7 +44,7 @@ config AGP_INTEL
 
 #config AGP_I810
 #      tristate "Intel I810/I815/I830M (on-board) support"
-#      depends on AGP && !X86_64
+#      depends on AGP && !X86_64 && !IA64
 #      help
 #        This option gives you AGP support for the Xserver on the Intel 810
 #        815 and 830m chipset boards for their on-board integrated graphics. This
@@ -52,7 +52,7 @@ config AGP_INTEL
 
 config AGP_VIA
        tristate "VIA chipset support"
-       depends on AGP && !X86_64
+       depends on AGP && !X86_64 && !IA64
        help
          This option gives you AGP support for the GLX component of the
          XFree86 4.x on VIA MPV3/Apollo Pro chipsets.
@@ -62,7 +62,7 @@ config AGP_VIA
 
 config AGP_AMD
        tristate "AMD Irongate, 761, and 762 support"
-       depends on AGP && !X86_64
+       depends on AGP && !X86_64 && !IA64
        help
          This option gives you AGP support for the GLX component of the
          XFree86 4.x on AMD Irongate, 761, and 762 chipsets.
@@ -72,7 +72,7 @@ config AGP_AMD
 
 config AGP_SIS
        tristate "Generic SiS support"
-       depends on AGP && !X86_64
+       depends on AGP && !X86_64 && !IA64
        help
          This option gives you AGP support for the GLX component of the "soon
          to be released" XFree86 4.x on Silicon Integrated Systems [SiS]
@@ -85,7 +85,7 @@ config AGP_SIS
 
 config AGP_ALI
        tristate "ALI chipset support"
-       depends on AGP && !X86_64
+       depends on AGP && !X86_64 && !IA64
        ---help---
          This option gives you AGP support for the GLX component of the
          XFree86 4.x on the following ALi chipsets.  The supported chipsets
@@ -103,14 +103,14 @@ config AGP_ALI
 
 config AGP_SWORKS
        tristate "Serverworks LE/HE support"
-       depends on AGP && !X86_64
+       depends on AGP && !X86_64 && !IA64
        help
          Say Y here to support the Serverworks AGP card.  See 
          <http://www.serverworks.com/> for product descriptions and images.
 
 config AGP_AMD_8151
        tristate "AMD 8151 support"
-       depends on AGP
+       depends on AGP && !IA64
        default GART_IOMMU
        help
          Say Y here to support the AMD 8151 AGP bridge and the builtin
index 79fa4de..f4e4593 100644 (file)
@@ -252,7 +252,9 @@ int agp_register_driver (struct agp_driver *drv)
        /* FIXME: What to do with this? */
        inter_module_register("drm_agp", THIS_MODULE, &drm_agp);
 
+#if 0
        pm_register(PM_PCI_DEV, PM_PCI_ID(agp_bridge->dev), agp_power);
+#endif
        agp_count++;
        return 0;
 
index ef7ef95..7058dfb 100644 (file)
@@ -1,17 +1,36 @@
 /*
- * HP AGPGART routines. 
+ * HP AGPGART routines.
+ *     Copyright (C) 2002-2003 Hewlett-Packard Co
+ *             Bjorn Helgaas <bjorn_helgaas@hp.com>
  */
 
+#include <linux/acpi.h>
+#include <linux/agp_backend.h>
+#include <linux/init.h>
 #include <linux/module.h>
 #include <linux/pci.h>
-#include <linux/init.h>
-#include <linux/agp_backend.h>
+
+#include <asm/acpi-ext.h>
+
 #include "agp.h"
 
 #ifndef log2
 #define log2(x)                ffz(~(x))
 #endif
 
+#define HP_ZX1_IOC_OFFSET      0x1000  /* ACPI reports SBA, we want IOC */
+
+/* HP ZX1 IOC registers */
+#define HP_ZX1_IBASE           0x300
+#define HP_ZX1_IMASK           0x308
+#define HP_ZX1_PCOM            0x310
+#define HP_ZX1_TCNFG           0x318
+#define HP_ZX1_PDIR_BASE       0x320
+
+/* HP ZX1 LBA registers */
+#define HP_ZX1_AGP_STATUS      0x64
+#define HP_ZX1_AGP_COMMAND     0x68
+
 #define HP_ZX1_IOVA_BASE       GB(1UL)
 #define HP_ZX1_IOVA_SIZE       GB(1UL)
 #define HP_ZX1_GART_SIZE       (HP_ZX1_IOVA_SIZE / 2)
@@ -20,6 +39,9 @@
 #define HP_ZX1_PDIR_VALID_BIT  0x8000000000000000UL
 #define HP_ZX1_IOVA_TO_PDIR(va)        ((va - hp_private.iova_base) >> hp_private.io_tlb_shift)
 
+/* The AGP bridge need not be a PCI device, but DRM thinks it is. */
+static struct pci_dev fake_bridge_dev;
+
 static struct aper_size_info_fixed hp_zx1_sizes[] =
 {
        {0, 0, 0},              /* filled in by hp_zx1_fetch_size() */
@@ -31,8 +53,8 @@ static struct gatt_mask hp_zx1_masks[] =
 };
 
 static struct _hp_private {
-       struct pci_dev *ioc;
-       volatile u8 *registers;
+       volatile u8 *ioc_regs;
+       volatile u8 *lba_regs;
        u64 *io_pdir;           // PDIR for entire IOVA
        u64 *gatt;              // PDIR just for GART (subset of above)
        u64 gatt_entries;
@@ -47,7 +69,8 @@ static struct _hp_private {
        int io_pages_per_kpage;
 } hp_private;
 
-static int __init hp_zx1_ioc_shared(void)
+static int __init
+hp_zx1_ioc_shared (void)
 {
        struct _hp_private *hp = &hp_private;
 
@@ -59,7 +82,7 @@ static int __init hp_zx1_ioc_shared(void)
         *      - IOVA space is 1Gb in size
         *      - first 512Mb is IOMMU, second 512Mb is GART
         */
-       hp->io_tlb_ps = INREG64(hp->registers, HP_ZX1_TCNFG);
+       hp->io_tlb_ps = INREG64(hp->ioc_regs, HP_ZX1_TCNFG);
        switch (hp->io_tlb_ps) {
                case 0: hp->io_tlb_shift = 12; break;
                case 1: hp->io_tlb_shift = 13; break;
@@ -75,13 +98,13 @@ static int __init hp_zx1_ioc_shared(void)
        hp->io_page_size = 1 << hp->io_tlb_shift;
        hp->io_pages_per_kpage = PAGE_SIZE / hp->io_page_size;
 
-       hp->iova_base = INREG64(hp->registers, HP_ZX1_IBASE) & ~0x1;
+       hp->iova_base = INREG64(hp->ioc_regs, HP_ZX1_IBASE) & ~0x1;
        hp->gart_base = hp->iova_base + HP_ZX1_IOVA_SIZE - HP_ZX1_GART_SIZE;
 
        hp->gart_size = HP_ZX1_GART_SIZE;
        hp->gatt_entries = hp->gart_size / hp->io_page_size;
 
-       hp->io_pdir = phys_to_virt(INREG64(hp->registers, HP_ZX1_PDIR_BASE));
+       hp->io_pdir = phys_to_virt(INREG64(hp->ioc_regs, HP_ZX1_PDIR_BASE));
        hp->gatt = &hp->io_pdir[HP_ZX1_IOVA_TO_PDIR(hp->gart_base)];
 
        if (hp->gatt[0] != HP_ZX1_SBA_IOMMU_COOKIE) {
@@ -95,7 +118,8 @@ static int __init hp_zx1_ioc_shared(void)
        return 0;
 }
 
-static int __init hp_zx1_ioc_owner(u8 ioc_rev)
+static int __init
+hp_zx1_ioc_owner (void)
 {
        struct _hp_private *hp = &hp_private;
 
@@ -130,47 +154,28 @@ static int __init hp_zx1_ioc_owner(u8 ioc_rev)
        return 0;
 }
 
-static int __init hp_zx1_ioc_init(void)
+static int __init
+hp_zx1_ioc_init (u64 ioc_hpa, u64 lba_hpa)
 {
        struct _hp_private *hp = &hp_private;
-       struct pci_dev *ioc;
-       int i;
-       u8 ioc_rev;
-
-       ioc = pci_find_device(PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_ZX1_IOC, NULL);
-       if (!ioc) {
-               printk(KERN_ERR PFX "Detected HP ZX1 AGP bridge but no IOC\n");
-               return -ENODEV;
-       }
-       hp->ioc = ioc;
 
-       pci_read_config_byte(ioc, PCI_REVISION_ID, &ioc_rev);
-
-       for (i = 0; i < PCI_NUM_RESOURCES; i++) {
-               if (pci_resource_flags(ioc, i) == IORESOURCE_MEM) {
-                       hp->registers = (u8 *) ioremap(pci_resource_start(ioc, i),
-                                                   pci_resource_len(ioc, i));
-                       break;
-               }
-       }
-       if (!hp->registers) {
-               printk(KERN_ERR PFX "Detected HP ZX1 AGP bridge but no CSRs\n");
-               return -ENODEV;
-       }
+       hp->ioc_regs = ioremap(ioc_hpa, 1024);
+       hp->lba_regs = ioremap(lba_hpa, 256);
 
        /*
         * If the IOTLB is currently disabled, we can take it over.
         * Otherwise, we have to share with sba_iommu.
         */
-       hp->io_pdir_owner = (INREG64(hp->registers, HP_ZX1_IBASE) & 0x1) == 0;
+       hp->io_pdir_owner = (INREG64(hp->ioc_regs, HP_ZX1_IBASE) & 0x1) == 0;
 
        if (hp->io_pdir_owner)
-               return hp_zx1_ioc_owner(ioc_rev);
+               return hp_zx1_ioc_owner();
 
        return hp_zx1_ioc_shared();
 }
 
-static int hp_zx1_fetch_size(void)
+static int
+hp_zx1_fetch_size (void)
 {
        int size;
 
@@ -180,47 +185,49 @@ static int hp_zx1_fetch_size(void)
        return size;
 }
 
-static int hp_zx1_configure(void)
+static int
+hp_zx1_configure (void)
 {
        struct _hp_private *hp = &hp_private;
 
        agp_bridge->gart_bus_addr = hp->gart_base;
-       agp_bridge->capndx = pci_find_capability(agp_bridge->dev, PCI_CAP_ID_AGP);
-       pci_read_config_dword(agp_bridge->dev,
-               agp_bridge->capndx + PCI_AGP_STATUS, &agp_bridge->mode);
+       agp_bridge->mode = INREG32(hp->lba_regs, HP_ZX1_AGP_STATUS);
 
        if (hp->io_pdir_owner) {
-               OUTREG64(hp->registers, HP_ZX1_PDIR_BASE,
+               OUTREG64(hp->ioc_regs, HP_ZX1_PDIR_BASE,
                        virt_to_phys(hp->io_pdir));
-               OUTREG64(hp->registers, HP_ZX1_TCNFG, hp->io_tlb_ps);
-               OUTREG64(hp->registers, HP_ZX1_IMASK, ~(HP_ZX1_IOVA_SIZE - 1));
-               OUTREG64(hp->registers, HP_ZX1_IBASE, hp->iova_base | 0x1);
-               OUTREG64(hp->registers, HP_ZX1_PCOM,
+               OUTREG64(hp->ioc_regs, HP_ZX1_TCNFG, hp->io_tlb_ps);
+               OUTREG64(hp->ioc_regs, HP_ZX1_IMASK, ~(HP_ZX1_IOVA_SIZE - 1));
+               OUTREG64(hp->ioc_regs, HP_ZX1_IBASE, hp->iova_base | 0x1);
+               OUTREG64(hp->ioc_regs, HP_ZX1_PCOM,
                        hp->iova_base | log2(HP_ZX1_IOVA_SIZE));
-               INREG64(hp->registers, HP_ZX1_PCOM);
+               INREG64(hp->ioc_regs, HP_ZX1_PCOM);
        }
 
        return 0;
 }
 
-static void hp_zx1_cleanup(void)
+static void
+hp_zx1_cleanup (void)
 {
        struct _hp_private *hp = &hp_private;
 
        if (hp->io_pdir_owner)
-               OUTREG64(hp->registers, HP_ZX1_IBASE, 0);
-       iounmap((void *) hp->registers);
+               OUTREG64(hp->ioc_regs, HP_ZX1_IBASE, 0);
+       iounmap((void *) hp->ioc_regs);
 }
 
-static void hp_zx1_tlbflush(agp_memory * mem)
+static void
+hp_zx1_tlbflush (agp_memory * mem)
 {
        struct _hp_private *hp = &hp_private;
 
-       OUTREG64(hp->registers, HP_ZX1_PCOM, hp->gart_base | log2(hp->gart_size));
-       INREG64(hp->registers, HP_ZX1_PCOM);
+       OUTREG64(hp->ioc_regs, HP_ZX1_PCOM, hp->gart_base | log2(hp->gart_size));
+       INREG64(hp->ioc_regs, HP_ZX1_PCOM);
 }
 
-static int hp_zx1_create_gatt_table(void)
+static int
+hp_zx1_create_gatt_table (void)
 {
        struct _hp_private *hp = &hp_private;
        int i;
@@ -247,7 +254,8 @@ static int hp_zx1_create_gatt_table(void)
        return 0;
 }
 
-static int hp_zx1_free_gatt_table(void)
+static int
+hp_zx1_free_gatt_table (void)
 {
        struct _hp_private *hp = &hp_private;
 
@@ -259,7 +267,8 @@ static int hp_zx1_free_gatt_table(void)
        return 0;
 }
 
-static int hp_zx1_insert_memory(agp_memory * mem, off_t pg_start, int type)
+static int
+hp_zx1_insert_memory (agp_memory * mem, off_t pg_start, int type)
 {
        struct _hp_private *hp = &hp_private;
        int i, k;
@@ -304,7 +313,8 @@ static int hp_zx1_insert_memory(agp_memory * mem, off_t pg_start, int type)
        return 0;
 }
 
-static int hp_zx1_remove_memory(agp_memory * mem, off_t pg_start, int type)
+static int
+hp_zx1_remove_memory (agp_memory * mem, off_t pg_start, int type)
 {
        struct _hp_private *hp = &hp_private;
        int i, io_pg_start, io_pg_count;
@@ -323,12 +333,30 @@ static int hp_zx1_remove_memory(agp_memory * mem, off_t pg_start, int type)
        return 0;
 }
 
-static unsigned long hp_zx1_mask_memory(unsigned long addr, int type)
+static unsigned long
+hp_zx1_mask_memory(unsigned long addr, int type)
 {
        return HP_ZX1_PDIR_VALID_BIT | addr;
 }
 
-static int __init hp_zx1_setup (struct pci_dev *pdev __attribute__((unused)))
+static void
+hp_zx1_agp_enable (u32 mode)
+{
+       struct _hp_private *hp = &hp_private;
+       u32 command;
+
+       command = INREG32(hp->lba_regs, HP_ZX1_AGP_STATUS);
+
+       command = agp_collect_device_status(mode, command);
+       command |= 0x00000100;
+
+       OUTREG32(hp->lba_regs, HP_ZX1_AGP_COMMAND, command);
+
+       agp_device_command(command, 0);
+}
+
+static int __init
+hp_zx1_setup (u64 ioc_hpa, u64 lba_hpa)
 {
        agp_bridge->masks = hp_zx1_masks;
        agp_bridge->dev_private_data = NULL;
@@ -339,7 +367,7 @@ static int __init hp_zx1_setup (struct pci_dev *pdev __attribute__((unused)))
        agp_bridge->cleanup = hp_zx1_cleanup;
        agp_bridge->tlb_flush = hp_zx1_tlbflush;
        agp_bridge->mask_memory = hp_zx1_mask_memory;
-       agp_bridge->agp_enable = agp_generic_enable;
+       agp_bridge->agp_enable = hp_zx1_agp_enable;
        agp_bridge->cache_flush = global_cache_flush;
        agp_bridge->create_gatt_table = hp_zx1_create_gatt_table;
        agp_bridge->free_gatt_table = hp_zx1_free_gatt_table;
@@ -350,73 +378,85 @@ static int __init hp_zx1_setup (struct pci_dev *pdev __attribute__((unused)))
        agp_bridge->agp_alloc_page = agp_generic_alloc_page;
        agp_bridge->agp_destroy_page = agp_generic_destroy_page;
        agp_bridge->cant_use_aperture = 1;
-       return hp_zx1_ioc_init();
+
+       return hp_zx1_ioc_init(ioc_hpa, lba_hpa);
 }
 
-static int __init agp_find_supported_device(struct pci_dev *dev)
+static acpi_status __init
+zx1_gart_probe (acpi_handle obj, u32 depth, void *context, void **ret)
 {
-       agp_bridge->dev = dev;
+       acpi_handle handle, parent;
+       acpi_status status;
+       struct acpi_device_info info;
+       u64 lba_hpa, sba_hpa, length;
+
+       status = hp_acpi_csr_space(obj, &lba_hpa, &length);
+       if (ACPI_FAILURE(status))
+               return 1;
+
+       /* Look for an enclosing IOC scope and find its CSR space */
+       handle = obj;
+       do {
+               status = acpi_get_object_info(handle, &info);
+               if (ACPI_SUCCESS(status)) {
+                       /* TBD check _CID also */
+                       info.hardware_id[sizeof(info.hardware_id)-1] = '\0';
+                       if (!strcmp(info.hardware_id, "HWP0001")) {
+                               status = hp_acpi_csr_space(handle, &sba_hpa, &length);
+                               if (ACPI_SUCCESS(status))
+                                       break;
+                               else {
+                                       printk(KERN_ERR PFX "Detected HP ZX1 "
+                                              "AGP LBA but no IOC.\n");
+                                       return status;
+                               }
+                       }
+               }
 
-       /* ZX1 LBAs can be either PCI or AGP bridges */
-       if (pci_find_capability(dev, PCI_CAP_ID_AGP)) {
-               printk(KERN_INFO PFX "Detected HP ZX1 AGP chipset at %s\n",
-                       dev->slot_name);
-               agp_bridge->type = HP_ZX1;
-               agp_bridge->dev = dev;
-               return hp_zx1_setup(dev);
-       }
-       return -ENODEV;
-}
+               status = acpi_get_parent(handle, &parent);
+               handle = parent;
+       } while (ACPI_SUCCESS(status));
 
-static struct agp_driver hp_agp_driver = {
-       .owner = THIS_MODULE,
-};
+       if (hp_zx1_setup(sba_hpa + HP_ZX1_IOC_OFFSET, lba_hpa))
+               return 1;
 
-static int __init agp_hp_probe (struct pci_dev *dev, const struct pci_device_id *ent)
-{
-       if (agp_find_supported_device(dev) == 0) {
-               hp_agp_driver.dev = dev;
-               agp_register_driver(&hp_agp_driver);
-               return 0;
-       }
-       return -ENODEV;
-}
+       fake_bridge_dev.vendor = PCI_VENDOR_ID_HP;
+       fake_bridge_dev.device = PCI_DEVICE_ID_HP_ZX1_LBA;
 
-static struct pci_device_id agp_hp_pci_table[] __initdata = {
-       {
-       .class          = (PCI_CLASS_BRIDGE_HOST << 8),
-       .class_mask     = ~0,
-       .vendor         = PCI_VENDOR_ID_HP,
-       .device         = PCI_DEVICE_ID_HP_ZX1_LBA,
-       .subvendor      = PCI_ANY_ID,
-       .subdevice      = PCI_ANY_ID,
-       },
-       { }
-};
-
-MODULE_DEVICE_TABLE(pci, agp_hp_pci_table);
+       return 0;
+}
 
-static struct __initdata pci_driver agp_hp_pci_driver = {
-       .name           = "agpgart-hp",
-       .id_table       = agp_hp_pci_table,
-       .probe          = agp_hp_probe,
+static struct agp_driver hp_agp_driver = {
+       .owner = THIS_MODULE,
 };
 
-static int __init agp_hp_init(void)
+static int __init
+agp_hp_init (void)
 {
-       int ret_val;
+       acpi_status status;
 
-       ret_val = pci_module_init(&agp_hp_pci_driver);
-       if (ret_val)
+       status = acpi_get_devices("HWP0003", zx1_gart_probe, "HWP0003 AGP LBA", NULL);
+       if (!(ACPI_SUCCESS(status))) {
                agp_bridge->type = NOT_SUPPORTED;
+               printk(KERN_INFO PFX "Failed to initialize zx1 AGP.\n");
+               return -ENODEV;
+       }
+
+       if (fake_bridge_dev.vendor && !agp_bridge->type) {
+               hp_agp_driver.dev = &fake_bridge_dev;
+               agp_bridge->type = HP_ZX1;
+               agp_bridge->dev = &fake_bridge_dev;
+               return agp_register_driver(&hp_agp_driver);
 
-       return ret_val;
+       } else {
+               return -ENODEV;
+       }
 }
 
-static void __exit agp_hp_cleanup(void)
+static void __exit
+agp_hp_cleanup (void)
 {
        agp_unregister_driver(&hp_agp_driver);
-       pci_unregister_driver(&agp_hp_pci_driver);
 }
 
 module_init(agp_hp_init);
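
The driver is thus discovered purely through the ACPI namespace instead of PCI probing. The probe leans on hp_acpi_csr_space() to pull the CSR window out of the device's resources; a hedged sketch of the assumed interface from asm/acpi-ext.h:

	/* Assumed prototype, for illustration: return the fixed-memory CSR
	 * range of an ACPI device as <base, length>. */
	extern acpi_status hp_acpi_csr_space (acpi_handle obj,
					      u64 *csr_base, u64 *csr_length);
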
index abc3ab6..a0baa6f 100644 (file)
@@ -571,6 +571,7 @@ static int __init agp_intel_i460_probe (struct pci_dev *dev, const struct pci_de
        if (cap_ptr == 0)
                return -ENODEV;
 
+       agp_bridge->type = INTEL_460GX;
        agp_bridge->dev = dev;
        agp_bridge->capndx = cap_ptr;
        intel_i460_setup(dev);
index c0f95fe..9d6433b 100644 (file)
@@ -225,16 +225,16 @@ static inline struct page * vmalloc_to_page(void * vmalloc_addr)
    if (len > DRM_PROC_LIMIT) { ret; *eof = 1; return len - offset; }
 
                                /* Mapping helper macros */
-#define DRM_IOREMAP(map)                                               \
-       (map)->handle = DRM(ioremap)( (map)->offset, (map)->size )
+#define DRM_IOREMAP(map, dev)                                                  \
+       (map)->handle = DRM(ioremap)( (map)->offset, (map)->size, (dev) )
 
-#define DRM_IOREMAP_NOCACHE(map)                                       \
-       (map)->handle = DRM(ioremap_nocache)((map)->offset, (map)->size)
+#define DRM_IOREMAP_NOCACHE(map, dev)                                          \
+       (map)->handle = DRM(ioremap_nocache)((map)->offset, (map)->size, (dev))
 
-#define DRM_IOREMAPFREE(map)                                           \
-       do {                                                            \
-               if ( (map)->handle && (map)->size )                     \
-                       DRM(ioremapfree)( (map)->handle, (map)->size ); \
+#define DRM_IOREMAPFREE(map, dev)                                                      \
+       do {                                                                    \
+               if ( (map)->handle && (map)->size )                             \
+                       DRM(ioremapfree)( (map)->handle, (map)->size, (dev) );  \
        } while (0)
 
 #define DRM_FIND_MAP(_map, _o)                                                         \
@@ -652,9 +652,10 @@ extern void             DRM(free)(void *pt, size_t size, int area);
 extern unsigned long DRM(alloc_pages)(int order, int area);
 extern void         DRM(free_pages)(unsigned long address, int order,
                                     int area);
-extern void         *DRM(ioremap)(unsigned long offset, unsigned long size);
-extern void         *DRM(ioremap_nocache)(unsigned long offset, unsigned long size);
-extern void         DRM(ioremapfree)(void *pt, unsigned long size);
+extern void         *DRM(ioremap)(unsigned long offset, unsigned long size, drm_device_t *dev);
+extern void         *DRM(ioremap_nocache)(unsigned long offset, unsigned long size,
+                                          drm_device_t *dev);
+extern void         DRM(ioremapfree)(void *pt, unsigned long size, drm_device_t *dev);
 
 #if __REALLY_HAVE_AGP
 extern agp_memory    *DRM(alloc_agp)(int pages, u32 type);
index 5ee9333..b1e484d 100644 (file)
@@ -123,7 +123,7 @@ int DRM(addmap)( struct inode *inode, struct file *filp,
                                              MTRR_TYPE_WRCOMB, 1 );
                }
 #endif
-               map->handle = DRM(ioremap)( map->offset, map->size );
+               map->handle = DRM(ioremap)( map->offset, map->size, dev );
                break;
 
        case _DRM_SHM:
@@ -245,7 +245,7 @@ int DRM(rmmap)(struct inode *inode, struct file *filp,
                                DRM_DEBUG("mtrr_del = %d\n", retcode);
                        }
 #endif
-                       DRM(ioremapfree)(map->handle, map->size);
+                       DRM(ioremapfree)(map->handle, map->size, dev);
                        break;
                case _DRM_SHM:
                        vfree(map->handle);
index 2ea8e6f..9f51aeb 100644 (file)
@@ -454,7 +454,7 @@ static int DRM(takedown)( drm_device_t *dev )
                                        DRM_DEBUG( "mtrr_del=%d\n", retcode );
                                }
 #endif
-                               DRM(ioremapfree)( map->handle, map->size );
+                               DRM(ioremapfree)( map->handle, map->size, dev );
                                break;
                        case _DRM_SHM:
                                vfree(map->handle);
index 8b63416..3dbd03b 100644 (file)
 
 #include <linux/config.h>
 #include "drmP.h"
+#include <linux/vmalloc.h>
+
+#include <asm/agp.h>
+#include <asm/tlbflush.h>
 
 /* Cut down version of drm_memory_debug.h, which used to be called
  * drm_memory.h.  If you want the debug functionality, change 0 to 1
  */
 #define DEBUG_MEMORY 0
 
+#if __REALLY_HAVE_AGP
+
+/*
+ * Find the drm_map that covers the range [offset, offset+size).
+ */
+static inline drm_map_t *
+drm_lookup_map (unsigned long offset, unsigned long size, drm_device_t *dev)
+{
+       struct list_head *list;
+       drm_map_list_t *r_list;
+       drm_map_t *map;
+
+       list_for_each(list, &dev->maplist->head) {
+               r_list = (drm_map_list_t *) list;
+               map = r_list->map;
+               if (!map)
+                       continue;
+               if (map->offset <= offset && (offset + size) <= (map->offset + map->size))
+                       return map;
+       }
+       return NULL;
+}
+
+static inline void *
+agp_remap (unsigned long offset, unsigned long size, drm_device_t *dev)
+{
+       unsigned long *phys_addr_map, i, num_pages = PAGE_ALIGN(size) / PAGE_SIZE;
+       struct drm_agp_mem *agpmem;
+       struct page **page_map;
+       void *addr;
+
+       size = PAGE_ALIGN(size);
+
+       for (agpmem = dev->agp->memory; agpmem; agpmem = agpmem->next)
+               if (agpmem->bound <= offset
+                   && (agpmem->bound + (agpmem->pages << PAGE_SHIFT)) >= (offset + size))
+                       break;
+       if (!agpmem)
+               return NULL;
+
+       /*
+        * OK, we're mapping AGP space on a chipset/platform on which memory accesses by
+        * the CPU do not get remapped by the GART.  We fix this by using the kernel's
+        * page-table instead (that's probably faster anyhow...).
+        */
+       /* note: use vmalloc() because num_pages could be large... */
+       page_map = vmalloc(num_pages * sizeof(struct page *));
+       if (!page_map)
+               return NULL;
+
+       phys_addr_map = agpmem->memory->memory + (offset - agpmem->bound) / PAGE_SIZE;
+       for (i = 0; i < num_pages; ++i)
+               page_map[i] = pfn_to_page(phys_addr_map[i] >> PAGE_SHIFT);
+       addr = vmap(page_map, num_pages, VM_IOREMAP, PAGE_AGP);
+       vfree(page_map);
+       if (!addr)
+               return NULL;
+
+       flush_tlb_kernel_range((unsigned long) addr, (unsigned long) addr + size);
+       return addr;
+}
+
+static inline unsigned long
+drm_follow_page (void *vaddr)
+{
+       pgd_t *pgd = pgd_offset_k((unsigned long) vaddr);
+       pmd_t *pmd = pmd_offset(pgd, (unsigned long) vaddr);
+       pte_t *ptep = pte_offset_kernel(pmd, (unsigned long) vaddr);
+       return pte_pfn(*ptep) << PAGE_SHIFT;
+}
+
+#else /* !__REALLY_HAVE_AGP */
+
+static inline void *
+agp_remap (unsigned long offset, unsigned long size, drm_device_t *dev)
+{
+       return NULL;
+}
+
+#endif /* !__REALLY_HAVE_AGP */
+
+static inline void *drm_ioremap(unsigned long offset, unsigned long size, drm_device_t *dev)
+{
+       int remap_aperture = 0;
+
+#if __REALLY_HAVE_AGP
+       if (dev->agp->cant_use_aperture) {
+               drm_map_t *map = drm_lookup_map(offset, size, dev);
+
+               if (map && map->type == _DRM_AGP)
+                       remap_aperture = 1;
+       }
+#endif
+       if (remap_aperture)
+               return agp_remap(offset, size, dev);
+       else
+               return ioremap(offset, size);
+}
+
+static inline void *drm_ioremap_nocache(unsigned long offset, unsigned long size,
+                                       drm_device_t *dev)
+{
+       int remap_aperture = 0;
+
+#if __REALLY_HAVE_AGP
+       if (dev->agp->cant_use_aperture) {
+               drm_map_t *map = drm_lookup_map(offset, size, dev);
+
+               if (map && map->type == _DRM_AGP)
+                       remap_aperture = 1;
+       }
+#endif
+       if (remap_aperture)
+               return agp_remap(offset, size, dev);
+       else
+               return ioremap_nocache(offset, size);
+}
+
+static inline void drm_ioremapfree(void *pt, unsigned long size, drm_device_t *dev)
+{
+       int unmap_aperture = 0;
+#if __REALLY_HAVE_AGP
+       /*
+        * This is a bit ugly.  It would be much cleaner if the DRM API would use separate
+        * routines for handling mappings in the AGP space.  Hopefully this can be done in
+        * a future revision of the interface...
+        */
+       if (dev->agp->cant_use_aperture
+           && ((unsigned long) pt >= VMALLOC_START && (unsigned long) pt < VMALLOC_END))
+       {
+               unsigned long offset;
+               drm_map_t *map;
+
+               offset = drm_follow_page(pt) | ((unsigned long) pt & ~PAGE_MASK);
+               map = drm_lookup_map(offset, size, dev);
+               if (map && map->type == _DRM_AGP)
+                       unmap_aperture = 1;
+       }
+#endif
+       if (unmap_aperture)
+               vunmap(pt);
+       else
+               iounmap(pt);
+}
 
 #if DEBUG_MEMORY
 #include "drm_memory_debug.h"
@@ -118,19 +266,19 @@ void DRM(free_pages)(unsigned long address, int order, int area)
        free_pages(address, order);
 }
 
-void *DRM(ioremap)(unsigned long offset, unsigned long size)
+void *DRM(ioremap)(unsigned long offset, unsigned long size, drm_device_t *dev)
 {
-       return ioremap(offset, size);
+       return drm_ioremap(offset, size, dev);
 }
 
-void *DRM(ioremap_nocache)(unsigned long offset, unsigned long size)
+void *DRM(ioremap_nocache)(unsigned long offset, unsigned long size, drm_device_t *dev)
 {
-       return ioremap_nocache(offset, size);
+       return drm_ioremap_nocache(offset, size, dev);
 }
 
-void DRM(ioremapfree)(void *pt, unsigned long size)
+void DRM(ioremapfree)(void *pt, unsigned long size, drm_device_t *dev)
 {
-       iounmap(pt);
+       drm_ioremapfree(pt, size, dev);
 }
 
 #if __REALLY_HAVE_AGP
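
Every DRM(ioremap*) caller now has to pass the device so AGP-aperture offsets can be resolved against dev->maplist; a minimal usage sketch of the new signatures (mirrors what the drivers below are converted to):

	map->handle = DRM(ioremap)(map->offset, map->size, dev);
	if (map->handle) {
		/* ... use the mapping ... */
		DRM(ioremapfree)(map->handle, map->size, dev);
	}
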
index b189793..b87ecbc 100644 (file)
@@ -269,7 +269,7 @@ void DRM(free_pages)(unsigned long address, int order, int area)
        }
 }
 
-void *DRM(ioremap)(unsigned long offset, unsigned long size)
+void *DRM(ioremap)(unsigned long offset, unsigned long size, drm_device_t *dev)
 {
        void *pt;
 
@@ -279,7 +279,7 @@ void *DRM(ioremap)(unsigned long offset, unsigned long size)
                return NULL;
        }
 
-       if (!(pt = ioremap(offset, size))) {
+       if (!(pt = drm_ioremap(offset, size, dev))) {
                spin_lock(&DRM(mem_lock));
                ++DRM(mem_stats)[DRM_MEM_MAPPINGS].fail_count;
                spin_unlock(&DRM(mem_lock));
@@ -292,7 +292,7 @@ void *DRM(ioremap)(unsigned long offset, unsigned long size)
        return pt;
 }
 
-void *DRM(ioremap_nocache)(unsigned long offset, unsigned long size)
+void *DRM(ioremap_nocache)(unsigned long offset, unsigned long size, drm_device_t *dev)
 {
        void *pt;
 
@@ -302,7 +302,7 @@ void *DRM(ioremap_nocache)(unsigned long offset, unsigned long size)
                return NULL;
        }
 
-       if (!(pt = ioremap_nocache(offset, size))) {
+       if (!(pt = drm_ioremap_nocache(offset, size, dev))) {
                spin_lock(&DRM(mem_lock));
                ++DRM(mem_stats)[DRM_MEM_MAPPINGS].fail_count;
                spin_unlock(&DRM(mem_lock));
@@ -315,7 +315,7 @@ void *DRM(ioremap_nocache)(unsigned long offset, unsigned long size)
        return pt;
 }
 
-void DRM(ioremapfree)(void *pt, unsigned long size)
+void DRM(ioremapfree)(void *pt, unsigned long size, drm_device_t *dev)
 {
        int alloc_count;
        int free_count;
@@ -324,7 +324,7 @@ void DRM(ioremapfree)(void *pt, unsigned long size)
                DRM_MEM_ERROR(DRM_MEM_MAPPINGS,
                              "Attempt to free NULL pointer\n");
        else
-               iounmap(pt);
+               drm_ioremapfree(pt, size, dev);
 
        spin_lock(&DRM(mem_lock));
        DRM(mem_stats)[DRM_MEM_MAPPINGS].bytes_freed += size;
index 7711639..18bda8a 100644 (file)
@@ -107,12 +107,12 @@ struct page *DRM(vm_nopage)(struct vm_area_struct *vma,
                  * Get the page, inc the use count, and return it
                  */
                offset = (baddr - agpmem->bound) >> PAGE_SHIFT;
-               agpmem->memory->memory[offset] &= dev->agp->page_mask;
                page = virt_to_page(__va(agpmem->memory->memory[offset]));
                get_page(page);
 
-               DRM_DEBUG("baddr = 0x%lx page = 0x%p, offset = 0x%lx\n",
-                         baddr, __va(agpmem->memory->memory[offset]), offset);
+               DRM_DEBUG("baddr = 0x%lx page = 0x%p, offset = 0x%lx, count=%d\n",
+                         baddr, __va(agpmem->memory->memory[offset]), offset,
+                         atomic_read(&page->count));
 
                return page;
         }
@@ -206,7 +206,7 @@ void DRM(vm_shm_close)(struct vm_area_struct *vma)
                                        DRM_DEBUG("mtrr_del = %d\n", retcode);
                                }
 #endif
-                               DRM(ioremapfree)(map->handle, map->size);
+                               DRM(ioremapfree)(map->handle, map->size, dev);
                                break;
                        case _DRM_SHM:
                                vfree(map->handle);
@@ -420,15 +420,16 @@ int DRM(mmap)(struct file *filp, struct vm_area_struct *vma)
 
        switch (map->type) {
         case _DRM_AGP:
-#if defined(__alpha__)
+#if __REALLY_HAVE_AGP
+         if (dev->agp->cant_use_aperture) {
                 /*
-                 * On Alpha we can't talk to bus dma address from the
-                 * CPU, so for memory of type DRM_AGP, we'll deal with
-                 * sorting out the real physical pages and mappings
-                 * in nopage()
+                 * On some platforms we can't talk to the bus DMA address from
+                 * the CPU, so for memory of type DRM_AGP, we'll deal with
+                 * sorting out the real physical pages and mappings in nopage()
                  */
                 vma->vm_ops = &DRM(vm_ops);
                 break;
+         }
 #endif
                 /* fall through to _DRM_FRAME_BUFFER... */        
        case _DRM_FRAME_BUFFER:
@@ -439,15 +440,15 @@ int DRM(mmap)(struct file *filp, struct vm_area_struct *vma)
                                pgprot_val(vma->vm_page_prot) |= _PAGE_PCD;
                                pgprot_val(vma->vm_page_prot) &= ~_PAGE_PWT;
                        }
-#elif defined(__ia64__)
-                       if (map->type != _DRM_AGP)
-                               vma->vm_page_prot =
-                                       pgprot_writecombine(vma->vm_page_prot);
 #elif defined(__powerpc__)
                        pgprot_val(vma->vm_page_prot) |= _PAGE_NO_CACHE | _PAGE_GUARDED;
 #endif
                        vma->vm_flags |= VM_IO; /* not in core dump */
                }
+#if defined(__ia64__)
+               if (map->type != _DRM_AGP)
+                       vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
+#endif
                offset = DRIVER_GET_REG_OFS();
 #ifdef __sparc__
                if (io_remap_page_range(DRM_RPR_ARG(vma) vma->vm_start,
index 8246b1b..77135f2 100644 (file)
@@ -612,7 +612,7 @@ static int gamma_do_init_dma( drm_device_t *dev, drm_gamma_init_t *init )
        } else {
                DRM_FIND_MAP( dev_priv->buffers, init->buffers_offset );
 
-               DRM_IOREMAP( dev_priv->buffers );
+               DRM_IOREMAP( dev_priv->buffers, dev );
 
                buf = dma->buflist[GLINT_DRI_BUF_COUNT];
                pgt = buf->address;
@@ -651,7 +651,7 @@ int gamma_do_cleanup_dma( drm_device_t *dev )
                drm_gamma_private_t *dev_priv = dev->dev_private;
 
                if ( dev_priv->buffers != NULL )
-                       DRM_IOREMAPFREE( dev_priv->buffers );
+                       DRM_IOREMAPFREE( dev_priv->buffers, dev );
 
                DRM(free)( dev->dev_private, sizeof(drm_gamma_private_t),
                           DRM_MEM_DRIVER );
index f3c323f..f274d4b 100644 (file)
@@ -246,7 +246,7 @@ int i810_dma_cleanup(drm_device_t *dev)
 
                if(dev_priv->ring.virtual_start) {
                        DRM(ioremapfree)((void *) dev_priv->ring.virtual_start,
-                                        dev_priv->ring.Size);
+                                        dev_priv->ring.Size, dev);
                }
                if (dev_priv->hw_status_page) {
                        pci_free_consistent(dev->pdev, PAGE_SIZE,
@@ -263,7 +263,7 @@ int i810_dma_cleanup(drm_device_t *dev)
                        drm_buf_t *buf = dma->buflist[ i ];
                        drm_i810_buf_priv_t *buf_priv = buf->dev_private;
                        if ( buf_priv->kernel_virtual && buf->total )
-                               DRM(ioremapfree)(buf_priv->kernel_virtual, buf->total);
+                               DRM(ioremapfree)(buf_priv->kernel_virtual, buf->total, dev);
                }
        }
        return 0;
@@ -333,7 +333,7 @@ static int i810_freelist_init(drm_device_t *dev, drm_i810_private_t *dev_priv)
                *buf_priv->in_use = I810_BUF_FREE;
 
                buf_priv->kernel_virtual = DRM(ioremap)(buf->bus_address,
-                                                       buf->total);
+                                                       buf->total, dev);
        }
        return 0;
 }
@@ -386,7 +386,7 @@ static int i810_dma_initialize(drm_device_t *dev,
 
        dev_priv->ring.virtual_start = DRM(ioremap)(dev->agp->base +
                                                    init->ring_start,
-                                                   init->ring_size);
+                                                   init->ring_size, dev);
 
        if (dev_priv->ring.virtual_start == NULL) {
                dev->dev_private = (void *) dev_priv;
index fe105ff..b0efda4 100644 (file)
@@ -246,7 +246,7 @@ int i830_dma_cleanup(drm_device_t *dev)
           
                if (dev_priv->ring.virtual_start) {
                        DRM(ioremapfree)((void *) dev_priv->ring.virtual_start,
-                                        dev_priv->ring.Size);
+                                        dev_priv->ring.Size, dev);
                }
                if (dev_priv->hw_status_page) {
                        pci_free_consistent(dev->pdev, PAGE_SIZE,
@@ -264,7 +264,7 @@ int i830_dma_cleanup(drm_device_t *dev)
                        drm_buf_t *buf = dma->buflist[ i ];
                        drm_i830_buf_priv_t *buf_priv = buf->dev_private;
                        if ( buf_priv->kernel_virtual && buf->total )
-                               DRM(ioremapfree)(buf_priv->kernel_virtual, buf->total);
+                               DRM(ioremapfree)(buf_priv->kernel_virtual, buf->total, dev);
                }
        }
        return 0;
@@ -340,7 +340,7 @@ static int i830_freelist_init(drm_device_t *dev, drm_i830_private_t *dev_priv)
                *buf_priv->in_use = I830_BUF_FREE;
 
                buf_priv->kernel_virtual = DRM(ioremap)(buf->bus_address, 
-                                                       buf->total);
+                                                       buf->total, dev);
        }
        return 0;
 }
@@ -394,7 +394,7 @@ static int i830_dma_initialize(drm_device_t *dev,
 
        dev_priv->ring.virtual_start = DRM(ioremap)(dev->agp->base + 
                                                    init->ring_start, 
-                                                   init->ring_size);
+                                                   init->ring_size, dev);
 
        if (dev_priv->ring.virtual_start == NULL) {
                dev->dev_private = (void *) dev_priv;
index 71b9f3a..bfaee0d 100644 (file)
@@ -554,9 +554,9 @@ static int mga_do_init_dma( drm_device_t *dev, drm_mga_init_t *init )
                (drm_mga_sarea_t *)((u8 *)dev_priv->sarea->handle +
                                    init->sarea_priv_offset);
 
-       DRM_IOREMAP( dev_priv->warp );
-       DRM_IOREMAP( dev_priv->primary );
-       DRM_IOREMAP( dev_priv->buffers );
+       DRM_IOREMAP( dev_priv->warp, dev );
+       DRM_IOREMAP( dev_priv->primary, dev );
+       DRM_IOREMAP( dev_priv->buffers, dev );
 
        if(!dev_priv->warp->handle ||
           !dev_priv->primary->handle ||
@@ -651,11 +651,11 @@ int mga_do_cleanup_dma( drm_device_t *dev )
                drm_mga_private_t *dev_priv = dev->dev_private;
 
                if ( dev_priv->warp != NULL )
-                       DRM_IOREMAPFREE( dev_priv->warp );
+                       DRM_IOREMAPFREE( dev_priv->warp, dev );
                if ( dev_priv->primary != NULL )
-                       DRM_IOREMAPFREE( dev_priv->primary );
+                       DRM_IOREMAPFREE( dev_priv->primary, dev );
                if ( dev_priv->buffers != NULL )
-                       DRM_IOREMAPFREE( dev_priv->buffers );
+                       DRM_IOREMAPFREE( dev_priv->buffers, dev );
 
                if ( dev_priv->head != NULL ) {
                        mga_freelist_cleanup( dev );
index feb389d..9396ae9 100644 (file)
@@ -226,7 +226,7 @@ do {                                                                        \
        if ( MGA_VERBOSE ) {                                            \
                DRM_INFO( "BEGIN_DMA( %d ) in %s\n",                    \
                          (n), __FUNCTION__ );                          \
-               DRM_INFO( "   space=0x%x req=0x%x\n",                   \
+               DRM_INFO( "   space=0x%x req=0x%Zx\n",                  \
                          dev_priv->prim.space, (n) * DMA_BLOCK_SIZE ); \
        }                                                               \
        prim = dev_priv->prim.start;                                    \
@@ -276,7 +276,7 @@ do {                                                                        \
 #define DMA_WRITE( offset, val )                                       \
 do {                                                                   \
        if ( MGA_VERBOSE ) {                                            \
-               DRM_INFO( "   DMA_WRITE( 0x%08x ) at 0x%04x\n",         \
+               DRM_INFO( "   DMA_WRITE( 0x%08x ) at 0x%04Zx\n",        \
                          (u32)(val), write + (offset) * sizeof(u32) ); \
        }                                                               \
        *(volatile u32 *)(prim + write + (offset) * sizeof(u32)) = val; \
index 608b7da..bff0cad 100644 (file)
@@ -350,8 +350,8 @@ static void r128_cce_init_ring_buffer( drm_device_t *dev,
 
                R128_WRITE( R128_PM4_BUFFER_DL_RPTR_ADDR,
                            entry->busaddr[page_ofs]);
-               DRM_DEBUG( "ring rptr: offset=0x%08x handle=0x%08lx\n",
-                          entry->busaddr[page_ofs],
+               DRM_DEBUG( "ring rptr: offset=0x%08lx handle=0x%08lx\n",
+                          (unsigned long) entry->busaddr[page_ofs],
                           entry->handle + tmp_ofs );
        }
 
@@ -540,9 +540,9 @@ static int r128_do_init_cce( drm_device_t *dev, drm_r128_init_t *init )
                                     init->sarea_priv_offset);
 
        if ( !dev_priv->is_pci ) {
-               DRM_IOREMAP( dev_priv->cce_ring );
-               DRM_IOREMAP( dev_priv->ring_rptr );
-               DRM_IOREMAP( dev_priv->buffers );
+               DRM_IOREMAP( dev_priv->cce_ring, dev );
+               DRM_IOREMAP( dev_priv->ring_rptr, dev );
+               DRM_IOREMAP( dev_priv->buffers, dev );
                if(!dev_priv->cce_ring->handle ||
                   !dev_priv->ring_rptr->handle ||
                   !dev_priv->buffers->handle) {
@@ -629,11 +629,11 @@ int r128_do_cleanup_cce( drm_device_t *dev )
                if ( !dev_priv->is_pci ) {
 #endif
                        if ( dev_priv->cce_ring != NULL )
-                               DRM_IOREMAPFREE( dev_priv->cce_ring );
+                               DRM_IOREMAPFREE( dev_priv->cce_ring, dev );
                        if ( dev_priv->ring_rptr != NULL )
-                               DRM_IOREMAPFREE( dev_priv->ring_rptr );
+                               DRM_IOREMAPFREE( dev_priv->ring_rptr, dev );
                        if ( dev_priv->buffers != NULL )
-                               DRM_IOREMAPFREE( dev_priv->buffers );
+                               DRM_IOREMAPFREE( dev_priv->buffers, dev );
 #if __REALLY_HAVE_SG
                } else {
                        if (!DRM(ati_pcigart_cleanup)( dev,
index 281b0cf..c94178b 100644 (file)
@@ -903,8 +903,8 @@ static void radeon_cp_init_ring_buffer( drm_device_t *dev,
 
                RADEON_WRITE( RADEON_CP_RB_RPTR_ADDR,
                             entry->busaddr[page_ofs]);
-               DRM_DEBUG( "ring rptr: offset=0x%08x handle=0x%08lx\n",
-                          entry->busaddr[page_ofs],
+               DRM_DEBUG( "ring rptr: offset=0x%08lx handle=0x%08lx\n",
+                          (unsigned long) entry->busaddr[page_ofs],
                           entry->handle + tmp_ofs );
        }
 
@@ -1152,9 +1152,9 @@ static int radeon_do_init_cp( drm_device_t *dev, drm_radeon_init_t *init )
                                       init->sarea_priv_offset);
 
        if ( !dev_priv->is_pci ) {
-               DRM_IOREMAP( dev_priv->cp_ring );
-               DRM_IOREMAP( dev_priv->ring_rptr );
-               DRM_IOREMAP( dev_priv->buffers );
+               DRM_IOREMAP( dev_priv->cp_ring, dev );
+               DRM_IOREMAP( dev_priv->ring_rptr, dev );
+               DRM_IOREMAP( dev_priv->buffers, dev );
                if(!dev_priv->cp_ring->handle ||
                   !dev_priv->ring_rptr->handle ||
                   !dev_priv->buffers->handle) {
@@ -1279,11 +1279,11 @@ int radeon_do_cleanup_cp( drm_device_t *dev )
 
                if ( !dev_priv->is_pci ) {
                        if ( dev_priv->cp_ring != NULL )
-                               DRM_IOREMAPFREE( dev_priv->cp_ring );
+                               DRM_IOREMAPFREE( dev_priv->cp_ring, dev );
                        if ( dev_priv->ring_rptr != NULL )
-                               DRM_IOREMAPFREE( dev_priv->ring_rptr );
+                               DRM_IOREMAPFREE( dev_priv->ring_rptr, dev );
                        if ( dev_priv->buffers != NULL )
-                               DRM_IOREMAPFREE( dev_priv->buffers );
+                               DRM_IOREMAPFREE( dev_priv->buffers, dev );
                } else {
 #if __REALLY_HAVE_SG
                        if (!DRM(ati_pcigart_cleanup)( dev,
index 2322dc4..d7a5551 100644 (file)
@@ -227,7 +227,12 @@ static void hvc_poll(int index)
        spin_unlock_irqrestore(&hp->lock, flags);
 }
 
+#if defined (CONFIG_XMON)
 extern unsigned long cpus_in_xmon;
+#else
+unsigned long cpus_in_xmon = 0;
+#endif
+
 
 int khvcd(void *unused)
 {
index 83a0987..60b698c 100644 (file)
@@ -71,7 +71,8 @@ static ssize_t read_mem(struct file * file, char * buf,
        unsigned long p = *ppos;
        unsigned long end_mem;
        ssize_t read;
-       
+       void *addr;
+
        end_mem = __pa(high_memory);
        if (p >= end_mem)
                return 0;
@@ -94,8 +95,14 @@ static ssize_t read_mem(struct file * file, char * buf,
                }
        }
 #endif
-       if (copy_to_user(buf, __va(p), count))
+       if (file->f_flags & O_SYNC)
+               addr = ioremap(p, count);
+       else
+               addr = __va(p);
+       if (copy_to_user(buf, addr, count))
                return -EFAULT;
+       if (file->f_flags & O_SYNC)
+               iounmap(addr);
        read += count;
        *ppos += read;
        return read;
@@ -106,13 +113,22 @@ static ssize_t write_mem(struct file * file, const char * buf,
 {
        unsigned long p = *ppos;
        unsigned long end_mem;
+       ssize_t ret;
+       void *addr;
 
        end_mem = __pa(high_memory);
        if (p >= end_mem)
                return 0;
        if (count > end_mem - p)
                count = end_mem - p;
-       return do_write_mem(file, __va(p), p, buf, count, ppos);
+       if (file->f_flags & O_SYNC)
+               addr = ioremap(p, count);
+       else
+               addr = __va(p);
+       ret = do_write_mem(file, addr, p, buf, count, ppos);
+       if (file->f_flags & O_SYNC)
+               iounmap(addr);
+       return ret;
 }
 
 #ifndef pgprot_noncached
@@ -522,17 +538,19 @@ static loff_t null_lseek(struct file * file, loff_t offset, int orig)
  */
 static loff_t memory_lseek(struct file * file, loff_t offset, int orig)
 {
-       int ret;
+       loff_t ret;
 
        lock_kernel();
        switch (orig) {
                case 0:
                        file->f_pos = offset;
                        ret = file->f_pos;
+                       force_successful_syscall_return();
                        break;
                case 1:
                        file->f_pos += offset;
                        ret = file->f_pos;
+                       force_successful_syscall_return();
                        break;
                default:
                        ret = -EINVAL;
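
With this change, opening /dev/mem with O_SYNC yields uncached access through a transient ioremap() instead of the cached identity map. A hedged userspace sketch (the helper name is made up):

	#include <fcntl.h>
	#include <unistd.h>

	/* read `len` bytes of physical memory uncached; illustrative only */
	static ssize_t read_phys_uncached (unsigned long phys, void *buf, size_t len)
	{
		int fd = open("/dev/mem", O_RDONLY | O_SYNC);	/* O_SYNC => ioremap path */
		ssize_t n = -1;

		if (fd >= 0) {
			if (lseek(fd, phys, SEEK_SET) == (off_t) phys)
				n = read(fd, buf, len);
			close(fd);
		}
		return n;
	}
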
index 62263fa..a481129 100644 (file)
@@ -2,6 +2,8 @@
 # Makefile for the kernel character device drivers.
 #
 
+obj-y          := dummy.o
+
 miropcm20-objs := miropcm20-rds-core.o miropcm20-radio.o
 
 obj-$(CONFIG_RADIO_AZTECH) += radio-aztech.o
diff --git a/drivers/media/radio/dummy.c b/drivers/media/radio/dummy.c
new file mode 100644 (file)
index 0000000..4f9d00a
--- /dev/null
@@ -0,0 +1 @@
+/* just so the linker knows what kind of object files it's dealing with... */
index cbc7e09..80db81d 100644 (file)
@@ -6,6 +6,8 @@ bttv-objs       :=      bttv-driver.o bttv-cards.o bttv-if.o \
                        bttv-risc.o bttv-vbi.o
 zoran-objs      :=     zr36120.o zr36120_i2c.o zr36120_mem.o
 
+obj-y          :=      dummy.o
+
 obj-$(CONFIG_VIDEO_DEV) += videodev.o v4l2-common.o v4l1-compat.o
 
 obj-$(CONFIG_VIDEO_BT848) += bttv.o msp3400.o tvaudio.o \
diff --git a/drivers/media/video/dummy.c b/drivers/media/video/dummy.c
new file mode 100644 (file)
index 0000000..4f9d00a
--- /dev/null
@@ -0,0 +1 @@
+/* just so the linker knows what kind of object files it's dealing with... */
index ad7d115..4658e36 100644 (file)
@@ -278,6 +278,10 @@ void tulip_select_media(struct net_device *dev, int startup)
                                for (i = 0; i < init_length; i++)
                                        outl(init_sequence[i], ioaddr + CSR12);
                        }
+
+                       (void) inl(ioaddr + CSR6); /* flush CSR12 writes */
+                       udelay(500);            /* Give MII time to recover */
+
                        tmp_info = get_u16(&misc_info[1]);
                        if (tmp_info)
                                tp->advertising[phy_num] = tmp_info | 1;
index 6de4a1e..e202fe8 100644 (file)
 #define  QL1280_TARGET_MODE_SUPPORT    0       /* Target mode support */
 #define  QL1280_LUN_SUPPORT            0
 #define  WATCHDOGTIMER                 0
-#define  MEMORY_MAPPED_IO              0
+#define  MEMORY_MAPPED_IO              1
 #define  DEBUG_QLA1280_INTR            0
 #define  USE_NVRAM_DEFAULTS           0
 #define  DEBUG_PRINT_NVRAM             0
@@ -2634,7 +2634,7 @@ qla1280_pci_config(struct scsi_qla_host *ha)
        /*
         * Get memory mapped I/O address.
         */
-       pci_read_config_word (ha->pdev, PCI_BASE_ADDRESS_1, &mmapbase);
+       pci_read_config_dword (ha->pdev, PCI_BASE_ADDRESS_1, &mmapbase);
        mmapbase &= PCI_BASE_ADDRESS_MEM_MASK;
 
        /*
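
The word-sized config read was silently truncating the 32-bit BAR; a dword read (or, more idiomatically even in 2.5, the resource API) is required, e.g.:

	/* Equivalent modern idiom, assuming BAR 1 is the MMIO window: */
	unsigned long mmio_base = pci_resource_start(ha->pdev, 1);
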
index 1308fff..c8a80c3 100644 (file)
@@ -219,6 +219,9 @@ int scsi_ioctl_send_command(Scsi_Device * dev, Scsi_Ioctl_Command * sic)
        unsigned int needed, buf_needed;
        int timeout, retries, result;
        int data_direction, gfp_mask = GFP_KERNEL;
+#if __GNUC__ < 3
+       int foo;
+#endif
 
        if (!sic)
                return -EINVAL;
@@ -232,11 +235,21 @@ int scsi_ioctl_send_command(Scsi_Device * dev, Scsi_Ioctl_Command * sic)
        if (verify_area(VERIFY_READ, sic, sizeof(Scsi_Ioctl_Command)))
                return -EFAULT;
 
+#if __GNUC__ < 3
+       foo = __get_user(inlen, &sic->inlen);
+       if (foo)
+               return -EFAULT;
+
+       foo = __get_user(outlen, &sic->outlen);
+       if (foo)
+               return -EFAULT;
+#else
        if(__get_user(inlen, &sic->inlen))
                return -EFAULT;
                
        if(__get_user(outlen, &sic->outlen))
                return -EFAULT;
+#endif
 
        /*
         * We do not transfer more than MAX_BUF with this interface.
index 2509a8a..904c60f 100644 (file)
@@ -295,12 +295,8 @@ struct host_data {
 #ifndef SYM_LINUX_DYNAMIC_DMA_MAPPING
 typedef u_long         bus_addr_t;
 #else
-#if    SYM_CONF_DMA_ADDRESSING_MODE > 0
-typedef dma64_addr_t   bus_addr_t;
-#else
 typedef dma_addr_t     bus_addr_t;
 #endif
-#endif
 
 /*
  *  Used by the eh thread to wait for command completion.
index 7f32ed4..7395ddb 100644 (file)
@@ -150,7 +150,6 @@ static void ___sym_mfree(m_pool_p mp, void *ptr, int size)
                        ((m_link_p) a)->next = h[i].next;
                        h[i].next = (m_link_p) a;
 #endif
-                       break;
                }
                b = a ^ s;
                q = &h[i];
index b554e89..eb7b78f 100644 (file)
@@ -2064,9 +2064,11 @@ int register_serial(struct serial_struct *req)
        return __register_serial(req, -1);
 }
 
-int __init early_serial_setup(struct serial_struct *req)
+int __init early_serial_setup(struct uart_port *port)
 {
-       __register_serial(req, req->line);
+       serial8250_isa_init_ports();
+       serial8250_ports[port->line].port       = *port;
+       serial8250_ports[port->line].port.ops   = &serial8250_pops;
        return 0;
 }
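
Callers of early_serial_setup() must now hand in a struct uart_port rather than a struct serial_struct; a hedged usage sketch (field values are placeholders):

	/* Illustrative caller, not part of the patch: */
	static int __init my_early_uart_init (void)
	{
		struct uart_port port;

		memset(&port, 0, sizeof(port));
		port.line    = 0;			/* becomes ttyS0 */
		port.iobase  = 0x3f8;			/* placeholder address */
		port.uartclk = BASE_BAUD * 16;
		return early_serial_setup(&port);
	}
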
 
diff --git a/drivers/serial/8250_acpi.c b/drivers/serial/8250_acpi.c
new file mode 100644 (file)
index 0000000..3891cde
--- /dev/null
@@ -0,0 +1,178 @@
+/*
+ * linux/drivers/serial/8250_acpi.c
+ *
+ * Copyright (C) 2000, 2002 Hewlett-Packard Co.
+ *     Khalid Aziz <khalid_aziz@hp.com>
+ *
+ * Detect and initialize the headless console serial port defined in the SPCR table and
+ * the debug serial port defined in the DBGP table.
+ *
+ * 2002/08/29 davidm   Adjust it to new 2.5 serial driver infrastructure.
+ */
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/acpi.h>
+#include <linux/init.h>
+#include <linux/tty.h>
+#include <linux/serial.h>
+#include <linux/serial_core.h>
+#include <linux/acpi_serial.h>
+
+#include <asm/io.h>
+#include <asm/serial.h>
+
+#undef SERIAL_DEBUG_ACPI
+
+#define ACPI_SERIAL_CONSOLE_PORT       0
+#define ACPI_SERIAL_DEBUG_PORT         5
+
+/*
+ * Query the ACPI tables for a debug and a headless console serial port. If found, add
+ * them to rs_table[]. A pointer to either the SPCR or the DBGP table is passed as a
+ * parameter. This function should be called before serial_console_init() to make sure
+ * the SPCR serial console will be available for use. The IA-64 kernel calls this
+ * function from within acpi.c when it encounters an SPCR or DBGP table while parsing
+ * the ACPI 2.0 tables during bootup.
+ */
+void __init
+setup_serial_acpi (void *tablep)
+{
+       acpi_ser_t *acpi_ser_p;
+       struct uart_port port;
+       unsigned long iobase;
+       int gsi;
+
+#ifdef SERIAL_DEBUG_ACPI
+       printk("Entering setup_serial_acpi()\n");
+#endif
+
+       /* Now get the table */
+       if (!tablep)
+               return;
+
+       memset(&port, 0, sizeof(port));
+
+       acpi_ser_p = (acpi_ser_t *) tablep;
+
+       /*
+        * Perform a sanity check on the table. The table should have a signature of
+        * "SPCR" or "DBGP" and must be at least 52 bytes long.
+        */
+       if (strncmp(acpi_ser_p->signature, ACPI_SPCRT_SIGNATURE, ACPI_SIG_LEN) != 0 &&
+           strncmp(acpi_ser_p->signature, ACPI_DBGPT_SIGNATURE, ACPI_SIG_LEN) != 0)
+               return;
+       if (acpi_ser_p->length < 52)
+               return;
+
+       iobase = (((u64) acpi_ser_p->base_addr.addrh) << 32) | acpi_ser_p->base_addr.addrl;
+       gsi = (  (acpi_ser_p->global_int[3] << 24) | (acpi_ser_p->global_int[2] << 16)
+              | (acpi_ser_p->global_int[1] <<  8) | (acpi_ser_p->global_int[0] <<  0));
+
+#ifdef SERIAL_DEBUG_ACPI
+       printk("setup_serial_acpi(): table pointer = 0x%p\n", acpi_ser_p);
+       printk("                     sig = '%c%c%c%c'\n", acpi_ser_p->signature[0],
+              acpi_ser_p->signature[1], acpi_ser_p->signature[2], acpi_ser_p->signature[3]);
+       printk("                     length = %d\n", acpi_ser_p->length);
+       printk("                     Rev = %d\n", acpi_ser_p->rev);
+       printk("                     Interface type = %d\n", acpi_ser_p->intfc_type);
+       printk("                     Base address = 0x%lX\n", iobase);
+       printk("                     IRQ = %d\n", acpi_ser_p->irq);
+       printk("                     Global System Int = %d\n", gsi);
+       printk("                     Baud rate = ");
+       switch (acpi_ser_p->baud) {
+             case ACPI_SERIAL_BAUD_9600:
+               printk("9600\n");
+               break;
+
+             case ACPI_SERIAL_BAUD_19200:
+               printk("19200\n");
+               break;
+
+             case ACPI_SERIAL_BAUD_57600:
+               printk("57600\n");
+               break;
+
+             case ACPI_SERIAL_BAUD_115200:
+               printk("115200\n");
+               break;
+
+             default:
+               printk("Huh (%d)\n", acpi_ser_p->baud);
+               break;
+       }
+       if (acpi_ser_p->base_addr.space_id == ACPI_SERIAL_PCICONF_SPACE) {
+               printk("                     PCI serial port:\n");
+               printk("                         Bus %d, Device %d, Vendor ID 0x%x, Dev ID 0x%x\n",
+                      acpi_ser_p->pci_bus, acpi_ser_p->pci_dev,
+                      acpi_ser_p->pci_vendor_id, acpi_ser_p->pci_dev_id);
+       }
+#endif
+       /*
+        * Now fill in a uart_port structure for the entry describing the headless
+        * console port.
+        */
+       switch (acpi_ser_p->intfc_type) {
+             case ACPI_SERIAL_INTFC_16550:
+               port.type = PORT_16550;
+               port.uartclk = BASE_BAUD * 16;
+               break;
+
+             case ACPI_SERIAL_INTFC_16450:
+               port.type = PORT_16450;
+               port.uartclk = BASE_BAUD * 16;
+               break;
+
+             default:
+               port.type = PORT_UNKNOWN;
+               break;
+       }
+       if (strncmp(acpi_ser_p->signature, ACPI_SPCRT_SIGNATURE, ACPI_SIG_LEN) == 0)
+               port.line = ACPI_SERIAL_CONSOLE_PORT;
+       else if (strncmp(acpi_ser_p->signature, ACPI_DBGPT_SIGNATURE, ACPI_SIG_LEN) == 0)
+               port.line = ACPI_SERIAL_DEBUG_PORT;
+       /*
+        * Check if this is an I/O mapped address or a memory mapped address
+        */
+       if (acpi_ser_p->base_addr.space_id == ACPI_SERIAL_MEM_SPACE) {
+               port.iobase = 0;
+               port.mapbase = iobase;
+               port.membase = ioremap(iobase, 64);
+               port.iotype = SERIAL_IO_MEM;
+       } else if (acpi_ser_p->base_addr.space_id == ACPI_SERIAL_IO_SPACE) {
+               port.iobase = iobase;
+               port.mapbase = 0;
+               port.membase = NULL;
+               port.iotype = SERIAL_IO_PORT;
+       } else if (acpi_ser_p->base_addr.space_id == ACPI_SERIAL_PCICONF_SPACE) {
+               printk("WARNING: No support for PCI serial console\n");
+               return;
+       }
+
+       /*
+        * If the table does not have IRQ information, use 0 for IRQ.  This will force
+        * rs_init() to probe for IRQ.
+        */
+       if (acpi_ser_p->length < 53)
+               port.irq = 0;
+       else {
+               port.flags = UPF_SKIP_TEST | UPF_BOOT_AUTOCONF | UPF_AUTO_IRQ;
+               if (acpi_ser_p->int_type & (ACPI_SERIAL_INT_APIC | ACPI_SERIAL_INT_SAPIC))
+                       port.irq = gsi;
+               else if (acpi_ser_p->int_type & ACPI_SERIAL_INT_PCAT)
+                       port.irq = acpi_ser_p->irq;
+               else
+                       /*
+                        * If no IRQ type is set, the UART will run in polling
+                        * mode; do not probe for an IRQ in that case.
+                        */
+                       port.flags &= ~UPF_AUTO_IRQ;
+       }
+       if (early_serial_setup(&port) < 0) {
+               printk("early_serial_setup() for ACPI serial console port failed\n");
+               return;
+       }
+
+#ifdef SERIAL_DEBUG_ACPI
+       printk("Leaving setup_serial_acpi()\n");
+#endif
+}
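
The 64-bit base address and the byte-wise GSI above are assembled open-coded; the same arithmetic as hedged helpers (names hypothetical):

	#include <linux/types.h>

	/* Combine the two halves of an ACPI generic address. */
	static inline u64 gas_address(u32 addrh, u32 addrl)
	{
	        return ((u64) addrh << 32) | addrl;
	}

	/* Read a little-endian 32-bit field stored as four bytes. */
	static inline u32 le32_field(const u8 b[4])
	{
	        return ((u32) b[3] << 24) | (b[2] << 16) | (b[1] << 8) | b[0];
	}
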
diff --git a/drivers/serial/8250_hcdp.c b/drivers/serial/8250_hcdp.c
new file mode 100644 (file)
index 0000000..a6f16f1
--- /dev/null
@@ -0,0 +1,221 @@
+/*
+ * linux/drivers/serial/8250_hcdp.c
+ *
+ * Copyright (C) 2002 Hewlett-Packard Co.
+ *     Khalid Aziz <khalid_aziz@hp.com>
+ *
+ * Parse the EFI HCDP table to locate serial console and debug ports and initialize them.
+ *
+ * 2002/08/29 davidm   Adjust it to new 2.5 serial driver infrastructure (untested).
+ */
+#include <linux/config.h>
+
+#include <linux/kernel.h>
+#include <linux/efi.h>
+#include <linux/init.h>
+#include <linux/tty.h>
+#include <linux/serial.h>
+#include <linux/serial_core.h>
+#include <linux/types.h>
+#include <linux/acpi.h>
+
+#include <asm/io.h>
+#include <asm/serial.h>
+#include <asm/acpi.h>
+
+#include "8250_hcdp.h"
+
+#undef SERIAL_DEBUG_HCDP
+
+/*
+ * Parse the HCDP table to find descriptions for the headless console and debug serial
+ * ports and add them to rs_table[]. A pointer to the HCDP table is passed as a
+ * parameter. This function should be called before serial_console_init() to make sure
+ * the HCDP serial console is available for use. The IA-64 kernel calls this function
+ * from setup_arch() after the EFI and ACPI tables have been parsed.
+ */
+void __init
+setup_serial_hcdp (void *tablep)
+{
+       hcdp_dev_t *hcdp_dev;
+       struct uart_port port;
+       unsigned long iobase;
+       hcdp_t hcdp;
+       int gsi, nr;
+#if 0
+       static int shift_once = 1;
+#endif
+
+#ifdef SERIAL_DEBUG_HCDP
+       printk("Entering setup_serial_hcdp()\n");
+#endif
+
+       /* Verify we have a valid table pointer */
+       if (!tablep)
+               return;
+
+       memset(&port, 0, sizeof(port));
+
+       /*
+        * Don't trust firmware to give us a table starting at an aligned address. Make a
+        * local copy of the HCDP table with aligned structures.
+        */
+       memcpy(&hcdp, tablep, sizeof(hcdp));
+
+       /*
+        * Perform a sanity check on the table. The table should have a signature of
+        * "HCDP" and must be at least 82 bytes long to carry any useful information.
+        */
+       if ((strncmp(hcdp.signature, HCDP_SIGNATURE, HCDP_SIG_LEN) != 0))
+               return;
+       if (hcdp.len < 82)
+               return;
+
+#ifdef SERIAL_DEBUG_HCDP
+       printk("setup_serial_hcdp(): table pointer = 0x%p, sig = '%.4s'\n",
+              tablep, hcdp.signature);
+       printk(" length = %d, rev = %d, ", hcdp.len, hcdp.rev);
+       printk("OEM ID = %.6s, # of entries = %d\n", hcdp.oemid, hcdp.num_entries);
+#endif
+
+       /*
+        * Parse each device entry
+        */
+       for (nr = 0; nr < hcdp.num_entries; nr++) {
+               hcdp_dev = hcdp.hcdp_dev + nr;
+               /*
+                * We parse only the primary console device, which is the first entry of
+                * its type. The remaining entries for a device type that has already
+                * been parsed and initialized are ignored.
+                */
+               if (hcdp_dev->type != HCDP_DEV_CONSOLE)
+                       continue;
+
+               iobase = ((u64) hcdp_dev->base_addr.addrhi << 32) | hcdp_dev->base_addr.addrlo;
+               gsi = hcdp_dev->global_int;
+
+               /* See PCI spec v2.2, Appendix D (Class Codes): */
+               switch (hcdp_dev->pci_prog_intfc) {
+                     case 0x00: port.type = PORT_8250;  break;
+                     case 0x01: port.type = PORT_16450; break;
+                     case 0x02: port.type = PORT_16550; break;
+                     case 0x03: port.type = PORT_16650; break;
+                     case 0x04: port.type = PORT_16750; break;
+                     case 0x05: port.type = PORT_16850; break;
+                     case 0x06: port.type = PORT_16C950; break;
+                     default:
+                       printk(KERN_WARNING "warning: EFI HCDP table reports unknown serial "
+                              "programming interface 0x%02x; will autoprobe.\n",
+                              hcdp_dev->pci_prog_intfc);
+                       port.type = PORT_UNKNOWN;
+                       break;
+               }
+
+#ifdef SERIAL_DEBUG_HCDP
+               printk("  type = %s, uart = %d\n", ((hcdp_dev->type == HCDP_DEV_CONSOLE)
+                                        ? "Headless Console" : ((hcdp_dev->type == HCDP_DEV_DEBUG)
+                                                                ? "Debug port" : "Huh????")),
+                      port.type);
+               printk("  base address space = %s, base address = 0x%lx\n",
+                      ((hcdp_dev->base_addr.space_id == ACPI_MEM_SPACE)
+                       ? "Memory Space" : ((hcdp_dev->base_addr.space_id == ACPI_IO_SPACE)
+                                           ? "I/O space" : "PCI space")),
+                      iobase);
+               printk("  gsi = %d, baud rate = %lu, bits = %d, clock = %d\n",
+                      gsi, (unsigned long) hcdp_dev->baud, hcdp_dev->bits, hcdp_dev->clock_rate);
+               if (hcdp_dev->base_addr.space_id == ACPI_PCICONF_SPACE)
+                       printk(" PCI id: %02x:%02x:%02x, vendor ID=0x%x, dev ID=0x%x\n",
+                              hcdp_dev->pci_seg, hcdp_dev->pci_bus, hcdp_dev->pci_dev,
+                              hcdp_dev->pci_vendor_id, hcdp_dev->pci_dev_id);
+#endif
+               /*
+                * Now fill in a port structure to update the 8250 port table.
+                */
+               if (hcdp_dev->clock_rate)
+                       port.uartclk = hcdp_dev->clock_rate;
+               else
+                       port.uartclk = BASE_BAUD * 16;
+
+               /*
+                * Check if this is an I/O mapped address or a memory mapped address
+                */
+               if (hcdp_dev->base_addr.space_id == ACPI_MEM_SPACE) {
+                       port.iobase = 0;
+                       port.mapbase = iobase;
+                       port.membase = ioremap(iobase, 64);
+                       port.iotype = SERIAL_IO_MEM;
+               } else if (hcdp_dev->base_addr.space_id == ACPI_IO_SPACE) {
+                       port.iobase = iobase;
+                       port.mapbase = 0;
+                       port.membase = NULL;
+                       port.iotype = SERIAL_IO_PORT;
+               } else if (hcdp_dev->base_addr.space_id == ACPI_PCICONF_SPACE) {
+                       printk(KERN_WARNING "warning: No support for PCI serial console\n");
+                       return;
+               }
+#ifdef CONFIG_IA64
+               port.irq = acpi_register_irq(gsi, ACPI_ACTIVE_HIGH, ACPI_EDGE_SENSITIVE);
+#else
+               port.irq = gsi;
+#endif
+               port.flags = UPF_SKIP_TEST | UPF_BOOT_AUTOCONF;
+               if (gsi)
+                       port.flags |= UPF_AUTO_IRQ;
+
+               /*
+                * Note: the above memset() initializes port.line to 0, so we register
+                * this port as ttyS0.
+                */
+               if (early_serial_setup(&port) < 0) {
+                       printk("setup_serial_hcdp(): early_serial_setup() for HCDP serial "
+                              "console port failed. Will try any additional consoles in HCDP.\n");
+                       continue;
+               }
+               break;
+       }
+
+#ifdef SERIAL_DEBUG_HCDP
+       printk("Leaving setup_serial_hcdp()\n");
+#endif
+}
+
+#ifdef CONFIG_IA64_EARLY_PRINTK_UART
+unsigned long
+hcdp_early_uart (void)
+{
+       efi_system_table_t *systab;
+       efi_config_table_t *config_tables;
+       unsigned long addr = 0;
+       hcdp_t *hcdp = NULL;
+       hcdp_dev_t *dev;
+       int i;
+
+       systab = (efi_system_table_t *) ia64_boot_param->efi_systab;
+       if (!systab)
+               return 0;
+       systab = __va(systab);
+
+       config_tables = (efi_config_table_t *) systab->tables;
+       if (!config_tables)
+               return 0;
+       config_tables = __va(config_tables);
+
+       for (i = 0; i < systab->nr_tables; i++) {
+               if (efi_guidcmp(config_tables[i].guid, HCDP_TABLE_GUID) == 0) {
+                       hcdp = (hcdp_t *) config_tables[i].table;
+                       break;
+               }
+       }
+       if (!hcdp)
+               return 0;
+       hcdp = __va(hcdp);
+
+       for (i = 0, dev = hcdp->hcdp_dev; i < hcdp->num_entries; i++, dev++) {
+               if (dev->type == HCDP_DEV_CONSOLE) {
+                       addr = (u64) dev->base_addr.addrhi << 32 | dev->base_addr.addrlo;
+                       break;
+               }
+       }
+       return addr;
+}
+#endif /* CONFIG_IA64_EARLY_PRINTK_UART */
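
A hedged sketch of how an early-printk path might consume the physical address returned by hcdp_early_uart(); the register offsets assume a standard 16550 (THR at offset 0, LSR at offset 5):

	static unsigned char *early_uart;

	static void early_uart_init(void)
	{
	        unsigned long phys = hcdp_early_uart();

	        if (phys)
	                early_uart = ioremap(phys, 64);
	}

	static void early_putc(char c)
	{
	        if (!early_uart)
	                return;
	        while (!(readb(early_uart + 5) & 0x20))   /* wait for LSR.THRE */
	                ;
	        writeb(c, early_uart + 0);                /* transmit holding reg */
	}
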
diff --git a/drivers/serial/8250_hcdp.h b/drivers/serial/8250_hcdp.h
new file mode 100644 (file)
index 0000000..8ee1b60
--- /dev/null
@@ -0,0 +1,79 @@
+/*
+ * drivers/serial/8250_hcdp.h
+ *
+ * Copyright (C) 2002 Hewlett-Packard Co.
+ *     Khalid Aziz <khalid_aziz@hp.com>
+ *
+ * Definitions for HCDP defined serial ports (Serial console and debug
+ * ports)
+ */
+
+/* ACPI table signatures */
+#define HCDP_SIG_LEN           4
+#define HCDP_SIGNATURE         "HCDP"
+
+/* Space ID as defined in ACPI generic address structure */
+#define ACPI_MEM_SPACE         0
+#define ACPI_IO_SPACE          1
+#define ACPI_PCICONF_SPACE     2
+
+/*
+ * Maximum number of HCDP devices we want to read in
+ */
+#define MAX_HCDP_DEVICES       6
+
+/*
+ * Default UART clock rate if clock rate is 0 in HCDP table.
+ */
+#define DEFAULT_UARTCLK                115200
+
+/*
+ * ACPI Generic Address Structure
+ */
+typedef struct {
+       u8  space_id;
+       u8  bit_width;
+       u8  bit_offset;
+       u8  resv;
+       u32 addrlo;
+       u32 addrhi;
+} acpi_gen_addr;
+
+/* HCDP Device descriptor entry types */
+#define HCDP_DEV_CONSOLE       0
+#define HCDP_DEV_DEBUG         1
+
+/* HCDP Device descriptor type */
+typedef struct {
+       u8      type;
+       u8      bits;
+       u8      parity;
+       u8      stop_bits;
+       u8      pci_seg;
+       u8      pci_bus;
+       u8      pci_dev;
+       u8      pci_func;
+       u64     baud;
+       acpi_gen_addr   base_addr;
+       u16     pci_dev_id;
+       u16     pci_vendor_id;
+       u32     global_int;
+       u32     clock_rate;
+       u8      pci_prog_intfc;
+       u8      resv;
+} hcdp_dev_t;
+
+/* HCDP Table format */
+typedef struct {
+       u8      signature[4];
+       u32     len;
+       u8      rev;
+       u8      chksum;
+       u8      oemid[6];
+       u8      oem_tabid[8];
+       u32     oem_rev;
+       u8      creator_id[4];
+       u32     creator_rev;
+       u32     num_entries;
+       hcdp_dev_t      hcdp_dev[MAX_HCDP_DEVICES];
+} hcdp_t;
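
With natural alignment the header above is 40 bytes and a device descriptor runs 42 bytes up to and including its final resv byte, which is where the "at least 82 bytes" check in 8250_hcdp.c comes from (40 + 42 = 82). A hedged compile-time restatement, relying on GCC's extended offsetof designators:

	#include <stddef.h>

	/* Fails to compile if the layout drifts from the parser's 82-byte
	 * minimum (header plus one full device descriptor). */
	extern char hcdp_min_len_ok[
	        (offsetof(hcdp_t, hcdp_dev[0].resv) + 1 == 82) ? 1 : -1];
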
index 48aec4e..1fc92d2 100644 (file)
@@ -77,6 +77,15 @@ config SERIAL_8250_CS
          a module, say M here and read <file:Documentation/modules.txt>.
          If unsure, say N.
 
+config SERIAL_8250_HCDP
+       bool "8250/16550 device discovery support via EFI HCDP table"
+       depends on IA64
+       ---help---
+         If you wish to make the serial console port described by the EFI
+         HCDP table available for use as a serial console or
+         general-purpose port, say Y here. See
+         <http://www.dig64.org/specifications/DIG64_HCDPv10a_01.pdf>.
+
 config SERIAL_8250_EXTENDED
        bool "Extended 8250/16550 serial driver options"
        depends on SERIAL_8250
index 35631fa..c6f6071 100644 (file)
@@ -8,6 +8,8 @@ serial-8250-y :=
 serial-8250-$(CONFIG_GSC) += 8250_gsc.o
 serial-8250-$(CONFIG_PCI) += 8250_pci.o
 serial-8250-$(CONFIG_PNP) += 8250_pnp.o
+serial-8250-$(CONFIG_ACPI) += acpi.o
+serial-8250-$(CONFIG_SERIAL_8250_HCDP) += 8250_hcdp.o
 obj-$(CONFIG_SERIAL_CORE) += core.o
 obj-$(CONFIG_SERIAL_21285) += 21285.o
 obj-$(CONFIG_SERIAL_8250) += 8250.o $(serial-8250-y)
diff --git a/drivers/serial/acpi.c b/drivers/serial/acpi.c
new file mode 100644 (file)
index 0000000..85c1b6d
--- /dev/null
@@ -0,0 +1,109 @@
+/*
+ * serial/acpi.c
+ * Copyright (c) 2002-2003 Matthew Wilcox for Hewlett-Packard
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/acpi.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/serial.h>
+
+#include <acpi/acpi_bus.h>
+
+#include <asm/io.h>
+#include <asm/serial.h>
+
+static void acpi_serial_address(struct serial_struct *req, struct acpi_resource_address32 *addr32)
+{
+       unsigned long size;
+
+       size = addr32->max_address_range - addr32->min_address_range + 1;
+       req->iomap_base = addr32->min_address_range;
+       req->iomem_base = ioremap(req->iomap_base, size);
+       req->io_type = SERIAL_IO_MEM;
+}
+
+static void acpi_serial_irq(struct serial_struct *req, struct acpi_resource_ext_irq *ext_irq)
+{
+       if (ext_irq->number_of_interrupts > 0) {
+#ifdef CONFIG_IA64
+               req->irq = acpi_register_irq(ext_irq->interrupts[0],
+                         ext_irq->active_high_low, ext_irq->edge_level);
+#else
+               req->irq = ext_irq->interrupts[0];
+#endif
+       }
+}
+
+static int acpi_serial_add(struct acpi_device *device)
+{
+       acpi_status result;
+       struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
+       struct serial_struct serial_req;
+       int line, offset = 0;
+
+       memset(&serial_req, 0, sizeof(serial_req));
+       result = acpi_get_current_resources(device->handle, &buffer);
+       if (ACPI_FAILURE(result)) {
+               result = -ENODEV;
+               goto out;
+       }
+
+       while (offset <= buffer.length) {
+               struct acpi_resource *res = buffer.pointer + offset;
+               if (res->length == 0)
+                       break;
+               offset += res->length;
+               if (res->id == ACPI_RSTYPE_ADDRESS32) {
+                       acpi_serial_address(&serial_req, &res->data.address32);
+               } else if (res->id == ACPI_RSTYPE_EXT_IRQ) {
+                       acpi_serial_irq(&serial_req, &res->data.extended_irq);
+               }
+       }
+
+       serial_req.baud_base = BASE_BAUD;
+       serial_req.flags = ASYNC_SKIP_TEST|ASYNC_BOOT_AUTOCONF|ASYNC_AUTO_IRQ;
+
+       result = 0;
+       line = register_serial(&serial_req);
+       if (line < 0)
+               result = -ENODEV;
+
+ out:
+       acpi_os_free(buffer.pointer);
+       return result;
+}
+
+static int acpi_serial_remove(struct acpi_device *device, int type)
+{
+       return 0;
+}
+
+static struct acpi_driver acpi_serial_driver = {
+       .name =         "serial",
+       .class =        "",
+       .ids =          "PNP0501",
+       .ops =  {
+               .add =          acpi_serial_add,
+               .remove =       acpi_serial_remove,
+       },
+};
+
+static int __init acpi_serial_init(void)
+{
+       acpi_bus_register_driver(&acpi_serial_driver);
+       return 0;
+}
+
+static void __exit acpi_serial_exit(void)
+{
+       acpi_bus_unregister_driver(&acpi_serial_driver);
+}
+
+module_init(acpi_serial_init);
+module_exit(acpi_serial_exit);
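
For context, "PNP0501" is the ACPI/PNP hardware ID for a 16550A-compatible COM port, so this driver binds to UARTs the firmware describes in the DSDT. The resource walk in acpi_serial_add() advances by each descriptor's length and stops at the zero-length end tag; a hedged standalone restatement of that loop shape:

	#include <linux/acpi.h>

	static void walk_resources(void *buf, size_t len)
	{
	        size_t off = 0;

	        while (off < len) {
	                struct acpi_resource *res = (void *)((char *) buf + off);

	                if (res->length == 0)       /* end tag terminates the list */
	                        break;
	                off += res->length;
	                /* dispatch on res->id here, as acpi_serial_add() does */
	        }
	}
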
index 0692a59..f749721 100644 (file)
@@ -320,6 +320,7 @@ static long do_fcntl(unsigned int fd, unsigned int cmd,
                         * to fix this will be in libc.
                         */
                        err = filp->f_owner.pid;
+                       force_successful_syscall_return();
                        break;
                case F_SETOWN:
                        err = f_setown(filp, arg, 1);
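
The force_successful_syscall_return() call is needed because F_GETOWN stores a process-group owner negated, so a perfectly valid return value can look like an errno to the generic syscall exit path. A user-space demonstration of the negative-but-successful return:

	#include <fcntl.h>
	#include <stdio.h>
	#include <unistd.h>

	int main(void)
	{
	        /* Deliver SIGIO for stdin to our whole process group; the owner
	         * is stored negated, so F_GETOWN returns a negative value. */
	        if (fcntl(STDIN_FILENO, F_SETOWN, -getpgrp()) < 0)
	                perror("F_SETOWN");
	        printf("F_GETOWN -> %d\n", fcntl(STDIN_FILENO, F_GETOWN));
	        return 0;
	}
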
index 06e363b..724f4b3 100644 (file)
@@ -534,7 +534,24 @@ static ssize_t mem_write(struct file * file, const char * buf,
 }
 #endif
 
+static loff_t mem_lseek(struct file * file, loff_t offset, int orig)
+{
+       switch (orig) {
+             case 0:
+               file->f_pos = offset;
+               break;
+             case 1:
+               file->f_pos += offset;
+               break;
+             default:
+               return -EINVAL;
+       }
+       force_successful_syscall_return();
+       return file->f_pos;
+}
+
 static struct file_operations proc_mem_operations = {
+       .llseek         = mem_lseek,
        .read           = mem_read,
        .write          = mem_write,
        .open           = mem_open,
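
mem_lseek() needs the same annotation because /proc/<pid>/mem is seeked to raw virtual addresses: with the high bit set, the returned f_pos is negative even though the seek succeeded. A hedged user-space sketch of a caller (helper name hypothetical):

	#include <fcntl.h>
	#include <stdio.h>
	#include <sys/types.h>
	#include <unistd.h>

	/* Peek one byte of a ptrace-stopped child's memory. */
	static int peek_byte(pid_t pid, unsigned long addr, unsigned char *out)
	{
	        char path[64];
	        int fd;
	        ssize_t n;

	        snprintf(path, sizeof(path), "/proc/%d/mem", pid);
	        fd = open(path, O_RDONLY);
	        if (fd < 0)
	                return -1;
	        /* The offset is the raw address; do not treat a negative
	         * lseek() return as failure for huge addresses. */
	        lseek(fd, (off_t) addr, SEEK_SET);
	        n = read(fd, out, 1);
	        close(fd);
	        return n == 1 ? 0 : -1;
	}
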
index 631d41b..d1e29ab 100644 (file)
@@ -176,7 +176,7 @@ int do_select(int n, fd_set_bits *fds, long *timeout)
 {
        struct poll_wqueues table;
        poll_table *wait;
-       int retval, i, off;
+       int retval, i;
        long __timeout = *timeout;
 
        spin_lock(&current->files->file_lock);
@@ -193,38 +193,54 @@ int do_select(int n, fd_set_bits *fds, long *timeout)
                wait = NULL;
        retval = 0;
        for (;;) {
+               unsigned long *rinp, *routp, *rexp, *inp, *outp, *exp;
                set_current_state(TASK_INTERRUPTIBLE);
-               for (i = 0 ; i < n; i++) {
-                       unsigned long bit = BIT(i);
-                       unsigned long mask;
-                       struct file *file;
 
-                       off = i / __NFDBITS;
-                       if (!(bit & BITS(fds, off)))
+               inp = fds->in; outp = fds->out; exp = fds->ex;
+               rinp = fds->res_in; routp = fds->res_out; rexp = fds->res_ex;
+
+               for (i = 0; i < n; ++rinp, ++routp, ++rexp) {
+                       unsigned long in, out, ex, all_bits, bit = 1, mask, j;
+                       unsigned long res_in = 0, res_out = 0, res_ex = 0;
+                       struct file_operations *f_op = NULL;
+                       struct file *file = NULL;
+
+                       in = *inp++; out = *outp++; ex = *exp++;
+                       all_bits = in | out | ex;
+                       if (all_bits == 0) {
+                               i += __NFDBITS;
                                continue;
-                       file = fget(i);
-                       mask = POLLNVAL;
-                       if (file) {
-                               mask = DEFAULT_POLLMASK;
-                               if (file->f_op && file->f_op->poll)
-                                       mask = file->f_op->poll(file, wait);
-                               fput(file);
-                       }
-                       if ((mask & POLLIN_SET) && ISSET(bit, __IN(fds,off))) {
-                               SET(bit, __RES_IN(fds,off));
-                               retval++;
-                               wait = NULL;
                        }
-                       if ((mask & POLLOUT_SET) && ISSET(bit, __OUT(fds,off))) {
-                               SET(bit, __RES_OUT(fds,off));
-                               retval++;
-                               wait = NULL;
-                       }
-                       if ((mask & POLLEX_SET) && ISSET(bit, __EX(fds,off))) {
-                               SET(bit, __RES_EX(fds,off));
-                               retval++;
-                               wait = NULL;
+
+                       for (j = 0; j < __NFDBITS; ++j, ++i, bit <<= 1) {
+                               if (i >= n)
+                                       break;
+                               if (!(bit & all_bits))
+                                       continue;
+                               file = fget(i);
+                               if (file) {
+                                       f_op = file->f_op;
+                                       mask = DEFAULT_POLLMASK;
+                                       if (f_op && f_op->poll)
+                                               mask = (*f_op->poll)(file, retval ? NULL : wait);
+                                       fput(file);
+                                       if ((mask & POLLIN_SET) && (in & bit)) {
+                                               res_in |= bit;
+                                               retval++;
+                                       }
+                                       if ((mask & POLLOUT_SET) && (out & bit)) {
+                                               res_out |= bit;
+                                               retval++;
+                                       }
+                                       if ((mask & POLLEX_SET) && (ex & bit)) {
+                                               res_ex |= bit;
+                                               retval++;
+                                       }
+                               }
                        }
+                       if (res_in) *rinp = res_in;
+                       if (res_out) *routp = res_out;
+                       if (res_ex) *rexp = res_ex;
                }
                wait = NULL;
                if (retval || !__timeout || signal_pending(current))
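
The rewrite above scans the fd bitmaps a word at a time: it ORs the in/out/ex words together and skips a whole word of fds when nothing in it is watched, instead of testing every bit individually. A hedged standalone sketch of the strategy:

	#include <stdio.h>

	#define LONG_BITS (8 * sizeof(unsigned long))

	static void scan(const unsigned long *in, const unsigned long *out,
	                 const unsigned long *ex, int n)
	{
	        int i = 0;

	        while (i < n) {
	                unsigned long all = *in++ | *out++ | *ex++;
	                unsigned long bit = 1;
	                unsigned int j;

	                if (all == 0) {             /* whole word unwatched: skip */
	                        i += LONG_BITS;
	                        continue;
	                }
	                for (j = 0; j < LONG_BITS && i < n; j++, i++, bit <<= 1)
	                        if (all & bit)
	                                printf("fd %d is watched\n", i);
	        }
	}

	int main(void)
	{
	        unsigned long in[2]  = { 0, 1UL << 3 };   /* fd LONG_BITS + 3 */
	        unsigned long out[2] = { 0, 0 }, ex[2] = { 0, 0 };

	        scan(in, out, ex, 2 * LONG_BITS);
	        return 0;
	}
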
index c99dbbb..03a9189 100644 (file)
 #define flush_agp_mappings() 
 #define flush_agp_cache() mb()
 
+/*
+ * Page-protection value to be used for AGP memory mapped into kernel space.  For
+ * platforms which use coherent AGP DMA, this can be PAGE_KERNEL.  For others, it needs to
+ * be an uncached mapping (such as write-combining).
+ */
+#define PAGE_AGP                       PAGE_KERNEL_NOCACHE     /* XXX fix me */
+
 #endif
index a310b9e..3a0ac66 100644 (file)
@@ -13,4 +13,6 @@ extern volatile unsigned long irq_err_count;
 #define ACTUAL_NR_IRQS NR_IRQS
 #endif
 
+extern irq_desc_t irq_desc [NR_IRQS];
+
 #endif
index 268ebfc..a1fb206 100644 (file)
@@ -69,6 +69,7 @@ struct switch_stack {
 #ifdef __KERNEL__
 #define user_mode(regs) (((regs)->ps & 8) != 0)
 #define instruction_pointer(regs) ((regs)->pc)
+#define force_successful_syscall_return()      do { } while (0)
 extern void show_regs(struct pt_regs *);
 #endif
 
index eaf50e3..d6ced79 100644 (file)
@@ -17,6 +17,8 @@
 #define instruction_pointer(regs) \
        (pc_pointer((regs)->ARM_pc))
 
+#define force_successful_syscall_return()      do { } while (0)
+
 #ifdef __KERNEL__
 extern void show_regs(struct pt_regs *);
 
index b7391cc..016fee9 100644 (file)
@@ -114,6 +114,7 @@ struct switch_stack {
 /* bit 8 is user-mode flag */
 #define user_mode(regs) (((regs)->dccr & 0x100) != 0)
 #define instruction_pointer(regs) ((regs)->irp)
+#define force_successful_syscall_return()      do { } while (0)
 extern void show_regs(struct pt_regs *);
 #endif
 
index 85dc064..c1829ed 100644 (file)
@@ -50,6 +50,7 @@ struct switch_stack {
 
 #define user_mode(regs) (!((regs)->ccr & PS_S))
 #define instruction_pointer(regs) ((regs)->pc)
+#define force_successful_syscall_return()      do { } while (0)
 extern void show_regs(struct pt_regs *);
 #endif /* __KERNEL__ */
 #endif /* __ASSEMBLY__ */
index 9ae97c0..56ab5cd 100644 (file)
    worth it. Would need a page for it. */
 #define flush_agp_cache() asm volatile("wbinvd":::"memory")
 
+/*
+ * Page-protection value to be used for AGP memory mapped into kernel space.  For
+ * platforms which use coherent AGP DMA, this can be PAGE_KERNEL.  For others, it needs to
+ * be an uncached mapping (such as write-combining).
+ */
+#define PAGE_AGP                       PAGE_KERNEL_NOCACHE
+
 #endif
index 65e0461..2476508 100644 (file)
@@ -117,4 +117,6 @@ static inline void hw_resend_irq(struct hw_interrupt_type *h, unsigned int i)
 static inline void hw_resend_irq(struct hw_interrupt_type *h, unsigned int i) {}
 #endif
 
+extern irq_desc_t irq_desc [NR_IRQS];
+
 #endif /* _ASM_HW_IRQ_H */
index d80fd65..0988167 100644 (file)
@@ -57,6 +57,7 @@ struct pt_regs {
 #ifdef __KERNEL__
 #define user_mode(regs) ((VM_MASK & (regs)->eflags) || (3 & (regs)->xcs))
 #define instruction_pointer(regs) ((regs)->eip)
+#define force_successful_syscall_return()      do { } while (0)
 #endif
 
 #endif
index 51bbd4a..9271d74 100644 (file)
@@ -3,30 +3,15 @@
  *
  * Copyright (C) 2003 Hewlett-Packard
  * Copyright (C) Alex Williamson
+ * Copyright (C) Bjorn Helgaas
  *
- * Vendor specific extensions to ACPI.  The HP-specific extensiosn are also used by NEC.
+ * Vendor specific extensions to ACPI.
  */
 #ifndef _ASM_IA64_ACPI_EXT_H
 #define _ASM_IA64_ACPI_EXT_H
 
 #include <linux/types.h>
 
-#define HP_CCSR_LENGTH 0x21
-#define HP_CCSR_TYPE   0x2
-#define HP_CCSR_GUID   EFI_GUID(0x69e9adf9, 0x924f, 0xab5f, \
-                                0xf6, 0x4a, 0x24, 0xd2, 0x01, 0x37, 0x0e, 0xad)
-
-struct acpi_hp_vendor_long {
-       u8      guid_id;
-       u8      guid[16];
-       u8      csr_base[8];
-       u8      csr_length[8];
-};
-
 extern acpi_status hp_acpi_csr_space (acpi_handle, u64 *base, u64 *length);
-extern acpi_status acpi_get_crs (acpi_handle, struct acpi_buffer *);
-extern struct acpi_resource *acpi_get_crs_next (struct acpi_buffer *, int *);
-extern union acpi_resource_data *acpi_get_crs_type (struct acpi_buffer *, int *, int);
-extern void acpi_dispose_crs (struct acpi_buffer *);
 
 #endif /* _ASM_IA64_ACPI_EXT_H */
index 65b3f29..37029e9 100644 (file)
@@ -56,11 +56,16 @@ ia64_atomic_sub (int i, atomic_t *v)
 }
 
 #define atomic_add_return(i,v)                                         \
-       ((__builtin_constant_p(i) &&                                    \
-         (   (i ==  1) || (i ==  4) || (i ==  8) || (i ==  16)         \
-          || (i == -1) || (i == -4) || (i == -8) || (i == -16)))       \
-        ? ia64_fetch_and_add(i, &(v)->counter)                         \
-        : ia64_atomic_add(i, v))
+({                                                                     \
+       int __ia64_aar_i = (i);                                         \
+       (__builtin_constant_p(i)                                        \
+        && (   (__ia64_aar_i ==  1) || (__ia64_aar_i ==   4)           \
+            || (__ia64_aar_i ==  8) || (__ia64_aar_i ==  16)           \
+            || (__ia64_aar_i == -1) || (__ia64_aar_i ==  -4)           \
+            || (__ia64_aar_i == -8) || (__ia64_aar_i == -16)))         \
+               ? ia64_fetch_and_add(__ia64_aar_i, &(v)->counter)       \
+               : ia64_atomic_add(__ia64_aar_i, v);                     \
+})
 
 /*
  * Atomically add I to V and return TRUE if the resulting value is
@@ -72,13 +77,17 @@ atomic_add_negative (int i, atomic_t *v)
        return atomic_add_return(i, v) < 0;
 }
 
-
 #define atomic_sub_return(i,v)                                         \
-       ((__builtin_constant_p(i) &&                                    \
-         (   (i ==  1) || (i ==  4) || (i ==  8) || (i ==  16)         \
-          || (i == -1) || (i == -4) || (i == -8) || (i == -16)))       \
-        ? ia64_fetch_and_add(-(i), &(v)->counter)                      \
-        : ia64_atomic_sub(i, v))
+({                                                                     \
+       int __ia64_asr_i = (i);                                         \
+       (__builtin_constant_p(i)                                        \
+        && (   (__ia64_asr_i ==   1) || (__ia64_asr_i ==   4)          \
+            || (__ia64_asr_i ==   8) || (__ia64_asr_i ==  16)          \
+            || (__ia64_asr_i ==  -1) || (__ia64_asr_i ==  -4)          \
+            || (__ia64_asr_i ==  -8) || (__ia64_asr_i == -16)))        \
+               ? ia64_fetch_and_add(-__ia64_asr_i, &(v)->counter)      \
+               : ia64_atomic_sub(__ia64_asr_i, v);                     \
+})
 
 #define atomic_dec_return(v)           atomic_sub_return(1, (v))
 #define atomic_inc_return(v)           atomic_add_return(1, (v))
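
The point of the temporaries above is single evaluation: the old macros expanded `i' several times, so an argument with side effects could run more than once. A small user-space demonstration of the hazard, using the same GCC statement-expression idiom:

	#include <stdio.h>

	#define BAD_TWICE(i)    ((i) + (i))                       /* evaluates i twice */
	#define GOOD_ONCE(i)    ({ int __t = (i); __t + __t; })   /* evaluates i once */

	static int calls;
	static int next(void) { return ++calls; }

	int main(void)
	{
	        calls = 0;
	        (void) BAD_TWICE(next());
	        printf("BAD_TWICE evaluated next() %d times\n", calls);   /* 2 */

	        calls = 0;
	        (void) GOOD_ONCE(next());
	        printf("GOOD_ONCE evaluated next() %d times\n", calls);   /* 1 */
	        return 0;
	}
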
index af58934..b133b67 100644 (file)
@@ -450,15 +450,15 @@ find_next_bit (void *addr, unsigned long size, unsigned long offset)
 
 #ifdef __KERNEL__
 
-#define __clear_bit(nr, addr)        clear_bit(nr, addr)
-
-#define ext2_set_bit                 test_and_set_bit
-#define ext2_set_atomic(l,n,a)      test_and_set_bit(n,a)
-#define ext2_clear_bit               test_and_clear_bit
-#define ext2_clear_atomic(l,n,a)     test_and_clear_bit(n,a)
-#define ext2_test_bit                test_bit
-#define ext2_find_first_zero_bit     find_first_zero_bit
-#define ext2_find_next_zero_bit      find_next_zero_bit
+#define __clear_bit(nr, addr)          clear_bit(nr, addr)
+
+#define ext2_set_bit                   test_and_set_bit
+#define ext2_set_bit_atomic(l,n,a)     test_and_set_bit(n,a)
+#define ext2_clear_bit                 test_and_clear_bit
+#define ext2_clear_bit_atomic(l,n,a)   test_and_clear_bit(n,a)
+#define ext2_test_bit                  test_bit
+#define ext2_find_first_zero_bit       find_first_zero_bit
+#define ext2_find_next_zero_bit                find_next_zero_bit
 
 /* Bitmap functions for the minix filesystem.  */
 #define minix_test_and_set_bit(nr,addr)                test_and_set_bit(nr,addr)
index 54baeef..9473a87 100644 (file)
@@ -102,6 +102,9 @@ struct compat_statfs {
        int             f_spare[6];
 };
 
+#define COMPAT_RLIM_OLD_INFINITY       0x7fffffff
+#define COMPAT_RLIM_INFINITY           0xffffffff
+
 typedef u32            compat_old_sigset_t;    /* at least 32 bits */
 
 #define _COMPAT_NSIG           64
index e7e1690..83cceb6 100644 (file)
@@ -1 +1,63 @@
-#include <asm-generic/dma-mapping.h>
+#ifndef _ASM_IA64_DMA_MAPPING_H
+#define _ASM_IA64_DMA_MAPPING_H
+
+/*
+ * Copyright (C) 2003 Hewlett-Packard Co
+ *     David Mosberger-Tang <davidm@hpl.hp.com>
+ */
+
+#define dma_alloc_coherent     platform_dma_alloc_coherent
+#define dma_alloc_noncoherent  platform_dma_alloc_coherent     /* coherent mem. is cheap */
+#define dma_free_coherent      platform_dma_free_coherent
+#define dma_free_noncoherent   platform_dma_free_coherent
+#define dma_map_single         platform_dma_map_single
+#define dma_map_sg             platform_dma_map_sg
+#define dma_unmap_single       platform_dma_unmap_single
+#define dma_unmap_sg           platform_dma_unmap_sg
+#define dma_sync_single                platform_dma_sync_single
+#define dma_sync_sg            platform_dma_sync_sg
+
+#define dma_map_page(dev, pg, off, size, dir)                          \
+       dma_map_single(dev, page_address(pg) + (off), (size), (dir))
+#define dma_unmap_page(dev, dma_addr, size, dir)                       \
+       dma_unmap_single(dev, dma_addr, size, dir)
+
+/*
+ * The rest of this file is part of the "Advanced DMA API".  Use at your own risk.
+ * See Documentation/DMA-API.txt for details.
+ */
+
+#define dma_sync_single_range(dev, dma_handle, offset, size, dir)      \
+       dma_sync_single(dev, dma_handle, size, dir)
+
+#define dma_supported          platform_dma_supported
+
+static inline int
+dma_set_mask (struct device *dev, u64 mask)
+{
+       if (!dev->dma_mask || !dma_supported(dev, mask))
+               return -EIO;
+       *dev->dma_mask = mask;
+       return 0;
+}
+
+static inline int
+dma_get_cache_alignment (void)
+{
+       extern int ia64_max_cacheline_size;
+       return ia64_max_cacheline_size;
+}
+
+static inline void
+dma_cache_sync (void *vaddr, size_t size, enum dma_data_direction dir)
+{
+       /*
+        * IA-64 is cache-coherent, so this is mostly a no-op.  However, we do need to
+        * ensure that dma_cache_sync() enforces order, hence the mb().
+        */
+       mb();
+}
+
+#define dma_is_consistent(dma_handle)  (1)     /* all we do is coherent memory... */
+
+#endif /* _ASM_IA64_DMA_MAPPING_H */
index 298dfc4..a8b5c44 100644 (file)
@@ -453,8 +453,6 @@ struct ia32_modify_ldt_ldt_s {
 struct linux_binprm;
 
 extern void ia32_gdt_init (void);
-extern int ia32_setup_frame1 (int sig, struct k_sigaction *ka, siginfo_t *info,
-                              sigset_t *set, struct pt_regs *regs);
 extern void ia32_init_addr_space (struct pt_regs *regs);
 extern int ia32_setup_arg_pages (struct linux_binprm *bprm);
 extern int ia32_exception (struct pt_regs *regs, unsigned long isr);
@@ -476,4 +474,8 @@ extern void ia32_load_segment_descriptors (struct task_struct *task);
 
 #endif /* !CONFIG_IA32_SUPPORT */
 
+/* Declare this unconditionally, so we don't get warnings for unreachable code.  */
+extern int ia32_setup_frame1 (int sig, struct k_sigaction *ka, siginfo_t *info,
+                             sigset_t *set, struct pt_regs *regs);
+
 #endif /* _ASM_IA64_IA32_H */
index 976e3c8..1940874 100644 (file)
 extern unsigned long __bad_size_for_ia64_fetch_and_add (void);
 extern unsigned long __bad_increment_for_ia64_fetch_and_add (void);
 
-#define IA64_FETCHADD(tmp,v,n,sz)                                              \
+#define IA64_FETCHADD(tmp,v,n,sz,sem)                                          \
 ({                                                                             \
        switch (sz) {                                                           \
              case 4:                                                           \
-               __asm__ __volatile__ ("fetchadd4.rel %0=[%1],%2"                \
+               __asm__ __volatile__ ("fetchadd4."sem" %0=[%1],%2"              \
                                      : "=r"(tmp) : "r"(v), "i"(n) : "memory"); \
                break;                                                          \
                                                                                \
              case 8:                                                           \
-               __asm__ __volatile__ ("fetchadd8.rel %0=[%1],%2"                \
+               __asm__ __volatile__ ("fetchadd8."sem" %0=[%1],%2"              \
                                      : "=r"(tmp) : "r"(v), "i"(n) : "memory"); \
                break;                                                          \
                                                                                \
@@ -35,32 +35,34 @@ extern unsigned long __bad_increment_for_ia64_fetch_and_add (void);
        }                                                                       \
 })
 
-#define ia64_fetch_and_add(i,v)                                                                \
+#define ia64_fetchadd(i,v,sem)                                                         \
 ({                                                                                     \
        __u64 _tmp;                                                                     \
        volatile __typeof__(*(v)) *_v = (v);                                            \
        /* Can't use a switch () here: gcc isn't always smart enough for that... */     \
        if ((i) == -16)                                                                 \
-               IA64_FETCHADD(_tmp, _v, -16, sizeof(*(v)));                             \
+               IA64_FETCHADD(_tmp, _v, -16, sizeof(*(v)), sem);                        \
        else if ((i) == -8)                                                             \
-               IA64_FETCHADD(_tmp, _v, -8, sizeof(*(v)));                              \
+               IA64_FETCHADD(_tmp, _v, -8, sizeof(*(v)), sem);                         \
        else if ((i) == -4)                                                             \
-               IA64_FETCHADD(_tmp, _v, -4, sizeof(*(v)));                              \
+               IA64_FETCHADD(_tmp, _v, -4, sizeof(*(v)), sem);                         \
        else if ((i) == -1)                                                             \
-               IA64_FETCHADD(_tmp, _v, -1, sizeof(*(v)));                              \
+               IA64_FETCHADD(_tmp, _v, -1, sizeof(*(v)), sem);                         \
        else if ((i) == 1)                                                              \
-               IA64_FETCHADD(_tmp, _v, 1, sizeof(*(v)));                               \
+               IA64_FETCHADD(_tmp, _v, 1, sizeof(*(v)), sem);                          \
        else if ((i) == 4)                                                              \
-               IA64_FETCHADD(_tmp, _v, 4, sizeof(*(v)));                               \
+               IA64_FETCHADD(_tmp, _v, 4, sizeof(*(v)), sem);                          \
        else if ((i) == 8)                                                              \
-               IA64_FETCHADD(_tmp, _v, 8, sizeof(*(v)));                               \
+               IA64_FETCHADD(_tmp, _v, 8, sizeof(*(v)), sem);                          \
        else if ((i) == 16)                                                             \
-               IA64_FETCHADD(_tmp, _v, 16, sizeof(*(v)));                              \
+               IA64_FETCHADD(_tmp, _v, 16, sizeof(*(v)), sem);                         \
        else                                                                            \
                _tmp = __bad_increment_for_ia64_fetch_and_add();                        \
-       (__typeof__(*(v))) (_tmp + (i));        /* return new value */                  \
+       (__typeof__(*(v))) (_tmp);      /* return old value */                          \
 })
 
+#define ia64_fetch_and_add(i,v)        (ia64_fetchadd(i, v, "rel") + (i)) /* return new value */
+
 /*
  * This function doesn't exist, so you'll get a linker error if
  * something tries to do an invalid xchg().
@@ -127,7 +129,7 @@ extern long __cmpxchg_called_with_bad_pointer(void);
              case 8: _o_ = (__u64) (long) (old); break;                                \
              default: break;                                                           \
        }                                                                               \
-        __asm__ __volatile__ ("mov ar.ccv=%0;;" :: "rO"(_o_));                         \
+       __asm__ __volatile__ ("mov ar.ccv=%0;;" :: "rO"(_o_));                          \
        switch (size) {                                                                 \
              case 1:                                                                   \
                __asm__ __volatile__ ("cmpxchg1."sem" %0=[%1],%2,ar.ccv"                \
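
The rewrite above gives ia64_fetchadd() an acquire/release argument and makes it return the value the location held before the add; ia64_fetch_and_add() keeps its return-the-new-value contract by adding i back in. A hedged illustration (kernel context assumed, counter starting at 5):

	static void fetchadd_demo(volatile long *ctr)        /* assume *ctr == 5 */
	{
	        long old = ia64_fetchadd(1, ctr, "rel");     /* old == 5, *ctr == 6 */
	        long new = ia64_fetch_and_add(1, ctr);       /* new == 7, *ctr == 7 */

	        (void) old; (void) new;
	}
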
index 0dd5ee4..fcc63e1 100644 (file)
@@ -13,7 +13,7 @@
  * over and over again with slight variations and possibly making a
  * mistake somewhere.
  *
- * Copyright (C) 1998-2002 Hewlett-Packard Co
+ * Copyright (C) 1998-2003 Hewlett-Packard Co
  *     David Mosberger-Tang <davidm@hpl.hp.com>
  * Copyright (C) 1999 Asit Mallick <asit.k.mallick@intel.com>
  * Copyright (C) 1999 Don Dugger <don.dugger@intel.com>
  */
 #define IO_SPACE_LIMIT         0xffffffffffffffffUL
 
+#define MAX_IO_SPACES                  16
+#define IO_SPACE_BITS                  24
+#define IO_SPACE_SIZE                  (1UL << IO_SPACE_BITS)
+
+#define IO_SPACE_NR(port)              ((port) >> IO_SPACE_BITS)
+#define IO_SPACE_BASE(space)           ((space) << IO_SPACE_BITS)
+#define IO_SPACE_PORT(port)            ((port) & (IO_SPACE_SIZE - 1))
+
+#define IO_SPACE_SPARSE_ENCODING(p)    ((((p) >> 2) << 12) | ((p) & 0xfff))
+
+struct io_space {
+       unsigned long mmio_base;        /* base in MMIO space */
+       int sparse;
+};
+
+extern struct io_space io_space[];
+extern unsigned int num_io_spaces;
+
 # ifdef __KERNEL__
 
 #include <asm/machvec.h>
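
A worked example of the sparse encoding above: legacy port 0x3f8 (COM1) in I/O space 0 becomes MMIO offset ((0x3f8 >> 2) << 12) | (0x3f8 & 0xfff) = 0xfe000 | 0x3f8 = 0xfe3f8, which is then OR'd into that space's mmio_base. Checked standalone:

	#include <stdio.h>

	#define IO_SPACE_SPARSE_ENCODING(p) ((((p) >> 2) << 12) | ((p) & 0xfff))

	int main(void)
	{
	        unsigned long port = 0x3f8;   /* COM1 */

	        printf("0x%lx -> 0x%lx\n", port, IO_SPACE_SPARSE_ENCODING(port));
	        /* prints: 0x3f8 -> 0xfe3f8 */
	        return 0;
	}
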
@@ -80,13 +98,34 @@ __ia64_get_io_port_base (void)
 static inline void*
 __ia64_mk_io_addr (unsigned long port)
 {
-       const unsigned long io_base = __ia64_get_io_port_base();
-       unsigned long addr;
+       struct io_space *space;
+       unsigned long offset;
 
-       addr = io_base | ((port >> 2) << 12) | (port & 0xfff);
-       return (void *) addr;
+       space = &io_space[IO_SPACE_NR(port)];
+       port = IO_SPACE_PORT(port);
+       if (space->sparse)
+               offset = IO_SPACE_SPARSE_ENCODING(port);
+       else
+               offset = port;
+
+       return (void *) (space->mmio_base | offset);
 }
 
+#define __ia64_inb     ___ia64_inb
+#define __ia64_inw     ___ia64_inw
+#define __ia64_inl     ___ia64_inl
+#define __ia64_outb    ___ia64_outb
+#define __ia64_outw    ___ia64_outw
+#define __ia64_outl    ___ia64_outl
+#define __ia64_readb   ___ia64_readb
+#define __ia64_readw   ___ia64_readw
+#define __ia64_readl   ___ia64_readl
+#define __ia64_readq   ___ia64_readq
+#define __ia64_writeb  ___ia64_writeb
+#define __ia64_writew  ___ia64_writew
+#define __ia64_writel  ___ia64_writel
+#define __ia64_writeq  ___ia64_writeq
+
 /*
  * For the in/out routines, we need to do "mf.a" _after_ doing the I/O access to ensure
  * that the access has completed before executing other I/O accesses.  Since we're doing
@@ -96,7 +135,7 @@ __ia64_mk_io_addr (unsigned long port)
  */
 
 static inline unsigned int
-__ia64_inb (unsigned long port)
+___ia64_inb (unsigned long port)
 {
        volatile unsigned char *addr = __ia64_mk_io_addr(port);
        unsigned char ret;
@@ -107,7 +146,7 @@ __ia64_inb (unsigned long port)
 }
 
 static inline unsigned int
-__ia64_inw (unsigned long port)
+___ia64_inw (unsigned long port)
 {
        volatile unsigned short *addr = __ia64_mk_io_addr(port);
        unsigned short ret;
@@ -118,7 +157,7 @@ __ia64_inw (unsigned long port)
 }
 
 static inline unsigned int
-__ia64_inl (unsigned long port)
+___ia64_inl (unsigned long port)
 {
        volatile unsigned int *addr = __ia64_mk_io_addr(port);
        unsigned int ret;
@@ -129,7 +168,7 @@ __ia64_inl (unsigned long port)
 }
 
 static inline void
-__ia64_outb (unsigned char val, unsigned long port)
+___ia64_outb (unsigned char val, unsigned long port)
 {
        volatile unsigned char *addr = __ia64_mk_io_addr(port);
 
@@ -138,7 +177,7 @@ __ia64_outb (unsigned char val, unsigned long port)
 }
 
 static inline void
-__ia64_outw (unsigned short val, unsigned long port)
+___ia64_outw (unsigned short val, unsigned long port)
 {
        volatile unsigned short *addr = __ia64_mk_io_addr(port);
 
@@ -147,7 +186,7 @@ __ia64_outw (unsigned short val, unsigned long port)
 }
 
 static inline void
-__ia64_outl (unsigned int val, unsigned long port)
+___ia64_outl (unsigned int val, unsigned long port)
 {
        volatile unsigned int *addr = __ia64_mk_io_addr(port);
 
@@ -160,17 +199,8 @@ __insb (unsigned long port, void *dst, unsigned long count)
 {
        unsigned char *dp = dst;
 
-       if (platform_inb == __ia64_inb) {
-               volatile unsigned char *addr = __ia64_mk_io_addr(port);
-
-               __ia64_mf_a();
-               while (count--)
-                       *dp++ = *addr;
-               __ia64_mf_a();
-       } else
-               while (count--)
-                       *dp++ = platform_inb(port);
-       return;
+       while (count--)
+               *dp++ = platform_inb(port);
 }
 
 static inline void
@@ -178,17 +208,8 @@ __insw (unsigned long port, void *dst, unsigned long count)
 {
        unsigned short *dp = dst;
 
-       if (platform_inw == __ia64_inw) {
-               volatile unsigned short *addr = __ia64_mk_io_addr(port);
-
-               __ia64_mf_a();
-               while (count--)
-                       *dp++ = *addr;
-               __ia64_mf_a();
-       } else
-               while (count--)
-                       *dp++ = platform_inw(port);
-       return;
+       while (count--)
+               *dp++ = platform_inw(port);
 }
 
 static inline void
@@ -196,17 +217,8 @@ __insl (unsigned long port, void *dst, unsigned long count)
 {
        unsigned int *dp = dst;
 
-       if (platform_inl == __ia64_inl) {
-               volatile unsigned int *addr = __ia64_mk_io_addr(port);
-
-               __ia64_mf_a();
-               while (count--)
-                       *dp++ = *addr;
-               __ia64_mf_a();
-       } else
-               while (count--)
-                       *dp++ = platform_inl(port);
-       return;
+       while (count--)
+               *dp++ = platform_inl(port);
 }
 
 static inline void
@@ -214,16 +226,8 @@ __outsb (unsigned long port, const void *src, unsigned long count)
 {
        const unsigned char *sp = src;
 
-       if (platform_outb == __ia64_outb) {
-               volatile unsigned char *addr = __ia64_mk_io_addr(port);
-
-               while (count--)
-                       *addr = *sp++;
-               __ia64_mf_a();
-       } else
-               while (count--)
-                       platform_outb(*sp++, port);
-       return;
+       while (count--)
+               platform_outb(*sp++, port);
 }
 
 static inline void
@@ -231,16 +235,8 @@ __outsw (unsigned long port, const void *src, unsigned long count)
 {
        const unsigned short *sp = src;
 
-       if (platform_outw == __ia64_outw) {
-               volatile unsigned short *addr = __ia64_mk_io_addr(port);
-
-               while (count--)
-                       *addr = *sp++;
-               __ia64_mf_a();
-       } else
-               while (count--)
-                       platform_outw(*sp++, port);
-       return;
+       while (count--)
+               platform_outw(*sp++, port);
 }
 
 static inline void
@@ -248,16 +244,8 @@ __outsl (unsigned long port, void *src, unsigned long count)
 {
        const unsigned int *sp = src;
 
-       if (platform_outl == __ia64_outl) {
-               volatile unsigned int *addr = __ia64_mk_io_addr(port);
-
-               while (count--)
-                       *addr = *sp++;
-               __ia64_mf_a();
-       } else
-               while (count--)
-                       platform_outl(*sp++, port);
-       return;
+       while (count--)
+               platform_outl(*sp++, port);
 }
 
 /*
@@ -294,25 +282,25 @@ __outsl (unsigned long port, void *src, unsigned long count)
  * hopefully it'll stay that way).
  */
 static inline unsigned char
-__ia64_readb (void *addr)
+___ia64_readb (void *addr)
 {
        return *(volatile unsigned char *)addr;
 }
 
 static inline unsigned short
-__ia64_readw (void *addr)
+___ia64_readw (void *addr)
 {
        return *(volatile unsigned short *)addr;
 }
 
 static inline unsigned int
-__ia64_readl (void *addr)
+___ia64_readl (void *addr)
 {
        return *(volatile unsigned int *) addr;
 }
 
 static inline unsigned long
-__ia64_readq (void *addr)
+___ia64_readq (void *addr)
 {
        return *(volatile unsigned long *) addr;
 }
index 03f2571..410fb72 100644 (file)
@@ -57,6 +57,7 @@ extern void __init iosapic_init (unsigned long address,
 extern int gsi_to_vector (unsigned int gsi);
 extern int gsi_to_irq (unsigned int gsi);
 extern void __init iosapic_parse_prt (void);
+extern void iosapic_enable_intr (unsigned int vector);
 extern int iosapic_register_intr (unsigned int gsi, unsigned long polarity,
                                  unsigned long trigger);
 extern void __init iosapic_override_isa_irq (unsigned int isa_irq, unsigned int gsi,
index 31885a2..5f23c35 100644 (file)
@@ -4,7 +4,7 @@
  * Copyright (C) 1999 Silicon Graphics, Inc.
  * Copyright (C) Srinivasa Thirumalachar <sprasad@engr.sgi.com>
  * Copyright (C) Vijay Chander <vijay@engr.sgi.com>
- * Copyright (C) 1999-2001 Hewlett-Packard Co.
+ * Copyright (C) 1999-2001, 2003 Hewlett-Packard Co.
  *     David Mosberger-Tang <davidm@hpl.hp.com>
  */
 #ifndef _ASM_IA64_MACHVEC_H
@@ -14,7 +14,7 @@
 #include <linux/types.h>
 
 /* forward declarations: */
-struct pci_dev;
+struct device;
 struct pt_regs;
 struct scatterlist;
 struct irq_desc;
@@ -33,17 +33,17 @@ typedef struct irq_desc *ia64_mv_irq_desc (unsigned int);
 typedef u8 ia64_mv_irq_to_vector (u8);
 typedef unsigned int ia64_mv_local_vector_to_irq (u8 vector);
 
-/* PCI-DMA interface: */
-typedef void ia64_mv_pci_dma_init (void);
-typedef void *ia64_mv_pci_alloc_consistent (struct pci_dev *, size_t, dma_addr_t *);
-typedef void ia64_mv_pci_free_consistent (struct pci_dev *, size_t, void *, dma_addr_t);
-typedef dma_addr_t ia64_mv_pci_map_single (struct pci_dev *, void *, size_t, int);
-typedef void ia64_mv_pci_unmap_single (struct pci_dev *, dma_addr_t, size_t, int);
-typedef int ia64_mv_pci_map_sg (struct pci_dev *, struct scatterlist *, int, int);
-typedef void ia64_mv_pci_unmap_sg (struct pci_dev *, struct scatterlist *, int, int);
-typedef void ia64_mv_pci_dma_sync_single (struct pci_dev *, dma_addr_t, size_t, int);
-typedef void ia64_mv_pci_dma_sync_sg (struct pci_dev *, struct scatterlist *, int, int);
-typedef int ia64_mv_pci_dma_supported (struct pci_dev *, u64);
+/* DMA-mapping interface: */
+typedef void ia64_mv_dma_init (void);
+typedef void *ia64_mv_dma_alloc_coherent (struct device *, size_t, dma_addr_t *, int);
+typedef void ia64_mv_dma_free_coherent (struct device *, size_t, void *, dma_addr_t);
+typedef dma_addr_t ia64_mv_dma_map_single (struct device *, void *, size_t, int);
+typedef void ia64_mv_dma_unmap_single (struct device *, dma_addr_t, size_t, int);
+typedef int ia64_mv_dma_map_sg (struct device *, struct scatterlist *, int, int);
+typedef void ia64_mv_dma_unmap_sg (struct device *, struct scatterlist *, int, int);
+typedef void ia64_mv_dma_sync_single (struct device *, dma_addr_t, size_t, int);
+typedef void ia64_mv_dma_sync_sg (struct device *, struct scatterlist *, int, int);
+typedef int ia64_mv_dma_supported (struct device *, u64);
 
 /*
  * WARNING: The legacy I/O space is _architected_.  Platforms are
@@ -66,6 +66,7 @@ typedef unsigned int ia64_mv_readl_t (void *);
 typedef unsigned long ia64_mv_readq_t (void *);
 
 extern void machvec_noop (void);
+extern void machvec_memory_fence (void);
 
 # if defined (CONFIG_IA64_HP_SIM)
 #  include <asm/machvec_hpsim.h>
@@ -92,16 +93,16 @@ extern void machvec_noop (void);
 #  define platform_log_print   ia64_mv.log_print
 #  define platform_send_ipi    ia64_mv.send_ipi
 #  define platform_global_tlb_purge    ia64_mv.global_tlb_purge
-#  define platform_pci_dma_init                ia64_mv.dma_init
-#  define platform_pci_alloc_consistent        ia64_mv.alloc_consistent
-#  define platform_pci_free_consistent ia64_mv.free_consistent
-#  define platform_pci_map_single      ia64_mv.map_single
-#  define platform_pci_unmap_single    ia64_mv.unmap_single
-#  define platform_pci_map_sg          ia64_mv.map_sg
-#  define platform_pci_unmap_sg                ia64_mv.unmap_sg
-#  define platform_pci_dma_sync_single ia64_mv.sync_single
-#  define platform_pci_dma_sync_sg     ia64_mv.sync_sg
-#  define platform_pci_dma_supported   ia64_mv.dma_supported
+#  define platform_dma_init            ia64_mv.dma_init
+#  define platform_dma_alloc_coherent  ia64_mv.dma_alloc_coherent
+#  define platform_dma_free_coherent   ia64_mv.dma_free_coherent
+#  define platform_dma_map_single      ia64_mv.dma_map_single
+#  define platform_dma_unmap_single    ia64_mv.dma_unmap_single
+#  define platform_dma_map_sg          ia64_mv.dma_map_sg
+#  define platform_dma_unmap_sg                ia64_mv.dma_unmap_sg
+#  define platform_dma_sync_single     ia64_mv.dma_sync_single
+#  define platform_dma_sync_sg         ia64_mv.dma_sync_sg
+#  define platform_dma_supported       ia64_mv.dma_supported
 #  define platform_irq_desc            ia64_mv.irq_desc
 #  define platform_irq_to_vector       ia64_mv.irq_to_vector
 #  define platform_local_vector_to_irq ia64_mv.local_vector_to_irq
@@ -119,7 +120,7 @@ extern void machvec_noop (void);
 
 /* __attribute__((__aligned__(16))) is required to make size of the
  * structure multiple of 16 bytes.
- * This will fillup the holes created because of section 3.3.1 in 
+ * This will fill up the holes created because of section 3.3.1 in
  * Software Conventions guide.
  */
 struct ia64_machine_vector {
@@ -133,16 +134,16 @@ struct ia64_machine_vector {
        ia64_mv_log_print_t *log_print;
        ia64_mv_send_ipi_t *send_ipi;
        ia64_mv_global_tlb_purge_t *global_tlb_purge;
-       ia64_mv_pci_dma_init *dma_init;
-       ia64_mv_pci_alloc_consistent *alloc_consistent;
-       ia64_mv_pci_free_consistent *free_consistent;
-       ia64_mv_pci_map_single *map_single;
-       ia64_mv_pci_unmap_single *unmap_single;
-       ia64_mv_pci_map_sg *map_sg;
-       ia64_mv_pci_unmap_sg *unmap_sg;
-       ia64_mv_pci_dma_sync_single *sync_single;
-       ia64_mv_pci_dma_sync_sg *sync_sg;
-       ia64_mv_pci_dma_supported *dma_supported;
+       ia64_mv_dma_init *dma_init;
+       ia64_mv_dma_alloc_coherent *dma_alloc_coherent;
+       ia64_mv_dma_free_coherent *dma_free_coherent;
+       ia64_mv_dma_map_single *dma_map_single;
+       ia64_mv_dma_unmap_single *dma_unmap_single;
+       ia64_mv_dma_map_sg *dma_map_sg;
+       ia64_mv_dma_unmap_sg *dma_unmap_sg;
+       ia64_mv_dma_sync_single *dma_sync_single;
+       ia64_mv_dma_sync_sg *dma_sync_sg;
+       ia64_mv_dma_supported *dma_supported;
        ia64_mv_irq_desc *irq_desc;
        ia64_mv_irq_to_vector *irq_to_vector;
        ia64_mv_local_vector_to_irq *local_vector_to_irq;
@@ -170,16 +171,16 @@ struct ia64_machine_vector {
        platform_log_print,                     \
        platform_send_ipi,                      \
        platform_global_tlb_purge,              \
-       platform_pci_dma_init,                  \
-       platform_pci_alloc_consistent,          \
-       platform_pci_free_consistent,           \
-       platform_pci_map_single,                \
-       platform_pci_unmap_single,              \
-       platform_pci_map_sg,                    \
-       platform_pci_unmap_sg,                  \
-       platform_pci_dma_sync_single,           \
-       platform_pci_dma_sync_sg,               \
-       platform_pci_dma_supported,             \
+       platform_dma_init,                      \
+       platform_dma_alloc_coherent,            \
+       platform_dma_free_coherent,             \
+       platform_dma_map_single,                \
+       platform_dma_unmap_single,              \
+       platform_dma_map_sg,                    \
+       platform_dma_unmap_sg,                  \
+       platform_dma_sync_single,               \
+       platform_dma_sync_sg,                   \
+       platform_dma_supported,                 \
        platform_irq_desc,                      \
        platform_irq_to_vector,                 \
        platform_local_vector_to_irq,           \
@@ -205,16 +206,16 @@ extern void machvec_init (const char *name);
 /*
  * Declare default routines which aren't declared anywhere else:
  */
-extern ia64_mv_pci_dma_init swiotlb_init;
-extern ia64_mv_pci_alloc_consistent swiotlb_alloc_consistent;
-extern ia64_mv_pci_free_consistent swiotlb_free_consistent;
-extern ia64_mv_pci_map_single swiotlb_map_single;
-extern ia64_mv_pci_unmap_single swiotlb_unmap_single;
-extern ia64_mv_pci_map_sg swiotlb_map_sg;
-extern ia64_mv_pci_unmap_sg swiotlb_unmap_sg;
-extern ia64_mv_pci_dma_sync_single swiotlb_sync_single;
-extern ia64_mv_pci_dma_sync_sg swiotlb_sync_sg;
-extern ia64_mv_pci_dma_supported swiotlb_pci_dma_supported;
+extern ia64_mv_dma_init                        swiotlb_init;
+extern ia64_mv_dma_alloc_coherent      swiotlb_alloc_coherent;
+extern ia64_mv_dma_free_coherent       swiotlb_free_coherent;
+extern ia64_mv_dma_map_single          swiotlb_map_single;
+extern ia64_mv_dma_unmap_single                swiotlb_unmap_single;
+extern ia64_mv_dma_map_sg              swiotlb_map_sg;
+extern ia64_mv_dma_unmap_sg            swiotlb_unmap_sg;
+extern ia64_mv_dma_sync_single         swiotlb_sync_single;
+extern ia64_mv_dma_sync_sg             swiotlb_sync_sg;
+extern ia64_mv_dma_supported           swiotlb_dma_supported;
 
 /*
  * Define default versions so we can extend machvec for new platforms without having
@@ -247,35 +248,35 @@ extern ia64_mv_pci_dma_supported swiotlb_pci_dma_supported;
 #ifndef platform_global_tlb_purge
 # define platform_global_tlb_purge     ia64_global_tlb_purge /* default to architected version */
 #endif
-#ifndef platform_pci_dma_init
-# define platform_pci_dma_init         swiotlb_init
+#ifndef platform_dma_init
+# define platform_dma_init             swiotlb_init
 #endif
-#ifndef platform_pci_alloc_consistent
-# define platform_pci_alloc_consistent swiotlb_alloc_consistent
+#ifndef platform_dma_alloc_coherent
+# define platform_dma_alloc_coherent   swiotlb_alloc_coherent
 #endif
-#ifndef platform_pci_free_consistent
-# define platform_pci_free_consistent  swiotlb_free_consistent
+#ifndef platform_dma_free_coherent
+# define platform_dma_free_coherent    swiotlb_free_coherent
 #endif
-#ifndef platform_pci_map_single
-# define platform_pci_map_single       swiotlb_map_single
+#ifndef platform_dma_map_single
+# define platform_dma_map_single       swiotlb_map_single
 #endif
-#ifndef platform_pci_unmap_single
-# define platform_pci_unmap_single     swiotlb_unmap_single
+#ifndef platform_dma_unmap_single
+# define platform_dma_unmap_single     swiotlb_unmap_single
 #endif
-#ifndef platform_pci_map_sg
-# define platform_pci_map_sg           swiotlb_map_sg
+#ifndef platform_dma_map_sg
+# define platform_dma_map_sg           swiotlb_map_sg
 #endif
-#ifndef platform_pci_unmap_sg
-# define platform_pci_unmap_sg         swiotlb_unmap_sg
+#ifndef platform_dma_unmap_sg
+# define platform_dma_unmap_sg         swiotlb_unmap_sg
 #endif
-#ifndef platform_pci_dma_sync_single
-# define platform_pci_dma_sync_single  swiotlb_sync_single
+#ifndef platform_dma_sync_single
+# define platform_dma_sync_single      swiotlb_sync_single
 #endif
-#ifndef platform_pci_dma_sync_sg
-# define platform_pci_dma_sync_sg      swiotlb_sync_sg
+#ifndef platform_dma_sync_sg
+# define platform_dma_sync_sg          swiotlb_sync_sg
 #endif
-#ifndef platform_pci_dma_supported
-# define  platform_pci_dma_supported   swiotlb_pci_dma_supported
+#ifndef platform_dma_supported
+# define  platform_dma_supported       swiotlb_dma_supported
 #endif
 #ifndef platform_irq_desc
 # define platform_irq_desc             __ia64_irq_desc
index d6a6ef6..9927ba1 100644 (file)
@@ -2,13 +2,13 @@
 #define _ASM_IA64_MACHVEC_HPZX1_h
 
 extern ia64_mv_setup_t dig_setup;
-extern ia64_mv_pci_alloc_consistent sba_alloc_consistent;
-extern ia64_mv_pci_free_consistent sba_free_consistent;
-extern ia64_mv_pci_map_single sba_map_single;
-extern ia64_mv_pci_unmap_single sba_unmap_single;
-extern ia64_mv_pci_map_sg sba_map_sg;
-extern ia64_mv_pci_unmap_sg sba_unmap_sg;
-extern ia64_mv_pci_dma_supported sba_dma_supported;
+extern ia64_mv_dma_alloc_coherent      sba_alloc_coherent;
+extern ia64_mv_dma_free_coherent       sba_free_coherent;
+extern ia64_mv_dma_map_single          sba_map_single;
+extern ia64_mv_dma_unmap_single                sba_unmap_single;
+extern ia64_mv_dma_map_sg              sba_map_sg;
+extern ia64_mv_dma_unmap_sg            sba_unmap_sg;
+extern ia64_mv_dma_supported           sba_dma_supported;
 
 /*
  * This stuff has dual use!
@@ -19,15 +19,15 @@ extern ia64_mv_pci_dma_supported sba_dma_supported;
  */
 #define platform_name                  "hpzx1"
 #define platform_setup                 dig_setup
-#define platform_pci_dma_init          ((ia64_mv_pci_dma_init *) machvec_noop)
-#define platform_pci_alloc_consistent  sba_alloc_consistent
-#define platform_pci_free_consistent   sba_free_consistent
-#define platform_pci_map_single                sba_map_single
-#define platform_pci_unmap_single      sba_unmap_single
-#define platform_pci_map_sg            sba_map_sg
-#define platform_pci_unmap_sg          sba_unmap_sg
-#define platform_pci_dma_sync_single   ((ia64_mv_pci_dma_sync_single *) machvec_noop)
-#define platform_pci_dma_sync_sg       ((ia64_mv_pci_dma_sync_sg *) machvec_noop)
-#define platform_pci_dma_supported     sba_dma_supported
+#define platform_dma_init              ((ia64_mv_dma_init *) machvec_noop)
+#define platform_dma_alloc_coherent    sba_alloc_coherent
+#define platform_dma_free_coherent     sba_free_coherent
+#define platform_dma_map_single                sba_map_single
+#define platform_dma_unmap_single      sba_unmap_single
+#define platform_dma_map_sg            sba_map_sg
+#define platform_dma_unmap_sg          sba_unmap_sg
+#define platform_dma_sync_single       ((ia64_mv_dma_sync_single *) machvec_memory_fence)
+#define platform_dma_sync_sg           ((ia64_mv_dma_sync_sg *) machvec_memory_fence)
+#define platform_dma_supported         sba_dma_supported
 
 #endif /* _ASM_IA64_MACHVEC_HPZX1_h */
index 71912c1..c90ecf8 100644 (file)
@@ -1,7 +1,3 @@
-#define __MACHVEC_HDR(n)               <asm/machvec_##n##.h>
-#define __MACHVEC_EXPAND(n)            __MACHVEC_HDR(n)
-#define MACHVEC_PLATFORM_HEADER                __MACHVEC_EXPAND(MACHVEC_PLATFORM_NAME)
-
 #include <asm/machvec.h>
 
 extern ia64_mv_send_ipi_t ia64_send_ipi;
@@ -16,6 +12,10 @@ extern ia64_mv_inl_t __ia64_inl;
 extern ia64_mv_outb_t __ia64_outb;
 extern ia64_mv_outw_t __ia64_outw;
 extern ia64_mv_outl_t __ia64_outl;
+extern ia64_mv_readb_t __ia64_readb;
+extern ia64_mv_readw_t __ia64_readw;
+extern ia64_mv_readl_t __ia64_readl;
+extern ia64_mv_readq_t __ia64_readq;
 
 #define MACHVEC_HELPER(name)                                                                   \
  struct ia64_machine_vector machvec_##name __attribute__ ((unused, __section__ (".machvec")))  \
index 354b029..4c8c9f7 100644 (file)
@@ -44,14 +44,14 @@ extern ia64_mv_inl_t sn1_inl;
 extern ia64_mv_outb_t sn1_outb;
 extern ia64_mv_outw_t sn1_outw;
 extern ia64_mv_outl_t sn1_outl;
-extern ia64_mv_pci_alloc_consistent    sn1_pci_alloc_consistent;
-extern ia64_mv_pci_free_consistent     sn1_pci_free_consistent;
-extern ia64_mv_pci_map_single          sn1_pci_map_single;
-extern ia64_mv_pci_unmap_single                sn1_pci_unmap_single;
-extern ia64_mv_pci_map_sg              sn1_pci_map_sg;
-extern ia64_mv_pci_unmap_sg            sn1_pci_unmap_sg;
-extern ia64_mv_pci_dma_sync_single     sn1_pci_dma_sync_single;
-extern ia64_mv_pci_dma_sync_sg         sn1_pci_dma_sync_sg;
+extern ia64_mv_dma_alloc_coherent      sn1_dma_alloc_coherent;
+extern ia64_mv_dma_free_coherent       sn1_dma_free_coherent;
+extern ia64_mv_dma_map_single          sn1_dma_map_single;
+extern ia64_mv_dma_unmap_single                sn1_dma_unmap_single;
+extern ia64_mv_dma_map_sg              sn1_dma_map_sg;
+extern ia64_mv_dma_unmap_sg            sn1_dma_unmap_sg;
+extern ia64_mv_dma_sync_single         sn1_dma_sync_single;
+extern ia64_mv_dma_sync_sg             sn1_dma_sync_sg;
 
 /*
  * This stuff has dual use!
@@ -72,14 +72,14 @@ extern ia64_mv_pci_dma_sync_sg              sn1_pci_dma_sync_sg;
 #define platform_outb          sn1_outb
 #define platform_outw          sn1_outw
 #define platform_outl          sn1_outl
-#define platform_pci_dma_init  machvec_noop
-#define platform_pci_alloc_consistent  sn1_pci_alloc_consistent
-#define platform_pci_free_consistent   sn1_pci_free_consistent
-#define platform_pci_map_single                sn1_pci_map_single
-#define platform_pci_unmap_single      sn1_pci_unmap_single
-#define platform_pci_map_sg            sn1_pci_map_sg
-#define platform_pci_unmap_sg          sn1_pci_unmap_sg
-#define platform_pci_dma_sync_single   sn1_pci_dma_sync_single
-#define platform_pci_dma_sync_sg       sn1_pci_dma_sync_sg
+#define platform_dma_init      machvec_noop
+#define platform_dma_alloc_coherent    sn1_dma_alloc_coherent
+#define platform_dma_free_coherent     sn1_dma_free_coherent
+#define platform_dma_map_single                sn1_dma_map_single
+#define platform_dma_unmap_single      sn1_dma_unmap_single
+#define platform_dma_map_sg            sn1_dma_map_sg
+#define platform_dma_unmap_sg          sn1_dma_unmap_sg
+#define platform_dma_sync_single       sn1_dma_sync_single
+#define platform_dma_sync_sg           sn1_dma_sync_sg
 
 #endif /* _ASM_IA64_MACHVEC_SN1_h */
index 6df35ac..9146e1c 100644 (file)
@@ -51,15 +51,15 @@ extern ia64_mv_readb_t __sn_readb;
 extern ia64_mv_readw_t __sn_readw;
 extern ia64_mv_readl_t __sn_readl;
 extern ia64_mv_readq_t __sn_readq;
-extern ia64_mv_pci_alloc_consistent    sn_pci_alloc_consistent;
-extern ia64_mv_pci_free_consistent     sn_pci_free_consistent;
-extern ia64_mv_pci_map_single          sn_pci_map_single;
-extern ia64_mv_pci_unmap_single                sn_pci_unmap_single;
-extern ia64_mv_pci_map_sg              sn_pci_map_sg;
-extern ia64_mv_pci_unmap_sg            sn_pci_unmap_sg;
-extern ia64_mv_pci_dma_sync_single     sn_pci_dma_sync_single;
-extern ia64_mv_pci_dma_sync_sg         sn_pci_dma_sync_sg;
-extern ia64_mv_pci_dma_supported       sn_pci_dma_supported;
+extern ia64_mv_dma_alloc_coherent      sn_dma_alloc_coherent;
+extern ia64_mv_dma_free_coherent       sn_dma_free_coherent;
+extern ia64_mv_dma_map_single          sn_dma_map_single;
+extern ia64_mv_dma_unmap_single                sn_dma_unmap_single;
+extern ia64_mv_dma_map_sg              sn_dma_map_sg;
+extern ia64_mv_dma_unmap_sg            sn_dma_unmap_sg;
+extern ia64_mv_dma_sync_single         sn_dma_sync_single;
+extern ia64_mv_dma_sync_sg             sn_dma_sync_sg;
+extern ia64_mv_dma_supported           sn_dma_supported;
 
 /*
  * This stuff has dual use!
@@ -88,15 +88,15 @@ extern ia64_mv_pci_dma_supported    sn_pci_dma_supported;
 #define platform_irq_desc              sn_irq_desc
 #define platform_irq_to_vector         sn_irq_to_vector
 #define platform_local_vector_to_irq   sn_local_vector_to_irq
-#define platform_pci_dma_init          machvec_noop
-#define platform_pci_alloc_consistent  sn_pci_alloc_consistent
-#define platform_pci_free_consistent   sn_pci_free_consistent
-#define platform_pci_map_single                sn_pci_map_single
-#define platform_pci_unmap_single      sn_pci_unmap_single
-#define platform_pci_map_sg            sn_pci_map_sg
-#define platform_pci_unmap_sg          sn_pci_unmap_sg
-#define platform_pci_dma_sync_single   sn_pci_dma_sync_single
-#define platform_pci_dma_sync_sg       sn_pci_dma_sync_sg
-#define platform_pci_dma_supported     sn_pci_dma_supported
+#define platform_dma_init              machvec_noop
+#define platform_dma_alloc_coherent    sn_dma_alloc_coherent
+#define platform_dma_free_coherent     sn_dma_free_coherent
+#define platform_dma_map_single                sn_dma_map_single
+#define platform_dma_unmap_single      sn_dma_unmap_single
+#define platform_dma_map_sg            sn_dma_map_sg
+#define platform_dma_unmap_sg          sn_dma_unmap_sg
+#define platform_dma_sync_single       sn_dma_sync_single
+#define platform_dma_sync_sg           sn_dma_sync_sg
+#define platform_dma_supported         sn_dma_supported
 
 #endif /* _ASM_IA64_MACHVEC_SN2_H */
index e4ddfbe..63853a2 100644 (file)
 #define _ASM_IA64_MCA_H
 
 #if !defined(__ASSEMBLY__)
+
+#include <linux/interrupt.h>
 #include <linux/types.h>
+
 #include <asm/param.h>
 #include <asm/sal.h>
 #include <asm/processor.h>
@@ -129,10 +132,10 @@ extern void ia64_os_mca_dispatch_end(void);
 extern void ia64_mca_ucmc_handler(void);
 extern void ia64_monarch_init_handler(void);
 extern void ia64_slave_init_handler(void);
-extern void ia64_mca_rendez_int_handler(int,void *,struct pt_regs *);
-extern void ia64_mca_wakeup_int_handler(int,void *,struct pt_regs *);
-extern void ia64_mca_cmc_int_handler(int,void *,struct pt_regs *);
-extern void ia64_mca_cpe_int_handler(int,void *,struct pt_regs *);
+extern irqreturn_t ia64_mca_rendez_int_handler(int,void *,struct pt_regs *);
+extern irqreturn_t ia64_mca_wakeup_int_handler(int,void *,struct pt_regs *);
+extern irqreturn_t ia64_mca_cmc_int_handler(int,void *,struct pt_regs *);
+extern irqreturn_t ia64_mca_cpe_int_handler(int,void *,struct pt_regs *);
 extern int  ia64_log_print(int,prfunc_t);
 extern void ia64_mca_cmc_vector_setup(void);
 extern int  ia64_mca_check_errors(void);
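The MCA interrupt handlers switch to the new irqreturn_t convention, under which a handler reports whether the interrupt was actually its own. A minimal example of the convention (example_handler is illustrative):

	static irqreturn_t
	example_handler (int irq, void *dev_id, struct pt_regs *regs)
	{
		/* ... service the interrupt ... */
		return IRQ_HANDLED;	/* IRQ_NONE if the interrupt wasn't ours */
	}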
index fb3fb3f..a2d4447 100644 (file)
@@ -37,6 +37,8 @@
 
 # if defined(CONFIG_HUGETLB_PAGE_SIZE_4GB)
 #  define HPAGE_SHIFT  32
+# elif defined(CONFIG_HUGETLB_PAGE_SIZE_1GB)
+#  define HPAGE_SHIFT  30
 # elif defined(CONFIG_HUGETLB_PAGE_SIZE_256MB)
 #  define HPAGE_SHIFT  28
 # elif defined(CONFIG_HUGETLB_PAGE_SIZE_64MB)
@@ -89,7 +91,12 @@ do {                                         \
 #define virt_addr_valid(kaddr) pfn_valid(__pa(kaddr) >> PAGE_SHIFT)
 
 #ifndef CONFIG_DISCONTIGMEM
-#define pfn_valid(pfn)         ((pfn) < max_mapnr)
+# ifdef CONFIG_VIRTUAL_MEM_MAP
+   extern int ia64_pfn_valid (unsigned long pfn);
+#  define pfn_valid(pfn)       (((pfn) < max_mapnr) && ia64_pfn_valid(pfn))
+# else
+#  define pfn_valid(pfn)       ((pfn) < max_mapnr)
+# endif
 #define virt_to_page(kaddr)    pfn_to_page(__pa(kaddr) >> PAGE_SHIFT)
 #define page_to_pfn(page)      ((unsigned long) (page - mem_map))
 #define pfn_to_page(pfn)       (mem_map + (pfn))
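With CONFIG_VIRTUAL_MEM_MAP the mem_map itself is virtually mapped and the page structures covering holes in the physical address space are never backed by memory, so pfn_valid() must additionally check that the struct page can be touched at all. ia64_pfn_valid() can do this with an exception-checked probe; roughly (a sketch, assuming the real body lives in arch/ia64/mm/init.c):

	int
	ia64_pfn_valid (unsigned long pfn)
	{
		char byte;

		/* __get_user fails (non-zero) if the struct page is in a mem_map hole */
		return __get_user(byte, (char *) pfn_to_page(pfn)) == 0;
	}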
index 840ae4b..5640226 100644 (file)
@@ -622,7 +622,8 @@ typedef struct pal_min_state_area_s {
        u64     pmsa_xip;               /* previous iip            */
        u64     pmsa_xpsr;              /* previous psr            */
        u64     pmsa_xfs;               /* previous ifs            */
-       u64     pmsa_reserved[71];      /* pal_min_state_area should total to 1KB */
+       u64     pmsa_br1;               /* branch register 1       */
+       u64     pmsa_reserved[70];      /* pal_min_state_area should total to 1KB */
 } pal_min_state_area_t;
 
 
index 56760f7..3c94af4 100644 (file)
@@ -47,18 +47,7 @@ pcibios_penalize_isa_irq (int irq)
 #define HAVE_ARCH_PCI_MWI 1
 extern int pcibios_prep_mwi (struct pci_dev *);
 
-/*
- * Dynamic DMA mapping API.  See Documentation/DMA-mapping.txt for details.
- */
-#define pci_alloc_consistent           platform_pci_alloc_consistent
-#define pci_free_consistent            platform_pci_free_consistent
-#define pci_map_single                 platform_pci_map_single
-#define pci_unmap_single               platform_pci_unmap_single
-#define pci_map_sg                     platform_pci_map_sg
-#define pci_unmap_sg                   platform_pci_unmap_sg
-#define pci_dma_sync_single            platform_pci_dma_sync_single
-#define pci_dma_sync_sg                        platform_pci_dma_sync_sg
-#define pci_dma_supported              platform_pci_dma_supported
+#include <asm-generic/pci-dma-compat.h>
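The compat header provides the old pci_* entry points as inline wrappers around the generic DMA API, which is what makes the per-platform defines above redundant; its wrappers look roughly like this:

	static inline dma_addr_t
	pci_map_single (struct pci_dev *hwdev, void *ptr, size_t size, int direction)
	{
		return dma_map_single(hwdev == NULL ? NULL : &hwdev->dev,
				      ptr, size, (enum dma_data_direction) direction);
	}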
 
 /* pci_unmap_{single,page} is not a nop, thus... */
 #define DECLARE_PCI_UNMAP_ADDR(ADDR_NAME)      \
@@ -74,18 +63,12 @@ extern int pcibios_prep_mwi (struct pci_dev *);
 #define pci_unmap_len_set(PTR, LEN_NAME, VAL)          \
        (((PTR)->LEN_NAME) = (VAL))
 
-#define pci_map_page(dev,pg,off,size,dir)                              \
-       pci_map_single((dev), page_address(pg) + (off), (size), (dir))
-#define pci_unmap_page(dev,dma_addr,size,dir)                          \
-       pci_unmap_single((dev), (dma_addr), (size), (dir))
-
 /* The ia64 platform always supports 64-bit addressing. */
-#define pci_dac_dma_supported(pci_dev, mask)   (1)
-
-#define pci_dac_page_to_dma(dev,pg,off,dir)    ((dma_addr_t) page_to_bus(pg) + (off))
-#define pci_dac_dma_to_page(dev,dma_addr)      (virt_to_page(bus_to_virt(dma_addr)))
-#define pci_dac_dma_to_offset(dev,dma_addr)    ((dma_addr) & ~PAGE_MASK)
-#define pci_dac_dma_sync_single(dev,dma_addr,len,dir)  do { /* nothing */ } while (0)
+#define pci_dac_dma_supported(pci_dev, mask)           (1)
+#define pci_dac_page_to_dma(dev,pg,off,dir)            ((dma_addr_t) page_to_bus(pg) + (off))
+#define pci_dac_dma_to_page(dev,dma_addr)              (virt_to_page(bus_to_virt(dma_addr)))
+#define pci_dac_dma_to_offset(dev,dma_addr)            ((dma_addr) & ~PAGE_MASK)
+#define pci_dac_dma_sync_single(dev,dma_addr,len,dir)  do { mb(); } while (0)
 
 /* Return the index of the PCI controller for device PDEV. */
 #define pci_controller_num(PDEV)       (0)
@@ -97,12 +80,18 @@ extern int pcibios_prep_mwi (struct pci_dev *);
 extern int pci_mmap_page_range (struct pci_dev *dev, struct vm_area_struct *vma,
                                enum pci_mmap_state mmap_state, int write_combine);
 
+struct pci_window {
+       struct resource resource;
+       u64 offset;
+};
+
 struct pci_controller {
        void *acpi_handle;
        void *iommu;
        int segment;
 
-       u64 mem_offset;
+       unsigned int windows;
+       struct pci_window *window;
 };
 
 #define PCI_CONTROLLER(busdev) ((struct pci_controller *) busdev->sysdata)
index a87a976..5c6ca2d 100644 (file)
@@ -5,7 +5,7 @@
 #include <linux/compiler.h>
 
 /*
- * Copyright (C) 2002 Hewlett-Packard Co
+ * Copyright (C) 2002-2003 Hewlett-Packard Co
  *     David Mosberger-Tang <davidm@hpl.hp.com>
  */
 
@@ -35,6 +35,8 @@ extern unsigned long __per_cpu_offset[NR_CPUS];
 #define EXPORT_PER_CPU_SYMBOL(var) EXPORT_SYMBOL(var##__per_cpu)
 #define EXPORT_PER_CPU_SYMBOL_GPL(var) EXPORT_SYMBOL_GPL(var##__per_cpu)
 
+extern void setup_per_cpu_areas (void);
+
 #endif /* !__ASSEMBLY__ */
 
 #endif /* _ASM_IA64_PERCPU_H */
index af8a393..9db86e5 100644 (file)
@@ -41,6 +41,7 @@
 #define PFM_FL_NOTIFY_BLOCK             0x04   /* block task on user level notifications */
 #define PFM_FL_SYSTEM_WIDE      0x08   /* create a system wide context */
 #define PFM_FL_EXCL_IDLE         0x20   /* exclude idle task from system wide session */
+#define PFM_FL_UNSECURE                 0x40   /* allow unsecure monitoring for non self-monitoring task */
 
 /*
  * PMC flags
@@ -125,7 +126,7 @@ typedef struct {
  * Define the version numbers for both perfmon as a whole and the sampling buffer format.
  */
 #define PFM_VERSION_MAJ                1U
-#define PFM_VERSION_MIN                3U
+#define PFM_VERSION_MIN                4U
 #define PFM_VERSION            (((PFM_VERSION_MAJ&0xffff)<<16)|(PFM_VERSION_MIN & 0xffff))
 
 #define PFM_SMPL_VERSION_MAJ   1U
index 8104da9..ebf187e 100644 (file)
@@ -207,7 +207,13 @@ ia64_phys_addr_valid (unsigned long addr)
 
 #define VMALLOC_START          (0xa000000000000000 + 3*PERCPU_PAGE_SIZE)
 #define VMALLOC_VMADDR(x)      ((unsigned long)(x))
-#define VMALLOC_END            (0xa000000000000000 + (1UL << (4*PAGE_SHIFT - 9)))
+#ifdef CONFIG_VIRTUAL_MEM_MAP
+# define VMALLOC_END_INIT      (0xa000000000000000 + (1UL << (4*PAGE_SHIFT - 9)))
+# define VMALLOC_END           vmalloc_end
+  extern unsigned long vmalloc_end;
+#else
+# define VMALLOC_END           (0xa000000000000000 + (1UL << (4*PAGE_SHIFT - 9)))
+#endif
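With the default 16KB pages (PAGE_SHIFT = 14), 1UL << (4*PAGE_SHIFT - 9) is 1UL << 47, i.e. a 128TB vmalloc arena. Making VMALLOC_END a variable lets the boot code shrink that arena from the top to carve out space for the virtually mapped mem_map, starting from the VMALLOC_END_INIT value above.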
 
 /*
  * Conversion functions: convert page frame number (pfn) and a protection value to a page
@@ -449,6 +455,14 @@ extern unsigned long empty_zero_page[PAGE_SIZE/sizeof(unsigned long)];
 
 typedef pte_t *pte_addr_t;
 
+#  ifdef CONFIG_VIRTUAL_MEM_MAP
+
+  /* an arch-specific mem_map init routine is needed due to holes in a virtual mem_map */
+#   define __HAVE_ARCH_MEMMAP_INIT
+
+    extern void memmap_init (struct page *start, unsigned long size, int nid, unsigned long zone,
+                            unsigned long start_pfn);
+#  endif /* CONFIG_VIRTUAL_MEM_MAP */
 # endif /* !__ASSEMBLY__ */
 
 /*
index 265c0ba..6838714 100644 (file)
 #define TASK_SIZE              (current->thread.task_size)
 
 /*
+ * MM_VM_SIZE(mm) gives the maximum address (plus 1) which may contain a mapping for
+ * address-space MM.  Note that with 32-bit tasks, this is still DEFAULT_TASK_SIZE,
+ * because the kernel may have installed helper-mappings above TASK_SIZE.  For example,
+ * for x86 emulation, the LDT and GDT are mapped above TASK_SIZE.
+ */
+#define MM_VM_SIZE(mm)         DEFAULT_TASK_SIZE
+
+/*
  * This decides where the kernel will search for a free chunk of vm
  * space during mmap's.
  */
@@ -291,7 +299,7 @@ struct thread_struct {
 
 #define start_thread(regs,new_ip,new_sp) do {                                                  \
        set_fs(USER_DS);                                                                        \
-       regs->cr_ipsr = ((regs->cr_ipsr | (IA64_PSR_BITS_TO_SET | IA64_PSR_CPL | IA64_PSR_SP))  \
+       regs->cr_ipsr = ((regs->cr_ipsr | (IA64_PSR_BITS_TO_SET | IA64_PSR_CPL))                \
                         & ~(IA64_PSR_BITS_TO_CLEAR | IA64_PSR_RI | IA64_PSR_IS));              \
        regs->cr_iip = new_ip;                                                                  \
        regs->ar_rsc = 0xf;             /* eager mode, privilege level 3 */                     \
index 351df13..34bd44e 100644 (file)
@@ -227,8 +227,10 @@ struct switch_stack {
   })
 
   struct task_struct;                  /* forward decl */
+  struct unw_frame_info;               /* forward decl */
 
   extern void show_regs (struct pt_regs *);
+  extern void ia64_do_show_stack (struct unw_frame_info *, void *);
   extern unsigned long ia64_get_user_rbs_end (struct task_struct *, struct pt_regs *,
                                              unsigned long *);
   extern long ia64_peek (struct task_struct *, struct switch_stack *, unsigned long,
index 519c90d..eccac9c 100644 (file)
@@ -226,7 +226,7 @@ enum {
 
 /* Encodings for machine check parameter types */
 enum {
-       SAL_MC_PARAM_RENDEZ_INT    = 1, /* Rendezevous interrupt */
+       SAL_MC_PARAM_RENDEZ_INT    = 1, /* Rendezvous interrupt */
        SAL_MC_PARAM_RENDEZ_WAKEUP = 2, /* Wakeup */
        SAL_MC_PARAM_CPE_INT       = 3  /* Corrected Platform Error Int */
 };
index 2d123f4..7169391 100644 (file)
@@ -59,7 +59,6 @@
        { 0, BASE_BAUD, 0x3E8, 4, STD_COM_FLAGS },      /* ttyS2 */     \
        { 0, BASE_BAUD, 0x2E8, 3, STD_COM4_FLAGS },     /* ttyS3 */
 
-
 #ifdef CONFIG_SERIAL_MANY_PORTS
 #define EXTRA_SERIAL_PORT_DEFNS                        \
        { 0, BASE_BAUD, 0x1A0, 9, FOURPORT_FLAGS },     /* ttyS4 */     \
index 2a40f54..96e298c 100644 (file)
@@ -22,26 +22,72 @@ typedef struct {
 #define SPIN_LOCK_UNLOCKED                     (spinlock_t) { 0 }
 #define spin_lock_init(x)                      ((x)->lock = 0)
 
-#define DEBUG_SPIN_LOCK        0
+#define NEW_LOCK
+#ifdef NEW_LOCK
 
-#if DEBUG_SPIN_LOCK
-
-#include <ia64intrin.h>
-
-#define _raw_spin_lock(x)                                                              \
-do {                                                                                   \
-       unsigned long _timeout = 1000000000;                                            \
-       volatile unsigned int _old = 0, _new = 1, *_ptr = &((x)->lock);                 \
-       do {                                                                            \
-               if (_timeout-- == 0) {                                                  \
-                       extern void dump_stack (void);                                  \
-                       printk("kernel DEADLOCK at %s:%d?\n", __FILE__, __LINE__);      \
-                       dump_stack();                                                   \
-               }                                                                       \
-       } while (__sync_val_compare_and_swap(_ptr, _old, _new) != _old);                \
-} while (0)
+/*
+ * Try to get the lock.  If we fail to get the lock, make a non-standard call to
+ * ia64_spinlock_contention().  We do not use a normal call because that would force all
+ * callers of spin_lock() to be non-leaf routines.  Instead, ia64_spinlock_contention() is
+ * carefully coded to touch only those registers that spin_lock() marks "clobbered".
+ */
 
+#define IA64_SPINLOCK_CLOBBERS "ar.pfs", "p14", "r28", "r29", "r30", "b6", "memory"
+
+static inline void
+_raw_spin_lock (spinlock_t *lock)
+{
+       register volatile unsigned int *ptr asm ("r31") = &lock->lock;
+
+#if __GNUC__ < 3 || (__GNUC__ == 3 && __GNUC_MINOR__ < 4)
+# ifdef CONFIG_ITANIUM
+       /* don't use brl on Itanium... */
+       asm volatile ("{\n\t"
+                     "  mov ar.ccv = r0\n\t"
+                     "  mov r28 = ip\n\t"
+                     "  mov r30 = 1;;\n\t"
+                     "}\n\t"
+                     "cmpxchg4.acq r30 = [%1], r30, ar.ccv\n\t"
+                     "movl r29 = ia64_spinlock_contention_pre3_4;;\n\t"
+                     "cmp4.ne p14, p0 = r30, r0\n\t"
+                     "mov b6 = r29;;\n"
+                     "(p14) br.cond.spnt.many b6"
+                     : "=r"(ptr) : "r"(ptr) : IA64_SPINLOCK_CLOBBERS);
+# else
+       asm volatile ("{\n\t"
+                     "  mov ar.ccv = r0\n\t"
+                     "  mov r28 = ip\n\t"
+                     "  mov r30 = 1;;\n\t"
+                     "}\n\t"
+                     "cmpxchg4.acq r30 = [%1], r30, ar.ccv;;\n\t"
+                     "cmp4.ne p14, p0 = r30, r0\n"
+                     "(p14) brl.cond.spnt.many ia64_spinlock_contention_pre3_4"
+                     : "=r"(ptr) : "r"(ptr) : IA64_SPINLOCK_CLOBBERS);
+# endif /* CONFIG_MCKINLEY */
 #else
+# ifdef CONFIG_ITANIUM
+       /* don't use brl on Itanium... */
+       /* mis-declare, so we get the entry-point, not its function descriptor: */
+       asm volatile ("mov r30 = 1\n\t"
+                     "mov ar.ccv = r0;;\n\t"
+                     "cmpxchg4.acq r30 = [%0], r30, ar.ccv\n\t"
+                     "movl r29 = ia64_spinlock_contention;;\n\t"
+                     "cmp4.ne p14, p0 = r30, r0\n\t"
+                     "mov b6 = r29;;\n"
+                     "(p14) br.call.spnt.many b6 = b6"
+                     : "=r"(ptr) : "r"(ptr) : IA64_SPINLOCK_CLOBBERS);
+# else
+       asm volatile ("mov r30 = 1\n\t"
+                     "mov ar.ccv = r0;;\n\t"
+                     "cmpxchg4.acq r30 = [%0], r30, ar.ccv;;\n\t"
+                     "cmp4.ne p14, p0 = r30, r0\n\t"
+                     "(p14) brl.call.spnt.many b6=ia64_spinlock_contention"
+                     : "=r"(ptr) : "r"(ptr) : IA64_SPINLOCK_CLOBBERS);
+# endif /* CONFIG_MCKINLEY */
+#endif
+}
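In plain C, the fast path above is a single acquire-semantics compare-and-swap; only on failure does control branch to the contention handler. An equivalent sketch (illustrative only; spin_lock_sketch is not real code, and the real implementation branches to ia64_spinlock_contention() precisely because a normal call would make every spin_lock() caller a non-leaf routine):

	static inline void
	spin_lock_sketch (spinlock_t *lock)
	{
		if (cmpxchg_acq(&lock->lock, 0, 1) != 0)	/* old value != 0? */
			ia64_spinlock_contention(lock);		/* spin, then retry */
	}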
+
+#else /* !NEW_LOCK */
 
 /*
  * Streamlined test_and_set_bit(0, (x)).  We use test-and-test-and-set
@@ -64,7 +110,7 @@ do {                                                                                 \
        ";;\n"                                                  \
        :: "r"(&(x)->lock) : "ar.ccv", "p7", "r2", "r29", "memory")
 
-#endif /* !DEBUG_SPIN_LOCK */
+#endif /* !NEW_LOCK */
 
 #define spin_is_locked(x)      ((x)->lock != 0)
 #define _raw_spin_unlock(x)    do { barrier(); ((spinlock_t *) x)->lock = 0; } while (0)
@@ -72,43 +118,31 @@ do {                                                                                       \
 #define spin_unlock_wait(x)    do { barrier(); } while ((x)->lock)
 
 typedef struct {
-       volatile int read_counter:31;
-       volatile int write_lock:1;
+       volatile int read_counter       : 31;
+       volatile int write_lock         :  1;
 } rwlock_t;
 #define RW_LOCK_UNLOCKED (rwlock_t) { 0, 0 }
 
 #define rwlock_init(x)         do { *(x) = RW_LOCK_UNLOCKED; } while(0)
 #define rwlock_is_locked(x)    (*(volatile int *) (x) != 0)
 
-#define _raw_read_lock(rw)                                                     \
-do {                                                                           \
-       int __read_lock_tmp = 0;                                                \
-       __asm__ __volatile__ ("1:\tfetchadd4.acq %0 = [%1], 1\n"                \
-                             ";;\n"                                            \
-                             "tbit.nz p6,p0 = %0, 31\n"                        \
-                             "(p6) br.cond.sptk.few 2f\n"                      \
-                             ".section .text.lock,\"ax\"\n"                    \
-                             "2:\tfetchadd4.rel %0 = [%1], -1\n"               \
-                             ";;\n"                                            \
-                             "3:\tld4.acq %0 = [%1]\n"                         \
-                             ";;\n"                                            \
-                             "tbit.nz p6,p0 = %0, 31\n"                        \
-                             "(p6) br.cond.sptk.few 3b\n"                      \
-                             "br.cond.sptk.few 1b\n"                           \
-                             ";;\n"                                            \
-                             ".previous\n"                                     \
-                             : "=&r" (__read_lock_tmp)                         \
-                             : "r" (rw) : "p6", "memory");                     \
-} while(0)
+#define _raw_read_lock(rw)                                                             \
+do {                                                                                   \
+       rwlock_t *__read_lock_ptr = (rw);                                               \
+                                                                                       \
+       while (unlikely(ia64_fetchadd(1, (int *) __read_lock_ptr, "acq") < 0)) {        \
+               ia64_fetchadd(-1, (int *) __read_lock_ptr, "rel");                      \
+               while (*(volatile int *)__read_lock_ptr < 0)                            \
+                       barrier();                                                      \
+                                                                                       \
+       }                                                                               \
+} while (0)
 
-#define _raw_read_unlock(rw)                                                   \
-do {                                                                           \
-       int __read_unlock_tmp = 0;                                              \
-       __asm__ __volatile__ ("fetchadd4.rel %0 = [%1], -1\n"                   \
-                             : "=r" (__read_unlock_tmp)                        \
-                             : "r" (rw)                                        \
-                             : "memory");                                      \
-} while(0)
+#define _raw_read_unlock(rw)                                   \
+do {                                                           \
+       rwlock_t *__read_lock_ptr = (rw);                       \
+       ia64_fetchadd(-1, (int *) __read_lock_ptr, "rel");      \
+} while (0)
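Because write_lock occupies bit 31, the 32-bit lock word is negative exactly while a writer holds the lock, so fetchadd4.acq registers a reader and tests for a writer in one atomic step. The same acquire/back-off logic modeled in portable C (illustrative; fetch_and_add stands in for ia64_fetchadd, and 'word' is a hypothetical flat view of the bitfields):

	for (;;) {
		if (fetch_and_add(&lock->word, 1) >= 0)		/* old value: no writer */
			break;					/* read lock acquired */
		fetch_and_add(&lock->word, -1);			/* back out our count */
		while (*(volatile int *) &lock->word < 0)
			;					/* wait for writer to exit */
	}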
 
 #define _raw_write_lock(rw)                                                    \
 do {                                                                           \
index 1486785..250c0fa 100644 (file)
@@ -212,48 +212,39 @@ extern void ia64_load_extra (struct task_struct *task);
 # define PERFMON_IS_SYSWIDE() (0)
 #endif
 
-#define __switch_to(prev,next,last) do {                                               \
-       if (((prev)->thread.flags & (IA64_THREAD_DBG_VALID|IA64_THREAD_PM_VALID))       \
-           || IS_IA32_PROCESS(ia64_task_regs(prev)) || PERFMON_IS_SYSWIDE())           \
-               ia64_save_extra(prev);                                                  \
-       if (((next)->thread.flags & (IA64_THREAD_DBG_VALID|IA64_THREAD_PM_VALID))       \
-           || IS_IA32_PROCESS(ia64_task_regs(next)) || PERFMON_IS_SYSWIDE())           \
-               ia64_load_extra(next);                                                  \
-       (last) = ia64_switch_to((next));                                                \
+#define IA64_HAS_EXTRA_STATE(t)                                                        \
+       ((t)->thread.flags & (IA64_THREAD_DBG_VALID|IA64_THREAD_PM_VALID)       \
+        || IS_IA32_PROCESS(ia64_task_regs(t)) || PERFMON_IS_SYSWIDE())
+
+#define __switch_to(prev,next,last) do {                                                        \
+       struct task_struct *__fpu_owner = ia64_get_fpu_owner();                                  \
+       if (IA64_HAS_EXTRA_STATE(prev))                                                          \
+               ia64_save_extra(prev);                                                           \
+       if (IA64_HAS_EXTRA_STATE(next))                                                          \
+               ia64_load_extra(next);                                                           \
+       ia64_psr(ia64_task_regs(next))->dfh =                                                    \
+               !(__fpu_owner == (next) && ((next)->thread.last_fph_cpu == smp_processor_id())); \
+       (last) = ia64_switch_to((next));                                                         \
 } while (0)
 
 #ifdef CONFIG_SMP
-
 /*
- * In the SMP case, we save the fph state when context-switching
- * away from a thread that modified fph.  This way, when the thread
- * gets scheduled on another CPU, the CPU can pick up the state from
- * task->thread.fph, avoiding the complication of having to fetch
- * the latest fph state from another CPU.
+ * In the SMP case, we save the fph state when context-switching away from a thread that
+ * modified fph.  This way, when the thread gets scheduled on another CPU, the CPU can
+ * pick up the state from task->thread.fph, avoiding the complication of having to fetch
+ * the latest fph state from another CPU.  In other words: eager save, lazy restore.
  */
-# define switch_to(prev,next,last) do {                                        \
-       if (ia64_psr(ia64_task_regs(prev))->mfh) {                      \
-               ia64_psr(ia64_task_regs(prev))->mfh = 0;                \
-               (prev)->thread.flags |= IA64_THREAD_FPH_VALID;          \
-               __ia64_save_fpu((prev)->thread.fph);                    \
-               (prev)->thread.last_fph_cpu = smp_processor_id();       \
-       }                                                               \
-       if ((next)->thread.flags & IA64_THREAD_FPH_VALID) {             \
-               if (((next)->thread.last_fph_cpu == smp_processor_id()) \
-                   && (ia64_get_fpu_owner() == next))                  \
-               {                                                       \
-                       ia64_psr(ia64_task_regs(next))->dfh = 0;        \
-                       ia64_psr(ia64_task_regs(next))->mfh = 0;        \
-               } else                                                  \
-                       ia64_psr(ia64_task_regs(next))->dfh = 1;        \
-       }                                                               \
-       __switch_to(prev,next,last);                                    \
-  } while (0)
-#else
 # define switch_to(prev,next,last) do {                                                \
-       ia64_psr(ia64_task_regs(next))->dfh = (ia64_get_fpu_owner() != (next)); \
-       __switch_to(prev,next,last);                                            \
+       if (ia64_psr(ia64_task_regs(prev))->mfh) {                              \
+               ia64_psr(ia64_task_regs(prev))->mfh = 0;                        \
+               (prev)->thread.flags |= IA64_THREAD_FPH_VALID;                  \
+               __ia64_save_fpu((prev)->thread.fph);                            \
+               (prev)->thread.last_fph_cpu = smp_processor_id();               \
+       }                                                                       \
+       __switch_to(prev, next, last);                                          \
 } while (0)
+#else
+# define switch_to(prev,next,last)     __switch_to(prev, next, last)
 #endif
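The dfh (disabled FP-high) bit implements the eager-save/lazy-restore policy: __switch_to() now clears dfh only when this CPU still holds the incoming task's f32-f127 state; otherwise dfh stays set, the first high-FP access traps, and the disabled-FP fault handler reloads the registers. The single assignment in __switch_to() above, spelled out:

	if (ia64_get_fpu_owner() == next
	    && next->thread.last_fph_cpu == smp_processor_id())
		ia64_psr(ia64_task_regs(next))->dfh = 0;	/* fph is current here */
	else
		ia64_psr(ia64_task_regs(next))->dfh = 1;	/* trap on first fph use */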
 
 /*
index b4bfc4c..a30fcfa 100644 (file)
@@ -8,7 +8,7 @@
  * addresses.  Thus, we need to be careful not to let the user to
  * trick us into accessing kernel memory that would normally be
  * inaccessible.  This code is also fairly performance sensitive,
- * so we want to spend as little time doing saftey checks as
+ * so we want to spend as little time doing safety checks as
  * possible.
  *
  * To make matters a bit more interesting, these macros sometimes also
index a62655e..153f06c 100644 (file)
@@ -2,8 +2,8 @@
 #define _ASM_IA64_UNWIND_H
 
 /*
- * Copyright (C) 1999-2000 Hewlett-Packard Co
- * Copyright (C) 1999-2000 David Mosberger-Tang <davidm@hpl.hp.com>
+ * Copyright (C) 1999-2000, 2003 Hewlett-Packard Co
+ *     David Mosberger-Tang <davidm@hpl.hp.com>
  *
  * A simple API for unwinding kernel stacks.  This is used for
  * debugging and error reporting purposes.  The kernel doesn't need
@@ -107,6 +107,13 @@ extern void unw_remove_unwind_table (void *handle);
  */
 extern void unw_init_from_blocked_task (struct unw_frame_info *info, struct task_struct *t);
 
+/*
+ * Prepare to unwind from interruption.  The pt-regs and switch-stack structures must
+ * be "adjacent" (no state modifications between pt-regs and switch-stack).
+ */
+extern void unw_init_from_interruption (struct unw_frame_info *info, struct task_struct *t,
+                                       struct pt_regs *pt, struct switch_stack *sw);
+
 extern void unw_init_frame_info (struct unw_frame_info *info, struct task_struct *t,
                                 struct switch_stack *sw);
 
index f62c54e..c02357f 100644 (file)
@@ -73,6 +73,7 @@ struct switch_stack {
 
 #define user_mode(regs) (!((regs)->sr & PS_S))
 #define instruction_pointer(regs) ((regs)->pc)
+#define force_successful_syscall_return()      do { } while (0)
 extern void show_regs(struct pt_regs *);
 #endif /* __KERNEL__ */
 #endif /* __ASSEMBLY__ */
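Most architectures define force_successful_syscall_return() as a no-op because they report syscall errors purely by value; the hook exists for callers whose legitimate results can look like -errno (ia64, for instance, signals success or failure in a separate register and must clear that indication). Illustrative use:

	long ret = ptrace_request(child, request, addr, data);	/* peeked data may look negative */
	force_successful_syscall_return();	/* result is data, not an error code */
	return ret;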
index ac38686..8af84a6 100644 (file)
@@ -84,6 +84,7 @@ struct switch_stack {
 
 #define user_mode(regs) (!((regs)->sr & PS_S))
 #define instruction_pointer(regs) ((regs)->pc)
+#define force_successful_syscall_return()      do { } while (0)
 extern void show_regs(struct pt_regs *);
 #endif /* __KERNEL__ */
 #endif /* __ASSEMBLY__ */
index 8dfa57d..598c6c0 100644 (file)
@@ -13,4 +13,6 @@ static inline void hw_resend_irq(struct hw_interrupt_type *h, unsigned int i)
 {
 }
 
+extern irq_desc_t irq_desc [NR_IRQS];
+
 #endif /* _ASM_HW_IRQ_H */
index c52592c..7a1757d 100644 (file)
@@ -75,6 +75,8 @@ struct pt_regs {
 
 #define instruction_pointer(regs) ((regs)->cp0_epc)
 
+#define force_successful_syscall_return()      do { } while (0)
+
 extern void show_regs(struct pt_regs *);
 #endif /* !(_LANGUAGE_ASSEMBLY) */
 
index 1bf6629..5bcd8ce 100644 (file)
@@ -3,3 +3,5 @@
 
    Everything of consequence is in arch/alpha/kernel/irq_impl.h,
    to be used only in arch/alpha/kernel/.  */
+
+extern irq_desc_t irq_desc [NR_IRQS];
index f62c31f..b7430f4 100644 (file)
@@ -74,6 +74,7 @@ struct pt_regs {
 
 #ifndef _LANGUAGE_ASSEMBLY
 #define instruction_pointer(regs) ((regs)->cp0_epc)
+#define force_successful_syscall_return()      do { } while (0)
 
 extern void (*_show_regs)(struct pt_regs *);
 #define show_regs(regs)        _show_regs(regs)
index f35c91d..ea93ba6 100644 (file)
@@ -14,4 +14,6 @@
 
 #include <asm/irq.h>
 
+extern irq_desc_t irq_desc [NR_IRQS];
+
 #endif
index 181fc75..74887da 100644 (file)
@@ -48,6 +48,7 @@ struct pt_regs {
 /* XXX should we use iaoq[1] or iaoq[0] ? */
 #define user_mode(regs)                        (((regs)->iaoq[0] &  3) ? 1 : 0)
 #define instruction_pointer(regs)      ((regs)->iaoq[0] & ~3)
+#define force_successful_syscall_return()      do { } while (0)
 extern void show_regs(struct pt_regs *);
 #endif
 
index 2abb143..bce77c5 100644 (file)
@@ -71,6 +71,8 @@ extern void do_lost_interrupts(unsigned long);
 struct hw_interrupt_type;
 static inline void hw_resend_irq(struct hw_interrupt_type *h, unsigned int i) {}
 
+extern irq_desc_t irq_desc [NR_IRQS];
+
 
 #endif /* _PPC_HW_IRQ_H */
 #endif /* __KERNEL__ */
index 998f0a3..26ae0fd 100644 (file)
@@ -48,6 +48,7 @@ struct pt_regs {
 #ifndef __ASSEMBLY__
 #define instruction_pointer(regs) ((regs)->nip)
 #define user_mode(regs) (((regs)->msr & MSR_PR) != 0)
+#define force_successful_syscall_return()      do { } while (0)
 
 /*
  * We use the least-significant bit of the trap field to indicate
index 9b4ad1f..f455a61 100644 (file)
@@ -81,5 +81,7 @@ static inline void __do_save_and_cli(unsigned long *flags)
 struct hw_interrupt_type;
 static inline void hw_resend_irq(struct hw_interrupt_type *h, unsigned int i) {}
  
+extern irq_desc_t irq_desc [NR_IRQS];
+
 #endif /* _PPC64_HW_IRQ_H */
 #endif /* __KERNEL__ */
index 17d15d0..2b69148 100644 (file)
@@ -71,6 +71,7 @@ struct pt_regs32 {
 
 #define instruction_pointer(regs) ((regs)->nip)
 #define user_mode(regs) ((((regs)->msr) >> MSR_PR_LG) & 0x1)
+#define force_successful_syscall_return()      do { } while (0)
 
 /*
  * Offsets used by 'ptrace' system call interface.
index 36d77a6..7bae27a 100644 (file)
@@ -458,6 +458,7 @@ struct user_regs_struct
 #ifdef __KERNEL__
 #define user_mode(regs) (((regs)->psw.mask & PSW_MASK_PSTATE) != 0)
 #define instruction_pointer(regs) ((regs)->psw.addr & PSW_ADDR_INSN)
+#define force_successful_syscall_return()      do { } while (0)
 extern void show_regs(struct pt_regs * regs);
 #endif
 
index 6569595..fee7395 100644 (file)
@@ -3,4 +3,7 @@
 static __inline__ void sh_do_profile (unsigned long pc) {/*Not implemented yet*/}
 
 static __inline__ void hw_resend_irq(struct hw_interrupt_type *h, unsigned int i) { /* Nothing to do */ }
+
+extern irq_desc_t irq_desc [NR_IRQS];
+
 #endif /* __ASM_SH_HW_IRQ_H */
index 71333b7..7ab81f7 100644 (file)
@@ -64,6 +64,7 @@ struct pt_regs {
 #ifdef __KERNEL__
 #define user_mode(regs) (((regs)->sr & 0x40000000)==0)
 #define instruction_pointer(regs) ((regs)->pc)
+#define force_successful_syscall_return()      do { } while (0)
 extern void show_regs(struct pt_regs *);
 
 /* User Break Controller */
index 68ecd78..d5c9277 100644 (file)
@@ -62,6 +62,7 @@ struct sparc_stackf {
 #ifdef __KERNEL__
 #define user_mode(regs) (!((regs)->psr & PSR_PS))
 #define instruction_pointer(regs) ((regs)->pc)
+#define force_successful_syscall_return()      do { } while (0)
 extern void show_regs(struct pt_regs *);
 #endif
 
index ba05bdf..197a541 100644 (file)
@@ -8,4 +8,11 @@
 #define flush_agp_mappings() 
 #define flush_agp_cache() mb()
 
+/*
+ * Page-protection value to be used for AGP memory mapped into kernel space.  For
+ * platforms which use coherent AGP DMA, this can be PAGE_KERNEL.  For others, it needs to
+ * be an uncached mapping (such as write-combining).
+ */
+#define PAGE_AGP                       PAGE_KERNEL_NOCACHE
+
 #endif
index ec7698b..b48a64b 100644 (file)
@@ -96,6 +96,7 @@ struct sparc_trapf {
 #ifdef __KERNEL__
 #define user_mode(regs) (!((regs)->tstate & TSTATE_PRIV))
 #define instruction_pointer(regs) ((regs)->tpc)
+#define force_successful_syscall_return()      do { } while (0)
 extern void show_regs(struct pt_regs *);
 #endif
 
index 4ee38c0..7929dff 100644 (file)
@@ -7,4 +7,6 @@
 static inline void hw_resend_irq(struct hw_interrupt_type *h, unsigned int i)
 {}
 
+extern irq_desc_t irq_desc [NR_IRQS];
+
 #endif
index 4bdc98e..1cd1289 100644 (file)
@@ -5,4 +5,6 @@ extern inline void hw_resend_irq (struct hw_interrupt_type *h, unsigned int i)
 {
 }
 
+extern irq_desc_t irq_desc [NR_IRQS];
+
 #endif /* __V850_HW_IRQ_H__ */
index 8c7c759..13d23f3 100644 (file)
@@ -77,6 +77,7 @@ struct pt_regs
 
 #define instruction_pointer(regs)      ((regs)->pc)
 #define user_mode(regs)                        (!(regs)->kernel_mode)
+#define force_successful_syscall_return()      do { } while (0)
 
 /* When a struct pt_regs is used to save user state for a system call in
    the kernel, the system call is stored in the space for R0 (since it's
index ecb3db3..ec2b2ed 100644 (file)
    worth it. Would need a page for it. */
 #define flush_agp_cache() asm volatile("wbinvd":::"memory")
 
+/*
+ * Page-protection value to be used for AGP memory mapped into kernel space.  For
+ * platforms which use coherent AGP DMA, this can be PAGE_KERNEL.  For others, it needs to
+ * be an uncached mapping (such as write-combining).
+ */
+#define PAGE_AGP                       PAGE_KERNEL_NOCACHE
+
 #endif
index 567e01a..2044913 100644 (file)
@@ -173,6 +173,8 @@ static inline void hw_resend_irq(struct hw_interrupt_type *h, unsigned int i) {
 static inline void hw_resend_irq(struct hw_interrupt_type *h, unsigned int i) {}
 #endif
 
+extern irq_desc_t irq_desc [NR_IRQS];
+
 #endif
 
 #endif /* _ASM_HW_IRQ_H */
index e735aaf..c1ff66c 100644 (file)
@@ -87,6 +87,7 @@ struct pt_regs {
 #if defined(__KERNEL__) && !defined(__ASSEMBLY__) 
 #define user_mode(regs) (!!((regs)->cs & 3))
 #define instruction_pointer(regs) ((regs)->rip)
+#define force_successful_syscall_return()      do { } while (0)
 void signal_fault(struct pt_regs *regs, void *frame, char *where);
 
 enum {
index 07fb16f..e4b87c5 100644 (file)
@@ -9,6 +9,8 @@
  *
  */
 
+#include <linux/serial.h>
+
 extern void setup_serial_acpi(void *);
 
 #define ACPI_SIG_LEN           4
index a627f21..d07bc5c 100644 (file)
@@ -7,7 +7,7 @@
  *
  * Copyright (C) 1999 VA Linux Systems
  * Copyright (C) 1999 Walt Drummond <drummond@valinux.com>
- * Copyright (C) 1999, 2002 Hewlett-Packard Co.
+ * Copyright (C) 1999, 2002-2003 Hewlett-Packard Co.
  *     David Mosberger-Tang <davidm@hpl.hp.com>
  *     Stephane Eranian <eranian@hpl.hp.com>
  */
 #include <asm/system.h>
 
 #define EFI_SUCCESS            0
-#define EFI_LOAD_ERROR          (1L | (1L << 63))
-#define EFI_INVALID_PARAMETER  (2L | (1L << 63))
-#define EFI_UNSUPPORTED                (3L | (1L << 63))
-#define EFI_BAD_BUFFER_SIZE     (4L | (1L << 63))
-#define EFI_BUFFER_TOO_SMALL   (5L | (1L << 63))
-#define EFI_NOT_FOUND          (14L | (1L << 63))
+#define EFI_LOAD_ERROR          ( 1 | (1UL << 63))
+#define EFI_INVALID_PARAMETER  ( 2 | (1UL << 63))
+#define EFI_UNSUPPORTED                ( 3 | (1UL << 63))
+#define EFI_BAD_BUFFER_SIZE     ( 4 | (1UL << 63))
+#define EFI_BUFFER_TOO_SMALL   ( 5 | (1UL << 63))
+#define EFI_NOT_FOUND          (14 | (1UL << 63))
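The switch from 1L to 1UL matters: shifting a signed long into its sign bit is formally undefined and yields negative constants, while efi_status_t is an unsigned long. With the unsigned form the values come out as intended, e.g. EFI_NOT_FOUND == (14 | (1UL << 63)) == 0x800000000000000e.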
 
 typedef unsigned long efi_status_t;
 typedef u8 efi_bool_t;
@@ -260,7 +260,7 @@ efi_guid_unparse(efi_guid_t *guid, char *out)
 extern void efi_init (void);
 extern void efi_map_pal_code (void);
 extern void efi_memmap_walk (efi_freemem_callback_t callback, void *arg);
-extern void efi_gettimeofday (struct timeval *tv);
+extern void efi_gettimeofday (struct timespec *ts);
 extern void efi_enter_virtual_mode (void);     /* switch EFI to virtual mode, if possible */
 extern u64 efi_get_iobase (void);
 extern u32 efi_mem_type (unsigned long phys_addr);
index fb03869..3240748 100644 (file)
@@ -229,6 +229,90 @@ typedef struct {
 #define R_386_GOTPC    10
 #define R_386_NUM      11
 
+#define R_IA64_NONE            0x00    /* none */
+#define R_IA64_IMM14           0x21    /* symbol + addend, add imm14 */
+#define R_IA64_IMM22           0x22    /* symbol + addend, add imm22 */
+#define R_IA64_IMM64           0x23    /* symbol + addend, mov imm64 */
+#define R_IA64_DIR32MSB                0x24    /* symbol + addend, data4 MSB */
+#define R_IA64_DIR32LSB                0x25    /* symbol + addend, data4 LSB */
+#define R_IA64_DIR64MSB                0x26    /* symbol + addend, data8 MSB */
+#define R_IA64_DIR64LSB                0x27    /* symbol + addend, data8 LSB */
+#define R_IA64_GPREL22         0x2a    /* @gprel(sym+add), add imm22 */
+#define R_IA64_GPREL64I                0x2b    /* @gprel(sym+add), mov imm64 */
+#define R_IA64_GPREL32MSB      0x2c    /* @gprel(sym+add), data4 MSB */
+#define R_IA64_GPREL32LSB      0x2d    /* @gprel(sym+add), data4 LSB */
+#define R_IA64_GPREL64MSB      0x2e    /* @gprel(sym+add), data8 MSB */
+#define R_IA64_GPREL64LSB      0x2f    /* @gprel(sym+add), data8 LSB */
+#define R_IA64_LTOFF22         0x32    /* @ltoff(sym+add), add imm22 */
+#define R_IA64_LTOFF64I                0x33    /* @ltoff(sym+add), mov imm64 */
+#define R_IA64_PLTOFF22                0x3a    /* @pltoff(sym+add), add imm22 */
+#define R_IA64_PLTOFF64I       0x3b    /* @pltoff(sym+add), mov imm64 */
+#define R_IA64_PLTOFF64MSB     0x3e    /* @pltoff(sym+add), data8 MSB */
+#define R_IA64_PLTOFF64LSB     0x3f    /* @pltoff(sym+add), data8 LSB */
+#define R_IA64_FPTR64I         0x43    /* @fptr(sym+add), mov imm64 */
+#define R_IA64_FPTR32MSB       0x44    /* @fptr(sym+add), data4 MSB */
+#define R_IA64_FPTR32LSB       0x45    /* @fptr(sym+add), data4 LSB */
+#define R_IA64_FPTR64MSB       0x46    /* @fptr(sym+add), data8 MSB */
+#define R_IA64_FPTR64LSB       0x47    /* @fptr(sym+add), data8 LSB */
+#define R_IA64_PCREL60B                0x48    /* @pcrel(sym+add), brl */
+#define R_IA64_PCREL21B                0x49    /* @pcrel(sym+add), ptb, call */
+#define R_IA64_PCREL21M                0x4a    /* @pcrel(sym+add), chk.s */
+#define R_IA64_PCREL21F                0x4b    /* @pcrel(sym+add), fchkf */
+#define R_IA64_PCREL32MSB      0x4c    /* @pcrel(sym+add), data4 MSB */
+#define R_IA64_PCREL32LSB      0x4d    /* @pcrel(sym+add), data4 LSB */
+#define R_IA64_PCREL64MSB      0x4e    /* @pcrel(sym+add), data8 MSB */
+#define R_IA64_PCREL64LSB      0x4f    /* @pcrel(sym+add), data8 LSB */
+#define R_IA64_LTOFF_FPTR22    0x52    /* @ltoff(@fptr(s+a)), imm22 */
+#define R_IA64_LTOFF_FPTR64I   0x53    /* @ltoff(@fptr(s+a)), imm64 */
+#define R_IA64_LTOFF_FPTR32MSB 0x54    /* @ltoff(@fptr(s+a)), 4 MSB */
+#define R_IA64_LTOFF_FPTR32LSB 0x55    /* @ltoff(@fptr(s+a)), 4 LSB */
+#define R_IA64_LTOFF_FPTR64MSB 0x56    /* @ltoff(@fptr(s+a)), 8 MSB */
+#define R_IA64_LTOFF_FPTR64LSB 0x57    /* @ltoff(@fptr(s+a)), 8 LSB */
+#define R_IA64_SEGREL32MSB     0x5c    /* @segrel(sym+add), data4 MSB */
+#define R_IA64_SEGREL32LSB     0x5d    /* @segrel(sym+add), data4 LSB */
+#define R_IA64_SEGREL64MSB     0x5e    /* @segrel(sym+add), data8 MSB */
+#define R_IA64_SEGREL64LSB     0x5f    /* @segrel(sym+add), data8 LSB */
+#define R_IA64_SECREL32MSB     0x64    /* @secrel(sym+add), data4 MSB */
+#define R_IA64_SECREL32LSB     0x65    /* @secrel(sym+add), data4 LSB */
+#define R_IA64_SECREL64MSB     0x66    /* @secrel(sym+add), data8 MSB */
+#define R_IA64_SECREL64LSB     0x67    /* @secrel(sym+add), data8 LSB */
+#define R_IA64_REL32MSB                0x6c    /* data 4 + REL */
+#define R_IA64_REL32LSB                0x6d    /* data 4 + REL */
+#define R_IA64_REL64MSB                0x6e    /* data 8 + REL */
+#define R_IA64_REL64LSB                0x6f    /* data 8 + REL */
+#define R_IA64_LTV32MSB                0x74    /* symbol + addend, data4 MSB */
+#define R_IA64_LTV32LSB                0x75    /* symbol + addend, data4 LSB */
+#define R_IA64_LTV64MSB                0x76    /* symbol + addend, data8 MSB */
+#define R_IA64_LTV64LSB                0x77    /* symbol + addend, data8 LSB */
+#define R_IA64_PCREL21BI       0x79    /* @pcrel(sym+add), ptb, call */
+#define R_IA64_PCREL22         0x7a    /* @pcrel(sym+add), imm22 */
+#define R_IA64_PCREL64I                0x7b    /* @pcrel(sym+add), imm64 */
+#define R_IA64_IPLTMSB         0x80    /* dynamic reloc, imported PLT, MSB */
+#define R_IA64_IPLTLSB         0x81    /* dynamic reloc, imported PLT, LSB */
+#define R_IA64_COPY            0x84    /* dynamic reloc, data copy */
+#define R_IA64_SUB             0x85    /* -symbol + addend, add imm22 */
+#define R_IA64_LTOFF22X                0x86    /* LTOFF22, relaxable.  */
+#define R_IA64_LDXMOV          0x87    /* Use of LTOFF22X.  */
+#define R_IA64_TPREL14         0x91    /* @tprel(sym+add), add imm14 */
+#define R_IA64_TPREL22         0x92    /* @tprel(sym+add), add imm22 */
+#define R_IA64_TPREL64I                0x93    /* @tprel(sym+add), add imm64 */
+#define R_IA64_TPREL64MSB      0x96    /* @tprel(sym+add), data8 MSB */
+#define R_IA64_TPREL64LSB      0x97    /* @tprel(sym+add), data8 LSB */
+#define R_IA64_LTOFF_TPREL22   0x9a    /* @ltoff(@tprel(s+a)), add imm22 */
+#define R_IA64_DTPMOD64MSB     0xa6    /* @dtpmod(sym+add), data8 MSB */
+#define R_IA64_DTPMOD64LSB     0xa7    /* @dtpmod(sym+add), data8 LSB */
+#define R_IA64_LTOFF_DTPMOD22  0xaa    /* @ltoff(@dtpmod(s+a)), imm22 */
+#define R_IA64_DTPREL14                0xb1    /* @dtprel(sym+add), imm14 */
+#define R_IA64_DTPREL22                0xb2    /* @dtprel(sym+add), imm22 */
+#define R_IA64_DTPREL64I       0xb3    /* @dtprel(sym+add), imm64 */
+#define R_IA64_DTPREL32MSB     0xb4    /* @dtprel(sym+add), data4 MSB */
+#define R_IA64_DTPREL32LSB     0xb5    /* @dtprel(sym+add), data4 LSB */
+#define R_IA64_DTPREL64MSB     0xb6    /* @dtprel(sym+add), data8 MSB */
+#define R_IA64_DTPREL64LSB     0xb7    /* @dtprel(sym+add), data8 LSB */
+#define R_IA64_LTOFF_DTPREL22  0xba    /* @ltoff(@dtprel(s+a)), imm22 */
+
+#define SHF_IA_64_SHORT                0x10000000      /* section near gp */
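These relocation types are what arch/ia64/kernel/module.c consumes when loading modules. As a flavor of how the simplest kind is applied (a sketch with hypothetical variable names, not this patch's actual code):

	case R_IA64_DIR64LSB:
		/* "symbol + addend, data8 LSB": store the 64-bit sum little-endian */
		*(u64 *) location = sym_value + addend;
		break;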
+
 #define R_MIPS_NONE            0
 #define R_MIPS_16              1
 #define R_MIPS_32              2
index 3bc7bcb..48ac747 100644 (file)
@@ -3,6 +3,8 @@
 
 #include <linux/config.h>
 #include <linux/fs.h>
+#include <linux/mm.h>
+
 #include <asm/cacheflush.h>
 
 #ifdef CONFIG_HIGHMEM
index c9bb7be..6256392 100644 (file)
@@ -56,7 +56,7 @@ typedef struct hw_interrupt_type  hw_irq_controller;
  *
  * Pad this out to 32 bytes for cache and indexing reasons.
  */
-typedef struct {
+typedef struct irq_desc {
        unsigned int status;            /* IRQ status */
        hw_irq_controller *handler;
        struct irqaction *action;       /* IRQ action list */
@@ -64,8 +64,6 @@ typedef struct {
        spinlock_t lock;
 } ____cacheline_aligned irq_desc_t;
 
-extern irq_desc_t irq_desc [NR_IRQS];
-
 #include <asm/hw_irq.h> /* the arch dependent stuff */
 
 extern int handle_IRQ_event(unsigned int, struct pt_regs *, struct irqaction *);
index a83926f..377ae36 100644 (file)
@@ -24,7 +24,7 @@ extern irq_cpustat_t irq_stat[];                      /* defined in asm/hardirq.h */
 #define __IRQ_STAT(cpu, member)        (irq_stat[cpu].member)
 #else
 #define __IRQ_STAT(cpu, member)        ((void)(cpu), irq_stat[0].member)
-#endif 
+#endif
 #endif
 
   /* arch independent irq_stat fields */
@@ -35,4 +35,9 @@ extern irq_cpustat_t irq_stat[];                      /* defined in asm/hardirq.h */
   /* arch dependent irq_stat fields */
 #define nmi_count(cpu)         __IRQ_STAT((cpu), __nmi_count)          /* i386, ia64 */
 
+#define local_softirq_pending()        softirq_pending(smp_processor_id())
+#define local_syscall_count()  syscall_count(smp_processor_id())
+#define local_ksoftirqd_task() ksoftirqd_task(smp_processor_id())
+#define local_nmi_count()      nmi_count(smp_processor_id())
+
 #endif /* __irq_cpustat_h */
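
The four local_*() wrappers fold the smp_processor_id() lookup into the accessor itself; kernel/softirq.c below is converted to use them. A minimal illustration of the equivalence (the helper is invented for the example, not part of the patch):

	static inline int this_cpu_softirq_pending(void)
	{
		return local_softirq_pending();
		/* was open-coded as: softirq_pending(smp_processor_id()) */
	}
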
index e00aec0..c34ff99 100644 (file)
@@ -279,9 +279,9 @@ void buffer_assertion_failure(struct buffer_head *bh);
                        printk(KERN_ERR why);                                \
                }                                                            \
        } while (0)
-#define J_EXPECT(expr, why...)         __journal_expect(expr, ## why)
-#define J_EXPECT_BH(bh, expr, why...)  __journal_expect(expr, ## why)
-#define J_EXPECT_JH(jh, expr, why...)  __journal_expect(expr, ## why)
+#define J_EXPECT(expr, why...)         __journal_expect(expr, why)
+#define J_EXPECT_BH(bh, expr, why...)  __journal_expect(expr, why)
+#define J_EXPECT_JH(jh, expr, why...)  __journal_expect(expr, why)
 #endif
 
 enum jbd_state_bits {
index 6aa89d7..51f5377 100644 (file)
@@ -23,8 +23,13 @@ extern int page_cluster;
 
 #include <asm/page.h>
 #include <asm/pgtable.h>
+#include <asm/processor.h>
 #include <asm/atomic.h>
 
+#ifndef MM_VM_SIZE
+#define MM_VM_SIZE(mm) TASK_SIZE
+#endif
+
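MM_VM_SIZE() lets an architecture whose kernel-managed address range extends past TASK_SIZE say how much of an mm must be torn down; exit_mmap() in the mm/mmap.c hunk below is the consumer. A hedged illustration of an override (the constant is invented for the example):

	/* hypothetical asm/pgtable.h: page tables span a fixed 2^43-byte
	 * region, so teardown must cover all of it, not just TASK_SIZE: */
	#define MM_VM_SIZE(mm)	(1UL << 43)
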
 /*
  * Linux kernel virtual memory manager primitives.
  * The idea being to have a "virtual" mm in the same way
index 0c9e9a6..eb10339 100644 (file)
@@ -41,4 +41,7 @@ int module_finalize(const Elf_Ehdr *hdr,
                    const Elf_Shdr *sechdrs,
                    struct module *mod);
 
+/* Any cleanup needed when a module is unloaded. */
+void module_arch_cleanup(struct module *mod);
+
 #endif
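
module_arch_cleanup() gives the architecture a hook at module unload time (ia64 appears to use it to drop per-module state such as unwind tables); free_module() in kernel/module.c below gains the call site. For an architecture with nothing to tear down, a stub satisfying the new contract might look like this (a sketch, not taken from the patch):

	/* arch/<arch>/kernel/module.c */
	#include <linux/module.h>
	#include <linux/moduleloader.h>

	void module_arch_cleanup(struct module *mod)
	{
		/* no arch-private state was created at load time */
	}
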
index 3d75259..6826301 100644 (file)
@@ -408,7 +408,7 @@ static inline loff_t
 nfs_size_to_loff_t(__u64 size)
 {
        loff_t maxsz = (((loff_t) ULONG_MAX) << PAGE_CACHE_SHIFT) + PAGE_CACHE_SIZE - 1;
-       if (size > maxsz)
+       if (size > (__u64) maxsz)
                return maxsz;
        return (loff_t) size;
 }
index 21043f2..7c087a5 100644 (file)
 #define PCI_DEVICE_ID_HP_DIVA_TOSCA1   0x1049
 #define PCI_DEVICE_ID_HP_DIVA_TOSCA2   0x104A
 #define PCI_DEVICE_ID_HP_DIVA_MAESTRO  0x104B
+#define PCI_DEVICE_ID_HP_REO_SBA       0x10f0
+#define PCI_DEVICE_ID_HP_REO_IOC       0x10f1
 #define PCI_DEVICE_ID_HP_VISUALIZE_FXE 0x108b
 #define PCI_DEVICE_ID_HP_DIVA_HALFDOME 0x1223
 #define PCI_DEVICE_ID_HP_DIVA_KEYSTONE 0x1226
 #define PCI_DEVICE_ID_HP_ZX1_SBA       0x1229
 #define PCI_DEVICE_ID_HP_ZX1_IOC       0x122a
 #define PCI_DEVICE_ID_HP_ZX1_LBA       0x122e
+#define PCI_DEVICE_ID_HP_SX1000_IOC    0x127c
 #define PCI_DEVICE_ID_HP_DIVA_EVEREST  0x1282
 #define PCI_DEVICE_ID_HP_DIVA_AUX      0x1290
 
index 74c3d97..0870a29 100644 (file)
@@ -1,9 +1,8 @@
 #ifndef __LINUX_PERCPU_H
 #define __LINUX_PERCPU_H
-#include <linux/spinlock.h> /* For preempt_disable() */
+#include <linux/preempt.h> /* For preempt_disable() */
 #include <linux/slab.h> /* For kmalloc_percpu() */
 #include <asm/percpu.h>
-
 /* Must be an lvalue. */
 #define get_cpu_var(var) (*({ preempt_disable(); &__get_cpu_var(var); }))
 #define put_cpu_var(var) preempt_enable()
index aceee0c..677e67b 100644 (file)
@@ -180,14 +180,9 @@ struct serial_icounter_struct {
 extern int register_serial(struct serial_struct *req);
 extern void unregister_serial(int line);
 
-/* Allow complicated architectures to specify rs_table[] at run time */
-extern int early_serial_setup(struct serial_struct *req);
-
-#ifdef CONFIG_ACPI
-/* tty ports reserved for the ACPI serial console port and debug port */
-#define ACPI_SERIAL_CONSOLE_PORT        4
-#define ACPI_SERIAL_DEBUG_PORT          5
-#endif
+/* Allow architectures to override entries in serial8250_ports[] at run time: */
+struct uart_port;      /* forward declaration */
+extern int early_serial_setup(struct uart_port *port);
 
 #endif /* __KERNEL__ */
 #endif /* _LINUX_SERIAL_H */
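
Callers of early_serial_setup() now describe the port with serial_core's struct uart_port rather than the legacy serial_struct. A hedged sketch of the new calling convention (field values are illustrative, and the iotype/flag names are assumed to be the serial_core spellings of this era):

	static int __init setup_boot_uart(void)
	{
		struct uart_port port;

		memset(&port, 0, sizeof(port));
		port.iobase  = 0x3f8;		/* illustrative ISA COM1 base */
		port.irq     = 4;
		port.uartclk = 1843200;
		port.iotype  = SERIAL_IO_PORT;
		port.flags   = ASYNC_BOOT_AUTOCONF;
		return early_serial_setup(&port);
	}
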
index 81a9069..194d2f5 100644 (file)
@@ -74,10 +74,6 @@ static inline int on_each_cpu(void (*func) (void *info), void *info,
  */
 extern int smp_threads_ready;
 
-extern volatile unsigned long smp_msg_data;
-extern volatile int smp_src_cpu;
-extern volatile int smp_msg_id;
-
 #define MSG_ALL_BUT_SELF       0x8000  /* Assume <32768 CPU's */
 #define MSG_ALL                        0x8001
 
index 6553637..98f1c2a 100644 (file)
@@ -73,7 +73,7 @@ struct svc_serv {
  * This assumes that the non-page part of an rpc reply will fit
  * in a page - NFSd ensures this.  lockd also has no trouble.
  */
-#define RPCSVC_MAXPAGES                ((RPCSVC_MAXPAYLOAD+PAGE_SIZE-1)/PAGE_SIZE + 1)
+#define RPCSVC_MAXPAGES                ((RPCSVC_MAXPAYLOAD+PAGE_SIZE-1)/PAGE_SIZE + 2)
 
 static inline u32 svc_getu32(struct iovec *iov)
 {
index 2cce1f2..f9bb3af 100644 (file)
@@ -130,6 +130,7 @@ enum
        KERN_PIDMAX=55,         /* int: PID # limit */
        KERN_CORE_PATTERN=56,   /* string: pattern for core-file names */
        KERN_PANIC_ON_OOPS=57,  /* int: whether we will panic on an oops */
+       KERN_CACHEDECAYTICKS=58, /* ulong: value for cache_decay_ticks (EXPERIMENTAL!) */
 };
 
 
index e34abd2..9750e47 100644 (file)
@@ -38,8 +38,6 @@
 #include <asm/cacheflush.h>
 #include <asm/tlbflush.h>
 
-static kmem_cache_t *task_struct_cachep;
-
 extern int copy_semundo(unsigned long clone_flags, struct task_struct *tsk);
 extern void exit_semundo(struct task_struct *tsk);
 
@@ -55,13 +53,6 @@ DEFINE_PER_CPU(unsigned long, process_counts) = 0;
 
 rwlock_t tasklist_lock __cacheline_aligned = RW_LOCK_UNLOCKED;  /* outer */
 
-/*
- * A per-CPU task cache - this relies on the fact that
- * the very last portion of sys_exit() is executed with
- * preemption turned off.
- */
-static task_t *task_cache[NR_CPUS] __cacheline_aligned;
-
 int nr_processes(void)
 {
        int cpu;
@@ -74,6 +65,22 @@ int nr_processes(void)
        return total;
 }
 
+#ifdef CONFIG_IA64
+# define HAVE_ARCH_DUP_TASK_STRUCT
+#endif
+
+#ifdef HAVE_ARCH_DUP_TASK_STRUCT
+extern void free_task_struct (struct task_struct *tsk);
+#else
+static kmem_cache_t *task_struct_cachep;
+
+/*
+ * A per-CPU task cache - this relies on the fact that
+ * the very last portion of sys_exit() is executed with
+ * preemption turned off.
+ */
+static task_t *task_cache[NR_CPUS] __cacheline_aligned;
+
 static void free_task_struct(struct task_struct *tsk)
 {
        /*
@@ -97,6 +104,7 @@ static void free_task_struct(struct task_struct *tsk)
                put_cpu();
        }
 }
+#endif /* HAVE_ARCH_DUP_TASK_STRUCT */
 
 void __put_task_struct(struct task_struct *tsk)
 {
@@ -186,6 +194,7 @@ int autoremove_wake_function(wait_queue_t *wait, unsigned mode, int sync)
 
 void __init fork_init(unsigned long mempages)
 {
+#ifndef HAVE_ARCH_DUP_TASK_STRUCT
        /* create a slab on which task_structs can be allocated */
        task_struct_cachep =
                kmem_cache_create("task_struct",
@@ -193,6 +202,7 @@ void __init fork_init(unsigned long mempages)
                                  SLAB_MUST_HWCACHE_ALIGN, NULL, NULL);
        if (!task_struct_cachep)
                panic("fork_init(): cannot create task_struct SLAB cache");
+#endif
 
        /*
         * The default maximum number of threads is set to a safe
@@ -210,7 +220,11 @@ void __init fork_init(unsigned long mempages)
        init_task.rlim[RLIMIT_NPROC].rlim_max = max_threads/2;
 }
 
-static struct task_struct *dup_task_struct(struct task_struct *orig)
+#ifdef HAVE_ARCH_DUP_TASK_STRUCT
+extern struct task_struct *dup_task_struct (struct task_struct *orig);
+#else /* !HAVE_ARCH_DUP_TASK_STRUCT */
+
+struct task_struct *dup_task_struct(struct task_struct *orig)
 {
        struct task_struct *tsk;
        struct thread_info *ti;
@@ -244,6 +258,8 @@ static struct task_struct *dup_task_struct(struct task_struct *orig)
        return tsk;
 }
 
+#endif /* !HAVE_ARCH_DUP_TASK_STRUCT */
+
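With HAVE_ARCH_DUP_TASK_STRUCT defined, the slab-backed allocator and per-CPU task cache above compile out, and the architecture must export the free_task_struct()/dup_task_struct() pair itself. A deliberately simplified sketch of that contract — arch_alloc_task()/arch_free_task() are stand-ins for however the arch carves out task_struct plus kernel stack, and the real ia64 versions also handle the register backing store:

	struct task_struct *dup_task_struct(struct task_struct *orig)
	{
		struct task_struct *tsk = arch_alloc_task();

		if (!tsk)
			return NULL;
		*tsk = *orig;			/* byte-wise copy, as the generic code does */
		atomic_set(&tsk->usage, 2);	/* assumed to mirror the generic initial refcount */
		return tsk;
	}

	void free_task_struct(struct task_struct *tsk)
	{
		arch_free_task(tsk);
	}
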
 #ifdef CONFIG_MMU
 static inline int dup_mmap(struct mm_struct * mm, struct mm_struct * oldmm)
 {
@@ -884,11 +900,15 @@ static struct task_struct *copy_process(unsigned long clone_flags,
 
        if (clone_flags & CLONE_CHILD_SETTID)
                p->set_child_tid = child_tidptr;
+       else
+               p->set_child_tid = NULL;
        /*
         * Clear TID on mm_release()?
         */
        if (clone_flags & CLONE_CHILD_CLEARTID)
                p->clear_child_tid = child_tidptr;
+       else
+               p->clear_child_tid = NULL;
 
        /*
         * Syscall tracing should be turned off in the child regardless
index c8ca8a9..b6f4ad0 100644 (file)
@@ -402,7 +402,9 @@ EXPORT_SYMBOL(add_timer);
 EXPORT_SYMBOL(del_timer);
 EXPORT_SYMBOL(request_irq);
 EXPORT_SYMBOL(free_irq);
+#if !defined(CONFIG_IA64)
 EXPORT_SYMBOL(irq_stat);
+#endif
 
 /* waitqueue handling */
 EXPORT_SYMBOL(add_wait_queue);
@@ -602,7 +604,9 @@ EXPORT_SYMBOL(__tasklet_hi_schedule);
 /* init task, for moving kthread roots - ought to export a function ?? */
 
 EXPORT_SYMBOL(init_task);
+#ifndef CONFIG_IA64
 EXPORT_SYMBOL(init_thread_union);
+#endif
 
 EXPORT_SYMBOL(tasklist_lock);
 EXPORT_SYMBOL(find_task_by_pid);
index 06ff932..4a01db5 100644 (file)
@@ -910,6 +910,9 @@ static void free_module(struct module *mod)
        list_del(&mod->list);
        spin_unlock_irq(&modlist_lock);
 
+       /* Arch-specific cleanup. */
+       module_arch_cleanup(mod);
+
        /* Module unload stuff */
        module_unload_free(mod);
 
@@ -1276,6 +1279,7 @@ static struct module *load_module(void __user *umod,
        mod->module_init = ptr;
 
        /* Transfer each section which specifies SHF_ALLOC */
+       DEBUGP("final section addresses:\n");
        for (i = 0; i < hdr->e_shnum; i++) {
                void *dest;
 
@@ -1293,6 +1297,7 @@ static struct module *load_module(void __user *umod,
                               sechdrs[i].sh_size);
                /* Update sh_addr to point to copy in image. */
                sechdrs[i].sh_addr = (unsigned long)dest;
+               DEBUGP("\t0x%lx %s\n", sechdrs[i].sh_addr, secstrings + sechdrs[i].sh_name);
        }
        /* Module has been moved. */
        mod = (void *)sechdrs[modindex].sh_addr;
index a7a9811..1defc9f 100644 (file)
@@ -308,6 +308,12 @@ static void _call_console_drivers(unsigned long start, unsigned long end, int ms
                        __call_console_drivers(start, end);
                }
        }
+#ifdef CONFIG_IA64_EARLY_PRINTK
+       if (!console_drivers) {
+               void early_printk (const char *str, size_t len);
+               early_printk(&LOG_BUF(start), end - start);
+       }
+#endif
 }
 
 /*
@@ -625,7 +631,11 @@ void register_console(struct console * console)
                 * for us.
                 */
                spin_lock_irqsave(&logbuf_lock, flags);
+#ifdef CONFIG_IA64_EARLY_PRINTK
+               con_start = log_end;
+#else
                con_start = log_start;
+#endif
                spin_unlock_irqrestore(&logbuf_lock, flags);
        }
        release_console_sem();
@@ -678,3 +688,110 @@ void tty_write_message(struct tty_struct *tty, char *msg)
                tty->driver->write(tty, 0, msg, strlen(msg));
        return;
 }
+
+#ifdef CONFIG_IA64_EARLY_PRINTK
+
+#include <asm/io.h>
+
+# ifdef CONFIG_IA64_EARLY_PRINTK_VGA
+
+
+#define VGABASE                ((char *)0xc0000000000b8000)
+#define VGALINES       24
+#define VGACOLS                80
+
+static int current_ypos = VGALINES, current_xpos = 0;
+
+static void
+early_printk_vga (const char *str, size_t len)
+{
+       char c;
+       int  i, k, j;
+
+       while (len-- > 0) {
+               c = *str++;
+               if (current_ypos >= VGALINES) {
+                       /* scroll 1 line up */
+                       for (k = 1, j = 0; k < VGALINES; k++, j++) {
+                               for (i = 0; i < VGACOLS; i++) {
+                                       writew(readw(VGABASE + 2*(VGACOLS*k + i)),
+                                              VGABASE + 2*(VGACOLS*j + i));
+                               }
+                       }
+                       for (i = 0; i < VGACOLS; i++) {
+                               writew(0x720, VGABASE + 2*(VGACOLS*j + i));
+                       }
+                       current_ypos = VGALINES-1;
+               }
+               if (c == '\n') {
+                       current_xpos = 0;
+                       current_ypos++;
+               } else if (c != '\r')  {
+                       writew(((0x7 << 8) | (unsigned short) c),
+                              VGABASE + 2*(VGACOLS*current_ypos + current_xpos++));
+                       if (current_xpos >= VGACOLS) {
+                               current_xpos = 0;
+                               current_ypos++;
+                       }
+               }
+       }
+}
+
+# endif /* CONFIG_IA64_EARLY_PRINTK_VGA */
+
+# ifdef CONFIG_IA64_EARLY_PRINTK_UART
+
+#include <linux/serial_reg.h>
+#include <asm/system.h>
+
+static void early_printk_uart(const char *str, size_t len)
+{
+       static char *uart = NULL;
+       unsigned long uart_base;
+       char c;
+
+       if (!uart) {
+               uart_base = 0;
+#  ifdef CONFIG_SERIAL_8250_HCDP
+               {
+                       extern unsigned long hcdp_early_uart(void);
+                       uart_base = hcdp_early_uart();
+               }
+#  endif
+#  if CONFIG_IA64_EARLY_PRINTK_UART_BASE
+               if (!uart_base)
+                       uart_base = CONFIG_IA64_EARLY_PRINTK_UART_BASE;
+#  endif
+               if (!uart_base)
+                       return;
+
+               uart = ioremap(uart_base, 64);
+               if (!uart)
+                       return;
+       }
+
+       while (len-- > 0) {
+               c = *str++;
+               while ((readb(uart + UART_LSR) & UART_LSR_TEMT) == 0)
+                       cpu_relax(); /* spin */
+
+               writeb(c, uart + UART_TX);
+
+               if (c == '\n')
+                       writeb('\r', uart + UART_TX);
+       }
+}
+
+# endif /* CONFIG_IA64_EARLY_PRINTK_UART */
+
+void early_printk(const char *str, size_t len)
+{
+#ifdef CONFIG_IA64_EARLY_PRINTK_UART
+       early_printk_uart(str, len);
+#endif
+#ifdef CONFIG_IA64_EARLY_PRINTK_VGA
+       early_printk_vga(str, len);
+#endif
+}
+
+#endif /* CONFIG_IA64_EARLY_PRINTK */
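
A hedged usage sketch: code that runs before any console has registered can emit diagnostics through the new hook directly (the call site below is invented for illustration; only the early_printk() signature comes from the patch):

	static void __init announce_early(void)
	{
		static const char msg[] = "early console alive\n";

		early_printk(msg, sizeof(msg) - 1);
	}
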
index 60f9148..bb94e1f 100644 (file)
    - Tasklets: serialized wrt itself.
  */
 
+/* No separate irq_stat for ia64, it is part of PSA */
+#if !defined(CONFIG_IA64)
 irq_cpustat_t irq_stat[NR_CPUS] ____cacheline_aligned;
+#endif /* !CONFIG_IA64 */
 
 static struct softirq_action softirq_vec[32] __cacheline_aligned_in_smp;
 
@@ -321,7 +324,7 @@ static int ksoftirqd(void * __bind_cpu)
        __set_current_state(TASK_INTERRUPTIBLE);
        mb();
 
-       ksoftirqd_task(cpu) = current;
+       local_ksoftirqd_task() = current;
 
        for (;;) {
                if (!local_softirq_pending())
index 8e7de84..fdabc0e 100644 (file)
@@ -1218,7 +1218,7 @@ asmlinkage long sys_getrlimit(unsigned int resource, struct rlimit __user *rlim)
                        ? -EFAULT : 0;
 }
 
-#if !defined(__ia64__) && !defined(CONFIG_V850)
+#if (!defined(__ia64__) && !defined(CONFIG_V850)) || defined(CONFIG_COMPAT)
 
 /*
  *     Back compatibility for getrlimit. Needed for some apps.
index aff6558..e566041 100644 (file)
@@ -265,6 +265,10 @@ static ctl_table kern_table[] = {
         0600, NULL, &proc_dointvec},
        {KERN_PANIC_ON_OOPS,"panic_on_oops",
         &panic_on_oops,sizeof(int),0644,NULL,&proc_dointvec},
+#ifdef CONFIG_SMP
+       {KERN_CACHEDECAYTICKS, "cache_decay_ticks", &cache_decay_ticks, sizeof (cache_decay_ticks),
+        0644, NULL, &proc_doulongvec_minmax},
+#endif
        {0}
 };
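
Assuming the usual /proc/sys mapping of kern_table entries, the new knob is then tunable at run time, e.g.:

	# echo 8 > /proc/sys/kernel/cache_decay_ticks
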
 
index 3af9c7d..e78e500 100644 (file)
@@ -35,7 +35,7 @@
  */
 struct timezone sys_tz;
 
-extern unsigned long last_time_offset;
+extern unsigned long last_nsec_offset;
 
 #if !defined(__alpha__) && !defined(__ia64__)
 
@@ -79,7 +79,7 @@ asmlinkage long sys_stime(int * tptr)
        write_seqlock_irq(&xtime_lock);
        xtime.tv_sec = value;
        xtime.tv_nsec = 0;
-       last_time_offset = 0;
+       last_nsec_offset = 0;
        time_adjust = 0;        /* stop active adjtime() */
        time_status |= STA_UNSYNC;
        time_maxerror = NTP_PHASE_LIMIT;
@@ -125,7 +125,7 @@ inline static void warp_clock(void)
 {
        write_seqlock_irq(&xtime_lock);
        xtime.tv_sec += sys_tz.tz_minuteswest * 60;
-       last_time_offset = 0;
+       last_nsec_offset = 0;
        write_sequnlock_irq(&xtime_lock);
 }
 
@@ -381,7 +381,7 @@ leave:      if ((time_status & (STA_UNSYNC|STA_CLOCKERR)) != 0
        txc->calcnt        = pps_calcnt;
        txc->errcnt        = pps_errcnt;
        txc->stbcnt        = pps_stbcnt;
-       last_time_offset = 0;
+       last_nsec_offset = 0;
        write_sequnlock_irq(&xtime_lock);
        do_gettimeofday(&txc->time);
        return(result);
index caa3771..6ffa0bf 100644 (file)
@@ -451,6 +451,7 @@ unsigned long tick_nsec = TICK_NSEC(TICK_USEC);     /* USER_HZ period (nsec) */
  */
 struct timespec xtime __attribute__ ((aligned (16)));
 struct timespec wall_to_monotonic __attribute__ ((aligned (16)));
+unsigned long last_nsec_offset;
 
 /* Don't completely fail for HZ > 500.  */
 int tickadj = 500/HZ ? : 1;            /* microsecs */
@@ -605,7 +606,7 @@ static void second_overflow(void)
 /* in the NTP reference this is called "hardclock()" */
 static void update_wall_time_one_tick(void)
 {
-       long time_adjust_step;
+       long time_adjust_step, delta_nsec;
 
        if ( (time_adjust_step = time_adjust) != 0 ) {
            /* We are doing an adjtime thing. 
@@ -621,11 +622,11 @@ static void update_wall_time_one_tick(void)
                time_adjust_step = tickadj;
             else if (time_adjust < -tickadj)
                time_adjust_step = -tickadj;
-            
+
            /* Reduce by this step the amount of time left  */
            time_adjust -= time_adjust_step;
        }
-       xtime.tv_nsec += tick_nsec + time_adjust_step * 1000;
+       delta_nsec = tick_nsec + time_adjust_step * 1000;
        /*
         * Advance the phase, once it gets to one microsecond, then
         * advance the tick more.
@@ -634,13 +635,33 @@ static void update_wall_time_one_tick(void)
        if (time_phase <= -FINEUSEC) {
                long ltemp = -time_phase >> (SHIFT_SCALE - 10);
                time_phase += ltemp << (SHIFT_SCALE - 10);
-               xtime.tv_nsec -= ltemp;
+               delta_nsec -= ltemp;
        }
        else if (time_phase >= FINEUSEC) {
                long ltemp = time_phase >> (SHIFT_SCALE - 10);
                time_phase -= ltemp << (SHIFT_SCALE - 10);
-               xtime.tv_nsec += ltemp;
+               delta_nsec += ltemp;
+       }
+       xtime.tv_nsec += delta_nsec;
+
+       /*
+        * The whole point of last_nsec_offset is that it can be updated atomically and
+        * lock-free.  Thus, arches that don't have __HAVE_ARCH_CMPXCHG probably can't use
+        * last_nsec_offset anyhow... --davidm 2003-Feb-11
+        */
+#ifdef __HAVE_ARCH_CMPXCHG
+       if (last_nsec_offset > 0) {
+               unsigned long new, old;
+
+               do {
+                       old = last_nsec_offset;
+                       if (old > delta_nsec)
+                               new = old - delta_nsec;
+                       else
+                               new = 0;
+               } while (cmpxchg(&last_nsec_offset, old, new) != old);
        }
+#endif
 }
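
The #ifdef'd loop above is the standard lock-free read-modify-write shape: snapshot the value, compute the update, publish it with cmpxchg(), and retry if another CPU got there first. The same pattern in isolation (a sketch; like the code above it assumes __HAVE_ARCH_CMPXCHG):

	static inline void nsec_offset_sub(unsigned long *v, unsigned long dec)
	{
		unsigned long old, new;

		do {
			old = *v;				/* snapshot */
			new = old > dec ? old - dec : 0;	/* clamp at zero */
		} while (cmpxchg(v, old, new) != old);		/* raced: retry */
	}
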
 
 /*
@@ -777,7 +798,6 @@ unsigned long wall_jiffies = INITIAL_JIFFIES;
 #ifndef ARCH_HAVE_XTIME_LOCK
 seqlock_t xtime_lock __cacheline_aligned_in_smp = SEQLOCK_UNLOCKED;
 #endif
-unsigned long last_time_offset;
 
 /*
  * This function runs timers and the timer-tq in bottom half context.
@@ -811,7 +831,6 @@ static inline void update_times(void)
                wall_jiffies += ticks;
                update_wall_time(ticks);
        }
-       last_time_offset = 0;
        calc_load(ticks);
 }
   
index cc3138f..6013408 100644 (file)
@@ -114,8 +114,10 @@ static inline void free_one_pgd(struct mmu_gather *tlb, pgd_t * dir)
        }
        pmd = pmd_offset(dir, 0);
        pgd_clear(dir);
-       for (j = 0; j < PTRS_PER_PMD ; j++)
+       for (j = 0; j < PTRS_PER_PMD ; j++) {
+               prefetchw(pmd + j + PREFETCH_STRIDE/sizeof(*pmd));
                free_one_pmd(tlb, pmd+j);
+       }
        pmd_free_tlb(tlb, pmd);
 }
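
Worked example (hedged — both sizes are arch-dependent): with a 128-byte PREFETCH_STRIDE and 8-byte pmd entries, iteration j issues prefetchw(&pmd[j + 16]), so the write-intent prefetch runs sixteen entries, two 64-byte cache lines, ahead of the entry free_one_pmd() is about to walk.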
 
index cb35e2d..014a0be 100644 (file)
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -1440,7 +1440,7 @@ void exit_mmap(struct mm_struct *mm)
        vm_unacct_memory(nr_accounted);
        BUG_ON(mm->map_count);  /* This is just debugging */
        clear_page_tables(tlb, FIRST_USER_PGD_NR, USER_PTRS_PER_PGD);
-       tlb_finish_mmu(tlb, 0, TASK_SIZE);
+       tlb_finish_mmu(tlb, 0, MM_VM_SIZE(mm));
 
        vma = mm->mmap;
        mm->mmap = mm->mmap_cache = NULL;
index 258efe3..5757af5 100644 (file)
@@ -1944,8 +1944,8 @@ static void clear_advance(void *buf, unsigned bsize, unsigned bptr,
                len -= x;
        }
        CS_DBGOUT(CS_WAVE_WRITE, 4, printk(KERN_INFO
-               "cs4281: clear_advance(): memset %d at 0x%.8x for %d size \n",
-                       (unsigned)c, (unsigned)((char *) buf) + bptr, len));
+               "cs4281: clear_advance(): memset %d at %p for %d size \n",
+                       (unsigned)c, ((char *) buf) + bptr, len));
        memset(((char *) buf) + bptr, c, len);
 }
 
@@ -1980,9 +1980,8 @@ static void cs4281_update_ptr(struct cs4281_state *s, int intflag)
                                wake_up(&s->dma_adc.wait);
                }
                CS_DBGOUT(CS_PARMS, 8, printk(KERN_INFO
-                       "cs4281: cs4281_update_ptr(): s=0x%.8x hwptr=%d total_bytes=%d count=%d \n",
-                               (unsigned)s, s->dma_adc.hwptr, 
-                               s->dma_adc.total_bytes, s->dma_adc.count));
+                       "cs4281: cs4281_update_ptr(): s=%p hwptr=%d total_bytes=%d count=%d \n",
+                               s, s->dma_adc.hwptr, s->dma_adc.total_bytes, s->dma_adc.count));
        }
        // update DAC pointer 
        //
@@ -2014,11 +2013,10 @@ static void cs4281_update_ptr(struct cs4281_state *s, int intflag)
                                // Continue to play silence until the _release.
                                //
                                CS_DBGOUT(CS_WAVE_WRITE, 6, printk(KERN_INFO
-                                       "cs4281: cs4281_update_ptr(): memset %d at 0x%.8x for %d size \n",
+                                       "cs4281: cs4281_update_ptr(): memset %d at %p for %d size \n",
                                                (unsigned)(s->prop_dac.fmt & 
                                                (AFMT_U8 | AFMT_U16_LE)) ? 0x80 : 0, 
-                                               (unsigned)s->dma_dac.rawbuf, 
-                                               s->dma_dac.dmasize));
+                                               s->dma_dac.rawbuf, s->dma_dac.dmasize));
                                memset(s->dma_dac.rawbuf,
                                       (s->prop_dac.
                                        fmt & (AFMT_U8 | AFMT_U16_LE)) ?
@@ -2049,9 +2047,8 @@ static void cs4281_update_ptr(struct cs4281_state *s, int intflag)
                        }
                }
                CS_DBGOUT(CS_PARMS, 8, printk(KERN_INFO
-                       "cs4281: cs4281_update_ptr(): s=0x%.8x hwptr=%d total_bytes=%d count=%d \n",
-                               (unsigned) s, s->dma_dac.hwptr, 
-                               s->dma_dac.total_bytes, s->dma_dac.count));
+                       "cs4281: cs4281_update_ptr(): s=%p hwptr=%d total_bytes=%d count=%d \n",
+                               s, s->dma_dac.hwptr, s->dma_dac.total_bytes, s->dma_dac.count));
        }
 }
 
@@ -2182,8 +2179,7 @@ static int mixer_ioctl(struct cs4281_state *s, unsigned int cmd,
 
        VALIDATE_STATE(s);
        CS_DBGOUT(CS_FUNCTION, 4, printk(KERN_INFO
-                "cs4281: mixer_ioctl(): s=0x%.8x cmd=0x%.8x\n",
-                        (unsigned) s, cmd));
+                "cs4281: mixer_ioctl(): s=%p cmd=0x%.8x\n", s, cmd));
 #if CSDEBUG
        cs_printioctl(cmd);
 #endif
@@ -2748,9 +2744,8 @@ static void CopySamples(char *dst, char *src, int count, int iChannels,
        CS_DBGOUT(CS_FUNCTION, 2,
                  printk(KERN_INFO "cs4281: CopySamples()+ "));
        CS_DBGOUT(CS_WAVE_READ, 8, printk(KERN_INFO
-                " dst=0x%x src=0x%x count=%d iChannels=%d fmt=0x%x\n",
-                        (unsigned) dst, (unsigned) src, (unsigned) count,
-                        (unsigned) iChannels, (unsigned) fmt));
+                " dst=%p src=%p count=%d iChannels=%d fmt=0x%x\n",
+                        dst, src, (unsigned) count, (unsigned) iChannels, (unsigned) fmt));
 
        // Gershwin does format conversion in hardware so normally
        // we don't do any host based coversion. The data formatter
@@ -2830,9 +2825,9 @@ static unsigned cs_copy_to_user(struct cs4281_state *s, void *dest,
        void *src = hwsrc;      //default to the standard destination buffer addr
 
        CS_DBGOUT(CS_FUNCTION, 6, printk(KERN_INFO
-               "cs_copy_to_user()+ fmt=0x%x fmt_o=0x%x cnt=%d dest=0x%.8x\n",
+               "cs_copy_to_user()+ fmt=0x%x fmt_o=0x%x cnt=%d dest=%p\n",
                        s->prop_adc.fmt, s->prop_adc.fmt_original,
-                       (unsigned) cnt, (unsigned) dest));
+                       (unsigned) cnt, dest));
 
        if (cnt > s->dma_adc.dmasize) {
                cnt = s->dma_adc.dmasize;
@@ -2877,7 +2872,7 @@ static ssize_t cs4281_read(struct file *file, char *buffer, size_t count,
        unsigned copied = 0;
 
        CS_DBGOUT(CS_FUNCTION | CS_WAVE_READ, 2,
-                 printk(KERN_INFO "cs4281: cs4281_read()+ %d \n", count));
+                 printk(KERN_INFO "cs4281: cs4281_read()+ %Zu \n", count));
 
        VALIDATE_STATE(s);
        if (ppos != &file->f_pos)
@@ -2900,7 +2895,7 @@ static ssize_t cs4281_read(struct file *file, char *buffer, size_t count,
 //
        while (count > 0) {
                CS_DBGOUT(CS_WAVE_READ, 8, printk(KERN_INFO
-                       "_read() count>0 count=%d .count=%d .swptr=%d .hwptr=%d \n",
+                       "_read() count>0 count=%Zu .count=%d .swptr=%d .hwptr=%d \n",
                                count, s->dma_adc.count,
                                s->dma_adc.swptr, s->dma_adc.hwptr));
                spin_lock_irqsave(&s->lock, flags);
@@ -2957,11 +2952,10 @@ static ssize_t cs4281_read(struct file *file, char *buffer, size_t count,
                // the "cnt" is the number of bytes to read.
 
                CS_DBGOUT(CS_WAVE_READ, 2, printk(KERN_INFO
-                       "_read() copy_to cnt=%d count=%d ", cnt, count));
+                       "_read() copy_to cnt=%d count=%Zu ", cnt, count));
                CS_DBGOUT(CS_WAVE_READ, 8, printk(KERN_INFO
-                        " .dmasize=%d .count=%d buffer=0x%.8x ret=%d\n",
-                                s->dma_adc.dmasize, s->dma_adc.count,
-                                (unsigned) buffer, ret));
+                        " .dmasize=%d .count=%d buffer=%p ret=%Zd\n",
+                                s->dma_adc.dmasize, s->dma_adc.count, buffer, ret));
 
                if (cs_copy_to_user
                    (s, buffer, s->dma_adc.rawbuf + swptr, cnt, &copied))
@@ -2977,7 +2971,7 @@ static ssize_t cs4281_read(struct file *file, char *buffer, size_t count,
                start_adc(s);
        }
        CS_DBGOUT(CS_FUNCTION | CS_WAVE_READ, 2,
-                 printk(KERN_INFO "cs4281: cs4281_read()- %d\n", ret));
+                 printk(KERN_INFO "cs4281: cs4281_read()- %Zd\n", ret));
        return ret;
 }
 
@@ -2993,7 +2987,7 @@ static ssize_t cs4281_write(struct file *file, const char *buffer,
        int cnt;
 
        CS_DBGOUT(CS_FUNCTION | CS_WAVE_WRITE, 2,
-                 printk(KERN_INFO "cs4281: cs4281_write()+ count=%d\n",
+                 printk(KERN_INFO "cs4281: cs4281_write()+ count=%Zu\n",
                         count));
        VALIDATE_STATE(s);
 
@@ -3049,7 +3043,7 @@ static ssize_t cs4281_write(struct file *file, const char *buffer,
                start_dac(s);
        }
        CS_DBGOUT(CS_FUNCTION | CS_WAVE_WRITE, 2,
-                 printk(KERN_INFO "cs4281: cs4281_write()- %d\n", ret));
+                 printk(KERN_INFO "cs4281: cs4281_write()- %Zd\n", ret));
        return ret;
 }
 
@@ -3170,8 +3164,7 @@ static int cs4281_ioctl(struct inode *inode, struct file *file,
        int val, mapped, ret;
 
        CS_DBGOUT(CS_FUNCTION, 4, printk(KERN_INFO
-                "cs4281: cs4281_ioctl(): file=0x%.8x cmd=0x%.8x\n",
-                        (unsigned) file, cmd));
+                "cs4281: cs4281_ioctl(): file=%p cmd=0x%.8x\n", file, cmd));
 #if CSDEBUG
        cs_printioctl(cmd);
 #endif
@@ -3601,8 +3594,8 @@ static int cs4281_release(struct inode *inode, struct file *file)
            (struct cs4281_state *) file->private_data;
 
        CS_DBGOUT(CS_FUNCTION | CS_RELEASE, 2, printk(KERN_INFO
-                "cs4281: cs4281_release(): inode=0x%.8x file=0x%.8x f_mode=%d\n",
-                        (unsigned) inode, (unsigned) file, file->f_mode));
+                "cs4281: cs4281_release(): inode=%p file=%p f_mode=%d\n",
+                        inode, file, file->f_mode));
 
        VALIDATE_STATE(s);
 
@@ -3636,8 +3629,8 @@ static int cs4281_open(struct inode *inode, struct file *file)
        struct list_head *entry;
 
        CS_DBGOUT(CS_FUNCTION | CS_OPEN, 2, printk(KERN_INFO
-               "cs4281: cs4281_open(): inode=0x%.8x file=0x%.8x f_mode=0x%x\n",
-                       (unsigned) inode, (unsigned) file, file->f_mode));
+               "cs4281: cs4281_open(): inode=%p file=%p f_mode=0x%x\n",
+                       inode, file, file->f_mode));
 
        list_for_each(entry, &cs4281_devs)
        {
@@ -4347,10 +4340,8 @@ static int __devinit cs4281_probe(struct pci_dev *pcidev,
 
        CS_DBGOUT(CS_INIT, 2,
                  printk(KERN_INFO
-                        "cs4281: probe() BA0=0x%.8x BA1=0x%.8x pBA0=0x%.8x pBA1=0x%.8x \n",
-                        (unsigned) temp1, (unsigned) temp2,
-                        (unsigned) s->pBA0, (unsigned) s->pBA1));
-
+                        "cs4281: probe() BA0=0x%.8x BA1=0x%.8x pBA0=%p pBA1=%p \n",
+                        (unsigned) temp1, (unsigned) temp2, s->pBA0, s->pBA1));
        CS_DBGOUT(CS_INIT, 2,
                  printk(KERN_INFO
                         "cs4281: probe() pBA0phys=0x%.8x pBA1phys=0x%.8x\n",
@@ -4397,15 +4388,13 @@ static int __devinit cs4281_probe(struct pci_dev *pcidev,
        if (pmdev)
        {
                CS_DBGOUT(CS_INIT | CS_PM, 4, printk(KERN_INFO
-                        "cs4281: probe() pm_register() succeeded (0x%x).\n",
-                               (unsigned)pmdev));
+                        "cs4281: probe() pm_register() succeeded (%p).\n", pmdev));
                pmdev->data = s;
        }
        else
        {
                CS_DBGOUT(CS_INIT | CS_PM | CS_ERROR, 0, printk(KERN_INFO
-                        "cs4281: probe() pm_register() failed (0x%x).\n",
-                               (unsigned)pmdev));
+                        "cs4281: probe() pm_register() failed (%p).\n", pmdev));
                s->pm.flags |= CS4281_PM_NOT_REGISTERED;
        }
 #endif
index eaada08..faa091b 100644 (file)
@@ -46,8 +46,8 @@ int cs4281_pm_callback(struct pm_dev *dev, pm_request_t rqst, void *data)
        struct cs4281_state *state;
 
        CS_DBGOUT(CS_PM, 2, printk(KERN_INFO 
-               "cs4281: cs4281_pm_callback dev=0x%x rqst=0x%x state=%d\n",
-                       (unsigned)dev,(unsigned)rqst,(unsigned)data));
+               "cs4281: cs4281_pm_callback dev=%p rqst=0x%x state=%p\n",
+                       dev,(unsigned)rqst,data));
        state = (struct cs4281_state *) dev->data;
        if (state) {
                switch(rqst) {
index 58a915e..0ab87ea 100644 (file)
@@ -5,11 +5,9 @@ host-progs  := gen_init_cpio
 
 clean-files := initramfs_data.cpio.gz
 
-LDFLAGS_initramfs_data.o := $(LDFLAGS_BLOB) -r -T
-
-$(obj)/initramfs_data.o: $(src)/initramfs_data.scr \
-                        $(obj)/initramfs_data.cpio.gz FORCE
-       $(call if_changed,ld)
+$(obj)/initramfs_data.S: $(obj)/initramfs_data.cpio.gz
+       echo '.section ".init.ramfs", "a"' > $@
+       od -v -An -t x1 -w8 $^ | cut -c2- | sed -e s"/ /,0x/g" -e s"/^/.byte 0x"/ >> $@
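
Step by step on one illustrative 8-byte row (the bytes are just the gzip magic plus zero padding, not real archive contents): od -v -An -t x1 -w8 prints " 1f 8b 08 00 00 00 00 00"; cut -c2- drops the leading blank; the first sed turns each remaining blank into ",0x" and the second prefixes ".byte 0x", so the generated initramfs_data.S contains lines like:

	.byte 0x1f,0x8b,0x08,0x00,0x00,0x00,0x00,0x00
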
 
 # initramfs-y are the programs which will be copied into the CPIO
 # archive. Currently, the filenames are hardcoded in gen_init_cpio,