From: Gerd Hoffmann Date: Thu, 15 May 2003 07:29:48 +0000 (+0000) Subject: restricted ia64 patches to ia64 again, they still break builds on X-Git-Url: http://git.alex.org.uk restricted ia64 patches to ia64 again, they still break builds on hammer (something irq related this time ...). suse-commit: d0b5d5c0b6cf5f3d9430cad70950673cdeb6b05c --- diff --git a/Makefile b/Makefile index 086b7aa..6e9ba16 100644 --- a/Makefile +++ b/Makefile @@ -182,7 +182,7 @@ AFLAGS_KERNEL = NOSTDINC_FLAGS = -nostdinc -iwithprefix include CPPFLAGS := -D__KERNEL__ -Iinclude -CFLAGS := $(CPPFLAGS) -Wall -Wstrict-prototypes -Wno-trigraphs -g -O2 \ +CFLAGS := $(CPPFLAGS) -Wall -Wstrict-prototypes -Wno-trigraphs -O2 \ -fno-strict-aliasing -fno-common AFLAGS := -D__ASSEMBLY__ $(CPPFLAGS) diff --git a/arch/alpha/kernel/module.c b/arch/alpha/kernel/module.c index 4259287..f444a94 100644 --- a/arch/alpha/kernel/module.c +++ b/arch/alpha/kernel/module.c @@ -300,8 +300,3 @@ module_finalize(const Elf_Ehdr *hdr, const Elf_Shdr *sechdrs, { return 0; } - -void -module_arch_cleanup(struct module *mod) -{ -} diff --git a/arch/arm/kernel/module.c b/arch/arm/kernel/module.c index 6d1f4a8..c79822e 100644 --- a/arch/arm/kernel/module.c +++ b/arch/arm/kernel/module.c @@ -159,8 +159,3 @@ module_finalize(const Elf32_Ehdr *hdr, const Elf_Shdr *sechdrs, { return 0; } - -void -module_arch_cleanup(struct module *mod) -{ -} diff --git a/arch/i386/kernel/module.c b/arch/i386/kernel/module.c index e8258ad..463a2c7 100644 --- a/arch/i386/kernel/module.c +++ b/arch/i386/kernel/module.c @@ -123,7 +123,3 @@ int module_finalize(const Elf_Ehdr *hdr, } return 0; } - -void module_arch_cleanup(struct module *mod) -{ -} diff --git a/arch/ia64/Kconfig b/arch/ia64/Kconfig index a111092..fb55bf5 100644 --- a/arch/ia64/Kconfig +++ b/arch/ia64/Kconfig @@ -381,10 +381,6 @@ config HUGETLB_PAGE_SIZE_4GB depends on MCKINLEY bool "4GB" -config HUGETLB_PAGE_SIZE_1GB - depends on MCKINLEY - bool "1GB" - config HUGETLB_PAGE_SIZE_256MB bool "256MB" diff --git a/arch/ia64/Makefile b/arch/ia64/Makefile index dd86e79..a5a4c6c 100644 --- a/arch/ia64/Makefile +++ b/arch/ia64/Makefile @@ -23,7 +23,6 @@ cflags-y := -pipe $(EXTRA) -ffixed-r13 -mfixed-range=f10-f15,f32-f127 \ CFLAGS_KERNEL := -mconstant-gp GCC_VERSION=$(shell $(CC) -v 2>&1 | fgrep 'gcc version' | cut -f3 -d' ' | cut -f1 -d'.') -GCC_MINOR_VERSION=$(shell $(CC) -v 2>&1 | fgrep 'gcc version' | cut -f3 -d' ' | cut -f2 -d'.') GAS_STATUS=$(shell arch/ia64/scripts/check-gas $(CC) $(OBJDUMP)) @@ -36,14 +35,7 @@ $(error Sorry, you need a newer version of the assember, one that is built from endif ifneq ($(GCC_VERSION),2) - cflags-$(CONFIG_ITANIUM) += -frename-registers -endif - -ifeq ($(GCC_VERSION),3) - ifeq ($(GCC_MINOR_VERSION),4) - cflags-$(CONFIG_ITANIUM) += -mtune=merced - cflags-$(CONFIG_MCKINLEY) += -mtune=mckinley - endif + cflags-y += -frename-registers --param max-inline-insns=5000 endif cflags-$(CONFIG_ITANIUM_BSTEP_SPECIFIC) += -mb-step @@ -56,14 +48,14 @@ libs-y += arch/ia64/lib/ core-y += arch/ia64/kernel/ arch/ia64/mm/ core-$(CONFIG_IA32_SUPPORT) += arch/ia64/ia32/ core-$(CONFIG_IA64_DIG) += arch/ia64/dig/ -core-$(CONFIG_IA64_GENERIC) += arch/ia64/dig/ +core-$(CONFIG_IA64_GENERIC) += arch/ia64/dig/ arch/ia64/hp/common/ arch/ia64/hp/zx1/ \ + arch/ia64/hp/sim/ core-$(CONFIG_IA64_HP_ZX1) += arch/ia64/dig/ core-$(CONFIG_IA64_SGI_SN) += arch/ia64/sn/ drivers-$(CONFIG_PCI) += arch/ia64/pci/ drivers-$(CONFIG_IA64_HP_SIM) += arch/ia64/hp/sim/ drivers-$(CONFIG_IA64_HP_ZX1) += arch/ia64/hp/common/ 
arch/ia64/hp/zx1/ -drivers-$(CONFIG_IA64_GENERIC) += arch/ia64/hp/common/ arch/ia64/hp/zx1/ arch/ia64/hp/sim/ boot := arch/ia64/boot tools := arch/ia64/tools diff --git a/arch/ia64/dig/machvec.c b/arch/ia64/dig/machvec.c index 0c55bda..4d24527 100644 --- a/arch/ia64/dig/machvec.c +++ b/arch/ia64/dig/machvec.c @@ -1,3 +1,2 @@ -#define MACHVEC_PLATFORM_NAME dig -#define MACHVEC_PLATFORM_HEADER +#define MACHVEC_PLATFORM_NAME dig #include diff --git a/arch/ia64/hp/common/sba_iommu.c b/arch/ia64/hp/common/sba_iommu.c index 668ca81..f9d86c5 100644 --- a/arch/ia64/hp/common/sba_iommu.c +++ b/arch/ia64/hp/common/sba_iommu.c @@ -1,9 +1,9 @@ /* ** IA64 System Bus Adapter (SBA) I/O MMU manager ** -** (c) Copyright 2002-2003 Alex Williamson -** (c) Copyright 2002-2003 Grant Grundler -** (c) Copyright 2002-2003 Hewlett-Packard Company +** (c) Copyright 2002 Alex Williamson +** (c) Copyright 2002 Grant Grundler +** (c) Copyright 2002 Hewlett-Packard Company ** ** Portions (c) 2000 Grant Grundler (from parisc I/O MMU code) ** Portions (c) 1999 Dave S. Miller (from sparc64 I/O MMU code) @@ -30,39 +30,17 @@ #include #include #include -#include -#include #include #include /* ia64_get_itc() */ #include #include /* PAGE_OFFSET */ -#include -#include /* wmb() */ -#include -#define PFX "IOC: " +#define DRIVER_NAME "SBA" -/* -** This option allows cards capable of 64bit DMA to bypass the IOMMU. If -** not defined, all DMA will be 32bit and go through the TLB. -*/ #define ALLOW_IOV_BYPASS - -/* -** If a device prefetches beyond the end of a valid pdir entry, it will cause -** a hard failure, ie. MCA. Version 3.0 and later of the zx1 LBA should -** disconnect on 4k boundaries and prevent such issues. If the device is -** particularly agressive, this option will keep the entire pdir valid such -** that prefetching will hit a valid address. This could severely impact -** error containment, and is therefore off by default. The page that is -** used for spill-over is poisoned, so that should help debugging somewhat. -*/ -#undef FULL_VALID_PDIR - #define ENABLE_MARK_CLEAN - /* ** The number of debug flags is a clue - this code is fragile. */ @@ -74,10 +52,6 @@ #undef DEBUG_LARGE_SG_ENTRIES #undef DEBUG_BYPASS -#if defined(FULL_VALID_PDIR) && defined(ASSERT_PDIR_SANITY) -#error FULL_VALID_PDIR and ASSERT_PDIR_SANITY are mutually exclusive -#endif - #define SBA_INLINE __inline__ /* #define SBA_INLINE */ @@ -122,8 +96,12 @@ #define ASSERT(expr) #endif +#define KB(x) ((x) * 1024) +#define MB(x) (KB (KB (x))) +#define GB(x) (MB (KB (x))) + /* -** The number of pdir entries to "free" before issuing +** The number of pdir entries to "free" before issueing ** a read to PCOM register to flush out PCOM writes. ** Interacts with allocation granularity (ie 4 or 8 entries ** allocated and free'd/purged at a time might make this @@ -131,24 +109,30 @@ */ #define DELAYED_RESOURCE_CNT 16 -#define DEFAULT_DMA_HINT_REG 0 +#define DEFAULT_DMA_HINT_REG(d) 0 + +#define ZX1_FUNC_ID_VALUE ((PCI_DEVICE_ID_HP_ZX1_SBA << 16) | PCI_VENDOR_ID_HP) +#define ZX1_MC_ID ((PCI_DEVICE_ID_HP_ZX1_MC << 16) | PCI_VENDOR_ID_HP) -#define ZX1_IOC_ID ((PCI_DEVICE_ID_HP_ZX1_IOC << 16) | PCI_VENDOR_ID_HP) -#define REO_IOC_ID ((PCI_DEVICE_ID_HP_REO_IOC << 16) | PCI_VENDOR_ID_HP) -#define SX1000_IOC_ID ((PCI_DEVICE_ID_HP_SX1000_IOC << 16) | PCI_VENDOR_ID_HP) +#define SBA_FUNC_ID 0x0000 /* function id */ +#define SBA_FCLASS 0x0008 /* function class, bist, header, rev... 
*/ -#define ZX1_IOC_OFFSET 0x1000 /* ACPI reports SBA, we want IOC */ +#define SBA_FUNC_SIZE 0x10000 /* SBA configuration function reg set */ + +unsigned int __initdata zx1_func_offsets[] = {0x1000, 0x4000, 0x8000, + 0x9000, 0xa000, -1}; + +#define SBA_IOC_OFFSET 0x1000 + +#define MAX_IOC 1 /* we only have 1 for now*/ -#define IOC_FUNC_ID 0x000 -#define IOC_FCLASS 0x008 /* function class, bist, header, rev... */ #define IOC_IBASE 0x300 /* IO TLB */ #define IOC_IMASK 0x308 #define IOC_PCOM 0x310 #define IOC_TCNFG 0x318 #define IOC_PDIR_BASE 0x320 -/* AGP GART driver looks for this */ -#define ZX1_SBA_IOMMU_COOKIE 0x0000badbadc0ffeeUL +#define IOC_IOVA_SPACE_BASE 0x40000000 /* IOVA ranges start at 1GB */ /* ** IOC supports 4/8/16/64KB page sizes (see TCNFG register) @@ -168,7 +152,7 @@ #define IOVP_MASK PAGE_MASK struct ioc { - void *ioc_hpa; /* I/O MMU base address */ + unsigned long ioc_hpa; /* I/O MMU base address */ char *res_map; /* resource map, bit == pdir entry */ u64 *pdir_base; /* physical base address */ unsigned long ibase; /* pdir IOV Space base */ @@ -209,37 +193,37 @@ struct ioc { #endif #endif - /* Stuff we don't need in performance path */ - struct ioc *next; /* list of IOC's in system */ - acpi_handle handle; /* for multiple IOC's */ - const char *name; - unsigned int func_id; - unsigned int rev; /* HW revision of chip */ - u32 iov_size; + /* STUFF We don't need in performance path */ unsigned int pdir_size; /* in bytes, determined by IOV Space size */ - struct pci_dev *sac_only_dev; }; -static struct ioc *ioc_list; -static int reserve_sba_gart = 1; +struct sba_device { + struct sba_device *next; /* list of SBA's in system */ + const char *name; + unsigned long sba_hpa; /* base address */ + spinlock_t sba_lock; + unsigned int flags; /* state/functionality enabled */ + unsigned int hw_rev; /* HW revision of chip */ -#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0) -#define sba_sg_address(sg) (page_address((sg)->page) + (sg)->offset) -#else -#define sba_sg_address(sg) ((sg)->address ? (sg)->address : \ - page_address((sg)->page) + (sg)->offset) -#endif + unsigned int num_ioc; /* number of on-board IOC's */ + struct ioc ioc[MAX_IOC]; +}; -#ifdef FULL_VALID_PDIR -static u64 prefetch_spill_page; -#endif -#ifdef CONFIG_PCI -# define GET_IOC(dev) (((dev)->bus == &pci_bus_type) \ - ? ((struct ioc *) PCI_CONTROLLER(to_pci_dev(dev))->iommu) : NULL) -#else -# define GET_IOC(dev) NULL -#endif +static struct sba_device *sba_list; +static int sba_count; +static int reserve_sba_gart = 1; +static struct pci_dev sac_only_dev; + +#define sba_sg_address(sg) (page_address((sg)->page) + (sg)->offset) +#define sba_sg_len(sg) (sg->length) +#define sba_sg_iova(sg) (sg->dma_address) +#define sba_sg_iova_len(sg) (sg->dma_length) + +/* REVISIT - fix me for multiple SBAs/IOCs */ +#define GET_IOC(dev) (sba_list->ioc) +#define SBA_SET_AGP(sba_dev) (sba_dev->flags |= 0x1) +#define SBA_GET_AGP(sba_dev) (sba_dev->flags & 0x1) /* ** DMA_CHUNK_SIZE is used by the SCSI mid-layer to break up @@ -248,7 +232,10 @@ static u64 prefetch_spill_page; ** rather than the HW. I/O MMU allocation alogorithms can be ** faster with smaller size is (to some degree). 
*/ -#define DMA_CHUNK_SIZE (BITS_PER_LONG*PAGE_SIZE) +#define DMA_CHUNK_SIZE (BITS_PER_LONG*IOVP_SIZE) + +/* Looks nice and keeps the compiler happy */ +#define SBA_DEV(d) ((struct sba_device *) (d)) #define ROUNDUP(x,y) ((x + ((y)-1)) & ~((y)-1)) @@ -268,7 +255,7 @@ static u64 prefetch_spill_page; * sba_dump_tlb - debugging only - print IOMMU operating parameters * @hpa: base address of the IOMMU * - * Print the size/location of the IO MMU PDIR. + * Print the size/location of the IO MMU Pdir. */ static void sba_dump_tlb(char *hpa) @@ -286,19 +273,19 @@ sba_dump_tlb(char *hpa) #ifdef ASSERT_PDIR_SANITY /** - * sba_dump_pdir_entry - debugging only - print one IOMMU PDIR entry + * sba_dump_pdir_entry - debugging only - print one IOMMU Pdir entry * @ioc: IO MMU structure which owns the pdir we are interested in. * @msg: text to print ont the output line. * @pide: pdir index. * - * Print one entry of the IO MMU PDIR in human readable form. + * Print one entry of the IO MMU Pdir in human readable form. */ static void sba_dump_pdir_entry(struct ioc *ioc, char *msg, uint pide) { /* start printing from lowest pde in rval */ - u64 *ptr = &ioc->pdir_base[pide & ~(BITS_PER_LONG - 1)]; - unsigned long *rptr = (unsigned long *) &ioc->res_map[(pide >>3) & -sizeof(unsigned long)]; + u64 *ptr = &(ioc->pdir_base[pide & ~(BITS_PER_LONG - 1)]); + unsigned long *rptr = (unsigned long *) &(ioc->res_map[(pide >>3) & ~(sizeof(unsigned long) - 1)]); uint rcnt; printk(KERN_DEBUG "SBA: %s rp %p bit %d rval 0x%lx\n", @@ -309,7 +296,7 @@ sba_dump_pdir_entry(struct ioc *ioc, char *msg, uint pide) printk(KERN_DEBUG "%s %2d %p %016Lx\n", (rcnt == (pide & (BITS_PER_LONG - 1))) ? " -->" : " ", - rcnt, ptr, (unsigned long long) *ptr ); + rcnt, ptr, *ptr ); rcnt++; ptr++; } @@ -372,18 +359,17 @@ sba_check_pdir(struct ioc *ioc, char *msg) * print the SG list so we can verify it's correct by hand. */ static void -sba_dump_sg( struct ioc *ioc, struct scatterlist *startsg, int nents) +sba_dump_sg(struct ioc *ioc, struct scatterlist *startsg, int nents) { while (nents-- > 0) { printk(KERN_DEBUG " %d : DMA %08lx/%05x CPU %p\n", nents, - startsg->dma_address, startsg->dma_length, + (unsigned long) sba_sg_iova(startsg), sba_sg_iova_len(startsg), sba_sg_address(startsg)); startsg++; } } - static void -sba_check_sg( struct ioc *ioc, struct scatterlist *startsg, int nents) +sba_check_sg(struct ioc *ioc, struct scatterlist *startsg, int nents) { struct scatterlist *the_sg = startsg; int the_nents = nents; @@ -412,11 +398,9 @@ sba_check_sg( struct ioc *ioc, struct scatterlist *startsg, int nents) #define PAGES_PER_RANGE 1 /* could increase this to 4 or 8 if needed */ /* Convert from IOVP to IOVA and vice versa. */ -#define SBA_IOVA(ioc,iovp,offset,hint_reg) ((ioc->ibase) | (iovp) | (offset) | \ - ((hint_reg)<<(ioc->hint_shift_pdir))) +#define SBA_IOVA(ioc,iovp,offset,hint_reg) ((ioc->ibase) | (iovp) | (offset) | ((hint_reg)<<(ioc->hint_shift_pdir))) #define SBA_IOVP(ioc,iova) (((iova) & ioc->hint_mask_pdir) & ~(ioc->ibase)) -/* FIXME : review these macros to verify correctness and usage */ #define PDIR_INDEX(iovp) ((iovp)>>IOVP_SHIFT) #define RESMAP_MASK(n) ~(~0UL << (n)) @@ -424,7 +408,7 @@ sba_check_sg( struct ioc *ioc, struct scatterlist *startsg, int nents) /** - * sba_search_bitmap - find free space in IO PDIR resource bitmap + * sba_search_bitmap - find free space in IO Pdir resource bitmap * @ioc: IO MMU structure which owns the pdir we are interested in. * @bits_wanted: number of entries we need. 
* @@ -461,7 +445,7 @@ sba_search_bitmap(struct ioc *ioc, unsigned long bits_wanted) ** We need the alignment to invalidate I/O TLB using ** SBA HW features in the unmap path. */ - unsigned long o = 1 << get_order(bits_wanted << PAGE_SHIFT); + unsigned long o = 1UL << get_order(bits_wanted << IOVP_SHIFT); uint bitshiftcnt = ROUNDUP(ioc->res_bitshift, o); unsigned long mask; @@ -507,7 +491,7 @@ sba_search_bitmap(struct ioc *ioc, unsigned long bits_wanted) /** - * sba_alloc_range - find free bits and mark them in IO PDIR resource bitmap + * sba_alloc_range - find free bits and mark them in IO Pdir resource bitmap * @ioc: IO MMU structure which owns the pdir we are interested in. * @size: number of bytes to create a mapping for * @@ -536,8 +520,7 @@ sba_alloc_range(struct ioc *ioc, size_t size) if (pide >= (ioc->res_size << 3)) { pide = sba_search_bitmap(ioc, pages_needed); if (pide >= (ioc->res_size << 3)) - panic(__FILE__ ": I/O MMU @ %p is out of mapping resources\n", - ioc->ioc_hpa); + panic(__FILE__ ": I/O MMU @ %lx is out of mapping resources\n", ioc->ioc_hpa); } #ifdef ASSERT_PDIR_SANITY @@ -570,7 +553,7 @@ sba_alloc_range(struct ioc *ioc, size_t size) /** - * sba_free_range - unmark bits in IO PDIR resource bitmap + * sba_free_range - unmark bits in IO Pdir resource bitmap * @ioc: IO MMU structure which owns the pdir we are interested in. * @iova: IO virtual address which was previously allocated. * @size: number of bytes to create a mapping for @@ -617,14 +600,14 @@ sba_free_range(struct ioc *ioc, dma_addr_t iova, size_t size) /** - * sba_io_pdir_entry - fill in one IO PDIR entry - * @pdir_ptr: pointer to IO PDIR entry - * @vba: Virtual CPU address of buffer to map + * sba_io_pdir_entry - fill in one IO Pdir entry + * @pdir_ptr: pointer to IO Pdir entry + * @phys_page: phys CPU address of page to map * * SBA Mapping Routine * - * Given a virtual address (vba, arg1) sba_io_pdir_entry() - * loads the I/O PDIR entry pointed to by pdir_ptr (arg0). + * Given a physical address (phys_page, arg1) sba_io_pdir_entry() + * loads the I/O Pdir entry pointed to by pdir_ptr (arg0). * Each IO Pdir entry consists of 8 bytes as shown below * (LSB == bit 0): * @@ -636,21 +619,12 @@ sba_free_range(struct ioc *ioc, dma_addr_t iova, size_t size) * V == Valid Bit * U == Unused * PPN == Physical Page Number - * - * The physical address fields are filled with the results of virt_to_phys() - * on the vba. */ -#if 1 -#define sba_io_pdir_entry(pdir_ptr, vba) *pdir_ptr = ((vba & ~0xE000000000000FFFULL) \ - | 0x8000000000000000ULL) -#else -void SBA_INLINE -sba_io_pdir_entry(u64 *pdir_ptr, unsigned long vba) -{ - *pdir_ptr = ((vba & ~0xE000000000000FFFULL) | 0x80000000000000FFULL); -} -#endif +#define SBA_VALID_MASK 0x80000000000000FFULL +#define sba_io_pdir_entry(pdir_ptr, phys_page) *pdir_ptr = (phys_page | SBA_VALID_MASK) +#define sba_io_page(pdir_ptr) (*pdir_ptr & ~SBA_VALID_MASK) + #ifdef ENABLE_MARK_CLEAN /** @@ -666,7 +640,7 @@ mark_clean (void *addr, size_t size) pg_addr = PAGE_ALIGN((unsigned long) addr); end = (unsigned long) addr + size; while (pg_addr + PAGE_SIZE <= end) { - struct page *page = virt_to_page((void *)pg_addr); + struct page *page = virt_to_page(pg_addr); set_bit(PG_arch_1, &page->flags); pg_addr += PAGE_SIZE; } @@ -674,12 +648,12 @@ mark_clean (void *addr, size_t size) #endif /** - * sba_mark_invalid - invalidate one or more IO PDIR entries + * sba_mark_invalid - invalidate one or more IO Pdir entries * @ioc: IO MMU structure which owns the pdir we are interested in. 
* @iova: IO Virtual Address mapped earlier * @byte_cnt: number of bytes this mapping covers. * - * Marking the IO PDIR entry(ies) as Invalid and invalidate + * Marking the IO Pdir entry(ies) as Invalid and invalidate * corresponding IO TLB entry. The PCOM (Purge Command Register) * is to purge stale entries in the IO TLB when unmapping entries. * @@ -713,24 +687,15 @@ sba_mark_invalid(struct ioc *ioc, dma_addr_t iova, size_t byte_cnt) iovp |= IOVP_SHIFT; /* set "size" field for PCOM */ -#ifndef FULL_VALID_PDIR /* - ** clear I/O PDIR entry "valid" bit + ** clear I/O Pdir entry "valid" bit ** Do NOT clear the rest - save it for debugging. ** We should only clear bits that have previously ** been enabled. */ - ioc->pdir_base[off] &= ~(0x80000000000000FFULL); -#else - /* - ** If we want to maintain the PDIR as valid, put in - ** the spill page so devices prefetching won't - ** cause a hard fail. - */ - ioc->pdir_base[off] = (0x80000000000000FFULL | prefetch_spill_page); -#endif + ioc->pdir_base[off] &= ~SBA_VALID_MASK; } else { - u32 t = get_order(byte_cnt) + PAGE_SHIFT; + u32 t = get_order(byte_cnt) + IOVP_SHIFT; iovp |= t; ASSERT(t <= 31); /* 2GB! Max value of "size" field */ @@ -738,18 +703,14 @@ sba_mark_invalid(struct ioc *ioc, dma_addr_t iova, size_t byte_cnt) do { /* verify this pdir entry is enabled */ ASSERT(ioc->pdir_base[off] >> 63); -#ifndef FULL_VALID_PDIR /* clear I/O Pdir entry "valid" bit first */ - ioc->pdir_base[off] &= ~(0x80000000000000FFULL); -#else - ioc->pdir_base[off] = (0x80000000000000FFULL | prefetch_spill_page); -#endif + ioc->pdir_base[off] &= ~SBA_VALID_MASK; off++; byte_cnt -= IOVP_SIZE; } while (byte_cnt > 0); } - WRITE_REG(iovp | ioc->ibase, ioc->ioc_hpa+IOC_PCOM); + WRITE_REG(iovp, ioc->ioc_hpa+IOC_PCOM); } /** @@ -757,23 +718,26 @@ sba_mark_invalid(struct ioc *ioc, dma_addr_t iova, size_t byte_cnt) * @dev: instance of PCI owned by the driver that's asking. * @addr: driver buffer to map. * @size: number of bytes to map in driver buffer. - * @dir: R/W or both. + * @direction: R/W or both. * * See Documentation/DMA-mapping.txt */ dma_addr_t -sba_map_single(struct device *dev, void *addr, size_t size, int dir) +sba_map_single(struct pci_dev *dev, void *addr, size_t size, int direction) { struct ioc *ioc; - unsigned long flags; + unsigned long flags; dma_addr_t iovp; dma_addr_t offset; u64 *pdir_start; int pide; #ifdef ALLOW_IOV_BYPASS - unsigned long pci_addr = virt_to_phys(addr); + unsigned long phys_addr = virt_to_phys(addr); #endif + if (!sba_list) + panic("sba_map_single: no SBA found!\n"); + ioc = GET_IOC(dev); ASSERT(ioc); @@ -781,7 +745,7 @@ sba_map_single(struct device *dev, void *addr, size_t size, int dir) /* ** Check if the PCI device can DMA to ptr... if so, just return ptr */ - if (dev && dev->dma_mask && (pci_addr & ~*dev->dma_mask) == 0) { + if ((phys_addr & ~dev->dma_mask) == 0) { /* ** Device is bit capable of DMA'ing to the buffer... 
** just return the PCI address of ptr @@ -792,8 +756,8 @@ sba_map_single(struct device *dev, void *addr, size_t size, int dir) spin_unlock_irqrestore(&ioc->res_lock, flags); #endif DBG_BYPASS("sba_map_single() bypass mask/addr: 0x%lx/0x%lx\n", - *dev->dma_mask, pci_addr); - return pci_addr; + dev->dma_mask, phys_addr); + return phys_addr; } #endif @@ -826,7 +790,8 @@ sba_map_single(struct device *dev, void *addr, size_t size, int dir) while (size > 0) { ASSERT(((u8 *)pdir_start)[7] == 0); /* verify availability */ - sba_io_pdir_entry(pdir_start, (unsigned long) addr); + + sba_io_pdir_entry(pdir_start, virt_to_phys(addr)); DBG_RUN(" pdir 0x%p %lx\n", pdir_start, *pdir_start); @@ -834,15 +799,12 @@ sba_map_single(struct device *dev, void *addr, size_t size, int dir) size -= IOVP_SIZE; pdir_start++; } - /* force pdir update */ - wmb(); - /* form complete address */ #ifdef ASSERT_PDIR_SANITY sba_check_pdir(ioc,"Check after sba_map_single()"); #endif spin_unlock_irqrestore(&ioc->res_lock, flags); - return SBA_IOVA(ioc, iovp, offset, DEFAULT_DMA_HINT_REG); + return SBA_IOVA(ioc, iovp, offset, DEFAULT_DMA_HINT_REG(direction)); } /** @@ -850,19 +812,23 @@ sba_map_single(struct device *dev, void *addr, size_t size, int dir) * @dev: instance of PCI owned by the driver that's asking. * @iova: IOVA of driver buffer previously mapped. * @size: number of bytes mapped in driver buffer. - * @dir: R/W or both. + * @direction: R/W or both. * * See Documentation/DMA-mapping.txt */ -void sba_unmap_single(struct device *dev, dma_addr_t iova, size_t size, int dir) +void sba_unmap_single(struct pci_dev *dev, dma_addr_t iova, size_t size, + int direction) { struct ioc *ioc; #if DELAYED_RESOURCE_CNT > 0 struct sba_dma_pair *d; #endif - unsigned long flags; + unsigned long flags; dma_addr_t offset; + if (!sba_list) + panic("sba_map_single: no SBA found!\n"); + ioc = GET_IOC(dev); ASSERT(ioc); @@ -879,7 +845,7 @@ void sba_unmap_single(struct device *dev, dma_addr_t iova, size_t size, int dir) DBG_BYPASS("sba_unmap_single() bypass addr: 0x%lx\n", iova); #ifdef ENABLE_MARK_CLEAN - if (dir == DMA_FROM_DEVICE) { + if (direction == PCI_DMA_FROMDEVICE) { mark_clean(phys_to_virt(iova), size); } #endif @@ -895,6 +861,29 @@ void sba_unmap_single(struct device *dev, dma_addr_t iova, size_t size, int dir) size += offset; size = ROUNDUP(size, IOVP_SIZE); +#ifdef ENABLE_MARK_CLEAN + /* + ** Don't need to hold the spinlock while telling VM pages are "clean". + ** The pages are "busy" in the resource map until we mark them free. + ** But tell VM pages are clean *before* releasing the resource + ** in order to avoid race conditions. 
+ */ + if (direction == PCI_DMA_FROMDEVICE) { + u32 iovp = (u32) SBA_IOVP(ioc,iova); + unsigned int pide = PDIR_INDEX(iovp); + u64 *pdirp = &(ioc->pdir_base[pide]); + size_t byte_cnt = size; + void *addr; + + do { + addr = phys_to_virt(sba_io_page(pdirp)); + mark_clean(addr, min(byte_cnt, IOVP_SIZE)); + pdirp++; + byte_cnt -= IOVP_SIZE; + } while (byte_cnt > 0); + } +#endif + spin_lock_irqsave(&ioc->res_lock, flags); #ifdef CONFIG_PROC_FS ioc->usingle_calls++; @@ -920,40 +909,7 @@ void sba_unmap_single(struct device *dev, dma_addr_t iova, size_t size, int dir) sba_free_range(ioc, iova, size); READ_REG(ioc->ioc_hpa+IOC_PCOM); /* flush purges */ #endif /* DELAYED_RESOURCE_CNT == 0 */ -#ifdef ENABLE_MARK_CLEAN - if (dir == DMA_FROM_DEVICE) { - u32 iovp = (u32) SBA_IOVP(ioc,iova); - int off = PDIR_INDEX(iovp); - void *addr; - - if (size <= IOVP_SIZE) { - addr = phys_to_virt(ioc->pdir_base[off] & - ~0xE000000000000FFFULL); - mark_clean(addr, size); - } else { - size_t byte_cnt = size; - - do { - addr = phys_to_virt(ioc->pdir_base[off] & - ~0xE000000000000FFFULL); - mark_clean(addr, min(byte_cnt, IOVP_SIZE)); - off++; - byte_cnt -= IOVP_SIZE; - - } while (byte_cnt > 0); - } - } -#endif spin_unlock_irqrestore(&ioc->res_lock, flags); - - /* XXX REVISIT for 2.5 Linux - need syncdma for zero-copy support. - ** For Astro based systems this isn't a big deal WRT performance. - ** As long as 2.4 kernels copyin/copyout data from/to userspace, - ** we don't need the syncdma. The issue here is I/O MMU cachelines - ** are *not* coherent in all cases. May be hwrev dependent. - ** Need to investigate more. - asm volatile("syncdma"); - */ } @@ -966,25 +922,29 @@ void sba_unmap_single(struct device *dev, dma_addr_t iova, size_t size, int dir) * See Documentation/DMA-mapping.txt */ void * -sba_alloc_coherent (struct device *hwdev, size_t size, dma_addr_t *dma_handle, int flags) +sba_alloc_consistent(struct pci_dev *hwdev, size_t size, dma_addr_t *dma_handle) { - struct ioc *ioc; - void *addr; + void *ret; - addr = (void *) __get_free_pages(flags, get_order(size)); - if (!addr) - return NULL; + if (!hwdev) { + /* only support PCI */ + *dma_handle = 0; + return 0; + } - /* - * REVISIT: if sba_map_single starts needing more than dma_mask from the - * device, this needs to be updated. - */ - ioc = GET_IOC(hwdev); - ASSERT(ioc); - *dma_handle = sba_map_single(&ioc->sac_only_dev->dev, addr, size, 0); + ret = (void *) __get_free_pages(GFP_ATOMIC, get_order(size)); - memset(addr, 0, size); - return addr; + if (ret) { + memset(ret, 0, size); + /* + * REVISIT: if sba_map_single starts needing more + * than dma_mask from the device, this needs to be + * updated. + */ + *dma_handle = sba_map_single(&sac_only_dev, ret, size, 0); + } + + return ret; } @@ -997,245 +957,117 @@ sba_alloc_coherent (struct device *hwdev, size_t size, dma_addr_t *dma_handle, i * * See Documentation/DMA-mapping.txt */ -void sba_free_coherent (struct device *hwdev, size_t size, void *vaddr, dma_addr_t dma_handle) +void sba_free_consistent(struct pci_dev *hwdev, size_t size, void *vaddr, + dma_addr_t dma_handle) { sba_unmap_single(hwdev, dma_handle, size, 0); free_pages((unsigned long) vaddr, get_order(size)); } -/* -** Since 0 is a valid pdir_base index value, can't use that -** to determine if a value is valid or not. Use a flag to indicate -** the SG list entry contains a valid pdir index. 
-*/ -#define PIDE_FLAG 0x1UL - #ifdef DEBUG_LARGE_SG_ENTRIES int dump_run_sg = 0; #endif - -/** - * sba_fill_pdir - write allocated SG entries into IO PDIR - * @ioc: IO MMU structure which owns the pdir we are interested in. - * @startsg: list of IOVA/size pairs - * @nents: number of entries in startsg list - * - * Take preprocessed SG list and write corresponding entries - * in the IO PDIR. - */ - -static SBA_INLINE int -sba_fill_pdir( - struct ioc *ioc, - struct scatterlist *startsg, - int nents) -{ - struct scatterlist *dma_sg = startsg; /* pointer to current DMA */ - int n_mappings = 0; - u64 *pdirp = 0; - unsigned long dma_offset = 0; - - dma_sg--; - while (nents-- > 0) { - int cnt = startsg->dma_length; - startsg->dma_length = 0; - -#ifdef DEBUG_LARGE_SG_ENTRIES - if (dump_run_sg) - printk(" %2d : %08lx/%05x %p\n", - nents, startsg->dma_address, cnt, - sba_sg_address(startsg)); -#else - DBG_RUN_SG(" %d : %08lx/%05x %p\n", - nents, startsg->dma_address, cnt, - sba_sg_address(startsg)); -#endif - /* - ** Look for the start of a new DMA stream - */ - if (startsg->dma_address & PIDE_FLAG) { - u32 pide = startsg->dma_address & ~PIDE_FLAG; - dma_offset = (unsigned long) pide & ~IOVP_MASK; - startsg->dma_address = 0; - dma_sg++; - dma_sg->dma_address = pide | ioc->ibase; - pdirp = &(ioc->pdir_base[pide >> IOVP_SHIFT]); - n_mappings++; - } - - /* - ** Look for a VCONTIG chunk - */ - if (cnt) { - unsigned long vaddr = (unsigned long) sba_sg_address(startsg); - ASSERT(pdirp); - - /* Since multiple Vcontig blocks could make up - ** one DMA stream, *add* cnt to dma_len. - */ - dma_sg->dma_length += cnt; - cnt += dma_offset; - dma_offset=0; /* only want offset on first chunk */ - cnt = ROUNDUP(cnt, IOVP_SIZE); -#ifdef CONFIG_PROC_FS - ioc->msg_pages += cnt >> IOVP_SHIFT; -#endif - do { - sba_io_pdir_entry(pdirp, vaddr); - vaddr += IOVP_SIZE; - cnt -= IOVP_SIZE; - pdirp++; - } while (cnt > 0); - } - startsg++; - } - /* force pdir update */ - wmb(); - -#ifdef DEBUG_LARGE_SG_ENTRIES - dump_run_sg = 0; -#endif - return(n_mappings); -} - - -/* -** Two address ranges are DMA contiguous *iff* "end of prev" and -** "start of next" are both on a page boundry. -** -** (shift left is a quick trick to mask off upper bits) -*/ -#define DMA_CONTIG(__X, __Y) \ - (((((unsigned long) __X) | ((unsigned long) __Y)) << (BITS_PER_LONG - PAGE_SHIFT)) == 0UL) +#define SG_ENT_VIRT_PAGE(sg) page_address((sg)->page) +#define SG_ENT_PHYS_PAGE(SG) virt_to_phys(SG_ENT_VIRT_PAGE(SG)) /** * sba_coalesce_chunks - preprocess the SG list * @ioc: IO MMU structure which owns the pdir we are interested in. - * @startsg: list of IOVA/size pairs + * @startsg: input=SG list output=DMA addr/len pairs filled in * @nents: number of entries in startsg list + * @direction: R/W or both. * - * First pass is to walk the SG list and determine where the breaks are - * in the DMA stream. Allocates PDIR entries but does not fill them. - * Returns the number of DMA chunks. + * Walk the SG list and determine where the breaks are in the DMA stream. + * Allocate IO Pdir resources and fill them in separate loop. + * Returns the number of DMA streams used for output IOVA list. + * Note each DMA stream can consume multiple IO Pdir entries. * - * Doing the fill seperate from the coalescing/allocation keeps the - * code simpler. Future enhancement could make one pass through - * the sglist do both. + * Code is written assuming some coalescing is possible. 
*/ static SBA_INLINE int -sba_coalesce_chunks( struct ioc *ioc, - struct scatterlist *startsg, - int nents) +sba_coalesce_chunks(struct ioc *ioc, struct scatterlist *startsg, + int nents, int direction) { - struct scatterlist *vcontig_sg; /* VCONTIG chunk head */ - unsigned long vcontig_len; /* len of VCONTIG chunk */ - unsigned long vcontig_end; - struct scatterlist *dma_sg; /* next DMA stream head */ - unsigned long dma_offset, dma_len; /* start/len of DMA stream */ + struct scatterlist *dma_sg = startsg; /* return array */ int n_mappings = 0; - while (nents > 0) { - unsigned long vaddr = (unsigned long) sba_sg_address(startsg); + ASSERT(nents > 1); + + do { + unsigned int dma_cnt = 1; /* number of pages in DMA stream */ + unsigned int pide; /* index into IO Pdir array */ + u64 *pdirp; /* pointer into IO Pdir array */ + unsigned long dma_offset, dma_len; /* cumulative DMA stream */ /* ** Prepare for first/next DMA stream */ - dma_sg = vcontig_sg = startsg; - dma_len = vcontig_len = vcontig_end = startsg->length; - vcontig_end += vaddr; - dma_offset = vaddr & ~IOVP_MASK; - - /* PARANOID: clear entries */ - startsg->dma_address = startsg->dma_length = 0; + dma_len = sba_sg_len(startsg); + dma_offset = (unsigned long) sba_sg_address(startsg); + startsg++; + nents--; /* - ** This loop terminates one iteration "early" since - ** it's always looking one "ahead". + ** We want to know how many entries can be coalesced + ** before trying to allocate IO Pdir space. + ** IOVAs can then be allocated "naturally" aligned + ** to take advantage of the block IO TLB flush. */ - while (--nents > 0) { - unsigned long vaddr; /* tmp */ - - startsg++; - - /* PARANOID */ - startsg->dma_address = startsg->dma_length = 0; + while (nents) { + unsigned long end_offset = dma_offset + dma_len; - /* catch brokenness in SCSI layer */ - ASSERT(startsg->length <= DMA_CHUNK_SIZE); + /* prev entry must end on a page boundary */ + if (end_offset & IOVP_MASK) + break; - /* - ** First make sure current dma stream won't - ** exceed DMA_CHUNK_SIZE if we coalesce the - ** next entry. - */ - if (((dma_len + dma_offset + startsg->length + ~IOVP_MASK) & IOVP_MASK) - > DMA_CHUNK_SIZE) + /* next entry start on a page boundary? */ + if (startsg->offset) break; /* - ** Then look for virtually contiguous blocks. - ** - ** append the next transaction? + ** make sure current dma stream won't exceed + ** DMA_CHUNK_SIZE if coalescing entries. */ - vaddr = (unsigned long) sba_sg_address(startsg); - if (vcontig_end == vaddr) - { - vcontig_len += startsg->length; - vcontig_end += startsg->length; - dma_len += startsg->length; - continue; - } + if (((end_offset + startsg->length + ~IOVP_MASK) + & IOVP_MASK) + > DMA_CHUNK_SIZE) + break; -#ifdef DEBUG_LARGE_SG_ENTRIES - dump_run_sg = (vcontig_len > IOVP_SIZE); -#endif + dma_len += sba_sg_len(startsg); + startsg++; + nents--; + dma_cnt++; + } - /* - ** Not virtually contigous. - ** Terminate prev chunk. - ** Start a new chunk. - ** - ** Once we start a new VCONTIG chunk, dma_offset - ** can't change. And we need the offset from the first - ** chunk - not the last one. Ergo Successive chunks - ** must start on page boundaries and dove tail - ** with it's predecessor. - */ - vcontig_sg->dma_length = vcontig_len; + ASSERT(dma_len <= DMA_CHUNK_SIZE); - vcontig_sg = startsg; - vcontig_len = startsg->length; + /* allocate IO Pdir resource. + ** returns index into (u64) IO Pdir array. + ** IOVA is formed from this. 
+ */ + pide = sba_alloc_range(ioc, dma_cnt << IOVP_SHIFT); + pdirp = &(ioc->pdir_base[pide]); - /* - ** 3) do the entries end/start on page boundaries? - ** Don't update vcontig_end until we've checked. - */ - if (DMA_CONTIG(vcontig_end, vaddr)) - { - vcontig_end = vcontig_len + vaddr; - dma_len += vcontig_len; - continue; - } else { - break; - } + /* fill_pdir: write stream into IO Pdir */ + while (dma_cnt--) { + sba_io_pdir_entry(pdirp, SG_ENT_PHYS_PAGE(startsg)); + startsg++; + pdirp++; } - /* - ** End of DMA Stream - ** Terminate last VCONTIG block. - ** Allocate space for DMA stream. - */ - vcontig_sg->dma_length = vcontig_len; - dma_len = (dma_len + dma_offset + ~IOVP_MASK) & IOVP_MASK; - ASSERT(dma_len <= DMA_CHUNK_SIZE); - dma_sg->dma_address = (dma_addr_t) (PIDE_FLAG - | (sba_alloc_range(ioc, dma_len) << IOVP_SHIFT) - | dma_offset); + /* "output" IOVA */ + sba_sg_iova(dma_sg) = SBA_IOVA(ioc, + ((dma_addr_t) pide << IOVP_SHIFT), + dma_offset, + DEFAULT_DMA_HINT_REG(direction)); + sba_sg_iova_len(dma_sg) = dma_len; + + dma_sg++; n_mappings++; - } + } while (nents); return n_mappings; } @@ -1243,51 +1075,60 @@ sba_coalesce_chunks( struct ioc *ioc, /** * sba_map_sg - map Scatter/Gather list - * @dev: instance of PCI owned by the driver that's asking. + * @dev: instance of PCI device owned by the driver that's asking. * @sglist: array of buffer/length pairs * @nents: number of entries in list - * @dir: R/W or both. + * @direction: R/W or both. * * See Documentation/DMA-mapping.txt */ -int sba_map_sg(struct device *dev, struct scatterlist *sglist, int nents, int dir) +int sba_map_sg(struct pci_dev *dev, struct scatterlist *sglist, int nents, + int direction) { struct ioc *ioc; - int coalesced, filled = 0; + int filled = 0; unsigned long flags; #ifdef ALLOW_IOV_BYPASS struct scatterlist *sg; #endif - DBG_RUN_SG("%s() START %d entries\n", __FUNCTION__, nents); + DBG_RUN_SG("%s() START %d entries, 0x%p,0x%x\n", __FUNCTION__, nents, + sba_sg_address(sglist), sba_sg_len(sglist)); + + if (!sba_list) + panic("sba_map_single: no SBA found!\n"); + ioc = GET_IOC(dev); ASSERT(ioc); #ifdef ALLOW_IOV_BYPASS - if (dev && dev->dma_mask && (ioc->dma_mask & ~*dev->dma_mask) == 0) { - for (sg = sglist ; filled < nents ; filled++, sg++){ - sg->dma_length = sg->length; - sg->dma_address = virt_to_phys(sba_sg_address(sg)); + if (dev->dma_mask >= ioc->dma_mask) { + for (sg = sglist ; filled < nents ; filled++, sg++) { + sba_sg_iova(sg) = virt_to_phys(sba_sg_address(sg)); + sba_sg_iova_len(sg) = sba_sg_len(sg); } #ifdef CONFIG_PROC_FS spin_lock_irqsave(&ioc->res_lock, flags); ioc->msg_bypass++; spin_unlock_irqrestore(&ioc->res_lock, flags); #endif + DBG_RUN_SG("%s() DONE %d mappings bypassed\n", __FUNCTION__, filled); return filled; } #endif /* Fast path single entry scatterlists. */ if (nents == 1) { - sglist->dma_length = sglist->length; - sglist->dma_address = sba_map_single(dev, sba_sg_address(sglist), sglist->length, - dir); + sba_sg_iova(sglist) = sba_map_single(dev, + (void *) sba_sg_iova(sglist), + sba_sg_len(sglist), direction); + sba_sg_iova_len(sglist) = sba_sg_len(sglist); #ifdef CONFIG_PROC_FS /* ** Should probably do some stats counting, but trying to ** be precise quickly starts wasting CPU time. 
*/ #endif + DBG_RUN_SG("%s() DONE 1 mapping\n", __FUNCTION__); return 1; } @@ -1304,26 +1145,11 @@ int sba_map_sg(struct device *dev, struct scatterlist *sglist, int nents, int di #ifdef CONFIG_PROC_FS ioc->msg_calls++; #endif - + /* - ** First coalesce the chunks and allocate I/O pdir space - ** - ** If this is one DMA stream, we can properly map using the - ** correct virtual address associated with each DMA page. - ** w/o this association, we wouldn't have coherent DMA! - ** Access to the virtual address is what forces a two pass algorithm. + ** coalesce and program the I/O Pdir */ - coalesced = sba_coalesce_chunks(ioc, sglist, nents); - - /* - ** Program the I/O Pdir - ** - ** map the virtual addresses to the I/O Pdir - ** o dma_address will contain the pdir index - ** o dma_len will contain the number of bytes to map - ** o address contains the virtual address. - */ - filled = sba_fill_pdir(ioc, sglist, nents); + filled = sba_coalesce_chunks(ioc, sglist, nents, direction); #ifdef ASSERT_PDIR_SANITY if (sba_check_pdir(ioc,"Check after sba_map_sg()")) @@ -1335,7 +1161,6 @@ int sba_map_sg(struct device *dev, struct scatterlist *sglist, int nents, int di spin_unlock_irqrestore(&ioc->res_lock, flags); - ASSERT(coalesced == filled); DBG_RUN_SG("%s() DONE %d mappings\n", __FUNCTION__, filled); return filled; @@ -1347,19 +1172,23 @@ int sba_map_sg(struct device *dev, struct scatterlist *sglist, int nents, int di * @dev: instance of PCI owned by the driver that's asking. * @sglist: array of buffer/length pairs * @nents: number of entries in list - * @dir: R/W or both. + * @direction: R/W or both. * * See Documentation/DMA-mapping.txt */ -void sba_unmap_sg (struct device *dev, struct scatterlist *sglist, int nents, int dir) +void sba_unmap_sg(struct pci_dev *dev, struct scatterlist *sglist, int nents, + int direction) { struct ioc *ioc; #ifdef ASSERT_PDIR_SANITY unsigned long flags; #endif - DBG_RUN_SG("%s() START %d entries, %p,%x\n", - __FUNCTION__, nents, sba_sg_address(sglist), sglist->length); + DBG_RUN_SG("%s() START %d entries, 0x%p,0x%x\n", + __FUNCTION__, nents, sba_sg_address(sglist), sba_sg_len(sglist)); + + if (!sba_list) + panic("sba_map_single: no SBA found!\n"); ioc = GET_IOC(dev); ASSERT(ioc); @@ -1374,9 +1203,10 @@ void sba_unmap_sg (struct device *dev, struct scatterlist *sglist, int nents, in spin_unlock_irqrestore(&ioc->res_lock, flags); #endif - while (nents && sglist->dma_length) { + while (sba_sg_len(sglist) && nents--) { - sba_unmap_single(dev, sglist->dma_address, sglist->dma_length, dir); + sba_unmap_single(dev, (dma_addr_t)sba_sg_iova(sglist), + sba_sg_iova_len(sglist), direction); #ifdef CONFIG_PROC_FS /* ** This leaves inconsistent data in the stats, but we can't @@ -1384,11 +1214,9 @@ void sba_unmap_sg (struct device *dev, struct scatterlist *sglist, int nents, in ** were coalesced to a single entry. The stats are fun, ** but speed is more important. 
*/ - ioc->usg_pages += ((sglist->dma_address & ~IOVP_MASK) + sglist->dma_length - + IOVP_SIZE - 1) >> PAGE_SHIFT; + ioc->usg_pages += (((u64)sba_sg_iova(sglist) & ~IOVP_MASK) + sba_sg_len(sglist) + IOVP_SIZE - 1) >> IOVP_SHIFT; #endif - sglist++; - nents--; + ++sglist; } DBG_RUN_SG("%s() DONE (nents %d)\n", __FUNCTION__, nents); @@ -1401,76 +1229,87 @@ void sba_unmap_sg (struct device *dev, struct scatterlist *sglist, int nents, in } +unsigned long +sba_dma_address (struct scatterlist *sg) +{ + return ((unsigned long)sba_sg_iova(sg)); +} + +int +sba_dma_supported (struct pci_dev *dev, u64 mask) +{ + return 1; +} + /************************************************************** * * Initialization and claim * ***************************************************************/ -static void __init -ioc_iova_init(struct ioc *ioc) + +static void +sba_ioc_init(struct sba_device *sba_dev, struct ioc *ioc, int ioc_num) { - u32 iova_space_mask; - int iov_order, tcnfg; - int agp_found = 0; - struct pci_dev *device; -#ifdef FULL_VALID_PDIR - unsigned long index; -#endif + u32 iova_space_size, iova_space_mask; + void * pdir_base; + int pdir_size, iov_order, tcnfg; /* - ** Firmware programs the base and size of a "safe IOVA space" - ** (one that doesn't overlap memory or LMMIO space) in the - ** IBASE and IMASK registers. + ** Firmware programs the maximum IOV space size into the imask reg */ - ioc->ibase = READ_REG(ioc->ioc_hpa + IOC_IBASE) & ~0x1UL; - ioc->iov_size = ~(READ_REG(ioc->ioc_hpa + IOC_IMASK) & 0xFFFFFFFFUL) + 1; + iova_space_size = ~(READ_REG(ioc->ioc_hpa + IOC_IMASK) & 0xFFFFFFFFUL) + 1; /* ** iov_order is always based on a 1GB IOVA space since we want to ** turn on the other half for AGP GART. */ - iov_order = get_order(ioc->iov_size >> (IOVP_SHIFT - PAGE_SHIFT)); - ioc->pdir_size = (ioc->iov_size / IOVP_SIZE) * sizeof(u64); + iov_order = get_order(iova_space_size >> (IOVP_SHIFT-PAGE_SHIFT)); + ioc->pdir_size = pdir_size = (iova_space_size/IOVP_SIZE) * sizeof(u64); - DBG_INIT("%s() hpa %p IOV %dMB (%d bits) PDIR size 0x%x\n", - __FUNCTION__, ioc->ioc_hpa, ioc->iov_size >> 20, - iov_order + PAGE_SHIFT, ioc->pdir_size); + DBG_INIT("%s() hpa 0x%lx IOV %dMB (%d bits) PDIR size 0x%0x\n", + __FUNCTION__, ioc->ioc_hpa, iova_space_size>>20, + iov_order + PAGE_SHIFT, ioc->pdir_size); - /* FIXME : DMA HINTs not used */ + /* XXX DMA HINTs not used */ ioc->hint_shift_pdir = iov_order + PAGE_SHIFT; ioc->hint_mask_pdir = ~(0x3 << (iov_order + PAGE_SHIFT)); - ioc->pdir_base = (void *) __get_free_pages(GFP_KERNEL, - get_order(ioc->pdir_size)); - if (!ioc->pdir_base) - panic(PFX "Couldn't allocate I/O Page Table\n"); - - memset(ioc->pdir_base, 0, ioc->pdir_size); + ioc->pdir_base = pdir_base = + (void *) __get_free_pages(GFP_KERNEL, get_order(pdir_size)); + if (NULL == pdir_base) + { + panic(__FILE__ ":%s() could not allocate I/O Page Table\n", __FUNCTION__); + } + memset(pdir_base, 0, pdir_size); DBG_INIT("%s() pdir %p size %x hint_shift_pdir %x hint_mask_pdir %lx\n", - __FUNCTION__, ioc->pdir_base, ioc->pdir_size, + __FUNCTION__, pdir_base, pdir_size, ioc->hint_shift_pdir, ioc->hint_mask_pdir); - ASSERT((((unsigned long) ioc->pdir_base) & PAGE_MASK) == (unsigned long) ioc->pdir_base); - WRITE_REG(virt_to_phys(ioc->pdir_base), ioc->ioc_hpa + IOC_PDIR_BASE); + ASSERT((((unsigned long) pdir_base) & PAGE_MASK) == (unsigned long) pdir_base); + WRITE_REG(virt_to_phys(pdir_base), ioc->ioc_hpa + IOC_PDIR_BASE); - DBG_INIT(" base %p\n", ioc->pdir_base); + DBG_INIT(" base %p\n", pdir_base); /* build IMASK for IOC 
and Elroy */ iova_space_mask = 0xffffffff; - iova_space_mask <<= (iov_order + PAGE_SHIFT); - ioc->imask = iova_space_mask; + iova_space_mask <<= (iov_order + IOVP_SHIFT); + + ioc->ibase = READ_REG(ioc->ioc_hpa + IOC_IBASE) & 0xFFFFFFFEUL; + + ioc->imask = iova_space_mask; /* save it */ DBG_INIT("%s() IOV base 0x%lx mask 0x%0lx\n", __FUNCTION__, ioc->ibase, ioc->imask); /* - ** FIXME: Hint registers are programmed with default hint + ** XXX DMA HINT registers are programmed with default hint ** values during boot, so hints should be sane even if we ** can't reprogram them the way drivers want. */ - WRITE_REG(ioc->imask, ioc->ioc_hpa + IOC_IMASK); + + WRITE_REG(ioc->imask, ioc->ioc_hpa+IOC_IMASK); /* ** Setting the upper bits makes checking for bypass addresses @@ -1478,30 +1317,34 @@ ioc_iova_init(struct ioc *ioc) */ ioc->imask |= 0xFFFFFFFF00000000UL; - /* Set I/O PDIR Page size to system page size */ - switch (PAGE_SHIFT) { - case 12: tcnfg = 0; break; /* 4K */ - case 13: tcnfg = 1; break; /* 8K */ - case 14: tcnfg = 2; break; /* 16K */ - case 16: tcnfg = 3; break; /* 64K */ - default: - panic(PFX "Unsupported system page size %d", - 1 << PAGE_SHIFT); + /* Set I/O Pdir page size to system page size */ + switch (IOVP_SHIFT) { + case 12: /* 4K */ + tcnfg = 0; + break; + case 13: /* 8K */ + tcnfg = 1; + break; + case 14: /* 16K */ + tcnfg = 2; + break; + case 16: /* 64K */ + tcnfg = 3; break; } - WRITE_REG(tcnfg, ioc->ioc_hpa + IOC_TCNFG); + WRITE_REG(tcnfg, ioc->ioc_hpa+IOC_TCNFG); /* ** Program the IOC's ibase and enable IOVA translation ** Bit zero == enable bit. */ - WRITE_REG(ioc->ibase | 1, ioc->ioc_hpa + IOC_IBASE); + WRITE_REG(ioc->ibase | 1, ioc->ioc_hpa+IOC_IBASE); /* ** Clear I/O TLB of any possible entries. ** (Yes. This is a bit paranoid...but so what) */ - WRITE_REG(ioc->ibase | (iov_order+PAGE_SHIFT), ioc->ioc_hpa + IOC_PCOM); + WRITE_REG(0 | 31, ioc->ioc_hpa+IOC_PCOM); /* ** If an AGP device is present, only use half of the IOV space @@ -1511,468 +1354,346 @@ ioc_iova_init(struct ioc *ioc) ** We program the next pdir index after we stop w/ a key for ** the GART code to handshake on. */ - pci_for_each_dev(device) - agp_found |= pci_find_capability(device, PCI_CAP_ID_AGP); - - if (agp_found && reserve_sba_gart) { - DBG_INIT("%s: AGP device found, reserving half of IOVA for GART support\n", - __FUNCTION__); + if (SBA_GET_AGP(sba_dev)) { + DBG_INIT("%s() AGP Device found, reserving 512MB for GART support\n", __FUNCTION__); ioc->pdir_size /= 2; - ((u64 *)ioc->pdir_base)[PDIR_INDEX(ioc->iov_size/2)] = ZX1_SBA_IOMMU_COOKIE; + ((u64 *)pdir_base)[PDIR_INDEX(iova_space_size/2)] = 0x0000badbadc0ffeeULL; } -#ifdef FULL_VALID_PDIR - /* - ** Check to see if the spill page has been allocated, we don't need more than - ** one across multiple SBAs. 
- */ - if (!prefetch_spill_page) { - char *spill_poison = "SBAIOMMU POISON"; - int poison_size = 16; - void *poison_addr, *addr; - - addr = (void *)__get_free_pages(GFP_KERNEL, get_order(IOVP_SIZE)); - if (!addr) - panic(PFX "Couldn't allocate PDIR spill page\n"); - - poison_addr = addr; - for ( ; (u64) poison_addr < addr + IOVP_SIZE; poison_addr += poison_size) - memcpy(poison_addr, spill_poison, poison_size); - - prefetch_spill_page = virt_to_phys(addr); - - DBG_INIT("%s() prefetch spill addr: 0x%lx\n", __FUNCTION__, prefetch_spill_page); - } - /* - ** Set all the PDIR entries valid w/ the spill page as the target - */ - for (index = 0 ; index < (ioc->pdir_size / sizeof(u64)) ; index++) - ((u64 *)ioc->pdir_base)[index] = (0x80000000000000FF | prefetch_spill_page); -#endif + DBG_INIT("%s() DONE\n", __FUNCTION__); } -static void __init -ioc_resource_init(struct ioc *ioc) -{ - spin_lock_init(&ioc->res_lock); - - /* resource map size dictated by pdir_size */ - ioc->res_size = ioc->pdir_size / sizeof(u64); /* entries */ - ioc->res_size >>= 3; /* convert bit count to byte count */ - DBG_INIT("%s() res_size 0x%x\n", __FUNCTION__, ioc->res_size); - ioc->res_map = (char *) __get_free_pages(GFP_KERNEL, - get_order(ioc->res_size)); - if (!ioc->res_map) - panic(PFX "Couldn't allocate resource map\n"); - memset(ioc->res_map, 0, ioc->res_size); - /* next available IOVP - circular search */ - ioc->res_hint = (unsigned long *) ioc->res_map; +/************************************************************************** +** +** SBA initialization code (HW and SW) +** +** o identify SBA chip itself +** o FIXME: initialize DMA hints for reasonable defaults +** +**************************************************************************/ -#ifdef ASSERT_PDIR_SANITY - /* Mark first bit busy - ie no IOVA 0 */ - ioc->res_map[0] = 0x1; - ioc->pdir_base[0] = 0x8000000000000000ULL | ZX1_SBA_IOMMU_COOKIE; -#endif -#ifdef FULL_VALID_PDIR - /* Mark the last resource used so we don't prefetch beyond IOVA space */ - ioc->res_map[ioc->res_size - 1] |= 0x80UL; /* res_map is chars */ - ioc->pdir_base[(ioc->pdir_size / sizeof(u64)) - 1] = (0x80000000000000FF - | prefetch_spill_page); -#endif +static void +sba_hw_init(struct sba_device *sba_dev) +{ + int i; + int num_ioc; + u64 dma_mask; + u32 func_id; - DBG_INIT("%s() res_map %x %p\n", __FUNCTION__, - ioc->res_size, (void *) ioc->res_map); -} + /* + ** Identify the SBA so we can set the dma_mask. We can make a virtual + ** dma_mask of the memory subsystem such that devices not implmenting + ** a full 64bit mask might still be able to bypass efficiently. + */ + func_id = READ_REG(sba_dev->sba_hpa + SBA_FUNC_ID); -static void __init -ioc_sac_init(struct ioc *ioc) -{ - struct pci_dev *sac = NULL; - struct pci_controller *controller = NULL; + if (func_id == ZX1_FUNC_ID_VALUE) { + dma_mask = 0xFFFFFFFFFFUL; + } else { + dma_mask = 0xFFFFFFFFFFFFFFFFUL; + } + DBG_INIT("%s(): ioc->dma_mask == 0x%lx\n", __FUNCTION__, dma_mask); + /* - * pci_alloc_coherent() must return a DMA address which is - * SAC (single address cycle) addressable, so allocate a - * pseudo-device to enforce that. 
- */ - sac = kmalloc(sizeof(*sac), GFP_KERNEL); - if (!sac) - panic(PFX "Couldn't allocate struct pci_dev"); - memset(sac, 0, sizeof(*sac)); - - controller = kmalloc(sizeof(*controller), GFP_KERNEL); - if (!controller) - panic(PFX "Couldn't allocate struct pci_controller"); - memset(controller, 0, sizeof(*controller)); - - controller->iommu = ioc; - sac->sysdata = controller; - sac->dma_mask = 0xFFFFFFFFUL; -#ifdef CONFIG_PCI - sac->dev.bus = &pci_bus_type; -#endif - ioc->sac_only_dev = sac; + ** Leaving in the multiple ioc code from parisc for the future, + ** currently there are no muli-ioc mckinley sbas + */ + sba_dev->ioc[0].ioc_hpa = SBA_IOC_OFFSET; + num_ioc = 1; + + sba_dev->num_ioc = num_ioc; + for (i = 0; i < num_ioc; i++) { + sba_dev->ioc[i].dma_mask = dma_mask; + sba_dev->ioc[i].ioc_hpa += sba_dev->sba_hpa; + sba_ioc_init(sba_dev, &(sba_dev->ioc[i]), i); + } } -static void __init -ioc_zx1_init(struct ioc *ioc) +static void +sba_common_init(struct sba_device *sba_dev) { - if (ioc->rev < 0x20) - panic(PFX "IOC 2.0 or later required for IOMMU support\n"); + int i; - ioc->dma_mask = 0xFFFFFFFFFFUL; -} + /* add this one to the head of the list (order doesn't matter) + ** This will be useful for debugging - especially if we get coredumps + */ + sba_dev->next = sba_list; + sba_list = sba_dev; + sba_count++; -typedef void (initfunc)(struct ioc *); + for(i=0; i< sba_dev->num_ioc; i++) { + int res_size; -struct ioc_iommu { - u32 func_id; - char *name; - initfunc *init; -}; + /* resource map size dictated by pdir_size */ + res_size = sba_dev->ioc[i].pdir_size/sizeof(u64); /* entries */ + res_size >>= 3; /* convert bit count to byte count */ + DBG_INIT("%s() res_size 0x%x\n", + __FUNCTION__, res_size); -static struct ioc_iommu ioc_iommu_info[] __initdata = { - { ZX1_IOC_ID, "zx1", ioc_zx1_init }, - { REO_IOC_ID, "REO" }, - { SX1000_IOC_ID, "sx1000" }, -}; + sba_dev->ioc[i].res_size = res_size; + sba_dev->ioc[i].res_map = (char *) __get_free_pages(GFP_KERNEL, get_order(res_size)); -static struct ioc * __init -ioc_init(u64 hpa, void *handle) -{ - struct ioc *ioc; - struct ioc_iommu *info; + if (NULL == sba_dev->ioc[i].res_map) + { + panic(__FILE__ ":%s() could not allocate resource map\n", __FUNCTION__ ); + } - ioc = kmalloc(sizeof(*ioc), GFP_KERNEL); - if (!ioc) - return NULL; + memset(sba_dev->ioc[i].res_map, 0, res_size); + /* next available IOVP - circular search */ + if ((sba_dev->hw_rev & 0xFF) >= 0x20) { + sba_dev->ioc[i].res_hint = (unsigned long *) + sba_dev->ioc[i].res_map; + } else { + u64 reserved_iov; - memset(ioc, 0, sizeof(*ioc)); + /* Yet another 1.x hack */ + printk(KERN_DEBUG "zx1 1.x: Starting resource hint offset into " + "IOV space to avoid initial zero value IOVA\n"); + sba_dev->ioc[i].res_hint = (unsigned long *) + &(sba_dev->ioc[i].res_map[L1_CACHE_BYTES]); - ioc->next = ioc_list; - ioc_list = ioc; + sba_dev->ioc[i].res_map[0] = 0x1; + sba_dev->ioc[i].pdir_base[0] = 0x8000badbadc0ffeeULL; - ioc->handle = handle; - ioc->ioc_hpa = ioremap(hpa, 0x1000); + for (reserved_iov = 0xA0000 ; reserved_iov < 0xC0000 ; reserved_iov += IOVP_SIZE) { + u64 *res_ptr = (u64 *) sba_dev->ioc[i].res_map; + int index = PDIR_INDEX(reserved_iov); + int res_word; + u64 mask; - ioc->func_id = READ_REG(ioc->ioc_hpa + IOC_FUNC_ID); - ioc->rev = READ_REG(ioc->ioc_hpa + IOC_FCLASS) & 0xFFUL; - ioc->dma_mask = 0xFFFFFFFFFFFFFFFFUL; /* conservative */ + res_word = (int)(index / BITS_PER_LONG); + mask = 0x1UL << (index - (res_word * BITS_PER_LONG)); + res_ptr[res_word] |= mask; + 
sba_dev->ioc[i].pdir_base[PDIR_INDEX(reserved_iov)] = (SBA_VALID_MASK | reserved_iov); - for (info = ioc_iommu_info; info < ioc_iommu_info + ARRAY_SIZE(ioc_iommu_info); info++) { - if (ioc->func_id == info->func_id) { - ioc->name = info->name; - if (info->init) - (info->init)(ioc); + } } - } - - if (!ioc->name) { - ioc->name = kmalloc(24, GFP_KERNEL); - if (ioc->name) - sprintf((char *) ioc->name, "Unknown (%04x:%04x)", - ioc->func_id & 0xFFFF, (ioc->func_id >> 16) & 0xFFFF); - else - ioc->name = "Unknown"; - } - ioc_iova_init(ioc); - ioc_resource_init(ioc); - ioc_sac_init(ioc); +#ifdef ASSERT_PDIR_SANITY + /* Mark first bit busy - ie no IOVA 0 */ + sba_dev->ioc[i].res_map[0] = 0x1; + sba_dev->ioc[i].pdir_base[0] = 0x8000badbadc0ffeeULL; +#endif - printk(KERN_INFO PFX - "%s %d.%d HPA 0x%lx IOVA space %dMb at 0x%lx\n", - ioc->name, (ioc->rev >> 4) & 0xF, ioc->rev & 0xF, - hpa, ioc->iov_size >> 20, ioc->ibase); + DBG_INIT("%s() %d res_map %x %p\n", __FUNCTION__, + i, res_size, (void *)sba_dev->ioc[i].res_map); + } - return ioc; + sba_dev->sba_lock = SPIN_LOCK_UNLOCKED; } - - -/************************************************************************** -** -** SBA initialization code (HW and SW) -** -** o identify SBA chip itself -** o FIXME: initialize DMA hints for reasonable defaults -** -**************************************************************************/ - #ifdef CONFIG_PROC_FS -static void * -ioc_start(struct seq_file *s, loff_t *pos) +static int sba_proc_info(char *buf, char **start, off_t offset, int len) { + struct sba_device *sba_dev; struct ioc *ioc; - loff_t n = *pos; - - for (ioc = ioc_list; ioc; ioc = ioc->next) - if (!n--) - return ioc; - - return NULL; -} - -static void * -ioc_next(struct seq_file *s, void *v, loff_t *pos) -{ - struct ioc *ioc = v; - - ++*pos; - return ioc->next; -} - -static void -ioc_stop(struct seq_file *s, void *v) -{ -} - -static int -ioc_show(struct seq_file *s, void *v) -{ - struct ioc *ioc = v; - int total_pages = (int) (ioc->res_size << 3); /* 8 bits per byte */ + int total_pages; unsigned long i = 0, avg = 0, min, max; - seq_printf(s, "Hewlett Packard %s IOC rev %d.%d\n", - ioc->name, ((ioc->rev >> 4) & 0xF), (ioc->rev & 0xF)); - seq_printf(s, "IO PDIR size : %d bytes (%d entries)\n", - (int) ((ioc->res_size << 3) * sizeof(u64)), /* 8 bits/byte */ - total_pages); + for (sba_dev = sba_list; sba_dev; sba_dev = sba_dev->next) { + ioc = &sba_dev->ioc[0]; /* FIXME: Multi-IOC support! 
*/ + total_pages = (int) (ioc->res_size << 3); /* 8 bits per byte */ - seq_printf(s, "IO PDIR entries : %ld free %ld used (%d%%)\n", - total_pages - ioc->used_pages, ioc->used_pages, - (int) (ioc->used_pages * 100 / total_pages)); + sprintf(buf, "%s rev %d.%d\n", "Hewlett-Packard zx1 SBA", + ((sba_dev->hw_rev >> 4) & 0xF), (sba_dev->hw_rev & 0xF)); + sprintf(buf, "%sIO PDIR size : %d bytes (%d entries)\n", buf, + (int) ((ioc->res_size << 3) * sizeof(u64)), /* 8 bits/byte */ total_pages); - seq_printf(s, "Resource bitmap : %d bytes (%d pages)\n", - ioc->res_size, ioc->res_size << 3); /* 8 bits per byte */ + sprintf(buf, "%sIO PDIR entries : %ld free %ld used (%d%%)\n", buf, + total_pages - ioc->used_pages, ioc->used_pages, + (int) (ioc->used_pages * 100 / total_pages)); - min = max = ioc->avg_search[0]; - for (i = 0; i < SBA_SEARCH_SAMPLE; i++) { - avg += ioc->avg_search[i]; - if (ioc->avg_search[i] > max) max = ioc->avg_search[i]; - if (ioc->avg_search[i] < min) min = ioc->avg_search[i]; - } - avg /= SBA_SEARCH_SAMPLE; - seq_printf(s, " Bitmap search : %ld/%ld/%ld (min/avg/max CPU Cycles)\n", min, avg, max); + sprintf(buf, "%sResource bitmap : %d bytes (%d pages)\n", + buf, ioc->res_size, ioc->res_size << 3); /* 8 bits per byte */ - seq_printf(s, "pci_map_single(): %12ld calls %12ld pages (avg %d/1000)\n", - ioc->msingle_calls, ioc->msingle_pages, - (int) ((ioc->msingle_pages * 1000)/ioc->msingle_calls)); + min = max = ioc->avg_search[0]; + for (i = 0; i < SBA_SEARCH_SAMPLE; i++) { + avg += ioc->avg_search[i]; + if (ioc->avg_search[i] > max) max = ioc->avg_search[i]; + if (ioc->avg_search[i] < min) min = ioc->avg_search[i]; + } + avg /= SBA_SEARCH_SAMPLE; + sprintf(buf, "%s Bitmap search : %ld/%ld/%ld (min/avg/max CPU Cycles)\n", + buf, min, avg, max); + + sprintf(buf, "%spci_map_single(): %12ld calls %12ld pages (avg %d/1000)\n", + buf, ioc->msingle_calls, ioc->msingle_pages, + (int) ((ioc->msingle_pages * 1000)/ioc->msingle_calls)); #ifdef ALLOW_IOV_BYPASS - seq_printf(s, "pci_map_single(): %12ld bypasses\n", ioc->msingle_bypass); + sprintf(buf, "%spci_map_single(): %12ld bypasses\n", + buf, ioc->msingle_bypass); #endif - seq_printf(s, "pci_unmap_single: %12ld calls %12ld pages (avg %d/1000)\n", - ioc->usingle_calls, ioc->usingle_pages, - (int) ((ioc->usingle_pages * 1000)/ioc->usingle_calls)); + sprintf(buf, "%spci_unmap_single: %12ld calls %12ld pages (avg %d/1000)\n", + buf, ioc->usingle_calls, ioc->usingle_pages, + (int) ((ioc->usingle_pages * 1000)/ioc->usingle_calls)); #ifdef ALLOW_IOV_BYPASS - seq_printf(s, "pci_unmap_single: %12ld bypasses\n", ioc->usingle_bypass); + sprintf(buf, "%spci_unmap_single: %12ld bypasses\n", + buf, ioc->usingle_bypass); #endif - seq_printf(s, "pci_map_sg() : %12ld calls %12ld pages (avg %d/1000)\n", - ioc->msg_calls, ioc->msg_pages, - (int) ((ioc->msg_pages * 1000)/ioc->msg_calls)); + sprintf(buf, "%spci_map_sg() : %12ld calls %12ld pages (avg %d/1000)\n", + buf, ioc->msg_calls, ioc->msg_pages, + (int) ((ioc->msg_pages * 1000)/ioc->msg_calls)); #ifdef ALLOW_IOV_BYPASS - seq_printf(s, "pci_map_sg() : %12ld bypasses\n", ioc->msg_bypass); + sprintf(buf, "%spci_map_sg() : %12ld bypasses\n", + buf, ioc->msg_bypass); #endif - seq_printf(s, "pci_unmap_sg() : %12ld calls %12ld pages (avg %d/1000)\n", - ioc->usg_calls, ioc->usg_pages, (int) ((ioc->usg_pages * 1000)/ioc->usg_calls)); - - return 0; -} - -static struct seq_operations ioc_seq_ops = { - .start = ioc_start, - .next = ioc_next, - .stop = ioc_stop, - .show = ioc_show -}; - -static int -ioc_open(struct 
inode *inode, struct file *file) -{ - return seq_open(file, &ioc_seq_ops); -} - -static struct file_operations ioc_fops = { - .open = ioc_open, - .read = seq_read, - .llseek = seq_lseek, - .release = seq_release -}; - -static int -ioc_map_show(struct seq_file *s, void *v) -{ - struct ioc *ioc = v; - unsigned int i, *res_ptr = (unsigned int *)ioc->res_map; - - for (i = 0; i < ioc->res_size / sizeof(unsigned int); ++i, ++res_ptr) - seq_printf(s, "%s%08x", (i & 7) ? " " : "\n ", *res_ptr); - seq_printf(s, "\n"); - - return 0; + sprintf(buf, "%spci_unmap_sg() : %12ld calls %12ld pages (avg %d/1000)\n", + buf, ioc->usg_calls, ioc->usg_pages, + (int) ((ioc->usg_pages * 1000)/ioc->usg_calls)); + } + return strlen(buf); } -static struct seq_operations ioc_map_ops = { - .start = ioc_start, - .next = ioc_next, - .stop = ioc_stop, - .show = ioc_map_show -}; - static int -ioc_map_open(struct inode *inode, struct file *file) +sba_resource_map(char *buf, char **start, off_t offset, int len) { - return seq_open(file, &ioc_map_ops); -} + struct ioc *ioc = sba_list->ioc; /* FIXME: Multi-IOC support! */ + unsigned int *res_ptr; + int i; -static struct file_operations ioc_map_fops = { - .open = ioc_map_open, - .read = seq_read, - .llseek = seq_lseek, - .release = seq_release -}; - -static void __init -ioc_proc_init(void) -{ - if (ioc_list) { - struct proc_dir_entry *dir, *entry; - - dir = proc_mkdir("bus/mckinley", 0); - entry = create_proc_entry(ioc_list->name, 0, dir); - if (entry) - entry->proc_fops = &ioc_fops; - - entry = create_proc_entry("bitmap", 0, dir); - if (entry) - entry->proc_fops = &ioc_map_fops; + if (!ioc) + return 0; + + res_ptr = (unsigned int *)ioc->res_map; + buf[0] = '\0'; + for(i = 0; i < (ioc->res_size / sizeof(unsigned int)); ++i, ++res_ptr) { + if ((i & 7) == 0) + strcat(buf,"\n "); + sprintf(buf, "%s %08x", buf, *res_ptr); } + strcat(buf, "\n"); + + return strlen(buf); } #endif -void -sba_connect_bus(struct pci_bus *bus) +/* +** Determine if sba should claim this chip (return 0) or not (return 1). +** If so, initialize the chip and tell other partners in crime they +** have work to do. +*/ +void __init sba_init(void) { - acpi_handle handle, parent; - acpi_status status; - struct ioc *ioc; + struct sba_device *sba_dev; + u32 func_id, hw_rev; + u32 *func_offset = NULL; + int i, agp_found = 0; + static char sba_rev[6]; + struct pci_dev *device = NULL; + u64 hpa = 0; + + if (!(device = pci_find_device(PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_ZX1_SBA, NULL))) + return; - if (!PCI_CONTROLLER(bus)) - panic(PFX "no sysdata on bus %d!\n",bus->number); + for (i = 0; i < PCI_NUM_RESOURCES; i++) { + if (pci_resource_flags(device, i) == IORESOURCE_MEM) { + hpa = (u64) ioremap(pci_resource_start(device, i), + pci_resource_len(device, i)); + break; + } + } - if (PCI_CONTROLLER(bus)->iommu) + func_id = READ_REG(hpa + SBA_FUNC_ID); + if (func_id != ZX1_FUNC_ID_VALUE) return; - handle = PCI_CONTROLLER(bus)->acpi_handle; - if (!handle) - return; + strcpy(sba_rev, "zx1"); + func_offset = zx1_func_offsets; + + /* Read HW Rev First */ + hw_rev = READ_REG(hpa + SBA_FCLASS) & 0xFFUL; /* - * The IOC scope encloses PCI root bridges in the ACPI - * namespace, so work our way out until we find an IOC we - * claimed previously. + * Not all revision registers of the chipset are updated on every + * turn. 
Must scan through all functions looking for the highest rev */ - do { - for (ioc = ioc_list; ioc; ioc = ioc->next) - if (ioc->handle == handle) { - PCI_CONTROLLER(bus)->iommu = ioc; - return; - } + if (func_offset) { + for (i = 0 ; func_offset[i] != -1 ; i++) { + u32 func_rev; + + func_rev = READ_REG(hpa + SBA_FCLASS + func_offset[i]) & 0xFFUL; + DBG_INIT("%s() func offset: 0x%x rev: 0x%x\n", + __FUNCTION__, func_offset[i], func_rev); + if (func_rev > hw_rev) + hw_rev = func_rev; + } + } - status = acpi_get_parent(handle, &parent); - handle = parent; - } while (ACPI_SUCCESS(status)); + printk(KERN_INFO "%s found %s %d.%d at %s, HPA 0x%lx\n", DRIVER_NAME, + sba_rev, ((hw_rev >> 4) & 0xF), (hw_rev & 0xF), + device->slot_name, hpa); - printk(KERN_WARNING "No IOC for PCI Bus %02x:%02x in ACPI\n", PCI_SEGMENT(bus), bus->number); -} + if ((hw_rev & 0xFF) < 0x20) { + printk(KERN_INFO "%s: SBA rev less than 2.0 not supported", DRIVER_NAME); + return; + } -static int __init -acpi_sba_ioc_add(struct acpi_device *device) -{ - struct ioc *ioc; - acpi_status status; - u64 hpa, length; - struct acpi_device_info dev_info; + sba_dev = kmalloc(sizeof(struct sba_device), GFP_KERNEL); + if (NULL == sba_dev) { + printk(KERN_ERR DRIVER_NAME " - couldn't alloc sba_device\n"); + return; + } - status = hp_acpi_csr_space(device->handle, &hpa, &length); - if (ACPI_FAILURE(status)) - return 1; + memset(sba_dev, 0, sizeof(struct sba_device)); - status = acpi_get_object_info(device->handle, &dev_info); - if (ACPI_FAILURE(status)) - return 1; + for(i=0; i<MAX_IOC; i++) spin_lock_init(&(sba_dev->ioc[i].res_lock)); + + sba_dev->hw_rev = hw_rev; + sba_dev->sba_hpa = hpa; /* - * For HWP0001, only SBA appears in ACPI namespace. It encloses the PCI - * root bridges, and its CSR space includes the IOC function. + * We pass this fake device from alloc_consistent to ensure + * we only use SAC for alloc_consistent mappings. */ - if (strncmp("HWP0001", dev_info.hardware_id, 7) == 0) - hpa += ZX1_IOC_OFFSET; - - ioc = ioc_init(hpa, device->handle); - if (!ioc) - return 1; + sac_only_dev.dma_mask = 0xFFFFFFFFUL; - return 0; -} - -static struct acpi_driver acpi_sba_ioc_driver = { - name: "IOC IOMMU Driver", - ids: "HWP0001,HWP0004", - ops: { - add: acpi_sba_ioc_add, - }, -}; + /* + * We need to check for an AGP device, if we find one, then only + * use part of the IOVA space for PCI DMA, the rest is for GART. + * REVISIT for multiple IOC. + */ + pci_for_each_dev(device) + agp_found |= pci_find_capability(device, PCI_CAP_ID_AGP); -static int __init -sba_init(void) -{ - MAX_DMA_ADDRESS = ~0UL; + if (agp_found && reserve_sba_gart) + SBA_SET_AGP(sba_dev); - acpi_bus_register_driver(&acpi_sba_ioc_driver); + sba_hw_init(sba_dev); + sba_common_init(sba_dev); -#ifdef CONFIG_PCI +#ifdef CONFIG_PROC_FS { - struct pci_bus *b; - pci_for_each_bus(b) - sba_connect_bus(b); - } -#endif + struct proc_dir_entry * proc_mckinley_root; -#ifdef CONFIG_PROC_FS - ioc_proc_init(); + proc_mckinley_root = proc_mkdir("bus/mckinley",0); + create_proc_info_entry(sba_rev, 0, proc_mckinley_root, sba_proc_info); + create_proc_info_entry("bitmap", 0, proc_mckinley_root, sba_resource_map); + } #endif - return 0; } -subsys_initcall(sba_init); /* must be initialized after ACPI etc., but before any drivers... 
*/ - static int __init -nosbagart(char *str) +nosbagart (char *str) { reserve_sba_gart = 0; return 1; } -int -sba_dma_supported (struct device *dev, u64 mask) -{ - /* make sure it's at least 32bit capable */ - return ((mask & 0xFFFFFFFFUL) == 0xFFFFFFFFUL); -} - -__setup("nosbagart", nosbagart); +__setup("nosbagart",nosbagart); +EXPORT_SYMBOL(sba_init); EXPORT_SYMBOL(sba_map_single); EXPORT_SYMBOL(sba_unmap_single); EXPORT_SYMBOL(sba_map_sg); EXPORT_SYMBOL(sba_unmap_sg); +EXPORT_SYMBOL(sba_dma_address); EXPORT_SYMBOL(sba_dma_supported); -EXPORT_SYMBOL(sba_alloc_coherent); -EXPORT_SYMBOL(sba_free_coherent); +EXPORT_SYMBOL(sba_alloc_consistent); +EXPORT_SYMBOL(sba_free_consistent); diff --git a/arch/ia64/hp/sim/hpsim_console.c b/arch/ia64/hp/sim/hpsim_console.c index 29c03cf..540f1d8 100644 --- a/arch/ia64/hp/sim/hpsim_console.c +++ b/arch/ia64/hp/sim/hpsim_console.c @@ -59,7 +59,7 @@ simcons_write (struct console *cons, const char *buf, unsigned count) static struct tty_driver *simcons_console_device (struct console *c, int *index) { - extern struct tty_driver hp_simserial_driver; + extern struct tty_driver hp_serial_driver; *index = c->index; - return &hp_simserial_driver; + return &hp_serial_driver; } diff --git a/arch/ia64/hp/sim/hpsim_machvec.c b/arch/ia64/hp/sim/hpsim_machvec.c index c214193..76af3b4 100644 --- a/arch/ia64/hp/sim/hpsim_machvec.c +++ b/arch/ia64/hp/sim/hpsim_machvec.c @@ -1,3 +1,2 @@ -#define MACHVEC_PLATFORM_NAME hpsim -#define MACHVEC_PLATFORM_HEADER +#define MACHVEC_PLATFORM_NAME hpsim #include diff --git a/arch/ia64/hp/sim/simeth.c b/arch/ia64/hp/sim/simeth.c index fef47f1..8520822 100644 --- a/arch/ia64/hp/sim/simeth.c +++ b/arch/ia64/hp/sim/simeth.c @@ -55,7 +55,7 @@ static int simeth_close(struct net_device *dev); static int simeth_tx(struct sk_buff *skb, struct net_device *dev); static int simeth_rx(struct net_device *dev); static struct net_device_stats *simeth_get_stats(struct net_device *dev); -static irqreturn_t simeth_interrupt(int irq, void *dev_id, struct pt_regs * regs); +static void simeth_interrupt(int irq, void *dev_id, struct pt_regs * regs); static void set_multicast_list(struct net_device *dev); static int simeth_device_event(struct notifier_block *this,unsigned long event, void *ptr); @@ -494,21 +494,20 @@ simeth_rx(struct net_device *dev) /* * Interrupt handler (Yes, we can do it too !!!) 
*/ -static irqreturn_t +static void simeth_interrupt(int irq, void *dev_id, struct pt_regs * regs) { struct net_device *dev = dev_id; if ( dev == NULL ) { printk(KERN_WARNING "simeth: irq %d for unknown device\n", irq); - return IRQ_NONE; + return; } /* - * very simple loop because we get interrupts only when receiving + * very simple loop because we get interrupts only when receving */ while (simeth_rx(dev)); - return IRQ_HANDLED; } static struct net_device_stats * diff --git a/arch/ia64/hp/sim/simserial.c b/arch/ia64/hp/sim/simserial.c index 0c04fb0..121fb7c 100644 --- a/arch/ia64/hp/sim/simserial.c +++ b/arch/ia64/hp/sim/simserial.c @@ -103,8 +103,7 @@ static struct serial_uart_config uart_config[] = { { 0, 0} }; -struct tty_driver hp_simserial_driver; -static struct tty_driver callout_driver; +static struct tty_driver hp_serial_driver, callout_driver; static int serial_refcount; static struct async_struct *IRQ_ports[NR_IRQS]; @@ -185,7 +184,7 @@ static void receive_chars(struct tty_struct *tty, struct pt_regs *regs) /* * This is the serial driver's interrupt routine for a single port */ -static irqreturn_t rs_interrupt_single(int irq, void *dev_id, struct pt_regs * regs) +static void rs_interrupt_single(int irq, void *dev_id, struct pt_regs * regs) { struct async_struct * info; @@ -196,14 +195,13 @@ static irqreturn_t rs_interrupt_single(int irq, void *dev_id, struct pt_regs * r info = IRQ_ports[irq]; if (!info || !info->tty) { printk(KERN_INFO "simrs_interrupt_single: info|tty=0 info=%p problem\n", info); - return IRQ_NONE; + return; } /* * pretty simple in our case, because we only get interrupts * on inbound traffic */ receive_chars(info->tty, regs); - return IRQ_HANDLED; } /* @@ -770,7 +768,7 @@ startup(struct async_struct *info) { unsigned long flags; int retval=0; - irqreturn_t (*handler)(int, void *, struct pt_regs *); + void (*handler)(int, void *, struct pt_regs *); struct serial_state *state= info->state; unsigned long page; @@ -810,7 +808,8 @@ startup(struct async_struct *info) } else handler = rs_interrupt_single; - retval = request_irq(state->irq, handler, IRQ_T(info), "simserial", NULL); + retval = request_irq(state->irq, handler, IRQ_T(info), + "simserial", NULL); if (retval) { if (capable(CAP_SYS_ADMIN)) { if (info->tty) @@ -1029,43 +1028,43 @@ simrs_init (void) /* Initialize the tty_driver structure */ - memset(&hp_simserial_driver, 0, sizeof(struct tty_driver)); - hp_simserial_driver.magic = TTY_DRIVER_MAGIC; - hp_simserial_driver.driver_name = "simserial"; - hp_simserial_driver.name = "ttyS"; - hp_simserial_driver.major = TTY_MAJOR; - hp_simserial_driver.minor_start = 64; - hp_simserial_driver.num = 1; - hp_simserial_driver.type = TTY_DRIVER_TYPE_SERIAL; - hp_simserial_driver.subtype = SERIAL_TYPE_NORMAL; - hp_simserial_driver.init_termios = tty_std_termios; - hp_simserial_driver.init_termios.c_cflag = + memset(&hp_serial_driver, 0, sizeof(struct tty_driver)); + hp_serial_driver.magic = TTY_DRIVER_MAGIC; + hp_serial_driver.driver_name = "simserial"; + hp_serial_driver.name = "ttyS"; + hp_serial_driver.major = TTY_MAJOR; + hp_serial_driver.minor_start = 64; + hp_serial_driver.num = 1; + hp_serial_driver.type = TTY_DRIVER_TYPE_SERIAL; + hp_serial_driver.subtype = SERIAL_TYPE_NORMAL; + hp_serial_driver.init_termios = tty_std_termios; + hp_serial_driver.init_termios.c_cflag = B9600 | CS8 | CREAD | HUPCL | CLOCAL; - hp_simserial_driver.flags = TTY_DRIVER_REAL_RAW; - hp_simserial_driver.refcount = &serial_refcount; - hp_simserial_driver.table = serial_table; - 
hp_simserial_driver.termios = serial_termios; - hp_simserial_driver.termios_locked = serial_termios_locked; - - hp_simserial_driver.open = rs_open; - hp_simserial_driver.close = rs_close; - hp_simserial_driver.write = rs_write; - hp_simserial_driver.put_char = rs_put_char; - hp_simserial_driver.flush_chars = rs_flush_chars; - hp_simserial_driver.write_room = rs_write_room; - hp_simserial_driver.chars_in_buffer = rs_chars_in_buffer; - hp_simserial_driver.flush_buffer = rs_flush_buffer; - hp_simserial_driver.ioctl = rs_ioctl; - hp_simserial_driver.throttle = rs_throttle; - hp_simserial_driver.unthrottle = rs_unthrottle; - hp_simserial_driver.send_xchar = rs_send_xchar; - hp_simserial_driver.set_termios = rs_set_termios; - hp_simserial_driver.stop = rs_stop; - hp_simserial_driver.start = rs_start; - hp_simserial_driver.hangup = rs_hangup; - hp_simserial_driver.break_ctl = rs_break; - hp_simserial_driver.wait_until_sent = rs_wait_until_sent; - hp_simserial_driver.read_proc = rs_read_proc; + hp_serial_driver.flags = TTY_DRIVER_REAL_RAW; + hp_serial_driver.refcount = &serial_refcount; + hp_serial_driver.table = serial_table; + hp_serial_driver.termios = serial_termios; + hp_serial_driver.termios_locked = serial_termios_locked; + + hp_serial_driver.open = rs_open; + hp_serial_driver.close = rs_close; + hp_serial_driver.write = rs_write; + hp_serial_driver.put_char = rs_put_char; + hp_serial_driver.flush_chars = rs_flush_chars; + hp_serial_driver.write_room = rs_write_room; + hp_serial_driver.chars_in_buffer = rs_chars_in_buffer; + hp_serial_driver.flush_buffer = rs_flush_buffer; + hp_serial_driver.ioctl = rs_ioctl; + hp_serial_driver.throttle = rs_throttle; + hp_serial_driver.unthrottle = rs_unthrottle; + hp_serial_driver.send_xchar = rs_send_xchar; + hp_serial_driver.set_termios = rs_set_termios; + hp_serial_driver.stop = rs_stop; + hp_serial_driver.start = rs_start; + hp_serial_driver.hangup = rs_hangup; + hp_serial_driver.break_ctl = rs_break; + hp_serial_driver.wait_until_sent = rs_wait_until_sent; + hp_serial_driver.read_proc = rs_read_proc; /* * Let's have a little bit of fun ! @@ -1088,14 +1087,14 @@ simrs_init (void) * The callout device is just like normal device except for * major number and the subtype code. */ - callout_driver = hp_simserial_driver; + callout_driver = hp_serial_driver; callout_driver.name = "cua"; callout_driver.major = TTYAUX_MAJOR; callout_driver.subtype = SERIAL_TYPE_CALLOUT; callout_driver.read_proc = 0; callout_driver.proc_entry = 0; - if (tty_register_driver(&hp_simserial_driver)) + if (tty_register_driver(&hp_serial_driver)) panic("Couldn't register simserial driver\n"); if (tty_register_driver(&callout_driver)) diff --git a/arch/ia64/hp/zx1/Makefile b/arch/ia64/hp/zx1/Makefile index 64e39aa..4c00a07 100644 --- a/arch/ia64/hp/zx1/Makefile +++ b/arch/ia64/hp/zx1/Makefile @@ -5,4 +5,5 @@ # Copyright (C) Alex Williamson (alex_williamson@hp.com) # +obj-y := hpzx1_misc.o obj-$(CONFIG_IA64_GENERIC) += hpzx1_machvec.o diff --git a/arch/ia64/hp/zx1/hpzx1_machvec.c b/arch/ia64/hp/zx1/hpzx1_machvec.c index 32518b0..53b1a13 100644 --- a/arch/ia64/hp/zx1/hpzx1_machvec.c +++ b/arch/ia64/hp/zx1/hpzx1_machvec.c @@ -1,3 +1,2 @@ -#define MACHVEC_PLATFORM_NAME hpzx1 -#define MACHVEC_PLATFORM_HEADER +#define MACHVEC_PLATFORM_NAME hpzx1 #include diff --git a/arch/ia64/hp/zx1/hpzx1_misc.c b/arch/ia64/hp/zx1/hpzx1_misc.c new file mode 100644 index 0000000..bf6faa9 --- /dev/null +++ b/arch/ia64/hp/zx1/hpzx1_misc.c @@ -0,0 +1,348 @@ +/* + * Misc. 
support for HP zx1 chipset support + * + * Copyright (C) 2002-2003 Hewlett-Packard Co + * Alex Williamson + * Bjorn Helgaas + */ + + +#include +#include +#include +#include +#include +#include + +#include +#include + +extern acpi_status acpi_evaluate_integer (acpi_handle, acpi_string, struct acpi_object_list *, + unsigned long *); + +#define PFX "hpzx1: " + +static int hpzx1_devices; + +struct fake_pci_dev { + struct fake_pci_dev *next; + struct pci_dev *pci_dev; + unsigned long csr_base; + unsigned long csr_size; + unsigned long mapped_csrs; // ioremapped + int sizing; // in middle of BAR sizing operation? +} *fake_pci_dev_list; + +static struct pci_ops *orig_pci_ops; + +struct fake_pci_dev * +lookup_fake_dev (struct pci_bus *bus, unsigned int devfn) +{ + struct fake_pci_dev *fake_dev; + + for (fake_dev = fake_pci_dev_list; fake_dev; fake_dev = fake_dev->next) + if (fake_dev->pci_dev->bus == bus && fake_dev->pci_dev->devfn == devfn) + return fake_dev; + return NULL; +} + +static int +hp_cfg_read (struct pci_bus *bus, unsigned int devfn, int where, int size, u32 *value) +{ + struct fake_pci_dev *fake_dev = lookup_fake_dev(bus, devfn); + + if (!fake_dev) + return (*orig_pci_ops->read)(bus, devfn, where, size, value); + + if (where == PCI_BASE_ADDRESS_0) { + if (fake_dev->sizing) + *value = ~(fake_dev->csr_size - 1); + else + *value = ((fake_dev->csr_base & PCI_BASE_ADDRESS_MEM_MASK) + | PCI_BASE_ADDRESS_SPACE_MEMORY); + fake_dev->sizing = 0; + return PCIBIOS_SUCCESSFUL; + } + switch (size) { + case 1: *value = readb(fake_dev->mapped_csrs + where); break; + case 2: *value = readw(fake_dev->mapped_csrs + where); break; + case 4: *value = readl(fake_dev->mapped_csrs + where); break; + default: + printk(KERN_WARNING"hp_cfg_read: bad size = %d bytes", size); + break; + } + if (where == PCI_COMMAND) + *value |= PCI_COMMAND_MEMORY; /* SBA omits this */ + return PCIBIOS_SUCCESSFUL; +} + +static int +hp_cfg_write (struct pci_bus *bus, unsigned int devfn, int where, int size, u32 value) +{ + struct fake_pci_dev *fake_dev = lookup_fake_dev(bus, devfn); + + if (!fake_dev) + return (*orig_pci_ops->write)(bus, devfn, where, size, value); + + if (where == PCI_BASE_ADDRESS_0) { + if (value == ((1UL << 8*size) - 1)) + fake_dev->sizing = 1; + return PCIBIOS_SUCCESSFUL; + } + switch (size) { + case 1: writeb(value, fake_dev->mapped_csrs + where); break; + case 2: writew(value, fake_dev->mapped_csrs + where); break; + case 4: writel(value, fake_dev->mapped_csrs + where); break; + default: + printk(KERN_WARNING"hp_cfg_write: bad size = %d bytes", size); + break; + } + return PCIBIOS_SUCCESSFUL; +} + +static struct pci_ops hp_pci_conf = { + .read = hp_cfg_read, + .write = hp_cfg_write +}; + +static void +hpzx1_fake_pci_dev(char *name, unsigned int busnum, unsigned long addr, unsigned int size) +{ + struct fake_pci_dev *fake; + int slot, ret; + struct pci_dev *dev; + struct pci_bus *b, *bus = NULL; + u8 hdr; + + fake = kmalloc(sizeof(*fake), GFP_KERNEL); + if (!fake) { + printk(KERN_ERR PFX "No memory for %s (0x%p) sysdata\n", name, (void *) addr); + return; + } + + memset(fake, 0, sizeof(*fake)); + fake->csr_base = addr; + fake->csr_size = size; + fake->mapped_csrs = (unsigned long) ioremap(addr, size); + fake->sizing = 0; + + pci_for_each_bus(b) + if (busnum == b->number) { + bus = b; + break; + } + + if (!bus) { + printk(KERN_ERR PFX "No host bus 0x%02x for %s (0x%p)\n", + busnum, name, (void *) addr); + kfree(fake); + return; + } + + for (slot = 0x1e; slot; slot--) + if (!pci_find_slot(busnum, PCI_DEVFN(slot, 
0))) + break; + + if (slot < 0) { + printk(KERN_ERR PFX "No space for %s (0x%p) on bus 0x%02x\n", + name, (void *) addr, busnum); + kfree(fake); + return; + } + + dev = kmalloc(sizeof(*dev), GFP_KERNEL); + if (!dev) { + printk(KERN_ERR PFX "No memory for %s (0x%p)\n", name, (void *) addr); + kfree(fake); + return; + } + + bus->ops = &hp_pci_conf; // replace pci ops for this bus + + fake->pci_dev = dev; + fake->next = fake_pci_dev_list; + fake_pci_dev_list = fake; + + memset(dev, 0, sizeof(*dev)); + dev->bus = bus; + dev->sysdata = fake; + dev->dev.parent = bus->dev; + dev->dev.bus = &pci_bus_type; + dev->devfn = PCI_DEVFN(slot, 0); + pci_read_config_word(dev, PCI_VENDOR_ID, &dev->vendor); + pci_read_config_word(dev, PCI_DEVICE_ID, &dev->device); + pci_read_config_byte(dev, PCI_HEADER_TYPE, &hdr); + dev->hdr_type = hdr & 0x7f; + + pci_setup_device(dev); + + // pci_insert_device() without running /sbin/hotplug + list_add_tail(&dev->bus_list, &bus->devices); + list_add_tail(&dev->global_list, &pci_devices); + + strcpy(dev->dev.bus_id, dev->slot_name); + ret = device_register(&dev->dev); + if (ret < 0) + printk(KERN_INFO PFX "fake device registration failed (%d)\n", ret); + + printk(KERN_INFO PFX "%s at 0x%lx; pci dev %s\n", name, addr, dev->slot_name); + + hpzx1_devices++; +} + +struct acpi_hp_vendor_long { + u8 guid_id; + u8 guid[16]; + u8 csr_base[8]; + u8 csr_length[8]; +}; + +#define HP_CCSR_LENGTH 0x21 +#define HP_CCSR_TYPE 0x2 +#define HP_CCSR_GUID EFI_GUID(0x69e9adf9, 0x924f, 0xab5f, \ + 0xf6, 0x4a, 0x24, 0xd2, 0x01, 0x37, 0x0e, 0xad) + +extern acpi_status acpi_get_crs(acpi_handle, struct acpi_buffer *); +extern struct acpi_resource *acpi_get_crs_next(struct acpi_buffer *, int *); +extern union acpi_resource_data *acpi_get_crs_type(struct acpi_buffer *, int *, int); +extern void acpi_dispose_crs(struct acpi_buffer *); + +static acpi_status +hp_csr_space(acpi_handle obj, u64 *csr_base, u64 *csr_length) +{ + int i, offset = 0; + acpi_status status; + struct acpi_buffer buf; + struct acpi_resource_vendor *res; + struct acpi_hp_vendor_long *hp_res; + efi_guid_t vendor_guid; + + *csr_base = 0; + *csr_length = 0; + + status = acpi_get_crs(obj, &buf); + if (ACPI_FAILURE(status)) { + printk(KERN_ERR PFX "Unable to get _CRS data on object\n"); + return status; + } + + res = (struct acpi_resource_vendor *)acpi_get_crs_type(&buf, &offset, ACPI_RSTYPE_VENDOR); + if (!res) { + printk(KERN_ERR PFX "Failed to find config space for device\n"); + acpi_dispose_crs(&buf); + return AE_NOT_FOUND; + } + + hp_res = (struct acpi_hp_vendor_long *)(res->reserved); + + if (res->length != HP_CCSR_LENGTH || hp_res->guid_id != HP_CCSR_TYPE) { + printk(KERN_ERR PFX "Unknown Vendor data\n"); + acpi_dispose_crs(&buf); + return AE_TYPE; /* Revisit error? */ + } + + memcpy(&vendor_guid, hp_res->guid, sizeof(efi_guid_t)); + if (efi_guidcmp(vendor_guid, HP_CCSR_GUID) != 0) { + printk(KERN_ERR PFX "Vendor GUID does not match\n"); + acpi_dispose_crs(&buf); + return AE_TYPE; /* Revisit error? 
*/ + } + + for (i = 0 ; i < 8 ; i++) { + *csr_base |= ((u64)(hp_res->csr_base[i]) << (i * 8)); + *csr_length |= ((u64)(hp_res->csr_length[i]) << (i * 8)); + } + + acpi_dispose_crs(&buf); + + return AE_OK; +} + +static acpi_status +hpzx1_sba_probe(acpi_handle obj, u32 depth, void *context, void **ret) +{ + u64 csr_base = 0, csr_length = 0; + acpi_status status; + char *name = context; + char fullname[16]; + + status = hp_csr_space(obj, &csr_base, &csr_length); + if (ACPI_FAILURE(status)) + return status; + + /* + * Only SBA shows up in ACPI namespace, so its CSR space + * includes both SBA and IOC. Make SBA and IOC show up + * separately in PCI space. + */ + sprintf(fullname, "%s SBA", name); + hpzx1_fake_pci_dev(fullname, 0, csr_base, 0x1000); + sprintf(fullname, "%s IOC", name); + hpzx1_fake_pci_dev(fullname, 0, csr_base + 0x1000, 0x1000); + + return AE_OK; +} + +static acpi_status +hpzx1_lba_probe(acpi_handle obj, u32 depth, void *context, void **ret) +{ + u64 csr_base = 0, csr_length = 0; + acpi_status status; + acpi_native_uint busnum; + char *name = context; + char fullname[32]; + + status = hp_csr_space(obj, &csr_base, &csr_length); + if (ACPI_FAILURE(status)) + return status; + + status = acpi_evaluate_integer(obj, METHOD_NAME__BBN, NULL, &busnum); + if (ACPI_FAILURE(status)) { + printk(KERN_WARNING PFX "evaluate _BBN fail=0x%x\n", status); + busnum = 0; // no _BBN; stick it on bus 0 + } + + sprintf(fullname, "%s _BBN 0x%02x", name, (unsigned int) busnum); + hpzx1_fake_pci_dev(fullname, busnum, csr_base, csr_length); + + return AE_OK; +} + +static void +hpzx1_acpi_dev_init(void) +{ + extern struct pci_ops *pci_root_ops; + + orig_pci_ops = pci_root_ops; + + /* + * Make fake PCI devices for the following hardware in the + * ACPI namespace. This makes it more convenient for drivers + * because they can claim these devices based on PCI + * information, rather than needing to know about ACPI. The + * 64-bit "HPA" space for this hardware is available as BAR + * 0/1. 
+ * + * HWP0001: Single IOC SBA w/o IOC in namespace + * HWP0002: LBA device + * HWP0003: AGP LBA device + */ + acpi_get_devices("HWP0001", hpzx1_sba_probe, "HWP0001", NULL); + acpi_get_devices("HWP0002", hpzx1_lba_probe, "HWP0002 PCI LBA", NULL); + acpi_get_devices("HWP0003", hpzx1_lba_probe, "HWP0003 AGP LBA", NULL); +} + +extern void sba_init(void); + +static int +hpzx1_init (void) +{ + /* zx1 has a hardware I/O TLB which lets us DMA from any device to any address */ + MAX_DMA_ADDRESS = ~0UL; + + hpzx1_acpi_dev_init(); + sba_init(); + return 0; +} + +subsys_initcall(hpzx1_init); diff --git a/arch/ia64/ia32/ia32_entry.S b/arch/ia64/ia32/ia32_entry.S index 576a189..6001b46 100644 --- a/arch/ia64/ia32/ia32_entry.S +++ b/arch/ia64/ia32/ia32_entry.S @@ -273,9 +273,9 @@ ia32_syscall_table: data8 sys32_sigsuspend data8 compat_sys_sigpending data8 sys_sethostname - data8 compat_sys_setrlimit /* 75 */ - data8 compat_sys_old_getrlimit - data8 compat_sys_getrusage + data8 sys32_setrlimit /* 75 */ + data8 sys32_old_getrlimit + data8 sys32_getrusage data8 sys32_gettimeofday data8 sys32_settimeofday data8 sys32_getgroups16 /* 80 */ @@ -312,7 +312,7 @@ ia32_syscall_table: data8 sys_vhangup data8 sys32_ni_syscall /* used to be sys_idle */ data8 sys32_ni_syscall - data8 compat_sys_wait4 + data8 sys32_wait4 data8 sys_swapoff /* 115 */ data8 sys32_sysinfo data8 sys32_ipc @@ -389,7 +389,7 @@ ia32_syscall_table: data8 sys32_ni_syscall /* streams1 */ data8 sys32_ni_syscall /* streams2 */ data8 sys32_vfork /* 190 */ - data8 compat_sys_getrlimit + data8 sys32_getrlimit data8 sys32_mmap2 data8 sys32_truncate64 data8 sys32_ftruncate64 diff --git a/arch/ia64/ia32/ia32_ioctl.c b/arch/ia64/ia32/ia32_ioctl.c index e86c863..0ead044 100644 --- a/arch/ia64/ia32/ia32_ioctl.c +++ b/arch/ia64/ia32/ia32_ioctl.c @@ -3,16 +3,13 @@ * * Copyright (C) 2000 VA Linux Co * Copyright (C) 2000 Don Dugger - * Copyright (C) 2001-2003 Hewlett-Packard Co + * Copyright (C) 2001-2002 Hewlett-Packard Co * David Mosberger-Tang */ #include #include #include /* argh, msdos_fs.h isn't self-contained... */ -#include /* argh, msdos_fs.h isn't self-contained... */ - -#include #include #include @@ -36,6 +33,8 @@ #define __KERNEL__ #include +#include + #include <../drivers/char/drm/drm.h> #include <../drivers/char/drm/mga_drm.h> #include <../drivers/char/drm/i810_drm.h> diff --git a/arch/ia64/ia32/ia32_traps.c b/arch/ia64/ia32/ia32_traps.c index 3090690..20fea69 100644 --- a/arch/ia64/ia32/ia32_traps.c +++ b/arch/ia64/ia32/ia32_traps.c @@ -103,7 +103,7 @@ ia32_exception (struct pt_regs *regs, unsigned long isr) * C1 reg you need in case of a stack fault, 0x040 is the stack * fault bit. 
We should only be taking one exception at a time, * so if this combination doesn't produce any single exception, - * then we have a bad program that isn't synchronizing its FPU usage + * then we have a bad program that isn't syncronizing its FPU usage * and it will suffer the consequences since we won't be able to * fully reproduce the context of the exception */ diff --git a/arch/ia64/ia32/sys_ia32.c b/arch/ia64/ia32/sys_ia32.c index fedff08..570b039 100644 --- a/arch/ia64/ia32/sys_ia32.c +++ b/arch/ia64/ia32/sys_ia32.c @@ -53,10 +53,10 @@ #include #include #include -#include #include #include +#include #define DEBUG 0 @@ -177,7 +177,7 @@ int cp_compat_stat(struct kstat *stat, struct compat_stat *ubuf) { int err; - if ((u64) stat->size > MAX_NON_LFS) + if (stat->size > MAX_NON_LFS) return -EOVERFLOW; if (clear_user(ubuf, sizeof(*ubuf))) @@ -243,7 +243,8 @@ mmap_subpage (struct file *file, unsigned long start, unsigned long end, int pro return -ENOMEM; if (old_prot) - copy_from_user(page, (void *) PAGE_START(start), PAGE_SIZE); + if (copy_from_user(page, (void *) PAGE_START(start), PAGE_SIZE)) + return -EFAULT; down_write(&current->mm->mmap_sem); { @@ -836,9 +837,8 @@ sys32_select (int n, fd_set *inp, fd_set *outp, fd_set *exp, struct compat_timev } } - size = FDS_BYTES(n); ret = -EINVAL; - if (n < 0 || size < n) + if (n < 0) goto out_nofds; if (n > current->files->max_fdset) @@ -850,6 +850,7 @@ sys32_select (int n, fd_set *inp, fd_set *outp, fd_set *exp, struct compat_timev * long-words. */ ret = -ENOMEM; + size = FDS_BYTES(n); bits = kmalloc(6 * size, GFP_KERNEL); if (!bits) goto out_nofds; @@ -927,7 +928,8 @@ asmlinkage ssize_t sys_writev (unsigned long,const struct iovec *,unsigned long) static struct iovec * get_compat_iovec (struct compat_iovec *iov32, struct iovec *iov_buf, u32 count, int type) { - u32 i, buf, len; + int i; + u32 buf, len; struct iovec *ivp, *iov; /* Get the "struct iovec" from user memory */ @@ -1003,6 +1005,77 @@ sys32_writev (int fd, struct compat_iovec *vector, u32 count) return ret; } +#define RLIM_INFINITY32 0x7fffffff +#define RESOURCE32(x) ((x > RLIM_INFINITY32) ? 
RLIM_INFINITY32 : x) + +struct rlimit32 { + int rlim_cur; + int rlim_max; +}; + +extern asmlinkage long sys_getrlimit (unsigned int resource, struct rlimit *rlim); + +asmlinkage long +sys32_old_getrlimit (unsigned int resource, struct rlimit32 *rlim) +{ + mm_segment_t old_fs = get_fs(); + struct rlimit r; + int ret; + + set_fs(KERNEL_DS); + ret = sys_getrlimit(resource, &r); + set_fs(old_fs); + if (!ret) { + ret = put_user(RESOURCE32(r.rlim_cur), &rlim->rlim_cur); + ret |= put_user(RESOURCE32(r.rlim_max), &rlim->rlim_max); + } + return ret; +} + +asmlinkage long +sys32_getrlimit (unsigned int resource, struct rlimit32 *rlim) +{ + mm_segment_t old_fs = get_fs(); + struct rlimit r; + int ret; + + set_fs(KERNEL_DS); + ret = sys_getrlimit(resource, &r); + set_fs(old_fs); + if (!ret) { + if (r.rlim_cur >= 0xffffffff) + r.rlim_cur = 0xffffffff; + if (r.rlim_max >= 0xffffffff) + r.rlim_max = 0xffffffff; + ret = put_user(r.rlim_cur, &rlim->rlim_cur); + ret |= put_user(r.rlim_max, &rlim->rlim_max); + } + return ret; +} + +extern asmlinkage long sys_setrlimit (unsigned int resource, struct rlimit *rlim); + +asmlinkage long +sys32_setrlimit (unsigned int resource, struct rlimit32 *rlim) +{ + struct rlimit r; + int ret; + mm_segment_t old_fs = get_fs(); + + if (resource >= RLIM_NLIMITS) + return -EINVAL; + if (get_user(r.rlim_cur, &rlim->rlim_cur) || get_user(r.rlim_max, &rlim->rlim_max)) + return -EFAULT; + if (r.rlim_cur == RLIM_INFINITY32) + r.rlim_cur = RLIM_INFINITY; + if (r.rlim_max == RLIM_INFINITY32) + r.rlim_max = RLIM_INFINITY; + set_fs(KERNEL_DS); + ret = sys_setrlimit(resource, &r); + set_fs(old_fs); + return ret; +} + /* * sys32_ipc() is the de-multiplexer for the SysV IPC calls in 32bit emulation.. * @@ -1575,35 +1648,19 @@ shmctl32 (int first, int second, void *uptr) return err; } -extern int sem_ctls[]; -#define sc_semopm (sem_ctls[2]) - static long -semtimedop32(int semid, struct sembuf *tsops, int nsops, - struct compat_timespec *timeout32) +semtimedop32(int semid, struct sembuf *tsems, int nsems, + const struct compat_timespec *timeout32) { struct timespec t; - mm_segment_t oldfs; - long ret; - - /* parameter checking precedence should mirror sys_semtimedop() */ - if (nsops < 1 || semid < 0) - return -EINVAL; - if (nsops > sc_semopm) - return -E2BIG; - if (!access_ok(VERIFY_READ, tsops, nsops * sizeof(struct sembuf)) || - get_compat_timespec(&t, timeout32)) + if (get_user (t.tv_sec, &timeout32->tv_sec) || + get_user (t.tv_nsec, &timeout32->tv_nsec)) return -EFAULT; - - oldfs = get_fs(); - set_fs(KERNEL_DS); - ret = sys_semtimedop(semid, tsops, nsops, &t); - set_fs(oldfs); - return ret; + return sys_semtimedop(semid, tsems, nsems, &t); } asmlinkage long -sys32_ipc(u32 call, int first, int second, int third, u32 ptr, u32 fifth) +sys32_ipc (u32 call, int first, int second, int third, u32 ptr, u32 fifth) { int version; @@ -1611,15 +1668,12 @@ sys32_ipc(u32 call, int first, int second, int third, u32 ptr, u32 fifth) call &= 0xffff; switch (call) { - case SEMTIMEDOP: - if (fifth) - return semtimedop32(first, (struct sembuf *)AA(ptr), - second, (struct compat_timespec *)AA(fifth)); - /* else fall through for normal semop() */ case SEMOP: /* struct sembuf is the same on 32 and 64bit :)) */ - return sys_semtimedop(first, (struct sembuf *)AA(ptr), second, - NULL); + return sys_semtimedop(first, (struct sembuf *)AA(ptr), second, NULL); + case SEMTIMEDOP: + return semtimedop32(first, (struct sembuf *)AA(ptr), second, + (const struct compat_timespec *)AA(fifth)); case SEMGET: return 
sys_semget(first, second, third); case SEMCTL: @@ -1670,10 +1724,98 @@ sys32_time (int *tloc) return i; } +struct rusage32 { + struct compat_timeval ru_utime; + struct compat_timeval ru_stime; + int ru_maxrss; + int ru_ixrss; + int ru_idrss; + int ru_isrss; + int ru_minflt; + int ru_majflt; + int ru_nswap; + int ru_inblock; + int ru_oublock; + int ru_msgsnd; + int ru_msgrcv; + int ru_nsignals; + int ru_nvcsw; + int ru_nivcsw; +}; + +static int +put_rusage (struct rusage32 *ru, struct rusage *r) +{ + int err; + + if (!access_ok(VERIFY_WRITE, ru, sizeof(*ru))) + return -EFAULT; + + err = __put_user (r->ru_utime.tv_sec, &ru->ru_utime.tv_sec); + err |= __put_user (r->ru_utime.tv_usec, &ru->ru_utime.tv_usec); + err |= __put_user (r->ru_stime.tv_sec, &ru->ru_stime.tv_sec); + err |= __put_user (r->ru_stime.tv_usec, &ru->ru_stime.tv_usec); + err |= __put_user (r->ru_maxrss, &ru->ru_maxrss); + err |= __put_user (r->ru_ixrss, &ru->ru_ixrss); + err |= __put_user (r->ru_idrss, &ru->ru_idrss); + err |= __put_user (r->ru_isrss, &ru->ru_isrss); + err |= __put_user (r->ru_minflt, &ru->ru_minflt); + err |= __put_user (r->ru_majflt, &ru->ru_majflt); + err |= __put_user (r->ru_nswap, &ru->ru_nswap); + err |= __put_user (r->ru_inblock, &ru->ru_inblock); + err |= __put_user (r->ru_oublock, &ru->ru_oublock); + err |= __put_user (r->ru_msgsnd, &ru->ru_msgsnd); + err |= __put_user (r->ru_msgrcv, &ru->ru_msgrcv); + err |= __put_user (r->ru_nsignals, &ru->ru_nsignals); + err |= __put_user (r->ru_nvcsw, &ru->ru_nvcsw); + err |= __put_user (r->ru_nivcsw, &ru->ru_nivcsw); + return err; +} + +asmlinkage long +sys32_wait4 (int pid, unsigned int *stat_addr, int options, struct rusage32 *ru) +{ + if (!ru) + return sys_wait4(pid, stat_addr, options, NULL); + else { + struct rusage r; + int ret; + unsigned int status; + mm_segment_t old_fs = get_fs(); + + set_fs(KERNEL_DS); + ret = sys_wait4(pid, stat_addr ? &status : NULL, options, &r); + set_fs(old_fs); + if (put_rusage(ru, &r)) + return -EFAULT; + if (stat_addr && put_user(status, stat_addr)) + return -EFAULT; + return ret; + } +} + asmlinkage long sys32_waitpid (int pid, unsigned int *stat_addr, int options) { - return compat_sys_wait4(pid, stat_addr, options, NULL); + return sys32_wait4(pid, stat_addr, options, NULL); +} + + +extern asmlinkage long sys_getrusage (int who, struct rusage *ru); + +asmlinkage long +sys32_getrusage (int who, struct rusage32 *ru) +{ + struct rusage r; + int ret; + mm_segment_t old_fs = get_fs(); + + set_fs(KERNEL_DS); + ret = sys_getrusage(who, &r); + set_fs(old_fs); + if (put_rusage (ru, &r)) + return -EFAULT; + return ret; } static unsigned int @@ -2069,7 +2211,7 @@ sys32_ptrace (int request, pid_t pid, unsigned int addr, unsigned int data, ret = -EIO; break; } - for (i = 0; i < (int) (17*sizeof(int)); i += sizeof(int) ) { + for (i = 0; i < 17*sizeof(int); i += sizeof(int) ) { put_user(getreg(child, i), (unsigned int *) A(data)); data += sizeof(int); } @@ -2081,7 +2223,7 @@ sys32_ptrace (int request, pid_t pid, unsigned int addr, unsigned int data, ret = -EIO; break; } - for (i = 0; i < (int) (17*sizeof(int)); i += sizeof(int) ) { + for (i = 0; i < 17*sizeof(int); i += sizeof(int) ) { get_user(tmp, (unsigned int *) A(data)); putreg(child, i, tmp); data += sizeof(int); @@ -2157,7 +2299,7 @@ sys32_iopl (int level) return(-EINVAL); /* Trying to gain more privileges? 
*/ asm volatile ("mov %0=ar.eflag ;;" : "=r"(old)); - if ((unsigned int) level > ((old >> 12) & 3)) { + if (level > ((old >> 12) & 3)) { if (!capable(CAP_SYS_RAWIO)) return -EPERM; } diff --git a/arch/ia64/kernel/Makefile b/arch/ia64/kernel/Makefile index 0add23a..176a973 100644 --- a/arch/ia64/kernel/Makefile +++ b/arch/ia64/kernel/Makefile @@ -11,8 +11,6 @@ obj-y := acpi.o entry.o efi.o efi_stub.o gate.o ia64_ksyms.o irq.o irq_ia64.o ir obj-$(CONFIG_EFI_VARS) += efivars.o obj-$(CONFIG_FSYS) += fsys.o obj-$(CONFIG_IA64_BRL_EMU) += brl_emu.o -obj-$(CONFIG_IA64_GENERIC) += acpi-ext.o -obj-$(CONFIG_IA64_HP_ZX1) += acpi-ext.o obj-$(CONFIG_IA64_MCA) += mca.o mca_asm.o obj-$(CONFIG_IA64_PALINFO) += palinfo.o obj-$(CONFIG_IOSAPIC) += iosapic.o diff --git a/arch/ia64/kernel/acpi-ext.c b/arch/ia64/kernel/acpi-ext.c index 42d0c1f..cb1b314 100644 --- a/arch/ia64/kernel/acpi-ext.c +++ b/arch/ia64/kernel/acpi-ext.c @@ -3,99 +3,69 @@ * * Copyright (C) 2003 Hewlett-Packard * Copyright (C) Alex Williamson - * Copyright (C) Bjorn Helgaas * - * Vendor specific extensions to ACPI. + * Vendor specific extensions to ACPI. These are used by both + * HP and NEC. */ #include -#include #include #include #include #include -struct acpi_vendor_descriptor { - u8 guid_id; - efi_guid_t guid; -}; - -struct acpi_vendor_info { - struct acpi_vendor_descriptor *descriptor; - u8 *data; - u32 length; -}; - -acpi_status -acpi_vendor_resource_match(struct acpi_resource *resource, void *context) -{ - struct acpi_vendor_info *info = (struct acpi_vendor_info *) context; - struct acpi_resource_vendor *vendor; - struct acpi_vendor_descriptor *descriptor; - u32 length; - - if (resource->id != ACPI_RSTYPE_VENDOR) - return AE_OK; - - vendor = (struct acpi_resource_vendor *) &resource->data; - descriptor = (struct acpi_vendor_descriptor *) vendor->reserved; - if (vendor->length <= sizeof(*info->descriptor) || - descriptor->guid_id != info->descriptor->guid_id || - efi_guidcmp(descriptor->guid, info->descriptor->guid)) - return AE_OK; - - length = vendor->length - sizeof(struct acpi_vendor_descriptor); - info->data = acpi_os_allocate(length); - if (!info->data) - return AE_NO_MEMORY; - - memcpy(info->data, vendor->reserved + sizeof(struct acpi_vendor_descriptor), length); - info->length = length; - return AE_CTRL_TERMINATE; -} - +/* + * Note: Strictly speaking, this is only needed for HP and NEC machines. + * However, NEC machines identify themselves as DIG-compliant, so there is + * no easy way to #ifdef this out. 
+ */ acpi_status -acpi_find_vendor_resource(acpi_handle obj, struct acpi_vendor_descriptor *id, - u8 **data, u32 *length) +hp_acpi_csr_space (acpi_handle obj, u64 *csr_base, u64 *csr_length) { - struct acpi_vendor_info info; - - info.descriptor = id; - info.data = 0; - - acpi_walk_resources(obj, METHOD_NAME__CRS, acpi_vendor_resource_match, &info); - if (!info.data) + int i, offset = 0; + acpi_status status; + struct acpi_buffer buf; + struct acpi_resource_vendor *res; + struct acpi_hp_vendor_long *hp_res; + efi_guid_t vendor_guid; + + *csr_base = 0; + *csr_length = 0; + + status = acpi_get_crs(obj, &buf); + if (ACPI_FAILURE(status)) { + printk(KERN_ERR PREFIX "Unable to get _CRS data on object\n"); + return status; + } + + res = (struct acpi_resource_vendor *)acpi_get_crs_type(&buf, &offset, ACPI_RSTYPE_VENDOR); + if (!res) { + printk(KERN_ERR PREFIX "Failed to find config space for device\n"); + acpi_dispose_crs(&buf); return AE_NOT_FOUND; + } - *data = info.data; - *length = info.length; - return AE_OK; -} - -struct acpi_vendor_descriptor hp_ccsr_descriptor = { - .guid_id = 2, - .guid = EFI_GUID(0x69e9adf9, 0x924f, 0xab5f, 0xf6, 0x4a, 0x24, 0xd2, 0x01, 0x37, 0x0e, 0xad) -}; + hp_res = (struct acpi_hp_vendor_long *)(res->reserved); -acpi_status -hp_acpi_csr_space(acpi_handle obj, u64 *csr_base, u64 *csr_length) -{ - acpi_status status; - u8 *data; - u32 length; - int i; + if (res->length != HP_CCSR_LENGTH || hp_res->guid_id != HP_CCSR_TYPE) { + printk(KERN_ERR PREFIX "Unknown Vendor data\n"); + acpi_dispose_crs(&buf); + return AE_TYPE; /* Revisit error? */ + } - status = acpi_find_vendor_resource(obj, &hp_ccsr_descriptor, &data, &length); + memcpy(&vendor_guid, hp_res->guid, sizeof(efi_guid_t)); + if (efi_guidcmp(vendor_guid, HP_CCSR_GUID) != 0) { + printk(KERN_ERR PREFIX "Vendor GUID does not match\n"); + acpi_dispose_crs(&buf); + return AE_TYPE; /* Revisit error? */ + } - if (ACPI_FAILURE(status) || length != 16) - return AE_NOT_FOUND; - - memcpy(csr_base, data, sizeof(*csr_base)); - memcpy(csr_length, data + 8, sizeof(*csr_length)); - acpi_os_free(data); + for (i = 0 ; i < 8 ; i++) { + *csr_base |= ((u64)(hp_res->csr_base[i]) << (i * 8)); + *csr_length |= ((u64)(hp_res->csr_length[i]) << (i * 8)); + } + acpi_dispose_crs(&buf); return AE_OK; } - -EXPORT_SYMBOL(hp_acpi_csr_space); diff --git a/arch/ia64/kernel/acpi.c b/arch/ia64/kernel/acpi.c index 7f888fb..b1c6734 100644 --- a/arch/ia64/kernel/acpi.c +++ b/arch/ia64/kernel/acpi.c @@ -115,6 +115,134 @@ acpi_get_sysname (void) #endif } +#ifdef CONFIG_ACPI + +/** + * acpi_get_crs - Return the current resource settings for a device + * obj: A handle for this device + * buf: A buffer to be populated by this call. + * + * Pass a valid handle, typically obtained by walking the namespace and a + * pointer to an allocated buffer, and this function will fill in the buffer + * with a list of acpi_resource structures. 
+ */ +acpi_status +acpi_get_crs (acpi_handle obj, struct acpi_buffer *buf) +{ + acpi_status result; + buf->length = 0; + buf->pointer = NULL; + + result = acpi_get_current_resources(obj, buf); + if (result != AE_BUFFER_OVERFLOW) + return result; + buf->pointer = kmalloc(buf->length, GFP_KERNEL); + if (!buf->pointer) + return -ENOMEM; + + return acpi_get_current_resources(obj, buf); +} + +struct acpi_resource * +acpi_get_crs_next (struct acpi_buffer *buf, int *offset) +{ + struct acpi_resource *res; + + if (*offset >= buf->length) + return NULL; + + res = buf->pointer + *offset; + *offset += res->length; + return res; +} + +union acpi_resource_data * +acpi_get_crs_type (struct acpi_buffer *buf, int *offset, int type) +{ + for (;;) { + struct acpi_resource *res = acpi_get_crs_next(buf, offset); + if (!res) + return NULL; + if (res->id == type) + return &res->data; + } +} + +void +acpi_dispose_crs (struct acpi_buffer *buf) +{ + kfree(buf->pointer); +} + +void +acpi_get_crs_addr (struct acpi_buffer *buf, int type, u64 *base, u64 *size, u64 *tra) +{ + int offset = 0; + struct acpi_resource_address16 *addr16; + struct acpi_resource_address32 *addr32; + struct acpi_resource_address64 *addr64; + + for (;;) { + struct acpi_resource *res = acpi_get_crs_next(buf, &offset); + if (!res) + return; + switch (res->id) { + case ACPI_RSTYPE_ADDRESS16: + addr16 = (struct acpi_resource_address16 *) &res->data; + + if (type == addr16->resource_type) { + *base = addr16->min_address_range; + *size = addr16->address_length; + *tra = addr16->address_translation_offset; + return; + } + break; + case ACPI_RSTYPE_ADDRESS32: + addr32 = (struct acpi_resource_address32 *) &res->data; + if (type == addr32->resource_type) { + *base = addr32->min_address_range; + *size = addr32->address_length; + *tra = addr32->address_translation_offset; + return; + } + break; + case ACPI_RSTYPE_ADDRESS64: + addr64 = (struct acpi_resource_address64 *) &res->data; + if (type == addr64->resource_type) { + *base = addr64->min_address_range; + *size = addr64->address_length; + *tra = addr64->address_translation_offset; + return; + } + break; + } + } +} + +int +acpi_get_addr_space(void *obj, u8 type, u64 *base, u64 *length, u64 *tra) +{ + acpi_status status; + struct acpi_buffer buf; + + *base = 0; + *length = 0; + *tra = 0; + + status = acpi_get_crs((acpi_handle)obj, &buf); + if (ACPI_FAILURE(status)) { + printk(KERN_ERR PREFIX "Unable to get _CRS data on object\n"); + return status; + } + + acpi_get_crs_addr(&buf, type, base, length, tra); + + acpi_dispose_crs(&buf); + + return AE_OK; +} +#endif /* CONFIG_ACPI */ + #ifdef CONFIG_ACPI_BOOT #define ACPI_MAX_PLATFORM_INTERRUPTS 256 @@ -196,8 +324,7 @@ acpi_parse_lsapic (acpi_table_entry_header *header) printk(" enabled"); #ifdef CONFIG_SMP smp_boot_data.cpu_phys_id[total_cpus] = (lsapic->id << 8) | lsapic->eid; - if (hard_smp_processor_id() - == (unsigned int) smp_boot_data.cpu_phys_id[total_cpus]) + if (hard_smp_processor_id() == smp_boot_data.cpu_phys_id[total_cpus]) printk(" (BSP)"); #endif } @@ -791,7 +918,8 @@ acpi_register_irq (u32 gsi, u32 polarity, u32 trigger) return 0; /* Turn it on */ - vector = iosapic_register_intr (gsi, polarity, trigger); + vector = iosapic_register_intr (gsi, polarity ? IOSAPIC_POL_HIGH : IOSAPIC_POL_LOW, + trigger ? 
IOSAPIC_EDGE : IOSAPIC_LEVEL); return vector; } diff --git a/arch/ia64/kernel/brl_emu.c b/arch/ia64/kernel/brl_emu.c index 0b286ca..541addc 100644 --- a/arch/ia64/kernel/brl_emu.c +++ b/arch/ia64/kernel/brl_emu.c @@ -59,7 +59,7 @@ ia64_emulate_brl (struct pt_regs *regs, unsigned long ar_ec) unsigned long next_ip; struct siginfo siginfo; struct illegal_op_return rv; - long tmp_taken, unimplemented_address; + int tmp_taken, unimplemented_address; rv.fkt = (unsigned long) -1; diff --git a/arch/ia64/kernel/efi.c b/arch/ia64/kernel/efi.c index cb967c5..78c8c05 100644 --- a/arch/ia64/kernel/efi.c +++ b/arch/ia64/kernel/efi.c @@ -203,16 +203,16 @@ STUB_GET_NEXT_HIGH_MONO_COUNT(virt, ) STUB_RESET_SYSTEM(virt, ) void -efi_gettimeofday (struct timespec *ts) +efi_gettimeofday (struct timeval *tv) { efi_time_t tm; - memset(ts, 0, sizeof(ts)); + memset(tv, 0, sizeof(tv)); if ((*efi.get_time)(&tm, 0) != EFI_SUCCESS) return; - ts->tv_sec = mktime(tm.year, tm.month, tm.day, tm.hour, tm.minute, tm.second); - ts->tv_nsec = tm.nanosecond; + tv->tv_sec = mktime(tm.year, tm.month, tm.day, tm.hour, tm.minute, tm.second); + tv->tv_usec = tm.nanosecond / 1000; } static int @@ -512,7 +512,7 @@ efi_init (void) /* Show what we know for posterity */ c16 = __va(efi.systab->fw_vendor); if (c16) { - for (i = 0;i < (int) sizeof(vendor) && *c16; ++i) + for (i = 0;i < sizeof(vendor) && *c16; ++i) vendor[i] = *c16++; vendor[i] = '\0'; } @@ -520,7 +520,7 @@ efi_init (void) printk(KERN_INFO "EFI v%u.%.02u by %s:", efi.systab->hdr.revision >> 16, efi.systab->hdr.revision & 0xffff, vendor); - for (i = 0; i < (int) efi.systab->nr_tables; i++) { + for (i = 0; i < efi.systab->nr_tables; i++) { if (efi_guidcmp(config_tables[i].guid, MPS_TABLE_GUID) == 0) { efi.mps = __va(config_tables[i].table); printk(" MPS=0x%lx", config_tables[i].table); diff --git a/arch/ia64/kernel/efivars.c b/arch/ia64/kernel/efivars.c index 920675b..b5ab1a0 100644 --- a/arch/ia64/kernel/efivars.c +++ b/arch/ia64/kernel/efivars.c @@ -138,7 +138,8 @@ utf8_strlen(efi_char16_t *data, unsigned long maxlength) static inline unsigned long utf8_strsize(efi_char16_t *data, unsigned long maxlength) { - return utf8_strlen(data, maxlength/sizeof(efi_char16_t)) * sizeof(efi_char16_t); + return utf8_strlen(data, maxlength/sizeof(efi_char16_t)) * + sizeof(efi_char16_t); } @@ -169,7 +170,8 @@ efivar_create_proc_entry(unsigned long variable_name_size, efi_guid_t *vendor_guid) { - int i, short_name_size = variable_name_size / sizeof(efi_char16_t) + 38; + int i, short_name_size = variable_name_size / + sizeof(efi_char16_t) + 38; char *short_name; efivar_entry_t *new_efivar; @@ -190,7 +192,7 @@ efivar_create_proc_entry(unsigned long variable_name_size, /* Convert Unicode to normal chars (assume top bits are 0), ala UTF-8 */ - for (i=0; i< (int) (variable_name_size / sizeof(efi_char16_t)); i++) { + for (i=0; i EXPORT_SYMBOL_NOVERS(__down); @@ -57,12 +56,6 @@ EXPORT_SYMBOL_NOVERS(__up); #include EXPORT_SYMBOL(clear_page); -#ifdef CONFIG_VIRTUAL_MEM_MAP -#include -EXPORT_SYMBOL(vmalloc_end); -EXPORT_SYMBOL(ia64_pfn_valid); -#endif - #include EXPORT_SYMBOL(cpu_info__per_cpu); EXPORT_SYMBOL(kernel_thread); @@ -168,13 +161,3 @@ EXPORT_SYMBOL(unw_access_br); EXPORT_SYMBOL(unw_access_fr); EXPORT_SYMBOL(unw_access_ar); EXPORT_SYMBOL(unw_access_pr); - -#ifdef CONFIG_SMP -#if __GNUC__ < 3 || (__GNUC__ == 3 && __GNUC_MINOR__ < 4) -extern void ia64_spinlock_contention_pre3_4 (void); -EXPORT_SYMBOL(ia64_spinlock_contention_pre3_4); -#else -extern void ia64_spinlock_contention (void); 
-EXPORT_SYMBOL(ia64_spinlock_contention); -#endif -#endif diff --git a/arch/ia64/kernel/iosapic.c b/arch/ia64/kernel/iosapic.c index cb8d2c6..8ace5ae 100644 --- a/arch/ia64/kernel/iosapic.c +++ b/arch/ia64/kernel/iosapic.c @@ -581,8 +581,9 @@ iosapic_override_isa_irq (unsigned int isa_irq, unsigned int gsi, register_intr(gsi, vector, IOSAPIC_LOWEST_PRIORITY, polarity, trigger); DBG("ISA: IRQ %u -> GSI 0x%x (%s,%s) -> CPU 0x%04x vector %d\n", - isa_irq, gsi, polarity == IOSAPIC_POL_HIGH ? "high" : "low", - trigger == IOSAPIC_EDGE ? "edge" : "level", dest, vector); + isa_irq, gsi, + polarity == IOSAPIC_POL_HIGH ? "high" : "low", trigger == IOSAPIC_EDGE ? "edge" : "level", + dest, vector); /* program the IOSAPIC routing table */ set_rte(vector, dest); @@ -634,6 +635,7 @@ iosapic_init (unsigned long phys_addr, unsigned int gsi_base) (ver & 0xf0) >> 4, (ver & 0x0f), phys_addr, gsi_base, gsi_base + num_rte - 1); if ((gsi_base == 0) && pcat_compat) { + /* * Map the legacy ISA devices into the IOSAPIC data. Some of these may * get reprogrammed later on with data from the ACPI Interrupt Source @@ -644,11 +646,20 @@ iosapic_init (unsigned long phys_addr, unsigned int gsi_base) } } -void -iosapic_enable_intr (unsigned int vector) +static void __init +fixup_vector (int vector, unsigned int gsi, const char *pci_id) { + struct hw_interrupt_type *irq_type = &irq_type_iosapic_level; + irq_desc_t *idesc; unsigned int dest; + idesc = irq_desc(vector); + if (idesc->handler != irq_type) { + if (idesc->handler != &no_irq_type) + printk(KERN_INFO "IOSAPIC: changing vector %d from %s to %s\n", + vector, idesc->handler->typename, irq_type->typename); + idesc->handler = irq_type; + } #ifdef CONFIG_SMP /* * For platforms that do not support interrupt redirect via the XTP interface, we @@ -676,12 +687,10 @@ iosapic_enable_intr (unsigned int vector) #endif set_rte(vector, dest); - printk(KERN_INFO "IOSAPIC: vector %d -> CPU 0x%04x, enabled\n", - vector, dest); + printk(KERN_INFO "IOSAPIC: %s -> GSI 0x%x -> CPU 0x%04x vector %d\n", + pci_id, gsi, dest, vector); } -#ifdef CONFIG_ACPI_PCI - void __init iosapic_parse_prt (void) { @@ -690,8 +699,6 @@ iosapic_parse_prt (void) unsigned int gsi; int vector; char pci_id[16]; - struct hw_interrupt_type *irq_type = &irq_type_iosapic_level; - irq_desc_t *idesc; list_for_each(node, &acpi_prt.entries) { entry = list_entry(node, struct acpi_prt_entry, node); @@ -704,9 +711,6 @@ iosapic_parse_prt (void) vector = gsi_to_vector(gsi); if (vector < 0) { - if (find_iosapic(gsi) < 0) - continue; - /* allocate a vector for this interrupt line */ if (pcat_compat && (gsi < 16)) vector = isa_irq_to_vector(gsi); @@ -714,22 +718,11 @@ iosapic_parse_prt (void) /* new GSI; allocate a vector for it */ vector = ia64_alloc_vector(); - register_intr(gsi, vector, IOSAPIC_LOWEST_PRIORITY, IOSAPIC_POL_LOW, - IOSAPIC_LEVEL); + register_intr(gsi, vector, IOSAPIC_LOWEST_PRIORITY, IOSAPIC_POL_LOW, IOSAPIC_LEVEL); } snprintf(pci_id, sizeof(pci_id), "%02x:%02x:%02x[%c]", entry->id.segment, entry->id.bus, entry->id.device, 'A' + entry->pin); - /* - * If vector was previously initialized to a different - * handler, re-initialize. 
- */ - idesc = irq_desc(vector); - if (idesc->handler != irq_type) - register_intr(gsi, vector, IOSAPIC_LOWEST_PRIORITY, IOSAPIC_POL_LOW, - IOSAPIC_LEVEL); - + fixup_vector(vector, gsi, pci_id); } } - -#endif /* CONFIG_ACPI */ diff --git a/arch/ia64/kernel/irq.c b/arch/ia64/kernel/irq.c index 6ed0e52..e3b0678 100644 --- a/arch/ia64/kernel/irq.c +++ b/arch/ia64/kernel/irq.c @@ -18,6 +18,7 @@ */ #include +#include #include #include #include @@ -32,7 +33,6 @@ #include #include #include -#include #include #include @@ -50,7 +50,7 @@ * Linux has a controller-independent x86 interrupt architecture. * every controller has a 'controller-template', that is used * by the main code to do the right thing. Each driver-visible - * interrupt source is transparently wired to the appropriate + * interrupt source is transparently wired to the apropriate * controller. Thus drivers need not be aware of the * interrupt-controller. * @@ -91,8 +91,7 @@ static void register_irq_proc (unsigned int irq); * Special irq handlers. */ -irqreturn_t no_action(int cpl, void *dev_id, struct pt_regs *regs) -{ return IRQ_NONE; } +void no_action(int cpl, void *dev_id, struct pt_regs *regs) { } /* * Generic no controller code @@ -142,11 +141,9 @@ struct hw_interrupt_type no_irq_type = { }; atomic_t irq_err_count; -#ifdef CONFIG_X86_IO_APIC -#ifdef APIC_MISMATCH_DEBUG +#if defined(CONFIG_X86) && defined(CONFIG_X86_IO_APIC) && defined(APIC_MISMATCH_DEBUG) atomic_t irq_mis_count; #endif -#endif /* * Generic, controller-independent functions: @@ -181,10 +178,9 @@ int show_interrupts(struct seq_file *p, void *v) #endif seq_printf(p, " %14s", idesc->handler->typename); seq_printf(p, " %s", action->name); - for (action=action->next; action; action = action->next) seq_printf(p, ", %s", action->name); - + seq_putc(p, '\n'); skip: spin_unlock_irqrestore(&idesc->lock, flags); @@ -194,19 +190,17 @@ skip: if (cpu_online(j)) seq_printf(p, "%10u ", nmi_count(j)); seq_putc(p, '\n'); -#if CONFIG_X86_LOCAL_APIC +#if defined(CONFIG_SMP) && defined(CONFIG_X86) seq_puts(p, "LOC: "); for (j = 0; j < NR_CPUS; j++) if (cpu_online(j)) - seq_printf(p, "%10u ", irq_stat[j].apic_timer_irqs); + seq_printf(p, "%10u ", apic_timer_irqs[j]); seq_putc(p, '\n'); #endif seq_printf(p, "ERR: %10u\n", atomic_read(&irq_err_count)); -#ifdef CONFIG_X86_IO_APIC -#ifdef APIC_MISMATCH_DEBUG +#if defined(CONFIG_X86) && defined(CONFIG_X86_IO_APIC) && defined(APIC_MISMATCH_DEBUG) seq_printf(p, "MIS: %10u\n", atomic_read(&irq_mis_count)); #endif -#endif return 0; } @@ -225,46 +219,21 @@ inline void synchronize_irq(unsigned int irq) * waste of time and is not what some drivers would * prefer. 
*/ -int handle_IRQ_event(unsigned int irq, - struct pt_regs *regs, struct irqaction *action) +int handle_IRQ_event(unsigned int irq, struct pt_regs * regs, struct irqaction * action) { int status = 1; /* Force the "do bottom halves" bit */ - int retval = 0; - struct irqaction *first_action = action; if (!(action->flags & SA_INTERRUPT)) local_irq_enable(); do { status |= action->flags; - retval |= action->handler(irq, action->dev_id, regs); + action->handler(irq, action->dev_id, regs); action = action->next; } while (action); if (status & SA_SAMPLE_RANDOM) add_interrupt_randomness(irq); local_irq_disable(); - if (retval != 1) { - static int count = 100; - if (count) { - count--; - if (retval) { - printk("irq event %d: bogus retval mask %x\n", - irq, retval); - } else { - printk("irq %d: nobody cared!\n", irq); - } - dump_stack(); - printk("handlers:\n"); - action = first_action; - do { - printk("[<%p>]", action->handler); - print_symbol(" (%s)", - (unsigned long)action->handler); - printk("\n"); - action = action->next; - } while (action); - } - } return status; } @@ -486,7 +455,7 @@ unsigned int do_IRQ(unsigned long irq, struct pt_regs *regs) */ int request_irq(unsigned int irq, - irqreturn_t (*handler)(int, void *, struct pt_regs *), + void (*handler)(int, void *, struct pt_regs *), unsigned long irqflags, const char * devname, void *dev_id) @@ -513,7 +482,7 @@ int request_irq(unsigned int irq, return -EINVAL; action = (struct irqaction *) - kmalloc(sizeof(struct irqaction), GFP_ATOMIC); + kmalloc(sizeof(struct irqaction), GFP_KERNEL); if (!action) return -ENOMEM; @@ -542,7 +511,10 @@ int request_irq(unsigned int irq, * does not return until any executing interrupts for this IRQ * have completed. * - * This function must not be called from interrupt context. + * This function may be called from interrupt context. + * + * Bugs: Attempting to free an irq in a handler for the same irq hangs + * the machine. */ void free_irq(unsigned int irq, void *dev_id) @@ -573,8 +545,11 @@ void free_irq(unsigned int irq, void *dev_id) } spin_unlock_irqrestore(&desc->lock,flags); +#ifdef CONFIG_SMP /* Wait to make sure it's not being used on another CPU */ - synchronize_irq(irq); + while (desc->status & IRQ_INPROGRESS) + synchronize_irq(irq); +#endif kfree(action); return; } @@ -689,6 +664,7 @@ unsigned long probe_irq_on(void) * only return ISA irq numbers - just so that we reset them * all to a known state. */ + unsigned int probe_irq_mask(unsigned long val) { int i; @@ -729,7 +705,7 @@ unsigned int probe_irq_mask(unsigned long val) * The interrupt probe logic state is returned to its previous * value. * - * BUGS: When used in a module (which arguably shouldn't happen) + * BUGS: When used in a module (which arguably shouldnt happen) * nothing prevents two IRQ probe callers from overlapping. The * results of this are non-optimal. 
*/ @@ -772,8 +748,6 @@ int setup_irq(unsigned int irq, struct irqaction * new) struct irqaction *old, **p; irq_desc_t *desc = irq_desc(irq); - if (desc->handler == &no_irq_type) - return -ENOSYS; /* * Some drivers like serial.c use request_irq() heavily, * so we have to be careful not to interfere with a @@ -834,11 +808,11 @@ static struct proc_dir_entry * irq_dir [NR_IRQS]; #define HEX_DIGITS 8 -static unsigned int parse_hex_value (const char *buffer, - unsigned long count, unsigned long *ret) +static int parse_hex_value (const char *buffer, unsigned long count, unsigned long *ret) { unsigned char hexnum [HEX_DIGITS]; - unsigned long value, i; + unsigned long value; + int i; if (!count) return -EINVAL; @@ -976,13 +950,12 @@ static void register_irq_proc (unsigned int irq) #if CONFIG_SMP { struct proc_dir_entry *entry; - /* create /proc/irq/1234/smp_affinity */ entry = create_proc_entry("smp_affinity", 0600, irq_dir[irq]); if (entry) { entry->nlink = 1; - entry->data = (void *)(long)irq; + entry->data = (void *)(unsigned long)irq; entry->read_proc = irq_affinity_read_proc; entry->write_proc = irq_affinity_write_proc; } diff --git a/arch/ia64/kernel/irq_ia64.c b/arch/ia64/kernel/irq_ia64.c index 49c6572..d158a7c 100644 --- a/arch/ia64/kernel/irq_ia64.c +++ b/arch/ia64/kernel/irq_ia64.c @@ -145,7 +145,7 @@ ia64_handle_irq (ia64_vector vector, struct pt_regs *regs) } #ifdef CONFIG_SMP -extern irqreturn_t handle_IPI (int irq, void *dev_id, struct pt_regs *regs); +extern void handle_IPI (int irq, void *dev_id, struct pt_regs *regs); static struct irqaction ipi_irqaction = { .handler = handle_IPI, diff --git a/arch/ia64/kernel/machvec.c b/arch/ia64/kernel/machvec.c index 931c69c..7d524be 100644 --- a/arch/ia64/kernel/machvec.c +++ b/arch/ia64/kernel/machvec.c @@ -1,14 +1,12 @@ #include -#include - #ifdef CONFIG_IA64_GENERIC #include #include -#include #include +#include struct ia64_machine_vector ia64_mv; @@ -45,9 +43,3 @@ void machvec_noop (void) { } - -void -machvec_memory_fence (void) -{ - mb(); -} diff --git a/arch/ia64/kernel/mca.c b/arch/ia64/kernel/mca.c index 88148bc..5808798 100644 --- a/arch/ia64/kernel/mca.c +++ b/arch/ia64/kernel/mca.c @@ -3,9 +3,6 @@ * Purpose: Generic MCA handling layer * * Updated for latest kernel - * Copyright (C) 2003 Hewlett-Packard Co - * David Mosberger-Tang - * * Copyright (C) 2002 Dell Computer Corporation * Copyright (C) Matt Domsch (Matt_Domsch@dell.com) * @@ -21,7 +18,6 @@ * Copyright (C) 1999 Silicon Graphics, Inc. * Copyright (C) Vijay Chander(vijay@engr.sgi.com) * - * 03/04/15 D. Mosberger Added INIT backtrace support. * 02/03/25 M. Domsch GUID cleanups * * 02/01/04 J. Hall Aligned MCA stack to 16 bytes, added platform vs. CPU @@ -43,7 +39,6 @@ #include #include #include -#include #include #include #include @@ -52,7 +47,6 @@ #include #include -#include #include #include #include @@ -145,7 +139,7 @@ ia64_mca_log_sal_error_record(int sal_info_type, int called_from_init) /* Get the MCA error record */ if (!ia64_log_get(sal_info_type, (prfunc_t)printk)) - return platform_err; /* no record retrieved */ + return platform_err; // no record retrieved /* TODO: * 1. analyze error logs to determine recoverability @@ -172,7 +166,7 @@ mca_handler_platform (void) } -irqreturn_t +void ia64_mca_cpe_int_handler (int cpe_irq, void *arg, struct pt_regs *ptregs) { IA64_MCA_DEBUG("ia64_mca_cpe_int_handler: received interrupt. 
CPU:%d vector = %#x\n", @@ -180,190 +174,20 @@ ia64_mca_cpe_int_handler (int cpe_irq, void *arg, struct pt_regs *ptregs) /* Get the CMC error record and log it */ ia64_mca_log_sal_error_record(SAL_INFO_TYPE_CPE, 0); - return IRQ_HANDLED; -} - -static void -show_min_state (pal_min_state_area_t *minstate) -{ - u64 iip = minstate->pmsa_iip + ((struct ia64_psr *)(&minstate->pmsa_ipsr))->ri; - u64 xip = minstate->pmsa_xip + ((struct ia64_psr *)(&minstate->pmsa_xpsr))->ri; - - printk("NaT bits\t%016lx\n", minstate->pmsa_nat_bits); - printk("pr\t\t%016lx\n", minstate->pmsa_pr); - printk("b0\t\t%016lx ", minstate->pmsa_br0); print_symbol("%s\n", minstate->pmsa_br0); - printk("ar.rsc\t\t%016lx\n", minstate->pmsa_rsc); - printk("cr.iip\t\t%016lx ", iip); print_symbol("%s\n", iip); - printk("cr.ipsr\t\t%016lx\n", minstate->pmsa_ipsr); - printk("cr.ifs\t\t%016lx\n", minstate->pmsa_ifs); - printk("xip\t\t%016lx ", xip); print_symbol("%s\n", xip); - printk("xpsr\t\t%016lx\n", minstate->pmsa_xpsr); - printk("xfs\t\t%016lx\n", minstate->pmsa_xfs); - printk("b1\t\t%016lx ", minstate->pmsa_br1); - print_symbol("%s\n", minstate->pmsa_br1); - - printk("\nstatic registers r0-r15:\n"); - printk(" r0- 3 %016lx %016lx %016lx %016lx\n", - 0UL, minstate->pmsa_gr[0], minstate->pmsa_gr[1], minstate->pmsa_gr[2]); - printk(" r4- 7 %016lx %016lx %016lx %016lx\n", - minstate->pmsa_gr[3], minstate->pmsa_gr[4], - minstate->pmsa_gr[5], minstate->pmsa_gr[6]); - printk(" r8-11 %016lx %016lx %016lx %016lx\n", - minstate->pmsa_gr[7], minstate->pmsa_gr[8], - minstate->pmsa_gr[9], minstate->pmsa_gr[10]); - printk("r12-15 %016lx %016lx %016lx %016lx\n", - minstate->pmsa_gr[11], minstate->pmsa_gr[12], - minstate->pmsa_gr[13], minstate->pmsa_gr[14]); - - printk("\nbank 0:\n"); - printk("r16-19 %016lx %016lx %016lx %016lx\n", - minstate->pmsa_bank0_gr[0], minstate->pmsa_bank0_gr[1], - minstate->pmsa_bank0_gr[2], minstate->pmsa_bank0_gr[3]); - printk("r20-23 %016lx %016lx %016lx %016lx\n", - minstate->pmsa_bank0_gr[4], minstate->pmsa_bank0_gr[5], - minstate->pmsa_bank0_gr[6], minstate->pmsa_bank0_gr[7]); - printk("r24-27 %016lx %016lx %016lx %016lx\n", - minstate->pmsa_bank0_gr[8], minstate->pmsa_bank0_gr[9], - minstate->pmsa_bank0_gr[10], minstate->pmsa_bank0_gr[11]); - printk("r28-31 %016lx %016lx %016lx %016lx\n", - minstate->pmsa_bank0_gr[12], minstate->pmsa_bank0_gr[13], - minstate->pmsa_bank0_gr[14], minstate->pmsa_bank0_gr[15]); - - printk("\nbank 1:\n"); - printk("r16-19 %016lx %016lx %016lx %016lx\n", - minstate->pmsa_bank1_gr[0], minstate->pmsa_bank1_gr[1], - minstate->pmsa_bank1_gr[2], minstate->pmsa_bank1_gr[3]); - printk("r20-23 %016lx %016lx %016lx %016lx\n", - minstate->pmsa_bank1_gr[4], minstate->pmsa_bank1_gr[5], - minstate->pmsa_bank1_gr[6], minstate->pmsa_bank1_gr[7]); - printk("r24-27 %016lx %016lx %016lx %016lx\n", - minstate->pmsa_bank1_gr[8], minstate->pmsa_bank1_gr[9], - minstate->pmsa_bank1_gr[10], minstate->pmsa_bank1_gr[11]); - printk("r28-31 %016lx %016lx %016lx %016lx\n", - minstate->pmsa_bank1_gr[12], minstate->pmsa_bank1_gr[13], - minstate->pmsa_bank1_gr[14], minstate->pmsa_bank1_gr[15]); -} - -static void -fetch_min_state (pal_min_state_area_t *ms, struct pt_regs *pt, struct switch_stack *sw) -{ - u64 *dst_banked, *src_banked, bit, shift, nat_bits; - int i; - - /* - * First, update the pt-regs and switch-stack structures with the contents stored - * in the min-state area: - */ - if (((struct ia64_psr *) &ms->pmsa_ipsr)->ic == 0) { - pt->cr_ipsr = ms->pmsa_xpsr; - pt->cr_iip = ms->pmsa_xip; - pt->cr_ifs 
= ms->pmsa_xfs; - } else { - pt->cr_ipsr = ms->pmsa_ipsr; - pt->cr_iip = ms->pmsa_iip; - pt->cr_ifs = ms->pmsa_ifs; - } - pt->ar_rsc = ms->pmsa_rsc; - pt->pr = ms->pmsa_pr; - pt->r1 = ms->pmsa_gr[0]; - pt->r2 = ms->pmsa_gr[1]; - pt->r3 = ms->pmsa_gr[2]; - sw->r4 = ms->pmsa_gr[3]; - sw->r5 = ms->pmsa_gr[4]; - sw->r6 = ms->pmsa_gr[5]; - sw->r7 = ms->pmsa_gr[6]; - pt->r8 = ms->pmsa_gr[7]; - pt->r9 = ms->pmsa_gr[8]; - pt->r10 = ms->pmsa_gr[9]; - pt->r11 = ms->pmsa_gr[10]; - pt->r12 = ms->pmsa_gr[11]; - pt->r13 = ms->pmsa_gr[12]; - pt->r14 = ms->pmsa_gr[13]; - pt->r15 = ms->pmsa_gr[14]; - dst_banked = &pt->r16; /* r16-r31 are contiguous in struct pt_regs */ - src_banked = ms->pmsa_bank1_gr; - for (i = 0; i < 16; ++i) - dst_banked[i] = src_banked[i]; - pt->b0 = ms->pmsa_br0; - sw->b1 = ms->pmsa_br1; - - /* construct the NaT bits for the pt-regs structure: */ -# define PUT_NAT_BIT(dst, addr) \ - do { \ - bit = nat_bits & 1; nat_bits >>= 1; \ - shift = ((unsigned long) addr >> 3) & 0x3f; \ - dst = ((dst) & ~(1UL << shift)) | (bit << shift); \ - } while (0) - - /* Rotate the saved NaT bits such that bit 0 corresponds to pmsa_gr[0]: */ - shift = ((unsigned long) &ms->pmsa_gr[0] >> 3) & 0x3f; - nat_bits = (ms->pmsa_nat_bits >> shift) | (ms->pmsa_nat_bits << (64 - shift)); - - PUT_NAT_BIT(sw->caller_unat, &pt->r1); - PUT_NAT_BIT(sw->caller_unat, &pt->r2); - PUT_NAT_BIT(sw->caller_unat, &pt->r3); - PUT_NAT_BIT(sw->ar_unat, &sw->r4); - PUT_NAT_BIT(sw->ar_unat, &sw->r5); - PUT_NAT_BIT(sw->ar_unat, &sw->r6); - PUT_NAT_BIT(sw->ar_unat, &sw->r7); - PUT_NAT_BIT(sw->caller_unat, &pt->r8); PUT_NAT_BIT(sw->caller_unat, &pt->r9); - PUT_NAT_BIT(sw->caller_unat, &pt->r10); PUT_NAT_BIT(sw->caller_unat, &pt->r11); - PUT_NAT_BIT(sw->caller_unat, &pt->r12); PUT_NAT_BIT(sw->caller_unat, &pt->r13); - PUT_NAT_BIT(sw->caller_unat, &pt->r14); PUT_NAT_BIT(sw->caller_unat, &pt->r15); - nat_bits >>= 16; /* skip over bank0 NaT bits */ - PUT_NAT_BIT(sw->caller_unat, &pt->r16); PUT_NAT_BIT(sw->caller_unat, &pt->r17); - PUT_NAT_BIT(sw->caller_unat, &pt->r18); PUT_NAT_BIT(sw->caller_unat, &pt->r19); - PUT_NAT_BIT(sw->caller_unat, &pt->r20); PUT_NAT_BIT(sw->caller_unat, &pt->r21); - PUT_NAT_BIT(sw->caller_unat, &pt->r22); PUT_NAT_BIT(sw->caller_unat, &pt->r23); - PUT_NAT_BIT(sw->caller_unat, &pt->r24); PUT_NAT_BIT(sw->caller_unat, &pt->r25); - PUT_NAT_BIT(sw->caller_unat, &pt->r26); PUT_NAT_BIT(sw->caller_unat, &pt->r27); - PUT_NAT_BIT(sw->caller_unat, &pt->r28); PUT_NAT_BIT(sw->caller_unat, &pt->r29); - PUT_NAT_BIT(sw->caller_unat, &pt->r30); PUT_NAT_BIT(sw->caller_unat, &pt->r31); } +/* + * This routine will be used to deal with platform specific handling + * of the init, i.e. drop into the kernel debugger on server machine, + * or if the processor is part of some parallel machine without a + * console, then we would call the appropriate debug hooks here. + */ void -init_handler_platform (sal_log_processor_info_t *proc_ptr, - struct pt_regs *pt, struct switch_stack *sw) +init_handler_platform (struct pt_regs *regs) { - struct unw_frame_info info; - /* if a kernel debugger is available call it here else just dump the registers */ - /* - * Wait for a bit. On some machines (e.g., HP's zx2000 and zx6000, INIT can be - * generated via the BMC's command-line interface, but since the console is on the - * same serial line, the user will need some time to switch out of the BMC before - * the dump begins. 
- */ - printk("Delaying for 5 seconds...\n"); - udelay(5*1000000); - show_min_state(&SAL_LPI_PSI_INFO(proc_ptr)->min_state_area); - - printk("Backtrace of current task (pid %d, %s)\n", current->pid, current->comm); - fetch_min_state(&SAL_LPI_PSI_INFO(proc_ptr)->min_state_area, pt, sw); - unw_init_from_interruption(&info, current, pt, sw); - ia64_do_show_stack(&info, NULL); - -#ifdef CONFIG_SMP - if (!tasklist_lock.write_lock) -#endif - read_lock(&tasklist_lock); - { - struct task_struct *g, *t; - do_each_thread (g, t) { - if (t == current) - continue; - - printk("\nBacktrace of pid %d (%s)\n", t->pid, t->comm); - show_stack(t); - } while_each_thread (g, t); - } -#ifdef CONFIG_SMP - if (!tasklist_lock.write_lock) -#endif - read_unlock(&tasklist_lock); - - printk("\nINIT dump complete. Please reboot now.\n"); + show_regs(regs); /* dump the state info */ while (1); /* hang city if no debugger */ } @@ -439,6 +263,7 @@ ia64_mca_register_cpev (int cpev) /* * routine to process and prepare to dump min_state_save * information for debugging purposes. + * */ void ia64_process_min_state_save (pal_min_state_area_t *pmss) @@ -447,6 +272,8 @@ ia64_process_min_state_save (pal_min_state_area_t *pmss) u64 *tpmss_ptr = (u64 *)pmss; u64 *return_min_state_ptr = ia64_mca_min_state_save_info; + /* dump out the min_state_area information */ + for (i=0;i>=1; } p += sprintf(p, "\n\tLoad hints : "); for(k=0; k < 8; k++ ) { - if (cci.pcci_ld_hints & 0x1) - p += sprintf(p, "[%s]", cache_ld_hints[k]); + if ( cci.pcci_ld_hints & 0x1) p += sprintf(p, "[%s]", cache_ld_hints[k]); cci.pcci_ld_hints >>=1; } - p += sprintf(p, - "\n\tAlias boundary : %d byte(s)\n" - "\tTag LSB : %d\n" - "\tTag MSB : %d\n", - 1<0 ; j--) { @@ -380,14 +379,15 @@ vm_info(char *page) continue; } - p += sprintf(p, - "\n%s Translation Cache Level %d:\n" - "\tHash sets : %d\n" - "\tAssociativity : %d\n" - "\tNumber of entries : %d\n" - "\tFlags : ", - cache_types[j+tc_info.tc_unified], i+1, tc_info.tc_num_sets, - tc_info.tc_associativity, tc_info.tc_num_entries); + p += sprintf(p, "\n%s Translation Cache Level %d:\n" \ + "\tHash sets : %d\n" \ + "\tAssociativity : %d\n" \ + "\tNumber of entries : %d\n" \ + "\tFlags : ", + cache_types[j+tc_info.tc_unified], i+1, + tc_info.tc_num_sets, + tc_info.tc_associativity, + tc_info.tc_num_entries); if (tc_info.tc_pf) p += sprintf(p, "PreferredPageSizeOptimized "); if (tc_info.tc_unified) p += sprintf(p, "Unified "); @@ -436,18 +436,17 @@ register_info(char *page) if (ia64_pal_rse_info(&phys_stacked, &hints) != 0) return 0; - p += sprintf(p, - "RSE stacked physical registers : %ld\n" - "RSE load/store hints : %ld (%s)\n", - phys_stacked, hints.ph_data, - hints.ph_data < RSE_HINTS_COUNT ? rse_hints[hints.ph_data]: "(\?\?)"); + p += sprintf(p, "RSE stacked physical registers : %ld\n" \ + "RSE load/store hints : %ld (%s)\n", + phys_stacked, + hints.ph_data, + hints.ph_data < RSE_HINTS_COUNT ? 
rse_hints[hints.ph_data]: "(\?\?)"); - if (ia64_pal_debug_info(&iregs, &dregs)) - return 0; + if (ia64_pal_debug_info(&iregs, &dregs)) return 0; - p += sprintf(p, - "Instruction debug register pairs : %ld\n" - "Data debug register pairs : %ld\n", iregs, dregs); + p += sprintf(p, "Instruction debug register pairs : %ld\n" \ + "Data debug register pairs : %ld\n", + iregs, dregs); return p - page; } @@ -564,21 +563,26 @@ version_info(char *page) */ if (ia64_pal_version(&min_ver, &cur_ver) != 0) return 0; - p += sprintf(p, - "PAL_vendor : 0x%02x (min=0x%02x)\n" - "PAL_A : %x.%x.%x (min=%x.%x.%x)\n" - "PAL_B : %x.%x.%x (min=%x.%x.%x)\n", - cur_ver.pal_version_s.pv_pal_vendor, min_ver.pal_version_s.pv_pal_vendor, - - cur_ver.pal_version_s.pv_pal_a_model>>4, - cur_ver.pal_version_s.pv_pal_a_model&0xf, cur_ver.pal_version_s.pv_pal_a_rev, - min_ver.pal_version_s.pv_pal_a_model>>4, - min_ver.pal_version_s.pv_pal_a_model&0xf, min_ver.pal_version_s.pv_pal_a_rev, - - cur_ver.pal_version_s.pv_pal_b_model>>4, - cur_ver.pal_version_s.pv_pal_b_model&0xf, cur_ver.pal_version_s.pv_pal_b_rev, - min_ver.pal_version_s.pv_pal_b_model>>4, - min_ver.pal_version_s.pv_pal_b_model&0xf, min_ver.pal_version_s.pv_pal_b_rev); + p += sprintf(p, "PAL_vendor : 0x%02x (min=0x%02x)\n" \ + "PAL_A : %x.%x.%x (min=%x.%x.%x)\n" \ + "PAL_B : %x.%x.%x (min=%x.%x.%x)\n", + cur_ver.pal_version_s.pv_pal_vendor, + min_ver.pal_version_s.pv_pal_vendor, + + cur_ver.pal_version_s.pv_pal_a_model>>4, + cur_ver.pal_version_s.pv_pal_a_model&0xf, + cur_ver.pal_version_s.pv_pal_a_rev, + min_ver.pal_version_s.pv_pal_a_model>>4, + min_ver.pal_version_s.pv_pal_a_model&0xf, + min_ver.pal_version_s.pv_pal_a_rev, + + cur_ver.pal_version_s.pv_pal_b_model>>4, + cur_ver.pal_version_s.pv_pal_b_model&0xf, + cur_ver.pal_version_s.pv_pal_b_rev, + min_ver.pal_version_s.pv_pal_b_model>>4, + min_ver.pal_version_s.pv_pal_b_model&0xf, + min_ver.pal_version_s.pv_pal_b_rev); + return p - page; } @@ -591,20 +595,26 @@ perfmon_info(char *page) if (ia64_pal_perf_mon_info(pm_buffer, &pm_info) != 0) return 0; - p += sprintf(p, - "PMC/PMD pairs : %d\n" - "Counter width : %d bits\n" - "Cycle event number : %d\n" - "Retired event number : %d\n" - "Implemented PMC : ", - pm_info.pal_perf_mon_info_s.generic, pm_info.pal_perf_mon_info_s.width, - pm_info.pal_perf_mon_info_s.cycles, pm_info.pal_perf_mon_info_s.retired); + p += sprintf(p, "PMC/PMD pairs : %d\n" \ + "Counter width : %d bits\n" \ + "Cycle event number : %d\n" \ + "Retired event number : %d\n" \ + "Implemented PMC : ", + pm_info.pal_perf_mon_info_s.generic, + pm_info.pal_perf_mon_info_s.width, + pm_info.pal_perf_mon_info_s.cycles, + pm_info.pal_perf_mon_info_s.retired); p = bitregister_process(p, pm_buffer, 256); + p += sprintf(p, "\nImplemented PMD : "); + p = bitregister_process(p, pm_buffer+4, 256); + p += sprintf(p, "\nCycles count capable : "); + p = bitregister_process(p, pm_buffer+8, 256); + p += sprintf(p, "\nRetired bundles count capable : "); #ifdef CONFIG_ITANIUM @@ -636,11 +646,12 @@ frequency_info(char *page) if (ia64_pal_freq_ratios(&proc, &bus, &itc) != 0) return 0; - p += sprintf(p, - "Processor/Clock ratio : %ld/%ld\n" - "Bus/Clock ratio : %ld/%ld\n" - "ITC/Clock ratio : %ld/%ld\n", - proc.num, proc.den, bus.num, bus.den, itc.num, itc.den); + p += sprintf(p, "Processor/Clock ratio : %ld/%ld\n" \ + "Bus/Clock ratio : %ld/%ld\n" \ + "ITC/Clock ratio : %ld/%ld\n", + proc.num, proc.den, + bus.num, bus.den, + itc.num, itc.den); return p - page; } @@ -654,7 +665,7 @@ tr_info(char *page) u64 tr_buffer[4]; 
pal_vm_info_1_u_t vm_info_1; pal_vm_info_2_u_t vm_info_2; - u64 i, j; + int i, j; u64 max[3], pgm; struct ifa_reg { u64 valid:1; @@ -700,7 +711,7 @@ tr_info(char *page) status = ia64_pal_tr_read(j, i, tr_buffer, &tr_valid); if (status != 0) { - printk(KERN_ERR "palinfo: pal call failed on tr[%lu:%lu]=%ld\n", + printk(KERN_ERR "palinfo: pal call failed on tr[%d:%d]=%ld\n", i, j, status); continue; } @@ -714,29 +725,34 @@ tr_info(char *page) rid_reg = (struct rid_reg *)&tr_buffer[3]; pgm = -1 << (itir_reg->ps - 12); - p += sprintf(p, - "%cTR%lu: av=%d pv=%d dv=%d mv=%d\n" - "\tppn : 0x%lx\n" - "\tvpn : 0x%lx\n" - "\tps : ", - "ID"[i], j, - tr_valid.pal_tr_valid_s.access_rights_valid, - tr_valid.pal_tr_valid_s.priv_level_valid, - tr_valid.pal_tr_valid_s.dirty_bit_valid, - tr_valid.pal_tr_valid_s.mem_attr_valid, - (gr_reg->ppn & pgm)<< 12, (ifa_reg->vpn & pgm)<< 12); + p += sprintf(p, "%cTR%d: av=%d pv=%d dv=%d mv=%d\n" \ "\tppn : 0x%lx\n" \ "\tvpn : 0x%lx\n" \ "\tps : ", + + "ID"[i], + j, + tr_valid.pal_tr_valid_s.access_rights_valid, + tr_valid.pal_tr_valid_s.priv_level_valid, + tr_valid.pal_tr_valid_s.dirty_bit_valid, + tr_valid.pal_tr_valid_s.mem_attr_valid, + (gr_reg->ppn & pgm)<< 12, + (ifa_reg->vpn & pgm)<< 12); p = bitvector_process(p, 1<< itir_reg->ps); - p += sprintf(p, - "\n\tpl : %d\n" - "\tar : %d\n" - "\trid : %x\n" - "\tp : %d\n" - "\tma : %d\n" - "\td : %d\n", - gr_reg->pl, gr_reg->ar, rid_reg->rid, gr_reg->p, gr_reg->ma, - gr_reg->d); + p += sprintf(p, "\n\tpl : %d\n" \ "\tar : %d\n" \ "\trid : %x\n" \ "\tp : %d\n" \ "\tma : %d\n" \ "\td : %d\n", + gr_reg->pl, + gr_reg->ar, + rid_reg->rid, + gr_reg->p, + gr_reg->ma, + gr_reg->d); } } return p - page; @@ -760,7 +776,7 @@ static palinfo_entry_t palinfo_entries[]={ { "tr_info", tr_info, } }; -#define NR_PALINFO_ENTRIES (int) ARRAY_SIZE(palinfo_entries) +#define NR_PALINFO_ENTRIES (sizeof(palinfo_entries)/sizeof(palinfo_entry_t)) /* * this array is used to keep track of the proc entries we create. This is diff --git a/arch/ia64/kernel/perfmon.c b/arch/ia64/kernel/perfmon.c index fd0f90a..7134dfd 100644 --- a/arch/ia64/kernel/perfmon.c +++ b/arch/ia64/kernel/perfmon.c @@ -2,7 +2,7 @@ * This file implements the perfmon subsystem which is used * to program the IA-64 Performance Monitoring Unit (PMU). * - * Originally Written by Ganesh Venkitachalam, IBM Corp. + * Originally Written by Ganesh Venkitachalam, IBM Corp. * Copyright (C) 1999 Ganesh Venkitachalam * * Modifications by Stephane Eranian, Hewlett-Packard Co. 
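The first perfmon.c hunk below drops the 1-bit unsecure flag from the context-flags bitfield and widens reserved from 20 to 21 bits, so the named bits plus the padding keep summing to one 32-bit word. A minimal stand-alone sketch of that invariant, with illustrative field names and widths rather than the kernel's real pfm_context_flags_t definition:

typedef struct {
	unsigned int inherit:2;		/* demo stand-ins for the real flag bits */
	unsigned int block:1;
	unsigned int system:1;
	unsigned int frozen:1;
	unsigned int protected:1;
	unsigned int using_dbreg:1;
	unsigned int excl_idle:1;
	unsigned int trap_reason:2;
	unsigned int reserved:22;	/* 10 named bits + 22 reserved == 32 */
} demo_context_flags_t;

/* pre-C11 compile-time check: the array size goes negative if the word grew */
typedef char demo_flags_still_one_word[(sizeof(demo_context_flags_t) == sizeof(unsigned int)) ? 1 : -1];

Removing a 1-bit field without growing reserved would shrink the struct or shift the later bits, which is why the hunk touches both lines together.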
@@ -224,9 +224,8 @@ typedef struct { unsigned int protected:1; /* allow access to creator of context only */ unsigned int using_dbreg:1; /* using range restrictions (debug registers) */ unsigned int excl_idle:1; /* exclude idle task in system wide session */ - unsigned int unsecure:1; /* sp = 0 for non self-monitored task */ unsigned int trap_reason:2; /* reason for going into pfm_block_ovfl_reset() */ - unsigned int reserved:20; + unsigned int reserved:21; } pfm_context_flags_t; #define PFM_TRAP_REASON_NONE 0x0 /* default value */ @@ -279,7 +278,6 @@ typedef struct pfm_context { #define ctx_fl_using_dbreg ctx_flags.using_dbreg #define ctx_fl_excl_idle ctx_flags.excl_idle #define ctx_fl_trap_reason ctx_flags.trap_reason -#define ctx_fl_unsecure ctx_flags.unsecure /* * global information about all sessions @@ -364,9 +362,8 @@ typedef struct { #define PFM_CMD_IDX(cmd) (cmd) -#define PFM_CMD_IS_VALID(cmd) ((PFM_CMD_IDX(cmd) >= 0) \ - && (PFM_CMD_IDX(cmd) < (int) PFM_CMD_COUNT) \ - && pfm_cmd_tab[PFM_CMD_IDX(cmd)].cmd_func != NULL) +#define PFM_CMD_IS_VALID(cmd) ((PFM_CMD_IDX(cmd) >= 0) && (PFM_CMD_IDX(cmd) < PFM_CMD_COUNT) \ && pfm_cmd_tab[PFM_CMD_IDX(cmd)].cmd_func != NULL) #define PFM_CMD_USE_PID(cmd) ((pfm_cmd_tab[PFM_CMD_IDX(cmd)].cmd_flags & PFM_CMD_PID) != 0) #define PFM_CMD_READ_ARG(cmd) ((pfm_cmd_tab[PFM_CMD_IDX(cmd)].cmd_flags & PFM_CMD_ARG_READ) != 0) @@ -649,7 +646,7 @@ pfm_vm_close(struct vm_area_struct *vma) /* * This function is called from pfm_destroy_context() and also from pfm_inherit() - * to explicitly remove the sampling buffer mapping from the user level address space. + * to explicitly remove the sampling buffer mapping from the user level address space. */ static int pfm_remove_smpl_mapping(struct task_struct *task) @@ -727,7 +724,8 @@ pfm_remap_buffer(struct vm_area_struct *vma, unsigned long buf, unsigned long ad static unsigned long pfm_smpl_entry_size(unsigned long *which, unsigned long size) { - unsigned long i, res = 0; + unsigned long res = 0; + int i; for (i=0; i < size; i++, which++) res += hweight64(*which); @@ -1078,15 +1076,10 @@ pfx_is_sane(struct task_struct *task, pfarg_context_t *pfx) * and it must be a valid CPU */ cpu = ffz(~pfx->ctx_cpu_mask); -#ifdef CONFIG_SMP if (cpu_online(cpu) == 0) { -#else - if (cpu != 0) { -#endif DBprintk(("CPU%d is not online\n", cpu)); return -EINVAL; } - /* * check for pre-existing pinning, if conflicting reject */ @@ -1232,7 +1225,6 @@ pfm_context_create(struct task_struct *task, pfm_context_t *ctx, void *req, int ctx->ctx_fl_block = (ctx_flags & PFM_FL_NOTIFY_BLOCK) ? 1 : 0; ctx->ctx_fl_system = (ctx_flags & PFM_FL_SYSTEM_WIDE) ? 1: 0; ctx->ctx_fl_excl_idle = (ctx_flags & PFM_FL_EXCL_IDLE) ? 1: 0; - ctx->ctx_fl_unsecure = (ctx_flags & PFM_FL_UNSECURE) ? 
1: 0; ctx->ctx_fl_frozen = 0; ctx->ctx_fl_trap_reason = PFM_TRAP_REASON_NONE; @@ -1259,11 +1251,9 @@ pfm_context_create(struct task_struct *task, pfm_context_t *ctx, void *req, int DBprintk(("context=%p, pid=%d notify_task=%p\n", (void *)ctx, task->pid, ctx->ctx_notify_task)); - DBprintk(("context=%p, pid=%d flags=0x%x inherit=%d block=%d system=%d excl_idle=%d unsecure=%d\n", + DBprintk(("context=%p, pid=%d flags=0x%x inherit=%d block=%d system=%d excl_idle=%d\n", (void *)ctx, task->pid, ctx_flags, ctx->ctx_fl_inherit, - ctx->ctx_fl_block, ctx->ctx_fl_system, - ctx->ctx_fl_excl_idle, - ctx->ctx_fl_unsecure)); + ctx->ctx_fl_block, ctx->ctx_fl_system, ctx->ctx_fl_excl_idle)); /* * when no notification is required, we can make this visible at the last moment @@ -1669,7 +1659,7 @@ pfm_read_pmds(struct task_struct *task, pfm_context_t *ctx, void *arg, int count if (!PMD_IS_IMPL(cnum)) goto abort_mission; /* * we can only read the register that we use. That includes - * the one we explicitly initialize AND the one we want included + * the one we explicitly initialize AND the one we want included * in the sampling buffer (smpl_regs). * * Having this restriction allows optimization in the ctxsw routine @@ -1881,7 +1871,7 @@ pfm_restart(struct task_struct *task, pfm_context_t *ctx, void *arg, int count, * if blocking, then post the semaphore. * if non-blocking, then we ensure that the task will go into * pfm_overflow_must_block() before returning to user mode. - * We cannot explicitly reset another task, it MUST always + * We cannot explicitly reset another task, it MUST always * be done by the task itself. This works for system wide because * the tool that is controlling the session is doing "self-monitoring". * @@ -1892,10 +1882,7 @@ pfm_restart(struct task_struct *task, pfm_context_t *ctx, void *arg, int count, DBprintk(("unblocking %d \n", task->pid)); up(sem); } else { - struct thread_info *info = (struct thread_info *) ((char *) task + IA64_TASK_SIZE); task->thread.pfm_ovfl_block_reset = 1; - ctx->ctx_fl_trap_reason = PFM_TRAP_REASON_RESET; - set_bit(TIF_NOTIFY_RESUME, &info->flags); } #if 0 /* @@ -2064,7 +2051,7 @@ pfm_protect_context(struct task_struct *task, pfm_context_t *ctx, void *arg, int /* * reinforce secure monitoring: cannot toggle psr.up */ - if (ctx->ctx_fl_unsecure == 0) ia64_psr(regs)->sp = 1; + ia64_psr(regs)->sp = 1; return 0; } @@ -2172,11 +2159,11 @@ pfm_write_ibr_dbr(int mode, struct task_struct *task, void *arg, int count, stru * never leaves the current CPU and the state * is shared by all processes running on it */ - for (i=0; i < (int) pmu_conf.num_ibrs; i++) { + for (i=0; i < pmu_conf.num_ibrs; i++) { ia64_set_ibr(i, 0UL); } ia64_srlz_i(); - for (i=0; i < (int) pmu_conf.num_dbrs; i++) { + for (i=0; i < pmu_conf.num_dbrs; i++) { ia64_set_dbr(i, 0UL); } ia64_srlz_d(); @@ -2518,7 +2505,7 @@ static pfm_cmd_desc_t pfm_cmd_tab[]={ /* 33 */{ pfm_write_dbrs, PFM_CMD_PID|PFM_CMD_CTX|PFM_CMD_ARG_RW, PFM_CMD_ARG_MANY, sizeof(pfarg_dbreg_t)} #endif }; -#define PFM_CMD_COUNT ARRAY_SIZE(pfm_cmd_tab) +#define PFM_CMD_COUNT (sizeof(pfm_cmd_tab)/sizeof(pfm_cmd_desc_t)) static int check_task_state(struct task_struct *task) @@ -2745,13 +2732,12 @@ pfm_ovfl_block_reset(void) * again */ th->pfm_ovfl_block_reset = 0; - clear_thread_flag(TIF_NOTIFY_RESUME); /* * do some sanity checks first */ if (!ctx) { - printk(KERN_ERR "perfmon: [%d] has no PFM context\n", current->pid); + printk(KERN_DEBUG "perfmon: [%d] has no PFM context\n", current->pid); return; } /* @@ -2913,18 +2899,15 @@ 
pfm_record_sample(struct task_struct *task, pfm_context_t *ctx, unsigned long ov /* * main overflow processing routine. - * it can be called from the interrupt path or explicitly during the context switch code - * Arguments: - * mode: 0=coming from PMU interrupt, 1=coming from ctxsw - * + * it can be called from the interrupt path or explicitly during the context switch code * Return: * new value of pmc[0]. if 0x0 then unfreeze, else keep frozen */ static unsigned long -pfm_overflow_handler(int mode, struct task_struct *task, pfm_context_t *ctx, u64 pmc0, struct pt_regs *regs) +pfm_overflow_handler(struct task_struct *task, pfm_context_t *ctx, u64 pmc0, struct pt_regs *regs) { - struct thread_struct *t; unsigned long mask; + struct thread_struct *t; unsigned long old_val; unsigned long ovfl_notify = 0UL, ovfl_pmds = 0UL; int i; @@ -3015,10 +2998,10 @@ pfm_overflow_handler(int mode, struct task_struct *task, pfm_context_t *ctx, u64 /* * check for sampling buffer * - * if present, record sample only when a 64-bit counter has overflowed. - * We propagate notification ONLY when buffer becomes full. + * if present, record sample. We propagate notification ONLY when buffer + * becomes full. */ - if(CTX_HAS_SMPL(ctx) && ovfl_pmds) { + if(CTX_HAS_SMPL(ctx)) { ret = pfm_record_sample(task, ctx, ovfl_pmds, regs); if (ret == 1) { /* @@ -3063,55 +3046,12 @@ pfm_overflow_handler(int mode, struct task_struct *task, pfm_context_t *ctx, u64 * ctx_notify_task could already be NULL, checked in pfm_notify_user() */ if (CTX_OVFL_NOBLOCK(ctx) == 0 && ctx->ctx_notify_task != task) { + t->pfm_ovfl_block_reset = 1; /* will cause blocking */ ctx->ctx_fl_trap_reason = PFM_TRAP_REASON_BLOCKSIG; } else { + t->pfm_ovfl_block_reset = 1; /* will cause blocking */ ctx->ctx_fl_trap_reason = PFM_TRAP_REASON_SIG; } - /* - * we cannot block in system wide mode and we do not go - * through the PMU ctxsw code. Therefore we can generate - * the notification here. In system wide mode, the current - * task maybe different from the task controlling the session - * on this CPU, therefore owner can be different from current. - * - * In per-process mode, this function gets called from - * the interrupt handler or pfm_load_regs(). The mode argument - * tells where we are coming from. When coming from the interrupt - * handler, it is safe to notify (send signal) right here because - * we do not hold any runqueue locks needed by send_sig_info(). - * - * However when coming from ctxsw, we cannot send the signal here. - * It must be deferred until we are sure we do not hold any runqueue - * related locks. The current task maybe different from the owner - * only in UP mode. The deferral is implemented using the - * TIF_NOTIFY_RESUME mechanism. In this case, the pending work - * is checked when the task is about to leave the kernel (see - * entry.S). As of this version of perfmon, a kernel only - * task cannot be monitored in per-process mode. Therefore, - * when this function gets called from pfm_load_regs(), we know - * we have a user level task which will eventually either exit - * or leave the kernel, and thereby go through the checkpoint - * for TIF_*. - */ - if (ctx->ctx_fl_system || mode == 0) { - pfm_notify_user(ctx); - ctx->ctx_fl_trap_reason = PFM_TRAP_REASON_NONE; - } else { - struct thread_info *info; - - /* - * given that TIF_NOTIFY_RESUME is not specific to - * perfmon, we need to have a second level check to - * verify the source of the notification. 
- */ - task->thread.pfm_ovfl_block_reset = 1; - /* - * when coming from ctxsw, current still points to the - * previous task, therefore we must work with task and not current. - */ - info = ((struct thread_info *) ((char *) task + IA64_TASK_SIZE)); - set_bit(TIF_NOTIFY_RESUME, &info->flags); - } /* * keep the PMU frozen until either pfm_restart() or @@ -3119,10 +3059,7 @@ pfm_overflow_handler(int mode, struct task_struct *task, pfm_context_t *ctx, u64 */ ctx->ctx_fl_frozen = 1; - DBprintk_ovfl(("current [%d] owner [%d] mode=%d return pmc0=0x%x must_block=%ld reason=%d\n", - current->pid, - PMU_OWNER() ? PMU_OWNER()->pid : -1, - mode, + DBprintk_ovfl(("return pmc0=0x%x must_block=%ld reason=%d\n", ctx->ctx_fl_frozen ? 0x1 : 0x0, t->pfm_ovfl_block_reset, ctx->ctx_fl_trap_reason)); @@ -3131,7 +3068,7 @@ pfm_overflow_handler(int mode, struct task_struct *task, pfm_context_t *ctx, u64 return 0x1UL; } -static irqreturn_t +static void pfm_interrupt_handler(int irq, void *arg, struct pt_regs *regs) { u64 pmc0; @@ -3146,7 +3083,7 @@ pfm_interrupt_handler(int irq, void *arg, struct pt_regs *regs) if (pfm_alternate_intr_handler) { (*pfm_alternate_intr_handler->handler)(irq, arg, regs); put_cpu(); - return IRQ_HANDLED; + return; } /* @@ -3171,21 +3108,19 @@ pfm_interrupt_handler(int irq, void *arg, struct pt_regs *regs) printk(KERN_DEBUG "perfmon: Spurious overflow interrupt: process %d has " "no PFM context\n", task->pid); put_cpu(); - return IRQ_HANDLED; + return; } /* * assume PMC[0].fr = 1 at this point */ - pmc0 = pfm_overflow_handler(0, task, ctx, pmc0, regs); + pmc0 = pfm_overflow_handler(task, ctx, pmc0, regs); /* * we can only update pmc0 when the overflow - * is for the current context or we are in system - * wide mode. In UP (per-task) the current - * task may not be the one owning the PMU, - * same thing for system-wide. + * is for the current context. In UP the current + * task may not be the one owning the PMU */ - if (task == current || ctx->ctx_fl_system) { + if (task == current) { /* * We always clear the overflow status bits and either unfreeze * or keep the PMU frozen. @@ -3199,7 +3134,6 @@ pfm_interrupt_handler(int irq, void *arg, struct pt_regs *regs) pfm_stats[smp_processor_id()].pfm_spurious_ovfl_intr_count++; } put_cpu_no_resched(); - return IRQ_HANDLED; } /* for debug only */ @@ -3453,11 +3387,11 @@ pfm_load_regs (struct task_struct *task) * in the next version of perfmon. */ if (ctx->ctx_fl_using_dbreg) { - for (i=0; i < (int) pmu_conf.num_ibrs; i++) { + for (i=0; i < pmu_conf.num_ibrs; i++) { ia64_set_ibr(i, t->ibr[i]); } ia64_srlz_i(); - for (i=0; i < (int) pmu_conf.num_dbrs; i++) { + for (i=0; i < pmu_conf.num_dbrs; i++) { ia64_set_dbr(i, t->dbr[i]); } ia64_srlz_d(); @@ -3468,7 +3402,7 @@ pfm_load_regs (struct task_struct *task) * this path cannot be used in SMP */ if (owner == task) { - if ((unsigned int) atomic_read(&ctx->ctx_last_cpu) != smp_processor_id()) + if (atomic_read(&ctx->ctx_last_cpu) != smp_processor_id()) DBprintk(("invalid last_cpu=%d for [%d]\n", atomic_read(&ctx->ctx_last_cpu), task->pid)); @@ -3520,7 +3454,7 @@ pfm_load_regs (struct task_struct *task) * Side effect on ctx_fl_frozen is possible. 
*/ if (t->pmc[0] & ~0x1) { - t->pmc[0] = pfm_overflow_handler(1, task, ctx, t->pmc[0], NULL); + t->pmc[0] = pfm_overflow_handler(task, ctx, t->pmc[0], NULL); } /* @@ -3742,7 +3676,7 @@ pfm_flush_regs (struct task_struct *task) * */ - if ((unsigned int) atomic_read(&ctx->ctx_last_cpu) != smp_processor_id()) + if (atomic_read(&ctx->ctx_last_cpu) != smp_processor_id()) printk(KERN_DEBUG "perfmon: [%d] last_cpu=%d\n", task->pid, atomic_read(&ctx->ctx_last_cpu)); @@ -3820,20 +3754,16 @@ pfm_inherit(struct task_struct *task, struct pt_regs *regs) preempt_disable(); /* - * for secure sessions, make sure child cannot mess up - * the monitoring session. + * make sure child cannot mess up the monitoring session */ - if (ctx->ctx_fl_unsecure == 0) { - ia64_psr(regs)->sp = 1; - DBprintk(("enabling psr.sp for [%d]\n", task->pid)); - } else { - DBprintk(("psr.sp=%d [%d]\n", ia64_psr(regs)->sp, task->pid)); - } + ia64_psr(regs)->sp = 1; + DBprintk(("enabling psr.sp for [%d]\n", task->pid)); + /* * if there was a virtual mapping for the sampling buffer * the mapping is NOT inherited across fork() (see VM_DONTCOPY), - * so we don't have to explicitly remove it here. + * so we don't have to explicitly remove it here. * * * Part of the clearing of fields is also done in diff --git a/arch/ia64/kernel/perfmon_mckinley.h b/arch/ia64/kernel/perfmon_mckinley.h index a02cd6c..c4e8a12 100644 --- a/arch/ia64/kernel/perfmon_mckinley.h +++ b/arch/ia64/kernel/perfmon_mckinley.h @@ -25,8 +25,8 @@ static pfm_reg_desc_t pfm_mck_pmc_desc[PMU_MAX_PMCS]={ /* pmc5 */ { PFM_REG_COUNTING, 6, 0x0UL, 0xfffff7fUL, NULL, pfm_mck_reserved, {RDEP(5),0UL, 0UL, 0UL}, {0UL,0UL, 0UL, 0UL}}, /* pmc6 */ { PFM_REG_COUNTING, 6, 0x0UL, 0xfffff7fUL, NULL, pfm_mck_reserved, {RDEP(6),0UL, 0UL, 0UL}, {0UL,0UL, 0UL, 0UL}}, /* pmc7 */ { PFM_REG_COUNTING, 6, 0x0UL, 0xfffff7fUL, NULL, pfm_mck_reserved, {RDEP(7),0UL, 0UL, 0UL}, {0UL,0UL, 0UL, 0UL}}, -/* pmc8 */ { PFM_REG_CONFIG , 0, 0xffffffff3fffffffUL, 0xffffffff3fffffffUL, NULL, pfm_mck_pmc_check, {0UL,0UL, 0UL, 0UL}, {0UL,0UL, 0UL, 0UL}}, -/* pmc9 */ { PFM_REG_CONFIG , 0, 0xffffffff3ffffffcUL, 0xffffffff3fffffffUL, NULL, pfm_mck_pmc_check, {0UL,0UL, 0UL, 0UL}, {0UL,0UL, 0UL, 0UL}}, +/* pmc8 */ { PFM_REG_CONFIG , 0, 0xffffffff3fffffffUL, 0xffffffff9fffffffUL, NULL, pfm_mck_pmc_check, {0UL,0UL, 0UL, 0UL}, {0UL,0UL, 0UL, 0UL}}, +/* pmc9 */ { PFM_REG_CONFIG , 0, 0xffffffff3ffffffcUL, 0xffffffff9fffffffUL, NULL, pfm_mck_pmc_check, {0UL,0UL, 0UL, 0UL}, {0UL,0UL, 0UL, 0UL}}, /* pmc10 */ { PFM_REG_MONITOR , 4, 0x0UL, 0xffffUL, NULL, pfm_mck_reserved, {RDEP(0)|RDEP(1),0UL, 0UL, 0UL}, {0UL,0UL, 0UL, 0UL}}, /* pmc11 */ { PFM_REG_MONITOR , 6, 0x0UL, 0x30f01cf, NULL, pfm_mck_reserved, {RDEP(2)|RDEP(3)|RDEP(17),0UL, 0UL, 0UL}, {0UL,0UL, 0UL, 0UL}}, /* pmc12 */ { PFM_REG_MONITOR , 6, 0x0UL, 0xffffUL, NULL, pfm_mck_reserved, {RDEP(8)|RDEP(9)|RDEP(10)|RDEP(11)|RDEP(12)|RDEP(13)|RDEP(14)|RDEP(15)|RDEP(16),0UL, 0UL, 0UL}, {0UL,0UL, 0UL, 0UL}}, @@ -143,8 +143,11 @@ pfm_mck_pmc_check(struct task_struct *task, unsigned int cnum, unsigned long *va case 8: val8 = *val; val13 = th->pmc[13]; val14 = th->pmc[14]; + *val |= 1UL << 2; /* bit 2 must always be 1 */ check_case1 = 1; break; + case 9: *val |= 1UL << 2; /* bit 2 must always be 1 */ + break; case 13: val8 = th->pmc[8]; val13 = *val; val14 = th->pmc[14]; diff --git a/arch/ia64/kernel/process.c b/arch/ia64/kernel/process.c index 2a477a5..132a4a6 100644 --- a/arch/ia64/kernel/process.c +++ b/arch/ia64/kernel/process.c @@ -43,8 +43,8 @@ #include "sigframe.h" -void 
-ia64_do_show_stack (struct unw_frame_info *info, void *arg) +static void +do_show_stack (struct unw_frame_info *info, void *arg) { unsigned long ip, sp, bsp; char buf[80]; /* don't make it so big that it overflows the stack! */ @@ -57,7 +57,7 @@ ia64_do_show_stack (struct unw_frame_info *info, void *arg) unw_get_sp(info, &sp); unw_get_bsp(info, &bsp); - snprintf(buf, sizeof(buf), " [<%016lx>] %%s\n\t\t\t\tsp=%016lx bsp=%016lx\n", + snprintf(buf, sizeof(buf), " [<%016lx>] %%s sp=0x%016lx bsp=0x%016lx\n", ip, sp, bsp); print_symbol(buf, ip); } while (unw_unwind(info) >= 0); @@ -73,12 +73,12 @@ void show_stack (struct task_struct *task) { if (!task) - unw_init_running(ia64_do_show_stack, 0); + unw_init_running(do_show_stack, 0); else { struct unw_frame_info info; unw_init_from_blocked_task(&info, task); - ia64_do_show_stack(&info, 0); + do_show_stack(&info, 0); } } @@ -123,8 +123,8 @@ show_regs (struct pt_regs *regs) if (user_mode(regs)) { /* print the stacked registers */ - unsigned long val, *bsp, ndirty; - int i, sof, is_nat = 0; + unsigned long val, sof, *bsp, ndirty; + int i, is_nat = 0; sof = regs->cr_ifs & 0x7f; /* size of frame */ ndirty = (regs->loadrs >> 19); @@ -135,7 +135,7 @@ show_regs (struct pt_regs *regs) ((i == sof - 1) || (i % 3) == 2) ? "\n" : " "); } } else - show_stack(NULL); + show_stack(0); } void @@ -379,7 +379,6 @@ copy_thread (int nr, unsigned long clone_flags, # define THREAD_FLAGS_TO_SET 0 p->thread.flags = ((current->thread.flags & ~THREAD_FLAGS_TO_CLEAR) | THREAD_FLAGS_TO_SET); - p->thread.last_fph_cpu = -1; #ifdef CONFIG_IA32_SUPPORT /* * If we're cloning an IA32 task then save the IA32 extra diff --git a/arch/ia64/kernel/ptrace.c b/arch/ia64/kernel/ptrace.c index c8eedec..376d055 100644 --- a/arch/ia64/kernel/ptrace.c +++ b/arch/ia64/kernel/ptrace.c @@ -202,16 +202,17 @@ static unsigned long get_rnat (struct pt_regs *pt, struct switch_stack *sw, unsigned long *krbs, unsigned long *urnat_addr) { - unsigned long rnat0 = 0, rnat1 = 0, urnat = 0, *slot0_kaddr, umask = 0UL; + unsigned long rnat0 = 0, rnat1 = 0, urnat = 0, *slot0_kaddr, kmask = ~0UL; unsigned long *kbsp, *ubspstore, *rnat0_kaddr, *rnat1_kaddr, shift; long num_regs; kbsp = (unsigned long *) sw->ar_bspstore; ubspstore = (unsigned long *) pt->ar_bspstore; /* - * First, figure out which bit number slot 0 in user-land maps to in the kernel - * rnat. Do this by figuring out how many register slots we're beyond the user's - * backingstore and then computing the equivalent address in kernel space. + * First, figure out which bit number slot 0 in user-land maps + * to in the kernel rnat. Do this by figuring out how many + * register slots we're beyond the user's backingstore and + * then computing the equivalent address in kernel space. 
*/ num_regs = ia64_rse_num_regs(ubspstore, urnat_addr + 1); slot0_kaddr = ia64_rse_skip_regs(krbs, num_regs); @@ -221,8 +222,8 @@ get_rnat (struct pt_regs *pt, struct switch_stack *sw, if (ubspstore + 63 > urnat_addr) { /* some bits need to be merged in from pt->ar_rnat */ - umask = ((1UL << ia64_rse_slot_num(ubspstore)) - 1); - urnat = (pt->ar_rnat & umask); + kmask = ~((1UL << ia64_rse_slot_num(ubspstore)) - 1); + urnat = (pt->ar_rnat & ~kmask); } if (rnat0_kaddr >= kbsp) { rnat0 = sw->ar_rnat; @@ -234,7 +235,7 @@ get_rnat (struct pt_regs *pt, struct switch_stack *sw, } else if (rnat1_kaddr > krbs) { rnat1 = *rnat1_kaddr; } - urnat |= ((rnat1 << (63 - shift)) | (rnat0 >> shift)) & ~umask; + urnat |= ((rnat1 << (63 - shift)) | (rnat0 >> shift)) & kmask; return urnat; } @@ -245,19 +246,17 @@ static void put_rnat (struct pt_regs *pt, struct switch_stack *sw, unsigned long *krbs, unsigned long *urnat_addr, unsigned long urnat) { - unsigned long rnat0 = 0, rnat1 = 0, *slot0_kaddr, umask = 0, mask, m; - unsigned long *kbsp, *ubspstore, *rnat0_kaddr, *rnat1_kaddr, shift, slot, ndirty; - long num_regs, nbits; - - ndirty = ia64_rse_num_regs(krbs, krbs + (pt->loadrs >> 19)); - nbits = ndirty % 63; + unsigned long rnat0 = 0, rnat1 = 0, rnat = 0, *slot0_kaddr, kmask = ~0UL, mask; + unsigned long *kbsp, *ubspstore, *rnat0_kaddr, *rnat1_kaddr, shift; + long num_regs; kbsp = (unsigned long *) sw->ar_bspstore; ubspstore = (unsigned long *) pt->ar_bspstore; /* - * First, figure out which bit number slot 0 in user-land maps to in the kernel - * rnat. Do this by figuring out how many register slots we're beyond the user's - * backingstore and then computing the equivalent address in kernel space. + * First, figure out which bit number slot 0 in user-land maps + * to in the kernel rnat. Do this by figuring out how many + * register slots we're beyond the user's backingstore and + * then computing the equivalent address in kernel space. */ num_regs = (long) ia64_rse_num_regs(ubspstore, urnat_addr + 1); slot0_kaddr = ia64_rse_skip_regs(krbs, num_regs); @@ -265,37 +264,29 @@ put_rnat (struct pt_regs *pt, struct switch_stack *sw, rnat1_kaddr = ia64_rse_rnat_addr(slot0_kaddr); rnat0_kaddr = rnat1_kaddr - 64; -printk("%s: ubspstore=%p urnat_addr=%p\n", __FUNCTION__, ubspstore, urnat_addr); if (ubspstore + 63 > urnat_addr) { /* some bits need to be place in pt->ar_rnat: */ - slot = ia64_rse_slot_num(ubspstore); - umask = ((1UL << slot) - 1); - pt->ar_rnat = (pt->ar_rnat & ~umask) | (urnat & umask); - nbits -= slot; - if (nbits <= 0) - return; + kmask = ~((1UL << ia64_rse_slot_num(ubspstore)) - 1); + pt->ar_rnat = (pt->ar_rnat & kmask) | (rnat & ~kmask); } - mask = (1UL << nbits) - 1; /* * Note: Section 11.1 of the EAS guarantees that bit 63 of an * rnat slot is ignored. so we don't have to clear it here. 
*/ rnat0 = (urnat << shift); - m = mask << shift; -printk("%s: rnat0=%016lx, m=%016lx, rnat0_kaddr=%p kbsp=%p\n", __FUNCTION__, rnat0, m, rnat0_kaddr, kbsp); + mask = ~0UL << shift; if (rnat0_kaddr >= kbsp) { - sw->ar_rnat = (sw->ar_rnat & ~m) | (rnat0 & m); + sw->ar_rnat = (sw->ar_rnat & ~mask) | (rnat0 & mask); } else if (rnat0_kaddr > krbs) { - *rnat0_kaddr = ((*rnat0_kaddr & ~m) | (rnat0 & m)); + *rnat0_kaddr = ((*rnat0_kaddr & ~mask) | (rnat0 & mask)); } rnat1 = (urnat >> (63 - shift)); - m = mask >> (63 - shift); -printk("%s: rnat1=%016lx, m=%016lx, rnat1_kaddr=%p kbsp=%p\n", __FUNCTION__, rnat1, m, rnat1_kaddr, kbsp); + mask = ~0UL >> (63 - shift); if (rnat1_kaddr >= kbsp) { - sw->ar_rnat = (sw->ar_rnat & ~m) | (rnat1 & m); + sw->ar_rnat = (sw->ar_rnat & ~mask) | (rnat1 & mask); } else if (rnat1_kaddr > krbs) { - *rnat1_kaddr = ((*rnat1_kaddr & ~m) | (rnat1 & m)); + *rnat1_kaddr = ((*rnat1_kaddr & ~mask) | (rnat1 & mask)); } } @@ -598,7 +589,6 @@ ia64_flush_fph (struct task_struct *task) psr->mfh = 0; ia64_save_fpu(&task->thread.fph[0]); task->thread.flags |= IA64_THREAD_FPH_VALID; - task->thread.last_fph_cpu = smp_processor_id(); } } @@ -618,11 +608,12 @@ ia64_sync_fph (struct task_struct *task) ia64_flush_fph(task); if (!(task->thread.flags & IA64_THREAD_FPH_VALID)) { task->thread.flags |= IA64_THREAD_FPH_VALID; - task->thread.last_fph_cpu = -1; /* force reload */ memset(&task->thread.fph, 0, sizeof(task->thread.fph)); } +#ifndef CONFIG_SMP if (ia64_get_fpu_owner() == task) ia64_set_fpu_owner(0); +#endif psr->dfh = 1; } @@ -711,9 +702,7 @@ access_uarea (struct task_struct *child, unsigned long addr, unsigned long *data case PT_R4: case PT_R5: case PT_R6: case PT_R7: if (write_access) { /* read NaT bit first: */ - unsigned long dummy; - - ret = unw_get_gr(&info, (addr - PT_R4)/8 + 4, &dummy, &nat); + ret = unw_get_gr(&info, (addr - PT_R4)/8 + 4, data, &nat); if (ret < 0) return ret; } diff --git a/arch/ia64/kernel/sal.c b/arch/ia64/kernel/sal.c index 7298e9a..ffd5e36 100644 --- a/arch/ia64/kernel/sal.c +++ b/arch/ia64/kernel/sal.c @@ -116,7 +116,7 @@ ia64_sal_init (struct ia64_sal_systab *systab) p = (char *) (systab + 1); for (i = 0; i < systab->entry_count; i++) { /* - * The first byte of each entry type contains the type descriptor. + * The first byte of each entry type contains the type descriptor. */ switch (*p) { case SAL_DESC_ENTRY_POINT: diff --git a/arch/ia64/kernel/salinfo.c b/arch/ia64/kernel/salinfo.c index 04f8740..4ec8d51 100644 --- a/arch/ia64/kernel/salinfo.c +++ b/arch/ia64/kernel/salinfo.c @@ -38,7 +38,7 @@ static salinfo_entry_t salinfo_entries[]={ { "itc_drift", IA64_SAL_PLATFORM_FEATURE_ITC_DRIFT, }, }; -#define NR_SALINFO_ENTRIES ARRAY_SIZE(salinfo_entries) +#define NR_SALINFO_ENTRIES (sizeof(salinfo_entries)/sizeof(salinfo_entry_t)) /* * One for each feature and one more for the directory entry... 
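Several hunks above replace the kernel's ARRAY_SIZE() macro with an open-coded sizeof division (NR_PALINFO_ENTRIES, PFM_CMD_COUNT, NR_SALINFO_ENTRIES). Both spellings yield the element count at compile time; dividing by the element type, as these hunks do, is equivalent as long as the named type really is the array's element type. A small stand-alone illustration with demo names, not code from the patch:

#include <stdio.h>

/* the same idiom ARRAY_SIZE() wraps: whole-array size over element size */
#define DEMO_NENTRIES(a)	(sizeof(a) / sizeof((a)[0]))

static const char *demo_entries[] = { "boot_time", "bus_lock", "itc_drift" };

int main(void)
{
	/* both expressions are compile-time constants and print 3 */
	printf("%lu %lu\n",
	       (unsigned long) (sizeof(demo_entries) / sizeof(demo_entries[0])),
	       (unsigned long) DEMO_NENTRIES(demo_entries));
	return 0;
}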
diff --git a/arch/ia64/kernel/setup.c b/arch/ia64/kernel/setup.c index 8c81836..4b70c64 100644 --- a/arch/ia64/kernel/setup.c +++ b/arch/ia64/kernel/setup.c @@ -59,10 +59,7 @@ unsigned long ia64_cycles_per_usec; struct ia64_boot_param *ia64_boot_param; struct screen_info screen_info; -unsigned long ia64_max_cacheline_size; unsigned long ia64_iobase; /* virtual address for I/O accesses */ -struct io_space io_space[MAX_IO_SPACES]; -unsigned int num_io_spaces; unsigned char aux_device_present = 0xaa; /* XXX remove this when legacy I/O is gone */ @@ -415,11 +412,6 @@ setup_arch (char **cmdline_p) } ia64_iobase = (unsigned long) ioremap(phys_iobase, 0); - /* setup legacy IO port space */ - io_space[0].mmio_base = ia64_iobase; - io_space[0].sparse = 1; - num_io_spaces = 1; - #ifdef CONFIG_SMP cpu_physical_id(0) = hard_smp_processor_id(); #endif @@ -429,7 +421,7 @@ setup_arch (char **cmdline_p) #ifdef CONFIG_ACPI_BOOT acpi_boot_init(); #endif -#ifdef CONFIG_SERIAL_8250_HCDP +#ifdef CONFIG_SERIAL_HCDP if (efi.hcdp) { void setup_serial_hcdp(void *); @@ -502,7 +494,7 @@ show_cpuinfo (struct seq_file *m, void *v) memcpy(features, " standard", 10); cp = features; sep = 0; - for (i = 0; i < (int) ARRAY_SIZE(feature_bits); ++i) { + for (i = 0; i < sizeof(feature_bits)/sizeof(feature_bits[0]); ++i) { if (mask & feature_bits[i].mask) { if (sep) *cp++ = sep; @@ -633,39 +625,6 @@ setup_per_cpu_areas (void) /* start_kernel() requires this... */ } -static void -get_max_cacheline_size (void) -{ - unsigned long line_size, max = 1; - u64 l, levels, unique_caches; - pal_cache_config_info_t cci; - s64 status; - - status = ia64_pal_cache_summary(&levels, &unique_caches); - if (status != 0) { - printk(KERN_ERR "%s: ia64_pal_cache_summary() failed (status=%ld)\n", - __FUNCTION__, status); - max = SMP_CACHE_BYTES; - goto out; - } - - for (l = 0; l < levels; ++l) { - status = ia64_pal_cache_config_info(l, /* cache_type (data_or_unified)= */ 2, - &cci); - if (status != 0) { - printk(KERN_ERR - "%s: ia64_pal_cache_config_info(l=%lu) failed (status=%ld)\n", - __FUNCTION__, l, status); - max = SMP_CACHE_BYTES; - } - line_size = 1 << cci.pcci_line_size; - if (line_size > max) - max = line_size; - } - out: - if (max > ia64_max_cacheline_size) - ia64_max_cacheline_size = max; -} /* * cpu_init() initializes state that is per-CPU. This function acts @@ -709,8 +668,6 @@ cpu_init (void) cpu_info->node_data = get_node_data_ptr(); #endif - get_max_cacheline_size(); - /* * We can't pass "local_cpu_data" to identify_cpu() because we haven't called * ia64_mmu_init() yet. And we can't call ia64_mmu_init() first because it diff --git a/arch/ia64/kernel/signal.c b/arch/ia64/kernel/signal.c index 3b55d7f..3037b25 100644 --- a/arch/ia64/kernel/signal.c +++ b/arch/ia64/kernel/signal.c @@ -142,13 +142,8 @@ restore_sigcontext (struct sigcontext *sc, struct sigscratch *scr) __copy_from_user(current->thread.fph, &sc->sc_fr[32], 96*16); psr->mfh = 0; /* drop signal handler's fph contents... */ - if (psr->dfh) - current->thread.last_fph_cpu = -1; - else { + if (!psr->dfh) __ia64_load_fpu(current->thread.fph); - ia64_set_fpu_owner(current); - current->thread.last_fph_cpu = smp_processor_id(); - } } return err; } @@ -528,7 +523,7 @@ ia64_do_signal (sigset_t *oldset, struct sigscratch *scr, long in_syscall) else errno = -errno; } - } else if ((long) scr->pt.r10 != -1) + } else if (scr->pt.r10 != -1) /* * A system calls has to be restarted only if one of the error codes * ERESTARTNOHAND, ERESTARTSYS, or ERESTARTNOINTR is returned. 
If r10 diff --git a/arch/ia64/kernel/smp.c b/arch/ia64/kernel/smp.c index 7d19294..3dc304e 100644 --- a/arch/ia64/kernel/smp.c +++ b/arch/ia64/kernel/smp.c @@ -2,7 +2,7 @@ * SMP Support * * Copyright (C) 1999 Walt Drummond - * Copyright (C) 1999, 2001, 2003 David Mosberger-Tang + * Copyright (C) 1999, 2001 David Mosberger-Tang * * Lots of stuff stolen from arch/alpha/kernel/smp.c * @@ -87,7 +87,7 @@ stop_this_cpu (void) cpu_halt(); } -irqreturn_t +void handle_IPI (int irq, void *dev_id, struct pt_regs *regs) { int this_cpu = get_cpu(); @@ -147,11 +147,10 @@ handle_IPI (int irq, void *dev_id, struct pt_regs *regs) mb(); /* Order data access and bit testing. */ } put_cpu(); - return IRQ_HANDLED; } /* - * Called with preeemption disabled. + * Called with preemption disabled */ static inline void send_IPI_single (int dest_cpu, int op) @@ -161,12 +160,12 @@ send_IPI_single (int dest_cpu, int op) } /* - * Called with preeemption disabled. + * Called with preemption disabled */ static inline void send_IPI_allbutself (int op) { - unsigned int i; + int i; for (i = 0; i < NR_CPUS; i++) { if (cpu_online(i) && i != smp_processor_id()) @@ -175,7 +174,7 @@ send_IPI_allbutself (int op) } /* - * Called with preeemption disabled. + * Called with preemption disabled */ static inline void send_IPI_all (int op) @@ -188,7 +187,7 @@ send_IPI_all (int op) } /* - * Called with preeemption disabled. + * Called with preemption disabled */ static inline void send_IPI_self (int op) @@ -197,7 +196,7 @@ send_IPI_self (int op) } /* - * Called with preeemption disabled. + * Called with preemption disabled */ void smp_send_reschedule (int cpu) diff --git a/arch/ia64/kernel/smpboot.c b/arch/ia64/kernel/smpboot.c index 8c183cd..dfd8ffd 100644 --- a/arch/ia64/kernel/smpboot.c +++ b/arch/ia64/kernel/smpboot.c @@ -192,7 +192,6 @@ ia64_sync_itc (unsigned int master) { long i, delta, adj, adjust_latency = 0, done = 0; unsigned long flags, rt, master_time_stamp, bound; - extern void ia64_cpu_local_tick (void); #if DEBUG_ITC_SYNC struct { long rt; /* roundtrip time */ @@ -247,16 +246,6 @@ ia64_sync_itc (unsigned int master) printk(KERN_INFO "CPU %d: synchronized ITC with CPU %u (last diff %ld cycles, " "maxerr %lu cycles)\n", smp_processor_id(), master, delta, rt); - - /* - * Check whether we sync'd the itc ahead of the next timer interrupt. If so, just - * reset it. - */ - if (time_after(ia64_get_itc(), local_cpu_data->itm_next)) { - Dprintk("CPU %d: oops, jumped a timer tick; resetting timer.\n", - smp_processor_id()); - ia64_cpu_local_tick(); - } } /* @@ -290,6 +279,15 @@ smp_callin (void) smp_setup_percpu_timer(); + if (!(sal_platform_features & IA64_SAL_PLATFORM_FEATURE_ITC_DRIFT)) { + /* + * Synchronize the ITC with the BP + */ + Dprintk("Going to syncup ITC with BP.\n"); + + ia64_sync_itc(0); + } + /* * Get our bogomips. */ @@ -312,18 +310,6 @@ smp_callin (void) local_irq_enable(); calibrate_delay(); local_cpu_data->loops_per_jiffy = loops_per_jiffy; - - if (!(sal_platform_features & IA64_SAL_PLATFORM_FEATURE_ITC_DRIFT)) { - /* - * Synchronize the ITC with the BP. Need to do this after irqs are - * enabled because ia64_sync_itc() calls smp_call_function_single(), which - * calls spin_unlock_bh(), which calls spin_unlock_bh(), which calls - * local_bh_enable(), which bugs out if irqs are not enabled... - */ - Dprintk("Going to syncup ITC with BP.\n"); - ia64_sync_itc(0); - } - /* * Allow the master to continue. 
*/ @@ -408,26 +394,13 @@ do_boot_cpu (int sapicid, int cpu) return 0; } -static int __init -decay (char *str) -{ - int ticks; - get_option (&str, &ticks); - cache_decay_ticks = ticks; - return 1; -} - -__setup("decay=", decay); - -/* - * # of ticks an idle task is considered cache-hot. Highly application-dependent. There - * are apps out there which are known to suffer significantly with values >= 4. - */ -unsigned long cache_decay_ticks = 10; /* equal to MIN_TIMESLICE */ +unsigned long cache_decay_ticks; /* # of ticks an idle task is considered cache-hot */ static void smp_tune_scheduling (void) { + cache_decay_ticks = 10; /* XXX base this on PAL info and cache-bandwidth estimate */ + printk(KERN_INFO "task migration cache decay timeout: %ld msecs.\n", (cache_decay_ticks + 1) * 1000 / HZ); } diff --git a/arch/ia64/kernel/time.c b/arch/ia64/kernel/time.c index 0f387be..abc0b4e 100644 --- a/arch/ia64/kernel/time.c +++ b/arch/ia64/kernel/time.c @@ -83,26 +83,11 @@ gettimeoffset (void) return (elapsed_cycles*local_cpu_data->nsec_per_cyc) >> IA64_NSEC_PER_CYC_SHIFT; } -static inline void -set_normalized_timespec (struct timespec *ts, time_t sec, long nsec) -{ - while (nsec > NSEC_PER_SEC) { - nsec -= NSEC_PER_SEC; - ++sec; - } - while (nsec < 0) { - nsec += NSEC_PER_SEC; - --sec; - } - ts->tv_sec = sec; - ts->tv_nsec = nsec; -} - void do_settimeofday (struct timeval *tv) { - time_t wtm_sec, sec = tv->tv_sec; - long wtm_nsec, nsec = tv->tv_usec * 1000; + time_t sec = tv->tv_sec; + long nsec = tv->tv_usec * 1000; write_seqlock_irq(&xtime_lock); { @@ -114,12 +99,13 @@ do_settimeofday (struct timeval *tv) */ nsec -= gettimeoffset(); - wtm_sec = wall_to_monotonic.tv_sec + (xtime.tv_sec - sec); - wtm_nsec = wall_to_monotonic.tv_nsec + (xtime.tv_nsec - nsec); - - set_normalized_timespec(&xtime, sec, nsec); - set_normalized_timespec(&wall_to_monotonic, wtm_sec, wtm_nsec); + while (nsec < 0) { + nsec += 1000000000; + sec--; + } + xtime.tv_sec = sec; + xtime.tv_nsec = nsec; time_adjust = 0; /* stop active adjtime() */ time_status |= STA_UNSYNC; time_maxerror = NTP_PHASE_LIMIT; @@ -180,8 +166,8 @@ do_gettimeofday (struct timeval *tv) usec = (nsec + offset) / 1000; - while (unlikely(usec >= USEC_PER_SEC)) { - usec -= USEC_PER_SEC; + while (unlikely(usec >= 1000000)) { + usec -= 1000000; ++sec; } @@ -189,8 +175,8 @@ do_gettimeofday (struct timeval *tv) tv->tv_usec = usec; } -static irqreturn_t -timer_interrupt (int irq, void *dev_id, struct pt_regs *regs) +static void +timer_interrupt(int irq, void *dev_id, struct pt_regs *regs) { unsigned long new_itm; @@ -235,7 +221,7 @@ timer_interrupt (int irq, void *dev_id, struct pt_regs *regs) do { /* * If we're too close to the next clock tick for comfort, we increase the - * safety margin by intentionally dropping the next tick(s). We do NOT update + * safety margin by intentionally dropping the next tick(s). We do NOT update * itm.next because that would force us to call do_timer() which in turn would * let our clock run too fast (with the potentially devastating effect of * losing monotony of time). @@ -245,13 +231,12 @@ timer_interrupt (int irq, void *dev_id, struct pt_regs *regs) ia64_set_itm(new_itm); /* double check, in case we got hit by a (slow) PMI: */ } while (time_after_eq(ia64_get_itc(), new_itm)); - return IRQ_HANDLED; } /* * Encapsulate access to the itm structure for SMP. 
*/ -void +void __init ia64_cpu_local_tick (void) { int cpu = smp_processor_id(); @@ -296,7 +281,7 @@ ia64_init_itm (void) if (status != 0) { /* invent "random" values */ printk(KERN_ERR - "SAL/PAL failed to obtain frequency info---inventing reasonable values\n"); + "SAL/PAL failed to obtain frequency info---inventing reasonable values\n"); platform_base_freq = 100000000; itc_ratio.num = 3; itc_ratio.den = 1; @@ -320,8 +305,8 @@ ia64_init_itm (void) local_cpu_data->proc_freq = (platform_base_freq*proc_ratio.num)/proc_ratio.den; local_cpu_data->itc_freq = itc_freq; - local_cpu_data->cyc_per_usec = (itc_freq + USEC_PER_SEC/2) / USEC_PER_SEC; - local_cpu_data->nsec_per_cyc = ((NSEC_PER_SEC<cyc_per_usec = (itc_freq + 500000) / 1000000; + local_cpu_data->nsec_per_cyc = ((1000000000UL<curr.reg + unw.save_order[i]; if (reg->where == UNW_WHERE_GR_SAVE) { reg->where = UNW_WHERE_GR; @@ -698,7 +698,7 @@ finish_prologue (struct unw_state_record *sr) */ if (sr->imask) { unsigned char kind, mask = 0, *cp = sr->imask; - int t; + unsigned long t; static const unsigned char limit[3] = { UNW_REG_F31, UNW_REG_R7, UNW_REG_B5 }; @@ -1214,13 +1214,13 @@ script_new (unsigned long ip) spin_unlock(&unw.lock); /* - * We'd deadlock here if we interrupted a thread that is holding a read lock on - * script->lock. Thus, if the write_trylock() fails, we simply bail out. The - * alternative would be to disable interrupts whenever we hold a read-lock, but - * that seems silly. + * XXX We'll deadlock here if we interrupt a thread that is + * holding a read lock on script->lock. A try_write_lock() + * might be mighty handy here... Alternatively, we could + * disable interrupts whenever we hold a read-lock, but that + * seems silly. */ - if (!write_trylock(&script->lock)) - return NULL; + write_lock(&script->lock); spin_lock(&unw.lock); { @@ -1888,21 +1888,22 @@ unw_unwind_to_user (struct unw_frame_info *info) return -1; } -static void -init_frame_info (struct unw_frame_info *info, struct task_struct *t, - struct switch_stack *sw, unsigned long stktop) +void +unw_init_frame_info (struct unw_frame_info *info, struct task_struct *t, struct switch_stack *sw) { - unsigned long rbslimit, rbstop, stklimit; + unsigned long rbslimit, rbstop, stklimit, stktop, sol; STAT(unsigned long start, flags;) STAT(local_irq_save(flags); ++unw.stat.api.inits; start = ia64_get_itc()); /* - * Subtle stuff here: we _could_ unwind through the switch_stack frame but we - * don't want to do that because it would be slow as each preserved register would - * have to be processed. Instead, what we do here is zero out the frame info and - * start the unwind process at the function that created the switch_stack frame. - * When a preserved value in switch_stack needs to be accessed, run_script() will + * Subtle stuff here: we _could_ unwind through the + * switch_stack frame but we don't want to do that because it + * would be slow as each preserved register would have to be + * processed. Instead, what we do here is zero out the frame + * info and start the unwind process at the function that + * created the switch_stack frame. When a preserved value in + * switch_stack needs to be accessed, run_script() will 
*/ memset(info, 0, sizeof(*info)); @@ -1913,6 +1914,7 @@ init_frame_info (struct unw_frame_info *info, struct task_struct *t, rbstop = rbslimit; stklimit = (unsigned long) t + IA64_STK_OFFSET; + stktop = (unsigned long) sw - 16; if (stktop <= rbstop) stktop = rbstop; @@ -1922,58 +1924,34 @@ init_frame_info (struct unw_frame_info *info, struct task_struct *t, info->memstk.top = stktop; info->task = t; info->sw = sw; - info->sp = info->psp = stktop; - info->pr = sw->pr; - UNW_DPRINT(3, "unwind.%s:\n" - " task 0x%lx\n" - " rbs = [0x%lx-0x%lx)\n" - " stk = [0x%lx-0x%lx)\n" - " pr 0x%lx\n" - " sw 0x%lx\n" - " sp 0x%lx\n", - __FUNCTION__, (unsigned long) t, rbslimit, rbstop, stktop, stklimit, - info->pr, (unsigned long) info->sw, info->sp); - STAT(unw.stat.api.init_time += ia64_get_itc() - start; local_irq_restore(flags)); -} - -void -unw_init_from_interruption (struct unw_frame_info *info, struct task_struct *t, - struct pt_regs *pt, struct switch_stack *sw) -{ - unsigned long sof; - - init_frame_info(info, t, sw, pt->r12); - info->cfm_loc = &pt->cr_ifs; - info->unat_loc = &pt->ar_unat; - info->pfs_loc = &pt->ar_pfs; - sof = *info->cfm_loc & 0x7f; - info->bsp = (unsigned long) ia64_rse_skip_regs((unsigned long *) info->regstk.top, -sof); - info->ip = pt->cr_iip + ia64_psr(pt)->ri; - info->pt = (unsigned long) pt; - UNW_DPRINT(3, "unwind.%s:\n" - " bsp 0x%lx\n" - " sof 0x%lx\n" - " ip 0x%lx\n", - __FUNCTION__, info->bsp, sof, info->ip); - find_save_locs(info); -} - -void -unw_init_frame_info (struct unw_frame_info *info, struct task_struct *t, struct switch_stack *sw) -{ - unsigned long sol; - - init_frame_info(info, t, sw, (unsigned long) (sw + 1) - 16); + info->sp = info->psp = (unsigned long) (sw + 1) - 16; + info->pt = 0; info->cfm_loc = &sw->ar_pfs; sol = (*info->cfm_loc >> 7) & 0x7f; info->bsp = (unsigned long) ia64_rse_skip_regs((unsigned long *) info->regstk.top, -sol); info->ip = sw->b0; - UNW_DPRINT(3, "unwind.%s:\n" - " bsp 0x%lx\n" - " sol 0x%lx\n" - " ip 0x%lx\n", - __FUNCTION__, info->bsp, sol, info->ip); + info->pr = sw->pr; + UNW_DPRINT(3, + "unwind.%s\n" + " rbslimit 0x%lx\n" + " rbstop 0x%lx\n" + " stklimit 0x%lx\n" + " stktop 0x%lx\n" + " task 0x%lx\n" + " sw 0x%lx\n", + __FUNCTION__, rbslimit, rbstop, stklimit, stktop, + (unsigned long)(info->task), + (unsigned long)(info->sw)); + UNW_DPRINT(3, + " sp/psp 0x%lx\n" + " sol 0x%lx\n" + " bsp 0x%lx\n" + " ip 0x%lx\n" + " pr 0x%lx\n", + info->sp, sol, info->bsp, info->ip, info->pr); + find_save_locs(info); + STAT(unw.stat.api.init_time += ia64_get_itc() - start; local_irq_restore(flags)); } void diff --git a/arch/ia64/lib/copy_user.S b/arch/ia64/lib/copy_user.S index c952bdc..2272730 100644 --- a/arch/ia64/lib/copy_user.S +++ b/arch/ia64/lib/copy_user.S @@ -316,7 +316,7 @@ GLOBAL_ENTRY(__copy_user) // Beginning of long mempcy (i.e. > 16 bytes) // .long_copy_user: - tbit.nz p6,p7=src1,0 // odd alignment + tbit.nz p6,p7=src1,0 // odd alignement and tmp=7,tmp ;; cmp.eq p10,p8=r0,tmp diff --git a/arch/ia64/lib/do_csum.S b/arch/ia64/lib/do_csum.S index 6bec2fc..c3bc67d 100644 --- a/arch/ia64/lib/do_csum.S +++ b/arch/ia64/lib/do_csum.S @@ -137,7 +137,7 @@ GLOBAL_ENTRY(do_csum) mov saved_pr=pr // preserve predicates (rotation) (p6) br.ret.spnt.many rp // return if zero or negative length - mov hmask=-1 // initialize head mask + mov hmask=-1 // intialize head mask tbit.nz p15,p0=buf,0 // is buf an odd address? 
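// (alignment trick: the checksum loop always loads aligned 8-byte words; hmask zeroes the bytes that precede buf in the first word so they never enter the running sum, and a matching tail mask handles the bytes past buf+len)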
and first1=-8,buf // 8-byte align down address of first1 element diff --git a/arch/ia64/lib/io.c b/arch/ia64/lib/io.c index d05f92f..2ff57f3 100644 --- a/arch/ia64/lib/io.c +++ b/arch/ia64/lib/io.c @@ -51,79 +51,84 @@ __ia64_memset_c_io (unsigned long dst, unsigned long c, long count) #ifdef CONFIG_IA64_GENERIC -#undef __ia64_inb -#undef __ia64_inw -#undef __ia64_inl -#undef __ia64_outb -#undef __ia64_outw -#undef __ia64_outl -#undef __ia64_readb -#undef __ia64_readw -#undef __ia64_readl -#undef __ia64_readq -#undef __ia64_writeb -#undef __ia64_writew -#undef __ia64_writel -#undef __ia64_writeq - unsigned int -__ia64_inb (unsigned long port) +ia64_inb (unsigned long port) { - return ___ia64_inb(port); + return __ia64_inb(port); } unsigned int -__ia64_inw (unsigned long port) +ia64_inw (unsigned long port) { - return ___ia64_inw(port); + return __ia64_inw(port); } unsigned int -__ia64_inl (unsigned long port) +ia64_inl (unsigned long port) { - return ___ia64_inl(port); + return __ia64_inl(port); } void -__ia64_outb (unsigned char val, unsigned long port) +ia64_outb (unsigned char val, unsigned long port) { - ___ia64_outb(val, port); + __ia64_outb(val, port); } void -__ia64_outw (unsigned short val, unsigned long port) +ia64_outw (unsigned short val, unsigned long port) { - ___ia64_outw(val, port); + __ia64_outw(val, port); } void -__ia64_outl (unsigned int val, unsigned long port) +ia64_outl (unsigned int val, unsigned long port) { - ___ia64_outl(val, port); + __ia64_outl(val, port); } unsigned char -__ia64_readb (void *addr) +ia64_readb (void *addr) { - return ___ia64_readb (addr); + return __ia64_readb (addr); } unsigned short -__ia64_readw (void *addr) +ia64_readw (void *addr) { - return ___ia64_readw (addr); + return __ia64_readw (addr); } unsigned int -__ia64_readl (void *addr) +ia64_readl (void *addr) { - return ___ia64_readl (addr); + return __ia64_readl (addr); } unsigned long -__ia64_readq (void *addr) +ia64_readq (void *addr) { - return ___ia64_readq (addr); + return __ia64_readq (addr); } + +/* define aliases: */ + +asm (".global __ia64_inb, __ia64_inw, __ia64_inl"); +asm ("__ia64_inb = ia64_inb"); +asm ("__ia64_inw = ia64_inw"); +asm ("__ia64_inl = ia64_inl"); + +asm (".global __ia64_outb, __ia64_outw, __ia64_outl"); +asm ("__ia64_outb = ia64_outb"); +asm ("__ia64_outw = ia64_outw"); +asm ("__ia64_outl = ia64_outl"); + +asm (".global __ia64_readb, __ia64_readw, __ia64_readl, __ia64_readq"); +asm ("__ia64_readb = ia64_readb"); +asm ("__ia64_readw = ia64_readw"); +asm ("__ia64_readl = ia64_readl"); +asm ("__ia64_readq = ia64_readq"); + + #endif /* CONFIG_IA64_GENERIC */ diff --git a/arch/ia64/lib/swiotlb.c b/arch/ia64/lib/swiotlb.c index 9ba1094..3d00798 100644 --- a/arch/ia64/lib/swiotlb.c +++ b/arch/ia64/lib/swiotlb.c @@ -5,10 +5,7 @@ * I/O TLBs (aka DMA address translation hardware). * Copyright (C) 2000 Asit Mallick * Copyright (C) 2000 Goutham Rao - * Copyright (C) 2000, 2003 Hewlett-Packard Co - * David Mosberger-Tang * - * 03/05/07 davidm Switch from PCI-DMA to generic device DMA API. * 00/12/13 davidm Rename to swiotlb.c and add mark_clean() to avoid * unnecessary i-cache flushing. */ @@ -95,7 +92,7 @@ __setup("swiotlb=", setup_io_tlb_npages); void swiotlb_init (void) { - unsigned long i; + int i; /* * Get IO TLB memory from the low pages */ @@ -124,7 +121,7 @@ swiotlb_init (void) * Allocates bounce buffer and returns its kernel virtual address. 
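 * The buffer lives in the io_tlb pool reserved by swiotlb_init(): map_single() claims enough IO_TLB_SHIFT-sized slots to cover the request, records the original buffer in io_tlb_orig_addr[] so the data can be copied back at sync/unmap time, and for PCI_DMA_TODEVICE/BIDIRECTIONAL copies the data in up front. Callers such as swiotlb_map_single() below then hand the device virt_to_phys() of this address instead of the original one.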
*/ static void * -map_single (struct device *hwdev, char *buffer, size_t size, int dir) +map_single (struct pci_dev *hwdev, char *buffer, size_t size, int direction) { unsigned long flags; char *dma_addr; @@ -164,7 +161,7 @@ map_single (struct device *hwdev, char *buffer, size_t size, int dir) if (io_tlb_list[index] >= nslots) { int count = 0; - for (i = index; i < (int) (index + nslots); i++) + for (i = index; i < index + nslots; i++) io_tlb_list[i] = 0; for (i = index - 1; (OFFSET(i, IO_TLB_SEGSIZE) != IO_TLB_SEGSIZE -1) && io_tlb_list[i]; i--) @@ -198,7 +195,7 @@ map_single (struct device *hwdev, char *buffer, size_t size, int dir) * needed when we sync the memory. Then we sync the buffer if needed. */ io_tlb_orig_addr[index] = buffer; - if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL) + if (direction == PCI_DMA_TODEVICE || direction == PCI_DMA_BIDIRECTIONAL) memcpy(dma_addr, buffer, size); return dma_addr; @@ -208,7 +205,7 @@ map_single (struct device *hwdev, char *buffer, size_t size, int dir) * dma_addr is the kernel virtual address of the bounce buffer to unmap. */ static void -unmap_single (struct device *hwdev, char *dma_addr, size_t size, int dir) +unmap_single (struct pci_dev *hwdev, char *dma_addr, size_t size, int direction) { unsigned long flags; int i, nslots = ALIGN(size, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT; @@ -218,7 +215,7 @@ unmap_single (struct device *hwdev, char *dma_addr, size_t size, int dir) /* * First, sync the memory before unmapping the entry */ - if ((dir == DMA_FROM_DEVICE) || (dir == DMA_BIDIRECTIONAL)) + if ((direction == PCI_DMA_FROMDEVICE) || (direction == PCI_DMA_BIDIRECTIONAL)) /* * bounce... copy the data back into the original buffer * and delete the * bounce buffer. @@ -242,7 +239,7 @@ unmap_single (struct device *hwdev, char *dma_addr, size_t size, int dir) for (i = index + nslots - 1; i >= index; i--) io_tlb_list[i] = ++count; /* - * Step 2: merge the returned slots with the preceding slots, if + * Step 2: merge the returned slots with the preceeding slots, if * available (non zero) */ for (i = index - 1; (OFFSET(i, IO_TLB_SEGSIZE) != IO_TLB_SEGSIZE -1) && @@ -253,46 +250,49 @@ unmap_single (struct device *hwdev, char *dma_addr, size_t size, int dir) } static void -sync_single (struct device *hwdev, char *dma_addr, size_t size, int dir) +sync_single (struct pci_dev *hwdev, char *dma_addr, size_t size, int direction) { int index = (dma_addr - io_tlb_start) >> IO_TLB_SHIFT; char *buffer = io_tlb_orig_addr[index]; /* * bounce... copy the data back into/from the original buffer - * XXX How do you handle DMA_BIDIRECTIONAL here ? + * XXX How do you handle PCI_DMA_BIDIRECTIONAL here ? */ - if (dir == DMA_FROM_DEVICE) + if (direction == PCI_DMA_FROMDEVICE) memcpy(buffer, dma_addr, size); - else if (dir == DMA_TO_DEVICE) + else if (direction == PCI_DMA_TODEVICE) memcpy(dma_addr, buffer, size); else BUG(); } void * -swiotlb_alloc_coherent (struct device *hwdev, size_t size, dma_addr_t *dma_handle, int flags) +swiotlb_alloc_consistent (struct pci_dev *hwdev, size_t size, dma_addr_t *dma_handle) { - unsigned long dev_addr; + unsigned long pci_addr; + int gfp = GFP_ATOMIC; void *ret; - /* XXX fix me: the DMA API should pass us an explicit DMA mask instead: */ - flags |= GFP_DMA; - - ret = (void *)__get_free_pages(flags, get_order(size)); + /* + * Alloc_consistent() is defined to return memory < 4GB, no matter what the DMA + * mask says. 
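+ * (On ia64 GFP_DMA gives exactly that: ZONE_DMA is the memory below 4GB, cf. MAX_DMA_ADDRESS = PAGE_OFFSET + 0x100000000UL in mm/init.c, so pages from it satisfy any 32-bit-capable device.)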
+ */ + gfp |= GFP_DMA; /* XXX fix me: should change this to GFP_32BIT or ZONE_32BIT */ + ret = (void *)__get_free_pages(gfp, get_order(size)); if (!ret) return NULL; memset(ret, 0, size); - dev_addr = virt_to_phys(ret); - if (hwdev && hwdev->dma_mask && (dev_addr & ~*hwdev->dma_mask) != 0) - panic("swiotlb_alloc_consistent: allocated memory is out of range for device"); - *dma_handle = dev_addr; + pci_addr = virt_to_phys(ret); + if (hwdev && (pci_addr & ~hwdev->dma_mask) != 0) + panic("swiotlb_alloc_consistent: allocated memory is out of range for PCI device"); + *dma_handle = pci_addr; return ret; } void -swiotlb_free_coherent (struct device *hwdev, size_t size, void *vaddr, dma_addr_t dma_handle) +swiotlb_free_consistent (struct pci_dev *hwdev, size_t size, void *vaddr, dma_addr_t dma_handle) { free_pages((unsigned long) vaddr, get_order(size)); } @@ -305,34 +305,34 @@ swiotlb_free_coherent (struct device *hwdev, size_t size, void *vaddr, dma_addr_ * swiotlb_unmap_single or swiotlb_dma_sync_single is performed. */ dma_addr_t -swiotlb_map_single (struct device *hwdev, void *ptr, size_t size, int dir) +swiotlb_map_single (struct pci_dev *hwdev, void *ptr, size_t size, int direction) { - unsigned long dev_addr = virt_to_phys(ptr); + unsigned long pci_addr = virt_to_phys(ptr); - if (dir == DMA_NONE) + if (direction == PCI_DMA_NONE) BUG(); /* * Check if the PCI device can DMA to ptr... if so, just return ptr */ - if (hwdev && hwdev->dma_mask && (dev_addr & ~*hwdev->dma_mask) == 0) + if ((pci_addr & ~hwdev->dma_mask) == 0) /* * Device is bit capable of DMA'ing to the buffer... just return the PCI * address of ptr */ - return dev_addr; + return pci_addr; /* * get a bounce buffer: */ - dev_addr = virt_to_phys(map_single(hwdev, ptr, size, dir)); + pci_addr = virt_to_phys(map_single(hwdev, ptr, size, direction)); /* * Ensure that the address returned is DMA'ble: */ - if (hwdev && hwdev->dma_mask && (dev_addr & ~*hwdev->dma_mask) != 0) + if ((pci_addr & ~hwdev->dma_mask) != 0) panic("map_single: bounce buffer is not DMA'ble"); - return dev_addr; + return pci_addr; } /* @@ -363,15 +363,15 @@ mark_clean (void *addr, size_t size) * device wrote there. */ void -swiotlb_unmap_single (struct device *hwdev, dma_addr_t dev_addr, size_t size, int dir) +swiotlb_unmap_single (struct pci_dev *hwdev, dma_addr_t pci_addr, size_t size, int direction) { - char *dma_addr = phys_to_virt(dev_addr); + char *dma_addr = phys_to_virt(pci_addr); - if (dir == DMA_NONE) + if (direction == PCI_DMA_NONE) BUG(); if (dma_addr >= io_tlb_start && dma_addr < io_tlb_end) - unmap_single(hwdev, dma_addr, size, dir); - else if (dir == DMA_FROM_DEVICE) + unmap_single(hwdev, dma_addr, size, direction); + else if (direction == PCI_DMA_FROMDEVICE) mark_clean(dma_addr, size); } @@ -385,21 +385,21 @@ swiotlb_unmap_single (struct device *hwdev, dma_addr_t dev_addr, size_t size, in * again owns the buffer. 
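 * As with unmap, the sync only copies when the handle points into the bounce pool; DMA that went straight to the original buffer needs no data movement, at most a mark_clean() on ia64 so a later execution of that page does not force an unnecessary i-cache flush.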
*/ void -swiotlb_sync_single (struct device *hwdev, dma_addr_t dev_addr, size_t size, int dir) +swiotlb_sync_single (struct pci_dev *hwdev, dma_addr_t pci_addr, size_t size, int direction) { - char *dma_addr = phys_to_virt(dev_addr); + char *dma_addr = phys_to_virt(pci_addr); - if (dir == DMA_NONE) + if (direction == PCI_DMA_NONE) BUG(); if (dma_addr >= io_tlb_start && dma_addr < io_tlb_end) - sync_single(hwdev, dma_addr, size, dir); - else if (dir == DMA_FROM_DEVICE) + sync_single(hwdev, dma_addr, size, direction); + else if (direction == PCI_DMA_FROMDEVICE) mark_clean(dma_addr, size); } /* * Map a set of buffers described by scatterlist in streaming mode for DMA. This is the - * scatter-gather version of the above swiotlb_map_single interface. Here the scatter + * scather-gather version of the above swiotlb_map_single interface. Here the scatter * gather list elements are each tagged with the appropriate dma address and length. They * are obtained via sg_dma_{address,length}(SG). * @@ -412,22 +412,23 @@ swiotlb_sync_single (struct device *hwdev, dma_addr_t dev_addr, size_t size, int * Device ownership issues as mentioned above for swiotlb_map_single are the same here. */ int -swiotlb_map_sg (struct device *hwdev, struct scatterlist *sg, int nelems, int dir) +swiotlb_map_sg (struct pci_dev *hwdev, struct scatterlist *sg, int nelems, int direction) { void *addr; - unsigned long dev_addr; + unsigned long pci_addr; int i; - if (dir == DMA_NONE) + if (direction == PCI_DMA_NONE) BUG(); for (i = 0; i < nelems; i++, sg++) { addr = SG_ENT_VIRT_ADDRESS(sg); - dev_addr = virt_to_phys(addr); - if (hwdev && hwdev->dma_mask && (dev_addr & ~*hwdev->dma_mask) != 0) - sg->dma_address = (dma_addr_t) map_single(hwdev, addr, sg->length, dir); + pci_addr = virt_to_phys(addr); + if ((pci_addr & ~hwdev->dma_mask) != 0) + sg->dma_address = (dma_addr_t) + map_single(hwdev, addr, sg->length, direction); else - sg->dma_address = dev_addr; + sg->dma_address = pci_addr; sg->dma_length = sg->length; } return nelems; @@ -438,17 +439,17 @@ swiotlb_map_sg (struct device *hwdev, struct scatterlist *sg, int nelems, int di * here are the same as for swiotlb_unmap_single() above. */ void -swiotlb_unmap_sg (struct device *hwdev, struct scatterlist *sg, int nelems, int dir) +swiotlb_unmap_sg (struct pci_dev *hwdev, struct scatterlist *sg, int nelems, int direction) { int i; - if (dir == DMA_NONE) + if (direction == PCI_DMA_NONE) BUG(); for (i = 0; i < nelems; i++, sg++) if (sg->dma_address != SG_ENT_PHYS_ADDRESS(sg)) - unmap_single(hwdev, (void *) sg->dma_address, sg->dma_length, dir); - else if (dir == DMA_FROM_DEVICE) + unmap_single(hwdev, (void *) sg->dma_address, sg->dma_length, direction); + else if (direction == PCI_DMA_FROMDEVICE) mark_clean(SG_ENT_VIRT_ADDRESS(sg), sg->dma_length); } @@ -460,16 +461,16 @@ swiotlb_unmap_sg (struct device *hwdev, struct scatterlist *sg, int nelems, int * usage. 
*/ void -swiotlb_sync_sg (struct device *hwdev, struct scatterlist *sg, int nelems, int dir) +swiotlb_sync_sg (struct pci_dev *hwdev, struct scatterlist *sg, int nelems, int direction) { int i; - if (dir == DMA_NONE) + if (direction == PCI_DMA_NONE) BUG(); for (i = 0; i < nelems; i++, sg++) if (sg->dma_address != SG_ENT_PHYS_ADDRESS(sg)) - sync_single(hwdev, (void *) sg->dma_address, sg->dma_length, dir); + sync_single(hwdev, (void *) sg->dma_address, sg->dma_length, direction); } /* @@ -478,7 +479,7 @@ swiotlb_sync_sg (struct device *hwdev, struct scatterlist *sg, int nelems, int d * you would pass 0x00ffffff as the mask to this function. */ int -swiotlb_dma_supported (struct device *hwdev, u64 mask) +swiotlb_pci_dma_supported (struct pci_dev *hwdev, u64 mask) { return 1; } @@ -490,6 +491,6 @@ EXPORT_SYMBOL(swiotlb_map_sg); EXPORT_SYMBOL(swiotlb_unmap_sg); EXPORT_SYMBOL(swiotlb_sync_single); EXPORT_SYMBOL(swiotlb_sync_sg); -EXPORT_SYMBOL(swiotlb_alloc_coherent); -EXPORT_SYMBOL(swiotlb_free_coherent); -EXPORT_SYMBOL(swiotlb_dma_supported); +EXPORT_SYMBOL(swiotlb_alloc_consistent); +EXPORT_SYMBOL(swiotlb_free_consistent); +EXPORT_SYMBOL(swiotlb_pci_dma_supported); diff --git a/arch/ia64/mm/fault.c b/arch/ia64/mm/fault.c index 68f3b4a..f16f8ce 100644 --- a/arch/ia64/mm/fault.c +++ b/arch/ia64/mm/fault.c @@ -58,18 +58,6 @@ ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_regs *re if (in_atomic() || !mm) goto no_context; -#ifdef CONFIG_VIRTUAL_MEM_MAP - /* - * If fault is in region 5 and we are in the kernel, we may already - * have the mmap_sem (pfn_valid macro is called during mmap). There - * is no vma for region 5 addr's anyway, so skip getting the semaphore - * and go directly to the exception handling code. - */ - - if ((REGION_NUMBER(address) == 5) && !user_mode(regs)) - goto bad_area_no_up; -#endif - down_read(&mm->mmap_sem); vma = find_vma_prev(mm, address, &prev_vma); @@ -151,9 +139,6 @@ ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_regs *re bad_area: up_read(&mm->mmap_sem); -#ifdef CONFIG_VIRTUAL_MEM_MAP - bad_area_no_up: -#endif if ((isr & IA64_ISR_SP) || ((isr & IA64_ISR_NA) && (isr & IA64_ISR_CODE_MASK) == IA64_ISR_CODE_LFETCH)) { diff --git a/arch/ia64/mm/init.c b/arch/ia64/mm/init.c index aa7b7de..0d38a16 100644 --- a/arch/ia64/mm/init.c +++ b/arch/ia64/mm/init.c @@ -38,13 +38,6 @@ extern void ia64_tlb_init (void); unsigned long MAX_DMA_ADDRESS = PAGE_OFFSET + 0x100000000UL; -#ifdef CONFIG_VIRTUAL_MEM_MAP -# define LARGE_GAP 0x40000000 /* Use virtual mem map if hole is > than this */ - unsigned long vmalloc_end = VMALLOC_END_INIT; - static struct page *vmem_map; - static unsigned long num_dma_physpages; -#endif - static int pgt_cache_water[2] = { 25, 50 }; void @@ -55,13 +48,13 @@ check_pgt_cache (void) low = pgt_cache_water[0]; high = pgt_cache_water[1]; - if (pgtable_cache_size > (u64) high) { + if (pgtable_cache_size > high) { do { if (pgd_quicklist) free_page((unsigned long)pgd_alloc_one_fast(0)); if (pmd_quicklist) free_page((unsigned long)pmd_alloc_one_fast(0, 0)); - } while (pgtable_cache_size > (u64) low); + } while (pgtable_cache_size > low); } } @@ -344,139 +337,6 @@ ia64_mmu_init (void *my_cpu_data) ia64_tlb_init(); } -#ifdef CONFIG_VIRTUAL_MEM_MAP - -static int -create_mem_map_page_table (u64 start, u64 end, void *arg) -{ - unsigned long address, start_page, end_page; - struct page *map_start, *map_end; - pgd_t *pgd; - pmd_t *pmd; - pte_t *pte; - - map_start = vmem_map + (__pa(start) >> PAGE_SHIFT); - map_end = 
vmem_map + (__pa(end) >> PAGE_SHIFT); - - start_page = (unsigned long) map_start & PAGE_MASK; - end_page = PAGE_ALIGN((unsigned long) map_end); - - for (address = start_page; address < end_page; address += PAGE_SIZE) { - pgd = pgd_offset_k(address); - if (pgd_none(*pgd)) - pgd_populate(&init_mm, pgd, alloc_bootmem_pages(PAGE_SIZE)); - pmd = pmd_offset(pgd, address); - - if (pmd_none(*pmd)) - pmd_populate_kernel(&init_mm, pmd, alloc_bootmem_pages(PAGE_SIZE)); - pte = pte_offset_kernel(pmd, address); - - if (pte_none(*pte)) - set_pte(pte, pfn_pte(__pa(alloc_bootmem_pages(PAGE_SIZE)) >> PAGE_SHIFT, - PAGE_KERNEL)); - } - return 0; -} - -struct memmap_init_callback_data { - struct page *start; - struct page *end; - int nid; - unsigned long zone; -}; - -static int -virtual_memmap_init (u64 start, u64 end, void *arg) -{ - struct memmap_init_callback_data *args; - struct page *map_start, *map_end; - - args = (struct memmap_init_callback_data *) arg; - - map_start = vmem_map + (__pa(start) >> PAGE_SHIFT); - map_end = vmem_map + (__pa(end) >> PAGE_SHIFT); - - if (map_start < args->start) - map_start = args->start; - if (map_end > args->end) - map_end = args->end; - - /* - * We have to initialize "out of bounds" struct page elements that fit completely - * on the same pages that were allocated for the "in bounds" elements because they - * may be referenced later (and found to be "reserved"). - */ - map_start -= ((unsigned long) map_start & (PAGE_SIZE - 1)) / sizeof(struct page); - map_end += ((PAGE_ALIGN((unsigned long) map_end) - (unsigned long) map_end) - / sizeof(struct page)); - - if (map_start < map_end) - memmap_init_zone(map_start, (unsigned long) (map_end - map_start), - args->nid, args->zone, page_to_pfn(map_start)); - return 0; -} - -void -memmap_init (struct page *start, unsigned long size, int nid, - unsigned long zone, unsigned long start_pfn) -{ - if (!vmem_map) - memmap_init_zone(start, size, nid, zone, start_pfn); - else { - struct memmap_init_callback_data args; - - args.start = start; - args.end = start + size; - args.nid = nid; - args.zone = zone; - - efi_memmap_walk(virtual_memmap_init, &args); - } -} - -int -ia64_pfn_valid (unsigned long pfn) -{ - char byte; - - return __get_user(byte, (char *) pfn_to_page(pfn)) == 0; -} - -static int -count_dma_pages (u64 start, u64 end, void *arg) -{ - unsigned long *count = arg; - - if (end <= MAX_DMA_ADDRESS) - *count += (end - start) >> PAGE_SHIFT; - return 0; -} - -static int -find_largest_hole (u64 start, u64 end, void *arg) -{ - u64 *max_gap = arg; - - static u64 last_end = PAGE_OFFSET; - - /* NOTE: this algorithm assumes efi memmap table is ordered */ - - if (*max_gap < (start - last_end)) - *max_gap = start - last_end; - last_end = end; - return 0; -} -#endif /* CONFIG_VIRTUAL_MEM_MAP */ - -static int -count_pages (u64 start, u64 end, void *arg) -{ - unsigned long *count = arg; - - *count += (end - start) >> PAGE_SHIFT; - return 0; -} - /* * Set up the page tables. 
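 * (Zone sizing below is deliberately simple: page frames under MAX_DMA_ADDRESS are counted into ZONE_DMA, everything above into ZONE_NORMAL.)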
*/ @@ -488,70 +348,18 @@ paging_init (void) extern void discontig_paging_init(void); discontig_paging_init(); - efi_memmap_walk(count_pages, &num_physpages); } #else /* !CONFIG_DISCONTIGMEM */ void paging_init (void) { - unsigned long max_dma; - unsigned long zones_size[MAX_NR_ZONES]; -# ifdef CONFIG_VIRTUAL_MEM_MAP - unsigned long zholes_size[MAX_NR_ZONES]; - unsigned long max_gap; -# endif + unsigned long max_dma, zones_size[MAX_NR_ZONES]; /* initialize mem_map[] */ memset(zones_size, 0, sizeof(zones_size)); - num_physpages = 0; - efi_memmap_walk(count_pages, &num_physpages); - max_dma = virt_to_phys((void *) MAX_DMA_ADDRESS) >> PAGE_SHIFT; - -# ifdef CONFIG_VIRTUAL_MEM_MAP - memset(zholes_size, 0, sizeof(zholes_size)); - - num_dma_physpages = 0; - efi_memmap_walk(count_dma_pages, &num_dma_physpages); - - if (max_low_pfn < max_dma) { - zones_size[ZONE_DMA] = max_low_pfn; - zholes_size[ZONE_DMA] = max_low_pfn - num_dma_physpages; - } else { - zones_size[ZONE_DMA] = max_dma; - zholes_size[ZONE_DMA] = max_dma - num_dma_physpages; - if (num_physpages > num_dma_physpages) { - zones_size[ZONE_NORMAL] = max_low_pfn - max_dma; - zholes_size[ZONE_NORMAL] = ((max_low_pfn - max_dma) - - (num_physpages - num_dma_physpages)); - } - } - - max_gap = 0; - efi_memmap_walk(find_largest_hole, (u64 *)&max_gap); - if (max_gap < LARGE_GAP) { - vmem_map = (struct page *) 0; - free_area_init_node(0, &contig_page_data, NULL, zones_size, 0, zholes_size); - mem_map = contig_page_data.node_mem_map; - } - else { - unsigned long map_size; - - /* allocate virtual_mem_map */ - - map_size = PAGE_ALIGN(max_low_pfn * sizeof(struct page)); - vmalloc_end -= map_size; - vmem_map = (struct page *) vmalloc_end; - efi_memmap_walk(create_mem_map_page_table, 0); - - free_area_init_node(0, &contig_page_data, vmem_map, zones_size, 0, zholes_size); - - mem_map = contig_page_data.node_mem_map; - printk("Virtual mem_map starts at 0x%p\n", mem_map); - } -# else /* !CONFIG_VIRTUAL_MEM_MAP */ if (max_low_pfn < max_dma) zones_size[ZONE_DMA] = max_low_pfn; else { @@ -559,11 +367,19 @@ paging_init (void) zones_size[ZONE_NORMAL] = max_low_pfn - max_dma; } free_area_init(zones_size); -# endif /* !CONFIG_VIRTUAL_MEM_MAP */ } #endif /* !CONFIG_DISCONTIGMEM */ static int +count_pages (u64 start, u64 end, void *arg) +{ + unsigned long *count = arg; + + *count += (end - start) >> PAGE_SHIFT; + return 0; +} + +static int count_reserved_pages (u64 start, u64 end, void *arg) { unsigned long num_reserved = 0; @@ -590,7 +406,7 @@ mem_init (void) * any drivers that may need the PCI DMA interface are initialized or bootmem has * been freed. 
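 * The call is routed through the machine vector, so it ends up in the platform's IOMMU setup (e.g. the zx1 sba_iommu) or in swiotlb_init(), and both still need bootmem for their large pdir/bounce-pool allocations.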
*/ - platform_dma_init(); + platform_pci_dma_init(); #endif #ifndef CONFIG_DISCONTIGMEM @@ -599,6 +415,9 @@ mem_init (void) max_mapnr = max_low_pfn; #endif + num_physpages = 0; + efi_memmap_walk(count_pages, &num_physpages); + high_memory = __va(max_low_pfn * PAGE_SIZE); for_each_pgdat(pgdat) @@ -626,7 +445,7 @@ mem_init (void) num_pgt_pages = nr_free_pages() / PTRS_PER_PGD + NUM_TASKS; if (num_pgt_pages > nr_free_pages() / 10) num_pgt_pages = nr_free_pages() / 10; - if (num_pgt_pages > (u64) pgt_cache_water[1]) + if (num_pgt_pages > pgt_cache_water[1]) pgt_cache_water[1] = num_pgt_pages; /* install the gate page in the global page table: */ diff --git a/arch/ia64/pci/pci.c b/arch/ia64/pci/pci.c index 0b002ea..b660eff 100644 --- a/arch/ia64/pci/pci.c +++ b/arch/ia64/pci/pci.c @@ -5,7 +5,6 @@ * * Copyright (C) 2002 Hewlett-Packard Co * David Mosberger-Tang - * Bjorn Helgaas * * Note: Above list of copyright holders is incomplete... */ @@ -117,10 +116,31 @@ pci_acpi_init (void) subsys_initcall(pci_acpi_init); +static void __init +pcibios_fixup_resource(struct resource *res, u64 offset) +{ + res->start += offset; + res->end += offset; +} + +void __init +pcibios_fixup_device_resources(struct pci_dev *dev, struct pci_bus *bus) +{ + int i; + + for (i = 0; i < PCI_NUM_RESOURCES; i++) { + if (!dev->resource[i].start) + continue; + if (dev->resource[i].flags & IORESOURCE_MEM) + pcibios_fixup_resource(&dev->resource[i], + PCI_CONTROLLER(dev)->mem_offset); + } +} + /* Called by ACPI when it finds a new root bus. */ static struct pci_controller * -alloc_pci_controller (int seg) +alloc_pci_controller(int seg) { struct pci_controller *controller; @@ -133,8 +153,8 @@ alloc_pci_controller (int seg) return controller; } -static struct pci_bus * -scan_root_bus (int bus, struct pci_ops *ops, void *sysdata) +struct pci_bus * +scan_root_bus(int bus, struct pci_ops *ops, void *sysdata) { struct pci_bus *b; @@ -164,185 +184,23 @@ scan_root_bus (int bus, struct pci_ops *ops, void *sysdata) return b; } -static int -alloc_resource (char *name, struct resource *root, unsigned long start, unsigned long end, unsigned long flags) -{ - struct resource *res; - - res = kmalloc(sizeof(*res), GFP_KERNEL); - if (!res) - return -ENOMEM; - - memset(res, 0, sizeof(*res)); - res->name = name; - res->start = start; - res->end = end; - res->flags = flags; - - if (request_resource(root, res)) - return -EBUSY; - - return 0; -} - -static u64 -add_io_space (struct acpi_resource_address64 *addr) -{ - u64 offset; - int sparse = 0; - int i; - - if (addr->address_translation_offset == 0) - return IO_SPACE_BASE(0); /* part of legacy IO space */ - - if (addr->attribute.io.translation_attribute == ACPI_SPARSE_TRANSLATION) - sparse = 1; - - offset = (u64) ioremap(addr->address_translation_offset, 0); - for (i = 0; i < num_io_spaces; i++) - if (io_space[i].mmio_base == offset && - io_space[i].sparse == sparse) - return IO_SPACE_BASE(i); - - if (num_io_spaces == MAX_IO_SPACES) { - printk("Too many IO port spaces\n"); - return ~0; - } - - i = num_io_spaces++; - io_space[i].mmio_base = offset; - io_space[i].sparse = sparse; - - return IO_SPACE_BASE(i); -} - -static acpi_status -count_window (struct acpi_resource *resource, void *data) -{ - unsigned int *windows = (unsigned int *) data; - struct acpi_resource_address64 addr; - acpi_status status; - - status = acpi_resource_to_address64(resource, &addr); - if (ACPI_SUCCESS(status)) - if (addr.resource_type == ACPI_MEMORY_RANGE || - addr.resource_type == ACPI_IO_RANGE) - (*windows)++; - - return 
AE_OK; -} - -struct pci_root_info { - struct pci_controller *controller; - char *name; -}; - -static acpi_status -add_window (struct acpi_resource *res, void *data) -{ - struct pci_root_info *info = (struct pci_root_info *) data; - struct pci_window *window; - struct acpi_resource_address64 addr; - acpi_status status; - unsigned long flags, offset = 0; - struct resource *root; - - status = acpi_resource_to_address64(res, &addr); - if (ACPI_SUCCESS(status)) { - if (addr.resource_type == ACPI_MEMORY_RANGE) { - flags = IORESOURCE_MEM; - root = &iomem_resource; - offset = addr.address_translation_offset; - } else if (addr.resource_type == ACPI_IO_RANGE) { - flags = IORESOURCE_IO; - root = &ioport_resource; - offset = add_io_space(&addr); - if (offset == ~0) - return AE_OK; - } else - return AE_OK; - - window = &info->controller->window[info->controller->windows++]; - window->resource.flags |= flags; - window->resource.start = addr.min_address_range; - window->resource.end = addr.max_address_range; - window->offset = offset; - - if (alloc_resource(info->name, root, addr.min_address_range + offset, - addr.max_address_range + offset, flags)) - printk(KERN_ERR "alloc 0x%lx-0x%lx from %s for %s failed\n", - addr.min_address_range + offset, addr.max_address_range + offset, - root->name, info->name); - } - - return AE_OK; -} - struct pci_bus * -pcibios_scan_root (void *handle, int seg, int bus) +pcibios_scan_root(void *handle, int seg, int bus) { - struct pci_root_info info; struct pci_controller *controller; - unsigned int windows = 0; - char *name; + u64 base, size, offset; printk("PCI: Probing PCI hardware on bus (%02x:%02x)\n", seg, bus); controller = alloc_pci_controller(seg); if (!controller) - goto out1; + return NULL; controller->acpi_handle = handle; - acpi_walk_resources(handle, METHOD_NAME__CRS, count_window, &windows); - controller->window = kmalloc(sizeof(*controller->window) * windows, GFP_KERNEL); - if (!controller->window) - goto out2; - - name = kmalloc(16, GFP_KERNEL); - if (!name) - goto out3; - - sprintf(name, "PCI Bus %02x:%02x", seg, bus); - info.controller = controller; - info.name = name; - acpi_walk_resources(handle, METHOD_NAME__CRS, add_window, &info); + acpi_get_addr_space(handle, ACPI_MEMORY_RANGE, &base, &size, &offset); + controller->mem_offset = offset; return scan_root_bus(bus, pci_root_ops, controller); - -out3: - kfree(controller->window); -out2: - kfree(controller); -out1: - return NULL; -} - -void __init -pcibios_fixup_device_resources (struct pci_dev *dev, struct pci_bus *bus) -{ - struct pci_controller *controller = PCI_CONTROLLER(dev); - struct pci_window *window; - int i, j; - - for (i = 0; i < PCI_NUM_RESOURCES; i++) { - if (!dev->resource[i].start) - continue; - -#define contains(win, res) ((res)->start >= (win)->start && \ - (res)->end <= (win)->end) - - for (j = 0; j < controller->windows; j++) { - window = &controller->window[j]; - if (((dev->resource[i].flags & IORESOURCE_MEM && - window->resource.flags & IORESOURCE_MEM) || - (dev->resource[i].flags & IORESOURCE_IO && - window->resource.flags & IORESOURCE_IO)) && - contains(&window->resource, &dev->resource[i])) { - dev->resource[i].start += window->offset; - dev->resource[i].end += window->offset; - } - } - } } /* diff --git a/arch/ia64/sn/kernel/machvec.c b/arch/ia64/sn/kernel/machvec.c index 72a81f6..e9f7071 100644 --- a/arch/ia64/sn/kernel/machvec.c +++ b/arch/ia64/sn/kernel/machvec.c @@ -33,11 +33,9 @@ #include #ifdef CONFIG_IA64_SGI_SN1 -#define MACHVEC_PLATFORM_NAME sn1 -#define 
MACHVEC_PLATFORM_HEADER +#define MACHVEC_PLATFORM_NAME sn1 #else CONFIG_IA64_SGI_SN1 -#define MACHVEC_PLATFORM_NAME sn2 -#define MACHVEC_PLATFORM_HEADER +#define MACHVEC_PLATFORM_NAME sn2 #else #error "unknown platform" #endif diff --git a/arch/ia64/tools/print_offsets.c b/arch/ia64/tools/print_offsets.c index 8ce471f..5547bb7 100644 --- a/arch/ia64/tools/print_offsets.c +++ b/arch/ia64/tools/print_offsets.c @@ -193,7 +193,7 @@ main (int argc, char **argv) printf ("/*\n * DO NOT MODIFY\n *\n * This file was generated by " "arch/ia64/tools/print_offsets.\n *\n */\n\n"); - for (i = 0; i < (int) (sizeof (tab) / sizeof (tab[0])); ++i) + for (i = 0; i < sizeof (tab) / sizeof (tab[0]); ++i) { if (tab[i].name[0] == '\0') printf ("\n"); diff --git a/arch/parisc/kernel/module.c b/arch/parisc/kernel/module.c index 936eba6..703991d 100644 --- a/arch/parisc/kernel/module.c +++ b/arch/parisc/kernel/module.c @@ -568,7 +568,3 @@ int module_finalize(const Elf_Ehdr *hdr, #endif return 0; } - -void module_arch_cleanup(struct module *mod) -{ -} diff --git a/arch/ppc/kernel/module.c b/arch/ppc/kernel/module.c index d263eac..e9e586c 100644 --- a/arch/ppc/kernel/module.c +++ b/arch/ppc/kernel/module.c @@ -269,7 +269,3 @@ int module_finalize(const Elf_Ehdr *hdr, { return 0; } - -void module_arch_cleanup(struct module *mod) -{ -} diff --git a/arch/s390/kernel/module.c b/arch/s390/kernel/module.c index 913a216..d79e734 100644 --- a/arch/s390/kernel/module.c +++ b/arch/s390/kernel/module.c @@ -386,7 +386,3 @@ int module_finalize(const Elf_Ehdr *hdr, kfree(me->arch.syminfo); return 0; } - -void module_arch_cleanup(struct module *mod) -{ -} diff --git a/arch/sparc/kernel/module.c b/arch/sparc/kernel/module.c index 1bde20c..99183d7 100644 --- a/arch/sparc/kernel/module.c +++ b/arch/sparc/kernel/module.c @@ -145,7 +145,3 @@ int module_finalize(const Elf_Ehdr *hdr, { return 0; } - -void module_arch_cleanup(struct module *mod) -{ -} diff --git a/arch/sparc64/kernel/module.c b/arch/sparc64/kernel/module.c index c24ee5a..9918b76c 100644 --- a/arch/sparc64/kernel/module.c +++ b/arch/sparc64/kernel/module.c @@ -273,7 +273,3 @@ int module_finalize(const Elf_Ehdr *hdr, { return 0; } - -void module_arch_cleanup(struct module *mod) -{ -} diff --git a/arch/v850/kernel/module.c b/arch/v850/kernel/module.c index 64aeb3e..eedced8 100644 --- a/arch/v850/kernel/module.c +++ b/arch/v850/kernel/module.c @@ -230,8 +230,3 @@ int apply_relocate_add (Elf32_Shdr *sechdrs, const char *strtab, return 0; } - -void -module_arch_cleanup(struct module *mod) -{ -} diff --git a/arch/x86_64/kernel/module.c b/arch/x86_64/kernel/module.c index a82e568..9236b4b 100644 --- a/arch/x86_64/kernel/module.c +++ b/arch/x86_64/kernel/module.c @@ -231,7 +231,3 @@ int module_finalize(const Elf_Ehdr *hdr, { return 0; } - -void module_arch_cleanup(struct module *mod) -{ -} diff --git a/drivers/acpi/osl.c b/drivers/acpi/osl.c index 85aa0e4..c78ea7e 100644 --- a/drivers/acpi/osl.c +++ b/drivers/acpi/osl.c @@ -251,14 +251,7 @@ acpi_os_install_interrupt_handler(u32 irq, OSD_HANDLER handler, void *context) irq = acpi_fadt.sci_int; #ifdef CONFIG_IA64 - int vector; - - vector = acpi_irq_to_vector(irq); - if (vector < 0) { - printk(KERN_ERR PREFIX "SCI (IRQ%d) not registerd\n", irq); - return AE_OK; - } - irq = vector; + irq = gsi_to_vector(irq); #endif acpi_irq_irq = irq; acpi_irq_handler = handler; @@ -276,7 +269,7 @@ acpi_os_remove_interrupt_handler(u32 irq, OSD_HANDLER handler) { if (acpi_irq_handler) { #ifdef CONFIG_IA64 - irq = acpi_irq_to_vector(irq); + irq = 
gsi_to_vector(irq); #endif free_irq(irq, acpi_irq); acpi_irq_handler = NULL; @@ -461,9 +454,6 @@ acpi_os_read_pci_configuration ( int result = 0; int size = 0; struct pci_bus bus; -#ifdef CONFIG_IA64 - struct pci_controller ctrl; -#endif if (!value) return AE_BAD_PARAMETER; @@ -483,10 +473,6 @@ acpi_os_read_pci_configuration ( } bus.number = pci_id->bus; -#ifdef CONFIG_IA64 - ctrl.segment = pci_id->segment; - bus.sysdata = &ctrl; -#endif result = pci_root_ops->read(&bus, PCI_DEVFN(pci_id->device, pci_id->function), reg, size, value); @@ -504,9 +490,6 @@ acpi_os_write_pci_configuration ( int result = 0; int size = 0; struct pci_bus bus; -#ifdef CONFIG_IA64 - struct pci_controller ctrl; -#endif switch (width) { case 8: @@ -523,10 +506,6 @@ acpi_os_write_pci_configuration ( } bus.number = pci_id->bus; -#ifdef CONFIG_IA64 - ctrl.segment = pci_id->segment; - bus.sysdata = &ctrl; -#endif result = pci_root_ops->write(&bus, PCI_DEVFN(pci_id->device, pci_id->function), reg, size, value); diff --git a/drivers/acpi/pci_irq.c b/drivers/acpi/pci_irq.c index c94dabc..7160d98 100644 --- a/drivers/acpi/pci_irq.c +++ b/drivers/acpi/pci_irq.c @@ -24,8 +24,6 @@ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ */ -#include - #include #include #include @@ -38,16 +36,9 @@ #ifdef CONFIG_X86_IO_APIC #include #endif -#ifdef CONFIG_IOSAPIC -# include -#endif #include #include -#ifdef CONFIG_X86 -# define PCI_SEGMENT(x) 0 /* XXX fix me */ -#endif - #define _COMPONENT ACPI_PCI_COMPONENT ACPI_MODULE_NAME ("pci_irq") @@ -259,8 +250,6 @@ acpi_pci_irq_lookup ( return_VALUE(0); } - entry->irq = entry->link.index; - if (!entry->irq && entry->link.handle) { entry->irq = acpi_pci_link_get_irq(entry->link.handle, entry->link.index); if (!entry->irq) { @@ -299,7 +288,7 @@ acpi_pci_irq_derive ( while (!irq && bridge->bus->self) { pin = (pin + PCI_SLOT(bridge->devfn)) % 4; bridge = bridge->bus->self; - irq = acpi_pci_irq_lookup(PCI_SEGMENT(bridge), bridge->bus->number, PCI_SLOT(bridge->devfn), pin); + irq = acpi_pci_irq_lookup(0, bridge->bus->number, PCI_SLOT(bridge->devfn), pin); } if (!irq) { @@ -342,7 +331,7 @@ acpi_pci_irq_enable ( * First we check the PCI IRQ routing table (PRT) for an IRQ. PRT * values override any BIOS-assigned IRQs set during boot. */ - irq = acpi_pci_irq_lookup(PCI_SEGMENT(dev), dev->bus->number, PCI_SLOT(dev->devfn), pin); + irq = acpi_pci_irq_lookup(0, dev->bus->number, PCI_SLOT(dev->devfn), pin); /* * If no PRT entry was found, we'll try to derive an IRQ from the @@ -368,11 +357,7 @@ acpi_pci_irq_enable ( } } -#ifdef CONFIG_IA64 - dev->irq = gsi_to_irq(irq); -#else dev->irq = irq; -#endif ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Device %s using IRQ %d\n", dev->slot_name, dev->irq)); @@ -386,10 +371,6 @@ acpi_pci_irq_enable ( eisa_set_level_irq(dev->irq); } #endif -#ifdef CONFIG_IOSAPIC - if (acpi_irq_model == ACPI_IRQ_MODEL_IOSAPIC) - iosapic_enable_intr(dev->irq); -#endif return_VALUE(dev->irq); } diff --git a/drivers/acpi/pci_root.c b/drivers/acpi/pci_root.c index 0625085..210ef7f 100644 --- a/drivers/acpi/pci_root.c +++ b/drivers/acpi/pci_root.c @@ -23,8 +23,6 @@ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ */ -#include - #include #include #include @@ -204,6 +202,8 @@ acpi_pci_root_add ( switch (status) { case AE_OK: root->id.segment = (u16) value; + printk("_SEG exists! Unsupported. 
Abort.\n"); + BUG(); break; case AE_NOT_FOUND: ACPI_DEBUG_PRINT((ACPI_DB_INFO, @@ -265,12 +265,7 @@ acpi_pci_root_add ( * PCI namespace does not get created until this call is made (and * thus the root bridge's pci_dev does not exist). */ -#ifdef CONFIG_X86 root->bus = pcibios_scan_root(root->id.bus); -#else - root->bus = pcibios_scan_root(root->handle, - root->id.segment, root->id.bus); -#endif if (!root->bus) { ACPI_DEBUG_PRINT((ACPI_DB_ERROR, "Bus %02x:%02x not present in PCI namespace\n", diff --git a/drivers/char/agp/Kconfig b/drivers/char/agp/Kconfig index f22dcdb..9774d10 100644 --- a/drivers/char/agp/Kconfig +++ b/drivers/char/agp/Kconfig @@ -31,7 +31,7 @@ config AGP_GART config AGP_INTEL tristate "Intel 440LX/BX/GX, I8xx and E7x05 support" - depends on AGP && !X86_64 && !IA64 + depends on AGP && !X86_64 help This option gives you AGP support for the GLX component of the XFree86 4.x on Intel 440LX/BX/GX, 815, 820, 830, 840, 845, 850, 860 @@ -44,7 +44,7 @@ config AGP_INTEL #config AGP_I810 # tristate "Intel I810/I815/I830M (on-board) support" -# depends on AGP && !X86_64 && !IA64 +# depends on AGP && !X86_64 # help # This option gives you AGP support for the Xserver on the Intel 810 # 815 and 830m chipset boards for their on-board integrated graphics. This @@ -52,7 +52,7 @@ config AGP_INTEL config AGP_VIA tristate "VIA chipset support" - depends on AGP && !X86_64 && !IA64 + depends on AGP && !X86_64 help This option gives you AGP support for the GLX component of the XFree86 4.x on VIA MPV3/Apollo Pro chipsets. @@ -62,7 +62,7 @@ config AGP_VIA config AGP_AMD tristate "AMD Irongate, 761, and 762 support" - depends on AGP && !X86_64 && !IA64 + depends on AGP && !X86_64 help This option gives you AGP support for the GLX component of the XFree86 4.x on AMD Irongate, 761, and 762 chipsets. @@ -72,7 +72,7 @@ config AGP_AMD config AGP_SIS tristate "Generic SiS support" - depends on AGP && !X86_64 && !IA64 + depends on AGP && !X86_64 help This option gives you AGP support for the GLX component of the "soon to be released" XFree86 4.x on Silicon Integrated Systems [SiS] @@ -85,7 +85,7 @@ config AGP_SIS config AGP_ALI tristate "ALI chipset support" - depends on AGP && !X86_64 && !IA64 + depends on AGP && !X86_64 ---help--- This option gives you AGP support for the GLX component of the XFree86 4.x on the following ALi chipsets. The supported chipsets @@ -103,14 +103,14 @@ config AGP_ALI config AGP_SWORKS tristate "Serverworks LE/HE support" - depends on AGP && !X86_64 && !IA64 + depends on AGP && !X86_64 help Say Y here to support the Serverworks AGP card. See for product descriptions and images. config AGP_AMD_8151 tristate "AMD 8151 support" - depends on AGP && !IA64 + depends on AGP default GART_IOMMU help Say Y here to support the AMD 8151 AGP bridge and the builtin diff --git a/drivers/char/agp/backend.c b/drivers/char/agp/backend.c index f4e4593..79fa4de 100644 --- a/drivers/char/agp/backend.c +++ b/drivers/char/agp/backend.c @@ -252,9 +252,7 @@ int agp_register_driver (struct agp_driver *drv) /* FIXME: What to do with this? */ inter_module_register("drm_agp", THIS_MODULE, &drm_agp); -#if 0 pm_register(PM_PCI_DEV, PM_PCI_ID(agp_bridge->dev), agp_power); -#endif agp_count++; return 0; diff --git a/drivers/char/agp/hp-agp.c b/drivers/char/agp/hp-agp.c index 7058dfb..ef7ef95 100644 --- a/drivers/char/agp/hp-agp.c +++ b/drivers/char/agp/hp-agp.c @@ -1,36 +1,17 @@ /* - * HP AGPGART routines. - * Copyright (C) 2002-2003 Hewlett-Packard Co - * Bjorn Helgaas + * HP AGPGART routines. 
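+ * (The zx1 GART is a slice of the IOC's I/O pdir: hp_zx1_ioc_init() below either shares the pdir that sba_iommu has already enabled, or, if the IOTLB is still disabled, takes ownership and programs IBASE/IMASK/TCNFG/PDIR_BASE itself.)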
*/ -#include -#include -#include #include #include - -#include - +#include +#include #include "agp.h" #ifndef log2 #define log2(x) ffz(~(x)) #endif -#define HP_ZX1_IOC_OFFSET 0x1000 /* ACPI reports SBA, we want IOC */ - -/* HP ZX1 IOC registers */ -#define HP_ZX1_IBASE 0x300 -#define HP_ZX1_IMASK 0x308 -#define HP_ZX1_PCOM 0x310 -#define HP_ZX1_TCNFG 0x318 -#define HP_ZX1_PDIR_BASE 0x320 - -/* HP ZX1 LBA registers */ -#define HP_ZX1_AGP_STATUS 0x64 -#define HP_ZX1_AGP_COMMAND 0x68 - #define HP_ZX1_IOVA_BASE GB(1UL) #define HP_ZX1_IOVA_SIZE GB(1UL) #define HP_ZX1_GART_SIZE (HP_ZX1_IOVA_SIZE / 2) @@ -39,9 +20,6 @@ #define HP_ZX1_PDIR_VALID_BIT 0x8000000000000000UL #define HP_ZX1_IOVA_TO_PDIR(va) ((va - hp_private.iova_base) >> hp_private.io_tlb_shift) -/* AGP bridge need not be PCI device, but DRM thinks it is. */ -static struct pci_dev fake_bridge_dev; - static struct aper_size_info_fixed hp_zx1_sizes[] = { {0, 0, 0}, /* filled in by hp_zx1_fetch_size() */ @@ -53,8 +31,8 @@ static struct gatt_mask hp_zx1_masks[] = }; static struct _hp_private { - volatile u8 *ioc_regs; - volatile u8 *lba_regs; + struct pci_dev *ioc; + volatile u8 *registers; u64 *io_pdir; // PDIR for entire IOVA u64 *gatt; // PDIR just for GART (subset of above) u64 gatt_entries; @@ -69,8 +47,7 @@ static struct _hp_private { int io_pages_per_kpage; } hp_private; -static int __init -hp_zx1_ioc_shared (void) +static int __init hp_zx1_ioc_shared(void) { struct _hp_private *hp = &hp_private; @@ -82,7 +59,7 @@ hp_zx1_ioc_shared (void) * - IOVA space is 1Gb in size * - first 512Mb is IOMMU, second 512Mb is GART */ - hp->io_tlb_ps = INREG64(hp->ioc_regs, HP_ZX1_TCNFG); + hp->io_tlb_ps = INREG64(hp->registers, HP_ZX1_TCNFG); switch (hp->io_tlb_ps) { case 0: hp->io_tlb_shift = 12; break; case 1: hp->io_tlb_shift = 13; break; @@ -98,13 +75,13 @@ hp_zx1_ioc_shared (void) hp->io_page_size = 1 << hp->io_tlb_shift; hp->io_pages_per_kpage = PAGE_SIZE / hp->io_page_size; - hp->iova_base = INREG64(hp->ioc_regs, HP_ZX1_IBASE) & ~0x1; + hp->iova_base = INREG64(hp->registers, HP_ZX1_IBASE) & ~0x1; hp->gart_base = hp->iova_base + HP_ZX1_IOVA_SIZE - HP_ZX1_GART_SIZE; hp->gart_size = HP_ZX1_GART_SIZE; hp->gatt_entries = hp->gart_size / hp->io_page_size; - hp->io_pdir = phys_to_virt(INREG64(hp->ioc_regs, HP_ZX1_PDIR_BASE)); + hp->io_pdir = phys_to_virt(INREG64(hp->registers, HP_ZX1_PDIR_BASE)); hp->gatt = &hp->io_pdir[HP_ZX1_IOVA_TO_PDIR(hp->gart_base)]; if (hp->gatt[0] != HP_ZX1_SBA_IOMMU_COOKIE) { @@ -118,8 +95,7 @@ hp_zx1_ioc_shared (void) return 0; } -static int __init -hp_zx1_ioc_owner (void) +static int __init hp_zx1_ioc_owner(u8 ioc_rev) { struct _hp_private *hp = &hp_private; @@ -154,28 +130,47 @@ hp_zx1_ioc_owner (void) return 0; } -static int __init -hp_zx1_ioc_init (u64 ioc_hpa, u64 lba_hpa) +static int __init hp_zx1_ioc_init(void) { struct _hp_private *hp = &hp_private; + struct pci_dev *ioc; + int i; + u8 ioc_rev; + + ioc = pci_find_device(PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_ZX1_IOC, NULL); + if (!ioc) { + printk(KERN_ERR PFX "Detected HP ZX1 AGP bridge but no IOC\n"); + return -ENODEV; + } + hp->ioc = ioc; - hp->ioc_regs = ioremap(ioc_hpa, 1024); - hp->lba_regs = ioremap(lba_hpa, 256); + pci_read_config_byte(ioc, PCI_REVISION_ID, &ioc_rev); + + for (i = 0; i < PCI_NUM_RESOURCES; i++) { + if (pci_resource_flags(ioc, i) == IORESOURCE_MEM) { + hp->registers = (u8 *) ioremap(pci_resource_start(ioc, i), + pci_resource_len(ioc, i)); + break; + } + } + if (!hp->registers) { + printk(KERN_ERR PFX "Detected HP ZX1 AGP bridge but no CSRs\n"); + 
return -ENODEV; + } /* * If the IOTLB is currently disabled, we can take it over. * Otherwise, we have to share with sba_iommu. */ - hp->io_pdir_owner = (INREG64(hp->ioc_regs, HP_ZX1_IBASE) & 0x1) == 0; + hp->io_pdir_owner = (INREG64(hp->registers, HP_ZX1_IBASE) & 0x1) == 0; if (hp->io_pdir_owner) - return hp_zx1_ioc_owner(); + return hp_zx1_ioc_owner(ioc_rev); return hp_zx1_ioc_shared(); } -static int -hp_zx1_fetch_size (void) +static int hp_zx1_fetch_size(void) { int size; @@ -185,49 +180,47 @@ hp_zx1_fetch_size (void) return size; } -static int -hp_zx1_configure (void) +static int hp_zx1_configure(void) { struct _hp_private *hp = &hp_private; agp_bridge->gart_bus_addr = hp->gart_base; - agp_bridge->mode = INREG32(hp->lba_regs, HP_ZX1_AGP_STATUS); + agp_bridge->capndx = pci_find_capability(agp_bridge->dev, PCI_CAP_ID_AGP); + pci_read_config_dword(agp_bridge->dev, + agp_bridge->capndx + PCI_AGP_STATUS, &agp_bridge->mode); if (hp->io_pdir_owner) { - OUTREG64(hp->ioc_regs, HP_ZX1_PDIR_BASE, + OUTREG64(hp->registers, HP_ZX1_PDIR_BASE, virt_to_phys(hp->io_pdir)); - OUTREG64(hp->ioc_regs, HP_ZX1_TCNFG, hp->io_tlb_ps); - OUTREG64(hp->ioc_regs, HP_ZX1_IMASK, ~(HP_ZX1_IOVA_SIZE - 1)); - OUTREG64(hp->ioc_regs, HP_ZX1_IBASE, hp->iova_base | 0x1); - OUTREG64(hp->ioc_regs, HP_ZX1_PCOM, + OUTREG64(hp->registers, HP_ZX1_TCNFG, hp->io_tlb_ps); + OUTREG64(hp->registers, HP_ZX1_IMASK, ~(HP_ZX1_IOVA_SIZE - 1)); + OUTREG64(hp->registers, HP_ZX1_IBASE, hp->iova_base | 0x1); + OUTREG64(hp->registers, HP_ZX1_PCOM, hp->iova_base | log2(HP_ZX1_IOVA_SIZE)); - INREG64(hp->ioc_regs, HP_ZX1_PCOM); + INREG64(hp->registers, HP_ZX1_PCOM); } return 0; } -static void -hp_zx1_cleanup (void) +static void hp_zx1_cleanup(void) { struct _hp_private *hp = &hp_private; if (hp->io_pdir_owner) - OUTREG64(hp->ioc_regs, HP_ZX1_IBASE, 0); - iounmap((void *) hp->ioc_regs); + OUTREG64(hp->registers, HP_ZX1_IBASE, 0); + iounmap((void *) hp->registers); } -static void -hp_zx1_tlbflush (agp_memory * mem) +static void hp_zx1_tlbflush(agp_memory * mem) { struct _hp_private *hp = &hp_private; - OUTREG64(hp->ioc_regs, HP_ZX1_PCOM, hp->gart_base | log2(hp->gart_size)); - INREG64(hp->ioc_regs, HP_ZX1_PCOM); + OUTREG64(hp->registers, HP_ZX1_PCOM, hp->gart_base | log2(hp->gart_size)); + INREG64(hp->registers, HP_ZX1_PCOM); } -static int -hp_zx1_create_gatt_table (void) +static int hp_zx1_create_gatt_table(void) { struct _hp_private *hp = &hp_private; int i; @@ -254,8 +247,7 @@ hp_zx1_create_gatt_table (void) return 0; } -static int -hp_zx1_free_gatt_table (void) +static int hp_zx1_free_gatt_table(void) { struct _hp_private *hp = &hp_private; @@ -267,8 +259,7 @@ hp_zx1_free_gatt_table (void) return 0; } -static int -hp_zx1_insert_memory (agp_memory * mem, off_t pg_start, int type) +static int hp_zx1_insert_memory(agp_memory * mem, off_t pg_start, int type) { struct _hp_private *hp = &hp_private; int i, k; @@ -313,8 +304,7 @@ hp_zx1_insert_memory (agp_memory * mem, off_t pg_start, int type) return 0; } -static int -hp_zx1_remove_memory (agp_memory * mem, off_t pg_start, int type) +static int hp_zx1_remove_memory(agp_memory * mem, off_t pg_start, int type) { struct _hp_private *hp = &hp_private; int i, io_pg_start, io_pg_count; @@ -333,30 +323,12 @@ hp_zx1_remove_memory (agp_memory * mem, off_t pg_start, int type) return 0; } -static unsigned long -hp_zx1_mask_memory(unsigned long addr, int type) +static unsigned long hp_zx1_mask_memory(unsigned long addr, int type) { return HP_ZX1_PDIR_VALID_BIT | addr; } -static void -hp_zx1_agp_enable (u32 mode) 
-{ - struct _hp_private *hp = &hp_private; - u32 command; - - command = INREG32(hp->lba_regs, HP_ZX1_AGP_STATUS); - - command = agp_collect_device_status(mode, command); - command |= 0x00000100; - - OUTREG32(hp->lba_regs, HP_ZX1_AGP_COMMAND, command); - - agp_device_command(command, 0); -} - -static int __init -hp_zx1_setup (u64 ioc_hpa, u64 lba_hpa) +static int __init hp_zx1_setup (struct pci_dev *pdev __attribute__((unused))) { agp_bridge->masks = hp_zx1_masks; agp_bridge->dev_private_data = NULL; @@ -367,7 +339,7 @@ hp_zx1_setup (u64 ioc_hpa, u64 lba_hpa) agp_bridge->cleanup = hp_zx1_cleanup; agp_bridge->tlb_flush = hp_zx1_tlbflush; agp_bridge->mask_memory = hp_zx1_mask_memory; - agp_bridge->agp_enable = hp_zx1_agp_enable; + agp_bridge->agp_enable = agp_generic_enable; agp_bridge->cache_flush = global_cache_flush; agp_bridge->create_gatt_table = hp_zx1_create_gatt_table; agp_bridge->free_gatt_table = hp_zx1_free_gatt_table; @@ -378,85 +350,73 @@ hp_zx1_setup (u64 ioc_hpa, u64 lba_hpa) agp_bridge->agp_alloc_page = agp_generic_alloc_page; agp_bridge->agp_destroy_page = agp_generic_destroy_page; agp_bridge->cant_use_aperture = 1; - - return hp_zx1_ioc_init(ioc_hpa, lba_hpa); + return hp_zx1_ioc_init(); } -static acpi_status __init -zx1_gart_probe (acpi_handle obj, u32 depth, void *context, void **ret) +static int __init agp_find_supported_device(struct pci_dev *dev) { - acpi_handle handle, parent; - acpi_status status; - struct acpi_device_info info; - u64 lba_hpa, sba_hpa, length; - - status = hp_acpi_csr_space(obj, &lba_hpa, &length); - if (ACPI_FAILURE(status)) - return 1; - - /* Look for an enclosing IOC scope and find its CSR space */ - handle = obj; - do { - status = acpi_get_object_info(handle, &info); - if (ACPI_SUCCESS(status)) { - /* TBD check _CID also */ - info.hardware_id[sizeof(info.hardware_id)-1] = '\0'; - if (!strcmp(info.hardware_id, "HWP0001")) { - status = hp_acpi_csr_space(handle, &sba_hpa, &length); - if (ACPI_SUCCESS(status)) - break; - else { - printk(KERN_ERR PFX "Detected HP ZX1 " - "AGP LBA but no IOC.\n"); - return status; - } - } - } - - status = acpi_get_parent(handle, &parent); - handle = parent; - } while (ACPI_SUCCESS(status)); + agp_bridge->dev = dev; - if (hp_zx1_setup(sba_hpa + HP_ZX1_IOC_OFFSET, lba_hpa)) - return 1; - - fake_bridge_dev.vendor = PCI_VENDOR_ID_HP; - fake_bridge_dev.device = PCI_DEVICE_ID_HP_ZX1_LBA; - - return 0; + /* ZX1 LBAs can be either PCI or AGP bridges */ + if (pci_find_capability(dev, PCI_CAP_ID_AGP)) { + printk(KERN_INFO PFX "Detected HP ZX1 AGP chipset at %s\n", + dev->slot_name); + agp_bridge->type = HP_ZX1; + agp_bridge->dev = dev; + return hp_zx1_setup(dev); + } + return -ENODEV; } static struct agp_driver hp_agp_driver = { .owner = THIS_MODULE, }; -static int __init -agp_hp_init (void) +static int __init agp_hp_probe (struct pci_dev *dev, const struct pci_device_id *ent) { - acpi_status status; - - status = acpi_get_devices("HWP0003", zx1_gart_probe, "HWP0003 AGP LBA", NULL); - if (!(ACPI_SUCCESS(status))) { - agp_bridge->type = NOT_SUPPORTED; - printk(KERN_INFO PFX "Failed to initialize zx1 AGP.\n"); - return -ENODEV; + if (agp_find_supported_device(dev) == 0) { + hp_agp_driver.dev = dev; + agp_register_driver(&hp_agp_driver); + return 0; } + return -ENODEV; +} - if (fake_bridge_dev.vendor && !agp_bridge->type) { - hp_agp_driver.dev = &fake_bridge_dev; - agp_bridge->type = HP_ZX1; - agp_bridge->dev = &fake_bridge_dev; - return agp_register_driver(&hp_agp_driver); +static struct pci_device_id agp_hp_pci_table[] __initdata = 
{ + { + .class = (PCI_CLASS_BRIDGE_HOST << 8), + .class_mask = ~0, + .vendor = PCI_VENDOR_ID_HP, + .device = PCI_DEVICE_ID_HP_ZX1_LBA, + .subvendor = PCI_ANY_ID, + .subdevice = PCI_ANY_ID, + }, + { } +}; - } else { - return -ENODEV; - } +MODULE_DEVICE_TABLE(pci, agp_hp_pci_table); + +static struct __initdata pci_driver agp_hp_pci_driver = { + .name = "agpgart-hp", + .id_table = agp_hp_pci_table, + .probe = agp_hp_probe, +}; + +static int __init agp_hp_init(void) +{ + int ret_val; + + ret_val = pci_module_init(&agp_hp_pci_driver); + if (ret_val) + agp_bridge->type = NOT_SUPPORTED; + + return ret_val; } -static void __exit -agp_hp_cleanup (void) +static void __exit agp_hp_cleanup(void) { agp_unregister_driver(&hp_agp_driver); + pci_unregister_driver(&agp_hp_pci_driver); } module_init(agp_hp_init); diff --git a/drivers/char/agp/i460-agp.c b/drivers/char/agp/i460-agp.c index a0baa6f..abc3ab6 100644 --- a/drivers/char/agp/i460-agp.c +++ b/drivers/char/agp/i460-agp.c @@ -571,7 +571,6 @@ static int __init agp_intel_i460_probe (struct pci_dev *dev, const struct pci_de if (cap_ptr == 0) return -ENODEV; - agp_bridge->type = INTEL_460GX; agp_bridge->dev = dev; agp_bridge->capndx = cap_ptr; intel_i460_setup(dev); diff --git a/drivers/char/drm/drmP.h b/drivers/char/drm/drmP.h index 9d6433b..c0f95fe 100644 --- a/drivers/char/drm/drmP.h +++ b/drivers/char/drm/drmP.h @@ -225,16 +225,16 @@ static inline struct page * vmalloc_to_page(void * vmalloc_addr) if (len > DRM_PROC_LIMIT) { ret; *eof = 1; return len - offset; } /* Mapping helper macros */ -#define DRM_IOREMAP(map, dev) \ - (map)->handle = DRM(ioremap)( (map)->offset, (map)->size, (dev) ) +#define DRM_IOREMAP(map) \ + (map)->handle = DRM(ioremap)( (map)->offset, (map)->size ) -#define DRM_IOREMAP_NOCACHE(map, dev) \ - (map)->handle = DRM(ioremap_nocache)((map)->offset, (map)->size, (dev)) +#define DRM_IOREMAP_NOCACHE(map) \ + (map)->handle = DRM(ioremap_nocache)((map)->offset, (map)->size) -#define DRM_IOREMAPFREE(map, dev) \ - do { \ - if ( (map)->handle && (map)->size ) \ - DRM(ioremapfree)( (map)->handle, (map)->size, (dev) ); \ +#define DRM_IOREMAPFREE(map) \ + do { \ + if ( (map)->handle && (map)->size ) \ + DRM(ioremapfree)( (map)->handle, (map)->size ); \ } while (0) #define DRM_FIND_MAP(_map, _o) \ @@ -652,10 +652,9 @@ extern void DRM(free)(void *pt, size_t size, int area); extern unsigned long DRM(alloc_pages)(int order, int area); extern void DRM(free_pages)(unsigned long address, int order, int area); -extern void *DRM(ioremap)(unsigned long offset, unsigned long size, drm_device_t *dev); -extern void *DRM(ioremap_nocache)(unsigned long offset, unsigned long size, - drm_device_t *dev); -extern void DRM(ioremapfree)(void *pt, unsigned long size, drm_device_t *dev); +extern void *DRM(ioremap)(unsigned long offset, unsigned long size); +extern void *DRM(ioremap_nocache)(unsigned long offset, unsigned long size); +extern void DRM(ioremapfree)(void *pt, unsigned long size); #if __REALLY_HAVE_AGP extern agp_memory *DRM(alloc_agp)(int pages, u32 type); diff --git a/drivers/char/drm/drm_bufs.h b/drivers/char/drm/drm_bufs.h index b1e484d..5ee9333 100644 --- a/drivers/char/drm/drm_bufs.h +++ b/drivers/char/drm/drm_bufs.h @@ -123,7 +123,7 @@ int DRM(addmap)( struct inode *inode, struct file *filp, MTRR_TYPE_WRCOMB, 1 ); } #endif - map->handle = DRM(ioremap)( map->offset, map->size, dev ); + map->handle = DRM(ioremap)( map->offset, map->size ); break; case _DRM_SHM: @@ -245,7 +245,7 @@ int DRM(rmmap)(struct inode *inode, struct file *filp, 
DRM_DEBUG("mtrr_del = %d\n", retcode); } #endif - DRM(ioremapfree)(map->handle, map->size, dev); + DRM(ioremapfree)(map->handle, map->size); break; case _DRM_SHM: vfree(map->handle); diff --git a/drivers/char/drm/drm_drv.h b/drivers/char/drm/drm_drv.h index 9f51aeb..2ea8e6f 100644 --- a/drivers/char/drm/drm_drv.h +++ b/drivers/char/drm/drm_drv.h @@ -454,7 +454,7 @@ static int DRM(takedown)( drm_device_t *dev ) DRM_DEBUG( "mtrr_del=%d\n", retcode ); } #endif - DRM(ioremapfree)( map->handle, map->size, dev ); + DRM(ioremapfree)( map->handle, map->size ); break; case _DRM_SHM: vfree(map->handle); diff --git a/drivers/char/drm/drm_memory.h b/drivers/char/drm/drm_memory.h index 3dbd03b..8b63416 100644 --- a/drivers/char/drm/drm_memory.h +++ b/drivers/char/drm/drm_memory.h @@ -31,10 +31,6 @@ #include #include "drmP.h" -#include - -#include -#include /* Cut down version of drm_memory_debug.h, which used to be called * drm_memory.h. If you want the debug functionality, change 0 to 1 @@ -42,150 +38,6 @@ */ #define DEBUG_MEMORY 0 -#if __REALLY_HAVE_AGP - -/* - * Find the drm_map that covers the range [offset, offset+size). - */ -static inline drm_map_t * -drm_lookup_map (unsigned long offset, unsigned long size, drm_device_t *dev) -{ - struct list_head *list; - drm_map_list_t *r_list; - drm_map_t *map; - - list_for_each(list, &dev->maplist->head) { - r_list = (drm_map_list_t *) list; - map = r_list->map; - if (!map) - continue; - if (map->offset <= offset && (offset + size) <= (map->offset + map->size)) - return map; - } - return NULL; -} - -static inline void * -agp_remap (unsigned long offset, unsigned long size, drm_device_t *dev) -{ - unsigned long *phys_addr_map, i, num_pages = PAGE_ALIGN(size) / PAGE_SIZE; - struct drm_agp_mem *agpmem; - struct page **page_map; - void *addr; - - size = PAGE_ALIGN(size); - - for (agpmem = dev->agp->memory; agpmem; agpmem = agpmem->next) - if (agpmem->bound <= offset - && (agpmem->bound + (agpmem->pages << PAGE_SHIFT)) >= (offset + size)) - break; - if (!agpmem) - return NULL; - - /* - * OK, we're mapping AGP space on a chipset/platform on which memory accesses by - * the CPU do not get remapped by the GART. We fix this by using the kernel's - * page-table instead (that's probably faster anyhow...). - */ - /* note: use vmalloc() because num_pages could be large... 
*/ - page_map = vmalloc(num_pages * sizeof(struct page *)); - if (!page_map) - return NULL; - - phys_addr_map = agpmem->memory->memory + (offset - agpmem->bound) / PAGE_SIZE; - for (i = 0; i < num_pages; ++i) - page_map[i] = pfn_to_page(phys_addr_map[i] >> PAGE_SHIFT); - addr = vmap(page_map, num_pages, VM_IOREMAP, PAGE_AGP); - vfree(page_map); - if (!addr) - return NULL; - - flush_tlb_kernel_range((unsigned long) addr, (unsigned long) addr + size); - return addr; -} - -static inline unsigned long -drm_follow_page (void *vaddr) -{ - pgd_t *pgd = pgd_offset_k((unsigned long) vaddr); - pmd_t *pmd = pmd_offset(pgd, (unsigned long) vaddr); - pte_t *ptep = pte_offset_kernel(pmd, (unsigned long) vaddr); - return pte_pfn(*ptep) << PAGE_SHIFT; -} - -#else /* !__REALLY_HAVE_AGP */ - -static inline void * -agp_remap (unsigned long offset, unsigned long size, drm_device_t *dev) -{ - return NULL; -} - -#endif /* !__REALLY_HAVE_AGP */ - -static inline void *drm_ioremap(unsigned long offset, unsigned long size, drm_device_t *dev) -{ - int remap_aperture = 0; - -#if __REALLY_HAVE_AGP - if (dev->agp->cant_use_aperture) { - drm_map_t *map = drm_lookup_map(offset, size, dev); - - if (map && map->type == _DRM_AGP) - remap_aperture = 1; - } -#endif - if (remap_aperture) - return agp_remap(offset, size, dev); - else - return ioremap(offset, size); -} - -static inline void *drm_ioremap_nocache(unsigned long offset, unsigned long size, - drm_device_t *dev) -{ - int remap_aperture = 0; - -#if __REALLY_HAVE_AGP - if (dev->agp->cant_use_aperture) { - drm_map_t *map = drm_lookup_map(offset, size, dev); - - if (map && map->type == _DRM_AGP) - remap_aperture = 1; - } -#endif - if (remap_aperture) - return agp_remap(offset, size, dev); - else - return ioremap_nocache(offset, size); -} - -static inline void drm_ioremapfree(void *pt, unsigned long size, drm_device_t *dev) -{ - int unmap_aperture = 0; -#if __REALLY_HAVE_AGP - /* - * This is a bit ugly. It would be much cleaner if the DRM API would use separate - * routines for handling mappings in the AGP space. Hopefully this can be done in - * a future revision of the interface... 
- */ - if (dev->agp->cant_use_aperture - && ((unsigned long) pt >= VMALLOC_START && (unsigned long) pt < VMALLOC_END)) - { - unsigned long offset; - drm_map_t *map; - - offset = drm_follow_page(pt) | ((unsigned long) pt & ~PAGE_MASK); - map = drm_lookup_map(offset, size, dev); - if (map && map->type == _DRM_AGP) - unmap_aperture = 1; - } -#endif - if (unmap_aperture) - vunmap(pt); - else - iounmap(pt); -} #if DEBUG_MEMORY #include "drm_memory_debug.h" @@ -266,19 +118,19 @@ void DRM(free_pages)(unsigned long address, int order, int area) free_pages(address, order); } -void *DRM(ioremap)(unsigned long offset, unsigned long size, drm_device_t *dev) +void *DRM(ioremap)(unsigned long offset, unsigned long size) { - return drm_ioremap(offset, size, dev); + return ioremap(offset, size); } -void *DRM(ioremap_nocache)(unsigned long offset, unsigned long size, drm_device_t *dev) +void *DRM(ioremap_nocache)(unsigned long offset, unsigned long size) { - return drm_ioremap_nocache(offset, size, dev); + return ioremap_nocache(offset, size); } -void DRM(ioremapfree)(void *pt, unsigned long size, drm_device_t *dev) +void DRM(ioremapfree)(void *pt, unsigned long size) { - drm_ioremapfree(pt, size, dev); + iounmap(pt); } #if __REALLY_HAVE_AGP diff --git a/drivers/char/drm/drm_memory_debug.h b/drivers/char/drm/drm_memory_debug.h index b87ecbc..b189793 100644 --- a/drivers/char/drm/drm_memory_debug.h +++ b/drivers/char/drm/drm_memory_debug.h @@ -269,7 +269,7 @@ void DRM(free_pages)(unsigned long address, int order, int area) } } -void *DRM(ioremap)(unsigned long offset, unsigned long size, drm_device_t *dev) +void *DRM(ioremap)(unsigned long offset, unsigned long size) { void *pt; @@ -279,7 +279,7 @@ void *DRM(ioremap)(unsigned long offset, unsigned long size, drm_device_t *dev) return NULL; } - if (!(pt = drm_ioremap(offset, size, dev))) { + if (!(pt = ioremap(offset, size))) { spin_lock(&DRM(mem_lock)); ++DRM(mem_stats)[DRM_MEM_MAPPINGS].fail_count; spin_unlock(&DRM(mem_lock)); @@ -292,7 +292,7 @@ void *DRM(ioremap)(unsigned long offset, unsigned long size, drm_device_t *dev) return pt; } -void *DRM(ioremap_nocache)(unsigned long offset, unsigned long size, drm_device_t *dev) +void *DRM(ioremap_nocache)(unsigned long offset, unsigned long size) { void *pt; @@ -302,7 +302,7 @@ void *DRM(ioremap_nocache)(unsigned long offset, unsigned long size, drm_device_ return NULL; } - if (!(pt = drm_ioremap_nocache(offset, size, dev))) { + if (!(pt = ioremap_nocache(offset, size))) { spin_lock(&DRM(mem_lock)); ++DRM(mem_stats)[DRM_MEM_MAPPINGS].fail_count; spin_unlock(&DRM(mem_lock)); @@ -315,7 +315,7 @@ void *DRM(ioremap_nocache)(unsigned long offset, unsigned long size, drm_device_ return pt; } -void DRM(ioremapfree)(void *pt, unsigned long size, drm_device_t *dev) +void DRM(ioremapfree)(void *pt, unsigned long size) { int alloc_count; int free_count; @@ -324,7 +324,7 @@ void DRM(ioremapfree)(void *pt, unsigned long size, drm_device_t *dev) DRM_MEM_ERROR(DRM_MEM_MAPPINGS, "Attempt to free NULL pointer\n"); else - drm_ioremapfree(pt, size, dev); + iounmap(pt); spin_lock(&DRM(mem_lock)); DRM(mem_stats)[DRM_MEM_MAPPINGS].bytes_freed += size; diff --git a/drivers/char/drm/drm_vm.h b/drivers/char/drm/drm_vm.h index 18bda8a..7711639 100644 --- a/drivers/char/drm/drm_vm.h +++ b/drivers/char/drm/drm_vm.h @@ -107,12 +107,12 @@ struct page *DRM(vm_nopage)(struct vm_area_struct *vma, * Get the page, inc the use count, and return it */ offset = (baddr - agpmem->bound) >> PAGE_SHIFT; + agpmem->memory->memory[offset] &= 
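The deleted drm_lookup_map() above picks the map whose [offset, offset+size) range wholly contains the request. A standalone sketch of just that containment test, with the DRM types simplified away:

/* sketch: the interval-containment test from the removed drm_lookup_map() */
#include <stdio.h>

struct map { unsigned long offset, size; };

static const struct map *lookup(const struct map *maps, int n,
                                unsigned long offset, unsigned long size)
{
	int i;

	for (i = 0; i < n; i++)
		if (maps[i].offset <= offset &&
		    offset + size <= maps[i].offset + maps[i].size)
			return &maps[i];
	return NULL;	/* no single map covers the whole range */
}

int main(void)
{
	struct map maps[] = { { 0x1000, 0x4000 }, { 0x8000, 0x2000 } };

	printf("%s\n", lookup(maps, 2, 0x2000, 0x1000) ? "hit" : "miss");	/* hit */
	printf("%s\n", lookup(maps, 2, 0x4800, 0x1000) ? "hit" : "miss");	/* miss */
	return 0;
}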
diff --git a/drivers/char/drm/drm_vm.h b/drivers/char/drm/drm_vm.h
index 18bda8a..7711639 100644
--- a/drivers/char/drm/drm_vm.h
+++ b/drivers/char/drm/drm_vm.h
@@ -107,12 +107,12 @@ struct page *DRM(vm_nopage)(struct vm_area_struct *vma,
 		 * Get the page, inc the use count, and return it
 		 */
 		offset = (baddr - agpmem->bound) >> PAGE_SHIFT;
+		agpmem->memory->memory[offset] &= dev->agp->page_mask;
 		page = virt_to_page(__va(agpmem->memory->memory[offset]));
 		get_page(page);
 
-		DRM_DEBUG("baddr = 0x%lx page = 0x%p, offset = 0x%lx, count=%d\n",
-			  baddr, __va(agpmem->memory->memory[offset]), offset,
-			  atomic_read(&page->count));
+		DRM_DEBUG("baddr = 0x%lx page = 0x%p, offset = 0x%lx\n",
+			  baddr, __va(agpmem->memory->memory[offset]), offset);
 
 		return page;
 	}
@@ -206,7 +206,7 @@ void DRM(vm_shm_close)(struct vm_area_struct *vma)
 					DRM_DEBUG("mtrr_del = %d\n", retcode);
 				}
 #endif
-				DRM(ioremapfree)(map->handle, map->size, dev);
+				DRM(ioremapfree)(map->handle, map->size);
 				break;
 			case _DRM_SHM:
 				vfree(map->handle);
@@ -420,16 +420,15 @@ int DRM(mmap)(struct file *filp, struct vm_area_struct *vma)
 
 	switch (map->type) {
 	case _DRM_AGP:
-#if __REALLY_HAVE_AGP
-		if (dev->agp->cant_use_aperture) {
+#if defined(__alpha__)
 		/*
-		 * On some platforms we can't talk to bus dma address from the CPU, so for
-		 * memory of type DRM_AGP, we'll deal with sorting out the real physical
-		 * pages and mappings in nopage()
+		 * On Alpha we can't talk to bus dma address from the
+		 * CPU, so for memory of type DRM_AGP, we'll deal with
+		 * sorting out the real physical pages and mappings
+		 * in nopage()
 		 */
 		vma->vm_ops = &DRM(vm_ops);
 		break;
-		}
 #endif
 		/* fall through to _DRM_FRAME_BUFFER... */
 	case _DRM_FRAME_BUFFER:
@@ -440,15 +439,15 @@ int DRM(mmap)(struct file *filp, struct vm_area_struct *vma)
 			pgprot_val(vma->vm_page_prot) |= _PAGE_PCD;
 			pgprot_val(vma->vm_page_prot) &= ~_PAGE_PWT;
 		}
+#elif defined(__ia64__)
+		if (map->type != _DRM_AGP)
+			vma->vm_page_prot =
+				pgprot_writecombine(vma->vm_page_prot);
 #elif defined(__powerpc__)
 		pgprot_val(vma->vm_page_prot) |= _PAGE_NO_CACHE | _PAGE_GUARDED;
 #endif
 		vma->vm_flags |= VM_IO;	/* not in core dump */
 	}
-#if defined(__ia64__)
-	if (map->type != _DRM_AGP)
-		vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
-#endif
 	offset = DRIVER_GET_REG_OFS();
 #ifdef __sparc__
 	if (io_remap_page_range(DRM_RPR_ARG(vma) vma->vm_start,
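The vm_nopage() hunk above converts a faulting bus address into a page index relative to where the AGP memory was bound, then masks the stored entry with the chipset's page_mask. The arithmetic alone, as a runnable sketch (addresses and the 4 KB page size are illustrative):

/* sketch: bus address -> page index, as in DRM(vm_nopage) above */
#include <stdio.h>

#define PAGE_SHIFT 12	/* assume 4 KB pages */

int main(void)
{
	unsigned long bound = 0xf8000000UL;	/* where the memory was bound */
	unsigned long baddr = 0xf8003000UL;	/* faulting bus address */
	unsigned long offset = (baddr - bound) >> PAGE_SHIFT;

	printf("page index %lu\n", offset);	/* 3 */
	return 0;
}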
diff --git a/drivers/char/drm/gamma_dma.c b/drivers/char/drm/gamma_dma.c
index 77135f2..8246b1b 100644
--- a/drivers/char/drm/gamma_dma.c
+++ b/drivers/char/drm/gamma_dma.c
@@ -612,7 +612,7 @@ static int gamma_do_init_dma( drm_device_t *dev, drm_gamma_init_t *init )
 	} else {
 		DRM_FIND_MAP( dev_priv->buffers, init->buffers_offset );
 
-		DRM_IOREMAP( dev_priv->buffers, dev );
+		DRM_IOREMAP( dev_priv->buffers );
 
 		buf = dma->buflist[GLINT_DRI_BUF_COUNT];
 		pgt = buf->address;
@@ -651,7 +651,7 @@ int gamma_do_cleanup_dma( drm_device_t *dev )
 		drm_gamma_private_t *dev_priv = dev->dev_private;
 
 		if ( dev_priv->buffers != NULL )
-			DRM_IOREMAPFREE( dev_priv->buffers, dev );
+			DRM_IOREMAPFREE( dev_priv->buffers );
 
 		DRM(free)( dev->dev_private, sizeof(drm_gamma_private_t),
 			   DRM_MEM_DRIVER );
diff --git a/drivers/char/drm/i810_dma.c b/drivers/char/drm/i810_dma.c
index f274d4b..f3c323f 100644
--- a/drivers/char/drm/i810_dma.c
+++ b/drivers/char/drm/i810_dma.c
@@ -246,7 +246,7 @@ int i810_dma_cleanup(drm_device_t *dev)
 
 		if(dev_priv->ring.virtual_start) {
 			DRM(ioremapfree)((void *) dev_priv->ring.virtual_start,
-					 dev_priv->ring.Size, dev);
+					 dev_priv->ring.Size);
 		}
 		if (dev_priv->hw_status_page) {
 			pci_free_consistent(dev->pdev, PAGE_SIZE,
@@ -263,7 +263,7 @@ int i810_dma_cleanup(drm_device_t *dev)
 			drm_buf_t *buf = dma->buflist[ i ];
 			drm_i810_buf_priv_t *buf_priv = buf->dev_private;
 			if ( buf_priv->kernel_virtual && buf->total )
-				DRM(ioremapfree)(buf_priv->kernel_virtual, buf->total, dev);
+				DRM(ioremapfree)(buf_priv->kernel_virtual, buf->total);
 		}
 	}
 	return 0;
@@ -333,7 +333,7 @@ static int i810_freelist_init(drm_device_t *dev, drm_i810_private_t *dev_priv)
 		*buf_priv->in_use = I810_BUF_FREE;
 
 		buf_priv->kernel_virtual = DRM(ioremap)(buf->bus_address,
-							buf->total, dev);
+							buf->total);
 	}
 	return 0;
 }
@@ -386,7 +386,7 @@ static int i810_dma_initialize(drm_device_t *dev,
 
 	dev_priv->ring.virtual_start = DRM(ioremap)(dev->agp->base +
						    init->ring_start,
-						    init->ring_size, dev);
+						    init->ring_size);
 
 	if (dev_priv->ring.virtual_start == NULL) {
 		dev->dev_private = (void *) dev_priv;
diff --git a/drivers/char/drm/i830_dma.c b/drivers/char/drm/i830_dma.c
index b0efda4..fe105ff 100644
--- a/drivers/char/drm/i830_dma.c
+++ b/drivers/char/drm/i830_dma.c
@@ -246,7 +246,7 @@ int i830_dma_cleanup(drm_device_t *dev)
 
 		if (dev_priv->ring.virtual_start) {
 			DRM(ioremapfree)((void *) dev_priv->ring.virtual_start,
-					 dev_priv->ring.Size, dev);
+					 dev_priv->ring.Size);
 		}
 		if (dev_priv->hw_status_page) {
 			pci_free_consistent(dev->pdev, PAGE_SIZE,
@@ -264,7 +264,7 @@ int i830_dma_cleanup(drm_device_t *dev)
 			drm_buf_t *buf = dma->buflist[ i ];
 			drm_i830_buf_priv_t *buf_priv = buf->dev_private;
 			if ( buf_priv->kernel_virtual && buf->total )
-				DRM(ioremapfree)(buf_priv->kernel_virtual, buf->total, dev);
+				DRM(ioremapfree)(buf_priv->kernel_virtual, buf->total);
 		}
 	}
 	return 0;
@@ -340,7 +340,7 @@ static int i830_freelist_init(drm_device_t *dev, drm_i830_private_t *dev_priv)
 		*buf_priv->in_use = I830_BUF_FREE;
 
 		buf_priv->kernel_virtual = DRM(ioremap)(buf->bus_address,
-							buf->total, dev);
+							buf->total);
 	}
 	return 0;
 }
@@ -394,7 +394,7 @@ static int i830_dma_initialize(drm_device_t *dev,
 
 	dev_priv->ring.virtual_start = DRM(ioremap)(dev->agp->base +
						    init->ring_start,
-						    init->ring_size, dev);
+						    init->ring_size);
 
 	if (dev_priv->ring.virtual_start == NULL) {
 		dev->dev_private = (void *) dev_priv;
diff --git a/drivers/char/drm/mga_dma.c b/drivers/char/drm/mga_dma.c
index bfaee0d..71b9f3a 100644
--- a/drivers/char/drm/mga_dma.c
+++ b/drivers/char/drm/mga_dma.c
@@ -554,9 +554,9 @@ static int mga_do_init_dma( drm_device_t *dev, drm_mga_init_t *init )
 		(drm_mga_sarea_t *)((u8 *)dev_priv->sarea->handle +
 				    init->sarea_priv_offset);
 
-	DRM_IOREMAP( dev_priv->warp, dev );
-	DRM_IOREMAP( dev_priv->primary, dev );
-	DRM_IOREMAP( dev_priv->buffers, dev );
+	DRM_IOREMAP( dev_priv->warp );
+	DRM_IOREMAP( dev_priv->primary );
+	DRM_IOREMAP( dev_priv->buffers );
 
 	if(!dev_priv->warp->handle ||
 	   !dev_priv->primary->handle ||
@@ -651,11 +651,11 @@ int mga_do_cleanup_dma( drm_device_t *dev )
 		drm_mga_private_t *dev_priv = dev->dev_private;
 
 		if ( dev_priv->warp != NULL )
-			DRM_IOREMAPFREE( dev_priv->warp, dev );
+			DRM_IOREMAPFREE( dev_priv->warp );
 
 		if ( dev_priv->primary != NULL )
-			DRM_IOREMAPFREE( dev_priv->primary, dev );
+			DRM_IOREMAPFREE( dev_priv->primary );
 
 		if ( dev_priv->buffers != NULL )
-			DRM_IOREMAPFREE( dev_priv->buffers, dev );
+			DRM_IOREMAPFREE( dev_priv->buffers );
 
 		if ( dev_priv->head != NULL ) {
 			mga_freelist_cleanup( dev );
diff --git a/drivers/char/drm/mga_drv.h b/drivers/char/drm/mga_drv.h
index 9396ae9..feb389d 100644
--- a/drivers/char/drm/mga_drv.h
+++ b/drivers/char/drm/mga_drv.h
@@ -226,7 +226,7 @@ do {									\
 	if ( MGA_VERBOSE ) {						\
 		DRM_INFO( "BEGIN_DMA( %d ) in %s\n",			\
 			  (n), __FUNCTION__ );				\
-		DRM_INFO( "   space=0x%x req=0x%Zx\n",			\
+		DRM_INFO( "   space=0x%x req=0x%x\n",			\
 			  dev_priv->prim.space, (n) * DMA_BLOCK_SIZE );	\
 	}								\
 	prim = dev_priv->prim.start;					\
@@ -276,7 +276,7 @@ do {									\
 #define DMA_WRITE( offset, val )					\
 do {									\
 	if ( MGA_VERBOSE ) {						\
-		DRM_INFO( "   DMA_WRITE( 0x%08x ) at 0x%04Zx\n",	\
+		DRM_INFO( "   DMA_WRITE( 0x%08x ) at 0x%04x\n",		\
 			  (u32)(val), write + (offset) * sizeof(u32) );	\
 	}								\
 	*(volatile u32 *)(prim + write + (offset) * sizeof(u32)) = val;	\
diff --git a/drivers/char/drm/r128_cce.c b/drivers/char/drm/r128_cce.c
index bff0cad..608b7da 100644
--- a/drivers/char/drm/r128_cce.c
+++ b/drivers/char/drm/r128_cce.c
@@ -350,8 +350,8 @@ static void r128_cce_init_ring_buffer( drm_device_t *dev,
 	R128_WRITE( R128_PM4_BUFFER_DL_RPTR_ADDR,
 		    entry->busaddr[page_ofs]);
 
-	DRM_DEBUG( "ring rptr: offset=0x%08lx handle=0x%08lx\n",
-		   (unsigned long) entry->busaddr[page_ofs],
+	DRM_DEBUG( "ring rptr: offset=0x%08x handle=0x%08lx\n",
+		   entry->busaddr[page_ofs],
 		   entry->handle + tmp_ofs );
 }
 
@@ -540,9 +540,9 @@ static int r128_do_init_cce( drm_device_t *dev, drm_r128_init_t *init )
 					 init->sarea_priv_offset);
 
 	if ( !dev_priv->is_pci ) {
-		DRM_IOREMAP( dev_priv->cce_ring, dev );
-		DRM_IOREMAP( dev_priv->ring_rptr, dev );
-		DRM_IOREMAP( dev_priv->buffers, dev );
+		DRM_IOREMAP( dev_priv->cce_ring );
+		DRM_IOREMAP( dev_priv->ring_rptr );
+		DRM_IOREMAP( dev_priv->buffers );
 		if(!dev_priv->cce_ring->handle ||
 		   !dev_priv->ring_rptr->handle ||
 		   !dev_priv->buffers->handle) {
@@ -629,11 +629,11 @@ int r128_do_cleanup_cce( drm_device_t *dev )
 		if ( !dev_priv->is_pci ) {
 #endif
 			if ( dev_priv->cce_ring != NULL )
-				DRM_IOREMAPFREE( dev_priv->cce_ring, dev );
+				DRM_IOREMAPFREE( dev_priv->cce_ring );
 			if ( dev_priv->ring_rptr != NULL )
-				DRM_IOREMAPFREE( dev_priv->ring_rptr, dev );
+				DRM_IOREMAPFREE( dev_priv->ring_rptr );
 			if ( dev_priv->buffers != NULL )
-				DRM_IOREMAPFREE( dev_priv->buffers, dev );
+				DRM_IOREMAPFREE( dev_priv->buffers );
 #if __REALLY_HAVE_SG
 		} else {
 			if (!DRM(ati_pcigart_cleanup)( dev,
diff --git a/drivers/char/drm/radeon_cp.c b/drivers/char/drm/radeon_cp.c
index c94178b..281b0cf 100644
--- a/drivers/char/drm/radeon_cp.c
+++ b/drivers/char/drm/radeon_cp.c
@@ -903,8 +903,8 @@ static void radeon_cp_init_ring_buffer( drm_device_t *dev,
 	RADEON_WRITE( RADEON_CP_RB_RPTR_ADDR,
 		      entry->busaddr[page_ofs]);
 
-	DRM_DEBUG( "ring rptr: offset=0x%08lx handle=0x%08lx\n",
-		   (unsigned long) entry->busaddr[page_ofs],
+	DRM_DEBUG( "ring rptr: offset=0x%08x handle=0x%08lx\n",
+		   entry->busaddr[page_ofs],
 		   entry->handle + tmp_ofs );
 }
 
@@ -1152,9 +1152,9 @@ static int radeon_do_init_cp( drm_device_t *dev, drm_radeon_init_t *init )
 					     init->sarea_priv_offset);
 
 	if ( !dev_priv->is_pci ) {
-		DRM_IOREMAP( dev_priv->cp_ring, dev );
-		DRM_IOREMAP( dev_priv->ring_rptr, dev );
-		DRM_IOREMAP( dev_priv->buffers, dev );
+		DRM_IOREMAP( dev_priv->cp_ring );
+		DRM_IOREMAP( dev_priv->ring_rptr );
+		DRM_IOREMAP( dev_priv->buffers );
 		if(!dev_priv->cp_ring->handle ||
 		   !dev_priv->ring_rptr->handle ||
 		   !dev_priv->buffers->handle) {
@@ -1279,11 +1279,11 @@ int radeon_do_cleanup_cp( drm_device_t *dev )
 	if ( !dev_priv->is_pci ) {
 		if ( dev_priv->cp_ring != NULL )
-			DRM_IOREMAPFREE( dev_priv->cp_ring, dev );
+			DRM_IOREMAPFREE( dev_priv->cp_ring );
 		if ( dev_priv->ring_rptr != NULL )
-			DRM_IOREMAPFREE( dev_priv->ring_rptr, dev );
+			DRM_IOREMAPFREE( dev_priv->ring_rptr );
 		if ( dev_priv->buffers != NULL )
-			DRM_IOREMAPFREE( dev_priv->buffers, dev );
+			DRM_IOREMAPFREE( dev_priv->buffers );
 	} else {
 #if __REALLY_HAVE_SG
 		if (!DRM(ati_pcigart_cleanup)( dev,
diff --git a/drivers/char/hvc_console.c b/drivers/char/hvc_console.c
index d7a5551..2322dc4 100644
--- a/drivers/char/hvc_console.c
+++ b/drivers/char/hvc_console.c
@@ -227,12 +227,7 @@ static void hvc_poll(int index)
 	spin_unlock_irqrestore(&hp->lock, flags);
 }
 
-#if defined (CONFIG_XMON)
 extern unsigned long cpus_in_xmon;
-#else
-unsigned long cpus_in_xmon=0;
-#endif
-
 
 int khvcd(void *unused)
 {
diff --git a/drivers/char/mem.c b/drivers/char/mem.c
index 60b698c..83a0987 100644
--- a/drivers/char/mem.c
+++ b/drivers/char/mem.c
@@ -71,8 +71,7 @@ static ssize_t read_mem(struct file * file, char * buf,
 	unsigned long p = *ppos;
 	unsigned long end_mem;
 	ssize_t read;
-	void *addr;
-
+
 	end_mem = __pa(high_memory);
 	if (p >= end_mem)
 		return 0;
@@ -95,14 +94,8 @@ static ssize_t read_mem(struct file * file, char * buf,
 		}
 	}
 #endif
-	if (file->f_flags & O_SYNC)
-		addr = ioremap(p, count);
-	else
-		addr = __va(p);
-	if (copy_to_user(buf, addr, count))
+	if (copy_to_user(buf, __va(p), count))
 		return -EFAULT;
-	if (file->f_flags & O_SYNC)
-		iounmap(addr);
 	read += count;
 	*ppos += read;
 	return read;
@@ -113,22 +106,13 @@ static ssize_t write_mem(struct file * file, const char * buf,
 {
 	unsigned long p = *ppos;
 	unsigned long end_mem;
-	ssize_t ret;
-	void *addr;
 
 	end_mem = __pa(high_memory);
 	if (p >= end_mem)
 		return 0;
 	if (count > end_mem - p)
 		count = end_mem - p;
-	if (file->f_flags & O_SYNC)
-		addr = ioremap(p, count);
-	else
-		addr = __va(p);
-	ret = do_write_mem(file, addr, p, buf, count, ppos);
-	if (file->f_flags & O_SYNC)
-		iounmap(addr);
-	return ret;
+	return do_write_mem(file, __va(p), p, buf, count, ppos);
 }
 
 #ifndef pgprot_noncached
@@ -538,19 +522,17 @@ static loff_t null_lseek(struct file * file, loff_t offset, int orig)
  */
 static loff_t memory_lseek(struct file * file, loff_t offset, int orig)
 {
-	loff_t ret;
+	int ret;
 
 	lock_kernel();
 	switch (orig) {
 		case 0:
 			file->f_pos = offset;
 			ret = file->f_pos;
-			force_successful_syscall_return();
 			break;
 		case 1:
 			file->f_pos += offset;
 			ret = file->f_pos;
-			force_successful_syscall_return();
 			break;
 		default:
 			ret = -EINVAL;
diff --git a/drivers/media/radio/Makefile b/drivers/media/radio/Makefile
index a481129..62263fa 100644
--- a/drivers/media/radio/Makefile
+++ b/drivers/media/radio/Makefile
@@ -2,8 +2,6 @@
 # Makefile for the kernel character device drivers.
 #
 
-obj-y := dummy.o
-
 miropcm20-objs	:= miropcm20-rds-core.o miropcm20-radio.o
 
 obj-$(CONFIG_RADIO_AZTECH) += radio-aztech.o
diff --git a/drivers/media/radio/dummy.c b/drivers/media/radio/dummy.c
deleted file mode 100644
index 4f9d00a..0000000
--- a/drivers/media/radio/dummy.c
+++ /dev/null
@@ -1 +0,0 @@
-/* just so the linker knows what kind of object files it's dealing with... */
diff --git a/drivers/media/video/Makefile b/drivers/media/video/Makefile
index 80db81d..cbc7e09 100644
--- a/drivers/media/video/Makefile
+++ b/drivers/media/video/Makefile
@@ -6,8 +6,6 @@ bttv-objs	:=	bttv-driver.o bttv-cards.o bttv-if.o \
 			bttv-risc.o bttv-vbi.o
 zoran-objs	:=	zr36120.o zr36120_i2c.o zr36120_mem.o
 
-obj-y	:= dummy.o
-
 obj-$(CONFIG_VIDEO_DEV) += videodev.o v4l2-common.o v4l1-compat.o
 
 obj-$(CONFIG_VIDEO_BT848) += bttv.o msp3400.o tvaudio.o \
diff --git a/drivers/media/video/dummy.c b/drivers/media/video/dummy.c
deleted file mode 100644
index 4f9d00a..0000000
--- a/drivers/media/video/dummy.c
+++ /dev/null
@@ -1 +0,0 @@
-/* just so the linker knows what kind of object files it's dealing with... */
diff --git a/drivers/net/tulip/media.c b/drivers/net/tulip/media.c
index 4658e36..ad7d115 100644
--- a/drivers/net/tulip/media.c
+++ b/drivers/net/tulip/media.c
@@ -278,10 +278,6 @@ void tulip_select_media(struct net_device *dev, int startup)
 				for (i = 0; i < init_length; i++)
 					outl(init_sequence[i], ioaddr + CSR12);
 			}
-
-			(void) inl(ioaddr + CSR6); /* flush CSR12 writes */
-			udelay(500);	/* Give MII time to recover */
-
 			tmp_info = get_u16(&misc_info[1]);
 			if (tmp_info)
 				tp->advertising[phy_num] = tmp_info | 1;
diff --git a/drivers/scsi/qla1280.c b/drivers/scsi/qla1280.c
index e202fe8..6de4a1e 100644
--- a/drivers/scsi/qla1280.c
+++ b/drivers/scsi/qla1280.c
@@ -284,7 +284,7 @@
 #define  QL1280_TARGET_MODE_SUPPORT    0	/* Target mode support */
 #define  QL1280_LUN_SUPPORT            0
 #define  WATCHDOGTIMER                 0
-#define  MEMORY_MAPPED_IO              1
+#define  MEMORY_MAPPED_IO              0
 #define  DEBUG_QLA1280_INTR            0
 #define  USE_NVRAM_DEFAULTS            0
 #define  DEBUG_PRINT_NVRAM             0
@@ -2634,7 +2634,7 @@ qla1280_pci_config(struct scsi_qla_host *ha)
 	/*
 	 * Get memory mapped I/O address.
 	 */
-	pci_read_config_dword (ha->pdev, PCI_BASE_ADDRESS_1, &mmapbase);
+	pci_read_config_word (ha->pdev, PCI_BASE_ADDRESS_1, &mmapbase);
 	mmapbase &= PCI_BASE_ADDRESS_MEM_MASK;
 
 	/*
diff --git a/drivers/scsi/scsi_ioctl.c b/drivers/scsi/scsi_ioctl.c
index c8a80c3..1308fff 100644
--- a/drivers/scsi/scsi_ioctl.c
+++ b/drivers/scsi/scsi_ioctl.c
@@ -219,9 +219,6 @@ int scsi_ioctl_send_command(Scsi_Device * dev, Scsi_Ioctl_Command * sic)
 	unsigned int needed, buf_needed;
 	int timeout, retries, result;
 	int data_direction, gfp_mask = GFP_KERNEL;
-#if __GNUC__ < 3
-	int foo;
-#endif
 
 	if (!sic)
 		return -EINVAL;
@@ -235,21 +232,11 @@ int scsi_ioctl_send_command(Scsi_Device * dev, Scsi_Ioctl_Command * sic)
 	if (verify_area(VERIFY_READ, sic, sizeof(Scsi_Ioctl_Command)))
 		return -EFAULT;
 
-#if __GNUC__ < 3
-	foo = __get_user(inlen, &sic->inlen);
-	if (foo)
-		return -EFAULT;
-
-	foo = __get_user(outlen, &sic->outlen);
-	if (foo)
-		return -EFAULT;
-#else
 	if(__get_user(inlen, &sic->inlen))
 		return -EFAULT;
 
 	if(__get_user(outlen, &sic->outlen))
 		return -EFAULT;
-#endif
 
 	/*
 	 * We do not transfer more than MAX_BUF with this interface.
diff --git a/drivers/scsi/sym53c8xx_2/sym_glue.c b/drivers/scsi/sym53c8xx_2/sym_glue.c
index 904c60f..2509a8a 100644
--- a/drivers/scsi/sym53c8xx_2/sym_glue.c
+++ b/drivers/scsi/sym53c8xx_2/sym_glue.c
@@ -295,8 +295,12 @@ struct host_data {
 #ifndef	SYM_LINUX_DYNAMIC_DMA_MAPPING
 typedef	u_long		bus_addr_t;
 #else
+#if	SYM_CONF_DMA_ADDRESSING_MODE > 0
+typedef	dma64_addr_t	bus_addr_t;
+#else
 typedef	dma_addr_t	bus_addr_t;
 #endif
+#endif
 
 /*
  *  Used by the eh thread to wait for command completion.
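The sym_glue.c hunk widens bus_addr_t when the driver is configured for 64-bit DMA addressing. A standalone sketch of the same compile-time selection; dma64_addr_t and dma_addr_t are stood in for by fixed-width types, and the config macro name is shortened:

/* sketch: picking a bus address width at compile time */
#include <stdio.h>
#include <stdint.h>

#define DMA_ADDRESSING_MODE 1	/* stands in for SYM_CONF_DMA_ADDRESSING_MODE */

#if DMA_ADDRESSING_MODE > 0
typedef uint64_t bus_addr_t;	/* like dma64_addr_t */
#else
typedef uint32_t bus_addr_t;	/* like a 32-bit dma_addr_t */
#endif

int main(void)
{
	printf("bus_addr_t is %zu bytes\n", sizeof(bus_addr_t));
	return 0;
}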
diff --git a/drivers/scsi/sym53c8xx_2/sym_malloc.c b/drivers/scsi/sym53c8xx_2/sym_malloc.c
index 7395ddb..7f32ed4 100644
--- a/drivers/scsi/sym53c8xx_2/sym_malloc.c
+++ b/drivers/scsi/sym53c8xx_2/sym_malloc.c
@@ -150,6 +150,7 @@ static void ___sym_mfree(m_pool_p mp, void *ptr, int size)
 			((m_link_p) a)->next = h[i].next;
 			h[i].next = (m_link_p) a;
 #endif
+			break;
 		}
 		b = a ^ s;
 		q = &h[i];
diff --git a/drivers/serial/8250.c b/drivers/serial/8250.c
index eb7b78f..b554e89 100644
--- a/drivers/serial/8250.c
+++ b/drivers/serial/8250.c
@@ -2064,11 +2064,9 @@ int register_serial(struct serial_struct *req)
 	return __register_serial(req, -1);
 }
 
-int __init early_serial_setup(struct uart_port *port)
+int __init early_serial_setup(struct serial_struct *req)
 {
-	serial8250_isa_init_ports();
-	serial8250_ports[port->line].port = *port;
-	serial8250_ports[port->line].port.ops = &serial8250_pops;
+	__register_serial(req, req->line);
 	return 0;
 }
 
diff --git a/drivers/serial/8250_acpi.c b/drivers/serial/8250_acpi.c
deleted file mode 100644
index 3891cde..0000000
--- a/drivers/serial/8250_acpi.c
+++ /dev/null
@@ -1,178 +0,0 @@
-/*
- * linux/drivers/char/acpi_serial.c
- *
- * Copyright (C) 2000, 2002 Hewlett-Packard Co.
- *	Khalid Aziz
- *
- * Detect and initialize the headless console serial port defined in SPCR table and debug
- * serial port defined in DBGP table.
- *
- * 2002/08/29 davidm	Adjust it to new 2.5 serial driver infrastructure.
- */
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-
-#include
-#include
-
-#undef SERIAL_DEBUG_ACPI
-
-#define ACPI_SERIAL_CONSOLE_PORT	0
-#define ACPI_SERIAL_DEBUG_PORT		5
-
-/*
- * Query ACPI tables for a debug and a headless console serial port. If found, add them to
- * rs_table[]. A pointer to either SPCR or DBGP table is passed as parameter. This
- * function should be called before serial_console_init() is called to make sure the SPCR
- * serial console will be available for use. IA-64 kernel calls this function from within
- * acpi.c when it encounters SPCR or DBGP tables as it parses the ACPI 2.0 tables during
- * bootup.
- */
-void __init
-setup_serial_acpi (void *tablep)
-{
-	acpi_ser_t *acpi_ser_p;
-	struct uart_port port;
-	unsigned long iobase;
-	int gsi;
-
-#ifdef SERIAL_DEBUG_ACPI
-	printk("Entering setup_serial_acpi()\n");
-#endif
-
-	/* Now get the table */
-	if (!tablep)
-		return;
-
-	memset(&port, 0, sizeof(port));
-
-	acpi_ser_p = (acpi_ser_t *) tablep;
-
-	/*
-	 * Perform a sanity check on the table. Table should have a signature of "SPCR" or
-	 * "DBGP" and it should be at least 52 bytes long.
-	 */
-	if (strncmp(acpi_ser_p->signature, ACPI_SPCRT_SIGNATURE, ACPI_SIG_LEN) != 0 &&
-	    strncmp(acpi_ser_p->signature, ACPI_DBGPT_SIGNATURE, ACPI_SIG_LEN) != 0)
-		return;
-	if (acpi_ser_p->length < 52)
-		return;
-
-	iobase = (((u64) acpi_ser_p->base_addr.addrh) << 32) | acpi_ser_p->base_addr.addrl;
-	gsi = ( (acpi_ser_p->global_int[3] << 24) | (acpi_ser_p->global_int[2] << 16)
-	       | (acpi_ser_p->global_int[1] << 8) | (acpi_ser_p->global_int[0] << 0));
-
-#ifdef SERIAL_DEBUG_ACPI
-	printk("setup_serial_acpi(): table pointer = 0x%p\n", acpi_ser_p);
-	printk("                     sig = '%c%c%c%c'\n", acpi_ser_p->signature[0],
-	       acpi_ser_p->signature[1], acpi_ser_p->signature[2], acpi_ser_p->signature[3]);
-	printk("                     length = %d\n", acpi_ser_p->length);
-	printk("                     Rev = %d\n", acpi_ser_p->rev);
-	printk("                     Interface type = %d\n", acpi_ser_p->intfc_type);
-	printk("                     Base address = 0x%lX\n", iobase);
-	printk("                     IRQ = %d\n", acpi_ser_p->irq);
-	printk("                     Global System Int = %d\n", gsi);
-	printk("                     Baud rate = ");
-	switch (acpi_ser_p->baud) {
-	case ACPI_SERIAL_BAUD_9600:
-		printk("9600\n");
-		break;
-
-	case ACPI_SERIAL_BAUD_19200:
-		printk("19200\n");
-		break;
-
-	case ACPI_SERIAL_BAUD_57600:
-		printk("57600\n");
-		break;
-
-	case ACPI_SERIAL_BAUD_115200:
-		printk("115200\n");
-		break;
-
-	default:
-		printk("Huh (%d)\n", acpi_ser_p->baud);
-		break;
-	}
-	if (acpi_ser_p->base_addr.space_id == ACPI_SERIAL_PCICONF_SPACE) {
-		printk("                     PCI serial port:\n");
-		printk("                         Bus %d, Device %d, Vendor ID 0x%x, Dev ID 0x%x\n",
-		       acpi_ser_p->pci_bus, acpi_ser_p->pci_dev,
-		       acpi_ser_p->pci_vendor_id, acpi_ser_p->pci_dev_id);
-	}
-#endif
-	/*
-	 * Now build a serial_req structure to update the entry in rs_table for the
-	 * headless console port.
-	 */
-	switch (acpi_ser_p->intfc_type) {
-	case ACPI_SERIAL_INTFC_16550:
-		port.type = PORT_16550;
-		port.uartclk = BASE_BAUD * 16;
-		break;
-
-	case ACPI_SERIAL_INTFC_16450:
-		port.type = PORT_16450;
-		port.uartclk = BASE_BAUD * 16;
-		break;
-
-	default:
-		port.type = PORT_UNKNOWN;
-		break;
-	}
-	if (strncmp(acpi_ser_p->signature, ACPI_SPCRT_SIGNATURE, ACPI_SIG_LEN) == 0)
-		port.line = ACPI_SERIAL_CONSOLE_PORT;
-	else if (strncmp(acpi_ser_p->signature, ACPI_DBGPT_SIGNATURE, ACPI_SIG_LEN) == 0)
-		port.line = ACPI_SERIAL_DEBUG_PORT;
-	/*
-	 * Check if this is an I/O mapped address or a memory mapped address
-	 */
-	if (acpi_ser_p->base_addr.space_id == ACPI_SERIAL_MEM_SPACE) {
-		port.iobase = 0;
-		port.mapbase = iobase;
-		port.membase = ioremap(iobase, 64);
-		port.iotype = SERIAL_IO_MEM;
-	} else if (acpi_ser_p->base_addr.space_id == ACPI_SERIAL_IO_SPACE) {
-		port.iobase = iobase;
-		port.mapbase = 0;
-		port.membase = NULL;
-		port.iotype = SERIAL_IO_PORT;
-	} else if (acpi_ser_p->base_addr.space_id == ACPI_SERIAL_PCICONF_SPACE) {
-		printk("WARNING: No support for PCI serial console\n");
-		return;
-	}
-
-	/*
-	 * If the table does not have IRQ information, use 0 for IRQ. This will force
-	 * rs_init() to probe for IRQ.
-	 */
-	if (acpi_ser_p->length < 53)
-		port.irq = 0;
-	else {
-		port.flags = UPF_SKIP_TEST | UPF_BOOT_AUTOCONF | UPF_AUTO_IRQ;
-		if (acpi_ser_p->int_type & (ACPI_SERIAL_INT_APIC | ACPI_SERIAL_INT_SAPIC))
-			port.irq = gsi;
-		else if (acpi_ser_p->int_type & ACPI_SERIAL_INT_PCAT)
-			port.irq = acpi_ser_p->irq;
-		else
-			/*
-			 * IRQ type not being set would mean UART will run in polling
-			 * mode. Do not probe for IRQ in that case.
-			 */
-			port.flags &= UPF_AUTO_IRQ;
-	}
-	if (early_serial_setup(&port) < 0) {
-		printk("early_serial_setup() for ACPI serial console port failed\n");
-		return;
-	}
-
-#ifdef SERIAL_DEBUG_ACPI
-	printk("Leaving setup_serial_acpi()\n");
-#endif
-}
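The deleted setup_serial_acpi() assembles the 32-bit global system interrupt from four little-endian bytes of the SPCR/DBGP table. The recombination step as a runnable sketch (the raw bytes are made up):

/* sketch: little-endian byte assembly, as in the removed gsi computation */
#include <stdio.h>
#include <stdint.h>

static uint32_t le32(const uint8_t b[4])
{
	return ((uint32_t)b[3] << 24) | ((uint32_t)b[2] << 16) |
	       ((uint32_t)b[1] << 8)  | ((uint32_t)b[0] << 0);
}

int main(void)
{
	uint8_t raw[4] = { 0x22, 0x00, 0x00, 0x00 };

	printf("gsi = %u\n", le32(raw));	/* 34 */
	return 0;
}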
diff --git a/drivers/serial/8250_hcdp.c b/drivers/serial/8250_hcdp.c
deleted file mode 100644
index a6f16f1..0000000
--- a/drivers/serial/8250_hcdp.c
+++ /dev/null
@@ -1,221 +0,0 @@
-/*
- * linux/drivers/char/hcdp_serial.c
- *
- * Copyright (C) 2002 Hewlett-Packard Co.
- *	Khalid Aziz
- *
- * Parse the EFI HCDP table to locate serial console and debug ports and initialize them.
- *
- * 2002/08/29 davidm	Adjust it to new 2.5 serial driver infrastructure (untested).
- */
-#include
-
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-
-#include
-#include
-#include
-
-#include "8250_hcdp.h"
-
-#undef SERIAL_DEBUG_HCDP
-
-/*
- * Parse the HCDP table to find descriptions for headless console and debug serial ports
- * and add them to rs_table[]. A pointer to HCDP table is passed as parameter. This
- * function should be called before serial_console_init() is called to make sure the HCDP
- * serial console will be available for use. IA-64 kernel calls this function from
- * setup_arch() after the EFI and ACPI tables have been parsed.
- */
-void __init
-setup_serial_hcdp (void *tablep)
-{
-	hcdp_dev_t *hcdp_dev;
-	struct uart_port port;
-	unsigned long iobase;
-	hcdp_t hcdp;
-	int gsi, nr;
-#if 0
-	static int shift_once = 1;
-#endif
-
-#ifdef SERIAL_DEBUG_HCDP
-	printk("Entering setup_serial_hcdp()\n");
-#endif
-
-	/* Verify we have a valid table pointer */
-	if (!tablep)
-		return;
-
-	memset(&port, 0, sizeof(port));
-
-	/*
-	 * Don't trust firmware to give us a table starting at an aligned address. Make a
-	 * local copy of the HCDP table with aligned structures.
-	 */
-	memcpy(&hcdp, tablep, sizeof(hcdp));
-
-	/*
-	 * Perform a sanity check on the table. Table should have a signature of "HCDP"
-	 * and it should be at least 82 bytes long to have any useful information.
-	 */
-	if ((strncmp(hcdp.signature, HCDP_SIGNATURE, HCDP_SIG_LEN) != 0))
-		return;
-	if (hcdp.len < 82)
-		return;
-
-#ifdef SERIAL_DEBUG_HCDP
-	printk("setup_serial_hcdp(): table pointer = 0x%p, sig = '%.4s'\n",
-	       tablep, hcdp.signature);
-	printk(" length = %d, rev = %d, ", hcdp.len, hcdp.rev);
-	printk("OEM ID = %.6s, # of entries = %d\n", hcdp.oemid, hcdp.num_entries);
-#endif
-
-	/*
-	 * Parse each device entry
-	 */
-	for (nr = 0; nr < hcdp.num_entries; nr++) {
-		hcdp_dev = hcdp.hcdp_dev + nr;
-		/*
-		 * We will parse only the primary console device which is the first entry
-		 * for these devices. We will ignore rest of the entries for the same type
-		 * device that has already been parsed and initialized
-		 */
-		if (hcdp_dev->type != HCDP_DEV_CONSOLE)
-			continue;
-
-		iobase = ((u64) hcdp_dev->base_addr.addrhi << 32) | hcdp_dev->base_addr.addrlo;
-		gsi = hcdp_dev->global_int;
-
-		/* See PCI spec v2.2, Appendix D (Class Codes): */
-		switch (hcdp_dev->pci_prog_intfc) {
-		case 0x00: port.type = PORT_8250;  break;
-		case 0x01: port.type = PORT_16450; break;
-		case 0x02: port.type = PORT_16550; break;
-		case 0x03: port.type = PORT_16650; break;
-		case 0x04: port.type = PORT_16750; break;
-		case 0x05: port.type = PORT_16850; break;
-		case 0x06: port.type = PORT_16C950; break;
-		default:
-			printk(KERN_WARNING"warning: EFI HCDP table reports unknown serial "
-			       "programming interface 0x%02x; will autoprobe.\n",
-			       hcdp_dev->pci_prog_intfc);
-			port.type = PORT_UNKNOWN;
-			break;
-		}
-
-#ifdef SERIAL_DEBUG_HCDP
-		printk("  type = %s, uart = %d\n", ((hcdp_dev->type == HCDP_DEV_CONSOLE)
-		       ? "Headless Console" : ((hcdp_dev->type == HCDP_DEV_DEBUG)
-		       ? "Debug port" : "Huh????")),
-		       port.type);
-		printk("  base address space = %s, base address = 0x%lx\n",
-		       ((hcdp_dev->base_addr.space_id == ACPI_MEM_SPACE)
-		       ? "Memory Space" : ((hcdp_dev->base_addr.space_id == ACPI_IO_SPACE)
-		       ? "I/O space" : "PCI space")),
-		       iobase);
-		printk("  gsi = %d, baud rate = %lu, bits = %d, clock = %d\n",
-		       gsi, (unsigned long) hcdp_dev->baud, hcdp_dev->bits, hcdp_dev->clock_rate);
-		if (hcdp_dev->base_addr.space_id == ACPI_PCICONF_SPACE)
-			printk(" PCI id: %02x:%02x:%02x, vendor ID=0x%x, dev ID=0x%x\n",
-			       hcdp_dev->pci_seg, hcdp_dev->pci_bus, hcdp_dev->pci_dev,
-			       hcdp_dev->pci_vendor_id, hcdp_dev->pci_dev_id);
-#endif
-		/*
-		 * Now fill in a port structure to update the 8250 port table..
-		 */
-		if (hcdp_dev->clock_rate)
-			port.uartclk = hcdp_dev->clock_rate;
-		else
-			port.uartclk = BASE_BAUD * 16;
-
-		/*
-		 * Check if this is an I/O mapped address or a memory mapped address
-		 */
-		if (hcdp_dev->base_addr.space_id == ACPI_MEM_SPACE) {
-			port.iobase = 0;
-			port.mapbase = iobase;
-			port.membase = ioremap(iobase, 64);
-			port.iotype = SERIAL_IO_MEM;
-		} else if (hcdp_dev->base_addr.space_id == ACPI_IO_SPACE) {
-			port.iobase = iobase;
-			port.mapbase = 0;
-			port.membase = NULL;
-			port.iotype = SERIAL_IO_PORT;
-		} else if (hcdp_dev->base_addr.space_id == ACPI_PCICONF_SPACE) {
-			printk(KERN_WARNING"warning: No support for PCI serial console\n");
-			return;
-		}
-#ifdef CONFIG_IA64
-		port.irq = acpi_register_irq(gsi, ACPI_ACTIVE_HIGH, ACPI_EDGE_SENSITIVE);
-#else
-		port.irq = gsi;
-#endif
-		port.flags = UPF_SKIP_TEST | UPF_BOOT_AUTOCONF;
-		if (gsi)
-			port.flags |= ASYNC_AUTO_IRQ;
-
-		/*
-		 * Note: the above memset() initializes port.line to 0, so we register
-		 * this port as ttyS0.
-		 */
-		if (early_serial_setup(&port) < 0) {
-			printk("setup_serial_hcdp(): early_serial_setup() for HCDP serial "
-			       "console port failed. Will try any additional consoles in HCDP.\n");
-			continue;
-		}
-		break;
-	}
-
-#ifdef SERIAL_DEBUG_HCDP
-	printk("Leaving setup_serial_hcdp()\n");
-#endif
-}
-
-#ifdef CONFIG_IA64_EARLY_PRINTK_UART
-unsigned long
-hcdp_early_uart (void)
-{
-	efi_system_table_t *systab;
-	efi_config_table_t *config_tables;
-	unsigned long addr = 0;
-	hcdp_t *hcdp = 0;
-	hcdp_dev_t *dev;
-	int i;
-
-	systab = (efi_system_table_t *) ia64_boot_param->efi_systab;
-	if (!systab)
-		return 0;
-	systab = __va(systab);
-
-	config_tables = (efi_config_table_t *) systab->tables;
-	if (!config_tables)
-		return 0;
-	config_tables = __va(config_tables);
-
-	for (i = 0; i < systab->nr_tables; i++) {
-		if (efi_guidcmp(config_tables[i].guid, HCDP_TABLE_GUID) == 0) {
-			hcdp = (hcdp_t *) config_tables[i].table;
-			break;
-		}
-	}
-	if (!hcdp)
-		return 0;
-	hcdp = __va(hcdp);
-
-	for (i = 0, dev = hcdp->hcdp_dev; i < hcdp->num_entries; i++, dev++) {
-		if (dev->type == HCDP_DEV_CONSOLE) {
-			addr = (u64) dev->base_addr.addrhi << 32 | dev->base_addr.addrlo;
-			break;
-		}
-	}
-	return addr;
-}
-#endif /* CONFIG_IA64_EARLY_PRINTK_UART */
diff --git a/drivers/serial/8250_hcdp.h b/drivers/serial/8250_hcdp.h
deleted file mode 100644
index 8ee1b60..0000000
--- a/drivers/serial/8250_hcdp.h
+++ /dev/null
@@ -1,79 +0,0 @@
-/*
- * drivers/serial/8250_hcdp.h
- *
- * Copyright (C) 2002 Hewlett-Packard Co.
- *	Khalid Aziz
- *
- * Definitions for HCDP defined serial ports (Serial console and debug
- * ports)
- */
-
-/* ACPI table signatures */
-#define HCDP_SIG_LEN		4
-#define HCDP_SIGNATURE		"HCDP"
-
-/* Space ID as defined in ACPI generic address structure */
-#define ACPI_MEM_SPACE		0
-#define ACPI_IO_SPACE		1
-#define ACPI_PCICONF_SPACE	2
-
-/*
- * Maximum number of HCDP devices we want to read in
- */
-#define MAX_HCDP_DEVICES	6
-
-/*
- * Default UART clock rate if clock rate is 0 in HCDP table.
- */
-#define DEFAULT_UARTCLK		115200
-
-/*
- * ACPI Generic Address Structure
- */
-typedef struct {
-	u8  space_id;
-	u8  bit_width;
-	u8  bit_offset;
-	u8  resv;
-	u32 addrlo;
-	u32 addrhi;
-} acpi_gen_addr;
-
-/* HCDP Device descriptor entry types */
-#define HCDP_DEV_CONSOLE	0
-#define HCDP_DEV_DEBUG		1
-
-/* HCDP Device descriptor type */
-typedef struct {
-	u8	type;
-	u8	bits;
-	u8	parity;
-	u8	stop_bits;
-	u8	pci_seg;
-	u8	pci_bus;
-	u8	pci_dev;
-	u8	pci_func;
-	u64	baud;
-	acpi_gen_addr	base_addr;
-	u16	pci_dev_id;
-	u16	pci_vendor_id;
-	u32	global_int;
-	u32	clock_rate;
-	u8	pci_prog_intfc;
-	u8	resv;
-} hcdp_dev_t;
-
-/* HCDP Table format */
-typedef struct {
-	u8	signature[4];
-	u32	len;
-	u8	rev;
-	u8	chksum;
-	u8	oemid[6];
-	u8	oem_tabid[8];
-	u32	oem_rev;
-	u8	creator_id[4];
-	u32	creator_rev;
-	u32	num_entries;
-	hcdp_dev_t	hcdp_dev[MAX_HCDP_DEVICES];
-} hcdp_t;
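Both deleted parsers dispatch on the table's ACPI address-space id: memory space becomes a mapped base, I/O space a port number, and PCI config space is rejected. A loose standalone sketch of that dispatch (in the kernel the memory case would also ioremap() the base; everything here is simplified and the names are illustrative):

/* sketch: space_id dispatch from the removed SPCR/HCDP parsers */
#include <stdio.h>
#include <stdint.h>

enum { MEM_SPACE, IO_SPACE, PCICONF_SPACE };

struct port { uint64_t iobase, mapbase; char iotype; };

static int setup(struct port *p, int space_id, uint64_t base)
{
	switch (space_id) {
	case MEM_SPACE:			/* memory-mapped UART */
		p->iobase = 0;
		p->mapbase = base;
		p->iotype = 'M';
		return 0;
	case IO_SPACE:			/* port I/O UART */
		p->iobase = base;
		p->mapbase = 0;
		p->iotype = 'P';
		return 0;
	default:
		fprintf(stderr, "no support for PCI serial console\n");
		return -1;
	}
}

int main(void)
{
	struct port p;

	if (setup(&p, MEM_SPACE, 0xf8030000ull) == 0)
		printf("iotype %c, mapbase 0x%llx\n", p.iotype,
		       (unsigned long long)p.mapbase);
	return 0;
}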
diff --git a/drivers/serial/Kconfig b/drivers/serial/Kconfig
index 1fc92d2..48aec4e 100644
--- a/drivers/serial/Kconfig
+++ b/drivers/serial/Kconfig
@@ -77,15 +77,6 @@ config SERIAL_8250_CS
 	  a module, say M here and read .
 	  If unsure, say N.
 
-config SERIAL_8250_HCDP
-	bool "8250/16550 device discovery support via EFI HCDP table"
-	depends on IA64
-	---help---
-	  If you wish to make the serial console port described by the EFI
-	  HCDP table available for use as serial console or general
-	  purpose port, say Y here. See .
-
 config SERIAL_8250_EXTENDED
 	bool "Extended 8250/16550 serial driver options"
 	depends on SERIAL_8250
diff --git a/drivers/serial/Makefile b/drivers/serial/Makefile
index c6f6071..35631fa 100644
--- a/drivers/serial/Makefile
+++ b/drivers/serial/Makefile
@@ -8,8 +8,6 @@ serial-8250-y :=
 serial-8250-$(CONFIG_GSC) += 8250_gsc.o
 serial-8250-$(CONFIG_PCI) += 8250_pci.o
 serial-8250-$(CONFIG_PNP) += 8250_pnp.o
-serial-8250-$(CONFIG_ACPI) += acpi.o
-serial-8250-$(CONFIG_SERIAL_8250_HCDP) += 8250_hcdp.o
 
 obj-$(CONFIG_SERIAL_CORE) += core.o
 obj-$(CONFIG_SERIAL_21285) += 21285.o
 obj-$(CONFIG_SERIAL_8250) += 8250.o $(serial-8250-y)
diff --git a/drivers/serial/acpi.c b/drivers/serial/acpi.c
deleted file mode 100644
index 85c1b6d..0000000
--- a/drivers/serial/acpi.c
+++ /dev/null
@@ -1,109 +0,0 @@
-/*
- * serial/acpi.c
- * Copyright (c) 2002-2003 Matthew Wilcox for Hewlett-Packard
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- */
-
-#include
-#include
-#include
-#include
-
-#include
-
-#include
-#include
-
-static void acpi_serial_address(struct serial_struct *req, struct acpi_resource_address32 *addr32)
-{
-	unsigned long size;
-
-	size = addr32->max_address_range - addr32->min_address_range + 1;
-	req->iomap_base = addr32->min_address_range;
-	req->iomem_base = ioremap(req->iomap_base, size);
-	req->io_type = SERIAL_IO_MEM;
-}
-
-static void acpi_serial_irq(struct serial_struct *req, struct acpi_resource_ext_irq *ext_irq)
-{
-	if (ext_irq->number_of_interrupts > 0) {
-#ifdef CONFIG_IA64
-		req->irq = acpi_register_irq(ext_irq->interrupts[0],
-			ext_irq->active_high_low, ext_irq->edge_level);
-#else
-		req->irq = ext_irq->interrupts[0];
-#endif
-	}
-}
-
-static int acpi_serial_add(struct acpi_device *device)
-{
-	acpi_status result;
-	struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
-	struct serial_struct serial_req;
-	int line, offset = 0;
-
-	memset(&serial_req, 0, sizeof(serial_req));
-	result = acpi_get_current_resources(device->handle, &buffer);
-	if (ACPI_FAILURE(result)) {
-		result = -ENODEV;
-		goto out;
-	}
-
-	while (offset <= buffer.length) {
-		struct acpi_resource *res = buffer.pointer + offset;
-		if (res->length == 0)
-			break;
-		offset += res->length;
-		if (res->id == ACPI_RSTYPE_ADDRESS32) {
-			acpi_serial_address(&serial_req, &res->data.address32);
-		} else if (res->id == ACPI_RSTYPE_EXT_IRQ) {
-			acpi_serial_irq(&serial_req, &res->data.extended_irq);
-		}
-	}
-
-	serial_req.baud_base = BASE_BAUD;
-	serial_req.flags = ASYNC_SKIP_TEST|ASYNC_BOOT_AUTOCONF|ASYNC_AUTO_IRQ;
-
-	result = 0;
-	line = register_serial(&serial_req);
-	if (line < 0)
-		result = -ENODEV;
-
- out:
-	acpi_os_free(buffer.pointer);
-	return result;
-}
-
-static int acpi_serial_remove(struct acpi_device *device, int type)
-{
-	return 0;
-}
-
-static struct acpi_driver acpi_serial_driver = {
-	.name =		"serial",
-	.class =	"",
-	.ids =		"PNP0501",
-	.ops =	{
-		.add =		acpi_serial_add,
-		.remove =	acpi_serial_remove,
-	},
-};
-
-static int __init acpi_serial_init(void)
-{
-	acpi_bus_register_driver(&acpi_serial_driver);
-	return 0;
-}
-
-static void __exit acpi_serial_exit(void)
-{
-	acpi_bus_unregister_driver(&acpi_serial_driver);
-}
-
-module_init(acpi_serial_init);
-module_exit(acpi_serial_exit);
diff --git a/fs/fcntl.c b/fs/fcntl.c
index f749721..0692a59 100644
--- a/fs/fcntl.c
+++ b/fs/fcntl.c
@@ -320,7 +320,6 @@ static long do_fcntl(unsigned int fd, unsigned int cmd,
 		 * to fix this will be in libc.
 		 */
 		err = filp->f_owner.pid;
-		force_successful_syscall_return();
 		break;
 	case F_SETOWN:
 		err = f_setown(filp, arg, 1);
diff --git a/fs/proc/base.c b/fs/proc/base.c
index 724f4b3..06e363b 100644
--- a/fs/proc/base.c
+++ b/fs/proc/base.c
@@ -534,24 +534,7 @@ static ssize_t mem_write(struct file * file, const char * buf,
 }
 #endif
 
-static loff_t mem_lseek(struct file * file, loff_t offset, int orig)
-{
-	switch (orig) {
-	case 0:
-		file->f_pos = offset;
-		break;
-	case 1:
-		file->f_pos += offset;
-		break;
-	default:
-		return -EINVAL;
-	}
-	force_successful_syscall_return();
-	return file->f_pos;
-}
-
 static struct file_operations proc_mem_operations = {
-	.llseek		= mem_lseek,
 	.read		= mem_read,
 	.write		= mem_write,
 	.open		= mem_open,
diff --git a/fs/select.c b/fs/select.c
index d1e29ab..631d41b 100644
--- a/fs/select.c
+++ b/fs/select.c
@@ -176,7 +176,7 @@ int do_select(int n, fd_set_bits *fds, long *timeout)
 {
 	struct poll_wqueues table;
 	poll_table *wait;
-	int retval, i;
+	int retval, i, off;
 	long __timeout = *timeout;
 
 	spin_lock(&current->files->file_lock);
@@ -193,54 +193,38 @@ int do_select(int n, fd_set_bits *fds, long *timeout)
 	wait = NULL;
 	retval = 0;
 	for (;;) {
-		unsigned long *rinp, *routp, *rexp, *inp, *outp, *exp;
 		set_current_state(TASK_INTERRUPTIBLE);
+		for (i = 0 ; i < n; i++) {
+			unsigned long bit = BIT(i);
+			unsigned long mask;
+			struct file *file;
 
-		inp = fds->in; outp = fds->out; exp = fds->ex;
-		rinp = fds->res_in; routp = fds->res_out; rexp = fds->res_ex;
-
-		for (i = 0; i < n; ++rinp, ++routp, ++rexp) {
-			unsigned long in, out, ex, all_bits, bit = 1, mask, j;
-			unsigned long res_in = 0, res_out = 0, res_ex = 0;
-			struct file_operations *f_op = NULL;
-			struct file *file = NULL;
-
-			in = *inp++; out = *outp++; ex = *exp++;
-			all_bits = in | out | ex;
-			if (all_bits == 0) {
-				i += __NFDBITS;
+			off = i / __NFDBITS;
+			if (!(bit & BITS(fds, off)))
 				continue;
+			file = fget(i);
+			mask = POLLNVAL;
+			if (file) {
+				mask = DEFAULT_POLLMASK;
+				if (file->f_op && file->f_op->poll)
+					mask = file->f_op->poll(file, wait);
+				fput(file);
 			}
-
-			for (j = 0; j < __NFDBITS; ++j, ++i, bit <<= 1) {
-				if (i >= n)
-					break;
-				if (!(bit & all_bits))
-					continue;
-				file = fget(i);
-				if (file) {
-					f_op = file->f_op;
-					mask = DEFAULT_POLLMASK;
-					if (f_op && f_op->poll)
-						mask = (*f_op->poll)(file, retval ? NULL : wait);
-					fput(file);
-					if ((mask & POLLIN_SET) && (in & bit)) {
-						res_in |= bit;
-						retval++;
-					}
-					if ((mask & POLLOUT_SET) && (out & bit)) {
-						res_out |= bit;
-						retval++;
-					}
-					if ((mask & POLLEX_SET) && (ex & bit)) {
-						res_ex |= bit;
-						retval++;
-					}
-				}
+			if ((mask & POLLIN_SET) && ISSET(bit, __IN(fds,off))) {
+				SET(bit, __RES_IN(fds,off));
+				retval++;
+				wait = NULL;
+			}
+			if ((mask & POLLOUT_SET) && ISSET(bit, __OUT(fds,off))) {
+				SET(bit, __RES_OUT(fds,off));
+				retval++;
+				wait = NULL;
+			}
+			if ((mask & POLLEX_SET) && ISSET(bit, __EX(fds,off))) {
+				SET(bit, __RES_EX(fds,off));
+				retval++;
+				wait = NULL;
 			}
-			if (res_in) *rinp = res_in;
-			if (res_out) *routp = res_out;
-			if (res_ex) *rexp = res_ex;
 		}
 		wait = NULL;
 		if (retval || !__timeout || signal_pending(current))
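The rewritten do_select() loop above indexes the fd sets one descriptor at a time: fd i lives in word i / __NFDBITS at bit i % __NFDBITS. The mapping by itself, as a runnable sketch:

/* sketch: fd -> (word, bit) mapping used by the new do_select() loop */
#include <stdio.h>

#define NFDBITS (8 * sizeof(unsigned long))
#define BIT(i)  (1UL << ((i) % NFDBITS))

int main(void)
{
	unsigned long set[2] = { 0, 0 };
	int fd = 70;	/* lands in the second word with 64-bit longs */

	set[fd / NFDBITS] |= BIT(fd);	/* mark fd ready */
	printf("word %zu, mask 0x%lx\n", fd / NFDBITS, BIT(fd));
	printf("is set: %d\n", !!(set[fd / NFDBITS] & BIT(fd)));
	return 0;
}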
For - * platforms which use coherent AGP DMA, this can be PAGE_KERNEL. For others, it needs to - * be an uncached mapping (such as write-combining). - */ -#define PAGE_AGP PAGE_KERNEL_NOCACHE /* XXX fix me */ - #endif diff --git a/include/asm-i386/agp.h b/include/asm-i386/agp.h index 56ab5cd..9ae97c0 100644 --- a/include/asm-i386/agp.h +++ b/include/asm-i386/agp.h @@ -20,11 +20,4 @@ worth it. Would need a page for it. */ #define flush_agp_cache() asm volatile("wbinvd":::"memory") -/* - * Page-protection value to be used for AGP memory mapped into kernel space. For - * platforms which use coherent AGP DMA, this can be PAGE_KERNEL. For others, it needs to - * be an uncached mapping (such as write-combining). - */ -#define PAGE_AGP PAGE_KERNEL_NOCACHE - #endif diff --git a/include/asm-i386/hw_irq.h b/include/asm-i386/hw_irq.h index 2476508..65e0461 100644 --- a/include/asm-i386/hw_irq.h +++ b/include/asm-i386/hw_irq.h @@ -117,6 +117,4 @@ static inline void hw_resend_irq(struct hw_interrupt_type *h, unsigned int i) static inline void hw_resend_irq(struct hw_interrupt_type *h, unsigned int i) {} #endif -extern irq_desc_t irq_desc [NR_IRQS]; - #endif /* _ASM_HW_IRQ_H */ diff --git a/include/asm-i386/ptrace.h b/include/asm-i386/ptrace.h index 0988167..d80fd65 100644 --- a/include/asm-i386/ptrace.h +++ b/include/asm-i386/ptrace.h @@ -57,7 +57,6 @@ struct pt_regs { #ifdef __KERNEL__ #define user_mode(regs) ((VM_MASK & (regs)->eflags) || (3 & (regs)->xcs)) #define instruction_pointer(regs) ((regs)->eip) -#define force_successful_syscall_return() do { } while (0) #endif #endif diff --git a/include/asm-ia64/acpi-ext.h b/include/asm-ia64/acpi-ext.h index 9271d74..51bbd4a 100644 --- a/include/asm-ia64/acpi-ext.h +++ b/include/asm-ia64/acpi-ext.h @@ -3,15 +3,30 @@ * * Copyright (C) 2003 Hewlett-Packard * Copyright (C) Alex Williamson - * Copyright (C) Bjorn Helgaas * - * Vendor specific extensions to ACPI. + * Vendor specific extensions to ACPI. The HP-specific extensiosn are also used by NEC. */ #ifndef _ASM_IA64_ACPI_EXT_H #define _ASM_IA64_ACPI_EXT_H #include +#define HP_CCSR_LENGTH 0x21 +#define HP_CCSR_TYPE 0x2 +#define HP_CCSR_GUID EFI_GUID(0x69e9adf9, 0x924f, 0xab5f, \ + 0xf6, 0x4a, 0x24, 0xd2, 0x01, 0x37, 0x0e, 0xad) + +struct acpi_hp_vendor_long { + u8 guid_id; + u8 guid[16]; + u8 csr_base[8]; + u8 csr_length[8]; +}; + extern acpi_status hp_acpi_csr_space (acpi_handle, u64 *base, u64 *length); +extern acpi_status acpi_get_crs (acpi_handle, struct acpi_buffer *); +extern struct acpi_resource *acpi_get_crs_next (struct acpi_buffer *, int *); +extern union acpi_resource_data *acpi_get_crs_type (struct acpi_buffer *, int *, int); +extern void acpi_dispose_crs (struct acpi_buffer *); #endif /* _ASM_IA64_ACPI_EXT_H */ diff --git a/include/asm-ia64/atomic.h b/include/asm-ia64/atomic.h index 37029e9..65b3f29 100644 --- a/include/asm-ia64/atomic.h +++ b/include/asm-ia64/atomic.h @@ -56,16 +56,11 @@ ia64_atomic_sub (int i, atomic_t *v) } #define atomic_add_return(i,v) \ -({ \ - int __ia64_aar_i = (i); \ - (__builtin_constant_p(i) \ - && ( (__ia64_aar_i == 1) || (__ia64_aar_i == 4) \ - || (__ia64_aar_i == 8) || (__ia64_aar_i == 16) \ - || (__ia64_aar_i == -1) || (__ia64_aar_i == -4) \ - || (__ia64_aar_i == -8) || (__ia64_aar_i == -16))) \ - ? ia64_fetch_and_add(__ia64_aar_i, &(v)->counter) \ - : ia64_atomic_add(__ia64_aar_i, v); \ -}) + ((__builtin_constant_p(i) && \ + ( (i == 1) || (i == 4) || (i == 8) || (i == 16) \ + || (i == -1) || (i == -4) || (i == -8) || (i == -16))) \ + ? 
ia64_fetch_and_add(i, &(v)->counter) \ + : ia64_atomic_add(i, v)) /* * Atomically add I to V and return TRUE if the resulting value is @@ -77,17 +72,13 @@ atomic_add_negative (int i, atomic_t *v) return atomic_add_return(i, v) < 0; } + #define atomic_sub_return(i,v) \ -({ \ - int __ia64_asr_i = (i); \ - (__builtin_constant_p(i) \ - && ( (__ia64_asr_i == 1) || (__ia64_asr_i == 4) \ - || (__ia64_asr_i == 8) || (__ia64_asr_i == 16) \ - || (__ia64_asr_i == -1) || (__ia64_asr_i == -4) \ - || (__ia64_asr_i == -8) || (__ia64_asr_i == -16))) \ - ? ia64_fetch_and_add(-__ia64_asr_i, &(v)->counter) \ - : ia64_atomic_sub(__ia64_asr_i, v); \ -}) + ((__builtin_constant_p(i) && \ + ( (i == 1) || (i == 4) || (i == 8) || (i == 16) \ + || (i == -1) || (i == -4) || (i == -8) || (i == -16))) \ + ? ia64_fetch_and_add(-(i), &(v)->counter) \ + : ia64_atomic_sub(i, v)) #define atomic_dec_return(v) atomic_sub_return(1, (v)) #define atomic_inc_return(v) atomic_add_return(1, (v)) diff --git a/include/asm-ia64/bitops.h b/include/asm-ia64/bitops.h index b133b67..af58934 100644 --- a/include/asm-ia64/bitops.h +++ b/include/asm-ia64/bitops.h @@ -450,15 +450,15 @@ find_next_bit (void *addr, unsigned long size, unsigned long offset) #ifdef __KERNEL__ -#define __clear_bit(nr, addr) clear_bit(nr, addr) - -#define ext2_set_bit test_and_set_bit -#define ext2_set_bit_atomic(l,n,a) test_and_set_bit(n,a) -#define ext2_clear_bit test_and_clear_bit -#define ext2_clear_bit_atomic(l,n,a) test_and_clear_bit(n,a) -#define ext2_test_bit test_bit -#define ext2_find_first_zero_bit find_first_zero_bit -#define ext2_find_next_zero_bit find_next_zero_bit +#define __clear_bit(nr, addr) clear_bit(nr, addr) + +#define ext2_set_bit test_and_set_bit +#define ext2_set_atomic(l,n,a) test_and_set_bit(n,a) +#define ext2_clear_bit test_and_clear_bit +#define ext2_clear_atomic(l,n,a) test_and_clear_bit(n,a) +#define ext2_test_bit test_bit +#define ext2_find_first_zero_bit find_first_zero_bit +#define ext2_find_next_zero_bit find_next_zero_bit /* Bitmap functions for the minix filesystem. */ #define minix_test_and_set_bit(nr,addr) test_and_set_bit(nr,addr) diff --git a/include/asm-ia64/compat.h b/include/asm-ia64/compat.h index 9473a87..54baeef 100644 --- a/include/asm-ia64/compat.h +++ b/include/asm-ia64/compat.h @@ -102,9 +102,6 @@ struct compat_statfs { int f_spare[6]; }; -#define COMPAT_RLIM_OLD_INFINITY 0x7fffffff -#define COMPAT_RLIM_INFINITY 0xffffffff - typedef u32 compat_old_sigset_t; /* at least 32 bits */ #define _COMPAT_NSIG 64 diff --git a/include/asm-ia64/dma-mapping.h b/include/asm-ia64/dma-mapping.h index 83cceb6..e7e1690 100644 --- a/include/asm-ia64/dma-mapping.h +++ b/include/asm-ia64/dma-mapping.h @@ -1,63 +1 @@ -#ifndef _ASM_IA64_DMA_MAPPING_H -#define _ASM_IA64_DMA_MAPPING_H - -/* - * Copyright (C) 2003 Hewlett-Packard Co - * David Mosberger-Tang - */ - -#define dma_alloc_coherent platform_dma_alloc_coherent -#define dma_alloc_noncoherent platform_dma_alloc_coherent /* coherent mem. 
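The atomic_add_return() rewrite above relies on GCC's __builtin_constant_p: when the increment is a compile-time constant that the fetchadd instruction can encode (plus or minus 1, 4, 8, 16), the fast path is chosen, otherwise the generic routine runs. A GCC-only standalone sketch of that dispatch (the printed labels are illustrative, not the actual ia64 code paths):

/* sketch: constant-increment dispatch as in atomic_add_return above */
#include <stdio.h>

#define FAST_OK(i)						\
	(__builtin_constant_p(i) &&				\
	 ((i) == 1 || (i) == 4 || (i) == 8 || (i) == 16 ||	\
	  (i) == -1 || (i) == -4 || (i) == -8 || (i) == -16))

int main(void)
{
	int n = 3;

	printf("add 4:  %s\n", FAST_OK(4) ? "fetchadd" : "generic");	/* fetchadd */
	printf("add %d: %s\n", n, FAST_OK(n) ? "fetchadd" : "generic");	/* generic */
	return 0;
}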
diff --git a/include/asm-ia64/bitops.h b/include/asm-ia64/bitops.h
index b133b67..af58934 100644
--- a/include/asm-ia64/bitops.h
+++ b/include/asm-ia64/bitops.h
@@ -450,15 +450,15 @@ find_next_bit (void *addr, unsigned long size, unsigned long offset)
 
 #ifdef __KERNEL__
 
-#define __clear_bit(nr, addr)		clear_bit(nr, addr)
-
-#define ext2_set_bit			test_and_set_bit
-#define ext2_set_bit_atomic(l,n,a)	test_and_set_bit(n,a)
-#define ext2_clear_bit			test_and_clear_bit
-#define ext2_clear_bit_atomic(l,n,a)	test_and_clear_bit(n,a)
-#define ext2_test_bit			test_bit
-#define ext2_find_first_zero_bit	find_first_zero_bit
-#define ext2_find_next_zero_bit		find_next_zero_bit
+#define __clear_bit(nr, addr)		clear_bit(nr, addr)
+
+#define ext2_set_bit			test_and_set_bit
+#define ext2_set_atomic(l,n,a)		test_and_set_bit(n,a)
+#define ext2_clear_bit			test_and_clear_bit
+#define ext2_clear_atomic(l,n,a)	test_and_clear_bit(n,a)
+#define ext2_test_bit			test_bit
+#define ext2_find_first_zero_bit	find_first_zero_bit
+#define ext2_find_next_zero_bit		find_next_zero_bit
 
 /* Bitmap functions for the minix filesystem.  */
 #define minix_test_and_set_bit(nr,addr)		test_and_set_bit(nr,addr)
diff --git a/include/asm-ia64/compat.h b/include/asm-ia64/compat.h
index 9473a87..54baeef 100644
--- a/include/asm-ia64/compat.h
+++ b/include/asm-ia64/compat.h
@@ -102,9 +102,6 @@ struct compat_statfs {
 	int		f_spare[6];
 };
 
-#define COMPAT_RLIM_OLD_INFINITY	0x7fffffff
-#define COMPAT_RLIM_INFINITY		0xffffffff
-
 typedef u32		compat_old_sigset_t;	/* at least 32 bits */
 
 #define _COMPAT_NSIG		64
diff --git a/include/asm-ia64/dma-mapping.h b/include/asm-ia64/dma-mapping.h
index 83cceb6..e7e1690 100644
--- a/include/asm-ia64/dma-mapping.h
+++ b/include/asm-ia64/dma-mapping.h
@@ -1,63 +1 @@
-#ifndef _ASM_IA64_DMA_MAPPING_H
-#define _ASM_IA64_DMA_MAPPING_H
-
-/*
- * Copyright (C) 2003 Hewlett-Packard Co
- *	David Mosberger-Tang
- */
-
-#define dma_alloc_coherent	platform_dma_alloc_coherent
-#define dma_alloc_noncoherent	platform_dma_alloc_coherent	/* coherent mem. is cheap */
-#define dma_free_coherent	platform_dma_free_coherent
-#define dma_free_noncoherent	platform_dma_free_coherent
-#define dma_map_single		platform_dma_map_single
-#define dma_map_sg		platform_dma_map_sg
-#define dma_unmap_single	platform_dma_unmap_single
-#define dma_unmap_sg		platform_dma_unmap_sg
-#define dma_sync_single		platform_dma_sync_single
-#define dma_sync_sg		platform_dma_sync_sg
-
-#define dma_map_page(dev, pg, off, size, dir)				\
-	dma_map_single(dev, page_address(pg) + (off), (size), (dir))
-#define dma_unmap_page(dev, dma_addr, size, dir)			\
-	dma_unmap_single(dev, dma_addr, size, dir)
-
-/*
- * Rest of this file is part of the "Advanced DMA API".  Use at your own risk.
- * See Documentation/DMA-API.txt for details.
- */
-
-#define dma_sync_single_range(dev, dma_handle, offset, size, dir)	\
-	dma_sync_single(dev, dma_handle, size, dir)
-
-#define dma_supported		platform_dma_supported
-
-static inline int
-dma_set_mask (struct device *dev, u64 mask)
-{
-	if (!dev->dma_mask || !dma_supported(dev, mask))
-		return -EIO;
-	*dev->dma_mask = mask;
-	return 0;
-}
-
-static inline int
-dma_get_cache_alignment (void)
-{
-	extern int ia64_max_cacheline_size;
-	return ia64_max_cacheline_size;
-}
-
-static inline void
-dma_cache_sync (void *vaddr, size_t size, enum dma_data_direction dir)
-{
-	/*
-	 * IA-64 is cache-coherent, so this is mostly a no-op.  However, we do need to
-	 * ensure that dma_cache_sync() enforces order, hence the mb().
-	 */
-	mb();
-}
-
-#define dma_is_consistent(dma_handle)	(1)	/* all we do is coherent memory... */
-
-#endif /* _ASM_IA64_DMA_MAPPING_H */
+#include
diff --git a/include/asm-ia64/ia32.h b/include/asm-ia64/ia32.h
index a8b5c44..298dfc4 100644
--- a/include/asm-ia64/ia32.h
+++ b/include/asm-ia64/ia32.h
@@ -453,6 +453,8 @@ struct ia32_modify_ldt_ldt_s {
 struct linux_binprm;
 
 extern void ia32_gdt_init (void);
+extern int ia32_setup_frame1 (int sig, struct k_sigaction *ka, siginfo_t *info,
+			      sigset_t *set, struct pt_regs *regs);
 extern void ia32_init_addr_space (struct pt_regs *regs);
 extern int ia32_setup_arg_pages (struct linux_binprm *bprm);
 extern int ia32_exception (struct pt_regs *regs, unsigned long isr);
@@ -474,8 +476,4 @@ extern void ia32_load_segment_descriptors (struct task_struct *task);
 
 #endif /* !CONFIG_IA32_SUPPORT */
 
-/* Declare this unconditionally, so we don't get warnings for unreachable code.  */
-extern int ia32_setup_frame1 (int sig, struct k_sigaction *ka, siginfo_t *info,
-			      sigset_t *set, struct pt_regs *regs);
-
 #endif /* _ASM_IA64_IA32_H */
diff --git a/include/asm-ia64/intrinsics.h b/include/asm-ia64/intrinsics.h
index 1940874..976e3c8 100644
--- a/include/asm-ia64/intrinsics.h
+++ b/include/asm-ia64/intrinsics.h
@@ -17,16 +17,16 @@
 extern unsigned long __bad_size_for_ia64_fetch_and_add (void);
 extern unsigned long __bad_increment_for_ia64_fetch_and_add (void);
 
-#define IA64_FETCHADD(tmp,v,n,sz,sem)						\
+#define IA64_FETCHADD(tmp,v,n,sz)						\
 ({										\
 	switch (sz) {								\
 	      case 4:								\
-		__asm__ __volatile__ ("fetchadd4."sem" %0=[%1],%2"		\
+		__asm__ __volatile__ ("fetchadd4.rel %0=[%1],%2"		\
 				      : "=r"(tmp) : "r"(v), "i"(n) : "memory");	\
 		break;								\
 										\
 	      case 8:								\
-		__asm__ __volatile__ ("fetchadd8."sem" %0=[%1],%2"		\
+		__asm__ __volatile__ ("fetchadd8.rel %0=[%1],%2"		\
 				      : "=r"(tmp) : "r"(v), "i"(n) : "memory");	\
 		break;								\
 										\
@@ -35,34 +35,32 @@ extern unsigned long __bad_increment_for_ia64_fetch_and_add (void);
 	}									\
 })
 
-#define ia64_fetchadd(i,v,sem)							\
+#define ia64_fetch_and_add(i,v)							\
 ({										\
 	__u64 _tmp;								\
 	volatile __typeof__(*(v)) *_v = (v);					\
 	/* Can't use a switch () here: gcc isn't always smart enough for that... */ \
 	if ((i) == -16)								\
-		IA64_FETCHADD(_tmp, _v, -16, sizeof(*(v)), sem);		\
+		IA64_FETCHADD(_tmp, _v, -16, sizeof(*(v)));			\
 	else if ((i) == -8)							\
-		IA64_FETCHADD(_tmp, _v, -8, sizeof(*(v)), sem);			\
+		IA64_FETCHADD(_tmp, _v, -8, sizeof(*(v)));			\
 	else if ((i) == -4)							\
-		IA64_FETCHADD(_tmp, _v, -4, sizeof(*(v)), sem);			\
+		IA64_FETCHADD(_tmp, _v, -4, sizeof(*(v)));			\
 	else if ((i) == -1)							\
-		IA64_FETCHADD(_tmp, _v, -1, sizeof(*(v)), sem);			\
+		IA64_FETCHADD(_tmp, _v, -1, sizeof(*(v)));			\
 	else if ((i) == 1)							\
-		IA64_FETCHADD(_tmp, _v, 1, sizeof(*(v)), sem);			\
+		IA64_FETCHADD(_tmp, _v, 1, sizeof(*(v)));			\
 	else if ((i) == 4)							\
-		IA64_FETCHADD(_tmp, _v, 4, sizeof(*(v)), sem);			\
+		IA64_FETCHADD(_tmp, _v, 4, sizeof(*(v)));			\
 	else if ((i) == 8)							\
-		IA64_FETCHADD(_tmp, _v, 8, sizeof(*(v)), sem);			\
+		IA64_FETCHADD(_tmp, _v, 8, sizeof(*(v)));			\
 	else if ((i) == 16)							\
-		IA64_FETCHADD(_tmp, _v, 16, sizeof(*(v)), sem);			\
+		IA64_FETCHADD(_tmp, _v, 16, sizeof(*(v)));			\
 	else									\
 		_tmp = __bad_increment_for_ia64_fetch_and_add();		\
-	(__typeof__(*(v))) (_tmp);	/* return old value */			\
+	(__typeof__(*(v))) (_tmp + (i));	/* return new value */		\
 })
 
-#define ia64_fetch_and_add(i,v)	(ia64_fetchadd(i, v, "rel") + (i))	/* return new value */
-
 /*
 * This function doesn't exist, so you'll get a linker error if
 * something tries to do an invalid xchg().
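The intrinsics change folds the "+ (i)" into the macro itself: the hardware fetchadd hands back the old value, and ia64_fetch_and_add() is defined to report the new one. The convention modeled with a plain variable instead of the ia64 instruction:

/* sketch: old-value fetchadd wrapped to return the new value */
#include <stdio.h>

static long fetchadd(long *v, long i)	/* returns the old value */
{
	long old = *v;

	*v += i;
	return old;
}

static long fetch_and_add(long *v, long i)	/* returns the new value */
{
	return fetchadd(v, i) + i;
}

int main(void)
{
	long counter = 10;

	printf("new value: %ld\n", fetch_and_add(&counter, 4));	/* 14 */
	printf("counter:   %ld\n", counter);				/* 14 */
	return 0;
}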
* - * Copyright (C) 1998-2003 Hewlett-Packard Co + * Copyright (C) 1998-2002 Hewlett-Packard Co * David Mosberger-Tang * Copyright (C) 1999 Asit Mallick * Copyright (C) 1999 Don Dugger @@ -32,24 +32,6 @@ */ #define IO_SPACE_LIMIT 0xffffffffffffffffUL -#define MAX_IO_SPACES 16 -#define IO_SPACE_BITS 24 -#define IO_SPACE_SIZE (1UL << IO_SPACE_BITS) - -#define IO_SPACE_NR(port) ((port) >> IO_SPACE_BITS) -#define IO_SPACE_BASE(space) ((space) << IO_SPACE_BITS) -#define IO_SPACE_PORT(port) ((port) & (IO_SPACE_SIZE - 1)) - -#define IO_SPACE_SPARSE_ENCODING(p) ((((p) >> 2) << 12) | (p & 0xfff)) - -struct io_space { - unsigned long mmio_base; /* base in MMIO space */ - int sparse; -}; - -extern struct io_space io_space[]; -extern unsigned int num_io_spaces; - # ifdef __KERNEL__ #include @@ -98,34 +80,13 @@ __ia64_get_io_port_base (void) static inline void* __ia64_mk_io_addr (unsigned long port) { - struct io_space *space; - unsigned long offset; + const unsigned long io_base = __ia64_get_io_port_base(); + unsigned long addr; - space = &io_space[IO_SPACE_NR(port)]; - port = IO_SPACE_PORT(port); - if (space->sparse) - offset = IO_SPACE_SPARSE_ENCODING(port); - else - offset = port; - - return (void *) (space->mmio_base | offset); + addr = io_base | ((port >> 2) << 12) | (port & 0xfff); + return (void *) addr; } -#define __ia64_inb ___ia64_inb -#define __ia64_inw ___ia64_inw -#define __ia64_inl ___ia64_inl -#define __ia64_outb ___ia64_outb -#define __ia64_outw ___ia64_outw -#define __ia64_outl ___ia64_outl -#define __ia64_readb ___ia64_readb -#define __ia64_readw ___ia64_readw -#define __ia64_readl ___ia64_readl -#define __ia64_readq ___ia64_readq -#define __ia64_writeb ___ia64_writeb -#define __ia64_writew ___ia64_writew -#define __ia64_writel ___ia64_writel -#define __ia64_writeq ___ia64_writeq - /* * For the in/out routines, we need to do "mf.a" _after_ doing the I/O access to ensure * that the access has completed before executing other I/O accesses. 
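
The reverted __ia64_mk_io_addr() above hard-codes the sparse port encoding that the io_space[] table had made per-space: bits 2 and up of the port select a 4KB page via ((port >> 2) << 12), and the low 12 bits index within it. Just that address arithmetic as a standalone sketch; the I/O base constant is invented for illustration:

        #include <stdio.h>

        /* the sparse I/O encoding hard-coded back into __ia64_mk_io_addr() */
        static unsigned long long mk_io_addr(unsigned long long io_base, unsigned long long port)
        {
                return io_base | ((port >> 2) << 12) | (port & 0xfff);
        }

        int main(void)
        {
                unsigned long long base = 0xf000000000000000ull; /* illustrative base only */
                printf("port 0x3f8  -> %#llx\n", mk_io_addr(base, 0x3f8));
                printf("port 0x1000 -> %#llx\n", mk_io_addr(base, 0x1000));
                return 0;
        }
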
Since we're doing @@ -135,7 +96,7 @@ __ia64_mk_io_addr (unsigned long port) */ static inline unsigned int -___ia64_inb (unsigned long port) +__ia64_inb (unsigned long port) { volatile unsigned char *addr = __ia64_mk_io_addr(port); unsigned char ret; @@ -146,7 +107,7 @@ ___ia64_inb (unsigned long port) } static inline unsigned int -___ia64_inw (unsigned long port) +__ia64_inw (unsigned long port) { volatile unsigned short *addr = __ia64_mk_io_addr(port); unsigned short ret; @@ -157,7 +118,7 @@ ___ia64_inw (unsigned long port) } static inline unsigned int -___ia64_inl (unsigned long port) +__ia64_inl (unsigned long port) { volatile unsigned int *addr = __ia64_mk_io_addr(port); unsigned int ret; @@ -168,7 +129,7 @@ ___ia64_inl (unsigned long port) } static inline void -___ia64_outb (unsigned char val, unsigned long port) +__ia64_outb (unsigned char val, unsigned long port) { volatile unsigned char *addr = __ia64_mk_io_addr(port); @@ -177,7 +138,7 @@ ___ia64_outb (unsigned char val, unsigned long port) } static inline void -___ia64_outw (unsigned short val, unsigned long port) +__ia64_outw (unsigned short val, unsigned long port) { volatile unsigned short *addr = __ia64_mk_io_addr(port); @@ -186,7 +147,7 @@ ___ia64_outw (unsigned short val, unsigned long port) } static inline void -___ia64_outl (unsigned int val, unsigned long port) +__ia64_outl (unsigned int val, unsigned long port) { volatile unsigned int *addr = __ia64_mk_io_addr(port); @@ -199,8 +160,17 @@ __insb (unsigned long port, void *dst, unsigned long count) { unsigned char *dp = dst; - while (count--) - *dp++ = platform_inb(port); + if (platform_inb == __ia64_inb) { + volatile unsigned char *addr = __ia64_mk_io_addr(port); + + __ia64_mf_a(); + while (count--) + *dp++ = *addr; + __ia64_mf_a(); + } else + while (count--) + *dp++ = platform_inb(port); + return; } static inline void @@ -208,8 +178,17 @@ __insw (unsigned long port, void *dst, unsigned long count) { unsigned short *dp = dst; - while (count--) - *dp++ = platform_inw(port); + if (platform_inw == __ia64_inw) { + volatile unsigned short *addr = __ia64_mk_io_addr(port); + + __ia64_mf_a(); + while (count--) + *dp++ = *addr; + __ia64_mf_a(); + } else + while (count--) + *dp++ = platform_inw(port); + return; } static inline void @@ -217,8 +196,17 @@ __insl (unsigned long port, void *dst, unsigned long count) { unsigned int *dp = dst; - while (count--) - *dp++ = platform_inl(port); + if (platform_inl == __ia64_inl) { + volatile unsigned int *addr = __ia64_mk_io_addr(port); + + __ia64_mf_a(); + while (count--) + *dp++ = *addr; + __ia64_mf_a(); + } else + while (count--) + *dp++ = platform_inl(port); + return; } static inline void @@ -226,8 +214,16 @@ __outsb (unsigned long port, const void *src, unsigned long count) { const unsigned char *sp = src; - while (count--) - platform_outb(*sp++, port); + if (platform_outb == __ia64_outb) { + volatile unsigned char *addr = __ia64_mk_io_addr(port); + + while (count--) + *addr = *sp++; + __ia64_mf_a(); + } else + while (count--) + platform_outb(*sp++, port); + return; } static inline void @@ -235,8 +231,16 @@ __outsw (unsigned long port, const void *src, unsigned long count) { const unsigned short *sp = src; - while (count--) - platform_outw(*sp++, port); + if (platform_outw == __ia64_outw) { + volatile unsigned short *addr = __ia64_mk_io_addr(port); + + while (count--) + *addr = *sp++; + __ia64_mf_a(); + } else + while (count--) + platform_outw(*sp++, port); + return; } static inline void @@ -244,8 +248,16 @@ __outsl (unsigned long port, 
void *src, unsigned long count) { const unsigned int *sp = src; - while (count--) - platform_outl(*sp++, port); + if (platform_outl == __ia64_outl) { + volatile unsigned int *addr = __ia64_mk_io_addr(port); + + while (count--) + *addr = *sp++; + __ia64_mf_a(); + } else + while (count--) + platform_outl(*sp++, port); + return; } /* @@ -282,25 +294,25 @@ __outsl (unsigned long port, void *src, unsigned long count) * hopefully it'll stay that way). */ static inline unsigned char -___ia64_readb (void *addr) +__ia64_readb (void *addr) { return *(volatile unsigned char *)addr; } static inline unsigned short -___ia64_readw (void *addr) +__ia64_readw (void *addr) { return *(volatile unsigned short *)addr; } static inline unsigned int -___ia64_readl (void *addr) +__ia64_readl (void *addr) { return *(volatile unsigned int *) addr; } static inline unsigned long -___ia64_readq (void *addr) +__ia64_readq (void *addr) { return *(volatile unsigned long *) addr; } diff --git a/include/asm-ia64/iosapic.h b/include/asm-ia64/iosapic.h index 410fb72..03f2571 100644 --- a/include/asm-ia64/iosapic.h +++ b/include/asm-ia64/iosapic.h @@ -57,7 +57,6 @@ extern void __init iosapic_init (unsigned long address, extern int gsi_to_vector (unsigned int gsi); extern int gsi_to_irq (unsigned int gsi); extern void __init iosapic_parse_prt (void); -extern void iosapic_enable_intr (unsigned int vector); extern int iosapic_register_intr (unsigned int gsi, unsigned long polarity, unsigned long trigger); extern void __init iosapic_override_isa_irq (unsigned int isa_irq, unsigned int gsi, diff --git a/include/asm-ia64/machvec.h b/include/asm-ia64/machvec.h index 5f23c35..31885a2 100644 --- a/include/asm-ia64/machvec.h +++ b/include/asm-ia64/machvec.h @@ -4,7 +4,7 @@ * Copyright (C) 1999 Silicon Graphics, Inc. * Copyright (C) Srinivasa Thirumalachar * Copyright (C) Vijay Chander - * Copyright (C) 1999-2001, 2003 Hewlett-Packard Co. + * Copyright (C) 1999-2001 Hewlett-Packard Co. 
* David Mosberger-Tang */ #ifndef _ASM_IA64_MACHVEC_H @@ -14,7 +14,7 @@ #include /* forward declarations: */ -struct device; +struct pci_dev; struct pt_regs; struct scatterlist; struct irq_desc; @@ -33,17 +33,17 @@ typedef struct irq_desc *ia64_mv_irq_desc (unsigned int); typedef u8 ia64_mv_irq_to_vector (u8); typedef unsigned int ia64_mv_local_vector_to_irq (u8 vector); -/* DMA-mapping interface: */ -typedef void ia64_mv_dma_init (void); -typedef void *ia64_mv_dma_alloc_coherent (struct device *, size_t, dma_addr_t *, int); -typedef void ia64_mv_dma_free_coherent (struct device *, size_t, void *, dma_addr_t); -typedef dma_addr_t ia64_mv_dma_map_single (struct device *, void *, size_t, int); -typedef void ia64_mv_dma_unmap_single (struct device *, dma_addr_t, size_t, int); -typedef int ia64_mv_dma_map_sg (struct device *, struct scatterlist *, int, int); -typedef void ia64_mv_dma_unmap_sg (struct device *, struct scatterlist *, int, int); -typedef void ia64_mv_dma_sync_single (struct device *, dma_addr_t, size_t, int); -typedef void ia64_mv_dma_sync_sg (struct device *, struct scatterlist *, int, int); -typedef int ia64_mv_dma_supported (struct device *, u64); +/* PCI-DMA interface: */ +typedef void ia64_mv_pci_dma_init (void); +typedef void *ia64_mv_pci_alloc_consistent (struct pci_dev *, size_t, dma_addr_t *); +typedef void ia64_mv_pci_free_consistent (struct pci_dev *, size_t, void *, dma_addr_t); +typedef dma_addr_t ia64_mv_pci_map_single (struct pci_dev *, void *, size_t, int); +typedef void ia64_mv_pci_unmap_single (struct pci_dev *, dma_addr_t, size_t, int); +typedef int ia64_mv_pci_map_sg (struct pci_dev *, struct scatterlist *, int, int); +typedef void ia64_mv_pci_unmap_sg (struct pci_dev *, struct scatterlist *, int, int); +typedef void ia64_mv_pci_dma_sync_single (struct pci_dev *, dma_addr_t, size_t, int); +typedef void ia64_mv_pci_dma_sync_sg (struct pci_dev *, struct scatterlist *, int, int); +typedef int ia64_mv_pci_dma_supported (struct pci_dev *, u64); /* * WARNING: The legacy I/O space is _architected_. 
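
The typedef renames above are mechanical, but the machinery is easy to lose in a diff this wide: every overridable platform operation sits behind a function pointer in one machine vector, and the platform_* macros are plain field accesses into it. A reduced model of that dispatch; struct machvec, generic_inb, and sim_inb are invented names, not the kernel's:

        #include <stdio.h>

        static unsigned int generic_inb(unsigned long port) { (void)port; return 0xff; }
        static unsigned int sim_inb(unsigned long port)     { (void)port; return 0x00; }

        /* one slot per overridable operation, as in struct ia64_machine_vector */
        struct machvec {
                const char *name;
                unsigned int (*inb)(unsigned long port);
        };

        static struct machvec mv = { "generic", generic_inb };

        /* the platform_* macros resolve to vector fields exactly like this */
        #define platform_inb mv.inb

        int main(void)
        {
                printf("%s: %#x\n", mv.name, platform_inb(0x60));
                mv = (struct machvec){ "hpsim", sim_inb };  /* machvec_init() swaps the table */
                printf("%s: %#x\n", mv.name, platform_inb(0x60));
                return 0;
        }
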
Platforms are @@ -66,7 +66,6 @@ typedef unsigned int ia64_mv_readl_t (void *); typedef unsigned long ia64_mv_readq_t (void *); extern void machvec_noop (void); -extern void machvec_memory_fence (void); # if defined (CONFIG_IA64_HP_SIM) # include @@ -93,16 +92,16 @@ extern void machvec_memory_fence (void); # define platform_log_print ia64_mv.log_print # define platform_send_ipi ia64_mv.send_ipi # define platform_global_tlb_purge ia64_mv.global_tlb_purge -# define platform_dma_init ia64_mv.dma_init -# define platform_dma_alloc_coherent ia64_mv.dma_alloc_coherent -# define platform_dma_free_coherent ia64_mv.dma_free_coherent -# define platform_dma_map_single ia64_mv.dma_map_single -# define platform_dma_unmap_single ia64_mv.dma_unmap_single -# define platform_dma_map_sg ia64_mv.dma_map_sg -# define platform_dma_unmap_sg ia64_mv.dma_unmap_sg -# define platform_dma_sync_single ia64_mv.dma_sync_single -# define platform_dma_sync_sg ia64_mv.dma_sync_sg -# define platform_dma_supported ia64_mv.dma_supported +# define platform_pci_dma_init ia64_mv.dma_init +# define platform_pci_alloc_consistent ia64_mv.alloc_consistent +# define platform_pci_free_consistent ia64_mv.free_consistent +# define platform_pci_map_single ia64_mv.map_single +# define platform_pci_unmap_single ia64_mv.unmap_single +# define platform_pci_map_sg ia64_mv.map_sg +# define platform_pci_unmap_sg ia64_mv.unmap_sg +# define platform_pci_dma_sync_single ia64_mv.sync_single +# define platform_pci_dma_sync_sg ia64_mv.sync_sg +# define platform_pci_dma_supported ia64_mv.dma_supported # define platform_irq_desc ia64_mv.irq_desc # define platform_irq_to_vector ia64_mv.irq_to_vector # define platform_local_vector_to_irq ia64_mv.local_vector_to_irq @@ -120,7 +119,7 @@ extern void machvec_memory_fence (void); /* __attribute__((__aligned__(16))) is required to make size of the * structure multiple of 16 bytes. - * This will fillup the holes created because of section 3.3.1 in + * This will fillup the holes created because of section 3.3.1 in * Software Conventions guide. 
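
The alignment comment above rests on a C guarantee worth spelling out: the size of a type is always a multiple of its alignment, so forcing __aligned__(16) on the vector pads its tail and keeps the array of vectors in the .machvec section correctly strided. A compile-time check of that property:

        #include <stdio.h>

        struct padded {
                char c;         /* one byte of payload ... */
        } __attribute__((__aligned__(16)));

        /* pre-C11 static assertion: the array size goes negative if the claim is false */
        typedef char size_is_multiple_of_16[(sizeof(struct padded) % 16 == 0) ? 1 : -1];

        int main(void)
        {
                printf("sizeof(struct padded) = %zu\n", sizeof(struct padded)); /* 16, not 1 */
                return 0;
        }
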
*/ struct ia64_machine_vector { @@ -134,16 +133,16 @@ struct ia64_machine_vector { ia64_mv_log_print_t *log_print; ia64_mv_send_ipi_t *send_ipi; ia64_mv_global_tlb_purge_t *global_tlb_purge; - ia64_mv_dma_init *dma_init; - ia64_mv_dma_alloc_coherent *dma_alloc_coherent; - ia64_mv_dma_free_coherent *dma_free_coherent; - ia64_mv_dma_map_single *dma_map_single; - ia64_mv_dma_unmap_single *dma_unmap_single; - ia64_mv_dma_map_sg *dma_map_sg; - ia64_mv_dma_unmap_sg *dma_unmap_sg; - ia64_mv_dma_sync_single *dma_sync_single; - ia64_mv_dma_sync_sg *dma_sync_sg; - ia64_mv_dma_supported *dma_supported; + ia64_mv_pci_dma_init *dma_init; + ia64_mv_pci_alloc_consistent *alloc_consistent; + ia64_mv_pci_free_consistent *free_consistent; + ia64_mv_pci_map_single *map_single; + ia64_mv_pci_unmap_single *unmap_single; + ia64_mv_pci_map_sg *map_sg; + ia64_mv_pci_unmap_sg *unmap_sg; + ia64_mv_pci_dma_sync_single *sync_single; + ia64_mv_pci_dma_sync_sg *sync_sg; + ia64_mv_pci_dma_supported *dma_supported; ia64_mv_irq_desc *irq_desc; ia64_mv_irq_to_vector *irq_to_vector; ia64_mv_local_vector_to_irq *local_vector_to_irq; @@ -171,16 +170,16 @@ struct ia64_machine_vector { platform_log_print, \ platform_send_ipi, \ platform_global_tlb_purge, \ - platform_dma_init, \ - platform_dma_alloc_coherent, \ - platform_dma_free_coherent, \ - platform_dma_map_single, \ - platform_dma_unmap_single, \ - platform_dma_map_sg, \ - platform_dma_unmap_sg, \ - platform_dma_sync_single, \ - platform_dma_sync_sg, \ - platform_dma_supported, \ + platform_pci_dma_init, \ + platform_pci_alloc_consistent, \ + platform_pci_free_consistent, \ + platform_pci_map_single, \ + platform_pci_unmap_single, \ + platform_pci_map_sg, \ + platform_pci_unmap_sg, \ + platform_pci_dma_sync_single, \ + platform_pci_dma_sync_sg, \ + platform_pci_dma_supported, \ platform_irq_desc, \ platform_irq_to_vector, \ platform_local_vector_to_irq, \ @@ -206,16 +205,16 @@ extern void machvec_init (const char *name); /* * Declare default routines which aren't declared anywhere else: */ -extern ia64_mv_dma_init swiotlb_init; -extern ia64_mv_dma_alloc_coherent swiotlb_alloc_coherent; -extern ia64_mv_dma_free_coherent swiotlb_free_coherent; -extern ia64_mv_dma_map_single swiotlb_map_single; -extern ia64_mv_dma_unmap_single swiotlb_unmap_single; -extern ia64_mv_dma_map_sg swiotlb_map_sg; -extern ia64_mv_dma_unmap_sg swiotlb_unmap_sg; -extern ia64_mv_dma_sync_single swiotlb_sync_single; -extern ia64_mv_dma_sync_sg swiotlb_sync_sg; -extern ia64_mv_dma_supported swiotlb_dma_supported; +extern ia64_mv_pci_dma_init swiotlb_init; +extern ia64_mv_pci_alloc_consistent swiotlb_alloc_consistent; +extern ia64_mv_pci_free_consistent swiotlb_free_consistent; +extern ia64_mv_pci_map_single swiotlb_map_single; +extern ia64_mv_pci_unmap_single swiotlb_unmap_single; +extern ia64_mv_pci_map_sg swiotlb_map_sg; +extern ia64_mv_pci_unmap_sg swiotlb_unmap_sg; +extern ia64_mv_pci_dma_sync_single swiotlb_sync_single; +extern ia64_mv_pci_dma_sync_sg swiotlb_sync_sg; +extern ia64_mv_pci_dma_supported swiotlb_pci_dma_supported; /* * Define default versions so we can extend machvec for new platforms without having @@ -248,35 +247,35 @@ extern ia64_mv_dma_supported swiotlb_dma_supported; #ifndef platform_global_tlb_purge # define platform_global_tlb_purge ia64_global_tlb_purge /* default to architected version */ #endif -#ifndef platform_dma_init -# define platform_dma_init swiotlb_init +#ifndef platform_pci_dma_init +# define platform_pci_dma_init swiotlb_init #endif -#ifndef 
platform_dma_alloc_coherent -# define platform_dma_alloc_coherent swiotlb_alloc_coherent +#ifndef platform_pci_alloc_consistent +# define platform_pci_alloc_consistent swiotlb_alloc_consistent #endif -#ifndef platform_dma_free_coherent -# define platform_dma_free_coherent swiotlb_free_coherent +#ifndef platform_pci_free_consistent +# define platform_pci_free_consistent swiotlb_free_consistent #endif -#ifndef platform_dma_map_single -# define platform_dma_map_single swiotlb_map_single +#ifndef platform_pci_map_single +# define platform_pci_map_single swiotlb_map_single #endif -#ifndef platform_dma_unmap_single -# define platform_dma_unmap_single swiotlb_unmap_single +#ifndef platform_pci_unmap_single +# define platform_pci_unmap_single swiotlb_unmap_single #endif -#ifndef platform_dma_map_sg -# define platform_dma_map_sg swiotlb_map_sg +#ifndef platform_pci_map_sg +# define platform_pci_map_sg swiotlb_map_sg #endif -#ifndef platform_dma_unmap_sg -# define platform_dma_unmap_sg swiotlb_unmap_sg +#ifndef platform_pci_unmap_sg +# define platform_pci_unmap_sg swiotlb_unmap_sg #endif -#ifndef platform_dma_sync_single -# define platform_dma_sync_single swiotlb_sync_single +#ifndef platform_pci_dma_sync_single +# define platform_pci_dma_sync_single swiotlb_sync_single #endif -#ifndef platform_dma_sync_sg -# define platform_dma_sync_sg swiotlb_sync_sg +#ifndef platform_pci_dma_sync_sg +# define platform_pci_dma_sync_sg swiotlb_sync_sg #endif -#ifndef platform_dma_supported -# define platform_dma_supported swiotlb_dma_supported +#ifndef platform_pci_dma_supported +# define platform_pci_dma_supported swiotlb_pci_dma_supported #endif #ifndef platform_irq_desc # define platform_irq_desc __ia64_irq_desc diff --git a/include/asm-ia64/machvec_hpzx1.h b/include/asm-ia64/machvec_hpzx1.h index 9927ba1..d6a6ef6 100644 --- a/include/asm-ia64/machvec_hpzx1.h +++ b/include/asm-ia64/machvec_hpzx1.h @@ -2,13 +2,13 @@ #define _ASM_IA64_MACHVEC_HPZX1_h extern ia64_mv_setup_t dig_setup; -extern ia64_mv_dma_alloc_coherent sba_alloc_coherent; -extern ia64_mv_dma_free_coherent sba_free_coherent; -extern ia64_mv_dma_map_single sba_map_single; -extern ia64_mv_dma_unmap_single sba_unmap_single; -extern ia64_mv_dma_map_sg sba_map_sg; -extern ia64_mv_dma_unmap_sg sba_unmap_sg; -extern ia64_mv_dma_supported sba_dma_supported; +extern ia64_mv_pci_alloc_consistent sba_alloc_consistent; +extern ia64_mv_pci_free_consistent sba_free_consistent; +extern ia64_mv_pci_map_single sba_map_single; +extern ia64_mv_pci_unmap_single sba_unmap_single; +extern ia64_mv_pci_map_sg sba_map_sg; +extern ia64_mv_pci_unmap_sg sba_unmap_sg; +extern ia64_mv_pci_dma_supported sba_dma_supported; /* * This stuff has dual use! 
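
The #ifndef chain above is the standard header-level override pattern: whichever platform header was included first may claim any platform_pci_* slot, and only the slots still undefined fall through to the swiotlb defaults. Condensed to two slots, with illustrative function names, the shape is:

        #include <stdio.h>

        /* what a platform header such as machvec_hpzx1.h does for its slots */
        #define platform_pci_map_single sba_map_single

        /* what machvec.h then does for every slot */
        #ifndef platform_pci_map_single
        # define platform_pci_map_single swiotlb_map_single     /* default, skipped here */
        #endif
        #ifndef platform_pci_unmap_single
        # define platform_pci_unmap_single swiotlb_unmap_single /* default, taken here */
        #endif

        static void sba_map_single(void)       { puts("sba_map_single"); }
        static void swiotlb_unmap_single(void) { puts("swiotlb_unmap_single"); }

        int main(void)
        {
                platform_pci_map_single();   /* resolves to the platform override */
                platform_pci_unmap_single(); /* resolves to the swiotlb default */
                return 0;
        }
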
@@ -19,15 +19,15 @@ extern ia64_mv_dma_supported sba_dma_supported; */ #define platform_name "hpzx1" #define platform_setup dig_setup -#define platform_dma_init ((ia64_mv_dma_init *) machvec_noop) -#define platform_dma_alloc_coherent sba_alloc_coherent -#define platform_dma_free_coherent sba_free_coherent -#define platform_dma_map_single sba_map_single -#define platform_dma_unmap_single sba_unmap_single -#define platform_dma_map_sg sba_map_sg -#define platform_dma_unmap_sg sba_unmap_sg -#define platform_dma_sync_single ((ia64_mv_dma_sync_single *) machvec_memory_fence) -#define platform_dma_sync_sg ((ia64_mv_dma_sync_sg *) machvec_memory_fence) -#define platform_dma_supported sba_dma_supported +#define platform_pci_dma_init ((ia64_mv_pci_dma_init *) machvec_noop) +#define platform_pci_alloc_consistent sba_alloc_consistent +#define platform_pci_free_consistent sba_free_consistent +#define platform_pci_map_single sba_map_single +#define platform_pci_unmap_single sba_unmap_single +#define platform_pci_map_sg sba_map_sg +#define platform_pci_unmap_sg sba_unmap_sg +#define platform_pci_dma_sync_single ((ia64_mv_pci_dma_sync_single *) machvec_noop) +#define platform_pci_dma_sync_sg ((ia64_mv_pci_dma_sync_sg *) machvec_noop) +#define platform_pci_dma_supported sba_dma_supported #endif /* _ASM_IA64_MACHVEC_HPZX1_h */ diff --git a/include/asm-ia64/machvec_init.h b/include/asm-ia64/machvec_init.h index c90ecf8..71912c1 100644 --- a/include/asm-ia64/machvec_init.h +++ b/include/asm-ia64/machvec_init.h @@ -1,3 +1,7 @@ +#define __MACHVEC_HDR(n) +#define __MACHVEC_EXPAND(n) __MACHVEC_HDR(n) +#define MACHVEC_PLATFORM_HEADER __MACHVEC_EXPAND(MACHVEC_PLATFORM_NAME) + #include extern ia64_mv_send_ipi_t ia64_send_ipi; @@ -12,10 +16,6 @@ extern ia64_mv_inl_t __ia64_inl; extern ia64_mv_outb_t __ia64_outb; extern ia64_mv_outw_t __ia64_outw; extern ia64_mv_outl_t __ia64_outl; -extern ia64_mv_readb_t __ia64_readb; -extern ia64_mv_readw_t __ia64_readw; -extern ia64_mv_readl_t __ia64_readl; -extern ia64_mv_readq_t __ia64_readq; #define MACHVEC_HELPER(name) \ struct ia64_machine_vector machvec_##name __attribute__ ((unused, __section__ (".machvec"))) \ diff --git a/include/asm-ia64/machvec_sn1.h b/include/asm-ia64/machvec_sn1.h index 4c8c9f7..354b029 100644 --- a/include/asm-ia64/machvec_sn1.h +++ b/include/asm-ia64/machvec_sn1.h @@ -44,14 +44,14 @@ extern ia64_mv_inl_t sn1_inl; extern ia64_mv_outb_t sn1_outb; extern ia64_mv_outw_t sn1_outw; extern ia64_mv_outl_t sn1_outl; -extern ia64_mv_dma_alloc_coherent sn1_dma_alloc_coherent; -extern ia64_mv_dma_free_coherent sn1_dma_free_coherent; -extern ia64_mv_dma_map_single sn1_dma_map_single; -extern ia64_mv_dma_unmap_single sn1_dma_unmap_single; -extern ia64_mv_dma_map_sg sn1_dma_map_sg; -extern ia64_mv_dma_unmap_sg sn1_dma_unmap_sg; -extern ia64_mv_dma_sync_single sn1_dma_sync_single; -extern ia64_mv_dma_sync_sg sn1_dma_sync_sg; +extern ia64_mv_pci_alloc_consistent sn1_pci_alloc_consistent; +extern ia64_mv_pci_free_consistent sn1_pci_free_consistent; +extern ia64_mv_pci_map_single sn1_pci_map_single; +extern ia64_mv_pci_unmap_single sn1_pci_unmap_single; +extern ia64_mv_pci_map_sg sn1_pci_map_sg; +extern ia64_mv_pci_unmap_sg sn1_pci_unmap_sg; +extern ia64_mv_pci_dma_sync_single sn1_pci_dma_sync_single; +extern ia64_mv_pci_dma_sync_sg sn1_pci_dma_sync_sg; /* * This stuff has dual use! 
@@ -72,14 +72,14 @@ extern ia64_mv_dma_sync_sg sn1_dma_sync_sg; #define platform_outb sn1_outb #define platform_outw sn1_outw #define platform_outl sn1_outl -#define platform_dma_init machvec_noop -#define platform_dma_alloc_coherent sn1_dma_alloc_coherent -#define platform_dma_free_coherent sn1_dma_free_coherent -#define platform_dma_map_single sn1_dma_map_single -#define platform_dma_unmap_single sn1_dma_unmap_single -#define platform_dma_map_sg sn1_dma_map_sg -#define platform_dma_unmap_sg sn1_dma_unmap_sg -#define platform_dma_sync_single sn1_dma_sync_single -#define platform_dma_sync_sg sn1_dma_sync_sg +#define platform_pci_dma_init machvec_noop +#define platform_pci_alloc_consistent sn1_pci_alloc_consistent +#define platform_pci_free_consistent sn1_pci_free_consistent +#define platform_pci_map_single sn1_pci_map_single +#define platform_pci_unmap_single sn1_pci_unmap_single +#define platform_pci_map_sg sn1_pci_map_sg +#define platform_pci_unmap_sg sn1_pci_unmap_sg +#define platform_pci_dma_sync_single sn1_pci_dma_sync_single +#define platform_pci_dma_sync_sg sn1_pci_dma_sync_sg #endif /* _ASM_IA64_MACHVEC_SN1_h */ diff --git a/include/asm-ia64/machvec_sn2.h b/include/asm-ia64/machvec_sn2.h index 9146e1c..6df35ac 100644 --- a/include/asm-ia64/machvec_sn2.h +++ b/include/asm-ia64/machvec_sn2.h @@ -51,15 +51,15 @@ extern ia64_mv_readb_t __sn_readb; extern ia64_mv_readw_t __sn_readw; extern ia64_mv_readl_t __sn_readl; extern ia64_mv_readq_t __sn_readq; -extern ia64_mv_dma_alloc_coherent sn_dma_alloc_coherent; -extern ia64_mv_dma_free_coherent sn_dma_free_coherent; -extern ia64_mv_dma_map_single sn_dma_map_single; -extern ia64_mv_dma_unmap_single sn_dma_unmap_single; -extern ia64_mv_dma_map_sg sn_dma_map_sg; -extern ia64_mv_dma_unmap_sg sn_dma_unmap_sg; -extern ia64_mv_dma_sync_single sn_dma_sync_single; -extern ia64_mv_dma_sync_sg sn_dma_sync_sg; -extern ia64_mv_dma_supported sn_dma_supported; +extern ia64_mv_pci_alloc_consistent sn_pci_alloc_consistent; +extern ia64_mv_pci_free_consistent sn_pci_free_consistent; +extern ia64_mv_pci_map_single sn_pci_map_single; +extern ia64_mv_pci_unmap_single sn_pci_unmap_single; +extern ia64_mv_pci_map_sg sn_pci_map_sg; +extern ia64_mv_pci_unmap_sg sn_pci_unmap_sg; +extern ia64_mv_pci_dma_sync_single sn_pci_dma_sync_single; +extern ia64_mv_pci_dma_sync_sg sn_pci_dma_sync_sg; +extern ia64_mv_pci_dma_supported sn_pci_dma_supported; /* * This stuff has dual use! 
@@ -88,15 +88,15 @@ extern ia64_mv_dma_supported sn_dma_supported; #define platform_irq_desc sn_irq_desc #define platform_irq_to_vector sn_irq_to_vector #define platform_local_vector_to_irq sn_local_vector_to_irq -#define platform_dma_init machvec_noop -#define platform_dma_alloc_coherent sn_dma_alloc_coherent -#define platform_dma_free_coherent sn_dma_free_coherent -#define platform_dma_map_single sn_dma_map_single -#define platform_dma_unmap_single sn_dma_unmap_single -#define platform_dma_map_sg sn_dma_map_sg -#define platform_dma_unmap_sg sn_dma_unmap_sg -#define platform_dma_sync_single sn_dma_sync_single -#define platform_dma_sync_sg sn_dma_sync_sg -#define platform_dma_supported sn_dma_supported +#define platform_pci_dma_init machvec_noop +#define platform_pci_alloc_consistent sn_pci_alloc_consistent +#define platform_pci_free_consistent sn_pci_free_consistent +#define platform_pci_map_single sn_pci_map_single +#define platform_pci_unmap_single sn_pci_unmap_single +#define platform_pci_map_sg sn_pci_map_sg +#define platform_pci_unmap_sg sn_pci_unmap_sg +#define platform_pci_dma_sync_single sn_pci_dma_sync_single +#define platform_pci_dma_sync_sg sn_pci_dma_sync_sg +#define platform_pci_dma_supported sn_pci_dma_supported #endif /* _ASM_IA64_MACHVEC_SN2_H */ diff --git a/include/asm-ia64/mca.h b/include/asm-ia64/mca.h index 63853a2..e4ddfbe 100644 --- a/include/asm-ia64/mca.h +++ b/include/asm-ia64/mca.h @@ -11,10 +11,7 @@ #define _ASM_IA64_MCA_H #if !defined(__ASSEMBLY__) - -#include #include - #include #include #include @@ -132,10 +129,10 @@ extern void ia64_os_mca_dispatch_end(void); extern void ia64_mca_ucmc_handler(void); extern void ia64_monarch_init_handler(void); extern void ia64_slave_init_handler(void); -extern irqreturn_t ia64_mca_rendez_int_handler(int,void *,struct pt_regs *); -extern irqreturn_t ia64_mca_wakeup_int_handler(int,void *,struct pt_regs *); -extern irqreturn_t ia64_mca_cmc_int_handler(int,void *,struct pt_regs *); -extern irqreturn_t ia64_mca_cpe_int_handler(int,void *,struct pt_regs *); +extern void ia64_mca_rendez_int_handler(int,void *,struct pt_regs *); +extern void ia64_mca_wakeup_int_handler(int,void *,struct pt_regs *); +extern void ia64_mca_cmc_int_handler(int,void *,struct pt_regs *); +extern void ia64_mca_cpe_int_handler(int,void *,struct pt_regs *); extern int ia64_log_print(int,prfunc_t); extern void ia64_mca_cmc_vector_setup(void); extern int ia64_mca_check_errors(void); diff --git a/include/asm-ia64/page.h b/include/asm-ia64/page.h index a2d4447..fb3fb3f 100644 --- a/include/asm-ia64/page.h +++ b/include/asm-ia64/page.h @@ -37,8 +37,6 @@ # if defined(CONFIG_HUGETLB_PAGE_SIZE_4GB) # define HPAGE_SHIFT 32 -# elif defined(CONFIG_HUGETLB_PAGE_SIZE_1GB) -# define HPAGE_SHIFT 30 # elif defined(CONFIG_HUGETLB_PAGE_SIZE_256MB) # define HPAGE_SHIFT 28 # elif defined(CONFIG_HUGETLB_PAGE_SIZE_64MB) @@ -91,12 +89,7 @@ do { \ #define virt_addr_valid(kaddr) pfn_valid(__pa(kaddr) >> PAGE_SHIFT) #ifndef CONFIG_DISCONTIGMEM -# ifdef CONFIG_VIRTUAL_MEM_MAP - extern int ia64_pfn_valid (unsigned long pfn); -# define pfn_valid(pfn) (((pfn) < max_mapnr) && ia64_pfn_valid(pfn)) -# else -# define pfn_valid(pfn) ((pfn) < max_mapnr) -# endif +#define pfn_valid(pfn) ((pfn) < max_mapnr) #define virt_to_page(kaddr) pfn_to_page(__pa(kaddr) >> PAGE_SHIFT) #define page_to_pfn(page) ((unsigned long) (page - mem_map)) #define pfn_to_page(pfn) (mem_map + (pfn)) diff --git a/include/asm-ia64/pal.h b/include/asm-ia64/pal.h index 5640226..840ae4b 100644 --- a/include/asm-ia64/pal.h 
+++ b/include/asm-ia64/pal.h @@ -622,8 +622,7 @@ typedef struct pal_min_state_area_s { u64 pmsa_xip; /* previous iip */ u64 pmsa_xpsr; /* previous psr */ u64 pmsa_xfs; /* previous ifs */ - u64 pmsa_br1; /* branch register 1 */ - u64 pmsa_reserved[70]; /* pal_min_state_area should total to 1KB */ + u64 pmsa_reserved[71]; /* pal_min_state_area should total to 1KB */ } pal_min_state_area_t; diff --git a/include/asm-ia64/pci.h b/include/asm-ia64/pci.h index 3c94af4..56760f7 100644 --- a/include/asm-ia64/pci.h +++ b/include/asm-ia64/pci.h @@ -47,7 +47,18 @@ pcibios_penalize_isa_irq (int irq) #define HAVE_ARCH_PCI_MWI 1 extern int pcibios_prep_mwi (struct pci_dev *); -#include +/* + * Dynamic DMA mapping API. See Documentation/DMA-mapping.txt for details. + */ +#define pci_alloc_consistent platform_pci_alloc_consistent +#define pci_free_consistent platform_pci_free_consistent +#define pci_map_single platform_pci_map_single +#define pci_unmap_single platform_pci_unmap_single +#define pci_map_sg platform_pci_map_sg +#define pci_unmap_sg platform_pci_unmap_sg +#define pci_dma_sync_single platform_pci_dma_sync_single +#define pci_dma_sync_sg platform_pci_dma_sync_sg +#define pci_dma_supported platform_pci_dma_supported /* pci_unmap_{single,page} is not a nop, thus... */ #define DECLARE_PCI_UNMAP_ADDR(ADDR_NAME) \ @@ -63,12 +74,18 @@ extern int pcibios_prep_mwi (struct pci_dev *); #define pci_unmap_len_set(PTR, LEN_NAME, VAL) \ (((PTR)->LEN_NAME) = (VAL)) +#define pci_map_page(dev,pg,off,size,dir) \ + pci_map_single((dev), page_address(pg) + (off), (size), (dir)) +#define pci_unmap_page(dev,dma_addr,size,dir) \ + pci_unmap_single((dev), (dma_addr), (size), (dir)) + /* The ia64 platform always supports 64-bit addressing. */ -#define pci_dac_dma_supported(pci_dev, mask) (1) -#define pci_dac_page_to_dma(dev,pg,off,dir) ((dma_addr_t) page_to_bus(pg) + (off)) -#define pci_dac_dma_to_page(dev,dma_addr) (virt_to_page(bus_to_virt(dma_addr))) -#define pci_dac_dma_to_offset(dev,dma_addr) ((dma_addr) & ~PAGE_MASK) -#define pci_dac_dma_sync_single(dev,dma_addr,len,dir) do { mb(); } while (0) +#define pci_dac_dma_supported(pci_dev, mask) (1) + +#define pci_dac_page_to_dma(dev,pg,off,dir) ((dma_addr_t) page_to_bus(pg) + (off)) +#define pci_dac_dma_to_page(dev,dma_addr) (virt_to_page(bus_to_virt(dma_addr))) +#define pci_dac_dma_to_offset(dev,dma_addr) ((dma_addr) & ~PAGE_MASK) +#define pci_dac_dma_sync_single(dev,dma_addr,len,dir) do { /* nothing */ } while (0) /* Return the index of the PCI controller for device PDEV. 
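
The DAC helpers above lean on the usual mask split: PAGE_MASK clears the in-page bits, so addr & PAGE_MASK is the page base and addr & ~PAGE_MASK is the offset that pci_dac_dma_to_offset() hands back. A quick numeric check, assuming a 16KB page size (one common ia64 configuration):

        #include <stdio.h>

        #define PAGE_SHIFT 14   /* 16KB pages, assumed for this example */
        #define PAGE_SIZE  (1ul << PAGE_SHIFT)
        #define PAGE_MASK  (~(PAGE_SIZE - 1))

        int main(void)
        {
                unsigned long dma_addr = 0x12345678ul;
                printf("page base = %#lx\n", dma_addr & PAGE_MASK);  /* 0x12344000 */
                printf("offset    = %#lx\n", dma_addr & ~PAGE_MASK); /* 0x1678 */
                return 0;
        }
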
*/ #define pci_controller_num(PDEV) (0) @@ -80,18 +97,12 @@ extern int pcibios_prep_mwi (struct pci_dev *); extern int pci_mmap_page_range (struct pci_dev *dev, struct vm_area_struct *vma, enum pci_mmap_state mmap_state, int write_combine); -struct pci_window { - struct resource resource; - u64 offset; -}; - struct pci_controller { void *acpi_handle; void *iommu; int segment; - unsigned int windows; - struct pci_window *window; + u64 mem_offset; }; #define PCI_CONTROLLER(busdev) ((struct pci_controller *) busdev->sysdata) diff --git a/include/asm-ia64/percpu.h b/include/asm-ia64/percpu.h index 5c6ca2d..a87a976 100644 --- a/include/asm-ia64/percpu.h +++ b/include/asm-ia64/percpu.h @@ -5,7 +5,7 @@ #include /* - * Copyright (C) 2002-2003 Hewlett-Packard Co + * Copyright (C) 2002 Hewlett-Packard Co * David Mosberger-Tang */ @@ -35,8 +35,6 @@ extern unsigned long __per_cpu_offset[NR_CPUS]; #define EXPORT_PER_CPU_SYMBOL(var) EXPORT_SYMBOL(var##__per_cpu) #define EXPORT_PER_CPU_SYMBOL_GPL(var) EXPORT_SYMBOL_GPL(var##__per_cpu) -extern void setup_per_cpu_areas (void); - #endif /* !__ASSEMBLY__ */ #endif /* _ASM_IA64_PERCPU_H */ diff --git a/include/asm-ia64/perfmon.h b/include/asm-ia64/perfmon.h index 9db86e5..af8a393 100644 --- a/include/asm-ia64/perfmon.h +++ b/include/asm-ia64/perfmon.h @@ -41,7 +41,6 @@ #define PFM_FL_NOTIFY_BLOCK 0x04 /* block task on user level notifications */ #define PFM_FL_SYSTEM_WIDE 0x08 /* create a system wide context */ #define PFM_FL_EXCL_IDLE 0x20 /* exclude idle task from system wide session */ -#define PFM_FL_UNSECURE 0x40 /* allow unsecure monitoring for non self-monitoring task */ /* * PMC flags @@ -126,7 +125,7 @@ typedef struct { * Define the version numbers for both perfmon as a whole and the sampling buffer format. */ #define PFM_VERSION_MAJ 1U -#define PFM_VERSION_MIN 4U +#define PFM_VERSION_MIN 3U #define PFM_VERSION (((PFM_VERSION_MAJ&0xffff)<<16)|(PFM_VERSION_MIN & 0xffff)) #define PFM_SMPL_VERSION_MAJ 1U diff --git a/include/asm-ia64/pgtable.h b/include/asm-ia64/pgtable.h index ebf187e..8104da9 100644 --- a/include/asm-ia64/pgtable.h +++ b/include/asm-ia64/pgtable.h @@ -207,13 +207,7 @@ ia64_phys_addr_valid (unsigned long addr) #define VMALLOC_START (0xa000000000000000 + 3*PERCPU_PAGE_SIZE) #define VMALLOC_VMADDR(x) ((unsigned long)(x)) -#ifdef CONFIG_VIRTUAL_MEM_MAP -# define VMALLOC_END_INIT (0xa000000000000000 + (1UL << (4*PAGE_SHIFT - 9))) -# define VMALLOC_END vmalloc_end - extern unsigned long vmalloc_end; -#else -# define VMALLOC_END (0xa000000000000000 + (1UL << (4*PAGE_SHIFT - 9))) -#endif +#define VMALLOC_END (0xa000000000000000 + (1UL << (4*PAGE_SHIFT - 9))) /* * Conversion functions: convert page frame number (pfn) and a protection value to a page @@ -455,14 +449,6 @@ extern unsigned long empty_zero_page[PAGE_SIZE/sizeof(unsigned long)]; typedef pte_t *pte_addr_t; -# ifdef CONFIG_VIRTUAL_MEM_MAP - - /* arch mem_map init routine is needed due to holes in a virtual mem_map */ -# define __HAVE_ARCH_MEMMAP_INIT - - extern void memmap_init (struct page *start, unsigned long size, int nid, unsigned long zone, - unsigned long start_pfn); -# endif /* CONFIG_VIRTUAL_MEM_MAP */ # endif /* !__ASSEMBLY__ */ /* diff --git a/include/asm-ia64/processor.h b/include/asm-ia64/processor.h index 6838714..265c0ba 100644 --- a/include/asm-ia64/processor.h +++ b/include/asm-ia64/processor.h @@ -39,14 +39,6 @@ #define TASK_SIZE (current->thread.task_size) /* - * MM_VM_SIZE(mm) gives the maximum address (plus 1) which may contain a mapping for - * address-space MM. 
Note that with 32-bit tasks, this is still DEFAULT_TASK_SIZE, - * because the kernel may have installed helper-mappings above TASK_SIZE. For example, - * for x86 emulation, the LDT and GDT are mapped above TASK_SIZE. - */ -#define MM_VM_SIZE(mm) DEFAULT_TASK_SIZE - -/* * This decides where the kernel will search for a free chunk of vm * space during mmap's. */ @@ -299,7 +291,7 @@ struct thread_struct { #define start_thread(regs,new_ip,new_sp) do { \ set_fs(USER_DS); \ - regs->cr_ipsr = ((regs->cr_ipsr | (IA64_PSR_BITS_TO_SET | IA64_PSR_CPL)) \ + regs->cr_ipsr = ((regs->cr_ipsr | (IA64_PSR_BITS_TO_SET | IA64_PSR_CPL | IA64_PSR_SP)) \ & ~(IA64_PSR_BITS_TO_CLEAR | IA64_PSR_RI | IA64_PSR_IS)); \ regs->cr_iip = new_ip; \ regs->ar_rsc = 0xf; /* eager mode, privilege level 3 */ \ diff --git a/include/asm-ia64/ptrace.h b/include/asm-ia64/ptrace.h index 34bd44e..351df13 100644 --- a/include/asm-ia64/ptrace.h +++ b/include/asm-ia64/ptrace.h @@ -227,10 +227,8 @@ struct switch_stack { }) struct task_struct; /* forward decl */ - struct unw_frame_info; /* forward decl */ extern void show_regs (struct pt_regs *); - extern void ia64_do_show_stack (struct unw_frame_info *, void *); extern unsigned long ia64_get_user_rbs_end (struct task_struct *, struct pt_regs *, unsigned long *); extern long ia64_peek (struct task_struct *, struct switch_stack *, unsigned long, diff --git a/include/asm-ia64/sal.h b/include/asm-ia64/sal.h index eccac9c..519c90d 100644 --- a/include/asm-ia64/sal.h +++ b/include/asm-ia64/sal.h @@ -226,7 +226,7 @@ enum { /* Encodings for machine check parameter types */ enum { - SAL_MC_PARAM_RENDEZ_INT = 1, /* Rendezvous interrupt */ + SAL_MC_PARAM_RENDEZ_INT = 1, /* Rendezevous interrupt */ SAL_MC_PARAM_RENDEZ_WAKEUP = 2, /* Wakeup */ SAL_MC_PARAM_CPE_INT = 3 /* Corrected Platform Error Int */ }; diff --git a/include/asm-ia64/serial.h b/include/asm-ia64/serial.h index 7169391..2d123f4 100644 --- a/include/asm-ia64/serial.h +++ b/include/asm-ia64/serial.h @@ -59,6 +59,7 @@ { 0, BASE_BAUD, 0x3E8, 4, STD_COM_FLAGS }, /* ttyS2 */ \ { 0, BASE_BAUD, 0x2E8, 3, STD_COM4_FLAGS }, /* ttyS3 */ + #ifdef CONFIG_SERIAL_MANY_PORTS #define EXTRA_SERIAL_PORT_DEFNS \ { 0, BASE_BAUD, 0x1A0, 9, FOURPORT_FLAGS }, /* ttyS4 */ \ diff --git a/include/asm-ia64/spinlock.h b/include/asm-ia64/spinlock.h index 96e298c..2a40f54 100644 --- a/include/asm-ia64/spinlock.h +++ b/include/asm-ia64/spinlock.h @@ -22,72 +22,26 @@ typedef struct { #define SPIN_LOCK_UNLOCKED (spinlock_t) { 0 } #define spin_lock_init(x) ((x)->lock = 0) -#define NEW_LOCK -#ifdef NEW_LOCK +#define DEBUG_SPIN_LOCK 0 -/* - * Try to get the lock. If we fail to get the lock, make a non-standard call to - * ia64_spinlock_contention(). We do not use a normal call because that would force all - * callers of spin_lock() to be non-leaf routines. Instead, ia64_spinlock_contention() is - * carefully coded to touch only those registers that spin_lock() marks "clobbered". 
- */ +#if DEBUG_SPIN_LOCK + +#include + +#define _raw_spin_lock(x) \ +do { \ + unsigned long _timeout = 1000000000; \ + volatile unsigned int _old = 0, _new = 1, *_ptr = &((x)->lock); \ + do { \ + if (_timeout-- == 0) { \ + extern void dump_stack (void); \ + printk("kernel DEADLOCK at %s:%d?\n", __FILE__, __LINE__); \ + dump_stack(); \ + } \ + } while (__sync_val_compare_and_swap(_ptr, _old, _new) != _old); \ +} while (0) -#define IA64_SPINLOCK_CLOBBERS "ar.pfs", "p14", "r28", "r29", "r30", "b6", "memory" - -static inline void -_raw_spin_lock (spinlock_t *lock) -{ - register volatile unsigned int *ptr asm ("r31") = &lock->lock; - -#if __GNUC__ < 3 || (__GNUC__ == 3 && __GNUC_MINOR__ < 4) -# ifdef CONFIG_ITANIUM - /* don't use brl on Itanium... */ - asm volatile ("{\n\t" - " mov ar.ccv = r0\n\t" - " mov r28 = ip\n\t" - " mov r30 = 1;;\n\t" - "}\n\t" - "cmpxchg4.acq r30 = [%1], r30, ar.ccv\n\t" - "movl r29 = ia64_spinlock_contention_pre3_4;;\n\t" - "cmp4.ne p14, p0 = r30, r0\n\t" - "mov b6 = r29;;\n" - "(p14) br.cond.spnt.many b6" - : "=r"(ptr) : "r"(ptr) : IA64_SPINLOCK_CLOBBERS); -# else - asm volatile ("{\n\t" - " mov ar.ccv = r0\n\t" - " mov r28 = ip\n\t" - " mov r30 = 1;;\n\t" - "}\n\t" - "cmpxchg4.acq r30 = [%1], r30, ar.ccv;;\n\t" - "cmp4.ne p14, p0 = r30, r0\n" - "(p14) brl.cond.spnt.many ia64_spinlock_contention_pre3_4" - : "=r"(ptr) : "r"(ptr) : IA64_SPINLOCK_CLOBBERS); -# endif /* CONFIG_MCKINLEY */ #else -# ifdef CONFIG_ITANIUM - /* don't use brl on Itanium... */ - /* mis-declare, so we get the entry-point, not it's function descriptor: */ - asm volatile ("mov r30 = 1\n\t" - "mov ar.ccv = r0;;\n\t" - "cmpxchg4.acq r30 = [%0], r30, ar.ccv\n\t" - "movl r29 = ia64_spinlock_contention;;\n\t" - "cmp4.ne p14, p0 = r30, r0\n\t" - "mov b6 = r29;;\n" - "(p14) br.call.spnt.many b6 = b6" - : "=r"(ptr) : "r"(ptr) : IA64_SPINLOCK_CLOBBERS); -# else - asm volatile ("mov r30 = 1\n\t" - "mov ar.ccv = r0;;\n\t" - "cmpxchg4.acq r30 = [%0], r30, ar.ccv;;\n\t" - "cmp4.ne p14, p0 = r30, r0\n\t" - "(p14) brl.call.spnt.many b6=ia64_spinlock_contention" - : "=r"(ptr) : "r"(ptr) : IA64_SPINLOCK_CLOBBERS); -# endif /* CONFIG_MCKINLEY */ -#endif -} - -#else /* !NEW_LOCK */ /* * Streamlined test_and_set_bit(0, (x)). 
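
The DEBUG_SPIN_LOCK path added above trades the hand-tuned contention call for a plain compare-and-swap loop that complains after a billion failed attempts instead of hanging silently. The core is portable enough to run in userspace; this sketch keeps the structure but shrinks the timeout, with __sync_val_compare_and_swap standing in for cmpxchg4.acq:

        #include <stdio.h>

        static volatile unsigned int lock;

        static void debug_spin_lock(volatile unsigned int *l)
        {
                unsigned long timeout = 1000;   /* 1000000000 in the kernel version */
                while (__sync_val_compare_and_swap(l, 0u, 1u) != 0u) {
                        if (timeout-- == 0) {
                                printf("kernel DEADLOCK at %s:%d?\n", __FILE__, __LINE__);
                                timeout = 1000; /* keep complaining, keep spinning */
                        }
                }
        }

        int main(void)
        {
                debug_spin_lock(&lock); /* uncontended: the first compare-and-swap wins */
                puts("lock held");
                lock = 0;               /* _raw_spin_unlock() is just a store of zero */
                return 0;
        }
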
We use test-and-test-and-set @@ -110,7 +64,7 @@ _raw_spin_lock (spinlock_t *lock) ";;\n" \ :: "r"(&(x)->lock) : "ar.ccv", "p7", "r2", "r29", "memory") -#endif /* !NEW_LOCK */ +#endif /* !DEBUG_SPIN_LOCK */ #define spin_is_locked(x) ((x)->lock != 0) #define _raw_spin_unlock(x) do { barrier(); ((spinlock_t *) x)->lock = 0; } while (0) @@ -118,31 +72,43 @@ _raw_spin_lock (spinlock_t *lock) #define spin_unlock_wait(x) do { barrier(); } while ((x)->lock) typedef struct { - volatile int read_counter : 31; - volatile int write_lock : 1; + volatile int read_counter:31; + volatile int write_lock:1; } rwlock_t; #define RW_LOCK_UNLOCKED (rwlock_t) { 0, 0 } #define rwlock_init(x) do { *(x) = RW_LOCK_UNLOCKED; } while(0) #define rwlock_is_locked(x) (*(volatile int *) (x) != 0) -#define _raw_read_lock(rw) \ -do { \ - rwlock_t *__read_lock_ptr = (rw); \ - \ - while (unlikely(ia64_fetchadd(1, (int *) __read_lock_ptr, "acq") < 0)) { \ - ia64_fetchadd(-1, (int *) __read_lock_ptr, "rel"); \ - while (*(volatile int *)__read_lock_ptr < 0) \ - barrier(); \ - \ - } \ -} while (0) +#define _raw_read_lock(rw) \ +do { \ + int __read_lock_tmp = 0; \ + __asm__ __volatile__ ("1:\tfetchadd4.acq %0 = [%1], 1\n" \ + ";;\n" \ + "tbit.nz p6,p0 = %0, 31\n" \ + "(p6) br.cond.sptk.few 2f\n" \ + ".section .text.lock,\"ax\"\n" \ + "2:\tfetchadd4.rel %0 = [%1], -1\n" \ + ";;\n" \ + "3:\tld4.acq %0 = [%1]\n" \ + ";;\n" \ + "tbit.nz p6,p0 = %0, 31\n" \ + "(p6) br.cond.sptk.few 3b\n" \ + "br.cond.sptk.few 1b\n" \ + ";;\n" \ + ".previous\n" \ + : "=&r" (__read_lock_tmp) \ + : "r" (rw) : "p6", "memory"); \ +} while(0) -#define _raw_read_unlock(rw) \ -do { \ - rwlock_t *__read_lock_ptr = (rw); \ - ia64_fetchadd(-1, (int *) __read_lock_ptr, "rel"); \ -} while (0) +#define _raw_read_unlock(rw) \ +do { \ + int __read_unlock_tmp = 0; \ + __asm__ __volatile__ ("fetchadd4.rel %0 = [%1], -1\n" \ + : "=r" (__read_unlock_tmp) \ + : "r" (rw) \ + : "memory"); \ +} while(0) #define _raw_write_lock(rw) \ do { \ diff --git a/include/asm-ia64/system.h b/include/asm-ia64/system.h index 250c0fa..1486785 100644 --- a/include/asm-ia64/system.h +++ b/include/asm-ia64/system.h @@ -212,39 +212,48 @@ extern void ia64_load_extra (struct task_struct *task); # define PERFMON_IS_SYSWIDE() (0) #endif -#define IA64_HAS_EXTRA_STATE(t) \ - ((t)->thread.flags & (IA64_THREAD_DBG_VALID|IA64_THREAD_PM_VALID) \ - || IS_IA32_PROCESS(ia64_task_regs(t)) || PERFMON_IS_SYSWIDE()) - -#define __switch_to(prev,next,last) do { \ - struct task_struct *__fpu_owner = ia64_get_fpu_owner(); \ - if (IA64_HAS_EXTRA_STATE(prev)) \ - ia64_save_extra(prev); \ - if (IA64_HAS_EXTRA_STATE(next)) \ - ia64_load_extra(next); \ - ia64_psr(ia64_task_regs(next))->dfh = \ - !(__fpu_owner == (next) && ((next)->thread.last_fph_cpu == smp_processor_id())); \ - (last) = ia64_switch_to((next)); \ +#define __switch_to(prev,next,last) do { \ + if (((prev)->thread.flags & (IA64_THREAD_DBG_VALID|IA64_THREAD_PM_VALID)) \ + || IS_IA32_PROCESS(ia64_task_regs(prev)) || PERFMON_IS_SYSWIDE()) \ + ia64_save_extra(prev); \ + if (((next)->thread.flags & (IA64_THREAD_DBG_VALID|IA64_THREAD_PM_VALID)) \ + || IS_IA32_PROCESS(ia64_task_regs(next)) || PERFMON_IS_SYSWIDE()) \ + ia64_load_extra(next); \ + (last) = ia64_switch_to((next)); \ } while (0) #ifdef CONFIG_SMP + /* - * In the SMP case, we save the fph state when context-switching away from a thread that - * modified fph. 
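
The rwlock above packs a 31-bit reader count and a one-bit write lock into a single word: a writer drives the word negative, so the read-lock fast path is one fetchadd4.acq followed by a test of bit 31, and the undo-and-wait slow path is parked in .text.lock. A userspace model of the same encoding, with the GCC builtin in place of fetchadd:

        #include <stdio.h>

        static volatile int rw;         /* bit 31 = write lock, bits 0..30 = reader count */

        static int read_trylock(volatile int *l)
        {
                if (__sync_fetch_and_add(l, 1) >= 0)
                        return 1;               /* no writer: we hold a read reference */
                __sync_fetch_and_add(l, -1);    /* writer present: undo, as the slow path does */
                return 0;
        }

        static void read_unlock(volatile int *l)
        {
                __sync_fetch_and_add(l, -1);
        }

        int main(void)
        {
                printf("read lock: %d\n", read_trylock(&rw)); /* 1 */
                read_unlock(&rw);
                rw = (int)0x80000000;                         /* simulate a held write lock */
                printf("read lock: %d\n", read_trylock(&rw)); /* 0 */
                return 0;
        }
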
This way, when the thread gets scheduled on another CPU, the CPU can - * pick up the state from task->thread.fph, avoiding the complication of having to fetch - * the latest fph state from another CPU. In other words: eager save, lazy restore. + * In the SMP case, we save the fph state when context-switching + * away from a thread that modified fph. This way, when the thread + * gets scheduled on another CPU, the CPU can pick up the state from + * task->thread.fph, avoiding the complication of having to fetch + * the latest fph state from another CPU. */ +# define switch_to(prev,next,last) do { \ + if (ia64_psr(ia64_task_regs(prev))->mfh) { \ + ia64_psr(ia64_task_regs(prev))->mfh = 0; \ + (prev)->thread.flags |= IA64_THREAD_FPH_VALID; \ + __ia64_save_fpu((prev)->thread.fph); \ + (prev)->thread.last_fph_cpu = smp_processor_id(); \ + } \ + if ((next)->thread.flags & IA64_THREAD_FPH_VALID) { \ + if (((next)->thread.last_fph_cpu == smp_processor_id()) \ + && (ia64_get_fpu_owner() == next)) \ + { \ + ia64_psr(ia64_task_regs(next))->dfh = 0; \ + ia64_psr(ia64_task_regs(next))->mfh = 0; \ + } else \ + ia64_psr(ia64_task_regs(next))->dfh = 1; \ + } \ + __switch_to(prev,next,last); \ + } while (0) +#else # define switch_to(prev,next,last) do { \ - if (ia64_psr(ia64_task_regs(prev))->mfh) { \ - ia64_psr(ia64_task_regs(prev))->mfh = 0; \ - (prev)->thread.flags |= IA64_THREAD_FPH_VALID; \ - __ia64_save_fpu((prev)->thread.fph); \ - (prev)->thread.last_fph_cpu = smp_processor_id(); \ - } \ - __switch_to(prev, next, last); \ + ia64_psr(ia64_task_regs(next))->dfh = (ia64_get_fpu_owner() != (next)); \ + __switch_to(prev,next,last); \ } while (0) -#else -# define switch_to(prev,next,last) __switch_to(prev, next, last) #endif /* diff --git a/include/asm-ia64/uaccess.h b/include/asm-ia64/uaccess.h index a30fcfa..b4bfc4c 100644 --- a/include/asm-ia64/uaccess.h +++ b/include/asm-ia64/uaccess.h @@ -8,7 +8,7 @@ * addresses. Thus, we need to be careful not to let the user to * trick us into accessing kernel memory that would normally be * inaccessible. This code is also fairly performance sensitive, - * so we want to spend as little time doing safety checks as + * so we want to spend as little time doing saftey checks as * possible. * * To make matters a bit more interesting, these macros sometimes also diff --git a/include/asm-ia64/unwind.h b/include/asm-ia64/unwind.h index 153f06c..a62655e 100644 --- a/include/asm-ia64/unwind.h +++ b/include/asm-ia64/unwind.h @@ -2,8 +2,8 @@ #define _ASM_IA64_UNWIND_H /* - * Copyright (C) 1999-2000, 2003 Hewlett-Packard Co - * David Mosberger-Tang + * Copyright (C) 1999-2000 Hewlett-Packard Co + * Copyright (C) 1999-2000 David Mosberger-Tang * * A simple API for unwinding kernel stacks. This is used for * debugging and error reporting purposes. The kernel doesn't need @@ -107,13 +107,6 @@ extern void unw_remove_unwind_table (void *handle); */ extern void unw_init_from_blocked_task (struct unw_frame_info *info, struct task_struct *t); -/* - * Prepare to unwind from interruption. The pt-regs and switch-stack structures must have - * be "adjacent" (no state modifications between pt-regs and switch-stack). 
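
The fph comment above describes eager save, lazy restore with an ownership shortcut: modified floating-point-high state is written back the moment its task switches away, but reloaded only on demand, and the reload is skipped entirely when the task lands back on the CPU whose registers it still owns. A toy model of just that ownership test; struct task and its fields here are simplifications, not the kernel's:

        #include <stdio.h>

        struct task { int fph_valid; int last_fph_cpu; };

        static struct task *fpu_owner;  /* per-CPU in reality; a single CPU slot here */

        /* may the incoming task keep using the live fph registers? */
        static int can_skip_restore(struct task *next, int this_cpu)
        {
                return next->fph_valid &&
                       next->last_fph_cpu == this_cpu &&
                       fpu_owner == next;
        }

        int main(void)
        {
                struct task a = { 1, 0 };
                fpu_owner = &a;
                printf("back on cpu 0, still owner: %d\n", can_skip_restore(&a, 0)); /* 1 */
                printf("migrated to cpu 1:          %d\n", can_skip_restore(&a, 1)); /* 0 */
                return 0;
        }
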
- */ -extern void unw_init_from_interruption (struct unw_frame_info *info, struct task_struct *t, - struct pt_regs *pt, struct switch_stack *sw); - extern void unw_init_frame_info (struct unw_frame_info *info, struct task_struct *t, struct switch_stack *sw); diff --git a/include/asm-sparc64/agp.h b/include/asm-sparc64/agp.h index 197a541..ba05bdf 100644 --- a/include/asm-sparc64/agp.h +++ b/include/asm-sparc64/agp.h @@ -8,11 +8,4 @@ #define flush_agp_mappings() #define flush_agp_cache() mb() -/* - * Page-protection value to be used for AGP memory mapped into kernel space. For - * platforms which use coherent AGP DMA, this can be PAGE_KERNEL. For others, it needs to - * be an uncached mapping (such as write-combining). - */ -#define PAGE_AGP PAGE_KERNEL_NOCACHE - #endif diff --git a/include/asm-x86_64/agp.h b/include/asm-x86_64/agp.h index ec2b2ed..ecb3db3 100644 --- a/include/asm-x86_64/agp.h +++ b/include/asm-x86_64/agp.h @@ -20,11 +20,4 @@ worth it. Would need a page for it. */ #define flush_agp_cache() asm volatile("wbinvd":::"memory") -/* - * Page-protection value to be used for AGP memory mapped into kernel space. For - * platforms which use coherent AGP DMA, this can be PAGE_KERNEL. For others, it needs to - * be an uncached mapping (such as write-combining). - */ -#define PAGE_AGP PAGE_KERNEL_NOCACHE - #endif diff --git a/include/linux/acpi_serial.h b/include/linux/acpi_serial.h index e4b87c5..07fb16f 100644 --- a/include/linux/acpi_serial.h +++ b/include/linux/acpi_serial.h @@ -9,8 +9,6 @@ * */ -#include - extern void setup_serial_acpi(void *); #define ACPI_SIG_LEN 4 diff --git a/include/linux/efi.h b/include/linux/efi.h index d07bc5c..a627f21 100644 --- a/include/linux/efi.h +++ b/include/linux/efi.h @@ -7,7 +7,7 @@ * * Copyright (C) 1999 VA Linux Systems * Copyright (C) 1999 Walt Drummond - * Copyright (C) 1999, 2002-2003 Hewlett-Packard Co. + * Copyright (C) 1999, 2002 Hewlett-Packard Co. 
* David Mosberger-Tang * Stephane Eranian */ @@ -21,12 +21,12 @@ #include #define EFI_SUCCESS 0 -#define EFI_LOAD_ERROR ( 1 | (1UL << 63)) -#define EFI_INVALID_PARAMETER ( 2 | (1UL << 63)) -#define EFI_UNSUPPORTED ( 3 | (1UL << 63)) -#define EFI_BAD_BUFFER_SIZE ( 4 | (1UL << 63)) -#define EFI_BUFFER_TOO_SMALL ( 5 | (1UL << 63)) -#define EFI_NOT_FOUND (14 | (1UL << 63)) +#define EFI_LOAD_ERROR (1L | (1L << 63)) +#define EFI_INVALID_PARAMETER (2L | (1L << 63)) +#define EFI_UNSUPPORTED (3L | (1L << 63)) +#define EFI_BAD_BUFFER_SIZE (4L | (1L << 63)) +#define EFI_BUFFER_TOO_SMALL (5L | (1L << 63)) +#define EFI_NOT_FOUND (14L | (1L << 63)) typedef unsigned long efi_status_t; typedef u8 efi_bool_t; @@ -260,7 +260,7 @@ efi_guid_unparse(efi_guid_t *guid, char *out) extern void efi_init (void); extern void efi_map_pal_code (void); extern void efi_memmap_walk (efi_freemem_callback_t callback, void *arg); -extern void efi_gettimeofday (struct timespec *ts); +extern void efi_gettimeofday (struct timeval *tv); extern void efi_enter_virtual_mode (void); /* switch EFI to virtual mode, if possible */ extern u64 efi_get_iobase (void); extern u32 efi_mem_type (unsigned long phys_addr); diff --git a/include/linux/elf.h b/include/linux/elf.h index 3240748..fb03869 100644 --- a/include/linux/elf.h +++ b/include/linux/elf.h @@ -229,90 +229,6 @@ typedef struct { #define R_386_GOTPC 10 #define R_386_NUM 11 -#define R_IA64_NONE 0x00 /* none */ -#define R_IA64_IMM14 0x21 /* symbol + addend, add imm14 */ -#define R_IA64_IMM22 0x22 /* symbol + addend, add imm22 */ -#define R_IA64_IMM64 0x23 /* symbol + addend, mov imm64 */ -#define R_IA64_DIR32MSB 0x24 /* symbol + addend, data4 MSB */ -#define R_IA64_DIR32LSB 0x25 /* symbol + addend, data4 LSB */ -#define R_IA64_DIR64MSB 0x26 /* symbol + addend, data8 MSB */ -#define R_IA64_DIR64LSB 0x27 /* symbol + addend, data8 LSB */ -#define R_IA64_GPREL22 0x2a /* @gprel(sym+add), add imm22 */ -#define R_IA64_GPREL64I 0x2b /* @gprel(sym+add), mov imm64 */ -#define R_IA64_GPREL32MSB 0x2c /* @gprel(sym+add), data4 MSB */ -#define R_IA64_GPREL32LSB 0x2d /* @gprel(sym+add), data4 LSB */ -#define R_IA64_GPREL64MSB 0x2e /* @gprel(sym+add), data8 MSB */ -#define R_IA64_GPREL64LSB 0x2f /* @gprel(sym+add), data8 LSB */ -#define R_IA64_LTOFF22 0x32 /* @ltoff(sym+add), add imm22 */ -#define R_IA64_LTOFF64I 0x33 /* @ltoff(sym+add), mov imm64 */ -#define R_IA64_PLTOFF22 0x3a /* @pltoff(sym+add), add imm22 */ -#define R_IA64_PLTOFF64I 0x3b /* @pltoff(sym+add), mov imm64 */ -#define R_IA64_PLTOFF64MSB 0x3e /* @pltoff(sym+add), data8 MSB */ -#define R_IA64_PLTOFF64LSB 0x3f /* @pltoff(sym+add), data8 LSB */ -#define R_IA64_FPTR64I 0x43 /* @fptr(sym+add), mov imm64 */ -#define R_IA64_FPTR32MSB 0x44 /* @fptr(sym+add), data4 MSB */ -#define R_IA64_FPTR32LSB 0x45 /* @fptr(sym+add), data4 LSB */ -#define R_IA64_FPTR64MSB 0x46 /* @fptr(sym+add), data8 MSB */ -#define R_IA64_FPTR64LSB 0x47 /* @fptr(sym+add), data8 LSB */ -#define R_IA64_PCREL60B 0x48 /* @pcrel(sym+add), brl */ -#define R_IA64_PCREL21B 0x49 /* @pcrel(sym+add), ptb, call */ -#define R_IA64_PCREL21M 0x4a /* @pcrel(sym+add), chk.s */ -#define R_IA64_PCREL21F 0x4b /* @pcrel(sym+add), fchkf */ -#define R_IA64_PCREL32MSB 0x4c /* @pcrel(sym+add), data4 MSB */ -#define R_IA64_PCREL32LSB 0x4d /* @pcrel(sym+add), data4 LSB */ -#define R_IA64_PCREL64MSB 0x4e /* @pcrel(sym+add), data8 MSB */ -#define R_IA64_PCREL64LSB 0x4f /* @pcrel(sym+add), data8 LSB */ -#define R_IA64_LTOFF_FPTR22 0x52 /* @ltoff(@fptr(s+a)), imm22 */ -#define R_IA64_LTOFF_FPTR64I 
0x53 /* @ltoff(@fptr(s+a)), imm64 */ -#define R_IA64_LTOFF_FPTR32MSB 0x54 /* @ltoff(@fptr(s+a)), 4 MSB */ -#define R_IA64_LTOFF_FPTR32LSB 0x55 /* @ltoff(@fptr(s+a)), 4 LSB */ -#define R_IA64_LTOFF_FPTR64MSB 0x56 /* @ltoff(@fptr(s+a)), 8 MSB */ -#define R_IA64_LTOFF_FPTR64LSB 0x57 /* @ltoff(@fptr(s+a)), 8 LSB */ -#define R_IA64_SEGREL32MSB 0x5c /* @segrel(sym+add), data4 MSB */ -#define R_IA64_SEGREL32LSB 0x5d /* @segrel(sym+add), data4 LSB */ -#define R_IA64_SEGREL64MSB 0x5e /* @segrel(sym+add), data8 MSB */ -#define R_IA64_SEGREL64LSB 0x5f /* @segrel(sym+add), data8 LSB */ -#define R_IA64_SECREL32MSB 0x64 /* @secrel(sym+add), data4 MSB */ -#define R_IA64_SECREL32LSB 0x65 /* @secrel(sym+add), data4 LSB */ -#define R_IA64_SECREL64MSB 0x66 /* @secrel(sym+add), data8 MSB */ -#define R_IA64_SECREL64LSB 0x67 /* @secrel(sym+add), data8 LSB */ -#define R_IA64_REL32MSB 0x6c /* data 4 + REL */ -#define R_IA64_REL32LSB 0x6d /* data 4 + REL */ -#define R_IA64_REL64MSB 0x6e /* data 8 + REL */ -#define R_IA64_REL64LSB 0x6f /* data 8 + REL */ -#define R_IA64_LTV32MSB 0x74 /* symbol + addend, data4 MSB */ -#define R_IA64_LTV32LSB 0x75 /* symbol + addend, data4 LSB */ -#define R_IA64_LTV64MSB 0x76 /* symbol + addend, data8 MSB */ -#define R_IA64_LTV64LSB 0x77 /* symbol + addend, data8 LSB */ -#define R_IA64_PCREL21BI 0x79 /* @pcrel(sym+add), ptb, call */ -#define R_IA64_PCREL22 0x7a /* @pcrel(sym+add), imm22 */ -#define R_IA64_PCREL64I 0x7b /* @pcrel(sym+add), imm64 */ -#define R_IA64_IPLTMSB 0x80 /* dynamic reloc, imported PLT, MSB */ -#define R_IA64_IPLTLSB 0x81 /* dynamic reloc, imported PLT, LSB */ -#define R_IA64_COPY 0x84 /* dynamic reloc, data copy */ -#define R_IA64_SUB 0x85 /* -symbol + addend, add imm22 */ -#define R_IA64_LTOFF22X 0x86 /* LTOFF22, relaxable. */ -#define R_IA64_LDXMOV 0x87 /* Use of LTOFF22X. 
*/ -#define R_IA64_TPREL14 0x91 /* @tprel(sym+add), add imm14 */ -#define R_IA64_TPREL22 0x92 /* @tprel(sym+add), add imm22 */ -#define R_IA64_TPREL64I 0x93 /* @tprel(sym+add), add imm64 */ -#define R_IA64_TPREL64MSB 0x96 /* @tprel(sym+add), data8 MSB */ -#define R_IA64_TPREL64LSB 0x97 /* @tprel(sym+add), data8 LSB */ -#define R_IA64_LTOFF_TPREL22 0x9a /* @ltoff(@tprel(s+a)), add imm22 */ -#define R_IA64_DTPMOD64MSB 0xa6 /* @dtpmod(sym+add), data8 MSB */ -#define R_IA64_DTPMOD64LSB 0xa7 /* @dtpmod(sym+add), data8 LSB */ -#define R_IA64_LTOFF_DTPMOD22 0xaa /* @ltoff(@dtpmod(s+a)), imm22 */ -#define R_IA64_DTPREL14 0xb1 /* @dtprel(sym+add), imm14 */ -#define R_IA64_DTPREL22 0xb2 /* @dtprel(sym+add), imm22 */ -#define R_IA64_DTPREL64I 0xb3 /* @dtprel(sym+add), imm64 */ -#define R_IA64_DTPREL32MSB 0xb4 /* @dtprel(sym+add), data4 MSB */ -#define R_IA64_DTPREL32LSB 0xb5 /* @dtprel(sym+add), data4 LSB */ -#define R_IA64_DTPREL64MSB 0xb6 /* @dtprel(sym+add), data8 MSB */ -#define R_IA64_DTPREL64LSB 0xb7 /* @dtprel(sym+add), data8 LSB */ -#define R_IA64_LTOFF_DTPREL22 0xba /* @ltoff(@dtprel(s+a)), imm22 */ - -#define SHF_IA_64_SHORT 0x10000000 /* section near gp */ - #define R_MIPS_NONE 0 #define R_MIPS_16 1 #define R_MIPS_32 2 diff --git a/include/linux/highmem.h b/include/linux/highmem.h index 48ac747..3bc7bcb 100644 --- a/include/linux/highmem.h +++ b/include/linux/highmem.h @@ -3,8 +3,6 @@ #include #include -#include - #include #ifdef CONFIG_HIGHMEM diff --git a/include/linux/irq.h b/include/linux/irq.h index 6256392..c9bb7be1 100644 --- a/include/linux/irq.h +++ b/include/linux/irq.h @@ -56,7 +56,7 @@ typedef struct hw_interrupt_type hw_irq_controller; * * Pad this out to 32 bytes for cache and indexing reasons. */ -typedef struct irq_desc { +typedef struct { unsigned int status; /* IRQ status */ hw_irq_controller *handler; struct irqaction *action; /* IRQ action list */ @@ -64,6 +64,8 @@ typedef struct irq_desc { spinlock_t lock; } ____cacheline_aligned irq_desc_t; +extern irq_desc_t irq_desc [NR_IRQS]; + #include /* the arch dependent stuff */ extern int handle_IRQ_event(unsigned int, struct pt_regs *, struct irqaction *); diff --git a/include/linux/irq_cpustat.h b/include/linux/irq_cpustat.h index 377ae36..a83926f 100644 --- a/include/linux/irq_cpustat.h +++ b/include/linux/irq_cpustat.h @@ -24,7 +24,7 @@ extern irq_cpustat_t irq_stat[]; /* defined in asm/hardirq.h */ #define __IRQ_STAT(cpu, member) (irq_stat[cpu].member) #else #define __IRQ_STAT(cpu, member) ((void)(cpu), irq_stat[0].member) -#endif +#endif #endif /* arch independent irq_stat fields */ @@ -35,9 +35,4 @@ extern irq_cpustat_t irq_stat[]; /* defined in asm/hardirq.h */ /* arch dependent irq_stat fields */ #define nmi_count(cpu) __IRQ_STAT((cpu), __nmi_count) /* i386, ia64 */ -#define local_softirq_pending() softirq_pending(smp_processor_id()) -#define local_syscall_count() syscall_count(smp_processor_id()) -#define local_ksoftirqd_task() ksoftirqd_task(smp_processor_id()) -#define local_nmi_count() nmi_count(smp_processor_id()) - #endif /* __irq_cpustat_h */ diff --git a/include/linux/jbd.h b/include/linux/jbd.h index c34ff99..e00aec0 100644 --- a/include/linux/jbd.h +++ b/include/linux/jbd.h @@ -279,9 +279,9 @@ void buffer_assertion_failure(struct buffer_head *bh); printk(KERN_ERR why); \ } \ } while (0) -#define J_EXPECT(expr, why...) __journal_expect(expr, why) -#define J_EXPECT_BH(bh, expr, why...) __journal_expect(expr, why) -#define J_EXPECT_JH(jh, expr, why...) 
__journal_expect(expr, why) +#define J_EXPECT(expr, why...) __journal_expect(expr, ## why) +#define J_EXPECT_BH(bh, expr, why...) __journal_expect(expr, ## why) +#define J_EXPECT_JH(jh, expr, why...) __journal_expect(expr, ## why) #endif enum jbd_state_bits { diff --git a/include/linux/mm.h b/include/linux/mm.h index 51f5377..6aa89d7 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -23,13 +23,8 @@ extern int page_cluster; #include #include -#include #include -#ifndef MM_VM_SIZE -#define MM_VM_SIZE(mm) TASK_SIZE -#endif - /* * Linux kernel virtual memory manager primitives. * The idea being to have a "virtual" mm in the same way diff --git a/include/linux/moduleloader.h b/include/linux/moduleloader.h index eb10339..0c9e9a6 100644 --- a/include/linux/moduleloader.h +++ b/include/linux/moduleloader.h @@ -41,7 +41,4 @@ int module_finalize(const Elf_Ehdr *hdr, const Elf_Shdr *sechdrs, struct module *mod); -/* Any cleanup needed when module leaves. */ -void module_arch_cleanup(struct module *mod); - #endif diff --git a/include/linux/nfs_fs.h b/include/linux/nfs_fs.h index 6826301..3d75259 100644 --- a/include/linux/nfs_fs.h +++ b/include/linux/nfs_fs.h @@ -408,7 +408,7 @@ static inline loff_t nfs_size_to_loff_t(__u64 size) { loff_t maxsz = (((loff_t) ULONG_MAX) << PAGE_CACHE_SHIFT) + PAGE_CACHE_SIZE - 1; - if (size > (__u64) maxsz) + if (size > maxsz) return maxsz; return (loff_t) size; } diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h index 7c087a5..21043f2 100644 --- a/include/linux/pci_ids.h +++ b/include/linux/pci_ids.h @@ -598,8 +598,6 @@ #define PCI_DEVICE_ID_HP_DIVA_TOSCA1 0x1049 #define PCI_DEVICE_ID_HP_DIVA_TOSCA2 0x104A #define PCI_DEVICE_ID_HP_DIVA_MAESTRO 0x104B -#define PCI_DEVICE_ID_HP_REO_SBA 0x10f0 -#define PCI_DEVICE_ID_HP_REO_IOC 0x10f1 #define PCI_DEVICE_ID_HP_VISUALIZE_FXE 0x108b #define PCI_DEVICE_ID_HP_DIVA_HALFDOME 0x1223 #define PCI_DEVICE_ID_HP_DIVA_KEYSTONE 0x1226 @@ -607,7 +605,6 @@ #define PCI_DEVICE_ID_HP_ZX1_SBA 0x1229 #define PCI_DEVICE_ID_HP_ZX1_IOC 0x122a #define PCI_DEVICE_ID_HP_ZX1_LBA 0x122e -#define PCI_DEVICE_ID_HP_SX1000_IOC 0x127c #define PCI_DEVICE_ID_HP_DIVA_EVEREST 0x1282 #define PCI_DEVICE_ID_HP_DIVA_AUX 0x1290 diff --git a/include/linux/percpu.h b/include/linux/percpu.h index 0870a29..74c3d97 100644 --- a/include/linux/percpu.h +++ b/include/linux/percpu.h @@ -1,8 +1,9 @@ #ifndef __LINUX_PERCPU_H #define __LINUX_PERCPU_H -#include /* For preempt_disable() */ +#include /* For preempt_disable() */ #include /* For kmalloc_percpu() */ #include + /* Must be an lvalue. 
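
The get_cpu_var() definition that this comment closes over relies on a GCC statement expression: the ({ ... }) block evaluates to its final expression, so yielding the variable's address and dereferencing the whole thing, (*({ ...; &var; })), produces an assignable lvalue even though arbitrary statements run first. A standalone demonstration, with a plain flag standing in for preempt_disable():

        #include <stdio.h>

        static int counter;
        static int preempt_disabled;    /* stand-in for the real preempt_disable() */

        #define my_get_var(var) (*({ preempt_disabled = 1; &(var); }))

        int main(void)
        {
                my_get_var(counter) = 42;       /* assignable: the macro is an lvalue */
                printf("counter=%d preempt_disabled=%d\n", counter, preempt_disabled);
                return 0;
        }
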
*/ #define get_cpu_var(var) (*({ preempt_disable(); &__get_cpu_var(var); })) #define put_cpu_var(var) preempt_enable() diff --git a/include/linux/serial.h b/include/linux/serial.h index 677e67b..aceee0c 100644 --- a/include/linux/serial.h +++ b/include/linux/serial.h @@ -180,9 +180,14 @@ struct serial_icounter_struct { extern int register_serial(struct serial_struct *req); extern void unregister_serial(int line); -/* Allow architectures to override entries in serial8250_ports[] at run time: */ -struct uart_port; /* forward declaration */ -extern int early_serial_setup(struct uart_port *port); +/* Allow complicated architectures to specify rs_table[] at run time */ +extern int early_serial_setup(struct serial_struct *req); + +#ifdef CONFIG_ACPI +/* tty ports reserved for the ACPI serial console port and debug port */ +#define ACPI_SERIAL_CONSOLE_PORT 4 +#define ACPI_SERIAL_DEBUG_PORT 5 +#endif #endif /* __KERNEL__ */ #endif /* _LINUX_SERIAL_H */ diff --git a/include/linux/smp.h b/include/linux/smp.h index 194d2f5..81a9069 100644 --- a/include/linux/smp.h +++ b/include/linux/smp.h @@ -74,6 +74,10 @@ static inline int on_each_cpu(void (*func) (void *info), void *info, */ extern int smp_threads_ready; +extern volatile unsigned long smp_msg_data; +extern volatile int smp_src_cpu; +extern volatile int smp_msg_id; + #define MSG_ALL_BUT_SELF 0x8000 /* Assume <32768 CPU's */ #define MSG_ALL 0x8001 diff --git a/include/linux/sunrpc/svc.h b/include/linux/sunrpc/svc.h index 98f1c2a..6553637b 100644 --- a/include/linux/sunrpc/svc.h +++ b/include/linux/sunrpc/svc.h @@ -73,7 +73,7 @@ struct svc_serv { * This assumes that the non-page part of an rpc reply will fit * in a page - NFSd ensures this. lockd also has no trouble. */ -#define RPCSVC_MAXPAGES ((RPCSVC_MAXPAYLOAD+PAGE_SIZE-1)/PAGE_SIZE + 2) +#define RPCSVC_MAXPAGES ((RPCSVC_MAXPAYLOAD+PAGE_SIZE-1)/PAGE_SIZE + 1) static inline u32 svc_getu32(struct iovec *iov) { diff --git a/include/linux/sysctl.h b/include/linux/sysctl.h index f9bb3af..2cce1f2 100644 --- a/include/linux/sysctl.h +++ b/include/linux/sysctl.h @@ -130,7 +130,6 @@ enum KERN_PIDMAX=55, /* int: PID # limit */ KERN_CORE_PATTERN=56, /* string: pattern for core-file names */ KERN_PANIC_ON_OOPS=57, /* int: whether we will panic on an oops */ - KERN_CACHEDECAYTICKS=58, /* ulong: value for cache_decay_ticks (EXPERIMENTAL!) */ }; diff --git a/kernel/fork.c b/kernel/fork.c index 9750e47..e34abd2 100644 --- a/kernel/fork.c +++ b/kernel/fork.c @@ -38,6 +38,8 @@ #include #include +static kmem_cache_t *task_struct_cachep; + extern int copy_semundo(unsigned long clone_flags, struct task_struct *tsk); extern void exit_semundo(struct task_struct *tsk); @@ -53,6 +55,13 @@ DEFINE_PER_CPU(unsigned long, process_counts) = 0; rwlock_t tasklist_lock __cacheline_aligned = RW_LOCK_UNLOCKED; /* outer */ +/* + * A per-CPU task cache - this relies on the fact that + * the very last portion of sys_exit() is executed with + * preemption turned off. + */ +static task_t *task_cache[NR_CPUS] __cacheline_aligned; + int nr_processes(void) { int cpu; @@ -65,22 +74,6 @@ int nr_processes(void) return total; } -#ifdef CONFIG_IA64 -# define HAVE_ARCH_DUP_TASK_STRUCT -#endif - -#ifdef HAVE_ARCH_DUP_TASK_STRUCT -extern void free_task_struct (struct task_struct *tsk); -#else -static kmem_cache_t *task_struct_cachep; - -/* - * A per-CPU task cache - this relies on the fact that - * the very last portion of sys_exit() is executed with - * preemption turned off. 
- */ -static task_t *task_cache[NR_CPUS] __cacheline_aligned; - static void free_task_struct(struct task_struct *tsk) { /* @@ -104,7 +97,6 @@ static void free_task_struct(struct task_struct *tsk) put_cpu(); } } -#endif /* HAVE_ARCH_DUP_TASK_STRUCT */ void __put_task_struct(struct task_struct *tsk) { @@ -194,7 +186,6 @@ int autoremove_wake_function(wait_queue_t *wait, unsigned mode, int sync) void __init fork_init(unsigned long mempages) { -#ifndef HAVE_ARCH_DUP_TASK_STRUCT /* create a slab on which task_structs can be allocated */ task_struct_cachep = kmem_cache_create("task_struct", @@ -202,7 +193,6 @@ void __init fork_init(unsigned long mempages) SLAB_MUST_HWCACHE_ALIGN, NULL, NULL); if (!task_struct_cachep) panic("fork_init(): cannot create task_struct SLAB cache"); -#endif /* * The default maximum number of threads is set to a safe @@ -220,11 +210,7 @@ void __init fork_init(unsigned long mempages) init_task.rlim[RLIMIT_NPROC].rlim_max = max_threads/2; } -#ifdef HAVE_ARCH_DUP_TASK_STRUCT -extern struct task_struct *dup_task_struct (struct task_struct *orig); -#else /* !HAVE_ARCH_DUP_TASK_STRUCT */ - -struct task_struct *dup_task_struct(struct task_struct *orig) +static struct task_struct *dup_task_struct(struct task_struct *orig) { struct task_struct *tsk; struct thread_info *ti; @@ -258,8 +244,6 @@ struct task_struct *dup_task_struct(struct task_struct *orig) return tsk; } -#endif /* !HAVE_ARCH_DUP_TASK_STRUCT */ - #ifdef CONFIG_MMU static inline int dup_mmap(struct mm_struct * mm, struct mm_struct * oldmm) { @@ -900,15 +884,11 @@ static struct task_struct *copy_process(unsigned long clone_flags, if (clone_flags & CLONE_CHILD_SETTID) p->set_child_tid = child_tidptr; - else - p->set_child_tid = NULL; /* * Clear TID on mm_release()? */ if (clone_flags & CLONE_CHILD_CLEARTID) p->clear_child_tid = child_tidptr; - else - p->clear_child_tid = NULL; /* * Syscall tracing should be turned off in the child regardless diff --git a/kernel/ksyms.c b/kernel/ksyms.c index b6f4ad0..c8ca8a9 100644 --- a/kernel/ksyms.c +++ b/kernel/ksyms.c @@ -402,9 +402,7 @@ EXPORT_SYMBOL(add_timer); EXPORT_SYMBOL(del_timer); EXPORT_SYMBOL(request_irq); EXPORT_SYMBOL(free_irq); -#if !defined(CONFIG_IA64) EXPORT_SYMBOL(irq_stat); -#endif /* waitqueue handling */ EXPORT_SYMBOL(add_wait_queue); @@ -604,9 +602,7 @@ EXPORT_SYMBOL(__tasklet_hi_schedule); /* init task, for moving kthread roots - ought to export a function ?? */ EXPORT_SYMBOL(init_task); -#ifndef CONFIG_IA64 EXPORT_SYMBOL(init_thread_union); -#endif EXPORT_SYMBOL(tasklist_lock); EXPORT_SYMBOL(find_task_by_pid); diff --git a/kernel/module.c b/kernel/module.c index 4a01db56..06ff932 100644 --- a/kernel/module.c +++ b/kernel/module.c @@ -910,9 +910,6 @@ static void free_module(struct module *mod) list_del(&mod->list); spin_unlock_irq(&modlist_lock); - /* Arch-specific cleanup. */ - module_arch_cleanup(mod); - /* Module unload stuff */ module_unload_free(mod); @@ -1279,7 +1276,6 @@ static struct module *load_module(void __user *umod, mod->module_init = ptr; /* Transfer each section which specifies SHF_ALLOC */ - DEBUGP("final section addresses:\n"); for (i = 0; i < hdr->e_shnum; i++) { void *dest; @@ -1297,7 +1293,6 @@ static struct module *load_module(void __user *umod, sechdrs[i].sh_size); /* Update sh_addr to point to copy in image. */ sechdrs[i].sh_addr = (unsigned long)dest; - DEBUGP("\t0x%lx %s\n", sechdrs[i].sh_addr, secstrings + sechdrs[i].sh_name); } /* Module has been moved. 
*/ mod = (void *)sechdrs[modindex].sh_addr; diff --git a/kernel/printk.c b/kernel/printk.c index 1defc9f..a7a9811 100644 --- a/kernel/printk.c +++ b/kernel/printk.c @@ -308,12 +308,6 @@ static void _call_console_drivers(unsigned long start, unsigned long end, int ms __call_console_drivers(start, end); } } -#ifdef CONFIG_IA64_EARLY_PRINTK - if (!console_drivers) { - void early_printk (const char *str, size_t len); - early_printk(&LOG_BUF(start), end - start); - } -#endif } /* @@ -631,11 +625,7 @@ void register_console(struct console * console) * for us. */ spin_lock_irqsave(&logbuf_lock, flags); -#ifdef CONFIG_IA64_EARLY_PRINTK - con_start = log_end; -#else con_start = log_start; -#endif spin_unlock_irqrestore(&logbuf_lock, flags); } release_console_sem(); @@ -688,110 +678,3 @@ void tty_write_message(struct tty_struct *tty, char *msg) tty->driver->write(tty, 0, msg, strlen(msg)); return; } - -#ifdef CONFIG_IA64_EARLY_PRINTK - -#include - -# ifdef CONFIG_IA64_EARLY_PRINTK_VGA - - -#define VGABASE ((char *)0xc0000000000b8000) -#define VGALINES 24 -#define VGACOLS 80 - -static int current_ypos = VGALINES, current_xpos = 0; - -static void -early_printk_vga (const char *str, size_t len) -{ - char c; - int i, k, j; - - while (len-- > 0) { - c = *str++; - if (current_ypos >= VGALINES) { - /* scroll 1 line up */ - for (k = 1, j = 0; k < VGALINES; k++, j++) { - for (i = 0; i < VGACOLS; i++) { - writew(readw(VGABASE + 2*(VGACOLS*k + i)), - VGABASE + 2*(VGACOLS*j + i)); - } - } - for (i = 0; i < VGACOLS; i++) { - writew(0x720, VGABASE + 2*(VGACOLS*j + i)); - } - current_ypos = VGALINES-1; - } - if (c == '\n') { - current_xpos = 0; - current_ypos++; - } else if (c != '\r') { - writew(((0x7 << 8) | (unsigned short) c), - VGABASE + 2*(VGACOLS*current_ypos + current_xpos++)); - if (current_xpos >= VGACOLS) { - current_xpos = 0; - current_ypos++; - } - } - } -} - -# endif /* CONFIG_IA64_EARLY_PRINTK_VGA */ - -# ifdef CONFIG_IA64_EARLY_PRINTK_UART - -#include -#include - -static void early_printk_uart(const char *str, size_t len) -{ - static char *uart = NULL; - unsigned long uart_base; - char c; - - if (!uart) { - uart_base = 0; -# ifdef CONFIG_SERIAL_8250_HCDP - { - extern unsigned long hcdp_early_uart(void); - uart_base = hcdp_early_uart(); - } -# endif -# if CONFIG_IA64_EARLY_PRINTK_UART_BASE - if (!uart_base) - uart_base = CONFIG_IA64_EARLY_PRINTK_UART_BASE; -# endif - if (!uart_base) - return; - - uart = ioremap(uart_base, 64); - if (!uart) - return; - } - - while (len-- > 0) { - c = *str++; - while ((readb(uart + UART_LSR) & UART_LSR_TEMT) == 0) - cpu_relax(); /* spin */ - - writeb(c, uart + UART_TX); - - if (c == '\n') - writeb('\r', uart + UART_TX); - } -} - -# endif /* CONFIG_IA64_EARLY_PRINTK_UART */ - -void early_printk(const char *str, size_t len) -{ -#ifdef CONFIG_IA64_EARLY_PRINTK_UART - early_printk_uart(str, len); -#endif -#ifdef CONFIG_IA64_EARLY_PRINTK_VGA - early_printk_vga(str, len); -#endif -} - -#endif /* CONFIG_IA64_EARLY_PRINTK */ diff --git a/kernel/softirq.c b/kernel/softirq.c index bb94e1f..60f9148 100644 --- a/kernel/softirq.c +++ b/kernel/softirq.c @@ -33,10 +33,7 @@ - Tasklets: serialized wrt itself. 
*/ -/* No separate irq_stat for ia64, it is part of PSA */ -#if !defined(CONFIG_IA64) irq_cpustat_t irq_stat[NR_CPUS] ____cacheline_aligned; -#endif /* CONFIG_IA64 */ static struct softirq_action softirq_vec[32] __cacheline_aligned_in_smp; @@ -324,7 +321,7 @@ static int ksoftirqd(void * __bind_cpu) __set_current_state(TASK_INTERRUPTIBLE); mb(); - local_ksoftirqd_task() = current; + ksoftirqd_task(cpu) = current; for (;;) { if (!local_softirq_pending()) diff --git a/kernel/sys.c b/kernel/sys.c index fdabc0e..8e7de84 100644 --- a/kernel/sys.c +++ b/kernel/sys.c @@ -1218,7 +1218,7 @@ asmlinkage long sys_getrlimit(unsigned int resource, struct rlimit __user *rlim) ? -EFAULT : 0; } -#if (!defined(__ia64__) && !defined(CONFIG_V850)) || defined(CONFIG_COMPAT) +#if !defined(__ia64__) && !defined(CONFIG_V850) /* * Back compatibility for getrlimit. Needed for some apps. diff --git a/kernel/sysctl.c b/kernel/sysctl.c index e566041..aff6558 100644 --- a/kernel/sysctl.c +++ b/kernel/sysctl.c @@ -265,10 +265,6 @@ static ctl_table kern_table[] = { 0600, NULL, &proc_dointvec}, {KERN_PANIC_ON_OOPS,"panic_on_oops", &panic_on_oops,sizeof(int),0644,NULL,&proc_dointvec}, -#ifdef CONFIG_SMP - {KERN_CACHEDECAYTICKS, "cache_decay_ticks", &cache_decay_ticks, sizeof (cache_decay_ticks), - 0644, NULL, &proc_doulongvec_minmax}, -#endif {0} }; diff --git a/kernel/time.c b/kernel/time.c index e78e500..3af9c7d 100644 --- a/kernel/time.c +++ b/kernel/time.c @@ -35,7 +35,7 @@ */ struct timezone sys_tz; -extern unsigned long last_nsec_offset; +extern unsigned long last_time_offset; #if !defined(__alpha__) && !defined(__ia64__) @@ -79,7 +79,7 @@ asmlinkage long sys_stime(int * tptr) write_seqlock_irq(&xtime_lock); xtime.tv_sec = value; xtime.tv_nsec = 0; - last_nsec_offset = 0; + last_time_offset = 0; time_adjust = 0; /* stop active adjtime() */ time_status |= STA_UNSYNC; time_maxerror = NTP_PHASE_LIMIT; @@ -125,7 +125,7 @@ inline static void warp_clock(void) { write_seqlock_irq(&xtime_lock); xtime.tv_sec += sys_tz.tz_minuteswest * 60; - last_nsec_offset = 0; + last_time_offset = 0; write_sequnlock_irq(&xtime_lock); } @@ -381,7 +381,7 @@ leave: if ((time_status & (STA_UNSYNC|STA_CLOCKERR)) != 0 txc->calcnt = pps_calcnt; txc->errcnt = pps_errcnt; txc->stbcnt = pps_stbcnt; - last_nsec_offset = 0; + last_time_offset = 0; write_sequnlock_irq(&xtime_lock); do_gettimeofday(&txc->time); return(result); diff --git a/kernel/timer.c b/kernel/timer.c index 6ffa0bf..caa3771 100644 --- a/kernel/timer.c +++ b/kernel/timer.c @@ -451,7 +451,6 @@ unsigned long tick_nsec = TICK_NSEC(TICK_USEC); /* USER_HZ period (nsec) */ */ struct timespec xtime __attribute__ ((aligned (16))); struct timespec wall_to_monotonic __attribute__ ((aligned (16))); -unsigned long last_nsec_offset; /* Don't completely fail for HZ > 500. */ int tickadj = 500/HZ ? : 1; /* microsecs */ @@ -606,7 +605,7 @@ static void second_overflow(void) /* in the NTP reference this is called "hardclock()" */ static void update_wall_time_one_tick(void) { - long time_adjust_step, delta_nsec; + long time_adjust_step; if ( (time_adjust_step = time_adjust) != 0 ) { /* We are doing an adjtime thing. 
@@ -622,11 +621,11 @@ static void update_wall_time_one_tick(void) time_adjust_step = tickadj; else if (time_adjust < -tickadj) time_adjust_step = -tickadj; - + /* Reduce by this step the amount of time left */ time_adjust -= time_adjust_step; } - delta_nsec = tick_nsec + time_adjust_step * 1000; + xtime.tv_nsec += tick_nsec + time_adjust_step * 1000; /* * Advance the phase, once it gets to one microsecond, then * advance the tick more. @@ -635,33 +634,13 @@ static void update_wall_time_one_tick(void) if (time_phase <= -FINEUSEC) { long ltemp = -time_phase >> (SHIFT_SCALE - 10); time_phase += ltemp << (SHIFT_SCALE - 10); - delta_nsec -= ltemp; + xtime.tv_nsec -= ltemp; } else if (time_phase >= FINEUSEC) { long ltemp = time_phase >> (SHIFT_SCALE - 10); time_phase -= ltemp << (SHIFT_SCALE - 10); - delta_nsec += ltemp; - } - xtime.tv_nsec += delta_nsec; - - /* - * The whole point of last_nsec_offset is that it can be updated atomically and - * lock-free. Thus, arches that don't have __HAVE_ARCH_CMPXCHG probably can't use - * last_nsec_offset anyhow... --davidm 2003-Feb-11 - */ -#ifdef __HAVE_ARCH_CMPXCHG - if (last_nsec_offset > 0) { - unsigned long new, old; - - do { - old = last_nsec_offset; - if (old > delta_nsec) - new = old - delta_nsec; - else - new = 0; - } while (cmpxchg(&last_nsec_offset, old, new) != old); + xtime.tv_nsec += ltemp; } -#endif } /* @@ -798,6 +777,7 @@ unsigned long wall_jiffies = INITIAL_JIFFIES; #ifndef ARCH_HAVE_XTIME_LOCK seqlock_t xtime_lock __cacheline_aligned_in_smp = SEQLOCK_UNLOCKED; #endif +unsigned long last_time_offset; /* * This function runs timers and the timer-tq in bottom half context. @@ -831,6 +811,7 @@ static inline void update_times(void) wall_jiffies += ticks; update_wall_time(ticks); } + last_time_offset = 0; calc_load(ticks); } diff --git a/mm/memory.c b/mm/memory.c index 6013408..cc3138f 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -114,10 +114,8 @@ static inline void free_one_pgd(struct mmu_gather *tlb, pgd_t * dir) } pmd = pmd_offset(dir, 0); pgd_clear(dir); - for (j = 0; j < PTRS_PER_PMD ; j++) { - prefetchw(pmd + j + PREFETCH_STRIDE/sizeof(*pmd)); + for (j = 0; j < PTRS_PER_PMD ; j++) free_one_pmd(tlb, pmd+j); - } pmd_free_tlb(tlb, pmd); } diff --git a/mm/mmap.c b/mm/mmap.c index 014a0be..cb35e2d 100644 --- a/mm/mmap.c +++ b/mm/mmap.c @@ -1440,7 +1440,7 @@ void exit_mmap(struct mm_struct *mm) vm_unacct_memory(nr_accounted); BUG_ON(mm->map_count); /* This is just debugging */ clear_page_tables(tlb, FIRST_USER_PGD_NR, USER_PTRS_PER_PGD); - tlb_finish_mmu(tlb, 0, MM_VM_SIZE(mm)); + tlb_finish_mmu(tlb, 0, TASK_SIZE); vma = mm->mmap; mm->mmap = mm->mmap_cache = NULL; diff --git a/sound/oss/cs4281/cs4281m.c b/sound/oss/cs4281/cs4281m.c index 5757af5..258efe3 100644 --- a/sound/oss/cs4281/cs4281m.c +++ b/sound/oss/cs4281/cs4281m.c @@ -1944,8 +1944,8 @@ static void clear_advance(void *buf, unsigned bsize, unsigned bptr, len -= x; } CS_DBGOUT(CS_WAVE_WRITE, 4, printk(KERN_INFO - "cs4281: clear_advance(): memset %d at %p for %d size \n", - (unsigned)c, ((char *) buf) + bptr, len)); + "cs4281: clear_advance(): memset %d at 0x%.8x for %d size \n", + (unsigned)c, (unsigned)((char *) buf) + bptr, len)); memset(((char *) buf) + bptr, c, len); } @@ -1980,8 +1980,9 @@ static void cs4281_update_ptr(struct cs4281_state *s, int intflag) wake_up(&s->dma_adc.wait); } CS_DBGOUT(CS_PARMS, 8, printk(KERN_INFO - "cs4281: cs4281_update_ptr(): s=%p hwptr=%d total_bytes=%d count=%d \n", - s, s->dma_adc.hwptr, s->dma_adc.total_bytes, s->dma_adc.count)); + "cs4281: 
cs4281_update_ptr(): s=0x%.8x hwptr=%d total_bytes=%d count=%d \n", + (unsigned)s, s->dma_adc.hwptr, + s->dma_adc.total_bytes, s->dma_adc.count)); } // update DAC pointer // @@ -2013,10 +2014,11 @@ static void cs4281_update_ptr(struct cs4281_state *s, int intflag) // Continue to play silence until the _release. // CS_DBGOUT(CS_WAVE_WRITE, 6, printk(KERN_INFO - "cs4281: cs4281_update_ptr(): memset %d at %p for %d size \n", + "cs4281: cs4281_update_ptr(): memset %d at 0x%.8x for %d size \n", (unsigned)(s->prop_dac.fmt & (AFMT_U8 | AFMT_U16_LE)) ? 0x80 : 0, - s->dma_dac.rawbuf, s->dma_dac.dmasize)); + (unsigned)s->dma_dac.rawbuf, + s->dma_dac.dmasize)); memset(s->dma_dac.rawbuf, (s->prop_dac. fmt & (AFMT_U8 | AFMT_U16_LE)) ? @@ -2047,8 +2049,9 @@ static void cs4281_update_ptr(struct cs4281_state *s, int intflag) } } CS_DBGOUT(CS_PARMS, 8, printk(KERN_INFO - "cs4281: cs4281_update_ptr(): s=%p hwptr=%d total_bytes=%d count=%d \n", - s, s->dma_dac.hwptr, s->dma_dac.total_bytes, s->dma_dac.count)); + "cs4281: cs4281_update_ptr(): s=0x%.8x hwptr=%d total_bytes=%d count=%d \n", + (unsigned) s, s->dma_dac.hwptr, + s->dma_dac.total_bytes, s->dma_dac.count)); } } @@ -2179,7 +2182,8 @@ static int mixer_ioctl(struct cs4281_state *s, unsigned int cmd, VALIDATE_STATE(s); CS_DBGOUT(CS_FUNCTION, 4, printk(KERN_INFO - "cs4281: mixer_ioctl(): s=%p cmd=0x%.8x\n", s, cmd)); + "cs4281: mixer_ioctl(): s=0x%.8x cmd=0x%.8x\n", + (unsigned) s, cmd)); #if CSDEBUG cs_printioctl(cmd); #endif @@ -2744,8 +2748,9 @@ static void CopySamples(char *dst, char *src, int count, int iChannels, CS_DBGOUT(CS_FUNCTION, 2, printk(KERN_INFO "cs4281: CopySamples()+ ")); CS_DBGOUT(CS_WAVE_READ, 8, printk(KERN_INFO - " dst=%p src=%p count=%d iChannels=%d fmt=0x%x\n", - dst, src, (unsigned) count, (unsigned) iChannels, (unsigned) fmt)); + " dst=0x%x src=0x%x count=%d iChannels=%d fmt=0x%x\n", + (unsigned) dst, (unsigned) src, (unsigned) count, + (unsigned) iChannels, (unsigned) fmt)); // Gershwin does format conversion in hardware so normally // we don't do any host based coversion. The data formatter @@ -2825,9 +2830,9 @@ static unsigned cs_copy_to_user(struct cs4281_state *s, void *dest, void *src = hwsrc; //default to the standard destination buffer addr CS_DBGOUT(CS_FUNCTION, 6, printk(KERN_INFO - "cs_copy_to_user()+ fmt=0x%x fmt_o=0x%x cnt=%d dest=%p\n", + "cs_copy_to_user()+ fmt=0x%x fmt_o=0x%x cnt=%d dest=0x%.8x\n", s->prop_adc.fmt, s->prop_adc.fmt_original, - (unsigned) cnt, dest)); + (unsigned) cnt, (unsigned) dest)); if (cnt > s->dma_adc.dmasize) { cnt = s->dma_adc.dmasize; @@ -2872,7 +2877,7 @@ static ssize_t cs4281_read(struct file *file, char *buffer, size_t count, unsigned copied = 0; CS_DBGOUT(CS_FUNCTION | CS_WAVE_READ, 2, - printk(KERN_INFO "cs4281: cs4281_read()+ %Zu \n", count)); + printk(KERN_INFO "cs4281: cs4281_read()+ %d \n", count)); VALIDATE_STATE(s); if (ppos != &file->f_pos) @@ -2895,7 +2900,7 @@ static ssize_t cs4281_read(struct file *file, char *buffer, size_t count, // while (count > 0) { CS_DBGOUT(CS_WAVE_READ, 8, printk(KERN_INFO - "_read() count>0 count=%Zu .count=%d .swptr=%d .hwptr=%d \n", + "_read() count>0 count=%d .count=%d .swptr=%d .hwptr=%d \n", count, s->dma_adc.count, s->dma_adc.swptr, s->dma_adc.hwptr)); spin_lock_irqsave(&s->lock, flags); @@ -2952,10 +2957,11 @@ static ssize_t cs4281_read(struct file *file, char *buffer, size_t count, // the "cnt" is the number of bytes to read. 
CS_DBGOUT(CS_WAVE_READ, 2, printk(KERN_INFO - "_read() copy_to cnt=%d count=%Zu ", cnt, count)); + "_read() copy_to cnt=%d count=%d ", cnt, count)); CS_DBGOUT(CS_WAVE_READ, 8, printk(KERN_INFO - " .dmasize=%d .count=%d buffer=%p ret=%Zd\n", - s->dma_adc.dmasize, s->dma_adc.count, buffer, ret)); + " .dmasize=%d .count=%d buffer=0x%.8x ret=%d\n", + s->dma_adc.dmasize, s->dma_adc.count, + (unsigned) buffer, ret)); if (cs_copy_to_user (s, buffer, s->dma_adc.rawbuf + swptr, cnt, &copied)) @@ -2971,7 +2977,7 @@ static ssize_t cs4281_read(struct file *file, char *buffer, size_t count, start_adc(s); } CS_DBGOUT(CS_FUNCTION | CS_WAVE_READ, 2, - printk(KERN_INFO "cs4281: cs4281_read()- %Zd\n", ret)); + printk(KERN_INFO "cs4281: cs4281_read()- %d\n", ret)); return ret; } @@ -2987,7 +2993,7 @@ static ssize_t cs4281_write(struct file *file, const char *buffer, int cnt; CS_DBGOUT(CS_FUNCTION | CS_WAVE_WRITE, 2, - printk(KERN_INFO "cs4281: cs4281_write()+ count=%Zu\n", + printk(KERN_INFO "cs4281: cs4281_write()+ count=%d\n", count)); VALIDATE_STATE(s); @@ -3043,7 +3049,7 @@ static ssize_t cs4281_write(struct file *file, const char *buffer, start_dac(s); } CS_DBGOUT(CS_FUNCTION | CS_WAVE_WRITE, 2, - printk(KERN_INFO "cs4281: cs4281_write()- %Zd\n", ret)); + printk(KERN_INFO "cs4281: cs4281_write()- %d\n", ret)); return ret; } @@ -3164,7 +3170,8 @@ static int cs4281_ioctl(struct inode *inode, struct file *file, int val, mapped, ret; CS_DBGOUT(CS_FUNCTION, 4, printk(KERN_INFO - "cs4281: cs4281_ioctl(): file=%p cmd=0x%.8x\n", file, cmd)); + "cs4281: cs4281_ioctl(): file=0x%.8x cmd=0x%.8x\n", + (unsigned) file, cmd)); #if CSDEBUG cs_printioctl(cmd); #endif @@ -3594,8 +3601,8 @@ static int cs4281_release(struct inode *inode, struct file *file) (struct cs4281_state *) file->private_data; CS_DBGOUT(CS_FUNCTION | CS_RELEASE, 2, printk(KERN_INFO - "cs4281: cs4281_release(): inode=%p file=%p f_mode=%d\n", - inode, file, file->f_mode)); + "cs4281: cs4281_release(): inode=0x%.8x file=0x%.8x f_mode=%d\n", + (unsigned) inode, (unsigned) file, file->f_mode)); VALIDATE_STATE(s); @@ -3629,8 +3636,8 @@ static int cs4281_open(struct inode *inode, struct file *file) struct list_head *entry; CS_DBGOUT(CS_FUNCTION | CS_OPEN, 2, printk(KERN_INFO - "cs4281: cs4281_open(): inode=%p file=%p f_mode=0x%x\n", - inode, file, file->f_mode)); + "cs4281: cs4281_open(): inode=0x%.8x file=0x%.8x f_mode=0x%x\n", + (unsigned) inode, (unsigned) file, file->f_mode)); list_for_each(entry, &cs4281_devs) { @@ -4340,8 +4347,10 @@ static int __devinit cs4281_probe(struct pci_dev *pcidev, CS_DBGOUT(CS_INIT, 2, printk(KERN_INFO - "cs4281: probe() BA0=0x%.8x BA1=0x%.8x pBA0=%p pBA1=%p \n", - (unsigned) temp1, (unsigned) temp2, s->pBA0, s->pBA1)); + "cs4281: probe() BA0=0x%.8x BA1=0x%.8x pBA0=0x%.8x pBA1=0x%.8x \n", + (unsigned) temp1, (unsigned) temp2, + (unsigned) s->pBA0, (unsigned) s->pBA1)); + CS_DBGOUT(CS_INIT, 2, printk(KERN_INFO "cs4281: probe() pBA0phys=0x%.8x pBA1phys=0x%.8x\n", @@ -4388,13 +4397,15 @@ static int __devinit cs4281_probe(struct pci_dev *pcidev, if (pmdev) { CS_DBGOUT(CS_INIT | CS_PM, 4, printk(KERN_INFO - "cs4281: probe() pm_register() succeeded (%p).\n", pmdev)); + "cs4281: probe() pm_register() succeeded (0x%x).\n", + (unsigned)pmdev)); pmdev->data = s; } else { CS_DBGOUT(CS_INIT | CS_PM | CS_ERROR, 0, printk(KERN_INFO - "cs4281: probe() pm_register() failed (%p).\n", pmdev)); + "cs4281: probe() pm_register() failed (0x%x).\n", + (unsigned)pmdev)); s->pm.flags |= CS4281_PM_NOT_REGISTERED; } #endif diff --git 
a/sound/oss/cs4281/cs4281pm-24.c b/sound/oss/cs4281/cs4281pm-24.c index faa091b..eaada08 100644 --- a/sound/oss/cs4281/cs4281pm-24.c +++ b/sound/oss/cs4281/cs4281pm-24.c @@ -46,8 +46,8 @@ int cs4281_pm_callback(struct pm_dev *dev, pm_request_t rqst, void *data) struct cs4281_state *state; CS_DBGOUT(CS_PM, 2, printk(KERN_INFO - "cs4281: cs4281_pm_callback dev=%p rqst=0x%x state=%p\n", - dev,(unsigned)rqst,data)); + "cs4281: cs4281_pm_callback dev=0x%x rqst=0x%x state=%d\n", + (unsigned)dev,(unsigned)rqst,(unsigned)data)); state = (struct cs4281_state *) dev->data; if (state) { switch(rqst) { diff --git a/usr/Makefile b/usr/Makefile index 0ab87ea..58a915e 100644 --- a/usr/Makefile +++ b/usr/Makefile @@ -5,9 +5,11 @@ host-progs := gen_init_cpio clean-files := initramfs_data.cpio.gz -$(obj)/initramfs_data.S: $(obj)/initramfs_data.cpio.gz - echo '.section ".init.ramfs", "a"' > $@ - od -v -An -t x1 -w8 $^ | cut -c2- | sed -e s"/ /,0x/g" -e s"/^/.byte 0x"/ >> $@ +LDFLAGS_initramfs_data.o := $(LDFLAGS_BLOB) -r -T + +$(obj)/initramfs_data.o: $(src)/initramfs_data.scr \ + $(obj)/initramfs_data.cpio.gz FORCE + $(call if_changed,ld) # initramfs-y are the programs which will be copied into the CPIO # archive. Currently, the filenames are hardcoded in gen_init_cpio,
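
A note on the J_EXPECT hunk in include/linux/jbd.h above: with GCC's named
variadic macros, an invocation that omits the variadic argument leaves a
trailing comma in the expansion unless the ", ## args" extension is used,
which deletes the comma. A minimal standalone sketch (not the kernel macro
itself) showing why the "##" is needed:

#include <stdio.h>

static void report(const char *expr, ...)
{
        /* Varargs sink; a real version would vfprintf a format from the
         * extra arguments. */
        printf("assertion '%s' failed\n", expr);
}

/* GCC extension: ", ## why" deletes the comma when "why" is empty, so both
 * expect(e) and expect(e, "msg", ...) expand cleanly. Plain
 * "report(#e, why)" would break the empty case: report("e", ) is a
 * syntax error. */
#define expect(e, why...) do { if (!(e)) report(#e , ## why); } while (0)

int main(void)
{
        int p = 0;
        expect(p != 0);               /* empty variadic part */
        expect(p != 0, "p=%d", p);    /* with extra arguments */
        return 0;
}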
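
On the include/linux/percpu.h hunk: the get_cpu_var()/put_cpu_var() pair must
always bracket an access, since get_cpu_var() disables preemption and
evaluates to an lvalue for the current CPU's copy of the variable, and
put_cpu_var() re-enables preemption. A usage sketch in kernel context (the
counter name is invented for illustration; DEFINE_PER_CPU is the same macro
used by the kernel/fork.c hunk above):

/* Hypothetical per-CPU event counter, named here only for illustration. */
static DEFINE_PER_CPU(unsigned long, my_event_count);

static void count_one_event(void)
{
        /* Preemption is disabled from here on, so we cannot migrate to
         * another CPU while touching this CPU's copy. */
        get_cpu_var(my_event_count)++;
        /* Re-enables preemption; no pointer to the per-CPU copy may be
         * held past this point. */
        put_cpu_var(my_event_count);
}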
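
On the RPCSVC_MAXPAGES hunk in include/linux/sunrpc/svc.h: the macro rounds
the maximum RPC payload up to whole pages and then adds slack pages for the
non-page part of the reply; the change trims that slack from two pages to
one. A standalone arithmetic check, with illustrative values for the two
constants (assumptions here, not taken from this patch):

#include <stdio.h>

/* Illustrative stand-ins for the real kernel constants. */
#define RPCSVC_MAXPAYLOAD (32 * 1024)
#define PAGE_SIZE 4096

#define RPCSVC_MAXPAGES ((RPCSVC_MAXPAYLOAD + PAGE_SIZE - 1) / PAGE_SIZE + 1)

int main(void)
{
        /* (32768 + 4095) / 4096 = 8 payload pages, plus 1 slack page = 9. */
        printf("RPCSVC_MAXPAGES = %d\n", RPCSVC_MAXPAGES);
        return 0;
}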
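
Finally, the last_nsec_offset block removed from kernel/timer.c is the
standard lock-free read-modify-write pattern: read the variable, compute the
new value, and retry with cmpxchg() until no other CPU raced in between,
which is why the original guarded it with __HAVE_ARCH_CMPXCHG. A generic
sketch of the same pattern (kernel context assumed; cmpxchg() is the
arch-provided primitive):

/* Atomically subtract delta from *val, clamping at zero, without a lock.
 * Mirrors the loop deleted from update_wall_time_one_tick() above. */
static void lockfree_sub_clamp(unsigned long *val, unsigned long delta)
{
        unsigned long old, new;

        do {
                old = *val;
                new = (old > delta) ? old - delta : 0;
                /* cmpxchg() returns the value it found; if another CPU
                 * changed *val meanwhile, retry with the fresh value. */
        } while (cmpxchg(val, old, new) != old);
}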