NOSTDINC_FLAGS = -nostdinc -iwithprefix include
CPPFLAGS := -D__KERNEL__ -Iinclude
-CFLAGS := $(CPPFLAGS) -Wall -Wstrict-prototypes -Wno-trigraphs -O2 \
+CFLAGS := $(CPPFLAGS) -Wall -Wstrict-prototypes -Wno-trigraphs -g -O2 \
-fno-strict-aliasing -fno-common
AFLAGS := -D__ASSEMBLY__ $(CPPFLAGS)
cmd_syscall = $(CC) -nostdlib $(SYSCFLAGS_$(@F)) \
-Wl,-T,$(filter-out FORCE,$^) -o $@
-vsyscall-flags = -shared -s -Wl,-soname=linux-vsyscall.so.1
+vsyscall-flags = -shared -s -Wl,-soname=linux-gate.so.1
SYSCFLAGS_vsyscall-sysenter.so = $(vsyscall-flags)
SYSCFLAGS_vsyscall-int80.so = $(vsyscall-flags)
bool
default y
+config TIME_INTERPOLATION
+ bool
+ default y
+
choice
prompt "IA-64 processor type"
default ITANIUM
HP-simulator For the HP simulator
(<http://software.hp.com/ia64linux/>).
HP-zx1 For HP zx1-based systems.
- SN1-simulator For the SGI SN1 simulator.
+ SGI-SN2 For SGI Altix systems
DIG-compliant For DIG ("Developer's Interface Guide") compliant
systems.
for the zx1 I/O MMU and makes root bus bridges appear in PCI config
space (required for zx1 agpgart support).
-config IA64_SGI_SN1
- bool "SGI-SN1"
-
config IA64_SGI_SN2
bool "SGI-SN2"
# align cache-sensitive data to 128 bytes
config IA64_L1_CACHE_SHIFT
int
- default "7" if MCKINLEY || ITANIUM && IA64_SGI_SN1
- default "6" if ITANIUM && !IA64_SGI_SN1
+ default "7" if MCKINLEY
+ default "6" if ITANIUM
# align cache-sensitive data to 64 bytes
config MCKINLEY_ASTEP_SPECIFIC
config NUMA
bool "Enable NUMA support" if IA64_GENERIC || IA64_DIG || IA64_HP_ZX1
- default y if IA64_SGI_SN1 || IA64_SGI_SN2
+ default y if IA64_SGI_SN2
help
Say Y to compile the kernel to support NUMA (Non-Uniform Memory
Access). This option is for configuring high-end multiprocessor
config DISCONTIGMEM
bool
- depends on IA64_SGI_SN1 || IA64_SGI_SN2 || (IA64_GENERIC || IA64_DIG || IA64_HP_ZX1) && NUMA
+ depends on IA64_SGI_SN2 || (IA64_GENERIC || IA64_DIG || IA64_HP_ZX1) && NUMA
default y
help
Say Y to support efficient handling of discontiguous physical memory,
config IA64_MCA
bool "Enable IA-64 Machine Check Abort" if IA64_GENERIC || IA64_DIG || IA64_HP_ZX1
- default y if IA64_SGI_SN1 || IA64_SGI_SN2
+ default y if IA64_SGI_SN2
help
Say Y here to enable machine check support for IA-64. If you're
unsure, answer Y.
config IOSAPIC
bool
- depends on IA64_GENERIC || IA64_DIG || IA64_HP_ZX1
- default y
-
-config IA64_SGI_SN
- bool
- depends on IA64_SGI_SN1 || IA64_SGI_SN2
+ depends on IA64_GENERIC || IA64_DIG || IA64_HP_ZX1 || IA64_SGI_SN2
default y
config IA64_SGI_SN_DEBUG
bool "Enable extra debugging code"
- depends on IA64_SGI_SN1 || IA64_SGI_SN2
+ depends on IA64_SGI_SN2
help
Turns on extra debugging code in the SGI SN (Scalable NUMA) platform
for IA-64. Unless you are debugging problems on an SGI SN IA-64 box,
config IA64_SGI_SN_SIM
bool "Enable SGI Medusa Simulator Support"
- depends on IA64_SGI_SN1 || IA64_SGI_SN2
+ depends on IA64_SGI_SN2
help
If you are compiling a kernel that will run under SGI's IA-64
simulator (Medusa) then say Y, otherwise say N.
config IA64_SGI_AUTOTEST
bool "Enable autotest (llsc). Option to run cache test instead of booting"
- depends on IA64_SGI_SN1 || IA64_SGI_SN2
+ depends on IA64_SGI_SN2
help
Build a kernel used for hardware validation. If you include the
keyword "autotest" on the boot command line, the kernel does NOT boot.
config SERIAL_SGI_L1_PROTOCOL
bool "Enable protocol mode for the L1 console"
- depends on IA64_SGI_SN1 || IA64_SGI_SN2
+ depends on IA64_SGI_SN2
help
Uses protocol mode instead of raw mode for the level 1 console on the
SGI SN (Scalable NUMA) platform for IA-64. If you are compiling for
config PERCPU_IRQ
bool
- depends on IA64_SGI_SN1 || IA64_SGI_SN2
+ depends on IA64_SGI_SN2
default y
-config PCIBA
- tristate "PCIBA support"
- depends on IA64_SGI_SN1 || IA64_SGI_SN2
- help
- IRIX PCIBA-inspired user mode PCI interface for the SGI SN (Scalable
- NUMA) platform for IA-64. Unless you are compiling a kernel for an
- SGI SN IA-64 box, say N.
-
# On IA-64, we always want an ELF /proc/kcore.
config KCORE_ELF
bool
source "drivers/block/Kconfig"
+source "drivers/ide/Kconfig"
+
source "drivers/ieee1394/Kconfig"
source "drivers/message/i2o/Kconfig"
source "drivers/usb/Kconfig"
-source "lib/Kconfig"
source "net/bluetooth/Kconfig"
endif
+source "lib/Kconfig"
+
source "arch/ia64/hp/sim/Kconfig"
menu "Kernel hacking"
-config FSYS
- bool "Light-weight system-call support (via epc)"
-
choice
prompt "Physical memory granularity"
default IA64_GRANULE_64MB
config IA64_EARLY_PRINTK
bool "Early printk support"
- depends on DEBUG_KERNEL
+ depends on DEBUG_KERNEL && !IA64_GENERIC
help
Selecting this option uses the VGA screen or serial console for
printk() output before the consoles are initialised. It is useful
bool "Early printk on VGA"
depends on IA64_EARLY_PRINTK
+config IA64_EARLY_PRINTK_SGI_SN
+ bool "Early printk on SGI SN serial console"
+ depends on IA64_EARLY_PRINTK && (IA64_GENERIC || IA64_SGI_SN2)
+
config DEBUG_SLAB
bool "Debug memory allocations"
depends on DEBUG_KERNEL
AFLAGS_KERNEL := -mconstant-gp
EXTRA :=
-cflags-y := -pipe $(EXTRA) -ffixed-r13 -mfixed-range=f10-f15,f32-f127 \
- -falign-functions=32
+cflags-y := -pipe $(EXTRA) -ffixed-r13 -mfixed-range=f12-f15,f32-f127 \
+ -falign-functions=32 -frename-registers
CFLAGS_KERNEL := -mconstant-gp
GCC_VERSION=$(shell $(CC) -v 2>&1 | fgrep 'gcc version' | cut -f3 -d' ' | cut -f1 -d'.')
GAS_STATUS=$(shell arch/ia64/scripts/check-gas $(CC) $(OBJDUMP))
+arch-cppflags := $(shell arch/ia64/scripts/toolchain-flags $(CC) $(OBJDUMP))
+cflags-y += $(arch-cppflags)
+AFLAGS += $(arch-cppflags)
+
ifeq ($(GAS_STATUS),buggy)
$(error Sorry, you need a newer version of the assember, one that is built from \
a source-tree that post-dates 18-Dec-2002. You can find a pre-compiled \
ftp://ftp.hpl.hp.com/pub/linux-ia64/gas-030124.tar.gz)
endif
-ifneq ($(GCC_VERSION),2)
- cflags-$(CONFIG_ITANIUM) += -frename-registers
+ifeq ($(GCC_VERSION),2)
+$(error Sorry, your compiler is too old. GCC v2.96 is known to generate bad code.)
endif
ifeq ($(GCC_VERSION),3)
ifeq ($(GCC_MINOR_VERSION),4)
- cflags-$(CONFIG_ITANIUM) += -mtune=merced
- cflags-$(CONFIG_MCKINLEY) += -mtune=mckinley
+ cflags-$(CONFIG_ITANIUM) += -mtune=merced
+ cflags-$(CONFIG_MCKINLEY) += -mtune=mckinley
endif
endif
cflags-$(CONFIG_ITANIUM_BSTEP_SPECIFIC) += -mb-step
-cflags-$(CONFIG_IA64_SGI_SN) += -DBRINGUP
CFLAGS += $(cflags-y)
head-y := arch/ia64/kernel/head.o arch/ia64/kernel/init_task.o
core-$(CONFIG_IA64_DIG) += arch/ia64/dig/
core-$(CONFIG_IA64_GENERIC) += arch/ia64/dig/
core-$(CONFIG_IA64_HP_ZX1) += arch/ia64/dig/
-core-$(CONFIG_IA64_SGI_SN) += arch/ia64/sn/
+core-$(CONFIG_IA64_SGI_SN2) += arch/ia64/sn/
drivers-$(CONFIG_PCI) += arch/ia64/pci/
drivers-$(CONFIG_IA64_HP_SIM) += arch/ia64/hp/sim/
drivers-$(CONFIG_IA64_HP_ZX1) += arch/ia64/hp/common/ arch/ia64/hp/zx1/
-drivers-$(CONFIG_IA64_GENERIC) += arch/ia64/hp/common/ arch/ia64/hp/zx1/ arch/ia64/hp/sim/
+drivers-$(CONFIG_IA64_GENERIC) += arch/ia64/hp/common/ arch/ia64/hp/zx1/ arch/ia64/hp/sim/ \
+ arch/ia64/sn/
boot := arch/ia64/boot
-tools := arch/ia64/tools
-
-.PHONY: boot compressed include/asm-ia64/offsets.h
-all: prepare vmlinux
+.PHONY: boot compressed check
compressed: vmlinux.gz
vmlinux.gz: vmlinux
- $(Q)$(MAKE) $(build)=$(boot) vmlinux.gz
+ $(Q)$(MAKE) $(build)=$(boot) $@
check: vmlinux
- arch/ia64/scripts/unwcheck.sh vmlinux
+ arch/ia64/scripts/unwcheck.sh $<
archclean:
$(Q)$(MAKE) $(clean)=$(boot)
- $(Q)$(MAKE) $(clean)=$(tools)
-CLEAN_FILES += include/asm-ia64/offsets.h vmlinux.gz bootloader
+CLEAN_FILES += include/asm-ia64/.offsets.h.stamp include/asm-ia64/offsets.h vmlinux.gz bootloader
prepare: include/asm-ia64/offsets.h
+include/asm-$(ARCH)/offsets.h: arch/$(ARCH)/kernel/asm-offsets.s
+ $(call filechk,gen-asm-offsets)
+
+arch/ia64/kernel/asm-offsets.s: include/asm-ia64/.offsets.h.stamp
+
+include/asm-ia64/.offsets.h.stamp:
+ [ -s include/asm-ia64/offsets.h ] \
+ || echo "#define IA64_TASK_SIZE 0" > include/asm-ia64/offsets.h
+ touch $@
+
boot: lib/lib.a vmlinux
$(Q)$(MAKE) $(build)=$(boot) $@
-include/asm-ia64/offsets.h: include/asm include/linux/version.h include/config/MARKER
- $(Q)$(MAKE) $(build)=$(tools) $@
define archhelp
echo ' compressed - Build compressed kernel image'
#include "../kernel/fw-emu.c"
+/* This needs to be defined because lib/string.c:strlcat() calls it in case of error... */
+asm (".global printk; printk = 0");
+
/*
* Set a break point on this function so that symbols are available to set breakpoints in
* the kernel being debugged.
continue;
req.len = elf_phdr->p_filesz;
- req.addr = __pa(elf_phdr->p_vaddr);
+ req.addr = __pa(elf_phdr->p_paddr);
ssc(fd, 1, (long) &req, elf_phdr->p_offset, SSC_READ);
ssc((long) &stat, 0, 0, 0, SSC_WAIT_COMPLETION);
- memset((char *)__pa(elf_phdr->p_vaddr) + elf_phdr->p_filesz, 0,
+ memset((char *)__pa(elf_phdr->p_paddr) + elf_phdr->p_filesz, 0,
elf_phdr->p_memsz - elf_phdr->p_filesz);
}
ssc(fd, 0, 0, 0, SSC_CLOSE);
#
# General setup
#
-CONFIG_NET=y
+CONFIG_SWAP=y
CONFIG_SYSVIPC=y
-# CONFIG_BSD_PROCESS_ACCT is not set
+CONFIG_BSD_PROCESS_ACCT=y
CONFIG_SYSCTL=y
+CONFIG_LOG_BUF_SHIFT=16
+# CONFIG_EMBEDDED is not set
+CONFIG_FUTEX=y
+CONFIG_EPOLL=y
#
# Loadable module support
#
CONFIG_MODULES=y
+CONFIG_MODULE_UNLOAD=y
+CONFIG_MODULE_FORCE_UNLOAD=y
+CONFIG_OBSOLETE_MODPARM=y
CONFIG_MODVERSIONS=y
-# CONFIG_KMOD is not set
+CONFIG_KMOD=y
#
# Processor type and features
#
CONFIG_IA64=y
-CONFIG_RWSEM_GENERIC_SPINLOCK=y
-CONFIG_ITANIUM=y
-# CONFIG_MCKINLEY is not set
+CONFIG_MMU=y
+CONFIG_RWSEM_XCHGADD_ALGORITHM=y
+CONFIG_TIME_INTERPOLATION=y
+# CONFIG_ITANIUM is not set
+CONFIG_MCKINLEY=y
# CONFIG_IA64_GENERIC is not set
-CONFIG_IA64_DIG=y
+# CONFIG_IA64_DIG is not set
# CONFIG_IA64_HP_SIM is not set
-# CONFIG_IA64_HP_ZX1 is not set
-# CONFIG_IA64_SGI_SN1 is not set
+CONFIG_IA64_HP_ZX1=y
# CONFIG_IA64_SGI_SN2 is not set
# CONFIG_IA64_PAGE_SIZE_4KB is not set
# CONFIG_IA64_PAGE_SIZE_8KB is not set
CONFIG_ACPI_EFI=y
CONFIG_ACPI_INTERPRETER=y
CONFIG_ACPI_KERNEL_CONFIG=y
-CONFIG_IA64_BRL_EMU=y
-# CONFIG_ITANIUM_BSTEP_SPECIFIC is not set
-CONFIG_IA64_L1_CACHE_SHIFT=6
+CONFIG_IA64_L1_CACHE_SHIFT=7
+# CONFIG_MCKINLEY_ASTEP_SPECIFIC is not set
# CONFIG_NUMA is not set
+CONFIG_VIRTUAL_MEM_MAP=y
CONFIG_IA64_MCA=y
CONFIG_PM=y
CONFIG_IOSAPIC=y
CONFIG_KCORE_ELF=y
CONFIG_FORCE_MAX_ZONEORDER=18
-# CONFIG_HUGETLB_PAGE is not set
+CONFIG_HUGETLB_PAGE=y
+# CONFIG_HUGETLB_PAGE_SIZE_4GB is not set
+# CONFIG_HUGETLB_PAGE_SIZE_1GB is not set
+# CONFIG_HUGETLB_PAGE_SIZE_256MB is not set
+CONFIG_HUGETLB_PAGE_SIZE_64MB=y
+# CONFIG_HUGETLB_PAGE_SIZE_16MB is not set
+# CONFIG_HUGETLB_PAGE_SIZE_4MB is not set
+# CONFIG_HUGETLB_PAGE_SIZE_1MB is not set
+# CONFIG_HUGETLB_PAGE_SIZE_256KB is not set
+CONFIG_IA64_PAL_IDLE=y
CONFIG_SMP=y
+# CONFIG_PREEMPT is not set
CONFIG_IA32_SUPPORT=y
+CONFIG_COMPAT=y
CONFIG_PERFMON=y
CONFIG_IA64_PALINFO=y
CONFIG_EFI_VARS=y
CONFIG_NR_CPUS=16
CONFIG_BINFMT_ELF=y
-# CONFIG_BINFMT_MISC is not set
+CONFIG_BINFMT_MISC=y
#
# ACPI Support
#
CONFIG_ACPI_BOOT=y
CONFIG_ACPI_BUTTON=y
-CONFIG_ACPI_FAN=m
-CONFIG_ACPI_PROCESSOR=m
-CONFIG_ACPI_THERMAL=m
+CONFIG_ACPI_FAN=y
+CONFIG_ACPI_PROCESSOR=y
+CONFIG_ACPI_THERMAL=y
# CONFIG_ACPI_DEBUG is not set
CONFIG_ACPI_BUS=y
CONFIG_ACPI_POWER=y
CONFIG_ACPI_PCI=y
CONFIG_ACPI_SYSTEM=y
CONFIG_PCI=y
+CONFIG_PCI_DOMAINS=y
+CONFIG_PCI_LEGACY_PROC=y
CONFIG_PCI_NAMES=y
-# CONFIG_HOTPLUG is not set
+CONFIG_HOTPLUG=y
+
+#
+# PCI Hotplug Support
+#
+# CONFIG_HOTPLUG_PCI is not set
+
+#
+# PCMCIA/CardBus support
+#
+# CONFIG_PCMCIA is not set
#
# Parallel port support
# CONFIG_MTD is not set
#
-# Plug and Play configuration
+# Plug and Play support
#
# CONFIG_PNP is not set
# CONFIG_BLK_DEV_UMEM is not set
CONFIG_BLK_DEV_LOOP=y
# CONFIG_BLK_DEV_NBD is not set
-# CONFIG_BLK_DEV_RAM is not set
-
-#
-# IEEE 1394 (FireWire) support (EXPERIMENTAL)
-#
-# CONFIG_IEEE1394 is not set
-
-#
-# I2O device support
-#
-# CONFIG_I2O is not set
-
-#
-# Multi-device support (RAID and LVM)
-#
-# CONFIG_MD is not set
-
-#
-# Fusion MPT device support
-#
-# CONFIG_FUSION is not set
+CONFIG_BLK_DEV_RAM=y
+CONFIG_BLK_DEV_RAM_SIZE=4096
+CONFIG_BLK_DEV_INITRD=y
#
# ATA/ATAPI/MFM/RLL support
CONFIG_IDEDISK_MULTI_MODE=y
# CONFIG_IDEDISK_STROKE is not set
CONFIG_BLK_DEV_IDECD=y
-CONFIG_BLK_DEV_IDEFLOPPY=y
-CONFIG_BLK_DEV_IDESCSI=y
-# CONFIG_IDE_TASK_IOCTL is not set
+CONFIG_BLK_DEV_IDEFLOPPY=m
+CONFIG_BLK_DEV_IDESCSI=m
+CONFIG_IDE_TASK_IOCTL=y
+CONFIG_IDE_TASKFILE_IO=y
#
# IDE chipset support/bugfixes
#
CONFIG_BLK_DEV_IDEPCI=y
-# CONFIG_BLK_DEV_GENERIC is not set
+CONFIG_BLK_DEV_GENERIC=y
CONFIG_IDEPCI_SHARE_IRQ=y
CONFIG_BLK_DEV_IDEDMA_PCI=y
-# CONFIG_BLK_DEV_IDE_TCQ is not set
+CONFIG_BLK_DEV_IDE_TCQ=y
+CONFIG_BLK_DEV_IDE_TCQ_DEFAULT=y
+CONFIG_BLK_DEV_IDE_TCQ_DEPTH=8
# CONFIG_BLK_DEV_OFFBOARD is not set
# CONFIG_BLK_DEV_IDEDMA_FORCED is not set
-# CONFIG_IDEDMA_PCI_AUTO is not set
+CONFIG_IDEDMA_PCI_AUTO=y
+# CONFIG_IDEDMA_ONLYDISK is not set
CONFIG_BLK_DEV_IDEDMA=y
# CONFIG_IDEDMA_PCI_WIP is not set
CONFIG_BLK_DEV_ADMA=y
# CONFIG_BLK_DEV_AEC62XX is not set
# CONFIG_BLK_DEV_ALI15X3 is not set
# CONFIG_BLK_DEV_AMD74XX is not set
-# CONFIG_BLK_DEV_CMD64X is not set
+CONFIG_BLK_DEV_CMD64X=y
+# CONFIG_BLK_DEV_TRIFLEX is not set
# CONFIG_BLK_DEV_CY82C693 is not set
-# CONFIG_BLK_DEV_CS5530 is not set
+# CONFIG_BLK_DEV_CS5520 is not set
# CONFIG_BLK_DEV_HPT34X is not set
# CONFIG_BLK_DEV_HPT366 is not set
+# CONFIG_BLK_DEV_SC1200 is not set
# CONFIG_BLK_DEV_PIIX is not set
-# CONFIG_BLK_DEV_NFORCE is not set
# CONFIG_BLK_DEV_NS87415 is not set
# CONFIG_BLK_DEV_OPTI621 is not set
# CONFIG_BLK_DEV_PDC202XX_OLD is not set
# CONFIG_BLK_DEV_SLC90E66 is not set
# CONFIG_BLK_DEV_TRM290 is not set
# CONFIG_BLK_DEV_VIA82CXXX is not set
+CONFIG_IDEDMA_AUTO=y
# CONFIG_IDEDMA_IVB is not set
+CONFIG_BLK_DEV_IDE_MODES=y
+
+#
+# IEEE 1394 (FireWire) support (EXPERIMENTAL)
+#
+# CONFIG_IEEE1394 is not set
+
+#
+# I2O device support
+#
+# CONFIG_I2O is not set
+
+#
+# Multi-device support (RAID and LVM)
+#
+# CONFIG_MD is not set
+
+#
+# Fusion MPT device support
+#
+CONFIG_FUSION=y
+CONFIG_FUSION_BOOT=y
+CONFIG_FUSION_MAX_SGE=40
+# CONFIG_FUSION_ISENSE is not set
+# CONFIG_FUSION_CTL is not set
#
# SCSI support
# SCSI support type (disk, tape, CD-ROM)
#
CONFIG_BLK_DEV_SD=y
-# CONFIG_CHR_DEV_ST is not set
-# CONFIG_CHR_DEV_OSST is not set
-# CONFIG_BLK_DEV_SR is not set
-# CONFIG_CHR_DEV_SG is not set
+CONFIG_CHR_DEV_ST=y
+CONFIG_CHR_DEV_OSST=y
+CONFIG_BLK_DEV_SR=y
+CONFIG_BLK_DEV_SR_VENDOR=y
+CONFIG_CHR_DEV_SG=y
#
# Some SCSI devices (e.g. CD jukebox) support multiple LUNs
#
-# CONFIG_SCSI_MULTI_LUN is not set
-# CONFIG_SCSI_REPORT_LUNS is not set
+CONFIG_SCSI_MULTI_LUN=y
+CONFIG_SCSI_REPORT_LUNS=y
CONFIG_SCSI_CONSTANTS=y
CONFIG_SCSI_LOGGING=y
# CONFIG_SCSI_ACARD is not set
# CONFIG_SCSI_AACRAID is not set
# CONFIG_SCSI_AIC7XXX is not set
-# CONFIG_SCSI_AIC7XXX_OLD is not set
+CONFIG_SCSI_AIC7XXX_OLD=y
+# CONFIG_SCSI_AIC79XX is not set
# CONFIG_SCSI_DPT_I2O is not set
# CONFIG_SCSI_ADVANSYS is not set
# CONFIG_SCSI_IN2000 is not set
# CONFIG_SCSI_AM53C974 is not set
-# CONFIG_SCSI_MEGARAID is not set
+CONFIG_SCSI_MEGARAID=y
# CONFIG_SCSI_BUSLOGIC is not set
# CONFIG_SCSI_CPQFCTS is not set
# CONFIG_SCSI_DMX3191D is not set
# CONFIG_SCSI_EATA is not set
-# CONFIG_SCSI_EATA_DMA is not set
# CONFIG_SCSI_EATA_PIO is not set
# CONFIG_SCSI_FUTURE_DOMAIN is not set
# CONFIG_SCSI_GDTH is not set
# CONFIG_SCSI_INITIO is not set
# CONFIG_SCSI_INIA100 is not set
# CONFIG_SCSI_NCR53C7xx is not set
-# CONFIG_SCSI_SYM53C8XX_2 is not set
-# CONFIG_SCSI_NCR53C8XX is not set
-# CONFIG_SCSI_SYM53C8XX is not set
+CONFIG_SCSI_SYM53C8XX_2=y
+CONFIG_SCSI_SYM53C8XX_DMA_ADDRESSING_MODE=1
+CONFIG_SCSI_SYM53C8XX_DEFAULT_TAGS=16
+CONFIG_SCSI_SYM53C8XX_MAX_TAGS=64
+# CONFIG_SCSI_SYM53C8XX_IOMAPPED is not set
# CONFIG_SCSI_PCI2000 is not set
# CONFIG_SCSI_PCI2220I is not set
# CONFIG_SCSI_QLOGIC_ISP is not set
# CONFIG_SCSI_QLOGIC_FC is not set
CONFIG_SCSI_QLOGIC_1280=y
+# CONFIG_SCSI_DC395x is not set
# CONFIG_SCSI_DC390T is not set
# CONFIG_SCSI_U14_34F is not set
# CONFIG_SCSI_NSP32 is not set
# CONFIG_SCSI_DEBUG is not set
#
+# Networking support
+#
+CONFIG_NET=y
+
+#
# Networking options
#
CONFIG_PACKET=y
-CONFIG_PACKET_MMAP=y
+# CONFIG_PACKET_MMAP is not set
# CONFIG_NETLINK_DEV is not set
-# CONFIG_NETFILTER is not set
-CONFIG_FILTER=y
+CONFIG_NETFILTER=y
+# CONFIG_NETFILTER_DEBUG is not set
CONFIG_UNIX=y
+# CONFIG_NET_KEY is not set
CONFIG_INET=y
-# CONFIG_IP_MULTICAST is not set
+CONFIG_IP_MULTICAST=y
# CONFIG_IP_ADVANCED_ROUTER is not set
# CONFIG_IP_PNP is not set
# CONFIG_NET_IPIP is not set
# CONFIG_NET_IPGRE is not set
+# CONFIG_IP_MROUTE is not set
# CONFIG_ARPD is not set
# CONFIG_INET_ECN is not set
# CONFIG_SYN_COOKIES is not set
+# CONFIG_INET_AH is not set
+# CONFIG_INET_ESP is not set
+# CONFIG_INET_IPCOMP is not set
+
+#
+# IP: Netfilter Configuration
+#
+# CONFIG_IP_NF_CONNTRACK is not set
+# CONFIG_IP_NF_QUEUE is not set
+# CONFIG_IP_NF_IPTABLES is not set
+CONFIG_IP_NF_ARPTABLES=y
+# CONFIG_IP_NF_ARPFILTER is not set
+# CONFIG_IP_NF_COMPAT_IPCHAINS is not set
+# CONFIG_IP_NF_COMPAT_IPFWADM is not set
# CONFIG_IPV6 is not set
+# CONFIG_XFRM_USER is not set
#
# SCTP Configuration (EXPERIMENTAL)
# CONFIG_NET_SCHED is not set
#
-# Network device support
+# Network testing
#
+# CONFIG_NET_PKTGEN is not set
CONFIG_NETDEVICES=y
#
#
# CONFIG_ARCNET is not set
CONFIG_DUMMY=y
-# CONFIG_BONDING is not set
+CONFIG_BONDING=y
# CONFIG_EQUALIZER is not set
# CONFIG_TUN is not set
# CONFIG_ETHERTAP is not set
# Ethernet (10 or 100Mbit)
#
CONFIG_NET_ETHERNET=y
+CONFIG_MII=y
# CONFIG_HAPPYMEAL is not set
# CONFIG_SUNGEM is not set
# CONFIG_NET_VENDOR_3COM is not set
-# CONFIG_NET_VENDOR_SMC is not set
-# CONFIG_NET_VENDOR_RACAL is not set
#
# Tulip family network device support
# CONFIG_HP100 is not set
CONFIG_NET_PCI=y
# CONFIG_PCNET32 is not set
+# CONFIG_AMD8111_ETH is not set
# CONFIG_ADAPTEC_STARFIRE is not set
+# CONFIG_B44 is not set
# CONFIG_DGRS is not set
CONFIG_EEPRO100=y
+# CONFIG_EEPRO100_PIO is not set
# CONFIG_E100 is not set
# CONFIG_FEALNX is not set
# CONFIG_NATSEMI is not set
# CONFIG_SUNDANCE is not set
# CONFIG_TLAN is not set
# CONFIG_VIA_RHINE is not set
-# CONFIG_NET_POCKET is not set
#
# Ethernet (1000 Mbit)
#
# CONFIG_ACENIC is not set
# CONFIG_DL2K is not set
-# CONFIG_E1000 is not set
+CONFIG_E1000=y
+# CONFIG_E1000_NAPI is not set
# CONFIG_NS83820 is not set
# CONFIG_HAMACHI is not set
# CONFIG_YELLOWFIN is not set
+# CONFIG_R8169 is not set
# CONFIG_SK98LIN is not set
-# CONFIG_TIGON3 is not set
+CONFIG_TIGON3=m
+
+#
+# Ethernet (10000 Mbit)
+#
+# CONFIG_IXGB is not set
# CONFIG_FDDI is not set
# CONFIG_HIPPI is not set
# CONFIG_PPP is not set
# CONFIG_NET_RADIO is not set
#
-# Token Ring devices
+# Token Ring devices (depends on LLC=y)
#
-# CONFIG_TR is not set
# CONFIG_NET_FC is not set
# CONFIG_RCPCI is not set
# CONFIG_SHAPER is not set
# Userland interfaces
#
CONFIG_INPUT_MOUSEDEV=y
-CONFIG_INPUT_MOUSEDEV_PSAUX=y
+# CONFIG_INPUT_MOUSEDEV_PSAUX is not set
CONFIG_INPUT_MOUSEDEV_SCREEN_X=1024
CONFIG_INPUT_MOUSEDEV_SCREEN_Y=768
-# CONFIG_INPUT_JOYDEV is not set
+CONFIG_INPUT_JOYDEV=y
# CONFIG_INPUT_TSDEV is not set
CONFIG_INPUT_EVDEV=y
# CONFIG_INPUT_EVBUG is not set
# CONFIG_GAMEPORT is not set
CONFIG_SOUND_GAMEPORT=y
CONFIG_SERIO=y
-CONFIG_SERIO_I8042=y
-CONFIG_SERIO_SERPORT=y
+# CONFIG_SERIO_I8042 is not set
+# CONFIG_SERIO_SERPORT is not set
# CONFIG_SERIO_CT82C710 is not set
+# CONFIG_SERIO_PCIPS2 is not set
#
# Input Device Drivers
#
-CONFIG_INPUT_KEYBOARD=y
-CONFIG_KEYBOARD_ATKBD=y
-# CONFIG_KEYBOARD_SUNKBD is not set
-# CONFIG_KEYBOARD_XTKBD is not set
-# CONFIG_KEYBOARD_NEWTON is not set
-CONFIG_INPUT_MOUSE=y
-CONFIG_MOUSE_PS2=y
-# CONFIG_MOUSE_SERIAL is not set
+# CONFIG_INPUT_KEYBOARD is not set
+# CONFIG_INPUT_MOUSE is not set
# CONFIG_INPUT_JOYSTICK is not set
# CONFIG_INPUT_TOUCHSCREEN is not set
-CONFIG_INPUT_MISC=y
-# CONFIG_INPUT_PCSPKR is not set
-# CONFIG_INPUT_UINPUT is not set
+# CONFIG_INPUT_MISC is not set
#
# Character devices
#
CONFIG_SERIAL_8250=y
CONFIG_SERIAL_8250_CONSOLE=y
-CONFIG_SERIAL_8250_EXTENDED=y
-# CONFIG_SERIAL_8250_MANY_PORTS is not set
-CONFIG_SERIAL_8250_SHARE_IRQ=y
-# CONFIG_SERIAL_8250_DETECT_IRQ is not set
-# CONFIG_SERIAL_8250_MULTIPORT is not set
-# CONFIG_SERIAL_8250_RSA is not set
+CONFIG_SERIAL_8250_ACPI=y
+CONFIG_SERIAL_8250_HCDP=y
+# CONFIG_SERIAL_8250_EXTENDED is not set
#
# Non-8250 serial port support
#
# I2C support
#
-CONFIG_I2C=y
-CONFIG_I2C_ALGOBIT=y
-# CONFIG_I2C_ELV is not set
-# CONFIG_I2C_VELLEMAN is not set
-# CONFIG_SCx200_ACB is not set
-# CONFIG_I2C_ALGOPCF is not set
-CONFIG_I2C_CHARDEV=y
-CONFIG_I2C_PROC=y
+# CONFIG_I2C is not set
+
+#
+# I2C Hardware Sensors Mainboard support
+#
+
+#
+# I2C Hardware Sensors Chip support
+#
+# CONFIG_I2C_SENSOR is not set
#
# Mice
# CONFIG_QIC02_TAPE is not set
#
+# IPMI
+#
+# CONFIG_IPMI_HANDLER is not set
+
+#
# Watchdog Cards
#
# CONFIG_WATCHDOG is not set
-# CONFIG_INTEL_RNG is not set
+# CONFIG_HW_RANDOM is not set
# CONFIG_NVRAM is not set
-# CONFIG_RTC is not set
# CONFIG_GEN_RTC is not set
CONFIG_EFI_RTC=y
# CONFIG_DTLK is not set
# Ftape, the floppy tape device driver
#
# CONFIG_FTAPE is not set
-CONFIG_AGP=y
-CONFIG_AGP_INTEL=y
-CONFIG_AGP_I810=y
-CONFIG_AGP_VIA=y
-CONFIG_AGP_AMD=y
-CONFIG_AGP_SIS=y
-CONFIG_AGP_ALI=y
-CONFIG_AGP_SWORKS=y
-# CONFIG_AGP_AMD_8151 is not set
-CONFIG_AGP_I460=y
-CONFIG_AGP_HP_ZX1=y
+CONFIG_AGP=m
+CONFIG_AGP_I460=m
+CONFIG_AGP_HP_ZX1=m
CONFIG_DRM=y
-CONFIG_DRM_TDFX=y
-CONFIG_DRM_R128=y
-CONFIG_DRM_RADEON=y
-CONFIG_DRM_I810=y
-CONFIG_DRM_I830=y
-CONFIG_DRM_MGA=y
+# CONFIG_DRM_TDFX is not set
+# CONFIG_DRM_GAMMA is not set
+# CONFIG_DRM_R128 is not set
+CONFIG_DRM_RADEON=m
+# CONFIG_DRM_MGA is not set
# CONFIG_RAW_DRIVER is not set
+# CONFIG_HANGCHECK_TIMER is not set
#
# Multimedia devices
# CONFIG_VIDEO_DEV is not set
#
+# Digital Video Broadcasting Devices
+#
+# CONFIG_DVB is not set
+
+#
# File systems
#
+CONFIG_EXT2_FS=y
+CONFIG_EXT2_FS_XATTR=y
+# CONFIG_EXT2_FS_POSIX_ACL is not set
+# CONFIG_EXT2_FS_SECURITY is not set
+CONFIG_EXT3_FS=y
+CONFIG_EXT3_FS_XATTR=y
+# CONFIG_EXT3_FS_POSIX_ACL is not set
+# CONFIG_EXT3_FS_SECURITY is not set
+CONFIG_JBD=y
+# CONFIG_JBD_DEBUG is not set
+CONFIG_FS_MBCACHE=y
+# CONFIG_REISERFS_FS is not set
+# CONFIG_JFS_FS is not set
+# CONFIG_XFS_FS is not set
+# CONFIG_MINIX_FS is not set
+# CONFIG_ROMFS_FS is not set
# CONFIG_QUOTA is not set
CONFIG_AUTOFS_FS=y
# CONFIG_AUTOFS4_FS is not set
-# CONFIG_REISERFS_FS is not set
+
+#
+# CD-ROM/DVD Filesystems
+#
+CONFIG_ISO9660_FS=y
+CONFIG_JOLIET=y
+# CONFIG_ZISOFS is not set
+CONFIG_UDF_FS=y
+
+#
+# DOS/FAT/NT Filesystems
+#
+CONFIG_FAT_FS=y
+CONFIG_MSDOS_FS=y
+CONFIG_VFAT_FS=y
+# CONFIG_NTFS_FS is not set
+
+#
+# Pseudo filesystems
+#
+CONFIG_PROC_FS=y
+# CONFIG_DEVFS_FS is not set
+CONFIG_DEVPTS_FS=y
+# CONFIG_DEVPTS_FS_XATTR is not set
+CONFIG_TMPFS=y
+CONFIG_HUGETLBFS=y
+CONFIG_RAMFS=y
+
+#
+# Miscellaneous filesystems
+#
# CONFIG_ADFS_FS is not set
# CONFIG_AFFS_FS is not set
# CONFIG_HFS_FS is not set
# CONFIG_BEFS_FS is not set
# CONFIG_BFS_FS is not set
-CONFIG_EXT3_FS=y
-CONFIG_JBD=y
-# CONFIG_JBD_DEBUG is not set
-CONFIG_FAT_FS=y
-CONFIG_MSDOS_FS=y
-CONFIG_VFAT_FS=y
# CONFIG_EFS_FS is not set
# CONFIG_CRAMFS is not set
-# CONFIG_TMPFS is not set
-CONFIG_RAMFS=y
-CONFIG_ISO9660_FS=y
-# CONFIG_JOLIET is not set
-# CONFIG_ZISOFS is not set
-# CONFIG_JFS_FS is not set
-# CONFIG_MINIX_FS is not set
# CONFIG_VXFS_FS is not set
-# CONFIG_NTFS_FS is not set
# CONFIG_HPFS_FS is not set
-CONFIG_PROC_FS=y
-# CONFIG_DEVFS_FS is not set
-CONFIG_DEVPTS_FS=y
# CONFIG_QNX4FS_FS is not set
-# CONFIG_ROMFS_FS is not set
-CONFIG_EXT2_FS=y
# CONFIG_SYSV_FS is not set
-# CONFIG_UDF_FS is not set
# CONFIG_UFS_FS is not set
-# CONFIG_XFS_FS is not set
#
# Network File Systems
#
-# CONFIG_CODA_FS is not set
-# CONFIG_INTERMEZZO_FS is not set
CONFIG_NFS_FS=y
CONFIG_NFS_V3=y
CONFIG_NFS_V4=y
CONFIG_NFSD_V3=y
# CONFIG_NFSD_V4 is not set
# CONFIG_NFSD_TCP is not set
-CONFIG_SUNRPC=y
CONFIG_LOCKD=y
CONFIG_LOCKD_V4=y
CONFIG_EXPORTFS=y
-# CONFIG_CIFS is not set
+CONFIG_SUNRPC=y
+CONFIG_SUNRPC_GSS=y
# CONFIG_SMB_FS is not set
+# CONFIG_CIFS is not set
# CONFIG_NCP_FS is not set
+# CONFIG_CODA_FS is not set
+# CONFIG_INTERMEZZO_FS is not set
# CONFIG_AFS_FS is not set
#
# CONFIG_SOLARIS_X86_PARTITION is not set
# CONFIG_UNIXWARE_DISKLABEL is not set
# CONFIG_LDM_PARTITION is not set
+# CONFIG_NEC98_PARTITION is not set
# CONFIG_SGI_PARTITION is not set
# CONFIG_ULTRIX_PARTITION is not set
# CONFIG_SUN_PARTITION is not set
# Native Language Support
#
CONFIG_NLS_DEFAULT="iso8859-1"
-# CONFIG_NLS_CODEPAGE_437 is not set
-# CONFIG_NLS_CODEPAGE_737 is not set
-# CONFIG_NLS_CODEPAGE_775 is not set
-# CONFIG_NLS_CODEPAGE_850 is not set
-# CONFIG_NLS_CODEPAGE_852 is not set
-# CONFIG_NLS_CODEPAGE_855 is not set
-# CONFIG_NLS_CODEPAGE_857 is not set
-# CONFIG_NLS_CODEPAGE_860 is not set
-# CONFIG_NLS_CODEPAGE_861 is not set
-# CONFIG_NLS_CODEPAGE_862 is not set
-# CONFIG_NLS_CODEPAGE_863 is not set
-# CONFIG_NLS_CODEPAGE_864 is not set
-# CONFIG_NLS_CODEPAGE_865 is not set
-# CONFIG_NLS_CODEPAGE_866 is not set
-# CONFIG_NLS_CODEPAGE_869 is not set
-# CONFIG_NLS_CODEPAGE_936 is not set
-# CONFIG_NLS_CODEPAGE_950 is not set
-# CONFIG_NLS_CODEPAGE_932 is not set
-# CONFIG_NLS_CODEPAGE_949 is not set
-# CONFIG_NLS_CODEPAGE_874 is not set
-# CONFIG_NLS_ISO8859_8 is not set
+CONFIG_NLS_CODEPAGE_437=y
+CONFIG_NLS_CODEPAGE_737=y
+CONFIG_NLS_CODEPAGE_775=y
+CONFIG_NLS_CODEPAGE_850=y
+CONFIG_NLS_CODEPAGE_852=y
+CONFIG_NLS_CODEPAGE_855=y
+CONFIG_NLS_CODEPAGE_857=y
+CONFIG_NLS_CODEPAGE_860=y
+CONFIG_NLS_CODEPAGE_861=y
+CONFIG_NLS_CODEPAGE_862=y
+CONFIG_NLS_CODEPAGE_863=y
+CONFIG_NLS_CODEPAGE_864=y
+CONFIG_NLS_CODEPAGE_865=y
+CONFIG_NLS_CODEPAGE_866=y
+CONFIG_NLS_CODEPAGE_869=y
+CONFIG_NLS_CODEPAGE_936=y
+CONFIG_NLS_CODEPAGE_950=y
+CONFIG_NLS_CODEPAGE_932=y
+CONFIG_NLS_CODEPAGE_949=y
+CONFIG_NLS_CODEPAGE_874=y
+CONFIG_NLS_ISO8859_8=y
# CONFIG_NLS_CODEPAGE_1250 is not set
-# CONFIG_NLS_CODEPAGE_1251 is not set
-# CONFIG_NLS_ISO8859_1 is not set
-# CONFIG_NLS_ISO8859_2 is not set
-# CONFIG_NLS_ISO8859_3 is not set
-# CONFIG_NLS_ISO8859_4 is not set
-# CONFIG_NLS_ISO8859_5 is not set
-# CONFIG_NLS_ISO8859_6 is not set
-# CONFIG_NLS_ISO8859_7 is not set
-# CONFIG_NLS_ISO8859_9 is not set
-# CONFIG_NLS_ISO8859_13 is not set
-# CONFIG_NLS_ISO8859_14 is not set
-# CONFIG_NLS_ISO8859_15 is not set
-# CONFIG_NLS_KOI8_R is not set
-# CONFIG_NLS_KOI8_U is not set
-# CONFIG_NLS_UTF8 is not set
-
-#
-# Console drivers
+CONFIG_NLS_CODEPAGE_1251=y
+CONFIG_NLS_ISO8859_1=y
+CONFIG_NLS_ISO8859_2=y
+CONFIG_NLS_ISO8859_3=y
+CONFIG_NLS_ISO8859_4=y
+CONFIG_NLS_ISO8859_5=y
+CONFIG_NLS_ISO8859_6=y
+CONFIG_NLS_ISO8859_7=y
+CONFIG_NLS_ISO8859_9=y
+CONFIG_NLS_ISO8859_13=y
+CONFIG_NLS_ISO8859_14=y
+CONFIG_NLS_ISO8859_15=y
+CONFIG_NLS_KOI8_R=y
+CONFIG_NLS_KOI8_U=y
+CONFIG_NLS_UTF8=y
+
+#
+# Graphics support
+#
+CONFIG_FB=y
+# CONFIG_FB_CIRRUS is not set
+# CONFIG_FB_PM2 is not set
+# CONFIG_FB_CYBER2000 is not set
+# CONFIG_FB_IMSTT is not set
+CONFIG_FB_RIVA=m
+# CONFIG_FB_MATROX is not set
+CONFIG_FB_RADEON=y
+# CONFIG_FB_ATY128 is not set
+# CONFIG_FB_ATY is not set
+# CONFIG_FB_SIS is not set
+# CONFIG_FB_NEOMAGIC is not set
+# CONFIG_FB_3DFX is not set
+# CONFIG_FB_VOODOO1 is not set
+# CONFIG_FB_TRIDENT is not set
+# CONFIG_FB_PM3 is not set
+# CONFIG_FB_VIRTUAL is not set
+
+#
+# Console display driver support
#
CONFIG_VGA_CONSOLE=y
+# CONFIG_MDA_CONSOLE is not set
+CONFIG_DUMMY_CONSOLE=y
+CONFIG_FRAMEBUFFER_CONSOLE=y
+CONFIG_PCI_CONSOLE=y
+# CONFIG_FONTS is not set
+CONFIG_FONT_8x8=y
+CONFIG_FONT_8x16=y
#
-# Frame-buffer support
+# Logo configuration
#
-# CONFIG_FB is not set
+CONFIG_LOGO=y
+# CONFIG_LOGO_LINUX_MONO is not set
+# CONFIG_LOGO_LINUX_VGA16 is not set
+CONFIG_LOGO_LINUX_CLUT224=y
#
# Sound
#
# Advanced Linux Sound Architecture
#
-# CONFIG_SND is not set
+CONFIG_SND=m
+CONFIG_SND_SEQUENCER=m
+# CONFIG_SND_SEQ_DUMMY is not set
+# CONFIG_SND_OSSEMUL is not set
+# CONFIG_SND_VERBOSE_PRINTK is not set
+# CONFIG_SND_DEBUG is not set
+
+#
+# Generic devices
+#
+# CONFIG_SND_DUMMY is not set
+# CONFIG_SND_VIRMIDI is not set
+# CONFIG_SND_MTPAV is not set
+# CONFIG_SND_SERIAL_U16550 is not set
+# CONFIG_SND_MPU401 is not set
+
+#
+# PCI devices
+#
+# CONFIG_SND_ALI5451 is not set
+# CONFIG_SND_AZT3328 is not set
+# CONFIG_SND_CS46XX is not set
+# CONFIG_SND_CS4281 is not set
+# CONFIG_SND_EMU10K1 is not set
+# CONFIG_SND_KORG1212 is not set
+# CONFIG_SND_NM256 is not set
+# CONFIG_SND_RME32 is not set
+# CONFIG_SND_RME96 is not set
+# CONFIG_SND_RME9652 is not set
+# CONFIG_SND_HDSP is not set
+# CONFIG_SND_TRIDENT is not set
+# CONFIG_SND_YMFPCI is not set
+# CONFIG_SND_ALS4000 is not set
+# CONFIG_SND_CMIPCI is not set
+# CONFIG_SND_ENS1370 is not set
+# CONFIG_SND_ENS1371 is not set
+# CONFIG_SND_ES1938 is not set
+# CONFIG_SND_ES1968 is not set
+# CONFIG_SND_MAESTRO3 is not set
+CONFIG_SND_FM801=m
+# CONFIG_SND_ICE1712 is not set
+# CONFIG_SND_ICE1724 is not set
+# CONFIG_SND_INTEL8X0 is not set
+# CONFIG_SND_SONICVIBES is not set
+# CONFIG_SND_VIA82XX is not set
+# CONFIG_SND_VX222 is not set
+
+#
+# ALSA USB devices
+#
+# CONFIG_SND_USB_AUDIO is not set
#
# Open Sound System
#
-CONFIG_SOUND_PRIME=y
-# CONFIG_SOUND_BT878 is not set
-# CONFIG_SOUND_CMPCI is not set
-# CONFIG_SOUND_EMU10K1 is not set
-# CONFIG_SOUND_FUSION is not set
-CONFIG_SOUND_CS4281=y
-# CONFIG_SOUND_ES1370 is not set
-# CONFIG_SOUND_ES1371 is not set
-# CONFIG_SOUND_ESSSOLO1 is not set
-# CONFIG_SOUND_MAESTRO is not set
-# CONFIG_SOUND_MAESTRO3 is not set
-# CONFIG_SOUND_ICH is not set
-# CONFIG_SOUND_RME96XX is not set
-# CONFIG_SOUND_SONICVIBES is not set
-# CONFIG_SOUND_TRIDENT is not set
-# CONFIG_SOUND_MSNDCLAS is not set
-# CONFIG_SOUND_MSNDPIN is not set
-# CONFIG_SOUND_VIA82CXXX is not set
-# CONFIG_SOUND_OSS is not set
-# CONFIG_SOUND_TVMIXER is not set
+# CONFIG_SOUND_PRIME is not set
#
# USB support
#
# Miscellaneous USB options
#
-CONFIG_USB_DEVICEFS=y
-# CONFIG_USB_LONG_TIMEOUT is not set
-# CONFIG_USB_BANDWIDTH is not set
+# CONFIG_USB_DEVICEFS is not set
+CONFIG_USB_BANDWIDTH=y
# CONFIG_USB_DYNAMIC_MINORS is not set
#
# USB Host Controller Drivers
#
-# CONFIG_USB_EHCI_HCD is not set
-# CONFIG_USB_OHCI_HCD is not set
-CONFIG_USB_UHCI_HCD_ALT=y
+CONFIG_USB_EHCI_HCD=m
+CONFIG_USB_OHCI_HCD=y
+CONFIG_USB_UHCI_HCD=m
#
# USB Device Class drivers
CONFIG_USB_HIDDEV=y
# CONFIG_USB_AIPTEK is not set
# CONFIG_USB_WACOM is not set
+# CONFIG_USB_KBTAB is not set
# CONFIG_USB_POWERMATE is not set
# CONFIG_USB_XPAD is not set
# USB Network adaptors
#
# CONFIG_USB_CATC is not set
-# CONFIG_USB_CDCETHER is not set
# CONFIG_USB_KAWETH is not set
# CONFIG_USB_PEGASUS is not set
# CONFIG_USB_RTL8150 is not set
# CONFIG_USB_RIO500 is not set
# CONFIG_USB_BRLVGER is not set
# CONFIG_USB_LCD is not set
-# CONFIG_USB_TEST is not set
+# CONFIG_USB_GADGET is not set
#
-# Library routines
+# Bluetooth support
#
-# CONFIG_CRC32 is not set
+# CONFIG_BT is not set
#
-# Bluetooth support
+# Library routines
#
-# CONFIG_BT is not set
+CONFIG_CRC32=y
#
# Kernel hacking
# CONFIG_DISABLE_VHPT is not set
CONFIG_MAGIC_SYSRQ=y
CONFIG_IA64_EARLY_PRINTK=y
-# CONFIG_IA64_EARLY_PRINTK_UART is not set
+CONFIG_IA64_EARLY_PRINTK_UART=y
+CONFIG_IA64_EARLY_PRINTK_UART_BASE=0xff5e0000
CONFIG_IA64_EARLY_PRINTK_VGA=y
# CONFIG_DEBUG_SLAB is not set
# CONFIG_DEBUG_SPINLOCK is not set
+# CONFIG_DEBUG_SPINLOCK_SLEEP is not set
# CONFIG_IA64_DEBUG_CMPXCHG is not set
# CONFIG_IA64_DEBUG_IRQ is not set
#
# Security options
#
-CONFIG_SECURITY_CAPABILITIES=y
+# CONFIG_SECURITY is not set
#
# Cryptographic options
ioc_resource_init(ioc);
ioc_sac_init(ioc);
+ if ((long) ~IOVP_MASK > (long) ia64_max_iommu_merge_mask)
+ ia64_max_iommu_merge_mask = ~IOVP_MASK;
+ MAX_DMA_ADDRESS = ~0UL;
+
printk(KERN_INFO PFX
"%s %d.%d HPA 0x%lx IOVA space %dMb at 0x%lx\n",
ioc->name, (ioc->rev >> 4) & 0xF, ioc->rev & 0xF,
struct ioc *ioc;
acpi_status status;
u64 hpa, length;
+ struct acpi_buffer buffer;
struct acpi_device_info *dev_info;
- struct acpi_buffer buffer = {ACPI_ALLOCATE_BUFFER, NULL};
status = hp_acpi_csr_space(device->handle, &hpa, &length);
if (ACPI_FAILURE(status))
return 1;
+ buffer.length = ACPI_ALLOCATE_LOCAL_BUFFER;
status = acpi_get_object_info(device->handle, &buffer);
if (ACPI_FAILURE(status))
return 1;
*/
if (strncmp("HWP0001", dev_info->hardware_id.value, 7) == 0)
hpa += ZX1_IOC_OFFSET;
+ ACPI_MEM_FREE(dev_info);
ioc = ioc_init(hpa, device->handle);
if (!ioc)
static int __init
sba_init(void)
{
- MAX_DMA_ADDRESS = ~0UL;
-
acpi_bus_register_driver(&acpi_sba_ioc_driver);
#ifdef CONFIG_PCI
config HP_SIMSERIAL
bool "Simulated serial driver support"
+config HP_SIMSERIAL_CONSOLE
+ bool "Console for HP simulator"
+ depends on HP_SIMSERIAL
+
config HP_SIMSCSI
bool "Simulated SCSI disk"
depends on SCSI
# Copyright (C) Srinivasa Thirumalachar (sprasad@engr.sgi.com)
#
-obj-y := hpsim_console.o hpsim_irq.o hpsim_setup.o
+obj-y := hpsim_irq.o hpsim_setup.o
obj-$(CONFIG_IA64_GENERIC) += hpsim_machvec.o
obj-$(CONFIG_HP_SIMETH) += simeth.o
obj-$(CONFIG_HP_SIMSERIAL) += simserial.o
+obj-$(CONFIG_HP_SIMSERIAL_CONSOLE) += hpsim_console.o
obj-$(CONFIG_HP_SIMSCSI) += simscsi.o
* David Mosberger-Tang <davidm@hpl.hp.com>
* Copyright (C) 1999 Vijay Chander <vijay@engr.sgi.com>
*/
+#include <linux/config.h>
#include <linux/console.h>
#include <linux/init.h>
#include <linux/kdev_t.h>
#include "hpsim_ssc.h"
-extern struct console hpsim_cons;
-
/*
* Simulator system call.
*/
{
ROOT_DEV = Root_SDA1; /* default to first SCSI drive */
- register_console(&hpsim_cons);
+#ifdef CONFIG_HP_SIMSERIAL_CONSOLE
+ {
+ extern struct console hpsim_cons;
+ if (ia64_platform_is("hpsim"))
+ register_console(&hpsim_cons);
+ }
+#endif
}
int i;
struct serial_state *state;
+ if (!ia64_platform_is("hpsim"))
+ return -ENODEV;
+
hp_simserial_driver = alloc_tty_driver(1);
if (!hp_simserial_driver)
return -ENOMEM;
#include <asm/param.h>
#include <asm/signal.h>
-#include <asm/ia32.h>
+
+#include "ia32priv.h"
#define CONFIG_BINFMT_ELF32
br.call.sptk.many rp=do_fork
.ret0: .restore sp
adds sp=IA64_SWITCH_STACK_SIZE,sp // pop the switch stack
- mov r2=-1000
- adds r3=IA64_TASK_PID_OFFSET,r8
- ;;
- cmp.leu p6,p0=r8,r2
mov ar.pfs=loc1
mov rp=loc0
- ;;
-(p6) ld4 r8=[r3]
br.ret.sptk.many rp
END(ia32_clone)
br.call.sptk.few rp=do_fork
.ret5: .restore sp
adds sp=IA64_SWITCH_STACK_SIZE,sp // pop the switch stack
- mov r2=-1000
- adds r3=IA64_TASK_PID_OFFSET,r8
- ;;
- cmp.leu p6,p0=r8,r2
mov ar.pfs=loc1
mov rp=loc0
- ;;
-(p6) ld4 r8=[r3]
br.ret.sptk.many rp
END(sys32_fork)
data8 sys_ni_syscall
data8 sys_ni_syscall
data8 compat_sys_futex /* 240 */
- data8 compat_sys_setaffinity
- data8 compat_sys_getaffinity
+ data8 compat_sys_sched_setaffinity
+ data8 compat_sys_sched_getaffinity
data8 sys_ni_syscall
data8 sys_ni_syscall
data8 sys_ni_syscall /* 245 */
#include <linux/dirent.h>
#include <linux/fs.h> /* argh, msdos_fs.h isn't self-contained... */
#include <linux/signal.h> /* argh, msdos_fs.h isn't self-contained... */
-
-#include <asm/ia32.h>
-
-#include <linux/msdos_fs.h>
-#include <linux/mtio.h>
-#include <linux/ncp_fs.h>
-#include <linux/capi.h>
-#include <linux/videodev.h>
-#include <linux/synclink.h>
-#include <linux/atmdev.h>
-#include <linux/atm_eni.h>
-#include <linux/atm_nicstar.h>
-#include <linux/atm_zatm.h>
-#include <linux/atm_idt77105.h>
+#include <linux/compat.h>
+
+#include "ia32priv.h"
+
+#include <linux/config.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/smp.h>
+#include <linux/smp_lock.h>
+#include <linux/ioctl.h>
+#include <linux/if.h>
+#include <linux/slab.h>
+#include <linux/hdreg.h>
+#include <linux/raid/md.h>
+#include <linux/kd.h>
+#include <linux/route.h>
+#include <linux/in6.h>
+#include <linux/ipv6_route.h>
+#include <linux/skbuff.h>
+#include <linux/netlink.h>
+#include <linux/vt.h>
+#include <linux/file.h>
+#include <linux/fd.h>
#include <linux/ppp_defs.h>
#include <linux/if_ppp.h>
-#include <linux/ixjuser.h>
-#include <linux/i2o-dev.h>
+#include <linux/if_pppox.h>
+#include <linux/mtio.h>
+#include <linux/cdrom.h>
+#include <linux/loop.h>
+#include <linux/auto_fs.h>
+#include <linux/auto_fs4.h>
+#include <linux/devfs_fs.h>
+#include <linux/tty.h>
+#include <linux/vt_kern.h>
+#include <linux/fb.h>
+#include <linux/ext2_fs.h>
+#include <linux/videodev.h>
+#include <linux/netdevice.h>
+#include <linux/raw.h>
+#include <linux/smb_fs.h>
+#include <linux/blkpg.h>
+#include <linux/blk.h>
+#include <linux/elevator.h>
+#include <linux/rtc.h>
+#include <linux/pci.h>
+#include <linux/module.h>
+#include <linux/serial.h>
+#include <linux/reiserfs_fs.h>
+#include <linux/if_tun.h>
+#include <linux/ctype.h>
+#include <linux/ncp_fs.h>
+#include <net/bluetooth/bluetooth.h>
+#include <net/bluetooth/rfcomm.h>
+
#include <scsi/scsi.h>
/* Ugly hack. */
-#undef __KERNEL__
+#undef __KERNEL__
#include <scsi/scsi_ioctl.h>
-#define __KERNEL__
+#define __KERNEL__
#include <scsi/sg.h>
+#include <asm/types.h>
+#include <asm/uaccess.h>
+#include <linux/ethtool.h>
+#include <linux/mii.h>
+#include <linux/if_bonding.h>
+#include <linux/watchdog.h>
+
+#include <asm/module.h>
+#include <asm/ioctl32.h>
+#include <linux/soundcard.h>
+#include <linux/lp.h>
+
+#include <linux/atm.h>
+#include <linux/atmarp.h>
+#include <linux/atmclip.h>
+#include <linux/atmdev.h>
+#include <linux/atmioc.h>
+#include <linux/atmlec.h>
+#include <linux/atmmpc.h>
+#include <linux/atmsvc.h>
+#include <linux/atm_tcp.h>
+#include <linux/sonet.h>
+#include <linux/atm_suni.h>
+#include <linux/mtd/mtd.h>
+
+#include <net/bluetooth/hci.h>
+
+#include <linux/usb.h>
+#include <linux/usbdevice_fs.h>
+#include <linux/nbd.h>
+#include <linux/random.h>
+#include <linux/filter.h>
+
#include <../drivers/char/drm/drm.h>
#include <../drivers/char/drm/mga_drm.h>
#include <../drivers/char/drm/i810_drm.h>
-
#define IOCTL_NR(a) ((a) & ~(_IOC_SIZEMASK << _IOC_SIZESHIFT))
#define DO_IOCTL(fd, cmd, arg) ({ \
asmlinkage long sys_ioctl(unsigned int fd, unsigned int cmd, unsigned long arg);
+#define VFAT_IOCTL_READDIR_BOTH32 _IOR('r', 1, struct linux32_dirent[2])
+#define VFAT_IOCTL_READDIR_SHORT32 _IOR('r', 2, struct linux32_dirent[2])
+
static long
put_dirent32 (struct dirent *d, struct linux32_dirent *d32)
{
|| put_user(d->d_reclen, &d32->d_reclen)
|| copy_to_user(d32->d_name, d->d_name, namelen + 1));
}
+
+static int vfat_ioctl32(unsigned fd, unsigned cmd, void *ptr)
+{
+ int ret;
+ mm_segment_t oldfs = get_fs();
+ struct dirent d[2];
+
+ set_fs(KERNEL_DS);
+ ret = sys_ioctl(fd,cmd,(unsigned long)&d);
+ set_fs(oldfs);
+ if (!ret) {
+ ret |= put_dirent32(&d[0], (struct linux32_dirent *)ptr);
+ ret |= put_dirent32(&d[1], ((struct linux32_dirent *)ptr) + 1);
+ }
+ return ret;
+}
+
/*
* The transform code for the SG_IO ioctl was brazenly lifted from
* the Sparc64 port in the file `arch/sparc64/kernel/ioctl32.c'.
}
return err;
}
+
+static __inline__ void *alloc_user_space(long len)
+{
+ struct pt_regs *regs = ((struct pt_regs *)((unsigned long) current +
+ IA64_STK_OFFSET)) - 1;
+ return (void *)regs->r12 - len;
+}
+
+struct ifmap32 {
+ u32 mem_start;
+ u32 mem_end;
+ unsigned short base_addr;
+ unsigned char irq;
+ unsigned char dma;
+ unsigned char port;
+};
+
+struct ifreq32 {
+#define IFHWADDRLEN 6
+#define IFNAMSIZ 16
+ union {
+ char ifrn_name[IFNAMSIZ]; /* if name, e.g. "en0" */
+ } ifr_ifrn;
+ union {
+ struct sockaddr ifru_addr;
+ struct sockaddr ifru_dstaddr;
+ struct sockaddr ifru_broadaddr;
+ struct sockaddr ifru_netmask;
+ struct sockaddr ifru_hwaddr;
+ short ifru_flags;
+ int ifru_ivalue;
+ int ifru_mtu;
+ struct ifmap32 ifru_map;
+ char ifru_slave[IFNAMSIZ]; /* Just fits the size */
+ char ifru_newname[IFNAMSIZ];
+ compat_caddr_t ifru_data;
+ } ifr_ifru;
+};
+
+int siocdevprivate_ioctl(unsigned int fd, unsigned int cmd, unsigned long arg)
+{
+ struct ifreq *u_ifreq64;
+ struct ifreq32 *u_ifreq32 = (struct ifreq32 *) arg;
+ char tmp_buf[IFNAMSIZ];
+ void *data64;
+ u32 data32;
+
+ if (copy_from_user(&tmp_buf[0], &(u_ifreq32->ifr_ifrn.ifrn_name[0]),
+ IFNAMSIZ))
+ return -EFAULT;
+ if (__get_user(data32, &u_ifreq32->ifr_ifru.ifru_data))
+ return -EFAULT;
+ data64 = (void *) P(data32);
+
+ u_ifreq64 = alloc_user_space(sizeof(*u_ifreq64));
+
+ /* Don't check these user accesses, just let that get trapped
+ * in the ioctl handler instead.
+ */
+ copy_to_user(&u_ifreq64->ifr_ifrn.ifrn_name[0], &tmp_buf[0], IFNAMSIZ);
+ __put_user(data64, &u_ifreq64->ifr_ifru.ifru_data);
+
+ return sys_ioctl(fd, cmd, (unsigned long) u_ifreq64);
+}
+
+typedef int (* ioctl32_handler_t)(unsigned int, unsigned int, unsigned long, struct file *);
+
+#define COMPATIBLE_IOCTL(cmd) HANDLE_IOCTL((cmd),sys_ioctl)
+#define HANDLE_IOCTL(cmd,handler) { (cmd), (ioctl32_handler_t)(handler), NULL },
+#define IOCTL_TABLE_START \
+ struct ioctl_trans ioctl_start[] = {
+#define IOCTL_TABLE_END \
+ }; struct ioctl_trans ioctl_end[0];
+
+IOCTL_TABLE_START
+#include <linux/compat_ioctl.h>
+HANDLE_IOCTL(VFAT_IOCTL_READDIR_BOTH32, vfat_ioctl32)
+HANDLE_IOCTL(VFAT_IOCTL_READDIR_SHORT32, vfat_ioctl32)
+HANDLE_IOCTL(SG_IO,sg_ioctl_trans)
+IOCTL_TABLE_END
#include <linux/vmalloc.h>
#include <asm/uaccess.h>
-#include <asm/ia32.h>
+
+#include "ia32priv.h"
#define P(p) ((void *) (unsigned long) (p))
#include <asm/rse.h>
#include <asm/sigcontext.h>
#include <asm/segment.h>
-#include <asm/ia32.h>
+
+#include "ia32priv.h"
#include "../kernel/sigframe.h"
* datasel ar.fdr(32:47)
*
* _st[(0+TOS)%8] f8
- * _st[(1+TOS)%8] f9 (f8, f9 from ptregs)
- * : : : (f10..f15 from live reg)
+ * _st[(1+TOS)%8] f9
+ * _st[(2+TOS)%8] f10
+ * _st[(3+TOS)%8] f11 (f8..f11 from ptregs)
+ * : : : (f12..f15 from live reg)
* : : :
* _st[(7+TOS)%8] f15 TOS=sw.top(bits11:13)
*
__put_user( 0, &save->magic); //#define X86_FXSR_MAGIC 0x0000
/*
- * save f8 and f9 from pt_regs
- * save f10..f15 from live register set
+ * save f8..f11 from pt_regs
+ * save f12..f15 from live register set
*/
/*
* Find the location where f8 has to go in fp reg stack. This depends on
copy_to_user(&save->_st[(0+fr8_st_map)&0x7], fpregp, sizeof(struct _fpreg_ia32));
ia64f2ia32f(fpregp, &ptp->f9);
copy_to_user(&save->_st[(1+fr8_st_map)&0x7], fpregp, sizeof(struct _fpreg_ia32));
-
- __stfe(fpregp, 10);
+ ia64f2ia32f(fpregp, &ptp->f10);
copy_to_user(&save->_st[(2+fr8_st_map)&0x7], fpregp, sizeof(struct _fpreg_ia32));
- __stfe(fpregp, 11);
+ ia64f2ia32f(fpregp, &ptp->f11);
copy_to_user(&save->_st[(3+fr8_st_map)&0x7], fpregp, sizeof(struct _fpreg_ia32));
+
__stfe(fpregp, 12);
copy_to_user(&save->_st[(4+fr8_st_map)&0x7], fpregp, sizeof(struct _fpreg_ia32));
__stfe(fpregp, 13);
asm volatile ( "mov ar.fdr=%0;" :: "r"(fdr));
/*
- * restore f8, f9 onto pt_regs
- * restore f10..f15 onto live registers
+ * restore f8..f11 onto pt_regs
+ * restore f12..f15 onto live registers
*/
/*
* Find the location where f8 has to go in fp reg stack. This depends on
ia32f2ia64f(&ptp->f8, fpregp);
copy_from_user(fpregp, &save->_st[(1+fr8_st_map)&0x7], sizeof(struct _fpreg_ia32));
ia32f2ia64f(&ptp->f9, fpregp);
-
copy_from_user(fpregp, &save->_st[(2+fr8_st_map)&0x7], sizeof(struct _fpreg_ia32));
- __ldfe(10, fpregp);
+ ia32f2ia64f(&ptp->f10, fpregp);
copy_from_user(fpregp, &save->_st[(3+fr8_st_map)&0x7], sizeof(struct _fpreg_ia32));
- __ldfe(11, fpregp);
+ ia32f2ia64f(&ptp->f11, fpregp);
+
copy_from_user(fpregp, &save->_st[(4+fr8_st_map)&0x7], sizeof(struct _fpreg_ia32));
__ldfe(12, fpregp);
copy_from_user(fpregp, &save->_st[(5+fr8_st_map)&0x7], sizeof(struct _fpreg_ia32));
#define COPY(ia64x, ia32x) err |= __get_user(regs->ia64x, &sc->ia32x)
-#define copyseg_gs(tmp) (regs->r16 |= (unsigned long) tmp << 48)
-#define copyseg_fs(tmp) (regs->r16 |= (unsigned long) tmp << 32)
+#define copyseg_gs(tmp) (regs->r16 |= (unsigned long) (tmp) << 48)
+#define copyseg_fs(tmp) (regs->r16 |= (unsigned long) (tmp) << 32)
#define copyseg_cs(tmp) (regs->r17 |= tmp)
-#define copyseg_ss(tmp) (regs->r17 |= (unsigned long) tmp << 16)
-#define copyseg_es(tmp) (regs->r16 |= (unsigned long) tmp << 16)
+#define copyseg_ss(tmp) (regs->r17 |= (unsigned long) (tmp) << 16)
+#define copyseg_es(tmp) (regs->r16 |= (unsigned long) (tmp) << 16)
#define copyseg_ds(tmp) (regs->r16 |= tmp)
#define COPY_SEG(seg) \
#include <asm/pgtable.h>
#include <asm/system.h>
#include <asm/processor.h>
-#include <asm/ia32.h>
+
+#include "ia32priv.h"
extern void die_if_kernel (char *str, struct pt_regs *regs, long err);
regs->r27 = load_desc(regs->r16 >> 0); /* DSD */
regs->r28 = load_desc(regs->r16 >> 32); /* FSD */
regs->r29 = load_desc(regs->r16 >> 48); /* GSD */
- task->thread.csd = load_desc(regs->r17 >> 0); /* CSD */
- task->thread.ssd = load_desc(regs->r17 >> 16); /* SSD */
+ regs->ar_csd = load_desc(regs->r17 >> 0); /* CSD */
+ regs->ar_ssd = load_desc(regs->r17 >> 16); /* SSD */
}
void
ia32_save_state (struct task_struct *t)
{
- unsigned long eflag, fsr, fcr, fir, fdr, csd, ssd;
+ unsigned long eflag, fsr, fcr, fir, fdr;
asm ("mov %0=ar.eflag;"
"mov %1=ar.fsr;"
"mov %2=ar.fcr;"
"mov %3=ar.fir;"
"mov %4=ar.fdr;"
- "mov %5=ar.csd;"
- "mov %6=ar.ssd;"
- : "=r"(eflag), "=r"(fsr), "=r"(fcr), "=r"(fir), "=r"(fdr), "=r"(csd), "=r"(ssd));
+ : "=r"(eflag), "=r"(fsr), "=r"(fcr), "=r"(fir), "=r"(fdr));
t->thread.eflag = eflag;
t->thread.fsr = fsr;
t->thread.fcr = fcr;
t->thread.fir = fir;
t->thread.fdr = fdr;
- t->thread.csd = csd;
- t->thread.ssd = ssd;
ia64_set_kr(IA64_KR_IO_BASE, t->thread.old_iob);
ia64_set_kr(IA64_KR_TSSD, t->thread.old_k1);
}
void
ia32_load_state (struct task_struct *t)
{
- unsigned long eflag, fsr, fcr, fir, fdr, csd, ssd, tssd;
+ unsigned long eflag, fsr, fcr, fir, fdr, tssd;
struct pt_regs *regs = ia64_task_regs(t);
int nr = get_cpu(); /* LDT and TSS depend on CPU number: */
fcr = t->thread.fcr;
fir = t->thread.fir;
fdr = t->thread.fdr;
- csd = t->thread.csd;
- ssd = t->thread.ssd;
tssd = load_desc(_TSS(nr)); /* TSSD */
asm volatile ("mov ar.eflag=%0;"
"mov ar.fcr=%2;"
"mov ar.fir=%3;"
"mov ar.fdr=%4;"
- "mov ar.csd=%5;"
- "mov ar.ssd=%6;"
- :: "r"(eflag), "r"(fsr), "r"(fcr), "r"(fir), "r"(fdr), "r"(csd), "r"(ssd));
+ :: "r"(eflag), "r"(fsr), "r"(fcr), "r"(fir), "r"(fdr));
current->thread.old_iob = ia64_get_kr(IA64_KR_IO_BASE);
current->thread.old_k1 = ia64_get_kr(IA64_KR_TSSD);
ia64_set_kr(IA64_KR_IO_BASE, IA32_IOBASE);
force_sig_info(SIGTRAP, &siginfo, current);
}
+void
+ia32_cpu_init (void)
+{
+ /* initialize global ia32 state - CR0 and CR4 */
+ asm volatile ("mov ar.cflg = %0" :: "r" (((ulong) IA32_CR4 << 32) | IA32_CR0));
+}
+
static int __init
ia32_init (void)
{
#include <linux/kernel.h>
#include <linux/sched.h>
-#include <asm/ia32.h>
+#include "ia32priv.h"
+
#include <asm/ptrace.h>
int
--- /dev/null
+#ifndef _ASM_IA64_IA32_H
+#define _ASM_IA64_IA32_H
+
+#include <linux/config.h>
+
+#include <asm/ia32.h>
+
+#ifdef CONFIG_IA32_SUPPORT
+
+#include <linux/binfmts.h>
+#include <linux/compat.h>
+
+/*
+ * 32 bit structures for IA32 support.
+ */
+
+#define IA32_PAGE_SHIFT 12 /* 4KB pages */
+#define IA32_PAGE_SIZE (1UL << IA32_PAGE_SHIFT)
+#define IA32_PAGE_MASK (~(IA32_PAGE_SIZE - 1))
+#define IA32_PAGE_ALIGN(addr) (((addr) + IA32_PAGE_SIZE - 1) & IA32_PAGE_MASK)
+#define IA32_CLOCKS_PER_SEC 100 /* Cast in stone for IA32 Linux */
+
+/* sigcontext.h */
+/*
+ * As documented in the iBCS2 standard..
+ *
+ * The first part of "struct _fpstate" is just the
+ * normal i387 hardware setup, the extra "status"
+ * word is used to save the coprocessor status word
+ * before entering the handler.
+ */
+struct _fpreg_ia32 {
+ unsigned short significand[4];
+ unsigned short exponent;
+};
+
+struct _fpxreg_ia32 {
+ unsigned short significand[4];
+ unsigned short exponent;
+ unsigned short padding[3];
+};
+
+struct _xmmreg_ia32 {
+ unsigned int element[4];
+};
+
+
+struct _fpstate_ia32 {
+ unsigned int cw,
+ sw,
+ tag,
+ ipoff,
+ cssel,
+ dataoff,
+ datasel;
+ struct _fpreg_ia32 _st[8];
+ unsigned short status;
+ unsigned short magic; /* 0xffff = regular FPU data only */
+
+ /* FXSR FPU environment */
+ unsigned int _fxsr_env[6]; /* FXSR FPU env is ignored */
+ unsigned int mxcsr;
+ unsigned int reserved;
+ struct _fpxreg_ia32 _fxsr_st[8]; /* FXSR FPU reg data is ignored */
+ struct _xmmreg_ia32 _xmm[8];
+ unsigned int padding[56];
+};
+
+struct sigcontext_ia32 {
+ unsigned short gs, __gsh;
+ unsigned short fs, __fsh;
+ unsigned short es, __esh;
+ unsigned short ds, __dsh;
+ unsigned int edi;
+ unsigned int esi;
+ unsigned int ebp;
+ unsigned int esp;
+ unsigned int ebx;
+ unsigned int edx;
+ unsigned int ecx;
+ unsigned int eax;
+ unsigned int trapno;
+ unsigned int err;
+ unsigned int eip;
+ unsigned short cs, __csh;
+ unsigned int eflags;
+ unsigned int esp_at_signal;
+ unsigned short ss, __ssh;
+ unsigned int fpstate; /* really (struct _fpstate_ia32 *) */
+ unsigned int oldmask;
+ unsigned int cr2;
+};
+
+/* user.h */
+/*
+ * IA32 (Pentium III/4) FXSR, SSE support
+ *
+ * Provide support for the GDB 5.0+ PTRACE_{GET|SET}FPXREGS requests for
+ * interacting with the FXSR-format floating point environment. Floating
+ * point data can be accessed in the regular format in the usual manner,
+ * and both the standard and SIMD floating point data can be accessed via
+ * the new ptrace requests. In either case, changes to the FPU environment
+ * will be reflected in the task's state as expected.
+ */
+struct ia32_user_i387_struct {
+ int cwd;
+ int swd;
+ int twd;
+ int fip;
+ int fcs;
+ int foo;
+ int fos;
+ /* 8*10 bytes for each FP-reg = 80 bytes */
+ struct _fpreg_ia32 st_space[8];
+};
+
+struct ia32_user_fxsr_struct {
+ unsigned short cwd;
+ unsigned short swd;
+ unsigned short twd;
+ unsigned short fop;
+ int fip;
+ int fcs;
+ int foo;
+ int fos;
+ int mxcsr;
+ int reserved;
+ int st_space[32]; /* 8*16 bytes for each FP-reg = 128 bytes */
+ int xmm_space[32]; /* 8*16 bytes for each XMM-reg = 128 bytes */
+ int padding[56];
+};
+
+/* signal.h */
+#define IA32_SET_SA_HANDLER(ka,handler,restorer) \
+ ((ka)->sa.sa_handler = (__sighandler_t) \
+ (((unsigned long)(restorer) << 32) \
+ | ((handler) & 0xffffffff)))
+#define IA32_SA_HANDLER(ka) ((unsigned long) (ka)->sa.sa_handler & 0xffffffff)
+#define IA32_SA_RESTORER(ka) ((unsigned long) (ka)->sa.sa_handler >> 32)
+
+struct sigaction32 {
+ unsigned int sa_handler; /* Really a pointer, but need to deal with 32 bits */
+ unsigned int sa_flags;
+ unsigned int sa_restorer; /* Another 32 bit pointer */
+ compat_sigset_t sa_mask; /* A 32 bit mask */
+};
+
+struct old_sigaction32 {
+ unsigned int sa_handler; /* Really a pointer, but need to deal
+ with 32 bits */
+ compat_old_sigset_t sa_mask; /* A 32 bit mask */
+ unsigned int sa_flags;
+ unsigned int sa_restorer; /* Another 32 bit pointer */
+};
+
+typedef struct sigaltstack_ia32 {
+ unsigned int ss_sp;
+ int ss_flags;
+ unsigned int ss_size;
+} stack_ia32_t;
+
+struct ucontext_ia32 {
+ unsigned int uc_flags;
+ unsigned int uc_link;
+ stack_ia32_t uc_stack;
+ struct sigcontext_ia32 uc_mcontext;
+ sigset_t uc_sigmask; /* mask last for extensibility */
+};
+
+struct stat64 {
+ unsigned short st_dev;
+ unsigned char __pad0[10];
+ unsigned int __st_ino;
+ unsigned int st_mode;
+ unsigned int st_nlink;
+ unsigned int st_uid;
+ unsigned int st_gid;
+ unsigned short st_rdev;
+ unsigned char __pad3[10];
+ unsigned int st_size_lo;
+ unsigned int st_size_hi;
+ unsigned int st_blksize;
+ unsigned int st_blocks; /* Number 512-byte blocks allocated. */
+ unsigned int __pad4; /* future possible st_blocks high bits */
+ unsigned int st_atime;
+ unsigned int st_atime_nsec;
+ unsigned int st_mtime;
+ unsigned int st_mtime_nsec;
+ unsigned int st_ctime;
+ unsigned int st_ctime_nsec;
+ unsigned int st_ino_lo;
+ unsigned int st_ino_hi;
+};
+
+typedef union sigval32 {
+ int sival_int;
+ unsigned int sival_ptr;
+} sigval_t32;
+
+typedef struct siginfo32 {
+ int si_signo;
+ int si_errno;
+ int si_code;
+
+ union {
+ int _pad[((128/sizeof(int)) - 3)];
+
+ /* kill() */
+ struct {
+ unsigned int _pid; /* sender's pid */
+ unsigned int _uid; /* sender's uid */
+ } _kill;
+
+ /* POSIX.1b timers */
+ struct {
+ timer_t _tid; /* timer id */
+ int _overrun; /* overrun count */
+ char _pad[sizeof(unsigned int) - sizeof(int)];
+ sigval_t32 _sigval; /* same as below */
+ int _sys_private; /* not to be passed to user */
+ } _timer;
+
+ /* POSIX.1b signals */
+ struct {
+ unsigned int _pid; /* sender's pid */
+ unsigned int _uid; /* sender's uid */
+ sigval_t32 _sigval;
+ } _rt;
+
+ /* SIGCHLD */
+ struct {
+ unsigned int _pid; /* which child */
+ unsigned int _uid; /* sender's uid */
+ int _status; /* exit code */
+ compat_clock_t _utime;
+ compat_clock_t _stime;
+ } _sigchld;
+
+ /* SIGILL, SIGFPE, SIGSEGV, SIGBUS */
+ struct {
+ unsigned int _addr; /* faulting insn/memory ref. */
+ } _sigfault;
+
+ /* SIGPOLL */
+ struct {
+ int _band; /* POLL_IN, POLL_OUT, POLL_MSG */
+ int _fd;
+ } _sigpoll;
+ } _sifields;
+} siginfo_t32;
+
+struct linux32_dirent {
+ u32 d_ino;
+ u32 d_off;
+ u16 d_reclen;
+ char d_name[256];
+};
+
+struct old_linux32_dirent {
+ u32 d_ino;
+ u32 d_offset;
+ u16 d_namlen;
+ char d_name[1];
+};
+
+/*
+ * IA-32 ELF specific definitions for IA-64.
+ */
+
+#define _ASM_IA64_ELF_H /* Don't include elf.h */
+
+#include <linux/sched.h>
+#include <asm/processor.h>
+
+/*
+ * This is used to ensure we don't load something for the wrong architecture.
+ */
+#define elf_check_arch(x) ((x)->e_machine == EM_386)
+
+/*
+ * These are used to set parameters in the core dumps.
+ */
+#define ELF_CLASS ELFCLASS32
+#define ELF_DATA ELFDATA2LSB
+#define ELF_ARCH EM_386
+
+#define IA32_PAGE_OFFSET 0xc0000000
+#define IA32_STACK_TOP IA32_PAGE_OFFSET
+
+/*
+ * The system segments (GDT, TSS, LDT) have to be mapped below 4GB so the IA-32 engine can
+ * access them.
+ */
+#define IA32_GDT_OFFSET (IA32_PAGE_OFFSET)
+#define IA32_TSS_OFFSET (IA32_PAGE_OFFSET + PAGE_SIZE)
+#define IA32_LDT_OFFSET (IA32_PAGE_OFFSET + 2*PAGE_SIZE)
+
+#define USE_ELF_CORE_DUMP
+#define ELF_EXEC_PAGESIZE IA32_PAGE_SIZE
+
+/*
+ * This is the location that an ET_DYN program is loaded if exec'ed.
+ * Typical use of this is to invoke "./ld.so someprog" to test out a
+ * new version of the loader. We need to make sure that it is out of
+ * the way of the program that it will "exec", and that there is
+ * sufficient room for the brk.
+ */
+#define ELF_ET_DYN_BASE (IA32_PAGE_OFFSET/3 + 0x1000000)
+
+void ia64_elf32_init(struct pt_regs *regs);
+#define ELF_PLAT_INIT(_r, load_addr) ia64_elf32_init(_r)
+
+#define elf_addr_t u32
+
+/* ELF register definitions. This is needed for core dump support. */
+
+#define ELF_NGREG 128 /* XXX fix me */
+#define ELF_NFPREG 128 /* XXX fix me */
+
+typedef unsigned long elf_greg_t;
+typedef elf_greg_t elf_gregset_t[ELF_NGREG];
+
+typedef struct {
+ unsigned long w0;
+ unsigned long w1;
+} elf_fpreg_t;
+typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
+
+/* This macro yields a bitmask that programs can use to figure out
+ what instruction set this CPU supports. */
+#define ELF_HWCAP 0
+
+/* This macro yields a string that ld.so will use to load
+ implementation specific libraries for optimization. Not terribly
+ relevant until we have real hardware to play with... */
+#define ELF_PLATFORM 0
+
+#ifdef __KERNEL__
+# define SET_PERSONALITY(EX,IBCS2) \
+ (current->personality = (IBCS2) ? PER_SVR4 : PER_LINUX)
+#endif
+
+#define IA32_EFLAG 0x200
+
+/*
+ * IA-32 ELF specific definitions for IA-64.
+ */
+
+#define __USER_CS 0x23
+#define __USER_DS 0x2B
+
+#define FIRST_TSS_ENTRY 6
+#define FIRST_LDT_ENTRY (FIRST_TSS_ENTRY+1)
+#define _TSS(n) ((((unsigned long) n)<<4)+(FIRST_TSS_ENTRY<<3))
+#define _LDT(n) ((((unsigned long) n)<<4)+(FIRST_LDT_ENTRY<<3))
+
+#define IA32_SEGSEL_RPL (0x3 << 0)
+#define IA32_SEGSEL_TI (0x1 << 2)
+#define IA32_SEGSEL_INDEX_SHIFT 3
+
+#define IA32_SEG_BASE 16
+#define IA32_SEG_TYPE 40
+#define IA32_SEG_SYS 44
+#define IA32_SEG_DPL 45
+#define IA32_SEG_P 47
+#define IA32_SEG_HIGH_LIMIT 48
+#define IA32_SEG_AVL 52
+#define IA32_SEG_DB 54
+#define IA32_SEG_G 55
+#define IA32_SEG_HIGH_BASE 56
+
+#define IA32_SEG_DESCRIPTOR(base, limit, segtype, nonsysseg, dpl, segpresent, avl, segdb, gran) \
+ (((limit) & 0xffff) \
+ | (((unsigned long) (base) & 0xffffff) << IA32_SEG_BASE) \
+ | ((unsigned long) (segtype) << IA32_SEG_TYPE) \
+ | ((unsigned long) (nonsysseg) << IA32_SEG_SYS) \
+ | ((unsigned long) (dpl) << IA32_SEG_DPL) \
+ | ((unsigned long) (segpresent) << IA32_SEG_P) \
+ | ((((unsigned long) (limit) >> 16) & 0xf) << IA32_SEG_HIGH_LIMIT) \
+ | ((unsigned long) (avl) << IA32_SEG_AVL) \
+ | ((unsigned long) (segdb) << IA32_SEG_DB) \
+ | ((unsigned long) (gran) << IA32_SEG_G) \
+ | ((((unsigned long) (base) >> 24) & 0xff) << IA32_SEG_HIGH_BASE))
+
+#define SEG_LIM 32
+#define SEG_TYPE 52
+#define SEG_SYS 56
+#define SEG_DPL 57
+#define SEG_P 59
+#define SEG_AVL 60
+#define SEG_DB 62
+#define SEG_G 63
+
+/* Unscramble an IA-32 segment descriptor into the IA-64 format. */
+#define IA32_SEG_UNSCRAMBLE(sd) \
+ ( (((sd) >> IA32_SEG_BASE) & 0xffffff) | ((((sd) >> IA32_SEG_HIGH_BASE) & 0xff) << 24) \
+ | ((((sd) & 0xffff) | ((((sd) >> IA32_SEG_HIGH_LIMIT) & 0xf) << 16)) << SEG_LIM) \
+ | ((((sd) >> IA32_SEG_TYPE) & 0xf) << SEG_TYPE) \
+ | ((((sd) >> IA32_SEG_SYS) & 0x1) << SEG_SYS) \
+ | ((((sd) >> IA32_SEG_DPL) & 0x3) << SEG_DPL) \
+ | ((((sd) >> IA32_SEG_P) & 0x1) << SEG_P) \
+ | ((((sd) >> IA32_SEG_AVL) & 0x1) << SEG_AVL) \
+ | ((((sd) >> IA32_SEG_DB) & 0x1) << SEG_DB) \
+ | ((((sd) >> IA32_SEG_G) & 0x1) << SEG_G))
+
+#define IA32_IOBASE 0x2000000000000000 /* Virtual address for I/O space */
+
+#define IA32_CR0 0x80000001 /* Enable PG and PE bits */
+#define IA32_CR4 0x600 /* MMXEX and FXSR on */
+
+/*
+ * IA32 floating point control registers starting values
+ */
+
+#define IA32_FSR_DEFAULT 0x55550000 /* set all tag bits */
+#define IA32_FCR_DEFAULT 0x17800000037fUL /* extended precision, all masks */
+
+#define IA32_PTRACE_GETREGS 12
+#define IA32_PTRACE_SETREGS 13
+#define IA32_PTRACE_GETFPREGS 14
+#define IA32_PTRACE_SETFPREGS 15
+#define IA32_PTRACE_GETFPXREGS 18
+#define IA32_PTRACE_SETFPXREGS 19
+
+#define ia32_start_thread(regs,new_ip,new_sp) do { \
+ set_fs(USER_DS); \
+ ia64_psr(regs)->cpl = 3; /* set user mode */ \
+ ia64_psr(regs)->ri = 0; /* clear return slot number */ \
+ ia64_psr(regs)->is = 1; /* IA-32 instruction set */ \
+ regs->cr_iip = new_ip; \
+ regs->ar_rsc = 0xc; /* enforced lazy mode, priv. level 3 */ \
+ regs->ar_rnat = 0; \
+ regs->loadrs = 0; \
+ regs->r12 = new_sp; \
+} while (0)
+
+/*
+ * Local Descriptor Table (LDT) related declarations.
+ */
+
+#define IA32_LDT_ENTRIES 8192 /* Maximum number of LDT entries supported. */
+#define IA32_LDT_ENTRY_SIZE 8 /* The size of each LDT entry. */
+
+struct ia32_modify_ldt_ldt_s {
+ unsigned int entry_number;
+ unsigned int base_addr;
+ unsigned int limit;
+ unsigned int seg_32bit:1;
+ unsigned int contents:2;
+ unsigned int read_exec_only:1;
+ unsigned int limit_in_pages:1;
+ unsigned int seg_not_present:1;
+ unsigned int useable:1;
+};
+
+struct linux_binprm;
+
+extern void ia32_init_addr_space (struct pt_regs *regs);
+extern int ia32_setup_arg_pages (struct linux_binprm *bprm);
+extern unsigned long ia32_do_mmap (struct file *, unsigned long, unsigned long, int, int, loff_t);
+extern void ia32_load_segment_descriptors (struct task_struct *task);
+
+#define ia32f2ia64f(dst,src) \
+ do { \
+ register double f6 asm ("f6"); \
+ asm volatile ("ldfe f6=[%2];; stf.spill [%1]=f6" : "=f"(f6): "r"(dst), "r"(src) : "memory"); \
+ } while(0)
+
+#define ia64f2ia32f(dst,src) \
+ do { \
+ register double f6 asm ("f6"); \
+ asm volatile ("ldf.fill f6=[%2];; stfe [%1]=f6" : "=f"(f6): "r"(dst), "r"(src) : "memory"); \
+ } while(0)
+
+#endif /* !CONFIG_IA32_SUPPORT */
+
+#endif /* _ASM_IA64_IA32_H */
#include <asm/types.h>
#include <asm/uaccess.h>
#include <asm/semaphore.h>
-#include <asm/ia32.h>
+
+#include "ia32priv.h"
#include <net/scm.h>
#include <net/sock.h>
static int
-get_page_prot (unsigned long addr)
+get_page_prot (struct vm_area_struct *vma, unsigned long addr)
{
- struct vm_area_struct *vma = find_vma(current->mm, addr);
int prot = 0;
if (!vma || vma->vm_start > addr)
mmap_subpage (struct file *file, unsigned long start, unsigned long end, int prot, int flags,
loff_t off)
{
- void *page = (void *) get_zeroed_page(GFP_KERNEL);
+ void *page = NULL;
struct inode *inode;
- unsigned long ret;
- int old_prot = get_page_prot(start);
+ unsigned long ret = 0;
+ struct vm_area_struct *vma = find_vma(current->mm, start);
+ int old_prot = get_page_prot(vma, start);
DBG("mmap_subpage(file=%p,start=0x%lx,end=0x%lx,prot=%x,flags=%x,off=0x%llx)\n",
file, start, end, prot, flags, off);
+
+ /* Optimize the case where the old mmap and the new mmap are both anonymous */
+ if ((old_prot & PROT_WRITE) && (flags & MAP_ANONYMOUS) && !vma->vm_file) {
+ if (clear_user((void *) start, end - start)) {
+ ret = -EFAULT;
+ goto out;
+ }
+ goto skip_mmap;
+ }
+
+ page = (void *) get_zeroed_page(GFP_KERNEL);
if (!page)
return -ENOMEM;
copy_to_user((void *) end, page + PAGE_OFF(end),
PAGE_SIZE - PAGE_OFF(end));
}
+
if (!(flags & MAP_ANONYMOUS)) {
/* read the file contents */
inode = file->f_dentry->d_inode;
goto out;
}
}
+
+ skip_mmap:
if (!(prot & PROT_WRITE))
ret = sys_mprotect(PAGE_START(start), PAGE_SIZE, prot | old_prot);
out:
- free_page((unsigned long) page);
+ if (page)
+ free_page((unsigned long) page);
return ret;
}
mprotect_subpage (unsigned long address, int new_prot)
{
int old_prot;
+ struct vm_area_struct *vma;
if (new_prot == PROT_NONE)
return 0; /* optimize case where nothing changes... */
-
- old_prot = get_page_prot(address);
+ vma = find_vma(current->mm, address);
+ old_prot = get_page_prot(vma, address);
return sys_mprotect(address, PAGE_SIZE, new_prot | old_prot);
}
sorts of things, like timeval and itimerval. */
extern struct timezone sys_tz;
-extern int do_sys_settimeofday (struct timeval *tv, struct timezone *tz);
asmlinkage long
sys32_gettimeofday (struct compat_timeval *tv, struct timezone *tz)
sys32_settimeofday (struct compat_timeval *tv, struct timezone *tz)
{
struct timeval ktv;
+ struct timespec kts;
struct timezone ktz;
if (tv) {
if (get_tv32(&ktv, tv))
return -EFAULT;
+ kts.tv_sec = ktv.tv_sec;
+ kts.tv_nsec = ktv.tv_usec * 1000;
}
if (tz) {
if (copy_from_user(&ktz, tz, sizeof(ktz)))
return -EFAULT;
}
- return do_sys_settimeofday(tv ? &ktv : NULL, tz ? &ktz : NULL);
+ return do_sys_settimeofday(tv ? &kts : NULL, tz ? &ktz : NULL);
}
struct getdents32_callback {
}
}
- size = FDS_BYTES(n);
ret = -EINVAL;
- if (n < 0 || size < n)
+ if (n < 0)
goto out_nofds;
if (n > current->files->max_fdset)
* long-words.
*/
ret = -ENOMEM;
+ size = FDS_BYTES(n);
bits = kmalloc(6 * size, GFP_KERNEL);
if (!bits)
goto out_nofds;
};
struct shmid64_ds32 {
- struct ipc64_perm shm_perm;
+ struct ipc64_perm32 shm_perm;
compat_size_t shm_segsz;
compat_time_t shm_atime;
unsigned int __unused1;
msgctl32 (int first, int second, void *uptr)
{
int err = -EINVAL, err2;
- struct msqid_ds m;
struct msqid64_ds m64;
struct msqid_ds32 *up32 = (struct msqid_ds32 *)uptr;
struct msqid64_ds32 *up64 = (struct msqid64_ds32 *)uptr;
case IPC_SET:
if (version == IPC_64) {
- err = get_user(m.msg_perm.uid, &up64->msg_perm.uid);
- err |= get_user(m.msg_perm.gid, &up64->msg_perm.gid);
- err |= get_user(m.msg_perm.mode, &up64->msg_perm.mode);
- err |= get_user(m.msg_qbytes, &up64->msg_qbytes);
+ err = get_user(m64.msg_perm.uid, &up64->msg_perm.uid);
+ err |= get_user(m64.msg_perm.gid, &up64->msg_perm.gid);
+ err |= get_user(m64.msg_perm.mode, &up64->msg_perm.mode);
+ err |= get_user(m64.msg_qbytes, &up64->msg_qbytes);
} else {
- err = get_user(m.msg_perm.uid, &up32->msg_perm.uid);
- err |= get_user(m.msg_perm.gid, &up32->msg_perm.gid);
- err |= get_user(m.msg_perm.mode, &up32->msg_perm.mode);
- err |= get_user(m.msg_qbytes, &up32->msg_qbytes);
+ err = get_user(m64.msg_perm.uid, &up32->msg_perm.uid);
+ err |= get_user(m64.msg_perm.gid, &up32->msg_perm.gid);
+ err |= get_user(m64.msg_perm.mode, &up32->msg_perm.mode);
+ err |= get_user(m64.msg_qbytes, &up32->msg_qbytes);
}
if (err)
break;
old_fs = get_fs();
set_fs(KERNEL_DS);
- err = sys_msgctl(first, second, &m);
+ err = sys_msgctl(first, second, &m64);
set_fs(old_fs);
break;
shmctl32 (int first, int second, void *uptr)
{
int err = -EFAULT, err2;
- struct shmid_ds s;
+
struct shmid64_ds s64;
struct shmid_ds32 *up32 = (struct shmid_ds32 *)uptr;
struct shmid64_ds32 *up64 = (struct shmid64_ds32 *)uptr;
case IPC_SET:
if (version == IPC_64) {
- err = get_user(s.shm_perm.uid, &up64->shm_perm.uid);
- err |= get_user(s.shm_perm.gid, &up64->shm_perm.gid);
- err |= get_user(s.shm_perm.mode, &up64->shm_perm.mode);
+ err = get_user(s64.shm_perm.uid, &up64->shm_perm.uid);
+ err |= get_user(s64.shm_perm.gid, &up64->shm_perm.gid);
+ err |= get_user(s64.shm_perm.mode, &up64->shm_perm.mode);
} else {
- err = get_user(s.shm_perm.uid, &up32->shm_perm.uid);
- err |= get_user(s.shm_perm.gid, &up32->shm_perm.gid);
- err |= get_user(s.shm_perm.mode, &up32->shm_perm.mode);
+ err = get_user(s64.shm_perm.uid, &up32->shm_perm.uid);
+ err |= get_user(s64.shm_perm.gid, &up32->shm_perm.gid);
+ err |= get_user(s64.shm_perm.mode, &up32->shm_perm.mode);
}
if (err)
break;
old_fs = get_fs();
set_fs(KERNEL_DS);
- err = sys_shmctl(first, second, &s);
+ err = sys_shmctl(first, second, &s64);
set_fs(old_fs);
break;
ia64f2ia32f(f, &ptp->f9);
break;
case 2:
+ ia64f2ia32f(f, &ptp->f10);
+ break;
case 3:
+ ia64f2ia32f(f, &ptp->f11);
+ break;
case 4:
case 5:
case 6:
case 7:
- ia64f2ia32f(f, &swp->f10 + (regno - 2));
+ ia64f2ia32f(f, &swp->f12 + (regno - 4));
break;
}
copy_to_user(reg, f, sizeof(*reg));
copy_from_user(&ptp->f9, reg, sizeof(*reg));
break;
case 2:
+ copy_from_user(&ptp->f10, reg, sizeof(*reg));
+ break;
case 3:
+ copy_from_user(&ptp->f11, reg, sizeof(*reg));
+ break;
case 4:
case 5:
case 6:
case 7:
- copy_from_user(&swp->f10 + (regno - 2), reg, sizeof(*reg));
+ copy_from_user(&swp->f12 + (regno - 4), reg, sizeof(*reg));
break;
}
return;
ptp = ia64_task_regs(tsk);
tos = (tsk->thread.fsr >> 11) & 7;
for (i = 0; i < 8; i++)
- put_fpreg(i, (struct _fpreg_ia32 *)&save->st_space[4*i], ptp, swp, tos);
+ put_fpreg(i, &save->st_space[i], ptp, swp, tos);
return 0;
}
ptp = ia64_task_regs(tsk);
tos = (tsk->thread.fsr >> 11) & 7;
for (i = 0; i < 8; i++)
- get_fpreg(i, (struct _fpreg_ia32 *)&save->st_space[4*i], ptp, swp, tos);
+ get_fpreg(i, &save->st_space[i], ptp, swp, tos);
return 0;
}
extra-y := head.o init_task.o
-obj-y := acpi.o entry.o efi.o efi_stub.o gate.o ia64_ksyms.o irq.o irq_ia64.o irq_lsapic.o \
- ivt.o machvec.o pal.o perfmon.o process.o ptrace.o sal.o semaphore.o setup.o signal.o \
- sys_ia64.o time.o traps.o unaligned.o unwind.o
+obj-y := acpi.o entry.o efi.o efi_stub.o gate-data.o fsys.o ia64_ksyms.o irq.o irq_ia64.o \
+ irq_lsapic.o ivt.o machvec.o pal.o patch.o process.o perfmon.o ptrace.o sal.o \
+ semaphore.o setup.o signal.o sys_ia64.o time.o traps.o unaligned.o unwind.o
obj-$(CONFIG_EFI_VARS) += efivars.o
-obj-$(CONFIG_FSYS) += fsys.o
obj-$(CONFIG_IA64_BRL_EMU) += brl_emu.o
obj-$(CONFIG_IA64_GENERIC) += acpi-ext.o
obj-$(CONFIG_IA64_HP_ZX1) += acpi-ext.o
obj-$(CONFIG_IOSAPIC) += iosapic.o
obj-$(CONFIG_MODULES) += module.o
obj-$(CONFIG_SMP) += smp.o smpboot.o
+obj-$(CONFIG_PERFMON) += perfmon_default_smpl.o
+
+# The gate DSO image is built using a special linker script.
+targets += gate.so gate-syms.o
+
+AFLAGS_gate.lds.o += -P -C -U$(ARCH)
+arch/ia64/kernel/gate.lds.s: %.s: %.S scripts FORCE
+ $(call if_changed_dep,as_s_S)
+
+quiet_cmd_gate = GATE $@
+ cmd_gate = $(CC) -nostdlib $(GATECFLAGS_$(@F)) -Wl,-T,$(filter-out FORCE,$^) -o $@
+
+GATECFLAGS_gate.so = -shared -s -Wl,-soname=linux-gate.so.1
+$(obj)/gate.so: $(src)/gate.lds.s $(obj)/gate.o FORCE
+ $(call if_changed,gate)
+
+$(obj)/built-in.o: $(obj)/gate-syms.o
+$(obj)/built-in.o: ld_flags += -R $(obj)/gate-syms.o
+
+GATECFLAGS_gate-syms.o = -r
+$(obj)/gate-syms.o: $(src)/gate.lds.s $(obj)/gate.o FORCE
+ $(call if_changed,gate)
+
+# gate-data.o contains the gate DSO image as data in section .data.gate.
+# We must build gate.so before we can assemble it.
+# Note: kbuild does not track this dependency due to usage of .incbin
+$(obj)/gate-data.o: $(obj)/gate.so
acpi_status status;
u8 *data;
u32 length;
- int i;
status = acpi_find_vendor_resource(obj, &hp_ccsr_descriptor, &data, &length);
if (!strcmp(hdr->oem_id, "HP")) {
return "hpzx1";
}
+ else if (!strcmp(hdr->oem_id, "SGI")) {
+ return "sn2";
+ }
return "dig";
#else
return "hpsim";
# elif defined (CONFIG_IA64_HP_ZX1)
return "hpzx1";
-# elif defined (CONFIG_IA64_SGI_SN1)
- return "sn1";
# elif defined (CONFIG_IA64_SGI_SN2)
return "sn2";
# elif defined (CONFIG_IA64_DIG)
printk(KERN_INFO "CPU %d (0x%04x)", total_cpus, (lsapic->id << 8) | lsapic->eid);
- if (lsapic->flags.enabled) {
- available_cpus++;
+ if (!lsapic->flags.enabled)
+ printk(" disabled");
+ else if (available_cpus >= NR_CPUS)
+ printk(" ignored (increase NR_CPUS)");
+ else {
printk(" enabled");
#ifdef CONFIG_SMP
- smp_boot_data.cpu_phys_id[total_cpus] = (lsapic->id << 8) | lsapic->eid;
+ smp_boot_data.cpu_phys_id[available_cpus] = (lsapic->id << 8) | lsapic->eid;
if (hard_smp_processor_id()
- == (unsigned int) smp_boot_data.cpu_phys_id[total_cpus])
+ == (unsigned int) smp_boot_data.cpu_phys_id[available_cpus])
printk(" (BSP)");
#endif
- }
- else {
- printk(" disabled");
-#ifdef CONFIG_SMP
- smp_boot_data.cpu_phys_id[total_cpus] = -1;
-#endif
+ ++available_cpus;
}
printk("\n");
}
-#ifdef CONFIG_SERIAL_8250_ACPI
-
-#include <linux/acpi_serial.h>
-
-static int __init
-acpi_parse_spcr (unsigned long phys_addr, unsigned long size)
-{
- acpi_ser_t *spcr;
- unsigned int gsi;
-
- if (!phys_addr || !size)
- return -EINVAL;
-
- if (!iosapic_register_intr)
- return -ENODEV;
-
- /*
- * ACPI is able to describe serial ports that live at non-standard
- * memory addresses and use non-standard interrupts, either via
- * direct SAPIC mappings or via PCI interrupts. We handle interrupt
- * routing for SAPIC-based (non-PCI) devices here. Interrupt routing
- * for PCI devices will be handled when processing the PCI Interrupt
- * Routing Table (PRT).
- */
-
- spcr = (acpi_ser_t *) __va(phys_addr);
-
- setup_serial_acpi(spcr);
-
- if (spcr->length < sizeof(acpi_ser_t))
- /* Table not long enough for full info, thus no interrupt */
- return -ENODEV;
-
- if ((spcr->base_addr.space_id != ACPI_SERIAL_PCICONF_SPACE) &&
- (spcr->int_type == ACPI_SERIAL_INT_SAPIC))
- {
- int vector;
-
- /* We have a UART in memory space with an SAPIC interrupt */
-
- gsi = ( (spcr->global_int[3] << 24) |
- (spcr->global_int[2] << 16) |
- (spcr->global_int[1] << 8) |
- (spcr->global_int[0]) );
-
- vector = iosapic_register_intr(gsi, IOSAPIC_POL_HIGH, IOSAPIC_EDGE);
- }
- return 0;
-}
-
-#endif /* CONFIG_SERIAL_8250_ACPI */
-
-
int __init
acpi_boot_init (void)
{
if (acpi_table_parse(ACPI_FADT, acpi_parse_fadt) < 1)
printk(KERN_ERR PREFIX "Can't find FADT\n");
-#ifdef CONFIG_SERIAL_8250_ACPI
- /*
- * TBD: Need phased approach to table parsing (only do those absolutely
- * required during boot-up). Recommend expanding concept of fix-
- * feature devices (LDM) to include table-based devices such as
- * serial ports, EC, SMBus, etc.
- */
- acpi_table_parse(ACPI_SPCR, acpi_parse_spcr);
-#endif
-
#ifdef CONFIG_SMP
+ smp_boot_data.cpu_count = available_cpus;
if (available_cpus == 0) {
printk(KERN_INFO "ACPI: Found 0 CPUS; assuming 1\n");
available_cpus = 1; /* We've got at least one of these, no? */
}
- smp_boot_data.cpu_count = total_cpus;
smp_build_cpu_map();
# ifdef CONFIG_NUMA
--- /dev/null
+/*
+ * Generate definitions needed by assembly language modules.
+ * This code generates raw asm output which is post-processed
+ * to extract and format the required data.
+ */
+
+#include <linux/config.h>
+
+#include <linux/sched.h>
+
+#include <asm-ia64/processor.h>
+#include <asm-ia64/ptrace.h>
+#include <asm-ia64/siginfo.h>
+#include <asm-ia64/sigcontext.h>
+
+#include "../kernel/sigframe.h"
+
+#define DEFINE(sym, val) \
+ asm volatile("\n->" #sym " %0 " #val : : "i" (val))
+
+#define BLANK() asm volatile("\n->" : : )
+
+void foo(void)
+{
+ DEFINE(IA64_TASK_SIZE, sizeof (struct task_struct));
+ DEFINE(IA64_THREAD_INFO_SIZE, sizeof (struct thread_info));
+ DEFINE(IA64_PT_REGS_SIZE, sizeof (struct pt_regs));
+ DEFINE(IA64_SWITCH_STACK_SIZE, sizeof (struct switch_stack));
+ DEFINE(IA64_SIGINFO_SIZE, sizeof (struct siginfo));
+ DEFINE(IA64_CPU_SIZE, sizeof (struct cpuinfo_ia64));
+ DEFINE(SIGFRAME_SIZE, sizeof (struct sigframe));
+ DEFINE(UNW_FRAME_INFO_SIZE, sizeof (struct unw_frame_info));
+
+ BLANK();
+
+ DEFINE(IA64_TASK_CLEAR_CHILD_TID_OFFSET,offsetof (struct task_struct, clear_child_tid));
+ DEFINE(IA64_TASK_GROUP_LEADER_OFFSET, offsetof (struct task_struct, group_leader));
+ DEFINE(IA64_TASK_PID_OFFSET, offsetof (struct task_struct, pid));
+ DEFINE(IA64_TASK_REAL_PARENT_OFFSET, offsetof (struct task_struct, real_parent));
+ DEFINE(IA64_TASK_TGID_OFFSET, offsetof (struct task_struct, tgid));
+ DEFINE(IA64_TASK_THREAD_KSP_OFFSET, offsetof (struct task_struct, thread.ksp));
+ DEFINE(IA64_TASK_THREAD_ON_USTACK_OFFSET, offsetof (struct task_struct, thread.on_ustack));
+
+ BLANK();
+
+ DEFINE(IA64_PT_REGS_B6_OFFSET, offsetof (struct pt_regs, b6));
+ DEFINE(IA64_PT_REGS_B7_OFFSET, offsetof (struct pt_regs, b7));
+ DEFINE(IA64_PT_REGS_AR_CSD_OFFSET, offsetof (struct pt_regs, ar_csd));
+ DEFINE(IA64_PT_REGS_AR_SSD_OFFSET, offsetof (struct pt_regs, ar_ssd));
+ DEFINE(IA64_PT_REGS_R8_OFFSET, offsetof (struct pt_regs, r8));
+ DEFINE(IA64_PT_REGS_R9_OFFSET, offsetof (struct pt_regs, r9));
+ DEFINE(IA64_PT_REGS_R10_OFFSET, offsetof (struct pt_regs, r10));
+ DEFINE(IA64_PT_REGS_R11_OFFSET, offsetof (struct pt_regs, r11));
+ DEFINE(IA64_PT_REGS_CR_IPSR_OFFSET, offsetof (struct pt_regs, cr_ipsr));
+ DEFINE(IA64_PT_REGS_CR_IIP_OFFSET, offsetof (struct pt_regs, cr_iip));
+ DEFINE(IA64_PT_REGS_CR_IFS_OFFSET, offsetof (struct pt_regs, cr_ifs));
+ DEFINE(IA64_PT_REGS_AR_UNAT_OFFSET, offsetof (struct pt_regs, ar_unat));
+ DEFINE(IA64_PT_REGS_AR_PFS_OFFSET, offsetof (struct pt_regs, ar_pfs));
+ DEFINE(IA64_PT_REGS_AR_RSC_OFFSET, offsetof (struct pt_regs, ar_rsc));
+ DEFINE(IA64_PT_REGS_AR_RNAT_OFFSET, offsetof (struct pt_regs, ar_rnat));
+
+ DEFINE(IA64_PT_REGS_AR_BSPSTORE_OFFSET, offsetof (struct pt_regs, ar_bspstore));
+ DEFINE(IA64_PT_REGS_PR_OFFSET, offsetof (struct pt_regs, pr));
+ DEFINE(IA64_PT_REGS_B0_OFFSET, offsetof (struct pt_regs, b0));
+ DEFINE(IA64_PT_REGS_LOADRS_OFFSET, offsetof (struct pt_regs, loadrs));
+ DEFINE(IA64_PT_REGS_R1_OFFSET, offsetof (struct pt_regs, r1));
+ DEFINE(IA64_PT_REGS_R12_OFFSET, offsetof (struct pt_regs, r12));
+ DEFINE(IA64_PT_REGS_R13_OFFSET, offsetof (struct pt_regs, r13));
+ DEFINE(IA64_PT_REGS_AR_FPSR_OFFSET, offsetof (struct pt_regs, ar_fpsr));
+ DEFINE(IA64_PT_REGS_R15_OFFSET, offsetof (struct pt_regs, r15));
+ DEFINE(IA64_PT_REGS_R14_OFFSET, offsetof (struct pt_regs, r14));
+ DEFINE(IA64_PT_REGS_R2_OFFSET, offsetof (struct pt_regs, r2));
+ DEFINE(IA64_PT_REGS_R3_OFFSET, offsetof (struct pt_regs, r3));
+ DEFINE(IA64_PT_REGS_R16_OFFSET, offsetof (struct pt_regs, r16));
+ DEFINE(IA64_PT_REGS_R17_OFFSET, offsetof (struct pt_regs, r17));
+ DEFINE(IA64_PT_REGS_R18_OFFSET, offsetof (struct pt_regs, r18));
+ DEFINE(IA64_PT_REGS_R19_OFFSET, offsetof (struct pt_regs, r19));
+ DEFINE(IA64_PT_REGS_R20_OFFSET, offsetof (struct pt_regs, r20));
+ DEFINE(IA64_PT_REGS_R21_OFFSET, offsetof (struct pt_regs, r21));
+ DEFINE(IA64_PT_REGS_R22_OFFSET, offsetof (struct pt_regs, r22));
+ DEFINE(IA64_PT_REGS_R23_OFFSET, offsetof (struct pt_regs, r23));
+ DEFINE(IA64_PT_REGS_R24_OFFSET, offsetof (struct pt_regs, r24));
+ DEFINE(IA64_PT_REGS_R25_OFFSET, offsetof (struct pt_regs, r25));
+ DEFINE(IA64_PT_REGS_R26_OFFSET, offsetof (struct pt_regs, r26));
+ DEFINE(IA64_PT_REGS_R27_OFFSET, offsetof (struct pt_regs, r27));
+ DEFINE(IA64_PT_REGS_R28_OFFSET, offsetof (struct pt_regs, r28));
+ DEFINE(IA64_PT_REGS_R29_OFFSET, offsetof (struct pt_regs, r29));
+ DEFINE(IA64_PT_REGS_R30_OFFSET, offsetof (struct pt_regs, r30));
+ DEFINE(IA64_PT_REGS_R31_OFFSET, offsetof (struct pt_regs, r31));
+ DEFINE(IA64_PT_REGS_AR_CCV_OFFSET, offsetof (struct pt_regs, ar_ccv));
+ DEFINE(IA64_PT_REGS_F6_OFFSET, offsetof (struct pt_regs, f6));
+ DEFINE(IA64_PT_REGS_F7_OFFSET, offsetof (struct pt_regs, f7));
+ DEFINE(IA64_PT_REGS_F8_OFFSET, offsetof (struct pt_regs, f8));
+ DEFINE(IA64_PT_REGS_F9_OFFSET, offsetof (struct pt_regs, f9));
+ DEFINE(IA64_PT_REGS_F10_OFFSET, offsetof (struct pt_regs, f10));
+ DEFINE(IA64_PT_REGS_F11_OFFSET, offsetof (struct pt_regs, f11));
+
+ BLANK();
+
+ DEFINE(IA64_SWITCH_STACK_CALLER_UNAT_OFFSET, offsetof (struct switch_stack, caller_unat));
+ DEFINE(IA64_SWITCH_STACK_AR_FPSR_OFFSET, offsetof (struct switch_stack, ar_fpsr));
+ DEFINE(IA64_SWITCH_STACK_F2_OFFSET, offsetof (struct switch_stack, f2));
+ DEFINE(IA64_SWITCH_STACK_F3_OFFSET, offsetof (struct switch_stack, f3));
+ DEFINE(IA64_SWITCH_STACK_F4_OFFSET, offsetof (struct switch_stack, f4));
+ DEFINE(IA64_SWITCH_STACK_F5_OFFSET, offsetof (struct switch_stack, f5));
+ DEFINE(IA64_SWITCH_STACK_F12_OFFSET, offsetof (struct switch_stack, f12));
+ DEFINE(IA64_SWITCH_STACK_F13_OFFSET, offsetof (struct switch_stack, f13));
+ DEFINE(IA64_SWITCH_STACK_F14_OFFSET, offsetof (struct switch_stack, f14));
+ DEFINE(IA64_SWITCH_STACK_F15_OFFSET, offsetof (struct switch_stack, f15));
+ DEFINE(IA64_SWITCH_STACK_F16_OFFSET, offsetof (struct switch_stack, f16));
+ DEFINE(IA64_SWITCH_STACK_F17_OFFSET, offsetof (struct switch_stack, f17));
+ DEFINE(IA64_SWITCH_STACK_F18_OFFSET, offsetof (struct switch_stack, f18));
+ DEFINE(IA64_SWITCH_STACK_F19_OFFSET, offsetof (struct switch_stack, f19));
+ DEFINE(IA64_SWITCH_STACK_F20_OFFSET, offsetof (struct switch_stack, f20));
+ DEFINE(IA64_SWITCH_STACK_F21_OFFSET, offsetof (struct switch_stack, f21));
+ DEFINE(IA64_SWITCH_STACK_F22_OFFSET, offsetof (struct switch_stack, f22));
+ DEFINE(IA64_SWITCH_STACK_F23_OFFSET, offsetof (struct switch_stack, f23));
+ DEFINE(IA64_SWITCH_STACK_F24_OFFSET, offsetof (struct switch_stack, f24));
+ DEFINE(IA64_SWITCH_STACK_F25_OFFSET, offsetof (struct switch_stack, f25));
+ DEFINE(IA64_SWITCH_STACK_F26_OFFSET, offsetof (struct switch_stack, f26));
+ DEFINE(IA64_SWITCH_STACK_F27_OFFSET, offsetof (struct switch_stack, f27));
+ DEFINE(IA64_SWITCH_STACK_F28_OFFSET, offsetof (struct switch_stack, f28));
+ DEFINE(IA64_SWITCH_STACK_F29_OFFSET, offsetof (struct switch_stack, f29));
+ DEFINE(IA64_SWITCH_STACK_F30_OFFSET, offsetof (struct switch_stack, f30));
+ DEFINE(IA64_SWITCH_STACK_F31_OFFSET, offsetof (struct switch_stack, f31));
+ DEFINE(IA64_SWITCH_STACK_R4_OFFSET, offsetof (struct switch_stack, r4));
+ DEFINE(IA64_SWITCH_STACK_R5_OFFSET, offsetof (struct switch_stack, r5));
+ DEFINE(IA64_SWITCH_STACK_R6_OFFSET, offsetof (struct switch_stack, r6));
+ DEFINE(IA64_SWITCH_STACK_R7_OFFSET, offsetof (struct switch_stack, r7));
+ DEFINE(IA64_SWITCH_STACK_B0_OFFSET, offsetof (struct switch_stack, b0));
+ DEFINE(IA64_SWITCH_STACK_B1_OFFSET, offsetof (struct switch_stack, b1));
+ DEFINE(IA64_SWITCH_STACK_B2_OFFSET, offsetof (struct switch_stack, b2));
+ DEFINE(IA64_SWITCH_STACK_B3_OFFSET, offsetof (struct switch_stack, b3));
+ DEFINE(IA64_SWITCH_STACK_B4_OFFSET, offsetof (struct switch_stack, b4));
+ DEFINE(IA64_SWITCH_STACK_B5_OFFSET, offsetof (struct switch_stack, b5));
+ DEFINE(IA64_SWITCH_STACK_AR_PFS_OFFSET, offsetof (struct switch_stack, ar_pfs));
+ DEFINE(IA64_SWITCH_STACK_AR_LC_OFFSET, offsetof (struct switch_stack, ar_lc));
+ DEFINE(IA64_SWITCH_STACK_AR_UNAT_OFFSET, offsetof (struct switch_stack, ar_unat));
+ DEFINE(IA64_SWITCH_STACK_AR_RNAT_OFFSET, offsetof (struct switch_stack, ar_rnat));
+ DEFINE(IA64_SWITCH_STACK_AR_BSPSTORE_OFFSET, offsetof (struct switch_stack, ar_bspstore));
+ DEFINE(IA64_SWITCH_STACK_PR_OFFSET, offsetof (struct switch_stack, pr));
+
+ BLANK();
+
+ DEFINE(IA64_SIGCONTEXT_IP_OFFSET, offsetof (struct sigcontext, sc_ip));
+ DEFINE(IA64_SIGCONTEXT_AR_BSP_OFFSET, offsetof (struct sigcontext, sc_ar_bsp));
+ DEFINE(IA64_SIGCONTEXT_AR_FPSR_OFFSET, offsetof (struct sigcontext, sc_ar_fpsr));
+ DEFINE(IA64_SIGCONTEXT_AR_RNAT_OFFSET, offsetof (struct sigcontext, sc_ar_rnat));
+ DEFINE(IA64_SIGCONTEXT_AR_UNAT_OFFSET, offsetof (struct sigcontext, sc_ar_unat));
+ DEFINE(IA64_SIGCONTEXT_B0_OFFSET, offsetof (struct sigcontext, sc_br[0]));
+ DEFINE(IA64_SIGCONTEXT_CFM_OFFSET, offsetof (struct sigcontext, sc_cfm));
+ DEFINE(IA64_SIGCONTEXT_FLAGS_OFFSET, offsetof (struct sigcontext, sc_flags));
+ DEFINE(IA64_SIGCONTEXT_FR6_OFFSET, offsetof (struct sigcontext, sc_fr[6]));
+ DEFINE(IA64_SIGCONTEXT_PR_OFFSET, offsetof (struct sigcontext, sc_pr));
+ DEFINE(IA64_SIGCONTEXT_R12_OFFSET, offsetof (struct sigcontext, sc_gr[12]));
+ DEFINE(IA64_SIGCONTEXT_RBS_BASE_OFFSET,offsetof (struct sigcontext, sc_rbs_base));
+ DEFINE(IA64_SIGCONTEXT_LOADRS_OFFSET, offsetof (struct sigcontext, sc_loadrs));
+
+ BLANK();
+
+ DEFINE(IA64_SIGFRAME_ARG0_OFFSET, offsetof (struct sigframe, arg0));
+ DEFINE(IA64_SIGFRAME_ARG1_OFFSET, offsetof (struct sigframe, arg1));
+ DEFINE(IA64_SIGFRAME_ARG2_OFFSET, offsetof (struct sigframe, arg2));
+ DEFINE(IA64_SIGFRAME_HANDLER_OFFSET, offsetof (struct sigframe, handler));
+ DEFINE(IA64_SIGFRAME_SIGCONTEXT_OFFSET, offsetof (struct sigframe, sc));
+ BLANK();
+ /* for assembly files which can't include sched.h: */
+ DEFINE(IA64_CLONE_VFORK, CLONE_VFORK);
+ DEFINE(IA64_CLONE_VM, CLONE_VM);
+
+ BLANK();
+ /* used by fsys_gettimeofday in arch/ia64/kernel/fsys.S */
+ DEFINE(IA64_CPUINFO_ITM_DELTA_OFFSET, offsetof (struct cpuinfo_ia64, itm_delta));
+ DEFINE(IA64_CPUINFO_ITM_NEXT_OFFSET, offsetof (struct cpuinfo_ia64, itm_next));
+ DEFINE(IA64_CPUINFO_NSEC_PER_CYC_OFFSET, offsetof (struct cpuinfo_ia64, nsec_per_cyc));
+ DEFINE(IA64_TIMESPEC_TV_NSEC_OFFSET, offsetof (struct timespec, tv_nsec));
+
+
+ DEFINE(CLONE_IDLETASK_BIT, 12);
+#if CLONE_IDLETASK != (1 << 12)
+# error "CLONE_IDLETASK_BIT incorrect, please fix"
+#endif
+
+ DEFINE(CLONE_SETTLS_BIT, 19);
+#if CLONE_SETTLS != (1<<19)
+# error "CLONE_SETTLS_BIT incorrect, please fix"
+#endif
+
+}
mov b6=r2
;;
andcm r16=loc3,r16 // get psr with IT, DT, and RT bits cleared
- br.call.sptk.many rp=ia64_switch_mode
+ br.call.sptk.many rp=ia64_switch_mode_phys
.ret0: mov out4=in5
mov out0=in1
mov out1=in2
br.call.sptk.many rp=b6 // call the EFI function
.ret1: mov ar.rsc=0 // put RSE in enforced lazy, LE mode
mov r16=loc3
- br.call.sptk.many rp=ia64_switch_mode // return to virtual mode
+ br.call.sptk.many rp=ia64_switch_mode_virt // return to virtual mode
.ret2: mov ar.rsc=loc4 // restore RSE configuration
mov ar.pfs=loc1
mov rp=loc0
*
* Copyright (C) 1998-2003 Hewlett-Packard Co
* David Mosberger-Tang <davidm@hpl.hp.com>
+ * Copyright (C) 1999, 2002-2003
+ * Asit Mallick <Asit.K.Mallick@intel.com>
+ * Don Dugger <Don.Dugger@intel.com>
+ * Suresh Siddha <suresh.b.siddha@intel.com>
+ * Fenghua Yu <fenghua.yu@intel.com>
* Copyright (C) 1999 VA Linux Systems
* Copyright (C) 1999 Walt Drummond <drummond@valinux.com>
- * Copyright (C) 1999 Asit Mallick <Asit.K.Mallick@intel.com>
- * Copyright (C) 1999 Don Dugger <Don.Dugger@intel.com>
*/
/*
* ia64_switch_to now places correct virtual mapping in in TR2 for
* this executes in less than 20 cycles even on Itanium, so it's not worth
* optimizing for...).
*/
+ mov ar.unat=0; mov ar.lc=0
mov r4=0; mov f2=f0; mov b1=r0
mov r5=0; mov f3=f0; mov b2=r0
mov r6=0; mov f4=f0; mov b3=r0
mov r7=0; mov f5=f0; mov b4=r0
- mov ar.unat=0; mov f10=f0; mov b5=r0
- ldf.fill f11=[sp]; ldf.fill f12=[sp]; mov f13=f0
+ ldf.fill f12=[sp]; mov f13=f0; mov b5=r0
ldf.fill f14=[sp]; ldf.fill f15=[sp]; mov f16=f0
ldf.fill f17=[sp]; ldf.fill f18=[sp]; mov f19=f0
ldf.fill f20=[sp]; ldf.fill f21=[sp]; mov f22=f0
ldf.fill f23=[sp]; ldf.fill f24=[sp]; mov f25=f0
ldf.fill f26=[sp]; ldf.fill f27=[sp]; mov f28=f0
ldf.fill f29=[sp]; ldf.fill f30=[sp]; mov f31=f0
- mov ar.lc=0
br.ret.sptk.many rp
END(ia64_execve)
br.call.sptk.many rp=do_fork
.ret1: .restore sp
adds sp=IA64_SWITCH_STACK_SIZE,sp // pop the switch stack
- mov r2=-1000
- adds r3=IA64_TASK_PID_OFFSET,r8
- ;;
- cmp.leu p6,p0=r8,r2
mov ar.pfs=loc1
mov rp=loc0
- ;;
-(p6) ld4 r8=[r3]
br.ret.sptk.many rp
END(sys_clone2)
br.call.sptk.many rp=do_fork
.ret2: .restore sp
adds sp=IA64_SWITCH_STACK_SIZE,sp // pop the switch stack
- mov r2=-1000
- adds r3=IA64_TASK_PID_OFFSET,r8
- ;;
- cmp.leu p6,p0=r8,r2
mov ar.pfs=loc1
mov rp=loc0
- ;;
-(p6) ld4 r8=[r3]
br.ret.sptk.many rp
END(sys_clone)
;;
st8 [r22]=sp // save kernel stack pointer of old task
shr.u r26=r20,IA64_GRANULE_SHIFT
- shr.u r17=r20,KERNEL_TR_PAGE_SHIFT
- ;;
- cmp.ne p6,p7=KERNEL_TR_PAGE_NUM,r17
adds r21=IA64_TASK_THREAD_KSP_OFFSET,in0
;;
/*
* If we've already mapped this task's page, we can skip doing it again.
*/
-(p6) cmp.eq p7,p6=r26,r27
+ cmp.eq p7,p6=r26,r27
(p6) br.cond.dpnt .map
;;
.done:
END(ia64_switch_to)
/*
- * Note that interrupts are enabled during save_switch_stack and
- * load_switch_stack. This means that we may get an interrupt with
- * "sp" pointing to the new kernel stack while ar.bspstore is still
- * pointing to the old kernel backing store area. Since ar.rsc,
- * ar.rnat, ar.bsp, and ar.bspstore are all preserved by interrupts,
- * this is not a problem. Also, we don't need to specify unwind
- * information for preserved registers that are not modified in
- * save_switch_stack as the right unwind information is already
+ * Note that interrupts are enabled during save_switch_stack and load_switch_stack. This
+ * means that we may get an interrupt with "sp" pointing to the new kernel stack while
+ * ar.bspstore is still pointing to the old kernel backing store area. Since ar.rsc,
+ * ar.rnat, ar.bsp, and ar.bspstore are all preserved by interrupts, this is not a
+ * problem. Also, we don't need to specify unwind information for preserved registers
+ * that are not modified in save_switch_stack as the right unwind information is already
* specified at the call-site of save_switch_stack.
*/
st8 [r14]=r21,SW(B1)-SW(B0) // save b0
st8 [r15]=r23,SW(B3)-SW(B2) // save b2
mov r25=b4
- stf.spill [r2]=f10,32
- stf.spill [r3]=f11,32
mov r26=b5
;;
st8 [r14]=r22,SW(B4)-SW(B1) // save b1
ldf.fill f4=[r14],32
ldf.fill f5=[r15],32
;;
- ldf.fill f10=[r14],32
- ldf.fill f11=[r15],32
- ;;
ldf.fill f12=[r14],32
ldf.fill f13=[r15],32
;;
(p6) br.cond.sptk strace_error // syscall failed ->
;; // avoid RAW on r10
strace_save_retval:
-.mem.offset 0,0; st8.spill [r2]=r8 // store return value in slot for r8
-.mem.offset 8,0; st8.spill [r3]=r10 // clear error indication in slot for r10
+.mem.offset 0,0; st8.spill [r2]=r8 // store return value in slot for r8
+.mem.offset 8,0; st8.spill [r3]=r10 // clear error indication in slot for r10
ia64_strace_leave_kernel:
br.call.sptk.many rp=invoke_syscall_trace // give parent a chance to catch return value
-.rety: br.cond.sptk ia64_leave_kernel
+.rety: br.cond.sptk ia64_leave_syscall
strace_error:
ld8 r3=[r2] // load pt_regs.r8
adds r2=PT(R8)+16,sp // r2 = &pt_regs.r8
adds r3=PT(R10)+16,sp // r3 = &pt_regs.r10
;;
- .mem.offset 0,0
-(p6) st8.spill [r2]=r8 // store return value in slot for r8 and set unat bit
- .mem.offset 8,0
-(p6) st8.spill [r3]=r0 // clear error indication in slot for r10 and set unat bit
+.mem.offset 0,0; (p6) st8.spill [r2]=r8 // store return value in slot for r8 and set unat bit
+.mem.offset 8,0; (p6) st8.spill [r3]=r0 // clear error indication in slot for r10 and set unat bit
(p7) br.cond.spnt handle_syscall_error // handle potential syscall failure
END(ia64_ret_from_syscall)
// fall through
-GLOBAL_ENTRY(ia64_leave_kernel)
+/*
+ * ia64_leave_syscall(): Same as ia64_leave_kernel, except that it doesn't
+ * need to switch to bank 0 and doesn't restore the scratch registers.
+ * To avoid leaking kernel bits, the scratch registers are set to
+ * the following known-to-be-safe values:
+ *
+ * r1: restored (global pointer)
+ * r2: cleared
+ * r3: 1 (when returning to user-level)
+ * r8-r11: restored (syscall return value(s))
+ * r12: restored (user-level stack pointer)
+ * r13: restored (user-level thread pointer)
+ * r14: cleared
+ * r15: restored (syscall #)
+ * r16-r19: cleared
+ * r20: user-level ar.fpsr
+ * r21: user-level b0
+ * r22: user-level b6
+ * r23: user-level ar.bspstore
+ * r24: user-level ar.rnat
+ * r25: user-level ar.unat
+ * r26: user-level ar.pfs
+ * r27: user-level ar.rsc
+ * r28: user-level ip
+ * r29: user-level psr
+ * r30: user-level cfm
+ * r31: user-level pr
+ * f6-f11: cleared
+ * pr: restored (user-level pr)
+ * b0: restored (user-level rp)
+ * b6: restored
+ * b7: cleared
+ * ar.unat: restored (user-level ar.unat)
+ * ar.pfs: restored (user-level ar.pfs)
+ * ar.rsc: restored (user-level ar.rsc)
+ * ar.rnat: restored (user-level ar.rnat)
+ * ar.bspstore: restored (user-level ar.bspstore)
+ * ar.fpsr: restored (user-level ar.fpsr)
+ * ar.ccv: cleared
+ * ar.csd: cleared
+ * ar.ssd: cleared
+ */
+GLOBAL_ENTRY(ia64_leave_syscall)
PT_REGS_UNWIND_INFO(0)
- // work.need_resched etc. mustn't get changed by this CPU before it returns to
- // user- or fsys-mode:
-(pUStk) cmp.eq.unc p6,p0=r0,r0 // p6 <- pUStk
+ /*
+ * work.need_resched etc. mustn't get changed by this CPU before it returns to
+ * user- or fsys-mode, hence we disable interrupts early on:
+ */
#ifdef CONFIG_PREEMPT
rsm psr.i // disable interrupts
- adds r17=TI_FLAGS+IA64_TASK_SIZE,r13
+#else
+(pUStk) rsm psr.i
+#endif
+ cmp.eq pLvSys,p0=r0,r0 // pLvSys=1: leave from syscall
+(pUStk) cmp.eq.unc p6,p0=r0,r0 // p6 <- pUStk
+.work_processed_syscall:
+#ifdef CONFIG_PREEMPT
(pKStk) adds r20=TI_PRE_COUNT+IA64_TASK_SIZE,r13
;;
-(pKStk) ld4 r21=[r20] // preempt_count ->r21
+ .pred.rel.mutex pUStk,pKStk
+(pKStk) ld4 r21=[r20] // r21 <- preempt_count
+(pUStk) mov r21=0 // r21 <- 0
+ ;;
+(p6) cmp.eq.unc p6,p0=r21,r0 // p6 <- p6 && (r21 == 0)
+#endif /* CONFIG_PREEMPT */
+ adds r16=PT(LOADRS)+16,r12
+ adds r17=PT(AR_BSPSTORE)+16,r12
+ adds r18=TI_FLAGS+IA64_TASK_SIZE,r13
+ ;;
+(p6) ld4 r31=[r18] // load current_thread_info()->flags
+ ld8 r19=[r16],PT(B6)-PT(LOADRS) // load ar.rsc value for "loadrs"
+ nop.i 0
+ ;;
+ ld8 r23=[r17],PT(R9)-PT(AR_BSPSTORE) // load ar.bspstore (may be garbage)
+ ld8 r22=[r16],PT(R8)-PT(B6) // load b6
+(p6) and r15=TIF_WORK_MASK,r31 // any work other than TIF_SYSCALL_TRACE?
+ ;;
+
+ mov.m ar.ccv=r0 // clear ar.ccv
+(p6) cmp4.ne.unc p6,p0=r15, r0 // any special work pending?
+(p6) br.cond.spnt .work_pending
+ ;;
+ // start restoring the state saved on the kernel stack (struct pt_regs):
+ ld8.fill r8=[r16],16
+ ld8.fill r9=[r17],16
+ mov f6=f0 // clear f6
+ ;;
+ ld8.fill r10=[r16],16
+ ld8.fill r11=[r17],16
+ mov f7=f0 // clear f7
+ ;;
+ ld8 r29=[r16],16 // load cr.ipsr
+ ld8 r28=[r17],16 // load cr.iip
+ mov f8=f0 // clear f8
+ ;;
+ ld8 r30=[r16],16 // load cr.ifs
+ ld8 r25=[r17],16 // load ar.unat
+ cmp.eq p9,p0=r0,r0 // set p9 to indicate that we should restore cr.ifs
+ ;;
+ rsm psr.i | psr.ic // initiate turning off of interrupt and interruption collection
+ invala // invalidate ALAT
+ mov f9=f0 // clear f9
+
+ mov.m ar.ssd=r0 // clear ar.ssd
+ mov.m ar.csd=r0 // clear ar.csd
+ mov f10=f0 // clear f10
+ ;;
+ ld8 r26=[r16],16 // load ar.pfs
+ ld8 r27=[r17],PT(PR)-PT(AR_RSC) // load ar.rsc
+ mov f11=f0 // clear f11
+ ;;
+ ld8 r24=[r16],PT(B0)-PT(AR_RNAT) // load ar.rnat (may be garbage)
+ ld8 r31=[r17],PT(R1)-PT(PR) // load predicates
+(pUStk) add r14=IA64_TASK_THREAD_ON_USTACK_OFFSET,r13
+ ;;
+ ld8 r21=[r16],PT(R12)-PT(B0) // load b0
+ ld8.fill r1=[r17],16 // load r1
+(pUStk) mov r3=1
+ ;;
+ ld8.fill r12=[r16],16
+ ld8.fill r13=[r17],16
+ mov r2=r0 // clear r2
;;
-(pKStk) cmp4.eq p6,p0=r21,r0 // p6 <- preempt_count == 0
+ ld8 r20=[r16] // load ar.fpsr
+ ld8.fill r15=[r17] // load r15
+ mov b7=r0 // clear b7
+ ;;
+(pUStk) st1 [r14]=r3
+ movl r17=THIS_CPU(ia64_phys_stacked_size_p8)
+ ;;
+ mov r16=ar.bsp // get existing backing store pointer
+ srlz.i // ensure interruption collection is off
+ mov r14=r0 // clear r14
;;
-#else /* CONFIG_PREEMPT */
+ ld4 r17=[r17] // r17 = cpu_data->phys_stacked_size_p8
+ mov b6=r22 // restore b6
+ shr.u r18=r19,16 // get byte size of existing "dirty" partition
+(pKStk) br.cond.dpnt.many skip_rbs_switch
+ br.cond.sptk.many rbs_switch
+END(ia64_leave_syscall)
+
+GLOBAL_ENTRY(ia64_leave_kernel)
+ PT_REGS_UNWIND_INFO(0)
+ /*
+ * work.need_resched etc. mustn't get changed by this CPU before it returns to
+ * user- or fsys-mode, hence we disable interrupts early on:
+ */
+#ifdef CONFIG_PREEMPT
+ rsm psr.i // disable interrupts
+#else
(pUStk) rsm psr.i
+#endif
+ cmp.eq p0,pLvSys=r0,r0 // pLvSys=0: leave from kernel
+(pUStk) cmp.eq.unc p6,p0=r0,r0 // p6 <- pUStk
+ ;;
+.work_processed_kernel:
+#ifdef CONFIG_PREEMPT
+ adds r20=TI_PRE_COUNT+IA64_TASK_SIZE,r13
;;
-(pUStk) adds r17=TI_FLAGS+IA64_TASK_SIZE,r13
+ .pred.rel.mutex pUStk,pKStk
+(pKStk) ld4 r21=[r20] // r21 <- preempt_count
+(pUStk) mov r21=0 // r21 <- 0
;;
+(p6) cmp.eq.unc p6,p0=r21,r0 // p6 <- p6 && (r21 == 0)
#endif /* CONFIG_PREEMPT */
-.work_processed:
-(p6) ld4 r18=[r17] // load current_thread_info()->flags
- adds r2=PT(R8)+16,r12
- adds r3=PT(R9)+16,r12
+ adds r17=TI_FLAGS+IA64_TASK_SIZE,r13
;;
- // start restoring the state saved on the kernel stack (struct pt_regs):
- ld8.fill r8=[r2],16
- ld8.fill r9=[r3],16
-(p6) and r19=TIF_WORK_MASK,r18 // any work other than TIF_SYSCALL_TRACE?
+(p6) ld4 r31=[r17] // load current_thread_info()->flags
+ adds r21=PT(PR)+16,r12
+ ;;
+
+ lfetch [r21],PT(CR_IPSR)-PT(PR)
+ adds r2=PT(B6)+16,r12
+ adds r3=PT(R16)+16,r12
;;
- ld8.fill r10=[r2],16
- ld8.fill r11=[r3],16
+ lfetch [r21]
+ ld8 r28=[r2],8 // load b6
+ adds r29=PT(R24)+16,r12
+
+ ld8.fill r16=[r3],PT(AR_CSD)-PT(R16)
+ adds r30=PT(AR_CCV)+16,r12
+(p6) and r19=TIF_WORK_MASK,r31 // any work other than TIF_SYSCALL_TRACE?
+ ;;
+ ld8.fill r24=[r29]
+ ld8 r15=[r30] // load ar.ccv
(p6) cmp4.ne.unc p6,p0=r19, r0 // any special work pending?
;;
- ld8.fill r16=[r2],16
- ld8.fill r17=[r3],16
+ ld8 r29=[r2],16 // load b7
+ ld8 r30=[r3],16 // load ar.csd
(p6) br.cond.spnt .work_pending
;;
+ ld8 r31=[r2],16 // load ar.ssd
+ ld8.fill r8=[r3],16
+ ;;
+ ld8.fill r9=[r2],16
+ ld8.fill r10=[r3],PT(R17)-PT(R10)
+ ;;
+ ld8.fill r11=[r2],PT(R18)-PT(R11)
+ ld8.fill r17=[r3],16
+ ;;
ld8.fill r18=[r2],16
ld8.fill r19=[r3],16
;;
ld8.fill r20=[r2],16
ld8.fill r21=[r3],16
+ mov ar.csd=r30
+ mov ar.ssd=r31
;;
- ld8.fill r22=[r2],16
- ld8.fill r23=[r3],16
- ;;
- ld8.fill r24=[r2],16
- ld8.fill r25=[r3],16
- ;;
- ld8.fill r26=[r2],16
- ld8.fill r27=[r3],16
+ rsm psr.i | psr.ic // initiate turning off of interrupt and interruption collection
+ invala // invalidate ALAT
;;
- ld8.fill r28=[r2],16
- ld8.fill r29=[r3],16
+ ld8.fill r22=[r2],24
+ ld8.fill r23=[r3],24
+ mov b6=r28
;;
- ld8.fill r30=[r2],16
- ld8.fill r31=[r3],16
+ ld8.fill r25=[r2],16
+ ld8.fill r26=[r3],16
+ mov b7=r29
;;
- rsm psr.i | psr.ic // initiate turning off of interrupt and interruption collection
- invala // invalidate ALAT
+ ld8.fill r27=[r2],16
+ ld8.fill r28=[r3],16
;;
- ld8 r1=[r2],16 // ar.ccv
- ld8 r13=[r3],16 // ar.fpsr
+ ld8.fill r29=[r2],16
+ ld8.fill r30=[r3],24
;;
- ld8 r14=[r2],16 // b0
- ld8 r15=[r3],16+8 // b7
+ ld8.fill r31=[r2],PT(F9)-PT(R31)
+ adds r3=PT(F10)-PT(F6),r3
;;
- ldf.fill f6=[r2],32
- ldf.fill f7=[r3],32
+ ldf.fill f9=[r2],PT(F6)-PT(F9)
+ ldf.fill f10=[r3],PT(F8)-PT(F10)
;;
- ldf.fill f8=[r2],32
- ldf.fill f9=[r3],32
+ ldf.fill f6=[r2],PT(F7)-PT(F6)
;;
- mov ar.ccv=r1
- mov ar.fpsr=r13
- mov b0=r14
+ ldf.fill f7=[r2],PT(F11)-PT(F7)
+ ldf.fill f8=[r3],32
;;
srlz.i // ensure interruption collection is off
- mov b7=r15
+ mov ar.ccv=r15
+ ;;
bsw.0 // switch back to bank 0 (no stop bit required beforehand...)
;;
+ ldf.fill f11=[r2]
(pUStk) mov r18=IA64_KR(CURRENT) // Itanium 2: 12 cycle read latency
- adds r16=16,r12
- adds r17=24,r12
+ adds r16=PT(CR_IPSR)+16,r12
+ adds r17=PT(CR_IIP)+16,r12
;;
- ld8 rCRIPSR=[r16],16 // load cr.ipsr
- ld8 rCRIIP=[r17],16 // load cr.iip
+ ld8 r29=[r16],16 // load cr.ipsr
+ ld8 r28=[r17],16 // load cr.iip
;;
- ld8 rCRIFS=[r16],16 // load cr.ifs
- ld8 rARUNAT=[r17],16 // load ar.unat
- cmp.eq p9,p0=r0,r0 // set p9 to indicate that we should restore cr.ifs
+ ld8 r30=[r16],16 // load cr.ifs
+ ld8 r25=[r17],16 // load ar.unat
;;
- ld8 rARPFS=[r16],16 // load ar.pfs
- ld8 rARRSC=[r17],16 // load ar.rsc
+ ld8 r26=[r16],16 // load ar.pfs
+ ld8 r27=[r17],16 // load ar.rsc
+ cmp.eq p9,p0=r0,r0 // set p9 to indicate that we should restore cr.ifs
;;
- ld8 rARRNAT=[r16],16 // load ar.rnat (may be garbage)
- ld8 rARBSPSTORE=[r17],16 // load ar.bspstore (may be garbage)
+ ld8 r24=[r16],16 // load ar.rnat (may be garbage)
+ ld8 r23=[r17],16// load ar.bspstore (may be garbage)
;;
- ld8 rARPR=[r16],16 // load predicates
- ld8 rB6=[r17],16 // load b6
+ ld8 r31=[r16],16 // load predicates
+ ld8 r21=[r17],16 // load b0
;;
ld8 r19=[r16],16 // load ar.rsc value for "loadrs"
ld8.fill r1=[r17],16 // load r1
;;
- ld8.fill r2=[r16],16
- ld8.fill r3=[r17],16
- ;;
ld8.fill r12=[r16],16
ld8.fill r13=[r17],16
(pUStk) adds r18=IA64_TASK_THREAD_ON_USTACK_OFFSET,r18
;;
- ld8.fill r14=[r16]
- ld8.fill r15=[r17]
+ ld8 r20=[r16],16 // ar.fpsr
+ ld8.fill r15=[r17],16
+ ;;
+ ld8.fill r14=[r16],16
+ ld8.fill r2=[r17]
(pUStk) mov r17=1
;;
+ ld8.fill r3=[r16]
(pUStk) st1 [r18]=r17 // restore current->thread.on_ustack
shr.u r18=r19,16 // get byte size of existing "dirty" partition
;;
* NOTE: alloc, loadrs, and cover can't be predicated.
*/
(pNonSys) br.cond.dpnt dont_preserve_current_frame
+
+rbs_switch:
cover // add current frame into dirty partition and set cr.ifs
;;
mov r19=ar.bsp // get new backing store pointer
}{ .mib
mov loc3=0
mov loc4=0
-(pRecurse) br.call.sptk.many b6=rse_clear_invalid
+(pRecurse) br.call.sptk.many b0=rse_clear_invalid
}{ .mfi // cycle 2
mov loc5=0
}{ .mib
mov loc6=0
mov loc7=0
-(pReturn) br.ret.sptk.many b6
+(pReturn) br.ret.sptk.many b0
}
#else /* !CONFIG_ITANIUM */
alloc loc0=ar.pfs,2,Nregs-2,2,0
mov loc5=0
mov loc6=0
mov loc7=0
-(pRecurse) br.call.sptk.many b6=rse_clear_invalid
+(pRecurse) br.call.sptk.few b0=rse_clear_invalid
;;
mov loc8=0
mov loc9=0
cmp.ne pReturn,p0=r0,in1 // if recursion count != 0, we need to do a br.ret
mov loc10=0
mov loc11=0
-(pReturn) br.ret.sptk.many b6
+(pReturn) br.ret.sptk.many b0
#endif /* !CONFIG_ITANIUM */
# undef pRecurse
# undef pReturn
loadrs
;;
skip_rbs_switch:
- mov b6=rB6
- mov ar.pfs=rARPFS
-(pUStk) mov ar.bspstore=rARBSPSTORE
-(p9) mov cr.ifs=rCRIFS
- mov cr.ipsr=rCRIPSR
- mov cr.iip=rCRIIP
- ;;
-(pUStk) mov ar.rnat=rARRNAT // must happen with RSE in lazy mode
- mov ar.rsc=rARRSC
- mov ar.unat=rARUNAT
- mov pr=rARPR,-1
+(pLvSys) mov r19=r0 // clear r19 for leave_syscall, no-op otherwise
+ mov b0=r21
+ mov ar.pfs=r26
+(pUStk) mov ar.bspstore=r23
+(p9) mov cr.ifs=r30
+(pLvSys)mov r16=r0 // clear r16 for leave_syscall, no-op otherwise
+ mov cr.ipsr=r29
+ mov ar.fpsr=r20
+(pLvSys)mov r17=r0 // clear r17 for leave_syscall, no-op otherwise
+ mov cr.iip=r28
+ ;;
+(pUStk) mov ar.rnat=r24 // must happen with RSE in lazy mode
+(pLvSys)mov r18=r0 // clear r18 for leave_syscall, no-op otherwise
+ mov ar.rsc=r27
+ mov ar.unat=r25
+ mov pr=r31,-1
rfi
+ /*
+ * On entry:
+ * r20 = &current->thread_info->pre_count (if CONFIG_PREEMPT)
+ * r31 = current->thread_info->flags
+ * On exit:
+ * p6 = TRUE if work-pending-check needs to be redone
+ */
.work_pending:
- tbit.z p6,p0=r18,TIF_NEED_RESCHED // current_thread_info()->need_resched==0?
+ tbit.z p6,p0=r31,TIF_NEED_RESCHED // current_thread_info()->need_resched==0?
(p6) br.cond.sptk.few .notify
#ifdef CONFIG_PREEMPT
-(pKStk) dep r21=-1,r0,PREEMPT_ACTIVE_BIT,1
+(pKStk) dep r21=-1,r0,PREEMPT_ACTIVE_BIT,1
;;
(pKStk) st4 [r20]=r21
- ssm psr.i // enable interrupts
+ ssm psr.i // enable interrupts
#endif
-
-#if __GNUC__ < 3
- br.call.spnt.many rp=invoke_schedule
-#else
br.call.spnt.many rp=schedule
-#endif
.ret9: cmp.eq p6,p0=r0,r0 // p6 <- 1
rsm psr.i // disable interrupts
;;
- adds r17=TI_FLAGS+IA64_TASK_SIZE,r13
#ifdef CONFIG_PREEMPT
(pKStk) adds r20=TI_PRE_COUNT+IA64_TASK_SIZE,r13
;;
(pKStk) st4 [r20]=r0 // preempt_count() <- 0
#endif
- br.cond.sptk.many .work_processed // re-check
+(pLvSys)br.cond.sptk.many .work_processed_syscall // re-check
+ br.cond.sptk.many .work_processed_kernel // re-check
.notify:
br.call.spnt.many rp=notify_resume_user
.ret10: cmp.ne p6,p0=r0,r0 // p6 <- 0
- br.cond.sptk.many .work_processed // don't re-check
+(pLvSys)br.cond.sptk.many .work_processed_syscall // don't re-check
+ br.cond.sptk.many .work_processed_kernel // don't re-check
END(ia64_leave_kernel)
ENTRY(handle_syscall_error)
/*
- * Some system calls (e.g., ptrace, mmap) can return arbitrary
- * values which could lead us to mistake a negative return
- * value as a failed syscall. Those syscall must deposit
- * a non-zero value in pt_regs.r8 to indicate an error.
- * If pt_regs.r8 is zero, we assume that the call completed
- * successfully.
+ * Some system calls (e.g., ptrace, mmap) can return arbitrary values which could
+ * lead us to mistake a negative return value as a failed syscall. Those syscall
+ * must deposit a non-zero value in pt_regs.r8 to indicate an error. If
+ * pt_regs.r8 is zero, we assume that the call completed successfully.
*/
PT_REGS_UNWIND_INFO(0)
ld8 r3=[r2] // load pt_regs.r8
;;
.mem.offset 0,0; st8.spill [r2]=r9 // store errno in pt_regs.r8 and set unat bit
.mem.offset 8,0; st8.spill [r3]=r10 // store error indication in pt_regs.r10 and set unat bit
- br.cond.sptk ia64_leave_kernel
+ br.cond.sptk ia64_leave_syscall
END(handle_syscall_error)
/*
br.ret.sptk.many rp
END(ia64_invoke_schedule_tail)
-#if __GNUC__ < 3
-
- /*
- * Invoke schedule() while preserving in0-in7, which may be needed
- * in case a system call gets restarted. Note that declaring schedule()
- * with asmlinkage() is NOT enough because that will only preserve as many
- * registers as there are formal arguments.
- *
- * XXX fix me: with gcc 3.0, we won't need this anymore because syscall_linkage
- * renders all eight input registers (in0-in7) as "untouchable".
- */
-ENTRY(invoke_schedule)
- .prologue ASM_UNW_PRLG_RP|ASM_UNW_PRLG_PFS, ASM_UNW_PRLG_GRSAVE(8)
- alloc loc1=ar.pfs,8,2,0,0
- mov loc0=rp
- ;;
- .body
- br.call.sptk.many rp=schedule
-.ret14: mov ar.pfs=loc1
- mov rp=loc0
- br.ret.sptk.many rp
-END(invoke_schedule)
-
-#endif /* __GNUC__ < 3 */
-
/*
* Setup stack and call do_notify_resume_user(). Note that pSys and pNonSys need to
* be set up by the caller. We declare 8 input registers so the system call
.body
cmp.eq pNonSys,pSys=r0,r0 // sigreturn isn't a normal syscall...
;;
+ /*
+ * leave_kernel() restores f6-f11 from pt_regs, but since the streamlined
+ * syscall-entry path does not save them we save them here instead. Note: we
+ * don't need to save any other registers that are not saved by the stream-lined
+ * syscall path, because restore_sigcontext() restores them.
+ */
+ adds r16=PT(F6)+32,sp
+ adds r17=PT(F7)+32,sp
+ ;;
+ stf.spill [r16]=f6,32
+ stf.spill [r17]=f7,32
+ ;;
+ stf.spill [r16]=f8,32
+ stf.spill [r17]=f9,32
+ ;;
+ stf.spill [r16]=f10
+ stf.spill [r17]=f11
adds out0=16,sp // out0 = &sigscratch
br.call.sptk.many rp=ia64_rt_sigreturn
.ret19: .restore sp 0
data8 ia64_ni_syscall
data8 ia64_ni_syscall
data8 ia64_ni_syscall
+ data8 ia64_ni_syscall
+
+ .org sys_call_table + 8*NR_syscalls // guard against failures to increase NR_syscalls
* Preserved registers that are shared between code in ivt.S and entry.S. Be
* careful not to step on these!
*/
-#define pKStk p2 /* will leave_kernel return to kernel-stacks? */
-#define pUStk p3 /* will leave_kernel return to user-stacks? */
+#define pLvSys p1 /* set 1 if leave from syscall; otherwise, set 0*/
+#define pKStk p2 /* will leave_{kernel,syscall} return to kernel-stacks? */
+#define pUStk p3 /* will leave_{kernel,syscall} return to user-stacks? */
#define pSys p4 /* are we processing a (synchronous) system call? */
#define pNonSys p5 /* complement of pSys */
#define SW(f) (IA64_SWITCH_STACK_##f##_OFFSET)
#define PT_REGS_SAVES(off) \
+ .unwabi 3, 'i'; \
.unwabi @svr4, 'i'; \
.fframe IA64_PT_REGS_SIZE+16+(off); \
.spillsp rp, PT(CR_IIP)+16+(off); \
#include <asm/offsets.h>
#include <asm/percpu.h>
#include <asm/thread_info.h>
+#include <asm/sal.h>
+#include <asm/system.h>
+#include <asm/unistd.h>
+
+#include "entry.h"
/*
* See Documentation/ia64/fsys.txt for details on fsyscalls.
*/
ENTRY(fsys_ni_syscall)
+ .prologue
+ .altrp b6
+ .body
mov r8=ENOSYS
mov r10=-1
MCKINLEY_E9_WORKAROUND
END(fsys_ni_syscall)
ENTRY(fsys_getpid)
+ .prologue
+ .altrp b6
+ .body
add r9=TI_FLAGS+IA64_TASK_SIZE,r16
;;
ld4 r9=[r9]
END(fsys_getpid)
ENTRY(fsys_getppid)
+ .prologue
+ .altrp b6
+ .body
add r17=IA64_TASK_GROUP_LEADER_OFFSET,r16
;;
ld8 r17=[r17] // r17 = current->group_leader
END(fsys_getppid)
ENTRY(fsys_set_tid_address)
+ .prologue
+ .altrp b6
+ .body
add r9=TI_FLAGS+IA64_TASK_SIZE,r16
;;
ld4 r9=[r9]
* we ought to either skip the ITC-based interpolation or run an ntp-like
* daemon to keep the ITCs from drifting too far apart.
*/
+
ENTRY(fsys_gettimeofday)
+ .prologue
+ .altrp b6
+ .body
add r9=TI_FLAGS+IA64_TASK_SIZE,r16
movl r3=THIS_CPU(cpu_info)
mov.m r31=ar.itc // put time stamp into r31 (ITC) == now (35 cyc)
- movl r19=xtime // xtime is a timespec struct
- ;;
-
#ifdef CONFIG_SMP
movl r10=__per_cpu_offset
+ movl r2=sal_platform_features
;;
+
+ ld8 r2=[r2]
+ movl r19=xtime // xtime is a timespec struct
+
ld8 r10=[r10] // r10 <- __per_cpu_offset[0]
- movl r21=cpu_info__per_cpu
+ movl r21=THIS_CPU(cpu_info)
;;
add r10=r21, r10 // r10 <- &cpu_data(time_keeper_id)
+ tbit.nz p8,p0 = r2, IA64_SAL_PLATFORM_FEATURE_ITC_DRIFT_BIT
+(p8) br.spnt.many fsys_fallback_syscall
#else
+ ;;
mov r10=r3
+ movl r19=xtime // xtime is a timespec struct
#endif
ld4 r9=[r9]
movl r17=xtime_lock
br.ret.spnt.many b6 // return with r8 set to EINVAL
END(fsys_gettimeofday)
+ENTRY(fsys_fallback_syscall)
+ .prologue
+ .altrp b6
+ .body
+ /*
+ * We only get here from light-weight syscall handlers. Thus, we already
+ * know that r15 contains a valid syscall number. No need to re-check.
+ */
+ adds r17=-1024,r15
+ movl r14=sys_call_table
+ ;;
+ shladd r18=r17,3,r14
+ ;;
+ ld8 r18=[r18] // load normal (heavy-weight) syscall entry-point
+ mov r29=psr // read psr (12 cyc load latency)
+ mov r27=ar.rsc
+ mov r21=ar.fpsr
+ mov r26=ar.pfs
+END(fsys_fallback_syscall)
+ /* FALL THROUGH */
+GLOBAL_ENTRY(fsys_bubble_down)
+ .prologue
+ .altrp b6
+ .body
+ /*
+ * We get here for syscalls that don't have a lightweight handler. For those, we
+ * need to bubble down into the kernel and that requires setting up a minimal
+ * pt_regs structure, and initializing the CPU state more or less as if an
+ * interruption had occurred. To make syscall-restarts work, we setup pt_regs
+ * such that cr_iip points to the second instruction in syscall_via_break.
+ * Decrementing the IP hence will restart the syscall via break and not
+ * decrementing IP will return us to the caller, as usual. Note that we preserve
+ * the value of psr.pp rather than initializing it from dcr.pp. This makes it
+ * possible to distinguish fsyscall execution from other privileged execution.
+ *
+ * On entry:
+ * - normal fsyscall handler register usage, except that we also have:
+ * - r18: address of syscall entry point
+ * - r21: ar.fpsr
+ * - r26: ar.pfs
+ * - r27: ar.rsc
+ * - r29: psr
+ */
+# define PSR_PRESERVED_BITS (IA64_PSR_UP | IA64_PSR_MFL | IA64_PSR_MFH | IA64_PSR_PK \
+ | IA64_PSR_DT | IA64_PSR_PP | IA64_PSR_SP | IA64_PSR_RT \
+ | IA64_PSR_IC)
+ /*
+ * Reading psr.l gives us only bits 0-31, psr.it, and psr.mc. The rest we have
+ * to synthesize.
+ */
+# define PSR_ONE_BITS ((3 << IA64_PSR_CPL0_BIT) | (0x1 << IA64_PSR_RI_BIT) \
+ | IA64_PSR_BN)
+
+ invala
+ movl r8=PSR_ONE_BITS
+
+ mov r25=ar.unat // save ar.unat (5 cyc)
+ movl r9=PSR_PRESERVED_BITS
+
+ mov ar.rsc=0 // set enforced lazy mode, pl 0, little-endian, loadrs=0
+ movl r28=__kernel_syscall_via_break
+ ;;
+ mov r23=ar.bspstore // save ar.bspstore (12 cyc)
+ mov r31=pr // save pr (2 cyc)
+ mov r20=r1 // save caller's gp in r20
+ ;;
+ mov r2=r16 // copy current task addr to addl-addressable register
+ and r9=r9,r29
+ mov r19=b6 // save b6 (2 cyc)
+ ;;
+ mov psr.l=r9 // slam the door (17 cyc to srlz.i)
+ or r29=r8,r29 // construct cr.ipsr value to save
+ addl r22=IA64_RBS_OFFSET,r2 // compute base of RBS
+ ;;
+ mov.m r24=ar.rnat // read ar.rnat (5 cyc lat)
+ lfetch.fault.excl.nt1 [r22]
+ adds r16=IA64_TASK_THREAD_ON_USTACK_OFFSET,r2
+
+ // ensure previous insn group is issued before we stall for srlz.i:
+ ;;
+ srlz.i // ensure new psr.l has been established
+ /////////////////////////////////////////////////////////////////////////////
+ ////////// from this point on, execution is not interruptible anymore
+ /////////////////////////////////////////////////////////////////////////////
+ addl r1=IA64_STK_OFFSET-IA64_PT_REGS_SIZE,r2 // compute base of memory stack
+ cmp.ne pKStk,pUStk=r0,r0 // set pKStk <- 0, pUStk <- 1
+ ;;
+ st1 [r16]=r0 // clear current->thread.on_ustack flag
+ mov ar.bspstore=r22 // switch to kernel RBS
+ mov b6=r18 // copy syscall entry-point to b6 (7 cyc)
+ add r3=TI_FLAGS+IA64_TASK_SIZE,r2
+ ;;
+ ld4 r3=[r3] // r3 = current_thread_info()->flags
+ mov r18=ar.bsp // save (kernel) ar.bsp (12 cyc)
+ mov ar.rsc=0x3 // set eager mode, pl 0, little-endian, loadrs=0
+ br.call.sptk.many b7=ia64_syscall_setup
+ ;;
+ ssm psr.i
+ movl r2=ia64_ret_from_syscall
+ ;;
+ mov rp=r2 // set the real return addr
+ tbit.z p8,p0=r3,TIF_SYSCALL_TRACE
+
+(p8) br.call.sptk.many b6=b6 // ignore this return addr
+ br.cond.sptk ia64_trace_syscall
+END(fsys_bubble_down)
+
.rodata
.align 8
.globl fsyscall_table
+
+ data8 fsys_bubble_down
fsyscall_table:
data8 fsys_ni_syscall
- data8 fsys_fallback_syscall // exit // 1025
- data8 fsys_fallback_syscall // read
- data8 fsys_fallback_syscall // write
- data8 fsys_fallback_syscall // open
- data8 fsys_fallback_syscall // close
- data8 fsys_fallback_syscall // creat // 1030
- data8 fsys_fallback_syscall // link
- data8 fsys_fallback_syscall // unlink
- data8 fsys_fallback_syscall // execve
- data8 fsys_fallback_syscall // chdir
- data8 fsys_fallback_syscall // fchdir // 1035
- data8 fsys_fallback_syscall // utimes
- data8 fsys_fallback_syscall // mknod
- data8 fsys_fallback_syscall // chmod
- data8 fsys_fallback_syscall // chown
- data8 fsys_fallback_syscall // lseek // 1040
- data8 fsys_getpid
+ data8 0 // exit // 1025
+ data8 0 // read
+ data8 0 // write
+ data8 0 // open
+ data8 0 // close
+ data8 0 // creat // 1030
+ data8 0 // link
+ data8 0 // unlink
+ data8 0 // execve
+ data8 0 // chdir
+ data8 0 // fchdir // 1035
+ data8 0 // utimes
+ data8 0 // mknod
+ data8 0 // chmod
+ data8 0 // chown
+ data8 0 // lseek // 1040
+ data8 fsys_getpid // getpid
data8 fsys_getppid // getppid
- data8 fsys_fallback_syscall // mount
- data8 fsys_fallback_syscall // umount
- data8 fsys_fallback_syscall // setuid // 1045
- data8 fsys_fallback_syscall // getuid
- data8 fsys_fallback_syscall // geteuid
- data8 fsys_fallback_syscall // ptrace
- data8 fsys_fallback_syscall // access
- data8 fsys_fallback_syscall // sync // 1050
- data8 fsys_fallback_syscall // fsync
- data8 fsys_fallback_syscall // fdatasync
- data8 fsys_fallback_syscall // kill
- data8 fsys_fallback_syscall // rename
- data8 fsys_fallback_syscall // mkdir // 1055
- data8 fsys_fallback_syscall // rmdir
- data8 fsys_fallback_syscall // dup
- data8 fsys_fallback_syscall // pipe
- data8 fsys_fallback_syscall // times
- data8 fsys_fallback_syscall // brk // 1060
- data8 fsys_fallback_syscall // setgid
- data8 fsys_fallback_syscall // getgid
- data8 fsys_fallback_syscall // getegid
- data8 fsys_fallback_syscall // acct
- data8 fsys_fallback_syscall // ioctl // 1065
- data8 fsys_fallback_syscall // fcntl
- data8 fsys_fallback_syscall // umask
- data8 fsys_fallback_syscall // chroot
- data8 fsys_fallback_syscall // ustat
- data8 fsys_fallback_syscall // dup2 // 1070
- data8 fsys_fallback_syscall // setreuid
- data8 fsys_fallback_syscall // setregid
- data8 fsys_fallback_syscall // getresuid
- data8 fsys_fallback_syscall // setresuid
- data8 fsys_fallback_syscall // getresgid // 1075
- data8 fsys_fallback_syscall // setresgid
- data8 fsys_fallback_syscall // getgroups
- data8 fsys_fallback_syscall // setgroups
- data8 fsys_fallback_syscall // getpgid
- data8 fsys_fallback_syscall // setpgid // 1080
- data8 fsys_fallback_syscall // setsid
- data8 fsys_fallback_syscall // getsid
- data8 fsys_fallback_syscall // sethostname
- data8 fsys_fallback_syscall // setrlimit
- data8 fsys_fallback_syscall // getrlimit // 1085
- data8 fsys_fallback_syscall // getrusage
+ data8 0 // mount
+ data8 0 // umount
+ data8 0 // setuid // 1045
+ data8 0 // getuid
+ data8 0 // geteuid
+ data8 0 // ptrace
+ data8 0 // access
+ data8 0 // sync // 1050
+ data8 0 // fsync
+ data8 0 // fdatasync
+ data8 0 // kill
+ data8 0 // rename
+ data8 0 // mkdir // 1055
+ data8 0 // rmdir
+ data8 0 // dup
+ data8 0 // pipe
+ data8 0 // times
+ data8 0 // brk // 1060
+ data8 0 // setgid
+ data8 0 // getgid
+ data8 0 // getegid
+ data8 0 // acct
+ data8 0 // ioctl // 1065
+ data8 0 // fcntl
+ data8 0 // umask
+ data8 0 // chroot
+ data8 0 // ustat
+ data8 0 // dup2 // 1070
+ data8 0 // setreuid
+ data8 0 // setregid
+ data8 0 // getresuid
+ data8 0 // setresuid
+ data8 0 // getresgid // 1075
+ data8 0 // setresgid
+ data8 0 // getgroups
+ data8 0 // setgroups
+ data8 0 // getpgid
+ data8 0 // setpgid // 1080
+ data8 0 // setsid
+ data8 0 // getsid
+ data8 0 // sethostname
+ data8 0 // setrlimit
+ data8 0 // getrlimit // 1085
+ data8 0 // getrusage
data8 fsys_gettimeofday // gettimeofday
- data8 fsys_fallback_syscall // settimeofday
- data8 fsys_fallback_syscall // select
- data8 fsys_fallback_syscall // poll // 1090
- data8 fsys_fallback_syscall // symlink
- data8 fsys_fallback_syscall // readlink
- data8 fsys_fallback_syscall // uselib
- data8 fsys_fallback_syscall // swapon
- data8 fsys_fallback_syscall // swapoff // 1095
- data8 fsys_fallback_syscall // reboot
- data8 fsys_fallback_syscall // truncate
- data8 fsys_fallback_syscall // ftruncate
- data8 fsys_fallback_syscall // fchmod
- data8 fsys_fallback_syscall // fchown // 1100
- data8 fsys_fallback_syscall // getpriority
- data8 fsys_fallback_syscall // setpriority
- data8 fsys_fallback_syscall // statfs
- data8 fsys_fallback_syscall // fstatfs
- data8 fsys_fallback_syscall // gettid // 1105
- data8 fsys_fallback_syscall // semget
- data8 fsys_fallback_syscall // semop
- data8 fsys_fallback_syscall // semctl
- data8 fsys_fallback_syscall // msgget
- data8 fsys_fallback_syscall // msgsnd // 1110
- data8 fsys_fallback_syscall // msgrcv
- data8 fsys_fallback_syscall // msgctl
- data8 fsys_fallback_syscall // shmget
- data8 fsys_fallback_syscall // shmat
- data8 fsys_fallback_syscall // shmdt // 1115
- data8 fsys_fallback_syscall // shmctl
- data8 fsys_fallback_syscall // syslog
- data8 fsys_fallback_syscall // setitimer
- data8 fsys_fallback_syscall // getitimer
- data8 fsys_fallback_syscall // 1120
- data8 fsys_fallback_syscall
- data8 fsys_fallback_syscall
- data8 fsys_fallback_syscall // vhangup
- data8 fsys_fallback_syscall // lchown
- data8 fsys_fallback_syscall // remap_file_pages // 1125
- data8 fsys_fallback_syscall // wait4
- data8 fsys_fallback_syscall // sysinfo
- data8 fsys_fallback_syscall // clone
- data8 fsys_fallback_syscall // setdomainname
- data8 fsys_fallback_syscall // newuname // 1130
- data8 fsys_fallback_syscall // adjtimex
- data8 fsys_fallback_syscall
- data8 fsys_fallback_syscall // init_module
- data8 fsys_fallback_syscall // delete_module
- data8 fsys_fallback_syscall // 1135
- data8 fsys_fallback_syscall
- data8 fsys_fallback_syscall // quotactl
- data8 fsys_fallback_syscall // bdflush
- data8 fsys_fallback_syscall // sysfs
- data8 fsys_fallback_syscall // personality // 1140
- data8 fsys_fallback_syscall // afs_syscall
- data8 fsys_fallback_syscall // setfsuid
- data8 fsys_fallback_syscall // setfsgid
- data8 fsys_fallback_syscall // getdents
- data8 fsys_fallback_syscall // flock // 1145
- data8 fsys_fallback_syscall // readv
- data8 fsys_fallback_syscall // writev
- data8 fsys_fallback_syscall // pread64
- data8 fsys_fallback_syscall // pwrite64
- data8 fsys_fallback_syscall // sysctl // 1150
- data8 fsys_fallback_syscall // mmap
- data8 fsys_fallback_syscall // munmap
- data8 fsys_fallback_syscall // mlock
- data8 fsys_fallback_syscall // mlockall
- data8 fsys_fallback_syscall // mprotect // 1155
- data8 fsys_fallback_syscall // mremap
- data8 fsys_fallback_syscall // msync
- data8 fsys_fallback_syscall // munlock
- data8 fsys_fallback_syscall // munlockall
- data8 fsys_fallback_syscall // sched_getparam // 1160
- data8 fsys_fallback_syscall // sched_setparam
- data8 fsys_fallback_syscall // sched_getscheduler
- data8 fsys_fallback_syscall // sched_setscheduler
- data8 fsys_fallback_syscall // sched_yield
- data8 fsys_fallback_syscall // sched_get_priority_max // 1165
- data8 fsys_fallback_syscall // sched_get_priority_min
- data8 fsys_fallback_syscall // sched_rr_get_interval
- data8 fsys_fallback_syscall // nanosleep
- data8 fsys_fallback_syscall // nfsservctl
- data8 fsys_fallback_syscall // prctl // 1170
- data8 fsys_fallback_syscall // getpagesize
- data8 fsys_fallback_syscall // mmap2
- data8 fsys_fallback_syscall // pciconfig_read
- data8 fsys_fallback_syscall // pciconfig_write
- data8 fsys_fallback_syscall // perfmonctl // 1175
- data8 fsys_fallback_syscall // sigaltstack
- data8 fsys_fallback_syscall // rt_sigaction
- data8 fsys_fallback_syscall // rt_sigpending
- data8 fsys_fallback_syscall // rt_sigprocmask
- data8 fsys_fallback_syscall // rt_sigqueueinfo // 1180
- data8 fsys_fallback_syscall // rt_sigreturn
- data8 fsys_fallback_syscall // rt_sigsuspend
- data8 fsys_fallback_syscall // rt_sigtimedwait
- data8 fsys_fallback_syscall // getcwd
- data8 fsys_fallback_syscall // capget // 1185
- data8 fsys_fallback_syscall // capset
- data8 fsys_fallback_syscall // sendfile
- data8 fsys_fallback_syscall
- data8 fsys_fallback_syscall
- data8 fsys_fallback_syscall // socket // 1190
- data8 fsys_fallback_syscall // bind
- data8 fsys_fallback_syscall // connect
- data8 fsys_fallback_syscall // listen
- data8 fsys_fallback_syscall // accept
- data8 fsys_fallback_syscall // getsockname // 1195
- data8 fsys_fallback_syscall // getpeername
- data8 fsys_fallback_syscall // socketpair
- data8 fsys_fallback_syscall // send
- data8 fsys_fallback_syscall // sendto
- data8 fsys_fallback_syscall // recv // 1200
- data8 fsys_fallback_syscall // recvfrom
- data8 fsys_fallback_syscall // shutdown
- data8 fsys_fallback_syscall // setsockopt
- data8 fsys_fallback_syscall // getsockopt
- data8 fsys_fallback_syscall // sendmsg // 1205
- data8 fsys_fallback_syscall // recvmsg
- data8 fsys_fallback_syscall // pivot_root
- data8 fsys_fallback_syscall // mincore
- data8 fsys_fallback_syscall // madvise
- data8 fsys_fallback_syscall // newstat // 1210
- data8 fsys_fallback_syscall // newlstat
- data8 fsys_fallback_syscall // newfstat
- data8 fsys_fallback_syscall // clone2
- data8 fsys_fallback_syscall // getdents64
- data8 fsys_fallback_syscall // getunwind // 1215
- data8 fsys_fallback_syscall // readahead
- data8 fsys_fallback_syscall // setxattr
- data8 fsys_fallback_syscall // lsetxattr
- data8 fsys_fallback_syscall // fsetxattr
- data8 fsys_fallback_syscall // getxattr // 1220
- data8 fsys_fallback_syscall // lgetxattr
- data8 fsys_fallback_syscall // fgetxattr
- data8 fsys_fallback_syscall // listxattr
- data8 fsys_fallback_syscall // llistxattr
- data8 fsys_fallback_syscall // flistxattr // 1225
- data8 fsys_fallback_syscall // removexattr
- data8 fsys_fallback_syscall // lremovexattr
- data8 fsys_fallback_syscall // fremovexattr
- data8 fsys_fallback_syscall // tkill
- data8 fsys_fallback_syscall // futex // 1230
- data8 fsys_fallback_syscall // sched_setaffinity
- data8 fsys_fallback_syscall // sched_getaffinity
+ data8 0 // settimeofday
+ data8 0 // select
+ data8 0 // poll // 1090
+ data8 0 // symlink
+ data8 0 // readlink
+ data8 0 // uselib
+ data8 0 // swapon
+ data8 0 // swapoff // 1095
+ data8 0 // reboot
+ data8 0 // truncate
+ data8 0 // ftruncate
+ data8 0 // fchmod
+ data8 0 // fchown // 1100
+ data8 0 // getpriority
+ data8 0 // setpriority
+ data8 0 // statfs
+ data8 0 // fstatfs
+ data8 0 // gettid // 1105
+ data8 0 // semget
+ data8 0 // semop
+ data8 0 // semctl
+ data8 0 // msgget
+ data8 0 // msgsnd // 1110
+ data8 0 // msgrcv
+ data8 0 // msgctl
+ data8 0 // shmget
+ data8 0 // shmat
+ data8 0 // shmdt // 1115
+ data8 0 // shmctl
+ data8 0 // syslog
+ data8 0 // setitimer
+ data8 0 // getitimer
+ data8 0 // 1120
+ data8 0
+ data8 0
+ data8 0 // vhangup
+ data8 0 // lchown
+ data8 0 // remap_file_pages // 1125
+ data8 0 // wait4
+ data8 0 // sysinfo
+ data8 0 // clone
+ data8 0 // setdomainname
+ data8 0 // newuname // 1130
+ data8 0 // adjtimex
+ data8 0
+ data8 0 // init_module
+ data8 0 // delete_module
+ data8 0 // 1135
+ data8 0
+ data8 0 // quotactl
+ data8 0 // bdflush
+ data8 0 // sysfs
+ data8 0 // personality // 1140
+ data8 0 // afs_syscall
+ data8 0 // setfsuid
+ data8 0 // setfsgid
+ data8 0 // getdents
+ data8 0 // flock // 1145
+ data8 0 // readv
+ data8 0 // writev
+ data8 0 // pread64
+ data8 0 // pwrite64
+ data8 0 // sysctl // 1150
+ data8 0 // mmap
+ data8 0 // munmap
+ data8 0 // mlock
+ data8 0 // mlockall
+ data8 0 // mprotect // 1155
+ data8 0 // mremap
+ data8 0 // msync
+ data8 0 // munlock
+ data8 0 // munlockall
+ data8 0 // sched_getparam // 1160
+ data8 0 // sched_setparam
+ data8 0 // sched_getscheduler
+ data8 0 // sched_setscheduler
+ data8 0 // sched_yield
+ data8 0 // sched_get_priority_max // 1165
+ data8 0 // sched_get_priority_min
+ data8 0 // sched_rr_get_interval
+ data8 0 // nanosleep
+ data8 0 // nfsservctl
+ data8 0 // prctl // 1170
+ data8 0 // getpagesize
+ data8 0 // mmap2
+ data8 0 // pciconfig_read
+ data8 0 // pciconfig_write
+ data8 0 // perfmonctl // 1175
+ data8 0 // sigaltstack
+ data8 0 // rt_sigaction
+ data8 0 // rt_sigpending
+ data8 0 // rt_sigprocmask
+ data8 0 // rt_sigqueueinfo // 1180
+ data8 0 // rt_sigreturn
+ data8 0 // rt_sigsuspend
+ data8 0 // rt_sigtimedwait
+ data8 0 // getcwd
+ data8 0 // capget // 1185
+ data8 0 // capset
+ data8 0 // sendfile
+ data8 0
+ data8 0
+ data8 0 // socket // 1190
+ data8 0 // bind
+ data8 0 // connect
+ data8 0 // listen
+ data8 0 // accept
+ data8 0 // getsockname // 1195
+ data8 0 // getpeername
+ data8 0 // socketpair
+ data8 0 // send
+ data8 0 // sendto
+ data8 0 // recv // 1200
+ data8 0 // recvfrom
+ data8 0 // shutdown
+ data8 0 // setsockopt
+ data8 0 // getsockopt
+ data8 0 // sendmsg // 1205
+ data8 0 // recvmsg
+ data8 0 // pivot_root
+ data8 0 // mincore
+ data8 0 // madvise
+ data8 0 // newstat // 1210
+ data8 0 // newlstat
+ data8 0 // newfstat
+ data8 0 // clone2
+ data8 0 // getdents64
+ data8 0 // getunwind // 1215
+ data8 0 // readahead
+ data8 0 // setxattr
+ data8 0 // lsetxattr
+ data8 0 // fsetxattr
+ data8 0 // getxattr // 1220
+ data8 0 // lgetxattr
+ data8 0 // fgetxattr
+ data8 0 // listxattr
+ data8 0 // llistxattr
+ data8 0 // flistxattr // 1225
+ data8 0 // removexattr
+ data8 0 // lremovexattr
+ data8 0 // fremovexattr
+ data8 0 // tkill
+ data8 0 // futex // 1230
+ data8 0 // sched_setaffinity
+ data8 0 // sched_getaffinity
data8 fsys_set_tid_address // set_tid_address
- data8 fsys_fallback_syscall // unused
- data8 fsys_fallback_syscall // unused // 1235
- data8 fsys_fallback_syscall // exit_group
- data8 fsys_fallback_syscall // lookup_dcookie
- data8 fsys_fallback_syscall // io_setup
- data8 fsys_fallback_syscall // io_destroy
- data8 fsys_fallback_syscall // io_getevents // 1240
- data8 fsys_fallback_syscall // io_submit
- data8 fsys_fallback_syscall // io_cancel
- data8 fsys_fallback_syscall // epoll_create
- data8 fsys_fallback_syscall // epoll_ctl
- data8 fsys_fallback_syscall // epoll_wait // 1245
- data8 fsys_fallback_syscall // restart_syscall
- data8 fsys_fallback_syscall // semtimedop
- data8 fsys_fallback_syscall // timer_create
- data8 fsys_fallback_syscall // timer_settime
- data8 fsys_fallback_syscall // timer_gettime // 1250
- data8 fsys_fallback_syscall // timer_getoverrun
- data8 fsys_fallback_syscall // timer_delete
- data8 fsys_fallback_syscall // clock_settime
- data8 fsys_fallback_syscall // clock_gettime
- data8 fsys_fallback_syscall // clock_getres // 1255
- data8 fsys_fallback_syscall // clock_nanosleep
- data8 fsys_fallback_syscall
- data8 fsys_fallback_syscall
- data8 fsys_fallback_syscall
- data8 fsys_fallback_syscall // 1260
- data8 fsys_fallback_syscall
- data8 fsys_fallback_syscall
- data8 fsys_fallback_syscall
- data8 fsys_fallback_syscall
- data8 fsys_fallback_syscall // 1265
- data8 fsys_fallback_syscall
- data8 fsys_fallback_syscall
- data8 fsys_fallback_syscall
- data8 fsys_fallback_syscall
- data8 fsys_fallback_syscall // 1270
- data8 fsys_fallback_syscall
- data8 fsys_fallback_syscall
- data8 fsys_fallback_syscall
- data8 fsys_fallback_syscall
- data8 fsys_fallback_syscall // 1275
- data8 fsys_fallback_syscall
- data8 fsys_fallback_syscall
- data8 fsys_fallback_syscall
+ data8 0 // unused
+ data8 0 // unused // 1235
+ data8 0 // exit_group
+ data8 0 // lookup_dcookie
+ data8 0 // io_setup
+ data8 0 // io_destroy
+ data8 0 // io_getevents // 1240
+ data8 0 // io_submit
+ data8 0 // io_cancel
+ data8 0 // epoll_create
+ data8 0 // epoll_ctl
+ data8 0 // epoll_wait // 1245
+ data8 0 // restart_syscall
+ data8 0 // semtimedop
+ data8 0 // timer_create
+ data8 0 // timer_settime
+ data8 0 // timer_gettime // 1250
+ data8 0 // timer_getoverrun
+ data8 0 // timer_delete
+ data8 0 // clock_settime
+ data8 0 // clock_gettime
+ data8 0 // clock_getres // 1255
+ data8 0 // clock_nanosleep
+ data8 0
+ data8 0
+ data8 0
+ data8 0 // 1260
+ data8 0
+ data8 0
+ data8 0
+ data8 0
+ data8 0 // 1265
+ data8 0
+ data8 0
+ data8 0
+ data8 0
+ data8 0 // 1270
+ data8 0
+ data8 0
+ data8 0
+ data8 0
+ data8 0 // 1275
+ data8 0
+ data8 0
+ data8 0
+ data8 0
+
+ .org fsyscall_table + 8*NR_syscalls // guard against failures to increase NR_syscalls
--- /dev/null
+ .section .data.gate, "ax"
+
+ .incbin "arch/ia64/kernel/gate.so"
* David Mosberger-Tang <davidm@hpl.hp.com>
*/
+#include <linux/config.h>
+
#include <asm/asmmacro.h>
+#include <asm/errno.h>
#include <asm/offsets.h>
#include <asm/sigcontext.h>
#include <asm/system.h>
#include <asm/unistd.h>
-#include <asm/page.h>
-
- .section .text.gate, "ax"
-.start_gate:
+/*
+ * We can't easily refer to symbols inside the kernel. To avoid full runtime relocation,
+ * complications with the linker (which likes to create PLT stubs for branches
+ * to targets outside the shared object) and to avoid multi-phase kernel builds, we
+ * simply create minimalistic "patch lists" in special ELF sections.
+ */
+ .section ".data.patch.fsyscall_table", "a"
+ .previous
+#define LOAD_FSYSCALL_TABLE(reg) \
+[1:] movl reg=0; \
+ .xdata4 ".data.patch.fsyscall_table", 1b-.
-#ifdef CONFIG_FSYS
+ .section ".data.patch.brl_fsys_bubble_down", "a"
+ .previous
+#define BRL_COND_FSYS_BUBBLE_DOWN(pr) \
+[1:](pr)brl.cond.sptk 0; \
+ .xdata4 ".data.patch.brl_fsys_bubble_down", 1b-.
-#include <asm/errno.h>
+GLOBAL_ENTRY(__kernel_syscall_via_break)
+ .prologue
+ .altrp b6
+ .body
+ /*
+ * Note: for (fast) syscall restart to work, the break instruction must be
+ * the first one in the bundle addressed by syscall_via_break.
+ */
+{ .mib
+ break 0x100000
+ nop.i 0
+ br.ret.sptk.many b6
+}
+END(__kernel_syscall_via_break)
/*
* On entry:
* all other "scratch" registers: undefined
* all "preserved" registers: same as on entry
*/
-GLOBAL_ENTRY(syscall_via_epc)
+
+GLOBAL_ENTRY(__kernel_syscall_via_epc)
.prologue
.altrp b6
.body
epc
}
;;
- rsm psr.be
- movl r18=fsyscall_table
+ rsm psr.be // note: on McKinley "rsm psr.be/srlz.d" is slightly faster than "rum psr.be"
+ LOAD_FSYSCALL_TABLE(r14)
- mov r16=IA64_KR(CURRENT)
- mov r19=255
+ mov r16=IA64_KR(CURRENT) // 12 cycle read latency
+ mov r19=NR_syscalls-1
;;
- shladd r18=r17,3,r18
- cmp.geu p6,p0=r19,r17 // (syscall > 0 && syscall <= 1024+255)?
+ shladd r18=r17,3,r14
+
+ srlz.d
+ cmp.ne p8,p0=r0,r0 // p8 <- FALSE
+ /* Note: if r17 is a NaT, p6 will be set to zero. */
+ cmp.geu p6,p7=r19,r17 // (syscall > 0 && syscall < 1024+NR_syscalls)?
;;
- srlz.d // ensure little-endian byteorder is in effect
(p6) ld8 r18=[r18]
+ mov r29=psr // read psr (12 cyc load latency)
+ add r14=-8,r14 // r14 <- addr of fsys_bubble_down entry
;;
(p6) mov b7=r18
+(p6) tbit.z p8,p0=r18,0
+(p8) br.dptk.many b7
+
+ mov r27=ar.rsc
+ mov r21=ar.fpsr
+ mov r26=ar.pfs
+/*
+ * brl.cond doesn't work as intended because the linker would convert this branch
+ * into a branch to a PLT. Perhaps there will be a way to avoid this with some
+ * future version of the linker. In the meantime, we just use an indirect branch
+ * instead.
+ */
+#ifdef CONFIG_ITANIUM
+(p6) ld8 r14=[r14] // r14 <- fsys_bubble_down
+ ;;
+(p6) mov b7=r14
(p6) br.sptk.many b7
+#else
+ BRL_COND_FSYS_BUBBLE_DOWN(p6)
+#endif
mov r10=-1
mov r8=ENOSYS
MCKINLEY_E9_WORKAROUND
br.ret.sptk.many b6
-END(syscall_via_epc)
-
-GLOBAL_ENTRY(syscall_via_break)
- .prologue
- .altrp b6
- .body
- break 0x100000
- br.ret.sptk.many b6
-END(syscall_via_break)
-
-GLOBAL_ENTRY(fsys_fallback_syscall)
- /*
- * It would be better/fsyser to do the SAVE_MIN magic directly here, but for now
- * we simply fall back on doing a system-call via break. Good enough
- * to get started. (Note: we have to do this through the gate page again, since
- * the br.ret will switch us back to user-level privilege.)
- *
- * XXX Move this back to fsys.S after changing it over to avoid break 0x100000.
- */
- movl r2=(syscall_via_break - .start_gate) + GATE_ADDR
- ;;
- MCKINLEY_E9_WORKAROUND
- mov b7=r2
- br.ret.sptk.many b7
-END(fsys_fallback_syscall)
-
-#endif /* CONFIG_FSYS */
+END(__kernel_syscall_via_epc)
# define ARG0_OFF (16 + IA64_SIGFRAME_ARG0_OFFSET)
# define ARG1_OFF (16 + IA64_SIGFRAME_ARG1_OFFSET)
*/
#define SIGTRAMP_SAVES \
- .unwabi @svr4, 's'; /* mark this as a sigtramp handler (saves scratch regs) */ \
+ .unwabi 3, 's'; /* mark this as a sigtramp handler (saves scratch regs) */ \
+ .unwabi @svr4, 's'; /* backwards compatibility with old unwinders (remove in v2.7) */ \
.savesp ar.unat, UNAT_OFF+SIGCONTEXT_OFF; \
.savesp ar.fpsr, FPSR_OFF+SIGCONTEXT_OFF; \
.savesp pr, PR_OFF+SIGCONTEXT_OFF; \
.savesp ar.pfs, CFM_OFF+SIGCONTEXT_OFF; \
.vframesp SP_OFF+SIGCONTEXT_OFF
-GLOBAL_ENTRY(ia64_sigtramp)
+GLOBAL_ENTRY(__kernel_sigtramp)
// describe the state that is active when we get here:
.prologue
SIGTRAMP_SAVES
mov ar.rsc=0xf // (will be restored later on from sc_ar_rsc)
// invala not necessary as that will happen when returning to user-mode
br.cond.sptk back_from_restore_rbs
-END(ia64_sigtramp)
+END(__kernel_sigtramp)
--- /dev/null
+/*
+ * Linker script for gate DSO. The gate pages are an ELF shared object prelinked to its
+ * virtual address, with only one read-only segment and one execute-only segment (both fit
+ * in one page). This script controls its layout.
+ */
+
+#include <linux/config.h>
+
+#include <asm/system.h>
+
+SECTIONS
+{
+  . = GATE_ADDR + SIZEOF_HEADERS;
+
+  .hash : { *(.hash) } :readable
+  .dynsym : { *(.dynsym) }
+  .dynstr : { *(.dynstr) }
+  .gnu.version : { *(.gnu.version) }
+  .gnu.version_d : { *(.gnu.version_d) }
+  .gnu.version_r : { *(.gnu.version_r) }
+  .dynamic : { *(.dynamic) } :readable :dynamic
+
+  /*
+   * This linker script is used both with -r and with -shared. For the layouts to match,
+   * we need to skip more than enough space for the dynamic symbol table et al. If this
+   * amount is insufficient, ld -shared will barf. Just increase it here.
+   */
+  . = GATE_ADDR + 0x500;
+
+  .data.patch : {
+    __start_gate_mckinley_e9_patchlist = .;
+    *(.data.patch.mckinley_e9)
+    __end_gate_mckinley_e9_patchlist = .;
+
+    __start_gate_vtop_patchlist = .;
+    *(.data.patch.vtop)
+    __end_gate_vtop_patchlist = .;
+
+    __start_gate_fsyscall_patchlist = .;
+    *(.data.patch.fsyscall_table)
+    __end_gate_fsyscall_patchlist = .;
+
+    __start_gate_brl_fsys_bubble_down_patchlist = .;
+    *(.data.patch.brl_fsys_bubble_down)
+    __end_gate_brl_fsys_bubble_down_patchlist = .;
+  } :readable
+  .IA_64.unwind_info : { *(.IA_64.unwind_info*) }
+  .IA_64.unwind : { *(.IA_64.unwind*) } :readable :unwind
+#ifdef HAVE_BUGGY_SEGREL
+  .text (GATE_ADDR + PAGE_SIZE) : { *(.text) *(.text.*) } :readable
+#else
+  . = ALIGN (PERCPU_PAGE_SIZE) + (. & (PERCPU_PAGE_SIZE - 1));
+  .text : { *(.text) *(.text.*) } :epc
+#endif
+
+  /* Writable/data sections must not end up in the gate DSO: its program
+     headers below define only PF_R and PF_X (no writable) segments. */
+  /DISCARD/ : {
+	*(.got.plt) *(.got)
+	*(.data .data.* .gnu.linkonce.d.*)
+	*(.dynbss)
+	*(.bss .bss.* .gnu.linkonce.b.*)
+	*(__ex_table)
+  }
+}
+
+/*
+ * We must supply the ELF program headers explicitly to get just one
+ * PT_LOAD segment, and set the flags explicitly to make segments read-only.
+ */
+PHDRS
+{
+  readable PT_LOAD FILEHDR PHDRS FLAGS(4);	/* PF_R */
+#ifndef HAVE_BUGGY_SEGREL
+  epc PT_LOAD FILEHDR PHDRS FLAGS(1);		/* PF_X */
+#endif
+  dynamic PT_DYNAMIC FLAGS(4);			/* PF_R */
+  unwind 0x70000001;				/* PT_IA_64_UNWIND, but ld doesn't match the name */
+}
+
+/*
+ * This controls what symbols we export from the DSO.
+ */
+VERSION
+{
+  LINUX_2.5 {
+    global:
+	__kernel_syscall_via_break;
+	__kernel_syscall_via_epc;
+	__kernel_sigtramp;
+
+    local: *;
+  };
+}
+
+/* The ELF entry point can be used to set the AT_SYSINFO value. */
+ENTRY(__kernel_syscall_via_epc)
mov r4=r0
.body
- /*
- * Initialize the region register for region 7 and install a translation register
- * that maps the kernel's text and data:
- */
rsm psr.i | psr.ic
- mov r16=((ia64_rid(IA64_REGION_ID_KERNEL, PAGE_OFFSET) << 8) | (IA64_GRANULE_SHIFT << 2))
;;
srlz.i
+ ;;
+ /*
+ * Initialize kernel region registers:
+ * rr[5]: VHPT enabled, page size = PAGE_SHIFT
+ * rr[6]: VHPT disabled, page size = IA64_GRANULE_SHIFT
+ * rr[7]: VHPT disabled, page size = IA64_GRANULE_SHIFT
+ */
+ mov r16=((ia64_rid(IA64_REGION_ID_KERNEL, (5<<61)) << 8) | (PAGE_SHIFT << 2) | 1)
+ movl r17=(5<<61)
+ mov r18=((ia64_rid(IA64_REGION_ID_KERNEL, (6<<61)) << 8) | (IA64_GRANULE_SHIFT << 2))
+ movl r19=(6<<61)
+ mov r20=((ia64_rid(IA64_REGION_ID_KERNEL, (7<<61)) << 8) | (IA64_GRANULE_SHIFT << 2))
+ movl r21=(7<<61)
+ ;;
+ mov rr[r17]=r16
+ mov rr[r19]=r18
+ mov rr[r21]=r20
+ ;;
+ /*
+ * Now pin mappings into the TLB for kernel text and data
+ */
mov r18=KERNEL_TR_PAGE_SHIFT<<2
movl r17=KERNEL_START
;;
- mov rr[r17]=r16
mov cr.itir=r18
mov cr.ifa=r17
mov r16=IA64_TR_KERNEL
- movl r18=((1 << KERNEL_TR_PAGE_SHIFT) | PAGE_KERNEL)
+ mov r3=ip
+ movl r18=PAGE_KERNEL
+ ;;
+ dep r2=0,r3,0,KERNEL_TR_PAGE_SHIFT
+ ;;
+ or r18=r2,r18
;;
srlz.i
;;
mov ar.fpsr=r2
;;
-#ifdef CONFIG_IA64_EARLY_PRINTK
- mov r3=(6<<8) | (IA64_GRANULE_SHIFT<<2)
- movl r2=6<<61
- ;;
- mov rr[r2]=r3
- ;;
- srlz.i
- ;;
-#endif
-
#define isAP p2 // are we an Application Processor?
#define isBP p3 // are we the Bootstrap Processor?
movl r2=init_thread_union
cmp.eq isBP,isAP=r0,r0
#endif
- mov r16=KERNEL_TR_PAGE_NUM
;;
+ tpa r3=r2 // r3 == phys addr of task struct
+ // load mapping for stack (virtaddr in r2, physaddr in r3)
+ rsm psr.ic
+ movl r17=PAGE_KERNEL
+ ;;
+ srlz.d
+ dep r18=0,r3,0,12
+ ;;
+ or r18=r17,r18
+ dep r2=-1,r3,61,3 // IMVA of task
+ ;;
+ mov r17=rr[r2]
+ shr.u r16=r3,IA64_GRANULE_SHIFT
+ ;;
+ dep r17=0,r17,8,24
+ ;;
+ mov cr.itir=r17
+ mov cr.ifa=r2
+
+ mov r19=IA64_TR_CURRENT_STACK
+ ;;
+ itr.d dtr[r19]=r18
+ ;;
+ ssm psr.ic
+ srlz.d
+ ;;
// load the "current" pointer (r13) and ar.k6 with the current task
mov IA64_KR(CURRENT)=r2 // virtual address
- // initialize k4 to a safe value (64-128MB is mapped by TR_KERNEL)
mov IA64_KR(CURRENT_STACK)=r16
mov r13=r2
/*
END(__ia64_init_fpu)
/*
- * Switch execution mode from virtual to physical or vice versa.
+ * Switch execution mode from virtual to physical
*
* Inputs:
* r16 = new psr to establish
*
* Note: RSE must already be in enforced lazy mode
*/
-GLOBAL_ENTRY(ia64_switch_mode)
+GLOBAL_ENTRY(ia64_switch_mode_phys)
{
alloc r2=ar.pfs,0,0,0,0
rsm psr.i | psr.ic // disable interrupts and interrupt collection
{
flushrs // must be first insn in group
srlz.i
- shr.u r19=r15,61 // r19 <- top 3 bits of current IP
}
;;
mov cr.ipsr=r16 // set new PSR
- add r3=1f-ia64_switch_mode,r15
- xor r15=0x7,r19 // flip the region bits
+ add r3=1f-ia64_switch_mode_phys,r15
mov r17=ar.bsp
mov r14=rp // get return address into a general register
+ ;;
- // switch RSE backing store:
+ // going to physical mode, use tpa to translate virt->phys
+ tpa r17=r17
+ tpa r3=r3
+ tpa sp=sp
+ tpa r14=r14
;;
- dep r17=r15,r17,61,3 // make ar.bsp physical or virtual
+
mov r18=ar.rnat // save ar.rnat
- ;;
mov ar.bspstore=r17 // this steps on ar.rnat
- dep r3=r15,r3,61,3 // make rfi return address physical or virtual
+ mov cr.iip=r3
+ mov cr.ifs=r0
;;
+ mov ar.rnat=r18 // restore ar.rnat
+ rfi // must be last insn in group
+ ;;
+1: mov rp=r14
+ br.ret.sptk.many rp
+END(ia64_switch_mode_phys)
+
+/*
+ * Switch execution mode from physical to virtual
+ *
+ * Inputs:
+ * r16 = new psr to establish
+ *
+ * Note: RSE must already be in enforced lazy mode
+ */
+GLOBAL_ENTRY(ia64_switch_mode_virt)
+ {
+ alloc r2=ar.pfs,0,0,0,0
+ rsm psr.i | psr.ic // disable interrupts and interrupt collection
+ mov r15=ip
+ }
+ ;;
+ {
+ flushrs // must be first insn in group
+ srlz.i
+ }
+ ;;
+ mov cr.ipsr=r16 // set new PSR
+ add r3=1f-ia64_switch_mode_virt,r15
+
+ mov r17=ar.bsp
+ mov r14=rp // get return address into a general register
+ ;;
+
+ // going to virtual
+ // - for code addresses, set upper bits of addr to KERNEL_START
+	// - for stack addresses, set upper 3 bits to 0xe.... Don't change any of the
+ // lower bits since we want it to stay identity mapped
+ movl r18=KERNEL_START
+ dep r3=0,r3,KERNEL_TR_PAGE_SHIFT,64-KERNEL_TR_PAGE_SHIFT
+ dep r14=0,r14,KERNEL_TR_PAGE_SHIFT,64-KERNEL_TR_PAGE_SHIFT
+ dep r17=-1,r17,61,3
+ dep sp=-1,sp,61,3
+ ;;
+ or r3=r3,r18
+ or r14=r14,r18
+ ;;
+
+ mov r18=ar.rnat // save ar.rnat
+ mov ar.bspstore=r17 // this steps on ar.rnat
mov cr.iip=r3
mov cr.ifs=r0
- dep sp=r15,sp,61,3 // make stack pointer physical or virtual
;;
mov ar.rnat=r18 // restore ar.rnat
- dep r14=r15,r14,61,3 // make function return address physical or virtual
rfi // must be last insn in group
;;
1: mov rp=r14
br.ret.sptk.many rp
-END(ia64_switch_mode)
+END(ia64_switch_mode_virt)
#ifdef CONFIG_IA64_BRL_EMU
* r29 - available for use.
* r30 - available for use.
* r31 - address of lock, available for use.
- * b7 - return address
+ * b6 - return address
* p14 - available for use.
*
* If you patch this code to use more registers, do not forget to update
#include <asm/processor.h>
EXPORT_SYMBOL(cpu_info__per_cpu);
+#ifdef CONFIG_SMP
+EXPORT_SYMBOL(__per_cpu_offset);
+#endif
EXPORT_SYMBOL(kernel_thread);
#include <asm/system.h>
EXPORT_SYMBOL(smp_call_function);
EXPORT_SYMBOL(smp_call_function_single);
EXPORT_SYMBOL(cpu_online_map);
+EXPORT_SYMBOL(phys_cpu_present_map);
EXPORT_SYMBOL(ia64_cpu_to_sapicid);
#else /* !CONFIG_SMP */
EXPORT_SYMBOL_NOVERS(__moddi3);
EXPORT_SYMBOL_NOVERS(__umoddi3);
+#if defined(CONFIG_MD_RAID5) || defined(CONFIG_MD_RAID5_MODULE)
+extern void xor_ia64_2(void);
+extern void xor_ia64_3(void);
+extern void xor_ia64_4(void);
+extern void xor_ia64_5(void);
+
+EXPORT_SYMBOL_NOVERS(xor_ia64_2);
+EXPORT_SYMBOL_NOVERS(xor_ia64_3);
+EXPORT_SYMBOL_NOVERS(xor_ia64_4);
+EXPORT_SYMBOL_NOVERS(xor_ia64_5);
+#endif
+
extern unsigned long ia64_iobase;
EXPORT_SYMBOL(ia64_iobase);
extern struct proc_dir_entry *efi_dir;
EXPORT_SYMBOL(efi_dir);
-#include <linux/pm.h>
-EXPORT_SYMBOL(pm_idle);
-EXPORT_SYMBOL(pm_power_off);
-
#include <asm/machvec.h>
#ifdef CONFIG_IA64_GENERIC
EXPORT_SYMBOL(ia64_mv);
#endif
EXPORT_SYMBOL(machvec_noop);
+EXPORT_SYMBOL(machvec_memory_fence);
+EXPORT_SYMBOL(zero_page_memmap_ptr);
#ifdef CONFIG_PERFMON
#include <asm/perfmon.h>
-EXPORT_SYMBOL(pfm_install_alternate_syswide_subsystem);
-EXPORT_SYMBOL(pfm_remove_alternate_syswide_subsystem);
+EXPORT_SYMBOL(pfm_register_buffer_fmt);
+EXPORT_SYMBOL(pfm_unregister_buffer_fmt);
+EXPORT_SYMBOL(pfm_mod_fast_read_pmds);
+EXPORT_SYMBOL(pfm_mod_read_pmds);
+EXPORT_SYMBOL(pfm_mod_write_pmcs);
#endif
#ifdef CONFIG_NUMA
EXPORT_SYMBOL(unw_access_ar);
EXPORT_SYMBOL(unw_access_pr);
-#if __GNUC__ < 3 || (__GNUC__ == 3 && __GNUC_MINOR__ < 4)
-extern void ia64_spinlock_contention_pre3_4 (void);
+#ifdef CONFIG_SMP
+# if __GNUC__ < 3 || (__GNUC__ == 3 && __GNUC_MINOR__ < 4)
+/*
+ * This is not a normal routine and we don't want a function descriptor for it, so we use
+ * a fake declaration here.
+ */
+extern char ia64_spinlock_contention_pre3_4;
EXPORT_SYMBOL(ia64_spinlock_contention_pre3_4);
-#else
-extern void ia64_spinlock_contention (void);
+# else
+/*
+ * This is not a normal routine and we don't want a function descriptor for it, so we use
+ * a fake declaration here.
+ */
+extern char ia64_spinlock_contention;
EXPORT_SYMBOL(ia64_spinlock_contention);
+# endif
#endif
+
+EXPORT_SYMBOL(ia64_max_iommu_merge_mask);
+
+#include <linux/pm.h>
+EXPORT_SYMBOL(pm_idle);
unsigned long stack[KERNEL_STACK_SIZE/sizeof (unsigned long)];
} init_thread_union __attribute__((section(".data.init_task"))) = {{
.task = INIT_TASK(init_thread_union.s.task),
- .thread_info = INIT_THREAD_INFO(init_thread_union.s.thread_info)
+ .thread_info = INIT_THREAD_INFO(init_thread_union.s.task)
}};
asm (".global init_task; init_task = init_thread_union");
/*
* Controller mappings for all interrupt sources:
*/
-irq_desc_t irq_desc[NR_IRQS] __cacheline_aligned = {
+irq_desc_t _irq_desc[NR_IRQS] __cacheline_aligned = {
[0 ... NR_IRQS-1] = {
.status = IRQ_DISABLED,
.handler = &no_irq_type,
{
int status = 1; /* Force the "do bottom halves" bit */
int retval = 0;
- struct irqaction *first_action = action;
if (!(action->flags & SA_INTERRUPT))
local_irq_enable();
if (status & SA_SAMPLE_RANDOM)
add_interrupt_randomness(irq);
local_irq_disable();
- if (retval != 1) {
- static int count = 100;
- if (count) {
- count--;
- if (retval) {
- printk("irq event %d: bogus retval mask %x\n",
- irq, retval);
- } else {
- printk("irq %d: nobody cared!\n", irq);
- }
- dump_stack();
- printk("handlers:\n");
- action = first_action;
- do {
- printk("[<%p>]", action->handler);
- print_symbol(" (%s)",
- (unsigned long)action->handler);
- printk("\n");
- action = action->next;
- } while (action);
- }
+ return retval;
+}
+
+/*
+ * Dump diagnostics for a misbehaving IRQ: report either a bogus handler
+ * return value (neither IRQ_HANDLED nor IRQ_NONE) or an interrupt that
+ * nobody claimed, then list every handler registered for this line.
+ * NOTE(review): the do/while dereferences desc->action unconditionally --
+ * assumes at least one handler is registered; confirm against callers.
+ */
+static void __report_bad_irq(int irq, irq_desc_t *desc, irqreturn_t action_ret)
+{
+	struct irqaction *action;
+
+	if (action_ret != IRQ_HANDLED && action_ret != IRQ_NONE) {
+		printk(KERN_ERR "irq event %d: bogus return value %x\n",
+				irq, action_ret);
+	} else {
+		printk(KERN_ERR "irq %d: nobody cared!\n", irq);
+	}
+	dump_stack();
+	printk(KERN_ERR "handlers:\n");
+	action = desc->action;
+	do {
+		printk(KERN_ERR "[<%p>]", action->handler);
+		print_symbol(" (%s)",
+			(unsigned long)action->handler);
+		printk("\n");
+		action = action->next;
+	} while (action);
+}
+
+/*
+ * Rate-limited wrapper around __report_bad_irq(): report at most the
+ * first 100 bad-IRQ events, then stay silent for the rest of the boot.
+ */
+static void report_bad_irq(int irq, irq_desc_t *desc, irqreturn_t action_ret)
+{
+	static int count = 100;	/* remaining reports allowed (shared, not per-IRQ) */
+
+	if (count) {
+		count--;
+		__report_bad_irq(irq, desc, action_ret);
+	}
+}
+
+/* Non-zero when the "noirqdebug" boot option disabled the IRQ diagnostics. */
+static int noirqdebug;
+
+/* Boot-option handler: turn off unhandled-IRQ/lockup detection entirely. */
+static int __init noirqdebug_setup(char *str)
+{
+	noirqdebug = 1;
+	printk("IRQ lockup detection disabled\n");
+	return 1;
+}
+
+__setup("noirqdebug", noirqdebug_setup);
+
+/*
+ * If 99,900 of the previous 100,000 interrupts have not been handled then
+ * assume that the IRQ is stuck in some manner. Drop a diagnostic and try to
+ * turn the IRQ off.
+ *
+ * (The other 100-of-100,000 interrupts may have been a correctly-functioning
+ * device sharing an IRQ with the failing one)
+ *
+ * Called under desc->lock
+ */
+static void note_interrupt(int irq, irq_desc_t *desc, irqreturn_t action_ret)
+{
+	/*
+	 * Anything other than IRQ_HANDLED counts as unhandled; a value that
+	 * is also not IRQ_NONE is a bogus return and is reported right away.
+	 */
+	if (action_ret != IRQ_HANDLED) {
+		desc->irqs_unhandled++;
+		if (action_ret != IRQ_NONE)
+			report_bad_irq(irq, desc, action_ret);
 }
- return status;
+	/* Evaluate the stuck-IRQ heuristic once per 100,000 interrupts. */
+	desc->irq_count++;
+	if (desc->irq_count < 100000)
+		return;
+
+	desc->irq_count = 0;
+	if (desc->irqs_unhandled > 99900) {
+		/*
+		 * The interrupt is stuck
+		 */
+		__report_bad_irq(irq, desc, action_ret);
+		/*
+		 * Now kill the IRQ
+		 */
+		printk(KERN_EMERG "Disabling IRQ #%d\n", irq);
+		desc->status |= IRQ_DISABLED;
+		desc->handler->disable(irq);
+	}
+	desc->irqs_unhandled = 0;
}
/*
* 0 return value means that this irq is already being
* handled by some other CPU. (or is disabled)
*/
- int cpu;
irq_desc_t *desc = irq_desc(irq);
struct irqaction * action;
+ irqreturn_t action_ret;
unsigned int status;
+ int cpu;
irq_enter();
- cpu = smp_processor_id();
+ cpu = smp_processor_id(); /* for CONFIG_PREEMPT, this must come after irq_enter()! */
kstat_cpu(cpu).irqs[irq]++;
if (desc->status & IRQ_PER_CPU) {
/* no locking required for CPU-local interrupts: */
desc->handler->ack(irq);
- handle_IRQ_event(irq, regs, desc->action);
+ action_ret = handle_IRQ_event(irq, regs, desc->action);
desc->handler->end(irq);
+ if (!noirqdebug)
+ note_interrupt(irq, desc, action_ret);
} else {
spin_lock(&desc->lock);
desc->handler->ack(irq);
*/
for (;;) {
spin_unlock(&desc->lock);
- handle_IRQ_event(irq, regs, action);
+ action_ret = handle_IRQ_event(irq, regs, action);
spin_lock(&desc->lock);
-
+ if (!noirqdebug)
+ note_interrupt(irq, desc, action_ret);
if (!(desc->status & IRQ_PENDING))
break;
desc->status &= ~IRQ_PENDING;
/*
* arch/ia64/kernel/ivt.S
*
- * Copyright (C) 1998-2001 Hewlett-Packard Co
+ * Copyright (C) 1998-2001, 2003 Hewlett-Packard Co
* Stephane Eranian <eranian@hpl.hp.com>
* David Mosberger <davidm@hpl.hp.com>
+ * Copyright (C) 2000, 2002-2003 Intel Co
+ * Asit Mallick <asit.k.mallick@intel.com>
+ * Suresh Siddha <suresh.b.siddha@intel.com>
+ * Kenneth Chen <kenneth.w.chen@intel.com>
+ * Fenghua Yu <fenghua.yu@intel.com>
*
* 00/08/23 Asit Mallick <asit.k.mallick@intel.com> TLB handling for SMP
* 00/12/20 David Mosberger-Tang <davidm@hpl.hp.com> DTLB/ITLB handler now uses virtual PT.
shr.u r18=r22,PGDIR_SHIFT // get bits 33-63 of the faulting address
;;
(p7) dep r17=r17,r19,(PAGE_SHIFT-3),3 // put region number bits in place
- srlz.d // ensure "rsm psr.dt" has taken effect
-(p6) movl r19=__pa(swapper_pg_dir) // region 5 is rooted at swapper_pg_dir
+
+ srlz.d
+ LOAD_PHYSICAL(p6, r19, swapper_pg_dir) // region 5 is rooted at swapper_pg_dir
+
+ .pred.rel "mutex", p6, p7
(p6) shr.u r21=r21,PGDIR_SHIFT+PAGE_SHIFT
(p7) shr.u r21=r21,PGDIR_SHIFT+PAGE_SHIFT-3
;;
shr.u r18=r16,PGDIR_SHIFT // get bits 33-63 of faulting address
;;
(p7) dep r17=r17,r19,(PAGE_SHIFT-3),3 // put region number bits in place
+
srlz.d
-(p6) movl r19=__pa(swapper_pg_dir) // region 5 is rooted at swapper_pg_dir
+ LOAD_PHYSICAL(p6, r19, swapper_pg_dir) // region 5 is rooted at swapper_pg_dir
+
+ .pred.rel "mutex", p6, p7
(p6) shr.u r21=r21,PGDIR_SHIFT+PAGE_SHIFT
(p7) shr.u r21=r21,PGDIR_SHIFT+PAGE_SHIFT-3
;;
/////////////////////////////////////////////////////////////////////////////////////////
// 0x2c00 Entry 11 (size 64 bundles) Break instruction (33)
ENTRY(break_fault)
+ /*
+ * The streamlined system call entry/exit paths only save/restore the initial part
+ * of pt_regs. This implies that the callers of system-calls must adhere to the
+ * normal procedure calling conventions.
+ *
+ * Registers to be saved & restored:
+ * CR registers: cr.ipsr, cr.iip, cr.ifs
+ * AR registers: ar.unat, ar.pfs, ar.rsc, ar.rnat, ar.bspstore, ar.fpsr
+ * others: pr, b0, b6, loadrs, r1, r11, r12, r13, r15
+ * Registers to be restored only:
+ * r8-r11: output value from the system call.
+ *
+ * During system call exit, scratch registers (including r15) are modified/cleared
+ * to prevent leaking bits from kernel to user level.
+ */
DBG_FAULT(11)
- mov r16=cr.iim
- mov r17=__IA64_BREAK_SYSCALL
- mov r31=pr // prepare to save predicates
- ;;
- cmp.eq p0,p7=r16,r17 // is this a system call? (p7 <- false, if so)
+ mov r16=IA64_KR(CURRENT) // r16 = current task; 12 cycle read lat.
+ mov r17=cr.iim
+ mov r18=__IA64_BREAK_SYSCALL
+ mov r21=ar.fpsr
+ mov r29=cr.ipsr
+ mov r19=b6
+ mov r25=ar.unat
+ mov r27=ar.rsc
+ mov r26=ar.pfs
+ mov r28=cr.iip
+ mov r31=pr // prepare to save predicates
+ mov r20=r1
+ ;;
+ adds r16=IA64_TASK_THREAD_ON_USTACK_OFFSET,r16
+ cmp.eq p0,p7=r18,r17 // is this a system call? (p7 <- false, if so)
(p7) br.cond.spnt non_syscall
+ ;;
+ ld1 r17=[r16] // load current->thread.on_ustack flag
+ st1 [r16]=r0 // clear current->thread.on_ustack flag
+ add r1=-IA64_TASK_THREAD_ON_USTACK_OFFSET,r16 // set r1 for MINSTATE_START_SAVE_MIN_VIRT
+ ;;
+ invala
- SAVE_MIN // uses r31; defines r2:
+ /* adjust return address so we skip over the break instruction: */
- ssm psr.ic | PSR_DEFAULT_BITS
+ extr.u r8=r29,41,2 // extract ei field from cr.ipsr
;;
- srlz.i // guarantee that interruption collection is on
- cmp.eq pSys,pNonSys=r0,r0 // set pSys=1, pNonSys=0
+ cmp.eq p6,p7=2,r8 // isr.ei==2?
+ mov r2=r1 // setup r2 for ia64_syscall_setup
;;
-(p15) ssm psr.i // restore psr.i
- adds r8=(IA64_PT_REGS_R8_OFFSET-IA64_PT_REGS_R16_OFFSET),r2
+(p6) mov r8=0 // clear ei to 0
+(p6) adds r28=16,r28 // switch cr.iip to next bundle cr.ipsr.ei wrapped
+(p7) adds r8=1,r8 // increment ei to next slot
;;
- stf8 [r8]=f1 // ensure pt_regs.r8 != 0 (see handle_syscall_error)
- adds r3=8,r2 // set up second base pointer for SAVE_REST
+ cmp.eq pKStk,pUStk=r0,r17 // are we in kernel mode already?
+ dep r29=r8,r29,41,2 // insert new ei into cr.ipsr
;;
- SAVE_REST
- br.call.sptk.many rp=demine_args // clear NaT bits in (potential) syscall args
- mov r3=255
- adds r15=-1024,r15 // r15 contains the syscall number---subtract 1024
+ // switch from user to kernel RBS:
+ MINSTATE_START_SAVE_MIN_VIRT
+ br.call.sptk.many b7=ia64_syscall_setup
;;
- cmp.geu p6,p7=r3,r15 // (syscall > 0 && syscall <= 1024+255) ?
- movl r16=sys_call_table
+ MINSTATE_END_SAVE_MIN_VIRT // switch to bank 1
+ ssm psr.ic | PSR_DEFAULT_BITS
;;
-(p6) shladd r16=r15,3,r16
- movl r15=ia64_ret_from_syscall
-(p7) adds r16=(__NR_ni_syscall-1024)*8,r16 // force __NR_ni_syscall
+ srlz.i // guarantee that interruption collection is on
;;
- ld8 r16=[r16] // load address of syscall entry point
- mov rp=r15 // set the real return addr
+(p15) ssm psr.i // restore psr.i
;;
- mov b6=r16
-
- // arrange things so we skip over break instruction when returning:
+ mov r3=NR_syscalls - 1
+ movl r16=sys_call_table
- adds r16=16,sp // get pointer to cr_ipsr
- adds r17=24,sp // get pointer to cr_iip
- add r2=TI_FLAGS+IA64_TASK_SIZE,r13
- ;;
- ld8 r18=[r16] // fetch cr_ipsr
- ld4 r2=[r2] // r2 = current_thread_info()->flags
+ adds r15=-1024,r15 // r15 contains the syscall number---subtract 1024
+ movl r2=ia64_ret_from_syscall
;;
- ld8 r19=[r17] // fetch cr_iip
- extr.u r20=r18,41,2 // extract ei field
+ shladd r20=r15,3,r16 // r20 = sys_call_table + 8*(syscall-1024)
+ cmp.geu p0,p7=r3,r15 // (syscall > 0 && syscall < 1024 + NR_syscalls) ?
+ mov rp=r2 // set the real return addr
;;
- cmp.eq p6,p7=2,r20 // isr.ei==2?
- adds r19=16,r19 // compute address of next bundle
+(p7) add r20=(__NR_ni_syscall-1024)*8,r16 // force __NR_ni_syscall
+ add r2=TI_FLAGS+IA64_TASK_SIZE,r13
;;
-(p6) mov r20=0 // clear ei to 0
-(p7) adds r20=1,r20 // increment ei to next slot
+ ld8 r20=[r20] // load address of syscall entry point
+ ld4 r2=[r2] // r2 = current_thread_info()->flags
;;
-(p6) st8 [r17]=r19 // store new cr.iip if cr.isr.ei wrapped around
- dep r18=r20,r18,41,2 // insert new ei into cr.isr
tbit.z p8,p0=r2,TIF_SYSCALL_TRACE
+ mov b6=r20
;;
- st8 [r16]=r18 // store new value for cr.isr
-
(p8) br.call.sptk.many b6=b6 // ignore this return addr
br.cond.sptk ia64_trace_syscall
// NOT REACHED
END(break_fault)
-ENTRY_MIN_ALIGN(demine_args)
- alloc r2=ar.pfs,8,0,0,0
- tnat.nz p8,p0=in0
- tnat.nz p9,p0=in1
- ;;
-(p8) mov in0=-1
- tnat.nz p10,p0=in2
- tnat.nz p11,p0=in3
-
-(p9) mov in1=-1
- tnat.nz p12,p0=in4
- tnat.nz p13,p0=in5
- ;;
-(p10) mov in2=-1
- tnat.nz p14,p0=in6
- tnat.nz p15,p0=in7
-
-(p11) mov in3=-1
- tnat.nz p8,p0=r15 // demining r15 is not a must, but it is safer
-
-(p12) mov in4=-1
-(p13) mov in5=-1
- ;;
-(p14) mov in6=-1
-(p15) mov in7=-1
-(p8) mov r15=-1
- br.ret.sptk.many rp
-END(demine_args)
-
.org ia64_ivt+0x3000
/////////////////////////////////////////////////////////////////////////////////////////
// 0x3000 Entry 12 (size 64 bundles) External Interrupt (4)
DBG_FAULT(12)
mov r31=pr // prepare to save predicates
;;
-
SAVE_MIN_WITH_COVER // uses r31; defines r2 and r3
ssm psr.ic | PSR_DEFAULT_BITS
;;
mov out0=cr.ivr // pass cr.ivr as first arg
add out1=16,sp // pass pointer to pt_regs as second arg
;;
- srlz.d // make sure we see the effect of cr.ivr
+ srlz.d // make sure we see the effect of cr.ivr
movl r14=ia64_leave_kernel
;;
mov rp=r14
DBG_FAULT(14)
FAULT(14)
+ /*
+ * There is no particular reason for this code to be here, other than that
+ * there happens to be space here that would go unused otherwise. If this
+ * fault ever gets "unreserved", simply moved the following code to a more
+ * suitable spot...
+ *
+	 * ia64_syscall_setup() is a separate subroutine so that it can
+	 * allocate stacked registers and thereby safely demine any
+	 * potential NaT values from the input registers.
+ *
+ * On entry:
+ * - executing on bank 0 or bank 1 register set (doesn't matter)
+ * - r1: stack pointer
+ * - r2: current task pointer
+ * - r3: preserved
+ * - r11: original contents (saved ar.pfs to be saved)
+ * - r12: original contents (sp to be saved)
+ * - r13: original contents (tp to be saved)
+ * - r15: original contents (syscall # to be saved)
+ * - r18: saved bsp (after switching to kernel stack)
+ * - r19: saved b6
+ * - r20: saved r1 (gp)
+ * - r21: saved ar.fpsr
+ * - r22: kernel's register backing store base (krbs_base)
+ * - r23: saved ar.bspstore
+ * - r24: saved ar.rnat
+ * - r25: saved ar.unat
+ * - r26: saved ar.pfs
+ * - r27: saved ar.rsc
+ * - r28: saved cr.iip
+ * - r29: saved cr.ipsr
+ * - r31: saved pr
+ * - b0: original contents (to be saved)
+ * On exit:
+ * - executing on bank 1 registers
+ * - psr.ic enabled, interrupts restored
+ * - r1: kernel's gp
+ * - r3: preserved (same as on entry)
+ * - r12: points to kernel stack
+ * - r13: points to current task
+ * - p15: TRUE if interrupts need to be re-enabled
+ * - ar.fpsr: set to kernel settings
+ */
+GLOBAL_ENTRY(ia64_syscall_setup)
+#if PT(B6) != 0
+# error This code assumes that b6 is the first field in pt_regs.
+#endif
+	// Lay down the initial pt_regs frame at [r1]; r16/r17 walk it in parallel.
+	// The tnat.nz / "mov in<n>=-1" pairs interleaved below demine the syscall
+	// arguments: any input register holding a NaT gets replaced by -1.
+	st8 [r1]=r19				// save b6
+	add r16=PT(CR_IPSR),r1			// initialize first base pointer
+	add r17=PT(R11),r1			// initialize second base pointer
+	;;
+	alloc r19=ar.pfs,8,0,0,0		// ensure in0-in7 are writable
+	st8 [r16]=r29,PT(CR_IFS)-PT(CR_IPSR)	// save cr.ipsr
+	tnat.nz p8,p0=in0
+
+	st8.spill [r17]=r11,PT(CR_IIP)-PT(R11)	// save r11
+	tnat.nz p9,p0=in1
+(pKStk)	mov r18=r0				// make sure r18 isn't NaT
+	;;
+
+	st8 [r17]=r28,PT(AR_UNAT)-PT(CR_IIP)	// save cr.iip
+	mov r28=b0				// save b0 (2 cyc)
+(p8)	mov in0=-1
+	;;
+
+	st8 [r16]=r0,PT(AR_PFS)-PT(CR_IFS)	// clear cr.ifs
+	st8 [r17]=r25,PT(AR_RSC)-PT(AR_UNAT)	// save ar.unat
+(p9)	mov in1=-1
+	;;
+
+	st8 [r16]=r26,PT(AR_RNAT)-PT(AR_PFS)	// save ar.pfs
+	st8 [r17]=r27,PT(AR_BSPSTORE)-PT(AR_RSC)// save ar.rsc
+	tnat.nz p10,p0=in2
+
+(pUStk)	sub r18=r18,r22				// r18=RSE.ndirty*8
+	tbit.nz p15,p0=r29,IA64_PSR_I_BIT
+	tnat.nz p11,p0=in3
+	;;
+(pKStk)	adds r16=PT(PR)-PT(AR_RNAT),r16		// skip over ar_rnat field
+(pKStk)	adds r17=PT(B0)-PT(AR_BSPSTORE),r17	// skip over ar_bspstore field
+(p10)	mov in2=-1
+
+(p11)	mov in3=-1
+	tnat.nz p12,p0=in4
+	tnat.nz p13,p0=in5
+	;;
+(pUStk)	st8 [r16]=r24,PT(PR)-PT(AR_RNAT)	// save ar.rnat
+(pUStk)	st8 [r17]=r23,PT(B0)-PT(AR_BSPSTORE)	// save ar.bspstore
+	shl r18=r18,16				// compute ar.rsc to be used for "loadrs"
+	;;
+	st8 [r16]=r31,PT(LOADRS)-PT(PR)		// save predicates
+	st8 [r17]=r28,PT(R1)-PT(B0)		// save b0
+(p12)	mov in4=-1
+	;;
+	st8 [r16]=r18,PT(R12)-PT(LOADRS)	// save ar.rsc value for "loadrs"
+	st8.spill [r17]=r20,PT(R13)-PT(R1)	// save original r1
+(p13)	mov in5=-1
+	;;
+
+.mem.offset 0,0;	st8.spill [r16]=r12,PT(AR_FPSR)-PT(R12)	// save r12
+.mem.offset 8,0;	st8.spill [r17]=r13,PT(R15)-PT(R13)	// save r13
+	tnat.nz p14,p0=in6
+	;;
+	st8 [r16]=r21,PT(R8)-PT(AR_FPSR)	// save ar.fpsr
+	st8.spill [r17]=r15			// save r15
+	tnat.nz p8,p0=in7
+	;;
+	stf8 [r16]=f1		// ensure pt_regs.r8 != 0 (see handle_syscall_error)
+	adds r12=-16,r1		// switch to kernel memory stack (with 16 bytes of scratch)
+(p14)	mov in6=-1
+
+	mov r13=r2				// establish `current'
+	movl r1=__gp				// establish kernel global pointer
+	;;
+(p8)	mov in7=-1
+	tnat.nz p9,p0=r15
+
+	cmp.eq pSys,pNonSys=r0,r0		// set pSys=1, pNonSys=0
+	movl r17=FPSR_DEFAULT
+	;;
+	mov.m ar.fpsr=r17			// set ar.fpsr to kernel default value
+(p9)	mov r15=-1				// demine the syscall number itself
+	br.ret.sptk.many b7
+END(ia64_syscall_setup)
+
.org ia64_ivt+0x3c00
/////////////////////////////////////////////////////////////////////////////////////////
// 0x3c00 Entry 15 (size 64 bundles) Reserved
DBG_FAULT(16)
FAULT(16)
-#ifdef CONFIG_IA32_SUPPORT
-
- /*
- * There is no particular reason for this code to be here, other than that
- * there happens to be space here that would go unused otherwise. If this
- * fault ever gets "unreserved", simply moved the following code to a more
- * suitable spot...
- */
-
- // IA32 interrupt entry point
-
-ENTRY(dispatch_to_ia32_handler)
- SAVE_MIN
- ;;
- mov r14=cr.isr
- ssm psr.ic | PSR_DEFAULT_BITS
- ;;
- srlz.i // guarantee that interruption collection is on
- ;;
-(p15) ssm psr.i
- adds r3=8,r2 // Base pointer for SAVE_REST
- ;;
- SAVE_REST
- ;;
- mov r15=0x80
- shr r14=r14,16 // Get interrupt number
- ;;
- cmp.ne p6,p0=r14,r15
-(p6) br.call.dpnt.many b6=non_ia32_syscall
-
- adds r14=IA64_PT_REGS_R8_OFFSET + 16,sp // 16 byte hole per SW conventions
- adds r15=IA64_PT_REGS_R1_OFFSET + 16,sp
- ;;
- cmp.eq pSys,pNonSys=r0,r0 // set pSys=1, pNonSys=0
- st8 [r15]=r8 // save original EAX in r1 (IA32 procs don't use the GP)
- ;;
- alloc r15=ar.pfs,0,0,6,0 // must first in an insn group
- ;;
- ld4 r8=[r14],8 // r8 == eax (syscall number)
- mov r15=250 // number of entries in ia32 system call table
- ;;
- cmp.ltu.unc p6,p7=r8,r15
- ld4 out1=[r14],8 // r9 == ecx
- ;;
- ld4 out2=[r14],8 // r10 == edx
- ;;
- ld4 out0=[r14] // r11 == ebx
- adds r14=(IA64_PT_REGS_R8_OFFSET-(8*3)) + 16,sp
- ;;
- ld4 out5=[r14],8 // r13 == ebp
- ;;
- ld4 out3=[r14],8 // r14 == esi
- adds r2=TI_FLAGS+IA64_TASK_SIZE,r13
- ;;
- ld4 out4=[r14] // r15 == edi
- movl r16=ia32_syscall_table
- ;;
-(p6) shladd r16=r8,3,r16 // force ni_syscall if not valid syscall number
- ld4 r2=[r2] // r2 = current_thread_info()->flags
- ;;
- ld8 r16=[r16]
- tbit.z p8,p0=r2,TIF_SYSCALL_TRACE
- ;;
- mov b6=r16
- movl r15=ia32_ret_from_syscall
- ;;
- mov rp=r15
-(p8) br.call.sptk.many b6=b6
- br.cond.sptk ia32_trace_syscall
-
-non_ia32_syscall:
- alloc r15=ar.pfs,0,0,2,0
- mov out0=r14 // interrupt #
- add out1=16,sp // pointer to pt_regs
- ;; // avoid WAW on CFM
- br.call.sptk.many rp=ia32_bad_interrupt
-.ret1: movl r15=ia64_leave_kernel
- ;;
- mov rp=r15
- br.ret.sptk.many rp
-END(dispatch_to_ia32_handler)
-
-#endif /* CONFIG_IA32_SUPPORT */
-
.org ia64_ivt+0x4400
/////////////////////////////////////////////////////////////////////////////////////////
// 0x4400 Entry 17 (size 64 bundles) Reserved
// 0x7f00 Entry 67 (size 16 bundles) Reserved
DBG_FAULT(67)
FAULT(67)
+
+#ifdef CONFIG_IA32_SUPPORT
+
+ /*
+ * There is no particular reason for this code to be here, other than that
+ * there happens to be space here that would go unused otherwise. If this
+	 * fault ever gets "unreserved", simply move the following code to a more
+ * suitable spot...
+ */
+
+ // IA32 interrupt entry point
+
+ENTRY(dispatch_to_ia32_handler)
+	// Interrupt vector (cr.isr >> 16) 0x80 is the IA-32 "int 0x80" system
+	// call path; any other vector is handed to ia32_bad_interrupt below.
+	SAVE_MIN
+	;;
+	mov r14=cr.isr
+	ssm psr.ic | PSR_DEFAULT_BITS
+	;;
+	srlz.i					// guarantee that interruption collection is on
+	;;
+(p15)	ssm psr.i
+	adds r3=8,r2		// Base pointer for SAVE_REST
+	;;
+	SAVE_REST
+	;;
+	mov r15=0x80
+	shr r14=r14,16		// Get interrupt number
+	;;
+	cmp.ne p6,p0=r14,r15
+(p6)	br.call.dpnt.many b6=non_ia32_syscall
+
+	adds r14=IA64_PT_REGS_R8_OFFSET + 16,sp	// 16 byte hole per SW conventions
+	adds r15=IA64_PT_REGS_R1_OFFSET + 16,sp
+	;;
+	cmp.eq pSys,pNonSys=r0,r0		// set pSys=1, pNonSys=0
+	ld8 r8=[r14]		// get r8
+	;;
+	st8 [r15]=r8		// save original EAX in r1 (IA32 procs don't use the GP)
+	;;
+	alloc r15=ar.pfs,0,0,6,0	// must first in an insn group
+	;;
+	// Marshal the IA-32 register-based syscall arguments into out0-out5:
+	ld4 r8=[r14],8		// r8 == eax (syscall number)
+	mov r15=250		// number of entries in ia32 system call table
+	;;
+	cmp.ltu.unc p6,p7=r8,r15
+	ld4 out1=[r14],8	// r9 == ecx
+	;;
+	ld4 out2=[r14],8	// r10 == edx
+	;;
+	ld4 out0=[r14]		// r11 == ebx
+	adds r14=(IA64_PT_REGS_R13_OFFSET) + 16,sp
+	;;
+	ld4 out5=[r14],PT(R14)-PT(R13)	// r13 == ebp
+	;;
+	ld4 out3=[r14],PT(R15)-PT(R14)	// r14 == esi
+	adds r2=TI_FLAGS+IA64_TASK_SIZE,r13
+	;;
+	ld4 out4=[r14]		// r15 == edi
+	movl r16=ia32_syscall_table
+	;;
+(p6)	shladd r16=r8,3,r16	// force ni_syscall if not valid syscall number
+	ld4 r2=[r2]		// r2 = current_thread_info()->flags
+	;;
+	ld8 r16=[r16]
+	tbit.z p8,p0=r2,TIF_SYSCALL_TRACE
+	;;
+	mov b6=r16
+	movl r15=ia32_ret_from_syscall
+	;;
+	mov rp=r15
+(p8)	br.call.sptk.many b6=b6
+	br.cond.sptk ia32_trace_syscall
+
+non_ia32_syscall:
+	alloc r15=ar.pfs,0,0,2,0
+	mov out0=r14				// interrupt #
+	add out1=16,sp				// pointer to pt_regs
+	;;			// avoid WAW on CFM
+	br.call.sptk.many rp=ia32_bad_interrupt
+.ret1:	movl r15=ia64_leave_kernel
+	;;
+	mov rp=r15
+	br.ret.sptk.many rp
+END(dispatch_to_ia32_handler)
+
+#endif /* CONFIG_IA32_SUPPORT */
}
void
-init_handler_platform (sal_log_processor_info_t *proc_ptr,
+init_handler_platform (pal_min_state_area_t *ms,
struct pt_regs *pt, struct switch_stack *sw)
{
struct unw_frame_info info;
*/
printk("Delaying for 5 seconds...\n");
udelay(5*1000000);
- show_min_state(&SAL_LPI_PSI_INFO(proc_ptr)->min_state_area);
+ show_min_state(ms);
printk("Backtrace of current task (pid %d, %s)\n", current->pid, current->comm);
- fetch_min_state(&SAL_LPI_PSI_INFO(proc_ptr)->min_state_area, pt, sw);
+ fetch_min_state(ms, pt, sw);
unw_init_from_interruption(&info, current, pt, sw);
ia64_do_show_stack(&info, NULL);
+#ifdef CONFIG_SMP
+ /* read_trylock() would be handy... */
if (!tasklist_lock.write_lock)
read_lock(&tasklist_lock);
+#endif
{
struct task_struct *g, *t;
do_each_thread (g, t) {
continue;
printk("\nBacktrace of pid %d (%s)\n", t->pid, t->comm);
- show_stack(t);
+ show_stack(t, NULL);
} while_each_thread (g, t);
}
+#ifdef CONFIG_SMP
if (!tasklist_lock.write_lock)
read_unlock(&tasklist_lock);
+#endif
printk("\nINIT dump complete. Please reboot now.\n");
while (1); /* hang city if no debugger */
IA64_MCA_DEBUG("ia64_mca_init: registered mca rendezvous spinloop and wakeup mech.\n");
- ia64_mc_info.imi_mca_handler = __pa(mca_hldlr_ptr->fp);
+ ia64_mc_info.imi_mca_handler = ia64_tpa(mca_hldlr_ptr->fp);
/*
* XXX - disable SAL checksum by setting size to 0; should be
- * __pa(ia64_os_mca_dispatch_end) - __pa(ia64_os_mca_dispatch);
+ * ia64_tpa(ia64_os_mca_dispatch_end) - ia64_tpa(ia64_os_mca_dispatch);
*/
ia64_mc_info.imi_mca_handler_size = 0;
/* Register the os mca handler with SAL */
if ((rc = ia64_sal_set_vectors(SAL_VECTOR_OS_MCA,
ia64_mc_info.imi_mca_handler,
- mca_hldlr_ptr->gp,
+ ia64_tpa(mca_hldlr_ptr->gp),
ia64_mc_info.imi_mca_handler_size,
0, 0, 0)))
{
}
IA64_MCA_DEBUG("ia64_mca_init: registered os mca handler with SAL at 0x%lx, gp = 0x%lx\n",
- ia64_mc_info.imi_mca_handler, mca_hldlr_ptr->gp);
+ ia64_mc_info.imi_mca_handler, ia64_tpa(mca_hldlr_ptr->gp));
/*
* XXX - disable SAL checksum by setting size to 0, should be
* IA64_INIT_HANDLER_SIZE
*/
- ia64_mc_info.imi_monarch_init_handler = __pa(mon_init_ptr->fp);
+ ia64_mc_info.imi_monarch_init_handler = ia64_tpa(mon_init_ptr->fp);
ia64_mc_info.imi_monarch_init_handler_size = 0;
- ia64_mc_info.imi_slave_init_handler = __pa(slave_init_ptr->fp);
+ ia64_mc_info.imi_slave_init_handler = ia64_tpa(slave_init_ptr->fp);
ia64_mc_info.imi_slave_init_handler_size = 0;
IA64_MCA_DEBUG("ia64_mca_init: os init handler at %lx\n",
/* Register the os init handler with SAL */
if ((rc = ia64_sal_set_vectors(SAL_VECTOR_OS_INIT,
ia64_mc_info.imi_monarch_init_handler,
- __pa(ia64_get_gp()),
+ ia64_tpa(ia64_get_gp()),
ia64_mc_info.imi_monarch_init_handler_size,
ia64_mc_info.imi_slave_init_handler,
- __pa(ia64_get_gp()),
+ ia64_tpa(ia64_get_gp()),
ia64_mc_info.imi_slave_init_handler_size)))
{
printk(KERN_ERR "ia64_mca_init: Failed to register m/s init handlers with SAL. "
void
ia64_init_handler (struct pt_regs *pt, struct switch_stack *sw)
{
- sal_log_processor_info_t *proc_ptr;
- ia64_err_rec_t *plog_ptr;
+ pal_min_state_area_t *ms;
- printk(KERN_INFO "Entered OS INIT handler\n");
-
- /* Get the INIT processor log */
- if (!ia64_log_get(SAL_INFO_TYPE_INIT, (prfunc_t)printk))
- return; // no record retrieved
-
-#ifdef IA64_DUMP_ALL_PROC_INFO
- ia64_log_print(SAL_INFO_TYPE_INIT, (prfunc_t)printk);
-#endif
+ printk(KERN_INFO "Entered OS INIT handler. PSP=%lx\n",
+ ia64_sal_to_os_handoff_state.proc_state_param);
/*
- * get pointer to min state save area
- *
+ * Address of minstate area provided by PAL is physical,
+ * uncacheable (bit 63 set). Convert to Linux virtual
+ * address in region 6.
*/
- plog_ptr=(ia64_err_rec_t *)IA64_LOG_CURR_BUFFER(SAL_INFO_TYPE_INIT);
- proc_ptr = &plog_ptr->proc_err;
-
- ia64_process_min_state_save(&SAL_LPI_PSI_INFO(proc_ptr)->min_state_area);
-
- /* Clear the INIT SAL logs now that they have been saved in the OS buffer */
- ia64_sal_clear_state_info(SAL_INFO_TYPE_INIT);
+ ms = (pal_min_state_area_t *)(ia64_sal_to_os_handoff_state.pal_min_state | (6ul<<61));
- init_handler_platform(proc_ptr, pt, sw); /* call platform specific routines */
+ init_handler_platform(ms, pt, sw); /* call platform specific routines */
}
/*
* 6. GR12 = Return address to location within SAL_CHECK
*/
#define SAL_TO_OS_MCA_HANDOFF_STATE_SAVE(_tmp) \
- movl _tmp=ia64_sal_to_os_handoff_state;; \
- DATA_VA_TO_PA(_tmp);; \
+ LOAD_PHYSICAL(p0, _tmp, ia64_sal_to_os_handoff_state);; \
st8 [_tmp]=r1,0x08;; \
st8 [_tmp]=r8,0x08;; \
st8 [_tmp]=r9,0x08;; \
st8 [_tmp]=r10,0x08;; \
st8 [_tmp]=r11,0x08;; \
- st8 [_tmp]=r12,0x08
+ st8 [_tmp]=r12,0x08;; \
+ st8 [_tmp]=r17,0x08;; \
+ st8 [_tmp]=r18,0x08
/*
* OS_MCA_TO_SAL_HANDOFF_STATE (SAL 3.0 spec)
* returns ptr to SAL rtn save loc in _tmp
*/
#define OS_MCA_TO_SAL_HANDOFF_STATE_RESTORE(_tmp) \
-(p6) movl _tmp=ia64_sal_to_os_handoff_state;; \
-(p7) movl _tmp=ia64_os_to_sal_handoff_state;; \
- DATA_VA_TO_PA(_tmp);; \
+ LOAD_PHYSICAL(p6, _tmp, ia64_sal_to_os_handoff_state);; \
+ LOAD_PHYSICAL(p7, _tmp, ia64_os_to_sal_handoff_state);; \
(p6) movl r8=IA64_MCA_COLD_BOOT; \
(p6) movl r10=IA64_MCA_SAME_CONTEXT; \
(p6) add _tmp=0x18,_tmp;; \
#include "entry.h"
/*
- * A couple of convenience macros that make writing and reading
- * SAVE_MIN and SAVE_REST easier.
- */
-#define rARPR r31
-#define rCRIFS r30
-#define rCRIPSR r29
-#define rCRIIP r28
-#define rARRSC r27
-#define rARPFS r26
-#define rARUNAT r25
-#define rARRNAT r24
-#define rARBSPSTORE r23
-#define rKRBS r22
-#define rB6 r21
-#define rR1 r20
-
-/*
- * Here start the source dependent macros.
- */
-
-/*
* For ivt.s we want to access the stack virtually so we don't have to disable translation
* on interrupts.
+ *
+ * On entry:
+ * r1: pointer to current task (ar.k6)
*/
#define MINSTATE_START_SAVE_MIN_VIRT \
(pUStk) mov ar.rsc=0; /* set enforced lazy mode, pl 0, little-endian, loadrs=0 */ \
;; \
-(pUStk) mov.m rARRNAT=ar.rnat; \
-(pUStk) addl rKRBS=IA64_RBS_OFFSET,r1; /* compute base of RBS */ \
+(pUStk) mov.m r24=ar.rnat; \
+(pUStk) addl r22=IA64_RBS_OFFSET,r1; /* compute base of RBS */ \
(pKStk) mov r1=sp; /* get sp */ \
;; \
-(pUStk) lfetch.fault.excl.nt1 [rKRBS]; \
+(pUStk) lfetch.fault.excl.nt1 [r22]; \
(pUStk) addl r1=IA64_STK_OFFSET-IA64_PT_REGS_SIZE,r1; /* compute base of memory stack */ \
-(pUStk) mov rARBSPSTORE=ar.bspstore; /* save ar.bspstore */ \
+(pUStk) mov r23=ar.bspstore; /* save ar.bspstore */ \
;; \
-(pUStk) mov ar.bspstore=rKRBS; /* switch to kernel RBS */ \
+(pUStk) mov ar.bspstore=r22; /* switch to kernel RBS */ \
(pKStk) addl r1=-IA64_PT_REGS_SIZE,r1; /* if in kernel mode, use sp (r12) */ \
;; \
(pUStk) mov r18=ar.bsp; \
#define MINSTATE_START_SAVE_MIN_PHYS \
(pKStk) movl sp=ia64_init_stack+IA64_STK_OFFSET-IA64_PT_REGS_SIZE; \
(pUStk) mov ar.rsc=0; /* set enforced lazy mode, pl 0, little-endian, loadrs=0 */ \
-(pUStk) addl rKRBS=IA64_RBS_OFFSET,r1; /* compute base of register backing store */ \
+(pUStk) addl r22=IA64_RBS_OFFSET,r1; /* compute base of register backing store */ \
;; \
-(pUStk) mov rARRNAT=ar.rnat; \
-(pKStk) dep r1=0,sp,61,3; /* compute physical addr of sp */ \
+(pUStk) mov r24=ar.rnat; \
+(pKStk) tpa r1=sp; /* compute physical addr of sp */ \
(pUStk) addl r1=IA64_STK_OFFSET-IA64_PT_REGS_SIZE,r1; /* compute base of memory stack */ \
-(pUStk) mov rARBSPSTORE=ar.bspstore; /* save ar.bspstore */ \
-(pUStk) dep rKRBS=-1,rKRBS,61,3; /* compute kernel virtual addr of RBS */\
+(pUStk) mov r23=ar.bspstore; /* save ar.bspstore */ \
+(pUStk) dep r22=-1,r22,61,3; /* compute kernel virtual addr of RBS */ \
;; \
(pKStk) addl r1=-IA64_PT_REGS_SIZE,r1; /* if in kernel mode, use sp (r12) */ \
-(pUStk) mov ar.bspstore=rKRBS; /* switch to kernel RBS */ \
+(pUStk) mov ar.bspstore=r22; /* switch to kernel RBS */ \
;; \
(pUStk) mov r18=ar.bsp; \
(pUStk) mov ar.rsc=0x3; /* set eager mode, pl 0, little-endian, loadrs=0 */ \
*
* Upon exit, the state is as follows:
* psr.ic: off
- * r2 = points to &pt_regs.r16
+ * r2 = points to &pt_regs.r16
+ * r8 = contents of ar.ccv
+ * r9 = contents of ar.csd
+ * r10 = contents of ar.ssd
+ * r11 = FPSR_DEFAULT
* r12 = kernel sp (kernel virtual address)
* r13 = points to current task_struct (kernel virtual address)
* p15 = TRUE if psr.i is set in cr.ipsr
- * predicate registers (other than p2, p3, and p15), b6, r3, r8, r9, r10, r11, r14, r15:
+ * predicate registers (other than p2, p3, and p15), b6, r3, r14, r15:
* preserved
*
* Note that psr.ic is NOT turned on by this macro. This is so that
* we can pass interruption state as arguments to a handler.
*/
-#define DO_SAVE_MIN(COVER,SAVE_IFS,EXTRA) \
- mov rARRSC=ar.rsc; /* M */ \
- mov rARUNAT=ar.unat; /* M */ \
- mov rR1=r1; /* A */ \
- MINSTATE_GET_CURRENT(r1); /* M (or M;;I) */ \
- mov rCRIPSR=cr.ipsr; /* M */ \
- mov rARPFS=ar.pfs; /* I */ \
- mov rCRIIP=cr.iip; /* M */ \
- mov rB6=b6; /* I */ /* rB6 = branch reg 6 */ \
- COVER; /* B;; (or nothing) */ \
- ;; \
- adds r16=IA64_TASK_THREAD_ON_USTACK_OFFSET,r1; \
- ;; \
- ld1 r17=[r16]; /* load current->thread.on_ustack flag */ \
- st1 [r16]=r0; /* clear current->thread.on_ustack flag */ \
- /* switch from user to kernel RBS: */ \
- ;; \
- invala; /* M */ \
- SAVE_IFS; \
- cmp.eq pKStk,pUStk=r0,r17; /* are we in kernel mode already? (psr.cpl==0) */ \
- ;; \
- MINSTATE_START_SAVE_MIN \
- add r17=L1_CACHE_BYTES,r1 /* really: biggest cache-line size */ \
- ;; \
- st8 [r1]=rCRIPSR; /* save cr.ipsr */ \
- lfetch.fault.excl.nt1 [r17],L1_CACHE_BYTES; \
- add r16=16,r1; /* initialize first base pointer */ \
- ;; \
- lfetch.fault.excl.nt1 [r17],L1_CACHE_BYTES; \
- ;; \
- lfetch.fault.excl.nt1 [r17]; \
- adds r17=8,r1; /* initialize second base pointer */ \
-(pKStk) mov r18=r0; /* make sure r18 isn't NaT */ \
- ;; \
- st8 [r17]=rCRIIP,16; /* save cr.iip */ \
- st8 [r16]=rCRIFS,16; /* save cr.ifs */ \
-(pUStk) sub r18=r18,rKRBS; /* r18=RSE.ndirty*8 */ \
- ;; \
- st8 [r17]=rARUNAT,16; /* save ar.unat */ \
- st8 [r16]=rARPFS,16; /* save ar.pfs */ \
- shl r18=r18,16; /* compute ar.rsc to be used for "loadrs" */ \
- ;; \
- st8 [r17]=rARRSC,16; /* save ar.rsc */ \
-(pUStk) st8 [r16]=rARRNAT,16; /* save ar.rnat */ \
-(pKStk) adds r16=16,r16; /* skip over ar_rnat field */ \
- ;; /* avoid RAW on r16 & r17 */ \
-(pUStk) st8 [r17]=rARBSPSTORE,16; /* save ar.bspstore */ \
- st8 [r16]=rARPR,16; /* save predicates */ \
-(pKStk) adds r17=16,r17; /* skip over ar_bspstore field */ \
- ;; \
- st8 [r17]=rB6,16; /* save b6 */ \
- st8 [r16]=r18,16; /* save ar.rsc value for "loadrs" */ \
- tbit.nz p15,p0=rCRIPSR,IA64_PSR_I_BIT \
- ;; \
-.mem.offset 8,0; st8.spill [r17]=rR1,16; /* save original r1 */ \
-.mem.offset 0,0; st8.spill [r16]=r2,16; \
- ;; \
-.mem.offset 8,0; st8.spill [r17]=r3,16; \
-.mem.offset 0,0; st8.spill [r16]=r12,16; \
- adds r2=IA64_PT_REGS_R16_OFFSET,r1; \
- ;; \
-.mem.offset 8,0; st8.spill [r17]=r13,16; \
-.mem.offset 0,0; st8.spill [r16]=r14,16; \
- cmp.eq pNonSys,pSys=r0,r0 /* initialize pSys=0, pNonSys=1 */ \
- ;; \
-.mem.offset 8,0; st8.spill [r17]=r15,16; \
-.mem.offset 0,0; st8.spill [r16]=r8,16; \
- dep r14=-1,r0,61,3; \
- ;; \
-.mem.offset 8,0; st8.spill [r17]=r9,16; \
-.mem.offset 0,0; st8.spill [r16]=r10,16; \
- adds r12=-16,r1; /* switch to kernel memory stack (with 16 bytes of scratch) */ \
- ;; \
-.mem.offset 8,0; st8.spill [r17]=r11,16; \
- mov r13=IA64_KR(CURRENT); /* establish `current' */ \
- ;; \
- EXTRA; \
- movl r1=__gp; /* establish kernel global pointer */ \
- ;; \
+#define DO_SAVE_MIN(COVER,SAVE_IFS,EXTRA) \
+ MINSTATE_GET_CURRENT(r16); /* M (or M;;I) */ \
+ mov r27=ar.rsc; /* M */ \
+ mov r20=r1; /* A */ \
+ mov r25=ar.unat; /* M */ \
+ mov r29=cr.ipsr; /* M */ \
+ mov r26=ar.pfs; /* I */ \
+ mov r28=cr.iip; /* M */ \
+ mov r21=ar.fpsr; /* M */ \
+ COVER; /* B;; (or nothing) */ \
+ ;; \
+ adds r16=IA64_TASK_THREAD_ON_USTACK_OFFSET,r16; \
+ ;; \
+ ld1 r17=[r16]; /* load current->thread.on_ustack flag */ \
+ st1 [r16]=r0; /* clear current->thread.on_ustack flag */ \
+ adds r1=-IA64_TASK_THREAD_ON_USTACK_OFFSET,r16 \
+ /* switch from user to kernel RBS: */ \
+ ;; \
+ invala; /* M */ \
+ SAVE_IFS; \
+ cmp.eq pKStk,pUStk=r0,r17; /* are we in kernel mode already? */ \
+ ;; \
+ MINSTATE_START_SAVE_MIN \
+ adds r17=2*L1_CACHE_BYTES,r1; /* really: biggest cache-line size */ \
+ adds r16=PT(CR_IPSR),r1; \
+ ;; \
+ lfetch.fault.excl.nt1 [r17],L1_CACHE_BYTES; \
+ st8 [r16]=r29; /* save cr.ipsr */ \
+ ;; \
+ lfetch.fault.excl.nt1 [r17]; \
+ tbit.nz p15,p0=r29,IA64_PSR_I_BIT; \
+ mov r29=b0 \
+ ;; \
+ adds r16=PT(R8),r1; /* initialize first base pointer */ \
+ adds r17=PT(R9),r1; /* initialize second base pointer */ \
+(pKStk) mov r18=r0; /* make sure r18 isn't NaT */ \
+ ;; \
+.mem.offset 0,0; st8.spill [r16]=r8,16; \
+.mem.offset 8,0; st8.spill [r17]=r9,16; \
+ ;; \
+.mem.offset 0,0; st8.spill [r16]=r10,24; \
+.mem.offset 8,0; st8.spill [r17]=r11,24; \
+ ;; \
+ st8 [r16]=r28,16; /* save cr.iip */ \
+ st8 [r17]=r30,16; /* save cr.ifs */ \
+(pUStk) sub r18=r18,r22; /* r18=RSE.ndirty*8 */ \
+ mov r8=ar.ccv; \
+ mov r9=ar.csd; \
+ mov r10=ar.ssd; \
+ movl r11=FPSR_DEFAULT; /* L-unit */ \
+ ;; \
+ st8 [r16]=r25,16; /* save ar.unat */ \
+ st8 [r17]=r26,16; /* save ar.pfs */ \
+ shl r18=r18,16; /* compute ar.rsc to be used for "loadrs" */ \
+ ;; \
+ st8 [r16]=r27,16; /* save ar.rsc */ \
+(pUStk) st8 [r17]=r24,16; /* save ar.rnat */ \
+(pKStk) adds r17=16,r17; /* skip over ar_rnat field */ \
+ ;; /* avoid RAW on r16 & r17 */ \
+(pUStk) st8 [r16]=r23,16; /* save ar.bspstore */ \
+ st8 [r17]=r31,16; /* save predicates */ \
+(pKStk) adds r16=16,r16; /* skip over ar_bspstore field */ \
+ ;; \
+ st8 [r16]=r29,16; /* save b0 */ \
+ st8 [r17]=r18,16; /* save ar.rsc value for "loadrs" */ \
+ cmp.eq pNonSys,pSys=r0,r0 /* initialize pSys=0, pNonSys=1 */ \
+ ;; \
+.mem.offset 0,0; st8.spill [r16]=r20,16; /* save original r1 */ \
+.mem.offset 8,0; st8.spill [r17]=r12,16; \
+ adds r12=-16,r1; /* switch to kernel memory stack (with 16 bytes of scratch) */ \
+ ;; \
+.mem.offset 0,0; st8.spill [r16]=r13,16; \
+.mem.offset 8,0; st8.spill [r17]=r21,16; /* save ar.fpsr */ \
+ mov r13=IA64_KR(CURRENT); /* establish `current' */ \
+ ;; \
+.mem.offset 0,0; st8.spill [r16]=r15,16; \
+.mem.offset 8,0; st8.spill [r17]=r14,16; \
+ dep r14=-1,r0,61,3; \
+ ;; \
+.mem.offset 0,0; st8.spill [r16]=r2,16; \
+.mem.offset 8,0; st8.spill [r17]=r3,16; \
+ adds r2=IA64_PT_REGS_R16_OFFSET,r1; \
+ ;; \
+ EXTRA; \
+ movl r1=__gp; /* establish kernel global pointer */ \
+ ;; \
MINSTATE_END_SAVE_MIN
/*
- * SAVE_REST saves the remainder of pt_regs (with psr.ic on). This
- * macro guarantees to preserve all predicate registers, r8, r9, r10,
- * r11, r14, and r15.
+ * SAVE_REST saves the remainder of pt_regs (with psr.ic on).
*
* Assumed state upon entry:
* psr.ic: on
* r2: points to &pt_regs.r16
* r3: points to &pt_regs.r17
+ * r8: contents of ar.ccv
+ * r9: contents of ar.csd
+ * r10: contents of ar.ssd
+ * r11: FPSR_DEFAULT
+ *
+ * Registers r14 and r15 are guaranteed not to be touched by SAVE_REST.
*/
#define SAVE_REST \
-.mem.offset 0,0; st8.spill [r2]=r16,16; \
+.mem.offset 0,0; st8.spill [r2]=r16,16; \
+.mem.offset 8,0; st8.spill [r3]=r17,16; \
;; \
-.mem.offset 8,0; st8.spill [r3]=r17,16; \
-.mem.offset 0,0; st8.spill [r2]=r18,16; \
+.mem.offset 0,0; st8.spill [r2]=r18,16; \
+.mem.offset 8,0; st8.spill [r3]=r19,16; \
;; \
-.mem.offset 8,0; st8.spill [r3]=r19,16; \
-.mem.offset 0,0; st8.spill [r2]=r20,16; \
+.mem.offset 0,0; st8.spill [r2]=r20,16; \
+.mem.offset 8,0; st8.spill [r3]=r21,16; \
+ mov r18=b6; \
;; \
- mov r16=ar.ccv; /* M-unit */ \
- movl r18=FPSR_DEFAULT /* L-unit */ \
- ;; \
- mov r17=ar.fpsr; /* M-unit */ \
- mov ar.fpsr=r18; /* M-unit */ \
- ;; \
-.mem.offset 8,0; st8.spill [r3]=r21,16; \
-.mem.offset 0,0; st8.spill [r2]=r22,16; \
- mov r18=b0; \
- ;; \
-.mem.offset 8,0; st8.spill [r3]=r23,16; \
-.mem.offset 0,0; st8.spill [r2]=r24,16; \
+.mem.offset 0,0; st8.spill [r2]=r22,16; \
+.mem.offset 8,0; st8.spill [r3]=r23,16; \
mov r19=b7; \
;; \
-.mem.offset 8,0; st8.spill [r3]=r25,16; \
-.mem.offset 0,0; st8.spill [r2]=r26,16; \
- ;; \
-.mem.offset 8,0; st8.spill [r3]=r27,16; \
-.mem.offset 0,0; st8.spill [r2]=r28,16; \
+.mem.offset 0,0; st8.spill [r2]=r24,16; \
+.mem.offset 8,0; st8.spill [r3]=r25,16; \
;; \
-.mem.offset 8,0; st8.spill [r3]=r29,16; \
-.mem.offset 0,0; st8.spill [r2]=r30,16; \
+.mem.offset 0,0; st8.spill [r2]=r26,16; \
+.mem.offset 8,0; st8.spill [r3]=r27,16; \
;; \
-.mem.offset 8,0; st8.spill [r3]=r31,16; \
- st8 [r2]=r16,16; /* ar.ccv */ \
+.mem.offset 0,0; st8.spill [r2]=r28,16; \
+.mem.offset 8,0; st8.spill [r3]=r29,16; \
;; \
- st8 [r3]=r17,16; /* ar.fpsr */ \
- st8 [r2]=r18,16; /* b0 */ \
+.mem.offset 0,0; st8.spill [r2]=r30,16; \
+.mem.offset 8,0; st8.spill [r3]=r31,32; \
;; \
- st8 [r3]=r19,16+8; /* b7 */ \
+ mov ar.fpsr=r11; /* M-unit */ \
+ st8 [r2]=r8,8; /* ar.ccv */ \
+ adds r24=PT(B6)-PT(F7),r3; \
;; \
stf.spill [r2]=f6,32; \
stf.spill [r3]=f7,32; \
;; \
stf.spill [r2]=f8,32; \
- stf.spill [r3]=f9,32
+ stf.spill [r3]=f9,32; \
+ ;; \
+ stf.spill [r2]=f10; \
+ stf.spill [r3]=f11; \
+ adds r25=PT(B7)-PT(F11),r3; \
+ ;; \
+ st8 [r24]=r18,16; /* b6 */ \
+ st8 [r25]=r19,16; /* b7 */ \
+ ;; \
+ st8 [r24]=r9; /* ar.csd */ \
+ st8 [r25]=r10; /* ar.ssd */ \
+ ;;
-#define SAVE_MIN_WITH_COVER DO_SAVE_MIN(cover, mov rCRIFS=cr.ifs,)
-#define SAVE_MIN_WITH_COVER_R19 DO_SAVE_MIN(cover, mov rCRIFS=cr.ifs, mov r15=r19)
-#define SAVE_MIN DO_SAVE_MIN( , mov rCRIFS=r0, )
+#define SAVE_MIN_WITH_COVER DO_SAVE_MIN(cover, mov r30=cr.ifs,)
+#define SAVE_MIN_WITH_COVER_R19 DO_SAVE_MIN(cover, mov r30=cr.ifs, mov r15=r19)
+#define SAVE_MIN DO_SAVE_MIN( , mov r30=r0, )
LTOFF22X
LTOFF22X
LTOFF_FPTR22
- PCREL21B
+ PCREL21B (for br.call only; br.cond is not supported out of modules!)
+ PCREL60B (for brl.cond only; brl.call is not supported for modules!)
PCREL64LSB
SECREL32LSB
SEGREL64LSB
#include <linux/string.h>
#include <linux/vmalloc.h>
+#include <asm/patch.h>
#include <asm/unaligned.h>
#define ARCH_MODULE_DEBUG 0
return (uint64_t) insn & 0x3;
}
-/* Patch instruction with "val" where "mask" has 1 bits. */
-static void
-apply (struct insn *insn, uint64_t mask, uint64_t val)
-{
- uint64_t m0, m1, v0, v1, b0, b1, *b = (uint64_t *) bundle(insn);
-# define insn_mask ((1UL << 41) - 1)
- unsigned long shift;
-
- b0 = b[0]; b1 = b[1];
- shift = 5 + 41 * slot(insn); /* 5 bits of template, then 3 x 41-bit instructions */
- if (shift >= 64) {
- m1 = mask << (shift - 64);
- v1 = val << (shift - 64);
- } else {
- m0 = mask << shift; m1 = mask >> (64 - shift);
- v0 = val << shift; v1 = val >> (64 - shift);
- b[0] = (b0 & ~m0) | (v0 & m0);
- }
- b[1] = (b1 & ~m1) | (v1 & m1);
-}
-
static int
apply_imm64 (struct module *mod, struct insn *insn, uint64_t val)
{
mod->name, slot(insn));
return 0;
}
- apply(insn, 0x01fffefe000, ( ((val & 0x8000000000000000) >> 27) /* bit 63 -> 36 */
- | ((val & 0x0000000000200000) << 0) /* bit 21 -> 21 */
- | ((val & 0x00000000001f0000) << 6) /* bit 16 -> 22 */
- | ((val & 0x000000000000ff80) << 20) /* bit 7 -> 27 */
- | ((val & 0x000000000000007f) << 13) /* bit 0 -> 13 */));
- apply((void *) insn - 1, 0x1ffffffffff, val >> 22);
+ ia64_patch_imm64((u64) insn, val);
return 1;
}
printk(KERN_ERR "%s: value %ld out of IMM60 range\n", mod->name, (int64_t) val);
return 0;
}
- apply(insn, 0x011ffffe000, ( ((val & 0x1000000000000000) >> 24) /* bit 60 -> 36 */
- | ((val & 0x00000000000fffff) << 13) /* bit 0 -> 13 */));
- apply((void *) insn - 1, 0x1fffffffffc, val >> 18);
+ ia64_patch_imm60((u64) insn, val);
return 1;
}
printk(KERN_ERR "%s: value %li out of IMM22 range\n", mod->name, (int64_t)val);
return 0;
}
- apply(insn, 0x01fffcfe000, ( ((val & 0x200000) << 15) /* bit 21 -> 36 */
- | ((val & 0x1f0000) << 6) /* bit 16 -> 22 */
- | ((val & 0x00ff80) << 20) /* bit 7 -> 27 */
- | ((val & 0x00007f) << 13) /* bit 0 -> 13 */));
+ ia64_patch((u64) insn, 0x01fffcfe000, ( ((val & 0x200000) << 15) /* bit 21 -> 36 */
+ | ((val & 0x1f0000) << 6) /* bit 16 -> 22 */
+ | ((val & 0x00ff80) << 20) /* bit 7 -> 27 */
+ | ((val & 0x00007f) << 13) /* bit 0 -> 13 */));
return 1;
}
printk(KERN_ERR "%s: value %li out of IMM21b range\n", mod->name, (int64_t)val);
return 0;
}
- apply(insn, 0x11ffffe000, ( ((val & 0x100000) << 16) /* bit 20 -> 36 */
- | ((val & 0x0fffff) << 13) /* bit 0 -> 13 */));
+ ia64_patch((u64) insn, 0x11ffffe000, ( ((val & 0x100000) << 16) /* bit 20 -> 36 */
+ | ((val & 0x0fffff) << 13) /* bit 0 -> 13 */));
return 1;
}
b0 = b[0]; b1 = b[1];
off = ( ((b1 & 0x00fffff000000000) >> 36) /* imm20b -> bit 0 */
| ((b0 >> 48) << 20) | ((b1 & 0x7fffff) << 36) /* imm39 -> bit 20 */
- | ((b1 & 0x0800000000000000) << 1)); /* i -> bit 60 */
+ | ((b1 & 0x0800000000000000) << 0)); /* i -> bit 59 */
return (long) plt->bundle[1] + 16*off;
}
if (gp_addressable(mod, val)) {
/* turn "ld8" into "mov": */
DEBUGP("%s: patching ld8 at %p to mov\n", __FUNCTION__, location);
- apply(location, 0x1fff80fe000, 0x10000000000);
+ ia64_patch((u64) location, 0x1fff80fe000, 0x10000000000);
}
return 0;
}
#ifdef CONFIG_SMP
-void percpu_modcopy(void *pcpudst, const void *src, unsigned long size)
+void
+percpu_modcopy (void *pcpudst, const void *src, unsigned long size)
{
unsigned int i;
for (i = 0; i < NR_CPUS; i++)
;;
mov loc4=ar.rsc // save RSE configuration
dep.z loc2=loc2,0,61 // convert pal entry point to physical
- dep.z r8=r8,0,61 // convert rp to physical
+ tpa r8=r8 // convert rp to physical
;;
mov b7 = loc2 // install target to branch reg
mov ar.rsc=0 // put RSE in enforced lazy, LE mode
or loc3=loc3,r17 // add in psr the bits to set
;;
andcm r16=loc3,r16 // removes bits to clear from psr
- br.call.sptk.many rp=ia64_switch_mode
+ br.call.sptk.many rp=ia64_switch_mode_phys
.ret1: mov rp = r8 // install return address (physical)
br.cond.sptk.many b7
1:
mov ar.rsc=0 // put RSE in enforced lazy, LE mode
mov r16=loc3 // r16= original psr
- br.call.sptk.many rp=ia64_switch_mode // return to virtual mode
+ br.call.sptk.many rp=ia64_switch_mode_virt // return to virtual mode
.ret2:
mov psr.l = loc3 // restore init PSR
mov b7 = loc2 // install target to branch reg
;;
andcm r16=loc3,r16 // removes bits to clear from psr
- br.call.sptk.many rp=ia64_switch_mode
+ br.call.sptk.many rp=ia64_switch_mode_phys
.ret6:
br.call.sptk.many rp=b7 // now make the call
.ret7:
mov ar.rsc=0 // put RSE in enforced lazy, LE mode
mov r16=loc3 // r16= original psr
- br.call.sptk.many rp=ia64_switch_mode // return to virtual mode
+ br.call.sptk.many rp=ia64_switch_mode_virt // return to virtual mode
.ret8: mov psr.l = loc3 // restore init PSR
mov ar.pfs = loc1
--- /dev/null
+/*
+ * Instruction-patching support.
+ *
+ * Copyright (C) 2003 Hewlett-Packard Co
+ * David Mosberger-Tang <davidm@hpl.hp.com>
+ */
+#include <linux/init.h>
+#include <linux/string.h>
+
+#include <asm/patch.h>
+#include <asm/processor.h>
+#include <asm/system.h>
+#include <asm/unistd.h>
+
+/*
+ * This was adapted from code written by Tony Luck:
+ *
+ * The 64-bit value in a "movl reg=value" is scattered between the two words of the bundle
+ * like this:
+ *
+ * 6 6 5 4 3 2 1
+ * 3210987654321098765432109876543210987654321098765432109876543210
+ * ABBBBBBBBBBBBBBBBBBBBBBBCCCCCCCCCCCCCCCCCCDEEEEEFFFFFFFFFGGGGGGG
+ *
+ * CCCCCCCCCCCCCCCCCCxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
+ * xxxxAFFFFFFFFFEEEEEDxGGGGGGGxxxxxxxxxxxxxBBBBBBBBBBBBBBBBBBBBBBB
+ */
+static u64
+get_imm64 (u64 insn_addr)
+{
+ u64 *p = (u64 *) (insn_addr & -16); /* mask out slot number */
+
+ return ( (p[1] & 0x0800000000000000UL) << 4) | /*A*/
+ ((p[1] & 0x00000000007fffffUL) << 40) | /*B*/
+ ((p[0] & 0xffffc00000000000UL) >> 24) | /*C*/
+ ((p[1] & 0x0000100000000000UL) >> 23) | /*D*/
+ ((p[1] & 0x0003e00000000000UL) >> 29) | /*E*/
+ ((p[1] & 0x07fc000000000000UL) >> 43) | /*F*/
+ ((p[1] & 0x000007f000000000UL) >> 36); /*G*/
+}
+
+/* Patch instruction with "val" where "mask" has 1 bits. */
+void
+ia64_patch (u64 insn_addr, u64 mask, u64 val)
+{
+ u64 m0, m1, v0, v1, b0, b1, *b = (u64 *) (insn_addr & -16);
+# define insn_mask ((1UL << 41) - 1)
+ unsigned long shift;
+
+ b0 = b[0]; b1 = b[1];
+ shift = 5 + 41 * (insn_addr % 16); /* 5 bits of template, then 3 x 41-bit instructions */
+ if (shift >= 64) {
+ m1 = mask << (shift - 64);
+ v1 = val << (shift - 64);
+ } else {
+ m0 = mask << shift; m1 = mask >> (64 - shift);
+ v0 = val << shift; v1 = val >> (64 - shift);
+ b[0] = (b0 & ~m0) | (v0 & m0);
+ }
+ b[1] = (b1 & ~m1) | (v1 & m1);
+}
+
+void
+ia64_patch_imm64 (u64 insn_addr, u64 val)
+{
+ ia64_patch(insn_addr,
+ 0x01fffefe000, ( ((val & 0x8000000000000000) >> 27) /* bit 63 -> 36 */
+ | ((val & 0x0000000000200000) << 0) /* bit 21 -> 21 */
+ | ((val & 0x00000000001f0000) << 6) /* bit 16 -> 22 */
+ | ((val & 0x000000000000ff80) << 20) /* bit 7 -> 27 */
+ | ((val & 0x000000000000007f) << 13) /* bit 0 -> 13 */));
+ ia64_patch(insn_addr - 1, 0x1ffffffffff, val >> 22);
+}
+
+void
+ia64_patch_imm60 (u64 insn_addr, u64 val)
+{
+ ia64_patch(insn_addr,
+ 0x011ffffe000, ( ((val & 0x1000000000000000) >> 24) /* bit 60 -> 36 */
+ | ((val & 0x00000000000fffff) << 13) /* bit 0 -> 13 */));
+ ia64_patch(insn_addr - 1, 0x1fffffffffc, val >> 18);
+}
+
+/*
+ * We need sometimes to load the physical address of a kernel
+ * object. Often we can convert the virtual address to physical
+ * at execution time, but sometimes (either for performance reasons
+ * or during error recovery) we cannot do this. Patch the marked
+ * bundles to load the physical address.
+ */
+void __init
+ia64_patch_vtop (unsigned long start, unsigned long end)
+{
+ s32 *offp = (s32 *) start;
+ u64 ip;
+
+ while (offp < (s32 *) end) {
+ ip = (u64) offp + *offp;
+
+ /* replace virtual address with corresponding physical address: */
+ ia64_patch_imm64(ip, ia64_tpa(get_imm64(ip)));
+ ia64_fc((void *) ip);
+ ++offp;
+ }
+ ia64_sync_i();
+ ia64_srlz_i();
+}
+
+void
+ia64_patch_mckinley_e9 (unsigned long start, unsigned long end)
+{
+ static int first_time = 1;
+ int need_workaround;
+ s32 *offp = (s32 *) start;
+ u64 *wp;
+
+ need_workaround = (local_cpu_data->family == 0x1f && local_cpu_data->model == 0);
+
+ if (first_time) {
+ first_time = 0;
+ if (need_workaround)
+ printk(KERN_INFO "Leaving McKinley Errata 9 workaround enabled\n");
+ else
+ printk(KERN_INFO "McKinley Errata 9 workaround not needed; "
+ "disabling it\n");
+ }
+ if (need_workaround)
+ return;
+
+ while (offp < (s32 *) end) {
+ wp = (u64 *) ia64_imva((char *) offp + *offp);
+ wp[0] = 0x0000000100000000;
+ wp[1] = 0x0004000000000200;
+ ia64_fc(wp);
+ ++offp;
+ }
+ ia64_sync_i();
+ ia64_srlz_i();
+}
+
+static void
+patch_fsyscall_table (unsigned long start, unsigned long end)
+{
+ extern unsigned long fsyscall_table[NR_syscalls];
+ s32 *offp = (s32 *) start;
+ u64 ip;
+
+ while (offp < (s32 *) end) {
+ ip = (u64) ia64_imva((char *) offp + *offp);
+ ia64_patch_imm64(ip, (u64) fsyscall_table);
+ ia64_fc((void *) ip);
+ ++offp;
+ }
+ ia64_sync_i();
+ ia64_srlz_i();
+}
+
+static void
+patch_brl_fsys_bubble_down (unsigned long start, unsigned long end)
+{
+ extern char fsys_bubble_down[];
+ s32 *offp = (s32 *) start;
+ u64 ip;
+
+ while (offp < (s32 *) end) {
+ ip = (u64) offp + *offp;
+ ia64_patch_imm60((u64) ia64_imva((void *) ip),
+ (u64) (fsys_bubble_down - (ip & -16)) / 16);
+ ia64_fc((void *) ip);
+ ++offp;
+ }
+ ia64_sync_i();
+ ia64_srlz_i();
+}
+
+void
+ia64_patch_gate (void)
+{
+ extern char __start_gate_mckinley_e9_patchlist;
+ extern char __end_gate_mckinley_e9_patchlist;
+ extern char __start_gate_vtop_patchlist;
+ extern char __end_gate_vtop_patchlist;
+ extern char __start_gate_fsyscall_patchlist;
+ extern char __end_gate_fsyscall_patchlist;
+ extern char __start_gate_brl_fsys_bubble_down_patchlist;
+ extern char __end_gate_brl_fsys_bubble_down_patchlist;
+# define START(name) ((unsigned long) &__start_gate_##name##_patchlist)
+# define END(name) ((unsigned long)&__end_gate_##name##_patchlist)
+
+ patch_fsyscall_table(START(fsyscall), END(fsyscall));
+ patch_brl_fsys_bubble_down(START(brl_fsys_bubble_down), END(brl_fsys_bubble_down));
+ ia64_patch_vtop(START(vtop), END(vtop));
+ ia64_patch_mckinley_e9(START(mckinley_e9), END(mckinley_e9));
+}
/*
- * This file implements the perfmon subsystem which is used
+ * This file implements the perfmon-2 subsystem which is used
* to program the IA-64 Performance Monitoring Unit (PMU).
*
- * Originally Written by Ganesh Venkitachalam, IBM Corp.
- * Copyright (C) 1999 Ganesh Venkitachalam <venkitac@us.ibm.com>
+ * The initial version of perfmon.c was written by
+ * Ganesh Venkitachalam, IBM Corp.
*
- * Modifications by Stephane Eranian, Hewlett-Packard Co.
- * Modifications by David Mosberger-Tang, Hewlett-Packard Co.
+ * Then it was modified for perfmon-1.x by Stephane Eranian and
+ * David Mosberger, Hewlett Packard Co.
+ *
+ * Version Perfmon-2.x is a rewrite of perfmon-1.x
+ * by Stephane Eranian, Hewlett Packard Co.
*
* Copyright (C) 1999-2003 Hewlett Packard Co
* Stephane Eranian <eranian@hpl.hp.com>
* David Mosberger-Tang <davidm@hpl.hp.com>
+ *
+ * More information about perfmon available at:
+ * http://www.hpl.hp.com/research/linux/perfmon
*/
#include <linux/config.h>
#include <linux/vmalloc.h>
#include <linux/mm.h>
#include <linux/sysctl.h>
-#include <linux/smp.h>
+#include <linux/list.h>
+#include <linux/file.h>
+#include <linux/poll.h>
+#include <linux/vfs.h>
+#include <linux/pagemap.h>
+#include <linux/mount.h>
+#include <linux/version.h>
#include <asm/bitops.h>
#include <asm/errno.h>
#include <asm/signal.h>
#include <asm/system.h>
#include <asm/uaccess.h>
-#include <asm/delay.h> /* for ia64_get_itc() */
+#include <asm/delay.h>
#ifdef CONFIG_PERFMON
-
/*
- * For PMUs which rely on the debug registers for some features, you must
- * you must enable the following flag to activate the support for
- * accessing the registers via the perfmonctl() interface.
+ * perfmon context state
*/
-#if defined(CONFIG_ITANIUM) || defined(CONFIG_MCKINLEY)
-#define PFM_PMU_USES_DBR 1
-#endif
+#define PFM_CTX_UNLOADED 1 /* context is not loaded onto any task */
+#define PFM_CTX_LOADED 2 /* context is loaded onto a task */
+#define PFM_CTX_MASKED 3 /* context is loaded but monitoring is masked due to overflow */
+#define PFM_CTX_ZOMBIE 4 /* owner of the context is closing it */
+#define PFM_CTX_TERMINATED 5 /* the task the context was loaded onto is gone */
-/*
- * perfmon context states
- */
-#define PFM_CTX_DISABLED 0
-#define PFM_CTX_ENABLED 1
+#define CTX_LOADED(c) (c)->ctx_state = PFM_CTX_LOADED
+#define CTX_UNLOADED(c) (c)->ctx_state = PFM_CTX_UNLOADED
+#define CTX_ZOMBIE(c) (c)->ctx_state = PFM_CTX_ZOMBIE
+#define CTX_DESTROYED(c) (c)->ctx_state = PFM_CTX_DESTROYED
+#define CTX_MASKED(c) (c)->ctx_state = PFM_CTX_MASKED
+#define CTX_TERMINATED(c) (c)->ctx_state = PFM_CTX_TERMINATED
+
+#define CTX_IS_UNLOADED(c) ((c)->ctx_state == PFM_CTX_UNLOADED)
+#define CTX_IS_LOADED(c) ((c)->ctx_state == PFM_CTX_LOADED)
+#define CTX_IS_ZOMBIE(c) ((c)->ctx_state == PFM_CTX_ZOMBIE)
+#define CTX_IS_MASKED(c) ((c)->ctx_state == PFM_CTX_MASKED)
+#define CTX_IS_TERMINATED(c) ((c)->ctx_state == PFM_CTX_TERMINATED)
+#define CTX_IS_DEAD(c) ((c)->ctx_state == PFM_CTX_TERMINATED || (c)->ctx_state == PFM_CTX_ZOMBIE)
+
+#define PFM_INVALID_ACTIVATION (~0UL)
-/*
- * Reset register flags
- */
-#define PFM_PMD_LONG_RESET 1
-#define PFM_PMD_SHORT_RESET 2
/*
- * Misc macros and definitions
+ * depth of message queue
*/
-#define PMU_FIRST_COUNTER 4
-#define PMU_MAX_PMCS 256
-#define PMU_MAX_PMDS 256
+#define PFM_MAX_MSGS 32
+#define PFM_CTXQ_EMPTY(g) ((g)->ctx_msgq_head == (g)->ctx_msgq_tail)
/*
* type of a PMU register (bitmask).
* bitmask structure:
* bit0 : register implemented
- * bit1 : end marker
+ * bit1 : end marker
* bit2-3 : reserved
- * bit4-7 : register type
+ * bit4 : pmc has pmc.pm
+ * bit5 : pmc controls a counter (has pmc.oi), pmd is used as counter
+ * bit6-7 : register type
* bit8-31: reserved
*/
+#define PFM_REG_NOTIMPL 0x0 /* not implemented at all */
#define PFM_REG_IMPL 0x1 /* register implemented */
#define PFM_REG_END 0x2 /* end marker */
#define PFM_REG_MONITOR (0x1<<4|PFM_REG_IMPL) /* a PMC with a pmc.pm field only */
-#define PFM_REG_COUNTING (0x2<<4|PFM_REG_IMPL) /* a PMC with a pmc.pm AND pmc.oi, a PMD used as a counter */
-#define PFM_REG_CONTROL (0x3<<4|PFM_REG_IMPL) /* PMU control register */
-#define PFM_REG_CONFIG (0x4<<4|PFM_REG_IMPL) /* refine configuration */
-#define PFM_REG_BUFFER (0x5<<4|PFM_REG_IMPL) /* PMD used as buffer */
+#define PFM_REG_COUNTING (0x2<<4|PFM_REG_MONITOR|PFM_REG_IMPL) /* a monitor + pmc.oi+ PMD used as a counter */
+#define PFM_REG_CONTROL (0x4<<4|PFM_REG_IMPL) /* PMU control register */
+#define PFM_REG_CONFIG (0x8<<4|PFM_REG_IMPL) /* configuration register */
+#define PFM_REG_BUFFER (0xc<<4|PFM_REG_IMPL) /* PMD used as buffer */
#define PMC_IS_LAST(i) (pmu_conf.pmc_desc[i].type & PFM_REG_END)
#define PMD_IS_LAST(i) (pmu_conf.pmd_desc[i].type & PFM_REG_END)
-#define PFM_IS_DISABLED() pmu_conf.disabled
+#define PFM_IS_DISABLED() (pmu_conf.enabled == 0)
-#define PMC_OVFL_NOTIFY(ctx, i) ((ctx)->ctx_soft_pmds[i].flags & PFM_REGFL_OVFL_NOTIFY)
-#define PFM_FL_INHERIT_MASK (PFM_FL_INHERIT_NONE|PFM_FL_INHERIT_ONCE|PFM_FL_INHERIT_ALL)
+#define PMC_OVFL_NOTIFY(ctx, i) ((ctx)->ctx_pmds[i].flags & PFM_REGFL_OVFL_NOTIFY)
-/* i assume unsigned */
+/* i assumed unsigned */
#define PMC_IS_IMPL(i) (i< PMU_MAX_PMCS && (pmu_conf.pmc_desc[i].type & PFM_REG_IMPL))
#define PMD_IS_IMPL(i) (i< PMU_MAX_PMDS && (pmu_conf.pmd_desc[i].type & PFM_REG_IMPL))
-/* XXX: these three assume that register i is implemented */
-#define PMD_IS_COUNTING(i) (pmu_conf.pmd_desc[i].type == PFM_REG_COUNTING)
-#define PMC_IS_COUNTING(i) (pmu_conf.pmc_desc[i].type == PFM_REG_COUNTING)
-#define PMC_IS_MONITOR(i) (pmu_conf.pmc_desc[i].type == PFM_REG_MONITOR)
+/* XXX: these assume that register i is implemented */
+#define PMD_IS_COUNTING(i) ((pmu_conf.pmd_desc[i].type & PFM_REG_COUNTING) == PFM_REG_COUNTING)
+#define PMC_IS_COUNTING(i) ((pmu_conf.pmc_desc[i].type & PFM_REG_COUNTING) == PFM_REG_COUNTING)
+#define PMC_IS_MONITOR(i) ((pmu_conf.pmc_desc[i].type & PFM_REG_MONITOR) == PFM_REG_MONITOR)
#define PMC_DFL_VAL(i) pmu_conf.pmc_desc[i].default_value
#define PMC_RSVD_MASK(i) pmu_conf.pmc_desc[i].reserved_mask
#define PMD_PMD_DEP(i) pmu_conf.pmd_desc[i].dep_pmd[0]
#define PMC_PMD_DEP(i) pmu_conf.pmc_desc[i].dep_pmd[0]
-/* k assume unsigned */
-#define IBR_IS_IMPL(k) (k<pmu_conf.num_ibrs)
-#define DBR_IS_IMPL(k) (k<pmu_conf.num_dbrs)
+/* k assumed unsigned (up to 64 registers) */
+#define IBR_IS_IMPL(k) (k< IA64_NUM_DBG_REGS)
+#define DBR_IS_IMPL(k) (k< IA64_NUM_DBG_REGS)
-#define CTX_IS_ENABLED(c) ((c)->ctx_flags.state == PFM_CTX_ENABLED)
#define CTX_OVFL_NOBLOCK(c) ((c)->ctx_fl_block == 0)
-#define CTX_INHERIT_MODE(c) ((c)->ctx_fl_inherit)
-#define CTX_HAS_SMPL(c) ((c)->ctx_psb != NULL)
+#define CTX_HAS_SMPL(c) ((c)->ctx_fl_is_sampling)
+#define PFM_CTX_TASK(h) (h)->ctx_task
+
/* XXX: does not support more than 64 PMDs */
#define CTX_USED_PMD(ctx, mask) (ctx)->ctx_used_pmds[0] |= (mask)
#define CTX_IS_USED_PMD(ctx, c) (((ctx)->ctx_used_pmds[0] & (1UL << (c))) != 0UL)
+#define CTX_USED_MONITOR(ctx, mask) (ctx)->ctx_used_monitors[0] |= (mask)
#define CTX_USED_IBR(ctx,n) (ctx)->ctx_used_ibrs[(n)>>6] |= 1UL<< ((n) % 64)
#define CTX_USED_DBR(ctx,n) (ctx)->ctx_used_dbrs[(n)>>6] |= 1UL<< ((n) % 64)
#define CTX_USES_DBREGS(ctx) (((pfm_context_t *)(ctx))->ctx_fl_using_dbreg==1)
+#define PFM_CODE_RR 0 /* requesting code range restriction */
+#define PFM_DATA_RR 1 /* requesting data range restriction */
+
+#define PFM_CPUINFO_CLEAR(v) pfm_get_cpu_var(pfm_syst_info) &= ~(v)
+#define PFM_CPUINFO_SET(v) pfm_get_cpu_var(pfm_syst_info) |= (v)
+#define PFM_CPUINFO_GET() pfm_get_cpu_var(pfm_syst_info)
+
+/*
+ * context protection macros
+ * in SMP:
+ * - we need to protect against CPU concurrency (spin_lock)
+ * - we need to protect against PMU overflow interrupts (local_irq_disable)
+ * in UP:
+ * - we need to protect against PMU overflow interrupts (local_irq_disable)
+ *
+ * spin_lock_irqsave()/spin_lock_irqrestore():
+ * in SMP: local_irq_disable + spin_lock
+ * in UP : local_irq_disable
+ *
+ * spin_lock()/spin_unlock():
+ * in UP : removed automatically
+ * in SMP: protect against context accesses from other CPU. interrupts
+ * are not masked. This is useful for the PMU interrupt handler
+ * because we know we will not get PMU concurrency in that code.
+ */
+#define PROTECT_CTX(c, f) \
+ do { \
+ DPRINT(("spinlock_irq_save ctx %p by [%d]\n", c, current->pid)); \
+ spin_lock_irqsave(&(c)->ctx_lock, f); \
+ DPRINT(("spinlocked ctx %p by [%d]\n", c, current->pid)); \
+ } while(0)
+
+#define UNPROTECT_CTX(c, f) \
+ do { \
+ DPRINT(("spinlock_irq_restore ctx %p by [%d]\n", c, current->pid)); \
+ spin_unlock_irqrestore(&(c)->ctx_lock, f); \
+ } while(0)
+
+#define PROTECT_CTX_NOPRINT(c, f) \
+ do { \
+ spin_lock_irqsave(&(c)->ctx_lock, f); \
+ } while(0)
+
+
+#define UNPROTECT_CTX_NOPRINT(c, f) \
+ do { \
+ spin_unlock_irqrestore(&(c)->ctx_lock, f); \
+ } while(0)
+
+
+#define PROTECT_CTX_NOIRQ(c) \
+ do { \
+ spin_lock(&(c)->ctx_lock); \
+ } while(0)
+
+#define UNPROTECT_CTX_NOIRQ(c) \
+ do { \
+ spin_unlock(&(c)->ctx_lock); \
+ } while(0)
+
+
+#ifdef CONFIG_SMP
-#define LOCK_CTX(ctx) spin_lock(&(ctx)->ctx_lock)
-#define UNLOCK_CTX(ctx) spin_unlock(&(ctx)->ctx_lock)
+#define GET_ACTIVATION() pfm_get_cpu_var(pmu_activation_number)
+#define INC_ACTIVATION() pfm_get_cpu_var(pmu_activation_number)++
+#define SET_ACTIVATION(c) (c)->ctx_last_activation = GET_ACTIVATION()
-#define SET_PMU_OWNER(t) do { pmu_owners[smp_processor_id()].owner = (t); } while(0)
-#define PMU_OWNER() pmu_owners[smp_processor_id()].owner
+#else /* !CONFIG_SMP */
+#define SET_ACTIVATION(t) do {} while(0)
+#define GET_ACTIVATION(t) do {} while(0)
+#define INC_ACTIVATION(t) do {} while(0)
+#endif /* CONFIG_SMP */
-#define LOCK_PFS() spin_lock(&pfm_sessions.pfs_lock)
-#define UNLOCK_PFS() spin_unlock(&pfm_sessions.pfs_lock)
+#define SET_PMU_OWNER(t, c) do { pfm_get_cpu_var(pmu_owner) = (t); pfm_get_cpu_var(pmu_ctx) = (c); } while(0)
+#define GET_PMU_OWNER() pfm_get_cpu_var(pmu_owner)
+#define GET_PMU_CTX() pfm_get_cpu_var(pmu_ctx)
+
+#define LOCK_PFS() spin_lock(&pfm_sessions.pfs_lock)
+#define UNLOCK_PFS() spin_unlock(&pfm_sessions.pfs_lock)
#define PFM_REG_RETFLAG_SET(flags, val) do { flags &= ~PFM_REG_RETFL_MASK; flags |= (val); } while(0)
-#define PFM_CPUINFO_CLEAR(v) __get_cpu_var(pfm_syst_info) &= ~(v)
-#define PFM_CPUINFO_SET(v) __get_cpu_var(pfm_syst_info) |= (v)
+#ifdef CONFIG_SMP
+#define PFM_CPU_ONLINE_MAP cpu_online_map
+#define cpu_is_online(i) (PFM_CPU_ONLINE_MAP & (1UL << i))
+#else
+#define PFM_CPU_ONLINE_MAP 1UL
+#define cpu_is_online(i) (i==0)
+#endif
+
+/*
+ * cmp0 must be the value of pmc0
+ */
+#define PMC0_HAS_OVFL(cmp0) (cmp0 & ~0x1UL)
/*
* debugging
*/
-#define DBprintk(a) \
+#define DPRINT(a) \
do { \
- if (pfm_sysctl.debug >0) { printk("%s.%d: CPU%d ", __FUNCTION__, __LINE__, smp_processor_id()); printk a; } \
+ if (unlikely(pfm_sysctl.debug >0)) { printk("%s.%d: CPU%d [%d] ", __FUNCTION__, __LINE__, smp_processor_id(), current->pid); printk a; } \
} while (0)
-#define DBprintk_ovfl(a) \
+#define DPRINT_ovfl(a) \
do { \
- if (pfm_sysctl.debug > 0 && pfm_sysctl.debug_ovfl >0) { printk("%s.%d: CPU%d ", __FUNCTION__, __LINE__, smp_processor_id()); printk a; } \
+ if (unlikely(pfm_sysctl.debug > 0 && pfm_sysctl.debug_ovfl >0)) { printk("%s.%d: CPU%d [%d] ", __FUNCTION__, __LINE__, smp_processor_id(), current->pid); printk a; } \
} while (0)
-
-
-
-/*
+/*
* Architected PMC structure
*/
typedef struct {
} pfm_monitor_t;
/*
- * There is one such data structure per perfmon context. It is used to describe the
- * sampling buffer. It is to be shared among siblings whereas the pfm_context
- * is not.
- * Therefore we maintain a refcnt which is incremented on fork().
- * This buffer is private to the kernel only the actual sampling buffer
- * including its header are exposed to the user. This construct allows us to
- * export the buffer read-write, if needed, without worrying about security
- * problems.
- */
-typedef struct _pfm_smpl_buffer_desc {
- spinlock_t psb_lock; /* protection lock */
- unsigned long psb_refcnt; /* how many users for the buffer */
- int psb_flags; /* bitvector of flags (not yet used) */
-
- void *psb_addr; /* points to location of first entry */
- unsigned long psb_entries; /* maximum number of entries */
- unsigned long psb_size; /* aligned size of buffer */
- unsigned long psb_index; /* next free entry slot XXX: must use the one in buffer */
- unsigned long psb_entry_size; /* size of each entry including entry header */
-
- perfmon_smpl_hdr_t *psb_hdr; /* points to sampling buffer header */
-
- struct _pfm_smpl_buffer_desc *psb_next; /* next psb, used for rvfreeing of psb_hdr */
-
-} pfm_smpl_buffer_desc_t;
-
-/*
- * psb_flags
- */
-#define PSB_HAS_VMA 0x1 /* a virtual mapping for the buffer exists */
-
-#define LOCK_PSB(p) spin_lock(&(p)->psb_lock)
-#define UNLOCK_PSB(p) spin_unlock(&(p)->psb_lock)
-
-/*
* 64-bit software counter structure
*/
typedef struct {
- u64 val; /* virtual 64bit counter value */
- u64 lval; /* last value */
- u64 long_reset; /* reset value on sampling overflow */
- u64 short_reset;/* reset value on overflow */
- u64 reset_pmds[4]; /* which other pmds to reset when this counter overflows */
- u64 seed; /* seed for random-number generator */
- u64 mask; /* mask for random-number generator */
- unsigned int flags; /* notify/do not notify */
+ unsigned long val; /* virtual 64bit counter value */
+ unsigned long lval; /* last reset value */
+ unsigned long long_reset; /* reset value on sampling overflow */
+ unsigned long short_reset; /* reset value on overflow */
+ unsigned long reset_pmds[4]; /* which other pmds to reset when this counter overflows */
+ unsigned long smpl_pmds[4]; /* which pmds are accessed when counter overflow */
+ unsigned long seed; /* seed for random-number generator */
+ unsigned long mask; /* mask for random-number generator */
+ unsigned int flags; /* notify/do not notify */
+ unsigned int reserved; /* for future use */
+ unsigned long eventid; /* overflow event identifier */
} pfm_counter_t;
/*
- * perfmon context. One per process, is cloned on fork() depending on
- * inheritance flags
+ * context flags
*/
typedef struct {
- unsigned int state:1; /* 0=disabled, 1=enabled */
- unsigned int inherit:2; /* inherit mode */
unsigned int block:1; /* when 1, task will blocked on user notifications */
unsigned int system:1; /* do system wide monitoring */
- unsigned int frozen:1; /* pmu must be kept frozen on ctxsw in */
- unsigned int protected:1; /* allow access to creator of context only */
unsigned int using_dbreg:1; /* using range restrictions (debug registers) */
+ unsigned int is_sampling:1; /* true if using a custom format */
unsigned int excl_idle:1; /* exclude idle task in system wide session */
- unsigned int unsecure:1; /* sp = 0 for non self-monitored task */
- unsigned int trap_reason:2; /* reason for going into pfm_block_ovfl_reset() */
- unsigned int reserved:20;
+ unsigned int unsecure:1; /* sp = 0 for non self-monitored task */
+ unsigned int going_zombie:1; /* context is zombie (MASKED+blocking) */
+ unsigned int trap_reason:2; /* reason for going into pfm_handle_work() */
+ unsigned int no_msg:1; /* no message sent on overflow */
+ unsigned int reserved:22;
} pfm_context_flags_t;
#define PFM_TRAP_REASON_NONE 0x0 /* default value */
-#define PFM_TRAP_REASON_BLOCKSIG 0x1 /* we need to block on overflow and signal user */
-#define PFM_TRAP_REASON_SIG 0x2 /* we simply need to signal user */
-#define PFM_TRAP_REASON_RESET 0x3 /* we need to reset PMDs */
+#define PFM_TRAP_REASON_BLOCK 0x1 /* we need to block on overflow */
+#define PFM_TRAP_REASON_RESET 0x2 /* we need to reset PMDs */
+
/*
* perfmon context: encapsulates all the state of a monitoring session
- * XXX: probably need to change layout
*/
+
typedef struct pfm_context {
- pfm_smpl_buffer_desc_t *ctx_psb; /* sampling buffer, if any */
- unsigned long ctx_smpl_vaddr; /* user level virtual address of smpl buffer */
+ spinlock_t ctx_lock; /* context protection */
- spinlock_t ctx_lock;
- pfm_context_flags_t ctx_flags; /* block/noblock */
+ pfm_context_flags_t ctx_flags; /* bitmask of flags (block reason incl.) */
+ unsigned int ctx_state; /* state: active/inactive (no bitfield) */
- struct task_struct *ctx_notify_task; /* who to notify on overflow */
- struct task_struct *ctx_owner; /* pid of creator (debug) */
+ struct task_struct *ctx_task; /* task to which context is attached */
unsigned long ctx_ovfl_regs[4]; /* which registers overflowed (notification) */
- unsigned long ctx_smpl_regs[4]; /* which registers to record on overflow */
struct semaphore ctx_restart_sem; /* use for blocking notification mode */
- unsigned long ctx_used_pmds[4]; /* bitmask of PMD used */
- unsigned long ctx_reload_pmds[4]; /* bitmask of PMD to reload on ctxsw */
+ unsigned long ctx_used_pmds[4]; /* bitmask of PMD used */
+ unsigned long ctx_all_pmds[4]; /* bitmask of all accessible PMDs */
+ unsigned long ctx_reload_pmds[4]; /* bitmask of force reload PMD on ctxsw in */
+
+ unsigned long ctx_all_pmcs[4]; /* bitmask of all accessible PMCs */
+ unsigned long ctx_reload_pmcs[4]; /* bitmask of force reload PMC on ctxsw in */
+ unsigned long ctx_used_monitors[4]; /* bitmask of monitor PMC being used */
+
+ unsigned long ctx_pmcs[IA64_NUM_PMC_REGS]; /* saved copies of PMC values */
+
+ unsigned int ctx_used_ibrs[1]; /* bitmask of used IBR (speedup ctxsw in) */
+ unsigned int ctx_used_dbrs[1]; /* bitmask of used DBR (speedup ctxsw in) */
+ unsigned long ctx_dbrs[IA64_NUM_DBG_REGS]; /* DBR values (cache) when not loaded */
+ unsigned long ctx_ibrs[IA64_NUM_DBG_REGS]; /* IBR values (cache) when not loaded */
+
+ pfm_counter_t ctx_pmds[IA64_NUM_PMD_REGS]; /* software state for PMDS */
- unsigned long ctx_used_pmcs[4]; /* bitmask PMC used by context */
- unsigned long ctx_reload_pmcs[4]; /* bitmask of PMC to reload on ctxsw */
+ u64 ctx_saved_psr; /* copy of psr used for ctxsw */
- unsigned long ctx_used_ibrs[4]; /* bitmask of used IBR (speedup ctxsw) */
- unsigned long ctx_used_dbrs[4]; /* bitmask of used DBR (speedup ctxsw) */
+ unsigned long ctx_last_activation; /* context last activation number for last_cpu */
+ unsigned int ctx_last_cpu; /* CPU id of current or last CPU used (SMP only) */
+ unsigned int ctx_cpu; /* cpu to which perfmon is applied (system wide) */
- pfm_counter_t ctx_soft_pmds[IA64_NUM_PMD_REGS]; /* XXX: size should be dynamic */
+ int ctx_fd; /* file descriptor used by this context */
- u64 ctx_saved_psr; /* copy of psr used for lazy ctxsw */
- unsigned long ctx_saved_cpus_allowed; /* copy of the task cpus_allowed (system wide) */
- unsigned int ctx_cpu; /* CPU used by system wide session */
+ pfm_buffer_fmt_t *ctx_buf_fmt; /* buffer format callbacks */
+ void *ctx_smpl_hdr; /* points to sampling buffer header kernel vaddr */
+ unsigned long ctx_smpl_size; /* size of sampling buffer */
+ void *ctx_smpl_vaddr; /* user level virtual address of smpl buffer */
- atomic_t ctx_last_cpu; /* CPU id of current or last CPU used */
+ wait_queue_head_t ctx_msgq_wait;
+ pfm_msg_t ctx_msgq[PFM_MAX_MSGS];
+ int ctx_msgq_head;
+ int ctx_msgq_tail;
+ struct fasync_struct *ctx_async_queue;
+
+ wait_queue_head_t ctx_zombieq; /* termination cleanup wait queue */
} pfm_context_t;
-#define ctx_fl_inherit ctx_flags.inherit
+/*
+ * magic number used to verify that structure is really
+ * a perfmon context
+ */
+#define PFM_IS_FILE(f) ((f)->f_op == &pfm_file_ops)
+
+#define PFM_GET_CTX(t) ((pfm_context_t *)(t)->thread.pfm_context)
+
+#ifdef CONFIG_SMP
+#define SET_LAST_CPU(ctx, v) (ctx)->ctx_last_cpu = (v)
+#define GET_LAST_CPU(ctx) (ctx)->ctx_last_cpu
+#else
+#define SET_LAST_CPU(ctx, v) do {} while(0)
+#define GET_LAST_CPU(ctx) do {} while(0)
+#endif
+
+
#define ctx_fl_block ctx_flags.block
#define ctx_fl_system ctx_flags.system
-#define ctx_fl_frozen ctx_flags.frozen
-#define ctx_fl_protected ctx_flags.protected
#define ctx_fl_using_dbreg ctx_flags.using_dbreg
+#define ctx_fl_is_sampling ctx_flags.is_sampling
#define ctx_fl_excl_idle ctx_flags.excl_idle
-#define ctx_fl_trap_reason ctx_flags.trap_reason
#define ctx_fl_unsecure ctx_flags.unsecure
+#define ctx_fl_going_zombie ctx_flags.going_zombie
+#define ctx_fl_trap_reason ctx_flags.trap_reason
+#define ctx_fl_no_msg ctx_flags.no_msg
+
+#define PFM_SET_WORK_PENDING(t, v) do { (t)->thread.pfm_needs_checking = v; } while(0);
+#define PFM_GET_WORK_PENDING(t) (t)->thread.pfm_needs_checking
/*
* global information about all sessions
typedef struct {
spinlock_t pfs_lock; /* lock the structure */
- unsigned int pfs_task_sessions; /* number of per task sessions */
+ unsigned int pfs_task_sessions; /* number of per task sessions */
unsigned int pfs_sys_sessions; /* number of per system wide sessions */
unsigned int pfs_sys_use_dbregs; /* incremented when a system wide session uses debug regs */
unsigned int pfs_ptrace_use_dbregs; /* incremented when a process uses debug regs */
/*
* information about a PMC or PMD.
- * dep_pmd[]: a bitmask of dependent PMD registers
+ * dep_pmd[]: a bitmask of dependent PMD registers
* dep_pmc[]: a bitmask of dependent PMC registers
*/
typedef struct {
int pm_pos;
unsigned long default_value; /* power-on default value */
unsigned long reserved_mask; /* bitmask of reserved bits */
- int (*read_check)(struct task_struct *task, unsigned int cnum, unsigned long *val, struct pt_regs *regs);
- int (*write_check)(struct task_struct *task, unsigned int cnum, unsigned long *val, struct pt_regs *regs);
+ int (*read_check)(struct task_struct *task, pfm_context_t *ctx, unsigned int cnum, unsigned long *val, struct pt_regs *regs);
+ int (*write_check)(struct task_struct *task, pfm_context_t *ctx, unsigned int cnum, unsigned long *val, struct pt_regs *regs);
unsigned long dep_pmd[4];
unsigned long dep_pmc[4];
} pfm_reg_desc_t;
* a description of the PMU main characteristics.
*/
typedef struct {
- unsigned int disabled; /* indicates if perfmon is working properly */
- unsigned long ovfl_val; /* overflow value for generic counters */
- unsigned long impl_pmcs[4]; /* bitmask of implemented PMCS */
- unsigned long impl_pmds[4]; /* bitmask of implemented PMDS */
- unsigned int num_pmcs; /* number of implemented PMCS */
- unsigned int num_pmds; /* number of implemented PMDS */
- unsigned int num_ibrs; /* number of implemented IBRS */
- unsigned int num_dbrs; /* number of implemented DBRS */
- unsigned int num_counters; /* number of PMD/PMC counters */
+ unsigned long ovfl_val; /* overflow value for counters */
+
pfm_reg_desc_t *pmc_desc; /* detailed PMC register dependencies descriptions */
pfm_reg_desc_t *pmd_desc; /* detailed PMD register dependencies descriptions */
+
+ unsigned int num_pmcs; /* number of PMCS: computed at init time */
+ unsigned int num_pmds; /* number of PMDS: computed at init time */
+ unsigned long impl_pmcs[4]; /* bitmask of implemented PMCS */
+ unsigned long impl_pmds[4]; /* bitmask of implemented PMDS */
+
+ char *pmu_name; /* PMU family name */
+ unsigned int enabled; /* indicates if perfmon initialized properly */
+ unsigned int pmu_family; /* cpuid family pattern used to identify pmu */
+
+ unsigned int num_ibrs; /* number of IBRS: computed at init time */
+ unsigned int num_dbrs; /* number of DBRS: computed at init time */
+ unsigned int num_counters; /* PMC/PMD counting pairs : computed at init time */
+
+ unsigned int use_rr_dbregs:1; /* set if debug registers used for range restriction */
} pmu_config_t;
/*
- * structure used to pass argument to/from remote CPU
- * using IPI to check and possibly save the PMU context on SMP systems.
- *
- * not used in UP kernels
+ * debug register related type definitions
*/
typedef struct {
- struct task_struct *task; /* which task we are interested in */
- int retval; /* return value of the call: 0=you can proceed, 1=need to wait for completion */
-} pfm_smp_ipi_arg_t;
+ unsigned long ibr_mask:56;
+ unsigned long ibr_plm:4;
+ unsigned long ibr_ig:3;
+ unsigned long ibr_x:1;
+} ibr_mask_reg_t;
+
+typedef struct {
+ unsigned long dbr_mask:56;
+ unsigned long dbr_plm:4;
+ unsigned long dbr_ig:2;
+ unsigned long dbr_w:1;
+ unsigned long dbr_r:1;
+} dbr_mask_reg_t;
+
+typedef union {
+ unsigned long val;
+ ibr_mask_reg_t ibr;
+ dbr_mask_reg_t dbr;
+} dbreg_t;
+
/*
* perfmon command descriptions
*/
typedef struct {
- int (*cmd_func)(struct task_struct *task, pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs);
+ int (*cmd_func)(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs);
+ char *cmd_name;
int cmd_flags;
unsigned int cmd_narg;
size_t cmd_argsize;
+ int (*cmd_getsize)(void *arg, size_t *sz);
} pfm_cmd_desc_t;
-#define PFM_CMD_PID 0x1 /* command requires pid argument */
-#define PFM_CMD_ARG_READ 0x2 /* command must read argument(s) */
-#define PFM_CMD_ARG_RW 0x4 /* command must read/write argument(s) */
-#define PFM_CMD_CTX 0x8 /* command needs a perfmon context */
-#define PFM_CMD_NOCHK 0x10 /* command does not need to check task's state */
+#define PFM_CMD_FD 0x01 /* command requires a file descriptor */
+#define PFM_CMD_ARG_READ 0x02 /* command must read argument(s) */
+#define PFM_CMD_ARG_RW 0x04 /* command must read/write argument(s) */
+#define PFM_CMD_STOP 0x08 /* command does not work on zombie context */
-#define PFM_CMD_IDX(cmd) (cmd)
-#define PFM_CMD_IS_VALID(cmd) ((PFM_CMD_IDX(cmd) >= 0) \
- && (PFM_CMD_IDX(cmd) < (int) PFM_CMD_COUNT) \
- && pfm_cmd_tab[PFM_CMD_IDX(cmd)].cmd_func != NULL)
+#define PFM_CMD_IDX(cmd) (cmd)
+#define PFM_CMD_IS_VALID(cmd) ((PFM_CMD_IDX(cmd) >= 0) && (PFM_CMD_IDX(cmd) < PFM_CMD_COUNT) \
+ && pfm_cmd_tab[PFM_CMD_IDX(cmd)].cmd_func != NULL)
-#define PFM_CMD_USE_PID(cmd) ((pfm_cmd_tab[PFM_CMD_IDX(cmd)].cmd_flags & PFM_CMD_PID) != 0)
-#define PFM_CMD_READ_ARG(cmd) ((pfm_cmd_tab[PFM_CMD_IDX(cmd)].cmd_flags & PFM_CMD_ARG_READ) != 0)
-#define PFM_CMD_RW_ARG(cmd) ((pfm_cmd_tab[PFM_CMD_IDX(cmd)].cmd_flags & PFM_CMD_ARG_RW) != 0)
-#define PFM_CMD_USE_CTX(cmd) ((pfm_cmd_tab[PFM_CMD_IDX(cmd)].cmd_flags & PFM_CMD_CTX) != 0)
-#define PFM_CMD_CHK(cmd) ((pfm_cmd_tab[PFM_CMD_IDX(cmd)].cmd_flags & PFM_CMD_NOCHK) == 0)
+#define PFM_CMD_NAME(cmd) pfm_cmd_tab[PFM_CMD_IDX(cmd)].cmd_name
+#define PFM_CMD_READ_ARG(cmd) (pfm_cmd_tab[PFM_CMD_IDX(cmd)].cmd_flags & PFM_CMD_ARG_READ)
+#define PFM_CMD_RW_ARG(cmd) (pfm_cmd_tab[PFM_CMD_IDX(cmd)].cmd_flags & PFM_CMD_ARG_RW)
+#define PFM_CMD_USE_FD(cmd) (pfm_cmd_tab[PFM_CMD_IDX(cmd)].cmd_flags & PFM_CMD_FD)
+#define PFM_CMD_STOPPED(cmd) (pfm_cmd_tab[PFM_CMD_IDX(cmd)].cmd_flags & PFM_CMD_STOP)
#define PFM_CMD_ARG_MANY -1 /* cannot be zero */
#define PFM_CMD_NARG(cmd) (pfm_cmd_tab[PFM_CMD_IDX(cmd)].cmd_narg)
#define PFM_CMD_ARG_SIZE(cmd) (pfm_cmd_tab[PFM_CMD_IDX(cmd)].cmd_argsize)
+#define PFM_CMD_GETSIZE(cmd) (pfm_cmd_tab[PFM_CMD_IDX(cmd)].cmd_getsize)
typedef struct {
int debug; /* turn on/off debugging via syslog */
int debug_ovfl; /* turn on/off debug printk in overflow handler */
int fastctxsw; /* turn on/off fast (unsecure) ctxsw */
+ int debug_pfm_read;
} pfm_sysctl_t;
typedef struct {
- unsigned long pfm_spurious_ovfl_intr_count; /* keep track of spurious ovfl interrupts */
- unsigned long pfm_ovfl_intr_count; /* keep track of ovfl interrupts */
- unsigned long pfm_recorded_samples_count;
- unsigned long pfm_full_smpl_buffer_count; /* how many times the sampling buffer was full */
+ unsigned long pfm_spurious_ovfl_intr_count; /* keep track of spurious ovfl interrupts */
+ unsigned long pfm_ovfl_intr_count; /* keep track of ovfl interrupts */
+ unsigned long pfm_ovfl_intr_cycles; /* cycles spent processing ovfl interrupts */
+ unsigned long pfm_ovfl_intr_cycles_min; /* min cycles spent processing ovfl interrupts */
+ unsigned long pfm_ovfl_intr_cycles_max; /* max cycles spent processing ovfl interrupts */
+ unsigned long pfm_sysupdt_count;
+ unsigned long pfm_sysupdt_cycles;
+ unsigned long pfm_smpl_handler_calls;
+ unsigned long pfm_smpl_handler_cycles;
char pad[SMP_CACHE_BYTES] ____cacheline_aligned;
} pfm_stats_t;
/*
* perfmon internal variables
*/
-static pfm_session_t pfm_sessions; /* global sessions information */
-static struct proc_dir_entry *perfmon_dir; /* for debug only */
-static pfm_stats_t pfm_stats[NR_CPUS];
-static pfm_intr_handler_desc_t *pfm_alternate_intr_handler;
+static pfm_stats_t pfm_stats[NR_CPUS];
+static pfm_session_t pfm_sessions; /* global sessions information */
-DEFINE_PER_CPU(unsigned long, pfm_syst_info);
+static struct proc_dir_entry *perfmon_dir;
+static pfm_uuid_t pfm_null_uuid = {0,};
+
+static spinlock_t pfm_smpl_fmt_lock;
+static pfm_buffer_fmt_t *pfm_buffer_fmt_list;
+#define LOCK_BUF_FMT_LIST() spin_lock(&pfm_smpl_fmt_lock)
+#define UNLOCK_BUF_FMT_LIST() spin_unlock(&pfm_smpl_fmt_lock)
/* sysctl() controls */
static pfm_sysctl_t pfm_sysctl;
+int pfm_debug_var;
static ctl_table pfm_ctl_table[]={
{1, "debug", &pfm_sysctl.debug, sizeof(int), 0666, NULL, &proc_dointvec, NULL,},
static void pfm_vm_close(struct vm_area_struct * area);
static struct vm_operations_struct pfm_vm_ops={
- .close = pfm_vm_close
+ close: pfm_vm_close
};
/*
- * keep track of task owning the PMU per CPU.
+ * Linux 2.5 vs. 2.4 helper macros and definitions
+ *
+ * if not at least 2.5.69, then assume 2.4.x.
*/
-static struct {
- struct task_struct *owner;
- char pad[SMP_CACHE_BYTES] ____cacheline_aligned;
-} pmu_owners[NR_CPUS];
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,5,69)
+
+#define PFM_COMPILED_FOR_2_4 1
+
+#include <linux/wrapper.h>
+
+#define pfm_get_cpu_var(v) local_cpu_data->v
+#define pfm_get_cpu_data(a,b) cpu_data((b))->a
+typedef void pfm_irq_handler_t;
+#define PFM_IRQ_HANDLER_RET(v)
+
+#define DEFINE_PER_CPU(a,b)
+
+static inline int
+pfm_wait_task_inactive(struct task_struct *task)
+{
+#ifdef CONFIG_SMP
+ /* Make sure the child gets off its CPU.. */
+ for (;;) {
+ task_lock(task);
+ if (!task_has_cpu(task)) break;
+ task_unlock(task);
+ do {
+ if (task->state != TASK_STOPPED)
+ return -ESRCH;
+ barrier();
+ cpu_relax();
+ } while (task_has_cpu(task));
+ }
+ task_unlock(task);
+#endif
+ return 0;
+}
+
+static inline void
+pfm_put_task(struct task_struct *task)
+{
+ if (task != current) free_task_struct(task);
+}
+
+static inline void
+pfm_set_task_notify(struct task_struct *task)
+{
+}
+
+static inline void
+pfm_clear_task_notify(void)
+{
+}
+
+static inline void
+pfm_reserve_page(unsigned long a)
+{
+ unsigned long page;
+
+ page = ia64_tpa(a);
+ mem_map_reserve(virt_to_page(__va(page)));
+}
+
+static inline void
+pfm_unreserve_page(unsigned long a)
+{
+ unsigned long page;
+
+ page = ia64_tpa(a);
+ mem_map_unreserve(virt_to_page(__va(page)));
+}
+
+static inline int
+pfm_remap_page_range(struct vm_area_struct *vma, unsigned long from, unsigned long phys_addr, unsigned long size, pgprot_t prot)
+{
+ return remap_page_range(from, phys_addr, size, prot);
+}
+
+/*
+ * 2.4 compat: lock/unlock the context around a context switch.
+ * The returned "flags" value is never meaningful -- the 2.5 variant
+ * returns a constant 0UL as well -- so return a well-defined value.
+ */
+static inline unsigned long
+pfm_protect_ctx_ctxsw(pfm_context_t *x)
+{
+	spin_lock(&(x)->ctx_lock);
+	/* fix: previously returned the uninitialized local 'f' */
+	return 0UL;
+}
+
+static inline unsigned long
+pfm_unprotect_ctx_ctxsw(pfm_context_t *x, unsigned long f)
+{
+	spin_unlock(&(x)->ctx_lock);
+	/* fix: declared unsigned long but had no return statement */
+	return 0UL;
+}
+
+#else /* 2.5.69 or higher */
+
+#define pfm_wait_task_inactive(t) wait_task_inactive(t)
+#define pfm_get_cpu_var(v) __get_cpu_var(v)
+#define pfm_get_cpu_data(a,b) per_cpu(a, b)
+typedef irqreturn_t pfm_irq_handler_t;
+#define PFM_IRQ_HANDLER_RET(v) do { \
+ put_cpu_no_resched(); \
+ return IRQ_HANDLED; \
+ } while(0);
+
+static inline void
+pfm_put_task(struct task_struct *task)
+{
+ if (task != current) put_task_struct(task);
+}
+
+static inline void
+pfm_set_task_notify(struct task_struct *task)
+{
+ struct thread_info *info;
+
+ info = (struct thread_info *) ((char *) task + IA64_TASK_SIZE);
+ set_bit(TIF_NOTIFY_RESUME, &info->flags);
+}
+
+static inline void
+pfm_clear_task_notify(void)
+{
+ clear_thread_flag(TIF_NOTIFY_RESUME);
+}
+
+static inline void
+pfm_reserve_page(unsigned long a)
+{
+ SetPageReserved(vmalloc_to_page((void *)a));
+}
+static inline void
+pfm_unreserve_page(unsigned long a)
+{
+ ClearPageReserved(vmalloc_to_page((void*)a));
+}
+
+static inline int
+pfm_remap_page_range(struct vm_area_struct *vma, unsigned long from, unsigned long phys_addr, unsigned long size, pgprot_t prot)
+{
+ return remap_page_range(vma, from, phys_addr, size, prot);
+}
+
+static inline unsigned long
+pfm_protect_ctx_ctxsw(pfm_context_t *x)
+{
+ spin_lock_irq(&(x)->ctx_lock);
+ return 0UL;
+}
+
+static inline unsigned long
+pfm_unprotect_ctx_ctxsw(pfm_context_t *x, unsigned long f)
+{
+	/*
+	 * NOTE(review): pfm_protect_ctx_ctxsw() takes the lock with
+	 * spin_lock_irq(); interrupts are apparently left disabled here
+	 * on purpose (context-switch path) -- confirm against callers.
+	 */
+	spin_unlock(&(x)->ctx_lock);
+	/* fix: declared unsigned long but fell off the end without a return */
+	return 0UL;
+}
+
+#endif /* 2.5 vs. 2.4 */
+
+DEFINE_PER_CPU(unsigned long, pfm_syst_info);
+DEFINE_PER_CPU(struct task_struct *, pmu_owner);
+DEFINE_PER_CPU(pfm_context_t *, pmu_ctx);
+DEFINE_PER_CPU(unsigned long, pmu_activation_number);
+/* forward declaration */
+static struct file_operations pfm_file_ops;
/*
* forward declarations
*/
-static void pfm_reset_pmu(struct task_struct *);
+#ifndef CONFIG_SMP
static void pfm_lazy_save_regs (struct task_struct *ta);
+#endif
#if defined(CONFIG_ITANIUM)
#include "perfmon_itanium.h"
#include "perfmon_generic.h"
#endif
+static int pfm_end_notify_user(pfm_context_t *ctx);
+
static inline void
pfm_clear_psr_pp(void)
{
ia64_srlz_d();
}
+/*
+ * PMD[i] must be a counter. no check is made
+ */
static inline unsigned long
pfm_read_soft_counter(pfm_context_t *ctx, int i)
{
- return ctx->ctx_soft_pmds[i].val + (ia64_get_pmd(i) & pmu_conf.ovfl_val);
+ return ctx->ctx_pmds[i].val + (ia64_get_pmd(i) & pmu_conf.ovfl_val);
}
+/*
+ * PMD[i] must be a counter. no check is made
+ */
static inline void
pfm_write_soft_counter(pfm_context_t *ctx, int i, unsigned long val)
{
- ctx->ctx_soft_pmds[i].val = val & ~pmu_conf.ovfl_val;
+ ctx->ctx_pmds[i].val = val & ~pmu_conf.ovfl_val;
/*
* writing to unimplemented part is ignore, so we do not need to
* mask off top part
ia64_set_pmd(i, val & pmu_conf.ovfl_val);
}
-/*
- * Generates a unique (per CPU) timestamp
- */
-static inline unsigned long
-pfm_get_stamp(void)
+/*
+ * reserve the next free slot in the context message queue.
+ * Returns NULL when the queue is full (one slot is always left
+ * unused so that head == tail unambiguously means "empty").
+ */
+static pfm_msg_t *
+pfm_get_new_msg(pfm_context_t *ctx)
+{
+	int idx, next;
+
+	next = (ctx->ctx_msgq_tail+1) % PFM_MAX_MSGS;
+
+	/* fix: debug label said "ctx_fd=%p" but the value printed is ctx */
+	DPRINT(("ctx=%p head=%d tail=%d\n", ctx, ctx->ctx_msgq_head, ctx->ctx_msgq_tail));
+	if (next == ctx->ctx_msgq_head) return NULL;
+
+	idx = ctx->ctx_msgq_tail;
+	ctx->ctx_msgq_tail = next;
+
+	DPRINT(("ctx=%p head=%d tail=%d msg=%d\n", ctx, ctx->ctx_msgq_head, ctx->ctx_msgq_tail, idx));
+
+	return ctx->ctx_msgq+idx;
+}
+
+static pfm_msg_t *
+pfm_get_next_msg(pfm_context_t *ctx)
{
+ pfm_msg_t *msg;
+
+ DPRINT(("ctx=%p head=%d tail=%d\n", ctx, ctx->ctx_msgq_head, ctx->ctx_msgq_tail));
+
+ if (PFM_CTXQ_EMPTY(ctx)) return NULL;
+
+ /*
+ * get oldest message
+ */
+ msg = ctx->ctx_msgq+ctx->ctx_msgq_head;
+
/*
- * XXX: must find something more efficient
+ * and move forward
*/
- return ia64_get_itc();
+ ctx->ctx_msgq_head = (ctx->ctx_msgq_head+1) % PFM_MAX_MSGS;
+
+ DPRINT(("ctx=%p head=%d tail=%d type=%d\n", ctx, ctx->ctx_msgq_head, ctx->ctx_msgq_tail, msg->pfm_gen_msg.msg_type));
+
+ return msg;
+}
+
+static void
+pfm_reset_msgq(pfm_context_t *ctx)
+{
+ ctx->ctx_msgq_head = ctx->ctx_msgq_tail = 0;
+ DPRINT(("ctx=%p msgq reset\n", ctx));
}
+
/* Here we want the physical address of the memory.
* This is used when initializing the contents of the
* area and marking the pages as reserved.
pfm_kvirt_to_pa(unsigned long adr)
{
__u64 pa = ia64_tpa(adr);
- //DBprintk(("kv2pa(%lx-->%lx)\n", adr, pa));
return pa;
}
pfm_rvmalloc(unsigned long size)
{
void *mem;
- unsigned long adr;
+ unsigned long addr;
- size=PAGE_ALIGN(size);
- mem=vmalloc(size);
+ size = PAGE_ALIGN(size);
+ mem = vmalloc(size);
if (mem) {
//printk("perfmon: CPU%d pfm_rvmalloc(%ld)=%p\n", smp_processor_id(), size, mem);
- memset(mem, 0, size); /* Clear the ram out, no junk to the user */
- adr=(unsigned long) mem;
+ memset(mem, 0, size);
+ addr = (unsigned long)mem;
while (size > 0) {
- SetPageReserved(vmalloc_to_page((void *)adr));
- adr+=PAGE_SIZE;
+ pfm_reserve_page(addr);
+ addr+=PAGE_SIZE;
size-=PAGE_SIZE;
}
}
static void
pfm_rvfree(void *mem, unsigned long size)
{
- unsigned long adr;
+ unsigned long addr;
if (mem) {
- adr=(unsigned long) mem;
+ DPRINT(("freeing physical buffer @%p size=%lu\n", mem, size));
+ addr = (unsigned long) mem;
while ((long) size > 0) {
- ClearPageReserved(vmalloc_to_page((void*)adr));
- adr+=PAGE_SIZE;
+ pfm_unreserve_page(addr);
+ addr+=PAGE_SIZE;
size-=PAGE_SIZE;
}
vfree(mem);
return;
}
-/*
- * This function gets called from mm/mmap.c:exit_mmap() only when there is a sampling buffer
- * attached to the context AND the current task has a mapping for it, i.e., it is the original
- * creator of the context.
- *
- * This function is used to remember the fact that the vma describing the sampling buffer
- * has now been removed. It can only be called when no other tasks share the same mm context.
- *
- */
-static void
-pfm_vm_close(struct vm_area_struct *vma)
+/*
+ * Allocate and zero-initialize a new perfmon context descriptor.
+ * Returns NULL if kmalloc fails; counterpart of pfm_context_free().
+ */
+static pfm_context_t *
+pfm_context_alloc(void)
{
- pfm_smpl_buffer_desc_t *psb = (pfm_smpl_buffer_desc_t *)vma->vm_private_data;
+ pfm_context_t *ctx;
- if (psb == NULL) {
- printk(KERN_DEBUG "perfmon: psb is null in [%d]\n", current->pid);
- return;
+ /* allocate context descriptor */
+ ctx = kmalloc(sizeof(pfm_context_t), GFP_KERNEL);
+ if (ctx) {
+ memset(ctx, 0, sizeof(pfm_context_t));
+ DPRINT(("alloc ctx @%p\n", ctx));
}
- /*
- * Add PSB to list of buffers to free on release_thread() when no more users
- *
- * This call is safe because, once the count is zero is cannot be modified anymore.
- * This is not because there is no more user of the mm context, that the sampling
- * buffer is not being used anymore outside of this task. In fact, it can still
- * be accessed from within the kernel by another task (such as the monitored task).
- *
- * Therefore, we only move the psb into the list of buffers to free when we know
- * nobody else is using it.
- * The linked list if independent of the perfmon context, because in the case of
- * multi-threaded processes, the last thread may not have been involved with
- * monitoring however it will be the one removing the vma and it should therefore
- * also remove the sampling buffer. This buffer cannot be removed until the vma
- * is removed.
- *
- * This function cannot remove the buffer from here, because exit_mmap() must first
- * complete. Given that there is no other vma related callback in the generic code,
- * we have created our own with the linked list of sampling buffers to free. The list
- * is part of the thread structure. In release_thread() we check if the list is
- * empty. If not we call into perfmon to free the buffer and psb. That is the only
- * way to ensure a safe deallocation of the sampling buffer which works when
- * the buffer is shared between distinct processes or with multi-threaded programs.
- *
- * We need to lock the psb because the refcnt test and flag manipulation must
- * looked like an atomic operation vis a vis pfm_context_exit()
- */
- LOCK_PSB(psb);
-
- if (psb->psb_refcnt == 0) {
-
- psb->psb_next = current->thread.pfm_smpl_buf_list;
- current->thread.pfm_smpl_buf_list = psb;
+ return ctx;
+}
- DBprintk(("[%d] add smpl @%p size %lu to smpl_buf_list psb_flags=0x%x\n",
- current->pid, psb->psb_hdr, psb->psb_size, psb->psb_flags));
+/*
+ * Release a context descriptor obtained from pfm_context_alloc().
+ * A NULL ctx is tolerated (no-op).
+ */
+static void
+pfm_context_free(pfm_context_t *ctx)
+{
+ if (ctx) {
+ DPRINT(("free ctx @%p\n", ctx));
+ kfree(ctx);
}
- DBprintk(("[%d] clearing psb_flags=0x%x smpl @%p size %lu\n",
- current->pid, psb->psb_flags, psb->psb_hdr, psb->psb_size));
- /*
- * decrement the number vma for the buffer
- */
- psb->psb_flags &= ~PSB_HAS_VMA;
-
- UNLOCK_PSB(psb);
}
-/*
- * This function is called from pfm_destroy_context() and also from pfm_inherit()
- * to explicitly remove the sampling buffer mapping from the user level address space.
- */
-static int
-pfm_remove_smpl_mapping(struct task_struct *task)
+/*
+ * Stop counting on overflow: save the 64-bit counter values into the
+ * context and clear the privilege-level field (plm, low 4 bits) of every
+ * used monitor PMC so the counters no longer count. PSR is left untouched.
+ */
+static void
+pfm_mask_monitoring(struct task_struct *task)
{
- pfm_context_t *ctx = task->thread.pfm_context;
- pfm_smpl_buffer_desc_t *psb;
- int r;
+ pfm_context_t *ctx = PFM_GET_CTX(task);
+ struct thread_struct *th = &task->thread;
+ unsigned long mask, val;
+ int i;
+
+ DPRINT(("[%d] masking monitoring for [%d]\n", current->pid, task->pid));
/*
- * some sanity checks first
+ * monitoring can only be masked as a result of a valid
+ * counter overflow. In UP, it means that the PMU still
+ * has an owner. Note that the owner can be different
+ * from the current task. However the PMU state belongs
+ * to the owner.
+ * In SMP, a valid overflow only happens when task is
+ * current. Therefore if we come here, we know that
+ * the PMU state belongs to the current task, therefore
+ * we can access the live registers.
+ *
+ * So in both cases, the live register contains the owner's
+ * state. We can ONLY touch the PMU registers and NOT the PSR.
+ *
+ * As a consequence to this call, the thread->pmds[] array
+ * contains stale information which must be ignored
+ * when context is reloaded AND monitoring is active (see
+ * pfm_restart).
*/
- if (ctx == NULL || task->mm == NULL || ctx->ctx_smpl_vaddr == 0 || ctx->ctx_psb == NULL) {
- printk(KERN_DEBUG "perfmon: invalid context mm=%p\n", task->mm);
- return -1;
+ mask = ctx->ctx_used_pmds[0];
+ for (i = 0; mask; i++, mask>>=1) {
+ /* skip non used pmds */
+ if ((mask & 0x1) == 0) continue;
+ val = ia64_get_pmd(i);
+
+ if (PMD_IS_COUNTING(i)) {
+ /*
+ * we rebuild the full 64 bit value of the counter
+ */
+ ctx->ctx_pmds[i].val += (val & pmu_conf.ovfl_val);
+ } else {
+ ctx->ctx_pmds[i].val = val;
+ }
+ DPRINT(("pmd[%d]=0x%lx hw_pmd=0x%lx\n",
+ i,
+ ctx->ctx_pmds[i].val,
+ val & pmu_conf.ovfl_val));
+ }
+ /*
+ * mask monitoring by setting the privilege level to 0
+ * we cannot use psr.pp/psr.up for this, it is controlled by
+ * the user
+ *
+ * if task is current, modify actual registers, otherwise modify
+ * thread save state, i.e., what will be restored in pfm_load_regs()
+ */
+ mask = ctx->ctx_used_monitors[0] >> PMU_FIRST_COUNTER;
+ for(i= PMU_FIRST_COUNTER; mask; i++, mask>>=1) {
+ if ((mask & 0x1) == 0UL) continue;
+ ia64_set_pmc(i, th->pmcs[i] & ~0xfUL);
+ th->pmcs[i] &= ~0xfUL;
+ }
+ /*
+ * make all of this visible
+ */
+ ia64_srlz_d();
+}
+
+/*
+ * must always be done with task == current
+ *
+ * context must be in MASKED state when calling
+ *
+ * Undoes pfm_mask_monitoring(): with PSR-level monitoring temporarily
+ * disabled, reload the PMDs (split 64-bit soft value back into the
+ * hardware-width part) and restore the original PMC values, then
+ * re-establish the saved PSR (and dcr.pp for system-wide sessions).
+ */
+static void
+pfm_restore_monitoring(struct task_struct *task)
+{
+ pfm_context_t *ctx = PFM_GET_CTX(task);
+ struct thread_struct *th = &task->thread;
+ unsigned long mask;
+ unsigned long psr, val;
+ int i;
+
+ if (task != current) {
+ printk(KERN_ERR "perfmon.%d: invalid task[%d] current[%d]\n", __LINE__, task->pid, current->pid);
+ return;
+ }
+ if (CTX_IS_MASKED(ctx) == 0) {
+ printk(KERN_ERR "perfmon.%d: task[%d] current[%d] invalid state=%d\n", __LINE__,
+ task->pid, current->pid, ctx->ctx_state);
+ return;
+ }
+ psr = pfm_get_psr();
+ /*
+ * monitoring is masked via the PMC.
+ * As we restore their value, we do not want each counter to
+ * restart right away. We stop monitoring using the PSR,
+ * restore the PMC (and PMD) and then re-establish the psr
+ * as it was. Note that there can be no pending overflow at
+ * this point, because monitoring was MASKED.
+ *
+ * system-wide session are pinned and self-monitoring
+ */
+ if (ctx->ctx_fl_system && (PFM_CPUINFO_GET() & PFM_CPUINFO_DCR_PP)) {
+ /* disable dcr pp */
+ ia64_set_dcr(ia64_get_dcr() & ~IA64_DCR_PP);
+ pfm_clear_psr_pp();
+ } else {
+ pfm_clear_psr_up();
+ }
+ /*
+ * first, we restore the PMD
+ */
+ mask = ctx->ctx_used_pmds[0];
+ for (i = 0; mask; i++, mask>>=1) {
+ /* skip non used pmds */
+ if ((mask & 0x1) == 0) continue;
+
+ if (PMD_IS_COUNTING(i)) {
+ /*
+ * we split the 64bit value according to
+ * counter width
+ */
+ val = ctx->ctx_pmds[i].val & pmu_conf.ovfl_val;
+ ctx->ctx_pmds[i].val &= ~pmu_conf.ovfl_val;
+ } else {
+ val = ctx->ctx_pmds[i].val;
+ }
+ ia64_set_pmd(i, val);
+
+ DPRINT(("pmd[%d]=0x%lx hw_pmd=0x%lx\n",
+ i,
+ ctx->ctx_pmds[i].val,
+ val));
+ }
+ /*
+ * restore the PMCs
+ */
+ mask = ctx->ctx_used_monitors[0] >> PMU_FIRST_COUNTER;
+ for(i= PMU_FIRST_COUNTER; mask; i++, mask>>=1) {
+ if ((mask & 0x1) == 0UL) continue;
+ th->pmcs[i] = ctx->ctx_pmcs[i];
+ ia64_set_pmc(i, th->pmcs[i]);
+ DPRINT(("[%d] pmc[%d]=0x%lx\n", task->pid, i, th->pmcs[i]));
+ }
+ ia64_srlz_d();
+
+ /*
+ * now restore PSR
+ */
+ if (ctx->ctx_fl_system && (PFM_CPUINFO_GET() & PFM_CPUINFO_DCR_PP)) {
+ /* enable dcr pp */
+ ia64_set_dcr(ia64_get_dcr() | IA64_DCR_PP);
+ ia64_srlz_i();
+ }
+ pfm_set_psr_l(psr);
+}
+
+/*
+ * Snapshot the live PMD registers selected by 'mask' (one bit per index)
+ * into the pmds[] array, after a data serialize.
+ */
+static inline void
+pfm_save_pmds(unsigned long *pmds, unsigned long mask)
+{
+ int i;
+
+ ia64_srlz_d();
+
+ for (i=0; mask; i++, mask>>=1) {
+ if (mask & 0x1) pmds[i] = ia64_get_pmd(i);
+ }
+}
+
+/*
+ * reload from thread state (used for ctxw only)
+ *
+ * For counting PMDs only the hardware-width bits (ovfl_val) are written
+ * back to the register; the upper bits live in the soft counter.
+ */
+static inline void
+pfm_restore_pmds(unsigned long *pmds, unsigned long mask)
+{
+ int i;
+ unsigned long val, ovfl_val = pmu_conf.ovfl_val;
+
+ DPRINT(("mask=0x%lx\n", mask));
+ for (i=0; mask; i++, mask>>=1) {
+ if ((mask & 0x1) == 0) continue;
+ val = PMD_IS_COUNTING(i) ? pmds[i] & ovfl_val : pmds[i];
+ ia64_set_pmd(i, val);
+ DPRINT(("pmd[%d]=0x%lx\n", i, val));
+ }
+ ia64_srlz_d();
+}
+
+/*
+ * propagate PMD from context to thread-state
+ *
+ * NOTE(review): unlike the save/restore loops above, this loop does not
+ * test (mask & 0x1) — every index up to the highest bit set in
+ * ctx_all_pmds[0] is copied, including indices whose mask bit is 0.
+ * Presumably intentional since ctx_all_pmds covers all implemented PMDs;
+ * confirm against the rest of perfmon.c.
+ */
+static inline void
+pfm_copy_pmds(struct task_struct *task, pfm_context_t *ctx)
+{
+ struct thread_struct *thread = &task->thread;
+ unsigned long ovfl_val = pmu_conf.ovfl_val;
+ unsigned long mask = ctx->ctx_all_pmds[0];
+ unsigned long val;
+ int i;
+
+ DPRINT(("mask=0x%lx\n", mask));
+
+ for (i=0; mask; i++, mask>>=1) {
+
+ val = ctx->ctx_pmds[i].val;
+
+ /*
+ * We break up the 64 bit value into 2 pieces
+ * the lower bits go to the machine state in the
+ * thread (will be reloaded on ctxsw in).
+ * The upper part stays in the soft-counter.
+ */
+ if (PMD_IS_COUNTING(i)) {
+ ctx->ctx_pmds[i].val = val & ~ovfl_val;
+ val &= ovfl_val;
+ }
+ thread->pmds[i] = val;
+
+ DPRINT(("pmd[%d]=0x%lx soft_val=0x%lx\n",
+ i,
+ thread->pmds[i],
+ ctx->ctx_pmds[i].val));
+ }
+}
+
+/*
+ * propagate PMC from context to thread-state
+ *
+ * NOTE(review): as in pfm_copy_pmds(), the loop copies every index up to
+ * the highest bit of ctx_all_pmcs[0] without testing (mask & 0x1).
+ */
+static inline void
+pfm_copy_pmcs(struct task_struct *task, pfm_context_t *ctx)
+{
+ struct thread_struct *thread = &task->thread;
+ unsigned long mask = ctx->ctx_all_pmcs[0];
+ int i;
+
+ DPRINT(("mask=0x%lx\n", mask));
+
+ for (i=0; mask; i++, mask>>=1) {
+ /* masking 0 with ovfl_val yields 0 */
+ thread->pmcs[i] = ctx->ctx_pmcs[i];
+ DPRINT(("pmc[%d]=0x%lx\n", i, thread->pmcs[i]));
+ }
+}
+
+
+
+/*
+ * Write back the PMC registers selected by 'mask' from pmcs[],
+ * followed by a data serialize.
+ */
+static inline void
+pfm_restore_pmcs(unsigned long *pmcs, unsigned long mask)
+{
+ int i;
+
+ DPRINT(("mask=0x%lx\n", mask));
+ for (i=0; mask; i++, mask>>=1) {
+ if ((mask & 0x1) == 0) continue;
+ ia64_set_pmc(i, pmcs[i]);
+ DPRINT(("pmc[%d]=0x%lx\n", i, pmcs[i]));
+ }
+ ia64_srlz_d();
+}
+
+/*
+ * Reload the first 'nibrs' instruction breakpoint registers (serialize i).
+ */
+static inline void
+pfm_restore_ibrs(unsigned long *ibrs, unsigned int nibrs)
+{
+ int i;
+
+ for (i=0; i < nibrs; i++) {
+ ia64_set_ibr(i, ibrs[i]);
+ }
+ ia64_srlz_i();
+}
+
+/*
+ * Reload the first 'ndbrs' data breakpoint registers (serialize d).
+ */
+static inline void
+pfm_restore_dbrs(unsigned long *dbrs, unsigned int ndbrs)
+{
+ int i;
+
+ for (i=0; i < ndbrs; i++) {
+ ia64_set_dbr(i, dbrs[i]);
+ }
+ ia64_srlz_d();
+}
+
+/*
+ * Compare two buffer-format UUIDs; 0 means equal (memcmp semantics).
+ */
+static inline int
+pfm_uuid_cmp(pfm_uuid_t a, pfm_uuid_t b)
+{
+ return memcmp(a, b, sizeof(pfm_uuid_t));
+}
+
+/*
+ * The pfm_buf_fmt_* helpers below are thin NULL-safe wrappers around the
+ * optional callbacks of a sampling-buffer format descriptor; each returns
+ * 0 when the format does not provide the corresponding callback.
+ */
+static inline int
+pfm_buf_fmt_exit(pfm_buffer_fmt_t *fmt, struct task_struct *task, void *buf, struct pt_regs *regs)
+{
+ int ret = 0;
+ if (fmt->fmt_exit) ret = (*fmt->fmt_exit)(task, buf, regs);
+ return ret;
+}
+
+static inline int
+pfm_buf_fmt_getsize(pfm_buffer_fmt_t *fmt, struct task_struct *task, unsigned int flags, int cpu, void *arg, unsigned long *size)
+{
+ int ret = 0;
+ if (fmt->fmt_getsize) ret = (*fmt->fmt_getsize)(task, flags, cpu, arg, size);
+ return ret;
+}
+
+
+static inline int
+pfm_buf_fmt_validate(pfm_buffer_fmt_t *fmt, struct task_struct *task, unsigned int flags,
+ int cpu, void *arg)
+{
+ int ret = 0;
+ if (fmt->fmt_validate) ret = (*fmt->fmt_validate)(task, flags, cpu, arg);
+ return ret;
+}
+
+static inline int
+pfm_buf_fmt_init(pfm_buffer_fmt_t *fmt, struct task_struct *task, void *buf, unsigned int flags,
+ int cpu, void *arg)
+{
+ int ret = 0;
+ if (fmt->fmt_init) ret = (*fmt->fmt_init)(task, buf, flags, cpu, arg);
+ return ret;
+}
+
+static inline int
+pfm_buf_fmt_restart(pfm_buffer_fmt_t *fmt, struct task_struct *task, pfm_ovfl_ctrl_t *ctrl, void *buf, struct pt_regs *regs)
+{
+ int ret = 0;
+ if (fmt->fmt_restart) ret = (*fmt->fmt_restart)(task, ctrl, buf, regs);
+ return ret;
+}
+
+static inline int
+pfm_buf_fmt_restart_active(pfm_buffer_fmt_t *fmt, struct task_struct *task, pfm_ovfl_ctrl_t *ctrl, void *buf, struct pt_regs *regs)
+{
+ int ret = 0;
+ if (fmt->fmt_restart_active) ret = (*fmt->fmt_restart_active)(task, ctrl, buf, regs);
+ return ret;
+}
+
+
+
+/*
+ * Register a sampling-buffer format. Requires a name and a handler.
+ * Returns -EINVAL on bad arguments, -EBUSY if a format with the same
+ * UUID is already on the list, 0 on success (format is prepended).
+ */
+int
+pfm_register_buffer_fmt(pfm_buffer_fmt_t *fmt)
+{
+ pfm_buffer_fmt_t *p;
+ int ret = 0;
+
+ /* some sanity checks */
+ if (fmt == NULL || fmt->fmt_name == NULL) return -EINVAL;
+
+ /* we need at least a handler */
+ if (fmt->fmt_handler == NULL) return -EINVAL;
+
+ /*
+ * XXX: need check validity of fmt_arg_size
+ */
+
+ LOCK_BUF_FMT_LIST();
+ p = pfm_buffer_fmt_list;
+
+
+ /* linear scan for a duplicate UUID */
+ while (p) {
+ if (pfm_uuid_cmp(fmt->fmt_uuid, p->fmt_uuid) == 0) break;
+ p = p->fmt_next;
}
- psb = ctx->ctx_psb;
- down_write(&task->mm->mmap_sem);
+ if (p) {
+ printk(KERN_ERR "perfmon: duplicate sampling format: %s\n", fmt->fmt_name);
+ ret = -EBUSY;
+ } else {
+ fmt->fmt_prev = NULL;
+ fmt->fmt_next = pfm_buffer_fmt_list;
+ pfm_buffer_fmt_list = fmt;
+ printk(KERN_ERR "perfmon: added sampling format %s\n", fmt->fmt_name);
+ }
+ UNLOCK_BUF_FMT_LIST();
+
+ return ret;
+}
+
+/*
+ * Unlink the sampling-buffer format matching 'uuid' from the list.
+ * Returns 0 on success, -EINVAL if no such format is registered.
+ */
+int
+pfm_unregister_buffer_fmt(pfm_uuid_t uuid)
+{
+ pfm_buffer_fmt_t *p;
+ int ret = 0;
+
+ LOCK_BUF_FMT_LIST();
+ p = pfm_buffer_fmt_list;
+ while (p) {
+ if (memcmp(uuid, p->fmt_uuid, sizeof(pfm_uuid_t)) == 0) break;
+ p = p->fmt_next;
+ }
+ if (p) {
+ /* standard doubly-linked-list unlink */
+ if (p->fmt_prev)
+ p->fmt_prev->fmt_next = p->fmt_next;
+ else
+ pfm_buffer_fmt_list = p->fmt_next;
+
+ if (p->fmt_next)
+ p->fmt_next->fmt_prev = p->fmt_prev;
+
+ printk(KERN_ERR "perfmon: removed sampling format: %s\n", p->fmt_name);
+ p->fmt_next = p->fmt_prev = NULL;
+ } else {
+ printk(KERN_ERR "perfmon: cannot unregister format, not found\n");
+ ret = -EINVAL;
+ }
+ UNLOCK_BUF_FMT_LIST();
+
+ return ret;
+
+}
+
+/*
+ * find a buffer format based on its uuid
+ *
+ * NOTE(review): the 'nolock' parameter is ignored — the list lock is
+ * always taken here. Confirm callers never pass nolock from a context
+ * that already holds the lock.
+ */
+static pfm_buffer_fmt_t *
+pfm_find_buffer_fmt(pfm_uuid_t uuid, int nolock)
+{
+ pfm_buffer_fmt_t *p;
+
+ LOCK_BUF_FMT_LIST();
+ for (p = pfm_buffer_fmt_list; p ; p = p->fmt_next) {
+ if (pfm_uuid_cmp(uuid, p->fmt_uuid) == 0) break;
+ }
+
+ UNLOCK_BUF_FMT_LIST();
+
+ return p;
+}
+
+/*
+ * Reserve a perfmon session under the global session lock.
+ * System-wide and per-task sessions are mutually exclusive, and only one
+ * system-wide session may own a given CPU. Returns 0 on success, -EBUSY
+ * on any conflict.
+ */
+static int
+pfm_reserve_session(struct task_struct *task, int is_syswide, unsigned int cpu)
+{
+ /*
+ * validity checks on cpu_mask have been done upstream
+ */
+ LOCK_PFS();
+
+ DPRINT(("in sys_sessions=%u task_sessions=%u dbregs=%u syswide=%d cpu=%u\n",
+ pfm_sessions.pfs_sys_sessions,
+ pfm_sessions.pfs_task_sessions,
+ pfm_sessions.pfs_sys_use_dbregs,
+ is_syswide,
+ cpu));
+
+ if (is_syswide) {
+ /*
+ * cannot mix system wide and per-task sessions
+ */
+ if (pfm_sessions.pfs_task_sessions > 0UL) {
+ DPRINT(("system wide not possible, %u conflicting task_sessions\n",
+ pfm_sessions.pfs_task_sessions));
+ goto abort;
+ }
+
+ if (pfm_sessions.pfs_sys_session[cpu]) goto error_conflict;
+
+ DPRINT(("reserving system wide session on CPU%u currently on CPU%u\n", cpu, smp_processor_id()));
+
+ pfm_sessions.pfs_sys_session[cpu] = task;
+
+ pfm_sessions.pfs_sys_sessions++ ;
+
+ } else {
+ if (pfm_sessions.pfs_sys_sessions) goto abort;
+ pfm_sessions.pfs_task_sessions++;
+ }
+
+ DPRINT(("out sys_sessions=%u task_sessions=%u dbregs=%u syswide=%d cpu=%u\n",
+ pfm_sessions.pfs_sys_sessions,
+ pfm_sessions.pfs_task_sessions,
+ pfm_sessions.pfs_sys_use_dbregs,
+ is_syswide,
+ cpu));
+
+ UNLOCK_PFS();
+
+ return 0;
+
+error_conflict:
+ DPRINT(("system wide not possible, conflicting session [%d] on CPU%d\n",
+ pfm_sessions.pfs_sys_session[cpu]->pid,
+ smp_processor_id()));
+abort:
+ UNLOCK_PFS();
+
+ return -EBUSY;
+
+}
+
+/*
+ * Release a session reserved by pfm_reserve_session(). For system-wide
+ * sessions the per-CPU slot is cleared and the debug-register use count
+ * is dropped if the context had claimed them. Always returns 0.
+ */
+static int
+pfm_unreserve_session(pfm_context_t *ctx, int is_syswide, unsigned int cpu)
+{
+
+ /*
+ * validity checks on cpu_mask have been done upstream
+ */
+ LOCK_PFS();
+
+ DPRINT(("in sys_sessions=%u task_sessions=%u dbregs=%u syswide=%d cpu=%u\n",
+ pfm_sessions.pfs_sys_sessions,
+ pfm_sessions.pfs_task_sessions,
+ pfm_sessions.pfs_sys_use_dbregs,
+ is_syswide,
+ cpu));
+
+
+ if (is_syswide) {
+ pfm_sessions.pfs_sys_session[cpu] = NULL;
+ /*
+ * would not work with perfmon+more than one bit in cpu_mask
+ */
+ if (ctx && ctx->ctx_fl_using_dbreg) {
+ if (pfm_sessions.pfs_sys_use_dbregs == 0) {
+ printk(KERN_ERR "perfmon: invalid release for ctx %p sys_use_dbregs=0\n", ctx);
+ } else {
+ pfm_sessions.pfs_sys_use_dbregs--;
+ }
+ }
+ pfm_sessions.pfs_sys_sessions--;
+ } else {
+ pfm_sessions.pfs_task_sessions--;
+ }
+ DPRINT(("out sys_sessions=%u task_sessions=%u dbregs=%u syswide=%d cpu=%u\n",
+ pfm_sessions.pfs_sys_sessions,
+ pfm_sessions.pfs_task_sessions,
+ pfm_sessions.pfs_sys_use_dbregs,
+ is_syswide,
+ cpu));
+
+ UNLOCK_PFS();
+
+ return 0;
+}
+
+/*
+ * removes virtual mapping of the sampling buffer.
+ * IMPORTANT: cannot be called with interrupts disable, e.g. inside
+ * a PROTECT_CTX() section.
+ *
+ * NOTE(review): a do_munmap() failure is only logged; the function still
+ * returns 0. Only bad-argument cases return -EINVAL.
+ */
+static int
+pfm_remove_smpl_mapping(struct task_struct *task, void *vaddr, unsigned long size)
+{
+ int r;
+
+ /* sanity checks */
+ if (task->mm == NULL || size == 0UL || vaddr == NULL) {
+ printk(KERN_ERR "perfmon: pfm_remove_smpl_mapping [%d] invalid context mm=%p\n", task->pid, task->mm);
+ return -EINVAL;
+ }
+
+ DPRINT(("smpl_vaddr=%p size=%lu\n", vaddr, size));
+
+ /*
+ * does the actual unmapping
+ */
+ down_write(&task->mm->mmap_sem);
+
+ DPRINT(("down_write done smpl_vaddr=%p size=%lu\n", vaddr, size));
+
+ r = do_munmap(task->mm, (unsigned long)vaddr, size);
+
+ up_write(&task->mm->mmap_sem);
+ if (r !=0) {
+ printk(KERN_ERR "perfmon: [%d] unable to unmap sampling buffer @%p size=%lu\n", task->pid, vaddr, size);
+ }
+
+ DPRINT(("do_unmap(%p, %lu)=%d\n", vaddr, size, r));
+
+ return 0;
+}
+
+/*
+ * free actual physical storage used by sampling buffer
+ *
+ * NOTE(review): compiled out with #if 0 — dead code kept for reference.
+ */
+#if 0
+static int
+pfm_free_smpl_buffer(pfm_context_t *ctx)
+{
+ pfm_buffer_fmt_t *fmt;
+
+ if (ctx->ctx_smpl_hdr == NULL) goto invalid_free;
+
+ /*
+ * we won't use the buffer format anymore
+ */
+ fmt = ctx->ctx_buf_fmt;
+
+ DPRINT(("sampling buffer @%p size %lu vaddr=%p\n",
+ ctx->ctx_smpl_hdr,
+ ctx->ctx_smpl_size,
+ ctx->ctx_smpl_vaddr));
+
+ pfm_buf_fmt_exit(fmt, current, NULL, NULL);
+
+ /*
+ * free the buffer
+ */
+ pfm_rvfree(ctx->ctx_smpl_hdr, ctx->ctx_smpl_size);
+
+ ctx->ctx_smpl_hdr = NULL;
+ ctx->ctx_smpl_size = 0UL;
+
+ return 0;
+
+invalid_free:
+ printk(KERN_ERR "perfmon: pfm_free_smpl_buffer [%d] no buffer\n", current->pid);
+ return -EINVAL;
+}
+#endif
+
+/*
+ * Invoke the format's exit callback for the current task; NULL-format safe.
+ */
+static inline void
+pfm_exit_smpl_buffer(pfm_buffer_fmt_t *fmt)
+{
+ if (fmt == NULL) return;
+
+ pfm_buf_fmt_exit(fmt, current, NULL, NULL);
+
+}
+
+/*
+ * pfmfs should _never_ be mounted by userland - too much of security hassle,
+ * no real gain from having the whole whorehouse mounted. So we don't need
+ * any operations on the root directory. However, we need a non-trivial
+ * d_name - pfm: will go nicely and kill the special-casing in procfs.
+ */
+static struct vfsmount *pfmfs_mnt;
+#define PFMFS_MAGIC 0xa0b4d889
+
+#ifdef PFM_COMPILED_FOR_2_4
+
+/* 2.4 kernels: hand-rolled pseudo filesystem (statfs + read_super). */
+static int
+pfmfs_statfs(struct super_block *sb, struct statfs *buf)
+{
+ buf->f_type = PFMFS_MAGIC;
+ buf->f_bsize = 1024;
+ buf->f_namelen = 255;
+ return 0;
+}
+
+static struct super_operations pfmfs_ops = {
+ statfs: pfmfs_statfs,
+};
+
+/* Build a minimal superblock with a bare root inode/dentry named "pfm:". */
+static struct super_block *
+pfmfs_read_super(struct super_block *sb, void *data, int silent)
+{
+ struct inode *root = new_inode(sb);
+ if (!root)
+ return NULL;
+ root->i_mode = S_IFDIR | S_IRUSR | S_IWUSR;
+ root->i_uid = root->i_gid = 0;
+ root->i_atime = root->i_mtime = root->i_ctime = CURRENT_TIME;
+ sb->s_blocksize = 1024;
+ sb->s_blocksize_bits = 10;
+ sb->s_magic = PFMFS_MAGIC;
+ sb->s_op = &pfmfs_ops;
+ sb->s_root = d_alloc(NULL, &(const struct qstr) { "pfm:", 4, 0 });
+ if (!sb->s_root) {
+ iput(root);
+ return NULL;
+ }
+ sb->s_root->d_sb = sb;
+ sb->s_root->d_parent = sb->s_root;
+ d_instantiate(sb->s_root, root);
+ return sb;
+}
+
+//static DECLARE_FSTYPE(pfm_fs_type, "pfmfs", pfmfs_read_super, FS_NOMOUNT);
+static struct file_system_type pfm_fs_type = {
+ name: "pfmfs",
+ read_super: pfmfs_read_super,
+ fs_flags: FS_NOMOUNT,
+};
+
+#else /* ! COMPILED_FOR_2_4 */
+
+/* 2.6 kernels: use the generic pseudo-filesystem helper. */
+static struct super_block *
+pfmfs_get_sb(struct file_system_type *fs_type, int flags, char *dev_name, void *data)
+{
+ return get_sb_pseudo(fs_type, "pfm:", NULL, PFMFS_MAGIC);
+}
+
+static struct file_system_type pfm_fs_type = {
+ .name = "pfmfs",
+ .get_sb = pfmfs_get_sb,
+ .kill_sb = kill_anon_super,
+};
+#endif /* COMPILED_FOR_2_4 */
+
+/*
+ * Register and kern_mount the internal pfmfs pseudo filesystem.
+ * On mount failure the filesystem is unregistered again and the
+ * PTR_ERR of the failed mount is returned.
+ */
+static int __init
+init_pfm_fs(void)
+{
+ int err = register_filesystem(&pfm_fs_type);
+ if (!err) {
+ pfmfs_mnt = kern_mount(&pfm_fs_type);
+ err = PTR_ERR(pfmfs_mnt);
+ if (IS_ERR(pfmfs_mnt))
+ unregister_filesystem(&pfm_fs_type);
+ else
+ err = 0;
+ }
+ return err;
+}
+
+/* Tear down pfmfs: unregister the type and drop the kern_mount reference. */
+static void __exit
+exit_pfm_fs(void)
+{
+ unregister_filesystem(&pfm_fs_type);
+ mntput(pfmfs_mnt);
+}
+
+/* Seeking on a perfmon message-queue fd is not supported. */
+static loff_t
+pfm_lseek(struct file *file, loff_t offset, int whence)
+{
+ DPRINT(("pfm_lseek called\n"));
+ return -ESPIPE;
+}
+
+/*
+ * Blocking read of one pfm_msg_t from the context message queue.
+ * Sleeps on ctx_msgq_wait unless O_NONBLOCK (-EAGAIN) or a signal
+ * arrives (-EINTR). 'size' must be at least sizeof(pfm_msg_t) and
+ * seeking (ppos != &f_pos) is rejected with -ESPIPE.
+ */
+static ssize_t
+pfm_do_read(struct file *filp, char *buf, size_t size, loff_t *ppos)
+{
+ pfm_context_t *ctx;
+ pfm_msg_t *msg;
+ ssize_t ret;
+ unsigned long flags;
+ DECLARE_WAITQUEUE(wait, current);
+ if (PFM_IS_FILE(filp) == 0) {
+ printk(KERN_ERR "perfmon: pfm_poll: bad magic [%d]\n", current->pid);
+ return -EINVAL;
+ }
+
+ ctx = (pfm_context_t *)filp->private_data;
+ if (ctx == NULL) {
+ printk(KERN_ERR "perfmon: pfm_read: NULL ctx [%d]\n", current->pid);
+ return -EINVAL;
+ }
+
+ /*
+ * check even when there is no message
+ */
+ if (size < sizeof(pfm_msg_t)) {
+ DPRINT(("message is too small ctx=%p (>=%ld)\n", ctx, sizeof(pfm_msg_t)));
+ return -EINVAL;
+ }
+ /*
+ * seeks are not allowed on message queues
+ */
+ if (ppos != &filp->f_pos) return -ESPIPE;
+
+ PROTECT_CTX(ctx, flags);
+
+ /*
+ * put ourselves on the wait queue
+ */
+ add_wait_queue(&ctx->ctx_msgq_wait, &wait);
+
+
+ for(;;) {
+ /*
+ * check wait queue
+ */
+
+ set_current_state(TASK_INTERRUPTIBLE);
+
+ DPRINT(("head=%d tail=%d\n", ctx->ctx_msgq_head, ctx->ctx_msgq_tail));
+
+ /* queue non-empty: break out still holding the context lock */
+ ret = 0;
+ if(PFM_CTXQ_EMPTY(ctx) == 0) break;
+
+ UNPROTECT_CTX(ctx, flags);
+
+ /*
+ * check non-blocking read
+ */
+ ret = -EAGAIN;
+ if(filp->f_flags & O_NONBLOCK) break;
+
+ /*
+ * check pending signals
+ */
+ if(signal_pending(current)) {
+ ret = -EINTR;
+ break;
+ }
+ /*
+ * no message, so wait
+ */
+ schedule();
+
+ PROTECT_CTX(ctx, flags);
+ }
+ DPRINT(("[%d] back to running ret=%ld\n", current->pid, ret));
+ set_current_state(TASK_RUNNING);
+ remove_wait_queue(&ctx->ctx_msgq_wait, &wait);
+
+ /* error paths above released the lock before breaking */
+ if (ret < 0) goto abort;
+
+ ret = -EINVAL;
+ msg = pfm_get_next_msg(ctx);
+ if (msg == NULL) {
+ printk(KERN_ERR "perfmon: pfm_read no msg for ctx=%p [%d]\n", ctx, current->pid);
+ goto abort_locked;
+ }
+
+ DPRINT(("[%d] fd=%d type=%d\n", current->pid, msg->pfm_gen_msg.msg_ctx_fd, msg->pfm_gen_msg.msg_type));
+
+ ret = -EFAULT;
+ if(copy_to_user(buf, msg, sizeof(pfm_msg_t)) == 0) ret = sizeof(pfm_msg_t);
+
+abort_locked:
+ UNPROTECT_CTX(ctx, flags);
+abort:
+ return ret;
+}
+
+/*
+ * read() entry point: temporarily switches pfm_debug_var to the
+ * sysctl-controlled value around pfm_do_read(), then restores it.
+ */
+static ssize_t
+pfm_read(struct file *filp, char *buf, size_t size, loff_t *ppos)
+{
+ int oldvar, ret;
+
+ oldvar = pfm_debug_var;
+ pfm_debug_var = pfm_sysctl.debug_pfm_read;
+ ret = pfm_do_read(filp, buf, size, ppos);
+ pfm_debug_var = oldvar;
+ return ret;
+}
+
+/* Writing to a perfmon fd is not supported. */
+static ssize_t
+pfm_write(struct file *file, const char *ubuf,
+ size_t size, loff_t *ppos)
+{
+ DPRINT(("pfm_write called\n"));
+ return -EINVAL;
+}
+
+/*
+ * poll() entry point: reports POLLIN|POLLRDNORM when the context
+ * message queue is non-empty; 0 on bad file/context or empty queue.
+ */
+static unsigned int
+pfm_poll(struct file *filp, poll_table * wait)
+{
+ pfm_context_t *ctx;
+ unsigned long flags;
+ unsigned int mask = 0;
+
+ if (PFM_IS_FILE(filp) == 0) {
+ printk(KERN_ERR "perfmon: pfm_poll: bad magic [%d]\n", current->pid);
+ return 0;
+ }
+
+ ctx = (pfm_context_t *)filp->private_data;
+ if (ctx == NULL) {
+ printk(KERN_ERR "perfmon: pfm_poll: NULL ctx [%d]\n", current->pid);
+ return 0;
+ }
+
+
+ DPRINT(("pfm_poll ctx_fd=%d before poll_wait\n", ctx->ctx_fd));
+
+ poll_wait(filp, &ctx->ctx_msgq_wait, wait);
- r = do_munmap(task->mm, ctx->ctx_smpl_vaddr, psb->psb_size);
+ PROTECT_CTX(ctx, flags);
- up_write(&task->mm->mmap_sem);
- if (r !=0) {
- printk(KERN_DEBUG "perfmon: pid %d unable to unmap sampling buffer "
- "@0x%lx size=%ld\n", task->pid, ctx->ctx_smpl_vaddr, psb->psb_size);
- }
+ if (PFM_CTXQ_EMPTY(ctx) == 0)
+ mask = POLLIN | POLLRDNORM;
- DBprintk(("[%d] do_unmap(0x%lx, %ld)=%d refcnt=%lu psb_flags=0x%x\n",
- task->pid, ctx->ctx_smpl_vaddr, psb->psb_size, r, psb->psb_refcnt, psb->psb_flags));
+ UNPROTECT_CTX(ctx, flags);
- return 0;
+ DPRINT(("pfm_poll ctx_fd=%d mask=0x%x\n", ctx->ctx_fd, mask));
+
+ return mask;
}
-static pfm_context_t *
-pfm_context_alloc(void)
+/* ioctl() on a perfmon fd is not supported. */
+static int
+pfm_ioctl(struct inode *inode, struct file *file, unsigned int cmd, unsigned long arg)
+{
+ DPRINT(("pfm_ioctl called\n"));
+ return -EINVAL;
+}
+
+/*
+ * context is locked when coming here
+ *
+ * Core fasync bookkeeping: delegates to fasync_helper() on the
+ * context's async queue.
+ */
+static inline int
+pfm_do_fasync(int fd, struct file *filp, pfm_context_t *ctx, int on)
+{
+ int ret;
+
+ ret = fasync_helper (fd, filp, on, &ctx->ctx_async_queue);
+
+ DPRINT(("pfm_fasync called by [%d] on ctx_fd=%d on=%d async_queue=%p ret=%d\n",
+ current->pid,
+ fd,
+ on,
+ ctx->ctx_async_queue, ret));
+
+ return ret;
+}
+
+/*
+ * fasync() entry point: validates the file/context then calls
+ * pfm_do_fasync() under the context lock. -EBADF on bad fd.
+ */
+static int
+pfm_fasync(int fd, struct file *filp, int on)
{
pfm_context_t *ctx;
+ unsigned long flags;
+ int ret;
- /* allocate context descriptor */
- ctx = kmalloc(sizeof(pfm_context_t), GFP_KERNEL);
- if (ctx) memset(ctx, 0, sizeof(pfm_context_t));
-
- return ctx;
+ if (PFM_IS_FILE(filp) == 0) {
+ printk(KERN_ERR "perfmon: pfm_fasync bad magic [%d]\n", current->pid);
+ return -EBADF;
+ }
+
+ ctx = (pfm_context_t *)filp->private_data;
+ if (ctx == NULL) {
+ printk(KERN_ERR "perfmon: pfm_fasync NULL ctx [%d]\n", current->pid);
+ return -EBADF;
+ }
+
+
+ PROTECT_CTX(ctx, flags);
+
+ ret = pfm_do_fasync(fd, filp, ctx, on);
+
+ DPRINT(("pfm_fasync called by [%d] on ctx_fd=%d on=%d async_queue=%p ret=%d\n",
+ current->pid,
+ fd,
+ on,
+ ctx->ctx_async_queue, ret));
+
+ UNPROTECT_CTX(ctx, flags);
+
+ return ret;
}
+#ifdef CONFIG_SMP
+/*
+ * this function is exclusively called from pfm_close().
+ * The context is not protected at that time, nor are interrupts
+ * on the remote CPU. That's necessary to avoid deadlocks.
+ *
+ * Runs ON the session's CPU (via smp_call_function_single): after
+ * sanity-checking CPU/owner/context, clears dcr.pp, the per-CPU
+ * perfmon cpuinfo flags, psr.pp (both live and in the interrupted
+ * task's pt_regs), and drops PMU ownership.
+ */
static void
-pfm_context_free(pfm_context_t *ctx)
+pfm_syswide_force_stop(void *info)
+{
+ pfm_context_t *ctx = (pfm_context_t *)info;
+ struct pt_regs *regs = ia64_task_regs(current);
+ struct task_struct *owner;
+
+ if (ctx->ctx_cpu != smp_processor_id()) {
+ printk(KERN_ERR "perfmon: pfm_syswide_force_stop for CPU%d but on CPU%d\n",
+ ctx->ctx_cpu,
+ smp_processor_id());
+ return;
+ }
+ owner = GET_PMU_OWNER();
+ if (owner != ctx->ctx_task) {
+ printk(KERN_ERR "perfmon: pfm_syswide_force_stop CPU%d unexpected owner [%d] instead of [%d]\n",
+ smp_processor_id(),
+ owner->pid, ctx->ctx_task->pid);
+ return;
+ }
+ if (GET_PMU_CTX() != ctx) {
+ printk(KERN_ERR "perfmon: pfm_syswide_force_stop CPU%d unexpected ctx %p instead of %p\n",
+ smp_processor_id(),
+ GET_PMU_CTX(), ctx);
+ return;
+ }
+
+ DPRINT(("[%d] on CPU%d forcing system wide stop for [%d]\n", current->pid, smp_processor_id(), ctx->ctx_task->pid));
+ /*
+ * Update local PMU
+ */
+ ia64_set_dcr(ia64_get_dcr() & ~IA64_DCR_PP);
+ ia64_srlz_i();
+ /*
+ * update local cpuinfo
+ */
+ PFM_CPUINFO_CLEAR(PFM_CPUINFO_DCR_PP);
+ PFM_CPUINFO_CLEAR(PFM_CPUINFO_SYST_WIDE);
+ PFM_CPUINFO_CLEAR(PFM_CPUINFO_EXCL_IDLE);
+
+ pfm_clear_psr_pp();
+
+ /*
+ * also stop monitoring in the local interrupted task
+ */
+ ia64_psr(regs)->pp = 0;
+
+ SET_PMU_OWNER(NULL, NULL);
+}
+
+/*
+ * Cross-call helper: run pfm_syswide_force_stop() synchronously on the
+ * CPU owning the system-wide session.
+ */
+static void
+pfm_syswide_cleanup_other_cpu(pfm_context_t *ctx)
{
- if (ctx) kfree(ctx);
+ int ret;
+
+ DPRINT(("[%d] calling CPU%d for cleanup\n", current->pid, ctx->ctx_cpu));
+ ret = smp_call_function_single(ctx->ctx_cpu, pfm_syswide_force_stop, ctx, 0, 1);
+ DPRINT(("[%d] called CPU%d for cleanup ret=%d\n", current->pid, ctx->ctx_cpu, ret));
}
+#endif /* CONFIG_SMP */
+/*
+ * called either on explicit close() or from exit_files().
+ *
+ * IMPORTANT: we get called ONLY when the refcnt on the file gets to zero (fput()),i.e,
+ * last task to access the file. Nobody else can access the file at this point.
+ *
+ * When called from exit_files(), the VMA has been freed because exit_mm()
+ * is executed before exit_files().
+ *
+ * When called from exit_files(), the current task is not yet ZOMBIE but we will
+ * flush the PMU state to the context. This means that when we see the context
+ * state as TERMINATED we are guaranteed to have the latest PMU state available,
+ * even if the task itself is in the middle of being ctxsw out.
+ */
+static int pfm_context_unload(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs);
static int
-pfm_remap_buffer(struct vm_area_struct *vma, unsigned long buf, unsigned long addr, unsigned long size)
+pfm_close(struct inode *inode, struct file *filp)
{
- unsigned long page;
+ pfm_context_t *ctx;
+ struct task_struct *task;
+ struct pt_regs *regs;
+ DECLARE_WAITQUEUE(wait, current);
+ unsigned long flags;
+ unsigned long smpl_buf_size = 0UL;
+ void *smpl_buf_vaddr = NULL;
+ void *smpl_buf_addr = NULL;
+ int free_possible = 1;
+
+ { u64 psr = pfm_get_psr();
+ BUG_ON((psr & IA64_PSR_I) == 0UL);
+ }
- DBprintk(("CPU%d buf=0x%lx addr=0x%lx size=%ld\n", smp_processor_id(), buf, addr, size));
+ DPRINT(("pfm_close called private=%p\n", filp->private_data));
- while (size > 0) {
- page = pfm_kvirt_to_pa(buf);
+ if (!inode) {
+ printk(KERN_ERR "pfm_close: NULL inode\n");
+ return 0;
+ }
- if (remap_page_range(vma, addr, page, PAGE_SIZE, PAGE_READONLY)) return -ENOMEM;
+ if (PFM_IS_FILE(filp) == 0) {
+ printk(KERN_ERR "perfmon: pfm_close: bad magic [%d]\n", current->pid);
+ return -EBADF;
+ }
- addr += PAGE_SIZE;
- buf += PAGE_SIZE;
- size -= PAGE_SIZE;
+ ctx = (pfm_context_t *)filp->private_data;
+ if (ctx == NULL) {
+ printk(KERN_ERR "perfmon: pfm_close: NULL ctx [%d]\n", current->pid);
+ return -EBADF;
+ }
+
+ PROTECT_CTX(ctx, flags);
+
+ /*
+ * remove our file from the async queue, if we use it
+ */
+ if (filp->f_flags & FASYNC) {
+ DPRINT(("[%d] before async_queue=%p\n", current->pid, ctx->ctx_async_queue));
+ pfm_do_fasync (-1, filp, ctx, 0);
+ DPRINT(("[%d] after async_queue=%p\n", current->pid, ctx->ctx_async_queue));
+ }
+
+ task = PFM_CTX_TASK(ctx);
+
+ DPRINT(("[%d] ctx_state=%d\n", current->pid, ctx->ctx_state));
+
+ if (CTX_IS_UNLOADED(ctx) || CTX_IS_TERMINATED(ctx)) {
+ goto doit;
+ }
+
+ regs = ia64_task_regs(task);
+
+ /*
+ * context still loaded/masked and self monitoring,
+ * we stop/unload and we destroy right here
+ *
+ * We always go here for system-wide sessions
+ */
+ if (task == current) {
+#ifdef CONFIG_SMP
+ /*
+ * the task IS the owner but it migrated to another CPU: that's bad
+ * but we must handle this cleanly. Unfortunately, the kernel does
+ * not provide a mechanism to block migration (while the context is loaded).
+ *
+ * We need to release the resource on the ORIGINAL cpu.
+ */
+ if (ctx->ctx_fl_system && ctx->ctx_cpu != smp_processor_id()) {
+
+ DPRINT(("[%d] should be running on CPU%d\n", current->pid, ctx->ctx_cpu));
+
+ UNPROTECT_CTX(ctx, flags);
+
+ pfm_syswide_cleanup_other_cpu(ctx);
+
+ PROTECT_CTX(ctx, flags);
+
+ /*
+ * short circuit pfm_context_unload();
+ */
+ task->thread.pfm_context = NULL;
+ ctx->ctx_task = NULL;
+
+ CTX_UNLOADED(ctx);
+
+ pfm_unreserve_session(ctx, 1 , ctx->ctx_cpu);
+ } else
+#endif /* CONFIG_SMP */
+ {
+
+ DPRINT(("forcing unload on [%d]\n", current->pid));
+ /*
+ * stop and unload, returning with state UNLOADED
+ * and session unreserved.
+ */
+ pfm_context_unload(ctx, NULL, 0, regs);
+
+ CTX_TERMINATED(ctx);
+
+ DPRINT(("[%d] ctx_state=%d\n", current->pid, ctx->ctx_state));
+ }
+ goto doit;
+ }
+
+ /*
+ * The task is currently blocked or will block after an overflow.
+ * we must force it to wakeup to get out of the
+ * MASKED state and transition to the unloaded state by itself
+ */
+ if (CTX_IS_MASKED(ctx) && CTX_OVFL_NOBLOCK(ctx) == 0) {
+
+ /*
+ * set a "partial" zombie state to be checked
+ * upon return from down() in pfm_handle_work().
+ *
+ * We cannot use the ZOMBIE state, because it is checked
+ * by pfm_load_regs() which is called upon wakeup from down().
+ * In such case, it would free the context and then we would
+ * return to pfm_handle_work() which would access the
+ * stale context. Instead, we set a flag invisible to pfm_load_regs()
+ * but visible to pfm_handle_work().
+ *
+ * For some window of time, we have a zombie context with
+ * ctx_state = MASKED and not ZOMBIE
+ */
+ ctx->ctx_fl_going_zombie = 1;
+
+ /*
+ * force task to wake up from MASKED state
+ */
+ up(&ctx->ctx_restart_sem);
+
+ DPRINT(("waking up ctx_state=%d for [%d]\n", ctx->ctx_state, current->pid));
+
+ /*
+ * put ourself to sleep waiting for the other
+ * task to report completion
+ *
+ * the context is protected by mutex, therefore there
+ * is no risk of being notified of completion before
+ * being actually on the waitq.
+ */
+ set_current_state(TASK_INTERRUPTIBLE);
+ add_wait_queue(&ctx->ctx_zombieq, &wait);
+
+ UNPROTECT_CTX(ctx, flags);
+
+ /*
+ * XXX: check for signals :
+ * - ok of explicit close
+ * - not ok when coming from exit_files()
+ */
+ schedule();
+
+ DPRINT(("woken up ctx_state=%d for [%d]\n", ctx->ctx_state, current->pid));
+
+ PROTECT_CTX(ctx, flags);
+
+ remove_wait_queue(&ctx->ctx_zombieq, &wait);
+ set_current_state(TASK_RUNNING);
+
+ /*
+ * context is terminated at this point
+ */
+ DPRINT(("after zombie wakeup ctx_state=%d for [%d]\n", ctx->ctx_state, current->pid));
+ }
+ else {
+#ifdef CONFIG_SMP
+ /*
+ * switch context to zombie state
+ */
+ CTX_ZOMBIE(ctx);
+
+ DPRINT(("zombie ctx for [%d]\n", task->pid));
+ /*
+ * cannot free the context on the spot. deferred until
+ * the task notices the ZOMBIE state
+ */
+ free_possible = 0;
+#else
+ pfm_context_unload(ctx, NULL, 0, regs);
+#endif
+ }
+
+doit: /* cannot assume task is defined from now on */
+ /*
+ * the context is still attached to a task (possibly current)
+ * we cannot destroy it right now
+ */
+ /*
+ * remove virtual mapping, if any. will be NULL when
+ * called from exit_files().
+ */
+ if (ctx->ctx_smpl_vaddr) {
+ smpl_buf_vaddr = ctx->ctx_smpl_vaddr;
+ smpl_buf_size = ctx->ctx_smpl_size;
+ ctx->ctx_smpl_vaddr = NULL;
+ }
+
+ /*
+ * we must free the sampling buffer right here because
+ * we cannot rely on it being cleaned up later by the
+ * monitored task. It is not possible to free vmalloc'ed
+ * memory in pfm_load_regs(). Instead, we remove the buffer
+ * now. Should there be a subsequent PMU overflow originally
+ * meant for sampling, they will be converted to spurious
+ * and that's fine because the monitoring tool is gone anyway.
+ */
+ if (ctx->ctx_smpl_hdr) {
+ smpl_buf_addr = ctx->ctx_smpl_hdr;
+ smpl_buf_size = ctx->ctx_smpl_size;
+ /* no more sampling */
+ ctx->ctx_smpl_hdr = NULL;
}
+
+
+ DPRINT(("[%d] ctx_state=%d free_possible=%d vaddr=%p addr=%p size=%lu\n",
+ current->pid,
+ ctx->ctx_state,
+ free_possible,
+ smpl_buf_vaddr,
+ smpl_buf_addr,
+ smpl_buf_size));
+
+ if (smpl_buf_addr) pfm_exit_smpl_buffer(ctx->ctx_buf_fmt);
+
+ /*
+ * UNLOADED and TERMINATED mean that the session has already been
+ * unreserved.
+ */
+ if (CTX_IS_ZOMBIE(ctx)) {
+ pfm_unreserve_session(ctx, ctx->ctx_fl_system , ctx->ctx_cpu);
+ }
+
+ /*
+ * disconnect file descriptor from context must be done
+ * before we unlock.
+ */
+ filp->private_data = NULL;
+
+ /*
+ * if we free on the spot, the context is now completely unreachable
+ * from the callers side. The monitored task side is also cut, so we
+ * can freely cut.
+ *
+ * If we have a deferred free, only the caller side is disconnected.
+ */
+ UNPROTECT_CTX(ctx, flags);
+
+ /*
+ * if there was a mapping, then we systematically remove it
+ * at this point. Cannot be done inside critical section
+ * because some VM function reenables interrupts.
+ *
+ * All memory free operations (especially for vmalloc'ed memory)
+ * MUST be done with interrupts ENABLED.
+ */
+ if (smpl_buf_vaddr) pfm_remove_smpl_mapping(current, smpl_buf_vaddr, smpl_buf_size);
+ if (smpl_buf_addr) pfm_rvfree(smpl_buf_addr, smpl_buf_size);
+
+ /*
+ * return the memory used by the context
+ */
+ if (free_possible) pfm_context_free(ctx);
+
return 0;
}
+static int
+pfm_no_open(struct inode *irrelevant, struct file *dontcare)
+{
+ DPRINT(("pfm_no_open called\n"));
+ return -ENXIO;
+}
+
+static struct file_operations pfm_file_ops = {
+ .llseek = pfm_lseek,
+ .read = pfm_read,
+ .write = pfm_write,
+ .poll = pfm_poll,
+ .ioctl = pfm_ioctl,
+ .open = pfm_no_open, /* special open code to disallow open via /proc */
+ .fasync = pfm_fasync,
+ .release = pfm_close
+};
+
+static int
+pfmfs_delete_dentry(struct dentry *dentry)
+{
+ return 1;
+}
+static struct dentry_operations pfmfs_dentry_operations = {
+ d_delete: pfmfs_delete_dentry,
+};
+
+
+static int
+pfm_alloc_fd(struct file **cfile)
+{
+ int fd, ret = 0;
+ struct file *file = NULL;
+ struct inode * inode;
+ char name[32];
+ struct qstr this;
+
+ fd = get_unused_fd();
+ if (fd < 0) return -ENFILE;
+
+ ret = -ENFILE;
+
+ file = get_empty_filp();
+ if (!file) goto out;
+
+ /*
+ * allocate a new inode
+ */
+ inode = new_inode(pfmfs_mnt->mnt_sb);
+ if (!inode) goto out;
+
+ DPRINT(("new inode ino=%ld @%p\n", inode->i_ino, inode));
+
+ inode->i_sb = pfmfs_mnt->mnt_sb;
+ inode->i_mode = S_IFCHR|S_IRUGO;
+ inode->i_sock = 0;
+ inode->i_uid = current->fsuid;
+ inode->i_gid = current->fsgid;
+
+ sprintf(name, "[%lu]", inode->i_ino);
+ this.name = name;
+ this.len = strlen(name);
+ this.hash = inode->i_ino;
+
+ ret = -ENOMEM;
+
+ /*
+ * allocate a new dcache entry
+ */
+ file->f_dentry = d_alloc(pfmfs_mnt->mnt_sb->s_root, &this);
+ if (!file->f_dentry) goto out;
+
+ file->f_dentry->d_op = &pfmfs_dentry_operations;
+
+ d_add(file->f_dentry, inode);
+ file->f_vfsmnt = mntget(pfmfs_mnt);
+
+ file->f_op = &pfm_file_ops;
+ file->f_mode = FMODE_READ;
+ file->f_flags = O_RDONLY;
+ file->f_pos = 0;
+
+ /*
+ * may have to delay until context is attached?
+ */
+ fd_install(fd, file);
+
+ /*
+ * the file structure we will use
+ */
+ *cfile = file;
+
+ return fd;
+out:
+ if (file) put_filp(file);
+ put_unused_fd(fd);
+ return ret;
+}
+
+static void
+pfm_free_fd(int fd, struct file *file)
+{
+ if (file) put_filp(file);
+ put_unused_fd(fd);
+}
+
/*
- * counts the number of PMDS to save per entry.
- * This code is generic enough to accommodate more than 64 PMDS when they become available
+ * This function gets called from mm/mmap.c:exit_mmap() only when there is a sampling buffer
+ * attached to the context AND the current task has a mapping for it, i.e., it is the original
+ * creator of the context.
+ *
+ * This function is used to remember the fact that the vma describing the sampling buffer
+ * has now been removed. It can only be called when no other tasks share the same mm context.
+ *
*/
-static unsigned long
-pfm_smpl_entry_size(unsigned long *which, unsigned long size)
+static void
+pfm_vm_close(struct vm_area_struct *vma)
+{
+ pfm_context_t *ctx = (pfm_context_t *)vma->vm_private_data;
+ unsigned long flags;
+
+ PROTECT_CTX(ctx, flags);
+ ctx->ctx_smpl_vaddr = NULL;
+ UNPROTECT_CTX(ctx, flags);
+ DPRINT(("[%d] clearing vaddr for ctx %p\n", current->pid, ctx));
+}
+
+static int
+pfm_remap_buffer(struct vm_area_struct *vma, unsigned long buf, unsigned long addr, unsigned long size)
{
- unsigned long i, res = 0;
+ unsigned long page;
+
+ DPRINT(("CPU%d buf=0x%lx addr=0x%lx size=%ld\n", smp_processor_id(), buf, addr, size));
- for (i=0; i < size; i++, which++) res += hweight64(*which);
+ while (size > 0) {
+ page = pfm_kvirt_to_pa(buf);
- DBprintk(("weight=%ld\n", res));
+ if (pfm_remap_page_range(vma, addr, page, PAGE_SIZE, PAGE_READONLY)) return -ENOMEM;
- return res;
+ addr += PAGE_SIZE;
+ buf += PAGE_SIZE;
+ size -= PAGE_SIZE;
+ }
+ return 0;
}
/*
- * Allocates the sampling buffer and remaps it into caller's address space
+ * allocate a sampling buffer and remaps it into the user address space of the task
*/
static int
-pfm_smpl_buffer_alloc(pfm_context_t *ctx, unsigned long *which_pmds, unsigned long entries,
- void **user_vaddr)
+pfm_smpl_buffer_alloc(struct task_struct *task, pfm_context_t *ctx, unsigned long rsize, void **user_vaddr)
{
- struct mm_struct *mm = current->mm;
+ struct mm_struct *mm = task->mm;
struct vm_area_struct *vma = NULL;
- unsigned long size, regcount;
+ unsigned long size;
void *smpl_buf;
- pfm_smpl_buffer_desc_t *psb;
-
- /* note that regcount might be 0, in this case only the header for each
- * entry will be recorded.
- */
- regcount = pfm_smpl_entry_size(which_pmds, 1);
-
- if ((sizeof(perfmon_smpl_hdr_t)+ entries*sizeof(perfmon_smpl_entry_t)) <= entries) {
- DBprintk(("requested entries %lu is too big\n", entries));
- return -EINVAL;
- }
/*
- * 1 buffer hdr and for each entry a header + regcount PMDs to save
+ * the fixed header + requested size and align to page boundary
*/
- size = PAGE_ALIGN( sizeof(perfmon_smpl_hdr_t)
- + entries * (sizeof(perfmon_smpl_entry_t) + regcount*sizeof(u64)));
+ size = PAGE_ALIGN(rsize);
- DBprintk(("sampling buffer size=%lu bytes\n", size));
+ DPRINT(("sampling buffer rsize=%lu size=%lu bytes\n", rsize, size));
/*
* check requested size to avoid Denial-of-service attacks
- * XXX: may have to refine this test
+ * XXX: may have to refine this test
* Check against address space limit.
*
- * if ((mm->total_vm << PAGE_SHIFT) + len> current->rlim[RLIMIT_AS].rlim_cur)
+ * if ((mm->total_vm << PAGE_SHIFT) + len> task->rlim[RLIMIT_AS].rlim_cur)
* return -ENOMEM;
*/
- if (size > current->rlim[RLIMIT_MEMLOCK].rlim_cur) return -EAGAIN;
+ if (size > task->rlim[RLIMIT_MEMLOCK].rlim_cur) return -EAGAIN;
/*
* We do the easy to undo allocations first.
*/
smpl_buf = pfm_rvmalloc(size);
if (smpl_buf == NULL) {
- DBprintk(("Can't allocate sampling buffer\n"));
+ DPRINT(("Can't allocate sampling buffer\n"));
return -ENOMEM;
}
- DBprintk(("smpl_buf @%p\n", smpl_buf));
-
- /* allocate sampling buffer descriptor now */
- psb = kmalloc(sizeof(*psb), GFP_KERNEL);
- if (psb == NULL) {
- DBprintk(("Can't allocate sampling buffer descriptor\n"));
- goto error_kmalloc;
- }
+ DPRINT(("[%d] smpl_buf @%p\n", current->pid, smpl_buf));
/* allocate vma */
vma = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
if (!vma) {
- DBprintk(("Cannot allocate vma\n"));
+ DPRINT(("Cannot allocate vma\n"));
goto error_kmem;
}
/*
* partially initialize the vma for the sampling buffer
*
* The VM_DONTCOPY flag is very important as it ensures that the mapping
- * will never be inherited for any child process (via fork()) which is always
+ * will never be inherited for any child process (via fork()) which is always
* what we want.
*/
vma->vm_mm = mm;
vma->vm_flags = VM_READ| VM_MAYREAD |VM_RESERVED|VM_DONTCOPY;
vma->vm_page_prot = PAGE_READONLY; /* XXX may need to change */
- vma->vm_ops = &pfm_vm_ops; /* necesarry to get the close() callback */
+ vma->vm_ops = &pfm_vm_ops;
vma->vm_pgoff = 0;
vma->vm_file = NULL;
- vma->vm_private_data = psb; /* information needed by the pfm_vm_close() function */
+ vma->vm_private_data = ctx; /* information needed by the pfm_vm_close() function */
/*
* Now we have everything we need and we can initialize
* and connect all the data structures
*/
- psb->psb_hdr = smpl_buf;
- psb->psb_addr = ((char *)smpl_buf)+sizeof(perfmon_smpl_hdr_t); /* first entry */
- psb->psb_size = size; /* aligned size */
- psb->psb_index = 0;
- psb->psb_entries = entries;
- psb->psb_refcnt = 1;
- psb->psb_flags = PSB_HAS_VMA;
-
- spin_lock_init(&psb->psb_lock);
-
- /*
- * XXX: will need to do cacheline alignment to avoid false sharing in SMP mode and
- * multitask monitoring.
- */
- psb->psb_entry_size = sizeof(perfmon_smpl_entry_t) + regcount*sizeof(u64);
-
- DBprintk(("psb @%p entry_size=%ld hdr=%p addr=%p refcnt=%lu psb_flags=0x%x\n",
- (void *)psb,psb->psb_entry_size, (void *)psb->psb_hdr,
- (void *)psb->psb_addr, psb->psb_refcnt, psb->psb_flags));
-
- /* initialize some of the fields of user visible buffer header */
- psb->psb_hdr->hdr_version = PFM_SMPL_VERSION;
- psb->psb_hdr->hdr_entry_size = psb->psb_entry_size;
- psb->psb_hdr->hdr_pmds[0] = which_pmds[0];
+ ctx->ctx_smpl_hdr = smpl_buf;
+ ctx->ctx_smpl_size = size; /* aligned size */
/*
* Let's do the difficult operations next.
* now we atomically find some area in the address space and
* remap the buffer in it.
*/
- down_write(¤t->mm->mmap_sem);
-
+ down_write(&task->mm->mmap_sem);
/* find some free area in address space, must have mmap sem held */
vma->vm_start = get_unmapped_area(NULL, 0, size, 0, MAP_PRIVATE|MAP_ANONYMOUS);
if (vma->vm_start == 0UL) {
- DBprintk(("Cannot find unmapped area for size %ld\n", size));
- up_write(¤t->mm->mmap_sem);
+ DPRINT(("Cannot find unmapped area for size %ld\n", size));
+ up_write(&task->mm->mmap_sem);
goto error;
}
vma->vm_end = vma->vm_start + size;
- DBprintk(("entries=%ld aligned size=%ld, unmapped @0x%lx\n", entries, size, vma->vm_start));
+ DPRINT(("aligned size=%ld, hdr=%p mapped @0x%lx\n", size, ctx->ctx_smpl_hdr, vma->vm_start));
- /* can only be applied to current, need to have the mm semaphore held when called */
+ /* can only be applied to current task, need to have the mm semaphore held when called */
if (pfm_remap_buffer(vma, (unsigned long)smpl_buf, vma->vm_start, size)) {
- DBprintk(("Can't remap buffer\n"));
- up_write(¤t->mm->mmap_sem);
+ DPRINT(("Can't remap buffer\n"));
+ up_write(&task->mm->mmap_sem);
goto error;
}
mm->total_vm += size >> PAGE_SHIFT;
- up_write(¤t->mm->mmap_sem);
-
- /* store which PMDS to record */
- ctx->ctx_smpl_regs[0] = which_pmds[0];
-
-
- /* link to perfmon context */
- ctx->ctx_psb = psb;
+ up_write(&task->mm->mmap_sem);
/*
- * keep track of user level virtual address
+ * keep track of user level virtual address
*/
- ctx->ctx_smpl_vaddr = *(unsigned long *)user_vaddr = vma->vm_start;
+ ctx->ctx_smpl_vaddr = (void *)vma->vm_start;
+ *(unsigned long *)user_vaddr = vma->vm_start;
return 0;
error:
kmem_cache_free(vm_area_cachep, vma);
error_kmem:
- kfree(psb);
-error_kmalloc:
pfm_rvfree(smpl_buf, size);
+
return -ENOMEM;
}
+/*
+ * XXX: do something better here
+ */
static int
-pfm_reserve_session(struct task_struct *task, int is_syswide, unsigned long cpu_mask)
+pfm_bad_permissions(struct task_struct *task)
{
- unsigned long m, undo_mask;
- unsigned int n, i;
-
- /*
- * validy checks on cpu_mask have been done upstream
- */
- LOCK_PFS();
-
- if (is_syswide) {
- /*
- * cannot mix system wide and per-task sessions
- */
- if (pfm_sessions.pfs_task_sessions > 0UL) {
- DBprintk(("system wide not possible, %u conflicting task_sessions\n",
- pfm_sessions.pfs_task_sessions));
- goto abort;
- }
+ /* stolen from bad_signal() */
+ return (current->session != task->session)
+ && (current->euid ^ task->suid) && (current->euid ^ task->uid)
+ && (current->uid ^ task->suid) && (current->uid ^ task->uid);
+}
- m = cpu_mask; undo_mask = 0UL; n = 0;
- DBprintk(("cpu_mask=0x%lx\n", cpu_mask));
- for(i=0; m; i++, m>>=1) {
+static int
+pfarg_is_sane(struct task_struct *task, pfarg_context_t *pfx)
+{
+ int ctx_flags;
- if ((m & 0x1) == 0UL) continue;
+ /* valid signal */
- if (pfm_sessions.pfs_sys_session[i]) goto undo;
+ ctx_flags = pfx->ctx_flags;
- DBprintk(("reserving CPU%d currently on CPU%d\n", i, smp_processor_id()));
+ if (ctx_flags & PFM_FL_SYSTEM_WIDE) {
- pfm_sessions.pfs_sys_session[i] = task;
- undo_mask |= 1UL << i;
- n++;
+ /*
+ * cannot block in this mode
+ */
+ if (ctx_flags & PFM_FL_NOTIFY_BLOCK) {
+ DPRINT(("cannot use blocking mode when in system wide monitoring\n"));
+ return -EINVAL;
}
- pfm_sessions.pfs_sys_sessions += n;
} else {
- if (pfm_sessions.pfs_sys_sessions) goto abort;
- pfm_sessions.pfs_task_sessions++;
- }
- DBprintk(("task_sessions=%u sys_session[%d]=%d",
- pfm_sessions.pfs_task_sessions,
- smp_processor_id(), pfm_sessions.pfs_sys_session[smp_processor_id()] ? 1 : 0));
- UNLOCK_PFS();
- return 0;
-undo:
- DBprintk(("system wide not possible, conflicting session [%d] on CPU%d\n",
- pfm_sessions.pfs_sys_session[i]->pid, i));
-
- for(i=0; undo_mask; i++, undo_mask >>=1) {
- pfm_sessions.pfs_sys_session[i] = NULL;
}
-abort:
- UNLOCK_PFS();
-
- return -EBUSY;
+ /* probably more to add here */
+ return 0;
}
static int
-pfm_unreserve_session(struct task_struct *task, int is_syswide, unsigned long cpu_mask)
+pfm_setup_buffer_fmt(struct task_struct *task, pfm_context_t *ctx, unsigned int ctx_flags,
+ unsigned int cpu, pfarg_context_t *arg)
{
- pfm_context_t *ctx;
- unsigned long m;
- unsigned int n, i;
+ pfm_buffer_fmt_t *fmt = NULL;
+ unsigned long size = 0UL;
+ void *uaddr = NULL;
+ void *fmt_arg = NULL;
+ int ret = 0;
+#define PFM_CTXARG_BUF_ARG(a) (pfm_buffer_fmt_t *)(a+1)
- ctx = task ? task->thread.pfm_context : NULL;
+ /* invoke and lock buffer format, if found */
+ fmt = pfm_find_buffer_fmt(arg->ctx_smpl_buf_id, 0);
+ if (fmt == NULL) {
+ DPRINT(("[%d] cannot find buffer format\n", task->pid));
+ return -EINVAL;
+ }
/*
- * validy checks on cpu_mask have been done upstream
+ * buffer argument MUST be contiguous to pfarg_context_t
*/
- LOCK_PFS();
+ if (fmt->fmt_arg_size) fmt_arg = PFM_CTXARG_BUF_ARG(arg);
- DBprintk(("[%d] sys_sessions=%u task_sessions=%u dbregs=%u syswide=%d cpu_mask=0x%lx\n",
- task->pid,
- pfm_sessions.pfs_sys_sessions,
- pfm_sessions.pfs_task_sessions,
- pfm_sessions.pfs_sys_use_dbregs,
- is_syswide,
- cpu_mask));
+ ret = pfm_buf_fmt_validate(fmt, task, ctx_flags, cpu, fmt_arg);
+ DPRINT(("[%d] after validate(0x%x,%d,%p)=%d\n", task->pid, ctx_flags, cpu, fmt_arg, ret));
- if (is_syswide) {
- m = cpu_mask; n = 0;
- for(i=0; m; i++, m>>=1) {
- if ((m & 0x1) == 0UL) continue;
- pfm_sessions.pfs_sys_session[i] = NULL;
- n++;
- }
- /*
- * would not work with perfmon+more than one bit in cpu_mask
+ if (ret) goto error;
+
+ /* link buffer format and context */
+ ctx->ctx_buf_fmt = fmt;
+
+ /*
+ * check if buffer format wants to use perfmon buffer allocation/mapping service
+ */
+ ret = pfm_buf_fmt_getsize(fmt, task, ctx_flags, cpu, fmt_arg, &size);
+ if (ret) goto error;
+
+ if (size) {
+ /*
+ * buffer is always remapped into the caller's address space
*/
- if (ctx && ctx->ctx_fl_using_dbreg) {
- if (pfm_sessions.pfs_sys_use_dbregs == 0) {
- printk(KERN_DEBUG "perfmon: invalid release for [%d] "
- "sys_use_dbregs=0\n", task->pid);
- } else {
- pfm_sessions.pfs_sys_use_dbregs--;
- }
- }
- pfm_sessions.pfs_sys_sessions -= n;
+ ret = pfm_smpl_buffer_alloc(current, ctx, size, &uaddr);
+ if (ret) goto error;
- DBprintk(("CPU%d sys_sessions=%u\n",
- smp_processor_id(), pfm_sessions.pfs_sys_sessions));
- } else {
- pfm_sessions.pfs_task_sessions--;
- DBprintk(("[%d] task_sessions=%u\n",
- task->pid, pfm_sessions.pfs_task_sessions));
+ /* keep track of user address of buffer */
+ arg->ctx_smpl_vaddr = uaddr;
}
+ ret = pfm_buf_fmt_init(fmt, task, ctx->ctx_smpl_hdr, ctx_flags, cpu, fmt_arg);
- UNLOCK_PFS();
-
- return 0;
+error:
+ return ret;
}
-/*
- * XXX: do something better here
- */
-static int
-pfm_bad_permissions(struct task_struct *task)
+static void
+pfm_reset_pmu_state(pfm_context_t *ctx)
{
- /* stolen from bad_signal() */
- return (current->session != task->session)
- && (current->euid ^ task->suid) && (current->euid ^ task->uid)
- && (current->uid ^ task->suid) && (current->uid ^ task->uid);
-}
+ int i;
+ /*
+ * install reset values for PMC.
+ */
+ for (i=1; PMC_IS_LAST(i) == 0; i++) {
+ if (PMC_IS_IMPL(i) == 0) continue;
+ ctx->ctx_pmcs[i] = PMC_DFL_VAL(i);
+ DPRINT(("pmc[%d]=0x%lx\n", i, ctx->ctx_pmcs[i]));
+ }
+ /*
+ * PMD registers are set to 0UL when the context in memset()
+ */
-static int
-pfx_is_sane(struct task_struct *task, pfarg_context_t *pfx)
-{
- unsigned long smpl_pmds = pfx->ctx_smpl_regs[0];
- int ctx_flags;
- int cpu;
+ /*
+ * On context switched restore, we must restore ALL pmc and ALL pmd even
+ * when they are not actively used by the task. In UP, the incoming process
+ * may otherwise pick up left over PMC, PMD state from the previous process.
+ * As opposed to PMD, stale PMC can cause harm to the incoming
+ * process because they may change what is being measured.
+ * Therefore, we must systematically reinstall the entire
+ * PMC state. In SMP, the same thing is possible on the
+ * same CPU but also on between 2 CPUs.
+ *
+ * The problem with PMD is information leaking especially
+ * to user level when psr.sp=0
+ *
+ * There is unfortunately no easy way to avoid this problem
+ * on either UP or SMP. This definitively slows down the
+ * pfm_load_regs() function.
+ */
- /* valid signal */
+ /*
+ * bitmask of all PMCs accessible to this context
+ *
+ * PMC0 is treated differently.
+ */
+ ctx->ctx_all_pmcs[0] = pmu_conf.impl_pmcs[0] & ~0x1;
- /* cannot send to process 1, 0 means do not notify */
- if (pfx->ctx_notify_pid == 1) {
- DBprintk(("invalid notify_pid %d\n", pfx->ctx_notify_pid));
- return -EINVAL;
- }
- ctx_flags = pfx->ctx_flags;
+ /*
+ * bitmask of all PMDs that are accessible to this context
+ */
+ ctx->ctx_all_pmds[0] = pmu_conf.impl_pmds[0];
- if ((ctx_flags & PFM_FL_INHERIT_MASK) == (PFM_FL_INHERIT_ONCE|PFM_FL_INHERIT_ALL)) {
- DBprintk(("invalid inherit mask 0x%x\n",ctx_flags & PFM_FL_INHERIT_MASK));
- return -EINVAL;
- }
+ DPRINT(("<%d> all_pmcs=0x%lx all_pmds=0x%lx\n", ctx->ctx_fd, ctx->ctx_all_pmcs[0],ctx->ctx_all_pmds[0]));
- if (ctx_flags & PFM_FL_SYSTEM_WIDE) {
- DBprintk(("cpu_mask=0x%lx\n", pfx->ctx_cpu_mask));
- /*
- * cannot block in this mode
- */
- if (ctx_flags & PFM_FL_NOTIFY_BLOCK) {
- DBprintk(("cannot use blocking mode when in system wide monitoring\n"));
- return -EINVAL;
- }
- /*
- * must only have one bit set in the CPU mask
- */
- if (hweight64(pfx->ctx_cpu_mask) != 1UL) {
- DBprintk(("invalid CPU mask specified\n"));
- return -EINVAL;
- }
- /*
- * and it must be a valid CPU
- */
- cpu = ffz(~pfx->ctx_cpu_mask);
-#ifdef CONFIG_SMP
- if (cpu_online(cpu) == 0) {
-#else
- if (cpu != 0) {
-#endif
- DBprintk(("CPU%d is not online\n", cpu));
- return -EINVAL;
- }
+ /*
+ * useful in case of re-enable after disable
+ */
+ ctx->ctx_used_ibrs[0] = 0UL;
+ ctx->ctx_used_dbrs[0] = 0UL;
+}
- /*
- * check for pre-existing pinning, if conflicting reject
- */
- if (task->cpus_allowed != ~0UL && (task->cpus_allowed & (1UL<<cpu)) == 0) {
- DBprintk(("[%d] pinned on 0x%lx, mask for CPU%d \n", task->pid,
- task->cpus_allowed, cpu));
- return -EINVAL;
- }
+static int
+pfm_ctx_getsize(void *arg, size_t *sz)
+{
+ pfarg_context_t *req = (pfarg_context_t *)arg;
+ pfm_buffer_fmt_t *fmt;
- } else {
- /*
- * must provide a target for the signal in blocking mode even when
- * no counter is configured with PFM_FL_REG_OVFL_NOTIFY
- */
- if ((ctx_flags & PFM_FL_NOTIFY_BLOCK) && pfx->ctx_notify_pid == 0) {
- DBprintk(("must have notify_pid when blocking for [%d]\n", task->pid));
- return -EINVAL;
- }
-#if 0
- if ((ctx_flags & PFM_FL_NOTIFY_BLOCK) && pfx->ctx_notify_pid == task->pid) {
- DBprintk(("cannot notify self when blocking for [%d]\n", task->pid));
- return -EINVAL;
- }
-#endif
- }
- /* verify validity of smpl_regs */
- if ((smpl_pmds & pmu_conf.impl_pmds[0]) != smpl_pmds) {
- DBprintk(("invalid smpl_regs 0x%lx\n", smpl_pmds));
+ *sz = 0;
+
+ if (!pfm_uuid_cmp(req->ctx_smpl_buf_id, pfm_null_uuid)) return 0;
+
+ /* no buffer locking here, will be called again */
+ fmt = pfm_find_buffer_fmt(req->ctx_smpl_buf_id, 1);
+ if (fmt == NULL) {
+ DPRINT(("cannot find buffer format\n"));
return -EINVAL;
}
- /* probably more to add here */
+ /* get just enough to copy in user parameters */
+ *sz = fmt->fmt_arg_size;
+ DPRINT(("arg_size=%lu\n", *sz));
return 0;
}
+
+
+/*
+ * cannot attach if :
+ * - kernel task
+ * - task not owned by caller
+ * - task incompatible with context mode
+ */
static int
-pfm_context_create(struct task_struct *task, pfm_context_t *ctx, void *req, int count,
- struct pt_regs *regs)
+pfm_task_incompatible(pfm_context_t *ctx, struct task_struct *task)
{
- pfarg_context_t tmp;
- void *uaddr = NULL;
- int ret;
- int ctx_flags;
- pid_t notify_pid;
+ /*
+ * no kernel task or task not owner by caller
+ */
+ if (task->mm == NULL) {
+ DPRINT(("[%d] task [%d] has not memory context (kernel thread)\n", current->pid, task->pid));
+ return -EPERM;
+ }
+ if (pfm_bad_permissions(task)) {
+ DPRINT(("[%d] no permission to attach to [%d]\n", current->pid, task->pid));
+ return -EPERM;
+ }
+ /*
+ * cannot block in self-monitoring mode
+ */
+ if (CTX_OVFL_NOBLOCK(ctx) == 0 && task == current) {
+ DPRINT(("cannot load a blocking context on self for [%d]\n", task->pid));
+ return -EINVAL;
+ }
+
+ if (task->state == TASK_ZOMBIE) {
+ DPRINT(("[%d] cannot attach to zombie task [%d]\n", current->pid, task->pid));
+ return -EBUSY;
+ }
- /* a context has already been defined */
- if (ctx) return -EBUSY;
+ /*
+ * always ok for self
+ */
+ if (task == current) return 0;
+ if (task->state != TASK_STOPPED) {
+ DPRINT(("[%d] cannot attach to non-stopped task [%d] state=%ld\n", current->pid, task->pid, task->state));
+ return -EBUSY;
+ }
/*
- * not yet supported
+ * make sure the task is off any CPU
*/
- if (task != current) return -EINVAL;
+ pfm_wait_task_inactive(task);
- if (__copy_from_user(&tmp, req, sizeof(tmp))) return -EFAULT;
+ /* more to come... */
- ret = pfx_is_sane(task, &tmp);
- if (ret < 0) return ret;
+ return 0;
+}
- ctx_flags = tmp.ctx_flags;
+static int
+pfm_get_task(pfm_context_t *ctx, pid_t pid, struct task_struct **task)
+{
+ struct task_struct *p = current;
+ int ret;
- ret = pfm_reserve_session(task, ctx_flags & PFM_FL_SYSTEM_WIDE, tmp.ctx_cpu_mask);
- if (ret) goto abort;
+ /* XXX: need to add more checks here */
+ if (pid < 2) return -EPERM;
- ret = -ENOMEM;
+ if (pid != current->pid) {
- ctx = pfm_context_alloc();
- if (!ctx) goto error;
+ read_lock(&tasklist_lock);
- /* record the creator (important for inheritance) */
- ctx->ctx_owner = current;
+ p = find_task_by_pid(pid);
- notify_pid = tmp.ctx_notify_pid;
+ /* make sure task cannot go away while we operate on it */
+ if (p) get_task_struct(p);
- spin_lock_init(&ctx->ctx_lock);
+ read_unlock(&tasklist_lock);
- if (notify_pid == current->pid) {
+ if (p == NULL) return -ESRCH;
+ }
- ctx->ctx_notify_task = current;
- task->thread.pfm_context = ctx;
+ ret = pfm_task_incompatible(ctx, p);
+ if (ret == 0) {
+ *task = p;
+ } else if (p != current) {
+ pfm_put_task(p);
+ }
+ return ret;
+}
- } else if (notify_pid!=0) {
- struct task_struct *notify_task;
- read_lock(&tasklist_lock);
- notify_task = find_task_by_pid(notify_pid);
+static int
+pfm_context_create(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
+{
+ pfarg_context_t *req = (pfarg_context_t *)arg;
+ struct file *filp;
+ int ctx_flags;
+ int ret;
- if (notify_task) {
+ /* let's check the arguments first */
+ ret = pfarg_is_sane(current, req);
+ if (ret < 0) return ret;
- ret = -EPERM;
+ ctx_flags = req->ctx_flags;
- /*
- * check if we can send this task a signal
- */
- if (pfm_bad_permissions(notify_task)) {
- read_unlock(&tasklist_lock);
- goto buffer_error;
- }
+ ret = -ENOMEM;
- /*
- * make visible
- * must be done inside critical section
- *
- * if the initialization does not go through it is still
- * okay because child will do the scan for nothing which
- * won't hurt.
- */
- task->thread.pfm_context = ctx;
+ ctx = pfm_context_alloc();
+ if (!ctx) goto error;
- /*
- * will cause task to check on exit for monitored
- * processes that would notify it. see release_thread()
- * Note: the scan MUST be done in release thread, once the
- * task has been detached from the tasklist otherwise you are
- * exposed to race conditions.
- */
- atomic_add(1, &ctx->ctx_notify_task->thread.pfm_notifiers_check);
+ req->ctx_fd = ctx->ctx_fd = pfm_alloc_fd(&filp);
+ if (req->ctx_fd < 0) goto error_file;
- ctx->ctx_notify_task = notify_task;
- }
- read_unlock(&tasklist_lock);
- }
+ /*
+ * attach context to file
+ */
+ filp->private_data = ctx;
/*
- * notification process does not exist
+ * does the user want to sample?
*/
- if (notify_pid != 0 && ctx->ctx_notify_task == NULL) {
- ret = -EINVAL;
- goto buffer_error;
+ if (pfm_uuid_cmp(req->ctx_smpl_buf_id, pfm_null_uuid)) {
+ ret = pfm_setup_buffer_fmt(current, ctx, ctx_flags, 0, req);
+ if (ret) goto buffer_error;
}
- if (tmp.ctx_smpl_entries) {
- DBprintk(("sampling entries=%lu\n",tmp.ctx_smpl_entries));
-
- ret = pfm_smpl_buffer_alloc(ctx, tmp.ctx_smpl_regs,
- tmp.ctx_smpl_entries, &uaddr);
- if (ret<0) goto buffer_error;
+ /*
+ * init context protection lock
+ */
+ spin_lock_init(&ctx->ctx_lock);
- tmp.ctx_smpl_vaddr = uaddr;
- }
- /* initialization of context's flags */
- ctx->ctx_fl_inherit = ctx_flags & PFM_FL_INHERIT_MASK;
- ctx->ctx_fl_block = (ctx_flags & PFM_FL_NOTIFY_BLOCK) ? 1 : 0;
- ctx->ctx_fl_system = (ctx_flags & PFM_FL_SYSTEM_WIDE) ? 1: 0;
- ctx->ctx_fl_excl_idle = (ctx_flags & PFM_FL_EXCL_IDLE) ? 1: 0;
- ctx->ctx_fl_unsecure = (ctx_flags & PFM_FL_UNSECURE) ? 1: 0;
- ctx->ctx_fl_frozen = 0;
- ctx->ctx_fl_trap_reason = PFM_TRAP_REASON_NONE;
+ /*
+ * context is unloaded
+ */
+ CTX_UNLOADED(ctx);
/*
- * setting this flag to 0 here means, that the creator or the task that the
- * context is being attached are granted access. Given that a context can only
- * be created for the calling process this, in effect only allows the creator
- * to access the context. See pfm_protect() for more.
+ * initialization of context's flags
+ */
+ ctx->ctx_fl_block = (ctx_flags & PFM_FL_NOTIFY_BLOCK) ? 1 : 0;
+ ctx->ctx_fl_system = (ctx_flags & PFM_FL_SYSTEM_WIDE) ? 1: 0;
+ ctx->ctx_fl_unsecure = (ctx_flags & PFM_FL_UNSECURE) ? 1: 0;
+ ctx->ctx_fl_is_sampling = ctx->ctx_buf_fmt ? 1 : 0; /* assume record() is defined */
+ ctx->ctx_fl_no_msg = (ctx_flags & PFM_FL_OVFL_NO_MSG) ? 1: 0;
+ /*
+ * will move to set properties
+ * ctx->ctx_fl_excl_idle = (ctx_flags & PFM_FL_EXCL_IDLE) ? 1: 0;
*/
- ctx->ctx_fl_protected = 0;
- /* for system wide mode only (only 1 bit set) */
- ctx->ctx_cpu = ffz(~tmp.ctx_cpu_mask);
-
- atomic_set(&ctx->ctx_last_cpu,-1); /* SMP only, means no CPU */
-
- sema_init(&ctx->ctx_restart_sem, 0); /* init this semaphore to locked */
-
- if (__copy_to_user(req, &tmp, sizeof(tmp))) {
- ret = -EFAULT;
- goto buffer_error;
- }
-
- DBprintk(("context=%p, pid=%d notify_task=%p\n",
- (void *)ctx, task->pid, ctx->ctx_notify_task));
+ /*
+ * init restart semaphore to locked
+ */
+ sema_init(&ctx->ctx_restart_sem, 0);
- DBprintk(("context=%p, pid=%d flags=0x%x inherit=%d block=%d system=%d excl_idle=%d unsecure=%d\n",
- (void *)ctx, task->pid, ctx_flags, ctx->ctx_fl_inherit,
- ctx->ctx_fl_block, ctx->ctx_fl_system,
- ctx->ctx_fl_excl_idle,
- ctx->ctx_fl_unsecure));
+ /*
+ * activation is used in SMP only
+ */
+ ctx->ctx_last_activation = PFM_INVALID_ACTIVATION;
+ SET_LAST_CPU(ctx, -1);
/*
- * when no notification is required, we can make this visible at the last moment
+ * initialize notification message queue
*/
- if (notify_pid == 0) task->thread.pfm_context = ctx;
+ ctx->ctx_msgq_head = ctx->ctx_msgq_tail = 0;
+ init_waitqueue_head(&ctx->ctx_msgq_wait);
+ init_waitqueue_head(&ctx->ctx_zombieq);
+
+ DPRINT(("ctx=%p flags=0x%x system=%d notify_block=%d excl_idle=%d unsecure=%d no_msg=%d ctx_fd=%d \n",
+ ctx,
+ ctx_flags,
+ ctx->ctx_fl_system,
+ ctx->ctx_fl_block,
+ ctx->ctx_fl_excl_idle,
+ ctx->ctx_fl_unsecure,
+ ctx->ctx_fl_no_msg,
+ ctx->ctx_fd));
+
/*
- * pin task to CPU and force reschedule on exit to ensure
- * that when back to user level the task runs on the designated
- * CPU.
+ * initialize soft PMU state
*/
- if (ctx->ctx_fl_system) {
- ctx->ctx_saved_cpus_allowed = task->cpus_allowed;
- set_cpus_allowed(task, tmp.ctx_cpu_mask);
- DBprintk(("[%d] rescheduled allowed=0x%lx\n", task->pid, task->cpus_allowed));
- }
+ pfm_reset_pmu_state(ctx);
return 0;
buffer_error:
+ pfm_free_fd(ctx->ctx_fd, filp);
+
+ if (ctx->ctx_buf_fmt) {
+ pfm_buf_fmt_exit(ctx->ctx_buf_fmt, current, NULL, regs);
+ }
+error_file:
pfm_context_free(ctx);
-error:
- pfm_unreserve_session(task, ctx_flags & PFM_FL_SYSTEM_WIDE , tmp.ctx_cpu_mask);
-abort:
- /* make sure we don't leave anything behind */
- task->thread.pfm_context = NULL;
+error:
return ret;
}
}
static void
+pfm_reset_regs_masked(pfm_context_t *ctx, unsigned long *ovfl_regs, int flag)
+{
+ unsigned long mask = ovfl_regs[0];
+ unsigned long reset_others = 0UL;
+ unsigned long val;
+ int i, is_long_reset = (flag == PFM_PMD_LONG_RESET);
+
+ DPRINT_ovfl(("ovfl_regs=0x%lx flag=%d\n", ovfl_regs[0], flag));
+
+ if (flag == PFM_PMD_NO_RESET) return;
+
+ /*
+ * now restore reset value on sampling overflowed counters
+ */
+ mask >>= PMU_FIRST_COUNTER;
+ for(i = PMU_FIRST_COUNTER; mask; i++, mask >>= 1) {
+ if (mask & 0x1) {
+ ctx->ctx_pmds[i].val = val = pfm_new_counter_value(ctx->ctx_pmds+ i, is_long_reset);
+ reset_others |= ctx->ctx_pmds[i].reset_pmds[0];
+
+ DPRINT_ovfl((" %s reset ctx_pmds[%d]=%lx\n",
+ is_long_reset ? "long" : "short", i, val));
+ }
+ }
+
+ /*
+ * Now take care of resetting the other registers
+ */
+ for(i = 0; reset_others; i++, reset_others >>= 1) {
+
+ if ((reset_others & 0x1) == 0) continue;
+
+ ctx->ctx_pmds[i].val = val = pfm_new_counter_value(ctx->ctx_pmds + i, is_long_reset);
+
+ DPRINT_ovfl(("%s reset_others pmd[%d]=%lx\n",
+ is_long_reset ? "long" : "short", i, val));
+ }
+}
+
+static void
pfm_reset_regs(pfm_context_t *ctx, unsigned long *ovfl_regs, int flag)
{
unsigned long mask = ovfl_regs[0];
unsigned long val;
int i, is_long_reset = (flag == PFM_PMD_LONG_RESET);
+ DPRINT_ovfl(("ovfl_regs=0x%lx flag=%d\n", ovfl_regs[0], flag));
+
+ if (flag == PFM_PMD_NO_RESET) return;
+
+ if (CTX_IS_MASKED(ctx)) {
+ pfm_reset_regs_masked(ctx, ovfl_regs, flag);
+ return;
+ }
+
/*
* now restore reset value on sampling overflowed counters
*/
mask >>= PMU_FIRST_COUNTER;
for(i = PMU_FIRST_COUNTER; mask; i++, mask >>= 1) {
if (mask & 0x1) {
- val = pfm_new_counter_value(ctx->ctx_soft_pmds + i, is_long_reset);
- reset_others |= ctx->ctx_soft_pmds[i].reset_pmds[0];
+ val = pfm_new_counter_value(ctx->ctx_pmds+ i, is_long_reset);
+ reset_others |= ctx->ctx_pmds[i].reset_pmds[0];
- DBprintk_ovfl(("[%d] %s reset soft_pmd[%d]=%lx\n", current->pid,
+ DPRINT_ovfl((" %s reset ctx_pmds[%d]=%lx\n",
is_long_reset ? "long" : "short", i, val));
- /* upper part is ignored on rval */
pfm_write_soft_counter(ctx, i, val);
}
}
if ((reset_others & 0x1) == 0) continue;
- val = pfm_new_counter_value(ctx->ctx_soft_pmds + i, is_long_reset);
+ val = pfm_new_counter_value(ctx->ctx_pmds + i, is_long_reset);
if (PMD_IS_COUNTING(i)) {
pfm_write_soft_counter(ctx, i, val);
} else {
ia64_set_pmd(i, val);
}
- DBprintk_ovfl(("[%d] %s reset_others pmd[%d]=%lx\n", current->pid,
+ DPRINT_ovfl(("%s reset_others pmd[%d]=%lx\n",
is_long_reset ? "long" : "short", i, val));
}
ia64_srlz_d();
}
static int
-pfm_write_pmcs(struct task_struct *task, pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
+pfm_write_pmcs(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
{
- struct thread_struct *th = &task->thread;
- pfarg_reg_t tmp, *req = (pfarg_reg_t *)arg;
- unsigned long value, reset_pmds;
+ struct thread_struct *thread = NULL;
+ pfarg_reg_t *req = (pfarg_reg_t *)arg;
+ unsigned long value;
+ unsigned long smpl_pmds, reset_pmds;
unsigned int cnum, reg_flags, flags;
- int i;
+ int i, can_access_pmu = 0, is_loaded;
+ int is_monitor, is_counting;
int ret = -EINVAL;
+#define PFM_CHECK_PMC_PM(x, y, z) ((x)->ctx_fl_system ^ PMC_PM(y, z))
- /* we don't quite support this right now */
- if (task != current) return -EINVAL;
+ if (CTX_IS_DEAD(ctx)) return -EINVAL;
- if (!CTX_IS_ENABLED(ctx)) return -EINVAL;
+ is_loaded = CTX_IS_LOADED(ctx);
- /* XXX: ctx locking may be required here */
+ if (is_loaded) {
+ thread = &ctx->ctx_task->thread;
+ can_access_pmu = GET_PMU_OWNER() == ctx->ctx_task? 1 : 0;
+ /*
+ * In system wide and when the context is loaded, access can only happen
+ * when the caller is running on the CPU being monitored by the session.
+ * It does not have to be the owner (ctx_task) of the context per se.
+ */
+ if (ctx->ctx_fl_system && ctx->ctx_cpu != smp_processor_id()) {
+ DPRINT(("[%d] should be running on CPU%d\n", current->pid, ctx->ctx_cpu));
+ return -EBUSY;
+ }
+ }
for (i = 0; i < count; i++, req++) {
- if (__copy_from_user(&tmp, req, sizeof(tmp))) return -EFAULT;
-
- cnum = tmp.reg_num;
- reg_flags = tmp.reg_flags;
- value = tmp.reg_value;
- reset_pmds = tmp.reg_reset_pmds[0];
+ cnum = req->reg_num;
+ reg_flags = req->reg_flags;
+ value = req->reg_value;
+ smpl_pmds = req->reg_smpl_pmds[0];
+ reset_pmds = req->reg_reset_pmds[0];
flags = 0;
- /*
+ is_counting = PMC_IS_COUNTING(cnum);
+ is_monitor = PMC_IS_MONITOR(cnum);
+
+ /*
* we reject all non implemented PMC as well
* as attempts to modify PMC[0-3] which are used
* as status registers by the PMU
*/
if (!PMC_IS_IMPL(cnum) || cnum < 4) {
- DBprintk(("pmc[%u] is unimplemented or invalid\n", cnum));
+ DPRINT(("pmc%u is unimplemented or invalid\n", cnum));
goto error;
}
/*
- * A PMC used to configure monitors must be:
- * - system-wide session: privileged monitor
- * - per-task : user monitor
- * any other configuration is rejected.
+ * If the PMC is a monitor, then if the value is not the default:
+ * - system-wide session: PMCx.pm=1 (privileged monitor)
+ * - per-task : PMCx.pm=0 (user monitor)
*/
- if (PMC_IS_MONITOR(cnum) || PMC_IS_COUNTING(cnum)) {
- DBprintk(("pmc[%u].pm=%ld\n", cnum, PMC_PM(cnum, value)));
-
- if (ctx->ctx_fl_system ^ PMC_PM(cnum, value)) {
- DBprintk(("pmc_pm=%ld fl_system=%d\n", PMC_PM(cnum, value), ctx->ctx_fl_system));
- goto error;
- }
+ if ((is_monitor || is_counting) && value != PMC_DFL_VAL(i) && PFM_CHECK_PMC_PM(ctx, cnum, value)) {
+ DPRINT(("pmc%u pmc_pm=%ld fl_system=%d\n",
+ cnum,
+ PMC_PM(cnum, value),
+ ctx->ctx_fl_system));
+ goto error;
}
- if (PMC_IS_COUNTING(cnum)) {
+
+ if (is_counting) {
pfm_monitor_t *p = (pfm_monitor_t *)&value;
/*
* enforce generation of overflow interrupt. Necessary on all
p->pmc_oi = 1;
if (reg_flags & PFM_REGFL_OVFL_NOTIFY) {
- /*
- * must have a target for the signal
- */
- if (ctx->ctx_notify_task == NULL) {
- DBprintk(("cannot set ovfl_notify: no notify_task\n"));
- goto error;
- }
flags |= PFM_REGFL_OVFL_NOTIFY;
}
if (reg_flags & PFM_REGFL_RANDOM) flags |= PFM_REGFL_RANDOM;
+ /* verify validity of smpl_pmds */
+ if ((smpl_pmds & pmu_conf.impl_pmds[0]) != smpl_pmds) {
+ DPRINT(("invalid smpl_pmds 0x%lx for pmc%u\n", smpl_pmds, cnum));
+ goto error;
+ }
+
/* verify validity of reset_pmds */
if ((reset_pmds & pmu_conf.impl_pmds[0]) != reset_pmds) {
- DBprintk(("invalid reset_pmds 0x%lx for pmc%u\n", reset_pmds, cnum));
+ DPRINT(("invalid reset_pmds 0x%lx for pmc%u\n", reset_pmds, cnum));
goto error;
}
- } else if (reg_flags & (PFM_REGFL_OVFL_NOTIFY|PFM_REGFL_RANDOM)) {
- DBprintk(("cannot set ovfl_notify or random on pmc%u\n", cnum));
+ } else {
+ if (reg_flags & (PFM_REGFL_OVFL_NOTIFY|PFM_REGFL_RANDOM)) {
+ DPRINT(("cannot set ovfl_notify or random on pmc%u\n", cnum));
goto error;
+ }
+ /* eventid on non-counting monitors are ignored */
}
/*
* execute write checker, if any
*/
if (PMC_WR_FUNC(cnum)) {
- ret = PMC_WR_FUNC(cnum)(task, cnum, &value, regs);
+ ret = PMC_WR_FUNC(cnum)(ctx->ctx_task, ctx, cnum, &value, regs);
if (ret) goto error;
ret = -EINVAL;
}
/*
* no error on this register
*/
- PFM_REG_RETFLAG_SET(tmp.reg_flags, 0);
-
- /*
- * update register return value, abort all if problem during copy.
- * we only modify the reg_flags field. no check mode is fine because
- * access has been verified upfront in sys_perfmonctl().
- *
- * If this fails, then the software state is not modified
- */
- if (__put_user(tmp.reg_flags, &req->reg_flags)) return -EFAULT;
+ PFM_REG_RETFLAG_SET(req->reg_flags, 0);
/*
* Now we commit the changes to the software state
*/
- /*
- * full flag update each time a register is programmed
+ /*
+ * update overflow information
*/
- ctx->ctx_soft_pmds[cnum].flags = flags;
+ if (is_counting) {
+ /*
+ * full flag update each time a register is programmed
+ */
+ ctx->ctx_pmds[cnum].flags = flags;
- if (PMC_IS_COUNTING(cnum)) {
- ctx->ctx_soft_pmds[cnum].reset_pmds[0] = reset_pmds;
+ ctx->ctx_pmds[cnum].reset_pmds[0] = reset_pmds;
+ ctx->ctx_pmds[cnum].smpl_pmds[0] = smpl_pmds;
+ ctx->ctx_pmds[cnum].eventid = req->reg_smpl_eventid;
- /* mark all PMDS to be accessed as used */
+ /*
+ * Mark all PMDS to be accessed as used.
+ *
+ * We do not keep track of PMC because we have to
+ * systematically restore ALL of them.
+ *
+ * We do not update the used_monitors mask, because
+ * if we have not programmed them, they will be in
+ * a quiescent state, therefore we will not need to
+ * mask/restore them when the context is MASKED.
+ */
CTX_USED_PMD(ctx, reset_pmds);
+ CTX_USED_PMD(ctx, smpl_pmds);
+ /*
+ * make sure we do not try to reset on
+ * restart because we have established new values
+ */
+ if (CTX_IS_MASKED(ctx)) ctx->ctx_ovfl_regs[0] &= ~(1UL << cnum);
}
-
/*
* Needed in case the user does not initialize the equivalent
- * PMD. Clearing is done in reset_pmu() so there is no possible
- * leak here.
+ * PMD. Clearing is done indirectly via pfm_reset_pmu_state() so there is no
+ * possible leak here.
*/
CTX_USED_PMD(ctx, pmu_conf.pmc_desc[cnum].dep_pmd[0]);
- /*
- * keep copy the pmc, used for register reload
+ /*
+ * keep track of the monitor PMC that we are using.
+ * we save the value of the pmc in ctx_pmcs[] and if
+ * the monitoring is not stopped for the context we also
+ * place it in the saved state area so that it will be
+ * picked up later by the context switch code.
+ *
+ * The value in ctx_pmcs[] can only be changed in pfm_write_pmcs().
+ *
+ * The value in t->pmc[] may be modified on overflow, i.e., when
+ * monitoring needs to be stopped.
+ */
+ if (is_monitor) CTX_USED_MONITOR(ctx, 1UL << cnum);
+
+ /*
+ * update context state
*/
- th->pmc[cnum] = value;
+ ctx->ctx_pmcs[cnum] = value;
- ia64_set_pmc(cnum, value);
+ if (is_loaded) {
+ /*
+ * write thread state
+ */
+ if (ctx->ctx_fl_system == 0) thread->pmcs[cnum] = value;
- DBprintk(("[%d] pmc[%u]=0x%lx flags=0x%x used_pmds=0x%lx\n",
- task->pid, cnum, value,
- ctx->ctx_soft_pmds[cnum].flags,
- ctx->ctx_used_pmds[0]));
+ /*
+ * write hardware register if we can
+ */
+ if (can_access_pmu) {
+ ia64_set_pmc(cnum, value);
+ }
+#ifdef CONFIG_SMP
+ else {
+ /*
+ * per-task SMP only here
+ *
+ * we are guaranteed that the task is not running on the other CPU,
+ * we indicate that this PMD will need to be reloaded if the task
+ * is rescheduled on the CPU it ran last on.
+ */
+ ctx->ctx_reload_pmcs[0] |= 1UL << cnum;
+ }
+#endif
+ }
+ DPRINT(("pmc[%u]=0x%lx loaded=%d access_pmu=%d all_pmcs=0x%lx used_pmds=0x%lx eventid=%ld smpl_pmds=0x%lx reset_pmds=0x%lx reloads_pmcs=0x%lx used_monitors=0x%lx ovfl_regs=0x%lx\n",
+ cnum,
+ value,
+ is_loaded,
+ can_access_pmu,
+ ctx->ctx_all_pmcs[0],
+ ctx->ctx_used_pmds[0],
+ ctx->ctx_pmds[cnum].eventid,
+ smpl_pmds,
+ reset_pmds,
+ ctx->ctx_reload_pmcs[0],
+ ctx->ctx_used_monitors[0],
+ ctx->ctx_ovfl_regs[0]));
}
- return 0;
+ /*
+ * make sure the changes are visible
+ */
+ if (can_access_pmu) ia64_srlz_d();
+ return 0;
error:
- PFM_REG_RETFLAG_SET(tmp.reg_flags, PFM_REG_RETFL_EINVAL);
+ PFM_REG_RETFLAG_SET(req->reg_flags, PFM_REG_RETFL_EINVAL);
- if (__put_user(tmp.reg_flags, &req->reg_flags)) ret = -EFAULT;
+ req->reg_flags = PFM_REG_RETFL_EINVAL;
- DBprintk(("[%d] pmc[%u]=0x%lx error %d\n", task->pid, cnum, value, ret));
+ DPRINT(("pmc[%u]=0x%lx error %d\n", cnum, value, ret));
return ret;
}
static int
-pfm_write_pmds(struct task_struct *task, pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
+pfm_write_pmds(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
{
- pfarg_reg_t tmp, *req = (pfarg_reg_t *)arg;
+ struct thread_struct *thread = NULL;
+ pfarg_reg_t *req = (pfarg_reg_t *)arg;
unsigned long value, hw_value;
unsigned int cnum;
- int i;
+ int i, can_access_pmu = 0;
+ int is_counting, is_loaded;
int ret = -EINVAL;
- /* we don't quite support this right now */
- if (task != current) return -EINVAL;
-
- /*
- * Cannot do anything before PMU is enabled
- */
- if (!CTX_IS_ENABLED(ctx)) return -EINVAL;
- preempt_disable();
+ if (CTX_IS_DEAD(ctx)) return -EINVAL;
- /* XXX: ctx locking may be required here */
+ is_loaded = CTX_IS_LOADED(ctx);
+ /*
+ * on both UP and SMP, we can only write to the PMC when the task is
+ * the owner of the local PMU.
+ */
+ if (is_loaded) {
+ thread = &ctx->ctx_task->thread;
+ can_access_pmu = GET_PMU_OWNER() == ctx->ctx_task ? 1 : 0;
+ /*
+ * In system wide and when the context is loaded, access can only happen
+ * when the caller is running on the CPU being monitored by the session.
+ * It does not have to be the owner (ctx_task) of the context per se.
+ */
+ if (ctx->ctx_fl_system && ctx->ctx_cpu != smp_processor_id()) {
+ DPRINT(("[%d] should be running on CPU%d\n", current->pid, ctx->ctx_cpu));
+ return -EBUSY;
+ }
+ }
for (i = 0; i < count; i++, req++) {
- if (__copy_from_user(&tmp, req, sizeof(tmp))) return -EFAULT;
-
- cnum = tmp.reg_num;
- value = tmp.reg_value;
+ cnum = req->reg_num;
+ value = req->reg_value;
if (!PMD_IS_IMPL(cnum)) {
- DBprintk(("pmd[%u] is unimplemented or invalid\n", cnum));
+ DPRINT(("pmd[%u] is unimplemented or invalid\n", cnum));
goto abort_mission;
}
+ is_counting = PMD_IS_COUNTING(cnum);
/*
* execute write checker, if any
*/
if (PMD_WR_FUNC(cnum)) {
unsigned long v = value;
- ret = PMD_WR_FUNC(cnum)(task, cnum, &v, regs);
+
+ ret = PMD_WR_FUNC(cnum)(ctx->ctx_task, ctx, cnum, &v, regs);
if (ret) goto abort_mission;
+
value = v;
- ret = -EINVAL;
+ ret = -EINVAL;
}
- hw_value = value;
+
/*
* no error on this register
*/
- PFM_REG_RETFLAG_SET(tmp.reg_flags, 0);
-
- if (__put_user(tmp.reg_flags, &req->reg_flags)) return -EFAULT;
+ PFM_REG_RETFLAG_SET(req->reg_flags, 0);
/*
* now commit changes to software state
*/
+ hw_value = value;
- /* update virtualized (64bits) counter */
- if (PMD_IS_COUNTING(cnum)) {
- ctx->ctx_soft_pmds[cnum].lval = value;
- ctx->ctx_soft_pmds[cnum].val = value & ~pmu_conf.ovfl_val;
+ /*
+ * update virtualized (64bits) counter
+ */
+ if (is_counting) {
+ /*
+ * write context state
+ */
+ ctx->ctx_pmds[cnum].lval = value;
- hw_value = value & pmu_conf.ovfl_val;
+ /*
+ * when context is load we use the split value
+ */
+ if (is_loaded) {
+ hw_value = value & pmu_conf.ovfl_val;
+ value = value & ~pmu_conf.ovfl_val;
+ }
- ctx->ctx_soft_pmds[cnum].long_reset = tmp.reg_long_reset;
- ctx->ctx_soft_pmds[cnum].short_reset = tmp.reg_short_reset;
+ /*
+ * update sampling periods
+ */
+ ctx->ctx_pmds[cnum].long_reset = req->reg_long_reset;
+ ctx->ctx_pmds[cnum].short_reset = req->reg_short_reset;
- ctx->ctx_soft_pmds[cnum].seed = tmp.reg_random_seed;
- ctx->ctx_soft_pmds[cnum].mask = tmp.reg_random_mask;
+ /*
+ * update randomization parameters
+ */
+ ctx->ctx_pmds[cnum].seed = req->reg_random_seed;
+ ctx->ctx_pmds[cnum].mask = req->reg_random_mask;
}
- /* keep track of what we use */
- CTX_USED_PMD(ctx, pmu_conf.pmd_desc[(cnum)].dep_pmd[0]);
+ /*
+ * update context value
+ */
+ ctx->ctx_pmds[cnum].val = value;
+
+ /*
+ * Keep track of what we use
+ *
+ * We do not keep track of PMC because we have to
+ * systematically restore ALL of them.
+ */
+ CTX_USED_PMD(ctx, PMD_PMD_DEP(cnum));
- /* mark this register as used as well */
+ /*
+ * mark this PMD register used as well
+ */
CTX_USED_PMD(ctx, RDEP(cnum));
- /* writes to unimplemented part is ignored, so this is safe */
- ia64_set_pmd(cnum, hw_value);
+ /*
+ * make sure we do not try to reset on
+ * restart because we have established new values
+ */
+ if (is_counting && CTX_IS_MASKED(ctx)) {
+ ctx->ctx_ovfl_regs[0] &= ~(1UL << cnum);
+ }
- /* to go away */
- ia64_srlz_d();
+ if (is_loaded) {
+ /*
+ * write thread state
+ */
+ if (ctx->ctx_fl_system == 0) thread->pmds[cnum] = hw_value;
+
+ /*
+ * write hardware register if we can
+ */
+ if (can_access_pmu) {
+ ia64_set_pmd(cnum, hw_value);
+ } else {
+#ifdef CONFIG_SMP
+ /*
+ * we are guaranteed that the task is not running on the other CPU,
+ * we indicate that this PMD will need to be reloaded if the task
+ * is rescheduled on the CPU it ran last on.
+ */
+ ctx->ctx_reload_pmds[0] |= 1UL << cnum;
+#endif
+ }
+ }
+
+ DPRINT(("pmd[%u]=0x%lx loaded=%d access_pmu=%d, hw_value=0x%lx ctx_pmd=0x%lx short_reset=0x%lx "
+ "long_reset=0x%lx notify=%c used_pmds=0x%lx reset_pmds=0x%lx reload_pmds=0x%lx all_pmds=0x%lx ovfl_regs=0x%lx\n",
+ cnum,
+ value,
+ is_loaded,
+ can_access_pmu,
+ hw_value,
+ ctx->ctx_pmds[cnum].val,
+ ctx->ctx_pmds[cnum].short_reset,
+ ctx->ctx_pmds[cnum].long_reset,
+ PMC_OVFL_NOTIFY(ctx, cnum) ? 'Y':'N',
+ ctx->ctx_used_pmds[0],
+ ctx->ctx_pmds[cnum].reset_pmds[0],
+ ctx->ctx_reload_pmds[0],
+ ctx->ctx_all_pmds[0],
+ ctx->ctx_ovfl_regs[0]));
+ }
+
+ /*
+ * make changes visible
+ */
+ if (can_access_pmu) ia64_srlz_d();
- DBprintk(("[%d] pmd[%u]: value=0x%lx hw_value=0x%lx soft_pmd=0x%lx short_reset=0x%lx "
- "long_reset=0x%lx hw_pmd=%lx notify=%c used_pmds=0x%lx reset_pmds=0x%lx\n",
- task->pid, cnum,
- value, hw_value,
- ctx->ctx_soft_pmds[cnum].val,
- ctx->ctx_soft_pmds[cnum].short_reset,
- ctx->ctx_soft_pmds[cnum].long_reset,
- ia64_get_pmd(cnum) & pmu_conf.ovfl_val,
- PMC_OVFL_NOTIFY(ctx, cnum) ? 'Y':'N',
- ctx->ctx_used_pmds[0],
- ctx->ctx_soft_pmds[cnum].reset_pmds[0]));
- }
- preempt_enable();
return 0;
abort_mission:
- preempt_enable();
-
/*
* for now, we have only one possibility for error
*/
- PFM_REG_RETFLAG_SET(tmp.reg_flags, PFM_REG_RETFL_EINVAL);
+ PFM_REG_RETFLAG_SET(req->reg_flags, PFM_REG_RETFL_EINVAL);
/*
* we change the return value to EFAULT in case we cannot write register return code.
* The caller first must correct this error, then a resubmission of the request will
* eventually yield the EINVAL.
*/
- if (__put_user(tmp.reg_flags, &req->reg_flags)) ret = -EFAULT;
+ req->reg_flags = PFM_REG_RETFL_EINVAL;
- DBprintk(("[%d] pmc[%u]=0x%lx ret %d\n", task->pid, cnum, value, ret));
+ DPRINT(("pmd[%u]=0x%lx ret %d\n", cnum, value, ret));
return ret;
}
+/*
+ * By the way of PROTECT_CONTEXT(), interrupts are masked while we are in this function.
+ * Therefore we know, we do not have to worry about the PMU overflow interrupt. If an
+ * interrupt is delivered during the call, it will be kept pending until we leave, making
+ * it appear as if it had been generated at the UNPROTECT_CONTEXT(). At least we are
+ * guaranteed to return consistent data to the user, it may simply be old. It is not
+ * trivial to treat the overflow while inside the call because you may end up in
+ * some module sampling buffer code causing deadlocks.
+ */
static int
-pfm_read_pmds(struct task_struct *task, pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
+pfm_read_pmds(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
{
- struct thread_struct *th = &task->thread;
- unsigned long val, lval;
+ struct thread_struct *thread = NULL;
+ unsigned long val = 0UL, lval ;
pfarg_reg_t *req = (pfarg_reg_t *)arg;
unsigned int cnum, reg_flags = 0;
- int i, ret = 0;
-
-#if __GNUC__ < 3
- int foo;
-#endif
+ int i, is_loaded, can_access_pmu = 0;
+ int ret = -EINVAL;
- if (!CTX_IS_ENABLED(ctx)) return -EINVAL;
+ if (CTX_IS_ZOMBIE(ctx)) return -EINVAL;
/*
- * XXX: MUST MAKE SURE WE DON"T HAVE ANY PENDING OVERFLOW BEFORE READING
- * This is required when the monitoring has been stoppped by user or kernel.
- * If it is still going on, then that's fine because we a re not guaranteed
- * to return an accurate value in this case.
+ * access is possible when loaded only for
+ * self-monitoring tasks or in UP mode
*/
+ is_loaded = CTX_IS_LOADED(ctx);
- /* XXX: ctx locking may be required here */
+ if (is_loaded) {
+ thread = &ctx->ctx_task->thread;
+ /*
+ * this can be true when not self-monitoring only in UP
+ */
+ can_access_pmu = GET_PMU_OWNER() == ctx->ctx_task? 1 : 0;
+
+ if (can_access_pmu) ia64_srlz_d();
+ /*
+ * In system wide and when the context is loaded, access can only happen
+ * when the caller is running on the CPU being monitored by the session.
+ * It does not have to be the owner (ctx_task) of the context per se.
+ */
+ if (ctx->ctx_fl_system && ctx->ctx_cpu != smp_processor_id()) {
+ DPRINT(("[%d] should be running on CPU%d\n", current->pid, ctx->ctx_cpu));
+ return -EBUSY;
+ }
+ }
+ DPRINT(("enter loaded=%d access_pmu=%d ctx_state=%d\n",
+ is_loaded,
+ can_access_pmu,
+ ctx->ctx_state));
- DBprintk(("ctx_last_cpu=%d for [%d]\n", atomic_read(&ctx->ctx_last_cpu), task->pid));
+ /*
+ * on both UP and SMP, we can only read the PMD from the hardware register when
+ * the task is the owner of the local PMU.
+ */
for (i = 0; i < count; i++, req++) {
- int me;
-#if __GNUC__ < 3
- foo = __get_user(cnum, &req->reg_num);
- if (foo) return -EFAULT;
- foo = __get_user(reg_flags, &req->reg_flags);
- if (foo) return -EFAULT;
-#else
- if (__get_user(cnum, &req->reg_num)) return -EFAULT;
- if (__get_user(reg_flags, &req->reg_flags)) return -EFAULT;
-#endif
- lval = 0UL;
- if (!PMD_IS_IMPL(cnum)) goto abort_mission;
+ lval = 0UL;
+ cnum = req->reg_num;
+ reg_flags = req->reg_flags;
+
+ if (!PMD_IS_IMPL(cnum)) goto error;
/*
* we can only read the register that we use. That includes
- * the one we explicitly initialize AND the one we want included
+ * the one we explicitly initialize AND the one we want included
* in the sampling buffer (smpl_regs).
*
* Having this restriction allows optimization in the ctxsw routine
* without compromising security (leaks)
*/
- if (!CTX_IS_USED_PMD(ctx, cnum)) goto abort_mission;
+ if (!CTX_IS_USED_PMD(ctx, cnum)) goto error;
/*
* If the task is not the current one, then we check if the
* PMU state is still in the local live register due to lazy ctxsw.
* If true, then we read directly from the registers.
*/
- me = get_cpu();
- if (atomic_read(&ctx->ctx_last_cpu) == me){
- ia64_srlz_d();
+ if (can_access_pmu){
val = ia64_get_pmd(cnum);
- DBprintk(("reading pmd[%u]=0x%lx from hw\n", cnum, val));
} else {
- val = th->pmd[cnum];
+ /*
+ * context has been saved
+ * if context is zombie, then task does not exist anymore.
+ * In this case, we use the full value saved in the context (pfm_flush_regs()).
+ */
+ val = CTX_IS_LOADED(ctx) ? thread->pmds[cnum] : 0UL;
}
-
if (PMD_IS_COUNTING(cnum)) {
/*
- * XXX: need to check for overflow
+ * XXX: need to check for overflow when loaded
*/
val &= pmu_conf.ovfl_val;
- val += ctx->ctx_soft_pmds[cnum].val;
+ val += ctx->ctx_pmds[cnum].val;
+
+ lval = ctx->ctx_pmds[cnum].lval;
+ }
+
+ /*
+ * execute read checker, if any
+ */
+ if (PMD_RD_FUNC(cnum)) {
+ unsigned long v = val;
+ ret = PMD_RD_FUNC(cnum)(ctx->ctx_task, ctx, cnum, &v, regs);
+ if (ret) goto error;
+ val = v;
+ ret = -EINVAL;
+ }
+
+ PFM_REG_RETFLAG_SET(reg_flags, 0);
+
+ DPRINT(("pmd[%u]=0x%lx loaded=%d access_pmu=%d ctx_state=%d\n",
+ cnum,
+ val,
+ is_loaded,
+ can_access_pmu,
+ ctx->ctx_state));
+
+ /*
+ * update register return value, abort all if problem during copy.
+ * we only modify the reg_flags field. no check mode is fine because
+ * access has been verified upfront in sys_perfmonctl().
+ */
+ req->reg_value = val;
+ req->reg_flags = reg_flags;
+ req->reg_last_reset_val = lval;
+ }
+
+ return 0;
+
+error:
+ PFM_REG_RETFLAG_SET(reg_flags, PFM_REG_RETFL_EINVAL);
+
+ req->reg_flags = PFM_REG_RETFL_EINVAL;
+
+ DPRINT(("error pmd[%u]=0x%lx\n", cnum, val));
+
+ return ret;
+}
+
+long
+pfm_mod_write_pmcs(struct task_struct *task, pfarg_reg_t *req, unsigned int nreq, struct pt_regs *regs)
+{
+ pfm_context_t *ctx;
+
+ if (task == NULL || req == NULL) return -EINVAL;
+
+ ctx = task->thread.pfm_context;
+
+ if (ctx == NULL) return -EINVAL;
+
+ /*
+ * for now limit to current task, which is enough when calling
+ * from overflow handler
+ */
+ if (task != current) return -EBUSY;
+
+ return pfm_write_pmcs(ctx, req, nreq, regs);
+}
+
+long
+pfm_mod_read_pmds(struct task_struct *task, pfarg_reg_t *req, unsigned int nreq, struct pt_regs *regs)
+{
+ pfm_context_t *ctx;
+
+ if (task == NULL || req == NULL) return -EINVAL;
+
+ //ctx = task->thread.pfm_context;
+ ctx = GET_PMU_CTX();
+
+ if (ctx == NULL) return -EINVAL;
+
+ /*
+ * for now limit to current task, which is enough when calling
+ * from overflow handler
+ */
+ if (task != current && ctx->ctx_fl_system == 0) return -EBUSY;
+
+ return pfm_read_pmds(ctx, req, nreq, regs);
+}
+
+long
+pfm_mod_fast_read_pmds(struct task_struct *task, unsigned long mask[4], unsigned long *addr, struct pt_regs *regs)
+{
+ pfm_context_t *ctx;
+ unsigned long m, val;
+ unsigned int j;
- lval = ctx->ctx_soft_pmds[cnum].lval;
- }
+ if (task == NULL || addr == NULL) return -EINVAL;
- /*
- * execute read checker, if any
- */
- if (PMD_RD_FUNC(cnum)) {
- unsigned long v = val;
- ret = PMD_RD_FUNC(cnum)(task, cnum, &v, regs);
- val = v;
- }
+ //ctx = task->thread.pfm_context;
+ ctx = GET_PMU_CTX();
- PFM_REG_RETFLAG_SET(reg_flags, ret);
+ if (ctx == NULL) return -EINVAL;
- put_cpu();
+ /*
+ * for now limit to current task, which is enough when calling
+ * from overflow handler
+ */
+ if (task != current && ctx->ctx_fl_system == 0) return -EBUSY;
- DBprintk(("read pmd[%u] ret=%d value=0x%lx pmc=0x%lx\n",
- cnum, ret, val, ia64_get_pmc(cnum)));
+ m = mask[0];
+ for (j=0; m; m >>=1, j++) {
- /*
- * update register return value, abort all if problem during copy.
- * we only modify the reg_flags field. no check mode is fine because
- * access has been verified upfront in sys_perfmonctl().
- */
- if (__put_user(cnum, &req->reg_num)) return -EFAULT;
- if (__put_user(val, &req->reg_value)) return -EFAULT;
- if (__put_user(reg_flags, &req->reg_flags)) return -EFAULT;
- if (__put_user(lval, &req->reg_last_reset_value)) return -EFAULT;
- }
+ if ((m & 0x1) == 0) continue;
- return 0;
+ if (!(PMD_IS_IMPL(j) && CTX_IS_USED_PMD(ctx, j)) ) return -EINVAL;
-abort_mission:
- PFM_REG_RETFLAG_SET(reg_flags, PFM_REG_RETFL_EINVAL);
- /*
- * XXX: if this fails, we stick with the original failure, flag not updated!
- */
- __put_user(reg_flags, &req->reg_flags);
+ if (PMD_IS_COUNTING(j)) {
+ val = pfm_read_soft_counter(ctx, j);
+ } else {
+ val = ia64_get_pmd(j);
+ }
- return -EINVAL;
+ *addr++ = val;
+
+ /* XXX: should call read checker routine? */
+ DPRINT(("single_read_pmd[%u]=0x%lx\n", j, val));
+ }
+ return 0;
}
-#ifdef PFM_PMU_USES_DBR
/*
* Only call this function when a process it trying to
* write the debug registers (reading is always allowed)
pfm_context_t *ctx = task->thread.pfm_context;
int ret = 0;
- DBprintk(("called for [%d]\n", task->pid));
+ if (pmu_conf.use_rr_dbregs == 0) return 0;
+
+ DPRINT(("called for [%d]\n", task->pid));
/*
* do it only once
else
pfm_sessions.pfs_ptrace_use_dbregs++;
- DBprintk(("ptrace_use_dbregs=%u sys_use_dbregs=%u by [%d] ret = %d\n",
- pfm_sessions.pfs_ptrace_use_dbregs,
- pfm_sessions.pfs_sys_use_dbregs,
+ DPRINT(("ptrace_use_dbregs=%u sys_use_dbregs=%u by [%d] ret = %d\n",
+ pfm_sessions.pfs_ptrace_use_dbregs,
+ pfm_sessions.pfs_sys_use_dbregs,
task->pid, ret));
UNLOCK_PFS();
{
int ret;
+ if (pmu_conf.use_rr_dbregs == 0) return 0;
+
LOCK_PFS();
if (pfm_sessions.pfs_ptrace_use_dbregs == 0) {
- printk(KERN_DEBUG "perfmon: invalid release for [%d] ptrace_use_dbregs=0\n",
- task->pid);
+ printk(KERN_ERR "perfmon: invalid release for [%d] ptrace_use_dbregs=0\n", task->pid);
ret = -1;
} else {
pfm_sessions.pfs_ptrace_use_dbregs--;
return ret;
}
-#else /* PFM_PMU_USES_DBR is true */
-/*
- * in case, the PMU does not use the debug registers, these two functions are nops.
- * The first function is called from arch/ia64/kernel/ptrace.c.
- * The second function is called from arch/ia64/kernel/process.c.
- */
-int
-pfm_use_debug_registers(struct task_struct *task)
-{
- return 0;
-}
-
-int
-pfm_release_debug_registers(struct task_struct *task)
-{
- return 0;
-}
-#endif /* PFM_PMU_USES_DBR */
static int
-pfm_restart(struct task_struct *task, pfm_context_t *ctx, void *arg, int count,
- struct pt_regs *regs)
+pfm_restart(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
{
- void *sem = &ctx->ctx_restart_sem;
+ struct task_struct *task;
+ pfm_buffer_fmt_t *fmt;
+ pfm_ovfl_ctrl_t rst_ctrl;
+ int is_loaded;
+ int ret = 0;
+
+ fmt = ctx->ctx_buf_fmt;
+ is_loaded = CTX_IS_LOADED(ctx);
+
+ if (is_loaded && CTX_HAS_SMPL(ctx) && fmt->fmt_restart_active) goto proceed;
+
+ /*
+ * restarting a terminated context is a nop
+ */
+ if (unlikely(CTX_IS_TERMINATED(ctx))) {
+ DPRINT(("context is terminated, nothing to do\n"));
+ return 0;
+ }
- /*
- * Cannot do anything before PMU is enabled
+
+ /*
+ * LOADED, UNLOADED, ZOMBIE
*/
- if (!CTX_IS_ENABLED(ctx)) return -EINVAL;
+ if (CTX_IS_MASKED(ctx) == 0) return -EBUSY;
+
+proceed:
+ /*
+ * In system wide and when the context is loaded, access can only happen
+ * when the caller is running on the CPU being monitored by the session.
+ * It does not have to be the owner (ctx_task) of the context per se.
+ */
+ if (ctx->ctx_fl_system && ctx->ctx_cpu != smp_processor_id()) {
+ DPRINT(("[%d] should be running on CPU%d\n", current->pid, ctx->ctx_cpu));
+ return -EBUSY;
+ }
+
+ task = PFM_CTX_TASK(ctx);
+
+ /* sanity check */
+ if (unlikely(task == NULL)) {
+ printk(KERN_ERR "perfmon: [%d] pfm_restart no task\n", current->pid);
+ return -EINVAL;
+ }
+ /*
+ * this test is always true in system wide mode
+ */
if (task == current) {
- DBprintk(("restarting self %d frozen=%d ovfl_regs=0x%lx\n",
- task->pid,
- ctx->ctx_fl_frozen,
+
+ fmt = ctx->ctx_buf_fmt;
+
+ DPRINT(("restarting self %d ovfl=0x%lx\n",
+ task->pid,
ctx->ctx_ovfl_regs[0]));
- preempt_disable();
- pfm_reset_regs(ctx, ctx->ctx_ovfl_regs, PFM_PMD_LONG_RESET);
+ if (CTX_HAS_SMPL(ctx)) {
- ctx->ctx_ovfl_regs[0] = 0UL;
+ prefetch(ctx->ctx_smpl_hdr);
- /*
- * We ignore block/don't block because we never block
- * for a self-monitoring process.
- */
- ctx->ctx_fl_frozen = 0;
+ rst_ctrl.stop_monitoring = 0;
+ rst_ctrl.reset_pmds = PFM_PMD_NO_RESET;
- if (CTX_HAS_SMPL(ctx)) {
- ctx->ctx_psb->psb_hdr->hdr_count = 0;
- ctx->ctx_psb->psb_index = 0;
+ if (is_loaded)
+ ret = pfm_buf_fmt_restart_active(fmt, task, &rst_ctrl, ctx->ctx_smpl_hdr, regs);
+ else
+ ret = pfm_buf_fmt_restart(fmt, task, &rst_ctrl, ctx->ctx_smpl_hdr, regs);
+
+
+ } else {
+ rst_ctrl.stop_monitoring = 0;
+ rst_ctrl.reset_pmds = PFM_PMD_LONG_RESET;
}
- /* simply unfreeze */
- pfm_unfreeze_pmu();
+ if (ret == 0) {
+ if (rst_ctrl.reset_pmds)
+ pfm_reset_regs(ctx, ctx->ctx_ovfl_regs, rst_ctrl.reset_pmds);
+
+ if (rst_ctrl.stop_monitoring == 0) {
+ DPRINT(("resuming monitoring for [%d]\n", task->pid));
- preempt_enable();
+ if (CTX_IS_MASKED(ctx)) pfm_restore_monitoring(task);
+ } else {
+ DPRINT(("keeping monitoring stopped for [%d]\n", task->pid));
+
+ // cannot use pfm_stop_monitoring(task, regs);
+ }
+ }
+ /*
+ * clear overflowed PMD mask to remove any stale information
+ */
+ ctx->ctx_ovfl_regs[0] = 0UL;
+
+ /*
+ * back to LOADED state
+ */
+ CTX_LOADED(ctx);
return 0;
- }
- /* restart on another task */
+ }
+ /* restart another task */
/*
* if blocking, then post the semaphore.
* if non-blocking, then we ensure that the task will go into
- * pfm_overflow_must_block() before returning to user mode.
- * We cannot explicitly reset another task, it MUST always
+ * pfm_handle_work() before returning to user mode.
+ * We cannot explicitly reset another task, it MUST always
* be done by the task itself. This works for system wide because
* the tool that is controlling the session is doing "self-monitoring".
*
*
*/
if (CTX_OVFL_NOBLOCK(ctx) == 0) {
- DBprintk(("unblocking %d \n", task->pid));
- up(sem);
+ DPRINT(("unblocking [%d] \n", task->pid));
+ up(&ctx->ctx_restart_sem);
} else {
- struct thread_info *info = (struct thread_info *) ((char *) task + IA64_TASK_SIZE);
- task->thread.pfm_ovfl_block_reset = 1;
+ DPRINT(("[%d] armed exit trap\n", task->pid));
+
ctx->ctx_fl_trap_reason = PFM_TRAP_REASON_RESET;
- set_bit(TIF_NOTIFY_RESUME, &info->flags);
+
+
+ PFM_SET_WORK_PENDING(task, 1);
+
+ pfm_set_task_notify(task);
+
+ /*
+ * XXX: send reschedule if task runs on another CPU
+ */
}
-#if 0
+ return 0;
+}
+
+static int
+pfm_debug(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
+{
+ unsigned int m = *(unsigned int *)arg;
+
+ pfm_sysctl.debug = m == 0 ? 0 : 1;
+
+ pfm_debug_var = pfm_sysctl.debug;
+
+ printk(KERN_ERR "perfmon debugging %s (timing reset)\n", pfm_sysctl.debug ? "on" : "off");
+
+
+ if (m==0) {
+ memset(pfm_stats, 0, sizeof(pfm_stats));
+ for(m=0; m < NR_CPUS; m++) pfm_stats[m].pfm_ovfl_intr_cycles_min = ~0UL;
+ }
+
+ return 0;
+}
+
+
+static int
+pfm_write_ibr_dbr(int mode, pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
+{
+ struct thread_struct *thread = NULL;
+ pfarg_dbreg_t *req = (pfarg_dbreg_t *)arg;
+ dbreg_t dbreg;
+ unsigned int rnum;
+ int first_time;
+ int ret = 0;
+ int i, can_access_pmu = 0, is_loaded;
+
+ if (pmu_conf.use_rr_dbregs == 0) return -EINVAL;
+
+ if (CTX_IS_DEAD(ctx)) return -EINVAL;
+
+ is_loaded = CTX_IS_LOADED(ctx);
+ /*
+ * on both UP and SMP, we can only write to the PMC when the task is
+ * the owner of the local PMU.
+ */
+ if (is_loaded) {
+ thread = &ctx->ctx_task->thread;
+ can_access_pmu = GET_PMU_OWNER() == ctx->ctx_task ? 1 : 0;
+ /*
+ * In system wide and when the context is loaded, access can only happen
+ * when the caller is running on the CPU being monitored by the session.
+ * It does not have to be the owner (ctx_task) of the context per se.
+ */
+ if (ctx->ctx_fl_system && ctx->ctx_cpu != smp_processor_id()) {
+ DPRINT(("[%d] should be running on CPU%d\n", current->pid, ctx->ctx_cpu));
+ return -EBUSY;
+ }
+ }
+
/*
- * in case of non blocking mode, then it's just a matter of
- * of reseting the sampling buffer (if any) index. The PMU
- * is already active.
+ * we do not need to check for ipsr.db because we do clear ibr.x, dbr.r, and dbr.w
+ * ensuring that no real breakpoint can be installed via this call.
+ *
+ * IMPORTANT: regs can be NULL in this function
*/
+ first_time = ctx->ctx_fl_using_dbreg == 0;
+
/*
- * must reset the header count first
+ * don't bother if we are loaded and task is being debugged
*/
- if (CTX_HAS_SMPL(ctx)) {
- DBprintk(("resetting sampling indexes for %d \n", task->pid));
- ctx->ctx_psb->psb_hdr->hdr_count = 0;
- ctx->ctx_psb->psb_index = 0;
+ if (is_loaded && (thread->flags & IA64_THREAD_DBG_VALID) != 0) {
+ DPRINT(("debug registers already in use for [%d]\n", ctx->ctx_task->pid));
+ return -EBUSY;
}
-#endif
+
+ /*
+ * check for debug registers in system wide mode
+ *
+ * We make the reservation even when context is not loaded
+ * to make sure we get our slot. Note that the PFM_LOAD_CONTEXT
+ * may still fail if the task has DBG_VALID set.
+ */
+ LOCK_PFS();
+
+ if (first_time && ctx->ctx_fl_system) {
+ if (pfm_sessions.pfs_ptrace_use_dbregs)
+ ret = -EBUSY;
+ else
+ pfm_sessions.pfs_sys_use_dbregs++;
+ }
+
+ UNLOCK_PFS();
+
+ if (ret != 0) return ret;
+
+ /*
+ * mark ourself as user of the debug registers for
+ * perfmon purposes.
+ */
+ ctx->ctx_fl_using_dbreg = 1;
+
+ /*
+ * clear hardware registers to make sure we don't
+ * pick up stale state.
+ *
+ * for a system wide session, we do not use
+ * thread.dbr, thread.ibr because this process
+ * never leaves the current CPU and the state
+ * is shared by all processes running on it
+ */
+ if (first_time && can_access_pmu) {
+ DPRINT(("[%d] clearing ibrs, dbrs\n", ctx->ctx_task->pid));
+ for (i=0; i < pmu_conf.num_ibrs; i++) {
+ ia64_set_ibr(i, 0UL);
+ ia64_srlz_i();
+ }
+ ia64_srlz_i();
+ for (i=0; i < pmu_conf.num_dbrs; i++) {
+ ia64_set_dbr(i, 0UL);
+ ia64_srlz_d();
+ }
+ ia64_srlz_d();
+ }
+
+ /*
+ * Now install the values into the registers
+ */
+ for (i = 0; i < count; i++, req++) {
+
+ rnum = req->dbreg_num;
+ dbreg.val = req->dbreg_value;
+
+ ret = -EINVAL;
+
+ if ((mode == PFM_CODE_RR && !IBR_IS_IMPL(rnum)) || ((mode == PFM_DATA_RR) && !DBR_IS_IMPL(rnum))) {
+ DPRINT(("invalid register %u val=0x%lx mode=%d i=%d count=%d\n",
+ rnum, dbreg.val, mode, i, count));
+
+ goto abort_mission;
+ }
+
+ /*
+ * make sure we do not install an enabled breakpoint
+ */
+ if (rnum & 0x1) {
+ if (mode == PFM_CODE_RR)
+ dbreg.ibr.ibr_x = 0;
+ else
+ dbreg.dbr.dbr_r = dbreg.dbr.dbr_w = 0;
+ }
+
+ PFM_REG_RETFLAG_SET(req->dbreg_flags, 0);
+
+ /*
+ * Debug registers, just like PMC, can only be modified
+ * by a kernel call. Moreover, perfmon() access to those
+ * registers are centralized in this routine. The hardware
+ * does not modify the value of these registers, therefore,
+ * if we save them as they are written, we can avoid having
+ * to save them on context switch out. This is made possible
+ * by the fact that when perfmon uses debug registers, ptrace()
+ * won't be able to modify them concurrently.
+ */
+ if (mode == PFM_CODE_RR) {
+ CTX_USED_IBR(ctx, rnum);
+
+ if (can_access_pmu) ia64_set_ibr(rnum, dbreg.val);
+
+ ctx->ctx_ibrs[rnum] = dbreg.val;
+
+ DPRINT(("write ibr%u=0x%lx used_ibrs=0x%x is_loaded=%d access_pmu=%d\n",
+ rnum, dbreg.val, ctx->ctx_used_ibrs[0], is_loaded, can_access_pmu));
+ } else {
+ CTX_USED_DBR(ctx, rnum);
+
+ if (can_access_pmu) ia64_set_dbr(rnum, dbreg.val);
+
+ ctx->ctx_dbrs[rnum] = dbreg.val;
+
+ DPRINT(("write dbr%u=0x%lx used_dbrs=0x%x is_loaded=%d access_pmu=%d\n",
+ rnum, dbreg.val, ctx->ctx_used_dbrs[0], is_loaded, can_access_pmu));
+ }
+ }
+
+ return 0;
+
+abort_mission:
+ /*
+ * in case it was our first attempt, we undo the global modifications
+ */
+ if (first_time) {
+ LOCK_PFS();
+ if (ctx->ctx_fl_system) {
+ pfm_sessions.pfs_sys_use_dbregs--;
+ }
+ UNLOCK_PFS();
+ ctx->ctx_fl_using_dbreg = 0;
+ }
+ /*
+ * install error return flag
+ */
+ PFM_REG_RETFLAG_SET(req->dbreg_flags, PFM_REG_RETFL_EINVAL);
+
+ return ret;
+}
+
+static int
+pfm_write_ibrs(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
+{
+ return pfm_write_ibr_dbr(PFM_CODE_RR, ctx, arg, count, regs);
+}
+
+static int
+pfm_write_dbrs(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
+{
+ return pfm_write_ibr_dbr(PFM_DATA_RR, ctx, arg, count, regs);
+}
+
+static int
+pfm_get_features(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
+{
+ pfarg_features_t *req = (pfarg_features_t *)arg;
+
+ req->ft_version = PFM_VERSION;
return 0;
}
static int
-pfm_stop(struct task_struct *task, pfm_context_t *ctx, void *arg, int count,
- struct pt_regs *regs)
+pfm_stop(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
{
- /* we don't quite support this right now */
- if (task != current) return -EINVAL;
+ struct pt_regs *tregs;
- /*
- * Cannot do anything before PMU is enabled
- */
- if (!CTX_IS_ENABLED(ctx)) return -EINVAL;
- DBprintk(("[%d] fl_system=%d owner=%p current=%p\n",
- current->pid,
- ctx->ctx_fl_system, PMU_OWNER(),
- current));
+ if (CTX_IS_LOADED(ctx) == 0 && CTX_IS_MASKED(ctx) == 0) return -EINVAL;
+
+ /*
+ * In system wide and when the context is loaded, access can only happen
+ * when the caller is running on the CPU being monitored by the session.
+ * It does not have to be the owner (ctx_task) of the context per se.
+ */
+ if (ctx->ctx_fl_system && ctx->ctx_cpu != smp_processor_id()) {
+ DPRINT(("[%d] should be running on CPU%d\n", current->pid, ctx->ctx_cpu));
+ return -EBUSY;
+ }
- preempt_disable();
- /* simply stop monitoring but not the PMU */
+ /*
+ * in system mode, we need to update the PMU directly
+ * and the user level state of the caller, which may not
+ * necessarily be the creator of the context.
+ */
if (ctx->ctx_fl_system) {
-
- /* disable dcr pp */
+ /*
+ * Update local PMU first
+ *
+ * disable dcr pp
+ */
ia64_set_dcr(ia64_get_dcr() & ~IA64_DCR_PP);
-
- /* stop monitoring */
- pfm_clear_psr_pp();
-
ia64_srlz_i();
+ /*
+ * update local cpuinfo
+ */
PFM_CPUINFO_CLEAR(PFM_CPUINFO_DCR_PP);
+ /*
+ * stop monitoring, does srlz.i
+ */
+ pfm_clear_psr_pp();
+
+ /*
+ * stop monitoring in the caller
+ */
ia64_psr(regs)->pp = 0;
- } else {
+ return 0;
+ }
+ /*
+ * per-task mode
+ */
- /* stop monitoring */
+ if (ctx->ctx_task == current) {
+ /* stop monitoring at kernel level */
pfm_clear_psr_up();
- ia64_srlz_i();
+ /*
+ * stop monitoring at the user level
+ */
+ ia64_psr(regs)->up = 0;
+ } else {
+ tregs = ia64_task_regs(ctx->ctx_task);
+
+ /*
+ * stop monitoring at the user level
+ */
+ ia64_psr(tregs)->up = 0;
/*
- * clear user level psr.up
+ * monitoring disabled in kernel at next reschedule
*/
- ia64_psr(regs)->up = 0;
+ ctx->ctx_saved_psr &= ~IA64_PSR_UP;
+ printk("pfm_stop: current [%d] task=[%d]\n", current->pid, ctx->ctx_task->pid);
}
- preempt_enable();
return 0;
}
+
static int
-pfm_disable(struct task_struct *task, pfm_context_t *ctx, void *arg, int count,
- struct pt_regs *regs)
-{
- /* we don't quite support this right now */
- if (task != current) return -EINVAL;
+pfm_start(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
+{
+ struct pt_regs *tregs;
- if (!CTX_IS_ENABLED(ctx)) return -EINVAL;
+ if (CTX_IS_LOADED(ctx) == 0) return -EINVAL;
- preempt_disable();
/*
- * stop monitoring, freeze PMU, and save state in context
- * this call will clear IA64_THREAD_PM_VALID for per-task sessions.
- */
- pfm_flush_regs(task);
-
- if (ctx->ctx_fl_system) {
- ia64_psr(regs)->pp = 0;
- } else {
- ia64_psr(regs)->up = 0;
+ * In system wide and when the context is loaded, access can only happen
+ * when the caller is running on the CPU being monitored by the session.
+ * It does not have to be the owner (ctx_task) of the context per se.
+ */
+ if (ctx->ctx_fl_system && ctx->ctx_cpu != smp_processor_id()) {
+ DPRINT(("[%d] should be running on CPU%d\n", current->pid, ctx->ctx_cpu));
+ return -EBUSY;
}
- /*
- * goes back to default behavior: no user level control
- * no need to change live psr.sp because useless at the kernel level
+
+ /*
+ * in system mode, we need to update the PMU directly
+ * and the user level state of the caller, which may not
+ * necessarily be the creator of the context.
*/
- ia64_psr(regs)->sp = 1;
+ if (ctx->ctx_fl_system) {
- DBprintk(("enabling psr.sp for [%d]\n", current->pid));
+ /*
+ * set user level psr.pp for the caller
+ */
+ ia64_psr(regs)->pp = 1;
- ctx->ctx_flags.state = PFM_CTX_DISABLED;
- preempt_enable();
+ /*
+ * now update the local PMU and cpuinfo
+ */
+ PFM_CPUINFO_SET(PFM_CPUINFO_DCR_PP);
- return 0;
-}
+ /*
+ * start monitoring at kernel level
+ */
+ pfm_set_psr_pp();
-static int
-pfm_context_destroy(struct task_struct *task, pfm_context_t *ctx, void *arg, int count,
- struct pt_regs *regs)
-{
- /* we don't quite support this right now */
- if (task != current) return -EINVAL;
+ /* enable dcr pp */
+ ia64_set_dcr(ia64_get_dcr()|IA64_DCR_PP);
+ ia64_srlz_i();
- /*
- * if context was never enabled, then there is not much
- * to do
- */
- if (!CTX_IS_ENABLED(ctx)) goto skipped_stop;
+ return 0;
+ }
/*
- * Disable context: stop monitoring, flush regs to software state (useless here),
- * and freeze PMU
- *
- * The IA64_THREAD_PM_VALID is cleared by pfm_flush_regs() called from pfm_disable()
+ * per-process mode
*/
- pfm_disable(task, ctx, arg, count, regs);
- if (ctx->ctx_fl_system) {
- ia64_psr(regs)->pp = 0;
- } else {
- ia64_psr(regs)->up = 0;
- }
+ if (ctx->ctx_task == current) {
-skipped_stop:
- /*
- * remove sampling buffer mapping, if any
- */
- if (ctx->ctx_smpl_vaddr) {
- pfm_remove_smpl_mapping(task);
- ctx->ctx_smpl_vaddr = 0UL;
- }
- /* now free context and related state */
- pfm_context_exit(task);
+ /* start monitoring at kernel level */
+ pfm_set_psr_up();
- return 0;
-}
+ /*
+ * activate monitoring at user level
+ */
+ ia64_psr(regs)->up = 1;
-/*
- * does nothing at the moment
- */
-static int
-pfm_context_unprotect(struct task_struct *task, pfm_context_t *ctx, void *arg, int count,
- struct pt_regs *regs)
-{
- return 0;
-}
+ } else {
+ tregs = ia64_task_regs(ctx->ctx_task);
-static int
-pfm_protect_context(struct task_struct *task, pfm_context_t *ctx, void *arg, int count,
- struct pt_regs *regs)
-{
- DBprintk(("context from [%d] is protected\n", task->pid));
- /*
- * from now on, only the creator of the context has access to it
- */
- ctx->ctx_fl_protected = 1;
+ /*
+ * start monitoring at the kernel level the next
+ * time the task is scheduled
+ */
+ ctx->ctx_saved_psr |= IA64_PSR_UP;
- /*
- * reinforce secure monitoring: cannot toggle psr.up
- */
- if (ctx->ctx_fl_unsecure == 0) ia64_psr(regs)->sp = 1;
+ /*
+ * activate monitoring at user level
+ */
+ ia64_psr(tregs)->up = 1;
+ }
return 0;
}
static int
-pfm_debug(struct task_struct *task, pfm_context_t *ctx, void *arg, int count,
- struct pt_regs *regs)
+pfm_get_pmc_reset(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
{
- unsigned int mode = *(unsigned int *)arg;
+ pfarg_reg_t *req = (pfarg_reg_t *)arg;
+ unsigned int cnum;
+ int i;
+ int ret = -EINVAL;
- pfm_sysctl.debug = mode == 0 ? 0 : 1;
+ for (i = 0; i < count; i++, req++) {
- printk(KERN_INFO "perfmon debugging %s\n", pfm_sysctl.debug ? "on" : "off");
+ cnum = req->reg_num;
- return 0;
-}
+ if (!PMC_IS_IMPL(cnum)) goto abort_mission;
-#ifdef PFM_PMU_USES_DBR
+ req->reg_value = PMC_DFL_VAL(cnum);
-typedef struct {
- unsigned long ibr_mask:56;
- unsigned long ibr_plm:4;
- unsigned long ibr_ig:3;
- unsigned long ibr_x:1;
-} ibr_mask_reg_t;
+ PFM_REG_RETFLAG_SET(req->reg_flags, 0);
-typedef struct {
- unsigned long dbr_mask:56;
- unsigned long dbr_plm:4;
- unsigned long dbr_ig:2;
- unsigned long dbr_w:1;
- unsigned long dbr_r:1;
-} dbr_mask_reg_t;
+ DPRINT(("pmc_reset_val pmc[%u]=0x%lx\n", cnum, req->reg_value));
+ }
+ return 0;
-typedef union {
- unsigned long val;
- ibr_mask_reg_t ibr;
- dbr_mask_reg_t dbr;
-} dbreg_t;
+abort_mission:
+ PFM_REG_RETFLAG_SET(req->reg_flags, PFM_REG_RETFL_EINVAL);
+ return ret;
+}
static int
-pfm_write_ibr_dbr(int mode, struct task_struct *task, void *arg, int count, struct pt_regs *regs)
+pfm_context_load(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
{
- struct thread_struct *thread = &task->thread;
- pfm_context_t *ctx = task->thread.pfm_context;
- pfarg_dbreg_t tmp, *req = (pfarg_dbreg_t *)arg;
- dbreg_t dbreg;
- unsigned int rnum;
- int first_time;
- int i, ret = 0;
+ struct task_struct *task;
+ struct thread_struct *thread;
+ struct pfm_context_t *old;
+#ifndef CONFIG_SMP
+ struct task_struct *owner_task = NULL;
+#endif
+ pfarg_load_t *req = (pfarg_load_t *)arg;
+ unsigned long *pmcs_source, *pmds_source;
+ int the_cpu;
+ int ret = 0;
/*
- * we do not need to check for ipsr.db because we do clear ibr.x, dbr.r, and dbr.w
- * ensuring that no real breakpoint can be installed via this call.
+ * can only load from unloaded or terminated state
*/
+ if (CTX_IS_UNLOADED(ctx) == 0 && CTX_IS_TERMINATED(ctx) == 0) {
+ DPRINT(("[%d] cannot load to [%d], invalid ctx_state=%d\n",
+ current->pid,
+ req->load_pid,
+ ctx->ctx_state));
+ return -EINVAL;
+ }
- first_time = ctx->ctx_fl_using_dbreg == 0;
+ DPRINT(("load_pid [%d]\n", req->load_pid));
+
+ if (CTX_OVFL_NOBLOCK(ctx) == 0 && req->load_pid == current->pid) {
+ DPRINT(("cannot use blocking mode on self for [%d]\n", current->pid));
+ return -EINVAL;
+ }
+
+ ret = pfm_get_task(ctx, req->load_pid, &task);
+ if (ret) {
+ DPRINT(("load_pid [%d] get_task=%d\n", req->load_pid, ret));
+ return ret;
+ }
+
+ ret = -EINVAL;
/*
- * check for debug registers in system wide mode
- *
+ * system wide is self monitoring only
*/
- LOCK_PFS();
- if (ctx->ctx_fl_system && first_time) {
- if (pfm_sessions.pfs_ptrace_use_dbregs)
- ret = -EBUSY;
- else
- pfm_sessions.pfs_sys_use_dbregs++;
+ if (ctx->ctx_fl_system && task != current) {
+ DPRINT(("system wide is self monitoring only current=%d load_pid=%d\n",
+ current->pid,
+ req->load_pid));
+ goto error;
}
- UNLOCK_PFS();
- if (ret != 0) return ret;
+ thread = &task->thread;
- if (ctx->ctx_fl_system) {
- /* we mark ourselves as owner of the debug registers */
- ctx->ctx_fl_using_dbreg = 1;
- DBprintk(("system-wide setting fl_using_dbreg for [%d]\n", task->pid));
- } else if (first_time) {
- ret= -EBUSY;
- if ((thread->flags & IA64_THREAD_DBG_VALID) != 0) {
- DBprintk(("debug registers already in use for [%d]\n", task->pid));
- goto abort_mission;
- }
- /* we mark ourselves as owner of the debug registers */
- ctx->ctx_fl_using_dbreg = 1;
-
- DBprintk(("setting fl_using_dbreg for [%d]\n", task->pid));
- /*
- * Given debug registers cannot be used for both debugging
- * and performance monitoring at the same time, we reuse
- * the storage area to save and restore the registers on ctxsw.
- */
- memset(task->thread.dbr, 0, sizeof(task->thread.dbr));
- memset(task->thread.ibr, 0, sizeof(task->thread.ibr));
- }
+ ret = -EBUSY;
- if (first_time) {
- DBprintk(("[%d] clearing ibrs,dbrs\n", task->pid));
- /*
- * clear hardware registers to make sure we don't
- * pick up stale state.
- *
- * for a system wide session, we do not use
- * thread.dbr, thread.ibr because this process
- * never leaves the current CPU and the state
- * is shared by all processes running on it
- */
- for (i=0; i < (int) pmu_conf.num_ibrs; i++) {
- ia64_set_ibr(i, 0UL);
- }
- ia64_srlz_i();
- for (i=0; i < (int) pmu_conf.num_dbrs; i++) {
- ia64_set_dbr(i, 0UL);
- }
- ia64_srlz_d();
+ /*
+ * cannot load a context which is using range restrictions,
+ * into a task that is being debugged.
+ */
+ if (ctx->ctx_fl_using_dbreg && (thread->flags & IA64_THREAD_DBG_VALID)) {
+ DPRINT(("load_pid [%d] task is debugged, cannot load range restrictions\n", req->load_pid));
+ goto error;
}
- ret = -EFAULT;
+ /*
+ * SMP system-wide monitoring implies self-monitoring.
+ *
+ * The programming model expects the task to
+ * be pinned on a CPU throughout the session.
+ * Here we take note of the current CPU at the
+ * time the context is loaded. No call from
+ * another CPU will be allowed.
+ *
+ * The pinning via sched_setaffinity()
+ * must be done by the calling task prior
+ * to this call.
+ *
+ * systemwide: keep track of CPU this session is supposed to run on
+ */
+ the_cpu = ctx->ctx_cpu = smp_processor_id();
/*
- * Now install the values into the registers
+ * now reserve the session
*/
- for (i = 0; i < count; i++, req++) {
-
- if (__copy_from_user(&tmp, req, sizeof(tmp))) goto abort_mission;
-
- rnum = tmp.dbreg_num;
- dbreg.val = tmp.dbreg_value;
-
- ret = -EINVAL;
+ ret = pfm_reserve_session(current, ctx->ctx_fl_system, the_cpu);
+ if (ret) goto error;
- if ((mode == 0 && !IBR_IS_IMPL(rnum)) || ((mode == 1) && !DBR_IS_IMPL(rnum))) {
- DBprintk(("invalid register %u val=0x%lx mode=%d i=%d count=%d\n",
- rnum, dbreg.val, mode, i, count));
+ ret = -EBUSY;
+ /*
+ * task is necessarily stopped at this point.
+ *
+ * If the previous context was zombie, then it got removed in
+ * pfm_save_regs(). Therefore we should not see it here.
+ * If we see a context, then this is an active context
+ *
+ * XXX: needs to be atomic
+ */
+ DPRINT(("[%d] before cmpxchg() old_ctx=%p new_ctx=%p\n",
+ current->pid,
+ thread->pfm_context, ctx));
- goto abort_mission;
- }
+ old = ia64_cmpxchg("acq", &thread->pfm_context, NULL, ctx, sizeof(pfm_context_t *));
+ if (old != NULL) {
+ DPRINT(("load_pid [%d] already has a context\n", req->load_pid));
+ goto error_unres;
+ }
- /*
- * make sure we do not install enabled breakpoint
- */
- if (rnum & 0x1) {
- if (mode == 0)
- dbreg.ibr.ibr_x = 0;
- else
- dbreg.dbr.dbr_r = dbreg.dbr.dbr_w = 0;
- }
+ pfm_reset_msgq(ctx);
- /*
- * clear return flags and copy back to user
- *
- * XXX: fix once EAGAIN is implemented
- */
- ret = -EFAULT;
+ CTX_LOADED(ctx);
- PFM_REG_RETFLAG_SET(tmp.dbreg_flags, 0);
+ /*
+ * link context to task
+ */
+ ctx->ctx_task = task;
- if (__copy_to_user(req, &tmp, sizeof(tmp))) goto abort_mission;
+ if (ctx->ctx_fl_system) {
/*
- * Debug registers, just like PMC, can only be modified
- * by a kernel call. Moreover, perfmon() access to those
- * registers are centralized in this routine. The hardware
- * does not modify the value of these registers, therefore,
- * if we save them as they are written, we can avoid having
- * to save them on context switch out. This is made possible
- * by the fact that when perfmon uses debug registers, ptrace()
- * won't be able to modify them concurrently.
+ * we load as stopped
*/
- if (mode == 0) {
- CTX_USED_IBR(ctx, rnum);
+ PFM_CPUINFO_SET(PFM_CPUINFO_SYST_WIDE);
+ PFM_CPUINFO_CLEAR(PFM_CPUINFO_DCR_PP);
- ia64_set_ibr(rnum, dbreg.val);
- ia64_srlz_i();
+ if (ctx->ctx_fl_excl_idle) PFM_CPUINFO_SET(PFM_CPUINFO_EXCL_IDLE);
+ } else {
+ thread->flags |= IA64_THREAD_PM_VALID;
+ }
- thread->ibr[rnum] = dbreg.val;
+ /*
+ * propagate into thread-state
+ */
+ pfm_copy_pmds(task, ctx);
+ pfm_copy_pmcs(task, ctx);
- DBprintk(("write ibr%u=0x%lx used_ibrs=0x%lx\n", rnum, dbreg.val, ctx->ctx_used_ibrs[0]));
- } else {
- CTX_USED_DBR(ctx, rnum);
+ pmcs_source = thread->pmcs;
+ pmds_source = thread->pmds;
- ia64_set_dbr(rnum, dbreg.val);
- ia64_srlz_d();
+ /*
+ * always the case for system-wide
+ */
+ if (task == current) {
- thread->dbr[rnum] = dbreg.val;
+ if (ctx->ctx_fl_system == 0) {
- DBprintk(("write dbr%u=0x%lx used_dbrs=0x%lx\n", rnum, dbreg.val, ctx->ctx_used_dbrs[0]));
+ /* allow user level control */
+ ia64_psr(regs)->sp = 0;
+ DPRINT(("clearing psr.sp for [%d]\n", task->pid));
+
+ SET_LAST_CPU(ctx, smp_processor_id());
+ INC_ACTIVATION();
+ SET_ACTIVATION(ctx);
+#ifndef CONFIG_SMP
+ /*
+ * push the other task out, if any
+ */
+ owner_task = GET_PMU_OWNER();
+ if (owner_task) pfm_lazy_save_regs(owner_task);
+#endif
}
- }
+ /*
+ * load all PMD from ctx to PMU (as opposed to thread state)
+ * restore all PMC from ctx to PMU
+ */
+ pfm_restore_pmds(pmds_source, ctx->ctx_all_pmds[0]);
+ pfm_restore_pmcs(pmcs_source, ctx->ctx_all_pmcs[0]);
- return 0;
+ ctx->ctx_reload_pmcs[0] = 0UL;
+ ctx->ctx_reload_pmds[0] = 0UL;
-abort_mission:
- /*
- * in case it was our first attempt, we undo the global modifications
- */
- if (first_time) {
- LOCK_PFS();
- if (ctx->ctx_fl_system) {
- pfm_sessions.pfs_sys_use_dbregs--;
+ /*
+ * guaranteed safe by earlier check against DBG_VALID
+ */
+ if (ctx->ctx_fl_using_dbreg) {
+ pfm_restore_ibrs(ctx->ctx_ibrs, pmu_conf.num_ibrs);
+ pfm_restore_dbrs(ctx->ctx_dbrs, pmu_conf.num_dbrs);
}
- UNLOCK_PFS();
- ctx->ctx_fl_using_dbreg = 0;
- }
- /*
- * install error return flag
- */
- if (ret != -EFAULT) {
/*
- * XXX: for now we can only come here on EINVAL
+ * set new ownership
*/
- PFM_REG_RETFLAG_SET(tmp.dbreg_flags, PFM_REG_RETFL_EINVAL);
- if (__put_user(tmp.dbreg_flags, &req->dbreg_flags)) ret = -EFAULT;
- }
- return ret;
-}
+ SET_PMU_OWNER(task, ctx);
-static int
-pfm_write_ibrs(struct task_struct *task, pfm_context_t *ctx, void *arg, int count,
- struct pt_regs *regs)
-{
- /* we don't quite support this right now */
- if (task != current) return -EINVAL;
+ DPRINT(("context loaded on PMU for [%d]\n", task->pid));
+ } else {
+ /*
+ * when not current, task MUST be stopped, so this is safe
+ */
+ regs = ia64_task_regs(task);
- if (!CTX_IS_ENABLED(ctx)) return -EINVAL;
+ /* force a full reload */
+ ctx->ctx_last_activation = PFM_INVALID_ACTIVATION;
+ SET_LAST_CPU(ctx, -1);
- return pfm_write_ibr_dbr(0, task, arg, count, regs);
-}
+ /* initial saved psr (stopped) */
+ ctx->ctx_saved_psr = pfm_get_psr() & ~(IA64_PSR_PP|IA64_PSR_UP);
+ ia64_psr(regs)->up = ia64_psr(regs)->pp = 0;
-static int
-pfm_write_dbrs(struct task_struct *task, pfm_context_t *ctx, void *arg, int count,
- struct pt_regs *regs)
-{
- /* we don't quite support this right now */
- if (task != current) return -EINVAL;
+ if (ctx->ctx_fl_unsecure) {
+ ia64_psr(regs)->sp = 0;
+ DPRINT(("context unsecured for [%d]\n", task->pid));
+ }
+ }
+
+ ret = 0;
- if (!CTX_IS_ENABLED(ctx)) return -EINVAL;
+error_unres:
+ if (ret) pfm_unreserve_session(ctx, ctx->ctx_fl_system, the_cpu);
+error:
+ /*
+ * release task, there is now a link with the context
+ */
+ if (ctx->ctx_fl_system == 0 && task != current) pfm_put_task(task);
- return pfm_write_ibr_dbr(1, task, arg, count, regs);
+ return ret;
}
-#endif /* PFM_PMU_USES_DBR */
+/*
+ * in this function, we do not need to increase the use count
+ * for the task via get_task_struct(), because we hold the
+ * context lock. If the task were to disappear while having
+ * a context attached, it would go through pfm_exit_thread()
+ * which also grabs the context lock and would therefore be blocked
+ * until we are here.
+ */
+static void pfm_flush_pmds(struct task_struct *, pfm_context_t *ctx);
static int
-pfm_get_features(struct task_struct *task, pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
+pfm_context_unload(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
{
- pfarg_features_t tmp;
-
- memset(&tmp, 0, sizeof(tmp));
-
- tmp.ft_version = PFM_VERSION;
- tmp.ft_smpl_version = PFM_SMPL_VERSION;
-
- if (__copy_to_user(arg, &tmp, sizeof(tmp))) return -EFAULT;
-
- return 0;
-}
+ struct task_struct *task = ctx->ctx_task;
+ struct pt_regs *tregs;
-static int
-pfm_start(struct task_struct *task, pfm_context_t *ctx, void *arg, int count,
- struct pt_regs *regs)
-{
- /* we don't quite support this right now */
- if (task != current) return -EINVAL;
+ DPRINT(("ctx_state=%d task [%d]\n", ctx->ctx_state, task ? task->pid : -1));
- /*
- * Cannot do anything before PMU is enabled
+ /*
+ * unload only when necessary
*/
- if (!CTX_IS_ENABLED(ctx)) return -EINVAL;
-
- DBprintk(("[%d] fl_system=%d owner=%p current=%p\n",
- current->pid,
- ctx->ctx_fl_system, PMU_OWNER(),
- current));
+ if (CTX_IS_TERMINATED(ctx) || CTX_IS_UNLOADED(ctx)) {
+ DPRINT(("[%d] ctx_state=%d, nothing to do\n", current->pid, ctx->ctx_state));
+ return 0;
+ }
- if (PMU_OWNER() != task) {
- printk(KERN_DEBUG "perfmon: pfm_start task [%d] not pmu owner\n", task->pid);
- return -EINVAL;
+ /*
+	 * In system-wide mode, when the context is loaded, access can only happen
+ * when the caller is running on the CPU being monitored by the session.
+ * It does not have to be the owner (ctx_task) of the context per se.
+ */
+ if (ctx->ctx_fl_system && ctx->ctx_cpu != smp_processor_id()) {
+ DPRINT(("[%d] should be running on CPU%d\n", current->pid, ctx->ctx_cpu));
+ return -EBUSY;
}
- preempt_disable();
- if (ctx->ctx_fl_system) {
-
- PFM_CPUINFO_SET(PFM_CPUINFO_DCR_PP);
+ /*
+ * clear psr and dcr bits
+ */
+ pfm_stop(ctx, NULL, 0, regs);
- /* set user level psr.pp */
- ia64_psr(regs)->pp = 1;
+ CTX_UNLOADED(ctx);
- /* start monitoring at kernel level */
- pfm_set_psr_pp();
+ /*
+ * in system mode, we need to update the PMU directly
+ * and the user level state of the caller, which may not
+ * necessarily be the creator of the context.
+ */
+ if (ctx->ctx_fl_system) {
- /* enable dcr pp */
- ia64_set_dcr(ia64_get_dcr()|IA64_DCR_PP);
+ /*
+ * Update cpuinfo
+ *
+ * local PMU is taken care of in pfm_stop()
+ */
+ PFM_CPUINFO_CLEAR(PFM_CPUINFO_SYST_WIDE);
+ PFM_CPUINFO_CLEAR(PFM_CPUINFO_EXCL_IDLE);
- ia64_srlz_i();
+ /*
+ * save PMDs in context
+ * release ownership
+ */
+ pfm_flush_pmds(current, ctx);
- } else {
- if ((task->thread.flags & IA64_THREAD_PM_VALID) == 0) {
- preempt_enable();
- printk(KERN_DEBUG "perfmon: pfm_start task flag not set for [%d]\n",
- task->pid);
- return -EINVAL;
- }
- /* set user level psr.up */
- ia64_psr(regs)->up = 1;
+ /*
+ * at this point we are done with the PMU
+ * so we can unreserve the resource.
+ */
+ pfm_unreserve_session(ctx, 1 , ctx->ctx_cpu);
- /* start monitoring at kernel level */
- pfm_set_psr_up();
+ /*
+ * disconnect context from task
+ */
+ task->thread.pfm_context = NULL;
+ /*
+ * disconnect task from context
+ */
+ ctx->ctx_task = NULL;
- ia64_srlz_i();
+ /*
+ * There is nothing more to cleanup here.
+ */
+ return 0;
}
- preempt_enable();
- return 0;
-}
+ /*
+ * per-task mode
+ */
+ tregs = task == current ? regs : ia64_task_regs(task);
-static int
-pfm_enable(struct task_struct *task, pfm_context_t *ctx, void *arg, int count,
- struct pt_regs *regs)
-{
- int me;
+ if (task == current || ctx->ctx_fl_unsecure) {
+ /*
+ * cancel user level control
+ */
+ ia64_psr(regs)->sp = 1;
+ DPRINT(("setting psr.sp for [%d]\n", task->pid));
- /* we don't quite support this right now */
- if (task != current) return -EINVAL;
+ }
+ /*
+ * save PMDs to context
+ * release ownership
+ */
+ pfm_flush_pmds(task, ctx);
- me = get_cpu(); /* make sure we're not migrated or preempted */
+ /*
+ * at this point we are done with the PMU
+ * so we can unreserve the resource.
+ */
+ pfm_unreserve_session(ctx, 0 , ctx->ctx_cpu);
- if (ctx->ctx_fl_system == 0 && PMU_OWNER() && PMU_OWNER() != current)
- pfm_lazy_save_regs(PMU_OWNER());
+ /*
+ * reset activation counter and psr
+ */
+ ctx->ctx_last_activation = PFM_INVALID_ACTIVATION;
+ SET_LAST_CPU(ctx, -1);
- /* reset all registers to stable quiet state */
- pfm_reset_pmu(task);
+ /*
+ * PMU state will not be restored
+ */
+ task->thread.flags &= ~IA64_THREAD_PM_VALID;
- /* make sure nothing starts */
- if (ctx->ctx_fl_system) {
- ia64_psr(regs)->pp = 0;
- ia64_psr(regs)->up = 0; /* just to make sure! */
+ /*
+ * break links between context and task
+ */
+ task->thread.pfm_context = NULL;
+ ctx->ctx_task = NULL;
- /* make sure monitoring is stopped */
- pfm_clear_psr_pp();
- ia64_srlz_i();
+ PFM_SET_WORK_PENDING(task, 0);
+ ctx->ctx_fl_trap_reason = PFM_TRAP_REASON_NONE;
- PFM_CPUINFO_CLEAR(PFM_CPUINFO_DCR_PP);
- PFM_CPUINFO_SET(PFM_CPUINFO_SYST_WIDE);
- if (ctx->ctx_fl_excl_idle) PFM_CPUINFO_SET(PFM_CPUINFO_EXCL_IDLE);
- } else {
- /*
- * needed in case the task was a passive task during
- * a system wide session and now wants to have its own
- * session
- */
- ia64_psr(regs)->pp = 0; /* just to make sure! */
- ia64_psr(regs)->up = 0;
+ DPRINT(("disconnected [%d] from context\n", task->pid));
- /* make sure monitoring is stopped */
- pfm_clear_psr_up();
- ia64_srlz_i();
+ return 0;
+}
- DBprintk(("clearing psr.sp for [%d]\n", current->pid));
+static void
+pfm_force_cleanup(pfm_context_t *ctx, struct pt_regs *regs)
+{
+ struct task_struct *task = ctx->ctx_task;
- /* allow user level control */
- ia64_psr(regs)->sp = 0;
+ ia64_psr(regs)->up = 0;
+ ia64_psr(regs)->sp = 1;
- /* PMU state will be saved/restored on ctxsw */
- task->thread.flags |= IA64_THREAD_PM_VALID;
+ if (GET_PMU_OWNER() == task) {
+ DPRINT(("cleared ownership for [%d]\n", ctx->ctx_task->pid));
+ SET_PMU_OWNER(NULL, NULL);
}
- SET_PMU_OWNER(task);
-
- ctx->ctx_flags.state = PFM_CTX_ENABLED;
- atomic_set(&ctx->ctx_last_cpu, me);
-
- /* simply unfreeze */
- pfm_unfreeze_pmu();
+ /*
+ * disconnect the task from the context and vice-versa
+ */
+ PFM_SET_WORK_PENDING(task, 0);
- put_cpu();
+ task->thread.pfm_context = NULL;
+ task->thread.flags &= ~IA64_THREAD_PM_VALID;
- return 0;
+ DPRINT(("context <%d> force cleanup for [%d] by [%d]\n", ctx->ctx_fd, task->pid, current->pid));
}
-static int
-pfm_get_pmc_reset(struct task_struct *task, pfm_context_t *ctx, void *arg, int count,
- struct pt_regs *regs)
+
+/*
+ * called only from exit_thread(): task == current
+ */
+void
+pfm_exit_thread(struct task_struct *task)
{
- pfarg_reg_t tmp, *req = (pfarg_reg_t *)arg;
- unsigned int cnum;
- int i, ret = -EINVAL;
+ pfm_context_t *ctx;
+ unsigned long flags;
+ struct pt_regs *regs = ia64_task_regs(task);
+ int ret;
+ int free_ok = 0;
- for (i = 0; i < count; i++, req++) {
+ ctx = PFM_GET_CTX(task);
- if (__copy_from_user(&tmp, req, sizeof(tmp))) return -EFAULT;
+ PROTECT_CTX(ctx, flags);
- cnum = tmp.reg_num;
+ DPRINT(("state=%d task [%d]\n", ctx->ctx_state, task->pid));
- if (!PMC_IS_IMPL(cnum)) goto abort_mission;
+ /*
+ * come here only if attached
+ */
+ if (unlikely(CTX_IS_UNLOADED(ctx))) {
+ printk(KERN_ERR "perfmon: pfm_exit_thread [%d] ctx unloaded\n", task->pid);
+ goto skip_all;
+ }
+
+ if (CTX_IS_LOADED(ctx) || CTX_IS_MASKED(ctx)) {
+
+ ret = pfm_context_unload(ctx, NULL, 0, regs);
+ if (ret) {
+ printk(KERN_ERR "perfmon: pfm_exit_thread [%d] state=%d unload failed %d\n", task->pid, ctx->ctx_state, ret);
+ }
+ CTX_TERMINATED(ctx);
+ DPRINT(("ctx terminated by [%d]\n", task->pid));
- tmp.reg_value = PMC_DFL_VAL(cnum);
+ pfm_end_notify_user(ctx);
- PFM_REG_RETFLAG_SET(tmp.reg_flags, 0);
+ } else if (CTX_IS_ZOMBIE(ctx)) {
+ pfm_clear_psr_up();
+
+ BUG_ON(ctx->ctx_smpl_hdr);
- DBprintk(("pmc_reset_val pmc[%u]=0x%lx\n", cnum, tmp.reg_value));
+ pfm_force_cleanup(ctx, regs);
- if (__copy_to_user(req, &tmp, sizeof(tmp))) return -EFAULT;
+ free_ok = 1;
}
- return 0;
-abort_mission:
- PFM_REG_RETFLAG_SET(tmp.reg_flags, PFM_REG_RETFL_EINVAL);
- if (__copy_to_user(req, &tmp, sizeof(tmp))) ret = -EFAULT;
+ { u64 psr = pfm_get_psr();
+ BUG_ON(psr & (IA64_PSR_UP|IA64_PSR_PP));
+ }
+skip_all:
+ UNPROTECT_CTX(ctx, flags);
- return ret;
+ /*
+ * All memory free operations (especially for vmalloc'ed memory)
+ * MUST be done with interrupts ENABLED.
+ */
+ if (free_ok) pfm_context_free(ctx);
}
/*
* functions MUST be listed in the increasing order of their index (see permfon.h)
*/
+#define PFM_CMD(name, flags, arg_count, arg_type, getsz) { name, #name, flags, arg_count, sizeof(arg_type), getsz }
+#define PFM_CMD_S(name, flags) { name, #name, flags, 0, 0, NULL }
+#define PFM_CMD_PCLRWS (PFM_CMD_FD|PFM_CMD_ARG_RW|PFM_CMD_STOP)
+#define PFM_CMD_PCLRW (PFM_CMD_FD|PFM_CMD_ARG_RW)
+#define PFM_CMD_NONE { NULL, "no-cmd", 0, 0, 0, NULL}
+
static pfm_cmd_desc_t pfm_cmd_tab[]={
-/* 0 */{ NULL, 0, 0, 0}, /* not used */
-/* 1 */{ pfm_write_pmcs, PFM_CMD_PID|PFM_CMD_CTX|PFM_CMD_ARG_RW, PFM_CMD_ARG_MANY, sizeof(pfarg_reg_t)},
-/* 2 */{ pfm_write_pmds, PFM_CMD_PID|PFM_CMD_CTX|PFM_CMD_ARG_RW, PFM_CMD_ARG_MANY, sizeof(pfarg_reg_t)},
-/* 3 */{ pfm_read_pmds,PFM_CMD_PID|PFM_CMD_CTX|PFM_CMD_ARG_RW, PFM_CMD_ARG_MANY, sizeof(pfarg_reg_t)},
-/* 4 */{ pfm_stop, PFM_CMD_PID|PFM_CMD_CTX, 0, 0},
-/* 5 */{ pfm_start, PFM_CMD_PID|PFM_CMD_CTX, 0, 0},
-/* 6 */{ pfm_enable, PFM_CMD_PID|PFM_CMD_CTX, 0, 0},
-/* 7 */{ pfm_disable, PFM_CMD_PID|PFM_CMD_CTX, 0, 0},
-/* 8 */{ pfm_context_create, PFM_CMD_PID|PFM_CMD_ARG_RW, 1, sizeof(pfarg_context_t)},
-/* 9 */{ pfm_context_destroy, PFM_CMD_PID|PFM_CMD_CTX, 0, 0},
-/* 10 */{ pfm_restart, PFM_CMD_PID|PFM_CMD_CTX|PFM_CMD_NOCHK, 0, 0},
-/* 11 */{ pfm_protect_context, PFM_CMD_PID|PFM_CMD_CTX, 0, 0},
-/* 12 */{ pfm_get_features, PFM_CMD_ARG_RW, 0, 0},
-/* 13 */{ pfm_debug, 0, 1, sizeof(unsigned int)},
-/* 14 */{ pfm_context_unprotect, PFM_CMD_PID|PFM_CMD_CTX, 0, 0},
-/* 15 */{ pfm_get_pmc_reset, PFM_CMD_ARG_RW, PFM_CMD_ARG_MANY, sizeof(pfarg_reg_t)},
-/* 16 */{ NULL, 0, 0, 0}, /* not used */
-/* 17 */{ NULL, 0, 0, 0}, /* not used */
-/* 18 */{ NULL, 0, 0, 0}, /* not used */
-/* 19 */{ NULL, 0, 0, 0}, /* not used */
-/* 20 */{ NULL, 0, 0, 0}, /* not used */
-/* 21 */{ NULL, 0, 0, 0}, /* not used */
-/* 22 */{ NULL, 0, 0, 0}, /* not used */
-/* 23 */{ NULL, 0, 0, 0}, /* not used */
-/* 24 */{ NULL, 0, 0, 0}, /* not used */
-/* 25 */{ NULL, 0, 0, 0}, /* not used */
-/* 26 */{ NULL, 0, 0, 0}, /* not used */
-/* 27 */{ NULL, 0, 0, 0}, /* not used */
-/* 28 */{ NULL, 0, 0, 0}, /* not used */
-/* 29 */{ NULL, 0, 0, 0}, /* not used */
-/* 30 */{ NULL, 0, 0, 0}, /* not used */
-/* 31 */{ NULL, 0, 0, 0}, /* not used */
-#ifdef PFM_PMU_USES_DBR
-/* 32 */{ pfm_write_ibrs, PFM_CMD_PID|PFM_CMD_CTX|PFM_CMD_ARG_RW, PFM_CMD_ARG_MANY, sizeof(pfarg_dbreg_t)},
-/* 33 */{ pfm_write_dbrs, PFM_CMD_PID|PFM_CMD_CTX|PFM_CMD_ARG_RW, PFM_CMD_ARG_MANY, sizeof(pfarg_dbreg_t)}
-#endif
+/* 0 */PFM_CMD_NONE,
+/* 1 */PFM_CMD(pfm_write_pmcs, PFM_CMD_PCLRWS, PFM_CMD_ARG_MANY, pfarg_reg_t, NULL),
+/* 2 */PFM_CMD(pfm_write_pmds, PFM_CMD_PCLRWS, PFM_CMD_ARG_MANY, pfarg_reg_t, NULL),
+/* 3 */PFM_CMD(pfm_read_pmds, PFM_CMD_PCLRWS, PFM_CMD_ARG_MANY, pfarg_reg_t, NULL),
+/* 4 */PFM_CMD_S(pfm_stop, PFM_CMD_PCLRWS),
+/* 5 */PFM_CMD_S(pfm_start, PFM_CMD_PCLRWS),
+/* 6 */PFM_CMD_NONE,
+/* 7 */PFM_CMD_NONE,
+/* 8 */PFM_CMD(pfm_context_create, PFM_CMD_ARG_RW, 1, pfarg_context_t, pfm_ctx_getsize),
+/* 9 */PFM_CMD_NONE,
+/* 10 */PFM_CMD_S(pfm_restart, PFM_CMD_PCLRW),
+/* 11 */PFM_CMD_NONE,
+/* 12 */PFM_CMD(pfm_get_features, PFM_CMD_ARG_RW, 1, pfarg_features_t, NULL),
+/* 13 */PFM_CMD(pfm_debug, 0, 1, unsigned int, NULL),
+/* 14 */PFM_CMD_NONE,
+/* 15 */PFM_CMD(pfm_get_pmc_reset, PFM_CMD_ARG_RW, PFM_CMD_ARG_MANY, pfarg_reg_t, NULL),
+/* 16 */PFM_CMD(pfm_context_load, PFM_CMD_PCLRWS, 1, pfarg_load_t, NULL),
+/* 17 */PFM_CMD_S(pfm_context_unload, PFM_CMD_PCLRWS),
+/* 18 */PFM_CMD_NONE,
+/* 19 */PFM_CMD_NONE,
+/* 20 */PFM_CMD_NONE,
+/* 21 */PFM_CMD_NONE,
+/* 22 */PFM_CMD_NONE,
+/* 23 */PFM_CMD_NONE,
+/* 24 */PFM_CMD_NONE,
+/* 25 */PFM_CMD_NONE,
+/* 26 */PFM_CMD_NONE,
+/* 27 */PFM_CMD_NONE,
+/* 28 */PFM_CMD_NONE,
+/* 29 */PFM_CMD_NONE,
+/* 30 */PFM_CMD_NONE,
+/* 31 */PFM_CMD_NONE,
+/* 32 */PFM_CMD(pfm_write_ibrs, PFM_CMD_PCLRWS, PFM_CMD_ARG_MANY, pfarg_dbreg_t, NULL),
+/* 33 */PFM_CMD(pfm_write_dbrs, PFM_CMD_PCLRWS, PFM_CMD_ARG_MANY, pfarg_dbreg_t, NULL)
};
-#define PFM_CMD_COUNT ARRAY_SIZE(pfm_cmd_tab)
+#define PFM_CMD_COUNT (sizeof(pfm_cmd_tab)/sizeof(pfm_cmd_desc_t))
static int
-check_task_state(struct task_struct *task)
+pfm_check_task_state(pfm_context_t *ctx, int cmd, unsigned long flags)
{
- int ret = 0;
-#ifdef CONFIG_SMP
- /* We must wait until the state has been completely
- * saved. There can be situations where the reader arrives before
- * after the task is marked as STOPPED but before pfm_save_regs()
- * is completed.
- */
- if (task->state != TASK_ZOMBIE && task->state != TASK_STOPPED) return -EBUSY;
- DBprintk(("before wait_task_inactive [%d] state %ld\n", task->pid, task->state));
- wait_task_inactive(task);
- DBprintk(("after wait_task_inactive [%d] state %ld\n", task->pid, task->state));
-#else
- if (task->state != TASK_ZOMBIE && task->state != TASK_STOPPED) {
- DBprintk(("warning [%d] not in stable state %ld\n", task->pid, task->state));
- ret = -EBUSY;
+ struct task_struct *task;
+
+ task = PFM_CTX_TASK(ctx);
+ if (task == NULL) {
+ DPRINT(("context %d no task, state=%d\n", ctx->ctx_fd, ctx->ctx_state));
+ return 0;
}
-#endif
- return ret;
+
+ DPRINT(("context %d state=%d [%d] task_state=%ld must_stop=%d\n",
+ ctx->ctx_fd,
+ ctx->ctx_state,
+ task->pid,
+ task->state, PFM_CMD_STOPPED(cmd)));
+
+ /*
+ * self-monitoring always ok.
+ *
+ * for system-wide the caller can either be the creator of the
+	 * context (the one to which the context is attached) OR
+ * a task running on the same CPU as the session.
+ */
+ if (task == current || ctx->ctx_fl_system) return 0;
+
+ /*
+ * context is UNLOADED, MASKED, TERMINATED we are safe to go
+ */
+ if (CTX_IS_LOADED(ctx) == 0) return 0;
+
+ if (CTX_IS_ZOMBIE(ctx)) return -EINVAL;
+
+ /*
+ * context is loaded, we must make sure the task is stopped
+ * We could lift this restriction for UP but it would mean that
+ * the user has no guarantee the task would not run between
+ * two successive calls to perfmonctl(). That's probably OK.
+ * If this user wants to ensure the task does not run, then
+ * the task must be stopped.
+ */
+ if (PFM_CMD_STOPPED(cmd) && task->state != TASK_STOPPED) {
+ DPRINT(("[%d] task not in stopped state\n", task->pid));
+ return -EBUSY;
+ }
+
+ UNPROTECT_CTX(ctx, flags);
+
+ pfm_wait_task_inactive(task);
+
+ PROTECT_CTX(ctx, flags);
+ return 0;
}
+/*
+ * system-call entry point (must return long)
+ */
asmlinkage long
-sys_perfmonctl (pid_t pid, int cmd, void *arg, int count, long arg5, long arg6, long arg7,
+sys_perfmonctl (int fd, int cmd, void *arg, int count, long arg5, long arg6, long arg7,
long arg8, long stack)
{
struct pt_regs *regs = (struct pt_regs *)&stack;
- struct task_struct *task = current;
- pfm_context_t *ctx;
- size_t sz;
- int ret, narg;
+ struct file *file = NULL;
+ pfm_context_t *ctx = NULL;
+ unsigned long flags = 0UL;
+ void *args_k = NULL;
+ long ret; /* will expand int return types */
+ size_t base_sz, sz, xtra_sz = 0;
+ int narg, completed_args = 0, call_made = 0;
+#define PFM_MAX_ARGSIZE 4096
- /*
+ /*
* reject any call if perfmon was disabled at initialization time
*/
if (PFM_IS_DISABLED()) return -ENOSYS;
- DBprintk(("cmd=%d idx=%d valid=%d narg=0x%x\n", cmd, PFM_CMD_IDX(cmd),
- PFM_CMD_IS_VALID(cmd), PFM_CMD_NARG(cmd)));
+ if (unlikely(PFM_CMD_IS_VALID(cmd) == 0)) {
+ DPRINT(("[%d] invalid cmd=%d\n", current->pid, cmd));
+ return -EINVAL;
+ }
- if (PFM_CMD_IS_VALID(cmd) == 0) return -EINVAL;
+ DPRINT(("cmd=%s idx=%d valid=%d narg=0x%x argsz=%lu count=%d\n",
+ PFM_CMD_NAME(cmd),
+ PFM_CMD_IDX(cmd),
+ PFM_CMD_IS_VALID(cmd),
+ PFM_CMD_NARG(cmd),
+ PFM_CMD_ARG_SIZE(cmd), count));
- /* ingore arguments when command has none */
+ /*
+ * check if number of arguments matches what the command expects
+ */
narg = PFM_CMD_NARG(cmd);
- if ((narg == PFM_CMD_ARG_MANY && count == 0) || (narg > 0 && narg != count)) return -EINVAL;
+ if ((narg == PFM_CMD_ARG_MANY && count <= 0) || (narg > 0 && narg != count))
+ return -EINVAL;
- sz = PFM_CMD_ARG_SIZE(cmd);
+ /* get single argument size */
+ base_sz = PFM_CMD_ARG_SIZE(cmd);
- if (PFM_CMD_READ_ARG(cmd) && !access_ok(VERIFY_READ, arg, sz*count)) return -EFAULT;
+restart_args:
+ sz = xtra_sz + base_sz*count;
+ /*
+ * limit abuse to min page size
+ */
+ if (unlikely(sz > PFM_MAX_ARGSIZE)) {
+ printk(KERN_ERR "perfmon: [%d] argument too big %lu\n", current->pid, sz);
+ return -E2BIG;
+ }
- if (PFM_CMD_RW_ARG(cmd) && !access_ok(VERIFY_WRITE, arg, sz*count)) return -EFAULT;
+ /*
+ * allocate default-sized argument buffer
+ */
+ if (count && args_k == NULL) {
+ args_k = kmalloc(PFM_MAX_ARGSIZE, GFP_KERNEL);
+ if (args_k == NULL) return -ENOMEM;
+ }
- if (PFM_CMD_USE_PID(cmd)) {
- /*
- * XXX: may need to fine tune this one
- */
- if (pid < 2) return -EPERM;
+ ret = -EFAULT;
- if (pid != current->pid) {
+ /*
+ * copy arguments
+ *
+ * assume sz = 0 for command without parameters
+ */
+ if (sz && copy_from_user(args_k, arg, sz)) {
+ DPRINT(("[%d] cannot copy_from_user %lu bytes @%p\n", current->pid, sz, arg));
+ goto error_args;
+ }
- ret = -ESRCH;
+ /*
+ * check if command supports extra parameters
+ */
+ if (completed_args == 0 && PFM_CMD_GETSIZE(cmd)) {
+ /*
+ * get extra parameters size (based on main argument)
+ */
+ ret = PFM_CMD_GETSIZE(cmd)(args_k, &xtra_sz);
+ if (ret) goto error_args;
- read_lock(&tasklist_lock);
+ completed_args = 1;
- task = find_task_by_pid(pid);
+ DPRINT(("[%d] restart_args sz=%lu xtra_sz=%lu\n", current->pid, sz, xtra_sz));
- if (task) get_task_struct(task);
+ /* retry if necessary */
+ if (xtra_sz) goto restart_args;
+ }
- read_unlock(&tasklist_lock);
+ if (PFM_CMD_USE_FD(cmd)) {
- if (!task) goto abort_call;
+ ret = -EBADF;
- ret = -EPERM;
+ file = fget(fd);
+ if (file == NULL) {
+ DPRINT(("[%d] invalid fd %d\n", current->pid, fd));
+ goto error_args;
+ }
+ if (PFM_IS_FILE(file) == 0) {
+ DPRINT(("[%d] fd %d not related to perfmon\n", current->pid, fd));
+ goto error_args;
+ }
- if (pfm_bad_permissions(task)) goto abort_call;
- if (PFM_CMD_CHK(cmd)) {
- ret = check_task_state(task);
- if (ret != 0) goto abort_call;
- }
+ ctx = (pfm_context_t *)file->private_data;
+ if (ctx == NULL) {
+ DPRINT(("[%d] no context for fd %d\n", current->pid, fd));
+ goto error_args;
}
- }
- ctx = task->thread.pfm_context;
+ PROTECT_CTX(ctx, flags);
- if (PFM_CMD_USE_CTX(cmd)) {
- ret = -EINVAL;
- if (ctx == NULL) {
- DBprintk(("no context for task %d\n", task->pid));
- goto abort_call;
- }
- ret = -EPERM;
- /*
- * we only grant access to the context if:
- * - the caller is the creator of the context (ctx_owner)
- * OR - the context is attached to the caller AND The context IS NOT
- * in protected mode
- */
- if (ctx->ctx_owner != current && (ctx->ctx_fl_protected || task != current)) {
- DBprintk(("context protected, no access for [%d]\n", task->pid));
- goto abort_call;
- }
- }
-
- ret = (*pfm_cmd_tab[PFM_CMD_IDX(cmd)].cmd_func)(task, ctx, arg, count, regs);
-
-abort_call:
- if (task && task != current) put_task_struct(task);
+ /*
+ * check task is stopped
+ */
+ ret = pfm_check_task_state(ctx, cmd, flags);
+ if (ret) goto abort_locked;
+ }
- return ret;
-}
+ ret = (*pfm_cmd_tab[PFM_CMD_IDX(cmd)].cmd_func)(ctx, args_k, count, regs);
-/*
- * send SIGPROF to register task, must be invoked when it
- * is safe to send a signal, e.g., not holding any runqueue
- * related locks.
- */
-static int
-pfm_notify_user(pfm_context_t *ctx)
-{
- struct siginfo si;
- int ret;
+ call_made = 1;
- if (ctx->ctx_notify_task == NULL) {
- DBprintk(("[%d] no notifier\n", current->pid));
- return -EINVAL;
+abort_locked:
+ if (ctx) {
+ DPRINT(("[%d] context unlocked\n", current->pid));
+ UNPROTECT_CTX(ctx, flags);
+ fput(file);
}
- si.si_errno = 0;
- si.si_addr = NULL;
- si.si_pid = current->pid; /* who is sending */
- si.si_signo = SIGPROF;
- si.si_code = PROF_OVFL;
+ /* copy argument back to user, if needed */
+ if (call_made && PFM_CMD_RW_ARG(cmd) && copy_to_user(arg, args_k, base_sz*count)) ret = -EFAULT;
- si.si_pfm_ovfl[0] = ctx->ctx_ovfl_regs[0];
+error_args:
+ if (args_k) kfree(args_k);
- /*
- * when the target of the signal is not ourself, we have to be more
- * careful. The notify_task may being cleared by the target task itself
- * in release_thread(). We must ensure mutual exclusion here such that
- * the signal is delivered (even to a dying task) safely.
- */
+ return ret;
+}
- if (ctx->ctx_notify_task != current) {
- /*
- * grab the notification lock for this task
- * This guarantees that the sequence: test + send_signal
- * is atomic with regards to the ctx_notify_task field.
- *
- * We need a spinlock and not just an atomic variable for this.
- *
- */
- spin_lock(&ctx->ctx_lock);
+static void
+pfm_resume_after_ovfl(pfm_context_t *ctx, unsigned long ovfl_regs, struct pt_regs *regs)
+{
+ pfm_buffer_fmt_t *fmt = ctx->ctx_buf_fmt;
+ pfm_ovfl_ctrl_t rst_ctrl;
+ int ret = 0;
- /*
- * now notify_task cannot be modified until we're done
- * if NULL, they it got modified while we were in the handler
- */
- if (ctx->ctx_notify_task == NULL) {
+ /*
+ * Unlock sampling buffer and reset index atomically
+ * XXX: not really needed when blocking
+ */
+ if (CTX_HAS_SMPL(ctx)) {
- spin_unlock(&ctx->ctx_lock);
+ rst_ctrl.stop_monitoring = 1;
+ rst_ctrl.reset_pmds = PFM_PMD_NO_RESET;
- /*
- * If we've lost the notified task, then we will run
- * to completion wbut keep the PMU frozen. Results
- * will be incorrect anyway. We do not kill task
- * to leave it possible to attach perfmon context
- * to already running task.
- */
- printk("perfmon: pfm_notify_user() lost notify_task\n");
- DBprintk_ovfl(("notification task has disappeared !\n"));
+ /* XXX: check return value */
+ if (fmt->fmt_restart)
+ ret = (*fmt->fmt_restart)(current, &rst_ctrl, ctx->ctx_smpl_hdr, regs);
+ } else {
+ rst_ctrl.stop_monitoring = 0;
+ rst_ctrl.reset_pmds = PFM_PMD_LONG_RESET;
+ }
- /* we cannot afford to block now */
- ctx->ctx_fl_block = 0;
+ if (ret == 0) {
+ if (rst_ctrl.reset_pmds != PFM_PMD_NO_RESET)
+ pfm_reset_regs(ctx, &ovfl_regs, rst_ctrl.reset_pmds);
- return -EINVAL;
+ if (rst_ctrl.stop_monitoring == 0) {
+ DPRINT(("resuming monitoring\n"));
+ if (CTX_IS_MASKED(ctx)) pfm_restore_monitoring(current);
+ } else {
+ DPRINT(("stopping monitoring\n"));
+ //pfm_stop_monitoring(current, regs);
}
+ CTX_LOADED(ctx);
+ }
+}
- /*
- * required by send_sig_info() to make sure the target
- * task does not disappear on us.
- */
- read_lock(&tasklist_lock);
+
+/*
+ * context MUST BE LOCKED when calling
+ * can only be called for current
+ */
+static void
+pfm_context_force_terminate(pfm_context_t *ctx, struct pt_regs *regs)
+{
+ if (ctx->ctx_fl_system) {
+ printk(KERN_ERR "perfmon: pfm_context_force_terminate [%d] is system-wide\n", current->pid);
+ return;
}
/*
- * in this case, we don't stop the task, we let it go on. It will
- * necessarily go to the signal handler (if any) when it goes back to
- * user mode.
- */
- DBprintk_ovfl(("[%d] sending notification to [%d]\n",
- current->pid, ctx->ctx_notify_task->pid));
+ * we stop the whole thing, we do no need to flush
+ * we know we WERE masked
+ */
+ pfm_clear_psr_up();
+ ia64_psr(regs)->up = 0;
+ ia64_psr(regs)->sp = 1;
- /*
- * this call is safe in an interrupt handler, so does read_lock() on tasklist_lock
+ /*
+ * disconnect the task from the context and vice-versa
*/
- ret = send_sig_info(SIGPROF, &si, ctx->ctx_notify_task);
- if (ret) {
- printk("perfmon: send_sig_info(process %d, SIGPROF)=%d\n",
- ctx->ctx_notify_task->pid, ret);
- }
+ current->thread.pfm_context = NULL;
+ current->thread.flags &= ~IA64_THREAD_PM_VALID;
+ ctx->ctx_task = NULL;
+
+ /*
+ * switch to terminated state
+ */
+ CTX_TERMINATED(ctx);
+
+ DPRINT(("context <%d> terminated for [%d]\n", ctx->ctx_fd, current->pid));
+
+ /*
+ * and wakeup controlling task, indicating we are now disconnected
+ */
+ wake_up_interruptible(&ctx->ctx_zombieq);
/*
- * now undo the protections in order
+ * given that context is still locked, the controlling
+ * task will only get access when we return from
+ * pfm_handle_work().
*/
- if (ctx->ctx_notify_task != current) {
- read_unlock(&tasklist_lock);
- spin_unlock(&ctx->ctx_lock);
- }
- return ret;
}
+static int pfm_ovfl_notify_user(pfm_context_t *ctx, unsigned long ovfl_pmds);
+
void
-pfm_ovfl_block_reset(void)
+pfm_handle_work(void)
{
- struct thread_struct *th = ¤t->thread;
- pfm_context_t *ctx = current->thread.pfm_context;
+ pfm_context_t *ctx;
+ struct pt_regs *regs;
+ unsigned long flags;
+ unsigned long ovfl_regs;
unsigned int reason;
int ret;
- /*
- * clear the flag, to make sure we won't get here
- * again
- */
- th->pfm_ovfl_block_reset = 0;
- clear_thread_flag(TIF_NOTIFY_RESUME);
-
- /*
- * do some sanity checks first
- */
- if (!ctx) {
+ ctx = PFM_GET_CTX(current);
+ if (ctx == NULL) {
printk(KERN_ERR "perfmon: [%d] has no PFM context\n", current->pid);
return;
}
+
+ PROTECT_CTX(ctx, flags);
+
+ PFM_SET_WORK_PENDING(current, 0);
+
+ pfm_clear_task_notify();
+
+ regs = ia64_task_regs(current);
+
/*
* extract reason for being here and clear
*/
reason = ctx->ctx_fl_trap_reason;
ctx->ctx_fl_trap_reason = PFM_TRAP_REASON_NONE;
- DBprintk(("[%d] reason=%d\n", current->pid, reason));
+ DPRINT(("[%d] reason=%d\n", current->pid, reason));
/*
- * just here for a reset (non-blocking context only)
+ * must be done before we check non-blocking mode
*/
- if (reason == PFM_TRAP_REASON_RESET) goto non_blocking;
+ if (ctx->ctx_fl_going_zombie || CTX_IS_ZOMBIE(ctx)) goto do_zombie;
- /*
- * first notify user. This can fail if notify_task has disappeared.
- */
- if (reason == PFM_TRAP_REASON_SIG || reason == PFM_TRAP_REASON_BLOCKSIG) {
- ret = pfm_notify_user(ctx);
- if (ret) return;
- }
+ ovfl_regs = ctx->ctx_ovfl_regs[0];
- /*
- * came here just to signal (non-blocking)
- */
- if (reason == PFM_TRAP_REASON_SIG) return;
+ //if (CTX_OVFL_NOBLOCK(ctx)) goto skip_blocking;
+ if (reason == PFM_TRAP_REASON_RESET) goto skip_blocking;
+
+ UNPROTECT_CTX(ctx, flags);
- DBprintk(("[%d] before sleeping\n", current->pid));
+ DPRINT(("before block sleeping\n"));
/*
* may go through without blocking on SMP systems
*/
ret = down_interruptible(&ctx->ctx_restart_sem);
- DBprintk(("[%d] after sleeping ret=%d\n", current->pid, ret));
+ DPRINT(("after block sleeping ret=%d\n", ret));
+ PROTECT_CTX(ctx, flags);
+
+ if (ctx->ctx_fl_going_zombie) {
+do_zombie:
+ DPRINT(("context is zombie, bailing out\n"));
+ pfm_context_force_terminate(ctx, regs);
+ goto nothing_to_do;
+ }
/*
* in case of interruption of down() we don't restart anything
*/
- if (ret >= 0) {
-
-non_blocking:
- /* we reactivate on context switch */
- ctx->ctx_fl_frozen = 0;
- /*
- * the ovfl_sem is cleared by the restart task and this is safe because we always
- * use the local reference
- */
-
- pfm_reset_regs(ctx, ctx->ctx_ovfl_regs, PFM_PMD_LONG_RESET);
-
- ctx->ctx_ovfl_regs[0] = 0UL;
+ if (ret < 0) goto nothing_to_do;
- /*
- * Unlock sampling buffer and reset index atomically
- * XXX: not really needed when blocking
- */
- if (CTX_HAS_SMPL(ctx)) {
- ctx->ctx_psb->psb_hdr->hdr_count = 0;
- ctx->ctx_psb->psb_index = 0;
- }
+skip_blocking:
+ pfm_resume_after_ovfl(ctx, ovfl_regs, regs);
+ ctx->ctx_ovfl_regs[0] = 0UL;
- pfm_unfreeze_pmu();
+nothing_to_do:
- /* state restored, can go back to work (user mode) */
- }
+ UNPROTECT_CTX(ctx, flags);
}
-/*
- * This function will record an entry in the sampling if it is not full already.
- * Return:
- * 0 : buffer is not full (did not BECOME full: still space or was already full)
- * 1 : buffer is full (recorded the last entry)
- */
static int
-pfm_record_sample(struct task_struct *task, pfm_context_t *ctx, unsigned long ovfl_mask, struct pt_regs *regs)
+pfm_notify_user(pfm_context_t *ctx, pfm_msg_t *msg)
{
- pfm_smpl_buffer_desc_t *psb = ctx->ctx_psb;
- unsigned long *e, m, idx;
- perfmon_smpl_entry_t *h;
- int j;
+ if (CTX_IS_ZOMBIE(ctx)) {
+ DPRINT(("ignoring overflow notification, owner is zombie\n"));
+ return 0;
+ }
+ DPRINT(("[%d] waking up somebody\n", current->pid));
- idx = ia64_fetch_and_add(1, &psb->psb_index);
- DBprintk_ovfl(("recording index=%ld entries=%ld\n", idx-1, psb->psb_entries));
+ if (msg) wake_up_interruptible(&ctx->ctx_msgq_wait);
/*
- * XXX: there is a small chance that we could run out on index before resetting
- * but index is unsigned long, so it will take some time.....
- * We use > instead of == because fetch_and_add() is off by one (see below)
- *
- * This case can happen in non-blocking mode or with multiple processes.
- * For non-blocking, we need to reload and continue.
- */
- if (idx > psb->psb_entries) return 0;
+ * safe, we are not in intr handler, nor in ctxsw when
+ * we come here
+ */
+ kill_fasync (&ctx->ctx_async_queue, SIGIO, POLL_IN);
+
+ return 0;
+}
- /* first entry is really entry 0, not 1 caused by fetch_and_add */
- idx--;
+static int
+pfm_ovfl_notify_user(pfm_context_t *ctx, unsigned long ovfl_pmds)
+{
+ pfm_msg_t *msg = NULL;
- h = (perfmon_smpl_entry_t *)(((char *)psb->psb_addr) + idx*(psb->psb_entry_size));
+ if (ctx->ctx_fl_no_msg == 0) {
+ msg = pfm_get_new_msg(ctx);
+ if (msg == NULL) {
+ printk(KERN_ERR "perfmon: pfm_ovfl_notify_user no more notification msgs\n");
+ return -1;
+ }
- /*
- * initialize entry header
- */
- h->pid = current->pid;
- h->cpu = get_cpu();
- h->last_reset_value = ovfl_mask ? ctx->ctx_soft_pmds[ffz(~ovfl_mask)].lval : 0UL;
- h->ip = regs ? regs->cr_iip | ((regs->cr_ipsr >> 41) & 0x3): 0x0UL;
- h->regs = ovfl_mask; /* which registers overflowed */
+ msg->pfm_ovfl_msg.msg_type = PFM_MSG_OVFL;
+ msg->pfm_ovfl_msg.msg_ctx_fd = ctx->ctx_fd;
+ msg->pfm_ovfl_msg.msg_tstamp = ia64_get_itc(); /* relevant on UP only */
+ msg->pfm_ovfl_msg.msg_active_set = 0;
+ msg->pfm_ovfl_msg.msg_ovfl_pmds[0] = ovfl_pmds;
+ msg->pfm_ovfl_msg.msg_ovfl_pmds[1] = msg->pfm_ovfl_msg.msg_ovfl_pmds[2] = msg->pfm_ovfl_msg.msg_ovfl_pmds[3] = 0UL;
- /* guaranteed to monotonically increase on each cpu */
- h->stamp = pfm_get_stamp();
+ }
- /* position for first pmd */
- e = (unsigned long *)(h+1);
+ DPRINT(("ovfl msg: msg=%p no_msg=%d fd=%d pid=%d ovfl_pmds=0x%lx\n",
+ msg,
+ ctx->ctx_fl_no_msg,
+ ctx->ctx_fd,
+ current->pid,
+ ovfl_pmds));
- /*
- * selectively store PMDs in increasing index number
- */
- m = ctx->ctx_smpl_regs[0];
- for (j=0; m; m >>=1, j++) {
+ return pfm_notify_user(ctx, msg);
+}
- if ((m & 0x1) == 0) continue;
+static int
+pfm_end_notify_user(pfm_context_t *ctx)
+{
+ pfm_msg_t *msg;
- if (PMD_IS_COUNTING(j)) {
- *e = pfm_read_soft_counter(ctx, j);
- } else {
- *e = ia64_get_pmd(j); /* slow */
- }
- DBprintk_ovfl(("e=%p pmd%d =0x%lx\n", (void *)e, j, *e));
- e++;
+ msg = pfm_get_new_msg(ctx);
+ if (msg == NULL) {
+ printk(KERN_ERR "perfmon: pfm_end_notify_user no more notification msgs\n");
+ return -1;
}
- pfm_stats[h->cpu].pfm_recorded_samples_count++;
- /*
- * make the new entry visible to user, needs to be atomic
- */
- ia64_fetch_and_add(1, &psb->psb_hdr->hdr_count);
+ msg->pfm_end_msg.msg_type = PFM_MSG_END;
+ msg->pfm_end_msg.msg_ctx_fd = ctx->ctx_fd;
+ msg->pfm_ovfl_msg.msg_tstamp = ia64_get_itc(); /* relevant on UP only */
- DBprintk_ovfl(("index=%ld entries=%ld hdr_count=%ld\n",
- idx, psb->psb_entries, psb->psb_hdr->hdr_count));
- /*
- * sampling buffer full ?
- */
- if (idx == (psb->psb_entries-1)) {
- DBprintk_ovfl(("sampling buffer full\n"));
- /*
- * XXX: must reset buffer in blocking mode and lost notified
- */
- pfm_stats[h->cpu].pfm_full_smpl_buffer_count++;
- put_cpu();
- return 1;
- }
- put_cpu();
- return 0;
+ DPRINT(("end msg: msg=%p no_msg=%d ctx_fd=%d pid=%d\n",
+ msg,
+ ctx->ctx_fl_no_msg,
+ ctx->ctx_fd, current->pid));
+
+ return pfm_notify_user(ctx, msg);
}
/*
* main overflow processing routine.
- * it can be called from the interrupt path or explicitly during the context switch code
- * Arguments:
- * mode: 0=coming from PMU interrupt, 1=coming from ctxsw
- *
- * Return:
- * new value of pmc[0]. if 0x0 then unfreeze, else keep frozen
+ * it can be called from the interrupt path or explicitly during the context switch code
*/
-static unsigned long
-pfm_overflow_handler(int mode, struct task_struct *task, pfm_context_t *ctx, u64 pmc0, struct pt_regs *regs)
+static void
+pfm_overflow_handler(struct task_struct *task, pfm_context_t *ctx, u64 pmc0, struct pt_regs *regs)
{
- struct thread_struct *t;
+ pfm_ovfl_arg_t ovfl_arg;
unsigned long mask;
unsigned long old_val;
- unsigned long ovfl_notify = 0UL, ovfl_pmds = 0UL;
- int i;
- int ret = 1;
- /*
- * It is never safe to access the task for which the overflow interrupt is destinated
- * using the current variable as the interrupt may occur in the middle of a context switch
- * where current does not hold the task that is running yet.
- *
- * For monitoring, however, we do need to get access to the task which caused the overflow
- * to account for overflow on the counters.
- *
- * We accomplish this by maintaining a current owner of the PMU per CPU. During context
- * switch the ownership is changed in a way such that the reflected owner is always the
- * valid one, i.e. the one that caused the interrupt.
- */
+ unsigned long ovfl_notify = 0UL, ovfl_pmds = 0UL, smpl_pmds = 0UL;
+ pfm_ovfl_ctrl_t ovfl_ctrl;
+ unsigned int i, j, has_smpl, first_pmd = ~0U;
+ int must_notify = 0;
- preempt_disable();
+ if (unlikely(CTX_IS_ZOMBIE(ctx))) goto stop_monitoring;
- t = &task->thread;
-
- /*
- * XXX: debug test
- * Don't think this could happen given upfront tests
- */
- if ((t->flags & IA64_THREAD_PM_VALID) == 0 && ctx->ctx_fl_system == 0) {
- printk(KERN_DEBUG "perfmon: Spurious overflow interrupt: process %d not "
- "using perfmon\n", task->pid);
- preempt_enable_no_resched();
- return 0x1;
- }
/*
* sanity test. Should never happen
*/
- if ((pmc0 & 0x1) == 0) {
- printk(KERN_DEBUG "perfmon: pid %d pmc0=0x%lx assumption error for freeze bit\n",
- task->pid, pmc0);
- preempt_enable_no_resched();
- return 0x0;
- }
+ if (unlikely((pmc0 & 0x1) == 0)) goto sanity_check;
mask = pmc0 >> PMU_FIRST_COUNTER;
- DBprintk_ovfl(("pmc0=0x%lx pid=%d iip=0x%lx, %s"
- " mode used_pmds=0x%lx used_pmcs=0x%lx reload_pmcs=0x%lx\n",
- pmc0, task->pid, (regs ? regs->cr_iip : 0),
+ DPRINT_ovfl(("pmc0=0x%lx pid=%d iip=0x%lx, %s"
+ "used_pmds=0x%lx reload_pmcs=0x%lx\n",
+ pmc0,
+ task ? task->pid: -1,
+ (regs ? regs->cr_iip : 0),
CTX_OVFL_NOBLOCK(ctx) ? "nonblocking" : "blocking",
ctx->ctx_used_pmds[0],
- ctx->ctx_used_pmcs[0],
ctx->ctx_reload_pmcs[0]));
+ has_smpl = CTX_HAS_SMPL(ctx);
+
/*
- * First we update the virtual counters
+ * first we update the virtual counters
+ * assume there was a prior ia64_srlz_d() issued
*/
for (i = PMU_FIRST_COUNTER; mask ; i++, mask >>= 1) {
/* skip pmd which did not overflow */
if ((mask & 0x1) == 0) continue;
- DBprintk_ovfl(("pmd[%d] overflowed hw_pmd=0x%lx soft_pmd=0x%lx\n",
- i, ia64_get_pmd(i), ctx->ctx_soft_pmds[i].val));
+ DPRINT_ovfl(("pmd[%d] overflowed hw_pmd=0x%lx ctx_pmd=0x%lx\n",
+ i, ia64_get_pmd(i), ctx->ctx_pmds[i].val));
/*
* Note that the pmd is not necessarily 0 at this point as qualified events
* taken into consideration here but will be with any read of the pmd via
* pfm_read_pmds().
*/
- old_val = ctx->ctx_soft_pmds[i].val;
- ctx->ctx_soft_pmds[i].val += 1 + pmu_conf.ovfl_val;
+ old_val = ctx->ctx_pmds[i].val;
+ ctx->ctx_pmds[i].val += 1 + pmu_conf.ovfl_val;
/*
* check for overflow condition
*/
- if (old_val > ctx->ctx_soft_pmds[i].val) {
+ if (likely(old_val > ctx->ctx_pmds[i].val)) {
ovfl_pmds |= 1UL << i;
- if (PMC_OVFL_NOTIFY(ctx, i)) {
- ovfl_notify |= 1UL << i;
+ /*
+ * keep track of pmds of interest for samples
+ */
+ if (has_smpl) {
+ if (first_pmd == ~0U) first_pmd = i;
+ smpl_pmds |= ctx->ctx_pmds[i].smpl_pmds[0];
}
+
+ if (PMC_OVFL_NOTIFY(ctx, i)) ovfl_notify |= 1UL << i;
}
- DBprintk_ovfl(("soft_pmd[%d].val=0x%lx old_val=0x%lx pmd=0x%lx ovfl_pmds=0x%lx ovfl_notify=0x%lx\n",
- i, ctx->ctx_soft_pmds[i].val, old_val,
- ia64_get_pmd(i) & pmu_conf.ovfl_val, ovfl_pmds, ovfl_notify));
+
+ DPRINT_ovfl(("ctx_pmd[%d].val=0x%lx old_val=0x%lx pmd=0x%lx ovfl_pmds=0x%lx ovfl_notify=0x%lx first_pmd=%u smpl_pmds=0x%lx\n",
+ i, ctx->ctx_pmds[i].val, old_val,
+ ia64_get_pmd(i) & pmu_conf.ovfl_val, ovfl_pmds, ovfl_notify, first_pmd, smpl_pmds));
}
+ ovfl_ctrl.notify_user = ovfl_notify ? 1 : 0;
+ ovfl_ctrl.reset_pmds = ovfl_pmds && ovfl_notify == 0UL ? 1 : 0;
+ ovfl_ctrl.block = ovfl_notify ? 1 : 0;
+ ovfl_ctrl.stop_monitoring = ovfl_notify ? 1 : 0;
+
/*
- * check for sampling buffer
- *
- * if present, record sample only when a 64-bit counter has overflowed.
- * We propagate notification ONLY when buffer becomes full.
+ * when an overflow is detected, check for sampling buffer, if present, invoke
+ * record() callback.
*/
- if(CTX_HAS_SMPL(ctx) && ovfl_pmds) {
- ret = pfm_record_sample(task, ctx, ovfl_pmds, regs);
- if (ret == 1) {
- /*
- * Sampling buffer became full
- * If no notication was requested, then we reset buffer index
- * and reset registers (done below) and resume.
- * If notification requested, then defer reset until pfm_restart()
- */
- if (ovfl_notify == 0UL) {
- ctx->ctx_psb->psb_hdr->hdr_count = 0UL;
- ctx->ctx_psb->psb_index = 0UL;
+ if (ovfl_pmds && has_smpl) {
+ unsigned long start_cycles;
+ int this_cpu = smp_processor_id();
+
+ ovfl_arg.ovfl_pmds[0] = ovfl_pmds;
+ ovfl_arg.ovfl_notify[0] = ovfl_notify;
+ ovfl_arg.ovfl_ctrl = ovfl_ctrl;
+ ovfl_arg.smpl_pmds[0] = smpl_pmds;
+
+ prefetch(ctx->ctx_smpl_hdr);
+
+ ovfl_arg.pmd_value = ctx->ctx_pmds[first_pmd].val;
+ ovfl_arg.pmd_last_reset = ctx->ctx_pmds[first_pmd].lval;
+ ovfl_arg.pmd_eventid = ctx->ctx_pmds[first_pmd].eventid;
+
+ /*
+ * copy values of pmds of interest. Sampling format may copy them
+ * into sampling buffer.
+ */
+ if (smpl_pmds) {
+ for(i=0, j=0; smpl_pmds; i++, smpl_pmds >>=1) {
+ if ((smpl_pmds & 0x1) == 0) continue;
+ ovfl_arg.smpl_pmds_values[j++] = PMD_IS_COUNTING(i) ? pfm_read_soft_counter(ctx, i) : ia64_get_pmd(i);
}
- } else {
- /*
- * sample recorded in buffer, no need to notify user
- */
- ovfl_notify = 0UL;
}
- }
- /*
- * No overflow requiring a user level notification
- */
- if (ovfl_notify == 0UL) {
- if (ovfl_pmds)
- pfm_reset_regs(ctx, &ovfl_pmds, PFM_PMD_SHORT_RESET);
- preempt_enable_no_resched();
- return 0x0UL;
- }
+ pfm_stats[this_cpu].pfm_smpl_handler_calls++;
+ start_cycles = ia64_get_itc();
- /*
- * keep track of what to reset when unblocking
- */
- ctx->ctx_ovfl_regs[0] = ovfl_pmds;
+ /*
+ * call custom buffer format record (handler) routine
+ */
+ (*ctx->ctx_buf_fmt->fmt_handler)(task, ctx->ctx_smpl_hdr, &ovfl_arg, regs);
- DBprintk_ovfl(("block=%d notify [%d] current [%d]\n",
- ctx->ctx_fl_block,
- ctx->ctx_notify_task ? ctx->ctx_notify_task->pid: -1,
- current->pid ));
+ pfm_stats[this_cpu].pfm_smpl_handler_cycles += ia64_get_itc() - start_cycles;
- /*
- * ctx_notify_task could already be NULL, checked in pfm_notify_user()
- */
- if (CTX_OVFL_NOBLOCK(ctx) == 0 && ctx->ctx_notify_task != task) {
- ctx->ctx_fl_trap_reason = PFM_TRAP_REASON_BLOCKSIG;
- } else {
- ctx->ctx_fl_trap_reason = PFM_TRAP_REASON_SIG;
+ ovfl_pmds = ovfl_arg.ovfl_pmds[0];
+ ovfl_notify = ovfl_arg.ovfl_notify[0];
+ ovfl_ctrl = ovfl_arg.ovfl_ctrl;
+ }
+
+ if (ovfl_pmds && ovfl_ctrl.reset_pmds) {
+ pfm_reset_regs(ctx, &ovfl_pmds, ovfl_ctrl.reset_pmds);
}
- /*
- * we cannot block in system wide mode and we do not go
- * through the PMU ctxsw code. Therefore we can generate
- * the notification here. In system wide mode, the current
- * task maybe different from the task controlling the session
- * on this CPU, therefore owner can be different from current.
- *
- * In per-process mode, this function gets called from
- * the interrupt handler or pfm_load_regs(). The mode argument
- * tells where we are coming from. When coming from the interrupt
- * handler, it is safe to notify (send signal) right here because
- * we do not hold any runqueue locks needed by send_sig_info().
- *
- * However when coming from ctxsw, we cannot send the signal here.
- * It must be deferred until we are sure we do not hold any runqueue
- * related locks. The current task maybe different from the owner
- * only in UP mode. The deferral is implemented using the
- * TIF_NOTIFY_RESUME mechanism. In this case, the pending work
- * is checked when the task is about to leave the kernel (see
- * entry.S). As of this version of perfmon, a kernel only
- * task cannot be monitored in per-process mode. Therefore,
- * when this function gets called from pfm_load_regs(), we know
- * we have a user level task which will eventually either exit
- * or leave the kernel, and thereby go through the checkpoint
- * for TIF_*.
- */
- if (ctx->ctx_fl_system || mode == 0) {
- pfm_notify_user(ctx);
- ctx->ctx_fl_trap_reason = PFM_TRAP_REASON_NONE;
- } else {
- struct thread_info *info;
+ if (ovfl_notify && ovfl_ctrl.notify_user) {
/*
- * given that TIF_NOTIFY_RESUME is not specific to
- * perfmon, we need to have a second level check to
- * verify the source of the notification.
+ * keep track of what to reset when unblocking
*/
- task->thread.pfm_ovfl_block_reset = 1;
+ ctx->ctx_ovfl_regs[0] = ovfl_pmds;
+
+ if (CTX_OVFL_NOBLOCK(ctx) == 0 && ovfl_ctrl.block) {
+
+ ctx->ctx_fl_trap_reason = PFM_TRAP_REASON_BLOCK;
+
+ /*
+ * set the perfmon specific checking pending work
+ */
+ PFM_SET_WORK_PENDING(task, 1);
+
+ /*
+ * when coming from ctxsw, current still points to the
+ * previous task, therefore we must work with task and not current.
+ */
+ pfm_set_task_notify(task);
+ }
/*
- * when coming from ctxsw, current still points to the
- * previous task, therefore we must work with task and not current.
+ * defer until state is changed (shorten spin window). the context is locked
+ * anyway, so the signal receiver would come spin for nothing.
*/
- info = ((struct thread_info *) ((char *) task + IA64_TASK_SIZE));
- set_bit(TIF_NOTIFY_RESUME, &info->flags);
+ must_notify = 1;
}
+ DPRINT_ovfl(("current [%d] owner [%d] pending=%ld reason=%u ovfl_pmds=0x%lx ovfl_notify=0x%lx stopped=%d\n",
+ current->pid,
+ GET_PMU_OWNER() ? GET_PMU_OWNER()->pid : -1,
+ PFM_GET_WORK_PENDING(task),
+ ctx->ctx_fl_trap_reason,
+ ovfl_pmds,
+ ovfl_notify,
+ ovfl_ctrl.stop_monitoring ? 1 : 0));
+ /*
+ * in case monitoring must be stopped, we toggle the psr bits
+ */
+ if (ovfl_ctrl.stop_monitoring) {
+ pfm_mask_monitoring(task);
+ CTX_MASKED(ctx);
+ }
/*
- * keep the PMU frozen until either pfm_restart() or
- * task completes (non-blocking or notify_task gone).
+ * send notification now
*/
- ctx->ctx_fl_frozen = 1;
+ if (must_notify) pfm_ovfl_notify_user(ctx, ovfl_notify);
- DBprintk_ovfl(("current [%d] owner [%d] mode=%d return pmc0=0x%x must_block=%ld reason=%d\n",
- current->pid,
- PMU_OWNER() ? PMU_OWNER()->pid : -1,
- mode,
- ctx->ctx_fl_frozen ? 0x1 : 0x0,
- t->pfm_ovfl_block_reset,
- ctx->ctx_fl_trap_reason));
+ return;
+
+
+sanity_check:
+ printk(KERN_ERR "perfmon: CPU%d overflow handler [%d] pmc0=0x%lx\n",
+ smp_processor_id(),
+ task ? task->pid : -1,
+ pmc0);
+ return;
- preempt_enable_no_resched();
- return 0x1UL;
+stop_monitoring:
+ /*
+ * in SMP, zombie context is never restored but reclaimed in pfm_load_regs().
+ * Moreover, zombies are also reclaimed in pfm_save_regs(). Therefore we can
+ * come here as zombie only if the task is the current task. In which case, we
+ * can access the PMU hardware directly.
+ *
+ * Note that zombies do have PM_VALID set. So here we do the minimal.
+ *
+ * In case the context was zombified it could not be reclaimed at the time
+ * the monitoring program exited. At this point, the PMU reservation has been
+ * returned, the sampling buffer has been freed. We must convert this call
+ * into a spurious interrupt. However, we must also avoid infinite overflows
+ * by stopping monitoring for this task. We can only come here for a per-task
+ * context. All we need to do is to stop monitoring using the psr bits which
+ * are always task private. By re-enabling secure monitoring, we ensure that
+ * the monitored task will not be able to re-activate monitoring.
+ * The task will eventually be context switched out, at which point the context
+ * will be reclaimed (that includes releasing ownership of the PMU).
+ *
+ * So there might be a window of time where the number of per-task session is zero
+ * yet one PMU might have an owner and get at most one overflow interrupt for a zombie
+ * context. This is safe because if a per-task session comes in, it will push this one
+ * out and by virtue of pfm_save_regs(), this one will disappear. If a system wide
+ * session is forced on that CPU, given that we use task pinning, pfm_save_regs() will
+ * also push our zombie context out.
+ *
+ * Overall pretty hairy stuff....
+ */
+ DPRINT(("ctx is zombie for [%d], converted to spurious\n", task ? task->pid: -1));
+ pfm_clear_psr_up();
+ ia64_psr(regs)->up = 0;
+ ia64_psr(regs)->sp = 1;
+ return;
}
-static irqreturn_t
-pfm_interrupt_handler(int irq, void *arg, struct pt_regs *regs)
+static int
+pfm_do_interrupt_handler(int irq, void *arg, struct pt_regs *regs)
{
- u64 pmc0;
struct task_struct *task;
pfm_context_t *ctx;
+ unsigned long flags;
+ u64 pmc0;
+ int this_cpu = smp_processor_id();
+ int retval = 0;
- pfm_stats[get_cpu()].pfm_ovfl_intr_count++;
+ pfm_stats[this_cpu].pfm_ovfl_intr_count++;
/*
- * if an alternate handler is registered, just bypass the default one
- */
- if (pfm_alternate_intr_handler) {
- (*pfm_alternate_intr_handler->handler)(irq, arg, regs);
- put_cpu();
- return IRQ_HANDLED;
- }
-
- /*
* srlz.d done before arriving here
- *
- * This is slow
*/
- pmc0 = ia64_get_pmc(0);
+ pmc0 = ia64_get_pmc(0);
+
+ task = GET_PMU_OWNER();
+ ctx = GET_PMU_CTX();
/*
* if we have some pending bits set
- * assumes : if any PM[0].bit[63-1] is set, then PMC[0].fr = 1
+ * assumes : if any PMC0.bit[63-1] is set, then PMC0.fr = 1
*/
- if ((pmc0 & ~0x1UL)!=0UL && (task=PMU_OWNER())!= NULL) {
- /*
+ if (PMC0_HAS_OVFL(pmc0) && task) {
+ /*
* we assume that pmc0.fr is always set here
*/
- ctx = task->thread.pfm_context;
/* sanity check */
- if (!ctx) {
- printk(KERN_DEBUG "perfmon: Spurious overflow interrupt: process %d has "
- "no PFM context\n", task->pid);
- put_cpu();
- return IRQ_HANDLED;
- }
+ if (!ctx) goto report_spurious;
- /*
- * assume PMC[0].fr = 1 at this point
- */
- pmc0 = pfm_overflow_handler(0, task, ctx, pmc0, regs);
- /*
- * we can only update pmc0 when the overflow
- * is for the current context or we are in system
- * wide mode. In UP (per-task) the current
- * task may not be the one owning the PMU,
- * same thing for system-wide.
- */
- if (task == current || ctx->ctx_fl_system) {
- /*
- * We always clear the overflow status bits and either unfreeze
- * or keep the PMU frozen.
- */
- ia64_set_pmc(0, pmc0);
- ia64_srlz_d();
- } else {
- task->thread.pmc[0] = pmc0;
+ if (ctx->ctx_fl_system == 0 && (task->thread.flags & IA64_THREAD_PM_VALID) == 0) {
+ printk("perfmon: current [%d] owner = [%d] PMVALID=0 state=%d\n", current->pid, task->pid, ctx->ctx_state);
+ goto report_spurious;
}
+
+ PROTECT_CTX_NOPRINT(ctx, flags);
+
+ pfm_overflow_handler(task, ctx, pmc0, regs);
+
+ UNPROTECT_CTX_NOPRINT(ctx, flags);
+
} else {
- pfm_stats[smp_processor_id()].pfm_spurious_ovfl_intr_count++;
+ pfm_stats[this_cpu].pfm_spurious_ovfl_intr_count++;
+ retval = -1;
+ }
+ /*
+ * keep it unfrozen at all times
+ */
+ pfm_unfreeze_pmu();
+
+ return retval;
+
+report_spurious:
+ printk(KERN_INFO "perfmon: spurious overflow interrupt on CPU%d: process %d has no PFM context\n",
+ this_cpu, task->pid);
+ pfm_unfreeze_pmu();
+ return -1;
+}
+
+static pfm_irq_handler_t
+pfm_interrupt_handler(int irq, void *arg, struct pt_regs *regs)
+{
+ unsigned long m;
+ unsigned long min, max;
+ int this_cpu;
+ int ret;
+
+ this_cpu = smp_processor_id();
+ min = pfm_stats[this_cpu].pfm_ovfl_intr_cycles_min;
+ max = pfm_stats[this_cpu].pfm_ovfl_intr_cycles_max;
+
+ m = ia64_get_itc();
+
+ ret = pfm_do_interrupt_handler(irq, arg, regs);
+
+ m = ia64_get_itc() - m;
+
+ /*
+ * don't measure spurious interrupts
+ */
+ if (ret == 0) {
+ if (m < min) pfm_stats[this_cpu].pfm_ovfl_intr_cycles_min = m;
+ if (m > max) pfm_stats[this_cpu].pfm_ovfl_intr_cycles_max = m;
+ pfm_stats[this_cpu].pfm_ovfl_intr_cycles += m;
}
- put_cpu_no_resched();
- return IRQ_HANDLED;
+ PFM_IRQ_HANDLER_RET();
}
+
/* for debug only */
static int
pfm_proc_info(char *page)
{
char *p = page;
+ pfm_buffer_fmt_t *b;
+ unsigned long psr;
int i;
- p += sprintf(p, "fastctxsw : %s\n", pfm_sysctl.fastctxsw > 0 ? "Yes": "No");
- p += sprintf(p, "ovfl_mask : 0x%lx\n", pmu_conf.ovfl_val);
+ p += sprintf(p, "model : %s\n", pmu_conf.pmu_name);
+ p += sprintf(p, "fastctxsw : %s\n", pfm_sysctl.fastctxsw > 0 ? "Yes": "No");
+ p += sprintf(p, "ovfl_mask : 0x%lx\n", pmu_conf.ovfl_val);
for(i=0; i < NR_CPUS; i++) {
- if (cpu_online(i) == 0) continue;
- p += sprintf(p, "CPU%-2d overflow intrs : %lu\n", i, pfm_stats[i].pfm_ovfl_intr_count);
- p += sprintf(p, "CPU%-2d spurious intrs : %lu\n", i, pfm_stats[i].pfm_spurious_ovfl_intr_count);
- p += sprintf(p, "CPU%-2d recorded samples : %lu\n", i, pfm_stats[i].pfm_recorded_samples_count);
- p += sprintf(p, "CPU%-2d smpl buffer full : %lu\n", i, pfm_stats[i].pfm_full_smpl_buffer_count);
- p += sprintf(p, "CPU%-2d syst_wide : %d\n", i, per_cpu(pfm_syst_info, i) & PFM_CPUINFO_SYST_WIDE ? 1 : 0);
- p += sprintf(p, "CPU%-2d dcr_pp : %d\n", i, per_cpu(pfm_syst_info, i) & PFM_CPUINFO_DCR_PP ? 1 : 0);
- p += sprintf(p, "CPU%-2d exclude idle : %d\n", i, per_cpu(pfm_syst_info, i) & PFM_CPUINFO_EXCL_IDLE ? 1 : 0);
- p += sprintf(p, "CPU%-2d owner : %d\n", i, pmu_owners[i].owner ? pmu_owners[i].owner->pid: -1);
+ if (cpu_is_online(i) == 0) continue;
+ p += sprintf(p, "CPU%-2d overflow intrs : %lu\n", i, pfm_stats[i].pfm_ovfl_intr_count);
+ p += sprintf(p, "CPU%-2d overflow cycles : %lu\n", i, pfm_stats[i].pfm_ovfl_intr_cycles);
+ p += sprintf(p, "CPU%-2d overflow min : %lu\n", i, pfm_stats[i].pfm_ovfl_intr_cycles_min);
+ p += sprintf(p, "CPU%-2d overflow max : %lu\n", i, pfm_stats[i].pfm_ovfl_intr_cycles_max);
+ p += sprintf(p, "CPU%-2d smpl handler calls : %lu\n", i, pfm_stats[i].pfm_smpl_handler_calls);
+ p += sprintf(p, "CPU%-2d smpl handler cycles : %lu\n", i, pfm_stats[i].pfm_smpl_handler_cycles);
+ p += sprintf(p, "CPU%-2d spurious intrs : %lu\n", i, pfm_stats[i].pfm_spurious_ovfl_intr_count);
+ p += sprintf(p, "CPU%-2d sysupdt count : %lu\n", i, pfm_stats[i].pfm_sysupdt_count);
+ p += sprintf(p, "CPU%-2d sysupdt cycles : %lu\n", i, pfm_stats[i].pfm_sysupdt_cycles);
+ p += sprintf(p, "CPU%-2d syst_wide : %d\n" , i, pfm_get_cpu_data(pfm_syst_info, i) & PFM_CPUINFO_SYST_WIDE ? 1 : 0);
+ p += sprintf(p, "CPU%-2d dcr_pp : %d\n" , i, pfm_get_cpu_data(pfm_syst_info, i) & PFM_CPUINFO_DCR_PP ? 1 : 0);
+ p += sprintf(p, "CPU%-2d exclude idle : %d\n" , i, pfm_get_cpu_data(pfm_syst_info, i) & PFM_CPUINFO_EXCL_IDLE ? 1 : 0);
+ p += sprintf(p, "CPU%-2d owner : %d\n" , i, pfm_get_cpu_data(pmu_owner, i) ? pfm_get_cpu_data(pmu_owner, i)->pid: -1);
+ p += sprintf(p, "CPU%-2d context : %p\n" , i, pfm_get_cpu_data(pmu_ctx, i));
+ p += sprintf(p, "CPU%-2d activations : %lu\n", i, pfm_get_cpu_data(pmu_activation_number,i));
}
- LOCK_PFS();
+ if (hweight64(PFM_CPU_ONLINE_MAP) == 1)
+ {
+ psr = pfm_get_psr();
+ ia64_srlz_d();
+ p += sprintf(p, "CPU%-2d psr : 0x%lx\n", smp_processor_id(), psr);
+ p += sprintf(p, "CPU%-2d pmc0 : 0x%lx\n", smp_processor_id(), ia64_get_pmc(0));
+ for(i=4; i < 8; i++) {
+ p += sprintf(p, "CPU%-2d pmc%u : 0x%lx\n", smp_processor_id(), i, ia64_get_pmc(i));
+ p += sprintf(p, "CPU%-2d pmd%u : 0x%lx\n", smp_processor_id(), i, ia64_get_pmd(i));
+ }
+ }
- p += sprintf(p, "proc_sessions : %u\n"
- "sys_sessions : %u\n"
- "sys_use_dbregs : %u\n"
- "ptrace_use_dbregs : %u\n",
- pfm_sessions.pfs_task_sessions,
+ LOCK_PFS();
+ p += sprintf(p, "proc_sessions : %u\n"
+ "sys_sessions : %u\n"
+ "sys_use_dbregs : %u\n"
+ "ptrace_use_dbregs : %u\n",
+ pfm_sessions.pfs_task_sessions,
pfm_sessions.pfs_sys_sessions,
pfm_sessions.pfs_sys_use_dbregs,
pfm_sessions.pfs_ptrace_use_dbregs);
-
UNLOCK_PFS();
+ LOCK_BUF_FMT_LIST();
+
+ for (b = pfm_buffer_fmt_list; b ; b = b->fmt_next) {
+ p += sprintf(p, "format : %02x-%02x-%02x-%02x-%02x-%02x-%02x-%02x-%02x-%02x-%02x-%02x-%02x-%02x-%02x-%02x %s\n",
+ b->fmt_uuid[0],
+ b->fmt_uuid[1],
+ b->fmt_uuid[2],
+ b->fmt_uuid[3],
+ b->fmt_uuid[4],
+ b->fmt_uuid[5],
+ b->fmt_uuid[6],
+ b->fmt_uuid[7],
+ b->fmt_uuid[8],
+ b->fmt_uuid[9],
+ b->fmt_uuid[10],
+ b->fmt_uuid[11],
+ b->fmt_uuid[12],
+ b->fmt_uuid[13],
+ b->fmt_uuid[14],
+ b->fmt_uuid[15],
+ b->fmt_name);
+ }
+ UNLOCK_BUF_FMT_LIST();
+
return p - page;
}
}
/*
- * we come here as soon as PFM_CPUINFO_SYST_WIDE is set. This happens
+ * we come here as soon as local_cpu_data->pfm_syst_wide is set. This happens
* during pfm_enable() hence before pfm_start(). We cannot assume monitoring
- * is active or inactive based on mode. We must rely on the value in
- * cpu_data(i)->pfm_syst_info
+ * is active or inactive based on mode. We must rely on the value in
+ * local_cpu_data->pfm_syst_info
*/
void
-pfm_syst_wide_update_task(struct task_struct *task, unsigned long info, int is_ctxswin)
+pfm_do_syst_wide_update_task(struct task_struct *task, unsigned long info, int is_ctxswin)
{
struct pt_regs *regs;
unsigned long dcr;
unsigned long dcr_pp;
- preempt_disable();
dcr_pp = info & PFM_CPUINFO_DCR_PP ? 1 : 0;
/*
- * pid 0 is guaranteed to be the idle task. There is one such task with pid 0
+ * pid 0 is guaranteed to be the idle task. There is one such task with pid 0
* on every CPU, so we can rely on the pid to identify the idle task.
*/
if ((info & PFM_CPUINFO_EXCL_IDLE) == 0 || task->pid) {
- regs = (struct pt_regs *)((unsigned long) task + IA64_STK_OFFSET);
- regs--;
+ regs = ia64_task_regs(task);
ia64_psr(regs)->pp = is_ctxswin ? dcr_pp : 0;
- preempt_enable();
return;
}
/*
*/
if (dcr_pp) {
dcr = ia64_get_dcr();
- /*
- * context switching in?
+ /*
+ * context switching in?
*/
if (is_ctxswin) {
/* mask monitoring for the idle task */
ia64_set_dcr(dcr & ~IA64_DCR_PP);
pfm_clear_psr_pp();
ia64_srlz_i();
- preempt_enable();
return;
}
- /*
+ /*
* context switching out
- * restore monitoring for next task
+ * restore monitoring for next task
*
- * Due to inlining this odd if-then-else construction generates
+ * Due to inlining this odd if-then-else construction generates
* better code.
*/
ia64_set_dcr(dcr |IA64_DCR_PP);
pfm_set_psr_pp();
ia64_srlz_i();
}
- preempt_enable();
}
void
-pfm_save_regs (struct task_struct *task)
+pfm_syst_wide_update_task(struct task_struct *task, unsigned long info, int is_ctxswin)
{
- pfm_context_t *ctx;
- unsigned long mask;
- u64 psr;
- int i;
-
- preempt_disable();
-
- ctx = task->thread.pfm_context;
-
-
- /*
- * save current PSR: needed because we modify it
- */
- psr = pfm_get_psr();
-
- /*
- * stop monitoring:
- * This is the last instruction which can generate an overflow
- *
- * We do not need to set psr.sp because, it is irrelevant in kernel.
- * It will be restored from ipsr when going back to user level
- */
- pfm_clear_psr_up();
- ia64_srlz_i();
-
- ctx->ctx_saved_psr = psr;
-
-#ifdef CONFIG_SMP
- /*
- * We do not use a lazy scheme in SMP because
- * of the new scheduler which masks interrupts
- * during low-level context switch. So we save
- * all the PMD register we use and restore on
- * ctxsw in.
- *
- * release ownership of this PMU.
- * must be done before we save the registers.
- */
- SET_PMU_OWNER(NULL);
-
- /*
- * save PMDs
- */
- ia64_srlz_d();
-
- mask = ctx->ctx_used_pmds[0];
- for (i=0; mask; i++, mask>>=1) {
- if (mask & 0x1) task->thread.pmd[i] =ia64_get_pmd(i);
- }
-
- /*
- * save pmc0
- */
- task->thread.pmc[0] = ia64_get_pmc(0);
-
- /*
- * force a full reload
- */
- atomic_set(&ctx->ctx_last_cpu, -1);
-#endif
- preempt_enable();
+ unsigned long start, end;
+ pfm_stats[smp_processor_id()].pfm_sysupdt_count++;
+ start = ia64_get_itc();
+ pfm_do_syst_wide_update_task(task, info, is_ctxswin);
+ end = ia64_get_itc();
+ pfm_stats[smp_processor_id()].pfm_sysupdt_cycles += end-start;
}
-static void
-pfm_lazy_save_regs (struct task_struct *task)
+#ifdef CONFIG_SMP
+void
+pfm_save_regs(struct task_struct *task)
{
pfm_context_t *ctx;
- struct thread_struct *t;
- unsigned long mask;
- int i;
-
- preempt_disable();
- DBprintk(("on [%d] by [%d]\n", task->pid, current->pid));
-
- t = &task->thread;
- ctx = task->thread.pfm_context;
-
- /*
- * do not own the PMU
- */
- SET_PMU_OWNER(NULL);
+ struct thread_struct *t;
+ unsigned long flags;
+ u64 psr;
- ia64_srlz_d();
+ ctx = PFM_GET_CTX(task);
+ if (ctx == NULL) goto save_error;
+ t = &task->thread;
/*
- * XXX needs further optimization.
- * Also must take holes into account
+ * we always come here with interrupts ALREADY disabled by
+ * the scheduler. So we simply need to protect against concurrent
+ * access, not CPU concurrency.
*/
- mask = ctx->ctx_used_pmds[0];
- for (i=0; mask; i++, mask>>=1) {
- if (mask & 0x1) t->pmd[i] =ia64_get_pmd(i);
- }
+ flags = pfm_protect_ctx_ctxsw(ctx);
- /* save pmc0 */
- t->pmc[0] = ia64_get_pmc(0);
+ if (CTX_IS_ZOMBIE(ctx)) {
+ struct pt_regs *regs = ia64_task_regs(task);
- /* not owned by this CPU */
- atomic_set(&ctx->ctx_last_cpu, -1);
- preempt_enable();
-}
+ pfm_clear_psr_up();
-void
-pfm_load_regs (struct task_struct *task)
-{
- struct thread_struct *t;
- pfm_context_t *ctx;
- struct task_struct *owner;
- unsigned long mask;
- u64 psr;
- int i;
+ DPRINT(("ctx zombie, forcing cleanup for [%d]\n", task->pid));
- preempt_disable();
+ pfm_force_cleanup(ctx, regs);
- owner = PMU_OWNER();
- ctx = task->thread.pfm_context;
- t = &task->thread;
+ BUG_ON(ctx->ctx_smpl_hdr);
- if (ctx == NULL) {
- preempt_enable();
- printk("perfmon: pfm_load_regs: null ctx for [%d]\n", task->pid);
+ pfm_unprotect_ctx_ctxsw(ctx, flags);
+
+ pfm_context_free(ctx);
return;
}
/*
- * we restore ALL the debug registers to avoid picking up
- * stale state.
- *
- * This must be done even when the task is still the owner
- * as the registers may have been modified via ptrace()
- * (not perfmon) by the previous task.
- *
- * XXX: dealing with this in a lazy fashion requires modifications
- * to the way the the debug registers are managed. This is will done
- * in the next version of perfmon.
+ * sanity check
*/
- if (ctx->ctx_fl_using_dbreg) {
- for (i=0; i < (int) pmu_conf.num_ibrs; i++) {
- ia64_set_ibr(i, t->ibr[i]);
- }
- ia64_srlz_i();
- for (i=0; i < (int) pmu_conf.num_dbrs; i++) {
- ia64_set_dbr(i, t->dbr[i]);
- }
- ia64_srlz_d();
- }
+ if (ctx->ctx_last_activation != GET_ACTIVATION()) {
+ DPRINT(("ctx_activation=%lu activation=%lu state=%d: no save\n",
+ ctx->ctx_last_activation,
+ GET_ACTIVATION(), ctx->ctx_state));
- /*
- * if we were the last user, then nothing to do except restore psr
- * this path cannot be used in SMP
- */
- if (owner == task) {
- if ((unsigned int) atomic_read(&ctx->ctx_last_cpu) != smp_processor_id())
- DBprintk(("invalid last_cpu=%d for [%d]\n",
- atomic_read(&ctx->ctx_last_cpu), task->pid));
+ pfm_unprotect_ctx_ctxsw(ctx, flags);
- psr = ctx->ctx_saved_psr;
- pfm_set_psr_l(psr);
- preempt_enable();
return;
}
/*
- * someone else is still using the PMU, first push it out and
- * then we'll be able to install our stuff !
- *
- * not possible in SMP
+ * save current PSR: needed because we modify it
*/
- if (owner) pfm_lazy_save_regs(owner);
+ psr = pfm_get_psr();
/*
- * To avoid leaking information to the user level when psr.sp=0,
- * we must reload ALL implemented pmds (even the ones we don't use).
- * In the kernel we only allow PFM_READ_PMDS on registers which
- * we initialized or requested (sampling) so there is no risk there.
+ * stop monitoring:
+ * This is the last instruction which may generate an overflow
*
- * As an optimization, we will only reload the PMD that we use when
- * the context is in protected mode, i.e. psr.sp=1 because then there
- * is no leak possible.
+ * We do not need to set psr.sp because, it is irrelevant in kernel.
+ * It will be restored from ipsr when going back to user level
*/
- mask = pfm_sysctl.fastctxsw || ctx->ctx_fl_protected ? ctx->ctx_used_pmds[0] : ctx->ctx_reload_pmds[0];
- for (i=0; mask; i++, mask>>=1) {
- if (mask & 0x1) ia64_set_pmd(i, t->pmd[i] & pmu_conf.ovfl_val);
- }
-
- /*
- * PMC0 is never set in the mask because it is always restored
- * separately.
- *
- * ALL PMCs are systematically reloaded, unused registers
- * get their default (PAL reset) values to avoid picking up
- * stale configuration.
- */
- mask = ctx->ctx_reload_pmcs[0];
- for (i=0; mask; i++, mask>>=1) {
- if (mask & 0x1) ia64_set_pmc(i, t->pmc[i]);
- }
+ pfm_clear_psr_up();
/*
- * manually invoke core interrupt handler
- * if the task had a pending overflow when it was ctxsw out.
- * Side effect on ctx_fl_frozen is possible.
+ * keep a copy of the saved psr (for reload)
*/
- if (t->pmc[0] & ~0x1) {
- t->pmc[0] = pfm_overflow_handler(1, task, ctx, t->pmc[0], NULL);
- }
+ ctx->ctx_saved_psr = psr;
/*
- * unfreeze PMU if possible
+ * release ownership of this PMU.
+ * PM interrupts are masked, so nothing
+ * can happen.
*/
- if (ctx->ctx_fl_frozen == 0) pfm_unfreeze_pmu();
-
- atomic_set(&ctx->ctx_last_cpu, smp_processor_id());
-
- SET_PMU_OWNER(task);
+ SET_PMU_OWNER(NULL, NULL);
/*
- * restore the psr we changed in pfm_save_regs()
+ * we systematically save the PMD as we have no
+ * guarantee we will be scheduled at that same
+ * CPU again.
*/
- psr = ctx->ctx_saved_psr;
- preempt_enable();
- pfm_set_psr_l(psr);
-}
-
-/*
- * XXX: make this routine able to work with non current context
- */
-static void
-pfm_reset_pmu(struct task_struct *task)
-{
- struct thread_struct *t = &task->thread;
- pfm_context_t *ctx = t->pfm_context;
- int i;
-
- if (task != current) {
- printk("perfmon: invalid task in pfm_reset_pmu()\n");
- return;
- }
- preempt_disable();
-
- /* Let's make sure the PMU is frozen */
- pfm_freeze_pmu();
+ pfm_save_pmds(t->pmds, ctx->ctx_used_pmds[0]);
/*
- * install reset values for PMC. We skip PMC0 (done above)
- * XX: good up to 64 PMCS
+ * save pmc0 ia64_srlz_d() done in pfm_save_pmds()
+ * we will need it on the restore path to check
+ * for pending overflow.
*/
- for (i=1; (pmu_conf.pmc_desc[i].type & PFM_REG_END) == 0; i++) {
- if ((pmu_conf.pmc_desc[i].type & PFM_REG_IMPL) == 0) continue;
- ia64_set_pmc(i, PMC_DFL_VAL(i));
- /*
- * When restoring context, we must restore ALL pmcs, even the ones
- * that the task does not use to avoid leaks and possibly corruption
- * of the sesion because of configuration conflicts. So here, we
- * initialize the entire set used in the context switch restore routine.
- */
- t->pmc[i] = PMC_DFL_VAL(i);
- DBprintk(("pmc[%d]=0x%lx\n", i, t->pmc[i]));
- }
+ t->pmcs[0] = ia64_get_pmc(0);
/*
- * clear reset values for PMD.
- * XXX: good up to 64 PMDS.
+ * unfreeze PMU if had pending overflows
*/
- for (i=0; (pmu_conf.pmd_desc[i].type & PFM_REG_END) == 0; i++) {
- if ((pmu_conf.pmd_desc[i].type & PFM_REG_IMPL) == 0) continue;
- ia64_set_pmd(i, 0UL);
- t->pmd[i] = 0UL;
- }
+ if (t->pmcs[0] & ~1UL) pfm_unfreeze_pmu();
/*
- * On context switched restore, we must restore ALL pmc and ALL pmd even
- * when they are not actively used by the task. In UP, the incoming process
- * may otherwise pick up left over PMC, PMD state from the previous process.
- * As opposed to PMD, stale PMC can cause harm to the incoming
- * process because they may change what is being measured.
- * Therefore, we must systematically reinstall the entire
- * PMC state. In SMP, the same thing is possible on the
- * same CPU but also on between 2 CPUs.
- *
- * The problem with PMD is information leaking especially
- * to user level when psr.sp=0
- *
- * There is unfortunately no easy way to avoid this problem
- * on either UP or SMP. This definitively slows down the
- * pfm_load_regs() function.
- */
-
- /*
- * We must include all the PMC in this mask to make sure we don't
- * see any side effect of a stale state, such as opcode matching
- * or range restrictions, for instance.
- *
- * We never directly restore PMC0 so we do not include it in the mask.
- */
- ctx->ctx_reload_pmcs[0] = pmu_conf.impl_pmcs[0] & ~0x1;
- /*
- * We must include all the PMD in this mask to avoid picking
- * up stale value and leak information, especially directly
- * at the user level when psr.sp=0
- */
- ctx->ctx_reload_pmds[0] = pmu_conf.impl_pmds[0];
-
- /*
- * Keep track of the pmds we want to sample
- * XXX: may be we don't need to save/restore the DEAR/IEAR pmds
- * but we do need the BTB for sure. This is because of a hardware
- * buffer of 1 only for non-BTB pmds.
- *
- * We ignore the unimplemented pmds specified by the user
+ * finally, unmask interrupts and allow context
+ * access.
+	 * Any pending overflow interrupt may be delivered
+	 * here and will be treated as spurious because we
+	 * have no PMU owner anymore.
*/
- ctx->ctx_used_pmds[0] = ctx->ctx_smpl_regs[0];
- ctx->ctx_used_pmcs[0] = 1; /* always save/restore PMC[0] */
+ pfm_unprotect_ctx_ctxsw(ctx, flags);
- /*
- * useful in case of re-enable after disable
- */
- ctx->ctx_used_ibrs[0] = 0UL;
- ctx->ctx_used_dbrs[0] = 0UL;
+ return;
- ia64_srlz_d();
- preempt_enable();
+save_error:
+ printk(KERN_ERR "perfmon: pfm_save_regs CPU%d [%d] NULL context PM_VALID=%ld\n",
+ smp_processor_id(), task->pid,
+ task->thread.flags & IA64_THREAD_PM_VALID);
}
+#else /* !CONFIG_SMP */
+
/*
- * This function is called when a thread exits (from exit_thread()).
- * This is a simplified pfm_save_regs() that simply flushes the current
- * register state into the save area taking into account any pending
- * overflow. This time no notification is sent because the task is dying
- * anyway. The inline processing of overflows avoids loosing some counts.
- * The PMU is frozen on exit from this call and is to never be reenabled
- * again for this task.
- *
+ * in 2.5, interrupts are masked when we come here
*/
void
-pfm_flush_regs (struct task_struct *task)
+pfm_save_regs(struct task_struct *task)
{
pfm_context_t *ctx;
- u64 pmc0;
- unsigned long mask2, val;
- int i;
-
- ctx = task->thread.pfm_context;
+ u64 psr;
- if (ctx == NULL) return;
+ ctx = PFM_GET_CTX(task);
+ if (ctx == NULL) goto save_error;
- /*
- * that's it if context already disabled
+ /*
+ * save current PSR: needed because we modify it
*/
- if (ctx->ctx_flags.state == PFM_CTX_DISABLED) return;
+ psr = pfm_get_psr();
- preempt_disable();
/*
* stop monitoring:
- * This is the only way to stop monitoring without destroying overflow
- * information in PMC[0].
- * This is the last instruction which can cause overflow when monitoring
- * in kernel.
- * By now, we could still have an overflow interrupt in-flight.
+ * This is the last instruction which may generate an overflow
+ *
+	 * We do not need to set psr.sp because it is irrelevant in kernel.
+ * It will be restored from ipsr when going back to user level
*/
- if (ctx->ctx_fl_system) {
-
+ pfm_clear_psr_up();
- /* disable dcr pp */
- ia64_set_dcr(ia64_get_dcr() & ~IA64_DCR_PP);
+ /*
+ * keep a copy of the saved psr (for reload)
+ */
+ ctx->ctx_saved_psr = psr;
- /* stop monitoring */
- pfm_clear_psr_pp();
+ psr = pfm_get_psr();
+ if (psr & IA64_PSR_UP) {
+ printk(KERN_ERR " perfmon: pfm_save_regs: psr.up set current [%d] owner [%d] psr=0x%lx\n", current->pid, GET_PMU_OWNER()->pid, psr);
+ }
+ if (psr & IA64_PSR_I) {
+ printk(KERN_ERR " perfmon: pfm_save_regs: psr.i set current [%d] owner [%d] psr=0x%lx\n", current->pid, GET_PMU_OWNER()->pid, psr);
+ }
- ia64_srlz_i();
+ return;
+save_error:
+ printk(KERN_ERR "perfmon: pfm_save_regs CPU%d [%d] NULL context PM_VALID=%ld\n",
+ smp_processor_id(), task->pid,
+ task->thread.flags & IA64_THREAD_PM_VALID);
+}
- PFM_CPUINFO_CLEAR(PFM_CPUINFO_SYST_WIDE);
- PFM_CPUINFO_CLEAR(PFM_CPUINFO_DCR_PP);
- PFM_CPUINFO_CLEAR(PFM_CPUINFO_EXCL_IDLE);
- } else {
+static void
+pfm_lazy_save_regs (struct task_struct *task)
+{
+ pfm_context_t *ctx;
+ struct thread_struct *t;
+ unsigned long flags;
+ unsigned long psr;
- /* stop monitoring */
+#if 1
+ psr = pfm_get_psr();
+ if (psr & IA64_PSR_UP) {
+ printk(KERN_ERR " perfmon: pfm_lazy_save_regs: psr.up set current [%d] owner [%d] psr=0x%lx\n", current->pid, task->pid, psr);
pfm_clear_psr_up();
-
- ia64_srlz_i();
-
- /* no more save/restore on ctxsw */
- current->thread.flags &= ~IA64_THREAD_PM_VALID;
}
+#endif
- /*
- * Mark the PMU as not owned
- * This will cause the interrupt handler to do nothing in case an overflow
- * interrupt was in-flight
- * This also guarantees that pmc0 will contain the final state
- * It virtually gives us full control on overflow processing from that point
- * on.
- * It must be an atomic operation.
- */
- SET_PMU_OWNER(NULL);
+ ctx = PFM_GET_CTX(task);
+ t = &task->thread;
+
+ DPRINT(("on [%d] used_pmds=0x%lx\n", task->pid, ctx->ctx_used_pmds[0]));
/*
- * read current overflow status:
+ * we need to mask PMU overflow here to
+ * make sure that we maintain pmc0 until
+ * we save it. overflow interrupts are
+ * treated as spurious if there is no
+ * owner.
*
- * we are guaranteed to read the final stable state
+ * XXX: I don't think this is necessary
*/
- ia64_srlz_d();
- pmc0 = ia64_get_pmc(0); /* slow */
+ PROTECT_CTX(ctx,flags);
/*
- * freeze PMU:
+ * release ownership of this PMU.
+ * must be done before we save the registers.
*
- * This destroys the overflow information. This is required to make sure
- * next process does not start with monitoring on if not requested
+ * after this call any PMU interrupt is treated
+ * as spurious.
*/
- pfm_freeze_pmu();
+ SET_PMU_OWNER(NULL, NULL);
/*
- * We don't need to restore psr, because we are on our way out
+ * save all the pmds we use
*/
+ pfm_save_pmds(t->pmds, ctx->ctx_used_pmds[0]);
/*
- * This loop flushes the PMD into the PFM context.
- * It also processes overflow inline.
- *
- * IMPORTANT: No notification is sent at this point as the process is dying.
- * The implicit notification will come from a SIGCHILD or a return from a
- * waitpid().
- *
+ * save pmc0 ia64_srlz_d() done in pfm_save_pmds()
+	 * it is needed to check for pending overflow
+ * on the restore path
*/
-
- if ((unsigned int) atomic_read(&ctx->ctx_last_cpu) != smp_processor_id())
- printk(KERN_DEBUG "perfmon: [%d] last_cpu=%d\n",
- task->pid, atomic_read(&ctx->ctx_last_cpu));
+ t->pmcs[0] = ia64_get_pmc(0);
/*
- * we save all the used pmds
- * we take care of overflows for pmds used as counters
+ * unfreeze PMU if had pending overflows
*/
- mask2 = ctx->ctx_used_pmds[0];
- for (i = 0; mask2; i++, mask2>>=1) {
-
- /* skip non used pmds */
- if ((mask2 & 0x1) == 0) continue;
+ if (t->pmcs[0] & ~1UL) pfm_unfreeze_pmu();
- val = ia64_get_pmd(i);
-
- if (PMD_IS_COUNTING(i)) {
- DBprintk(("[%d] pmd[%d] soft_pmd=0x%lx hw_pmd=0x%lx\n",
- task->pid,
- i,
- ctx->ctx_soft_pmds[i].val,
- val & pmu_conf.ovfl_val));
-
- /* collect latest results */
- ctx->ctx_soft_pmds[i].val += val & pmu_conf.ovfl_val;
-
- /*
- * now everything is in ctx_soft_pmds[] and we need
- * to clear the saved context from save_regs() such that
- * pfm_read_pmds() gets the correct value
- */
- task->thread.pmd[i] = 0;
-
- /*
- * take care of overflow inline
- */
- if (pmc0 & (1UL << i)) {
- ctx->ctx_soft_pmds[i].val += 1 + pmu_conf.ovfl_val;
- DBprintk(("[%d] pmd[%d] overflowed soft_pmd=0x%lx\n",
- task->pid, i, ctx->ctx_soft_pmds[i].val));
- }
- } else {
- DBprintk(("[%d] pmd[%d] hw_pmd=0x%lx\n", task->pid, i, val));
- /*
- * not a counter, just save value as is
- */
- task->thread.pmd[i] = val;
- }
- }
- /*
- * indicates that context has been saved
+ /*
+	 * now we can unmask PMU interrupts, they will
+ * be treated as purely spurious and we will not
+ * lose any information
*/
- atomic_set(&ctx->ctx_last_cpu, -1);
- preempt_enable();
+ UNPROTECT_CTX(ctx,flags);
}
+#endif /* CONFIG_SMP */
-
-/*
- * task is the newly created task, pt_regs for new child
- */
-int
-pfm_inherit(struct task_struct *task, struct pt_regs *regs)
+#ifdef CONFIG_SMP
+void
+pfm_load_regs (struct task_struct *task)
{
pfm_context_t *ctx;
- pfm_context_t *nctx;
- struct thread_struct *thread;
- unsigned long m;
- int i;
-
- /*
- * the new task was copied from parent and therefore points
- * to the parent's context at this point
- */
- ctx = task->thread.pfm_context;
- thread = &task->thread;
+ struct thread_struct *t;
+ struct task_struct *owner;
+ unsigned long pmc_mask = 0UL, pmd_mask = 0UL;
+ unsigned long flags;
+ u64 psr;
- preempt_disable();
- /*
- * for secure sessions, make sure child cannot mess up
- * the monitoring session.
- */
- if (ctx->ctx_fl_unsecure == 0) {
- ia64_psr(regs)->sp = 1;
- DBprintk(("enabling psr.sp for [%d]\n", task->pid));
- } else {
- DBprintk(("psr.sp=%d [%d]\n", ia64_psr(regs)->sp, task->pid));
+ ctx = PFM_GET_CTX(task);
+ if (unlikely(ctx == NULL)) {
+ printk(KERN_ERR "perfmon: pfm_load_regs() null context\n");
+ return;
}
- /*
- * if there was a virtual mapping for the sampling buffer
- * the mapping is NOT inherited across fork() (see VM_DONTCOPY),
- * so we don't have to explicitly remove it here.
- *
- *
- * Part of the clearing of fields is also done in
- * copy_thread() because the fiels are outside the
- * pfm_context structure and can affect tasks not
- * using perfmon.
- */
+ owner = GET_PMU_OWNER();
+ t = &task->thread;
+
+#if 1
+ psr = pfm_get_psr();
+ BUG_ON(psr & IA64_PSR_UP);
+ psr = pfm_get_psr();
+ BUG_ON(psr & IA64_PSR_I);
+#endif
- /* clear pending notification */
- task->thread.pfm_ovfl_block_reset = 0;
/*
- * clear cpu pinning restriction for child
+ * possible on unload
*/
- if (ctx->ctx_fl_system) {
- set_cpus_allowed(task, ctx->ctx_saved_cpus_allowed);
-
- DBprintk(("setting cpus_allowed for [%d] to 0x%lx from 0x%lx\n",
- task->pid,
- ctx->ctx_saved_cpus_allowed,
- current->cpus_allowed));
+ if (unlikely((t->flags & IA64_THREAD_PM_VALID) == 0)) {
+ DPRINT(("[%d] PM_VALID=0, nothing to do\n", task->pid));
+ return;
}
/*
- * takes care of easiest case first
+ * we always come here with interrupts ALREADY disabled by
+ * the scheduler. So we simply need to protect against concurrent
+ * access, not CPU concurrency.
*/
- if (CTX_INHERIT_MODE(ctx) == PFM_FL_INHERIT_NONE) {
+ flags = pfm_protect_ctx_ctxsw(ctx);
- DBprintk(("removing PFM context for [%d]\n", task->pid));
+ if (unlikely(CTX_IS_ZOMBIE(ctx))) {
+ struct pt_regs *regs = ia64_task_regs(task);
- task->thread.pfm_context = NULL;
+ BUG_ON(ctx->ctx_smpl_hdr);
- /*
- * we must clear psr.up because the new child does
- * not have a context and the PM_VALID flag is cleared
- * in copy_thread().
- *
- * we do not clear psr.pp because it is always
- * controlled by the system wide logic and we should
- * never be here when system wide is running anyway
- */
- ia64_psr(regs)->up = 0;
+ DPRINT(("ctx zombie, forcing cleanup for [%d]\n", task->pid));
- preempt_enable();
+ pfm_force_cleanup(ctx, regs);
- /* copy_thread() clears IA64_THREAD_PM_VALID */
- return 0;
- }
- nctx = pfm_context_alloc();
- if (nctx == NULL) return -ENOMEM;
+ pfm_unprotect_ctx_ctxsw(ctx, flags);
- /* copy content */
- *nctx = *ctx;
+ /*
+ * this one (kmalloc'ed) is fine with interrupts disabled
+ */
+ pfm_context_free(ctx);
+ return;
+ }
- if (CTX_INHERIT_MODE(ctx) == PFM_FL_INHERIT_ONCE) {
- nctx->ctx_fl_inherit = PFM_FL_INHERIT_NONE;
- DBprintk(("downgrading to INHERIT_NONE for [%d]\n", task->pid));
+ /*
+ * we restore ALL the debug registers to avoid picking up
+ * stale state.
+ *
+ * This must be done even when the task is still the owner
+ * as the registers may have been modified via ptrace()
+ * (not perfmon) by the previous task.
+ */
+ if (ctx->ctx_fl_using_dbreg) {
+ pfm_restore_ibrs(ctx->ctx_ibrs, pmu_conf.num_ibrs);
+ pfm_restore_dbrs(ctx->ctx_dbrs, pmu_conf.num_dbrs);
}
/*
- * task is not yet visible in the tasklist, so we do
- * not need to lock the newly created context.
- * However, we must grab the tasklist_lock to ensure
- * that the ctx_owner or ctx_notify_task do not disappear
- * while we increment their check counters.
+ * retrieve saved psr
*/
- read_lock(&tasklist_lock);
+ psr = ctx->ctx_saved_psr;
- if (nctx->ctx_notify_task)
- atomic_inc(&nctx->ctx_notify_task->thread.pfm_notifiers_check);
+ /*
+ * if we were the last user of the PMU on that CPU,
+ * then nothing to do except restore psr
+ */
+ if (GET_LAST_CPU(ctx) == smp_processor_id() && ctx->ctx_last_activation == GET_ACTIVATION()) {
- if (nctx->ctx_owner)
- atomic_inc(&nctx->ctx_owner->thread.pfm_owners_check);
+ /*
+ * retrieve partial reload masks (due to user modifications)
+ */
+ pmc_mask = ctx->ctx_reload_pmcs[0];
+ pmd_mask = ctx->ctx_reload_pmds[0];
- read_unlock(&tasklist_lock);
+ if (pmc_mask || pmd_mask) DPRINT(("partial reload [%d] pmd_mask=0x%lx pmc_mask=0x%lx\n", task->pid, pmd_mask, pmc_mask));
+ } else {
+ /*
+ * To avoid leaking information to the user level when psr.sp=0,
+ * we must reload ALL implemented pmds (even the ones we don't use).
+ * In the kernel we only allow PFM_READ_PMDS on registers which
+ * we initialized or requested (sampling) so there is no risk there.
+ */
+ pmd_mask = pfm_sysctl.fastctxsw ? ctx->ctx_used_pmds[0] : ctx->ctx_all_pmds[0];
+ /*
+ * ALL accessible PMCs are systematically reloaded, unused registers
+ * get their default (from pfm_reset_pmu_state()) values to avoid picking
+ * up stale configuration.
+ *
+ * PMC0 is never in the mask. It is always restored separately.
+ */
+ pmc_mask = ctx->ctx_all_pmcs[0];
- LOCK_PFS();
- pfm_sessions.pfs_task_sessions++;
- UNLOCK_PFS();
+ DPRINT(("full reload for [%d] owner=%d activation=%lu last_activation=%lu last_cpu=%d pmd_mask=0x%lx pmc_mask=0x%lx\n",
+ task->pid, owner ? owner->pid : -1,
+ GET_ACTIVATION(), ctx->ctx_last_activation,
+ GET_LAST_CPU(ctx), pmd_mask, pmc_mask));
- /* initialize counters in new context */
- m = nctx->ctx_used_pmds[0] >> PMU_FIRST_COUNTER;
- for(i = PMU_FIRST_COUNTER ; m ; m>>=1, i++) {
- if ((m & 0x1) && pmu_conf.pmd_desc[i].type == PFM_REG_COUNTING) {
- nctx->ctx_soft_pmds[i].val = nctx->ctx_soft_pmds[i].lval & ~pmu_conf.ovfl_val;
- thread->pmd[i] = nctx->ctx_soft_pmds[i].lval & pmu_conf.ovfl_val;
- } else {
- thread->pmd[i] = 0UL; /* reset to initial state */
- }
}
-
- nctx->ctx_fl_frozen = 0;
- nctx->ctx_ovfl_regs[0] = 0UL;
- nctx->ctx_fl_trap_reason = PFM_TRAP_REASON_NONE;
- atomic_set(&nctx->ctx_last_cpu, -1);
-
/*
- * here nctx->ctx_psb == ctx->ctx_psb
+ * when context is MASKED, we will restore PMC with plm=0
+ * and PMD with stale information, but that's ok, nothing
+ * will be captured.
*
- * increment reference count to sampling
- * buffer, if any. Note that this is independent
- * from the virtual mapping. The latter is never
- * inherited while the former will be if context
- * is setup to something different from PFM_FL_INHERIT_NONE
+ * XXX: optimize here
*/
- if (nctx->ctx_psb) {
- LOCK_PSB(nctx->ctx_psb);
+ if (pmd_mask) pfm_restore_pmds(t->pmds, pmd_mask);
+ if (pmc_mask) pfm_restore_pmcs(t->pmcs, pmc_mask);
- nctx->ctx_psb->psb_refcnt++;
-
- DBprintk(("updated smpl @ %p refcnt=%lu psb_flags=0x%x\n",
- ctx->ctx_psb->psb_hdr,
- ctx->ctx_psb->psb_refcnt,
- ctx->ctx_psb->psb_flags));
-
- UNLOCK_PSB(nctx->ctx_psb);
-
- /*
- * remove any pointer to sampling buffer mapping
- */
- nctx->ctx_smpl_vaddr = 0;
+ /*
+ * check for pending overflow at the time the state
+ * was saved.
+ */
+ if (unlikely(PMC0_HAS_OVFL(t->pmcs[0]))) {
+ struct pt_regs *regs = ia64_task_regs(task);
+ pfm_overflow_handler(task, ctx, t->pmcs[0], regs);
}
- sema_init(&nctx->ctx_restart_sem, 0); /* reset this semaphore to locked */
-
/*
- * propagate kernel psr in new context (used for first ctxsw in
+ * we clear PMC0, to ensure that any in flight interrupt
+ * will not be attributed to the new context we are installing
+ * because the actual overflow has been processed above already.
+ * No real effect until we unmask interrupts at the end of the
+ * function.
*/
- nctx->ctx_saved_psr = pfm_get_psr();
+ pfm_unfreeze_pmu();
/*
- * propagate kernel psr in new context (used for first ctxsw in
+ * we just did a reload, so we reset the partial reload fields
*/
- nctx->ctx_saved_psr = pfm_get_psr();
+ ctx->ctx_reload_pmcs[0] = 0UL;
+ ctx->ctx_reload_pmds[0] = 0UL;
- /* link with new task */
- thread->pfm_context = nctx;
+ SET_LAST_CPU(ctx, smp_processor_id());
- DBprintk(("nctx=%p for process [%d]\n", (void *)nctx, task->pid));
+ /*
+ * dump activation value for this PMU
+ */
+ INC_ACTIVATION();
+ /*
+ * record current activation for this context
+ */
+ SET_ACTIVATION(ctx);
/*
- * the copy_thread routine automatically clears
- * IA64_THREAD_PM_VALID, so we need to reenable it, if it was used by the caller
+ * establish new ownership. Interrupts
+ * are still masked at this point.
*/
- if (current->thread.flags & IA64_THREAD_PM_VALID) {
- DBprintk(("setting PM_VALID for [%d]\n", task->pid));
- thread->flags |= IA64_THREAD_PM_VALID;
- }
+ SET_PMU_OWNER(task, ctx);
- preempt_enable();
+ /*
+ * restore the psr we changed
+ */
+ pfm_set_psr_l(psr);
- return 0;
+ /*
+ * allow concurrent access to context
+ */
+ pfm_unprotect_ctx_ctxsw(ctx, flags);
}
-
-/*
- *
- * We cannot touch any of the PMU registers at this point as we may
- * not be running on the same CPU the task was last run on. Therefore
- * it is assumed that the PMU has been stopped appropriately in
- * pfm_flush_regs() called from exit_thread().
- *
- * The function is called in the context of the parent via a release_thread()
- * and wait4(). The task is not in the tasklist anymore.
+#else /* !CONFIG_SMP */
+/*
+ * reload PMU state for UP kernels
+ * in 2.5 we come here with interrupts disabled
*/
void
-pfm_context_exit(struct task_struct *task)
+pfm_load_regs (struct task_struct *task)
{
- pfm_context_t *ctx = task->thread.pfm_context;
-
- /*
- * check sampling buffer
- */
- preempt_disable();
- if (ctx->ctx_psb) {
- pfm_smpl_buffer_desc_t *psb = ctx->ctx_psb;
-
- LOCK_PSB(psb);
-
- DBprintk(("sampling buffer from [%d] @%p size %ld refcnt=%lu psb_flags=0x%x\n",
- task->pid,
- psb->psb_hdr, psb->psb_size, psb->psb_refcnt, psb->psb_flags));
+ struct thread_struct *t;
+ pfm_context_t *ctx;
+ struct task_struct *owner;
+ unsigned long pmd_mask, pmc_mask;
+ u64 psr;
- /*
- * in the case where we are the last user, we may be able to free
- * the buffer
- */
- psb->psb_refcnt--;
+ owner = GET_PMU_OWNER();
+ ctx = PFM_GET_CTX(task);
+ t = &task->thread;
- if (psb->psb_refcnt == 0) {
+#if 1
+ psr = pfm_get_psr();
+ if (psr & IA64_PSR_UP) {
+ printk(KERN_ERR " perfmon: pfm_load_regs: psr.up set current [%d] owner [%d] psr=0x%lx\n", current->pid, owner->pid, psr);
+ }
+ psr = pfm_get_psr();
+ if (psr & IA64_PSR_I) {
+ printk(KERN_ERR " perfmon: pfm_load_regs: psr.i set current [%d] owner [%d] psr=0x%lx\n", current->pid, owner->pid, psr);
+ }
+#endif
- /*
- * The flag is cleared in pfm_vm_close(). which gets
- * called from do_exit() via exit_mm().
- * By the time we come here, the task has no more mm context.
- *
- * We can only free the psb and buffer here after the vm area
- * describing the buffer has been removed. This normally happens
- * as part of do_exit() but the entire mm context is ONLY removed
- * once its reference counts goes to zero. This is typically
- * the case except for multi-threaded (several tasks) processes.
- *
- * See pfm_vm_close() and pfm_cleanup_smpl_buf() for more details.
- */
- if ((psb->psb_flags & PSB_HAS_VMA) == 0) {
-
- DBprintk(("cleaning sampling buffer from [%d] @%p size %ld\n",
- task->pid,
- psb->psb_hdr, psb->psb_size));
-
- /*
- * free the buffer and psb
- */
- pfm_rvfree(psb->psb_hdr, psb->psb_size);
- kfree(psb);
- psb = NULL;
- }
- }
- /* psb may have been deleted */
- if (psb) UNLOCK_PSB(psb);
- }
-
- DBprintk(("cleaning [%d] pfm_context @%p notify_task=%p check=%d mm=%p\n",
- task->pid, ctx,
- ctx->ctx_notify_task,
- atomic_read(&task->thread.pfm_notifiers_check), task->mm));
-
- /*
- * To avoid getting the notified task or owner task scan the entire process
- * list when they exit, we decrement notifiers_check and owners_check respectively.
+ /*
+ * we restore ALL the debug registers to avoid picking up
+ * stale state.
*
- * Of course, there is race condition between decreasing the value and the
- * task exiting. The danger comes from the fact that, in both cases, we have a
- * direct pointer to a task structure thereby bypassing the tasklist.
- * We must make sure that, if we have task!= NULL, the target task is still
- * present and is identical to the initial task specified
- * during pfm_context_create(). It may already be detached from the tasklist but
- * that's okay. Note that it is okay if we miss the deadline and the task scans
- * the list for nothing, it will affect performance but not correctness.
- * The correctness is ensured by using the ctx_lock which prevents the
- * notify_task from changing the fields in our context.
- * Once holdhing this lock, if we see task!= NULL, then it will stay like
- * that until we release the lock. If it is NULL already then we came too late.
- */
- LOCK_CTX(ctx);
-
- if (ctx->ctx_notify_task != NULL) {
- DBprintk(("[%d], [%d] atomic_sub on [%d] notifiers=%u\n", current->pid,
- task->pid,
- ctx->ctx_notify_task->pid,
- atomic_read(&ctx->ctx_notify_task->thread.pfm_notifiers_check)));
-
- atomic_dec(&ctx->ctx_notify_task->thread.pfm_notifiers_check);
+ * This must be done even when the task is still the owner
+ * as the registers may have been modified via ptrace()
+ * (not perfmon) by the previous task.
+ */
+ if (ctx->ctx_fl_using_dbreg) {
+ pfm_restore_ibrs(ctx->ctx_ibrs, pmu_conf.num_ibrs);
+ pfm_restore_dbrs(ctx->ctx_dbrs, pmu_conf.num_dbrs);
}
- if (ctx->ctx_owner != NULL) {
- DBprintk(("[%d], [%d] atomic_sub on [%d] owners=%u\n",
- current->pid,
- task->pid,
- ctx->ctx_owner->pid,
- atomic_read(&ctx->ctx_owner->thread.pfm_owners_check)));
+ /*
+	 * retrieve saved psr
+ */
+ psr = ctx->ctx_saved_psr;
- atomic_dec(&ctx->ctx_owner->thread.pfm_owners_check);
+ /*
+ * short path, our state is still there, just
+ * need to restore psr and we go
+ *
+ * we do not touch either PMC nor PMD. the psr is not touched
+ * by the overflow_handler. So we are safe w.r.t. to interrupt
+ * concurrency even without interrupt masking.
+ */
+ if (likely(owner == task)) {
+ pfm_set_psr_l(psr);
+ return;
}
- UNLOCK_CTX(ctx);
- preempt_enable();
+ DPRINT(("reload for [%d] owner=%d\n", task->pid, owner ? owner->pid : -1));
- pfm_unreserve_session(task, ctx->ctx_fl_system, 1UL << ctx->ctx_cpu);
-
- if (ctx->ctx_fl_system) {
- /*
- * remove any CPU pinning
- */
- set_cpus_allowed(task, ctx->ctx_saved_cpus_allowed);
- }
+ /*
+ * someone else is still using the PMU, first push it out and
+ * then we'll be able to install our stuff !
+ *
+ * Upon return, there will be no owner for the current PMU
+ */
+ if (owner) pfm_lazy_save_regs(owner);
- pfm_context_free(ctx);
- /*
- * clean pfm state in thread structure,
+ /*
+ * To avoid leaking information to the user level when psr.sp=0,
+ * we must reload ALL implemented pmds (even the ones we don't use).
+ * In the kernel we only allow PFM_READ_PMDS on registers which
+ * we initialized or requested (sampling) so there is no risk there.
*/
- task->thread.pfm_context = NULL;
- task->thread.pfm_ovfl_block_reset = 0;
+ pmd_mask = pfm_sysctl.fastctxsw ? ctx->ctx_used_pmds[0] : ctx->ctx_all_pmds[0];
- /* pfm_notifiers is cleaned in pfm_cleanup_notifiers() */
-}
+ /*
+ * ALL accessible PMCs are systematically reloaded, unused registers
+ * get their default (from pfm_reset_pmu_state()) values to avoid picking
+ * up stale configuration.
+ *
+ * PMC0 is never in the mask. It is always restored separately
+ */
+ pmc_mask = ctx->ctx_all_pmcs[0];
-/*
- * function invoked from release_thread when pfm_smpl_buf_list is not NULL
- */
-int
-pfm_cleanup_smpl_buf(struct task_struct *task)
-{
- pfm_smpl_buffer_desc_t *tmp, *psb = task->thread.pfm_smpl_buf_list;
+ pfm_restore_pmds(t->pmds, pmd_mask);
+ pfm_restore_pmcs(t->pmcs, pmc_mask);
- if (psb == NULL) {
- printk(KERN_DEBUG "perfmon: psb is null in [%d]\n", current->pid);
- return -1;
+ /*
+ * Check for pending overflow when state was last saved.
+	 * invoke the handler if overflow status bits are set.
+ *
+ * Any PMU overflow in flight at this point, will still
+ * be treated as spurious because we have no declared
+ * owner. Note that the first level interrupt handler
+ * DOES NOT TOUCH any PMC except PMC0 for which we have
+ * a copy already.
+ */
+ if (unlikely(PMC0_HAS_OVFL(t->pmcs[0]))) {
+ struct pt_regs *regs = ia64_task_regs(task);
+ pfm_overflow_handler(task, ctx, t->pmcs[0], regs);
}
+
/*
- * Walk through the list and free the sampling buffer and psb
+ * we clear PMC0, to ensure that any in flight interrupt
+ * will not be attributed to the new context we are installing
+ * because the actual overflow has been processed above already.
+ *
+ * This is an atomic operation.
*/
- while (psb) {
- DBprintk(("[%d] freeing smpl @%p size %ld\n", current->pid, psb->psb_hdr, psb->psb_size));
+ pfm_unfreeze_pmu();
- pfm_rvfree(psb->psb_hdr, psb->psb_size);
- tmp = psb->psb_next;
- kfree(psb);
- psb = tmp;
- }
+ /*
+ * establish new ownership. If there was an in-flight
+ * overflow interrupt, it will be treated as spurious
+ * before and after the call, because no overflow
+ * status bit can possibly be set. No new overflow
+ * can be generated because, at this point, psr.up
+ * is still cleared.
+ */
+ SET_PMU_OWNER(task, ctx);
- /* just in case */
- task->thread.pfm_smpl_buf_list = NULL;
+ /*
+ * restore the psr. This is the point at which
+ * new overflow interrupts can be generated again.
+ */
+ pfm_set_psr_l(psr);
- return 0;
}
+#endif /* CONFIG_SMP */
/*
- * function invoked from release_thread to make sure that the ctx_owner field does not
- * point to an unexisting task.
+ * this function assumes monitoring is stopped
*/
-void
-pfm_cleanup_owners(struct task_struct *task)
+static void
+pfm_flush_pmds(struct task_struct *task, pfm_context_t *ctx)
{
- struct task_struct *g, *p;
- pfm_context_t *ctx;
-
- DBprintk(("called by [%d] for [%d]\n", current->pid, task->pid));
+ u64 pmc0;
+ unsigned long mask2, val, pmd_val;
+ int i, can_access_pmu = 0;
+ int is_self;
- read_lock(&tasklist_lock);
+ /*
+ * is the caller the task being monitored (or which initiated the
+ * session for system wide measurements)
+ */
+ is_self = ctx->ctx_task == task ? 1 : 0;
- do_each_thread(g, p) {
+#ifdef CONFIG_SMP
+ if (task == current) {
+#else
+ /*
+ * in UP, the state can still be in the registers
+ */
+ if (task == current || GET_PMU_OWNER() == task) {
+#endif
+ can_access_pmu = 1;
/*
- * It is safe to do the 2-step test here, because thread.ctx
- * is cleaned up only in release_thread() and at that point
- * the task has been detached from the tasklist which is an
- * operation which uses the write_lock() on the tasklist_lock
- * so it cannot run concurrently to this loop. So we have the
- * guarantee that if we find p and it has a perfmon ctx then
- * it is going to stay like this for the entire execution of this
- * loop.
+ * Mark the PMU as not owned
+ * This will cause the interrupt handler to do nothing in case an overflow
+ * interrupt was in-flight
+ * This also guarantees that pmc0 will contain the final state
+ * It virtually gives us full control on overflow processing from that point
+ * on.
*/
- ctx = p->thread.pfm_context;
-
- //DBprintk(("[%d] scanning task [%d] ctx=%p\n", task->pid, p->pid, ctx));
-
- if (ctx && ctx->ctx_owner == task) {
- DBprintk(("trying for owner [%d] in [%d]\n", task->pid, p->pid));
- /*
- * the spinlock is required to take care of a race condition
- * with the send_sig_info() call. We must make sure that
- * either the send_sig_info() completes using a valid task,
- * or the notify_task is cleared before the send_sig_info()
- * can pick up a stale value. Note that by the time this
- * function is executed the 'task' is already detached from the
- * tasklist. The problem is that the notifiers have a direct
- * pointer to it. It is okay to send a signal to a task in this
- * stage, it simply will have no effect. But it is better than sending
- * to a completely destroyed task or worse to a new task using the same
- * task_struct address.
- */
- LOCK_CTX(ctx);
-
- ctx->ctx_owner = NULL;
-
- UNLOCK_CTX(ctx);
-
- DBprintk(("done for notifier [%d] in [%d]\n", task->pid, p->pid));
- }
- } while_each_thread(g, p);
-
- read_unlock(&tasklist_lock);
-
- atomic_set(&task->thread.pfm_owners_check, 0);
-}
+ SET_PMU_OWNER(NULL, NULL);
+ /*
+ * read current overflow status:
+ *
+ * we are guaranteed to read the final stable state
+ */
+ ia64_srlz_d();
+ pmc0 = ia64_get_pmc(0); /* slow */
-/*
- * function called from release_thread to make sure that the ctx_notify_task is not pointing
- * to an unexisting task
- */
-void
-pfm_cleanup_notifiers(struct task_struct *task)
-{
- struct task_struct *g, *p;
- pfm_context_t *ctx;
+ /*
+ * reset freeze bit, overflow status information destroyed
+ */
+ pfm_unfreeze_pmu();
+ } else {
+ pmc0 = task->thread.pmcs[0];
+ /*
+ * clear whatever overflow status bits there were
+ */
+ task->thread.pmcs[0] &= ~0x1;
+ }
- DBprintk(("called by [%d] for [%d]\n", current->pid, task->pid));
+ /*
+ * we save all the used pmds
+ * we take care of overflows for counting PMDs
+ *
+ * XXX: sampling situation is not taken into account here
+ */
+ mask2 = ctx->ctx_used_pmds[0];
+ for (i = 0; mask2; i++, mask2>>=1) {
- read_lock(&tasklist_lock);
+ /* skip non used pmds */
+ if ((mask2 & 0x1) == 0) continue;
- do_each_thread(g, p) {
/*
- * It is safe to do the 2-step test here, because thread.ctx is cleaned up
- * only in release_thread() and at that point the task has been detached
- * from the tasklist which is an operation which uses the write_lock() on
- * the tasklist_lock so it cannot run concurrently to this loop. So we
- * have the guarantee that if we find p and it has a perfmon ctx then it
- * is going to stay like this for the entire execution of this loop.
+ * can access PMU always true in system wide mode
*/
- ctx = p->thread.pfm_context;
+ val = pmd_val = can_access_pmu ? ia64_get_pmd(i) : task->thread.pmds[i];
- //DBprintk(("[%d] scanning task [%d] ctx=%p\n", task->pid, p->pid, ctx));
+ if (PMD_IS_COUNTING(i)) {
+ DPRINT(("[%d] pmd[%d] ctx_pmd=0x%lx hw_pmd=0x%lx\n",
+ task->pid,
+ i,
+ ctx->ctx_pmds[i].val,
+ val & pmu_conf.ovfl_val));
- if (ctx && ctx->ctx_notify_task == task) {
- DBprintk(("trying for notifier [%d] in [%d]\n", task->pid, p->pid));
/*
- * the spinlock is required to take care of a race condition
- * with the send_sig_info() call. We must make sure that
- * either the send_sig_info() completes using a valid task,
- * or the notify_task is cleared before the send_sig_info()
- * can pick up a stale value. Note that by the time this
- * function is executed the 'task' is already detached from the
- * tasklist. The problem is that the notifiers have a direct
- * pointer to it. It is okay to send a signal to a task in this
- * stage, it simply will have no effect. But it is better than sending
- * to a completely destroyed task or worse to a new task using the same
- * task_struct address.
+ * we rebuild the full 64 bit value of the counter
*/
- LOCK_CTX(ctx);
-
- ctx->ctx_notify_task = NULL;
+ val = ctx->ctx_pmds[i].val + (val & pmu_conf.ovfl_val);
- UNLOCK_CTX(ctx);
+ /*
+ * now everything is in ctx_pmds[] and we need
+ * to clear the saved context from save_regs() such that
+ * pfm_read_pmds() gets the correct value
+ */
+ pmd_val = 0UL;
- DBprintk(("done for notifier [%d] in [%d]\n", task->pid, p->pid));
+ /*
+ * take care of overflow inline
+ */
+ if (pmc0 & (1UL << i)) {
+ val += 1 + pmu_conf.ovfl_val;
+ DPRINT(("[%d] pmd[%d] overflowed\n", task->pid, i));
+ }
}
- } while_each_thread(g, p);
- read_unlock(&tasklist_lock);
+ DPRINT(("[%d] is_self=%d ctx_pmd[%d]=0x%lx pmd_val=0x%lx\n", task->pid, is_self, i, val, pmd_val));
- atomic_set(&task->thread.pfm_notifiers_check, 0);
+ if (is_self) task->thread.pmds[i] = pmd_val;
+ ctx->ctx_pmds[i].val = val;
+ }
}
static struct irqaction perfmon_irqaction = {
- .handler = pfm_interrupt_handler,
- .flags = SA_INTERRUPT,
- .name = "perfmon"
+ .handler = pfm_interrupt_handler,
+ .flags = SA_INTERRUPT,
+ .name = "perfmon"
};
-int
-pfm_install_alternate_syswide_subsystem(pfm_intr_handler_desc_t *hdl)
-{
- int ret;
-
-
- /* some sanity checks */
- if (hdl == NULL || hdl->handler == NULL) {
- return -EINVAL;
- }
-
- /* do the easy test first */
- if (pfm_alternate_intr_handler) {
- return -EBUSY;
- }
-
- preempt_disable();
- /* reserve our session */
- ret = pfm_reserve_session(NULL, 1, cpu_online_map);
- if (ret) {
- preempt_enable();
- return ret;
- }
-
- if (pfm_alternate_intr_handler) {
- preempt_enable();
- printk(KERN_DEBUG "perfmon: install_alternate, intr_handler not NULL "
- "after reserve\n");
- return -EINVAL;
- }
-
- pfm_alternate_intr_handler = hdl;
-
- preempt_enable();
- return 0;
-}
-
-int
-pfm_remove_alternate_syswide_subsystem(pfm_intr_handler_desc_t *hdl)
-{
- if (hdl == NULL)
- return -EINVAL;
-
- /* cannot remove someone else's handler! */
- if (pfm_alternate_intr_handler != hdl)
- return -EINVAL;
-
- preempt_disable();
- pfm_alternate_intr_handler = NULL;
-
- /*
- * XXX: assume cpu_online_map has not changed since reservation
- */
- pfm_unreserve_session(NULL, 1, cpu_online_map);
-
- preempt_enable();
-
- return 0;
-}
-
/*
* perfmon initialization routine, called from the initcall() table
*/
+static int init_pfm_fs(void);
+
int __init
pfm_init(void)
{
unsigned int n, n_counters, i;
- pmu_conf.disabled = 1;
+ printk("perfmon: version %u.%u IRQ %u\n",
+ PFM_VERSION_MAJ,
+ PFM_VERSION_MIN,
+ IA64_PERFMON_VECTOR);
- printk(KERN_INFO "perfmon: version %u.%u IRQ %u\n", PFM_VERSION_MAJ, PFM_VERSION_MIN,
- IA64_PERFMON_VECTOR);
+ /*
+ * PMU type sanity check
+ * XXX: maybe better to implement autodetection (but then we have a larger kernel)
+ */
+ if (local_cpu_data->family != pmu_conf.pmu_family) {
+ printk(KERN_INFO "perfmon: disabled, kernel only supports %s PMU family\n", pmu_conf.pmu_name);
+ return -ENODEV;
+ }
/*
* compute the number of implemented PMD/PMC from the
pmu_conf.num_pmds = n;
pmu_conf.num_counters = n_counters;
- printk(KERN_INFO "perfmon: %u PMCs, %u PMDs, %u counters (%lu bits)\n",
+ /*
+ * sanity checks on the number of debug registers
+ */
+ if (pmu_conf.use_rr_dbregs) {
+ if (pmu_conf.num_ibrs > IA64_NUM_DBG_REGS) {
+ printk(KERN_INFO "perfmon: unsupported number of code debug registers (%u)\n", pmu_conf.num_ibrs);
+ return -1;
+ }
+ if (pmu_conf.num_dbrs > IA64_NUM_DBG_REGS) {
+ printk(KERN_INFO "perfmon: unsupported number of data debug registers (%u)\n", pmu_conf.num_dbrs);
+ return -1;
+ }
+ }
+
+ printk("perfmon: %s PMU detected, %u PMCs, %u PMDs, %u counters (%lu bits)\n",
+ pmu_conf.pmu_name,
pmu_conf.num_pmcs,
pmu_conf.num_pmds,
pmu_conf.num_counters,
}
/*
- * for now here for debug purposes
+ * create /proc/perfmon (mostly for debugging purposes)
*/
perfmon_dir = create_proc_read_entry ("perfmon", 0, 0, perfmon_read_entry, NULL);
if (perfmon_dir == NULL) {
}
/*
- * create /proc/perfmon
+ * create /proc/sys/kernel/perfmon (for debugging purposes)
*/
pfm_sysctl_header = register_sysctl_table(pfm_sysctl_root, 0);
* initialize all our spinlocks
*/
spin_lock_init(&pfm_sessions.pfs_lock);
+ spin_lock_init(&pfm_smpl_fmt_lock);
+
+ init_pfm_fs();
+
+ for(i=0; i < NR_CPUS; i++) pfm_stats[i].pfm_ovfl_intr_cycles_min = ~0UL;
/* we are all set */
- pmu_conf.disabled = 0;
+ pmu_conf.enabled = 1;
return 0;
}
+
__initcall(pfm_init);
void
-pfm_init_percpu(void)
+pfm_init_percpu (void)
{
int i;
- int me = get_cpu();
- if (me == 0)
+ /*
+ * make sure no measurement is active
+ * (may inherit programmed PMCs from EFI).
+ */
+ pfm_clear_psr_pp();
+ pfm_clear_psr_up();
+
+
+ if (smp_processor_id() == 0)
register_percpu_irq(IA64_PERFMON_VECTOR, &perfmon_irqaction);
ia64_set_pmv(IA64_PERFMON_VECTOR);
*
* At this point, pmu_conf has not yet been initialized
*
- * On McKinley, this code is ineffective until PMC4 is initialized.
+ * On McKinley, this code is ineffective until PMC4 is initialized
+ * but that's all right because we take care of pmc0 later.
+ *
+ * XXX: potential problems with pmc1.
*/
for (i=1; PMC_IS_LAST(i) == 0; i++) {
if (PMC_IS_IMPL(i) == 0) continue;
ia64_set_pmc(i, PMC_DFL_VAL(i));
}
- for (i=0; PMD_IS_LAST(i); i++) {
+ for (i=0; PMD_IS_LAST(i) == 0; i++) {
if (PMD_IS_IMPL(i) == 0) continue;
ia64_set_pmd(i, 0UL);
}
- put_cpu();
- pfm_freeze_pmu();
+
+ /*
+ * we run with the PMU not frozen at all times
+ */
+ pfm_unfreeze_pmu();
+}
+
+/*
+ * used for debug purposes only
+ */
+void
+dump_pmu_state(void)
+{
+ struct task_struct *task;
+ struct thread_struct *t;
+ pfm_context_t *ctx;
+ unsigned long psr;
+ int i;
+
+ printk("current [%d] %s\n", current->pid, current->comm);
+
+ task = GET_PMU_OWNER();
+ ctx = GET_PMU_CTX();
+
+ printk("owner [%d] ctx=%p\n", task ? task->pid : -1, ctx);
+
+ psr = pfm_get_psr();
+
+ printk("psr.pp=%ld psr.up=%ld\n", (psr >> IA64_PSR_PP_BIT) &0x1UL, (psr >> IA64_PSR_UP_BIT)&0x1UL);
+
+ t = &current->thread;
+
+ for (i=1; PMC_IS_LAST(i) == 0; i++) {
+ if (PMC_IS_IMPL(i) == 0) continue;
+ printk("pmc[%d]=0x%lx tpmc=0x%lx\n", i, ia64_get_pmc(i), t->pmcs[i]);
+ }
+
+ for (i=1; PMD_IS_LAST(i) == 0; i++) {
+ if (PMD_IS_IMPL(i) == 0) continue;
+ printk("pmd[%d]=0x%lx tpmd=0x%lx\n", i, ia64_get_pmd(i), t->pmds[i]);
+ }
+ if (ctx) {
+ printk("ctx_state=%d vaddr=%p addr=%p fd=%d ctx_task=[%d] saved_psr=0x%lx\n",
+ ctx->ctx_state,
+ ctx->ctx_smpl_vaddr,
+ ctx->ctx_smpl_hdr,
+ ctx->ctx_msgq_head,
+ ctx->ctx_msgq_tail,
+ ctx->ctx_saved_psr);
+ }
}
-#else /* !CONFIG_PERFMON */
+/*
+ * called from process.c:copy_thread(). task is new child.
+ */
+void
+pfm_inherit(struct task_struct *task, struct pt_regs *regs)
+{
+ struct thread_struct *thread;
+
+ DPRINT(("perfmon: pfm_inherit clearing state for [%d] current [%d]\n", task->pid, current->pid));
+
+ thread = &task->thread;
+
+ /*
+ * cut links inherited from parent (current)
+ */
+ thread->pfm_context = NULL;
+
+ PFM_SET_WORK_PENDING(task, 0);
+ /*
+ * restore default psr settings
+ */
+ ia64_psr(regs)->pp = ia64_psr(regs)->up = 0;
+ ia64_psr(regs)->sp = 1;
+}
+#else /* !CONFIG_PERFMON */
asmlinkage long
-sys_perfmonctl (int pid, int cmd, void *req, int count, long arg5, long arg6,
- long arg7, long arg8, long stack)
+sys_perfmonctl (int fd, int cmd, void *arg, int count, long arg5, long arg6, long arg7,
+ long arg8, long stack)
{
return -ENOSYS;
}
-
-#endif /* !CONFIG_PERFMON */
+#endif /* CONFIG_PERFMON */
--- /dev/null
+/*
+ * Copyright (C) 2002-2003 Hewlett-Packard Co
+ * Stephane Eranian <eranian@hpl.hp.com>
+ *
+ * This file implements the default sampling buffer format
+ * for the Linux/ia64 perfmon-2 subsystem.
+ */
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/module.h>
+#include <linux/config.h>
+#include <linux/init.h>
+#include <asm/delay.h>
+#include <linux/smp.h>
+
+#include <asm/perfmon.h>
+#include <asm/perfmon_default_smpl.h>
+
+MODULE_AUTHOR("Stephane Eranian <eranian@hpl.hp.com>");
+MODULE_DESCRIPTION("perfmon default sampling format");
+MODULE_LICENSE("GPL");
+
+MODULE_PARM(debug, "i");
+MODULE_PARM_DESC(debug, "debug");
+
+MODULE_PARM(debug_ovfl, "i");
+MODULE_PARM_DESC(debug_ovfl, "debug ovfl");
+
+
+#define DEFAULT_DEBUG 1
+
+#ifdef DEFAULT_DEBUG
+#define DPRINT(a) \
+ do { \
+ if (unlikely(debug >0)) { printk("%s.%d: CPU%d ", __FUNCTION__, __LINE__, smp_processor_id()); printk a; } \
+ } while (0)
+
+#define DPRINT_ovfl(a) \
+ do { \
+ if (unlikely(debug_ovfl >0)) { printk("%s.%d: CPU%d ", __FUNCTION__, __LINE__, smp_processor_id()); printk a; } \
+ } while (0)
+
+#else
+#define DPRINT(a)
+#define DPRINT_ovfl(a)
+#endif
+
+static int debug, debug_ovfl;
+
+static int
+default_validate(struct task_struct *task, unsigned int flags, int cpu, void *data)
+{
+ pfm_default_smpl_arg_t *arg = (pfm_default_smpl_arg_t*)data;
+ int ret = 0;
+
+ if (data == NULL) {
+ DPRINT(("[%d] no argument passed\n", task->pid));
+ return -EINVAL;
+ }
+
+ DPRINT(("[%d] validate flags=0x%x CPU%d\n", task->pid, flags, cpu));
+
+ /*
+ * must hold at least the buffer header + one minimally sized entry
+ */
+ if (arg->buf_size < PFM_DEFAULT_SMPL_MIN_BUF_SIZE) return -EINVAL;
+
+ DPRINT(("buf_size=%lu\n", arg->buf_size));
+
+ return ret;
+}
+
+static int
+default_get_size(struct task_struct *task, unsigned int flags, int cpu, void *data, unsigned long *size)
+{
+ pfm_default_smpl_arg_t *arg = (pfm_default_smpl_arg_t *)data;
+
+ /*
+ * size has been validated in default_validate
+ */
+ *size = arg->buf_size;
+
+ return 0;
+}
+
+static int
+default_init(struct task_struct *task, void *buf, unsigned int flags, int cpu, void *data)
+{
+ pfm_default_smpl_hdr_t *hdr;
+ pfm_default_smpl_arg_t *arg = (pfm_default_smpl_arg_t *)data;
+
+ hdr = (pfm_default_smpl_hdr_t *)buf;
+
+ hdr->hdr_version = PFM_DEFAULT_SMPL_VERSION;
+ hdr->hdr_buf_size = arg->buf_size;
+ hdr->hdr_cur_pos = (void *)((unsigned long)buf)+sizeof(*hdr);
+ hdr->hdr_last_pos = (void *)((unsigned long)buf)+arg->buf_size;
+ hdr->hdr_overflows = 0UL;
+ hdr->hdr_count = 0UL;
+
+ DPRINT(("[%d] buffer=%p buf_size=%lu hdr_size=%lu hdr_version=%u\n",
+ task->pid,
+ buf,
+ hdr->hdr_buf_size,
+ sizeof(*hdr),
+ hdr->hdr_version));
+
+ return 0;
+}
+
+static int
+default_handler(struct task_struct *task, void *buf, pfm_ovfl_arg_t *arg, struct pt_regs *regs)
+{
+ pfm_default_smpl_hdr_t *hdr;
+ pfm_default_smpl_entry_t *ent;
+ void *cur, *last;
+ unsigned long *e;
+ unsigned long ovfl_mask;
+ unsigned long ovfl_notify;
+ unsigned long stamp;
+ unsigned int npmds, i;
+
+ /*
+ * some time stamp
+ */
+ stamp = ia64_get_itc();
+
+ if (unlikely(buf == NULL || arg == NULL|| regs == NULL || task == NULL)) {
+ DPRINT(("[%d] invalid arguments buf=%p arg=%p\n", task->pid, buf, arg));
+ return -EINVAL;
+ }
+
+ hdr = (pfm_default_smpl_hdr_t *)buf;
+ cur = hdr->hdr_cur_pos;
+ last = hdr->hdr_last_pos;
+ ovfl_mask = arg->ovfl_pmds[0];
+ ovfl_notify = arg->ovfl_notify[0];
+
+ /*
+ * check for space against largest possibly entry.
+ * We may waste space at the end of the buffer.
+ */
+ if ((last - cur) < PFM_DEFAULT_MAX_ENTRY_SIZE) goto full;
+
+ npmds = hweight64(arg->smpl_pmds[0]);
+
+ ent = (pfm_default_smpl_entry_t *)cur;
+
+ prefetch(arg->smpl_pmds_values);
+
+ /* position for first pmd */
+ e = (unsigned long *)(ent+1);
+
+ hdr->hdr_count++;
+
+ DPRINT_ovfl(("[%d] count=%lu cur=%p last=%p free_bytes=%lu ovfl_pmds=0x%lx ovfl_notify=0x%lx npmds=%u\n",
+ task->pid,
+ hdr->hdr_count,
+ cur, last,
+ last-cur,
+ ovfl_mask,
+ ovfl_notify, npmds));
+
+ /*
+ * current = task running at the time of the overflow.
+ *
+ * per-task mode:
+ * - this is usually the task being monitored.
+ * Under certain conditions, it might be a different task
+ *
+ * system-wide:
+ * - this is not necessarily the task controlling the session
+ */
+ ent->pid = current->pid;
+ ent->cpu = smp_processor_id();
+ ent->last_reset_val = arg->pmd_last_reset; //pmd[0].reg_last_reset_val;
+
+ /*
+ * where did the fault happen (includes slot number)
+ */
+ ent->ip = regs->cr_iip | ((regs->cr_ipsr >> 41) & 0x3);
+
+ /*
+ * which registers overflowed
+ */
+ ent->ovfl_pmds = ovfl_mask;
+ ent->tstamp = stamp;
+ ent->set = arg->active_set;
+ ent->reserved1 = 0;
+
+ /*
+ * selectively store PMDs in increasing index number
+ */
+ if (npmds) {
+ unsigned long *val = arg->smpl_pmds_values;
+ for(i=0; i < npmds; i++) {
+ *e++ = *val++;
+ }
+ }
+
+ /*
+ * update position for next entry
+ */
+ hdr->hdr_cur_pos = cur + sizeof(*ent) + (npmds << 3);
+
+ /*
+ * keep same ovfl_pmds, ovfl_notify
+ */
+ arg->ovfl_ctrl.notify_user = 0;
+ arg->ovfl_ctrl.block = 0;
+ arg->ovfl_ctrl.stop_monitoring = 0;
+ arg->ovfl_ctrl.reset_pmds = 1;
+
+ return 0;
+full:
+ DPRINT_ovfl(("sampling buffer full free=%lu, count=%lu, ovfl_notify=0x%lx\n", last-cur, hdr->hdr_count, ovfl_notify));
+
+ /*
+ * increment number of buffer overflow.
+ * important to detect duplicate set of samples.
+ */
+ hdr->hdr_overflows++;
+
+ /*
+ * if no notification is needed, then we just reset the buffer index.
+ */
+ if (ovfl_notify == 0UL) {
+ hdr->hdr_count = 0UL;
+ arg->ovfl_ctrl.notify_user = 0;
+ arg->ovfl_ctrl.block = 0;
+ arg->ovfl_ctrl.stop_monitoring = 0;
+ arg->ovfl_ctrl.reset_pmds = 1;
+ } else {
+ /* keep same ovfl_pmds, ovfl_notify */
+ arg->ovfl_ctrl.notify_user = 1;
+ arg->ovfl_ctrl.block = 1;
+ arg->ovfl_ctrl.stop_monitoring = 1;
+ arg->ovfl_ctrl.reset_pmds = 0;
+ }
+ return 0;
+}
+
+static int
+default_restart(struct task_struct *task, pfm_ovfl_ctrl_t *ctrl, void *buf, struct pt_regs *regs)
+{
+ pfm_default_smpl_hdr_t *hdr;
+
+ hdr = (pfm_default_smpl_hdr_t *)buf;
+
+ hdr->hdr_count = 0UL;
+ hdr->hdr_cur_pos = (void *)((unsigned long)buf)+sizeof(*hdr);
+
+ ctrl->stop_monitoring = 0;
+ ctrl->reset_pmds = PFM_PMD_LONG_RESET;
+
+ return 0;
+}
+
+static int
+default_exit(struct task_struct *task, void *buf, struct pt_regs *regs)
+{
+ DPRINT(("[%d] exit(%p)\n", task->pid, buf));
+ return 0;
+}
+
+static pfm_buffer_fmt_t default_fmt={
+ .fmt_name = "default_format",
+ .fmt_uuid = PFM_DEFAULT_SMPL_UUID,
+ .fmt_arg_size = sizeof(pfm_default_smpl_arg_t),
+ .fmt_validate = default_validate,
+ .fmt_getsize = default_get_size,
+ .fmt_init = default_init,
+ .fmt_handler = default_handler,
+ .fmt_restart = default_restart,
+ .fmt_exit = default_exit,
+};
+
+static int __init
+pfm_default_smpl_init_module(void)
+{
+ int ret;
+
+ ret = pfm_register_buffer_fmt(&default_fmt);
+ if (ret == 0) {
+ printk("perfmon_default_smpl: %s v%u.%u registered\n",
+ default_fmt.fmt_name,
+ PFM_DEFAULT_SMPL_VERSION_MAJ,
+ PFM_DEFAULT_SMPL_VERSION_MIN);
+ } else {
+ printk("perfmon_default_smpl: %s cannot register ret=%d\n",
+ default_fmt.fmt_name,
+ ret);
+ }
+
+ return ret;
+}
+
+static void __exit
+pfm_default_smpl_cleanup_module(void)
+{
+ int ret;
+ ret = pfm_unregister_buffer_fmt(default_fmt.fmt_uuid);
+
+ printk("perfmon_default_smpl: unregister %s=%d\n", default_fmt.fmt_name, ret);
+}
+
+module_init(pfm_default_smpl_init_module);
+module_exit(pfm_default_smpl_cleanup_module);
+
/*
- * This file contains the architected PMU register description tables
+ * This file contains the generic PMU register description tables
* and pmc checker used by perfmon.c.
*
- * Copyright (C) 2002 Hewlett Packard Co
+ * Copyright (C) 2002-2003 Hewlett Packard Co
* Stephane Eranian <eranian@hpl.hp.com>
*/
+
+
#define RDEP(x) (1UL<<(x))
#if defined(CONFIG_ITANIUM) || defined (CONFIG_MCKINLEY)
#error "This file should not be used when CONFIG_ITANIUM or CONFIG_MCKINLEY is defined"
#endif
-static pfm_reg_desc_t pmc_gen_desc[PMU_MAX_PMCS]={
+static pfm_reg_desc_t pfm_gen_pmc_desc[PMU_MAX_PMCS]={
/* pmc0 */ { PFM_REG_CONTROL , 0, 0x1UL, -1UL, NULL, NULL, {0UL,0UL, 0UL, 0UL}, {0UL,0UL, 0UL, 0UL}},
/* pmc1 */ { PFM_REG_CONTROL , 0, 0x0UL, -1UL, NULL, NULL, {0UL,0UL, 0UL, 0UL}, {0UL,0UL, 0UL, 0UL}},
/* pmc2 */ { PFM_REG_CONTROL , 0, 0x0UL, -1UL, NULL, NULL, {0UL,0UL, 0UL, 0UL}, {0UL,0UL, 0UL, 0UL}},
{ PFM_REG_END , 0, 0x0UL, -1UL, NULL, NULL, {0,}, {0,}}, /* end marker */
};
-static pfm_reg_desc_t pmd_gen_desc[PMU_MAX_PMDS]={
+static pfm_reg_desc_t pfm_gen_pmd_desc[PMU_MAX_PMDS]={
/* pmd0 */ { PFM_REG_NOTIMPL , 0, 0x0UL, -1UL, NULL, NULL, {0,}, {0,}},
/* pmd1 */ { PFM_REG_NOTIMPL , 0, 0x0UL, -1UL, NULL, NULL, {0,}, {0,}},
/* pmd2 */ { PFM_REG_NOTIMPL , 0, 0x0UL, -1UL, NULL, NULL, {0,}, {0,}},
* impl_pmcs, impl_pmds are computed at runtime to minimize errors!
*/
static pmu_config_t pmu_conf={
- .disabled = 1,
- .ovfl_val = (1UL << 32) - 1,
- .num_ibrs = 8,
- .num_dbrs = 8,
- .pmd_desc = pfm_gen_pmd_desc,
- .pmc_desc = pfm_gen_pmc_desc
+ .pmu_name = "Generic",
+ .pmu_family = 0xff, /* any */
+ .enabled = 0,
+ .ovfl_val = (1UL << 32) - 1,
+ .num_ibrs = 0, /* does not use */
+ .num_dbrs = 0, /* does not use */
+ .pmd_desc = pfm_gen_pmd_desc,
+ .pmc_desc = pfm_gen_pmc_desc
};
+
* This file contains the Itanium PMU register description tables
* and pmc checker used by perfmon.c.
*
- * Copyright (C) 2002 Hewlett Packard Co
+ * Copyright (C) 2002-2003 Hewlett Packard Co
* Stephane Eranian <eranian@hpl.hp.com>
*/
#error "This file is only valid when CONFIG_ITANIUM is defined"
#endif
-static int pfm_ita_pmc_check(struct task_struct *task, unsigned int cnum, unsigned long *val, struct pt_regs *regs);
-static int pfm_write_ibr_dbr(int mode, struct task_struct *task, void *arg, int count, struct pt_regs *regs);
+static int pfm_ita_pmc_check(struct task_struct *task, pfm_context_t *ctx, unsigned int cnum, unsigned long *val, struct pt_regs *regs);
+static int pfm_write_ibr_dbr(int mode, pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs);
static pfm_reg_desc_t pfm_ita_pmc_desc[PMU_MAX_PMCS]={
/* pmc0 */ { PFM_REG_CONTROL , 0, 0x1UL, -1UL, NULL, NULL, {0UL,0UL, 0UL, 0UL}, {0UL,0UL, 0UL, 0UL}},
* impl_pmcs, impl_pmds are computed at runtime to minimize errors!
*/
static pmu_config_t pmu_conf={
- .disabled = 1,
- .ovfl_val = (1UL << 32) - 1,
- .num_ibrs = 8,
- .num_dbrs = 8,
- .pmd_desc = pfm_ita_pmd_desc,
- .pmc_desc = pfm_ita_pmc_desc
+ .pmu_name = "Itanium",
+ .pmu_family = 0x7,
+ .enabled = 0,
+ .ovfl_val = (1UL << 32) - 1,
+ .pmd_desc = pfm_ita_pmd_desc,
+ .pmc_desc = pfm_ita_pmc_desc,
+ .num_ibrs = 8,
+ .num_dbrs = 8,
+ .use_rr_dbregs = 1 /* debug registers are used for range restrictions */
};
-
static int
-pfm_ita_pmc_check(struct task_struct *task, unsigned int cnum, unsigned long *val, struct pt_regs *regs)
+pfm_ita_pmc_check(struct task_struct *task, pfm_context_t *ctx, unsigned int cnum, unsigned long *val, struct pt_regs *regs)
{
- pfm_context_t *ctx = task->thread.pfm_context;
int ret;
/*
* we must clear the (instruction) debug registers if pmc13.ta bit is cleared
- * before they are written (fl_using_dbreg==0) to avoid picking up stale information.
+ * before they are written (fl_using_dbreg==0) to avoid picking up stale information.
*/
if (cnum == 13 && ((*val & 0x1) == 0UL) && ctx->ctx_fl_using_dbreg == 0) {
/* don't mix debug with perfmon */
- if ((task->thread.flags & IA64_THREAD_DBG_VALID) != 0) return -EINVAL;
+ if (task && (task->thread.flags & IA64_THREAD_DBG_VALID) != 0) return -EINVAL;
- /*
+ /*
* a count of 0 will mark the debug registers as in use and also
* ensure that they are properly cleared.
*/
- ret = pfm_write_ibr_dbr(1, task, NULL, 0, regs);
+ ret = pfm_write_ibr_dbr(1, ctx, NULL, 0, regs);
if (ret) return ret;
}
/*
* we must clear the (data) debug registers if pmc11.pt bit is cleared
- * before they are written (fl_using_dbreg==0) to avoid picking up stale information.
+ * before they are written (fl_using_dbreg==0) to avoid picking up stale information.
*/
if (cnum == 11 && ((*val >> 28)& 0x1) == 0 && ctx->ctx_fl_using_dbreg == 0) {
/* don't mix debug with perfmon */
- if ((task->thread.flags & IA64_THREAD_DBG_VALID) != 0) return -EINVAL;
+ if (task && (task->thread.flags & IA64_THREAD_DBG_VALID) != 0) return -EINVAL;
- /*
+ /*
* a count of 0 will mark the debug registers as in use and also
* ensure that they are properly cleared.
*/
- ret = pfm_write_ibr_dbr(0, task, NULL, 0, regs);
+ ret = pfm_write_ibr_dbr(0, ctx, NULL, 0, regs);
if (ret) return ret;
}
return 0;
* This file contains the McKinley PMU register description tables
* and pmc checker used by perfmon.c.
*
- * Copyright (C) 2002 Hewlett Packard Co
+ * Copyright (C) 2002-2003 Hewlett Packard Co
* Stephane Eranian <eranian@hpl.hp.com>
*/
#error "This file is only valid when CONFIG_MCKINLEY is defined"
#endif
-static int pfm_mck_reserved(struct task_struct *task, unsigned int cnum, unsigned long *val, struct pt_regs *regs);
-static int pfm_mck_pmc_check(struct task_struct *task, unsigned int cnum, unsigned long *val, struct pt_regs *regs);
-static int pfm_write_ibr_dbr(int mode, struct task_struct *task, void *arg, int count, struct pt_regs *regs);
+static int pfm_mck_pmc_check(struct task_struct *task, pfm_context_t *ctx, unsigned int cnum, unsigned long *val, struct pt_regs *regs);
+static int pfm_write_ibr_dbr(int mode, pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs);
static pfm_reg_desc_t pfm_mck_pmc_desc[PMU_MAX_PMCS]={
/* pmc0 */ { PFM_REG_CONTROL , 0, 0x1UL, -1UL, NULL, NULL, {0UL,0UL, 0UL, 0UL}, {0UL,0UL, 0UL, 0UL}},
/* pmc2 */ { PFM_REG_CONTROL , 0, 0x0UL, -1UL, NULL, NULL, {0UL,0UL, 0UL, 0UL}, {0UL,0UL, 0UL, 0UL}},
/* pmc3 */ { PFM_REG_CONTROL , 0, 0x0UL, -1UL, NULL, NULL, {0UL,0UL, 0UL, 0UL}, {0UL,0UL, 0UL, 0UL}},
/* pmc4 */ { PFM_REG_COUNTING, 6, 0x0000000000800000UL, 0xfffff7fUL, NULL, pfm_mck_pmc_check, {RDEP(4),0UL, 0UL, 0UL}, {0UL,0UL, 0UL, 0UL}},
-/* pmc5 */ { PFM_REG_COUNTING, 6, 0x0UL, 0xfffff7fUL, NULL, pfm_mck_reserved, {RDEP(5),0UL, 0UL, 0UL}, {0UL,0UL, 0UL, 0UL}},
-/* pmc6 */ { PFM_REG_COUNTING, 6, 0x0UL, 0xfffff7fUL, NULL, pfm_mck_reserved, {RDEP(6),0UL, 0UL, 0UL}, {0UL,0UL, 0UL, 0UL}},
-/* pmc7 */ { PFM_REG_COUNTING, 6, 0x0UL, 0xfffff7fUL, NULL, pfm_mck_reserved, {RDEP(7),0UL, 0UL, 0UL}, {0UL,0UL, 0UL, 0UL}},
-/* pmc8 */ { PFM_REG_CONFIG , 0, 0xffffffff3fffffffUL, 0xffffffff3fffffffUL, NULL, pfm_mck_pmc_check, {0UL,0UL, 0UL, 0UL}, {0UL,0UL, 0UL, 0UL}},
-/* pmc9 */ { PFM_REG_CONFIG , 0, 0xffffffff3ffffffcUL, 0xffffffff3fffffffUL, NULL, pfm_mck_pmc_check, {0UL,0UL, 0UL, 0UL}, {0UL,0UL, 0UL, 0UL}},
-/* pmc10 */ { PFM_REG_MONITOR , 4, 0x0UL, 0xffffUL, NULL, pfm_mck_reserved, {RDEP(0)|RDEP(1),0UL, 0UL, 0UL}, {0UL,0UL, 0UL, 0UL}},
-/* pmc11 */ { PFM_REG_MONITOR , 6, 0x0UL, 0x30f01cf, NULL, pfm_mck_reserved, {RDEP(2)|RDEP(3)|RDEP(17),0UL, 0UL, 0UL}, {0UL,0UL, 0UL, 0UL}},
-/* pmc12 */ { PFM_REG_MONITOR , 6, 0x0UL, 0xffffUL, NULL, pfm_mck_reserved, {RDEP(8)|RDEP(9)|RDEP(10)|RDEP(11)|RDEP(12)|RDEP(13)|RDEP(14)|RDEP(15)|RDEP(16),0UL, 0UL, 0UL}, {0UL,0UL, 0UL, 0UL}},
+/* pmc5 */ { PFM_REG_COUNTING, 6, 0x0UL, 0xfffff7fUL, NULL, pfm_mck_pmc_check, {RDEP(5),0UL, 0UL, 0UL}, {0UL,0UL, 0UL, 0UL}},
+/* pmc6 */ { PFM_REG_COUNTING, 6, 0x0UL, 0xfffff7fUL, NULL, pfm_mck_pmc_check, {RDEP(6),0UL, 0UL, 0UL}, {0UL,0UL, 0UL, 0UL}},
+/* pmc7 */ { PFM_REG_COUNTING, 6, 0x0UL, 0xfffff7fUL, NULL, pfm_mck_pmc_check, {RDEP(7),0UL, 0UL, 0UL}, {0UL,0UL, 0UL, 0UL}},
+/* pmc8 */ { PFM_REG_CONFIG , 0, 0xffffffff3fffffffUL, 0xffffffff3ffffffbUL, NULL, pfm_mck_pmc_check, {0UL,0UL, 0UL, 0UL}, {0UL,0UL, 0UL, 0UL}},
+/* pmc9 */ { PFM_REG_CONFIG , 0, 0xffffffff3ffffffcUL, 0xffffffff3ffffffbUL, NULL, pfm_mck_pmc_check, {0UL,0UL, 0UL, 0UL}, {0UL,0UL, 0UL, 0UL}},
+/* pmc10 */ { PFM_REG_MONITOR , 4, 0x0UL, 0xffffUL, NULL, pfm_mck_pmc_check, {RDEP(0)|RDEP(1),0UL, 0UL, 0UL}, {0UL,0UL, 0UL, 0UL}},
+/* pmc11 */ { PFM_REG_MONITOR , 6, 0x0UL, 0x30f01cf, NULL, pfm_mck_pmc_check, {RDEP(2)|RDEP(3)|RDEP(17),0UL, 0UL, 0UL}, {0UL,0UL, 0UL, 0UL}},
+/* pmc12 */ { PFM_REG_MONITOR , 6, 0x0UL, 0xffffUL, NULL, pfm_mck_pmc_check, {RDEP(8)|RDEP(9)|RDEP(10)|RDEP(11)|RDEP(12)|RDEP(13)|RDEP(14)|RDEP(15)|RDEP(16),0UL, 0UL, 0UL}, {0UL,0UL, 0UL, 0UL}},
/* pmc13 */ { PFM_REG_CONFIG , 0, 0x00002078fefefefeUL, 0x1e00018181818UL, NULL, pfm_mck_pmc_check, {0UL,0UL, 0UL, 0UL}, {0UL,0UL, 0UL, 0UL}},
/* pmc14 */ { PFM_REG_CONFIG , 0, 0x0db60db60db60db6UL, 0x2492UL, NULL, pfm_mck_pmc_check, {0UL,0UL, 0UL, 0UL}, {0UL,0UL, 0UL, 0UL}},
-/* pmc15 */ { PFM_REG_CONFIG , 0, 0x00000000fffffff0UL, 0xfUL, NULL, pfm_mck_reserved, {0UL,0UL, 0UL, 0UL}, {0UL,0UL, 0UL, 0UL}},
+/* pmc15 */ { PFM_REG_CONFIG , 0, 0x00000000fffffff0UL, 0xfUL, NULL, pfm_mck_pmc_check, {0UL,0UL, 0UL, 0UL}, {0UL,0UL, 0UL, 0UL}},
{ PFM_REG_END , 0, 0x0UL, -1UL, NULL, NULL, {0,}, {0,}}, /* end marker */
};
* impl_pmcs, impl_pmds are computed at runtime to minimize errors!
*/
static pmu_config_t pmu_conf={
- .disabled = 1,
- .ovfl_val = (1UL << 47) - 1,
- .num_ibrs = 8,
- .num_dbrs = 8,
- .pmd_desc = pfm_mck_pmd_desc,
- .pmc_desc = pfm_mck_pmc_desc
+ .pmu_name = "Itanium 2",
+ .pmu_family = 0x1f,
+ .enabled = 0,
+ .ovfl_val = (1UL << 47) - 1,
+ .pmd_desc = pfm_mck_pmd_desc,
+ .pmc_desc = pfm_mck_pmc_desc,
+ .num_ibrs = 8,
+ .num_dbrs = 8,
+ .use_rr_dbregs = 1 /* debug registers are used for range restrictions */
};
-
/*
* PMC reserved fields must have their power-up values preserved
*/
static int
-pfm_mck_reserved(struct task_struct *task, unsigned int cnum, unsigned long *val, struct pt_regs *regs)
+pfm_mck_reserved(unsigned int cnum, unsigned long *val, struct pt_regs *regs)
{
unsigned long tmp1, tmp2, ival = *val;
*val = tmp1 | tmp2;
- DBprintk(("pmc[%d]=0x%lx, mask=0x%lx, reset=0x%lx, val=0x%lx\n",
- cnum, ival, PMC_RSVD_MASK(cnum), PMC_DFL_VAL(cnum), *val));
+ DPRINT(("pmc[%d]=0x%lx, mask=0x%lx, reset=0x%lx, val=0x%lx\n",
+ cnum, ival, PMC_RSVD_MASK(cnum), PMC_DFL_VAL(cnum), *val));
return 0;
}
+/*
+ * task can be NULL if the context is unloaded
+ */
static int
-pfm_mck_pmc_check(struct task_struct *task, unsigned int cnum, unsigned long *val, struct pt_regs *regs)
+pfm_mck_pmc_check(struct task_struct *task, pfm_context_t *ctx, unsigned int cnum, unsigned long *val, struct pt_regs *regs)
{
- struct thread_struct *th = &task->thread;
- pfm_context_t *ctx = task->thread.pfm_context;
int ret = 0, check_case1 = 0;
unsigned long val8 = 0, val14 = 0, val13 = 0;
/* first preserve the reserved fields */
- pfm_mck_reserved(task, cnum, val, regs);
+ pfm_mck_reserved(cnum, val, regs);
+
+ /* sanity check */
+ if (ctx == NULL) return -EINVAL;
/*
- * we must clear the debug registers if any pmc13.ena_dbrpX bit is enabled
- * before they are written (fl_using_dbreg==0) to avoid picking up stale information.
+ * we must clear the debug registers if any pmc13.ena_dbrpX bit is enabled
+ * before they are written (fl_using_dbreg==0) to avoid picking up stale information.
*/
if (cnum == 13 && (*val & (0xfUL << 45)) && ctx->ctx_fl_using_dbreg == 0) {
/* don't mix debug with perfmon */
- if ((task->thread.flags & IA64_THREAD_DBG_VALID) != 0) return -EINVAL;
+ if (task && (task->thread.flags & IA64_THREAD_DBG_VALID) != 0) return -EINVAL;
- /*
+ /*
* a count of 0 will mark the debug registers as in use and also
* ensure that they are properly cleared.
*/
- ret = pfm_write_ibr_dbr(1, task, NULL, 0, regs);
+ ret = pfm_write_ibr_dbr(1, ctx, NULL, 0, regs);
if (ret) return ret;
}
- /*
- * we must clear the (instruction) debug registers if any pmc14.ibrpX bit is enabled
- * before they are (fl_using_dbreg==0) to avoid picking up stale information.
+ /*
+ * we must clear the (instruction) debug registers if any pmc14.ibrpX bit is enabled
+ * before they are (fl_using_dbreg==0) to avoid picking up stale information.
*/
if (cnum == 14 && ((*val & 0x2222) != 0x2222) && ctx->ctx_fl_using_dbreg == 0) {
/* don't mix debug with perfmon */
- if ((task->thread.flags & IA64_THREAD_DBG_VALID) != 0) return -EINVAL;
+ if (task && (task->thread.flags & IA64_THREAD_DBG_VALID) != 0) return -EINVAL;
- /*
+ /*
* a count of 0 will mark the debug registers as in use and also
* ensure that they are properly cleared.
*/
- ret = pfm_write_ibr_dbr(0, task, NULL, 0, regs);
+ ret = pfm_write_ibr_dbr(0, ctx, NULL, 0, regs);
if (ret) return ret;
}
case 4: *val |= 1UL << 23; /* force power enable bit */
break;
case 8: val8 = *val;
- val13 = th->pmc[13];
- val14 = th->pmc[14];
+ val13 = ctx->ctx_pmcs[13];
+ val14 = ctx->ctx_pmcs[14];
check_case1 = 1;
break;
- case 13: val8 = th->pmc[8];
+ case 13: val8 = ctx->ctx_pmcs[8];
val13 = *val;
- val14 = th->pmc[14];
+ val14 = ctx->ctx_pmcs[14];
check_case1 = 1;
break;
- case 14: val8 = th->pmc[13];
- val13 = th->pmc[13];
+ case 14: val8 = ctx->ctx_pmcs[13];
+ val13 = ctx->ctx_pmcs[13];
val14 = *val;
check_case1 = 1;
break;
&& ((((val14>>1) & 0x3) == 0x2 || ((val14>>1) & 0x3) == 0x0)
||(((val14>>4) & 0x3) == 0x2 || ((val14>>4) & 0x3) == 0x0));
- if (ret) printk(KERN_DEBUG "perfmon: failure check_case1\n");
+ if (ret) printk("perfmon: failure check_case1\n");
}
return ret ? -EINVAL : 0;
#include <asm/delay.h>
#include <asm/elf.h>
-#include <asm/perfmon.h>
#include <asm/pgalloc.h>
#include <asm/processor.h>
#include <asm/sal.h>
#include <asm/unwind.h>
#include <asm/user.h>
-#ifdef CONFIG_IA64_SGI_SN
-#include <asm/sn/idle.h>
-#endif
-
#ifdef CONFIG_PERFMON
# include <asm/perfmon.h>
#endif
#include "sigframe.h"
+void (*ia64_mark_idle)(int);
+
+
void
ia64_do_show_stack (struct unw_frame_info *info, void *arg)
{
unsigned long ip, sp, bsp;
- char buf[80]; /* don't make it so big that it overflows the stack! */
+ char buf[128]; /* don't make it so big that it overflows the stack! */
printk("\nCall Trace:\n");
do {
unw_get_sp(info, &sp);
unw_get_bsp(info, &bsp);
- snprintf(buf, sizeof(buf), " [<%016lx>] %%s\n\t\t\t\tsp=%016lx bsp=%016lx\n",
+ snprintf(buf, sizeof(buf),
+ " [<%016lx>] %%s\n"
+ " sp=%016lx bsp=%016lx\n",
ip, sp, bsp);
print_symbol(buf, ip);
} while (unw_unwind(info) >= 0);
}
void
-show_trace_task (struct task_struct *task)
-{
- show_stack(task);
-}
-
-void
-show_stack (struct task_struct *task)
+show_stack (struct task_struct *task, unsigned long *sp)
{
if (!task)
unw_init_running(ia64_do_show_stack, 0);
void
dump_stack (void)
{
- show_stack(NULL);
+ show_stack(NULL, NULL);
}
void
regs->ar_rnat, regs->ar_bspstore, regs->pr);
printk("ldrs: %016lx ccv : %016lx fpsr: %016lx\n",
regs->loadrs, regs->ar_ccv, regs->ar_fpsr);
+ printk("csd : %016lx ssd : %016lx\n", regs->ar_csd, regs->ar_ssd);
printk("b0 : %016lx b6 : %016lx b7 : %016lx\n", regs->b0, regs->b6, regs->b7);
printk("f6 : %05lx%016lx f7 : %05lx%016lx\n",
regs->f6.u.bits[1], regs->f6.u.bits[0],
printk("f8 : %05lx%016lx f9 : %05lx%016lx\n",
regs->f8.u.bits[1], regs->f8.u.bits[0],
regs->f9.u.bits[1], regs->f9.u.bits[0]);
+ printk("f10 : %05lx%016lx f11 : %05lx%016lx\n",
+ regs->f10.u.bits[1], regs->f10.u.bits[0],
+ regs->f11.u.bits[1], regs->f11.u.bits[0]);
printk("r1 : %016lx r2 : %016lx r3 : %016lx\n", regs->r1, regs->r2, regs->r3);
printk("r8 : %016lx r9 : %016lx r10 : %016lx\n", regs->r8, regs->r9, regs->r10);
((i == sof - 1) || (i % 3) == 2) ? "\n" : " ");
}
} else
- show_stack(NULL);
+ show_stack(NULL, NULL);
}
void
do_notify_resume_user (sigset_t *oldset, struct sigscratch *scr, long in_syscall)
{
-#ifdef CONFIG_FSYS
if (fsys_mode(current, &scr->pt)) {
/* defer signal-handling etc. until we return to privilege-level 0. */
if (!ia64_psr(&scr->pt)->lp)
ia64_psr(&scr->pt)->lp = 1;
return;
}
-#endif
#ifdef CONFIG_PERFMON
- if (current->thread.pfm_ovfl_block_reset)
- pfm_ovfl_block_reset();
+ if (current->thread.pfm_needs_checking)
+ pfm_handle_work();
#endif
/* deal with pending signal delivery */
void __attribute__((noreturn))
cpu_idle (void *unused)
{
+ void (*mark_idle)(int) = ia64_mark_idle;
+
/* endless idle loop with no priority at all */
while (1) {
void (*idle)(void) = pm_idle;
#endif
while (!need_resched()) {
-#ifdef CONFIG_IA64_SGI_SN
- snidle();
-#endif
+ if (mark_idle)
+ (*mark_idle)(1);
(*idle)();
}
-#ifdef CONFIG_IA64_SGI_SN
- snidleoff();
-#endif
+ if (mark_idle)
+ (*mark_idle)(0);
#ifdef CONFIG_SMP
normal_xtp();
# define THREAD_FLAGS_TO_SET 0
p->thread.flags = ((current->thread.flags & ~THREAD_FLAGS_TO_CLEAR)
| THREAD_FLAGS_TO_SET);
- p->thread.last_fph_cpu = -1;
+ ia64_drop_fpu(p); /* don't pick up stale state from a CPU's fph */
#ifdef CONFIG_IA32_SUPPORT
/*
* If we're cloning an IA32 task then save the IA32 extra
#endif
#ifdef CONFIG_PERFMON
- /*
- * reset notifiers and owner check (may not have a perfmon context)
- */
- atomic_set(&p->thread.pfm_notifiers_check, 0);
- atomic_set(&p->thread.pfm_owners_check, 0);
- /* clear list of sampling buffer to free for new task */
- p->thread.pfm_smpl_buf_list = NULL;
-
if (current->thread.pfm_context)
- retval = pfm_inherit(p, child_ptregs);
+ pfm_inherit(p, child_ptregs);
#endif
return retval;
}
dst[52] = pt->ar_pfs; /* UNW_AR_PFS is == to pt->cr_ifs for interrupt frames */
unw_get_ar(info, UNW_AR_LC, &dst[53]);
unw_get_ar(info, UNW_AR_EC, &dst[54]);
+ unw_get_ar(info, UNW_AR_CSD, &dst[55]);
+ unw_get_ar(info, UNW_AR_SSD, &dst[56]);
}
void
kernel_thread (int (*fn)(void *), void *arg, unsigned long flags)
{
struct task_struct *parent = current;
- int result, tid;
+ int result;
+ pid_t tid;
tid = clone(flags | CLONE_VM | CLONE_UNTRACED, 0);
if (parent != current) {
{
/* drop floating-point and debug-register state if it exists: */
current->thread.flags &= ~(IA64_THREAD_FPH_VALID | IA64_THREAD_DBG_VALID);
-
-#ifndef CONFIG_SMP
- if (ia64_get_fpu_owner() == current)
- ia64_set_fpu_owner(0);
-#endif
+ ia64_drop_fpu(current);
}
-#ifdef CONFIG_PERFMON
-/*
- * by the time we get here, the task is detached from the tasklist. This is important
- * because it means that no other tasks can ever find it as a notified task, therfore there
- * is no race condition between this code and let's say a pfm_context_create().
- * Conversely, the pfm_cleanup_notifiers() cannot try to access a task's pfm context if this
- * other task is in the middle of its own pfm_context_exit() because it would already be out of
- * the task list. Note that this case is very unlikely between a direct child and its parents
- * (if it is the notified process) because of the way the exit is notified via SIGCHLD.
- */
-
-void
-release_thread (struct task_struct *task)
-{
- if (task->thread.pfm_context)
- pfm_context_exit(task);
-
- if (atomic_read(&task->thread.pfm_notifiers_check) > 0)
- pfm_cleanup_notifiers(task);
-
- if (atomic_read(&task->thread.pfm_owners_check) > 0)
- pfm_cleanup_owners(task);
-
- if (task->thread.pfm_smpl_buf_list)
- pfm_cleanup_smpl_buf(task);
-}
-#endif
-
/*
* Clean up state associated with current thread. This is called when
* the thread calls exit().
void
exit_thread (void)
{
-#ifndef CONFIG_SMP
- if (ia64_get_fpu_owner() == current)
- ia64_set_fpu_owner(0);
-#endif
+ ia64_drop_fpu(current);
#ifdef CONFIG_PERFMON
/* if needed, stop monitoring and flush state to perfmon context */
- if (current->thread.pfm_context)
- pfm_flush_regs(current);
+ if (current->thread.pfm_context)
+ pfm_exit_thread(current);
/* free debug register resources */
if (current->thread.flags & IA64_THREAD_DBG_VALID)
pm_power_off();
machine_halt();
}
-
-void __init
-init_task_struct_cache (void)
-{
-}
-
-struct task_struct *
-dup_task_struct(struct task_struct *orig)
-{
- struct task_struct *tsk;
-
- tsk = (void *) __get_free_pages(GFP_KERNEL, KERNEL_STACK_SIZE_ORDER);
- if (!tsk)
- return NULL;
-
- memcpy(tsk, orig, sizeof(struct task_struct) + sizeof(struct thread_info));
- tsk->thread_info = (struct thread_info *) ((char *) tsk + IA64_TASK_SIZE);
- atomic_set(&tsk->usage, 2);
- return tsk;
-}
-
-void
-free_task_struct (struct task_struct *tsk)
-{
- free_pages((unsigned long) tsk, KERNEL_STACK_SIZE_ORDER);
-}
*/
static unsigned long
get_rnat (struct pt_regs *pt, struct switch_stack *sw,
- unsigned long *krbs, unsigned long *urnat_addr)
+ unsigned long *krbs, unsigned long *urnat_addr, unsigned long *urbs_end)
{
- unsigned long rnat0 = 0, rnat1 = 0, urnat = 0, *slot0_kaddr, umask = 0UL;
+ unsigned long rnat0 = 0, rnat1 = 0, urnat = 0, *slot0_kaddr, umask = 0, mask, m;
unsigned long *kbsp, *ubspstore, *rnat0_kaddr, *rnat1_kaddr, shift;
- long num_regs;
+ long num_regs, nbits;
kbsp = (unsigned long *) sw->ar_bspstore;
ubspstore = (unsigned long *) pt->ar_bspstore;
+
+ if (urbs_end < urnat_addr)
+ nbits = ia64_rse_num_regs(urnat_addr - 63, urbs_end);
+ else
+ nbits = 63;
+ mask = (1UL << nbits) - 1;
/*
* First, figure out which bit number slot 0 in user-land maps to in the kernel
* rnat. Do this by figuring out how many register slots we're beyond the user's
if (ubspstore + 63 > urnat_addr) {
/* some bits need to be merged in from pt->ar_rnat */
- umask = ((1UL << ia64_rse_slot_num(ubspstore)) - 1);
+ umask = ((1UL << ia64_rse_slot_num(ubspstore)) - 1) & mask;
urnat = (pt->ar_rnat & umask);
+ mask &= ~umask;
+ if (!mask)
+ return urnat;
}
- if (rnat0_kaddr >= kbsp) {
+
+ m = mask << shift;
+ if (rnat0_kaddr >= kbsp)
rnat0 = sw->ar_rnat;
- } else if (rnat0_kaddr > krbs) {
+ else if (rnat0_kaddr > krbs)
rnat0 = *rnat0_kaddr;
- }
- if (rnat1_kaddr >= kbsp) {
+ urnat |= (rnat0 & m) >> shift;
+
+ m = mask >> (63 - shift);
+ if (rnat1_kaddr >= kbsp)
rnat1 = sw->ar_rnat;
- } else if (rnat1_kaddr > krbs) {
+ else if (rnat1_kaddr > krbs)
rnat1 = *rnat1_kaddr;
- }
- urnat |= ((rnat1 << (63 - shift)) | (rnat0 >> shift)) & ~umask;
+ urnat |= (rnat1 & m) << (63 - shift);
return urnat;
}
*/
static void
put_rnat (struct pt_regs *pt, struct switch_stack *sw,
- unsigned long *krbs, unsigned long *urnat_addr, unsigned long urnat)
+ unsigned long *krbs, unsigned long *urnat_addr, unsigned long urnat,
+ unsigned long *urbs_end)
{
unsigned long rnat0 = 0, rnat1 = 0, *slot0_kaddr, umask = 0, mask, m;
- unsigned long *kbsp, *ubspstore, *rnat0_kaddr, *rnat1_kaddr, shift, slot, ndirty;
+ unsigned long *kbsp, *ubspstore, *rnat0_kaddr, *rnat1_kaddr, shift;
long num_regs, nbits;
- ndirty = ia64_rse_num_regs(krbs, krbs + (pt->loadrs >> 19));
- nbits = ndirty % 63;
-
kbsp = (unsigned long *) sw->ar_bspstore;
ubspstore = (unsigned long *) pt->ar_bspstore;
+
+ if (urbs_end < urnat_addr)
+ nbits = ia64_rse_num_regs(urnat_addr - 63, urbs_end);
+ else
+ nbits = 63;
+ mask = (1UL << nbits) - 1;
+
/*
* First, figure out which bit number slot 0 in user-land maps to in the kernel
* rnat. Do this by figuring out how many register slots we're beyond the user's
* backingstore and then computing the equivalent address in kernel space.
*/
- num_regs = (long) ia64_rse_num_regs(ubspstore, urnat_addr + 1);
+ num_regs = ia64_rse_num_regs(ubspstore, urnat_addr + 1);
slot0_kaddr = ia64_rse_skip_regs(krbs, num_regs);
shift = ia64_rse_slot_num(slot0_kaddr);
rnat1_kaddr = ia64_rse_rnat_addr(slot0_kaddr);
rnat0_kaddr = rnat1_kaddr - 64;
-printk("%s: ubspstore=%p urnat_addr=%p\n", __FUNCTION__, ubspstore, urnat_addr);
if (ubspstore + 63 > urnat_addr) {
/* some bits need to be place in pt->ar_rnat: */
- slot = ia64_rse_slot_num(ubspstore);
- umask = ((1UL << slot) - 1);
+ umask = ((1UL << ia64_rse_slot_num(ubspstore)) - 1) & mask;
pt->ar_rnat = (pt->ar_rnat & ~umask) | (urnat & umask);
- nbits -= slot;
- if (nbits <= 0)
+ mask &= ~umask;
+ if (!mask)
return;
}
- mask = (1UL << nbits) - 1;
/*
* Note: Section 11.1 of the EAS guarantees that bit 63 of an
* rnat slot is ignored. so we don't have to clear it here.
*/
rnat0 = (urnat << shift);
m = mask << shift;
-printk("%s: rnat0=%016lx, m=%016lx, rnat0_kaddr=%p kbsp=%p\n", __FUNCTION__, rnat0, m, rnat0_kaddr, kbsp);
- if (rnat0_kaddr >= kbsp) {
+ if (rnat0_kaddr >= kbsp)
sw->ar_rnat = (sw->ar_rnat & ~m) | (rnat0 & m);
- } else if (rnat0_kaddr > krbs) {
+ else if (rnat0_kaddr > krbs)
*rnat0_kaddr = ((*rnat0_kaddr & ~m) | (rnat0 & m));
- }
rnat1 = (urnat >> (63 - shift));
m = mask >> (63 - shift);
-printk("%s: rnat1=%016lx, m=%016lx, rnat1_kaddr=%p kbsp=%p\n", __FUNCTION__, rnat1, m, rnat1_kaddr, kbsp);
- if (rnat1_kaddr >= kbsp) {
+ if (rnat1_kaddr >= kbsp)
sw->ar_rnat = (sw->ar_rnat & ~m) | (rnat1 & m);
- } else if (rnat1_kaddr > krbs) {
+ else if (rnat1_kaddr > krbs)
*rnat1_kaddr = ((*rnat1_kaddr & ~m) | (rnat1 & m));
- }
}
/*
* read the corresponding bits in the kernel RBS.
*/
rnat_addr = ia64_rse_rnat_addr(laddr);
- ret = get_rnat(child_regs, child_stack, krbs, rnat_addr);
+ ret = get_rnat(child_regs, child_stack, krbs, rnat_addr, urbs_end);
if (laddr == rnat_addr) {
/* return NaT collection word itself */
* => write the corresponding bits in the kernel RBS.
*/
if (ia64_rse_is_rnat_slot(laddr))
- put_rnat(child_regs, child_stack, krbs, laddr, val);
+ put_rnat(child_regs, child_stack, krbs, laddr, val, urbs_end);
else {
if (laddr < urbs_end) {
regnum = ia64_rse_num_regs(bspstore, laddr);
ia64_flush_fph (struct task_struct *task)
{
struct ia64_psr *psr = ia64_psr(ia64_task_regs(task));
-#ifdef CONFIG_SMP
- struct task_struct *fpu_owner = current;
-#else
- struct task_struct *fpu_owner = ia64_get_fpu_owner();
-#endif
- if (task == fpu_owner && psr->mfh) {
+ if (ia64_is_local_fpu_owner(task) && psr->mfh) {
psr->mfh = 0;
- ia64_save_fpu(&task->thread.fph[0]);
task->thread.flags |= IA64_THREAD_FPH_VALID;
- task->thread.last_fph_cpu = smp_processor_id();
+ ia64_save_fpu(&task->thread.fph[0]);
}
}
ia64_flush_fph(task);
if (!(task->thread.flags & IA64_THREAD_FPH_VALID)) {
task->thread.flags |= IA64_THREAD_FPH_VALID;
- task->thread.last_fph_cpu = -1; /* force reload */
memset(&task->thread.fph, 0, sizeof(task->thread.fph));
}
- if (ia64_get_fpu_owner() == task)
- ia64_set_fpu_owner(0);
+ ia64_drop_fpu(task);
psr->dfh = 1;
}
else
ia64_flush_fph(child);
ptr = (unsigned long *) ((unsigned long) &child->thread.fph + addr);
- } else if (addr >= PT_F10 && addr < PT_F15 + 16) {
+ } else if ((addr >= PT_F10) && (addr < PT_F11 + 16)) {
+ /* scratch registers untouched by kernel (saved in pt_regs) */
+ ptr = (unsigned long *)
+ ((long) pt + offsetof(struct pt_regs, f10) + addr - PT_F10);
+ } else if (addr >= PT_F12 && addr < PT_F15 + 16) {
/* scratch registers untouched by kernel (saved in switch_stack) */
- ptr = (unsigned long *) ((long) sw + addr - PT_NAT_BITS);
+ ptr = (unsigned long *) ((long) sw + (addr - PT_NAT_BITS - 32));
} else if (addr < PT_AR_LC + 8) {
/* preserved state: */
unsigned long nat_bits, scratch_unat, dummy = 0;
else
return ia64_peek(child, sw, urbs_end, rnat_addr, data);
- case PT_R1: case PT_R2: case PT_R3:
+ case PT_R1:
+ ptr = (unsigned long *) ((long) pt + offsetof(struct pt_regs, r1));
+ break;
+
+ case PT_R2: case PT_R3:
+ ptr = (unsigned long *)
+ ((long) pt + offsetof(struct pt_regs, r2) + addr - PT_R2);
+ break;
case PT_R8: case PT_R9: case PT_R10: case PT_R11:
- case PT_R12: case PT_R13: case PT_R14: case PT_R15:
+ ptr = (unsigned long *)
+ ((long) pt + offsetof(struct pt_regs, r8)+ addr - PT_R8);
+ break;
+ case PT_R12: case PT_R13:
+ ptr = (unsigned long *)
+ ((long) pt + offsetof(struct pt_regs, r12)+ addr - PT_R12);
+ break;
+ case PT_R14:
+ ptr = (unsigned long *) ((long) pt + offsetof(struct pt_regs, r14));
+ break;
+ case PT_R15:
+ ptr = (unsigned long *) ((long) pt + offsetof(struct pt_regs, r15));
+ break;
case PT_R16: case PT_R17: case PT_R18: case PT_R19:
case PT_R20: case PT_R21: case PT_R22: case PT_R23:
case PT_R24: case PT_R25: case PT_R26: case PT_R27:
case PT_R28: case PT_R29: case PT_R30: case PT_R31:
- case PT_B0: case PT_B6: case PT_B7:
+ ptr = (unsigned long *)
+ ((long) pt + offsetof(struct pt_regs, r16) + addr - PT_R16);
+ break;
+ case PT_B0:
+ ptr = (unsigned long *) ((long) pt + offsetof(struct pt_regs, b0));
+ break;
+ case PT_B6:
+ ptr = (unsigned long *) ((long) pt + offsetof(struct pt_regs, b6));
+ break;
+ case PT_B7:
+ ptr = (unsigned long *) ((long) pt + offsetof(struct pt_regs, b7));
+ break;
case PT_F6: case PT_F6+8: case PT_F7: case PT_F7+8:
case PT_F8: case PT_F8+8: case PT_F9: case PT_F9+8:
+ ptr = (unsigned long *)
+ ((long) pt + offsetof(struct pt_regs, f6) + addr - PT_F6);
+ break;
case PT_AR_BSPSTORE:
- case PT_AR_RSC: case PT_AR_UNAT: case PT_AR_PFS:
- case PT_AR_CCV: case PT_AR_FPSR: case PT_CR_IIP: case PT_PR:
- /* scratch register */
- ptr = (unsigned long *) ((long) pt + addr - PT_CR_IPSR);
+ ptr = (unsigned long *)
+ ((long) pt + offsetof(struct pt_regs, ar_bspstore));
+ break;
+ case PT_AR_RSC:
+ ptr = (unsigned long *) ((long) pt + offsetof(struct pt_regs, ar_rsc));
+ break;
+ case PT_AR_UNAT:
+ ptr = (unsigned long *) ((long) pt + offsetof(struct pt_regs, ar_unat));
+ break;
+ case PT_AR_PFS:
+ ptr = (unsigned long *) ((long) pt + offsetof(struct pt_regs, ar_pfs));
break;
+ case PT_AR_CCV:
+ ptr = (unsigned long *) ((long) pt + offsetof(struct pt_regs, ar_ccv));
+ break;
+ case PT_AR_FPSR:
+ ptr = (unsigned long *) ((long) pt + offsetof(struct pt_regs, ar_fpsr));
+ break;
+ case PT_CR_IIP:
+ ptr = (unsigned long *) ((long) pt + offsetof(struct pt_regs, cr_iip));
+ break;
+ case PT_PR:
+ ptr = (unsigned long *) ((long) pt + offsetof(struct pt_regs, pr));
+ break;
+ /* scratch register */
default:
/* disallow accessing anything else... */
addr);
return -1;
}
+ } else if (addr <= PT_AR_SSD) {
+ ptr = (unsigned long *)
+ ((long) pt + offsetof(struct pt_regs, ar_csd) + addr - PT_AR_CSD);
} else {
/* access debug registers */
/* gr1-gr3 */
- retval |= __copy_to_user(&ppr->gr[1], &pt->r1, sizeof(long) * 3);
+ retval |= __copy_to_user(&ppr->gr[1], &pt->r1, sizeof(long));
+ retval |= __copy_to_user(&ppr->gr[2], &pt->r2, sizeof(long) *2);
/* gr4-gr7 */
/* gr12-gr15 */
- retval |= __copy_to_user(&ppr->gr[12], &pt->r12, sizeof(long) * 4);
+ retval |= __copy_to_user(&ppr->gr[12], &pt->r12, sizeof(long) * 2);
+ retval |= __copy_to_user(&ppr->gr[14], &pt->r14, sizeof(long));
+ retval |= __copy_to_user(&ppr->gr[15], &pt->r15, sizeof(long));
/* gr16-gr31 */
retval |= access_fr(&info, i, 1, (unsigned long *) &ppr->fr[i] + 1, 0);
}
- /* fr6-fr9 */
+ /* fr6-fr11 */
- retval |= __copy_to_user(&ppr->fr[6], &pt->f6, sizeof(struct ia64_fpreg) * 4);
+ retval |= __copy_to_user(&ppr->fr[6], &pt->f6, sizeof(struct ia64_fpreg) * 6);
- /* fp scratch regs(10-15) */
+ /* fp scratch regs(12-15) */
- retval |= __copy_to_user(&ppr->fr[10], &sw->f10, sizeof(struct ia64_fpreg) * 6);
+ retval |= __copy_to_user(&ppr->fr[12], &sw->f12, sizeof(struct ia64_fpreg) * 4);
/* fr16-fr31 */
/* gr1-gr3 */
- retval |= __copy_from_user(&pt->r1, &ppr->gr[1], sizeof(long) * 3);
+ retval |= __copy_from_user(&pt->r1, &ppr->gr[1], sizeof(long));
+ retval |= __copy_from_user(&pt->r2, &ppr->gr[2], sizeof(long) * 2);
/* gr4-gr7 */
/* gr12-gr15 */
- retval |= __copy_from_user(&pt->r12, &ppr->gr[12], sizeof(long) * 4);
+ retval |= __copy_from_user(&pt->r12, &ppr->gr[12], sizeof(long) * 2);
+ retval |= __copy_from_user(&pt->r14, &ppr->gr[14], sizeof(long));
+ retval |= __copy_from_user(&pt->r15, &ppr->gr[15], sizeof(long));
/* gr16-gr31 */
retval |= access_fr(&info, i, 1, (unsigned long *) &ppr->fr[i] + 1, 1);
}
- /* fr6-fr9 */
+ /* fr6-fr11 */
- retval |= __copy_from_user(&pt->f6, &ppr->fr[6], sizeof(ppr->fr[6]) * 4);
+ retval |= __copy_from_user(&pt->f6, &ppr->fr[6], sizeof(ppr->fr[6]) * 6);
- /* fp scratch regs(10-15) */
+ /* fp scratch regs(12-15) */
- retval |= __copy_from_user(&sw->f10, &ppr->fr[10], sizeof(ppr->fr[10]) * 6);
+ retval |= __copy_from_user(&sw->f12, &ppr->fr[12], sizeof(ppr->fr[12]) * 4);
/* fr16-fr31 */
#include <linux/initrd.h>
#include <asm/ia32.h>
+#include <asm/machvec.h>
+#include <asm/mca.h>
#include <asm/page.h>
+#include <asm/patch.h>
#include <asm/pgtable.h>
-#include <asm/machvec.h>
#include <asm/processor.h>
#include <asm/sal.h>
-#include <asm/system.h>
-#include <asm/mca.h>
#include <asm/smp.h>
+#include <asm/system.h>
+#include <asm/unistd.h>
#if defined(CONFIG_SMP) && (IA64_CPU_SIZE > PAGE_SIZE)
# error "struct cpuinfo_ia64 too big!"
unsigned char aux_device_present = 0xaa; /* XXX remove this when legacy I/O is gone */
+/*
+ * The merge_mask variable needs to be set to (max(iommu_page_size(iommu)) - 1). This
+ * mask specifies a mask of address bits that must be 0 in order for two buffers to be
+ * mergeable by the I/O MMU (i.e., the end address of the first buffer and the start
+ * address of the second buffer must be aligned to (merge_mask+1) in order to be
+ * mergeable). By default, we assume there is no I/O MMU which can merge physically
+ * discontiguous buffers, so we set the merge_mask to ~0UL, which corresponds to an iommu
+ * page-size of 2^64.
+ */
+unsigned long ia64_max_iommu_merge_mask = ~0UL;
+
#define COMMAND_LINE_SIZE 512
char saved_command_line[COMMAND_LINE_SIZE]; /* used in proc filesystem */
static int
find_max_pfn (unsigned long start, unsigned long end, void *arg)
{
- unsigned long *max_pfn = arg, pfn;
+ unsigned long *max_pfnp = arg, pfn;
pfn = (PAGE_ALIGN(end - 1) - PAGE_OFFSET) >> PAGE_SHIFT;
- if (pfn > *max_pfn)
- *max_pfn = pfn;
+ if (pfn > *max_pfnp)
+ *max_pfnp = pfn;
return 0;
}
static void
find_memory (void)
{
-# define KERNEL_END ((unsigned long) &_end)
+# define KERNEL_END (&_end)
unsigned long bootmap_size;
- unsigned long max_pfn;
int n = 0;
/*
+ strlen(__va(ia64_boot_param->command_line)) + 1);
n++;
- rsvd_region[n].start = KERNEL_START;
- rsvd_region[n].end = KERNEL_END;
+ rsvd_region[n].start = (unsigned long) ia64_imva((void *)KERNEL_START);
+ rsvd_region[n].end = (unsigned long) ia64_imva(KERNEL_END);
n++;
#ifdef CONFIG_BLK_DEV_INITRD
void __init
setup_arch (char **cmdline_p)
{
+ extern unsigned long *__start___vtop_patchlist[], *__end____vtop_patchlist[];
extern unsigned long ia64_iobase;
unsigned long phys_iobase;
unw_init();
+ ia64_patch_vtop((u64) __start___vtop_patchlist, (u64) __end____vtop_patchlist);
+
*cmdline_p = __va(ia64_boot_param->command_line);
strlcpy(saved_command_line, *cmdline_p, sizeof(saved_command_line));
platform_setup(cmdline_p);
paging_init();
-
- unw_create_gate_table();
}
/*
/* Clear the stack memory reserved for pt_regs: */
memset(ia64_task_regs(current), 0, sizeof(struct pt_regs));
+ ia64_set_kr(IA64_KR_FPU_OWNER, 0);
+
/*
* Initialize default control register to defer all speculative faults. The
* kernel MUST NOT depend on a particular setting of these bits (in other words,
*/
ia64_set_dcr( IA64_DCR_DP | IA64_DCR_DK | IA64_DCR_DX | IA64_DCR_DR
| IA64_DCR_DA | IA64_DCR_DD | IA64_DCR_LC);
-#ifndef CONFIG_SMP
- ia64_set_fpu_owner(0);
-#endif
-
atomic_inc(&init_mm.mm_count);
current->active_mm = &init_mm;
if (current->mm)
BUG();
- ia64_mmu_init(cpu_data);
+ ia64_mmu_init(ia64_imva(cpu_data));
#ifdef CONFIG_IA32_SUPPORT
- /* initialize global ia32 state - CR0 and CR4 */
- asm volatile ("mov ar.cflg = %0" :: "r" (((ulong) IA32_CR4 << 32) | IA32_CR0));
+ ia32_cpu_init();
#endif
/* disable all local interrupt sources: */
void
check_bugs (void)
{
- extern int __start___mckinley_e9_bundles[];
- extern int __end___mckinley_e9_bundles[];
- u64 *bundle;
- int *wp;
+ extern char __start___mckinley_e9_bundles[];
+ extern char __end___mckinley_e9_bundles[];
- if (local_cpu_data->family == 0x1f && local_cpu_data->model == 0)
- printk(KERN_INFO "check_bugs: leaving McKinley Errata 9 workaround enabled\n");
- else {
- printk(KERN_INFO "check_bugs: McKinley Errata 9 workaround not needed; "
- "disabling it\n");
- for (wp = __start___mckinley_e9_bundles; wp < __end___mckinley_e9_bundles; ++wp) {
- bundle = (u64 *) ((char *) wp + *wp);
- /* install a bundle of NOPs: */
- bundle[0] = 0x0000000100000000;
- bundle[1] = 0x0004000000000200;
- ia64_fc(bundle);
- }
- ia64_insn_group_barrier();
- ia64_sync_i();
- ia64_insn_group_barrier();
- ia64_srlz_i();
- ia64_insn_group_barrier();
- }
+ ia64_patch_mckinley_e9((unsigned long) __start___mckinley_e9_bundles,
+ (unsigned long) __end___mckinley_e9_bundles);
}
long err;
/* restore scratch that always needs gets updated during signal delivery: */
- err = __get_user(flags, &sc->sc_flags);
-
+ err = __get_user(flags, &sc->sc_flags);
err |= __get_user(nat, &sc->sc_nat);
err |= __get_user(ip, &sc->sc_ip); /* instruction pointer */
err |= __get_user(cfm, &sc->sc_cfm);
err |= __get_user(um, &sc->sc_um); /* user mask */
err |= __get_user(scr->pt.ar_rsc, &sc->sc_ar_rsc);
- err |= __get_user(scr->pt.ar_ccv, &sc->sc_ar_ccv);
err |= __get_user(scr->pt.ar_unat, &sc->sc_ar_unat);
err |= __get_user(scr->pt.ar_fpsr, &sc->sc_ar_fpsr);
err |= __get_user(scr->pt.ar_pfs, &sc->sc_ar_pfs);
err |= __get_user(scr->pt.pr, &sc->sc_pr); /* predicates */
err |= __get_user(scr->pt.b0, &sc->sc_br[0]); /* b0 (rp) */
err |= __get_user(scr->pt.b6, &sc->sc_br[6]); /* b6 */
- err |= __get_user(scr->pt.b7, &sc->sc_br[7]); /* b7 */
- err |= __copy_from_user(&scr->pt.r1, &sc->sc_gr[1], 3*8); /* r1-r3 */
+ err |= __copy_from_user(&scr->pt.r1, &sc->sc_gr[1], 8); /* r1 */
err |= __copy_from_user(&scr->pt.r8, &sc->sc_gr[8], 4*8); /* r8-r11 */
- err |= __copy_from_user(&scr->pt.r12, &sc->sc_gr[12], 4*8); /* r12-r15 */
- err |= __copy_from_user(&scr->pt.r16, &sc->sc_gr[16], 16*8); /* r16-r31 */
+ err |= __copy_from_user(&scr->pt.r12, &sc->sc_gr[12], 2*8); /* r12-r13 */
+ err |= __copy_from_user(&scr->pt.r15, &sc->sc_gr[15], 8); /* r15 */
scr->pt.cr_ifs = cfm | (1UL << 63);
scr->scratch_unat = ia64_put_scratch_nat_bits(&scr->pt, nat);
+ if (!(flags & IA64_SC_FLAG_IN_SYSCALL)) {
+ /* Restore most scratch-state only when not in syscall. */
+ err |= __get_user(scr->pt.ar_ccv, &sc->sc_ar_ccv); /* ar.ccv */
+ err |= __get_user(scr->pt.b7, &sc->sc_br[7]); /* b7 */
+ err |= __get_user(scr->pt.r14, &sc->sc_gr[14]); /* r14 */
+ err |= __copy_from_user(&scr->pt.ar_csd, &sc->sc_ar25, 2*8); /* ar.csd & ar.ssd */
+ err |= __copy_from_user(&scr->pt.r2, &sc->sc_gr[2], 2*8); /* r2-r3 */
+ err |= __copy_from_user(&scr->pt.r16, &sc->sc_gr[16], 16*8); /* r16-r31 */
+ }
+
if ((flags & IA64_SC_FLAG_FPH_VALID) != 0) {
struct ia64_psr *psr = ia64_psr(&scr->pt);
__copy_from_user(current->thread.fph, &sc->sc_fr[32], 96*16);
psr->mfh = 0; /* drop signal handler's fph contents... */
if (psr->dfh)
- current->thread.last_fph_cpu = -1;
+ ia64_drop_fpu(current);
else {
+ /* We already own the local fph, otherwise psr->dfh wouldn't be 0. */
__ia64_load_fpu(current->thread.fph);
- ia64_set_fpu_owner(current);
- current->thread.last_fph_cpu = smp_processor_id();
+ ia64_set_local_fpu_owner(current);
}
}
return err;
int err;
/*
- * If you change siginfo_t structure, please be sure
- * this code is fixed accordingly. It should never
- * copy any pad contained in the structure to avoid
- * security leaks, but must copy the generic 3 ints
- * plus the relevant union member.
+ * If you change siginfo_t structure, please be sure this code is fixed
+ * accordingly. It should never copy any pad contained in the structure
+ * to avoid security leaks, but must copy the generic 3 ints plus the
+ * relevant union member.
*/
err = __put_user(from->si_signo, &to->si_signo);
err |= __put_user(from->si_errno, &to->si_errno);
err |= __put_user(from->si_addr, &to->si_addr);
err |= __put_user(from->si_imm, &to->si_imm);
break;
- case __SI_CHLD >> 16:
- err |= __put_user(from->si_utime, &to->si_utime);
- err |= __put_user(from->si_stime, &to->si_stime);
- err |= __put_user(from->si_status, &to->si_status);
- case __SI_PROF >> 16:
- err |= __put_user(from->si_uid, &to->si_uid);
- err |= __put_user(from->si_pid, &to->si_pid);
- if (from->si_code == PROF_OVFL) {
- err |= __put_user(from->si_pfm_ovfl[0], &to->si_pfm_ovfl[0]);
- err |= __put_user(from->si_pfm_ovfl[1], &to->si_pfm_ovfl[1]);
- err |= __put_user(from->si_pfm_ovfl[2], &to->si_pfm_ovfl[2]);
- err |= __put_user(from->si_pfm_ovfl[3], &to->si_pfm_ovfl[3]);
- }
case __SI_TIMER >> 16:
err |= __put_user(from->si_tid, &to->si_tid);
err |= __put_user(from->si_overrun, &to->si_overrun);
err |= __put_user(from->si_value, &to->si_value);
break;
+ case __SI_CHLD >> 16:
+ err |= __put_user(from->si_utime, &to->si_utime);
+ err |= __put_user(from->si_stime, &to->si_stime);
+ err |= __put_user(from->si_status, &to->si_status);
default:
err |= __put_user(from->si_uid, &to->si_uid);
err |= __put_user(from->si_pid, &to->si_pid);
to->si_code |= __SI_POLL;
break;
- case SIGPROF:
- to->si_code |= __SI_PROF;
- break;
-
default:
break;
}
nat = ia64_get_scratch_nat_bits(&scr->pt, scr->scratch_unat);
err = __put_user(flags, &sc->sc_flags);
-
err |= __put_user(nat, &sc->sc_nat);
err |= PUT_SIGSET(mask, &sc->sc_mask);
err |= __put_user(cfm, &sc->sc_cfm);
err |= __put_user(scr->pt.cr_ipsr & IA64_PSR_UM, &sc->sc_um);
err |= __put_user(scr->pt.ar_rsc, &sc->sc_ar_rsc);
- err |= __put_user(scr->pt.ar_ccv, &sc->sc_ar_ccv);
err |= __put_user(scr->pt.ar_unat, &sc->sc_ar_unat); /* ar.unat */
err |= __put_user(scr->pt.ar_fpsr, &sc->sc_ar_fpsr); /* ar.fpsr */
err |= __put_user(scr->pt.ar_pfs, &sc->sc_ar_pfs);
err |= __put_user(scr->pt.pr, &sc->sc_pr); /* predicates */
err |= __put_user(scr->pt.b0, &sc->sc_br[0]); /* b0 (rp) */
err |= __put_user(scr->pt.b6, &sc->sc_br[6]); /* b6 */
- err |= __put_user(scr->pt.b7, &sc->sc_br[7]); /* b7 */
-
- err |= __copy_to_user(&sc->sc_gr[1], &scr->pt.r1, 3*8); /* r1-r3 */
+ err |= __copy_to_user(&sc->sc_gr[1], &scr->pt.r1, 8); /* r1 */
err |= __copy_to_user(&sc->sc_gr[8], &scr->pt.r8, 4*8); /* r8-r11 */
- err |= __copy_to_user(&sc->sc_gr[12], &scr->pt.r12, 4*8); /* r12-r15 */
- err |= __copy_to_user(&sc->sc_gr[16], &scr->pt.r16, 16*8); /* r16-r31 */
-
+ err |= __copy_to_user(&sc->sc_gr[12], &scr->pt.r12, 2*8); /* r12-r13 */
+ err |= __copy_to_user(&sc->sc_gr[15], &scr->pt.r15, 8); /* r15 */
err |= __put_user(scr->pt.cr_iip + ia64_psr(&scr->pt)->ri, &sc->sc_ip);
+
+ if (flags & IA64_SC_FLAG_IN_SYSCALL) {
+ /* Clear scratch registers if the signal interrupted a system call. */
+ err |= __put_user(0, &sc->sc_ar_ccv); /* ar.ccv */
+ err |= __put_user(0, &sc->sc_br[7]); /* b7 */
+ err |= __put_user(0, &sc->sc_gr[14]); /* r14 */
+ err |= __clear_user(&sc->sc_ar25, 2*8); /* ar.csd & ar.ssd */
+ err |= __clear_user(&sc->sc_gr[2], 2*8); /* r2-r3 */
+ err |= __clear_user(&sc->sc_gr[16], 16*8); /* r16-r31 */
+ } else {
+ /* Copy scratch regs to sigcontext if the signal didn't interrupt a syscall. */
+ err |= __put_user(scr->pt.ar_ccv, &sc->sc_ar_ccv); /* ar.ccv */
+ err |= __put_user(scr->pt.b7, &sc->sc_br[7]); /* b7 */
+ err |= __put_user(scr->pt.r14, &sc->sc_gr[14]); /* r14 */
+		err |= __copy_to_user(&sc->sc_ar25, &scr->pt.ar_csd, 2*8);	/* ar.csd & ar.ssd */
+ err |= __copy_to_user(&sc->sc_gr[2], &scr->pt.r2, 2*8); /* r2-r3 */
+ err |= __copy_to_user(&sc->sc_gr[16], &scr->pt.r16, 16*8); /* r16-r31 */
+ }
return err;
}
setup_frame (int sig, struct k_sigaction *ka, siginfo_t *info, sigset_t *set,
struct sigscratch *scr)
{
- extern char ia64_sigtramp[], __start_gate_section[];
+ extern char __kernel_sigtramp[];
unsigned long tramp_addr, new_rbs = 0;
struct sigframe *frame;
struct siginfo si;
long err;
frame = (void *) scr->pt.r12;
- tramp_addr = GATE_ADDR + (ia64_sigtramp - __start_gate_section);
+ tramp_addr = (unsigned long) __kernel_sigtramp;
if ((ka->sa.sa_flags & SA_ONSTACK) && sas_ss_flags((unsigned long) frame) == 0) {
frame = (void *) ((current->sas_ss_sp + current->sas_ss_size)
& ~(STACK_ALIGN - 1));
* in the kernel, register stack is switched in the signal trampoline).
*/
if (!rbs_on_sig_stack(scr->pt.ar_bspstore))
- new_rbs = (current->sas_ss_sp + sizeof(long) - 1) & ~(sizeof(long) - 1);
+ new_rbs = (current->sas_ss_sp + sizeof(long) - 1) & ~(sizeof(long) - 1);
}
frame = (void *) frame - ((sizeof(*frame) + STACK_ALIGN - 1) & ~(STACK_ALIGN - 1));
scr->scratch_unat = 0; /* ensure NaT bits of r12 is clear */
#if DEBUG_SIG
- printk("SIG deliver (%s:%d): sig=%d sp=%lx ip=%lx handler=%lx\n",
- current->comm, current->pid, sig, scr->pt.r12, scr->pt.cr_iip, scr->pt.r3);
+ printk("SIG deliver (%s:%d): sig=%d sp=%lx ip=%lx handler=%p\n",
+ current->comm, current->pid, sig, scr->pt.r12, frame->sc.sc_ip, frame->handler);
#endif
return 1;
fork_by_hand (void)
{
/*
- * don't care about the eip and regs settings since we'll never reschedule the
+ * Don't care about the IP and regs settings since we'll never reschedule the
* forked task.
*/
- return do_fork(CLONE_VM|CLONE_IDLETASK, 0, 0, 0, NULL, NULL);
+ return copy_process(CLONE_VM|CLONE_IDLETASK, 0, 0, 0, NULL, NULL);
}
static int __init
idle = fork_by_hand();
if (IS_ERR(idle))
panic("failed fork for CPU %d", cpu);
+ wake_up_forked_process(idle);
/*
* We remove it from the pidhash and the runqueue
for (cpu = 1, i = 0; i < smp_boot_data.cpu_count; i++) {
sapicid = smp_boot_data.cpu_phys_id[i];
- if (sapicid == -1 || sapicid == boot_cpu_id)
+ if (sapicid == boot_cpu_id)
continue;
- phys_cpu_present_map |= (1 << cpu);
+ phys_cpu_present_map |= (1UL << cpu);
ia64_cpu_to_sapicid[cpu] = sapicid;
cpu++;
}
/* Tell SAL where to drop the AP's. */
ap_startup = (struct fptr *) start_ap;
sal_ret = ia64_sal_set_vectors(SAL_VECTOR_OS_BOOT_RENDEZ,
- __pa(ap_startup->fp), __pa(ap_startup->gp), 0, 0, 0, 0);
+ ia64_tpa(ap_startup->fp), ia64_tpa(ap_startup->gp), 0, 0, 0, 0);
if (sal_ret < 0)
printk(KERN_ERR "SMP: Can't set SAL AP Boot Rendezvous: %s\n",
ia64_sal_strerror(sal_ret));
* This file contains various system calls that have different calling
* conventions on different platforms.
*
- * Copyright (C) 1999-2000, 2002 Hewlett-Packard Co
+ * Copyright (C) 1999-2000, 2002-2003 Hewlett-Packard Co
* David Mosberger-Tang <davidm@hpl.hp.com>
*/
#include <linux/config.h>
#include <linux/time.h>
#include <linux/interrupt.h>
#include <linux/efi.h>
+#include <linux/timex.h>
#include <asm/delay.h>
#include <asm/hw_irq.h>
#include <asm/system.h>
extern unsigned long wall_jiffies;
-extern unsigned long last_nsec_offset;
u64 jiffies_64 = INITIAL_JIFFIES;
+#define TIME_KEEPER_ID 0 /* smp_processor_id() of time-keeper */
+
#ifdef CONFIG_IA64_DEBUG_IRQ
unsigned long last_cli_ip;
atomic_inc((atomic_t *) &prof_buffer[ip]);
}
+static void
+itc_reset (void)
+{
+}
+
+/*
+ * Adjust for the fact that xtime has been advanced by delta_nsec (which may be negative
+ * and/or larger than NSEC_PER_SEC).
+ */
+static void
+itc_update (long delta_nsec)
+{
+}
+
/*
* Return the number of nano-seconds that elapsed since the last update to jiffy. The
* xtime_lock must be at least read-locked when calling this routine.
*/
-static inline unsigned long
-gettimeoffset (void)
+unsigned long
+itc_get_offset (void)
{
unsigned long elapsed_cycles, lost = jiffies - wall_jiffies;
unsigned long now, last_tick;
-# define time_keeper_id 0 /* smp_processor_id() of time-keeper */
- last_tick = (cpu_data(time_keeper_id)->itm_next
- - (lost + 1)*cpu_data(time_keeper_id)->itm_delta);
+ last_tick = (cpu_data(TIME_KEEPER_ID)->itm_next
+ - (lost + 1)*cpu_data(TIME_KEEPER_ID)->itm_delta);
now = ia64_get_itc();
if (unlikely((long) (now - last_tick) < 0)) {
return (elapsed_cycles*local_cpu_data->nsec_per_cyc) >> IA64_NSEC_PER_CYC_SHIFT;
}
+static struct time_interpolator itc_interpolator = {
+ .get_offset = itc_get_offset,
+ .update = itc_update,
+ .reset = itc_reset
+};
+
static inline void
set_normalized_timespec (struct timespec *ts, time_t sec, long nsec)
{
* Discover what correction gettimeofday would have done, and then undo
* it!
*/
- nsec -= gettimeoffset();
+ nsec -= time_interpolator_get_offset();
wtm_sec = wall_to_monotonic.tv_sec + (xtime.tv_sec - sec);
wtm_nsec = wall_to_monotonic.tv_nsec + (xtime.tv_nsec - nsec);
time_status |= STA_UNSYNC;
time_maxerror = NTP_PHASE_LIMIT;
time_esterror = NTP_PHASE_LIMIT;
+ time_interpolator_reset();
}
write_sequnlock_irq(&xtime_lock);
clock_was_set();
{
unsigned long seq, nsec, usec, sec, old, offset;
- if ((unsigned long)tv->tv_nsec >= NSEC_PER_SEC)
- return -EINVAL;
-
while (1) {
seq = read_seqbegin(&xtime_lock);
{
old = last_nsec_offset;
- offset = gettimeoffset();
+ offset = time_interpolator_get_offset();
sec = xtime.tv_sec;
nsec = xtime.tv_nsec;
}
#endif
new_itm += local_cpu_data->itm_delta;
- if (smp_processor_id() == 0) {
+ if (smp_processor_id() == TIME_KEEPER_ID) {
/*
* Here we are in the timer irq handler. We have irqs locally
* disabled, but we don't know if the timer_bh is running on
void __init
ia64_init_itm (void)
{
- unsigned long platform_base_freq, itc_freq, drift;
+ unsigned long platform_base_freq, itc_freq;
struct pal_freq_ratio itc_ratio, proc_ratio;
- long status;
+ long status, platform_base_drift, itc_drift;
/*
* According to SAL v2.6, we need to use a SAL call to determine the platform base
* frequency and then a PAL call to determine the frequency ratio between the ITC
* and the base frequency.
*/
- status = ia64_sal_freq_base(SAL_FREQ_BASE_PLATFORM, &platform_base_freq, &drift);
+ status = ia64_sal_freq_base(SAL_FREQ_BASE_PLATFORM,
+ &platform_base_freq, &platform_base_drift);
if (status != 0) {
printk(KERN_ERR "SAL_FREQ_BASE_PLATFORM failed: %s\n", ia64_sal_strerror(status));
} else {
printk(KERN_ERR
"SAL/PAL failed to obtain frequency info---inventing reasonable values\n");
platform_base_freq = 100000000;
+ platform_base_drift = -1; /* no drift info */
itc_ratio.num = 3;
itc_ratio.den = 1;
}
printk(KERN_ERR "Platform base frequency %lu bogus---resetting to 75MHz!\n",
platform_base_freq);
platform_base_freq = 75000000;
+ platform_base_drift = -1;
}
if (!proc_ratio.den)
proc_ratio.den = 1; /* avoid division by zero */
itc_ratio.den = 1; /* avoid division by zero */
itc_freq = (platform_base_freq*itc_ratio.num)/itc_ratio.den;
+ if (platform_base_drift != -1)
+ itc_drift = platform_base_drift*itc_ratio.num/itc_ratio.den;
+ else
+ itc_drift = -1;
+
local_cpu_data->itm_delta = (itc_freq + HZ/2) / HZ;
printk(KERN_INFO "CPU %d: base freq=%lu.%03luMHz, ITC ratio=%lu/%lu, "
- "ITC freq=%lu.%03luMHz\n", smp_processor_id(),
+ "ITC freq=%lu.%03luMHz+/-%ldppm\n", smp_processor_id(),
platform_base_freq / 1000000, (platform_base_freq / 1000) % 1000,
- itc_ratio.num, itc_ratio.den, itc_freq / 1000000, (itc_freq / 1000) % 1000);
+ itc_ratio.num, itc_ratio.den, itc_freq / 1000000, (itc_freq / 1000) % 1000,
+ itc_drift);
local_cpu_data->proc_freq = (platform_base_freq*proc_ratio.num)/proc_ratio.den;
local_cpu_data->itc_freq = itc_freq;
local_cpu_data->nsec_per_cyc = ((NSEC_PER_SEC<<IA64_NSEC_PER_CYC_SHIFT)
+ itc_freq/2)/itc_freq;
+ if (!(sal_platform_features & IA64_SAL_PLATFORM_FEATURE_ITC_DRIFT)) {
+ itc_interpolator.frequency = local_cpu_data->itc_freq;
+ itc_interpolator.drift = itc_drift;
+ register_time_interpolator(&itc_interpolator);
+ }
+
/* Setup the CPU local timer tick */
ia64_cpu_local_tick();
}
psr->dfh = 0;
#ifndef CONFIG_SMP
{
- struct task_struct *fpu_owner = ia64_get_fpu_owner();
+ struct task_struct *fpu_owner
+ = (struct task_struct *)ia64_get_kr(IA64_KR_FPU_OWNER);
- if (fpu_owner == current)
+ if (ia64_is_local_fpu_owner(current))
return;
if (fpu_owner)
ia64_flush_fph(fpu_owner);
}
#endif /* !CONFIG_SMP */
- ia64_set_fpu_owner(current);
+ ia64_set_local_fpu_owner(current);
if ((current->thread.flags & IA64_THREAD_FPH_VALID) != 0) {
__ia64_load_fpu(current->thread.fph);
psr->mfh = 0;
fp_emulate (int fp_fault, void *bundle, long *ipsr, long *fpsr, long *isr, long *pr, long *ifs,
struct pt_regs *regs)
{
- struct ia64_fpreg f6_11[6];
fp_state_t fp_state;
fpswa_ret_t ret;
* pointer to point to these registers.
*/
fp_state.bitmask_low64 = 0xfc0; /* bit6..bit11 */
- f6_11[0] = regs->f6; f6_11[1] = regs->f7;
- f6_11[2] = regs->f8; f6_11[3] = regs->f9;
- __asm__ ("stf.spill %0=f10%P0" : "=m"(f6_11[4]));
- __asm__ ("stf.spill %0=f11%P0" : "=m"(f6_11[5]));
- fp_state.fp_state_low_volatile = (fp_state_low_volatile_t *) f6_11;
+
+	fp_state.fp_state_low_volatile = (fp_state_low_volatile_t *) &regs->f6;
/*
* unsigned long (*EFI_FPSWA) (
* unsigned long trap_type,
(unsigned long *) ipsr, (unsigned long *) fpsr,
(unsigned long *) isr, (unsigned long *) pr,
(unsigned long *) ifs, &fp_state);
- regs->f6 = f6_11[0]; regs->f7 = f6_11[1];
- regs->f8 = f6_11[2]; regs->f9 = f6_11[3];
- __asm__ ("ldf.fill f10=%0%P0" :: "m"(f6_11[4]));
- __asm__ ("ldf.fill f11=%0%P0" :: "m"(f6_11[5]));
+
return ret.status;
}
if (jiffies - last_time > 5*HZ)
fpu_swa_count = 0;
- if ((++fpu_swa_count < 5) && !(current->thread.flags & IA64_THREAD_FPEMU_NOPRINT)) {
+ if ((fpu_swa_count < 4) && !(current->thread.flags & IA64_THREAD_FPEMU_NOPRINT)) {
last_time = jiffies;
+ ++fpu_swa_count;
printk(KERN_WARNING "%s(%d): floating-point assist fault at ip %016lx, isr %016lx\n",
current->comm, current->pid, regs->cr_iip + ia64_psr(regs)->ri, isr);
}
case 29: /* Debug */
case 35: /* Taken Branch Trap */
case 36: /* Single Step Trap */
-#ifdef CONFIG_FSYS
if (fsys_mode(current, regs)) {
- extern char syscall_via_break[], __start_gate_section[];
+ extern char __kernel_syscall_via_break[];
/*
* Got a trap in fsys-mode: Taken Branch Trap and Single Step trap
* need special handling; Debug trap is not supposed to happen.
return;
}
/* re-do the system call via break 0x100000: */
- regs->cr_iip = GATE_ADDR + (syscall_via_break - __start_gate_section);
+ regs->cr_iip = (unsigned long) __kernel_syscall_via_break;
ia64_psr(regs)->ri = 0;
ia64_psr(regs)->cpl = 3;
return;
}
-#endif
switch (vector) {
case 29:
siginfo.si_code = TRAP_HWBKPT;
RSW(f2), RSW(f3), RSW(f4), RSW(f5),
RPT(f6), RPT(f7), RPT(f8), RPT(f9),
+ RPT(f10), RPT(f11),
- RSW(f10), RSW(f11), RSW(f12), RSW(f13), RSW(f14),
+ RSW(f12), RSW(f13), RSW(f14),
RSW(f15), RSW(f16), RSW(f17), RSW(f18), RSW(f19),
RSW(f20), RSW(f21), RSW(f22), RSW(f23), RSW(f24),
RSW(f25), RSW(f26), RSW(f27), RSW(f28), RSW(f29),
/*
* Copyright (C) 1999-2003 Hewlett-Packard Co
* David Mosberger-Tang <davidm@hpl.hp.com>
+ * Copyright (C) 2003 Fenghua Yu <fenghua.yu@intel.com>
+ *	- Change pt_regs_off() to make it less dependent on pt_regs structure.
*/
/*
* This file implements call frame unwind support for the Linux
* acquired, then the read-write lock must be acquired first.
*/
#include <linux/bootmem.h>
+#include <linux/elf.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/slab.h>
# define STAT(x...)
#endif
-#define alloc_reg_state() kmalloc(sizeof(struct unw_state_record), GFP_ATOMIC)
+#define alloc_reg_state() kmalloc(sizeof(struct unw_reg_state), GFP_ATOMIC)
#define free_reg_state(usr) kfree(usr)
#define alloc_labeled_state() kmalloc(sizeof(struct unw_labeled_state), GFP_ATOMIC)
#define free_labeled_state(usr) kfree(usr)
typedef unsigned long unw_word;
typedef unsigned char unw_hash_index_t;
-#define struct_offset(str,fld) ((char *)&((str *)NULL)->fld - (char *) 0)
-
static struct {
spinlock_t lock; /* spinlock for unwind data */
/* index into unw_frame_info for preserved register i */
unsigned short preg_index[UNW_NUM_REGS];
+ short pt_regs_offsets[32];
+
/* unwind table for the kernel: */
struct unw_table kernel_table;
UNW_REG_UNAT, UNW_REG_LC, UNW_REG_FPSR, UNW_REG_PRI_UNAT_GR
},
.preg_index = {
- struct_offset(struct unw_frame_info, pri_unat_loc)/8, /* PRI_UNAT_GR */
- struct_offset(struct unw_frame_info, pri_unat_loc)/8, /* PRI_UNAT_MEM */
- struct_offset(struct unw_frame_info, bsp_loc)/8,
- struct_offset(struct unw_frame_info, bspstore_loc)/8,
- struct_offset(struct unw_frame_info, pfs_loc)/8,
- struct_offset(struct unw_frame_info, rnat_loc)/8,
- struct_offset(struct unw_frame_info, psp)/8,
- struct_offset(struct unw_frame_info, rp_loc)/8,
- struct_offset(struct unw_frame_info, r4)/8,
- struct_offset(struct unw_frame_info, r5)/8,
- struct_offset(struct unw_frame_info, r6)/8,
- struct_offset(struct unw_frame_info, r7)/8,
- struct_offset(struct unw_frame_info, unat_loc)/8,
- struct_offset(struct unw_frame_info, pr_loc)/8,
- struct_offset(struct unw_frame_info, lc_loc)/8,
- struct_offset(struct unw_frame_info, fpsr_loc)/8,
- struct_offset(struct unw_frame_info, b1_loc)/8,
- struct_offset(struct unw_frame_info, b2_loc)/8,
- struct_offset(struct unw_frame_info, b3_loc)/8,
- struct_offset(struct unw_frame_info, b4_loc)/8,
- struct_offset(struct unw_frame_info, b5_loc)/8,
- struct_offset(struct unw_frame_info, f2_loc)/8,
- struct_offset(struct unw_frame_info, f3_loc)/8,
- struct_offset(struct unw_frame_info, f4_loc)/8,
- struct_offset(struct unw_frame_info, f5_loc)/8,
- struct_offset(struct unw_frame_info, fr_loc[16 - 16])/8,
- struct_offset(struct unw_frame_info, fr_loc[17 - 16])/8,
- struct_offset(struct unw_frame_info, fr_loc[18 - 16])/8,
- struct_offset(struct unw_frame_info, fr_loc[19 - 16])/8,
- struct_offset(struct unw_frame_info, fr_loc[20 - 16])/8,
- struct_offset(struct unw_frame_info, fr_loc[21 - 16])/8,
- struct_offset(struct unw_frame_info, fr_loc[22 - 16])/8,
- struct_offset(struct unw_frame_info, fr_loc[23 - 16])/8,
- struct_offset(struct unw_frame_info, fr_loc[24 - 16])/8,
- struct_offset(struct unw_frame_info, fr_loc[25 - 16])/8,
- struct_offset(struct unw_frame_info, fr_loc[26 - 16])/8,
- struct_offset(struct unw_frame_info, fr_loc[27 - 16])/8,
- struct_offset(struct unw_frame_info, fr_loc[28 - 16])/8,
- struct_offset(struct unw_frame_info, fr_loc[29 - 16])/8,
- struct_offset(struct unw_frame_info, fr_loc[30 - 16])/8,
- struct_offset(struct unw_frame_info, fr_loc[31 - 16])/8,
+ offsetof(struct unw_frame_info, pri_unat_loc)/8, /* PRI_UNAT_GR */
+ offsetof(struct unw_frame_info, pri_unat_loc)/8, /* PRI_UNAT_MEM */
+ offsetof(struct unw_frame_info, bsp_loc)/8,
+ offsetof(struct unw_frame_info, bspstore_loc)/8,
+ offsetof(struct unw_frame_info, pfs_loc)/8,
+ offsetof(struct unw_frame_info, rnat_loc)/8,
+ offsetof(struct unw_frame_info, psp)/8,
+ offsetof(struct unw_frame_info, rp_loc)/8,
+ offsetof(struct unw_frame_info, r4)/8,
+ offsetof(struct unw_frame_info, r5)/8,
+ offsetof(struct unw_frame_info, r6)/8,
+ offsetof(struct unw_frame_info, r7)/8,
+ offsetof(struct unw_frame_info, unat_loc)/8,
+ offsetof(struct unw_frame_info, pr_loc)/8,
+ offsetof(struct unw_frame_info, lc_loc)/8,
+ offsetof(struct unw_frame_info, fpsr_loc)/8,
+ offsetof(struct unw_frame_info, b1_loc)/8,
+ offsetof(struct unw_frame_info, b2_loc)/8,
+ offsetof(struct unw_frame_info, b3_loc)/8,
+ offsetof(struct unw_frame_info, b4_loc)/8,
+ offsetof(struct unw_frame_info, b5_loc)/8,
+ offsetof(struct unw_frame_info, f2_loc)/8,
+ offsetof(struct unw_frame_info, f3_loc)/8,
+ offsetof(struct unw_frame_info, f4_loc)/8,
+ offsetof(struct unw_frame_info, f5_loc)/8,
+ offsetof(struct unw_frame_info, fr_loc[16 - 16])/8,
+ offsetof(struct unw_frame_info, fr_loc[17 - 16])/8,
+ offsetof(struct unw_frame_info, fr_loc[18 - 16])/8,
+ offsetof(struct unw_frame_info, fr_loc[19 - 16])/8,
+ offsetof(struct unw_frame_info, fr_loc[20 - 16])/8,
+ offsetof(struct unw_frame_info, fr_loc[21 - 16])/8,
+ offsetof(struct unw_frame_info, fr_loc[22 - 16])/8,
+ offsetof(struct unw_frame_info, fr_loc[23 - 16])/8,
+ offsetof(struct unw_frame_info, fr_loc[24 - 16])/8,
+ offsetof(struct unw_frame_info, fr_loc[25 - 16])/8,
+ offsetof(struct unw_frame_info, fr_loc[26 - 16])/8,
+ offsetof(struct unw_frame_info, fr_loc[27 - 16])/8,
+ offsetof(struct unw_frame_info, fr_loc[28 - 16])/8,
+ offsetof(struct unw_frame_info, fr_loc[29 - 16])/8,
+ offsetof(struct unw_frame_info, fr_loc[30 - 16])/8,
+ offsetof(struct unw_frame_info, fr_loc[31 - 16])/8,
+ },
+ .pt_regs_offsets = {
+ [0] = -1,
+ offsetof(struct pt_regs, r1),
+ offsetof(struct pt_regs, r2),
+ offsetof(struct pt_regs, r3),
+ [4] = -1, [5] = -1, [6] = -1, [7] = -1,
+ offsetof(struct pt_regs, r8),
+ offsetof(struct pt_regs, r9),
+ offsetof(struct pt_regs, r10),
+ offsetof(struct pt_regs, r11),
+ offsetof(struct pt_regs, r12),
+ offsetof(struct pt_regs, r13),
+ offsetof(struct pt_regs, r14),
+ offsetof(struct pt_regs, r15),
+ offsetof(struct pt_regs, r16),
+ offsetof(struct pt_regs, r17),
+ offsetof(struct pt_regs, r18),
+ offsetof(struct pt_regs, r19),
+ offsetof(struct pt_regs, r20),
+ offsetof(struct pt_regs, r21),
+ offsetof(struct pt_regs, r22),
+ offsetof(struct pt_regs, r23),
+ offsetof(struct pt_regs, r24),
+ offsetof(struct pt_regs, r25),
+ offsetof(struct pt_regs, r26),
+ offsetof(struct pt_regs, r27),
+ offsetof(struct pt_regs, r28),
+ offsetof(struct pt_regs, r29),
+ offsetof(struct pt_regs, r30),
+ offsetof(struct pt_regs, r31),
},
.hash = { [0 ... UNW_HASH_SIZE - 1] = -1 },
#ifdef UNW_DEBUG
#endif
};
-\f
/* Unwind accessors. */
/*
static inline unsigned long
pt_regs_off (unsigned long reg)
{
- unsigned long off =0;
+ short off = -1;
- if (reg >= 1 && reg <= 3)
- off = struct_offset(struct pt_regs, r1) + 8*(reg - 1);
- else if (reg <= 11)
- off = struct_offset(struct pt_regs, r8) + 8*(reg - 8);
- else if (reg <= 15)
- off = struct_offset(struct pt_regs, r12) + 8*(reg - 12);
- else if (reg <= 31)
- off = struct_offset(struct pt_regs, r16) + 8*(reg - 16);
- else
+ if (reg < ARRAY_SIZE(unw.pt_regs_offsets))
+ off = unw.pt_regs_offsets[reg];
+
+ if (off < 0) {
UNW_DPRINT(0, "unwind.%s: bad scratch reg r%lu\n", __FUNCTION__, reg);
- return off;
+ off = 0;
+ }
+ return (unsigned long) off;
}
static inline struct pt_regs *
if (!info->pt) {
/* This should not happen with valid unwind info. */
UNW_DPRINT(0, "unwind.%s: bad unwind info: resetting info->pt\n", __FUNCTION__);
- info->pt = info->sp - 16;
+ if (info->flags & UNW_FLAG_INTERRUPT_FRAME)
+ info->pt = (unsigned long) ((struct pt_regs *) info->psp - 1);
+ else
+ info->pt = info->sp - 16;
}
UNW_DPRINT(3, "unwind.%s: sp 0x%lx pt 0x%lx\n", __FUNCTION__, info->sp, info->pt);
return (struct pt_regs *) info->pt;
unsigned long *addr;
struct pt_regs *pt;
- pt = get_scratch_regs(info);
switch (regnum) {
/* scratch: */
- case 0: addr = &pt->b0; break;
- case 6: addr = &pt->b6; break;
- case 7: addr = &pt->b7; break;
+ case 0: pt = get_scratch_regs(info); addr = &pt->b0; break;
+ case 6: pt = get_scratch_regs(info); addr = &pt->b6; break;
+ case 7: pt = get_scratch_regs(info); addr = &pt->b7; break;
/* preserved: */
case 1: case 2: case 3: case 4: case 5:
return -1;
}
- pt = get_scratch_regs(info);
-
if (regnum <= 5) {
addr = *(&info->f2_loc + (regnum - 2));
if (!addr)
addr = &info->sw->f2 + (regnum - 2);
} else if (regnum <= 15) {
- if (regnum <= 9)
+ if (regnum <= 11) {
+ pt = get_scratch_regs(info);
addr = &pt->f6 + (regnum - 6);
+ }
else
- addr = &info->sw->f10 + (regnum - 10);
+ addr = &info->sw->f12 + (regnum - 12);
} else if (regnum <= 31) {
addr = info->fr_loc[regnum - 16];
if (!addr)
unsigned long *addr;
struct pt_regs *pt;
- pt = get_scratch_regs(info);
switch (regnum) {
case UNW_AR_BSP:
addr = info->bsp_loc;
break;
case UNW_AR_RSC:
+ pt = get_scratch_regs(info);
addr = &pt->ar_rsc;
break;
case UNW_AR_CCV:
+ pt = get_scratch_regs(info);
addr = &pt->ar_ccv;
break;
+ case UNW_AR_CSD:
+ pt = get_scratch_regs(info);
+ addr = &pt->ar_csd;
+ break;
+
+ case UNW_AR_SSD:
+ pt = get_scratch_regs(info);
+ addr = &pt->ar_ssd;
+ break;
+
default:
UNW_DPRINT(0, "unwind.%s: trying to access non-existent ar%u\n",
__FUNCTION__, regnum);
static inline void
desc_abi (unsigned char abi, unsigned char context, struct unw_state_record *sr)
{
- if (abi == 0 && context == 'i') {
+ if (abi == 3 && context == 'i') {
sr->flags |= UNW_FLAG_INTERRUPT_FRAME;
UNW_DPRINT(3, "unwind.%s: interrupt frame\n", __FUNCTION__);
}
val = unw.preg_index[UNW_REG_F16 + (rval - 16)];
else {
opc = UNW_INSN_MOVE_SCRATCH;
- if (rval <= 9)
- val = struct_offset(struct pt_regs, f6) + 16*(rval - 6);
+ if (rval <= 11)
+ val = offsetof(struct pt_regs, f6) + 16*(rval - 6);
else
UNW_DPRINT(0, "unwind.%s: kernel may not touch f%lu\n",
__FUNCTION__, rval);
else {
opc = UNW_INSN_MOVE_SCRATCH;
if (rval == 0)
- val = struct_offset(struct pt_regs, b0);
+ val = offsetof(struct pt_regs, b0);
else if (rval == 6)
- val = struct_offset(struct pt_regs, b6);
+ val = offsetof(struct pt_regs, b6);
else
- val = struct_offset(struct pt_regs, b7);
+ val = offsetof(struct pt_regs, b7);
}
break;
&& sr.curr.reg[UNW_REG_PSP].val != 0) {
/* new psp is sp plus frame size */
insn.opc = UNW_INSN_ADD;
- insn.dst = struct_offset(struct unw_frame_info, psp)/8;
+ insn.dst = offsetof(struct unw_frame_info, psp)/8;
insn.val = sr.curr.reg[UNW_REG_PSP].val; /* frame size */
script_emit(script, insn);
}
lazy_init:
off = unw.sw_off[val];
s[val] = (unsigned long) state->sw + off;
- if (off >= struct_offset(struct switch_stack, r4)
- && off <= struct_offset(struct switch_stack, r7))
+ if (off >= offsetof(struct switch_stack, r4) && off <= offsetof(struct switch_stack, r7))
/*
* We're initializing a general register: init NaT info, too. Note that
* the offset is a multiple of 8 which gives us the 3 bits needed for
* the type field.
*/
- s[val+1] = (struct_offset(struct switch_stack, ar_unat) - off) | UNW_NAT_MEMSTK;
+ s[val+1] = (offsetof(struct switch_stack, ar_unat) - off) | UNW_NAT_MEMSTK;
goto redo;
}
return -1;
}
ip = info->ip = *info->rp_loc;
- if (ip < GATE_ADDR + PAGE_SIZE) {
- /*
- * We don't have unwind info for the gate page, so we consider that part
- * of user-space for the purpose of unwinding.
- */
+ if (ip < GATE_ADDR) {
UNW_DPRINT(2, "unwind.%s: reached user-space (ip=0x%lx)\n", __FUNCTION__, ip);
STAT(unw.stat.api.unwind_time += ia64_get_itc() - start; local_irq_restore(flags));
return -1;
if ((pr & (1UL << pNonSys)) != 0)
num_regs = *info->cfm_loc & 0x7f; /* size of frame */
info->pfs_loc =
- (unsigned long *) (info->pt + struct_offset(struct pt_regs, ar_pfs));
+ (unsigned long *) (info->pt + offsetof(struct pt_regs, ar_pfs));
UNW_DPRINT(3, "unwind.%s: interrupt_frame pt 0x%lx\n", __FUNCTION__, info->pt);
} else
num_regs = (*info->cfm_loc >> 7) & 0x7f; /* size of locals */
__FUNCTION__, ip);
return -1;
}
- /*
- * We don't have unwind info for the gate page, so we consider that part
- * of user-space for the purpose of unwinding.
- */
- if (ip < GATE_ADDR + PAGE_SIZE)
+ if (ip < GATE_ADDR)
return 0;
}
unw_get_ip(info, &ip);
kfree(table);
}
-void
-unw_create_gate_table (void)
+static void __init
+create_gate_table (void)
{
- extern char __start_gate_section[], __stop_gate_section[];
- unsigned long *lp, start, end, segbase = unw.kernel_table.segment_base;
- const struct unw_table_entry *entry, *first, *unw_table_end;
- extern int ia64_unw_end;
+ const struct unw_table_entry *entry, *start, *end;
+ unsigned long *lp, segbase = GATE_ADDR;
size_t info_size, size;
char *info;
+ Elf64_Phdr *punw = NULL, *phdr = (Elf64_Phdr *) (GATE_ADDR + GATE_EHDR->e_phoff);
+ int i;
+
+ for (i = 0; i < GATE_EHDR->e_phnum; ++i, ++phdr)
+ if (phdr->p_type == PT_IA_64_UNWIND) {
+ punw = phdr;
+ break;
+ }
+
+ if (!punw) {
+ printk("%s: failed to find gate DSO's unwind table!\n", __FUNCTION__);
+ return;
+ }
- start = (unsigned long) __start_gate_section - segbase;
- end = (unsigned long) __stop_gate_section - segbase;
- unw_table_end = (struct unw_table_entry *) &ia64_unw_end;
+ start = (const struct unw_table_entry *) punw->p_vaddr;
+ end = (struct unw_table_entry *) ((char *) start + punw->p_memsz);
size = 0;
- first = lookup(&unw.kernel_table, start);
- for (entry = first; entry < unw_table_end && entry->start_offset < end; ++entry)
+ unw_add_unwind_table("linux-gate.so", segbase, 0, start, end);
+
+ for (entry = start; entry < end; ++entry)
size += 3*8 + 8 + 8*UNW_LENGTH(*(u64 *) (segbase + entry->info_offset));
size += 8; /* reserve space for "end of table" marker */
- unw.gate_table = alloc_bootmem(size);
+ unw.gate_table = kmalloc(size, GFP_KERNEL);
if (!unw.gate_table) {
unw.gate_table_size = 0;
- printk(KERN_ERR "unwind: unable to create unwind data for gate page!\n");
+ printk(KERN_ERR "%s: unable to create unwind data for gate page!\n", __FUNCTION__);
return;
}
unw.gate_table_size = size;
lp = unw.gate_table;
info = (char *) unw.gate_table + size;
- for (entry = first; entry < unw_table_end && entry->start_offset < end; ++entry, lp += 3) {
+ for (entry = start; entry < end; ++entry, lp += 3) {
info_size = 8 + 8*UNW_LENGTH(*(u64 *) (segbase + entry->info_offset));
info -= info_size;
memcpy(info, (char *) segbase + entry->info_offset, info_size);
- lp[0] = entry->start_offset - start + GATE_ADDR; /* start */
- lp[1] = entry->end_offset - start + GATE_ADDR; /* end */
- lp[2] = info - (char *) unw.gate_table; /* info */
+ lp[0] = segbase + entry->start_offset; /* start */
+ lp[1] = segbase + entry->end_offset; /* end */
+ lp[2] = info - (char *) unw.gate_table; /* info */
}
*lp = 0; /* end-of-table marker */
}
-void
+__initcall(create_gate_table);
+
+void __init
unw_init (void)
{
extern int ia64_unw_start, ia64_unw_end, __gp;
}
/*
+ * DEPRECATED DEPRECATED DEPRECATED DEPRECATED DEPRECATED DEPRECATED DEPRECATED
+ *
+ * This system call has been deprecated. The new and improved way to get
+ * at the kernel's unwind info is via the gate DSO. The address of the
+ * ELF header for this DSO is passed to user-level via AT_SYSINFO_EHDR.
+ *
+ * DEPRECATED DEPRECATED DEPRECATED DEPRECATED DEPRECATED DEPRECATED DEPRECATED
+ *
* This system call copies the unwind data into the buffer pointed to by BUF and returns
* the size of the unwind data. If BUF_SIZE is smaller than the size of the unwind data
* or if BUF is NULL, nothing is copied, but the system call still returns the size of the
lib-$(CONFIG_MCKINLEY) += copy_page_mck.o memcpy_mck.o
lib-$(CONFIG_PERFMON) += carta_random.o
+ifeq ($(CONFIG_MD_RAID5),m)
+ lib-y += xor.o
+else
+ lib-$(CONFIG_MD_RAID5) += xor.o
+endif
+
IGNORE_FLAGS_OBJS = __divsi3.o __udivsi3.o __modsi3.o __umodsi3.o \
__divdi3.o __udivdi3.o __moddi3.o __umoddi3.o
#define NAME PASTE(PASTE(__,SGN),PASTE(OP,di3))
GLOBAL_ENTRY(NAME)
- .prologue
.regstk 2,0,0,0
// Transfer inputs to FP registers.
setf.sig f8 = in0
setf.sig f9 = in1
;;
- .fframe 16
- .save.f 0x20
- stf.spill [sp] = f17,-16
-
// Convert the inputs to FP, to avoid FP software-assist faults.
INT_TO_FP(f8, f8)
- ;;
-
- .save.f 0x10
- stf.spill [sp] = f16
- .body
INT_TO_FP(f9, f9)
;;
- frcpa.s1 f17, p6 = f8, f9 // y0 = frcpa(b)
+ frcpa.s1 f11, p6 = f8, f9 // y0 = frcpa(b)
;;
-(p6) fmpy.s1 f7 = f8, f17 // q0 = a*y0
-(p6) fnma.s1 f6 = f9, f17, f1 // e0 = -b*y0 + 1
+(p6) fmpy.s1 f7 = f8, f11 // q0 = a*y0
+(p6) fnma.s1 f6 = f9, f11, f1 // e0 = -b*y0 + 1
;;
-(p6) fma.s1 f16 = f7, f6, f7 // q1 = q0*e0 + q0
+(p6) fma.s1 f10 = f7, f6, f7 // q1 = q0*e0 + q0
(p6) fmpy.s1 f7 = f6, f6 // e1 = e0*e0
;;
#ifdef MODULO
sub in1 = r0, in1 // in1 = -b
#endif
-(p6) fma.s1 f16 = f16, f7, f16 // q2 = q1*e1 + q1
-(p6) fma.s1 f6 = f17, f6, f17 // y1 = y0*e0 + y0
+(p6) fma.s1 f10 = f10, f7, f10 // q2 = q1*e1 + q1
+(p6) fma.s1 f6 = f11, f6, f11 // y1 = y0*e0 + y0
;;
(p6) fma.s1 f6 = f6, f7, f6 // y2 = y1*e1 + y1
-(p6) fnma.s1 f7 = f9, f16, f8 // r = -b*q2 + a
+(p6) fnma.s1 f7 = f9, f10, f8 // r = -b*q2 + a
;;
#ifdef MODULO
setf.sig f8 = in0 // f8 = a
setf.sig f9 = in1 // f9 = -b
#endif
-(p6) fma.s1 f17 = f7, f6, f16 // q3 = r*y2 + q2
+(p6) fma.s1 f11 = f7, f6, f10 // q3 = r*y2 + q2
;;
- .restore sp
- ldf.fill f16 = [sp], 16
- FP_TO_INT(f17, f17) // q = trunc(q3)
+ FP_TO_INT(f11, f11) // q = trunc(q3)
;;
#ifdef MODULO
- xma.l f17 = f17, f9, f8 // r = q*(-b) + a
+ xma.l f11 = f11, f9, f8 // r = q*(-b) + a
;;
#endif
- getf.sig r8 = f17 // transfer result to result register
- ldf.fill f17 = [sp]
+ getf.sig r8 = f11 // transfer result to result register
br.ret.sptk.many rp
END(NAME)
#include <asm/asmmacro.h>
#include <asm/page.h>
-#if __GNUC__ >= 3
-# define EK(y...) EX(y)
-#else
-# define EK(y,x...) x
-#endif
+#define EK(y...) EX(y)
GLOBAL_ENTRY(bcopy)
.regstk 3,0,0,0
--- /dev/null
+/*
+ * arch/ia64/lib/xor.S
+ *
+ * Optimized RAID-5 checksumming functions for IA-64.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * You should have received a copy of the GNU General Public License
+ * (for example /usr/src/linux/COPYING); if not, write to the Free
+ * Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include <asm/asmmacro.h>
+
+GLOBAL_ENTRY(xor_ia64_2)
+ .prologue
+ .fframe 0
+ .save ar.pfs, r31
+ alloc r31 = ar.pfs, 3, 0, 13, 16
+ .save ar.lc, r30
+ mov r30 = ar.lc
+ .save pr, r29
+ mov r29 = pr
+ ;;
+ .body
+ mov r8 = in1
+ mov ar.ec = 6 + 2
+ shr in0 = in0, 3
+ ;;
+ adds in0 = -1, in0
+ mov r16 = in1
+ mov r17 = in2
+ ;;
+ mov ar.lc = in0
+ mov pr.rot = 1 << 16
+ ;;
+ .rotr s1[6+1], s2[6+1], d[2]
+ .rotp p[6+2]
+0:
+(p[0]) ld8.nta s1[0] = [r16], 8
+(p[0]) ld8.nta s2[0] = [r17], 8
+(p[6]) xor d[0] = s1[6], s2[6]
+(p[6+1])st8.nta [r8] = d[1], 8
+ nop.f 0
+ br.ctop.dptk.few 0b
+ ;;
+ mov ar.lc = r30
+ mov pr = r29, -1
+ br.ret.sptk.few rp
+END(xor_ia64_2)
+
+GLOBAL_ENTRY(xor_ia64_3)
+ .prologue
+ .fframe 0
+ .save ar.pfs, r31
+ alloc r31 = ar.pfs, 4, 0, 20, 24
+ .save ar.lc, r30
+ mov r30 = ar.lc
+ .save pr, r29
+ mov r29 = pr
+ ;;
+ .body
+ mov r8 = in1
+ mov ar.ec = 6 + 2
+ shr in0 = in0, 3
+ ;;
+ adds in0 = -1, in0
+ mov r16 = in1
+ mov r17 = in2
+ ;;
+ mov r18 = in3
+ mov ar.lc = in0
+ mov pr.rot = 1 << 16
+ ;;
+ .rotr s1[6+1], s2[6+1], s3[6+1], d[2]
+ .rotp p[6+2]
+0:
+(p[0]) ld8.nta s1[0] = [r16], 8
+(p[0]) ld8.nta s2[0] = [r17], 8
+(p[6]) xor d[0] = s1[6], s2[6]
+ ;;
+(p[0]) ld8.nta s3[0] = [r18], 8
+(p[6+1])st8.nta [r8] = d[1], 8
+(p[6]) xor d[0] = d[0], s3[6]
+ br.ctop.dptk.few 0b
+ ;;
+ mov ar.lc = r30
+ mov pr = r29, -1
+ br.ret.sptk.few rp
+END(xor_ia64_3)
+
+GLOBAL_ENTRY(xor_ia64_4)
+ .prologue
+ .fframe 0
+ .save ar.pfs, r31
+ alloc r31 = ar.pfs, 5, 0, 27, 32
+ .save ar.lc, r30
+ mov r30 = ar.lc
+ .save pr, r29
+ mov r29 = pr
+ ;;
+ .body
+ mov r8 = in1
+ mov ar.ec = 6 + 2
+ shr in0 = in0, 3
+ ;;
+ adds in0 = -1, in0
+ mov r16 = in1
+ mov r17 = in2
+ ;;
+ mov r18 = in3
+ mov ar.lc = in0
+ mov pr.rot = 1 << 16
+ mov r19 = in4
+ ;;
+ .rotr s1[6+1], s2[6+1], s3[6+1], s4[6+1], d[2]
+ .rotp p[6+2]
+0:
+(p[0]) ld8.nta s1[0] = [r16], 8
+(p[0]) ld8.nta s2[0] = [r17], 8
+(p[6]) xor d[0] = s1[6], s2[6]
+(p[0]) ld8.nta s3[0] = [r18], 8
+(p[0]) ld8.nta s4[0] = [r19], 8
+(p[6]) xor r20 = s3[6], s4[6]
+ ;;
+(p[6+1])st8.nta [r8] = d[1], 8
+(p[6]) xor d[0] = d[0], r20
+ br.ctop.dptk.few 0b
+ ;;
+ mov ar.lc = r30
+ mov pr = r29, -1
+ br.ret.sptk.few rp
+END(xor_ia64_4)
+
+GLOBAL_ENTRY(xor_ia64_5)
+ .prologue
+ .fframe 0
+ .save ar.pfs, r31
+ alloc r31 = ar.pfs, 6, 0, 34, 40
+ .save ar.lc, r30
+ mov r30 = ar.lc
+ .save pr, r29
+ mov r29 = pr
+ ;;
+ .body
+ mov r8 = in1
+ mov ar.ec = 6 + 2
+ shr in0 = in0, 3
+ ;;
+ adds in0 = -1, in0
+ mov r16 = in1
+ mov r17 = in2
+ ;;
+ mov r18 = in3
+ mov ar.lc = in0
+ mov pr.rot = 1 << 16
+ mov r19 = in4
+ mov r20 = in5
+ ;;
+ .rotr s1[6+1], s2[6+1], s3[6+1], s4[6+1], s5[6+1], d[2]
+ .rotp p[6+2]
+0:
+(p[0]) ld8.nta s1[0] = [r16], 8
+(p[0]) ld8.nta s2[0] = [r17], 8
+(p[6]) xor d[0] = s1[6], s2[6]
+(p[0]) ld8.nta s3[0] = [r18], 8
+(p[0]) ld8.nta s4[0] = [r19], 8
+(p[6]) xor r21 = s3[6], s4[6]
+ ;;
+(p[0]) ld8.nta s5[0] = [r20], 8
+(p[6+1])st8.nta [r8] = d[1], 8
+(p[6]) xor d[0] = d[0], r21
+ ;;
+(p[6]) xor d[0] = d[0], s5[6]
+ nop.f 0
+ br.ctop.dptk.few 0b
+ ;;
+ mov ar.lc = r30
+ mov pr = r29, -1
+ br.ret.sptk.few rp
+END(xor_ia64_5)
kaddr = (unsigned long)__va(bdp->node_boot_start);
ekaddr = (unsigned long)__va(bdp->node_low_pfn << PAGE_SHIFT);
while (kaddr < ekaddr) {
- bid = BANK_MEM_MAP_INDEX(kaddr);
- node_data[mynode]->node_id_map[bid] = node;
- node_data[mynode]->bank_mem_map_base[bid] = page;
+ if (paddr_to_nid(__pa(kaddr)) == node) {
+ bid = BANK_MEM_MAP_INDEX(kaddr);
+ node_data[mynode]->node_id_map[bid] = node;
+ node_data[mynode]->bank_mem_map_base[bid] = page;
+ }
kaddr += BANKSIZE;
page += BANKSIZE/PAGE_SIZE;
}
return 0;
}
+/*
+ * Return TRUE if ADDRESS points at a page in the kernel's mapped segment
+ * (inside region 5, on ia64) and that page is present.
+ */
+static int
+mapped_kernel_page_is_present (unsigned long address)
+{
+ pgd_t *pgd;
+ pmd_t *pmd;
+ pte_t *ptep, pte;
+
+ pgd = pgd_offset_k(address);
+ if (pgd_none(*pgd) || pgd_bad(*pgd))
+ return 0;
+
+ pmd = pmd_offset(pgd,address);
+ if (pmd_none(*pmd) || pmd_bad(*pmd))
+ return 0;
+
+ ptep = pte_offset_kernel(pmd, address);
+ if (!ptep)
+ return 0;
+
+ pte = *ptep;
+ return pte_present(pte);
+}
+
void
ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_regs *regs)
{
return;
/*
+ * Since we have no vma's for region 5, we might get here even if the address is
+ * valid, due to the VHPT walker inserting a non present translation that becomes
+ * stale. If that happens, the non present fault handler already purged the stale
+ * translation, which fixed the problem. So, we check to see if the translation is
+ * valid, and return if it is.
+ */
+ if (REGION_NUMBER(address) == 5 && mapped_kernel_page_is_present(address))
+ return;
+
+ /*
* Oops. The kernel tried to access some bad page. We'll have to terminate things
* with extreme prejudice.
*/
/* This function checks if the address and address+len falls out of HugeTLB region. It
* return -EINVAL if any part of address range falls in HugeTLB region.
*/
-int is_invalid_hugepage_range(unsigned long addr, unsigned long len)
+int check_valid_hugepage_range(unsigned long addr, unsigned long len)
{
if (REGION_NUMBER(addr) == REGION_HPAGE)
return -EINVAL;
/*
* Initialize MMU support.
*
- * Copyright (C) 1998-2002 Hewlett-Packard Co
+ * Copyright (C) 1998-2003 Hewlett-Packard Co
* David Mosberger-Tang <davidm@hpl.hp.com>
*/
#include <linux/config.h>
#include <linux/init.h>
#include <linux/bootmem.h>
+#include <linux/efi.h>
+#include <linux/elf.h>
#include <linux/mm.h>
+#include <linux/mmzone.h>
#include <linux/personality.h>
#include <linux/reboot.h>
#include <linux/slab.h>
#include <linux/swap.h>
-#include <linux/efi.h>
-#include <linux/mmzone.h>
#include <asm/a.out.h>
#include <asm/bitops.h>
#include <asm/ia32.h>
#include <asm/io.h>
#include <asm/machvec.h>
+#include <asm/patch.h>
#include <asm/pgalloc.h>
#include <asm/sal.h>
#include <asm/system.h>
-#include <asm/uaccess.h>
#include <asm/tlb.h>
+#include <asm/uaccess.h>
+#include <asm/unistd.h>
-struct mmu_gather mmu_gathers[NR_CPUS];
+DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
/* References to section boundaries: */
extern char _stext, _etext, _edata, __init_begin, __init_end, _end;
static int pgt_cache_water[2] = { 25, 50 };
+struct page *zero_page_memmap_ptr; /* map entry for zero page */
+
void
check_pgt_cache (void)
{
}
}
+void
+update_mmu_cache (struct vm_area_struct *vma, unsigned long vaddr, pte_t pte)
+{
+ unsigned long addr;
+ struct page *page;
+
+ if (!pte_exec(pte))
+ return; /* not an executable page... */
+
+ page = pte_page(pte);
+ /* don't use VADDR: it may not be mapped on this CPU (or may have just been flushed): */
+ addr = (unsigned long) page_address(page);
+
+ if (test_bit(PG_arch_1, &page->flags))
+ return; /* i-cache is already coherent with d-cache */
+
+ flush_icache_range(addr, addr + PAGE_SIZE);
+ set_bit(PG_arch_1, &page->flags); /* mark page as clean */
+}
+
+inline void
+ia64_set_rbs_bot (void)
+{
+ unsigned long stack_size = current->rlim[RLIMIT_STACK].rlim_max & -16;
+
+ if (stack_size > MAX_USER_STACK_SIZE)
+ stack_size = MAX_USER_STACK_SIZE;
+ current->thread.rbs_bot = STACK_TOP - stack_size;
+}
+
/*
* This performs some platform-dependent address space initialization.
* On IA-64, we want to setup the VM area for the register backing
{
struct vm_area_struct *vma;
+ ia64_set_rbs_bot();
+
/*
* If we're out of memory and kmem_cache_alloc() returns NULL, we simply ignore
* the problem. When the process attempts to write to the register backing store
vma = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
if (vma) {
vma->vm_mm = current->mm;
- vma->vm_start = IA64_RBS_BOT;
+ vma->vm_start = current->thread.rbs_bot;
vma->vm_end = vma->vm_start + PAGE_SIZE;
vma->vm_page_prot = protection_map[VM_DATA_DEFAULT_FLAGS & 0x7];
vma->vm_flags = VM_READ|VM_WRITE|VM_MAYREAD|VM_MAYWRITE|VM_GROWSUP;
void
free_initmem (void)
{
- unsigned long addr;
+ unsigned long addr, eaddr;
- addr = (unsigned long) &__init_begin;
- for (; addr < (unsigned long) &__init_end; addr += PAGE_SIZE) {
+ addr = (unsigned long) ia64_imva(&__init_begin);
+ eaddr = (unsigned long) ia64_imva(&__init_end);
+ while (addr < eaddr) {
ClearPageReserved(virt_to_page(addr));
set_page_count(virt_to_page(addr), 1);
free_page(addr);
++totalram_pages;
+ addr += PAGE_SIZE;
}
printk(KERN_INFO "Freeing unused kernel memory: %ldkB freed\n",
(&__init_end - &__init_begin) >> 10);
}
/*
- * This is like put_dirty_page() but installs a clean page with PAGE_GATE protection
- * (execute-only, typically).
+ * This is like put_dirty_page() but installs a clean page in the kernel's page table.
*/
struct page *
-put_gate_page (struct page *page, unsigned long address)
+put_kernel_page (struct page *page, unsigned long address, pgprot_t pgprot)
{
pgd_t *pgd;
pmd_t *pmd;
pte_t *pte;
if (!PageReserved(page))
- printk(KERN_ERR "put_gate_page: gate page at 0x%p not in reserved memory\n",
+ printk(KERN_ERR "put_kernel_page: page at 0x%p not in reserved memory\n",
page_address(page));
pgd = pgd_offset_k(address); /* note: this is NOT pgd_offset()! */
pte_unmap(pte);
goto out;
}
- set_pte(pte, mk_pte(page, PAGE_GATE));
+ set_pte(pte, mk_pte(page, pgprot));
pte_unmap(pte);
}
out: spin_unlock(&init_mm.page_table_lock);
return page;
}
+static void
+setup_gate (void)
+{
+ struct page *page;
+ extern char __start_gate_section[];
+
+ /*
+ * Map the gate page twice: once read-only to export the ELF headers etc. and once
+ * execute-only page to enable privilege-promotion via "epc":
+ */
+ page = virt_to_page(ia64_imva(__start_gate_section));
+ put_kernel_page(page, GATE_ADDR, PAGE_READONLY);
+#ifdef HAVE_BUGGY_SEGREL
+ page = virt_to_page(ia64_imva(__start_gate_section + PAGE_SIZE));
+ put_kernel_page(page, GATE_ADDR + PAGE_SIZE, PAGE_GATE);
+#else
+ put_kernel_page(page, GATE_ADDR + PERCPU_PAGE_SIZE, PAGE_GATE);
+#endif
+ ia64_patch_gate();
+}
+
void __init
ia64_mmu_init (void *my_cpu_data)
{
- unsigned long psr, rid, pta, impl_va_bits;
+ unsigned long psr, pta, impl_va_bits;
extern void __init tlb_init (void);
#ifdef CONFIG_DISABLE_VHPT
# define VHPT_ENABLE_BIT 0
# define VHPT_ENABLE_BIT 1
#endif
- /*
- * Set up the kernel identity mapping for regions 6 and 5. The mapping for region
- * 7 is setup up in _start().
- */
+ /* Pin mapping for percpu area into TLB */
psr = ia64_clear_ic();
-
- rid = ia64_rid(IA64_REGION_ID_KERNEL, __IA64_UNCACHED_OFFSET);
- ia64_set_rr(__IA64_UNCACHED_OFFSET, (rid << 8) | (IA64_GRANULE_SHIFT << 2));
-
- rid = ia64_rid(IA64_REGION_ID_KERNEL, VMALLOC_START);
- ia64_set_rr(VMALLOC_START, (rid << 8) | (PAGE_SHIFT << 2) | 1);
-
- /* ensure rr6 is up-to-date before inserting the PERCPU_ADDR translation: */
- ia64_srlz_d();
-
ia64_itr(0x2, IA64_TR_PERCPU_DATA, PERCPU_ADDR,
pte_val(pfn_pte(__pa(my_cpu_data) >> PAGE_SHIFT, PAGE_KERNEL)),
PERCPU_PAGE_SHIFT);
discontig_paging_init();
efi_memmap_walk(count_pages, &num_physpages);
+ zero_page_memmap_ptr = virt_to_page(ia64_imva(empty_zero_page));
}
#else /* !CONFIG_DISCONTIGMEM */
void
}
free_area_init(zones_size);
# endif /* !CONFIG_VIRTUAL_MEM_MAP */
+ zero_page_memmap_ptr = virt_to_page(ia64_imva(empty_zero_page));
}
#endif /* !CONFIG_DISCONTIGMEM */
return 0;
}
+/*
+ * Boot command-line option "nolwsys" can be used to disable the use of any light-weight
+ * system call handler. When this option is in effect, all fsyscalls will end up bubbling
+ * down into the kernel and calling the normal (heavy-weight) syscall handler. This is
+ * useful for performance testing, but conceivably could also come in handy for debugging
+ * purposes.
+ */
+
+static int nolwsys;
+
+static int __init
+nolwsys_setup (char *s)
+{
+ nolwsys = 1;
+ return 1;
+}
+
+__setup("nolwsys", nolwsys_setup);
+
void
mem_init (void)
{
- extern char __start_gate_section[];
long reserved_pages, codesize, datasize, initsize;
unsigned long num_pgt_pages;
pg_data_t *pgdat;
+ int i;
static struct kcore_list kcore_mem, kcore_vmem, kcore_kernel;
#ifdef CONFIG_PCI
if (num_pgt_pages > (u64) pgt_cache_water[1])
pgt_cache_water[1] = num_pgt_pages;
- /* install the gate page in the global page table: */
- put_gate_page(virt_to_page(__start_gate_section), GATE_ADDR);
+ /*
+	 * For fsyscall entry points with no light-weight handler, use the ordinary
+ * (heavy-weight) handler, but mark it by setting bit 0, so the fsyscall entry
+ * code can tell them apart.
+ */
+ for (i = 0; i < NR_syscalls; ++i) {
+ extern unsigned long fsyscall_table[NR_syscalls];
+ extern unsigned long sys_call_table[NR_syscalls];
+
+ if (!fsyscall_table[i] || nolwsys)
+ fsyscall_table[i] = sys_call_table[i] | 1;
+ }
+ setup_gate(); /* setup gate pages before we free up boot memory... */
#ifdef CONFIG_IA32_SUPPORT
ia32_gdt_init();
/*
* TLB support routines.
*
- * Copyright (C) 1998-2001 Hewlett-Packard Co
+ * Copyright (C) 1998-2001, 2003 Hewlett-Packard Co
* David Mosberger-Tang <davidm@hpl.hp.com>
*
* 08/02/00 A. Mallick <asit.k.mallick@intel.com>
#include <asm/pal.h>
#include <asm/tlbflush.h>
-#define SUPPORTED_PGBITS ( \
- 1 << _PAGE_SIZE_256M | \
- 1 << _PAGE_SIZE_64M | \
- 1 << _PAGE_SIZE_16M | \
- 1 << _PAGE_SIZE_4M | \
- 1 << _PAGE_SIZE_1M | \
- 1 << _PAGE_SIZE_256K | \
- 1 << _PAGE_SIZE_64K | \
- 1 << _PAGE_SIZE_16K | \
- 1 << _PAGE_SIZE_8K | \
- 1 << _PAGE_SIZE_4K )
+static struct {
+ unsigned long mask; /* mask of supported purge page-sizes */
+ unsigned long max_bits; /* log2() of largest supported purge page-size */
+} purge;
struct ia64_ctx ia64_ctx = {
.lock = SPIN_LOCK_UNLOCKED,
}
nbits = ia64_fls(size + 0xfff);
- if (((1UL << nbits) & SUPPORTED_PGBITS) == 0) {
- if (nbits > _PAGE_SIZE_256M)
- nbits = _PAGE_SIZE_256M;
- else
- /*
- * Some page sizes are not implemented in the
- * IA-64 arch, so if we get asked to clear an
- * unsupported page size, round up to the
- * nearest page size. Note that we depend on
- * the fact that if page size N is not
- * implemented, 2*N _is_ implemented.
- */
- ++nbits;
- if (((1UL << nbits) & SUPPORTED_PGBITS) == 0)
- panic("flush_tlb_range: BUG: nbits=%lu\n", nbits);
- }
+ while (unlikely (((1UL << nbits) & purge.mask) == 0) && (nbits < purge.max_bits))
+ ++nbits;
+ if (nbits > purge.max_bits)
+ nbits = purge.max_bits;
start &= ~((1UL << nbits) - 1);
# ifdef CONFIG_SMP
ia64_tlb_init (void)
{
ia64_ptce_info_t ptce_info;
+ unsigned long tr_pgbits;
+ long status;
+
+ if ((status = ia64_pal_vm_page_size(&tr_pgbits, &purge.mask)) != 0) {
+ printk(KERN_ERR "PAL_VM_PAGE_SIZE failed with status=%ld;"
+ "defaulting to architected purge page-sizes.\n", status);
+ purge.mask = 0x115557000;
+ }
+ purge.max_bits = ia64_fls(purge.mask);
ia64_get_ptce(&ptce_info);
local_cpu_data->ptce_base = ptce_info.base;
dir=$(dirname $0)
CC=$1
OBJDUMP=$2
-$CC -c $dir/check-gas-asm.S
-res=$($OBJDUMP -r --section .data check-gas-asm.o | fgrep 00004 | tr -s ' ' |cut -f3 -d' ')
+tmp=${TMPDIR:-/tmp}
+out=$tmp/out$$.o
+$CC -c $dir/check-gas-asm.S -o $out
+res=$($OBJDUMP -r --section .data $out | fgrep 00004 | tr -s ' ' |cut -f3 -d' ')
+rm -f $out
if [ $res != ".text" ]; then
echo buggy
else
--- /dev/null
+ .rodata
+ data4 @segrel(start)
+ .data
+start:
--- /dev/null
+SECTIONS {
+ . = SIZEOF_HEADERS;
+ .rodata : { *(.rodata) } :ro
+ . = 0xa0000;
+ .data : { *(.data) } :dat
+ /DISCARD/ : { *(*) }
+}
+PHDRS {
+ ro PT_LOAD FILEHDR PHDRS;
+ dat PT_LOAD;
+}
--- /dev/null
+#!/bin/sh
+#
+# Check whether linker can handle cross-segment @segrel():
+#
+CC=$1
+OBJDUMP=$2
+dir=$(dirname $0)
+tmp=${TMPDIR:-/tmp}
+out=$tmp/out$$
+$CC -nostdlib -static -Wl,-T$dir/check-segrel.lds $dir/check-segrel.S -o $out
+res=$($OBJDUMP --full --section .rodata $out | fgrep 000 | cut -f3 -d' ')
+rm -f $out
+if [ $res != 00000a00 ]; then
+ echo " -DHAVE_BUGGY_SEGREL"
+ cat >&2 <<EOF
+warning: your linker cannot handle cross-segment segment-relative relocations.
+ please upgrade to a newer version (it is safe to use this linker, but
+ the kernel will be bigger than strictly necessary).
+EOF
+fi
EXTRA_CFLAGS := -DLITTLE_ENDIAN
-obj-y += kernel/ # io/
+obj-y += kernel/ io/
1GB*<dn>, where dn is the digit number. The amount of memory
is 8MB*2**<d>. (If <d> = 0, the memory size is 0).
- SN1 doesnt support dimms this small but small memory systems
+ SN1 doesn't support dimms this small but small memory systems
boot faster on Medusa.
* FPROM EFI memory descriptor build routines
*
* - Routines to build the EFI memory descriptor map
- * - Should also be usable by the SGI SN1 prom to convert
+ * - Should also be usable by the SGI prom to convert
* klconfig to efi_memmap
*/
#define KERNEL_SIZE (4*MB)
#define PROMRESERVED_SIZE (1*MB)
-#ifdef CONFIG_IA64_SGI_SN1
-#define PHYS_ADDRESS(_n, _x) (((long)_n<<33) | (long)_x)
-#define MD_BANK_SHFT 30
-#else
+#ifdef SGI_SN2
#define PHYS_ADDRESS(_n, _x) (((long)_n<<38) | (long)_x | 0x3000000000UL)
#define MD_BANK_SHFT 34
#endif
return sn_config->cpus;
}
-/* For SN1, get the index th nasid */
+/* For SN, get the index'th nasid */
int
GetNasid(int index)
* actually disabled etc.
*/
-#ifdef CONFIG_IA64_SGI_SN1
-int
-IsBankPresent(int index, node_memmap_t nmemmap)
-{
- switch (index) {
- case 0:return nmemmap.b0;
- case 1:return nmemmap.b1;
- case 2:return nmemmap.b2;
- case 3:return nmemmap.b3;
- case 4:return nmemmap.b4;
- case 5:return nmemmap.b5;
- case 6:return nmemmap.b6;
- case 7:return nmemmap.b7;
- default:return -1 ;
- }
-}
-
-int
-GetBankSize(int index, node_memmap_t nmemmap)
-{
- switch (index) {
- case 0:
- case 1:return nmemmap.b01size;
- case 2:
- case 3:return nmemmap.b23size;
- case 4:
- case 5:return nmemmap.b45size;
- case 6:
- case 7:return nmemmap.b67size;
- default:return -1 ;
- }
-}
-
-#else
+#ifdef SGI_SN2
int
IsBankPresent(int index, node_memmap_t nmemmap)
{
for (cnode=0;cnode<numnodes;cnode++) {
nasid = GetNasid(cnode) ;
membank_info = GetMemBankInfo(cnode) ;
- for (bank=0;bank<NR_BANKS_PER_NODE;bank++) {
+ for (bank=0;bank<MD_BANKS_PER_NODE;bank++) {
if (IsBankPresent(bank, membank_info)) {
bsize = GetBankSize(bank, membank_info) ;
paddr = PHYS_ADDRESS(nasid, (long)bank<<MD_BANK_SHFT);
numbytes = BankSizeBytes(bsize);
-#ifdef CONFIG_IA64_SGI_SN2
+#ifdef SGI_SN2
/*
* Ignore directory.
* Shorten memory chunk by 1 page - makes a better
}
/*
- * Check for the node 0 hole. Since banks can't
+	 * Check for the node 0 hole. Since banks can't
* span the hole, we only need to check if the end of
* the range is the end of the hole.
*/
numbytes -= NODE0_HOLE_SIZE;
/*
* UGLY hack - we must skip overr the kernel and
- * PROM runtime services but we don't exactly where it is.
+	 * PROM runtime services but we don't know exactly where it is.
* So lets just reserve:
* node 0
* 0-1MB for PAL
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
- * Copyright (C) 2000-2002 Silicon Graphics, Inc. All rights reserved.
+ * Copyright (C) 2000-2003 Silicon Graphics, Inc. All rights reserved.
*/
#include <linux/config.h>
/*
- * Structure of the mem config of the node as a SN1 MI reg
+ * Structure of the mem config of the node as a SN MI reg
* Medusa supports this reg config.
*
* BankSize nibble to bank size mapping
#define MBSHIFT 20
-#ifdef CONFIG_IA64_SGI_SN1
-typedef struct node_memmap_s
-{
- unsigned int b0 :1, /* 0 bank 0 present */
- b1 :1, /* 1 bank 1 present */
- r01 :2, /* 2-3 reserved */
- b01size :4, /* 4-7 Size of bank 0 and 1 */
- b2 :1, /* 8 bank 2 present */
- b3 :1, /* 9 bank 3 present */
- r23 :2, /* 10-11 reserved */
- b23size :4, /* 12-15 Size of bank 2 and 3 */
- b4 :1, /* 16 bank 4 present */
- b5 :1, /* 17 bank 5 present */
- r45 :2, /* 18-19 reserved */
- b45size :4, /* 20-23 Size of bank 4 and 5 */
- b6 :1, /* 24 bank 6 present */
- b7 :1, /* 25 bank 7 present */
- r67 :2, /* 26-27 reserved */
- b67size :4; /* 28-31 Size of bank 6 and 7 */
-} node_memmap_t ;
-
-/* Support the medusa hack for 8M/16M/32M nodes */
-#define SN1_BANK_SIZE_SHIFT (MBSHIFT+6) /* 64 MB */
-#define BankSizeBytes(bsize) ((bsize<6) ? (1<<((bsize-1)+SN1_BANK_SIZE_SHIFT)) :\
- (1<<((bsize-9)+MBSHIFT)))
-#else
+#ifdef SGI_SN2
typedef struct node_memmap_s
{
unsigned int b0size :3, /* 0-2 bank 0 size */
#define SN2_BANK_SIZE_SHIFT (MBSHIFT+6) /* 64 MB */
#define BankPresent(bsize) (bsize<6)
#define BankSizeBytes(bsize) (BankPresent(bsize) ? 1UL<<((bsize)+SN2_BANK_SIZE_SHIFT) : 0)
+#define MD_BANKS_PER_NODE 4
+#define MD_BANKSIZE (1UL << 34)
#endif
typedef struct sn_memmap_s
* Copyright (C) 1998-2000 Hewlett-Packard Co
* Copyright (C) 1998-2000 David Mosberger-Tang <davidm@hpl.hp.com>
*
- * Copyright (C) 2000-2002 Silicon Graphics, Inc. All rights reserved.
+ * Copyright (C) 2000-2003 Silicon Graphics, Inc. All rights reserved.
*/
* be 0x00000ffffc000000, but on snia we use the (inverse swizzled)
* IOSPEC_BASE value
*/
-#ifdef CONFIG_IA64_SGI_SN1
-#define IOPB_PA 0xc0000FFFFC000000
-#else
+#ifdef SGI_SN2
#define IOPB_PA 0xc000000fcc000000
#endif
// Isolate node number we are running on.
mov r6 = ip;;
-#ifdef CONFIG_IA64_SGI_SN1
- shr r5 = r6,33;; // r5 = node number
- shl r6 = r5,33 // r6 = base memory address of node
-#else
+#ifdef SGI_SN2
shr r5 = r6,38 // r5 = node number
dep r6 = 0,r6,0,36 // r6 = base memory address of node
or r1 = r1,r6 // Relocate to boot node
// Lets figure out who we are & put it in the LID register.
-#ifdef CONFIG_IA64_SGI_SN2
+#ifdef SGI_SN2
// On SN2, we (currently) pass the cpu number in r10 at boot
and r25=3,r10;;
movl r16=0x8000008110000400 // Allow IPIs
1: cmp.eq p6,p7=8,r28 /* PAL_VM_SUMMARY */
(p7) br.cond.sptk.few 1f
movl r8=0
-#ifdef CONFIG_IA64_SGI_SN1
- movl r9=0x0203083001151059
- movl r10=0x1232
-#else
+#ifdef SGI_SN2
movl r9=0x0203083001151065
movl r10=0x183f
#endif
* Copyright (C) 1998-2000 David Mosberger-Tang <davidm@hpl.hp.com>
*
*
- * Copyright (C) 2000-2002 Silicon Graphics, Inc. All rights reserved.
+ * Copyright (C) 2000-2003 Silicon Graphics, Inc. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License
* http://oss.sgi.com/projects/GenInfo/NoticeExplan
*/
#include <linux/config.h>
-#include <asm/sn/pda.h>
#include <linux/efi.h>
#include <asm/pal.h>
#include <asm/sal.h>
#include <asm/sn/sn_sal.h>
#include <asm/processor.h>
#include <asm/sn/sn_cpuid.h>
-#ifdef CONFIG_IA64_SGI_SN2
+#ifdef SGI_SN2
#include <asm/sn/sn2/addrs.h>
#include <asm/sn/sn2/shub_mmr.h>
#endif
#define ACPI_SLIT_REVISION 1
#define OEMID "SGI"
-#ifdef CONFIG_IA64_SGI_SN1
-#define PRODUCT "SN1"
-#define PROXIMITY_DOMAIN(nasid) (nasid)
-#else
+#ifdef SGI_SN2
#define PRODUCT "SN2"
#define PROXIMITY_DOMAIN(nasid) (((nasid)>>1) & 255)
#endif
typedef union ia64_nasid_va {
struct {
-#if defined(CONFIG_IA64_SGI_SN1)
- unsigned long off : 33; /* intra-region offset */
- unsigned long nasid : 7; /* NASID */
- unsigned long off2 : 21; /* fill */
- unsigned long reg : 3; /* region number */
-#elif defined(CONFIG_IA64_SGI_SN2)
+#if defined(SGI_SN2)
unsigned long off : 36; /* intra-region offset */
unsigned long attr : 2;
unsigned long nasid : 11; /* NASID */
#define IS_VIRTUAL_MODE() ({struct ia64_psr psr; asm("mov %0=psr" : "=r"(psr)); psr.dt;})
#define ADDR_OF(p) (IS_VIRTUAL_MODE() ? ((void*)((long)(p)+PAGE_OFFSET)) : ((void*) (p)))
-#if defined(CONFIG_IA64_SGI_SN1)
-#define __fwtab_pa(n,x) ({ia64_nasid_va _v; _v.l = (long) (x); _v.f.nasid = (x) ? (n) : 0; _v.f.reg = 0; _v.l;})
-#elif defined(CONFIG_IA64_SGI_SN2)
+#if defined(SGI_SN2)
#define __fwtab_pa(n,x) ({ia64_nasid_va _v; _v.l = (long) (x); _v.f.nasid = (x) ? (n) : 0; _v.f.reg = 0; _v.f.attr = 3; _v.l;})
#endif
return EFI_UNSUPPORTED;
}
-#ifdef CONFIG_IA64_SGI_SN2
+#ifdef SGI_SN2
#undef cpu_physical_id
#define cpu_physical_id(cpuid) ((ia64_get_lid() >> 16) & 0xffff)
;
} else if (index == SAL_UPDATE_PAL) {
;
-#ifdef CONFIG_IA64_SGI_SN2
+#ifdef SGI_SN2
} else if (index == SN_SAL_LOG_CE) {
#ifdef ajmtestcpei
fprom_send_cpei();
/*
* Pass the parameter base address to the build_efi_xxx routines.
*/
-#if defined(CONFIG_IA64_SGI_SN1)
- build_init(8LL*GB*base_nasid);
-#else
+#if defined(SGI_SN2)
build_init(0x3000000000UL | ((long)base_nasid<<38));
#endif
* You can also edit this line to pass other arguments to the kernel.
* Note: disable kernel text replication.
*/
- strcpy(cmd_line, "init=/bin/bash ktreplicate=0");
+ strcpy(cmd_line, "init=/bin/bash console=ttyS0");
memset(efi_systab, 0, sizeof(efi_systab));
efi_systab->hdr.signature = EFI_SYSTEM_TABLE_SIGNATURE;
lsapic20->header.length = sizeof(struct acpi_table_lsapic);
lsapic20->acpi_id = cnode*4+cpu;
lsapic20->flags.enabled = 1;
-#if defined(CONFIG_IA64_SGI_SN1)
- lsapic20->eid = cpu;
- lsapic20->id = nasid;
-#else
+#if defined(SGI_SN2)
lsapic20->eid = nasid&0xffff;
lsapic20->id = (cpu<<4) | (nasid>>16);
#endif
srat_memory_affinity->proximity_domain = PROXIMITY_DOMAIN(nasid);
srat_memory_affinity->base_addr_lo = 0;
srat_memory_affinity->length_lo = 0;
-#if defined(CONFIG_IA64_SGI_SN1)
- srat_memory_affinity->base_addr_hi = nasid<<1;
- srat_memory_affinity->length_hi = SN1_NODE_SIZE>>32;
-#else
+#if defined(SGI_SN2)
srat_memory_affinity->base_addr_hi = (nasid<<6) | (3<<4);
- srat_memory_affinity->length_hi = SN2_NODE_SIZE>>32;
+ srat_memory_affinity->length_hi = (MD_BANKSIZE*MD_BANKS_PER_NODE)>>32;
#endif
srat_memory_affinity->memory_type = ACPI_ADDRESS_RANGE_MEMORY;
srat_memory_affinity->flags.enabled = 1;
srat_cpu_affinity->header.length = sizeof(struct acpi_table_processor_affinity);
srat_cpu_affinity->proximity_domain = PROXIMITY_DOMAIN(nasid);
srat_cpu_affinity->flags.enabled = 1;
-#if defined(CONFIG_IA64_SGI_SN1)
- srat_cpu_affinity->apic_id = nasid;
- srat_cpu_affinity->lsapic_eid = cpu;
-#else
+#if defined(SGI_SN2)
srat_cpu_affinity->lsapic_eid = nasid&0xffff;
srat_cpu_affinity->apic_id = (cpu<<4) | (nasid>>16);
#endif
sal_systab->sal_b_rev_minor = 0x0; /* 1.00 */
strcpy(sal_systab->oem_id, "SGI");
- strcpy(sal_systab->product_id, "SN1");
+ strcpy(sal_systab->product_id, "SN2");
/* fill in an entry point: */
sal_ed->type = SAL_DESC_ENTRY_POINT;
sal_systab->checksum = -checksum;
/* If the checksum is correct, the kernel tries to use the
- * table. We don't build enough table & the kernel aborts.
+	 * table. We don't build enough table & the kernel aborts.
* Note that the PROM hasd thhhe same problem!!
*/
for(cpu=0; cpu<CPUS_PER_NODE; cpu++) {
if (!IsCpuPresent(cnode, cpu))
continue;
-#ifdef CONFIG_IA64_SGI_SN1
- bsp_lid = (GetNasid(cnode)<<24) | (cpu<<16);
-#else
+#ifdef SGI_SN2
bsp_lid = (GetNasid(cnode)<<16) | (cpu<<28);
#endif
if (bsp-- > 0)
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
- * Copyright (C) 1992 - 1997, 2000-2002 Silicon Graphics, Inc. All Rights Reserved.
+ * Copyright (C) 1992 - 1997, 2000-2003 Silicon Graphics, Inc. All Rights Reserved.
*/
klgraph_init(void)
{
-#ifdef CONFIG_IA64_SGI_SN1
u64 *temp;
-#endif
/*
* Initialize some hub/xbow registers that allows access to
* Xbridge etc. These are normally done in PROM.
*/
/* Write IOERR clear to clear the CRAZY bit in the status */
-#ifdef CONFIG_IA64_SGI_SN1
- *(volatile uint64_t *)0xc0000a0001c001f8 = (uint64_t)0xffffffff;
-
- /* set widget control register...setting bedrock widget id to b */
- *(volatile uint64_t *)0xc0000a0001c00020 = (uint64_t)0x801b;
-
- /* set io outbound widget access...allow all */
- *(volatile uint64_t *)0xc0000a0001c00110 = (uint64_t)0xff01;
-
- /* set io inbound widget access...allow all */
- *(volatile uint64_t *)0xc0000a0001c00118 = (uint64_t)0xff01;
-
- /* set io crb timeout to max */
- *(volatile uint64_t *)0xc0000a0001c003c0 = (uint64_t)0xffffff;
- *(volatile uint64_t *)0xc0000a0001c003c0 = (uint64_t)0xffffff;
-
- /* set local block io permission...allow all */
- *(volatile uint64_t *)0xc0000a0001e04010 = (uint64_t)0xfffffffffffffff;
-
- /* clear any errors */
- /* clear_ii_error(); medusa should have cleared these */
-
- /* set default read response buffers in bridge */
- *(volatile u32 *)0xc0000a000f000280L = 0xba98;
- *(volatile u32 *)0xc0000a000f000288L = 0xba98;
-#elif CONFIG_IA64_SGI_SN2
*(volatile uint64_t *)0xc000000801c001f8 = (uint64_t)0xffffffff;
/* set widget control register...setting bedrock widget id to a */
/* set default read response buffers in bridge */
// [PI] *(volatile u32 *)0xc00000080f000280L = 0xba98;
// [PI] *(volatile u32 *)0xc00000080f000288L = 0xba98;
-#endif /* CONFIG_IA64_SGI_SN1 */
-
-#ifdef CONFIG_IA64_SGI_SN1
-
- /*
- * kldir entries initialization - mankato
- */
- convert(0x8000000000002000, 0x0000000000000000, 0x0000000000000000);
- convert(0x8000000000002010, 0x0000000000000000, 0x0000000000000000);
- convert(0x8000000000002020, 0x0000000000000000, 0x0000000000000000);
- convert(0x8000000000002030, 0x0000000000000000, 0x0000000000000000);
- convert(0x8000000000002040, 0x434d5f53505f5357, 0x0000000000030000);
- convert(0x8000000000002050, 0x0000000000000000, 0x0000000000010000);
- convert(0x8000000000002060, 0x0000000000000001, 0x0000000000000000);
- convert(0x8000000000002070, 0x0000000000000000, 0x0000000000000000);
- convert(0x8000000000002080, 0x0000000000000000, 0x0000000000000000);
- convert(0x8000000000002090, 0x0000000000000000, 0x0000000000000000);
- convert(0x80000000000020a0, 0x0000000000000000, 0x0000000000000000);
- convert(0x80000000000020b0, 0x0000000000000000, 0x0000000000000000);
- convert(0x80000000000020c0, 0x434d5f53505f5357, 0x0000000000000000);
- convert(0x80000000000020d0, 0x0000000000002400, 0x0000000000000400);
- convert(0x80000000000020e0, 0x0000000000000001, 0x0000000000000000);
- convert(0x80000000000020f0, 0x0000000000000000, 0x0000000000000000);
- convert(0x8000000000002100, 0x434d5f53505f5357, 0x0000000000040000);
- convert(0x8000000000002110, 0x0000000000000000, 0xffffffffffffffff);
- convert(0x8000000000002120, 0x0000000000000001, 0x0000000000000000);
- convert(0x8000000000002130, 0x0000000000000000, 0x0000000000000000);
- convert(0x8000000000002140, 0x0000000000000000, 0x0000000000000000);
- convert(0x8000000000002150, 0x0000000000000000, 0x0000000000000000);
- convert(0x8000000000002160, 0x0000000000000000, 0x0000000000000000);
- convert(0x8000000000002170, 0x0000000000000000, 0x0000000000000000);
- convert(0x8000000000002180, 0x434d5f53505f5357, 0x0000000000020000);
- convert(0x8000000000002190, 0x0000000000000000, 0x0000000000010000);
- convert(0x80000000000021a0, 0x0000000000000001, 0x0000000000000000);
- /*
- * klconfig entries initialization - mankato
- */
- convert(0x0000000000030000, 0x00000000beedbabe, 0x0000004800000000);
- convert(0x0000000000030010, 0x0003007000000018, 0x800002000f820178);
- convert(0x0000000000030020, 0x80000a000f024000, 0x800002000f800000);
- convert(0x0000000000030030, 0x0300fafa00012580, 0x00000000040f0000);
- convert(0x0000000000030040, 0x0000000000000000, 0x0003097000030070);
- convert(0x0000000000030050, 0x00030970000303b0, 0x0003181000033f70);
- convert(0x0000000000030060, 0x0003d51000037570, 0x0000000000038330);
- convert(0x0000000000030070, 0x0203110100030140, 0x0001000000000101);
- convert(0x0000000000030080, 0x0900000000000000, 0x000000004e465e67);
- convert(0x0000000000030090, 0x0003097000000000, 0x00030b1000030a40);
- convert(0x00000000000300a0, 0x00030cb000030be0, 0x000315a0000314d0);
- convert(0x00000000000300b0, 0x0003174000031670, 0x0000000000000000);
- convert(0x0000000000030100, 0x000000000000001a, 0x3350490000000000);
- convert(0x0000000000030110, 0x0000000000000037, 0x0000000000000000);
- convert(0x0000000000030140, 0x0002420100030210, 0x0001000000000101);
- convert(0x0000000000030150, 0x0100000000000000, 0xffffffffffffffff);
- convert(0x0000000000030160, 0x00030d8000000000, 0x0000000000030e50);
- convert(0x00000000000301c0, 0x0000000000000000, 0x0000000000030070);
- convert(0x00000000000301d0, 0x0000000000000025, 0x424f490000000000);
- convert(0x00000000000301e0, 0x000000004b434952, 0x0000000000000000);
- convert(0x0000000000030210, 0x00027101000302e0, 0x00010000000e4101);
- convert(0x0000000000030220, 0x0200000000000000, 0xffffffffffffffff);
- convert(0x0000000000030230, 0x00030f2000000000, 0x0000000000030ff0);
- convert(0x0000000000030290, 0x0000000000000000, 0x0000000000030140);
- convert(0x00000000000302a0, 0x0000000000000026, 0x7262490000000000);
- convert(0x00000000000302b0, 0x00000000006b6369, 0x0000000000000000);
- convert(0x00000000000302e0, 0x0002710100000000, 0x00010000000f3101);
- convert(0x00000000000302f0, 0x0500000000000000, 0xffffffffffffffff);
- convert(0x0000000000030300, 0x000310c000000000, 0x0003126000031190);
- convert(0x0000000000030310, 0x0003140000031330, 0x0000000000000000);
- convert(0x0000000000030360, 0x0000000000000000, 0x0000000000030140);
- convert(0x0000000000030370, 0x0000000000000029, 0x7262490000000000);
- convert(0x0000000000030380, 0x00000000006b6369, 0x0000000000000000);
- convert(0x0000000000030970, 0x0000000002010102, 0x0000000000000000);
- convert(0x0000000000030980, 0x000000004e465e67, 0xffffffff00000000);
- /* convert(0x00000000000309a0, 0x0000000000037570, 0x0000000100000000); */
- convert(0x00000000000309a0, 0x0000000000037570, 0xffffffff00000000);
- convert(0x00000000000309b0, 0x0000000000030070, 0x0000000000000000);
- convert(0x00000000000309c0, 0x000000000003f420, 0x0000000000000000);
- convert(0x0000000000030a40, 0x0000000002010125, 0x0000000000000000);
- convert(0x0000000000030a50, 0xffffffffffffffff, 0xffffffff00000000);
- convert(0x0000000000030a70, 0x0000000000037b78, 0x0000000000000000);
- convert(0x0000000000030b10, 0x0000000002010125, 0x0000000000000000);
- convert(0x0000000000030b20, 0xffffffffffffffff, 0xffffffff00000000);
- convert(0x0000000000030b40, 0x0000000000037d30, 0x0000000000000001);
- convert(0x0000000000030be0, 0x00000000ff010203, 0x0000000000000000);
- convert(0x0000000000030bf0, 0xffffffffffffffff, 0xffffffff000000ff);
- convert(0x0000000000030c10, 0x0000000000037ee8, 0x0100010000000200);
- convert(0x0000000000030cb0, 0x00000000ff310111, 0x0000000000000000);
- convert(0x0000000000030cc0, 0xffffffffffffffff, 0x0000000000000000);
- convert(0x0000000000030d80, 0x0000000002010104, 0x0000000000000000);
- convert(0x0000000000030d90, 0xffffffffffffffff, 0x00000000000000ff);
- convert(0x0000000000030db0, 0x0000000000037f18, 0x0000000000000000);
- convert(0x0000000000030dc0, 0x0000000000000000, 0x0003007000060000);
- convert(0x0000000000030de0, 0x0000000000000000, 0x0003021000050000);
- convert(0x0000000000030df0, 0x000302e000050000, 0x0000000000000000);
- convert(0x0000000000030e30, 0x0000000000000000, 0x000000000000000a);
- convert(0x0000000000030e50, 0x00000000ff00011a, 0x0000000000000000);
- convert(0x0000000000030e60, 0xffffffffffffffff, 0x0000000000000000);
- convert(0x0000000000030e80, 0x0000000000037fe0, 0x9e6e9e9e9e9e9e9e);
- convert(0x0000000000030e90, 0x000000000000bc6e, 0x0000000000000000);
- convert(0x0000000000030f20, 0x0000000002010205, 0x00000000d0020000);
- convert(0x0000000000030f30, 0xffffffffffffffff, 0x0000000e0000000e);
- convert(0x0000000000030f40, 0x000000000000000e, 0x0000000000000000);
- convert(0x0000000000030f50, 0x0000000000038010, 0x00000000000007ff);
- convert(0x0000000000030f70, 0x0000000000000000, 0x0000000022001077);
- convert(0x0000000000030fa0, 0x0000000000000000, 0x000000000003f4a8);
- convert(0x0000000000030ff0, 0x0000000000310120, 0x0000000000000000);
- convert(0x0000000000031000, 0xffffffffffffffff, 0xffffffff00000002);
- convert(0x0000000000031010, 0x000000000000000e, 0x0000000000000000);
- convert(0x0000000000031020, 0x0000000000038088, 0x0000000000000000);
- convert(0x00000000000310c0, 0x0000000002010205, 0x00000000d0020000);
- convert(0x00000000000310d0, 0xffffffffffffffff, 0x0000000f0000000f);
- convert(0x00000000000310e0, 0x000000000000000f, 0x0000000000000000);
- convert(0x00000000000310f0, 0x00000000000380b8, 0x00000000000007ff);
- convert(0x0000000000031120, 0x0000000022001077, 0x00000000000310a9);
- convert(0x0000000000031130, 0x00000000580211c1, 0x000000008009104c);
- convert(0x0000000000031140, 0x0000000000000000, 0x000000000003f4c0);
- convert(0x0000000000031190, 0x0000000000310120, 0x0000000000000000);
- convert(0x00000000000311a0, 0xffffffffffffffff, 0xffffffff00000003);
- convert(0x00000000000311b0, 0x000000000000000f, 0x0000000000000000);
- convert(0x00000000000311c0, 0x0000000000038130, 0x0000000000000000);
- convert(0x0000000000031260, 0x0000000000110106, 0x0000000000000000);
- convert(0x0000000000031270, 0xffffffffffffffff, 0xffffffff00000004);
- convert(0x0000000000031280, 0x000000000000000f, 0x0000000000000000);
- convert(0x00000000000312a0, 0x00000000ff110013, 0x0000000000000000);
- convert(0x00000000000312b0, 0xffffffffffffffff, 0xffffffff00000000);
- convert(0x00000000000312c0, 0x000000000000000f, 0x0000000000000000);
- convert(0x00000000000312e0, 0x0000000000110012, 0x0000000000000000);
- convert(0x00000000000312f0, 0xffffffffffffffff, 0xffffffff00000000);
- convert(0x0000000000031300, 0x000000000000000f, 0x0000000000000000);
- convert(0x0000000000031310, 0x0000000000038160, 0x0000000000000000);
- convert(0x0000000000031330, 0x00000000ff310122, 0x0000000000000000);
- convert(0x0000000000031340, 0xffffffffffffffff, 0xffffffff00000005);
- convert(0x0000000000031350, 0x000000000000000f, 0x0000000000000000);
- convert(0x0000000000031360, 0x0000000000038190, 0x0000000000000000);
- convert(0x0000000000031400, 0x0000000000310121, 0x0000000000000000);
- convert(0x0000000000031400, 0x0000000000310121, 0x0000000000000000);
- convert(0x0000000000031410, 0xffffffffffffffff, 0xffffffff00000006);
- convert(0x0000000000031420, 0x000000000000000f, 0x0000000000000000);
- convert(0x0000000000031430, 0x00000000000381c0, 0x0000000000000000);
- convert(0x00000000000314d0, 0x00000000ff010201, 0x0000000000000000);
- convert(0x00000000000314e0, 0xffffffffffffffff, 0xffffffff00000000);
- convert(0x0000000000031500, 0x00000000000381f0, 0x000030430000ffff);
- convert(0x0000000000031510, 0x000000000000ffff, 0x0000000000000000);
- convert(0x00000000000315a0, 0x00000020ff000201, 0x0000000000000000);
- convert(0x00000000000315b0, 0xffffffffffffffff, 0xffffffff00000001);
- convert(0x00000000000315d0, 0x0000000000038240, 0x00003f3f0000ffff);
- convert(0x00000000000315e0, 0x000000000000ffff, 0x0000000000000000);
- convert(0x0000000000031670, 0x00000000ff010201, 0x0000000000000000);
- convert(0x0000000000031680, 0xffffffffffffffff, 0x0000000100000002);
- convert(0x00000000000316a0, 0x0000000000038290, 0x000030430000ffff);
- convert(0x00000000000316b0, 0x000000000000ffff, 0x0000000000000000);
- convert(0x0000000000031740, 0x00000020ff000201, 0x0000000000000000);
- convert(0x0000000000031750, 0xffffffffffffffff, 0x0000000500000003);
- convert(0x0000000000031770, 0x00000000000382e0, 0x00003f3f0000ffff);
- convert(0x0000000000031780, 0x000000000000ffff, 0x0000000000000000);
-
- /*
- * GDA initialization - mankato
- */
- convert(0x8000000000002400, 0x0000000258464552, 0x000000000ead0000);
- convert(0x8000000000002480, 0xffffffff00010000, 0xffffffffffffffff);
- convert(0x8000000000002490, 0xffffffffffffffff, 0xffffffffffffffff);
- convert(0x80000000000024a0, 0xffffffffffffffff, 0xffffffffffffffff);
- convert(0x80000000000024b0, 0xffffffffffffffff, 0xffffffffffffffff);
- convert(0x80000000000024c0, 0xffffffffffffffff, 0xffffffffffffffff);
- convert(0x80000000000024d0, 0xffffffffffffffff, 0xffffffffffffffff);
- convert(0x80000000000024e0, 0xffffffffffffffff, 0xffffffffffffffff);
- convert(0x80000000000024f0, 0xffffffffffffffff, 0xffffffffffffffff);
- convert(0x8000000000002500, 0xffffffffffffffff, 0xffffffffffffffff);
- convert(0x8000000000002510, 0xffffffffffffffff, 0xffffffffffffffff);
- convert(0x8000000000002520, 0xffffffffffffffff, 0xffffffffffffffff);
- convert(0x8000000000002530, 0xffffffffffffffff, 0xffffffffffffffff);
- convert(0x8000000000002540, 0xffffffffffffffff, 0xffffffffffffffff);
- convert(0x8000000000002550, 0xffffffffffffffff, 0xffffffffffffffff);
- convert(0x8000000000002560, 0xffffffffffffffff, 0xffffffffffffffff);
- convert(0x8000000000002570, 0xffffffffffffffff, 0xffffffffffffffff);
- convert(0x8000000000002580, 0x000000000000ffff, 0x0000000000000000);
-#endif
-
+ /*
+ * klconfig entries initialization - mankato
+ */
+ convert(0xe000003000030000, 0x00000000beedbabe, 0x0000004800000000);
+ convert(0xe000003000030010, 0x0003007000000018, 0x800002000f820178);
+ convert(0xe000003000030020, 0x80000a000f024000, 0x800002000f800000);
+ convert(0xe000003000030030, 0x0300fafa00012580, 0x00000000040f0000);
+ convert(0xe000003000030040, 0x0000000000000000, 0x0003097000030070);
+ convert(0xe000003000030050, 0x00030970000303b0, 0x0003181000033f70);
+ convert(0xe000003000030060, 0x0003d51000037570, 0x0000000000038330);
+ convert(0xe000003000030070, 0x0203110100030140, 0x0001000000000101);
+ convert(0xe000003000030080, 0x0900000000000000, 0x000000004e465e67);
+ convert(0xe000003000030090, 0x0003097000000000, 0x00030b1000030a40);
+ convert(0xe0000030000300a0, 0x00030cb000030be0, 0x000315a0000314d0);
+ convert(0xe0000030000300b0, 0x0003174000031670, 0x0000000000000000);
+ convert(0xe000003000030100, 0x000000000000001a, 0x3350490000000000);
+ convert(0xe000003000030110, 0x0000000000000037, 0x0000000000000000);
+ convert(0xe000003000030140, 0x0002420100030210, 0x0001000000000101);
+ convert(0xe000003000030150, 0x0100000000000000, 0xffffffffffffffff);
+ convert(0xe000003000030160, 0x00030d8000000000, 0x0000000000030e50);
+ convert(0xe0000030000301c0, 0x0000000000000000, 0x0000000000030070);
+ convert(0xe0000030000301d0, 0x0000000000000025, 0x424f490000000000);
+ convert(0xe0000030000301e0, 0x000000004b434952, 0x0000000000000000);
+ convert(0xe000003000030210, 0x00027101000302e0, 0x00010000000e4101);
+ convert(0xe000003000030220, 0x0200000000000000, 0xffffffffffffffff);
+ convert(0xe000003000030230, 0x00030f2000000000, 0x0000000000030ff0);
+ convert(0xe000003000030290, 0x0000000000000000, 0x0000000000030140);
+ convert(0xe0000030000302a0, 0x0000000000000026, 0x7262490000000000);
+ convert(0xe0000030000302b0, 0x00000000006b6369, 0x0000000000000000);
+ convert(0xe0000030000302e0, 0x0002710100000000, 0x00010000000f3101);
+ convert(0xe0000030000302f0, 0x0500000000000000, 0xffffffffffffffff);
+ convert(0xe000003000030300, 0x000310c000000000, 0x0003126000031190);
+ convert(0xe000003000030310, 0x0003140000031330, 0x0000000000000000);
+ convert(0xe000003000030360, 0x0000000000000000, 0x0000000000030140);
+ convert(0xe000003000030370, 0x0000000000000029, 0x7262490000000000);
+ convert(0xe000003000030380, 0x00000000006b6369, 0x0000000000000000);
+ convert(0xe000003000030970, 0x0000000002010102, 0x0000000000000000);
+ convert(0xe000003000030980, 0x000000004e465e67, 0xffffffff00000000);
+ /* convert(0xe0000030000309a0, 0x0000000000037570, 0x0000000100000000); */
+ convert(0xe0000030000309a0, 0x0000000000037570, 0xffffffff00000000);
+ convert(0xe0000030000309b0, 0x0000000000030070, 0x0000000000000000);
+ convert(0xe0000030000309c0, 0x000000000003f420, 0x0000000000000000);
+ convert(0xe000003000030a40, 0x0000000002010125, 0x0000000000000000);
+ convert(0xe000003000030a50, 0xffffffffffffffff, 0xffffffff00000000);
+ convert(0xe000003000030a70, 0x0000000000037b78, 0x0000000000000000);
+ convert(0xe000003000030b10, 0x0000000002010125, 0x0000000000000000);
+ convert(0xe000003000030b20, 0xffffffffffffffff, 0xffffffff00000000);
+ convert(0xe000003000030b40, 0x0000000000037d30, 0x0000000000000001);
+ convert(0xe000003000030be0, 0x00000000ff010203, 0x0000000000000000);
+ convert(0xe000003000030bf0, 0xffffffffffffffff, 0xffffffff000000ff);
+ convert(0xe000003000030c10, 0x0000000000037ee8, 0x0100010000000200);
+ convert(0xe000003000030cb0, 0x00000000ff310111, 0x0000000000000000);
+ convert(0xe000003000030cc0, 0xffffffffffffffff, 0x0000000000000000);
+ convert(0xe000003000030d80, 0x0000000002010104, 0x0000000000000000);
+ convert(0xe000003000030d90, 0xffffffffffffffff, 0x00000000000000ff);
+ convert(0xe000003000030db0, 0x0000000000037f18, 0x0000000000000000);
+ convert(0xe000003000030dc0, 0x0000000000000000, 0x0003007000060000);
+ convert(0xe000003000030de0, 0x0000000000000000, 0x0003021000050000);
+ convert(0xe000003000030df0, 0x000302e000050000, 0x0000000000000000);
+ convert(0xe000003000030e30, 0x0000000000000000, 0x000000000000000a);
+ convert(0xe000003000030e50, 0x00000000ff00011a, 0x0000000000000000);
+ convert(0xe000003000030e60, 0xffffffffffffffff, 0x0000000000000000);
+ convert(0xe000003000030e80, 0x0000000000037fe0, 0x9e6e9e9e9e9e9e9e);
+ convert(0xe000003000030e90, 0x000000000000bc6e, 0x0000000000000000);
+ convert(0xe000003000030f20, 0x0000000002010205, 0x00000000d0020000);
+ convert(0xe000003000030f30, 0xffffffffffffffff, 0x0000000e0000000e);
+ convert(0xe000003000030f40, 0x000000000000000e, 0x0000000000000000);
+ convert(0xe000003000030f50, 0x0000000000038010, 0x00000000000007ff);
+ convert(0xe000003000030f70, 0x0000000000000000, 0x0000000022001077);
+ convert(0xe000003000030fa0, 0x0000000000000000, 0x000000000003f4a8);
+ convert(0xe000003000030ff0, 0x0000000000310120, 0x0000000000000000);
+ convert(0xe000003000031000, 0xffffffffffffffff, 0xffffffff00000002);
+ convert(0xe000003000031010, 0x000000000000000e, 0x0000000000000000);
+ convert(0xe000003000031020, 0x0000000000038088, 0x0000000000000000);
+ convert(0xe0000030000310c0, 0x0000000002010205, 0x00000000d0020000);
+ convert(0xe0000030000310d0, 0xffffffffffffffff, 0x0000000f0000000f);
+ convert(0xe0000030000310e0, 0x000000000000000f, 0x0000000000000000);
+ convert(0xe0000030000310f0, 0x00000000000380b8, 0x00000000000007ff);
+ convert(0xe000003000031120, 0x0000000022001077, 0x00000000000310a9);
+ convert(0xe000003000031130, 0x00000000580211c1, 0x000000008009104c);
+ convert(0xe000003000031140, 0x0000000000000000, 0x000000000003f4c0);
+ convert(0xe000003000031190, 0x0000000000310120, 0x0000000000000000);
+ convert(0xe0000030000311a0, 0xffffffffffffffff, 0xffffffff00000003);
+ convert(0xe0000030000311b0, 0x000000000000000f, 0x0000000000000000);
+ convert(0xe0000030000311c0, 0x0000000000038130, 0x0000000000000000);
+ convert(0xe000003000031260, 0x0000000000110106, 0x0000000000000000);
+ convert(0xe000003000031270, 0xffffffffffffffff, 0xffffffff00000004);
+ convert(0xe000003000031280, 0x000000000000000f, 0x0000000000000000);
+ convert(0xe0000030000312a0, 0x00000000ff110013, 0x0000000000000000);
+ convert(0xe0000030000312b0, 0xffffffffffffffff, 0xffffffff00000000);
+ convert(0xe0000030000312c0, 0x000000000000000f, 0x0000000000000000);
+ convert(0xe0000030000312e0, 0x0000000000110012, 0x0000000000000000);
+ convert(0xe0000030000312f0, 0xffffffffffffffff, 0xffffffff00000000);
+ convert(0xe000003000031300, 0x000000000000000f, 0x0000000000000000);
+ convert(0xe000003000031310, 0x0000000000038160, 0x0000000000000000);
+ convert(0xe000003000031330, 0x00000000ff310122, 0x0000000000000000);
+ convert(0xe000003000031340, 0xffffffffffffffff, 0xffffffff00000005);
+ convert(0xe000003000031350, 0x000000000000000f, 0x0000000000000000);
+ convert(0xe000003000031360, 0x0000000000038190, 0x0000000000000000);
+ convert(0xe000003000031400, 0x0000000000310121, 0x0000000000000000);
+ convert(0xe000003000031400, 0x0000000000310121, 0x0000000000000000);
+ convert(0xe000003000031410, 0xffffffffffffffff, 0xffffffff00000006);
+ convert(0xe000003000031420, 0x000000000000000f, 0x0000000000000000);
+ convert(0xe000003000031430, 0x00000000000381c0, 0x0000000000000000);
+ convert(0xe0000030000314d0, 0x00000000ff010201, 0x0000000000000000);
+ convert(0xe0000030000314e0, 0xffffffffffffffff, 0xffffffff00000000);
+ convert(0xe000003000031500, 0x00000000000381f0, 0x000030430000ffff);
+ convert(0xe000003000031510, 0x000000000000ffff, 0x0000000000000000);
+ convert(0xe0000030000315a0, 0x00000020ff000201, 0x0000000000000000);
+ convert(0xe0000030000315b0, 0xffffffffffffffff, 0xffffffff00000001);
+ convert(0xe0000030000315d0, 0x0000000000038240, 0x00003f3f0000ffff);
+ convert(0xe0000030000315e0, 0x000000000000ffff, 0x0000000000000000);
+ convert(0xe000003000031670, 0x00000000ff010201, 0x0000000000000000);
+ convert(0xe000003000031680, 0xffffffffffffffff, 0x0000000100000002);
+ convert(0xe0000030000316a0, 0x0000000000038290, 0x000030430000ffff);
+ convert(0xe0000030000316b0, 0x000000000000ffff, 0x0000000000000000);
+ convert(0xe000003000031740, 0x00000020ff000201, 0x0000000000000000);
+ convert(0xe000003000031750, 0xffffffffffffffff, 0x0000000500000003);
+ convert(0xe000003000031770, 0x00000000000382e0, 0x00003f3f0000ffff);
+ convert(0xe000003000031780, 0x000000000000ffff, 0x0000000000000000);
}
-
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
- * Copyright (C) 2000-2001 Silicon Graphics, Inc. All rights reserved.
+ * Copyright (C) 2000-2003 Silicon Graphics, Inc. All rights reserved.
*/
* First lets figure out who we are. This is done from the
* LID passed to us.
*/
-
-#ifdef CONFIG_IA64_SGI_SN1
- nasid = (lid>>24);
- syn = (lid>>17)&1;
- cpu = (lid>>16)&1;
-
- /*
- * Now pick a synergy master to initialize synergy registers.
- */
- if (test_and_set_bit(syn, &nasidmaster[nasid]) == 0) {
- synergy_init(nasid, syn);
- test_and_set_bit(syn+2, &nasidmaster[nasid]);
- } else
- while (get_bit(syn+2, &nasidmaster[nasid]) == 0);
-#else
nasid = (lid>>16)&0xfff;
cpu = (lid>>28)&3;
syn = 0;
-#endif
/*
* Now pick a nasid master to initialize Bedrock registers.
# License. See the file "COPYING" in the main directory of this archive
# for more details.
#
-# Copyright (c) 2001-2002 Silicon Graphics, Inc. All rights reserved.
+# Copyright (c) 2001-2003 Silicon Graphics, Inc. All rights reserved.
#
help() {
done
shift `expr $OPTIND - 1`
+#OBJDUMP=/usr/bin/ia64-linux-objdump
LINUX=${1:-vmlinux}
TEXTSYM=${2:-${LINUX}.sym}
TMPSYM=${2:-${LINUX}.sym.tmp}
awk '
+/ _start$/ {start=1}
+/ start_ap$/ {start=1}
/__start_gate_section/ {start=1}
/^'${dataprefix}\|${textprefix}'/ {
if ($4 == ".kdb")
n = 0
s = $(NF-1)
while (length(s) > 0) {
- n = n*16 + substr(s,1,1)
+ n = n*16 + (index("0123456789abcdef", substr(s,1,1)) - 1)
s = substr(s,2)
}
printf "GLOBAL | %s | DATA | %s | %d\n", $1, $NF, n
EXTRA_CFLAGS := -DLITTLE_ENDIAN
-ifdef CONFIG_IA64_SGI_SN2
-EXTRA_CFLAGS += -DSHUB_SWAP_WAR
-endif
-
-obj-$(CONFIG_IA64_SGI_SN) += stubs.o sgi_if.o xswitch.o klgraph_hack.o \
- hcl.o labelcl.o invent.o sgi_io_sim.o \
- klgraph_hack.o hcl_util.o cdl.o hubdev.o hubspc.o \
- alenlist.o pci.o pci_dma.o ate_utils.o \
- ifconfig_net.o io.o ioconfig_bus.o
-
-obj-$(CONFIG_IA64_SGI_SN2) += sn2/
-
-obj-$(CONFIG_PCIBA) += pciba.o
+obj-y += sgi_if.o xswitch.o sgi_io_sim.o cdl.o ate_utils.o \
+ io.o machvec/ drivers/ platform_init/ sn2/ hwgfs/
+++ /dev/null
-/* $Id$
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Copyright (C) 1992 - 1997, 2000-2002 Silicon Graphics, Inc. All rights reserved.
- */
-
-/* Implementation of Address/Length Lists. */
-
-
-#include <linux/types.h>
-#include <linux/slab.h>
-#include <linux/mmzone.h>
-#include <asm/sn/sgi.h>
-#include <asm/sn/alenlist.h>
-
-/*
- * Logically, an Address/Length List is a list of Pairs, where each pair
- * holds an Address and a Length, all in some Address Space. In this
- * context, "Address Space" is a particular Crosstalk Widget address
- * space, a PCI device address space, a VME bus address space, a
- * physical memory address space, etc.
- *
- * The main use for these Lists is to provide a single mechanism that
- * describes where in an address space a DMA occurs. This allows the
- * various I/O Bus support layers to provide a single interface for
- * DMA mapping and DMA translation without regard to how the DMA target
- * was specified by upper layers. The upper layers commonly specify a
- * DMA target via a buf structure page list, a kernel virtual address,
- * a user virtual address, a vector of addresses (a la uio and iov),
- * or possibly a pfn list.
- *
- * Address/Length Lists also enable drivers to take advantage of their
- * inate scatter/gather capabilities in systems where some address
- * translation may be required between bus adapters. The driver forms
- * a List that represents physical memory targets. This list is passed
- * to the various adapters, which apply various translations. The final
- * list that's returned to the driver is in terms of its local address
- * address space -- addresses which can be passed off to a scatter/gather
- * capable DMA controller.
- *
- * The current implementation is intended to be useful both in kernels
- * that support interrupt threads (INTR_KTHREAD) and in systems that do
- * not support interrupt threads. Of course, in the latter case, some
- * interfaces can be called only within a suspendable context.
- *
- * Basic operations on Address/Length Lists include:
- * alenlist_create Create a list
- * alenlist_clear Clear a list
- * alenlist_destroy Destroy a list
- * alenlist_append Append a Pair to the end of a list
- * alenlist_replace Replace a Pair in the middle of a list
- * alenlist_get Get an Address/Length Pair from a list
- * alenlist_size Return the number of Pairs in a list
- * alenlist_concat Append one list to the end of another
- * alenlist_clone Create a new copy of a list
- *
- * Operations that convert from upper-level specifications to Address/
- * Length Lists currently include:
- * kvaddr_to_alenlist Convert from a kernel virtual address
- * uvaddr_to_alenlist Convert from a user virtual address
- * buf_to_alenlist Convert from a buf structure
- * alenlist_done Tell system that we're done with an alenlist
- * obtained from a conversion.
- * Additional convenience operations:
- * alenpair_init Create a list and initialize it with a Pair
- * alenpair_get Peek at the first pair on a List
- *
- * A supporting type for Address/Length Lists is an alenlist_cursor_t. A
- * cursor marks a position in a List, and determines which Pair is fetched
- * by alenlist_get.
- * alenlist_cursor_create Allocate and initialize a cursor
- * alenlist_cursor_destroy Free space consumed by a cursor
- * alenlist_cursor_init (Re-)Initialize a cursor to point
- * to the start of a list
- * alenlist_cursor_clone Clone a cursor (at the current offset)
- * alenlist_cursor_offset Return the number of bytes into
- * a list that this cursor marks
- * Multiple cursors can point at various points into a List. Also, each
- * list maintains one "internal cursor" which may be updated by alenlist_clear
- * and alenlist_get. If calling code simply wishes to scan sequentially
- * through a list starting at the beginning, and if it is the only user of
- * a list, it can rely on this internal cursor rather than managing a
- * separate explicit cursor.
- *
- * The current implementation allows callers to allocate both cursors and
- * the lists as local stack (structure) variables. This allows for some
- * extra efficiency at the expense of forward binary compatibility. It
- * is recommended that customer drivers refrain from local allocation.
- * In fact, we likely will choose to move the structures out of the public
- * header file into a private place in order to discourage this usage.
- *
- * Currently, no locking is provided by the alenlist implementation.
- *
- * Implementation notes:
- * For efficiency, Pairs are grouped into "chunks" of, say, 32 Pairs
- * and a List consists of some number of these chunks. Chunks are completely
- * invisible to calling code. Chunks should be large enough to hold most
- * standard-sized DMA's, but not so large that they consume excessive space.
- *
- * It is generally expected that Lists will be constructed at one time and
- * scanned at a later time. It is NOT expected that drivers will scan
- * a List while the List is simultaneously extended, although this is
- * theoretically possible with sufficient upper-level locking.
- *
- * In order to support demands of Real-Time drivers and in order to support
- * swapping under low-memory conditions, we support the concept of a
- * "pre-allocated fixed-sized List". After creating a List with
- * alenlist_create, a driver may explicitly grow the list (via "alenlist_grow")
- * to a specific number of Address/Length pairs. It is guaranteed that future
- * operations involving this list will never automatically grow the list
- * (i.e. if growth is ever required, the operation will fail). Additionally,
- * operations that use alenlist's (e.g. DMA operations) accept a flag which
- * causes processing to take place "in-situ"; that is, the input alenlist
- * entries are replaced with output alenlist entries. The combination of
- * pre-allocated Lists and in-situ processing allows us to avoid the
- * potential deadlock scenario where we sleep (waiting for memory) in the
- * swap out path.
- *
- * For debugging, we track the number of allocated Lists in alenlist_count
- * the number of allocated chunks in alenlist_chunk_count, and the number
- * of allocate cursors in alenlist_cursor_count. We also provide a debug
- * routine, alenlist_show, which dumps the contents of an Address/Length List.
- *
- * Currently, Lists are formed by drivers on-demand. Eventually, we may
- * associate an alenlist with a buf structure and keep it up to date as
- * we go along. In that case, buf_to_alenlist simply returns a pointer
- * to the existing List, and increments the Lists's reference count.
- * alenlist_done would decrement the reference count and destroys the List
- * if it was the last reference.
- *
- * Eventually alenlist's may allow better support for user-level scatter/
- * gather operations (e.g. via readv/writev): With proper support, we
- * could potentially handle a vector of reads with a single scatter/gather
- * DMA operation. This could be especially useful on NUMA systems where
- * there's more of a reason for users to use vector I/O operations.
- *
- * Eventually, alenlist's may replace kaio lists, vhand page lists,
- * buffer cache pfdat lists, DMA page lists, etc.
- */
-
-/* Opaque data types */
-
-/* An Address/Length pair. */
-typedef struct alen_s {
- alenaddr_t al_addr;
- size_t al_length;
-} alen_t;
-
-/*
- * Number of elements in one chunk of an Address/Length List.
- *
- * This size should be sufficient to hold at least an "average" size
- * DMA request. Must be at least 1, and should be a power of 2,
- * for efficiency.
- */
-#define ALEN_CHUNK_SZ ((512*1024)/NBPP)
-
-/*
- * A fixed-size set of Address/Length Pairs. Chunks of Pairs are strung together
- * to form a complete Address/Length List. Chunking is entirely hidden within the
- * alenlist implementation, and it simply makes allocation and growth of lists more
- * efficient.
- */
-typedef struct alenlist_chunk_s {
- alen_t alc_pair[ALEN_CHUNK_SZ];/* list of addr/len pairs */
- struct alenlist_chunk_s *alc_next; /* point to next chunk of pairs */
-} *alenlist_chunk_t;
-
-/*
- * An Address/Length List. An Address/Length List is allocated with alenlist_create.
- * Alternatively, a list can be allocated on the stack (local variable of type
- * alenlist_t) and initialized with alenpair_init or with a combination of
- * alenlist_clear and alenlist_append, etc. Code which statically allocates these
- * structures loses forward binary compatibility!
- *
- * A statically allocated List is sufficiently large to hold ALEN_CHUNK_SZ pairs.
- */
-struct alenlist_s {
- unsigned short al_flags;
- unsigned short al_logical_size; /* logical size of list, in pairs */
- unsigned short al_actual_size; /* actual size of list, in pairs */
- struct alenlist_chunk_s *al_last_chunk; /* pointer to last logical chunk */
- struct alenlist_cursor_s al_cursor; /* internal cursor */
- struct alenlist_chunk_s al_chunk; /* initial set of pairs */
- alenaddr_t al_compaction_address; /* used to compact pairs */
-};
-
-/* al_flags field */
-#define AL_FIXED_SIZE 0x1 /* List is pre-allocated, and of fixed size */
-
-
-struct zone *alenlist_zone = NULL;
-struct zone *alenlist_chunk_zone = NULL;
-struct zone *alenlist_cursor_zone = NULL;
-
-#if DEBUG
-int alenlist_count=0; /* Currently allocated Lists */
-int alenlist_chunk_count = 0; /* Currently allocated chunks */
-int alenlist_cursor_count = 0; /* Currently allocate cursors */
-#define INCR_COUNT(ptr) atomic_inc((ptr));
-#define DECR_COUNT(ptr) atomic_dec((ptr));
-#else
-#define INCR_COUNT(ptr)
-#define DECR_COUNT(ptr)
-#endif /* DEBUG */
-
-#if DEBUG
-static void alenlist_show(alenlist_t);
-#endif /* DEBUG */
-
-/*
- * Initialize Address/Length List management. One time initialization.
- */
-void
-alenlist_init(void)
-{
- alenlist_zone = snia_kmem_zone_init(sizeof(struct alenlist_s), "alenlist");
- alenlist_chunk_zone = snia_kmem_zone_init(sizeof(struct alenlist_chunk_s), "alchunk");
- alenlist_cursor_zone = snia_kmem_zone_init(sizeof(struct alenlist_cursor_s), "alcursor");
-#if DEBUG
- idbg_addfunc("alenshow", alenlist_show);
-#endif /* DEBUG */
-}
-
-
-/*
- * Initialize an Address/Length List cursor.
- */
-static void
-do_cursor_init(alenlist_t alenlist, alenlist_cursor_t cursorp)
-{
- cursorp->al_alenlist = alenlist;
- cursorp->al_offset = 0;
- cursorp->al_chunk = &alenlist->al_chunk;
- cursorp->al_index = 0;
- cursorp->al_bcount = 0;
-}
-
-
-/*
- * Create an Address/Length List, and clear it.
- * Set the cursor to the beginning.
- */
-alenlist_t
-alenlist_create(unsigned flags)
-{
- alenlist_t alenlist;
-
- alenlist = snia_kmem_zone_alloc(alenlist_zone, flags & AL_NOSLEEP ? VM_NOSLEEP : 0);
- if (alenlist) {
- INCR_COUNT(&alenlist_count);
-
- alenlist->al_flags = 0;
- alenlist->al_logical_size = 0;
- alenlist->al_actual_size = ALEN_CHUNK_SZ;
- alenlist->al_last_chunk = &alenlist->al_chunk;
- alenlist->al_chunk.alc_next = NULL;
- do_cursor_init(alenlist, &alenlist->al_cursor);
- }
-
- return(alenlist);
-}
-
-
-/*
- * Grow an Address/Length List so that all resources needed to contain
- * the specified number of Pairs are pre-allocated. An Address/Length
- * List that has been explicitly "grown" will never *automatically*
- * grow, shrink, or be destroyed.
- *
- * Pre-allocation is useful for Real-Time drivers and for drivers that
- * may be used along the swap-out path and therefore cannot afford to
- * sleep until memory is freed.
- *
- * The cursor is set to the beginning of the list.
- */
-int
-alenlist_grow(alenlist_t alenlist, size_t npairs)
-{
- /*
- * This interface should be used relatively rarely, so
- * the implementation is kept simple: We clear the List,
- * then append npairs bogus entries. Finally, we mark
- * the list as FIXED_SIZE and re-initialize the internal
- * cursor.
- */
-
- /*
- * Temporarily mark as non-fixed size, since we're about
- * to shrink and expand it.
- */
- alenlist->al_flags &= ~AL_FIXED_SIZE;
-
- /* Free whatever was in the alenlist. */
- alenlist_clear(alenlist);
-
- /* Allocate everything that we need via automatic expansion. */
- while (npairs--)
- if (alenlist_append(alenlist, 0, 0, AL_NOCOMPACT) == ALENLIST_FAILURE)
- return(ALENLIST_FAILURE);
-
- /* Now, mark as FIXED_SIZE */
- alenlist->al_flags |= AL_FIXED_SIZE;
-
- /* Clear out bogus entries */
- alenlist_clear(alenlist);
-
- /* Initialize internal cursor to the beginning */
- do_cursor_init(alenlist, &alenlist->al_cursor);
-
- return(ALENLIST_SUCCESS);
-}
-
-
-/*
- * Clear an Address/Length List so that it holds no pairs.
- */
-void
-alenlist_clear(alenlist_t alenlist)
-{
- alenlist_chunk_t chunk, freechunk;
-
- /*
- * If this List is not FIXED_SIZE, free all the
- * extra chunks.
- */
- if (!(alenlist->al_flags & AL_FIXED_SIZE)) {
- /* First, free any extension alenlist chunks */
- chunk = alenlist->al_chunk.alc_next;
- while (chunk) {
- freechunk = chunk;
- chunk = chunk->alc_next;
- snia_kmem_zone_free(alenlist_chunk_zone, freechunk);
- DECR_COUNT(&alenlist_chunk_count);
- }
- alenlist->al_actual_size = ALEN_CHUNK_SZ;
- alenlist->al_chunk.alc_next = NULL;
- }
-
- alenlist->al_logical_size = 0;
- alenlist->al_last_chunk = &alenlist->al_chunk;
- do_cursor_init(alenlist, &alenlist->al_cursor);
-}
-
-
-/*
- * Create and initialize an Address/Length Pair.
- * This is intended for degenerate lists, consisting of a single
- * address/length pair.
- */
-alenlist_t
-alenpair_init( alenaddr_t address,
- size_t length)
-{
- alenlist_t alenlist;
-
- alenlist = alenlist_create(0);
-
- alenlist->al_logical_size = 1;
- ASSERT(alenlist->al_last_chunk == &alenlist->al_chunk);
- alenlist->al_chunk.alc_pair[0].al_length = length;
- alenlist->al_chunk.alc_pair[0].al_addr = address;
-
- return(alenlist);
-}
-
-/*
- * Return address/length from a degenerate (1-pair) List, or
- * first pair from a larger list. Does NOT update the internal cursor,
- * so this is an easy way to peek at a start address.
- */
-int
-alenpair_get( alenlist_t alenlist,
- alenaddr_t *address,
- size_t *length)
-{
- if (alenlist->al_logical_size == 0)
- return(ALENLIST_FAILURE);
-
- *length = alenlist->al_chunk.alc_pair[0].al_length;
- *address = alenlist->al_chunk.alc_pair[0].al_addr;
- return(ALENLIST_SUCCESS);
-}
-
-
-/*
- * Destroy an Address/Length List.
- */
-void
-alenlist_destroy(alenlist_t alenlist)
-{
- if (alenlist == NULL)
- return;
-
- /*
- * Turn off FIXED_SIZE so this List can be
- * automatically shrunk.
- */
- alenlist->al_flags &= ~AL_FIXED_SIZE;
-
- /* Free extension chunks first */
- if (alenlist->al_chunk.alc_next)
- alenlist_clear(alenlist);
-
- /* Now, free the alenlist itself */
- snia_kmem_zone_free(alenlist_zone, alenlist);
- DECR_COUNT(&alenlist_count);
-}
-
-/*
- * Release an Address/Length List.
- * This is in preparation for a day when alenlist's may be longer-lived, and
- * perhaps associated with a buf structure. We'd add a reference count, and
- * this routine would decrement the count. For now, we create alenlist's on
- * on demand and free them when done. If the driver is not explicitly managing
- * a List for its own use, it should call alenlist_done rather than alenlist_destroy.
- */
-void
-alenlist_done(alenlist_t alenlist)
-{
- alenlist_destroy(alenlist);
-}
-
-
-/*
- * Append another address/length to the end of an Address/Length List,
- * growing the list if permitted and necessary.
- *
- * Returns: SUCCESS/FAILURE
- */
-int
-alenlist_append( alenlist_t alenlist, /* append to this list */
- alenaddr_t address, /* address to append */
- size_t length, /* length to append */
- unsigned flags)
-{
- alen_t *alenp;
- int index, last_index;
-
- index = alenlist->al_logical_size % ALEN_CHUNK_SZ;
-
- if ((alenlist->al_logical_size > 0)) {
- /*
- * See if we can compact this new pair in with the previous entry.
- * al_compaction_address holds that value that we'd need to see
- * in order to compact.
- */
- if (!(flags & AL_NOCOMPACT) &&
- (alenlist->al_compaction_address == address)) {
- last_index = (alenlist->al_logical_size-1) % ALEN_CHUNK_SZ;
- alenp = &(alenlist->al_last_chunk->alc_pair[last_index]);
- alenp->al_length += length;
- alenlist->al_compaction_address += length;
- return(ALENLIST_SUCCESS);
- }
-
- /*
- * If we're out of room in this chunk, move to a new chunk.
- */
- if (index == 0) {
- if (alenlist->al_flags & AL_FIXED_SIZE) {
- alenlist->al_last_chunk = alenlist->al_last_chunk->alc_next;
-
- /* If we're out of space in a FIXED_SIZE List, quit. */
- if (alenlist->al_last_chunk == NULL) {
- ASSERT(alenlist->al_logical_size == alenlist->al_actual_size);
- return(ALENLIST_FAILURE);
- }
- } else {
- alenlist_chunk_t new_chunk;
-
- new_chunk = snia_kmem_zone_alloc(alenlist_chunk_zone,
- flags & AL_NOSLEEP ? VM_NOSLEEP : 0);
-
- if (new_chunk == NULL)
- return(ALENLIST_FAILURE);
-
- alenlist->al_last_chunk->alc_next = new_chunk;
- new_chunk->alc_next = NULL;
- alenlist->al_last_chunk = new_chunk;
- alenlist->al_actual_size += ALEN_CHUNK_SZ;
- INCR_COUNT(&alenlist_chunk_count);
- }
- }
- }
-
- alenp = &(alenlist->al_last_chunk->alc_pair[index]);
- alenp->al_addr = address;
- alenp->al_length = length;
-
- alenlist->al_logical_size++;
- alenlist->al_compaction_address = address + length;
-
- return(ALENLIST_SUCCESS);
-}
-
-
-/*
- * Replace an item in an Address/Length List. Cursor is updated so
- * that alenlist_get will get the next item in the list. This interface
- * is not very useful for drivers; but it is useful to bus providers
- * that need to translate between address spaced in situ. The old Address
- * and Length are returned.
- */
-/* ARGSUSED */
-int
-alenlist_replace( alenlist_t alenlist, /* in: replace in this list */
- alenlist_cursor_t cursorp, /* inout: which item to replace */
- alenaddr_t *addrp, /* inout: address */
- size_t *lengthp, /* inout: length */
- unsigned flags)
-{
- alen_t *alenp;
- alenlist_chunk_t chunk;
- unsigned int index;
- size_t length;
- alenaddr_t addr;
-
- if ((addrp == NULL) || (lengthp == NULL))
- return(ALENLIST_FAILURE);
-
- if (alenlist->al_logical_size == 0)
- return(ALENLIST_FAILURE);
-
- addr = *addrp;
- length = *lengthp;
-
- /*
- * If no cursor explicitly specified, use the Address/Length List's
- * internal cursor.
- */
- if (cursorp == NULL)
- cursorp = &alenlist->al_cursor;
-
- chunk = cursorp->al_chunk;
- index = cursorp->al_index;
-
- ASSERT(cursorp->al_alenlist == alenlist);
- if (cursorp->al_alenlist != alenlist)
- return(ALENLIST_FAILURE);
-
- alenp = &chunk->alc_pair[index];
-
- /* Return old values */
- *addrp = alenp->al_length;
- *lengthp = alenp->al_addr;
-
- /* Set up new values */
- alenp->al_length = length;
- alenp->al_addr = addr;
-
- /* Update cursor to point to next item */
- cursorp->al_bcount = length;
-
- return(ALENLIST_SUCCESS);
-}
-
-
-/*
- * Initialize a cursor in order to walk an alenlist.
- * An alenlist_cursor always points to the last thing that was obtained
- * from the list. If al_chunk is NULL, then nothing has yet been obtained.
- *
- * Note: There is an "internal cursor" associated with every Address/Length List.
- * For users that scan sequentially through a List, it is more efficient to
- * simply use the internal cursor. The caller must insure that no other users
- * will simultaneously scan the List. The caller can reposition the internal
- * cursor by calling alenlist_cursor_init with a NULL cursorp.
- */
-int
-alenlist_cursor_init(alenlist_t alenlist, size_t offset, alenlist_cursor_t cursorp)
-{
- size_t byte_count;
-
- if (cursorp == NULL)
- cursorp = &alenlist->al_cursor;
-
- /* Get internal cursor's byte count for use as a hint.
- *
- * If the internal cursor points passed the point that we're interested in,
- * we need to seek forward from the beginning. Otherwise, we can seek forward
- * from the internal cursor.
- */
- if ((offset > 0) &&
- ((byte_count = alenlist_cursor_offset(alenlist, (alenlist_cursor_t)NULL)) <= offset)) {
- offset -= byte_count;
- alenlist_cursor_clone(alenlist, NULL, cursorp);
- } else
- do_cursor_init(alenlist, cursorp);
-
- /* We could easily speed this up, but it shouldn't be used very often. */
- while (offset != 0) {
- alenaddr_t addr;
- size_t length;
-
- if (alenlist_get(alenlist, cursorp, offset, &addr, &length, 0) != ALENLIST_SUCCESS)
- return(ALENLIST_FAILURE);
- offset -= length;
- }
- return(ALENLIST_SUCCESS);
-}
-
-
-/*
- * Copy a cursor. The source cursor is either an internal alenlist cursor
- * or an explicit cursor.
- */
-int
-alenlist_cursor_clone( alenlist_t alenlist,
- alenlist_cursor_t cursorp_in,
- alenlist_cursor_t cursorp_out)
-{
- ASSERT(cursorp_out);
-
- if (alenlist && cursorp_in)
- if (alenlist != cursorp_in->al_alenlist)
- return(ALENLIST_FAILURE);
-
- if (alenlist)
- *cursorp_out = alenlist->al_cursor; /* small structure copy */
- else if (cursorp_in)
- *cursorp_out = *cursorp_in; /* small structure copy */
- else
- return(ALENLIST_FAILURE); /* no source */
-
- return(ALENLIST_SUCCESS);
-}
-
-/*
- * Return the number of bytes passed so far according to the specified cursor.
- * If cursorp is NULL, use the alenlist's internal cursor.
- */
-size_t
-alenlist_cursor_offset(alenlist_t alenlist, alenlist_cursor_t cursorp)
-{
- ASSERT(!alenlist || !cursorp || (alenlist == cursorp->al_alenlist));
-
- if (cursorp == NULL) {
- ASSERT(alenlist);
- cursorp = &alenlist->al_cursor;
- }
-
- return(cursorp->al_offset);
-}
-
-/*
- * Allocate and initialize an Address/Length List cursor.
- */
-alenlist_cursor_t
-alenlist_cursor_create(alenlist_t alenlist, unsigned flags)
-{
- alenlist_cursor_t cursorp;
-
- ASSERT(alenlist != NULL);
- cursorp = snia_kmem_zone_alloc(alenlist_cursor_zone, flags & AL_NOSLEEP ? VM_NOSLEEP : 0);
- if (cursorp) {
- INCR_COUNT(&alenlist_cursor_count);
- alenlist_cursor_init(alenlist, 0, cursorp);
- }
- return(cursorp);
-}
-
-/*
- * Free an Address/Length List cursor.
- */
-void
-alenlist_cursor_destroy(alenlist_cursor_t cursorp)
-{
- DECR_COUNT(&alenlist_cursor_count);
- snia_kmem_zone_free(alenlist_cursor_zone, cursorp);
-}
-
-
-/*
- * Fetch an address/length pair from an Address/Length List. Update
- * the "cursor" so that next time this routine is called, we'll get
- * the next address range. Never return a length that exceeds maxlength
- * (if non-zero). If maxlength is a power of 2, never return a length
- * that crosses a maxlength boundary. [This may seem strange at first,
- * but it's what many drivers want.]
- *
- * Returns: SUCCESS/FAILURE
- */
-int
-alenlist_get( alenlist_t alenlist, /* in: get from this list */
- alenlist_cursor_t cursorp, /* inout: which item to get */
- size_t maxlength, /* in: at most this length */
- alenaddr_t *addrp, /* out: address */
- size_t *lengthp, /* out: length */
- unsigned flags)
-{
- alen_t *alenp;
- alenlist_chunk_t chunk;
- unsigned int index;
- size_t bcount;
- size_t length;
-
- /*
- * If no cursor explicitly specified, use the Address/Length List's
- * internal cursor.
- */
- if (cursorp == NULL) {
- if (alenlist->al_logical_size == 0)
- return(ALENLIST_FAILURE);
- cursorp = &alenlist->al_cursor;
- }
-
- chunk = cursorp->al_chunk;
- index = cursorp->al_index;
- bcount = cursorp->al_bcount;
-
- ASSERT(cursorp->al_alenlist == alenlist);
- if (cursorp->al_alenlist != alenlist)
- return(ALENLIST_FAILURE);
-
- alenp = &chunk->alc_pair[index];
- length = alenp->al_length - bcount;
-
- /* Bump up to next pair, if we're done with this pair. */
- if (length == 0) {
- cursorp->al_bcount = bcount = 0;
- cursorp->al_index = index = (index + 1) % ALEN_CHUNK_SZ;
-
- /* Bump up to next chunk, if we're done with this chunk. */
- if (index == 0) {
- if (cursorp->al_chunk == alenlist->al_last_chunk)
- return(ALENLIST_FAILURE);
- chunk = chunk->alc_next;
- ASSERT(chunk != NULL);
- } else {
- /* If in last chunk, don't go beyond end. */
- if (cursorp->al_chunk == alenlist->al_last_chunk) {
- int last_size = alenlist->al_logical_size % ALEN_CHUNK_SZ;
- if (last_size && (index >= last_size))
- return(ALENLIST_FAILURE);
- }
- }
-
- alenp = &chunk->alc_pair[index];
- length = alenp->al_length;
- }
-
- /* Constrain what we return according to maxlength */
- if (maxlength) {
- size_t maxlen1 = maxlength - 1;
-
- if ((maxlength & maxlen1) == 0) /* power of 2 */
- maxlength -=
- ((alenp->al_addr + cursorp->al_bcount) & maxlen1);
-
- length = min(maxlength, length);
- }
-
- /* Update the cursor, if desired. */
- if (!(flags & AL_LEAVE_CURSOR)) {
- cursorp->al_bcount += length;
- cursorp->al_chunk = chunk;
- }
-
- *lengthp = length;
- *addrp = alenp->al_addr + bcount;
-
- return(ALENLIST_SUCCESS);
-}
-
-
-/*
- * Return the number of pairs in the specified Address/Length List.
- * (For FIXED_SIZE Lists, this returns the logical size of the List,
- * not the actual capacity of the List.)
- */
-int
-alenlist_size(alenlist_t alenlist)
-{
- return(alenlist->al_logical_size);
-}
-
-
-/*
- * Concatenate two Address/Length Lists.
- */
-void
-alenlist_concat(alenlist_t from,
- alenlist_t to)
-{
- struct alenlist_cursor_s cursor;
- alenaddr_t addr;
- size_t length;
-
- alenlist_cursor_init(from, 0, &cursor);
-
- while(alenlist_get(from, &cursor, (size_t)0, &addr, &length, 0) == ALENLIST_SUCCESS)
- alenlist_append(to, addr, length, 0);
-}
-
-/*
- * Create a copy of a list.
- * (Not all attributes of the old list are cloned. For instance, if
- * a FIXED_SIZE list is cloned, the resulting list is NOT FIXED_SIZE.)
- */
-alenlist_t
-alenlist_clone(alenlist_t old_list, unsigned flags)
-{
- alenlist_t new_list;
-
- new_list = alenlist_create(flags);
- if (new_list != NULL)
- alenlist_concat(old_list, new_list);
-
- return(new_list);
-}
-
-
-/*
- * Convert a kernel virtual address to a Physical Address/Length List.
- */
-alenlist_t
-kvaddr_to_alenlist(alenlist_t alenlist, caddr_t kvaddr, size_t length, unsigned flags)
-{
- alenaddr_t paddr;
- long offset;
- size_t piece_length;
- int created_alenlist;
-
- if (length <=0)
- return(NULL);
-
- /* If caller supplied a List, use it. Otherwise, allocate one. */
- if (alenlist == NULL) {
- alenlist = alenlist_create(0);
- created_alenlist = 1;
- } else {
- alenlist_clear(alenlist);
- created_alenlist = 0;
- }
-
- paddr = kvtophys(kvaddr);
- offset = poff(kvaddr);
-
- /* Handle first page */
- piece_length = min((size_t)(NBPP - offset), length);
- if (alenlist_append(alenlist, paddr, piece_length, flags) == ALENLIST_FAILURE)
- goto failure;
- length -= piece_length;
- kvaddr += piece_length;
-
- /* Handle middle pages */
- while (length >= NBPP) {
- paddr = kvtophys(kvaddr);
- if (alenlist_append(alenlist, paddr, NBPP, flags) == ALENLIST_FAILURE)
- goto failure;
- length -= NBPP;
- kvaddr += NBPP;
- }
-
- /* Handle last page */
- if (length) {
- ASSERT(length < NBPP);
- paddr = kvtophys(kvaddr);
- if (alenlist_append(alenlist, paddr, length, flags) == ALENLIST_FAILURE)
- goto failure;
- }
-
- alenlist_cursor_init(alenlist, 0, NULL);
- return(alenlist);
-
-failure:
- if (created_alenlist)
- alenlist_destroy(alenlist);
- return(NULL);
-}
-
-
-#if DEBUG
-static void
-alenlist_show(alenlist_t alenlist)
-{
- struct alenlist_cursor_s cursor;
- alenaddr_t addr;
- size_t length;
- int i = 0;
-
- alenlist_cursor_init(alenlist, 0, &cursor);
-
- qprintf("Address/Length List@0x%x:\n", alenlist);
- qprintf("logical size=0x%x actual size=0x%x last_chunk at 0x%x\n",
- alenlist->al_logical_size, alenlist->al_actual_size,
- alenlist->al_last_chunk);
- qprintf("cursor: chunk=0x%x index=%d offset=0x%x\n",
- alenlist->al_cursor.al_chunk,
- alenlist->al_cursor.al_index,
- alenlist->al_cursor.al_bcount);
- while(alenlist_get(alenlist, &cursor, (size_t)0, &addr, &length, 0) == ALENLIST_SUCCESS)
- qprintf("%d:\t0x%lx 0x%lx\n", ++i, addr, length);
-}
-#endif /* DEBUG */
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
- * Copyright (C) 1992 - 1997, 2000-2002 Silicon Graphics, Inc. All rights reserved.
+ * Copyright (C) 1992 - 1997, 2000-2003 Silicon Graphics, Inc. All rights reserved.
*/
#include <linux/types.h>
#include <asm/sn/ioerror_handling.h>
#include <asm/sn/xtalk/xbow.h>
#include <asm/sn/ioc3.h>
-#include <asm/sn/eeprom.h>
#include <asm/sn/sn_private.h>
#include <asm/sn/ate_utils.h>
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
- * Copyright (C) 1992 - 1997, 2000-2002 Silicon Graphics, Inc. All rights reserved.
+ * Copyright (C) 1992 - 1997, 2000-2003 Silicon Graphics, Inc. All rights reserved.
*/
#include <linux/config.h>
#include <asm/sn/xtalk/xbow.h>
/* these get called directly in cdl_add_connpt in fops bypass hack */
-extern int pcibr_attach(devfs_handle_t);
-extern int xbow_attach(devfs_handle_t);
-extern int pic_attach(devfs_handle_t);
+extern int pcibr_attach(vertex_hdl_t);
+extern int xbow_attach(vertex_hdl_t);
+extern int pic_attach(vertex_hdl_t);
/*
* IO Infrastructure Drivers e.g. pcibr.
*/
-struct cdl {
- int part_num;
- int mfg_num;
- int (*attach) (devfs_handle_t);
-} dummy_reg;
-
-#ifdef CONFIG_IA64_SGI_SN1
-#define MAX_SGI_IO_INFRA_DRVR 4
-#else
#define MAX_SGI_IO_INFRA_DRVR 7
-#endif
+
static struct cdl sgi_infrastructure_drivers[MAX_SGI_IO_INFRA_DRVR] =
{
{ XBRIDGE_WIDGET_PART_NUM, XBRIDGE_WIDGET_MFGR_NUM, pcibr_attach /* &pcibr_fops */},
{ BRIDGE_WIDGET_PART_NUM, BRIDGE_WIDGET_MFGR_NUM, pcibr_attach /* &pcibr_fops */},
-#ifndef CONFIG_IA64_SGI_SN1
{ PIC_WIDGET_PART_NUM_BUS0, PIC_WIDGET_MFGR_NUM, pic_attach /* &pic_fops */},
{ PIC_WIDGET_PART_NUM_BUS1, PIC_WIDGET_MFGR_NUM, pic_attach /* &pic_fops */},
-#endif
{ XXBOW_WIDGET_PART_NUM, XXBOW_WIDGET_MFGR_NUM, xbow_attach /* &xbow_fops */},
{ XBOW_WIDGET_PART_NUM, XBOW_WIDGET_MFGR_NUM, xbow_attach /* &xbow_fops */},
-#ifndef CONFIG_IA64_SGI_SN1
{ PXBOW_WIDGET_PART_NUM, XXBOW_WIDGET_MFGR_NUM, xbow_attach /* &xbow_fops */},
-#endif
};
/*
- * cdl_new: Called by pciio and xtalk.
- */
-cdl_p
-cdl_new(char *name, char *k1str, char *k2str)
-{
- /*
- * Just return a dummy pointer.
- */
- return((cdl_p)&dummy_reg);
-}
-
-/*
- * cdl_del: Do nothing.
- */
-void
-cdl_del(cdl_p reg)
-{
- return;
-}
-
-/*
- * cdl_add_driver: The driver part number and manufacturers number
- * are statically initialized above.
- *
- Do nothing.
- */
-int
-cdl_add_driver(cdl_p reg, int key1, int key2, char *prefix, int flags, cdl_drv_f *func)
-{
- return 0;
-}
-
-/*
- * cdl_del_driver: Not supported.
- */
-void
-cdl_del_driver(cdl_p reg, char *prefix, cdl_drv_f *func)
-{
- return;
-}
-
-/*
* cdl_add_connpt: We found a device and it's connect point. Call the
* attach routine of that driver.
*
* vertices.
*/
int
-cdl_add_connpt(cdl_p reg, int part_num, int mfg_num,
- devfs_handle_t connpt, int drv_flags)
+cdl_add_connpt(int part_num, int mfg_num,
+ vertex_hdl_t connpt, int drv_flags)
{
int i;
* Find the driver entry point and call the attach routine.
*/
for (i = 0; i < MAX_SGI_IO_INFRA_DRVR; i++) {
-
if ( (part_num == sgi_infrastructure_drivers[i].part_num) &&
( mfg_num == sgi_infrastructure_drivers[i].mfg_num) ) {
/*
return (0);
}
-
-/*
- * cdl_del_connpt: Not implemented.
- */
-int
-cdl_del_connpt(cdl_p reg, int key1, int key2, devfs_handle_t connpt, int drv_flags)
-{
-
- return(0);
-}
-
-/*
- * cdl_iterate: Not Implemented.
- */
-void
-cdl_iterate(cdl_p reg,
- char *prefix,
- cdl_iter_f * func)
-{
- return;
-}
-
-async_attach_t
-async_attach_new(void)
-{
-
- return(0);
-}
-
-void
-async_attach_free(async_attach_t aa)
-{
- return;
-}
-
-async_attach_t
-async_attach_get_info(devfs_handle_t vhdl)
-{
-
- return(0);
-}
-
-void
-async_attach_add_info(devfs_handle_t vhdl, async_attach_t aa)
-{
- return;
-
-}
-
-void
-async_attach_del_info(devfs_handle_t vhdl)
-{
- return;
-}
-
-void async_attach_signal_start(async_attach_t aa)
-{
- return;
-}
-
-void async_attach_signal_done(async_attach_t aa)
-{
- return;
-}
-
-void async_attach_waitall(async_attach_t aa)
-{
- return;
-}
-
--- /dev/null
+#
+# This file is subject to the terms and conditions of the GNU General Public
+# License. See the file "COPYING" in the main directory of this archive
+# for more details.
+#
+# Copyright (C) 2002-2003 Silicon Graphics, Inc. All Rights Reserved.
+#
+# Makefile for the sn2 io routines.
+
+EXTRA_CFLAGS := -DLITTLE_ENDIAN
+
+obj-y += ioconfig_bus.o ifconfig_net.o
--- /dev/null
+/* $Id: ifconfig_net.c,v 1.1 2002/02/28 17:31:25 marcelo Exp $
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * ifconfig_net - SGI's Persistent Network Device names.
+ *
+ * Copyright (C) 1992-1997, 2000-2003 Silicon Graphics, Inc. All rights reserved.
+ */
+
+#include <linux/types.h>
+#include <linux/config.h>
+#include <linux/slab.h>
+#include <linux/ctype.h>
+#include <linux/module.h>
+#include <linux/init.h>
+
+#include <linux/pci.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+
+#include <asm/sn/sgi.h>
+#include <linux/devfs_fs.h>
+#include <linux/devfs_fs_kernel.h>
+#include <asm/io.h>
+#include <asm/sn/iograph.h>
+#include <asm/sn/invent.h>
+#include <asm/sn/hcl.h>
+#include <asm/sn/labelcl.h>
+#include <asm/sn/ifconfig_net.h>
+
+#define SGI_IFCONFIG_NET "SGI-PERSISTENT NETWORK DEVICE NAME DRIVER"
+#define SGI_IFCONFIG_NET_VERSION "1.0"
+
+/*
+ * Some Global definitions.
+ */
+static devfs_handle_t ifconfig_net_handle;
+static unsigned long ifconfig_net_debug;
+
+/*
+ * ifconfig_net_open - Opens the special device node "/devhw/.ifconfig_net".
+ */
+static int ifconfig_net_open(struct inode * inode, struct file * filp)
+{
+ if (ifconfig_net_debug) {
+ printk("ifconfig_net_open called.\n");
+ }
+
+ return(0);
+
+}
+
+/*
+ * ifconfig_net_close - Closes the special device node "/devhw/.ifconfig_net".
+ */
+static int ifconfig_net_close(struct inode * inode, struct file * filp)
+{
+
+ if (ifconfig_net_debug) {
+ printk("ifconfig_net_close called.\n");
+ }
+
+ return(0);
+}
+
+/*
+ * assign_ifname - Assign the next available interface name from the persistent list.
+ */
+void
+assign_ifname(struct net_device *dev,
+ struct ifname_num *ifname_num)
+
+{
+
+ /*
+ * Handle eth devices.
+ */
+ if ( (memcmp(dev->name, "eth", 3) == 0) ) {
+ if (ifname_num->next_eth != -1) {
+ /*
+ * Assign it the next available eth interface number.
+ */
+ memset(dev->name, 0, strlen(dev->name));
+ sprintf(dev->name, "eth%d", (int)ifname_num->next_eth);
+ ifname_num->next_eth++;
+ }
+
+ return;
+ }
+
+ /*
+ * Handle fddi devices.
+ */
+ if ( (memcmp(dev->name, "fddi", 4) == 0) ) {
+ if (ifname_num->next_fddi != -1) {
+ /*
+ * Assign it the next available fddi interface number.
+ */
+ memset(dev->name, 0, strlen(dev->name));
+ sprintf(dev->name, "fddi%d", (int)ifname_num->next_fddi);
+ ifname_num->next_fddi++;
+ }
+
+ return;
+ }
+
+ /*
+ * Handle hip devices.
+ */
+ if ( (memcmp(dev->name, "hip", 3) == 0) ) {
+ if (ifname_num->next_hip != -1) {
+ /*
+ * Assign it the next available hip interface number.
+ */
+ memset(dev->name, 0, strlen(dev->name));
+ sprintf(dev->name, "hip%d", (int)ifname_num->next_hip);
+ ifname_num->next_hip++;
+ }
+
+ return;
+ }
+
+ /*
+ * Handle tr devices.
+ */
+ if ( (memcmp(dev->name, "tr", 2) == 0) ) {
+ if (ifname_num->next_tr != -1) {
+ /*
+ * Assign it the next available tr interface number.
+ */
+ memset(dev->name, 0, strlen(dev->name));
+ sprintf(dev->name, "tr%d", (int)ifname_num->next_tr);
+ ifname_num->next_tr++;
+ }
+
+ return;
+ }
+
+ /*
+ * Handle fc devices.
+ */
+ if ( (memcmp(dev->name, "fc", 2) == 0) ) {
+ if (ifname_num->next_fc != -1) {
+ /*
+ * Assign it the next available fc interface number.
+ */
+ memset(dev->name, 0, strlen(dev->name));
+ sprintf(dev->name, "fc%d", (int)ifname_num->next_fc);
+ ifname_num->next_fc++;
+ }
+
+ return;
+ }
+}
+
+/*
+ * find_persistent_ifname: Returns the entry that was seen in previous boot.
+ */
+struct ifname_MAC *
+find_persistent_ifname(struct net_device *dev,
+ struct ifname_MAC *ifname_MAC)
+
+{
+
+ while (ifname_MAC->addr_len) {
+ if (memcmp(dev->dev_addr, ifname_MAC->dev_addr, dev->addr_len) == 0)
+ return(ifname_MAC);
+
+ ifname_MAC++;
+ }
+
+ return(NULL);
+}
+
+/*
+ * ifconfig_net_ioctl: ifconfig_net driver ioctl interface.
+ */
+static int ifconfig_net_ioctl(struct inode * inode, struct file * file,
+ unsigned int cmd, unsigned long arg)
+{
+
+ extern struct net_device *__dev_get_by_name(const char *);
+#ifdef CONFIG_NET
+ struct net_device *dev;
+ struct ifname_MAC *found;
+ char temp[64];
+#endif
+ struct ifname_MAC *ifname_MAC;
+ struct ifname_MAC *new_devices, *temp_new_devices;
+ struct ifname_num *ifname_num;
+ unsigned long size;
+
+
+ if (ifconfig_net_debug) {
+ printk("HCL: hcl_ioctl called.\n");
+ }
+
+ /*
+ * Read in the header and see how big of a buffer we really need to
+ * allocate.
+ */
+ ifname_num = (struct ifname_num *) kmalloc(sizeof(struct ifname_num),
+ GFP_KERNEL);
+ copy_from_user( ifname_num, (char *) arg, sizeof(struct ifname_num));
+ size = ifname_num->size;
+ kfree(ifname_num);
+ ifname_num = (struct ifname_num *) kmalloc(size, GFP_KERNEL);
+ ifname_MAC = (struct ifname_MAC *) ((char *)ifname_num + (sizeof(struct ifname_num)) );
+
+ copy_from_user( ifname_num, (char *) arg, size);
+ new_devices = kmalloc(size - sizeof(struct ifname_num), GFP_KERNEL);
+ temp_new_devices = new_devices;
+
+ memset(new_devices, 0, size - sizeof(struct ifname_num));
+
+#ifdef CONFIG_NET
+ /*
+ * Go through the net device entries and make them persistent!
+ */
+ for (dev = dev_base; dev != NULL; dev = dev->next) {
+ /*
+ * Skip NULL entries or "lo"
+ */
+ if ( (dev->addr_len == 0) || ( !strncmp(dev->name, "lo", strlen(dev->name))) ){
+ continue;
+ }
+
+ /*
+ * See if we have a persistent interface name for this device.
+ */
+ found = NULL;
+ found = find_persistent_ifname(dev, ifname_MAC);
+ if (found) {
+ strcpy(dev->name, found->name);
+ } else {
+ /* Never seen this before .. */
+ assign_ifname(dev, ifname_num);
+
+ /*
+ * Save the information for the next boot.
+ */
+ sprintf(temp,"%s %02x:%02x:%02x:%02x:%02x:%02x\n", dev->name,
+ dev->dev_addr[0], dev->dev_addr[1], dev->dev_addr[2],
+ dev->dev_addr[3], dev->dev_addr[4], dev->dev_addr[5]);
+ strcpy(temp_new_devices->name, dev->name);
+ temp_new_devices->addr_len = dev->addr_len;
+ memcpy(temp_new_devices->dev_addr, dev->dev_addr, dev->addr_len);
+ temp_new_devices++;
+ }
+
+ }
+#endif
+
+ /*
+ * Copy back to the User Buffer area any new devices encountered.
+ */
+ copy_to_user((char *)arg + (sizeof(struct ifname_num)), new_devices,
+ size - sizeof(struct ifname_num));
+
+ return(0);
+
+}
+
+struct file_operations ifconfig_net_fops = {
+ ioctl:ifconfig_net_ioctl, /* ioctl */
+ open:ifconfig_net_open, /* open */
+ release:ifconfig_net_close /* release */
+};
+
+
+/*
+ * init_ifconfig_net() - Boot time initialization. Ensure that it is called
+ * after devfs has been initialized.
+ *
+ */
+#ifdef MODULE
+int init_module (void)
+#else
+int __init init_ifconfig_net(void)
+#endif
+{
+ ifconfig_net_handle = NULL;
+ ifconfig_net_handle = hwgraph_register(hwgraph_root, ".ifconfig_net",
+ 0, 0,
+ 0, 0,
+ S_IFCHR | S_IRUSR | S_IWUSR | S_IRGRP, 0, 0,
+ &ifconfig_net_fops, NULL);
+
+ if (ifconfig_net_handle == NULL) {
+ panic("Unable to create SGI PERSISTENT NETWORK DEVICE Name Driver.\n");
+ }
+
+ return(0);
+
+}
--- /dev/null
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * ioconfig_bus - SGI's Persistent PCI Bus Numbering.
+ *
+ * Copyright (C) 1992-1997, 2000-2003 Silicon Graphics, Inc. All rights reserved.
+ */
+
+#include <linux/types.h>
+#include <linux/slab.h>
+#include <linux/ctype.h>
+#include <linux/module.h>
+#include <linux/init.h>
+
+#include <linux/pci.h>
+
+#include <asm/sn/sgi.h>
+#include <linux/devfs_fs.h>
+#include <linux/devfs_fs_kernel.h>
+#include <asm/io.h>
+#include <asm/sn/iograph.h>
+#include <asm/sn/invent.h>
+#include <asm/sn/hcl.h>
+#include <asm/sn/labelcl.h>
+#include <asm//sn/sn_sal.h>
+#include <asm/sn/addrs.h>
+#include <asm/sn/ioconfig_bus.h>
+
+#define SGI_IOCONFIG_BUS "SGI-PERSISTENT PCI BUS NUMBERING"
+#define SGI_IOCONFIG_BUS_VERSION "1.0"
+
+/*
+ * Some Global definitions.
+ */
+static vertex_hdl_t ioconfig_bus_handle;
+static unsigned long ioconfig_bus_debug;
+
+#ifdef IOCONFIG_BUS_DEBUG
+#define DBG(x...) printk(x)
+#else
+#define DBG(x...)
+#endif
+
+static u64 ioconfig_file;
+static u64 ioconfig_file_size;
+static u64 ioconfig_activated;
+static char ioconfig_kernopts[128];
+
+/*
+ * For debugging purpose .. hardcode a table ..
+ */
+struct ascii_moduleid *ioconfig_bus_table;
+u64 ioconfig_bus_table_size;
+
+
+static int free_entry;
+static int new_entry;
+
+int next_basebus_number;
+
+void
+ioconfig_get_busnum(char *io_moduleid, int *bus_num)
+{
+ struct ascii_moduleid *temp;
+ int index;
+
+ DBG("ioconfig_get_busnum io_moduleid %s\n", io_moduleid);
+
+ *bus_num = -1;
+ temp = ioconfig_bus_table;
+ for (index = 0; index < free_entry; temp++, index++) {
+ if ( (io_moduleid[0] == temp->io_moduleid[0]) &&
+ (io_moduleid[1] == temp->io_moduleid[1]) &&
+ (io_moduleid[2] == temp->io_moduleid[2]) &&
+ (io_moduleid[4] == temp->io_moduleid[4]) &&
+ (io_moduleid[5] == temp->io_moduleid[5]) ) {
+ *bus_num = index * 0x10;
+ return;
+ }
+ }
+
+ /*
+ * New IO Brick encountered.
+ */
+ if (((int)io_moduleid[0]) == 0) {
+ DBG("ioconfig_get_busnum: Invalid Module Id given %s\n", io_moduleid);
+ return;
+ }
+
+ io_moduleid[3] = '#';
+ strcpy((char *)&(ioconfig_bus_table[free_entry].io_moduleid), io_moduleid);
+ *bus_num = free_entry * 0x10;
+ free_entry++;
+}
+
+static void
+dump_ioconfig_table(void)
+{
+
+ int index = 0;
+ struct ascii_moduleid *temp;
+
+ temp = ioconfig_bus_table;
+ while (index < free_entry) {
+		DBG("ASCII Module ID %s\n", temp->io_moduleid);
+ temp++;
+ index++;
+ }
+}
+
+/*
+ * nextline
+ *	This routine returns the next line in the buffer.
+ */
+int nextline(char *buffer, char **next, char *line)
+{
+
+ char *temp;
+
+ if (buffer[0] == 0x0) {
+ return(0);
+ }
+
+ temp = buffer;
+ while (*temp != 0) {
+ *line = *temp;
+ if (*temp != '\n'){
+ *line = *temp;
+ temp++; line++;
+ } else
+ break;
+ }
+
+ if (*temp == 0)
+ *next = temp;
+ else
+ *next = ++temp;
+
+ return(1);
+}
+
+/*
+ * build_moduleid_table
+ * This routine parses the ioconfig contents read into
+ * memory by ioconfig command in EFI and builds the
+ * persistent pci bus naming table.
+ */
+void
+build_moduleid_table(char *file_contents, struct ascii_moduleid *table)
+{
+ /*
+ * Read the whole file into memory.
+ */
+ int rc;
+ char *name;
+ char *temp;
+ char *next;
+ char *current;
+ char *line;
+ struct ascii_moduleid *moduleid;
+
+ line = kmalloc(256, GFP_KERNEL);
+ memset(line, 0,256);
+ name = kmalloc(125, GFP_KERNEL);
+ memset(name, 0, 125);
+ moduleid = table;
+ current = file_contents;
+ while (nextline(current, &next, line)){
+
+ DBG("current 0x%lx next 0x%lx\n", current, next);
+
+ temp = line;
+ /*
+ * Skip all leading Blank lines ..
+ */
+ while (isspace(*temp))
+ if (*temp != '\n')
+ temp++;
+ else
+ break;
+
+ if (*temp == '\n') {
+ current = next;
+ memset(line, 0, 256);
+ continue;
+ }
+
+ /*
+ * Skip comment lines
+ */
+ if (*temp == '#') {
+ current = next;
+ memset(line, 0, 256);
+ continue;
+ }
+
+ /*
+ * Get the next free entry in the table.
+ */
+ rc = sscanf(temp, "%s", name);
+ strcpy(&moduleid->io_moduleid[0], name);
+ DBG("Found %s\n", name);
+ moduleid++;
+ free_entry++;
+ current = next;
+ memset(line, 0, 256);
+ }
+
+ new_entry = free_entry;
+ kfree(line);
+ kfree(name);
+
+ return;
+}
+
+void
+ioconfig_bus_init(void)
+{
+
+ struct ia64_sal_retval ret_stuff;
+ u64 *temp;
+ int cnode;
+
+ DBG("ioconfig_bus_init called.\n");
+
+ for (cnode = 0; cnode < numnodes; cnode++) {
+ nasid_t nasid;
+ /*
+ * Make SAL call to get the address of the bus configuration table.
+ */
+ ret_stuff.status = (uint64_t)0;
+ ret_stuff.v0 = (uint64_t)0;
+ ret_stuff.v1 = (uint64_t)0;
+ ret_stuff.v2 = (uint64_t)0;
+ nasid = COMPACT_TO_NASID_NODEID(cnode);
+ SAL_CALL(ret_stuff, SN_SAL_BUS_CONFIG, 0, nasid, 0, 0, 0, 0, 0);
+ temp = (u64 *)TO_NODE_CAC(nasid, ret_stuff.v0);
+ ioconfig_file = *temp;
+ DBG("ioconfig_bus_init: Nasid %d ret_stuff.v0 0x%lx\n", nasid,
+ ret_stuff.v0);
+ if (ioconfig_file) {
+ ioconfig_file_size = ret_stuff.v1;
+ ioconfig_file = (ioconfig_file | CACHEABLE_MEM_SPACE);
+ ioconfig_activated = 1;
+ break;
+ }
+ }
+
+ DBG("ioconfig_bus_init: ret_stuff.v0 %p ioconfig_file %p %d\n",
+ ret_stuff.v0, (void *)ioconfig_file, (int)ioconfig_file_size);
+
+ ioconfig_bus_table = kmalloc( 512, GFP_KERNEL );
+ memset(ioconfig_bus_table, 0, 512);
+
+ /*
+ * If ioconfig options are given on the bootline .. take it.
+ */
+ if (*ioconfig_kernopts != '\0') {
+ /*
+ * ioconfig="..." kernel options given.
+ */
+ DBG("ioconfig_bus_init: Kernel Options given.\n");
+ (void) build_moduleid_table((char *)ioconfig_kernopts, ioconfig_bus_table);
+ (void) dump_ioconfig_table();
+ return;
+ }
+
+ if (ioconfig_activated) {
+ DBG("ioconfig_bus_init: ioconfig file given.\n");
+ (void) build_moduleid_table((char *)ioconfig_file, ioconfig_bus_table);
+ (void) dump_ioconfig_table();
+ } else {
+ DBG("ioconfig_bus_init: ioconfig command not executed in prom\n");
+ }
+
+}
+
+void
+ioconfig_bus_new_entries(void)
+{
+
+
+ int index = 0;
+ struct ascii_moduleid *temp;
+
+ if ((ioconfig_activated) && (free_entry > new_entry)) {
+ printk("### Please add the following new IO Bricks Module ID \n");
+ printk("### to your Persistent Bus Numbering Config File\n");
+ } else
+ return;
+
+ index = new_entry;
+ temp = &ioconfig_bus_table[index];
+ while (index < free_entry) {
+ printk("%s\n", (char *)temp);
+ temp++;
+ index++;
+ }
+ printk("### End\n");
+
+}
+static int ioconfig_bus_ioctl(struct inode * inode, struct file * file,
+ unsigned int cmd, unsigned long arg)
+{
+
+ struct ioconfig_parm parm;
+
+ /*
+ * Copy in the parameters.
+ */
+ copy_from_user(&parm, (char *)arg, sizeof(struct ioconfig_parm));
+ parm.number = free_entry - new_entry;
+ parm.ioconfig_activated = ioconfig_activated;
+ copy_to_user((char *)arg, &parm, sizeof(struct ioconfig_parm));
+ copy_to_user((char *)parm.buffer, &ioconfig_bus_table[new_entry], sizeof(struct ascii_moduleid) * (free_entry - new_entry));
+
+ return 0;
+}
+
+/*
+ * ioconfig_bus_open - Opens the special device node "/dev/hw/.ioconfig_bus".
+ */
+static int ioconfig_bus_open(struct inode * inode, struct file * filp)
+{
+ if (ioconfig_bus_debug) {
+ DBG("ioconfig_bus_open called.\n");
+ }
+
+ return(0);
+
+}
+
+/*
+ * ioconfig_bus_close - Closes the special device node "/dev/hw/.ioconfig_bus".
+ */
+static int ioconfig_bus_close(struct inode * inode, struct file * filp)
+{
+
+ if (ioconfig_bus_debug) {
+ DBG("ioconfig_bus_close called.\n");
+ }
+
+ return(0);
+}
+
+struct file_operations ioconfig_bus_fops = {
+ ioctl:ioconfig_bus_ioctl,
+ open:ioconfig_bus_open, /* open */
+ release:ioconfig_bus_close /* release */
+};
+
+
+/*
+ * init_ioconfig_bus() - Boot time initialization.  Ensure that it is called
+ * after devfs has been initialized.
+ *
+ */
+int init_ioconfig_bus(void)
+{
+ ioconfig_bus_handle = NULL;
+ ioconfig_bus_handle = hwgraph_register(hwgraph_root, ".ioconfig_bus",
+ 0, 0,
+ 0, 0,
+ S_IFCHR | S_IRUSR | S_IWUSR | S_IRGRP, 0, 0,
+ &ioconfig_bus_fops, NULL);
+
+ if (ioconfig_bus_handle == NULL) {
+ panic("Unable to create SGI PERSISTENT BUS NUMBERING Driver.\n");
+ }
+
+ return(0);
+
+}
+
+static int __init ioconfig_bus_setup (char *str)
+{
+
+ char *temp;
+
+ DBG("ioconfig_bus_setup: Kernel Options %s\n", str);
+
+ temp = (char *)ioconfig_kernopts;
+ memset(temp, 0, 128);
+ while ( (*str != '\0') && !isspace (*str) ) {
+ if (*str == ',') {
+ *temp = '\n';
+ temp++;
+ str++;
+ continue;
+ }
+ *temp = *str;
+ temp++;
+ str++;
+ }
+
+ return(0);
+
+}
+__setup("ioconfig=", ioconfig_bus_setup);
+++ /dev/null
-/*
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Copyright (C) 1999-2002 Silicon Graphics, Inc. All rights reserved.
- */
-
-/*
- * WARNING: There is more than one copy of this file in different isms.
- * All copies must be kept exactly in sync.
- * Do not modify this file without also updating the following:
- *
- * irix/kern/io/eeprom.c
- * stand/arcs/lib/libsk/ml/eeprom.c
- * stand/arcs/lib/libkl/io/eeprom.c
- *
- * (from time to time they might not be in sync but that's due to bringup
- * activity - this comment is to remind us that they eventually have to
- * get back together)
- *
- * eeprom.c
- *
- * access to board-mounted EEPROMs via the L1 system controllers
- *
- */
-
-#include <linux/types.h>
-#include <linux/config.h>
-#include <linux/slab.h>
-#include <asm/sn/sgi.h>
-#include <asm/sn/io.h>
-#include <asm/sn/iograph.h>
-#include <asm/sn/invent.h>
-#include <asm/sn/hcl.h>
-#include <asm/sn/hcl_util.h>
-#include <asm/sn/labelcl.h>
-#include <asm/sn/eeprom.h>
-#include <asm/sn/router.h>
-#include <asm/sn/module.h>
-#include <asm/sn/ksys/l1.h>
-#include <asm/sn/nodepda.h>
-#include <asm/sn/clksupport.h>
-#include <asm/sn/sn_cpuid.h>
-#include <asm/sn/simulator.h>
-
-#if defined(EEPROM_DEBUG)
-#define db_printf(x) printk x
-#else
-#define db_printf(x) printk x
-#endif
-
-#define BCOPY(x,y,z) memcpy(y,x,z)
-
-#define UNDERSCORE 0 /* don't convert underscores to hyphens */
-#define HYPHEN 1 /* convert underscores to hyphens */
-
-void copy_ascii_field( char *to, char *from, int length,
- int change_underscore );
-uint64_t generate_unique_id( char *sn, int sn_len );
-uchar_t char_to_base36( char c );
-int nicify( char *dst, eeprom_brd_record_t *src );
-static void int64_to_hex_string( char *out, uint64_t val );
-
-// extern int router_lock( net_vec_t, int, int );
-// extern int router_unlock( net_vec_t );
-#define ROUTER_LOCK(p) // router_lock(p, 10000, 3000000)
-#define ROUTER_UNLOCK(p) // router_unlock(p)
-
-#define IP27LOG_OVNIC "OverrideNIC"
-
-
-/* the following function converts an EEPROM record to a close facsimile
- * of the string returned by reading a Dallas Semiconductor NIC (see
- * one of the many incarnations of nic.c for details on that driver)
- */
-int nicify( char *dst, eeprom_brd_record_t *src )
-{
- int field_len;
- uint64_t unique_id;
- char *cur_dst = dst;
- eeprom_board_ia_t *board;
-
- board = src->board_ia;
- ASSERT( board ); /* there should always be a board info area */
-
- /* copy part number */
- strcpy( cur_dst, "Part:" );
- cur_dst += strlen( cur_dst );
- ASSERT( (board->part_num_tl & FIELD_FORMAT_MASK)
- == FIELD_FORMAT_ASCII );
- field_len = board->part_num_tl & FIELD_LENGTH_MASK;
- copy_ascii_field( cur_dst, board->part_num, field_len, HYPHEN );
- cur_dst += field_len;
-
- /* copy product name */
- strcpy( cur_dst, ";Name:" );
- cur_dst += strlen( cur_dst );
- ASSERT( (board->product_tl & FIELD_FORMAT_MASK) == FIELD_FORMAT_ASCII );
- field_len = board->product_tl & FIELD_LENGTH_MASK;
- copy_ascii_field( cur_dst, board->product, field_len, UNDERSCORE );
- cur_dst += field_len;
-
- /* copy serial number */
- strcpy( cur_dst, ";Serial:" );
- cur_dst += strlen( cur_dst );
- ASSERT( (board->serial_num_tl & FIELD_FORMAT_MASK)
- == FIELD_FORMAT_ASCII );
- field_len = board->serial_num_tl & FIELD_LENGTH_MASK;
- copy_ascii_field( cur_dst, board->serial_num, field_len,
- HYPHEN);
-
- cur_dst += field_len;
-
- /* copy revision */
- strcpy( cur_dst, ";Revision:");
- cur_dst += strlen( cur_dst );
- ASSERT( (board->board_rev_tl & FIELD_FORMAT_MASK)
- == FIELD_FORMAT_ASCII );
- field_len = board->board_rev_tl & FIELD_LENGTH_MASK;
- copy_ascii_field( cur_dst, board->board_rev, field_len, HYPHEN );
- cur_dst += field_len;
-
- /* EEPROMs don't have equivalents for the Group, Capability and
- * Variety fields, so we pad these with 0's
- */
- strcpy( cur_dst, ";Group:ff;Capability:ffffffff;Variety:ff" );
- cur_dst += strlen( cur_dst );
-
- /* use the board serial number to "fake" a laser id */
- strcpy( cur_dst, ";Laser:" );
- cur_dst += strlen( cur_dst );
- unique_id = generate_unique_id( board->serial_num,
- board->serial_num_tl & FIELD_LENGTH_MASK );
- int64_to_hex_string( cur_dst, unique_id );
- strcat( dst, ";" );
-
- return 1;
-}
-
-
-/* These functions borrow heavily from chars2* in nic.c
- */
-void copy_ascii_field( char *to, char *from, int length,
- int change_underscore )
-{
- int i;
- for( i = 0; i < length; i++ ) {
-
- /* change underscores to hyphens if requested */
- if( from[i] == '_' && change_underscore == HYPHEN )
- to[i] = '-';
-
- /* ; and ; are separators, so mustn't appear within
- * a field */
- else if( from[i] == ':' || from[i] == ';' )
- to[i] = '?';
-
- /* I'm not sure why or if ASCII character 0xff would
- * show up in an EEPROM field, but the NIC parsing
- * routines wouldn't like it if it did... so we
- * get rid of it, just in case. */
- else if( (unsigned char)from[i] == (unsigned char)0xff )
- to[i] = ' ';
-
- /* unprintable characters are replaced with . */
- else if( from[i] < ' ' || from[i] >= 0x7f )
- to[i] = '.';
-
- /* otherwise, just copy the character */
- else
- to[i] = from[i];
- }
-
- if( i == 0 ) {
- to[i] = ' '; /* return at least a space... */
- i++;
- }
- to[i] = 0; /* terminating null */
-}
-
-/* Note that int64_to_hex_string currently only has a big-endian
- * implementation.
- */
-#ifdef _MIPSEB
-static void int64_to_hex_string( char *out, uint64_t val )
-{
- int i;
- uchar_t table[] = "0123456789abcdef";
- uchar_t *byte_ptr = (uchar_t *)&val;
- for( i = 0; i < sizeof(uint64_t); i++ ) {
- out[i*2] = table[ ((*byte_ptr) >> 4) & 0x0f ];
- out[i*2+1] = table[ (*byte_ptr) & 0x0f ];
- byte_ptr++;
- }
- out[i*2] = '\0';
-}
-
-#else /* little endian */
-
-static void int64_to_hex_string( char *out, uint64_t val )
-{
-
-
- printk("int64_to_hex_string needs a little-endian implementation.\n");
-}
-#endif /* _MIPSEB */
-
-/* Convert a standard ASCII serial number to a unique integer
- * id number by treating the serial number string as though
- * it were a base 36 number
- */
-uint64_t generate_unique_id( char *sn, int sn_len )
-{
- int uid = 0;
- int i;
-
- #define VALID_BASE36(c) ((c >= '0' && c <='9') \
- || (c >= 'A' && c <='Z') \
- || (c >= 'a' && c <='z'))
-
- for( i = 0; i < sn_len; i++ ) {
- if( !VALID_BASE36(sn[i]) )
- continue;
- uid *= 36;
- uid += char_to_base36( sn[i] );
- }
-
- if( uid == 0 )
- return rtc_time();
-
- return uid;
-}
-
-uchar_t char_to_base36( char c )
-{
- uchar_t val;
-
- if( c >= '0' && c <= '9' )
- val = (c - '0');
-
- else if( c >= 'A' && c <= 'Z' )
- val = (c - 'A' + 10);
-
- else if( c >= 'a' && c <= 'z' )
- val = (c - 'a' + 10);
-
- else val = 0;
-
- return val;
-}
-
-
-/* given a pointer to the three-byte little-endian EEPROM representation
- * of date-of-manufacture, this function translates to a big-endian
- * integer format
- */
-int eeprom_xlate_board_mfr_date( uchar_t *src )
-{
- int rval = 0;
- rval += *src; src++;
- rval += ((int)(*src) << 8); src ++;
- rval += ((int)(*src) << 16);
- return rval;
-}
-
-
-int eeprom_str( char *nic_str, nasid_t nasid, int component )
-{
- eeprom_brd_record_t eep;
- eeprom_board_ia_t board;
- eeprom_chassis_ia_t chassis;
- int r;
-
- if( (component & C_DIMM) == C_DIMM ) {
- /* this function isn't applicable to DIMMs */
- return EEP_PARAM;
- }
- else {
- eep.board_ia = &board;
- eep.spd = NULL;
- if( !(component & SUBORD_MASK) )
- eep.chassis_ia = &chassis; /* only main boards have a chassis
- * info area */
- else
- eep.chassis_ia = NULL;
- }
-
- switch( component & BRICK_MASK ) {
- case C_BRICK:
- r = cbrick_eeprom_read( &eep, nasid, component );
- break;
- case IO_BRICK:
- r = iobrick_eeprom_read( &eep, nasid, component );
- break;
- default:
- return EEP_PARAM; /* must be an invalid component */
- }
- if( r )
- return r;
- if( !nicify( nic_str, &eep ) )
- return EEP_NICIFY;
-
- return EEP_OK;
-}
-
-int vector_eeprom_str( char *nic_str, nasid_t nasid,
- int component, net_vec_t path )
-{
- eeprom_brd_record_t eep;
- eeprom_board_ia_t board;
- eeprom_chassis_ia_t chassis;
- int r;
-
- eep.board_ia = &board;
- if( !(component & SUBORD_MASK) )
- eep.chassis_ia = &chassis; /* only main boards have a chassis
- * info area */
- else
- eep.chassis_ia = NULL;
-
- if( !(component & VECTOR) )
- return EEP_PARAM;
-
- if( (r = vector_eeprom_read( &eep, nasid, path, component )) )
- return r;
-
- if( !nicify( nic_str, &eep ) )
- return EEP_NICIFY;
-
- return EEP_OK;
-}
-
-
-int is_iobrick( int nasid, int widget_num )
-{
- uint32_t wid_reg;
- int part_num, mfg_num;
-
- /* Read the widget's WIDGET_ID register to get
- * its part number and mfg number
- */
- wid_reg = *(volatile int32_t *)
- (NODE_SWIN_BASE( nasid, widget_num ) + WIDGET_ID);
-
- part_num = (wid_reg & WIDGET_PART_NUM) >> WIDGET_PART_NUM_SHFT;
- mfg_num = (wid_reg & WIDGET_MFG_NUM) >> WIDGET_MFG_NUM_SHFT;
-
- /* Is this the "xbow part" of an XBridge? If so, this
- * widget is definitely part of an I/O brick.
- */
- if( part_num == XXBOW_WIDGET_PART_NUM &&
- mfg_num == XXBOW_WIDGET_MFGR_NUM )
-
- return 1;
-
- /* Is this a "bridge part" of an XBridge? If so, once
- * again, we know this widget is part of an I/O brick.
- */
- if( part_num == XBRIDGE_WIDGET_PART_NUM &&
- mfg_num == XBRIDGE_WIDGET_MFGR_NUM )
-
- return 1;
-
- return 0;
-}
-
-
-int cbrick_uid_get( nasid_t nasid, uint64_t *uid )
-{
-#if !defined(CONFIG_SERIAL_SGI_L1_PROTOCOL)
- return EEP_L1;
-#else
- char uid_str[32];
- char msg[BRL1_QSIZE];
- int subch, len;
- l1sc_t sc;
- l1sc_t *scp;
- int local = (nasid == get_nasid());
-
- if ( IS_RUNNING_ON_SIMULATOR() )
- return EEP_L1;
-
- /* If the promlog variable pointed to by IP27LOG_OVNIC is set,
- * use that value for the cbrick UID rather than the EEPROM
- * serial number.
- */
-#ifdef LOG_GETENV
- if( ip27log_getenv( nasid, IP27LOG_OVNIC, uid_str, NULL, 0 ) >= 0 )
- {
- /* We successfully read IP27LOG_OVNIC, so return it as the UID. */
- db_printf(( "cbrick_uid_get:"
- "Overriding UID with environment variable %s\n",
- IP27LOG_OVNIC ));
- *uid = strtoull( uid_str, NULL, 0 );
- return EEP_OK;
- }
-#endif
-
- /* If this brick is retrieving its own uid, use the local l1sc_t to
- * arbitrate access to the l1; otherwise, set up a new one.
- */
- if( local ) {
- scp = get_l1sc();
- }
- else {
- scp = ≻
- sc_init( &sc, nasid, BRL1_LOCALHUB_UART );
- }
-
- /* fill in msg with the opcode & params */
- BZERO( msg, BRL1_QSIZE );
- if( (subch = sc_open( scp, L1_ADDR_LOCAL )) < 0 )
- return EEP_L1;
-
- if( (len = sc_construct_msg( scp, subch, msg, BRL1_QSIZE,
- L1_ADDR_TASK_GENERAL,
- L1_REQ_SER_NUM, 0 )) < 0 )
- {
- sc_close( scp, subch );
- return( EEP_L1 );
- }
-
- /* send the request to the L1 */
- if( sc_command( scp, subch, msg, msg, &len ) ) {
- sc_close( scp, subch );
- return( EEP_L1 );
- }
-
- /* free up subchannel */
- sc_close(scp, subch);
-
- /* check response */
- if( sc_interpret_resp( msg, 2, L1_ARG_ASCII, uid_str ) < 0 )
- {
- return( EEP_L1 );
- }
-
- *uid = generate_unique_id( uid_str, strlen( uid_str ) );
-
- return EEP_OK;
-#endif /* CONFIG_SERIAL_SGI_L1_PROTOCOL */
-}
-
-
-int rbrick_uid_get( nasid_t nasid, net_vec_t path, uint64_t *uid )
-{
-#if !defined(CONFIG_SERIAL_SGI_L1_PROTOCOL)
- return EEP_L1;
-#else
- char uid_str[32];
- char msg[BRL1_QSIZE];
- int subch, len;
- l1sc_t sc;
-
- if ( IS_RUNNING_ON_SIMULATOR() )
- return EEP_L1;
-
-#define FAIL \
- { \
- *uid = rtc_time(); \
- printk( "rbrick_uid_get failed; using current time as uid\n" ); \
- return EEP_OK; \
- }
-
- ROUTER_LOCK(path);
- sc_init( &sc, nasid, path );
-
- /* fill in msg with the opcode & params */
- BZERO( msg, BRL1_QSIZE );
- if( (subch = sc_open( &sc, L1_ADDR_LOCAL )) < 0 ) {
- ROUTER_UNLOCK(path);
- FAIL;
- }
-
- if( (len = sc_construct_msg( &sc, subch, msg, BRL1_QSIZE,
- L1_ADDR_TASK_GENERAL,
- L1_REQ_SER_NUM, 0 )) < 0 )
- {
- ROUTER_UNLOCK(path);
- sc_close( &sc, subch );
- FAIL;
- }
-
- /* send the request to the L1 */
- if( sc_command( &sc, subch, msg, msg, &len ) ) {
- ROUTER_UNLOCK(path);
- sc_close( &sc, subch );
- FAIL;
- }
-
- /* free up subchannel */
- ROUTER_UNLOCK(path);
- sc_close(&sc, subch);
-
- /* check response */
- if( sc_interpret_resp( msg, 2, L1_ARG_ASCII, uid_str ) < 0 )
- {
- FAIL;
- }
-
- *uid = generate_unique_id( uid_str, strlen( uid_str ) );
-
- return EEP_OK;
-#endif /* CONFIG_SERIAL_SGI_L1_PROTOCOL */
-}
-
-int iobrick_uid_get( nasid_t nasid, uint64_t *uid )
-{
- eeprom_brd_record_t eep;
- eeprom_board_ia_t board;
- eeprom_chassis_ia_t chassis;
- int r;
-
- eep.board_ia = &board;
- eep.chassis_ia = &chassis;
- eep.spd = NULL;
-
- r = iobrick_eeprom_read( &eep, nasid, IO_BRICK );
- if( r != EEP_OK ) {
- *uid = rtc_time();
- return r;
- }
-
- *uid = generate_unique_id( board.serial_num,
- board.serial_num_tl & FIELD_LENGTH_MASK );
-
- return EEP_OK;
-}
-
-
-int ibrick_mac_addr_get( nasid_t nasid, char *eaddr )
-{
- eeprom_brd_record_t eep;
- eeprom_board_ia_t board;
- eeprom_chassis_ia_t chassis;
- int r;
- char *tmp;
-
- eep.board_ia = &board;
- eep.chassis_ia = &chassis;
- eep.spd = NULL;
-
- r = iobrick_eeprom_read( &eep, nasid, IO_BRICK );
- if( (r != EEP_OK) || (board.mac_addr[0] == '\0') ) {
- db_printf(( "ibrick_mac_addr_get: "
- "Couldn't read MAC address from EEPROM\n" ));
- return EEP_L1;
- }
- else {
- /* successfully read info area */
- int ix;
- tmp = board.mac_addr;
- for( ix = 0; ix < (board.mac_addr_tl & FIELD_LENGTH_MASK); ix++ )
- {
- *eaddr++ = *tmp++;
- }
- *eaddr = '\0';
- }
-
- return EEP_OK;
-}
-
-
-/*
- * eeprom_vertex_info_set
- *
- * Given a vertex handle, a component designation, a starting nasid
- * and (in the case of a router) a vector path to the component, this
- * function will read the EEPROM and attach the resulting information
- * to the vertex in the same string format as that provided by the
- * Dallas Semiconductor NIC drivers. If the vertex already has the
- * string, this function just returns the string.
- */
-
-extern char *nic_vertex_info_get( devfs_handle_t );
-extern void nic_vmc_check( devfs_handle_t, char * );
-/* the following were lifted from nic.c - change later? */
-#define MAX_INFO 2048
-#define NEWSZ(ptr,sz) ((ptr) = kern_malloc((sz)))
-#define DEL(ptr) (kern_free((ptr)))
-
-char *eeprom_vertex_info_set( int component, int nasid, devfs_handle_t v,
- net_vec_t path )
-{
- char *info_tmp;
- int info_len;
- char *info;
-
- /* see if this vertex is already marked */
- info_tmp = nic_vertex_info_get(v);
- if (info_tmp) return info_tmp;
-
- /* get a temporary place for the data */
- NEWSZ(info_tmp, MAX_INFO);
- if (!info_tmp) return NULL;
-
- /* read the EEPROM */
- if( component & R_BRICK ) {
- if( RBRICK_EEPROM_STR( info_tmp, nasid, path ) != EEP_OK )
- return NULL;
- }
- else {
- if( eeprom_str( info_tmp, nasid, component ) != EEP_OK )
- return NULL;
- }
-
- /* allocate a smaller final place */
- info_len = strlen(info_tmp)+1;
- NEWSZ(info, info_len);
- if (info) {
- strcpy(info, info_tmp);
- DEL(info_tmp);
- } else {
- info = info_tmp;
- }
-
- /* add info to the vertex */
- hwgraph_info_add_LBL(v, INFO_LBL_NIC,
- (arbitrary_info_t) info);
-
- /* see if someone else got there first */
- info_tmp = nic_vertex_info_get(v);
- if (info != info_tmp) {
- DEL(info);
- return info_tmp;
- }
-
- /* export the data */
- hwgraph_info_export_LBL(v, INFO_LBL_NIC, info_len);
-
- /* trigger all matching callbacks */
- nic_vmc_check(v, info);
-
- return info;
-}
-
-
-/*********************************************************************
- *
- * stubs for use until the Bedrock/L1 link is available
- *
- */
-
-#include <asm/sn/nic.h>
-
-/* #define EEPROM_TEST */
-
-/* fake eeprom reading functions (replace when the BR/L1 communication
- * channel is in working order)
- */
-
-
-/* generate a charater in [0-9A-Z]; if an "extra" character is
- * specified (such as '_'), include it as one of the possibilities.
- */
-char random_eeprom_ch( char extra )
-{
- char ch;
- int modval = 36;
- if( extra )
- modval++;
-
- ch = rtc_time() % modval;
-
- if( ch < 10 )
- ch += '0';
- else if( ch >= 10 && ch < 36 )
- ch += ('A' - 10);
- else
- ch = extra;
-
- return ch;
-}
-
-/* create a part number of the form xxx-xxxx-xxx.
- * It may be important later to generate different
- * part numbers depending on the component we're
- * supposed to be "reading" from, so the component
- * paramter is provided.
- */
-void fake_a_part_number( char *buf, int component )
-{
- int i;
- switch( component ) {
-
- /* insert component-specific routines here */
-
- case C_BRICK:
- strcpy( buf, "030-1266-001" );
- break;
- default:
- for( i = 0; i < 12; i++ ) {
- if( i == 3 || i == 8 )
- buf[i] = '-';
- else
- buf[i] = random_eeprom_ch(0);
- }
- }
-}
-
-
-/* create a six-character serial number */
-void fake_a_serial_number( char *buf, uint64_t ser )
-{
- int i;
- static const char hexchars[] = "0123456789ABCDEF";
-
- if (ser) {
- for( i = 5; i >=0; i-- ) {
- buf[i] = hexchars[ser & 0xf];
- ser >>= 4;
- }
- }
- else {
- for( i = 0; i < 6; i++ )
- buf[i] = random_eeprom_ch(0);
- }
-}
-
-
-void fake_a_product_name( uchar_t *format, char* buf, int component )
-{
- switch( component & BRICK_MASK ) {
-
- case C_BRICK:
- if( component & SUBORD_MASK ) {
- strcpy( buf, "C_BRICK_SUB" );
- *format = 0xCB;
- }
- else {
- strcpy( buf, "IP35" );
- *format = 0xC4;
- }
- break;
-
- case R_BRICK:
- if( component & SUBORD_MASK ) {
- strcpy( buf, "R_BRICK_SUB" );
- *format = 0xCB;
- }
- else {
- strcpy( buf, "R_BRICK" );
- *format = 0xC7;
- }
- break;
-
- case IO_BRICK:
- if( component & SUBORD_MASK ) {
- strcpy( buf, "IO_BRICK_SUB" );
- *format = 0xCC;
- }
- else {
- strcpy( buf, "IO_BRICK" );
- *format = 0xC8;
- }
- break;
-
- default:
- strcpy( buf, "UNK_DEVICE" );
- *format = 0xCA;
- }
-}
-
-
-
-int fake_an_eeprom_record( eeprom_brd_record_t *buf, int component,
- uint64_t ser )
-{
- eeprom_board_ia_t *board;
- eeprom_chassis_ia_t *chassis;
- int i, cs;
-
- board = buf->board_ia;
- chassis = buf->chassis_ia;
-
- if( !(component & SUBORD_MASK) ) {
- if( !chassis )
- return EEP_PARAM;
- chassis->format = 0;
- chassis->length = 5;
- chassis->type = 0x17;
-
- chassis->part_num_tl = 0xCC;
- fake_a_part_number( chassis->part_num, component );
- chassis->serial_num_tl = 0xC6;
- fake_a_serial_number( chassis->serial_num, ser );
-
- cs = chassis->format + chassis->length + chassis->type
- + chassis->part_num_tl + chassis->serial_num_tl;
- for( i = 0; i < (chassis->part_num_tl & FIELD_LENGTH_MASK); i++ )
- cs += chassis->part_num[i];
- for( i = 0; i < (chassis->serial_num_tl & FIELD_LENGTH_MASK); i++ )
- cs += chassis->serial_num[i];
- chassis->checksum = 256 - (cs % 256);
- }
-
- if( !board )
- return EEP_PARAM;
- board->format = 0;
- board->length = 10;
- board->language = 0;
- board->mfg_date = 1789200; /* noon, 5/26/99 */
- board->manuf_tl = 0xC3;
- strcpy( board->manuf, "SGI" );
-
- fake_a_product_name( &(board->product_tl), board->product, component );
-
- board->serial_num_tl = 0xC6;
- fake_a_serial_number( board->serial_num, ser );
-
- board->part_num_tl = 0xCC;
- fake_a_part_number( board->part_num, component );
-
- board->board_rev_tl = 0xC2;
- board->board_rev[0] = '0';
- board->board_rev[1] = '1';
-
- board->eeprom_size_tl = 0x01;
- board->eeprom_size = 1;
-
- board->temp_waiver_tl = 0xC2;
- board->temp_waiver[0] = '0';
- board->temp_waiver[1] = '1';
-
- cs = board->format + board->length + board->language
- + (board->mfg_date & 0xFF)
- + (board->mfg_date & 0xFF00)
- + (board->mfg_date & 0xFF0000)
- + board->manuf_tl + board->product_tl + board->serial_num_tl
- + board->part_num_tl + board->board_rev_tl
- + board->board_rev[0] + board->board_rev[1]
- + board->eeprom_size_tl + board->eeprom_size + board->temp_waiver_tl
- + board->temp_waiver[0] + board->temp_waiver[1];
- for( i = 0; i < (board->manuf_tl & FIELD_LENGTH_MASK); i++ )
- cs += board->manuf[i];
- for( i = 0; i < (board->product_tl & FIELD_LENGTH_MASK); i++ )
- cs += board->product[i];
- for( i = 0; i < (board->serial_num_tl & FIELD_LENGTH_MASK); i++ )
- cs += board->serial_num[i];
- for( i = 0; i < (board->part_num_tl & FIELD_LENGTH_MASK); i++ )
- cs += board->part_num[i];
-
- board->checksum = 256 - (cs % 256);
-
- return EEP_OK;
-}
-
-#define EEPROM_CHUNKSIZE 64
-
-#if defined(EEPROM_DEBUG)
-#define RETURN_ERROR \
-{ \
- printk( "read_ia error return, component 0x%x, line %d" \
- ", address 0x%x, ia code 0x%x\n", \
- l1_compt, __LINE__, sc->subch[subch].target, ia_code ); \
- return EEP_L1; \
-}
-
-#else
-#define RETURN_ERROR return(EEP_L1)
-#endif
-
-int read_ia( l1sc_t *sc, int subch, int l1_compt,
- int ia_code, char *eep_record )
-{
-#if !defined(CONFIG_SERIAL_SGI_L1_PROTOCOL)
- return EEP_L1;
-#else
- char msg[BRL1_QSIZE]; /* message buffer */
- int len; /* number of bytes used in message buffer */
- int ia_len = EEPROM_CHUNKSIZE; /* remaining bytes in info area */
- int offset = 0; /* current offset into info area */
-
- if ( IS_RUNNING_ON_SIMULATOR() )
- return EEP_L1;
-
- BZERO( msg, BRL1_QSIZE );
-
- /* retrieve EEPROM data in 64-byte chunks
- */
-
- while( ia_len )
- {
- /* fill in msg with opcode & params */
- if( (len = sc_construct_msg( sc, subch, msg, BRL1_QSIZE,
- L1_ADDR_TASK_GENERAL,
- L1_REQ_EEPROM, 8,
- L1_ARG_INT, l1_compt,
- L1_ARG_INT, ia_code,
- L1_ARG_INT, offset,
- L1_ARG_INT, ia_len )) < 0 )
- {
- RETURN_ERROR;
- }
-
- /* send the request to the L1 */
-
- if( sc_command( sc, subch, msg, msg, &len ) ) {
- RETURN_ERROR;
- }
-
- /* check response */
- if( sc_interpret_resp( msg, 5,
- L1_ARG_INT, &ia_len,
- L1_ARG_UNKNOWN, &len, eep_record ) < 0 )
- {
- RETURN_ERROR;
- }
-
- if( ia_len > EEPROM_CHUNKSIZE )
- ia_len = EEPROM_CHUNKSIZE;
-
- eep_record += EEPROM_CHUNKSIZE;
- offset += EEPROM_CHUNKSIZE;
- }
-
- return EEP_OK;
-#endif /* CONFIG_SERIAL_SGI_L1_PROTOCOL */
-}
-
-
-int read_spd( l1sc_t *sc, int subch, int l1_compt,
- eeprom_spd_u *spd )
-{
-#if !defined(CONFIG_SERIAL_SGI_L1_PROTOCOL)
- return EEP_L1;
-#else
- char msg[BRL1_QSIZE]; /* message buffer */
- int len; /* number of bytes used in message buffer */
- int resp; /* l1 response code */
- int spd_len = EEPROM_CHUNKSIZE; /* remaining bytes in spd record */
- int offset = 0; /* current offset into spd record */
- char *spd_p = spd->bytes; /* "thumb" for writing to spd */
-
- if ( IS_RUNNING_ON_SIMULATOR() )
- return EEP_L1;
-
- BZERO( msg, BRL1_QSIZE );
-
- /* retrieve EEPROM data in 64-byte chunks
- */
-
- while( spd_len )
- {
- /* fill in msg with opcode & params */
- if( (len = sc_construct_msg( sc, subch, msg, BRL1_QSIZE,
- L1_ADDR_TASK_GENERAL,
- L1_REQ_EEPROM, 8,
- L1_ARG_INT, l1_compt,
- L1_ARG_INT, L1_EEP_SPD,
- L1_ARG_INT, offset,
- L1_ARG_INT, spd_len )) < 0 )
- {
- return( EEP_L1 );
- }
-
- /* send the request to the L1 */
- if( sc_command( sc, subch, msg, msg, &len ) ) {
- return( EEP_L1 );
- }
-
- /* check response */
- if( (resp = sc_interpret_resp( msg, 5,
- L1_ARG_INT, &spd_len,
- L1_ARG_UNKNOWN, &len, spd_p )) < 0 )
- {
- /*
- * translate l1 response code to eeprom.c error codes:
- * The L1 response will be L1_RESP_NAVAIL if the spd
- * can't be read (i.e. the spd isn't physically there). It will
- * return L1_RESP_INVAL if the spd exists, but fails the checksum
- * test because the eeprom wasn't programmed, programmed incorrectly,
- * or corrupted. L1_RESP_NAVAIL indicates the eeprom is likely not present,
- * whereas L1_RESP_INVAL indicates the eeprom is present, but the data is
- * invalid.
- */
- if(resp == L1_RESP_INVAL) {
- resp = EEP_BAD_CHECKSUM;
- } else {
- resp = EEP_L1;
- }
- return( resp );
- }
-
- if( spd_len > EEPROM_CHUNKSIZE )
- spd_len = EEPROM_CHUNKSIZE;
-
- spd_p += EEPROM_CHUNKSIZE;
- offset += EEPROM_CHUNKSIZE;
- }
- return EEP_OK;
-#endif /* CONFIG_SERIAL_SGI_L1_PROTOCOL */
-}
-
-
-int read_chassis_ia( l1sc_t *sc, int subch, int l1_compt,
- eeprom_chassis_ia_t *ia )
-{
- char eep_record[512]; /* scratch area for building up info area */
- char *eep_rec_p = eep_record; /* thumb for moving through eep_record */
- int checksum = 0; /* use to verify eeprom record checksum */
- int i;
-
- /* Read in info area record from the L1.
- */
- if( read_ia( sc, subch, l1_compt, L1_EEP_CHASSIS, eep_record )
- != EEP_OK )
- {
- return EEP_L1;
- }
-
- /* Now we've got the whole info area. Transfer it to the data structure.
- */
-
- eep_rec_p = eep_record;
- ia->format = *eep_rec_p++;
- ia->length = *eep_rec_p++;
- if( ia->length == 0 ) {
- /* since we're using 8*ia->length-1 as an array index later, make
- * sure it's sane.
- */
- db_printf(( "read_chassis_ia: eeprom length byte of ZERO\n" ));
- return EEP_L1;
- }
- ia->type = *eep_rec_p++;
-
- ia->part_num_tl = *eep_rec_p++;
-
- (void)BCOPY( eep_rec_p, ia->part_num, (ia->part_num_tl & FIELD_LENGTH_MASK) );
- eep_rec_p += (ia->part_num_tl & FIELD_LENGTH_MASK);
-
- ia->serial_num_tl = *eep_rec_p++;
-
- BCOPY( eep_rec_p, ia->serial_num,
- (ia->serial_num_tl & FIELD_LENGTH_MASK) );
- eep_rec_p += (ia->serial_num_tl & FIELD_LENGTH_MASK);
-
- ia->checksum = eep_record[(8 * ia->length) - 1];
-
- /* verify checksum */
- eep_rec_p = eep_record;
- checksum = 0;
- for( i = 0; i < (8 * ia->length); i++ ) {
- checksum += *eep_rec_p++;
- }
-
- if( (checksum & 0xff) != 0 )
- {
- db_printf(( "read_chassis_ia: bad checksum\n" ));
- db_printf(( "read_chassis_ia: target 0x%x uart 0x%lx\n",
- sc->subch[subch].target, sc->uart ));
- return EEP_BAD_CHECKSUM;
- }
-
- return EEP_OK;
-}
-
-
-int read_board_ia( l1sc_t *sc, int subch, int l1_compt,
- eeprom_board_ia_t *ia )
-{
- char eep_record[512]; /* scratch area for building up info area */
- char *eep_rec_p = eep_record; /* thumb for moving through eep_record */
- int checksum = 0; /* running checksum total */
- int i;
-
- BZERO( ia, sizeof( eeprom_board_ia_t ) );
-
- /* Read in info area record from the L1.
- */
- if( read_ia( sc, subch, l1_compt, L1_EEP_BOARD, eep_record )
- != EEP_OK )
- {
- db_printf(( "read_board_ia: error reading info area from L1\n" ));
- return EEP_L1;
- }
-
- /* Now we've got the whole info area. Transfer it to the data structure.
- */
-
- eep_rec_p = eep_record;
- ia->format = *eep_rec_p++;
- ia->length = *eep_rec_p++;
- if( ia->length == 0 ) {
- /* since we're using 8*ia->length-1 as an array index later, make
- * sure it's sane.
- */
- db_printf(( "read_board_ia: eeprom length byte of ZERO\n" ));
- return EEP_L1;
- }
- ia->language = *eep_rec_p++;
-
- ia->mfg_date = eeprom_xlate_board_mfr_date( (uchar_t *)eep_rec_p );
- eep_rec_p += 3;
-
- ia->manuf_tl = *eep_rec_p++;
-
- BCOPY( eep_rec_p, ia->manuf, (ia->manuf_tl & FIELD_LENGTH_MASK) );
- eep_rec_p += (ia->manuf_tl & FIELD_LENGTH_MASK);
-
- ia->product_tl = *eep_rec_p++;
-
- BCOPY( eep_rec_p, ia->product, (ia->product_tl & FIELD_LENGTH_MASK) );
- eep_rec_p += (ia->product_tl & FIELD_LENGTH_MASK);
-
- ia->serial_num_tl = *eep_rec_p++;
-
- BCOPY(eep_rec_p, ia->serial_num, (ia->serial_num_tl & FIELD_LENGTH_MASK));
- eep_rec_p += (ia->serial_num_tl & FIELD_LENGTH_MASK);
-
- ia->part_num_tl = *eep_rec_p++;
-
- BCOPY( eep_rec_p, ia->part_num, (ia->part_num_tl & FIELD_LENGTH_MASK) );
- eep_rec_p += (ia->part_num_tl & FIELD_LENGTH_MASK);
-
- eep_rec_p++; /* we do not use the FRU file id */
-
- ia->board_rev_tl = *eep_rec_p++;
-
- BCOPY( eep_rec_p, ia->board_rev, (ia->board_rev_tl & FIELD_LENGTH_MASK) );
- eep_rec_p += (ia->board_rev_tl & FIELD_LENGTH_MASK);
-
- ia->eeprom_size_tl = *eep_rec_p++;
- ia->eeprom_size = *eep_rec_p++;
-
- ia->temp_waiver_tl = *eep_rec_p++;
-
- BCOPY( eep_rec_p, ia->temp_waiver,
- (ia->temp_waiver_tl & FIELD_LENGTH_MASK) );
- eep_rec_p += (ia->temp_waiver_tl & FIELD_LENGTH_MASK);
-
- /* if there's more, we must be reading a main board; get
- * additional fields
- */
- if( ((unsigned char)*eep_rec_p != (unsigned char)EEPROM_EOF) ) {
-
- ia->ekey_G_tl = *eep_rec_p++;
- BCOPY( eep_rec_p, (char *)&ia->ekey_G,
- ia->ekey_G_tl & FIELD_LENGTH_MASK );
- eep_rec_p += (ia->ekey_G_tl & FIELD_LENGTH_MASK);
-
- ia->ekey_P_tl = *eep_rec_p++;
- BCOPY( eep_rec_p, (char *)&ia->ekey_P,
- ia->ekey_P_tl & FIELD_LENGTH_MASK );
- eep_rec_p += (ia->ekey_P_tl & FIELD_LENGTH_MASK);
-
- ia->ekey_Y_tl = *eep_rec_p++;
- BCOPY( eep_rec_p, (char *)&ia->ekey_Y,
- ia->ekey_Y_tl & FIELD_LENGTH_MASK );
- eep_rec_p += (ia->ekey_Y_tl & FIELD_LENGTH_MASK);
-
- /*
- * need to get a couple more fields if this is an I brick
- */
- if( ((unsigned char)*eep_rec_p != (unsigned char)EEPROM_EOF) ) {
-
- ia->mac_addr_tl = *eep_rec_p++;
- BCOPY( eep_rec_p, ia->mac_addr,
- ia->mac_addr_tl & FIELD_LENGTH_MASK );
- eep_rec_p += (ia->mac_addr_tl & FIELD_LENGTH_MASK);
-
- ia->ieee1394_cfg_tl = *eep_rec_p++;
- BCOPY( eep_rec_p, ia->ieee1394_cfg,
- ia->ieee1394_cfg_tl & FIELD_LENGTH_MASK );
-
- }
- }
-
- ia->checksum = eep_record[(ia->length * 8) - 1];
-
- /* verify checksum */
- eep_rec_p = eep_record;
- checksum = 0;
- for( i = 0; i < (8 * ia->length); i++ ) {
- checksum += *eep_rec_p++;
- }
-
- if( (checksum & 0xff) != 0 )
- {
- db_printf(( "read_board_ia: bad checksum\n" ));
- db_printf(( "read_board_ia: target 0x%x uart 0x%lx\n",
- sc->subch[subch].target, sc->uart ));
- return EEP_BAD_CHECKSUM;
- }
-
- return EEP_OK;
-}
-
-
-int _cbrick_eeprom_read( eeprom_brd_record_t *buf, l1sc_t *scp,
- int component )
-{
-#if !defined(CONFIG_SERIAL_SGI_L1_PROTOCOL)
- return EEP_L1;
-#else
- int r;
- uint64_t uid = 0;
-#ifdef LOG_GETENV
- char uid_str[32];
-#endif
- int l1_compt, subch;
-
- if ( IS_RUNNING_ON_SIMULATOR() )
- return EEP_L1;
-
- /* make sure we're targeting a cbrick */
- if( !(component & C_BRICK) )
- return EEP_PARAM;
-
- /* If the promlog variable pointed to by IP27LOG_OVNIC is set,
- * use that value for the cbrick UID rather than the EEPROM
- * serial number.
- */
-#ifdef LOG_GETENV
- if( ip27log_getenv( scp->nasid, IP27LOG_OVNIC, uid_str, "0", 0 ) >= 0 )
- {
- db_printf(( "_cbrick_eeprom_read: "
- "Overriding UID with environment variable %s\n",
- IP27LOG_OVNIC ));
- uid = strtoull( uid_str, NULL, 0 );
- }
-#endif
-
- if( (subch = sc_open( scp, L1_ADDR_LOCAL )) < 0 )
- return EEP_L1;
-
- if((component & C_DIMM) == C_DIMM) {
- l1_compt = L1_EEP_DIMM(component & COMPT_MASK);
- r = read_spd(scp,subch,l1_compt, buf->spd);
- sc_close(scp,subch);
- return(r);
- }
-
- switch( component )
- {
- case C_BRICK:
- /* c-brick motherboard */
- l1_compt = L1_EEP_NODE;
- r = read_chassis_ia( scp, subch, l1_compt, buf->chassis_ia );
- if( r != EEP_OK ) {
- sc_close( scp, subch );
- db_printf(( "_cbrick_eeprom_read: using a fake eeprom record\n" ));
- return fake_an_eeprom_record( buf, component, uid );
- }
- if( uid ) {
- /* If IP27LOG_OVNIC is set, we want to put that value
- * in as our UID. */
- fake_a_serial_number( buf->chassis_ia->serial_num, uid );
- buf->chassis_ia->serial_num_tl = 6;
- }
- break;
-
- case C_PIMM:
- /* one of the PIMM boards */
- l1_compt = L1_EEP_PIMM( component & COMPT_MASK );
- break;
-
- default:
- /* unsupported board type */
- sc_close( scp, subch );
- return EEP_PARAM;
- }
-
- r = read_board_ia( scp, subch, l1_compt, buf->board_ia );
- sc_close( scp, subch );
- if( r != EEP_OK )
- {
- db_printf(( "_cbrick_eeprom_read: using a fake eeprom record\n" ));
- return fake_an_eeprom_record( buf, component, uid );
- }
- return EEP_OK;
-#endif /* CONFIG_SERIAL_SGI_L1_PROTOCOL */
-}
-
-
-int cbrick_eeprom_read( eeprom_brd_record_t *buf, nasid_t nasid,
- int component )
-{
-#if !defined(CONFIG_SERIAL_SGI_L1_PROTOCOL)
- return EEP_L1;
-#else
- l1sc_t *scp;
- int local = (nasid == get_nasid());
-
- if ( IS_RUNNING_ON_SIMULATOR() )
- return EEP_L1;
-
- /* If this brick is retrieving its own uid, use the local l1sc_t to
- * arbitrate access to the l1; otherwise, set up a new one (prom) or
- * use an existing remote l1sc_t (kernel)
- */
- if( local ) {
- scp = get_l1sc();
- }
- else {
- scp = &NODEPDA( NASID_TO_COMPACT_NODEID(nasid) )->module->elsc;
- }
-
- return _cbrick_eeprom_read( buf, scp, component );
-#endif /* CONFIG_SERIAL_SGI_L1_PROTOCOL */
-}
-
-
-int iobrick_eeprom_read( eeprom_brd_record_t *buf, nasid_t nasid,
- int component )
-{
-#if !defined(CONFIG_SERIAL_SGI_L1_PROTOCOL)
- return EEP_L1;
-#else
- int r;
- int l1_compt, subch;
- l1sc_t *scp;
- int local = (nasid == get_nasid());
-
- if ( IS_RUNNING_ON_SIMULATOR() )
- return EEP_L1;
-
- /* make sure we're talking to an applicable brick */
- if( !(component & IO_BRICK) ) {
- return EEP_PARAM;
- }
-
- /* If we're talking to this c-brick's attached io brick, use
- * the local l1sc_t; otherwise, set up a new one (prom) or
- * use an existing remote l1sc_t (kernel)
- */
- if( local ) {
- scp = get_l1sc();
- }
- else {
- scp = &NODEPDA( NASID_TO_COMPACT_NODEID(nasid) )->module->elsc;
- }
-
- if( (subch = sc_open( scp, L1_ADDR_LOCALIO )) < 0 )
- return EEP_L1;
-
-
- switch( component )
- {
- case IO_BRICK:
- /* IO brick motherboard */
- l1_compt = L1_EEP_LOGIC;
- r = read_chassis_ia( scp, subch, l1_compt, buf->chassis_ia );
-
- if( r != EEP_OK ) {
- sc_close( scp, subch );
- /*
- * Whenever we no longer need to test on hardware
- * that does not have EEPROMS, then this can be removed.
- */
- r = fake_an_eeprom_record( buf, component, rtc_time() );
- return r;
- }
- break;
-
- case IO_POWER:
- /* IO brick power board */
- l1_compt = L1_EEP_POWER;
- break;
-
- default:
- /* unsupported board type */
- sc_close( scp, subch );
- return EEP_PARAM;
- }
-
- r = read_board_ia( scp, subch, l1_compt, buf->board_ia );
- sc_close( scp, subch );
- if( r != EEP_OK ) {
- return r;
- }
- return EEP_OK;
-#endif /* CONFIG_SERIAL_SGI_L1_PROTOCOL */
-}
-
-
-int vector_eeprom_read( eeprom_brd_record_t *buf, nasid_t nasid,
- net_vec_t path, int component )
-{
-#if !defined(CONFIG_SERIAL_SGI_L1_PROTOCOL)
- return EEP_L1;
-#else
- int r;
- uint64_t uid = 0;
- int l1_compt, subch;
- l1sc_t sc;
-
- if ( IS_RUNNING_ON_SIMULATOR() )
- return EEP_L1;
-
- /* make sure we're targeting an applicable brick */
- if( !(component & VECTOR) )
- return EEP_PARAM;
-
- switch( component & BRICK_MASK )
- {
- case R_BRICK:
- ROUTER_LOCK( path );
- sc_init( &sc, nasid, path );
-
- if( (subch = sc_open( &sc, L1_ADDR_LOCAL )) < 0 )
- {
- db_printf(( "vector_eeprom_read: couldn't open subch\n" ));
- ROUTER_UNLOCK(path);
- return EEP_L1;
- }
- switch( component )
- {
- case R_BRICK:
- /* r-brick motherboard */
- l1_compt = L1_EEP_LOGIC;
- r = read_chassis_ia( &sc, subch, l1_compt, buf->chassis_ia );
- if( r != EEP_OK ) {
- sc_close( &sc, subch );
- ROUTER_UNLOCK( path );
- printk( "vector_eeprom_read: couldn't get rbrick eeprom info;"
- " using current time as uid\n" );
- uid = rtc_time();
- db_printf(("vector_eeprom_read: using a fake eeprom record\n"));
- return fake_an_eeprom_record( buf, component, uid );
- }
- break;
-
- case R_POWER:
- /* r-brick power board */
- l1_compt = L1_EEP_POWER;
- break;
-
- default:
- /* unsupported board type */
- sc_close( &sc, subch );
- ROUTER_UNLOCK( path );
- return EEP_PARAM;
- }
- r = read_board_ia( &sc, subch, l1_compt, buf->board_ia );
- sc_close( &sc, subch );
- ROUTER_UNLOCK( path );
- if( r != EEP_OK ) {
- db_printf(( "vector_eeprom_read: using a fake eeprom record\n" ));
- return fake_an_eeprom_record( buf, component, uid );
- }
- return EEP_OK;
-
- case C_BRICK:
- sc_init( &sc, nasid, path );
- return _cbrick_eeprom_read( buf, &sc, component );
-
- default:
- /* unsupported brick type */
- return EEP_PARAM;
- }
-#endif /* CONFIG_SERIAL_SGI_L1_PROTOCOL */
-}
+++ /dev/null
-/*
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Copyright (C) 2001 Silicon Graphics, Inc.
- * Copyright (C) 2001 by Ralf Baechle
- */
-#include <linux/kernel.h>
-#include <linux/types.h>
-#include <linux/spinlock.h>
-#include <linux/init.h>
-#include <linux/pci.h>
-#include <linux/errno.h>
-#include <linux/efi.h>
-#include <asm/sn/klclock.h>
-
-/*
- * No locking necessary when this is called from efirtc which protects us
- * from racing by efi_rtc_lock.
- */
-#define __swizzle(addr) ((u8 *)((unsigned long)(addr) ^ 3))
-#define read_io_port(addr) (*(volatile u8 *) __swizzle(addr))
-#define write_io_port(addr, data) (*(volatile u8 *) __swizzle(addr) = (data))
-
-#define TOD_SGS_M48T35 1
-#define TOD_DALLAS_DS1386 2
-
-static unsigned long nvram_base = 0;
-static int tod_chip_type;
-
-static int
-get_tod_chip_type(void)
-{
- unsigned char testval;
-
- write_io_port(RTC_DAL_CONTROL_ADDR, RTC_DAL_UPDATE_DISABLE);
- write_io_port(RTC_DAL_DAY_ADDR, 0xff);
- write_io_port(RTC_DAL_CONTROL_ADDR, RTC_DAL_UPDATE_ENABLE);
-
- testval = read_io_port(RTC_DAL_DAY_ADDR);
- if (testval == 0xff)
- return TOD_SGS_M48T35;
-
- return TOD_DALLAS_DS1386;
-}
-
-efi_status_t
-ioc3_get_time(efi_time_t *time, efi_time_cap_t *caps)
-{
- if (!nvram_base) {
- printk(KERN_CRIT "nvram_base is zero\n");
- return EFI_UNSUPPORTED;
- }
-
- memset(time, 0, sizeof(*time));
-
- switch (tod_chip_type) {
- case TOD_SGS_M48T35:
- write_io_port(RTC_SGS_CONTROL_ADDR, RTC_SGS_READ_PROTECT);
-
- time->year = BCD_TO_INT(read_io_port(RTC_SGS_YEAR_ADDR)) + YRREF;
- time->month = BCD_TO_INT(read_io_port(RTC_SGS_MONTH_ADDR));
- time->day = BCD_TO_INT(read_io_port(RTC_SGS_DATE_ADDR));
- time->hour = BCD_TO_INT(read_io_port(RTC_SGS_HOUR_ADDR));
- time->minute = BCD_TO_INT(read_io_port(RTC_SGS_MIN_ADDR));
- time->second = BCD_TO_INT(read_io_port(RTC_SGS_SEC_ADDR));
- time->nanosecond = 0;
-
- write_io_port(RTC_SGS_CONTROL_ADDR, 0);
- break;
-
- case TOD_DALLAS_DS1386:
- write_io_port(RTC_DAL_CONTROL_ADDR, RTC_DAL_UPDATE_DISABLE);
-
- time->nanosecond = 0;
- time->second = BCD_TO_INT(read_io_port(RTC_DAL_SEC_ADDR));
- time->minute = BCD_TO_INT(read_io_port(RTC_DAL_MIN_ADDR));
- time->hour = BCD_TO_INT(read_io_port(RTC_DAL_HOUR_ADDR));
- time->day = BCD_TO_INT(read_io_port(RTC_DAL_DATE_ADDR));
- time->month = BCD_TO_INT(read_io_port(RTC_DAL_MONTH_ADDR));
- time->year = BCD_TO_INT(read_io_port(RTC_DAL_YEAR_ADDR)) + YRREF;
-
- write_io_port(RTC_DAL_CONTROL_ADDR, RTC_DAL_UPDATE_ENABLE);
- break;
-
- default:
- break;
- }
-
- if (caps) {
- caps->resolution = 50000000; /* 50PPM */
- caps->accuracy = 1000; /* 1ms */
- caps->sets_to_zero = 0;
- }
-
- return EFI_SUCCESS;
-}
-
-static efi_status_t ioc3_set_time (efi_time_t *t)
-{
- if (!nvram_base) {
- printk(KERN_CRIT "nvram_base is zero\n");
- return EFI_UNSUPPORTED;
- }
-
- switch (tod_chip_type) {
- case TOD_SGS_M48T35:
- write_io_port(RTC_SGS_CONTROL_ADDR, RTC_SGS_WRITE_ENABLE);
- write_io_port(RTC_SGS_YEAR_ADDR, INT_TO_BCD((t->year - YRREF)));
- write_io_port(RTC_SGS_MONTH_ADDR,INT_TO_BCD(t->month));
- write_io_port(RTC_SGS_DATE_ADDR, INT_TO_BCD(t->day));
- write_io_port(RTC_SGS_HOUR_ADDR, INT_TO_BCD(t->hour));
- write_io_port(RTC_SGS_MIN_ADDR, INT_TO_BCD(t->minute));
- write_io_port(RTC_SGS_SEC_ADDR, INT_TO_BCD(t->second));
- write_io_port(RTC_SGS_CONTROL_ADDR, 0);
- break;
-
- case TOD_DALLAS_DS1386:
- write_io_port(RTC_DAL_CONTROL_ADDR, RTC_DAL_UPDATE_DISABLE);
- write_io_port(RTC_DAL_SEC_ADDR, INT_TO_BCD(t->second));
- write_io_port(RTC_DAL_MIN_ADDR, INT_TO_BCD(t->minute));
- write_io_port(RTC_DAL_HOUR_ADDR, INT_TO_BCD(t->hour));
- write_io_port(RTC_DAL_DATE_ADDR, INT_TO_BCD(t->day));
- write_io_port(RTC_DAL_MONTH_ADDR,INT_TO_BCD(t->month));
- write_io_port(RTC_DAL_YEAR_ADDR, INT_TO_BCD((t->year - YRREF)));
- write_io_port(RTC_DAL_CONTROL_ADDR, RTC_DAL_UPDATE_ENABLE);
- break;
-
- default:
- break;
- }
-
- return EFI_SUCCESS;
-}
-
-/* The following two are not supported atm. */
-static efi_status_t
-ioc3_get_wakeup_time (efi_bool_t *enabled, efi_bool_t *pending, efi_time_t *tm)
-{
- return EFI_UNSUPPORTED;
-}
-
-static efi_status_t
-ioc3_set_wakeup_time (efi_bool_t enabled, efi_time_t *tm)
-{
- return EFI_UNSUPPORTED;
-}
-
-/*
- * It looks like the master IOC3 is usually on bus 0, device 4. Hope
- * that's right
- */
-static __init int efi_ioc3_time_init(void)
-{
- struct pci_dev *dev;
- static struct ioc3 *ioc3;
-
- dev = pci_find_slot(0, PCI_DEVFN(4, 0));
- if (!dev) {
- printk(KERN_CRIT "Couldn't find master IOC3\n");
-
- return -ENODEV;
- }
-
- ioc3 = ioremap(pci_resource_start(dev, 0), pci_resource_len(dev, 0));
- nvram_base = (unsigned long) ioc3 + IOC3_BYTEBUS_DEV0;
-
- tod_chip_type = get_tod_chip_type();
- if (tod_chip_type == 1)
- printk(KERN_NOTICE "TOD type is SGS M48T35\n");
- else if (tod_chip_type == 2)
- printk(KERN_NOTICE "TOD type is Dallas DS1386\n");
- else
- printk(KERN_CRIT "No or unknown TOD\n");
-
- efi.get_time = ioc3_get_time;
- efi.set_time = ioc3_set_time;
- efi.get_wakeup_time = ioc3_get_wakeup_time;
- efi.set_wakeup_time = ioc3_set_wakeup_time;
-
- return 0;
-}
-
-module_init(efi_ioc3_time_init);
+++ /dev/null
-/* $Id$
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * hcl - SGI's Hardware Graph compatibility layer.
- *
- * Copyright (C) 1992 - 1997, 2000-2002 Silicon Graphics, Inc. All rights reserved.
- */
-
-#include <linux/types.h>
-#include <linux/config.h>
-#include <linux/slab.h>
-#include <linux/ctype.h>
-#include <linux/module.h>
-#include <linux/init.h>
-#include <asm/sn/sgi.h>
-#include <linux/devfs_fs.h>
-#include <linux/devfs_fs_kernel.h>
-#include <asm/io.h>
-#include <asm/sn/iograph.h>
-#include <asm/sn/invent.h>
-#include <asm/sn/hcl.h>
-#include <asm/sn/labelcl.h>
-
-#define HCL_NAME "SGI-HWGRAPH COMPATIBILITY DRIVER"
-#define HCL_TEMP_NAME "HCL_TEMP_NAME_USED_FOR_HWGRAPH_VERTEX_CREATE"
-#define HCL_TEMP_NAME_LEN 44
-#define HCL_VERSION "1.0"
-devfs_handle_t hwgraph_root = NULL;
-devfs_handle_t linux_busnum = NULL;
-
-/*
- * Debug flag definition.
- */
-#define OPTION_NONE 0x00
-#define HCL_DEBUG_NONE 0x00000
-#define HCL_DEBUG_ALL 0x0ffff
-#if defined(CONFIG_HCL_DEBUG)
-static unsigned int hcl_debug_init __initdata = HCL_DEBUG_NONE;
-#endif
-static unsigned int hcl_debug = HCL_DEBUG_NONE;
-#if defined(CONFIG_HCL_DEBUG) && !defined(MODULE)
-static unsigned int boot_options = OPTION_NONE;
-#endif
-
-/*
- * Some Global definitions.
- */
-devfs_handle_t hcl_handle = NULL;
-
-invplace_t invplace_none = {
- GRAPH_VERTEX_NONE,
- GRAPH_VERTEX_PLACE_NONE,
- NULL
-};
-
-/*
- * HCL device driver.
- * The purpose of this device driver is to provide a facility
- * for User Level Apps e.g. hinv, ioconfig etc. an ioctl path
- * to manipulate label entries without having to implement
- * system call interfaces. This methodology will enable us to
- * make this feature module loadable.
- */
-static int hcl_open(struct inode * inode, struct file * filp)
-{
- if (hcl_debug) {
- printk("HCL: hcl_open called.\n");
- }
-
- return(0);
-
-}
-
-static int hcl_close(struct inode * inode, struct file * filp)
-{
-
- if (hcl_debug) {
- printk("HCL: hcl_close called.\n");
- }
-
- return(0);
-
-}
-
-static int hcl_ioctl(struct inode * inode, struct file * file,
- unsigned int cmd, unsigned long arg)
-{
-
- if (hcl_debug) {
- printk("HCL: hcl_ioctl called.\n");
- }
-
- switch (cmd) {
- default:
- if (hcl_debug) {
- printk("HCL: hcl_ioctl cmd = 0x%x\n", cmd);
- }
- }
-
- return(0);
-
-}
-
-struct file_operations hcl_fops = {
- (struct module *)0,
- NULL, /* lseek - default */
- NULL, /* read - general block-dev read */
- NULL, /* write - general block-dev write */
- NULL, /* readdir - bad */
- NULL, /* poll */
- hcl_ioctl, /* ioctl */
- NULL, /* mmap */
- hcl_open, /* open */
- NULL, /* flush */
- hcl_close, /* release */
- NULL, /* fsync */
- NULL, /* fasync */
- NULL, /* lock */
- NULL, /* readv */
- NULL, /* writev */
-};
-
-
-/*
- * init_hcl() - Boot time initialization. Ensure that it is called
- * after devfs has been initialized.
- *
- * For now this routine is being called out of devfs/base.c. Actually
- * Not a bad place to be ..
- *
- */
-#ifdef MODULE
-int init_module (void)
-#else
-int __init init_hcl(void)
-#endif
-{
- extern void string_table_init(struct string_table *);
- extern struct string_table label_string_table;
- extern int init_ifconfig_net(void);
- int rv = 0;
-
-#if defined(CONFIG_HCL_DEBUG) && !defined(MODULE)
- printk ("\n%s: v%s Colin Ngam (cngam@sgi.com)\n",
- HCL_NAME, HCL_VERSION);
-
- hcl_debug = hcl_debug_init;
- printk ("%s: hcl_debug: 0x%0x\n", HCL_NAME, hcl_debug);
- printk ("\n%s: boot_options: 0x%0x\n", HCL_NAME, boot_options);
-#endif
-
- /*
- * Create the hwgraph_root on devfs.
- */
- rv = hwgraph_path_add(NULL, EDGE_LBL_HW, &hwgraph_root);
- if (rv)
- printk ("WARNING: init_hcl: Failed to create hwgraph_root. Error = %d.\n", rv);
-
- /*
- * Create the hcl driver to support inventory entry manipulations.
- * By default, it is expected that devfs is mounted on /dev.
- *
- */
- hcl_handle = hwgraph_register(hwgraph_root, ".hcl",
- 0, DEVFS_FL_AUTO_DEVNUM,
- 0, 0,
- S_IFCHR | S_IRUSR | S_IWUSR | S_IRGRP, 0, 0,
- &hcl_fops, NULL);
-
- if (hcl_handle == NULL) {
- panic("HCL: Unable to create HCL Driver in init_hcl().\n");
- return(0);
- }
-
- /*
- * Initialize the HCL string table.
- */
- string_table_init(&label_string_table);
-
- /*
- * Create the directory that links Linux bus numbers to our Xwidget.
- */
- rv = hwgraph_path_add(hwgraph_root, EDGE_LBL_LINUX_BUS, &linux_busnum);
- if (linux_busnum == NULL) {
- panic("HCL: Unable to create %s\n", EDGE_LBL_LINUX_BUS);
- return(0);
- }
-
- /*
- * Initialize the ifconfgi_net driver that does network devices
- * Persistent Naming.
- */
- init_ifconfig_net();
-
- return(0);
-
-}
-
-
-/*
- * hcl_setup() - Process boot time parameters if given.
- * "hcl="
- * This routine gets called only if "hcl=" is given in the
- * boot line and before init_hcl().
- *
- * We currently do not have any boot options .. when we do,
- * functionalities can be added here.
- *
- */
-static int __init hcl_setup(char *str)
-{
- while ( (*str != '\0') && !isspace (*str) )
- {
-#ifdef CONFIG_HCL_DEBUG
- if (strncmp (str, "all", 3) == 0) {
- hcl_debug_init |= HCL_DEBUG_ALL;
- str += 3;
- } else
- return 0;
-#endif
- if (*str != ',') return 0;
- ++str;
- }
-
- return 1;
-
-}
-
-__setup("hcl=", hcl_setup);
-
-
-/*
- * Set device specific "fast information".
- *
- */
-void
-hwgraph_fastinfo_set(devfs_handle_t de, arbitrary_info_t fastinfo)
-{
-
- if (hcl_debug) {
- printk("HCL: hwgraph_fastinfo_set handle 0x%p fastinfo %ld\n", (void *)de, fastinfo);
- }
-
- labelcl_info_replace_IDX(de, HWGRAPH_FASTINFO, fastinfo, NULL);
-
-}
-
-
-/*
- * Get device specific "fast information".
- *
- */
-arbitrary_info_t
-hwgraph_fastinfo_get(devfs_handle_t de)
-{
- arbitrary_info_t fastinfo;
- int rv;
-
- if (!de) {
- printk(KERN_WARNING "HCL: hwgraph_fastinfo_get handle given is NULL.\n");
- return(-1);
- }
-
- rv = labelcl_info_get_IDX(de, HWGRAPH_FASTINFO, &fastinfo);
- if (rv == 0)
- return(fastinfo);
-
- return(0);
-}
-
-
-/*
- * hwgraph_connectpt_set - Sets the connect point handle in de to the
- * given connect_de handle. By default, the connect point of the
- * devfs node is the parent. This effectively changes this assumption.
- */
-int
-hwgraph_connectpt_set(devfs_handle_t de, devfs_handle_t connect_de)
-{
- int rv;
-
- if (!de)
- return(-1);
-
- rv = labelcl_info_connectpt_set(de, connect_de);
-
- return(rv);
-}
-
-
-/*
- * hwgraph_connectpt_get: Returns the entry's connect point in the devfs
- * tree.
- */
-devfs_handle_t
-hwgraph_connectpt_get(devfs_handle_t de)
-{
- int rv;
- arbitrary_info_t info;
- devfs_handle_t connect;
-
- rv = labelcl_info_get_IDX(de, HWGRAPH_CONNECTPT, &info);
- if (rv != 0) {
- return(NULL);
- }
-
- connect = (devfs_handle_t)info;
- return(connect);
-
-}
-
-
-/*
- * hwgraph_mk_dir - Creates a directory entry with devfs.
- * Note that a directory entry in devfs can have children
- * but it cannot be a char|block special file.
- */
-devfs_handle_t
-hwgraph_mk_dir(devfs_handle_t de, const char *name,
- unsigned int namelen, void *info)
-{
-
- int rv;
- labelcl_info_t *labelcl_info = NULL;
- devfs_handle_t new_devfs_handle = NULL;
- devfs_handle_t parent = NULL;
-
- /*
- * Create the device info structure for hwgraph compatiblity support.
- */
- labelcl_info = labelcl_info_create();
- if (!labelcl_info)
- return(NULL);
-
- /*
- * Create a devfs entry.
- */
- new_devfs_handle = devfs_mk_dir(de, name, (void *)labelcl_info);
- if (!new_devfs_handle) {
- labelcl_info_destroy(labelcl_info);
- return(NULL);
- }
-
- /*
- * Get the parent handle.
- */
- parent = devfs_get_parent (new_devfs_handle);
-
- /*
- * To provide the same semantics as the hwgraph, set the connect point.
- */
- rv = hwgraph_connectpt_set(new_devfs_handle, parent);
- if (!rv) {
- /*
- * We need to clean up!
- */
- }
-
- /*
- * If the caller provides a private data pointer, save it in the
- * labelcl info structure(fastinfo). This can be retrieved via
- * hwgraph_fastinfo_get()
- */
- if (info)
- hwgraph_fastinfo_set(new_devfs_handle, (arbitrary_info_t)info);
-
- return(new_devfs_handle);
-
-}
-
-/*
- * hwgraph_vertex_create - Create a vertex by giving it a temp name.
- */
-
-/*
- * hwgraph_path_add - Create a directory node with the given path starting
- * from the given devfs_handle_t.
- */
-extern char * dev_to_name(devfs_handle_t, char *, uint);
-int
-hwgraph_path_add(devfs_handle_t fromv,
- char *path,
- devfs_handle_t *new_de)
-{
-
- unsigned int namelen = strlen(path);
- int rv;
-
- /*
- * We need to handle the case when fromv is NULL ..
- * in this case we need to create the path from the
- * hwgraph root!
- */
- if (fromv == NULL)
- fromv = hwgraph_root;
-
- /*
- * check the entry doesn't already exist, if it does
- * then we simply want new_de to point to it (otherwise
- * we'll overwrite the existing labelcl_info struct)
- */
- rv = hwgraph_edge_get(fromv, path, new_de);
- if (rv) { /* couldn't find entry so we create it */
- *new_de = hwgraph_mk_dir(fromv, path, namelen, NULL);
- if (new_de == NULL)
- return(-1);
- else
- return(0);
- }
- else
- return(0);
-
-}
-
-/*
- * hwgraph_register - Creates a file entry with devfs.
- * Note that a file entry cannot have children .. it is like a
- * char|block special vertex in hwgraph.
- */
-devfs_handle_t
-hwgraph_register(devfs_handle_t de, const char *name,
- unsigned int namelen, unsigned int flags,
- unsigned int major, unsigned int minor,
- umode_t mode, uid_t uid, gid_t gid,
- struct file_operations *fops,
- void *info)
-{
-
- int rv;
- void *labelcl_info = NULL;
- devfs_handle_t new_devfs_handle = NULL;
- devfs_handle_t parent = NULL;
-
- /*
- * Create the labelcl info structure for hwgraph compatiblity support.
- */
- labelcl_info = labelcl_info_create();
- if (!labelcl_info)
- return(NULL);
-
- /*
- * Create a devfs entry.
- */
- new_devfs_handle = devfs_register(de, name, flags, major,
- minor, mode, fops, labelcl_info);
- if (!new_devfs_handle) {
- labelcl_info_destroy((labelcl_info_t *)labelcl_info);
- return(NULL);
- }
-
- /*
- * Get the parent handle.
- */
- if (de == NULL)
- parent = devfs_get_parent (new_devfs_handle);
- else
- parent = de;
-
- /*
- * To provide the same semantics as the hwgraph, set the connect point.
- */
- rv = hwgraph_connectpt_set(new_devfs_handle, parent);
- if (rv) {
- /*
- * We need to clean up!
- */
- printk(KERN_WARNING "HCL: Unable to set the connect point to its parent 0x%p\n",
- (void *)new_devfs_handle);
- }
-
- /*
- * If the caller provides a private data pointer, save it in the
- * labelcl info structure(fastinfo). This can be retrieved via
- * hwgraph_fastinfo_get()
- */
- if (info)
- hwgraph_fastinfo_set(new_devfs_handle, (arbitrary_info_t)info);
-
- return(new_devfs_handle);
-
-}
-
-
-/*
- * hwgraph_mk_symlink - Create a symbolic link.
- */
-int
-hwgraph_mk_symlink(devfs_handle_t de, const char *name, unsigned int namelen,
- unsigned int flags, const char *link, unsigned int linklen,
- devfs_handle_t *handle, void *info)
-{
-
- void *labelcl_info = NULL;
- int status = 0;
- devfs_handle_t new_devfs_handle = NULL;
-
- /*
- * Create the labelcl info structure for hwgraph compatiblity support.
- */
- labelcl_info = labelcl_info_create();
- if (!labelcl_info)
- return(-1);
-
- /*
- * Create a symbolic link devfs entry.
- */
- status = devfs_mk_symlink(de, name, flags, link,
- &new_devfs_handle, labelcl_info);
- if ( (!new_devfs_handle) || (!status) ){
- labelcl_info_destroy((labelcl_info_t *)labelcl_info);
- return(-1);
- }
-
- /*
- * If the caller provides a private data pointer, save it in the
- * labelcl info structure(fastinfo). This can be retrieved via
- * hwgraph_fastinfo_get()
- */
- if (info)
- hwgraph_fastinfo_set(new_devfs_handle, (arbitrary_info_t)info);
-
- *handle = new_devfs_handle;
- return(0);
-
-}
-
-/*
- * hwgraph_vertex_get_next - this routine returns the next sibbling for the
- * device entry given in de. If there are no more sibbling, NULL
- * is returned in next_sibbling.
- *
- * Currently we do not have any protection against de being deleted
- * while it's handle is being held.
- */
-int
-hwgraph_vertex_get_next(devfs_handle_t *next_sibbling, devfs_handle_t *de)
-{
- *next_sibbling = devfs_get_next_sibling (*de);
-
- if (*next_sibbling != NULL)
- *de = *next_sibbling;
- return (0);
-}
-
-
-/*
- * hwgraph_vertex_destroy - Destroy the devfs entry
- */
-int
-hwgraph_vertex_destroy(devfs_handle_t de)
-{
-
- void *labelcl_info = NULL;
-
- labelcl_info = devfs_get_info(de);
- devfs_unregister(de);
-
- if (labelcl_info)
- labelcl_info_destroy((labelcl_info_t *)labelcl_info);
-
- return(0);
-}
-
-/*
-** See if a vertex has an outgoing edge with a specified name.
-** Vertices in the hwgraph *implicitly* contain these edges:
-** "." refers to "current vertex"
-** ".." refers to "connect point vertex"
-** "char" refers to current vertex (character device access)
-** "block" refers to current vertex (block device access)
-*/
-
-/*
- * hwgraph_edge_add - This routines has changed from the original conext.
- * All it does now is to create a symbolic link from "from" to "to".
- */
-/* ARGSUSED */
-int
-hwgraph_edge_add(devfs_handle_t from, devfs_handle_t to, char *name)
-{
-
- char *path;
- char *s1;
- char *index;
- int name_start;
- devfs_handle_t handle = NULL;
- int rv;
- int i, count;
-
- path = kmalloc(1024, GFP_KERNEL);
- memset(path, 0x0, 1024);
- name_start = devfs_generate_path (from, path, 1024);
- s1 = &path[name_start];
- count = 0;
- while (1) {
- index = strstr (s1, "/");
- if (index) {
- count++;
- s1 = ++index;
- } else {
- count++;
- break;
- }
- }
-
- memset(path, 0x0, 1024);
- name_start = devfs_generate_path (to, path, 1024);
-
- for (i = 0; i < count; i++) {
- strcat(path,"../");
- }
-
- strcat(path, &path[name_start]);
-
- /*
- * Otherwise, just create a symlink to the vertex.
- * In this case the vertex was previous created with a REAL pathname.
- */
- rv = devfs_mk_symlink (from, (const char *)name,
- DEVFS_FL_DEFAULT, path,
- &handle, NULL);
-
- name_start = devfs_generate_path (handle, path, 1024);
- return(rv);
-
-
-}
-/* ARGSUSED */
-int
-hwgraph_edge_get(devfs_handle_t from, char *name, devfs_handle_t *toptr)
-{
-
- int namelen = 0;
- devfs_handle_t target_handle = NULL;
-
- if (name == NULL)
- return(-1);
-
- if (toptr == NULL)
- return(-1);
-
- /*
- * If the name is "." just return the current devfs entry handle.
- */
- if (!strcmp(name, HWGRAPH_EDGELBL_DOT)) {
- if (toptr) {
- *toptr = from;
- }
- } else if (!strcmp(name, HWGRAPH_EDGELBL_DOTDOT)) {
- /*
- * Hmmm .. should we return the connect point or parent ..
- * see in hwgraph, the concept of parent is the connectpt!
- *
- * Maybe we should see whether the connectpt is set .. if
- * not just return the parent!
- */
- target_handle = hwgraph_connectpt_get(from);
- if (target_handle) {
- /*
- * Just return the connect point.
- */
- *toptr = target_handle;
- return(0);
- }
- target_handle = devfs_get_parent(from);
- *toptr = target_handle;
-
- } else {
- /*
- * Call devfs to get the devfs entry.
- */
- namelen = (int) strlen(name);
- target_handle = devfs_get_handle(from, name, 1); /* Yes traverse symbolic links */
- if (target_handle == NULL)
- return(-1);
- else
- *toptr = target_handle;
- }
-
- return(0);
-}
-
-
-/*
- * hwgraph_edge_get_next - Retrieves the next sibbling given the current
- * entry number "placeptr".
- *
- * Allow the caller to retrieve walk through the sibblings of "source"
- * devfs_handle_t. The implicit edges "." and ".." is returned first
- * followed by each of the real children.
- *
- * We may end up returning garbage if another thread perform any deletion
- * in this directory before "placeptr".
- *
- */
-/* ARGSUSED */
-int
-hwgraph_edge_get_next(devfs_handle_t source, char *name, devfs_handle_t *target,
- uint *placeptr)
-
-{
-
- uint which_place;
- unsigned int namelen = 0;
- const char *tempname = NULL;
-
- if (placeptr == NULL)
- return(-1);
-
- which_place = *placeptr;
-
-again:
- if (which_place <= HWGRAPH_RESERVED_PLACES) {
- if (which_place == EDGE_PLACE_WANT_CURRENT) {
- /*
- * Looking for "."
- * Return the current devfs handle.
- */
- if (name != NULL)
- strcpy(name, HWGRAPH_EDGELBL_DOT);
-
- if (target != NULL) {
- *target = source;
- /* XXX should incr "source" ref count here if we
- * ever implement ref counts */
- }
-
- } else if (which_place == EDGE_PLACE_WANT_CONNECTPT) {
- /*
- * Looking for the connect point or parent.
- * If the connect point is set .. it returns the connect point.
- * Otherwise, it returns the parent .. will we support
- * connect point?
- */
- devfs_handle_t connect_point = hwgraph_connectpt_get(source);
-
- if (connect_point == NULL) {
- /*
- * No connectpoint set .. either the User
- * explicitly NULL it or this node was not
- * created via hcl.
- */
- which_place++;
- goto again;
- }
-
- if (name != NULL)
- strcpy(name, HWGRAPH_EDGELBL_DOTDOT);
-
- if (target != NULL)
- *target = connect_point;
-
- } else if (which_place == EDGE_PLACE_WANT_REAL_EDGES) {
- /*
- * return first "real" entry in directory, and increment
- * placeptr. Next time around we should have
- * which_place > HWGRAPH_RESERVED_EDGES so we'll fall through
- * this nested if block.
- */
- *target = devfs_get_first_child(source);
- if (*target && name) {
- tempname = devfs_get_name(*target, &namelen);
- if (tempname && namelen)
- strcpy(name, tempname);
- }
-
- *placeptr = which_place + 1;
- return (0);
- }
-
- *placeptr = which_place+1;
- return(0);
- }
-
- /*
- * walk linked list, (which_place - HWGRAPH_RESERVED_PLACES) times
- */
- {
- devfs_handle_t curr;
- int i = 0;
-
- for (curr=devfs_get_first_child(source), i= i+HWGRAPH_RESERVED_PLACES;
- curr!=NULL && i<which_place;
- curr=devfs_get_next_sibling(curr), i++)
- ;
- *target = curr;
- *placeptr = which_place + 1;
- if (curr && name) {
- tempname = devfs_get_name(*target, &namelen);
- if (tempname && namelen)
- strcpy(name, tempname);
- }
- }
- if (target == NULL)
- return(-1);
- else
- return(0);
-}
-
-/*
- * hwgraph_info_add_LBL - Adds a new label for the device. Mark the info_desc
- * of the label as INFO_DESC_PRIVATE and store the info in the label.
- */
-/* ARGSUSED */
-int
-hwgraph_info_add_LBL( devfs_handle_t de,
- char *name,
- arbitrary_info_t info)
-{
- return(labelcl_info_add_LBL(de, name, INFO_DESC_PRIVATE, info));
-}
-
-/*
- * hwgraph_info_remove_LBL - Remove the label entry for the device.
- */
-/* ARGSUSED */
-int
-hwgraph_info_remove_LBL( devfs_handle_t de,
- char *name,
- arbitrary_info_t *old_info)
-{
- return(labelcl_info_remove_LBL(de, name, NULL, old_info));
-}
-
-/*
- * hwgraph_info_replace_LBL - replaces an existing label with
- * a new label info value.
- */
-/* ARGSUSED */
-int
-hwgraph_info_replace_LBL( devfs_handle_t de,
- char *name,
- arbitrary_info_t info,
- arbitrary_info_t *old_info)
-{
- return(labelcl_info_replace_LBL(de, name,
- INFO_DESC_PRIVATE, info,
- NULL, old_info));
-}
-/*
- * hwgraph_info_get_LBL - Get and return the info value in the label of the
- * device.
- */
-/* ARGSUSED */
-int
-hwgraph_info_get_LBL( devfs_handle_t de,
- char *name,
- arbitrary_info_t *infop)
-{
- return(labelcl_info_get_LBL(de, name, NULL, infop));
-}
-
-/*
- * hwgraph_info_get_exported_LBL - Retrieve the info_desc and info pointer
- * of the given label for the device. The weird thing is that the label
- * that matches the name is return irrespective of the info_desc value!
- * Do not understand why the word "exported" is used!
- */
-/* ARGSUSED */
-int
-hwgraph_info_get_exported_LBL( devfs_handle_t de,
- char *name,
- int *export_info,
- arbitrary_info_t *infop)
-{
- int rc;
- arb_info_desc_t info_desc;
-
- rc = labelcl_info_get_LBL(de, name, &info_desc, infop);
- if (rc == 0)
- *export_info = (int)info_desc;
-
- return(rc);
-}
-
-/*
- * hwgraph_info_get_next_LBL - Returns the next label info given the
- * current label entry in place.
- *
- * Once again this has no locking or reference count for protection.
- *
- */
-/* ARGSUSED */
-int
-hwgraph_info_get_next_LBL( devfs_handle_t de,
- char *buf,
- arbitrary_info_t *infop,
- labelcl_info_place_t *place)
-{
- return(labelcl_info_get_next_LBL(de, buf, NULL, infop, place));
-}
-
-/*
- * hwgraph_info_export_LBL - Retrieve the specified label entry and modify
- * the info_desc field with the given value in nbytes.
- */
-/* ARGSUSED */
-int
-hwgraph_info_export_LBL(devfs_handle_t de, char *name, int nbytes)
-{
- arbitrary_info_t info;
- int rc;
-
- if (nbytes == 0)
- nbytes = INFO_DESC_EXPORT;
-
- if (nbytes < 0)
- return(-1);
-
- rc = labelcl_info_get_LBL(de, name, NULL, &info);
- if (rc != 0)
- return(rc);
-
- rc = labelcl_info_replace_LBL(de, name,
- nbytes, info, NULL, NULL);
-
- return(rc);
-}
-
-/*
- * hwgraph_info_unexport_LBL - Retrieve the given label entry and change the
- * label info_descr filed to INFO_DESC_PRIVATE.
- */
-/* ARGSUSED */
-int
-hwgraph_info_unexport_LBL(devfs_handle_t de, char *name)
-{
- arbitrary_info_t info;
- int rc;
-
- rc = labelcl_info_get_LBL(de, name, NULL, &info);
- if (rc != 0)
- return(rc);
-
- rc = labelcl_info_replace_LBL(de, name,
- INFO_DESC_PRIVATE, info, NULL, NULL);
-
- return(rc);
-}
-
-/*
- * hwgraph_path_lookup - return the handle for the given path.
- *
- */
-int
-hwgraph_path_lookup( devfs_handle_t start_vertex_handle,
- char *lookup_path,
- devfs_handle_t *vertex_handle_ptr,
- char **remainder)
-{
- *vertex_handle_ptr = devfs_get_handle(start_vertex_handle, /* start dir */
- lookup_path, /* path */
- 1); /* traverse symlinks */
- if (*vertex_handle_ptr == NULL)
- return(-1);
- else
- return(0);
-}
-
-/*
- * hwgraph_traverse - Find and return the devfs handle starting from de.
- *
- */
-graph_error_t
-hwgraph_traverse(devfs_handle_t de, char *path, devfs_handle_t *found)
-{
- /*
- * get the directory entry (path should end in a directory)
- */
-
- *found = devfs_get_handle(de, /* start dir */
- path, /* path */
- 1); /* traverse symlinks */
- if (*found == NULL)
- return(GRAPH_NOT_FOUND);
- else
- return(GRAPH_SUCCESS);
-}
-
-/*
- * hwgraph_path_to_vertex - Return the devfs entry handle for the given
- * pathname .. assume traverse symlinks too!.
- */
-devfs_handle_t
-hwgraph_path_to_vertex(char *path)
-{
- return(devfs_get_handle(NULL, /* start dir */
- path, /* path */
- 1)); /* traverse symlinks */
-}
-
-/*
- * hwgraph_path_to_dev - Returns the devfs_handle_t of the given path ..
- * We only deal with devfs handle and not devfs_handle_t.
-*/
-devfs_handle_t
-hwgraph_path_to_dev(char *path)
-{
- devfs_handle_t de;
-
- de = hwgraph_path_to_vertex(path);
- return(de);
-}
-
-/*
- * hwgraph_block_device_get - return the handle of the block device file.
- * The assumption here is that de is a directory.
-*/
-devfs_handle_t
-hwgraph_block_device_get(devfs_handle_t de)
-{
- return(devfs_get_handle(de, /* start dir */
- "block", /* path */
- 1)); /* traverse symlinks */
-}
-
-/*
- * hwgraph_char_device_get - return the handle of the char device file.
- * The assumption here is that de is a directory.
-*/
-devfs_handle_t
-hwgraph_char_device_get(devfs_handle_t de)
-{
- return(devfs_get_handle(de, /* start dir */
- "char", /* path */
- 1)); /* traverse symlinks */
-}
-
-/*
-** Inventory is now associated with a vertex in the graph. For items that
-** belong in the inventory but have no vertex
-** (e.g. old non-graph-aware drivers), we create a bogus vertex under the
-** INFO_LBL_INVENT name.
-**
-** For historical reasons, we prevent exact duplicate entries from being added
-** to a single vertex.
-*/
-
-/*
- * hwgraph_inventory_add - Adds an inventory entry into de.
- */
-int
-hwgraph_inventory_add( devfs_handle_t de,
- int class,
- int type,
- major_t controller,
- minor_t unit,
- int state)
-{
- inventory_t *pinv = NULL, *old_pinv = NULL, *last_pinv = NULL;
- int rv;
-
- /*
- * Add our inventory data to the list of inventory data
- * associated with this vertex.
- */
-again:
- /* GRAPH_LOCK_UPDATE(&invent_lock); */
- rv = labelcl_info_get_LBL(de,
- INFO_LBL_INVENT,
- NULL, (arbitrary_info_t *)&old_pinv);
- if ((rv != LABELCL_SUCCESS) && (rv != LABELCL_NOT_FOUND))
- goto failure;
-
- /*
- * Seek to end of inventory items associated with this
- * vertex. Along the way, make sure we're not duplicating
- * an inventory item (for compatibility with old add_to_inventory)
- */
- for (;old_pinv; last_pinv = old_pinv, old_pinv = old_pinv->inv_next) {
- if ((int)class != -1 && old_pinv->inv_class != class)
- continue;
- if ((int)type != -1 && old_pinv->inv_type != type)
- continue;
- if ((int)state != -1 && old_pinv->inv_state != state)
- continue;
- if ((int)controller != -1
- && old_pinv->inv_controller != controller)
- continue;
- if ((int)unit != -1 && old_pinv->inv_unit != unit)
- continue;
-
- /* exact duplicate of previously-added inventory item */
- rv = LABELCL_DUP;
- goto failure;
- }
-
- /* Not a duplicate, so we know that we need to add something. */
- if (pinv == NULL) {
- /* Release lock while we wait for memory. */
- /* GRAPH_LOCK_DONE_UPDATE(&invent_lock); */
- pinv = (inventory_t *)kmalloc(sizeof(inventory_t), GFP_KERNEL);
- replace_in_inventory(pinv, class, type, controller, unit, state);
- goto again;
- }
-
- pinv->inv_next = NULL;
- if (last_pinv) {
- last_pinv->inv_next = pinv;
- } else {
- rv = labelcl_info_add_LBL(de, INFO_LBL_INVENT,
- sizeof(inventory_t), (arbitrary_info_t)pinv);
-
- if (!rv)
- goto failure;
- }
-
- /* GRAPH_LOCK_DONE_UPDATE(&invent_lock); */
- return(0);
-
-failure:
- /* GRAPH_LOCK_DONE_UPDATE(&invent_lock); */
- if (pinv)
- kfree(pinv);
- return(rv);
-}
-
-
-/*
- * hwgraph_inventory_remove - Removes an inventory entry.
- *
- * Remove an inventory item associated with a vertex. It is the caller's
- * responsibility to make sure that there are no races between removing
- * inventory from a vertex and simultaneously removing that vertex.
-*/
-int
-hwgraph_inventory_remove( devfs_handle_t de,
- int class,
- int type,
- major_t controller,
- minor_t unit,
- int state)
-{
- inventory_t *pinv = NULL, *last_pinv = NULL, *next_pinv = NULL;
- labelcl_error_t rv;
-
- /*
- * We never remove stuff from ".invent" ..
- */
- if (!de)
- return (-1);
-
- /*
- * Remove our inventory data to the list of inventory data
- * associated with this vertex.
- */
- /* GRAPH_LOCK_UPDATE(&invent_lock); */
- rv = labelcl_info_get_LBL(de,
- INFO_LBL_INVENT,
- NULL, (arbitrary_info_t *)&pinv);
- if (rv != LABELCL_SUCCESS)
- goto failure;
-
- /*
- * Search through inventory items associated with this
- * vertex, looking for a match.
- */
- for (;pinv; pinv = next_pinv) {
- next_pinv = pinv->inv_next;
-
- if(((int)class == -1 || pinv->inv_class == class) &&
- ((int)type == -1 || pinv->inv_type == type) &&
- ((int)state == -1 || pinv->inv_state == state) &&
- ((int)controller == -1 || pinv->inv_controller == controller) &&
- ((int)unit == -1 || pinv->inv_unit == unit)) {
-
- /* Found a matching inventory item. Remove it. */
- if (last_pinv) {
- last_pinv->inv_next = pinv->inv_next;
- } else {
- rv = hwgraph_info_replace_LBL(de, INFO_LBL_INVENT, (arbitrary_info_t)pinv->inv_next, NULL);
- if (rv != LABELCL_SUCCESS)
- goto failure;
- }
-
- pinv->inv_next = NULL; /* sanity */
- kfree(pinv);
- } else
- last_pinv = pinv;
- }
-
- if (last_pinv == NULL) {
- rv = hwgraph_info_remove_LBL(de, INFO_LBL_INVENT, NULL);
- if (rv != LABELCL_SUCCESS)
- goto failure;
- }
-
- rv = LABELCL_SUCCESS;
-
-failure:
- /* GRAPH_LOCK_DONE_UPDATE(&invent_lock); */
- return(rv);
-}
-
-/*
- * hwgraph_inventory_get_next - Get next inventory item associated with the
- * specified vertex.
- *
- * No locking is really needed. We don't yet have the ability
- * to remove inventory items, and new items are always added to
- * the end of a vertex' inventory list.
- *
- * However, a devfs entry can be removed!
-*/
-int
-hwgraph_inventory_get_next(devfs_handle_t de, invplace_t *place, inventory_t **ppinv)
-{
- inventory_t *pinv;
- labelcl_error_t rv;
-
- if (de == NULL)
- return(LABELCL_BAD_PARAM);
-
- if (place->invplace_vhdl == NULL) {
- place->invplace_vhdl = de;
- place->invplace_inv = NULL;
- }
-
- if (de != place->invplace_vhdl)
- return(LABELCL_BAD_PARAM);
-
- if (place->invplace_inv == NULL) {
- /* Just starting on this vertex */
- rv = labelcl_info_get_LBL(de, INFO_LBL_INVENT,
- NULL, (arbitrary_info_t *)&pinv);
- if (rv != LABELCL_SUCCESS)
- return(LABELCL_NOT_FOUND);
-
- } else {
- /* Advance to next item on this vertex */
- pinv = place->invplace_inv->inv_next;
- }
- place->invplace_inv = pinv;
- *ppinv = pinv;
-
- return(LABELCL_SUCCESS);
-}
-
-/*
- * hwgraph_controller_num_get - Returns the controller number in the inventory
- * entry.
- */
-int
-hwgraph_controller_num_get(devfs_handle_t device)
-{
- inventory_t *pinv;
- invplace_t invplace = { NULL, NULL, NULL };
- int val = -1;
- if ((pinv = device_inventory_get_next(device, &invplace)) != NULL) {
- val = (pinv->inv_class == INV_NETWORK)? pinv->inv_unit: pinv->inv_controller;
- }
-#ifdef DEBUG
- /*
- * It does not make any sense to call this on vertexes with multiple
- * inventory structs chained together
- */
- if ( device_inventory_get_next(device, &invplace) != NULL ) {
- printk("Should panic here ... !\n");
-#endif
- return (val);
-}
-
-/*
- * hwgraph_controller_num_set - Sets the controller number in the inventory
- * entry.
- */
-void
-hwgraph_controller_num_set(devfs_handle_t device, int contr_num)
-{
- inventory_t *pinv;
- invplace_t invplace = { NULL, NULL, NULL };
- if ((pinv = device_inventory_get_next(device, &invplace)) != NULL) {
- if (pinv->inv_class == INV_NETWORK)
- pinv->inv_unit = contr_num;
- else {
- if (pinv->inv_class == INV_FCNODE)
- pinv = device_inventory_get_next(device, &invplace);
- if (pinv != NULL)
- pinv->inv_controller = contr_num;
- }
- }
-#ifdef DEBUG
- /*
- * It does not make any sense to call this on vertexes with multiple
- * inventory structs chained together
- */
- if(pinv != NULL)
- ASSERT(device_inventory_get_next(device, &invplace) == NULL);
-#endif
-}
-
-/*
- * Find the canonical name for a given vertex by walking back through
- * connectpt's until we hit the hwgraph root vertex (or until we run
- * out of buffer space or until something goes wrong).
- *
- * COMPATIBILITY FUNCTIONALITY
- * Walks back through 'parents', not necessarily the same as connectpts.
- *
- * Need to resolve the fact that devfs does not return the path from
- * "/" but rather it just stops right before /dev ..
- */
-int
-hwgraph_vertex_name_get(devfs_handle_t vhdl, char *buf, uint buflen)
-{
- char *locbuf;
- int pos;
-
- if (buflen < 1)
- return(-1); /* XXX should be GRAPH_BAD_PARAM ? */
-
- locbuf = kmalloc(buflen, GFP_KERNEL);
-
- pos = devfs_generate_path(vhdl, locbuf, buflen);
- if (pos < 0) {
- kfree(locbuf);
- return pos;
- }
-
- strcpy(buf, &locbuf[pos]);
- kfree(locbuf);
- return 0;
-}
-
-/*
-** vertex_to_name converts a vertex into a canonical name by walking
-** back through connect points until we hit the hwgraph root (or until
-** we run out of buffer space).
-**
-** Usually returns a pointer to the original buffer, filled in as
-** appropriate. If the buffer is too small to hold the entire name,
-** or if anything goes wrong while determining the name, vertex_to_name
-** returns "UnknownDevice".
-*/
-
-#define DEVNAME_UNKNOWN "UnknownDevice"
-
-char *
-vertex_to_name(devfs_handle_t vhdl, char *buf, uint buflen)
-{
- if (hwgraph_vertex_name_get(vhdl, buf, buflen) == GRAPH_SUCCESS)
- return(buf);
- else
- return(DEVNAME_UNKNOWN);
-}
-
-#ifdef LATER
-/*
-** Return the compact node id of the node that ultimately "owns" the specified
-** vertex. In order to do this, we walk back through masters and connect points
-** until we reach a vertex that represents a node.
-*/
-cnodeid_t
-master_node_get(devfs_handle_t vhdl)
-{
- cnodeid_t cnodeid;
- devfs_handle_t master;
-
- for (;;) {
- cnodeid = nodevertex_to_cnodeid(vhdl);
- if (cnodeid != CNODEID_NONE)
- return(cnodeid);
-
- master = device_master_get(vhdl);
-
- /* Check for exceptional cases */
- if (master == vhdl) {
- /* Since we got a reference to the "master" thru
- * device_master_get() we should decrement
- * its reference count by 1
- */
- hwgraph_vertex_unref(master);
- return(CNODEID_NONE);
- }
-
- if (master == GRAPH_VERTEX_NONE) {
- master = hwgraph_connectpt_get(vhdl);
- if ((master == GRAPH_VERTEX_NONE) ||
- (master == vhdl)) {
- if (master == vhdl)
- /* Since we got a reference to the
- * "master" thru
- * hwgraph_connectpt_get() we should
- * decrement its reference count by 1
- */
- hwgraph_vertex_unref(master);
- return(CNODEID_NONE);
- }
- }
-
- vhdl = master;
- /* Decrement the reference to "master" which was got
- * either thru device_master_get() or hwgraph_connectpt_get()
- * above.
- */
- hwgraph_vertex_unref(master);
- }
-}
-
-/*
- * Using the canonical path name to get hold of the desired vertex handle will
- * not work on multi-hub sn0 nodes. Hence, we use the following (slightly
- * convoluted) algorithm.
- *
- * - Start at the vertex corresponding to the driver (provided as input parameter)
- * - Loop till you reach a vertex which has EDGE_LBL_MEMORY
- * - If EDGE_LBL_CONN exists, follow that up.
- * else if EDGE_LBL_MASTER exists, follow that up.
- * else follow EDGE_LBL_DOTDOT up.
- *
- * * We should be at desired hub/heart vertex now *
- * - Follow EDGE_LBL_CONN to the widget vertex.
- *
- * - return vertex handle of this widget.
- */
-devfs_handle_t
-mem_vhdl_get(devfs_handle_t drv_vhdl)
-{
-devfs_handle_t cur_vhdl, cur_upper_vhdl;
-devfs_handle_t tmp_mem_vhdl, mem_vhdl;
-graph_error_t loop_rv;
-
- /* Initializations */
- cur_vhdl = drv_vhdl;
- loop_rv = ~GRAPH_SUCCESS;
-
- /* Loop till current vertex has EDGE_LBL_MEMORY */
- while (loop_rv != GRAPH_SUCCESS) {
-
- if ((hwgraph_edge_get(cur_vhdl, EDGE_LBL_CONN, &cur_upper_vhdl)) == GRAPH_SUCCESS) {
-
- } else if ((hwgraph_edge_get(cur_vhdl, EDGE_LBL_MASTER, &cur_upper_vhdl)) == GRAPH_SUCCESS) {
- } else { /* Follow HWGRAPH_EDGELBL_DOTDOT up */
- (void) hwgraph_edge_get(cur_vhdl, HWGRAPH_EDGELBL_DOTDOT, &cur_upper_vhdl);
- }
-
- cur_vhdl = cur_upper_vhdl;
-
-#if DEBUG && HWG_DEBUG
- printf("Current vhdl %d \n", cur_vhdl);
-#endif /* DEBUG */
-
- loop_rv = hwgraph_edge_get(cur_vhdl, EDGE_LBL_MEMORY, &tmp_mem_vhdl);
- }
-
- /* We should be at desired hub/heart vertex now */
- if ((hwgraph_edge_get(cur_vhdl, EDGE_LBL_CONN, &mem_vhdl)) != GRAPH_SUCCESS)
- return (GRAPH_VERTEX_NONE);
-
- return (mem_vhdl);
-}
-#endif /* LATER */
-
-
-/*
-** Add a char device -- if the driver supports it -- at a specified vertex.
-*/
-graph_error_t
-hwgraph_char_device_add( devfs_handle_t from,
- char *path,
- char *prefix,
- devfs_handle_t *devhdl)
-{
- devfs_handle_t xx = NULL;
-
- printk("WARNING: hwgraph_char_device_add() not supported .. use hwgraph_register.\n");
- *devhdl = xx; // Must set devhdl
- return(GRAPH_SUCCESS);
-}
-
-graph_error_t
-hwgraph_edge_remove(devfs_handle_t from, char *name, devfs_handle_t *toptr)
-{
- printk("WARNING: hwgraph_edge_remove NOT supported.\n");
- return(GRAPH_ILLEGAL_REQUEST);
-}
-
-graph_error_t
-hwgraph_vertex_unref(devfs_handle_t vhdl)
-{
- return(GRAPH_ILLEGAL_REQUEST);
-}
-
-
-EXPORT_SYMBOL(hwgraph_mk_dir);
-EXPORT_SYMBOL(hwgraph_path_add);
-EXPORT_SYMBOL(hwgraph_char_device_add);
-EXPORT_SYMBOL(hwgraph_register);
-EXPORT_SYMBOL(hwgraph_vertex_destroy);
-
-EXPORT_SYMBOL(hwgraph_fastinfo_get);
-EXPORT_SYMBOL(hwgraph_edge_get);
-
-EXPORT_SYMBOL(hwgraph_fastinfo_set);
-EXPORT_SYMBOL(hwgraph_connectpt_set);
-EXPORT_SYMBOL(hwgraph_connectpt_get);
-EXPORT_SYMBOL(hwgraph_edge_get_next);
-EXPORT_SYMBOL(hwgraph_info_add_LBL);
-EXPORT_SYMBOL(hwgraph_info_remove_LBL);
-EXPORT_SYMBOL(hwgraph_info_replace_LBL);
-EXPORT_SYMBOL(hwgraph_info_get_LBL);
-EXPORT_SYMBOL(hwgraph_info_get_exported_LBL);
-EXPORT_SYMBOL(hwgraph_info_get_next_LBL);
-EXPORT_SYMBOL(hwgraph_info_export_LBL);
-EXPORT_SYMBOL(hwgraph_info_unexport_LBL);
-EXPORT_SYMBOL(hwgraph_path_lookup);
-EXPORT_SYMBOL(hwgraph_traverse);
-EXPORT_SYMBOL(hwgraph_path_to_vertex);
-EXPORT_SYMBOL(hwgraph_path_to_dev);
-EXPORT_SYMBOL(hwgraph_block_device_get);
-EXPORT_SYMBOL(hwgraph_char_device_get);
-EXPORT_SYMBOL(hwgraph_vertex_name_get);
+++ /dev/null
-/* $Id$
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Copyright (C) 1992 - 1997, 2000-2002 Silicon Graphics, Inc. All rights reserved.
- */
-
-#include <linux/types.h>
-#include <linux/kernel.h>
-#include <linux/devfs_fs.h>
-#include <linux/devfs_fs_kernel.h>
-#include <asm/sn/sgi.h>
-#include <asm/io.h>
-#include <asm/sn/io.h>
-#include <asm/sn/iograph.h>
-#include <asm/sn/invent.h>
-#include <asm/sn/hcl.h>
-#include <asm/sn/labelcl.h>
-#include <asm/sn/hcl_util.h>
-#include <asm/sn/nodepda.h>
-
-static devfs_handle_t hwgraph_all_cnodes = GRAPH_VERTEX_NONE;
-extern devfs_handle_t hwgraph_root;
-
-
-/*
-** Return the "master" for a given vertex. A master vertex is a
-** controller or adapter or other piece of hardware that the given
-** vertex passes through on the way to the rest of the system.
-*/
-devfs_handle_t
-device_master_get(devfs_handle_t vhdl)
-{
- graph_error_t rc;
- devfs_handle_t master;
-
- rc = hwgraph_edge_get(vhdl, EDGE_LBL_MASTER, &master);
- if (rc == GRAPH_SUCCESS)
- return(master);
- else
- return(GRAPH_VERTEX_NONE);
-}
-
-/*
-** Set the master for a given vertex.
-** Returns 0 on success, non-0 indicates failure
-*/
-int
-device_master_set(devfs_handle_t vhdl, devfs_handle_t master)
-{
- graph_error_t rc;
-
- rc = hwgraph_edge_add(vhdl, master, EDGE_LBL_MASTER);
- return(rc != GRAPH_SUCCESS);
-}
-
-
-/*
-** Return the compact node id of the node that ultimately "owns" the specified
-** vertex. In order to do this, we walk back through masters and connect points
-** until we reach a vertex that represents a node.
-*/
-cnodeid_t
-master_node_get(devfs_handle_t vhdl)
-{
- cnodeid_t cnodeid;
- devfs_handle_t master;
-
- for (;;) {
- cnodeid = nodevertex_to_cnodeid(vhdl);
- if (cnodeid != CNODEID_NONE)
- return(cnodeid);
-
- master = device_master_get(vhdl);
-
- /* Check for exceptional cases */
- if (master == vhdl) {
- /* Since we got a reference to the "master" thru
- * device_master_get() we should decrement
- * its reference count by 1
- */
- return(CNODEID_NONE);
- }
-
- if (master == GRAPH_VERTEX_NONE) {
- master = hwgraph_connectpt_get(vhdl);
- if ((master == GRAPH_VERTEX_NONE) ||
- (master == vhdl)) {
- return(CNODEID_NONE);
- }
- }
-
- vhdl = master;
- }
-}
-
-static devfs_handle_t hwgraph_all_cpuids = GRAPH_VERTEX_NONE;
-extern int maxcpus;
-
-void
-mark_cpuvertex_as_cpu(devfs_handle_t vhdl, cpuid_t cpuid)
-{
- if (cpuid == CPU_NONE)
- return;
-
- (void)labelcl_info_add_LBL(vhdl, INFO_LBL_CPUID, INFO_DESC_EXPORT,
- (arbitrary_info_t)cpuid);
- {
- char cpuid_buffer[10];
-
- if (hwgraph_all_cpuids == GRAPH_VERTEX_NONE) {
- (void)hwgraph_path_add( hwgraph_root,
- EDGE_LBL_CPUNUM,
- &hwgraph_all_cpuids);
- }
-
- sprintf(cpuid_buffer, "%ld", cpuid);
- (void)hwgraph_edge_add( hwgraph_all_cpuids,
- vhdl,
- cpuid_buffer);
- }
-}
-
-/*
-** If the specified device represents a node, return its
-** compact node ID; otherwise, return CNODEID_NONE.
-*/
-cnodeid_t
-nodevertex_to_cnodeid(devfs_handle_t vhdl)
-{
- int rv = 0;
- arbitrary_info_t cnodeid = CNODEID_NONE;
-
- rv = labelcl_info_get_LBL(vhdl, INFO_LBL_CNODEID, NULL, &cnodeid);
-
- return((cnodeid_t)cnodeid);
-}
-
-void
-mark_nodevertex_as_node(devfs_handle_t vhdl, cnodeid_t cnodeid)
-{
- if (cnodeid == CNODEID_NONE)
- return;
-
- cnodeid_to_vertex(cnodeid) = vhdl;
- labelcl_info_add_LBL(vhdl, INFO_LBL_CNODEID, INFO_DESC_EXPORT,
- (arbitrary_info_t)cnodeid);
-
- {
- char cnodeid_buffer[10];
-
- if (hwgraph_all_cnodes == GRAPH_VERTEX_NONE) {
- (void)hwgraph_path_add( hwgraph_root,
- EDGE_LBL_NODENUM,
- &hwgraph_all_cnodes);
- }
-
- sprintf(cnodeid_buffer, "%d", cnodeid);
- (void)hwgraph_edge_add( hwgraph_all_cnodes,
- vhdl,
- cnodeid_buffer);
- }
-}
-
-/*
-** If the specified device represents a CPU, return its cpuid;
-** otherwise, return CPU_NONE.
-*/
-cpuid_t
-cpuvertex_to_cpuid(devfs_handle_t vhdl)
-{
- arbitrary_info_t cpuid = CPU_NONE;
-
- (void)labelcl_info_get_LBL(vhdl, INFO_LBL_CPUID, NULL, &cpuid);
-
- return((cpuid_t)cpuid);
-}
-
-
-/*
-** dev_to_name converts a devfs_handle_t into a canonical name. If the devfs_handle_t
-** represents a vertex in the hardware graph, it is converted in the
-** normal way for vertices. If the devfs_handle_t is an old devfs_handle_t (one which
-** does not represent a hwgraph vertex), we synthesize a name based
-** on major/minor number.
-**
-** Usually returns a pointer to the original buffer, filled in as
-** appropriate. If the buffer is too small to hold the entire name,
-** or if anything goes wrong while determining the name, dev_to_name
-** returns "UnknownDevice".
-*/
-char *
-dev_to_name(devfs_handle_t dev, char *buf, uint buflen)
-{
- return(vertex_to_name(dev, buf, buflen));
-}
-
-
+++ /dev/null
-/* $Id$
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Copyright (C) 1992 - 1997, 2000-2002 Silicon Graphics, Inc. All rights reserved.
- */
-
-#include <linux/config.h>
-#include <linux/types.h>
-#include <linux/slab.h>
-#include <asm/sn/sgi.h>
-#include <asm/sn/io.h>
-#include <asm/sn/iograph.h>
-#include <asm/sn/sn1/hubdev.h>
-#include <asm/sn/sn_private.h>
-#include <asm/sn/invent.h>
-#include <asm/sn/hcl.h>
-#include <asm/sn/labelcl.h>
-
-struct hubdev_callout {
- int (*attach_method)(devfs_handle_t);
- struct hubdev_callout *fp;
-};
-
-typedef struct hubdev_callout hubdev_callout_t;
-
-mutex_t hubdev_callout_mutex;
-hubdev_callout_t *hubdev_callout_list = NULL;
-
-void
-hubdev_init(void)
-{
- mutex_init(&hubdev_callout_mutex);
- hubdev_callout_list = NULL;
-}
-
-void
-hubdev_register(int (*attach_method)(devfs_handle_t))
-{
- hubdev_callout_t *callout;
-
- ASSERT(attach_method);
-
- callout = (hubdev_callout_t *)snia_kmem_zalloc(sizeof(hubdev_callout_t), KM_SLEEP);
- ASSERT(callout);
-
- mutex_lock(&hubdev_callout_mutex);
- /*
- * Insert at the end of the list
- */
- callout->fp = hubdev_callout_list;
- hubdev_callout_list = callout;
- callout->attach_method = attach_method;
- mutex_unlock(&hubdev_callout_mutex);
-}
-
-int
-hubdev_unregister(int (*attach_method)(devfs_handle_t))
-{
- hubdev_callout_t **p;
-
- ASSERT(attach_method);
-
- mutex_lock(&hubdev_callout_mutex);
- /*
- * Remove registry element containing attach_method
- */
- for (p = &hubdev_callout_list; *p != NULL; p = &(*p)->fp) {
- if ((*p)->attach_method == attach_method) {
- hubdev_callout_t* victim = *p;
- *p = (*p)->fp;
- kfree(victim);
- mutex_unlock(&hubdev_callout_mutex);
- return (0);
- }
- }
- mutex_unlock(&hubdev_callout_mutex);
- return (ENOENT);
-}
-
-
-int
-hubdev_docallouts(devfs_handle_t hub)
-{
- hubdev_callout_t *p;
- int errcode;
-
- mutex_lock(&hubdev_callout_mutex);
-
- for (p = hubdev_callout_list; p != NULL; p = p->fp) {
- ASSERT(p->attach_method);
- errcode = (*p->attach_method)(hub);
- if (errcode != 0) {
- mutex_unlock(&hubdev_callout_mutex);
- return (errcode);
- }
- }
- mutex_unlock(&hubdev_callout_mutex);
- return (0);
-}
-
-/*
- * Given a hub vertex, return the base address of the Hspec space
- * for that hub.
- */
-
-#if defined(CONFIG_IA64_SGI_SN1)
-
-caddr_t
-hubdev_prombase_get(devfs_handle_t hub)
-{
- hubinfo_t hinfo = NULL;
-
- hubinfo_get(hub, &hinfo);
- ASSERT(hinfo);
-
- return ((caddr_t)NODE_RBOOT_BASE(hinfo->h_nasid));
-}
-
-cnodeid_t
-hubdev_cnodeid_get(devfs_handle_t hub)
-{
- hubinfo_t hinfo = NULL;
- hubinfo_get(hub, &hinfo);
- ASSERT(hinfo);
-
- return hinfo->h_cnodeid;
-}
-
-#endif /* CONFIG_IA64_SGI_SN1 */
+++ /dev/null
-/* $Id$
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Copyright (C) 1992-1997, 2000-2002 Silicon Graphics, Inc. All rights reserved.
- */
-
-/*
- * hubspc.c - Hub Memory Space Management Driver
- * This driver implements the managers for the following
- * memory resources:
- * 1) reference counters
- */
-
-#include <linux/types.h>
-#include <linux/config.h>
-#include <linux/slab.h>
-#include <asm/sn/sgi.h>
-#include <asm/sn/io.h>
-#include <asm/sn/sn_cpuid.h>
-#include <linux/devfs_fs.h>
-#include <linux/devfs_fs_kernel.h>
-#include <asm/io.h>
-#include <asm/sn/iograph.h>
-#include <asm/sn/invent.h>
-#include <asm/sn/hcl.h>
-#include <asm/sn/labelcl.h>
-#include <asm/sn/sn1/mem_refcnt.h>
-#include <asm/sn/addrs.h>
-#include <asm/sn/snconfig.h>
-#include <asm/sn/sn1/hubspc.h>
-#include <asm/sn/ksys/elsc.h>
-#include <asm/sn/simulator.h>
-
-
-/* Uncomment the following line for tracing */
-/* #define HUBSPC_DEBUG 1 */
-
-int hubspc_devflag = D_MP;
-
-
-/***********************************************************************/
-/* CPU Prom Space */
-/***********************************************************************/
-
-typedef struct cpuprom_info {
- devfs_handle_t prom_dev;
- devfs_handle_t nodevrtx;
- struct cpuprom_info *next;
-}cpuprom_info_t;
-
-static cpuprom_info_t *cpuprom_head;
-static spinlock_t cpuprom_spinlock;
-#define PROM_LOCK() mutex_spinlock(&cpuprom_spinlock)
-#define PROM_UNLOCK(s) mutex_spinunlock(&cpuprom_spinlock, (s))
-
-/*
- * Add prominfo to the linked list maintained.
- */
-void
-prominfo_add(devfs_handle_t hub, devfs_handle_t prom)
-{
- cpuprom_info_t *info;
- unsigned long s;
-
- info = kmalloc(sizeof(cpuprom_info_t), GFP_KERNEL);
- ASSERT(info);
- info->prom_dev = prom;
- info->nodevrtx = hub;
-
-
- s = PROM_LOCK();
- info->next = cpuprom_head;
- cpuprom_head = info;
- PROM_UNLOCK(s);
-}
-
-void
-prominfo_del(devfs_handle_t prom)
-{
- unsigned long s;
- cpuprom_info_t *info;
- cpuprom_info_t **prev;
-
- s = PROM_LOCK();
- prev = &cpuprom_head;
- while ( (info = *prev) ) {
- if (info->prom_dev == prom) {
- *prev = info->next;
- PROM_UNLOCK(s);
- return;
- }
-
- prev = &info->next;
- }
- PROM_UNLOCK(s);
- ASSERT(0);
-}
-
-devfs_handle_t
-prominfo_nodeget(devfs_handle_t prom)
-{
- unsigned long s;
- cpuprom_info_t *info;
-
- s = PROM_LOCK();
- info = cpuprom_head;
- while (info) {
- if(info->prom_dev == prom) {
- PROM_UNLOCK(s);
- return info->nodevrtx;
- }
- info = info->next;
- }
- PROM_UNLOCK(s);
- return 0;
-}
-
-#if defined(CONFIG_IA64_SGI_SN1)
-#define SN_PROMVERSION INV_IP35PROM
-
-/* Add "detailed" labelled inventory information to the
- * prom vertex
- */
-void
-cpuprom_detailed_inventory_info_add(devfs_handle_t prom_dev,devfs_handle_t node)
-{
- invent_miscinfo_t *cpuprom_inventory_info;
- extern invent_generic_t *klhwg_invent_alloc(cnodeid_t cnode,
- int class, int size);
- cnodeid_t cnode = hubdev_cnodeid_get(node);
-
- /* Allocate memory for the extra inventory information
- * for the prom
- */
- cpuprom_inventory_info = (invent_miscinfo_t *)
- klhwg_invent_alloc(cnode, INV_PROM, sizeof(invent_miscinfo_t));
-
- ASSERT(cpuprom_inventory_info);
-
- /* Set the enabled flag so that the hinv interprets this
- * information
- */
- cpuprom_inventory_info->im_gen.ig_flag = INVENT_ENABLED;
- cpuprom_inventory_info->im_type = SN_PROMVERSION;
- /* Store prom revision into inventory information */
- cpuprom_inventory_info->im_rev = IP27CONFIG.pvers_rev;
- cpuprom_inventory_info->im_version = IP27CONFIG.pvers_vers;
-
- /* Store this info as labelled information hanging off the
- * prom device vertex
- */
- hwgraph_info_add_LBL(prom_dev, INFO_LBL_DETAIL_INVENT,
- (arbitrary_info_t) cpuprom_inventory_info);
- /* Export this information so that user programs can get to
- * this by using attr_get()
- */
- hwgraph_info_export_LBL(prom_dev, INFO_LBL_DETAIL_INVENT,
- sizeof(invent_miscinfo_t));
-}
-
-#endif /* CONFIG_IA64_SGI_SN1 */
-
-
-/***********************************************************************/
-/* Base Hub Space Driver */
-/***********************************************************************/
-
-/*
- * hubspc_init
- * Registration of the hubspc devices with the hub manager
- */
-void
-hubspc_init(void)
-{
- /*
- * Register with the hub manager
- */
-
- /* The reference counters */
-#if defined(CONFIG_IA64_SGI_SN1)
- hubdev_register(mem_refcnt_attach);
-#endif
-
-#ifdef CONFIG_IA64_SGI_SN1
- /* L1 system controller link */
- if ( !IS_RUNNING_ON_SIMULATOR() ) {
- /* initialize the L1 link */
- extern void l1_init(void);
- l1_init();
- }
-#endif /* CONFIG_IA64_SGI_SN1 */
-#ifdef HUBSPC_DEBUG
- printk("hubspc_init: Completed\n");
-#endif /* HUBSPC_DEBUG */
- /* Initialize spinlocks */
- mutex_spinlock_init(&cpuprom_spinlock);
-}
-
-/* ARGSUSED */
-int
-hubspc_open(devfs_handle_t *devp, mode_t oflag, int otyp, cred_t *crp)
-{
- return (0);
-}
-
-
-/* ARGSUSED */
-int
-hubspc_close(devfs_handle_t dev, int oflag, int otyp, cred_t *crp)
-{
- return (0);
-}
-
-/* ARGSUSED */
-int
-hubspc_map(devfs_handle_t dev, vhandl_t *vt, off_t off, size_t len, uint prot)
-{
- /*REFERENCED*/
- int errcode = 0;
-
- /* check validity of request */
- if( len == 0 ) {
- return -ENXIO;
- }
-
- return errcode;
-}
-
-/* ARGSUSED */
-int
-hubspc_unmap(devfs_handle_t dev, vhandl_t *vt)
-{
- return (0);
-
-}
-
-/* ARGSUSED */
-int
-hubspc_ioctl(devfs_handle_t dev,
- int cmd,
- void *arg,
- int mode,
- cred_t *cred_p,
- int *rvalp)
-{
- return (0);
-
-}
--- /dev/null
+#
+# This file is subject to the terms and conditions of the GNU General Public
+# License. See the file "COPYING" in the main directory of this archive
+# for more details.
+#
+# Copyright (C) 2002-2003 Silicon Graphics, Inc. All Rights Reserved.
+#
+# Makefile for the sn2 io routines.
+
+EXTRA_CFLAGS := -DLITTLE_ENDIAN
+
+obj-y += hcl.o labelcl.o hcl_util.o invent_stub.o \
+ ramfs.o interface.o
--- /dev/null
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * hcl - SGI's Hardware Graph compatibility layer.
+ *
+ * Copyright (C) 1992-1997,2000-2003 Silicon Graphics, Inc. All rights reserved.
+ */
+
+#include <linux/types.h>
+#include <linux/config.h>
+#include <linux/slab.h>
+#include <linux/ctype.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/fs.h>
+#include <linux/string.h>
+#include <linux/sched.h> /* needed for smp_lock.h :( */
+#include <linux/smp_lock.h>
+#include <asm/sn/sgi.h>
+#include <asm/io.h>
+#include <asm/sn/iograph.h>
+#include <asm/sn/hwgfs.h>
+#include <asm/sn/invent.h>
+#include <asm/sn/hcl.h>
+#include <asm/sn/labelcl.h>
+#include <asm/sn/simulator.h>
+
+#define HCL_NAME "SGI-HWGRAPH COMPATIBILITY DRIVER"
+#define HCL_TEMP_NAME "HCL_TEMP_NAME_USED_FOR_HWGRAPH_VERTEX_CREATE"
+#define HCL_TEMP_NAME_LEN 44
+#define HCL_VERSION "1.0"
+
+#define vertex_hdl_t hwgfs_handle_t
+vertex_hdl_t hwgraph_root;
+vertex_hdl_t linux_busnum;
+
+extern void pci_bus_cvlink_init(void);
+
+/*
+ * Debug flag definition.
+ */
+#define OPTION_NONE 0x00
+#define HCL_DEBUG_NONE 0x00000
+#define HCL_DEBUG_ALL 0x0ffff
+#if defined(CONFIG_HCL_DEBUG)
+static unsigned int hcl_debug_init __initdata = HCL_DEBUG_NONE;
+#endif
+static unsigned int hcl_debug = HCL_DEBUG_NONE;
+#if defined(CONFIG_HCL_DEBUG) && !defined(MODULE)
+static unsigned int boot_options = OPTION_NONE;
+#endif
+
+/*
+ * Some Global definitions.
+ */
+vertex_hdl_t hcl_handle;
+
+invplace_t invplace_none = {
+ GRAPH_VERTEX_NONE,
+ GRAPH_VERTEX_PLACE_NONE,
+ NULL
+};
+
+/*
+ * HCL device driver.
+ * The purpose of this device driver is to provide a facility
+ * for User Level Apps e.g. hinv, ioconfig etc. an ioctl path
+ * to manipulate label entries without having to implement
+ * system call interfaces. This methodology will enable us to
+ * make this feature module loadable.
+ */
+static int hcl_open(struct inode * inode, struct file * filp)
+{
+ if (hcl_debug) {
+ printk("HCL: hcl_open called.\n");
+ }
+
+ return(0);
+
+}
+
+static int hcl_close(struct inode * inode, struct file * filp)
+{
+
+ if (hcl_debug) {
+ printk("HCL: hcl_close called.\n");
+ }
+
+ return(0);
+
+}
+
+static int hcl_ioctl(struct inode * inode, struct file * file,
+ unsigned int cmd, unsigned long arg)
+{
+
+ if (hcl_debug) {
+ printk("HCL: hcl_ioctl called.\n");
+ }
+
+ switch (cmd) {
+ default:
+ if (hcl_debug) {
+ printk("HCL: hcl_ioctl cmd = 0x%x\n", cmd);
+ }
+ }
+
+ return(0);
+
+}
+
+struct file_operations hcl_fops = {
+ .owner = (struct module *)0,
+ .ioctl = hcl_ioctl,
+ .open = hcl_open,
+ .release = hcl_close,
+};
+
+
+/*
+ * init_hcl() - Boot time initialization.
+ *
+ */
+int __init init_hcl(void)
+{
+ extern void string_table_init(struct string_table *);
+ extern struct string_table label_string_table;
+ extern int init_ifconfig_net(void);
+ extern int init_ioconfig_bus(void);
+ extern int init_hwgfs_fs(void);
+ int rv = 0;
+
+ if (IS_RUNNING_ON_SIMULATOR()) {
+ extern u64 klgraph_addr[];
+ klgraph_addr[0] = 0xe000003000030000;
+ }
+
+ init_hwgfs_fs();
+
+ /*
+ * Create the hwgraph_root.
+ */
+ rv = hwgraph_path_add(NULL, EDGE_LBL_HW, &hwgraph_root);
+ if (rv)
+ printk ("WARNING: init_hcl: Failed to create hwgraph_root. Error = %d.\n", rv);
+
+ /*
+ * Create the hcl driver to support inventory entry manipulations.
+ *
+ */
+ hcl_handle = hwgraph_register(hwgraph_root, ".hcl",
+ 0, 0,
+ 0, 0,
+ S_IFCHR | S_IRUSR | S_IWUSR | S_IRGRP, 0, 0,
+ &hcl_fops, NULL);
+
+ if (hcl_handle == NULL) {
+ panic("HCL: Unable to create HCL Driver in init_hcl().\n");
+ return(0);
+ }
+
+ /*
+ * Initialize the HCL string table.
+ */
+
+ string_table_init(&label_string_table);
+
+ /*
+ * Create the directory that links Linux bus numbers to our Xwidget.
+ */
+ rv = hwgraph_path_add(hwgraph_root, EDGE_LBL_LINUX_BUS, &linux_busnum);
+ if (linux_busnum == NULL) {
+ panic("HCL: Unable to create %s\n", EDGE_LBL_LINUX_BUS);
+ return(0);
+ }
+
+ pci_bus_cvlink_init();
+
+ /*
+	 * Initialize the ifconfig_net driver that does persistent
+	 * naming of network devices.
+ */
+ init_ifconfig_net();
+ init_ioconfig_bus();
+
+ return(0);
+
+}
+
+
+/*
+ * hcl_setup() - Process boot time parameters if given.
+ * "hcl="
+ * This routine gets called only if "hcl=" is given in the
+ * boot line and before init_hcl().
+ *
+ * We currently do not have any boot options .. when we do,
+ * functionalities can be added here.
+ *
+ */
+static int __init hcl_setup(char *str)
+{
+ while ( (*str != '\0') && !isspace (*str) )
+ {
+#ifdef CONFIG_HCL_DEBUG
+ if (strncmp (str, "all", 3) == 0) {
+ hcl_debug_init |= HCL_DEBUG_ALL;
+ str += 3;
+ } else
+ return 0;
+#endif
+ if (*str != ',') return 0;
+ ++str;
+ }
+
+ return 1;
+
+}
+
+__setup("hcl=", hcl_setup);
+
+
+/*
+ * hwgraph_fastinfo_set - Set device specific "fast information".
+ *
+ * Stores 'fastinfo' at the HWGRAPH_FASTINFO index of the vertex's
+ * labelcl info; any previous value is overwritten (replace semantics,
+ * old value discarded via the NULL out-parameter).
+ */
+void
+hwgraph_fastinfo_set(vertex_hdl_t de, arbitrary_info_t fastinfo)
+{
+	labelcl_info_replace_IDX(de, HWGRAPH_FASTINFO, fastinfo, NULL);
+}
+
+
+/*
+ * hwgraph_fastinfo_get - Get device specific "fast information".
+ *
+ * Returns the value stored at HWGRAPH_FASTINFO for the vertex,
+ * 0 when no fastinfo has been recorded (labelcl lookup fails), or
+ * -1 (with a warning and stack dump) when the handle is NULL.
+ */
+arbitrary_info_t
+hwgraph_fastinfo_get(vertex_hdl_t de)
+{
+	arbitrary_info_t fastinfo;
+	int rv;
+
+	if (!de) {
+		printk(KERN_WARNING "HCL: hwgraph_fastinfo_get handle given is NULL.\n");
+		dump_stack();
+		return(-1);
+	}
+
+	rv = labelcl_info_get_IDX(de, HWGRAPH_FASTINFO, &fastinfo);
+	if (rv == 0)
+		return(fastinfo);
+
+	return(0);
+}
+
+
+/*
+ * hwgraph_connectpt_set - Sets the connect point handle in de to the
+ * given connect_de handle. By default, the connect point of the
+ * node is the parent. This effectively changes this assumption.
+ *
+ * Returns -1 for a NULL handle, otherwise the labelcl layer's status.
+ */
+int
+hwgraph_connectpt_set(vertex_hdl_t de, vertex_hdl_t connect_de)
+{
+	int rv;
+
+	if (!de)
+		return(-1);
+
+	rv = labelcl_info_connectpt_set(de, connect_de);
+
+	return(rv);
+}
+
+
+/*
+ * hwgraph_connectpt_get: Returns the entry's connect point.
+ *
+ * Returns NULL when no HWGRAPH_CONNECTPT value has been recorded for
+ * the vertex (labelcl_info_get_IDX() lookup fails).
+ */
+vertex_hdl_t
+hwgraph_connectpt_get(vertex_hdl_t de)
+{
+	int rv;
+	arbitrary_info_t info;
+	vertex_hdl_t connect;
+
+	rv = labelcl_info_get_IDX(de, HWGRAPH_CONNECTPT, &info);
+	if (rv != 0) {
+		return(NULL);
+	}
+
+	connect = (vertex_hdl_t)info;
+	return(connect);
+
+}
+
+
+/*
+ * hwgraph_mk_dir - Creates a directory entry.
+ */
+vertex_hdl_t
+hwgraph_mk_dir(vertex_hdl_t de, const char *name,
+ unsigned int namelen, void *info)
+{
+
+ int rv;
+ labelcl_info_t *labelcl_info = NULL;
+ vertex_hdl_t new_handle = NULL;
+ vertex_hdl_t parent = NULL;
+
+ /*
+	 * Create the device info structure for hwgraph compatibility support.
+ */
+ labelcl_info = labelcl_info_create();
+ if (!labelcl_info)
+ return(NULL);
+
+ /*
+ * Create an entry.
+ */
+ new_handle = hwgfs_mk_dir(de, name, (void *)labelcl_info);
+ if (!new_handle) {
+ labelcl_info_destroy(labelcl_info);
+ return(NULL);
+ }
+
+ /*
+ * Get the parent handle.
+ */
+ parent = hwgfs_get_parent (new_handle);
+
+ /*
+ * To provide the same semantics as the hwgraph, set the connect point.
+ */
+ rv = hwgraph_connectpt_set(new_handle, parent);
+ if (!rv) {
+ /*
+ * We need to clean up!
+ */
+ }
+
+ /*
+ * If the caller provides a private data pointer, save it in the
+ * labelcl info structure(fastinfo). This can be retrieved via
+ * hwgraph_fastinfo_get()
+ */
+ if (info)
+ hwgraph_fastinfo_set(new_handle, (arbitrary_info_t)info);
+
+ return(new_handle);
+
+}
+
+/*
+ * hwgraph_path_add - Create a directory node with the given path starting
+ * from the given fromv.
+ *
+ * A NULL fromv means "start at the hwgraph root".  If the path already
+ * exists, *new_de is simply pointed at the existing entry so that its
+ * labelcl_info is not clobbered.  Returns 0 on success, -1 on failure.
+ */
+int
+hwgraph_path_add(vertex_hdl_t fromv,
+		 char *path,
+		 vertex_hdl_t *new_de)
+{
+
+	unsigned int namelen = strlen(path);
+	int rv;
+
+	/*
+	 * We need to handle the case when fromv is NULL ..
+	 * in this case we need to create the path from the
+	 * hwgraph root!
+	 */
+	if (fromv == NULL)
+		fromv = hwgraph_root;
+
+	/*
+	 * check the entry doesn't already exist, if it does
+	 * then we simply want new_de to point to it (otherwise
+	 * we'll overwrite the existing labelcl_info struct)
+	 */
+	rv = hwgraph_edge_get(fromv, path, new_de);
+	if (rv) {	/* couldn't find entry so we create it */
+		*new_de = hwgraph_mk_dir(fromv, path, namelen, NULL);
+		/* NOTE(review): this tests the 'new_de' pointer argument
+		 * itself, not '*new_de'; a failed hwgraph_mk_dir() (NULL
+		 * result) is therefore reported as success -- confirm
+		 * whether '*new_de == NULL' was intended.
+		 */
+		if (new_de == NULL)
+			return(-1);
+		else
+			return(0);
+	}
+	else
+		return(0);
+
+}
+
+/*
+ * hwgraph_register - Creates a special device file.
+ *
+ */
+vertex_hdl_t
+hwgraph_register(vertex_hdl_t de, const char *name,
+ unsigned int namelen, unsigned int flags,
+ unsigned int major, unsigned int minor,
+ umode_t mode, uid_t uid, gid_t gid,
+ struct file_operations *fops,
+ void *info)
+{
+
+ vertex_hdl_t new_handle = NULL;
+
+ /*
+ * Create an entry.
+ */
+ new_handle = hwgfs_register(de, name, flags, major,
+ minor, mode, fops, info);
+
+ return(new_handle);
+
+}
+
+
+/*
+ * hwgraph_mk_symlink - Create a symbolic link.
+ */
+int
+hwgraph_mk_symlink(vertex_hdl_t de, const char *name, unsigned int namelen,
+ unsigned int flags, const char *link, unsigned int linklen,
+ vertex_hdl_t *handle, void *info)
+{
+
+ void *labelcl_info = NULL;
+ int status = 0;
+ vertex_hdl_t new_handle = NULL;
+
+ /*
+	 * Create the labelcl info structure for hwgraph compatibility support.
+ */
+ labelcl_info = labelcl_info_create();
+ if (!labelcl_info)
+ return(-1);
+
+ /*
+ * Create a symbolic link.
+ */
+ status = hwgfs_mk_symlink(de, name, flags, link,
+ &new_handle, labelcl_info);
+ if ( (!new_handle) || (!status) ){
+ labelcl_info_destroy((labelcl_info_t *)labelcl_info);
+ return(-1);
+ }
+
+ /*
+ * If the caller provides a private data pointer, save it in the
+ * labelcl info structure(fastinfo). This can be retrieved via
+ * hwgraph_fastinfo_get()
+ */
+ if (info)
+ hwgraph_fastinfo_set(new_handle, (arbitrary_info_t)info);
+
+ *handle = new_handle;
+ return(0);
+
+}
+
+/*
+ * hwgraph_vertex_destroy - Destroy the entry
+ */
+int
+hwgraph_vertex_destroy(vertex_hdl_t de)
+{
+
+ void *labelcl_info = NULL;
+
+ labelcl_info = hwgfs_get_info(de);
+ hwgfs_unregister(de);
+
+ if (labelcl_info)
+ labelcl_info_destroy((labelcl_info_t *)labelcl_info);
+
+ return(0);
+}
+
+#if 0
+/*
+ * hwgraph_edge_add - This routine has changed from the original context.
+ * All it does now is to create a symbolic link from "from" to "to".
+ */
+/* ARGSUSED */
+int
+hwgraph_edge_add(vertex_hdl_t from, vertex_hdl_t to, char *name)
+{
+
+ char *path, *link;
+ vertex_hdl_t handle = NULL;
+ int rv, i;
+
+ handle = hwgfs_find_handle(from, name, 0, 0, 0, 1);
+ if (handle) {
+ return(0);
+ }
+
+ path = kmalloc(1024, GFP_KERNEL);
+ memset(path, 0x0, 1024);
+ link = kmalloc(1024, GFP_KERNEL);
+ memset(path, 0x0, 1024);
+ i = hwgfs_generate_path (to, link, 1024);
+ rv = hwgfs_mk_symlink (from, (const char *)name,
+ DEVFS_FL_DEFAULT, link,
+ &handle, NULL);
+ return(0);
+
+
+}
+#endif
+
+int
+hwgraph_edge_add(vertex_hdl_t from, vertex_hdl_t to, char *name)
+{
+
+ char *path, *link;
+ char *s1;
+ char *index;
+ vertex_hdl_t handle = NULL;
+ int rv;
+ int i, count;
+
+ path = kmalloc(1024, GFP_KERNEL);
+ memset((char *)path, 0x0, 1024);
+ link = kmalloc(1024, GFP_KERNEL);
+ memset((char *)link, 0x0, 1024);
+
+ i = hwgfs_generate_path (from, path, 1024);
+ s1 = (char *)path;
+ count = 0;
+ while (1) {
+ index = strstr (s1, "/");
+ if (index) {
+ count++;
+ s1 = ++index;
+ } else {
+ count++;
+ break;
+ }
+ }
+
+ for (i = 0; i < count; i++) {
+ strcat((char *)link,"../");
+ }
+
+ memset(path, 0x0, 1024);
+ i = hwgfs_generate_path (to, path, 1024);
+ strcat((char *)link, (char *)path);
+
+ /*
+ * Otherwise, just create a symlink to the vertex.
+	 * In this case the vertex was previously created with a REAL pathname.
+ */
+ rv = hwgfs_mk_symlink (from, (const char *)name,
+ DEVFS_FL_DEFAULT, link,
+ &handle, NULL);
+ kfree(path);
+ kfree(link);
+
+ return(rv);
+
+
+}
+
+/* ARGSUSED */
+int
+hwgraph_edge_get(vertex_hdl_t from, char *name, vertex_hdl_t *toptr)
+{
+
+ vertex_hdl_t target_handle = NULL;
+
+ if (name == NULL)
+ return(-1);
+
+ if (toptr == NULL)
+ return(-1);
+
+ /*
+ * If the name is "." just return the current entry handle.
+ */
+ if (!strcmp(name, HWGRAPH_EDGELBL_DOT)) {
+ if (toptr) {
+ *toptr = from;
+ }
+ } else if (!strcmp(name, HWGRAPH_EDGELBL_DOTDOT)) {
+ /*
+ * Hmmm .. should we return the connect point or parent ..
+ * see in hwgraph, the concept of parent is the connectpt!
+ *
+ * Maybe we should see whether the connectpt is set .. if
+ * not just return the parent!
+ */
+ target_handle = hwgraph_connectpt_get(from);
+ if (target_handle) {
+ /*
+ * Just return the connect point.
+ */
+ *toptr = target_handle;
+ return(0);
+ }
+ target_handle = hwgfs_get_parent(from);
+ *toptr = target_handle;
+
+ } else {
+ target_handle = hwgfs_find_handle (from, name, 0, 0,
+ 0, 1); /* Yes traverse symbolic links */
+ }
+
+ if (target_handle == NULL)
+ return(-1);
+ else
+ *toptr = target_handle;
+
+ return(0);
+}
+
+/*
+ * hwgraph_info_add_LBL - Adds a new label for the device. Mark the info_desc
+ * of the label as INFO_DESC_PRIVATE and store the info in the label.
+ */
+/* ARGSUSED */
+int
+hwgraph_info_add_LBL( vertex_hdl_t de,
+ char *name,
+ arbitrary_info_t info)
+{
+ return(labelcl_info_add_LBL(de, name, INFO_DESC_PRIVATE, info));
+}
+
+/*
+ * hwgraph_info_remove_LBL - Remove the label entry for the device.
+ */
+/* ARGSUSED */
+int
+hwgraph_info_remove_LBL( vertex_hdl_t de,
+ char *name,
+ arbitrary_info_t *old_info)
+{
+ return(labelcl_info_remove_LBL(de, name, NULL, old_info));
+}
+
+/*
+ * hwgraph_info_replace_LBL - replaces an existing label with
+ * a new label info value.
+ */
+/* ARGSUSED */
+int
+hwgraph_info_replace_LBL( vertex_hdl_t de,
+ char *name,
+ arbitrary_info_t info,
+ arbitrary_info_t *old_info)
+{
+ return(labelcl_info_replace_LBL(de, name,
+ INFO_DESC_PRIVATE, info,
+ NULL, old_info));
+}
+/*
+ * hwgraph_info_get_LBL - Get and return the info value in the label of the
+ * device.
+ */
+/* ARGSUSED */
+int
+hwgraph_info_get_LBL(vertex_hdl_t de,
+ char *name,
+ arbitrary_info_t *infop)
+{
+ return(labelcl_info_get_LBL(de, name, NULL, infop));
+}
+
+/*
+ * hwgraph_info_get_exported_LBL - Retrieve the info_desc and info pointer
+ * of the given label for the device. The weird thing is that the label
+ * that matches the name is returned irrespective of the info_desc value!
+ * Do not understand why the word "exported" is used!
+ */
+/* ARGSUSED */
+int
+hwgraph_info_get_exported_LBL(vertex_hdl_t de,
+ char *name,
+ int *export_info,
+ arbitrary_info_t *infop)
+{
+ int rc;
+ arb_info_desc_t info_desc;
+
+ rc = labelcl_info_get_LBL(de, name, &info_desc, infop);
+ if (rc == 0)
+ *export_info = (int)info_desc;
+
+ return(rc);
+}
+
+/*
+ * hwgraph_info_get_next_LBL - Returns the next label info given the
+ * current label entry in place.
+ *
+ * Once again this has no locking or reference count for protection.
+ *
+ */
+/* ARGSUSED */
+int
+hwgraph_info_get_next_LBL(vertex_hdl_t de,
+ char *buf,
+ arbitrary_info_t *infop,
+ labelcl_info_place_t *place)
+{
+ return(labelcl_info_get_next_LBL(de, buf, NULL, infop, place));
+}
+
+/*
+ * hwgraph_info_export_LBL - Retrieve the specified label entry and modify
+ * the info_desc field with the given value in nbytes.
+ */
+/* ARGSUSED */
+int
+hwgraph_info_export_LBL(vertex_hdl_t de, char *name, int nbytes)
+{
+ arbitrary_info_t info;
+ int rc;
+
+ if (nbytes == 0)
+ nbytes = INFO_DESC_EXPORT;
+
+ if (nbytes < 0)
+ return(-1);
+
+ rc = labelcl_info_get_LBL(de, name, NULL, &info);
+ if (rc != 0)
+ return(rc);
+
+ rc = labelcl_info_replace_LBL(de, name,
+ nbytes, info, NULL, NULL);
+
+ return(rc);
+}
+
+/*
+ * hwgraph_info_unexport_LBL - Retrieve the given label entry and change the
+ * label info_desc field to INFO_DESC_PRIVATE.
+ */
+/* ARGSUSED */
+int
+hwgraph_info_unexport_LBL(vertex_hdl_t de, char *name)
+{
+ arbitrary_info_t info;
+ int rc;
+
+ rc = labelcl_info_get_LBL(de, name, NULL, &info);
+ if (rc != 0)
+ return(rc);
+
+ rc = labelcl_info_replace_LBL(de, name,
+ INFO_DESC_PRIVATE, info, NULL, NULL);
+
+ return(rc);
+}
+
+/*
+ * hwgraph_path_lookup - return the handle for the given path.
+ *
+ */
+int
+hwgraph_path_lookup(vertex_hdl_t start_vertex_handle,
+ char *lookup_path,
+ vertex_hdl_t *vertex_handle_ptr,
+ char **remainder)
+{
+ *vertex_handle_ptr = hwgfs_find_handle(start_vertex_handle, /* start dir */
+ lookup_path, /* path */
+ 0, /* major */
+ 0, /* minor */
+ 0, /* char | block */
+ 1); /* traverse symlinks */
+ if (*vertex_handle_ptr == NULL)
+ return(-1);
+ else
+ return(0);
+}
+
+/*
+ * hwgraph_traverse - Find and return the handle starting from de.
+ *
+ */
+graph_error_t
+hwgraph_traverse(vertex_hdl_t de, char *path, vertex_hdl_t *found)
+{
+ /*
+ * get the directory entry (path should end in a directory)
+ */
+
+ *found = hwgfs_find_handle(de, /* start dir */
+ path, /* path */
+ 0, /* major */
+ 0, /* minor */
+ 0, /* char | block */
+ 1); /* traverse symlinks */
+ if (*found == NULL)
+ return(GRAPH_NOT_FOUND);
+ else
+ return(GRAPH_SUCCESS);
+}
+
+/*
+ * hwgraph_path_to_vertex - Return the entry handle for the given
+ * pathname .. assume traverse symlinks too!.
+ */
+vertex_hdl_t
+hwgraph_path_to_vertex(char *path)
+{
+ return(hwgfs_find_handle(NULL, /* start dir */
+ path, /* path */
+ 0, /* major */
+ 0, /* minor */
+ 0, /* char | block */
+ 1)); /* traverse symlinks */
+}
+
+/*
+ * hwgraph_inventory_remove - Removes an inventory entry.
+ *
+ * Remove an inventory item associated with a vertex. It is the caller's
+ * responsibility to make sure that there are no races between removing
+ * inventory from a vertex and simultaneously removing that vertex.
+*/
+int
+hwgraph_inventory_remove( vertex_hdl_t de,
+ int class,
+ int type,
+ major_t controller,
+ minor_t unit,
+ int state)
+{
+ return(0); /* Just a Stub for IRIX code. */
+}
+
+/*
+ * Find the canonical name for a given vertex by walking back through
+ * connectpt's until we hit the hwgraph root vertex (or until we run
+ * out of buffer space or until something goes wrong).
+ *
+ * COMPATIBILITY FUNCTIONALITY
+ * Walks back through 'parents', not necessarily the same as connectpts.
+ *
+ * Need to resolve the fact that this does not return the path from
+ * "/" but rather it just stops right before /dev ..
+ */
+int
+hwgraph_vertex_name_get(vertex_hdl_t vhdl, char *buf, uint buflen)
+{
+ char *locbuf;
+ int pos;
+
+ if (buflen < 1)
+ return(-1); /* XXX should be GRAPH_BAD_PARAM ? */
+
+ locbuf = kmalloc(buflen, GFP_KERNEL);
+
+ pos = hwgfs_generate_path(vhdl, locbuf, buflen);
+ if (pos < 0) {
+ kfree(locbuf);
+ return pos;
+ }
+
+ strcpy(buf, &locbuf[pos]);
+ kfree(locbuf);
+ return 0;
+}
+
+/*
+** vertex_to_name converts a vertex into a canonical name by walking
+** back through connect points until we hit the hwgraph root (or until
+** we run out of buffer space).
+**
+** Usually returns a pointer to the original buffer, filled in as
+** appropriate. If the buffer is too small to hold the entire name,
+** or if anything goes wrong while determining the name, vertex_to_name
+** returns "UnknownDevice".
+*/
+
+#define DEVNAME_UNKNOWN "UnknownDevice"
+
+char *
+vertex_to_name(vertex_hdl_t vhdl, char *buf, uint buflen)
+{
+ if (hwgraph_vertex_name_get(vhdl, buf, buflen) == GRAPH_SUCCESS)
+ return(buf);
+ else
+ return(DEVNAME_UNKNOWN);
+}
+
+graph_error_t
+hwgraph_edge_remove(vertex_hdl_t from, char *name, vertex_hdl_t *toptr)
+{
+ return(GRAPH_ILLEGAL_REQUEST);
+}
+
+graph_error_t
+hwgraph_vertex_unref(vertex_hdl_t vhdl)
+{
+ return(GRAPH_ILLEGAL_REQUEST);
+}
+
+
+EXPORT_SYMBOL(hwgraph_mk_dir);
+EXPORT_SYMBOL(hwgraph_path_add);
+EXPORT_SYMBOL(hwgraph_register);
+EXPORT_SYMBOL(hwgraph_vertex_destroy);
+EXPORT_SYMBOL(hwgraph_fastinfo_get);
+EXPORT_SYMBOL(hwgraph_fastinfo_set);
+EXPORT_SYMBOL(hwgraph_connectpt_set);
+EXPORT_SYMBOL(hwgraph_connectpt_get);
+EXPORT_SYMBOL(hwgraph_info_add_LBL);
+EXPORT_SYMBOL(hwgraph_info_remove_LBL);
+EXPORT_SYMBOL(hwgraph_info_replace_LBL);
+EXPORT_SYMBOL(hwgraph_info_get_LBL);
+EXPORT_SYMBOL(hwgraph_info_get_exported_LBL);
+EXPORT_SYMBOL(hwgraph_info_get_next_LBL);
+EXPORT_SYMBOL(hwgraph_info_export_LBL);
+EXPORT_SYMBOL(hwgraph_info_unexport_LBL);
+EXPORT_SYMBOL(hwgraph_path_lookup);
+EXPORT_SYMBOL(hwgraph_traverse);
+EXPORT_SYMBOL(hwgraph_vertex_name_get);
--- /dev/null
+/* $Id$
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1992-1997,2000-2003 Silicon Graphics, Inc. All rights reserved.
+ */
+
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <asm/sn/sgi.h>
+#include <asm/io.h>
+#include <asm/sn/io.h>
+#include <asm/sn/iograph.h>
+#include <asm/sn/hwgfs.h>
+#include <asm/sn/invent.h>
+#include <asm/sn/hcl.h>
+#include <asm/sn/labelcl.h>
+#include <asm/sn/invent.h>
+#include <asm/sn/hcl_util.h>
+#include <asm/sn/nodepda.h>
+
+static vertex_hdl_t hwgraph_all_cnodes = GRAPH_VERTEX_NONE;
+extern vertex_hdl_t hwgraph_root;
+
+
+/*
+** Return the "master" for a given vertex.  A master vertex is a
+** controller or adapter or other piece of hardware that the given
+** vertex passes through on the way to the rest of the system.
+**
+** Implemented as a lookup of the EDGE_LBL_MASTER edge; returns
+** GRAPH_VERTEX_NONE when no master edge has been recorded.
+*/
+vertex_hdl_t
+device_master_get(vertex_hdl_t vhdl)
+{
+	graph_error_t rc;
+	vertex_hdl_t master;
+
+	rc = hwgraph_edge_get(vhdl, EDGE_LBL_MASTER, &master);
+	if (rc == GRAPH_SUCCESS)
+		return(master);
+	else
+		return(GRAPH_VERTEX_NONE);
+}
+
+/*
+** Set the master for a given vertex.
+** Returns 0 on success, non-0 indicates failure
+**
+** Implemented by adding an EDGE_LBL_MASTER edge (a symlink in the
+** hwgfs representation) from vhdl to master.
+*/
+int
+device_master_set(vertex_hdl_t vhdl, vertex_hdl_t master)
+{
+	graph_error_t rc;
+
+	rc = hwgraph_edge_add(vhdl, master, EDGE_LBL_MASTER);
+	return(rc != GRAPH_SUCCESS);
+}
+
+
+/*
+** Return the compact node id of the node that ultimately "owns" the specified
+** vertex. In order to do this, we walk back through masters and connect points
+** until we reach a vertex that represents a node.
+*/
+cnodeid_t
+master_node_get(vertex_hdl_t vhdl)
+{
+ cnodeid_t cnodeid;
+ vertex_hdl_t master;
+
+ for (;;) {
+ cnodeid = nodevertex_to_cnodeid(vhdl);
+ if (cnodeid != CNODEID_NONE)
+ return(cnodeid);
+
+ master = device_master_get(vhdl);
+
+ /* Check for exceptional cases */
+ if (master == vhdl) {
+ /* Since we got a reference to the "master" thru
+ * device_master_get() we should decrement
+ * its reference count by 1
+ */
+ return(CNODEID_NONE);
+ }
+
+ if (master == GRAPH_VERTEX_NONE) {
+ master = hwgraph_connectpt_get(vhdl);
+ if ((master == GRAPH_VERTEX_NONE) ||
+ (master == vhdl)) {
+ return(CNODEID_NONE);
+ }
+ }
+
+ vhdl = master;
+ }
+}
+
+static vertex_hdl_t hwgraph_all_cpuids = GRAPH_VERTEX_NONE;
+extern int maxcpus;
+
+void
+mark_cpuvertex_as_cpu(vertex_hdl_t vhdl, cpuid_t cpuid)
+{
+ if (cpuid == CPU_NONE)
+ return;
+
+ (void)labelcl_info_add_LBL(vhdl, INFO_LBL_CPUID, INFO_DESC_EXPORT,
+ (arbitrary_info_t)cpuid);
+ {
+ char cpuid_buffer[10];
+
+ if (hwgraph_all_cpuids == GRAPH_VERTEX_NONE) {
+ (void)hwgraph_path_add( hwgraph_root,
+ EDGE_LBL_CPUNUM,
+ &hwgraph_all_cpuids);
+ }
+
+ sprintf(cpuid_buffer, "%ld", cpuid);
+ (void)hwgraph_edge_add( hwgraph_all_cpuids,
+ vhdl,
+ cpuid_buffer);
+ }
+}
+
+/*
+** If the specified device represents a node, return its
+** compact node ID; otherwise, return CNODEID_NONE.
+**
+** Reads the INFO_LBL_CNODEID label; 'cnodeid' keeps its CNODEID_NONE
+** initializer when the label is absent, which is why the lookup
+** status 'rv' is never checked.
+*/
+cnodeid_t
+nodevertex_to_cnodeid(vertex_hdl_t vhdl)
+{
+	int rv = 0;
+	arbitrary_info_t cnodeid = CNODEID_NONE;
+
+	rv = labelcl_info_get_LBL(vhdl, INFO_LBL_CNODEID, NULL, &cnodeid);
+
+	return((cnodeid_t)cnodeid);
+}
+
+void
+mark_nodevertex_as_node(vertex_hdl_t vhdl, cnodeid_t cnodeid)
+{
+ if (cnodeid == CNODEID_NONE)
+ return;
+
+ cnodeid_to_vertex(cnodeid) = vhdl;
+ labelcl_info_add_LBL(vhdl, INFO_LBL_CNODEID, INFO_DESC_EXPORT,
+ (arbitrary_info_t)cnodeid);
+
+ {
+ char cnodeid_buffer[10];
+
+ if (hwgraph_all_cnodes == GRAPH_VERTEX_NONE) {
+ (void)hwgraph_path_add( hwgraph_root,
+ EDGE_LBL_NODENUM,
+ &hwgraph_all_cnodes);
+ }
+
+ sprintf(cnodeid_buffer, "%d", cnodeid);
+ (void)hwgraph_edge_add( hwgraph_all_cnodes,
+ vhdl,
+ cnodeid_buffer);
+ }
+}
+
+/*
+** If the specified device represents a CPU, return its cpuid;
+** otherwise, return CPU_NONE.
+*/
+cpuid_t
+cpuvertex_to_cpuid(vertex_hdl_t vhdl)
+{
+ arbitrary_info_t cpuid = CPU_NONE;
+
+ (void)labelcl_info_get_LBL(vhdl, INFO_LBL_CPUID, NULL, &cpuid);
+
+ return((cpuid_t)cpuid);
+}
+
+
+/*
+** dev_to_name converts a vertex_hdl_t into a canonical name. If the vertex_hdl_t
+** represents a vertex in the hardware graph, it is converted in the
+** normal way for vertices. If the vertex_hdl_t is an old vertex_hdl_t (one which
+** does not represent a hwgraph vertex), we synthesize a name based
+** on major/minor number.
+**
+** Usually returns a pointer to the original buffer, filled in as
+** appropriate. If the buffer is too small to hold the entire name,
+** or if anything goes wrong while determining the name, dev_to_name
+** returns "UnknownDevice".
+*/
+char *
+dev_to_name(vertex_hdl_t dev, char *buf, uint buflen)
+{
+ return(vertex_to_name(dev, buf, buflen));
+}
+
+
--- /dev/null
+/*
+ * Copyright (c) 2003 Silicon Graphics, Inc. All Rights Reserved.
+ *
+ * Portions based on Adam Richter's smalldevfs and thus
+ * Copyright 2002-2003 Yggdrasil Computing, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it would be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * Further, this software is distributed without any warranty that it is
+ * free of the rightful claim of any third person regarding infringement
+ * or the like. Any license provided herein, whether implied or
+ * otherwise, applies only to this software file. Patent licenses, if
+ * any, provided herein do not apply to combinations of this program with
+ * other software, or any other product whatsoever.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write the Free Software Foundation, Inc., 59
+ * Temple Place - Suite 330, Boston MA 02111-1307, USA.
+ *
+ * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy,
+ * Mountain View, CA 94043, or:
+ *
+ * http://www.sgi.com
+ *
+ * For further information regarding this notice, see:
+ *
+ * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/fs.h>
+#include <linux/mount.h>
+#include <linux/namei.h>
+#include <linux/string.h>
+#include <linux/slab.h>
+#include <asm/sn/hwgfs.h>
+
+
+extern struct vfsmount *hwgfs_vfsmount;
+
+/* TODO: Move this to some .h file or, more likely, use a slightly
+ different interface from lookup_create. */
+extern struct dentry *lookup_create(struct nameidata *nd, int is_dir);
+
+static int
+walk_parents_mkdir(
+ const char **path,
+ struct nameidata *nd,
+ int is_dir)
+{
+ char *slash;
+ char buf[strlen(*path)+1];
+ int error;
+
+ while ((slash = strchr(*path, '/')) != NULL) {
+ int len = slash - *path;
+ memcpy(buf, *path, len);
+ buf[len] = '\0';
+
+ error = link_path_walk(buf, nd);
+ if (unlikely(error))
+ return error;
+
+ nd->dentry = lookup_create(nd, is_dir);
+ if (unlikely(IS_ERR(nd->dentry)))
+ return PTR_ERR(nd->dentry);
+
+ if (!nd->dentry->d_inode)
+ error = vfs_mkdir(nd->dentry->d_parent->d_inode,
+ nd->dentry, 0755);
+
+ up(&nd->dentry->d_parent->d_inode->i_sem);
+ if (unlikely(error))
+ return error;
+
+ *path += len + 1;
+ }
+
+ return 0;
+}
+
+/* On success, returns with parent_inode->i_sem taken. */
+static int
+hwgfs_decode(
+ hwgfs_handle_t dir,
+ const char *name,
+ int is_dir,
+ struct inode **parent_inode,
+ struct dentry **dentry)
+{
+ struct nameidata nd;
+ int error;
+
+ if (!dir)
+ dir = hwgfs_vfsmount->mnt_sb->s_root;
+
+ memset(&nd, 0, sizeof(nd));
+ nd.flags = LOOKUP_PARENT;
+ nd.mnt = mntget(hwgfs_vfsmount);
+ nd.dentry = dget(dir);
+
+ error = walk_parents_mkdir(&name, &nd, is_dir);
+ if (unlikely(error))
+ return error;
+
+ error = link_path_walk(name, &nd);
+ if (unlikely(error))
+ return error;
+
+ *dentry = lookup_create(&nd, is_dir);
+
+ if (unlikely(IS_ERR(*dentry)))
+ return PTR_ERR(*dentry);
+ *parent_inode = (*dentry)->d_parent->d_inode;
+ return 0;
+}
+
+static int
+path_len(
+ struct dentry *de,
+ struct dentry *root)
+{
+ int len = 0;
+
+ while (de != root) {
+ len += de->d_name.len + 1; /* count the '/' */
+ de = de->d_parent;
+ }
+ return len; /* -1 because we omit the leading '/',
+ +1 because we include trailing '\0' */
+}
+
+int
+hwgfs_generate_path(
+ hwgfs_handle_t de,
+ char *path,
+ int buflen)
+{
+ struct dentry *hwgfs_root;
+ int len;
+ char *path_orig = path;
+
+ if (unlikely(de == NULL))
+ return -EINVAL;
+
+ hwgfs_root = hwgfs_vfsmount->mnt_sb->s_root;
+ if (unlikely(de == hwgfs_root))
+ return -EINVAL;
+
+ spin_lock(&dcache_lock);
+ len = path_len(de, hwgfs_root);
+ if (len > buflen) {
+ spin_unlock(&dcache_lock);
+ return -ENAMETOOLONG;
+ }
+
+ path += len - 1;
+ *path = '\0';
+
+ for (;;) {
+ path -= de->d_name.len;
+ memcpy(path, de->d_name.name, de->d_name.len);
+ de = de->d_parent;
+ if (de == hwgfs_root)
+ break;
+ *(--path) = '/';
+ }
+
+ spin_unlock(&dcache_lock);
+ BUG_ON(path != path_orig);
+ return 0;
+}
+
+hwgfs_handle_t
+hwgfs_register(
+ hwgfs_handle_t dir,
+ const char *name,
+ unsigned int flags,
+ unsigned int major,
+ unsigned int minor,
+ umode_t mode,
+ void *ops,
+ void *info)
+{
+ dev_t devnum = MKDEV(major, minor);
+ struct inode *parent_inode;
+ struct dentry *dentry;
+ int error;
+
+ error = hwgfs_decode(dir, name, 0, &parent_inode, &dentry);
+ if (likely(!error)) {
+ error = vfs_mknod(parent_inode, dentry, mode, devnum);
+ if (likely(!error)) {
+ /*
+ * Do this inside parents i_sem to avoid racing
+ * with lookups.
+ */
+ if (S_ISCHR(mode))
+ dentry->d_inode->i_fop = ops;
+ dentry->d_fsdata = info;
+ up(&parent_inode->i_sem);
+ } else {
+ up(&parent_inode->i_sem);
+ dput(dentry);
+ dentry = NULL;
+ }
+ }
+
+ return dentry;
+}
+
+int
+hwgfs_mk_symlink(
+ hwgfs_handle_t dir,
+ const char *name,
+ unsigned int flags,
+ const char *link,
+ hwgfs_handle_t *handle,
+ void *info)
+{
+ struct inode *parent_inode;
+ struct dentry *dentry;
+ int error;
+
+ error = hwgfs_decode(dir, name, 0, &parent_inode, &dentry);
+ if (likely(!error)) {
+ error = vfs_symlink(parent_inode, dentry, link);
+ dentry->d_fsdata = info;
+ if (handle)
+ *handle = dentry;
+ up(&parent_inode->i_sem);
+ /* dput(dentry); */
+ }
+ return error;
+}
+
+hwgfs_handle_t
+hwgfs_mk_dir(
+ hwgfs_handle_t dir,
+ const char *name,
+ void *info)
+{
+ struct inode *parent_inode;
+ struct dentry *dentry;
+ int error;
+
+ error = hwgfs_decode(dir, name, 1, &parent_inode, &dentry);
+ if (likely(!error)) {
+ error = vfs_mkdir(parent_inode, dentry, 0755);
+ up(&parent_inode->i_sem);
+
+ if (unlikely(error)) {
+ dput(dentry);
+ dentry = NULL;
+ } else {
+ dentry->d_fsdata = info;
+ }
+ }
+ return dentry;
+}
+
+void
+hwgfs_unregister(
+ hwgfs_handle_t de)
+{
+ struct inode *parent_inode = de->d_parent->d_inode;
+
+ if (S_ISDIR(de->d_inode->i_mode))
+ vfs_rmdir(parent_inode, de);
+ else
+ vfs_unlink(parent_inode, de);
+}
+
+/* XXX: this function is utterly bogus. Every use of it is racy and the
+ prototype is stupid. You have been warned. --hch. */
+hwgfs_handle_t
+hwgfs_find_handle(
+ hwgfs_handle_t base,
+ const char *name,
+ unsigned int major, /* IGNORED */
+ unsigned int minor, /* IGNORED */
+ char type, /* IGNORED */
+ int traverse_symlinks)
+{
+ struct dentry *dentry = NULL;
+ struct nameidata nd;
+ int error;
+
+ BUG_ON(*name=='/');
+
+ memset(&nd, 0, sizeof(nd));
+
+ nd.mnt = mntget(hwgfs_vfsmount);
+ nd.dentry = dget(base ? base : hwgfs_vfsmount->mnt_sb->s_root);
+ if (traverse_symlinks)
+ nd.flags = LOOKUP_FOLLOW;
+
+ error = link_path_walk(name, &nd);
+ if (likely(!error)) {
+ dentry = nd.dentry;
+ path_release(&nd); /* stale data from here! */
+ }
+
+ return dentry;
+}
+
+hwgfs_handle_t
+hwgfs_get_parent(
+ hwgfs_handle_t de)
+{
+ struct dentry *parent;
+
+ spin_lock(&de->d_lock);
+ parent = de->d_parent;
+ spin_unlock(&de->d_lock);
+
+ return parent;
+}
+
+int
+hwgfs_set_info(
+ hwgfs_handle_t de,
+ void *info)
+{
+ if (unlikely(de == NULL))
+ return -EINVAL;
+ de->d_fsdata = info;
+ return 0;
+}
+
+void *
+hwgfs_get_info(
+ hwgfs_handle_t de)
+{
+ return de->d_fsdata;
+}
+
+EXPORT_SYMBOL(hwgfs_generate_path);
+EXPORT_SYMBOL(hwgfs_register);
+EXPORT_SYMBOL(hwgfs_unregister);
+EXPORT_SYMBOL(hwgfs_mk_symlink);
+EXPORT_SYMBOL(hwgfs_mk_dir);
+EXPORT_SYMBOL(hwgfs_find_handle);
+EXPORT_SYMBOL(hwgfs_get_parent);
+EXPORT_SYMBOL(hwgfs_set_info);
+EXPORT_SYMBOL(hwgfs_get_info);
--- /dev/null
+/* $Id$
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1992-1997,2000-2003 Silicon Graphics, Inc. All rights reserved.
+ */
+
+/*
+ * Hardware Inventory
+ *
+ * See sys/sn/invent.h for an explanation of the hardware inventory contents.
+ *
+ */
+#include <linux/types.h>
+#include <asm/sn/sgi.h>
+#include <asm/sn/hwgfs.h>
+#include <asm/sn/invent.h>
+#include <asm/sn/hcl.h>
+#include <asm/sn/labelcl.h>
+#include <asm/sn/invent.h>
+
+void
+inventinit(void)
+{
+}
+
+/*
+ * For initializing/updating an inventory entry.
+ */
+void
+replace_in_inventory(
+ inventory_t *pinv, int class, int type,
+ int controller, int unit, int state)
+{
+}
+
+/*
+ * Inventory addition
+ *
+ * XXX NOTE: Currently must be called after dynamic memory allocator is
+ * initialized.
+ *
+ */
+void
+add_to_inventory(int class, int type, int controller, int unit, int state)
+{
+}
+
+
+/*
+ * Inventory retrieval
+ *
+ * These two routines are intended to prevent the caller from having to know
+ * the internal structure of the inventory table.
+ *
+ * The caller of get_next_inventory is supposed to call start_scan_invent
+ * before the first call to get_next_inventory, and the caller is required
+ * to call end_scan_invent after the last call to get_next_inventory.
+ */
+inventory_t *
+get_next_inventory(invplace_t *place)
+{
+ return((inventory_t *) NULL);
+}
+
+/* ARGSUSED */
+int
+get_sizeof_inventory(int abi)
+{
+ return sizeof(inventory_t);
+}
+
+/* Must be called prior to first call to get_next_inventory */
+void
+start_scan_inventory(invplace_t *iplace)
+{
+}
+
+/* Must be called after last call to get_next_inventory */
+void
+end_scan_inventory(invplace_t *iplace)
+{
+}
+
+/*
+ * Hardware inventory scanner.
+ *
+ * Calls fun() for every entry in inventory list unless fun() returns something
+ * other than 0.
+ */
+int
+scaninvent(int (*fun)(inventory_t *, void *), void *arg)
+{
+ return 0;
+}
+
+/*
+ * Find a particular inventory object
+ *
+ * pinv can be a pointer to an inventory entry and the search will begin from
+ * there, or it can be 0 in which case the search starts at the beginning.
+ * A -1 for any of the other arguments is a wildcard (i.e. it always matches).
+ */
+inventory_t *
+find_inventory(inventory_t *pinv, int class, int type, int controller,
+ int unit, int state)
+{
+ return((inventory_t *) NULL);
+}
+
+
+/*
+** Retrieve inventory data associated with a device.
+*/
+inventory_t *
+device_inventory_get_next( vertex_hdl_t device,
+ invplace_t *invplace)
+{
+ return((inventory_t *) NULL);
+}
+
+
+/*
+** Associate canonical inventory information with a device (and
+** add it to the general inventory).
+*/
+void
+device_inventory_add( vertex_hdl_t device,
+ int class,
+ int type,
+ major_t controller,
+ minor_t unit,
+ int state)
+{
+}
+
+int
+device_controller_num_get(vertex_hdl_t device)
+{
+ return (0);
+}
+
+void
+device_controller_num_set(vertex_hdl_t device, int contr_num)
+{
+}
--- /dev/null
+/* labelcl - SGI's Hwgraph Compatibility Layer.
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (c) 2001-2003 Silicon Graphics, Inc. All rights reserved.
+*/
+
+#include <linux/types.h>
+#include <linux/slab.h>
+#include <linux/kernel.h>
+#include <linux/fs.h>
+#include <linux/string.h>
+#include <linux/sched.h> /* needed for smp_lock.h :( */
+#include <linux/smp_lock.h>
+#include <asm/sn/sgi.h>
+#include <asm/sn/hwgfs.h>
+#include <asm/sn/invent.h>
+#include <asm/sn/hcl.h>
+#include <asm/sn/labelcl.h>
+
+/*
+** Very simple and dumb string table that supports only find/insert.
+** In practice, if this table gets too large, we may need a more
+** efficient data structure. Also note that currently there is no
+** way to delete an item once it's added. Therefore, name collision
+** will return an error.
+*/
+
+struct string_table label_string_table;
+
+
+
+/*
+ * string_table_init - Initialize the given string table.
+ */
+void
+string_table_init(struct string_table *string_table)
+{
+ string_table->string_table_head = NULL;
+ string_table->string_table_generation = 0;
+
+ /*
+	 * We need to initialize locks here!
+ */
+
+ return;
+}
+
+
+/*
+ * string_table_destroy - Destroy the given string table.
+ */
+void
+string_table_destroy(struct string_table *string_table)
+{
+ struct string_table_item *item, *next_item;
+
+ item = string_table->string_table_head;
+ while (item) {
+ next_item = item->next;
+
+ STRTBL_FREE(item);
+ item = next_item;
+ }
+
+ /*
+ * We need to destroy whatever lock we have here
+ */
+
+ return;
+}
+
+
+
+/*
+ * string_table_insert - Insert an entry in the string table .. duplicate
+ * names are not allowed.
+ */
+char *
+string_table_insert(struct string_table *string_table, char *name)
+{
+ struct string_table_item *item, *new_item = NULL, *last_item = NULL;
+
+again:
+ /*
+ * Need to lock the table ..
+ */
+ item = string_table->string_table_head;
+ last_item = NULL;
+
+ while (item) {
+ if (!strcmp(item->string, name)) {
+ /*
+			 * If we allocated space for the string and then found that
+ * someone else already entered it into the string table,
+ * free the space we just allocated.
+ */
+ if (new_item)
+ STRTBL_FREE(new_item);
+
+
+ /*
+ * Search optimization: move the found item to the head
+ * of the list.
+ */
+ if (last_item != NULL) {
+ last_item->next = item->next;
+ item->next = string_table->string_table_head;
+ string_table->string_table_head = item;
+ }
+ goto out;
+ }
+ last_item = item;
+ item=item->next;
+ }
+
+ /*
+ * name was not found, so add it to the string table.
+ */
+ if (new_item == NULL) {
+ long old_generation = string_table->string_table_generation;
+
+ new_item = STRTBL_ALLOC(strlen(name));
+
+ strcpy(new_item->string, name);
+
+ /*
+ * While we allocated memory for the new string, someone else
+ * changed the string table.
+ */
+ if (old_generation != string_table->string_table_generation) {
+ goto again;
+ }
+ } else {
+		/* At this point we only have the string table lock in access mode.
+ * Promote the access lock to an update lock for the string
+ * table insertion below.
+ */
+ long old_generation =
+ string_table->string_table_generation;
+
+ /*
+		 * After we did the unlock and were waiting for the update
+ * lock someone could have potentially updated
+ * the string table. Check the generation number
+ * for this case. If it is the case we have to
+ * try all over again.
+ */
+ if (old_generation !=
+ string_table->string_table_generation) {
+ goto again;
+ }
+ }
+
+ /*
+ * At this point, we're committed to adding new_item to the string table.
+ */
+ new_item->next = string_table->string_table_head;
+ item = string_table->string_table_head = new_item;
+ string_table->string_table_generation++;
+
+out:
+ /*
+ * Need to unlock here.
+ */
+ return(item->string);
+}
+
+/*
+ * labelcl_info_create - Creates the data structure that will hold the
+ * device private information associated with an entry.
+ * The pointer to this structure is what gets stored in the
+ * (void * info).
+ */
+labelcl_info_t *
+labelcl_info_create()
+{
+
+ labelcl_info_t *new = NULL;
+
+ /* Initial allocation does not include any area for labels */
+ if ( ( new = (labelcl_info_t *)kmalloc (sizeof(labelcl_info_t), GFP_KERNEL) ) == NULL )
+ return NULL;
+
+ memset (new, 0, sizeof(labelcl_info_t));
+ new->hwcl_magic = LABELCL_MAGIC;
+ return( new);
+
+}
+
+/*
+ * labelcl_info_destroy - Frees the data structure that holds the
+ * device private information associated with an entry. This
+ * data structure was created by device_info_create().
+ *
+ * The caller is responsible for nulling the (void *info) in the
+ * corresponding entry.
+ */
+int
+labelcl_info_destroy(labelcl_info_t *labelcl_info)
+{
+
+ if (labelcl_info == NULL)
+ return(0);
+
+ /* Free the label list */
+ if (labelcl_info->label_list)
+ kfree(labelcl_info->label_list);
+
+ /* Now free the label info area */
+ labelcl_info->hwcl_magic = 0;
+ kfree(labelcl_info);
+
+ return(0);
+}
+
+/*
+ * labelcl_info_add_LBL - Adds a new label entry in the labelcl info
+ * structure.
+ *
+ * Error is returned if we find another label with the same name.
+ */
+int
+labelcl_info_add_LBL(vertex_hdl_t de,
+ char *info_name,
+ arb_info_desc_t info_desc,
+ arbitrary_info_t info)
+{
+ labelcl_info_t *labelcl_info = NULL;
+ int num_labels;
+ int new_label_list_size;
+ label_info_t *old_label_list, *new_label_list = NULL;
+ char *name;
+ int i;
+
+ if (de == NULL)
+ return(-1);
+
+ labelcl_info = hwgfs_get_info(de);
+ if (labelcl_info == NULL)
+ return(-1);
+
+ if (labelcl_info->hwcl_magic != LABELCL_MAGIC)
+ return(-1);
+
+ if (info_name == NULL)
+ return(-1);
+
+ if (strlen(info_name) >= LABEL_LENGTH_MAX)
+ return(-1);
+
+ name = string_table_insert(&label_string_table, info_name);
+
+ num_labels = labelcl_info->num_labels;
+ new_label_list_size = sizeof(label_info_t) * (num_labels+1);
+
+ /*
+ * Create a new label info area.
+ */
+ if (new_label_list_size != 0) {
+ new_label_list = (label_info_t *) kmalloc(new_label_list_size, GFP_KERNEL);
+
+ if (new_label_list == NULL)
+ return(-1);
+ }
+
+ /*
+ * At this point, we are committed to adding the labelled info,
+ * if there isn't already information there with the same name.
+ */
+ old_label_list = labelcl_info->label_list;
+
+ /*
+ * Look for matching info name.
+ */
+ for (i=0; i<num_labels; i++) {
+ if (!strcmp(info_name, old_label_list[i].name)) {
+ /* Not allowed to add duplicate labelled info names. */
+ kfree(new_label_list);
+ return(-1);
+ }
+ new_label_list[i] = old_label_list[i]; /* structure copy */
+ }
+
+ new_label_list[num_labels].name = name;
+ new_label_list[num_labels].desc = info_desc;
+ new_label_list[num_labels].info = info;
+
+ labelcl_info->num_labels = num_labels+1;
+ labelcl_info->label_list = new_label_list;
+
+ if (old_label_list != NULL)
+ kfree(old_label_list);
+
+ return(0);
+}
+
+/*
+ * labelcl_info_remove_LBL - Remove a label entry.
+ */
+int
+labelcl_info_remove_LBL(vertex_hdl_t de,
+			char *info_name,
+			arb_info_desc_t *info_desc,
+			arbitrary_info_t *info)
+{
+	labelcl_info_t *labelcl_info = NULL;
+	int num_labels;
+	int new_label_list_size;
+	label_info_t *old_label_list, *new_label_list = NULL;
+	arb_info_desc_t label_desc_found;
+	arbitrary_info_t label_info_found;
+	int i;
+
+	if (de == NULL)
+		return(-1);
+
+	labelcl_info = hwgfs_get_info(de);
+	if (labelcl_info == NULL)
+		return(-1);
+
+	if (labelcl_info->hwcl_magic != LABELCL_MAGIC)
+		return(-1);
+
+	num_labels = labelcl_info->num_labels;
+	if (num_labels == 0) {
+		return(-1);
+	}
+
+	/*
+	 * Create a new info area.
+	 */
+	new_label_list_size = sizeof(label_info_t) * (num_labels-1);
+	if (new_label_list_size) {
+		new_label_list = (label_info_t *) kmalloc(new_label_list_size, GFP_KERNEL);
+		if (new_label_list == NULL)
+			return(-1);
+	}
+
+	/*
+	 * At this point, we are committed to removing the labelled info,
+	 * if it still exists.
+	 */
+	old_label_list = labelcl_info->label_list;
+
+	/*
+	 * Find matching info name.
+	 */
+	for (i=0; i<num_labels; i++) {
+		if (!strcmp(info_name, old_label_list[i].name)) {
+			label_desc_found = old_label_list[i].desc;
+			label_info_found = old_label_list[i].info;
+			goto found;
+		}
+		if (i < num_labels-1) /* avoid walking off the end of the new vertex */
+			new_label_list[i] = old_label_list[i]; /* structure copy */
+	}
+
+	/* The named info doesn't exist. */
+	if (new_label_list)
+		kfree(new_label_list);
+
+	return(-1);
+
+found:
+	/* Finish up rest of labelled info */
+	for (i=i+1; i<num_labels; i++)
+		new_label_list[i-1] = old_label_list[i]; /* structure copy */
+
+	labelcl_info->num_labels = num_labels-1;	/* one entry removed, not added */
+	labelcl_info->label_list = new_label_list;
+
+	kfree(old_label_list);
+
+	if (info != NULL)
+		*info = label_info_found;
+
+	if (info_desc != NULL)
+		*info_desc = label_desc_found;
+
+	return(0);
+}
+
+
+/*
+ * labelcl_info_replace_LBL - Replace an existing label entry with the
+ * given new information.
+ *
+ * Label entry must exist.
+ */
+int
+labelcl_info_replace_LBL(vertex_hdl_t de,
+ char *info_name,
+ arb_info_desc_t info_desc,
+ arbitrary_info_t info,
+ arb_info_desc_t *old_info_desc,
+ arbitrary_info_t *old_info)
+{
+ labelcl_info_t *labelcl_info = NULL;
+ int num_labels;
+ label_info_t *label_list;
+ int i;
+
+ if (de == NULL)
+ return(-1);
+
+ labelcl_info = hwgfs_get_info(de);
+ if (labelcl_info == NULL)
+ return(-1);
+
+ if (labelcl_info->hwcl_magic != LABELCL_MAGIC)
+ return(-1);
+
+ num_labels = labelcl_info->num_labels;
+ if (num_labels == 0) {
+ return(-1);
+ }
+
+ if (info_name == NULL)
+ return(-1);
+
+ label_list = labelcl_info->label_list;
+
+ /*
+ * Verify that information under info_name already exists.
+ */
+ for (i=0; i<num_labels; i++)
+ if (!strcmp(info_name, label_list[i].name)) {
+ if (old_info != NULL)
+ *old_info = label_list[i].info;
+
+ if (old_info_desc != NULL)
+ *old_info_desc = label_list[i].desc;
+
+ label_list[i].info = info;
+ label_list[i].desc = info_desc;
+
+ return(0);
+ }
+
+
+ return(-1);
+}
+
+/*
+ * labelcl_info_get_LBL - Retrieve and return the information for the
+ * given label entry.
+ */
+int
+labelcl_info_get_LBL(vertex_hdl_t de,
+ char *info_name,
+ arb_info_desc_t *info_desc,
+ arbitrary_info_t *info)
+{
+ labelcl_info_t *labelcl_info = NULL;
+ int num_labels;
+ label_info_t *label_list;
+ int i;
+
+ if (de == NULL)
+ return(-1);
+
+ labelcl_info = hwgfs_get_info(de);
+ if (labelcl_info == NULL)
+ return(-1);
+
+ if (labelcl_info->hwcl_magic != LABELCL_MAGIC)
+ return(-1);
+
+ num_labels = labelcl_info->num_labels;
+ if (num_labels == 0) {
+ return(-1);
+ }
+
+ label_list = labelcl_info->label_list;
+
+ /*
+ * Find information under info_name.
+ */
+ for (i=0; i<num_labels; i++)
+ if (!strcmp(info_name, label_list[i].name)) {
+ if (info != NULL)
+ *info = label_list[i].info;
+ if (info_desc != NULL)
+ *info_desc = label_list[i].desc;
+
+ return(0);
+ }
+
+ return(-1);
+}
+
+/*
+ * labelcl_info_get_next_LBL - returns the next label entry on the list.
+ */
+int
+labelcl_info_get_next_LBL(vertex_hdl_t de,
+ char *buffer,
+ arb_info_desc_t *info_descp,
+ arbitrary_info_t *infop,
+ labelcl_info_place_t *placeptr)
+{
+ labelcl_info_t *labelcl_info = NULL;
+ uint which_info;
+ label_info_t *label_list;
+
+ if ((buffer == NULL) && (infop == NULL))
+ return(-1);
+
+ if (placeptr == NULL)
+ return(-1);
+
+ if (de == NULL)
+ return(-1);
+
+ labelcl_info = hwgfs_get_info(de);
+ if (labelcl_info == NULL)
+ return(-1);
+
+ if (labelcl_info->hwcl_magic != LABELCL_MAGIC)
+ return(-1);
+
+ which_info = *placeptr;
+
+ if (which_info >= labelcl_info->num_labels) {
+ return(-1);
+ }
+
+ label_list = (label_info_t *) labelcl_info->label_list;
+
+ if (buffer != NULL)
+ strcpy(buffer, label_list[which_info].name);
+
+ if (infop)
+ *infop = label_list[which_info].info;
+
+ if (info_descp)
+ *info_descp = label_list[which_info].desc;
+
+ *placeptr = which_info + 1;
+
+ return(0);
+}
+
+
+int
+labelcl_info_replace_IDX(vertex_hdl_t de,
+ int index,
+ arbitrary_info_t info,
+ arbitrary_info_t *old_info)
+{
+ arbitrary_info_t *info_list_IDX;
+ labelcl_info_t *labelcl_info = NULL;
+
+ if (de == NULL) {
+ printk(KERN_ALERT "labelcl: NULL handle given.\n");
+ return(-1);
+ }
+
+ labelcl_info = hwgfs_get_info(de);
+ if (labelcl_info == NULL) {
+ printk(KERN_ALERT "labelcl: Entry %p does not have info pointer.\n", (void *)de);
+ return(-1);
+ }
+
+ if (labelcl_info->hwcl_magic != LABELCL_MAGIC)
+ return(-1);
+
+ if ( (index < 0) || (index >= HWGRAPH_NUM_INDEX_INFO) )
+ return(-1);
+
+ /*
+ * Replace information at the appropriate index in this vertex with
+ * the new info.
+ */
+ info_list_IDX = labelcl_info->IDX_list;
+ if (old_info != NULL)
+ *old_info = info_list_IDX[index];
+ info_list_IDX[index] = info;
+
+ return(0);
+
+}
+
+/*
+ * labelcl_info_connectpt_set - Sets the connectpt.
+ */
+int
+labelcl_info_connectpt_set(hwgfs_handle_t de,
+ hwgfs_handle_t connect_de)
+{
+ arbitrary_info_t old_info;
+ int rv;
+
+ rv = labelcl_info_replace_IDX(de, HWGRAPH_CONNECTPT,
+ (arbitrary_info_t) connect_de, &old_info);
+
+ if (rv) {
+ return(rv);
+ }
+
+ return(0);
+}
+
+
+/*
+ * labelcl_info_get_IDX - Returns the information pointed at by index.
+ *
+ */
+int
+labelcl_info_get_IDX(vertex_hdl_t de,
+ int index,
+ arbitrary_info_t *info)
+{
+ arbitrary_info_t *info_list_IDX;
+ labelcl_info_t *labelcl_info = NULL;
+
+ if (de == NULL)
+ return(-1);
+
+ labelcl_info = hwgfs_get_info(de);
+ if (labelcl_info == NULL)
+ return(-1);
+
+ if (labelcl_info->hwcl_magic != LABELCL_MAGIC)
+ return(-1);
+
+ if ( (index < 0) || (index >= HWGRAPH_NUM_INDEX_INFO) )
+ return(-1);
+
+ /*
+ * Return information at the appropriate index in this vertex.
+ */
+ info_list_IDX = labelcl_info->IDX_list;
+ if (info != NULL)
+ *info = info_list_IDX[index];
+
+ return(0);
+}
+
+/*
+ * labelcl_info_connectpt_get - Retrieve the connect point for a device entry.
+ */
+hwgfs_handle_t
+labelcl_info_connectpt_get(hwgfs_handle_t de)
+{
+ int rv;
+ arbitrary_info_t info;
+
+ rv = labelcl_info_get_IDX(de, HWGRAPH_CONNECTPT, &info);
+ if (rv)
+ return(NULL);
+
+ return((hwgfs_handle_t) info);
+}
--- /dev/null
+/*
+ * Copyright (c) 2003 Silicon Graphics, Inc. All Rights Reserved.
+ *
+ * Mostly shamelessly copied from Linus Torvalds' ramfs and thus
+ * Copyright (C) 2000 Linus Torvalds.
+ * 2000 Transmeta Corp.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it would be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * Further, this software is distributed without any warranty that it is
+ * free of the rightful claim of any third person regarding infringement
+ * or the like. Any license provided herein, whether implied or
+ * otherwise, applies only to this software file. Patent licenses, if
+ * any, provided herein do not apply to combinations of this program with
+ * other software, or any other product whatsoever.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write the Free Software Foundation, Inc., 59
+ * Temple Place - Suite 330, Boston MA 02111-1307, USA.
+ *
+ * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy,
+ * Mountain View, CA 94043, or:
+ *
+ * http://www.sgi.com
+ *
+ * For further information regarding this notice, see:
+ *
+ * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/
+ */
+
+#include <linux/module.h>
+#include <linux/backing-dev.h>
+#include <linux/fs.h>
+#include <linux/pagemap.h>
+#include <linux/init.h>
+#include <linux/string.h>
+#include <asm/uaccess.h>
+
+/* some random number */
+#define HWGFS_MAGIC 0x12061983
+
+static struct super_operations hwgfs_ops;
+static struct address_space_operations hwgfs_aops;
+static struct file_operations hwgfs_file_operations;
+static struct inode_operations hwgfs_file_inode_operations;
+static struct inode_operations hwgfs_dir_inode_operations;
+
+static struct backing_dev_info hwgfs_backing_dev_info = {
+ .ra_pages = 0, /* No readahead */
+ .memory_backed = 1, /* Does not contribute to dirty memory */
+};
+
+struct inode *hwgfs_get_inode(struct super_block *sb, int mode, dev_t dev)
+{
+ struct inode * inode = new_inode(sb);
+
+ if (inode) {
+ inode->i_mode = mode;
+ inode->i_uid = current->fsuid;
+ inode->i_gid = current->fsgid;
+ inode->i_blksize = PAGE_CACHE_SIZE;
+ inode->i_blocks = 0;
+ inode->i_rdev = NODEV;
+ inode->i_mapping->a_ops = &hwgfs_aops;
+ inode->i_mapping->backing_dev_info = &hwgfs_backing_dev_info;
+ inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;
+ switch (mode & S_IFMT) {
+ default:
+ init_special_inode(inode, mode, dev);
+ break;
+ case S_IFREG:
+ inode->i_op = &hwgfs_file_inode_operations;
+ inode->i_fop = &hwgfs_file_operations;
+ break;
+ case S_IFDIR:
+ inode->i_op = &hwgfs_dir_inode_operations;
+ inode->i_fop = &simple_dir_operations;
+ inode->i_nlink++;
+ break;
+ case S_IFLNK:
+ inode->i_op = &page_symlink_inode_operations;
+ break;
+ }
+ }
+ return inode;
+}
+
+static int hwgfs_mknod(struct inode *dir, struct dentry *dentry, int mode, dev_t dev)
+{
+ struct inode * inode = hwgfs_get_inode(dir->i_sb, mode, dev);
+ int error = -ENOSPC;
+
+ if (inode) {
+ d_instantiate(dentry, inode);
+ dget(dentry); /* Extra count - pin the dentry in core */
+ error = 0;
+ }
+ return error;
+}
+
+static int hwgfs_mkdir(struct inode * dir, struct dentry * dentry, int mode)
+{
+ return hwgfs_mknod(dir, dentry, mode | S_IFDIR, 0);
+}
+
+static int hwgfs_create(struct inode *dir, struct dentry *dentry, int mode)
+{
+ return hwgfs_mknod(dir, dentry, mode | S_IFREG, 0);
+}
+
+static int hwgfs_symlink(struct inode * dir, struct dentry *dentry, const char * symname)
+{
+ struct inode *inode;
+ int error = -ENOSPC;
+
+ inode = hwgfs_get_inode(dir->i_sb, S_IFLNK|S_IRWXUGO, 0);
+ if (inode) {
+ int l = strlen(symname)+1;
+ error = page_symlink(inode, symname, l);
+ if (!error) {
+ d_instantiate(dentry, inode);
+ dget(dentry);
+ } else
+ iput(inode);
+ }
+ return error;
+}
+
+static struct address_space_operations hwgfs_aops = {
+ .readpage = simple_readpage,
+ .prepare_write = simple_prepare_write,
+ .commit_write = simple_commit_write
+};
+
+static struct file_operations hwgfs_file_operations = {
+ .read = generic_file_read,
+ .write = generic_file_write,
+ .mmap = generic_file_mmap,
+ .fsync = simple_sync_file,
+ .sendfile = generic_file_sendfile,
+};
+
+static struct inode_operations hwgfs_file_inode_operations = {
+ .getattr = simple_getattr,
+};
+
+static struct inode_operations hwgfs_dir_inode_operations = {
+ .create = hwgfs_create,
+ .lookup = simple_lookup,
+ .link = simple_link,
+ .unlink = simple_unlink,
+ .symlink = hwgfs_symlink,
+ .mkdir = hwgfs_mkdir,
+ .rmdir = simple_rmdir,
+ .mknod = hwgfs_mknod,
+ .rename = simple_rename,
+};
+
+static struct super_operations hwgfs_ops = {
+ .statfs = simple_statfs,
+ .drop_inode = generic_delete_inode,
+};
+
+static int hwgfs_fill_super(struct super_block * sb, void * data, int silent)
+{
+ struct inode * inode;
+ struct dentry * root;
+
+ sb->s_blocksize = PAGE_CACHE_SIZE;
+ sb->s_blocksize_bits = PAGE_CACHE_SHIFT;
+ sb->s_magic = HWGFS_MAGIC;
+ sb->s_op = &hwgfs_ops;
+ inode = hwgfs_get_inode(sb, S_IFDIR | 0755, 0);
+ if (!inode)
+ return -ENOMEM;
+
+ root = d_alloc_root(inode);
+ if (!root) {
+ iput(inode);
+ return -ENOMEM;
+ }
+ sb->s_root = root;
+ return 0;
+}
+
+static struct super_block *hwgfs_get_sb(struct file_system_type *fs_type,
+ int flags, char *dev_name, void *data)
+{
+ return get_sb_single(fs_type, flags, data, hwgfs_fill_super);
+}
+
+static struct file_system_type hwgfs_fs_type = {
+ .owner = THIS_MODULE,
+ .name = "hwgfs",
+ .get_sb = hwgfs_get_sb,
+ .kill_sb = kill_litter_super,
+};
+
+struct vfsmount *hwgfs_vfsmount;
+
+int __init init_hwgfs_fs(void)
+{
+ int error;
+
+ error = register_filesystem(&hwgfs_fs_type);
+ if (error)
+ return error;
+
+ hwgfs_vfsmount = kern_mount(&hwgfs_fs_type);
+ if (IS_ERR(hwgfs_vfsmount))
+ goto fail;
+ return 0;
+
+fail:
+ unregister_filesystem(&hwgfs_fs_type);
+ return PTR_ERR(hwgfs_vfsmount);
+}
+
+static void __exit exit_hwgfs_fs(void)
+{
+ unregister_filesystem(&hwgfs_fs_type);
+}
+
+MODULE_LICENSE("GPL");
+
+module_init(init_hwgfs_fs)
+module_exit(exit_hwgfs_fs)
+++ /dev/null
-/* $Id: ifconfig_net.c,v 1.1 2002/02/28 17:31:25 marcelo Exp $
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * ifconfig_net - SGI's Persistent Network Device names.
- *
- * Copyright (C) 1992-1997, 2000-2002 Silicon Graphics, Inc. All rights reserved.
- */
-
-#include <linux/types.h>
-#include <linux/config.h>
-#include <linux/slab.h>
-#include <linux/ctype.h>
-#include <linux/module.h>
-#include <linux/init.h>
-
-#include <linux/pci.h>
-#include <linux/netdevice.h>
-#include <linux/etherdevice.h>
-#include <linux/skbuff.h>
-
-#include <asm/sn/sgi.h>
-#include <linux/devfs_fs.h>
-#include <linux/devfs_fs_kernel.h>
-#include <asm/io.h>
-#include <asm/sn/iograph.h>
-#include <asm/sn/invent.h>
-#include <asm/sn/hcl.h>
-#include <asm/sn/labelcl.h>
-#include <asm/sn/ifconfig_net.h>
-
-#define SGI_IFCONFIG_NET "SGI-PERSISTENT NETWORK DEVICE NAME DRIVER"
-#define SGI_IFCONFIG_NET_VERSION "1.0"
-
-/*
- * Some Global definitions.
- */
-devfs_handle_t ifconfig_net_handle = NULL;
-unsigned long ifconfig_net_debug = 0;
-
-/*
- * ifconfig_net_open - Opens the special device node "/devhw/.ifconfig_net".
- */
-static int ifconfig_net_open(struct inode * inode, struct file * filp)
-{
- if (ifconfig_net_debug) {
- printk("ifconfig_net_open called.\n");
- }
-
- return(0);
-
-}
-
-/*
- * ifconfig_net_close - Closes the special device node "/devhw/.ifconfig_net".
- */
-static int ifconfig_net_close(struct inode * inode, struct file * filp)
-{
-
- if (ifconfig_net_debug) {
- printk("ifconfig_net_close called.\n");
- }
-
- return(0);
-}
-
-/*
- * assign_ifname - Assign the next available interface name from the persistent list.
- */
-void
-assign_ifname(struct net_device *dev,
- struct ifname_num *ifname_num)
-
-{
-
- /*
- * Handle eth devices.
- */
- if ( (memcmp(dev->name, "eth", 3) == 0) ) {
- if (ifname_num->next_eth != -1) {
- /*
- * Assign it the next available eth interface number.
- */
- memset(dev->name, 0, strlen(dev->name));
- sprintf(dev->name, "eth%d", (int)ifname_num->next_eth);
- ifname_num->next_eth++;
- }
-
- return;
- }
-
- /*
- * Handle fddi devices.
- */
- if ( (memcmp(dev->name, "fddi", 4) == 0) ) {
- if (ifname_num->next_fddi != -1) {
- /*
- * Assign it the next available fddi interface number.
- */
- memset(dev->name, 0, strlen(dev->name));
- sprintf(dev->name, "fddi%d", (int)ifname_num->next_fddi);
- ifname_num->next_fddi++;
- }
-
- return;
- }
-
- /*
- * Handle hip devices.
- */
- if ( (memcmp(dev->name, "hip", 3) == 0) ) {
- if (ifname_num->next_hip != -1) {
- /*
- * Assign it the next available hip interface number.
- */
- memset(dev->name, 0, strlen(dev->name));
- sprintf(dev->name, "hip%d", (int)ifname_num->next_hip);
- ifname_num->next_hip++;
- }
-
- return;
- }
-
- /*
- * Handle tr devices.
- */
- if ( (memcmp(dev->name, "tr", 2) == 0) ) {
- if (ifname_num->next_tr != -1) {
- /*
- * Assign it the next available tr interface number.
- */
- memset(dev->name, 0, strlen(dev->name));
- sprintf(dev->name, "tr%d", (int)ifname_num->next_tr);
- ifname_num->next_tr++;
- }
-
- return;
- }
-
- /*
- * Handle fc devices.
- */
- if ( (memcmp(dev->name, "fc", 2) == 0) ) {
- if (ifname_num->next_fc != -1) {
- /*
- * Assign it the next available fc interface number.
- */
- memset(dev->name, 0, strlen(dev->name));
- sprintf(dev->name, "fc%d", (int)ifname_num->next_fc);
- ifname_num->next_fc++;
- }
-
- return;
- }
-}
-
-/*
- * find_persistent_ifname: Returns the entry that was seen in previous boot.
- */
-struct ifname_MAC *
-find_persistent_ifname(struct net_device *dev,
- struct ifname_MAC *ifname_MAC)
-
-{
-
- while (ifname_MAC->addr_len) {
- if (memcmp(dev->dev_addr, ifname_MAC->dev_addr, dev->addr_len) == 0)
- return(ifname_MAC);
-
- ifname_MAC++;
- }
-
- return(NULL);
-}
-
-/*
- * ifconfig_net_ioctl: ifconfig_net driver ioctl interface.
- */
-static int ifconfig_net_ioctl(struct inode * inode, struct file * file,
- unsigned int cmd, unsigned long arg)
-{
-
- extern struct net_device *__dev_get_by_name(const char *);
-#ifdef CONFIG_NET
- struct net_device *dev;
- struct ifname_MAC *found;
- char temp[64];
-#endif
- struct ifname_MAC *ifname_MAC;
- struct ifname_MAC *new_devices, *temp_new_devices;
- struct ifname_num *ifname_num;
- unsigned long size;
-
-
- if (ifconfig_net_debug) {
- printk("HCL: hcl_ioctl called.\n");
- }
-
- /*
- * Read in the header and see how big of a buffer we really need to
- * allocate.
- */
- ifname_num = (struct ifname_num *) kmalloc(sizeof(struct ifname_num),
- GFP_KERNEL);
- copy_from_user( ifname_num, (char *) arg, sizeof(struct ifname_num));
- size = ifname_num->size;
- kfree(ifname_num);
- ifname_num = (struct ifname_num *) kmalloc(size, GFP_KERNEL);
- ifname_MAC = (struct ifname_MAC *) ((char *)ifname_num + (sizeof(struct ifname_num)) );
-
- copy_from_user( ifname_num, (char *) arg, size);
- new_devices = kmalloc(size - sizeof(struct ifname_num), GFP_KERNEL);
- temp_new_devices = new_devices;
-
- memset(new_devices, 0, size - sizeof(struct ifname_num));
-
-#ifdef CONFIG_NET
- /*
- * Go through the net device entries and make them persistent!
- */
- for (dev = dev_base; dev != NULL; dev = dev->next) {
- /*
- * Skip NULL entries or "lo"
- */
- if ( (dev->addr_len == 0) || ( !strncmp(dev->name, "lo", strlen(dev->name))) ){
- continue;
- }
-
- /*
- * See if we have a persistent interface name for this device.
- */
- found = NULL;
- found = find_persistent_ifname(dev, ifname_MAC);
- if (found) {
- strcpy(dev->name, found->name);
- } else {
- /* Never seen this before .. */
- assign_ifname(dev, ifname_num);
-
- /*
- * Save the information for the next boot.
- */
- sprintf(temp,"%s %02x:%02x:%02x:%02x:%02x:%02x\n", dev->name,
- dev->dev_addr[0], dev->dev_addr[1], dev->dev_addr[2],
- dev->dev_addr[3], dev->dev_addr[4], dev->dev_addr[5]);
- strcpy(temp_new_devices->name, dev->name);
- temp_new_devices->addr_len = dev->addr_len;
- memcpy(temp_new_devices->dev_addr, dev->dev_addr, dev->addr_len);
- temp_new_devices++;
- }
-
- }
-#endif
-
- /*
- * Copy back to the User Buffer area any new devices encountered.
- */
- copy_to_user((char *)arg + (sizeof(struct ifname_num)), new_devices,
- size - sizeof(struct ifname_num));
-
- return(0);
-
-}
-
-struct file_operations ifconfig_net_fops = {
- ioctl:ifconfig_net_ioctl, /* ioctl */
- open:ifconfig_net_open, /* open */
- release:ifconfig_net_close /* release */
-};
-
-
-/*
- * init_ifconfig_net() - Boot time initialization. Ensure that it is called
- * after devfs has been initialized.
- *
- */
-#ifdef MODULE
-int init_module (void)
-#else
-int __init init_ifconfig_net(void)
-#endif
-{
- ifconfig_net_handle = NULL;
- ifconfig_net_handle = hwgraph_register(hwgraph_root, ".ifconfig_net",
- 0, DEVFS_FL_AUTO_DEVNUM,
- 0, 0,
- S_IFCHR | S_IRUSR | S_IWUSR | S_IRGRP, 0, 0,
- &ifconfig_net_fops, NULL);
-
- if (ifconfig_net_handle == NULL) {
- panic("Unable to create SGI PERSISTENT NETWORK DEVICE Name Driver.\n");
- }
-
- return(0);
-
-}
+++ /dev/null
-/* $Id$
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Copyright (C) 1992 - 1997, 2000-2002 Silicon Graphics, Inc. All rights reserved.
- */
-
-/*
- * Hardware Inventory
- *
- * See sys/sn/invent.h for an explanation of the hardware inventory contents.
- *
- */
-#include <linux/types.h>
-#include <asm/sn/sgi.h>
-#include <asm/sn/invent.h>
-#include <asm/sn/hcl.h>
-#include <asm/sn/labelcl.h>
-
-void
-inventinit(void)
-{
-}
-
-/*
- * For initializing/updating an inventory entry.
- */
-void
-replace_in_inventory(
- inventory_t *pinv, int class, int type,
- int controller, int unit, int state)
-{
- pinv->inv_class = class;
- pinv->inv_type = type;
- pinv->inv_controller = controller;
- pinv->inv_unit = unit;
- pinv->inv_state = state;
-}
-
-/*
- * Inventory addition
- *
- * XXX NOTE: Currently must be called after dynamic memory allocator is
- * initialized.
- *
- */
-void
-add_to_inventory(int class, int type, int controller, int unit, int state)
-{
- (void)device_inventory_add((devfs_handle_t)GRAPH_VERTEX_NONE, class, type,
- controller, unit, state);
-}
-
-
-/*
- * Inventory retrieval
- *
- * These two routines are intended to prevent the caller from having to know
- * the internal structure of the inventory table.
- *
- * The caller of get_next_inventory is supposed to call start_scan_invent
- * before the irst call to get_next_inventory, and the caller is required
- * to call end_scan_invent after the last call to get_next_inventory.
- */
-inventory_t *
-get_next_inventory(invplace_t *place)
-{
- inventory_t *pinv;
- devfs_handle_t device = place->invplace_vhdl;
- int rv;
-
- while ((pinv = device_inventory_get_next(device, place)) == NULL) {
- /*
- * We've exhausted inventory items on the last device.
- * Advance to next device.
- */
- place->invplace_inv = NULL; /* Start from beginning invent on this device */
- rv = hwgraph_vertex_get_next(&device, &place->invplace_vplace);
- if (rv == LABELCL_SUCCESS) {
- place->invplace_vhdl = device;
- }
- else {
- place->invplace_vhdl = GRAPH_VERTEX_NONE;
- return(NULL);
- }
- }
-
- return(pinv);
-}
-
-/* ARGSUSED */
-int
-get_sizeof_inventory(int abi)
-{
- return sizeof(inventory_t);
-}
-
-/* Must be called prior to first call to get_next_inventory */
-void
-start_scan_inventory(invplace_t *iplace)
-{
- *iplace = INVPLACE_NONE;
-}
-
-/* Must be called after last call to get_next_inventory */
-void
-end_scan_inventory(invplace_t *iplace)
-{
- devfs_handle_t vhdl = iplace->invplace_vhdl;
- if (vhdl != GRAPH_VERTEX_NONE)
- hwgraph_vertex_unref(vhdl);
- *iplace = INVPLACE_NONE; /* paranoia */
-}
-
-/*
- * Hardware inventory scanner.
- *
- * Calls fun() for every entry in inventory list unless fun() returns something
- * other than 0.
- */
-int
-scaninvent(int (*fun)(inventory_t *, void *), void *arg)
-{
- inventory_t *ie;
- invplace_t iplace = { NULL,NULL, NULL };
- int rc;
-
- ie = 0;
- rc = 0;
- start_scan_inventory(&iplace);
- while ((ie = (inventory_t *)get_next_inventory(&iplace))) {
- rc = (*fun)(ie, arg);
- if (rc)
- break;
- }
- end_scan_inventory(&iplace);
- return rc;
-}
-
-/*
- * Find a particular inventory object
- *
- * pinv can be a pointer to an inventory entry and the search will begin from
- * there, or it can be 0 in which case the search starts at the beginning.
- * A -1 for any of the other arguments is a wildcard (i.e. it always matches).
- */
-inventory_t *
-find_inventory(inventory_t *pinv, int class, int type, int controller,
- int unit, int state)
-{
- invplace_t iplace = { NULL,NULL, NULL };
-
- start_scan_inventory(&iplace);
- while ((pinv = (inventory_t *)get_next_inventory(&iplace)) != NULL) {
- if (class != -1 && pinv->inv_class != class)
- continue;
- if (type != -1 && pinv->inv_type != type)
- continue;
-
- /* XXXX - perhaps the "state" entry should be ignored so an
- * an existing entry can be updated. See vino_init() and
- * ml/IP22.c:add_ioboard() for an example.
- */
- if (state != -1 && pinv->inv_state != state)
- continue;
- if (controller != -1
- && pinv->inv_controller != controller)
- continue;
- if (unit != -1 && pinv->inv_unit != unit)
- continue;
- break;
- }
- end_scan_inventory(&iplace);
-
- return(pinv);
-}
-
-
-/*
-** Retrieve inventory data associated with a device.
-*/
-inventory_t *
-device_inventory_get_next( devfs_handle_t device,
- invplace_t *invplace)
-{
- inventory_t *pinv;
- int rv;
-
- rv = hwgraph_inventory_get_next(device, invplace, &pinv);
- if (rv == LABELCL_SUCCESS)
- return(pinv);
- else
- return(NULL);
-}
-
-
-/*
-** Associate canonical inventory information with a device (and
-** add it to the general inventory).
-*/
-void
-device_inventory_add( devfs_handle_t device,
- int class,
- int type,
- major_t controller,
- minor_t unit,
- int state)
-{
- hwgraph_inventory_add(device, class, type, controller, unit, state);
-}
-
-int
-device_controller_num_get(devfs_handle_t device)
-{
- return (hwgraph_controller_num_get(device));
-}
-
-void
-device_controller_num_set(devfs_handle_t device, int contr_num)
-{
- hwgraph_controller_num_set(device, contr_num);
-}
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
- * Copyright (C) 1992-1997, 2000-2002 Silicon Graphics, Inc. All Rights Reserved.
+ * Copyright (C) 1992-1997, 2000-2003 Silicon Graphics, Inc. All Rights Reserved.
*/
#include <linux/config.h>
#include <asm/sn/sn_cpuid.h>
extern xtalk_provider_t hub_provider;
-extern void hub_intr_init(devfs_handle_t hubv);
+extern void hub_intr_init(vertex_hdl_t hubv);
+static int force_fire_and_forget = 1;
+static int ignore_conveyor_override;
-/*
- * Perform any initializations needed to support hub-based I/O.
- * Called once during startup.
- */
-void
-hubio_init(void)
-{
-}
/*
* Implementation of hub iobus operations.
/*
* Setup pio structures needed for a particular hub.
*/
-void
-hub_pio_init(devfs_handle_t hubv)
+static void
+hub_pio_init(vertex_hdl_t hubv)
{
xwidgetnum_t widget;
hubinfo_t hubinfo;
*/
/* ARGSUSED */
hub_piomap_t
-hub_piomap_alloc(devfs_handle_t dev, /* set up mapping for this device */
+hub_piomap_alloc(vertex_hdl_t dev, /* set up mapping for this device */
device_desc_t dev_desc, /* device descriptor */
iopaddr_t xtalk_addr, /* map for this xtalk_addr range */
size_t byte_count,
{
xwidget_info_t widget_info = xwidget_info_get(dev);
xwidgetnum_t widget = xwidget_info_id_get(widget_info);
- devfs_handle_t hubv = xwidget_info_master_get(widget_info);
+ vertex_hdl_t hubv = xwidget_info_master_get(widget_info);
hubinfo_t hubinfo;
hub_piomap_t bw_piomap;
int bigwin, free_bw_index;
void
hub_piomap_free(hub_piomap_t hub_piomap)
{
- devfs_handle_t hubv;
+ vertex_hdl_t hubv;
hubinfo_t hubinfo;
nasid_t nasid;
unsigned long s;
*/
/* ARGSUSED */
caddr_t
-hub_piotrans_addr( devfs_handle_t dev, /* translate to this device */
+hub_piotrans_addr( vertex_hdl_t dev, /* translate to this device */
device_desc_t dev_desc, /* device descriptor */
iopaddr_t xtalk_addr, /* Crosstalk address */
size_t byte_count, /* map this many bytes */
{
xwidget_info_t widget_info = xwidget_info_get(dev);
xwidgetnum_t widget = xwidget_info_id_get(widget_info);
- devfs_handle_t hubv = xwidget_info_master_get(widget_info);
+ vertex_hdl_t hubv = xwidget_info_master_get(widget_info);
hub_piomap_t hub_piomap;
hubinfo_t hubinfo;
caddr_t addr;
*/
/* ARGSUSED */
hub_dmamap_t
-hub_dmamap_alloc( devfs_handle_t dev, /* set up mappings for this device */
+hub_dmamap_alloc( vertex_hdl_t dev, /* set up mappings for this device */
device_desc_t dev_desc, /* device descriptor */
size_t byte_count_max, /* max size of a mapping */
unsigned flags) /* defined in dma.h */
hub_dmamap_t dmamap;
xwidget_info_t widget_info = xwidget_info_get(dev);
xwidgetnum_t widget = xwidget_info_id_get(widget_info);
- devfs_handle_t hubv = xwidget_info_master_get(widget_info);
+ vertex_hdl_t hubv = xwidget_info_master_get(widget_info);
dmamap = kmalloc(sizeof(struct hub_dmamap_s), GFP_ATOMIC);
dmamap->hdma_xtalk_info.xd_dev = dev;
paddr_t paddr, /* map for this address */
size_t byte_count) /* map this many bytes */
{
- devfs_handle_t vhdl;
+ vertex_hdl_t vhdl;
ASSERT(dmamap->hdma_flags & HUB_DMAMAP_IS_VALID);
}
/* There isn't actually any DMA mapping hardware on the hub. */
-#ifdef CONFIG_IA64_SGI_SN2
return( (PHYS_TO_DMA(paddr)) );
-#else
- /* no translation needed */
- return(paddr);
-#endif
}
/*
alenlist_t palenlist, /* map this area of memory */
unsigned flags)
{
- devfs_handle_t vhdl;
+ vertex_hdl_t vhdl;
ASSERT(hub_dmamap->hdma_flags & HUB_DMAMAP_IS_VALID);
void
hub_dmamap_done(hub_dmamap_t hub_dmamap) /* done with these mapping resources */
{
- devfs_handle_t vhdl;
+ vertex_hdl_t vhdl;
if (hub_dmamap->hdma_flags & HUB_DMAMAP_USED) {
hub_dmamap->hdma_flags &= ~HUB_DMAMAP_USED;
*/
/* ARGSUSED */
iopaddr_t
-hub_dmatrans_addr( devfs_handle_t dev, /* translate for this device */
+hub_dmatrans_addr( vertex_hdl_t dev, /* translate for this device */
device_desc_t dev_desc, /* device descriptor */
paddr_t paddr, /* system physical address */
size_t byte_count, /* length */
unsigned flags) /* defined in dma.h */
{
-#ifdef CONFIG_IA64_SGI_SN2
return( (PHYS_TO_DMA(paddr)) );
-#else
- /* no translation needed */
- return(paddr);
-#endif
}
/*
*/
/* ARGSUSED */
alenlist_t
-hub_dmatrans_list( devfs_handle_t dev, /* translate for this device */
+hub_dmatrans_list( vertex_hdl_t dev, /* translate for this device */
device_desc_t dev_desc, /* device descriptor */
alenlist_t palenlist, /* system address/length list */
unsigned flags) /* defined in dma.h */
/*ARGSUSED*/
void
-hub_dmaaddr_drain( devfs_handle_t vhdl,
+hub_dmaaddr_drain( vertex_hdl_t vhdl,
paddr_t addr,
size_t bytes)
{
/*ARGSUSED*/
void
-hub_dmalist_drain( devfs_handle_t vhdl,
+hub_dmalist_drain( vertex_hdl_t vhdl,
alenlist_t list)
{
/* XXX- flush caches, if cache coherency WAR is needed */
* Perform initializations that allow this hub to start crosstalk support.
*/
void
-hub_provider_startup(devfs_handle_t hubv)
+hub_provider_startup(vertex_hdl_t hubv)
{
- extern void hub_pio_init(devfs_handle_t hubv);
-
hub_pio_init(hubv);
hub_intr_init(hubv);
}
* Shutdown crosstalk support from a hub.
*/
void
-hub_provider_shutdown(devfs_handle_t hub)
+hub_provider_shutdown(vertex_hdl_t hub)
{
/* TBD */
xtalk_provider_unregister(hub);
/*
- * Determine whether two PCI addresses actually refer to the same device.
- * This only works if both addresses are in small windows. It's used to
- * determine whether prom addresses refer to particular PCI devices.
- */
-/*
- * XXX - This won't work as written if we ever have more than two nodes
- * on a crossbow. In that case, we'll need an array or partners.
- */
-int
-hub_check_pci_equiv(void *addra, void *addrb)
-{
- nasid_t nasida, nasidb;
-
- /*
- * This is for a permanent workaround that causes us to use a
- * big window in place of small window 0.
- */
- if (!hub_check_window_equiv(addra, addrb))
- return 0;
-
- /* If the offsets aren't the same, forget it. */
- if (SWIN_WIDGETADDR((__psunsigned_t)addra) !=
- (SWIN_WIDGETADDR((__psunsigned_t)addrb)))
- return 0;
-
- /* Now, check the nasids */
- nasida = NASID_GET(addra);
- nasidb = NASID_GET(addrb);
-
- ASSERT(NASID_TO_COMPACT_NODEID(nasida) != INVALID_NASID);
- ASSERT(NASID_TO_COMPACT_NODEID(nasidb) != INVALID_NASID);
-
- /*
- * Either the NASIDs must be the same or they must be crossbow
- * partners (on the same crossbow).
- */
- return (check_nasid_equiv(nasida, nasidb));
-}
-
-/*
* hub_setup_prb(nasid, prbnum, credits, conveyor)
*
* Put a PRB into fire-and-forget mode if conveyor isn't set. Otherwise,
{
iprb_t prb;
int prb_offset;
- extern int force_fire_and_forget;
- extern volatile int ignore_conveyor_override;
if (force_fire_and_forget && !ignore_conveyor_override)
if (conveyor == HUB_PIO_CONVEYOR)
int direct_connect;
hubii_wcr_t ii_wcr;
int prbnum;
- int cons_lock = 0;
ASSERT(NASID_TO_COMPACT_NODEID(nasid) != INVALID_CNODEID);
- if (nasid == get_console_nasid()) {
- PUTBUF_LOCK(s);
- cons_lock = 1;
- }
ii_iowa = REMOTE_HUB_L(nasid, IIO_OUTWIDGET_ACCESS);
REMOTE_HUB_S(nasid, IIO_OUTWIDGET_ACCESS, 0);
}
REMOTE_HUB_S(nasid, IIO_OUTWIDGET_ACCESS, ii_iowa);
-
- if (cons_lock)
- PUTBUF_UNLOCK(s);
}
/* Interface to allow special drivers to set hub specific
* device flags.
return 1;
}
-/* Interface to allow special drivers to set hub specific
- * device flags.
- * Return 0 on failure , 1 on success
- */
-int
-hub_device_flags_set(devfs_handle_t widget_vhdl,
- hub_widget_flags_t flags)
-{
- xwidget_info_t widget_info = xwidget_info_get(widget_vhdl);
- xwidgetnum_t widget_num = xwidget_info_id_get(widget_info);
- devfs_handle_t hub_vhdl = xwidget_info_master_get(widget_info);
- hubinfo_t hub_info = 0;
- nasid_t nasid;
- unsigned long s;
- int rv;
-
- /* Use the nasid from the hub info hanging off the hub vertex
- * and widget number from the widget vertex
- */
- hubinfo_get(hub_vhdl, &hub_info);
- /* Being over cautious by grabbing a lock */
- s = mutex_spinlock(&hub_info->h_bwlock);
- nasid = hub_info->h_nasid;
- rv = hub_widget_flags_set(nasid,widget_num,flags);
- mutex_spinunlock(&hub_info->h_bwlock, s);
-
- return rv;
-}
-
-/*
- * hub_device_inquiry
- * Find out the xtalk widget related information stored in this
- * hub's II.
- */
-void
-hub_device_inquiry(devfs_handle_t xbus_vhdl, xwidgetnum_t widget)
-{
- devfs_handle_t xconn, hub_vhdl;
- char widget_name[8];
- hubreg_t ii_iidem,ii_iiwa, ii_iowa;
- hubinfo_t hubinfo;
- nasid_t nasid;
- int d;
-
- sprintf(widget_name, "%d", widget);
- if (hwgraph_traverse(xbus_vhdl, widget_name, &xconn)
- != GRAPH_SUCCESS)
- return;
-
- hub_vhdl = device_master_get(xconn);
- if (hub_vhdl == GRAPH_VERTEX_NONE)
- return;
-
- hubinfo_get(hub_vhdl, &hubinfo);
- if (!hubinfo)
- return;
-
- nasid = hubinfo->h_nasid;
-
- ii_iidem = REMOTE_HUB_L(nasid, IIO_IIDEM);
- ii_iiwa = REMOTE_HUB_L(nasid, IIO_IIWA);
- ii_iowa = REMOTE_HUB_L(nasid, IIO_IOWA);
-
-#if defined(SUPPORT_PRINTING_V_FORMAT)
- printk("Inquiry Info for %v\n", xconn);
-#else
- printk("Inquiry Info for %p\n", (void *)xconn);
-#endif
-
- printk("\tDevices shutdown [ ");
-
- for (d = 0 ; d <= 7 ; d++)
- if (!(ii_iidem & (IIO_IIDEM_WIDGETDEV_MASK(widget,d))))
- printk(" %d", d);
-
- printk("]\n");
-
- printk("\tInbound access ? %s\n",
- ii_iiwa & IIO_IIWA_WIDGET(widget) ? "yes" : "no");
-
- printk("\tOutbound access ? %s\n",
- ii_iowa & IIO_IOWA_WIDGET(widget) ? "yes" : "no");
-
-}
/*
* A pointer to this structure hangs off of every hub hwgraph vertex.
(xtalk_intr_free_f *) hub_intr_free,
(xtalk_intr_connect_f *) hub_intr_connect,
(xtalk_intr_disconnect_f *) hub_intr_disconnect,
- (xtalk_intr_cpu_get_f *) hub_intr_cpu_get,
-
(xtalk_provider_startup_f *) hub_provider_startup,
(xtalk_provider_shutdown_f *) hub_provider_shutdown,
};
+++ /dev/null
-/* $Id$
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * ioconfig_bus - SGI's Persistent PCI Bus Numbering.
- *
- * Copyright (C) 1992-1997, 2000-2003 Silicon Graphics, Inc. All rights reserved.
- */
-
-#include <linux/types.h>
-#include <linux/slab.h>
-#include <linux/ctype.h>
-#include <linux/module.h>
-#include <linux/init.h>
-
-#include <linux/pci.h>
-
-#include <asm/sn/sgi.h>
-#include <linux/devfs_fs.h>
-#include <linux/devfs_fs_kernel.h>
-#include <asm/io.h>
-#include <asm/sn/iograph.h>
-#include <asm/sn/invent.h>
-#include <asm/sn/hcl.h>
-#include <asm/sn/labelcl.h>
-#include <asm//sn/sn_sal.h>
-#include <asm/sn/addrs.h>
-#include <asm/sn/ioconfig_bus.h>
-
-#define SGI_IOCONFIG_BUS "SGI-PERSISTENT PCI BUS NUMBERING"
-#define SGI_IOCONFIG_BUS_VERSION "1.0"
-
-/*
- * Some Global definitions.
- */
-devfs_handle_t ioconfig_bus_handle = NULL;
-unsigned long ioconfig_bus_debug = 0;
-
-#ifdef IOCONFIG_BUS_DEBUG
-#define DBG(x...) printk(x)
-#else
-#define DBG(x...)
-#endif
-
-u64 ioconfig_file = 0;
-u64 ioconfig_file_size = 0;
-u64 ioconfig_activated = 0;
-char ioconfig_kernopts[128];
-
-/*
- * For debugging purpose .. hardcode a table ..
- */
-struct ascii_moduleid *ioconfig_bus_table;
-u64 ioconfig_bus_table_size = 0;
-
-
-int free_entry = 0;
-int new_entry = 0;
-
-int next_basebus_number = 0;
-
-void
-ioconfig_get_busnum(char *io_moduleid, int *bus_num)
-{
- struct ascii_moduleid *temp;
- int index;
-
- DBG("ioconfig_get_busnum io_moduleid %s\n", io_moduleid);
-
- *bus_num = -1;
- temp = ioconfig_bus_table;
- for (index = 0; index < free_entry; temp++, index++) {
- if ( (io_moduleid[0] == temp->io_moduleid[0]) &&
- (io_moduleid[1] == temp->io_moduleid[1]) &&
- (io_moduleid[2] == temp->io_moduleid[2]) &&
- (io_moduleid[4] == temp->io_moduleid[4]) &&
- (io_moduleid[5] == temp->io_moduleid[5]) ) {
- *bus_num = index * 0x10;
- return;
- }
- }
-
- /*
- * New IO Brick encountered.
- */
- if (((int)io_moduleid[0]) == 0) {
- DBG("ioconfig_get_busnum: Invalid Module Id given %s\n", io_moduleid);
- return;
- }
-
- io_moduleid[3] = '#';
- strcpy((char *)&(ioconfig_bus_table[free_entry].io_moduleid), io_moduleid);
- *bus_num = free_entry * 0x10;
- free_entry++;
-}
-
-void
-dump_ioconfig_table()
-{
-
- int index = 0;
- struct ascii_moduleid *temp;
-
- temp = ioconfig_bus_table;
- while (index < free_entry) {
- DBG("ASSCI Module ID %s\n", temp->io_moduleid);
- temp++;
- index++;
- }
-}
-
-/*
- * nextline
- * This routine returns the nextline in the buffer.
- */
-int nextline(char *buffer, char **next, char *line)
-{
-
- char *temp;
-
- if (buffer[0] == 0x0) {
- return(0);
- }
-
- temp = buffer;
- while (*temp != 0) {
- *line = *temp;
- if (*temp != '\n'){
- *line = *temp;
- temp++; line++;
- } else
- break;
- }
-
- if (*temp == 0)
- *next = temp;
- else
- *next = ++temp;
-
- return(1);
-}
-
-/*
- * build_pcibus_name
- * This routine parses the ioconfig contents read into
- * memory by ioconfig command in EFI and builds the
- * persistent pci bus naming table.
- */
-void
-build_moduleid_table(char *file_contents, struct ascii_moduleid *table)
-{
- /*
- * Read the whole file into memory.
- */
- int rc;
- char *name;
- char *temp;
- char *next;
- char *current;
- char *line;
- struct ascii_moduleid *moduleid;
-
- line = kmalloc(256, GFP_KERNEL);
- memset(line, 0,256);
- name = kmalloc(125, GFP_KERNEL);
- memset(name, 0, 125);
- moduleid = table;
- current = file_contents;
- while (nextline(current, &next, line)){
-
- DBG("current 0x%lx next 0x%lx\n", current, next);
-
- temp = line;
- /*
- * Skip all leading Blank lines ..
- */
- while (isspace(*temp))
- if (*temp != '\n')
- temp++;
- else
- break;
-
- if (*temp == '\n') {
- current = next;
- memset(line, 0, 256);
- continue;
- }
-
- /*
- * Skip comment lines
- */
- if (*temp == '#') {
- current = next;
- memset(line, 0, 256);
- continue;
- }
-
- /*
- * Get the next free entry in the table.
- */
- rc = sscanf(temp, "%s", name);
- strcpy(&moduleid->io_moduleid[0], name);
- DBG("Found %s\n", name);
- moduleid++;
- free_entry++;
- current = next;
- memset(line, 0, 256);
- }
-
- new_entry = free_entry;
- kfree(line);
- kfree(name);
-
- return;
-}
-
-void
-ioconfig_bus_init(void)
-{
-
- struct ia64_sal_retval ret_stuff;
- u64 *temp;
- int cnode;
-
- DBG("ioconfig_bus_init called.\n");
-
- for (cnode = 0; cnode < numnodes; cnode++) {
- nasid_t nasid;
- /*
- * Make SAL call to get the address of the bus configuration table.
- */
- ret_stuff.status = (uint64_t)0;
- ret_stuff.v0 = (uint64_t)0;
- ret_stuff.v1 = (uint64_t)0;
- ret_stuff.v2 = (uint64_t)0;
- nasid = COMPACT_TO_NASID_NODEID(cnode);
- SAL_CALL(ret_stuff, SN_SAL_BUS_CONFIG, 0, nasid, 0, 0, 0, 0, 0);
- temp = (u64 *)TO_NODE_CAC(nasid, ret_stuff.v0);
- ioconfig_file = *temp;
- DBG("ioconfig_bus_init: Nasid %d ret_stuff.v0 0x%lx\n", nasid,
- ret_stuff.v0);
- if (ioconfig_file) {
- ioconfig_file_size = ret_stuff.v1;
- ioconfig_file = (ioconfig_file | CACHEABLE_MEM_SPACE);
- ioconfig_activated = 1;
- break;
- }
- }
-
- DBG("ioconfig_bus_init: ret_stuff.v0 %p ioconfig_file %p %d\n",
- ret_stuff.v0, (void *)ioconfig_file, (int)ioconfig_file_size);
-
- ioconfig_bus_table = kmalloc( 512, GFP_KERNEL );
- memset(ioconfig_bus_table, 0, 512);
-
- /*
- * If ioconfig options are given on the bootline .. take it.
- */
- if (*ioconfig_kernopts != '\0') {
- /*
- * ioconfig="..." kernel options given.
- */
- DBG("ioconfig_bus_init: Kernel Options given.\n");
- (void) build_moduleid_table((char *)ioconfig_kernopts, ioconfig_bus_table);
- (void) dump_ioconfig_table(ioconfig_bus_table);
- return;
- }
-
- if (ioconfig_activated) {
- DBG("ioconfig_bus_init: ioconfig file given.\n");
- (void) build_moduleid_table((char *)ioconfig_file, ioconfig_bus_table);
- (void) dump_ioconfig_table(ioconfig_bus_table);
- } else {
- DBG("ioconfig_bus_init: ioconfig command not executed in prom\n");
- }
-
-}
-
-void
-ioconfig_bus_new_entries(void)
-{
-
-
- int index = 0;
- struct ascii_moduleid *temp;
-
- if ((ioconfig_activated) && (free_entry > new_entry)) {
- printk("### Please add the following new IO Bricks Module ID \n");
- printk("### to your Persistent Bus Numbering Config File\n");
- } else
- return;
-
- index = new_entry;
- temp = &ioconfig_bus_table[index];
- while (index < free_entry) {
- printk("%s\n", temp);
- temp++;
- index++;
- }
- printk("### End\n");
-
-}
-static int ioconfig_bus_ioctl(struct inode * inode, struct file * file,
- unsigned int cmd, unsigned long arg)
-{
-
- struct ioconfig_parm parm;
-
- /*
- * Copy in the parameters.
- */
- copy_from_user(&parm, (char *)arg, sizeof(struct ioconfig_parm));
- parm.number = free_entry - new_entry;
- parm.ioconfig_activated = ioconfig_activated;
- copy_to_user((char *)arg, &parm, sizeof(struct ioconfig_parm));
- copy_to_user((char *)parm.buffer, &ioconfig_bus_table[new_entry], sizeof(struct ascii_moduleid) * (free_entry - new_entry));
-
- return 0;
-}
-
-/*
- * ioconfig_bus_open - Opens the special device node "/dev/hw/.ioconfig_bus".
- */
-static int ioconfig_bus_open(struct inode * inode, struct file * filp)
-{
- if (ioconfig_bus_debug) {
- DBG("ioconfig_bus_open called.\n");
- }
-
- return(0);
-
-}
-
-/*
- * ioconfig_bus_close - Closes the special device node "/dev/hw/.ioconfig_bus".
- */
-static int ioconfig_bus_close(struct inode * inode, struct file * filp)
-{
-
- if (ioconfig_bus_debug) {
- DBG("ioconfig_bus_close called.\n");
- }
-
- return(0);
-}
-
-struct file_operations ioconfig_bus_fops = {
- ioctl:ioconfig_bus_ioctl,
- open:ioconfig_bus_open, /* open */
- release:ioconfig_bus_close /* release */
-};
-
-
-/*
- * init_ifconfig_bus() - Boot time initialization. Ensure that it is called
- * after devfs has been initialized.
- *
- */
-int init_ioconfig_bus(void)
-{
- ioconfig_bus_handle = NULL;
- ioconfig_bus_handle = hwgraph_register(hwgraph_root, ".ioconfig_bus",
- 0, DEVFS_FL_AUTO_DEVNUM,
- 0, 0,
- S_IFCHR | S_IRUSR | S_IWUSR | S_IRGRP, 0, 0,
- &ioconfig_bus_fops, NULL);
-
- if (ioconfig_bus_handle == NULL) {
- panic("Unable to create SGI PERSISTENT BUS NUMBERING Driver.\n");
- }
-
- return(0);
-
-}
-
-static int __init ioconfig_bus_setup (char *str)
-{
-
- char *temp;
-
- DBG("ioconfig_bus_setup: Kernel Options %s\n", str);
-
- temp = (char *)ioconfig_kernopts;
- memset(temp, 0, 128);
- while ( (*str != '\0') && !isspace (*str) ) {
- if (*str == ',') {
- *temp = '\n';
- temp++;
- str++;
- continue;
- }
- *temp = *str;
- temp++;
- str++;
- }
-
- return(0);
-
-}
-__setup("ioconfig=", ioconfig_bus_setup);
+++ /dev/null
-/* $Id$
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Copyright (C) 1992 - 1997, 2000-2002 Silicon Graphics, Inc. All rights reserved.
- */
-
-
-#include <linux/types.h>
-#include <linux/config.h>
-#include <linux/ctype.h>
-#include <asm/sn/sgi.h>
-#include <asm/sn/sn_sal.h>
-#include <asm/sn/io.h>
-#include <asm/sn/sn_cpuid.h>
-#include <asm/sn/iograph.h>
-#include <asm/sn/invent.h>
-#include <asm/sn/hcl.h>
-#include <asm/sn/labelcl.h>
-#include <asm/sn/klconfig.h>
-#include <asm/sn/nodepda.h>
-#include <asm/sn/module.h>
-#include <asm/sn/router.h>
-#include <asm/sn/xtalk/xbow.h>
-
-#define printf printk
-int hasmetarouter;
-
-#define LDEBUG 0
-#define NIC_UNKNOWN ((nic_t) -1)
-
-#undef DEBUG_KLGRAPH
-#ifdef DEBUG_KLGRAPH
-#define DBG(x...) printk(x)
-#else
-#define DBG(x...)
-#endif /* DEBUG_KLGRAPH */
-
-static void sort_nic_names(lboard_t *) ;
-
-u64 klgraph_addr[MAX_COMPACT_NODES];
-
-lboard_t *
-find_lboard(lboard_t *start, unsigned char brd_type)
-{
- /* Search all boards stored on this node. */
- while (start) {
- if (start->brd_type == brd_type)
- return start;
- start = KLCF_NEXT(start);
- }
-
- /* Didn't find it. */
- return (lboard_t *)NULL;
-}
-
-lboard_t *
-find_lboard_class(lboard_t *start, unsigned char brd_type)
-{
- /* Search all boards stored on this node. */
- while (start) {
- if (KLCLASS(start->brd_type) == KLCLASS(brd_type))
- return start;
- start = KLCF_NEXT(start);
- }
-
- /* Didn't find it. */
- return (lboard_t *)NULL;
-}
-
-klinfo_t *
-find_component(lboard_t *brd, klinfo_t *kli, unsigned char struct_type)
-{
- int index, j;
-
- if (kli == (klinfo_t *)NULL) {
- index = 0;
- } else {
- for (j = 0; j < KLCF_NUM_COMPS(brd); j++) {
- if (kli == KLCF_COMP(brd, j))
- break;
- }
- index = j;
- if (index == KLCF_NUM_COMPS(brd)) {
- DBG("find_component: Bad pointer: 0x%p\n", kli);
- return (klinfo_t *)NULL;
- }
- index++; /* next component */
- }
-
- for (; index < KLCF_NUM_COMPS(brd); index++) {
- kli = KLCF_COMP(brd, index);
- DBG("find_component: brd %p kli %p request type = 0x%x kli type 0x%x\n", brd, kli, kli->struct_type, KLCF_COMP_TYPE(kli));
- if (KLCF_COMP_TYPE(kli) == struct_type)
- return kli;
- }
-
- /* Didn't find it. */
- return (klinfo_t *)NULL;
-}
-
-klinfo_t *
-find_first_component(lboard_t *brd, unsigned char struct_type)
-{
- return find_component(brd, (klinfo_t *)NULL, struct_type);
-}
-
-lboard_t *
-find_lboard_modslot(lboard_t *start, moduleid_t mod, slotid_t slot)
-{
- /* Search all boards stored on this node. */
- while (start) {
- if (MODULE_MATCH(start->brd_module, mod) &&
- (start->brd_slot == slot))
- return start;
- start = KLCF_NEXT(start);
- }
-
- /* Didn't find it. */
- return (lboard_t *)NULL;
-}
-
-lboard_t *
-find_lboard_module(lboard_t *start, moduleid_t mod)
-{
- /* Search all boards stored on this node. */
- while (start) {
- if (MODULE_MATCH(start->brd_module, mod))
- return start;
- start = KLCF_NEXT(start);
- }
-
- /* Didn't find it. */
- return (lboard_t *)NULL;
-}
-
-lboard_t *
-find_lboard_module_class(lboard_t *start, moduleid_t mod,
- unsigned char brd_type)
-{
- while (start) {
-
- DBG("find_lboard_module_class: lboard 0x%p, start->brd_module 0x%x, mod 0x%x, start->brd_type 0x%x, brd_type 0x%x\n", start, start->brd_module, mod, start->brd_type, brd_type);
-
- if (MODULE_MATCH(start->brd_module, mod) &&
- (KLCLASS(start->brd_type) == KLCLASS(brd_type)))
- return start;
- start = KLCF_NEXT(start);
- }
-
- /* Didn't find it. */
- return (lboard_t *)NULL;
-}
-
-
-/*
- * Convert a NIC name to a name for use in the hardware graph.
- */
-void
-nic_name_convert(char *old_name, char *new_name)
-{
- int i;
- char c;
- char *compare_ptr;
-
- if ((old_name[0] == '\0') || (old_name[1] == '\0')) {
- strcpy(new_name, EDGE_LBL_XWIDGET);
- } else {
- for (i = 0; i < strlen(old_name); i++) {
- c = old_name[i];
-
- if (isalpha(c))
- new_name[i] = tolower(c);
- else if (isdigit(c))
- new_name[i] = c;
- else
- new_name[i] = '_';
- }
- new_name[i] = '\0';
- }
-
- /* XXX -
- * Since a bunch of boards made it out with weird names like
- * IO6-fibbbed and IO6P2, we need to look for IO6 in a name and
- * replace it with "baseio" to avoid confusion in the field.
- * We also have to make sure we don't report media_io instead of
- * baseio.
- */
-
- /* Skip underscores at the beginning of the name */
- for (compare_ptr = new_name; (*compare_ptr) == '_'; compare_ptr++)
- ;
-
- /*
- * Check for some names we need to replace. Early boards
- * had junk following the name so check only the first
- * characters.
- */
- if (!strncmp(new_name, "io6", 3) ||
- !strncmp(new_name, "mio", 3) ||
- !strncmp(new_name, "media_io", 8))
- strcpy(new_name, "baseio");
- else if (!strncmp(new_name, "divo", 4))
- strcpy(new_name, "divo") ;
-
-}
-
-/* Check if the given board corresponds to the global
- * master io6
- */
-int
-is_master_baseio(nasid_t nasid,moduleid_t module,slotid_t slot)
-{
- lboard_t *board;
-
-#if defined(CONFIG_IA64_SGI_SN1) || defined(CONFIG_IA64_GENERIC)
-/* If this works then look for callers of is_master_baseio()
- * (e.g. iograph.c) and let them pass in a slot if they want
- */
- board = find_lboard_module((lboard_t *)KL_CONFIG_INFO(nasid), module);
-#else
- board = find_lboard_modslot((lboard_t *)KL_CONFIG_INFO(nasid), module, slot);
-#endif
-
-#ifndef _STANDALONE
- {
- cnodeid_t cnode = NASID_TO_COMPACT_NODEID(nasid);
-
- if (!board && (NODEPDA(cnode)->xbow_peer != INVALID_NASID))
-#if defined(CONFIG_IA64_SGI_SN1) || defined(CONFIG_IA64_GENERIC)
- board = find_lboard_module((lboard_t *)
- KL_CONFIG_INFO(NODEPDA(cnode)->xbow_peer),
- module);
-#else
- board = find_lboard_modslot((lboard_t *)
- KL_CONFIG_INFO(NODEPDA(cnode)->xbow_peer),
- module, slot);
-#endif
- }
-#endif
- if (!board)
- return(0);
- return(board->brd_flags & GLOBAL_MASTER_IO6);
-}
-/*
- * Find the lboard structure and get the board name.
- * If we can't find the structure or it's too low a revision,
- * use default name.
- */
-lboard_t *
-get_board_name(nasid_t nasid, moduleid_t mod, slotid_t slot, char *name)
-{
- lboard_t *brd;
-
- brd = find_lboard_modslot((lboard_t *)KL_CONFIG_INFO(nasid),
- mod, slot);
-
-#ifndef _STANDALONE
- {
- cnodeid_t cnode = NASID_TO_COMPACT_NODEID(nasid);
-
- if (!brd && (NODEPDA(cnode)->xbow_peer != INVALID_NASID))
- brd = find_lboard_modslot((lboard_t *)
- KL_CONFIG_INFO(NODEPDA(cnode)->xbow_peer),
- mod, slot);
- }
-#endif
-
- if (!brd || (brd->brd_sversion < 2)) {
- strcpy(name, EDGE_LBL_XWIDGET);
- } else {
- nic_name_convert(brd->brd_name, name);
- }
-
- /*
- * PV # 540860
- * If the name is not 'baseio'
- * get the lowest of all the names in the nic string.
- * This is needed for boards like divo, which can have
- * a bunch of daughter cards, but would like to be called
- * divo. We could do this for baseio
- * but it has some special case names that we would not
- * like to disturb at this point.
- */
-
- /* gfx boards don't need any of this name scrambling */
- if (brd && (KLCLASS(brd->brd_type) == KLCLASS_GFX)) {
- return(brd);
- }
-
- if (!(!strcmp(name, "baseio") )) {
- if (brd) {
- sort_nic_names(brd) ;
- /* Convert to small case, '-' to '_' etc */
- nic_name_convert(brd->brd_name, name) ;
- }
- }
-
- return(brd);
-}
-
-/*
- * get_actual_nasid
- *
- * Completely disabled brds have their klconfig on
- * some other nasid as they have no memory. But their
- * actual nasid is hidden in the klconfig. Use this
- * routine to get it. Works for normal boards too.
- */
-nasid_t
-get_actual_nasid(lboard_t *brd)
-{
- klhub_t *hub ;
-
- if (!brd)
- return INVALID_NASID ;
-
- /* find out if we are a completely disabled brd. */
-
- hub = (klhub_t *)find_first_component(brd, KLSTRUCT_HUB);
- if (!hub)
- return INVALID_NASID ;
- if (!(hub->hub_info.flags & KLINFO_ENABLE)) /* disabled node brd */
- return hub->hub_info.physid ;
- else
- return brd->brd_nasid ;
-}
-
-int
-xbow_port_io_enabled(nasid_t nasid, int link)
-{
- lboard_t *brd;
- klxbow_t *xbow_p;
-
- /*
- * look for boards that might contain an xbow or xbridge
- */
- brd = find_lboard((lboard_t *)KL_CONFIG_INFO(nasid), KLTYPE_IOBRICK_XBOW);
- if (brd == NULL) return 0;
-
- if ((xbow_p = (klxbow_t *)find_component(brd, NULL, KLSTRUCT_XBOW))
- == NULL)
- return 0;
-
- if (!XBOW_PORT_TYPE_IO(xbow_p, link) || !XBOW_PORT_IS_ENABLED(xbow_p, link))
- return 0;
-
- DBG("xbow_port_io_enabled: brd 0x%p xbow_p 0x%p \n", brd, xbow_p);
-
- return 1;
-}
-
-void
-board_to_path(lboard_t *brd, char *path)
-{
- moduleid_t modnum;
- char *board_name;
-
- ASSERT(brd);
-
- switch (KLCLASS(brd->brd_type)) {
-
- case KLCLASS_NODE:
- board_name = EDGE_LBL_NODE;
- break;
- case KLCLASS_ROUTER:
- if (brd->brd_type == KLTYPE_META_ROUTER) {
- board_name = EDGE_LBL_META_ROUTER;
- hasmetarouter++;
- } else if (brd->brd_type == KLTYPE_REPEATER_ROUTER) {
- board_name = EDGE_LBL_REPEATER_ROUTER;
- hasmetarouter++;
- } else
- board_name = EDGE_LBL_ROUTER;
- break;
- case KLCLASS_MIDPLANE:
- board_name = EDGE_LBL_MIDPLANE;
- break;
- case KLCLASS_IO:
- board_name = EDGE_LBL_IO;
- break;
- case KLCLASS_IOBRICK:
- if (brd->brd_type == KLTYPE_PBRICK)
- board_name = EDGE_LBL_PBRICK;
- else if (brd->brd_type == KLTYPE_IBRICK)
- board_name = EDGE_LBL_IBRICK;
- else if (brd->brd_type == KLTYPE_XBRICK)
- board_name = EDGE_LBL_XBRICK;
- else
- board_name = EDGE_LBL_IOBRICK;
- break;
- default:
- board_name = EDGE_LBL_UNKNOWN;
- }
-
- modnum = brd->brd_module;
-
- ASSERT(modnum != MODULE_UNKNOWN && modnum != INVALID_MODULE);
-#ifdef __ia64
- {
- char buffer[16];
- memset(buffer, 0, 16);
- format_module_id(buffer, modnum, MODULE_FORMAT_BRIEF);
- sprintf(path, EDGE_LBL_MODULE "/%s/%s", buffer, board_name);
- }
-#else
- sprintf(path, "%H/%s", modnum, board_name);
-#endif
-}
-
-/*
- * Get the module number for a NASID.
- */
-moduleid_t
-get_module_id(nasid_t nasid)
-{
- lboard_t *brd;
-
- brd = find_lboard((lboard_t *)KL_CONFIG_INFO(nasid), KLTYPE_SNIA);
-
- if (!brd)
- return INVALID_MODULE;
- else
- return brd->brd_module;
-}
-
-
-#define MHZ 1000000
-
-
-/* Get the canonical hardware graph name for the given pci component
- * on the given io board.
- */
-void
-device_component_canonical_name_get(lboard_t *brd,
- klinfo_t *component,
- char *name)
-{
- moduleid_t modnum;
- slotid_t slot;
- char board_name[20];
-
- ASSERT(brd);
-
- /* Get the module number of this board */
- modnum = brd->brd_module;
-
- /* Convert the [ CLASS | TYPE ] kind of slotid
- * into a string
- */
- slot = brd->brd_slot;
- ASSERT(modnum != MODULE_UNKNOWN && modnum != INVALID_MODULE);
-
- /* Get the io board name */
- if (!brd || (brd->brd_sversion < 2)) {
- strcpy(name, EDGE_LBL_XWIDGET);
- } else {
- nic_name_convert(brd->brd_name, board_name);
- }
-
- /* Give out the canonical name of the pci device*/
- sprintf(name,
- "/dev/hw/"EDGE_LBL_MODULE "/%x/"EDGE_LBL_SLOT"/%s/"
- EDGE_LBL_PCI"/%d",
- modnum, board_name,KLCF_BRIDGE_W_ID(component));
-}
-
-/*
- * Get the serial number of the main component of a board
- * Returns 0 if a valid serial number is found
- * 1 otherwise.
- * Assumptions: Nic manufacturing string has the following format
- * *Serial:<serial_number>;*
- */
-static int
-component_serial_number_get(lboard_t *board,
- klconf_off_t mfg_nic_offset,
- char *serial_number,
- char *key_pattern)
-{
-
- char *mfg_nic_string;
- char *serial_string,*str;
- int i;
- char *serial_pattern = "Serial:";
-
- /* We have an error on a null mfg nic offset */
- if (!mfg_nic_offset)
- return(1);
- /* Get the hub's manufacturing nic information
- * which is in the form of a pre-formatted string
- */
- mfg_nic_string =
- (char *)NODE_OFFSET_TO_K0(NASID_GET(board),
- mfg_nic_offset);
- /* There is no manufacturing nic info */
- if (!mfg_nic_string)
- return(1);
-
- str = mfg_nic_string;
- /* Look for the key pattern first (if it is specified)
- * and then print the serial number corresponding to that.
- */
- if (strcmp(key_pattern,"") &&
- !(str = strstr(mfg_nic_string,key_pattern)))
- return(1);
-
- /* There is no serial number info in the manufacturing
- * nic info
- */
- if (!(serial_string = strstr(str,serial_pattern)))
- return(1);
-
- serial_string = serial_string + strlen(serial_pattern);
- /* Copy the serial number information from the klconfig */
- i = 0;
- while (serial_string[i] != ';') {
- serial_number[i] = serial_string[i];
- i++;
- }
- serial_number[i] = 0;
-
- return(0);
-}
-/*
- * Get the serial number of a board
- * Returns 0 if a valid serial number is found
- * 1 otherwise.
- */
-
-int
-board_serial_number_get(lboard_t *board,char *serial_number)
-{
- ASSERT(board && serial_number);
- if (!board || !serial_number)
- return(1);
-
- strcpy(serial_number,"");
- switch(KLCLASS(board->brd_type)) {
- case KLCLASS_CPU: { /* Node board */
- klhub_t *hub;
-
- /* Get the hub component information */
- hub = (klhub_t *)find_first_component(board,
- KLSTRUCT_HUB);
- /* If we don't have a hub component on an IP27
- * then we have a weird klconfig.
- */
- if (!hub)
- return(1);
- /* Get the serial number information from
- * the hub's manufacturing nic info
- */
- if (component_serial_number_get(board,
- hub->hub_mfg_nic,
- serial_number,
-#if defined(CONFIG_IA64_SGI_SN1) || defined(CONFIG_IA64_GENERIC)
- "IP37"))
-#else
- "IP27"))
- /* Try with IP31 key if IP27 key fails */
- if (component_serial_number_get(board,
- hub->hub_mfg_nic,
- serial_number,
- "IP31"))
-#endif /* CONFIG_IA64_SGI_SN1 */
- return(1);
- break;
- }
- case KLCLASS_IO: { /* IO board */
- if (KLTYPE(board->brd_type) == KLTYPE_TPU) {
- /* Special case for TPU boards */
- kltpu_t *tpu;
-
- /* Get the tpu component information */
- tpu = (kltpu_t *)find_first_component(board,
- KLSTRUCT_TPU);
- /* If we don't have a tpu component on a tpu board
- * then we have a weird klconfig.
- */
- if (!tpu)
- return(1);
- /* Get the serial number information from
- * the tpu's manufacturing nic info
- */
- if (component_serial_number_get(board,
- tpu->tpu_mfg_nic,
- serial_number,
- ""))
- return(1);
- break;
- } else if ((KLTYPE(board->brd_type) == KLTYPE_GSN_A) ||
- (KLTYPE(board->brd_type) == KLTYPE_GSN_B)) {
- /* Special case for GSN boards */
- klgsn_t *gsn;
-
- /* Get the gsn component information */
- gsn = (klgsn_t *)find_first_component(board,
- ((KLTYPE(board->brd_type) == KLTYPE_GSN_A) ?
- KLSTRUCT_GSN_A : KLSTRUCT_GSN_B));
- /* If we don't have a gsn component on a gsn board
- * then we have a weird klconfig.
- */
- if (!gsn)
- return(1);
- /* Get the serial number information from
- * the gsn's manufacturing nic info
- */
- if (component_serial_number_get(board,
- gsn->gsn_mfg_nic,
- serial_number,
- ""))
- return(1);
- break;
- } else {
- klbri_t *bridge;
-
- /* Get the bridge component information */
- bridge = (klbri_t *)find_first_component(board,
- KLSTRUCT_BRI);
- /* If we don't have a bridge component on an IO board
- * then we have a weird klconfig.
- */
- if (!bridge)
- return(1);
- /* Get the serial number information from
- * the bridge's manufacturing nic info
- */
- if (component_serial_number_get(board,
- bridge->bri_mfg_nic,
- serial_number,
- ""))
- return(1);
- break;
- }
- }
- case KLCLASS_ROUTER: { /* Router board */
- klrou_t *router;
-
- /* Get the router component information */
- router = (klrou_t *)find_first_component(board,
- KLSTRUCT_ROU);
- /* If we don't have a router component on a router board
- * then we have a weird klconfig.
- */
- if (!router)
- return(1);
- /* Get the serial number information from
- * the router's manufacturing nic info
- */
- if (component_serial_number_get(board,
- router->rou_mfg_nic,
- serial_number,
- ""))
- return(1);
- break;
- }
- case KLCLASS_GFX: { /* Gfx board */
- klgfx_t *graphics;
-
- /* Get the graphics component information */
- graphics = (klgfx_t *)find_first_component(board, KLSTRUCT_GFX);
- /* If we don't have a gfx component on a gfx board
- * then we have a weird klconfig.
- */
- if (!graphics)
- return(1);
- /* Get the serial number information from
- * the graphics's manufacturing nic info
- */
- if (component_serial_number_get(board,
- graphics->gfx_mfg_nic,
- serial_number,
- ""))
- return(1);
- break;
- }
- default:
- strcpy(serial_number,"");
- break;
- }
- return(0);
-}
-
-#include "asm/sn/sn_private.h"
-
-xwidgetnum_t
-nodevertex_widgetnum_get(devfs_handle_t node_vtx)
-{
- hubinfo_t hubinfo_p;
-
- hwgraph_info_get_LBL(node_vtx, INFO_LBL_NODE_INFO,
- (arbitrary_info_t *) &hubinfo_p);
- return(hubinfo_p->h_widgetid);
-}
-
-devfs_handle_t
-nodevertex_xbow_peer_get(devfs_handle_t node_vtx)
-{
- hubinfo_t hubinfo_p;
- nasid_t xbow_peer_nasid;
- cnodeid_t xbow_peer;
-
- hwgraph_info_get_LBL(node_vtx, INFO_LBL_NODE_INFO,
- (arbitrary_info_t *) &hubinfo_p);
- xbow_peer_nasid = hubinfo_p->h_nodepda->xbow_peer;
- if(xbow_peer_nasid == INVALID_NASID)
- return ( (devfs_handle_t)-1);
- xbow_peer = NASID_TO_COMPACT_NODEID(xbow_peer_nasid);
- return(NODEPDA(xbow_peer)->node_vertex);
-}
-
-/* NIC Sorting Support */
-
-#define MAX_NICS_PER_STRING 32
-#define MAX_NIC_NAME_LEN 32
-
-static char *
-get_nic_string(lboard_t *lb)
-{
- int i;
- klinfo_t *k = NULL ;
- klconf_off_t mfg_off = 0 ;
- char *mfg_nic = NULL ;
-
- for (i = 0; i < KLCF_NUM_COMPS(lb); i++) {
- k = KLCF_COMP(lb, i) ;
- switch(k->struct_type) {
- case KLSTRUCT_BRI:
- mfg_off = ((klbri_t *)k)->bri_mfg_nic ;
- break ;
-
- case KLSTRUCT_HUB:
- mfg_off = ((klhub_t *)k)->hub_mfg_nic ;
- break ;
-
- case KLSTRUCT_ROU:
- mfg_off = ((klrou_t *)k)->rou_mfg_nic ;
- break ;
-
- case KLSTRUCT_GFX:
- mfg_off = ((klgfx_t *)k)->gfx_mfg_nic ;
- break ;
-
- case KLSTRUCT_TPU:
- mfg_off = ((kltpu_t *)k)->tpu_mfg_nic ;
- break ;
-
- case KLSTRUCT_GSN_A:
- case KLSTRUCT_GSN_B:
- mfg_off = ((klgsn_t *)k)->gsn_mfg_nic ;
- break ;
-
- case KLSTRUCT_XTHD:
- mfg_off = ((klxthd_t *)k)->xthd_mfg_nic ;
- break;
-
- default:
- mfg_off = 0 ;
- break ;
- }
- if (mfg_off)
- break ;
- }
-
- if ((mfg_off) && (k))
- mfg_nic = (char *)NODE_OFFSET_TO_K0(k->nasid, mfg_off) ;
-
- return mfg_nic ;
-}
-
-char *
-get_first_string(char **ptrs, int n)
-{
- int i ;
- char *tmpptr ;
-
- if ((ptrs == NULL) || (n == 0))
- return NULL ;
-
- tmpptr = ptrs[0] ;
-
- if (n == 1)
- return tmpptr ;
-
- for (i = 0 ; i < n ; i++) {
- if (strcmp(tmpptr, ptrs[i]) > 0)
- tmpptr = ptrs[i] ;
- }
-
- return tmpptr ;
-}
-
-int
-get_ptrs(char *idata, char **ptrs, int n, char *label)
-{
- int i = 0 ;
- char *tmp = idata ;
-
- if ((ptrs == NULL) || (idata == NULL) || (label == NULL) || (n == 0))
- return 0 ;
-
- while ( (tmp = strstr(tmp, label)) ){
- tmp += strlen(label) ;
- /* check for empty name field, and last NULL ptr */
- if ((i < (n-1)) && (*tmp != ';')) {
- ptrs[i++] = tmp ;
- }
- }
-
- ptrs[i] = NULL ;
-
- return i ;
-}
-
-/*
- * sort_nic_names
- *
- * Does not really do sorting. Find the alphabetically lowest
- * name among all the nic names found in a nic string.
- *
- * Return:
- * Nothing
- *
- * Side Effects:
- *
- * lb->brd_name gets the new name found
- */
-
-static void
-sort_nic_names(lboard_t *lb)
-{
- char *nic_str ;
- char *ptrs[MAX_NICS_PER_STRING] ;
- char name[MAX_NIC_NAME_LEN] ;
- char *tmp, *tmp1 ;
-
- *name = 0 ;
-
- /* Get the nic pointer from the lb */
-
- if ((nic_str = get_nic_string(lb)) == NULL)
- return ;
-
- tmp = get_first_string(ptrs,
- get_ptrs(nic_str, ptrs, MAX_NICS_PER_STRING, "Name:")) ;
-
- if (tmp == NULL)
- return ;
-
- if ( (tmp1 = strchr(tmp, ';')) ){
- strlcpy(name, tmp, tmp1-tmp) ;
- } else {
- strlcpy(name, tmp, (sizeof(name))) ;
- }
-
- strlcpy(lb->brd_name, name, sizeof(lb->brd_name)) ;
-}
-
-
-
-char brick_types[MAX_BRICK_TYPES + 1] = "crikxdp789012345";
-
-#if defined(CONFIG_IA64_SGI_SN1) || defined(CONFIG_IA64_GENERIC)
-
-/*
- * Format a module id for printing.
- */
-void
-format_module_id(char *buffer, moduleid_t m, int fmt)
-{
- int rack, position;
- char brickchar;
-
- rack = MODULE_GET_RACK(m);
- ASSERT(MODULE_GET_BTYPE(m) < MAX_BRICK_TYPES);
- brickchar = MODULE_GET_BTCHAR(m);
- position = MODULE_GET_BPOS(m);
-
- if (fmt == MODULE_FORMAT_BRIEF) {
- /* Brief module number format, eg. 002c15 */
-
- /* Decompress the rack number */
- *buffer++ = '0' + RACK_GET_CLASS(rack);
- *buffer++ = '0' + RACK_GET_GROUP(rack);
- *buffer++ = '0' + RACK_GET_NUM(rack);
-
- /* Add the brick type */
- *buffer++ = brickchar;
- }
- else if (fmt == MODULE_FORMAT_LONG) {
- /* Fuller hwgraph format, eg. rack/002/bay/15 */
-
- strcpy(buffer, EDGE_LBL_RACK "/"); buffer += strlen(buffer);
-
- *buffer++ = '0' + RACK_GET_CLASS(rack);
- *buffer++ = '0' + RACK_GET_GROUP(rack);
- *buffer++ = '0' + RACK_GET_NUM(rack);
-
- strcpy(buffer, "/" EDGE_LBL_RPOS "/"); buffer += strlen(buffer);
- }
-
- /* Add the bay position, using at least two digits */
- if (position < 10)
- *buffer++ = '0';
- sprintf(buffer, "%d", position);
-
-}
-
-/*
- * Parse a module id, in either brief or long form.
- * Returns < 0 on error.
- * The long form does not include a brick type, so it defaults to 0 (CBrick)
- */
-int
-parse_module_id(char *buffer)
-{
- unsigned int v, rack, bay, type, form;
- moduleid_t m;
- char c;
-
- if (strstr(buffer, EDGE_LBL_RACK "/") == buffer) {
- form = MODULE_FORMAT_LONG;
- buffer += strlen(EDGE_LBL_RACK "/");
-
- /* A long module ID must be exactly 5 non-template chars. */
- if (strlen(buffer) != strlen("/" EDGE_LBL_RPOS "/") + 5)
- return -1;
- }
- else {
- form = MODULE_FORMAT_BRIEF;
-
- /* A brief module id must be exactly 6 characters */
- if (strlen(buffer) != 6)
- return -2;
- }
-
- /* The rack number must be exactly 3 digits */
- if (!(isdigit(buffer[0]) && isdigit(buffer[1]) && isdigit(buffer[2])))
- return -3;
-
- rack = 0;
- v = *buffer++ - '0';
- if (v > RACK_CLASS_MASK(rack) >> RACK_CLASS_SHFT(rack))
- return -4;
- RACK_ADD_CLASS(rack, v);
-
- v = *buffer++ - '0';
- if (v > RACK_GROUP_MASK(rack) >> RACK_GROUP_SHFT(rack))
- return -5;
- RACK_ADD_GROUP(rack, v);
-
- v = *buffer++ - '0';
- /* rack numbers are 1-based */
- if (v-1 > RACK_NUM_MASK(rack) >> RACK_NUM_SHFT(rack))
- return -6;
- RACK_ADD_NUM(rack, v);
-
- if (form == MODULE_FORMAT_BRIEF) {
- /* Next should be a module type character. Accept ucase or lcase. */
- c = *buffer++;
- if (!isalpha(c))
- return -7;
-
- /* strchr() returns a pointer into brick_types[], or NULL */
- type = (unsigned int)(strchr(brick_types, tolower(c)) - brick_types);
- if (type > MODULE_BTYPE_MASK >> MODULE_BTYPE_SHFT)
- return -8;
- }
- else {
- /* Hardcode the module type, and skip over the boilerplate */
- type = MODULE_CBRICK;
-
- if (strstr(buffer, "/" EDGE_LBL_RPOS "/") != buffer)
- return -9;
-
- buffer += strlen("/" EDGE_LBL_RPOS "/");
- }
-
- /* The bay number is last. Make sure it's exactly two digits */
-
- if (!(isdigit(buffer[0]) && isdigit(buffer[1]) && !buffer[2]))
- return -10;
-
- bay = 10 * (buffer[0] - '0') + (buffer[1] - '0');
-
- if (bay > MODULE_BPOS_MASK >> MODULE_BPOS_SHFT)
- return -11;
-
- m = RBT_TO_MODULE(rack, bay, type);
-
- /* avoid sign extending the moduleid_t */
- return (int)(unsigned short)m;
-}
-
-#else /* CONFIG_IA64_SGI_SN1 */
-
-/*
- * Format a module id for printing.
- */
-void
-format_module_id(char *buffer, moduleid_t m, int fmt)
-{
- if (fmt == MODULE_FORMAT_BRIEF) {
- sprintf(buffer, "%d", m);
- }
- else if (fmt == MODULE_FORMAT_LONG) {
- sprintf(buffer, EDGE_LBL_MODULE "/%d", m);
- }
-}
-
-/*
- * Parse a module id, in either brief or long form.
- * Returns < 0 on error.
- */
-int
-parse_module_id(char *buffer)
-{
- moduleid_t m;
- char c;
-
- if (strstr(buffer, EDGE_LBL_MODULE "/") == buffer)
- buffer += strlen(EDGE_LBL_MODULE "/");
-
- for (m = 0; *buffer; buffer++) {
- c = *buffer;
- if (!isdigit(c))
- return -1;
- m = 10 * m + (c - '0');
- }
-
- /* avoid sign extending the moduleid_t */
- return (int)(unsigned short)m;
-}
-
-#endif /* CONFIG_IA64_SGI_SN1 */
-
-
+++ /dev/null
-/* $Id$
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Copyright (C) 1992 - 1997, 2000-2002 Silicon Graphics, Inc. All rights reserved.
- */
-
-/*
- * klgraph.c-
- * This file specifies the interface between the kernel and the PROM's
- * configuration data structures.
- */
-
-#include <linux/types.h>
-#include <linux/config.h>
-#include <linux/slab.h>
-#include <asm/sn/sgi.h>
-#include <asm/sn/sn_sal.h>
-#include <asm/sn/io.h>
-#include <asm/sn/iograph.h>
-#include <asm/sn/invent.h>
-#include <asm/sn/hcl.h>
-#include <asm/sn/labelcl.h>
-#include <asm/sn/kldir.h>
-#include <asm/sn/gda.h>
-#include <asm/sn/klconfig.h>
-#include <asm/sn/router.h>
-#include <asm/sn/xtalk/xbow.h>
-#include <asm/sn/hcl_util.h>
-
-/* #define KLGRAPH_DEBUG 1 */
-#ifdef KLGRAPH_DEBUG
-#define GRPRINTF(x) printk x
-#define CE_GRPANIC CE_PANIC
-#else
-#define GRPRINTF(x)
-#define CE_GRPANIC CE_PANIC
-#endif
-
-#include <asm/sn/sn_private.h>
-
-extern char arg_maxnodes[];
-extern u64 klgraph_addr[];
-
-/*
- * Support for verbose inventory via hardware graph.
- * klhwg_invent_alloc allocates the necessary size of inventory information
- * and fills in the generic information.
- */
-invent_generic_t *
-klhwg_invent_alloc(cnodeid_t cnode, int class, int size)
-{
- invent_generic_t *invent;
-
- invent = kern_malloc(size);
- if (!invent) return NULL;
-
- invent->ig_module = NODE_MODULEID(cnode);
- invent->ig_slot = SLOTNUM_GETSLOT(NODE_SLOTID(cnode));
- invent->ig_invclass = class;
-
- return invent;
-}
-
-/*
- * Add information about the baseio prom version number
- * as a part of detailed inventory info in the hwgraph.
- */
-void
-klhwg_baseio_inventory_add(devfs_handle_t baseio_vhdl,cnodeid_t cnode)
-{
- invent_miscinfo_t *baseio_inventory;
- unsigned char version = 0,revision = 0;
-
- /* Allocate memory for the "detailed inventory" info
- * for the baseio
- */
- baseio_inventory = (invent_miscinfo_t *)
- klhwg_invent_alloc(cnode, INV_PROM, sizeof(invent_miscinfo_t));
- baseio_inventory->im_type = INV_IO6PROM;
- /* Read the io6prom revision from the nvram */
-#ifdef LATER
- nvram_prom_version_get(&version,&revision);
-#endif
- /* Store the revision info in the inventory */
- baseio_inventory->im_version = version;
- baseio_inventory->im_rev = revision;
- /* Put the inventory info in the hardware graph */
- hwgraph_info_add_LBL(baseio_vhdl, INFO_LBL_DETAIL_INVENT,
- (arbitrary_info_t) baseio_inventory);
- /* Make the information available to the user programs
- * thru hwgfs.
- */
- hwgraph_info_export_LBL(baseio_vhdl, INFO_LBL_DETAIL_INVENT,
- sizeof(invent_miscinfo_t));
-}
-
-char *hub_rev[] = {
- "0.0",
- "1.0",
- "2.0",
- "2.1",
- "2.2",
- "2.3"
-};
-
-/*
- * Add detailed cpu inventory info to the hardware graph.
- */
-void
-klhwg_hub_invent_info(devfs_handle_t hubv,
- cnodeid_t cnode,
- klhub_t *hub)
-{
- invent_miscinfo_t *hub_invent;
-
- hub_invent = (invent_miscinfo_t *)
- klhwg_invent_alloc(cnode, INV_MISC, sizeof(invent_miscinfo_t));
- if (!hub_invent)
- return;
-
- if (KLCONFIG_INFO_ENABLED((klinfo_t *)hub))
- hub_invent->im_gen.ig_flag = INVENT_ENABLED;
-
- hub_invent->im_type = INV_HUB;
- hub_invent->im_rev = hub->hub_info.revision;
- hub_invent->im_speed = hub->hub_speed;
- hwgraph_info_add_LBL(hubv, INFO_LBL_DETAIL_INVENT,
- (arbitrary_info_t) hub_invent);
- hwgraph_info_export_LBL(hubv, INFO_LBL_DETAIL_INVENT,
- sizeof(invent_miscinfo_t));
-}
-
-/* ARGSUSED */
-void
-klhwg_add_hub(devfs_handle_t node_vertex, klhub_t *hub, cnodeid_t cnode)
-{
-#if defined(CONFIG_IA64_SGI_SN1)
- devfs_handle_t myhubv;
- devfs_handle_t hub_mon;
- devfs_handle_t synergy;
- devfs_handle_t fsb0;
- devfs_handle_t fsb1;
- int rc;
- extern struct file_operations hub_mon_fops;
-
- GRPRINTF(("klhwg_add_hub: adding %s\n", EDGE_LBL_HUB));
-
- (void) hwgraph_path_add(node_vertex, EDGE_LBL_HUB, &myhubv);
- rc = device_master_set(myhubv, node_vertex);
-
- /*
- * hub perf stats.
- */
- rc = hwgraph_info_add_LBL(myhubv, INFO_LBL_HUB_INFO,
- (arbitrary_info_t)(&NODEPDA(cnode)->hubstats));
-
- if (rc != GRAPH_SUCCESS) {
- printk(KERN_WARNING "klhwg_add_hub: Can't add hub info label 0x%p, code %d",
- (void *)myhubv, rc);
- }
-
- klhwg_hub_invent_info(myhubv, cnode, hub);
-
- hub_mon = hwgraph_register(myhubv, EDGE_LBL_PERFMON,
- 0, DEVFS_FL_AUTO_DEVNUM,
- 0, 0,
- S_IFCHR | S_IRUSR | S_IWUSR | S_IRGRP, 0, 0,
- &hub_mon_fops,
- (void *)(long)cnode);
-
- init_hub_stats(cnode, NODEPDA(cnode));
-
- /*
- * synergy perf
- */
- (void) hwgraph_path_add(myhubv, EDGE_LBL_SYNERGY, &synergy);
- (void) hwgraph_path_add(synergy, "0", &fsb0);
- (void) hwgraph_path_add(synergy, "1", &fsb1);
-
- fsb0 = hwgraph_register(fsb0, EDGE_LBL_PERFMON,
- 0, DEVFS_FL_AUTO_DEVNUM,
- 0, 0,
- S_IFCHR | S_IRUSR | S_IWUSR | S_IRGRP, 0, 0,
- &synergy_mon_fops, (void *)SYNERGY_PERF_INFO(cnode, 0));
-
- fsb1 = hwgraph_register(fsb1, EDGE_LBL_PERFMON,
- 0, DEVFS_FL_AUTO_DEVNUM,
- 0, 0,
- S_IFCHR | S_IRUSR | S_IWUSR | S_IRGRP, 0, 0,
- &synergy_mon_fops, (void *)SYNERGY_PERF_INFO(cnode, 1));
-#endif /* CONFIG_IA64_SGI_SN1 */
-}
-
-void
-klhwg_add_xbow(cnodeid_t cnode, nasid_t nasid)
-{
- lboard_t *brd;
- klxbow_t *xbow_p;
- nasid_t hub_nasid;
- cnodeid_t hub_cnode;
- int widgetnum;
- devfs_handle_t xbow_v, hubv;
- /*REFERENCED*/
- graph_error_t err;
-
- if ((brd = find_lboard((lboard_t *)KL_CONFIG_INFO(nasid), KLTYPE_IOBRICK_XBOW)) == NULL)
- return;
-
- if (KL_CONFIG_DUPLICATE_BOARD(brd))
- return;
-
- GRPRINTF(("klhwg_add_xbow: adding cnode %d nasid %d xbow edges\n",
- cnode, nasid));
-
- if ((xbow_p = (klxbow_t *)find_component(brd, NULL, KLSTRUCT_XBOW))
- == NULL)
- return;
-
-#ifdef LATER
- /*
- * We cannot support this function in devfs .. see below where
- * we use hwgraph_path_add() to create this vertex with a known
- * name.
- */
- err = hwgraph_vertex_create(&xbow_v);
- ASSERT(err == GRAPH_SUCCESS);
-
- xswitch_vertex_init(xbow_v);
-#endif /* LATER */
-
- for (widgetnum = HUB_WIDGET_ID_MIN; widgetnum <= HUB_WIDGET_ID_MAX; widgetnum++) {
- if (!XBOW_PORT_TYPE_HUB(xbow_p, widgetnum))
- continue;
-
- hub_nasid = XBOW_PORT_NASID(xbow_p, widgetnum);
- if (hub_nasid == INVALID_NASID) {
- printk(KERN_WARNING "hub widget %d, skipping xbow graph\n", widgetnum);
- continue;
- }
-
- hub_cnode = NASID_TO_COMPACT_NODEID(hub_nasid);
-
- if (is_specified(arg_maxnodes) && hub_cnode == INVALID_CNODEID) {
- continue;
- }
-
- hubv = cnodeid_to_vertex(hub_cnode);
-
- err = hwgraph_path_add(hubv, EDGE_LBL_XTALK, &xbow_v);
- if (err != GRAPH_SUCCESS) {
- if (err == GRAPH_DUP)
- printk(KERN_WARNING "klhwg_add_xbow: Check for "
- "working routers and router links!");
-
- PRINT_PANIC("klhwg_add_xbow: Failed to add "
- "edge: vertex 0x%p to vertex 0x%p,"
- "error %d\n",
- (void *)hubv, (void *)xbow_v, err);
- }
- xswitch_vertex_init(xbow_v);
-
- NODEPDA(hub_cnode)->xbow_vhdl = xbow_v;
-
- /*
- * XXX - This won't work is we ever hook up two hubs
- * by crosstown through a crossbow.
- */
- if (hub_nasid != nasid) {
- NODEPDA(hub_cnode)->xbow_peer = nasid;
- NODEPDA(NASID_TO_COMPACT_NODEID(nasid))->xbow_peer =
- hub_nasid;
- }
-
- GRPRINTF(("klhwg_add_xbow: adding port nasid %d %s to vertex 0x%p\n",
- hub_nasid, EDGE_LBL_XTALK, hubv));
-
-#ifdef LATER
- err = hwgraph_edge_add(hubv, xbow_v, EDGE_LBL_XTALK);
- if (err != GRAPH_SUCCESS) {
- if (err == GRAPH_DUP)
- printk(KERN_WARNING "klhwg_add_xbow: Check for "
- "working routers and router links!");
-
- PRINT_PANIC("klhwg_add_xbow: Failed to add "
- "edge: vertex 0x%p (0x%p) to vertex 0x%p (0x%p), "
- "error %d\n",
- hubv, hubv, xbow_v, xbow_v, err);
- }
-#endif
- }
-}
-
-
-/* ARGSUSED */
-void
-klhwg_add_node(devfs_handle_t hwgraph_root, cnodeid_t cnode, gda_t *gdap)
-{
- nasid_t nasid;
- lboard_t *brd;
- klhub_t *hub;
- devfs_handle_t node_vertex = NULL;
- char path_buffer[100];
- int rv;
- char *s;
- int board_disabled = 0;
-
- nasid = COMPACT_TO_NASID_NODEID(cnode);
- brd = find_lboard((lboard_t *)KL_CONFIG_INFO(nasid), KLTYPE_SNIA);
- GRPRINTF(("klhwg_add_node: Adding cnode %d, nasid %d, brd 0x%p\n",
- cnode, nasid, brd));
- ASSERT(brd);
-
- do {
-
- /* Generate a hardware graph path for this board. */
- board_to_path(brd, path_buffer);
-
- GRPRINTF(("klhwg_add_node: adding %s to vertex 0x%p\n",
- path_buffer, hwgraph_root));
- rv = hwgraph_path_add(hwgraph_root, path_buffer, &node_vertex);
-
- if (rv != GRAPH_SUCCESS)
- PRINT_PANIC("Node vertex creation failed. "
- "Path == %s",
- path_buffer);
-
- hub = (klhub_t *)find_first_component(brd, KLSTRUCT_HUB);
- ASSERT(hub);
- if(hub->hub_info.flags & KLINFO_ENABLE)
- board_disabled = 0;
- else
- board_disabled = 1;
-
- if(!board_disabled) {
- mark_nodevertex_as_node(node_vertex,
- cnode + board_disabled * numnodes);
-
- s = dev_to_name(node_vertex, path_buffer, sizeof(path_buffer));
- NODEPDA(cnode)->hwg_node_name =
- kmalloc(strlen(s) + 1,
- GFP_KERNEL);
- ASSERT_ALWAYS(NODEPDA(cnode)->hwg_node_name != NULL);
- strcpy(NODEPDA(cnode)->hwg_node_name, s);
-
- hubinfo_set(node_vertex, NODEPDA(cnode)->pdinfo);
-
- /* Set up node board's slot */
- NODEPDA(cnode)->slotdesc = brd->brd_slot;
-
- /* Set up the module we're in */
- NODEPDA(cnode)->module_id = brd->brd_module;
- NODEPDA(cnode)->module = module_lookup(brd->brd_module);
- }
-
- if(!board_disabled)
- klhwg_add_hub(node_vertex, hub, cnode);
-
- brd = KLCF_NEXT(brd);
- if (brd)
- brd = find_lboard(brd, KLTYPE_SNIA);
- else
- break;
- } while(brd);
-}
-
-
-/* ARGSUSED */
-void
-klhwg_add_all_routers(devfs_handle_t hwgraph_root)
-{
- nasid_t nasid;
- cnodeid_t cnode;
- lboard_t *brd;
- devfs_handle_t node_vertex;
- char path_buffer[100];
- int rv;
-
- for (cnode = 0; cnode < numnodes; cnode++) {
- nasid = COMPACT_TO_NASID_NODEID(cnode);
-
- GRPRINTF(("klhwg_add_all_routers: adding router on cnode %d\n",
- cnode));
-
- brd = find_lboard_class((lboard_t *)KL_CONFIG_INFO(nasid),
- KLTYPE_ROUTER);
-
- if (!brd)
- /* No routers stored in this node's memory */
- continue;
-
- do {
- ASSERT(brd);
- GRPRINTF(("Router board struct is %p\n", brd));
-
- /* Don't add duplicate boards. */
- if (brd->brd_flags & DUPLICATE_BOARD)
- continue;
-
- GRPRINTF(("Router 0x%p module number is %d\n", brd, brd->brd_module));
- /* Generate a hardware graph path for this board. */
- board_to_path(brd, path_buffer);
-
- GRPRINTF(("Router path is %s\n", path_buffer));
-
- /* Add the router */
- GRPRINTF(("klhwg_add_all_routers: adding %s to vertex 0x%p\n",
- path_buffer, hwgraph_root));
- rv = hwgraph_path_add(hwgraph_root, path_buffer, &node_vertex);
-
- if (rv != GRAPH_SUCCESS)
- PRINT_PANIC("Router vertex creation "
- "failed. Path == %s",
- path_buffer);
-
- GRPRINTF(("klhwg_add_all_routers: get next board from 0x%p\n",
- brd));
- /* Find the rest of the routers stored on this node. */
- } while ( (brd = find_lboard_class(KLCF_NEXT(brd),
- KLTYPE_ROUTER)) );
-
- GRPRINTF(("klhwg_add_all_routers: Done.\n"));
- }
-
-}
-
-/* ARGSUSED */
-void
-klhwg_connect_one_router(devfs_handle_t hwgraph_root, lboard_t *brd,
- cnodeid_t cnode, nasid_t nasid)
-{
- klrou_t *router;
- char path_buffer[50];
- char dest_path[50];
- devfs_handle_t router_hndl;
- devfs_handle_t dest_hndl;
- int rc;
- int port;
- lboard_t *dest_brd;
-
- GRPRINTF(("klhwg_connect_one_router: Connecting router on cnode %d\n",
- cnode));
-
- /* Don't add duplicate boards. */
- if (brd->brd_flags & DUPLICATE_BOARD) {
- GRPRINTF(("klhwg_connect_one_router: Duplicate router 0x%p on cnode %d\n",
- brd, cnode));
- return;
- }
-
- /* Generate a hardware graph path for this board. */
- board_to_path(brd, path_buffer);
-
- rc = hwgraph_traverse(hwgraph_root, path_buffer, &router_hndl);
-
- if (rc != GRAPH_SUCCESS && is_specified(arg_maxnodes))
- return;
-
- if (rc != GRAPH_SUCCESS)
- printk(KERN_WARNING "Can't find router: %s", path_buffer);
-
- /* We don't know what to do with multiple router components */
- if (brd->brd_numcompts != 1) {
- PRINT_PANIC("klhwg_connect_one_router: %d cmpts on router\n",
- brd->brd_numcompts);
- return;
- }
-
-
- /* Convert component 0 to klrou_t ptr */
- router = (klrou_t *)NODE_OFFSET_TO_K0(NASID_GET(brd),
- brd->brd_compts[0]);
-
- for (port = 1; port <= MAX_ROUTER_PORTS; port++) {
- /* See if the port's active */
- if (router->rou_port[port].port_nasid == INVALID_NASID) {
- GRPRINTF(("klhwg_connect_one_router: port %d inactive.\n",
- port));
- continue;
- }
- if (is_specified(arg_maxnodes) && NASID_TO_COMPACT_NODEID(router->rou_port[port].port_nasid)
- == INVALID_CNODEID) {
- continue;
- }
-
- dest_brd = (lboard_t *)NODE_OFFSET_TO_K0(
- router->rou_port[port].port_nasid,
- router->rou_port[port].port_offset);
-
- /* Generate a hardware graph path for this board. */
- board_to_path(dest_brd, dest_path);
-
- rc = hwgraph_traverse(hwgraph_root, dest_path, &dest_hndl);
-
- if (rc != GRAPH_SUCCESS) {
- if (is_specified(arg_maxnodes) && KL_CONFIG_DUPLICATE_BOARD(dest_brd))
- continue;
- PRINT_PANIC("Can't find router: %s", dest_path);
- }
- GRPRINTF(("klhwg_connect_one_router: Link from %s/%d to %s\n",
- path_buffer, port, dest_path));
-
- sprintf(dest_path, "%d", port);
-
- rc = hwgraph_edge_add(router_hndl, dest_hndl, dest_path);
-
- if (rc == GRAPH_DUP) {
- GRPRINTF(("Skipping port %d. nasid %d %s/%s\n",
- port, router->rou_port[port].port_nasid,
- path_buffer, dest_path));
- continue;
- }
-
- if (rc != GRAPH_SUCCESS && !is_specified(arg_maxnodes))
- PRINT_PANIC("Can't create edge: %s/%s to vertex 0x%p error 0x%x\n",
- path_buffer, dest_path, (void *)dest_hndl, rc);
-
- }
-}
-
-
-void
-klhwg_connect_routers(devfs_handle_t hwgraph_root)
-{
- nasid_t nasid;
- cnodeid_t cnode;
- lboard_t *brd;
-
- for (cnode = 0; cnode < numnodes; cnode++) {
- nasid = COMPACT_TO_NASID_NODEID(cnode);
-
- GRPRINTF(("klhwg_connect_routers: Connecting routers on cnode %d\n",
- cnode));
-
- brd = find_lboard_class((lboard_t *)KL_CONFIG_INFO(nasid),
- KLTYPE_ROUTER);
-
- if (!brd)
- continue;
-
- do {
-
- nasid = COMPACT_TO_NASID_NODEID(cnode);
-
- klhwg_connect_one_router(hwgraph_root, brd,
- cnode, nasid);
-
- /* Find the rest of the routers stored on this node. */
- } while ( (brd = find_lboard_class(KLCF_NEXT(brd), KLTYPE_ROUTER)) );
- }
-}
-
-
-
-void
-klhwg_connect_hubs(devfs_handle_t hwgraph_root)
-{
- nasid_t nasid;
- cnodeid_t cnode;
- lboard_t *brd;
- klhub_t *hub;
- lboard_t *dest_brd;
- devfs_handle_t hub_hndl;
- devfs_handle_t dest_hndl;
- char path_buffer[50];
- char dest_path[50];
- graph_error_t rc;
-
- for (cnode = 0; cnode < numnodes; cnode++) {
- nasid = COMPACT_TO_NASID_NODEID(cnode);
-
- GRPRINTF(("klhwg_connect_hubs: Connecting hubs on cnode %d\n",
- cnode));
-
- brd = find_lboard((lboard_t *)KL_CONFIG_INFO(nasid), KLTYPE_SNIA);
- ASSERT(brd);
-
- hub = (klhub_t *)find_first_component(brd, KLSTRUCT_HUB);
- ASSERT(hub);
-
- /* See if the port's active */
- if (hub->hub_port.port_nasid == INVALID_NASID) {
- GRPRINTF(("klhwg_connect_hubs: port inactive.\n"));
- continue;
- }
-
- if (is_specified(arg_maxnodes) && NASID_TO_COMPACT_NODEID(hub->hub_port.port_nasid) == INVALID_CNODEID)
- continue;
-
- /* Generate a hardware graph path for this board. */
- board_to_path(brd, path_buffer);
-
- GRPRINTF(("klhwg_connect_hubs: Hub path is %s.\n", path_buffer));
- rc = hwgraph_traverse(hwgraph_root, path_buffer, &hub_hndl);
-
- if (rc != GRAPH_SUCCESS)
- printk(KERN_WARNING "Can't find hub: %s", path_buffer);
-
- dest_brd = (lboard_t *)NODE_OFFSET_TO_K0(
- hub->hub_port.port_nasid,
- hub->hub_port.port_offset);
-
- /* Generate a hardware graph path for this board. */
- board_to_path(dest_brd, dest_path);
-
- rc = hwgraph_traverse(hwgraph_root, dest_path, &dest_hndl);
-
- if (rc != GRAPH_SUCCESS) {
- if (is_specified(arg_maxnodes) && KL_CONFIG_DUPLICATE_BOARD(dest_brd))
- continue;
- PRINT_PANIC("Can't find board: %s", dest_path);
- } else {
-
-
- GRPRINTF(("klhwg_connect_hubs: Link from %s to %s.\n",
- path_buffer, dest_path));
-
- rc = hwgraph_edge_add(hub_hndl, dest_hndl, EDGE_LBL_INTERCONNECT);
-
- if (rc != GRAPH_SUCCESS)
- PRINT_PANIC("Can't create edge: %s/%s to vertex 0x%p, error 0x%x\n",
- path_buffer, dest_path, (void *)dest_hndl, rc);
-
- }
- }
-}
-
-/* Store the pci/vme disabled board information as extended administrative
- * hints which can later be used by the drivers using the device/driver
- * admin interface.
- */
-void
-klhwg_device_disable_hints_add(void)
-{
- cnodeid_t cnode; /* node we are looking at */
- nasid_t nasid; /* nasid of the node */
- lboard_t *board; /* board we are looking at */
- int comp_index; /* component index */
- klinfo_t *component; /* component in the board we are
- * looking at
- */
- char device_name[MAXDEVNAME];
-
-#ifdef LATER
- device_admin_table_init();
-#endif
- for(cnode = 0; cnode < numnodes; cnode++) {
- nasid = COMPACT_TO_NASID_NODEID(cnode);
- board = (lboard_t *)KL_CONFIG_INFO(nasid);
- /* Check out all the board info stored on a node */
- while(board) {
- /* No need to look at duplicate boards or non-io
- * boards
- */
- if (KL_CONFIG_DUPLICATE_BOARD(board) ||
- KLCLASS(board->brd_type) != KLCLASS_IO) {
- board = KLCF_NEXT(board);
- continue;
- }
- /* Check out all the components of a board */
- for (comp_index = 0;
- comp_index < KLCF_NUM_COMPS(board);
- comp_index++) {
- component = KLCF_COMP(board,comp_index);
- /* If the component is enabled move on to
- * the next component
- */
- if (KLCONFIG_INFO_ENABLED(component))
- continue;
- /* NOTE : Since the prom only supports
- * the disabling of pci devices the following
- * piece of code makes sense.
- * Make sure that this assumption is valid
- */
- /* This component is disabled. Store this
- * hint in the extended device admin table
- */
- /* Get the canonical name of the pci device */
- device_component_canonical_name_get(board,
- component,
- device_name);
-#ifdef LATER
- device_admin_table_update(device_name,
- ADMIN_LBL_DISABLED,
- "yes");
-#endif
-#ifdef DEBUG
- printf("%s DISABLED\n",device_name);
-#endif
- }
- /* go to the next board info stored on this
- * node
- */
- board = KLCF_NEXT(board);
- }
- }
-}
-
-void
-klhwg_add_all_modules(devfs_handle_t hwgraph_root)
-{
- cmoduleid_t cm;
- char name[128];
- devfs_handle_t vhdl;
- int rc;
- char buffer[16];
-
- /* Add devices under each module */
-
- for (cm = 0; cm < nummodules; cm++) {
- /* Use module as module vertex fastinfo */
-
-#ifdef __ia64
- memset(buffer, 0, 16);
- format_module_id(buffer, modules[cm]->id, MODULE_FORMAT_BRIEF);
- sprintf(name, EDGE_LBL_MODULE "/%s", buffer);
-#else
- sprintf(name, EDGE_LBL_MODULE "/%x", modules[cm]->id);
-#endif
-
- rc = hwgraph_path_add(hwgraph_root, name, &vhdl);
- ASSERT(rc == GRAPH_SUCCESS);
- rc = rc;
-
- hwgraph_fastinfo_set(vhdl, (arbitrary_info_t) modules[cm]);
-
- /* Add system controller */
-
-#ifdef __ia64
- sprintf(name,
- EDGE_LBL_MODULE "/%s/" EDGE_LBL_L1,
- buffer);
-#else
- sprintf(name,
- EDGE_LBL_MODULE "/%x/" EDGE_LBL_L1,
- modules[cm]->id);
-#endif
-
- rc = hwgraph_path_add(hwgraph_root, name, &vhdl);
- ASSERT_ALWAYS(rc == GRAPH_SUCCESS);
- rc = rc;
-
- hwgraph_info_add_LBL(vhdl,
- INFO_LBL_ELSC,
- (arbitrary_info_t) (__psint_t) 1);
-
-#ifdef LATER
- sndrv_attach(vhdl);
-#else
- /*
- * We need to call the drivers attach routine ..
- */
- FIXME("klhwg_add_all_modules: Need code to call driver attach.\n");
-#endif
- }
-}
-
-void
-klhwg_add_all_nodes(devfs_handle_t hwgraph_root)
-{
- //gda_t *gdap = GDA;
- gda_t *gdap;
- cnodeid_t cnode;
-
- gdap = (gda_t *)0xe000000000002400;
-
- FIXME("klhwg_add_all_nodes: FIX GDA\n");
-
- for (cnode = 0; cnode < numnodes; cnode++) {
- ASSERT(gdap->g_nasidtable[cnode] != INVALID_NASID);
- klhwg_add_node(hwgraph_root, cnode, gdap);
- }
-
- for (cnode = 0; cnode < numnodes; cnode++) {
- ASSERT(gdap->g_nasidtable[cnode] != INVALID_NASID);
-
- klhwg_add_xbow(cnode, gdap->g_nasidtable[cnode]);
- }
-
- /*
- * As for router hardware inventory information, we set this
- * up in router.c.
- */
-
- klhwg_add_all_routers(hwgraph_root);
- klhwg_connect_routers(hwgraph_root);
- klhwg_connect_hubs(hwgraph_root);
-
- /* Assign guardian nodes to each of the
- * routers in the system.
- */
-
-#ifdef LATER
- router_guardians_set(hwgraph_root);
-#endif
-
- /* Go through the entire system's klconfig
- * to figure out which pci components have been disabled
- */
- klhwg_device_disable_hints_add();
-
-}
+++ /dev/null
-/* $Id$
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Copyright (C) 1992 - 1997, 2000-2002 Silicon Graphics, Inc. All rights reserved.
- */
-
-
-/*
- * This is a temporary file that statically initializes the expected
- * initial klgraph information that is normally provided by prom.
- */
-
-#include <linux/config.h>
-#include <linux/types.h>
-#include <linux/slab.h>
-#include <linux/vmalloc.h>
-#include <asm/sn/sgi.h>
-#include <asm/sn/io.h>
-#include <asm/sn/klconfig.h>
-#include <asm/sn/simulator.h>
-
-extern u64 klgraph_addr[];
-void * real_port;
-void * real_io_base;
-void * real_addr;
-
-char *BW0 = NULL;
-
-kl_config_hdr_t *linux_klcfg;
-
-#ifdef DEFINE_DUMP_RTNS
-/* forward declarations */
-static void dump_ii(void), dump_crossbow(void);
-static void clear_ii_error(void);
-#endif /* DEFINE_DUMP_RTNS */
-
-#define SYNERGY_WIDGET ((char *)0xc0000e0000000000)
-#define SYNERGY_SWIZZLE ((char *)0xc0000e0000000400)
-#define HUBREG ((char *)0xc0000a0001e00000)
-#define WIDGET0 ((char *)0xc0000a0000000000)
-#define WIDGET4 ((char *)0xc0000a0000000004)
-
-#define SYNERGY_WIDGET ((char *)0xc0000e0000000000)
-#define SYNERGY_SWIZZLE ((char *)0xc0000e0000000400)
-#define HUBREG ((char *)0xc0000a0001e00000)
-#define WIDGET0 ((char *)0xc0000a0000000000)
-
-#define convert(a,b,c) temp = (u64 *)a; *temp = b; temp++; *temp = c
-
-void
-klgraph_hack_init(void)
-{
-
- u64 *temp;
-
-#ifdef CONFIG_IA64_SGI_SN1
- /*
- * We need to know whether we are booting from PROM or
- * boot from disk.
- */
- linux_klcfg = (kl_config_hdr_t *)0xe000000000030000;
- if (linux_klcfg->ch_magic == 0xbeedbabe) {
- return;
- } else {
- panic("klgraph_hack_init: Unable to locate KLCONFIG TABLE\n");
- }
-
- convert(0x0000000000030000, 0x00000000beedbabe, 0x0000004800000000);
-
-#else
-
- if (IS_RUNNING_ON_SIMULATOR()) {
- printk("Creating FAKE Klconfig Structure for Embeded Kernel\n");
- klgraph_addr[0] = 0xe000003000030000;
-
- /*
- * klconfig entries initialization - mankato
- */
- convert(0xe000003000030000, 0x00000000beedbabe, 0x0000004800000000);
- convert(0xe000003000030010, 0x0003007000000018, 0x800002000f820178);
- convert(0xe000003000030020, 0x80000a000f024000, 0x800002000f800000);
- convert(0xe000003000030030, 0x0300fafa00012580, 0x00000000040f0000);
- convert(0xe000003000030040, 0x0000000000000000, 0x0003097000030070);
- convert(0xe000003000030050, 0x00030970000303b0, 0x0003181000033f70);
- convert(0xe000003000030060, 0x0003d51000037570, 0x0000000000038330);
- convert(0xe000003000030070, 0x0203110100030140, 0x0001000000000101);
- convert(0xe000003000030080, 0x0900000000000000, 0x000000004e465e67);
- convert(0xe000003000030090, 0x0003097000000000, 0x00030b1000030a40);
- convert(0xe0000030000300a0, 0x00030cb000030be0, 0x000315a0000314d0);
- convert(0xe0000030000300b0, 0x0003174000031670, 0x0000000000000000);
- convert(0xe000003000030100, 0x000000000000001a, 0x3350490000000000);
- convert(0xe000003000030110, 0x0000000000000037, 0x0000000000000000);
- convert(0xe000003000030140, 0x0002420100030210, 0x0001000000000101);
- convert(0xe000003000030150, 0x0100000000000000, 0xffffffffffffffff);
- convert(0xe000003000030160, 0x00030d8000000000, 0x0000000000030e50);
- convert(0xe0000030000301c0, 0x0000000000000000, 0x0000000000030070);
- convert(0xe0000030000301d0, 0x0000000000000025, 0x424f490000000000);
- convert(0xe0000030000301e0, 0x000000004b434952, 0x0000000000000000);
- convert(0xe000003000030210, 0x00027101000302e0, 0x00010000000e4101);
- convert(0xe000003000030220, 0x0200000000000000, 0xffffffffffffffff);
- convert(0xe000003000030230, 0x00030f2000000000, 0x0000000000030ff0);
- convert(0xe000003000030290, 0x0000000000000000, 0x0000000000030140);
- convert(0xe0000030000302a0, 0x0000000000000026, 0x7262490000000000);
- convert(0xe0000030000302b0, 0x00000000006b6369, 0x0000000000000000);
- convert(0xe0000030000302e0, 0x0002710100000000, 0x00010000000f3101);
- convert(0xe0000030000302f0, 0x0500000000000000, 0xffffffffffffffff);
- convert(0xe000003000030300, 0x000310c000000000, 0x0003126000031190);
- convert(0xe000003000030310, 0x0003140000031330, 0x0000000000000000);
- convert(0xe000003000030360, 0x0000000000000000, 0x0000000000030140);
- convert(0xe000003000030370, 0x0000000000000029, 0x7262490000000000);
- convert(0xe000003000030380, 0x00000000006b6369, 0x0000000000000000);
- convert(0xe000003000030970, 0x0000000002010102, 0x0000000000000000);
- convert(0xe000003000030980, 0x000000004e465e67, 0xffffffff00000000);
- /* convert(0x00000000000309a0, 0x0000000000037570, 0x0000000100000000); */
- convert(0xe0000030000309a0, 0x0000000000037570, 0xffffffff00000000);
- convert(0xe0000030000309b0, 0x0000000000030070, 0x0000000000000000);
- convert(0xe0000030000309c0, 0x000000000003f420, 0x0000000000000000);
- convert(0xe000003000030a40, 0x0000000002010125, 0x0000000000000000);
- convert(0xe000003000030a50, 0xffffffffffffffff, 0xffffffff00000000);
- convert(0xe000003000030a70, 0x0000000000037b78, 0x0000000000000000);
- convert(0xe000003000030b10, 0x0000000002010125, 0x0000000000000000);
- convert(0xe000003000030b20, 0xffffffffffffffff, 0xffffffff00000000);
- convert(0xe000003000030b40, 0x0000000000037d30, 0x0000000000000001);
- convert(0xe000003000030be0, 0x00000000ff010203, 0x0000000000000000);
- convert(0xe000003000030bf0, 0xffffffffffffffff, 0xffffffff000000ff);
- convert(0xe000003000030c10, 0x0000000000037ee8, 0x0100010000000200);
- convert(0xe000003000030cb0, 0x00000000ff310111, 0x0000000000000000);
- convert(0xe000003000030cc0, 0xffffffffffffffff, 0x0000000000000000);
- convert(0xe000003000030d80, 0x0000000002010104, 0x0000000000000000);
- convert(0xe000003000030d90, 0xffffffffffffffff, 0x00000000000000ff);
- convert(0xe000003000030db0, 0x0000000000037f18, 0x0000000000000000);
- convert(0xe000003000030dc0, 0x0000000000000000, 0x0003007000060000);
- convert(0xe000003000030de0, 0x0000000000000000, 0x0003021000050000);
- convert(0xe000003000030df0, 0x000302e000050000, 0x0000000000000000);
- convert(0xe000003000030e30, 0x0000000000000000, 0x000000000000000a);
- convert(0xe000003000030e50, 0x00000000ff00011a, 0x0000000000000000);
- convert(0xe000003000030e60, 0xffffffffffffffff, 0x0000000000000000);
- convert(0xe000003000030e80, 0x0000000000037fe0, 0x9e6e9e9e9e9e9e9e);
- convert(0xe000003000030e90, 0x000000000000bc6e, 0x0000000000000000);
- convert(0xe000003000030f20, 0x0000000002010205, 0x00000000d0020000);
- convert(0xe000003000030f30, 0xffffffffffffffff, 0x0000000e0000000e);
- convert(0xe000003000030f40, 0x000000000000000e, 0x0000000000000000);
- convert(0xe000003000030f50, 0x0000000000038010, 0x00000000000007ff);
- convert(0xe000003000030f70, 0x0000000000000000, 0x0000000022001077);
- convert(0xe000003000030fa0, 0x0000000000000000, 0x000000000003f4a8);
- convert(0xe000003000030ff0, 0x0000000000310120, 0x0000000000000000);
- convert(0xe000003000031000, 0xffffffffffffffff, 0xffffffff00000002);
- convert(0xe000003000031010, 0x000000000000000e, 0x0000000000000000);
- convert(0xe000003000031020, 0x0000000000038088, 0x0000000000000000);
- convert(0xe0000030000310c0, 0x0000000002010205, 0x00000000d0020000);
- convert(0xe0000030000310d0, 0xffffffffffffffff, 0x0000000f0000000f);
- convert(0xe0000030000310e0, 0x000000000000000f, 0x0000000000000000);
- convert(0xe0000030000310f0, 0x00000000000380b8, 0x00000000000007ff);
- convert(0xe000003000031120, 0x0000000022001077, 0x00000000000310a9);
- convert(0xe000003000031130, 0x00000000580211c1, 0x000000008009104c);
- convert(0xe000003000031140, 0x0000000000000000, 0x000000000003f4c0);
- convert(0xe000003000031190, 0x0000000000310120, 0x0000000000000000);
- convert(0xe0000030000311a0, 0xffffffffffffffff, 0xffffffff00000003);
- convert(0xe0000030000311b0, 0x000000000000000f, 0x0000000000000000);
- convert(0xe0000030000311c0, 0x0000000000038130, 0x0000000000000000);
- convert(0xe000003000031260, 0x0000000000110106, 0x0000000000000000);
- convert(0xe000003000031270, 0xffffffffffffffff, 0xffffffff00000004);
- convert(0xe000003000031270, 0xffffffffffffffff, 0xffffffff00000004);
- convert(0xe000003000031280, 0x000000000000000f, 0x0000000000000000);
- convert(0xe0000030000312a0, 0x00000000ff110013, 0x0000000000000000);
- convert(0xe0000030000312b0, 0xffffffffffffffff, 0xffffffff00000000);
- convert(0xe0000030000312c0, 0x000000000000000f, 0x0000000000000000);
- convert(0xe0000030000312e0, 0x0000000000110012, 0x0000000000000000);
- convert(0xe0000030000312f0, 0xffffffffffffffff, 0xffffffff00000000);
- convert(0xe000003000031300, 0x000000000000000f, 0x0000000000000000);
- convert(0xe000003000031310, 0x0000000000038160, 0x0000000000000000);
- convert(0xe000003000031330, 0x00000000ff310122, 0x0000000000000000);
- convert(0xe000003000031340, 0xffffffffffffffff, 0xffffffff00000005);
- convert(0xe000003000031350, 0x000000000000000f, 0x0000000000000000);
- convert(0xe000003000031360, 0x0000000000038190, 0x0000000000000000);
- convert(0xe000003000031400, 0x0000000000310121, 0x0000000000000000);
- convert(0xe000003000031400, 0x0000000000310121, 0x0000000000000000);
- convert(0xe000003000031410, 0xffffffffffffffff, 0xffffffff00000006);
- convert(0xe000003000031420, 0x000000000000000f, 0x0000000000000000);
- convert(0xe000003000031430, 0x00000000000381c0, 0x0000000000000000);
- convert(0xe0000030000314d0, 0x00000000ff010201, 0x0000000000000000);
- convert(0xe0000030000314e0, 0xffffffffffffffff, 0xffffffff00000000);
- convert(0xe000003000031500, 0x00000000000381f0, 0x000030430000ffff);
- convert(0xe000003000031510, 0x000000000000ffff, 0x0000000000000000);
- convert(0xe0000030000315a0, 0x00000020ff000201, 0x0000000000000000);
- convert(0xe0000030000315b0, 0xffffffffffffffff, 0xffffffff00000001);
- convert(0xe0000030000315d0, 0x0000000000038240, 0x00003f3f0000ffff);
- convert(0xe0000030000315e0, 0x000000000000ffff, 0x0000000000000000);
- convert(0xe000003000031670, 0x00000000ff010201, 0x0000000000000000);
- convert(0xe000003000031680, 0xffffffffffffffff, 0x0000000100000002);
- convert(0xe0000030000316a0, 0x0000000000038290, 0x000030430000ffff);
- convert(0xe0000030000316b0, 0x000000000000ffff, 0x0000000000000000);
- convert(0xe000003000031740, 0x00000020ff000201, 0x0000000000000000);
- convert(0xe000003000031750, 0xffffffffffffffff, 0x0000000500000003);
- convert(0xe000003000031770, 0x00000000000382e0, 0x00003f3f0000ffff);
- convert(0xe000003000031780, 0x000000000000ffff, 0x0000000000000000);
-}
-
-#endif
-
-}
-
-
-
-
-
-#ifdef DEFINE_DUMP_RTNS
-/*
- * these were useful for printing out registers etc
- * during bringup
- */
-
-static void
-xdump(long long *addr, int count)
-{
- int ii;
- volatile long long *xx = addr;
-
- for ( ii = 0; ii < count; ii++, xx++ ) {
- printk("0x%p : 0x%p\n", (void *)xx, (void *)*xx);
- }
-}
-
-static void
-xdump32(unsigned int *addr, int count)
-{
- int ii;
- volatile unsigned int *xx = addr;
-
- for ( ii = 0; ii < count; ii++, xx++ ) {
- printk("0x%p : 0x%0x\n", (void *)xx, (int)*xx);
- }
-}
-
-static void
-clear_ii_error(void)
-{
- volatile long long *tmp;
-
- printk("... WSTAT ");
- xdump((long long *)0xc0000a0001c00008, 1);
- printk("... WCTRL ");
- xdump((long long *)0xc0000a0001c00020, 1);
- printk("... WLCSR ");
- xdump((long long *)0xc0000a0001c00128, 1);
- printk("... IIDSR ");
- xdump((long long *)0xc0000a0001c00138, 1);
- printk("... IOPRBs ");
- xdump((long long *)0xc0000a0001c00198, 9);
- printk("... IXSS ");
- xdump((long long *)0xc0000a0001c00210, 1);
- printk("... IBLS0 ");
- xdump((long long *)0xc0000a0001c10000, 1);
- printk("... IBLS1 ");
- xdump((long long *)0xc0000a0001c20000, 1);
-
- /* Write IOERR clear to clear the CRAZY bit in the status */
- tmp = (long long *)0xc0000a0001c001f8; *tmp = (long long)0xffffffff;
-
- /* dump out local block error registers */
- printk("... ");
- xdump((long long *)0xc0000a0001e04040, 1); /* LB_ERROR_BITS */
- printk("... ");
- xdump((long long *)0xc0000a0001e04050, 1); /* LB_ERROR_HDR1 */
- printk("... ");
- xdump((long long *)0xc0000a0001e04058, 1); /* LB_ERROR_HDR2 */
- /* and clear the LB_ERROR_BITS */
- tmp = (long long *)0xc0000a0001e04040; *tmp = 0x0;
- printk("clr: ");
- xdump((long long *)0xc0000a0001e04040, 1); /* LB_ERROR_BITS */
- tmp = (long long *)0xc0000a0001e04050; *tmp = 0x0;
- tmp = (long long *)0xc0000a0001e04058; *tmp = 0x0;
-}
-
-
-static void
-dump_ii(void)
-{
- printk("===== Dump the II regs =====\n");
- xdump((long long *)0xc0000a0001c00000, 2);
- xdump((long long *)0xc0000a0001c00020, 1);
- xdump((long long *)0xc0000a0001c00100, 37);
- xdump((long long *)0xc0000a0001c00300, 98);
- xdump((long long *)0xc0000a0001c10000, 6);
- xdump((long long *)0xc0000a0001c20000, 6);
- xdump((long long *)0xc0000a0001c30000, 2);
-
- xdump((long long *)0xc0000a0000000000, 1);
- xdump((long long *)0xc0000a0001000000, 1);
- xdump((long long *)0xc0000a0002000000, 1);
- xdump((long long *)0xc0000a0003000000, 1);
- xdump((long long *)0xc0000a0004000000, 1);
- xdump((long long *)0xc0000a0005000000, 1);
- xdump((long long *)0xc0000a0006000000, 1);
- xdump((long long *)0xc0000a0007000000, 1);
- xdump((long long *)0xc0000a0008000000, 1);
- xdump((long long *)0xc0000a0009000000, 1);
- xdump((long long *)0xc0000a000a000000, 1);
- xdump((long long *)0xc0000a000b000000, 1);
- xdump((long long *)0xc0000a000c000000, 1);
- xdump((long long *)0xc0000a000d000000, 1);
- xdump((long long *)0xc0000a000e000000, 1);
- xdump((long long *)0xc0000a000f000000, 1);
-}
-
-static void
-dump_crossbow(void)
-{
- printk("===== Dump the Crossbow regs =====\n");
- clear_ii_error();
- xdump32((unsigned int *)0xc0000a0000000004, 1);
- clear_ii_error();
- xdump32((unsigned int *)0xc0000a0000000000, 1);
- printk("and again..\n");
- xdump32((unsigned int *)0xc0000a0000000000, 1);
- xdump32((unsigned int *)0xc0000a0000000000, 1);
-
-
- clear_ii_error();
-
- xdump32((unsigned int *)0xc000020000000004, 1);
- clear_ii_error();
- xdump32((unsigned int *)0xc000020000000000, 1);
- clear_ii_error();
-
- xdump32((unsigned int *)0xc0000a0000800004, 1);
- clear_ii_error();
- xdump32((unsigned int *)0xc0000a0000800000, 1);
- clear_ii_error();
-
- xdump32((unsigned int *)0xc000020000800004, 1);
- clear_ii_error();
- xdump32((unsigned int *)0xc000020000800000, 1);
- clear_ii_error();
-
-
-}
-#endif /* DEFINE_DUMP_RTNS */
+++ /dev/null
-/* $Id$
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Copyright (C) 1992-1997, 2000-2002 Silicon Graphics, Inc. All rights reserved.
- */
-
-/* In general, this file is organized in a hierarchy from lower-level
- * to higher-level layers, as follows:
- *
- * UART routines
- * Bedrock/L1 "PPP-like" protocol implementation
- * System controller "message" interface (allows multiplexing
- * of various kinds of requests and responses with
- * console I/O)
- * Console interface:
- * "l1_cons", the glue that allows the L1 to act
- * as the system console for the stdio libraries
- *
- * Routines making use of the system controller "message"-style interface
- * can be found in l1_command.c.
- */
-
-
-#include <linux/types.h>
-#include <linux/config.h>
-#include <linux/slab.h>
-#include <linux/spinlock.h>
-#include <linux/delay.h>
-#include <asm/sn/sgi.h>
-#include <asm/sn/io.h>
-#include <asm/sn/iograph.h>
-#include <asm/sn/invent.h>
-#include <asm/sn/hcl.h>
-#include <asm/sn/hcl_util.h>
-#include <asm/sn/labelcl.h>
-#include <asm/sn/eeprom.h>
-#include <asm/sn/router.h>
-#include <asm/sn/module.h>
-#include <asm/sn/ksys/l1.h>
-#include <asm/sn/nodepda.h>
-#include <asm/sn/clksupport.h>
-#include <asm/sn/sn_sal.h>
-#include <asm/sn/sn_cpuid.h>
-#include <asm/sn/uart16550.h>
-#include <asm/sn/simulator.h>
-
-
-/* Make all console writes atomic */
-#define SYNC_CONSOLE_WRITE 1
-
-
-/*********************************************************************
- * Hardware-level (UART) driver routines.
- */
-
-/* macros for reading/writing registers */
-
-#define LD(x) (*(volatile uint64_t *)(x))
-#define SD(x, v) (LD(x) = (uint64_t) (v))
-
-/* location of uart receive/xmit data register */
-#if defined(CONFIG_IA64_SGI_SN1)
-#define L1_UART_BASE(n) ((ulong)REMOTE_HSPEC_ADDR((n), 0x00000080))
-#define LOCK_HUB REMOTE_HUB_ADDR
-#elif defined(CONFIG_IA64_SGI_SN2)
-#define L1_UART_BASE(n) ((ulong)REMOTE_HUB((n), SH_JUNK_BUS_UART0))
-#define LOCK_HUB REMOTE_HUB
-typedef u64 rtc_time_t;
-#endif
-
-
-#define ADDR_L1_REG(n, r) ( L1_UART_BASE(n) | ( (r) << 3 ) )
-#define READ_L1_UART_REG(n, r) ( LD(ADDR_L1_REG((n), (r))) )
-#define WRITE_L1_UART_REG(n, r, v) ( SD(ADDR_L1_REG((n), (r)), (v)) )
-
-/* upper layer interface calling methods */
-#define SERIAL_INTERRUPT_MODE 0
-#define SERIAL_POLLED_MODE 1
-
-
-/* UART-related #defines */
-
-#define UART_BAUD_RATE 57600
-#define UART_FIFO_DEPTH 16
-#define UART_DELAY_SPAN 10
-#define UART_PUTC_TIMEOUT 50000
-#define UART_INIT_TIMEOUT 100000
-
-/* error codes */
-#define UART_SUCCESS 0
-#define UART_TIMEOUT (-1)
-#define UART_LINK (-2)
-#define UART_NO_CHAR (-3)
-#define UART_VECTOR (-4)
-
-#define UART_DELAY(x) udelay(x)
-
-/* Some debug counters */
-#define L1C_INTERRUPTS 0
-#define L1C_OUR_R_INTERRUPTS 1
-#define L1C_OUR_X_INTERRUPTS 2
-#define L1C_SEND_CALLUPS 3
-#define L1C_RECEIVE_CALLUPS 4
-#define L1C_SET_BAUD 5
-#define L1C_ALREADY_LOCKED L1C_SET_BAUD
-#define L1C_R_IRQ 6
-#define L1C_R_IRQ_RET 7
-#define L1C_LOCK_TIMEOUTS 8
-#define L1C_LOCK_COUNTER 9
-#define L1C_UNLOCK_COUNTER 10
-#define L1C_REC_STALLS 11
-#define L1C_CONNECT_CALLS 12
-#define L1C_SIZE L1C_CONNECT_CALLS /* Set to the last one */
-
-uint64_t L1_collectibles[L1C_SIZE + 1];
-
-
-/*
- * Some macros for handling Endian-ness
- */
-
-#define COPY_INT_TO_BUFFER(_b, _i, _n) \
- { \
- _b[_i++] = (_n >> 24) & 0xff; \
- _b[_i++] = (_n >> 16) & 0xff; \
- _b[_i++] = (_n >> 8) & 0xff; \
- _b[_i++] = _n & 0xff; \
- }
-
-#define COPY_BUFFER_TO_INT(_b, _i, _n) \
- { \
- _n = (_b[_i++] << 24) & 0xff; \
- _n |= (_b[_i++] << 16) & 0xff; \
- _n |= (_b[_i++] << 8) & 0xff; \
- _n |= _b[_i++] & 0xff; \
- }
-
-#define COPY_BUFFER_TO_BUFFER(_b, _i, _bn) \
- { \
- char *_xyz = (char *)_bn; \
- _xyz[3] = _b[_i++]; \
- _xyz[2] = _b[_i++]; \
- _xyz[1] = _b[_i++]; \
- _xyz[0] = _b[_i++]; \
- }
-
-void snia_kmem_free(void *where, int size);
-
-#define ALREADY_LOCKED 1
-#define NOT_LOCKED 0
-static int early_l1_serial_out(nasid_t, char *, int, int /* defines above*/ );
-
-#define BCOPY(x,y,z) memcpy(y,x,z)
-
-uint8_t L1_interrupts_connected; /* Non-zero when we are in interrupt mode */
-
-
-/*
- * Console locking defines and functions.
- *
- */
-
-uint8_t L1_cons_is_inited = 0; /* non-zero when console is init'd */
-nasid_t Master_console_nasid = (nasid_t)-1;
-extern nasid_t console_nasid;
-
-u64 ia64_sn_get_console_nasid(void);
-
-inline nasid_t
-get_master_nasid(void)
-{
-#if defined(CONFIG_IA64_SGI_SN1)
- nasid_t nasid = Master_console_nasid;
-
- if ( nasid == (nasid_t)-1 ) {
- nasid = (nasid_t)ia64_sn_get_console_nasid();
- if ( (nasid < 0) || (nasid >= MAX_NASIDS) ) {
- /* Out of bounds, use local */
- console_nasid = nasid = get_nasid();
- }
- else {
- /* Got a valid nasid, set the console_nasid */
- char xx[100];
-/* zzzzzz - force nasid to 0 for now */
- sprintf(xx, "Master console is set to nasid %d (%d)\n", 0, (int)nasid);
-nasid = 0;
-/* end zzzzzz */
- xx[99] = (char)0;
- early_l1_serial_out(nasid, xx, strlen(xx), NOT_LOCKED);
- Master_console_nasid = console_nasid = nasid;
- }
- }
- return(nasid);
-#else
- return((nasid_t)0);
-#endif /* CONFIG_IA64_SGI_SN1 */
-}
-
-
-#if defined(CONFIG_IA64_SGI_SN1)
-
-#define HUB_LOCK 16
-
-#define PRIMARY_LOCK_TIMEOUT 10000000
-#define HUB_LOCK_REG(n) LOCK_HUB(n, MD_PERF_CNT0)
-
-#define SET_BITS(reg, bits) SD(reg, LD(reg) | (bits))
-#define CLR_BITS(reg, bits) SD(reg, LD(reg) & ~(bits))
-#define TST_BITS(reg, bits) ((LD(reg) & (bits)) != 0)
-
-#define HUB_TEST_AND_SET(n) LD(LOCK_HUB(n,LB_SCRATCH_REG3_RZ))
-#define HUB_CLEAR(n) SD(LOCK_HUB(n,LB_SCRATCH_REG3),0)
-
-#define RTC_TIME_MAX ((rtc_time_t) ~0ULL)
-
-/*
- * primary_lock
- *
- * Allows CPU's 0-3 to mutually exclude the hub from one another by
- * obtaining a blocking lock. Does nothing if only one CPU is active.
- *
- * This lock should be held just long enough to set or clear a global
- * lock bit. After a relatively short timeout period, this routine
- * figures something is wrong, and steals the lock. It does not set
- * any other CPU to "dead".
- */
-inline void
-primary_lock(nasid_t nasid)
-{
- rtc_time_t expire;
-
- expire = rtc_time() + PRIMARY_LOCK_TIMEOUT;
-
- while (HUB_TEST_AND_SET(nasid)) {
- if (rtc_time() > expire) {
- HUB_CLEAR(nasid);
- }
- }
-}
-
-/*
- * primary_unlock (internal)
- *
- * Counterpart to primary_lock
- */
-
-inline void
-primary_unlock(nasid_t nasid)
-{
- HUB_CLEAR(nasid);
-}
-
-/*
- * hub_unlock
- *
- * Counterpart to hub_lock_timeout and hub_lock
- */
-
-inline void
-hub_unlock(nasid_t nasid, int level)
-{
- uint64_t mask = 1ULL << level;
-
- primary_lock(nasid);
- CLR_BITS(HUB_LOCK_REG(nasid), mask);
- primary_unlock(nasid);
-}
-
-/*
- * hub_lock_timeout
- *
- * Uses primary_lock to implement multiple lock levels.
- *
- * There are 20 lock levels from 0 to 19 (limited by the number of bits
- * in HUB_LOCK_REG). To prevent deadlock, multiple locks should be
- * obtained in order of increasingly higher level, and released in the
- * reverse order.
- *
- * A timeout value of 0 may be used for no timeout.
- *
- * Returns 0 if successful, -1 if lock times out.
- */
-
-inline int
-hub_lock_timeout(nasid_t nasid, int level, rtc_time_t timeout)
-{
- uint64_t mask = 1ULL << level;
- rtc_time_t expire = (timeout ? rtc_time() + timeout : RTC_TIME_MAX);
- int done = 0;
-
- while (! done) {
- while (TST_BITS(HUB_LOCK_REG(nasid), mask)) {
- if (rtc_time() > expire)
- return -1;
- }
-
- primary_lock(nasid);
-
- if (! TST_BITS(HUB_LOCK_REG(nasid), mask)) {
- SET_BITS(HUB_LOCK_REG(nasid), mask);
- done = 1;
- }
- primary_unlock(nasid);
- }
- return 0;
-}
-
-
-#define LOCK_TIMEOUT (0x1500000 * 1) /* 0x1500000 is ~30 sec */
-
-void
-lock_console(nasid_t nasid)
-{
- int ret;
-
- /* If we already have it locked, just return */
- L1_collectibles[L1C_LOCK_COUNTER]++;
-
- ret = hub_lock_timeout(nasid, HUB_LOCK, (rtc_time_t)LOCK_TIMEOUT);
- if ( ret != 0 ) {
- L1_collectibles[L1C_LOCK_TIMEOUTS]++;
- /* timeout */
- hub_unlock(nasid, HUB_LOCK);
- /* If the 2nd lock fails, just pile ahead.... */
- hub_lock_timeout(nasid, HUB_LOCK, (rtc_time_t)LOCK_TIMEOUT);
- L1_collectibles[L1C_LOCK_TIMEOUTS]++;
- }
-}
-
-inline void
-unlock_console(nasid_t nasid)
-{
- L1_collectibles[L1C_UNLOCK_COUNTER]++;
- hub_unlock(nasid, HUB_LOCK);
-}
-
-#else /* SN2 */
-inline void lock_console(nasid_t n) {}
-inline void unlock_console(nasid_t n) {}
-
-#endif /* CONFIG_IA64_SGI_SN1 */
-
-int
-get_L1_baud(void)
-{
- return UART_BAUD_RATE;
-}
-
-
-/* uart driver functions */
-
-static inline void
-uart_delay( rtc_time_t delay_span )
-{
- UART_DELAY( delay_span );
-}
-
-#define UART_PUTC_READY(n) (READ_L1_UART_REG((n), REG_LSR) & LSR_XHRE)
-
-static int
-uart_putc( l1sc_t *sc )
-{
- WRITE_L1_UART_REG( sc->nasid, REG_DAT, sc->send[sc->sent] );
- return UART_SUCCESS;
-}
-
-
-static int
-uart_getc( l1sc_t *sc )
-{
- u_char lsr_reg = 0;
- nasid_t nasid = sc->nasid;
-
- if( (lsr_reg = READ_L1_UART_REG( nasid, REG_LSR )) &
- (LSR_RCA | LSR_PARERR | LSR_FRMERR) )
- {
- if( lsr_reg & LSR_RCA )
- return( (u_char)READ_L1_UART_REG( nasid, REG_DAT ) );
- else if( lsr_reg & (LSR_PARERR | LSR_FRMERR) ) {
- return UART_LINK;
- }
- }
-
- return UART_NO_CHAR;
-}
-
-
-#define PROM_SER_CLK_SPEED 12000000
-#define PROM_SER_DIVISOR(x) (PROM_SER_CLK_SPEED / ((x) * 16))
-
-static void
-uart_init( l1sc_t *sc, int baud )
-{
- rtc_time_t expire;
- int clkdiv;
- nasid_t nasid;
-
- clkdiv = PROM_SER_DIVISOR(baud);
- expire = rtc_time() + UART_INIT_TIMEOUT;
- nasid = sc->nasid;
-
- /* make sure the transmit FIFO is empty */
- while( !(READ_L1_UART_REG( nasid, REG_LSR ) & LSR_XSRE) ) {
- uart_delay( UART_DELAY_SPAN );
- if( rtc_time() > expire ) {
- break;
- }
- }
-
- if ( sc->uart == BRL1_LOCALHUB_UART )
- lock_console(nasid);
-
- /* Setup for the proper baud rate */
- WRITE_L1_UART_REG( nasid, REG_LCR, LCR_DLAB );
- uart_delay( UART_DELAY_SPAN );
- WRITE_L1_UART_REG( nasid, REG_DLH, (clkdiv >> 8) & 0xff );
- uart_delay( UART_DELAY_SPAN );
- WRITE_L1_UART_REG( nasid, REG_DLL, clkdiv & 0xff );
- uart_delay( UART_DELAY_SPAN );
-
- /* set operating parameters and set DLAB to 0 */
-
- /* 8bit, one stop, clear request to send, auto flow control */
- WRITE_L1_UART_REG( nasid, REG_LCR, LCR_BITS8 | LCR_STOP1 );
- uart_delay( UART_DELAY_SPAN );
- WRITE_L1_UART_REG( nasid, REG_MCR, MCR_RTS | MCR_AFE );
- uart_delay( UART_DELAY_SPAN );
-
- /* disable interrupts */
- WRITE_L1_UART_REG( nasid, REG_ICR, 0x0 );
- uart_delay( UART_DELAY_SPAN );
-
- /* enable FIFO mode and reset both FIFOs, trigger on 1 */
- WRITE_L1_UART_REG( nasid, REG_FCR, FCR_FIFOEN );
- uart_delay( UART_DELAY_SPAN );
- WRITE_L1_UART_REG( nasid, REG_FCR, FCR_FIFOEN | FCR_RxFIFO | FCR_TxFIFO | RxLVL0);
-
- if ( sc->uart == BRL1_LOCALHUB_UART )
- unlock_console(nasid);
-}
-
-/* This requires the console lock */
-
-#if defined(CONFIG_IA64_SGI_SN1)
-
-static void
-uart_intr_enable( l1sc_t *sc, u_char mask )
-{
- u_char lcr_reg, icr_reg;
- nasid_t nasid = sc->nasid;
-
- if ( sc->uart == BRL1_LOCALHUB_UART )
- lock_console(nasid);
-
- /* make sure that the DLAB bit in the LCR register is 0
- */
- lcr_reg = READ_L1_UART_REG( nasid, REG_LCR );
- lcr_reg &= ~(LCR_DLAB);
- WRITE_L1_UART_REG( nasid, REG_LCR, lcr_reg );
-
- /* enable indicated interrupts
- */
- icr_reg = READ_L1_UART_REG( nasid, REG_ICR );
- icr_reg |= mask;
- WRITE_L1_UART_REG( nasid, REG_ICR, icr_reg /*(ICR_RIEN | ICR_TIEN)*/ );
-
- if ( sc->uart == BRL1_LOCALHUB_UART )
- unlock_console(nasid);
-}
-
-/* This requires the console lock */
-static void
-uart_intr_disable( l1sc_t *sc, u_char mask )
-{
- u_char lcr_reg, icr_reg;
- nasid_t nasid = sc->nasid;
-
- if ( sc->uart == BRL1_LOCALHUB_UART )
- lock_console(nasid);
-
- /* make sure that the DLAB bit in the LCR register is 0
- */
- lcr_reg = READ_L1_UART_REG( nasid, REG_LCR );
- lcr_reg &= ~(LCR_DLAB);
- WRITE_L1_UART_REG( nasid, REG_LCR, lcr_reg );
-
- /* enable indicated interrupts
- */
- icr_reg = READ_L1_UART_REG( nasid, REG_ICR );
- icr_reg &= mask;
- WRITE_L1_UART_REG( nasid, REG_ICR, icr_reg /*(ICR_RIEN | ICR_TIEN)*/ );
-
- if ( sc->uart == BRL1_LOCALHUB_UART )
- unlock_console(nasid);
-}
-#endif /* CONFIG_IA64_SGI_SN1 */
-
-#define uart_enable_xmit_intr(sc) \
- uart_intr_enable((sc), ICR_TIEN)
-
-#define uart_disable_xmit_intr(sc) \
- uart_intr_disable((sc), ~(ICR_TIEN))
-
-#define uart_enable_recv_intr(sc) \
- uart_intr_enable((sc), ICR_RIEN)
-
-#define uart_disable_recv_intr(sc) \
- uart_intr_disable((sc), ~(ICR_RIEN))
-
-
-/*********************************************************************
- * Routines for accessing a remote (router) UART
- */
-
-#define READ_RTR_L1_UART_REG(p, n, r, v) \
- { \
- if( vector_read_node( (p), (n), 0, \
- RR_JBUS1(r), (v) ) ) { \
- return UART_VECTOR; \
- } \
- }
-
-#define WRITE_RTR_L1_UART_REG(p, n, r, v) \
- { \
- if( vector_write_node( (p), (n), 0, \
- RR_JBUS1(r), (v) ) ) { \
- return UART_VECTOR; \
- } \
- }
-
-#define RTR_UART_PUTC_TIMEOUT UART_PUTC_TIMEOUT*10
-#define RTR_UART_DELAY_SPAN UART_DELAY_SPAN
-#define RTR_UART_INIT_TIMEOUT UART_INIT_TIMEOUT*10
-
-static int
-rtr_uart_putc( l1sc_t *sc )
-{
- uint64_t regval, c;
- nasid_t nasid = sc->nasid;
- net_vec_t path = sc->uart;
- rtc_time_t expire = rtc_time() + RTR_UART_PUTC_TIMEOUT;
-
- c = (sc->send[sc->sent] & 0xffULL);
-
- while( 1 )
- {
- /* Check for "tx hold reg empty" bit. */
- READ_RTR_L1_UART_REG( path, nasid, REG_LSR, ®val );
- if( regval & LSR_XHRE )
- {
- WRITE_RTR_L1_UART_REG( path, nasid, REG_DAT, c );
- return UART_SUCCESS;
- }
-
- if( rtc_time() >= expire )
- {
- return UART_TIMEOUT;
- }
- uart_delay( RTR_UART_DELAY_SPAN );
- }
-}
-
-
-static int
-rtr_uart_getc( l1sc_t *sc )
-{
- uint64_t regval;
- nasid_t nasid = sc->nasid;
- net_vec_t path = sc->uart;
-
- READ_RTR_L1_UART_REG( path, nasid, REG_LSR, &regval );
- if( regval & (LSR_RCA | LSR_PARERR | LSR_FRMERR) )
- {
- if( regval & LSR_RCA )
- {
- READ_RTR_L1_UART_REG( path, nasid, REG_DAT, &regval );
- return( (int)regval );
- }
- else
- {
- return UART_LINK;
- }
- }
-
- return UART_NO_CHAR;
-}
-
-
-static int
-rtr_uart_init( l1sc_t *sc, int baud )
-{
- rtc_time_t expire;
- int clkdiv;
- nasid_t nasid;
- net_vec_t path;
- uint64_t regval;
-
- clkdiv = PROM_SER_DIVISOR(baud);
- expire = rtc_time() + RTR_UART_INIT_TIMEOUT;
- nasid = sc->nasid;
- path = sc->uart;
-
- /* make sure the transmit FIFO is empty */
- while(1) {
- READ_RTR_L1_UART_REG( path, nasid, REG_LSR, &regval );
- if( regval & LSR_XSRE ) {
- break;
- }
- if( rtc_time() > expire ) {
- break;
- }
- uart_delay( RTR_UART_DELAY_SPAN );
- }
-
- WRITE_RTR_L1_UART_REG( path, nasid, REG_LCR, LCR_DLAB );
- uart_delay( UART_DELAY_SPAN );
- WRITE_RTR_L1_UART_REG( path, nasid, REG_DLH, (clkdiv >> 8) & 0xff );
- uart_delay( UART_DELAY_SPAN );
- WRITE_RTR_L1_UART_REG( path, nasid, REG_DLL, clkdiv & 0xff );
- uart_delay( UART_DELAY_SPAN );
-
- /* set operating parameters and set DLAB to 0 */
- WRITE_RTR_L1_UART_REG( path, nasid, REG_LCR, LCR_BITS8 | LCR_STOP1 );
- uart_delay( UART_DELAY_SPAN );
- WRITE_RTR_L1_UART_REG( path, nasid, REG_MCR, MCR_RTS | MCR_AFE );
- uart_delay( UART_DELAY_SPAN );
-
- /* disable interrupts */
- WRITE_RTR_L1_UART_REG( path, nasid, REG_ICR, 0x0 );
- uart_delay( UART_DELAY_SPAN );
-
- /* enable FIFO mode and reset both FIFOs */
- WRITE_RTR_L1_UART_REG( path, nasid, REG_FCR, FCR_FIFOEN );
- uart_delay( UART_DELAY_SPAN );
- WRITE_RTR_L1_UART_REG( path, nasid, REG_FCR,
- FCR_FIFOEN | FCR_RxFIFO | FCR_TxFIFO );
-
- return 0;
-}
-
-/*********************************************************************
- * locking macros
- */
-
-#define L1SC_SEND_LOCK(l,p) { if ((l)->uart == BRL1_LOCALHUB_UART) spin_lock_irqsave(&((l)->send_lock),p); }
-#define L1SC_SEND_UNLOCK(l,p) { if ((l)->uart == BRL1_LOCALHUB_UART) spin_unlock_irqrestore(&((l)->send_lock), p); }
-#define L1SC_RECV_LOCK(l,p) { if ((l)->uart == BRL1_LOCALHUB_UART) spin_lock_irqsave(&((l)->recv_lock), p); }
-#define L1SC_RECV_UNLOCK(l,p) { if ((l)->uart == BRL1_LOCALHUB_UART) spin_unlock_irqrestore(&((l)->recv_lock), p); }
-
-
-/*********************************************************************
- * subchannel manipulation
- *
- * The SUBCH_[UN]LOCK macros are used to arbitrate subchannel
- * allocation. SUBCH_DATA_[UN]LOCK control access to data structures
- * associated with particular subchannels (e.g., receive queues).
- *
- */
-#define SUBCH_LOCK(sc, p) spin_lock_irqsave( &((sc)->subch_lock), p )
-#define SUBCH_UNLOCK(sc, p) spin_unlock_irqrestore( &((sc)->subch_lock), p )
-#define SUBCH_DATA_LOCK(sbch, p) spin_lock_irqsave( &((sbch)->data_lock), p )
-#define SUBCH_DATA_UNLOCK(sbch, p) spin_unlock_irqrestore( &((sbch)->data_lock), p )
-
-
-/*
- * set a function to be called for subchannel ch in the event of
- * a transmission low-water interrupt from the uart
- */
-void
-subch_set_tx_notify( l1sc_t *sc, int ch, brl1_notif_t func )
-{
- unsigned long pl = 0;
-
- L1SC_SEND_LOCK( sc, pl );
-#if !defined(SYNC_CONSOLE_WRITE)
- if ( func && !sc->send_in_use )
- uart_enable_xmit_intr( sc );
-#endif
- sc->subch[ch].tx_notify = func;
- L1SC_SEND_UNLOCK(sc, pl );
-}
-
-/*
- * set a function to be called for subchannel ch when data is received
- */
-void
-subch_set_rx_notify( l1sc_t *sc, int ch, brl1_notif_t func )
-{
- unsigned long pl = 0;
- brl1_sch_t *subch = &(sc->subch[ch]);
-
- SUBCH_DATA_LOCK( subch, pl );
- sc->subch[ch].rx_notify = func;
- SUBCH_DATA_UNLOCK( subch, pl );
-}
-
-/*********************************************************************
- * Queue manipulation macros
- *
- *
- */
-#define NEXT(p) (((p) + 1) & (BRL1_QSIZE-1)) /* assume power of 2 */
-
-#define cq_init(q) bzero((q), sizeof (*(q)))
-#define cq_empty(q) ((q)->ipos == (q)->opos)
-#define cq_full(q) (NEXT((q)->ipos) == (q)->opos)
-#define cq_used(q) ((q)->opos <= (q)->ipos ? \
- (q)->ipos - (q)->opos : \
- BRL1_QSIZE + (q)->ipos - (q)->opos)
-#define cq_room(q) ((q)->opos <= (q)->ipos ? \
- BRL1_QSIZE - 1 + (q)->opos - (q)->ipos : \
- (q)->opos - (q)->ipos - 1)
-#define cq_add(q, c) ((q)->buf[(q)->ipos] = (u_char) (c), \
- (q)->ipos = NEXT((q)->ipos))
-#define cq_rem(q, c) ((c) = (q)->buf[(q)->opos], \
- (q)->opos = NEXT((q)->opos))
-#define cq_discard(q) ((q)->opos = NEXT((q)->opos))
-
-#define cq_tent_full(q) (NEXT((q)->tent_next) == (q)->opos)
-#define cq_tent_len(q) ((q)->ipos <= (q)->tent_next ? \
- (q)->tent_next - (q)->ipos : \
- BRL1_QSIZE + (q)->tent_next - (q)->ipos)
-#define cq_tent_add(q, c) \
- ((q)->buf[(q)->tent_next] = (u_char) (c), \
- (q)->tent_next = NEXT((q)->tent_next))
-#define cq_commit_tent(q) \
- ((q)->ipos = (q)->tent_next)
-#define cq_discard_tent(q) \
- ((q)->tent_next = (q)->ipos)
-
-
-
-
-/*********************************************************************
- * CRC-16 (for checking bedrock/L1 packets).
- *
- * These are based on RFC 1662 ("PPP in HDLC-like framing").
- */
-
-static unsigned short fcstab[256] = {
- 0x0000, 0x1189, 0x2312, 0x329b, 0x4624, 0x57ad, 0x6536, 0x74bf,
- 0x8c48, 0x9dc1, 0xaf5a, 0xbed3, 0xca6c, 0xdbe5, 0xe97e, 0xf8f7,
- 0x1081, 0x0108, 0x3393, 0x221a, 0x56a5, 0x472c, 0x75b7, 0x643e,
- 0x9cc9, 0x8d40, 0xbfdb, 0xae52, 0xdaed, 0xcb64, 0xf9ff, 0xe876,
- 0x2102, 0x308b, 0x0210, 0x1399, 0x6726, 0x76af, 0x4434, 0x55bd,
- 0xad4a, 0xbcc3, 0x8e58, 0x9fd1, 0xeb6e, 0xfae7, 0xc87c, 0xd9f5,
- 0x3183, 0x200a, 0x1291, 0x0318, 0x77a7, 0x662e, 0x54b5, 0x453c,
- 0xbdcb, 0xac42, 0x9ed9, 0x8f50, 0xfbef, 0xea66, 0xd8fd, 0xc974,
- 0x4204, 0x538d, 0x6116, 0x709f, 0x0420, 0x15a9, 0x2732, 0x36bb,
- 0xce4c, 0xdfc5, 0xed5e, 0xfcd7, 0x8868, 0x99e1, 0xab7a, 0xbaf3,
- 0x5285, 0x430c, 0x7197, 0x601e, 0x14a1, 0x0528, 0x37b3, 0x263a,
- 0xdecd, 0xcf44, 0xfddf, 0xec56, 0x98e9, 0x8960, 0xbbfb, 0xaa72,
- 0x6306, 0x728f, 0x4014, 0x519d, 0x2522, 0x34ab, 0x0630, 0x17b9,
- 0xef4e, 0xfec7, 0xcc5c, 0xddd5, 0xa96a, 0xb8e3, 0x8a78, 0x9bf1,
- 0x7387, 0x620e, 0x5095, 0x411c, 0x35a3, 0x242a, 0x16b1, 0x0738,
- 0xffcf, 0xee46, 0xdcdd, 0xcd54, 0xb9eb, 0xa862, 0x9af9, 0x8b70,
- 0x8408, 0x9581, 0xa71a, 0xb693, 0xc22c, 0xd3a5, 0xe13e, 0xf0b7,
- 0x0840, 0x19c9, 0x2b52, 0x3adb, 0x4e64, 0x5fed, 0x6d76, 0x7cff,
- 0x9489, 0x8500, 0xb79b, 0xa612, 0xd2ad, 0xc324, 0xf1bf, 0xe036,
- 0x18c1, 0x0948, 0x3bd3, 0x2a5a, 0x5ee5, 0x4f6c, 0x7df7, 0x6c7e,
- 0xa50a, 0xb483, 0x8618, 0x9791, 0xe32e, 0xf2a7, 0xc03c, 0xd1b5,
- 0x2942, 0x38cb, 0x0a50, 0x1bd9, 0x6f66, 0x7eef, 0x4c74, 0x5dfd,
- 0xb58b, 0xa402, 0x9699, 0x8710, 0xf3af, 0xe226, 0xd0bd, 0xc134,
- 0x39c3, 0x284a, 0x1ad1, 0x0b58, 0x7fe7, 0x6e6e, 0x5cf5, 0x4d7c,
- 0xc60c, 0xd785, 0xe51e, 0xf497, 0x8028, 0x91a1, 0xa33a, 0xb2b3,
- 0x4a44, 0x5bcd, 0x6956, 0x78df, 0x0c60, 0x1de9, 0x2f72, 0x3efb,
- 0xd68d, 0xc704, 0xf59f, 0xe416, 0x90a9, 0x8120, 0xb3bb, 0xa232,
- 0x5ac5, 0x4b4c, 0x79d7, 0x685e, 0x1ce1, 0x0d68, 0x3ff3, 0x2e7a,
- 0xe70e, 0xf687, 0xc41c, 0xd595, 0xa12a, 0xb0a3, 0x8238, 0x93b1,
- 0x6b46, 0x7acf, 0x4854, 0x59dd, 0x2d62, 0x3ceb, 0x0e70, 0x1ff9,
- 0xf78f, 0xe606, 0xd49d, 0xc514, 0xb1ab, 0xa022, 0x92b9, 0x8330,
- 0x7bc7, 0x6a4e, 0x58d5, 0x495c, 0x3de3, 0x2c6a, 0x1ef1, 0x0f78
-};
-
-#define INIT_CRC 0xFFFF /* initial CRC value */
-#define GOOD_CRC 0xF0B8 /* "good" final CRC value */
-
-static unsigned short crc16_calc( unsigned short crc, u_char c )
-{
- return( (crc >> 8) ^ fcstab[(crc ^ c) & 0xff] );
-}
-
-
-/***********************************************************************
- * The following functions implement the PPP-like bedrock/L1 protocol
- * layer.
- *
- */
-
-#define BRL1_FLAG_CH 0x7e
-#define BRL1_ESC_CH 0x7d
-#define BRL1_XOR_CH 0x20
-
-/* L1<->Bedrock packet types */
-#define BRL1_REQUEST 0x00
-#define BRL1_RESPONSE 0x20
-#define BRL1_EVENT 0x40
-
-#define BRL1_PKT_TYPE_MASK 0xE0
-#define BRL1_SUBCH_MASK 0x1F
-
-#define PKT_TYPE(tsb) ((tsb) & BRL1_PKT_TYPE_MASK)
-#define SUBCH(tsb) ((tsb) & BRL1_SUBCH_MASK)
-
-/* timeouts */
-#define BRL1_INIT_TIMEOUT 500000
-
-/*
- * brl1_discard_packet is a dummy "receive callback" used to get rid
- * of packets we don't want
- */
-void brl1_discard_packet( int dummy0, void *dummy1, struct pt_regs *dummy2, l1sc_t *sc, int ch )
-{
- unsigned long pl = 0;
- brl1_sch_t *subch = &sc->subch[ch];
-
- sc_cq_t *q = subch->iqp;
- SUBCH_DATA_LOCK( subch, pl );
- q->opos = q->ipos;
- atomic_set(&(subch->packet_arrived), 0);
- SUBCH_DATA_UNLOCK( subch, pl );
-}
-
-
-/*
- * brl1_send_chars sends the send buffer in the l1sc_t structure
- * out through the uart. Assumes that the caller has locked the
- * UART (or send buffer in the kernel).
- *
- * This routine doesn't block-- if you want it to, call it in
- * a loop.
- */
-static int
-brl1_send_chars( l1sc_t *sc )
-{
- /* We track the depth of the C brick's UART's
- * fifo in software, and only check if the UART is accepting
- * characters when our count indicates that the fifo should
- * be full.
- *
- * For remote (router) UARTs, we check with the UART before sending every
- * character.
- */
- if( sc->uart == BRL1_LOCALHUB_UART ) {
- if( !(sc->fifo_space) && UART_PUTC_READY( sc->nasid ) )
- sc->fifo_space = UART_FIFO_DEPTH;
-
- while( (sc->sent < sc->send_len) && (sc->fifo_space) ) {
- uart_putc( sc );
- sc->fifo_space--;
- sc->sent++;
- }
- }
- else {
-
- /* remote (router) UARTs */
-
- int result;
- int tries = 0;
-
- while( sc->sent < sc->send_len ) {
- result = sc->putc_f( sc );
- if( result >= 0 ) {
- (sc->sent)++;
- continue;
- }
- if( result == UART_TIMEOUT ) {
- tries++;
- /* send this character in TIMEOUT_RETRIES... */
- if( tries < 30 /* TIMEOUT_RETRIES */ ) {
- continue;
- }
- /* ...or else... */
- else {
- /* ...drop the packet. */
- sc->sent = sc->send_len;
- return sc->send_len;
- }
- }
- if( result < 0 ) {
- return result;
- }
- }
- }
- return sc->sent;
-}
-
-
-/* brl1_send formats up a packet and (at least begins to) send it
- * to the uart. If the send buffer is in use when this routine obtains
- * the lock, it will behave differently depending on the "wait" parameter.
- * For wait == 0 (most I/O), it will return 0 (as in "zero bytes sent"),
- * hopefully encouraging the caller to back off (unlock any high-level
- * spinlocks) and allow the buffer some time to drain. For wait==1 (high-
- * priority I/O along the lines of kernel error messages), we will flush
- * the current contents of the send buffer and beat on the uart
- * until our message has been completely transmitted.
- */
-
-static int
-brl1_send( l1sc_t *sc, char *msg, int len, u_char type_and_subch, int wait )
-{
- unsigned long pl = 0;
- int index;
- int pkt_len = 0;
- unsigned short crc = INIT_CRC;
- char *send_ptr = sc->send;
-
-
- if( sc->send_in_use && !(wait) ) {
- /* We are in the middle of sending, but can wait until done */
- return 0;
- }
- else if( sc->send_in_use ) {
- /* buffer's in use, but we're synchronous I/O, so we're going
- * to send whatever's in there right now and take the buffer
- */
- int counter = 0;
-
- if ( sc->uart == BRL1_LOCALHUB_UART )
- lock_console(sc->nasid);
- L1SC_SEND_LOCK(sc, pl);
- while( sc->sent < sc->send_len ) {
- brl1_send_chars( sc );
- if ( counter++ > 0xfffff ) {
- char *str = "Looping waiting for uart to clear (1)\n";
- early_l1_serial_out(sc->nasid, str, strlen(str), ALREADY_LOCKED);
- break;
- }
- }
- }
- else {
- if ( sc->uart == BRL1_LOCALHUB_UART )
- lock_console(sc->nasid);
- L1SC_SEND_LOCK(sc, pl);
- sc->send_in_use = 1;
- }
- *send_ptr++ = BRL1_FLAG_CH;
- *send_ptr++ = type_and_subch;
- pkt_len += 2;
- crc = crc16_calc( crc, type_and_subch );
-
- /* limit number of characters accepted to max payload size */
- if( len > (BRL1_QSIZE - 1) )
- len = (BRL1_QSIZE - 1);
-
- /* copy in the message buffer (inserting PPP
- * framing info where necessary)
- */
- for( index = 0; index < len; index++ ) {
-
- switch( *msg ) {
-
- case BRL1_FLAG_CH:
- *send_ptr++ = BRL1_ESC_CH;
- *send_ptr++ = (*msg) ^ BRL1_XOR_CH;
- pkt_len += 2;
- break;
-
- case BRL1_ESC_CH:
- *send_ptr++ = BRL1_ESC_CH;
- *send_ptr++ = (*msg) ^ BRL1_XOR_CH;
- pkt_len += 2;
- break;
-
- default:
- *send_ptr++ = *msg;
- pkt_len++;
- }
- crc = crc16_calc( crc, *msg );
- msg++;
- }
- crc ^= 0xffff;
-
- for( index = 0; index < sizeof(crc); index++ ) {
- char crc_char = (char)(crc & 0x00FF);
- if( (crc_char == BRL1_ESC_CH) || (crc_char == BRL1_FLAG_CH) ) {
- *send_ptr++ = BRL1_ESC_CH;
- pkt_len++;
- crc_char ^= BRL1_XOR_CH;
- }
- *send_ptr++ = crc_char;
- pkt_len++;
- crc >>= 8;
- }
-
- *send_ptr++ = BRL1_FLAG_CH;
- pkt_len++;
-
- sc->send_len = pkt_len;
- sc->sent = 0;
-
- {
- int counter = 0;
- do {
- brl1_send_chars( sc );
- if ( counter++ > 0xfffff ) {
- char *str = "Looping waiting for uart to clear (2)\n";
- early_l1_serial_out(sc->nasid, str, strlen(str), ALREADY_LOCKED);
- break;
- }
- } while( (sc->sent < sc->send_len) && wait );
- }
-
- if ( sc->uart == BRL1_LOCALHUB_UART )
- unlock_console(sc->nasid);
-
- if( sc->sent == sc->send_len ) {
- /* success! release the send buffer and call the callup */
-#if !defined(SYNC_CONSOLE_WRITE)
- brl1_notif_t callup;
-#endif
-
- sc->send_in_use = 0;
- /* call any upper layer that's asked for notification */
-#if defined(XX_SYNC_CONSOLE_WRITE)
- /*
- * This is probably not a good idea - since the l1_ write func can be called multiple
- * time within the callup function.
- */
- callup = subch->tx_notify;
- if( callup && (SUBCH(type_and_subch) == SC_CONS_SYSTEM) ) {
- L1_collectibles[L1C_SEND_CALLUPS]++;
- (*callup)(sc->subch[SUBCH(type_and_subch)].irq_frame.bf_irq,
- sc->subch[SUBCH(type_and_subch)].irq_frame.bf_dev_id,
- sc->subch[SUBCH(type_and_subch)].irq_frame.bf_regs, sc, SUBCH(type_and_subch));
- }
-#endif /* SYNC_CONSOLE_WRITE */
- }
-#if !defined(SYNC_CONSOLE_WRITE)
- else if ( !wait ) {
- /* enable low-water interrupts so buffer will be drained */
- uart_enable_xmit_intr(sc);
- }
-#endif
-
- L1SC_SEND_UNLOCK(sc, pl);
-
- return len;
-}
-
-/* brl1_send_cont is intended to be called as an interrupt service
- * routine. It sends until the UART won't accept any more characters,
- * or until an error is encountered (in which case we surrender the
- * send buffer and give up trying to send the packet). Once the
- * last character in the packet has been sent, this routine releases
- * the send buffer and calls any previously-registered "low-water"
- * output routines.
- */
-
-#if !defined(SYNC_CONSOLE_WRITE)
-
-int
-brl1_send_cont( l1sc_t *sc )
-{
- unsigned long pl = 0;
- int done = 0;
- brl1_notif_t callups[BRL1_NUM_SUBCHANS];
- brl1_notif_t *callup;
- brl1_sch_t *subch;
- int index;
-
- /*
- * I'm not sure how I think this is to be handled - whether the lock is held
- * over the interrupt - but it seems like it is a bad idea....
- */
-
- if ( sc->uart == BRL1_LOCALHUB_UART )
- lock_console(sc->nasid);
- L1SC_SEND_LOCK(sc, pl);
- brl1_send_chars( sc );
- done = (sc->sent == sc->send_len);
- if( done ) {
- sc->send_in_use = 0;
-#if !defined(SYNC_CONSOLE_WRITE)
- uart_disable_xmit_intr(sc);
-#endif
- }
- if ( sc->uart == BRL1_LOCALHUB_UART )
- unlock_console(sc->nasid);
- /* Release the lock */
- L1SC_SEND_UNLOCK(sc, pl);
-
- return 0;
-}
-#endif /* SYNC_CONSOLE_WRITE */
-
-/* internal function -- used by brl1_receive to read a character
- * from the uart and check whether errors occurred in the process.
- */
-static int
-read_uart( l1sc_t *sc, int *c, int *result )
-{
- *c = sc->getc_f( sc );
-
- /* no character is available */
- if( *c == UART_NO_CHAR ) {
- *result = BRL1_NO_MESSAGE;
- return 0;
- }
-
- /* some error in UART */
- if( *c < 0 ) {
- *result = BRL1_LINK;
- return 0;
- }
-
- /* everything's fine */
- *result = BRL1_VALID;
- return 1;
-}
-
-
-/*
- * brl1_receive
- *
- * This function reads a Bedrock-L1 protocol packet into the l1sc_t
- * response buffer.
- *
- * The operation of this function can be expressed as a finite state
- * machine:
- *
-
-START STATE INPUT TRANSITION
-==========================================================
-BRL1_IDLE (reset or error) flag BRL1_FLAG
- other BRL1_IDLE@
-
-BRL1_FLAG (saw a flag (0x7e)) flag BRL1_FLAG
- escape BRL1_IDLE@
- header byte BRL1_HDR
- other BRL1_IDLE@
-
-BRL1_HDR (saw a type/subch byte)(see below) BRL1_BODY
- BRL1_HDR
-
-BRL1_BODY (reading packet body) flag BRL1_FLAG
- escape BRL1_ESC
- other BRL1_BODY
-
-BRL1_ESC (saw an escape (0x7d)) flag BRL1_FLAG@
- escape BRL1_IDLE@
- other BRL1_BODY
-==========================================================
-
-"@" denotes an error transition.
-
- * The BRL1_HDR state is a transient state which doesn't read input,
- * but just provides a way in to code which decides to whom an
- * incoming packet should be directed.
- *
- * brl1_receive can be used to poll for input from the L1, or as
- * an interrupt service routine. It reads as much data as is
- * ready from the junk bus UART and places into the appropriate
- * input queues according to subchannel. The header byte is
- * stripped from console-type data, but is retained for message-
- * type data (L1 responses). A length byte will also be
- * prepended to message-type packets.
- *
- * This routine is non-blocking; if the caller needs to block
- * for input, it must call brl1_receive in a loop.
- *
- * brl1_receive returns when there is no more input, the queue
- * for the current incoming message is full, or there is an
- * error (parity error, bad header, bad CRC, etc.).
- */
-
-#define STATE_SET(l,s) ((l)->brl1_state = (s))
-#define STATE_GET(l) ((l)->brl1_state)
-
-#define LAST_HDR_SET(l,h) ((l)->brl1_last_hdr = (h))
-#define LAST_HDR_GET(l) ((l)->brl1_last_hdr)
-
-#define VALID_HDR(c) \
- ( SUBCH((c)) <= SC_CONS_SYSTEM \
- ? PKT_TYPE((c)) == BRL1_REQUEST \
- : ( PKT_TYPE((c)) == BRL1_RESPONSE || \
- PKT_TYPE((c)) == BRL1_EVENT ) )
-
-#define IS_TTY_PKT(l) ( SUBCH(LAST_HDR_GET(l)) <= SC_CONS_SYSTEM ? 1 : 0 )
-
-
-int
-brl1_receive( l1sc_t *sc, int mode )
-{
- int result; /* value to be returned by brl1_receive */
- int c; /* most-recently-read character */
- int done; /* set done to break out of recv loop */
- unsigned long pl = 0, cpl = 0;
- sc_cq_t *q; /* pointer to queue we're working with */
-
- result = BRL1_NO_MESSAGE;
-
- L1SC_RECV_LOCK(sc, cpl);
-
- done = 0;
- while( !done )
- {
- switch( STATE_GET(sc) )
- {
-
- case BRL1_IDLE:
- /* Initial or error state. Waiting for a flag character
- * to resynchronize with the L1.
- */
-
- if( !read_uart( sc, &c, &result ) ) {
-
- /* error reading uart */
- done = 1;
- continue;
- }
-
- if( c == BRL1_FLAG_CH ) {
- /* saw a flag character */
- STATE_SET( sc, BRL1_FLAG );
- continue;
- }
- break;
-
- case BRL1_FLAG:
- /* One or more flag characters have been read; look for
- * the beginning of a packet (header byte).
- */
-
- if( !read_uart( sc, &c, &result ) ) {
-
- /* error reading uart */
- if( c != UART_NO_CHAR )
- STATE_SET( sc, BRL1_IDLE );
-
- done = 1;
- continue;
- }
-
- if( c == BRL1_FLAG_CH ) {
- /* multiple flags are OK */
- continue;
- }
-
- if( !VALID_HDR( c ) ) {
- /* if c isn't a flag it should have been
- * a valid header, so we have an error
- */
- result = BRL1_PROTOCOL;
- STATE_SET( sc, BRL1_IDLE );
- done = 1;
- continue;
- }
-
- /* we have a valid header byte */
- LAST_HDR_SET( sc, c );
- STATE_SET( sc, BRL1_HDR );
-
- break;
-
- case BRL1_HDR:
- /* A header byte has been read. Do some bookkeeping. */
- q = sc->subch[ SUBCH( LAST_HDR_GET(sc) ) ].iqp;
- ASSERT(q);
-
- if( !IS_TTY_PKT(sc) ) {
- /* if this is an event or command response rather
- * than console I/O, we need to reserve a couple
- * of extra spaces in the queue for the header
- * byte and a length byte; if we can't, stay in
- * the BRL1_HDR state.
- */
- if( cq_room( q ) < 2 ) {
- result = BRL1_FULL_Q;
- done = 1;
- continue;
- }
- cq_tent_add( q, 0 ); /* reserve length byte */
- cq_tent_add( q, LAST_HDR_GET( sc ) ); /* record header byte */
- }
- STATE_SET( sc, BRL1_BODY );
-
- break;
-
- case BRL1_BODY:
- /* A header byte has been read. We are now attempting
- * to receive the packet body.
- */
-
- q = sc->subch[ SUBCH( LAST_HDR_GET(sc) ) ].iqp;
- ASSERT(q);
-
- /* if the queue we want to write into is full, don't read from
- * the uart (this provides backpressure to the L1 side)
- */
- if( cq_tent_full( q ) ) {
- result = BRL1_FULL_Q;
- done = 1;
- continue;
- }
-
- if( !read_uart( sc, &c, &result ) ) {
-
- /* error reading uart */
- if( c != UART_NO_CHAR )
- STATE_SET( sc, BRL1_IDLE );
- done = 1;
- continue;
- }
-
- if( c == BRL1_ESC_CH ) {
- /* prepare to unescape the next character */
- STATE_SET( sc, BRL1_ESC );
- continue;
- }
-
- if( c == BRL1_FLAG_CH ) {
- /* flag signifies the end of a packet */
-
- unsigned short crc; /* holds the crc as we calculate it */
- int i; /* index variable */
- brl1_sch_t *subch; /* subchannel for received packet */
- brl1_notif_t callup; /* "data ready" callup */
-
- /* whatever else may happen, we've seen a flag and we're
- * starting a new packet
- */
- STATE_SET( sc, BRL1_FLAG );
-
- /* if the packet body has less than 2 characters,
- * it can't be a well-formed packet. Discard it.
- */
- if( cq_tent_len( q ) < /* 2 + possible length byte */
- (2 + (IS_TTY_PKT(sc) ? 0 : 1)) )
- {
- result = BRL1_PROTOCOL;
- cq_discard_tent( q );
- STATE_SET( sc, BRL1_FLAG );
- done = 1;
- continue;
- }
-
- /* check CRC */
-
- /* accumulate CRC, starting with the header byte and
- * ending with the transmitted CRC. This should
- * result in a known good value.
- */
- crc = crc16_calc( INIT_CRC, LAST_HDR_GET(sc) );
- for( i = (q->ipos + (IS_TTY_PKT(sc) ? 0 : 2)) % BRL1_QSIZE;
- i != q->tent_next;
- i = (i + 1) % BRL1_QSIZE )
- {
- crc = crc16_calc( crc, q->buf[i] );
- }
-
- /* verify the caclulated crc against the "good" crc value;
- * if we fail, discard the bad packet and return an error.
- */
- if( crc != (unsigned short)GOOD_CRC ) {
- result = BRL1_CRC;
- cq_discard_tent( q );
- STATE_SET( sc, BRL1_FLAG );
- done = 1;
- continue;
- }
-
- /* so the crc check was ok. Now we discard the CRC
- * from the end of the received bytes.
- */
- q->tent_next += (BRL1_QSIZE - 2);
- q->tent_next %= BRL1_QSIZE;
-
- /* get the subchannel and lock it */
- subch = &(sc->subch[SUBCH( LAST_HDR_GET(sc) )]);
- SUBCH_DATA_LOCK( subch, pl );
-
- /* if this isn't a console packet, we need to record
- * a length byte
- */
- if( !IS_TTY_PKT(sc) ) {
- q->buf[q->ipos] = cq_tent_len( q ) - 1;
- }
-
- /* record packet for posterity */
- cq_commit_tent( q );
- result = BRL1_VALID;
-
- /* notify subchannel owner that there's something
- * on the queue for them
- */
- atomic_inc(&(subch->packet_arrived));
- callup = subch->rx_notify;
- SUBCH_DATA_UNLOCK( subch, pl );
-
- if( callup && (mode == SERIAL_INTERRUPT_MODE) ) {
- L1SC_RECV_UNLOCK( sc, cpl );
- L1_collectibles[L1C_RECEIVE_CALLUPS]++;
- (*callup)( sc->subch[SUBCH(LAST_HDR_GET(sc))].irq_frame.bf_irq,
- sc->subch[SUBCH(LAST_HDR_GET(sc))].irq_frame.bf_dev_id,
- sc->subch[SUBCH(LAST_HDR_GET(sc))].irq_frame.bf_regs,
- sc, SUBCH(LAST_HDR_GET(sc)) );
- L1SC_RECV_LOCK( sc, cpl );
- }
- continue; /* go back for more! */
- }
-
- /* none of the special cases applied; we've got a normal
- * body character
- */
- cq_tent_add( q, c );
-
- break;
-
- case BRL1_ESC:
- /* saw an escape character. The next character will need
- * to be unescaped.
- */
-
- q = sc->subch[ SUBCH( LAST_HDR_GET(sc) ) ].iqp;
- ASSERT(q);
-
- /* if the queue we want to write into is full, don't read from
- * the uart (this provides backpressure to the L1 side)
- */
- if( cq_tent_full( q ) ) {
- result = BRL1_FULL_Q;
- done = 1;
- continue;
- }
-
- if( !read_uart( sc, &c, &result ) ) {
-
- /* error reading uart */
- if( c != UART_NO_CHAR ) {
- cq_discard_tent( q );
- STATE_SET( sc, BRL1_IDLE );
- }
- done = 1;
- continue;
- }
-
- if( c == BRL1_FLAG_CH ) {
- /* flag after escape is an error */
- STATE_SET( sc, BRL1_FLAG );
- cq_discard_tent( q );
- result = BRL1_PROTOCOL;
- done = 1;
- continue;
- }
-
- if( c == BRL1_ESC_CH ) {
- /* two consecutive escapes is an error */
- STATE_SET( sc, BRL1_IDLE );
- cq_discard_tent( q );
- result = BRL1_PROTOCOL;
- done = 1;
- continue;
- }
-
- /* otherwise, we've got a character that needs
- * to be unescaped
- */
- cq_tent_add( q, (c ^ BRL1_XOR_CH) );
- STATE_SET( sc, BRL1_BODY );
-
- break;
-
- } /* end of switch( STATE_GET(sc) ) */
- } /* end of while(!done) */
-
- L1SC_RECV_UNLOCK( sc, cpl );
-
- return result;
-}
-
-
-/* brl1_init initializes the Bedrock/L1 protocol layer. This includes
- * zeroing out the send and receive state information.
- */
-
-void
-brl1_init( l1sc_t *sc, nasid_t nasid, net_vec_t uart )
-{
- int i;
- brl1_sch_t *subch;
-
- bzero( sc, sizeof( *sc ) );
- sc->nasid = nasid;
- sc->uart = uart;
- sc->getc_f = (uart == BRL1_LOCALHUB_UART ? uart_getc : rtr_uart_getc);
- sc->putc_f = (uart == BRL1_LOCALHUB_UART ? uart_putc : rtr_uart_putc);
- sc->sol = 1;
- subch = sc->subch;
-
- /* initialize L1 subchannels
- */
-
- /* assign processor TTY channels */
- for( i = 0; i < CPUS_PER_NODE; i++, subch++ ) {
- subch->use = BRL1_SUBCH_RSVD;
- subch->packet_arrived = ATOMIC_INIT(0);
- spin_lock_init( &(subch->data_lock) );
- sv_init( &(subch->arrive_sv), &(subch->data_lock), SV_MON_SPIN | SV_ORDER_FIFO /* | SV_INTS */ );
- subch->tx_notify = NULL;
- /* (for now, drop elscuart packets in the kernel) */
- subch->rx_notify = brl1_discard_packet;
- subch->iqp = &sc->garbage_q;
- }
-
- /* assign system TTY channel (first free subchannel after each
- * processor's individual TTY channel has been assigned)
- */
- subch->use = BRL1_SUBCH_RSVD;
- subch->packet_arrived = ATOMIC_INIT(0);
- spin_lock_init( &(subch->data_lock) );
- sv_init( &(subch->arrive_sv), &subch->data_lock, SV_MON_SPIN | SV_ORDER_FIFO /* | SV_INTS */ );
- subch->tx_notify = NULL;
- if( sc->uart == BRL1_LOCALHUB_UART ) {
- subch->iqp = snia_kmem_zalloc_node( sizeof(sc_cq_t), KM_NOSLEEP, NASID_TO_COMPACT_NODEID(nasid) );
- ASSERT( subch->iqp );
- cq_init( subch->iqp );
- subch->rx_notify = NULL;
- }
- else {
- /* we shouldn't be getting console input from remote UARTs */
- subch->iqp = &sc->garbage_q;
- subch->rx_notify = brl1_discard_packet;
- }
- subch++; i++;
-
- /* "reserved" subchannels (0x05-0x0F); for now, throw away
- * incoming packets
- */
- for( ; i < 0x10; i++, subch++ ) {
- subch->use = BRL1_SUBCH_FREE;
- subch->packet_arrived = ATOMIC_INIT(0);
- subch->tx_notify = NULL;
- subch->rx_notify = brl1_discard_packet;
- subch->iqp = &sc->garbage_q;
- }
-
- /* remaining subchannels are free */
- for( ; i < BRL1_NUM_SUBCHANS; i++, subch++ ) {
- subch->use = BRL1_SUBCH_FREE;
- subch->packet_arrived = ATOMIC_INIT(0);
- subch->tx_notify = NULL;
- subch->rx_notify = brl1_discard_packet;
- subch->iqp = &sc->garbage_q;
- }
-
- /* initialize synchronization structures
- */
- spin_lock_init( &(sc->subch_lock) );
- spin_lock_init( &(sc->send_lock) );
- spin_lock_init( &(sc->recv_lock) );
-
- if( sc->uart == BRL1_LOCALHUB_UART ) {
- uart_init( sc, UART_BAUD_RATE );
- }
- else {
- rtr_uart_init( sc, UART_BAUD_RATE );
- }
-
- /* Set up remaining fields using L1 command functions-- elsc_module_get
- * to read the module id, elsc_debug_get to see whether or not we're
- * in verbose mode.
- */
- {
- extern int elsc_module_get(l1sc_t *);
-
- sc->modid = elsc_module_get( sc );
- sc->modid = (sc->modid < 0 ? INVALID_MODULE : sc->modid);
- sc->verbose = 1;
- }
-}
-
-/*********************************************************************
- * These are interrupt-related functions used in the kernel to service
- * the L1.
- */
-
-/*
- * brl1_intrd is the function which is called on a console interrupt.
- */
-
-#if defined(CONFIG_IA64_SGI_SN1)
-
-static void
-brl1_intrd(int irq, void *dev_id, struct pt_regs *stuff)
-{
- u_char isr_reg;
- l1sc_t *sc = get_elsc();
- int ret;
-
- L1_collectibles[L1C_INTERRUPTS]++;
- isr_reg = READ_L1_UART_REG(sc->nasid, REG_ISR);
-
- /* Save for callup args in console */
- sc->subch[SC_CONS_SYSTEM].irq_frame.bf_irq = irq;
- sc->subch[SC_CONS_SYSTEM].irq_frame.bf_dev_id = dev_id;
- sc->subch[SC_CONS_SYSTEM].irq_frame.bf_regs = stuff;
-
-#if defined(SYNC_CONSOLE_WRITE)
- while( isr_reg & ISR_RxRDY )
-#else
- while( isr_reg & (ISR_RxRDY | ISR_TxRDY) )
-#endif
- {
- if( isr_reg & ISR_RxRDY ) {
- L1_collectibles[L1C_OUR_R_INTERRUPTS]++;
- ret = brl1_receive(sc, SERIAL_INTERRUPT_MODE);
- if ( (ret != BRL1_VALID) && (ret != BRL1_NO_MESSAGE) && (ret != BRL1_PROTOCOL) && (ret != BRL1_CRC) )
- L1_collectibles[L1C_REC_STALLS] = ret;
- }
-#if !defined(SYNC_CONSOLE_WRITE)
- if( (isr_reg & ISR_TxRDY) || (sc->send_in_use && UART_PUTC_READY(sc->nasid)) ) {
- L1_collectibles[L1C_OUR_X_INTERRUPTS]++;
- brl1_send_cont(sc);
- }
-#endif /* SYNC_CONSOLE_WRITE */
- isr_reg = READ_L1_UART_REG(sc->nasid, REG_ISR);
- }
-}
-#endif /* CONFIG_IA64_SGI_SN1 */
-
-
-/*
- * Install a callback function for the system console subchannel
- * to allow an upper layer to be notified when the send buffer
- * has been emptied.
- */
-static inline void
-l1_tx_notif( brl1_notif_t func )
-{
- subch_set_tx_notify( &NODEPDA(NASID_TO_COMPACT_NODEID(get_master_nasid()))->module->elsc,
- SC_CONS_SYSTEM, func );
-}
-
-
-/*
- * Install a callback function for the system console subchannel
- * to allow an upper layer to be notified when a packet has been
- * received.
- */
-static inline void
-l1_rx_notif( brl1_notif_t func )
-{
- subch_set_rx_notify( &NODEPDA(NASID_TO_COMPACT_NODEID(get_master_nasid()))->module->elsc,
- SC_CONS_SYSTEM, func );
-}
-
-
-/* brl1_intr is called directly from the uart interrupt; after it runs, the
- * interrupt "daemon" xthread is signalled to continue.
- */
-void
-brl1_intr( void )
-{
-}
-
-#define BRL1_INTERRUPT_LEVEL 65 /* linux request_irq() value */
-
-/* Return the current interrupt level */
-
-//#define CONSOLE_POLLING_ALSO
-
-int
-l1_get_intr_value( void )
-{
-#ifdef CONSOLE_POLLING_ALSO
- return(0);
-#else
- return(BRL1_INTERRUPT_LEVEL);
-#endif
-}
-
-/* Disconnect the callup functions - throw away interrupts */
-
-void
-l1_unconnect_intr(void)
-{
- /* UnRegister the upper-level callup functions */
- l1_rx_notif((brl1_notif_t)NULL);
- l1_tx_notif((brl1_notif_t)NULL);
- /* We do NOT unregister the interrupts */
-}
-
-/* Set up uart interrupt handling for this node's uart */
-
-void
-l1_connect_intr(void *rx_notify, void *tx_notify)
-{
- l1sc_t *sc;
- nasid_t nasid;
-#if defined(CONFIG_IA64_SGI_SN1)
- int tmp;
-#endif
- nodepda_t *console_nodepda;
- int intr_connect_level(cpuid_t, int, ilvl_t, intr_func_t);
-
- if ( L1_interrupts_connected ) {
- /* Interrupts are connected, so just register the callups */
- l1_rx_notif((brl1_notif_t)rx_notify);
- l1_tx_notif((brl1_notif_t)tx_notify);
-
- L1_collectibles[L1C_CONNECT_CALLS]++;
- return;
- }
- else
- L1_interrupts_connected = 1;
-
- nasid = get_master_nasid();
- console_nodepda = NODEPDA(NASID_TO_COMPACT_NODEID(nasid));
- sc = &console_nodepda->module->elsc;
- sc->intr_cpu = console_nodepda->node_first_cpu;
-
-#if defined(CONFIG_IA64_SGI_SN1)
- if ( intr_connect_level(sc->intr_cpu, UART_INTR, INTPEND0_MAXMASK, (intr_func_t)brl1_intr) ) {
- L1_interrupts_connected = 0; /* FAILS !! */
- }
- else {
- void synergy_intr_connect(int, int);
-
- synergy_intr_connect(UART_INTR, sc->intr_cpu);
- L1_collectibles[L1C_R_IRQ]++;
- tmp = request_irq(BRL1_INTERRUPT_LEVEL, brl1_intrd, SA_INTERRUPT | SA_SHIRQ, "l1_protocol_driver", (void *)sc);
- L1_collectibles[L1C_R_IRQ_RET] = (uint64_t)tmp;
- if ( tmp ) {
- L1_interrupts_connected = 0; /* FAILS !! */
- }
- else {
- /* Register the upper-level callup functions */
- l1_rx_notif((brl1_notif_t)rx_notify);
- l1_tx_notif((brl1_notif_t)tx_notify);
-
- /* Set the uarts the way we like it */
- uart_enable_recv_intr( sc );
- uart_disable_xmit_intr( sc );
- }
- }
-#endif /* CONFIG_IA64_SGI_SN1 */
-}
-
-
-/* Set the line speed */
-
-void
-l1_set_baud(int baud)
-{
-#if 0
- nasid_t nasid;
- static void uart_init(l1sc_t *, int);
-#endif
-
- L1_collectibles[L1C_SET_BAUD]++;
-
-#if 0
- if ( L1_cons_is_inited ) {
- nasid = get_master_nasid();
- if ( NODEPDA(NASID_TO_COMPACT_NODEID(nasid))->module != (module_t *)0 )
- uart_init(&NODEPDA(NASID_TO_COMPACT_NODEID(nasid))->module->elsc, baud);
- }
-#endif
- return;
-}
-
-
-/* These are functions to use from serial_in/out when in protocol
- * mode to send and receive uart control regs. These are external
- * interfaces into the protocol driver.
- */
-
-void
-l1_control_out(int offset, int value)
-{
- nasid_t nasid = get_master_nasid();
- WRITE_L1_UART_REG(nasid, offset, value);
-}
-
-/* Console input exported interface. Return a register value. */
-
-int
-l1_control_in_polled(int offset)
-{
- static int l1_control_in_local(int, int);
-
- return(l1_control_in_local(offset, SERIAL_POLLED_MODE));
-}
-
-int
-l1_control_in(int offset)
-{
- static int l1_control_in_local(int, int);
-
- return(l1_control_in_local(offset, SERIAL_INTERRUPT_MODE));
-}
-
-static int
-l1_control_in_local(int offset, int mode)
-{
- nasid_t nasid;
- int ret, input;
- static int l1_poll(l1sc_t *, int);
-
- nasid = get_master_nasid();
- ret = READ_L1_UART_REG(nasid, offset);
-
- if ( offset == REG_LSR ) {
- ret |= (LSR_XHRE | LSR_XSRE); /* can send anytime */
- if ( L1_cons_is_inited ) {
- if ( NODEPDA(NASID_TO_COMPACT_NODEID(nasid))->module != (module_t *)0 ) {
- input = l1_poll(&NODEPDA(NASID_TO_COMPACT_NODEID(nasid))->module->elsc, mode);
- if ( input ) {
- ret |= LSR_RCA;
- }
- }
- }
- }
- return(ret);
-}
-
-/*
- * Console input exported interface. Return a character (if one is available)
- */
-
-int
-l1_serial_in_polled(void)
-{
- static int l1_serial_in_local(int mode);
-
- return(l1_serial_in_local(SERIAL_POLLED_MODE));
-}
-
-int
-l1_serial_in(void)
-{
- static int l1_serial_in_local(int mode);
-
- return(l1_serial_in_local(SERIAL_INTERRUPT_MODE));
-}
-
-static int
-l1_serial_in_local(int mode)
-{
- nasid_t nasid;
- l1sc_t *sc;
- int value;
- static int l1_getc( l1sc_t *, int );
- static inline l1sc_t *early_sc_init(nasid_t);
-
- nasid = get_master_nasid();
- sc = early_sc_init(nasid);
- if ( L1_cons_is_inited ) {
- if ( NODEPDA(NASID_TO_COMPACT_NODEID(nasid))->module != (module_t *)0 ) {
- sc = &NODEPDA(NASID_TO_COMPACT_NODEID(nasid))->module->elsc;
- }
- }
- value = l1_getc(sc, mode);
- return(value);
-}
-
-/* Console output exported interface. Write message to the console. */
-
-int
-l1_serial_out( char *str, int len )
-{
- nasid_t nasid = get_master_nasid();
- int l1_write(l1sc_t *, char *, int, int);
-
- if ( L1_cons_is_inited ) {
- if ( NODEPDA(NASID_TO_COMPACT_NODEID(nasid))->module != (module_t *)0 )
- return(l1_write(&NODEPDA(NASID_TO_COMPACT_NODEID(nasid))->module->elsc, str, len,
-#if defined(SYNC_CONSOLE_WRITE)
- 1
-#else
- !L1_interrupts_connected
-#endif
- ));
- }
- return(early_l1_serial_out(nasid, str, len, NOT_LOCKED));
-}
-
-
-/*
- * These are the 'early' functions - when we need to do things before we have
- * all the structs setup.
- */
-
-static l1sc_t Early_console; /* fake l1sc_t */
-static int Early_console_inited = 0;
-
-static void
-early_brl1_init( l1sc_t *sc, nasid_t nasid, net_vec_t uart )
-{
- int i;
- brl1_sch_t *subch;
-
- bzero( sc, sizeof( *sc ) );
- sc->nasid = nasid;
- sc->uart = uart;
- sc->getc_f = (uart == BRL1_LOCALHUB_UART ? uart_getc : rtr_uart_getc);
- sc->putc_f = (uart == BRL1_LOCALHUB_UART ? uart_putc : rtr_uart_putc);
- sc->sol = 1;
- subch = sc->subch;
-
- /* initialize L1 subchannels
- */
-
- /* assign processor TTY channels */
- for( i = 0; i < CPUS_PER_NODE; i++, subch++ ) {
- subch->use = BRL1_SUBCH_RSVD;
- subch->packet_arrived = ATOMIC_INIT(0);
- subch->tx_notify = NULL;
- subch->rx_notify = NULL;
- subch->iqp = &sc->garbage_q;
- }
-
- /* assign system TTY channel (first free subchannel after each
- * processor's individual TTY channel has been assigned)
- */
- subch->use = BRL1_SUBCH_RSVD;
- subch->packet_arrived = ATOMIC_INIT(0);
- subch->tx_notify = NULL;
- subch->rx_notify = NULL;
- if( sc->uart == BRL1_LOCALHUB_UART ) {
- static sc_cq_t x_iqp;
-
- subch->iqp = &x_iqp;
- ASSERT( subch->iqp );
- cq_init( subch->iqp );
- }
- else {
- /* we shouldn't be getting console input from remote UARTs */
- subch->iqp = &sc->garbage_q;
- }
- subch++; i++;
-
- /* "reserved" subchannels (0x05-0x0F); for now, throw away
- * incoming packets
- */
- for( ; i < 0x10; i++, subch++ ) {
- subch->use = BRL1_SUBCH_FREE;
- subch->packet_arrived = ATOMIC_INIT(0);
- subch->tx_notify = NULL;
- subch->rx_notify = NULL;
- subch->iqp = &sc->garbage_q;
- }
-
- /* remaining subchannels are free */
- for( ; i < BRL1_NUM_SUBCHANS; i++, subch++ ) {
- subch->use = BRL1_SUBCH_FREE;
- subch->packet_arrived = ATOMIC_INIT(0);
- subch->tx_notify = NULL;
- subch->rx_notify = NULL;
- subch->iqp = &sc->garbage_q;
- }
-}
-
-static inline l1sc_t *
-early_sc_init(nasid_t nasid)
-{
- /* This is for early I/O */
- if ( Early_console_inited == 0 ) {
- early_brl1_init(&Early_console, nasid, BRL1_LOCALHUB_UART);
- Early_console_inited = 1;
- }
- return(&Early_console);
-}
-
-#define PUTCHAR(ch) \
- { \
- while( (!(READ_L1_UART_REG( nasid, REG_LSR ) & LSR_XHRE)) || \
- (!(READ_L1_UART_REG( nasid, REG_MSR ) & MSR_CTS)) ); \
- WRITE_L1_UART_REG( nasid, REG_DAT, (ch) ); \
- }
-
-static int
-early_l1_serial_out( nasid_t nasid, char *str, int len, int lock_state )
-{
- int ret, sent = 0;
- char *msg = str;
- static int early_l1_send( nasid_t nasid, char *str, int len, int lock_state );
-
- while ( sent < len ) {
- ret = early_l1_send(nasid, msg, len - sent, lock_state);
- sent += ret;
- msg += ret;
- }
- return(len);
-}
-
-static inline int
-early_l1_send( nasid_t nasid, char *str, int len, int lock_state )
-{
- int sent;
- char crc_char;
- unsigned short crc = INIT_CRC;
-
- if( len > (BRL1_QSIZE - 1) )
- len = (BRL1_QSIZE - 1);
-
- sent = len;
- if ( lock_state == NOT_LOCKED )
- lock_console(nasid);
-
- PUTCHAR( BRL1_FLAG_CH );
- PUTCHAR( BRL1_EVENT | SC_CONS_SYSTEM );
- crc = crc16_calc( crc, (BRL1_EVENT | SC_CONS_SYSTEM) );
-
- while( len ) {
-
- if( (*str == BRL1_FLAG_CH) || (*str == BRL1_ESC_CH) ) {
- PUTCHAR( BRL1_ESC_CH );
- PUTCHAR( (*str) ^ BRL1_XOR_CH );
- }
- else {
- PUTCHAR( *str );
- }
-
- crc = crc16_calc( crc, *str );
-
- str++; len--;
- }
-
- crc ^= 0xffff;
- crc_char = crc & 0xff;
- if( (crc_char == BRL1_ESC_CH) || (crc_char == BRL1_FLAG_CH) ) {
- crc_char ^= BRL1_XOR_CH;
- PUTCHAR( BRL1_ESC_CH );
- }
- PUTCHAR( crc_char );
- crc_char = (crc >> 8) & 0xff;
- if( (crc_char == BRL1_ESC_CH) || (crc_char == BRL1_FLAG_CH) ) {
- crc_char ^= BRL1_XOR_CH;
- PUTCHAR( BRL1_ESC_CH );
- }
- PUTCHAR( crc_char );
- PUTCHAR( BRL1_FLAG_CH );
-
- if ( lock_state == NOT_LOCKED )
- unlock_console(nasid);
- return sent;
-}
-
-
-/*********************************************************************
- * l1_cons functions
- *
- * These allow the L1 to act as the system console. They're intended
- * to abstract away most of the br/l1 internal details from the
- * _L1_cons_* functions (in the prom-- see "l1_console.c") and
- * l1_* functions (in the kernel-- see "sio_l1.c") that they support.
- *
- */
-
-static int
-l1_poll( l1sc_t *sc, int mode )
-{
- int ret;
-
- /* in case this gets called before the l1sc_t structure for the module_t
- * struct for this node is initialized (i.e., if we're called with a
- * zero l1sc_t pointer)...
- */
-
-
- if( !sc ) {
- return 0;
- }
-
- if( atomic_read(&sc->subch[SC_CONS_SYSTEM].packet_arrived) ) {
- return 1;
- }
-
- ret = brl1_receive( sc, mode );
- if ( (ret != BRL1_VALID) && (ret != BRL1_NO_MESSAGE) && (ret != BRL1_PROTOCOL) && (ret != BRL1_CRC) )
- L1_collectibles[L1C_REC_STALLS] = ret;
-
- if( atomic_read(&sc->subch[SC_CONS_SYSTEM].packet_arrived) ) {
- return 1;
- }
- return 0;
-}
-
-
-/* pull a character off of the system console queue (if one is available)
- */
-static int
-l1_getc( l1sc_t *sc, int mode )
-{
- unsigned long pl = 0;
- int c;
-
- brl1_sch_t *subch = &(sc->subch[SC_CONS_SYSTEM]);
- sc_cq_t *q = subch->iqp;
-
- if( !l1_poll( sc, mode ) ) {
- return 0;
- }
-
- SUBCH_DATA_LOCK( subch, pl );
- if( cq_empty( q ) ) {
- atomic_set(&subch->packet_arrived, 0);
- SUBCH_DATA_UNLOCK( subch, pl );
- return 0;
- }
- cq_rem( q, c );
- if( cq_empty( q ) )
- atomic_set(&subch->packet_arrived, 0);
- SUBCH_DATA_UNLOCK( subch, pl );
-
- return c;
-}
-
-/*
- * Write a message to the L1 on the system console subchannel.
- *
- * Danger: don't use a non-zero value for the wait parameter unless you're
- * someone important (like a kernel error message).
- */
-
-int
-l1_write( l1sc_t *sc, char *msg, int len, int wait )
-{
- int sent = 0, ret = 0;
-
- if ( wait ) {
- while ( sent < len ) {
- ret = brl1_send( sc, msg, len - sent, (SC_CONS_SYSTEM | BRL1_EVENT), wait );
- sent += ret;
- msg += ret;
- }
- ret = len;
- }
- else {
- ret = brl1_send( sc, msg, len, (SC_CONS_SYSTEM | BRL1_EVENT), wait );
- }
- return(ret);
-}
-
-/* initialize the system console subchannel
- */
-void
-l1_init(void)
-{
- /* All we do now is remember that we have been called */
- L1_cons_is_inited = 1;
-}
-
-
-/*********************************************************************
- * The following functions and definitions implement the "message"-
- * style interface to the L1 system controller.
- *
- * Note that throughout this file, "sc" generally stands for "system
- * controller", while "subchannels" tend to be represented by
- * variables with names like subch or ch.
- *
- */
-
-#ifdef L1_DEBUG
-#define L1_DBG_PRF(x) printf x
-#else
-#define L1_DBG_PRF(x)
-#endif
-
-/*
- * sc_data_ready is called to signal threads that are blocked on l1 input.
- */
-void
-sc_data_ready( int dummy0, void *dummy1, struct pt_regs *dummy2, l1sc_t *sc, int ch )
-{
- unsigned long pl = 0;
-
- brl1_sch_t *subch = &(sc->subch[ch]);
- SUBCH_DATA_LOCK( subch, pl );
- sv_signal( &(subch->arrive_sv) );
- SUBCH_DATA_UNLOCK( subch, pl );
-}
-
-/* sc_open reserves a subchannel to send a request to the L1 (the
- * L1's response will arrive on the same channel). The number
- * returned by sc_open is the system controller subchannel
- * acquired.
- */
-int
-sc_open( l1sc_t *sc, uint target )
-{
- /* The kernel version implements a locking scheme to arbitrate
- * subchannel assignment.
- */
- int ch;
- unsigned long pl = 0;
- brl1_sch_t *subch;
-
- SUBCH_LOCK( sc, pl );
-
- /* Look for a free subchannel. Subchannels 0-15 are reserved
- * for other purposes.
- */
- for( subch = &(sc->subch[BRL1_CMD_SUBCH]), ch = BRL1_CMD_SUBCH;
- ch < BRL1_NUM_SUBCHANS; subch++, ch++ ) {
- if( subch->use == BRL1_SUBCH_FREE )
- break;
- }
-
- if( ch == BRL1_NUM_SUBCHANS ) {
- /* there were no subchannels available! */
- SUBCH_UNLOCK( sc, pl );
- return SC_NSUBCH;
- }
-
- subch->use = BRL1_SUBCH_RSVD;
- SUBCH_UNLOCK( sc, pl );
-
- atomic_set(&subch->packet_arrived, 0);
- subch->target = target;
- spin_lock_init( &(subch->data_lock) );
- sv_init( &(subch->arrive_sv), &(subch->data_lock), SV_MON_SPIN | SV_ORDER_FIFO /* | SV_INTS */);
- subch->tx_notify = NULL;
- subch->rx_notify = sc_data_ready;
- subch->iqp = snia_kmem_zalloc_node( sizeof(sc_cq_t), KM_NOSLEEP,
- NASID_TO_COMPACT_NODEID(sc->nasid) );
- ASSERT( subch->iqp );
- cq_init( subch->iqp );
-
- return ch;
-}
-
-
-/* sc_close frees a Bedrock<->L1 subchannel.
- */
-int
-sc_close( l1sc_t *sc, int ch )
-{
- unsigned long pl = 0;
- brl1_sch_t *subch;
-
- SUBCH_LOCK( sc, pl );
- subch = &(sc->subch[ch]);
- if( subch->use != BRL1_SUBCH_RSVD ) {
- /* we're trying to close a subchannel that's not open */
- SUBCH_UNLOCK( sc, pl );
- return SC_NOPEN;
- }
-
- atomic_set(&subch->packet_arrived, 0);
- subch->use = BRL1_SUBCH_FREE;
-
- sv_broadcast( &(subch->arrive_sv) );
- sv_destroy( &(subch->arrive_sv) );
- spin_lock_destroy( &(subch->data_lock) );
-
- ASSERT( subch->iqp && (subch->iqp != &sc->garbage_q) );
- snia_kmem_free( subch->iqp, sizeof(sc_cq_t) );
- subch->iqp = &sc->garbage_q;
- subch->tx_notify = NULL;
- subch->rx_notify = brl1_discard_packet;
-
- SUBCH_UNLOCK( sc, pl );
-
- return SC_SUCCESS;
-}
-
-
-/* sc_construct_msg builds a bedrock-to-L1 request in the supplied
- * buffer. Returns the length of the message. The
- * safest course when passing a buffer to be filled in is to use
- * BRL1_QSIZE as the buffer size.
- *
- * Command arguments are passed as type/argument pairs, i.e., to
- * pass the number 5 as an argument to an L1 command, call
- * sc_construct_msg as follows:
- *
- * char msg[BRL1_QSIZE];
- * msg_len = sc_construct_msg( msg,
- * BRL1_QSIZE,
- * target_component,
- * L1_ADDR_TASK_BOGUSTASK,
- * L1_BOGUSTASK_REQ_BOGUSREQ,
- * 2,
- * L1_ARG_INT, 5 );
- *
- * To pass an additional ASCII argument, you'd do the following:
- *
- * char *str;
- * ... str points to a null-terminated ascii string ...
- * msg_len = sc_construct_msg( msg,
- * BRL1_QSIZE,
- * target_component,
- * L1_ADDR_TASK_BOGUSTASK,
- * L1_BOGUSTASK_REQ_BOGUSREQ,
- * 4,
- * L1_ARG_INT, 5,
- * L1_ARG_ASCII, str );
- *
- * Finally, arbitrary data of unknown type is passed using the argtype
- * code L1_ARG_UNKNOWN, a data length, and a buffer pointer, e.g.
- *
- * msg_len = sc_construct_msg( msg,
- * BRL1_QSIZE,
- * target_component,
- * L1_ADDR_TASK_BOGUSTASK,
- * L1_BOGUSTASK_REQ_BOGUSREQ,
- * 3,
- * L1_ARG_UNKNOWN, 32, bufptr );
- *
- * ...passes 32 bytes of data starting at bufptr. Note that no string or
- * "unknown"-type argument should be long enough to overflow the message
- * buffer.
- *
- * To construct a message for an L1 command that requires no arguments,
- * you'd use the following:
- *
- * msg_len = sc_construct_msg( msg,
- * BRL1_QSIZE,
- * target_component,
- * L1_ADDR_TASK_BOGUSTASK,
- * L1_BOGUSTASK_REQ_BOGUSREQ,
- * 0 );
- *
- * The final 0 means "no varargs". Notice that this parameter is used to hold
- * the number of additional arguments to sc_construct_msg, _not_ the actual
- * number of arguments used by the L1 command (so 2 per L1_ARG_[INT,ASCII]
- * type argument, and 3 per L1_ARG_UNKOWN type argument). A call to construct
- * an L1 command which required three integer arguments and two arguments of
- * some arbitrary (unknown) type would pass 12 as the value for this parameter.
- *
- * ENDIANNESS WARNING: The following code does a lot of copying back-and-forth
- * between byte arrays and four-byte big-endian integers. Depending on the
- * system controller connection and endianness of future architectures, some
- * rewriting might be necessary.
- */
-int
-sc_construct_msg( l1sc_t *sc, /* system controller struct */
- int ch, /* subchannel for this message */
- char *msg, /* message buffer */
- int msg_len, /* size of message buffer */
- l1addr_t addr_task, /* target system controller task */
- short req_code, /* 16-bit request code */
- int req_nargs, /* # of arguments (varargs) passed */
- ... ) /* any additional parameters */
-{
- uint32_t buf32; /* 32-bit buffer used to bounce things around */
- void *bufptr; /* used to hold command argument addresses */
- va_list al; /* variable argument list */
- int index; /* current index into msg buffer */
- int argno; /* current position in varargs list */
- int l1_argno; /* running total of arguments to l1 */
- int l1_arg_t; /* argument type/length */
- int l1_argno_byte; /* offset of argument count byte */
-
- index = argno = 0;
-
- /* set up destination address */
- if( (msg_len -= sizeof( buf32 )) < 0 )
- return -1;
- L1_ADDRESS_TO_TASK( &buf32, sc->subch[ch].target, addr_task );
- COPY_INT_TO_BUFFER(msg, index, buf32);
-
- /* copy request code */
- if( (msg_len -= 2) < 0 )
- return( -1 );
- msg[index++] = ((req_code >> 8) & 0xff);
- msg[index++] = (req_code & 0xff);
-
- if( !req_nargs ) {
- return index;
- }
-
- /* reserve a byte for the argument count */
- if( (msg_len -= 1) < 0 )
- return( -1 );
- l1_argno_byte = index++;
- l1_argno = 0;
-
- /* copy additional arguments */
- va_start( al, req_nargs );
- while( argno < req_nargs ) {
- l1_argno++;
- l1_arg_t = va_arg( al, int ); argno++;
- switch( l1_arg_t )
- {
- case L1_ARG_INT:
- if( (msg_len -= (sizeof( buf32 ) + 1)) < 0 )
- return( -1 );
- msg[index++] = L1_ARG_INT;
- buf32 = (unsigned)va_arg( al, int ); argno++;
- COPY_INT_TO_BUFFER(msg, index, buf32);
- break;
-
- case L1_ARG_ASCII:
- bufptr = va_arg( al, char* ); argno++;
- if( (msg_len -= (strlen( bufptr ) + 2)) < 0 )
- return( -1 );
- msg[index++] = L1_ARG_ASCII;
- strcpy( (char *)&(msg[index]), (char *)bufptr );
- index += (strlen( bufptr ) + 1); /* include terminating null */
- break;
-
- case L1_ARG_UNKNOWN:
- {
- int arglen;
-
- arglen = va_arg( al, int ); argno++;
- bufptr = va_arg( al, void* ); argno++;
- if( (msg_len -= (arglen + 1)) < 0 )
- return( -1 );
- msg[index++] = L1_ARG_UNKNOWN | arglen;
- BCOPY( bufptr, &(msg[index]), arglen );
- index += arglen;
- break;
- }
-
- default: /* unhandled argument type */
- return -1;
- }
- }
-
- va_end( al );
- msg[l1_argno_byte] = l1_argno;
-
- return index;
-}
-
-
-
-/* sc_interpret_resp verifies an L1 response to a bedrock request, and
- * breaks the response data up into the constituent parts. If the
- * response message indicates error, or if a mismatch is found in the
- * expected number and type of arguments, an error is returned. The
- * arguments to this function work very much like the arguments to
- * sc_construct_msg, above, except that L1_ARG_INTs must be followed
- * by a _pointer_ to an integer that can be filled in by this function.
- */
-int
-sc_interpret_resp( char *resp, /* buffer received from L1 */
- int resp_nargs, /* number of _varargs_ passed in */
- ... )
-{
- uint32_t buf32; /* 32-bit buffer used to bounce things around */
- void *bufptr; /* used to hold response field addresses */
- va_list al; /* variable argument list */
- int index; /* current index into response buffer */
- int argno; /* current position in varargs list */
- int l1_fldno; /* number of resp fields received from l1 */
- int l1_fld_t; /* field type/length */
-
- index = argno = 0;
-
-#if defined(L1_DEBUG)
-#define DUMP_RESP \
- { \
- int ix; \
- char outbuf[512]; \
- sprintf( outbuf, "sc_interpret_resp error line %d: ", __LINE__ ); \
- for( ix = 0; ix < 16; ix++ ) { \
- sprintf( &outbuf[strlen(outbuf)], "%x ", resp[ix] ); \
- } \
- printk( "%s\n", outbuf ); \
- }
-#else
-#define DUMP_RESP
-#endif /* L1_DEBUG */
-
- /* check response code */
- COPY_BUFFER_TO_INT(resp, index, buf32);
- if( buf32 != L1_RESP_OK ) {
- DUMP_RESP;
- return buf32;
- }
-
- /* get number of response fields */
- l1_fldno = resp[index++];
-
- va_start( al, resp_nargs );
-
- /* copy out response fields */
- while( argno < resp_nargs ) {
- l1_fldno--;
- l1_fld_t = va_arg( al, int ); argno++;
- switch( l1_fld_t )
- {
- case L1_ARG_INT:
- if( resp[index++] != L1_ARG_INT ) {
- /* type mismatch */
- va_end( al );
- DUMP_RESP;
- return -1;
- }
- bufptr = va_arg( al, int* ); argno++;
- COPY_BUFFER_TO_BUFFER(resp, index, bufptr);
- break;
-
- case L1_ARG_ASCII:
- if( resp[index++] != L1_ARG_ASCII ) {
- /* type mismatch */
- va_end( al );
- DUMP_RESP;
- return -1;
- }
- bufptr = va_arg( al, char* ); argno++;
- strcpy( (char *)bufptr, (char *)&(resp[index]) );
- /* include terminating null */
- index += (strlen( &(resp[index]) ) + 1);
- break;
-
- default:
- if( (l1_fld_t & L1_ARG_UNKNOWN) == L1_ARG_UNKNOWN )
- {
- int *arglen;
-
- arglen = va_arg( al, int* ); argno++;
- bufptr = va_arg( al, void* ); argno++;
- *arglen = ((resp[index++] & ~L1_ARG_UNKNOWN) & 0xff);
- BCOPY( &(resp[index]), bufptr, *arglen );
- index += (*arglen);
- }
-
- else {
- /* unhandled type */
- va_end( al );
- DUMP_RESP;
- return -1;
- }
- }
- }
- va_end( al );
-
- if( (l1_fldno != 0) || (argno != resp_nargs) ) {
- /* wrong number of arguments */
- DUMP_RESP;
- return -1;
- }
- return 0;
-}
-
-
-
-
-/* sc_send takes as arguments a system controller struct, a
- * buffer which contains a Bedrock<->L1 "request" message,
- * the message length, and the subchannel (presumably obtained
- * from an earlier invocation of sc_open) over which the
- * message is to be sent. The final argument ("wait") indicates
- * whether the send is to be performed synchronously or not.
- *
- * sc_send returns either zero or an error value. Synchronous sends
- * (wait != 0) will not return until the data has actually been sent
- * to the UART. Synchronous sends generally receive privileged
- * treatment. The intent is that they be used sparingly, for such
- * purposes as kernel printf's (the "ducons" routines). Run-of-the-mill
- * console output and L1 requests should NOT use a non-zero value
- * for wait.
- */
-int
-sc_send( l1sc_t *sc, int ch, char *msg, int len, int wait )
-{
- char type_and_subch;
- int result;
-
- if( (ch < 0) || ( ch >= BRL1_NUM_SUBCHANS) ) {
- return SC_BADSUBCH;
- }
-
- /* Verify that this is an open subchannel
- */
- if( sc->subch[ch].use == BRL1_SUBCH_FREE ) {
- return SC_NOPEN;
- }
-
- type_and_subch = (BRL1_REQUEST | ((u_char)ch));
- result = brl1_send( sc, msg, len, type_and_subch, wait );
-
- /* If we sent as much as we asked to, return "ok". */
- if( result == len )
- return( SC_SUCCESS );
-
- /* Or, if we sent less, than either the UART is busy or
- * we're trying to send too large a packet anyway.
- */
- else if( result >= 0 && result < len )
- return( SC_BUSY );
-
- /* Or, if something else went wrong (result < 0), then
- * return that error value.
- */
- else
- return( result );
-}
-
-
-
-/* subch_pull_msg pulls a message off the receive queue for subch
- * and places it the buffer pointed to by msg. This routine should only
- * be called when the caller already knows a message is available on the
- * receive queue (and, in the kernel, only when the subchannel data lock
- * is held by the caller).
- */
-static void
-subch_pull_msg( brl1_sch_t *subch, char *msg, int *len )
-{
- sc_cq_t *q; /* receive queue */
- int before_wrap, /* packet may be split into two different */
- after_wrap; /* pieces to accommodate queue wraparound */
-
- /* pull message off the receive queue */
- q = subch->iqp;
-
- cq_rem( q, *len ); /* remove length byte and store */
- cq_discard( q ); /* remove type/subch byte and discard */
-
- if ( *len > 0 )
- (*len)--; /* don't count type/subch byte in length returned */
-
- if( (q->opos + (*len)) > BRL1_QSIZE ) {
- before_wrap = BRL1_QSIZE - q->opos;
- after_wrap = (*len) - before_wrap;
- }
- else {
- before_wrap = (*len);
- after_wrap = 0;
- }
-
- BCOPY( q->buf + q->opos, msg, before_wrap );
- if( after_wrap ) {
- BCOPY( q->buf, msg + before_wrap, after_wrap );
- q->opos = after_wrap;
- }
- else {
- q->opos = ((q->opos + before_wrap) & (BRL1_QSIZE - 1));
- }
- atomic_dec(&(subch->packet_arrived));
-}
-
-
-/* sc_recv_poll can be called as a blocking or non-blocking function;
- * it attempts to pull a message off of the subchannel specified
- * in the argument list (ch).
- *
- * The "block" argument, if non-zero, is interpreted as a timeout
- * delay (to avoid permanent waiting).
- */
-
-int
-sc_recv_poll( l1sc_t *sc, int ch, char *msg, int *len, uint64_t block )
-{
- int is_msg = 0;
- unsigned long pl = 0;
- brl1_sch_t *subch = &(sc->subch[ch]);
-
- rtc_time_t exp_time = rtc_time() + block;
-
- /* sanity check-- make sure this is an open subchannel */
- if( subch->use == BRL1_SUBCH_FREE )
- return( SC_NOPEN );
-
- do {
-
- /* kick the next lower layer and see if it pulls anything in
- */
- brl1_receive( sc, SERIAL_POLLED_MODE );
- is_msg = atomic_read(&subch->packet_arrived);
-
- } while( block && !is_msg && (rtc_time() < exp_time) );
-
- if( !is_msg ) {
- /* no message and we didn't care to wait for one */
- return( SC_NMSG );
- }
-
- SUBCH_DATA_LOCK( subch, pl );
- subch_pull_msg( subch, msg, len );
- SUBCH_DATA_UNLOCK( subch, pl );
-
- return( SC_SUCCESS );
-}
-
-
-/* Like sc_recv_poll, sc_recv_intr can be called in either a blocking
- * or non-blocking mode. Rather than polling until an appointed timeout,
- * however, sc_recv_intr sleeps on a syncrhonization variable until a
- * signal from the lower layer tells us that a packet has arrived.
- *
- * sc_recv_intr can't be used with remote (router) L1s.
- */
-int
-sc_recv_intr( l1sc_t *sc, int ch, char *msg, int *len, uint64_t block )
-{
- int is_msg = 0;
- unsigned long pl = 0;
- brl1_sch_t *subch = &(sc->subch[ch]);
-
- do {
- SUBCH_DATA_LOCK(subch, pl);
- is_msg = atomic_read(&subch->packet_arrived);
- if( !is_msg && block ) {
- /* wake me when you've got something */
- subch->rx_notify = sc_data_ready;
- sv_wait( &(subch->arrive_sv), 0, 0);
- if( subch->use == BRL1_SUBCH_FREE ) {
- /* oops-- somebody closed our subchannel while we were
- * sleeping!
- */
-
- /* no need to unlock since the channel's closed anyhow */
- return( SC_NOPEN );
- }
- }
- } while( !is_msg && block );
-
- if( !is_msg ) {
- /* no message and we didn't care to wait for one */
- SUBCH_DATA_UNLOCK( subch, pl );
- return( SC_NMSG );
- }
-
- subch_pull_msg( subch, msg, len );
- SUBCH_DATA_UNLOCK( subch, pl );
-
- return( SC_SUCCESS );
-}
-
-/* sc_command implements a (blocking) combination of sc_send and sc_recv.
- * It is intended to be the SN1 equivalent of SN0's "elsc_command", which
- * issued a system controller command and then waited for a response from
- * the system controller before returning.
- *
- * cmd points to the outgoing command; resp points to the buffer in
- * which the response is to be stored. Both buffers are assumed to
- * be the same length; if there is any doubt as to whether the
- * response buffer is long enough to hold the L1's response, then
- * make it BRL1_QSIZE bytes-- no Bedrock<->L1 message can be any
- * bigger.
- *
- * Be careful using the same buffer for both cmd and resp; it could get
- * hairy if there were ever an L1 command request that spanned multiple
- * packets. (On the other hand, that would require some additional
- * rewriting of the L1 command interface anyway.)
- */
-#define __RETRIES 50
-#define __WAIT_SEND 1 // ( sc->uart != BRL1_LOCALHUB_UART )
-#define __WAIT_RECV 10000000
-
-
-int
-sc_command( l1sc_t *sc, int ch, char *cmd, char *resp, int *len )
-{
-#ifndef CONFIG_SERIAL_SGI_L1_PROTOCOL
- return SC_NMSG;
-#else
- int result;
- int retries;
-
- if ( IS_RUNNING_ON_SIMULATOR() )
- return SC_NMSG;
-
- retries = __RETRIES;
-
- while( (result = sc_send( sc, ch, cmd, *len, __WAIT_SEND )) < 0 ) {
- if( result == SC_BUSY ) {
- retries--;
- if( retries <= 0 )
- return result;
- uart_delay(500);
- }
- else {
- return result;
- }
- }
-
- /* block on sc_recv_* */
- if( (sc->uart == BRL1_LOCALHUB_UART) && L1_interrupts_connected ) {
- return( sc_recv_intr( sc, ch, resp, len, __WAIT_RECV ) );
- }
- else {
- return( sc_recv_poll( sc, ch, resp, len, __WAIT_RECV ) );
- }
-#endif /* CONFIG_SERIAL_SGI_L1_PROTOCOL */
-}
-
-/* sc_command_kern is a knuckle-dragging, no-patience version of sc_command
- * used in situations where the kernel has a command that shouldn't be
- * delayed until the send buffer clears. sc_command should be used instead
- * under most circumstances.
- */
-
-int
-sc_command_kern( l1sc_t *sc, int ch, char *cmd, char *resp, int *len )
-{
-#ifndef CONFIG_SERIAL_SGI_L1_PROTOCOL
- return SC_NMSG;
-#else
- int result;
-
- if ( IS_RUNNING_ON_SIMULATOR() )
- return SC_NMSG;
-
- if( (result = sc_send( sc, ch, cmd, *len, 1 )) < 0 ) {
- return result;
- }
-
- return( sc_recv_poll( sc, ch, resp, len, __WAIT_RECV ) );
-#endif /* CONFIG_SERIAL_SGI_L1_PROTOCOL */
-}
-
-
-
-/* sc_poll checks the queue corresponding to the given
- * subchannel to see if there's anything available. If
- * not, it kicks the brl1 layer and then checks again.
- *
- * Returns 1 if input is available on the given queue,
- * 0 otherwise.
- */
-
-int
-sc_poll( l1sc_t *sc, int ch )
-{
- brl1_sch_t *subch = &(sc->subch[ch]);
-
- if( atomic_read(&subch->packet_arrived) )
- return 1;
-
- brl1_receive( sc, SERIAL_POLLED_MODE );
-
- if( atomic_read(&subch->packet_arrived) )
- return 1;
-
- return 0;
-}
-
-/* for now, sc_init just calls brl1_init */
-
-void
-sc_init( l1sc_t *sc, nasid_t nasid, net_vec_t uart )
-{
- if ( !IS_RUNNING_ON_SIMULATOR() )
- brl1_init( sc, nasid, uart );
-}
-
-/* sc_dispatch_env_event handles events sent from the system control
- * network's environmental monitor tasks.
- */
-
-#if defined(LINUX_KERNEL_THREADS)
-
-static void
-sc_dispatch_env_event( uint code, int argc, char *args, int maxlen )
-{
- int j, i = 0;
- uint32_t ESPcode;
-
- switch( code ) {
- /* for now, all codes do the same thing: grab two arguments
- * and print a cmn_err_tag message */
- default:
- /* check number of arguments */
- if( argc != 2 ) {
- L1_DBG_PRF(( "sc_dispatch_env_event: "
- "expected 2 arguments, got %d\n", argc ));
- return;
- }
-
- /* get ESP code (integer argument) */
- if( args[i++] != L1_ARG_INT ) {
- L1_DBG_PRF(( "sc_dispatch_env_event: "
- "expected integer argument\n" ));
- return;
- }
- /* WARNING: highly endian */
- COPY_BUFFER_TO_INT(args, i, ESPcode);
-
- /* verify string argument */
- if( args[i++] != L1_ARG_ASCII ) {
- L1_DBG_PRF(( "sc_dispatch_env_event: "
- "expected an ASCII string\n" ));
- return;
- }
- for( j = i; j < maxlen; j++ ) {
- if( args[j] == '\0' ) break; /* found string termination */
- }
- if( j == maxlen ) {
- j--;
- L1_DBG_PRF(( "sc_dispatch_env_event: "
- "message too long-- truncating\n" ));
- }
-
- /* strip out trailing cr/lf */
- for( ;
- j > 1 && ((args[j-1] == 0xd) || (args[j-1] == 0xa));
- j-- );
- args[j] = '\0';
-
- /* strip out leading cr/lf */
- for( ;
- i < j && ((args[i] == 0xd) || (args[i] == 0xa));
- i++ );
- }
-}
-
-
-/* sc_event waits for events to arrive from the system controller, and
- * prints appropriate messages to the syslog.
- */
-
-static void
-sc_event( l1sc_t *sc, int ch )
-{
- char event[BRL1_QSIZE];
- int i;
- int result;
- int event_len;
- uint32_t ev_src;
- uint32_t ev_code;
- int ev_argc;
-
- while(1) {
-
- bzero( event, BRL1_QSIZE );
-
- /*
- * wait for an event
- */
- result = sc_recv_intr( sc, ch, event, &event_len, 1 );
- if( result != SC_SUCCESS ) {
- printk(KERN_WARNING "Error receiving sysctl event on nasid %d\n",
- sc->nasid );
- }
- else {
- /*
- * an event arrived; break it down into useful pieces
- */
-#if defined(L1_DEBUG) && 0
- int ix;
- printf( "Event packet received:\n" );
- for (ix = 0; ix < 64; ix++) {
- printf( "%x%x ", ((event[ix] >> 4) & ((uint64_t)0xf)),
- (event[ix] & ((uint64_t)0xf)) );
- if( (ix % 16) == 0xf ) printf( "\n" );
- }
-#endif /* L1_DEBUG */
-
- i = 0;
-
- /* get event source */
- COPY_BUFFER_TO_INT(event, i, ev_src);
- COPY_BUFFER_TO_INT(event, i, ev_code);
-
- /* get arg count */
- ev_argc = (event[i++] & 0xffUL);
-
- /* dispatch events by task */
- switch( (ev_src & L1_ADDR_TASK_MASK) >> L1_ADDR_TASK_SHFT )
- {
- case L1_ADDR_TASK_ENV: /* environmental monitor event */
- sc_dispatch_env_event( ev_code, ev_argc, &(event[i]),
- BRL1_QSIZE - i );
- break;
-
- default: /* unhandled task type */
- L1_DBG_PRF(( "Unhandled event type received from system "
- "controllers: source task %x\n",
- (ev_src & L1_ADDR_TASK_MASK) >> L1_ADDR_TASK_SHFT
- ));
- }
- }
-
- }
-}
-
-/* sc_listen sets up a service thread to listen for incoming events.
- */
-
-void
-sc_listen( l1sc_t *sc )
-{
- int result;
- unsigned long pl = 0;
- brl1_sch_t *subch;
-
- char msg[BRL1_QSIZE];
- int len; /* length of message being sent */
- int ch; /* system controller subchannel used */
-
- extern int msc_shutdown_pri;
-
- /* grab the designated "event subchannel" */
- SUBCH_LOCK( sc, pl );
- subch = &(sc->subch[BRL1_EVENT_SUBCH]);
- if( subch->use != BRL1_SUBCH_FREE ) {
- SUBCH_UNLOCK( sc, pl );
- printk(KERN_WARNING "sysctl event subchannel in use! "
- "Not monitoring sysctl events.\n" );
- return;
- }
- subch->use = BRL1_SUBCH_RSVD;
- SUBCH_UNLOCK( sc, pl );
-
- atomic_set(&subch->packet_arrived, 0);
- subch->target = BRL1_LOCALHUB_UART;
- spin_lock_init( &(subch->data_lock) );
- sv_init( &(subch->arrive_sv), &(subch->data_lock), SV_MON_SPIN | SV_ORDER_FIFO /* | SV_INTS */);
- subch->tx_notify = NULL;
- subch->rx_notify = sc_data_ready;
- subch->iqp = snia_kmem_zalloc_node( sizeof(sc_cq_t), KM_NOSLEEP,
- NASID_TO_COMPACT_NODEID(sc->nasid) );
- ASSERT( subch->iqp );
- cq_init( subch->iqp );
-
- /* set up a thread to listen for events */
- sthread_create( "sysctl event handler", 0, 0, 0, msc_shutdown_pri,
- KT_PS, (st_func_t *) sc_event,
- (void *)sc, (void *)(uint64_t)BRL1_EVENT_SUBCH, 0, 0 );
-
- /* signal the L1 to begin sending events */
- bzero( msg, BRL1_QSIZE );
- ch = sc_open( sc, L1_ADDR_LOCAL );
-
- if( (len = sc_construct_msg( sc, ch, msg, BRL1_QSIZE,
- L1_ADDR_TASK_GENERAL,
- L1_REQ_EVENT_SUBCH, 2,
- L1_ARG_INT, BRL1_EVENT_SUBCH )) < 0 )
- {
- sc_close( sc, ch );
- L1_DBG_PRF(( "Failure in sc_construct_msg (%d)\n", len ));
- goto err_return;
- }
-
- result = sc_command_kern( sc, ch, msg, msg, &len );
- if( result < 0 )
- {
- sc_close( sc, ch );
- L1_DBG_PRF(( "Failure in sc_command_kern (%d)\n", result ));
- goto err_return;
- }
-
- sc_close( sc, ch );
-
- result = sc_interpret_resp( msg, 0 );
- if( result < 0 )
- {
- L1_DBG_PRF(( "Failure in sc_interpret_resp (%d)\n", result ));
- goto err_return;
- }
-
- /* everything went fine; just return */
- return;
-
-err_return:
- /* there was a problem; complain */
- printk(KERN_WARNING "failed to set sysctl event-monitoring subchannel. "
- "Sysctl events will not be monitored.\n" );
-}
-
-#endif /* LINUX_KERNEL_THREADS */
+++ /dev/null
-/* $Id$
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Copyright (C) 1992 - 1997, 2000 - 2001 Silicon Graphics, Inc.
- * All rights reserved.
- */
-
-#include <linux/types.h>
-#include <linux/slab.h>
-#include <asm/sn/sgi.h>
-#include <asm/sn/io.h>
-#include <asm/sn/iograph.h>
-#include <asm/sn/invent.h>
-#include <asm/sn/hcl.h>
-#include <asm/sn/hcl_util.h>
-#include <asm/sn/labelcl.h>
-#include <asm/sn/eeprom.h>
-#include <asm/sn/router.h>
-#include <asm/sn/module.h>
-#include <asm/sn/ksys/l1.h>
-#include <asm/sn/nodepda.h>
-#include <asm/sn/clksupport.h>
-
-#define ELSC_TIMEOUT 1000000 /* ELSC response timeout (usec) */
-#define LOCK_TIMEOUT 5000000 /* Hub lock timeout (usec) */
-
-#define LD(x) (*(volatile uint64_t *)(x))
-#define SD(x, v) (LD(x) = (uint64_t) (v))
-
-#define hub_cpu_get() 0
-
-#define LBYTE(caddr) (*(char *) caddr)
-
-extern char *bcopy(const char * src, char * dest, int count);
-
-#define LDEBUG 0
-
-/*
- * ELSC data is in NVRAM page 7 at the following offsets.
- */
-
-#define NVRAM_MAGIC_AD 0x700 /* magic number used for init */
-#define NVRAM_PASS_WD 0x701 /* password (4 bytes in length) */
-#define NVRAM_DBG1 0x705 /* virtual XOR debug switches */
-#define NVRAM_DBG2 0x706 /* physical XOR debug switches */
-#define NVRAM_CFG 0x707 /* ELSC Configuration info */
-#define NVRAM_MODULE 0x708 /* system module number */
-#define NVRAM_BIST_FLG 0x709 /* BIST flags (2 bits per nodeboard) */
-#define NVRAM_PARTITION 0x70a /* module's partition id */
-#define NVRAM_DOMAIN 0x70b /* module's domain id */
-#define NVRAM_CLUSTER 0x70c /* module's cluster id */
-#define NVRAM_CELL 0x70d /* module's cellid */
-
-#define NVRAM_MAGIC_NO 0x37 /* value of magic number */
-#define NVRAM_SIZE 16 /* 16 bytes in nvram */
-
-/*
- * Declare a static ELSC NVRAM buffer to hold all data read from
- * and written to NVRAM. This nvram "cache" will be used only during the
- * IP27prom execution.
- */
-static char elsc_nvram_buffer[NVRAM_SIZE];
-
-#define SC_COMMAND sc_command
-
-/*
- * elsc_init
- *
- * Initialize ELSC structure
- */
-
-void elsc_init(elsc_t *e, nasid_t nasid)
-{
- sc_init((l1sc_t *)e, nasid, BRL1_LOCALHUB_UART);
-}
-
-
-/*
- * elsc_errmsg
- *
- * Given a negative error code,
- * returns a corresponding static error string.
- */
-
-char *elsc_errmsg(int code)
-{
- switch (code) {
- case ELSC_ERROR_CMD_SEND:
- return "Command send error";
- case ELSC_ERROR_CMD_CHECKSUM:
- return "Command packet checksum error";
- case ELSC_ERROR_CMD_UNKNOWN:
- return "Unknown command";
- case ELSC_ERROR_CMD_ARGS:
- return "Invalid command argument(s)";
- case ELSC_ERROR_CMD_PERM:
- return "Permission denied";
- case ELSC_ERROR_RESP_TIMEOUT:
- return "System controller response timeout";
- case ELSC_ERROR_RESP_CHECKSUM:
- return "Response packet checksum error";
- case ELSC_ERROR_RESP_FORMAT:
- return "Response format error";
- case ELSC_ERROR_RESP_DIR:
- return "Response direction error";
- case ELSC_ERROR_MSG_LOST:
- return "Message lost because queue is full";
- case ELSC_ERROR_LOCK_TIMEOUT:
- return "Timed out getting ELSC lock";
- case ELSC_ERROR_DATA_SEND:
- return "Error sending data";
- case ELSC_ERROR_NIC:
- return "NIC protocol error";
- case ELSC_ERROR_NVMAGIC:
- return "Bad magic number in NVRAM";
- case ELSC_ERROR_MODULE:
- return "Module location protocol error";
- default:
- return "Unknown error";
- }
-}
-
-/*
- * elsc_nvram_init
- *
- * Initializes reads and writes to NVRAM. This will perform a single
- * read to NVRAM, getting all data at once. When the PROM tries to
- * read NVRAM, it returns the data from the buffer being read. If the
- * PROM tries to write out to NVRAM, the write is done, and the internal
- * buffer is updated.
- */
-
-void elsc_nvram_init(nasid_t nasid, uchar_t *elsc_nvram_data)
-{
- /* This might require implementation of multiple-packet request/responses
- * if it's to provide the same behavior that was available in SN0.
- */
- nasid = nasid;
- elsc_nvram_data = elsc_nvram_data;
-}
-
-/*
- * elsc_nvram_copy
- *
- * Copies the content of a buffer into the static buffer in this library.
- */
-
-void elsc_nvram_copy(uchar_t *elsc_nvram_data)
-{
- memcpy(elsc_nvram_buffer, elsc_nvram_data, NVRAM_SIZE);
-}
-
-/*
- * elsc_nvram_write
- *
- * Copies bytes from 'buf' into NVRAM, starting at NVRAM address
- * 'addr' which must be between 0 and 2047.
- *
- * If 'len' is non-negative, the routine copies 'len' bytes.
- *
- * If 'len' is negative, the routine treats the data as a string and
- * copies bytes up to and including a NUL-terminating zero, but not
- * to exceed '-len' bytes.
- */
-
-int elsc_nvram_write(elsc_t *e, int addr, char *buf, int len)
-{
- /* Here again, we might need to work out the details of a
- * multiple-packet protocol.
- */
-
- /* For now, pretend it worked. */
- e = e;
- addr = addr;
- buf = buf;
- return (len < 0 ? -len : len);
-}
-
-/*
- * elsc_nvram_read
- *
- * Copies bytes from NVRAM into 'buf', starting at NVRAM address
- * 'addr' which must be between 0 and 2047.
- *
- * If 'len' is non-negative, the routine copies 'len' bytes.
- *
- * If 'len' is negative, the routine treats the data as a string and
- * copies bytes up to and including a NUL-terminating zero, but not
- * to exceed '-len' bytes. NOTE: This method is no longer supported.
- * It was never used in the first place.
- */
-
-int elsc_nvram_read(elsc_t *e, int addr, char *buf, int len)
-{
- /* multiple packets? */
- e = e;
- addr = addr;
- buf = buf;
- len = len;
- return -1;
-}
-
-
-/*
- * Command Set
- */
-
-int elsc_version(elsc_t *e, char *result)
-{
- char msg[BRL1_QSIZE];
- int len; /* length of message being sent */
- int subch; /* system controller subchannel used */
- int major, /* major rev number */
- minor, /* minor rev number */
- bugfix; /* bugfix rev number */
-
- /* fill in msg with the opcode & params */
- bzero( msg, BRL1_QSIZE );
- subch = sc_open( (l1sc_t *)e, L1_ADDR_LOCAL );
-
- if( (len = sc_construct_msg( (l1sc_t *)e, subch, msg, BRL1_QSIZE,
- L1_ADDR_TASK_GENERAL,
- L1_REQ_FW_REV, 0 )) < 0 )
- {
- sc_close( e, subch );
- return( ELSC_ERROR_CMD_ARGS );
- }
-
- /* send the request to the L1 */
- if( SC_COMMAND( (l1sc_t *)e, subch, msg, msg, &len ) < 0 )
- {
- sc_close( e, subch );
- return( ELSC_ERROR_CMD_SEND );
- }
-
- /* free up subchannel */
- sc_close( (l1sc_t *)e, subch );
-
- /* check response */
- if( sc_interpret_resp( msg, 6, L1_ARG_INT, &major,
- L1_ARG_INT, &minor, L1_ARG_INT, &bugfix )
- < 0 )
- {
- return( ELSC_ERROR_RESP_FORMAT );
- }
-
- sprintf( result, "%d.%d.%d", major, minor, bugfix );
-
- return 0;
-}
-
-int elsc_debug_set(elsc_t *e, u_char byte1, u_char byte2)
-{
- /* shush compiler */
- e = e;
- byte1 = byte1;
- byte2 = byte2;
-
- /* fill in a buffer with the opcode & params; call sc_command */
-
- return 0;
-}
-
-int elsc_debug_get(elsc_t *e, u_char *byte1, u_char *byte2)
-{
- char msg[BRL1_QSIZE];
- int subch; /* system controller subchannel used */
- int dbg_sw; /* holds debug switch settings */
- int len; /* number of msg buffer bytes used */
-
- /* fill in msg with the opcode & params */
- bzero( msg, BRL1_QSIZE );
- if( (subch = sc_open( (l1sc_t *)e, L1_ADDR_LOCAL )) < 0 ) {
- return( ELSC_ERROR_CMD_SEND );
- }
-
- if( (len = sc_construct_msg( (l1sc_t *)e, subch, msg, BRL1_QSIZE,
- L1_ADDR_TASK_GENERAL,
- L1_REQ_RDBG, 0 ) ) < 0 )
- {
- sc_close( e, subch );
- return( ELSC_ERROR_CMD_ARGS );
- }
-
- /* send the request to the L1 */
- if( sc_command( (l1sc_t *)e, subch, msg, msg, &len ) < 0 )
- {
- sc_close( e, subch );
- return( ELSC_ERROR_CMD_SEND );
- }
-
- /* free up subchannel */
- sc_close( (l1sc_t *)e, subch );
-
- /* check response */
- if( sc_interpret_resp( msg, 2, L1_ARG_INT, &dbg_sw ) < 0 )
- {
- return( ELSC_ERROR_RESP_FORMAT );
- }
-
- /* copy out debug switch settings (last two bytes of the
- * integer response)
- */
- *byte1 = ((dbg_sw >> 8) & 0xFF);
- *byte2 = (dbg_sw & 0xFF);
-
- return 0;
-}
-
-
-/*
- * elsc_rack_bay_get fills in the two int * arguments with the
- * rack number and bay number of the L1 being addressed
- */
-int elsc_rack_bay_get(elsc_t *e, uint *rack, uint *bay)
-{
- char msg[BRL1_QSIZE]; /* L1 request/response info */
- int subch; /* system controller subchannel used */
- int len; /* length of message */
- uint32_t buf32; /* used to copy 32-bit rack/bay out of msg */
-
- /* fill in msg with the opcode & params */
- bzero( msg, BRL1_QSIZE );
- if( (subch = sc_open( (l1sc_t *)e, L1_ADDR_LOCAL )) < 0 ) {
- return( ELSC_ERROR_CMD_SEND );
- }
-
- if( (len = sc_construct_msg( (l1sc_t *)e, subch, msg, BRL1_QSIZE,
- L1_ADDR_TASK_GENERAL,
- L1_REQ_RRACK, 0 )) < 0 )
- {
- sc_close( e, subch );
- return( ELSC_ERROR_CMD_ARGS );
- }
-
-
- /* send the request to the L1 */
- if( sc_command( (l1sc_t *)e, subch, msg, msg, &len ) ) {
- sc_close( e, subch );
- return( ELSC_ERROR_CMD_SEND );
- }
-
- /* free up subchannel */
- sc_close(e, subch);
-
- /* check response */
- if( sc_interpret_resp( msg, 2, L1_ARG_INT, &buf32 ) < 0 )
- {
- return( ELSC_ERROR_RESP_FORMAT );
- }
-
- /* extract rack/bay info
- *
- * note that the 32-bit value returned by the L1 actually
- * only uses the low-order sixteen bits for rack and bay
- * information. A "normal" L1 address puts rack and bay
- * information in bit positions 12 through 28. So if
- * we initially shift the value returned 12 bits to the left,
- * we can use the L1 addressing #define's to extract the
- * values we need (see ksys/l1.h for a complete list of the
- * various fields of an L1 address).
- */
- buf32 <<= L1_ADDR_BAY_SHFT;
-
- *rack = (buf32 & L1_ADDR_RACK_MASK) >> L1_ADDR_RACK_SHFT;
- *bay = (buf32 & L1_ADDR_BAY_MASK) >> L1_ADDR_BAY_SHFT;
-
- return 0;
-}
-
-
-/* elsc_rack_bay_type_get fills in the three int * arguments with the
- * rack number, bay number and brick type of the L1 being addressed. Note
- * that if the L1 operation fails and this function returns an error value,
- * garbage may be written to brick_type.
- */
-int elsc_rack_bay_type_get( l1sc_t *sc, uint *rack,
- uint *bay, uint *brick_type )
-{
- char msg[BRL1_QSIZE]; /* L1 request/response info */
- int subch; /* system controller subchannel used */
- int len; /* length of message */
- uint32_t buf32; /* used to copy 32-bit rack & bay out of msg */
-
- /* fill in msg with the opcode & params */
- bzero( msg, BRL1_QSIZE );
- if( (subch = sc_open( sc, L1_ADDR_LOCAL )) < 0 ) {
- return ELSC_ERROR_CMD_SEND;
- }
-
- if( (len = sc_construct_msg( sc, subch, msg, BRL1_QSIZE,
- L1_ADDR_TASK_GENERAL,
- L1_REQ_RRBT, 0 )) < 0 )
- {
- sc_close( sc, subch );
- return( ELSC_ERROR_CMD_ARGS );
- }
-
- /* send the request to the L1 */
- if( SC_COMMAND( sc, subch, msg, msg, &len ) ) {
- sc_close( sc, subch );
- return( ELSC_ERROR_CMD_SEND );
- }
-
- /* free up subchannel */
- sc_close( sc, subch );
-
- /* check response */
- if( sc_interpret_resp( msg, 4, L1_ARG_INT, &buf32,
- L1_ARG_INT, brick_type ) < 0 )
- {
- return( ELSC_ERROR_RESP_FORMAT );
- }
-
- /* extract rack/bay info
- *
- * note that the 32-bit value returned by the L1 actually
- * only uses the low-order sixteen bits for rack and bay
- * information. A "normal" L1 address puts rack and bay
- * information in bit positions 12 through 28. So if
- * we initially shift the value returned 12 bits to the left,
- * we can use the L1 addressing #define's to extract the
- * values we need (see ksys/l1.h for a complete list of the
- * various fields of an L1 address).
- */
- buf32 <<= L1_ADDR_BAY_SHFT;
-
- *rack = (buf32 & L1_ADDR_RACK_MASK) >> L1_ADDR_RACK_SHFT;
- *bay = (buf32 & L1_ADDR_BAY_MASK) >> L1_ADDR_BAY_SHFT;
-
- /* convert brick_type to lower case */
- *brick_type = *brick_type - 'A' + 'a';
-
- return 0;
-}
-
-
-int elsc_module_get(elsc_t *e)
-{
- extern char brick_types[];
- uint rnum, rack, bay, bricktype, t;
- int ret;
-
- /* construct module ID from rack and slot info */
-
- if ((ret = elsc_rack_bay_type_get(e, &rnum, &bay, &bricktype)) < 0) {
- return ret;
- }
-
- /* report unset location info. with a special, otherwise invalid modid */
- if (rnum == 0 && bay == 0)
- return MODULE_NOT_SET;
-
- if (bay > MODULE_BPOS_MASK >> MODULE_BPOS_SHFT)
- return ELSC_ERROR_MODULE;
-
- /* Build a moduleid_t-compatible rack number */
-
- rack = 0;
- t = rnum / 100; /* rack class (CPU/IO) */
- if (t > RACK_CLASS_MASK(rack) >> RACK_CLASS_SHFT(rack))
- return ELSC_ERROR_MODULE;
- RACK_ADD_CLASS(rack, t);
- rnum %= 100;
-
- t = rnum / 10; /* rack group */
- if (t > RACK_GROUP_MASK(rack) >> RACK_GROUP_SHFT(rack))
- return ELSC_ERROR_MODULE;
- RACK_ADD_GROUP(rack, t);
-
- t = rnum % 10; /* rack number (one-based) */
- if (t-1 > RACK_NUM_MASK(rack) >> RACK_NUM_SHFT(rack))
- return ELSC_ERROR_MODULE;
- RACK_ADD_NUM(rack, t);
-
- for( t = 0; t < MAX_BRICK_TYPES; t++ ) {
- if( brick_types[t] == bricktype )
- return RBT_TO_MODULE(rack, bay, t);
- }
-
- return ELSC_ERROR_MODULE;
-}
-
-int elsc_partition_set(elsc_t *e, int partition)
-{
- char msg[BRL1_QSIZE]; /* L1 request/response info */
- int subch; /* system controller subchannel used */
- int len; /* length of message */
-
- /* fill in msg with the opcode & params */
- bzero( msg, BRL1_QSIZE );
- if( (subch = sc_open( e, L1_ADDR_LOCAL )) < 0 ) {
- return ELSC_ERROR_CMD_SEND;
- }
-
- if( (len = sc_construct_msg( e, subch, msg, BRL1_QSIZE,
- L1_ADDR_TASK_GENERAL,
- L1_REQ_PARTITION_SET, 2,
- L1_ARG_INT, partition )) < 0 )
- {
-
- sc_close( e, subch );
- return( ELSC_ERROR_CMD_ARGS );
- }
-
- /* send the request to the L1 */
- if( sc_command( e, subch, msg, msg, &len ) ) {
- sc_close( e, subch );
- return( ELSC_ERROR_CMD_SEND );
- }
-
- /* free up subchannel */
- sc_close( e, subch );
-
- /* check response */
- if( sc_interpret_resp( msg, 0 ) < 0 )
- {
- return( ELSC_ERROR_RESP_FORMAT );
- }
-
- return( 0 );
-}
-
-int elsc_partition_get(elsc_t *e)
-{
- char msg[BRL1_QSIZE]; /* L1 request/response info */
- int subch; /* system controller subchannel used */
- int len; /* length of message */
- uint32_t partition_id; /* used to copy partition id out of msg */
-
- /* fill in msg with the opcode & params */
- bzero( msg, BRL1_QSIZE );
- if( (subch = sc_open( e, L1_ADDR_LOCAL )) < 0 ) {
- return ELSC_ERROR_CMD_SEND;
- }
-
- if( (len = sc_construct_msg( e, subch, msg, BRL1_QSIZE,
- L1_ADDR_TASK_GENERAL,
- L1_REQ_PARTITION_GET, 0 )) < 0 )
-
- {
- sc_close( e, subch );
- return( ELSC_ERROR_CMD_ARGS );
- }
-
- /* send the request to the L1 */
- if( sc_command( e, subch, msg, msg, &len ) ) {
- sc_close( e, subch );
- return( ELSC_ERROR_CMD_SEND );
- }
-
- /* free up subchannel */
- sc_close( e, subch );
-
- /* check response */
- if( sc_interpret_resp( msg, 2, L1_ARG_INT, &partition_id ) < 0 )
- {
- return( ELSC_ERROR_RESP_FORMAT );
- }
-
- return( partition_id );
-}
-
-
-/*
- * elsc_cons_subch selects the "active" console subchannel for this node
- * (i.e., the one that will currently receive input)
- */
-int elsc_cons_subch(elsc_t *e, uint ch)
-{
- char msg[BRL1_QSIZE]; /* L1 request/response info */
- int subch; /* system controller subchannel used */
- int len; /* length of message */
-
- /* fill in msg with the opcode & params */
- bzero( msg, BRL1_QSIZE );
- subch = sc_open( e, L1_ADDR_LOCAL );
-
- if( (len = sc_construct_msg( e, subch, msg, BRL1_QSIZE,
- L1_ADDR_TASK_GENERAL,
- L1_REQ_CONS_SUBCH, 2,
- L1_ARG_INT, ch)) < 0 )
- {
- sc_close( e, subch );
- return( ELSC_ERROR_CMD_ARGS );
- }
-
- /* send the request to the L1 */
- if( SC_COMMAND( e, subch, msg, msg, &len ) ) {
- sc_close( e, subch );
- return( ELSC_ERROR_CMD_SEND );
- }
-
- /* free up subchannel */
- sc_close( e, subch );
-
- /* check response */
- if( sc_interpret_resp( msg, 0 ) < 0 )
- {
- return( ELSC_ERROR_RESP_FORMAT );
- }
-
- return 0;
-}
-
-
-/*
- * elsc_cons_node should only be executed by one node. It declares to
- * the system controller that the node from which it is called will be
- * the owner of the system console.
- */
-int elsc_cons_node(elsc_t *e)
-{
- char msg[BRL1_QSIZE]; /* L1 request/response info */
- int subch; /* system controller subchannel used */
- int len; /* length of message */
-
- /* fill in msg with the opcode & params */
- bzero( msg, BRL1_QSIZE );
- subch = sc_open( e, L1_ADDR_LOCAL );
-
- if( (len = sc_construct_msg( e, subch, msg, BRL1_QSIZE,
- L1_ADDR_TASK_GENERAL,
- L1_REQ_CONS_NODE, 0 )) < 0 )
- {
- sc_close( e, subch );
- return( ELSC_ERROR_CMD_ARGS );
- }
-
- /* send the request to the L1 */
- if( SC_COMMAND( e, subch, msg, msg, &len ) ) {
- sc_close( e, subch );
- return( ELSC_ERROR_CMD_SEND );
- }
-
- /* free up subchannel */
- sc_close( e, subch );
-
- /* check response */
- if( sc_interpret_resp( msg, 0 ) < 0 )
- {
- return( ELSC_ERROR_RESP_FORMAT );
- }
-
- return 0;
-}
-
-
-/* elsc_display_line writes up to 12 characters to either the top or bottom
- * line of the L1 display. line points to a buffer containing the message
- * to be displayed. The zero-based line number is specified by lnum (so
- * lnum == 0 specifies the top line and lnum == 1 specifies the bottom).
- * Lines longer than 12 characters, or line numbers not less than
- * L1_DISPLAY_LINES, cause elsc_display_line to return an error.
- */
-int elsc_display_line(elsc_t *e, char *line, int lnum)
-{
- char msg[BRL1_QSIZE];
- int subch; /* system controller subchannel used */
- int len; /* number of msg buffer bytes used */
-
- /* argument sanity checking */
- if( !(lnum < L1_DISPLAY_LINES) )
- return( ELSC_ERROR_CMD_ARGS );
- if( !(strlen( line ) <= L1_DISPLAY_LINE_LENGTH) )
- return( ELSC_ERROR_CMD_ARGS );
-
- /* fill in msg with the opcode & params */
- bzero( msg, BRL1_QSIZE );
- subch = sc_open( (l1sc_t *)e, L1_ADDR_LOCAL );
-
- if( (len = sc_construct_msg( (l1sc_t *)e, subch, msg, BRL1_QSIZE,
- L1_ADDR_TASK_GENERAL,
- (L1_REQ_DISP1+lnum), 2,
- L1_ARG_ASCII, line )) < 0 )
- {
- sc_close( e, subch );
- return( ELSC_ERROR_CMD_ARGS );
- }
-
- /* send the request to the L1 */
- if( SC_COMMAND( (l1sc_t *)e, subch, msg, msg, &len ) < 0 )
- {
- sc_close( e, subch );
- return( ELSC_ERROR_CMD_SEND );
- }
-
- /* free up subchannel */
- sc_close( (l1sc_t *)e, subch );
-
- /* check response */
- if( sc_interpret_resp( msg, 0 ) < 0 )
- {
- return( ELSC_ERROR_RESP_FORMAT );
- }
-
- return 0;
-}
-
-
-/* elsc_display_mesg silently drops message characters beyond the 12th.
- */
-int elsc_display_mesg(elsc_t *e, char *chr)
-{
-
- char line[L1_DISPLAY_LINE_LENGTH+1];
- int numlines, i;
- int result;
-
- numlines = (strlen( chr ) + L1_DISPLAY_LINE_LENGTH - 1) /
- L1_DISPLAY_LINE_LENGTH;
-
- if( numlines > L1_DISPLAY_LINES )
- numlines = L1_DISPLAY_LINES;
-
- for( i = 0; i < numlines; i++ )
- {
- strlcpy( line, chr, sizeof(line) );
-
- /* generally we want to leave the first line of the L1 display
- * alone (so the L1 can manipulate it). If you need to be able
- * to display to both lines (for debugging purposes), define
- * L1_DISP_2LINES in irix/kern/ksys/l1.h, or add -DL1_DISP_2LINES
- * to your 'defs file.
- */
-#if defined(L1_DISP_2LINES)
- if( (result = elsc_display_line( e, line, i )) < 0 )
-#else
- if( (result = elsc_display_line( e, line, i+1 )) < 0 )
-#endif
-
- return result;
-
- chr += L1_DISPLAY_LINE_LENGTH;
- }
-
- return 0;
-}
-
-
-int elsc_password_set(elsc_t *e, char *password)
-{
- /* shush compiler */
- e = e;
- password = password;
-
- /* fill in buffer with the opcode & params; call elsc_command */
-
- return 0;
-}
-
-int elsc_password_get(elsc_t *e, char *password)
-{
- /* shush compiler */
- e = e;
- password = password;
-
- /* fill in buffer with the opcode & params; call elsc_command */
-
- return 0;
-}
-
-
-/*
- * sc_portspeed_get
- *
- * retrieve the current portspeed setting for the bedrock II
- */
-int sc_portspeed_get(l1sc_t *sc)
-{
- char msg[BRL1_QSIZE];
- int len; /* length of message being sent */
- int subch; /* system controller subchannel used */
- int portspeed_a, portspeed_b;
- /* ioport clock rates */
-
- bzero( msg, BRL1_QSIZE );
- subch = sc_open( sc, L1_ADDR_LOCAL );
-
- if( (len = sc_construct_msg( sc, subch, msg, BRL1_QSIZE,
- L1_ADDR_TASK_GENERAL,
- L1_REQ_PORTSPEED,
- 0 )) < 0 )
- {
- sc_close( sc, subch );
- return( ELSC_ERROR_CMD_ARGS );
- }
-
- /* send the request to the L1 */
- if( sc_command( sc, subch, msg, msg, &len ) < 0 )
- {
- sc_close( sc, subch );
- return( ELSC_ERROR_CMD_SEND );
- }
-
- /* free up subchannel */
- sc_close( sc, subch );
-
- /* check response */
- if( sc_interpret_resp( msg, 4,
- L1_ARG_INT, &portspeed_a,
- L1_ARG_INT, &portspeed_b ) < 0 )
- {
- return( ELSC_ERROR_RESP_FORMAT );
- }
-
- /* for the c-brick, we ignore the portspeed_b value */
- return (portspeed_a ? 600 : 400);
-}
-
-/*
- * elsc_power_query
- *
- * To be used after system reset, this command returns 1 if the reset
- * was the result of a power-on, 0 otherwise.
- *
- * The power query status is cleared to 0 after it is read.
- */
-
-int elsc_power_query(elsc_t *e)
-{
- e = e; /* shush the compiler */
-
- /* fill in buffer with the opcode & params; call elsc_command */
-
- return 1;
-}
-
-int elsc_rpwr_query(elsc_t *e, int is_master)
-{
- /* shush the compiler */
- e = e;
- is_master = is_master;
-
- /* fill in buffer with the opcode & params; call elsc_command */
-
- return 0;
-}
-
-/*
- * elsc_power_down
- *
- * Sets up system to shut down in "sec" seconds (or modifies the
- * shutdown time if one is already in effect). Use 0 to power
- * down immediately.
- */
-
-int elsc_power_down(elsc_t *e, int sec)
-{
- /* shush compiler */
- e = e;
- sec = sec;
-
- /* fill in buffer with the opcode & params; call elsc_command */
-
- return 0;
-}
-
-
-int elsc_system_reset(elsc_t *e)
-{
- char msg[BRL1_QSIZE];
- int subch; /* system controller subchannel used */
- int len; /* number of msg buffer bytes used */
- int result;
-
- /* fill in msg with the opcode & params */
- bzero( msg, BRL1_QSIZE );
- if( (subch = sc_open( e, L1_ADDR_LOCAL )) < 0 ) {
- return ELSC_ERROR_CMD_SEND;
- }
-
- if( (len = sc_construct_msg( e, subch, msg, BRL1_QSIZE,
- L1_ADDR_TASK_GENERAL,
- L1_REQ_RESET, 0 )) < 0 )
- {
- sc_close( e, subch );
- return( ELSC_ERROR_CMD_ARGS );
- }
-
- /* send the request to the L1 */
- if( (result = sc_command( e, subch, msg, msg, &len )) ) {
- sc_close( e, subch );
- if( result == SC_NMSG ) {
- /* timeout is OK. We've sent the reset. Now it's just
- * a matter of time...
- */
- return( 0 );
- }
- return( ELSC_ERROR_CMD_SEND );
- }
-
- /* free up subchannel */
- sc_close( e, subch );
-
- /* check response */
- if( sc_interpret_resp( msg, 0 ) < 0 )
- {
- return( ELSC_ERROR_RESP_FORMAT );
- }
-
- return 0;
-}
-
-
-int elsc_power_cycle(elsc_t *e)
-{
- /* shush compiler */
- e = e;
-
- /* fill in buffer with the opcode & params; call sc_command */
-
- return 0;
-}
-
-
-/*
- * L1 Support for reading
- * cbrick uid.
- */
-
-int elsc_nic_get(elsc_t *e, uint64_t *nic, int verbose)
-{
- /* this parameter included only for SN0 compatibility */
- verbose = verbose;
-
- /* We don't go straight to the bedrock/L1 protocol on this one, but let
- * the eeprom layer prepare the eeprom data as we would like it to
- * appear to the caller
- */
- return cbrick_uid_get( e->nasid, nic );
-}
-
-
-int _elsc_hbt(elsc_t *e, int ival, int rdly)
-{
- e = e;
- ival = ival;
- rdly = rdly;
-
- /* fill in buffer with the opcode & params; call elsc_command */
-
- return 0;
-}
-
-
-/* send a command string to an L1 */
-int sc_command_interp( l1sc_t *sc, l1addr_t compt, l1addr_t rack, l1addr_t bay,
- char *cmd )
-{
- char msg[BRL1_QSIZE];
- int len; /* length of message being sent */
- int subch; /* system controller subchannel used */
- l1addr_t target; /* target system controller for command */
-
- /* fill in msg with the opcode & params */
- bzero( msg, BRL1_QSIZE );
-
- L1_BUILD_ADDR( &target, compt, rack, bay, 0 );
- subch = sc_open( sc, target );
-
- if( (len = sc_construct_msg( sc, subch, msg, BRL1_QSIZE,
- L1_ADDR_TASK_CMD, L1_REQ_EXEC_CMD, 2,
- L1_ARG_ASCII, cmd )) < 0 )
- {
- sc_close( sc, subch );
- return( ELSC_ERROR_CMD_ARGS );
- }
-
- /* send the request to the L1 */
- if( SC_COMMAND( sc, subch, msg, msg, &len ) < 0 )
- {
- sc_close( sc, subch );
- return( ELSC_ERROR_CMD_SEND );
- }
-
- /* free up subchannel */
- sc_close( sc, subch );
-
- /* check response */
- if( sc_interpret_resp( msg, 0 ) < 0 )
- {
- return( ELSC_ERROR_RESP_FORMAT );
- }
-
- return 0;
-}
-
-/*
- * sc_power_down
- *
- * Shuts down the c-brick associated with sc, and any attached I/O bricks
- * or other c-bricks (won't go through r-bricks).
- */
-
-int sc_power_down(l1sc_t *sc)
-{
- return sc_command_interp( sc, L1_ADDR_TYPE_L1, L1_ADDR_RACK_LOCAL,
- L1_ADDR_BAY_LOCAL, "* pwr d" );
-}
-
-
-/*
- * sc_power_down_all
- *
- * Works similarly to sc_power_down, except that the request is sent to the
- * closest L2 and EVERYBODY gets turned off.
- */
-
-int sc_power_down_all(l1sc_t *sc)
-{
- if( nodepda->num_routers > 0 ) {
- return sc_command_interp( sc, L1_ADDR_TYPE_L2, L1_ADDR_RACK_LOCAL,
- L1_ADDR_BAY_LOCAL, "* pwr d" );
- }
- else {
- return sc_power_down( sc );
- }
-}
-
-
-/*
- * Routines for reading the R-brick's L1
- */
-
-int router_module_get( nasid_t nasid, net_vec_t path )
-{
- uint rnum, rack, bay, t;
- int ret;
- l1sc_t sc;
-
- /* prepare l1sc_t struct */
- sc_init( &sc, nasid, path );
-
- /* construct module ID from rack and slot info */
-
- if ((ret = elsc_rack_bay_get(&sc, &rnum, &bay)) < 0)
- return ret;
-
- /* report unset location info. with a special, otherwise invalid modid */
- if (rnum == 0 && bay == 0)
- return MODULE_NOT_SET;
-
- if (bay > MODULE_BPOS_MASK >> MODULE_BPOS_SHFT)
- return ELSC_ERROR_MODULE;
-
- /* Build a moduleid_t-compatible rack number */
-
- rack = 0;
- t = rnum / 100; /* rack class (CPU/IO) */
- if (t > RACK_CLASS_MASK(rack) >> RACK_CLASS_SHFT(rack))
- return ELSC_ERROR_MODULE;
- RACK_ADD_CLASS(rack, t);
- rnum %= 100;
-
- t = rnum / 10; /* rack group */
- if (t > RACK_GROUP_MASK(rack) >> RACK_GROUP_SHFT(rack))
- return ELSC_ERROR_MODULE;
- RACK_ADD_GROUP(rack, t);
-
- t = rnum % 10; /* rack number (one-based) */
- if (t-1 > RACK_NUM_MASK(rack) >> RACK_NUM_SHFT(rack))
- return ELSC_ERROR_MODULE;
- RACK_ADD_NUM(rack, t);
-
- ret = RBT_TO_MODULE(rack, bay, MODULE_RBRICK);
- return ret;
-}
-
-
-/*
- * iobrick routines
- */
-
-/* iobrick_rack_bay_type_get fills in the three int * arguments with the
- * rack number, bay number and brick type of the L1 being addressed. Note
- * that if the L1 operation fails and this function returns an error value,
- * garbage may be written to brick_type.
- */
-int iobrick_rack_bay_type_get( l1sc_t *sc, uint *rack,
- uint *bay, uint *brick_type )
-{
- char msg[BRL1_QSIZE]; /* L1 request/response info */
- int subch; /* system controller subchannel used */
- int len; /* length of message */
- uint32_t buf32; /* used to copy 32-bit rack & bay out of msg */
-
- /* fill in msg with the opcode & params */
- bzero( msg, BRL1_QSIZE );
- if( (subch = sc_open( sc, L1_ADDR_LOCALIO )) < 0 ) {
- return( ELSC_ERROR_CMD_SEND );
- }
-
- if( (len = sc_construct_msg( sc, subch, msg, BRL1_QSIZE,
- L1_ADDR_TASK_GENERAL,
- L1_REQ_RRBT, 0 )) < 0 )
- {
- sc_close( sc, subch );
- return( ELSC_ERROR_CMD_ARGS );
- }
-
- /* send the request to the L1 */
- if( sc_command( sc, subch, msg, msg, &len ) ) {
- sc_close( sc, subch );
- return( ELSC_ERROR_CMD_SEND );
- }
-
- /* free up subchannel */
- sc_close( sc, subch );
-
- /* check response */
- if( sc_interpret_resp( msg, 4, L1_ARG_INT, &buf32,
- L1_ARG_INT, brick_type ) < 0 )
- {
- return( ELSC_ERROR_RESP_FORMAT );
- }
-
- /* extract rack/bay info
- *
- * note that the 32-bit value returned by the L1 actually
- * only uses the low-order sixteen bits for rack and bay
- * information. A "normal" L1 address puts rack and bay
- * information in bit positions 12 through 28. So if
- * we initially shift the value returned 12 bits to the left,
- * we can use the L1 addressing #define's to extract the
- * values we need (see ksys/l1.h for a complete list of the
- * various fields of an L1 address).
- */
- buf32 <<= L1_ADDR_BAY_SHFT;
-
- *rack = (buf32 & L1_ADDR_RACK_MASK) >> L1_ADDR_RACK_SHFT;
- *bay = (buf32 & L1_ADDR_BAY_MASK) >> L1_ADDR_BAY_SHFT;
-
- return 0;
-}
-
-
-int iobrick_module_get(l1sc_t *sc)
-{
- uint rnum, rack, bay, brick_type, t;
- int ret;
-
- /* construct module ID from rack and slot info */
-
- if ((ret = iobrick_rack_bay_type_get(sc, &rnum, &bay, &brick_type)) < 0)
- return ret;
-
- if (bay > MODULE_BPOS_MASK >> MODULE_BPOS_SHFT)
- return ELSC_ERROR_MODULE;
-
- /* Build a moduleid_t-compatible rack number */
-
- rack = 0;
- t = rnum / 100; /* rack class (CPU/IO) */
- if (t > RACK_CLASS_MASK(rack) >> RACK_CLASS_SHFT(rack))
- return ELSC_ERROR_MODULE;
- RACK_ADD_CLASS(rack, t);
- rnum %= 100;
-
- t = rnum / 10; /* rack group */
- if (t > RACK_GROUP_MASK(rack) >> RACK_GROUP_SHFT(rack))
- return ELSC_ERROR_MODULE;
- RACK_ADD_GROUP(rack, t);
-
- t = rnum % 10; /* rack number (one-based) */
- if (t-1 > RACK_NUM_MASK(rack) >> RACK_NUM_SHFT(rack))
- return ELSC_ERROR_MODULE;
- RACK_ADD_NUM(rack, t);
-
- switch( brick_type ) {
- case 'I':
- brick_type = MODULE_IBRICK; break;
- case 'P':
- brick_type = MODULE_PBRICK; break;
- case 'X':
- brick_type = MODULE_XBRICK; break;
- }
-
- ret = RBT_TO_MODULE(rack, bay, brick_type);
-
- return ret;
-}
-
-/* iobrick_get_sys_snum asks the attached iobrick for the system
- * serial number. This function will only be relevant to the master
- * cbrick (the one attached to the bootmaster ibrick); other nodes
- * may call the function, but the value returned to the master node
- * will be the one used as the system serial number by the kernel.
- */
-
-int
-iobrick_get_sys_snum( l1sc_t *sc, char *snum_str )
-{
- char msg[BRL1_QSIZE]; /* L1 request/response info */
- int subch; /* system controller subchannel used */
- int len; /* length of message */
-
- /* fill in msg with the opcode & params */
- bzero( msg, BRL1_QSIZE );
- if( (subch = sc_open( sc, L1_ADDR_LOCALIO )) < 0 ) {
- return( ELSC_ERROR_CMD_SEND );
- }
-
- if( (len = sc_construct_msg( sc, subch, msg, BRL1_QSIZE,
- L1_ADDR_TASK_GENERAL,
- L1_REQ_SYS_SERIAL, 0 )) < 0 )
- {
- sc_close( sc, subch );
- return( ELSC_ERROR_CMD_ARGS );
- }
-
- /* send the request to the L1 */
- if( sc_command( sc, subch, msg, msg, &len ) ) {
- sc_close( sc, subch );
- return( ELSC_ERROR_CMD_SEND );
- }
-
- /* free up subchannel */
- sc_close( sc, subch );
-
- /* check response */
- return( sc_interpret_resp( msg, 2, L1_ARG_ASCII, snum_str ) );
-}
-
-
-/*
- * The following functions apply (or cut off) power to the specified
- * pci bus or slot.
- */
-
-int
-iobrick_pci_pwr( l1sc_t *sc, int bus, int slot, int req_code )
-{
-#if 0 /* The "bedrock request" method of performing this function
- * seems to be broken in the L1, so for now use the command-
- * interpreter method
- */
-
- char msg[BRL1_QSIZE];
- int len; /* length of message being sent */
- int subch; /* system controller subchannel used */
-
- /* fill in msg with the opcode & params */
- bzero( msg, BRL1_QSIZE );
- subch = sc_open( sc, L1_ADDR_LOCALIO );
-
- if( (len = sc_construct_msg( sc, subch, msg, BRL1_QSIZE,
- L1_ADDR_TASK_GENERAL,
- req_code, 4,
- L1_ARG_INT, bus,
- L1_ARG_INT, slot )) < 0 )
- {
- sc_close( sc, subch );
- return( ELSC_ERROR_CMD_ARGS );
- }
-
- /* send the request to the L1 */
- if( SC_COMMAND(sc, subch, msg, msg, &len ) < 0 )
- {
- sc_close( sc, subch );
- return( ELSC_ERROR_CMD_SEND );
- }
-
- /* free up subchannel */
- sc_close( sc, subch );
-
- /* check response */
- if( sc_interpret_resp( msg, 0 ) < 0 )
- {
- return( ELSC_ERROR_RESP_FORMAT );
- }
-
- return 0;
-
-#else
- char cmd[64];
- char *fxn;
-
- switch( req_code )
- {
- case L1_REQ_PCI_UP:
- fxn = "u";
- break;
- case L1_REQ_PCI_DOWN:
- fxn = "d";
- break;
- case L1_REQ_PCI_RESET:
- fxn = "rst";
- break;
- default:
- return( ELSC_ERROR_CMD_ARGS );
- }
-
- if( slot == -1 )
- sprintf( cmd, "pci %d %s", bus, fxn );
- else
- sprintf( cmd, "pci %d %d %s", bus, slot, fxn );
-
- return sc_command_interp( sc, L1_ADDR_TYPE_IOBRICK,
- L1_ADDR_RACK_LOCAL, L1_ADDR_BAY_LOCAL, cmd );
-#endif
-}
-
-int
-iobrick_pci_slot_pwr( l1sc_t *sc, int bus, int slot, int up )
-{
- return iobrick_pci_pwr( sc, bus, slot, up );
-}
-
-int
-iobrick_pci_bus_pwr( l1sc_t *sc, int bus, int up )
-{
- return iobrick_pci_pwr( sc, bus, -1, up );
-}
-
-
-int
-iobrick_pci_slot_rst( l1sc_t *sc, int bus, int slot )
-{
- return iobrick_pci_pwr( sc, bus, slot, L1_REQ_PCI_RESET );
-}
-
-int
-iobrick_pci_bus_rst( l1sc_t *sc, int bus )
-{
- return iobrick_pci_pwr( sc, bus, -1, L1_REQ_PCI_RESET );
-}
-
-
-/* get the L1 firmware version for an iobrick */
-int
-iobrick_sc_version( l1sc_t *sc, char *result )
-{
- char msg[BRL1_QSIZE];
- int len; /* length of message being sent */
- int subch; /* system controller subchannel used */
- int major, /* major rev number */
- minor, /* minor rev number */
- bugfix; /* bugfix rev number */
-
- /* fill in msg with the opcode & params */
- bzero( msg, BRL1_QSIZE );
- subch = sc_open( sc, L1_ADDR_LOCALIO );
-
- if( (len = sc_construct_msg( sc, subch, msg, BRL1_QSIZE,
- L1_ADDR_TASK_GENERAL,
- L1_REQ_FW_REV, 0 )) < 0 )
- {
- sc_close( sc, subch );
- return( ELSC_ERROR_CMD_ARGS );
- }
-
- /* send the request to the L1 */
- if( SC_COMMAND(sc, subch, msg, msg, &len ) < 0 )
- {
- sc_close( sc, subch );
- return( ELSC_ERROR_CMD_SEND );
- }
-
- /* free up subchannel */
- sc_close( sc, subch );
-
- /* check response */
- if( sc_interpret_resp( msg, 6, L1_ARG_INT, &major,
- L1_ARG_INT, &minor, L1_ARG_INT, &bugfix )
- < 0 )
- {
- return( ELSC_ERROR_RESP_FORMAT );
- }
-
- sprintf( result, "%d.%d.%d", major, minor, bugfix );
-
- return 0;
-}
+++ /dev/null
-/* labelcl - SGI's Hwgraph Compatibility Layer.
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Copyright (c) 2001-2002 Silicon Graphics, Inc. All rights reserved.
-*/
-
-#include <linux/types.h>
-#include <linux/slab.h>
-#include <asm/sn/sgi.h>
-#include <linux/devfs_fs.h>
-#include <linux/devfs_fs_kernel.h>
-#include <asm/sn/invent.h>
-#include <asm/sn/hcl.h>
-#include <asm/sn/labelcl.h>
-
-/*
-** Very simple and dumb string table that supports only find/insert.
-** In practice, if this table gets too large, we may need a more
-** efficient data structure. Also note that currently there is no
-** way to delete an item once it's added. Therefore, name collision
-** will return an error.
-*/
-
-struct string_table label_string_table;
-
-
-
-/*
- * string_table_init - Initialize the given string table.
- */
-void
-string_table_init(struct string_table *string_table)
-{
- string_table->string_table_head = NULL;
- string_table->string_table_generation = 0;
-
- /*
- * We nedd to initialize locks here!
- */
-
- return;
-}
-
-
-/*
- * string_table_destroy - Destroy the given string table.
- */
-void
-string_table_destroy(struct string_table *string_table)
-{
- struct string_table_item *item, *next_item;
-
- item = string_table->string_table_head;
- while (item) {
- next_item = item->next;
-
- STRTBL_FREE(item);
- item = next_item;
- }
-
- /*
- * We need to destroy whatever lock we have here
- */
-
- return;
-}
-
-
-
-/*
- * string_table_insert - Insert an entry in the string table .. duplicate
- * names are not allowed.
- */
-char *
-string_table_insert(struct string_table *string_table, char *name)
-{
- struct string_table_item *item, *new_item = NULL, *last_item = NULL;
-
-again:
- /*
- * Need to lock the table ..
- */
- item = string_table->string_table_head;
- last_item = NULL;
-
- while (item) {
- if (!strcmp(item->string, name)) {
- /*
- * If we allocated space for the string and the found that
- * someone else already entered it into the string table,
- * free the space we just allocated.
- */
- if (new_item)
- STRTBL_FREE(new_item);
-
-
- /*
- * Search optimization: move the found item to the head
- * of the list.
- */
- if (last_item != NULL) {
- last_item->next = item->next;
- item->next = string_table->string_table_head;
- string_table->string_table_head = item;
- }
- goto out;
- }
- last_item = item;
- item=item->next;
- }
-
- /*
- * name was not found, so add it to the string table.
- */
- if (new_item == NULL) {
- long old_generation = string_table->string_table_generation;
-
- new_item = STRTBL_ALLOC(strlen(name));
-
- strcpy(new_item->string, name);
-
- /*
- * While we allocated memory for the new string, someone else
- * changed the string table.
- */
- if (old_generation != string_table->string_table_generation) {
- goto again;
- }
- } else {
- /* At this we only have the string table lock in access mode.
- * Promote the access lock to an update lock for the string
- * table insertion below.
- */
- long old_generation =
- string_table->string_table_generation;
-
- /*
- * After we did the unlock and wer waiting for update
- * lock someone could have potentially updated
- * the string table. Check the generation number
- * for this case. If it is the case we have to
- * try all over again.
- */
- if (old_generation !=
- string_table->string_table_generation) {
- goto again;
- }
- }
-
- /*
- * At this point, we're committed to adding new_item to the string table.
- */
- new_item->next = string_table->string_table_head;
- item = string_table->string_table_head = new_item;
- string_table->string_table_generation++;
-
-out:
- /*
- * Need to unlock here.
- */
- return(item->string);
-}
-
-/*
- * labelcl_info_create - Creates the data structure that will hold the
- * device private information asscoiated with a devfs entry.
- * The pointer to this structure is what gets stored in the devfs
- * (void * info).
- */
-labelcl_info_t *
-labelcl_info_create()
-{
-
- labelcl_info_t *new = NULL;
-
- /* Initial allocation does not include any area for labels */
- if ( ( new = (labelcl_info_t *)kmalloc (sizeof(labelcl_info_t), GFP_KERNEL) ) == NULL )
- return NULL;
-
- memset (new, 0, sizeof(labelcl_info_t));
- new->hwcl_magic = LABELCL_MAGIC;
- return( new);
-
-}
-
-/*
- * labelcl_info_destroy - Frees the data structure that holds the
- * device private information asscoiated with a devfs entry. This
- * data structure was created by device_info_create().
- *
- * The caller is responsible for nulling the (void *info) in the
- * corresponding devfs entry.
- */
-int
-labelcl_info_destroy(labelcl_info_t *labelcl_info)
-{
-
- if (labelcl_info == NULL)
- return(0);
-
- /* Free the label list */
- if (labelcl_info->label_list)
- kfree(labelcl_info->label_list);
-
- /* Now free the label info area */
- labelcl_info->hwcl_magic = 0;
- kfree(labelcl_info);
-
- return(0);
-}
-
-/*
- * labelcl_info_add_LBL - Adds a new label entry in the labelcl info
- * structure.
- *
- * Error is returned if we find another label with the same name.
- */
-int
-labelcl_info_add_LBL(devfs_handle_t de,
- char *info_name,
- arb_info_desc_t info_desc,
- arbitrary_info_t info)
-{
- labelcl_info_t *labelcl_info = NULL;
- int num_labels;
- int new_label_list_size;
- label_info_t *old_label_list, *new_label_list = NULL;
- char *name;
- int i;
-
- if (de == NULL)
- return(-1);
-
- labelcl_info = devfs_get_info(de);
- if (labelcl_info == NULL)
- return(-1);
-
- if (labelcl_info->hwcl_magic != LABELCL_MAGIC)
- return(-1);
-
- if (info_name == NULL)
- return(-1);
-
- if (strlen(info_name) >= LABEL_LENGTH_MAX)
- return(-1);
-
- name = string_table_insert(&label_string_table, info_name);
-
- num_labels = labelcl_info->num_labels;
- new_label_list_size = sizeof(label_info_t) * (num_labels+1);
-
- /*
- * Create a new label info area.
- */
- if (new_label_list_size != 0) {
- new_label_list = (label_info_t *) kmalloc(new_label_list_size, GFP_KERNEL);
-
- if (new_label_list == NULL)
- return(-1);
- }
-
- /*
- * At this point, we are committed to adding the labelled info,
- * if there isn't already information there with the same name.
- */
- old_label_list = labelcl_info->label_list;
-
- /*
- * Look for matching info name.
- */
- for (i=0; i<num_labels; i++) {
- if (!strcmp(info_name, old_label_list[i].name)) {
- /* Not allowed to add duplicate labelled info names. */
- kfree(new_label_list);
- printk(KERN_WARNING "labelcl_info_add_LBL: Duplicate label name %s for vertex 0x%p\n", info_name, (void *)de);
- return(-1);
- }
- new_label_list[i] = old_label_list[i]; /* structure copy */
- }
-
- new_label_list[num_labels].name = name;
- new_label_list[num_labels].desc = info_desc;
- new_label_list[num_labels].info = info;
-
- labelcl_info->num_labels = num_labels+1;
- labelcl_info->label_list = new_label_list;
-
- if (old_label_list != NULL)
- kfree(old_label_list);
-
- return(0);
-}
-
-/*
- * labelcl_info_remove_LBL - Remove a label entry.
- */
-int
-labelcl_info_remove_LBL(devfs_handle_t de,
- char *info_name,
- arb_info_desc_t *info_desc,
- arbitrary_info_t *info)
-{
- labelcl_info_t *labelcl_info = NULL;
- int num_labels;
- int new_label_list_size;
- label_info_t *old_label_list, *new_label_list = NULL;
- arb_info_desc_t label_desc_found;
- arbitrary_info_t label_info_found;
- int i;
-
- if (de == NULL)
- return(-1);
-
- labelcl_info = devfs_get_info(de);
- if (labelcl_info == NULL)
- return(-1);
-
- if (labelcl_info->hwcl_magic != LABELCL_MAGIC)
- return(-1);
-
- num_labels = labelcl_info->num_labels;
- if (num_labels == 0) {
- return(-1);
- }
-
- /*
- * Create a new info area.
- */
- new_label_list_size = sizeof(label_info_t) * (num_labels-1);
- if (new_label_list_size) {
- new_label_list = (label_info_t *) kmalloc(new_label_list_size, GFP_KERNEL);
- if (new_label_list == NULL)
- return(-1);
- }
-
- /*
- * At this point, we are committed to removing the labelled info,
- * if it still exists.
- */
- old_label_list = labelcl_info->label_list;
-
- /*
- * Find matching info name.
- */
- for (i=0; i<num_labels; i++) {
- if (!strcmp(info_name, old_label_list[i].name)) {
- label_desc_found = old_label_list[i].desc;
- label_info_found = old_label_list[i].info;
- goto found;
- }
- if (i < num_labels-1) /* avoid walking off the end of the new vertex */
- new_label_list[i] = old_label_list[i]; /* structure copy */
- }
-
- /* The named info doesn't exist. */
- if (new_label_list)
- kfree(new_label_list);
-
- return(-1);
-
-found:
- /* Finish up rest of labelled info */
- for (i=i+1; i<num_labels; i++)
- new_label_list[i-1] = old_label_list[i]; /* structure copy */
-
- labelcl_info->num_labels = num_labels+1;
- labelcl_info->label_list = new_label_list;
-
- kfree(old_label_list);
-
- if (info != NULL)
- *info = label_info_found;
-
- if (info_desc != NULL)
- *info_desc = label_desc_found;
-
- return(0);
-}
-
-
-/*
- * labelcl_info_replace_LBL - Replace an existing label entry with the
- * given new information.
- *
- * Label entry must exist.
- */
-int
-labelcl_info_replace_LBL(devfs_handle_t de,
- char *info_name,
- arb_info_desc_t info_desc,
- arbitrary_info_t info,
- arb_info_desc_t *old_info_desc,
- arbitrary_info_t *old_info)
-{
- labelcl_info_t *labelcl_info = NULL;
- int num_labels;
- label_info_t *label_list;
- int i;
-
- if (de == NULL)
- return(-1);
-
- labelcl_info = devfs_get_info(de);
- if (labelcl_info == NULL)
- return(-1);
-
- if (labelcl_info->hwcl_magic != LABELCL_MAGIC)
- return(-1);
-
- num_labels = labelcl_info->num_labels;
- if (num_labels == 0) {
- return(-1);
- }
-
- if (info_name == NULL)
- return(-1);
-
- label_list = labelcl_info->label_list;
-
- /*
- * Verify that information under info_name already exists.
- */
- for (i=0; i<num_labels; i++)
- if (!strcmp(info_name, label_list[i].name)) {
- if (old_info != NULL)
- *old_info = label_list[i].info;
-
- if (old_info_desc != NULL)
- *old_info_desc = label_list[i].desc;
-
- label_list[i].info = info;
- label_list[i].desc = info_desc;
-
- return(0);
- }
-
-
- return(-1);
-}
-
-/*
- * labelcl_info_get_LBL - Retrieve and return the information for the
- * given label entry.
- */
-int
-labelcl_info_get_LBL(devfs_handle_t de,
- char *info_name,
- arb_info_desc_t *info_desc,
- arbitrary_info_t *info)
-{
- labelcl_info_t *labelcl_info = NULL;
- int num_labels;
- label_info_t *label_list;
- int i;
-
- if (de == NULL)
- return(-1);
-
- labelcl_info = devfs_get_info(de);
- if (labelcl_info == NULL)
- return(-1);
-
- if (labelcl_info->hwcl_magic != LABELCL_MAGIC)
- return(-1);
-
- num_labels = labelcl_info->num_labels;
- if (num_labels == 0) {
- return(-1);
- }
-
- label_list = labelcl_info->label_list;
-
- /*
- * Find information under info_name.
- */
- for (i=0; i<num_labels; i++)
- if (!strcmp(info_name, label_list[i].name)) {
- if (info != NULL)
- *info = label_list[i].info;
- if (info_desc != NULL)
- *info_desc = label_list[i].desc;
-
- return(0);
- }
-
- return(-1);
-}
-
-/*
- * labelcl_info_get_next_LBL - returns the next label entry on the list.
- */
-int
-labelcl_info_get_next_LBL(devfs_handle_t de,
- char *buffer,
- arb_info_desc_t *info_descp,
- arbitrary_info_t *infop,
- labelcl_info_place_t *placeptr)
-{
- labelcl_info_t *labelcl_info = NULL;
- uint which_info;
- label_info_t *label_list;
-
- if ((buffer == NULL) && (infop == NULL))
- return(-1);
-
- if (placeptr == NULL)
- return(-1);
-
- if (de == NULL)
- return(-1);
-
- labelcl_info = devfs_get_info(de);
- if (labelcl_info == NULL)
- return(-1);
-
- if (labelcl_info->hwcl_magic != LABELCL_MAGIC)
- return(-1);
-
- which_info = *placeptr;
-
- if (which_info >= labelcl_info->num_labels) {
- return(-1);
- }
-
- label_list = (label_info_t *) labelcl_info->label_list;
-
- if (buffer != NULL)
- strcpy(buffer, label_list[which_info].name);
-
- if (infop)
- *infop = label_list[which_info].info;
-
- if (info_descp)
- *info_descp = label_list[which_info].desc;
-
- *placeptr = which_info + 1;
-
- return(0);
-}
-
-
-int
-labelcl_info_replace_IDX(devfs_handle_t de,
- int index,
- arbitrary_info_t info,
- arbitrary_info_t *old_info)
-{
- arbitrary_info_t *info_list_IDX;
- labelcl_info_t *labelcl_info = NULL;
-
- if (de == NULL) {
- printk(KERN_ALERT "labelcl: NULL devfs handle given.\n");
- return(-1);
- }
-
- labelcl_info = devfs_get_info(de);
- if (labelcl_info == NULL) {
- printk(KERN_ALERT "labelcl: Entry does not have info pointer.\n");
- return(-1);
- }
-
- if (labelcl_info->hwcl_magic != LABELCL_MAGIC)
- return(-1);
-
- if ( (index < 0) || (index >= HWGRAPH_NUM_INDEX_INFO) )
- return(-1);
-
- /*
- * Replace information at the appropriate index in this vertex with
- * the new info.
- */
- info_list_IDX = labelcl_info->IDX_list;
- if (old_info != NULL)
- *old_info = info_list_IDX[index];
- info_list_IDX[index] = info;
-
- return(0);
-
-}
-
-/*
- * labelcl_info_connectpt_set - Sets the connectpt.
- */
-int
-labelcl_info_connectpt_set(struct devfs_entry *de,
- struct devfs_entry *connect_de)
-{
- arbitrary_info_t old_info;
- int rv;
-
- rv = labelcl_info_replace_IDX(de, HWGRAPH_CONNECTPT,
- (arbitrary_info_t) connect_de, &old_info);
-
- if (rv) {
- return(rv);
- }
-
- return(0);
-}
-
-
-/*
- * labelcl_info_get_IDX - Returns the information pointed at by index.
- *
- */
-int
-labelcl_info_get_IDX(devfs_handle_t de,
- int index,
- arbitrary_info_t *info)
-{
- arbitrary_info_t *info_list_IDX;
- labelcl_info_t *labelcl_info = NULL;
-
- if (de == NULL)
- return(-1);
-
- labelcl_info = devfs_get_info(de);
- if (labelcl_info == NULL)
- return(-1);
-
- if (labelcl_info->hwcl_magic != LABELCL_MAGIC)
- return(-1);
-
- if ( (index < 0) || (index >= HWGRAPH_NUM_INDEX_INFO) )
- return(-1);
-
- /*
- * Return information at the appropriate index in this vertex.
- */
- info_list_IDX = labelcl_info->IDX_list;
- if (info != NULL)
- *info = info_list_IDX[index];
-
- return(0);
-}
-
-/*
- * labelcl_info_connectpt_get - Retrieve the connect point for a device entry.
- */
-struct devfs_entry *
-labelcl_info_connectpt_get(struct devfs_entry *de)
-{
- int rv;
- arbitrary_info_t info;
-
- rv = labelcl_info_get_IDX(de, HWGRAPH_CONNECTPT, &info);
- if (rv)
- return(NULL);
-
- return((struct devfs_entry *)info);
-}
--- /dev/null
+#
+# This file is subject to the terms and conditions of the GNU General Public
+# License. See the file "COPYING" in the main directory of this archive
+# for more details.
+#
+# Copyright (C) 2002-2003 Silicon Graphics, Inc. All Rights Reserved.
+#
+# Makefile for the sn2 io routines.
+
+# Every object in this directory is built with LITTLE_ENDIAN defined;
+# the SN2 register-layout headers key off this macro.
+EXTRA_CFLAGS := -DLITTLE_ENDIAN
+
+obj-y += pci.o pci_dma.o pci_bus_cvlink.o iomv.o
--- /dev/null
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2000-2003 Silicon Graphics, Inc. All rights reserved.
+ */
+
+#include <linux/pci.h>
+#include <linux/module.h>
+#include <asm/io.h>
+#include <asm/delay.h>
+#include <asm/sn/simulator.h>
+#include <asm/sn/pda.h>
+#include <asm/sn/sn_cpuid.h>
+
+/**
+ * sn_io_addr - convert an in/out port to an i/o address
+ * @port: port to convert
+ *
+ * Legacy in/out instructions are converted to ld/st instructions
+ * on IA64. This routine will convert a port number into a valid
+ * SN i/o address. Used by sn_in*() and sn_out*().
+ */
+void *
+sn_io_addr(unsigned long port)
+{
+	if (!IS_RUNNING_ON_SIMULATOR()) {
+		/* Real hardware: the port number is used directly as a
+		 * physical address through the uncached window. */
+		return( (void *) (port | __IA64_UNCACHED_OFFSET));
+	} else {
+		unsigned long io_base;
+		unsigned long addr;
+
+		/*
+		 * word align port, but need more than 10 bits
+		 * for accessing registers in bedrock local block
+		 * (so we don't do port&0xfff)
+		 */
+		if ((port >= 0x1f0 && port <= 0x1f7) ||
+			port == 0x3f6 || port == 0x3f7) {
+			/* Legacy IDE command/control ports get a fixed
+			 * simulator base, offset by this node's nasid. */
+			io_base = (0xc000000fcc000000 | ((unsigned long)get_nasid() << 38));
+			addr = io_base | ((port >> 2) << 12) | (port & 0xfff);
+		} else {
+			/* All other ports: word-align onto the generic
+			 * ia64 I/O port base. */
+			addr = __ia64_get_io_port_base() | ((port >> 2) << 2);
+		}
+		return(void *) addr;
+	}
+}
+
+EXPORT_SYMBOL(sn_io_addr);
+
+/**
+ * sn_mmiob - I/O space memory barrier
+ *
+ * Acts as a memory mapped I/O barrier for platforms that queue writes to
+ * I/O space. This ensures that subsequent writes to I/O space arrive after
+ * all previous writes. For most ia64 platforms, this is a simple
+ * 'mf.a' instruction. For other platforms, mmiob() may have to read
+ * a chipset register to ensure ordering.
+ *
+ * On SN2, we wait for the PIO_WRITE_STATUS SHub register to clear.
+ * See PV 871084 for details about the WAR about zero value.
+ *
+ */
+void
+sn_mmiob (void)
+{
+	/* Busy-wait until the SHub PIO_WRITE_STATUS pending-write count
+	 * reads back as the all-ones mask value (the idle state per the
+	 * PV 871084 workaround noted above), i.e. no PIO writes remain
+	 * queued toward I/O space. */
+	while ((((volatile unsigned long) (*pda->pio_write_status_addr)) & SH_PIO_WRITE_STATUS_0_PENDING_WRITE_COUNT_MASK) !=
+		SH_PIO_WRITE_STATUS_0_PENDING_WRITE_COUNT_MASK)
+		udelay(1);
+}
--- /dev/null
+/*
+ *
+ * SNI64 specific PCI support for SNI IO.
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (c) 1997, 1998, 2000-2003 Silicon Graphics, Inc. All rights reserved.
+ */
+#include <linux/init.h>
+#include <linux/types.h>
+#include <linux/config.h>
+#include <linux/pci.h>
+#include <asm/sn/types.h>
+#include <asm/sn/sgi.h>
+#include <asm/sn/io.h>
+#include <asm/sn/driver.h>
+#include <asm/sn/iograph.h>
+#include <asm/param.h>
+#include <asm/sn/pio.h>
+#include <asm/sn/xtalk/xwidget.h>
+#include <asm/sn/sn_private.h>
+#include <asm/sn/addrs.h>
+#include <asm/sn/invent.h>
+#include <asm/sn/hcl.h>
+#include <asm/sn/hcl_util.h>
+#include <asm/sn/pci/pciio.h>
+#include <asm/sn/pci/pcibr.h>
+#include <asm/sn/pci/pcibr_private.h>
+#include <asm/sn/pci/bridge.h>
+
+#ifdef DEBUG_CONFIG
+#define DBG(x...) printk(x)
+#else
+#define DBG(x...)
+#endif
+
+
+
+#ifdef CONFIG_PCI
+
+extern vertex_hdl_t pci_bus_to_vertex(unsigned char);
+extern vertex_hdl_t devfn_to_vertex(unsigned char bus, unsigned char devfn);
+
+/* Read `size` bytes of config space at offset `where` for bus/devfn,
+ * returning the value through *val.  Routed through the SGI hwgraph
+ * vertex for the device; PCIBIOS_* status is returned to the PCI core. */
+int sn_read_config(struct pci_bus *bus, unsigned int devfn, int where, int size, u32 *val)
+{
+	unsigned long res = 0;
+	vertex_hdl_t device_vertex;
+
+	/* No vertex means the bus/devfn doesn't exist in the hwgraph. */
+	device_vertex = devfn_to_vertex(bus->number, devfn);
+	if (!device_vertex)
+		return PCIBIOS_DEVICE_NOT_FOUND;
+	res = pciio_config_get(device_vertex, (unsigned) where, size);
+	*val = (unsigned int) res;
+	return PCIBIOS_SUCCESSFUL;
+}
+
+/* Write `size` bytes of `val` to config space offset `where` for
+ * bus/devfn, via the device's hwgraph vertex.  Counterpart of
+ * sn_read_config(). */
+int sn_write_config(struct pci_bus *bus, unsigned int devfn, int where, int size, u32 val)
+{
+	vertex_hdl_t device_vertex;
+
+	device_vertex = devfn_to_vertex(bus->number, devfn);
+	if (!device_vertex)
+		return PCIBIOS_DEVICE_NOT_FOUND;
+	pciio_config_set( device_vertex, (unsigned)where, size, (uint64_t) val);
+	return PCIBIOS_SUCCESSFUL;
+}
+
+/* Config-space accessors installed as pci_root_ops below. */
+struct pci_ops sn_pci_ops = {
+	.read = sn_read_config,
+	.write = sn_write_config
+};
+
+/*
+ * sn_pci_find_bios - SNIA64 pci_find_bios() platform specific code.
+ */
+/* Platform hook run at init: brings up the SGI IO infrastructure and
+ * points the generic ia64 PCI layer at our config-space accessors. */
+void __init
+sn_pci_find_bios(void)
+{
+	extern struct pci_ops *pci_root_ops;
+	/*
+	 * Go initialize our IO Infrastructure ..
+	 */
+	extern void sgi_master_io_infr_init(void);
+
+	sgi_master_io_infr_init();
+
+	/* sn_io_infrastructure_init(); */
+	pci_root_ops = &sn_pci_ops;
+}
+
+void
+pci_fixup_ioc3(struct pci_dev *d)
+{
+	int i;
+	unsigned int size;
+
+	/* IOC3 only decodes 0x20 bytes of the config space, reading
+	 * beyond that is relatively benign but writing beyond that
+	 * (especially the base address registers) will shut down the
+	 * pci bus...so avoid doing so.
+	 * NOTE: this means we can't program the intr_pin into the device,
+	 * currently we hack this with special code in
+	 * sgi_pci_intr_support()
+	 */
+	DBG("pci_fixup_ioc3: Fixing base addresses for ioc3 device %s\n", d->slot_name);
+
+	/* I happen to know from the spec that the ioc3 needs only 0xfffff
+	 * The standard pci trick of writing ~0 to the baddr and seeing
+	 * what comes back doesn't work with the ioc3
+	 */
+	size = 0xfffff;
+	d->resource[0].end = (unsigned long) d->resource[0].start + (unsigned long) size;
+
+	/*
+	 * Zero out the resource structure .. because we did not go through
+	 * the normal PCI Infrastructure Init, garbage is left in these
+	 * fields.
+	 */
+	for (i = 1; i <= PCI_ROM_RESOURCE; i++) {
+		d->resource[i].start = 0UL;
+		d->resource[i].end = 0UL;
+		d->resource[i].flags = 0UL;
+	}
+
+	d->subsystem_vendor = 0;
+	d->subsystem_device = 0;
+
+}
+
+#else
+/* CONFIG_PCI off: provide empty stubs and the global lists the core
+ * code references so the kernel still links. */
+void sn_pci_find_bios(void) {}
+void pci_fixup_ioc3(struct pci_dev *d) {}
+struct list_head pci_root_buses;
+struct list_head pci_devices;
+
+#endif /* CONFIG_PCI */
--- /dev/null
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1992 - 1997, 2000-2003 Silicon Graphics, Inc. All rights reserved.
+ */
+
+#include <linux/config.h>
+#include <linux/init.h>
+#include <linux/types.h>
+#include <linux/pci.h>
+#include <linux/pci_ids.h>
+#include <linux/sched.h>
+#include <linux/ioport.h>
+#include <asm/sn/types.h>
+#include <asm/sn/sgi.h>
+#include <asm/sn/io.h>
+#include <asm/sn/driver.h>
+#include <asm/sn/iograph.h>
+#include <asm/param.h>
+#include <asm/sn/pio.h>
+#include <asm/sn/xtalk/xwidget.h>
+#include <asm/sn/sn_private.h>
+#include <asm/sn/addrs.h>
+#include <asm/sn/invent.h>
+#include <asm/sn/hcl.h>
+#include <asm/sn/hcl_util.h>
+#include <asm/sn/intr.h>
+#include <asm/sn/xtalk/xtalkaddrs.h>
+#include <asm/sn/klconfig.h>
+#include <asm/sn/nodepda.h>
+#include <asm/sn/pci/pciio.h>
+#include <asm/sn/pci/pcibr.h>
+#include <asm/sn/pci/pcibr_private.h>
+#include <asm/sn/pci/pci_bus_cvlink.h>
+#include <asm/sn/simulator.h>
+#include <asm/sn/sn_cpuid.h>
+#include <asm/sn/arch.h>
+
+extern int bridge_rev_b_data_check_disable;
+
+vertex_hdl_t busnum_to_pcibr_vhdl[MAX_PCI_XWIDGET];
+nasid_t busnum_to_nid[MAX_PCI_XWIDGET];
+void * busnum_to_atedmamaps[MAX_PCI_XWIDGET];
+unsigned char num_bridges;
+static int done_probing;
+extern irqpda_t *irqpdaindr;
+
+static int pci_bus_map_create(vertex_hdl_t xtalk, char * io_moduleid);
+vertex_hdl_t devfn_to_vertex(unsigned char busnum, unsigned int devfn);
+
+extern void register_pcibr_intr(int irq, pcibr_intr_t intr);
+
+void sn_dma_flush_init(unsigned long start, unsigned long end, int idx, int pin, int slot);
+
+
+/*
+ * For the given device, initialize whether it is a PIC device.
+ */
+static void
+set_isPIC(struct sn_device_sysdata *device_sysdata)
+{
+	/* Fetch the bridge software state behind this device's vertex and
+	 * record whether that bridge is a PIC ASIC. */
+	pciio_info_t pciio_info = pciio_info_get(device_sysdata->vhdl);
+	pcibr_soft_t pcibr_soft = (pcibr_soft_t) pciio_info_mfast_get(pciio_info);
+
+	device_sysdata->isPIC = IS_PIC_SOFT(pcibr_soft);
+}
+
+/*
+ * pci_bus_cvlink_init() - To be called once during initialization before
+ * SGI IO Infrastructure init is called.
+ */
+void
+pci_bus_cvlink_init(void)
+{
+
+	extern void ioconfig_bus_init(void);
+
+	/* Clear the busnum -> vertex / nasid / ATE-dmamap translation
+	 * tables before any bridge is discovered. */
+	memset(busnum_to_pcibr_vhdl, 0x0, sizeof(vertex_hdl_t) * MAX_PCI_XWIDGET);
+	memset(busnum_to_nid, 0x0, sizeof(nasid_t) * MAX_PCI_XWIDGET);
+
+	memset(busnum_to_atedmamaps, 0x0, sizeof(void *) * MAX_PCI_XWIDGET);
+
+	num_bridges = 0;
+
+	ioconfig_bus_init();
+}
+
+/*
+ * pci_bus_to_vertex() - Given a logical Linux Bus Number returns the associated
+ * pci bus vertex from the SGI IO Infrastructure.
+ */
+vertex_hdl_t
+pci_bus_to_vertex(unsigned char busnum)
+{
+
+	vertex_hdl_t pci_bus = NULL;
+
+
+	/*
+	 * First get the xwidget vertex.
+	 */
+	/* Simple table lookup; NULL if no bridge was registered for this
+	 * logical bus number. */
+	pci_bus = busnum_to_pcibr_vhdl[busnum];
+	return(pci_bus);
+}
+
+/*
+ * devfn_to_vertex() - returns the vertex of the device given the bus, slot,
+ * and function numbers.
+ */
+vertex_hdl_t
+devfn_to_vertex(unsigned char busnum, unsigned int devfn)
+{
+
+	int slot = 0;
+	int func = 0;
+	char name[16];
+	vertex_hdl_t pci_bus = NULL;
+	vertex_hdl_t device_vertex = (vertex_hdl_t)NULL;
+
+	/*
+	 * Go get the pci bus vertex.
+	 */
+	pci_bus = pci_bus_to_vertex(busnum);
+	if (!pci_bus) {
+		/*
+		 * During probing, the Linux pci code invents non-existent
+		 * bus numbers and pci_dev structures and tries to access
+		 * them to determine existence. Don't crib during probing.
+		 */
+		if (done_probing)
+			printk("devfn_to_vertex: Invalid bus number %d given.\n", busnum);
+		return(NULL);
+	}
+
+
+	/*
+	 * Go get the slot&function vertex.
+	 * Should call pciio_slot_func_to_name() when ready.
+	 */
+	slot = PCI_SLOT(devfn);
+	func = PCI_FUNC(devfn);
+
+	/*
+	 * For a NON Multi-function card the name of the device looks like:
+	 * ../pci/1, ../pci/2 ..
+	 */
+	if (func == 0) {
+		sprintf(name, "%d", slot);
+		if (hwgraph_traverse(pci_bus, name, &device_vertex) ==
+			GRAPH_SUCCESS) {
+			if (device_vertex) {
+				return(device_vertex);
+			}
+		}
+	}
+
+	/*
+	 * This may be a multifunction card. Its names look like:
+	 * ../pci/1a, ../pci/1b, etc.
+	 */
+	sprintf(name, "%d%c", slot, 'a'+func);
+	if (hwgraph_traverse(pci_bus, name, &device_vertex) != GRAPH_SUCCESS) {
+		if (!device_vertex) {
+			return(NULL);
+		}
+	}
+
+	return(device_vertex);
+}
+
+/*
+ * For the given device, initialize the addresses for both the Device(x) Flush
+ * Write Buffer register and the Xbow Flush Register for the port the PCI bus
+ * is connected.
+ */
+static void
+set_flush_addresses(struct pci_dev *device_dev,
+	struct sn_device_sysdata *device_sysdata)
+{
+	pciio_info_t pciio_info = pciio_info_get(device_sysdata->vhdl);
+	pciio_slot_t pciio_slot = pciio_info_slot_get(pciio_info);
+	pcibr_soft_t pcibr_soft = (pcibr_soft_t) pciio_info_mfast_get(pciio_info);
+	bridge_t *bridge = pcibr_soft->bs_base;
+	nasid_t nasid;
+
+	/*
+	 * Get the nasid from the bridge.
+	 */
+	/* NOTE(review): nasid is extracted from dma_buf_sync *before*
+	 * dma_buf_sync is assigned below — this relies on a value set
+	 * prior to this call; verify against the caller. */
+	nasid = NASID_GET(device_sysdata->dma_buf_sync);
+	if (IS_PIC_DEVICE(device_dev)) {
+		/* PIC bridge: registers are addressed directly. */
+		device_sysdata->dma_buf_sync = (volatile unsigned int *)
+			&bridge->b_wr_req_buf[pciio_slot].reg;
+		device_sysdata->xbow_buf_sync = (volatile unsigned int *)
+			XBOW_PRIO_LINKREGS_PTR(NODE_SWIN_BASE(nasid, 0),
+			pcibr_soft->bs_xid);
+	} else {
+		/*
+		 * Accessing Xbridge and Xbow register when SHUB swapoper is on!.
+		 */
+		/* XOR with 4 swaps the 32-bit word within the 64-bit
+		 * doubleword to compensate for the SHUB swapper. */
+		device_sysdata->dma_buf_sync = (volatile unsigned int *)
+			((uint64_t)&(bridge->b_wr_req_buf[pciio_slot].reg)^4);
+		device_sysdata->xbow_buf_sync = (volatile unsigned int *)
+			((uint64_t)(XBOW_PRIO_LINKREGS_PTR(
+			NODE_SWIN_BASE(nasid, 0), pcibr_soft->bs_xid)) ^ 4);
+	}
+
+#ifdef DEBUG
+	printk("set_flush_addresses: dma_buf_sync %p xbow_buf_sync %p\n",
+		device_sysdata->dma_buf_sync, device_sysdata->xbow_buf_sync);
+
+printk("set_flush_addresses: dma_buf_sync\n");
+	while((volatile unsigned int )*device_sysdata->dma_buf_sync);
+printk("set_flush_addresses: xbow_buf_sync\n");
+	while((volatile unsigned int )*device_sysdata->xbow_buf_sync);
+#endif
+
+}
+
+struct sn_flush_nasid_entry flush_nasid_list[MAX_NASIDS];
+
+// Initialize the data structures for flushing write buffers after a PIO read.
+// The theory is:
+// Take an unused int. pin and associate it with a pin that is in use.
+// After a PIO read, force an interrupt on the unused pin, forcing a write buffer flush
+// on the in use pin. This will prevent the race condition between PIO read responses and
+// DMA writes.
+//
+// Records one BAR range [start, end) for the device at (idx, pin, slot) in the
+// per-nasid/per-widget flush_nasid_list table, then programs the bridge so a
+// spare interrupt pin can be used to force the write-buffer flush.
+// NOTE(review): neither kmalloc() result below is checked before memset() —
+// an allocation failure at boot would oops here.
+void
+sn_dma_flush_init(unsigned long start, unsigned long end, int idx, int pin, int slot) {
+ nasid_t nasid;
+ unsigned long dnasid;
+ int wid_num;
+ int bus;
+ struct sn_flush_device_list *p;
+ bridge_t *b;
+ bridgereg_t dev_sel;
+ extern int isIO9(int);
+ int bwin;
+ int i;
+
+ /* Decode nasid, widget and bus number from the PIO address itself. */
+ nasid = NASID_GET(start);
+ wid_num = SWIN_WIDGETNUM(start);
+ bus = (start >> 23) & 0x1;
+ bwin = BWIN_WINDOWNUM(start);
+
+ if (flush_nasid_list[nasid].widget_p == NULL) {
+ flush_nasid_list[nasid].widget_p = (struct sn_flush_device_list **)kmalloc((HUB_WIDGET_ID_MAX+1) *
+ sizeof(struct sn_flush_device_list *), GFP_KERNEL);
+ memset(flush_nasid_list[nasid].widget_p, 0, (HUB_WIDGET_ID_MAX+1) * sizeof(struct sn_flush_device_list *));
+ }
+ // For big-window addresses, the widget/bus must instead be recovered from
+ // the hub's IIO ITTE mapping register for that window. The seven cases
+ // below are identical except for which iio_itteN field caches the value.
+ if (bwin > 0) {
+ bwin--;
+ switch (bwin) {
+ case 0:
+ flush_nasid_list[nasid].iio_itte1 = HUB_L(IIO_ITTE_GET(nasid, 0));
+ wid_num = ((flush_nasid_list[nasid].iio_itte1) >> 8) & 0xf;
+ bus = flush_nasid_list[nasid].iio_itte1 & 0xf;
+ if (bus == 0x4 || bus == 0x8)
+ bus = 0;
+ else
+ bus = 1;
+ break;
+ case 1:
+ flush_nasid_list[nasid].iio_itte2 = HUB_L(IIO_ITTE_GET(nasid, 1));
+ wid_num = ((flush_nasid_list[nasid].iio_itte2) >> 8) & 0xf;
+ bus = flush_nasid_list[nasid].iio_itte2 & 0xf;
+ if (bus == 0x4 || bus == 0x8)
+ bus = 0;
+ else
+ bus = 1;
+ break;
+ case 2:
+ flush_nasid_list[nasid].iio_itte3 = HUB_L(IIO_ITTE_GET(nasid, 2));
+ wid_num = ((flush_nasid_list[nasid].iio_itte3) >> 8) & 0xf;
+ bus = flush_nasid_list[nasid].iio_itte3 & 0xf;
+ if (bus == 0x4 || bus == 0x8)
+ bus = 0;
+ else
+ bus = 1;
+ break;
+ case 3:
+ flush_nasid_list[nasid].iio_itte4 = HUB_L(IIO_ITTE_GET(nasid, 3));
+ wid_num = ((flush_nasid_list[nasid].iio_itte4) >> 8) & 0xf;
+ bus = flush_nasid_list[nasid].iio_itte4 & 0xf;
+ if (bus == 0x4 || bus == 0x8)
+ bus = 0;
+ else
+ bus = 1;
+ break;
+ case 4:
+ flush_nasid_list[nasid].iio_itte5 = HUB_L(IIO_ITTE_GET(nasid, 4));
+ wid_num = ((flush_nasid_list[nasid].iio_itte5) >> 8) & 0xf;
+ bus = flush_nasid_list[nasid].iio_itte5 & 0xf;
+ if (bus == 0x4 || bus == 0x8)
+ bus = 0;
+ else
+ bus = 1;
+ break;
+ case 5:
+ flush_nasid_list[nasid].iio_itte6 = HUB_L(IIO_ITTE_GET(nasid, 5));
+ wid_num = ((flush_nasid_list[nasid].iio_itte6) >> 8) & 0xf;
+ bus = flush_nasid_list[nasid].iio_itte6 & 0xf;
+ if (bus == 0x4 || bus == 0x8)
+ bus = 0;
+ else
+ bus = 1;
+ break;
+ case 6:
+ flush_nasid_list[nasid].iio_itte7 = HUB_L(IIO_ITTE_GET(nasid, 6));
+ wid_num = ((flush_nasid_list[nasid].iio_itte7) >> 8) & 0xf;
+ bus = flush_nasid_list[nasid].iio_itte7 & 0xf;
+ if (bus == 0x4 || bus == 0x8)
+ bus = 0;
+ else
+ bus = 1;
+ break;
+ }
+ }
+
+ // if it's IO9, bus 1, we don't care about slots 1, 3, and 4. This is
+ // because these are the IOC4 slots and we don't flush them.
+ // NOTE(review): the comment above disagrees with the code — the test is
+ // `bus == 0`, and only slots 1 and 4 are skipped (slot 3 is the 12160
+ // SCSI device, special-cased further down). Confirm which is intended.
+ if (isIO9(nasid) && bus == 0 && (slot == 1 || slot == 4)) {
+ return;
+ }
+ if (flush_nasid_list[nasid].widget_p[wid_num] == NULL) {
+ flush_nasid_list[nasid].widget_p[wid_num] = (struct sn_flush_device_list *)kmalloc(
+ DEV_PER_WIDGET * sizeof (struct sn_flush_device_list), GFP_KERNEL);
+ memset(flush_nasid_list[nasid].widget_p[wid_num], 0,
+ DEV_PER_WIDGET * sizeof (struct sn_flush_device_list));
+ p = &flush_nasid_list[nasid].widget_p[wid_num][0];
+ /* Mark all entries unused (bus/pin = -1). */
+ for (i=0; i<DEV_PER_WIDGET;i++) {
+ p->bus = -1;
+ p->pin = -1;
+ p++;
+ }
+ }
+
+ /* Find the existing entry for this (pin, bus) pair, or claim a free one. */
+ p = &flush_nasid_list[nasid].widget_p[wid_num][0];
+ for (i=0;i<DEV_PER_WIDGET; i++) {
+ if (p->pin == pin && p->bus == bus) break;
+ if (p->pin < 0) {
+ p->pin = pin;
+ p->bus = bus;
+ break;
+ }
+ p++;
+ }
+
+ /* Record this BAR range in the first free bar_list slot. */
+ for (i=0; i<PCI_ROM_RESOURCE; i++) {
+ if (p->bar_list[i].start == 0) {
+ p->bar_list[i].start = start;
+ p->bar_list[i].end = end;
+ break;
+ }
+ }
+ b = (bridge_t *)(NODE_SWIN_BASE(nasid, wid_num) | (bus << 23) );
+
+ // If it's IO9, then slot 2 maps to slot 7 and slot 6 maps to slot 8.
+ // To see this is non-trivial. By drawing pictures and reading manuals and talking
+ // to HW guys, we can see that on IO9 bus 1, slots 7 and 8 are always unused.
+ // Further, since we short-circuit slots 1, 3, and 4 above, we only have to worry
+ // about the case when there is a card in slot 2. A multifunction card will appear
+ // to be in slot 6 (from an interrupt point of view) also. That's the most we'll
+ // have to worry about. A four function card will overload the interrupt lines in
+ // slot 2 and 6.
+ // We also need to special case the 12160 device in slot 3. Fortunately, we have
+ // a spare intr. line for pin 4, so we'll use that for the 12160.
+ // All other buses have slot 3 and 4 and slots 7 and 8 unused. Since we can only
+ // see slots 1 and 2 and slots 5 and 6 coming through here for those buses (this
+ // is true only on Pxbricks with 2 physical slots per bus), we just need to add
+ // 2 to the slot number to find an unused slot.
+ // We have convinced ourselves that we will never see a case where two different cards
+ // in two different slots will ever share an interrupt line, so there is no need to
+ // special case this.
+
+ if (isIO9(nasid) && wid_num == 0xc && bus == 0) {
+ if (slot == 2) {
+ p->force_int_addr = (unsigned long)&b->b_force_always[6].intr;
+ dev_sel = b->b_int_device;
+ dev_sel |= (1<<18);
+ b->b_int_device = dev_sel;
+ dnasid = NASID_GET(virt_to_phys(&p->flush_addr));
+ /* Point the bridge's interrupt address at our flush word. */
+ b->p_int_addr_64[6] = (virt_to_phys(&p->flush_addr) & 0xfffffffff) |
+ (dnasid << 36) | (0xfUL << 48);
+ } else if (slot == 3) { /* 12160 SCSI device in IO9 */
+ p->force_int_addr = (unsigned long)&b->b_force_always[4].intr;
+ dev_sel = b->b_int_device;
+ dev_sel |= (2<<12);
+ b->b_int_device = dev_sel;
+ dnasid = NASID_GET(virt_to_phys(&p->flush_addr));
+ b->p_int_addr_64[4] = (virt_to_phys(&p->flush_addr) & 0xfffffffff) |
+ (dnasid << 36) | (0xfUL << 48);
+ } else { /* slot == 6 */
+ p->force_int_addr = (unsigned long)&b->b_force_always[7].intr;
+ dev_sel = b->b_int_device;
+ dev_sel |= (5<<21);
+ b->b_int_device = dev_sel;
+ dnasid = NASID_GET(virt_to_phys(&p->flush_addr));
+ b->p_int_addr_64[7] = (virt_to_phys(&p->flush_addr) & 0xfffffffff) |
+ (dnasid << 36) | (0xfUL << 48);
+ }
+ } else {
+ /* Common case: use pin + 2 as the spare interrupt line. */
+ p->force_int_addr = (unsigned long)&b->b_force_always[pin + 2].intr;
+ dev_sel = b->b_int_device;
+ dev_sel |= ((slot - 1) << ( pin * 3) );
+ b->b_int_device = dev_sel;
+ dnasid = NASID_GET(virt_to_phys(&p->flush_addr));
+ b->p_int_addr_64[pin + 2] = (virt_to_phys(&p->flush_addr) & 0xfffffffff) |
+ (dnasid << 36) | (0xfUL << 48);
+ }
+}
+
+/*
+ * Most drivers currently do not properly tell the arch specific pci dma
+ * interfaces whether they can handle A64. Here is where we privately
+ * keep track of this.
+ *
+ * Marks @dev as 64-bit-DMA capable (via SET_PCIA64) if it is one of the
+ * known-good devices: QLogic ISP2100/ISP2200 or the SGI IOC3. All other
+ * devices are left unmarked.
+ */
+static void __init
+set_sn_pci64(struct pci_dev *dev)
+{
+ unsigned short vendor = dev->vendor;
+ unsigned short device = dev->device;
+
+ if (vendor == PCI_VENDOR_ID_QLOGIC) {
+ if ((device == PCI_DEVICE_ID_QLOGIC_ISP2100) ||
+ (device == PCI_DEVICE_ID_QLOGIC_ISP2200)) {
+ SET_PCIA64(dev);
+ return;
+ }
+ }
+
+ if (vendor == PCI_VENDOR_ID_SGI) {
+ if (device == PCI_DEVICE_ID_SGI_IOC3) {
+ SET_PCIA64(dev);
+ return;
+ }
+ }
+
+}
+
+/*
+ * sn_pci_fixup() - This routine is called when platform_pci_fixup() is
+ * invoked at the end of pcibios_init() to link the Linux pci
+ * infrastructure to SGI IO Infrasturcture - ia64/kernel/pci.c
+ *
+ * Other platform specific fixup can also be done here.
+ *
+ * Called twice: with @arg == 0 before probing (finds the PCI "BIOS",
+ * initializes per-node interrupt vector blocks and procfs entries and
+ * returns), and again with @arg != 0 after probing, when it attaches
+ * hwgraph vertices to every pci_bus/pci_dev, fixes up resource
+ * addresses, allocates and connects interrupts, and registers DMA
+ * write-buffer flush addresses via sn_dma_flush_init().
+ */
+void
+sn_pci_fixup(int arg)
+{
+ struct list_head *ln;
+ struct pci_bus *pci_bus = NULL;
+ struct pci_dev *device_dev = NULL;
+ struct sn_widget_sysdata *widget_sysdata;
+ struct sn_device_sysdata *device_sysdata;
+ pciio_intr_t intr_handle;
+ int cpuid, bit;
+ vertex_hdl_t device_vertex;
+ pciio_intr_line_t lines;
+ extern void sn_pci_find_bios(void);
+ extern int numnodes;
+ int cnode;
+
+ /* First (pre-probe) pass: set up interrupt vector blocks and procfs. */
+ if (arg == 0) {
+#ifdef CONFIG_PROC_FS
+ extern void register_sn_procfs(void);
+#endif
+
+ sn_pci_find_bios();
+ for (cnode = 0; cnode < numnodes; cnode++) {
+ extern void intr_init_vecblk(nodepda_t *npda, cnodeid_t, int);
+ intr_init_vecblk(NODEPDA(cnode), cnode, 0);
+ }
+#ifdef CONFIG_PROC_FS
+ register_sn_procfs();
+#endif
+ return;
+ }
+
+
+ done_probing = 1;
+
+ /*
+ * Initialize the pci bus vertex in the pci_bus struct.
+ * NOTE(review): kmalloc() result is used unchecked here and for
+ * device_sysdata below.
+ */
+ for( ln = pci_root_buses.next; ln != &pci_root_buses; ln = ln->next) {
+ pci_bus = pci_bus_b(ln);
+ widget_sysdata = kmalloc(sizeof(struct sn_widget_sysdata),
+ GFP_KERNEL);
+ widget_sysdata->vhdl = pci_bus_to_vertex(pci_bus->number);
+ pci_bus->sysdata = (void *)widget_sysdata;
+ }
+
+ /*
+ * set the root start and end so that drivers calling check_region()
+ * won't see a conflict
+ */
+ ioport_resource.start = 0xc000000000000000;
+ ioport_resource.end = 0xcfffffffffffffff;
+
+ /*
+ * Set the root start and end for Mem Resource.
+ */
+ iomem_resource.start = 0;
+ iomem_resource.end = 0xffffffffffffffff;
+
+ /*
+ * Initialize the device vertex in the pci_dev struct.
+ */
+ while ((device_dev = pci_find_device(PCI_ANY_ID, PCI_ANY_ID, device_dev)) != NULL) {
+ unsigned int irq;
+ int idx;
+ u16 cmd;
+ vertex_hdl_t vhdl;
+ unsigned long size;
+ extern int bit_pos_to_irq(int);
+
+ if (device_dev->vendor == PCI_VENDOR_ID_SGI &&
+ device_dev->device == PCI_DEVICE_ID_SGI_IOC3) {
+ extern void pci_fixup_ioc3(struct pci_dev *d);
+ pci_fixup_ioc3(device_dev);
+ }
+
+ /* Set the device vertex */
+
+ device_sysdata = kmalloc(sizeof(struct sn_device_sysdata),
+ GFP_KERNEL);
+ device_sysdata->vhdl = devfn_to_vertex(device_dev->bus->number, device_dev->devfn);
+ device_sysdata->isa64 = 0;
+ /*
+ * Set the xbridge Device(X) Write Buffer Flush and Xbow Flush
+ * register addresses.
+ */
+ (void) set_flush_addresses(device_dev, device_sysdata);
+
+ device_dev->sysdata = (void *) device_sysdata;
+ set_sn_pci64(device_dev);
+ set_isPIC(device_sysdata);
+
+ pci_read_config_word(device_dev, PCI_COMMAND, &cmd);
+
+ /*
+ * Set the resources address correctly. The assumption here
+ * is that the addresses in the resource structure has been
+ * read from the card and it was set in the card by our
+ * Infrastructure ..
+ */
+ vhdl = device_sysdata->vhdl;
+ for (idx = 0; idx < PCI_ROM_RESOURCE; idx++) {
+ size = 0;
+ size = device_dev->resource[idx].end -
+ device_dev->resource[idx].start;
+ if (size) {
+ /* Rewrite each BAR as an uncached PIO address. */
+ device_dev->resource[idx].start = (unsigned long)pciio_pio_addr(vhdl, 0, PCIIO_SPACE_WIN(idx), 0, size, 0, (IS_PIC_DEVICE(device_dev)) ? 0 : PCIIO_BYTE_STREAM);
+ device_dev->resource[idx].start |= __IA64_UNCACHED_OFFSET;
+ }
+ else
+ continue;
+
+ device_dev->resource[idx].end =
+ device_dev->resource[idx].start + size;
+
+ if (device_dev->resource[idx].flags & IORESOURCE_IO)
+ cmd |= PCI_COMMAND_IO;
+
+ if (device_dev->resource[idx].flags & IORESOURCE_MEM)
+ cmd |= PCI_COMMAND_MEMORY;
+ }
+#if 0
+ /*
+ * Software WAR for a Software BUG.
+ * This is only temporary.
+ * See PV 872791
+ */
+
+ /*
+ * Now handle the ROM resource ..
+ */
+ size = device_dev->resource[PCI_ROM_RESOURCE].end -
+ device_dev->resource[PCI_ROM_RESOURCE].start;
+
+ if (size) {
+ device_dev->resource[PCI_ROM_RESOURCE].start =
+ (unsigned long) pciio_pio_addr(vhdl, 0, PCIIO_SPACE_ROM, 0,
+ size, 0, (IS_PIC_DEVICE(device_dev)) ? 0 : PCIIO_BYTE_STREAM);
+ device_dev->resource[PCI_ROM_RESOURCE].start |= __IA64_UNCACHED_OFFSET;
+ device_dev->resource[PCI_ROM_RESOURCE].end =
+ device_dev->resource[PCI_ROM_RESOURCE].start + size;
+ }
+#endif
+
+ /*
+ * Update the Command Word on the Card.
+ */
+ cmd |= PCI_COMMAND_MASTER; /* If the device doesn't support */
+ /* bit gets dropped .. no harm */
+ pci_write_config_word(device_dev, PCI_COMMAND, cmd);
+
+ pci_read_config_byte(device_dev, PCI_INTERRUPT_PIN, (unsigned char *)&lines);
+ if (device_dev->vendor == PCI_VENDOR_ID_SGI &&
+ device_dev->device == PCI_DEVICE_ID_SGI_IOC3 ) {
+ lines = 1;
+ }
+
+ device_sysdata = (struct sn_device_sysdata *)device_dev->sysdata;
+ device_vertex = device_sysdata->vhdl;
+
+ /* Allocate and connect the interrupt for this device. */
+ irqpdaindr->current = device_dev;
+ intr_handle = pciio_intr_alloc(device_vertex, NULL, lines, device_vertex);
+
+ irq = intr_handle->pi_irq;
+ irqpdaindr->device_dev[irq] = device_dev;
+ cpuid = intr_handle->pi_cpu;
+ pciio_intr_connect(intr_handle, (intr_func_t)0, (intr_arg_t)0);
+ device_dev->irq = irq;
+ register_pcibr_intr(irq, (pcibr_intr_t)intr_handle);
+
+ /*
+ * For every BAR and every interrupt bit in use, register a
+ * DMA write-buffer flush address.
+ */
+ for (idx = 0; idx < PCI_ROM_RESOURCE; idx++) {
+ int ibits = ((pcibr_intr_t)intr_handle)->bi_ibits;
+ int i;
+
+ size = device_dev->resource[idx].end -
+ device_dev->resource[idx].start;
+ if (size == 0) continue;
+
+ for (i=0; i<8; i++) {
+ if (ibits & (1 << i) ) {
+ sn_dma_flush_init(device_dev->resource[idx].start,
+ device_dev->resource[idx].end,
+ idx,
+ i,
+ PCI_SLOT(device_dev->devfn));
+ }
+ }
+ }
+
+ }
+/*
+ * NOTE(review): this debug block sits OUTSIDE the device loop above,
+ * so device_dev is NULL and intr_handle/irq hold stale values from the
+ * last iteration. It only compiles when `ajmtestintr` is defined.
+ */
+#ifdef ajmtestintr
+ {
+ int slot = PCI_SLOT(device_dev->devfn);
+ static int timer_set = 0;
+ pcibr_intr_t pcibr_intr = (pcibr_intr_t)intr_handle;
+ pcibr_soft_t pcibr_soft = pcibr_intr->bi_soft;
+ extern void intr_test_handle_intr(int, void*, struct pt_regs *);
+
+ if (!timer_set) {
+ intr_test_set_timer();
+ timer_set = 1;
+ }
+ intr_test_register_irq(irq, pcibr_soft, slot);
+ request_irq(irq, intr_test_handle_intr,0,NULL, NULL);
+ }
+#endif
+}
+
+/*
+ * linux_bus_cvlink() Creates a link between the Linux PCI Bus number
+ * to the actual hardware component that it represents:
+ * /dev/hw/linux/busnum/0 -> ../../../hw/module/001c01/slab/0/Ibrick/xtalk/15/pci
+ *
+ * The bus vertex, when called to devfs_generate_path() returns:
+ * hw/module/001c01/slab/0/Ibrick/xtalk/15/pci
+ * hw/module/001c01/slab/1/Pbrick/xtalk/12/pci-x/0
+ * hw/module/001c01/slab/1/Pbrick/xtalk/12/pci-x/1
+ *
+ * Walks busnum_to_pcibr_vhdl[0..MAX_PCI_XWIDGET) and adds a hwgraph
+ * edge named after the bus number (in hex) under linux_busnum for
+ * every populated entry.
+ */
+void
+linux_bus_cvlink(void)
+{
+ char name[8];
+ int index;
+
+ for (index=0; index < MAX_PCI_XWIDGET; index++) {
+ if (!busnum_to_pcibr_vhdl[index])
+ continue;
+
+ /* Edge name is the bus number in hex. */
+ sprintf(name, "%x", index);
+ (void) hwgraph_edge_add(linux_busnum, busnum_to_pcibr_vhdl[index],
+ name);
+ }
+}
+
+/*
+ * pci_bus_map_create() - Called by pci_bus_to_hcl_cvlink() to finish the job.
+ *
+ * Linux PCI Bus numbers are assigned from lowest module_id numbers
+ * (rack/slot etc.) starting from HUB_WIDGET_ID_MAX down to
+ * HUB_WIDGET_ID_MIN:
+ * widgetnum 15 gets lower Bus Number than widgetnum 14 etc.
+ *
+ * Given 2 modules 001c01 and 001c02 we get the following mappings:
+ * 001c01, widgetnum 15 = Bus number 0
+ * 001c01, widgetnum 14 = Bus number 1
+ * 001c02, widgetnum 15 = Bus number 3
+ * 001c02, widgetnum 14 = Bus number 4
+ * etc.
+ *
+ * The rational for starting Bus Number 0 with Widget number 15 is because
+ * the system boot disks are always connected via Widget 15 Slot 0 of the
+ * I-brick. Linux creates /dev/sd* devices(naming) strating from Bus Number 0
+ * Therefore, /dev/sda1 will be the first disk, on Widget 15 of the lowest
+ * module id(Master Cnode) of the system.
+ *
+ * Returns 0 on success, 1 if hubinfo for a master node cannot be found.
+ */
+static int
+pci_bus_map_create(vertex_hdl_t xtalk, char * io_moduleid)
+{
+
+ vertex_hdl_t master_node_vertex = NULL;
+ vertex_hdl_t xwidget = NULL;
+ vertex_hdl_t pci_bus = NULL;
+ hubinfo_t hubinfo = NULL;
+ xwidgetnum_t widgetnum;
+ char pathname[128];
+ graph_error_t rv;
+ int bus;
+ int basebus_num;
+ extern void ioconfig_get_busnum(char *, int *);
+
+ int bus_number;
+
+ /*
+ * Loop throught this vertex and get the Xwidgets ..
+ */
+
+
+ /* PCI devices */
+
+ for (widgetnum = HUB_WIDGET_ID_MAX; widgetnum >= HUB_WIDGET_ID_MIN; widgetnum--) {
+ sprintf(pathname, "%d", widgetnum);
+ xwidget = NULL;
+
+ /*
+ * Example - /hw/module/001c16/Pbrick/xtalk/8 is the xwidget
+ * /hw/module/001c16/Pbrick/xtalk/8/pci/1 is device
+ *
+ * NOTE(review): on traverse failure we only skip when xwidget
+ * is NULL; a failed traverse that still set xwidget falls
+ * through. Confirm this is intentional.
+ */
+ rv = hwgraph_traverse(xtalk, pathname, &xwidget);
+ if ( (rv != GRAPH_SUCCESS) ) {
+ if (!xwidget) {
+ continue;
+ }
+ }
+
+ sprintf(pathname, "%d/"EDGE_LBL_PCI, widgetnum);
+ pci_bus = NULL;
+ if (hwgraph_traverse(xtalk, pathname, &pci_bus) != GRAPH_SUCCESS)
+ if (!pci_bus) {
+ continue;
+}
+
+ /*
+ * Assign the correct bus number and also the nasid of this
+ * pci Xwidget.
+ *
+ * Should not be any race here ...
+ */
+ num_bridges++;
+ busnum_to_pcibr_vhdl[num_bridges - 1] = pci_bus;
+
+ /*
+ * Get the master node and from there get the NASID.
+ */
+ master_node_vertex = device_master_get(xwidget);
+ if (!master_node_vertex) {
+ printk("WARNING: pci_bus_map_create: Unable to get .master for vertex 0x%p\n", (void *)xwidget);
+ }
+
+ hubinfo_get(master_node_vertex, &hubinfo);
+ if (!hubinfo) {
+ printk("WARNING: pci_bus_map_create: Unable to get hubinfo for master node vertex 0x%p\n", (void *)master_node_vertex);
+ return(1);
+ } else {
+ busnum_to_nid[num_bridges - 1] = hubinfo->h_nasid;
+ }
+
+ /*
+ * Pre assign DMA maps needed for 32 Bits Page Map DMA.
+ *
+ * NOTE(review): on kmalloc failure only a warning is printed;
+ * the memset below would then dereference NULL.
+ */
+ busnum_to_atedmamaps[num_bridges - 1] = (void *) kmalloc(
+ sizeof(struct sn_dma_maps_s) * MAX_ATE_MAPS, GFP_KERNEL);
+ if (!busnum_to_atedmamaps[num_bridges - 1])
+ printk("WARNING: pci_bus_map_create: Unable to precreate ATE DMA Maps for busnum %d vertex 0x%p\n", num_bridges - 1, (void *)xwidget);
+
+ memset(busnum_to_atedmamaps[num_bridges - 1], 0x0,
+ sizeof(struct sn_dma_maps_s) * MAX_ATE_MAPS);
+
+ }
+
+ /*
+ * PCIX devices
+ * We number busses differently for PCI-X devices.
+ * We start from Lowest Widget on up ..
+ */
+
+ (void) ioconfig_get_busnum((char *)io_moduleid, &basebus_num);
+
+ for (widgetnum = HUB_WIDGET_ID_MIN; widgetnum <= HUB_WIDGET_ID_MAX; widgetnum++) {
+
+ /* Do both buses */
+ for ( bus = 0; bus < 2; bus++ ) {
+ sprintf(pathname, "%d", widgetnum);
+ xwidget = NULL;
+
+ /*
+ * Example - /hw/module/001c16/Pbrick/xtalk/8 is the xwidget
+ * /hw/module/001c16/Pbrick/xtalk/8/pci-x/0 is the bus
+ * /hw/module/001c16/Pbrick/xtalk/8/pci-x/0/1 is device
+ */
+ rv = hwgraph_traverse(xtalk, pathname, &xwidget);
+ if ( (rv != GRAPH_SUCCESS) ) {
+ if (!xwidget) {
+ continue;
+ }
+ }
+
+ if ( bus == 0 )
+ sprintf(pathname, "%d/"EDGE_LBL_PCIX_0, widgetnum);
+ else
+ sprintf(pathname, "%d/"EDGE_LBL_PCIX_1, widgetnum);
+ pci_bus = NULL;
+ if (hwgraph_traverse(xtalk, pathname, &pci_bus) != GRAPH_SUCCESS)
+ if (!pci_bus) {
+ continue;
+ }
+
+ /*
+ * Assign the correct bus number and also the nasid of this
+ * pci Xwidget.
+ *
+ * Should not be any race here ...
+ */
+ bus_number = basebus_num + bus + io_brick_map_widget(MODULE_PXBRICK, widgetnum);
+#ifdef DEBUG
+ printk("bus_number %d basebus_num %d bus %d io %d\n",
+ bus_number, basebus_num, bus,
+ io_brick_map_widget(MODULE_PXBRICK, widgetnum));
+#endif
+ busnum_to_pcibr_vhdl[bus_number] = pci_bus;
+
+ /*
+ * Pre assign DMA maps needed for 32 Bits Page Map DMA.
+ *
+ * NOTE(review): same unchecked-memset issue as above; also
+ * the warning reports num_bridges - 1 where bus_number
+ * appears to be the bus being set up.
+ */
+ busnum_to_atedmamaps[bus_number] = (void *) kmalloc(
+ sizeof(struct sn_dma_maps_s) * MAX_ATE_MAPS, GFP_KERNEL);
+ if (!busnum_to_atedmamaps[bus_number])
+ printk("WARNING: pci_bus_map_create: Unable to precreate ATE DMA Maps for busnum %d vertex 0x%p\n", num_bridges - 1, (void *)xwidget);
+
+ memset(busnum_to_atedmamaps[bus_number], 0x0,
+ sizeof(struct sn_dma_maps_s) * MAX_ATE_MAPS);
+ }
+ }
+
+ return(0);
+}
+
+/*
+ * pci_bus_to_hcl_cvlink() - This routine is called after SGI IO Infrastructure
+ * initialization has completed to set up the mappings between Xbridge
+ * and logical pci bus numbers. We also set up the NASID for each of these
+ * xbridges.
+ *
+ * Must be called before pci_init() is invoked.
+ *
+ * First resolves which IO brick is attached to each compute module,
+ * then walks every module's P/PX/IX brick xtalk vertex and calls
+ * pci_bus_map_create() for it; finally creates the Linux bus-number
+ * links. Always returns 0.
+ */
+int
+pci_bus_to_hcl_cvlink(void)
+{
+
+ vertex_hdl_t devfs_hdl = NULL;
+ vertex_hdl_t xtalk = NULL;
+ int rv = 0;
+ char name[256];
+ char tmp_name[256];
+ int i, ii, j;
+ char *brick_name;
+ extern void ioconfig_bus_new_entries(void);
+
+ /*
+ * Figure out which IO Brick is connected to the Compute Bricks.
+ */
+ for (i = 0; i < nummodules; i++) {
+ extern int iomoduleid_get(nasid_t);
+ moduleid_t iobrick_id;
+ nasid_t nasid = -1;
+ int nodecnt;
+ int n = 0;
+
+ nodecnt = modules[i]->nodecnt;
+ for ( n = 0; n < nodecnt; n++ ) {
+ nasid = cnodeid_to_nasid(modules[i]->nodes[n]);
+ iobrick_id = iomoduleid_get(nasid);
+ if ((int)iobrick_id > 0) { /* Valid module id */
+ char name[12];
+ memset(name, 0, 12);
+ /* Record the brick's module id for this node. */
+ format_module_id((char *)&(modules[i]->io[n].moduleid), iobrick_id, MODULE_FORMAT_BRIEF);
+ }
+ }
+ }
+
+ devfs_hdl = hwgraph_path_to_vertex("hw/module");
+ /* Try each brick flavor (P, PX, IX) on each of two slabs. */
+ for (i = 0; i < nummodules ; i++) {
+ for ( j = 0; j < 3; j++ ) {
+ if ( j == 0 )
+ brick_name = EDGE_LBL_PBRICK;
+ else if ( j == 1 )
+ brick_name = EDGE_LBL_PXBRICK;
+ else
+ brick_name = EDGE_LBL_IXBRICK;
+
+ for ( ii = 0; ii < 2 ; ii++ ) {
+ memset(name, 0, 256);
+ memset(tmp_name, 0, 256);
+ format_module_id(name, modules[i]->id, MODULE_FORMAT_BRIEF);
+ sprintf(tmp_name, "/slab/%d/%s/xtalk", geo_slab(modules[i]->geoid[ii]), brick_name);
+ strcat(name, tmp_name);
+ xtalk = NULL;
+ rv = hwgraph_edge_get(devfs_hdl, name, &xtalk);
+ if ( rv == 0 )
+ pci_bus_map_create(xtalk, (char *)&(modules[i]->io[ii].moduleid));
+ }
+ }
+ }
+
+ /*
+ * Create the Linux PCI bus number vertex link.
+ */
+ (void)linux_bus_cvlink();
+ (void)ioconfig_bus_new_entries();
+
+ return(0);
+}
--- /dev/null
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2000,2002-2003 Silicon Graphics, Inc. All rights reserved.
+ *
+ * Routines for PCI DMA mapping. See Documentation/DMA-mapping.txt for
+ * a description of how these routines should be used.
+ */
+
+#include <linux/config.h>
+#include <linux/types.h>
+#include <linux/mm.h>
+#include <linux/string.h>
+#include <linux/pci.h>
+#include <linux/slab.h>
+#include <linux/devfs_fs_kernel.h>
+#include <linux/module.h>
+
+#include <asm/delay.h>
+#include <asm/io.h>
+#include <asm/sn/sgi.h>
+#include <asm/sn/io.h>
+#include <asm/sn/invent.h>
+#include <asm/sn/hcl.h>
+#include <asm/sn/pci/pcibr.h>
+#include <asm/sn/pci/pcibr_private.h>
+#include <asm/sn/driver.h>
+#include <asm/sn/types.h>
+#include <asm/sn/alenlist.h>
+#include <asm/sn/pci/pci_bus_cvlink.h>
+#include <asm/sn/nag.h>
+
+/*
+ * For ATE allocations
+ */
+pciio_dmamap_t get_free_pciio_dmamap(vertex_hdl_t);
+void free_pciio_dmamap(pcibr_dmamap_t);
+static struct sn_dma_maps_s *find_sn_dma_map(dma_addr_t, unsigned char);
+void sn_pci_unmap_sg(struct pci_dev *hwdev, struct scatterlist *sg, int nents, int direction);
+
+/*
+ * Toplogy stuff
+ */
+extern vertex_hdl_t busnum_to_pcibr_vhdl[];
+extern nasid_t busnum_to_nid[];
+extern void * busnum_to_atedmamaps[];
+
+/**
+ * get_free_pciio_dmamap - find and allocate an ATE
+ * @pci_bus: PCI bus to get an entry for
+ *
+ * Finds and allocates an ATE on the PCI bus specified
+ * by @pci_bus. The entry is claimed by setting its dma_addr to -1
+ * (a free entry has dma_addr == 0). Returns NULL if no entry is free.
+ *
+ * NOTE(review): if @pci_bus matches no busnum_to_pcibr_vhdl[] entry,
+ * sn_dma_map stays NULL and the second loop dereferences it.
+ */
+pciio_dmamap_t
+get_free_pciio_dmamap(vertex_hdl_t pci_bus)
+{
+ int i;
+ struct sn_dma_maps_s *sn_dma_map = NULL;
+
+ /*
+ * Darn, we need to get the maps allocated for this bus.
+ */
+ for (i = 0; i < MAX_PCI_XWIDGET; i++) {
+ if (busnum_to_pcibr_vhdl[i] == pci_bus) {
+ sn_dma_map = busnum_to_atedmamaps[i];
+ }
+ }
+
+ /*
+ * Now get a free dmamap entry from this list.
+ */
+ for (i = 0; i < MAX_ATE_MAPS; i++, sn_dma_map++) {
+ if (!sn_dma_map->dma_addr) {
+ sn_dma_map->dma_addr = -1;
+ return( (pciio_dmamap_t) sn_dma_map );
+ }
+ }
+
+ return NULL;
+}
+
+/**
+ * free_pciio_dmamap - free an ATE
+ * @dma_map: ATE to free
+ *
+ * Frees the ATE specified by @dma_map by clearing its dma_addr
+ * (0 marks the entry as free for get_free_pciio_dmamap()).
+ */
+void
+free_pciio_dmamap(pcibr_dmamap_t dma_map)
+{
+ struct sn_dma_maps_s *sn_dma_map;
+
+ sn_dma_map = (struct sn_dma_maps_s *) dma_map;
+ sn_dma_map->dma_addr = 0;
+}
+
+/**
+ * find_sn_dma_map - find an ATE associated with @dma_addr and @busnum
+ * @dma_addr: DMA address to look for
+ * @busnum: PCI bus to look on
+ *
+ * Finds the ATE associated with @dma_addr and @busnum by a linear
+ * scan of that bus's preallocated map array. Returns NULL if no
+ * entry matches.
+ */
+static struct sn_dma_maps_s *
+find_sn_dma_map(dma_addr_t dma_addr, unsigned char busnum)
+{
+
+ struct sn_dma_maps_s *sn_dma_map = NULL;
+ int i;
+
+ sn_dma_map = busnum_to_atedmamaps[busnum];
+
+ for (i = 0; i < MAX_ATE_MAPS; i++, sn_dma_map++) {
+ if (sn_dma_map->dma_addr == dma_addr) {
+ return sn_dma_map;
+ }
+ }
+
+ return NULL;
+}
+
+/**
+ * sn_pci_alloc_consistent - allocate memory for coherent DMA
+ * @hwdev: device to allocate for
+ * @size: size of the region
+ * @dma_handle: DMA (bus) address
+ *
+ * pci_alloc_consistent() returns a pointer to a memory region suitable for
+ * coherent DMA traffic to/from a PCI device. On SN platforms, this means
+ * that @dma_handle will have the %PCIIO_DMA_CMD flag set.
+ *
+ * This interface is usually used for "command" streams (e.g. the command
+ * queue for a SCSI controller). See Documentation/DMA-mapping.txt for
+ * more information. Note that this routine will always put a 32 bit
+ * DMA address into @dma_handle. This is because most devices
+ * that are capable of 64 bit PCI DMA transactions can't do 64 bit _coherent_
+ * DMAs, and unfortunately this interface has to cater to the LCD. Oh well.
+ *
+ * Also known as platform_pci_alloc_consistent() by the IA64 machvec code.
+ */
+void *
+sn_pci_alloc_consistent(struct pci_dev *hwdev, size_t size, dma_addr_t *dma_handle)
+{
+ void *cpuaddr;
+ vertex_hdl_t vhdl;
+ struct sn_device_sysdata *device_sysdata;
+ unsigned long phys_addr;
+ pciio_dmamap_t dma_map = 0;
+ struct sn_dma_maps_s *sn_dma_map;
+
+ *dma_handle = 0;
+
+ /* We can't easily support < 32 bit devices */
+ if (IS_PCI32L(hwdev))
+ return NULL;
+
+ /*
+ * Get hwgraph vertex for the device
+ */
+ device_sysdata = (struct sn_device_sysdata *) hwdev->sysdata;
+ vhdl = device_sysdata->vhdl;
+
+ /*
+ * Allocate the memory. FIXME: if we're allocating for
+ * two devices on the same bus, we should at least try to
+ * allocate memory in the same 2 GB window to avoid using
+ * ATEs for the translation. See the comment above about the
+ * 32 bit requirement for this function.
+ */
+ if(!(cpuaddr = (void *)__get_free_pages(GFP_ATOMIC, get_order(size))))
+ return NULL;
+
+ memset(cpuaddr, 0, size); /* have to zero it out */
+
+ /* physical addr. of the memory we just got */
+ phys_addr = __pa(cpuaddr);
+
+ /*
+ * This will try to use a Direct Map register to do the
+ * 32 bit DMA mapping, but it may not succeed if another
+ * device on the same bus is already mapped with different
+ * attributes or to a different memory region.
+ */
+ *dma_handle = pciio_dmatrans_addr(vhdl, NULL, phys_addr, size,
+ ((IS_PIC_DEVICE(hwdev)) ? 0 : PCIIO_BYTE_STREAM) |
+ PCIIO_DMA_CMD);
+
+ /*
+ * It is a 32 bit card and we cannot do direct mapping,
+ * so we try to use an ATE.
+ */
+ if (!(*dma_handle)) {
+ dma_map = pciio_dmamap_alloc(vhdl, NULL, size,
+ ((IS_PIC_DEVICE(hwdev)) ? 0 : PCIIO_BYTE_STREAM) |
+ PCIIO_DMA_CMD);
+ if (!dma_map) {
+ /*
+ * NOTE(review): cpuaddr is not freed on this error
+ * path — the pages allocated above are leaked.
+ */
+ printk(KERN_ERR "sn_pci_alloc_consistent: Unable to "
+ "allocate anymore 32 bit page map entries.\n");
+ return 0;
+ }
+ *dma_handle = (dma_addr_t) pciio_dmamap_addr(dma_map,phys_addr,
+ size);
+ sn_dma_map = (struct sn_dma_maps_s *)dma_map;
+ sn_dma_map->dma_addr = *dma_handle;
+ }
+
+ return cpuaddr;
+}
+
+/**
+ * sn_pci_free_consistent - free memory associated with coherent DMAable region
+ * @hwdev: device to free for
+ * @size: size to free
+ * @vaddr: kernel virtual address to free
+ * @dma_handle: DMA address associated with this region
+ *
+ * Frees the memory allocated by pci_alloc_consistent(). If @dma_handle
+ * went through an ATE (PCI32 mapped), the ATE is released first; the
+ * backing pages are always freed. Also known
+ * as platform_pci_free_consistent() by the IA64 machvec code.
+ */
+void
+sn_pci_free_consistent(struct pci_dev *hwdev, size_t size, void *vaddr, dma_addr_t dma_handle)
+{
+ struct sn_dma_maps_s *sn_dma_map = NULL;
+
+ /*
+ * Get the sn_dma_map entry.
+ */
+ if (IS_PCI32_MAPPED(dma_handle))
+ sn_dma_map = find_sn_dma_map(dma_handle, hwdev->bus->number);
+
+ /*
+ * and free it if necessary...
+ */
+ if (sn_dma_map) {
+ pciio_dmamap_done((pciio_dmamap_t)sn_dma_map);
+ pciio_dmamap_free((pciio_dmamap_t)sn_dma_map);
+ sn_dma_map->dma_addr = (dma_addr_t)NULL;
+ }
+ free_pages((unsigned long) vaddr, get_order(size));
+}
+
+/**
+ * sn_pci_map_sg - map a scatter-gather list for DMA
+ * @hwdev: device to map for
+ * @sg: scatterlist to map
+ * @nents: number of entries
+ * @direction: direction of the DMA transaction
+ *
+ * Maps each entry of @sg for DMA. Also known as platform_pci_map_sg by the
+ * IA64 machvec code.
+ *
+ * For each entry, tries in order: 64-bit direct translation (A64-capable
+ * devices), 32-bit direct-map translation, then an ATE. Returns @nents
+ * on success; on ATE exhaustion unmaps everything done so far and
+ * returns 0.
+ */
+int
+sn_pci_map_sg(struct pci_dev *hwdev, struct scatterlist *sg, int nents, int direction)
+{
+
+ int i;
+ vertex_hdl_t vhdl;
+ unsigned long phys_addr;
+ struct sn_device_sysdata *device_sysdata;
+ pciio_dmamap_t dma_map;
+ struct sn_dma_maps_s *sn_dma_map;
+ struct scatterlist *saved_sg = sg;
+
+ /* can't go anywhere w/o a direction in life */
+ if (direction == PCI_DMA_NONE)
+ BUG();
+
+ /*
+ * Get the hwgraph vertex for the device
+ */
+ device_sysdata = (struct sn_device_sysdata *) hwdev->sysdata;
+ vhdl = device_sysdata->vhdl;
+
+ /*
+ * Setup a DMA address for each entry in the
+ * scatterlist.
+ */
+ for (i = 0; i < nents; i++, sg++) {
+ /* dma_address doubles as the input address if pre-set. */
+ phys_addr = __pa(sg->dma_address ? sg->dma_address :
+ (unsigned long)page_address(sg->page) + sg->offset);
+
+ /*
+ * Handle the most common case: 64 bit cards. This
+ * call should always succeed.
+ */
+ if (IS_PCIA64(hwdev)) {
+ sg->dma_address = pciio_dmatrans_addr(vhdl, NULL, phys_addr,
+ sg->length,
+ ((IS_PIC_DEVICE(hwdev)) ? 0 : PCIIO_BYTE_STREAM) |
+ PCIIO_DMA_DATA |
+ PCIIO_DMA_A64);
+ sg->dma_length = sg->length;
+ continue;
+ }
+
+ /*
+ * Handle 32-63 bit cards via direct mapping
+ */
+ if (IS_PCI32G(hwdev)) {
+ sg->dma_address = pciio_dmatrans_addr(vhdl, NULL, phys_addr,
+ sg->length,
+ ((IS_PIC_DEVICE(hwdev)) ? 0 : PCIIO_BYTE_STREAM) |
+ PCIIO_DMA_DATA);
+ sg->dma_length = sg->length;
+ /*
+ * See if we got a direct map entry
+ */
+ if (sg->dma_address) {
+ continue;
+ }
+
+ }
+
+ /*
+ * It is a 32 bit card and we cannot do direct mapping,
+ * so we use an ATE.
+ */
+ dma_map = pciio_dmamap_alloc(vhdl, NULL, sg->length,
+ ((IS_PIC_DEVICE(hwdev)) ? 0 : PCIIO_BYTE_STREAM) |
+ PCIIO_DMA_DATA);
+ if (!dma_map) {
+ printk(KERN_ERR "sn_pci_map_sg: Unable to allocate "
+ "anymore 32 bit page map entries.\n");
+ /*
+ * We will need to free all previously allocated entries.
+ */
+ if (i > 0) {
+ sn_pci_unmap_sg(hwdev, saved_sg, i, direction);
+ }
+ return (0);
+ }
+
+ sg->dma_address = pciio_dmamap_addr(dma_map, phys_addr, sg->length);
+ sg->dma_length = sg->length;
+ sn_dma_map = (struct sn_dma_maps_s *)dma_map;
+ sn_dma_map->dma_addr = sg->dma_address;
+ }
+
+ return nents;
+
+}
+
+/**
+ * sn_pci_unmap_sg - unmap a scatter-gather list
+ * @hwdev: device to unmap
+ * @sg: scatterlist to unmap
+ * @nents: number of scatterlist entries
+ * @direction: DMA direction
+ *
+ * Unmap a set of streaming mode DMA translations. Again, cpu read rules
+ * concerning calls here are the same as for pci_unmap_single() below. Also
+ * known as sn_pci_unmap_sg() by the IA64 machvec code.
+ *
+ * Entries that went through an ATE (PCI32 mapped) have the ATE released;
+ * every entry's dma_address/dma_length is cleared.
+ */
+void
+sn_pci_unmap_sg(struct pci_dev *hwdev, struct scatterlist *sg, int nents, int direction)
+{
+ int i;
+ struct sn_dma_maps_s *sn_dma_map;
+
+ /* can't go anywhere w/o a direction in life */
+ if (direction == PCI_DMA_NONE)
+ BUG();
+
+ for (i = 0; i < nents; i++, sg++){
+
+ if (IS_PCI32_MAPPED(sg->dma_address)) {
+ sn_dma_map = NULL;
+ sn_dma_map = find_sn_dma_map(sg->dma_address, hwdev->bus->number);
+ if (sn_dma_map) {
+ pciio_dmamap_done((pciio_dmamap_t)sn_dma_map);
+ pciio_dmamap_free((pciio_dmamap_t)sn_dma_map);
+ sn_dma_map->dma_addr = (dma_addr_t)NULL;
+ }
+ }
+
+ sg->dma_address = (dma_addr_t)NULL;
+ sg->dma_length = 0;
+ }
+}
+
+/**
+ * sn_pci_map_single - map a single region for DMA
+ * @hwdev: device to map for
+ * @ptr: kernel virtual address of the region to map
+ * @size: size of the region
+ * @direction: DMA direction
+ *
+ * Map the region pointed to by @ptr for DMA and return the
+ * DMA address. Also known as platform_pci_map_single() by
+ * the IA64 machvec code.
+ *
+ * We map this to the one step pciio_dmamap_trans interface rather than
+ * the two step pciio_dmamap_alloc/pciio_dmamap_addr because we have
+ * no way of saving the dmamap handle from the alloc to later free
+ * (which is pretty much unacceptable).
+ *
+ * TODO: simplify our interface;
+ * get rid of dev_desc and vhdl (seems redundant given a pci_dev);
+ * figure out how to save dmamap handle so can use two step.
+ *
+ * Same fallback chain as sn_pci_map_sg(): A64 direct translation,
+ * then 32-bit direct map, then an ATE. Returns 0 on failure.
+ */
+dma_addr_t
+sn_pci_map_single(struct pci_dev *hwdev, void *ptr, size_t size, int direction)
+{
+ vertex_hdl_t vhdl;
+ dma_addr_t dma_addr;
+ unsigned long phys_addr;
+ struct sn_device_sysdata *device_sysdata;
+ pciio_dmamap_t dma_map = NULL;
+ struct sn_dma_maps_s *sn_dma_map;
+
+ if (direction == PCI_DMA_NONE)
+ BUG();
+
+ /* SN cannot support DMA addresses smaller than 32 bits. */
+ if (IS_PCI32L(hwdev))
+ return 0;
+
+ /*
+ * find vertex for the device
+ */
+ device_sysdata = (struct sn_device_sysdata *)hwdev->sysdata;
+ vhdl = device_sysdata->vhdl;
+
+ /*
+ * Call our dmamap interface
+ */
+ dma_addr = 0;
+ phys_addr = __pa(ptr);
+
+ if (IS_PCIA64(hwdev)) {
+ /* This device supports 64 bit DMA addresses. */
+ dma_addr = pciio_dmatrans_addr(vhdl, NULL, phys_addr, size,
+ ((IS_PIC_DEVICE(hwdev)) ? 0 : PCIIO_BYTE_STREAM) |
+ PCIIO_DMA_DATA |
+ PCIIO_DMA_A64);
+ return dma_addr;
+ }
+
+ /*
+ * Devices that support 32 bit to 63 bit DMA addresses get
+ * 32 bit DMA addresses.
+ *
+ * First try to get a 32 bit direct map register.
+ */
+ if (IS_PCI32G(hwdev)) {
+ dma_addr = pciio_dmatrans_addr(vhdl, NULL, phys_addr, size,
+ ((IS_PIC_DEVICE(hwdev)) ? 0 : PCIIO_BYTE_STREAM) |
+ PCIIO_DMA_DATA);
+ if (dma_addr)
+ return dma_addr;
+ }
+
+ /*
+ * It's a 32 bit card and we cannot do direct mapping so
+ * let's use the PMU instead.
+ */
+ dma_map = NULL;
+ dma_map = pciio_dmamap_alloc(vhdl, NULL, size,
+ ((IS_PIC_DEVICE(hwdev)) ? 0 : PCIIO_BYTE_STREAM) |
+ PCIIO_DMA_DATA);
+
+ if (!dma_map) {
+ printk(KERN_ERR "pci_map_single: Unable to allocate anymore "
+ "32 bit page map entries.\n");
+ return 0;
+ }
+
+ dma_addr = (dma_addr_t) pciio_dmamap_addr(dma_map, phys_addr, size);
+ /* Remember the mapping so sn_pci_unmap_single() can find it. */
+ sn_dma_map = (struct sn_dma_maps_s *)dma_map;
+ sn_dma_map->dma_addr = dma_addr;
+
+ return ((dma_addr_t)dma_addr);
+}
+
+/**
+ * sn_pci_unmap_single - unmap a region used for DMA
+ * @hwdev: device to unmap
+ * @dma_addr: DMA address to unmap
+ * @size: size of region
+ * @direction: DMA direction
+ *
+ * Unmaps the region pointed to by @dma_addr. If the address went
+ * through an ATE (PCI32 mapped), the ATE is released. Also known as
+ * platform_pci_unmap_single() by the IA64 machvec code.
+ */
+void
+sn_pci_unmap_single(struct pci_dev *hwdev, dma_addr_t dma_addr, size_t size, int direction)
+{
+ struct sn_dma_maps_s *sn_dma_map = NULL;
+
+ if (direction == PCI_DMA_NONE)
+ BUG();
+
+ /*
+ * Get the sn_dma_map entry.
+ */
+ if (IS_PCI32_MAPPED(dma_addr))
+ sn_dma_map = find_sn_dma_map(dma_addr, hwdev->bus->number);
+
+ /*
+ * and free it if necessary...
+ */
+ if (sn_dma_map) {
+ pciio_dmamap_done((pciio_dmamap_t)sn_dma_map);
+ pciio_dmamap_free((pciio_dmamap_t)sn_dma_map);
+ sn_dma_map->dma_addr = (dma_addr_t)NULL;
+ }
+}
+
+/**
+ * sn_pci_dma_sync_single - make sure all DMAs have completed
+ * @hwdev: device to sync
+ * @dma_handle: DMA address to sync
+ * @size: size of region
+ * @direction: DMA direction
+ *
+ * This routine is supposed to sync the DMA region specified
+ * by @dma_handle into the 'coherence domain'. We do not need to do
+ * anything on our platform.
+ */
+void
+sn_pci_dma_sync_single(struct pci_dev *hwdev, dma_addr_t dma_handle, size_t size, int direction)
+{
+ return;
+
+}
+
+/**
+ * sn_pci_dma_sync_sg - make sure all DMAs have completed
+ * @hwdev: device to sync
+ * @sg: scatterlist to sync
+ * @nents: number of entries in the scatterlist
+ * @direction: DMA direction
+ *
+ * This routine is supposed to sync the DMA regions specified
+ * by @sg into the 'coherence domain'. We do not need to do anything
+ * on our platform.
+ */
+void
+sn_pci_dma_sync_sg(struct pci_dev *hwdev, struct scatterlist *sg, int nents, int direction)
+{
+ return;
+
+}
+
+/**
+ * sn_dma_supported - test a DMA mask
+ * @hwdev: device to test
+ * @mask: DMA mask to test
+ *
+ * Return whether the given PCI device DMA address mask can be supported
+ * properly. For example, if your device can only drive the low 24-bits
+ * during PCI bus mastering, then you would pass 0x00ffffff as the mask to
+ * this function. Of course, SN only supports devices that have 32 or more
+ * address bits when using the PMU. We could theoretically support <32 bit
+ * cards using direct mapping, but we'll worry about that later--on the off
+ * chance that someone actually wants to use such a card.
+ */
+int
+sn_pci_dma_supported(struct pci_dev *hwdev, u64 mask)
+{
+ if (mask < 0xffffffff)
+ return 0;
+ return 1;
+}
+
+#ifdef CONFIG_PCI
+
+/*
+ * New generic DMA routines just wrap sn2 PCI routines until we
+ * support other bus types (if ever).
+ */
+
+int
+sn_dma_supported(struct device *dev, u64 mask)
+{
+ BUG_ON(dev->bus != &pci_bus_type);
+
+ return sn_pci_dma_supported(to_pci_dev(dev), mask);
+}
+EXPORT_SYMBOL(sn_dma_supported);
+
+int
+sn_dma_set_mask(struct device *dev, u64 dma_mask)
+{
+ BUG_ON(dev->bus != &pci_bus_type);
+
+ if (!sn_dma_supported(to_pci_dev(dev), dma_mask))
+ return 0;
+
+ dev->dma_mask = dma_mask;
+ return 1;
+}
+EXPORT_SYMBOL(sn_dma_set_mask);
+
+void *
+sn_dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
+ int flag)
+{
+ BUG_ON(dev->bus != &pci_bus_type);
+
+ return sn_pci_alloc_consistent(to_pci_dev(dev), size, dma_handle);
+}
+EXPORT_SYMBOL(sn_dma_alloc_coherent);
+
+void
+sn_dma_free_coherent(struct device *dev, size_t size, void *cpu_addr,
+ dma_addr_t dma_handle)
+{
+ BUG_ON(dev->bus != &pci_bus_type);
+
+ sn_pci_free_consistent(to_pci_dev(dev), size, cpu_addr, dma_handle);
+}
+EXPORT_SYMBOL(sn_dma_free_coherent);
+
+dma_addr_t
+sn_dma_map_single(struct device *dev, void *cpu_addr, size_t size,
+ int direction)
+{
+ BUG_ON(dev->bus != &pci_bus_type);
+
+ return sn_pci_map_single(to_pci_dev(dev), cpu_addr, size, (int)direction);
+}
+EXPORT_SYMBOL(sn_dma_map_single);
+
+void
+sn_dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
+ int direction)
+{
+ BUG_ON(dev->bus != &pci_bus_type);
+
+ sn_pci_unmap_single(to_pci_dev(dev), dma_addr, size, (int)direction);
+}
+EXPORT_SYMBOL(sn_dma_unmap_single);
+
+dma_addr_t
+sn_dma_map_page(struct device *dev, struct page *page,
+ unsigned long offset, size_t size,
+ int direction)
+{
+ BUG_ON(dev->bus != &pci_bus_type);
+
+ return pci_map_page(to_pci_dev(dev), page, offset, size, (int)direction);
+}
+EXPORT_SYMBOL(sn_dma_map_page);
+
+void
+sn_dma_unmap_page(struct device *dev, dma_addr_t dma_address, size_t size,
+ int direction)
+{
+ BUG_ON(dev->bus != &pci_bus_type);
+
+ pci_unmap_page(to_pci_dev(dev), dma_address, size, (int)direction);
+}
+EXPORT_SYMBOL(sn_dma_unmap_page);
+
+int
+sn_dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
+ int direction)
+{
+ BUG_ON(dev->bus != &pci_bus_type);
+
+ return sn_pci_map_sg(to_pci_dev(dev), sg, nents, (int)direction);
+}
+EXPORT_SYMBOL(sn_dma_map_sg);
+
+void
+sn_dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nhwentries,
+ int direction)
+{
+ BUG_ON(dev->bus != &pci_bus_type);
+
+ sn_pci_unmap_sg(to_pci_dev(dev), sg, nhwentries, (int)direction);
+}
+EXPORT_SYMBOL(sn_dma_unmap_sg);
+
+void
+sn_dma_sync_single(struct device *dev, dma_addr_t dma_handle, size_t size,
+ int direction)
+{
+ BUG_ON(dev->bus != &pci_bus_type);
+
+ sn_pci_dma_sync_single(to_pci_dev(dev), dma_handle, size, (int)direction);
+}
+EXPORT_SYMBOL(sn_dma_sync_single);
+
+void
+sn_dma_sync_sg(struct device *dev, struct scatterlist *sg, int nelems,
+ int direction)
+{
+ BUG_ON(dev->bus != &pci_bus_type);
+
+ sn_pci_dma_sync_sg(to_pci_dev(dev), sg, nelems, (int)direction);
+}
+EXPORT_SYMBOL(sn_dma_sync_sg);
+
+#endif /* CONFIG_PCI */
+
+EXPORT_SYMBOL(sn_pci_unmap_single);
+EXPORT_SYMBOL(sn_pci_map_single);
+EXPORT_SYMBOL(sn_pci_dma_sync_single);
+EXPORT_SYMBOL(sn_pci_map_sg);
+EXPORT_SYMBOL(sn_pci_unmap_sg);
+EXPORT_SYMBOL(sn_pci_alloc_consistent);
+EXPORT_SYMBOL(sn_pci_free_consistent);
+EXPORT_SYMBOL(sn_pci_dma_supported);
+
+++ /dev/null
-/* $Id$
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Copyright (C) 1992 - 1997, 2000-2002 Silicon Graphics, Inc. All rights reserved.
- */
-
-#include <linux/types.h>
-#include <linux/config.h>
-#include <linux/slab.h>
-#include <linux/bootmem.h>
-#include <asm/sn/sgi.h>
-#include <asm/sn/io.h>
-#include <asm/sn/iograph.h>
-#include <asm/sn/invent.h>
-#include <asm/sn/hcl.h>
-#include <asm/sn/labelcl.h>
-#include <asm/sn/sn_private.h>
-#include <asm/sn/klconfig.h>
-#include <asm/sn/sn_cpuid.h>
-#include <asm/sn/snconfig.h>
-
-extern int numcpus;
-extern char arg_maxnodes[];
-extern cpuid_t master_procid;
-#if defined(CONFIG_IA64_SGI_SN1)
-extern synergy_da_t *Synergy_da_indr[];
-#endif
-
-extern int hasmetarouter;
-
-int maxcpus;
-cpumask_t boot_cpumask;
-hubreg_t region_mask = 0;
-
-
-extern xwidgetnum_t hub_widget_id(nasid_t);
-
-extern int valid_icache_reasons; /* Reasons to flush the icache */
-extern int valid_dcache_reasons; /* Reasons to flush the dcache */
-extern u_char miniroot;
-extern volatile int need_utlbmiss_patch;
-extern void iograph_early_init(void);
-
-nasid_t master_nasid = INVALID_NASID;
-
-
-/*
- * mlreset(int slave)
- * very early machine reset - at this point NO interrupts have been
- * enabled; nor is memory, tlb, p0, etc setup.
- *
- * slave is zero when mlreset is called for the master processor and
- * is nonzero thereafter.
- */
-
-
-void
-mlreset(int slave)
-{
- if (!slave) {
- /*
- * We are the master cpu and node.
- */
- master_nasid = get_nasid();
- set_master_bridge_base();
-
- /* We're the master processor */
- master_procid = smp_processor_id();
- master_nasid = cpuid_to_nasid(master_procid);
-
- /*
- * master_nasid we get back better be same as one from
- * get_nasid()
- */
- ASSERT_ALWAYS(master_nasid == get_nasid());
-
- /* early initialization of iograph */
- iograph_early_init();
-
- /* Initialize Hub Pseudodriver Management */
- hubdev_init();
-
- } else { /* slave != 0 */
- /*
- * This code is performed ONLY by slave processors.
- */
-
- }
-}
-
-
-/* XXX - Move the meat of this to intr.c ? */
-/*
- * Set up the platform-dependent fields in the nodepda.
- */
-void init_platform_nodepda(nodepda_t *npda, cnodeid_t node)
-{
- hubinfo_t hubinfo;
-#ifdef CONFIG_IA64_SGI_SN1
- int sn;
-#endif
-
- extern void router_map_init(nodepda_t *);
- extern void router_queue_init(nodepda_t *,cnodeid_t);
- extern void intr_init_vecblk(nodepda_t *, cnodeid_t, int);
-
- /* Allocate per-node platform-dependent data */
- hubinfo = (hubinfo_t)alloc_bootmem_node(NODE_DATA(node), sizeof(struct hubinfo_s));
-
- npda->pdinfo = (void *)hubinfo;
- hubinfo->h_nodepda = npda;
- hubinfo->h_cnodeid = node;
- hubinfo->h_nasid = COMPACT_TO_NASID_NODEID(node);
-
- spin_lock_init(&hubinfo->h_crblock);
-
- hubinfo->h_widgetid = hub_widget_id(hubinfo->h_nasid);
- npda->xbow_peer = INVALID_NASID;
-
- /*
- * Initialize the linked list of
- * router info pointers to the dependent routers
- */
- npda->npda_rip_first = NULL;
-
- /*
- * npda_rip_last always points to the place
- * where the next element is to be inserted
- * into the list
- */
- npda->npda_rip_last = &npda->npda_rip_first;
- npda->module_id = INVALID_MODULE;
-
-#ifdef CONFIG_IA64_SGI_SN1
- /*
- * Initialize the interrupts.
- * On sn2, this is done at pci init time,
- * because sn2 needs the cpus checked in
- * when it initializes interrupts. This is
- * so we don't see all the nodes as headless.
- */
- for (sn=0; sn<NUM_SUBNODES; sn++) {
- intr_init_vecblk(npda, node, sn);
- }
-#endif /* CONFIG_IA64_SGI_SN1 */
-
- mutex_init_locked(&npda->xbow_sema); /* init it locked? */
-
-#ifdef LATER
-
- /* Setup the (module,slot) --> nic mapping for all the routers
- * in the system. This is useful during error handling when
- * there is no shared memory.
- */
- router_map_init(npda);
-
- /* Allocate memory for the per-node router traversal queue */
- router_queue_init(npda,node);
- npda->sbe_info = alloc_bootmem_node(NODE_DATA(node), sizeof (sbe_info_t));
- ASSERT(npda->sbe_info);
-
-#endif /* LATER */
-}
-
-/* XXX - Move the interrupt stuff to intr.c ? */
-/*
- * Set up the platform-dependent fields in the processor pda.
- * Must be done _after_ init_platform_nodepda().
- * If we need a lock here, something else is wrong!
- */
-void init_platform_pda(cpuid_t cpu)
-{
-#if defined(CONFIG_IA64_SGI_SN1)
- hub_intmasks_t *intmasks;
- int i, subnode;
- cnodeid_t cnode;
- synergy_da_t *sda;
- int which_synergy;
-
-
- cnode = cpuid_to_cnodeid(cpu);
- which_synergy = cpuid_to_synergy(cpu);
-
- sda = Synergy_da_indr[(cnode * 2) + which_synergy];
- intmasks = &sda->s_intmasks;
-
- /* Clear INT_PEND0 masks. */
- for (i = 0; i < N_INTPEND0_MASKS; i++)
- intmasks->intpend0_masks[i] = 0;
-
- /* Set up pointer to the vector block in the nodepda. */
- /* (Cant use SUBNODEPDA - not working yet) */
- subnode = cpuid_to_subnode(cpu);
- intmasks->dispatch0 = &NODEPDA(cnode)->snpda[cpuid_to_subnode(cpu)].intr_dispatch0;
- intmasks->dispatch1 = &NODEPDA(cnode)->snpda[cpuid_to_subnode(cpu)].intr_dispatch1;
- if (intmasks->dispatch0 != &SUBNODEPDA(cnode, subnode)->intr_dispatch0 ||
- intmasks->dispatch1 != &SUBNODEPDA(cnode, subnode)->intr_dispatch1)
- panic("xxx");
- intmasks->dispatch0 = &SUBNODEPDA(cnode, subnode)->intr_dispatch0;
- intmasks->dispatch1 = &SUBNODEPDA(cnode, subnode)->intr_dispatch1;
-
- /* Clear INT_PEND1 masks. */
- for (i = 0; i < N_INTPEND1_MASKS; i++)
- intmasks->intpend1_masks[i] = 0;
-#endif /* CONFIG_IA64_SGI_SN1 */
-}
-
-void
-update_node_information(cnodeid_t cnodeid)
-{
- nodepda_t *npda = NODEPDA(cnodeid);
- nodepda_router_info_t *npda_rip;
-
- /* Go through the list of router info
- * structures and copy some frequently
- * accessed info from the info hanging
- * off the corresponding router vertices
- */
- npda_rip = npda->npda_rip_first;
- while(npda_rip) {
- if (npda_rip->router_infop) {
- npda_rip->router_portmask =
- npda_rip->router_infop->ri_portmask;
- npda_rip->router_slot =
- npda_rip->router_infop->ri_slotnum;
- } else {
- /* No router, no ports. */
- npda_rip->router_portmask = 0;
- }
- npda_rip = npda_rip->router_next;
- }
-}
+++ /dev/null
-/* $Id$
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Copyright (C) 1992 - 1997, 2000-2002 Silicon Graphics, Inc. All rights reserved.
- */
-
-#include <linux/types.h>
-#include <linux/config.h>
-#include <linux/slab.h>
-#include <linux/ctype.h>
-#include <asm/sn/sgi.h>
-#include <asm/sn/sn_sal.h>
-#include <asm/sn/io.h>
-#include <asm/sn/sn_cpuid.h>
-#include <asm/sn/iograph.h>
-#include <asm/sn/invent.h>
-#include <asm/sn/hcl.h>
-#include <asm/sn/hcl_util.h>
-#include <asm/sn/labelcl.h>
-#include <asm/sn/xtalk/xbow.h>
-#include <asm/sn/pci/bridge.h>
-#include <asm/sn/klconfig.h>
-#include <asm/sn/eeprom.h>
-#include <asm/sn/sn_private.h>
-#include <asm/sn/pci/pcibr.h>
-#include <asm/sn/xtalk/xtalk.h>
-#include <asm/sn/xtalk/xswitch.h>
-#include <asm/sn/xtalk/xwidget.h>
-#include <asm/sn/xtalk/xtalk_private.h>
-#include <asm/sn/xtalk/xtalkaddrs.h>
-
-/* #define IOGRAPH_DEBUG */
-#ifdef IOGRAPH_DEBUG
-#define DBG(x...) printk(x)
-#else
-#define DBG(x...)
-#endif /* IOGRAPH_DEBUG */
-
-/* #define PROBE_TEST */
-
-/* At most 2 hubs can be connected to an xswitch */
-#define NUM_XSWITCH_VOLUNTEER 2
-
-/*
- * Track which hubs have volunteered to manage devices hanging off of
- * a Crosstalk Switch (e.g. xbow). This structure is allocated,
- * initialized, and hung off the xswitch vertex early on when the
- * xswitch vertex is created.
- */
-typedef struct xswitch_vol_s {
- mutex_t xswitch_volunteer_mutex;
- int xswitch_volunteer_count;
- devfs_handle_t xswitch_volunteer[NUM_XSWITCH_VOLUNTEER];
-} *xswitch_vol_t;
-
-void
-xswitch_vertex_init(devfs_handle_t xswitch)
-{
- xswitch_vol_t xvolinfo;
- int rc;
-
- xvolinfo = kmalloc(sizeof(struct xswitch_vol_s), GFP_KERNEL);
- mutex_init(&xvolinfo->xswitch_volunteer_mutex);
- xvolinfo->xswitch_volunteer_count = 0;
- rc = hwgraph_info_add_LBL(xswitch,
- INFO_LBL_XSWITCH_VOL,
- (arbitrary_info_t)xvolinfo);
- ASSERT(rc == GRAPH_SUCCESS); rc = rc;
-}
-
-
-/*
- * When assignment of hubs to widgets is complete, we no longer need the
- * xswitch volunteer structure hanging around. Destroy it.
- */
-static void
-xswitch_volunteer_delete(devfs_handle_t xswitch)
-{
- xswitch_vol_t xvolinfo;
- int rc;
-
- rc = hwgraph_info_remove_LBL(xswitch,
- INFO_LBL_XSWITCH_VOL,
- (arbitrary_info_t *)&xvolinfo);
-#ifdef LATER
- ASSERT(rc == GRAPH_SUCCESS); rc = rc;
-#endif
-
- kfree(xvolinfo);
-}
-/*
- * A Crosstalk master volunteers to manage xwidgets on the specified xswitch.
- */
-/* ARGSUSED */
-static void
-volunteer_for_widgets(devfs_handle_t xswitch, devfs_handle_t master)
-{
- xswitch_vol_t xvolinfo = NULL;
-
- (void)hwgraph_info_get_LBL(xswitch,
- INFO_LBL_XSWITCH_VOL,
- (arbitrary_info_t *)&xvolinfo);
- if (xvolinfo == NULL) {
-#ifdef LATER
- if (!is_headless_node_vertex(master)) {
-#if defined(SUPPORT_PRINTING_V_FORMAT)
- printk(KERN_WARNING "volunteer for widgets: vertex %v has no info label",
- xswitch);
-#else
- printk(KERN_WARNING "volunteer for widgets: vertex 0x%x has no info label",
- xswitch);
-#endif
- }
-#endif /* LATER */
- return;
- }
-
- mutex_lock(&xvolinfo->xswitch_volunteer_mutex);
- ASSERT(xvolinfo->xswitch_volunteer_count < NUM_XSWITCH_VOLUNTEER);
- xvolinfo->xswitch_volunteer[xvolinfo->xswitch_volunteer_count] = master;
- xvolinfo->xswitch_volunteer_count++;
- mutex_unlock(&xvolinfo->xswitch_volunteer_mutex);
-}
-
-extern int xbow_port_io_enabled(nasid_t nasid, int widgetnum);
-
-/*
- * Assign all the xwidgets hanging off the specified xswitch to the
- * Crosstalk masters that have volunteered for xswitch duty.
- */
-/* ARGSUSED */
-static void
-assign_widgets_to_volunteers(devfs_handle_t xswitch, devfs_handle_t hubv)
-{
- int curr_volunteer, num_volunteer;
- xwidgetnum_t widgetnum;
- xswitch_info_t xswitch_info;
- xswitch_vol_t xvolinfo = NULL;
- nasid_t nasid;
- hubinfo_t hubinfo;
-
- hubinfo_get(hubv, &hubinfo);
- nasid = hubinfo->h_nasid;
-
- xswitch_info = xswitch_info_get(xswitch);
- ASSERT(xswitch_info != NULL);
-
- (void)hwgraph_info_get_LBL(xswitch,
- INFO_LBL_XSWITCH_VOL,
- (arbitrary_info_t *)&xvolinfo);
- if (xvolinfo == NULL) {
-#ifdef LATER
- if (!is_headless_node_vertex(hubv)) {
-#if defined(SUPPORT_PRINTING_V_FORMAT)
- printk(KERN_WARNING "assign_widgets_to_volunteers:vertex %v has "
- " no info label",
- xswitch);
-#else
- printk(KERN_WARNING "assign_widgets_to_volunteers:vertex 0x%x has "
- " no info label",
- xswitch);
-#endif
- }
-#endif /* LATER */
- return;
- }
-
- num_volunteer = xvolinfo->xswitch_volunteer_count;
- ASSERT(num_volunteer > 0);
- curr_volunteer = 0;
-
- /* Assign master hub for xswitch itself. */
- if (HUB_WIDGET_ID_MIN > 0) {
- hubv = xvolinfo->xswitch_volunteer[0];
- xswitch_info_master_assignment_set(xswitch_info, (xwidgetnum_t)0, hubv);
- }
-
- /*
- * TBD: Use administrative information to alter assignment of
- * widgets to hubs.
- */
- for (widgetnum=HUB_WIDGET_ID_MIN; widgetnum <= HUB_WIDGET_ID_MAX; widgetnum++) {
-
- /*
- * Ignore disabled/empty ports.
- */
- if (!xbow_port_io_enabled(nasid, widgetnum))
- continue;
-
- /*
- * If this is the master IO board, assign it to the same
- * hub that owned it in the prom.
- */
- if (is_master_nasid_widget(nasid, widgetnum)) {
- int i;
-
- for (i=0; i<num_volunteer; i++) {
- hubv = xvolinfo->xswitch_volunteer[i];
- hubinfo_get(hubv, &hubinfo);
- nasid = hubinfo->h_nasid;
- if (nasid == get_console_nasid())
- goto do_assignment;
- }
-#ifdef LATER
- PRINT_PANIC("Nasid == %d, console nasid == %d",
- nasid, get_console_nasid());
-#endif
- }
-
-
- /*
- * Do a round-robin assignment among the volunteer nodes.
- */
- hubv = xvolinfo->xswitch_volunteer[curr_volunteer];
- curr_volunteer = (curr_volunteer + 1) % num_volunteer;
- /* fall through */
-
-do_assignment:
- /*
- * At this point, we want to make hubv the master of widgetnum.
- */
- xswitch_info_master_assignment_set(xswitch_info, widgetnum, hubv);
- }
-
- xswitch_volunteer_delete(xswitch);
-}
-
-/*
- * Early iograph initialization. Called by master CPU in mlreset().
- * Useful for including iograph.o in kernel.o.
- */
-void
-iograph_early_init(void)
-{
-/*
- * Need new way to get this information ..
- */
- cnodeid_t cnode;
- nasid_t nasid;
- lboard_t *board;
-
- /*
- * Init. the board-to-hwgraph link early, so FRU analyzer
- * doesn't trip on leftover values if we panic early on.
- */
- for(cnode = 0; cnode < numnodes; cnode++) {
- nasid = COMPACT_TO_NASID_NODEID(cnode);
- board = (lboard_t *)KL_CONFIG_INFO(nasid);
- DBG("iograph_early_init: Found board 0x%p\n", board);
-
- /* Check out all the board info stored on a node */
- while(board) {
- board->brd_graph_link = GRAPH_VERTEX_NONE;
- board = KLCF_NEXT(board);
- DBG("iograph_early_init: Found board 0x%p\n", board);
-
-
- }
- }
-
- hubio_init();
-}
-
-#ifdef LINUX_KERNEL_THREADS
-static struct semaphore io_init_sema;
-#endif
-
-/*
- * Let boot processor know that we're done initializing our node's IO
- * and then exit.
- */
-/* ARGSUSED */
-static void
-io_init_done(cnodeid_t cnodeid,cpu_cookie_t c)
-{
- /* Let boot processor know that we're done. */
-#ifdef LINUX_KERNEL_THREADS
- up(&io_init_sema);
-#endif
-#ifdef LATER
- /* This is for the setnoderun done when the io_init thread
- * started
- */
- restorenoderun(c);
- sthread_exit();
-#endif
-}
-
-/*
- * Probe to see if this hub's xtalk link is active. If so,
- * return the Crosstalk Identification of the widget that we talk to.
- * This is called before any of the Crosstalk infrastructure for
- * this hub is set up. It's usually called on the node that we're
- * probing, but not always.
- *
- * TBD: Prom code should actually do this work, and pass through
- * hwid for our use.
- */
-static void
-early_probe_for_widget(devfs_handle_t hubv, xwidget_hwid_t hwid)
-{
- hubreg_t llp_csr_reg;
- nasid_t nasid;
- hubinfo_t hubinfo;
-
- hubinfo_get(hubv, &hubinfo);
- nasid = hubinfo->h_nasid;
-
- llp_csr_reg = REMOTE_HUB_L(nasid, IIO_LLP_CSR);
- /*
- * If link is up, read the widget's part number.
- * A direct connect widget must respond to widgetnum=0.
- */
- if (llp_csr_reg & IIO_LLP_CSR_IS_UP) {
- /* TBD: Put hub into "indirect" mode */
- /*
- * We're able to read from a widget because our hub's
- * WIDGET_ID was set up earlier.
- */
- widgetreg_t widget_id = *(volatile widgetreg_t *)
- (RAW_NODE_SWIN_BASE(nasid, 0x0) + WIDGET_ID);
-
- DBG("early_probe_for_widget: Hub Vertex 0x%p is UP widget_id = 0x%x Register 0x%p\n", hubv, widget_id,
- (volatile widgetreg_t *)(RAW_NODE_SWIN_BASE(nasid, 0x0) + WIDGET_ID) );
-
- hwid->part_num = XWIDGET_PART_NUM(widget_id);
- hwid->rev_num = XWIDGET_REV_NUM(widget_id);
- hwid->mfg_num = XWIDGET_MFG_NUM(widget_id);
-
- /* TBD: link reset */
- } else {
-
- hwid->part_num = XWIDGET_PART_NUM_NONE;
- hwid->rev_num = XWIDGET_REV_NUM_NONE;
- hwid->mfg_num = XWIDGET_MFG_NUM_NONE;
- }
-
-}
-
-/* Add inventory information to the widget vertex
- * Right now (module,slot,revision) is being
- * added as inventory information.
- */
-static void
-xwidget_inventory_add(devfs_handle_t widgetv,
- lboard_t *board,
- struct xwidget_hwid_s hwid)
-{
- if (!board)
- return;
- /* Donot add inventory information for the baseio
- * on a speedo with an xbox. It has already been
- * taken care of in SN00_vmc.
- * Speedo with xbox's baseio comes in at slot io1 (widget 9)
- */
- device_inventory_add(widgetv,INV_IOBD,board->brd_type,
- board->brd_module,
- SLOTNUM_GETSLOT(board->brd_slot),
- hwid.rev_num);
-}
-
-/*
- * io_xswitch_widget_init
- *
- */
-
-/* defined in include/linux/ctype.h */
-/* #define toupper(c) (islower(c) ? (c) - 'a' + 'A' : (c)) */
-
-void
-io_xswitch_widget_init(devfs_handle_t xswitchv,
- devfs_handle_t hubv,
- xwidgetnum_t widgetnum,
- async_attach_t aa)
-{
- xswitch_info_t xswitch_info;
- xwidgetnum_t hub_widgetid;
- devfs_handle_t widgetv;
- cnodeid_t cnode;
- widgetreg_t widget_id;
- nasid_t nasid, peer_nasid;
- struct xwidget_hwid_s hwid;
- hubinfo_t hubinfo;
- /*REFERENCED*/
- int rc;
- char slotname[SLOTNUM_MAXLENGTH];
- char pathname[128];
- char new_name[64];
- moduleid_t module;
- slotid_t slot;
- lboard_t *board = NULL;
- char buffer[16];
- slotid_t get_widget_slotnum(int xbow, int widget);
-
- DBG("\nio_xswitch_widget_init: hubv 0x%p, xswitchv 0x%p, widgetnum 0x%x\n", hubv, xswitchv, widgetnum);
- /*
- * Verify that xswitchv is indeed an attached xswitch.
- */
- xswitch_info = xswitch_info_get(xswitchv);
- ASSERT(xswitch_info != NULL);
-
- hubinfo_get(hubv, &hubinfo);
- nasid = hubinfo->h_nasid;
- cnode = NASID_TO_COMPACT_NODEID(nasid);
- hub_widgetid = hubinfo->h_widgetid;
-
-
- /* Who's the other guy on out crossbow (if anyone) */
- peer_nasid = NODEPDA(cnode)->xbow_peer;
- if (peer_nasid == INVALID_NASID)
- /* If I don't have a peer, use myself. */
- peer_nasid = nasid;
-
-
- /* Check my xbow structure and my peer's */
- if (!xbow_port_io_enabled(nasid, widgetnum) &&
- !xbow_port_io_enabled(peer_nasid, widgetnum)) {
- return;
- }
-
- if (xswitch_info_link_ok(xswitch_info, widgetnum)) {
- char name[4];
- /*
- * If the current hub is not supposed to be the master
- * for this widgetnum, then skip this widget.
- */
- if (xswitch_info_master_assignment_get(xswitch_info,
- widgetnum) != hubv) {
- return;
- }
-
- module = NODEPDA(cnode)->module_id;
-#ifdef XBRIDGE_REGS_SIM
- /* hardwire for now...could do this with something like:
- * xbow_soft_t soft = hwgraph_fastinfo_get(vhdl);
- * xbow_t xbow = soft->base;
- * xbowreg_t xwidget_id = xbow->xb_wid_id;
- * but I don't feel like figuring out vhdl right now..
- * and I know for a fact the answer is 0x2d000049
- */
- DBG("io_xswitch_widget_init: XBRIDGE_REGS_SIM FIXME: reading xwidget id: hardwired to xbridge (0x2d000049).\n");
- DBG("XWIDGET_PART_NUM(0x2d000049)= 0x%x\n", XWIDGET_PART_NUM(0x2d000049));
- if (XWIDGET_PART_NUM(0x2d000049)==XXBOW_WIDGET_PART_NUM) {
-#else
- if (nasid_has_xbridge(nasid)) {
-#endif /* XBRIDGE_REGS_SIM */
- board = find_lboard_module_class(
- (lboard_t *)KL_CONFIG_INFO(nasid),
- module,
- KLTYPE_IOBRICK);
-
-DBG("io_xswitch_widget_init: Board 0x%p\n", board);
-{
- lboard_t dummy;
-
-
- if (board) {
- DBG("io_xswitch_widget_init: Found KLTYPE_IOBRICK Board 0x%p brd_type 0x%x\n", board, board->brd_type);
- } else {
- DBG("io_xswitch_widget_init: FIXME did not find IOBOARD\n");
- board = &dummy;
- }
-
-}
-
- /*
- * Make sure we really want to say xbrick, pbrick,
- * etc. rather than XIO, graphics, etc.
- */
-
-#ifdef SUPPORT_PRINTING_M_FORMAT
- sprintf(pathname, EDGE_LBL_MODULE "/%M/"
- "%cbrick" "/%s/%d",
- NODEPDA(cnode)->module_id,
-
-#else
- memset(buffer, 0, 16);
- format_module_id(buffer, NODEPDA(cnode)->module_id, MODULE_FORMAT_BRIEF);
- sprintf(pathname, EDGE_LBL_MODULE "/%s/"
- "%cbrick" "/%s/%d",
- buffer,
-#endif
-
- (board->brd_type == KLTYPE_IBRICK) ? 'I' :
- (board->brd_type == KLTYPE_PBRICK) ? 'P' :
- (board->brd_type == KLTYPE_XBRICK) ? 'X' : '?',
- EDGE_LBL_XTALK, widgetnum);
- }
-
- DBG("io_xswitch_widget_init: path= %s\n", pathname);
- rc = hwgraph_path_add(hwgraph_root, pathname, &widgetv);
-
- ASSERT(rc == GRAPH_SUCCESS);
-
- /* This is needed to let the user programs to map the
- * module,slot numbers to the corresponding widget numbers
- * on the crossbow.
- */
- rc = device_master_set(hwgraph_connectpt_get(widgetv), hubv);
-
- /* If we are looking at the global master io6
- * then add information about the version of
- * the io6prom as a part of "detailed inventory"
- * information.
- */
- if (is_master_baseio(nasid,
- NODEPDA(cnode)->module_id,
- get_widget_slotnum(0,widgetnum))) {
- extern void klhwg_baseio_inventory_add(devfs_handle_t,
- cnodeid_t);
- module = NODEPDA(cnode)->module_id;
-
-#ifdef XBRIDGE_REGS_SIM
- DBG("io_xswitch_widget_init: XBRIDGE_REGS_SIM FIXME: reading xwidget id: hardwired to xbridge (0x2d000049).\n");
- if (XWIDGET_PART_NUM(0x2d000049)==XXBOW_WIDGET_PART_NUM) {
-#else
- if (nasid_has_xbridge(nasid)) {
-#endif /* XBRIDGE_REGS_SIM */
- board = find_lboard_module(
- (lboard_t *)KL_CONFIG_INFO(nasid),
- module);
- /*
- * Change iobrick to correct i/o brick
- */
-#ifdef SUPPORT_PRINTING_M_FORMAT
- sprintf(pathname, EDGE_LBL_MODULE "/%M/"
-#else
- sprintf(pathname, EDGE_LBL_MODULE "/%x/"
-#endif
- "iobrick" "/%s/%d",
- NODEPDA(cnode)->module_id,
- EDGE_LBL_XTALK, widgetnum);
- } else {
- slot = get_widget_slotnum(0, widgetnum);
- board = get_board_name(nasid, module, slot,
- new_name);
- /*
- * Create the vertex for the widget,
- * using the decimal
- * widgetnum as the name of the primary edge.
- */
-#ifdef SUPPORT_PRINTING_M_FORMAT
- sprintf(pathname, EDGE_LBL_MODULE "/%M/"
- EDGE_LBL_SLOT "/%s/%s",
- NODEPDA(cnode)->module_id,
- slotname, new_name);
-#else
- memset(buffer, 0, 16);
- format_module_id(buffer, NODEPDA(cnode)->module_id, MODULE_FORMAT_BRIEF);
- sprintf(pathname, EDGE_LBL_MODULE "/%s/"
- EDGE_LBL_SLOT "/%s/%s",
- buffer,
- slotname, new_name);
-#endif
- }
-
- rc = hwgraph_path_add(hwgraph_root, pathname, &widgetv);
- DBG("io_xswitch_widget_init: (2) path= %s\n", pathname);
- /*
- * This is a weird ass code needed for error injection
- * purposes.
- */
- rc = device_master_set(hwgraph_connectpt_get(widgetv), hubv);
-
- klhwg_baseio_inventory_add(widgetv,cnode);
- }
- sprintf(name, "%d", widgetnum);
- DBG("io_xswitch_widget_init: FIXME hwgraph_edge_add %s xswitchv 0x%p, widgetv 0x%p\n", name, xswitchv, widgetv);
- rc = hwgraph_edge_add(xswitchv, widgetv, name);
-
- /*
- * crosstalk switch code tracks which
- * widget is attached to each link.
- */
- xswitch_info_vhdl_set(xswitch_info, widgetnum, widgetv);
-
- /*
- * Peek at the widget to get its crosstalk part and
- * mfgr numbers, then present it to the generic xtalk
- * bus provider to have its driver attach routine
- * called (or not).
- */
-#ifdef XBRIDGE_REGS_SIM
- widget_id = 0x2d000049;
- DBG("io_xswitch_widget_init: XBRIDGE_REGS_SIM FIXME: id hardwired to widget_id\n");
-#else
- widget_id = XWIDGET_ID_READ(nasid, widgetnum);
-#endif /* XBRIDGE_REGS_SIM */
- hwid.part_num = XWIDGET_PART_NUM(widget_id);
- hwid.rev_num = XWIDGET_REV_NUM(widget_id);
- hwid.mfg_num = XWIDGET_MFG_NUM(widget_id);
- /* Store some inventory information about
- * the xwidget in the hardware graph.
- */
- xwidget_inventory_add(widgetv,board,hwid);
-
- (void)xwidget_register(&hwid, widgetv, widgetnum,
- hubv, hub_widgetid,
- aa);
-
-#ifdef SN0_USE_BTE
- bte_bpush_war(cnode, (void *)board);
-#endif
- }
-
-}
-
-
-static void
-io_init_xswitch_widgets(devfs_handle_t xswitchv, cnodeid_t cnode)
-{
- xwidgetnum_t widgetnum;
- async_attach_t aa;
-
- aa = async_attach_new();
-
- DBG("io_init_xswitch_widgets: xswitchv 0x%p for cnode %d\n", xswitchv, cnode);
-
- for (widgetnum = HUB_WIDGET_ID_MIN; widgetnum <= HUB_WIDGET_ID_MAX;
- widgetnum++) {
- io_xswitch_widget_init(xswitchv,
- cnodeid_to_vertex(cnode),
- widgetnum, aa);
- }
- /*
- * Wait for parallel attach threads, if any, to complete.
- */
- async_attach_waitall(aa);
- async_attach_free(aa);
-}
-
-/*
- * For each PCI bridge connected to the xswitch, add a link from the
- * board's klconfig info to the bridge's hwgraph vertex. This lets
- * the FRU analyzer find the bridge without traversing the hardware
- * graph and risking hangs.
- */
-static void
-io_link_xswitch_widgets(devfs_handle_t xswitchv, cnodeid_t cnodeid)
-{
- xwidgetnum_t widgetnum;
- char pathname[128];
- devfs_handle_t vhdl;
- nasid_t nasid, peer_nasid;
- lboard_t *board;
-
-
-
- /* And its connected hub's nasids */
- nasid = COMPACT_TO_NASID_NODEID(cnodeid);
- peer_nasid = NODEPDA(cnodeid)->xbow_peer;
-
- /*
- * Look for paths matching "<widgetnum>/pci" under xswitchv.
- * For every widget, init. its lboard's hwgraph link. If the
- * board has a PCI bridge, point the link to it.
- */
- for (widgetnum = HUB_WIDGET_ID_MIN; widgetnum <= HUB_WIDGET_ID_MAX;
- widgetnum++) {
- sprintf(pathname, "%d", widgetnum);
- if (hwgraph_traverse(xswitchv, pathname, &vhdl) !=
- GRAPH_SUCCESS)
- continue;
-
- board = find_lboard_module((lboard_t *)KL_CONFIG_INFO(nasid),
- NODEPDA(cnodeid)->module_id);
- if (board == NULL && peer_nasid != INVALID_NASID) {
- /*
- * Try to find the board on our peer
- */
- board = find_lboard_module(
- (lboard_t *)KL_CONFIG_INFO(peer_nasid),
- NODEPDA(cnodeid)->module_id);
- }
- if (board == NULL) {
-#if defined(SUPPORT_PRINTING_V_FORMAT)
- printk(KERN_WARNING "Could not find PROM info for vertex %v, "
- "FRU analyzer may fail",
- vhdl);
-#else
- printk(KERN_WARNING "Could not find PROM info for vertex 0x%p, "
- "FRU analyzer may fail",
- (void *)vhdl);
-#endif
- return;
- }
-
- sprintf(pathname, "%d/"EDGE_LBL_PCI, widgetnum);
- if (hwgraph_traverse(xswitchv, pathname, &vhdl) ==
- GRAPH_SUCCESS)
- board->brd_graph_link = vhdl;
- else
- board->brd_graph_link = GRAPH_VERTEX_NONE;
- }
-}
-
-/*
- * Initialize all I/O on the specified node.
- */
-static void
-io_init_node(cnodeid_t cnodeid)
-{
- /*REFERENCED*/
- devfs_handle_t hubv, switchv, widgetv;
- struct xwidget_hwid_s hwid;
- hubinfo_t hubinfo;
- int is_xswitch;
- nodepda_t *npdap;
- struct semaphore *peer_sema = 0;
- uint32_t widget_partnum;
- nodepda_router_info_t *npda_rip;
- cpu_cookie_t c = 0;
- extern int hubdev_docallouts(devfs_handle_t);
-
-#ifdef LATER
- /* Try to execute on the node that we're initializing. */
- c = setnoderun(cnodeid);
-#endif
- npdap = NODEPDA(cnodeid);
-
- /*
- * Get the "top" vertex for this node's hardware
- * graph; it will carry the per-hub hub-specific
- * data, and act as the crosstalk provider master.
- * It's canonical path is probably something of the
- * form /hw/module/%M/slot/%d/node
- */
- hubv = cnodeid_to_vertex(cnodeid);
- DBG("io_init_node: Initialize IO for cnode %d hubv(node) 0x%p npdap 0x%p\n", cnodeid, hubv, npdap);
-
- ASSERT(hubv != GRAPH_VERTEX_NONE);
-
- hubdev_docallouts(hubv);
-
- /*
- * Set up the dependent routers if we have any.
- */
- npda_rip = npdap->npda_rip_first;
-
- while(npda_rip) {
- /* If the router info has not been initialized
- * then we need to do the router initialization
- */
- if (!npda_rip->router_infop) {
- router_init(cnodeid,0,npda_rip);
- }
- npda_rip = npda_rip->router_next;
- }
-
- /*
- * Read mfg info on this hub
- */
-#ifdef LATER
- printk("io_init_node: FIXME need to implement HUB_VERTEX_MFG_INFO\n");
- HUB_VERTEX_MFG_INFO(hubv);
-#endif /* LATER */
-
- /*
- * If nothing connected to this hub's xtalk port, we're done.
- */
- early_probe_for_widget(hubv, &hwid);
- if (hwid.part_num == XWIDGET_PART_NUM_NONE) {
-#ifdef PROBE_TEST
- if ((cnodeid == 1) || (cnodeid == 2)) {
- int index;
-
- for (index = 0; index < 600; index++)
- DBG("Interfering with device probing!!!\n");
- }
-#endif
- /* io_init_done takes cpu cookie as 2nd argument
- * to do a restorenoderun for the setnoderun done
- * at the start of this thread
- */
-
- DBG("**** io_init_node: Node's 0x%p hub widget has XWIDGET_PART_NUM_NONE ****\n", hubv);
- return;
- /* NOTREACHED */
- }
-
- /*
- * attach our hub_provider information to hubv,
- * so we can use it as a crosstalk provider "master"
- * vertex.
- */
- xtalk_provider_register(hubv, &hub_provider);
- xtalk_provider_startup(hubv);
-
- /*
- * Create a vertex to represent the crosstalk bus
- * attached to this hub, and a vertex to be used
- * as the connect point for whatever is out there
- * on the other side of our crosstalk connection.
- *
- * Crosstalk Switch drivers "climb up" from their
- * connection point to try and take over the switch
- * point.
- *
- * Of course, the edges and verticies may already
- * exist, in which case our net effect is just to
- * associate the "xtalk_" driver with the connection
- * point for the device.
- */
-
- (void)hwgraph_path_add(hubv, EDGE_LBL_XTALK, &switchv);
-
- DBG("io_init_node: Created 'xtalk' entry to '../node/' xtalk vertex 0x%p\n", switchv);
-
- ASSERT(switchv != GRAPH_VERTEX_NONE);
-
- (void)hwgraph_edge_add(hubv, switchv, EDGE_LBL_IO);
-
- DBG("io_init_node: Created symlink 'io' from ../node/io to ../node/xtalk \n");
-
- /*
- * We need to find the widget id and update the basew_id field
- * accordingly. In particular, SN00 has direct connected bridge,
- * and hence widget id is Not 0.
- */
-
- widget_partnum = (((*(volatile int32_t *)(NODE_SWIN_BASE(COMPACT_TO_NASID_NODEID(cnodeid), 0) + WIDGET_ID))) & WIDGET_PART_NUM) >> WIDGET_PART_NUM_SHFT;
-
- if (widget_partnum == BRIDGE_WIDGET_PART_NUM ||
- widget_partnum == XBRIDGE_WIDGET_PART_NUM){
- npdap->basew_id = (((*(volatile int32_t *)(NODE_SWIN_BASE(COMPACT_TO_NASID_NODEID(cnodeid), 0) + BRIDGE_WID_CONTROL))) & WIDGET_WIDGET_ID);
-
- DBG("io_init_node: Found XBRIDGE widget_partnum= 0x%x\n", widget_partnum);
-
- } else if (widget_partnum == XBOW_WIDGET_PART_NUM ||
- widget_partnum == XXBOW_WIDGET_PART_NUM) {
- /*
- * Xbow control register does not have the widget ID field.
- * So, hard code the widget ID to be zero.
- */
- DBG("io_init_node: Found XBOW widget_partnum= 0x%x\n", widget_partnum);
- npdap->basew_id = 0;
-
- } else if (widget_partnum == XG_WIDGET_PART_NUM) {
- /*
- * OK, WTF do we do here if we have an XG direct connected to a HUB/Bedrock???
- * So, hard code the widget ID to be zero?
- */
- npdap->basew_id = 0;
- npdap->basew_id = (((*(volatile int32_t *)(NODE_SWIN_BASE(COMPACT_TO_NASID_NODEID(cnodeid), 0) + BRIDGE_WID_CONTROL))) & WIDGET_WIDGET_ID);
- } else {
- npdap->basew_id = (((*(volatile int32_t *)(NODE_SWIN_BASE(COMPACT_TO_NASID_NODEID(cnodeid), 0) + BRIDGE_WID_CONTROL))) & WIDGET_WIDGET_ID);
-
- panic(" ****io_init_node: Unknown Widget Part Number 0x%x Widgt ID 0x%x attached to Hubv 0x%p ****\n", widget_partnum, npdap->basew_id, (void *)hubv);
-
- /*NOTREACHED*/
- }
- {
- char widname[10];
- sprintf(widname, "%x", npdap->basew_id);
- (void)hwgraph_path_add(switchv, widname, &widgetv);
- DBG("io_init_node: Created '%s' to '..node/xtalk/' vertex 0x%p\n", widname, widgetv);
- ASSERT(widgetv != GRAPH_VERTEX_NONE);
- }
-
- nodepda->basew_xc = widgetv;
-
- is_xswitch = xwidget_hwid_is_xswitch(&hwid);
-
- /*
- * Try to become the master of the widget. If this is an xswitch
- * with multiple hubs connected, only one will succeed. Mastership
- * of an xswitch is used only when touching registers on that xswitch.
- * The slave xwidgets connected to the xswitch can be owned by various
- * masters.
- */
- if (device_master_set(widgetv, hubv) == 0) {
-
- /* Only one hub (thread) per Crosstalk device or switch makes
- * it to here.
- */
-
- /*
- * Initialize whatever xwidget is hanging off our hub.
- * Whatever it is, it's accessible through widgetnum 0.
- */
- hubinfo_get(hubv, &hubinfo);
-
- (void)xwidget_register(&hwid, widgetv, npdap->basew_id, hubv, hubinfo->h_widgetid, NULL);
-
- if (!is_xswitch) {
- /* io_init_done takes cpu cookie as 2nd argument
- * to do a restorenoderun for the setnoderun done
- * at the start of this thread
- */
- io_init_done(cnodeid,c);
- /* NOTREACHED */
- }
-
- /*
- * Special handling for Crosstalk Switches (e.g. xbow).
- * We need to do things in roughly the following order:
- * 1) Initialize xswitch hardware (done above)
- * 2) Determine which hubs are available to be widget masters
- * 3) Discover which links are active from the xswitch
- * 4) Assign xwidgets hanging off the xswitch to hubs
- * 5) Initialize all xwidgets on the xswitch
- */
-
- volunteer_for_widgets(switchv, hubv);
-
- /* If there's someone else on this crossbow, recognize him */
- if (npdap->xbow_peer != INVALID_NASID) {
- nodepda_t *peer_npdap = NODEPDA(NASID_TO_COMPACT_NODEID(npdap->xbow_peer));
- peer_sema = &peer_npdap->xbow_sema;
- volunteer_for_widgets(switchv, peer_npdap->node_vertex);
- }
-
- assign_widgets_to_volunteers(switchv, hubv);
-
- /* Signal that we're done */
- if (peer_sema) {
- mutex_unlock(peer_sema);
- }
-
- }
- else {
- /* Wait 'til master is done assigning widgets. */
- mutex_lock(&npdap->xbow_sema);
- }
-
-#ifdef PROBE_TEST
- if ((cnodeid == 1) || (cnodeid == 2)) {
- int index;
-
- for (index = 0; index < 500; index++)
- DBG("Interfering with device probing!!!\n");
- }
-#endif
- /* Now both nodes can safely inititialize widgets */
- io_init_xswitch_widgets(switchv, cnodeid);
- io_link_xswitch_widgets(switchv, cnodeid);
-
- /* io_init_done takes cpu cookie as 2nd argument
- * to do a restorenoderun for the setnoderun done
- * at the start of this thread
- */
- io_init_done(cnodeid,c);
-
- DBG("\nio_init_node: DONE INITIALIZED ALL I/O FOR CNODEID %d\n\n", cnodeid);
-}
-
-
-#define IOINIT_STKSZ (16 * 1024)
-
-#define __DEVSTR1 "/../.master/"
-#define __DEVSTR2 "/target/"
-#define __DEVSTR3 "/lun/0/disk/partition/"
-#define __DEVSTR4 "/../ef"
-
-#if defined(CONFIG_IA64_SGI_SN1)
-/*
- * Currently, we need to allow for 5 IBrick slots with 1 FC each
- * plus an internal 1394.
- *
- * ioconfig starts numbering SCSI's at NUM_BASE_IO_SCSI_CTLR.
- */
-#define NUM_BASE_IO_SCSI_CTLR 6
-#else
-#define NUM_BASE_IO_SCSI_CTLR 6
-#endif
-/*
- * This tells ioconfig where it can start numbering scsi controllers.
- * Below this base number, platform-specific handles the numbering.
- * XXX Irix legacy..controller numbering should be part of devfsd's job
- */
-int num_base_io_scsi_ctlr = 2; /* used by syssgi */
-devfs_handle_t base_io_scsi_ctlr_vhdl[NUM_BASE_IO_SCSI_CTLR];
-static devfs_handle_t baseio_enet_vhdl,baseio_console_vhdl;
-
-/*
- * Put the logical controller number information in the
- * scsi controller vertices for each scsi controller that
- * is in a "fixed position".
- */
-static void
-scsi_ctlr_nums_add(devfs_handle_t pci_vhdl)
-{
- {
- int i;
-
- num_base_io_scsi_ctlr = NUM_BASE_IO_SCSI_CTLR;
-
- /* Initialize base_io_scsi_ctlr_vhdl array */
- for (i=0; i<NUM_BASE_IO_SCSI_CTLR; i++)
- base_io_scsi_ctlr_vhdl[i] = GRAPH_VERTEX_NONE;
- }
- {
- /*
- * May want to consider changing the SN0 code, above, to work more like
- * the way this works.
- */
- devfs_handle_t base_ibrick_xbridge_vhdl;
- devfs_handle_t base_ibrick_xtalk_widget_vhdl;
- devfs_handle_t scsi_ctlr_vhdl;
- int i;
- graph_error_t rv;
-
- /*
- * This is a table of "well-known" SCSI controllers and their well-known
- * controller numbers. The names in the table start from the base IBrick's
- * Xbridge vertex, so the first component is the xtalk widget number.
- */
- static struct {
- char *base_ibrick_scsi_path;
- int controller_number;
- } hardwired_scsi_controllers[] = {
- {"15/" EDGE_LBL_PCI "/1/" EDGE_LBL_SCSI_CTLR "/0", 0},
- {"15/" EDGE_LBL_PCI "/2/" EDGE_LBL_SCSI_CTLR "/0", 1},
- {"15/" EDGE_LBL_PCI "/3/" EDGE_LBL_SCSI_CTLR "/0", 2},
- {"14/" EDGE_LBL_PCI "/1/" EDGE_LBL_SCSI_CTLR "/0", 3},
- {"14/" EDGE_LBL_PCI "/2/" EDGE_LBL_SCSI_CTLR "/0", 4},
- {"15/" EDGE_LBL_PCI "/6/ohci/0/" EDGE_LBL_SCSI_CTLR "/0", 5},
- {NULL, -1} /* must be last */
- };
-
- base_ibrick_xtalk_widget_vhdl = hwgraph_connectpt_get(pci_vhdl);
- ASSERT_ALWAYS(base_ibrick_xtalk_widget_vhdl != GRAPH_VERTEX_NONE);
-
- base_ibrick_xbridge_vhdl = hwgraph_connectpt_get(base_ibrick_xtalk_widget_vhdl);
- ASSERT_ALWAYS(base_ibrick_xbridge_vhdl != GRAPH_VERTEX_NONE);
- hwgraph_vertex_unref(base_ibrick_xtalk_widget_vhdl);
-
- /*
- * Iterate through the list of well-known SCSI controllers.
- * For each controller found, set it's controller number according
- * to the table.
- */
- for (i=0; hardwired_scsi_controllers[i].base_ibrick_scsi_path != NULL; i++) {
- rv = hwgraph_path_lookup(base_ibrick_xbridge_vhdl,
- hardwired_scsi_controllers[i].base_ibrick_scsi_path, &scsi_ctlr_vhdl, NULL);
-
- if (rv != GRAPH_SUCCESS) /* No SCSI at this path */
- continue;
-
- ASSERT(hardwired_scsi_controllers[i].controller_number < NUM_BASE_IO_SCSI_CTLR);
- base_io_scsi_ctlr_vhdl[hardwired_scsi_controllers[i].controller_number] = scsi_ctlr_vhdl;
- device_controller_num_set(scsi_ctlr_vhdl, hardwired_scsi_controllers[i].controller_number);
- hwgraph_vertex_unref(scsi_ctlr_vhdl); /* (even though we're actually keeping a reference) */
- }
-
- hwgraph_vertex_unref(base_ibrick_xbridge_vhdl);
- }
-}
-
-
-#include <asm/sn/ioerror_handling.h>
-devfs_handle_t sys_critical_graph_root = GRAPH_VERTEX_NONE;
-
-/* Define the system critical vertices and connect them through
- * a canonical parent-child relationships for easy traversal
- * during io error handling.
- */
-static void
-sys_critical_graph_init(void)
-{
- devfs_handle_t bridge_vhdl,master_node_vhdl;
- devfs_handle_t xbow_vhdl = GRAPH_VERTEX_NONE;
- extern devfs_handle_t hwgraph_root;
- devfs_handle_t pci_slot_conn;
- int slot;
- devfs_handle_t baseio_console_conn;
-
- DBG("sys_critical_graph_init: FIXME.\n");
- baseio_console_conn = hwgraph_connectpt_get(baseio_console_vhdl);
-
- if (baseio_console_conn == NULL) {
- return;
- }
-
- /* Get the vertex handle for the baseio bridge */
- bridge_vhdl = device_master_get(baseio_console_conn);
-
- /* Get the master node of the baseio card */
- master_node_vhdl = cnodeid_to_vertex(
- master_node_get(baseio_console_vhdl));
-
- /* Add the "root->node" part of the system critical graph */
-
- sys_critical_graph_vertex_add(hwgraph_root,master_node_vhdl);
-
- /* Check if we have a crossbow */
- if (hwgraph_traverse(master_node_vhdl,
- EDGE_LBL_XTALK"/0",
- &xbow_vhdl) == GRAPH_SUCCESS) {
- /* We have a crossbow.Add "node->xbow" part of the system
- * critical graph.
- */
- sys_critical_graph_vertex_add(master_node_vhdl,xbow_vhdl);
-
- /* Add "xbow->baseio bridge" of the system critical graph */
- sys_critical_graph_vertex_add(xbow_vhdl,bridge_vhdl);
-
- hwgraph_vertex_unref(xbow_vhdl);
- } else
- /* We donot have a crossbow. Add "node->baseio_bridge"
- * part of the system critical graph.
- */
- sys_critical_graph_vertex_add(master_node_vhdl,bridge_vhdl);
-
- /* Add all the populated PCI slot vertices to the system critical
- * graph with the bridge vertex as the parent.
- */
- for (slot = 0 ; slot < 8; slot++) {
- char slot_edge[10];
-
- sprintf(slot_edge,"%d",slot);
- if (hwgraph_traverse(bridge_vhdl,slot_edge, &pci_slot_conn)
- != GRAPH_SUCCESS)
- continue;
- sys_critical_graph_vertex_add(bridge_vhdl,pci_slot_conn);
- hwgraph_vertex_unref(pci_slot_conn);
- }
-
- hwgraph_vertex_unref(bridge_vhdl);
-
- /* Add the "ioc3 pci connection point -> console ioc3" part
- * of the system critical graph
- */
-
- if (hwgraph_traverse(baseio_console_vhdl,"..",&pci_slot_conn) ==
- GRAPH_SUCCESS) {
- sys_critical_graph_vertex_add(pci_slot_conn,
- baseio_console_vhdl);
- hwgraph_vertex_unref(pci_slot_conn);
- }
-
- /* Add the "ethernet pci connection point -> base ethernet" part of
- * the system critical graph
- */
- if (hwgraph_traverse(baseio_enet_vhdl,"..",&pci_slot_conn) ==
- GRAPH_SUCCESS) {
- sys_critical_graph_vertex_add(pci_slot_conn,
- baseio_enet_vhdl);
- hwgraph_vertex_unref(pci_slot_conn);
- }
-
- /* Add the "scsi controller pci connection point -> base scsi
- * controller" part of the system critical graph
- */
- if (hwgraph_traverse(base_io_scsi_ctlr_vhdl[0],
- "../..",&pci_slot_conn) == GRAPH_SUCCESS) {
- sys_critical_graph_vertex_add(pci_slot_conn,
- base_io_scsi_ctlr_vhdl[0]);
- hwgraph_vertex_unref(pci_slot_conn);
- }
- if (hwgraph_traverse(base_io_scsi_ctlr_vhdl[1],
- "../..",&pci_slot_conn) == GRAPH_SUCCESS) {
- sys_critical_graph_vertex_add(pci_slot_conn,
- base_io_scsi_ctlr_vhdl[1]);
- hwgraph_vertex_unref(pci_slot_conn);
- }
- hwgraph_vertex_unref(baseio_console_conn);
-
-}
-
-static void
-baseio_ctlr_num_set(void)
-{
- char name[MAXDEVNAME];
- devfs_handle_t console_vhdl, pci_vhdl, enet_vhdl;
- devfs_handle_t ioc3_console_vhdl_get(void);
-
-
- DBG("baseio_ctlr_num_set; FIXME\n");
- console_vhdl = ioc3_console_vhdl_get();
- if (console_vhdl == GRAPH_VERTEX_NONE)
- return;
- /* Useful for setting up the system critical graph */
- baseio_console_vhdl = console_vhdl;
-
- vertex_to_name(console_vhdl,name,MAXDEVNAME);
-
- strcat(name,__DEVSTR1);
- pci_vhdl = hwgraph_path_to_vertex(name);
- scsi_ctlr_nums_add(pci_vhdl);
- /* Unref the pci_vhdl due to the reference by hwgraph_path_to_vertex
- */
- hwgraph_vertex_unref(pci_vhdl);
-
- vertex_to_name(console_vhdl, name, MAXDEVNAME);
- strcat(name, __DEVSTR4);
- enet_vhdl = hwgraph_path_to_vertex(name);
-
- /* Useful for setting up the system critical graph */
- baseio_enet_vhdl = enet_vhdl;
-
- device_controller_num_set(enet_vhdl, 0);
- /* Unref the enet_vhdl due to the reference by hwgraph_path_to_vertex
- */
- hwgraph_vertex_unref(enet_vhdl);
-}
-/* #endif */
-
-void
-sn00_rrb_alloc(devfs_handle_t vhdl, int *vendor_list)
-{
- /* REFERENCED */
- int rtn_val;
-
- /*
- ** sn00 population: errb orrb
- ** 0- ql 3+?
- ** 1- ql 2
- ** 2- ioc3 ethernet 2+?
- ** 3- ioc3 secondary 1
- ** 4- 0
- ** 5- PCI slot
- ** 6- PCI slot
- ** 7- PCI slot
- */
-
- /* The following code implements this heuristic for getting
- * maximum usage out of the rrbs
- *
- * constraints:
- * 8 bit ql1 needs 1+1
- * ql0 or ql5,6,7 wants 1+2
- * ethernet wants 2 or more
- *
- * rules for even rrbs:
- * if nothing in slot 6
- * 4 rrbs to 0 and 2 (0xc8889999)
- * else
- * 3 2 3 to slots 0 2 6 (0xc8899bbb)
- *
- * rules for odd rrbs
- * if nothing in slot 5 or 7 (0xc8889999)
- * 4 rrbs to 1 and 3
- * else if 1 thing in 5 or 7 (0xc8899aaa) or (0xc8899bbb)
- * 3 2 3 to slots 1 3 5|7
- * else
- * 2 1 3 2 to slots 1 3 5 7 (note: if there's a ql card in 7 this
- * (0xc89aaabb) may short what it wants therefore the
- * rule should be to plug pci slots in order)
- */
-
-
- if (vendor_list[6] != PCIIO_VENDOR_ID_NONE) {
- /* something in slot 6 */
- rtn_val = pcibr_alloc_all_rrbs(vhdl, 0, 3,1, 2,0, 0,0, 3,0);
- }
- else {
- rtn_val = pcibr_alloc_all_rrbs(vhdl, 0, 4,1, 4,0, 0,0, 0,0);
- }
- if (rtn_val)
- printk(KERN_WARNING "sn00_rrb_alloc: pcibr_alloc_all_rrbs failed");
-
- if ((vendor_list[5] != PCIIO_VENDOR_ID_NONE) &&
- (vendor_list[7] != PCIIO_VENDOR_ID_NONE)) {
- /* soemthing in slot 5 and 7 */
- rtn_val = pcibr_alloc_all_rrbs(vhdl, 1, 2,1, 1,0, 3,0, 2,0);
- }
- else if (vendor_list[5] != PCIIO_VENDOR_ID_NONE) {
- /* soemthing in slot 5 but not 7 */
- rtn_val = pcibr_alloc_all_rrbs(vhdl, 1, 3,1, 2,0, 3,0, 0,0);
- }
- else if (vendor_list[7] != PCIIO_VENDOR_ID_NONE) {
- /* soemthing in slot 7 but not 5 */
- rtn_val = pcibr_alloc_all_rrbs(vhdl, 1, 3,1, 2,0, 0,0, 3,0);
- }
- else {
- /* nothing in slot 5 or 7 */
- rtn_val = pcibr_alloc_all_rrbs(vhdl, 1, 4,1, 4,0, 0,0, 0,0);
- }
- if (rtn_val)
- printk(KERN_WARNING "sn00_rrb_alloc: pcibr_alloc_all_rrbs failed");
-}
-
-
-/*
- * Initialize all I/O devices. Starting closest to nodes, probe and
- * initialize outward.
- */
-void
-init_all_devices(void)
-{
- /* Governor on init threads..bump up when safe
- * (beware many devfs races)
- */
-#ifdef LATER
- int io_init_node_threads = 2;
-#endif
- cnodeid_t cnodeid, active;
-
-#ifdef LINUX_KERNEL_THREADS
- sema_init(&io_init_sema, 0);
-#endif
-
- active = 0;
- for (cnodeid = 0; cnodeid < numnodes; cnodeid++) {
-#ifdef LINUX_KERNEL_THREADS
- char thread_name[16];
- extern int io_init_pri;
-
- /*
- * Spawn a service thread for each node to initialize all
- * I/O on that node. Each thread attempts to bind itself
- * to the node whose I/O it's initializing.
- */
- sprintf(thread_name, "IO_init[%d]", cnodeid);
-
- (void)sthread_create(thread_name, 0, IOINIT_STKSZ, 0,
- io_init_pri, KT_PS, (st_func_t *)io_init_node,
- (void *)(long)cnodeid, 0, 0, 0);
-#else
- DBG("init_all_devices: Calling io_init_node() for cnode %d\n", cnodeid);
- io_init_node(cnodeid);
-
- DBG("init_all_devices: Done io_init_node() for cnode %d\n", cnodeid);
-
-#endif /* LINUX_KERNEL_THREADS */
-
-#ifdef LINUX_KERNEL_THREADS
- /* Limit how many nodes go at once, to not overload hwgraph */
- /* TBD: Should timeout */
- DBG("started thread for cnode %d\n", cnodeid);
- active++;
- if (io_init_node_threads &&
- active >= io_init_node_threads) {
- down(&io_init_sema);
- active--;
- }
-#endif /* LINUX_KERNEL_THREADS */
- }
-
-#ifdef LINUX_KERNEL_THREADS
- /* Wait until all IO_init threads are done */
-
- while (active > 0) {
-#ifdef AA_DEBUG
- DBG("waiting, %d still active\n", active);
-#endif
- down(&io_init_sema);
- active--;
- }
-
-#endif /* LINUX_KERNEL_THREADS */
-
- for (cnodeid = 0; cnodeid < numnodes; cnodeid++)
- /*
- * Update information generated by IO init.
- */
- update_node_information(cnodeid);
-
- baseio_ctlr_num_set();
- /* Setup the system critical graph (which is a subgraph of the
- * main hwgraph). This information is useful during io error
- * handling.
- */
- sys_critical_graph_init();
-
-#if HWG_PRINT
- hwgraph_print();
-#endif
-
-}
-
-#define toint(x) ((int)(x) - (int)('0'))
-
-void
-devnamefromarcs(char *devnm)
-{
- int val;
- char tmpnm[MAXDEVNAME];
- char *tmp1, *tmp2;
-
- val = strncmp(devnm, "dks", 3);
- if (val != 0)
- return;
- tmp1 = devnm + 3;
- if (!isdigit(*tmp1))
- return;
-
- val = 0;
- while (isdigit(*tmp1)) {
- val = 10*val+toint(*tmp1);
- tmp1++;
- }
-
- if(*tmp1 != 'd')
- return;
- else
- tmp1++;
-
- if ((val < 0) || (val >= NUM_BASE_IO_SCSI_CTLR)) {
- int i;
- int viable_found = 0;
-
- DBG("Only controller numbers 0..%d are supported for\n", NUM_BASE_IO_SCSI_CTLR-1);
- DBG("prom \"root\" variables of the form dksXdXsX.\n");
- DBG("To use another disk you must use the full hardware graph path\n\n");
- DBG("Possible controller numbers for use in 'dksXdXsX' on this system: ");
- for (i=0; i<NUM_BASE_IO_SCSI_CTLR; i++) {
- if (base_io_scsi_ctlr_vhdl[i] != GRAPH_VERTEX_NONE) {
- DBG("%d ", i);
- viable_found=1;
- }
- }
- if (viable_found)
- DBG("\n");
- else
- DBG("none found!\n");
-
-#ifdef LATER
- if (kdebug)
- debug("ring");
-#endif
- DELAY(15000000);
- //prom_reboot();
- panic("FIXME: devnamefromarcs: should call prom_reboot here.\n");
- /* NOTREACHED */
- }
-
- ASSERT(base_io_scsi_ctlr_vhdl[val] != GRAPH_VERTEX_NONE);
- vertex_to_name(base_io_scsi_ctlr_vhdl[val],
- tmpnm,
- MAXDEVNAME);
- tmp2 = tmpnm + strlen(tmpnm);
- strcpy(tmp2, __DEVSTR2);
- tmp2 += strlen(__DEVSTR2);
- while (*tmp1 != 's') {
- if((*tmp2++ = *tmp1++) == '\0')
- return;
- }
- tmp1++;
- strcpy(tmp2, __DEVSTR3);
- tmp2 += strlen(__DEVSTR3);
- while ( (*tmp2++ = *tmp1++) )
- ;
- tmp2--;
- *tmp2++ = '/';
- strcpy(tmp2, EDGE_LBL_BLOCK);
- strcpy(devnm,tmpnm);
-}
-
-static
-struct io_brick_map_s io_brick_tab[] = {
-
-/* Ibrick widget number to PCI bus number map */
- {
- 'I', /* Ibrick type */
- /* PCI Bus # Widget # */
- { 0, 0, 0, 0, 0, 0, 0, 0, /* 0x0 - 0x7 */
- 0, /* 0x8 */
- 0, /* 0x9 */
- 0, 0, /* 0xa - 0xb */
- 0, /* 0xc */
- 0, /* 0xd */
- 2, /* 0xe */
- 1 /* 0xf */
- }
- },
-
-/* Pbrick widget number to PCI bus number map */
- {
- 'P', /* Pbrick type */
- /* PCI Bus # Widget # */
- { 0, 0, 0, 0, 0, 0, 0, 0, /* 0x0 - 0x7 */
- 2, /* 0x8 */
- 1, /* 0x9 */
- 0, 0, /* 0xa - 0xb */
- 5, /* 0xc */
- 6, /* 0xd */
- 4, /* 0xe */
- 3 /* 0xf */
- }
- },
-
-/* Xbrick widget to XIO slot map */
- {
- 'X', /* Xbrick type */
- /* XIO Slot # Widget # */
- { 0, 0, 0, 0, 0, 0, 0, 0, /* 0x0 - 0x7 */
- 1, /* 0x8 */
- 2, /* 0x9 */
- 0, 0, /* 0xa - 0xb */
- 3, /* 0xc */
- 4, /* 0xd */
- 0, /* 0xe */
- 0 /* 0xf */
- }
- }
-};
-
-/*
- * Use the brick's type to map a widget number to a meaningful int
- */
-int
-io_brick_map_widget(char brick_type, int widget_num)
-{
- int num_bricks, i;
-
- /* Calculate number of bricks in table */
- num_bricks = sizeof(io_brick_tab)/sizeof(io_brick_tab[0]);
-
- /* Look for brick prefix in table */
- for (i = 0; i < num_bricks; i++) {
- if (brick_type == io_brick_tab[i].ibm_type)
- return(io_brick_tab[i].ibm_map_wid[widget_num]);
- }
-
- return 0;
-
-}
-
-/*
- * Use the device's vertex to map the device's widget to a meaningful int
- */
-int
-io_path_map_widget(devfs_handle_t vertex)
-{
- char hw_path_name[MAXDEVNAME];
- char *wp, *bp, *sp = NULL;
- int widget_num;
- long atoi(char *);
- int hwgraph_vertex_name_get(devfs_handle_t vhdl, char *buf, uint buflen);
-
-
- /* Get the full path name of the vertex */
- if (GRAPH_SUCCESS != hwgraph_vertex_name_get(vertex, hw_path_name,
- MAXDEVNAME))
- return 0;
-
- /* Find the widget number in the path name */
- wp = strstr(hw_path_name, "/"EDGE_LBL_XTALK"/");
- if (wp == NULL)
- return 0;
- widget_num = atoi(wp+7);
- if (widget_num < XBOW_PORT_8 || widget_num > XBOW_PORT_F)
- return 0;
-
- /* Find "brick" in the path name */
- bp = strstr(hw_path_name, "brick");
- if (bp == NULL)
- return 0;
-
- /* Find preceding slash */
- sp = bp;
- while (sp > hw_path_name) {
- sp--;
- if (*sp == '/')
- break;
- }
-
- /* Invalid if no preceding slash */
- if (!sp)
- return 0;
-
- /* Bump slash pointer to "brick" prefix */
- sp++;
- /*
- * Verify "brick" prefix length; valid exaples:
- * 'I' from "/Ibrick"
- * 'P' from "/Pbrick"
- * 'X' from "/Xbrick"
- */
- if ((bp - sp) != 1)
- return 0;
-
- return (io_brick_map_widget(*sp, widget_num));
-
-}
+++ /dev/null
-/* $Id$
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Copyright (C) 1992 - 1997, 2000-2002 Silicon Graphics, Inc. All rights reserved.
- */
-
-#include <linux/types.h>
-#include <linux/slab.h>
-#include <asm/sn/sgi.h>
-#include <asm/sn/sn_sal.h>
-#include <asm/sn/io.h>
-#include <asm/sn/invent.h>
-#include <asm/sn/hcl.h>
-#include <asm/sn/labelcl.h>
-#include <asm/sn/xtalk/xbow.h>
-#include <asm/sn/pci/bridge.h>
-#include <asm/sn/klconfig.h>
-#include <asm/sn/sn1/hubdev.h>
-#include <asm/sn/module.h>
-#include <asm/sn/pci/pcibr.h>
-#include <asm/sn/xtalk/xswitch.h>
-#include <asm/sn/nodepda.h>
-#include <asm/sn/sn_cpuid.h>
-
-
-/* #define LDEBUG 1 */
-
-#ifdef LDEBUG
-#define DPRINTF printk
-#define printf printk
-#else
-#define DPRINTF(x...)
-#endif
-
-module_t *modules[MODULE_MAX];
-int nummodules;
-
-#define SN00_SERIAL_FUDGE 0x3b1af409d513c2
-#define SN0_SERIAL_FUDGE 0x6e
-
-void
-encode_int_serial(uint64_t src,uint64_t *dest)
-{
- uint64_t val;
- int i;
-
- val = src + SN00_SERIAL_FUDGE;
-
-
- for (i = 0; i < sizeof(long long); i++) {
- ((char*)dest)[i] =
- ((char*)&val)[sizeof(long long)/2 +
- ((i%2) ? ((i/2 * -1) - 1) : (i/2))];
- }
-}
-
-
-void
-decode_int_serial(uint64_t src, uint64_t *dest)
-{
- uint64_t val;
- int i;
-
- for (i = 0; i < sizeof(long long); i++) {
- ((char*)&val)[sizeof(long long)/2 +
- ((i%2) ? ((i/2 * -1) - 1) : (i/2))] =
- ((char*)&src)[i];
- }
-
- *dest = val - SN00_SERIAL_FUDGE;
-}
-
-
-void
-encode_str_serial(const char *src, char *dest)
-{
- int i;
-
- for (i = 0; i < MAX_SERIAL_NUM_SIZE; i++) {
-
- dest[i] = src[MAX_SERIAL_NUM_SIZE/2 +
- ((i%2) ? ((i/2 * -1) - 1) : (i/2))] +
- SN0_SERIAL_FUDGE;
- }
-}
-
-void
-decode_str_serial(const char *src, char *dest)
-{
- int i;
-
- for (i = 0; i < MAX_SERIAL_NUM_SIZE; i++) {
- dest[MAX_SERIAL_NUM_SIZE/2 +
- ((i%2) ? ((i/2 * -1) - 1) : (i/2))] = src[i] -
- SN0_SERIAL_FUDGE;
- }
-}
-
-
-module_t *module_lookup(moduleid_t id)
-{
- int i;
-
- for (i = 0; i < nummodules; i++)
- if (modules[i]->id == id) {
- DPRINTF("module_lookup: found m=0x%p\n", modules[i]);
- return modules[i];
- }
-
- return NULL;
-}
-
-/*
- * module_add_node
- *
- * The first time a new module number is seen, a module structure is
- * inserted into the module list in order sorted by module number
- * and the structure is initialized.
- *
- * The node number is added to the list of nodes in the module.
- */
-
-module_t *module_add_node(moduleid_t id, cnodeid_t n)
-{
- module_t *m;
- int i;
- char buffer[16];
-
-#ifdef __ia64
- memset(buffer, 0, 16);
- format_module_id(buffer, id, MODULE_FORMAT_BRIEF);
- DPRINTF("module_add_node: id=%s node=%d\n", buffer, n);
-#endif
-
- if ((m = module_lookup(id)) == 0) {
-#ifdef LATER
- m = kmem_zalloc_node(sizeof (module_t), KM_NOSLEEP, n);
-#else
- m = kmalloc(sizeof (module_t), GFP_KERNEL);
- memset(m, 0 , sizeof(module_t));
-#endif
- ASSERT_ALWAYS(m);
-
- m->id = id;
- spin_lock_init(&m->lock);
-
- mutex_init_locked(&m->thdcnt);
-
-// set_elsc(&m->elsc);
- elsc_init(&m->elsc, COMPACT_TO_NASID_NODEID(n));
- spin_lock_init(&m->elsclock);
-
- /* Insert in sorted order by module number */
-
- for (i = nummodules; i > 0 && modules[i - 1]->id > id; i--)
- modules[i] = modules[i - 1];
-
- modules[i] = m;
- nummodules++;
- }
-
- m->nodes[m->nodecnt++] = n;
-
- DPRINTF("module_add_node: module %s now has %d nodes\n", buffer, m->nodecnt);
-
- return m;
-}
-
-int module_probe_snum(module_t *m, nasid_t nasid)
-{
- lboard_t *board;
- klmod_serial_num_t *comp;
- char * bcopy(const char * src, char * dest, int count);
- char serial_number[16];
-
- /*
- * record brick serial number
- */
- board = find_lboard((lboard_t *) KL_CONFIG_INFO(nasid), KLTYPE_SNIA);
-
- if (! board || KL_CONFIG_DUPLICATE_BOARD(board))
- {
-#if LDEBUG
- printf ("module_probe_snum: no IP35 board found!\n");
-#endif
- return 0;
- }
-
- board_serial_number_get( board, serial_number );
- if( serial_number[0] != '\0' ) {
- encode_str_serial( serial_number, m->snum.snum_str );
- m->snum_valid = 1;
- }
-#if LDEBUG
- else {
- printf("module_probe_snum: brick serial number is null!\n");
- }
- printf("module_probe_snum: brick serial number == %s\n", serial_number);
-#endif /* DEBUG */
-
- board = find_lboard((lboard_t *) KL_CONFIG_INFO(nasid),
- KLTYPE_IOBRICK_XBOW);
-
- if (! board || KL_CONFIG_DUPLICATE_BOARD(board))
- return 0;
-
- comp = GET_SNUM_COMP(board);
-
- if (comp) {
-#if LDEBUG
- int i;
-
- printf("********found module with id %x and string", m->id);
-
- for (i = 0; i < MAX_SERIAL_NUM_SIZE; i++)
- printf(" %x ", comp->snum.snum_str[i]);
-
- printf("\n"); /* Fudged string is not ASCII */
-#endif
-
- if (comp->snum.snum_str[0] != '\0') {
- bcopy(comp->snum.snum_str,
- m->sys_snum,
- MAX_SERIAL_NUM_SIZE);
- m->sys_snum_valid = 1;
- }
- }
-
- if (m->sys_snum_valid)
- return 1;
- else {
- DPRINTF("Invalid serial number for module %d, "
- "possible missing or invalid NIC.", m->id);
- return 0;
- }
-}
-
-void
-io_module_init(void)
-{
- cnodeid_t node;
- lboard_t *board;
- nasid_t nasid;
- int nserial;
- module_t *m;
-
- DPRINTF("*******module_init\n");
-
- nserial = 0;
-
- for (node = 0; node < numnodes; node++) {
- nasid = COMPACT_TO_NASID_NODEID(node);
-
- board = find_lboard((lboard_t *) KL_CONFIG_INFO(nasid), KLTYPE_SNIA);
- ASSERT(board);
-
- m = module_add_node(board->brd_module, node);
-
- if (! m->snum_valid && module_probe_snum(m, nasid))
- nserial++;
- }
-
- DPRINTF("********found total of %d serial numbers in the system\n",
- nserial);
-
- if (nserial == 0)
- printk(KERN_WARNING "io_module_init: No serial number found.\n");
-}
-
-elsc_t *get_elsc(void)
-{
- return &NODEPDA(cpuid_to_cnodeid(smp_processor_id()))->module->elsc;
-}
-
-int
-get_kmod_info(cmoduleid_t cmod, module_info_t *mod_info)
-{
- int i;
-
- if (cmod < 0 || cmod >= nummodules)
- return EINVAL;
-
- if (! modules[cmod]->snum_valid)
- return ENXIO;
-
- mod_info->mod_num = modules[cmod]->id;
- {
- char temp[MAX_SERIAL_NUM_SIZE];
-
- decode_str_serial(modules[cmod]->snum.snum_str, temp);
-
- /* if this is an invalid serial number return an error */
- if (temp[0] != 'K')
- return ENXIO;
-
- mod_info->serial_num = 0;
-
- for (i = 0; i < MAX_SERIAL_NUM_SIZE && temp[i] != '\0'; i++) {
- mod_info->serial_num <<= 4;
- mod_info->serial_num |= (temp[i] & 0xf);
-
- mod_info->serial_str[i] = temp[i];
- }
-
- mod_info->serial_str[i] = '\0';
- }
-
- return 0;
-}
+++ /dev/null
-/*
- *
- * SNI64 specific PCI support for SNI IO.
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Copyright (c) 1997, 1998, 2000-2002 Silicon Graphics, Inc. All rights reserved.
- */
-#include <linux/init.h>
-#include <linux/types.h>
-#include <linux/config.h>
-#include <linux/pci.h>
-#include <asm/sn/types.h>
-#include <asm/sn/sgi.h>
-#include <asm/sn/io.h>
-#include <asm/sn/driver.h>
-#include <asm/sn/iograph.h>
-#include <asm/param.h>
-#include <asm/sn/pio.h>
-#include <asm/sn/xtalk/xwidget.h>
-#include <asm/sn/sn_private.h>
-#include <asm/sn/addrs.h>
-#include <asm/sn/invent.h>
-#include <asm/sn/hcl.h>
-#include <asm/sn/hcl_util.h>
-#include <asm/sn/pci/pciio.h>
-#include <asm/sn/pci/pcibr.h>
-#include <asm/sn/pci/pcibr_private.h>
-#include <asm/sn/pci/bridge.h>
-
-#ifdef DEBUG_CONFIG
-#define DBG(x...) printk(x)
-#else
-#define DBG(x...)
-#endif
-
-
-
-#ifdef CONFIG_PCI
-
-extern devfs_handle_t pci_bus_to_vertex(unsigned char);
-extern devfs_handle_t devfn_to_vertex(unsigned char bus, unsigned char devfn);
-
-/*
- * snia64_read_config_byte - Read a byte from the config area of the device.
- */
-static int snia64_read_config_byte (struct pci_dev *dev,
- int where, unsigned char *val)
-{
- unsigned long res = 0;
- unsigned size = 1;
- devfs_handle_t device_vertex;
-
- if ( (dev == (struct pci_dev *)0) || (val == (unsigned char *)0) ) {
- return PCIBIOS_DEVICE_NOT_FOUND;
- }
- device_vertex = devfn_to_vertex(dev->bus->number, dev->devfn);
- if (!device_vertex) {
- DBG("%s : nonexistent device: bus= 0x%x slot= 0x%x func= 0x%x\n",
- __FUNCTION__, dev->bus->number, PCI_SLOT(dev->devfn), PCI_FUNC(dev->devfn));
- return(-1);
- }
- res = pciio_config_get(device_vertex, (unsigned) where, size);
- *val = (unsigned char) res;
- return PCIBIOS_SUCCESSFUL;
-}
-
-/*
- * snia64_read_config_word - Read 2 bytes from the config area of the device.
- */
-static int snia64_read_config_word (struct pci_dev *dev,
- int where, unsigned short *val)
-{
- unsigned long res = 0;
- unsigned size = 2; /* 2 bytes */
- devfs_handle_t device_vertex;
-
- if ( (dev == (struct pci_dev *)0) || (val == (unsigned short *)0) ) {
- return PCIBIOS_DEVICE_NOT_FOUND;
- }
- device_vertex = devfn_to_vertex(dev->bus->number, dev->devfn);
- if (!device_vertex) {
- DBG("%s : nonexistent device: bus= 0x%x slot= 0x%x func= 0x%x\n",
- __FUNCTION__, dev->bus->number, PCI_SLOT(dev->devfn), PCI_FUNC(dev->devfn));
- return(-1);
- }
- res = pciio_config_get(device_vertex, (unsigned) where, size);
- *val = (unsigned short) res;
- return PCIBIOS_SUCCESSFUL;
-}
-
-/*
- * snia64_read_config_dword - Read 4 bytes from the config area of the device.
- */
-static int snia64_read_config_dword (struct pci_dev *dev,
- int where, unsigned int *val)
-{
- unsigned long res = 0;
- unsigned size = 4; /* 4 bytes */
- devfs_handle_t device_vertex;
-
- if (where & 3) {
- return PCIBIOS_BAD_REGISTER_NUMBER;
- }
- if ( (dev == (struct pci_dev *)0) || (val == (unsigned int *)0) ) {
- return PCIBIOS_DEVICE_NOT_FOUND;
- }
-
- device_vertex = devfn_to_vertex(dev->bus->number, dev->devfn);
- if (!device_vertex) {
- DBG("%s : nonexistent device: bus= 0x%x slot= 0x%x func= 0x%x\n",
- __FUNCTION__, dev->bus->number, PCI_SLOT(dev->devfn), PCI_FUNC(dev->devfn));
- return(-1);
- }
- res = pciio_config_get(device_vertex, (unsigned) where, size);
- *val = (unsigned int) res;
- return PCIBIOS_SUCCESSFUL;
-}
-
-/*
- * snia64_write_config_byte - Writes 1 byte to the config area of the device.
- */
-static int snia64_write_config_byte (struct pci_dev *dev,
- int where, unsigned char val)
-{
- devfs_handle_t device_vertex;
-
- if ( dev == (struct pci_dev *)0 ) {
- return PCIBIOS_DEVICE_NOT_FOUND;
- }
- /*
- * if it's an IOC3 then we bail out, we special
- * case them with pci_fixup_ioc3
- */
- if (dev->vendor == PCI_VENDOR_ID_SGI &&
- dev->device == PCI_DEVICE_ID_SGI_IOC3 )
- return PCIBIOS_SUCCESSFUL;
-
- device_vertex = devfn_to_vertex(dev->bus->number, dev->devfn);
- if (!device_vertex) {
- DBG("%s : nonexistent device: bus= 0x%x slot= 0x%x func= 0x%x\n",
- __FUNCTION__, dev->bus->number, PCI_SLOT(dev->devfn), PCI_FUNC(dev->devfn));
- return(-1);
- }
- pciio_config_set( device_vertex, (unsigned)where, 1, (uint64_t) val);
-
- return PCIBIOS_SUCCESSFUL;
-}
-
-/*
- * snia64_write_config_word - Writes 2 bytes to the config area of the device.
- */
-static int snia64_write_config_word (struct pci_dev *dev,
- int where, unsigned short val)
-{
- devfs_handle_t device_vertex = NULL;
-
- if (where & 1) {
- return PCIBIOS_BAD_REGISTER_NUMBER;
- }
- if ( dev == (struct pci_dev *)0 ) {
- return PCIBIOS_DEVICE_NOT_FOUND;
- }
- /*
- * if it's an IOC3 then we bail out, we special
- * case them with pci_fixup_ioc3
- */
- if (dev->vendor == PCI_VENDOR_ID_SGI &&
- dev->device == PCI_DEVICE_ID_SGI_IOC3)
- return PCIBIOS_SUCCESSFUL;
-
- device_vertex = devfn_to_vertex(dev->bus->number, dev->devfn);
- if (!device_vertex) {
- DBG("%s : nonexistent device: bus= 0x%x slot= 0x%x func= 0x%x\n",
- __FUNCTION__, dev->bus->number, PCI_SLOT(dev->devfn), PCI_FUNC(dev->devfn));
- return(-1);
- }
- pciio_config_set( device_vertex, (unsigned)where, 2, (uint64_t) val);
-
- return PCIBIOS_SUCCESSFUL;
-}
-
-/*
- * snia64_write_config_dword - Writes 4 bytes to the config area of the device.
- */
-static int snia64_write_config_dword (struct pci_dev *dev,
- int where, unsigned int val)
-{
- devfs_handle_t device_vertex;
-
- if (where & 3) {
- return PCIBIOS_BAD_REGISTER_NUMBER;
- }
- if ( dev == (struct pci_dev *)0 ) {
- return PCIBIOS_DEVICE_NOT_FOUND;
- }
- /*
- * if it's an IOC3 then we bail out, we special
- * case them with pci_fixup_ioc3
- */
- if (dev->vendor == PCI_VENDOR_ID_SGI &&
- dev->device == PCI_DEVICE_ID_SGI_IOC3)
- return PCIBIOS_SUCCESSFUL;
-
- device_vertex = devfn_to_vertex(dev->bus->number, dev->devfn);
- if (!device_vertex) {
- DBG("%s : nonexistent device: bus= 0x%x slot= 0x%x func= 0x%x\n",
- __FUNCTION__, dev->bus->number, PCI_SLOT(dev->devfn), PCI_FUNC(dev->devfn));
- return(-1);
- }
- pciio_config_set( device_vertex, (unsigned)where, 4, (uint64_t) val);
-
- return PCIBIOS_SUCCESSFUL;
-}
-
-static struct pci_ops snia64_pci_ops = {
- snia64_read_config_byte,
- snia64_read_config_word,
- snia64_read_config_dword,
- snia64_write_config_byte,
- snia64_write_config_word,
- snia64_write_config_dword
-};
-
-/*
- * snia64_pci_find_bios - SNIA64 pci_find_bios() platform specific code.
- */
-void __init
-sn_pci_find_bios(void)
-{
- extern struct pci_ops *pci_root_ops;
- /*
- * Go initialize our IO Infrastructure ..
- */
- extern void sgi_master_io_infr_init(void);
-
- sgi_master_io_infr_init();
-
- /* sn_io_infrastructure_init(); */
- pci_root_ops = &snia64_pci_ops;
-}
-
-void
-pci_fixup_ioc3(struct pci_dev *d)
-{
- int i;
- unsigned int size;
-
- /* IOC3 only decodes 0x20 bytes of the config space, reading
- * beyond that is relatively benign but writing beyond that
- * (especially the base address registers) will shut down the
- * pci bus...so avoid doing so.
- * NOTE: this means we can't program the intr_pin into the device,
- * currently we hack this with special code in
- * sgi_pci_intr_support()
- */
- DBG("pci_fixup_ioc3: Fixing base addresses for ioc3 device %s\n", d->slot_name);
-
- /* I happen to know from the spec that the ioc3 needs only 0xfffff
- * The standard pci trick of writing ~0 to the baddr and seeing
- * what comes back doesn't work with the ioc3
- */
- size = 0xfffff;
- d->resource[0].end = (unsigned long) d->resource[0].start + (unsigned long) size;
-
- /*
- * Zero out the resource structure .. because we did not go through
- * the normal PCI Infrastructure Init, garbbage are left in these
- * fileds.
- */
- for (i = 1; i <= PCI_ROM_RESOURCE; i++) {
- d->resource[i].start = 0UL;
- d->resource[i].end = 0UL;
- d->resource[i].flags = 0UL;
- }
-
-#ifdef CONFIG_IA64_SGI_SN1
- *(volatile u32 *)0xc0000a000f000220 |= 0x90000;
-#endif
- d->subsystem_vendor = 0;
- d->subsystem_device = 0;
-
-}
-
-#else
-void sn_pci_find_bios(void) {}
-void pci_fixup_ioc3(struct pci_dev *d) {}
-struct list_head pci_root_buses;
-struct list_head pci_root_buses;
-struct list_head pci_devices;
-
-#endif /* CONFIG_PCI */
+++ /dev/null
-/* $Id$
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Copyright (C) 1992 - 1997, 2000-2002 Silicon Graphics, Inc. All rights reserved.
- */
-
-#include <linux/config.h>
-#include <linux/init.h>
-#include <linux/types.h>
-#include <linux/pci.h>
-#include <linux/pci_ids.h>
-#include <linux/sched.h>
-#include <linux/ioport.h>
-#include <asm/sn/types.h>
-#include <asm/sn/hack.h>
-#include <asm/sn/sgi.h>
-#include <asm/sn/io.h>
-#include <asm/sn/driver.h>
-#include <asm/sn/iograph.h>
-#include <asm/param.h>
-#include <asm/sn/pio.h>
-#include <asm/sn/xtalk/xwidget.h>
-#include <asm/sn/sn_private.h>
-#include <asm/sn/addrs.h>
-#include <asm/sn/invent.h>
-#include <asm/sn/hcl.h>
-#include <asm/sn/hcl_util.h>
-#include <asm/sn/intr.h>
-#include <asm/sn/xtalk/xtalkaddrs.h>
-#include <asm/sn/klconfig.h>
-#include <asm/sn/nodepda.h>
-#include <asm/sn/pci/pciio.h>
-#include <asm/sn/pci/pcibr.h>
-#include <asm/sn/pci/pcibr_private.h>
-#include <asm/sn/pci/pci_bus_cvlink.h>
-#include <asm/sn/simulator.h>
-#include <asm/sn/sn_cpuid.h>
-
-extern int bridge_rev_b_data_check_disable;
-
-devfs_handle_t busnum_to_pcibr_vhdl[MAX_PCI_XWIDGET];
-nasid_t busnum_to_nid[MAX_PCI_XWIDGET];
-void * busnum_to_atedmamaps[MAX_PCI_XWIDGET];
-unsigned char num_bridges;
-static int done_probing = 0;
-
-static int pci_bus_map_create(devfs_handle_t xtalk);
-devfs_handle_t devfn_to_vertex(unsigned char busnum, unsigned int devfn);
-
-#define SN1_IOPORTS_UNIT 256
-#define MAX_IOPORTS 0xffff
-#define MAX_IOPORTS_CHUNKS (MAX_IOPORTS / SN1_IOPORTS_UNIT)
-struct ioports_to_tlbs_s ioports_to_tlbs[MAX_IOPORTS_CHUNKS];
-unsigned long sn1_allocate_ioports(unsigned long pci_address);
-
-extern void sn1_init_irq_desc(void);
-
-
-
-/*
- * pci_bus_cvlink_init() - To be called once during initialization before
- * SGI IO Infrastructure init is called.
- */
-void
-pci_bus_cvlink_init(void)
-{
- memset(busnum_to_pcibr_vhdl, 0x0, sizeof(devfs_handle_t) * MAX_PCI_XWIDGET);
- memset(busnum_to_nid, 0x0, sizeof(nasid_t) * MAX_PCI_XWIDGET);
-
- memset(busnum_to_atedmamaps, 0x0, sizeof(void *) * MAX_PCI_XWIDGET);
-
- memset(ioports_to_tlbs, 0x0, sizeof(ioports_to_tlbs));
-
- num_bridges = 0;
-}
-
-/*
- * pci_bus_to_vertex() - Given a logical Linux Bus Number returns the associated
- * pci bus vertex from the SGI IO Infrastructure.
- */
-devfs_handle_t
-pci_bus_to_vertex(unsigned char busnum)
-{
-
- devfs_handle_t pci_bus = NULL;
-
-
- /*
- * First get the xwidget vertex.
- */
- pci_bus = busnum_to_pcibr_vhdl[busnum];
- return(pci_bus);
-}
-
-/*
- * devfn_to_vertex() - returns the vertex of the device given the bus, slot,
- * and function numbers.
- */
-devfs_handle_t
-devfn_to_vertex(unsigned char busnum, unsigned int devfn)
-{
-
- int slot = 0;
- int func = 0;
- char name[16];
- devfs_handle_t pci_bus = NULL;
- devfs_handle_t device_vertex = (devfs_handle_t)NULL;
-
- /*
- * Go get the pci bus vertex.
- */
- pci_bus = pci_bus_to_vertex(busnum);
- if (!pci_bus) {
- /*
- * During probing, the Linux pci code invents non-existent
- * bus numbers and pci_dev structures and tries to access
- * them to determine existence. Don't crib during probing.
- */
- if (done_probing)
- printk("devfn_to_vertex: Invalid bus number %d given.\n", busnum);
- return(NULL);
- }
-
-
- /*
- * Go get the slot&function vertex.
- * Should call pciio_slot_func_to_name() when ready.
- */
- slot = PCI_SLOT(devfn);
- func = PCI_FUNC(devfn);
-
- /*
- * For a NON Multi-function card the name of the device looks like:
- * ../pci/1, ../pci/2 ..
- */
- if (func == 0) {
- sprintf(name, "%d", slot);
- if (hwgraph_traverse(pci_bus, name, &device_vertex) ==
- GRAPH_SUCCESS) {
- if (device_vertex) {
- return(device_vertex);
- }
- }
- }
-
- /*
- * This maybe a multifunction card. It's names look like:
- * ../pci/1a, ../pci/1b, etc.
- */
- sprintf(name, "%d%c", slot, 'a'+func);
- if (hwgraph_traverse(pci_bus, name, &device_vertex) != GRAPH_SUCCESS) {
- if (!device_vertex) {
- return(NULL);
- }
- }
-
- return(device_vertex);
-}
-
-/*
- * For the given device, initialize the addresses for both the Device(x) Flush
- * Write Buffer register and the Xbow Flush Register for the port the PCI bus
- * is connected.
- */
-static void
-set_flush_addresses(struct pci_dev *device_dev,
- struct sn1_device_sysdata *device_sysdata)
-{
- pciio_info_t pciio_info = pciio_info_get(device_sysdata->vhdl);
- pciio_slot_t pciio_slot = pciio_info_slot_get(pciio_info);
- pcibr_soft_t pcibr_soft = (pcibr_soft_t) pciio_info_mfast_get(pciio_info);
- bridge_t *bridge = pcibr_soft->bs_base;
-
- device_sysdata->dma_buf_sync = (volatile unsigned int *)
- &(bridge->b_wr_req_buf[pciio_slot].reg);
- device_sysdata->xbow_buf_sync = (volatile unsigned int *)
- XBOW_PRIO_LINKREGS_PTR(NODE_SWIN_BASE(get_nasid(), 0),
- pcibr_soft->bs_xid);
-#ifdef DEBUG
-
- printk("set_flush_addresses: dma_buf_sync %p xbow_buf_sync %p\n",
- device_sysdata->dma_buf_sync, device_sysdata->xbow_buf_sync);
-
- while((volatile unsigned int )*device_sysdata->dma_buf_sync);
- while((volatile unsigned int )*device_sysdata->xbow_buf_sync);
-#endif
-
-}
-
-/*
- * Most drivers currently do not properly tell the arch specific pci dma
- * interfaces whether they can handle A64. Here is where we privately
- * keep track of this.
- */
-static void __init
-set_sn1_pci64(struct pci_dev *dev)
-{
- unsigned short vendor = dev->vendor;
- unsigned short device = dev->device;
-
- if (vendor == PCI_VENDOR_ID_QLOGIC) {
- if ((device == PCI_DEVICE_ID_QLOGIC_ISP2100) ||
- (device == PCI_DEVICE_ID_QLOGIC_ISP2200)) {
- SET_PCIA64(dev);
- return;
- }
- }
-
- if (vendor == PCI_VENDOR_ID_SGI) {
- if (device == PCI_DEVICE_ID_SGI_IOC3) {
- SET_PCIA64(dev);
- return;
- }
- }
-
-}
-
-/*
- * sn1_allocate_ioports() - This routine provides the allocation and
- * mappings between Linux style IOPORTs management.
- *
- * For simplicity sake, SN1 will allocate IOPORTs in chunks of
- * 256bytes .. irrespective of what the card desires. This may
- * have to change when we understand how to deal with legacy ioports
- * which are hardcoded in some drivers e.g. SVGA.
- *
- * Ofcourse, the SN1 IO Infrastructure has no concept of IOPORT numbers.
- * It will remain so. The IO Infrastructure will continue to map
- * IO Resource just like IRIX. When this is done, we map IOPORT
- * chunks to these resources. The Linux drivers will see and use real
- * IOPORT numbers. The various IOPORT access macros e.g. inb/outb etc.
- * does the munging of these IOPORT numbers to make a Uncache Virtual
- * Address. This address via the tlb entries generates the PCI Address
- * allocated by the SN1 IO Infrastructure Layer.
- */
-static unsigned long sn1_ioport_num = 0x1000; /* Reserve room for Legacy stuff */
-unsigned long
-sn1_allocate_ioports(unsigned long pci_address)
-{
-
- unsigned long ioport_index;
-
- /*
- * Just some idiot checking ..
- */
- if ( sn1_ioport_num > 0xffff ) {
- printk("sn1_allocate_ioports: No more IO PORTS available\n");
- return(-1);
- }
-
- /*
- * See Section 4.1.1.5 of Intel IA-64 Acrchitecture Software Developer's
- * Manual for details.
- */
- ioport_index = sn1_ioport_num / SN1_IOPORTS_UNIT;
-
- ioports_to_tlbs[ioport_index].p = 1; /* Present Bit */
- ioports_to_tlbs[ioport_index].rv_1 = 0; /* 1 Bit */
- ioports_to_tlbs[ioport_index].ma = 4; /* Memory Attributes 3 bits*/
- ioports_to_tlbs[ioport_index].a = 1; /* Set Data Access Bit Fault 1 Bit*/
- ioports_to_tlbs[ioport_index].d = 1; /* Dirty Bit */
- ioports_to_tlbs[ioport_index].pl = 0;/* Privilege Level - All levels can R/W*/
- ioports_to_tlbs[ioport_index].ar = 3; /* Access Rights - R/W only*/
- ioports_to_tlbs[ioport_index].ppn = pci_address >> 12; /* 4K page size */
- ioports_to_tlbs[ioport_index].ed = 0; /* Exception Deferral Bit */
- ioports_to_tlbs[ioport_index].ig = 0; /* Ignored */
-
- /* printk("sn1_allocate_ioports: ioport_index 0x%x ioports_to_tlbs 0x%p\n", ioport_index, ioports_to_tlbs[ioport_index]); */
-
- sn1_ioport_num += SN1_IOPORTS_UNIT;
-
- return(sn1_ioport_num - SN1_IOPORTS_UNIT);
-}
-
-/*
- * sn1_pci_fixup() - This routine is called when platform_pci_fixup() is
- * invoked at the end of pcibios_init() to link the Linux pci
- * infrastructure to SGI IO Infrasturcture - ia64/kernel/pci.c
- *
- * Other platform specific fixup can also be done here.
- */
-void
-sn1_pci_fixup(int arg)
-{
- struct list_head *ln;
- struct pci_bus *pci_bus = NULL;
- struct pci_dev *device_dev = NULL;
- struct sn1_widget_sysdata *widget_sysdata;
- struct sn1_device_sysdata *device_sysdata;
-#ifdef SN1_IOPORTS
- unsigned long ioport;
-#endif
- pciio_intr_t intr_handle;
- int cpuid, bit;
- devfs_handle_t device_vertex;
- pciio_intr_line_t lines;
- extern void sn1_pci_find_bios(void);
-#ifdef CONFIG_IA64_SGI_SN2
- extern int numnodes;
- int cnode;
-#endif /* CONFIG_IA64_SGI_SN2 */
-
-
- if (arg == 0) {
- sn1_init_irq_desc();
- sn1_pci_find_bios();
-#ifdef CONFIG_IA64_SGI_SN2
- for (cnode = 0; cnode < numnodes; cnode++) {
- extern void intr_init_vecblk(nodepda_t *npda, cnodeid_t, int);
- intr_init_vecblk(NODEPDA(cnode), cnode, 0);
- }
-#endif /* CONFIG_IA64_SGI_SN2 */
- return;
- }
-
-#if 0
-{
- devfs_handle_t bridge_vhdl = pci_bus_to_vertex(0);
- pcibr_soft_t pcibr_soft = (pcibr_soft_t) hwgraph_fastinfo_get(bridge_vhdl);
- bridge_t *bridge = pcibr_soft->bs_base;
- printk("pci_fixup_ioc3: Before devreg fixup\n");
- printk("pci_fixup_ioc3: Devreg 0 0x%x\n", bridge->b_device[0].reg);
- printk("pci_fixup_ioc3: Devreg 1 0x%x\n", bridge->b_device[1].reg);
- printk("pci_fixup_ioc3: Devreg 2 0x%x\n", bridge->b_device[2].reg);
- printk("pci_fixup_ioc3: Devreg 3 0x%x\n", bridge->b_device[3].reg);
- printk("pci_fixup_ioc3: Devreg 4 0x%x\n", bridge->b_device[4].reg);
- printk("pci_fixup_ioc3: Devreg 5 0x%x\n", bridge->b_device[5].reg);
- printk("pci_fixup_ioc3: Devreg 6 0x%x\n", bridge->b_device[6].reg);
- printk("pci_fixup_ioc3: Devreg 7 0x%x\n", bridge->b_device[7].reg);
-}
-#endif
- done_probing = 1;
-
- /*
- * Initialize the pci bus vertex in the pci_bus struct.
- */
- for( ln = pci_root_buses.next; ln != &pci_root_buses; ln = ln->next) {
- pci_bus = pci_bus_b(ln);
- widget_sysdata = kmalloc(sizeof(struct sn1_widget_sysdata),
- GFP_KERNEL);
- widget_sysdata->vhdl = pci_bus_to_vertex(pci_bus->number);
- pci_bus->sysdata = (void *)widget_sysdata;
- }
-
- /*
- * set the root start and end so that drivers calling check_region()
- * won't see a conflict
- */
-#ifdef SN1_IOPORTS
- ioport_resource.start = sn1_ioport_num;
- ioport_resource.end = 0xffff;
-#else
-#if defined(CONFIG_IA64_SGI_SN1)
- if ( IS_RUNNING_ON_SIMULATOR() ) {
- /*
- * IDE legacy IO PORTs are supported in Medusa.
- * Just open up IO PORTs from 0 .. ioport_resource.end.
- */
- ioport_resource.start = 0;
- } else {
- /*
- * We do not support Legacy IO PORT numbers.
- */
- ioport_resource.start |= IO_SWIZ_BASE | __IA64_UNCACHED_OFFSET;
- }
- ioport_resource.end |= (HSPEC_SWIZ_BASE-1) | __IA64_UNCACHED_OFFSET;
-#else
- // Need something here for sn2.... ZXZXZX
-#endif
-#endif
-
- /*
- * Initialize the device vertex in the pci_dev struct.
- */
- while ((device_dev = pci_find_device(PCI_ANY_ID, PCI_ANY_ID, device_dev)) != NULL) {
- unsigned int irq;
- int idx;
- u16 cmd;
- devfs_handle_t vhdl;
- unsigned long size;
- extern int bit_pos_to_irq(int);
-
- if (device_dev->vendor == PCI_VENDOR_ID_SGI &&
- device_dev->device == PCI_DEVICE_ID_SGI_IOC3) {
- extern void pci_fixup_ioc3(struct pci_dev *d);
- pci_fixup_ioc3(device_dev);
- }
-
- /* Set the device vertex */
-
- device_sysdata = kmalloc(sizeof(struct sn1_device_sysdata),
- GFP_KERNEL);
- device_sysdata->vhdl = devfn_to_vertex(device_dev->bus->number, device_dev->devfn);
- device_sysdata->isa64 = 0;
- /*
- * Set the xbridge Device(X) Write Buffer Flush and Xbow Flush
- * register addresses.
- */
- (void) set_flush_addresses(device_dev, device_sysdata);
-
- device_dev->sysdata = (void *) device_sysdata;
- set_sn1_pci64(device_dev);
- pci_read_config_word(device_dev, PCI_COMMAND, &cmd);
-
- /*
- * Set the resources address correctly. The assumption here
- * is that the addresses in the resource structure has been
- * read from the card and it was set in the card by our
- * Infrastructure ..
- */
- vhdl = device_sysdata->vhdl;
- for (idx = 0; idx < PCI_ROM_RESOURCE; idx++) {
- size = 0;
- size = device_dev->resource[idx].end -
- device_dev->resource[idx].start;
- if (size) {
- device_dev->resource[idx].start = (unsigned long)pciio_pio_addr(vhdl, 0, PCIIO_SPACE_WIN(idx), 0, size, 0, PCIIO_BYTE_STREAM);
- device_dev->resource[idx].start |= __IA64_UNCACHED_OFFSET;
- }
- else
- continue;
-
- device_dev->resource[idx].end =
- device_dev->resource[idx].start + size;
-
-#ifdef CONFIG_IA64_SGI_SN1
- /*
- * Adjust the addresses to go to the SWIZZLE ..
- */
- device_dev->resource[idx].start =
- device_dev->resource[idx].start & 0xfffff7ffffffffff;
- device_dev->resource[idx].end =
- device_dev->resource[idx].end & 0xfffff7ffffffffff;
-#endif
-
- if (device_dev->resource[idx].flags & IORESOURCE_IO) {
- cmd |= PCI_COMMAND_IO;
-#ifdef SN1_IOPORTS
- ioport = sn1_allocate_ioports(device_dev->resource[idx].start);
- if (ioport < 0) {
- printk("sn1_pci_fixup: PCI Device 0x%x on PCI Bus %d not mapped to IO PORTs .. IO PORTs exhausted\n", device_dev->devfn, device_dev->bus->number);
- continue;
- }
- pciio_config_set(vhdl, (unsigned) PCI_BASE_ADDRESS_0 + (idx * 4), 4, (res + (ioport & 0xfff)));
-
-printk("sn1_pci_fixup: ioport number %d mapped to pci address 0x%lx\n", ioport, (res + (ioport & 0xfff)));
-
- device_dev->resource[idx].start = ioport;
- device_dev->resource[idx].end = ioport + SN1_IOPORTS_UNIT;
-#endif
- }
- if (device_dev->resource[idx].flags & IORESOURCE_MEM)
- cmd |= PCI_COMMAND_MEMORY;
- }
- /*
- * Now handle the ROM resource ..
- */
- size = device_dev->resource[PCI_ROM_RESOURCE].end -
- device_dev->resource[PCI_ROM_RESOURCE].start;
-
- if (size) {
- device_dev->resource[PCI_ROM_RESOURCE].start =
- (unsigned long) pciio_pio_addr(vhdl, 0, PCIIO_SPACE_ROM, 0,
- size, 0, PCIIO_BYTE_STREAM);
- device_dev->resource[PCI_ROM_RESOURCE].start |= __IA64_UNCACHED_OFFSET;
- device_dev->resource[PCI_ROM_RESOURCE].end =
- device_dev->resource[PCI_ROM_RESOURCE].start + size;
-
-#ifdef CONFIG_IA64_SGI_SN1
- /*
- * go through synergy swizzled space
- */
- device_dev->resource[PCI_ROM_RESOURCE].start &= 0xfffff7ffffffffffUL;
- device_dev->resource[PCI_ROM_RESOURCE].end &= 0xfffff7ffffffffffUL;
-#endif
-
- }
-
- /*
- * Update the Command Word on the Card.
- */
- cmd |= PCI_COMMAND_MASTER; /* If the device doesn't support */
- /* bit gets dropped .. no harm */
- pci_write_config_word(device_dev, PCI_COMMAND, cmd);
-
- pci_read_config_byte(device_dev, PCI_INTERRUPT_PIN, (unsigned char *)&lines);
- if (device_dev->vendor == PCI_VENDOR_ID_SGI &&
- device_dev->device == PCI_DEVICE_ID_SGI_IOC3 ) {
- lines = 1;
- }
-
- device_sysdata = (struct sn1_device_sysdata *)device_dev->sysdata;
- device_vertex = device_sysdata->vhdl;
-
- intr_handle = pciio_intr_alloc(device_vertex, NULL, lines, device_vertex);
-
- bit = intr_handle->pi_irq;
- cpuid = intr_handle->pi_cpu;
-#ifdef CONFIG_IA64_SGI_SN1
- irq = bit_pos_to_irq(bit);
-#else /* SN2 */
- irq = bit;
-#endif
- irq = irq + (cpuid << 8);
- pciio_intr_connect(intr_handle);
- device_dev->irq = irq;
-#ifdef ajmtestintr
- {
- int slot = PCI_SLOT(device_dev->devfn);
- static int timer_set = 0;
- pcibr_intr_t pcibr_intr = (pcibr_intr_t)intr_handle;
- pcibr_soft_t pcibr_soft = pcibr_intr->bi_soft;
- extern void intr_test_handle_intr(int, void*, struct pt_regs *);
-
- if (!timer_set) {
- intr_test_set_timer();
- timer_set = 1;
- }
- intr_test_register_irq(irq, pcibr_soft, slot);
- request_irq(irq, intr_test_handle_intr,0,NULL, NULL);
- }
-#endif
-
- }
-
-#if 0
-
-{
- devfs_handle_t bridge_vhdl = pci_bus_to_vertex(0);
- pcibr_soft_t pcibr_soft = (pcibr_soft_t) hwgraph_fastinfo_get(bridge_vhdl);
- bridge_t *bridge = pcibr_soft->bs_base;
-
- printk("pci_fixup_ioc3: Before devreg fixup\n");
- printk("pci_fixup_ioc3: Devreg 0 0x%x\n", bridge->b_device[0].reg);
- printk("pci_fixup_ioc3: Devreg 1 0x%x\n", bridge->b_device[1].reg);
- printk("pci_fixup_ioc3: Devreg 2 0x%x\n", bridge->b_device[2].reg);
- printk("pci_fixup_ioc3: Devreg 3 0x%x\n", bridge->b_device[3].reg);
- printk("pci_fixup_ioc3: Devreg 4 0x%x\n", bridge->b_device[4].reg);
- printk("pci_fixup_ioc3: Devreg 5 0x%x\n", bridge->b_device[5].reg);
- printk("pci_fixup_ioc3: Devreg 6 0x%x\n", bridge->b_device[6].reg);
- printk("pci_fixup_ioc3: Devreg 7 0x%x\n", bridge->b_device[7].reg);
-}
-
-printk("testing Big Window: 0xC0000200c0000000 %p\n", *( (volatile uint64_t *)0xc0000200a0000000));
-printk("testing Big Window: 0xC0000200c0000008 %p\n", *( (volatile uint64_t *)0xc0000200a0000008));
-
-#endif
-
-}
-
-/*
- * pci_bus_map_create() - Called by pci_bus_to_hcl_cvlink() to finish the job.
- *
- * Linux PCI Bus numbers are assigned from lowest module_id numbers
- * (rack/slot etc.) starting from HUB_WIDGET_ID_MAX down to
- * HUB_WIDGET_ID_MIN:
- * widgetnum 15 gets lower Bus Number than widgetnum 14 etc.
- *
- * Given 2 modules 001c01 and 001c02 we get the following mappings:
- * 001c01, widgetnum 15 = Bus number 0
- * 001c01, widgetnum 14 = Bus number 1
- * 001c02, widgetnum 15 = Bus number 3
- * 001c02, widgetnum 14 = Bus number 4
- * etc.
- *
- * The rational for starting Bus Number 0 with Widget number 15 is because
- * the system boot disks are always connected via Widget 15 Slot 0 of the
- * I-brick. Linux creates /dev/sd* devices(naming) strating from Bus Number 0
- * Therefore, /dev/sda1 will be the first disk, on Widget 15 of the lowest
- * module id(Master Cnode) of the system.
- *
- */
-static int
-pci_bus_map_create(devfs_handle_t xtalk)
-{
-
- devfs_handle_t master_node_vertex = NULL;
- devfs_handle_t xwidget = NULL;
- devfs_handle_t pci_bus = NULL;
- hubinfo_t hubinfo = NULL;
- xwidgetnum_t widgetnum;
- char pathname[128];
- graph_error_t rv;
-
- /*
- * Loop throught this vertex and get the Xwidgets ..
- */
- for (widgetnum = HUB_WIDGET_ID_MAX; widgetnum >= HUB_WIDGET_ID_MIN; widgetnum--) {
-#if 0
- {
- int pos;
- char dname[256];
- pos = devfs_generate_path(xtalk, dname, 256);
- printk("%s : path= %s\n", __FUNCTION__, &dname[pos]);
- }
-#endif
-
- sprintf(pathname, "%d", widgetnum);
- xwidget = NULL;
-
- /*
- * Example - /hw/module/001c16/Pbrick/xtalk/8 is the xwidget
- * /hw/module/001c16/Pbrick/xtalk/8/pci/1 is device
- */
- rv = hwgraph_traverse(xtalk, pathname, &xwidget);
- if ( (rv != GRAPH_SUCCESS) ) {
- if (!xwidget)
- continue;
- }
-
- sprintf(pathname, "%d/"EDGE_LBL_PCI, widgetnum);
- pci_bus = NULL;
- if (hwgraph_traverse(xtalk, pathname, &pci_bus) != GRAPH_SUCCESS)
- if (!pci_bus)
- continue;
-
- /*
- * Assign the correct bus number and also the nasid of this
- * pci Xwidget.
- *
- * Should not be any race here ...
- */
- num_bridges++;
- busnum_to_pcibr_vhdl[num_bridges - 1] = pci_bus;
-
- /*
- * Get the master node and from there get the NASID.
- */
- master_node_vertex = device_master_get(xwidget);
- if (!master_node_vertex) {
- printk("WARNING: pci_bus_map_create: Unable to get .master for vertex 0x%p\n", (void *)xwidget);
- }
-
- hubinfo_get(master_node_vertex, &hubinfo);
- if (!hubinfo) {
- printk("WARNING: pci_bus_map_create: Unable to get hubinfo for master node vertex 0x%p\n", (void *)master_node_vertex);
- return(1);
- } else {
- busnum_to_nid[num_bridges - 1] = hubinfo->h_nasid;
- }
-
- /*
- * Pre assign DMA maps needed for 32 Bits Page Map DMA.
- */
- busnum_to_atedmamaps[num_bridges - 1] = (void *) kmalloc(
- sizeof(struct sn1_dma_maps_s) * MAX_ATE_MAPS, GFP_KERNEL);
- if (!busnum_to_atedmamaps[num_bridges - 1])
- printk("WARNING: pci_bus_map_create: Unable to precreate ATE DMA Maps for busnum %d vertex 0x%p\n", num_bridges - 1, (void *)xwidget);
-
- memset(busnum_to_atedmamaps[num_bridges - 1], 0x0,
- sizeof(struct sn1_dma_maps_s) * MAX_ATE_MAPS);
-
- }
-
- return(0);
-}
-
-/*
- * pci_bus_to_hcl_cvlink() - This routine is called after SGI IO Infrastructure
- * initialization has completed to set up the mappings between Xbridge
- * and logical pci bus numbers. We also set up the NASID for each of these
- * xbridges.
- *
- * Must be called before pci_init() is invoked.
- */
-int
-pci_bus_to_hcl_cvlink(void)
-{
-
- devfs_handle_t devfs_hdl = NULL;
- devfs_handle_t xtalk = NULL;
- int rv = 0;
- char name[256];
- int master_iobrick;
- int i;
-
- /*
- * Iterate throught each xtalk links in the system ..
- * /hw/module/001c01/node/xtalk/ 8|9|10|11|12|13|14|15
- *
- * /hw/module/001c01/node/xtalk/15 -> /hw/module/001c01/Ibrick/xtalk/15
- *
- * What if it is not pci?
- */
- devfs_hdl = hwgraph_path_to_vertex("/dev/hw/module");
-
- /*
- * To provide consistent(not persistent) device naming, we need to start
- * bus number allocation from the C-Brick with the lowest module id e.g. 001c01
- * with an attached I-Brick. Find the master_iobrick.
- */
- master_iobrick = -1;
- for (i = 0; i < nummodules; i++) {
- moduleid_t iobrick_id;
- iobrick_id = iobrick_module_get(&modules[i]->elsc);
- if (iobrick_id > 0) { /* Valid module id */
- if (MODULE_GET_BTYPE(iobrick_id) == MODULE_IBRICK) {
- master_iobrick = i;
- break;
- }
- }
- }
-
- /*
- * The master_iobrick gets bus 0 and 1.
- */
- if (master_iobrick >= 0) {
- memset(name, 0, 256);
- format_module_id(name, modules[master_iobrick]->id, MODULE_FORMAT_BRIEF);
- strcat(name, "/node/xtalk");
- xtalk = NULL;
- rv = hwgraph_edge_get(devfs_hdl, name, &xtalk);
- pci_bus_map_create(xtalk);
- }
-
- /*
- * Now go do the rest of the modules, starting from the C-Brick with the lowest
- * module id, remembering to skip the master_iobrick, which was done above.
- */
- for (i = 0; i < nummodules; i++) {
- if (i == master_iobrick) {
- continue; /* Did the master_iobrick already. */
- }
-
- memset(name, 0, 256);
- format_module_id(name, modules[i]->id, MODULE_FORMAT_BRIEF);
- strcat(name, "/node/xtalk");
- xtalk = NULL;
- rv = hwgraph_edge_get(devfs_hdl, name, &xtalk);
- pci_bus_map_create(xtalk);
- }
-
- return(0);
-}
+++ /dev/null
-/*
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Copyright (C) 2000,2002 Silicon Graphics, Inc. All rights reserved.
- *
- * Routines for PCI DMA mapping. See Documentation/DMA-mapping.txt for
- * a description of how these routines should be used.
- */
-
-#include <linux/config.h>
-#include <linux/types.h>
-#include <linux/mm.h>
-#include <linux/string.h>
-#include <linux/pci.h>
-#include <linux/slab.h>
-#include <linux/devfs_fs_kernel.h>
-#include <linux/module.h>
-
-#include <asm/delay.h>
-#include <asm/io.h>
-#include <asm/sn/sgi.h>
-#include <asm/sn/io.h>
-#include <asm/sn/invent.h>
-#include <asm/sn/hcl.h>
-#include <asm/sn/pci/pcibr.h>
-#include <asm/sn/pci/pcibr_private.h>
-#include <asm/sn/driver.h>
-#include <asm/sn/types.h>
-#include <asm/sn/alenlist.h>
-#include <asm/sn/pci/pci_bus_cvlink.h>
-#include <asm/sn/nag.h>
-
-/*
- * For ATE allocations
- */
-pciio_dmamap_t get_free_pciio_dmamap(devfs_handle_t);
-void free_pciio_dmamap(pcibr_dmamap_t);
-static struct sn_dma_maps_s *find_sn_dma_map(dma_addr_t, unsigned char);
-
-/*
- * Toplogy stuff
- */
-extern devfs_handle_t busnum_to_pcibr_vhdl[];
-extern nasid_t busnum_to_nid[];
-extern void * busnum_to_atedmamaps[];
-
-/**
- * get_free_pciio_dmamap - find and allocate an ATE
- * @pci_bus: PCI bus to get an entry for
- *
- * Finds and allocates an ATE on the PCI bus specified
- * by @pci_bus.
- */
-pciio_dmamap_t
-get_free_pciio_dmamap(devfs_handle_t pci_bus)
-{
- int i;
- struct sn_dma_maps_s *sn_dma_map = NULL;
-
- /*
- * Darn, we need to get the maps allocated for this bus.
- */
- for (i = 0; i < MAX_PCI_XWIDGET; i++) {
- if (busnum_to_pcibr_vhdl[i] == pci_bus) {
- sn_dma_map = busnum_to_atedmamaps[i];
- }
- }
-
- /*
- * Now get a free dmamap entry from this list.
- */
- for (i = 0; i < MAX_ATE_MAPS; i++, sn_dma_map++) {
- if (!sn_dma_map->dma_addr) {
- sn_dma_map->dma_addr = -1;
- return( (pciio_dmamap_t) sn_dma_map );
- }
- }
-
- return NULL;
-}
-
-/**
- * free_pciio_dmamap - free an ATE
- * @dma_map: ATE to free
- *
- * Frees the ATE specified by @dma_map.
- */
-void
-free_pciio_dmamap(pcibr_dmamap_t dma_map)
-{
- struct sn_dma_maps_s *sn_dma_map;
-
- sn_dma_map = (struct sn_dma_maps_s *) dma_map;
- sn_dma_map->dma_addr = 0;
-}
-
-/**
- * find_sn_dma_map - find an ATE associated with @dma_addr and @busnum
- * @dma_addr: DMA address to look for
- * @busnum: PCI bus to look on
- *
- * Finds the ATE associated with @dma_addr and @busnum.
- */
-static struct sn_dma_maps_s *
-find_sn_dma_map(dma_addr_t dma_addr, unsigned char busnum)
-{
-
- struct sn_dma_maps_s *sn_dma_map = NULL;
- int i;
-
- sn_dma_map = busnum_to_atedmamaps[busnum];
-
- for (i = 0; i < MAX_ATE_MAPS; i++, sn_dma_map++) {
- if (sn_dma_map->dma_addr == dma_addr) {
- return sn_dma_map;
- }
- }
-
- return NULL;
-}
-
-/**
- * sn_dma_sync - try to flush DMA buffers into the coherence domain
- * @hwdev: device to flush
- *
- * This routine flushes all DMA buffers for the device into the II of
- * the destination hub.
- *
- * NOTE!: this does not mean that the data is in the "coherence domain",
- * but it is very close. In other words, this routine *does not work*
- * as advertised due to hardware bugs. That said, it should be good enough for
- * most situations.
- */
-void
-sn_dma_sync(struct pci_dev *hwdev)
-{
-
-#ifdef SN_DMA_SYNC
-
- struct sn_device_sysdata *device_sysdata;
- volatile unsigned long dummy;
-
- /*
- * A DMA sync is supposed to ensure that
- * all the DMA from a particular device
- * is complete and coherent. We
- * try to do this by
- * 1. flushing the write wuffers from Bridge
- * 2. flushing the Xbow port.
- * Unfortunately, this only gets the DMA transactions 'very close' to
- * the coherence domain, but not quite in it.
- */
- device_sysdata = (struct sn_device_sysdata *)hwdev->sysdata;
- dummy = (volatile unsigned long ) *device_sysdata->dma_buf_sync;
-
- /*
- * For the Xbow port flush, we may be denied the request because
- * someone else may be flushing the port .. try again.
- */
- while((volatile unsigned long ) *device_sysdata->xbow_buf_sync) {
- udelay(2);
- }
-#endif
-}
-
-/**
- * sn_pci_alloc_consistent - allocate memory for coherent DMA
- * @hwdev: device to allocate for
- * @size: size of the region
- * @dma_handle: DMA (bus) address
- *
- * pci_alloc_consistent() returns a pointer to a memory region suitable for
- * coherent DMA traffic to/from a PCI device. On SN platforms, this means
- * that @dma_handle will have the %PCIIO_DMA_CMD flag set.
- *
- * This interface is usually used for "command" streams (e.g. the command
- * queue for a SCSI controller). See Documentation/DMA-mapping.txt for
- * more information. Note that this routine will always put a 32 bit
- * DMA address into @dma_handle. This is because most devices
- * that are capable of 64 bit PCI DMA transactions can't do 64 bit _coherent_
- * DMAs, and unfortunately this interface has to cater to the LCD. Oh well.
- *
- * Also known as platform_pci_alloc_consistent() by the IA64 machvec code.
- */
-void *
-sn_pci_alloc_consistent(struct pci_dev *hwdev, size_t size, dma_addr_t *dma_handle)
-{
- void *cpuaddr;
- devfs_handle_t vhdl;
- struct sn_device_sysdata *device_sysdata;
- unsigned long phys_addr;
- pciio_dmamap_t dma_map = 0;
- struct sn_dma_maps_s *sn_dma_map;
-
- *dma_handle = 0;
-
- /* We can't easily support < 32 bit devices */
- if (IS_PCI32L(hwdev))
- return NULL;
-
- /*
- * Get hwgraph vertex for the device
- */
- device_sysdata = (struct sn_device_sysdata *) hwdev->sysdata;
- vhdl = device_sysdata->vhdl;
-
- /*
- * Allocate the memory. FIXME: if we're allocating for
- * two devices on the same bus, we should at least try to
- * allocate memory in the same 2 GB window to avoid using
- * ATEs for the translation. See the comment above about the
- * 32 bit requirement for this function.
- */
- if(!(cpuaddr = (void *)__get_free_pages(GFP_ATOMIC, get_order(size))))
- return NULL;
-
- memset(cpuaddr, 0, size); /* have to zero it out */
-
- /* physical addr. of the memory we just got */
- phys_addr = __pa(cpuaddr);
-
- /*
- * This will try to use a Direct Map register to do the
- * 32 bit DMA mapping, but it may not succeed if another
- * device on the same bus is already mapped with different
- * attributes or to a different memory region.
- */
-#ifdef CONFIG_IA64_SGI_SN1
- *dma_handle = pciio_dmatrans_addr(vhdl, NULL, phys_addr, size,
- PCIIO_BYTE_STREAM |
- PCIIO_DMA_CMD);
-#elif defined(CONFIG_IA64_SGI_SN2)
- *dma_handle = pciio_dmatrans_addr(vhdl, NULL, phys_addr, size,
- ((IS_PIC_DEVICE(hwdev)) ? 0 : PCIIO_BYTE_STREAM) |
- PCIIO_DMA_CMD);
-#else
-#error unsupported platform
-#endif
-
- /*
- * It is a 32 bit card and we cannot do direct mapping,
- * so we try to use an ATE.
- */
- if (!(*dma_handle)) {
-#ifdef CONFIG_IA64_SGI_SN1
- dma_map = pciio_dmamap_alloc(vhdl, NULL, size,
- PCIIO_BYTE_STREAM |
- PCIIO_DMA_CMD);
-#elif defined(CONFIG_IA64_SGI_SN2)
- dma_map = pciio_dmamap_alloc(vhdl, NULL, size,
- ((IS_PIC_DEVICE(hwdev)) ? 0 : PCIIO_BYTE_STREAM) |
- PCIIO_DMA_CMD);
-#else
-#error unsupported platform
-#endif
- if (!dma_map) {
- printk(KERN_ERR "sn_pci_alloc_consistent: Unable to "
- "allocate anymore 32 bit page map entries.\n");
- BUG();
- }
- *dma_handle = (dma_addr_t) pciio_dmamap_addr(dma_map,phys_addr,
- size);
- sn_dma_map = (struct sn_dma_maps_s *)dma_map;
- sn_dma_map->dma_addr = *dma_handle;
- }
-
- return cpuaddr;
-}
-
-/**
- * sn_pci_free_consistent - free memory associated with coherent DMAable region
- * @hwdev: device to free for
- * @size: size to free
- * @vaddr: kernel virtual address to free
- * @dma_handle: DMA address associated with this region
- *
- * Frees the memory allocated by pci_alloc_consistent(). Also known
- * as platform_pci_free_consistent() by the IA64 machvec code.
- */
-void
-sn_pci_free_consistent(struct pci_dev *hwdev, size_t size, void *vaddr, dma_addr_t dma_handle)
-{
- struct sn_dma_maps_s *sn_dma_map = NULL;
-
- /*
- * Get the sn_dma_map entry.
- */
- if (IS_PCI32_MAPPED(dma_handle))
- sn_dma_map = find_sn_dma_map(dma_handle, hwdev->bus->number);
-
- /*
- * and free it if necessary...
- */
- if (sn_dma_map) {
- pciio_dmamap_done((pciio_dmamap_t)sn_dma_map);
- pciio_dmamap_free((pciio_dmamap_t)sn_dma_map);
- sn_dma_map->dma_addr = (dma_addr_t)NULL;
- }
- free_pages((unsigned long) vaddr, get_order(size));
-}
-
-/**
- * sn_pci_map_sg - map a scatter-gather list for DMA
- * @hwdev: device to map for
- * @sg: scatterlist to map
- * @nents: number of entries
- * @direction: direction of the DMA transaction
- *
- * Maps each entry of @sg for DMA. Also known as platform_pci_map_sg by the
- * IA64 machvec code.
- */
-int
-sn_pci_map_sg(struct pci_dev *hwdev, struct scatterlist *sg, int nents, int direction)
-{
-
- int i;
- devfs_handle_t vhdl;
- dma_addr_t dma_addr;
- unsigned long phys_addr;
- struct sn_device_sysdata *device_sysdata;
- pciio_dmamap_t dma_map;
-
- /* can't go anywhere w/o a direction in life */
- if (direction == PCI_DMA_NONE)
- BUG();
-
- /*
- * Get the hwgraph vertex for the device
- */
- device_sysdata = (struct sn_device_sysdata *) hwdev->sysdata;
- vhdl = device_sysdata->vhdl;
-
- /*
- * Setup a DMA address for each entry in the
- * scatterlist.
- */
- for (i = 0; i < nents; i++, sg++) {
- /* this catches incorrectly written drivers that
- attempt to map scatterlists that they have
- previously mapped. we print a warning and
- continue, but the driver should be fixed */
- switch (((u64)sg->dma_address) >> 60) {
- case 0xa:
- case 0xb:
-#ifdef DEBUG
-/* This needs to be cleaned up at some point. */
- NAG("A PCI driver (for device at%8s) has attempted to "
- "map a scatterlist that was previously mapped at "
- "%p - this is currently being worked around.\n",
- hwdev->slot_name, (void *)sg->dma_address);
- phys_addr = (u64)sg->dma_address & TO_PHYS_MASK;
- break;
-#endif
- default: /* not previously mapped, get the phys. addr */
- phys_addr = __pa(sg->dma_address);
- break;
- }
- sg->page = NULL;
- dma_addr = 0;
-
- /*
- * Handle the most common case: 64 bit cards. This
- * call should always succeed.
- */
- if (IS_PCIA64(hwdev)) {
- dma_addr = pciio_dmatrans_addr(vhdl, NULL, phys_addr,
- sg->length,
- ((IS_PIC_DEVICE(hwdev)) ? 0 : PCIIO_BYTE_STREAM) |
- PCIIO_DMA_DATA |
- PCIIO_DMA_A64);
- sg->dma_address = (char *)dma_addr;
- continue;
- }
-
- /*
- * Handle 32-63 bit cards via direct mapping
- */
- if (IS_PCI32G(hwdev)) {
-#ifdef CONFIG_IA64_SGI_SN1
- dma_addr = pciio_dmatrans_addr(vhdl, NULL, phys_addr,
- sg->length,
- PCIIO_BYTE_STREAM |
- PCIIO_DMA_DATA);
-#elif defined(CONFIG_IA64_SGI_SN2)
- dma_addr = pciio_dmatrans_addr(vhdl, NULL, phys_addr,
- sg->length,
- ((IS_PIC_DEVICE(hwdev)) ? 0 : PCIIO_BYTE_STREAM) |
- PCIIO_DMA_DATA);
-#else
-#error unsupported platform
-#endif
- /*
- * See if we got a direct map entry
- */
- if (dma_addr) {
- sg->dma_address = (char *)dma_addr;
- continue;
- }
-
- }
-
- /*
- * It is a 32 bit card and we cannot do direct mapping,
- * so we use an ATE.
- */
- dma_map = 0;
-#ifdef CONFIG_IA64_SGI_SN1
- dma_map = pciio_dmamap_alloc(vhdl, NULL, sg->length,
- PCIIO_BYTE_STREAM |
- PCIIO_DMA_DATA);
-#elif defined(CONFIG_IA64_SGI_SN2)
- dma_map = pciio_dmamap_alloc(vhdl, NULL, sg->length,
- ((IS_PIC_DEVICE(hwdev)) ? 0 : PCIIO_BYTE_STREAM) |
- PCIIO_DMA_DATA);
-#else
-#error unsupported platform
-#endif
- if (!dma_map) {
- printk(KERN_ERR "sn_pci_map_sg: Unable to allocate "
- "anymore 32 bit page map entries.\n");
- BUG();
- }
- dma_addr = pciio_dmamap_addr(dma_map, phys_addr, sg->length);
- sg->dma_address = (char *)dma_addr;
- sg->page = (struct page *)dma_map;
-
- }
-
- return nents;
-
-}
-
-/**
- * sn_pci_unmap_sg - unmap a scatter-gather list
- * @hwdev: device to unmap
- * @sg: scatterlist to unmap
- * @nents: number of scatterlist entries
- * @direction: DMA direction
- *
- * Unmap a set of streaming mode DMA translations. Again, cpu read rules
- * concerning calls here are the same as for pci_unmap_single() below. Also
- * known as sn_pci_unmap_sg() by the IA64 machvec code.
- */
-void
-sn_pci_unmap_sg(struct pci_dev *hwdev, struct scatterlist *sg, int nents, int direction)
-{
- int i;
- struct sn_dma_maps_s *sn_dma_map;
-
- /* can't go anywhere w/o a direction in life */
- if (direction == PCI_DMA_NONE)
- BUG();
-
- for (i = 0; i < nents; i++, sg++)
- if (sg->page) {
- /*
- * We maintain the DMA Map pointer in sg->page if
- * it is ever allocated.
- */
- sg->dma_address = 0;
- sn_dma_map = (struct sn_dma_maps_s *)sg->page;
- pciio_dmamap_done((pciio_dmamap_t)sn_dma_map);
- pciio_dmamap_free((pciio_dmamap_t)sn_dma_map);
- sn_dma_map->dma_addr = 0;
- sg->page = 0;
- }
-
-}
-
-/**
- * sn_pci_map_single - map a single region for DMA
- * @hwdev: device to map for
- * @ptr: kernel virtual address of the region to map
- * @size: size of the region
- * @direction: DMA direction
- *
- * Map the region pointed to by @ptr for DMA and return the
- * DMA address. Also known as platform_pci_map_single() by
- * the IA64 machvec code.
- *
- * We map this to the one step pciio_dmamap_trans interface rather than
- * the two step pciio_dmamap_alloc/pciio_dmamap_addr because we have
- * no way of saving the dmamap handle from the alloc to later free
- * (which is pretty much unacceptable).
- *
- * TODO: simplify our interface;
- * get rid of dev_desc and vhdl (seems redundant given a pci_dev);
- * figure out how to save dmamap handle so can use two step.
- */
-dma_addr_t
-sn_pci_map_single(struct pci_dev *hwdev, void *ptr, size_t size, int direction)
-{
- devfs_handle_t vhdl;
- dma_addr_t dma_addr;
- unsigned long phys_addr;
- struct sn_device_sysdata *device_sysdata;
- pciio_dmamap_t dma_map = NULL;
- struct sn_dma_maps_s *sn_dma_map;
-
- if (direction == PCI_DMA_NONE)
- BUG();
-
- /* SN cannot support DMA addresses smaller than 32 bits. */
- if (IS_PCI32L(hwdev))
- return 0;
-
- /*
- * find vertex for the device
- */
- device_sysdata = (struct sn_device_sysdata *)hwdev->sysdata;
- vhdl = device_sysdata->vhdl;
-
- /*
- * Call our dmamap interface
- */
- dma_addr = 0;
- phys_addr = __pa(ptr);
-
- if (IS_PCIA64(hwdev)) {
- /* This device supports 64 bit DMA addresses. */
- dma_addr = pciio_dmatrans_addr(vhdl, NULL, phys_addr, size,
- ((IS_PIC_DEVICE(hwdev)) ? 0 : PCIIO_BYTE_STREAM) |
- PCIIO_DMA_DATA |
- PCIIO_DMA_A64);
- return dma_addr;
- }
-
- /*
- * Devices that support 32 bit to 63 bit DMA addresses get
- * 32 bit DMA addresses.
- *
- * First try to get a 32 bit direct map register.
- */
- if (IS_PCI32G(hwdev)) {
-#ifdef CONFIG_IA64_SGI_SN1
- dma_addr = pciio_dmatrans_addr(vhdl, NULL, phys_addr, size,
- PCIIO_BYTE_STREAM |
- PCIIO_DMA_DATA);
-#elif defined(CONFIG_IA64_SGI_SN2)
- dma_addr = pciio_dmatrans_addr(vhdl, NULL, phys_addr, size,
- ((IS_PIC_DEVICE(hwdev)) ? 0 : PCIIO_BYTE_STREAM) |
- PCIIO_DMA_DATA);
-#else
-#error unsupported platform
-#endif
- if (dma_addr)
- return dma_addr;
- }
-
- /*
- * It's a 32 bit card and we cannot do direct mapping so
- * let's use the PMU instead.
- */
- dma_map = NULL;
-#ifdef CONFIG_IA64_SGI_SN1
- dma_map = pciio_dmamap_alloc(vhdl, NULL, size, PCIIO_BYTE_STREAM |
- PCIIO_DMA_DATA);
-#elif defined(CONFIG_IA64_SGI_SN2)
- dma_map = pciio_dmamap_alloc(vhdl, NULL, size,
- ((IS_PIC_DEVICE(hwdev)) ? 0 : PCIIO_BYTE_STREAM) |
- PCIIO_DMA_DATA);
-#else
-#error unsupported platform
-#endif
-
- if (!dma_map) {
- printk(KERN_ERR "pci_map_single: Unable to allocate anymore "
- "32 bit page map entries.\n");
- BUG();
- }
-
- dma_addr = (dma_addr_t) pciio_dmamap_addr(dma_map, phys_addr, size);
- sn_dma_map = (struct sn_dma_maps_s *)dma_map;
- sn_dma_map->dma_addr = dma_addr;
-
- return ((dma_addr_t)dma_addr);
-}
-
-/**
- * sn_pci_unmap_single - unmap a region used for DMA
- * @hwdev: device to unmap
- * @dma_addr: DMA address to unmap
- * @size: size of region
- * @direction: DMA direction
- *
- * Unmaps the region pointed to by @dma_addr. Also known as
- * platform_pci_unmap_single() by the IA64 machvec code.
- */
-void
-sn_pci_unmap_single(struct pci_dev *hwdev, dma_addr_t dma_addr, size_t size, int direction)
-{
- struct sn_dma_maps_s *sn_dma_map = NULL;
-
- if (direction == PCI_DMA_NONE)
- BUG();
-
- /*
- * Get the sn_dma_map entry.
- */
- if (IS_PCI32_MAPPED(dma_addr))
- sn_dma_map = find_sn_dma_map(dma_addr, hwdev->bus->number);
-
- /*
- * and free it if necessary...
- */
- if (sn_dma_map) {
- pciio_dmamap_done((pciio_dmamap_t)sn_dma_map);
- pciio_dmamap_free((pciio_dmamap_t)sn_dma_map);
- sn_dma_map->dma_addr = (dma_addr_t)NULL;
- }
-}
-
-/**
- * sn_pci_dma_sync_single - make sure all DMAs have completed
- * @hwdev: device to sync
- * @dma_handle: DMA address to sync
- * @size: size of region
- * @direction: DMA direction
- *
- * This routine is supposed to sync the DMA region specified
- * by @dma_handle into the 'coherence domain'. See sn_dma_sync()
- * above for more information. Also known as
- * platform_pci_dma_sync_single() by the IA64 machvec code.
- */
-void
-sn_pci_dma_sync_single(struct pci_dev *hwdev, dma_addr_t dma_handle, size_t size, int direction)
-{
- if (direction == PCI_DMA_NONE)
- BUG();
-
- sn_dma_sync(hwdev);
-}
-
-/**
- * sn_pci_dma_sync_sg - make sure all DMAs have completed
- * @hwdev: device to sync
- * @sg: scatterlist to sync
- * @nents: number of entries in the scatterlist
- * @direction: DMA direction
- *
- * This routine is supposed to sync the DMA regions specified
- * by @sg into the 'coherence domain'. See sn_dma_sync()
- * above for more information. Also known as
- * platform_pci_dma_sync_sg() by the IA64 machvec code.
- */
-void
-sn_pci_dma_sync_sg(struct pci_dev *hwdev, struct scatterlist *sg, int nents, int direction)
-{
- if (direction == PCI_DMA_NONE)
- BUG();
-
- sn_dma_sync(hwdev);
-}
-
-/**
- * sn_dma_address - get the DMA address for the first entry of a scatterlist
- * @sg: sg to look at
- *
- * Gets the DMA address for the scatterlist @sg. Also known as
- * platform_dma_address() by the IA64 machvec code.
- */
-unsigned long
-sn_dma_address(struct scatterlist *sg)
-{
- return ((unsigned long)sg->dma_address);
-}
-
-/**
- * sn_dma_supported - test a DMA mask
- * @hwdev: device to test
- * @mask: DMA mask to test
- *
- * Return whether the given PCI device DMA address mask can be supported
- * properly. For example, if your device can only drive the low 24-bits
- * during PCI bus mastering, then you would pass 0x00ffffff as the mask to
- * this function. Of course, SN only supports devices that have 32 or more
- * address bits when using the PMU. We could theoretically support <32 bit
- * cards using direct mapping, but we'll worry about that later--on the off
- * chance that someone actually wants to use such a card.
- */
-int
-sn_pci_dma_supported(struct pci_dev *hwdev, u64 mask)
-{
- if (mask < 0xffffffff)
- return 0;
- return 1;
-}
-
-EXPORT_SYMBOL(sn_pci_unmap_single);
-EXPORT_SYMBOL(sn_pci_map_single);
-EXPORT_SYMBOL(sn_pci_dma_sync_single);
-EXPORT_SYMBOL(sn_pci_map_sg);
-EXPORT_SYMBOL(sn_pci_unmap_sg);
-EXPORT_SYMBOL(sn_pci_alloc_consistent);
-EXPORT_SYMBOL(sn_pci_free_consistent);
-EXPORT_SYMBOL(sn_dma_address);
-EXPORT_SYMBOL(sn_pci_dma_supported);
-
+++ /dev/null
-/*
- * arch/ia64/sn/io/pciba.c
- *
- * IRIX PCIBA-inspired user mode PCI interface
- *
- * requires: devfs
- *
- * device nodes show up in /dev/pci/BB/SS.F (where BB is the bus the
- * device is on, SS is the slot the device is in, and F is the
- * device's function on a multi-function card).
- *
- * when compiled into the kernel, it will only be initialized by the
- * sgi sn1 specific initialization code. in this case, device nodes
- * are under /dev/hw/..../
- *
- * This file is subject to the terms and conditions of the GNU General
- * Public License. See the file "COPYING" in the main directory of
- * this archive for more details.
- *
- * Copyright (C) 2001-2002 Silicon Graphics, Inc. All rights reserved.
- *
- * 03262001 - Initial version by Chad Talbott
- */
-
-
-/* jesse's beefs:
-
- register_pci_device should be documented
-
- grossness with do_swap should be documented
-
- big, gross union'ized node_data should be replaced with independent
- structures
-
- replace global list of nodes with global lists of resources. could
- use object oriented approach of allocating and cleaning up
- resources.
-
-*/
-
-
-#include <linux/config.h>
-#ifndef CONFIG_DEVFS_FS
-# error PCIBA requires devfs
-#endif
-
-#include <linux/module.h>
-#include <linux/devfs_fs_kernel.h>
-#include <linux/pci.h>
-#include <linux/list.h>
-
-#include <linux/mm.h>
-#include <linux/slab.h>
-#include <linux/vmalloc.h>
-#include <linux/mman.h>
-#include <linux/init.h>
-#include <linux/raw.h>
-#include <linux/capability.h>
-
-#include <asm/uaccess.h>
-#include <asm/io.h>
-#include <asm/pgalloc.h>
-#include <asm/page.h>
-
-#include <asm/sn/pci/pciba.h>
-
-
-MODULE_DESCRIPTION("User mode PCI interface");
-MODULE_AUTHOR("Chad Talbott");
-
-
-#undef DEBUG_PCIBA
-/* #define DEBUG_PCIBA */
-
-#undef TRACE_PCIBA
-/* #define TRACE_PCIBA */
-
-#if defined(DEBUG_PCIBA)
-# define DPRINTF(x...) printk(KERN_DEBUG x)
-#else
-# define DPRINTF(x...)
-#endif
-
-#if defined(TRACE_PCIBA)
-# if defined(__GNUC__)
-# define TRACE() printk(KERN_DEBUG "%s:%d:%s\n", \
- __FILE__, __LINE__, __FUNCTION__)
-# else
-# define TRACE() printk(KERN_DEBUG "%s:%d\n", __LINE__, __FILE__)
-# endif
-#else
-# define TRACE()
-#endif
-
-
-typedef enum { failure, success } status;
-typedef enum { false, true } boolean;
-
-
-/* major data structures:
-
- struct node_data -
-
- one for each file registered with devfs. contains everything
- that any file's fops would need to know about.
-
- struct dma_allocation -
-
- a single DMA allocation. only the 'dma' nodes care about
- these. they are there primarily to allow the driver to look
- up the kernel virtual address of dma buffers allocated by
- pci_alloc_consistent, as the application is only given the
- physical address (to program the device's dma, presumably) and
- cannot supply the kernel virtual address when freeing the
- buffer.
-
- it's also useful to maintain a list of buffers allocated
- through a specific node to allow some sanity checking by this
- driver. this prevents (for example) a broken application from
- freeing buffers that it didn't allocate, or buffers allocated
- on another node.
-
- global_node_list -
-
- a list of all nodes allocated. this allows the driver to free
- all the memory it has 'kmalloc'd in case of an error, or on
- module removal.
-
- global_dma_list -
-
- a list of all dma buffers allocated by this driver. this
- allows the driver to 'pci_free_consistent' all buffers on
- module removal or error.
-
-*/
-
-
-struct node_data {
- /* flat list of all the device nodes. makes it easy to free
- them all when we're unregistered */
- struct list_head global_node_list;
- devfs_handle_t devfs_handle;
-
- void (* cleanup)(struct node_data *);
-
- union {
- struct {
- struct pci_dev * dev;
- struct list_head dma_allocs;
- boolean mmapped;
- } dma;
- struct {
- struct pci_dev * dev;
- u32 saved_rom_base_reg;
- boolean mmapped;
- } rom;
- struct {
- struct resource * res;
- } base;
- struct {
- struct pci_dev * dev;
- } config;
- } u;
-};
-
-struct dma_allocation {
- struct list_head list;
-
- dma_addr_t handle;
- void * va;
- size_t size;
-};
-
-
-static LIST_HEAD(global_node_list);
-static LIST_HEAD(global_dma_list);
-
-
-/* module entry points */
-int __init pciba_init(void);
-void __exit pciba_exit(void);
-
-static status __init register_with_devfs(void);
-static void __exit unregister_with_devfs(void);
-
-static status __init register_pci_device(devfs_handle_t device_dir_handle,
- struct pci_dev * dev);
-
-/* file operations */
-static int generic_open(struct inode * inode, struct file * file);
-static int rom_mmap(struct file * file, struct vm_area_struct * vma);
-static int rom_release(struct inode * inode, struct file * file);
-static int base_mmap(struct file * file, struct vm_area_struct * vma);
-static int config_ioctl(struct inode * inode, struct file * file,
- unsigned int cmd,
- unsigned long arg);
-static int dma_ioctl(struct inode * inode, struct file * file,
- unsigned int cmd,
- unsigned long arg);
-static int dma_mmap(struct file * file, struct vm_area_struct * vma);
-
-/* support routines */
-static int mmap_pci_address(struct vm_area_struct * vma, unsigned long pci_va);
-static int mmap_kernel_address(struct vm_area_struct * vma, void * kernel_va);
-
-#ifdef DEBUG_PCIBA
-static void dump_nodes(struct list_head * nodes);
-static void dump_allocations(struct list_head * dalp);
-#endif
-
-/* file operations for each type of node */
-static struct file_operations rom_fops = {
- owner: THIS_MODULE,
- mmap: rom_mmap,
- open: generic_open,
- release: rom_release
-};
-
-
-static struct file_operations base_fops = {
- owner: THIS_MODULE,
- mmap: base_mmap,
- open: generic_open
-};
-
-
-static struct file_operations config_fops = {
- owner: THIS_MODULE,
- ioctl: config_ioctl,
- open: generic_open
-};
-
-static struct file_operations dma_fops = {
- owner: THIS_MODULE,
- ioctl: dma_ioctl,
- mmap: dma_mmap,
- open: generic_open
-};
-
-
-module_init(pciba_init);
-module_exit(pciba_exit);
-
-
-int __init
-pciba_init(void)
-{
- TRACE();
-
- if (register_with_devfs() == failure)
- return 1; /* failure */
-
- printk("PCIBA (a user mode PCI interface) initialized.\n");
-
- return 0; /* success */
-}
-
-
-void __exit
-pciba_exit(void)
-{
- TRACE();
-
- /* FIXME: should also free all that memory that we allocated
- ;) */
- unregister_with_devfs();
-}
-
-
-# if 0
-static void __exit
-free_nodes(void)
-{
- struct node_data * nd;
-
- TRACE();
-
- list_for_each(nd, &node_list) {
- kfree(list_entry(nd, struct nd, node_list));
- }
-}
-#endif
-
-#if !defined(CONFIG_IA64_SGI_SN1)
-
-static status __init
-register_with_devfs(void)
-{
- struct pci_dev * dev = NULL;
- devfs_handle_t device_dir_handle;
- char devfs_path[40];
-
- TRACE();
-
- if (!devfs_mk_dir(NULL, "pci", NULL))
- return failure;
-
- /* FIXME: don't forget /dev/pci/mem & /dev/pci/io */
-
- while ((dev = pci_find_device(PCI_ANY_ID, PCI_ANY_ID, dev)) != NULL) {
- sprintf(devfs_path, "pci/%02x/%02x.%x",
- dev->bus->number,
- PCI_SLOT(dev->devfn),
- PCI_FUNC(dev->devfn));
-
- device_dir_handle =
- devfs_mk_dir(NULL, devfs_path, NULL);
- if (device_dir_handle == NULL)
- return failure;
-
- if (register_pci_device(device_dir_handle, dev) == failure) {
- devfs_remove("pci");
- return failure;
- }
- }
-
- return success;
-}
-
-#else
-
-extern devfs_handle_t
-devfn_to_vertex(unsigned char busnum, unsigned int devfn);
-
-static status __init
-register_with_devfs(void)
-{
- struct pci_dev * dev = NULL;
- devfs_handle_t device_dir_handle;
-
- TRACE();
-
- /* FIXME: don't forget /dev/.../pci/mem & /dev/.../pci/io */
-
- while ((dev = pci_find_device(PCI_ANY_ID, PCI_ANY_ID, dev)) != NULL) {
- device_dir_handle = devfn_to_vertex(dev->bus->number,
- dev->devfn);
- if (device_dir_handle == NULL)
- return failure;
-
- if (register_pci_device(device_dir_handle, dev) == failure) {
- devfs_remove("pci");
- return failure;
- }
- }
-
- return success;
-}
-
-static void __exit
-unregister_with_devfs(void)
-{
- struct list_head * lhp;
- struct node_data * nd;
-
- TRACE();
-
- list_for_each(lhp, &global_node_list) {
- nd = list_entry(lhp, struct node_data, global_node_list);
- devfs_unregister(nd->devfs_handle);
- }
-
-}
-
-
-struct node_data * new_node(void)
-{
- struct node_data * node;
-
- TRACE();
-
- node = kmalloc(sizeof(struct node_data), GFP_KERNEL);
- if (node == NULL)
- return NULL;
- list_add(&node->global_node_list, &global_node_list);
- return node;
-}
-
-
-void dma_cleanup(struct node_data * dma_node)
-{
- TRACE();
-
- /* FIXME: should free these allocations */
-#ifdef DEBUG_PCIBA
- dump_allocations(&dma_node->u.dma.dma_allocs);
-#endif
- devfs_unregister(dma_node->devfs_handle);
-}
-
-
-void init_dma_node(struct node_data * node,
- struct pci_dev * dev, devfs_handle_t dh)
-{
- TRACE();
-
- node->devfs_handle = dh;
- node->u.dma.dev = dev;
- node->cleanup = dma_cleanup;
- INIT_LIST_HEAD(&node->u.dma.dma_allocs);
-}
-
-
-void rom_cleanup(struct node_data * rom_node)
-{
- TRACE();
-
- if (rom_node->u.rom.mmapped)
- pci_write_config_dword(rom_node->u.rom.dev,
- PCI_ROM_ADDRESS,
- rom_node->u.rom.saved_rom_base_reg);
- devfs_unregister(rom_node->devfs_handle);
-}
-
-
-void init_rom_node(struct node_data * node,
- struct pci_dev * dev, devfs_handle_t dh)
-{
- TRACE();
-
- node->devfs_handle = dh;
- node->u.rom.dev = dev;
- node->cleanup = rom_cleanup;
- node->u.rom.mmapped = false;
-}
-
-
-static status __init
-register_pci_device(devfs_handle_t device_dir_handle, struct pci_dev * dev)
-{
- struct node_data * nd;
- char devfs_path[20];
- devfs_handle_t node_devfs_handle;
- int ri;
-
- TRACE();
-
-
- /* register nodes for all the device's base address registers */
- for (ri = 0; ri < PCI_ROM_RESOURCE; ri++) {
- if (pci_resource_len(dev, ri) != 0) {
- sprintf(devfs_path, "base/%d", ri);
- if (devfs_register(device_dir_handle, devfs_path,
- DEVFS_FL_NONE,
- 0, 0,
- S_IFREG | S_IRUSR | S_IWUSR,
- &base_fops,
- &dev->resource[ri]) == NULL)
- return failure;
- }
- }
-
- /* register a node corresponding to the first MEM resource on
- the device */
- for (ri = 0; ri < PCI_ROM_RESOURCE; ri++) {
- if (dev->resource[ri].flags & IORESOURCE_MEM &&
- pci_resource_len(dev, ri) != 0) {
- if (devfs_register(device_dir_handle, "mem",
- DEVFS_FL_NONE, 0, 0,
- S_IFREG | S_IRUSR | S_IWUSR,
- &base_fops,
- &dev->resource[ri]) == NULL)
- return failure;
- break;
- }
- }
-
- /* also register a node corresponding to the first IO resource
- on the device */
- for (ri = 0; ri < PCI_ROM_RESOURCE; ri++) {
- if (dev->resource[ri].flags & IORESOURCE_IO &&
- pci_resource_len(dev, ri) != 0) {
- if (devfs_register(device_dir_handle, "io",
- DEVFS_FL_NONE, 0, 0,
- S_IFREG | S_IRUSR | S_IWUSR,
- &base_fops,
- &dev->resource[ri]) == NULL)
- return failure;
- break;
- }
- }
-
- /* register a node corresponding to the device's ROM resource,
- if present */
- if (pci_resource_len(dev, PCI_ROM_RESOURCE) != 0) {
- nd = new_node();
- if (nd == NULL)
- return failure;
- node_devfs_handle = devfs_register(device_dir_handle, "rom",
- DEVFS_FL_NONE, 0, 0,
- S_IFREG | S_IRUSR,
- &rom_fops, nd);
- if (node_devfs_handle == NULL)
- return failure;
- init_rom_node(nd, dev, node_devfs_handle);
- }
-
- /* register a node that allows ioctl's to read and write to
- the device's config space */
- if (devfs_register(device_dir_handle, "config", DEVFS_FL_NONE,
- 0, 0, S_IFREG | S_IRUSR | S_IWUSR,
- &config_fops, dev) == NULL)
- return failure;
-
-
- /* finally, register a node that allows ioctl's to allocate
- and free DMA buffers, as well as memory map those
- buffers. */
- nd = new_node();
- if (nd == NULL)
- return failure;
- node_devfs_handle =
- devfs_register(device_dir_handle, "dma", DEVFS_FL_NONE,
- 0, 0, S_IFREG | S_IRUSR | S_IWUSR,
- &dma_fops, nd);
- if (node_devfs_handle == NULL)
- return failure;
- init_dma_node(nd, dev, node_devfs_handle);
-
-#ifdef DEBUG_PCIBA
- dump_nodes(&global_node_list);
-#endif
-
- return success;
-}
-
-
-static int
-generic_open(struct inode * inode, struct file * file)
-{
- TRACE();
-
- /* FIXME: should check that they're not trying to open the ROM
- writable */
-
- return 0; /* success */
-}
-
-
-static int
-rom_mmap(struct file * file, struct vm_area_struct * vma)
-{
- unsigned long pci_pa;
- struct node_data * nd;
-
- TRACE();
-
- nd = (struct node_data * )file->private_data;
-
- pci_pa = pci_resource_start(nd->u.rom.dev, PCI_ROM_RESOURCE);
-
- if (!nd->u.rom.mmapped) {
- nd->u.rom.mmapped = true;
- DPRINTF("Enabling ROM address decoder.\n");
- DPRINTF(
-"rom_mmap: FIXME: some cards do not allow both ROM and memory addresses to\n"
-"rom_mmap: FIXME: be enabled simultaneously, as they share a decoder.\n");
- pci_read_config_dword(nd->u.rom.dev, PCI_ROM_ADDRESS,
- &nd->u.rom.saved_rom_base_reg);
- DPRINTF("ROM base address contains %x\n",
- nd->u.rom.saved_rom_base_reg);
- pci_write_config_dword(nd->u.rom.dev, PCI_ROM_ADDRESS,
- nd->u.rom.saved_rom_base_reg |
- PCI_ROM_ADDRESS_ENABLE);
- }
-
- return mmap_pci_address(vma, pci_pa);
-}
-
-
-static int
-rom_release(struct inode * inode, struct file * file)
-{
- struct node_data * nd;
-
- TRACE();
-
- nd = (struct node_data * )file->private_data;
-
- if (nd->u.rom.mmapped) {
- nd->u.rom.mmapped = false;
- DPRINTF("Disabling ROM address decoder.\n");
- pci_write_config_dword(nd->u.rom.dev, PCI_ROM_ADDRESS,
- nd->u.rom.saved_rom_base_reg);
- }
- return 0; /* indicate success */
-}
-
-
-static int
-base_mmap(struct file * file, struct vm_area_struct * vma)
-{
- struct resource * resource;
-
- TRACE();
-
- resource = (struct resource *)file->private_data;
-
- return mmap_pci_address(vma, resource->start);
-}
-
-
-static int
-config_ioctl(struct inode * inode, struct file * file,
- unsigned int cmd,
- unsigned long arg)
-{
- struct pci_dev * dev;
-
- union cfg_data {
- uint8_t byte;
- uint16_t word;
- uint32_t dword;
- } read_data, write_data;
-
- int dir, size, offset;
-
- TRACE();
-
- DPRINTF("cmd = %x (DIR = %x, TYPE = %x, NR = %x, SIZE = %x)\n",
- cmd,
- _IOC_DIR(cmd), _IOC_TYPE(cmd), _IOC_NR(cmd), _IOC_SIZE(cmd));
- DPRINTF("arg = %lx\n", arg);
-
- dev = (struct pci_dev *)file->private_data;
-
- /* PCIIOCCFG{RD,WR}: read and/or write PCI configuration
- space. If both, the read happens first (this becomes a swap
- operation, atomic with respect to other updates through
- this path). */
-
- dir = _IOC_DIR(cmd);
-
-#define do_swap(suffix, type) \
- do { \
- if (dir & _IOC_READ) { \
- pci_read_config_##suffix(dev, _IOC_NR(cmd), \
- &read_data.suffix); \
- } \
- if (dir & _IOC_WRITE) { \
- get_user(write_data.suffix, (type)arg); \
- pci_write_config_##suffix(dev, _IOC_NR(cmd), \
- write_data.suffix); \
- } \
- if (dir & _IOC_READ) { \
- put_user(read_data.suffix, (type)arg); \
- } \
- } while (0)
-
- size = _IOC_SIZE(cmd);
- offset = _IOC_NR(cmd);
-
- DPRINTF("sanity check\n");
- if (((size > 0) || (size <= 4)) &&
- ((offset + size) <= 256) &&
- (dir & (_IOC_READ | _IOC_WRITE))) {
-
- switch (size)
- {
- case 1:
- do_swap(byte, uint8_t *);
- break;
- case 2:
- do_swap(word, uint16_t *);
- break;
- case 4:
- do_swap(dword, uint32_t *);
- break;
- default:
- DPRINTF("invalid ioctl\n");
- return -EINVAL;
- }
- } else
- return -EINVAL;
-
- return 0;
-}
-
-
-#ifdef DEBUG_PCIBA
-static void
-dump_allocations(struct list_head * dalp)
-{
- struct dma_allocation * dap;
- struct list_head * p;
-
- printk("{\n");
- list_for_each(p, dalp) {
- dap = list_entry(p, struct dma_allocation,
- list);
- printk(" handle = %lx, va = %p\n",
- dap->handle, dap->va);
- }
- printk("}\n");
-}
-
-static void
-dump_nodes(struct list_head * nodes)
-{
- struct node_data * ndp;
- struct list_head * p;
-
- printk("{\n");
- list_for_each(p, nodes) {
- ndp = list_entry(p, struct node_data,
- global_node_list);
- printk(" %p\n", (void *)ndp);
- }
- printk("}\n");
-}
-
-
-#if 0
-#define NEW(ptr) (ptr = kmalloc(sizeof (*(ptr)), GFP_KERNEL))
-
-static void
-test_list(void)
-{
- u64 i;
- LIST_HEAD(the_list);
-
- for (i = 0; i < 5; i++) {
- struct dma_allocation * new_alloc;
- NEW(new_alloc);
- new_alloc->va = (void *)i;
- new_alloc->handle = 5*i;
- printk("%d - the_list->next = %lx\n", i, the_list.next);
- list_add(&new_alloc->list, &the_list);
- }
- dump_allocations(&the_list);
-}
-#endif
-#endif
-
-
-static LIST_HEAD(dma_buffer_list);
-
-
-static int
-dma_ioctl(struct inode * inode, struct file * file,
- unsigned int cmd,
- unsigned long arg)
-{
- struct node_data * nd;
- uint64_t argv;
- int result;
- struct dma_allocation * dma_alloc;
- struct list_head * iterp;
-
- TRACE();
-
- DPRINTF("cmd = %x\n", cmd);
- DPRINTF("arg = %lx\n", arg);
-
- nd = (struct node_data *)file->private_data;
-
-#ifdef DEBUG_PCIBA
- DPRINTF("at dma_ioctl entry\n");
- dump_allocations(&nd->u.dma.dma_allocs);
-#endif
-
- switch (cmd) {
- case PCIIOCDMAALLOC:
- /* PCIIOCDMAALLOC: allocate a chunk of physical memory
- and set it up for DMA. Return the PCI address that
- gets to it. */
- DPRINTF("case PCIIOCDMAALLOC (%lx)\n", PCIIOCDMAALLOC);
-
- if ( (result = get_user(argv, (uint64_t *)arg)) )
- return result;
- DPRINTF("argv (size of buffer) = %lx\n", argv);
-
- dma_alloc = (struct dma_allocation *)
- kmalloc(sizeof(struct dma_allocation), GFP_KERNEL);
- if (dma_alloc == NULL)
- return -ENOMEM;
-
- dma_alloc->size = (size_t)argv;
- dma_alloc->va = pci_alloc_consistent(nd->u.dma.dev,
- dma_alloc->size,
- &dma_alloc->handle);
- DPRINTF("dma_alloc->va = %p, dma_alloc->handle = %lx\n",
- dma_alloc->va, dma_alloc->handle);
- if (dma_alloc->va == NULL) {
- kfree(dma_alloc);
- return -ENOMEM;
- }
-
- list_add(&dma_alloc->list, &nd->u.dma.dma_allocs);
- if ( (result = put_user((uint64_t)dma_alloc->handle,
- (uint64_t *)arg)) ) {
- DPRINTF("put_user failed\n");
- pci_free_consistent(nd->u.dma.dev, (size_t)argv,
- dma_alloc->va, dma_alloc->handle);
- kfree(dma_alloc);
- return result;
- }
-
-#ifdef DEBUG_PCIBA
- DPRINTF("after insertion\n");
- dump_allocations(&nd->u.dma.dma_allocs);
-#endif
- break;
-
- case PCIIOCDMAFREE:
- DPRINTF("case PCIIOCDMAFREE (%lx)\n", PCIIOCDMAFREE);
-
- if ( (result = get_user(argv, (uint64_t *)arg)) ) {
- DPRINTF("get_user failed\n");
- return result;
- }
-
- DPRINTF("argv (physical address of DMA buffer) = %lx\n", argv);
- list_for_each(iterp, &nd->u.dma.dma_allocs) {
- struct dma_allocation * da =
- list_entry(iterp, struct dma_allocation, list);
- if (da->handle == argv) {
- pci_free_consistent(nd->u.dma.dev, da->size,
- da->va, da->handle);
- list_del(&da->list);
- kfree(da);
-#ifdef DEBUG_PCIBA
- DPRINTF("after deletion\n");
- dump_allocations(&nd->u.dma.dma_allocs);
-#endif
- return 0; /* success */
- }
- }
- /* previously allocated dma buffer wasn't found */
- DPRINTF("attempt to free invalid dma handle\n");
- return -EINVAL;
-
- default:
- DPRINTF("undefined ioctl\n");
- return -EINVAL;
- }
-
- DPRINTF("success\n");
- return 0;
-}
-
-
-static int
-dma_mmap(struct file * file, struct vm_area_struct * vma)
-{
- struct node_data * nd;
- struct list_head * iterp;
- int result;
-
- TRACE();
-
- nd = (struct node_data *)file->private_data;
-
- DPRINTF("vma->vm_start is %lx\n", vma->vm_start);
- DPRINTF("vma->vm_end is %lx\n", vma->vm_end);
- DPRINTF("offset = %lx\n", vma->vm_pgoff);
-
- /* get kernel virtual address for the dma buffer (necessary
- * for the mmap). */
- list_for_each(iterp, &nd->u.dma.dma_allocs) {
- struct dma_allocation * da =
- list_entry(iterp, struct dma_allocation, list);
- /* why does mmap shift its offset argument? */
- if (da->handle == vma->vm_pgoff << PAGE_SHIFT) {
- DPRINTF("found dma handle\n");
- if ( (result = mmap_kernel_address(vma,
- da->va)) ) {
- return result; /* failure */
- } else {
- /* it seems like at least one of these
- should show up in user land....
- I'm missing something */
- *(char *)da->va = 0xaa;
- strncpy(da->va, " Toastie!", da->size);
- if (put_user(0x18badbeeful,
- (u64 *)vma->vm_start))
- DPRINTF("put_user failed?!\n");
- return 0; /* success */
- }
-
- }
- }
- DPRINTF("attempt to mmap an invalid dma handle\n");
- return -EINVAL;
-}
-
-
-static int
-mmap_pci_address(struct vm_area_struct * vma, unsigned long pci_va)
-{
- unsigned long pci_pa;
-
- TRACE();
-
- DPRINTF("vma->vm_start is %lx\n", vma->vm_start);
- DPRINTF("vma->vm_end is %lx\n", vma->vm_end);
-
- /* the size of the vma doesn't necessarily correspond to the
- size specified in the mmap call. So we can't really do any
- kind of sanity check here. This is a dangerous driver, and
- it's very easy for a user process to kill the machine. */
-
- DPRINTF("PCI base at virtual address %lx\n", pci_va);
- /* the __pa macro is intended for region 7 on IA64, so it
- doesn't work for region 6 */
- /* pci_pa = __pa(pci_va); */
- /* should be replaced by __tpa or equivalent (preferably a
- generic equivalent) */
- pci_pa = pci_va & ~0xe000000000000000ul;
- DPRINTF("PCI base at physical address %lx\n", pci_pa);
-
- /* there are various arch-specific versions of this function
- defined in linux/drivers/char/mem.c, but it would be nice
- if all architectures put it in pgtable.h. it's defined
- there for ia64.... */
- vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
-
- vma->vm_flags |= VM_NONCACHED | VM_RESERVED | VM_IO;
-
- return io_remap_page_range(vma->vm_start, pci_pa,
- vma->vm_end-vma->vm_start,
- vma->vm_page_prot);
-}
-
-
-static int
-mmap_kernel_address(struct vm_area_struct * vma, void * kernel_va)
-{
- unsigned long kernel_pa;
-
- TRACE();
-
- DPRINTF("vma->vm_start is %lx\n", vma->vm_start);
- DPRINTF("vma->vm_end is %lx\n", vma->vm_end);
-
- /* the size of the vma doesn't necessarily correspond to the
- size specified in the mmap call. So we can't really do any
- kind of sanity check here. This is a dangerous driver, and
- it's very easy for a user process to kill the machine. */
-
- DPRINTF("mapping virtual address %p\n", kernel_va);
- kernel_pa = __pa(kernel_va);
- DPRINTF("mapping physical address %lx\n", kernel_pa);
-
- vma->vm_flags |= VM_NONCACHED | VM_RESERVED | VM_IO;
-
- return remap_page_range(vma->vm_start, kernel_pa,
- vma->vm_end-vma->vm_start,
- vma->vm_page_prot);
-}
+++ /dev/null
-/* $Id$
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Copyright (C) 1992 - 1997, 2000-2002 Silicon Graphics, Inc. All rights reserved.
- */
-
-#define USRPCI 0
-
-#include <linux/init.h>
-#include <linux/types.h>
-#include <linux/config.h>
-#include <linux/pci.h>
-#include <linux/pci_ids.h>
-#include <linux/sched.h>
-#include <linux/ioport.h>
-#include <linux/slab.h>
-#include <asm/sn/sgi.h>
-#include <asm/sn/xtalk/xbow.h> /* Must be before iograph.h to get MAX_PORT_NUM */
-#include <asm/sn/iograph.h>
-#include <asm/sn/invent.h>
-#include <asm/sn/hcl.h>
-#include <asm/sn/hcl_util.h>
-#include <asm/sn/labelcl.h>
-#include <asm/sn/pci/bridge.h>
-#include <asm/sn/ioerror_handling.h>
-#include <asm/sn/pci/pciio.h>
-#include <asm/sn/pci/pciio_private.h>
-#include <asm/sn/sn_sal.h>
-#include <asm/sn/io.h>
-#include <asm/sn/pci/pci_bus_cvlink.h>
-#include <asm/sn/simulator.h>
-
-#define DEBUG_PCIIO
-#undef DEBUG_PCIIO /* turn this on for yet more console output */
-
-
-#define GET_NEW(ptr) (ptr = kmalloc(sizeof (*(ptr)), GFP_KERNEL))
-#define DO_DEL(ptr) (kfree(ptr))
-
-char pciio_info_fingerprint[] = "pciio_info";
-
-cdl_p pciio_registry = NULL;
-
-int
-badaddr_val(volatile void *addr, int len, volatile void *ptr)
-{
- int ret = 0;
- volatile void *new_addr;
-
- switch (len) {
- case 4:
- new_addr = (void *)(((u64) addr)^4);
- ret = ia64_sn_probe_io_slot((long)new_addr, len, (void *)ptr);
- break;
- default:
- printk(KERN_WARNING "badaddr_val given len %x but supports len of 4 only\n", len);
- }
-
- if (ret < 0)
- panic("badaddr_val: unexpected status (%d) in probing", ret);
- return(ret);
-
-}
-
-
-nasid_t
-get_console_nasid(void)
-{
- extern nasid_t console_nasid;
- if (console_nasid < 0) {
- console_nasid = ia64_sn_get_console_nasid();
- if (console_nasid < 0) {
-// ZZZ What do we do if we don't get a console nasid on the hardware????
- if (IS_RUNNING_ON_SIMULATOR() )
- console_nasid = master_nasid;
- }
- }
- return console_nasid;
-}
-
-int
-hub_dma_enabled(devfs_handle_t xconn_vhdl)
-{
- return(0);
-}
-
-int
-hub_error_devenable(devfs_handle_t xconn_vhdl, int devnum, int error_code)
-{
- return(0);
-}
-
-void
-ioerror_dump(char *name, int error_code, int error_mode, ioerror_t *ioerror)
-{
-}
-
-/******
- ****** end hack defines ......
- ******/
-
-
-
-
-/* =====================================================================
- * PCI Generic Bus Provider
- * Implement PCI provider operations. The pciio* layer provides a
- * platform-independent interface for PCI devices. This layer
- * switches among the possible implementations of a PCI adapter.
- */
-
-/* =====================================================================
- * Provider Function Location SHORTCUT
- *
- * On platforms with only one possible PCI provider, macros can be
- * set up at the top that cause the table lookups and indirections to
- * completely disappear.
- */
-
-#if defined(CONFIG_IA64_SGI_SN1)
-/*
- * For the moment, we will assume that IP27
- * only use Bridge ASICs to provide PCI support.
- */
-#include <asm/sn/pci/pcibr.h>
-#define DEV_FUNC(dev,func) pcibr_##func
-#define CAST_PIOMAP(x) ((pcibr_piomap_t)(x))
-#define CAST_DMAMAP(x) ((pcibr_dmamap_t)(x))
-#define CAST_INTR(x) ((pcibr_intr_t)(x))
-#endif /* CONFIG_IA64_SGI_SN1 */
-
-/* =====================================================================
- * Function Table of Contents
- */
-
-#if !defined(DEV_FUNC)
-static pciio_provider_t *pciio_to_provider_fns(devfs_handle_t dev);
-#endif
-
-pciio_piomap_t pciio_piomap_alloc(devfs_handle_t, device_desc_t, pciio_space_t, iopaddr_t, size_t, size_t, unsigned);
-void pciio_piomap_free(pciio_piomap_t);
-caddr_t pciio_piomap_addr(pciio_piomap_t, iopaddr_t, size_t);
-
-void pciio_piomap_done(pciio_piomap_t);
-caddr_t pciio_piotrans_addr(devfs_handle_t, device_desc_t, pciio_space_t, iopaddr_t, size_t, unsigned);
-caddr_t pciio_pio_addr(devfs_handle_t, device_desc_t, pciio_space_t, iopaddr_t, size_t, pciio_piomap_t *, unsigned);
-
-iopaddr_t pciio_piospace_alloc(devfs_handle_t, device_desc_t, pciio_space_t, size_t, size_t);
-void pciio_piospace_free(devfs_handle_t, pciio_space_t, iopaddr_t, size_t);
-
-pciio_dmamap_t pciio_dmamap_alloc(devfs_handle_t, device_desc_t, size_t, unsigned);
-void pciio_dmamap_free(pciio_dmamap_t);
-iopaddr_t pciio_dmamap_addr(pciio_dmamap_t, paddr_t, size_t);
-alenlist_t pciio_dmamap_list(pciio_dmamap_t, alenlist_t, unsigned);
-void pciio_dmamap_done(pciio_dmamap_t);
-iopaddr_t pciio_dmatrans_addr(devfs_handle_t, device_desc_t, paddr_t, size_t, unsigned);
-alenlist_t pciio_dmatrans_list(devfs_handle_t, device_desc_t, alenlist_t, unsigned);
-void pciio_dmamap_drain(pciio_dmamap_t);
-void pciio_dmaaddr_drain(devfs_handle_t, paddr_t, size_t);
-void pciio_dmalist_drain(devfs_handle_t, alenlist_t);
-iopaddr_t pciio_dma_addr(devfs_handle_t, device_desc_t, paddr_t, size_t, pciio_dmamap_t *, unsigned);
-
-pciio_intr_t pciio_intr_alloc(devfs_handle_t, device_desc_t, pciio_intr_line_t, devfs_handle_t);
-void pciio_intr_free(pciio_intr_t);
-int pciio_intr_connect(pciio_intr_t);
-void pciio_intr_disconnect(pciio_intr_t);
-devfs_handle_t pciio_intr_cpu_get(pciio_intr_t);
-
-void pciio_slot_func_to_name(char *, pciio_slot_t, pciio_function_t);
-
-void pciio_provider_startup(devfs_handle_t);
-void pciio_provider_shutdown(devfs_handle_t);
-
-pciio_endian_t pciio_endian_set(devfs_handle_t, pciio_endian_t, pciio_endian_t);
-pciio_priority_t pciio_priority_set(devfs_handle_t, pciio_priority_t);
-devfs_handle_t pciio_intr_dev_get(pciio_intr_t);
-
-devfs_handle_t pciio_pio_dev_get(pciio_piomap_t);
-pciio_slot_t pciio_pio_slot_get(pciio_piomap_t);
-pciio_space_t pciio_pio_space_get(pciio_piomap_t);
-iopaddr_t pciio_pio_pciaddr_get(pciio_piomap_t);
-ulong pciio_pio_mapsz_get(pciio_piomap_t);
-caddr_t pciio_pio_kvaddr_get(pciio_piomap_t);
-
-devfs_handle_t pciio_dma_dev_get(pciio_dmamap_t);
-pciio_slot_t pciio_dma_slot_get(pciio_dmamap_t);
-
-pciio_info_t pciio_info_chk(devfs_handle_t);
-pciio_info_t pciio_info_get(devfs_handle_t);
-void pciio_info_set(devfs_handle_t, pciio_info_t);
-devfs_handle_t pciio_info_dev_get(pciio_info_t);
-pciio_slot_t pciio_info_slot_get(pciio_info_t);
-pciio_function_t pciio_info_function_get(pciio_info_t);
-pciio_vendor_id_t pciio_info_vendor_id_get(pciio_info_t);
-pciio_device_id_t pciio_info_device_id_get(pciio_info_t);
-devfs_handle_t pciio_info_master_get(pciio_info_t);
-arbitrary_info_t pciio_info_mfast_get(pciio_info_t);
-pciio_provider_t *pciio_info_pops_get(pciio_info_t);
-error_handler_f *pciio_info_efunc_get(pciio_info_t);
-error_handler_arg_t *pciio_info_einfo_get(pciio_info_t);
-pciio_space_t pciio_info_bar_space_get(pciio_info_t, int);
-iopaddr_t pciio_info_bar_base_get(pciio_info_t, int);
-size_t pciio_info_bar_size_get(pciio_info_t, int);
-iopaddr_t pciio_info_rom_base_get(pciio_info_t);
-size_t pciio_info_rom_size_get(pciio_info_t);
-
-void pciio_init(void);
-int pciio_attach(devfs_handle_t);
-
-void pciio_provider_register(devfs_handle_t, pciio_provider_t *pciio_fns);
-void pciio_provider_unregister(devfs_handle_t);
-pciio_provider_t *pciio_provider_fns_get(devfs_handle_t);
-
-int pciio_driver_register(pciio_vendor_id_t, pciio_device_id_t, char *driver_prefix, unsigned);
-void pciio_driver_unregister(char *driver_prefix);
-
-devfs_handle_t pciio_device_register(devfs_handle_t, devfs_handle_t, pciio_slot_t, pciio_function_t, pciio_vendor_id_t, pciio_device_id_t);
-
-void pciio_device_unregister(devfs_handle_t);
-pciio_info_t pciio_device_info_new(pciio_info_t, devfs_handle_t, pciio_slot_t, pciio_function_t, pciio_vendor_id_t, pciio_device_id_t);
-void pciio_device_info_free(pciio_info_t);
-devfs_handle_t pciio_device_info_register(devfs_handle_t, pciio_info_t);
-void pciio_device_info_unregister(devfs_handle_t, pciio_info_t);
-int pciio_device_attach(devfs_handle_t, int);
-int pciio_device_detach(devfs_handle_t, int);
-void pciio_error_register(devfs_handle_t, error_handler_f *, error_handler_arg_t);
-
-int pciio_reset(devfs_handle_t);
-int pciio_write_gather_flush(devfs_handle_t);
-int pciio_slot_inuse(devfs_handle_t);
-
-/* =====================================================================
- * Provider Function Location
- *
- * If there is more than one possible provider for
- * this platform, we need to examine the master
- * vertex of the current vertex for a provider
- * function structure, and indirect through the
- * appropriately named member.
- */
-
-#if !defined(DEV_FUNC)
-
-static pciio_provider_t *
-pciio_to_provider_fns(devfs_handle_t dev)
-{
- pciio_info_t card_info;
- pciio_provider_t *provider_fns;
-
- /*
- * We're called with two types of vertices, one is
- * the bridge vertex (ends with "pci") and the other is the
- * pci slot vertex (ends with "pci/[0-8]"). For the first type
- * we need to get the provider from the PFUNCS label. For
- * the second we get it from fastinfo/c_pops.
- */
- provider_fns = pciio_provider_fns_get(dev);
- if (provider_fns == NULL) {
- card_info = pciio_info_get(dev);
- if (card_info != NULL) {
- provider_fns = pciio_info_pops_get(card_info);
- }
- }
-
- if (provider_fns == NULL)
-#if defined(SUPPORT_PRINTING_V_FORMAT)
- PRINT_PANIC("%v: provider_fns == NULL", dev);
-#else
- PRINT_PANIC("0x%p: provider_fns == NULL", (void *)dev);
-#endif
-
- return provider_fns;
-
-}
-
-#define DEV_FUNC(dev,func) pciio_to_provider_fns(dev)->func
-#define CAST_PIOMAP(x) ((pciio_piomap_t)(x))
-#define CAST_DMAMAP(x) ((pciio_dmamap_t)(x))
-#define CAST_INTR(x) ((pciio_intr_t)(x))
-#endif
-
-/*
- * Many functions are not passed their vertex
- * information directly; rather, they must
- * dive through a resource map. These macros
- * are available to coordinate this detail.
- */
-#define PIOMAP_FUNC(map,func) DEV_FUNC((map)->pp_dev,func)
-#define DMAMAP_FUNC(map,func) DEV_FUNC((map)->pd_dev,func)
-#define INTR_FUNC(intr_hdl,func) DEV_FUNC((intr_hdl)->pi_dev,func)
-
-/* =====================================================================
- * PIO MANAGEMENT
- *
- * For mapping system virtual address space to
- * pciio space on a specified card
- */
-
-pciio_piomap_t
-pciio_piomap_alloc(devfs_handle_t dev, /* set up mapping for this device */
- device_desc_t dev_desc, /* device descriptor */
- pciio_space_t space, /* CFG, MEM, IO, or a device-decoded window */
- iopaddr_t addr, /* lowest address (or offset in window) */
- size_t byte_count, /* size of region containing our mappings */
- size_t byte_count_max, /* maximum size of a mapping */
- unsigned flags)
-{ /* defined in sys/pio.h */
- return (pciio_piomap_t) DEV_FUNC(dev, piomap_alloc)
- (dev, dev_desc, space, addr, byte_count, byte_count_max, flags);
-}
-
-void
-pciio_piomap_free(pciio_piomap_t pciio_piomap)
-{
- PIOMAP_FUNC(pciio_piomap, piomap_free)
- (CAST_PIOMAP(pciio_piomap));
-}
-
-caddr_t
-pciio_piomap_addr(pciio_piomap_t pciio_piomap, /* mapping resources */
- iopaddr_t pciio_addr, /* map for this pciio address */
- size_t byte_count)
-{ /* map this many bytes */
- pciio_piomap->pp_kvaddr = PIOMAP_FUNC(pciio_piomap, piomap_addr)
- (CAST_PIOMAP(pciio_piomap), pciio_addr, byte_count);
-
- return pciio_piomap->pp_kvaddr;
-}
-
-void
-pciio_piomap_done(pciio_piomap_t pciio_piomap)
-{
- PIOMAP_FUNC(pciio_piomap, piomap_done)
- (CAST_PIOMAP(pciio_piomap));
-}
-
-caddr_t
-pciio_piotrans_addr(devfs_handle_t dev, /* translate for this device */
- device_desc_t dev_desc, /* device descriptor */
- pciio_space_t space, /* CFG, MEM, IO, or a device-decoded window */
- iopaddr_t addr, /* starting address (or offset in window) */
- size_t byte_count, /* map this many bytes */
- unsigned flags)
-{ /* (currently unused) */
- return DEV_FUNC(dev, piotrans_addr)
- (dev, dev_desc, space, addr, byte_count, flags);
-}
-
-caddr_t
-pciio_pio_addr(devfs_handle_t dev, /* translate for this device */
- device_desc_t dev_desc, /* device descriptor */
- pciio_space_t space, /* CFG, MEM, IO, or a device-decoded window */
- iopaddr_t addr, /* starting address (or offset in window) */
- size_t byte_count, /* map this many bytes */
- pciio_piomap_t *mapp, /* where to return the map pointer */
- unsigned flags)
-{ /* PIO flags */
- pciio_piomap_t map = 0;
- int errfree = 0;
- caddr_t res;
-
- if (mapp) {
- map = *mapp; /* possible pre-allocated map */
- *mapp = 0; /* record "no map used" */
- }
-
- res = pciio_piotrans_addr
- (dev, dev_desc, space, addr, byte_count, flags);
- if (res)
- return res; /* pciio_piotrans worked */
-
- if (!map) {
- map = pciio_piomap_alloc
- (dev, dev_desc, space, addr, byte_count, byte_count, flags);
- if (!map)
- return res; /* pciio_piomap_alloc failed */
- errfree = 1;
- }
-
- res = pciio_piomap_addr
- (map, addr, byte_count);
- if (!res) {
- if (errfree)
- pciio_piomap_free(map);
- return res; /* pciio_piomap_addr failed */
- }
- if (mapp)
- *mapp = map; /* pass back map used */
-
- return res; /* pciio_piomap_addr succeeded */
-}
-
-iopaddr_t
-pciio_piospace_alloc(devfs_handle_t dev, /* Device requiring space */
- device_desc_t dev_desc, /* Device descriptor */
- pciio_space_t space, /* MEM32/MEM64/IO */
- size_t byte_count, /* Size of mapping */
- size_t align)
-{ /* Alignment needed */
- if (align < NBPP)
- align = NBPP;
- return DEV_FUNC(dev, piospace_alloc)
- (dev, dev_desc, space, byte_count, align);
-}
-
-void
-pciio_piospace_free(devfs_handle_t dev, /* Device freeing space */
- pciio_space_t space, /* Type of space */
- iopaddr_t pciaddr, /* starting address */
- size_t byte_count)
-{ /* Range of address */
- DEV_FUNC(dev, piospace_free)
- (dev, space, pciaddr, byte_count);
-}
-
-/* =====================================================================
- * DMA MANAGEMENT
- *
- * For mapping from pci space to system
- * physical space.
- */
-
-pciio_dmamap_t
-pciio_dmamap_alloc(devfs_handle_t dev, /* set up mappings for this device */
- device_desc_t dev_desc, /* device descriptor */
- size_t byte_count_max, /* max size of a mapping */
- unsigned flags)
-{ /* defined in dma.h */
- return (pciio_dmamap_t) DEV_FUNC(dev, dmamap_alloc)
- (dev, dev_desc, byte_count_max, flags);
-}
-
-void
-pciio_dmamap_free(pciio_dmamap_t pciio_dmamap)
-{
- DMAMAP_FUNC(pciio_dmamap, dmamap_free)
- (CAST_DMAMAP(pciio_dmamap));
-}
-
-iopaddr_t
-pciio_dmamap_addr(pciio_dmamap_t pciio_dmamap, /* use these mapping resources */
- paddr_t paddr, /* map for this address */
- size_t byte_count)
-{ /* map this many bytes */
- return DMAMAP_FUNC(pciio_dmamap, dmamap_addr)
- (CAST_DMAMAP(pciio_dmamap), paddr, byte_count);
-}
-
-alenlist_t
-pciio_dmamap_list(pciio_dmamap_t pciio_dmamap, /* use these mapping resources */
- alenlist_t alenlist, /* map this Address/Length List */
- unsigned flags)
-{
- return DMAMAP_FUNC(pciio_dmamap, dmamap_list)
- (CAST_DMAMAP(pciio_dmamap), alenlist, flags);
-}
-
-void
-pciio_dmamap_done(pciio_dmamap_t pciio_dmamap)
-{
- DMAMAP_FUNC(pciio_dmamap, dmamap_done)
- (CAST_DMAMAP(pciio_dmamap));
-}
-
-iopaddr_t
-pciio_dmatrans_addr(devfs_handle_t dev, /* translate for this device */
- device_desc_t dev_desc, /* device descriptor */
- paddr_t paddr, /* system physical address */
- size_t byte_count, /* length */
- unsigned flags)
-{ /* defined in dma.h */
- return DEV_FUNC(dev, dmatrans_addr)
- (dev, dev_desc, paddr, byte_count, flags);
-}
-
-alenlist_t
-pciio_dmatrans_list(devfs_handle_t dev, /* translate for this device */
- device_desc_t dev_desc, /* device descriptor */
- alenlist_t palenlist, /* system address/length list */
- unsigned flags)
-{ /* defined in dma.h */
- return DEV_FUNC(dev, dmatrans_list)
- (dev, dev_desc, palenlist, flags);
-}
-
-iopaddr_t
-pciio_dma_addr(devfs_handle_t dev, /* translate for this device */
- device_desc_t dev_desc, /* device descriptor */
- paddr_t paddr, /* system physical address */
- size_t byte_count, /* length */
- pciio_dmamap_t *mapp, /* map to use, then map we used */
- unsigned flags)
-{ /* PIO flags */
- pciio_dmamap_t map = 0;
- int errfree = 0;
- iopaddr_t res;
-
- if (mapp) {
- map = *mapp; /* possible pre-allocated map */
- *mapp = 0; /* record "no map used" */
- }
-
- res = pciio_dmatrans_addr
- (dev, dev_desc, paddr, byte_count, flags);
- if (res)
- return res; /* pciio_dmatrans worked */
-
- if (!map) {
- map = pciio_dmamap_alloc
- (dev, dev_desc, byte_count, flags);
- if (!map)
- return res; /* pciio_dmamap_alloc failed */
- errfree = 1;
- }
-
- res = pciio_dmamap_addr
- (map, paddr, byte_count);
- if (!res) {
- if (errfree)
- pciio_dmamap_free(map);
- return res; /* pciio_dmamap_addr failed */
- }
- if (mapp)
- *mapp = map; /* pass back map used */
-
- return res; /* pciio_dmamap_addr succeeded */
-}
-
-void
-pciio_dmamap_drain(pciio_dmamap_t map)
-{
- DMAMAP_FUNC(map, dmamap_drain)
- (CAST_DMAMAP(map));
-}
-
-void
-pciio_dmaaddr_drain(devfs_handle_t dev, paddr_t addr, size_t size)
-{
- DEV_FUNC(dev, dmaaddr_drain)
- (dev, addr, size);
-}
-
-void
-pciio_dmalist_drain(devfs_handle_t dev, alenlist_t list)
-{
- DEV_FUNC(dev, dmalist_drain)
- (dev, list);
-}
-
-/* =====================================================================
- * INTERRUPT MANAGEMENT
- *
- * Allow crosstalk devices to establish interrupts
- */
-
-/*
- * Allocate resources required for an interrupt as specified in intr_desc.
- * Return resource handle in intr_hdl.
- */
-pciio_intr_t
-pciio_intr_alloc(devfs_handle_t dev, /* which Crosstalk device */
- device_desc_t dev_desc, /* device descriptor */
- pciio_intr_line_t lines, /* INTR line(s) to attach */
- devfs_handle_t owner_dev)
-{ /* owner of this interrupt */
- return (pciio_intr_t) DEV_FUNC(dev, intr_alloc)
- (dev, dev_desc, lines, owner_dev);
-}
-
-/*
- * Free resources consumed by intr_alloc.
- */
-void
-pciio_intr_free(pciio_intr_t intr_hdl)
-{
- INTR_FUNC(intr_hdl, intr_free)
- (CAST_INTR(intr_hdl));
-}
-
-/*
- * Associate resources allocated with a previous pciio_intr_alloc call with the
- * described handler, arg, name, etc.
- *
- * Returns 0 on success, returns <0 on failure.
- */
-int
-pciio_intr_connect(pciio_intr_t intr_hdl) /* pciio intr resource handle */
-{
- return INTR_FUNC(intr_hdl, intr_connect)
- (CAST_INTR(intr_hdl));
-}
-
-/*
- * Disassociate handler with the specified interrupt.
- */
-void
-pciio_intr_disconnect(pciio_intr_t intr_hdl)
-{
- INTR_FUNC(intr_hdl, intr_disconnect)
- (CAST_INTR(intr_hdl));
-}
-
-/*
- * Return a hwgraph vertex that represents the CPU currently
- * targeted by an interrupt.
- */
-devfs_handle_t
-pciio_intr_cpu_get(pciio_intr_t intr_hdl)
-{
- return INTR_FUNC(intr_hdl, intr_cpu_get)
- (CAST_INTR(intr_hdl));
-}
-
-void
-pciio_slot_func_to_name(char *name,
- pciio_slot_t slot,
- pciio_function_t func)
-{
- /*
- * standard connection points:
- *
- * PCIIO_SLOT_NONE: .../pci/direct
- * PCIIO_FUNC_NONE: .../pci/<SLOT> ie. .../pci/3
- * multifunction: .../pci/<SLOT><FUNC> ie. .../pci/3c
- */
-
- if (slot == PCIIO_SLOT_NONE)
- sprintf(name, "direct");
- else if (func == PCIIO_FUNC_NONE)
- sprintf(name, "%d", slot);
- else
- sprintf(name, "%d%c", slot, 'a'+func);
-}
-
-/* =====================================================================
- * CONFIGURATION MANAGEMENT
- */
-
-/*
- * Startup a crosstalk provider
- */
-void
-pciio_provider_startup(devfs_handle_t pciio_provider)
-{
- DEV_FUNC(pciio_provider, provider_startup)
- (pciio_provider);
-}
-
-/*
- * Shutdown a crosstalk provider
- */
-void
-pciio_provider_shutdown(devfs_handle_t pciio_provider)
-{
- DEV_FUNC(pciio_provider, provider_shutdown)
- (pciio_provider);
-}
-
-/*
- * Specify endianness constraints. The driver tells us what the device
- * does and how it would like to see things in memory. We reply with
- * how things will actually appear in memory.
- */
-pciio_endian_t
-pciio_endian_set(devfs_handle_t dev,
- pciio_endian_t device_end,
- pciio_endian_t desired_end)
-{
- ASSERT((device_end == PCIDMA_ENDIAN_BIG) || (device_end == PCIDMA_ENDIAN_LITTLE));
- ASSERT((desired_end == PCIDMA_ENDIAN_BIG) || (desired_end == PCIDMA_ENDIAN_LITTLE));
-
-#if DEBUG
-#if defined(SUPPORT_PRINTING_V_FORMAT)
- printk(KERN_ALERT "%v: pciio_endian_set is going away.\n"
- "\tplease use PCIIO_BYTE_STREAM or PCIIO_WORD_VALUES in your\n"
- "\tpciio_dmamap_alloc and pciio_dmatrans calls instead.\n",
- dev);
-#else
- printk(KERN_ALERT "0x%x: pciio_endian_set is going away.\n"
- "\tplease use PCIIO_BYTE_STREAM or PCIIO_WORD_VALUES in your\n"
- "\tpciio_dmamap_alloc and pciio_dmatrans calls instead.\n",
- dev);
-#endif
-#endif
-
- return DEV_FUNC(dev, endian_set)
- (dev, device_end, desired_end);
-}
-
-/*
- * Specify PCI arbitration priority.
- */
-pciio_priority_t
-pciio_priority_set(devfs_handle_t dev,
- pciio_priority_t device_prio)
-{
- ASSERT((device_prio == PCI_PRIO_HIGH) || (device_prio == PCI_PRIO_LOW));
-
- return DEV_FUNC(dev, priority_set)
- (dev, device_prio);
-}
-
-/*
- * Read value of configuration register
- */
-uint64_t
-pciio_config_get(devfs_handle_t dev,
- unsigned reg,
- unsigned size)
-{
- uint64_t value = 0;
- unsigned shift = 0;
-
- /* handle accesses that cross words here,
- * since that's common code between all
- * possible providers.
- */
- while (size > 0) {
- unsigned biw = 4 - (reg&3);
- if (biw > size)
- biw = size;
-
- value |= DEV_FUNC(dev, config_get)
- (dev, reg, biw) << shift;
-
- shift += 8*biw;
- reg += biw;
- size -= biw;
- }
- return value;
-}
-
-/*
- * Change value of configuration register
- */
-void
-pciio_config_set(devfs_handle_t dev,
- unsigned reg,
- unsigned size,
- uint64_t value)
-{
- /* handle accesses that cross words here,
- * since that's common code between all
- * possible providers.
- */
- while (size > 0) {
- unsigned biw = 4 - (reg&3);
- if (biw > size)
- biw = size;
-
- DEV_FUNC(dev, config_set)
- (dev, reg, biw, value);
- reg += biw;
- size -= biw;
- value >>= biw * 8;
- }
-}
-
-/* =====================================================================
- * GENERIC PCI SUPPORT FUNCTIONS
- */
-
-/*
- * Issue a hardware reset to a card.
- */
-int
-pciio_reset(devfs_handle_t dev)
-{
- return DEV_FUNC(dev, reset) (dev);
-}
-
-/*
- * flush write gather buffers
- */
-int
-pciio_write_gather_flush(devfs_handle_t dev)
-{
- return DEV_FUNC(dev, write_gather_flush) (dev);
-}
-
-devfs_handle_t
-pciio_intr_dev_get(pciio_intr_t pciio_intr)
-{
- return (pciio_intr->pi_dev);
-}
-
-/****** Generic crosstalk pio interfaces ******/
-devfs_handle_t
-pciio_pio_dev_get(pciio_piomap_t pciio_piomap)
-{
- return (pciio_piomap->pp_dev);
-}
-
-pciio_slot_t
-pciio_pio_slot_get(pciio_piomap_t pciio_piomap)
-{
- return (pciio_piomap->pp_slot);
-}
-
-pciio_space_t
-pciio_pio_space_get(pciio_piomap_t pciio_piomap)
-{
- return (pciio_piomap->pp_space);
-}
-
-iopaddr_t
-pciio_pio_pciaddr_get(pciio_piomap_t pciio_piomap)
-{
- return (pciio_piomap->pp_pciaddr);
-}
-
-ulong
-pciio_pio_mapsz_get(pciio_piomap_t pciio_piomap)
-{
- return (pciio_piomap->pp_mapsz);
-}
-
-caddr_t
-pciio_pio_kvaddr_get(pciio_piomap_t pciio_piomap)
-{
- return (pciio_piomap->pp_kvaddr);
-}
-
-/****** Generic crosstalk dma interfaces ******/
-devfs_handle_t
-pciio_dma_dev_get(pciio_dmamap_t pciio_dmamap)
-{
- return (pciio_dmamap->pd_dev);
-}
-
-pciio_slot_t
-pciio_dma_slot_get(pciio_dmamap_t pciio_dmamap)
-{
- return (pciio_dmamap->pd_slot);
-}
-
-/****** Generic pci slot information interfaces ******/
-
-pciio_info_t
-pciio_info_chk(devfs_handle_t pciio)
-{
- arbitrary_info_t ainfo = 0;
-
- hwgraph_info_get_LBL(pciio, INFO_LBL_PCIIO, &ainfo);
- return (pciio_info_t) ainfo;
-}
-
-pciio_info_t
-pciio_info_get(devfs_handle_t pciio)
-{
- pciio_info_t pciio_info;
-
- pciio_info = (pciio_info_t) hwgraph_fastinfo_get(pciio);
-
-#ifdef DEBUG_PCIIO
- {
- int pos;
- char dname[256];
- pos = devfs_generate_path(pciio, dname, 256);
- printk("%s : path= %s\n", __FUNCTION__, &dname[pos]);
- }
-#endif /* DEBUG_PCIIO */
-
- if ((pciio_info != NULL) &&
- (pciio_info->c_fingerprint != pciio_info_fingerprint)
- && (pciio_info->c_fingerprint != NULL)) {
-
- return((pciio_info_t)-1); /* Should panic .. */
- }
-
-
- return pciio_info;
-}
-
-void
-pciio_info_set(devfs_handle_t pciio, pciio_info_t pciio_info)
-{
- if (pciio_info != NULL)
- pciio_info->c_fingerprint = pciio_info_fingerprint;
- hwgraph_fastinfo_set(pciio, (arbitrary_info_t) pciio_info);
-
- /* Also, mark this vertex as a PCI slot
- * and use the pciio_info, so pciio_info_chk
- * can work (and be fairly efficient).
- */
- hwgraph_info_add_LBL(pciio, INFO_LBL_PCIIO,
- (arbitrary_info_t) pciio_info);
-}
-
-devfs_handle_t
-pciio_info_dev_get(pciio_info_t pciio_info)
-{
- return (pciio_info->c_vertex);
-}
-
-/*ARGSUSED*/
-pciio_bus_t
-pciio_info_bus_get(pciio_info_t pciio_info)
-{
- /* XXX for now O2 always gets back bus 0 */
- return (pciio_bus_t)0;
-}
-
-pciio_slot_t
-pciio_info_slot_get(pciio_info_t pciio_info)
-{
- return (pciio_info->c_slot);
-}
-
-pciio_function_t
-pciio_info_function_get(pciio_info_t pciio_info)
-{
- return (pciio_info->c_func);
-}
-
-pciio_vendor_id_t
-pciio_info_vendor_id_get(pciio_info_t pciio_info)
-{
- return (pciio_info->c_vendor);
-}
-
-pciio_device_id_t
-pciio_info_device_id_get(pciio_info_t pciio_info)
-{
- return (pciio_info->c_device);
-}
-
-devfs_handle_t
-pciio_info_master_get(pciio_info_t pciio_info)
-{
- return (pciio_info->c_master);
-}
-
-arbitrary_info_t
-pciio_info_mfast_get(pciio_info_t pciio_info)
-{
- return (pciio_info->c_mfast);
-}
-
-pciio_provider_t *
-pciio_info_pops_get(pciio_info_t pciio_info)
-{
- return (pciio_info->c_pops);
-}
-
-error_handler_f *
-pciio_info_efunc_get(pciio_info_t pciio_info)
-{
- return (pciio_info->c_efunc);
-}
-
-error_handler_arg_t *
-pciio_info_einfo_get(pciio_info_t pciio_info)
-{
- return (pciio_info->c_einfo);
-}
-
-pciio_space_t
-pciio_info_bar_space_get(pciio_info_t info, int win)
-{
- return info->c_window[win].w_space;
-}
-
-iopaddr_t
-pciio_info_bar_base_get(pciio_info_t info, int win)
-{
- return info->c_window[win].w_base;
-}
-
-size_t
-pciio_info_bar_size_get(pciio_info_t info, int win)
-{
- return info->c_window[win].w_size;
-}
-
-iopaddr_t
-pciio_info_rom_base_get(pciio_info_t info)
-{
- return info->c_rbase;
-}
-
-size_t
-pciio_info_rom_size_get(pciio_info_t info)
-{
- return info->c_rsize;
-}
-
-
-/* =====================================================================
- * GENERIC PCI INITIALIZATION FUNCTIONS
- */
-
-/*
- * pciioinit: called once during device driver
- * initializtion if this driver is configured into
- * the system.
- */
-void
-pciio_init(void)
-{
- cdl_p cp;
-
-#if DEBUG && ATTACH_DEBUG
- printf("pciio_init\n");
-#endif
- /* Allocate the registry.
- * We might already have one.
- * If we don't, go get one.
- * MPness: someone might have
- * set one up for us while we
- * were not looking; use an atomic
- * compare-and-swap to commit to
- * using the new registry if and
- * only if nobody else did first.
- * If someone did get there first,
- * toss the one we allocated back
- * into the pool.
- */
- if (pciio_registry == NULL) {
- cp = cdl_new(EDGE_LBL_PCI, "vendor", "device");
- if (!compare_and_swap_ptr((void **) &pciio_registry, NULL, (void *) cp)) {
- cdl_del(cp);
- }
- }
- ASSERT(pciio_registry != NULL);
-}
-
-/*
- * pciioattach: called for each vertex in the graph
- * that is a PCI provider.
- */
-/*ARGSUSED */
-int
-pciio_attach(devfs_handle_t pciio)
-{
-#if DEBUG && ATTACH_DEBUG
-#if defined(SUPPORT_PRINTING_V_FORMAT)
- printk("%v: pciio_attach\n", pciio);
-#else
- printk("0x%x: pciio_attach\n", pciio);
-#endif
-#endif
- return 0;
-}
-
-/*
- * Associate a set of pciio_provider functions with a vertex.
- */
-void
-pciio_provider_register(devfs_handle_t provider, pciio_provider_t *pciio_fns)
-{
- hwgraph_info_add_LBL(provider, INFO_LBL_PFUNCS, (arbitrary_info_t) pciio_fns);
-}
-
-/*
- * Disassociate a set of pciio_provider functions with a vertex.
- */
-void
-pciio_provider_unregister(devfs_handle_t provider)
-{
- arbitrary_info_t ainfo;
-
- hwgraph_info_remove_LBL(provider, INFO_LBL_PFUNCS, (long *) &ainfo);
-}
-
-/*
- * Obtain a pointer to the pciio_provider functions for a specified Crosstalk
- * provider.
- */
-pciio_provider_t *
-pciio_provider_fns_get(devfs_handle_t provider)
-{
- arbitrary_info_t ainfo = 0;
-
- (void) hwgraph_info_get_LBL(provider, INFO_LBL_PFUNCS, &ainfo);
- return (pciio_provider_t *) ainfo;
-}
-
-/*ARGSUSED4 */
-int
-pciio_driver_register(
- pciio_vendor_id_t vendor_id,
- pciio_device_id_t device_id,
- char *driver_prefix,
- unsigned flags)
-{
- /* a driver's init routine might call
- * pciio_driver_register before the
- * system calls pciio_init; so we
- * make the init call ourselves here.
- */
- if (pciio_registry == NULL)
- pciio_init();
-
- return cdl_add_driver(pciio_registry,
- vendor_id, device_id,
- driver_prefix, flags, NULL);
-}
-
-/*
- * Remove an initialization function.
- */
-void
-pciio_driver_unregister(
- char *driver_prefix)
-{
- /* before a driver calls unregister,
- * it must have called register; so
- * we can assume we have a registry here.
- */
- ASSERT(pciio_registry != NULL);
-
- cdl_del_driver(pciio_registry, driver_prefix, NULL);
-}
-
-/*
- * Set the slot status for a device supported by the
- * driver being registered.
- */
-void
-pciio_driver_reg_callback(
- devfs_handle_t pconn_vhdl,
- int key1,
- int key2,
- int error)
-{
-}
-
-/*
- * Set the slot status for a device supported by the
- * driver being unregistered.
- */
-void
-pciio_driver_unreg_callback(
- devfs_handle_t pconn_vhdl,
- int key1,
- int key2,
- int error)
-{
-}
-
-/*
- * Call some function with each vertex that
- * might be one of this driver's attach points.
- */
-void
-pciio_iterate(char *driver_prefix,
- pciio_iter_f * func)
-{
- /* a driver's init routine might call
- * pciio_iterate before the
- * system calls pciio_init; so we
- * make the init call ourselves here.
- */
- if (pciio_registry == NULL)
- pciio_init();
-
- ASSERT(pciio_registry != NULL);
-
- cdl_iterate(pciio_registry, driver_prefix, (cdl_iter_f *) func);
-}
-
-devfs_handle_t
-pciio_device_register(
- devfs_handle_t connectpt, /* vertex for /hw/.../pciio/%d */
- devfs_handle_t master, /* card's master ASIC (PCI provider) */
- pciio_slot_t slot, /* card's slot */
- pciio_function_t func, /* card's func */
- pciio_vendor_id_t vendor_id,
- pciio_device_id_t device_id)
-{
- return pciio_device_info_register
- (connectpt, pciio_device_info_new (NULL, master, slot, func,
- vendor_id, device_id));
-}
-
-void
-pciio_device_unregister(devfs_handle_t pconn)
-{
- DEV_FUNC(pconn,device_unregister)(pconn);
-}
-
-pciio_info_t
-pciio_device_info_new(
- pciio_info_t pciio_info,
- devfs_handle_t master,
- pciio_slot_t slot,
- pciio_function_t func,
- pciio_vendor_id_t vendor_id,
- pciio_device_id_t device_id)
-{
- if (!pciio_info)
- GET_NEW(pciio_info);
- ASSERT(pciio_info != NULL);
-
- pciio_info->c_slot = slot;
- pciio_info->c_func = func;
- pciio_info->c_vendor = vendor_id;
- pciio_info->c_device = device_id;
- pciio_info->c_master = master;
- pciio_info->c_mfast = hwgraph_fastinfo_get(master);
- pciio_info->c_pops = pciio_provider_fns_get(master);
- pciio_info->c_efunc = 0;
- pciio_info->c_einfo = 0;
-
- return pciio_info;
-}
-
-void
-pciio_device_info_free(pciio_info_t pciio_info)
-{
- /* NOTE : pciio_info is a structure within the pcibr_info
- * and not a pointer to memory allocated on the heap !!
- */
- BZERO((char *)pciio_info,sizeof(pciio_info));
-}
-
-devfs_handle_t
-pciio_device_info_register(
- devfs_handle_t connectpt, /* vertex at center of bus */
- pciio_info_t pciio_info) /* details about the connectpt */
-{
- char name[32];
- devfs_handle_t pconn;
- int device_master_set(devfs_handle_t, devfs_handle_t);
-
- pciio_slot_func_to_name(name,
- pciio_info->c_slot,
- pciio_info->c_func);
-
- if (GRAPH_SUCCESS !=
- hwgraph_path_add(connectpt, name, &pconn))
- return pconn;
-
- pciio_info->c_vertex = pconn;
- pciio_info_set(pconn, pciio_info);
-#ifdef DEBUG_PCIIO
- {
- int pos;
- char dname[256];
- pos = devfs_generate_path(pconn, dname, 256);
- printk("%s : pconn path= %s \n", __FUNCTION__, &dname[pos]);
- }
-#endif /* DEBUG_PCIIO */
-
- /*
- * create link to our pci provider
- */
-
- device_master_set(pconn, pciio_info->c_master);
-
-#if USRPCI
- /*
- * Call into usrpci provider to let it initialize for
- * the given slot.
- */
- if (pciio_info->c_slot != PCIIO_SLOT_NONE)
- usrpci_device_register(pconn, pciio_info->c_master, pciio_info->c_slot);
-#endif
-
- return pconn;
-}
-
-void
-pciio_device_info_unregister(devfs_handle_t connectpt,
- pciio_info_t pciio_info)
-{
- char name[32];
- devfs_handle_t pconn;
-
- if (!pciio_info)
- return;
-
- pciio_slot_func_to_name(name,
- pciio_info->c_slot,
- pciio_info->c_func);
-
- hwgraph_edge_remove(connectpt,name,&pconn);
- pciio_info_set(pconn,0);
-
- /* Remove the link to our pci provider */
- hwgraph_edge_remove(pconn, EDGE_LBL_MASTER, NULL);
-
-
- hwgraph_vertex_unref(pconn);
- hwgraph_vertex_destroy(pconn);
-
-}
-/* Add the pci card inventory information to the hwgraph
- */
-static void
-pciio_device_inventory_add(devfs_handle_t pconn_vhdl)
-{
- pciio_info_t pciio_info = pciio_info_get(pconn_vhdl);
-
- ASSERT(pciio_info);
- ASSERT(pciio_info->c_vertex == pconn_vhdl);
-
- /* Donot add inventory for non-existent devices */
- if ((pciio_info->c_vendor == PCIIO_VENDOR_ID_NONE) ||
- (pciio_info->c_device == PCIIO_DEVICE_ID_NONE))
- return;
- device_inventory_add(pconn_vhdl,INV_IOBD,INV_PCIADAP,
- pciio_info->c_vendor,pciio_info->c_device,
- pciio_info->c_slot);
-}
-
-static void
-pciio_device_inventory_remove(devfs_handle_t pconn_vhdl)
-{
-#ifdef LATER
- hwgraph_inventory_remove(pconn_vhdl,-1,-1,-1,-1,-1);
-#endif
-}
-
-/*ARGSUSED */
-int
-pciio_device_attach(devfs_handle_t pconn,
- int drv_flags)
-{
- pciio_info_t pciio_info;
- pciio_vendor_id_t vendor_id;
- pciio_device_id_t device_id;
-
-
- pciio_device_inventory_add(pconn);
- pciio_info = pciio_info_get(pconn);
-
- vendor_id = pciio_info->c_vendor;
- device_id = pciio_info->c_device;
-
- /* we don't start attaching things until
- * all the driver init routines (including
- * pciio_init) have been called; so we
- * can assume here that we have a registry.
- */
- ASSERT(pciio_registry != NULL);
-
- return(cdl_add_connpt(pciio_registry, vendor_id, device_id, pconn, drv_flags));
-}
-
-int
-pciio_device_detach(devfs_handle_t pconn,
- int drv_flags)
-{
- pciio_info_t pciio_info;
- pciio_vendor_id_t vendor_id;
- pciio_device_id_t device_id;
-
- pciio_device_inventory_remove(pconn);
- pciio_info = pciio_info_get(pconn);
-
- vendor_id = pciio_info->c_vendor;
- device_id = pciio_info->c_device;
-
- /* we don't start attaching things until
- * all the driver init routines (including
- * pciio_init) have been called; so we
- * can assume here that we have a registry.
- */
- ASSERT(pciio_registry != NULL);
-
- return(cdl_del_connpt(pciio_registry, vendor_id, device_id,
- pconn, drv_flags));
-
-}
-
-/*
- * pciio_error_register:
- * arrange for a function to be called with
- * a specified first parameter plus other
- * information when an error is encountered
- * and traced to the pci slot corresponding
- * to the connection point pconn.
- *
- * may also be called with a null function
- * pointer to "unregister" the error handler.
- *
- * NOTE: subsequent calls silently overwrite
- * previous data for this vertex. We assume that
- * cooperating drivers, well, cooperate ...
- */
-void
-pciio_error_register(devfs_handle_t pconn,
- error_handler_f *efunc,
- error_handler_arg_t einfo)
-{
- pciio_info_t pciio_info;
-
- pciio_info = pciio_info_get(pconn);
- ASSERT(pciio_info != NULL);
- pciio_info->c_efunc = efunc;
- pciio_info->c_einfo = einfo;
-}
-
-/*
- * Check if any device has been found in this slot, and return
- * true or false
- * vhdl is the vertex for the slot
- */
-int
-pciio_slot_inuse(devfs_handle_t pconn_vhdl)
-{
- pciio_info_t pciio_info = pciio_info_get(pconn_vhdl);
-
- ASSERT(pciio_info);
- ASSERT(pciio_info->c_vertex == pconn_vhdl);
- if (pciio_info->c_vendor) {
- /*
- * Non-zero value for vendor indicate
- * a board being found in this slot.
- */
- return 1;
- }
- return 0;
-}
-
-int
-pciio_dma_enabled(devfs_handle_t pconn_vhdl)
-{
- return DEV_FUNC(pconn_vhdl, dma_enabled)(pconn_vhdl);
-}
-
-/*
- * These are complementary Linux interfaces that takes in a pci_dev * as the
- * first arguement instead of devfs_handle_t.
- */
-iopaddr_t snia_pciio_dmatrans_addr(struct pci_dev *, device_desc_t, paddr_t, size_t, unsigned);
-pciio_dmamap_t snia_pciio_dmamap_alloc(struct pci_dev *, device_desc_t, size_t, unsigned);
-void snia_pciio_dmamap_free(pciio_dmamap_t);
-iopaddr_t snia_pciio_dmamap_addr(pciio_dmamap_t, paddr_t, size_t);
-void snia_pciio_dmamap_done(pciio_dmamap_t);
-pciio_endian_t snia_pciio_endian_set(struct pci_dev *pci_dev, pciio_endian_t device_end,
- pciio_endian_t desired_end);
-
-#include <linux/module.h>
-EXPORT_SYMBOL(snia_pciio_dmatrans_addr);
-EXPORT_SYMBOL(snia_pciio_dmamap_alloc);
-EXPORT_SYMBOL(snia_pciio_dmamap_free);
-EXPORT_SYMBOL(snia_pciio_dmamap_addr);
-EXPORT_SYMBOL(snia_pciio_dmamap_done);
-EXPORT_SYMBOL(snia_pciio_endian_set);
-
-pciio_endian_t
-snia_pciio_endian_set(struct pci_dev *pci_dev,
- pciio_endian_t device_end,
- pciio_endian_t desired_end)
-{
- devfs_handle_t dev = PCIDEV_VERTEX(pci_dev);
-
- return DEV_FUNC(dev, endian_set)
- (dev, device_end, desired_end);
-}
-
-iopaddr_t
-snia_pciio_dmatrans_addr(struct pci_dev *pci_dev, /* translate for this device */
- device_desc_t dev_desc, /* device descriptor */
- paddr_t paddr, /* system physical address */
- size_t byte_count, /* length */
- unsigned flags)
-{ /* defined in dma.h */
-
- devfs_handle_t dev = PCIDEV_VERTEX(pci_dev);
-
- return DEV_FUNC(dev, dmatrans_addr)
- (dev, dev_desc, paddr, byte_count, flags);
-}
-
-pciio_dmamap_t
-snia_pciio_dmamap_alloc(struct pci_dev *pci_dev, /* set up mappings for this device */
- device_desc_t dev_desc, /* device descriptor */
- size_t byte_count_max, /* max size of a mapping */
- unsigned flags)
-{ /* defined in dma.h */
-
- devfs_handle_t dev = PCIDEV_VERTEX(pci_dev);
-
- return (pciio_dmamap_t) DEV_FUNC(dev, dmamap_alloc)
- (dev, dev_desc, byte_count_max, flags);
-}
-
-void
-snia_pciio_dmamap_free(pciio_dmamap_t pciio_dmamap)
-{
- DMAMAP_FUNC(pciio_dmamap, dmamap_free)
- (CAST_DMAMAP(pciio_dmamap));
-}
-
-iopaddr_t
-snia_pciio_dmamap_addr(pciio_dmamap_t pciio_dmamap, /* use these mapping resources */
- paddr_t paddr, /* map for this address */
- size_t byte_count)
-{ /* map this many bytes */
- return DMAMAP_FUNC(pciio_dmamap, dmamap_addr)
- (CAST_DMAMAP(pciio_dmamap), paddr, byte_count);
-}
-
-void
-snia_pciio_dmamap_done(pciio_dmamap_t pciio_dmamap)
-{
- DMAMAP_FUNC(pciio_dmamap, dmamap_done)
- (CAST_DMAMAP(pciio_dmamap));
-}
-
--- /dev/null
+#
+# This file is subject to the terms and conditions of the GNU General Public
+# License. See the file "COPYING" in the main directory of this archive
+# for more details.
+#
+# Copyright (C) 2002-2003 Silicon Graphics, Inc. All Rights Reserved.
+#
+# Makefile for the sn2 io routines.
+
+EXTRA_CFLAGS := -DLITTLE_ENDIAN
+
+obj-y += sgi_io_init.o irix_io_init.o
--- /dev/null
+/* $Id$
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1992 - 1997, 2000-2003 Silicon Graphics, Inc. All rights reserved.
+ */
+
+#include <linux/types.h>
+#include <linux/config.h>
+#include <linux/slab.h>
+#include <asm/sn/sgi.h>
+#include <asm/sn/io.h>
+#include <asm/sn/sn_cpuid.h>
+#include <asm/sn/klconfig.h>
+#include <asm/sn/sn_private.h>
+#include <linux/smp.h>
+#include <asm/sn/simulator.h>
+
+extern void init_all_devices(void);
+extern void klhwg_add_all_modules(vertex_hdl_t);
+extern void klhwg_add_all_nodes(vertex_hdl_t);
+
+extern vertex_hdl_t hwgraph_root;
+extern void io_module_init(void);
+extern int pci_bus_to_hcl_cvlink(void);
+extern void mlreset(void);
+
+/* #define DEBUG_IO_INIT 1 */
+#ifdef DEBUG_IO_INIT
+#define DBG(x...) printk(x)
+#else
+#define DBG(x...)
+#endif /* DEBUG_IO_INIT */
+
+/*
+ * This routine is responsible for the setup of all the IRIX hwgraph style
+ * stuff that's been pulled into linux. It's called by sn_pci_find_bios which
+ * is called just before the generic Linux PCI layer does its probing (by
+ * platform_pci_fixup aka sn_pci_fixup).
+ *
+ * It is very IMPORTANT that this call is only made by the Master CPU!
+ *
+ */
+
+void
+irix_io_init(void)
+{
+ cnodeid_t cnode;
+
+ /*
+ * This is the Master CPU. Emulate mlsetup and main.c in Irix.
+ */
+ mlreset();
+
+ /*
+ * Initialize platform-dependent vertices in the hwgraph:
+ * module
+ * node
+ * cpu
+ * memory
+ * slot
+ * hub
+ * router
+ * xbow
+ */
+
+ io_module_init(); /* Used to be called module_init() .. */
+ klhwg_add_all_modules(hwgraph_root);
+ klhwg_add_all_nodes(hwgraph_root);
+
+ for (cnode = 0; cnode < numnodes; cnode++) {
+ extern void per_hub_init(cnodeid_t);
+ per_hub_init(cnode);
+ }
+
+ /* We can do headless hub cnodes here .. */
+
+ /*
+ *
+ * Our IO Infrastructure drivers are in place ..
+ * Initialize the whole IO Infrastructure .. xwidget/device probes.
+ *
+ */
+ init_all_devices();
+ pci_bus_to_hcl_cvlink();
+}
--- /dev/null
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1992 - 1997, 2000-2003 Silicon Graphics, Inc. All rights reserved.
+ */
+
+#include <linux/types.h>
+#include <linux/config.h>
+#include <linux/slab.h>
+#include <asm/sn/sgi.h>
+#include <asm/sn/io.h>
+#include <asm/sn/sn_cpuid.h>
+#include <asm/sn/klconfig.h>
+#include <asm/sn/sn_private.h>
+#include <asm/sn/pda.h>
+#include <linux/smp.h>
+
+extern int init_hcl(void);
+
+/*
+ * per_hub_init
+ *
+ * This code is executed once for each Hub chip.
+ */
+void
+per_hub_init(cnodeid_t cnode)
+{
+ nasid_t nasid;
+ nodepda_t *npdap;
+ ii_icmr_u_t ii_icmr;
+ ii_ibcr_u_t ii_ibcr;
+ ii_ilcsr_u_t ii_ilcsr;
+
+ nasid = COMPACT_TO_NASID_NODEID(cnode);
+
+ ASSERT(nasid != INVALID_NASID);
+ ASSERT(NASID_TO_COMPACT_NODEID(nasid) == cnode);
+
+ npdap = NODEPDA(cnode);
+
+ /* Disable the request and reply errors. */
+ REMOTE_HUB_S(nasid, IIO_IWEIM, 0xC000);
+
+ /*
+ * Set the total number of CRBs that can be used.
+ */
+ ii_icmr.ii_icmr_regval= 0x0;
+ ii_icmr.ii_icmr_fld_s.i_c_cnt = 0xf;
+ if (enable_shub_wars_1_1() ) {
+ // Set bit one of ICMR to prevent II from sending interrupt for II bug.
+ ii_icmr.ii_icmr_regval |= 0x1;
+ }
+ REMOTE_HUB_S(nasid, IIO_ICMR, ii_icmr.ii_icmr_regval);
+
+ /*
+ * Set the number of CRBs that both of the BTEs combined
+ * can use minus 1.
+ */
+ ii_ibcr.ii_ibcr_regval= 0x0;
+ ii_ilcsr.ii_ilcsr_regval = REMOTE_HUB_L(nasid, IIO_LLP_CSR);
+ if (ii_ilcsr.ii_ilcsr_fld_s.i_llp_stat & LNK_STAT_WORKING) {
+ ii_ibcr.ii_ibcr_fld_s.i_count = 0x8;
+ } else {
+ /*
+ * if the LLP is down, there is no attached I/O, so
+ * give BTE all the CRBs.
+ */
+ ii_ibcr.ii_ibcr_fld_s.i_count = 0x14;
+ }
+ REMOTE_HUB_S(nasid, IIO_IBCR, ii_ibcr.ii_ibcr_regval);
+
+ /*
+ * Set CRB timeout to be 10ms.
+ */
+ REMOTE_HUB_S(nasid, IIO_ICTP, 0xffffff );
+ REMOTE_HUB_S(nasid, IIO_ICTO, 0xff);
+
+ /* Initialize error interrupts for this hub. */
+ hub_error_init(cnode);
+}
+
+/*
+ * This routine is responsible for the setup of all the IRIX hwgraph style
+ * stuff that's been pulled into linux. It's called by sn_pci_find_bios which
+ * is called just before the generic Linux PCI layer does its probing (by
+ * platform_pci_fixup aka sn_pci_fixup).
+ *
+ * It is very IMPORTANT that this call is only made by the Master CPU!
+ *
+ */
+
+void
+sgi_master_io_infr_init(void)
+{
+ extern void irix_io_init(void);
+
+ init_hcl(); /* Sets up the hwgraph compatibility layer with devfs */
+ irix_io_init(); /* Do IRIX Compatibility IO Init */
+
+#ifdef CONFIG_KDB
+ {
+ extern void kdba_io_init(void);
+ kdba_io_init();
+ }
+#endif
+
+}
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
- * Copyright (C) 1992 - 1997, 2000-2002 Silicon Graphics, Inc. All rights reserved.
+ * Copyright (C) 1992-1997,2000-2003 Silicon Graphics, Inc. All rights reserved.
*/
#include <linux/types.h>
#include <asm/sn/pci/pciio.h>
#include <asm/sn/slotnum.h>
-unsigned char Is_pic_on_this_nasid[512]; /* non-0 when this is a pic shub */
-
void *
snia_kmem_zalloc(size_t size, int flag)
{
kfree(ptr);
}
-int
-nic_vertex_info_match(devfs_handle_t v, char *s)
-{
- /* we don't support this */
- return(0);
-}
-
/*
* the alloc/free_node routines do a simple kmalloc for now ..
*/
return (neg ? n : -n);
}
-char *
-strtok_r(char *string, const char *sepset, char **lasts)
-{
- register char *q, *r;
-
- /*first or subsequent call*/
- if (string == NULL)
- string = *lasts;
-
- if(string == 0) /* return if no tokens remaining */
- return(NULL);
-
- q = string + strspn(string, sepset); /* skip leading separators */
-
- if(*q == '\0') { /* return if no tokens remaining */
- *lasts = 0; /* indicate this is last token */
- return(NULL);
- }
-
- if((r = strpbrk(q, sepset)) == NULL) /* move past token */
- *lasts = 0; /* indicate this is last token */
- else {
- *r = '\0';
- *lasts = r+1;
- }
- return(q);
-}
-
/*
* print_register() allows formatted printing of bit fields. individual
* bit fields are described by a struct reg_desc, multiple bit fields within
+++ /dev/null
-/* $Id$
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Copyright (C) 1992 - 1997, 2000-2002 Silicon Graphics, Inc. All rights reserved.
- */
-
-#include <linux/types.h>
-#include <linux/config.h>
-#include <linux/slab.h>
-#include <asm/sn/sgi.h>
-#include <asm/sn/io.h>
-#include <asm/sn/sn_cpuid.h>
-#include <asm/sn/klconfig.h>
-#include <asm/sn/sn_private.h>
-#include <asm/sn/pci/pciba.h>
-#include <linux/smp.h>
-
-extern void mlreset(int );
-extern int init_hcl(void);
-extern void klgraph_hack_init(void);
-extern void hubspc_init(void);
-extern void pciio_init(void);
-extern void pcibr_init(void);
-extern void xtalk_init(void);
-extern void xbow_init(void);
-extern void xbmon_init(void);
-extern void pciiox_init(void);
-extern void usrpci_init(void);
-extern void ioc3_init(void);
-extern void initialize_io(void);
-#if defined(CONFIG_IA64_SGI_SN1)
-extern void intr_clear_all(nasid_t);
-#endif
-extern void klhwg_add_all_modules(devfs_handle_t);
-extern void klhwg_add_all_nodes(devfs_handle_t);
-
-void sn_mp_setup(void);
-extern devfs_handle_t hwgraph_root;
-extern void io_module_init(void);
-extern void pci_bus_cvlink_init(void);
-extern void temp_hack(void);
-
-extern int pci_bus_to_hcl_cvlink(void);
-
-/* #define DEBUG_IO_INIT */
-#ifdef DEBUG_IO_INIT
-#define DBG(x...) printk(x)
-#else
-#define DBG(x...)
-#endif /* DEBUG_IO_INIT */
-
-/*
- * per_hub_init
- *
- * This code is executed once for each Hub chip.
- */
-static void
-per_hub_init(cnodeid_t cnode)
-{
- nasid_t nasid;
- nodepda_t *npdap;
- ii_icmr_u_t ii_icmr;
- ii_ibcr_u_t ii_ibcr;
-
- nasid = COMPACT_TO_NASID_NODEID(cnode);
-
- ASSERT(nasid != INVALID_NASID);
- ASSERT(NASID_TO_COMPACT_NODEID(nasid) == cnode);
-
- npdap = NODEPDA(cnode);
-
-#if defined(CONFIG_IA64_SGI_SN1)
- /* initialize per-node synergy perf instrumentation */
- npdap->synergy_perf_enabled = 0; /* off by default */
- npdap->synergy_perf_lock = SPIN_LOCK_UNLOCKED;
- npdap->synergy_perf_freq = SYNERGY_PERF_FREQ_DEFAULT;
- npdap->synergy_inactive_intervals = 0;
- npdap->synergy_active_intervals = 0;
- npdap->synergy_perf_data = NULL;
- npdap->synergy_perf_first = NULL;
-#endif /* CONFIG_IA64_SGI_SN1 */
-
-
- /*
- * Set the total number of CRBs that can be used.
- */
- ii_icmr.ii_icmr_regval= 0x0;
- ii_icmr.ii_icmr_fld_s.i_c_cnt = 0xF;
- REMOTE_HUB_S(nasid, IIO_ICMR, ii_icmr.ii_icmr_regval);
-
- /*
- * Set the number of CRBs that both of the BTEs combined
- * can use minus 1.
- */
- ii_ibcr.ii_ibcr_regval= 0x0;
- ii_ibcr.ii_ibcr_fld_s.i_count = 0x8;
- REMOTE_HUB_S(nasid, IIO_IBCR, ii_ibcr.ii_ibcr_regval);
-
- /*
- * Set CRB timeout to be 10ms.
- */
- REMOTE_HUB_S(nasid, IIO_ICTP, 0x1000 );
- REMOTE_HUB_S(nasid, IIO_ICTO, 0xff);
-
-
-#if defined(CONFIG_IA64_SGI_SN1)
- /* Reserve all of the hardwired interrupt levels. */
- intr_reserve_hardwired(cnode);
-#endif
-
- /* Initialize error interrupts for this hub. */
- hub_error_init(cnode);
-}
-
-/*
- * This routine is responsible for the setup of all the IRIX hwgraph style
- * stuff that's been pulled into linux. It's called by sn1_pci_find_bios which
- * is called just before the generic Linux PCI layer does its probing (by
- * platform_pci_fixup aka sn1_pci_fixup).
- *
- * It is very IMPORTANT that this call is only made by the Master CPU!
- *
- */
-
-void
-sgi_master_io_infr_init(void)
-{
- int cnode;
-
- /*
- * Do any early init stuff .. einit_tbl[] etc.
- */
- DBG("--> sgi_master_io_infr_init: calling init_hcl().\n");
- init_hcl(); /* Sets up the hwgraph compatibility layer with devfs */
-
- /*
- * initialize the Linux PCI to xwidget vertexes ..
- */
- DBG("--> sgi_master_io_infr_init: calling pci_bus_cvlink_init().\n");
- pci_bus_cvlink_init();
-
-#ifdef BRINGUP
-#ifdef CONFIG_IA64_SGI_SN1
- /*
- * Hack to provide statically initialzed klgraph entries.
- */
- DBG("--> sgi_master_io_infr_init: calling klgraph_hack_init()\n");
- klgraph_hack_init();
-#endif /* CONFIG_IA64_SGI_SN1 */
-#endif /* BRINGUP */
-
- /*
- * This is the Master CPU. Emulate mlsetup and main.c in Irix.
- */
- DBG("--> sgi_master_io_infr_init: calling mlreset(0).\n");
- mlreset(0); /* Master .. */
-
- /*
- * allowboot() is called by kern/os/main.c in main()
- * Emulate allowboot() ...
- * per_cpu_init() - only need per_hub_init()
- * cpu_io_setup() - Nothing to do.
- *
- */
- DBG("--> sgi_master_io_infr_init: calling sn_mp_setup().\n");
- sn_mp_setup();
-
- DBG("--> sgi_master_io_infr_init: calling per_hub_init(0).\n");
- for (cnode = 0; cnode < numnodes; cnode++) {
- per_hub_init(cnode);
- }
-
- /* We can do headless hub cnodes here .. */
-
- /*
- * io_init[] stuff.
- *
- * Get SGI IO Infrastructure drivers to init and register with
- * each other etc.
- */
-
- DBG("--> sgi_master_io_infr_init: calling hubspc_init()\n");
- hubspc_init();
-
- DBG("--> sgi_master_io_infr_init: calling pciio_init()\n");
- pciio_init();
-
- DBG("--> sgi_master_io_infr_init: calling pcibr_init()\n");
- pcibr_init();
-
- DBG("--> sgi_master_io_infr_init: calling xtalk_init()\n");
- xtalk_init();
-
- DBG("--> sgi_master_io_infr_init: calling xbow_init()\n");
- xbow_init();
-
- DBG("--> sgi_master_io_infr_init: calling xbmon_init()\n");
- xbmon_init();
-
- DBG("--> sgi_master_io_infr_init: calling pciiox_init()\n");
- pciiox_init();
-
- DBG("--> sgi_master_io_infr_init: calling usrpci_init()\n");
- usrpci_init();
-
- DBG("--> sgi_master_io_infr_init: calling ioc3_init()\n");
- ioc3_init();
-
- /*
- *
- * Our IO Infrastructure drivers are in place ..
- * Initialize the whole IO Infrastructure .. xwidget/device probes.
- *
- */
- DBG("--> sgi_master_io_infr_init: Start Probe and IO Initialization\n");
- initialize_io();
-
- DBG("--> sgi_master_io_infr_init: Setting up SGI IO Links for Linux PCI\n");
- pci_bus_to_hcl_cvlink();
-
-#ifdef CONFIG_PCIBA
- DBG("--> sgi_master_io_infr_init: calling pciba_init()\n");
- pciba_init();
-#endif
-
- DBG("--> Leave sgi_master_io_infr_init: DONE setting up SGI Links for PCI\n");
-}
-
-/*
- * sgi_slave_io_infr_init - This routine must be called on all cpus except
- * the Master CPU.
- */
-void
-sgi_slave_io_infr_init(void)
-{
- /* Emulate cboot() .. */
- mlreset(1); /* This is a slave cpu */
-
- // per_hub_init(0); /* Need to get and send in actual cnode number */
-
- /* Done */
-}
-
-/*
- * One-time setup for MP SN.
- * Allocate per-node data, slurp prom klconfig information and
- * convert it to hwgraph information.
- */
-void
-sn_mp_setup(void)
-{
- cnodeid_t cnode;
- cpuid_t cpu;
-
- for (cpu = 0; cpu < NR_CPUS; cpu++) {
- /* Skip holes in CPU space */
- if (cpu_enabled(cpu)) {
- init_platform_pda(cpu);
- }
- }
-
- /*
- * Initialize platform-dependent vertices in the hwgraph:
- * module
- * node
- * cpu
- * memory
- * slot
- * hub
- * router
- * xbow
- */
-
- DBG("sn_mp_io_setup: calling io_module_init()\n");
- io_module_init(); /* Use to be called module_init() .. */
-
- DBG("sn_mp_setup: calling klhwg_add_all_modules()\n");
- klhwg_add_all_modules(hwgraph_root);
- DBG("sn_mp_setup: calling klhwg_add_all_nodes()\n");
- klhwg_add_all_nodes(hwgraph_root);
-
-
- for (cnode = 0; cnode < numnodes; cnode++) {
-
- /*
- * This routine clears the Hub's Interrupt registers.
- */
- /*
- * We need to move this intr_clear_all() routine
- * from SN/intr.c to a more appropriate file.
- * Talk to Al Mayer.
- */
-#if defined(CONFIG_IA64_SGI_SN1)
- intr_clear_all(COMPACT_TO_NASID_NODEID(cnode));
-#endif
- /* now init the hub */
- // per_hub_init(cnode);
-
- }
-
-#if defined(CONFIG_IA64_SGI_SN1)
- synergy_perf_init();
-#endif
-
-}
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
- * Copyright (C) 1992 - 1997, 2000-2002 Silicon Graphics, Inc. All rights reserved.
+ * Copyright (C) 1992-1997,2000-2003 Silicon Graphics, Inc. All rights reserved.
*/
#include <linux/config.h>
#include <asm/sn/sn_cpuid.h>
#include <asm/sn/klconfig.h>
#include <asm/sn/module.h>
-#include <asm/sn/nic.h>
#include <asm/sn/sn_private.h>
-cpuid_t master_procid = 0;
+cpuid_t master_procid;
char arg_maxnodes[4];
-extern void init_all_devices(void);
-
-#if defined(CONFIG_IA64_SGI_SN1)
-synergy_da_t *Synergy_da_indr[MAX_COMPACT_NODES * 2];
-#endif
-
/*
* Return non-zero if the given variable was specified
*/
return (strlen(s) != 0);
}
-void xbmon_init(void)
-{
- FIXME("xbmon_init : no-op\n");
-
-}
-
-void pciiox_init(void)
-{
- FIXME("pciiox_init : no-op\n");
-
-}
-
-void usrpci_init(void)
-{
- FIXME("usrpci_init : no-op\n");
-
-}
-
-void ioc3_init(void)
-{
- FIXME("ioc3_init : no-op\n");
-
-}
-
-void initialize_io(void)
-{
-
- init_all_devices();
-}
-
/*
* Routines provided by ml/SN/promif.c.
*/
-static __psunsigned_t master_bridge_base = (__psunsigned_t)NULL;
+static __psunsigned_t master_bridge_base;
nasid_t console_nasid = (nasid_t)-1;
-#if !defined(CONFIG_IA64_SGI_SN1)
char master_baseio_wid;
-#endif
static char console_wid;
static char console_pcislot;
return 0;
}
-#if defined(CONFIG_IA64_SGI_SN1)
-int
-is_master_nasid_widget(nasid_t test_nasid, xwidgetnum_t test_wid)
-{
-
- /*
- * If the widget numbers are different, we're not the master.
- */
- if (test_wid != (xwidgetnum_t)console_wid)
- return 0;
-
- /*
- * If the NASIDs are the same or equivalent, we're the master.
- */
- if (check_nasid_equiv(test_nasid, console_nasid)) {
- return 1;
- } else {
- return 0;
- }
-}
-#else
int
is_master_baseio_nasid_widget(nasid_t test_nasid, xwidgetnum_t test_wid)
{
return 0;
}
}
-#endif /* CONFIG_IA64_SGI_SN1 */
-
-/*
- * Routines provided by ml/SN/nvram.c
- */
-void
-nvram_baseinit(void)
-{
- FIXME("nvram_baseinit : no-op\n");
-
-}
+++ /dev/null
-/* $Id: hub_intr.c,v 1.1 2002/02/28 17:31:25 marcelo Exp $
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Copyright (C) 1992-1997, 2000-2002 Silicon Graphics, Inc. All Rights Reserved.
- */
-
-#include <linux/types.h>
-#include <linux/slab.h>
-#include <asm/sn/types.h>
-#include <asm/sn/sgi.h>
-#include <asm/sn/driver.h>
-#include <asm/sn/iograph.h>
-#include <asm/param.h>
-#include <asm/sn/pio.h>
-#include <asm/sn/xtalk/xwidget.h>
-#include <asm/sn/io.h>
-#include <asm/sn/sn_private.h>
-#include <asm/sn/addrs.h>
-#include <asm/sn/invent.h>
-#include <asm/sn/hcl.h>
-#include <asm/sn/hcl_util.h>
-#include <asm/sn/intr.h>
-#include <asm/sn/xtalk/xtalkaddrs.h>
-#include <asm/sn/klconfig.h>
-#include <asm/sn/sn_cpuid.h>
-
-extern xtalk_provider_t hub_provider;
-
-/* ARGSUSED */
-void
-hub_intr_init(devfs_handle_t hubv)
-{
-}
-
-/*
- * hub_device_desc_update
- * Update the passed in device descriptor with the actual the
- * target cpu number and interrupt priority level.
- * NOTE : These might be the same as the ones passed in thru
- * the descriptor.
- */
-static void
-hub_device_desc_update(device_desc_t dev_desc,
- ilvl_t intr_swlevel,
- cpuid_t cpu)
-{
-}
-
-int allocate_my_bit = INTRCONNECT_ANYBIT;
-
-/*
- * Allocate resources required for an interrupt as specified in dev_desc.
- * Returns a hub interrupt handle on success, or 0 on failure.
- */
-static hub_intr_t
-do_hub_intr_alloc(devfs_handle_t dev, /* which crosstalk device */
- device_desc_t dev_desc, /* device descriptor */
- devfs_handle_t owner_dev, /* owner of this interrupt, if known */
- int uncond_nothread) /* unconditionally non-threaded */
-{
- cpuid_t cpu = (cpuid_t)0; /* cpu to receive interrupt */
- int cpupicked = 0;
- int bit; /* interrupt vector */
- /*REFERENCED*/
- int intr_resflags = 0;
- hub_intr_t intr_hdl;
- cnodeid_t nodeid; /* node to receive interrupt */
- /*REFERENCED*/
- nasid_t nasid; /* nasid to receive interrupt */
- struct xtalk_intr_s *xtalk_info;
- iopaddr_t xtalk_addr; /* xtalk addr on hub to set intr */
- xwidget_info_t xwidget_info; /* standard crosstalk widget info handle */
- char *intr_name = NULL;
- ilvl_t intr_swlevel = (ilvl_t)0;
- extern int default_intr_pri;
- extern void synergy_intr_alloc(int, int);
-
-
- if (dev_desc) {
- if (dev_desc->flags & D_INTR_ISERR) {
- intr_resflags = II_ERRORINT;
- } else if (!uncond_nothread && !(dev_desc->flags & D_INTR_NOTHREAD)) {
- intr_resflags = II_THREADED;
- } else {
- /* Neither an error nor a thread. */
- intr_resflags = 0;
- }
- } else {
- intr_swlevel = default_intr_pri;
- if (!uncond_nothread)
- intr_resflags = II_THREADED;
- }
-
- /* XXX - Need to determine if the interrupt should be threaded. */
-
- /* If the cpu has not been picked already then choose a candidate
- * interrupt target and reserve the interrupt bit
- */
- if (!cpupicked) {
- cpu = intr_heuristic(dev,dev_desc,allocate_my_bit,
- intr_resflags,owner_dev,
- intr_name,&bit);
- }
-
- /* At this point we SHOULD have a valid cpu */
- if (cpu == CPU_NONE) {
-#if defined(SUPPORT_PRINTING_V_FORMAT)
- printk(KERN_WARNING "%v hub_intr_alloc could not allocate interrupt\n",
- owner_dev);
-#else
- printk(KERN_WARNING "%p hub_intr_alloc could not allocate interrupt\n",
- (void *)owner_dev);
-#endif
- return(0);
-
- }
-
- /* If the cpu has been picked already (due to the bridge data
- * corruption bug) then try to reserve an interrupt bit .
- */
- if (cpupicked) {
- bit = intr_reserve_level(cpu, allocate_my_bit,
- intr_resflags,
- owner_dev, intr_name);
- if (bit < 0) {
-#if defined(SUPPORT_PRINTING_V_FORMAT)
- printk(KERN_WARNING "Could not reserve an interrupt bit for cpu "
- " %d and dev %v\n",
- cpu,owner_dev);
-#else
- printk(KERN_WARNING "Could not reserve an interrupt bit for cpu "
- " %d and dev %p\n",
- (int)cpu, (void *)owner_dev);
-#endif
-
- return(0);
- }
- }
-
- nodeid = cpuid_to_cnodeid(cpu);
- nasid = cpuid_to_nasid(cpu);
- xtalk_addr = HUBREG_AS_XTALKADDR(nasid, PIREG(PI_INT_PEND_MOD, cpuid_to_subnode(cpu)));
-
- /*
- * Allocate an interrupt handle, and fill it in. There are two
- * pieces to an interrupt handle: the piece needed by generic
- * xtalk code which is used by crosstalk device drivers, and
- * the piece needed by low-level IP27 hardware code.
- */
- intr_hdl = snia_kmem_alloc_node(sizeof(struct hub_intr_s), KM_NOSLEEP, nodeid);
- ASSERT_ALWAYS(intr_hdl);
-
- /*
- * Fill in xtalk information for generic xtalk interfaces that
- * operate on xtalk_intr_hdl's.
- */
- xtalk_info = &intr_hdl->i_xtalk_info;
- xtalk_info->xi_dev = dev;
- xtalk_info->xi_vector = bit;
- xtalk_info->xi_addr = xtalk_addr;
-
- /*
- * Regardless of which CPU we ultimately interrupt, a given crosstalk
- * widget always handles interrupts (and PIO and DMA) through its
- * designated "master" crosstalk provider.
- */
- xwidget_info = xwidget_info_get(dev);
- if (xwidget_info)
- xtalk_info->xi_target = xwidget_info_masterid_get(xwidget_info);
-
- /* Fill in low level hub information for hub_* interrupt interface */
- intr_hdl->i_swlevel = intr_swlevel;
- intr_hdl->i_cpuid = cpu;
- intr_hdl->i_bit = bit;
- intr_hdl->i_flags = HUB_INTR_IS_ALLOCED;
-
- /* Store the actual interrupt priority level & interrupt target
- * cpu back in the device descriptor.
- */
- hub_device_desc_update(dev_desc, intr_swlevel, cpu);
- synergy_intr_alloc((int)bit, (int)cpu);
- return(intr_hdl);
-}
-
-/*
- * Allocate resources required for an interrupt as specified in dev_desc.
- * Returns a hub interrupt handle on success, or 0 on failure.
- */
-hub_intr_t
-hub_intr_alloc( devfs_handle_t dev, /* which crosstalk device */
- device_desc_t dev_desc, /* device descriptor */
- devfs_handle_t owner_dev) /* owner of this interrupt, if known */
-{
- return(do_hub_intr_alloc(dev, dev_desc, owner_dev, 0));
-}
-
-/*
- * Allocate resources required for an interrupt as specified in dev_desc.
- * Uncondtionally request non-threaded, regardless of what the device
- * descriptor might say.
- * Returns a hub interrupt handle on success, or 0 on failure.
- */
-hub_intr_t
-hub_intr_alloc_nothd(devfs_handle_t dev, /* which crosstalk device */
- device_desc_t dev_desc, /* device descriptor */
- devfs_handle_t owner_dev) /* owner of this interrupt, if known */
-{
- return(do_hub_intr_alloc(dev, dev_desc, owner_dev, 1));
-}
-
-/*
- * Free resources consumed by intr_alloc.
- */
-void
-hub_intr_free(hub_intr_t intr_hdl)
-{
- cpuid_t cpu = intr_hdl->i_cpuid;
- int bit = intr_hdl->i_bit;
- xtalk_intr_t xtalk_info;
-
- if (intr_hdl->i_flags & HUB_INTR_IS_CONNECTED) {
- /* Setting the following fields in the xtalk interrupt info
- * clears the interrupt target register in the xtalk user
- */
- xtalk_info = &intr_hdl->i_xtalk_info;
- xtalk_info->xi_dev = NODEV;
- xtalk_info->xi_vector = 0;
- xtalk_info->xi_addr = 0;
- hub_intr_disconnect(intr_hdl);
- }
-
- if (intr_hdl->i_flags & HUB_INTR_IS_ALLOCED)
- kfree(intr_hdl);
-
- intr_unreserve_level(cpu, bit);
-}
-
-
-/*
- * Associate resources allocated with a previous hub_intr_alloc call with the
- * described handler, arg, name, etc.
- */
-/*ARGSUSED*/
-int
-hub_intr_connect( hub_intr_t intr_hdl, /* xtalk intr resource handle */
- xtalk_intr_setfunc_t setfunc, /* func to set intr hw */
- void *setfunc_arg) /* arg to setfunc */
-{
- int rv;
- cpuid_t cpu = intr_hdl->i_cpuid;
- int bit = intr_hdl->i_bit;
- extern int synergy_intr_connect(int, int);
-
- ASSERT(intr_hdl->i_flags & HUB_INTR_IS_ALLOCED);
-
- rv = intr_connect_level(cpu, bit, intr_hdl->i_swlevel, NULL);
- if (rv < 0)
- return(rv);
-
- intr_hdl->i_xtalk_info.xi_setfunc = setfunc;
- intr_hdl->i_xtalk_info.xi_sfarg = setfunc_arg;
-
- if (setfunc) (*setfunc)((xtalk_intr_t)intr_hdl);
-
- intr_hdl->i_flags |= HUB_INTR_IS_CONNECTED;
- return(synergy_intr_connect((int)bit, (int)cpu));
-}
-
-
-/*
- * Disassociate handler with the specified interrupt.
- */
-void
-hub_intr_disconnect(hub_intr_t intr_hdl)
-{
- /*REFERENCED*/
- int rv;
- cpuid_t cpu = intr_hdl->i_cpuid;
- int bit = intr_hdl->i_bit;
- xtalk_intr_setfunc_t setfunc;
-
- setfunc = intr_hdl->i_xtalk_info.xi_setfunc;
-
- /* TBD: send disconnected interrupts somewhere harmless */
- if (setfunc) (*setfunc)((xtalk_intr_t)intr_hdl);
-
- rv = intr_disconnect_level(cpu, bit);
- ASSERT(rv == 0);
- intr_hdl->i_flags &= ~HUB_INTR_IS_CONNECTED;
-}
-
-
-/*
- * Return a hwgraph vertex that represents the CPU currently
- * targeted by an interrupt.
- */
-devfs_handle_t
-hub_intr_cpu_get(hub_intr_t intr_hdl)
-{
- cpuid_t cpuid = intr_hdl->i_cpuid;
- ASSERT(cpuid != CPU_NONE);
-
- return(cpuid_to_vertex(cpuid));
-}
+++ /dev/null
-/* $Id: hubcounters.c,v 1.1 2002/02/28 17:31:25 marcelo Exp $
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Copyright (C) 1992-1997,2000-2002 Silicon Graphics, Inc.
- * All rights reserved.
- */
-#include <linux/kernel.h>
-#include <linux/slab.h>
-#include <asm/types.h>
-#include <asm/sn/io.h>
-#include <asm/sn/nodepda.h>
-#include <asm/sn/iograph.h>
-#include <asm/sn/sn_cpuid.h>
-#include <asm/sn/router.h>
-#include <asm/sn/snconfig.h>
-#include <asm/sn/slotnum.h>
-#include <asm/sn/clksupport.h>
-#include <asm/sn/sndrv.h>
-
-extern void hubni_error_handler(char *, int); /* huberror.c */
-
-static int hubstats_ioctl(struct inode *, struct file *, unsigned int, unsigned long);
-struct file_operations hub_mon_fops = {
- ioctl: hubstats_ioctl,
-};
-
-#define HUB_CAPTURE_TICKS (2 * HZ)
-
-#define HUB_ERR_THRESH 500
-#define USEC_PER_SEC 1000000
-#define NSEC_PER_SEC USEC_PER_SEC*1000
-
-volatile int hub_print_usecs = 600 * USEC_PER_SEC;
-
-/* Return success if the hub's crosstalk link is working */
-int
-hub_xtalk_link_up(nasid_t nasid)
-{
- hubreg_t llp_csr_reg;
-
- /* Read the IO LLP control status register */
- llp_csr_reg = REMOTE_HUB_L(nasid, IIO_LLP_CSR);
-
- /* Check if the xtalk link is working */
- if (llp_csr_reg & IIO_LLP_CSR_IS_UP)
- return(1);
-
- return(0);
-
-
-}
-
-static char *error_flag_to_type(unsigned char error_flag)
-{
- switch(error_flag) {
- case 0x1: return ("NI retries");
- case 0x2: return ("NI SN errors");
- case 0x4: return ("NI CB errors");
- case 0x8: return ("II CB errors");
- case 0x10: return ("II SN errors");
- default: return ("Errors");
- }
-}
-
-int
-print_hub_error(hubstat_t *hsp, hubreg_t reg,
- int64_t delta, unsigned char error_flag)
-{
- int64_t rate;
-
- reg *= hsp->hs_per_minute; /* Convert to minutes */
- rate = reg / delta;
-
- if (rate > HUB_ERR_THRESH) {
-
- if(hsp->hs_maint & error_flag)
- {
- printk( "Excessive %s (%ld/min) on %s",
- error_flag_to_type(error_flag), rate, hsp->hs_name);
- }
- else
- {
- hsp->hs_maint |= error_flag;
- printk( "Excessive %s (%ld/min) on %s",
- error_flag_to_type(error_flag), rate, hsp->hs_name);
- }
- return 1;
- } else {
- return 0;
- }
-}
-
-
-int
-check_hub_error_rates(hubstat_t *hsp)
-{
- int64_t delta = hsp->hs_timestamp - hsp->hs_timebase;
- int printed = 0;
-
- printed += print_hub_error(hsp, hsp->hs_ni_retry_errors,
- delta, 0x1);
-
-#if 0
- printed += print_hub_error(hsp, hsp->hs_ni_sn_errors,
- delta, 0x2);
-#endif
-
- printed += print_hub_error(hsp, hsp->hs_ni_cb_errors,
- delta, 0x4);
-
-
- /* If the hub's xtalk link is not working there is
- * no need to print the "Excessive..." warning
- * messages
- */
- if (!hub_xtalk_link_up(hsp->hs_nasid))
- return(printed);
-
-
- printed += print_hub_error(hsp, hsp->hs_ii_cb_errors,
- delta, 0x8);
-
- printed += print_hub_error(hsp, hsp->hs_ii_sn_errors,
- delta, 0x10);
-
- return printed;
-}
-
-
-void
-capture_hub_stats(cnodeid_t cnodeid, struct nodepda_s *npda)
-{
- nasid_t nasid;
- hubstat_t *hsp = &(npda->hubstats);
- hubreg_t port_error;
- ii_illr_u_t illr;
- int count;
- int overflow = 0;
-
- /*
- * If our link wasn't up at boot time, don't worry about error rates.
- */
- if (!(hsp->hs_ni_port_status & NPS_LINKUP_MASK)) {
- printk("capture_hub_stats: cnode=%d hs_ni_port_status=0x%016lx : link is not up\n",
- cnodeid, hsp->hs_ni_port_status);
- return;
- }
-
- nasid = COMPACT_TO_NASID_NODEID(cnodeid);
-
- hsp->hs_timestamp = GET_RTC_COUNTER();
-
- port_error = REMOTE_HUB_L(nasid, NI_PORT_ERROR_CLEAR);
- count = ((port_error & NPE_RETRYCOUNT_MASK) >> NPE_RETRYCOUNT_SHFT);
- hsp->hs_ni_retry_errors += count;
- if (count == NPE_COUNT_MAX)
- overflow = 1;
- count = ((port_error & NPE_SNERRCOUNT_MASK) >> NPE_SNERRCOUNT_SHFT);
- hsp->hs_ni_sn_errors += count;
- if (count == NPE_COUNT_MAX)
- overflow = 1;
- count = ((port_error & NPE_CBERRCOUNT_MASK) >> NPE_CBERRCOUNT_SHFT);
- hsp->hs_ni_cb_errors += count;
- if (overflow || count == NPE_COUNT_MAX)
- hsp->hs_ni_overflows++;
-
- if (port_error & NPE_FATAL_ERRORS) {
-#ifdef ajm
- hubni_error_handler("capture_hub_stats", 1);
-#else
- printk("Error: hubni_error_handler in capture_hub_stats");
-#endif
- }
-
- illr.ii_illr_regval = REMOTE_HUB_L(nasid, IIO_LLP_LOG);
- REMOTE_HUB_S(nasid, IIO_LLP_LOG, 0);
-
- hsp->hs_ii_sn_errors += illr.ii_illr_fld_s.i_sn_cnt;
- hsp->hs_ii_cb_errors += illr.ii_illr_fld_s.i_cb_cnt;
- if ((illr.ii_illr_fld_s.i_sn_cnt == IIO_LLP_SN_MAX) ||
- (illr.ii_illr_fld_s.i_cb_cnt == IIO_LLP_CB_MAX))
- hsp->hs_ii_overflows++;
-
- if (hsp->hs_print) {
- if (check_hub_error_rates(hsp)) {
- hsp->hs_last_print = GET_RTC_COUNTER();
- hsp->hs_print = 0;
- }
- } else {
- if ((GET_RTC_COUNTER() -
- hsp->hs_last_print) > hub_print_usecs)
- hsp->hs_print = 1;
- }
-
- npda->hubticks = HUB_CAPTURE_TICKS;
-}
-
-
-void
-init_hub_stats(cnodeid_t cnodeid, struct nodepda_s *npda)
-{
- hubstat_t *hsp = &(npda->hubstats);
- nasid_t nasid = cnodeid_to_nasid(cnodeid);
- bzero(&(npda->hubstats), sizeof(hubstat_t));
-
- hsp->hs_version = HUBSTAT_VERSION;
- hsp->hs_cnode = cnodeid;
- hsp->hs_nasid = nasid;
- hsp->hs_timebase = GET_RTC_COUNTER();
- hsp->hs_ni_port_status = REMOTE_HUB_L(nasid, NI_PORT_STATUS);
-
- /* Clear the II error counts. */
- REMOTE_HUB_S(nasid, IIO_LLP_LOG, 0);
-
- /* Clear the NI counts. */
- REMOTE_HUB_L(nasid, NI_PORT_ERROR_CLEAR);
-
- hsp->hs_per_minute = (long long)RTC_CYCLES_PER_SEC * 60LL;
-
- npda->hubticks = HUB_CAPTURE_TICKS;
-
- /* XX should use kmem_alloc_node */
- hsp->hs_name = (char *)kmalloc(MAX_HUB_PATH, GFP_KERNEL);
- ASSERT_ALWAYS(hsp->hs_name);
-
- sprintf(hsp->hs_name, "/dev/hw/" EDGE_LBL_MODULE "/%03d/"
- EDGE_LBL_NODE "/" EDGE_LBL_HUB,
- npda->module_id);
-
- hsp->hs_last_print = 0;
- hsp->hs_print = 1;
-
- hub_print_usecs = hub_print_usecs;
-
-#if 0
- printk("init_hub_stats: cnode=%d nasid=%d hs_version=%d hs_ni_port_status=0x%016lx\n",
- cnodeid, nasid, hsp->hs_version, hsp->hs_ni_port_status);
-#endif
-}
-
-static int
-hubstats_ioctl(struct inode *inode, struct file *file,
- unsigned int cmd, unsigned long arg)
-{
- cnodeid_t cnode;
- nodepda_t *npdap;
- uint64_t longarg;
- devfs_handle_t d;
-
- if ((d = devfs_get_handle_from_inode(inode)) == NULL)
- return -ENODEV;
- cnode = (cnodeid_t)hwgraph_fastinfo_get(d);
- npdap = NODEPDA(cnode);
-
- if (npdap->hubstats.hs_version != HUBSTAT_VERSION) {
- init_hub_stats(cnode, npdap);
- }
-
- switch (cmd) {
- case SNDRV_GET_INFOSIZE:
- longarg = sizeof(hubstat_t);
- if (copy_to_user((void *)arg, &longarg, sizeof(longarg))) {
- return -EFAULT;
- }
- break;
-
- case SNDRV_GET_HUBINFO:
- /* refresh npda->hubstats */
- capture_hub_stats(cnode, npdap);
- if (copy_to_user((void *)arg, &npdap->hubstats, sizeof(hubstat_t))) {
- return -EFAULT;
- }
- break;
-
- default:
- return -EINVAL;
- }
-
- return 0;
-}
+++ /dev/null
-/* $Id: huberror.c,v 1.1 2002/02/28 17:31:25 marcelo Exp $
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Copyright (C) 1992 - 1997, 2000-2002 Silicon Graphics, Inc. All rights reserved.
- */
-
-
-#include <linux/types.h>
-#include <linux/slab.h>
-#include <asm/smp.h>
-#include <asm/sn/sgi.h>
-#include <asm/sn/io.h>
-#include <asm/sn/iograph.h>
-#include <asm/sn/invent.h>
-#include <asm/sn/hcl.h>
-#include <asm/sn/labelcl.h>
-#include <asm/sn/sn_private.h>
-#include <asm/sn/klconfig.h>
-#include <asm/sn/sn_cpuid.h>
-#include <asm/sn/pci/pciio.h>
-#include <asm/sn/pci/pcibr.h>
-#include <asm/sn/xtalk/xtalk.h>
-#include <asm/sn/pci/pcibr_private.h>
-#include <asm/sn/intr.h>
-
-extern void hubni_eint_init(cnodeid_t cnode);
-extern void hubii_eint_init(cnodeid_t cnode);
-extern void hubii_eint_handler (int irq, void *arg, struct pt_regs *ep);
-extern void snia_error_intr_handler(int irq, void *devid, struct pt_regs *pt_regs);
-
-extern int maxcpus;
-
-#define HUB_ERROR_PERIOD (120 * HZ) /* 2 minutes */
-
-
-void
-hub_error_clear(nasid_t nasid)
-{
- int i;
- hubreg_t idsr;
- int sn;
-
- for(sn=0; sn<NUM_SUBNODES; sn++) {
- REMOTE_HUB_PI_S(nasid, sn, PI_ERR_INT_PEND, -1);
- REMOTE_HUB_PI_S(nasid, sn, PI_ERR_STATUS0_A_CLR, -1);
- REMOTE_HUB_PI_S(nasid, sn, PI_ERR_STATUS0_B_CLR, -1);
- REMOTE_HUB_PI_S(nasid, sn, PI_SPURIOUS_HDR_0, 0);
- REMOTE_HUB_PI_S(nasid, sn, PI_SPURIOUS_HDR_1, 0);
- }
-
- REMOTE_HUB_L(nasid, MD_DIR_ERROR_CLR);
- REMOTE_HUB_L(nasid, MD_MEM_ERROR_CLR);
- REMOTE_HUB_L(nasid, MD_MISC1_ERROR_CLR);
- REMOTE_HUB_L(nasid, MD_PROTOCOL_ERR_CLR);
-
- /*
- * Make sure spurious write response errors are cleared
- * (values are from hub_set_prb())
- */
- for (i = 0; i <= HUB_WIDGET_ID_MAX - HUB_WIDGET_ID_MIN + 1; i++) {
- iprb_t prb;
-
- prb.iprb_regval = REMOTE_HUB_L(nasid, IIO_IOPRB_0 + (i * sizeof(hubreg_t)));
-
- /* Clear out some fields */
- prb.iprb_ovflow = 1;
- prb.iprb_bnakctr = 0;
- prb.iprb_anakctr = 0;
-
- /*
- * PIO reads in fire-and-forget mode on bedrock 1.0 don't
- * frob the credit count properly, making the responses appear
- * spurious. So don't use fire-and-forget mode. Bug 761802.
- */
- prb.iprb_ff = 0; /* disable fire-and-forget mode by default */
-
- prb.iprb_xtalkctr = 3; /* approx. PIO credits for the widget */
-
- REMOTE_HUB_S(nasid, IIO_IOPRB_0 + (i * sizeof(hubreg_t)), prb.iprb_regval);
- }
-
- REMOTE_HUB_S(nasid, IIO_IO_ERR_CLR, -1);
- idsr = REMOTE_HUB_L(nasid, IIO_IIDSR);
- REMOTE_HUB_S(nasid, IIO_IIDSR, (idsr & ~(IIO_IIDSR_SENT_MASK)));
-
- REMOTE_HUB_L(nasid, NI_PORT_ERROR_CLEAR);
- /* No need to clear NI_PORT_HEADER regs; they are continually overwritten*/
-
- REMOTE_HUB_S(nasid, LB_ERROR_MASK_CLR, -1);
- REMOTE_HUB_S(nasid, LB_ERROR_HDR1, 0);
-
- /* Clear XB error regs, in order */
- for (i = 0;
- i <= XB_FIRST_ERROR_CLEAR - XB_POQ0_ERROR_CLEAR;
- i += sizeof(hubreg_t)) {
- REMOTE_HUB_S(nasid, XB_POQ0_ERROR_CLEAR + i, 0);
- }
-}
-
-
-/*
- * Function : hub_error_init
- * Purpose : initialize the error handling requirements for a given hub.
- * Parameters : cnode, the compact nodeid.
- * Assumptions : Called only once per hub, either by a local cpu. Or by a
- * remote cpu, when this hub is headless.(cpuless)
- * Returns : None
- */
-
-void
-hub_error_init(cnodeid_t cnode)
-{
- nasid_t nasid;
-
- nasid = cnodeid_to_nasid(cnode);
- hub_error_clear(nasid);
-
- /*
- * Now setup the hub ii and ni error interrupt handler.
- */
-
- hubii_eint_init(cnode);
- hubni_eint_init(cnode);
-
- return;
-}
-
-/*
- * Function : hubii_eint_init
- * Parameters : cnode
- * Purpose : to initialize the hub iio error interrupt.
- * Assumptions : Called once per hub, by the cpu which will ultimately
- * handle this interrupt.
- * Returns : None.
- */
-
-
-void
-hubii_eint_init(cnodeid_t cnode)
-{
- int bit, rv;
- ii_iidsr_u_t hubio_eint;
- hubinfo_t hinfo;
- cpuid_t intr_cpu;
- devfs_handle_t hub_v;
- ii_ilcsr_u_t ilcsr;
- int bit_pos_to_irq(int bit);
- int synergy_intr_connect(int bit, int cpuid);
-
-
- hub_v = (devfs_handle_t)cnodeid_to_vertex(cnode);
- ASSERT_ALWAYS(hub_v);
- hubinfo_get(hub_v, &hinfo);
-
- ASSERT(hinfo);
- ASSERT(hinfo->h_cnodeid == cnode);
-
- ilcsr.ii_ilcsr_regval = REMOTE_HUB_L(hinfo->h_nasid, IIO_ILCSR);
-
- if ((ilcsr.ii_ilcsr_fld_s.i_llp_stat & 0x2) == 0) {
- /*
- * HUB II link is not up.
- * Just disable LLP, and don't connect any interrupts.
- */
- ilcsr.ii_ilcsr_fld_s.i_llp_en = 0;
- REMOTE_HUB_S(hinfo->h_nasid, IIO_ILCSR, ilcsr.ii_ilcsr_regval);
- return;
- }
- /* Select a possible interrupt target where there is a free interrupt
- * bit and also reserve the interrupt bit for this IO error interrupt
- */
- intr_cpu = intr_heuristic(hub_v,0,INTRCONNECT_ANYBIT,II_ERRORINT,hub_v,
- "HUB IO error interrupt",&bit);
- if (intr_cpu == CPU_NONE) {
- printk("hubii_eint_init: intr_reserve_level failed, cnode %d", cnode);
- return;
- }
-
- rv = intr_connect_level(intr_cpu, bit, 0, NULL);
- synergy_intr_connect(bit, intr_cpu);
- request_irq(bit_pos_to_irq(bit) + (intr_cpu << 8), hubii_eint_handler, 0, "SN hub error", (void *)hub_v);
- ASSERT_ALWAYS(rv >= 0);
- hubio_eint.ii_iidsr_regval = 0;
- hubio_eint.ii_iidsr_fld_s.i_enable = 1;
- hubio_eint.ii_iidsr_fld_s.i_level = bit;/* Take the least significant bits*/
- hubio_eint.ii_iidsr_fld_s.i_node = COMPACT_TO_NASID_NODEID(cnode);
- hubio_eint.ii_iidsr_fld_s.i_pi_id = cpuid_to_subnode(intr_cpu);
- REMOTE_HUB_S(hinfo->h_nasid, IIO_IIDSR, hubio_eint.ii_iidsr_regval);
-
-}
-
-void
-hubni_eint_init(cnodeid_t cnode)
-{
- int intr_bit;
- cpuid_t targ;
-
-
- if ((targ = cnodeid_to_cpuid(cnode)) == CPU_NONE)
- return;
-
- /* The prom chooses which cpu gets these interrupts, but we
- * don't know which one it chose. We will register all of the
- * cpus to be sure. This only costs us an irqaction per cpu.
- */
- for (; targ < CPUS_PER_NODE; targ++) {
- if (!cpu_enabled(targ) ) continue;
- /* connect the INTEND1 bits. */
- for (intr_bit = XB_ERROR; intr_bit <= MSC_PANIC_INTR; intr_bit++) {
- intr_connect_level(targ, intr_bit, II_ERRORINT, NULL);
- }
- request_irq(SGI_HUB_ERROR_IRQ + (targ << 8), snia_error_intr_handler, 0, "SN hub error", NULL);
- /* synergy masks are initialized in the prom to enable all interrupts. */
- /* We'll just leave them that way, here, for these interrupts. */
- }
-}
-
-
-/*ARGSUSED*/
-void
-hubii_eint_handler (int irq, void *arg, struct pt_regs *ep)
-{
-
- panic("Hubii interrupt\n");
-}
+++ /dev/null
-/*
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Copyright (C) 1992 - 1997, 2000-2002 Silicon Graphics, Inc. All rights reserved.
- */
-
-/*
- * ip37.c
- * Support for IP35/IP37 machines
- */
-
-#include <linux/types.h>
-
-#include <asm/sn/sgi.h>
-#include <asm/sn/io.h>
-#include <asm/sn/klconfig.h>
-#include <asm/sn/pci/bridge.h> /* for bridge_t */
-
-
-xwidgetnum_t
-hub_widget_id(nasid_t nasid)
-{
- hubii_wcr_t ii_wcr; /* the control status register */
-
- ii_wcr.wcr_reg_value = REMOTE_HUB_L(nasid,IIO_WCR);
-
- return ii_wcr.wcr_fields_s.wcr_widget_id;
-}
-
-int
-is_fine_dirmode(void)
-{
- return (((LOCAL_HUB_L(LB_REV_ID) & LRI_SYSTEM_SIZE_MASK)
- >> LRI_SYSTEM_SIZE_SHFT) == SYSTEM_SIZE_SMALL);
-
-}
-
-
-void
-ni_reset_port(void)
-{
- LOCAL_HUB_S(NI_RESET_ENABLE, NRE_RESETOK);
- LOCAL_HUB_S(NI_PORT_RESET, NPR_PORTRESET | NPR_LOCALRESET);
-}
+++ /dev/null
-/* $Id: mem_refcnt.c,v 1.1 2002/02/28 17:31:25 marcelo Exp $
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Copyright (C) 1992 - 1997, 2000-2002 Silicon Graphics, Inc. All rights reserved.
- */
-
-#include <linux/types.h>
-#include <asm/sn/arch.h>
-#include <asm/sn/sgi.h>
-#include <asm/sn/io.h>
-#include <asm/sn/sn_cpuid.h>
-#include <asm/sn/invent.h>
-#include <asm/sn/hcl.h>
-#include <asm/sn/hcl_util.h>
-#include <asm/sn/nodepda.h>
-#include <asm/sn/iograph.h>
-#include <asm/sn/sn1/mem_refcnt.h>
-#include <asm/sn/sn1/hwcntrs.h>
-#include <asm/sn/sn1/hubspc.h>
-// From numa_hw.h
-
-#define MIGR_COUNTER_MAX_GET(nodeid) \
- (NODEPDA_MCD((nodeid))->migr_system_kparms.migr_threshold_reference)
-/*
- * Get the Absolute Theshold
- */
-#define MIGR_THRESHOLD_ABS_GET(nodeid) ( \
- MD_MIG_VALUE_THRESH_GET(COMPACT_TO_NASID_NODEID(nodeid)))
-/*
- * Get the current Differential Threshold
- */
-#define MIGR_THRESHOLD_DIFF_GET(nodeid) \
- (NODEPDA_MCD(nodeid)->migr_as_kparms.migr_base_threshold)
-
-#define NUM_OF_HW_PAGES_PER_SW_PAGE() (NBPP / MD_PAGE_SIZE)
-
-// #include "migr_control.h"
-
-int
-mem_refcnt_attach(devfs_handle_t hub)
-{
-#if 0
- devfs_handle_t refcnt_dev;
-
- hwgraph_char_device_add(hub,
- "refcnt",
- "hubspc_",
- &refcnt_dev);
- device_info_set(refcnt_dev, (void*)(ulong)HUBSPC_REFCOUNTERS);
-#endif
-
- return (0);
-}
-
-
-/*ARGSUSED*/
-int
-mem_refcnt_open(devfs_handle_t *devp, mode_t oflag, int otyp, cred_t *crp)
-{
- cnodeid_t node;
-
- node = master_node_get(*devp);
-
- ASSERT( (node >= 0) && (node < numnodes) );
-
- if (NODEPDA(node)->migr_refcnt_counterbuffer == NULL) {
- return (ENODEV);
- }
-
- ASSERT( NODEPDA(node)->migr_refcnt_counterbase != NULL );
- ASSERT( NODEPDA(node)->migr_refcnt_cbsize != (size_t)0 );
-
- return (0);
-}
-
-/*ARGSUSED*/
-int
-mem_refcnt_close(devfs_handle_t dev, int oflag, int otyp, cred_t *crp)
-{
- return 0;
-}
-
-/*ARGSUSED*/
-int
-mem_refcnt_mmap(devfs_handle_t dev, vhandl_t *vt, off_t off, size_t len, uint prot)
-{
- cnodeid_t node;
- int errcode;
- char* buffer;
- size_t blen;
-
- node = master_node_get(dev);
-
- ASSERT( (node >= 0) && (node < numnodes) );
-
- ASSERT( NODEPDA(node)->migr_refcnt_counterbuffer != NULL);
- ASSERT( NODEPDA(node)->migr_refcnt_counterbase != NULL );
- ASSERT( NODEPDA(node)->migr_refcnt_cbsize != 0 );
-
- /*
- * XXXX deal with prot's somewhere around here....
- */
-
- buffer = NODEPDA(node)->migr_refcnt_counterbuffer;
- blen = NODEPDA(node)->migr_refcnt_cbsize;
-
- /*
- * Force offset to be a multiple of sizeof(refcnt_t)
- * We round up.
- */
-
- off = (((off - 1)/sizeof(refcnt_t)) + 1) * sizeof(refcnt_t);
-
- if ( ((buffer + blen) - (buffer + off + len)) < 0 ) {
- return (EPERM);
- }
-
- errcode = v_mapphys(vt,
- buffer + off,
- len);
-
- return errcode;
-}
-
-/*ARGSUSED*/
-int
-mem_refcnt_unmap(devfs_handle_t dev, vhandl_t *vt)
-{
- return 0;
-}
-
-/* ARGSUSED */
-int
-mem_refcnt_ioctl(devfs_handle_t dev,
- int cmd,
- void *arg,
- int mode,
- cred_t *cred_p,
- int *rvalp)
-{
- cnodeid_t node;
- int errcode;
- extern int numnodes;
-
- node = master_node_get(dev);
-
- ASSERT( (node >= 0) && (node < numnodes) );
-
- ASSERT( NODEPDA(node)->migr_refcnt_counterbuffer != NULL);
- ASSERT( NODEPDA(node)->migr_refcnt_counterbase != NULL );
- ASSERT( NODEPDA(node)->migr_refcnt_cbsize != 0 );
-
- errcode = 0;
-
- switch (cmd) {
- case RCB_INFO_GET:
- {
- rcb_info_t rcb;
-
- rcb.rcb_len = NODEPDA(node)->migr_refcnt_cbsize;
-
- rcb.rcb_sw_sets = NODEPDA(node)->migr_refcnt_numsets;
- rcb.rcb_sw_counters_per_set = numnodes;
- rcb.rcb_sw_counter_size = sizeof(refcnt_t);
-
- rcb.rcb_base_pages = NODEPDA(node)->migr_refcnt_numsets /
- NUM_OF_HW_PAGES_PER_SW_PAGE();
- rcb.rcb_base_page_size = NBPP;
- rcb.rcb_base_paddr = ctob(slot_getbasepfn(node, 0));
-
- rcb.rcb_cnodeid = node;
- rcb.rcb_granularity = MD_PAGE_SIZE;
-#ifdef LATER
- rcb.rcb_hw_counter_max = MIGR_COUNTER_MAX_GET(node);
- rcb.rcb_diff_threshold = MIGR_THRESHOLD_DIFF_GET(node);
-#endif
- rcb.rcb_abs_threshold = MIGR_THRESHOLD_ABS_GET(node);
- rcb.rcb_num_slots = MAX_MEM_SLOTS;
-
- if (COPYOUT(&rcb, arg, sizeof(rcb_info_t))) {
- errcode = EFAULT;
- }
-
- break;
- }
- case RCB_SLOT_GET:
- {
- rcb_slot_t slot[MAX_MEM_SLOTS];
- int s;
- int nslots;
-
- nslots = MAX_MEM_SLOTS;
- ASSERT(nslots <= MAX_MEM_SLOTS);
- for (s = 0; s < nslots; s++) {
- slot[s].base = (uint64_t)ctob(slot_getbasepfn(node, s));
-#ifdef LATER
- slot[s].size = (uint64_t)ctob(slot_getsize(node, s));
-#else
- slot[s].size = (uint64_t)1;
-#endif
- }
- if (COPYOUT(&slot[0], arg, nslots * sizeof(rcb_slot_t))) {
- errcode = EFAULT;
- }
-
- *rvalp = nslots;
- break;
- }
-
- default:
- errcode = EINVAL;
- break;
-
- }
-
- return errcode;
-}
+++ /dev/null
-/* $Id: ml_SN_intr.c,v 1.1 2002/02/28 17:31:25 marcelo Exp $
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Copyright (C) 1992 - 1997, 2000-2002 Silicon Graphics, Inc. All rights reserved.
- */
-
-/*
- * intr.c-
- * This file contains all of the routines necessary to set up and
- * handle interrupts on an IP27 board.
- */
-
-#ident "$Revision: 1.1 $"
-
-#include <linux/types.h>
-#include <linux/config.h>
-#include <linux/slab.h>
-#include <asm/smp.h>
-#include <asm/sn/sgi.h>
-#include <asm/sn/io.h>
-#include <asm/sn/iograph.h>
-#include <asm/sn/invent.h>
-#include <asm/sn/hcl.h>
-#include <asm/sn/labelcl.h>
-#include <asm/sn/sn_private.h>
-#include <asm/sn/klconfig.h>
-#include <asm/sn/sn_cpuid.h>
-#include <asm/sn/pci/pciio.h>
-#include <asm/sn/pci/pcibr.h>
-#include <asm/sn/xtalk/xtalk.h>
-#include <asm/sn/pci/pcibr_private.h>
-#include <asm/sn/intr.h>
-
-
-#if DEBUG_INTR_TSTAMP_DEBUG
-#include <sys/debug.h>
-#include <sys/idbg.h>
-#include <sys/inst.h>
-void do_splx_log(int, int);
-void spldebug_log_event(int);
-#endif
-
-#ifdef CONFIG_SMP
-extern unsigned long cpu_online_map;
-#endif
-#define cpu_allows_intr(cpu) (1)
-// If I understand what's going on with this, 32 should work.
-// physmem_maxradius seems to be the maximum number of router
-// hops to get from one end of the system to the other. With
-// a maximally configured machine, with the dumbest possible
-// topology, we would make 32 router hops. For what we're using
-// it for, the dumbest possible should suffice.
-#define physmem_maxradius() 32
-
-#define SUBNODE_ANY (-1)
-
-extern int nmied;
-extern int hub_intr_wakeup_cnt;
-extern synergy_da_t *Synergy_da_indr[];
-extern cpuid_t master_procid;
-
-extern cnodeid_t master_node_get(devfs_handle_t vhdl);
-
-extern void snia_error_intr_handler(int irq, void *devid, struct pt_regs *pt_regs);
-
-
-#define INTR_LOCK(vecblk) \
- (s = mutex_spinlock(&(vecblk)->vector_lock))
-#define INTR_UNLOCK(vecblk) \
- mutex_spinunlock(&(vecblk)->vector_lock, s)
-
-/*
- * REACT/Pro
- */
-
-
-
-/*
- * Find first bit set
- * Used outside this file also
- */
-int ms1bit(unsigned long x)
-{
- int b;
-
- if (x >> 32) b = 32, x >>= 32;
- else b = 0;
- if (x >> 16) b += 16, x >>= 16;
- if (x >> 8) b += 8, x >>= 8;
- if (x >> 4) b += 4, x >>= 4;
- if (x >> 2) b += 2, x >>= 2;
-
- return b + (int) (x >> 1);
-}
-
-/* ARGSUSED */
-void
-intr_stray(void *lvl)
-{
- printk(KERN_WARNING "Stray Interrupt - level %ld to cpu %d", (long)lvl, smp_processor_id());
-}
-
-#if defined(DEBUG)
-
-/* Infrastructure to gather the device - target cpu mapping info */
-#define MAX_DEVICES 1000 /* Reasonable large number . Need not be
- * the exact maximum # devices possible.
- */
-#define MAX_NAME 100
-typedef struct {
- dev_t dev; /* device */
- cpuid_t cpuid; /* target cpu */
- cnodeid_t cnodeid;/* node on which the target cpu is present */
- int bit; /* intr bit reserved */
- char intr_name[MAX_NAME]; /* name of the interrupt */
-} intr_dev_targ_map_t;
-
-intr_dev_targ_map_t intr_dev_targ_map[MAX_DEVICES];
-uint64_t intr_dev_targ_map_size;
-spinlock_t intr_dev_targ_map_lock;
-
-/* Print out the device - target cpu mapping.
- * This routine is used only in the idbg command
- * "intrmap"
- */
-void
-intr_dev_targ_map_print(cnodeid_t cnodeid)
-{
- int i,j,size = 0;
- int print_flag = 0,verbose = 0;
- char node_name[10];
-
- if (cnodeid != CNODEID_NONE) {
- nodepda_t *npda;
-
- npda = NODEPDA(cnodeid);
- for (j=0; j<NUM_SUBNODES; j++) {
- qprintf("\n SUBNODE %d\n INT_PEND0: ", j);
- for(i = 0 ; i < N_INTPEND_BITS ; i++)
- qprintf("%d",SNPDA(npda,j)->intr_dispatch0.info[i].ii_flags);
- qprintf("\n INT_PEND1: ");
- for(i = 0 ; i < N_INTPEND_BITS ; i++)
- qprintf("%d",SNPDA(npda,j)->intr_dispatch1.info[i].ii_flags);
- }
- verbose = 1;
- }
- qprintf("\n Device - Target Map [Interrupts: %s Node%s]\n\n",
- (verbose ? "All" : "Non-hardwired"),
- (cnodeid == CNODEID_NONE) ? "s: All" : node_name);
-
- qprintf("Device\tCpu\tCnode\tIntr_bit\tIntr_name\n");
- for (i = 0 ; i < intr_dev_targ_map_size ; i++) {
-
- print_flag = 0;
- if (verbose) {
- if (cnodeid != CNODEID_NONE) {
- if (cnodeid == intr_dev_targ_map[i].cnodeid)
- print_flag = 1;
- } else {
- print_flag = 1;
- }
- } else {
- if (intr_dev_targ_map[i].dev != 0) {
- if (cnodeid != CNODEID_NONE) {
- if (cnodeid ==
- intr_dev_targ_map[i].cnodeid)
- print_flag = 1;
- } else {
- print_flag = 1;
- }
- }
- }
- if (print_flag) {
- size++;
- qprintf("%d\t%d\t%d\t%d\t%s\n",
- intr_dev_targ_map[i].dev,
- intr_dev_targ_map[i].cpuid,
- intr_dev_targ_map[i].cnodeid,
- intr_dev_targ_map[i].bit,
- intr_dev_targ_map[i].intr_name);
- }
-
- }
- qprintf("\nTotal : %d\n",size);
-}
-#endif /* DEBUG */
-
-/*
- * The spinlocks have already been initialized. Now initialize the interrupt
- * vectors. One processor on each hub does the work.
- */
-void
-intr_init_vecblk(nodepda_t *npda, cnodeid_t node, int sn)
-{
- int i, ip=0;
- intr_vecblk_t *vecblk;
- subnode_pda_t *snpda;
-
-
- snpda = SNPDA(npda,sn);
- do {
- if (ip == 0) {
- vecblk = &snpda->intr_dispatch0;
- } else {
- vecblk = &snpda->intr_dispatch1;
- }
-
- /* Initialize this vector. */
- for (i = 0; i < N_INTPEND_BITS; i++) {
- vecblk->vectors[i].iv_func = intr_stray;
- vecblk->vectors[i].iv_prefunc = NULL;
- vecblk->vectors[i].iv_arg = (void *)(__psint_t)(ip * N_INTPEND_BITS + i);
-
- vecblk->info[i].ii_owner_dev = 0;
- strcpy(vecblk->info[i].ii_name, "Unused");
- vecblk->info[i].ii_flags = 0; /* No flags */
- vecblk->vectors[i].iv_mustruncpu = -1; /* No CPU yet. */
-
- }
-
- mutex_spinlock_init(&vecblk->vector_lock);
-
- vecblk->vector_count = 0;
- for (i = 0; i < CPUS_PER_SUBNODE; i++)
- vecblk->cpu_count[i] = 0;
-
- vecblk->vector_state = VECTOR_UNINITED;
-
- } while (++ip < 2);
-
-}
-
-
-/*
- * do_intr_reserve_level(cpuid_t cpu, int bit, int resflags, int reserve,
- * devfs_handle_t owner_dev, char *name)
- * Internal work routine to reserve or unreserve an interrupt level.
- * cpu is the CPU to which the interrupt will be sent.
- * bit is the level bit to reserve. -1 means any level
- * resflags should include II_ERRORINT if this is an
- * error interrupt, II_THREADED if the interrupt handler
- * will be threaded, or 0 otherwise.
- * reserve should be set to II_RESERVE or II_UNRESERVE
- * to get or clear a reservation.
- * owner_dev is the device that "owns" this interrupt, if supplied
- * name is a human-readable name for this interrupt, if supplied
- * intr_reserve_level returns the bit reserved or -1 to indicate an error
- */
-static int
-do_intr_reserve_level(cpuid_t cpu, int bit, int resflags, int reserve,
- devfs_handle_t owner_dev, char *name)
-{
- intr_vecblk_t *vecblk;
- hub_intmasks_t *hub_intmasks;
- unsigned long s;
- int rv = 0;
- int ip;
- synergy_da_t *sda;
- int which_synergy;
- cnodeid_t cnode;
-
- ASSERT(bit < N_INTPEND_BITS * 2);
-
- cnode = cpuid_to_cnodeid(cpu);
- which_synergy = cpuid_to_synergy(cpu);
- sda = Synergy_da_indr[(cnode * 2) + which_synergy];
- hub_intmasks = &sda->s_intmasks;
- // hub_intmasks = &pdaindr[cpu].pda->p_intmasks;
-
- // if (pdaindr[cpu].pda == NULL) return -1;
- if ((bit < N_INTPEND_BITS) && !(resflags & II_ERRORINT)) {
- vecblk = hub_intmasks->dispatch0;
- ip = 0;
- } else {
- ASSERT((bit >= N_INTPEND_BITS) || (bit == -1));
- bit -= N_INTPEND_BITS; /* Get position relative to INT_PEND1 reg. */
- vecblk = hub_intmasks->dispatch1;
- ip = 1;
- }
-
- INTR_LOCK(vecblk);
-
- if (bit <= -1) {
- bit = 0;
- ASSERT(reserve == II_RESERVE);
- /* Choose any available level */
- for (; bit < N_INTPEND_BITS; bit++) {
- if (!(vecblk->info[bit].ii_flags & II_RESERVE)) {
- rv = bit;
- break;
- }
- }
-
- /* Return -1 if all interrupt levels int this register are taken. */
- if (bit == N_INTPEND_BITS)
- rv = -1;
-
- } else {
- /* Reserve a particular level if it's available. */
- if ((vecblk->info[bit].ii_flags & II_RESERVE) == reserve) {
- /* Can't (un)reserve a level that's already (un)reserved. */
- rv = -1;
- } else {
- rv = bit;
- }
- }
-
- /* Reserve the level and bump the count. */
- if (rv != -1) {
- if (reserve) {
- vecblk->info[bit].ii_flags |= (II_RESERVE | resflags);
- vecblk->info[bit].ii_owner_dev = owner_dev;
- /* Copy in the name. */
- if (name)
- strlcpy(vecblk->info[bit].ii_name, name,
- sizeof(vecblk->info[bit].ii_name));
- else
- vecblk->info[bit].ii_name[0] = '\0';
- vecblk->vector_count++;
- } else {
- vecblk->info[bit].ii_flags = 0; /* Clear all the flags */
- vecblk->info[bit].ii_owner_dev = 0;
- /* Clear the name. */
- vecblk->info[bit].ii_name[0] = '\0';
- vecblk->vector_count--;
- }
- }
-
- INTR_UNLOCK(vecblk);
-
-#if defined(DEBUG)
- if (rv >= 0) {
- /* Gather this device - target cpu mapping information
- * in a table which can be used later by the idbg "intrmap"
- * command
- */
- s = mutex_spinlock(&intr_dev_targ_map_lock);
- if (intr_dev_targ_map_size < MAX_DEVICES) {
- intr_dev_targ_map_t *p;
-
- p = &intr_dev_targ_map[intr_dev_targ_map_size];
- p->dev = owner_dev;
- p->cpuid = cpu;
- p->cnodeid = cpuid_to_cnodeid(cpu);
- p->bit = ip * N_INTPEND_BITS + rv;
- if (name)
- strlcpy(p->intr_name, name, sizeof(p->intr_name));
- else
- p->intr_name[0] = '\0';
- intr_dev_targ_map_size++;
- }
- mutex_spinunlock(&intr_dev_targ_map_lock,s);
- }
-#endif /* DEBUG */
-
- return (((rv == -1) ? rv : (ip * N_INTPEND_BITS) + rv)) ;
-}
-
-
-/*
- * WARNING: This routine should only be called from within ml/SN.
- * Reserve an interrupt level.
- */
-int
-intr_reserve_level(cpuid_t cpu, int bit, int resflags, devfs_handle_t owner_dev, char *name)
-{
- return(do_intr_reserve_level(cpu, bit, resflags, II_RESERVE, owner_dev, name));
-}
-
-
-/*
- * WARNING: This routine should only be called from within ml/SN.
- * Unreserve an interrupt level.
- */
-void
-intr_unreserve_level(cpuid_t cpu, int bit)
-{
- (void)do_intr_reserve_level(cpu, bit, 0, II_UNRESERVE, 0, NULL);
-}
-
-/*
- * Get values that vary depending on which CPU and bit we're operating on
- */
-static hub_intmasks_t *
-intr_get_ptrs(cpuid_t cpu, int bit,
- int *new_bit, /* Bit relative to the register */
- hubreg_t **intpend_masks, /* Masks for this register */
- intr_vecblk_t **vecblk, /* Vecblock for this interrupt */
- int *ip) /* Which intpend register */
-{
- hub_intmasks_t *hub_intmasks;
- synergy_da_t *sda;
- int which_synergy;
- cnodeid_t cnode;
-
- ASSERT(bit < N_INTPEND_BITS * 2);
-
- cnode = cpuid_to_cnodeid(cpu);
- which_synergy = cpuid_to_synergy(cpu);
- sda = Synergy_da_indr[(cnode * 2) + which_synergy];
- hub_intmasks = &sda->s_intmasks;
-
- // hub_intmasks = &pdaindr[cpu].pda->p_intmasks;
-
- if (bit < N_INTPEND_BITS) {
- *intpend_masks = hub_intmasks->intpend0_masks;
- *vecblk = hub_intmasks->dispatch0;
- *ip = 0;
- *new_bit = bit;
- } else {
- *intpend_masks = hub_intmasks->intpend1_masks;
- *vecblk = hub_intmasks->dispatch1;
- *ip = 1;
- *new_bit = bit - N_INTPEND_BITS;
- }
-
- return hub_intmasks;
-}
-
-
-/*
- * intr_connect_level(cpuid_t cpu, int bit, ilvl_t intr_swlevel,
- * intr_func_t intr_func, void *intr_arg);
- * This is the lowest-level interface to the interrupt code. It shouldn't
- * be called from outside the ml/SN directory.
- * intr_connect_level hooks up an interrupt to a particular bit in
- * the INT_PEND0/1 masks. Returns 0 on success.
- * cpu is the CPU to which the interrupt will be sent.
- * bit is the level bit to connect to
- * intr_swlevel tells which software level to use
- * intr_func is the interrupt handler
- * intr_arg is an arbitrary argument interpreted by the handler
- * intr_prefunc is a prologue function, to be called
- * with interrupts disabled, to disable
- * the interrupt at source. It is called
- * with the same argument. Should be NULL for
- * typical interrupts, which can be masked
- * by the infrastructure at the level bit.
- * intr_connect_level returns 0 on success or nonzero on an error
- */
-/* ARGSUSED */
-int
-intr_connect_level(cpuid_t cpu, int bit, ilvl_t intr_swlevel, intr_func_t intr_prefunc)
-{
- intr_vecblk_t *vecblk;
- hubreg_t *intpend_masks;
- int rv = 0;
- int ip;
- unsigned long s;
-
- ASSERT(bit < N_INTPEND_BITS * 2);
-
- (void)intr_get_ptrs(cpu, bit, &bit, &intpend_masks,
- &vecblk, &ip);
-
- INTR_LOCK(vecblk);
-
- if ((vecblk->info[bit].ii_flags & II_INUSE) ||
- (!(vecblk->info[bit].ii_flags & II_RESERVE))) {
- /* Can't assign to a level that's in use or isn't reserved. */
- rv = -1;
- } else {
- /* Stuff parameters into vector and info */
- vecblk->vectors[bit].iv_prefunc = intr_prefunc;
- vecblk->info[bit].ii_flags |= II_INUSE;
- }
-
- /* Now stuff the masks if everything's okay. */
- if (!rv) {
- int lslice;
- volatile hubreg_t *mask_reg;
- // nasid_t nasid = COMPACT_TO_NASID_NODEID(cpuid_to_cnodeid(cpu));
- nasid_t nasid = cpuid_to_nasid(cpu);
- int subnode = cpuid_to_subnode(cpu);
-
- /* Make sure it's not already pending when we connect it. */
- REMOTE_HUB_PI_CLR_INTR(nasid, subnode, bit + ip * N_INTPEND_BITS);
-
- if (bit >= GFX_INTR_A && bit <= CC_PEND_B) {
- intpend_masks[0] |= (1ULL << (uint64_t)bit);
- }
-
- lslice = cpuid_to_localslice(cpu);
- vecblk->cpu_count[lslice]++;
-#if SN1
- /*
- * On SN1, there are 8 interrupt mask registers per node:
- * PI_0 MASK_0 A
- * PI_0 MASK_1 A
- * PI_0 MASK_0 B
- * PI_0 MASK_1 B
- * PI_1 MASK_0 A
- * PI_1 MASK_1 A
- * PI_1 MASK_0 B
- * PI_1 MASK_1 B
- */
-#endif
- if (ip == 0) {
- mask_reg = REMOTE_HUB_PI_ADDR(nasid, subnode,
- PI_INT_MASK0_A + PI_INT_MASK_OFFSET * lslice);
- } else {
- mask_reg = REMOTE_HUB_PI_ADDR(nasid, subnode,
- PI_INT_MASK1_A + PI_INT_MASK_OFFSET * lslice);
- }
-
- HUB_S(mask_reg, intpend_masks[0]);
- }
-
- INTR_UNLOCK(vecblk);
-
- return rv;
-}
-
-
-/*
- * intr_disconnect_level(cpuid_t cpu, int bit)
- *
- * This is the lowest-level interface to the interrupt code. It should
- * not be called from outside the ml/SN directory.
- * intr_disconnect_level removes a particular bit from an interrupt in
- * the INT_PEND0/1 masks. Returns 0 on success or nonzero on failure.
- */
-int
-intr_disconnect_level(cpuid_t cpu, int bit)
-{
- intr_vecblk_t *vecblk;
- hubreg_t *intpend_masks;
- unsigned long s;
- int rv = 0;
- int ip;
-
- (void)intr_get_ptrs(cpu, bit, &bit, &intpend_masks,
- &vecblk, &ip);
-
- INTR_LOCK(vecblk);
-
- if ((vecblk->info[bit].ii_flags & (II_RESERVE | II_INUSE)) !=
- ((II_RESERVE | II_INUSE))) {
- /* Can't remove a level that's not in use or isn't reserved. */
- rv = -1;
- } else {
- /* Stuff parameters into vector and info */
- vecblk->vectors[bit].iv_func = (intr_func_t)NULL;
- vecblk->vectors[bit].iv_prefunc = (intr_func_t)NULL;
- vecblk->vectors[bit].iv_arg = 0;
- vecblk->info[bit].ii_flags &= ~II_INUSE;
-#ifdef BASE_ITHRTEAD
- vecblk->vectors[bit].iv_mustruncpu = -1; /* No mustrun CPU any more. */
-#endif
- }
-
- /* Now clear the masks if everything's okay. */
- if (!rv) {
- int lslice;
- volatile hubreg_t *mask_reg;
-
- intpend_masks[0] &= ~(1ULL << (uint64_t)bit);
- lslice = cpuid_to_localslice(cpu);
- vecblk->cpu_count[lslice]--;
- mask_reg = REMOTE_HUB_PI_ADDR(COMPACT_TO_NASID_NODEID(cpuid_to_cnodeid(cpu)),
- cpuid_to_subnode(cpu),
- ip == 0 ? PI_INT_MASK0_A : PI_INT_MASK1_A);
- mask_reg = (volatile hubreg_t *)((__psunsigned_t)mask_reg +
- (PI_INT_MASK_OFFSET * lslice));
- *mask_reg = intpend_masks[0];
- }
-
- INTR_UNLOCK(vecblk);
-
- return rv;
-}
-
-/*
- * Actually block or unblock an interrupt
- */
-void
-do_intr_block_bit(cpuid_t cpu, int bit, int block)
-{
- intr_vecblk_t *vecblk;
- int ip;
- unsigned long s;
- hubreg_t *intpend_masks;
- volatile hubreg_t mask_value;
- volatile hubreg_t *mask_reg;
-
- intr_get_ptrs(cpu, bit, &bit, &intpend_masks, &vecblk, &ip);
-
- INTR_LOCK(vecblk);
-
- if (block)
- /* Block */
- intpend_masks[0] &= ~(1ULL << (uint64_t)bit);
- else
- /* Unblock */
- intpend_masks[0] |= (1ULL << (uint64_t)bit);
-
- if (ip == 0) {
- mask_reg = REMOTE_HUB_PI_ADDR(COMPACT_TO_NASID_NODEID(cpuid_to_cnodeid(cpu)),
- cpuid_to_subnode(cpu), PI_INT_MASK0_A);
- } else {
- mask_reg = REMOTE_HUB_PI_ADDR(COMPACT_TO_NASID_NODEID(cpuid_to_cnodeid(cpu)),
- cpuid_to_subnode(cpu), PI_INT_MASK1_A);
- }
-
- HUB_S(mask_reg, intpend_masks[0]);
-
- /*
- * Wait for it to take effect. (One read should suffice.)
- * This is only necessary when blocking an interrupt
- */
- if (block)
- while ((mask_value = HUB_L(mask_reg)) != intpend_masks[0])
- ;
-
- INTR_UNLOCK(vecblk);
-}
-
-
-/*
- * Block a particular interrupt (cpu/bit pair).
- */
-/* ARGSUSED */
-void
-intr_block_bit(cpuid_t cpu, int bit)
-{
- do_intr_block_bit(cpu, bit, 1);
-}
-
-
-/*
- * Unblock a particular interrupt (cpu/bit pair).
- */
-/* ARGSUSED */
-void
-intr_unblock_bit(cpuid_t cpu, int bit)
-{
- do_intr_block_bit(cpu, bit, 0);
-}
-
-
-/* verifies that the specified CPUID is on the specified SUBNODE (if any) */
-#define cpu_on_subnode(cpuid, which_subnode) \
- (((which_subnode) == SUBNODE_ANY) || (cpuid_to_subnode(cpuid) == (which_subnode)))
-
-
-/*
- * Choose one of the CPUs on a specified node or subnode to receive
- * interrupts. Don't pick a cpu which has been specified as a NOINTR cpu.
- *
- * Among all acceptable CPUs, the CPU that has the fewest total number
- * of interrupts targetted towards it is chosen. Note that we never
- * consider how frequent each of these interrupts might occur, so a rare
- * hardware error interrupt is weighted equally with a disk interrupt.
- */
-static cpuid_t
-do_intr_cpu_choose(cnodeid_t cnode, int which_subnode)
-{
- cpuid_t cpu, best_cpu = CPU_NONE;
- int slice, min_count=1000;
-
- min_count = 1000;
- for (slice=0; slice < CPUS_PER_NODE; slice++) {
- intr_vecblk_t *vecblk0, *vecblk1;
- int total_intrs_to_slice;
- subnode_pda_t *snpda;
- int local_cpu_num;
-
- cpu = cnode_slice_to_cpuid(cnode, slice);
- if (cpu == CPU_NONE)
- continue;
-
- /* If this cpu isn't enabled for interrupts, skip it */
- if (!cpu_enabled(cpu) || !cpu_allows_intr(cpu))
- continue;
-
- /* If this isn't the right subnode, skip it */
- if (!cpu_on_subnode(cpu, which_subnode))
- continue;
-
- /* OK, this one's a potential CPU for interrupts */
- snpda = SUBNODEPDA(cnode,SUBNODE(slice));
- vecblk0 = &snpda->intr_dispatch0;
- vecblk1 = &snpda->intr_dispatch1;
- local_cpu_num = LOCALCPU(slice);
- total_intrs_to_slice = vecblk0->cpu_count[local_cpu_num] +
- vecblk1->cpu_count[local_cpu_num];
-
- if (min_count > total_intrs_to_slice) {
- min_count = total_intrs_to_slice;
- best_cpu = cpu;
- }
- }
- return best_cpu;
-}
-
-/*
- * Choose an appropriate interrupt target CPU on a specified node.
- * If which_subnode is SUBNODE_ANY, then subnode is not considered.
- * Otherwise, the chosen CPU must be on the specified subnode.
- */
-static cpuid_t
-intr_cpu_choose_from_node(cnodeid_t cnode, int which_subnode)
-{
- return(do_intr_cpu_choose(cnode, which_subnode));
-}
-
-
-/* Make it easy to identify subnode vertices in the hwgraph */
-void
-mark_subnodevertex_as_subnode(devfs_handle_t vhdl, int which_subnode)
-{
- graph_error_t rv;
-
- ASSERT(0 <= which_subnode);
- ASSERT(which_subnode < NUM_SUBNODES);
-
- rv = hwgraph_info_add_LBL(vhdl, INFO_LBL_CPUBUS, (arbitrary_info_t)which_subnode);
- ASSERT_ALWAYS(rv == GRAPH_SUCCESS);
-
- rv = hwgraph_info_export_LBL(vhdl, INFO_LBL_CPUBUS, sizeof(arbitrary_info_t));
- ASSERT_ALWAYS(rv == GRAPH_SUCCESS);
-}
-
-
-/*
- * Given a device descriptor, extract interrupt target information and
- * choose an appropriate CPU. Return CPU_NONE if we can't make sense
- * out of the target information.
- * TBD: Should this be considered platform-independent code?
- */
-
-
-/*
- * intr_bit_reserve_test(cpuid,which_subnode,cnode,req_bit,intr_resflags,
- * owner_dev,intr_name,*resp_bit)
- * Either cpuid is not CPU_NONE or cnodeid not CNODE_NONE but
- * not both.
- * 1. If cpuid is specified, this routine tests if this cpu can be a valid
- * interrupt target candidate.
- * 2. If cnodeid is specified, this routine tests if there is a cpu on
- * this node which can be a valid interrupt target candidate.
- * 3. If a valid interrupt target cpu candidate is found then an attempt at
- * reserving an interrupt bit on the corresponding cnode is made.
- *
- * If steps 1 & 2 both fail or step 3 fails then we are not able to get a valid
- * interrupt target cpu then routine returns CPU_NONE (failure)
- * Otherwise routine returns cpuid of interrupt target (success)
- */
-static cpuid_t
-intr_bit_reserve_test(cpuid_t cpuid,
- int favor_subnode,
- cnodeid_t cnodeid,
- int req_bit,
- int intr_resflags,
- devfs_handle_t owner_dev,
- char *intr_name,
- int *resp_bit)
-{
-
- ASSERT((cpuid==CPU_NONE) || (cnodeid==CNODEID_NONE));
-
- if (cnodeid != CNODEID_NONE) {
- /* Try to choose a interrupt cpu candidate */
- cpuid = intr_cpu_choose_from_node(cnodeid, favor_subnode);
- }
-
- if (cpuid != CPU_NONE) {
- /* Try to reserve an interrupt bit on the hub
- * corresponding to the canidate cnode. If we
- * are successful then we got a cpu which can
- * act as an interrupt target for the io device.
- * Otherwise we need to continue the search
- * further.
- */
- *resp_bit = do_intr_reserve_level(cpuid,
- req_bit,
- intr_resflags,
- II_RESERVE,
- owner_dev,
- intr_name);
-
- if (*resp_bit >= 0)
- /* The interrupt target specified was fine */
- return(cpuid);
- }
- return(CPU_NONE);
-}
-/*
- * intr_heuristic(dev_t dev,device_desc_t dev_desc,
- * int req_bit,int intr_resflags,dev_t owner_dev,
- * char *intr_name,int *resp_bit)
- *
- * Choose an interrupt destination for an interrupt.
- * dev is the device for which the interrupt is being set up
- * dev_desc is a description of hardware and policy that could
- * help determine where this interrupt should go
- * req_bit is the interrupt bit requested
- * (can be INTRCONNECT_ANY_BIT in which the first available
- * interrupt bit is used)
- * intr_resflags indicates whether we want to (un)reserve bit
- * owner_dev is the owner device
- * intr_name is the readable interrupt name
- * resp_bit indicates whether we succeeded in getting the required
- * action { (un)reservation} done
- * negative value indicates failure
- *
- */
-/* ARGSUSED */
-cpuid_t
-intr_heuristic(devfs_handle_t dev,
- device_desc_t dev_desc,
- int req_bit,
- int intr_resflags,
- devfs_handle_t owner_dev,
- char *intr_name,
- int *resp_bit)
-{
- cpuid_t cpuid; /* possible intr targ*/
- cnodeid_t candidate; /* possible canidate */
- int which_subnode = SUBNODE_ANY;
-
-/* SN1 + pcibr Addressing Limitation */
- {
- devfs_handle_t pconn_vhdl;
- pcibr_soft_t pcibr_soft;
-
- /*
- * This combination of SN1 and Bridge hardware has an odd "limitation".
- * Due to the choice of addresses for PI0 and PI1 registers on SN1
- * and historical limitations in Bridge, Bridge is unable to
- * send interrupts to both PI0 CPUs and PI1 CPUs -- we have
- * to choose one set or the other. That choice is implicitly
- * made when Bridge first attaches its error interrupt. After
- * that point, all subsequent interrupts are restricted to the
- * same PI number (though it's possible to send interrupts to
- * the same PI number on a different node).
- *
- * Since neither SN1 nor Bridge designers are willing to admit a
- * bug, we can't really call this a "workaround". It's a permanent
- * solution for an SN1-specific and Bridge-specific hardware
- * limitation that won't ever be lifted.
- */
- if ((hwgraph_edge_get(dev, EDGE_LBL_PCI, &pconn_vhdl) == GRAPH_SUCCESS) &&
- ((pcibr_soft = pcibr_soft_get(pconn_vhdl)) != NULL)) {
- /*
- * We "know" that the error interrupt is the first
- * interrupt set up by pcibr_attach. Send all interrupts
- * on this bridge to the same subnode number.
- */
- if (pcibr_soft->bsi_err_intr) {
- which_subnode = cpuid_to_subnode(((hub_intr_t) pcibr_soft->bsi_err_intr)->i_cpuid);
- }
- }
- }
-
- /* Check if we can find a valid interrupt target candidate on
- * the master node for the device.
- */
- cpuid = intr_bit_reserve_test(CPU_NONE,
- which_subnode,
- master_node_get(dev),
- req_bit,
- intr_resflags,
- owner_dev,
- intr_name,
- resp_bit);
-
- if (cpuid != CPU_NONE) {
- if (cpu_on_subnode(cpuid, which_subnode))
- return(cpuid); /* got a valid interrupt target */
- else
- intr_unreserve_level(cpuid, *resp_bit);
- }
-
- printk(KERN_WARNING "Cannot target interrupts to closest node(%d): (0x%lx)\n",
- master_node_get(dev),(unsigned long)owner_dev);
-
- /* Fall through into the default algorithm
- * (exhaustive-search-for-the-nearest-possible-interrupt-target)
- * for finding the interrupt target
- */
-
- {
- /*
- * Do a stupid round-robin assignment of the node.
- * (Should do a "nearest neighbor" but not for SN1.
- */
- static cnodeid_t last_node = -1;
-
- if (last_node >= numnodes) last_node = 0;
- for (candidate = last_node + 1; candidate != last_node; candidate++) {
- if (candidate == numnodes) candidate = 0;
- cpuid = intr_bit_reserve_test(CPU_NONE,
- which_subnode,
- candidate,
- req_bit,
- intr_resflags,
- owner_dev,
- intr_name,
- resp_bit);
-
- if (cpuid != CPU_NONE) {
- if (cpu_on_subnode(cpuid, which_subnode)) {
- last_node = candidate;
- return(cpuid); /* got a valid interrupt target */
- }
- else
- intr_unreserve_level(cpuid, *resp_bit);
- }
- }
- last_node = candidate;
- }
-
- printk(KERN_WARNING "Cannot target interrupts to any close node: %ld (0x%lx)\n",
- (long)owner_dev, (unsigned long)owner_dev);
-
- /* In the worst case try to allocate interrupt bits on the
- * master processor's node. We may get here during error interrupt
- * allocation phase when the topology matrix is not yet setup
- * and hence cannot do an exhaustive search.
- */
- ASSERT(cpu_allows_intr(master_procid));
- cpuid = intr_bit_reserve_test(master_procid,
- which_subnode,
- CNODEID_NONE,
- req_bit,
- intr_resflags,
- owner_dev,
- intr_name,
- resp_bit);
-
- if (cpuid != CPU_NONE) {
- if (cpu_on_subnode(cpuid, which_subnode))
- return(cpuid);
- else
- intr_unreserve_level(cpuid, *resp_bit);
- }
-
- printk(KERN_WARNING "Cannot target interrupts: (0x%lx)\n",
- (unsigned long)owner_dev);
-
- return(CPU_NONE); /* Should never get here */
-}
-
-struct hardwired_intr_s {
- signed char level;
- int flags;
- char *name;
-} const hardwired_intr[] = {
- { INT_PEND0_BASELVL + RESERVED_INTR, 0, "Reserved" },
- { INT_PEND0_BASELVL + GFX_INTR_A, 0, "Gfx A" },
- { INT_PEND0_BASELVL + GFX_INTR_B, 0, "Gfx B" },
- { INT_PEND0_BASELVL + PG_MIG_INTR, II_THREADED, "Migration" },
- { INT_PEND0_BASELVL + UART_INTR, II_THREADED, "Bedrock/L1" },
- { INT_PEND0_BASELVL + CC_PEND_A, 0, "Crosscall A" },
- { INT_PEND0_BASELVL + CC_PEND_B, 0, "Crosscall B" },
- { INT_PEND1_BASELVL + CLK_ERR_INTR, II_ERRORINT, "Clock Error" },
- { INT_PEND1_BASELVL + COR_ERR_INTR_A, II_ERRORINT, "Correctable Error A" },
- { INT_PEND1_BASELVL + COR_ERR_INTR_B, II_ERRORINT, "Correctable Error B" },
- { INT_PEND1_BASELVL + MD_COR_ERR_INTR, II_ERRORINT, "MD Correct. Error" },
- { INT_PEND1_BASELVL + NI_ERROR_INTR, II_ERRORINT, "NI Error" },
- { INT_PEND1_BASELVL + NI_BRDCAST_ERR_A, II_ERRORINT, "Remote NI Error"},
- { INT_PEND1_BASELVL + NI_BRDCAST_ERR_B, II_ERRORINT, "Remote NI Error"},
- { INT_PEND1_BASELVL + MSC_PANIC_INTR, II_ERRORINT, "MSC Panic" },
- { INT_PEND1_BASELVL + LLP_PFAIL_INTR_A, II_ERRORINT, "LLP Pfail WAR" },
- { INT_PEND1_BASELVL + LLP_PFAIL_INTR_B, II_ERRORINT, "LLP Pfail WAR" },
- { INT_PEND1_BASELVL + NACK_INT_A, 0, "CPU A Nack count == NACK_CMP" },
- { INT_PEND1_BASELVL + NACK_INT_B, 0, "CPU B Nack count == NACK_CMP" },
- { INT_PEND1_BASELVL + LB_ERROR, 0, "Local Block Error" },
- { INT_PEND1_BASELVL + XB_ERROR, 0, "Local XBar Error" },
- { -1, 0, (char *)NULL},
-};
-
-/*
- * Reserve all of the hardwired interrupt levels so they're not used as
- * general purpose bits later.
- */
-void
-intr_reserve_hardwired(cnodeid_t cnode)
-{
- cpuid_t cpu;
- int level;
- int i;
- char subnode_done[NUM_SUBNODES];
-
- // cpu = cnodetocpu(cnode);
- for (cpu = 0; cpu < smp_num_cpus; cpu++) {
- if (cpuid_to_cnodeid(cpu) == cnode) {
- break;
- }
- }
- if (cpu == smp_num_cpus) cpu = CPU_NONE;
- if (cpu == CPU_NONE) {
- printk("Node %d has no CPUs", cnode);
- return;
- }
-
- for (i=0; i<NUM_SUBNODES; i++)
- subnode_done[i] = 0;
-
- for (; cpu<smp_num_cpus && cpu_enabled(cpu) && cpuid_to_cnodeid(cpu) == cnode; cpu++) {
- int which_subnode = cpuid_to_subnode(cpu);
- if (subnode_done[which_subnode])
- continue;
- subnode_done[which_subnode] = 1;
-
- for (i = 0; hardwired_intr[i].level != -1; i++) {
- level = hardwired_intr[i].level;
-
- if (level != intr_reserve_level(cpu, level,
- hardwired_intr[i].flags,
- (devfs_handle_t) NULL,
- hardwired_intr[i].name))
- panic("intr_reserve_hardwired: Can't reserve level %d, cpu %ld.", level, cpu);
- }
- }
-}
-
-
-/*
- * Check and clear interrupts.
- */
-/*ARGSUSED*/
-static void
-intr_clear_bits(nasid_t nasid, volatile hubreg_t *pend, int base_level,
- char *name)
-{
- volatile hubreg_t bits;
- int i;
-
- /* Check pending interrupts */
- if ((bits = HUB_L(pend)) != 0) {
- for (i = 0; i < N_INTPEND_BITS; i++) {
- if (bits & (1 << i)) {
-#ifdef INTRDEBUG
- printk(KERN_WARNING "Nasid %d interrupt bit %d set in %s",
- nasid, i, name);
-#endif
- LOCAL_HUB_CLR_INTR(base_level + i);
- }
- }
- }
-}
-
-/*
- * Clear out our interrupt registers.
- */
-void
-intr_clear_all(nasid_t nasid)
-{
- int sn;
-
- for(sn=0; sn<NUM_SUBNODES; sn++) {
- REMOTE_HUB_PI_S(nasid, sn, PI_INT_MASK0_A, 0);
- REMOTE_HUB_PI_S(nasid, sn, PI_INT_MASK0_B, 0);
- REMOTE_HUB_PI_S(nasid, sn, PI_INT_MASK1_A, 0);
- REMOTE_HUB_PI_S(nasid, sn, PI_INT_MASK1_B, 0);
-
- intr_clear_bits(nasid, REMOTE_HUB_PI_ADDR(nasid, sn, PI_INT_PEND0),
- INT_PEND0_BASELVL, "INT_PEND0");
- intr_clear_bits(nasid, REMOTE_HUB_PI_ADDR(nasid, sn, PI_INT_PEND1),
- INT_PEND1_BASELVL, "INT_PEND1");
- }
-}
-
-/*
- * Dump information about a particular interrupt vector.
- */
-static void
-dump_vector(intr_info_t *info, intr_vector_t *vector, int bit, hubreg_t ip,
- hubreg_t ima, hubreg_t imb, void (*pf)(char *, ...))
-{
- hubreg_t value = 1LL << bit;
-
- pf(" Bit %02d: %s: func 0x%x arg 0x%x prefunc 0x%x\n",
- bit, info->ii_name,
- vector->iv_func, vector->iv_arg, vector->iv_prefunc);
- pf(" vertex 0x%x %s%s",
- info->ii_owner_dev,
- ((info->ii_flags) & II_RESERVE) ? "R" : "U",
- ((info->ii_flags) & II_INUSE) ? "C" : "-");
- pf("%s%s%s%s",
- ip & value ? "P" : "-",
- ima & value ? "A" : "-",
- imb & value ? "B" : "-",
- ((info->ii_flags) & II_ERRORINT) ? "E" : "-");
- pf("\n");
-}
-
-
-/*
- * Dump information about interrupt vector assignment.
- */
-void
-intr_dumpvec(cnodeid_t cnode, void (*pf)(char *, ...))
-{
- nodepda_t *npda;
- int ip, sn, bit;
- intr_vecblk_t *dispatch;
- hubreg_t ipr, ima, imb;
- nasid_t nasid;
-
- if ((cnode < 0) || (cnode >= numnodes)) {
- pf("intr_dumpvec: cnodeid out of range: %d\n", cnode);
- return ;
- }
-
- nasid = COMPACT_TO_NASID_NODEID(cnode);
-
- if (nasid == INVALID_NASID) {
- pf("intr_dumpvec: Bad cnodeid: %d\n", cnode);
- return ;
- }
-
-
- npda = NODEPDA(cnode);
-
- for (sn = 0; sn < NUM_SUBNODES; sn++) {
- for (ip = 0; ip < 2; ip++) {
- dispatch = ip ? &(SNPDA(npda,sn)->intr_dispatch1) : &(SNPDA(npda,sn)->intr_dispatch0);
- ipr = REMOTE_HUB_PI_L(nasid, sn, ip ? PI_INT_PEND1 : PI_INT_PEND0);
- ima = REMOTE_HUB_PI_L(nasid, sn, ip ? PI_INT_MASK1_A : PI_INT_MASK0_A);
- imb = REMOTE_HUB_PI_L(nasid, sn, ip ? PI_INT_MASK1_B : PI_INT_MASK0_B);
-
- pf("Node %d INT_PEND%d:\n", cnode, ip);
-
- if (dispatch->ithreads_enabled)
- pf(" Ithreads enabled\n");
- else
- pf(" Ithreads disabled\n");
- pf(" vector_count = %d, vector_state = %d\n",
- dispatch->vector_count,
- dispatch->vector_state);
- pf(" CPU A count %d, CPU B count %d\n",
- dispatch->cpu_count[0],
- dispatch->cpu_count[1]);
- pf(" &vector_lock = 0x%x\n",
- &(dispatch->vector_lock));
- for (bit = 0; bit < N_INTPEND_BITS; bit++) {
- if ((dispatch->info[bit].ii_flags & II_RESERVE) ||
- (ipr & (1L << bit))) {
- dump_vector(&(dispatch->info[bit]),
- &(dispatch->vectors[bit]),
- bit, ipr, ima, imb, pf);
- }
- }
- pf("\n");
- }
- }
-}
-
+++ /dev/null
-/*
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Copyright (C) 1992 - 1997, 2000-2002 Silicon Graphics, Inc. All rights reserved.
- */
-
-int NeedXbridgeSwap = 0;
-
-#include <linux/types.h>
-#include <linux/slab.h>
-#include <linux/module.h>
-#include <asm/sn/sgi.h>
-#include <asm/sn/sn_cpuid.h>
-#include <asm/sn/addrs.h>
-#include <asm/sn/arch.h>
-#include <asm/sn/iograph.h>
-#include <asm/sn/invent.h>
-#include <asm/sn/hcl.h>
-#include <asm/sn/labelcl.h>
-#include <asm/sn/xtalk/xwidget.h>
-#include <asm/sn/pci/bridge.h>
-#include <asm/sn/pci/pciio.h>
-#include <asm/sn/pci/pcibr.h>
-#include <asm/sn/pci/pcibr_private.h>
-#include <asm/sn/pci/pci_defs.h>
-#include <asm/sn/prio.h>
-#include <asm/sn/xtalk/xbow.h>
-#include <asm/sn/ioc3.h>
-#include <asm/sn/eeprom.h>
-#include <asm/sn/io.h>
-#include <asm/sn/sn_private.h>
-
-#ifdef __ia64
-#define rmallocmap atemapalloc
-#define rmfreemap atemapfree
-#define rmfree atefree
-#define rmalloc atealloc
-#endif
-
-extern boolean_t is_sys_critical_vertex(devfs_handle_t);
-
-#undef PCIBR_ATE_DEBUG
-
-#if 0
-#define DEBUG 1 /* To avoid lots of bad printk() formats leave off */
-#endif
-#define PCI_DEBUG 1
-#define ATTACH_DEBUG 1
-#define PCIBR_SOFT_LIST 1
-
-#ifndef LOCAL
-#define LOCAL static
-#endif
-
-/*
- * Macros related to the Lucent USS 302/312 usb timeout workaround. It
- * appears that if the lucent part can get into a retry loop if it sees a
- * DAC on the bus during a pio read retry. The loop is broken after about
- * 1ms, so we need to set up bridges holding this part to allow at least
- * 1ms for pio.
- */
-
-#define USS302_TIMEOUT_WAR
-
-#ifdef USS302_TIMEOUT_WAR
-#define LUCENT_USBHC_VENDOR_ID_NUM 0x11c1
-#define LUCENT_USBHC302_DEVICE_ID_NUM 0x5801
-#define LUCENT_USBHC312_DEVICE_ID_NUM 0x5802
-#define USS302_BRIDGE_TIMEOUT_HLD 4
-#endif
-
-#define PCIBR_LLP_CONTROL_WAR
-#if defined (PCIBR_LLP_CONTROL_WAR)
-int pcibr_llp_control_war_cnt;
-#endif /* PCIBR_LLP_CONTROL_WAR */
-
-int pcibr_devflag = D_MP;
-
-#ifdef LATER
-#define F(s,n) { 1l<<(s),-(s), n }
-
-struct reg_desc bridge_int_status_desc[] =
-{
- F(31, "MULTI_ERR"),
- F(30, "PMU_ESIZE_EFAULT"),
- F(29, "UNEXPECTED_RESP"),
- F(28, "BAD_XRESP_PACKET"),
- F(27, "BAD_XREQ_PACKET"),
- F(26, "RESP_XTALK_ERROR"),
- F(25, "REQ_XTALK_ERROR"),
- F(24, "INVALID_ADDRESS"),
- F(23, "UNSUPPORTED_XOP"),
- F(22, "XREQ_FIFO_OFLOW"),
- F(21, "LLP_REC_SNERROR"),
- F(20, "LLP_REC_CBERROR"),
- F(19, "LLP_RCTY"),
- F(18, "LLP_TX_RETRY"),
- F(17, "LLP_TCTY"),
- F(16, "SSRAM_PERR"),
- F(15, "PCI_ABORT"),
- F(14, "PCI_PARITY"),
- F(13, "PCI_SERR"),
- F(12, "PCI_PERR"),
- F(11, "PCI_MASTER_TOUT"),
- F(10, "PCI_RETRY_CNT"),
- F(9, "XREAD_REQ_TOUT"),
- F(8, "GIO_BENABLE_ERR"),
- F(7, "INT7"),
- F(6, "INT6"),
- F(5, "INT5"),
- F(4, "INT4"),
- F(3, "INT3"),
- F(2, "INT2"),
- F(1, "INT1"),
- F(0, "INT0"),
- {0}
-};
-
-struct reg_values space_v[] =
-{
- {PCIIO_SPACE_NONE, "none"},
- {PCIIO_SPACE_ROM, "ROM"},
- {PCIIO_SPACE_IO, "I/O"},
- {PCIIO_SPACE_MEM, "MEM"},
- {PCIIO_SPACE_MEM32, "MEM(32)"},
- {PCIIO_SPACE_MEM64, "MEM(64)"},
- {PCIIO_SPACE_CFG, "CFG"},
- {PCIIO_SPACE_WIN(0), "WIN(0)"},
- {PCIIO_SPACE_WIN(1), "WIN(1)"},
- {PCIIO_SPACE_WIN(2), "WIN(2)"},
- {PCIIO_SPACE_WIN(3), "WIN(3)"},
- {PCIIO_SPACE_WIN(4), "WIN(4)"},
- {PCIIO_SPACE_WIN(5), "WIN(5)"},
- {PCIIO_SPACE_BAD, "BAD"},
- {0}
-};
-
-struct reg_desc space_desc[] =
-{
- {0xFF, 0, "space", 0, space_v},
- {0}
-};
-
-#if DEBUG
-#define device_desc device_bits
-LOCAL struct reg_desc device_bits[] =
-{
- {BRIDGE_DEV_ERR_LOCK_EN, 0, "ERR_LOCK_EN"},
- {BRIDGE_DEV_PAGE_CHK_DIS, 0, "PAGE_CHK_DIS"},
- {BRIDGE_DEV_FORCE_PCI_PAR, 0, "FORCE_PCI_PAR"},
- {BRIDGE_DEV_VIRTUAL_EN, 0, "VIRTUAL_EN"},
- {BRIDGE_DEV_PMU_WRGA_EN, 0, "PMU_WRGA_EN"},
- {BRIDGE_DEV_DIR_WRGA_EN, 0, "DIR_WRGA_EN"},
- {BRIDGE_DEV_DEV_SIZE, 0, "DEV_SIZE"},
- {BRIDGE_DEV_RT, 0, "RT"},
- {BRIDGE_DEV_SWAP_PMU, 0, "SWAP_PMU"},
- {BRIDGE_DEV_SWAP_DIR, 0, "SWAP_DIR"},
- {BRIDGE_DEV_PREF, 0, "PREF"},
- {BRIDGE_DEV_PRECISE, 0, "PRECISE"},
- {BRIDGE_DEV_COH, 0, "COH"},
- {BRIDGE_DEV_BARRIER, 0, "BARRIER"},
- {BRIDGE_DEV_GBR, 0, "GBR"},
- {BRIDGE_DEV_DEV_SWAP, 0, "DEV_SWAP"},
- {BRIDGE_DEV_DEV_IO_MEM, 0, "DEV_IO_MEM"},
- {BRIDGE_DEV_OFF_MASK, BRIDGE_DEV_OFF_ADDR_SHFT, "DEV_OFF", "%x"},
- {0}
-};
-#endif /* DEBUG */
-
-#ifdef SUPPORT_PRINTING_R_FORMAT
-LOCAL struct reg_values xio_cmd_pactyp[] =
-{
- {0x0, "RdReq"},
- {0x1, "RdResp"},
- {0x2, "WrReqWithResp"},
- {0x3, "WrResp"},
- {0x4, "WrReqNoResp"},
- {0x5, "Reserved(5)"},
- {0x6, "FetchAndOp"},
- {0x7, "Reserved(7)"},
- {0x8, "StoreAndOp"},
- {0x9, "Reserved(9)"},
- {0xa, "Reserved(a)"},
- {0xb, "Reserved(b)"},
- {0xc, "Reserved(c)"},
- {0xd, "Reserved(d)"},
- {0xe, "SpecialReq"},
- {0xf, "SpecialResp"},
- {0}
-};
-
-LOCAL struct reg_desc xio_cmd_bits[] =
-{
- {WIDGET_DIDN, -28, "DIDN", "%x"},
- {WIDGET_SIDN, -24, "SIDN", "%x"},
- {WIDGET_PACTYP, -20, "PACTYP", 0, xio_cmd_pactyp},
- {WIDGET_TNUM, -15, "TNUM", "%x"},
- {WIDGET_COHERENT, 0, "COHERENT"},
- {WIDGET_DS, 0, "DS"},
- {WIDGET_GBR, 0, "GBR"},
- {WIDGET_VBPM, 0, "VBPM"},
- {WIDGET_ERROR, 0, "ERROR"},
- {WIDGET_BARRIER, 0, "BARRIER"},
- {0}
-};
-#endif /* SUPPORT_PRINTING_R_FORMAT */
-
-#if PCIBR_FREEZE_TIME || PCIBR_ATE_DEBUG
-LOCAL struct reg_desc ate_bits[] =
-{
- {0xFFFF000000000000ull, -48, "RMF", "%x"},
- {~(IOPGSIZE - 1) & /* may trim off some low bits */
- 0x0000FFFFFFFFF000ull, 0, "XIO", "%x"},
- {0x0000000000000F00ull, -8, "port", "%x"},
- {0x0000000000000010ull, 0, "Barrier"},
- {0x0000000000000008ull, 0, "Prefetch"},
- {0x0000000000000004ull, 0, "Precise"},
- {0x0000000000000002ull, 0, "Coherent"},
- {0x0000000000000001ull, 0, "Valid"},
- {0}
-};
-#endif
-
-#if PCIBR_ATE_DEBUG
-LOCAL struct reg_values ssram_sizes[] =
-{
- {BRIDGE_CTRL_SSRAM_512K, "512k"},
- {BRIDGE_CTRL_SSRAM_128K, "128k"},
- {BRIDGE_CTRL_SSRAM_64K, "64k"},
- {BRIDGE_CTRL_SSRAM_1K, "1k"},
- {0}
-};
-
-LOCAL struct reg_desc control_bits[] =
-{
- {BRIDGE_CTRL_FLASH_WR_EN, 0, "FLASH_WR_EN"},
- {BRIDGE_CTRL_EN_CLK50, 0, "EN_CLK50"},
- {BRIDGE_CTRL_EN_CLK40, 0, "EN_CLK40"},
- {BRIDGE_CTRL_EN_CLK33, 0, "EN_CLK33"},
- {BRIDGE_CTRL_RST_MASK, -24, "RST", "%x"},
- {BRIDGE_CTRL_IO_SWAP, 0, "IO_SWAP"},
- {BRIDGE_CTRL_MEM_SWAP, 0, "MEM_SWAP"},
- {BRIDGE_CTRL_PAGE_SIZE, 0, "PAGE_SIZE"},
- {BRIDGE_CTRL_SS_PAR_BAD, 0, "SS_PAR_BAD"},
- {BRIDGE_CTRL_SS_PAR_EN, 0, "SS_PAR_EN"},
- {BRIDGE_CTRL_SSRAM_SIZE_MASK, 0, "SSRAM_SIZE", 0, ssram_sizes},
- {BRIDGE_CTRL_F_BAD_PKT, 0, "F_BAD_PKT"},
- {BRIDGE_CTRL_LLP_XBAR_CRD_MASK, -12, "LLP_XBAR_CRD", "%d"},
- {BRIDGE_CTRL_CLR_RLLP_CNT, 0, "CLR_RLLP_CNT"},
- {BRIDGE_CTRL_CLR_TLLP_CNT, 0, "CLR_TLLP_CNT"},
- {BRIDGE_CTRL_SYS_END, 0, "SYS_END"},
- {BRIDGE_CTRL_MAX_TRANS_MASK, -4, "MAX_TRANS", "%d"},
- {BRIDGE_CTRL_WIDGET_ID_MASK, 0, "WIDGET_ID", "%x"},
- {0}
-};
-#endif
-#endif /* LATER */
-
-/* kbrick widgetnum-to-bus layout */
-int p_busnum[MAX_PORT_NUM] = { /* widget# */
- 0, 0, 0, 0, 0, 0, 0, 0, /* 0x0 - 0x7 */
- 2, /* 0x8 */
- 1, /* 0x9 */
- 0, 0, /* 0xa - 0xb */
- 5, /* 0xc */
- 6, /* 0xd */
- 4, /* 0xe */
- 3, /* 0xf */
-};
-
-/*
- * Additional PIO spaces per slot are
- * recorded in this structure.
- */
-struct pciio_piospace_s {
- pciio_piospace_t next; /* another space for this device */
- char free; /* 1 if free, 0 if in use */
- pciio_space_t space; /* Which space is in use */
- iopaddr_t start; /* Starting address of the PIO space */
- size_t count; /* size of PIO space */
-};
-
-#if PCIBR_SOFT_LIST
-pcibr_list_p pcibr_list = 0;
-#endif
-
-#define INFO_LBL_PCIBR_ASIC_REV "_pcibr_asic_rev"
-
-#define PCIBR_D64_BASE_UNSET (0xFFFFFFFFFFFFFFFF)
-#define PCIBR_D32_BASE_UNSET (0xFFFFFFFF)
-
-#define PCIBR_VALID_SLOT(s) (s < 8)
-
-#ifdef SN_XXX
-extern int hub_device_flags_set(devfs_handle_t widget_dev,
- hub_widget_flags_t flags);
-#endif
-extern pciio_dmamap_t get_free_pciio_dmamap(devfs_handle_t);
-extern void free_pciio_dmamap(pcibr_dmamap_t);
-
-/*
- * This is the file operation table for the pcibr driver.
- * As each of the functions are implemented, put the
- * appropriate function name below.
- */
-struct file_operations pcibr_fops = {
- owner: THIS_MODULE,
- llseek: NULL,
- read: NULL,
- write: NULL,
- readdir: NULL,
- poll: NULL,
- ioctl: NULL,
- mmap: NULL,
- open: NULL,
- flush: NULL,
- release: NULL,
- fsync: NULL,
- fasync: NULL,
- lock: NULL,
- readv: NULL,
- writev: NULL
-};
-
-extern devfs_handle_t hwgraph_root;
-extern graph_error_t hwgraph_vertex_unref(devfs_handle_t vhdl);
-extern int cap_able(uint64_t x);
-extern uint64_t rmalloc(struct map *mp, size_t size);
-extern void rmfree(struct map *mp, size_t size, uint64_t a);
-extern int hwgraph_vertex_name_get(devfs_handle_t vhdl, char *buf, uint buflen);
-extern long atoi(register char *p);
-extern char *dev_to_name(devfs_handle_t dev, char *buf, uint buflen);
-extern cnodeid_t nodevertex_to_cnodeid(devfs_handle_t vhdl);
-extern graph_error_t hwgraph_edge_remove(devfs_handle_t from, char *name, devfs_handle_t *toptr);
-extern struct map *rmallocmap(uint64_t mapsiz);
-extern void rmfreemap(struct map *mp);
-extern int compare_and_swap_ptr(void **location, void *old_ptr, void *new_ptr);
-extern int io_path_map_widget(devfs_handle_t vertex);
-
-
-
-/* =====================================================================
- * Function Table of Contents
- *
- * The order of functions in this file has stopped
- * making much sense. We might want to take a look
- * at it some time and bring back some sanity, or
- * perhaps bust this file into smaller chunks.
- */
-
-LOCAL void do_pcibr_rrb_clear(bridge_t *, int);
-LOCAL void do_pcibr_rrb_flush(bridge_t *, int);
-LOCAL int do_pcibr_rrb_count_valid(bridge_t *, pciio_slot_t);
-LOCAL int do_pcibr_rrb_count_avail(bridge_t *, pciio_slot_t);
-LOCAL int do_pcibr_rrb_alloc(bridge_t *, pciio_slot_t, int);
-LOCAL int do_pcibr_rrb_free(bridge_t *, pciio_slot_t, int);
-
-LOCAL void do_pcibr_rrb_autoalloc(pcibr_soft_t, int, int);
-
-int pcibr_wrb_flush(devfs_handle_t);
-int pcibr_rrb_alloc(devfs_handle_t, int *, int *);
-int pcibr_rrb_check(devfs_handle_t, int *, int *, int *, int *);
-int pcibr_alloc_all_rrbs(devfs_handle_t, int, int, int, int, int, int, int, int, int);
-void pcibr_rrb_flush(devfs_handle_t);
-
-LOCAL int pcibr_try_set_device(pcibr_soft_t, pciio_slot_t, unsigned, bridgereg_t);
-void pcibr_release_device(pcibr_soft_t, pciio_slot_t, bridgereg_t);
-
-LOCAL void pcibr_clearwidint(bridge_t *);
-LOCAL void pcibr_setwidint(xtalk_intr_t);
-LOCAL int pcibr_probe_slot(bridge_t *, cfg_p, unsigned *);
-
-void pcibr_init(void);
-int pcibr_attach(devfs_handle_t);
-int pcibr_detach(devfs_handle_t);
-int pcibr_open(devfs_handle_t *, int, int, cred_t *);
-int pcibr_close(devfs_handle_t, int, int, cred_t *);
-int pcibr_map(devfs_handle_t, vhandl_t *, off_t, size_t, uint);
-int pcibr_unmap(devfs_handle_t, vhandl_t *);
-int pcibr_ioctl(devfs_handle_t, int, void *, int, struct cred *, int *);
-
-void pcibr_freeblock_sub(iopaddr_t *, iopaddr_t *, iopaddr_t, size_t);
-
-LOCAL int pcibr_init_ext_ate_ram(bridge_t *);
-LOCAL int pcibr_ate_alloc(pcibr_soft_t, int);
-LOCAL void pcibr_ate_free(pcibr_soft_t, int, int);
-
-LOCAL pcibr_info_t pcibr_info_get(devfs_handle_t);
-LOCAL pcibr_info_t pcibr_device_info_new(pcibr_soft_t, pciio_slot_t, pciio_function_t, pciio_vendor_id_t, pciio_device_id_t);
-LOCAL void pcibr_device_info_free(devfs_handle_t, pciio_slot_t);
-LOCAL iopaddr_t pcibr_addr_pci_to_xio(devfs_handle_t, pciio_slot_t, pciio_space_t, iopaddr_t, size_t, unsigned);
-
-pcibr_piomap_t pcibr_piomap_alloc(devfs_handle_t, device_desc_t, pciio_space_t, iopaddr_t, size_t, size_t, unsigned);
-void pcibr_piomap_free(pcibr_piomap_t);
-caddr_t pcibr_piomap_addr(pcibr_piomap_t, iopaddr_t, size_t);
-void pcibr_piomap_done(pcibr_piomap_t);
-caddr_t pcibr_piotrans_addr(devfs_handle_t, device_desc_t, pciio_space_t, iopaddr_t, size_t, unsigned);
-iopaddr_t pcibr_piospace_alloc(devfs_handle_t, device_desc_t, pciio_space_t, size_t, size_t);
-void pcibr_piospace_free(devfs_handle_t, pciio_space_t, iopaddr_t, size_t);
-
-LOCAL iopaddr_t pcibr_flags_to_d64(unsigned, pcibr_soft_t);
-LOCAL bridge_ate_t pcibr_flags_to_ate(unsigned);
-
-pcibr_dmamap_t pcibr_dmamap_alloc(devfs_handle_t, device_desc_t, size_t, unsigned);
-void pcibr_dmamap_free(pcibr_dmamap_t);
-LOCAL bridge_ate_p pcibr_ate_addr(pcibr_soft_t, int);
-LOCAL iopaddr_t pcibr_addr_xio_to_pci(pcibr_soft_t, iopaddr_t, size_t);
-iopaddr_t pcibr_dmamap_addr(pcibr_dmamap_t, paddr_t, size_t);
-alenlist_t pcibr_dmamap_list(pcibr_dmamap_t, alenlist_t, unsigned);
-void pcibr_dmamap_done(pcibr_dmamap_t);
-cnodeid_t pcibr_get_dmatrans_node(devfs_handle_t);
-iopaddr_t pcibr_dmatrans_addr(devfs_handle_t, device_desc_t, paddr_t, size_t, unsigned);
-alenlist_t pcibr_dmatrans_list(devfs_handle_t, device_desc_t, alenlist_t, unsigned);
-void pcibr_dmamap_drain(pcibr_dmamap_t);
-void pcibr_dmaaddr_drain(devfs_handle_t, paddr_t, size_t);
-void pcibr_dmalist_drain(devfs_handle_t, alenlist_t);
-iopaddr_t pcibr_dmamap_pciaddr_get(pcibr_dmamap_t);
-
-static unsigned pcibr_intr_bits(pciio_info_t info, pciio_intr_line_t lines);
-pcibr_intr_t pcibr_intr_alloc(devfs_handle_t, device_desc_t, pciio_intr_line_t, devfs_handle_t);
-void pcibr_intr_free(pcibr_intr_t);
-LOCAL void pcibr_setpciint(xtalk_intr_t);
-int pcibr_intr_connect(pcibr_intr_t);
-void pcibr_intr_disconnect(pcibr_intr_t);
-
-devfs_handle_t pcibr_intr_cpu_get(pcibr_intr_t);
-void pcibr_xintr_preset(void *, int, xwidgetnum_t, iopaddr_t, xtalk_intr_vector_t);
-void pcibr_intr_func(intr_arg_t);
-
-void pcibr_provider_startup(devfs_handle_t);
-void pcibr_provider_shutdown(devfs_handle_t);
-
-int pcibr_reset(devfs_handle_t);
-pciio_endian_t pcibr_endian_set(devfs_handle_t, pciio_endian_t, pciio_endian_t);
-int pcibr_priority_bits_set(pcibr_soft_t, pciio_slot_t, pciio_priority_t);
-pciio_priority_t pcibr_priority_set(devfs_handle_t, pciio_priority_t);
-int pcibr_device_flags_set(devfs_handle_t, pcibr_device_flags_t);
-
-LOCAL cfg_p pcibr_config_addr(devfs_handle_t, unsigned);
-uint64_t pcibr_config_get(devfs_handle_t, unsigned, unsigned);
-LOCAL uint64_t do_pcibr_config_get(cfg_p, unsigned, unsigned);
-void pcibr_config_set(devfs_handle_t, unsigned, unsigned, uint64_t);
-LOCAL void do_pcibr_config_set(cfg_p, unsigned, unsigned, uint64_t);
-
-LOCAL pcibr_hints_t pcibr_hints_get(devfs_handle_t, int);
-void pcibr_hints_fix_rrbs(devfs_handle_t);
-void pcibr_hints_dualslot(devfs_handle_t, pciio_slot_t, pciio_slot_t);
-void pcibr_hints_intr_bits(devfs_handle_t, pcibr_intr_bits_f *);
-void pcibr_set_rrb_callback(devfs_handle_t, rrb_alloc_funct_t);
-void pcibr_hints_handsoff(devfs_handle_t);
-void pcibr_hints_subdevs(devfs_handle_t, pciio_slot_t, ulong);
-
-LOCAL int pcibr_slot_info_init(devfs_handle_t,pciio_slot_t);
-LOCAL int pcibr_slot_info_free(devfs_handle_t,pciio_slot_t);
-
-#ifdef LATER
-LOCAL int pcibr_slot_info_return(pcibr_soft_t, pciio_slot_t,
- pcibr_slot_info_resp_t);
-LOCAL void pcibr_slot_func_info_return(pcibr_info_h, int,
- pcibr_slot_func_info_resp_t);
-#endif /* LATER */
-
-LOCAL int pcibr_slot_addr_space_init(devfs_handle_t,pciio_slot_t);
-LOCAL int pcibr_slot_device_init(devfs_handle_t, pciio_slot_t);
-LOCAL int pcibr_slot_guest_info_init(devfs_handle_t,pciio_slot_t);
-LOCAL int pcibr_slot_initial_rrb_alloc(devfs_handle_t,pciio_slot_t);
-LOCAL int pcibr_slot_call_device_attach(devfs_handle_t,
- pciio_slot_t, int);
-LOCAL int pcibr_slot_call_device_detach(devfs_handle_t,
- pciio_slot_t, int);
-
-LOCAL int pcibr_slot_detach(devfs_handle_t, pciio_slot_t, int);
-LOCAL int pcibr_is_slot_sys_critical(devfs_handle_t, pciio_slot_t);
-#ifdef LATER
-LOCAL int pcibr_slot_query(devfs_handle_t, pcibr_slot_info_req_t);
-#endif
-
-/* =====================================================================
- * RRB management
- */
-
-#define LSBIT(word) ((word) &~ ((word)-1))
-
-#define PCIBR_RRB_SLOT_VIRTUAL 8
-
-LOCAL void
-do_pcibr_rrb_clear(bridge_t *bridge, int rrb)
-{
- bridgereg_t status;
-
- /* bridge_lock must be held;
- * this RRB must be disabled.
- */
-
- /* wait until RRB has no outstanduing XIO packets. */
- while ((status = bridge->b_resp_status) & BRIDGE_RRB_INUSE(rrb)) {
- ; /* XXX- beats on bridge. bad idea? */
- }
-
- /* if the RRB has data, drain it. */
- if (status & BRIDGE_RRB_VALID(rrb)) {
- bridge->b_resp_clear = BRIDGE_RRB_CLEAR(rrb);
-
- /* wait until RRB is no longer valid. */
- while ((status = bridge->b_resp_status) & BRIDGE_RRB_VALID(rrb)) {
- ; /* XXX- beats on bridge. bad idea? */
- }
- }
-}
-
-LOCAL void
-do_pcibr_rrb_flush(bridge_t *bridge, int rrbn)
-{
- reg_p rrbp = &bridge->b_rrb_map[rrbn & 1].reg;
- bridgereg_t rrbv;
- int shft = 4 * (rrbn >> 1);
- unsigned ebit = BRIDGE_RRB_EN << shft;
-
- rrbv = *rrbp;
- if (rrbv & ebit)
- *rrbp = rrbv & ~ebit;
-
- do_pcibr_rrb_clear(bridge, rrbn);
-
- if (rrbv & ebit)
- *rrbp = rrbv;
-}
-
-/*
- * pcibr_rrb_count_valid: count how many RRBs are
- * marked valid for the specified PCI slot on this
- * bridge.
- *
- * NOTE: The "slot" parameter for all pcibr_rrb
- * management routines must include the "virtual"
- * bit; when manageing both the normal and the
- * virtual channel, separate calls to these
- * routines must be made. To denote the virtual
- * channel, add PCIBR_RRB_SLOT_VIRTUAL to the slot
- * number.
- *
- * IMPL NOTE: The obvious algorithm is to iterate
- * through the RRB fields, incrementing a count if
- * the RRB is valid and matches the slot. However,
- * it is much simpler to use an algorithm derived
- * from the "partitioned add" idea. First, XOR in a
- * pattern such that the fields that match this
- * slot come up "all ones" and all other fields
- * have zeros in the mismatching bits. Then AND
- * together the bits in the field, so we end up
- * with one bit turned on for each field that
- * matched. Now we need to count these bits. This
- * can be done either with a series of shift/add
- * instructions or by using "tmp % 15"; I expect
- * that the cascaded shift/add will be faster.
- */
-
-LOCAL int
-do_pcibr_rrb_count_valid(bridge_t *bridge,
- pciio_slot_t slot)
-{
- bridgereg_t tmp;
-
- tmp = bridge->b_rrb_map[slot & 1].reg;
- tmp ^= 0x11111111 * (7 - slot / 2);
- tmp &= (0xCCCCCCCC & tmp) >> 2;
- tmp &= (0x22222222 & tmp) >> 1;
- tmp += tmp >> 4;
- tmp += tmp >> 8;
- tmp += tmp >> 16;
- return tmp & 15;
-}
-
-/*
- * do_pcibr_rrb_count_avail: count how many RRBs are
- * available to be allocated for the specified slot.
- *
- * IMPL NOTE: similar to the above, except we are
- * just counting how many fields have the valid bit
- * turned off.
- */
-LOCAL int
-do_pcibr_rrb_count_avail(bridge_t *bridge,
- pciio_slot_t slot)
-{
- bridgereg_t tmp;
-
- tmp = bridge->b_rrb_map[slot & 1].reg;
- tmp = (0x88888888 & ~tmp) >> 3;
- tmp += tmp >> 4;
- tmp += tmp >> 8;
- tmp += tmp >> 16;
- return tmp & 15;
-}
-
-/*
- * do_pcibr_rrb_alloc: allocate some additional RRBs
- * for the specified slot. Returns -1 if there were
- * insufficient free RRBs to satisfy the request,
- * or 0 if the request was fulfilled.
- *
- * Note that if a request can be partially filled,
- * it will be, even if we return failure.
- *
- * IMPL NOTE: again we avoid iterating across all
- * the RRBs; instead, we form up a word containing
- * one bit for each free RRB, then peel the bits
- * off from the low end.
- */
-LOCAL int
-do_pcibr_rrb_alloc(bridge_t *bridge,
- pciio_slot_t slot,
- int more)
-{
- int rv = 0;
- bridgereg_t reg, tmp, bit;
-
- reg = bridge->b_rrb_map[slot & 1].reg;
- tmp = (0x88888888 & ~reg) >> 3;
- while (more-- > 0) {
- bit = LSBIT(tmp);
- if (!bit) {
- rv = -1;
- break;
- }
- tmp &= ~bit;
- reg = ((reg & ~(bit * 15)) | (bit * (8 + slot / 2)));
- }
- bridge->b_rrb_map[slot & 1].reg = reg;
- return rv;
-}
-
-/*
- * do_pcibr_rrb_free: release some of the RRBs that
- * have been allocated for the specified
- * slot. Returns zero for success, or negative if
- * it was unable to free that many RRBs.
- *
- * IMPL NOTE: We form up a bit for each RRB
- * allocated to the slot, aligned with the VALID
- * bitfield this time; then we peel bits off one at
- * a time, releasing the corresponding RRB.
- */
-LOCAL int
-do_pcibr_rrb_free(bridge_t *bridge,
- pciio_slot_t slot,
- int less)
-{
- int rv = 0;
- bridgereg_t reg, tmp, clr, bit;
- int i;
-
- clr = 0;
- reg = bridge->b_rrb_map[slot & 1].reg;
-
- /* This needs to be done otherwise the rrb's on the virtual channel
- * for this slot won't be freed !!
- */
- tmp = reg & 0xbbbbbbbb;
-
- tmp ^= (0x11111111 * (7 - slot / 2));
- tmp &= (0x33333333 & tmp) << 2;
- tmp &= (0x44444444 & tmp) << 1;
- while (less-- > 0) {
- bit = LSBIT(tmp);
- if (!bit) {
- rv = -1;
- break;
- }
- tmp &= ~bit;
- reg &= ~bit;
- clr |= bit;
- }
- bridge->b_rrb_map[slot & 1].reg = reg;
-
- for (i = 0; i < 8; i++)
- if (clr & (8 << (4 * i)))
- do_pcibr_rrb_clear(bridge, (2 * i) + (slot & 1));
-
- return rv;
-}
-
-LOCAL void
-do_pcibr_rrb_autoalloc(pcibr_soft_t pcibr_soft,
- int slot,
- int more_rrbs)
-{
- bridge_t *bridge = pcibr_soft->bs_base;
- int got;
-
- for (got = 0; got < more_rrbs; ++got) {
- if (pcibr_soft->bs_rrb_res[slot & 7] > 0)
- pcibr_soft->bs_rrb_res[slot & 7]--;
- else if (pcibr_soft->bs_rrb_avail[slot & 1] > 0)
- pcibr_soft->bs_rrb_avail[slot & 1]--;
- else
- break;
- if (do_pcibr_rrb_alloc(bridge, slot, 1) < 0)
- break;
-#if PCIBR_RRB_DEBUG
- printk( "do_pcibr_rrb_autoalloc: add one to slot %d%s\n",
- slot & 7, slot & 8 ? "v" : "");
-#endif
- pcibr_soft->bs_rrb_valid[slot]++;
- }
-#if PCIBR_RRB_DEBUG
- printk("%s: %d+%d free RRBs. Allocation list:\n", pcibr_soft->bs_name,
- pcibr_soft->bs_rrb_avail[0],
- pcibr_soft->bs_rrb_avail[1]);
- for (slot = 0; slot < 8; ++slot)
- printk("\t%d+%d+%d",
- 0xFFF & pcibr_soft->bs_rrb_valid[slot],
- 0xFFF & pcibr_soft->bs_rrb_valid[slot + PCIBR_RRB_SLOT_VIRTUAL],
- pcibr_soft->bs_rrb_res[slot]);
- printk("\n");
-#endif
-}
-
-/*
- * Device driver interface to flush the write buffers for a specified
- * device hanging off the bridge.
- */
-int
-pcibr_wrb_flush(devfs_handle_t pconn_vhdl)
-{
- pciio_info_t pciio_info = pciio_info_get(pconn_vhdl);
- pciio_slot_t pciio_slot = pciio_info_slot_get(pciio_info);
- pcibr_soft_t pcibr_soft = (pcibr_soft_t) pciio_info_mfast_get(pciio_info);
- bridge_t *bridge = pcibr_soft->bs_base;
- volatile bridgereg_t *wrb_flush;
-
- wrb_flush = &(bridge->b_wr_req_buf[pciio_slot].reg);
- while (*wrb_flush);
-
- return(0);
-}
-/*
- * Device driver interface to request RRBs for a specified device
- * hanging off a Bridge. The driver requests the total number of
- * RRBs it would like for the normal channel (vchan0) and for the
- * "virtual channel" (vchan1). The actual number allocated to each
- * channel is returned.
- *
- * If we cannot allocate at least one RRB to a channel that needs
- * at least one, return -1 (failure). Otherwise, satisfy the request
- * as best we can and return 0.
- */
-int
-pcibr_rrb_alloc(devfs_handle_t pconn_vhdl,
- int *count_vchan0,
- int *count_vchan1)
-{
- pciio_info_t pciio_info = pciio_info_get(pconn_vhdl);
- pciio_slot_t pciio_slot = pciio_info_slot_get(pciio_info);
- pcibr_soft_t pcibr_soft = (pcibr_soft_t) pciio_info_mfast_get(pciio_info);
- bridge_t *bridge = pcibr_soft->bs_base;
- int desired_vchan0;
- int desired_vchan1;
- int orig_vchan0;
- int orig_vchan1;
- int delta_vchan0;
- int delta_vchan1;
- int final_vchan0;
- int final_vchan1;
- int avail_rrbs;
- unsigned long s;
- int error;
-
- /*
- * TBD: temper request with admin info about RRB allocation,
- * and according to demand from other devices on this Bridge.
- *
- * One way of doing this would be to allocate two RRBs
- * for each device on the bus, before any drivers start
- * asking for extras. This has the weakness that one
- * driver might not give back an "extra" RRB until after
- * another driver has already failed to get one that
- * it wanted.
- */
-
- s = pcibr_lock(pcibr_soft);
-
- /* How many RRBs do we own? */
- orig_vchan0 = pcibr_soft->bs_rrb_valid[pciio_slot];
- orig_vchan1 = pcibr_soft->bs_rrb_valid[pciio_slot + PCIBR_RRB_SLOT_VIRTUAL];
-
- /* How many RRBs do we want? */
- desired_vchan0 = count_vchan0 ? *count_vchan0 : orig_vchan0;
- desired_vchan1 = count_vchan1 ? *count_vchan1 : orig_vchan1;
-
- /* How many RRBs are free? */
- avail_rrbs = pcibr_soft->bs_rrb_avail[pciio_slot & 1]
- + pcibr_soft->bs_rrb_res[pciio_slot];
-
- /* Figure desired deltas */
- delta_vchan0 = desired_vchan0 - orig_vchan0;
- delta_vchan1 = desired_vchan1 - orig_vchan1;
-
- /* Trim back deltas to something
- * that we can actually meet, by
- * decreasing the ending allocation
- * for whichever channel wants
- * more RRBs. If both want the same
- * number, cut the second channel.
- * NOTE: do not change the allocation for
- * a channel that was passed as NULL.
- */
- while ((delta_vchan0 + delta_vchan1) > avail_rrbs) {
- if (count_vchan0 &&
- (!count_vchan1 ||
- ((orig_vchan0 + delta_vchan0) >
- (orig_vchan1 + delta_vchan1))))
- delta_vchan0--;
- else
- delta_vchan1--;
- }
-
- /* Figure final RRB allocations
- */
- final_vchan0 = orig_vchan0 + delta_vchan0;
- final_vchan1 = orig_vchan1 + delta_vchan1;
-
- /* If either channel wants RRBs but our actions
- * would leave it with none, declare an error,
- * but DO NOT change any RRB allocations.
- */
- if ((desired_vchan0 && !final_vchan0) ||
- (desired_vchan1 && !final_vchan1)) {
-
- error = -1;
-
- } else {
-
- /* Commit the allocations: free, then alloc.
- */
- if (delta_vchan0 < 0)
- (void) do_pcibr_rrb_free(bridge, pciio_slot, -delta_vchan0);
- if (delta_vchan1 < 0)
- (void) do_pcibr_rrb_free(bridge, PCIBR_RRB_SLOT_VIRTUAL + pciio_slot, -delta_vchan1);
-
- if (delta_vchan0 > 0)
- (void) do_pcibr_rrb_alloc(bridge, pciio_slot, delta_vchan0);
- if (delta_vchan1 > 0)
- (void) do_pcibr_rrb_alloc(bridge, PCIBR_RRB_SLOT_VIRTUAL + pciio_slot, delta_vchan1);
-
- /* Return final values to caller.
- */
- if (count_vchan0)
- *count_vchan0 = final_vchan0;
- if (count_vchan1)
- *count_vchan1 = final_vchan1;
-
- /* prevent automatic changes to this slot's RRBs
- */
- pcibr_soft->bs_rrb_fixed |= 1 << pciio_slot;
-
- /* Track the actual allocations, release
- * any further reservations, and update the
- * number of available RRBs.
- */
-
- pcibr_soft->bs_rrb_valid[pciio_slot] = final_vchan0;
- pcibr_soft->bs_rrb_valid[pciio_slot + PCIBR_RRB_SLOT_VIRTUAL] = final_vchan1;
- pcibr_soft->bs_rrb_avail[pciio_slot & 1] =
- pcibr_soft->bs_rrb_avail[pciio_slot & 1]
- + pcibr_soft->bs_rrb_res[pciio_slot]
- - delta_vchan0
- - delta_vchan1;
- pcibr_soft->bs_rrb_res[pciio_slot] = 0;
-
-#if PCIBR_RRB_DEBUG
- printk("pcibr_rrb_alloc: slot %d set to %d+%d; %d+%d free\n",
- pciio_slot, final_vchan0, final_vchan1,
- pcibr_soft->bs_rrb_avail[0],
- pcibr_soft->bs_rrb_avail[1]);
- for (pciio_slot = 0; pciio_slot < 8; ++pciio_slot)
- printk("\t%d+%d+%d",
- 0xFFF & pcibr_soft->bs_rrb_valid[pciio_slot],
- 0xFFF & pcibr_soft->bs_rrb_valid[pciio_slot + PCIBR_RRB_SLOT_VIRTUAL],
- pcibr_soft->bs_rrb_res[pciio_slot]);
- printk("\n");
-#endif
-
- error = 0;
- }
-
- pcibr_unlock(pcibr_soft, s);
- return error;
-}
-
-/*
- * Device driver interface to check the current state
- * of the RRB allocations.
- *
- * pconn_vhdl is your PCI connection point (specifies which
- * PCI bus and which slot).
- *
- * count_vchan0 points to where to return the number of RRBs
- * assigned to the primary DMA channel, used by all DMA
- * that does not explicitly ask for the alternate virtual
- * channel.
- *
- * count_vchan1 points to where to return the number of RRBs
- * assigned to the secondary DMA channel, used when
- * PCIBR_VCHAN1 and PCIIO_DMA_A64 are specified.
- *
- * count_reserved points to where to return the number of RRBs
- * that have been automatically reserved for your device at
- * startup, but which have not been assigned to a
- * channel. RRBs must be assigned to a channel to be used;
- * this can be done either with an explicit pcibr_rrb_alloc
- * call, or automatically by the infrastructure when a DMA
- * translation is constructed. Any call to pcibr_rrb_alloc
- * will release any unassigned reserved RRBs back to the
- * free pool.
- *
- * count_pool points to where to return the number of RRBs
- * that are currently unassigned and unreserved. This
- * number can (and will) change as other drivers make calls
- * to pcibr_rrb_alloc, or automatically allocate RRBs for
- * DMA beyond their initial reservation.
- *
- * NULL may be passed for any of the return value pointers
- * the caller is not interested in.
- *
- * The return value is "0" if all went well, or "-1" if
- * there is a problem. Additionally, if the wrong vertex
- * is passed in, one of the subsidiary support functions
- * could panic with a "bad pciio fingerprint."
- */
-
-int
-pcibr_rrb_check(devfs_handle_t pconn_vhdl,
- int *count_vchan0,
- int *count_vchan1,
- int *count_reserved,
- int *count_pool)
-{
- pciio_info_t pciio_info;
- pciio_slot_t pciio_slot;
- pcibr_soft_t pcibr_soft;
- unsigned long s;
- int error = -1;
-
- if ((pciio_info = pciio_info_get(pconn_vhdl)) &&
- (pcibr_soft = (pcibr_soft_t) pciio_info_mfast_get(pciio_info)) &&
- ((pciio_slot = pciio_info_slot_get(pciio_info)) < 8)) {
-
- s = pcibr_lock(pcibr_soft);
-
- if (count_vchan0)
- *count_vchan0 =
- pcibr_soft->bs_rrb_valid[pciio_slot];
-
- if (count_vchan1)
- *count_vchan1 =
- pcibr_soft->bs_rrb_valid[pciio_slot + PCIBR_RRB_SLOT_VIRTUAL];
-
- if (count_reserved)
- *count_reserved =
- pcibr_soft->bs_rrb_res[pciio_slot];
-
- if (count_pool)
- *count_pool =
- pcibr_soft->bs_rrb_avail[pciio_slot & 1];
-
- error = 0;
-
- pcibr_unlock(pcibr_soft, s);
- }
- return error;
-}
-
-/* pcibr_alloc_all_rrbs allocates all the rrbs available in the quantities
- * requested for each of the devies. The evn_odd argument indicates whether
- * allcoation for the odd or even rrbs is requested and next group of four pairse
- * are the amount to assign to each device (they should sum to <= 8) and
- * whether to set the viritual bit for that device (1 indictaes yes, 0 indicates no)
- * the devices in order are either 0, 2, 4, 6 or 1, 3, 5, 7
- * if even_odd is even we alloc even rrbs else we allocate odd rrbs
- * returns 0 if no errors else returns -1
- */
-
-int
-pcibr_alloc_all_rrbs(devfs_handle_t vhdl, int even_odd,
- int dev_1_rrbs, int virt1, int dev_2_rrbs, int virt2,
- int dev_3_rrbs, int virt3, int dev_4_rrbs, int virt4)
-{
- devfs_handle_t pcibr_vhdl;
- pcibr_soft_t pcibr_soft = NULL;
- bridge_t *bridge = NULL;
-
- uint32_t rrb_setting = 0;
- int rrb_shift = 7;
- uint32_t cur_rrb;
- int dev_rrbs[4];
- int virt[4];
- int i, j;
- unsigned long s;
-
- if (GRAPH_SUCCESS ==
- hwgraph_traverse(vhdl, EDGE_LBL_PCI, &pcibr_vhdl)) {
- pcibr_soft = pcibr_soft_get(pcibr_vhdl);
- if (pcibr_soft)
- bridge = pcibr_soft->bs_base;
- hwgraph_vertex_unref(pcibr_vhdl);
- }
- if (bridge == NULL)
- bridge = (bridge_t *) xtalk_piotrans_addr
- (vhdl, NULL, 0, sizeof(bridge_t), 0);
-
- even_odd &= 1;
-
- dev_rrbs[0] = dev_1_rrbs;
- dev_rrbs[1] = dev_2_rrbs;
- dev_rrbs[2] = dev_3_rrbs;
- dev_rrbs[3] = dev_4_rrbs;
-
- virt[0] = virt1;
- virt[1] = virt2;
- virt[2] = virt3;
- virt[3] = virt4;
-
- if ((dev_1_rrbs + dev_2_rrbs + dev_3_rrbs + dev_4_rrbs) > 8) {
- return -1;
- }
- if ((dev_1_rrbs < 0) || (dev_2_rrbs < 0) || (dev_3_rrbs < 0) || (dev_4_rrbs < 0)) {
- return -1;
- }
- /* walk through rrbs */
- for (i = 0; i < 4; i++) {
- if (virt[i]) {
- cur_rrb = i | 0xc;
- cur_rrb = cur_rrb << (rrb_shift * 4);
- rrb_shift--;
- rrb_setting = rrb_setting | cur_rrb;
- dev_rrbs[i] = dev_rrbs[i] - 1;
- }
- for (j = 0; j < dev_rrbs[i]; j++) {
- cur_rrb = i | 0x8;
- cur_rrb = cur_rrb << (rrb_shift * 4);
- rrb_shift--;
- rrb_setting = rrb_setting | cur_rrb;
- }
- }
-
- if (pcibr_soft)
- s = pcibr_lock(pcibr_soft);
-
- bridge->b_rrb_map[even_odd].reg = rrb_setting;
-
- if (pcibr_soft) {
-
- pcibr_soft->bs_rrb_fixed |= 0x55 << even_odd;
-
- /* since we've "FIXED" the allocations
- * for these slots, we probably can dispense
- * with tracking avail/res/valid data, but
- * keeping it up to date helps debugging.
- */
-
- pcibr_soft->bs_rrb_avail[even_odd] =
- 8 - (dev_1_rrbs + dev_2_rrbs + dev_3_rrbs + dev_4_rrbs);
-
- pcibr_soft->bs_rrb_res[even_odd + 0] = 0;
- pcibr_soft->bs_rrb_res[even_odd + 2] = 0;
- pcibr_soft->bs_rrb_res[even_odd + 4] = 0;
- pcibr_soft->bs_rrb_res[even_odd + 6] = 0;
-
- pcibr_soft->bs_rrb_valid[even_odd + 0] = dev_1_rrbs - virt1;
- pcibr_soft->bs_rrb_valid[even_odd + 2] = dev_2_rrbs - virt2;
- pcibr_soft->bs_rrb_valid[even_odd + 4] = dev_3_rrbs - virt3;
- pcibr_soft->bs_rrb_valid[even_odd + 6] = dev_4_rrbs - virt4;
-
- pcibr_soft->bs_rrb_valid[even_odd + 0 + PCIBR_RRB_SLOT_VIRTUAL] = virt1;
- pcibr_soft->bs_rrb_valid[even_odd + 2 + PCIBR_RRB_SLOT_VIRTUAL] = virt2;
- pcibr_soft->bs_rrb_valid[even_odd + 4 + PCIBR_RRB_SLOT_VIRTUAL] = virt3;
- pcibr_soft->bs_rrb_valid[even_odd + 6 + PCIBR_RRB_SLOT_VIRTUAL] = virt4;
-
- pcibr_unlock(pcibr_soft, s);
- }
- return 0;
-}
-
-/*
- * pcibr_rrb_flush: chase down all the RRBs assigned
- * to the specified connection point, and flush
- * them.
- */
-void
-pcibr_rrb_flush(devfs_handle_t pconn_vhdl)
-{
- pciio_info_t pciio_info = pciio_info_get(pconn_vhdl);
- pcibr_soft_t pcibr_soft = (pcibr_soft_t) pciio_info_mfast_get(pciio_info);
- pciio_slot_t pciio_slot = pciio_info_slot_get(pciio_info);
- bridge_t *bridge = pcibr_soft->bs_base;
- unsigned long s;
- reg_p rrbp;
- unsigned rrbm;
- int i;
- int rrbn;
- unsigned sval;
- unsigned mask;
-
- sval = BRIDGE_RRB_EN | (pciio_slot >> 1);
- mask = BRIDGE_RRB_EN | BRIDGE_RRB_PDEV;
- rrbn = pciio_slot & 1;
- rrbp = &bridge->b_rrb_map[rrbn].reg;
-
- s = pcibr_lock(pcibr_soft);
- rrbm = *rrbp;
- for (i = 0; i < 8; ++i) {
- if ((rrbm & mask) == sval)
- do_pcibr_rrb_flush(bridge, rrbn);
- rrbm >>= 4;
- rrbn += 2;
- }
- pcibr_unlock(pcibr_soft, s);
-}
-
-/* =====================================================================
- * Device(x) register management
- */
-
-/* pcibr_try_set_device: attempt to modify Device(x)
- * for the specified slot on the specified bridge
- * as requested in flags, limited to the specified
- * bits. Returns which BRIDGE bits were in conflict,
- * or ZERO if everything went OK.
- *
- * Caller MUST hold pcibr_lock when calling this function.
- */
-LOCAL int
-pcibr_try_set_device(pcibr_soft_t pcibr_soft,
- pciio_slot_t slot,
- unsigned flags,
- bridgereg_t mask)
-{
- bridge_t *bridge;
- pcibr_soft_slot_t slotp;
- bridgereg_t old;
- bridgereg_t new;
- bridgereg_t chg;
- bridgereg_t bad;
- bridgereg_t badpmu;
- bridgereg_t badd32;
- bridgereg_t badd64;
- bridgereg_t fix;
- unsigned long s;
- bridgereg_t xmask;
-
- xmask = mask;
- if (pcibr_soft->bs_xbridge) {
- if (mask == BRIDGE_DEV_PMU_BITS)
- xmask = XBRIDGE_DEV_PMU_BITS;
- if (mask == BRIDGE_DEV_D64_BITS)
- xmask = XBRIDGE_DEV_D64_BITS;
- }
-
- slotp = &pcibr_soft->bs_slot[slot];
-
- s = pcibr_lock(pcibr_soft);
-
- bridge = pcibr_soft->bs_base;
-
- old = slotp->bss_device;
-
- /* figure out what the desired
- * Device(x) bits are based on
- * the flags specified.
- */
-
- new = old;
-
- /* Currently, we inherit anything that
- * the new caller has not specified in
- * one way or another, unless we take
- * action here to not inherit.
- *
- * This is needed for the "swap" stuff,
- * since it could have been set via
- * pcibr_endian_set -- altho note that
- * any explicit PCIBR_BYTE_STREAM or
- * PCIBR_WORD_VALUES will freely override
- * the effect of that call (and vice
- * versa, no protection either way).
- *
- * I want to get rid of pcibr_endian_set
- * in favor of tracking DMA endianness
- * using the flags specified when DMA
- * channels are created.
- */
-
-#define BRIDGE_DEV_WRGA_BITS (BRIDGE_DEV_PMU_WRGA_EN | BRIDGE_DEV_DIR_WRGA_EN)
-#define BRIDGE_DEV_SWAP_BITS (BRIDGE_DEV_SWAP_PMU | BRIDGE_DEV_SWAP_DIR)
-
- /* Do not use Barrier, Write Gather,
- * or Prefetch unless asked.
- * Leave everything else as it
- * was from the last time.
- */
- new = new
- & ~BRIDGE_DEV_BARRIER
- & ~BRIDGE_DEV_WRGA_BITS
- & ~BRIDGE_DEV_PREF
- ;
-
- /* Generic macro flags
- */
- if (flags & PCIIO_DMA_DATA) {
- new = (new
- & ~BRIDGE_DEV_BARRIER) /* barrier off */
- | BRIDGE_DEV_PREF; /* prefetch on */
-
- }
- if (flags & PCIIO_DMA_CMD) {
- new = ((new
- & ~BRIDGE_DEV_PREF) /* prefetch off */
- & ~BRIDGE_DEV_WRGA_BITS) /* write gather off */
- | BRIDGE_DEV_BARRIER; /* barrier on */
- }
- /* Generic detail flags
- */
- if (flags & PCIIO_WRITE_GATHER)
- new |= BRIDGE_DEV_WRGA_BITS;
- if (flags & PCIIO_NOWRITE_GATHER)
- new &= ~BRIDGE_DEV_WRGA_BITS;
-
- if (flags & PCIIO_PREFETCH)
- new |= BRIDGE_DEV_PREF;
- if (flags & PCIIO_NOPREFETCH)
- new &= ~BRIDGE_DEV_PREF;
-
- if (flags & PCIBR_WRITE_GATHER)
- new |= BRIDGE_DEV_WRGA_BITS;
- if (flags & PCIBR_NOWRITE_GATHER)
- new &= ~BRIDGE_DEV_WRGA_BITS;
-
- if (flags & PCIIO_BYTE_STREAM)
- new |= (pcibr_soft->bs_xbridge) ?
- BRIDGE_DEV_SWAP_DIR : BRIDGE_DEV_SWAP_BITS;
- if (flags & PCIIO_WORD_VALUES)
- new &= (pcibr_soft->bs_xbridge) ?
- ~BRIDGE_DEV_SWAP_DIR : ~BRIDGE_DEV_SWAP_BITS;
-
- /* Provider-specific flags
- */
- if (flags & PCIBR_PREFETCH)
- new |= BRIDGE_DEV_PREF;
- if (flags & PCIBR_NOPREFETCH)
- new &= ~BRIDGE_DEV_PREF;
-
- if (flags & PCIBR_PRECISE)
- new |= BRIDGE_DEV_PRECISE;
- if (flags & PCIBR_NOPRECISE)
- new &= ~BRIDGE_DEV_PRECISE;
-
- if (flags & PCIBR_BARRIER)
- new |= BRIDGE_DEV_BARRIER;
- if (flags & PCIBR_NOBARRIER)
- new &= ~BRIDGE_DEV_BARRIER;
-
- if (flags & PCIBR_64BIT)
- new |= BRIDGE_DEV_DEV_SIZE;
- if (flags & PCIBR_NO64BIT)
- new &= ~BRIDGE_DEV_DEV_SIZE;
-
- chg = old ^ new; /* what are we changing, */
- chg &= xmask; /* of the interesting bits */
-
- if (chg) {
-
- badd32 = slotp->bss_d32_uctr ? (BRIDGE_DEV_D32_BITS & chg) : 0;
- if (pcibr_soft->bs_xbridge) {
- badpmu = slotp->bss_pmu_uctr ? (XBRIDGE_DEV_PMU_BITS & chg) : 0;
- badd64 = slotp->bss_d64_uctr ? (XBRIDGE_DEV_D64_BITS & chg) : 0;
- } else {
- badpmu = slotp->bss_pmu_uctr ? (BRIDGE_DEV_PMU_BITS & chg) : 0;
- badd64 = slotp->bss_d64_uctr ? (BRIDGE_DEV_D64_BITS & chg) : 0;
- }
- bad = badpmu | badd32 | badd64;
-
- if (bad) {
-
- /* some conflicts can be resolved by
- * forcing the bit on. this may cause
- * some performance degredation in
- * the stream(s) that want the bit off,
- * but the alternative is not allowing
- * the new stream at all.
- */
- if ( (fix = bad & (BRIDGE_DEV_PRECISE |
- BRIDGE_DEV_BARRIER)) ){
- bad &= ~fix;
- /* don't change these bits if
- * they are already set in "old"
- */
- chg &= ~(fix & old);
- }
- /* some conflicts can be resolved by
- * forcing the bit off. this may cause
- * some performance degredation in
- * the stream(s) that want the bit on,
- * but the alternative is not allowing
- * the new stream at all.
- */
- if ( (fix = bad & (BRIDGE_DEV_WRGA_BITS |
- BRIDGE_DEV_PREF)) ) {
- bad &= ~fix;
- /* don't change these bits if
- * we wanted to turn them on.
- */
- chg &= ~(fix & new);
- }
- /* conflicts in other bits mean
- * we can not establish this DMA
- * channel while the other(s) are
- * still present.
- */
- if (bad) {
- pcibr_unlock(pcibr_soft, s);
-#if (DEBUG && PCIBR_DEV_DEBUG)
- printk("pcibr_try_set_device: mod blocked by %R\n", bad, device_bits);
-#endif
- return bad;
- }
- }
- }
- if (mask == BRIDGE_DEV_PMU_BITS)
- slotp->bss_pmu_uctr++;
- if (mask == BRIDGE_DEV_D32_BITS)
- slotp->bss_d32_uctr++;
- if (mask == BRIDGE_DEV_D64_BITS)
- slotp->bss_d64_uctr++;
-
- /* the value we want to write is the
- * original value, with the bits for
- * our selected changes flipped, and
- * with any disabled features turned off.
- */
- new = old ^ chg; /* only change what we want to change */
-
- if (slotp->bss_device == new) {
- pcibr_unlock(pcibr_soft, s);
- return 0;
- }
- bridge->b_device[slot].reg = new;
- slotp->bss_device = new;
- bridge->b_wid_tflush; /* wait until Bridge PIO complete */
- pcibr_unlock(pcibr_soft, s);
-#if DEBUG && PCIBR_DEV_DEBUG
- printk("pcibr Device(%d): 0x%p\n", slot, bridge->b_device[slot].reg);
-#endif
-
- return 0;
-}
-
-void
-pcibr_release_device(pcibr_soft_t pcibr_soft,
- pciio_slot_t slot,
- bridgereg_t mask)
-{
- pcibr_soft_slot_t slotp;
- unsigned long s;
-
- slotp = &pcibr_soft->bs_slot[slot];
-
- s = pcibr_lock(pcibr_soft);
-
- if (mask == BRIDGE_DEV_PMU_BITS)
- slotp->bss_pmu_uctr--;
- if (mask == BRIDGE_DEV_D32_BITS)
- slotp->bss_d32_uctr--;
- if (mask == BRIDGE_DEV_D64_BITS)
- slotp->bss_d64_uctr--;
-
- pcibr_unlock(pcibr_soft, s);
-}
-
-/*
- * flush write gather buffer for slot
- */
-LOCAL void
-pcibr_device_write_gather_flush(pcibr_soft_t pcibr_soft,
- pciio_slot_t slot)
-{
- bridge_t *bridge;
- unsigned long s;
- volatile uint32_t wrf;
- s = pcibr_lock(pcibr_soft);
- bridge = pcibr_soft->bs_base;
- wrf = bridge->b_wr_req_buf[slot].reg;
- pcibr_unlock(pcibr_soft, s);
-}
-
-/* =====================================================================
- * Bridge (pcibr) "Device Driver" entry points
- */
-
-/*
- * pcibr_probe_slot: read a config space word
- * while trapping any errors; reutrn zero if
- * all went OK, or nonzero if there was an error.
- * The value read, if any, is passed back
- * through the valp parameter.
- */
-LOCAL int
-pcibr_probe_slot(bridge_t *bridge,
- cfg_p cfg,
- unsigned *valp)
-{
- int rv;
- bridgereg_t old_enable, new_enable;
- int badaddr_val(volatile void *, int, volatile void *);
-
-
- old_enable = bridge->b_int_enable;
- new_enable = old_enable & ~BRIDGE_IMR_PCI_MST_TIMEOUT;
-
- bridge->b_int_enable = new_enable;
-
- /*
- * The xbridge doesn't clear b_err_int_view unless
- * multi-err is cleared...
- */
- if (is_xbridge(bridge))
- if (bridge->b_err_int_view & BRIDGE_ISR_PCI_MST_TIMEOUT) {
- bridge->b_int_rst_stat = BRIDGE_IRR_MULTI_CLR;
- }
-
- if (bridge->b_int_status & BRIDGE_IRR_PCI_GRP) {
- bridge->b_int_rst_stat = BRIDGE_IRR_PCI_GRP_CLR;
- (void) bridge->b_wid_tflush; /* flushbus */
- }
- rv = badaddr_val((void *) cfg, 4, valp);
-
- /*
- * The xbridge doesn't set master timeout in b_int_status
- * here. Fortunately it's in error_interrupt_view.
- */
- if (is_xbridge(bridge))
- if (bridge->b_err_int_view & BRIDGE_ISR_PCI_MST_TIMEOUT) {
- bridge->b_int_rst_stat = BRIDGE_IRR_MULTI_CLR;
- rv = 1; /* unoccupied slot */
- }
-
- bridge->b_int_enable = old_enable;
- bridge->b_wid_tflush; /* wait until Bridge PIO complete */
-
- return rv;
-}
-
-/*
- * pcibr_init: called once during system startup or
- * when a loadable driver is loaded.
- *
- * The driver_register function should normally
- * be in _reg, not _init. But the pcibr driver is
- * required by devinit before the _reg routines
- * are called, so this is an exception.
- */
-void
-pcibr_init(void)
-{
-#if DEBUG && ATTACH_DEBUG
- printk("pcibr_init\n");
-#endif
-
- xwidget_driver_register(XBRIDGE_WIDGET_PART_NUM,
- XBRIDGE_WIDGET_MFGR_NUM,
- "pcibr_",
- 0);
- xwidget_driver_register(BRIDGE_WIDGET_PART_NUM,
- BRIDGE_WIDGET_MFGR_NUM,
- "pcibr_",
- 0);
-}
-
-/*
- * open/close mmap/munmap interface would be used by processes
- * that plan to map the PCI bridge, and muck around with the
- * registers. This is dangerous to do, and will be allowed
- * to a select brand of programs. Typically these are
- * diagnostics programs, or some user level commands we may
- * write to do some weird things.
- * To start with expect them to have root priveleges.
- * We will ask for more later.
- */
-/* ARGSUSED */
-int
-pcibr_open(devfs_handle_t *devp, int oflag, int otyp, cred_t *credp)
-{
- return 0;
-}
-
-/*ARGSUSED */
-int
-pcibr_close(devfs_handle_t dev, int oflag, int otyp, cred_t *crp)
-{
- return 0;
-}
-
-/*ARGSUSED */
-int
-pcibr_map(devfs_handle_t dev, vhandl_t *vt, off_t off, size_t len, uint prot)
-{
- int error;
- devfs_handle_t vhdl = dev_to_vhdl(dev);
- devfs_handle_t pcibr_vhdl = hwgraph_connectpt_get(vhdl);
- pcibr_soft_t pcibr_soft = pcibr_soft_get(pcibr_vhdl);
- bridge_t *bridge = pcibr_soft->bs_base;
-
- hwgraph_vertex_unref(pcibr_vhdl);
-
- ASSERT(pcibr_soft);
- len = ctob(btoc(len)); /* Make len page aligned */
- error = v_mapphys(vt, (void *) ((__psunsigned_t) bridge + off), len);
-
- /*
- * If the offset being mapped corresponds to the flash prom
- * base, and if the mapping succeeds, and if the user
- * has requested the protections to be WRITE, enable the
- * flash prom to be written.
- *
- * XXX- deprecate this in favor of using the
- * real flash driver ...
- */
- if (!error &&
- ((off == BRIDGE_EXTERNAL_FLASH) ||
- (len > BRIDGE_EXTERNAL_FLASH))) {
- int s;
-
- /*
- * ensure that we write and read without any interruption.
- * The read following the write is required for the Bridge war
- */
- s = splhi();
- bridge->b_wid_control |= BRIDGE_CTRL_FLASH_WR_EN;
- bridge->b_wid_control; /* inval addr bug war */
- splx(s);
- }
- return error;
-}
-
-/*ARGSUSED */
-int
-pcibr_unmap(devfs_handle_t dev, vhandl_t *vt)
-{
- devfs_handle_t pcibr_vhdl = hwgraph_connectpt_get((devfs_handle_t) dev);
- pcibr_soft_t pcibr_soft = pcibr_soft_get(pcibr_vhdl);
- bridge_t *bridge = pcibr_soft->bs_base;
-
- hwgraph_vertex_unref(pcibr_vhdl);
-
- /*
- * If flashprom write was enabled, disable it, as
- * this is the last unmap.
- */
- if (bridge->b_wid_control & BRIDGE_CTRL_FLASH_WR_EN) {
- int s;
-
- /*
- * ensure that we write and read without any interruption.
- * The read following the write is required for the Bridge war
- */
- s = splhi();
- bridge->b_wid_control &= ~BRIDGE_CTRL_FLASH_WR_EN;
- bridge->b_wid_control; /* inval addr bug war */
- splx(s);
- }
- return 0;
-}
-
-/* This is special case code used by grio. There are plans to make
- * this a bit more general in the future, but till then this should
- * be sufficient.
- */
-pciio_slot_t
-pcibr_device_slot_get(devfs_handle_t dev_vhdl)
-{
- char devname[MAXDEVNAME];
- devfs_handle_t tdev;
- pciio_info_t pciio_info;
- pciio_slot_t slot = PCIIO_SLOT_NONE;
-
- vertex_to_name(dev_vhdl, devname, MAXDEVNAME);
-
- /* run back along the canonical path
- * until we find a PCI connection point.
- */
- tdev = hwgraph_connectpt_get(dev_vhdl);
- while (tdev != GRAPH_VERTEX_NONE) {
- pciio_info = pciio_info_chk(tdev);
- if (pciio_info) {
- slot = pciio_info_slot_get(pciio_info);
- break;
- }
- hwgraph_vertex_unref(tdev);
- tdev = hwgraph_connectpt_get(tdev);
- }
- hwgraph_vertex_unref(tdev);
-
- return slot;
-}
-
-/*==========================================================================
- * BRIDGE PCI SLOT RELATED IOCTLs
- */
-char *pci_space_name[] = {"NONE",
- "ROM",
- "IO",
- "",
- "MEM",
- "MEM32",
- "MEM64",
- "CFG",
- "WIN0",
- "WIN1",
- "WIN2",
- "WIN3",
- "WIN4",
- "WIN5",
- "",
- "BAD"};
-
-
-/*ARGSUSED */
-int
-pcibr_ioctl(devfs_handle_t dev,
- int cmd,
- void *arg,
- int flag,
- struct cred *cr,
- int *rvalp)
-{
- devfs_handle_t pcibr_vhdl = hwgraph_connectpt_get((devfs_handle_t)dev);
-#ifdef LATER
- pcibr_soft_t pcibr_soft = pcibr_soft_get(pcibr_vhdl);
-#endif
- int error = 0;
-
- hwgraph_vertex_unref(pcibr_vhdl);
-
- switch (cmd) {
-#ifdef LATER
- case GIOCSETBW:
- {
- grio_ioctl_info_t info;
- pciio_slot_t slot = 0;
-
- if (!cap_able((uint64_t)CAP_DEVICE_MGT)) {
- error = EPERM;
- break;
- }
- if (COPYIN(arg, &info, sizeof(grio_ioctl_info_t))) {
- error = EFAULT;
- break;
- }
-#ifdef GRIO_DEBUG
- printk("pcibr:: prev_vhdl: %d reqbw: %lld\n",
- info.prev_vhdl, info.reqbw);
-#endif /* GRIO_DEBUG */
-
- if ((slot = pcibr_device_slot_get(info.prev_vhdl)) ==
- PCIIO_SLOT_NONE) {
- error = EIO;
- break;
- }
- if (info.reqbw)
- pcibr_priority_bits_set(pcibr_soft, slot, PCI_PRIO_HIGH);
- break;
- }
-
- case GIOCRELEASEBW:
- {
- grio_ioctl_info_t info;
- pciio_slot_t slot = 0;
-
- if (!cap_able(CAP_DEVICE_MGT)) {
- error = EPERM;
- break;
- }
- if (COPYIN(arg, &info, sizeof(grio_ioctl_info_t))) {
- error = EFAULT;
- break;
- }
-#ifdef GRIO_DEBUG
- printk("pcibr:: prev_vhdl: %d reqbw: %lld\n",
- info.prev_vhdl, info.reqbw);
-#endif /* GRIO_DEBUG */
-
- if ((slot = pcibr_device_slot_get(info.prev_vhdl)) ==
- PCIIO_SLOT_NONE) {
- error = EIO;
- break;
- }
- if (info.reqbw)
- pcibr_priority_bits_set(pcibr_soft, slot, PCI_PRIO_LOW);
- break;
- }
-
- case PCIBR_SLOT_POWERUP:
- {
- pciio_slot_t slot;
-
- if (!cap_able(CAP_DEVICE_MGT)) {
- error = EPERM;
- break;
- }
-
- slot = (pciio_slot_t)(uint64_t)arg;
- error = pcibr_slot_powerup(pcibr_vhdl,slot);
- break;
- }
- case PCIBR_SLOT_SHUTDOWN:
- if (!cap_able(CAP_DEVICE_MGT)) {
- error = EPERM;
- break;
- }
-
- slot = (pciio_slot_t)(uint64_t)arg;
- error = pcibr_slot_powerup(pcibr_vhdl,slot);
- break;
- }
- case PCIBR_SLOT_QUERY:
- {
- struct pcibr_slot_info_req_s req;
-
- if (!cap_able(CAP_DEVICE_MGT)) {
- error = EPERM;
- break;
- }
-
- if (COPYIN(arg, &req, sizeof(req))) {
- error = EFAULT;
- break;
- }
-
- error = pcibr_slot_query(pcibr_vhdl, &req);
- break;
- }
-#endif /* LATER */
- default:
- break;
-
- }
-
- return error;
-}
-
-void
-pcibr_freeblock_sub(iopaddr_t *free_basep,
- iopaddr_t *free_lastp,
- iopaddr_t base,
- size_t size)
-{
- iopaddr_t free_base = *free_basep;
- iopaddr_t free_last = *free_lastp;
- iopaddr_t last = base + size - 1;
-
- if ((last < free_base) || (base > free_last)); /* free block outside arena */
-
- else if ((base <= free_base) && (last >= free_last))
- /* free block contains entire arena */
- *free_basep = *free_lastp = 0;
-
- else if (base <= free_base)
- /* free block is head of arena */
- *free_basep = last + 1;
-
- else if (last >= free_last)
- /* free block is tail of arena */
- *free_lastp = base - 1;
-
- /*
- * We are left with two regions: the free area
- * in the arena "below" the block, and the free
- * area in the arena "above" the block. Keep
- * the one that is bigger.
- */
-
- else if ((base - free_base) > (free_last - last))
- *free_lastp = base - 1; /* keep lower chunk */
- else
- *free_basep = last + 1; /* keep upper chunk */
-}
-
-/* Convert from ssram_bits in control register to number of SSRAM entries */
-#define ATE_NUM_ENTRIES(n) _ate_info[n]
-
-/* Possible choices for number of ATE entries in Bridge's SSRAM */
-LOCAL int _ate_info[] =
-{
- 0, /* 0 entries */
- 8 * 1024, /* 8K entries */
- 16 * 1024, /* 16K entries */
- 64 * 1024 /* 64K entries */
-};
-
-#define ATE_NUM_SIZES (sizeof(_ate_info) / sizeof(int))
-#define ATE_PROBE_VALUE 0x0123456789abcdefULL
-
-/*
- * Determine the size of this bridge's external mapping SSRAM, and set
- * the control register appropriately to reflect this size, and initialize
- * the external SSRAM.
- */
-LOCAL int
-pcibr_init_ext_ate_ram(bridge_t *bridge)
-{
- int largest_working_size = 0;
- int num_entries, entry;
- int i, j;
- bridgereg_t old_enable, new_enable;
- int s;
-
- /* Probe SSRAM to determine its size. */
- old_enable = bridge->b_int_enable;
- new_enable = old_enable & ~BRIDGE_IMR_PCI_MST_TIMEOUT;
- bridge->b_int_enable = new_enable;
-
- for (i = 1; i < ATE_NUM_SIZES; i++) {
- /* Try writing a value */
- bridge->b_ext_ate_ram[ATE_NUM_ENTRIES(i) - 1] = ATE_PROBE_VALUE;
-
- /* Guard against wrap */
- for (j = 1; j < i; j++)
- bridge->b_ext_ate_ram[ATE_NUM_ENTRIES(j) - 1] = 0;
-
- /* See if value was written */
- if (bridge->b_ext_ate_ram[ATE_NUM_ENTRIES(i) - 1] == ATE_PROBE_VALUE)
- largest_working_size = i;
- }
- bridge->b_int_enable = old_enable;
- bridge->b_wid_tflush; /* wait until Bridge PIO complete */
-
- /*
- * ensure that we write and read without any interruption.
- * The read following the write is required for the Bridge war
- */
-
- s = splhi();
- bridge->b_wid_control = (bridge->b_wid_control
- & ~BRIDGE_CTRL_SSRAM_SIZE_MASK)
- | BRIDGE_CTRL_SSRAM_SIZE(largest_working_size);
- bridge->b_wid_control; /* inval addr bug war */
- splx(s);
-
- num_entries = ATE_NUM_ENTRIES(largest_working_size);
-
-#if PCIBR_ATE_DEBUG
- if (num_entries)
- printk("bridge at 0x%x: clearing %d external ATEs\n", bridge, num_entries);
- else
- printk("bridge at 0x%x: no externa9422l ATE RAM found\n", bridge);
-#endif
-
- /* Initialize external mapping entries */
- for (entry = 0; entry < num_entries; entry++)
- bridge->b_ext_ate_ram[entry] = 0;
-
- return (num_entries);
-}
-
-/*
- * Allocate "count" contiguous Bridge Address Translation Entries
- * on the specified bridge to be used for PCI to XTALK mappings.
- * Indices in rm map range from 1..num_entries. Indicies returned
- * to caller range from 0..num_entries-1.
- *
- * Return the start index on success, -1 on failure.
- */
-LOCAL int
-pcibr_ate_alloc(pcibr_soft_t pcibr_soft, int count)
-{
- int index = 0;
-
- index = (int) rmalloc(pcibr_soft->bs_int_ate_map, (size_t) count);
-/* printk("Colin: pcibr_ate_alloc - index %d count %d \n", index, count); */
-
- if (!index && pcibr_soft->bs_ext_ate_map)
- index = (int) rmalloc(pcibr_soft->bs_ext_ate_map, (size_t) count);
-
- /* rmalloc manages resources in the 1..n
- * range, with 0 being failure.
- * pcibr_ate_alloc manages resources
- * in the 0..n-1 range, with -1 being failure.
- */
- return index - 1;
-}
-
-LOCAL void
-pcibr_ate_free(pcibr_soft_t pcibr_soft, int index, int count)
-/* Who says there's no such thing as a free meal? :-) */
-{
- /* note the "+1" since rmalloc handles 1..n but
- * we start counting ATEs at zero.
- */
-/* printk("Colin: pcibr_ate_free - index %d count %d\n", index, count); */
-
- rmfree((index < pcibr_soft->bs_int_ate_size)
- ? pcibr_soft->bs_int_ate_map
- : pcibr_soft->bs_ext_ate_map,
- count, index + 1);
-}
-
-LOCAL pcibr_info_t
-pcibr_info_get(devfs_handle_t vhdl)
-{
- return (pcibr_info_t) pciio_info_get(vhdl);
-}
-
-pcibr_info_t
-pcibr_device_info_new(
- pcibr_soft_t pcibr_soft,
- pciio_slot_t slot,
- pciio_function_t rfunc,
- pciio_vendor_id_t vendor,
- pciio_device_id_t device)
-{
- pcibr_info_t pcibr_info;
- pciio_function_t func;
- int ibit;
-
- func = (rfunc == PCIIO_FUNC_NONE) ? 0 : rfunc;
-
- NEW(pcibr_info);
- pciio_device_info_new(&pcibr_info->f_c,
- pcibr_soft->bs_vhdl,
- slot, rfunc,
- vendor, device);
-
- if (slot != PCIIO_SLOT_NONE) {
-
- /*
- * Currently favored mapping from PCI
- * slot number and INTA/B/C/D to Bridge
- * PCI Interrupt Bit Number:
- *
- * SLOT A B C D
- * 0 0 4 0 4
- * 1 1 5 1 5
- * 2 2 6 2 6
- * 3 3 7 3 7
- * 4 4 0 4 0
- * 5 5 1 5 1
- * 6 6 2 6 2
- * 7 7 3 7 3
- *
- * XXX- allow pcibr_hints to override default
- * XXX- allow ADMIN to override pcibr_hints
- */
- for (ibit = 0; ibit < 4; ++ibit)
- pcibr_info->f_ibit[ibit] =
- (slot + 4 * ibit) & 7;
-
- /*
- * Record the info in the sparse func info space.
- */
- if (func < pcibr_soft->bs_slot[slot].bss_ninfo)
- pcibr_soft->bs_slot[slot].bss_infos[func] = pcibr_info;
- }
- return pcibr_info;
-}
-
-void
-pcibr_device_info_free(devfs_handle_t pcibr_vhdl, pciio_slot_t slot)
-{
- pcibr_soft_t pcibr_soft = pcibr_soft_get(pcibr_vhdl);
- pcibr_info_t pcibr_info;
- pciio_function_t func;
- pcibr_soft_slot_t slotp = &pcibr_soft->bs_slot[slot];
- int nfunc = slotp->bss_ninfo;
-
-
- for (func = 0; func < nfunc; func++) {
- pcibr_info = slotp->bss_infos[func];
-
- if (!pcibr_info)
- continue;
-
- slotp->bss_infos[func] = 0;
- pciio_device_info_unregister(pcibr_vhdl, &pcibr_info->f_c);
- pciio_device_info_free(&pcibr_info->f_c);
- DEL(pcibr_info);
- }
-
- /* Clear the DEVIO(x) for this slot */
- slotp->bss_devio.bssd_space = PCIIO_SPACE_NONE;
- slotp->bss_devio.bssd_base = PCIBR_D32_BASE_UNSET;
- slotp->bss_device = 0;
-
-
- /* Reset the mapping usage counters */
- slotp->bss_pmu_uctr = 0;
- slotp->bss_d32_uctr = 0;
- slotp->bss_d64_uctr = 0;
-
- /* Clear the Direct translation info */
- slotp->bss_d64_base = PCIBR_D64_BASE_UNSET;
- slotp->bss_d64_flags = 0;
- slotp->bss_d32_base = PCIBR_D32_BASE_UNSET;
- slotp->bss_d32_flags = 0;
-
- /* Clear out shadow info necessary for the external SSRAM workaround */
- slotp->bss_ext_ates_active = ATOMIC_INIT(0);
- slotp->bss_cmd_pointer = 0;
- slotp->bss_cmd_shadow = 0;
-
-}
-
-/*
- * PCI_ADDR_SPACE_LIMITS_LOAD
- * Gets the current values of
- * pci io base,
- * pci io last,
- * pci low memory base,
- * pci low memory last,
- * pci high memory base,
- * pci high memory last
- */
-#define PCI_ADDR_SPACE_LIMITS_LOAD() \
- pci_io_fb = pcibr_soft->bs_spinfo.pci_io_base; \
- pci_io_fl = pcibr_soft->bs_spinfo.pci_io_last; \
- pci_lo_fb = pcibr_soft->bs_spinfo.pci_swin_base; \
- pci_lo_fl = pcibr_soft->bs_spinfo.pci_swin_last; \
- pci_hi_fb = pcibr_soft->bs_spinfo.pci_mem_base; \
- pci_hi_fl = pcibr_soft->bs_spinfo.pci_mem_last;
-/*
- * PCI_ADDR_SPACE_LIMITS_STORE
- * Sets the current values of
- * pci io base,
- * pci io last,
- * pci low memory base,
- * pci low memory last,
- * pci high memory base,
- * pci high memory last
- */
-#define PCI_ADDR_SPACE_LIMITS_STORE() \
- pcibr_soft->bs_spinfo.pci_io_base = pci_io_fb; \
- pcibr_soft->bs_spinfo.pci_io_last = pci_io_fl; \
- pcibr_soft->bs_spinfo.pci_swin_base = pci_lo_fb; \
- pcibr_soft->bs_spinfo.pci_swin_last = pci_lo_fl; \
- pcibr_soft->bs_spinfo.pci_mem_base = pci_hi_fb; \
- pcibr_soft->bs_spinfo.pci_mem_last = pci_hi_fl;
-
-#define PCI_ADDR_SPACE_LIMITS_PRINT() \
- printf("+++++++++++++++++++++++\n" \
- "IO base 0x%x last 0x%x\n" \
- "SWIN base 0x%x last 0x%x\n" \
- "MEM base 0x%x last 0x%x\n" \
- "+++++++++++++++++++++++\n", \
- pcibr_soft->bs_spinfo.pci_io_base, \
- pcibr_soft->bs_spinfo.pci_io_last, \
- pcibr_soft->bs_spinfo.pci_swin_base, \
- pcibr_soft->bs_spinfo.pci_swin_last, \
- pcibr_soft->bs_spinfo.pci_mem_base, \
- pcibr_soft->bs_spinfo.pci_mem_last);
-
-/*
- * pcibr_slot_info_init
- * Probe for this slot and see if it is populated.
- * If it is populated initialize the generic PCI infrastructural
- * information associated with this particular PCI device.
- */
-int
-pcibr_slot_info_init(devfs_handle_t pcibr_vhdl,
- pciio_slot_t slot)
-{
- pcibr_soft_t pcibr_soft;
- pcibr_info_h pcibr_infoh;
- pcibr_info_t pcibr_info;
- bridge_t *bridge;
- cfg_p cfgw;
- unsigned idword;
- unsigned pfail;
- unsigned idwords[8];
- pciio_vendor_id_t vendor;
- pciio_device_id_t device;
- unsigned htype;
- cfg_p wptr;
- int win;
- pciio_space_t space;
- iopaddr_t pci_io_fb, pci_io_fl;
- iopaddr_t pci_lo_fb, pci_lo_fl;
- iopaddr_t pci_hi_fb, pci_hi_fl;
- int nfunc;
- pciio_function_t rfunc;
- int func;
- devfs_handle_t conn_vhdl;
- pcibr_soft_slot_t slotp;
-
- /* Get the basic software information required to proceed */
- pcibr_soft = pcibr_soft_get(pcibr_vhdl);
- if (!pcibr_soft)
- return(EINVAL);
-
- bridge = pcibr_soft->bs_base;
- if (!PCIBR_VALID_SLOT(slot))
- return(EINVAL);
-
- /* If we have a host slot (eg:- IOC3 has 2 PCI slots and the initialization
- * is done by the host slot then we are done.
- */
- if (pcibr_soft->bs_slot[slot].has_host) {
- return(0);
- }
-
- /* Check for a slot with any system critical functions */
- if (pcibr_is_slot_sys_critical(pcibr_vhdl, slot))
- return(EPERM);
-
- /* Load the current values of allocated PCI address spaces */
- PCI_ADDR_SPACE_LIMITS_LOAD();
-
- /* Try to read the device-id/vendor-id from the config space */
- cfgw = bridge->b_type0_cfg_dev[slot].l;
-
- if (pcibr_probe_slot(bridge, cfgw, &idword))
- return(ENODEV);
-
- slotp = &pcibr_soft->bs_slot[slot];
- slotp->slot_status |= SLOT_POWER_UP;
-
- vendor = 0xFFFF & idword;
- /* If the vendor id is not valid then the slot is not populated
- * and we are done.
- */
- if (vendor == 0xFFFF)
- return(ENODEV);
-
- device = 0xFFFF & (idword >> 16);
- htype = do_pcibr_config_get(cfgw, PCI_CFG_HEADER_TYPE, 1);
-
- nfunc = 1;
- rfunc = PCIIO_FUNC_NONE;
- pfail = 0;
-
- /* NOTE: if a card claims to be multifunction
- * but only responds to config space 0, treat
- * it as a unifunction card.
- */
-
- if (htype & 0x80) { /* MULTIFUNCTION */
- for (func = 1; func < 8; ++func) {
- cfgw = bridge->b_type0_cfg_dev[slot].f[func].l;
- if (pcibr_probe_slot(bridge, cfgw, &idwords[func])) {
- pfail |= 1 << func;
- continue;
- }
- vendor = 0xFFFF & idwords[func];
- if (vendor == 0xFFFF) {
- pfail |= 1 << func;
- continue;
- }
- nfunc = func + 1;
- rfunc = 0;
- }
- cfgw = bridge->b_type0_cfg_dev[slot].l;
- }
- NEWA(pcibr_infoh, nfunc);
-
- pcibr_soft->bs_slot[slot].bss_ninfo = nfunc;
- pcibr_soft->bs_slot[slot].bss_infos = pcibr_infoh;
-
- for (func = 0; func < nfunc; ++func) {
- unsigned cmd_reg;
-
- if (func) {
- if (pfail & (1 << func))
- continue;
-
- idword = idwords[func];
- cfgw = bridge->b_type0_cfg_dev[slot].f[func].l;
-
- device = 0xFFFF & (idword >> 16);
- htype = do_pcibr_config_get(cfgw, PCI_CFG_HEADER_TYPE, 1);
- rfunc = func;
- }
- htype &= 0x7f;
- if (htype != 0x00) {
- printk(KERN_WARNING "%s pcibr: pci slot %d func %d has strange header type 0x%x\n",
- pcibr_soft->bs_name, slot, func, htype);
- continue;
- }
-#if DEBUG && ATTACH_DEBUG
- printk(KERN_NOTICE
- "%s pcibr: pci slot %d func %d: vendor 0x%x device 0x%x",
- pcibr_soft->bs_name, slot, func, vendor, device);
-#endif
-
- pcibr_info = pcibr_device_info_new
- (pcibr_soft, slot, rfunc, vendor, device);
- conn_vhdl = pciio_device_info_register(pcibr_vhdl, &pcibr_info->f_c);
- if (func == 0)
- slotp->slot_conn = conn_vhdl;
-
-#ifdef LITTLE_ENDIAN
- cmd_reg = cfgw[(PCI_CFG_COMMAND ^ 4) / 4];
-#else
- cmd_reg = cfgw[PCI_CFG_COMMAND / 4];
-#endif
-
- wptr = cfgw + PCI_CFG_BASE_ADDR_0 / 4;
-
- for (win = 0; win < PCI_CFG_BASE_ADDRS; ++win) {
- iopaddr_t base, mask, code;
- size_t size;
-
- /*
- * GET THE BASE & SIZE OF THIS WINDOW:
- *
- * The low two or four bits of the BASE register
- * determines which address space we are in; the
- * rest is a base address. BASE registers
- * determine windows that are power-of-two sized
- * and naturally aligned, so we can get the size
- * of a window by writing all-ones to the
- * register, reading it back, and seeing which
- * bits are used for decode; the least
- * significant nonzero bit is also the size of
- * the window.
- *
- * WARNING: someone may already have allocated
- * some PCI space to this window, and in fact
- * PIO may be in process at this very moment
- * from another processor (or even from this
- * one, if we get interrupted)! So, if the BASE
- * already has a nonzero address, be generous
- * and use the LSBit of that address as the
- * size; this could overstate the window size.
- * Usually, when one card is set up, all are set
- * up; so, since we don't bitch about
- * overlapping windows, we are ok.
- *
- * UNFORTUNATELY, some cards do not clear their
- * BASE registers on reset. I have two heuristics
- * that can detect such cards: first, if the
- * decode enable is turned off for the space
- * that the window uses, we can disregard the
- * initial value. second, if the address is
- * outside the range that we use, we can disregard
- * it as well.
- *
- * This is looking very PCI generic. Except for
- * knowing how many slots and where their config
- * spaces are, this window loop and the next one
- * could probably be shared with other PCI host
- * adapters. It would be interesting to see if
- * this could be pushed up into pciio, when we
- * start supporting more PCI providers.
- */
-#ifdef LITTLE_ENDIAN
- base = wptr[((win*4)^4)/4];
-#else
- base = wptr[win];
-#endif
-
- if (base & PCI_BA_IO_SPACE) {
- /* BASE is in I/O space. */
- space = PCIIO_SPACE_IO;
- mask = -4;
- code = base & 3;
- base = base & mask;
- if (base == 0) {
- ; /* not assigned */
- } else if (!(cmd_reg & PCI_CMD_IO_SPACE)) {
- base = 0; /* decode not enabled */
- }
- } else {
- /* BASE is in MEM space. */
- space = PCIIO_SPACE_MEM;
- mask = -16;
- code = base & PCI_BA_MEM_LOCATION; /* extract BAR type */
- base = base & mask;
- if (base == 0) {
- ; /* not assigned */
- } else if (!(cmd_reg & PCI_CMD_MEM_SPACE)) {
- base = 0; /* decode not enabled */
- } else if (base & 0xC0000000) {
- base = 0; /* outside permissable range */
- } else if ((code == PCI_BA_MEM_64BIT) &&
-#ifdef LITTLE_ENDIAN
- (wptr[(((win + 1)*4)^4)/4] != 0)) {
-#else
- (wptr[win + 1] != 0)) {
-#endif /* LITTLE_ENDIAN */
- base = 0; /* outside permissable range */
- }
- }
-
- if (base != 0) { /* estimate size */
- size = base & -base;
- } else { /* calculate size */
-#ifdef LITTLE_ENDIAN
- wptr[((win*4)^4)/4] = ~0; /* turn on all bits */
- size = wptr[((win*4)^4)/4]; /* get stored bits */
-#else
- wptr[win] = ~0; /* turn on all bits */
- size = wptr[win]; /* get stored bits */
-#endif /* LITTLE_ENDIAN */
- size &= mask; /* keep addr */
- size &= -size; /* keep lsbit */
- if (size == 0)
- continue;
- }
-
- pcibr_info->f_window[win].w_space = space;
- pcibr_info->f_window[win].w_base = base;
- pcibr_info->f_window[win].w_size = size;
-
- /*
- * If this window already has PCI space
- * allocated for it, "subtract" that space from
- * our running freeblocks. Don't worry about
- * overlaps in existing allocated windows; we
- * may be overstating their sizes anyway.
- */
-
- if (base && size) {
- if (space == PCIIO_SPACE_IO) {
- pcibr_freeblock_sub(&pci_io_fb,
- &pci_io_fl,
- base, size);
- } else {
- pcibr_freeblock_sub(&pci_lo_fb,
- &pci_lo_fl,
- base, size);
- pcibr_freeblock_sub(&pci_hi_fb,
- &pci_hi_fl,
- base, size);
- }
- }
-#if defined(IOC3_VENDOR_ID_NUM) && defined(IOC3_DEVICE_ID_NUM)
- /*
- * IOC3 BASE_ADDR* BUG WORKAROUND
- *
-
- * If we write to BASE1 on the IOC3, the
- * data in BASE0 is replaced. The
- * original workaround was to remember
- * the value of BASE0 and restore it
- * when we ran off the end of the BASE
- * registers; however, a later
- * workaround was added (I think it was
- * rev 1.44) to avoid setting up
- * anything but BASE0, with the comment
- * that writing all ones to BASE1 set
- * the enable-parity-error test feature
- * in IOC3's SCR bit 14.
- *
- * So, unless we defer doing any PCI
- * space allocation until drivers
- * attach, and set up a way for drivers
- * (the IOC3 in paricular) to tell us
- * generically to keep our hands off
- * BASE registers, we gotta "know" about
- * the IOC3 here.
- *
- * Too bad the PCI folks didn't reserve the
- * all-zero value for 'no BASE here' (it is a
- * valid code for an uninitialized BASE in
- * 32-bit PCI memory space).
- */
-
- if ((vendor == IOC3_VENDOR_ID_NUM) &&
- (device == IOC3_DEVICE_ID_NUM))
- break;
-#endif
- if (code == PCI_BA_MEM_64BIT) {
- win++; /* skip upper half */
-#ifdef LITTLE_ENDIAN
- wptr[((win*4)^4)/4] = 0; /* which must be zero */
-#else
- wptr[win] = 0; /* which must be zero */
-#endif /* LITTLE_ENDIAN */
- }
- } /* next win */
- } /* next func */
-
- /* Store back the values for allocated PCI address spaces */
- PCI_ADDR_SPACE_LIMITS_STORE();
- return(0);
-}
-
-/*
- * pcibr_slot_info_free
- * Remove all the PCI infrastructural information associated
- * with a particular PCI device.
- */
-int
-pcibr_slot_info_free(devfs_handle_t pcibr_vhdl,
- pciio_slot_t slot)
-{
- pcibr_soft_t pcibr_soft;
- pcibr_info_h pcibr_infoh;
- int nfunc;
-
- pcibr_soft = pcibr_soft_get(pcibr_vhdl);
-
- if (!pcibr_soft || !PCIBR_VALID_SLOT(slot))
- return(EINVAL);
-
- nfunc = pcibr_soft->bs_slot[slot].bss_ninfo;
-
- pcibr_device_info_free(pcibr_vhdl, slot);
-
- pcibr_infoh = pcibr_soft->bs_slot[slot].bss_infos;
- DELA(pcibr_infoh,nfunc);
- pcibr_soft->bs_slot[slot].bss_ninfo = 0;
-
- return(0);
-}
-
-int as_debug = 0;
-/*
- * pcibr_slot_addr_space_init
- * Reserve chunks of PCI address space as required by
- * the base registers in the card.
- */
-int
-pcibr_slot_addr_space_init(devfs_handle_t pcibr_vhdl,
- pciio_slot_t slot)
-{
- pcibr_soft_t pcibr_soft;
- pcibr_info_h pcibr_infoh;
- pcibr_info_t pcibr_info;
- bridge_t *bridge;
- iopaddr_t pci_io_fb, pci_io_fl;
- iopaddr_t pci_lo_fb, pci_lo_fl;
- iopaddr_t pci_hi_fb, pci_hi_fl;
- size_t align;
- iopaddr_t mask;
- int nbars;
- int nfunc;
- int func;
- int win;
-
- pcibr_soft = pcibr_soft_get(pcibr_vhdl);
-
- if (!pcibr_soft || !PCIBR_VALID_SLOT(slot))
- return(EINVAL);
-
- bridge = pcibr_soft->bs_base;
-
- /* Get the current values for the allocated PCI address spaces */
- PCI_ADDR_SPACE_LIMITS_LOAD();
-
- if (as_debug)
-#ifdef LATER
- PCI_ADDR_SPACE_LIMITS_PRINT();
-#endif
- /* allocate address space,
- * for windows that have not been
- * previously assigned.
- */
- if (pcibr_soft->bs_slot[slot].has_host) {
- return(0);
- }
-
- nfunc = pcibr_soft->bs_slot[slot].bss_ninfo;
- if (nfunc < 1)
- return(EINVAL);
-
- pcibr_infoh = pcibr_soft->bs_slot[slot].bss_infos;
- if (!pcibr_infoh)
- return(EINVAL);
-
- /*
- * Try to make the DevIO windows not
- * overlap by pushing the "io" and "hi"
- * allocation areas up to the next one
- * or two megabyte bound. This also
- * keeps them from being zero.
- *
- * DO NOT do this with "pci_lo" since
- * the entire "lo" area is only a
- * megabyte, total ...
- */
- align = (slot < 2) ? 0x200000 : 0x100000;
- mask = -align;
- pci_io_fb = (pci_io_fb + align - 1) & mask;
- pci_hi_fb = (pci_hi_fb + align - 1) & mask;
-
- for (func = 0; func < nfunc; ++func) {
- cfg_p cfgw;
- cfg_p wptr;
- pciio_space_t space;
- iopaddr_t base;
- size_t size;
- cfg_p pci_cfg_cmd_reg_p;
- unsigned pci_cfg_cmd_reg;
- unsigned pci_cfg_cmd_reg_add = 0;
-
- pcibr_info = pcibr_infoh[func];
-
- if (!pcibr_info)
- continue;
-
- if (pcibr_info->f_vendor == PCIIO_VENDOR_ID_NONE)
- continue;
-
- cfgw = bridge->b_type0_cfg_dev[slot].f[func].l;
- wptr = cfgw + PCI_CFG_BASE_ADDR_0 / 4;
-
- nbars = PCI_CFG_BASE_ADDRS;
-
- for (win = 0; win < nbars; ++win) {
-
- space = pcibr_info->f_window[win].w_space;
- base = pcibr_info->f_window[win].w_base;
- size = pcibr_info->f_window[win].w_size;
-
- if (size < 1)
- continue;
-
- if (base >= size) {
-#if DEBUG && PCI_DEBUG
- printk("pcibr: slot %d func %d window %d is in %d[0x%x..0x%x], alloc by prom\n",
- slot, func, win, space, base, base + size - 1);
-#endif
- continue; /* already allocated */
- }
- align = size; /* ie. 0x00001000 */
- if (align < _PAGESZ)
- align = _PAGESZ; /* ie. 0x00004000 */
- mask = -align; /* ie. 0xFFFFC000 */
-
- switch (space) {
- case PCIIO_SPACE_IO:
- base = (pci_io_fb + align - 1) & mask;
- if ((base + size) > pci_io_fl) {
- base = 0;
- break;
- }
- pci_io_fb = base + size;
- break;
-
- case PCIIO_SPACE_MEM:
-#ifdef LITTLE_ENDIAN
- if ((wptr[((win*4)^4)/4] & PCI_BA_MEM_LOCATION) ==
-#else
- if ((wptr[win] & PCI_BA_MEM_LOCATION) ==
-#endif /* LITTLE_ENDIAN */
- PCI_BA_MEM_1MEG) {
- /* allocate from 20-bit PCI space */
- base = (pci_lo_fb + align - 1) & mask;
- if ((base + size) > pci_lo_fl) {
- base = 0;
- break;
- }
- pci_lo_fb = base + size;
- } else {
- /* allocate from 32-bit or 64-bit PCI space */
- base = (pci_hi_fb + align - 1) & mask;
- if ((base + size) > pci_hi_fl) {
- base = 0;
- break;
- }
- pci_hi_fb = base + size;
- }
- break;
-
- default:
- base = 0;
-#if DEBUG && PCI_DEBUG
- printk("pcibr: slot %d window %d had bad space code %d\n",
- slot, win, space);
-#endif
- }
- pcibr_info->f_window[win].w_base = base;
-#ifdef LITTLE_ENDIAN
- wptr[((win*4)^4)/4] = base;
-#if DEBUG && PCI_DEBUG
- printk("Setting base address 0x%p base 0x%x\n", &(wptr[((win*4)^4)/4]), base);
-#endif
-#else
- wptr[win] = base;
-#endif /* LITTLE_ENDIAN */
-
-#if DEBUG && PCI_DEBUG
- if (base >= size)
- printk("pcibr: slot %d func %d window %d is in %d [0x%x..0x%x], alloc by pcibr\n",
- slot, func, win, space, base, base + size - 1);
- else
- printk("pcibr: slot %d func %d window %d, unable to alloc 0x%x in 0x%p\n",
- slot, func, win, size, space);
-#endif
- } /* next base */
-
- /*
- * Allocate space for the EXPANSION ROM
- * NOTE: DO NOT DO THIS ON AN IOC3,
- * as it blows the system away.
- */
- base = size = 0;
- if ((pcibr_soft->bs_slot[slot].bss_vendor_id != IOC3_VENDOR_ID_NUM) ||
- (pcibr_soft->bs_slot[slot].bss_device_id != IOC3_DEVICE_ID_NUM)) {
-
- wptr = cfgw + PCI_EXPANSION_ROM / 4;
-#ifdef LITTLE_ENDIAN
- wptr[1] = 0xFFFFF000;
- mask = wptr[1];
-#else
- *wptr = 0xFFFFF000;
- mask = *wptr;
-#endif /* LITTLE_ENDIAN */
- if (mask & 0xFFFFF000) {
- size = mask & -mask;
- align = size;
- if (align < _PAGESZ)
- align = _PAGESZ;
- mask = -align;
- base = (pci_hi_fb + align - 1) & mask;
- if ((base + size) > pci_hi_fl)
- base = size = 0;
- else {
- pci_hi_fb = base + size;
-#ifdef LITTLE_ENDIAN
- wptr[1] = base;
-#else
- *wptr = base;
-#endif /* LITTLE_ENDIAN */
-#if DEBUG && PCI_DEBUG
- printk("%s/%d ROM in 0x%lx..0x%lx (alloc by pcibr)\n",
- pcibr_soft->bs_name, slot,
- base, base + size - 1);
-#endif
- }
- }
- }
- pcibr_info->f_rbase = base;
- pcibr_info->f_rsize = size;
-
- /*
- * if necessary, update the board's
- * command register to enable decoding
- * in the windows we added.
- *
- * There are some bits we always want to
- * be sure are set.
- */
- pci_cfg_cmd_reg_add |= PCI_CMD_IO_SPACE;
-
- /*
- * The Adaptec 1160 FC Controller WAR #767995:
- * The part incorrectly ignores the upper 32 bits of a 64 bit
- * address when decoding references to its registers so to
- * keep it from responding to a bus cycle that it shouldn't
- * we only use I/O space to get at it's registers. Don't
- * enable memory space accesses on that PCI device.
- */
- #define FCADP_VENDID 0x9004 /* Adaptec Vendor ID from fcadp.h */
- #define FCADP_DEVID 0x1160 /* Adaptec 1160 Device ID from fcadp.h */
-
- if ((pcibr_info->f_vendor != FCADP_VENDID) ||
- (pcibr_info->f_device != FCADP_DEVID))
- pci_cfg_cmd_reg_add |= PCI_CMD_MEM_SPACE;
-
- pci_cfg_cmd_reg_add |= PCI_CMD_BUS_MASTER;
-
- pci_cfg_cmd_reg_p = cfgw + PCI_CFG_COMMAND / 4;
- pci_cfg_cmd_reg = *pci_cfg_cmd_reg_p;
-#if PCI_FBBE /* XXX- check here to see if dev can do fast-back-to-back */
- if (!((pci_cfg_cmd_reg >> 16) & PCI_STAT_F_BK_BK_CAP))
- fast_back_to_back_enable = 0;
-#endif
- pci_cfg_cmd_reg &= 0xFFFF;
- if (pci_cfg_cmd_reg_add & ~pci_cfg_cmd_reg)
- *pci_cfg_cmd_reg_p = pci_cfg_cmd_reg | pci_cfg_cmd_reg_add;
-
- } /* next func */
-
- /* Now that we have allocated new chunks of PCI address spaces to this
- * card we need to update the bookkeeping values which indicate
- * the current PCI address space allocations.
- */
- PCI_ADDR_SPACE_LIMITS_STORE();
- return(0);
-}
-
-/*
- * pcibr_slot_device_init
- * Setup the device register in the bridge for this PCI slot.
- */
-int
-pcibr_slot_device_init(devfs_handle_t pcibr_vhdl,
- pciio_slot_t slot)
-{
- pcibr_soft_t pcibr_soft;
- bridge_t *bridge;
- bridgereg_t devreg;
-
- pcibr_soft = pcibr_soft_get(pcibr_vhdl);
-
- if (!pcibr_soft || !PCIBR_VALID_SLOT(slot))
- return(EINVAL);
-
- bridge = pcibr_soft->bs_base;
-
- /*
- * Adjustments to Device(x)
- * and init of bss_device shadow
- */
- devreg = bridge->b_device[slot].reg;
- devreg &= ~BRIDGE_DEV_PAGE_CHK_DIS;
- devreg |= BRIDGE_DEV_COH | BRIDGE_DEV_VIRTUAL_EN;
-#ifdef LITTLE_ENDIAN
- devreg |= BRIDGE_DEV_DEV_SWAP;
-#endif
- pcibr_soft->bs_slot[slot].bss_device = devreg;
- bridge->b_device[slot].reg = devreg;
-
-#if DEBUG && PCI_DEBUG
- printk("pcibr Device(%d): 0x%lx\n", slot, bridge->b_device[slot].reg);
-#endif
-
-#if DEBUG && PCI_DEBUG
- printk("pcibr: PCI space allocation done.\n");
-#endif
-
- return(0);
-}
-
-/*
- * pcibr_slot_guest_info_init
- * Setup the host/guest relations for a PCI slot.
- */
-int
-pcibr_slot_guest_info_init(devfs_handle_t pcibr_vhdl,
- pciio_slot_t slot)
-{
- pcibr_soft_t pcibr_soft;
- pcibr_info_h pcibr_infoh;
- pcibr_info_t pcibr_info;
- pcibr_soft_slot_t slotp;
-
- pcibr_soft = pcibr_soft_get(pcibr_vhdl);
-
- if (!pcibr_soft || !PCIBR_VALID_SLOT(slot))
- return(EINVAL);
-
- slotp = &pcibr_soft->bs_slot[slot];
-
- /* create info and verticies for guest slots;
- * for compatibilitiy macros, create info
- * for even unpopulated slots (but do not
- * build verticies for them).
- */
- if (pcibr_soft->bs_slot[slot].bss_ninfo < 1) {
- NEWA(pcibr_infoh, 1);
- pcibr_soft->bs_slot[slot].bss_ninfo = 1;
- pcibr_soft->bs_slot[slot].bss_infos = pcibr_infoh;
-
- pcibr_info = pcibr_device_info_new
- (pcibr_soft, slot, PCIIO_FUNC_NONE,
- PCIIO_VENDOR_ID_NONE, PCIIO_DEVICE_ID_NONE);
-
- if (pcibr_soft->bs_slot[slot].has_host) {
- slotp->slot_conn = pciio_device_info_register
- (pcibr_vhdl, &pcibr_info->f_c);
- }
- }
-
- /* generate host/guest relations
- */
- if (pcibr_soft->bs_slot[slot].has_host) {
- int host = pcibr_soft->bs_slot[slot].host_slot;
- pcibr_soft_slot_t host_slotp = &pcibr_soft->bs_slot[host];
-
- hwgraph_edge_add(slotp->slot_conn,
- host_slotp->slot_conn,
- EDGE_LBL_HOST);
-
- /* XXX- only gives us one guest edge per
- * host. If/when we have a host with more than
- * one guest, we will need to figure out how
- * the host finds all its guests, and sorts
- * out which one is which.
- */
- hwgraph_edge_add(host_slotp->slot_conn,
- slotp->slot_conn,
- EDGE_LBL_GUEST);
- }
-
- return(0);
-}
-
-/*
- * pcibr_slot_initial_rrb_alloc
- * Allocate a default number of rrbs for this slot on
- * the two channels. This is dictated by the rrb allocation
- * strategy routine defined per platform.
- */
-
-int
-pcibr_slot_initial_rrb_alloc(devfs_handle_t pcibr_vhdl,
- pciio_slot_t slot)
-{
- pcibr_soft_t pcibr_soft;
- pcibr_info_h pcibr_infoh;
- pcibr_info_t pcibr_info;
- bridge_t *bridge;
- int c0, c1;
- int r;
-
- pcibr_soft = pcibr_soft_get(pcibr_vhdl);
-
- if (!pcibr_soft || !PCIBR_VALID_SLOT(slot))
- return(EINVAL);
-
- bridge = pcibr_soft->bs_base;
-
- /* How may RRBs are on this slot?
- */
- c0 = do_pcibr_rrb_count_valid(bridge, slot);
- c1 = do_pcibr_rrb_count_valid(bridge, slot + PCIBR_RRB_SLOT_VIRTUAL);
-
-#if PCIBR_RRB_DEBUG
- printk("pcibr_attach: slot %d started with %d+%d\n", slot, c0, c1);
-#endif
-
- /* Do we really need any?
- */
- pcibr_infoh = pcibr_soft->bs_slot[slot].bss_infos;
- pcibr_info = pcibr_infoh[0];
- if ((pcibr_info->f_vendor == PCIIO_VENDOR_ID_NONE) &&
- !pcibr_soft->bs_slot[slot].has_host) {
- if (c0 > 0)
- do_pcibr_rrb_free(bridge, slot, c0);
- if (c1 > 0)
- do_pcibr_rrb_free(bridge, slot + PCIBR_RRB_SLOT_VIRTUAL, c1);
- pcibr_soft->bs_rrb_valid[slot] = 0x1000;
- pcibr_soft->bs_rrb_valid[slot + PCIBR_RRB_SLOT_VIRTUAL] = 0x1000;
- return(ENODEV);
- }
-
- pcibr_soft->bs_rrb_avail[slot & 1] -= c0 + c1;
- pcibr_soft->bs_rrb_valid[slot] = c0;
- pcibr_soft->bs_rrb_valid[slot + PCIBR_RRB_SLOT_VIRTUAL] = c1;
-
- pcibr_soft->bs_rrb_avail[0] = do_pcibr_rrb_count_avail(bridge, 0);
- pcibr_soft->bs_rrb_avail[1] = do_pcibr_rrb_count_avail(bridge, 1);
-
- r = 3 - (c0 + c1);
-
- if (r > 0) {
- pcibr_soft->bs_rrb_res[slot] = r;
- pcibr_soft->bs_rrb_avail[slot & 1] -= r;
- }
-
-#if PCIBR_RRB_DEBUG
- printk("\t%d+%d+%d",
- 0xFFF & pcibr_soft->bs_rrb_valid[slot],
- 0xFFF & pcibr_soft->bs_rrb_valid[slot + PCIBR_RRB_SLOT_VIRTUAL],
- pcibr_soft->bs_rrb_res[slot]);
- printk("\n");
-#endif
-
- return(0);
-}
-
-/*
- * pcibr_slot_call_device_attach
- * This calls the associated driver attach routine for the PCI
- * card in this slot.
- */
-int
-pcibr_slot_call_device_attach(devfs_handle_t pcibr_vhdl,
- pciio_slot_t slot,
- int drv_flags)
-{
- pcibr_soft_t pcibr_soft;
- pcibr_info_h pcibr_infoh;
- pcibr_info_t pcibr_info;
- async_attach_t aa = NULL;
- int func;
- devfs_handle_t xconn_vhdl,conn_vhdl;
- int nfunc;
- int error_func;
- int error_slot = 0;
- int error = ENODEV;
-
- pcibr_soft = pcibr_soft_get(pcibr_vhdl);
-
- if (!pcibr_soft || !PCIBR_VALID_SLOT(slot))
- return(EINVAL);
-
-
- if (pcibr_soft->bs_slot[slot].has_host) {
- return(EPERM);
- }
-
- xconn_vhdl = pcibr_soft->bs_conn;
- aa = async_attach_get_info(xconn_vhdl);
-
- nfunc = pcibr_soft->bs_slot[slot].bss_ninfo;
- pcibr_infoh = pcibr_soft->bs_slot[slot].bss_infos;
-
- for (func = 0; func < nfunc; ++func) {
-
- pcibr_info = pcibr_infoh[func];
-
- if (!pcibr_info)
- continue;
-
- if (pcibr_info->f_vendor == PCIIO_VENDOR_ID_NONE)
- continue;
-
- conn_vhdl = pcibr_info->f_vertex;
-
-#ifdef LATER
- /*
- * Activate if and when we support cdl.
- */
- if (aa)
- async_attach_add_info(conn_vhdl, aa);
-#endif /* LATER */
-
- error_func = pciio_device_attach(conn_vhdl, drv_flags);
-
- pcibr_info->f_att_det_error = error_func;
-
- if (error_func)
- error_slot = error_func;
-
- error = error_slot;
-
- } /* next func */
-
- if (error) {
- if ((error != ENODEV) && (error != EUNATCH))
- pcibr_soft->bs_slot[slot].slot_status |= SLOT_STARTUP_INCMPLT;
- } else {
- pcibr_soft->bs_slot[slot].slot_status |= SLOT_STARTUP_CMPLT;
- }
-
- return(error);
-}
-
-/*
- * pcibr_slot_call_device_detach
- * This calls the associated driver detach routine for the PCI
- * card in this slot.
- */
-int
-pcibr_slot_call_device_detach(devfs_handle_t pcibr_vhdl,
- pciio_slot_t slot,
- int drv_flags)
-{
- pcibr_soft_t pcibr_soft;
- pcibr_info_h pcibr_infoh;
- pcibr_info_t pcibr_info;
- int func;
- devfs_handle_t conn_vhdl = GRAPH_VERTEX_NONE;
- int nfunc;
- int error_func;
- int error_slot = 0;
- int error = ENODEV;
-
- pcibr_soft = pcibr_soft_get(pcibr_vhdl);
-
- if (!pcibr_soft || !PCIBR_VALID_SLOT(slot))
- return(EINVAL);
-
- if (pcibr_soft->bs_slot[slot].has_host)
- return(EPERM);
-
- /* Make sure that we do not detach a system critical function vertex */
- if(pcibr_is_slot_sys_critical(pcibr_vhdl, slot))
- return(EPERM);
-
- nfunc = pcibr_soft->bs_slot[slot].bss_ninfo;
- pcibr_infoh = pcibr_soft->bs_slot[slot].bss_infos;
-
- for (func = 0; func < nfunc; ++func) {
-
- pcibr_info = pcibr_infoh[func];
-
- if (!pcibr_info)
- continue;
-
- if (pcibr_info->f_vendor == PCIIO_VENDOR_ID_NONE)
- continue;
-
- conn_vhdl = pcibr_info->f_vertex;
-
- error_func = pciio_device_detach(conn_vhdl, drv_flags);
-
- pcibr_info->f_att_det_error = error_func;
-
- if (error_func)
- error_slot = error_func;
-
- error = error_slot;
-
- } /* next func */
-
- pcibr_soft->bs_slot[slot].slot_status &= ~SLOT_STATUS_MASK;
-
- if (error) {
- if ((error != ENODEV) && (error != EUNATCH))
- pcibr_soft->bs_slot[slot].slot_status |= SLOT_SHUTDOWN_INCMPLT;
- } else {
- if (conn_vhdl != GRAPH_VERTEX_NONE)
- pcibr_device_unregister(conn_vhdl);
- pcibr_soft->bs_slot[slot].slot_status |= SLOT_SHUTDOWN_CMPLT;
- }
-
- return(error);
-}
-
-/*
- * pcibr_slot_detach
- * This is a place holder routine to keep track of all the
- * slot-specific freeing that needs to be done.
- */
-int
-pcibr_slot_detach(devfs_handle_t pcibr_vhdl,
- pciio_slot_t slot,
- int drv_flags)
-{
- int error;
-
- /* Call the device detach function */
- error = (pcibr_slot_call_device_detach(pcibr_vhdl, slot, drv_flags));
- return (error);
-
-}
-
-/*
- * pcibr_is_slot_sys_critical
- * Check slot for any functions that are system critical.
- * Return 1 if any are system critical or 0 otherwise.
- *
- * This function will always return 0 when called by
- * pcibr_attach() because the system critical vertices
- * have not yet been set in the hwgraph.
- */
-int
-pcibr_is_slot_sys_critical(devfs_handle_t pcibr_vhdl,
- pciio_slot_t slot)
-{
- pcibr_soft_t pcibr_soft;
- pcibr_info_h pcibr_infoh;
- pcibr_info_t pcibr_info;
- devfs_handle_t conn_vhdl = GRAPH_VERTEX_NONE;
- int nfunc;
- int func;
-
- pcibr_soft = pcibr_soft_get(pcibr_vhdl);
- if (!pcibr_soft || !PCIBR_VALID_SLOT(slot))
- return(0);
-
- nfunc = pcibr_soft->bs_slot[slot].bss_ninfo;
- pcibr_infoh = pcibr_soft->bs_slot[slot].bss_infos;
-
- for (func = 0; func < nfunc; ++func) {
-
- pcibr_info = pcibr_infoh[func];
- if (!pcibr_info)
- continue;
-
- if (pcibr_info->f_vendor == PCIIO_VENDOR_ID_NONE)
- continue;
-
- conn_vhdl = pcibr_info->f_vertex;
- if (is_sys_critical_vertex(conn_vhdl)) {
-#if defined(SUPPORT_PRINTING_V_FORMAT)
- printk(KERN_WARNING "%v is a system critical device vertex\n", conn_vhdl);
-#else
- printk(KERN_WARNING "%p is a system critical device vertex\n", (void *)conn_vhdl);
-#endif
- return(1);
- }
-
- }
-
- return(0);
-}
-
-/*
- * pcibr_device_unregister
- * This frees up any hardware resources reserved for this PCI device
- * and removes any PCI infrastructural information setup for it.
- * This is usually used at the time of shutting down of the PCI card.
- */
-int
-pcibr_device_unregister(devfs_handle_t pconn_vhdl)
-{
- pciio_info_t pciio_info;
- devfs_handle_t pcibr_vhdl;
- pciio_slot_t slot;
- pcibr_soft_t pcibr_soft;
- bridge_t *bridge;
- int error_call;
- int error = 0;
-
- pciio_info = pciio_info_get(pconn_vhdl);
-
- pcibr_vhdl = pciio_info_master_get(pciio_info);
- slot = pciio_info_slot_get(pciio_info);
-
- pcibr_soft = pcibr_soft_get(pcibr_vhdl);
- bridge = pcibr_soft->bs_base;
-
- /* Clear all the hardware xtalk resources for this device */
- xtalk_widgetdev_shutdown(pcibr_soft->bs_conn, slot);
-
- /* Flush all the rrbs */
- pcibr_rrb_flush(pconn_vhdl);
-
- /* Free the rrbs allocated to this slot */
- error_call = do_pcibr_rrb_free(bridge, slot,
- pcibr_soft->bs_rrb_valid[slot] +
- pcibr_soft->bs_rrb_valid[slot +
- PCIBR_RRB_SLOT_VIRTUAL]);
-
- if (error_call)
- error = ERANGE;
-
- pcibr_soft->bs_rrb_valid[slot] = 0;
- pcibr_soft->bs_rrb_valid[slot + PCIBR_RRB_SLOT_VIRTUAL] = 0;
- pcibr_soft->bs_rrb_res[slot] = 0;
-
- /* Flush the write buffers !! */
- error_call = pcibr_wrb_flush(pconn_vhdl);
-
- if (error_call)
- error = error_call;
-
- /* Clear the information specific to the slot */
- error_call = pcibr_slot_info_free(pcibr_vhdl, slot);
-
- if (error_call)
- error = error_call;
-
- return(error);
-
-}
-
-/*
- * build a convenience link path in the
- * form of ".../<iobrick>/bus/<busnum>"
- *
- * returns 1 on success, 0 otherwise
- *
- * depends on hwgraph separator == '/'
- */
-int
-pcibr_bus_cnvlink(devfs_handle_t f_c, int slot)
-{
- char dst[MAXDEVNAME];
- char *dp = dst;
- char *cp, *xp;
- int widgetnum;
- char pcibus[8];
- devfs_handle_t nvtx, svtx;
- int rv;
-
-#if DEBUG
- printk("pcibr_bus_cnvlink: slot= %d f_c= %p\n",
- slot, f_c);
- {
- int pos;
- char dname[256];
- pos = devfs_generate_path(f_c, dname, 256);
- printk("%s : path= %s\n", __FUNCTION__, &dname[pos]);
- }
-#endif
-
- if (GRAPH_SUCCESS != hwgraph_vertex_name_get(f_c, dst, MAXDEVNAME))
- return 0;
-
- /* dst example == /hw/module/001c02/Pbrick/xtalk/8/pci/direct */
-
- /* find the widget number */
- xp = strstr(dst, "/"EDGE_LBL_XTALK"/");
- if (xp == NULL)
- return 0;
- widgetnum = atoi(xp+7);
- if (widgetnum < XBOW_PORT_8 || widgetnum > XBOW_PORT_F)
- return 0;
-
- /* remove "/pci/direct" from path */
- cp = strstr(dst, "/" EDGE_LBL_PCI "/" "direct");
- if (cp == NULL)
- return 0;
- *cp = (char)NULL;
-
- /* get the vertex for the widget */
- if (GRAPH_SUCCESS != hwgraph_traverse(NULL, dp, &svtx))
- return 0;
-
- *xp = (char)NULL; /* remove "/xtalk/..." from path */
-
- /* dst example now == /hw/module/001c02/Pbrick */
-
- /* get the bus number */
- strcat(dst, "/bus");
- sprintf(pcibus, "%d", p_busnum[widgetnum]);
-
- /* link to bus to widget */
- rv = hwgraph_path_add(NULL, dp, &nvtx);
- if (GRAPH_SUCCESS == rv)
- rv = hwgraph_edge_add(nvtx, svtx, pcibus);
-
- return (rv == GRAPH_SUCCESS);
-}
-
-
-/*
- * pcibr_attach: called every time the crosstalk
- * infrastructure is asked to initialize a widget
- * that matches the part number we handed to the
- * registration routine above.
- */
-/*ARGSUSED */
-int
-pcibr_attach(devfs_handle_t xconn_vhdl)
-{
- /* REFERENCED */
- graph_error_t rc;
- devfs_handle_t pcibr_vhdl;
- devfs_handle_t ctlr_vhdl;
- bridge_t *bridge = NULL;
- bridgereg_t id;
- int rev;
- pcibr_soft_t pcibr_soft;
- pcibr_info_t pcibr_info;
- xwidget_info_t info;
- xtalk_intr_t xtalk_intr;
- device_desc_t dev_desc = (device_desc_t)0;
- int slot;
- int ibit;
- devfs_handle_t noslot_conn;
- char devnm[MAXDEVNAME], *s;
- pcibr_hints_t pcibr_hints;
- bridgereg_t b_int_enable;
- unsigned rrb_fixed = 0;
-
- iopaddr_t pci_io_fb, pci_io_fl;
- iopaddr_t pci_lo_fb, pci_lo_fl;
- iopaddr_t pci_hi_fb, pci_hi_fl;
-
- int spl_level;
-#ifdef LATER
- char *nicinfo = (char *)0;
-#endif
-
-#if PCI_FBBE
- int fast_back_to_back_enable;
-#endif
- l1sc_t *scp;
- nasid_t nasid;
-
- async_attach_t aa = NULL;
-
- aa = async_attach_get_info(xconn_vhdl);
-
-#if DEBUG && ATTACH_DEBUG
- printk("pcibr_attach: xconn_vhdl= %p\n", xconn_vhdl);
- {
- int pos;
- char dname[256];
- pos = devfs_generate_path(xconn_vhdl, dname, 256);
- printk("%s : path= %s \n", __FUNCTION__, &dname[pos]);
- }
-#endif
-
- /* Setup the PRB for the bridge in CONVEYOR BELT
- * mode. PRBs are setup in default FIRE-AND-FORGET
- * mode during the initialization.
- */
- hub_device_flags_set(xconn_vhdl, HUB_PIO_CONVEYOR);
-
- bridge = (bridge_t *)
- xtalk_piotrans_addr(xconn_vhdl, NULL,
- 0, sizeof(bridge_t), 0);
-
-#ifndef MEDUSA_HACK
- if ((bridge->b_wid_stat & BRIDGE_STAT_PCI_GIO_N) == 0)
- return -1; /* someone else handles GIO bridges. */
-#endif
-
- if (XWIDGET_PART_REV_NUM(bridge->b_wid_id) == XBRIDGE_PART_REV_A)
- NeedXbridgeSwap = 1;
-
- /*
- * Create the vertex for the PCI bus, which we
- * will also use to hold the pcibr_soft and
- * which will be the "master" vertex for all the
- * pciio connection points we will hang off it.
- * This needs to happen before we call nic_bridge_vertex_info
- * as we are some of the *_vmc functions need access to the edges.
- *
- * Opening this vertex will provide access to
- * the Bridge registers themselves.
- */
- rc = hwgraph_path_add(xconn_vhdl, EDGE_LBL_PCI, &pcibr_vhdl);
- ASSERT(rc == GRAPH_SUCCESS);
-
- ctlr_vhdl = NULL;
- ctlr_vhdl = hwgraph_register(pcibr_vhdl, EDGE_LBL_CONTROLLER,
- 0, DEVFS_FL_AUTO_DEVNUM,
- 0, 0,
- S_IFCHR | S_IRUSR | S_IWUSR | S_IRGRP, 0, 0,
- &pcibr_fops, NULL);
-
- ASSERT(ctlr_vhdl != NULL);
-
- /*
- * decode the nic, and hang its stuff off our
- * connection point where other drivers can get
- * at it.
- */
-#ifdef LATER
- nicinfo = BRIDGE_VERTEX_MFG_INFO(xconn_vhdl, (nic_data_t) & bridge->b_nic);
-#endif
-
- /*
- * Get the hint structure; if some NIC callback
- * marked this vertex as "hands-off" then we
- * just return here, before doing anything else.
- */
- pcibr_hints = pcibr_hints_get(xconn_vhdl, 0);
-
- if (pcibr_hints && pcibr_hints->ph_hands_off)
- return -1; /* generic operations disabled */
-
- id = bridge->b_wid_id;
- rev = XWIDGET_PART_REV_NUM(id);
-
- hwgraph_info_add_LBL(pcibr_vhdl, INFO_LBL_PCIBR_ASIC_REV, (arbitrary_info_t) rev);
-
- /*
- * allocate soft state structure, fill in some
- * fields, and hook it up to our vertex.
- */
- NEW(pcibr_soft);
- BZERO(pcibr_soft, sizeof *pcibr_soft);
- pcibr_soft_set(pcibr_vhdl, pcibr_soft);
-
- pcibr_soft->bs_conn = xconn_vhdl;
- pcibr_soft->bs_vhdl = pcibr_vhdl;
- pcibr_soft->bs_base = bridge;
- pcibr_soft->bs_rev_num = rev;
- pcibr_soft->bs_intr_bits = pcibr_intr_bits;
- if (is_xbridge(bridge)) {
- pcibr_soft->bs_int_ate_size = XBRIDGE_INTERNAL_ATES;
- pcibr_soft->bs_xbridge = 1;
- } else {
- pcibr_soft->bs_int_ate_size = BRIDGE_INTERNAL_ATES;
- pcibr_soft->bs_xbridge = 0;
- }
-
- nasid = NASID_GET(bridge);
- scp = &NODEPDA( NASID_TO_COMPACT_NODEID(nasid) )->module->elsc;
- pcibr_soft->bs_l1sc = scp;
- pcibr_soft->bs_moduleid = iobrick_module_get(scp);
- pcibr_soft->bsi_err_intr = 0;
-
- /* Bridges up through REV C
- * are unable to set the direct
- * byteswappers to BYTE_STREAM.
- */
- if (pcibr_soft->bs_rev_num <= BRIDGE_PART_REV_C) {
- pcibr_soft->bs_pio_end_io = PCIIO_WORD_VALUES;
- pcibr_soft->bs_pio_end_mem = PCIIO_WORD_VALUES;
- }
-#if PCIBR_SOFT_LIST
- {
- pcibr_list_p self;
-
- NEW(self);
- self->bl_soft = pcibr_soft;
- self->bl_vhdl = pcibr_vhdl;
- self->bl_next = pcibr_list;
- pcibr_list = self;
- }
-#endif
-
- /*
- * get the name of this bridge vertex and keep the info. Use this
- * only where it is really needed now: like error interrupts.
- */
- s = dev_to_name(pcibr_vhdl, devnm, MAXDEVNAME);
- pcibr_soft->bs_name = kmalloc(strlen(s) + 1, GFP_KERNEL);
- strcpy(pcibr_soft->bs_name, s);
-
-#if SHOW_REVS || DEBUG
-#if !DEBUG
- if (kdebug)
-#endif
- printk("%sBridge ASIC: rev %s (code=0x%x) at %s\n",
- is_xbridge(bridge) ? "X" : "",
- (rev == BRIDGE_PART_REV_A) ? "A" :
- (rev == BRIDGE_PART_REV_B) ? "B" :
- (rev == BRIDGE_PART_REV_C) ? "C" :
- (rev == BRIDGE_PART_REV_D) ? "D" :
- (rev == XBRIDGE_PART_REV_A) ? "A" :
- (rev == XBRIDGE_PART_REV_B) ? "B" :
- "unknown",
- rev, pcibr_soft->bs_name);
-#endif
-
- info = xwidget_info_get(xconn_vhdl);
- pcibr_soft->bs_xid = xwidget_info_id_get(info);
- pcibr_soft->bs_master = xwidget_info_master_get(info);
- pcibr_soft->bs_mxid = xwidget_info_masterid_get(info);
-
- /*
- * Init bridge lock.
- */
- spin_lock_init(&pcibr_soft->bs_lock);
-
- /*
- * If we have one, process the hints structure.
- */
- if (pcibr_hints) {
- rrb_fixed = pcibr_hints->ph_rrb_fixed;
-
- pcibr_soft->bs_rrb_fixed = rrb_fixed;
-
- if (pcibr_hints->ph_intr_bits)
- pcibr_soft->bs_intr_bits = pcibr_hints->ph_intr_bits;
-
- for (slot = 0; slot < 8; ++slot) {
- int hslot = pcibr_hints->ph_host_slot[slot] - 1;
-
- if (hslot < 0) {
- pcibr_soft->bs_slot[slot].host_slot = slot;
- } else {
- pcibr_soft->bs_slot[slot].has_host = 1;
- pcibr_soft->bs_slot[slot].host_slot = hslot;
- }
- }
- }
- /*
- * set up initial values for state fields
- */
- for (slot = 0; slot < 8; ++slot) {
- pcibr_soft->bs_slot[slot].bss_devio.bssd_space = PCIIO_SPACE_NONE;
- pcibr_soft->bs_slot[slot].bss_d64_base = PCIBR_D64_BASE_UNSET;
- pcibr_soft->bs_slot[slot].bss_d32_base = PCIBR_D32_BASE_UNSET;
- pcibr_soft->bs_slot[slot].bss_ext_ates_active = ATOMIC_INIT(0);
- }
-
- for (ibit = 0; ibit < 8; ++ibit) {
- pcibr_soft->bs_intr[ibit].bsi_xtalk_intr = 0;
- pcibr_soft->bs_intr[ibit].bsi_pcibr_intr_wrap.iw_soft = pcibr_soft;
- pcibr_soft->bs_intr[ibit].bsi_pcibr_intr_wrap.iw_list = NULL;
- pcibr_soft->bs_intr[ibit].bsi_pcibr_intr_wrap.iw_stat =
- &(bridge->b_int_status);
- pcibr_soft->bs_intr[ibit].bsi_pcibr_intr_wrap.iw_hdlrcnt = 0;
- pcibr_soft->bs_intr[ibit].bsi_pcibr_intr_wrap.iw_shared = 0;
- pcibr_soft->bs_intr[ibit].bsi_pcibr_intr_wrap.iw_connected = 0;
- }
-
- /*
- * Initialize various Bridge registers.
- */
-
- /*
- * On pre-Rev.D bridges, set the PCI_RETRY_CNT
- * to zero to avoid dropping stores. (#475347)
- */
- if (rev < BRIDGE_PART_REV_D)
- bridge->b_bus_timeout &= ~BRIDGE_BUS_PCI_RETRY_MASK;
-
- /*
- * Clear all pending interrupts.
- */
- bridge->b_int_rst_stat = (BRIDGE_IRR_ALL_CLR);
-
- /*
- * Until otherwise set up,
- * assume all interrupts are
- * from slot 7.
- */
- bridge->b_int_device = (uint32_t) 0xffffffff;
-
- {
- bridgereg_t dirmap;
- paddr_t paddr;
- iopaddr_t xbase;
- xwidgetnum_t xport;
- iopaddr_t offset;
- int num_entries = 0;
- int entry;
- cnodeid_t cnodeid;
- nasid_t nasid;
-
- /* Set the Bridge's 32-bit PCI to XTalk
- * Direct Map register to the most useful
- * value we can determine. Note that we
- * must use a single xid for all of:
- * direct-mapped 32-bit DMA accesses
- * direct-mapped 64-bit DMA accesses
- * DMA accesses through the PMU
- * interrupts
- * This is the only way to guarantee that
- * completion interrupts will reach a CPU
- * after all DMA data has reached memory.
- * (Of course, there may be a few special
- * drivers/controlers that explicitly manage
- * this ordering problem.)
- */
-
- cnodeid = 0; /* default node id */
- /*
- * Determine the base address node id to be used for all 32-bit
- * Direct Mapping I/O. The default is node 0, but this can be changed
- * via a DEVICE_ADMIN directive and the PCIBUS_DMATRANS_NODE
- * attribute in the irix.sm config file. A device driver can obtain
- * this node value via a call to pcibr_get_dmatrans_node().
- */
- nasid = COMPACT_TO_NASID_NODEID(cnodeid);
- paddr = NODE_OFFSET(nasid) + 0;
-
- /* currently, we just assume that if we ask
- * for a DMA mapping to "zero" the XIO
- * host will transmute this into a request
- * for the lowest hunk of memory.
- */
- xbase = xtalk_dmatrans_addr(xconn_vhdl, 0,
- paddr, _PAGESZ, 0);
-
- if (xbase != XIO_NOWHERE) {
- if (XIO_PACKED(xbase)) {
- xport = XIO_PORT(xbase);
- xbase = XIO_ADDR(xbase);
- } else
- xport = pcibr_soft->bs_mxid;
-
- offset = xbase & ((1ull << BRIDGE_DIRMAP_OFF_ADDRSHFT) - 1ull);
- xbase >>= BRIDGE_DIRMAP_OFF_ADDRSHFT;
-
- dirmap = xport << BRIDGE_DIRMAP_W_ID_SHFT;
-
- if (xbase)
- dirmap |= BRIDGE_DIRMAP_OFF & xbase;
- else if (offset >= (512 << 20))
- dirmap |= BRIDGE_DIRMAP_ADD512;
-
- bridge->b_dir_map = dirmap;
- }
- /*
- * Set bridge's idea of page size according to the system's
- * idea of "IO page size". TBD: The idea of IO page size
- * should really go away.
- */
- /*
- * ensure that we write and read without any interruption.
- * The read following the write is required for the Bridge war
- */
- spl_level = splhi();
-#if IOPGSIZE == 4096
- bridge->b_wid_control &= ~BRIDGE_CTRL_PAGE_SIZE;
-#elif IOPGSIZE == 16384
- bridge->b_wid_control |= BRIDGE_CTRL_PAGE_SIZE;
-#else
- <<<Unable to deal with IOPGSIZE >>>;
-#endif
- bridge->b_wid_control; /* inval addr bug war */
- splx(spl_level);
-
- /* Initialize internal mapping entries */
- for (entry = 0; entry < pcibr_soft->bs_int_ate_size; entry++)
- bridge->b_int_ate_ram[entry].wr = 0;
-
- /*
- * Determine if there's external mapping SSRAM on this
- * bridge. Set up Bridge control register appropriately,
- * inititlize SSRAM, and set software up to manage RAM
- * entries as an allocatable resource.
- *
- * Currently, we just use the rm* routines to manage ATE
- * allocation. We should probably replace this with a
- * Best Fit allocator.
- *
- * For now, if we have external SSRAM, avoid using
- * the internal ssram: we can't turn PREFETCH on
- * when we use the internal SSRAM; and besides,
- * this also guarantees that no allocation will
- * straddle the internal/external line, so we
- * can increment ATE write addresses rather than
- * recomparing against BRIDGE_INTERNAL_ATES every
- * time.
- */
- if (is_xbridge(bridge))
- num_entries = 0;
- else
- num_entries = pcibr_init_ext_ate_ram(bridge);
-
- /* we always have 128 ATEs (512 for Xbridge) inside the chip
- * even if disabled for debugging.
- */
- pcibr_soft->bs_int_ate_map = rmallocmap(pcibr_soft->bs_int_ate_size);
- pcibr_ate_free(pcibr_soft, 0, pcibr_soft->bs_int_ate_size);
-#if PCIBR_ATE_DEBUG
- printk("pcibr_attach: %d INTERNAL ATEs\n", pcibr_soft->bs_int_ate_size);
-#endif
-
- if (num_entries > pcibr_soft->bs_int_ate_size) {
-#if PCIBR_ATE_NOTBOTH /* for debug -- forces us to use external ates */
- printk("pcibr_attach: disabling internal ATEs.\n");
- pcibr_ate_alloc(pcibr_soft, pcibr_soft->bs_int_ate_size);
-#endif
- pcibr_soft->bs_ext_ate_map = rmallocmap(num_entries);
- pcibr_ate_free(pcibr_soft, pcibr_soft->bs_int_ate_size,
- num_entries - pcibr_soft->bs_int_ate_size);
-#if PCIBR_ATE_DEBUG
- printk("pcibr_attach: %d EXTERNAL ATEs\n",
- num_entries - pcibr_soft->bs_int_ate_size);
-#endif
- }
- }
-
- {
- bridgereg_t dirmap;
- iopaddr_t xbase;
-
- /*
- * now figure the *real* xtalk base address
- * that dirmap sends us to.
- */
- dirmap = bridge->b_dir_map;
- if (dirmap & BRIDGE_DIRMAP_OFF)
- xbase = (iopaddr_t)(dirmap & BRIDGE_DIRMAP_OFF)
- << BRIDGE_DIRMAP_OFF_ADDRSHFT;
- else if (dirmap & BRIDGE_DIRMAP_ADD512)
- xbase = 512 << 20;
- else
- xbase = 0;
-
- pcibr_soft->bs_dir_xbase = xbase;
-
- /* it is entirely possible that we may, at this
- * point, have our dirmap pointing somewhere
- * other than our "master" port.
- */
- pcibr_soft->bs_dir_xport =
- (dirmap & BRIDGE_DIRMAP_W_ID) >> BRIDGE_DIRMAP_W_ID_SHFT;
- }
-
- /* pcibr sources an error interrupt;
- * figure out where to send it.
- *
- * If any interrupts are enabled in bridge,
- * then the prom set us up and our interrupt
- * has already been reconnected in mlreset
- * above.
- *
- * Need to set the D_INTR_ISERR flag
- * in the dev_desc used for allocating the
- * error interrupt, so our interrupt will
- * be properly routed and prioritized.
- *
- * If our crosstalk provider wants to
- * fix widget error interrupts to specific
- * destinations, D_INTR_ISERR is how it
- * knows to do this.
- */
-
- xtalk_intr = xtalk_intr_alloc(xconn_vhdl, dev_desc, pcibr_vhdl);
- ASSERT(xtalk_intr != NULL);
-
- pcibr_soft->bsi_err_intr = xtalk_intr;
-
- /*
- * On IP35 with XBridge, we do some extra checks in pcibr_setwidint
- * in order to work around some addressing limitations. In order
- * for that fire wall to work properly, we need to make sure we
- * start from a known clean state.
- */
- pcibr_clearwidint(bridge);
-
- xtalk_intr_connect(xtalk_intr, (xtalk_intr_setfunc_t)pcibr_setwidint, (void *)bridge);
-
- /*
- * now we can start handling error interrupts;
- * enable all of them.
- * NOTE: some PCI ints may already be enabled.
- */
- b_int_enable = bridge->b_int_enable | BRIDGE_ISR_ERRORS;
-
-
- bridge->b_int_enable = b_int_enable;
- bridge->b_int_mode = 0; /* do not send "clear interrupt" packets */
-
- bridge->b_wid_tflush; /* wait until Bridge PIO complete */
-
- /*
- * Depending on the rev of bridge, disable certain features.
- * Easiest way seems to be to force the PCIBR_NOwhatever
- * flag to be on for all DMA calls, which overrides any
- * PCIBR_whatever flag or even the setting of whatever
- * from the PCIIO_DMA_class flags (or even from the other
- * PCIBR flags, since NO overrides YES).
- */
- pcibr_soft->bs_dma_flags = 0;
-
- /* PREFETCH:
- * Always completely disabled for REV.A;
- * at "pcibr_prefetch_enable_rev", anyone
- * asking for PCIIO_PREFETCH gets it.
- * Between these two points, you have to ask
- * for PCIBR_PREFETCH, which promises that
- * your driver knows about known Bridge WARs.
- */
- if (pcibr_soft->bs_rev_num < BRIDGE_PART_REV_B)
- pcibr_soft->bs_dma_flags |= PCIBR_NOPREFETCH;
- else if (pcibr_soft->bs_rev_num <
- (BRIDGE_WIDGET_PART_NUM << 4 | pcibr_prefetch_enable_rev))
- pcibr_soft->bs_dma_flags |= PCIIO_NOPREFETCH;
-
- /* WRITE_GATHER:
- * Disabled up to but not including the
- * rev number in pcibr_wg_enable_rev. There
- * is no "WAR range" as with prefetch.
- */
- if (pcibr_soft->bs_rev_num <
- (BRIDGE_WIDGET_PART_NUM << 4 | pcibr_wg_enable_rev))
- pcibr_soft->bs_dma_flags |= PCIBR_NOWRITE_GATHER;
-
- pciio_provider_register(pcibr_vhdl, &pcibr_provider);
- pciio_provider_startup(pcibr_vhdl);
-
- pci_io_fb = 0x00000004; /* I/O FreeBlock Base */
- pci_io_fl = 0xFFFFFFFF; /* I/O FreeBlock Last */
-
- pci_lo_fb = 0x00000010; /* Low Memory FreeBlock Base */
- pci_lo_fl = 0x001FFFFF; /* Low Memory FreeBlock Last */
-
- pci_hi_fb = 0x00200000; /* High Memory FreeBlock Base */
- pci_hi_fl = 0x3FFFFFFF; /* High Memory FreeBlock Last */
-
-
- PCI_ADDR_SPACE_LIMITS_STORE();
-
- /* build "no-slot" connection point
- */
- pcibr_info = pcibr_device_info_new
- (pcibr_soft, PCIIO_SLOT_NONE, PCIIO_FUNC_NONE,
- PCIIO_VENDOR_ID_NONE, PCIIO_DEVICE_ID_NONE);
- noslot_conn = pciio_device_info_register
- (pcibr_vhdl, &pcibr_info->f_c);
-
- /* Remember the no slot connection point info for tearing it
- * down during detach.
- */
- pcibr_soft->bs_noslot_conn = noslot_conn;
- pcibr_soft->bs_noslot_info = pcibr_info;
-#if PCI_FBBE
- fast_back_to_back_enable = 1;
-#endif
-
-#if PCI_FBBE
- if (fast_back_to_back_enable) {
- /*
- * All devices on the bus are capable of fast back to back, so
- * we need to set the fast back to back bit in all devices on
- * the bus that are capable of doing such accesses.
- */
- }
-#endif
-
-#ifdef LATER
- /* If the bridge has been reset then there is no need to reset
- * the individual PCI slots.
- */
- for (slot = 0; slot < 8; ++slot)
- /* Reset all the slots */
- (void)pcibr_slot_reset(pcibr_vhdl, slot);
-#endif
-
- for (slot = 0; slot < 8; ++slot)
- /* Find out what is out there */
- (void)pcibr_slot_info_init(pcibr_vhdl,slot);
-
- for (slot = 0; slot < 8; ++slot)
- /* Set up the address space for this slot in the pci land */
- (void)pcibr_slot_addr_space_init(pcibr_vhdl,slot);
-
- for (slot = 0; slot < 8; ++slot)
- /* Setup the device register */
- (void)pcibr_slot_device_init(pcibr_vhdl, slot);
-
-#ifndef __ia64
- for (slot = 0; slot < 8; ++slot)
- /* Set up convenience links */
- if (is_xbridge(bridge))
- if (pcibr_soft->bs_slot[slot].bss_ninfo > 0) /* if occupied */
- pcibr_bus_cnvlink(pcibr_info->f_vertex, slot);
-#endif
-
- for (slot = 0; slot < 8; ++slot)
- /* Setup host/guest relations */
- (void)pcibr_slot_guest_info_init(pcibr_vhdl,slot);
-
- for (slot = 0; slot < 8; ++slot)
- /* Initial RRB management */
- (void)pcibr_slot_initial_rrb_alloc(pcibr_vhdl,slot);
-
- /* driver attach routines should be called out from generic linux code */
- for (slot = 0; slot < 8; ++slot)
- /* Call the device attach */
- (void)pcibr_slot_call_device_attach(pcibr_vhdl, slot, 0);
-
- /*
- * Each Pbrick PCI bus only has slots 1 and 2. Similarly for
- * widget 0xe on Ibricks. Allocate RRB's accordingly.
- */
- if (pcibr_soft->bs_moduleid > 0) {
- switch (MODULE_GET_BTCHAR(pcibr_soft->bs_moduleid)) {
- case 'p': /* Pbrick */
- do_pcibr_rrb_autoalloc(pcibr_soft, 1, 8);
- do_pcibr_rrb_autoalloc(pcibr_soft, 2, 8);
- break;
- case 'i': /* Ibrick */
- /* port 0xe on the Ibrick only has slots 1 and 2 */
- if (pcibr_soft->bs_xid == 0xe) {
- do_pcibr_rrb_autoalloc(pcibr_soft, 1, 8);
- do_pcibr_rrb_autoalloc(pcibr_soft, 2, 8);
- }
- else {
- /* allocate one RRB for the serial port */
- do_pcibr_rrb_autoalloc(pcibr_soft, 0, 1);
- }
- break;
- } /* switch */
- }
-
-#ifdef LATER
- if (strstr(nicinfo, XTALK_PCI_PART_NUM)) {
- do_pcibr_rrb_autoalloc(pcibr_soft, 1, 8);
-#if PCIBR_RRB_DEBUG
- printf("\n\nFound XTALK_PCI (030-1275) at %v\n", xconn_vhdl);
-
- printf("pcibr_attach: %v Shoebox RRB MANAGEMENT: %d+%d free\n",
- pcibr_vhdl,
- pcibr_soft->bs_rrb_avail[0],
- pcibr_soft->bs_rrb_avail[1]);
-
- for (slot = 0; slot < 8; ++slot)
- printf("\t%d+%d+%d",
- 0xFFF & pcibr_soft->bs_rrb_valid[slot],
- 0xFFF & pcibr_soft->bs_rrb_valid[slot + PCIBR_RRB_SLOT_VIRTUAL],
- pcibr_soft->bs_rrb_res[slot]);
-
- printf("\n");
-#endif
- }
-#else
- FIXME("pcibr_attach: Call do_pcibr_rrb_autoalloc nicinfo\n");
-#endif
-
- if (aa)
- async_attach_add_info(noslot_conn, aa);
-
- pciio_device_attach(noslot_conn, 0);
-
-
- /*
- * Tear down pointer to async attach info -- async threads for
- * bridge's descendants may be running but the bridge's work is done.
- */
- if (aa)
- async_attach_del_info(xconn_vhdl);
-
- return 0;
-}
-/*
- * pcibr_detach:
- * Detach the bridge device from the hwgraph after cleaning out all the
- * underlying vertices.
- */
-int
-pcibr_detach(devfs_handle_t xconn)
-{
- pciio_slot_t slot;
- devfs_handle_t pcibr_vhdl;
- pcibr_soft_t pcibr_soft;
- bridge_t *bridge;
-
- /* Get the bridge vertex from its xtalk connection point */
- if (hwgraph_traverse(xconn, EDGE_LBL_PCI, &pcibr_vhdl) != GRAPH_SUCCESS)
- return(1);
-
- pcibr_soft = pcibr_soft_get(pcibr_vhdl);
- bridge = pcibr_soft->bs_base;
-
- /* Disable the interrupts from the bridge */
- bridge->b_int_enable = 0;
-
- /* Detach all the PCI devices talking to this bridge */
- for(slot = 0; slot < 8; slot++) {
-#ifdef DEBUG
- printk("pcibr_device_detach called for %p/%d\n",
- pcibr_vhdl,slot);
-#endif
- pcibr_slot_detach(pcibr_vhdl, slot, 0);
- }
-
- /* Unregister the no-slot connection point */
- pciio_device_info_unregister(pcibr_vhdl,
- &(pcibr_soft->bs_noslot_info->f_c));
-
- spin_lock_destroy(&pcibr_soft->bs_lock);
- kfree(pcibr_soft->bs_name);
-
- /* Error handler gets unregistered when the widget info is
- * cleaned
- */
- /* Free the soft ATE maps */
- if (pcibr_soft->bs_int_ate_map)
- rmfreemap(pcibr_soft->bs_int_ate_map);
- if (pcibr_soft->bs_ext_ate_map)
- rmfreemap(pcibr_soft->bs_ext_ate_map);
-
- /* Disconnect the error interrupt and free the xtalk resources
- * associated with it.
- */
- xtalk_intr_disconnect(pcibr_soft->bsi_err_intr);
- xtalk_intr_free(pcibr_soft->bsi_err_intr);
-
- /* Clear the software state maintained by the bridge driver for this
- * bridge.
- */
- DEL(pcibr_soft);
- /* Remove the Bridge revision labelled info */
- (void)hwgraph_info_remove_LBL(pcibr_vhdl, INFO_LBL_PCIBR_ASIC_REV, NULL);
- /* Remove the character device associated with this bridge */
- (void)hwgraph_edge_remove(pcibr_vhdl, EDGE_LBL_CONTROLLER, NULL);
- /* Remove the PCI bridge vertex */
- (void)hwgraph_edge_remove(xconn, EDGE_LBL_PCI, NULL);
-
- return(0);
-}
-
-int
-pcibr_asic_rev(devfs_handle_t pconn_vhdl)
-{
- devfs_handle_t pcibr_vhdl;
- arbitrary_info_t ainfo;
-
- if (GRAPH_SUCCESS !=
- hwgraph_traverse(pconn_vhdl, EDGE_LBL_MASTER, &pcibr_vhdl))
- return -1;
-
- if (GRAPH_SUCCESS !=
- hwgraph_info_get_LBL(pcibr_vhdl, INFO_LBL_PCIBR_ASIC_REV, &ainfo))
- return -1;
-
- return (int) ainfo;
-}
-
-int
-pcibr_write_gather_flush(devfs_handle_t pconn_vhdl)
-{
- pciio_info_t pciio_info = pciio_info_get(pconn_vhdl);
- pcibr_soft_t pcibr_soft = (pcibr_soft_t) pciio_info_mfast_get(pciio_info);
- pciio_slot_t slot;
- slot = pciio_info_slot_get(pciio_info);
- pcibr_device_write_gather_flush(pcibr_soft, slot);
- return 0;
-}
-
-/* =====================================================================
- * PIO MANAGEMENT
- */
-
-LOCAL iopaddr_t
-pcibr_addr_pci_to_xio(devfs_handle_t pconn_vhdl,
- pciio_slot_t slot,
- pciio_space_t space,
- iopaddr_t pci_addr,
- size_t req_size,
- unsigned flags)
-{
- pcibr_info_t pcibr_info = pcibr_info_get(pconn_vhdl);
- pciio_info_t pciio_info = &pcibr_info->f_c;
- pcibr_soft_t pcibr_soft = (pcibr_soft_t) pciio_info_mfast_get(pciio_info);
- bridge_t *bridge = pcibr_soft->bs_base;
-
- unsigned bar; /* which BASE reg on device is decoding */
- iopaddr_t xio_addr = XIO_NOWHERE;
-
- pciio_space_t wspace; /* which space device is decoding */
- iopaddr_t wbase; /* base of device decode on PCI */
- size_t wsize; /* size of device decode on PCI */
-
- int try; /* DevIO(x) window scanning order control */
- int win; /* which DevIO(x) window is being used */
- pciio_space_t mspace; /* target space for devio(x) register */
- iopaddr_t mbase; /* base of devio(x) mapped area on PCI */
- size_t msize; /* size of devio(x) mapped area on PCI */
- size_t mmask; /* addr bits stored in Device(x) */
-
- unsigned long s;
-
- s = pcibr_lock(pcibr_soft);
-
- if (pcibr_soft->bs_slot[slot].has_host) {
- slot = pcibr_soft->bs_slot[slot].host_slot;
- pcibr_info = pcibr_soft->bs_slot[slot].bss_infos[0];
- }
- if (space == PCIIO_SPACE_NONE)
- goto done;
-
- if (space == PCIIO_SPACE_CFG) {
- /*
- * Usually, the first mapping
- * established to a PCI device
- * is to its config space.
- *
- * In any case, we definitely
- * do NOT need to worry about
- * PCI BASE registers, and
- * MUST NOT attempt to point
- * the DevIO(x) window at
- * this access ...
- */
- if (((flags & PCIIO_BYTE_STREAM) == 0) &&
- ((pci_addr + req_size) <= BRIDGE_TYPE0_CFG_FUNC_OFF))
- xio_addr = pci_addr + BRIDGE_TYPE0_CFG_DEV(slot);
-
- goto done;
- }
- if (space == PCIIO_SPACE_ROM) {
- /* PIO to the Expansion Rom.
- * Driver is responsible for
- * enabling and disabling
- * decodes properly.
- */
- wbase = pcibr_info->f_rbase;
- wsize = pcibr_info->f_rsize;
-
- /*
- * While the driver should know better
- * than to attempt to map more space
- * than the device is decoding, he might
- * do it; better to bail out here.
- */
- if ((pci_addr + req_size) > wsize)
- goto done;
-
- pci_addr += wbase;
- space = PCIIO_SPACE_MEM;
- }
- /*
- * reduce window mappings to raw
- * space mappings (maybe allocating
- * windows), and try for DevIO(x)
- * usage (setting it if it is available).
- */
- bar = space - PCIIO_SPACE_WIN0;
- if (bar < 6) {
- wspace = pcibr_info->f_window[bar].w_space;
- if (wspace == PCIIO_SPACE_NONE)
- goto done;
-
- /* get PCI base and size */
- wbase = pcibr_info->f_window[bar].w_base;
- wsize = pcibr_info->f_window[bar].w_size;
-
- /*
- * While the driver should know better
- * than to attempt to map more space
- * than the device is decoding, he might
- * do it; better to bail out here.
- */
- if ((pci_addr + req_size) > wsize)
- goto done;
-
- /* shift from window relative to
- * decoded space relative.
- */
- pci_addr += wbase;
- space = wspace;
- } else
- bar = -1;
-
- /* Scan all the DevIO(x) windows twice looking for one
- * that can satisfy our request. The first time through,
- * only look at assigned windows; the second time, also
- * look at PCIIO_SPACE_NONE windows. Arrange the order
- * so we always look at our own window first.
- *
- * We will not attempt to satisfy a single request
- * by concatinating multiple windows.
- */
- for (try = 0; try < 16; ++try) {
- bridgereg_t devreg;
- unsigned offset;
-
- win = (try + slot) % 8;
-
- /* If this DevIO(x) mapping area can provide
- * a mapping to this address, use it.
- */
- msize = (win < 2) ? 0x200000 : 0x100000;
- mmask = -msize;
- if (space != PCIIO_SPACE_IO)
- mmask &= 0x3FFFFFFF;
-
- offset = pci_addr & (msize - 1);
-
- /* If this window can't possibly handle that request,
- * go on to the next window.
- */
- if (((pci_addr & (msize - 1)) + req_size) > msize)
- continue;
-
- devreg = pcibr_soft->bs_slot[win].bss_device;
-
- /* Is this window "nailed down"?
- * If not, maybe we can use it.
- * (only check this the second time through)
- */
- mspace = pcibr_soft->bs_slot[win].bss_devio.bssd_space;
- if ((try > 7) && (mspace == PCIIO_SPACE_NONE)) {
-
- /* If this is the primary DevIO(x) window
- * for some other device, skip it.
- */
- if ((win != slot) &&
- (PCIIO_VENDOR_ID_NONE !=
- pcibr_soft->bs_slot[win].bss_vendor_id))
- continue;
-
- /* It's a free window, and we fit in it.
- * Set up Device(win) to our taste.
- */
- mbase = pci_addr & mmask;
-
- /* check that we would really get from
- * here to there.
- */
- if ((mbase | offset) != pci_addr)
- continue;
-
- devreg &= ~BRIDGE_DEV_OFF_MASK;
- if (space != PCIIO_SPACE_IO)
- devreg |= BRIDGE_DEV_DEV_IO_MEM;
- else
- devreg &= ~BRIDGE_DEV_DEV_IO_MEM;
- devreg |= (mbase >> 20) & BRIDGE_DEV_OFF_MASK;
-
- /* default is WORD_VALUES.
- * if you specify both,
- * operation is undefined.
- */
- if (flags & PCIIO_BYTE_STREAM)
- devreg |= BRIDGE_DEV_DEV_SWAP;
- else
- devreg &= ~BRIDGE_DEV_DEV_SWAP;
-
- if (pcibr_soft->bs_slot[win].bss_device != devreg) {
- bridge->b_device[win].reg = devreg;
- pcibr_soft->bs_slot[win].bss_device = devreg;
- bridge->b_wid_tflush; /* wait until Bridge PIO complete */
-
-#if DEBUG && PCI_DEBUG
- printk("pcibr Device(%d): 0x%lx\n", win, bridge->b_device[win].reg);
-#endif
- }
- pcibr_soft->bs_slot[win].bss_devio.bssd_space = space;
- pcibr_soft->bs_slot[win].bss_devio.bssd_base = mbase;
- xio_addr = BRIDGE_DEVIO(win) + (pci_addr - mbase);
-
-#if DEBUG && PCI_DEBUG
- printk("%s LINE %d map to space %d space desc 0x%x[%lx..%lx] for slot %d allocates DevIO(%d) devreg 0x%x\n",
- __FUNCTION__, __LINE__, space, space_desc,
- pci_addr, pci_addr + req_size - 1,
- slot, win, devreg);
-#endif
-
- goto done;
- } /* endif DevIO(x) not pointed */
- mbase = pcibr_soft->bs_slot[win].bss_devio.bssd_base;
-
- /* Now check for request incompat with DevIO(x)
- */
- if ((mspace != space) ||
- (pci_addr < mbase) ||
- ((pci_addr + req_size) > (mbase + msize)) ||
- ((flags & PCIIO_BYTE_STREAM) && !(devreg & BRIDGE_DEV_DEV_SWAP)) ||
- (!(flags & PCIIO_BYTE_STREAM) && (devreg & BRIDGE_DEV_DEV_SWAP)))
- continue;
-
- /* DevIO(x) window is pointed at PCI space
- * that includes our target. Calculate the
- * final XIO address, release the lock and
- * return.
- */
- xio_addr = BRIDGE_DEVIO(win) + (pci_addr - mbase);
-
-#if DEBUG && PCI_DEBUG
- printk("%s LINE %d map to space %d [0x%p..0x%p] for slot %d uses DevIO(%d)\n",
- __FUNCTION__, __LINE__, space, pci_addr, pci_addr + req_size - 1, slot, win);
-#endif
- goto done;
- }
-
- switch (space) {
- /*
- * Accesses to device decode
- * areas that do a not fit
- * within the DevIO(x) space are
- * modified to be accesses via
- * the direct mapping areas.
- *
- * If necessary, drivers can
- * explicitly ask for mappings
- * into these address spaces,
- * but this should never be needed.
- */
- case PCIIO_SPACE_MEM: /* "mem space" */
- case PCIIO_SPACE_MEM32: /* "mem, use 32-bit-wide bus" */
- if ((pci_addr + BRIDGE_PCI_MEM32_BASE + req_size - 1) <=
- BRIDGE_PCI_MEM32_LIMIT)
- xio_addr = pci_addr + BRIDGE_PCI_MEM32_BASE;
- break;
-
- case PCIIO_SPACE_MEM64: /* "mem, use 64-bit-wide bus" */
- if ((pci_addr + BRIDGE_PCI_MEM64_BASE + req_size - 1) <=
- BRIDGE_PCI_MEM64_LIMIT)
- xio_addr = pci_addr + BRIDGE_PCI_MEM64_BASE;
- break;
-
- case PCIIO_SPACE_IO: /* "i/o space" */
- /* Bridge Hardware Bug WAR #482741:
- * The 4G area that maps directly from
- * XIO space to PCI I/O space is busted
- * until Bridge Rev D.
- */
- if ((pcibr_soft->bs_rev_num > BRIDGE_PART_REV_C) &&
- ((pci_addr + BRIDGE_PCI_IO_BASE + req_size - 1) <=
- BRIDGE_PCI_IO_LIMIT))
- xio_addr = pci_addr + BRIDGE_PCI_IO_BASE;
- break;
- }
-
- /* Check that "Direct PIO" byteswapping matches,
- * try to change it if it does not.
- */
- if (xio_addr != XIO_NOWHERE) {
- unsigned bst; /* nonzero to set bytestream */
- unsigned *bfp; /* addr of record of how swapper is set */
- unsigned swb; /* which control bit to mung */
- unsigned bfo; /* current swapper setting */
- unsigned bfn; /* desired swapper setting */
-
- bfp = ((space == PCIIO_SPACE_IO)
- ? (&pcibr_soft->bs_pio_end_io)
- : (&pcibr_soft->bs_pio_end_mem));
-
- bfo = *bfp;
-
- bst = flags & PCIIO_BYTE_STREAM;
-
- bfn = bst ? PCIIO_BYTE_STREAM : PCIIO_WORD_VALUES;
-
- if (bfn == bfo) { /* we already match. */
- ;
- } else if (bfo != 0) { /* we have a conflict. */
-#if DEBUG && PCI_DEBUG
- printk("pcibr_addr_pci_to_xio: swap conflict in space %d , was%s%s, want%s%s\n",
- space,
- bfo & PCIIO_BYTE_STREAM ? " BYTE_STREAM" : "",
- bfo & PCIIO_WORD_VALUES ? " WORD_VALUES" : "",
- bfn & PCIIO_BYTE_STREAM ? " BYTE_STREAM" : "",
- bfn & PCIIO_WORD_VALUES ? " WORD_VALUES" : "");
-#endif
- xio_addr = XIO_NOWHERE;
- } else { /* OK to make the change. */
- bridgereg_t octl, nctl;
-
- swb = (space == PCIIO_SPACE_IO) ? BRIDGE_CTRL_IO_SWAP : BRIDGE_CTRL_MEM_SWAP;
- octl = bridge->b_wid_control;
- nctl = bst ? octl | swb : octl & ~swb;
-
- if (octl != nctl) /* make the change if any */
- bridge->b_wid_control = nctl;
-
- *bfp = bfn; /* record the assignment */
-
-#if DEBUG && PCI_DEBUG
- printk("pcibr_addr_pci_to_xio: swap for space %d set to%s%s\n",
- space,
- bfn & PCIIO_BYTE_STREAM ? " BYTE_STREAM" : "",
- bfn & PCIIO_WORD_VALUES ? " WORD_VALUES" : "");
-#endif
- }
- }
- done:
- pcibr_unlock(pcibr_soft, s);
- return xio_addr;
-}
-
-/*ARGSUSED6 */
-pcibr_piomap_t
-pcibr_piomap_alloc(devfs_handle_t pconn_vhdl,
- device_desc_t dev_desc,
- pciio_space_t space,
- iopaddr_t pci_addr,
- size_t req_size,
- size_t req_size_max,
- unsigned flags)
-{
- pcibr_info_t pcibr_info = pcibr_info_get(pconn_vhdl);
- pciio_info_t pciio_info = &pcibr_info->f_c;
- pciio_slot_t pciio_slot = pciio_info_slot_get(pciio_info);
- pcibr_soft_t pcibr_soft = (pcibr_soft_t) pciio_info_mfast_get(pciio_info);
- devfs_handle_t xconn_vhdl = pcibr_soft->bs_conn;
-
- pcibr_piomap_t *mapptr;
- pcibr_piomap_t maplist;
- pcibr_piomap_t pcibr_piomap;
- iopaddr_t xio_addr;
- xtalk_piomap_t xtalk_piomap;
- unsigned long s;
-
- /* Make sure that the req sizes are non-zero */
- if ((req_size < 1) || (req_size_max < 1))
- return NULL;
-
- /*
- * Code to translate slot/space/addr
- * into xio_addr is common between
- * this routine and pcibr_piotrans_addr.
- */
- xio_addr = pcibr_addr_pci_to_xio(pconn_vhdl, pciio_slot, space, pci_addr, req_size, flags);
-
- if (xio_addr == XIO_NOWHERE)
- return NULL;
-
- /* Check the piomap list to see if there is already an allocated
- * piomap entry but not in use. If so use that one. Otherwise
- * allocate a new piomap entry and add it to the piomap list
- */
- mapptr = &(pcibr_info->f_piomap);
-
- s = pcibr_lock(pcibr_soft);
- for (pcibr_piomap = *mapptr;
- pcibr_piomap != NULL;
- pcibr_piomap = pcibr_piomap->bp_next) {
- if (pcibr_piomap->bp_mapsz == 0)
- break;
- }
-
- if (pcibr_piomap)
- mapptr = NULL;
- else {
- pcibr_unlock(pcibr_soft, s);
- NEW(pcibr_piomap);
- }
-
- pcibr_piomap->bp_dev = pconn_vhdl;
- pcibr_piomap->bp_slot = pciio_slot;
- pcibr_piomap->bp_flags = flags;
- pcibr_piomap->bp_space = space;
- pcibr_piomap->bp_pciaddr = pci_addr;
- pcibr_piomap->bp_mapsz = req_size;
- pcibr_piomap->bp_soft = pcibr_soft;
- pcibr_piomap->bp_toc[0] = ATOMIC_INIT(0);
-
- if (mapptr) {
- s = pcibr_lock(pcibr_soft);
- maplist = *mapptr;
- pcibr_piomap->bp_next = maplist;
- *mapptr = pcibr_piomap;
- }
- pcibr_unlock(pcibr_soft, s);
-
-
- if (pcibr_piomap) {
- xtalk_piomap =
- xtalk_piomap_alloc(xconn_vhdl, 0,
- xio_addr,
- req_size, req_size_max,
- flags & PIOMAP_FLAGS);
- if (xtalk_piomap) {
- pcibr_piomap->bp_xtalk_addr = xio_addr;
- pcibr_piomap->bp_xtalk_pio = xtalk_piomap;
- } else {
- pcibr_piomap->bp_mapsz = 0;
- pcibr_piomap = 0;
- }
- }
- return pcibr_piomap;
-}
-
-/*ARGSUSED */
-void
-pcibr_piomap_free(pcibr_piomap_t pcibr_piomap)
-{
- xtalk_piomap_free(pcibr_piomap->bp_xtalk_pio);
- pcibr_piomap->bp_xtalk_pio = 0;
- pcibr_piomap->bp_mapsz = 0;
-}
-
-/*ARGSUSED */
-caddr_t
-pcibr_piomap_addr(pcibr_piomap_t pcibr_piomap,
- iopaddr_t pci_addr,
- size_t req_size)
-{
- return xtalk_piomap_addr(pcibr_piomap->bp_xtalk_pio,
- pcibr_piomap->bp_xtalk_addr +
- pci_addr - pcibr_piomap->bp_pciaddr,
- req_size);
-}
-
-/*ARGSUSED */
-void
-pcibr_piomap_done(pcibr_piomap_t pcibr_piomap)
-{
- xtalk_piomap_done(pcibr_piomap->bp_xtalk_pio);
-}
-
-/*ARGSUSED */
-caddr_t
-pcibr_piotrans_addr(devfs_handle_t pconn_vhdl,
- device_desc_t dev_desc,
- pciio_space_t space,
- iopaddr_t pci_addr,
- size_t req_size,
- unsigned flags)
-{
- pciio_info_t pciio_info = pciio_info_get(pconn_vhdl);
- pciio_slot_t pciio_slot = pciio_info_slot_get(pciio_info);
- pcibr_soft_t pcibr_soft = (pcibr_soft_t) pciio_info_mfast_get(pciio_info);
- devfs_handle_t xconn_vhdl = pcibr_soft->bs_conn;
-
- iopaddr_t xio_addr;
-
- xio_addr = pcibr_addr_pci_to_xio(pconn_vhdl, pciio_slot, space, pci_addr, req_size, flags);
-
- if (xio_addr == XIO_NOWHERE)
- return NULL;
-
- return xtalk_piotrans_addr(xconn_vhdl, 0, xio_addr, req_size, flags & PIOMAP_FLAGS);
-}
-
-/*
- * PIO Space allocation and management.
- * Allocate and Manage the PCI PIO space (mem and io space)
- * This routine is pretty simplistic at this time, and
- * does pretty trivial management of allocation and freeing..
- * The current scheme is prone for fragmentation..
- * Change the scheme to use bitmaps.
- */
-
-/*ARGSUSED */
-iopaddr_t
-pcibr_piospace_alloc(devfs_handle_t pconn_vhdl,
- device_desc_t dev_desc,
- pciio_space_t space,
- size_t req_size,
- size_t alignment)
-{
- pcibr_info_t pcibr_info = pcibr_info_get(pconn_vhdl);
- pciio_info_t pciio_info = &pcibr_info->f_c;
- pcibr_soft_t pcibr_soft = (pcibr_soft_t) pciio_info_mfast_get(pciio_info);
-
- pciio_piospace_t piosp;
- unsigned long s;
-
- iopaddr_t *pciaddr, *pcilast;
- iopaddr_t start_addr;
- size_t align_mask;
-
- /*
- * Check for proper alignment
- */
- ASSERT(alignment >= NBPP);
- ASSERT((alignment & (alignment - 1)) == 0);
-
- align_mask = alignment - 1;
- s = pcibr_lock(pcibr_soft);
-
- /*
- * First look if a previously allocated chunk exists.
- */
- if ((piosp = pcibr_info->f_piospace)) {
- /*
- * Look through the list for a right sized free chunk.
- */
- do {
- if (piosp->free &&
- (piosp->space == space) &&
- (piosp->count >= req_size) &&
- !(piosp->start & align_mask)) {
- piosp->free = 0;
- pcibr_unlock(pcibr_soft, s);
- return piosp->start;
- }
- piosp = piosp->next;
- } while (piosp);
- }
- ASSERT(!piosp);
-
- switch (space) {
- case PCIIO_SPACE_IO:
- pciaddr = &pcibr_soft->bs_spinfo.pci_io_base;
- pcilast = &pcibr_soft->bs_spinfo.pci_io_last;
- break;
- case PCIIO_SPACE_MEM:
- case PCIIO_SPACE_MEM32:
- pciaddr = &pcibr_soft->bs_spinfo.pci_mem_base;
- pcilast = &pcibr_soft->bs_spinfo.pci_mem_last;
- break;
- default:
- ASSERT(0);
- pcibr_unlock(pcibr_soft, s);
- return 0;
- }
-
- start_addr = *pciaddr;
-
- /*
- * Align start_addr.
- */
- if (start_addr & align_mask)
- start_addr = (start_addr + align_mask) & ~align_mask;
-
- if ((start_addr + req_size) > *pcilast) {
- /*
- * If too big a request, reject it.
- */
- pcibr_unlock(pcibr_soft, s);
- return 0;
- }
- *pciaddr = (start_addr + req_size);
-
- NEW(piosp);
- piosp->free = 0;
- piosp->space = space;
- piosp->start = start_addr;
- piosp->count = req_size;
- piosp->next = pcibr_info->f_piospace;
- pcibr_info->f_piospace = piosp;
-
- pcibr_unlock(pcibr_soft, s);
- return start_addr;
-}
-
-/*ARGSUSED */
-void
-pcibr_piospace_free(devfs_handle_t pconn_vhdl,
- pciio_space_t space,
- iopaddr_t pciaddr,
- size_t req_size)
-{
- pcibr_info_t pcibr_info = pcibr_info_get(pconn_vhdl);
- pcibr_soft_t pcibr_soft = (pcibr_soft_t) pcibr_info->f_mfast;
-
- pciio_piospace_t piosp;
- unsigned long s;
- char name[1024];
-
- /*
- * Look through the bridge data structures for the pciio_piospace_t
- * structure corresponding to 'pciaddr'
- */
- s = pcibr_lock(pcibr_soft);
- piosp = pcibr_info->f_piospace;
- while (piosp) {
- /*
- * Piospace free can only be for the complete
- * chunk and not parts of it..
- */
- if (piosp->start == pciaddr) {
- if (piosp->count == req_size)
- break;
- /*
- * Improper size passed for freeing..
- * Print a message and break;
- */
- hwgraph_vertex_name_get(pconn_vhdl, name, 1024);
- printk(KERN_WARNING "pcibr_piospace_free: error");
- printk(KERN_WARNING "Device %s freeing size (0x%lx) different than allocated (0x%lx)",
- name, req_size, piosp->count);
- printk(KERN_WARNING "Freeing 0x%lx instead", piosp->count);
- break;
- }
- piosp = piosp->next;
- }
-
- if (!piosp) {
- printk(KERN_WARNING
- "pcibr_piospace_free: Address 0x%lx size 0x%lx - No match\n",
- pciaddr, req_size);
- pcibr_unlock(pcibr_soft, s);
- return;
- }
- piosp->free = 1;
- pcibr_unlock(pcibr_soft, s);
- return;
-}
-
-/* =====================================================================
- * DMA MANAGEMENT
- *
- * The Bridge ASIC provides three methods of doing
- * DMA: via a "direct map" register available in
- * 32-bit PCI space (which selects a contiguous 2G
- * address space on some other widget), via
- * "direct" addressing via 64-bit PCI space (all
- * destination information comes from the PCI
- * address, including transfer attributes), and via
- * a "mapped" region that allows a bunch of
- * different small mappings to be established with
- * the PMU.
- *
- * For efficiency, we most prefer to use the 32-bit
- * direct mapping facility, since it requires no
- * resource allocations. The advantage of using the
- * PMU over the 64-bit direct is that single-cycle
- * PCI addressing can be used; the advantage of
- * using 64-bit direct over PMU addressing is that
- * we do not have to allocate entries in the PMU.
- */
-
-/*
- * Convert PCI-generic software flags and Bridge-specific software flags
- * into Bridge-specific Direct Map attribute bits.
- */
-LOCAL iopaddr_t
-pcibr_flags_to_d64(unsigned flags, pcibr_soft_t pcibr_soft)
-{
- iopaddr_t attributes = 0;
-
- /* Sanity check: Bridge only allows use of VCHAN1 via 64-bit addrs */
-#ifdef LATER
- ASSERT_ALWAYS(!(flags & PCIBR_VCHAN1) || (flags & PCIIO_DMA_A64));
-#endif
-
- /* Generic macro flags
- */
- if (flags & PCIIO_DMA_DATA) { /* standard data channel */
- attributes &= ~PCI64_ATTR_BAR; /* no barrier bit */
- attributes |= PCI64_ATTR_PREF; /* prefetch on */
- }
- if (flags & PCIIO_DMA_CMD) { /* standard command channel */
- attributes |= PCI64_ATTR_BAR; /* barrier bit on */
- attributes &= ~PCI64_ATTR_PREF; /* disable prefetch */
- }
- /* Generic detail flags
- */
- if (flags & PCIIO_PREFETCH)
- attributes |= PCI64_ATTR_PREF;
- if (flags & PCIIO_NOPREFETCH)
- attributes &= ~PCI64_ATTR_PREF;
-
- /* the swap bit is in the address attributes for xbridge */
- if (pcibr_soft->bs_xbridge) {
- if (flags & PCIIO_BYTE_STREAM)
- attributes |= PCI64_ATTR_SWAP;
- if (flags & PCIIO_WORD_VALUES)
- attributes &= ~PCI64_ATTR_SWAP;
- }
-
- /* Provider-specific flags
- */
- if (flags & PCIBR_BARRIER)
- attributes |= PCI64_ATTR_BAR;
- if (flags & PCIBR_NOBARRIER)
- attributes &= ~PCI64_ATTR_BAR;
-
- if (flags & PCIBR_PREFETCH)
- attributes |= PCI64_ATTR_PREF;
- if (flags & PCIBR_NOPREFETCH)
- attributes &= ~PCI64_ATTR_PREF;
-
- if (flags & PCIBR_PRECISE)
- attributes |= PCI64_ATTR_PREC;
- if (flags & PCIBR_NOPRECISE)
- attributes &= ~PCI64_ATTR_PREC;
-
- if (flags & PCIBR_VCHAN1)
- attributes |= PCI64_ATTR_VIRTUAL;
- if (flags & PCIBR_VCHAN0)
- attributes &= ~PCI64_ATTR_VIRTUAL;
-
- return (attributes);
-}
-
-/*
- * Convert PCI-generic software flags and Bridge-specific software flags
- * into Bridge-specific Address Translation Entry attribute bits.
- */
-LOCAL bridge_ate_t
-pcibr_flags_to_ate(unsigned flags)
-{
- bridge_ate_t attributes;
-
- /* default if nothing specified:
- * NOBARRIER
- * NOPREFETCH
- * NOPRECISE
- * COHERENT
- * Plus the valid bit
- */
- attributes = ATE_CO | ATE_V;
-
- /* Generic macro flags
- */
- if (flags & PCIIO_DMA_DATA) { /* standard data channel */
- attributes &= ~ATE_BAR; /* no barrier */
- attributes |= ATE_PREF; /* prefetch on */
- }
- if (flags & PCIIO_DMA_CMD) { /* standard command channel */
- attributes |= ATE_BAR; /* barrier bit on */
- attributes &= ~ATE_PREF; /* disable prefetch */
- }
- /* Generic detail flags
- */
- if (flags & PCIIO_PREFETCH)
- attributes |= ATE_PREF;
- if (flags & PCIIO_NOPREFETCH)
- attributes &= ~ATE_PREF;
-
- /* Provider-specific flags
- */
- if (flags & PCIBR_BARRIER)
- attributes |= ATE_BAR;
- if (flags & PCIBR_NOBARRIER)
- attributes &= ~ATE_BAR;
-
- if (flags & PCIBR_PREFETCH)
- attributes |= ATE_PREF;
- if (flags & PCIBR_NOPREFETCH)
- attributes &= ~ATE_PREF;
-
- if (flags & PCIBR_PRECISE)
- attributes |= ATE_PREC;
- if (flags & PCIBR_NOPRECISE)
- attributes &= ~ATE_PREC;
-
- return (attributes);
-}
-
-/*ARGSUSED */
-pcibr_dmamap_t
-pcibr_dmamap_alloc(devfs_handle_t pconn_vhdl,
- device_desc_t dev_desc,
- size_t req_size_max,
- unsigned flags)
-{
- pciio_info_t pciio_info = pciio_info_get(pconn_vhdl);
- pcibr_soft_t pcibr_soft = (pcibr_soft_t) pciio_info_mfast_get(pciio_info);
- devfs_handle_t xconn_vhdl = pcibr_soft->bs_conn;
- pciio_slot_t slot;
- xwidgetnum_t xio_port;
-
- xtalk_dmamap_t xtalk_dmamap;
- pcibr_dmamap_t pcibr_dmamap;
- int ate_count;
- int ate_index;
-
- /* merge in forced flags */
- flags |= pcibr_soft->bs_dma_flags;
-
-#ifdef IRIX
- NEWf(pcibr_dmamap, flags);
-#else
- /*
- * On SNIA64, these maps are pre-allocated because pcibr_dmamap_alloc()
- * can be called within an interrupt thread.
- */
- pcibr_dmamap = (pcibr_dmamap_t)get_free_pciio_dmamap(pcibr_soft->bs_vhdl);
-#endif
-
- if (!pcibr_dmamap)
- return 0;
-
- xtalk_dmamap = xtalk_dmamap_alloc(xconn_vhdl, dev_desc, req_size_max,
- flags & DMAMAP_FLAGS);
- if (!xtalk_dmamap) {
-#if PCIBR_ATE_DEBUG
- printk("pcibr_attach: xtalk_dmamap_alloc failed\n");
-#endif
-#ifdef IRIX
- DEL(pcibr_dmamap);
-#else
- free_pciio_dmamap(pcibr_dmamap);
-#endif
- return 0;
- }
- xio_port = pcibr_soft->bs_mxid;
- slot = pciio_info_slot_get(pciio_info);
-
- pcibr_dmamap->bd_dev = pconn_vhdl;
- pcibr_dmamap->bd_slot = slot;
- pcibr_dmamap->bd_soft = pcibr_soft;
- pcibr_dmamap->bd_xtalk = xtalk_dmamap;
- pcibr_dmamap->bd_max_size = req_size_max;
- pcibr_dmamap->bd_xio_port = xio_port;
-
- if (flags & PCIIO_DMA_A64) {
- if (!pcibr_try_set_device(pcibr_soft, slot, flags, BRIDGE_DEV_D64_BITS)) {
- iopaddr_t pci_addr;
- int have_rrbs;
- int min_rrbs;
-
- /* Device is capable of A64 operations,
- * and the attributes of the DMA are
- * consistent with any previous DMA
- * mappings using shared resources.
- */
-
- pci_addr = pcibr_flags_to_d64(flags, pcibr_soft);
-
- pcibr_dmamap->bd_flags = flags;
- pcibr_dmamap->bd_xio_addr = 0;
- pcibr_dmamap->bd_pci_addr = pci_addr;
-
- /* Make sure we have an RRB (or two).
- */
- if (!(pcibr_soft->bs_rrb_fixed & (1 << slot))) {
- if (flags & PCIBR_VCHAN1)
- slot += PCIBR_RRB_SLOT_VIRTUAL;
- have_rrbs = pcibr_soft->bs_rrb_valid[slot];
- if (have_rrbs < 2) {
- if (pci_addr & PCI64_ATTR_PREF)
- min_rrbs = 2;
- else
- min_rrbs = 1;
- if (have_rrbs < min_rrbs)
- do_pcibr_rrb_autoalloc(pcibr_soft, slot, min_rrbs - have_rrbs);
- }
- }
-#if PCIBR_ATE_DEBUG
- printk("pcibr_dmamap_alloc: using direct64\n");
-#endif
- return pcibr_dmamap;
- }
-#if PCIBR_ATE_DEBUG
- printk("pcibr_dmamap_alloc: unable to use direct64\n");
-#endif
- flags &= ~PCIIO_DMA_A64;
- }
- if (flags & PCIIO_FIXED) {
- /* warning: mappings may fail later,
- * if direct32 can't get to the address.
- */
- if (!pcibr_try_set_device(pcibr_soft, slot, flags, BRIDGE_DEV_D32_BITS)) {
- /* User desires DIRECT A32 operations,
- * and the attributes of the DMA are
- * consistent with any previous DMA
- * mappings using shared resources.
- * Mapping calls may fail if target
- * is outside the direct32 range.
- */
-#if PCIBR_ATE_DEBUG
- printk("pcibr_dmamap_alloc: using direct32\n");
-#endif
- pcibr_dmamap->bd_flags = flags;
- pcibr_dmamap->bd_xio_addr = pcibr_soft->bs_dir_xbase;
- pcibr_dmamap->bd_pci_addr = PCI32_DIRECT_BASE;
- return pcibr_dmamap;
- }
-#if PCIBR_ATE_DEBUG
- printk("pcibr_dmamap_alloc: unable to use direct32\n");
-#endif
- /* If the user demands FIXED and we can't
- * give it to him, fail.
- */
- xtalk_dmamap_free(xtalk_dmamap);
-#ifdef IRIX
- DEL(pcibr_dmamap);
-#else
- free_pciio_dmamap(pcibr_dmamap);
-#endif
- return 0;
- }
- /*
- * Allocate Address Translation Entries from the mapping RAM.
- * Unless the PCIBR_NO_ATE_ROUNDUP flag is specified,
- * the maximum number of ATEs is based on the worst-case
- * scenario, where the requested target is in the
- * last byte of an ATE; thus, mapping IOPGSIZE+2
- * does end up requiring three ATEs.
- */
- if (!(flags & PCIBR_NO_ATE_ROUNDUP)) {
- ate_count = IOPG((IOPGSIZE - 1) /* worst case start offset */
- +req_size_max /* max mapping bytes */
- - 1) + 1; /* round UP */
- } else { /* assume requested target is page aligned */
- ate_count = IOPG(req_size_max /* max mapping bytes */
- - 1) + 1; /* round UP */
- }
-
- ate_index = pcibr_ate_alloc(pcibr_soft, ate_count);
-
- if (ate_index != -1) {
- if (!pcibr_try_set_device(pcibr_soft, slot, flags, BRIDGE_DEV_PMU_BITS)) {
- bridge_ate_t ate_proto;
- int have_rrbs;
- int min_rrbs;
-
-#if PCIBR_ATE_DEBUG
- printk("pcibr_dmamap_alloc: using PMU\n");
-#endif
-
- ate_proto = pcibr_flags_to_ate(flags);
-
- pcibr_dmamap->bd_flags = flags;
- pcibr_dmamap->bd_pci_addr =
- PCI32_MAPPED_BASE + IOPGSIZE * ate_index;
- /*
- * for xbridge the byte-swap bit == bit 29 of PCI address
- */
- if (pcibr_soft->bs_xbridge) {
- if (flags & PCIIO_BYTE_STREAM)
- ATE_SWAP_ON(pcibr_dmamap->bd_pci_addr);
- /*
- * If swap was set in bss_device in pcibr_endian_set()
- * we need to change the address bit.
- */
- if (pcibr_soft->bs_slot[slot].bss_device &
- BRIDGE_DEV_SWAP_PMU)
- ATE_SWAP_ON(pcibr_dmamap->bd_pci_addr);
- if (flags & PCIIO_WORD_VALUES)
- ATE_SWAP_OFF(pcibr_dmamap->bd_pci_addr);
- }
- pcibr_dmamap->bd_xio_addr = 0;
- pcibr_dmamap->bd_ate_ptr = pcibr_ate_addr(pcibr_soft, ate_index);
- pcibr_dmamap->bd_ate_index = ate_index;
- pcibr_dmamap->bd_ate_count = ate_count;
- pcibr_dmamap->bd_ate_proto = ate_proto;
-
- /* Make sure we have an RRB (or two).
- */
- if (!(pcibr_soft->bs_rrb_fixed & (1 << slot))) {
- have_rrbs = pcibr_soft->bs_rrb_valid[slot];
- if (have_rrbs < 2) {
- if (ate_proto & ATE_PREF)
- min_rrbs = 2;
- else
- min_rrbs = 1;
- if (have_rrbs < min_rrbs)
- do_pcibr_rrb_autoalloc(pcibr_soft, slot, min_rrbs - have_rrbs);
- }
- }
- if (ate_index >= pcibr_soft->bs_int_ate_size &&
- !pcibr_soft->bs_xbridge) {
- bridge_t *bridge = pcibr_soft->bs_base;
- volatile unsigned *cmd_regp;
- unsigned cmd_reg;
- unsigned long s;
-
- pcibr_dmamap->bd_flags |= PCIBR_DMAMAP_SSRAM;
-
- s = pcibr_lock(pcibr_soft);
- cmd_regp = &(bridge->
- b_type0_cfg_dev[slot].
- l[PCI_CFG_COMMAND / 4]);
- cmd_reg = *cmd_regp;
- pcibr_soft->bs_slot[slot].bss_cmd_pointer = cmd_regp;
- pcibr_soft->bs_slot[slot].bss_cmd_shadow = cmd_reg;
- pcibr_unlock(pcibr_soft, s);
- }
- return pcibr_dmamap;
- }
-#if PCIBR_ATE_DEBUG
- printk("pcibr_dmamap_alloc: unable to use PMU\n");
-#endif
- pcibr_ate_free(pcibr_soft, ate_index, ate_count);
- }
- /* total failure: sorry, you just can't
- * get from here to there that way.
- */
-#if PCIBR_ATE_DEBUG
- printk("pcibr_dmamap_alloc: complete failure.\n");
-#endif
- xtalk_dmamap_free(xtalk_dmamap);
-#ifdef IRIX
- DEL(pcibr_dmamap);
-#else
- free_pciio_dmamap(pcibr_dmamap);
-#endif
- return 0;
-}
-
-/*ARGSUSED */
-void
-pcibr_dmamap_free(pcibr_dmamap_t pcibr_dmamap)
-{
- pcibr_soft_t pcibr_soft = pcibr_dmamap->bd_soft;
- pciio_slot_t slot = pcibr_dmamap->bd_slot;
-
- unsigned flags = pcibr_dmamap->bd_flags;
-
- /* Make sure that bss_ext_ates_active
- * is properly kept up to date.
- */
-
- if (PCIBR_DMAMAP_BUSY & flags)
- if (PCIBR_DMAMAP_SSRAM & flags)
- atomic_dec(&(pcibr_soft->bs_slot[slot]. bss_ext_ates_active));
-
- xtalk_dmamap_free(pcibr_dmamap->bd_xtalk);
-
- if (pcibr_dmamap->bd_flags & PCIIO_DMA_A64) {
- pcibr_release_device(pcibr_soft, slot, BRIDGE_DEV_D64_BITS);
- }
- if (pcibr_dmamap->bd_ate_count) {
- pcibr_ate_free(pcibr_dmamap->bd_soft,
- pcibr_dmamap->bd_ate_index,
- pcibr_dmamap->bd_ate_count);
- pcibr_release_device(pcibr_soft, slot, BRIDGE_DEV_PMU_BITS);
- }
-#ifdef IRIX
- DEL(pcibr_dmamap);
-#else
- free_pciio_dmamap(pcibr_dmamap);
-#endif
-}
-
-/*
- * Setup an Address Translation Entry as specified. Use either the Bridge
- * internal maps or the external map RAM, as appropriate.
- */
-LOCAL bridge_ate_p
-pcibr_ate_addr(pcibr_soft_t pcibr_soft,
- int ate_index)
-{
- bridge_t *bridge = pcibr_soft->bs_base;
-
- return (ate_index < pcibr_soft->bs_int_ate_size)
- ? &(bridge->b_int_ate_ram[ate_index].wr)
- : &(bridge->b_ext_ate_ram[ate_index]);
-}
-
-/*
- * pcibr_addr_xio_to_pci: given a PIO range, hand
- * back the corresponding base PCI MEM address;
- * this is used to short-circuit DMA requests that
- * loop back onto this PCI bus.
- */
-LOCAL iopaddr_t
-pcibr_addr_xio_to_pci(pcibr_soft_t soft,
- iopaddr_t xio_addr,
- size_t req_size)
-{
- iopaddr_t xio_lim = xio_addr + req_size - 1;
- iopaddr_t pci_addr;
- pciio_slot_t slot;
-
- if ((xio_addr >= BRIDGE_PCI_MEM32_BASE) &&
- (xio_lim <= BRIDGE_PCI_MEM32_LIMIT)) {
- pci_addr = xio_addr - BRIDGE_PCI_MEM32_BASE;
- return pci_addr;
- }
- if ((xio_addr >= BRIDGE_PCI_MEM64_BASE) &&
- (xio_lim <= BRIDGE_PCI_MEM64_LIMIT)) {
- pci_addr = xio_addr - BRIDGE_PCI_MEM64_BASE;
- return pci_addr;
- }
- for (slot = 0; slot < 8; ++slot)
- if ((xio_addr >= BRIDGE_DEVIO(slot)) &&
- (xio_lim < BRIDGE_DEVIO(slot + 1))) {
- bridgereg_t dev;
-
- dev = soft->bs_slot[slot].bss_device;
- pci_addr = dev & BRIDGE_DEV_OFF_MASK;
- pci_addr <<= BRIDGE_DEV_OFF_ADDR_SHFT;
- pci_addr += xio_addr - BRIDGE_DEVIO(slot);
- return (dev & BRIDGE_DEV_DEV_IO_MEM) ? pci_addr : PCI_NOWHERE;
- }
- return 0;
-}
-
-/* We are starting to get more complexity
- * surrounding writing ATEs, so pull
- * the writing code into this new function.
- */
-
-#if PCIBR_FREEZE_TIME
-#define ATE_FREEZE() s = ate_freeze(pcibr_dmamap, &freeze_time, cmd_regs)
-#else
-#define ATE_FREEZE() s = ate_freeze(pcibr_dmamap, cmd_regs)
-#endif
-
-LOCAL unsigned
-ate_freeze(pcibr_dmamap_t pcibr_dmamap,
-#if PCIBR_FREEZE_TIME
- unsigned *freeze_time_ptr,
-#endif
- unsigned *cmd_regs)
-{
- pcibr_soft_t pcibr_soft = pcibr_dmamap->bd_soft;
-#ifdef LATER
- int dma_slot = pcibr_dmamap->bd_slot;
-#endif
- int ext_ates = pcibr_dmamap->bd_flags & PCIBR_DMAMAP_SSRAM;
- int slot;
-
- unsigned long s;
- unsigned cmd_reg;
- volatile unsigned *cmd_lwa;
- unsigned cmd_lwd;
-
- if (!ext_ates)
- return 0;
-
- /* Bridge Hardware Bug WAR #484930:
- * Bridge can't handle updating External ATEs
- * while DMA is occurring that uses External ATEs,
- * even if the particular ATEs involved are disjoint.
- */
-
- /* need to prevent anyone else from
- * unfreezing the grant while we
- * are working; also need to prevent
- * this thread from being interrupted
- * to keep PCI grant freeze time
- * at an absolute minimum.
- */
- s = pcibr_lock(pcibr_soft);
-
-#ifdef LATER
- /* just in case pcibr_dmamap_done was not called */
- if (pcibr_dmamap->bd_flags & PCIBR_DMAMAP_BUSY) {
- pcibr_dmamap->bd_flags &= ~PCIBR_DMAMAP_BUSY;
- if (pcibr_dmamap->bd_flags & PCIBR_DMAMAP_SSRAM)
- atomic_dec(&(pcibr_soft->bs_slot[dma_slot]. bss_ext_ates_active));
- xtalk_dmamap_done(pcibr_dmamap->bd_xtalk);
- }
-#endif /* LATER */
-#if PCIBR_FREEZE_TIME
- *freeze_time_ptr = get_timestamp();
-#endif
-
- cmd_lwa = 0;
- for (slot = 0; slot < 8; ++slot)
- if (atomic_read(&pcibr_soft->bs_slot[slot].bss_ext_ates_active)) {
- cmd_reg = pcibr_soft->
- bs_slot[slot].
- bss_cmd_shadow;
- if (cmd_reg & PCI_CMD_BUS_MASTER) {
- cmd_lwa = pcibr_soft->
- bs_slot[slot].
- bss_cmd_pointer;
- cmd_lwd = cmd_reg ^ PCI_CMD_BUS_MASTER;
- cmd_lwa[0] = cmd_lwd;
- }
- cmd_regs[slot] = cmd_reg;
- } else
- cmd_regs[slot] = 0;
-
- if (cmd_lwa) {
- bridge_t *bridge = pcibr_soft->bs_base;
-
- /* Read the last master bit that has been cleared. This PIO read
- * on the PCI bus is to ensure the completion of any DMAs that
- * are due to bus requests issued by PCI devices before the
- * clearing of master bits.
- */
- cmd_lwa[0];
-
- /* Flush all the write buffers in the bridge */
- for (slot = 0; slot < 8; ++slot)
- if (atomic_read(&pcibr_soft->bs_slot[slot].bss_ext_ates_active)) {
- /* Flush the write buffer associated with this
- * PCI device which might be using dma map RAM.
- */
- bridge->b_wr_req_buf[slot].reg;
- }
- }
- return s;
-}
-
-#define ATE_WRITE() ate_write(ate_ptr, ate_count, ate)
-
-LOCAL void
-ate_write(bridge_ate_p ate_ptr,
- int ate_count,
- bridge_ate_t ate)
-{
- while (ate_count-- > 0) {
- *ate_ptr++ = ate;
- ate += IOPGSIZE;
- }
-}
-
-
-#if PCIBR_FREEZE_TIME
-#define ATE_THAW() ate_thaw(pcibr_dmamap, ate_index, ate, ate_total, freeze_time, cmd_regs, s)
-#else
-#define ATE_THAW() ate_thaw(pcibr_dmamap, ate_index, cmd_regs, s)
-#endif
-
-LOCAL void
-ate_thaw(pcibr_dmamap_t pcibr_dmamap,
- int ate_index,
-#if PCIBR_FREEZE_TIME
- bridge_ate_t ate,
- int ate_total,
- unsigned freeze_time_start,
-#endif
- unsigned *cmd_regs,
- unsigned s)
-{
- pcibr_soft_t pcibr_soft = pcibr_dmamap->bd_soft;
- int dma_slot = pcibr_dmamap->bd_slot;
- int slot;
- bridge_t *bridge = pcibr_soft->bs_base;
- int ext_ates = pcibr_dmamap->bd_flags & PCIBR_DMAMAP_SSRAM;
-
- unsigned cmd_reg;
-
-#if PCIBR_FREEZE_TIME
- unsigned freeze_time;
- static unsigned max_freeze_time = 0;
- static unsigned max_ate_total;
-#endif
-
- if (!ext_ates)
- return;
-
- /* restore cmd regs */
- for (slot = 0; slot < 8; ++slot)
- if ((cmd_reg = cmd_regs[slot]) & PCI_CMD_BUS_MASTER)
- bridge->b_type0_cfg_dev[slot].l[PCI_CFG_COMMAND / 4] = cmd_reg;
-
- pcibr_dmamap->bd_flags |= PCIBR_DMAMAP_BUSY;
- atomic_inc(&(pcibr_soft->bs_slot[dma_slot]. bss_ext_ates_active));
-
-#if PCIBR_FREEZE_TIME
- freeze_time = get_timestamp() - freeze_time_start;
-
- if ((max_freeze_time < freeze_time) ||
- (max_ate_total < ate_total)) {
- if (max_freeze_time < freeze_time)
- max_freeze_time = freeze_time;
- if (max_ate_total < ate_total)
- max_ate_total = ate_total;
- pcibr_unlock(pcibr_soft, s);
- printk("%s: pci freeze time %d usec for %d ATEs\n"
- "\tfirst ate: %R\n",
- pcibr_soft->bs_name,
- freeze_time * 1000 / 1250,
- ate_total,
- ate, ate_bits);
- } else
-#endif
- pcibr_unlock(pcibr_soft, s);
-}
-
-/*ARGSUSED */
-iopaddr_t
-pcibr_dmamap_addr(pcibr_dmamap_t pcibr_dmamap,
- paddr_t paddr,
- size_t req_size)
-{
- pcibr_soft_t pcibr_soft;
- iopaddr_t xio_addr;
- xwidgetnum_t xio_port;
- iopaddr_t pci_addr;
- unsigned flags;
-
- ASSERT(pcibr_dmamap != NULL);
- ASSERT(req_size > 0);
- ASSERT(req_size <= pcibr_dmamap->bd_max_size);
-
- pcibr_soft = pcibr_dmamap->bd_soft;
-
- flags = pcibr_dmamap->bd_flags;
-
- xio_addr = xtalk_dmamap_addr(pcibr_dmamap->bd_xtalk, paddr, req_size);
- if (XIO_PACKED(xio_addr)) {
- xio_port = XIO_PORT(xio_addr);
- xio_addr = XIO_ADDR(xio_addr);
- } else
- xio_port = pcibr_dmamap->bd_xio_port;
-
- /* If this DMA is to an address that
- * refers back to this Bridge chip,
- * reduce it back to the correct
- * PCI MEM address.
- */
- if (xio_port == pcibr_soft->bs_xid) {
- pci_addr = pcibr_addr_xio_to_pci(pcibr_soft, xio_addr, req_size);
- } else if (flags & PCIIO_DMA_A64) {
- /* A64 DMA:
- * always use 64-bit direct mapping,
- * which always works.
- * Device(x) was set up during
- * dmamap allocation.
- */
-
- /* attributes are already bundled up into bd_pci_addr.
- */
- pci_addr = pcibr_dmamap->bd_pci_addr
- | ((uint64_t) xio_port << PCI64_ATTR_TARG_SHFT)
- | xio_addr;
-
- /* Bridge Hardware WAR #482836:
- * If the transfer is not cache aligned
- * and the Bridge Rev is <= B, force
- * prefetch to be off.
- */
- if (flags & PCIBR_NOPREFETCH)
- pci_addr &= ~PCI64_ATTR_PREF;
-
-#if DEBUG && PCIBR_DMA_DEBUG
- printk("pcibr_dmamap_addr (direct64):\n"
- "\twanted paddr [0x%x..0x%x]\n"
- "\tXIO port 0x%x offset 0x%x\n"
- "\treturning PCI 0x%x\n",
- paddr, paddr + req_size - 1,
- xio_port, xio_addr, pci_addr);
-#endif
- } else if (flags & PCIIO_FIXED) {
- /* A32 direct DMA:
- * always use 32-bit direct mapping,
- * which may fail.
- * Device(x) was set up during
- * dmamap allocation.
- */
-
- if (xio_port != pcibr_soft->bs_dir_xport)
- pci_addr = 0; /* wrong DIDN */
- else if (xio_addr < pcibr_dmamap->bd_xio_addr)
- pci_addr = 0; /* out of range */
- else if ((xio_addr + req_size) >
- (pcibr_dmamap->bd_xio_addr + BRIDGE_DMA_DIRECT_SIZE))
- pci_addr = 0; /* out of range */
- else
- pci_addr = pcibr_dmamap->bd_pci_addr +
- xio_addr - pcibr_dmamap->bd_xio_addr;
-
-#if DEBUG && PCIBR_DMA_DEBUG
- printk("pcibr_dmamap_addr (direct32):\n"
- "\twanted paddr [0x%x..0x%x]\n"
- "\tXIO port 0x%x offset 0x%x\n"
- "\treturning PCI 0x%x\n",
- paddr, paddr + req_size - 1,
- xio_port, xio_addr, pci_addr);
-#endif
- } else {
- bridge_t *bridge = pcibr_soft->bs_base;
- iopaddr_t offset = IOPGOFF(xio_addr);
- bridge_ate_t ate_proto = pcibr_dmamap->bd_ate_proto;
- int ate_count = IOPG(offset + req_size - 1) + 1;
-
- int ate_index = pcibr_dmamap->bd_ate_index;
- unsigned cmd_regs[8];
- unsigned s;
-
-#if PCIBR_FREEZE_TIME
- int ate_total = ate_count;
- unsigned freeze_time;
-#endif
-
-#if PCIBR_ATE_DEBUG
- bridge_ate_t ate_cmp;
- bridge_ate_p ate_cptr;
- unsigned ate_lo, ate_hi;
- int ate_bad = 0;
- int ate_rbc = 0;
-#endif
- bridge_ate_p ate_ptr = pcibr_dmamap->bd_ate_ptr;
- bridge_ate_t ate;
-
- /* Bridge Hardware WAR #482836:
- * If the transfer is not cache aligned
- * and the Bridge Rev is <= B, force
- * prefetch to be off.
- */
- if (flags & PCIBR_NOPREFETCH)
- ate_proto &= ~ATE_PREF;
-
- ate = ate_proto
- | (xio_port << ATE_TIDSHIFT)
- | (xio_addr - offset);
-
- pci_addr = pcibr_dmamap->bd_pci_addr + offset;
-
- /* Fill in our mapping registers
- * with the appropriate xtalk data,
- * and hand back the PCI address.
- */
-
- ASSERT(ate_count > 0);
- if (ate_count <= pcibr_dmamap->bd_ate_count) {
- ATE_FREEZE();
- ATE_WRITE();
- ATE_THAW();
- bridge->b_wid_tflush; /* wait until Bridge PIO complete */
- } else {
- /* The number of ATE's required is greater than the number
- * allocated for this map. One way this can happen is if
- * pcibr_dmamap_alloc() was called with the PCIBR_NO_ATE_ROUNDUP
- * flag, and then when that map is used (right now), the
- * target address tells us we really did need to roundup.
- * The other possibility is that the map is just plain too
- * small to handle the requested target area.
- */
-#if PCIBR_ATE_DEBUG
- printk(KERN_WARNING "pcibr_dmamap_addr :\n"
- "\twanted paddr [0x%x..0x%x]\n"
- "\tate_count 0x%x bd_ate_count 0x%x\n"
- "\tATE's required > number allocated\n",
- paddr, paddr + req_size - 1,
- ate_count, pcibr_dmamap->bd_ate_count);
-#endif
- pci_addr = 0;
- }
-
- }
- return pci_addr;
-}
-
-/*ARGSUSED */
-alenlist_t
-pcibr_dmamap_list(pcibr_dmamap_t pcibr_dmamap,
- alenlist_t palenlist,
- unsigned flags)
-{
- pcibr_soft_t pcibr_soft;
- bridge_t *bridge=NULL;
-
- unsigned al_flags = (flags & PCIIO_NOSLEEP) ? AL_NOSLEEP : 0;
- int inplace = flags & PCIIO_INPLACE;
-
- alenlist_t pciio_alenlist = 0;
- alenlist_t xtalk_alenlist;
- size_t length;
- iopaddr_t offset;
- unsigned direct64;
- int ate_index = 0;
- int ate_count = 0;
- int ate_total = 0;
- bridge_ate_p ate_ptr = (bridge_ate_p)0;
- bridge_ate_t ate_proto = (bridge_ate_t)0;
- bridge_ate_t ate_prev;
- bridge_ate_t ate;
- alenaddr_t xio_addr;
- xwidgetnum_t xio_port;
- iopaddr_t pci_addr;
- alenaddr_t new_addr;
-
- unsigned cmd_regs[8];
- unsigned s = 0;
-
-#if PCIBR_FREEZE_TIME
- unsigned freeze_time;
-#endif
- int ate_freeze_done = 0; /* To pair ATE_THAW
- * with an ATE_FREEZE
- */
-
- pcibr_soft = pcibr_dmamap->bd_soft;
-
- xtalk_alenlist = xtalk_dmamap_list(pcibr_dmamap->bd_xtalk, palenlist,
- flags & DMAMAP_FLAGS);
- if (!xtalk_alenlist)
- goto fail;
-
- alenlist_cursor_init(xtalk_alenlist, 0, NULL);
-
- if (inplace) {
- pciio_alenlist = xtalk_alenlist;
- } else {
- pciio_alenlist = alenlist_create(al_flags);
- if (!pciio_alenlist)
- goto fail;
- }
-
- direct64 = pcibr_dmamap->bd_flags & PCIIO_DMA_A64;
- if (!direct64) {
- bridge = pcibr_soft->bs_base;
- ate_ptr = pcibr_dmamap->bd_ate_ptr;
- ate_index = pcibr_dmamap->bd_ate_index;
- ate_proto = pcibr_dmamap->bd_ate_proto;
- ATE_FREEZE();
- ate_freeze_done = 1; /* Remember that we need to do an ATE_THAW */
- }
- pci_addr = pcibr_dmamap->bd_pci_addr;
-
- ate_prev = 0; /* matches no valid ATEs */
- while (ALENLIST_SUCCESS ==
- alenlist_get(xtalk_alenlist, NULL, 0,
- &xio_addr, &length, al_flags)) {
- if (XIO_PACKED(xio_addr)) {
- xio_port = XIO_PORT(xio_addr);
- xio_addr = XIO_ADDR(xio_addr);
- } else
- xio_port = pcibr_dmamap->bd_xio_port;
-
- if (xio_port == pcibr_soft->bs_xid) {
- new_addr = pcibr_addr_xio_to_pci(pcibr_soft, xio_addr, length);
- if (new_addr == PCI_NOWHERE)
- goto fail;
- } else if (direct64) {
- new_addr = pci_addr | xio_addr
- | ((uint64_t) xio_port << PCI64_ATTR_TARG_SHFT);
-
- /* Bridge Hardware WAR #482836:
- * If the transfer is not cache aligned
- * and the Bridge Rev is <= B, force
- * prefetch to be off.
- */
- if (flags & PCIBR_NOPREFETCH)
- new_addr &= ~PCI64_ATTR_PREF;
-
- } else {
- /* calculate the ate value for
- * the first address. If it
- * matches the previous
- * ATE written (ie. we had
- * multiple blocks in the
- * same IOPG), then back up
- * and reuse that ATE.
- *
- * We are NOT going to
- * aggressively try to
- * reuse any other ATEs.
- */
- offset = IOPGOFF(xio_addr);
- ate = ate_proto
- | (xio_port << ATE_TIDSHIFT)
- | (xio_addr - offset);
- if (ate == ate_prev) {
-#if PCIBR_ATE_DEBUG
- printk("pcibr_dmamap_list: ATE share\n");
-#endif
- ate_ptr--;
- ate_index--;
- pci_addr -= IOPGSIZE;
- }
- new_addr = pci_addr + offset;
-
- /* Fill in the hardware ATEs
- * that contain this block.
- */
- ate_count = IOPG(offset + length - 1) + 1;
- ate_total += ate_count;
-
- /* Ensure that this map contains enough ATE's */
- if (ate_total > pcibr_dmamap->bd_ate_count) {
-#if PCIBR_ATE_DEBUG
- printk(KERN_WARNING "pcibr_dmamap_list :\n"
- "\twanted xio_addr [0x%x..0x%x]\n"
- "\tate_total 0x%x bd_ate_count 0x%x\n"
- "\tATE's required > number allocated\n",
- xio_addr, xio_addr + length - 1,
- ate_total, pcibr_dmamap->bd_ate_count);
-#endif
- goto fail;
- }
-
- ATE_WRITE();
-
- ate_index += ate_count;
- ate_ptr += ate_count;
-
- ate_count <<= IOPFNSHIFT;
- ate += ate_count;
- pci_addr += ate_count;
- }
-
- /* write the PCI DMA address
- * out to the scatter-gather list.
- */
- if (inplace) {
- if (ALENLIST_SUCCESS !=
- alenlist_replace(pciio_alenlist, NULL,
- &new_addr, &length, al_flags))
- goto fail;
- } else {
- if (ALENLIST_SUCCESS !=
- alenlist_append(pciio_alenlist,
- new_addr, length, al_flags))
- goto fail;
- }
- }
- if (!inplace)
- alenlist_done(xtalk_alenlist);
-
- /* Reset the internal cursor of the alenlist to be returned back
- * to the caller.
- */
- alenlist_cursor_init(pciio_alenlist, 0, NULL);
-
-
- /* In case an ATE_FREEZE was done do the ATE_THAW to unroll all the
- * changes that ATE_FREEZE has done to implement the external SSRAM
- * bug workaround.
- */
- if (ate_freeze_done) {
- ATE_THAW();
- bridge->b_wid_tflush; /* wait until Bridge PIO complete */
- }
- return pciio_alenlist;
-
- fail:
- /* There are various points of failure after doing an ATE_FREEZE
- * We need to do an ATE_THAW. Otherwise the ATEs are locked forever.
- * The decision to do an ATE_THAW needs to be based on whether a
- * an ATE_FREEZE was done before.
- */
- if (ate_freeze_done) {
- ATE_THAW();
- bridge->b_wid_tflush;
- }
- if (pciio_alenlist && !inplace)
- alenlist_destroy(pciio_alenlist);
- return 0;
-}
-
-/*ARGSUSED */
-void
-pcibr_dmamap_done(pcibr_dmamap_t pcibr_dmamap)
-{
- /*
- * We could go through and invalidate ATEs here;
- * for performance reasons, we don't.
- * We also don't enforce the strict alternation
- * between _addr/_list and _done, but Hub does.
- */
-
- if (pcibr_dmamap->bd_flags & PCIBR_DMAMAP_BUSY) {
- pcibr_dmamap->bd_flags &= ~PCIBR_DMAMAP_BUSY;
-
- if (pcibr_dmamap->bd_flags & PCIBR_DMAMAP_SSRAM)
- atomic_dec(&(pcibr_dmamap->bd_soft->bs_slot[pcibr_dmamap->bd_slot]. bss_ext_ates_active));
- }
-
- xtalk_dmamap_done(pcibr_dmamap->bd_xtalk);
-}
-
-
-/*
- * For each bridge, the DIR_OFF value in the Direct Mapping Register
- * determines the PCI to Crosstalk memory mapping to be used for all
- * 32-bit Direct Mapping memory accesses. This mapping can be to any
- * node in the system. This function will return that compact node id.
- */
-
-/*ARGSUSED */
-cnodeid_t
-pcibr_get_dmatrans_node(devfs_handle_t pconn_vhdl)
-{
-
- pciio_info_t pciio_info = pciio_info_get(pconn_vhdl);
- pcibr_soft_t pcibr_soft = (pcibr_soft_t) pciio_info_mfast_get(pciio_info);
-
- return(NASID_TO_COMPACT_NODEID(NASID_GET(pcibr_soft->bs_dir_xbase)));
-}
-
-/*ARGSUSED */
-iopaddr_t
-pcibr_dmatrans_addr(devfs_handle_t pconn_vhdl,
- device_desc_t dev_desc,
- paddr_t paddr,
- size_t req_size,
- unsigned flags)
-{
- pciio_info_t pciio_info = pciio_info_get(pconn_vhdl);
- pcibr_soft_t pcibr_soft = (pcibr_soft_t) pciio_info_mfast_get(pciio_info);
- devfs_handle_t xconn_vhdl = pcibr_soft->bs_conn;
- pciio_slot_t pciio_slot = pciio_info_slot_get(pciio_info);
- pcibr_soft_slot_t slotp = &pcibr_soft->bs_slot[pciio_slot];
-
- xwidgetnum_t xio_port;
- iopaddr_t xio_addr;
- iopaddr_t pci_addr;
-
- int have_rrbs;
- int min_rrbs;
-
- /* merge in forced flags */
- flags |= pcibr_soft->bs_dma_flags;
-
- xio_addr = xtalk_dmatrans_addr(xconn_vhdl, 0, paddr, req_size,
- flags & DMAMAP_FLAGS);
-
- if (!xio_addr) {
-#if PCIBR_DMA_DEBUG
- printk("pcibr_dmatrans_addr:\n"
- "\tpciio connection point %v\n"
- "\txtalk connection point %v\n"
- "\twanted paddr [0x%x..0x%x]\n"
- "\txtalk_dmatrans_addr returned 0x%x\n",
- pconn_vhdl, xconn_vhdl,
- paddr, paddr + req_size - 1,
- xio_addr);
-#endif
- return 0;
- }
- /*
- * find which XIO port this goes to.
- */
- if (XIO_PACKED(xio_addr)) {
- if (xio_addr == XIO_NOWHERE) {
-#if PCIBR_DMA_DEBUG
- printk("pcibr_dmatrans_addr:\n"
- "\tpciio connection point %v\n"
- "\txtalk connection point %v\n"
- "\twanted paddr [0x%x..0x%x]\n"
- "\txtalk_dmatrans_addr returned 0x%x\n",
- pconn_vhdl, xconn_vhdl,
- paddr, paddr + req_size - 1,
- xio_addr);
-#endif
- return 0;
- }
- xio_port = XIO_PORT(xio_addr);
- xio_addr = XIO_ADDR(xio_addr);
-
- } else
- xio_port = pcibr_soft->bs_mxid;
-
- /*
- * If this DMA comes back to us,
- * return the PCI MEM address on
- * which it would land, or NULL
- * if the target is something
- * on bridge other than PCI MEM.
- */
- if (xio_port == pcibr_soft->bs_xid) {
- pci_addr = pcibr_addr_xio_to_pci(pcibr_soft, xio_addr, req_size);
- return pci_addr;
- }
- /* If the caller can use A64, try to
- * satisfy the request with the 64-bit
- * direct map. This can fail if the
- * configuration bits in Device(x)
- * conflict with our flags.
- */
-
- if (flags & PCIIO_DMA_A64) {
- pci_addr = slotp->bss_d64_base;
- if (!(flags & PCIBR_VCHAN1))
- flags |= PCIBR_VCHAN0;
- if ((pci_addr != PCIBR_D64_BASE_UNSET) &&
- (flags == slotp->bss_d64_flags)) {
-
- pci_addr |= xio_addr
- | ((uint64_t) xio_port << PCI64_ATTR_TARG_SHFT);
-
-#if DEBUG && PCIBR_DMA_DEBUG
-#if HWG_PERF_CHECK
- if (xio_addr != 0x20000000)
-#endif
- printk("pcibr_dmatrans_addr: [reuse]\n"
- "\tpciio connection point %v\n"
- "\txtalk connection point %v\n"
- "\twanted paddr [0x%x..0x%x]\n"
- "\txtalk_dmatrans_addr returned 0x%x\n"
- "\tdirect 64bit address is 0x%x\n",
- pconn_vhdl, xconn_vhdl,
- paddr, paddr + req_size - 1,
- xio_addr, pci_addr);
-#endif
- return (pci_addr);
- }
- if (!pcibr_try_set_device(pcibr_soft, pciio_slot, flags, BRIDGE_DEV_D64_BITS)) {
- pci_addr = pcibr_flags_to_d64(flags, pcibr_soft);
- slotp->bss_d64_flags = flags;
- slotp->bss_d64_base = pci_addr;
- pci_addr |= xio_addr
- | ((uint64_t) xio_port << PCI64_ATTR_TARG_SHFT);
-
- /* Make sure we have an RRB (or two).
- */
- if (!(pcibr_soft->bs_rrb_fixed & (1 << pciio_slot))) {
- if (flags & PCIBR_VCHAN1)
- pciio_slot += PCIBR_RRB_SLOT_VIRTUAL;
- have_rrbs = pcibr_soft->bs_rrb_valid[pciio_slot];
- if (have_rrbs < 2) {
- if (pci_addr & PCI64_ATTR_PREF)
- min_rrbs = 2;
- else
- min_rrbs = 1;
- if (have_rrbs < min_rrbs)
- do_pcibr_rrb_autoalloc(pcibr_soft, pciio_slot, min_rrbs - have_rrbs);
- }
- }
-#if PCIBR_DMA_DEBUG
-#if HWG_PERF_CHECK
- if (xio_addr != 0x20000000)
-#endif
- printk("pcibr_dmatrans_addr:\n"
- "\tpciio connection point %v\n"
- "\txtalk connection point %v\n"
- "\twanted paddr [0x%x..0x%x]\n"
- "\txtalk_dmatrans_addr returned 0x%x\n"
- "\tdirect 64bit address is 0x%x\n"
- "\tnew flags: 0x%x\n",
- pconn_vhdl, xconn_vhdl,
- paddr, paddr + req_size - 1,
- xio_addr, pci_addr, (uint64_t) flags);
-#endif
- return (pci_addr);
- }
- /* our flags conflict with Device(x).
- */
- flags = flags
- & ~PCIIO_DMA_A64
- & ~PCIBR_VCHAN0
- ;
-
-#if PCIBR_DMA_DEBUG
- printk("pcibr_dmatrans_addr:\n"
- "\tpciio connection point %v\n"
- "\txtalk connection point %v\n"
- "\twanted paddr [0x%x..0x%x]\n"
- "\txtalk_dmatrans_addr returned 0x%x\n"
- "\tUnable to set Device(x) bits for Direct-64\n",
- pconn_vhdl, xconn_vhdl,
- paddr, paddr + req_size - 1,
- xio_addr);
-#endif
- }
- /* Try to satisfy the request with the 32-bit direct
- * map. This can fail if the configuration bits in
- * Device(x) conflict with our flags, or if the
- * target address is outside where DIR_OFF points.
- */
- {
- size_t map_size = 1ULL << 31;
- iopaddr_t xio_base = pcibr_soft->bs_dir_xbase;
- iopaddr_t offset = xio_addr - xio_base;
- iopaddr_t endoff = req_size + offset;
-
- if ((req_size > map_size) ||
- (xio_addr < xio_base) ||
- (xio_port != pcibr_soft->bs_dir_xport) ||
- (endoff > map_size)) {
-#if PCIBR_DMA_DEBUG
- printk("pcibr_dmatrans_addr:\n"
- "\tpciio connection point %v\n"
- "\txtalk connection point %v\n"
- "\twanted paddr [0x%x..0x%x]\n"
- "\txtalk_dmatrans_addr returned 0x%x\n"
- "\txio region outside direct32 target\n",
- pconn_vhdl, xconn_vhdl,
- paddr, paddr + req_size - 1,
- xio_addr);
-#endif
- } else {
- pci_addr = slotp->bss_d32_base;
- if ((pci_addr != PCIBR_D32_BASE_UNSET) &&
- (flags == slotp->bss_d32_flags)) {
-
- pci_addr |= offset;
-
-#if DEBUG && PCIBR_DMA_DEBUG
- printk("pcibr_dmatrans_addr: [reuse]\n"
- "\tpciio connection point %v\n"
- "\txtalk connection point %v\n"
- "\twanted paddr [0x%x..0x%x]\n"
- "\txtalk_dmatrans_addr returned 0x%x\n"
- "\tmapped via direct32 offset 0x%x\n"
- "\twill DMA via pci addr 0x%x\n",
- pconn_vhdl, xconn_vhdl,
- paddr, paddr + req_size - 1,
- xio_addr, offset, pci_addr);
-#endif
- return (pci_addr);
- }
- if (!pcibr_try_set_device(pcibr_soft, pciio_slot, flags, BRIDGE_DEV_D32_BITS)) {
-
- pci_addr = PCI32_DIRECT_BASE;
- slotp->bss_d32_flags = flags;
- slotp->bss_d32_base = pci_addr;
- pci_addr |= offset;
-
- /* Make sure we have an RRB (or two).
- */
- if (!(pcibr_soft->bs_rrb_fixed & (1 << pciio_slot))) {
- have_rrbs = pcibr_soft->bs_rrb_valid[pciio_slot];
- if (have_rrbs < 2) {
- if (slotp->bss_device & BRIDGE_DEV_PREF)
- min_rrbs = 2;
- else
- min_rrbs = 1;
- if (have_rrbs < min_rrbs)
- do_pcibr_rrb_autoalloc(pcibr_soft, pciio_slot, min_rrbs - have_rrbs);
- }
- }
-#if PCIBR_DMA_DEBUG
-#if HWG_PERF_CHECK
- if (xio_addr != 0x20000000)
-#endif
- printk("pcibr_dmatrans_addr:\n"
- "\tpciio connection point %v\n"
- "\txtalk connection point %v\n"
- "\twanted paddr [0x%x..0x%x]\n"
- "\txtalk_dmatrans_addr returned 0x%x\n"
- "\tmapped via direct32 offset 0x%x\n"
- "\twill DMA via pci addr 0x%x\n"
- "\tnew flags: 0x%x\n",
- pconn_vhdl, xconn_vhdl,
- paddr, paddr + req_size - 1,
- xio_addr, offset, pci_addr, (uint64_t) flags);
-#endif
- return (pci_addr);
- }
- /* our flags conflict with Device(x).
- */
-#if PCIBR_DMA_DEBUG
- printk("pcibr_dmatrans_addr:\n"
- "\tpciio connection point %v\n"
- "\txtalk connection point %v\n"
- "\twanted paddr [0x%x..0x%x]\n"
- "\txtalk_dmatrans_addr returned 0x%x\n"
- "\tUnable to set Device(x) bits for Direct-32\n",
- pconn_vhdl, xconn_vhdl,
- paddr, paddr + req_size - 1,
- xio_addr);
-#endif
- }
- }
-
-#if PCIBR_DMA_DEBUG
- printk("pcibr_dmatrans_addr:\n"
- "\tpciio connection point %v\n"
- "\txtalk connection point %v\n"
- "\twanted paddr [0x%x..0x%x]\n"
- "\txtalk_dmatrans_addr returned 0x%x\n"
- "\tno acceptable PCI address found or constructable\n",
- pconn_vhdl, xconn_vhdl,
- paddr, paddr + req_size - 1,
- xio_addr);
-#endif
-
- return 0;
-}
-
-/*ARGSUSED */
-alenlist_t
-pcibr_dmatrans_list(devfs_handle_t pconn_vhdl,
- device_desc_t dev_desc,
- alenlist_t palenlist,
- unsigned flags)
-{
- pciio_info_t pciio_info = pciio_info_get(pconn_vhdl);
- pcibr_soft_t pcibr_soft = (pcibr_soft_t) pciio_info_mfast_get(pciio_info);
- devfs_handle_t xconn_vhdl = pcibr_soft->bs_conn;
- pciio_slot_t pciio_slot = pciio_info_slot_get(pciio_info);
- pcibr_soft_slot_t slotp = &pcibr_soft->bs_slot[pciio_slot];
- xwidgetnum_t xio_port;
-
- alenlist_t pciio_alenlist = 0;
- alenlist_t xtalk_alenlist = 0;
-
- int inplace;
- unsigned direct64;
- unsigned al_flags;
-
- iopaddr_t xio_base;
- alenaddr_t xio_addr;
- size_t xio_size;
-
- size_t map_size;
- iopaddr_t pci_base;
- alenaddr_t pci_addr;
-
- unsigned relbits = 0;
-
- /* merge in forced flags */
- flags |= pcibr_soft->bs_dma_flags;
-
- inplace = flags & PCIIO_INPLACE;
- direct64 = flags & PCIIO_DMA_A64;
- al_flags = (flags & PCIIO_NOSLEEP) ? AL_NOSLEEP : 0;
-
- if (direct64) {
- map_size = 1ull << 48;
- xio_base = 0;
- pci_base = slotp->bss_d64_base;
- if ((pci_base != PCIBR_D64_BASE_UNSET) &&
- (flags == slotp->bss_d64_flags)) {
- /* reuse previous base info */
- } else if (pcibr_try_set_device(pcibr_soft, pciio_slot, flags, BRIDGE_DEV_D64_BITS) < 0) {
- /* DMA configuration conflict */
- goto fail;
- } else {
- relbits = BRIDGE_DEV_D64_BITS;
- pci_base =
- pcibr_flags_to_d64(flags, pcibr_soft);
- }
- } else {
- xio_base = pcibr_soft->bs_dir_xbase;
- map_size = 1ull << 31;
- pci_base = slotp->bss_d32_base;
- if ((pci_base != PCIBR_D32_BASE_UNSET) &&
- (flags == slotp->bss_d32_flags)) {
- /* reuse previous base info */
- } else if (pcibr_try_set_device(pcibr_soft, pciio_slot, flags, BRIDGE_DEV_D32_BITS) < 0) {
- /* DMA configuration conflict */
- goto fail;
- } else {
- relbits = BRIDGE_DEV_D32_BITS;
- pci_base = PCI32_DIRECT_BASE;
- }
- }
-
- xtalk_alenlist = xtalk_dmatrans_list(xconn_vhdl, 0, palenlist,
- flags & DMAMAP_FLAGS);
- if (!xtalk_alenlist)
- goto fail;
-
- alenlist_cursor_init(xtalk_alenlist, 0, NULL);
-
- if (inplace) {
- pciio_alenlist = xtalk_alenlist;
- } else {
- pciio_alenlist = alenlist_create(al_flags);
- if (!pciio_alenlist)
- goto fail;
- }
-
- while (ALENLIST_SUCCESS ==
- alenlist_get(xtalk_alenlist, NULL, 0,
- &xio_addr, &xio_size, al_flags)) {
-
- /*
- * find which XIO port this goes to.
- */
- if (XIO_PACKED(xio_addr)) {
- if (xio_addr == XIO_NOWHERE) {
-#if PCIBR_DMA_DEBUG
- printk("pcibr_dmatrans_addr:\n"
- "\tpciio connection point %v\n"
- "\txtalk connection point %v\n"
- "\twanted paddr [0x%x..0x%x]\n"
- "\txtalk_dmatrans_addr returned 0x%x\n",
- pconn_vhdl, xconn_vhdl,
- paddr, paddr + req_size - 1,
- xio_addr);
-#endif
- return 0;
- }
- xio_port = XIO_PORT(xio_addr);
- xio_addr = XIO_ADDR(xio_addr);
- } else
- xio_port = pcibr_soft->bs_mxid;
-
- /*
- * If this DMA comes back to us,
- * return the PCI MEM address on
- * which it would land, or NULL
- * if the target is something
- * on bridge other than PCI MEM.
- */
- if (xio_port == pcibr_soft->bs_xid) {
- pci_addr = pcibr_addr_xio_to_pci(pcibr_soft, xio_addr, xio_size);
- if ( (pci_addr == (alenaddr_t)NULL) )
- goto fail;
- } else if (direct64) {
- ASSERT(xio_port != 0);
- pci_addr = pci_base | xio_addr
- | ((uint64_t) xio_port << PCI64_ATTR_TARG_SHFT);
- } else {
- iopaddr_t offset = xio_addr - xio_base;
- iopaddr_t endoff = xio_size + offset;
-
- if ((xio_size > map_size) ||
- (xio_addr < xio_base) ||
- (xio_port != pcibr_soft->bs_dir_xport) ||
- (endoff > map_size))
- goto fail;
-
- pci_addr = pci_base + (xio_addr - xio_base);
- }
-
- /* write the PCI DMA address
- * out to the scatter-gather list.
- */
- if (inplace) {
- if (ALENLIST_SUCCESS !=
- alenlist_replace(pciio_alenlist, NULL,
- &pci_addr, &xio_size, al_flags))
- goto fail;
- } else {
- if (ALENLIST_SUCCESS !=
- alenlist_append(pciio_alenlist,
- pci_addr, xio_size, al_flags))
- goto fail;
- }
- }
-
- if (relbits) {
- if (direct64) {
- slotp->bss_d64_flags = flags;
- slotp->bss_d64_base = pci_base;
- } else {
- slotp->bss_d32_flags = flags;
- slotp->bss_d32_base = pci_base;
- }
- }
- if (!inplace)
- alenlist_done(xtalk_alenlist);
-
- /* Reset the internal cursor of the alenlist to be returned back
- * to the caller.
- */
- alenlist_cursor_init(pciio_alenlist, 0, NULL);
- return pciio_alenlist;
-
- fail:
- if (relbits)
- pcibr_release_device(pcibr_soft, pciio_slot, relbits);
- if (pciio_alenlist && !inplace)
- alenlist_destroy(pciio_alenlist);
- return 0;
-}
-
-void
-pcibr_dmamap_drain(pcibr_dmamap_t map)
-{
- xtalk_dmamap_drain(map->bd_xtalk);
-}
-
-void
-pcibr_dmaaddr_drain(devfs_handle_t pconn_vhdl,
- paddr_t paddr,
- size_t bytes)
-{
- pciio_info_t pciio_info = pciio_info_get(pconn_vhdl);
- pcibr_soft_t pcibr_soft = (pcibr_soft_t) pciio_info_mfast_get(pciio_info);
- devfs_handle_t xconn_vhdl = pcibr_soft->bs_conn;
-
- xtalk_dmaaddr_drain(xconn_vhdl, paddr, bytes);
-}
-
-void
-pcibr_dmalist_drain(devfs_handle_t pconn_vhdl,
- alenlist_t list)
-{
- pciio_info_t pciio_info = pciio_info_get(pconn_vhdl);
- pcibr_soft_t pcibr_soft = (pcibr_soft_t) pciio_info_mfast_get(pciio_info);
- devfs_handle_t xconn_vhdl = pcibr_soft->bs_conn;
-
- xtalk_dmalist_drain(xconn_vhdl, list);
-}
-
-/*
- * Get the starting PCIbus address out of the given DMA map.
- * This function is supposed to be used by a close friend of PCI bridge
- * since it relies on the fact that the starting address of the map is fixed at
- * the allocation time in the current implementation of PCI bridge.
- */
-iopaddr_t
-pcibr_dmamap_pciaddr_get(pcibr_dmamap_t pcibr_dmamap)
-{
- return (pcibr_dmamap->bd_pci_addr);
-}
-
-/*
- * There are end cases where a deadlock can occur if interrupt
- * processing completes and the Bridge b_int_status bit is still set.
- *
- * One scenerio is if a second PCI interrupt occurs within 60ns of
- * the previous interrupt being cleared. In this case the Bridge
- * does not detect the transition, the Bridge b_int_status bit
- * remains set, and because no transition was detected no interrupt
- * packet is sent to the Hub/Heart.
- *
- * A second scenerio is possible when a b_int_status bit is being
- * shared by multiple devices:
- * Device #1 generates interrupt
- * Bridge b_int_status bit set
- * Device #2 generates interrupt
- * interrupt processing begins
- * ISR for device #1 runs and
- * clears interrupt
- * Device #1 generates interrupt
- * ISR for device #2 runs and
- * clears interrupt
- * (b_int_status bit still set)
- * interrupt processing completes
- *
- * Interrupt processing is now complete, but an interrupt is still
- * outstanding for Device #1. But because there was no transition of
- * the b_int_status bit, no interrupt packet will be generated and
- * a deadlock will occur.
- *
- * To avoid these deadlock situations, this function is used
- * to check if a specific Bridge b_int_status bit is set, and if so,
- * cause the setting of the corresponding interrupt bit.
- *
- * On a XBridge (IP35), we do this by writing the appropriate Bridge Force
- * Interrupt register.
- */
-void
-pcibr_force_interrupt(pcibr_intr_wrap_t wrap)
-{
- unsigned bit;
- pcibr_soft_t pcibr_soft = wrap->iw_soft;
- bridge_t *bridge = pcibr_soft->bs_base;
- cpuid_t cpuvertex_to_cpuid(devfs_handle_t vhdl);
-
- bit = wrap->iw_intr;
-
- if (pcibr_soft->bs_xbridge) {
- bridge->b_force_pin[bit].intr = 1;
- } else if ((1 << bit) & *wrap->iw_stat) {
- cpuid_t cpu;
- unsigned intr_bit;
- xtalk_intr_t xtalk_intr =
- pcibr_soft->bs_intr[bit].bsi_xtalk_intr;
-
- intr_bit = (short) xtalk_intr_vector_get(xtalk_intr);
- cpu = cpuvertex_to_cpuid(xtalk_intr_cpu_get(xtalk_intr));
- REMOTE_CPU_SEND_INTR(cpu, intr_bit);
- }
-}
-
-/* =====================================================================
- * INTERRUPT MANAGEMENT
- */
-
-static unsigned
-pcibr_intr_bits(pciio_info_t info,
- pciio_intr_line_t lines)
-{
- pciio_slot_t slot = pciio_info_slot_get(info);
- unsigned bbits = 0;
-
- /*
- * Currently favored mapping from PCI
- * slot number and INTA/B/C/D to Bridge
- * PCI Interrupt Bit Number:
- *
- * SLOT A B C D
- * 0 0 4 0 4
- * 1 1 5 1 5
- * 2 2 6 2 6
- * 3 3 7 3 7
- * 4 4 0 4 0
- * 5 5 1 5 1
- * 6 6 2 6 2
- * 7 7 3 7 3
- */
-
- if (slot < 8) {
- if (lines & (PCIIO_INTR_LINE_A| PCIIO_INTR_LINE_C))
- bbits |= 1 << slot;
- if (lines & (PCIIO_INTR_LINE_B| PCIIO_INTR_LINE_D))
- bbits |= 1 << (slot ^ 4);
- }
- return bbits;
-}
-
-
-/*ARGSUSED */
-pcibr_intr_t
-pcibr_intr_alloc(devfs_handle_t pconn_vhdl,
- device_desc_t dev_desc,
- pciio_intr_line_t lines,
- devfs_handle_t owner_dev)
-{
- pcibr_info_t pcibr_info = pcibr_info_get(pconn_vhdl);
- pciio_slot_t pciio_slot = pcibr_info->f_slot;
- pcibr_soft_t pcibr_soft = (pcibr_soft_t) pcibr_info->f_mfast;
- devfs_handle_t xconn_vhdl = pcibr_soft->bs_conn;
- bridge_t *bridge = pcibr_soft->bs_base;
- int is_threaded = 0;
- int thread_swlevel;
-
- xtalk_intr_t *xtalk_intr_p;
- pcibr_intr_t *pcibr_intr_p;
- pcibr_intr_list_t *intr_list_p;
-
- unsigned pcibr_int_bits;
- unsigned pcibr_int_bit;
- xtalk_intr_t xtalk_intr = (xtalk_intr_t)0;
- hub_intr_t hub_intr;
- pcibr_intr_t pcibr_intr;
- pcibr_intr_list_t intr_entry;
- pcibr_intr_list_t intr_list;
- bridgereg_t int_dev;
-
-#if DEBUG && INTR_DEBUG
- printk("%v: pcibr_intr_alloc\n"
- "%v:%s%s%s%s%s\n",
- owner_dev, pconn_vhdl,
- !(lines & 15) ? " No INTs?" : "",
- lines & 1 ? " INTA" : "",
- lines & 2 ? " INTB" : "",
- lines & 4 ? " INTC" : "",
- lines & 8 ? " INTD" : "");
-#endif
-
- NEW(pcibr_intr);
- if (!pcibr_intr)
- return NULL;
-
- if (dev_desc) {
- cpuid_t intr_target_from_desc(device_desc_t, int);
- } else {
- extern int default_intr_pri;
-
- is_threaded = 1; /* PCI interrupts are threaded, by default */
- thread_swlevel = default_intr_pri;
- }
-
- pcibr_intr->bi_dev = pconn_vhdl;
- pcibr_intr->bi_lines = lines;
- pcibr_intr->bi_soft = pcibr_soft;
- pcibr_intr->bi_ibits = 0; /* bits will be added below */
- pcibr_intr->bi_flags = is_threaded ? 0 : PCIIO_INTR_NOTHREAD;
- pcibr_intr->bi_mustruncpu = CPU_NONE;
- mutex_spinlock_init(&pcibr_intr->bi_ibuf.ib_lock);
-
- pcibr_int_bits = pcibr_soft->bs_intr_bits((pciio_info_t)pcibr_info, lines);
-
-
- /*
- * For each PCI interrupt line requested, figure
- * out which Bridge PCI Interrupt Line it maps
- * to, and make sure there are xtalk resources
- * allocated for it.
- */
-#if DEBUG && INTR_DEBUG
- printk("pcibr_int_bits: 0x%X\n", pcibr_int_bits);
-#endif
- for (pcibr_int_bit = 0; pcibr_int_bit < 8; pcibr_int_bit ++) {
- if (pcibr_int_bits & (1 << pcibr_int_bit)) {
- xtalk_intr_p = &pcibr_soft->bs_intr[pcibr_int_bit].bsi_xtalk_intr;
-
- xtalk_intr = *xtalk_intr_p;
-
- if (xtalk_intr == NULL) {
- /*
- * This xtalk_intr_alloc is constrained for two reasons:
- * 1) Normal interrupts and error interrupts need to be delivered
- * through a single xtalk target widget so that there aren't any
- * ordering problems with DMA, completion interrupts, and error
- * interrupts. (Use of xconn_vhdl forces this.)
- *
- * 2) On IP35, addressing constraints on IP35 and Bridge force
- * us to use a single PI number for all interrupts from a
- * single Bridge. (IP35-specific code forces this, and we
- * verify in pcibr_setwidint.)
- */
-
- /*
- * All code dealing with threaded PCI interrupt handlers
- * is located at the pcibr level. Because of this,
- * we always want the lower layers (hub/heart_intr_alloc,
- * intr_level_connect) to treat us as non-threaded so we
- * don't set up a duplicate threaded environment. We make
- * this happen by calling a special xtalk interface.
- */
- xtalk_intr = xtalk_intr_alloc_nothd(xconn_vhdl, dev_desc,
- owner_dev);
-#if DEBUG && INTR_DEBUG
- printk("%v: xtalk_intr=0x%X\n", xconn_vhdl, xtalk_intr);
-#endif
-
- /* both an assert and a runtime check on this:
- * we need to check in non-DEBUG kernels, and
- * the ASSERT gets us more information when
- * we use DEBUG kernels.
- */
- ASSERT(xtalk_intr != NULL);
- if (xtalk_intr == NULL) {
- /* it is quite possible that our
- * xtalk_intr_alloc failed because
- * someone else got there first,
- * and we can find their results
- * in xtalk_intr_p.
- */
- if (!*xtalk_intr_p) {
-#ifdef SUPPORT_PRINTING_V_FORMAT
- printk(KERN_ALERT
- "pcibr_intr_alloc %v: unable to get xtalk interrupt resources",
- xconn_vhdl);
-#else
- printk(KERN_ALERT
- "pcibr_intr_alloc 0x%p: unable to get xtalk interrupt resources",
- (void *)xconn_vhdl);
-#endif
- /* yes, we leak resources here. */
- return 0;
- }
- } else if (compare_and_swap_ptr((void **) xtalk_intr_p, NULL, xtalk_intr)) {
- /*
- * now tell the bridge which slot is
- * using this interrupt line.
- */
- int_dev = bridge->b_int_device;
- int_dev &= ~BRIDGE_INT_DEV_MASK(pcibr_int_bit);
- int_dev |= pciio_slot << BRIDGE_INT_DEV_SHFT(pcibr_int_bit);
- bridge->b_int_device = int_dev; /* XXXMP */
-
-#if DEBUG && INTR_DEBUG
- printk("%v: bridge intr bit %d clears my wrb\n",
- pconn_vhdl, pcibr_int_bit);
-#endif
- } else {
- /* someone else got one allocated first;
- * free the one we just created, and
- * retrieve the one they allocated.
- */
- xtalk_intr_free(xtalk_intr);
- xtalk_intr = *xtalk_intr_p;
-#if PARANOID
- /* once xtalk_intr is set, we never clear it,
- * so if the CAS fails above, this condition
- * can "never happen" ...
- */
- if (!xtalk_intr) {
- printk(KERN_ALERT
- "pcibr_intr_alloc %v: unable to set xtalk interrupt resources",
- xconn_vhdl);
- /* yes, we leak resources here. */
- return 0;
- }
-#endif
- }
- }
-
- pcibr_intr->bi_ibits |= 1 << pcibr_int_bit;
-
- NEW(intr_entry);
- intr_entry->il_next = NULL;
- intr_entry->il_intr = pcibr_intr;
- intr_entry->il_wrbf = &(bridge->b_wr_req_buf[pciio_slot].reg);
- intr_list_p =
- &pcibr_soft->bs_intr[pcibr_int_bit].bsi_pcibr_intr_wrap.iw_list;
-#if DEBUG && INTR_DEBUG
-#if defined(SUPPORT_PRINTING_V_FORMAT)
- printk("0x%x: Bridge bit %d wrap=0x%x\n",
- pconn_vhdl, pcibr_int_bit,
- pcibr_soft->bs_intr[pcibr_int_bit].bsi_pcibr_intr_wrap);
-#else
- printk("%v: Bridge bit %d wrap=0x%x\n",
- pconn_vhdl, pcibr_int_bit,
- pcibr_soft->bs_intr[pcibr_int_bit].bsi_pcibr_intr_wrap);
-#endif
-#endif
-
- if (compare_and_swap_ptr((void **) intr_list_p, NULL, intr_entry)) {
- /* we are the first interrupt on this bridge bit.
- */
-#if DEBUG && INTR_DEBUG
- printk("%v INT 0x%x (bridge bit %d) allocated [FIRST]\n",
- pconn_vhdl, pcibr_int_bits, pcibr_int_bit);
-#endif
- continue;
- }
- intr_list = *intr_list_p;
- pcibr_intr_p = &intr_list->il_intr;
- if (compare_and_swap_ptr((void **) pcibr_intr_p, NULL, pcibr_intr)) {
- /* first entry on list was erased,
- * and we replaced it, so we
- * don't need our intr_entry.
- */
- DEL(intr_entry);
-#if DEBUG && INTR_DEBUG
- printk("%v INT 0x%x (bridge bit %d) replaces erased first\n",
- pconn_vhdl, pcibr_int_bits, pcibr_int_bit);
-#endif
- continue;
- }
- intr_list_p = &intr_list->il_next;
- if (compare_and_swap_ptr((void **) intr_list_p, NULL, intr_entry)) {
- /* we are the new second interrupt on this bit.
- */
- pcibr_soft->bs_intr[pcibr_int_bit].bsi_pcibr_intr_wrap.iw_shared = 1;
-#if DEBUG && INTR_DEBUG
- printk("%v INT 0x%x (bridge bit %d) is new SECOND\n",
- pconn_vhdl, pcibr_int_bits, pcibr_int_bit);
-#endif
- continue;
- }
- while (1) {
- pcibr_intr_p = &intr_list->il_intr;
- if (compare_and_swap_ptr((void **) pcibr_intr_p, NULL, pcibr_intr)) {
- /* an entry on list was erased,
- * and we replaced it, so we
- * don't need our intr_entry.
- */
- DEL(intr_entry);
-#if DEBUG && INTR_DEBUG
- printk("%v INT 0x%x (bridge bit %d) replaces erased Nth\n",
- pconn_vhdl, pcibr_int_bits, pcibr_int_bit);
-#endif
- break;
- }
- intr_list_p = &intr_list->il_next;
- if (compare_and_swap_ptr((void **) intr_list_p, NULL, intr_entry)) {
- /* entry appended to share list
- */
-#if DEBUG && INTR_DEBUG
- printk("%v INT 0x%x (bridge bit %d) is new Nth\n",
- pconn_vhdl, pcibr_int_bits, pcibr_int_bit);
-#endif
- break;
- }
- /* step to next record in chain
- */
- intr_list = *intr_list_p;
- }
- }
- }
-
-#if DEBUG && INTR_DEBUG
- printk("%v pcibr_intr_alloc complete\n", pconn_vhdl);
-#endif
- hub_intr = (hub_intr_t)xtalk_intr;
- pcibr_intr->bi_irq = hub_intr->i_bit;
- pcibr_intr->bi_cpu = hub_intr->i_cpuid;
- return pcibr_intr;
-}
-
-/*ARGSUSED */
-void
-pcibr_intr_free(pcibr_intr_t pcibr_intr)
-{
- unsigned pcibr_int_bits = pcibr_intr->bi_ibits;
- pcibr_soft_t pcibr_soft = pcibr_intr->bi_soft;
- unsigned pcibr_int_bit;
- pcibr_intr_list_t intr_list;
- int intr_shared;
- xtalk_intr_t *xtalk_intrp;
-
- for (pcibr_int_bit = 0; pcibr_int_bit < 8; pcibr_int_bit++) {
- if (pcibr_int_bits & (1 << pcibr_int_bit)) {
- for (intr_list =
- pcibr_soft->bs_intr[pcibr_int_bit].bsi_pcibr_intr_wrap.iw_list;
- intr_list != NULL;
- intr_list = intr_list->il_next)
- if (compare_and_swap_ptr((void **) &intr_list->il_intr,
- pcibr_intr,
- NULL)) {
-#if DEBUG && INTR_DEBUG
- printk("%s: cleared a handler from bit %d\n",
- pcibr_soft->bs_name, pcibr_int_bit);
-#endif
- }
- /* If this interrupt line is not being shared between multiple
- * devices release the xtalk interrupt resources.
- */
- intr_shared =
- pcibr_soft->bs_intr[pcibr_int_bit].bsi_pcibr_intr_wrap.iw_shared;
- xtalk_intrp = &pcibr_soft->bs_intr[pcibr_int_bit].bsi_xtalk_intr;
-
- if ((!intr_shared) && (*xtalk_intrp)) {
-
- bridge_t *bridge = pcibr_soft->bs_base;
- bridgereg_t int_dev;
-
- xtalk_intr_free(*xtalk_intrp);
- *xtalk_intrp = 0;
-
- /* Clear the PCI device interrupt to bridge interrupt pin
- * mapping.
- */
- int_dev = bridge->b_int_device;
- int_dev &= ~BRIDGE_INT_DEV_MASK(pcibr_int_bit);
- bridge->b_int_device = int_dev;
-
- }
- }
- }
- DEL(pcibr_intr);
-}
-
-LOCAL void
-pcibr_setpciint(xtalk_intr_t xtalk_intr)
-{
- iopaddr_t addr = xtalk_intr_addr_get(xtalk_intr);
- xtalk_intr_vector_t vect = xtalk_intr_vector_get(xtalk_intr);
- bridgereg_t *int_addr = (bridgereg_t *)
- xtalk_intr_sfarg_get(xtalk_intr);
-
- *int_addr = ((BRIDGE_INT_ADDR_HOST & (addr >> 30)) |
- (BRIDGE_INT_ADDR_FLD & vect));
-}
-
-/*ARGSUSED */
-int
-pcibr_intr_connect(pcibr_intr_t pcibr_intr)
-{
- pcibr_soft_t pcibr_soft = pcibr_intr->bi_soft;
- bridge_t *bridge = pcibr_soft->bs_base;
- unsigned pcibr_int_bits = pcibr_intr->bi_ibits;
- unsigned pcibr_int_bit;
- bridgereg_t b_int_enable;
- unsigned long s;
-
- if (pcibr_intr == NULL)
- return -1;
-
-#if DEBUG && INTR_DEBUG
- printk("%v: pcibr_intr_connect\n",
- pcibr_intr->bi_dev);
-#endif
-
- *((volatile unsigned *)&pcibr_intr->bi_flags) |= PCIIO_INTR_CONNECTED;
-
- /*
- * For each PCI interrupt line requested, figure
- * out which Bridge PCI Interrupt Line it maps
- * to, and make sure there are xtalk resources
- * allocated for it.
- */
- for (pcibr_int_bit = 0; pcibr_int_bit < 8; pcibr_int_bit++)
- if (pcibr_int_bits & (1 << pcibr_int_bit)) {
- xtalk_intr_t xtalk_intr;
-
- xtalk_intr = pcibr_soft->bs_intr[pcibr_int_bit].bsi_xtalk_intr;
-
- /*
- * If this interrupt line is being shared and the connect has
- * already been done, no need to do it again.
- */
- if (pcibr_soft->bs_intr[pcibr_int_bit].bsi_pcibr_intr_wrap.iw_connected)
- continue;
-
-
- /*
- * Use the pcibr wrapper function to handle all Bridge interrupts
- * regardless of whether the interrupt line is shared or not.
- */
- xtalk_intr_connect(xtalk_intr, (xtalk_intr_setfunc_t) pcibr_setpciint,
- (void *)&(bridge->b_int_addr[pcibr_int_bit].addr));
- pcibr_soft->bs_intr[pcibr_int_bit].bsi_pcibr_intr_wrap.iw_connected = 1;
-
-#if DEBUG && INTR_DEBUG
- printk("%v bridge bit %d wrapper connected\n",
- pcibr_intr->bi_dev, pcibr_int_bit);
-#endif
- }
- s = pcibr_lock(pcibr_soft);
- b_int_enable = bridge->b_int_enable;
- b_int_enable |= pcibr_int_bits;
- bridge->b_int_enable = b_int_enable;
- bridge->b_wid_tflush; /* wait until Bridge PIO complete */
- pcibr_unlock(pcibr_soft, s);
-
- return 0;
-}
-
-/*ARGSUSED */
-void
-pcibr_intr_disconnect(pcibr_intr_t pcibr_intr)
-{
- pcibr_soft_t pcibr_soft = pcibr_intr->bi_soft;
- bridge_t *bridge = pcibr_soft->bs_base;
- unsigned pcibr_int_bits = pcibr_intr->bi_ibits;
- unsigned pcibr_int_bit;
- bridgereg_t b_int_enable;
- unsigned long s;
-
- /* Stop calling the function. Now.
- */
- *((volatile unsigned *)&pcibr_intr->bi_flags) &= ~PCIIO_INTR_CONNECTED;
- /*
- * For each PCI interrupt line requested, figure
- * out which Bridge PCI Interrupt Line it maps
- * to, and disconnect the interrupt.
- */
-
- /* don't disable interrupts for lines that
- * are shared between devices.
- */
- for (pcibr_int_bit = 0; pcibr_int_bit < 8; pcibr_int_bit++)
- if ((pcibr_int_bits & (1 << pcibr_int_bit)) &&
- (pcibr_soft->bs_intr[pcibr_int_bit].bsi_pcibr_intr_wrap.iw_shared))
- pcibr_int_bits &= ~(1 << pcibr_int_bit);
- if (!pcibr_int_bits)
- return;
-
- s = pcibr_lock(pcibr_soft);
- b_int_enable = bridge->b_int_enable;
- b_int_enable &= ~pcibr_int_bits;
- bridge->b_int_enable = b_int_enable;
- bridge->b_wid_tflush; /* wait until Bridge PIO complete */
- pcibr_unlock(pcibr_soft, s);
-
- for (pcibr_int_bit = 0; pcibr_int_bit < 8; pcibr_int_bit++)
- if (pcibr_int_bits & (1 << pcibr_int_bit)) {
- /* if the interrupt line is now shared,
- * do not disconnect it.
- */
- if (pcibr_soft->bs_intr[pcibr_int_bit].bsi_pcibr_intr_wrap.iw_shared)
- continue;
-
- xtalk_intr_disconnect(pcibr_soft->bs_intr[pcibr_int_bit].bsi_xtalk_intr);
- pcibr_soft->bs_intr[pcibr_int_bit].bsi_pcibr_intr_wrap.iw_connected = 0;
-
-#if DEBUG && INTR_DEBUG
- printk("%s: xtalk disconnect done for Bridge bit %d\n",
- pcibr_soft->bs_name, pcibr_int_bit);
-#endif
-
- /* if we are sharing the interrupt line,
- * connect us up; this closes the hole
- * where the another pcibr_intr_alloc()
- * was in progress as we disconnected.
- */
- if (!pcibr_soft->bs_intr[pcibr_int_bit].bsi_pcibr_intr_wrap.iw_shared)
- continue;
-
- xtalk_intr_connect(pcibr_soft->bs_intr[pcibr_int_bit].bsi_xtalk_intr,
- (xtalk_intr_setfunc_t)pcibr_setpciint,
- (void *) &(bridge->b_int_addr[pcibr_int_bit].addr));
- }
-}
-
-/*ARGSUSED */
-devfs_handle_t
-pcibr_intr_cpu_get(pcibr_intr_t pcibr_intr)
-{
- pcibr_soft_t pcibr_soft = pcibr_intr->bi_soft;
- unsigned pcibr_int_bits = pcibr_intr->bi_ibits;
- unsigned pcibr_int_bit;
-
- for (pcibr_int_bit = 0; pcibr_int_bit < 8; pcibr_int_bit++)
- if (pcibr_int_bits & (1 << pcibr_int_bit))
- return xtalk_intr_cpu_get(pcibr_soft->bs_intr[pcibr_int_bit].bsi_xtalk_intr);
- return 0;
-}
-
-/* =====================================================================
- * INTERRUPT HANDLING
- */
-LOCAL void
-pcibr_clearwidint(bridge_t *bridge)
-{
- bridge->b_wid_int_upper = 0;
- bridge->b_wid_int_lower = 0;
-}
-
-LOCAL void
-pcibr_setwidint(xtalk_intr_t intr)
-{
- xwidgetnum_t targ = xtalk_intr_target_get(intr);
- iopaddr_t addr = xtalk_intr_addr_get(intr);
- xtalk_intr_vector_t vect = xtalk_intr_vector_get(intr);
- widgetreg_t NEW_b_wid_int_upper, NEW_b_wid_int_lower;
- widgetreg_t OLD_b_wid_int_upper, OLD_b_wid_int_lower;
-
- bridge_t *bridge = (bridge_t *)xtalk_intr_sfarg_get(intr);
-
- NEW_b_wid_int_upper = ( (0x000F0000 & (targ << 16)) |
- XTALK_ADDR_TO_UPPER(addr));
- NEW_b_wid_int_lower = XTALK_ADDR_TO_LOWER(addr);
-
- OLD_b_wid_int_upper = bridge->b_wid_int_upper;
- OLD_b_wid_int_lower = bridge->b_wid_int_lower;
-
- /* Verify that all interrupts from this Bridge are using a single PI */
- if ((OLD_b_wid_int_upper != 0) && (OLD_b_wid_int_lower != 0)) {
- /*
- * Once set, these registers shouldn't change; they should
- * be set multiple times with the same values.
- *
- * If we're attempting to change these registers, it means
- * that our heuristics for allocating interrupts in a way
- * appropriate for IP35 have failed, and the admin needs to
- * explicitly direct some interrupts (or we need to make the
- * heuristics more clever).
- *
- * In practice, we hope this doesn't happen very often, if
- * at all.
- */
- if ((OLD_b_wid_int_upper != NEW_b_wid_int_upper) ||
- (OLD_b_wid_int_lower != NEW_b_wid_int_lower)) {
- printk(KERN_WARNING "Interrupt allocation is too complex.\n");
- printk(KERN_WARNING "Use explicit administrative interrupt targetting.\n");
- printk(KERN_WARNING "bridge=0x%lx targ=0x%x\n", (unsigned long)bridge, targ);
- printk(KERN_WARNING "NEW=0x%x/0x%x OLD=0x%x/0x%x\n",
- NEW_b_wid_int_upper, NEW_b_wid_int_lower,
- OLD_b_wid_int_upper, OLD_b_wid_int_lower);
- PRINT_PANIC("PCI Bridge interrupt targetting error\n");
- }
- }
-
- bridge->b_wid_int_upper = NEW_b_wid_int_upper;
- bridge->b_wid_int_lower = NEW_b_wid_int_lower;
- bridge->b_int_host_err = vect;
-}
-
-/*
- * pcibr_intr_preset: called during mlreset time
- * if the platform specific code needs to route
- * one of the Bridge's xtalk interrupts before the
- * xtalk infrastructure is available.
- */
-void
-pcibr_xintr_preset(void *which_widget,
- int which_widget_intr,
- xwidgetnum_t targ,
- iopaddr_t addr,
- xtalk_intr_vector_t vect)
-{
- bridge_t *bridge = (bridge_t *) which_widget;
-
- if (which_widget_intr == -1) {
- /* bridge widget error interrupt */
- bridge->b_wid_int_upper = ( (0x000F0000 & (targ << 16)) |
- XTALK_ADDR_TO_UPPER(addr));
- bridge->b_wid_int_lower = XTALK_ADDR_TO_LOWER(addr);
- bridge->b_int_host_err = vect;
-
- /* turn on all interrupts except
- * the PCI interrupt requests,
- * at least at heart.
- */
- bridge->b_int_enable |= ~BRIDGE_IMR_INT_MSK;
-
- } else {
- /* routing a PCI device interrupt.
- * targ and low 38 bits of addr must
- * be the same as the already set
- * value for the widget error interrupt.
- */
- bridge->b_int_addr[which_widget_intr].addr =
- ((BRIDGE_INT_ADDR_HOST & (addr >> 30)) |
- (BRIDGE_INT_ADDR_FLD & vect));
- /*
- * now bridge can let it through;
- * NB: still should be blocked at
- * xtalk provider end, until the service
- * function is set.
- */
- bridge->b_int_enable |= 1 << vect;
- }
- bridge->b_wid_tflush; /* wait until Bridge PIO complete */
-}
-
-
-/*
- * pcibr_intr_func()
- *
- * This is the pcibr interrupt "wrapper" function that is called,
- * in interrupt context, to initiate the interrupt handler(s) registered
- * (via pcibr_intr_alloc/connect) for the occurring interrupt. Non-threaded
- * handlers will be called directly, and threaded handlers will have their
- * thread woken up.
- */
-void
-pcibr_intr_func(intr_arg_t arg)
-{
- pcibr_intr_wrap_t wrap = (pcibr_intr_wrap_t) arg;
- reg_p wrbf;
- pcibr_intr_t intr;
- pcibr_intr_list_t list;
- int clearit;
- int do_nonthreaded = 1;
- int is_threaded = 0;
- int x = 0;
-
- /*
- * If any handler is still running from a previous interrupt
- * just return. If there's a need to call the handler(s) again,
- * another interrupt will be generated either by the device or by
- * pcibr_force_interrupt().
- */
-
- if (wrap->iw_hdlrcnt) {
- return;
- }
-
- /*
- * Call all interrupt handlers registered.
- * First, the pcibr_intrd threads for any threaded handlers will be
- * awoken, then any non-threaded handlers will be called sequentially.
- */
-
- clearit = 1;
- while (do_nonthreaded) {
- for (list = wrap->iw_list; list != NULL; list = list->il_next) {
- if ((intr = list->il_intr) &&
- (intr->bi_flags & PCIIO_INTR_CONNECTED)) {
-
- /*
- * This device may have initiated write
- * requests since the bridge last saw
- * an edge on this interrupt input; flushing
- * the buffer prior to invoking the handler
- * should help but may not be sufficient if we
- * get more requests after the flush, followed
- * by the card deciding it wants service, before
- * the interrupt handler checks to see if things need
- * to be done.
- *
- * There is a similar race condition if
- * an interrupt handler loops around and
- * notices further service is required.
- * Perhaps we need to have an explicit
- * call that interrupt handlers need to
- * do between noticing that DMA to memory
- * has completed, but before observing the
- * contents of memory?
- */
-
- if ((do_nonthreaded) && (!is_threaded)) {
- /* Non-threaded.
- * Call the interrupt handler at interrupt level
- */
-
- /* Only need to flush write buffers if sharing */
-
- if ((wrap->iw_shared) && (wrbf = list->il_wrbf)) {
- if ((x = *wrbf)) /* write request buffer flush */
-#ifdef SUPPORT_PRINTING_V_FORMAT
- printk(KERN_ALERT "pcibr_intr_func %v: \n"
- "write buffer flush failed, wrbf=0x%x\n",
- list->il_intr->bi_dev, wrbf);
-#else
- printk(KERN_ALERT "pcibr_intr_func %p: \n"
- "write buffer flush failed, wrbf=0x%lx\n",
- (void *)list->il_intr->bi_dev, (long) wrbf);
-#endif
- }
- }
-
- clearit = 0;
- }
- }
-
- do_nonthreaded = 0;
- /*
- * If the non-threaded handler was the last to complete,
- * (i.e., no threaded handlers still running) force an
- * interrupt to avoid a potential deadlock situation.
- */
- if (wrap->iw_hdlrcnt == 0) {
- pcibr_force_interrupt(wrap);
- }
- }
-
- /* If there were no handlers,
- * disable the interrupt and return.
- * It will get enabled again after
- * a handler is connected.
- * If we don't do this, we would
- * sit here and spin through the
- * list forever.
- */
- if (clearit) {
- pcibr_soft_t pcibr_soft = wrap->iw_soft;
- bridge_t *bridge = pcibr_soft->bs_base;
- bridgereg_t b_int_enable;
- bridgereg_t mask = 1 << wrap->iw_intr;
- unsigned long s;
-
- s = pcibr_lock(pcibr_soft);
- b_int_enable = bridge->b_int_enable;
- b_int_enable &= ~mask;
- bridge->b_int_enable = b_int_enable;
- bridge->b_wid_tflush; /* wait until Bridge PIO complete */
- pcibr_unlock(pcibr_soft, s);
- return;
- }
-}
-
-/* =====================================================================
- * CONFIGURATION MANAGEMENT
- */
-/*ARGSUSED */
-void
-pcibr_provider_startup(devfs_handle_t pcibr)
-{
-}
-
-/*ARGSUSED */
-void
-pcibr_provider_shutdown(devfs_handle_t pcibr)
-{
-}
-
-int
-pcibr_reset(devfs_handle_t conn)
-{
- pciio_info_t pciio_info = pciio_info_get(conn);
- pciio_slot_t pciio_slot = pciio_info_slot_get(pciio_info);
- pcibr_soft_t pcibr_soft = (pcibr_soft_t) pciio_info_mfast_get(pciio_info);
- bridge_t *bridge = pcibr_soft->bs_base;
- bridgereg_t ctlreg;
- unsigned cfgctl[8];
- unsigned long s;
- int f, nf;
- pcibr_info_h pcibr_infoh;
- pcibr_info_t pcibr_info;
- int win;
-
- if (pcibr_soft->bs_slot[pciio_slot].has_host) {
- pciio_slot = pcibr_soft->bs_slot[pciio_slot].host_slot;
- pcibr_info = pcibr_soft->bs_slot[pciio_slot].bss_infos[0];
- }
- if (pciio_slot < 4) {
- s = pcibr_lock(pcibr_soft);
- nf = pcibr_soft->bs_slot[pciio_slot].bss_ninfo;
- pcibr_infoh = pcibr_soft->bs_slot[pciio_slot].bss_infos;
- for (f = 0; f < nf; ++f)
- if (pcibr_infoh[f])
- cfgctl[f] = bridge->b_type0_cfg_dev[pciio_slot].f[f].l[PCI_CFG_COMMAND / 4];
-
- ctlreg = bridge->b_wid_control;
- bridge->b_wid_control = ctlreg | BRIDGE_CTRL_RST(pciio_slot);
- /* XXX delay? */
- bridge->b_wid_control = ctlreg;
- /* XXX delay? */
-
- for (f = 0; f < nf; ++f)
- if ((pcibr_info = pcibr_infoh[f]))
- for (win = 0; win < 6; ++win)
- if (pcibr_info->f_window[win].w_base != 0)
- bridge->b_type0_cfg_dev[pciio_slot].f[f].l[PCI_CFG_BASE_ADDR(win) / 4] =
- pcibr_info->f_window[win].w_base;
- for (f = 0; f < nf; ++f)
- if (pcibr_infoh[f])
- bridge->b_type0_cfg_dev[pciio_slot].f[f].l[PCI_CFG_COMMAND / 4] = cfgctl[f];
- pcibr_unlock(pcibr_soft, s);
-
- return 0;
- }
-#ifdef SUPPORT_PRINTING_V_FORMAT
- printk(KERN_WARNING "%v: pcibr_reset unimplemented for slot %d\n",
- conn, pciio_slot);
-#endif
- return -1;
-}
-
-pciio_endian_t
-pcibr_endian_set(devfs_handle_t pconn_vhdl,
- pciio_endian_t device_end,
- pciio_endian_t desired_end)
-{
- pciio_info_t pciio_info = pciio_info_get(pconn_vhdl);
- pciio_slot_t pciio_slot = pciio_info_slot_get(pciio_info);
- pcibr_soft_t pcibr_soft = (pcibr_soft_t) pciio_info_mfast_get(pciio_info);
- bridgereg_t devreg;
- unsigned long s;
-
- /*
- * Bridge supports hardware swapping; so we can always
- * arrange for the caller's desired endianness.
- */
-
- s = pcibr_lock(pcibr_soft);
- devreg = pcibr_soft->bs_slot[pciio_slot].bss_device;
- if (device_end != desired_end)
- devreg |= BRIDGE_DEV_SWAP_BITS;
- else
- devreg &= ~BRIDGE_DEV_SWAP_BITS;
-
- /* NOTE- if we ever put SWAP bits
- * onto the disabled list, we will
- * have to change the logic here.
- */
- if (pcibr_soft->bs_slot[pciio_slot].bss_device != devreg) {
- bridge_t *bridge = pcibr_soft->bs_base;
-
- bridge->b_device[pciio_slot].reg = devreg;
- pcibr_soft->bs_slot[pciio_slot].bss_device = devreg;
- bridge->b_wid_tflush; /* wait until Bridge PIO complete */
- }
- pcibr_unlock(pcibr_soft, s);
-
-#if DEBUG && PCIBR_DEV_DEBUG
- printk("pcibr Device(%d): 0x%p\n", pciio_slot, bridge->b_device[pciio_slot].reg);
-#endif
-
- return desired_end;
-}
-
-/* This (re)sets the GBR and REALTIME bits and also keeps track of how
- * many sets are outstanding. Reset succeeds only if the number of outstanding
- * sets == 1.
- */
-int
-pcibr_priority_bits_set(pcibr_soft_t pcibr_soft,
- pciio_slot_t pciio_slot,
- pciio_priority_t device_prio)
-{
- unsigned long s;
- int *counter;
- bridgereg_t rtbits = 0;
- bridgereg_t devreg;
- int rc = PRIO_SUCCESS;
-
- /* in dual-slot configurations, the host and the
- * guest have separate DMA resources, so they
- * have separate requirements for priority bits.
- */
-
- counter = &(pcibr_soft->bs_slot[pciio_slot].bss_pri_uctr);
-
- /*
- * Bridge supports PCI notions of LOW and HIGH priority
- * arbitration rings via a "REAL_TIME" bit in the per-device
- * Bridge register. The "GBR" bit controls access to the GBR
- * ring on the xbow. These two bits are (re)set together.
- *
- * XXX- Bug in Rev B Bridge Si:
- * Symptom: Prefetcher starts operating incorrectly. This happens
- * due to corruption of the address storage ram in the prefetcher
- * when a non-real time PCI request is pulled and a real-time one is
- * put in it's place. Workaround: Use only a single arbitration ring
- * on PCI bus. GBR and RR can still be uniquely used per
- * device. NETLIST MERGE DONE, WILL BE FIXED IN REV C.
- */
-
- if (pcibr_soft->bs_rev_num != BRIDGE_PART_REV_B)
- rtbits |= BRIDGE_DEV_RT;
-
- /* NOTE- if we ever put DEV_RT or DEV_GBR on
- * the disabled list, we will have to take
- * it into account here.
- */
-
- s = pcibr_lock(pcibr_soft);
- devreg = pcibr_soft->bs_slot[pciio_slot].bss_device;
- if (device_prio == PCI_PRIO_HIGH) {
- if ((++*counter == 1)) {
- if (rtbits)
- devreg |= rtbits;
- else
- rc = PRIO_FAIL;
- }
- } else if (device_prio == PCI_PRIO_LOW) {
- if (*counter <= 0)
- rc = PRIO_FAIL;
- else if (--*counter == 0)
- if (rtbits)
- devreg &= ~rtbits;
- }
- if (pcibr_soft->bs_slot[pciio_slot].bss_device != devreg) {
- bridge_t *bridge = pcibr_soft->bs_base;
-
- bridge->b_device[pciio_slot].reg = devreg;
- pcibr_soft->bs_slot[pciio_slot].bss_device = devreg;
- bridge->b_wid_tflush; /* wait until Bridge PIO complete */
- }
- pcibr_unlock(pcibr_soft, s);
-
- return rc;
-}
-
-pciio_priority_t
-pcibr_priority_set(devfs_handle_t pconn_vhdl,
- pciio_priority_t device_prio)
-{
- pciio_info_t pciio_info = pciio_info_get(pconn_vhdl);
- pciio_slot_t pciio_slot = pciio_info_slot_get(pciio_info);
- pcibr_soft_t pcibr_soft = (pcibr_soft_t) pciio_info_mfast_get(pciio_info);
-
- (void) pcibr_priority_bits_set(pcibr_soft, pciio_slot, device_prio);
-
- return device_prio;
-}
-
-/*
- * Interfaces to allow special (e.g. SGI) drivers to set/clear
- * Bridge-specific device flags. Many flags are modified through
- * PCI-generic interfaces; we don't allow them to be directly
- * manipulated here. Only flags that at this point seem pretty
- * Bridge-specific can be set through these special interfaces.
- * We may add more flags as the need arises, or remove flags and
- * create PCI-generic interfaces as the need arises.
- *
- * Returns 0 on failure, 1 on success
- */
-int
-pcibr_device_flags_set(devfs_handle_t pconn_vhdl,
- pcibr_device_flags_t flags)
-{
- pciio_info_t pciio_info = pciio_info_get(pconn_vhdl);
- pciio_slot_t pciio_slot = pciio_info_slot_get(pciio_info);
- pcibr_soft_t pcibr_soft = (pcibr_soft_t) pciio_info_mfast_get(pciio_info);
- bridgereg_t set = 0;
- bridgereg_t clr = 0;
-
- ASSERT((flags & PCIBR_DEVICE_FLAGS) == flags);
-
- if (flags & PCIBR_WRITE_GATHER)
- set |= BRIDGE_DEV_PMU_WRGA_EN;
- if (flags & PCIBR_NOWRITE_GATHER)
- clr |= BRIDGE_DEV_PMU_WRGA_EN;
-
- if (flags & PCIBR_WRITE_GATHER)
- set |= BRIDGE_DEV_DIR_WRGA_EN;
- if (flags & PCIBR_NOWRITE_GATHER)
- clr |= BRIDGE_DEV_DIR_WRGA_EN;
-
- if (flags & PCIBR_PREFETCH)
- set |= BRIDGE_DEV_PREF;
- if (flags & PCIBR_NOPREFETCH)
- clr |= BRIDGE_DEV_PREF;
-
- if (flags & PCIBR_PRECISE)
- set |= BRIDGE_DEV_PRECISE;
- if (flags & PCIBR_NOPRECISE)
- clr |= BRIDGE_DEV_PRECISE;
-
- if (flags & PCIBR_BARRIER)
- set |= BRIDGE_DEV_BARRIER;
- if (flags & PCIBR_NOBARRIER)
- clr |= BRIDGE_DEV_BARRIER;
-
- if (flags & PCIBR_64BIT)
- set |= BRIDGE_DEV_DEV_SIZE;
- if (flags & PCIBR_NO64BIT)
- clr |= BRIDGE_DEV_DEV_SIZE;
-
- if (set || clr) {
- bridgereg_t devreg;
- unsigned long s;
-
- s = pcibr_lock(pcibr_soft);
- devreg = pcibr_soft->bs_slot[pciio_slot].bss_device;
- devreg = (devreg & ~clr) | set;
- if (pcibr_soft->bs_slot[pciio_slot].bss_device != devreg) {
- bridge_t *bridge = pcibr_soft->bs_base;
-
- bridge->b_device[pciio_slot].reg = devreg;
- pcibr_soft->bs_slot[pciio_slot].bss_device = devreg;
- bridge->b_wid_tflush; /* wait until Bridge PIO complete */
- }
- pcibr_unlock(pcibr_soft, s);
-#if DEBUG && PCIBR_DEV_DEBUG
- printk("pcibr Device(%d): %R\n", pciio_slot, bridge->b_device[pciio_slot].regbridge->b_device[pciio_slot].reg, device_bits);
-#endif
- }
- return (1);
-}
-
-#ifdef LITTLE_ENDIAN
-/*
- * on sn-ia we need to twiddle the the addresses going out
- * the pci bus because we use the unswizzled synergy space
- * (the alternative is to use the swizzled synergy space
- * and byte swap the data)
- */
-#define CB(b,r) (((volatile uint8_t *) b)[((r)^4)])
-#define CS(b,r) (((volatile uint16_t *) b)[((r^4)/2)])
-#define CW(b,r) (((volatile uint32_t *) b)[((r^4)/4)])
-#else
-#define CB(b,r) (((volatile uint8_t *) cfgbase)[(r)^3])
-#define CS(b,r) (((volatile uint16_t *) cfgbase)[((r)/2)^1])
-#define CW(b,r) (((volatile uint32_t *) cfgbase)[(r)/4])
-#endif /* LITTLE_ENDIAN */
-
-
-LOCAL cfg_p
-pcibr_config_addr(devfs_handle_t conn,
- unsigned reg)
-{
- pcibr_info_t pcibr_info;
- pciio_slot_t pciio_slot;
- pciio_function_t pciio_func;
- pcibr_soft_t pcibr_soft;
- bridge_t *bridge;
- cfg_p cfgbase = (cfg_p)0;
-
- pcibr_info = pcibr_info_get(conn);
-
- pciio_slot = pcibr_info->f_slot;
- if (pciio_slot == PCIIO_SLOT_NONE)
- pciio_slot = PCI_TYPE1_SLOT(reg);
-
- pciio_func = pcibr_info->f_func;
- if (pciio_func == PCIIO_FUNC_NONE)
- pciio_func = PCI_TYPE1_FUNC(reg);
-
- pcibr_soft = (pcibr_soft_t) pcibr_info->f_mfast;
-
- bridge = pcibr_soft->bs_base;
-
- cfgbase = bridge->b_type0_cfg_dev[pciio_slot].f[pciio_func].l;
-
- return cfgbase;
-}
-
-uint64_t
-pcibr_config_get(devfs_handle_t conn,
- unsigned reg,
- unsigned size)
-{
- return do_pcibr_config_get(pcibr_config_addr(conn, reg),
- PCI_TYPE1_REG(reg), size);
-}
-
-LOCAL uint64_t
-do_pcibr_config_get(
- cfg_p cfgbase,
- unsigned reg,
- unsigned size)
-{
- unsigned value;
-
-
- value = CW(cfgbase, reg);
-
- if (reg & 3)
- value >>= 8 * (reg & 3);
- if (size < 4)
- value &= (1 << (8 * size)) - 1;
-
- return value;
-}
-
-void
-pcibr_config_set(devfs_handle_t conn,
- unsigned reg,
- unsigned size,
- uint64_t value)
-{
- do_pcibr_config_set(pcibr_config_addr(conn, reg),
- PCI_TYPE1_REG(reg), size, value);
-}
-
-LOCAL void
-do_pcibr_config_set(cfg_p cfgbase,
- unsigned reg,
- unsigned size,
- uint64_t value)
-{
- switch (size) {
- case 1:
- CB(cfgbase, reg) = value;
- break;
- case 2:
- if (reg & 1) {
- CB(cfgbase, reg) = value;
- CB(cfgbase, reg + 1) = value >> 8;
- } else
- CS(cfgbase, reg) = value;
- break;
- case 3:
- if (reg & 1) {
- CB(cfgbase, reg) = value;
- CS(cfgbase, (reg + 1)) = value >> 8;
- } else {
- CS(cfgbase, reg) = value;
- CB(cfgbase, reg + 2) = value >> 16;
- }
- break;
-
- case 4:
- CW(cfgbase, reg) = value;
- break;
- }
-}
-
-pciio_provider_t pcibr_provider =
-{
- (pciio_piomap_alloc_f *) pcibr_piomap_alloc,
- (pciio_piomap_free_f *) pcibr_piomap_free,
- (pciio_piomap_addr_f *) pcibr_piomap_addr,
- (pciio_piomap_done_f *) pcibr_piomap_done,
- (pciio_piotrans_addr_f *) pcibr_piotrans_addr,
- (pciio_piospace_alloc_f *) pcibr_piospace_alloc,
- (pciio_piospace_free_f *) pcibr_piospace_free,
-
- (pciio_dmamap_alloc_f *) pcibr_dmamap_alloc,
- (pciio_dmamap_free_f *) pcibr_dmamap_free,
- (pciio_dmamap_addr_f *) pcibr_dmamap_addr,
- (pciio_dmamap_list_f *) pcibr_dmamap_list,
- (pciio_dmamap_done_f *) pcibr_dmamap_done,
- (pciio_dmatrans_addr_f *) pcibr_dmatrans_addr,
- (pciio_dmatrans_list_f *) pcibr_dmatrans_list,
- (pciio_dmamap_drain_f *) pcibr_dmamap_drain,
- (pciio_dmaaddr_drain_f *) pcibr_dmaaddr_drain,
- (pciio_dmalist_drain_f *) pcibr_dmalist_drain,
-
- (pciio_intr_alloc_f *) pcibr_intr_alloc,
- (pciio_intr_free_f *) pcibr_intr_free,
- (pciio_intr_connect_f *) pcibr_intr_connect,
- (pciio_intr_disconnect_f *) pcibr_intr_disconnect,
- (pciio_intr_cpu_get_f *) pcibr_intr_cpu_get,
-
- (pciio_provider_startup_f *) pcibr_provider_startup,
- (pciio_provider_shutdown_f *) pcibr_provider_shutdown,
- (pciio_reset_f *) pcibr_reset,
- (pciio_write_gather_flush_f *) pcibr_write_gather_flush,
- (pciio_endian_set_f *) pcibr_endian_set,
- (pciio_priority_set_f *) pcibr_priority_set,
- (pciio_config_get_f *) pcibr_config_get,
- (pciio_config_set_f *) pcibr_config_set,
-
- (pciio_error_devenable_f *) 0,
- (pciio_error_extract_f *) 0,
-
-#ifdef LATER
- (pciio_driver_reg_callback_f *) pcibr_driver_reg_callback,
- (pciio_driver_unreg_callback_f *) pcibr_driver_unreg_callback,
-#else
- (pciio_driver_reg_callback_f *) 0,
- (pciio_driver_unreg_callback_f *) 0,
-#endif
- (pciio_device_unregister_f *) pcibr_device_unregister,
- (pciio_dma_enabled_f *) pcibr_dma_enabled,
-};
-
-LOCAL pcibr_hints_t
-pcibr_hints_get(devfs_handle_t xconn_vhdl, int alloc)
-{
- arbitrary_info_t ainfo = 0;
- graph_error_t rv;
- pcibr_hints_t hint;
-
- rv = hwgraph_info_get_LBL(xconn_vhdl, INFO_LBL_PCIBR_HINTS, &ainfo);
-
- if (alloc && (rv != GRAPH_SUCCESS)) {
-
- NEW(hint);
- hint->rrb_alloc_funct = NULL;
- hint->ph_intr_bits = NULL;
- rv = hwgraph_info_add_LBL(xconn_vhdl,
- INFO_LBL_PCIBR_HINTS,
- (arbitrary_info_t) hint);
- if (rv != GRAPH_SUCCESS)
- goto abnormal_exit;
-
- rv = hwgraph_info_get_LBL(xconn_vhdl, INFO_LBL_PCIBR_HINTS, &ainfo);
-
- if (rv != GRAPH_SUCCESS)
- goto abnormal_exit;
-
- if (ainfo != (arbitrary_info_t) hint)
- goto abnormal_exit;
- }
- return (pcibr_hints_t) ainfo;
-
-abnormal_exit:
-#ifdef LATER
- printf("SHOULD NOT BE HERE\n");
-#endif
- DEL(hint);
- return(NULL);
-
-}
-
-void
-pcibr_hints_fix_some_rrbs(devfs_handle_t xconn_vhdl, unsigned mask)
-{
- pcibr_hints_t hint = pcibr_hints_get(xconn_vhdl, 1);
-
- if (hint)
- hint->ph_rrb_fixed = mask;
-#if DEBUG
- else
- printk("pcibr_hints_fix_rrbs: pcibr_hints_get failed at\n"
- "\t%p\n", xconn_vhdl);
-#endif
-}
-
-void
-pcibr_hints_fix_rrbs(devfs_handle_t xconn_vhdl)
-{
- pcibr_hints_fix_some_rrbs(xconn_vhdl, 0xFF);
-}
-
-void
-pcibr_hints_dualslot(devfs_handle_t xconn_vhdl,
- pciio_slot_t host,
- pciio_slot_t guest)
-{
- pcibr_hints_t hint = pcibr_hints_get(xconn_vhdl, 1);
-
- if (hint)
- hint->ph_host_slot[guest] = host + 1;
-#if DEBUG
- else
- printk("pcibr_hints_dualslot: pcibr_hints_get failed at\n"
- "\t%p\n", xconn_vhdl);
-#endif
-}
-
-void
-pcibr_hints_intr_bits(devfs_handle_t xconn_vhdl,
- pcibr_intr_bits_f *xxx_intr_bits)
-{
- pcibr_hints_t hint = pcibr_hints_get(xconn_vhdl, 1);
-
- if (hint)
- hint->ph_intr_bits = xxx_intr_bits;
-#if DEBUG
- else
- printk("pcibr_hints_intr_bits: pcibr_hints_get failed at\n"
- "\t%p\n", xconn_vhdl);
-#endif
-}
-
-void
-pcibr_set_rrb_callback(devfs_handle_t xconn_vhdl, rrb_alloc_funct_t rrb_alloc_funct)
-{
- pcibr_hints_t hint = pcibr_hints_get(xconn_vhdl, 1);
-
- if (hint)
- hint->rrb_alloc_funct = rrb_alloc_funct;
-}
-
-void
-pcibr_hints_handsoff(devfs_handle_t xconn_vhdl)
-{
- pcibr_hints_t hint = pcibr_hints_get(xconn_vhdl, 1);
-
- if (hint)
- hint->ph_hands_off = 1;
-#if DEBUG
- else
- printk("pcibr_hints_handsoff: pcibr_hints_get failed at\n"
- "\t%p\n", xconn_vhdl);
-#endif
-}
-
-void
-pcibr_hints_subdevs(devfs_handle_t xconn_vhdl,
- pciio_slot_t slot,
- uint64_t subdevs)
-{
- arbitrary_info_t ainfo = 0;
- char sdname[16];
- devfs_handle_t pconn_vhdl = GRAPH_VERTEX_NONE;
-
- sprintf(sdname, "pci/%d", slot);
- (void) hwgraph_path_add(xconn_vhdl, sdname, &pconn_vhdl);
- if (pconn_vhdl == GRAPH_VERTEX_NONE) {
-#if DEBUG
- printk("pcibr_hints_subdevs: hwgraph_path_create failed at\n"
- "\t%p (seeking %s)\n", xconn_vhdl, sdname);
-#endif
- return;
- }
- hwgraph_info_get_LBL(pconn_vhdl, INFO_LBL_SUBDEVS, &ainfo);
- if (ainfo == 0) {
- uint64_t *subdevp;
-
- NEW(subdevp);
- if (!subdevp) {
-#if DEBUG
- printk("pcibr_hints_subdevs: subdev ptr alloc failed at\n"
- "\t%p\n", pconn_vhdl);
-#endif
- return;
- }
- *subdevp = subdevs;
- hwgraph_info_add_LBL(pconn_vhdl, INFO_LBL_SUBDEVS, (arbitrary_info_t) subdevp);
- hwgraph_info_get_LBL(pconn_vhdl, INFO_LBL_SUBDEVS, &ainfo);
- if (ainfo == (arbitrary_info_t) subdevp)
- return;
- DEL(subdevp);
- if (ainfo == (arbitrary_info_t) NULL) {
-#if DEBUG
- printk("pcibr_hints_subdevs: null subdevs ptr at\n"
- "\t%p\n", pconn_vhdl);
-#endif
- return;
- }
-#if DEBUG
- printk("pcibr_subdevs_get: dup subdev add_LBL at\n"
- "\t%p\n", pconn_vhdl);
-#endif
- }
- *(uint64_t *) ainfo = subdevs;
-}
-
-
-#ifdef LATER
-
-#include <sys/idbg.h>
-#include <sys/idbgentry.h>
-
-char *pci_space[] = {"NONE",
- "ROM",
- "IO",
- "",
- "MEM",
- "MEM32",
- "MEM64",
- "CFG",
- "WIN0",
- "WIN1",
- "WIN2",
- "WIN3",
- "WIN4",
- "WIN5",
- "",
- "BAD"};
-
-void
-idbg_pss_func(pcibr_info_h pcibr_infoh, int func)
-{
- pcibr_info_t pcibr_info = pcibr_infoh[func];
- char name[MAXDEVNAME];
- int win;
-
- if (!pcibr_info)
- return;
- qprintf("Per-slot Function Info\n");
-#ifdef SUPPORT_PRINTING_V_FORMAT
- sprintf(name, "%v", pcibr_info->f_vertex);
-#endif
- qprintf("\tSlot Name : %s\n",name);
- qprintf("\tPCI Bus : %d ",pcibr_info->f_bus);
- qprintf("Slot : %d ", pcibr_info->f_slot);
- qprintf("Function : %d ", pcibr_info->f_func);
- qprintf("VendorId : 0x%x " , pcibr_info->f_vendor);
- qprintf("DeviceId : 0x%x\n", pcibr_info->f_device);
-#ifdef SUPPORT_PRINTING_V_FORMAT
- sprintf(name, "%v", pcibr_info->f_master);
-#endif
- qprintf("\tBus provider : %s\n",name);
- qprintf("\tProvider Fns : 0x%x ", pcibr_info->f_pops);
- qprintf("Error Handler : 0x%x Arg 0x%x\n",
- pcibr_info->f_efunc,pcibr_info->f_einfo);
- for(win = 0 ; win < 6 ; win++)
- qprintf("\tBase Reg #%d space %s base 0x%x size 0x%x\n",
- win,pci_space[pcibr_info->f_window[win].w_space],
- pcibr_info->f_window[win].w_base,
- pcibr_info->f_window[win].w_size);
-
- qprintf("\tRom base 0x%x size 0x%x\n",
- pcibr_info->f_rbase,pcibr_info->f_rsize);
-
- qprintf("\tInterrupt Bit Map\n");
- qprintf("\t\tPCI Int#\tBridge Pin#\n");
- for (win = 0 ; win < 4; win++)
- qprintf("\t\tINT%c\t\t%d\n",win+'A',pcibr_info->f_ibit[win]);
- qprintf("\n");
-}
-
-
-void
-idbg_pss_info(pcibr_soft_t pcibr_soft, pciio_slot_t slot)
-{
- pcibr_soft_slot_t pss;
- char slot_conn_name[MAXDEVNAME];
- int func;
-
- pss = &pcibr_soft->bs_slot[slot];
- qprintf("PCI INFRASTRUCTURAL INFO FOR SLOT %d\n", slot);
- qprintf("\tHost Present ? %s ", pss->has_host ? "yes" : "no");
- qprintf("\tHost Slot : %d\n",pss->host_slot);
- sprintf(slot_conn_name, "%v", pss->slot_conn);
- qprintf("\tSlot Conn : %s\n",slot_conn_name);
- qprintf("\t#Functions : %d\n",pss->bss_ninfo);
- for (func = 0; func < pss->bss_ninfo; func++)
- idbg_pss_func(pss->bss_infos,func);
- qprintf("\tSpace : %s ",pci_space[pss->bss_devio.bssd_space]);
- qprintf("\tBase : 0x%x ", pss->bss_devio.bssd_base);
- qprintf("\tShadow Devreg : 0x%x\n", pss->bss_device);
- qprintf("\tUsage counts : pmu %d d32 %d d64 %d\n",
- pss->bss_pmu_uctr,pss->bss_d32_uctr,pss->bss_d64_uctr);
-
- qprintf("\tDirect Trans Info : d64_base 0x%x d64_flags 0x%x"
- "d32_base 0x%x d32_flags 0x%x\n",
- pss->bss_d64_base, pss->bss_d64_flags,
- pss->bss_d32_base, pss->bss_d32_flags);
-
- qprintf("\tExt ATEs active ? %s",
- atomic_read(&pss->bss_ext_ates_active) ? "yes" : "no");
- qprintf(" Command register : 0x%x ", pss->bss_cmd_pointer);
- qprintf(" Shadow command val : 0x%x\n", pss->bss_cmd_shadow);
-
- qprintf("\tRRB Info : Valid %d+%d Reserved %d\n",
- pcibr_soft->bs_rrb_valid[slot],
- pcibr_soft->bs_rrb_valid[slot + PCIBR_RRB_SLOT_VIRTUAL],
- pcibr_soft->bs_rrb_res[slot]);
-
-}
-
-int ips = 0;
-
-void
-idbg_pss(pcibr_soft_t pcibr_soft)
-{
- pciio_slot_t slot;
-
-
- if (ips >= 0 && ips < 8)
- idbg_pss_info(pcibr_soft,ips);
- else if (ips < 0)
- for (slot = 0; slot < 8; slot++)
- idbg_pss_info(pcibr_soft,slot);
- else
- qprintf("Invalid ips %d\n",ips);
-}
-
-#endif /* LATER */
-
-int
-pcibr_dma_enabled(devfs_handle_t pconn_vhdl)
-{
- pciio_info_t pciio_info = pciio_info_get(pconn_vhdl);
- pcibr_soft_t pcibr_soft = (pcibr_soft_t) pciio_info_mfast_get(pciio_info);
-
-
- return xtalk_dma_enabled(pcibr_soft->bs_conn);
-}
EXTRA_CFLAGS := -DLITTLE_ENDIAN
-obj-y += pcibr/ bte_error.o geo_op.o klconflib.o klgraph.o l1.o \
- l1_command.o ml_iograph.o ml_SN_init.o ml_SN_intr.o module.o \
- pci_bus_cvlink.o pciio.o pic.o sgi_io_init.o shub.o shuberror.o \
- shub_intr.o shubio.o xbow.o xtalk.o
+obj-y += pcibr/ ml_SN_intr.o shub_intr.o shuberror.o shub.o bte_error.o \
+ pic.o geo_op.o l1.o l1_command.o klconflib.o klgraph.o ml_SN_init.o \
+ ml_iograph.o module.o pciio.o xbow.o xtalk.o shubio.o
obj-$(CONFIG_KDB) += kdba_io.o
obj-$(CONFIG_SHUB_1_0_SPECIFIC) += efi-rtc.o
-/* $Id: bte_error.c,v 1.1 2002/02/28 17:31:25 marcelo Exp $
+/*
*
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
*
- * Copyright (C) 1992 - 1997, 2000,2002 Silicon Graphics, Inc. All rights reserved.
+ * Copyright (c) 2000-2003 Silicon Graphics, Inc. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it would be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * Further, this software is distributed without any warranty that it is
+ * free of the rightful claim of any third person regarding infringement
+ * or the like. Any license provided herein, whether implied or
+ * otherwise, applies only to this software file. Patent licenses, if
+ * any, provided herein do not apply to combinations of this program with
+ * other software, or any other product whatsoever.
+ *
+ * You should have received a copy of the GNU General Public
+ * License along with this program; if not, write the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
+ *
+ * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy,
+ * Mountain View, CA 94043, or:
+ *
+ * http://www.sgi.com
+ *
+ * For further information regarding this notice, see:
+ *
+ * http://oss.sgi.com/projects/GenInfo/NoticeExplan
*/
#include <asm/sn/sn2/shubio.h>
#include <asm/sn/bte.h>
-/************************************************************************
- * *
- * BTE ERROR RECOVERY *
- * *
- * Given a BTE error, the node causing the error must do the following: *
- * a) Clear all crbs relating to that BTE *
- * 1) Read CRBA value for crb in question *
- * 2) Mark CRB as VALID, store local physical *
- * address known to be good in the address field *
- * (bte_notification_targ is a known good local *
- * address). *
- * 3) Write CRBA *
- * 4) Using ICCR, FLUSH the CRB, and wait for it to *
- * complete. *
- * ... BTE BUSY bit should now be clear (or at least *
- * should be after ALL CRBs associated with the *
- * transfer are complete. *
- * *
- * b) Re-enable BTE *
- * 1) Write IMEM with BTE Enable + XXX bits
- * 2) Write IECLR with BTE clear bits
- * 3) Clear IIDSR INT_SENT bits.
- * *
- ************************************************************************/
-
-/*
- * >>> bte_crb_error_handler needs to be broken into two parts. The
- * first should cleanup the CRB. The second should wait until all bte
- * related CRB's are complete and then do the error reset.
+
+/*
+ * Bte error handling is done in two parts. The first captures
+ * any crb related errors. Since there can be multiple crbs per
+ * interface and multiple interfaces active, we need to wait until
+ * all active crbs are completed. This is the first job of the
+ * second part error handler. When all bte related CRBs are cleanly
+ * completed, it resets the interfaces and gets them ready for new
+ * transfers to be queued.
*/
-void
-bte_crb_error_handler(devfs_handle_t hub_v, int btenum,
- int crbnum, ioerror_t *ioe, int bteop)
+
+
+void bte_error_handler(unsigned long);
+
+
/*
- * Function: bte_crb_error_handler
- * Purpose: Process a CRB for a specific HUB/BTE
- * Parameters: hub_v - vertex of hub in HW graph
- * btenum - bte number on hub (0 == a, 1 == b)
- * crbnum - crb number being processed
- * Notes:
- * This routine assumes serialization at a higher level. A CRB
- * should not be processed more than once. The error recovery
- * follows the following sequence - if you change this, be real
- * sure about what you are doing.
- *
+ * First part error handler. This is called whenever any error CRB interrupt
+ * is generated by the II.
 */
+void
+bte_crb_error_handler(vertex_hdl_t hub_v, int btenum,
+		      int crbnum, ioerror_t * ioe, int bteop)
{
-	hubinfo_t hinfo;
-	icrba_t crba;
-	icrbb_t crbb;
-	nasid_t n;
-	hubreg_t iidsr, imem, ieclr;
+	hubinfo_t hinfo;
+	struct bteinfo_s *bte;
+
	hubinfo_get(hub_v, &hinfo);
+	bte = &hinfo->h_nodepda->bte_if[btenum];
+
+	/*
+	 * The caller has already figured out the error type; we save that
+	 * in the bte handle structure for the thread exercising the
+	 * interface to consume.
+	 */
+	switch (ioe->ie_errortype) {
+	case IIO_ICRB_ECODE_PERR:
+		bte->bh_error = BTEFAIL_POISON;
+		break;
+	case IIO_ICRB_ECODE_WERR:
+		bte->bh_error = BTEFAIL_PROT;
+		break;
+	case IIO_ICRB_ECODE_AERR:
+		bte->bh_error = BTEFAIL_ACCESS;
+		break;
+	case IIO_ICRB_ECODE_TOUT:
+		bte->bh_error = BTEFAIL_TOUT;
+		break;
+	case IIO_ICRB_ECODE_XTERR:
+		bte->bh_error = BTEFAIL_XTERR;
+		break;
+	case IIO_ICRB_ECODE_DERR:
+		bte->bh_error = BTEFAIL_DIR;
+		break;
+	case IIO_ICRB_ECODE_PWERR:
+	case IIO_ICRB_ECODE_PRERR:
+		/* NO BREAK */
+	default:
+		bte->bh_error = BTEFAIL_ERROR;
+	}
+	bte->bte_error_count++;
+
+	BTE_PRINTK(("Got an error on cnode %d bte %d\n",
+		    bte->bte_cnode, bte->bte_num));
+	bte_error_handler((unsigned long) hinfo->h_nodepda);
+}
- n = hinfo->h_nasid;
-
+/*
+ * Second part error handler. Wait until all BTE related CRBs are completed
+ * and then reset the interfaces.
+ */
+void
+bte_error_handler(unsigned long _nodepda)
+{
+	struct nodepda_s *err_nodepda = (struct nodepda_s *) _nodepda;
+	spinlock_t *recovery_lock = &err_nodepda->bte_recovery_lock;
+	struct timer_list *recovery_timer = &err_nodepda->bte_recovery_timer;
+	nasid_t nasid;
+	int i;
+	int valid_crbs;
+	unsigned long irq_flags;
+	volatile u64 *notify;
+	bte_result_t bh_error;
+	ii_imem_u_t imem;	/* II IMEM Register */
+	ii_icrb0_d_u_t icrbd;	/* II CRB Register D */
+	ii_ibcr_u_t ibcr;
+	ii_icmr_u_t icmr;
+
+
+	BTE_PRINTK(("bte_error_handler(%p) - %d\n", err_nodepda,
+		    smp_processor_id()));
+
+	spin_lock_irqsave(recovery_lock, irq_flags);
+
+	if ((err_nodepda->bte_if[0].bh_error == BTE_SUCCESS) &&
+	    (err_nodepda->bte_if[1].bh_error == BTE_SUCCESS)) {
+		BTE_PRINTK(("eh:%p:%d Nothing to do.\n", err_nodepda,
+			    smp_processor_id()));
+		spin_unlock_irqrestore(recovery_lock, irq_flags);
+		return;
+	}
	/*
-	 * The following 10 lines (or so) are adapted from IRIXs
-	 * bte_crb_error function. No clear documentation tells
-	 * why the crb needs to complete normally in order for
-	 * the BTE to resume normal operations. This first step
-	 * appears vital!
+	 * Lock all interfaces on this node to prevent new transfers
+	 * from being queued.
	 */
+	for (i = 0; i < BTES_PER_NODE; i++) {
+		if (err_nodepda->bte_if[i].cleanup_active) {
+			continue;
+		}
+		spin_lock(&err_nodepda->bte_if[i].spinlock);
+		BTE_PRINTK(("eh:%p:%d locked %d\n", err_nodepda,
+			    smp_processor_id(), i));
+		err_nodepda->bte_if[i].cleanup_active = 1;
+	}
+
+	/* Determine information about our hub */
+	nasid = cnodeid_to_nasid(err_nodepda->bte_if[0].bte_cnode);
+
	/*
-	 * Zero error and error code to prevent error_dump complaining
-	 * about these CRBs. Copy the CRB to the notification line.
-	 * The crb address is in shub format (physical address shifted
-	 * right by cacheline size).
+	 * A BTE transfer can use multiple CRBs. We need to make sure
+	 * that all the BTE CRBs are complete (or timed out) before
+	 * attempting to clean up the error. Resetting the BTE while
+	 * there are still BTE CRBs active will hang the BTE.
+	 * We should look at all the CRBs to see if they are allocated
+	 * to the BTE and see if they are still active. When none
+	 * are active, we can continue with the cleanup.
+	 *
+	 * We also want to make sure that the local NI port is up.
+	 * When a router resets the NI port can go down, while it
+	 * goes through the LLP handshake, but then comes back up.
	 */
-	crbb.ii_icrb0_b_regval = REMOTE_HUB_L(n, IIO_ICRB_B(crbnum));
-	crbb.b_error=0;
-	crbb.b_ecode=0;
-	REMOTE_HUB_S(n, IIO_ICRB_B(crbnum), crbb.ii_icrb0_b_regval);
-
-	crba.ii_icrb0_a_regval = REMOTE_HUB_L(n, IIO_ICRB_A(crbnum));
-	crba.a_addr = TO_PHYS((u64)&nodepda->bte_if[btenum].notify) >> 3;
-	crba.a_valid = 1;
-	REMOTE_HUB_S(n, IIO_ICRB_A(crbnum), crba.ii_icrb0_a_regval);
-
-	REMOTE_HUB_S(n, IIO_ICCR,
-		     IIO_ICCR_PENDING | IIO_ICCR_CMD_FLUSH | crbnum);
-
-	while (REMOTE_HUB_L(n, IIO_ICCR) & IIO_ICCR_PENDING)
-	    ;
-
-
-	/* Terminate the BTE. */
-	/* >>> The other bte transfer will need to be restarted. */
-	HUB_L((shubreg_t *)((nodepda->bte_if[btenum].bte_base_addr +
-			     IIO_IBCT0 - IIO_IBLS0)));
-
-	imem = REMOTE_HUB_L(n, IIO_IMEM);
-	ieclr = REMOTE_HUB_L(n, IIO_IECLR);
-	if (btenum == 0) {
-		imem |= IIO_IMEM_W0ESD | IIO_IMEM_B0ESD;
-		ieclr|= IECLR_BTE0;
-	} else {
-		imem |= IIO_IMEM_W0ESD | IIO_IMEM_B1ESD;
-		ieclr|= IECLR_BTE1;
+	icmr.ii_icmr_regval = REMOTE_HUB_L(nasid, IIO_ICMR);
+	if (icmr.ii_icmr_fld_s.i_crb_mark != 0) {
+		/*
+		 * There are errors which still need to be cleaned up by
+		 * hubiio_crb_error_handler
+		 */
+		mod_timer(recovery_timer, jiffies + (HZ * 5));
+		BTE_PRINTK(("eh:%p:%d Marked Giving up\n", err_nodepda,
+			    smp_processor_id()));
+		spin_unlock_irqrestore(recovery_lock, irq_flags);
+		return;
	}
-	REMOTE_HUB_S(n, IIO_IMEM, imem);
-	REMOTE_HUB_S(n, IIO_IECLR, ieclr);
-
-	iidsr = REMOTE_HUB_L(n, IIO_IIDSR);
-	iidsr &= ~IIO_IIDSR_SENT_MASK;
-	iidsr |= IIO_IIDSR_ENB_MASK;
-	REMOTE_HUB_S(n, IIO_IIDSR, iidsr);
+	if (icmr.ii_icmr_fld_s.i_crb_vld != 0) {
+		valid_crbs = icmr.ii_icmr_fld_s.i_crb_vld;
-	bte_reset_nasid(n);
+		for (i = 0; i < IIO_NUM_CRBS; i++) {
+			if (!((1 << i) & valid_crbs)) {
+				/* This crb was not marked as valid, ignore */
+				continue;
+			}
+			icrbd.ii_icrb0_d_regval =
+			    REMOTE_HUB_L(nasid, IIO_ICRB_D(i));
+			if (icrbd.d_bteop) {
+				mod_timer(recovery_timer, jiffies + (HZ * 5));
+				BTE_PRINTK(("eh:%p:%d Valid %d, Giving up\n",
+					    err_nodepda, smp_processor_id(), i));
+				spin_unlock_irqrestore(recovery_lock,
+						       irq_flags);
+				return;
+			}
+		}
+	}
-	*nodepda->bte_if[btenum].most_rcnt_na = IBLS_ERROR;
-}
+	BTE_PRINTK(("eh:%p:%d Cleaning up\n", err_nodepda,
+		    smp_processor_id()));
+	/* Reenable both bte interfaces */
+	imem.ii_imem_regval = REMOTE_HUB_L(nasid, IIO_IMEM);
+	imem.ii_imem_fld_s.i_b0_esd = imem.ii_imem_fld_s.i_b1_esd = 1;
+	REMOTE_HUB_S(nasid, IIO_IMEM, imem.ii_imem_regval);
+
+	/* Reinitialize both BTE state machines. */
+	ibcr.ii_ibcr_regval = REMOTE_HUB_L(nasid, IIO_IBCR);
+	ibcr.ii_ibcr_fld_s.i_soft_reset = 1;
+	REMOTE_HUB_S(nasid, IIO_IBCR, ibcr.ii_ibcr_regval);
+
+
+	for (i = 0; i < BTES_PER_NODE; i++) {
+		bh_error = err_nodepda->bte_if[i].bh_error;
+		if (bh_error != BTE_SUCCESS) {
+			/* There is an error which needs to be notified */
+			notify = err_nodepda->bte_if[i].most_rcnt_na;
+			BTE_PRINTK(("cnode %d bte %d error=0x%lx\n",
+				    err_nodepda->bte_if[i].bte_cnode,
+				    err_nodepda->bte_if[i].bte_num,
+				    IBLS_ERROR | (u64) bh_error));
+			*notify = IBLS_ERROR | bh_error;
+			err_nodepda->bte_if[i].bh_error = BTE_SUCCESS;
+		}
+
+		err_nodepda->bte_if[i].cleanup_active = 0;
+		BTE_PRINTK(("eh:%p:%d Unlocked %d\n", err_nodepda,
+			    smp_processor_id(), i));
+		spin_unlock(&err_nodepda->bte_if[i].spinlock);
+	}
+
+	del_timer(recovery_timer);
+
+	spin_unlock_irqrestore(recovery_lock, irq_flags);
+}
--- /dev/null
+/*
+ * Kernel Debugger Architecture Dependent POD functions.
+ *
+ * Copyright (C) 1999-2003 Silicon Graphics, Inc. All Rights Reserved
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it would be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * Further, this software is distributed without any warranty that it is
+ * free of the rightful claim of any third person regarding infringement
+ * or the like. Any license provided herein, whether implied or
+ * otherwise, applies only to this software file. Patent licenses, if
+ * any, provided herein do not apply to combinations of this program with
+ * other software, or any other product whatsoever.
+ *
+ * You should have received a copy of the GNU General Public
+ * License along with this program; if not, write the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
+ *
+ * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy,
+ * Mountain View, CA 94043, or:
+ *
+ * http://www.sgi.com
+ *
+ * For further information regarding this notice, see:
+ *
+ * http://oss.sgi.com/projects/GenInfo/NoticeExplan
+ */
+
+#include <linux/types.h>
+#include <linux/kdb.h>
+//#include <linux/kdbprivate.h>
+
+/**
+ * kdba_io - enter POD mode from kdb
+ * @argc: arg count
+ * @argv: arg values
+ * @envp: kdb env. vars
+ * @regs: current register state
+ *
+ * Intended to enter POD mode via an SGI SN specific SAL call; currently a stub that only prints @regs.
+ */
+static int
+kdba_io(int argc, const char **argv, const char **envp, struct pt_regs *regs)
+{
+	kdb_printf("kdba_io entered with addr 0x%p\n", (void *) regs);
+
+	return(0);
+}
+
+/**
+ * kdba_io_init - register 'io' command with kdb
+ *
+ * Register the 'io' command with kdb at load time.
+ */
+void
+kdba_io_init(void)
+{
+ kdb_register("io", kdba_io, "<vaddr>", "Display IO Contents", 0);
+}
+
+/**
+ * kdba_exit - unregister the 'io' command
+ *
+ * Tell kdb that the 'io' command is no longer available.
+ */
+static void __exit
+kdba_exit(void)
+{
+	kdb_unregister("io");
+}
#include <asm/sn/router.h>
#include <asm/sn/xtalk/xbow.h>
-#define printf printk
-int hasmetarouter;
#define LDEBUG 0
#define NIC_UNKNOWN ((nic_t) -1)
#define DBG(x...)
#endif /* DEBUG_KLGRAPH */
-static void sort_nic_names(lboard_t *) ;
-
u64 klgraph_addr[MAX_COMPACT_NODES];
-int module_number = 0;
+static int hasmetarouter;
+
+
+char brick_types[MAX_BRICK_TYPES + 1] = "crikxdpn%#=012345";
lboard_t *
find_lboard(lboard_t *start, unsigned char brd_type)
return (lboard_t *)NULL;
}
-lboard_t *
-find_lboard_module_class(lboard_t *start, geoid_t geoid,
- unsigned char brd_type)
-{
- while (start) {
- DBG("find_lboard_module_class: lboard 0x%p, start->brd_geoid 0x%x, mod 0x%x, start->brd_type 0x%x, brd_type 0x%x\n", start, start->brd_geoid, geoid, start->brd_type, brd_type);
-
- if (geo_cmp(start->brd_geoid, geoid) &&
- (KLCLASS(start->brd_type) == KLCLASS(brd_type)))
- return start;
- start = KLCF_NEXT(start);
- }
-
- /* Didn't find it. */
- return (lboard_t *)NULL;
-}
-
/*
* Convert a NIC name to a name for use in the hardware graph.
*/
}
/*
- * Find the lboard structure and get the board name.
- * If we can't find the structure or it's too low a revision,
- * use default name.
- */
-lboard_t *
-get_board_name(nasid_t nasid, geoid_t geoid, slotid_t slot, char *name)
-{
- lboard_t *brd;
-
- brd = find_lboard_modslot((lboard_t *)KL_CONFIG_INFO(nasid),
- geoid);
-
-#ifndef _STANDALONE
- {
- cnodeid_t cnode = NASID_TO_COMPACT_NODEID(nasid);
-
- if (!brd && (NODEPDA(cnode)->xbow_peer != INVALID_NASID))
- brd = find_lboard_modslot((lboard_t *)
- KL_CONFIG_INFO(NODEPDA(cnode)->xbow_peer),
- geoid);
- }
-#endif
-
- if (!brd || (brd->brd_sversion < 2)) {
- strcpy(name, EDGE_LBL_XWIDGET);
- } else {
- nic_name_convert(brd->brd_name, name);
- }
-
- /*
- * PV # 540860
- * If the name is not 'baseio'
- * get the lowest of all the names in the nic string.
- * This is needed for boards like divo, which can have
- * a bunch of daughter cards, but would like to be called
- * divo. We could do this for baseio
- * but it has some special case names that we would not
- * like to disturb at this point.
- */
-
- /* gfx boards don't need any of this name scrambling */
- if (brd && (KLCLASS(brd->brd_type) == KLCLASS_GFX)) {
- return(brd);
- }
-
- if (!(!strcmp(name, "baseio") )) {
- if (brd) {
- sort_nic_names(brd) ;
- /* Convert to small case, '-' to '_' etc */
- nic_name_convert(brd->brd_name, name) ;
- }
- }
-
- return(brd);
-}
-
-/*
* get_actual_nasid
*
* Completely disabled brds have their klconfig on
board_name = EDGE_LBL_IO;
break;
case KLCLASS_IOBRICK:
- if (brd->brd_type == KLTYPE_PBRICK)
+ if (brd->brd_type == KLTYPE_PXBRICK)
+ board_name = EDGE_LBL_PXBRICK;
+ else if (brd->brd_type == KLTYPE_IXBRICK)
+ board_name = EDGE_LBL_IXBRICK;
+ else if (brd->brd_type == KLTYPE_PBRICK)
board_name = EDGE_LBL_PBRICK;
else if (brd->brd_type == KLTYPE_IBRICK)
board_name = EDGE_LBL_IBRICK;
else if (brd->brd_type == KLTYPE_XBRICK)
board_name = EDGE_LBL_XBRICK;
+ else if (brd->brd_type == KLTYPE_PEBRICK)
+ board_name = EDGE_LBL_PEBRICK;
+ else if (brd->brd_type == KLTYPE_CGBRICK)
+ board_name = EDGE_LBL_CGBRICK;
else
board_name = EDGE_LBL_IOBRICK;
break;
#include "asm/sn/sn_private.h"
-xwidgetnum_t
-nodevertex_widgetnum_get(devfs_handle_t node_vtx)
-{
- hubinfo_t hubinfo_p;
-
- hwgraph_info_get_LBL(node_vtx, INFO_LBL_NODE_INFO,
- (arbitrary_info_t *) &hubinfo_p);
- return(hubinfo_p->h_widgetid);
-}
-
-devfs_handle_t
-nodevertex_xbow_peer_get(devfs_handle_t node_vtx)
-{
- hubinfo_t hubinfo_p;
- nasid_t xbow_peer_nasid;
- cnodeid_t xbow_peer;
-
- hwgraph_info_get_LBL(node_vtx, INFO_LBL_NODE_INFO,
- (arbitrary_info_t *) &hubinfo_p);
- xbow_peer_nasid = hubinfo_p->h_nodepda->xbow_peer;
- if(xbow_peer_nasid == INVALID_NASID)
- return ( (devfs_handle_t)-1);
- xbow_peer = NASID_TO_COMPACT_NODEID(xbow_peer_nasid);
- return(NODEPDA(xbow_peer)->node_vertex);
-}
-
-/* NIC Sorting Support */
-
-#define MAX_NICS_PER_STRING 32
-#define MAX_NIC_NAME_LEN 32
-
-static char *
-get_nic_string(lboard_t *lb)
-{
- int i;
- klinfo_t *k = NULL ;
- klconf_off_t mfg_off = 0 ;
- char *mfg_nic = NULL ;
-
- for (i = 0; i < KLCF_NUM_COMPS(lb); i++) {
- k = KLCF_COMP(lb, i) ;
- switch(k->struct_type) {
- case KLSTRUCT_BRI:
- mfg_off = ((klbri_t *)k)->bri_mfg_nic ;
- break ;
-
- case KLSTRUCT_HUB:
- mfg_off = ((klhub_t *)k)->hub_mfg_nic ;
- break ;
-
- case KLSTRUCT_ROU:
- mfg_off = ((klrou_t *)k)->rou_mfg_nic ;
- break ;
-
- case KLSTRUCT_GFX:
- mfg_off = ((klgfx_t *)k)->gfx_mfg_nic ;
- break ;
-
- case KLSTRUCT_TPU:
- mfg_off = ((kltpu_t *)k)->tpu_mfg_nic ;
- break ;
-
- case KLSTRUCT_GSN_A:
- case KLSTRUCT_GSN_B:
- mfg_off = ((klgsn_t *)k)->gsn_mfg_nic ;
- break ;
-
- case KLSTRUCT_XTHD:
- mfg_off = ((klxthd_t *)k)->xthd_mfg_nic ;
- break;
-
- default:
- mfg_off = 0 ;
- break ;
- }
- if (mfg_off)
- break ;
- }
-
- if ((mfg_off) && (k))
- mfg_nic = (char *)NODE_OFFSET_TO_K0(k->nasid, mfg_off) ;
-
- return mfg_nic ;
-}
-
-char *
-get_first_string(char **ptrs, int n)
-{
- int i ;
- char *tmpptr ;
-
- if ((ptrs == NULL) || (n == 0))
- return NULL ;
-
- tmpptr = ptrs[0] ;
-
- if (n == 1)
- return tmpptr ;
-
- for (i = 0 ; i < n ; i++) {
- if (strcmp(tmpptr, ptrs[i]) > 0)
- tmpptr = ptrs[i] ;
- }
-
- return tmpptr ;
-}
-
-int
-get_ptrs(char *idata, char **ptrs, int n, char *label)
-{
- int i = 0 ;
- char *tmp = idata ;
-
- if ((ptrs == NULL) || (idata == NULL) || (label == NULL) || (n == 0))
- return 0 ;
-
- while ( (tmp = strstr(tmp, label)) ){
- tmp += strlen(label) ;
- /* check for empty name field, and last NULL ptr */
- if ((i < (n-1)) && (*tmp != ';')) {
- ptrs[i++] = tmp ;
- }
- }
-
- ptrs[i] = NULL ;
-
- return i ;
-}
-
-/*
- * sort_nic_names
- *
- * Does not really do sorting. Find the alphabetically lowest
- * name among all the nic names found in a nic string.
- *
- * Return:
- * Nothing
- *
- * Side Effects:
- *
- * lb->brd_name gets the new name found
- */
-
-static void
-sort_nic_names(lboard_t *lb)
-{
- char *nic_str ;
- char *ptrs[MAX_NICS_PER_STRING] ;
- char name[MAX_NIC_NAME_LEN] ;
- char *tmp, *tmp1 ;
-
- *name = 0 ;
-
- /* Get the nic pointer from the lb */
-
- if ((nic_str = get_nic_string(lb)) == NULL)
- return ;
-
- tmp = get_first_string(ptrs,
- get_ptrs(nic_str, ptrs, MAX_NICS_PER_STRING, "Name:")) ;
-
- if (tmp == NULL)
- return ;
-
- if ( (tmp1 = strchr(tmp, ';')) )
- strlcpy(name, tmp, tmp1-tmp);
- else
- strlcpy(name, tmp, (sizeof(name)));
-
- strlcpy(lb->brd_name, name, sizeof(lb->brd_name)) ;
-}
-
-
-
-char brick_types[MAX_BRICK_TYPES + 1] = "crikxdpn%#012345";
-
/*
* Format a module id for printing.
*/
rack = MODULE_GET_RACK(m);
ASSERT(MODULE_GET_BTYPE(m) < MAX_BRICK_TYPES);
brickchar = MODULE_GET_BTCHAR(m);
+
position = MODULE_GET_BPOS(m);
if (fmt == MODULE_FORMAT_BRIEF) {
#include <asm/sn/hcl.h>
#include <asm/sn/labelcl.h>
#include <asm/sn/kldir.h>
-#include <asm/sn/gda.h>
#include <asm/sn/klconfig.h>
#include <asm/sn/router.h>
#include <asm/sn/xtalk/xbow.h>
extern char arg_maxnodes[];
extern u64 klgraph_addr[];
-void mark_cpuvertex_as_cpu(devfs_handle_t vhdl, cpuid_t cpuid);
+void mark_cpuvertex_as_cpu(vertex_hdl_t vhdl, cpuid_t cpuid);
/*
* Add detailed disabled cpu inventory info to the hardware graph.
*/
void
-klhwg_disabled_cpu_invent_info(devfs_handle_t cpuv,
+klhwg_disabled_cpu_invent_info(vertex_hdl_t cpuv,
cnodeid_t cnode,
klcpu_t *cpu, slotid_t slot)
{
* Add detailed cpu inventory info to the hardware graph.
*/
void
-klhwg_cpu_invent_info(devfs_handle_t cpuv,
+klhwg_cpu_invent_info(vertex_hdl_t cpuv,
cnodeid_t cnode,
klcpu_t *cpu)
{
* as a part of detailed inventory info in the hwgraph.
*/
void
-klhwg_baseio_inventory_add(devfs_handle_t baseio_vhdl,cnodeid_t cnode)
+klhwg_baseio_inventory_add(vertex_hdl_t baseio_vhdl,cnodeid_t cnode)
{
invent_miscinfo_t *baseio_inventory;
unsigned char version = 0,revision = 0;
sizeof(invent_miscinfo_t));
}
-char *hub_rev[] = {
- "0.0",
- "1.0",
- "2.0",
- "2.1",
- "2.2",
- "2.3"
-};
-
/*
* Add detailed cpu inventory info to the hardware graph.
*/
void
-klhwg_hub_invent_info(devfs_handle_t hubv,
+klhwg_hub_invent_info(vertex_hdl_t hubv,
cnodeid_t cnode,
klhub_t *hub)
{
/* ARGSUSED */
void
-klhwg_add_hub(devfs_handle_t node_vertex, klhub_t *hub, cnodeid_t cnode)
+klhwg_add_hub(vertex_hdl_t node_vertex, klhub_t *hub, cnodeid_t cnode)
{
- devfs_handle_t myhubv;
- devfs_handle_t hub_mon;
+ vertex_hdl_t myhubv;
+ vertex_hdl_t hub_mon;
int rc;
extern struct file_operations shub_mon_fops;
(void) hwgraph_path_add(node_vertex, EDGE_LBL_HUB, &myhubv);
rc = device_master_set(myhubv, node_vertex);
hub_mon = hwgraph_register(myhubv, EDGE_LBL_PERFMON,
- 0, DEVFS_FL_AUTO_DEVNUM,
+ 0, 0,
0, 0,
S_IFCHR | S_IRUSR | S_IWUSR | S_IRGRP, 0, 0,
&shub_mon_fops, (void *)(long)cnode);
/* ARGSUSED */
void
-klhwg_add_disabled_cpu(devfs_handle_t node_vertex, cnodeid_t cnode, klcpu_t *cpu, slotid_t slot)
+klhwg_add_disabled_cpu(vertex_hdl_t node_vertex, cnodeid_t cnode, klcpu_t *cpu, slotid_t slot)
{
- devfs_handle_t my_cpu;
+ vertex_hdl_t my_cpu;
char name[120];
cpuid_t cpu_id;
nasid_t nasid;
/* ARGSUSED */
void
-klhwg_add_cpu(devfs_handle_t node_vertex, cnodeid_t cnode, klcpu_t *cpu)
+klhwg_add_cpu(vertex_hdl_t node_vertex, cnodeid_t cnode, klcpu_t *cpu)
{
- devfs_handle_t my_cpu, cpu_dir;
+ vertex_hdl_t my_cpu, cpu_dir;
char name[120];
cpuid_t cpu_id;
nasid_t nasid;
nasid_t hub_nasid;
cnodeid_t hub_cnode;
int widgetnum;
- devfs_handle_t xbow_v, hubv;
+ vertex_hdl_t xbow_v, hubv;
/*REFERENCED*/
graph_error_t err;
/* ARGSUSED */
void
-klhwg_add_node(devfs_handle_t hwgraph_root, cnodeid_t cnode, gda_t *gdap)
+klhwg_add_node(vertex_hdl_t hwgraph_root, cnodeid_t cnode)
{
nasid_t nasid;
lboard_t *brd;
klhub_t *hub;
- devfs_handle_t node_vertex = NULL;
+ vertex_hdl_t node_vertex = NULL;
char path_buffer[100];
int rv;
char *s;
ASSERT(brd);
do {
- devfs_handle_t cpu_dir;
+ vertex_hdl_t cpu_dir;
/* Generate a hardware graph path for this board. */
board_to_path(brd, path_buffer);
while (cpu) {
cpuid_t cpu_id;
cpu_id = nasid_slice_to_cpuid(nasid,cpu->cpu_info.physid);
- if (cpu_enabled(cpu_id))
+ if (cpu_online(cpu_id))
klhwg_add_cpu(node_vertex, cnode, cpu);
else
klhwg_add_disabled_cpu(node_vertex, cnode, cpu, brd->brd_slot);
/* ARGSUSED */
void
-klhwg_add_all_routers(devfs_handle_t hwgraph_root)
+klhwg_add_all_routers(vertex_hdl_t hwgraph_root)
{
nasid_t nasid;
cnodeid_t cnode;
lboard_t *brd;
- devfs_handle_t node_vertex;
+ vertex_hdl_t node_vertex;
char path_buffer[100];
int rv;
/* ARGSUSED */
void
-klhwg_connect_one_router(devfs_handle_t hwgraph_root, lboard_t *brd,
+klhwg_connect_one_router(vertex_hdl_t hwgraph_root, lboard_t *brd,
cnodeid_t cnode, nasid_t nasid)
{
klrou_t *router;
char path_buffer[50];
char dest_path[50];
- devfs_handle_t router_hndl;
- devfs_handle_t dest_hndl;
+ vertex_hdl_t router_hndl;
+ vertex_hdl_t dest_hndl;
int rc;
int port;
lboard_t *dest_brd;
void
-klhwg_connect_routers(devfs_handle_t hwgraph_root)
+klhwg_connect_routers(vertex_hdl_t hwgraph_root)
{
nasid_t nasid;
cnodeid_t cnode;
void
-klhwg_connect_hubs(devfs_handle_t hwgraph_root)
+klhwg_connect_hubs(vertex_hdl_t hwgraph_root)
{
nasid_t nasid;
cnodeid_t cnode;
lboard_t *brd;
klhub_t *hub;
lboard_t *dest_brd;
- devfs_handle_t hub_hndl;
- devfs_handle_t dest_hndl;
+ vertex_hdl_t hub_hndl;
+ vertex_hdl_t dest_hndl;
char path_buffer[50];
char dest_path[50];
graph_error_t rc;
}
void
-klhwg_add_all_modules(devfs_handle_t hwgraph_root)
+klhwg_add_all_modules(vertex_hdl_t hwgraph_root)
{
cmoduleid_t cm;
char name[128];
- devfs_handle_t vhdl;
- devfs_handle_t module_vhdl;
+ vertex_hdl_t vhdl;
+ vertex_hdl_t module_vhdl;
int rc;
char buffer[16];
}
void
-klhwg_add_all_nodes(devfs_handle_t hwgraph_root)
+klhwg_add_all_nodes(vertex_hdl_t hwgraph_root)
{
cnodeid_t cnode;
for (cnode = 0; cnode < numnodes; cnode++) {
- klhwg_add_node(hwgraph_root, cnode, NULL);
+ klhwg_add_node(hwgraph_root, cnode);
}
for (cnode = 0; cnode < numnodes; cnode++) {
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <asm/io.h>
#include <asm/sn/sgi.h>
#include <asm/sn/io.h>
#include <asm/sn/iograph.h>
#include <asm/sn/hcl.h>
#include <asm/sn/hcl_util.h>
#include <asm/sn/labelcl.h>
-#include <asm/sn/eeprom.h>
#include <asm/sn/router.h>
#include <asm/sn/module.h>
#include <asm/sn/ksys/l1.h>
#define UART_BAUD_RATE 57600
+static int L1_connected; /* non-zero when interrupts are enabled */
+
+
int
get_L1_baud(void)
{
int
l1_get_intr_value( void )
{
- return(0);
+ cpuid_t intr_cpuid;
+ nasid_t console_nasid;
+ int major, minor;
+ extern nasid_t get_console_nasid(void);
+
+ /* if it is an old prom, run in poll mode */
+
+ major = sn_sal_rev_major();
+ minor = sn_sal_rev_minor();
+ if ( (major < 1) || ((major == 1) && (minor < 10)) ) {
+ /* before version 1.10 doesn't work */
+ return (0);
+ }
+
+ console_nasid = get_console_nasid();
+ intr_cpuid = NODEPDA(NASID_TO_COMPACT_NODEID(console_nasid))->node_first_cpu;
+ return CPU_VECTOR_TO_IRQ(intr_cpuid, SGI_UART_VECTOR);
}
/* Disconnect the callup functions - throw away interrupts */
/* Set up uart interrupt handling for this node's uart */
-void
-l1_connect_intr(void *rx_notify, void *tx_notify)
+int
+l1_connect_intr(void *intr_func, void *arg, struct pt_regs *ep)
{
-#if 0
- // Will need code here for sn2 - something like this
- console_nodepda = NODEPDA(NASID_TO_COMPACT_NODEID(get_master_nasid());
- intr_connect_level(console_nodepda->node_first_cpu,
- SGI_UART_VECTOR, INTPEND0_MAXMASK,
- dummy_intr_func);
- request_irq(SGI_UART_VECTOR | (console_nodepda->node_first_cpu << 8),
- intr_func, SA_INTERRUPT | SA_SHIRQ,
- "l1_protocol_driver", (void *)sc);
-#endif
+ cpuid_t intr_cpuid;
+ nasid_t console_nasid;
+ unsigned int console_irq;
+ int result;
+ extern int intr_connect_level(cpuid_t, int, ilvl_t, intr_func_t);
+ extern nasid_t get_console_nasid(void);
+
+
+ /* don't call to connect multiple times - we DON'T support changing the handler */
+
+ if ( !L1_connected ) {
+ L1_connected++;
+ console_nasid = get_console_nasid();
+ intr_cpuid = NODEPDA(NASID_TO_COMPACT_NODEID(console_nasid))->node_first_cpu;
+ console_irq = CPU_VECTOR_TO_IRQ(intr_cpuid, SGI_UART_VECTOR);
+ result = intr_connect_level(intr_cpuid, SGI_UART_VECTOR,
+ 0 /*not used*/, 0 /*not used*/);
+ if (result != SGI_UART_VECTOR) {
+ if (result < 0)
+ printk(KERN_WARNING "L1 console driver : intr_connect_level failed %d\n", result);
+ else
+ printk(KERN_WARNING "L1 console driver : intr_connect_level returns wrong bit %d\n", result);
+ return (-1);
+ }
+
+ result = request_irq(console_irq, intr_func, SA_INTERRUPT,
+ "SGI L1 console driver", (void *)arg);
+ if (result < 0) {
+ printk(KERN_WARNING "L1 console driver : request_irq failed %d\n", result);
+ return (-1);
+ }
+
+ /* ask SAL to turn on interrupts in the UART itself */
+ ia64_sn_console_intr_enable(SAL_CONSOLE_INTR_RECV);
+ }
+ return (0);
}
int
l1_serial_out( char *str, int len )
{
- int counter = len;
+ int tmp;
/* Ignore empty messages */
if ( len == 0 )
if ( IS_RUNNING_ON_SIMULATOR() ) {
extern u64 master_node_bedrock_address;
void early_sn_setup(void);
+ int counter = len;
+
if (!master_node_bedrock_address)
early_sn_setup();
if ( master_node_bedrock_address != (u64)0 ) {
}
/* Attempt to write things out thru the sal */
- if ( ia64_sn_console_putb(str, len) )
- return(0);
-
- return((counter <= 0) ? 0 : (len - counter));
+ if ( L1_connected )
+ tmp = ia64_sn_console_xmit_chars(str, len);
+ else
+ tmp = ia64_sn_console_putb(str, len);
+ return ((tmp < 0) ? 0 : tmp);
}
#include <asm/sn/hcl.h>
#include <asm/sn/hcl_util.h>
#include <asm/sn/labelcl.h>
-#include <asm/sn/eeprom.h>
#include <asm/sn/router.h>
#include <asm/sn/module.h>
#include <asm/sn/ksys/l1.h>
#include <asm/sn/sn_sal.h>
#include <linux/ctype.h>
-#define ELSC_TIMEOUT 1000000 /* ELSC response timeout (usec) */
-#define LOCK_TIMEOUT 5000000 /* Hub lock timeout (usec) */
-
-#define hub_cpu_get() 0
-
-#define LBYTE(caddr) (*(char *) caddr)
-
-extern char *bcopy(const char * src, char * dest, int count);
-
-#define LDEBUG 0
-
-/*
- * ELSC data is in NVRAM page 7 at the following offsets.
- */
-
-#define NVRAM_MAGIC_AD 0x700 /* magic number used for init */
-#define NVRAM_PASS_WD 0x701 /* password (4 bytes in length) */
-#define NVRAM_DBG1 0x705 /* virtual XOR debug switches */
-#define NVRAM_DBG2 0x706 /* physical XOR debug switches */
-#define NVRAM_CFG 0x707 /* ELSC Configuration info */
-#define NVRAM_MODULE 0x708 /* system module number */
-#define NVRAM_BIST_FLG 0x709 /* BIST flags (2 bits per nodeboard) */
-#define NVRAM_PARTITION 0x70a /* module's partition id */
-#define NVRAM_DOMAIN 0x70b /* module's domain id */
-#define NVRAM_CLUSTER 0x70c /* module's cluster id */
-#define NVRAM_CELL 0x70d /* module's cellid */
-
-#define NVRAM_MAGIC_NO 0x37 /* value of magic number */
-#define NVRAM_SIZE 16 /* 16 bytes in nvram */
-
-
/* elsc_display_line writes up to 12 characters to either the top or bottom
* line of the L1 display. line points to a buffer containing the message
* to be displayed. The zero-based line number is specified by lnum (so
return 0;
}
+
/*
* iobrick routines
*/
if ( ia64_sn_sysctl_iobrick_module_get(nasid, &result) )
return( ELSC_ERROR_CMD_SEND );
- *rack = (result & L1_ADDR_RACK_MASK) >> L1_ADDR_RACK_SHFT;
- *bay = (result & L1_ADDR_BAY_MASK) >> L1_ADDR_BAY_SHFT;
- *brick_type = (result & L1_ADDR_TYPE_MASK) >> L1_ADDR_TYPE_SHFT;
+ *rack = (result & MODULE_RACK_MASK) >> MODULE_RACK_SHFT;
+ *bay = (result & MODULE_BPOS_MASK) >> MODULE_BPOS_SHFT;
+ *brick_type = (result & MODULE_BTYPE_MASK) >> MODULE_BTYPE_SHFT;
*brick_type = toupper(*brick_type);
return 0;
int iomoduleid_get(nasid_t nasid)
{
-
int result = 0;
if ( ia64_sn_sysctl_iobrick_module_get(nasid, &result) )
return( ELSC_ERROR_CMD_SEND );
return result;
-
}
int iobrick_module_get(nasid_t nasid)
RACK_ADD_NUM(rack, t);
switch( brick_type ) {
- case 'I':
+ case L1_BRICKTYPE_IX:
+ brick_type = MODULE_IXBRICK; break;
+ case L1_BRICKTYPE_PX:
+ brick_type = MODULE_PXBRICK; break;
+ case L1_BRICKTYPE_I:
brick_type = MODULE_IBRICK; break;
- case 'P':
+ case L1_BRICKTYPE_P:
brick_type = MODULE_PBRICK; break;
- case 'X':
+ case L1_BRICKTYPE_X:
brick_type = MODULE_XBRICK; break;
}
return ret;
}
-#ifdef CONFIG_PCI
+
/*
* iobrick_module_get_nasid() returns a module_id which has the brick
* type encoded in bits 15-12, but this is not the true brick type...
/* convert to a module.h brick type */
for( t = 0; t < MAX_BRICK_TYPES; t++ ) {
- if( brick_types[t] == type )
+ if( brick_types[t] == type ) {
return t;
+ }
}
return -1; /* unknown brick */
}
-#endif
+
int iobrick_module_get_nasid(nasid_t nasid)
{
int io_moduleid;
-#ifdef PIC_LATER
- uint rack, bay;
+ io_moduleid = iobrick_module_get(nasid);
+ return io_moduleid;
+}
+
+/*
+ * given a L1 bricktype, return a bricktype string. This string is the
+ * string that will be used in the hwpath for I/O bricks
+ */
+char *
+iobrick_L1bricktype_to_name(int type)
+{
+ switch (type)
+ {
+ default:
+ return("Unknown");
+
+ case L1_BRICKTYPE_X:
+ return("Xbrick");
- if (PEBRICK_NODE(nasid)) {
- if (peer_iobrick_rack_bay_get(nasid, &rack, &bay)) {
- printf("Could not read rack and bay location "
- "of PEBrick at nasid %d\n", nasid);
- }
+ case L1_BRICKTYPE_I:
+ return("Ibrick");
- io_moduleid = peer_iobrick_module_get(sc, rack, bay);
+ case L1_BRICKTYPE_P:
+ return("Pbrick");
+
+ case L1_BRICKTYPE_PX:
+ return("PXbrick");
+
+ case L1_BRICKTYPE_IX:
+ return("IXbrick");
+
+ case L1_BRICKTYPE_C:
+ return("Cbrick");
+
+ case L1_BRICKTYPE_R:
+ return("Rbrick");
}
-#endif /* PIC_LATER */
- io_moduleid = iobrick_module_get(nasid);
- return io_moduleid;
}
+
#include <asm/sn/sn_private.h>
#include <asm/sn/klconfig.h>
#include <asm/sn/sn_cpuid.h>
-#include <asm/sn/snconfig.h>
-extern int numcpus;
-extern char arg_maxnodes[];
extern cpuid_t master_procid;
-
-extern int hasmetarouter;
-
int maxcpus;
-cpumask_t boot_cpumask;
-hubreg_t region_mask = 0;
-
extern xwidgetnum_t hub_widget_id(nasid_t);
-extern int valid_icache_reasons; /* Reasons to flush the icache */
-extern int valid_dcache_reasons; /* Reasons to flush the dcache */
-extern u_char miniroot;
-extern volatile int need_utlbmiss_patch;
extern void iograph_early_init(void);
nasid_t master_nasid = INVALID_NASID; /* This is the partition master nasid */
/* early initialization of iograph */
iograph_early_init();
-
- /* Initialize Hub Pseudodriver Management */
- hubdev_init();
}
mutex_init_locked(&npda->xbow_sema); /* init it locked? */
}
-/* XXX - Move the interrupt stuff to intr.c ? */
-/*
- * Set up the platform-dependent fields in the processor pda.
- * Must be done _after_ init_platform_nodepda().
- * If we need a lock here, something else is wrong!
- */
-void init_platform_pda(cpuid_t cpu)
-{
-}
-
void
update_node_information(cnodeid_t cnodeid)
{
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
- * Copyright (C) 1992-1997, 2000-2002 Silicon Graphics, Inc. All Rights Reserved.
+ * Copyright (C) 1992-1997, 2000-2003 Silicon Graphics, Inc. All Rights Reserved.
*/
/*
#include <asm/sal.h>
#include <asm/sn/sn_sal.h>
-extern irqpda_t *irqpdaindr[];
-extern cnodeid_t master_node_get(devfs_handle_t vhdl);
+extern irqpda_t *irqpdaindr;
+extern cnodeid_t master_node_get(vertex_hdl_t vhdl);
extern nasid_t master_nasid;
// Initialize some shub registers for interrupts, both IO and error.
+//
+
+
void
intr_init_vecblk( nodepda_t *npda,
nodepda_t *lnodepda;
sh_ii_int0_enable_u_t ii_int_enable;
sh_int_node_id_config_u_t node_id_config;
+ sh_local_int5_config_u_t local5_config;
+ sh_local_int5_enable_u_t local5_enable;
extern void sn_init_cpei_timer(void);
static int timer_added = 0;
HUB_S( (unsigned long *)GLOBAL_MMR_ADDR(nasid, SH_PI_ERROR_MASK), 0);
HUB_S( (unsigned long *)GLOBAL_MMR_ADDR(nasid, SH_PI_CRBP_ERROR_MASK), 0);
+ // Config and enable UART interrupt, all nodes.
+
+ local5_config.sh_local_int5_config_regval = 0;
+ local5_config.sh_local_int5_config_s.idx = SGI_UART_VECTOR;
+ local5_config.sh_local_int5_config_s.pid = cpu0;
+ HUB_S( (unsigned long *)GLOBAL_MMR_ADDR(nasid, SH_LOCAL_INT5_CONFIG),
+ local5_config.sh_local_int5_config_regval);
+
+ local5_enable.sh_local_int5_enable_regval = 0;
+ local5_enable.sh_local_int5_enable_s.uart_int = 1;
+ HUB_S( (unsigned long *)GLOBAL_MMR_ADDR(nasid, SH_LOCAL_INT5_ENABLE),
+ local5_enable.sh_local_int5_enable_regval);
+
// The II_INT_CONFIG register for cpu 0.
ii_int_config.sh_ii_int0_config_regval = 0;
// Enable interrupts for II_INT0 and 1.
ii_int_enable.sh_ii_int0_enable_regval = 0;
ii_int_enable.sh_ii_int0_enable_s.ii_enable = 1;
-#ifdef BUS_INT_WAR
- /* Dont enable any ints from II. We will poll for interrupts. */
- ii_int_enable.sh_ii_int0_enable_s.ii_enable = 0;
-
- /* Enable IPIs. We use them ONLY for send INITs to hung cpus */
- *(volatile long*)GLOBAL_MMR_ADDR(nasid, SH_IPI_INT_ENABLE) = 1;
-#endif
HUB_S((unsigned long *)GLOBAL_MMR_ADDR(nasid, SH_II_INT0_ENABLE),
ii_int_enable.sh_ii_int0_enable_regval);
int reserve)
{
int i;
- irqpda_t *irqs = irqpdaindr[cpu];
+ irqpda_t *irqs = irqpdaindr;
+ int min_shared;
if (reserve) {
if (bit < 0) {
}
}
}
- if (bit < 0) {
- return -1;
+ if (bit < 0) { /* ran out of irqs. Have to share. This will be rare. */
+ min_shared = 256;
+ for (i=IA64_SN2_FIRST_DEVICE_VECTOR; i < IA64_SN2_LAST_DEVICE_VECTOR; i++) {
+ /* Share with the same device class */
+ if (irqpdaindr->current->vendor == irqpdaindr->device_dev[i]->vendor &&
+ irqpdaindr->current->device == irqpdaindr->device_dev[i]->device &&
+ irqpdaindr->share_count[i] < min_shared) {
+ min_shared = irqpdaindr->share_count[i];
+ bit = i;
+ }
+ }
+ min_shared = 256;
+ if (bit < 0) { /* didn't find a matching device, just pick one. This will be */
+ /* exceptionally rare. */
+ for (i=IA64_SN2_FIRST_DEVICE_VECTOR; i < IA64_SN2_LAST_DEVICE_VECTOR; i++) {
+ if (irqpdaindr->share_count[i] < min_shared) {
+ min_shared = irqpdaindr->share_count[i];
+ bit = i;
+ }
+ }
+ }
+ irqpdaindr->share_count[bit]++;
+ }
+ if (irqs->irq_flags[bit] & SN2_IRQ_SHARED) {
+ irqs->irq_flags[bit] |= SN2_IRQ_RESERVED;
+ return bit;
}
if (irqs->irq_flags[bit] & SN2_IRQ_RESERVED) {
return -1;
intr_reserve_level(cpuid_t cpu,
int bit,
int resflags,
- devfs_handle_t owner_dev,
+ vertex_hdl_t owner_dev,
char *name)
{
return(do_intr_reserve_level(cpu, bit, 1));
int bit,
int connect)
{
- irqpda_t *irqs = irqpdaindr[cpu];
+ irqpda_t *irqs = irqpdaindr;
if (connect) {
+ if (irqs->irq_flags[bit] & SN2_IRQ_SHARED) {
+ irqs->irq_flags[bit] |= SN2_IRQ_CONNECTED;
+ return bit;
+ }
if (irqs->irq_flags[bit] & SN2_IRQ_CONNECTED) {
return -1;
} else {
int slice, min_count = 1000;
irqpda_t *irqs;
- for (slice = 0; slice < CPUS_PER_NODE; slice++) {
+ for (slice = CPUS_PER_NODE - 1; slice >= 0; slice--) {
int intrs;
cpu = cnode_slice_to_cpuid(cnode, slice);
- if (cpu == CPU_NONE) {
+ if (cpu == num_online_cpus()) {
continue;
}
- if (!cpu_enabled(cpu)) {
+ if (!cpu_online(cpu)) {
continue;
}
- irqs = irqpdaindr[cpu];
+ irqs = irqpdaindr;
intrs = irqs->num_irq_used;
if (min_count > intrs) {
min_count = intrs;
best_cpu = cpu;
+ if ( enable_shub_wars_1_1() ) {
+ /* Rather than finding the best cpu, always return the first cpu*/
+ /* This forces all interrupts to the same cpu */
+ break;
+ }
}
}
return best_cpu;
cnodeid_t cnode,
int req_bit,
int resflags,
- devfs_handle_t owner_dev,
+ vertex_hdl_t owner_dev,
char *name,
int *resp_bit)
{
// Find the node to assign for this interrupt.
cpuid_t
-intr_heuristic(devfs_handle_t dev,
+intr_heuristic(vertex_hdl_t dev,
device_desc_t dev_desc,
int req_bit,
int resflags,
- devfs_handle_t owner_dev,
+ vertex_hdl_t owner_dev,
char *name,
int *resp_bit)
{
cpuid_t cpuid;
cpuid_t candidate = CPU_NONE;
cnodeid_t candidate_node;
- devfs_handle_t pconn_vhdl;
+ vertex_hdl_t pconn_vhdl;
pcibr_soft_t pcibr_soft;
int bit;
if (candidate != CPU_NONE) {
printk("Cannot target interrupt to target node (%ld).\n",candidate);
return CPU_NONE; } else {
- printk("Cannot target interrupt to closest node (%d) 0x%p\n",
- master_node_get(dev), (void *)owner_dev);
+ /* printk("Cannot target interrupt to closest node (%d) 0x%p\n",
+ master_node_get(dev), (void *)owner_dev); */
}
// We couldn't put it on the closest node. Try to find another one.
#include <asm/sn/xtalk/xbow.h>
#include <asm/sn/pci/bridge.h>
#include <asm/sn/klconfig.h>
-#include <asm/sn/eeprom.h>
#include <asm/sn/sn_private.h>
#include <asm/sn/pci/pcibr.h>
#include <asm/sn/xtalk/xtalk.h>
/* At most 2 hubs can be connected to an xswitch */
#define NUM_XSWITCH_VOLUNTEER 2
-extern unsigned char Is_pic_on_this_nasid[512];
-
/*
* Track which hubs have volunteered to manage devices hanging off of
* a Crosstalk Switch (e.g. xbow). This structure is allocated,
typedef struct xswitch_vol_s {
mutex_t xswitch_volunteer_mutex;
int xswitch_volunteer_count;
- devfs_handle_t xswitch_volunteer[NUM_XSWITCH_VOLUNTEER];
+ vertex_hdl_t xswitch_volunteer[NUM_XSWITCH_VOLUNTEER];
} *xswitch_vol_t;
void
-xswitch_vertex_init(devfs_handle_t xswitch)
+xswitch_vertex_init(vertex_hdl_t xswitch)
{
xswitch_vol_t xvolinfo;
int rc;
* xswitch volunteer structure hanging around. Destroy it.
*/
static void
-xswitch_volunteer_delete(devfs_handle_t xswitch)
+xswitch_volunteer_delete(vertex_hdl_t xswitch)
{
xswitch_vol_t xvolinfo;
int rc;
*/
/* ARGSUSED */
static void
-volunteer_for_widgets(devfs_handle_t xswitch, devfs_handle_t master)
+volunteer_for_widgets(vertex_hdl_t xswitch, vertex_hdl_t master)
{
xswitch_vol_t xvolinfo = NULL;
- devfs_handle_t hubv;
+ vertex_hdl_t hubv;
hubinfo_t hubinfo;
(void)hwgraph_info_get_LBL(xswitch,
*/
/* ARGSUSED */
static void
-assign_widgets_to_volunteers(devfs_handle_t xswitch, devfs_handle_t hubv)
+assign_widgets_to_volunteers(vertex_hdl_t xswitch, vertex_hdl_t hubv)
{
xswitch_info_t xswitch_info;
xswitch_vol_t xvolinfo = NULL;
bt = iobrick_type_get_nasid(nasid);
if (bt >= 0) {
- /*
- * PXBRICK has two busses per widget so this
- * algorithm wouldn't work (all busses would
- * be assigned to one volunteer). Change the
- * bricktype to PBRICK whose mapping is setup
- * suchthat 2 of the PICs will be assigned to
- * one volunteer and the other one will be
- * assigned to the other volunteer.
- */
- if (bt == MODULE_PXBRICK)
- bt = MODULE_PBRICK;
-
i = io_brick_map_widget(bt, widgetnum) & 1;
}
}
DBG("iograph_early_init: Found board 0x%p\n", board);
}
}
-
- hubio_init();
}
/*
* hwid for our use.
*/
static void
-early_probe_for_widget(devfs_handle_t hubv, xwidget_hwid_t hwid)
+early_probe_for_widget(vertex_hdl_t hubv, xwidget_hwid_t hwid)
{
hubreg_t llp_csr_reg;
nasid_t nasid;
* added as inventory information.
*/
static void
-xwidget_inventory_add(devfs_handle_t widgetv,
+xwidget_inventory_add(vertex_hdl_t widgetv,
lboard_t *board,
struct xwidget_hwid_s hwid)
{
*/
void
-io_xswitch_widget_init(devfs_handle_t xswitchv,
- devfs_handle_t hubv,
- xwidgetnum_t widgetnum,
- async_attach_t aa)
+io_xswitch_widget_init(vertex_hdl_t xswitchv,
+ vertex_hdl_t hubv,
+ xwidgetnum_t widgetnum)
{
xswitch_info_t xswitch_info;
xwidgetnum_t hub_widgetid;
- devfs_handle_t widgetv;
+ vertex_hdl_t widgetv;
cnodeid_t cnode;
widgetreg_t widget_id;
nasid_t nasid, peer_nasid;
char name[4];
lboard_t dummy;
+
/*
* If the current hub is not supposed to be the master
* for this widgetnum, then skip this widget.
memset(buffer, 0, 16);
format_module_id(buffer, geo_module(board->brd_geoid), MODULE_FORMAT_BRIEF);
- sprintf(pathname, EDGE_LBL_MODULE "/%s/" EDGE_LBL_SLAB "/%d" "/%cbrick" "/%s/%d",
+
+ sprintf(pathname, EDGE_LBL_MODULE "/%s/" EDGE_LBL_SLAB "/%d" "/%s" "/%s/%d",
buffer,
geo_slab(board->brd_geoid),
- (board->brd_type == KLTYPE_IBRICK) ? 'I' :
- (board->brd_type == KLTYPE_PBRICK) ? 'P' :
- (board->brd_type == KLTYPE_XBRICK) ? 'X' : '?',
+ (board->brd_type == KLTYPE_IBRICK) ? EDGE_LBL_IBRICK :
+ (board->brd_type == KLTYPE_PBRICK) ? EDGE_LBL_PBRICK :
+ (board->brd_type == KLTYPE_PXBRICK) ? EDGE_LBL_PXBRICK :
+ (board->brd_type == KLTYPE_IXBRICK) ? EDGE_LBL_IXBRICK :
+ (board->brd_type == KLTYPE_XBRICK) ? EDGE_LBL_XBRICK : "?brick",
EDGE_LBL_XTALK, widgetnum);
DBG("io_xswitch_widget_init: path= %s\n", pathname);
xwidget_inventory_add(widgetv,board,hwid);
(void)xwidget_register(&hwid, widgetv, widgetnum,
- hubv, hub_widgetid,
- aa);
+ hubv, hub_widgetid);
ia64_sn_sysctl_iobrick_module_get(nasid, &io_module);
if (io_module >= 0) {
char buffer[16];
- devfs_handle_t to, from;
+ vertex_hdl_t to, from;
+ char *brick_name;
+ extern char *iobrick_L1bricktype_to_name(int type);
+
memset(buffer, 0, 16);
format_module_id(buffer, geo_module(board->brd_geoid), MODULE_FORMAT_BRIEF);
- bt = toupper(MODULE_GET_BTCHAR(io_module));
+ if ( islower(MODULE_GET_BTCHAR(io_module)) ) {
+ bt = toupper(MODULE_GET_BTCHAR(io_module));
+ }
+ else {
+ bt = MODULE_GET_BTCHAR(io_module);
+ }
+
+ brick_name = iobrick_L1bricktype_to_name(bt);
+
/* Add a helper vertex so xbow monitoring
* can identify the brick type. It's simply
* an edge from the widget 0 vertex to the
* brick vertex.
*/
- sprintf(pathname, "/dev/hw/" EDGE_LBL_MODULE "/%s/"
+ sprintf(pathname, EDGE_LBL_HW "/" EDGE_LBL_MODULE "/%s/"
EDGE_LBL_SLAB "/%d/"
EDGE_LBL_NODE "/" EDGE_LBL_XTALK "/"
"0",
buffer, geo_slab(board->brd_geoid));
from = hwgraph_path_to_vertex(pathname);
ASSERT_ALWAYS(from);
- sprintf(pathname, "/dev/hw/" EDGE_LBL_MODULE "/%s/"
+ sprintf(pathname, EDGE_LBL_HW "/" EDGE_LBL_MODULE "/%s/"
EDGE_LBL_SLAB "/%d/"
- "%cbrick",
- buffer, geo_slab(board->brd_geoid), bt);
+ "%s",
+ buffer, geo_slab(board->brd_geoid), brick_name);
to = hwgraph_path_to_vertex(pathname);
ASSERT_ALWAYS(to);
static void
-io_init_xswitch_widgets(devfs_handle_t xswitchv, cnodeid_t cnode)
+io_init_xswitch_widgets(vertex_hdl_t xswitchv, cnodeid_t cnode)
{
xwidgetnum_t widgetnum;
- async_attach_t aa;
-
- aa = async_attach_new();
DBG("io_init_xswitch_widgets: xswitchv 0x%p for cnode %d\n", xswitchv, cnode);
widgetnum++) {
io_xswitch_widget_init(xswitchv,
cnodeid_to_vertex(cnode),
- widgetnum, aa);
+ widgetnum);
}
- /*
- * Wait for parallel attach threads, if any, to complete.
- */
- async_attach_waitall(aa);
- async_attach_free(aa);
}
/*
* graph and risking hangs.
*/
static void
-io_link_xswitch_widgets(devfs_handle_t xswitchv, cnodeid_t cnodeid)
+io_link_xswitch_widgets(vertex_hdl_t xswitchv, cnodeid_t cnodeid)
{
xwidgetnum_t widgetnum;
char pathname[128];
- devfs_handle_t vhdl;
+ vertex_hdl_t vhdl;
nasid_t nasid, peer_nasid;
lboard_t *board;
return;
}
- if ( Is_pic_on_this_nasid[nasid] ) {
- /* Check both buses */
- sprintf(pathname, "%d/"EDGE_LBL_PCIX_0, widgetnum);
- if (hwgraph_traverse(xswitchv, pathname, &vhdl) == GRAPH_SUCCESS)
- board->brd_graph_link = vhdl;
- else {
- sprintf(pathname, "%d/"EDGE_LBL_PCIX_1, widgetnum);
- if (hwgraph_traverse(xswitchv, pathname, &vhdl) == GRAPH_SUCCESS)
- board->brd_graph_link = vhdl;
- else
- board->brd_graph_link = GRAPH_VERTEX_NONE;
- }
- }
+ /* Check both buses */
+ sprintf(pathname, "%d/"EDGE_LBL_PCIX_0, widgetnum);
+ if (hwgraph_traverse(xswitchv, pathname, &vhdl) == GRAPH_SUCCESS)
+ board->brd_graph_link = vhdl;
else {
- sprintf(pathname, "%d/"EDGE_LBL_PCI, widgetnum);
+ sprintf(pathname, "%d/"EDGE_LBL_PCIX_1, widgetnum);
if (hwgraph_traverse(xswitchv, pathname, &vhdl) == GRAPH_SUCCESS)
board->brd_graph_link = vhdl;
else
io_init_node(cnodeid_t cnodeid)
{
/*REFERENCED*/
- devfs_handle_t hubv, switchv, widgetv;
+ vertex_hdl_t hubv, switchv, widgetv;
struct xwidget_hwid_s hwid;
hubinfo_t hubinfo;
int is_xswitch;
nodepda_t *npdap;
struct semaphore *peer_sema = 0;
uint32_t widget_partnum;
- nodepda_router_info_t *npda_rip;
cpu_cookie_t c = 0;
- extern int hubdev_docallouts(devfs_handle_t);
npdap = NODEPDA(cnodeid);
ASSERT(hubv != GRAPH_VERTEX_NONE);
- hubdev_docallouts(hubv);
-
- /*
- * Set up the dependent routers if we have any.
- */
- npda_rip = npdap->npda_rip_first;
-
- while(npda_rip) {
- /* If the router info has not been initialized
- * then we need to do the router initialization
- */
- if (!npda_rip->router_infop) {
- router_init(cnodeid,0,npda_rip);
- }
- npda_rip = npda_rip->router_next;
- }
-
/*
* Read mfg info on this hub
*/
*/
hubinfo_get(hubv, &hubinfo);
- (void)xwidget_register(&hwid, widgetv, npdap->basew_id, hubv, hubinfo->h_widgetid, NULL);
+ (void)xwidget_register(&hwid, widgetv, npdap->basew_id, hubv, hubinfo->h_widgetid);
if (!is_xswitch) {
/* io_init_done takes cpu cookie as 2nd argument
* XXX Irix legacy..controller numbering should be part of devfsd's job
*/
int num_base_io_scsi_ctlr = 2; /* used by syssgi */
-devfs_handle_t base_io_scsi_ctlr_vhdl[NUM_BASE_IO_SCSI_CTLR];
-static devfs_handle_t baseio_enet_vhdl,baseio_console_vhdl;
-
-/*
- * Put the logical controller number information in the
- * scsi controller vertices for each scsi controller that
- * is in a "fixed position".
- */
-static void
-scsi_ctlr_nums_add(devfs_handle_t pci_vhdl)
-{
- {
- int i;
-
- num_base_io_scsi_ctlr = NUM_BASE_IO_SCSI_CTLR;
-
- /* Initialize base_io_scsi_ctlr_vhdl array */
- for (i=0; i<num_base_io_scsi_ctlr; i++)
- base_io_scsi_ctlr_vhdl[i] = GRAPH_VERTEX_NONE;
- }
- {
- /*
- * May want to consider changing the SN0 code, above, to work more like
- * the way this works.
- */
- devfs_handle_t base_ibrick_xbridge_vhdl;
- devfs_handle_t base_ibrick_xtalk_widget_vhdl;
- devfs_handle_t scsi_ctlr_vhdl;
- int i;
- graph_error_t rv;
-
- /*
- * This is a table of "well-known" SCSI controllers and their well-known
- * controller numbers. The names in the table start from the base IBrick's
- * Xbridge vertex, so the first component is the xtalk widget number.
- */
- static struct {
- char *base_ibrick_scsi_path;
- int controller_number;
- } hardwired_scsi_controllers[] = {
- {"15/" EDGE_LBL_PCI "/1/" EDGE_LBL_SCSI_CTLR "/0", 0},
- {"15/" EDGE_LBL_PCI "/2/" EDGE_LBL_SCSI_CTLR "/0", 1},
- {"15/" EDGE_LBL_PCI "/3/" EDGE_LBL_SCSI_CTLR "/0", 2},
- {"14/" EDGE_LBL_PCI "/1/" EDGE_LBL_SCSI_CTLR "/0", 3},
- {"14/" EDGE_LBL_PCI "/2/" EDGE_LBL_SCSI_CTLR "/0", 4},
- {"15/" EDGE_LBL_PCI "/6/ohci/0/" EDGE_LBL_SCSI_CTLR "/0", 5},
- {NULL, -1} /* must be last */
- };
-
- base_ibrick_xtalk_widget_vhdl = hwgraph_connectpt_get(pci_vhdl);
- ASSERT_ALWAYS(base_ibrick_xtalk_widget_vhdl != GRAPH_VERTEX_NONE);
-
- base_ibrick_xbridge_vhdl = hwgraph_connectpt_get(base_ibrick_xtalk_widget_vhdl);
- ASSERT_ALWAYS(base_ibrick_xbridge_vhdl != GRAPH_VERTEX_NONE);
- hwgraph_vertex_unref(base_ibrick_xtalk_widget_vhdl);
-
- /*
- * Iterate through the list of well-known SCSI controllers.
- * For each controller found, set it's controller number according
- * to the table.
- */
- for (i=0; hardwired_scsi_controllers[i].base_ibrick_scsi_path != NULL; i++) {
- rv = hwgraph_path_lookup(base_ibrick_xbridge_vhdl,
- hardwired_scsi_controllers[i].base_ibrick_scsi_path, &scsi_ctlr_vhdl, NULL);
-
- if (rv != GRAPH_SUCCESS) /* No SCSI at this path */
- continue;
-
- ASSERT(hardwired_scsi_controllers[i].controller_number < NUM_BASE_IO_SCSI_CTLR);
- base_io_scsi_ctlr_vhdl[hardwired_scsi_controllers[i].controller_number] = scsi_ctlr_vhdl;
- device_controller_num_set(scsi_ctlr_vhdl, hardwired_scsi_controllers[i].controller_number);
- hwgraph_vertex_unref(scsi_ctlr_vhdl); /* (even though we're actually keeping a reference) */
- }
-
- hwgraph_vertex_unref(base_ibrick_xbridge_vhdl);
- }
-}
-
+vertex_hdl_t base_io_scsi_ctlr_vhdl[NUM_BASE_IO_SCSI_CTLR];
#include <asm/sn/ioerror_handling.h>
-devfs_handle_t sys_critical_graph_root = GRAPH_VERTEX_NONE;
-
-/* Define the system critical vertices and connect them through
- * a canonical parent-child relationships for easy traversal
- * during io error handling.
- */
-static void
-sys_critical_graph_init(void)
-{
- devfs_handle_t bridge_vhdl,master_node_vhdl;
- devfs_handle_t xbow_vhdl = GRAPH_VERTEX_NONE;
- extern devfs_handle_t hwgraph_root;
- devfs_handle_t pci_slot_conn;
- int slot;
- devfs_handle_t baseio_console_conn;
-
- DBG("sys_critical_graph_init: FIXME.\n");
- baseio_console_conn = hwgraph_connectpt_get(baseio_console_vhdl);
-
- if (baseio_console_conn == NULL) {
- return;
- }
-
- /* Get the vertex handle for the baseio bridge */
- bridge_vhdl = device_master_get(baseio_console_conn);
-
- /* Get the master node of the baseio card */
- master_node_vhdl = cnodeid_to_vertex(
- master_node_get(baseio_console_vhdl));
-
- /* Add the "root->node" part of the system critical graph */
-
- sys_critical_graph_vertex_add(hwgraph_root,master_node_vhdl);
-
- /* Check if we have a crossbow */
- if (hwgraph_traverse(master_node_vhdl,
- EDGE_LBL_XTALK"/0",
- &xbow_vhdl) == GRAPH_SUCCESS) {
- /* We have a crossbow.Add "node->xbow" part of the system
- * critical graph.
- */
- sys_critical_graph_vertex_add(master_node_vhdl,xbow_vhdl);
-
- /* Add "xbow->baseio bridge" of the system critical graph */
- sys_critical_graph_vertex_add(xbow_vhdl,bridge_vhdl);
-
- hwgraph_vertex_unref(xbow_vhdl);
- } else
- /* We donot have a crossbow. Add "node->baseio_bridge"
- * part of the system critical graph.
- */
- sys_critical_graph_vertex_add(master_node_vhdl,bridge_vhdl);
-
- /* Add all the populated PCI slot vertices to the system critical
- * graph with the bridge vertex as the parent.
- */
- for (slot = 0 ; slot < 8; slot++) {
- char slot_edge[10];
-
- sprintf(slot_edge,"%d",slot);
- if (hwgraph_traverse(bridge_vhdl,slot_edge, &pci_slot_conn)
- != GRAPH_SUCCESS)
- continue;
- sys_critical_graph_vertex_add(bridge_vhdl,pci_slot_conn);
- hwgraph_vertex_unref(pci_slot_conn);
- }
-
- hwgraph_vertex_unref(bridge_vhdl);
-
- /* Add the "ioc3 pci connection point -> console ioc3" part
- * of the system critical graph
- */
-
- if (hwgraph_traverse(baseio_console_vhdl,"..",&pci_slot_conn) ==
- GRAPH_SUCCESS) {
- sys_critical_graph_vertex_add(pci_slot_conn,
- baseio_console_vhdl);
- hwgraph_vertex_unref(pci_slot_conn);
- }
-
- /* Add the "ethernet pci connection point -> base ethernet" part of
- * the system critical graph
- */
- if (hwgraph_traverse(baseio_enet_vhdl,"..",&pci_slot_conn) ==
- GRAPH_SUCCESS) {
- sys_critical_graph_vertex_add(pci_slot_conn,
- baseio_enet_vhdl);
- hwgraph_vertex_unref(pci_slot_conn);
- }
-
- /* Add the "scsi controller pci connection point -> base scsi
- * controller" part of the system critical graph
- */
- if (hwgraph_traverse(base_io_scsi_ctlr_vhdl[0],
- "../..",&pci_slot_conn) == GRAPH_SUCCESS) {
- sys_critical_graph_vertex_add(pci_slot_conn,
- base_io_scsi_ctlr_vhdl[0]);
- hwgraph_vertex_unref(pci_slot_conn);
- }
- if (hwgraph_traverse(base_io_scsi_ctlr_vhdl[1],
- "../..",&pci_slot_conn) == GRAPH_SUCCESS) {
- sys_critical_graph_vertex_add(pci_slot_conn,
- base_io_scsi_ctlr_vhdl[1]);
- hwgraph_vertex_unref(pci_slot_conn);
- }
- hwgraph_vertex_unref(baseio_console_conn);
-
-}
-
-static void
-baseio_ctlr_num_set(void)
-{
- char name[MAXDEVNAME];
- devfs_handle_t console_vhdl, pci_vhdl, enet_vhdl;
- devfs_handle_t ioc3_console_vhdl_get(void);
-
-
- DBG("baseio_ctlr_num_set; FIXME\n");
- console_vhdl = ioc3_console_vhdl_get();
- if (console_vhdl == GRAPH_VERTEX_NONE)
- return;
- /* Useful for setting up the system critical graph */
- baseio_console_vhdl = console_vhdl;
-
- vertex_to_name(console_vhdl,name,MAXDEVNAME);
-
- strcat(name,__DEVSTR1);
- pci_vhdl = hwgraph_path_to_vertex(name);
- scsi_ctlr_nums_add(pci_vhdl);
- /* Unref the pci_vhdl due to the reference by hwgraph_path_to_vertex
- */
- hwgraph_vertex_unref(pci_vhdl);
-
- vertex_to_name(console_vhdl, name, MAXDEVNAME);
- strcat(name, __DEVSTR4);
- enet_vhdl = hwgraph_path_to_vertex(name);
-
- /* Useful for setting up the system critical graph */
- baseio_enet_vhdl = enet_vhdl;
-
- device_controller_num_set(enet_vhdl, 0);
- /* Unref the enet_vhdl due to the reference by hwgraph_path_to_vertex
- */
- hwgraph_vertex_unref(enet_vhdl);
-}
/* #endif */
/*
*/
update_node_information(cnodeid);
- baseio_ctlr_num_set();
- /* Setup the system critical graph (which is a subgraph of the
- * main hwgraph). This information is useful during io error
- * handling.
- */
- sys_critical_graph_init();
-
#if HWG_PRINT
hwgraph_print();
#endif
}
},
+/* IXbrick widget number to PCI bus number map */
+ { MODULE_IXBRICK, /* IXbrick type */
+ /* PCI Bus # Widget # */
+ { 0, 0, 0, 0, 0, 0, 0, 0, /* 0x0 - 0x7 */
+ 0, /* 0x8 */
+ 0, /* 0x9 */
+ 0, 0, /* 0xa - 0xb */
+ 1, /* 0xc */
+ 5, /* 0xd */
+ 0, /* 0xe */
+ 3 /* 0xf */
+ }
+ },
+
/* Xbrick widget to XIO slot map */
{ MODULE_XBRICK, /* Xbrick type */
/* XIO Slot # Widget # */
return 0;
}
-
-/*
- * Use the device's vertex to map the device's widget to a meaningful int
- */
-int
-io_path_map_widget(devfs_handle_t vertex)
-{
- char hw_path_name[MAXDEVNAME];
- char *wp, *bp, *sp = NULL;
- int widget_num;
- long atoi(char *);
- int hwgraph_vertex_name_get(devfs_handle_t vhdl, char *buf, uint buflen);
-
-
- /* Get the full path name of the vertex */
- if (GRAPH_SUCCESS != hwgraph_vertex_name_get(vertex, hw_path_name,
- MAXDEVNAME))
- return 0;
-
- /* Find the widget number in the path name */
- wp = strstr(hw_path_name, "/"EDGE_LBL_XTALK"/");
- if (wp == NULL)
- return 0;
- widget_num = atoi(wp+7);
- if (widget_num < XBOW_PORT_8 || widget_num > XBOW_PORT_F)
- return 0;
-
- /* Find "brick" in the path name */
- bp = strstr(hw_path_name, "brick");
- if (bp == NULL)
- return 0;
-
- /* Find preceding slash */
- sp = bp;
- while (sp > hw_path_name) {
- sp--;
- if (*sp == '/')
- break;
- }
-
- /* Invalid if no preceding slash */
- if (!sp)
- return 0;
-
- /* Bump slash pointer to "brick" prefix */
- sp++;
- /*
- * Verify "brick" prefix length; valid exaples:
- * 'I' from "/Ibrick"
- * 'P' from "/Pbrick"
- * 'X' from "/Xbrick"
- */
- if ((bp - sp) != 1)
- return 0;
-
- return (io_brick_map_widget((int)*sp, widget_num));
-
-}
#include <asm/sn/xtalk/xbow.h>
#include <asm/sn/pci/bridge.h>
#include <asm/sn/klconfig.h>
-#include <asm/sn/sn1/hubdev.h>
#include <asm/sn/module.h>
#include <asm/sn/pci/pcibr.h>
#include <asm/sn/xtalk/xswitch.h>
+++ /dev/null
-/* $Id$
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Copyright (C) 1992 - 1997, 2000-2003 Silicon Graphics, Inc. All rights reserved.
- */
-
-#include <linux/config.h>
-#include <linux/init.h>
-#include <linux/types.h>
-#include <linux/pci.h>
-#include <linux/pci_ids.h>
-#include <linux/sched.h>
-#include <linux/ioport.h>
-#include <asm/sn/types.h>
-#include <asm/sn/hack.h>
-#include <asm/sn/sgi.h>
-#include <asm/sn/io.h>
-#include <asm/sn/driver.h>
-#include <asm/sn/iograph.h>
-#include <asm/param.h>
-#include <asm/sn/pio.h>
-#include <asm/sn/xtalk/xwidget.h>
-#include <asm/sn/sn_private.h>
-#include <asm/sn/addrs.h>
-#include <asm/sn/invent.h>
-#include <asm/sn/hcl.h>
-#include <asm/sn/hcl_util.h>
-#include <asm/sn/intr.h>
-#include <asm/sn/xtalk/xtalkaddrs.h>
-#include <asm/sn/klconfig.h>
-#include <asm/sn/nodepda.h>
-#include <asm/sn/pci/pciio.h>
-#include <asm/sn/pci/pcibr.h>
-#include <asm/sn/pci/pcibr_private.h>
-#include <asm/sn/pci/pci_bus_cvlink.h>
-#include <asm/sn/simulator.h>
-#include <asm/sn/sn_cpuid.h>
-
-extern int bridge_rev_b_data_check_disable;
-
-devfs_handle_t busnum_to_pcibr_vhdl[MAX_PCI_XWIDGET];
-nasid_t busnum_to_nid[MAX_PCI_XWIDGET];
-void * busnum_to_atedmamaps[MAX_PCI_XWIDGET];
-unsigned char num_bridges;
-static int done_probing = 0;
-
-static int pci_bus_map_create(devfs_handle_t xtalk, char * io_moduleid);
-devfs_handle_t devfn_to_vertex(unsigned char busnum, unsigned int devfn);
-
-extern unsigned char Is_pic_on_this_nasid[512];
-
-extern void sn_init_irq_desc(void);
-extern void register_pcibr_intr(int irq, pcibr_intr_t intr);
-
-
-/*
- * For the given device, initialize whether it is a PIC device.
- */
-static void
-set_isPIC(struct sn_device_sysdata *device_sysdata)
-{
- pciio_info_t pciio_info = pciio_info_get(device_sysdata->vhdl);
- pcibr_soft_t pcibr_soft = (pcibr_soft_t) pciio_info_mfast_get(pciio_info);
-
- device_sysdata->isPIC = IS_PIC_SOFT(pcibr_soft);;
-}
-
-/*
- * pci_bus_cvlink_init() - To be called once during initialization before
- * SGI IO Infrastructure init is called.
- */
-void
-pci_bus_cvlink_init(void)
-{
-
- extern void ioconfig_bus_init(void);
-
- memset(busnum_to_pcibr_vhdl, 0x0, sizeof(devfs_handle_t) * MAX_PCI_XWIDGET);
- memset(busnum_to_nid, 0x0, sizeof(nasid_t) * MAX_PCI_XWIDGET);
-
- memset(busnum_to_atedmamaps, 0x0, sizeof(void *) * MAX_PCI_XWIDGET);
-
- num_bridges = 0;
-
- ioconfig_bus_init();
-}
-
-/*
- * pci_bus_to_vertex() - Given a logical Linux Bus Number returns the associated
- * pci bus vertex from the SGI IO Infrastructure.
- */
-devfs_handle_t
-pci_bus_to_vertex(unsigned char busnum)
-{
-
- devfs_handle_t pci_bus = NULL;
-
-
- /*
- * First get the xwidget vertex.
- */
- pci_bus = busnum_to_pcibr_vhdl[busnum];
- return(pci_bus);
-}
-
-/*
- * devfn_to_vertex() - returns the vertex of the device given the bus, slot,
- * and function numbers.
- */
-devfs_handle_t
-devfn_to_vertex(unsigned char busnum, unsigned int devfn)
-{
-
- int slot = 0;
- int func = 0;
- char name[16];
- devfs_handle_t pci_bus = NULL;
- devfs_handle_t device_vertex = (devfs_handle_t)NULL;
-
- /*
- * Go get the pci bus vertex.
- */
- pci_bus = pci_bus_to_vertex(busnum);
- if (!pci_bus) {
- /*
- * During probing, the Linux pci code invents non existant
- * bus numbers and pci_dev structures and tries to access
- * them to determine existance. Don't crib during probing.
- */
- if (done_probing)
- printk("devfn_to_vertex: Invalid bus number %d given.\n", busnum);
- return(NULL);
- }
-
-
- /*
- * Go get the slot&function vertex.
- * Should call pciio_slot_func_to_name() when ready.
- */
- slot = PCI_SLOT(devfn);
- func = PCI_FUNC(devfn);
-
- /*
- * For a NON Multi-function card the name of the device looks like:
- * ../pci/1, ../pci/2 ..
- */
- if (func == 0) {
- sprintf(name, "%d", slot);
- if (hwgraph_traverse(pci_bus, name, &device_vertex) ==
- GRAPH_SUCCESS) {
- if (device_vertex) {
- return(device_vertex);
- }
- }
- }
-
- /*
- * This maybe a multifunction card. It's names look like:
- * ../pci/1a, ../pci/1b, etc.
- */
- sprintf(name, "%d%c", slot, 'a'+func);
- if (hwgraph_traverse(pci_bus, name, &device_vertex) != GRAPH_SUCCESS) {
- if (!device_vertex) {
- return(NULL);
- }
- }
-
- return(device_vertex);
-}
-
-/*
- * For the given device, initialize the addresses for both the Device(x) Flush
- * Write Buffer register and the Xbow Flush Register for the port the PCI bus
- * is connected.
- */
-static void
-set_flush_addresses(struct pci_dev *device_dev,
- struct sn_device_sysdata *device_sysdata)
-{
- pciio_info_t pciio_info = pciio_info_get(device_sysdata->vhdl);
- pciio_slot_t pciio_slot = pciio_info_slot_get(pciio_info);
- pcibr_soft_t pcibr_soft = (pcibr_soft_t) pciio_info_mfast_get(pciio_info);
- bridge_t *bridge = pcibr_soft->bs_base;
- nasid_t nasid;
-
- /*
- * Get the nasid from the bridge.
- */
- nasid = NASID_GET(device_sysdata->dma_buf_sync);
- if (IS_PIC_DEVICE(device_dev)) {
- device_sysdata->dma_buf_sync = (volatile unsigned int *)
- &bridge->b_wr_req_buf[pciio_slot].reg;
- device_sysdata->xbow_buf_sync = (volatile unsigned int *)
- XBOW_PRIO_LINKREGS_PTR(NODE_SWIN_BASE(nasid, 0),
- pcibr_soft->bs_xid);
- } else {
- /*
- * Accessing Xbridge and Xbow register when SHUB swapoper is on!.
- */
- device_sysdata->dma_buf_sync = (volatile unsigned int *)
- ((uint64_t)&(bridge->b_wr_req_buf[pciio_slot].reg)^4);
- device_sysdata->xbow_buf_sync = (volatile unsigned int *)
- ((uint64_t)(XBOW_PRIO_LINKREGS_PTR(
- NODE_SWIN_BASE(nasid, 0), pcibr_soft->bs_xid)) ^ 4);
- }
-
-#ifdef DEBUG
- printk("set_flush_addresses: dma_buf_sync %p xbow_buf_sync %p\n",
- device_sysdata->dma_buf_sync, device_sysdata->xbow_buf_sync);
-
-printk("set_flush_addresses: dma_buf_sync\n");
- while((volatile unsigned int )*device_sysdata->dma_buf_sync);
-printk("set_flush_addresses: xbow_buf_sync\n");
- while((volatile unsigned int )*device_sysdata->xbow_buf_sync);
-#endif
-
-}
-
-/*
- * Most drivers currently do not properly tell the arch specific pci dma
- * interfaces whether they can handle A64. Here is where we privately
- * keep track of this.
- */
-static void __init
-set_sn_pci64(struct pci_dev *dev)
-{
- unsigned short vendor = dev->vendor;
- unsigned short device = dev->device;
-
- if (vendor == PCI_VENDOR_ID_QLOGIC) {
- if ((device == PCI_DEVICE_ID_QLOGIC_ISP2100) ||
- (device == PCI_DEVICE_ID_QLOGIC_ISP2200)) {
- SET_PCIA64(dev);
- return;
- }
- }
-
- if (vendor == PCI_VENDOR_ID_SGI) {
- if (device == PCI_DEVICE_ID_SGI_IOC3) {
- SET_PCIA64(dev);
- return;
- }
- }
-
-}
-
-/*
- * sn_pci_fixup() - This routine is called when platform_pci_fixup() is
- * invoked at the end of pcibios_init() to link the Linux pci
- * infrastructure to SGI IO Infrasturcture - ia64/kernel/pci.c
- *
- * Other platform specific fixup can also be done here.
- */
-void
-sn_pci_fixup(int arg)
-{
- struct list_head *ln;
- struct pci_bus *pci_bus = NULL;
- struct pci_dev *device_dev = NULL;
- struct sn_widget_sysdata *widget_sysdata;
- struct sn_device_sysdata *device_sysdata;
- pciio_intr_t intr_handle;
- int cpuid, bit;
- devfs_handle_t device_vertex;
- pciio_intr_line_t lines;
- extern void sn_pci_find_bios(void);
- extern int numnodes;
- int cnode;
- extern void io_sh_swapper(int, int);
-
- for (cnode = 0; cnode < numnodes; cnode++) {
- if ( !Is_pic_on_this_nasid[cnodeid_to_nasid(cnode)] )
- io_sh_swapper((cnodeid_to_nasid(cnode)), 0);
- }
-
- if (arg == 0) {
-#ifdef CONFIG_PROC_FS
- extern void register_sn_procfs(void);
-#endif
-
- sn_init_irq_desc();
- sn_pci_find_bios();
- for (cnode = 0; cnode < numnodes; cnode++) {
- extern void intr_init_vecblk(nodepda_t *npda, cnodeid_t, int);
- intr_init_vecblk(NODEPDA(cnode), cnode, 0);
- }
-
- /*
- * When we return to generic Linux, Swapper is always on ..
- */
- for (cnode = 0; cnode < numnodes; cnode++) {
- if ( !Is_pic_on_this_nasid[cnodeid_to_nasid(cnode)] )
- io_sh_swapper((cnodeid_to_nasid(cnode)), 1);
- }
-#ifdef CONFIG_PROC_FS
- register_sn_procfs();
-#endif
- return;
- }
-
-
- done_probing = 1;
-
- /*
- * Initialize the pci bus vertex in the pci_bus struct.
- */
- for( ln = pci_root_buses.next; ln != &pci_root_buses; ln = ln->next) {
- pci_bus = pci_bus_b(ln);
- widget_sysdata = kmalloc(sizeof(struct sn_widget_sysdata),
- GFP_KERNEL);
- widget_sysdata->vhdl = pci_bus_to_vertex(pci_bus->number);
- pci_bus->sysdata = (void *)widget_sysdata;
- }
-
- /*
- * set the root start and end so that drivers calling check_region()
- * won't see a conflict
- */
- ioport_resource.start = 0xc000000000000000;
- ioport_resource.end = 0xcfffffffffffffff;
-
- /*
- * Initialize the device vertex in the pci_dev struct.
- */
- while ((device_dev = pci_find_device(PCI_ANY_ID, PCI_ANY_ID, device_dev)) != NULL) {
- unsigned int irq;
- int idx;
- u16 cmd;
- devfs_handle_t vhdl;
- unsigned long size;
- extern int bit_pos_to_irq(int);
-
- if (device_dev->vendor == PCI_VENDOR_ID_SGI &&
- device_dev->device == PCI_DEVICE_ID_SGI_IOC3) {
- extern void pci_fixup_ioc3(struct pci_dev *d);
- pci_fixup_ioc3(device_dev);
- }
-
- /* Set the device vertex */
-
- device_sysdata = kmalloc(sizeof(struct sn_device_sysdata),
- GFP_KERNEL);
- device_sysdata->vhdl = devfn_to_vertex(device_dev->bus->number, device_dev->devfn);
- device_sysdata->isa64 = 0;
- /*
- * Set the xbridge Device(X) Write Buffer Flush and Xbow Flush
- * register addresses.
- */
- (void) set_flush_addresses(device_dev, device_sysdata);
-
- device_dev->sysdata = (void *) device_sysdata;
- set_sn_pci64(device_dev);
- set_isPIC(device_sysdata);
-
- pci_read_config_word(device_dev, PCI_COMMAND, &cmd);
-
- /*
- * Set the resources address correctly. The assumption here
- * is that the addresses in the resource structure has been
- * read from the card and it was set in the card by our
- * Infrastructure ..
- */
- vhdl = device_sysdata->vhdl;
- for (idx = 0; idx < PCI_ROM_RESOURCE; idx++) {
- size = 0;
- size = device_dev->resource[idx].end -
- device_dev->resource[idx].start;
- if (size) {
- device_dev->resource[idx].start = (unsigned long)pciio_pio_addr(vhdl, 0, PCIIO_SPACE_WIN(idx), 0, size, 0, (IS_PIC_DEVICE(device_dev)) ? 0 : PCIIO_BYTE_STREAM);
- device_dev->resource[idx].start |= __IA64_UNCACHED_OFFSET;
- }
- else
- continue;
-
- device_dev->resource[idx].end =
- device_dev->resource[idx].start + size;
-
- if (device_dev->resource[idx].flags & IORESOURCE_IO)
- cmd |= PCI_COMMAND_IO;
-
- if (device_dev->resource[idx].flags & IORESOURCE_MEM)
- cmd |= PCI_COMMAND_MEMORY;
- }
-#if 0
- /*
- * Software WAR for a Software BUG.
- * This is only temporary.
- * See PV 872791
- */
-
- /*
- * Now handle the ROM resource ..
- */
- size = device_dev->resource[PCI_ROM_RESOURCE].end -
- device_dev->resource[PCI_ROM_RESOURCE].start;
-
- if (size) {
- device_dev->resource[PCI_ROM_RESOURCE].start =
- (unsigned long) pciio_pio_addr(vhdl, 0, PCIIO_SPACE_ROM, 0,
- size, 0, (IS_PIC_DEVICE(device_dev)) ? 0 : PCIIO_BYTE_STREAM);
- device_dev->resource[PCI_ROM_RESOURCE].start |= __IA64_UNCACHED_OFFSET;
- device_dev->resource[PCI_ROM_RESOURCE].end =
- device_dev->resource[PCI_ROM_RESOURCE].start + size;
- }
-#endif
-
- /*
- * Update the Command Word on the Card.
- */
- cmd |= PCI_COMMAND_MASTER; /* If the device doesn't support */
- /* bit gets dropped .. no harm */
- pci_write_config_word(device_dev, PCI_COMMAND, cmd);
-
- pci_read_config_byte(device_dev, PCI_INTERRUPT_PIN, (unsigned char *)&lines);
- if (device_dev->vendor == PCI_VENDOR_ID_SGI &&
- device_dev->device == PCI_DEVICE_ID_SGI_IOC3 ) {
- lines = 1;
- }
-
- device_sysdata = (struct sn_device_sysdata *)device_dev->sysdata;
- device_vertex = device_sysdata->vhdl;
-
- intr_handle = pciio_intr_alloc(device_vertex, NULL, lines, device_vertex);
-
- bit = intr_handle->pi_irq;
- cpuid = intr_handle->pi_cpu;
- irq = bit;
- irq = irq + (cpuid << 8);
- pciio_intr_connect(intr_handle, (intr_func_t)0, (intr_arg_t)0);
- device_dev->irq = irq;
- register_pcibr_intr(irq, (pcibr_intr_t)intr_handle);
-#ifdef ajmtestintr
- {
- int slot = PCI_SLOT(device_dev->devfn);
- static int timer_set = 0;
- pcibr_intr_t pcibr_intr = (pcibr_intr_t)intr_handle;
- pcibr_soft_t pcibr_soft = pcibr_intr->bi_soft;
- extern void intr_test_handle_intr(int, void*, struct pt_regs *);
-
- if (!timer_set) {
- intr_test_set_timer();
- timer_set = 1;
- }
- intr_test_register_irq(irq, pcibr_soft, slot);
- request_irq(irq, intr_test_handle_intr,0,NULL, NULL);
- }
-#endif
-
- }
-
- for (cnode = 0; cnode < numnodes; cnode++) {
- if ( !Is_pic_on_this_nasid[cnodeid_to_nasid(cnode)] )
- io_sh_swapper((cnodeid_to_nasid(cnode)), 1);
- }
-}
-
-/*
- * linux_bus_cvlink() Creates a link between the Linux PCI Bus number
- * to the actual hardware component that it represents:
- * /dev/hw/linux/busnum/0 -> ../../../hw/module/001c01/slab/0/Ibrick/xtalk/15/pci
- *
- * The bus vertex, when called to devfs_generate_path() returns:
- * hw/module/001c01/slab/0/Ibrick/xtalk/15/pci
- * hw/module/001c01/slab/1/Pbrick/xtalk/12/pci-x/0
- * hw/module/001c01/slab/1/Pbrick/xtalk/12/pci-x/1
- */
-void
-linux_bus_cvlink(void)
-{
- char name[8];
- int index;
-
- for (index=0; index < MAX_PCI_XWIDGET; index++) {
- if (!busnum_to_pcibr_vhdl[index])
- continue;
-
- sprintf(name, "%x", index);
- (void) hwgraph_edge_add(linux_busnum, busnum_to_pcibr_vhdl[index],
- name);
- }
-}
-
-/*
- * pci_bus_map_create() - Called by pci_bus_to_hcl_cvlink() to finish the job.
- *
- * Linux PCI Bus numbers are assigned from lowest module_id numbers
- * (rack/slot etc.) starting from HUB_WIDGET_ID_MAX down to
- * HUB_WIDGET_ID_MIN:
- * widgetnum 15 gets lower Bus Number than widgetnum 14 etc.
- *
- * Given 2 modules 001c01 and 001c02 we get the following mappings:
- * 001c01, widgetnum 15 = Bus number 0
- * 001c01, widgetnum 14 = Bus number 1
- * 001c02, widgetnum 15 = Bus number 3
- * 001c02, widgetnum 14 = Bus number 4
- * etc.
- *
- * The rational for starting Bus Number 0 with Widget number 15 is because
- * the system boot disks are always connected via Widget 15 Slot 0 of the
- * I-brick. Linux creates /dev/sd* devices(naming) strating from Bus Number 0
- * Therefore, /dev/sda1 will be the first disk, on Widget 15 of the lowest
- * module id(Master Cnode) of the system.
- *
- */
-static int
-pci_bus_map_create(devfs_handle_t xtalk, char * io_moduleid)
-{
-
- devfs_handle_t master_node_vertex = NULL;
- devfs_handle_t xwidget = NULL;
- devfs_handle_t pci_bus = NULL;
- hubinfo_t hubinfo = NULL;
- xwidgetnum_t widgetnum;
- char pathname[128];
- graph_error_t rv;
- int bus;
- int basebus_num;
- int bus_number;
-
- /*
- * Loop throught this vertex and get the Xwidgets ..
- */
-
-
- /* PCI devices */
-
- for (widgetnum = HUB_WIDGET_ID_MAX; widgetnum >= HUB_WIDGET_ID_MIN; widgetnum--) {
- sprintf(pathname, "%d", widgetnum);
- xwidget = NULL;
-
- /*
- * Example - /hw/module/001c16/Pbrick/xtalk/8 is the xwidget
- * /hw/module/001c16/Pbrick/xtalk/8/pci/1 is device
- */
- rv = hwgraph_traverse(xtalk, pathname, &xwidget);
- if ( (rv != GRAPH_SUCCESS) ) {
- if (!xwidget) {
- continue;
- }
- }
-
- sprintf(pathname, "%d/"EDGE_LBL_PCI, widgetnum);
- pci_bus = NULL;
- if (hwgraph_traverse(xtalk, pathname, &pci_bus) != GRAPH_SUCCESS)
- if (!pci_bus) {
- continue;
-}
-
- /*
- * Assign the correct bus number and also the nasid of this
- * pci Xwidget.
- *
- * Should not be any race here ...
- */
- num_bridges++;
- busnum_to_pcibr_vhdl[num_bridges - 1] = pci_bus;
-
- /*
- * Get the master node and from there get the NASID.
- */
- master_node_vertex = device_master_get(xwidget);
- if (!master_node_vertex) {
- printk("WARNING: pci_bus_map_create: Unable to get .master for vertex 0x%p\n", (void *)xwidget);
- }
-
- hubinfo_get(master_node_vertex, &hubinfo);
- if (!hubinfo) {
- printk("WARNING: pci_bus_map_create: Unable to get hubinfo for master node vertex 0x%p\n", (void *)master_node_vertex);
- return(1);
- } else {
- busnum_to_nid[num_bridges - 1] = hubinfo->h_nasid;
- }
-
- /*
- * Pre assign DMA maps needed for 32 Bits Page Map DMA.
- */
- busnum_to_atedmamaps[num_bridges - 1] = (void *) kmalloc(
- sizeof(struct sn_dma_maps_s) * MAX_ATE_MAPS, GFP_KERNEL);
- if (!busnum_to_atedmamaps[num_bridges - 1])
- printk("WARNING: pci_bus_map_create: Unable to precreate ATE DMA Maps for busnum %d vertex 0x%p\n", num_bridges - 1, (void *)xwidget);
-
- memset(busnum_to_atedmamaps[num_bridges - 1], 0x0,
- sizeof(struct sn_dma_maps_s) * MAX_ATE_MAPS);
-
- }
-
- /*
- * PCIX devices
- * We number busses differently for PCI-X devices.
- * We start from Lowest Widget on up ..
- */
-
- (void) ioconfig_get_busnum((char *)io_moduleid, &basebus_num);
-
- for (widgetnum = HUB_WIDGET_ID_MIN; widgetnum <= HUB_WIDGET_ID_MAX; widgetnum++) {
-
- /* Do both buses */
- for ( bus = 0; bus < 2; bus++ ) {
- sprintf(pathname, "%d", widgetnum);
- xwidget = NULL;
-
- /*
- * Example - /hw/module/001c16/Pbrick/xtalk/8 is the xwidget
- * /hw/module/001c16/Pbrick/xtalk/8/pci-x/0 is the bus
- * /hw/module/001c16/Pbrick/xtalk/8/pci-x/0/1 is device
- */
- rv = hwgraph_traverse(xtalk, pathname, &xwidget);
- if ( (rv != GRAPH_SUCCESS) ) {
- if (!xwidget) {
- continue;
- }
- }
-
- if ( bus == 0 )
- sprintf(pathname, "%d/"EDGE_LBL_PCIX_0, widgetnum);
- else
- sprintf(pathname, "%d/"EDGE_LBL_PCIX_1, widgetnum);
- pci_bus = NULL;
- if (hwgraph_traverse(xtalk, pathname, &pci_bus) != GRAPH_SUCCESS)
- if (!pci_bus) {
- continue;
- }
-
- /*
- * Assign the correct bus number and also the nasid of this
- * pci Xwidget.
- *
- * Should not be any race here ...
- */
- bus_number = basebus_num + bus + io_brick_map_widget(MODULE_PXBRICK, widgetnum);
-#ifdef DEBUG
- printk("bus_number %d basebus_num %d bus %d io %d\n",
- bus_number, basebus_num, bus,
- io_brick_map_widget(MODULE_PXBRICK, widgetnum));
-#endif
- busnum_to_pcibr_vhdl[bus_number] = pci_bus;
-
- /*
- * Pre assign DMA maps needed for 32 Bits Page Map DMA.
- */
- busnum_to_atedmamaps[bus_number] = (void *) kmalloc(
- sizeof(struct sn_dma_maps_s) * MAX_ATE_MAPS, GFP_KERNEL);
- if (!busnum_to_atedmamaps[bus_number])
- printk("WARNING: pci_bus_map_create: Unable to precreate ATE DMA Maps for busnum %d vertex 0x%p\n", num_bridges - 1, (void *)xwidget);
-
- memset(busnum_to_atedmamaps[bus_number], 0x0,
- sizeof(struct sn_dma_maps_s) * MAX_ATE_MAPS);
- }
- }
-
- return(0);
-}
-
-/*
- * pci_bus_to_hcl_cvlink() - This routine is called after SGI IO Infrastructure
- * initialization has completed to set up the mappings between Xbridge
- * and logical pci bus numbers. We also set up the NASID for each of these
- * xbridges.
- *
- * Must be called before pci_init() is invoked.
- */
-int
-pci_bus_to_hcl_cvlink(void)
-{
-
- devfs_handle_t devfs_hdl = NULL;
- devfs_handle_t xtalk = NULL;
- int rv = 0;
- char name[256];
- char tmp_name[256];
- int i, ii;
-
- /*
- * Figure out which IO Brick is connected to the Compute Bricks.
- */
- for (i = 0; i < nummodules; i++) {
- extern int iomoduleid_get(nasid_t);
- moduleid_t iobrick_id;
- nasid_t nasid = -1;
- int nodecnt;
- int n = 0;
-
- nodecnt = modules[i]->nodecnt;
- for ( n = 0; n < nodecnt; n++ ) {
- nasid = cnodeid_to_nasid(modules[i]->nodes[n]);
- iobrick_id = iomoduleid_get(nasid);
- if ((int)iobrick_id > 0) { /* Valid module id */
- char name[12];
- memset(name, 0, 12);
- format_module_id((char *)&(modules[i]->io[n].moduleid), iobrick_id, MODULE_FORMAT_BRIEF);
- }
- }
- }
-
- devfs_hdl = hwgraph_path_to_vertex("/dev/hw/module");
- for (i = 0; i < nummodules ; i++) {
- for ( ii = 0; ii < 2 ; ii++ ) {
- memset(name, 0, 256);
- memset(tmp_name, 0, 256);
- format_module_id(name, modules[i]->id, MODULE_FORMAT_BRIEF);
- sprintf(tmp_name, "/slab/%d/Pbrick/xtalk", geo_slab(modules[i]->geoid[ii]));
- strcat(name, tmp_name);
- xtalk = NULL;
- rv = hwgraph_edge_get(devfs_hdl, name, &xtalk);
- pci_bus_map_create(xtalk, (char *)&(modules[i]->io[ii].moduleid));
- }
- }
-
- /*
- * Create the Linux PCI bus number vertex link.
- */
- (void)linux_bus_cvlink();
- (void)ioconfig_bus_new_entries();
-
- return(0);
-}
EXTRA_CFLAGS := -DLITTLE_ENDIAN
-ifdef CONFIG_IA64_SGI_SN2
-EXTRA_CFLAGS += -DSHUB_SWAP_WAR
-endif
-
-obj-$(CONFIG_IA64_SGI_SN2) += pcibr_dvr.o pcibr_ate.o pcibr_config.o \
- pcibr_dvr.o pcibr_hints.o \
- pcibr_intr.o pcibr_rrb.o pcibr_slot.o \
- pcibr_error.o
+obj-y += pcibr_ate.o pcibr_config.o pcibr_dvr.o pcibr_hints.o pcibr_intr.o pcibr_rrb.o \
+ pcibr_slot.o pcibr_error.o
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
- * Copyright (C) 2001-2002 Silicon Graphics, Inc. All rights reserved.
+ * Copyright (C) 2001-2003 Silicon Graphics, Inc. All rights reserved.
*/
#include <linux/types.h>
#include <asm/sn/prio.h>
#include <asm/sn/xtalk/xbow.h>
#include <asm/sn/ioc3.h>
-#include <asm/sn/eeprom.h>
#include <asm/sn/io.h>
#include <asm/sn/sn_private.h>
int i, j;
bridgereg_t old_enable, new_enable;
int s;
- int this_is_pic = is_pic(bridge);
/* Probe SSRAM to determine its size. */
- if ( this_is_pic ) {
- old_enable = bridge->b_int_enable;
- new_enable = old_enable & ~BRIDGE_IMR_PCI_MST_TIMEOUT;
- bridge->b_int_enable = new_enable;
- }
- else {
- if (io_get_sh_swapper(NASID_GET(bridge))) {
- old_enable = BRIDGE_REG_GET32((&bridge->b_int_enable));
- new_enable = old_enable & ~BRIDGE_IMR_PCI_MST_TIMEOUT;
- BRIDGE_REG_SET32((&bridge->b_int_enable)) = new_enable;
- }
- else {
- old_enable = bridge->b_int_enable;
- new_enable = old_enable & ~BRIDGE_IMR_PCI_MST_TIMEOUT;
- bridge->b_int_enable = new_enable;
- }
- }
+ old_enable = bridge->b_int_enable;
+ new_enable = old_enable & ~BRIDGE_IMR_PCI_MST_TIMEOUT;
+ bridge->b_int_enable = new_enable;
for (i = 1; i < ATE_NUM_SIZES; i++) {
/* Try writing a value */
- if ( this_is_pic ) {
- bridge->b_ext_ate_ram[ATE_NUM_ENTRIES(i) - 1] = ATE_PROBE_VALUE;
- }
- else {
- if (io_get_sh_swapper(NASID_GET(bridge)))
- bridge->b_ext_ate_ram[ATE_NUM_ENTRIES(i) - 1] = __swab64(ATE_PROBE_VALUE);
- else
- bridge->b_ext_ate_ram[ATE_NUM_ENTRIES(i) - 1] = ATE_PROBE_VALUE;
- }
+ bridge->b_ext_ate_ram[ATE_NUM_ENTRIES(i) - 1] = ATE_PROBE_VALUE;
/* Guard against wrap */
for (j = 1; j < i; j++)
bridge->b_ext_ate_ram[ATE_NUM_ENTRIES(j) - 1] = 0;
/* See if value was written */
- if ( this_is_pic ) {
- if (bridge->b_ext_ate_ram[ATE_NUM_ENTRIES(i) - 1] == ATE_PROBE_VALUE)
+ if (bridge->b_ext_ate_ram[ATE_NUM_ENTRIES(i) - 1] == ATE_PROBE_VALUE)
largest_working_size = i;
- }
- else {
- if (io_get_sh_swapper(NASID_GET(bridge))) {
- if (bridge->b_ext_ate_ram[ATE_NUM_ENTRIES(i) - 1] == __swab64(ATE_PROBE_VALUE))
- largest_working_size = i;
- else {
- if (bridge->b_ext_ate_ram[ATE_NUM_ENTRIES(i) - 1] == ATE_PROBE_VALUE)
- largest_working_size = i;
- }
- }
- }
- }
- if ( this_is_pic ) {
- bridge->b_int_enable = old_enable;
- bridge->b_wid_tflush; /* wait until Bridge PIO complete */
- }
- else {
- if (io_get_sh_swapper(NASID_GET(bridge))) {
- BRIDGE_REG_SET32((&bridge->b_int_enable)) = old_enable;
- BRIDGE_REG_GET32((&bridge->b_wid_tflush)); /* wait until Bridge PIO complete */
- }
- else {
- bridge->b_int_enable = old_enable;
- bridge->b_wid_tflush; /* wait until Bridge PIO complete */
- }
}
+ bridge->b_int_enable = old_enable;
+ bridge->b_wid_tflush; /* wait until Bridge PIO complete */
/*
* ensure that we write and read without any interruption.
*/
s = splhi();
- if ( this_is_pic ) {
- bridge->b_wid_control = (bridge->b_wid_control
+ bridge->b_wid_control = (bridge->b_wid_control
& ~BRIDGE_CTRL_SSRAM_SIZE_MASK)
| BRIDGE_CTRL_SSRAM_SIZE(largest_working_size);
- bridge->b_wid_control; /* inval addr bug war */
- }
- else {
- if (io_get_sh_swapper(NASID_GET(bridge))) {
- BRIDGE_REG_SET32((&(bridge->b_wid_control))) =
- __swab32((BRIDGE_REG_GET32((&bridge->b_wid_control))
- & ~BRIDGE_CTRL_SSRAM_SIZE_MASK)
- | BRIDGE_CTRL_SSRAM_SIZE(largest_working_size));
- BRIDGE_REG_GET32((&bridge->b_wid_control));/* inval addr bug war */
- }
- else {
- bridge->b_wid_control = (bridge->b_wid_control & ~BRIDGE_CTRL_SSRAM_SIZE_MASK)
- | BRIDGE_CTRL_SSRAM_SIZE(largest_working_size);
- bridge->b_wid_control; /* inval addr bug war */
- }
- }
+ bridge->b_wid_control; /* inval addr bug war */
splx(s);
num_entries = ATE_NUM_ENTRIES(largest_working_size);
/* Flush the write buffer associated with this
* PCI device which might be using dma map RAM.
*/
- if ( is_pic(bridge) ) {
- bridge->b_wr_req_buf[slot].reg;
- }
- else {
- if (io_get_sh_swapper(NASID_GET(bridge)) ) {
- BRIDGE_REG_GET32((&bridge->b_wr_req_buf[slot].reg));
- }
- else
- bridge->b_wr_req_buf[slot].reg;
- }
+ bridge->b_wr_req_buf[slot].reg;
}
}
}
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
- * Copyright (C) 2001-2002 Silicon Graphics, Inc. All rights reserved.
+ * Copyright (C) 2001-2003 Silicon Graphics, Inc. All rights reserved.
*/
#include <linux/types.h>
#include <asm/sn/prio.h>
#include <asm/sn/xtalk/xbow.h>
#include <asm/sn/ioc3.h>
-#include <asm/sn/eeprom.h>
#include <asm/sn/io.h>
#include <asm/sn/sn_private.h>
-extern pcibr_info_t pcibr_info_get(devfs_handle_t);
+extern pcibr_info_t pcibr_info_get(vertex_hdl_t);
-uint64_t pcibr_config_get(devfs_handle_t, unsigned, unsigned);
-uint64_t do_pcibr_config_get(int, cfg_p, unsigned, unsigned);
-void pcibr_config_set(devfs_handle_t, unsigned, unsigned, uint64_t);
-void do_pcibr_config_set(int, cfg_p, unsigned, unsigned, uint64_t);
-static void swap_do_pcibr_config_set(cfg_p, unsigned, unsigned, uint64_t);
+uint64_t pcibr_config_get(vertex_hdl_t, unsigned, unsigned);
+uint64_t do_pcibr_config_get(cfg_p, unsigned, unsigned);
+void pcibr_config_set(vertex_hdl_t, unsigned, unsigned, uint64_t);
+void do_pcibr_config_set(cfg_p, unsigned, unsigned, uint64_t);
-#ifdef LITTLE_ENDIAN
/*
* on sn-ia we need to twiddle the the addresses going out
* the pci bus because we use the unswizzled synergy space
#define CS(b,r) (((volatile uint16_t *) b)[((r^4)/2)])
#define CW(b,r) (((volatile uint32_t *) b)[((r^4)/4)])
-#define CBP(b,r) (((volatile uint8_t *) b)[(r)^3])
-#define CSP(b,r) (((volatile uint16_t *) b)[((r)/2)^1])
+#define CBP(b,r) (((volatile uint8_t *) b)[(r)])
+#define CSP(b,r) (((volatile uint16_t *) b)[((r)/2)])
#define CWP(b,r) (((volatile uint32_t *) b)[(r)/4])
#define SCB(b,r) (((volatile uint8_t *) b)[((r)^3)])
#define SCS(b,r) (((volatile uint16_t *) b)[((r^2)/2)])
#define SCW(b,r) (((volatile uint32_t *) b)[((r)/4)])
-#else
-#define CB(b,r) (((volatile uint8_t *) cfgbase)[(r)^3])
-#define CS(b,r) (((volatile uint16_t *) cfgbase)[((r)/2)^1])
-#define CW(b,r) (((volatile uint32_t *) cfgbase)[(r)/4])
-#endif
/*
* Return a config space address for given slot / func / offset. Note the
/*
* Type 0 config space
*/
- if (is_pic(bridge))
- slot++;
+ slot++;
return &bridge->b_type0_cfg_dev[slot].f[func].l[offset];
}
cfg_p cfg_base;
cfg_base = pcibr_slot_config_addr(bridge, slot, 0);
- return (do_pcibr_config_get(is_pic(bridge), cfg_base, offset, sizeof(unsigned)));
+ return (do_pcibr_config_get(cfg_base, offset, sizeof(unsigned)));
}
/*
cfg_p cfg_base;
cfg_base = pcibr_func_config_addr(bridge, 0, slot, func, 0);
- return (do_pcibr_config_get(is_pic(bridge), cfg_base, offset, sizeof(unsigned)));
+ return (do_pcibr_config_get(cfg_base, offset, sizeof(unsigned)));
}
/*
cfg_p cfg_base;
cfg_base = pcibr_slot_config_addr(bridge, slot, 0);
- do_pcibr_config_set(is_pic(bridge), cfg_base, offset, sizeof(unsigned), val);
+ do_pcibr_config_set(cfg_base, offset, sizeof(unsigned), val);
}
/*
cfg_p cfg_base;
cfg_base = pcibr_func_config_addr(bridge, 0, slot, func, 0);
- do_pcibr_config_set(is_pic(bridge), cfg_base, offset, sizeof(unsigned), val);
+ do_pcibr_config_set(cfg_base, offset, sizeof(unsigned), val);
}
int pcibr_config_debug = 0;
cfg_p
-pcibr_config_addr(devfs_handle_t conn,
+pcibr_config_addr(vertex_hdl_t conn,
unsigned reg)
{
pcibr_info_t pcibr_info;
pciio_func = PCI_TYPE1_FUNC(reg);
ASSERT(pciio_bus != 0);
-#if 0
- } else if (conn != pciio_info_hostdev_get(pciio_info)) {
- /*
- * Conn is on a subordinate bus, so get bus/slot/func directly from
- * its pciio_info_t structure.
- */
- pciio_bus = pciio_info->c_bus;
- pciio_slot = pciio_info->c_slot;
- pciio_func = pciio_info->c_func;
- if (pciio_func == PCIIO_FUNC_NONE) {
- pciio_func = 0;
- }
-#endif
} else {
/*
* Conn is directly connected to the host bus. PCI bus number is
return cfgbase;
}
-extern unsigned char Is_pic_on_this_nasid[];
uint64_t
-pcibr_config_get(devfs_handle_t conn,
+pcibr_config_get(vertex_hdl_t conn,
unsigned reg,
unsigned size)
{
- if ( !Is_pic_on_this_nasid[ NASID_GET((pcibr_config_addr(conn, reg)))] )
- return do_pcibr_config_get(0, pcibr_config_addr(conn, reg),
- PCI_TYPE1_REG(reg), size);
- else
- return do_pcibr_config_get(1, pcibr_config_addr(conn, reg),
+ return do_pcibr_config_get(pcibr_config_addr(conn, reg),
PCI_TYPE1_REG(reg), size);
}
uint64_t
-do_pcibr_config_get(
- int pic,
- cfg_p cfgbase,
+do_pcibr_config_get(cfg_p cfgbase,
unsigned reg,
unsigned size)
{
unsigned value;
- if ( pic ) {
- value = CWP(cfgbase, reg);
- }
- else {
- if ( io_get_sh_swapper(NASID_GET(cfgbase)) ) {
- /*
- * Shub Swapper on - 0 returns PCI Offset 0 but byte swapped!
- * Do not swizzle address and byte swap the result.
- */
- value = SCW(cfgbase, reg);
- value = __swab32(value);
- } else {
- value = CW(cfgbase, reg);
- }
- }
+ value = CWP(cfgbase, reg);
if (reg & 3)
value >>= 8 * (reg & 3);
if (size < 4)
}
void
-pcibr_config_set(devfs_handle_t conn,
+pcibr_config_set(vertex_hdl_t conn,
unsigned reg,
unsigned size,
uint64_t value)
{
- if ( Is_pic_on_this_nasid[ NASID_GET((pcibr_config_addr(conn, reg)))] )
- do_pcibr_config_set(1, pcibr_config_addr(conn, reg),
- PCI_TYPE1_REG(reg), size, value);
- else
- swap_do_pcibr_config_set(pcibr_config_addr(conn, reg),
+ do_pcibr_config_set(pcibr_config_addr(conn, reg),
PCI_TYPE1_REG(reg), size, value);
}
void
-do_pcibr_config_set(int pic,
- cfg_p cfgbase,
+do_pcibr_config_set(cfg_p cfgbase,
unsigned reg,
unsigned size,
uint64_t value)
{
- if ( pic ) {
- switch (size) {
- case 1:
+ switch (size) {
+ case 1:
+ CBP(cfgbase, reg) = value;
+ break;
+ case 2:
+ if (reg & 1) {
CBP(cfgbase, reg) = value;
- break;
- case 2:
- if (reg & 1) {
- CBP(cfgbase, reg) = value;
- CBP(cfgbase, reg + 1) = value >> 8;
- } else
- CSP(cfgbase, reg) = value;
- break;
- case 3:
- if (reg & 1) {
- CBP(cfgbase, reg) = value;
- CSP(cfgbase, (reg + 1)) = value >> 8;
- } else {
- CSP(cfgbase, reg) = value;
- CBP(cfgbase, reg + 2) = value >> 16;
- }
- break;
- case 4:
- CWP(cfgbase, reg) = value;
- break;
- }
- }
- else {
- switch (size) {
- case 1:
- CB(cfgbase, reg) = value;
- break;
- case 2:
- if (reg & 1) {
- CB(cfgbase, reg) = value;
- CB(cfgbase, reg + 1) = value >> 8;
- } else
- CS(cfgbase, reg) = value;
- break;
- case 3:
- if (reg & 1) {
- CB(cfgbase, reg) = value;
- CS(cfgbase, (reg + 1)) = value >> 8;
- } else {
- CS(cfgbase, reg) = value;
- CB(cfgbase, reg + 2) = value >> 16;
- }
- break;
- case 4:
- CW(cfgbase, reg) = value;
- break;
- }
- }
-}
-
-void
-swap_do_pcibr_config_set(cfg_p cfgbase,
- unsigned reg,
- unsigned size,
- uint64_t value)
-{
-
- uint64_t temp_value = 0;
-
- switch (size) {
- case 1:
- SCB(cfgbase, reg) = value;
- break;
- case 2:
- temp_value = __swab16(value);
- if (reg & 1) {
- SCB(cfgbase, reg) = temp_value;
- SCB(cfgbase, reg + 1) = temp_value >> 8;
- } else
- SCS(cfgbase, reg) = temp_value;
- break;
- case 3:
- BUG();
- break;
-
- case 4:
- temp_value = __swab32(value);
- SCW(cfgbase, reg) = temp_value;
- break;
- }
+ CBP(cfgbase, reg + 1) = value >> 8;
+ } else
+ CSP(cfgbase, reg) = value;
+ break;
+ case 3:
+ if (reg & 1) {
+ CBP(cfgbase, reg) = value;
+ CSP(cfgbase, (reg + 1)) = value >> 8;
+ } else {
+ CSP(cfgbase, reg) = value;
+ CBP(cfgbase, reg + 2) = value >> 16;
+ }
+ break;
+ case 4:
+ CWP(cfgbase, reg) = value;
+ break;
+ }
}
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
- * Copyright (C) 2001-2002 Silicon Graphics, Inc. All rights reserved.
+ * Copyright (C) 2001-2003 Silicon Graphics, Inc. All rights reserved.
*/
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/module.h>
+#include <linux/string.h>
+#include <linux/interrupt.h>
#include <asm/sn/sgi.h>
+#include <asm/sn/sn_sal.h>
#include <asm/sn/sn_cpuid.h>
#include <asm/sn/addrs.h>
#include <asm/sn/arch.h>
#include <asm/sn/invent.h>
#include <asm/sn/hcl.h>
#include <asm/sn/labelcl.h>
+#include <asm/sn/klconfig.h>
#include <asm/sn/xtalk/xwidget.h>
#include <asm/sn/pci/bridge.h>
#include <asm/sn/pci/pciio.h>
#include <asm/sn/prio.h>
#include <asm/sn/xtalk/xbow.h>
#include <asm/sn/ioc3.h>
-#include <asm/sn/eeprom.h>
#include <asm/sn/io.h>
#include <asm/sn/sn_private.h>
#define USS302_BRIDGE_TIMEOUT_HLD 4
#endif
-int pcibr_devflag = D_MP;
-
-/*
- * This is the file operation table for the pcibr driver.
- * As each of the functions are implemented, put the
- * appropriate function name below.
- */
-struct file_operations pcibr_fops = {
- owner: THIS_MODULE,
- llseek: NULL,
- read: NULL,
- write: NULL,
- readdir: NULL,
- poll: NULL,
- ioctl: NULL,
- mmap: NULL,
- open: NULL,
- flush: NULL,
- release: NULL,
- fsync: NULL,
- fasync: NULL,
- lock: NULL,
- readv: NULL,
- writev: NULL
-};
-
/* kbrick widgetnum-to-bus layout */
int p_busnum[MAX_PORT_NUM] = { /* widget# */
0, 0, 0, 0, 0, 0, 0, 0, /* 0x0 - 0x7 */
pcibr_list_p pcibr_list = 0;
#endif
-extern int hwgraph_vertex_name_get(devfs_handle_t vhdl, char *buf, uint buflen);
-extern int hub_device_flags_set(devfs_handle_t widget_dev, hub_widget_flags_t flags);
+extern int hwgraph_vertex_name_get(vertex_hdl_t vhdl, char *buf, uint buflen);
extern long atoi(register char *p);
-extern cnodeid_t nodevertex_to_cnodeid(devfs_handle_t vhdl);
-extern char *dev_to_name(devfs_handle_t dev, char *buf, uint buflen);
+extern cnodeid_t nodevertex_to_cnodeid(vertex_hdl_t vhdl);
+extern char *dev_to_name(vertex_hdl_t dev, char *buf, uint buflen);
extern struct map *atemapalloc(uint64_t);
extern void atefree(struct map *, size_t, uint64_t);
extern void atemapfree(struct map *);
-extern pciio_dmamap_t get_free_pciio_dmamap(devfs_handle_t);
+extern pciio_dmamap_t get_free_pciio_dmamap(vertex_hdl_t);
extern void free_pciio_dmamap(pcibr_dmamap_t);
-extern void xwidget_error_register(devfs_handle_t, error_handler_f *, error_handler_arg_t);
+extern void xwidget_error_register(vertex_hdl_t, error_handler_f *, error_handler_arg_t);
#define ATE_WRITE() ate_write(pcibr_soft, ate_ptr, ate_count, ate)
#if PCIBR_FREEZE_TIME
extern int do_pcibr_rrb_free_all(pcibr_soft_t, bridge_t *, pciio_slot_t);
extern void do_pcibr_rrb_autoalloc(pcibr_soft_t, int, int, int);
-extern int pcibr_wrb_flush(devfs_handle_t);
-extern int pcibr_rrb_alloc(devfs_handle_t, int *, int *);
-extern void pcibr_rrb_flush(devfs_handle_t);
+extern int pcibr_wrb_flush(vertex_hdl_t);
+extern int pcibr_rrb_alloc(vertex_hdl_t, int *, int *);
+extern void pcibr_rrb_flush(vertex_hdl_t);
static int pcibr_try_set_device(pcibr_soft_t, pciio_slot_t, unsigned, bridgereg_t);
void pcibr_release_device(pcibr_soft_t, pciio_slot_t, bridgereg_t);
extern iopaddr_t pcibr_bus_addr_alloc(pcibr_soft_t, pciio_win_info_t,
pciio_space_t, int, int, int);
-void pcibr_init(void);
-int pcibr_attach(devfs_handle_t);
-int pcibr_attach2(devfs_handle_t, bridge_t *, devfs_handle_t,
+int pcibr_attach(vertex_hdl_t);
+int pcibr_attach2(vertex_hdl_t, bridge_t *, vertex_hdl_t,
int, pcibr_soft_t *);
-int pcibr_detach(devfs_handle_t);
-int pcibr_open(devfs_handle_t *, int, int, cred_t *);
-int pcibr_close(devfs_handle_t, int, int, cred_t *);
-int pcibr_map(devfs_handle_t, vhandl_t *, off_t, size_t, uint);
-int pcibr_unmap(devfs_handle_t, vhandl_t *);
-int pcibr_ioctl(devfs_handle_t, int, void *, int, struct cred *, int *);
+int pcibr_detach(vertex_hdl_t);
int pcibr_pcix_rbars_calc(pcibr_soft_t);
extern int pcibr_init_ext_ate_ram(bridge_t *);
extern int pcibr_ate_alloc(pcibr_soft_t, int);
extern void pcibr_ate_free(pcibr_soft_t, int, int);
-extern int pcibr_widget_to_bus(devfs_handle_t pcibr_vhdl);
+extern int pcibr_widget_to_bus(vertex_hdl_t pcibr_vhdl);
extern unsigned ate_freeze(pcibr_dmamap_t pcibr_dmamap,
#if PCIBR_FREEZE_TIME
unsigned *cmd_regs,
unsigned s);
-pcibr_info_t pcibr_info_get(devfs_handle_t);
+pcibr_info_t pcibr_info_get(vertex_hdl_t);
-static iopaddr_t pcibr_addr_pci_to_xio(devfs_handle_t, pciio_slot_t, pciio_space_t, iopaddr_t, size_t, unsigned);
+static iopaddr_t pcibr_addr_pci_to_xio(vertex_hdl_t, pciio_slot_t, pciio_space_t, iopaddr_t, size_t, unsigned);
-pcibr_piomap_t pcibr_piomap_alloc(devfs_handle_t, device_desc_t, pciio_space_t, iopaddr_t, size_t, size_t, unsigned);
+pcibr_piomap_t pcibr_piomap_alloc(vertex_hdl_t, device_desc_t, pciio_space_t, iopaddr_t, size_t, size_t, unsigned);
void pcibr_piomap_free(pcibr_piomap_t);
caddr_t pcibr_piomap_addr(pcibr_piomap_t, iopaddr_t, size_t);
void pcibr_piomap_done(pcibr_piomap_t);
-caddr_t pcibr_piotrans_addr(devfs_handle_t, device_desc_t, pciio_space_t, iopaddr_t, size_t, unsigned);
-iopaddr_t pcibr_piospace_alloc(devfs_handle_t, device_desc_t, pciio_space_t, size_t, size_t);
-void pcibr_piospace_free(devfs_handle_t, pciio_space_t, iopaddr_t, size_t);
+caddr_t pcibr_piotrans_addr(vertex_hdl_t, device_desc_t, pciio_space_t, iopaddr_t, size_t, unsigned);
+iopaddr_t pcibr_piospace_alloc(vertex_hdl_t, device_desc_t, pciio_space_t, size_t, size_t);
+void pcibr_piospace_free(vertex_hdl_t, pciio_space_t, iopaddr_t, size_t);
static iopaddr_t pcibr_flags_to_d64(unsigned, pcibr_soft_t);
extern bridge_ate_t pcibr_flags_to_ate(unsigned);
-pcibr_dmamap_t pcibr_dmamap_alloc(devfs_handle_t, device_desc_t, size_t, unsigned);
+pcibr_dmamap_t pcibr_dmamap_alloc(vertex_hdl_t, device_desc_t, size_t, unsigned);
void pcibr_dmamap_free(pcibr_dmamap_t);
extern bridge_ate_p pcibr_ate_addr(pcibr_soft_t, int);
static iopaddr_t pcibr_addr_xio_to_pci(pcibr_soft_t, iopaddr_t, size_t);
iopaddr_t pcibr_dmamap_addr(pcibr_dmamap_t, paddr_t, size_t);
-alenlist_t pcibr_dmamap_list(pcibr_dmamap_t, alenlist_t, unsigned);
void pcibr_dmamap_done(pcibr_dmamap_t);
-cnodeid_t pcibr_get_dmatrans_node(devfs_handle_t);
-iopaddr_t pcibr_dmatrans_addr(devfs_handle_t, device_desc_t, paddr_t, size_t, unsigned);
-alenlist_t pcibr_dmatrans_list(devfs_handle_t, device_desc_t, alenlist_t, unsigned);
+cnodeid_t pcibr_get_dmatrans_node(vertex_hdl_t);
+iopaddr_t pcibr_dmatrans_addr(vertex_hdl_t, device_desc_t, paddr_t, size_t, unsigned);
void pcibr_dmamap_drain(pcibr_dmamap_t);
-void pcibr_dmaaddr_drain(devfs_handle_t, paddr_t, size_t);
-void pcibr_dmalist_drain(devfs_handle_t, alenlist_t);
+void pcibr_dmaaddr_drain(vertex_hdl_t, paddr_t, size_t);
+void pcibr_dmalist_drain(vertex_hdl_t, alenlist_t);
iopaddr_t pcibr_dmamap_pciaddr_get(pcibr_dmamap_t);
extern unsigned pcibr_intr_bits(pciio_info_t info,
pciio_intr_line_t lines, int nslots);
-extern pcibr_intr_t pcibr_intr_alloc(devfs_handle_t, device_desc_t, pciio_intr_line_t, devfs_handle_t);
+extern pcibr_intr_t pcibr_intr_alloc(vertex_hdl_t, device_desc_t, pciio_intr_line_t, vertex_hdl_t);
extern void pcibr_intr_free(pcibr_intr_t);
extern void pcibr_setpciint(xtalk_intr_t);
extern int pcibr_intr_connect(pcibr_intr_t, intr_func_t, intr_arg_t);
extern void pcibr_intr_disconnect(pcibr_intr_t);
-extern devfs_handle_t pcibr_intr_cpu_get(pcibr_intr_t);
+extern vertex_hdl_t pcibr_intr_cpu_get(pcibr_intr_t);
extern void pcibr_intr_func(intr_arg_t);
extern void print_bridge_errcmd(uint32_t, char *);
extern int pcibr_dmawr_error(pcibr_soft_t, int, ioerror_mode_t, ioerror_t *);
extern int pcibr_error_handler(error_handler_arg_t, int, ioerror_mode_t, ioerror_t *);
extern int pcibr_error_handler_wrapper(error_handler_arg_t, int, ioerror_mode_t, ioerror_t *);
-void pcibr_provider_startup(devfs_handle_t);
-void pcibr_provider_shutdown(devfs_handle_t);
+void pcibr_provider_startup(vertex_hdl_t);
+void pcibr_provider_shutdown(vertex_hdl_t);
-int pcibr_reset(devfs_handle_t);
-pciio_endian_t pcibr_endian_set(devfs_handle_t, pciio_endian_t, pciio_endian_t);
+int pcibr_reset(vertex_hdl_t);
+pciio_endian_t pcibr_endian_set(vertex_hdl_t, pciio_endian_t, pciio_endian_t);
int pcibr_priority_bits_set(pcibr_soft_t, pciio_slot_t, pciio_priority_t);
-pciio_priority_t pcibr_priority_set(devfs_handle_t, pciio_priority_t);
-int pcibr_device_flags_set(devfs_handle_t, pcibr_device_flags_t);
-
-extern cfg_p pcibr_config_addr(devfs_handle_t, unsigned);
-extern uint64_t pcibr_config_get(devfs_handle_t, unsigned, unsigned);
-extern void pcibr_config_set(devfs_handle_t, unsigned, unsigned, uint64_t);
-
-extern pcibr_hints_t pcibr_hints_get(devfs_handle_t, int);
-extern void pcibr_hints_fix_rrbs(devfs_handle_t);
-extern void pcibr_hints_dualslot(devfs_handle_t, pciio_slot_t, pciio_slot_t);
-extern void pcibr_hints_intr_bits(devfs_handle_t, pcibr_intr_bits_f *);
-extern void pcibr_set_rrb_callback(devfs_handle_t, rrb_alloc_funct_t);
-extern void pcibr_hints_handsoff(devfs_handle_t);
-extern void pcibr_hints_subdevs(devfs_handle_t, pciio_slot_t, uint64_t);
-
-extern int pcibr_slot_reset(devfs_handle_t,pciio_slot_t);
-extern int pcibr_slot_info_init(devfs_handle_t,pciio_slot_t);
-extern int pcibr_slot_info_free(devfs_handle_t,pciio_slot_t);
+pciio_priority_t pcibr_priority_set(vertex_hdl_t, pciio_priority_t);
+int pcibr_device_flags_set(vertex_hdl_t, pcibr_device_flags_t);
+
+extern cfg_p pcibr_config_addr(vertex_hdl_t, unsigned);
+extern uint64_t pcibr_config_get(vertex_hdl_t, unsigned, unsigned);
+extern void pcibr_config_set(vertex_hdl_t, unsigned, unsigned, uint64_t);
+
+extern pcibr_hints_t pcibr_hints_get(vertex_hdl_t, int);
+extern void pcibr_hints_fix_rrbs(vertex_hdl_t);
+extern void pcibr_hints_dualslot(vertex_hdl_t, pciio_slot_t, pciio_slot_t);
+extern void pcibr_hints_intr_bits(vertex_hdl_t, pcibr_intr_bits_f *);
+extern void pcibr_set_rrb_callback(vertex_hdl_t, rrb_alloc_funct_t);
+extern void pcibr_hints_handsoff(vertex_hdl_t);
+extern void pcibr_hints_subdevs(vertex_hdl_t, pciio_slot_t, uint64_t);
+
+extern int pcibr_slot_info_init(vertex_hdl_t,pciio_slot_t);
+extern int pcibr_slot_info_free(vertex_hdl_t,pciio_slot_t);
extern int pcibr_slot_info_return(pcibr_soft_t, pciio_slot_t,
pcibr_slot_info_resp_t);
extern void pcibr_slot_func_info_return(pcibr_info_h, int,
pcibr_slot_func_info_resp_t);
-extern int pcibr_slot_addr_space_init(devfs_handle_t,pciio_slot_t);
+extern int pcibr_slot_addr_space_init(vertex_hdl_t,pciio_slot_t);
extern int pcibr_slot_pcix_rbar_init(pcibr_soft_t, pciio_slot_t);
-extern int pcibr_slot_device_init(devfs_handle_t, pciio_slot_t);
-extern int pcibr_slot_guest_info_init(devfs_handle_t,pciio_slot_t);
-extern int pcibr_slot_call_device_attach(devfs_handle_t,
+extern int pcibr_slot_device_init(vertex_hdl_t, pciio_slot_t);
+extern int pcibr_slot_guest_info_init(vertex_hdl_t,pciio_slot_t);
+extern int pcibr_slot_call_device_attach(vertex_hdl_t,
pciio_slot_t, int);
-extern int pcibr_slot_call_device_detach(devfs_handle_t,
+extern int pcibr_slot_call_device_detach(vertex_hdl_t,
pciio_slot_t, int);
-extern int pcibr_slot_attach(devfs_handle_t, pciio_slot_t, int,
+extern int pcibr_slot_attach(vertex_hdl_t, pciio_slot_t, int,
char *, int *);
-extern int pcibr_slot_detach(devfs_handle_t, pciio_slot_t, int,
+extern int pcibr_slot_detach(vertex_hdl_t, pciio_slot_t, int,
char *, int *);
-extern int pcibr_is_slot_sys_critical(devfs_handle_t, pciio_slot_t);
-
-extern int pcibr_slot_initial_rrb_alloc(devfs_handle_t, pciio_slot_t);
-extern int pcibr_initial_rrb(devfs_handle_t, pciio_slot_t, pciio_slot_t);
+extern int pcibr_slot_initial_rrb_alloc(vertex_hdl_t, pciio_slot_t);
+extern int pcibr_initial_rrb(vertex_hdl_t, pciio_slot_t, pciio_slot_t);
/* =====================================================================
* Device(x) register management
*/
-/*
- * pcibr_init: called once during system startup or
- * when a loadable driver is loaded.
- *
- * The driver_register function should normally
- * be in _reg, not _init. But the pcibr driver is
- * required by devinit before the _reg routines
- * are called, so this is an exception.
- */
-void
-pcibr_init(void)
+static int
+pcibr_mmap(struct file * file, struct vm_area_struct * vma)
{
- PCIBR_DEBUG_ALWAYS((PCIBR_DEBUG_INIT, NULL, "pcibr_init()\n"));
-
- xwidget_driver_register(XBRIDGE_WIDGET_PART_NUM,
- XBRIDGE_WIDGET_MFGR_NUM,
- "pcibr_",
- 0);
- xwidget_driver_register(BRIDGE_WIDGET_PART_NUM,
- BRIDGE_WIDGET_MFGR_NUM,
- "pcibr_",
- 0);
+ vertex_hdl_t pcibr_vhdl = file->f_dentry->d_fsdata;
+ pcibr_soft_t pcibr_soft;
+ bridge_t *bridge;
+ unsigned long phys_addr;
+ int error = 0;
+
+ pcibr_soft = pcibr_soft_get(pcibr_vhdl);
+ bridge = pcibr_soft->bs_base;
+ phys_addr = (unsigned long)bridge & ~0xc000000000000000; /* Mask out the Uncache bits */
+ vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
+ vma->vm_flags |= VM_RESERVED | VM_IO;
+ error = io_remap_page_range(vma, phys_addr, vma->vm_start,
+ vma->vm_end - vma->vm_start,
+ vma->vm_page_prot);
+ return(error);
}
/*
- * open/close mmap/munmap interface would be used by processes
- * that plan to map the PCI bridge, and muck around with the
- * registers. This is dangerous to do, and will be allowed
- * to a select brand of programs. Typically these are
- * diagnostics programs, or some user level commands we may
- * write to do some weird things.
- * To start with expect them to have root priveleges.
- * We will ask for more later.
+ * This is the file operation table for the pcibr driver.
+ * As each of the functions are implemented, put the
+ * appropriate function name below.
*/
-/* ARGSUSED */
-int
-pcibr_open(devfs_handle_t *devp, int oflag, int otyp, cred_t *credp)
-{
- return 0;
-}
-
-/*ARGSUSED */
-int
-pcibr_close(devfs_handle_t dev, int oflag, int otyp, cred_t *crp)
-{
- return 0;
-}
-
-/*ARGSUSED */
-int
-pcibr_map(devfs_handle_t dev, vhandl_t *vt, off_t off, size_t len, uint prot)
-{
- int error;
- devfs_handle_t vhdl = dev_to_vhdl(dev);
- devfs_handle_t pcibr_vhdl = hwgraph_connectpt_get(vhdl);
- pcibr_soft_t pcibr_soft = pcibr_soft_get(pcibr_vhdl);
- bridge_t *bridge = pcibr_soft->bs_base;
-
- hwgraph_vertex_unref(pcibr_vhdl);
-
- ASSERT(pcibr_soft);
- len = ctob(btoc(len)); /* Make len page aligned */
- error = v_mapphys(vt, (void *) ((__psunsigned_t) bridge + off), len);
-
- /*
- * If the offset being mapped corresponds to the flash prom
- * base, and if the mapping succeeds, and if the user
- * has requested the protections to be WRITE, enable the
- * flash prom to be written.
- *
- * XXX- deprecate this in favor of using the
- * real flash driver ...
- */
- if (IS_BRIDGE_SOFT(pcibr_soft) && !error &&
- ((off == BRIDGE_EXTERNAL_FLASH) ||
- (len > BRIDGE_EXTERNAL_FLASH))) {
- int s;
-
- /*
- * ensure that we write and read without any interruption.
- * The read following the write is required for the Bridge war
- */
- s = splhi();
-
- if (io_get_sh_swapper(NASID_GET(bridge))) {
- BRIDGE_REG_SET32((&bridge->b_wid_control)) |= __swab32(BRIDGE_CTRL_FLASH_WR_EN);
- BRIDGE_REG_GET32((&bridge->b_wid_control)); /* inval addr bug war */
- } else {
- bridge->b_wid_control |= BRIDGE_CTRL_FLASH_WR_EN;
- bridge->b_wid_control; /* inval addr bug war */
- }
- splx(s);
- }
- return error;
-}
-
-/*ARGSUSED */
-int
-pcibr_unmap(devfs_handle_t dev, vhandl_t *vt)
-{
- devfs_handle_t pcibr_vhdl = hwgraph_connectpt_get((devfs_handle_t) dev);
- pcibr_soft_t pcibr_soft = pcibr_soft_get(pcibr_vhdl);
- bridge_t *bridge = pcibr_soft->bs_base;
+static int pcibr_mmap(struct file * file, struct vm_area_struct * vma);
+struct file_operations pcibr_fops = {
+ .owner = THIS_MODULE,
+ .mmap = pcibr_mmap,
+};
- hwgraph_vertex_unref(pcibr_vhdl);
-
- if ( IS_PIC_SOFT(pcibr_soft) ) {
- /*
- * If flashprom write was enabled, disable it, as
- * this is the last unmap.
- */
- if (IS_BRIDGE_SOFT(pcibr_soft) &&
- (bridge->b_wid_control & BRIDGE_CTRL_FLASH_WR_EN)) {
- int s;
-
- /*
- * ensure that we write and read without any interruption.
- * The read following the write is required for the Bridge war
- */
- s = splhi();
- bridge->b_wid_control &= ~BRIDGE_CTRL_FLASH_WR_EN;
- bridge->b_wid_control; /* inval addr bug war */
- splx(s);
- }
- }
- else {
- if (io_get_sh_swapper(NASID_GET(bridge))) {
- if (BRIDGE_REG_GET32((&bridge->b_wid_control)) & BRIDGE_CTRL_FLASH_WR_EN) {
- int s;
-
- /*
- * ensure that we write and read without any interruption.
- * The read following the write is required for the Bridge war
- */
- s = splhi();
- BRIDGE_REG_SET32((&bridge->b_wid_control)) &= __swab32((unsigned int)~BRIDGE_CTRL_FLASH_WR_EN);
- BRIDGE_REG_GET32((&bridge->b_wid_control)); /* inval addr bug war */
- splx(s);
- } else {
- if (bridge->b_wid_control & BRIDGE_CTRL_FLASH_WR_EN) {
- int s;
-
- /*
- * ensure that we write and read without any interruption.
- * The read following the write is required for the Bridge war
- */
- s = splhi();
- bridge->b_wid_control &= ~BRIDGE_CTRL_FLASH_WR_EN;
- bridge->b_wid_control; /* inval addr bug war */
- splx(s);
- }
- }
- }
- }
- return 0;
-}
/* This is special case code used by grio. There are plans to make
* this a bit more general in the future, but till then this should
* be sufficient.
*/
pciio_slot_t
-pcibr_device_slot_get(devfs_handle_t dev_vhdl)
+pcibr_device_slot_get(vertex_hdl_t dev_vhdl)
{
char devname[MAXDEVNAME];
- devfs_handle_t tdev;
+ vertex_hdl_t tdev;
pciio_info_t pciio_info;
pciio_slot_t slot = PCIIO_SLOT_NONE;
return slot;
}
-/*ARGSUSED */
-int
-pcibr_ioctl(devfs_handle_t dev,
- int cmd,
- void *arg,
- int flag,
- struct cred *cr,
- int *rvalp)
-{
- return 0;
-}
-
pcibr_info_t
-pcibr_info_get(devfs_handle_t vhdl)
+pcibr_info_get(vertex_hdl_t vhdl)
{
return (pcibr_info_t) pciio_info_get(vhdl);
}
* This is usually used at the time of shutting down of the PCI card.
*/
int
-pcibr_device_unregister(devfs_handle_t pconn_vhdl)
+pcibr_device_unregister(vertex_hdl_t pconn_vhdl)
{
pciio_info_t pciio_info;
- devfs_handle_t pcibr_vhdl;
+ vertex_hdl_t pcibr_vhdl;
pciio_slot_t slot;
pcibr_soft_t pcibr_soft;
bridge_t *bridge;
* slot's device status to be set.
*/
void
-pcibr_driver_reg_callback(devfs_handle_t pconn_vhdl,
+pcibr_driver_reg_callback(vertex_hdl_t pconn_vhdl,
int key1, int key2, int error)
{
pciio_info_t pciio_info;
pcibr_info_t pcibr_info;
- devfs_handle_t pcibr_vhdl;
+ vertex_hdl_t pcibr_vhdl;
pciio_slot_t slot;
pcibr_soft_t pcibr_soft;
* slot's device status to be set.
*/
void
-pcibr_driver_unreg_callback(devfs_handle_t pconn_vhdl,
+pcibr_driver_unreg_callback(vertex_hdl_t pconn_vhdl,
int key1, int key2, int error)
{
pciio_info_t pciio_info;
pcibr_info_t pcibr_info;
- devfs_handle_t pcibr_vhdl;
+ vertex_hdl_t pcibr_vhdl;
pciio_slot_t slot;
pcibr_soft_t pcibr_soft;
* depends on hwgraph separator == '/'
*/
int
-pcibr_bus_cnvlink(devfs_handle_t f_c)
+pcibr_bus_cnvlink(vertex_hdl_t f_c)
{
char dst[MAXDEVNAME];
char *dp = dst;
char *cp, *xp;
int widgetnum;
char pcibus[8];
- devfs_handle_t nvtx, svtx;
+ vertex_hdl_t nvtx, svtx;
int rv;
PCIBR_DEBUG_ALWAYS((PCIBR_DEBUG_ATTACH, f_c, "pcibr_bus_cnvlink\n"));
*/
/*ARGSUSED */
int
-pcibr_attach(devfs_handle_t xconn_vhdl)
+pcibr_attach(vertex_hdl_t xconn_vhdl)
{
/* REFERENCED */
graph_error_t rc;
- devfs_handle_t pcibr_vhdl;
+ vertex_hdl_t pcibr_vhdl;
bridge_t *bridge;
PCIBR_DEBUG_ALWAYS((PCIBR_DEBUG_ATTACH, xconn_vhdl, "pcibr_attach\n"));
/*ARGSUSED */
int
-pcibr_attach2(devfs_handle_t xconn_vhdl, bridge_t *bridge,
- devfs_handle_t pcibr_vhdl, int busnum, pcibr_soft_t *ret_softp)
+pcibr_attach2(vertex_hdl_t xconn_vhdl, bridge_t *bridge,
+ vertex_hdl_t pcibr_vhdl, int busnum, pcibr_soft_t *ret_softp)
{
/* REFERENCED */
- devfs_handle_t ctlr_vhdl;
+ vertex_hdl_t ctlr_vhdl;
bridgereg_t id;
int rev;
pcibr_soft_t pcibr_soft;
xtalk_intr_t xtalk_intr;
int slot;
int ibit;
- devfs_handle_t noslot_conn;
+ vertex_hdl_t noslot_conn;
char devnm[MAXDEVNAME], *s;
pcibr_hints_t pcibr_hints;
uint64_t int_enable;
nasid_t nasid;
int iobrick_type_get_nasid(nasid_t nasid);
int iobrick_module_get_nasid(nasid_t nasid);
- extern unsigned char Is_pic_on_this_nasid[512];
-
-
- async_attach_t aa = NULL;
PCIBR_DEBUG_ALWAYS((PCIBR_DEBUG_ATTACH, pcibr_vhdl,
"pcibr_attach2: bridge=0x%p, busnum=%d\n", bridge, busnum));
- aa = async_attach_get_info(xconn_vhdl);
-
ctlr_vhdl = NULL;
- ctlr_vhdl = hwgraph_register(pcibr_vhdl, EDGE_LBL_CONTROLLER,
- 0, DEVFS_FL_AUTO_DEVNUM,
- 0, 0,
- S_IFCHR | S_IRUSR | S_IWUSR | S_IRGRP, 0, 0,
- &pcibr_fops, NULL);
-
+ ctlr_vhdl = hwgraph_register(pcibr_vhdl, EDGE_LBL_CONTROLLER, 0,
+ 0, 0, 0,
+ S_IFCHR | S_IRUSR | S_IWUSR | S_IRGRP, 0, 0,
+ (struct file_operations *)&pcibr_fops, (void *)pcibr_vhdl);
ASSERT(ctlr_vhdl != NULL);
/*
pcibr_soft->bs_min_slot = 0; /* lowest possible slot# */
pcibr_soft->bs_max_slot = 7; /* highest possible slot# */
pcibr_soft->bs_busnum = busnum;
- if (is_xbridge(bridge)) {
- pcibr_soft->bs_bridge_type = PCIBR_BRIDGETYPE_XBRIDGE;
- } else if (is_pic(bridge)) {
- pcibr_soft->bs_bridge_type = PCIBR_BRIDGETYPE_PIC;
- } else {
- pcibr_soft->bs_bridge_type = PCIBR_BRIDGETYPE_BRIDGE;
- }
+ pcibr_soft->bs_bridge_type = PCIBR_BRIDGETYPE_PIC;
switch(pcibr_soft->bs_bridge_type) {
case PCIBR_BRIDGETYPE_BRIDGE:
pcibr_soft->bs_int_ate_size = BRIDGE_INTERNAL_ATES;
nasid = NASID_GET(bridge);
- /* set whether it is a PIC or not */
- Is_pic_on_this_nasid[nasid] = (IS_PIC_SOFT(pcibr_soft)) ? 1 : 0;
-
-
if ((pcibr_soft->bs_bricktype = iobrick_type_get_nasid(nasid)) < 0)
printk(KERN_WARNING "0x%p: Unknown bricktype : 0x%x\n", (void *)xconn_vhdl,
(unsigned int)pcibr_soft->bs_bricktype);
if (pcibr_soft->bs_bricktype > 0) {
switch (pcibr_soft->bs_bricktype) {
case MODULE_PXBRICK:
+ case MODULE_IXBRICK:
pcibr_soft->bs_first_slot = 0;
pcibr_soft->bs_last_slot = 1;
pcibr_soft->bs_last_reset = 1;
+
+ /* If Bus 1 has IO9 then there are 4 devices in that bus. Note
+ * we figure this out from klconfig since the kernel has yet to
+ * probe
+ */
+ if (pcibr_widget_to_bus(pcibr_vhdl) == 1) {
+ lboard_t *brd = (lboard_t *)KL_CONFIG_INFO(nasid);
+
+ while (brd) {
+ if (brd->brd_flags & LOCAL_MASTER_IO6) {
+ pcibr_soft->bs_last_slot = 3;
+ pcibr_soft->bs_last_reset = 3;
+ }
+ brd = KLCF_NEXT(brd);
+ }
+ }
break;
- case MODULE_PEBRICK:
case MODULE_PBRICK:
pcibr_soft->bs_first_slot = 1;
pcibr_soft->bs_last_slot = 2;
/* enable parity checking on PICs internal RAM */
pic_ctrl_reg |= PIC_CTRL_PAR_EN_RESP;
pic_ctrl_reg |= PIC_CTRL_PAR_EN_ATE;
- /* PIC BRINGUP WAR (PV# 862253): don't enable write request
+ /* PIC BRINGUP WAR (PV# 862253): don't enable write request
* parity checking.
*/
if (!PCIBR_WAR_ENABLED(PV862253, pcibr_soft)) {
int entry;
cnodeid_t cnodeid;
nasid_t nasid;
-#ifdef PIC_LATER
- char *node_val;
- devfs_handle_t node_vhdl;
- char vname[MAXDEVNAME];
-#endif
/* Set the Bridge's 32-bit PCI to XTalk
* Direct Map register to the most useful
*/
cnodeid = 0; /* default node id */
- /*
- * Determine the base address node id to be used for all 32-bit
- * Direct Mapping I/O. The default is node 0, but this can be changed
- * via a DEVICE_ADMIN directive and the PCIBUS_DMATRANS_NODE
- * attribute in the irix.sm config file. A device driver can obtain
- * this node value via a call to pcibr_get_dmatrans_node().
- */
-#ifdef PIC_LATER
-// This probably needs to be addressed - pfg
- node_val = device_admin_info_get(pcibr_vhdl, ADMIN_LBL_DMATRANS_NODE);
- if (node_val != NULL) {
- node_vhdl = hwgraph_path_to_vertex(node_val);
- if (node_vhdl != GRAPH_VERTEX_NONE) {
- cnodeid = nodevertex_to_cnodeid(node_vhdl);
- }
- if ((node_vhdl == GRAPH_VERTEX_NONE) || (cnodeid == CNODEID_NONE)) {
- cnodeid = 0;
- vertex_to_name(pcibr_vhdl, vname, sizeof(vname));
- printk(KERN_WARNING "Invalid hwgraph node path specified:\n"
- " DEVICE_ADMIN: %s %s=%s\n",
- vname, ADMIN_LBL_DMATRANS_NODE, node_val);
- }
- }
-#endif /* PIC_LATER */
nasid = COMPACT_TO_NASID_NODEID(cnodeid);
paddr = NODE_OFFSET(nasid) + 0;
*/
xtalk_intr = xtalk_intr_alloc(xconn_vhdl, (device_desc_t)0, pcibr_vhdl);
+ {
+ int irq = ((hub_intr_t)xtalk_intr)->i_bit;
+ int cpu = ((hub_intr_t)xtalk_intr)->i_cpuid;
+
+ intr_unreserve_level(cpu, irq);
+ ((hub_intr_t)xtalk_intr)->i_bit = SGI_PCIBR_ERROR;
+ }
ASSERT(xtalk_intr != NULL);
pcibr_soft->bsi_err_intr = xtalk_intr;
xtalk_intr_connect(xtalk_intr, (intr_func_t) pcibr_error_intr_handler,
(intr_arg_t) pcibr_soft, (xtalk_intr_setfunc_t)pcibr_setwidint, (void *)bridge);
-#ifdef BUS_INT_WAR_NOT_YET
- request_irq(CPU_VECTOR_TO_IRQ(((hub_intr_t)xtalk_intr)->i_cpuid,
- ((hub_intr_t)xtalk_intr)->i_bit),
- (intr_func_t)pcibr_error_intr_handler, 0, "PCIBR error",
+ request_irq(SGI_PCIBR_ERROR, (void *)pcibr_error_intr_handler, SA_SHIRQ, "PCIBR error",
(intr_arg_t) pcibr_soft);
-#endif
PCIBR_DEBUG_ALWAYS((PCIBR_DEBUG_INTR_ALLOC, pcibr_vhdl,
"pcibr_setwidint: b_wid_int_upper=0x%x, b_wid_int_lower=0x%x\n",
if (IS_PIC_SOFT(pcibr_soft)) {
int_enable_64 = bridge->p_int_enable_64 | BRIDGE_ISR_ERRORS;
int_enable = (uint64_t)int_enable_64;
+#ifdef PFG_TEST
+ int_enable = (uint64_t)0x7ffffeff7ffffeff;
+#endif
} else {
int_enable_32 = bridge->b_int_enable | (BRIDGE_ISR_ERRORS & 0xffffffff);
int_enable = ((uint64_t)int_enable_32 & 0xffffffff);
- }
-#ifdef BUS_INT_WAR_NOT_YET
- {
- extern void sn_add_polled_interrupt(int irq, int interval);
-
- sn_add_polled_interrupt(CPU_VECTOR_TO_IRQ(((hub_intr_t)xtalk_intr)->i_cpuid,
- ((hub_intr_t)xtalk_intr)->i_bit), 20000);
- }
+#ifdef PFG_TEST
+ int_enable = (uint64_t)0x7ffffeff;
#endif
+ }
#if BRIDGE_ERROR_INTR_WAR
}
#endif
-#ifdef BRIDGE_B_DATACORR_WAR
-
- /* WAR panic for Rev B silent data corruption.
- * PIOERR turned off here because there is a problem
- * with not re-arming it in pcibr_error_intr_handler.
- * We don't get LLP error interrupts if we don't
- * re-arm PIOERR interrupts! Just disable them here
- */
-
- if (pcibr_soft->bs_rev_num == BRIDGE_PART_REV_B) {
- int_enable |= BRIDGE_IMR_LLP_REC_CBERR;
- int_enable &= ~BRIDGE_ISR_PCIBUS_PIOERR;
-
- PCIBR_DEBUG_ALWAYS((PCIBR_DEBUG_ATTACH, pcibr_vhdl,
- "Turning on LLP_REC_CBERR for Rev B Bridge.\n"));
- }
-#endif
-
/* PIC BRINGUP WAR (PV# 856864 & 856865): allow the tnums that are
* locked out to be freed up sooner (by timing out) so that the
* read tnums are never completely used up.
if (pcibr_soft->bs_rev_num < BRIDGE_PART_REV_B)
pcibr_soft->bs_dma_flags |= PCIBR_NOPREFETCH;
else if (pcibr_soft->bs_rev_num <
- (BRIDGE_WIDGET_PART_NUM << 4 | pcibr_prefetch_enable_rev))
+ (BRIDGE_WIDGET_PART_NUM << 4))
pcibr_soft->bs_dma_flags |= PCIIO_NOPREFETCH;
- /* WRITE_GATHER:
- * Disabled up to but not including the
- * rev number in pcibr_wg_enable_rev. There
- * is no "WAR range" as with prefetch.
- */
+ /* WRITE_GATHER: Disabled */
if (pcibr_soft->bs_rev_num <
- (BRIDGE_WIDGET_PART_NUM << 4 | pcibr_wg_enable_rev))
+ (BRIDGE_WIDGET_PART_NUM << 4))
pcibr_soft->bs_dma_flags |= PCIBR_NOWRITE_GATHER;
/* PIC only supports 64-bit direct mapping in PCI-X mode. Since
*/
if (pcibr_soft->bs_bricktype > 0) {
switch (pcibr_soft->bs_bricktype) {
+ case MODULE_PBRICK:
+ do_pcibr_rrb_autoalloc(pcibr_soft, 1, VCHAN0, 8);
+ do_pcibr_rrb_autoalloc(pcibr_soft, 2, VCHAN0, 8);
+ break;
+ case MODULE_IBRICK:
+ /* port 0xe on the Ibrick only has slots 1 and 2 */
+ if (pcibr_soft->bs_xid == 0xe) {
+ do_pcibr_rrb_autoalloc(pcibr_soft, 1, VCHAN0, 8);
+ do_pcibr_rrb_autoalloc(pcibr_soft, 2, VCHAN0, 8);
+ }
+ else {
+ /* allocate one RRB for the serial port */
+ do_pcibr_rrb_autoalloc(pcibr_soft, 0, VCHAN0, 1);
+ }
+ break;
case MODULE_PXBRICK:
+ case MODULE_IXBRICK:
/*
* If the IO9 is in the PXBrick (bus1, slot1) allocate
* RRBs to all the devices
do_pcibr_rrb_autoalloc(pcibr_soft, 0, VCHAN0, 8);
do_pcibr_rrb_autoalloc(pcibr_soft, 1, VCHAN0, 8);
}
-
- break;
- case MODULE_PEBRICK:
- case MODULE_PBRICK:
- do_pcibr_rrb_autoalloc(pcibr_soft, 1, VCHAN0, 8);
- do_pcibr_rrb_autoalloc(pcibr_soft, 2, VCHAN0, 8);
- break;
- case MODULE_IBRICK:
- /* port 0xe on the Ibrick only has slots 1 and 2 */
- if (pcibr_soft->bs_xid == 0xe) {
- do_pcibr_rrb_autoalloc(pcibr_soft, 1, VCHAN0, 8);
- do_pcibr_rrb_autoalloc(pcibr_soft, 2, VCHAN0, 8);
- }
- else {
- /* allocate one RRB for the serial port */
- do_pcibr_rrb_autoalloc(pcibr_soft, 0, VCHAN0, 1);
- }
break;
} /* switch */
}
/* Call the device attach */
(void)pcibr_slot_call_device_attach(pcibr_vhdl, slot, 0);
-#ifdef PIC_LATER
-#if (defined(USS302_TIMEOUT_WAR))
- /*
- * If this bridge holds a Lucent USS-302 or USS-312 pci/usb controller,
- * increase the Bridge PCI retry backoff interval. This part seems
- * to go away for long periods of time if a DAC appears on the bus during
- * a read command that is being retried.
- */
-
-{
- ii_ixtt_u_t ixtt;
-
- for (slot = pcibr_soft->bs_min_slot;
- slot < PCIBR_NUM_SLOTS(pcibr_soft); ++slot) {
- if (pcibr_soft->bs_slot[slot].bss_vendor_id ==
- LUCENT_USBHC_VENDOR_ID_NUM &&
- (pcibr_soft->bs_slot[slot].bss_device_id ==
- LUCENT_USBHC302_DEVICE_ID_NUM ||
- pcibr_soft->bs_slot[slot].bss_device_id ==
- LUCENT_USBHC312_DEVICE_ID_NUM)) {
- printk(KERN_NOTICE
- "pcibr_attach: %x Bus holds a usb part - setting"
- "bridge PCI_RETRY_HLD to %d\n",
- pcibr_vhdl, USS302_BRIDGE_TIMEOUT_HLD);
-
- bridge->b_bus_timeout &= ~BRIDGE_BUS_PCI_RETRY_HLD_MASK;
- bridge->b_bus_timeout |=
- BRIDGE_BUS_PCI_RETRY_HLD(USS302_BRIDGE_TIMEOUT_HLD);
-
- /*
- * Have to consider the read response timer in the hub II as well
- */
-
- hubii_ixtt_get(xconn_vhdl, &ixtt);
-
- /*
- * bump rrsp_ps to allow at least 1ms for read
- * responses from this widget
- */
-
- ixtt.ii_ixtt_fld_s.i_rrsp_ps = 20000;
- hubii_ixtt_set(xconn_vhdl, &ixtt);
-
- /*
- * print the current setting
- */
-
- hubii_ixtt_get(xconn_vhdl, &ixtt);
- printk( "Setting hub ixtt.rrsp_ps field to 0x%x\n",
- ixtt.ii_ixtt_fld_s.i_rrsp_ps);
-
- break; /* only need to do it once */
- }
- }
-}
-#endif /* (defined(USS302_TIMEOUT_WAR)) */
-#else
- FIXME("pcibr_attach: Call do_pcibr_rrb_autoalloc nicinfo\n");
-#endif /* PIC_LATER */
-
- if (aa)
- async_attach_add_info(noslot_conn, aa);
-
pciio_device_attach(noslot_conn, (int)0);
- /*
- * Tear down pointer to async attach info -- async threads for
- * bridge's descendants may be running but the bridge's work is done.
- */
- if (aa)
- async_attach_del_info(xconn_vhdl);
-
return 0;
}
*/
int
-pcibr_detach(devfs_handle_t xconn)
+pcibr_detach(vertex_hdl_t xconn)
{
pciio_slot_t slot;
- devfs_handle_t pcibr_vhdl;
+ vertex_hdl_t pcibr_vhdl;
pcibr_soft_t pcibr_soft;
bridge_t *bridge;
unsigned s;
}
int
-pcibr_asic_rev(devfs_handle_t pconn_vhdl)
+pcibr_asic_rev(vertex_hdl_t pconn_vhdl)
{
- devfs_handle_t pcibr_vhdl;
+ vertex_hdl_t pcibr_vhdl;
int tmp_vhdl;
arbitrary_info_t ainfo;
}
int
-pcibr_write_gather_flush(devfs_handle_t pconn_vhdl)
+pcibr_write_gather_flush(vertex_hdl_t pconn_vhdl)
{
pciio_info_t pciio_info = pciio_info_get(pconn_vhdl);
pcibr_soft_t pcibr_soft = (pcibr_soft_t) pciio_info_mfast_get(pciio_info);
*/
static iopaddr_t
-pcibr_addr_pci_to_xio(devfs_handle_t pconn_vhdl,
+pcibr_addr_pci_to_xio(vertex_hdl_t pconn_vhdl,
pciio_slot_t slot,
pciio_space_t space,
iopaddr_t pci_addr,
unsigned bar; /* which BASE reg on device is decoding */
iopaddr_t xio_addr = XIO_NOWHERE;
+ iopaddr_t base; /* base of devio(x) mapped area on PCI */
+ iopaddr_t limit; /* limit of devio(x) mapped area on PCI */
pciio_space_t wspace; /* which space device is decoding */
iopaddr_t wbase; /* base of device decode on PCI */
PCIBR_DEBUG((PCIBR_DEBUG_DEVREG, pconn_vhdl,
"pcibr_addr_pci_to_xio: Device(%d): %x\n",
win, devreg, device_bits));
-#else
- printk("pcibr_addr_pci_to_xio: Device(%d): %x\n", win, devreg);
#endif
}
pcibr_soft->bs_slot[win].bss_devio.bssd_space = space;
*/
case PCIIO_SPACE_MEM: /* "mem space" */
case PCIIO_SPACE_MEM32: /* "mem, use 32-bit-wide bus" */
- if ((pci_addr + BRIDGE_PCI_MEM32_BASE + req_size - 1) <=
- BRIDGE_PCI_MEM32_LIMIT)
- xio_addr = pci_addr + BRIDGE_PCI_MEM32_BASE;
+ if (IS_PIC_BUSNUM_SOFT(pcibr_soft, 0)) { /* PIC bus 0 */
+ base = PICBRIDGE0_PCI_MEM32_BASE;
+ limit = PICBRIDGE0_PCI_MEM32_LIMIT;
+ } else if (IS_PIC_BUSNUM_SOFT(pcibr_soft, 1)) { /* PIC bus 1 */
+ base = PICBRIDGE1_PCI_MEM32_BASE;
+ limit = PICBRIDGE1_PCI_MEM32_LIMIT;
+ } else { /* Bridge/Xbridge */
+ base = BRIDGE_PCI_MEM32_BASE;
+ limit = BRIDGE_PCI_MEM32_LIMIT;
+ }
+
+ if ((pci_addr + base + req_size - 1) <= limit)
+ xio_addr = pci_addr + base;
break;
case PCIIO_SPACE_MEM64: /* "mem, use 64-bit-wide bus" */
- if ((pci_addr + BRIDGE_PCI_MEM64_BASE + req_size - 1) <=
- BRIDGE_PCI_MEM64_LIMIT)
- xio_addr = pci_addr + BRIDGE_PCI_MEM64_BASE;
+ if (IS_PIC_BUSNUM_SOFT(pcibr_soft, 0)) { /* PIC bus 0 */
+ base = PICBRIDGE0_PCI_MEM64_BASE;
+ limit = PICBRIDGE0_PCI_MEM64_LIMIT;
+ } else if (IS_PIC_BUSNUM_SOFT(pcibr_soft, 1)) { /* PIC bus 1 */
+ base = PICBRIDGE1_PCI_MEM64_BASE;
+ limit = PICBRIDGE1_PCI_MEM64_LIMIT;
+ } else { /* Bridge/Xbridge */
+ base = BRIDGE_PCI_MEM64_BASE;
+ limit = BRIDGE_PCI_MEM64_LIMIT;
+ }
+
+ if ((pci_addr + base + req_size - 1) <= limit)
+ xio_addr = pci_addr + base;
break;
case PCIIO_SPACE_IO: /* "i/o space" */
+ /*
+ * PIC bridges do not support big-window aliases into PCI I/O space
+ */
+ if (IS_PIC_SOFT(pcibr_soft)) {
+ xio_addr = XIO_NOWHERE;
+ break;
+ }
+
/* Bridge Hardware Bug WAR #482741:
* The 4G area that maps directly from
* XIO space to PCI I/O space is busted
/*ARGSUSED6 */
pcibr_piomap_t
-pcibr_piomap_alloc(devfs_handle_t pconn_vhdl,
+pcibr_piomap_alloc(vertex_hdl_t pconn_vhdl,
device_desc_t dev_desc,
pciio_space_t space,
iopaddr_t pci_addr,
pciio_info_t pciio_info = &pcibr_info->f_c;
pciio_slot_t pciio_slot = PCIBR_INFO_SLOT_GET_INT(pciio_info);
pcibr_soft_t pcibr_soft = (pcibr_soft_t) pciio_info_mfast_get(pciio_info);
- devfs_handle_t xconn_vhdl = pcibr_soft->bs_conn;
+ vertex_hdl_t xconn_vhdl = pcibr_soft->bs_conn;
pcibr_piomap_t *mapptr;
pcibr_piomap_t maplist;
/*ARGSUSED */
caddr_t
-pcibr_piotrans_addr(devfs_handle_t pconn_vhdl,
+pcibr_piotrans_addr(vertex_hdl_t pconn_vhdl,
device_desc_t dev_desc,
pciio_space_t space,
iopaddr_t pci_addr,
pciio_info_t pciio_info = pciio_info_get(pconn_vhdl);
pciio_slot_t pciio_slot = PCIBR_INFO_SLOT_GET_INT(pciio_info);
pcibr_soft_t pcibr_soft = (pcibr_soft_t) pciio_info_mfast_get(pciio_info);
- devfs_handle_t xconn_vhdl = pcibr_soft->bs_conn;
+ vertex_hdl_t xconn_vhdl = pcibr_soft->bs_conn;
iopaddr_t xio_addr;
caddr_t addr;
/*ARGSUSED */
iopaddr_t
-pcibr_piospace_alloc(devfs_handle_t pconn_vhdl,
+pcibr_piospace_alloc(vertex_hdl_t pconn_vhdl,
device_desc_t dev_desc,
pciio_space_t space,
size_t req_size,
/*ARGSUSED */
void
-pcibr_piospace_free(devfs_handle_t pconn_vhdl,
+pcibr_piospace_free(vertex_hdl_t pconn_vhdl,
pciio_space_t space,
iopaddr_t pciaddr,
size_t req_size)
/*ARGSUSED */
pcibr_dmamap_t
-pcibr_dmamap_alloc(devfs_handle_t pconn_vhdl,
+pcibr_dmamap_alloc(vertex_hdl_t pconn_vhdl,
device_desc_t dev_desc,
size_t req_size_max,
unsigned flags)
{
pciio_info_t pciio_info = pciio_info_get(pconn_vhdl);
pcibr_soft_t pcibr_soft = (pcibr_soft_t) pciio_info_mfast_get(pciio_info);
- devfs_handle_t xconn_vhdl = pcibr_soft->bs_conn;
+ vertex_hdl_t xconn_vhdl = pcibr_soft->bs_conn;
pciio_slot_t slot;
xwidgetnum_t xio_port;
iopaddr_t pci_addr;
pciio_slot_t slot;
+ if (IS_PIC_BUSNUM_SOFT(soft, 0)) {
+ if ((xio_addr >= PICBRIDGE0_PCI_MEM32_BASE) &&
+ (xio_lim <= PICBRIDGE0_PCI_MEM32_LIMIT)) {
+ pci_addr = xio_addr - PICBRIDGE0_PCI_MEM32_BASE;
+ return pci_addr;
+ }
+ if ((xio_addr >= PICBRIDGE0_PCI_MEM64_BASE) &&
+ (xio_lim <= PICBRIDGE0_PCI_MEM64_LIMIT)) {
+ pci_addr = xio_addr - PICBRIDGE0_PCI_MEM64_BASE;
+ return pci_addr;
+ }
+ } else if (IS_PIC_BUSNUM_SOFT(soft, 1)) {
+ if ((xio_addr >= PICBRIDGE1_PCI_MEM32_BASE) &&
+ (xio_lim <= PICBRIDGE1_PCI_MEM32_LIMIT)) {
+ pci_addr = xio_addr - PICBRIDGE1_PCI_MEM32_BASE;
+ return pci_addr;
+ }
+ if ((xio_addr >= PICBRIDGE1_PCI_MEM64_BASE) &&
+ (xio_lim <= PICBRIDGE1_PCI_MEM64_LIMIT)) {
+ pci_addr = xio_addr - PICBRIDGE1_PCI_MEM64_BASE;
+ return pci_addr;
+ }
+ } else {
if ((xio_addr >= BRIDGE_PCI_MEM32_BASE) &&
(xio_lim <= BRIDGE_PCI_MEM32_LIMIT)) {
pci_addr = xio_addr - BRIDGE_PCI_MEM32_BASE;
pci_addr = xio_addr - BRIDGE_PCI_MEM64_BASE;
return pci_addr;
}
+ }
for (slot = soft->bs_min_slot; slot < PCIBR_NUM_SLOTS(soft); ++slot)
if ((xio_addr >= PCIBR_BRIDGE_DEVIO(soft, slot)) &&
(xio_lim < PCIBR_BRIDGE_DEVIO(soft, slot + 1))) {
}
/*ARGSUSED */
-alenlist_t
-pcibr_dmamap_list(pcibr_dmamap_t pcibr_dmamap,
- alenlist_t palenlist,
- unsigned flags)
-{
- pcibr_soft_t pcibr_soft;
- bridge_t *bridge=NULL;
-
- unsigned al_flags = (flags & PCIIO_NOSLEEP) ? AL_NOSLEEP : 0;
- int inplace = flags & PCIIO_INPLACE;
-
- alenlist_t pciio_alenlist = 0;
- alenlist_t xtalk_alenlist;
- size_t length;
- iopaddr_t offset;
- unsigned direct64;
- int ate_index = 0;
- int ate_count = 0;
- int ate_total = 0;
- bridge_ate_p ate_ptr = (bridge_ate_p)0;
- bridge_ate_t ate_proto = (bridge_ate_t)0;
- bridge_ate_t ate_prev;
- bridge_ate_t ate;
- alenaddr_t xio_addr;
- xwidgetnum_t xio_port;
- iopaddr_t pci_addr;
- alenaddr_t new_addr;
- unsigned cmd_regs[8];
- unsigned s = 0;
-
-#if PCIBR_FREEZE_TIME
- unsigned freeze_time;
-#endif
- int ate_freeze_done = 0; /* To pair ATE_THAW
- * with an ATE_FREEZE
- */
-
- pcibr_soft = pcibr_dmamap->bd_soft;
-
- xtalk_alenlist = xtalk_dmamap_list(pcibr_dmamap->bd_xtalk, palenlist,
- flags & DMAMAP_FLAGS);
- if (!xtalk_alenlist) {
- PCIBR_DEBUG_ALWAYS((PCIBR_DEBUG_DMAMAP, pcibr_dmamap->bd_dev,
- "pcibr_dmamap_list: xtalk_dmamap_list() failed, "
- "pcibr_dmamap=0x%x\n", pcibr_dmamap));
- goto fail;
- }
- alenlist_cursor_init(xtalk_alenlist, 0, NULL);
-
- if (inplace) {
- pciio_alenlist = xtalk_alenlist;
- } else {
- pciio_alenlist = alenlist_create(al_flags);
- if (!pciio_alenlist) {
- PCIBR_DEBUG_ALWAYS((PCIBR_DEBUG_DMAMAP, pcibr_dmamap->bd_dev,
- "pcibr_dmamap_list: alenlist_create() failed, "
- "pcibr_dmamap=0x%lx\n", (unsigned long)pcibr_dmamap));
- goto fail;
- }
- }
-
- direct64 = pcibr_dmamap->bd_flags & PCIIO_DMA_A64;
- if (!direct64) {
- bridge = pcibr_soft->bs_base;
- ate_ptr = pcibr_dmamap->bd_ate_ptr;
- ate_index = pcibr_dmamap->bd_ate_index;
- ate_proto = pcibr_dmamap->bd_ate_proto;
- ATE_FREEZE();
- ate_freeze_done = 1; /* Remember that we need to do an ATE_THAW */
- }
- pci_addr = pcibr_dmamap->bd_pci_addr;
-
- ate_prev = 0; /* matches no valid ATEs */
- while (ALENLIST_SUCCESS ==
- alenlist_get(xtalk_alenlist, NULL, 0,
- &xio_addr, &length, al_flags)) {
- if (XIO_PACKED(xio_addr)) {
- xio_port = XIO_PORT(xio_addr);
- xio_addr = XIO_ADDR(xio_addr);
- } else
- xio_port = pcibr_dmamap->bd_xio_port;
-
- if (xio_port == pcibr_soft->bs_xid) {
- new_addr = pcibr_addr_xio_to_pci(pcibr_soft, xio_addr, length);
- if (new_addr == PCI_NOWHERE) {
- PCIBR_DEBUG_ALWAYS((PCIBR_DEBUG_DMAMAP, pcibr_dmamap->bd_dev,
- "pcibr_dmamap_list: pcibr_addr_xio_to_pci failed, "
- "pcibr_dmamap=0x%x\n", pcibr_dmamap));
- goto fail;
- }
- } else if (direct64) {
- new_addr = pci_addr | xio_addr
- | ((uint64_t) xio_port << PCI64_ATTR_TARG_SHFT);
-
- /* Bridge Hardware WAR #482836:
- * If the transfer is not cache aligned
- * and the Bridge Rev is <= B, force
- * prefetch to be off.
- */
- if (flags & PCIBR_NOPREFETCH)
- new_addr &= ~PCI64_ATTR_PREF;
-
- } else {
- /* calculate the ate value for
- * the first address. If it
- * matches the previous
- * ATE written (ie. we had
- * multiple blocks in the
- * same IOPG), then back up
- * and reuse that ATE.
- *
- * We are NOT going to
- * aggressively try to
- * reuse any other ATEs.
- */
- offset = IOPGOFF(xio_addr);
- ate = ate_proto
- | (xio_port << ATE_TIDSHIFT)
- | (xio_addr - offset);
- if (ate == ate_prev) {
- PCIBR_DEBUG((PCIBR_DEBUG_ATE, pcibr_dmamap->bd_dev,
- "pcibr_dmamap_list: ATE share\n"));
- ate_ptr--;
- ate_index--;
- pci_addr -= IOPGSIZE;
- }
- new_addr = pci_addr + offset;
-
- /* Fill in the hardware ATEs
- * that contain this block.
- */
- ate_count = IOPG(offset + length - 1) + 1;
- ate_total += ate_count;
-
- /* Ensure that this map contains enough ATE's */
- if (ate_total > pcibr_dmamap->bd_ate_count) {
- PCIBR_DEBUG_ALWAYS((PCIBR_DEBUG_ATE, pcibr_dmamap->bd_dev,
- "pcibr_dmamap_list :\n"
- "\twanted xio_addr [0x%x..0x%x]\n"
- "\tate_total 0x%x bd_ate_count 0x%x\n"
- "\tATE's required > number allocated\n",
- xio_addr, xio_addr + length - 1,
- ate_total, pcibr_dmamap->bd_ate_count));
- goto fail;
- }
-
- ATE_WRITE();
-
- ate_index += ate_count;
- ate_ptr += ate_count;
-
- ate_count <<= IOPFNSHIFT;
- ate += ate_count;
- pci_addr += ate_count;
- }
-
- /* write the PCI DMA address
- * out to the scatter-gather list.
- */
- if (inplace) {
- if (ALENLIST_SUCCESS !=
- alenlist_replace(pciio_alenlist, NULL,
- &new_addr, &length, al_flags)) {
- PCIBR_DEBUG_ALWAYS((PCIBR_DEBUG_DMAMAP, pcibr_dmamap->bd_dev,
- "pcibr_dmamap_list: alenlist_replace() failed, "
- "pcibr_dmamap=0x%x\n", pcibr_dmamap));
-
- goto fail;
- }
- } else {
- if (ALENLIST_SUCCESS !=
- alenlist_append(pciio_alenlist,
- new_addr, length, al_flags)) {
- PCIBR_DEBUG_ALWAYS((PCIBR_DEBUG_DMAMAP, pcibr_dmamap->bd_dev,
- "pcibr_dmamap_list: alenlist_append() failed, "
- "pcibr_dmamap=0x%x\n", pcibr_dmamap));
- goto fail;
- }
- }
- }
- if (!inplace)
- alenlist_done(xtalk_alenlist);
-
- /* Reset the internal cursor of the alenlist to be returned back
- * to the caller.
- */
- alenlist_cursor_init(pciio_alenlist, 0, NULL);
-
-
- /* In case an ATE_FREEZE was done do the ATE_THAW to unroll all the
- * changes that ATE_FREEZE has done to implement the external SSRAM
- * bug workaround.
- */
- if (ate_freeze_done) {
- ATE_THAW();
- if ( IS_PIC_SOFT(pcibr_soft) ) {
- bridge->b_wid_tflush; /* wait until Bridge PIO complete */
- }
- else {
- if (io_get_sh_swapper(NASID_GET(bridge))) {
- BRIDGE_REG_GET32((&bridge->b_wid_tflush));
- } else {
- bridge->b_wid_tflush;
- }
- }
- }
- PCIBR_DEBUG((PCIBR_DEBUG_DMAMAP, pcibr_dmamap->bd_dev,
- "pcibr_dmamap_list: pcibr_dmamap=0x%x, pciio_alenlist=0x%x\n",
- pcibr_dmamap, pciio_alenlist));
-
- return pciio_alenlist;
-
- fail:
- /* There are various points of failure after doing an ATE_FREEZE
- * We need to do an ATE_THAW. Otherwise the ATEs are locked forever.
- * The decision to do an ATE_THAW needs to be based on whether a
- * an ATE_FREEZE was done before.
- */
- if (ate_freeze_done) {
- ATE_THAW();
- if ( IS_PIC_SOFT(pcibr_soft) ) {
- bridge->b_wid_tflush;
- }
- else {
- if (io_get_sh_swapper(NASID_GET(bridge))) {
- BRIDGE_REG_GET32((&bridge->b_wid_tflush));
- } else {
- bridge->b_wid_tflush;
- }
- }
- }
- if (pciio_alenlist && !inplace)
- alenlist_destroy(pciio_alenlist);
- return 0;
-}
-
-/*ARGSUSED */
void
pcibr_dmamap_done(pcibr_dmamap_t pcibr_dmamap)
{
/*ARGSUSED */
cnodeid_t
-pcibr_get_dmatrans_node(devfs_handle_t pconn_vhdl)
+pcibr_get_dmatrans_node(vertex_hdl_t pconn_vhdl)
{
pciio_info_t pciio_info = pciio_info_get(pconn_vhdl);
/*ARGSUSED */
iopaddr_t
-pcibr_dmatrans_addr(devfs_handle_t pconn_vhdl,
+pcibr_dmatrans_addr(vertex_hdl_t pconn_vhdl,
device_desc_t dev_desc,
paddr_t paddr,
size_t req_size,
{
pciio_info_t pciio_info = pciio_info_get(pconn_vhdl);
pcibr_soft_t pcibr_soft = (pcibr_soft_t) pciio_info_mfast_get(pciio_info);
- devfs_handle_t xconn_vhdl = pcibr_soft->bs_conn;
+ vertex_hdl_t xconn_vhdl = pcibr_soft->bs_conn;
pciio_slot_t pciio_slot = PCIBR_INFO_SLOT_GET_INT(pciio_info);
pcibr_soft_slot_t slotp = &pcibr_soft->bs_slot[pciio_slot];
return 0;
}
-/*ARGSUSED */
-alenlist_t
-pcibr_dmatrans_list(devfs_handle_t pconn_vhdl,
- device_desc_t dev_desc,
- alenlist_t palenlist,
- unsigned flags)
-{
- pciio_info_t pciio_info = pciio_info_get(pconn_vhdl);
- pcibr_soft_t pcibr_soft = (pcibr_soft_t) pciio_info_mfast_get(pciio_info);
- devfs_handle_t xconn_vhdl = pcibr_soft->bs_conn;
- pciio_slot_t pciio_slot = PCIBR_INFO_SLOT_GET_INT(pciio_info);
- pcibr_soft_slot_t slotp = &pcibr_soft->bs_slot[pciio_slot];
- xwidgetnum_t xio_port;
-
- alenlist_t pciio_alenlist = 0;
- alenlist_t xtalk_alenlist = 0;
-
- int inplace;
- unsigned direct64;
- unsigned al_flags;
-
- iopaddr_t xio_base;
- alenaddr_t xio_addr;
- size_t xio_size;
-
- size_t map_size;
- iopaddr_t pci_base;
- alenaddr_t pci_addr;
-
- unsigned relbits = 0;
-
- /* merge in forced flags */
- flags |= pcibr_soft->bs_dma_flags;
-
- inplace = flags & PCIIO_INPLACE;
- direct64 = flags & PCIIO_DMA_A64;
- al_flags = (flags & PCIIO_NOSLEEP) ? AL_NOSLEEP : 0;
-
- if (direct64) {
- map_size = 1ull << 48;
- xio_base = 0;
- pci_base = slotp->bss_d64_base;
- if ((pci_base != PCIBR_D64_BASE_UNSET) &&
- (flags == slotp->bss_d64_flags)) {
- /* reuse previous base info */
- } else if (pcibr_try_set_device(pcibr_soft, pciio_slot, flags, BRIDGE_DEV_D64_BITS) < 0) {
- /* DMA configuration conflict */
- PCIBR_DEBUG((PCIBR_DEBUG_DMADIR, pconn_vhdl,
- "pcibr_dmatrans_list: DMA configuration conflict "
- "for direct64, flags=0x%x\n", flags));
- goto fail;
- } else {
- relbits = BRIDGE_DEV_D64_BITS;
- pci_base =
- pcibr_flags_to_d64(flags, pcibr_soft);
- }
- } else {
- xio_base = pcibr_soft->bs_dir_xbase;
- map_size = 1ull << 31;
- pci_base = slotp->bss_d32_base;
- if ((pci_base != PCIBR_D32_BASE_UNSET) &&
- (flags == slotp->bss_d32_flags)) {
- /* reuse previous base info */
- } else if (pcibr_try_set_device(pcibr_soft, pciio_slot, flags, BRIDGE_DEV_D32_BITS) < 0) {
- /* DMA configuration conflict */
- PCIBR_DEBUG((PCIBR_DEBUG_DMADIR, pconn_vhdl,
- "pcibr_dmatrans_list: DMA configuration conflict "
- "for direct32, flags=0x%x\n", flags));
- goto fail;
- } else {
- relbits = BRIDGE_DEV_D32_BITS;
- pci_base = PCI32_DIRECT_BASE;
- }
- }
-
- xtalk_alenlist = xtalk_dmatrans_list(xconn_vhdl, 0, palenlist,
- flags & DMAMAP_FLAGS);
- if (!xtalk_alenlist) {
- PCIBR_DEBUG((PCIBR_DEBUG_DMADIR, pconn_vhdl,
- "pcibr_dmatrans_list: xtalk_dmatrans_list failed "
- "xtalk_alenlist=0x%x\n", xtalk_alenlist));
- goto fail;
- }
-
- alenlist_cursor_init(xtalk_alenlist, 0, NULL);
-
- if (inplace) {
- pciio_alenlist = xtalk_alenlist;
- } else {
- pciio_alenlist = alenlist_create(al_flags);
- if (!pciio_alenlist) {
- PCIBR_DEBUG((PCIBR_DEBUG_DMADIR, pconn_vhdl,
- "pcibr_dmatrans_list: alenlist_create failed with "
- " 0x%x\n", pciio_alenlist));
- goto fail;
- }
- }
-
- while (ALENLIST_SUCCESS ==
- alenlist_get(xtalk_alenlist, NULL, 0,
- &xio_addr, &xio_size, al_flags)) {
-
- /*
- * find which XIO port this goes to.
- */
- if (XIO_PACKED(xio_addr)) {
- if (xio_addr == XIO_NOWHERE) {
- PCIBR_DEBUG((PCIBR_DEBUG_DMADIR, pconn_vhdl,
- "pcibr_dmatrans_list: xio_addr == XIO_NOWHERE\n"));
- return 0;
- }
- xio_port = XIO_PORT(xio_addr);
- xio_addr = XIO_ADDR(xio_addr);
- } else
- xio_port = pcibr_soft->bs_mxid;
-
- /*
- * If this DMA comes back to us,
- * return the PCI MEM address on
- * which it would land, or NULL
- * if the target is something
- * on bridge other than PCI MEM.
- */
- if (xio_port == pcibr_soft->bs_xid) {
- pci_addr = pcibr_addr_xio_to_pci(pcibr_soft, xio_addr, xio_size);
- if (pci_addr == (alenaddr_t)NULL) {
- PCIBR_DEBUG((PCIBR_DEBUG_DMADIR, pconn_vhdl,
- "pcibr_dmatrans_list: pcibr_addr_xio_to_pci failed "
- "xio_addr=0x%x, xio_size=0x%x\n", xio_addr, xio_size));
- goto fail;
- }
- } else if (direct64) {
- ASSERT(xio_port != 0);
- pci_addr = pci_base | xio_addr
- | ((uint64_t) xio_port << PCI64_ATTR_TARG_SHFT);
- } else {
- iopaddr_t offset = xio_addr - xio_base;
- iopaddr_t endoff = xio_size + offset;
-
- if ((xio_size > map_size) ||
- (xio_addr < xio_base) ||
- (xio_port != pcibr_soft->bs_dir_xport) ||
- (endoff > map_size)) {
- PCIBR_DEBUG((PCIBR_DEBUG_DMADIR, pconn_vhdl,
- "pcibr_dmatrans_list: xio_size > map_size fail\n"
- "xio_addr=0x%x, xio_size=0x%x. map_size=0x%x, "
- "xio_port=0x%x, endoff=0x%x\n",
- xio_addr, xio_size, map_size, xio_port, endoff));
- goto fail;
- }
-
- pci_addr = pci_base + (xio_addr - xio_base);
- }
-
- /* write the PCI DMA address
- * out to the scatter-gather list.
- */
- if (inplace) {
- if (ALENLIST_SUCCESS !=
- alenlist_replace(pciio_alenlist, NULL,
- &pci_addr, &xio_size, al_flags)) {
- PCIBR_DEBUG((PCIBR_DEBUG_DMADIR, pconn_vhdl,
- "pcibr_dmatrans_list: alenlist_replace failed\n"));
- goto fail;
- }
- } else {
- if (ALENLIST_SUCCESS !=
- alenlist_append(pciio_alenlist,
- pci_addr, xio_size, al_flags)) {
- PCIBR_DEBUG((PCIBR_DEBUG_DMADIR, pconn_vhdl,
- "pcibr_dmatrans_list: alenlist_append failed\n"));
- goto fail;
- }
- }
- }
-
- if (relbits) {
- if (direct64) {
- slotp->bss_d64_flags = flags;
- slotp->bss_d64_base = pci_base;
- } else {
- slotp->bss_d32_flags = flags;
- slotp->bss_d32_base = pci_base;
- }
- }
- if (!inplace)
- alenlist_done(xtalk_alenlist);
-
- /* Reset the internal cursor of the alenlist to be returned back
- * to the caller.
- */
- alenlist_cursor_init(pciio_alenlist, 0, NULL);
-
- PCIBR_DEBUG((PCIBR_DEBUG_DMADIR, pconn_vhdl,
- "pcibr_dmatrans_list: pciio_alenlist=0x%x\n",
- pciio_alenlist));
-
- return pciio_alenlist;
-
- fail:
- if (relbits)
- pcibr_release_device(pcibr_soft, pciio_slot, relbits);
- if (pciio_alenlist && !inplace)
- alenlist_destroy(pciio_alenlist);
- return 0;
-}
-
void
pcibr_dmamap_drain(pcibr_dmamap_t map)
{
}
void
-pcibr_dmaaddr_drain(devfs_handle_t pconn_vhdl,
+pcibr_dmaaddr_drain(vertex_hdl_t pconn_vhdl,
paddr_t paddr,
size_t bytes)
{
pciio_info_t pciio_info = pciio_info_get(pconn_vhdl);
pcibr_soft_t pcibr_soft = (pcibr_soft_t) pciio_info_mfast_get(pciio_info);
- devfs_handle_t xconn_vhdl = pcibr_soft->bs_conn;
+ vertex_hdl_t xconn_vhdl = pcibr_soft->bs_conn;
xtalk_dmaaddr_drain(xconn_vhdl, paddr, bytes);
}
void
-pcibr_dmalist_drain(devfs_handle_t pconn_vhdl,
+pcibr_dmalist_drain(vertex_hdl_t pconn_vhdl,
alenlist_t list)
{
pciio_info_t pciio_info = pciio_info_get(pconn_vhdl);
pcibr_soft_t pcibr_soft = (pcibr_soft_t) pciio_info_mfast_get(pciio_info);
- devfs_handle_t xconn_vhdl = pcibr_soft->bs_conn;
+ vertex_hdl_t xconn_vhdl = pcibr_soft->bs_conn;
xtalk_dmalist_drain(xconn_vhdl, list);
}
*/
/*ARGSUSED */
void
-pcibr_provider_startup(devfs_handle_t pcibr)
+pcibr_provider_startup(vertex_hdl_t pcibr)
{
}
/*ARGSUSED */
void
-pcibr_provider_shutdown(devfs_handle_t pcibr)
+pcibr_provider_shutdown(vertex_hdl_t pcibr)
{
}
int
-pcibr_reset(devfs_handle_t conn)
+pcibr_reset(vertex_hdl_t conn)
{
#ifdef PIC_LATER
pciio_info_t pciio_info = pciio_info_get(conn);
}
pciio_endian_t
-pcibr_endian_set(devfs_handle_t pconn_vhdl,
+pcibr_endian_set(vertex_hdl_t pconn_vhdl,
pciio_endian_t device_end,
pciio_endian_t desired_end)
{
}
pciio_priority_t
-pcibr_priority_set(devfs_handle_t pconn_vhdl,
+pcibr_priority_set(vertex_hdl_t pconn_vhdl,
pciio_priority_t device_prio)
{
pciio_info_t pciio_info = pciio_info_get(pconn_vhdl);
* Returns 0 on failure, 1 on success
*/
int
-pcibr_device_flags_set(devfs_handle_t pconn_vhdl,
+pcibr_device_flags_set(vertex_hdl_t pconn_vhdl,
pcibr_device_flags_t flags)
{
pciio_info_t pciio_info = pciio_info_get(pconn_vhdl);
(pciio_dmamap_alloc_f *) pcibr_dmamap_alloc,
(pciio_dmamap_free_f *) pcibr_dmamap_free,
(pciio_dmamap_addr_f *) pcibr_dmamap_addr,
- (pciio_dmamap_list_f *) pcibr_dmamap_list,
(pciio_dmamap_done_f *) pcibr_dmamap_done,
(pciio_dmatrans_addr_f *) pcibr_dmatrans_addr,
- (pciio_dmatrans_list_f *) pcibr_dmatrans_list,
(pciio_dmamap_drain_f *) pcibr_dmamap_drain,
(pciio_dmaaddr_drain_f *) pcibr_dmaaddr_drain,
(pciio_dmalist_drain_f *) pcibr_dmalist_drain,
(pciio_priority_set_f *) pcibr_priority_set,
(pciio_config_get_f *) pcibr_config_get,
(pciio_config_set_f *) pcibr_config_set,
-#ifdef PIC_LATER
- (pciio_error_devenable_f *) pcibr_error_devenable,
- (pciio_error_extract_f *) pcibr_error_extract,
- (pciio_driver_reg_callback_f *) pcibr_driver_reg_callback,
- (pciio_driver_unreg_callback_f *) pcibr_driver_unreg_callback,
-#else
(pciio_error_devenable_f *) 0,
(pciio_error_extract_f *) 0,
(pciio_driver_reg_callback_f *) 0,
(pciio_driver_unreg_callback_f *) 0,
-#endif /* PIC_LATER */
(pciio_device_unregister_f *) pcibr_device_unregister,
(pciio_dma_enabled_f *) pcibr_dma_enabled,
};
int
-pcibr_dma_enabled(devfs_handle_t pconn_vhdl)
+pcibr_dma_enabled(vertex_hdl_t pconn_vhdl)
{
pciio_info_t pciio_info = pciio_info_get(pconn_vhdl);
pcibr_soft_t pcibr_soft = (pcibr_soft_t) pciio_info_mfast_get(pciio_info);
* parameter 'format' is sent to the console.
*/
void
-pcibr_debug(uint32_t type, devfs_handle_t vhdl, char *format, ...)
+pcibr_debug(uint32_t type, vertex_hdl_t vhdl, char *format, ...)
{
char hwpath[MAXDEVNAME] = "\0";
char copy_of_hwpath[MAXDEVNAME];
short widget = -1;
short slot = -1;
va_list ap;
- char *strtok_r(char *string, const char *sepset, char **lasts);
if (pcibr_debug_mask & type) {
if (vhdl) {
char *cp;
if (strcmp(module, pcibr_debug_module)) {
- /* strtok_r() wipes out string, use a copy */
+ /* use a copy */
(void)strcpy(copy_of_hwpath, hwpath);
cp = strstr(copy_of_hwpath, "/module/");
if (cp) {
- char *last = NULL;
cp += strlen("/module");
- module = strtok_r(cp, "/", &last);
+ module = strsep(&cp, "/");
}
}
if (pcibr_debug_widget != -1) {
}
}
}
+
+int
+isIO9(nasid_t nasid) {
+ lboard_t *brd = (lboard_t *)KL_CONFIG_INFO(nasid);
+
+ while (brd) {
+ if (brd->brd_flags & LOCAL_MASTER_IO6) {
+ return 1;
+ }
+ brd = KLCF_NEXT(brd);
+ }
+ /* if it's dual ported, check the peer also */
+ nasid = NODEPDA(NASID_TO_COMPACT_NODEID(nasid))->xbow_peer;
+ if (nasid < 0) return 0;
+ brd = (lboard_t *)KL_CONFIG_INFO(nasid);
+ while (brd) {
+ if (brd->brd_flags & LOCAL_MASTER_IO6) {
+ return 1;
+ }
+ brd = KLCF_NEXT(brd);
+ }
+ return 0;
+}
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
- * Copyright (C) 2001-2002 Silicon Graphics, Inc. All rights reserved.
+ * Copyright (C) 2001-2003 Silicon Graphics, Inc. All rights reserved.
*/
#include <linux/types.h>
#include <asm/sn/prio.h>
#include <asm/sn/xtalk/xbow.h>
#include <asm/sn/ioc3.h>
-#include <asm/sn/eeprom.h>
#include <asm/sn/io.h>
#include <asm/sn/sn_private.h>
-#ifdef __ia64
-#define rmallocmap atemapalloc
-#define rmfreemap atemapfree
-#define rmfree atefree
-#define rmalloc atealloc
-#endif
-
extern int hubii_check_widget_disabled(nasid_t, int);
-#ifdef BRIDGE_B_DATACORR_WAR
-extern int ql_bridge_rev_b_war(devfs_handle_t);
-extern int bridge_rev_b_data_check_disable;
-char *rev_b_datacorr_warning =
-"***************************** WARNING! ******************************\n";
-char *rev_b_datacorr_mesg =
-"UNRECOVERABLE IO LINK ERROR. CONTACT SERVICE PROVIDER\n";
-#endif
+
/* =====================================================================
* ERROR HANDLING
BRIDGE_ISR_PCIBUS_PIOERR;
#endif
-#if defined (PCIBR_LLP_CONTROL_WAR)
-int pcibr_llp_control_war_cnt;
-#endif /* PCIBR_LLP_CONTROL_WAR */
+int pcibr_llp_control_war_cnt; /* PCIBR_LLP_CONTROL_WAR */
-/* FIXME: can these arrays be local ? */
-
-struct reg_values xio_cmd_pactyp[] =
+static struct reg_values xio_cmd_pactyp[] =
{
{0x0, "RdReq"},
{0x1, "RdResp"},
{0}
};
-struct reg_desc xio_cmd_bits[] =
+static struct reg_desc xio_cmd_bits[] =
{
{WIDGET_DIDN, -28, "DIDN", "%x"},
{WIDGET_SIDN, -24, "SIDN", "%x"},
#define F(s,n) { 1l<<(s),-(s), n }
-struct reg_desc bridge_int_status_desc[] =
-{
- F(45, "PCI_X_SPLIT_MES_PE"),/* PIC ONLY */
- F(44, "PCI_X_SPLIT_EMES"), /* PIC ONLY */
- F(43, "PCI_X_SPLIT_TO"), /* PIC ONLY */
- F(42, "PCI_X_UNEX_COMP"), /* PIC ONLY */
- F(41, "INT_RAM_PERR"), /* PIC ONLY */
- F(40, "PCI_X_ARB_ERR"), /* PIC ONLY */
- F(39, "PCI_X_REQ_TOUT"), /* PIC ONLY */
- F(38, "PCI_X_TABORT"), /* PIC ONLY */
- F(37, "PCI_X_PERR"), /* PIC ONLY */
- F(36, "PCI_X_SERR"), /* PIC ONLY */
- F(35, "PCI_X_MRETRY"), /* PIC ONLY */
- F(34, "PCI_X_MTOUT"), /* PIC ONLY */
- F(33, "PCI_X_DA_PARITY"), /* PIC ONLY */
- F(32, "PCI_X_AD_PARITY"), /* PIC ONLY */
- F(31, "MULTI_ERR"), /* BRIDGE ONLY */
- F(30, "PMU_ESIZE_EFAULT"),
- F(29, "UNEXPECTED_RESP"),
- F(28, "BAD_XRESP_PACKET"),
- F(27, "BAD_XREQ_PACKET"),
- F(26, "RESP_XTALK_ERROR"),
- F(25, "REQ_XTALK_ERROR"),
- F(24, "INVALID_ADDRESS"),
- F(23, "UNSUPPORTED_XOP"),
- F(22, "XREQ_FIFO_OFLOW"),
- F(21, "LLP_REC_SNERROR"),
- F(20, "LLP_REC_CBERROR"),
- F(19, "LLP_RCTY"),
- F(18, "LLP_TX_RETRY"),
- F(17, "LLP_TCTY"),
- F(16, "SSRAM_PERR"), /* BRIDGE ONLY */
- F(15, "PCI_ABORT"),
- F(14, "PCI_PARITY"),
- F(13, "PCI_SERR"),
- F(12, "PCI_PERR"),
- F(11, "PCI_MASTER_TOUT"),
- F(10, "PCI_RETRY_CNT"),
- F(9, "XREAD_REQ_TOUT"),
- F(8, "GIO_BENABLE_ERR"), /* BRIDGE ONLY */
- F(7, "INT7"),
- F(6, "INT6"),
- F(5, "INT5"),
- F(4, "INT4"),
- F(3, "INT3"),
- F(2, "INT2"),
- F(1, "INT1"),
- F(0, "INT0"),
- {0}
-};
-
-struct reg_values space_v[] =
+static struct reg_values space_v[] =
{
{PCIIO_SPACE_NONE, "none"},
{PCIIO_SPACE_ROM, "ROM"},
{PCIIO_SPACE_BAD, "BAD"},
{0}
};
-struct reg_desc space_desc[] =
+static struct reg_desc space_desc[] =
{
{0xFF, 0, "space", 0, space_v},
{0}
};
#define device_desc device_bits
-struct reg_desc device_bits[] =
+static struct reg_desc device_bits[] =
{
{BRIDGE_DEV_ERR_LOCK_EN, 0, "ERR_LOCK_EN"},
{BRIDGE_DEV_PAGE_CHK_DIS, 0, "PAGE_CHK_DIS"},
{0}
};
-void
+static void
print_bridge_errcmd(uint32_t cmdword, char *errtype)
{
printk("\t Bridge %s Error Command Word Register ", errtype);
print_register(cmdword, xio_cmd_bits);
}
-char *pcibr_isr_errs[] =
+static char *pcibr_isr_errs[] =
{
"", "", "", "", "", "", "", "",
"08: GIO non-contiguous byte enable in crosstalk packet", /* BRIDGE ONLY */
/*
* display memory directory state
*/
-void
+static void
pcibr_show_dir_state(paddr_t paddr, char *prefix)
{
#ifdef LATER
break;
case BRIDGE_ISR_PAGE_FAULT: /* bit30 PMU_PAGE_FAULT */
-/* case BRIDGE_ISR_PMU_ESIZE_FAULT: bit30 PMU_ESIZE_FAULT */
if (IS_XBRIDGE_OR_PIC_SOFT(pcibr_soft))
reg_desc = "Map Fault Address";
else
printk( "\t%s\n", pcibr_isr_errs[i]);
}
}
-
-#if BRIDGE_ERROR_INTR_WAR
- if (pcibr_soft->bs_rev_num == BRIDGE_PART_REV_A) { /* known bridge bug */
- /*
- * Should never receive interrupts for these reasons on Rev 1 bridge
- * as they are not enabled. Assert for it.
- */
- ASSERT((int_status & (BRIDGE_IMR_PCI_MST_TIMEOUT |
- BRIDGE_ISR_RESP_XTLK_ERR |
- BRIDGE_ISR_LLP_TX_RETRY)) == 0);
- }
- if (pcibr_soft->bs_rev_num < BRIDGE_PART_REV_C) { /* known bridge bug */
- /*
- * This interrupt is turned off at init time. So, should never
- * see this interrupt.
- */
- ASSERT((int_status & BRIDGE_ISR_BAD_XRESP_PKT) == 0);
- }
-#endif
}
-#define PCIBR_ERRINTR_GROUP(error) \
- (( error & (BRIDGE_IRR_PCI_GRP|BRIDGE_IRR_GIO_GRP)
-
-uint32_t
+static uint32_t
pcibr_errintr_group(uint32_t error)
{
uint32_t group = BRIDGE_IRR_MULTI_CLR;
picreg_t int_status_64;
int number_bits;
int i;
-
- /* REFERENCED */
uint64_t disable_errintr_mask = 0;
-#ifdef EHE_ENABLE
- int rv;
- int error_code = IOECODE_DMA | IOECODE_READ;
- ioerror_mode_t mode = MODE_DEVERROR;
- ioerror_t ioe;
-#endif /* EHE_ENABLE */
nasid_t nasid;
pcibr_soft->bs_errinfo.bserr_toutcnt++;
/* Let's go recursive */
return(pcibr_error_intr_handler(irq, arg, ep));
-#ifdef LATER
- timeout(pcibr_error_intr_handler, pcibr_soft, BRIDGE_PIOERR_TIMEOUT);
-#endif
- return;
}
/* We read the INT_STATUS register as a 64bit picreg_t for PIC and a
pcibr_pioerr_check(pcibr_soft);
}
-#ifdef BRIDGE_B_DATACORR_WAR
- if ((pcibr_soft->bs_rev_num == BRIDGE_PART_REV_B) &&
- (err_status & BRIDGE_IMR_LLP_REC_CBERR)) {
- if (bridge_rev_b_data_check_disable)
- printk(KERN_WARNING "\n%s%s: %s%s\n", rev_b_datacorr_warning,
- pcibr_soft->bs_name, rev_b_datacorr_mesg,
- rev_b_datacorr_warning);
- else {
- ql_bridge_rev_b_war(pcibr_soft->bs_vhdl);
- PRINT_PANIC( "\n%s%s: %s%s\n", rev_b_datacorr_warning,
- pcibr_soft->bs_name, rev_b_datacorr_mesg,
- rev_b_datacorr_warning);
- }
-
- err_status &= ~BRIDGE_IMR_LLP_REC_CBERR;
- }
-#endif /* BRIDGE_B_DATACORR_WAR */
-
if (err_status) {
struct bs_errintr_stat_s *bs_estat = pcibr_soft->bs_errintr_stat;
(0x00402000 == (0x00F07F00 & bridge->b_wid_err_cmdword))) {
err_status &= ~BRIDGE_ISR_INVLD_ADDR;
}
-#if defined (PCIBR_LLP_CONTROL_WAR)
/*
- * The bridge bug, where the llp_config or control registers
+ * The bridge bug (PCIBR_LLP_CONTROL_WAR), where the llp_config or control registers
* need to be read back after being written, affects an MP
* system since there could be small windows between writing
* the register and reading it back on one cpu while another
if ((err_status & BRIDGE_ISR_INVLD_ADDR) &&
((((uint64_t) bridge->b_wid_err_upper << 32) | (bridge->b_wid_err_lower))
== (BRIDGE_INT_RST_STAT & 0xff0))) {
-#if 0
- if (kdebug)
- printk(KERN_NOTICE "%s bridge: ignoring llp/control address interrupt",
- pcibr_soft->bs_name);
-#endif
pcibr_llp_control_war_cnt++;
err_status &= ~BRIDGE_ISR_INVLD_ADDR;
}
-#endif /* PCIBR_LLP_CONTROL_WAR */
-
-#ifdef EHE_ENABLE
- /* Check if this is the RESP_XTALK_ERROR interrupt.
- * This can happen due to a failed DMA READ operation.
- */
- if (err_status & BRIDGE_ISR_RESP_XTLK_ERR) {
- /* Phase 1 : Look at the error state in the bridge and further
- * down in the device layers.
- */
- (void)error_state_set(pcibr_soft->bs_conn, ERROR_STATE_LOOKUP);
- IOERROR_SETVALUE(&ioe, widgetnum, pcibr_soft->bs_xid);
- (void)pcibr_error_handler((error_handler_arg_t)pcibr_soft,
- error_code,
- mode,
- &ioe);
- /* Phase 2 : Perform the action agreed upon in phase 1.
- */
- (void)error_state_set(pcibr_soft->bs_conn, ERROR_STATE_ACTION);
- rv = pcibr_error_handler((error_handler_arg_t)pcibr_soft,
- error_code,
- mode,
- &ioe);
- }
- if (rv != IOERROR_HANDLED) {
-#endif /* EHE_ENABLE */
bridge_errors_to_dump |= BRIDGE_ISR_PCIBUS_PIOERR;
*/
if (IS_PIC_SOFT(pcibr_soft) && PCIBR_WAR_ENABLED(PV867308, pcibr_soft) &&
(err_status & (BRIDGE_ISR_LLP_REC_SNERR | BRIDGE_ISR_LLP_REC_CBERR))) {
- printk("BRIDGE ERR_STATUS 0x%x\n", err_status);
+ printk("BRIDGE ERR_STATUS 0x%lx\n", err_status);
pcibr_error_dump(pcibr_soft);
-#ifdef LATER
- machine_error_dump("");
-#endif
PRINT_PANIC("PCI Bridge Error interrupt killed the system");
}
if (err_status & BRIDGE_ISR_ERROR_FATAL) {
-#ifdef LATER
- machine_error_dump("");
-#endif
PRINT_PANIC("PCI Bridge Error interrupt killed the system");
/*NOTREACHED */
}
-#ifdef EHE_ENABLE
- }
-#endif
/*
* We can't return without re-enabling the interrupt, since
pcibr_soft->bs_errinfo.bserr_intstat = 0;
}
-/*
- * pcibr_addr_toslot
- * Given the 'pciaddr' find out which slot this address is
- * allocated to, and return the slot number.
- * While we have the info handy, construct the
- * function number, space code and offset as well.
- *
- * NOTE: if this routine is called, we don't know whether
- * the address is in CFG, MEM, or I/O space. We have to guess.
- * This will be the case on PIO stores, where the only way
- * we have of getting the address is to check the Bridge, which
- * stores the PCI address but not the space and not the xtalk
- * address (from which we could get it).
- */
-int
-pcibr_addr_toslot(pcibr_soft_t pcibr_soft,
- iopaddr_t pciaddr,
- pciio_space_t *spacep,
- iopaddr_t *offsetp,
- pciio_function_t *funcp)
-{
- int s, f = 0, w;
- iopaddr_t base;
- size_t size;
- pciio_piospace_t piosp;
-
- /*
- * Check if the address is in config space
- */
-
- if ((pciaddr >= BRIDGE_CONFIG_BASE) && (pciaddr < BRIDGE_CONFIG_END)) {
-
- if (pciaddr >= BRIDGE_CONFIG1_BASE)
- pciaddr -= BRIDGE_CONFIG1_BASE;
- else
- pciaddr -= BRIDGE_CONFIG_BASE;
-
- s = pciaddr / BRIDGE_CONFIG_SLOT_SIZE;
- pciaddr %= BRIDGE_CONFIG_SLOT_SIZE;
-
- if (funcp) {
- f = pciaddr / 0x100;
- pciaddr %= 0x100;
- }
- if (spacep)
- *spacep = PCIIO_SPACE_CFG;
- if (offsetp)
- *offsetp = pciaddr;
- if (funcp)
- *funcp = f;
-
- return s;
- }
- for (s = pcibr_soft->bs_min_slot; s < PCIBR_NUM_SLOTS(pcibr_soft); ++s) {
- int nf = pcibr_soft->bs_slot[s].bss_ninfo;
- pcibr_info_h pcibr_infoh = pcibr_soft->bs_slot[s].bss_infos;
-
- for (f = 0; f < nf; f++) {
- pcibr_info_t pcibr_info = pcibr_infoh[f];
-
- if (!pcibr_info)
- continue;
- for (w = 0; w < 6; w++) {
- if (pcibr_info->f_window[w].w_space == PCIIO_SPACE_NONE) {
- continue;
- }
- base = pcibr_info->f_window[w].w_base;
- size = pcibr_info->f_window[w].w_size;
-
- if ((pciaddr >= base) && (pciaddr < (base + size))) {
- if (spacep)
- *spacep = PCIIO_SPACE_WIN(w);
- if (offsetp)
- *offsetp = pciaddr - base;
- if (funcp)
- *funcp = f;
- return s;
- } /* endif match */
- } /* next window */
- } /* next func */
- } /* next slot */
-
- /*
- * Check if the address was allocated as part of the
- * pcibr_piospace_alloc calls.
- */
- for (s = pcibr_soft->bs_min_slot; s < PCIBR_NUM_SLOTS(pcibr_soft); ++s) {
- int nf = pcibr_soft->bs_slot[s].bss_ninfo;
- pcibr_info_h pcibr_infoh = pcibr_soft->bs_slot[s].bss_infos;
-
- for (f = 0; f < nf; f++) {
- pcibr_info_t pcibr_info = pcibr_infoh[f];
-
- if (!pcibr_info)
- continue;
- piosp = pcibr_info->f_piospace;
- while (piosp) {
- if ((piosp->start <= pciaddr) &&
- ((piosp->count + piosp->start) > pciaddr)) {
- if (spacep)
- *spacep = piosp->space;
- if (offsetp)
- *offsetp = pciaddr - piosp->start;
- return s;
- } /* endif match */
- piosp = piosp->next;
- } /* next piosp */
- } /* next func */
- } /* next slot */
-
- /*
- * Some other random address on the PCI bus ...
- * we have no way of knowing whether this was
- * a MEM or I/O access; so, for now, we just
- * assume that the low 1G is MEM, the next
- * 3G is I/O, and anything above the 4G limit
- * is obviously MEM.
- */
-
- if (spacep)
- *spacep = ((pciaddr < (1ul << 30)) ? PCIIO_SPACE_MEM :
- (pciaddr < (4ul << 30)) ? PCIIO_SPACE_IO :
- PCIIO_SPACE_MEM);
- if (offsetp)
- *offsetp = pciaddr;
-
- return PCIIO_SLOT_NONE;
-
-}
-
void
pcibr_error_cleanup(pcibr_soft_t pcibr_soft, int error_code)
{
(void) bridge->b_wid_tflush; /* flushbus */
}
-/*
- * pcibr_error_extract
- * Given the 'pcibr vertex handle' find out which slot
- * the bridge status error address (from pcibr_soft info
- * hanging off the vertex)
- * allocated to, and return the slot number.
- * While we have the info handy, construct the
- * space code and offset as well.
- *
- * NOTE: if this routine is called, we don't know whether
- * the address is in CFG, MEM, or I/O space. We have to guess.
- * This will be the case on PIO stores, where the only way
- * we have of getting the address is to check the Bridge, which
- * stores the PCI address but not the space and not the xtalk
- * address (from which we could get it).
- *
- * XXX- this interface has no way to return the function
- * number on a multifunction card, even though that data
- * is available.
- */
-
-pciio_slot_t
-pcibr_error_extract(devfs_handle_t pcibr_vhdl,
- pciio_space_t *spacep,
- iopaddr_t *offsetp)
-{
- pcibr_soft_t pcibr_soft = 0;
- iopaddr_t bserr_addr;
- bridge_t *bridge;
- pciio_slot_t slot = PCIIO_SLOT_NONE;
- arbitrary_info_t rev;
-
- /* Do a sanity check as to whether we really got a
- * bridge vertex handle.
- */
- if (hwgraph_info_get_LBL(pcibr_vhdl, INFO_LBL_PCIBR_ASIC_REV, &rev) !=
- GRAPH_SUCCESS)
- return(slot);
-
- pcibr_soft = pcibr_soft_get(pcibr_vhdl);
- if (pcibr_soft) {
- bridge = pcibr_soft->bs_base;
- bserr_addr =
- bridge->b_pci_err_lower |
- ((uint64_t) (bridge->b_pci_err_upper &
- BRIDGE_ERRUPPR_ADDRMASK) << 32);
-
- slot = pcibr_addr_toslot(pcibr_soft, bserr_addr,
- spacep, offsetp, NULL);
- }
- return slot;
-}
-
/*ARGSUSED */
void
pcibr_device_disable(pcibr_soft_t pcibr_soft, int devnum)
{
int retval = IOERROR_HANDLED;
- devfs_handle_t pcibr_vhdl = pcibr_soft->bs_vhdl;
+ vertex_hdl_t pcibr_vhdl = pcibr_soft->bs_vhdl;
bridge_t *bridge = pcibr_soft->bs_base;
iopaddr_t bad_xaddr;
ioerror_mode_t mode,
ioerror_t *ioe)
{
- devfs_handle_t pcibr_vhdl = pcibr_soft->bs_vhdl;
+ vertex_hdl_t pcibr_vhdl = pcibr_soft->bs_vhdl;
bridge_t *bridge = pcibr_soft->bs_base;
bridgereg_t bus_lowaddr, bus_uppraddr;
int retval = 0;
ioerror_mode_t mode,
ioerror_t *ioe)
{
- devfs_handle_t pcibr_vhdl = pcibr_soft->bs_vhdl;
+ vertex_hdl_t pcibr_vhdl = pcibr_soft->bs_vhdl;
int retval;
retval = pciio_error_handler(pcibr_vhdl, error_code, mode, ioe);
pcibr_soft_t pcibr_soft;
int retval = IOERROR_BADERRORCODE;
-#ifdef EHE_ENABLE
- devfs_handle_t xconn_vhdl,pcibr_vhdl;
- error_state_t e_state;
-#endif /* EHE_ENABLE */
-
pcibr_soft = (pcibr_soft_t) einfo;
PCIBR_DEBUG_ALWAYS((PCIBR_DEBUG_ERROR_HDLR, pcibr_soft->bs_conn,
"pcibr_error_handler: pcibr_soft=0x%x, error_code=0x%x\n",
pcibr_soft, error_code));
-#ifdef EHE_ENABLE
- xconn_vhdl = pcibr_soft->bs_conn;
- pcibr_vhdl = pcibr_soft->bs_vhdl;
-
- e_state = error_state_get(xconn_vhdl);
-
- if (error_state_set(pcibr_vhdl, e_state) ==
- ERROR_RETURN_CODE_CANNOT_SET_STATE)
- return(IOERROR_UNHANDLED);
-
- /* If we are in the action handling phase clean out the error state
- * on the xswitch.
- */
- if (e_state == ERROR_STATE_ACTION)
- (void)error_state_set(xconn_vhdl, ERROR_STATE_NONE);
-#endif /* EHE_ENABLE */
-
#if DEBUG && ERROR_DEBUG
printk( "%s: pcibr_error_handler\n", pcibr_soft->bs_name);
#endif
* the error from the PIO address.
*/
-#if 0
- if (mode == MODE_DEVPROBE)
- pio_retval = IOERROR_HANDLED;
- else {
-#endif
if (error_code & IOECODE_PIO) {
iopaddr_t bad_xaddr;
/*
pio_retval = IOERROR_UNHANDLED;
}
}
-#if 0
- } /* MODE_DEVPROBE */
-#endif
/*
* If the error was a result of a DMA Write, we tell what bus on the PIC
return IOERROR_HANDLED;
}
}
-
-
-/*
- * Reenable a device after handling the error.
- * This is called by the lower layers when they wish to be reenabled
- * after an error.
- * Note that each layer would be calling the previous layer to reenable
- * first, before going ahead with their own re-enabling.
- */
-
-int
-pcibr_error_devenable(devfs_handle_t pconn_vhdl, int error_code)
-{
- pciio_info_t pciio_info = pciio_info_get(pconn_vhdl);
- pciio_slot_t pciio_slot = PCIBR_INFO_SLOT_GET_INT(pciio_info);
- pcibr_soft_t pcibr_soft = (pcibr_soft_t) pciio_info_mfast_get(pciio_info);
-
- ASSERT(error_code & IOECODE_PIO);
-
- /* If the error is not known to be a write,
- * we have to call devenable.
- * write errors are isolated to the bridge.
- */
- if (!(error_code & IOECODE_WRITE)) {
- devfs_handle_t xconn_vhdl = pcibr_soft->bs_conn;
- int rc;
-
- rc = xtalk_error_devenable(xconn_vhdl, pciio_slot, error_code);
- if (rc != IOERROR_HANDLED)
- return rc;
- }
- pcibr_error_cleanup(pcibr_soft, error_code);
- return IOERROR_HANDLED;
-}
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
- * Copyright (C) 2001-2002 Silicon Graphics, Inc. All rights reserved.
+ * Copyright (C) 2001-2003 Silicon Graphics, Inc. All rights reserved.
*/
#include <linux/types.h>
#include <asm/sn/prio.h>
#include <asm/sn/xtalk/xbow.h>
#include <asm/sn/ioc3.h>
-#include <asm/sn/eeprom.h>
#include <asm/sn/io.h>
#include <asm/sn/sn_private.h>
-pcibr_hints_t pcibr_hints_get(devfs_handle_t, int);
-void pcibr_hints_fix_rrbs(devfs_handle_t);
-void pcibr_hints_dualslot(devfs_handle_t, pciio_slot_t, pciio_slot_t);
-void pcibr_hints_intr_bits(devfs_handle_t, pcibr_intr_bits_f *);
-void pcibr_set_rrb_callback(devfs_handle_t, rrb_alloc_funct_t);
-void pcibr_hints_handsoff(devfs_handle_t);
-void pcibr_hints_subdevs(devfs_handle_t, pciio_slot_t, uint64_t);
+pcibr_hints_t pcibr_hints_get(vertex_hdl_t, int);
+void pcibr_hints_fix_rrbs(vertex_hdl_t);
+void pcibr_hints_dualslot(vertex_hdl_t, pciio_slot_t, pciio_slot_t);
+void pcibr_hints_intr_bits(vertex_hdl_t, pcibr_intr_bits_f *);
+void pcibr_set_rrb_callback(vertex_hdl_t, rrb_alloc_funct_t);
+void pcibr_hints_handsoff(vertex_hdl_t);
+void pcibr_hints_subdevs(vertex_hdl_t, pciio_slot_t, uint64_t);
pcibr_hints_t
-pcibr_hints_get(devfs_handle_t xconn_vhdl, int alloc)
+pcibr_hints_get(vertex_hdl_t xconn_vhdl, int alloc)
{
arbitrary_info_t ainfo = 0;
graph_error_t rv;
}
void
-pcibr_hints_fix_some_rrbs(devfs_handle_t xconn_vhdl, unsigned mask)
+pcibr_hints_fix_some_rrbs(vertex_hdl_t xconn_vhdl, unsigned mask)
{
pcibr_hints_t hint = pcibr_hints_get(xconn_vhdl, 1);
}
void
-pcibr_hints_fix_rrbs(devfs_handle_t xconn_vhdl)
+pcibr_hints_fix_rrbs(vertex_hdl_t xconn_vhdl)
{
pcibr_hints_fix_some_rrbs(xconn_vhdl, 0xFF);
}
void
-pcibr_hints_dualslot(devfs_handle_t xconn_vhdl,
+pcibr_hints_dualslot(vertex_hdl_t xconn_vhdl,
pciio_slot_t host,
pciio_slot_t guest)
{
}
void
-pcibr_hints_intr_bits(devfs_handle_t xconn_vhdl,
+pcibr_hints_intr_bits(vertex_hdl_t xconn_vhdl,
pcibr_intr_bits_f *xxx_intr_bits)
{
pcibr_hints_t hint = pcibr_hints_get(xconn_vhdl, 1);
}
void
-pcibr_set_rrb_callback(devfs_handle_t xconn_vhdl, rrb_alloc_funct_t rrb_alloc_funct)
+pcibr_set_rrb_callback(vertex_hdl_t xconn_vhdl, rrb_alloc_funct_t rrb_alloc_funct)
{
pcibr_hints_t hint = pcibr_hints_get(xconn_vhdl, 1);
}
void
-pcibr_hints_handsoff(devfs_handle_t xconn_vhdl)
+pcibr_hints_handsoff(vertex_hdl_t xconn_vhdl)
{
pcibr_hints_t hint = pcibr_hints_get(xconn_vhdl, 1);
}
void
-pcibr_hints_subdevs(devfs_handle_t xconn_vhdl,
+pcibr_hints_subdevs(vertex_hdl_t xconn_vhdl,
pciio_slot_t slot,
uint64_t subdevs)
{
arbitrary_info_t ainfo = 0;
char sdname[16];
- devfs_handle_t pconn_vhdl = GRAPH_VERTEX_NONE;
+ vertex_hdl_t pconn_vhdl = GRAPH_VERTEX_NONE;
sprintf(sdname, "%s/%d", EDGE_LBL_PCI, slot);
(void) hwgraph_path_add(xconn_vhdl, sdname, &pconn_vhdl);
+++ /dev/null
-/*
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Copyright (C) 2001-2002 Silicon Graphics, Inc. All rights reserved.
- */
-
-#include <linux/types.h>
-#include <linux/slab.h>
-#include <linux/module.h>
-#include <asm/sn/sgi.h>
-#include <asm/sn/sn_cpuid.h>
-#include <asm/sn/addrs.h>
-#include <asm/sn/arch.h>
-#include <asm/sn/iograph.h>
-#include <asm/sn/invent.h>
-#include <asm/sn/hcl.h>
-#include <asm/sn/labelcl.h>
-#include <asm/sn/xtalk/xwidget.h>
-#include <asm/sn/pci/bridge.h>
-#include <asm/sn/pci/pciio.h>
-#include <asm/sn/pci/pcibr.h>
-#include <asm/sn/pci/pcibr_private.h>
-#include <asm/sn/pci/pci_defs.h>
-#include <asm/sn/prio.h>
-#include <asm/sn/xtalk/xbow.h>
-#include <asm/sn/ioc3.h>
-#include <asm/sn/eeprom.h>
-#include <asm/sn/io.h>
-#include <asm/sn/sn_private.h>
-
-#ifdef LATER
-
-char *pci_space[] = {"NONE",
- "ROM",
- "IO",
- "",
- "MEM",
- "MEM32",
- "MEM64",
- "CFG",
- "WIN0",
- "WIN1",
- "WIN2",
- "WIN3",
- "WIN4",
- "WIN5",
- "",
- "BAD"};
-
-void
-idbg_pss_func(pcibr_info_h pcibr_infoh, int func)
-{
- pcibr_info_t pcibr_info = pcibr_infoh[func];
- char name[MAXDEVNAME];
- int win;
-
- if (!pcibr_info)
- return;
- qprintf("Per-slot Function Info\n");
- sprintf(name, "%v", pcibr_info->f_vertex);
- qprintf("\tSlot Name : %s\n",name);
- qprintf("\tPCI Bus : %d ",pcibr_info->f_bus);
- qprintf("Slot : %d ", pcibr_info->f_slot);
- qprintf("Function : %d ", pcibr_info->f_func);
- qprintf("VendorId : 0x%x " , pcibr_info->f_vendor);
- qprintf("DeviceId : 0x%x\n", pcibr_info->f_device);
- sprintf(name, "%v", pcibr_info->f_master);
- qprintf("\tBus provider : %s\n",name);
- qprintf("\tProvider Fns : 0x%x ", pcibr_info->f_pops);
- qprintf("Error Handler : 0x%x Arg 0x%x\n",
- pcibr_info->f_efunc,pcibr_info->f_einfo);
- for(win = 0 ; win < 6 ; win++)
- qprintf("\tBase Reg #%d space %s base 0x%x size 0x%x\n",
- win,pci_space[pcibr_info->f_window[win].w_space],
- pcibr_info->f_window[win].w_base,
- pcibr_info->f_window[win].w_size);
-
- qprintf("\tRom base 0x%x size 0x%x\n",
- pcibr_info->f_rbase,pcibr_info->f_rsize);
-
- qprintf("\tInterrupt Bit Map\n");
- qprintf("\t\tPCI Int#\tBridge Pin#\n");
- for (win = 0 ; win < 4; win++)
- qprintf("\t\tINT%c\t\t%d\n",win+'A',pcibr_info->f_ibit[win]);
- qprintf("\n");
-}
-
-
-void
-idbg_pss_info(pcibr_soft_t pcibr_soft, pciio_slot_t slot)
-{
- pcibr_soft_slot_t pss;
- char slot_conn_name[MAXDEVNAME];
- int func;
-
- pss = &pcibr_soft->bs_slot[slot];
- qprintf("PCI INFRASTRUCTURAL INFO FOR SLOT %d\n", slot);
- qprintf("\tHost Present ? %s ", pss->has_host ? "yes" : "no");
- qprintf("\tHost Slot : %d\n",pss->host_slot);
- sprintf(slot_conn_name, "%v", pss->slot_conn);
- qprintf("\tSlot Conn : %s\n",slot_conn_name);
- qprintf("\t#Functions : %d\n",pss->bss_ninfo);
- for (func = 0; func < pss->bss_ninfo; func++)
- idbg_pss_func(pss->bss_infos,func);
- qprintf("\tSpace : %s ",pci_space[pss->bss_devio.bssd_space]);
- qprintf("\tBase : 0x%x ", pss->bss_devio.bssd_base);
- qprintf("\tShadow Devreg : 0x%x\n", pss->bss_device);
- qprintf("\tUsage counts : pmu %d d32 %d d64 %d\n",
- pss->bss_pmu_uctr,pss->bss_d32_uctr,pss->bss_d64_uctr);
-
- qprintf("\tDirect Trans Info : d64_base 0x%x d64_flags 0x%x"
- "d32_base 0x%x d32_flags 0x%x\n",
- pss->bss_d64_base, pss->bss_d64_flags,
- pss->bss_d32_base, pss->bss_d32_flags);
-
- qprintf("\tExt ATEs active ? %s",
- pss->bss_ext_ates_active ? "yes" : "no");
- qprintf(" Command register : 0x%x ", pss->bss_cmd_pointer);
- qprintf(" Shadow command val : 0x%x\n", pss->bss_cmd_shadow);
-
- qprintf("\tRRB Info : Valid %d+%d Reserved %d\n",
- pcibr_soft->bs_rrb_valid[slot],
- pcibr_soft->bs_rrb_valid[slot + PCIBR_RRB_SLOT_VIRTUAL],
- pcibr_soft->bs_rrb_res[slot]);
-
-}
-
-int ips = 0;
-
-void
-idbg_pss(pcibr_soft_t pcibr_soft)
-{
- pciio_slot_t slot;
-
-
- if (ips >= 0 && ips < 8)
- idbg_pss_info(pcibr_soft,ips);
- else if (ips < 0)
- for (slot = 0; slot < 8; slot++)
- idbg_pss_info(pcibr_soft,slot);
- else
- qprintf("Invalid ips %d\n",ips);
-}
-#endif /* LATER */
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
- * Copyright (C) 2001-2002 Silicon Graphics, Inc. All rights reserved.
+ * Copyright (C) 2001-2003 Silicon Graphics, Inc. All rights reserved.
*/
#include <linux/types.h>
#include <asm/sn/prio.h>
#include <asm/sn/xtalk/xbow.h>
#include <asm/sn/ioc3.h>
-#include <asm/sn/eeprom.h>
#include <asm/sn/io.h>
#include <asm/sn/sn_private.h>
#define rmfreemap atemapfree
#define rmfree atefree
#define rmalloc atealloc
+
+inline int
+compare_and_swap_ptr(void **location, void *old_ptr, void *new_ptr)
+{
+ FIXME("compare_and_swap_ptr : NOT ATOMIC");
+ if (*location == old_ptr) {
+ *location = new_ptr;
+ return(1);
+ }
+ else
+ return(0);
+}
#endif
unsigned pcibr_intr_bits(pciio_info_t info, pciio_intr_line_t lines, int nslots);
-pcibr_intr_t pcibr_intr_alloc(devfs_handle_t, device_desc_t, pciio_intr_line_t, devfs_handle_t);
+pcibr_intr_t pcibr_intr_alloc(vertex_hdl_t, device_desc_t, pciio_intr_line_t, vertex_hdl_t);
void pcibr_intr_free(pcibr_intr_t);
void pcibr_setpciint(xtalk_intr_t);
int pcibr_intr_connect(pcibr_intr_t, intr_func_t, intr_arg_t);
void pcibr_intr_disconnect(pcibr_intr_t);
-devfs_handle_t pcibr_intr_cpu_get(pcibr_intr_t);
+vertex_hdl_t pcibr_intr_cpu_get(pcibr_intr_t);
void pcibr_xintr_preset(void *, int, xwidgetnum_t, iopaddr_t, xtalk_intr_vector_t);
void pcibr_intr_func(intr_arg_t);
-extern pcibr_info_t pcibr_info_get(devfs_handle_t);
+extern pcibr_info_t pcibr_info_get(vertex_hdl_t);
/* =====================================================================
* INTERRUPT MANAGEMENT
}
/*
+ * On SN systems there is a race condition between a PIO read response
+ * and DMA's. In rare cases, the read response may beat the DMA, causing
+ * the driver to think that data in memory is complete and meaningful.
+ * This code eliminates that race.
+ * This routine is called by the PIO read routines after doing the read.
+ * This routine then forces a fake interrupt on another line, which
+ * is logically associated with the slot that the PIO is addressed to.
+ * (see sn_dma_flush_init() )
+ * It then spins while watching the memory location that the interrupt
+ * is targeted to. When the interrupt response arrives, we are sure
+ * that the DMA has landed in memory and it is safe for the driver
+ * to proceed.
+ */
+
+extern struct sn_flush_nasid_entry flush_nasid_list[MAX_NASIDS];
+
+void
+sn_dma_flush(unsigned long addr) {
+ nasid_t nasid;
+ int wid_num;
+ volatile struct sn_flush_device_list *p;
+ int i,j;
+ int bwin;
+ unsigned long flags;
+
+ nasid = NASID_GET(addr);
+ wid_num = SWIN_WIDGETNUM(addr);
+ bwin = BWIN_WINDOWNUM(addr);
+
+ if (flush_nasid_list[nasid].widget_p == NULL) return;
+ if (bwin > 0) {
+ bwin--;
+ switch (bwin) {
+ case 0:
+ wid_num = ((flush_nasid_list[nasid].iio_itte1) >> 8) & 0xf;
+ break;
+ case 1:
+ wid_num = ((flush_nasid_list[nasid].iio_itte2) >> 8) & 0xf;
+ break;
+ case 2:
+ wid_num = ((flush_nasid_list[nasid].iio_itte3) >> 8) & 0xf;
+ break;
+ case 3:
+ wid_num = ((flush_nasid_list[nasid].iio_itte4) >> 8) & 0xf;
+ break;
+ case 4:
+ wid_num = ((flush_nasid_list[nasid].iio_itte5) >> 8) & 0xf;
+ break;
+ case 5:
+ wid_num = ((flush_nasid_list[nasid].iio_itte6) >> 8) & 0xf;
+ break;
+ case 6:
+ wid_num = ((flush_nasid_list[nasid].iio_itte7) >> 8) & 0xf;
+ break;
+ }
+ }
+ if (flush_nasid_list[nasid].widget_p == NULL) return;
+ if (flush_nasid_list[nasid].widget_p[wid_num] == NULL) return;
+ p = &flush_nasid_list[nasid].widget_p[wid_num][0];
+
+ // find a matching BAR
+
+ for (i=0; i<DEV_PER_WIDGET;i++) {
+ for (j=0; j<PCI_ROM_RESOURCE;j++) {
+ if (p->bar_list[j].start == 0) break;
+ if (addr >= p->bar_list[j].start && addr <= p->bar_list[j].end) break;
+ }
+ if (j < PCI_ROM_RESOURCE && p->bar_list[j].start != 0) break;
+ p++;
+ }
+
+ // if no matching BAR, return without doing anything.
+
+ if (i == DEV_PER_WIDGET) return;
+
+ spin_lock_irqsave(&p->flush_lock, flags);
+
+ p->flush_addr = 0;
+
+ // force an interrupt.
+
+ *(bridgereg_t *)(p->force_int_addr) = 1;
+
+ // wait for the interrupt to come back.
+
+ while (p->flush_addr != 0x10f);
+
+ // okay, everything is synced up.
+ spin_unlock_irqrestore(&p->flush_lock, flags);
+
+ return;
+}
+
+EXPORT_SYMBOL(sn_dma_flush);
+
+/*
* There are end cases where a deadlock can occur if interrupt
* processing completes and the Bridge b_int_status bit is still set.
*
* to check if a specific Bridge b_int_status bit is set, and if so,
* cause the setting of the corresponding interrupt bit.
*
- * On a XBridge (SN1), we do this by writing the appropriate Bridge Force
- * Interrupt register. On SN0, or SN1 with an older Bridge, the Bridge
- * Force Interrupt register does not exist, so we write the Hub
- * INT_PEND_MOD register directly. Likewise for Octane, where we write the
- * Heart Set Interrupt Status register directly.
+ * On a XBridge (SN1) and PIC (SN2), we do this by writing the appropriate Bridge Force
+ * Interrupt register.
*/
void
-pcibr_force_interrupt(pcibr_intr_wrap_t wrap)
+pcibr_force_interrupt(pcibr_intr_t intr)
{
-#ifdef PIC_LATER
unsigned bit;
- pcibr_soft_t pcibr_soft = wrap->iw_soft;
+ unsigned bits;
+ pcibr_soft_t pcibr_soft = intr->bi_soft;
bridge_t *bridge = pcibr_soft->bs_base;
- bit = wrap->iw_ibit;
+ bits = intr->bi_ibits;
+ for (bit = 0; bit < 8; bit++) {
+ if (bits & (1 << bit)) {
- PCIBR_DEBUG((PCIBR_DEBUG_INTR, pcibr_soft->bs_vhdl,
- "pcibr_force_interrupt: bit=0x%x\n", bit));
+ PCIBR_DEBUG((PCIBR_DEBUG_INTR, pcibr_soft->bs_vhdl,
+ "pcibr_force_interrupt: bit=0x%x\n", bit));
- if (IS_XBRIDGE_OR_PIC_SOFT(pcibr_soft)) {
- bridge->b_force_pin[bit].intr = 1;
- } else if ((1 << bit) & *wrap->iw_stat) {
- cpuid_t cpu;
- unsigned intr_bit;
- xtalk_intr_t xtalk_intr =
- pcibr_soft->bs_intr[bit].bsi_xtalk_intr;
-
- intr_bit = (short) xtalk_intr_vector_get(xtalk_intr);
- cpu = xtalk_intr_cpuid_get(xtalk_intr);
- REMOTE_CPU_SEND_INTR(cpu, intr_bit);
+ if (IS_XBRIDGE_OR_PIC_SOFT(pcibr_soft)) {
+ bridge->b_force_pin[bit].intr = 1;
+ }
+ }
}
-#endif /* PIC_LATER */
}
/*ARGSUSED */
pcibr_intr_t
-pcibr_intr_alloc(devfs_handle_t pconn_vhdl,
+pcibr_intr_alloc(vertex_hdl_t pconn_vhdl,
device_desc_t dev_desc,
pciio_intr_line_t lines,
- devfs_handle_t owner_dev)
+ vertex_hdl_t owner_dev)
{
pcibr_info_t pcibr_info = pcibr_info_get(pconn_vhdl);
pciio_slot_t pciio_slot = PCIBR_INFO_SLOT_GET_INT(pcibr_info);
pcibr_soft_t pcibr_soft = (pcibr_soft_t) pcibr_info->f_mfast;
- devfs_handle_t xconn_vhdl = pcibr_soft->bs_conn;
+ vertex_hdl_t xconn_vhdl = pcibr_soft->bs_conn;
bridge_t *bridge = pcibr_soft->bs_base;
int is_threaded = 0;
{
iopaddr_t addr;
xtalk_intr_vector_t vect;
- devfs_handle_t vhdl;
+ vertex_hdl_t vhdl;
bridge_t *bridge;
+ picreg_t *int_addr;
addr = xtalk_intr_addr_get(xtalk_intr);
vect = xtalk_intr_vector_get(xtalk_intr);
vhdl = xtalk_intr_dev_get(xtalk_intr);
bridge = (bridge_t *)xtalk_piotrans_addr(vhdl, 0, 0, sizeof(bridge_t), 0);
- if (is_pic(bridge)) {
- picreg_t *int_addr;
- int_addr = (picreg_t *)xtalk_intr_sfarg_get(xtalk_intr);
- *int_addr = ((PIC_INT_ADDR_FLD & ((uint64_t)vect << 48)) |
+ int_addr = (picreg_t *)xtalk_intr_sfarg_get(xtalk_intr);
+ *int_addr = ((PIC_INT_ADDR_FLD & ((uint64_t)vect << 48)) |
(PIC_INT_ADDR_HOST & addr));
- } else {
- bridgereg_t *int_addr;
- int_addr = (bridgereg_t *)xtalk_intr_sfarg_get(xtalk_intr);
- *int_addr = ((BRIDGE_INT_ADDR_HOST & (addr >> 30)) |
- (BRIDGE_INT_ADDR_FLD & vect));
- }
}
/*ARGSUSED */
PCIBR_DEBUG_ALWAYS((PCIBR_DEBUG_INTR_ALLOC, pcibr_intr->bi_dev,
"pcibr_setpciint: int_addr=0x%x, *int_addr=0x%x, "
"pcibr_int_bit=0x%x\n", int_addr,
- (is_pic(bridge) ?
- *(picreg_t *)int_addr : *(bridgereg_t *)int_addr),
+ *(picreg_t *)int_addr,
pcibr_int_bit));
}
xtalk_intr_connect(pcibr_soft->bs_intr[pcibr_int_bit].bsi_xtalk_intr,
pcibr_intr_func, (intr_arg_t) intr_wrap,
(xtalk_intr_setfunc_t)pcibr_setpciint,
- (void *)pcibr_int_bit);
+ (void *)(long)pcibr_int_bit);
PCIBR_DEBUG_ALWAYS((PCIBR_DEBUG_INTR_ALLOC, pcibr_intr->bi_dev,
"pcibr_intr_disconnect: now-sharing int_bits=0x%x\n",
pcibr_int_bit));
}
/*ARGSUSED */
-devfs_handle_t
+vertex_hdl_t
pcibr_intr_cpu_get(pcibr_intr_t pcibr_intr)
{
pcibr_soft_t pcibr_soft = pcibr_intr->bi_soft;
bridge->b_wid_int_lower = NEW_b_wid_int_lower;
bridge->b_int_host_err = vect;
-printk("pcibr_setwidint: b_wid_int_upper 0x%x b_wid_int_lower 0x%x b_int_host_err 0x%x\n",
- NEW_b_wid_int_upper, NEW_b_wid_int_lower, vect);
-
}
/*
* interrupt to avoid a potential deadlock situation.
*/
if (wrap->iw_hdlrcnt == 0) {
- pcibr_force_interrupt(wrap);
+ pcibr_force_interrupt((pcibr_intr_t) wrap);
}
}
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
- * Copyright (C) 2001-2002 Silicon Graphics, Inc. All rights reserved.
+ * Copyright (C) 2001-2003 Silicon Graphics, Inc. All rights reserved.
*/
#include <linux/types.h>
#include <asm/sn/prio.h>
#include <asm/sn/xtalk/xbow.h>
#include <asm/sn/ioc3.h>
-#include <asm/sn/eeprom.h>
#include <asm/sn/io.h>
#include <asm/sn/sn_private.h>
void do_pcibr_rrb_autoalloc(pcibr_soft_t, int, int, int);
-int pcibr_wrb_flush(devfs_handle_t);
-int pcibr_rrb_alloc(devfs_handle_t, int *, int *);
-int pcibr_rrb_check(devfs_handle_t, int *, int *, int *, int *);
-void pcibr_rrb_flush(devfs_handle_t);
-int pcibr_slot_initial_rrb_alloc(devfs_handle_t,pciio_slot_t);
+int pcibr_wrb_flush(vertex_hdl_t);
+int pcibr_rrb_alloc(vertex_hdl_t, int *, int *);
+int pcibr_rrb_check(vertex_hdl_t, int *, int *, int *, int *);
+void pcibr_rrb_flush(vertex_hdl_t);
+int pcibr_slot_initial_rrb_alloc(vertex_hdl_t,pciio_slot_t);
void pcibr_rrb_debug(char *, pcibr_soft_t);
#define RRB_SIZE (4) /* sizeof rrb within reg (bits) */
#define RRB_ENABLE_BIT(bridge) (0x8) /* [BRIDGE | PIC]_RRB_EN */
-#define NUM_PDEV_BITS(bridge) (is_pic((bridge)) ? 1 : 2)
-#define NUM_VDEV_BITS(bridge) (is_pic((bridge)) ? 2 : 1)
-#define NUMBER_VCHANNELS(bridge) (is_pic((bridge)) ? 4 : 2)
+#define NUM_PDEV_BITS(bridge) (1)
+#define NUM_VDEV_BITS(bridge) (2)
+#define NUMBER_VCHANNELS(bridge) (4)
#define SLOT_2_PDEV(bridge, slot) ((slot) >> 1)
#define SLOT_2_RRB_REG(bridge, slot) ((slot) & 0x1)
/* validate that the slot and virtual channel are valid for a given bridge */
#define VALIDATE_SLOT_n_VCHAN(bridge, s, v) \
- (is_pic((bridge)) ? \
- (((((s) != PCIIO_SLOT_NONE) && ((s) <= (pciio_slot_t)3)) && (((v) >= 0) && ((v) <= 3))) ? 1 : 0) : \
- (((((s) != PCIIO_SLOT_NONE) && ((s) <= (pciio_slot_t)7)) && (((v) >= 0) && ((v) <= 1))) ? 1 : 0))
+ (((((s) != PCIIO_SLOT_NONE) && ((s) <= (pciio_slot_t)3)) && (((v) >= 0) && ((v) <= 3))) ? 1 : 0)
/*
* Count how many RRBs are marked valid for the specified PCI slot
pdev_bits = SLOT_2_PDEV(bridge, slot);
rrb_bits = enable_bit | vchan_bits | pdev_bits;
- if ( is_pic(bridge) ) {
- tmp = bridge->b_rrb_map[SLOT_2_RRB_REG(bridge, slot)].reg;
- }
- else {
- if (io_get_sh_swapper(NASID_GET(bridge))) {
- tmp = BRIDGE_REG_GET32((&bridge->b_rrb_map[SLOT_2_RRB_REG(bridge, slot)].reg));
- } else {
- tmp = bridge->b_rrb_map[SLOT_2_RRB_REG(bridge, slot)].reg;
- }
- }
+ tmp = bridge->b_rrb_map[SLOT_2_RRB_REG(bridge, slot)].reg;
for (rrb_index = 0; rrb_index < 8; rrb_index++) {
if ((tmp & RRB_MASK) == rrb_bits)
enable_bit = RRB_ENABLE_BIT(bridge);
- if ( is_pic(bridge) ) {
- tmp = bridge->b_rrb_map[SLOT_2_RRB_REG(bridge, slot)].reg;
- }
- else {
- if (io_get_sh_swapper(NASID_GET(bridge))) {
- tmp = BRIDGE_REG_GET32((&bridge->b_rrb_map[SLOT_2_RRB_REG(bridge, slot)].reg));
- } else {
- tmp = bridge->b_rrb_map[SLOT_2_RRB_REG(bridge, slot)].reg;
- }
- }
+ tmp = bridge->b_rrb_map[SLOT_2_RRB_REG(bridge, slot)].reg;
for (rrb_index = 0; rrb_index < 8; rrb_index++) {
if ((tmp & enable_bit) != enable_bit)
pdev_bits = SLOT_2_PDEV(bridge, slot);
rrb_bits = enable_bit | vchan_bits | pdev_bits;
- if ( is_pic(bridge) ) {
- reg = tmp = bridge->b_rrb_map[SLOT_2_RRB_REG(bridge, slot)].reg;
- }
- else {
- if (io_get_sh_swapper(NASID_GET(bridge))) {
- reg = tmp = BRIDGE_REG_GET32((&bridge->b_rrb_map[SLOT_2_RRB_REG(bridge, slot)].reg));
- } else {
- reg = tmp = bridge->b_rrb_map[SLOT_2_RRB_REG(bridge, slot)].reg;
- }
- }
-
+ reg = tmp = bridge->b_rrb_map[SLOT_2_RRB_REG(bridge, slot)].reg;
+
for (rrb_index = 0; ((rrb_index < 8) && (more > 0)); rrb_index++) {
if ((tmp & enable_bit) != enable_bit) {
/* clear the rrb and OR in the new rrb into 'reg' */
tmp = (tmp >> RRB_SIZE);
}
- if ( is_pic(bridge) ) {
- bridge->b_rrb_map[SLOT_2_RRB_REG(bridge, slot)].reg = reg;
- }
- else {
- if (io_get_sh_swapper(NASID_GET(bridge))) {
- BRIDGE_REG_SET32((&bridge->b_rrb_map[SLOT_2_RRB_REG(bridge, slot)].reg)) = reg;
- } else {
- bridge->b_rrb_map[SLOT_2_RRB_REG(bridge, slot)].reg = reg;
- }
- }
+ bridge->b_rrb_map[SLOT_2_RRB_REG(bridge, slot)].reg = reg;
return (more ? -1 : 0);
}
pdev_bits = SLOT_2_PDEV(bridge, slot);
rrb_bits = enable_bit | vchan_bits | pdev_bits;
- if ( is_pic(bridge) ) {
- reg = tmp = bridge->b_rrb_map[SLOT_2_RRB_REG(bridge, slot)].reg;
- }
- else {
- if (io_get_sh_swapper(NASID_GET(bridge))) {
- reg = BRIDGE_REG_GET32((&bridge->b_rrb_map[SLOT_2_RRB_REG(bridge, slot)].reg));
- } else {
- reg = tmp = bridge->b_rrb_map[SLOT_2_RRB_REG(bridge, slot)].reg;
- }
- }
-
+ reg = tmp = bridge->b_rrb_map[SLOT_2_RRB_REG(bridge, slot)].reg;
+
for (rrb_index = 0; ((rrb_index < 8) && (less > 0)); rrb_index++) {
if ((tmp & RRB_MASK) == rrb_bits) {
/*
tmp = (tmp >> RRB_SIZE);
}
- if ( is_pic(bridge) ) {
- bridge->b_rrb_map[SLOT_2_RRB_REG(bridge, slot)].reg = reg;
- }
- else {
- if (io_get_sh_swapper(NASID_GET(bridge))) {
- BRIDGE_REG_SET32((&bridge->b_rrb_map[SLOT_2_RRB_REG(bridge, slot)].reg)) = reg;
- } else {
- bridge->b_rrb_map[SLOT_2_RRB_REG(bridge, slot)].reg = reg;
- }
- }
+ bridge->b_rrb_map[SLOT_2_RRB_REG(bridge, slot)].reg = reg;
/* call do_pcibr_rrb_clear() for all the rrbs we've freed */
for (rrb_index = 0; rrb_index < 8; rrb_index++) {
* this RRB must be disabled.
*/
- if ( is_pic(bridge) ) {
- /* wait until RRB has no outstanduing XIO packets. */
- while ((status = bridge->b_resp_status) & BRIDGE_RRB_INUSE(rrb)) {
- ; /* XXX- beats on bridge. bad idea? */
- }
+ /* wait until RRB has no outstanding XIO packets. */
+ while ((status = bridge->b_resp_status) & BRIDGE_RRB_INUSE(rrb)) {
+ ; /* XXX- beats on bridge. bad idea? */
+ }
- /* if the RRB has data, drain it. */
- if (status & BRIDGE_RRB_VALID(rrb)) {
- bridge->b_resp_clear = BRIDGE_RRB_CLEAR(rrb);
+ /* if the RRB has data, drain it. */
+ if (status & BRIDGE_RRB_VALID(rrb)) {
+ bridge->b_resp_clear = BRIDGE_RRB_CLEAR(rrb);
- /* wait until RRB is no longer valid. */
- while ((status = bridge->b_resp_status) & BRIDGE_RRB_VALID(rrb)) {
- ; /* XXX- beats on bridge. bad idea? */
- }
- }
- }
- else {
- if (io_get_sh_swapper(NASID_GET(bridge))) {
- while ((status = BRIDGE_REG_GET32((&bridge->b_resp_status))) & BRIDGE_RRB_INUSE(rrb)) {
- ; /* XXX- beats on bridge. bad idea? */
- }
-
- /* if the RRB has data, drain it. */
- if (status & BRIDGE_RRB_VALID(rrb)) {
- BRIDGE_REG_SET32((&bridge->b_resp_clear)) = __swab32(BRIDGE_RRB_CLEAR(rrb));
-
- /* wait until RRB is no longer valid. */
- while ((status = BRIDGE_REG_GET32((&bridge->b_resp_status))) & BRIDGE_RRB_VALID(rrb)) {
- ; /* XXX- beats on bridge. bad idea? */
- }
- }
- } else { /* io_get_sh_swapper(NASID_GET(bridge)) */
- while ((status = bridge->b_resp_status) & BRIDGE_RRB_INUSE(rrb)) {
- ; /* XXX- beats on bridge. bad idea? */
- }
-
- /* if the RRB has data, drain it. */
- if (status & BRIDGE_RRB_VALID(rrb)) {
- bridge->b_resp_clear = BRIDGE_RRB_CLEAR(rrb);
- /* wait until RRB is no longer valid. */
- while ((status = bridge->b_resp_status) & BRIDGE_RRB_VALID(rrb)) {
- ; /* XXX- beats on bridge. bad idea? */
- }
- }
+ /* wait until RRB is no longer valid. */
+ while ((status = bridge->b_resp_status) & BRIDGE_RRB_VALID(rrb)) {
+ ; /* XXX- beats on bridge. bad idea? */
}
}
}
int shft = (RRB_SIZE * (rrbn >> 1));
unsigned long ebit = RRB_ENABLE_BIT(bridge) << shft;
- if ( is_pic(bridge) ) {
- rrbv = *rrbp;
- }
- else {
- if (io_get_sh_swapper(NASID_GET(bridge))) {
- rrbv = BRIDGE_REG_GET32((&rrbp));
- } else {
- rrbv = *rrbp;
- }
- }
+ rrbv = *rrbp;
if (rrbv & ebit) {
- if ( is_pic(bridge) ) {
- *rrbp = rrbv & ~ebit;
- }
- else {
- if (io_get_sh_swapper(NASID_GET(bridge))) {
- BRIDGE_REG_SET32((&rrbp)) = __swab32((rrbv & ~ebit));
- } else {
- *rrbp = rrbv & ~ebit;
- }
- }
+ *rrbp = rrbv & ~ebit;
}
do_pcibr_rrb_clear(bridge, rrbn);
if (rrbv & ebit) {
- if ( is_pic(bridge) ) {
- *rrbp = rrbv;
- }
- else {
- if (io_get_sh_swapper(NASID_GET(bridge))) {
- BRIDGE_REG_SET32((&rrbp)) = __swab32(rrbv);
- } else {
- *rrbp = rrbv;
- }
- }
+ *rrbp = rrbv;
}
}
* Flush all the rrb's assigned to the specified connection point.
*/
void
-pcibr_rrb_flush(devfs_handle_t pconn_vhdl)
+pcibr_rrb_flush(vertex_hdl_t pconn_vhdl)
{
pciio_info_t pciio_info = pciio_info_get(pconn_vhdl);
pcibr_soft_t pcibr_soft = (pcibr_soft_t)pciio_info_mfast_get(pciio_info);
* device hanging off the bridge.
*/
int
-pcibr_wrb_flush(devfs_handle_t pconn_vhdl)
+pcibr_wrb_flush(vertex_hdl_t pconn_vhdl)
{
pciio_info_t pciio_info = pciio_info_get(pconn_vhdl);
pciio_slot_t pciio_slot = PCIBR_INFO_SLOT_GET_INT(pciio_info);
* as best we can and return 0.
*/
int
-pcibr_rrb_alloc(devfs_handle_t pconn_vhdl,
+pcibr_rrb_alloc(vertex_hdl_t pconn_vhdl,
int *count_vchan0,
int *count_vchan1)
{
*/
int
-pcibr_rrb_check(devfs_handle_t pconn_vhdl,
+pcibr_rrb_check(vertex_hdl_t pconn_vhdl,
int *count_vchan0,
int *count_vchan1,
int *count_reserved,
*/
int
-pcibr_slot_initial_rrb_alloc(devfs_handle_t pcibr_vhdl,
+pcibr_slot_initial_rrb_alloc(vertex_hdl_t pcibr_vhdl,
pciio_slot_t slot)
{
pcibr_soft_t pcibr_soft;
*/
int
-pcibr_initial_rrb(devfs_handle_t pcibr_vhdl,
+pcibr_initial_rrb(vertex_hdl_t pcibr_vhdl,
pciio_slot_t first, pciio_slot_t last)
{
pcibr_soft_t pcibr_soft = pcibr_soft_get(pcibr_vhdl);
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
- * Copyright (C) 2001-2002 Silicon Graphics, Inc. All rights reserved.
+ * Copyright (C) 2001-2003 Silicon Graphics, Inc. All rights reserved.
*/
#include <linux/types.h>
#include <asm/sn/prio.h>
#include <asm/sn/xtalk/xbow.h>
#include <asm/sn/ioc3.h>
-#include <asm/sn/eeprom.h>
#include <asm/sn/io.h>
#include <asm/sn/sn_private.h>
#include <asm/sn/ate_utils.h>
#endif
-extern pcibr_info_t pcibr_info_get(devfs_handle_t);
-extern int pcibr_widget_to_bus(devfs_handle_t pcibr_vhdl);
+extern pcibr_info_t pcibr_info_get(vertex_hdl_t);
+extern int pcibr_widget_to_bus(vertex_hdl_t pcibr_vhdl);
extern pcibr_info_t pcibr_device_info_new(pcibr_soft_t, pciio_slot_t, pciio_function_t, pciio_vendor_id_t, pciio_device_id_t);
-extern int pcibr_slot_initial_rrb_alloc(devfs_handle_t,pciio_slot_t);
+extern int pcibr_slot_initial_rrb_alloc(vertex_hdl_t,pciio_slot_t);
extern int pcibr_pcix_rbars_calc(pcibr_soft_t);
-int pcibr_slot_info_init(devfs_handle_t pcibr_vhdl, pciio_slot_t slot);
-int pcibr_slot_info_free(devfs_handle_t pcibr_vhdl, pciio_slot_t slot);
-int pcibr_slot_addr_space_init(devfs_handle_t pcibr_vhdl, pciio_slot_t slot);
+int pcibr_slot_info_init(vertex_hdl_t pcibr_vhdl, pciio_slot_t slot);
+int pcibr_slot_info_free(vertex_hdl_t pcibr_vhdl, pciio_slot_t slot);
+int pcibr_slot_addr_space_init(vertex_hdl_t pcibr_vhdl, pciio_slot_t slot);
int pcibr_slot_pcix_rbar_init(pcibr_soft_t pcibr_soft, pciio_slot_t slot);
-int pcibr_slot_device_init(devfs_handle_t pcibr_vhdl, pciio_slot_t slot);
-int pcibr_slot_guest_info_init(devfs_handle_t pcibr_vhdl, pciio_slot_t slot);
-int pcibr_slot_call_device_attach(devfs_handle_t pcibr_vhdl,
+int pcibr_slot_device_init(vertex_hdl_t pcibr_vhdl, pciio_slot_t slot);
+int pcibr_slot_guest_info_init(vertex_hdl_t pcibr_vhdl, pciio_slot_t slot);
+int pcibr_slot_call_device_attach(vertex_hdl_t pcibr_vhdl,
pciio_slot_t slot, int drv_flags);
-int pcibr_slot_call_device_detach(devfs_handle_t pcibr_vhdl,
+int pcibr_slot_call_device_detach(vertex_hdl_t pcibr_vhdl,
pciio_slot_t slot, int drv_flags);
-int pcibr_slot_detach(devfs_handle_t pcibr_vhdl, pciio_slot_t slot,
+int pcibr_slot_detach(vertex_hdl_t pcibr_vhdl, pciio_slot_t slot,
int drv_flags, char *l1_msg, int *sub_errorp);
-int pcibr_is_slot_sys_critical(devfs_handle_t pcibr_vhdl, pciio_slot_t slot);
static int pcibr_probe_slot(bridge_t *, cfg_p, unsigned int *);
-void pcibr_device_info_free(devfs_handle_t, pciio_slot_t);
+void pcibr_device_info_free(vertex_hdl_t, pciio_slot_t);
iopaddr_t pcibr_bus_addr_alloc(pcibr_soft_t, pciio_win_info_t,
pciio_space_t, int, int, int);
void pciibr_bus_addr_free(pcibr_soft_t, pciio_win_info_t);
cfg_p pcibr_find_capability(cfg_p, unsigned);
-extern uint64_t do_pcibr_config_get(int, cfg_p, unsigned, unsigned);
-void do_pcibr_config_set(int, cfg_p, unsigned, unsigned, uint64_t);
+extern uint64_t do_pcibr_config_get(cfg_p, unsigned, unsigned);
+void do_pcibr_config_set(cfg_p, unsigned, unsigned, uint64_t);
-int pcibr_slot_attach(devfs_handle_t pcibr_vhdl, pciio_slot_t slot,
+int pcibr_slot_attach(vertex_hdl_t pcibr_vhdl, pciio_slot_t slot,
int drv_flags, char *l1_msg, int *sub_errorp);
int pcibr_slot_info_return(pcibr_soft_t pcibr_soft, pciio_slot_t slot,
pcibr_slot_info_resp_t respp);
-extern devfs_handle_t baseio_pci_vhdl;
-int scsi_ctlr_nums_add(devfs_handle_t, devfs_handle_t);
+extern vertex_hdl_t baseio_pci_vhdl;
+int scsi_ctlr_nums_add(vertex_hdl_t, vertex_hdl_t);
/* For now .... */
#ifdef PIC_LATER
int
-pcibr_slot_startup(devfs_handle_t pcibr_vhdl, pcibr_slot_req_t reqp)
+pcibr_slot_startup(vertex_hdl_t pcibr_vhdl, pcibr_slot_req_t reqp)
{
pcibr_soft_t pcibr_soft = pcibr_soft_get(pcibr_vhdl);
pciio_slot_t slot;
/* req_slot is the 'external' slot number, convert for internal use */
slot = PCIBR_SLOT_TO_DEVICE(pcibr_soft, reqp->req_slot);
- /* Do not allow start-up of a slot in a shoehorn */
- if(nic_vertex_info_match(pcibr_soft->bs_conn, XTALK_PCI_PART_NUM)) {
- return(PCI_SLOT_IN_SHOEHORN);
- }
-
/* Check for the valid slot */
if (!PCIBR_VALID_SLOT(pcibr_soft, slot))
return(PCI_NOT_A_SLOT);
* Software shut-down the PCI slot
*/
int
-pcibr_slot_shutdown(devfs_handle_t pcibr_vhdl, pcibr_slot_req_t reqp)
+pcibr_slot_shutdown(vertex_hdl_t pcibr_vhdl, pcibr_slot_req_t reqp)
{
pcibr_soft_t pcibr_soft = pcibr_soft_get(pcibr_vhdl);
bridge_t *bridge;
if (!PCIBR_VALID_SLOT(pcibr_soft, slot))
return(PCI_NOT_A_SLOT);
- /* Do not allow shut-down of a slot in a shoehorn */
- if(nic_vertex_info_match(pcibr_soft->bs_conn, XTALK_PCI_PART_NUM)) {
- return(PCI_SLOT_IN_SHOEHORN);
- }
-
#ifdef PIC_LATER
/* Acquire update access to the bus */
mrlock(pcibr_soft->bs_bus_lock, MR_UPDATE, PZERO);
{
pcibr_info_t pcibr_info = pcibr_infoh[func];
int win;
- boolean_t is_sys_critical_vertex(devfs_handle_t);
funcp->resp_f_status = 0;
#if defined(SUPPORT_PRINTING_V_FORMAT)
sprintf(funcp->resp_f_slot_name, "%v", pcibr_info->f_vertex);
#endif
- if(is_sys_critical_vertex(pcibr_info->f_vertex)) {
- funcp->resp_f_status |= FUNC_IS_SYS_CRITICAL;
- }
funcp->resp_f_bus = pcibr_info->f_bus;
funcp->resp_f_slot = PCIBR_INFO_SLOT_GET_EXT(pcibr_info);
reg_p b_respp;
pcibr_slot_info_resp_t slotp;
pcibr_slot_func_info_resp_t funcp;
- boolean_t is_sys_critical_vertex(devfs_handle_t);
extern void snia_kmem_free(void *, int);
slotp = snia_kmem_zalloc(sizeof(*slotp), 0);
slotp->resp_slot_status = pss->slot_status;
slotp->resp_l1_bus_num = pcibr_widget_to_bus(pcibr_soft->bs_vhdl);
-
- if (is_sys_critical_vertex(pss->slot_conn)) {
- slotp->resp_slot_status |= SLOT_IS_SYS_CRITICAL;
- }
-
slotp->resp_bss_ninfo = pss->bss_ninfo;
for (func = 0; func < pss->bss_ninfo; func++) {
* External SSRAM workaround info
*/
int
-pcibr_slot_query(devfs_handle_t pcibr_vhdl, pcibr_slot_req_t reqp)
+pcibr_slot_query(vertex_hdl_t pcibr_vhdl, pcibr_slot_req_t reqp)
{
pcibr_soft_t pcibr_soft = pcibr_soft_get(pcibr_vhdl);
pciio_slot_t slot;
return(PCI_NOT_A_SLOT);
}
- /* Do not allow a query of a slot in a shoehorn */
- if(nic_vertex_info_match(pcibr_soft->bs_conn, XTALK_PCI_PART_NUM)) {
- return(PCI_SLOT_IN_SHOEHORN);
- }
-
/* Return information for the requested PCI slot */
if (slot != PCIIO_SLOT_NONE) {
if (size < sizeof(*respp)) {
return(error);
}
-#if 0
-/*
- * pcibr_slot_reset
- * Reset the PCI device in the particular slot.
- *
- * The Xbridge does not comply with the PCI Specification
- * when resetting an indiviaudl slot. An individual slot is
- * is reset by toggling the slot's bit in the Xbridge Control
- * Register. The Xbridge will assert the target slot's
- * (non-bussed) RST signal, but does not assert the (bussed)
- * REQ64 signal as required by the specification. As
- * designed, the Xbridge cannot assert the REQ64 signal
- * becuase it may interfere with a bus transaction in progress.
- * The practical effects of this Xbridge implementation is
- * device dependent; it probably will not adversely effect
- * 32-bit cards, but may disable 64-bit data transfers by those
- * cards that normally support 64-bit data transfers.
- *
- * The Xbridge will assert REQ64 when all four slots are reset
- * by simultaneously toggling all four slot reset bits in the
- * Xbridge Control Register. This is basically a PCI bus reset
- * and asserting the (bussed) REQ64 signal will not interfere
- * with any bus transactions in progress.
- *
- * The Xbridge (and the SN0 Bridge) support resetting only
- * four PCI bus slots via the (X)bridge Control Register.
- *
- * To reset an individual slot for the PCI Hot-Plug feature
- * use the L1 console commands to power-down and then
- * power-up the slot, or use the kernel infrastructure
- * functions to power-down/up the slot when they are
- * implemented for SN1.
- */
-int
-pcibr_slot_reset(devfs_handle_t pcibr_vhdl, pciio_slot_t slot)
-{
- pcibr_soft_t pcibr_soft = pcibr_soft_get(pcibr_vhdl);
- bridge_t *bridge;
- bridgereg_t ctrlreg,tmp;
- volatile bridgereg_t *wrb_flush;
-
- if (!pcibr_soft)
- return(EINVAL);
-
- if (!PCIBR_VALID_SLOT(pcibr_soft, slot))
- return(EINVAL);
-
- /* Enable the DMA operations from this device of the xtalk widget
- * (PCI host bridge in this case).
- */
- xtalk_widgetdev_enable(pcibr_soft->bs_conn, slot);
-
- /* Set the reset slot bit in the bridge's wid control register
- * to reset the PCI slot
- */
- bridge = pcibr_soft->bs_base;
-
- /* Read the bridge widget control and clear out the reset pin
- * bit for the corresponding slot.
- */
- tmp = ctrlreg = bridge->b_wid_control;
-
- tmp &= ~BRIDGE_CTRL_RST_PIN(slot);
-
- bridge->b_wid_control = tmp;
- tmp = bridge->b_wid_control;
-
- /* Restore the old control register back.
- * NOTE : PCI card gets reset when the reset pin bit
- * changes from 0 (set above) to 1 (going to be set now).
- */
-
- bridge->b_wid_control = ctrlreg;
-
- /* Flush the write buffers if any !! */
- wrb_flush = &(bridge->b_wr_req_buf[slot].reg);
- while (*wrb_flush);
-
- return(0);
-}
-#endif
-
#define PROBE_LOCK 0 /* FIXME: we're attempting to lock around accesses
* to b_int_enable. This hangs pcibr_probe_slot()
*/
* information associated with this particular PCI device.
*/
int
-pcibr_slot_info_init(devfs_handle_t pcibr_vhdl,
+pcibr_slot_info_init(vertex_hdl_t pcibr_vhdl,
pciio_slot_t slot)
{
pcibr_soft_t pcibr_soft;
int nfunc;
pciio_function_t rfunc;
int func;
- devfs_handle_t conn_vhdl;
+ vertex_hdl_t conn_vhdl;
pcibr_soft_slot_t slotp;
/* Get the basic software information required to proceed */
return(0);
}
- /* Check for a slot with any system critical functions */
- if (pcibr_is_slot_sys_critical(pcibr_vhdl, slot))
- return(EPERM);
-
/* Try to read the device-id/vendor-id from the config space */
cfgw = pcibr_slot_config_addr(bridge, slot, 0);
if (vendor == 0xFFFF)
return(ENODEV);
- htype = do_pcibr_config_get(IS_PIC_SOFT(pcibr_soft), cfgw, PCI_CFG_HEADER_TYPE, 1);
+ htype = do_pcibr_config_get(cfgw, PCI_CFG_HEADER_TYPE, 1);
nfunc = 1;
rfunc = PCIIO_FUNC_NONE;
pfail = 0;
cfgw = pcibr_func_config_addr(bridge, 0, slot, func, 0);
device = 0xFFFF & (idword >> 16);
- htype = do_pcibr_config_get(IS_PIC_SOFT(pcibr_soft), cfgw, PCI_CFG_HEADER_TYPE, 1);
+ htype = do_pcibr_config_get(cfgw, PCI_CFG_HEADER_TYPE, 1);
rfunc = func;
}
htype &= 0x7f;
* Timer for these devices
*/
- lt_time = do_pcibr_config_get(IS_PIC_SOFT(pcibr_soft), cfgw, PCI_CFG_LATENCY_TIMER, 1);
+ lt_time = do_pcibr_config_get(cfgw, PCI_CFG_LATENCY_TIMER, 1);
if ((lt_time == 0) && !(bridge->b_device[slot].reg & BRIDGE_DEV_RT) &&
- !((vendor == IOC3_VENDOR_ID_NUM) &&
- (
-#ifdef PIC_LATER
- (device == IOC3_DEVICE_ID_NUM) ||
- (device == LINC_DEVICE_ID_NUM) ||
-#endif
- (device == 0x5 /* RAD_DEV */)))) {
+ (device == 0x5 /* RAD_DEV */)) {
unsigned min_gnt;
unsigned min_gnt_mult;
* needs in increments of 250ns. But latency timer is in
* PCI clock cycles, so a conversion is needed.
*/
- min_gnt = do_pcibr_config_get(IS_PIC_SOFT(pcibr_soft), cfgw, PCI_MIN_GNT, 1);
+ min_gnt = do_pcibr_config_get(cfgw, PCI_MIN_GNT, 1);
if (IS_133MHZ(pcibr_soft))
min_gnt_mult = 32; /* 250ns @ 133MHz in clocks */
else
lt_time = 4 * min_gnt_mult; /* 1 micro second */
- do_pcibr_config_set(IS_PIC_SOFT(pcibr_soft), cfgw, PCI_CFG_LATENCY_TIMER, 1, lt_time);
+ do_pcibr_config_set(cfgw, PCI_CFG_LATENCY_TIMER, 1, lt_time);
PCIBR_DEBUG_ALWAYS((PCIBR_DEBUG_CONFIG, pcibr_vhdl,
"pcibr_slot_info_init: set Latency Timer for slot=%d, "
PCIBR_DEVICE_TO_SLOT(pcibr_soft, slot), func, lt_time));
}
- /* Get the PCI-X capability if running in PCI-X mode. If the func
- * doesn't have a pcix capability, allocate a PCIIO_VENDOR_ID_NONE
- * pcibr_info struct so the device driver for that function is not
- * called.
+
+ /* In our architecture the setting of the cacheline size isn't
+ * beneficial for cards in PCI mode, but in PCI-X mode devices
+ * can optionally use the cacheline size value for internal
+ * device optimizations (See 7.1.5 of the PCI-X v1.0 spec).
+ * NOTE: cacheline size is in doubleword increments
*/
if (IS_PCIX(pcibr_soft)) {
+ if (!do_pcibr_config_get(cfgw, PCI_CFG_CACHE_LINE, 1)) {
+ do_pcibr_config_set(cfgw, PCI_CFG_CACHE_LINE, 1, 0x20);
+ PCIBR_DEBUG_ALWAYS((PCIBR_DEBUG_CONFIG, pcibr_vhdl,
+ "pcibr_slot_info_init: set CacheLine for slot=%d, "
+ "func=%d, to 0x20\n",
+ PCIBR_DEVICE_TO_SLOT(pcibr_soft, slot), func));
+ }
+
+ /* Get the PCI-X capability if running in PCI-X mode. If the func
+ * doesn't have a pcix capability, allocate a PCIIO_VENDOR_ID_NONE
+ * pcibr_info struct so the device driver for that function is not
+ * called.
+ */
if (!(pcix_cap = pcibr_find_capability(cfgw, PCI_CAP_PCIX))) {
printk(KERN_WARNING
#if defined(SUPPORT_PRINTING_V_FORMAT)
if (func == 0)
slotp->slot_conn = conn_vhdl;
- cmd_reg = do_pcibr_config_get(IS_PIC_SOFT(pcibr_soft), cfgw, PCI_CFG_COMMAND, 4);
+ cmd_reg = do_pcibr_config_get(cfgw, PCI_CFG_COMMAND, 4);
wptr = cfgw + PCI_CFG_BASE_ADDR_0 / 4;
* this could be pushed up into pciio, when we
* start supporting more PCI providers.
*/
- base = do_pcibr_config_get(IS_PIC_SOFT(pcibr_soft), wptr, (win * 4), 4);
+ base = do_pcibr_config_get(wptr, (win * 4), 4);
if (base & PCI_BA_IO_SPACE) {
/* BASE is in I/O space. */
} else if (base & 0xC0000000) {
base = 0; /* outside permissable range */
} else if ((code == PCI_BA_MEM_64BIT) &&
- (do_pcibr_config_get(IS_PIC_SOFT(pcibr_soft), wptr, ((win + 1)*4), 4) != 0)) {
+ (do_pcibr_config_get(wptr, ((win + 1)*4), 4) != 0)) {
base = 0; /* outside permissable range */
}
}
if (base != 0) { /* estimate size */
size = base & -base;
} else { /* calculate size */
- do_pcibr_config_set(IS_PIC_SOFT(pcibr_soft), wptr, (win * 4), 4, ~0); /* write 1's */
- size = do_pcibr_config_get(IS_PIC_SOFT(pcibr_soft), wptr, (win * 4), 4); /* read back */
+ do_pcibr_config_set(wptr, (win * 4), 4, ~0); /* write 1's */
+ size = do_pcibr_config_get(wptr, (win * 4), 4); /* read back */
size &= mask; /* keep addr */
size &= -size; /* keep lsbit */
if (size == 0)
pcibr_info->f_window[win].w_base = base;
pcibr_info->f_window[win].w_size = size;
-#if defined(IOC3_VENDOR_ID_NUM) && defined(IOC3_DEVICE_ID_NUM)
- /*
- * IOC3 BASE_ADDR* BUG WORKAROUND
- *
-
- * If we write to BASE1 on the IOC3, the
- * data in BASE0 is replaced. The
- * original workaround was to remember
- * the value of BASE0 and restore it
- * when we ran off the end of the BASE
- * registers; however, a later
- * workaround was added (I think it was
- * rev 1.44) to avoid setting up
- * anything but BASE0, with the comment
- * that writing all ones to BASE1 set
- * the enable-parity-error test feature
- * in IOC3's SCR bit 14.
- *
- * So, unless we defer doing any PCI
- * space allocation until drivers
- * attach, and set up a way for drivers
- * (the IOC3 in paricular) to tell us
- * generically to keep our hands off
- * BASE registers, we gotta "know" about
- * the IOC3 here.
- *
- * Too bad the PCI folks didn't reserve the
- * all-zero value for 'no BASE here' (it is a
- * valid code for an uninitialized BASE in
- * 32-bit PCI memory space).
- */
-
- if ((vendor == IOC3_VENDOR_ID_NUM) &&
- (device == IOC3_DEVICE_ID_NUM))
- break;
-#endif
if (code == PCI_BA_MEM_64BIT) {
win++; /* skip upper half */
- do_pcibr_config_set(IS_PIC_SOFT(pcibr_soft), wptr, (win * 4), 4, 0); /* must be zero */
+ do_pcibr_config_set(wptr, (win * 4), 4, 0); /* must be zero */
}
} /* next win */
} /* next func */
int defend_against_circular_linkedlist = 0;
/* Check to see if there is a capabilities pointer in the cfg header */
- if (!(do_pcibr_config_get(1, cfgw, PCI_CFG_STATUS, 2) & PCI_STAT_CAP_LIST)) {
+ if (!(do_pcibr_config_get(cfgw, PCI_CFG_STATUS, 2) & PCI_STAT_CAP_LIST)) {
return (NULL);
}
* significant bits of the next pointer must be ignored, so we mask
* with 0xfc).
*/
- cap_nxt = (do_pcibr_config_get(1, cfgw, PCI_CAPABILITIES_PTR, 1) & 0xfc);
+ cap_nxt = (do_pcibr_config_get(cfgw, PCI_CAPABILITIES_PTR, 1) & 0xfc);
while (cap_nxt && (defend_against_circular_linkedlist <= 48)) {
- cap_id = do_pcibr_config_get(1, cfgw, cap_nxt, 1);
+ cap_id = do_pcibr_config_get(cfgw, cap_nxt, 1);
if (cap_id == capability) {
return ((cfg_p)((char *)cfgw + cap_nxt));
}
- cap_nxt = (do_pcibr_config_get(1, cfgw, cap_nxt+1, 1) & 0xfc);
+ cap_nxt = (do_pcibr_config_get(cfgw, cap_nxt+1, 1) & 0xfc);
defend_against_circular_linkedlist++;
}
* with a particular PCI device.
*/
int
-pcibr_slot_info_free(devfs_handle_t pcibr_vhdl,
+pcibr_slot_info_free(vertex_hdl_t pcibr_vhdl,
pciio_slot_t slot)
{
pcibr_soft_t pcibr_soft;
* the base registers in the card.
*/
int
-pcibr_slot_addr_space_init(devfs_handle_t pcibr_vhdl,
+pcibr_slot_addr_space_init(vertex_hdl_t pcibr_vhdl,
pciio_slot_t slot)
{
pcibr_soft_t pcibr_soft;
pcibr_info_h pcibr_infoh;
pcibr_info_t pcibr_info;
bridge_t *bridge;
- size_t align_slot;
iopaddr_t mask;
int nbars;
int nfunc;
int func;
int win;
int rc = 0;
+ int align;
+ int align_slot;
pcibr_soft = pcibr_soft_get(pcibr_vhdl);
* the entire "lo" area is only a
* megabyte, total ...
*/
- align_slot = (slot < 2) ? 0x200000 : 0x100000;
+ align_slot = 0x100000;
+ align = align_slot;
for (func = 0; func < nfunc; ++func) {
cfg_p cfgw;
cfgw = pcibr_func_config_addr(bridge, 0, slot, func, 0);
wptr = cfgw + PCI_CFG_BASE_ADDR_0 / 4;
- if ((do_pcibr_config_get(IS_PIC_SOFT(pcibr_soft), cfgw, PCI_CFG_HEADER_TYPE, 1) & 0x7f) != 0)
+ if ((do_pcibr_config_get(cfgw, PCI_CFG_HEADER_TYPE, 1) & 0x7f) != 0)
nbars = 2;
else
nbars = PCI_CFG_BASE_ADDRS;
continue; /* already allocated */
}
+ align = (win) ? size : align_slot;
+
+ if (align < _PAGESZ)
+ align = _PAGESZ; /* ie. 0x00004000 */
+
switch (space) {
case PCIIO_SPACE_IO:
base = pcibr_bus_addr_alloc(pcibr_soft,
&pcibr_info->f_window[win],
PCIIO_SPACE_IO,
- 0, size, align_slot);
+ 0, size, align);
if (!base)
rc = ENOSPC;
break;
case PCIIO_SPACE_MEM:
- if ((do_pcibr_config_get(IS_PIC_SOFT(pcibr_soft), wptr, (win * 4), 4) &
+ if ((do_pcibr_config_get(wptr, (win * 4), 4) &
PCI_BA_MEM_LOCATION) == PCI_BA_MEM_1MEG) {
- int align = size; /* ie. 0x00001000 */
-
- if (align < _PAGESZ)
- align = _PAGESZ; /* ie. 0x00004000 */
/* allocate from 20-bit PCI space */
base = pcibr_bus_addr_alloc(pcibr_soft,
base = pcibr_bus_addr_alloc(pcibr_soft,
&pcibr_info->f_window[win],
PCIIO_SPACE_MEM32,
- 0, size, align_slot);
+ 0, size, align);
if (!base)
rc = ENOSPC;
}
PCIBR_DEVICE_TO_SLOT(pcibr_soft,slot), win, space));
}
pcibr_info->f_window[win].w_base = base;
- do_pcibr_config_set(IS_PIC_SOFT(pcibr_soft), wptr, (win * 4), 4, base);
+ do_pcibr_config_set(wptr, (win * 4), 4, base);
#if defined(SUPPORT_PRINTING_R_FORMAT)
if (pcibr_debug_mask & PCIBR_DEBUG_BAR) {
/*
* Allocate space for the EXPANSION ROM
- * NOTE: DO NOT DO THIS ON AN IOC3,
- * as it blows the system away.
*/
base = size = 0;
- if ((pcibr_soft->bs_slot[slot].bss_vendor_id != IOC3_VENDOR_ID_NUM) ||
- (pcibr_soft->bs_slot[slot].bss_device_id != IOC3_DEVICE_ID_NUM)) {
-
+ {
wptr = cfgw + PCI_EXPANSION_ROM / 4;
- do_pcibr_config_set(IS_PIC_SOFT(pcibr_soft), wptr, 0, 4, 0xFFFFF000);
- mask = do_pcibr_config_get(IS_PIC_SOFT(pcibr_soft), wptr, 0, 4);
+ do_pcibr_config_set(wptr, 0, 4, 0xFFFFF000);
+ mask = do_pcibr_config_get(wptr, 0, 4);
if (mask & 0xFFFFF000) {
size = mask & -mask;
base = pcibr_bus_addr_alloc(pcibr_soft,
&pcibr_info->f_rwindow,
PCIIO_SPACE_MEM32,
- 0, size, align_slot);
+ 0, size, align);
if (!base)
rc = ENOSPC;
else {
- do_pcibr_config_set(IS_PIC_SOFT(pcibr_soft), wptr, 0, 4, base);
+ do_pcibr_config_set(wptr, 0, 4, base);
PCIBR_DEBUG_ALWAYS((PCIBR_DEBUG_BAR, pcibr_vhdl,
"pcibr_slot_addr_space_init: slot=%d, func=%d, "
"ROM in [0x%X..0x%X], allocated by pcibr\n",
}
pcibr_info->f_rbase = base;
pcibr_info->f_rsize = size;
-
+
/*
* if necessary, update the board's
* command register to enable decoding
pci_cfg_cmd_reg_add |= PCI_CMD_BUS_MASTER;
- pci_cfg_cmd_reg = do_pcibr_config_get(IS_PIC_SOFT(pcibr_soft), cfgw, PCI_CFG_COMMAND, 4);
+ pci_cfg_cmd_reg = do_pcibr_config_get(cfgw, PCI_CFG_COMMAND, 4);
#if PCI_FBBE /* XXX- check here to see if dev can do fast-back-to-back */
if (!((pci_cfg_cmd_reg >> 16) & PCI_STAT_F_BK_BK_CAP))
#endif
pci_cfg_cmd_reg &= 0xFFFF;
if (pci_cfg_cmd_reg_add & ~pci_cfg_cmd_reg)
- do_pcibr_config_set(IS_PIC_SOFT(pcibr_soft), cfgw, PCI_CFG_COMMAND, 4,
+ do_pcibr_config_set(cfgw, PCI_CFG_COMMAND, 4,
pci_cfg_cmd_reg | pci_cfg_cmd_reg_add);
} /* next func */
return(rc);
*/
int
-pcibr_slot_device_init(devfs_handle_t pcibr_vhdl,
+pcibr_slot_device_init(vertex_hdl_t pcibr_vhdl,
pciio_slot_t slot)
{
pcibr_soft_t pcibr_soft;
PCIBR_DEBUG_ALWAYS((PCIBR_DEBUG_DEVREG, pcibr_vhdl,
"pcibr_slot_device_init: Device(%d): %R\n",
slot, devreg, device_bits));
-#else
- printk("pcibr_slot_device_init: Device(%d) 0x%x\n", slot, devreg);
#endif
return(0);
}
* Setup the host/guest relations for a PCI slot.
*/
int
-pcibr_slot_guest_info_init(devfs_handle_t pcibr_vhdl,
+pcibr_slot_guest_info_init(vertex_hdl_t pcibr_vhdl,
pciio_slot_t slot)
{
pcibr_soft_t pcibr_soft;
* card in this slot.
*/
int
-pcibr_slot_call_device_attach(devfs_handle_t pcibr_vhdl,
+pcibr_slot_call_device_attach(vertex_hdl_t pcibr_vhdl,
pciio_slot_t slot,
int drv_flags)
{
pcibr_soft_t pcibr_soft;
pcibr_info_h pcibr_infoh;
pcibr_info_t pcibr_info;
- async_attach_t aa = NULL;
int func;
- devfs_handle_t xconn_vhdl, conn_vhdl;
+ vertex_hdl_t xconn_vhdl, conn_vhdl;
#ifdef PIC_LATER
- devfs_handle_t scsi_vhdl;
+ vertex_hdl_t scsi_vhdl;
#endif
int nfunc;
int error_func;
}
xconn_vhdl = pcibr_soft->bs_conn;
- aa = async_attach_get_info(xconn_vhdl);
nfunc = pcibr_soft->bs_slot[slot].bss_ninfo;
pcibr_infoh = pcibr_soft->bs_slot[slot].bss_infos;
conn_vhdl = pcibr_info->f_vertex;
-#ifdef LATER
- /*
- * Activate if and when we support cdl.
- */
- if (aa)
- async_attach_add_info(conn_vhdl, aa);
-#endif /* LATER */
error_func = pciio_device_attach(conn_vhdl, drv_flags);
* card in this slot.
*/
int
-pcibr_slot_call_device_detach(devfs_handle_t pcibr_vhdl,
+pcibr_slot_call_device_detach(vertex_hdl_t pcibr_vhdl,
pciio_slot_t slot,
int drv_flags)
{
pcibr_info_h pcibr_infoh;
pcibr_info_t pcibr_info;
int func;
- devfs_handle_t conn_vhdl = GRAPH_VERTEX_NONE;
+ vertex_hdl_t conn_vhdl = GRAPH_VERTEX_NONE;
int nfunc;
int error_func;
int error_slot = 0;
* PCI card on the bus.
*/
int
-pcibr_slot_attach(devfs_handle_t pcibr_vhdl,
+pcibr_slot_attach(vertex_hdl_t pcibr_vhdl,
pciio_slot_t slot,
int drv_flags,
char *l1_msg,
* slot-specific freeing that needs to be done.
*/
int
-pcibr_slot_detach(devfs_handle_t pcibr_vhdl,
+pcibr_slot_detach(vertex_hdl_t pcibr_vhdl,
pciio_slot_t slot,
int drv_flags,
char *l1_msg,
pcibr_soft_t pcibr_soft = pcibr_soft_get(pcibr_vhdl);
int error;
- /* Make sure that we do not detach a system critical function vertex */
- if(pcibr_is_slot_sys_critical(pcibr_vhdl, slot))
- return(PCI_IS_SYS_CRITICAL);
-
/* Call the device detach function */
error = (pcibr_slot_call_device_detach(pcibr_vhdl, slot, drv_flags));
if (error) {
}
/*
- * pcibr_is_slot_sys_critical
- * Check slot for any functions that are system critical.
- * Return 1 if any are system critical or 0 otherwise.
- *
- * This function will always return 0 when called by
- * pcibr_attach() because the system critical vertices
- * have not yet been set in the hwgraph.
- */
-int
-pcibr_is_slot_sys_critical(devfs_handle_t pcibr_vhdl,
- pciio_slot_t slot)
-{
- pcibr_soft_t pcibr_soft;
- pcibr_info_h pcibr_infoh;
- pcibr_info_t pcibr_info;
- devfs_handle_t conn_vhdl = GRAPH_VERTEX_NONE;
- int nfunc;
- int func;
- boolean_t is_sys_critical_vertex(devfs_handle_t);
-
- pcibr_soft = pcibr_soft_get(pcibr_vhdl);
- if (!pcibr_soft)
- return(EINVAL);
-
- if (!PCIBR_VALID_SLOT(pcibr_soft, slot))
- return(EINVAL);
-
- nfunc = pcibr_soft->bs_slot[slot].bss_ninfo;
- pcibr_infoh = pcibr_soft->bs_slot[slot].bss_infos;
-
- for (func = 0; func < nfunc; ++func) {
-
- pcibr_info = pcibr_infoh[func];
- if (!pcibr_info)
- continue;
-
- if (pcibr_info->f_vendor == PCIIO_VENDOR_ID_NONE)
- continue;
-
- conn_vhdl = pcibr_info->f_vertex;
- if (is_sys_critical_vertex(conn_vhdl)) {
-#if defined(SUPPORT_PRINTING_V_FORMAT)
- printk(KERN_WARNING "%v is a system critical device vertex\n", conn_vhdl);
-#else
- printk(KERN_WARNING "%p is a system critical device vertex\n", (void *)conn_vhdl);
-#endif
- return(1);
- }
-
- }
-
- return(0);
-}
-
-/*
* pcibr_probe_slot_pic: read a config space word
* while trapping any errors; return zero if
* all went OK, or nonzero if there was an error.
}
/*
- * pcibr_probe_slot_non_pic: read a config space word
- * while trapping any errors; return zero if
- * all went OK, or nonzero if there was an error.
- * The value read, if any, is passed back
- * through the valp parameter.
- */
-static int
-pcibr_probe_slot_non_pic(bridge_t *bridge,
- cfg_p cfg,
- unsigned *valp)
-{
- int rv;
- bridgereg_t b_old_enable = (bridgereg_t)0, b_new_enable = (bridgereg_t)0;
- extern int badaddr_val(volatile void *, int, volatile void *);
-
- b_old_enable = bridge->b_int_enable;
- b_new_enable = b_old_enable & ~BRIDGE_IMR_PCI_MST_TIMEOUT;
- bridge->b_int_enable = b_new_enable;
-
- /*
- * The xbridge doesn't clear b_err_int_view unless
- * multi-err is cleared...
- */
- if (is_xbridge(bridge)) {
- if (bridge->b_err_int_view & BRIDGE_ISR_PCI_MST_TIMEOUT)
- bridge->b_int_rst_stat = BRIDGE_IRR_MULTI_CLR;
- }
-
- if (bridge->b_int_status & BRIDGE_IRR_PCI_GRP) {
- bridge->b_int_rst_stat = BRIDGE_IRR_PCI_GRP_CLR;
- (void) bridge->b_wid_tflush; /* flushbus */
- }
- rv = badaddr_val((void *) (((uint64_t)cfg) ^ 4), 4, valp);
- /*
- * The xbridge doesn't set master timeout in b_int_status
- * here. Fortunately it's in error_interrupt_view.
- */
- if (is_xbridge(bridge)) {
- if (bridge->b_err_int_view & BRIDGE_ISR_PCI_MST_TIMEOUT) {
- bridge->b_int_rst_stat = BRIDGE_IRR_MULTI_CLR;
- rv = 1; /* unoccupied slot */
- }
- }
- bridge->b_int_enable = b_old_enable;
- bridge->b_wid_tflush; /* wait until Bridge PIO complete */
-
- return(rv);
-}
-
-
-/*
* pcibr_probe_slot: read a config space word
* while trapping any errors; return zero if
* all went OK, or nonzero if there was an error.
cfg_p cfg,
unsigned *valp)
{
- if ( is_pic(bridge) )
- return(pcibr_probe_slot_pic(bridge, cfg, valp));
- else
- return(pcibr_probe_slot_non_pic(bridge, cfg, valp));
+ return(pcibr_probe_slot_pic(bridge, cfg, valp));
}
void
-pcibr_device_info_free(devfs_handle_t pcibr_vhdl, pciio_slot_t slot)
+pcibr_device_info_free(vertex_hdl_t pcibr_vhdl, pciio_slot_t slot)
{
pcibr_soft_t pcibr_soft = pcibr_soft_get(pcibr_vhdl);
pcibr_info_t pcibr_info;
/* Disable memory and I/O BARs */
cfgw = pcibr_func_config_addr(bridge, 0, slot, func, 0);
- cmd_reg = do_pcibr_config_get(IS_PIC_SOFT(pcibr_soft), cfgw, PCI_CFG_COMMAND, 4);
+ cmd_reg = do_pcibr_config_get(cfgw, PCI_CFG_COMMAND, 4);
cmd_reg &= (PCI_CMD_MEM_SPACE | PCI_CMD_IO_SPACE);
- do_pcibr_config_set(IS_PIC_SOFT(pcibr_soft), cfgw, PCI_CFG_COMMAND, 4, cmd_reg);
+ do_pcibr_config_set(cfgw, PCI_CFG_COMMAND, 4, cmd_reg);
for (bar = 0; bar < PCI_CFG_BASE_ADDRS; bar++) {
if (pcibr_info->f_window[bar].w_space == PCIIO_SPACE_NONE)
* io_brick_tab[] array defined in ml/SN/iograph.c
*/
int
-pcibr_widget_to_bus(devfs_handle_t pcibr_vhdl)
+pcibr_widget_to_bus(vertex_hdl_t pcibr_vhdl)
{
pcibr_soft_t pcibr_soft = pcibr_soft_get(pcibr_vhdl);
xwidgetnum_t widget = pcibr_soft->bs_xid;
* Copyright (C) 1992 - 1997, 2000-2003 Silicon Graphics, Inc. All rights reserved.
*/
-#define USRPCI 0
-
#include <linux/init.h>
#include <linux/types.h>
#include <linux/pci.h>
#undef DEBUG_PCIIO /* turn this on for yet more console output */
-#define GET_NEW(ptr) (ptr = kmalloc(sizeof (*(ptr)), GFP_KERNEL))
-#define DO_DEL(ptr) (kfree(ptr))
-
char pciio_info_fingerprint[] = "pciio_info";
-cdl_p pciio_registry = NULL;
-
int
badaddr_val(volatile void *addr, int len, volatile void *ptr)
{
extern char master_baseio_wid;
if (master_baseio_nasid < 0) {
- nasid_t tmp;
-
master_baseio_nasid = ia64_sn_get_master_baseio_nasid();
if ( master_baseio_nasid >= 0 ) {
}
int
-hub_dma_enabled(devfs_handle_t xconn_vhdl)
+hub_dma_enabled(vertex_hdl_t xconn_vhdl)
{
return(0);
}
int
-hub_error_devenable(devfs_handle_t xconn_vhdl, int devnum, int error_code)
+hub_error_devenable(vertex_hdl_t xconn_vhdl, int devnum, int error_code)
{
return(0);
}
*/
#if !defined(DEV_FUNC)
-static pciio_provider_t *pciio_to_provider_fns(devfs_handle_t dev);
+static pciio_provider_t *pciio_to_provider_fns(vertex_hdl_t dev);
#endif
-pciio_piomap_t pciio_piomap_alloc(devfs_handle_t, device_desc_t, pciio_space_t, iopaddr_t, size_t, size_t, unsigned);
+pciio_piomap_t pciio_piomap_alloc(vertex_hdl_t, device_desc_t, pciio_space_t, iopaddr_t, size_t, size_t, unsigned);
void pciio_piomap_free(pciio_piomap_t);
caddr_t pciio_piomap_addr(pciio_piomap_t, iopaddr_t, size_t);
void pciio_piomap_done(pciio_piomap_t);
-caddr_t pciio_piotrans_addr(devfs_handle_t, device_desc_t, pciio_space_t, iopaddr_t, size_t, unsigned);
-caddr_t pciio_pio_addr(devfs_handle_t, device_desc_t, pciio_space_t, iopaddr_t, size_t, pciio_piomap_t *, unsigned);
+caddr_t pciio_piotrans_addr(vertex_hdl_t, device_desc_t, pciio_space_t, iopaddr_t, size_t, unsigned);
+caddr_t pciio_pio_addr(vertex_hdl_t, device_desc_t, pciio_space_t, iopaddr_t, size_t, pciio_piomap_t *, unsigned);
-iopaddr_t pciio_piospace_alloc(devfs_handle_t, device_desc_t, pciio_space_t, size_t, size_t);
-void pciio_piospace_free(devfs_handle_t, pciio_space_t, iopaddr_t, size_t);
+iopaddr_t pciio_piospace_alloc(vertex_hdl_t, device_desc_t, pciio_space_t, size_t, size_t);
+void pciio_piospace_free(vertex_hdl_t, pciio_space_t, iopaddr_t, size_t);
-pciio_dmamap_t pciio_dmamap_alloc(devfs_handle_t, device_desc_t, size_t, unsigned);
+pciio_dmamap_t pciio_dmamap_alloc(vertex_hdl_t, device_desc_t, size_t, unsigned);
void pciio_dmamap_free(pciio_dmamap_t);
iopaddr_t pciio_dmamap_addr(pciio_dmamap_t, paddr_t, size_t);
-alenlist_t pciio_dmamap_list(pciio_dmamap_t, alenlist_t, unsigned);
void pciio_dmamap_done(pciio_dmamap_t);
-iopaddr_t pciio_dmatrans_addr(devfs_handle_t, device_desc_t, paddr_t, size_t, unsigned);
-alenlist_t pciio_dmatrans_list(devfs_handle_t, device_desc_t, alenlist_t, unsigned);
+iopaddr_t pciio_dmatrans_addr(vertex_hdl_t, device_desc_t, paddr_t, size_t, unsigned);
void pciio_dmamap_drain(pciio_dmamap_t);
-void pciio_dmaaddr_drain(devfs_handle_t, paddr_t, size_t);
-void pciio_dmalist_drain(devfs_handle_t, alenlist_t);
-iopaddr_t pciio_dma_addr(devfs_handle_t, device_desc_t, paddr_t, size_t, pciio_dmamap_t *, unsigned);
+void pciio_dmaaddr_drain(vertex_hdl_t, paddr_t, size_t);
+void pciio_dmalist_drain(vertex_hdl_t, alenlist_t);
+iopaddr_t pciio_dma_addr(vertex_hdl_t, device_desc_t, paddr_t, size_t, pciio_dmamap_t *, unsigned);
-pciio_intr_t pciio_intr_alloc(devfs_handle_t, device_desc_t, pciio_intr_line_t, devfs_handle_t);
+pciio_intr_t pciio_intr_alloc(vertex_hdl_t, device_desc_t, pciio_intr_line_t, vertex_hdl_t);
void pciio_intr_free(pciio_intr_t);
int pciio_intr_connect(pciio_intr_t, intr_func_t, intr_arg_t);
void pciio_intr_disconnect(pciio_intr_t);
-devfs_handle_t pciio_intr_cpu_get(pciio_intr_t);
+vertex_hdl_t pciio_intr_cpu_get(pciio_intr_t);
void pciio_slot_func_to_name(char *, pciio_slot_t, pciio_function_t);
-void pciio_provider_startup(devfs_handle_t);
-void pciio_provider_shutdown(devfs_handle_t);
+void pciio_provider_startup(vertex_hdl_t);
+void pciio_provider_shutdown(vertex_hdl_t);
-pciio_endian_t pciio_endian_set(devfs_handle_t, pciio_endian_t, pciio_endian_t);
-pciio_priority_t pciio_priority_set(devfs_handle_t, pciio_priority_t);
-devfs_handle_t pciio_intr_dev_get(pciio_intr_t);
+pciio_endian_t pciio_endian_set(vertex_hdl_t, pciio_endian_t, pciio_endian_t);
+pciio_priority_t pciio_priority_set(vertex_hdl_t, pciio_priority_t);
+vertex_hdl_t pciio_intr_dev_get(pciio_intr_t);
-devfs_handle_t pciio_pio_dev_get(pciio_piomap_t);
+vertex_hdl_t pciio_pio_dev_get(pciio_piomap_t);
pciio_slot_t pciio_pio_slot_get(pciio_piomap_t);
pciio_space_t pciio_pio_space_get(pciio_piomap_t);
iopaddr_t pciio_pio_pciaddr_get(pciio_piomap_t);
ulong pciio_pio_mapsz_get(pciio_piomap_t);
caddr_t pciio_pio_kvaddr_get(pciio_piomap_t);
-devfs_handle_t pciio_dma_dev_get(pciio_dmamap_t);
+vertex_hdl_t pciio_dma_dev_get(pciio_dmamap_t);
pciio_slot_t pciio_dma_slot_get(pciio_dmamap_t);
-pciio_info_t pciio_info_chk(devfs_handle_t);
-pciio_info_t pciio_info_get(devfs_handle_t);
-void pciio_info_set(devfs_handle_t, pciio_info_t);
-devfs_handle_t pciio_info_dev_get(pciio_info_t);
+pciio_info_t pciio_info_chk(vertex_hdl_t);
+pciio_info_t pciio_info_get(vertex_hdl_t);
+void pciio_info_set(vertex_hdl_t, pciio_info_t);
+vertex_hdl_t pciio_info_dev_get(pciio_info_t);
pciio_slot_t pciio_info_slot_get(pciio_info_t);
pciio_function_t pciio_info_function_get(pciio_info_t);
pciio_vendor_id_t pciio_info_vendor_id_get(pciio_info_t);
pciio_device_id_t pciio_info_device_id_get(pciio_info_t);
-devfs_handle_t pciio_info_master_get(pciio_info_t);
+vertex_hdl_t pciio_info_master_get(pciio_info_t);
arbitrary_info_t pciio_info_mfast_get(pciio_info_t);
pciio_provider_t *pciio_info_pops_get(pciio_info_t);
error_handler_f *pciio_info_efunc_get(pciio_info_t);
iopaddr_t pciio_info_rom_base_get(pciio_info_t);
size_t pciio_info_rom_size_get(pciio_info_t);
-void pciio_init(void);
-int pciio_attach(devfs_handle_t);
+int pciio_attach(vertex_hdl_t);
-void pciio_provider_register(devfs_handle_t, pciio_provider_t *pciio_fns);
-void pciio_provider_unregister(devfs_handle_t);
-pciio_provider_t *pciio_provider_fns_get(devfs_handle_t);
+void pciio_provider_register(vertex_hdl_t, pciio_provider_t *pciio_fns);
+void pciio_provider_unregister(vertex_hdl_t);
+pciio_provider_t *pciio_provider_fns_get(vertex_hdl_t);
int pciio_driver_register(pciio_vendor_id_t, pciio_device_id_t, char *driver_prefix, unsigned);
-void pciio_driver_unregister(char *driver_prefix);
-devfs_handle_t pciio_device_register(devfs_handle_t, devfs_handle_t, pciio_slot_t, pciio_function_t, pciio_vendor_id_t, pciio_device_id_t);
+vertex_hdl_t pciio_device_register(vertex_hdl_t, vertex_hdl_t, pciio_slot_t, pciio_function_t, pciio_vendor_id_t, pciio_device_id_t);
-void pciio_device_unregister(devfs_handle_t);
-pciio_info_t pciio_device_info_new(pciio_info_t, devfs_handle_t, pciio_slot_t, pciio_function_t, pciio_vendor_id_t, pciio_device_id_t);
+void pciio_device_unregister(vertex_hdl_t);
+pciio_info_t pciio_device_info_new(pciio_info_t, vertex_hdl_t, pciio_slot_t, pciio_function_t, pciio_vendor_id_t, pciio_device_id_t);
void pciio_device_info_free(pciio_info_t);
-devfs_handle_t pciio_device_info_register(devfs_handle_t, pciio_info_t);
-void pciio_device_info_unregister(devfs_handle_t, pciio_info_t);
-int pciio_device_attach(devfs_handle_t, int);
-int pciio_device_detach(devfs_handle_t, int);
-void pciio_error_register(devfs_handle_t, error_handler_f *, error_handler_arg_t);
+vertex_hdl_t pciio_device_info_register(vertex_hdl_t, pciio_info_t);
+void pciio_device_info_unregister(vertex_hdl_t, pciio_info_t);
+int pciio_device_attach(vertex_hdl_t, int);
+int pciio_device_detach(vertex_hdl_t, int);
+void pciio_error_register(vertex_hdl_t, error_handler_f *, error_handler_arg_t);
-int pciio_reset(devfs_handle_t);
-int pciio_write_gather_flush(devfs_handle_t);
-int pciio_slot_inuse(devfs_handle_t);
+int pciio_reset(vertex_hdl_t);
+int pciio_write_gather_flush(vertex_hdl_t);
+int pciio_slot_inuse(vertex_hdl_t);
/* =====================================================================
* Provider Function Location
#if !defined(DEV_FUNC)
static pciio_provider_t *
-pciio_to_provider_fns(devfs_handle_t dev)
+pciio_to_provider_fns(vertex_hdl_t dev)
{
pciio_info_t card_info;
pciio_provider_t *provider_fns;
*/
pciio_piomap_t
-pciio_piomap_alloc(devfs_handle_t dev, /* set up mapping for this device */
+pciio_piomap_alloc(vertex_hdl_t dev, /* set up mapping for this device */
device_desc_t dev_desc, /* device descriptor */
pciio_space_t space, /* CFG, MEM, IO, or a device-decoded window */
iopaddr_t addr, /* lowest address (or offset in window) */
}
caddr_t
-pciio_piotrans_addr(devfs_handle_t dev, /* translate for this device */
+pciio_piotrans_addr(vertex_hdl_t dev, /* translate for this device */
device_desc_t dev_desc, /* device descriptor */
pciio_space_t space, /* CFG, MEM, IO, or a device-decoded window */
iopaddr_t addr, /* starting address (or offset in window) */
}
caddr_t
-pciio_pio_addr(devfs_handle_t dev, /* translate for this device */
+pciio_pio_addr(vertex_hdl_t dev, /* translate for this device */
device_desc_t dev_desc, /* device descriptor */
pciio_space_t space, /* CFG, MEM, IO, or a device-decoded window */
iopaddr_t addr, /* starting address (or offset in window) */
}
iopaddr_t
-pciio_piospace_alloc(devfs_handle_t dev, /* Device requiring space */
+pciio_piospace_alloc(vertex_hdl_t dev, /* Device requiring space */
device_desc_t dev_desc, /* Device descriptor */
pciio_space_t space, /* MEM32/MEM64/IO */
size_t byte_count, /* Size of mapping */
}
void
-pciio_piospace_free(devfs_handle_t dev, /* Device freeing space */
+pciio_piospace_free(vertex_hdl_t dev, /* Device freeing space */
pciio_space_t space, /* Type of space */
iopaddr_t pciaddr, /* starting address */
size_t byte_count)
*/
pciio_dmamap_t
-pciio_dmamap_alloc(devfs_handle_t dev, /* set up mappings for this device */
+pciio_dmamap_alloc(vertex_hdl_t dev, /* set up mappings for this device */
device_desc_t dev_desc, /* device descriptor */
size_t byte_count_max, /* max size of a mapping */
unsigned flags)
(CAST_DMAMAP(pciio_dmamap), paddr, byte_count);
}
-alenlist_t
-pciio_dmamap_list(pciio_dmamap_t pciio_dmamap, /* use these mapping resources */
- alenlist_t alenlist, /* map this Address/Length List */
- unsigned flags)
-{
- return DMAMAP_FUNC(pciio_dmamap, dmamap_list)
- (CAST_DMAMAP(pciio_dmamap), alenlist, flags);
-}
-
void
pciio_dmamap_done(pciio_dmamap_t pciio_dmamap)
{
}
iopaddr_t
-pciio_dmatrans_addr(devfs_handle_t dev, /* translate for this device */
+pciio_dmatrans_addr(vertex_hdl_t dev, /* translate for this device */
device_desc_t dev_desc, /* device descriptor */
paddr_t paddr, /* system physical address */
size_t byte_count, /* length */
(dev, dev_desc, paddr, byte_count, flags);
}
-alenlist_t
-pciio_dmatrans_list(devfs_handle_t dev, /* translate for this device */
- device_desc_t dev_desc, /* device descriptor */
- alenlist_t palenlist, /* system address/length list */
- unsigned flags)
-{ /* defined in dma.h */
- return DEV_FUNC(dev, dmatrans_list)
- (dev, dev_desc, palenlist, flags);
-}
-
iopaddr_t
-pciio_dma_addr(devfs_handle_t dev, /* translate for this device */
+pciio_dma_addr(vertex_hdl_t dev, /* translate for this device */
device_desc_t dev_desc, /* device descriptor */
paddr_t paddr, /* system physical address */
size_t byte_count, /* length */
}
void
-pciio_dmaaddr_drain(devfs_handle_t dev, paddr_t addr, size_t size)
+pciio_dmaaddr_drain(vertex_hdl_t dev, paddr_t addr, size_t size)
{
DEV_FUNC(dev, dmaaddr_drain)
(dev, addr, size);
}
void
-pciio_dmalist_drain(devfs_handle_t dev, alenlist_t list)
+pciio_dmalist_drain(vertex_hdl_t dev, alenlist_t list)
{
DEV_FUNC(dev, dmalist_drain)
(dev, list);
* Return resource handle in intr_hdl.
*/
pciio_intr_t
-pciio_intr_alloc(devfs_handle_t dev, /* which Crosstalk device */
+pciio_intr_alloc(vertex_hdl_t dev, /* which Crosstalk device */
device_desc_t dev_desc, /* device descriptor */
pciio_intr_line_t lines, /* INTR line(s) to attach */
- devfs_handle_t owner_dev)
+ vertex_hdl_t owner_dev)
{ /* owner of this interrupt */
return (pciio_intr_t) DEV_FUNC(dev, intr_alloc)
(dev, dev_desc, lines, owner_dev);
* Return a hwgraph vertex that represents the CPU currently
* targeted by an interrupt.
*/
-devfs_handle_t
+vertex_hdl_t
pciio_intr_cpu_get(pciio_intr_t intr_hdl)
{
return INTR_FUNC(intr_hdl, intr_cpu_get)
*/
static pciio_info_t
pciio_cardinfo_get(
- devfs_handle_t pciio_vhdl,
+ vertex_hdl_t pciio_vhdl,
pciio_slot_t pci_slot)
{
char namebuf[16];
pciio_info_t info = 0;
- devfs_handle_t conn;
+ vertex_hdl_t conn;
pciio_slot_func_to_name(namebuf, pci_slot, PCIIO_FUNC_NONE);
if (GRAPH_SUCCESS ==
/*ARGSUSED */
int
pciio_error_handler(
- devfs_handle_t pciio_vhdl,
+ vertex_hdl_t pciio_vhdl,
int error_code,
ioerror_mode_t mode,
ioerror_t *ioerror)
{
pciio_info_t pciio_info;
- devfs_handle_t pconn_vhdl;
-#if USRPCI
- devfs_handle_t usrpci_v;
-#endif
+ vertex_hdl_t pconn_vhdl;
pciio_slot_t slot;
int retval;
-#ifdef EHE_ENABLE
- error_state_t e_state;
-#endif /* EHE_ENABLE */
#if DEBUG && ERROR_DEBUG
printk("%v: pciio_error_handler\n", pciio_vhdl);
if (pciio_info && pciio_info->c_efunc) {
pconn_vhdl = pciio_info_dev_get(pciio_info);
-#ifdef EHE_ENABLE
- e_state = error_state_get(pciio_vhdl);
-
- if (e_state == ERROR_STATE_ACTION)
- (void)error_state_set(pciio_vhdl, ERROR_STATE_NONE);
-
- if (error_state_set(pconn_vhdl,e_state) == ERROR_RETURN_CODE_CANNOT_SET_STATE)
- return(IOERROR_UNHANDLED);
-#endif
-
retval = pciio_info->c_efunc
(pciio_info->c_einfo, error_code, mode, ioerror);
if (retval != IOERROR_UNHANDLED)
pconn_vhdl = pciio_info_dev_get(pciio_info);
-#ifdef EHE_ENABLE
- e_state = error_state_get(pciio_vhdl);
-
- if (e_state == ERROR_STATE_ACTION)
- (void)error_state_set(pciio_vhdl, ERROR_STATE_NONE);
-
- if (error_state_set(pconn_vhdl,e_state) ==
- ERROR_RETURN_CODE_CANNOT_SET_STATE)
- return(IOERROR_UNHANDLED);
-#endif /* EHE_ENABLE */
-
retval = pciio_info->c_efunc
(pciio_info->c_einfo, error_code, mode, ioerror);
if (retval != IOERROR_UNHANDLED)
return retval;
}
-
-#if USRPCI
- /* If the USRPCI driver is available and
- * knows about this connection point,
- * deliver the error to it.
- *
- * OK to use pconn_vhdl here, even though we
- * have already UNREF'd it, since we know that
- * it is not going away.
- */
- pconn_vhdl = pciio_info_dev_get(pciio_info);
- if (GRAPH_SUCCESS == hwgraph_traverse(pconn_vhdl, EDGE_LBL_USRPCI, &usrpci_v)) {
- iopaddr_t busaddr;
- IOERROR_GETVALUE(busaddr, ioerror, busaddr);
- retval = usrpci_error_handler (usrpci_v, error_code, busaddr);
- hwgraph_vertex_unref(usrpci_v);
- if (retval != IOERROR_UNHANDLED) {
- /*
- * This unref is not needed. If this code is called often enough,
- * the system will crash, due to vertex reference count reaching 0,
- * causing vertex to be unallocated. -jeremy
- * hwgraph_vertex_unref(pconn_vhdl);
- */
- return retval;
- }
- }
-#endif
}
}
* Startup a crosstalk provider
*/
void
-pciio_provider_startup(devfs_handle_t pciio_provider)
+pciio_provider_startup(vertex_hdl_t pciio_provider)
{
DEV_FUNC(pciio_provider, provider_startup)
(pciio_provider);
* Shutdown a crosstalk provider
*/
void
-pciio_provider_shutdown(devfs_handle_t pciio_provider)
+pciio_provider_shutdown(vertex_hdl_t pciio_provider)
{
DEV_FUNC(pciio_provider, provider_shutdown)
(pciio_provider);
* how things will actually appear in memory.
*/
pciio_endian_t
-pciio_endian_set(devfs_handle_t dev,
+pciio_endian_set(vertex_hdl_t dev,
pciio_endian_t device_end,
pciio_endian_t desired_end)
{
* Specify PCI arbitration priority.
*/
pciio_priority_t
-pciio_priority_set(devfs_handle_t dev,
+pciio_priority_set(vertex_hdl_t dev,
pciio_priority_t device_prio)
{
ASSERT((device_prio == PCI_PRIO_HIGH) || (device_prio == PCI_PRIO_LOW));
* Read value of configuration register
*/
uint64_t
-pciio_config_get(devfs_handle_t dev,
+pciio_config_get(vertex_hdl_t dev,
unsigned reg,
unsigned size)
{
* Change value of configuration register
*/
void
-pciio_config_set(devfs_handle_t dev,
+pciio_config_set(vertex_hdl_t dev,
unsigned reg,
unsigned size,
uint64_t value)
* Issue a hardware reset to a card.
*/
int
-pciio_reset(devfs_handle_t dev)
+pciio_reset(vertex_hdl_t dev)
{
return DEV_FUNC(dev, reset) (dev);
}
* flush write gather buffers
*/
int
-pciio_write_gather_flush(devfs_handle_t dev)
+pciio_write_gather_flush(vertex_hdl_t dev)
{
return DEV_FUNC(dev, write_gather_flush) (dev);
}
-devfs_handle_t
+vertex_hdl_t
pciio_intr_dev_get(pciio_intr_t pciio_intr)
{
return (pciio_intr->pi_dev);
}
/****** Generic crosstalk pio interfaces ******/
-devfs_handle_t
+vertex_hdl_t
pciio_pio_dev_get(pciio_piomap_t pciio_piomap)
{
return (pciio_piomap->pp_dev);
}
/****** Generic crosstalk dma interfaces ******/
-devfs_handle_t
+vertex_hdl_t
pciio_dma_dev_get(pciio_dmamap_t pciio_dmamap)
{
return (pciio_dmamap->pd_dev);
/****** Generic pci slot information interfaces ******/
pciio_info_t
-pciio_info_chk(devfs_handle_t pciio)
+pciio_info_chk(vertex_hdl_t pciio)
{
arbitrary_info_t ainfo = 0;
}
pciio_info_t
-pciio_info_get(devfs_handle_t pciio)
+pciio_info_get(vertex_hdl_t pciio)
{
pciio_info_t pciio_info;
#endif /* DEBUG_PCIIO */
if ((pciio_info != NULL) &&
- (pciio_info->c_fingerprint != pciio_info_fingerprint)
- && (pciio_info->c_fingerprint != NULL)) {
+ (pciio_info->c_fingerprint != pciio_info_fingerprint)
+ && (pciio_info->c_fingerprint != NULL)) {
- return((pciio_info_t)-1); /* Should panic .. */
+ return((pciio_info_t)-1); /* Should panic .. */
}
-
return pciio_info;
}
void
-pciio_info_set(devfs_handle_t pciio, pciio_info_t pciio_info)
+pciio_info_set(vertex_hdl_t pciio, pciio_info_t pciio_info)
{
if (pciio_info != NULL)
pciio_info->c_fingerprint = pciio_info_fingerprint;
(arbitrary_info_t) pciio_info);
}
-devfs_handle_t
+vertex_hdl_t
pciio_info_dev_get(pciio_info_t pciio_info)
{
return (pciio_info->c_vertex);
return (pciio_info->c_device);
}
-devfs_handle_t
+vertex_hdl_t
pciio_info_master_get(pciio_info_t pciio_info)
{
return (pciio_info->c_master);
*/
/*
- * pciioinit: called once during device driver
- * initializtion if this driver is configured into
- * the system.
- */
-void
-pciio_init(void)
-{
- cdl_p cp;
-
-#if DEBUG && ATTACH_DEBUG
- printf("pciio_init\n");
-#endif
- /* Allocate the registry.
- * We might already have one.
- * If we don't, go get one.
- * MPness: someone might have
- * set one up for us while we
- * were not looking; use an atomic
- * compare-and-swap to commit to
- * using the new registry if and
- * only if nobody else did first.
- * If someone did get there first,
- * toss the one we allocated back
- * into the pool.
- */
- if (pciio_registry == NULL) {
- cp = cdl_new(EDGE_LBL_PCI, "vendor", "device");
- if (!compare_and_swap_ptr((void **) &pciio_registry, NULL, (void *) cp)) {
- cdl_del(cp);
- }
- }
- ASSERT(pciio_registry != NULL);
-}
-
-/*
* pciioattach: called for each vertex in the graph
* that is a PCI provider.
*/
/*ARGSUSED */
int
-pciio_attach(devfs_handle_t pciio)
+pciio_attach(vertex_hdl_t pciio)
{
#if DEBUG && ATTACH_DEBUG
#if defined(SUPPORT_PRINTING_V_FORMAT)
* Associate a set of pciio_provider functions with a vertex.
*/
void
-pciio_provider_register(devfs_handle_t provider, pciio_provider_t *pciio_fns)
+pciio_provider_register(vertex_hdl_t provider, pciio_provider_t *pciio_fns)
{
hwgraph_info_add_LBL(provider, INFO_LBL_PFUNCS, (arbitrary_info_t) pciio_fns);
}
* Disassociate a set of pciio_provider functions with a vertex.
*/
void
-pciio_provider_unregister(devfs_handle_t provider)
+pciio_provider_unregister(vertex_hdl_t provider)
{
arbitrary_info_t ainfo;
* provider.
*/
pciio_provider_t *
-pciio_provider_fns_get(devfs_handle_t provider)
+pciio_provider_fns_get(vertex_hdl_t provider)
{
arbitrary_info_t ainfo = 0;
char *driver_prefix,
unsigned flags)
{
- /* a driver's init routine might call
- * pciio_driver_register before the
- * system calls pciio_init; so we
- * make the init call ourselves here.
- */
- if (pciio_registry == NULL)
- pciio_init();
-
- return cdl_add_driver(pciio_registry,
- vendor_id, device_id,
- driver_prefix, flags, NULL);
-}
-
-/*
- * Remove an initialization function.
- */
-void
-pciio_driver_unregister(
- char *driver_prefix)
-{
- /* before a driver calls unregister,
- * it must have called register; so
- * we can assume we have a registry here.
- */
- ASSERT(pciio_registry != NULL);
-
- cdl_del_driver(pciio_registry, driver_prefix, NULL);
-}
-
-/*
- * Set the slot status for a device supported by the
- * driver being registered.
- */
-void
-pciio_driver_reg_callback(
- devfs_handle_t pconn_vhdl,
- int key1,
- int key2,
- int error)
-{
-}
-
-/*
- * Set the slot status for a device supported by the
- * driver being unregistered.
- */
-void
-pciio_driver_unreg_callback(
- devfs_handle_t pconn_vhdl,
- int key1,
- int key2,
- int error)
-{
-}
-
-/*
- * Call some function with each vertex that
- * might be one of this driver's attach points.
- */
-void
-pciio_iterate(char *driver_prefix,
- pciio_iter_f * func)
-{
- /* a driver's init routine might call
- * pciio_iterate before the
- * system calls pciio_init; so we
- * make the init call ourselves here.
- */
- if (pciio_registry == NULL)
- pciio_init();
-
- ASSERT(pciio_registry != NULL);
-
- cdl_iterate(pciio_registry, driver_prefix, (cdl_iter_f *) func);
+ return(0);
}
-devfs_handle_t
+vertex_hdl_t
pciio_device_register(
- devfs_handle_t connectpt, /* vertex for /hw/.../pciio/%d */
- devfs_handle_t master, /* card's master ASIC (PCI provider) */
+ vertex_hdl_t connectpt, /* vertex for /hw/.../pciio/%d */
+ vertex_hdl_t master, /* card's master ASIC (PCI provider) */
pciio_slot_t slot, /* card's slot */
pciio_function_t func, /* card's func */
pciio_vendor_id_t vendor_id,
}
void
-pciio_device_unregister(devfs_handle_t pconn)
+pciio_device_unregister(vertex_hdl_t pconn)
{
DEV_FUNC(pconn,device_unregister)(pconn);
}
pciio_info_t
pciio_device_info_new(
pciio_info_t pciio_info,
- devfs_handle_t master,
+ vertex_hdl_t master,
pciio_slot_t slot,
pciio_function_t func,
pciio_vendor_id_t vendor_id,
pciio_device_id_t device_id)
{
if (!pciio_info)
- GET_NEW(pciio_info);
+ NEW(pciio_info);
ASSERT(pciio_info != NULL);
pciio_info->c_slot = slot;
BZERO((char *)pciio_info,sizeof(pciio_info));
}
-devfs_handle_t
+vertex_hdl_t
pciio_device_info_register(
- devfs_handle_t connectpt, /* vertex at center of bus */
+ vertex_hdl_t connectpt, /* vertex at center of bus */
pciio_info_t pciio_info) /* details about the connectpt */
{
char name[32];
- devfs_handle_t pconn;
- int device_master_set(devfs_handle_t, devfs_handle_t);
+ vertex_hdl_t pconn;
+ int device_master_set(vertex_hdl_t, vertex_hdl_t);
pciio_slot_func_to_name(name,
pciio_info->c_slot,
*/
device_master_set(pconn, pciio_info->c_master);
-
-#if USRPCI
- /*
- * Call into usrpci provider to let it initialize for
- * the given slot.
- */
- if (pciio_info->c_slot != PCIIO_SLOT_NONE)
- usrpci_device_register(pconn, pciio_info->c_master, pciio_info->c_slot);
-#endif
-
return pconn;
}
void
-pciio_device_info_unregister(devfs_handle_t connectpt,
+pciio_device_info_unregister(vertex_hdl_t connectpt,
pciio_info_t pciio_info)
{
char name[32];
- devfs_handle_t pconn;
+ vertex_hdl_t pconn;
if (!pciio_info)
return;
/* Add the pci card inventory information to the hwgraph
*/
static void
-pciio_device_inventory_add(devfs_handle_t pconn_vhdl)
+pciio_device_inventory_add(vertex_hdl_t pconn_vhdl)
{
pciio_info_t pciio_info = pciio_info_get(pconn_vhdl);
/*ARGSUSED */
int
-pciio_device_attach(devfs_handle_t pconn,
+pciio_device_attach(vertex_hdl_t pconn,
int drv_flags)
{
pciio_info_t pciio_info;
* pciio_init) have been called; so we
* can assume here that we have a registry.
*/
- ASSERT(pciio_registry != NULL);
- return(cdl_add_connpt(pciio_registry, vendor_id, device_id, pconn, drv_flags));
+ return(cdl_add_connpt(vendor_id, device_id, pconn, drv_flags));
}
int
-pciio_device_detach(devfs_handle_t pconn,
+pciio_device_detach(vertex_hdl_t pconn,
int drv_flags)
{
- pciio_info_t pciio_info;
- pciio_vendor_id_t vendor_id;
- pciio_device_id_t device_id;
-
- pciio_info = pciio_info_get(pconn);
-
- vendor_id = pciio_info->c_vendor;
- device_id = pciio_info->c_device;
-
- /* we don't start attaching things until
- * all the driver init routines (including
- * pciio_init) have been called; so we
- * can assume here that we have a registry.
- */
- ASSERT(pciio_registry != NULL);
-
- return(cdl_del_connpt(pciio_registry, vendor_id, device_id,
- pconn, drv_flags));
-
+ return(0);
}
/* SN2 */
* cooperating drivers, well, cooperate ...
*/
void
-pciio_error_register(devfs_handle_t pconn,
+pciio_error_register(vertex_hdl_t pconn,
error_handler_f *efunc,
error_handler_arg_t einfo)
{
* vhdl is the vertex for the slot
*/
int
-pciio_slot_inuse(devfs_handle_t pconn_vhdl)
+pciio_slot_inuse(vertex_hdl_t pconn_vhdl)
{
pciio_info_t pciio_info = pciio_info_get(pconn_vhdl);
}
int
-pciio_dma_enabled(devfs_handle_t pconn_vhdl)
+pciio_dma_enabled(vertex_hdl_t pconn_vhdl)
{
return DEV_FUNC(pconn_vhdl, dma_enabled)(pconn_vhdl);
}
/*
* These are complementary Linux interfaces that takes in a pci_dev * as the
- * first arguement instead of devfs_handle_t.
+ * first argument instead of vertex_hdl_t.
*/
iopaddr_t snia_pciio_dmatrans_addr(struct pci_dev *, device_desc_t, paddr_t, size_t, unsigned);
pciio_dmamap_t snia_pciio_dmamap_alloc(struct pci_dev *, device_desc_t, size_t, unsigned);
int *count_vchan0,
int *count_vchan1)
{
- devfs_handle_t dev = PCIDEV_VERTEX(pci_dev);
+ vertex_hdl_t dev = PCIDEV_VERTEX(pci_dev);
return pcibr_rrb_alloc(dev, count_vchan0, count_vchan1);
}
pciio_endian_t device_end,
pciio_endian_t desired_end)
{
- devfs_handle_t dev = PCIDEV_VERTEX(pci_dev);
+ vertex_hdl_t dev = PCIDEV_VERTEX(pci_dev);
return DEV_FUNC(dev, endian_set)
(dev, device_end, desired_end);
unsigned flags)
{ /* defined in dma.h */
- devfs_handle_t dev = PCIDEV_VERTEX(pci_dev);
+ vertex_hdl_t dev = PCIDEV_VERTEX(pci_dev);
/*
* If the device is not a PIC, we always want the PCIIO_BYTE_STREAM to be
unsigned flags)
{ /* defined in dma.h */
- devfs_handle_t dev = PCIDEV_VERTEX(pci_dev);
+ vertex_hdl_t dev = PCIDEV_VERTEX(pci_dev);
/*
* If the device is not a PIC, we always want the PCIIO_BYTE_STREAM to be
#include <asm/sn/prio.h>
#include <asm/sn/xtalk/xbow.h>
#include <asm/sn/ioc3.h>
-#include <asm/sn/eeprom.h>
#include <asm/sn/io.h>
#include <asm/sn/sn_private.h>
#define PCI_BUS_NO_1 1
-int pic_devflag = D_MP;
+extern int pcibr_attach2(vertex_hdl_t, bridge_t *, vertex_hdl_t, int, pcibr_soft_t *);
+extern void pcibr_driver_reg_callback(vertex_hdl_t, int, int, int);
+extern void pcibr_driver_unreg_callback(vertex_hdl_t, int, int, int);
-extern int pcibr_attach2(devfs_handle_t, bridge_t *, devfs_handle_t, int, pcibr_soft_t *);
-extern void pcibr_driver_reg_callback(devfs_handle_t, int, int, int);
-extern void pcibr_driver_unreg_callback(devfs_handle_t, int, int, int);
-
-
-void
-pic_init(void)
-{
- PCIBR_DEBUG_ALWAYS((PCIBR_DEBUG_INIT, NULL, "pic_init()\n"));
-
- xwidget_driver_register(PIC_WIDGET_PART_NUM_BUS0,
- PIC_WIDGET_MFGR_NUM,
- "pic_",
- 0);
-}
/*
* copy inventory_t from conn_v to peer_conn_v
*/
int
-pic_bus1_inventory_dup(devfs_handle_t conn_v, devfs_handle_t peer_conn_v)
+pic_bus1_inventory_dup(vertex_hdl_t conn_v, vertex_hdl_t peer_conn_v)
{
inventory_t *pinv, *peer_pinv;
(arbitrary_info_t *)&pinv) == GRAPH_SUCCESS)
{
NEW(peer_pinv);
- bcopy(pinv, peer_pinv, sizeof(inventory_t));
+ bcopy((const char *)pinv, (char *)peer_pinv, sizeof(inventory_t));
if (hwgraph_info_add_LBL(peer_conn_v, INFO_LBL_INVENT,
(arbitrary_info_t)peer_pinv) != GRAPH_SUCCESS) {
DEL(peer_pinv);
return 1;
}
- printk("pic_bus1_inventory_dup: cannot get INFO_LBL_INVENT from 0x%lx\n ",
- conn_v);
+ printk("pic_bus1_inventory_dup: cannot get INFO_LBL_INVENT from 0x%lx\n ", (uint64_t)conn_v);
return 0;
}
* copy xwidget_info_t from conn_v to peer_conn_v
*/
int
-pic_bus1_widget_info_dup(devfs_handle_t conn_v, devfs_handle_t peer_conn_v,
+pic_bus1_widget_info_dup(vertex_hdl_t conn_v, vertex_hdl_t peer_conn_v,
cnodeid_t xbow_peer)
{
xwidget_info_t widget_info, peer_widget_info;
char peer_path[256];
- char *p;
- devfs_handle_t peer_hubv;
+ vertex_hdl_t peer_hubv;
hubinfo_t peer_hub_info;
/* get the peer hub's widgetid */
}
printk("pic_bus1_widget_info_dup: "
- "cannot get INFO_LBL_XWIDGET from 0x%lx\n", conn_v);
+ "cannot get INFO_LBL_XWIDGET from 0x%lx\n", (uint64_t)conn_v);
return 0;
}
* If not successful, return zero and both buses will attach to the
* vertex passed into pic_attach().
*/
-devfs_handle_t
-pic_bus1_redist(nasid_t nasid, devfs_handle_t conn_v)
+vertex_hdl_t
+pic_bus1_redist(nasid_t nasid, vertex_hdl_t conn_v)
{
cnodeid_t cnode = NASID_TO_COMPACT_NODEID(nasid);
cnodeid_t xbow_peer = -1;
char pathname[256], peer_path[256], tmpbuf[256];
char *p;
int rc;
- devfs_handle_t peer_conn_v;
+ vertex_hdl_t peer_conn_v;
int pos;
slabid_t slab;
/* pcibr widget hw/module/001c11/slab/0/Pbrick/xtalk/12 */
/* sprintf(pathname, "%v", conn_v); */
xbow_peer = NASID_TO_COMPACT_NODEID(NODEPDA(cnode)->xbow_peer);
- pos = devfs_generate_path(conn_v, tmpbuf, 256);
+ pos = hwgfs_generate_path(conn_v, tmpbuf, 256);
strcpy(pathname, &tmpbuf[pos]);
p = pathname + strlen("hw/module/001c01/slab/0/");
rc = hwgraph_traverse(hwgraph_root, peer_path, &peer_conn_v);
if (GRAPH_SUCCESS == rc)
printk("pic_attach: found unexpected vertex: 0x%lx\n",
- peer_conn_v);
+ (uint64_t)peer_conn_v);
else if (GRAPH_NOT_FOUND != rc) {
printk("pic_attach: hwgraph_traverse unexpectedly"
" returned 0x%x\n", rc);
int
-pic_attach(devfs_handle_t conn_v)
+pic_attach(vertex_hdl_t conn_v)
{
int rc;
bridge_t *bridge0, *bridge1 = (bridge_t *)0;
- devfs_handle_t pcibr_vhdl0, pcibr_vhdl1 = (devfs_handle_t)0;
+ vertex_hdl_t pcibr_vhdl0, pcibr_vhdl1 = (vertex_hdl_t)0;
pcibr_soft_t bus0_soft, bus1_soft = (pcibr_soft_t)0;
- devfs_handle_t conn_v0, conn_v1, peer_conn_v;
+ vertex_hdl_t conn_v0, conn_v1, peer_conn_v;
PCIBR_DEBUG_ALWAYS((PCIBR_DEBUG_ATTACH, conn_v, "pic_attach()\n"));
conn_v0 = conn_v1 = conn_v;
/* If dual-ported then split the two PIC buses across both Cbricks */
- if (peer_conn_v = pic_bus1_redist(NASID_GET(bridge0), conn_v))
+ if ((peer_conn_v = (pic_bus1_redist(NASID_GET(bridge0), conn_v))))
conn_v1 = peer_conn_v;
/*
- * Create the vertex for the PCI buses, which week
+ * Create the vertex for the PCI buses, which we
* will also use to hold the pcibr_soft and
* which will be the "master" vertex for all the
* pciio connection points we will hang off it.
/* save a pointer to the PIC's other bus's soft struct */
bus0_soft->bs_peers_soft = bus1_soft;
bus1_soft->bs_peers_soft = bus0_soft;
- bus0_soft->bs_peers_soft = (pcibr_soft_t)0;
PCIBR_DEBUG_ALWAYS((PCIBR_DEBUG_ATTACH, conn_v,
"pic_attach: bus0_soft=0x%x, bus1_soft=0x%x\n",
(pciio_dmamap_alloc_f *) pcibr_dmamap_alloc,
(pciio_dmamap_free_f *) pcibr_dmamap_free,
(pciio_dmamap_addr_f *) pcibr_dmamap_addr,
- (pciio_dmamap_list_f *) pcibr_dmamap_list,
(pciio_dmamap_done_f *) pcibr_dmamap_done,
(pciio_dmatrans_addr_f *) pcibr_dmatrans_addr,
- (pciio_dmatrans_list_f *) pcibr_dmatrans_list,
(pciio_dmamap_drain_f *) pcibr_dmamap_drain,
(pciio_dmaaddr_drain_f *) pcibr_dmaaddr_drain,
(pciio_dmalist_drain_f *) pcibr_dmalist_drain,
+++ /dev/null
-/* $Id$
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Copyright (C) 1992 - 1997, 2000-2003 Silicon Graphics, Inc. All rights reserved.
- */
-
-#include <linux/types.h>
-#include <linux/config.h>
-#include <linux/slab.h>
-#include <asm/sn/sgi.h>
-#include <asm/sn/io.h>
-#include <asm/sn/sn_cpuid.h>
-#include <asm/sn/klconfig.h>
-#include <asm/sn/sn_private.h>
-#include <asm/sn/pci/pciba.h>
-#include <linux/smp.h>
-
-extern void mlreset(void);
-extern int init_hcl(void);
-extern void klgraph_hack_init(void);
-extern void hubspc_init(void);
-extern void pciio_init(void);
-extern void pcibr_init(void);
-extern void xtalk_init(void);
-extern void xbow_init(void);
-extern void xbmon_init(void);
-extern void pciiox_init(void);
-extern void pic_init(void);
-extern void usrpci_init(void);
-extern void ioc3_init(void);
-extern void initialize_io(void);
-extern void klhwg_add_all_modules(devfs_handle_t);
-extern void klhwg_add_all_nodes(devfs_handle_t);
-
-void sn_mp_setup(void);
-extern devfs_handle_t hwgraph_root;
-extern void io_module_init(void);
-extern void pci_bus_cvlink_init(void);
-extern void temp_hack(void);
-
-extern int pci_bus_to_hcl_cvlink(void);
-
-/* #define DEBUG_IO_INIT 1 */
-#ifdef DEBUG_IO_INIT
-#define DBG(x...) printk(x)
-#else
-#define DBG(x...)
-#endif /* DEBUG_IO_INIT */
-
-/*
- * per_hub_init
- *
- * This code is executed once for each Hub chip.
- */
-static void
-per_hub_init(cnodeid_t cnode)
-{
- nasid_t nasid;
- nodepda_t *npdap;
- ii_icmr_u_t ii_icmr;
- ii_ibcr_u_t ii_ibcr;
-
- nasid = COMPACT_TO_NASID_NODEID(cnode);
-
- ASSERT(nasid != INVALID_NASID);
- ASSERT(NASID_TO_COMPACT_NODEID(nasid) == cnode);
-
- npdap = NODEPDA(cnode);
-
- REMOTE_HUB_S(nasid, IIO_IWEIM, 0x8000);
-
- /*
- * Set the total number of CRBs that can be used.
- */
- ii_icmr.ii_icmr_regval= 0x0;
- ii_icmr.ii_icmr_fld_s.i_c_cnt = 0xf;
- REMOTE_HUB_S(nasid, IIO_ICMR, ii_icmr.ii_icmr_regval);
-
- /*
- * Set the number of CRBs that both of the BTEs combined
- * can use minus 1.
- */
- ii_ibcr.ii_ibcr_regval= 0x0;
- ii_ibcr.ii_ibcr_fld_s.i_count = 0x8;
- REMOTE_HUB_S(nasid, IIO_IBCR, ii_ibcr.ii_ibcr_regval);
-
- /*
- * Set CRB timeout to be 10ms.
- */
-#ifdef BRINGUP2
- REMOTE_HUB_S(nasid, IIO_ICTP, 0xffffff );
- REMOTE_HUB_S(nasid, IIO_ICTO, 0xff);
- //REMOTE_HUB_S(nasid, IIO_IWI, 0x00FF00FF00FFFFFF);
-#endif
-
- /* Initialize error interrupts for this hub. */
- hub_error_init(cnode);
-}
-
-/*
- * This routine is responsible for the setup of all the IRIX hwgraph style
- * stuff that's been pulled into linux. It's called by sn_pci_find_bios which
- * is called just before the generic Linux PCI layer does its probing (by
- * platform_pci_fixup aka sn_pci_fixup).
- *
- * It is very IMPORTANT that this call is only made by the Master CPU!
- *
- */
-
-void
-sgi_master_io_infr_init(void)
-{
- int cnode;
- extern void kdba_io_init();
-
- /*
- * Do any early init stuff .. einit_tbl[] etc.
- */
- init_hcl(); /* Sets up the hwgraph compatibility layer with devfs */
-
- /*
- * initialize the Linux PCI to xwidget vertexes ..
- */
- pci_bus_cvlink_init();
-
- kdba_io_init();
-
-#ifdef BRINGUP
- /*
- * Hack to provide statically initialzed klgraph entries.
- */
- DBG("--> sgi_master_io_infr_init: calling klgraph_hack_init()\n");
- klgraph_hack_init();
-#endif /* BRINGUP */
-
- /*
- * This is the Master CPU. Emulate mlsetup and main.c in Irix.
- */
- mlreset();
-
- /*
- * allowboot() is called by kern/os/main.c in main()
- * Emulate allowboot() ...
- * per_cpu_init() - only need per_hub_init()
- * cpu_io_setup() - Nothing to do.
- *
- */
- sn_mp_setup();
-
- for (cnode = 0; cnode < numnodes; cnode++) {
- per_hub_init(cnode);
- }
-
- /* We can do headless hub cnodes here .. */
-
- /*
- * io_init[] stuff.
- *
- * Get SGI IO Infrastructure drivers to init and register with
- * each other etc.
- */
-
- hubspc_init();
- pciio_init();
- pcibr_init();
- pic_init();
- xtalk_init();
- xbow_init();
- xbmon_init();
- pciiox_init();
- usrpci_init();
- ioc3_init();
-
- /*
- *
- * Our IO Infrastructure drivers are in place ..
- * Initialize the whole IO Infrastructure .. xwidget/device probes.
- *
- */
- initialize_io();
- pci_bus_to_hcl_cvlink();
-
-#ifdef CONFIG_PCIBA
- DBG("--> sgi_master_io_infr_init: calling pciba_init()\n");
-#ifndef BRINGUP2
- pciba_init();
-#endif
-#endif
-}
-
-/*
- * One-time setup for MP SN.
- * Allocate per-node data, slurp prom klconfig information and
- * convert it to hwgraph information.
- */
-void
-sn_mp_setup(void)
-{
- cpuid_t cpu;
-
- for (cpu = 0; cpu < NR_CPUS; cpu++) {
- /* Skip holes in CPU space */
- if (cpu_enabled(cpu)) {
- init_platform_pda(cpu);
- }
- }
-
- /*
- * Initialize platform-dependent vertices in the hwgraph:
- * module
- * node
- * cpu
- * memory
- * slot
- * hub
- * router
- * xbow
- */
-
- io_module_init(); /* Use to be called module_init() .. */
- klhwg_add_all_modules(hwgraph_root);
- klhwg_add_all_nodes(hwgraph_root);
-}
}
+/*
+ * shub_mmr_write_iospace: store the 64-bit value 'val' into SHub
+ * register 'reg' of node 'cnode', going through the remote-hub
+ * IO-space access macro (REMOTE_HUB_S) rather than shub_mmr_write().
+ */
static inline void
+shub_mmr_write_iospace(cnodeid_t cnode, shubreg_t reg, uint64_t val)
+{
+ int nasid = cnodeid_to_nasid(cnode);
+
+ REMOTE_HUB_S(nasid, reg, val);
+}
+
+static inline void
shub_mmr_write32(cnodeid_t cnode, shubreg_t reg, uint32_t val)
{
int nasid = cnodeid_to_nasid(cnode);
return val;
}
+/*
+ * shub_mmr_read_iospace: read and return the 64-bit SHub register
+ * 'reg' of node 'cnode' via the remote-hub IO-space access macro
+ * (REMOTE_HUB_L); counterpart of shub_mmr_write_iospace().
+ */
+static inline uint64_t
+shub_mmr_read_iospace(cnodeid_t cnode, shubreg_t reg)
+{
+ int nasid = cnodeid_to_nasid(cnode);
+
+ return REMOTE_HUB_L(nasid, reg);
+}
+
static inline uint32_t
shub_mmr_read32(cnodeid_t cnode, shubreg_t reg)
{
{
cnodeid_t cnode;
uint64_t longarg;
- devfs_handle_t d;
+ vertex_hdl_t d;
int nasid;
- if ((d = devfs_get_handle_from_inode(inode)) == NULL)
- return -ENODEV;
cnode = (cnodeid_t)hwgraph_fastinfo_get(d);
switch (cmd) {
struct file_operations shub_mon_fops = {
ioctl: shubstats_ioctl,
};
+
+/*
+ * "linkstatd" kernel thread to export SGI Numalink
+ * stats via /proc/sgi_sn/linkstats
+ */
+/*
+ * Per-node software accumulators for Numalink/II error counts,
+ * filled in by linkstatd_thread and reported by sn_linkstats_get.
+ * sn_linkstats points at a numnodes-sized array (see linkstatd_init).
+ */
+static struct s_linkstats {
+ uint64_t hs_ni_sn_errors[2]; /* NL sequence-number errors, one per NI port */
+ uint64_t hs_ni_cb_errors[2]; /* NL checkbit errors, one per NI port */
+ uint64_t hs_ni_retry_errors[2]; /* NL retries, one per NI port */
+ int hs_ii_up; /* nonzero if this node's II xtalk link is up */
+ uint64_t hs_ii_sn_errors;
+ uint64_t hs_ii_cb_errors;
+ uint64_t hs_ii_retry_errors;
+} *sn_linkstats;
+
+/* Protects sn_linkstats[] and the sampling bookkeeping below. */
+static spinlock_t sn_linkstats_lock;
+static unsigned long sn_linkstats_starttime; /* jiffies at last reset */
+static unsigned long sn_linkstats_samples; /* samples taken since reset */
+static unsigned long sn_linkstats_overflows; /* samples with a saturated hw counter */
+static unsigned long sn_linkstats_update_msecs; /* linkstatd sampling interval */
+
+/*
+ * Zero all software link-stats counters and the corresponding SHub
+ * hardware error registers on every node, record which II links are
+ * up, and restart sampling with 'msecs' as the new update interval.
+ * Takes sn_linkstats_lock itself; do not call with it held.
+ */
+void
+sn_linkstats_reset(unsigned long msecs)
+{
+ int cnode;
+ uint64_t iio_wstat;
+ uint64_t llp_csr_reg;
+
+ spin_lock(&sn_linkstats_lock);
+ memset(sn_linkstats, 0, numnodes * sizeof(struct s_linkstats));
+ for (cnode=0; cnode < numnodes; cnode++) {
+ shub_mmr_write(cnode, SH_NI0_LLP_ERR, 0L);
+ shub_mmr_write(cnode, SH_NI1_LLP_ERR, 0L);
+ shub_mmr_write_iospace(cnode, IIO_LLP_LOG, 0L);
+
+ /* zero the II retry counter */
+ iio_wstat = shub_mmr_read_iospace(cnode, IIO_WSTAT);
+ iio_wstat &= 0xffffffffff00ffff; /* bits 23:16 */
+ shub_mmr_write_iospace(cnode, IIO_WSTAT, iio_wstat);
+
+ /* Check if the II xtalk link is working */
+ llp_csr_reg = shub_mmr_read_iospace(cnode, IIO_LLP_CSR);
+ if (llp_csr_reg & IIO_LLP_CSR_IS_UP)
+ sn_linkstats[cnode].hs_ii_up = 1;
+ }
+
+ sn_linkstats_update_msecs = msecs;
+ sn_linkstats_samples = 0;
+ sn_linkstats_overflows = 0;
+ sn_linkstats_starttime = jiffies;
+ spin_unlock(&sn_linkstats_lock);
+}
+
+/*
+ * Kernel-thread body: every sn_linkstats_update_msecs milliseconds,
+ * sample the SHub NL and II error registers on every node, fold the
+ * counts into sn_linkstats[], and record whether any hardware counter
+ * was saturated (an "overflow" sample undercounts errors).
+ * Loops forever; never returns.
+ */
+int
+linkstatd_thread(void *unused)
+{
+ int cnode;
+ int overflows;
+ uint64_t reg[2];
+ uint64_t iio_wstat = 0L;
+ ii_illr_u_t illr;
+ struct s_linkstats *lsp;
+ struct task_struct *tsk = current;
+
+ daemonize("linkstatd");
+ set_user_nice(tsk, 19);
+ sigfillset(&tsk->blocked);
+ strcpy(tsk->comm, "linkstatd");
+
+ while(1) {
+ set_current_state(TASK_INTERRUPTIBLE);
+ schedule_timeout(sn_linkstats_update_msecs * HZ / 1000);
+
+ spin_lock(&sn_linkstats_lock);
+
+ overflows = 0;
+ for (lsp=sn_linkstats, cnode=0; cnode < numnodes; cnode++, lsp++) {
+ reg[0] = shub_mmr_read(cnode, SH_NI0_LLP_ERR);
+ reg[1] = shub_mmr_read(cnode, SH_NI1_LLP_ERR);
+ if (lsp->hs_ii_up) {
+ illr = (ii_illr_u_t)shub_mmr_read_iospace(cnode, IIO_LLP_LOG);
+ iio_wstat = shub_mmr_read_iospace(cnode, IIO_WSTAT);
+ }
+
+ /* any hardware counter pegged at its maximum this sample? */
+ if (!overflows && (
+ (reg[0] & SH_NI0_LLP_ERR_RX_SN_ERR_COUNT_MASK) ==
+ SH_NI0_LLP_ERR_RX_SN_ERR_COUNT_MASK ||
+ (reg[0] & SH_NI0_LLP_ERR_RX_CB_ERR_COUNT_MASK) ==
+ SH_NI0_LLP_ERR_RX_CB_ERR_COUNT_MASK ||
+ (reg[1] & SH_NI1_LLP_ERR_RX_SN_ERR_COUNT_MASK) ==
+ SH_NI1_LLP_ERR_RX_SN_ERR_COUNT_MASK ||
+ (reg[1] & SH_NI1_LLP_ERR_RX_CB_ERR_COUNT_MASK) ==
+ SH_NI1_LLP_ERR_RX_CB_ERR_COUNT_MASK ||
+ (lsp->hs_ii_up && illr.ii_illr_fld_s.i_sn_cnt == IIO_LLP_SN_MAX) ||
+ (lsp->hs_ii_up && illr.ii_illr_fld_s.i_cb_cnt == IIO_LLP_CB_MAX))) {
+ overflows = 1;
+ }
+
+/* add the masked/shifted field of 'reg' into running counter 'cnt' */
+#define LINKSTAT_UPDATE(reg, cnt, mask, shift) cnt += (reg & mask) >> shift
+
+ LINKSTAT_UPDATE(reg[0], lsp->hs_ni_sn_errors[0],
+ SH_NI0_LLP_ERR_RX_SN_ERR_COUNT_MASK,
+ SH_NI0_LLP_ERR_RX_SN_ERR_COUNT_SHFT);
+
+ LINKSTAT_UPDATE(reg[1], lsp->hs_ni_sn_errors[1],
+ SH_NI1_LLP_ERR_RX_SN_ERR_COUNT_MASK,
+ SH_NI1_LLP_ERR_RX_SN_ERR_COUNT_SHFT);
+
+ LINKSTAT_UPDATE(reg[0], lsp->hs_ni_cb_errors[0],
+ SH_NI0_LLP_ERR_RX_CB_ERR_COUNT_MASK,
+ SH_NI0_LLP_ERR_RX_CB_ERR_COUNT_SHFT);
+
+ LINKSTAT_UPDATE(reg[1], lsp->hs_ni_cb_errors[1],
+ SH_NI1_LLP_ERR_RX_CB_ERR_COUNT_MASK,
+ SH_NI1_LLP_ERR_RX_CB_ERR_COUNT_SHFT);
+
+ LINKSTAT_UPDATE(reg[0], lsp->hs_ni_retry_errors[0],
+ SH_NI0_LLP_ERR_RETRY_COUNT_MASK,
+ SH_NI0_LLP_ERR_RETRY_COUNT_SHFT);
+
+ LINKSTAT_UPDATE(reg[1], lsp->hs_ni_retry_errors[1],
+ SH_NI1_LLP_ERR_RETRY_COUNT_MASK,
+ SH_NI1_LLP_ERR_RETRY_COUNT_SHFT);
+
+ /* NOTE(review): the NL (SH_NIx_LLP_ERR) registers are only
+ * zeroed inside this hs_ii_up branch, so nodes whose II link
+ * is down never clear them between samples -- confirm this
+ * is intended and not double counting. */
+ if (lsp->hs_ii_up) {
+ /* II sn and cb errors */
+ lsp->hs_ii_sn_errors += illr.ii_illr_fld_s.i_sn_cnt;
+ lsp->hs_ii_cb_errors += illr.ii_illr_fld_s.i_cb_cnt;
+ lsp->hs_ii_retry_errors += (iio_wstat & 0x0000000000ff0000) >> 16;
+
+ shub_mmr_write(cnode, SH_NI0_LLP_ERR, 0L);
+ shub_mmr_write(cnode, SH_NI1_LLP_ERR, 0L);
+ shub_mmr_write_iospace(cnode, IIO_LLP_LOG, 0L);
+
+ /* zero the II retry counter */
+ iio_wstat = shub_mmr_read_iospace(cnode, IIO_WSTAT);
+ iio_wstat &= 0xffffffffff00ffff; /* bits 23:16 */
+ shub_mmr_write_iospace(cnode, IIO_WSTAT, iio_wstat);
+ }
+ }
+
+ sn_linkstats_samples++;
+ if (overflows)
+ sn_linkstats_overflows++;
+
+ spin_unlock(&sn_linkstats_lock);
+ }
+}
+
+/*
+ * Format 'val' events over 'secs' seconds as a per-minute rate with
+ * three decimal digits ("%4lu.%lu%lu%lu"); prints 0.000 if secs == 0.
+ * Returns a pointer to a static buffer, so the result must be consumed
+ * before the next call; the only caller here (sn_linkstats_get) is
+ * serialized by sn_linkstats_lock.
+ */
+static char *
+rate_per_minute(uint64_t val, uint64_t secs)
+{
+ static char buf[16];
+ uint64_t a=0, b=0, c=0, d=0;
+
+ if (secs) {
+ a = 60 * val / secs;
+ b = 60 * 10 * val / secs - (10 * a);
+ c = 60 * 100 * val / secs - (100 * a) - (10 * b);
+ d = 60 * 1000 * val / secs - (1000 * a) - (100 * b) - (10 * c);
+ }
+ sprintf(buf, "%4lu.%lu%lu%lu", a, b, c, d);
+
+ return buf;
+}
+
+/*
+ * Render the /proc/sgi_sn/linkstats report into 'page': a header with
+ * sample/overflow counts, one line per Numalink port (two per SHub),
+ * one line per active II port, then system-wide NL and II totals.
+ * Returns the number of bytes written.  Holds sn_linkstats_lock for
+ * the duration so the counters and rate_per_minute()'s static buffer
+ * are consistent.
+ */
+int
+sn_linkstats_get(char *page)
+{
+ int n = 0;
+ int cnode;
+ int nlport;
+ struct s_linkstats *lsp;
+ nodepda_t *npda;
+ uint64_t snsum = 0;
+ uint64_t cbsum = 0;
+ uint64_t retrysum = 0;
+ uint64_t snsum_ii = 0;
+ uint64_t cbsum_ii = 0;
+ uint64_t retrysum_ii = 0;
+ uint64_t secs;
+
+ spin_lock(&sn_linkstats_lock);
+ secs = (jiffies - sn_linkstats_starttime) / HZ;
+
+ n += sprintf(page, "# SGI Numalink stats v1 : %lu samples, %lu o/flows, update %lu msecs\n",
+ sn_linkstats_samples, sn_linkstats_overflows, sn_linkstats_update_msecs);
+
+ n += sprintf(page+n, "%-37s %8s %8s %8s %8s\n",
+ "# Numalink", "sn errs", "cb errs", "cb/min", "retries");
+
+ for (lsp=sn_linkstats, cnode=0; cnode < numnodes; cnode++, lsp++) {
+ npda = NODEPDA(cnode);
+
+ /* two NL links on each SHub */
+ for (nlport=0; nlport < 2; nlport++) {
+ cbsum += lsp->hs_ni_cb_errors[nlport];
+ snsum += lsp->hs_ni_sn_errors[nlport];
+ retrysum += lsp->hs_ni_retry_errors[nlport];
+
+ /* avoid buffer overrun (should be using seq_read API) */
+ if (numnodes > 64)
+ continue;
+
+ n += sprintf(page + n, "/%s/link/%d %8lu %8lu %8s %8lu\n",
+ npda->hwg_node_name, nlport+1, lsp->hs_ni_sn_errors[nlport],
+ lsp->hs_ni_cb_errors[nlport],
+ rate_per_minute(lsp->hs_ni_cb_errors[nlport], secs),
+ lsp->hs_ni_retry_errors[nlport]);
+ }
+
+ /* one II port on each SHub (may not be connected) */
+ if (lsp->hs_ii_up) {
+ n += sprintf(page + n, "/%s/xtalk %8lu %8lu %8s %8lu\n",
+ npda->hwg_node_name, lsp->hs_ii_sn_errors,
+ lsp->hs_ii_cb_errors, rate_per_minute(lsp->hs_ii_cb_errors, secs),
+ lsp->hs_ii_retry_errors);
+
+ snsum_ii += lsp->hs_ii_sn_errors;
+ cbsum_ii += lsp->hs_ii_cb_errors;
+ retrysum_ii += lsp->hs_ii_retry_errors;
+ }
+ }
+
+ n += sprintf(page + n, "%-37s %8lu %8lu %8s %8lu\n",
+ "System wide NL totals", snsum, cbsum,
+ rate_per_minute(cbsum, secs), retrysum);
+
+ n += sprintf(page + n, "%-37s %8lu %8lu %8s %8lu\n",
+ "System wide II totals", snsum_ii, cbsum_ii,
+ rate_per_minute(cbsum_ii, secs), retrysum_ii);
+
+ spin_unlock(&sn_linkstats_lock);
+
+ return n;
+}
+
+/*
+ * Boot-time init: allocate the per-node stats array, start counting
+ * with the default 60-second update interval, and launch the
+ * "linkstatd" sampling thread.  Returns 0 on success, -ENOMEM if the
+ * stats array cannot be allocated.
+ */
+static int __init
+linkstatd_init(void)
+{
+ spin_lock_init(&sn_linkstats_lock);
+ sn_linkstats = kmalloc(numnodes * sizeof(struct s_linkstats), GFP_KERNEL);
+ /* Fix: the kmalloc() return was previously used unchecked --
+ * sn_linkstats_reset() would memset() a NULL pointer on failure. */
+ if (!sn_linkstats)
+ return -ENOMEM;
+ sn_linkstats_reset(60000UL); /* default 60 second update interval */
+ kernel_thread(linkstatd_thread, NULL, CLONE_FS | CLONE_FILES);
+
+ return 0;
+}
+
+__initcall(linkstatd_init);
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
- * Copyright (C) 1992-1997, 2000-2002 Silicon Graphics, Inc. All Rights Reserved.
+ * Copyright (C) 1992-1997, 2000-2003 Silicon Graphics, Inc. All Rights Reserved.
*/
#include <linux/types.h>
/* ARGSUSED */
void
-hub_intr_init(devfs_handle_t hubv)
+hub_intr_init(vertex_hdl_t hubv)
{
}
}
static hub_intr_t
-do_hub_intr_alloc(devfs_handle_t dev,
+do_hub_intr_alloc(vertex_hdl_t dev,
device_desc_t dev_desc,
- devfs_handle_t owner_dev,
+ vertex_hdl_t owner_dev,
int uncond_nothread)
{
cpuid_t cpu = 0;
cpuphys = cpu_physical_id(cpu);
slice = cpu_physical_id_to_slice(cpuphys);
nasid = cpu_physical_id_to_nasid(cpuphys);
- cnode = cpu_to_node_map[cpu];
+ cnode = cpuid_to_cnodeid(cpu);
if (slice) {
xtalk_addr = SH_II_INT1 | ((unsigned long)nasid << 36) | (1UL << 47);
}
hub_intr_t
-hub_intr_alloc(devfs_handle_t dev,
+hub_intr_alloc(vertex_hdl_t dev,
device_desc_t dev_desc,
- devfs_handle_t owner_dev)
+ vertex_hdl_t owner_dev)
{
return(do_hub_intr_alloc(dev, dev_desc, owner_dev, 0));
}
hub_intr_t
-hub_intr_alloc_nothd(devfs_handle_t dev,
+hub_intr_alloc_nothd(vertex_hdl_t dev,
device_desc_t dev_desc,
- devfs_handle_t owner_dev)
+ vertex_hdl_t owner_dev)
{
return(do_hub_intr_alloc(dev, dev_desc, owner_dev, 1));
}
ASSERT(rv == 0);
intr_hdl->i_flags &= ~HUB_INTR_IS_CONNECTED;
}
-
-
-/*
- * Return a hwgraph vertex that represents the CPU currently
- * targeted by an interrupt.
- */
-devfs_handle_t
-hub_intr_cpu_get(hub_intr_t intr_hdl)
-{
- cpuid_t cpuid = intr_hdl->i_cpuid;
-
- ASSERT(cpuid != CPU_NONE);
-
- return(cpuid_to_vertex(cpuid));
-}
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
- * Copyright (C) 1992 - 1997, 2000,2002 Silicon Graphics, Inc. All rights reserved.
+ * Copyright (C) 1992 - 1997, 2000,2002-2003 Silicon Graphics, Inc. All rights reserved.
*/
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/irq.h>
+#include <asm/io.h>
#include <asm/irq.h>
#include <asm/smp.h>
#include <asm/sn/sgi.h>
#include <asm/sn/xtalk/xtalk.h>
#include <asm/sn/pci/pcibr_private.h>
#include <asm/sn/intr.h>
+#include <asm/sn/ioerror_handling.h>
#include <asm/sn/ioerror.h>
#include <asm/sn/sn2/shubio.h>
#include <asm/sn/bte.h>
extern void hubni_eint_init(cnodeid_t cnode);
extern void hubii_eint_init(cnodeid_t cnode);
-extern void hubii_eint_handler (int irq, void *arg, struct pt_regs *ep);
-int hubiio_crb_error_handler(devfs_handle_t hub_v, hubinfo_t hinfo);
-int hubiio_prb_error_handler(devfs_handle_t hub_v, hubinfo_t hinfo);
-extern void bte_crb_error_handler(devfs_handle_t hub_v, int btenum, int crbnum, ioerror_t *ioe, int bteop);
+extern irqreturn_t hubii_eint_handler (int irq, void *arg, struct pt_regs *ep);
+int hubiio_crb_error_handler(vertex_hdl_t hub_v, hubinfo_t hinfo);
+int hubiio_prb_error_handler(vertex_hdl_t hub_v, hubinfo_t hinfo);
+extern void bte_crb_error_handler(vertex_hdl_t hub_v, int btenum, int crbnum, ioerror_t *ioe, int bteop);
+void print_crb_fields(int crb_num, ii_icrb0_a_u_t icrba,
+ ii_icrb0_b_u_t icrbb, ii_icrb0_c_u_t icrbc,
+ ii_icrb0_d_u_t icrbd, ii_icrb0_e_u_t icrbe);
extern int maxcpus;
+extern error_return_code_t error_state_set(vertex_hdl_t v,error_state_t new_state);
#define HUB_ERROR_PERIOD (120 * HZ) /* 2 minutes */
-#ifdef BUS_INT_WAR
-void sn_add_polled_interrupt(int irq, int interval);
-void sn_delete_polled_interrupt(int irq);
-extern int bus_int_war_ide_irq;
-#endif
-
-
void
hub_error_clear(nasid_t nasid)
{
REMOTE_HUB_S(nasid, IIO_IOPRB_0 + (i * sizeof(hubreg_t)), prb.iprb_regval);
}
- REMOTE_HUB_S(nasid, IIO_IO_ERR_CLR, -1);
- idsr = REMOTE_HUB_L(nasid, IIO_IIDSR);
- REMOTE_HUB_S(nasid, IIO_IIDSR, (idsr & ~(IIO_IIDSR_SENT_MASK)));
+ REMOTE_HUB_S(nasid, IIO_IECLR, -1);
}
* Returns : None.
*/
-
void
hubii_eint_init(cnodeid_t cnode)
{
ii_iidsr_u_t hubio_eint;
hubinfo_t hinfo;
cpuid_t intr_cpu;
- devfs_handle_t hub_v;
+ vertex_hdl_t hub_v;
int bit_pos_to_irq(int bit);
+ ii_ilcsr_u_t ilcsr;
- hub_v = (devfs_handle_t)cnodeid_to_vertex(cnode);
+ hub_v = (vertex_hdl_t)cnodeid_to_vertex(cnode);
ASSERT_ALWAYS(hub_v);
hubinfo_get(hub_v, &hinfo);
ASSERT(hinfo);
ASSERT(hinfo->h_cnodeid == cnode);
+ ilcsr.ii_ilcsr_regval = REMOTE_HUB_L(hinfo->h_nasid, IIO_ILCSR);
+ if ((ilcsr.ii_ilcsr_fld_s.i_llp_stat & 0x2) == 0) {
+ /*
+ * HUB II link is not up. Disable LLP. Clear old errors.
+ * Enable interrupts to handle BTE errors.
+ */
+ ilcsr.ii_ilcsr_fld_s.i_llp_en = 0;
+ REMOTE_HUB_S(hinfo->h_nasid, IIO_ILCSR, ilcsr.ii_ilcsr_regval);
+ }
+
/* Select a possible interrupt target where there is a free interrupt
* bit and also reserve the interrupt bit for this IO error interrupt
*/
- intr_cpu = intr_heuristic(hub_v,0,-1,0,hub_v,
+ intr_cpu = intr_heuristic(hub_v,0,SGI_II_ERROR,0,hub_v,
"HUB IO error interrupt",&bit);
if (intr_cpu == CPU_NONE) {
printk("hubii_eint_init: intr_reserve_level failed, cnode %d", cnode);
return;
}
- rv = intr_connect_level(intr_cpu, bit, 0, NULL);
- request_irq(bit + (intr_cpu << 8), hubii_eint_handler, 0, "SN_hub_error", (void *)hub_v);
- irq_desc(bit + (intr_cpu << 8))->status |= SN2_IRQ_PER_HUB;
-#ifdef BUS_INT_WAR
- sn_add_polled_interrupt(bit + (intr_cpu << 8), (0.01 * HZ));
-#endif
+ rv = intr_connect_level(intr_cpu, SGI_II_ERROR, 0, NULL);
+ request_irq(SGI_II_ERROR, hubii_eint_handler, SA_SHIRQ, "SN_hub_error", (void *)hub_v);
+ irq_desc(bit)->status |= SN2_IRQ_PER_HUB;
ASSERT_ALWAYS(rv >= 0);
hubio_eint.ii_iidsr_regval = 0;
hubio_eint.ii_iidsr_fld_s.i_enable = 1;
/*ARGSUSED*/
-void
+irqreturn_t
hubii_eint_handler (int irq, void *arg, struct pt_regs *ep)
{
- devfs_handle_t hub_v;
+ vertex_hdl_t hub_v;
hubinfo_t hinfo;
ii_wstat_u_t wstat;
hubreg_t idsr;
+ ii_ilcsr_u_t ilcsr;
/* two levels of casting avoids compiler warning.!! */
- hub_v = (devfs_handle_t)(long)(arg);
+ hub_v = (vertex_hdl_t)(long)(arg);
ASSERT(hub_v);
hubinfo_get(hub_v, &hinfo);
+ idsr = REMOTE_HUB_L(hinfo->h_nasid, IIO_ICMR);
+#if 0
+ if (idsr & 0x1) {
+ /* ICMR bit is set .. we are getting into "Spurious Interrupts condition. */
+ printk("Cnode %d II has seen the ICMR condition\n", hinfo->h_cnodeid);
+ printk("***** Please file PV with the above messages *****\n");
+ /* panic("We have to panic to prevent further unknown states ..\n"); */
+ }
+#endif
+
/*
* Identify the reason for error.
*/
* Note: we may never be able to print this, if the II talking
* to Xbow which hosts the console is dead.
*/
- printk("Hub %d to Xtalk Link failed (II_ECRAZY) Reason: %s",
- hinfo->h_cnodeid, reason);
+ ilcsr.ii_ilcsr_regval = REMOTE_HUB_L(hinfo->h_nasid, IIO_ILCSR);
+ if (ilcsr.ii_ilcsr_fld_s.i_llp_en == 1) { /* Link is enabled */
+ printk("Hub %d, cnode %d to Xtalk Link failed (II_ECRAZY) Reason: %s",
+ hinfo->h_nasid, hinfo->h_cnodeid, reason);
+ }
}
+
+ /*
+ * Before processing any interrupt related information, clear all
+ * error indication and reenable interrupts. This will prevent
+ * lost interrupts due to the interrupt handler scanning past a PRB/CRB
+ * which has not errored yet and then the PRB/CRB goes into error.
+ * Note, PRB errors are cleared individually.
+ */
+ REMOTE_HUB_S(hinfo->h_nasid, IIO_IECLR, 0xff0000);
+ idsr = REMOTE_HUB_L(hinfo->h_nasid, IIO_IIDSR) & ~IIO_IIDSR_SENT_MASK;
+ REMOTE_HUB_S(hinfo->h_nasid, IIO_IIDSR, idsr);
+
+
/*
* It's a toss as to which one among PRB/CRB to check first.
* Current decision is based on the severity of the errors.
*/
(void)hubiio_crb_error_handler(hub_v, hinfo);
(void)hubiio_prb_error_handler(hub_v, hinfo);
- /*
- * If we reach here, it indicates crb/prb handlers successfully
- * handled the error. So, re-enable II to send more interrupt
- * and return.
- */
- REMOTE_HUB_S(hinfo->h_nasid, IIO_IECLR, 0xffffff);
- idsr = REMOTE_HUB_L(hinfo->h_nasid, IIO_IIDSR) & ~IIO_IIDSR_SENT_MASK;
- REMOTE_HUB_S(hinfo->h_nasid, IIO_IIDSR, idsr);
+
+ return IRQ_HANDLED;
}
/*
"Xtalk Error Packet"
};
+/*
+ * Dump every field of ICRB registers A-E for CRB 'crb_num' to the
+ * console.  Purely diagnostic; called from the CRB error handler.
+ */
+void
+print_crb_fields(int crb_num, ii_icrb0_a_u_t icrba,
+ ii_icrb0_b_u_t icrbb, ii_icrb0_c_u_t icrbc,
+ ii_icrb0_d_u_t icrbd, ii_icrb0_e_u_t icrbe)
+{
+ /* Fix: the a_valid/a_addr labels were missing their field names
+ * and separating spaces ("valid0x%x", "Address0x%lx"). */
+ printk("CRB %d regA\n\t"
+ "a_iow 0x%x\n\t"
+ "a_valid 0x%x\n\t"
+ "a_addr 0x%lx\n\t"
+ "a_tnum 0x%x\n\t"
+ "a_sidn 0x%x\n",
+ crb_num,
+ icrba.a_iow,
+ icrba.a_valid,
+ icrba.a_addr,
+ icrba.a_tnum,
+ icrba.a_sidn);
+ printk("CRB %d regB\n\t"
+ "b_imsgtype 0x%x\n\t"
+ "b_imsg 0x%x\n"
+ "\tb_use_old 0x%x\n\t"
+ "b_initiator 0x%x\n\t"
+ "b_exc 0x%x\n"
+ "\tb_ackcnt 0x%x\n\t"
+ "b_resp 0x%x\n\t"
+ "b_ack 0x%x\n"
+ "\tb_hold 0x%x\n\t"
+ "b_wb 0x%x\n\t"
+ "b_intvn 0x%x\n"
+ "\tb_stall_ib 0x%x\n\t"
+ "b_stall_int 0x%x\n"
+ "\tb_stall_bte_0 0x%x\n\t"
+ "b_stall_bte_1 0x%x\n"
+ "\tb_error 0x%x\n\t"
+ "b_lnetuce 0x%x\n\t"
+ "b_mark 0x%x\n\t"
+ "b_xerr 0x%x\n",
+ crb_num,
+ icrbb.b_imsgtype,
+ icrbb.b_imsg,
+ icrbb.b_use_old,
+ icrbb.b_initiator,
+ icrbb.b_exc,
+ icrbb.b_ackcnt,
+ icrbb.b_resp,
+ icrbb.b_ack,
+ icrbb.b_hold,
+ icrbb.b_wb,
+ icrbb.b_intvn,
+ icrbb.b_stall_ib,
+ icrbb.b_stall_int,
+ icrbb.b_stall_bte_0,
+ icrbb.b_stall_bte_1,
+ icrbb.b_error,
+ icrbb.b_lnetuce,
+ icrbb.b_mark,
+ icrbb.b_xerr);
+ printk("CRB %d regC\n\t"
+ "c_source 0x%x\n\t"
+ "c_xtsize 0x%x\n\t"
+ "c_cohtrans 0x%x\n\t"
+ "c_btenum 0x%x\n\t"
+ "c_gbr 0x%x\n\t"
+ "c_doresp 0x%x\n\t"
+ "c_barrop 0x%x\n\t"
+ "c_suppl 0x%x\n",
+ crb_num,
+ icrbc.c_source,
+ icrbc.c_xtsize,
+ icrbc.c_cohtrans,
+ icrbc.c_btenum,
+ icrbc.c_gbr,
+ icrbc.c_doresp,
+ icrbc.c_barrop,
+ icrbc.c_suppl);
+ /* Fix: dropped the stray trailing "\t" after the last regD and
+ * regE fields so the dump does not end mid-indent. */
+ printk("CRB %d regD\n\t"
+ "d_bteaddr 0x%lx\n\t"
+ "d_bteop 0x%x\n\t"
+ "d_pripsc 0x%x\n\t"
+ "d_pricnt 0x%x\n\t"
+ "d_sleep 0x%x\n",
+ crb_num,
+ icrbd.d_bteaddr,
+ icrbd.d_bteop,
+ icrbd.d_pripsc,
+ icrbd.d_pricnt,
+ icrbd.d_sleep);
+ printk("CRB %d regE\n\t"
+ "icrbe_timeout 0x%x\n\t"
+ "icrbe_context 0x%x\n\t"
+ "icrbe_toutvld 0x%x\n\t"
+ "icrbe_ctxtvld 0x%x\n",
+ crb_num,
+ icrbe.icrbe_timeout,
+ icrbe.icrbe_context,
+ icrbe.icrbe_toutvld,
+ icrbe.icrbe_ctxtvld);
+}
+
/*
* hubiio_crb_error_handler
*
*/
int
-hubiio_crb_error_handler(devfs_handle_t hub_v, hubinfo_t hinfo)
+hubiio_crb_error_handler(vertex_hdl_t hub_v, hubinfo_t hinfo)
{
cnodeid_t cnode;
nasid_t nasid;
cnode = NASID_TO_COMPACT_NODEID(nasid);
/*
+ * XXX - Add locking for any recovery actions
+ */
+ /*
* Scan through all CRBs in the Hub, and handle the errors
* in any of the CRBs marked.
*/
else /* b_initiator bit 2 gives BTE number */
bte_num = (icrbb.b_initiator & 0x4) >> 2;
- /* >>> bte_crb_error_handler needs to be
- * broken into two parts. The first should
- * cleanup the CRB. The second should wait
- * until all bte related CRB's are complete
- * and then do the error reset.
- */
+ hubiio_crb_free(hinfo, i);
+
bte_crb_error_handler(hub_v, bte_num,
i, &ioerror,
icrbd.d_bteop);
- hubiio_crb_free(hinfo, i);
num_errors++;
continue;
}
IOERROR_SETVALUE(&ioerror, tnum, icrba.a_tnum);
}
+ if (icrbb.b_error) {
+ /*
+ * CRB 'i' has some error. Identify the type of error,
+ * and try to handle it.
+ *
+ */
+ switch(icrbb.b_ecode) {
+ case IIO_ICRB_ECODE_PERR:
+ case IIO_ICRB_ECODE_WERR:
+ case IIO_ICRB_ECODE_AERR:
+ case IIO_ICRB_ECODE_PWERR:
+ case IIO_ICRB_ECODE_TOUT:
+ case IIO_ICRB_ECODE_XTERR:
+ printk("Shub II CRB %d: error %s on hub cnodeid: %d",
+ i, hubiio_crb_errors[icrbb.b_ecode], cnode);
+ /*
+ * Any sort of write error is mostly due
+ * bad programming (Note it's not a timeout.)
+ * So, invoke hub_iio_error_handler with
+ * appropriate information.
+ */
+ IOERROR_SETVALUE(&ioerror,errortype,icrbb.b_ecode);
+
+ /* Go through the error bit lookup phase */
+ if (error_state_set(hub_v, ERROR_STATE_LOOKUP) ==
+ ERROR_RETURN_CODE_CANNOT_SET_STATE)
+ return(IOERROR_UNHANDLED);
+ rc = hub_ioerror_handler(
+ hub_v,
+ DMA_WRITE_ERROR,
+ MODE_DEVERROR,
+ &ioerror);
+ if (rc == IOERROR_HANDLED) {
+ rc = hub_ioerror_handler(
+ hub_v,
+ DMA_WRITE_ERROR,
+ MODE_DEVREENABLE,
+ &ioerror);
+ }else {
+ printk("Unable to handle %s on hub %d",
+ hubiio_crb_errors[icrbb.b_ecode],
+ cnode);
+ /* panic; */
+ }
+ /* Go to Next error */
+ print_crb_fields(i, icrba, icrbb, icrbc,
+ icrbd, icrbe);
+ hubiio_crb_free(hinfo, i);
+ continue;
+ case IIO_ICRB_ECODE_PRERR:
+ case IIO_ICRB_ECODE_DERR:
+ printk("Shub II CRB %d: error %s on hub : %d",
+ i, hubiio_crb_errors[icrbb.b_ecode], cnode);
+ /* panic */
+ default:
+ printk("Shub II CRB error (code : %d) on hub : %d",
+ icrbb.b_ecode, cnode);
+ /* panic */
+ }
+ }
+ /*
+ * Error is not indicated via the errcode field
+ * Check other error indications in this register.
+ */
+ if (icrbb.b_xerr) {
+ printk("Shub II CRB %d: Xtalk Packet with error bit set to hub %d",
+ i, cnode);
+ /* panic */
+ }
+ if (icrbb.b_lnetuce) {
+ printk("Shub II CRB %d: Uncorrectable data error detected on data "
+ " from NUMAlink to node %d",
+ i, cnode);
+ /* panic */
+ }
+ print_crb_fields(i, icrba, icrbb, icrbc, icrbd, icrbe);
+
+
+
+
if (icrbb.b_error) {
/*
default:
panic("Fatal error (code : %d) on hub : %d",
- cnode);
+ icrbb.b_ecode, cnode);
/*NOTREACHED*/
}
* Cleanup involes freeing the PRB register
*/
static void
-hubii_prb_handler(devfs_handle_t hub_v, hubinfo_t hinfo, int wnum)
+hubii_prb_handler(vertex_hdl_t hub_v, hubinfo_t hinfo, int wnum)
{
nasid_t nasid;
/*
* Clear error bit by writing to IECLR register.
*/
- REMOTE_HUB_S(nasid, IIO_IO_ERR_CLR, (1 << wnum));
+ REMOTE_HUB_S(nasid, IIO_IECLR, (1 << wnum));
/*
* PIO Write to Widget 'i' got into an error.
* Invoke hubiio_error_handler with this information.
*/
- printk( "Hub nasid %d got a PIO Write error from widget %d, cleaning up and continuing",
- nasid, wnum);
+ printk( "Hub nasid %d got a PIO Write error from widget %d, "
+ "cleaning up and continuing", nasid, wnum);
/*
* XXX
* It may be necessary to adjust IO PRB counter
}
int
-hubiio_prb_error_handler(devfs_handle_t hub_v, hubinfo_t hinfo)
+hubiio_prb_error_handler(vertex_hdl_t hub_v, hubinfo_t hinfo)
{
int wnum;
nasid_t nasid;
#include <asm/sn/sn2/shubio.h>
-error_state_t error_state_get(devfs_handle_t v);
-error_return_code_t error_state_set(devfs_handle_t v,error_state_t new_state);
+error_state_t error_state_get(vertex_hdl_t v);
+error_return_code_t error_state_set(vertex_hdl_t v,error_state_t new_state);
/*
/*ARGSUSED*/
int
hub_xp_error_handler(
- devfs_handle_t hub_v,
+ vertex_hdl_t hub_v,
nasid_t nasid,
int error_code,
ioerror_mode_t mode,
{
/*REFERENCED*/
hubreg_t iio_imem;
- devfs_handle_t xswitch;
+ vertex_hdl_t xswitch;
error_state_t e_state;
cnodeid_t cnode;
*/
int
hub_ioerror_handler(
- devfs_handle_t hub_v,
+ vertex_hdl_t hub_v,
int error_code,
int mode,
struct io_error_s *ioerror)
int retval = 0;
/*REFERENCED*/
iopaddr_t p;
+ caddr_t cp;
IOERROR_DUMP("hub_ioerror_handler", error_code, mode, ioerror);
* This is typically true for user mode bus errors while
* accessing I/O space.
*/
- IOERROR_GETVALUE(p,ioerror,vaddr);
- if (p){
+ IOERROR_GETVALUE(cp,ioerror,vaddr);
+ if (cp){
/*
* If neither in small window nor in large window range,
* outright reject it.
*/
- IOERROR_GETVALUE(p,ioerror,vaddr);
- if (NODE_SWIN_ADDR(nasid, (paddr_t)p)){
+ IOERROR_GETVALUE(cp,ioerror,vaddr);
+ if (NODE_SWIN_ADDR(nasid, (paddr_t)cp)){
iopaddr_t hubaddr;
xwidgetnum_t widgetnum;
iopaddr_t xtalkaddr;
IOERROR_SETVALUE(ioerror,xtalkaddr,xtalkaddr);
- } else if (NODE_BWIN_ADDR(nasid, (paddr_t)p)){
+ } else if (NODE_BWIN_ADDR(nasid, (paddr_t)cp)){
/*
* Address corresponds to large window space.
* Convert it to xtalk address.
return retval;
}
-#define L_BITSMINOR 18
-#define L_MAXMAJ 0x1ff
-#define emajor(x) (int )(((unsigned )(x)>>L_BITSMINOR) & L_MAXMAJ)
-#define dev_is_vertex(dev) (emajor((dev_t)(dev)) == 0)
-
#define INFO_LBL_ERROR_STATE "error_state"
#define v_error_state_get(v,s) \
* current state otherwise
*/
error_state_t
-error_state_get(devfs_handle_t v)
+error_state_get(vertex_hdl_t v)
{
error_state_t s;
/* Check if we have a valid hwgraph vertex */
- if (!dev_is_vertex(v))
+ if ( v == (vertex_hdl_t)0 )
return(ERROR_STATE_NONE);
/* Get the labelled info hanging off the vertex which corresponds
* ERROR_RETURN_CODE_SUCCESS otherwise
*/
error_return_code_t
-error_state_set(devfs_handle_t v,error_state_t new_state)
+error_state_set(vertex_hdl_t v,error_state_t new_state)
{
error_state_t old_state;
boolean_t replace = B_TRUE;
/* Check if we have a valid hwgraph vertex */
- if (!dev_is_vertex(v))
+ if ( v == (vertex_hdl_t)0 )
return(ERROR_RETURN_CODE_GENERAL_FAILURE);
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/sched.h>
+#include <linux/interrupt.h>
#include <asm/sn/sgi.h>
#include <asm/sn/intr.h>
#include <asm/sn/sn2/sn_private.h>
#include <asm/sn/invent.h>
#include <asm/sn/hcl.h>
#include <asm/sn/labelcl.h>
-#include <asm/sn/hack.h>
#include <asm/sn/pci/bridge.h>
#include <asm/sn/xtalk/xtalk_private.h>
#include <asm/sn/simulator.h>
#define NEW(ptr) (ptr = kmalloc(sizeof (*(ptr)), GFP_KERNEL))
#define DEL(ptr) (kfree(ptr))
-int xbow_devflag = D_MP;
-
/*
* This file supports the Xbow chip. Main functions: initializtion,
* error handling, and GBR.
typedef struct xbow_soft_s *xbow_soft_t;
struct xbow_soft_s {
- devfs_handle_t conn; /* our connection point */
- devfs_handle_t vhdl; /* xbow's private vertex */
- devfs_handle_t busv; /* the xswitch vertex */
+ vertex_hdl_t conn; /* our connection point */
+ vertex_hdl_t vhdl; /* xbow's private vertex */
+ vertex_hdl_t busv; /* the xswitch vertex */
xbow_t *base; /* PIO pointer to crossbow chip */
char *name; /* hwgraph name */
*/
void xbow_mlreset(xbow_t *);
-void xbow_init(void);
-int xbow_attach(devfs_handle_t);
-
-int xbow_open(devfs_handle_t *, int, int, cred_t *);
-int xbow_close(devfs_handle_t, int, int, cred_t *);
-
-int xbow_map(devfs_handle_t, vhandl_t *, off_t, size_t, uint);
-int xbow_unmap(devfs_handle_t, vhandl_t *);
-int xbow_ioctl(devfs_handle_t, int, void *, int, struct cred *, int *);
+int xbow_attach(vertex_hdl_t);
int xbow_widget_present(xbow_t *, int);
static int xbow_link_alive(xbow_t *, int);
-devfs_handle_t xbow_widget_lookup(devfs_handle_t, int);
+vertex_hdl_t xbow_widget_lookup(vertex_hdl_t, int);
void xbow_intr_preset(void *, int, xwidgetnum_t, iopaddr_t, xtalk_intr_vector_t);
-void xbow_update_perf_counters(devfs_handle_t);
-xbow_perf_link_t *xbow_get_perf_counters(devfs_handle_t);
-int xbow_enable_perf_counter(devfs_handle_t, int, int, int);
-xbow_link_status_t *xbow_get_llp_status(devfs_handle_t);
-void xbow_update_llp_status(devfs_handle_t);
+void xbow_update_perf_counters(vertex_hdl_t);
+xbow_perf_link_t *xbow_get_perf_counters(vertex_hdl_t);
+int xbow_enable_perf_counter(vertex_hdl_t, int, int, int);
+xbow_link_status_t *xbow_get_llp_status(vertex_hdl_t);
+void xbow_update_llp_status(vertex_hdl_t);
-int xbow_disable_llp_monitor(devfs_handle_t);
-int xbow_enable_llp_monitor(devfs_handle_t);
-int xbow_prio_bw_alloc(devfs_handle_t, xwidgetnum_t, xwidgetnum_t,
+int xbow_disable_llp_monitor(vertex_hdl_t);
+int xbow_enable_llp_monitor(vertex_hdl_t);
+int xbow_prio_bw_alloc(vertex_hdl_t, xwidgetnum_t, xwidgetnum_t,
unsigned long long, unsigned long long);
static void xbow_setwidint(xtalk_intr_t);
-void idbg_xbowregs(int64_t);
xswitch_reset_link_f xbow_reset_link;
xbow_reset_link,
};
-/*
- * This is the file operation table for the pcibr driver.
- * As each of the functions are implemented, put the
- * appropriate function name below.
- */
-static int xbow_mmap(struct file * file, struct vm_area_struct * vma);
-struct file_operations xbow_fops = {
- owner: THIS_MODULE,
- llseek: NULL,
- read: NULL,
- write: NULL,
- readdir: NULL,
- poll: NULL,
- ioctl: NULL,
- mmap: xbow_mmap,
- open: xbow_open,
- flush: NULL,
- release: NULL,
- fsync: NULL,
- fasync: NULL,
- lock: NULL,
- readv: NULL,
- writev: NULL,
- sendpage: NULL,
- get_unmapped_area: NULL
-};
static int
xbow_mmap(struct file * file, struct vm_area_struct * vma)
phys_addr = (unsigned long)file->private_data & ~0xc000000000000000; /* Mask out the Uncache bits */
vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
vma->vm_flags |= VM_RESERVED | VM_IO;
- error = io_remap_page_range(vma, vma->vm_start, phys_addr,
- vma->vm_end - vma->vm_start,
+ error = io_remap_page_range(vma, phys_addr, vma->vm_start,
+ vma->vm_end-vma->vm_start,
vma->vm_page_prot);
return(error);
}
+/*
+ * This is the file operation table for the pcibr driver.
+ * As each of the functions are implemented, put the
+ * appropriate function name below.
+ */
+struct file_operations xbow_fops = {
+ .owner = THIS_MODULE,
+ .mmap = xbow_mmap,
+};
/*
* xbow_mlreset: called at mlreset time if the
{
}
-/*
- * xbow_init: called with the rest of the device
- * driver XXX_init routines. This platform *might*
- * have a Crossbow chip, or even several, but it
- * might have none. Register with the crosstalk
- * generic provider so when we encounter the chip
- * the right magic happens.
- */
-void
-xbow_init(void)
-{
-
-#if DEBUG && ATTACH_DEBUG
- printk("xbow_init\n");
-#endif
-
- xwidget_driver_register(PXBOW_WIDGET_PART_NUM,
- 0, /* XXBOW_WIDGET_MFGR_NUM, */
- "xbow_",
- CDL_PRI_HI); /* attach before friends */
-
-
- xwidget_driver_register(XXBOW_WIDGET_PART_NUM,
- 0, /* XXBOW_WIDGET_MFGR_NUM, */
- "xbow_",
- CDL_PRI_HI); /* attach before friends */
-
- xwidget_driver_register(XBOW_WIDGET_PART_NUM,
- XBOW_WIDGET_MFGR_NUM,
- "xbow_",
- CDL_PRI_HI); /* attach before friends */
-}
-
#ifdef XBRIDGE_REGS_SIM
/* xbow_set_simulated_regs: sets xbow regs as needed
* for powering through the boot
/*ARGSUSED */
int
-xbow_attach(devfs_handle_t conn)
+xbow_attach(vertex_hdl_t conn)
{
/*REFERENCED */
- devfs_handle_t vhdl;
- devfs_handle_t busv;
+ vertex_hdl_t vhdl;
+ vertex_hdl_t busv;
xbow_t *xbow;
xbow_soft_t soft;
int port;
* file ops.
*/
vhdl = NULL;
- vhdl = devfs_register(conn, EDGE_LBL_XBOW,
- DEVFS_FL_AUTO_DEVNUM, 0, 0,
- S_IFCHR | S_IRUSR | S_IWUSR | S_IRGRP,
- &xbow_fops, (void *)xbow);
+ vhdl = hwgraph_register(conn, EDGE_LBL_XBOW, 0,
+ 0, 0, 0,
+ S_IFCHR | S_IRUSR | S_IWUSR | S_IRGRP, 0, 0,
+ (struct file_operations *)&xbow_fops, (void *)xbow);
if (!vhdl) {
printk(KERN_WARNING "xbow_attach: Unable to create char device for xbow conn %p\n",
(void *)conn);
*/
intr_hdl = xtalk_intr_alloc(conn, (device_desc_t)0, vhdl);
ASSERT(intr_hdl != NULL);
+
+ {
+ int irq = ((hub_intr_t)intr_hdl)->i_bit;
+ int cpu = ((hub_intr_t)intr_hdl)->i_cpuid;
+
+ intr_unreserve_level(cpu, irq);
+ ((hub_intr_t)intr_hdl)->i_bit = SGI_XBOW_ERROR;
+ }
xtalk_intr_connect(intr_hdl,
(intr_func_t) xbow_errintr_handler,
(xtalk_intr_setfunc_t) xbow_setwidint,
(void *) xbow);
- request_irq(CPU_VECTOR_TO_IRQ(((hub_intr_t)intr_hdl)->i_cpuid,
- ((hub_intr_t)intr_hdl)->i_bit),
- (intr_func_t)xbow_errintr_handler, 0, "XBOW error",
+ request_irq(SGI_XBOW_ERROR, (void *)xbow_errintr_handler, SA_SHIRQ, "XBOW error",
(intr_arg_t) soft);
-#ifdef BUS_INT_WAR_NOT_YET
- {
- void sn_add_polled_interrupt(int, int);
- sn_add_polled_interrupt(CPU_VECTOR_TO_IRQ(((hub_intr_t)intr_hdl)->i_cpuid,
- ((hub_intr_t)intr_hdl)->i_bit), 5000);
- }
-#endif
-
/*
* Enable xbow error interrupts
return 0; /* attach successful */
}
-/*ARGSUSED */
-int
-xbow_open(devfs_handle_t *devp, int oflag, int otyp, cred_t *credp)
-{
- return 0;
-}
-
-/*ARGSUSED */
-int
-xbow_close(devfs_handle_t dev, int oflag, int otyp, cred_t *crp)
-{
- return 0;
-}
-
-/*ARGSUSED */
-int
-xbow_map(devfs_handle_t dev, vhandl_t *vt, off_t off, size_t len, uint prot)
-{
- devfs_handle_t vhdl = dev_to_vhdl(dev);
- xbow_soft_t soft = xbow_soft_get(vhdl);
- int error;
-
- ASSERT(soft);
- len = ctob(btoc(len));
- /* XXX- this ignores the offset!!! */
- error = v_mapphys(vt, (void *) soft->base, len);
- return error;
-}
-
-/*ARGSUSED */
-int
-xbow_unmap(devfs_handle_t dev, vhandl_t *vt)
-{
- return 0;
-}
-
/* This contains special-case code for grio. There are plans to make
* this general sometime in the future, but till then this should
* be good enough.
*/
xwidgetnum_t
-xbow_widget_num_get(devfs_handle_t dev)
+xbow_widget_num_get(vertex_hdl_t dev)
{
- devfs_handle_t tdev;
+ vertex_hdl_t tdev;
char devname[MAXDEVNAME];
xwidget_info_t xwidget_info;
int i;
return XWIDGET_NONE;
}
-int
-xbow_ioctl(devfs_handle_t dev,
- int cmd,
- void *arg,
- int flag,
- struct cred *cr,
- int *rvalp)
-{
- devfs_handle_t vhdl;
- int error = 0;
-
-#if defined (DEBUG)
- int rc;
- devfs_handle_t conn;
- struct xwidget_info_s *xwidget_info;
- xbow_soft_t xbow_soft;
-#endif
- *rvalp = 0;
-
- vhdl = dev_to_vhdl(dev);
-#if defined (DEBUG)
- xbow_soft = xbow_soft_get(vhdl);
- conn = xbow_soft->conn;
-
- xwidget_info = xwidget_info_get(conn);
- ASSERT_ALWAYS(xwidget_info != NULL);
-
- rc = xwidget_hwid_is_xswitch(&xwidget_info->w_hwid);
- ASSERT_ALWAYS(rc != 0);
-#endif
- switch (cmd) {
-
- case XBOWIOC_LLP_ERROR_ENABLE:
- if ((error = xbow_enable_llp_monitor(vhdl)) != 0)
- error = EINVAL;
-
- break;
-
- case XBOWIOC_LLP_ERROR_DISABLE:
-
- if ((error = xbow_disable_llp_monitor(vhdl)) != 0)
- error = EINVAL;
-
- break;
-
- default:
- break;
-
- }
- return error;
-}
-
/*
* xbow_widget_present: See if a device is present
* on the specified port of this crossbow.
* specified.
* If not found, return 0.
*/
-devfs_handle_t
-xbow_widget_lookup(devfs_handle_t vhdl,
+vertex_hdl_t
+xbow_widget_lookup(vertex_hdl_t vhdl,
int widgetnum)
{
xswitch_info_t xswitch_info;
- devfs_handle_t conn;
+ vertex_hdl_t conn;
xswitch_info = xswitch_info_get(vhdl);
conn = xswitch_info_vhdl_get(xswitch_info, widgetnum);
XEM_ADD_NVAR("ioe." #n, p); \
}
-#ifdef LATER
-static void
-xem_add_ioe(ioerror_t *ioe)
-{
- union tmp {
- ushort stmp;
- unsigned long long lltmp;
- cpuid_t cputmp;
- cnodeid_t cntmp;
- iopaddr_t iotmp;
- caddr_t catmp;
- paddr_t patmp;
- } tmp;
-
- XEM_ADD_IOEF(tmp.stmp, errortype);
- XEM_ADD_IOEF(tmp.stmp, widgetnum);
- XEM_ADD_IOEF(tmp.stmp, widgetdev);
- XEM_ADD_IOEF(tmp.cputmp, srccpu);
- XEM_ADD_IOEF(tmp.cntmp, srcnode);
- XEM_ADD_IOEF(tmp.cntmp, errnode);
- XEM_ADD_IOEF(tmp.iotmp, sysioaddr);
- XEM_ADD_IOEF(tmp.iotmp, xtalkaddr);
- XEM_ADD_IOEF(tmp.iotmp, busspace);
- XEM_ADD_IOEF(tmp.iotmp, busaddr);
- XEM_ADD_IOEF(tmp.catmp, vaddr);
- XEM_ADD_IOEF(tmp.patmp, memaddr);
- XEM_ADD_IOEF(tmp.catmp, epc);
- XEM_ADD_IOEF(tmp.catmp, ef);
- XEM_ADD_IOEF(tmp.stmp, tnum);
-}
-
-#define XEM_ADD_IOE() (xem_add_ioe(ioe))
-#endif /* LATER */
-
-int xbow_xmit_retry_errors = 0;
+int xbow_xmit_retry_errors;
int
xbow_xmit_retry_error(xbow_soft_t soft,
int port)
{
xswitch_info_t info;
- devfs_handle_t vhdl;
+ vertex_hdl_t vhdl;
widget_cfg_t *wid;
widgetreg_t id;
int part;
link_pend &= ~XB_STAT_XMT_RTRY_ERR;
}
if (link_pend) {
- devfs_handle_t xwidget_vhdl;
+ vertex_hdl_t xwidget_vhdl;
char *xwidget_name;
/* Get the widget name corresponding to the current
XEM_ADD_VAR(link_status);
XEM_ADD_VAR(link_aux_status);
-#ifdef LATER
- if (dump_ioe) {
- XEM_ADD_IOE();
- dump_ioe = 0;
- }
-#endif
#if !DEBUG
}
#endif
xbow_soft_t soft = (xbow_soft_t) einfo;
int port;
- devfs_handle_t conn;
- devfs_handle_t busv;
+ vertex_hdl_t conn;
+ vertex_hdl_t busv;
xbow_t *xbow = soft->base;
xbowreg_t wid_stat;
}
void
-xbow_update_perf_counters(devfs_handle_t vhdl)
+xbow_update_perf_counters(vertex_hdl_t vhdl)
{
xbow_soft_t xbow_soft = xbow_soft_get(vhdl);
xbow_perf_t *xbow_perf = xbow_soft->xbow_perfcnt;
}
xbow_perf_link_t *
-xbow_get_perf_counters(devfs_handle_t vhdl)
+xbow_get_perf_counters(vertex_hdl_t vhdl)
{
xbow_soft_t xbow_soft = xbow_soft_get(vhdl);
xbow_perf_link_t *xbow_perf_link = xbow_soft->xbow_perflink;
}
int
-xbow_enable_perf_counter(devfs_handle_t vhdl, int link, int mode, int counter)
+xbow_enable_perf_counter(vertex_hdl_t vhdl, int link, int mode, int counter)
{
xbow_soft_t xbow_soft = xbow_soft_get(vhdl);
xbow_perf_t *xbow_perf = xbow_soft->xbow_perfcnt;
}
xbow_link_status_t *
-xbow_get_llp_status(devfs_handle_t vhdl)
+xbow_get_llp_status(vertex_hdl_t vhdl)
{
xbow_soft_t xbow_soft = xbow_soft_get(vhdl);
xbow_link_status_t *xbow_llp_status = xbow_soft->xbow_link_status;
}
void
-xbow_update_llp_status(devfs_handle_t vhdl)
+xbow_update_llp_status(vertex_hdl_t vhdl)
{
xbow_soft_t xbow_soft = xbow_soft_get(vhdl);
xbow_link_status_t *xbow_llp_status = xbow_soft->xbow_link_status;
xbwX_stat_t lnk_sts;
xbow_aux_link_status_t aux_sts;
int link;
- devfs_handle_t xwidget_vhdl;
+ vertex_hdl_t xwidget_vhdl;
char *xwidget_name;
xbow = (xbow_t *) xbow_soft->base;
}
int
-xbow_disable_llp_monitor(devfs_handle_t vhdl)
+xbow_disable_llp_monitor(vertex_hdl_t vhdl)
{
xbow_soft_t xbow_soft = xbow_soft_get(vhdl);
int port;
}
int
-xbow_enable_llp_monitor(devfs_handle_t vhdl)
+xbow_enable_llp_monitor(vertex_hdl_t vhdl)
{
xbow_soft_t xbow_soft = xbow_soft_get(vhdl);
int
-xbow_reset_link(devfs_handle_t xconn_vhdl)
+xbow_reset_link(vertex_hdl_t xconn_vhdl)
{
xwidget_info_t widget_info;
xwidgetnum_t port;
xbow = XBOW_K1PTR;
#else
{
- devfs_handle_t xbow_vhdl;
+ vertex_hdl_t xbow_vhdl;
xbow_soft_t xbow_soft;
hwgraph_traverse(xconn_vhdl, ".master/xtalk/0/xbow", &xbow_vhdl);
return 0;
}
-/*
- * Dump xbow registers.
- * input parameter is either a pointer to
- * the xbow chip or the vertex handle for
- * an xbow vertex.
- */
-void
-idbg_xbowregs(int64_t regs)
-{
- xbow_t *xbow;
- int i;
- xb_linkregs_t *link;
-
- xbow = (xbow_t *) regs;
-
-#ifdef LATER
- qprintf("Printing xbow registers starting at 0x%x\n", xbow);
- qprintf("wid %x status %x erruppr %x errlower %x control %x timeout %x\n",
- xbow->xb_wid_id, xbow->xb_wid_stat, xbow->xb_wid_err_upper,
- xbow->xb_wid_err_lower, xbow->xb_wid_control,
- xbow->xb_wid_req_timeout);
- qprintf("intr uppr %x lower %x errcmd %x llp ctrl %x arb_reload %x\n",
- xbow->xb_wid_int_upper, xbow->xb_wid_int_lower,
- xbow->xb_wid_err_cmdword, xbow->xb_wid_llp,
- xbow->xb_wid_arb_reload);
-#endif
-
- for (i = 8; i <= 0xf; i++) {
- link = &xbow->xb_link(i);
-#ifdef LATER
- qprintf("Link %d registers\n", i);
- qprintf("\tctrl %x stat %x arbuppr %x arblowr %x auxstat %x\n",
- link->link_control, link->link_status,
- link->link_arb_upper, link->link_arb_lower,
- link->link_aux_status);
-#endif
- }
-}
-
-
#define XBOW_ARB_RELOAD_TICKS 25
/* granularity: 4 MB/s, max: 124 MB/s */
#define GRANULARITY ((100 * 1000000) / XBOW_ARB_RELOAD_TICKS)
* If bandwidth allocation is successful, return success else return failure.
*/
int
-xbow_prio_bw_alloc(devfs_handle_t vhdl,
+xbow_prio_bw_alloc(vertex_hdl_t vhdl,
xwidgetnum_t src_wid,
xwidgetnum_t dest_wid,
unsigned long long old_alloc_bw,
char widget_info_fingerprint[] = "widget_info";
-cdl_p xtalk_registry = NULL;
-
#define DEV_FUNC(dev,func) hub_##func
#define CAST_PIOMAP(x) ((hub_piomap_t)(x))
#define CAST_DMAMAP(x) ((hub_dmamap_t)(x))
/* =====================================================================
* Function Table of Contents
*/
-xtalk_piomap_t xtalk_piomap_alloc(devfs_handle_t, device_desc_t, iopaddr_t, size_t, size_t, unsigned);
+xtalk_piomap_t xtalk_piomap_alloc(vertex_hdl_t, device_desc_t, iopaddr_t, size_t, size_t, unsigned);
void xtalk_piomap_free(xtalk_piomap_t);
caddr_t xtalk_piomap_addr(xtalk_piomap_t, iopaddr_t, size_t);
void xtalk_piomap_done(xtalk_piomap_t);
-caddr_t xtalk_piotrans_addr(devfs_handle_t, device_desc_t, iopaddr_t, size_t, unsigned);
-caddr_t xtalk_pio_addr(devfs_handle_t, device_desc_t, iopaddr_t, size_t, xtalk_piomap_t *, unsigned);
+caddr_t xtalk_piotrans_addr(vertex_hdl_t, device_desc_t, iopaddr_t, size_t, unsigned);
+caddr_t xtalk_pio_addr(vertex_hdl_t, device_desc_t, iopaddr_t, size_t, xtalk_piomap_t *, unsigned);
void xtalk_set_early_piotrans_addr(xtalk_early_piotrans_addr_f *);
caddr_t xtalk_early_piotrans_addr(xwidget_part_num_t, xwidget_mfg_num_t, int, iopaddr_t, size_t, unsigned);
static caddr_t null_xtalk_early_piotrans_addr(xwidget_part_num_t, xwidget_mfg_num_t, int, iopaddr_t, size_t, unsigned);
-xtalk_dmamap_t xtalk_dmamap_alloc(devfs_handle_t, device_desc_t, size_t, unsigned);
+xtalk_dmamap_t xtalk_dmamap_alloc(vertex_hdl_t, device_desc_t, size_t, unsigned);
void xtalk_dmamap_free(xtalk_dmamap_t);
iopaddr_t xtalk_dmamap_addr(xtalk_dmamap_t, paddr_t, size_t);
alenlist_t xtalk_dmamap_list(xtalk_dmamap_t, alenlist_t, unsigned);
void xtalk_dmamap_done(xtalk_dmamap_t);
-iopaddr_t xtalk_dmatrans_addr(devfs_handle_t, device_desc_t, paddr_t, size_t, unsigned);
-alenlist_t xtalk_dmatrans_list(devfs_handle_t, device_desc_t, alenlist_t, unsigned);
+iopaddr_t xtalk_dmatrans_addr(vertex_hdl_t, device_desc_t, paddr_t, size_t, unsigned);
+alenlist_t xtalk_dmatrans_list(vertex_hdl_t, device_desc_t, alenlist_t, unsigned);
void xtalk_dmamap_drain(xtalk_dmamap_t);
-void xtalk_dmaaddr_drain(devfs_handle_t, iopaddr_t, size_t);
-void xtalk_dmalist_drain(devfs_handle_t, alenlist_t);
-xtalk_intr_t xtalk_intr_alloc(devfs_handle_t, device_desc_t, devfs_handle_t);
-xtalk_intr_t xtalk_intr_alloc_nothd(devfs_handle_t, device_desc_t, devfs_handle_t);
+void xtalk_dmaaddr_drain(vertex_hdl_t, iopaddr_t, size_t);
+void xtalk_dmalist_drain(vertex_hdl_t, alenlist_t);
+xtalk_intr_t xtalk_intr_alloc(vertex_hdl_t, device_desc_t, vertex_hdl_t);
+xtalk_intr_t xtalk_intr_alloc_nothd(vertex_hdl_t, device_desc_t, vertex_hdl_t);
void xtalk_intr_free(xtalk_intr_t);
int xtalk_intr_connect(xtalk_intr_t, intr_func_t, intr_arg_t, xtalk_intr_setfunc_t, void *);
void xtalk_intr_disconnect(xtalk_intr_t);
-devfs_handle_t xtalk_intr_cpu_get(xtalk_intr_t);
-int xtalk_error_handler(devfs_handle_t, int, ioerror_mode_t, ioerror_t *);
-int xtalk_error_devenable(devfs_handle_t, int, int);
-void xtalk_provider_startup(devfs_handle_t);
-void xtalk_provider_shutdown(devfs_handle_t);
-devfs_handle_t xtalk_intr_dev_get(xtalk_intr_t);
+vertex_hdl_t xtalk_intr_cpu_get(xtalk_intr_t);
+int xtalk_error_handler(vertex_hdl_t, int, ioerror_mode_t, ioerror_t *);
+int xtalk_error_devenable(vertex_hdl_t, int, int);
+void xtalk_provider_startup(vertex_hdl_t);
+void xtalk_provider_shutdown(vertex_hdl_t);
+vertex_hdl_t xtalk_intr_dev_get(xtalk_intr_t);
xwidgetnum_t xtalk_intr_target_get(xtalk_intr_t);
xtalk_intr_vector_t xtalk_intr_vector_get(xtalk_intr_t);
iopaddr_t xtalk_intr_addr_get(struct xtalk_intr_s *);
void *xtalk_intr_sfarg_get(xtalk_intr_t);
-devfs_handle_t xtalk_pio_dev_get(xtalk_piomap_t);
+vertex_hdl_t xtalk_pio_dev_get(xtalk_piomap_t);
xwidgetnum_t xtalk_pio_target_get(xtalk_piomap_t);
iopaddr_t xtalk_pio_xtalk_addr_get(xtalk_piomap_t);
ulong xtalk_pio_mapsz_get(xtalk_piomap_t);
caddr_t xtalk_pio_kvaddr_get(xtalk_piomap_t);
-devfs_handle_t xtalk_dma_dev_get(xtalk_dmamap_t);
+vertex_hdl_t xtalk_dma_dev_get(xtalk_dmamap_t);
xwidgetnum_t xtalk_dma_target_get(xtalk_dmamap_t);
-xwidget_info_t xwidget_info_chk(devfs_handle_t);
-xwidget_info_t xwidget_info_get(devfs_handle_t);
-void xwidget_info_set(devfs_handle_t, xwidget_info_t);
-devfs_handle_t xwidget_info_dev_get(xwidget_info_t);
+xwidget_info_t xwidget_info_chk(vertex_hdl_t);
+xwidget_info_t xwidget_info_get(vertex_hdl_t);
+void xwidget_info_set(vertex_hdl_t, xwidget_info_t);
+vertex_hdl_t xwidget_info_dev_get(xwidget_info_t);
xwidgetnum_t xwidget_info_id_get(xwidget_info_t);
-devfs_handle_t xwidget_info_master_get(xwidget_info_t);
+vertex_hdl_t xwidget_info_master_get(xwidget_info_t);
xwidgetnum_t xwidget_info_masterid_get(xwidget_info_t);
xwidget_part_num_t xwidget_info_part_num_get(xwidget_info_t);
xwidget_mfg_num_t xwidget_info_mfg_num_get(xwidget_info_t);
char *xwidget_info_name_get(xwidget_info_t);
-void xtalk_init(void);
-void xtalk_provider_register(devfs_handle_t, xtalk_provider_t *);
-void xtalk_provider_unregister(devfs_handle_t);
-xtalk_provider_t *xtalk_provider_fns_get(devfs_handle_t);
+void xtalk_provider_register(vertex_hdl_t, xtalk_provider_t *);
+void xtalk_provider_unregister(vertex_hdl_t);
+xtalk_provider_t *xtalk_provider_fns_get(vertex_hdl_t);
int xwidget_driver_register(xwidget_part_num_t,
xwidget_mfg_num_t,
char *, unsigned);
void xwidget_driver_unregister(char *);
-int xwidget_register(xwidget_hwid_t, devfs_handle_t,
- xwidgetnum_t, devfs_handle_t,
- xwidgetnum_t, async_attach_t);
-int xwidget_unregister(devfs_handle_t);
-void xwidget_reset(devfs_handle_t);
-char *xwidget_name_get(devfs_handle_t);
+int xwidget_register(xwidget_hwid_t, vertex_hdl_t,
+ xwidgetnum_t, vertex_hdl_t,
+ xwidgetnum_t);
+int xwidget_unregister(vertex_hdl_t);
+void xwidget_reset(vertex_hdl_t);
+char *xwidget_name_get(vertex_hdl_t);
#if !defined(DEV_FUNC)
/*
* There is more than one possible provider
#define CAST_INTR(x) ((xtalk_intr_t)(x))
static xtalk_provider_t *
-xwidget_to_provider_fns(devfs_handle_t xconn)
+xwidget_to_provider_fns(vertex_hdl_t xconn)
{
xwidget_info_t widget_info;
xtalk_provider_t *provider_fns;
*/
xtalk_piomap_t
-xtalk_piomap_alloc(devfs_handle_t dev, /* set up mapping for this device */
+xtalk_piomap_alloc(vertex_hdl_t dev, /* set up mapping for this device */
device_desc_t dev_desc, /* device descriptor */
iopaddr_t xtalk_addr, /* map for this xtalk_addr range */
size_t byte_count,
caddr_t
-xtalk_piotrans_addr(devfs_handle_t dev, /* translate for this device */
+xtalk_piotrans_addr(vertex_hdl_t dev, /* translate for this device */
device_desc_t dev_desc, /* device descriptor */
iopaddr_t xtalk_addr, /* Crosstalk address */
size_t byte_count, /* map this many bytes */
}
caddr_t
-xtalk_pio_addr(devfs_handle_t dev, /* translate for this device */
+xtalk_pio_addr(vertex_hdl_t dev, /* translate for this device */
device_desc_t dev_desc, /* device descriptor */
iopaddr_t addr, /* starting address (or offset in window) */
size_t byte_count, /* map this many bytes */
*/
xtalk_dmamap_t
-xtalk_dmamap_alloc(devfs_handle_t dev, /* set up mappings for this device */
+xtalk_dmamap_alloc(vertex_hdl_t dev, /* set up mappings for this device */
device_desc_t dev_desc, /* device descriptor */
size_t byte_count_max, /* max size of a mapping */
unsigned flags)
iopaddr_t
-xtalk_dmatrans_addr(devfs_handle_t dev, /* translate for this device */
+xtalk_dmatrans_addr(vertex_hdl_t dev, /* translate for this device */
device_desc_t dev_desc, /* device descriptor */
paddr_t paddr, /* system physical address */
size_t byte_count, /* length */
alenlist_t
-xtalk_dmatrans_list(devfs_handle_t dev, /* translate for this device */
+xtalk_dmatrans_list(vertex_hdl_t dev, /* translate for this device */
device_desc_t dev_desc, /* device descriptor */
alenlist_t palenlist, /* system address/length list */
unsigned flags)
}
void
-xtalk_dmaaddr_drain(devfs_handle_t dev, paddr_t addr, size_t size)
+xtalk_dmaaddr_drain(vertex_hdl_t dev, paddr_t addr, size_t size)
{
DEV_FUNC(dev, dmaaddr_drain)
(dev, addr, size);
}
void
-xtalk_dmalist_drain(devfs_handle_t dev, alenlist_t list)
+xtalk_dmalist_drain(vertex_hdl_t dev, alenlist_t list)
{
DEV_FUNC(dev, dmalist_drain)
(dev, list);
* Return resource handle in intr_hdl.
*/
xtalk_intr_t
-xtalk_intr_alloc(devfs_handle_t dev, /* which Crosstalk device */
+xtalk_intr_alloc(vertex_hdl_t dev, /* which Crosstalk device */
device_desc_t dev_desc, /* device descriptor */
- devfs_handle_t owner_dev)
+ vertex_hdl_t owner_dev)
{ /* owner of this interrupt */
return (xtalk_intr_t) DEV_FUNC(dev, intr_alloc)
(dev, dev_desc, owner_dev);
* Return resource handle in intr_hdl.
*/
xtalk_intr_t
-xtalk_intr_alloc_nothd(devfs_handle_t dev, /* which Crosstalk device */
+xtalk_intr_alloc_nothd(vertex_hdl_t dev, /* which Crosstalk device */
device_desc_t dev_desc, /* device descriptor */
- devfs_handle_t owner_dev) /* owner of this interrupt */
+ vertex_hdl_t owner_dev) /* owner of this interrupt */
{
return (xtalk_intr_t) DEV_FUNC(dev, intr_alloc_nothd)
(dev, dev_desc, owner_dev);
* Return a hwgraph vertex that represents the CPU currently
* targeted by an interrupt.
*/
-devfs_handle_t
+vertex_hdl_t
xtalk_intr_cpu_get(xtalk_intr_t intr_hdl)
{
- return INTR_FUNC(intr_hdl, intr_cpu_get)
- (CAST_INTR(intr_hdl));
+ return (vertex_hdl_t)0;
}
*/
int
xtalk_error_handler(
- devfs_handle_t xconn,
+ vertex_hdl_t xconn,
int error_code,
ioerror_mode_t mode,
ioerror_t *ioerror)
#if defined(SUPPORT_PRINTING_V_FORMAT)
printk(KERN_WARNING "Xbow at %v encountered Fatal error", xconn);
#else
- printk(KERN_WARNING "Xbow at 0x%p encountered Fatal error", xconn);
+ printk(KERN_WARNING "Xbow at 0x%p encountered Fatal error", (void *)xconn);
#endif
ioerror_dump("xtalk", error_code, mode, ioerror);
}
int
-xtalk_error_devenable(devfs_handle_t xconn_vhdl, int devnum, int error_code)
+xtalk_error_devenable(vertex_hdl_t xconn_vhdl, int devnum, int error_code)
{
return DEV_FUNC(xconn_vhdl, error_devenable) (xconn_vhdl, devnum, error_code);
}
* Startup a crosstalk provider
*/
void
-xtalk_provider_startup(devfs_handle_t xtalk_provider)
+xtalk_provider_startup(vertex_hdl_t xtalk_provider)
{
DEV_FUNC(xtalk_provider, provider_startup)
(xtalk_provider);
* Shutdown a crosstalk provider
*/
void
-xtalk_provider_shutdown(devfs_handle_t xtalk_provider)
+xtalk_provider_shutdown(vertex_hdl_t xtalk_provider)
{
DEV_FUNC(xtalk_provider, provider_shutdown)
(xtalk_provider);
* Enable a device on a xtalk widget
*/
void
-xtalk_widgetdev_enable(devfs_handle_t xconn_vhdl, int devnum)
+xtalk_widgetdev_enable(vertex_hdl_t xconn_vhdl, int devnum)
{
- DEV_FUNC(xconn_vhdl, widgetdev_enable) (xconn_vhdl, devnum);
+ return;
}
/*
* Shutdown a device on a xtalk widget
*/
void
-xtalk_widgetdev_shutdown(devfs_handle_t xconn_vhdl, int devnum)
+xtalk_widgetdev_shutdown(vertex_hdl_t xconn_vhdl, int devnum)
{
- DEV_FUNC(xconn_vhdl, widgetdev_shutdown) (xconn_vhdl, devnum);
+ return;
}
int
-xtalk_dma_enabled(devfs_handle_t xconn_vhdl)
+xtalk_dma_enabled(vertex_hdl_t xconn_vhdl)
{
return DEV_FUNC(xconn_vhdl, dma_enabled) (xconn_vhdl);
}
*/
/****** Generic crosstalk interrupt interfaces ******/
-devfs_handle_t
+vertex_hdl_t
xtalk_intr_dev_get(xtalk_intr_t xtalk_intr)
{
return (xtalk_intr->xi_dev);
}
/****** Generic crosstalk pio interfaces ******/
-devfs_handle_t
+vertex_hdl_t
xtalk_pio_dev_get(xtalk_piomap_t xtalk_piomap)
{
return (xtalk_piomap->xp_dev);
/****** Generic crosstalk dma interfaces ******/
-devfs_handle_t
+vertex_hdl_t
xtalk_dma_dev_get(xtalk_dmamap_t xtalk_dmamap)
{
return (xtalk_dmamap->xd_dev);
* if not, return NULL.
*/
xwidget_info_t
-xwidget_info_chk(devfs_handle_t xwidget)
+xwidget_info_chk(vertex_hdl_t xwidget)
{
arbitrary_info_t ainfo = 0;
xwidget_info_t
-xwidget_info_get(devfs_handle_t xwidget)
+xwidget_info_get(vertex_hdl_t xwidget)
{
xwidget_info_t widget_info;
widget_info = (xwidget_info_t)
hwgraph_fastinfo_get(xwidget);
-#ifdef LATER
- if ((widget_info != NULL) &&
- (widget_info->w_fingerprint != widget_info_fingerprint))
-#ifdef SUPPORT_PRINTING_V_FORMAT
- PRINT_PANIC("%v bad xwidget_info", xwidget);
-#else
- PRINT_PANIC("%x bad xwidget_info", xwidget);
-#endif
-#endif /* LATER */
-
return (widget_info);
}
void
-xwidget_info_set(devfs_handle_t xwidget, xwidget_info_t widget_info)
+xwidget_info_set(vertex_hdl_t xwidget, xwidget_info_t widget_info)
{
if (widget_info != NULL)
widget_info->w_fingerprint = widget_info_fingerprint;
(arbitrary_info_t) widget_info);
}
-devfs_handle_t
+vertex_hdl_t
xwidget_info_dev_get(xwidget_info_t xwidget_info)
{
if (xwidget_info == NULL)
- panic("null xwidget_info");
+ panic("xwidget_info_dev_get: null xwidget_info");
return (xwidget_info->w_vertex);
}
xwidget_info_id_get(xwidget_info_t xwidget_info)
{
if (xwidget_info == NULL)
- panic("null xwidget_info");
+ panic("xwidget_info_id_get: null xwidget_info");
return (xwidget_info->w_id);
}
-devfs_handle_t
+vertex_hdl_t
xwidget_info_master_get(xwidget_info_t xwidget_info)
{
if (xwidget_info == NULL)
- panic("null xwidget_info");
+ panic("xwidget_info_master_get: null xwidget_info");
return (xwidget_info->w_master);
}
xwidget_info_masterid_get(xwidget_info_t xwidget_info)
{
if (xwidget_info == NULL)
- panic("null xwidget_info");
+ panic("xwidget_info_masterid_get: null xwidget_info");
return (xwidget_info->w_masterid);
}
xwidget_info_part_num_get(xwidget_info_t xwidget_info)
{
if (xwidget_info == NULL)
- panic("null xwidget_info");
+ panic("xwidget_info_part_num_get: null xwidget_info");
return (xwidget_info->w_hwid.part_num);
}
xwidget_info_mfg_num_get(xwidget_info_t xwidget_info)
{
if (xwidget_info == NULL)
- panic("null xwidget_info");
+ panic("xwidget_info_mfg_num_get: null xwidget_info");
return (xwidget_info->w_hwid.mfg_num);
}
/* Extract the widget name from the widget information
xwidget_info_name_get(xwidget_info_t xwidget_info)
{
if (xwidget_info == NULL)
- panic("null xwidget info");
+ panic("xwidget_info_name_get: null xwidget_info");
return(xwidget_info->w_name);
}
/****** Generic crosstalk initialization interfaces ******/
/*
- * One-time initialization needed for systems that support crosstalk.
- */
-void
-xtalk_init(void)
-{
- cdl_p cp;
-
-#if DEBUG && ATTACH_DEBUG
- printf("xtalk_init\n");
-#endif
- /* Allocate the registry.
- * We might already have one.
- * If we don't, go get one.
- * MPness: someone might have
- * set one up for us while we
- * were not looking; use an atomic
- * compare-and-swap to commit to
- * using the new registry if and
- * only if nobody else did first.
- * If someone did get there first,
- * toss the one we allocated back
- * into the pool.
- */
- if (xtalk_registry == NULL) {
- cp = cdl_new(EDGE_LBL_XIO, "part", "mfgr");
- if (!compare_and_swap_ptr((void **) &xtalk_registry, NULL, (void *) cp)) {
- cdl_del(cp);
- }
- }
- ASSERT(xtalk_registry != NULL);
-}
-
-/*
* Associate a set of xtalk_provider functions with a vertex.
*/
void
-xtalk_provider_register(devfs_handle_t provider, xtalk_provider_t *xtalk_fns)
+xtalk_provider_register(vertex_hdl_t provider, xtalk_provider_t *xtalk_fns)
{
hwgraph_fastinfo_set(provider, (arbitrary_info_t) xtalk_fns);
}
* Disassociate a set of xtalk_provider functions with a vertex.
*/
void
-xtalk_provider_unregister(devfs_handle_t provider)
+xtalk_provider_unregister(vertex_hdl_t provider)
{
hwgraph_fastinfo_set(provider, (arbitrary_info_t)NULL);
}
* provider.
*/
xtalk_provider_t *
-xtalk_provider_fns_get(devfs_handle_t provider)
+xtalk_provider_fns_get(vertex_hdl_t provider)
{
return ((xtalk_provider_t *) hwgraph_fastinfo_get(provider));
}
/*
- * Announce a driver for a particular crosstalk part.
- * Returns 0 on success or -1 on failure. Failure occurs if the
- * specified hardware already has a driver.
- */
-/*ARGSUSED4 */
-int
-xwidget_driver_register(xwidget_part_num_t part_num,
- xwidget_mfg_num_t mfg_num,
- char *driver_prefix,
- unsigned flags)
-{
- /* a driver's init routine could call
- * xwidget_driver_register before the
- * system calls xtalk_init; so, we
- * make the call here.
- */
- if (xtalk_registry == NULL)
- xtalk_init();
-
- return cdl_add_driver(xtalk_registry,
- part_num, mfg_num,
- driver_prefix, flags, NULL);
-}
-
-/*
* Inform xtalk infrastructure that a driver is no longer available for
* handling any widgets.
*/
void
xwidget_driver_unregister(char *driver_prefix)
{
- /* before a driver calls unregister,
- * it must have called registger; so we
- * can assume we have a registry here.
- */
- ASSERT(xtalk_registry != NULL);
-
- cdl_del_driver(xtalk_registry, driver_prefix, NULL);
+ return;
}
/*
xtalk_iterate(char *driver_prefix,
xtalk_iter_f *func)
{
- ASSERT(xtalk_registry != NULL);
-
- cdl_iterate(xtalk_registry, driver_prefix, (cdl_iter_f *)func);
}
/*
*/
int
xwidget_register(xwidget_hwid_t hwid, /* widget's hardware ID */
- devfs_handle_t widget, /* widget to initialize */
+ vertex_hdl_t widget, /* widget to initialize */
xwidgetnum_t id, /* widget's target id (0..f) */
- devfs_handle_t master, /* widget's master vertex */
- xwidgetnum_t targetid, /* master's target id (9/a) */
- async_attach_t aa)
+ vertex_hdl_t master, /* widget's master vertex */
+ xwidgetnum_t targetid) /* master's target id (9/a) */
{
xwidget_info_t widget_info;
char *s,devnm[MAXDEVNAME];
device_master_set(widget, master);
- /* All the driver init routines (including
- * xtalk_init) are called before we get into
- * attaching devices, so we can assume we
- * have a registry here.
- */
- ASSERT(xtalk_registry != NULL);
-
/*
* Add pointer to async attach info -- tear down will be done when
* the particular descendant is done with the info.
*/
- if (aa)
- async_attach_add_info(widget, aa);
-
- return cdl_add_connpt(xtalk_registry, hwid->part_num, hwid->mfg_num,
+ return cdl_add_connpt(hwid->part_num, hwid->mfg_num,
widget, 0);
}
* Unregister the xtalk device and detach all its hwgraph namespace.
*/
int
-xwidget_unregister(devfs_handle_t widget)
+xwidget_unregister(vertex_hdl_t widget)
{
xwidget_info_t widget_info;
xwidget_hwid_t hwid;
hwid = &(widget_info->w_hwid);
- cdl_del_connpt(xtalk_registry, hwid->part_num, hwid->mfg_num,
- widget, 0);
-
/* Clean out the xwidget information */
(void)kfree(widget_info->w_name);
BZERO((void *)widget_info, sizeof(widget_info));
}
void
-xwidget_error_register(devfs_handle_t xwidget,
+xwidget_error_register(vertex_hdl_t xwidget,
error_handler_f *efunc,
error_handler_arg_t einfo)
{
* Issue a link reset to a widget.
*/
void
-xwidget_reset(devfs_handle_t xwidget)
+xwidget_reset(vertex_hdl_t xwidget)
{
xswitch_reset_link(xwidget);
-
}
void
-xwidget_gfx_reset(devfs_handle_t xwidget)
+xwidget_gfx_reset(vertex_hdl_t xwidget)
{
- xwidget_info_t info;
-
- xswitch_reset_link(xwidget);
- info = xwidget_info_get(xwidget);
-#ifdef LATER
- ASSERT_ALWAYS(info != NULL);
-#endif
-
- /*
- * Enable this for other architectures once we add widget_reset to the
- * xtalk provider interface.
- */
- DEV_FUNC(xtalk_provider, widget_reset)
- (xwidget_info_master_get(info), xwidget_info_id_get(info));
+ return;
}
#define ANON_XWIDGET_NAME "No Name" /* Default Widget Name */
/* Get the canonical hwgraph name of xtalk widget */
char *
-xwidget_name_get(devfs_handle_t xwidget_vhdl)
+xwidget_name_get(vertex_hdl_t xwidget_vhdl)
{
xwidget_info_t info;
+++ /dev/null
-/* $Id$
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Copyright (C) 1992 - 1997, 2000-2002 Silicon Graphics, Inc. All rights reserved.
- */
-
-#include <linux/types.h>
-#include <linux/ctype.h>
-#include <linux/mmzone.h>
-#include <linux/slab.h>
-#include <asm/sn/arch.h>
-#include <asm/sn/sgi.h>
-#include <asm/sn/invent.h>
-#include <asm/sn/hcl.h>
-#include <asm/sn/labelcl.h>
-#include <asm/sn/pci/bridge.h>
-#include <asm/sn/ioerror_handling.h>
-#include <asm/sn/pci/pciio.h>
-#include <asm/sn/slotnum.h>
-#include <asm/sn/vector.h>
-#include <asm/sn/nic.h>
-
-/******
- ****** hack defines ......
- ******/
-
-int pcibr_prefetch_enable_rev, pcibr_wg_enable_rev;
-int default_intr_pri;
-int force_fire_and_forget = 1;
-int ignore_conveyor_override = 0;
-
-devfs_handle_t dummy_vrtx; /* Needed for cpuid_to_vertex() in hack.h */
-
-
-/* ARGSUSED */
-void hub_widgetdev_enable(devfs_handle_t xconn_vhdl, int devnum)
- {FIXME("hub_widgetdev_enable");}
-
-/* ARGSUSED */
-void hub_widgetdev_shutdown(devfs_handle_t xconn_vhdl, int devnum)
- {FIXME("hub_widgetdev_shutdown");}
-
-/* ARGSUSED */
-void hub_widget_reset(devfs_handle_t hubv, xwidgetnum_t widget)
- {FIXME("hub_widget_reset");}
-
-boolean_t
-is_sys_critical_vertex(devfs_handle_t x)
-{
- FIXME("is_sys_critical_vertex : returns 0");
- return(0);
-}
-
-void *
-snia_kmem_zone_alloc(register struct zone *zone, int flags)
-{
- FIXME("snia_kmem_zone_alloc : return null");
- return((void *)0);
-}
-
-void
-snia_kmem_zone_free(register struct zone *zone, void *ptr)
-{
- FIXME("snia_kmem_zone_free : no-op");
-}
-
-struct zone *
-snia_kmem_zone_init(register int size, char *zone_name)
-{
- FIXME("snia_kmem_zone_free : returns NULL");
- return((struct zone *)0);
-}
-
-int
-compare_and_swap_ptr(void **location, void *old_ptr, void *new_ptr)
-{
- FIXME("compare_and_swap_ptr : NOT ATOMIC");
- if (*location == old_ptr) {
- *location = new_ptr;
- return(1);
- }
- else
- return(0);
-}
-
-/* For ml/SN/SN1/slots.c */
-/* ARGSUSED */
-slotid_t get_widget_slotnum(int xbow, int widget)
- {FIXME("get_widget_slotnum"); return (unsigned char)NULL;}
-
-/* For router */
-int
-router_init(cnodeid_t cnode,int writeid, void *npda_rip)
- {FIXME("router_init"); return(0);}
-
-/* From io/ioerror_handling.c */
-error_return_code_t
-sys_critical_graph_vertex_add(devfs_handle_t parent, devfs_handle_t child)
- {FIXME("sys_critical_graph_vertex_add"); return(0);}
-
-/* From io/ioc3.c */
-devfs_handle_t
-ioc3_console_vhdl_get(void)
- {FIXME("ioc3_console_vhdl_get"); return( (devfs_handle_t)-1);}
-
-void
-nic_vmc_check(devfs_handle_t vhdl, char *nicinfo)
-{
-
- FIXME("nic_vmc_check\n");
-
-}
-
-char *
-nic_vertex_info_get(devfs_handle_t v)
-{
- FIXME("nic_vertex_info_get\n");
- return(NULL);
-}
-
-int
-vector_read_node(net_vec_t dest, nasid_t nasid,
- int write_id, int address,
- uint64_t *value)
-{
- FIXME("vector_read_node\n");
- return(0);
-}
-
-int
-vector_write_node(net_vec_t dest, nasid_t nasid,
- int write_id, int address,
- uint64_t value)
-{
- FIXME("vector_write_node\n");
- return(0);
-}
+++ /dev/null
-/* $Id$
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Copyright (C) 1992 - 1997, 2000-2001 Silicon Graphics, Inc. All rights reserved.
- */
-
-#include <linux/types.h>
-#include <linux/slab.h>
-#include <linux/sched.h>
-#include <asm/sn/sgi.h>
-#include <asm/sn/iograph.h>
-#include <asm/sn/invent.h>
-#include <asm/sn/hcl.h>
-#include <asm/sn/labelcl.h>
-#include <asm/sn/hack.h>
-#include <asm/sn/pci/bridge.h>
-#include <asm/sn/xtalk/xtalk_private.h>
-#include <asm/sn/simulator.h>
-
-/* #define DEBUG 1 */
-/* #define XBOW_DEBUG 1 */
-
-
-/*
- * Files needed to get the device driver entry points
- */
-
-#include <asm/sn/xtalk/xbow.h>
-#include <asm/sn/xtalk/xtalk.h>
-#include <asm/sn/xtalk/xswitch.h>
-#include <asm/sn/xtalk/xwidget.h>
-
-#include <asm/sn/prio.h>
-#include <asm/sn/hcl_util.h>
-
-
-#define NEW(ptr) (ptr = kmalloc(sizeof (*(ptr)), GFP_KERNEL))
-#define DEL(ptr) (kfree(ptr))
-
-int xbow_devflag = D_MP;
-
-/*
- * This file supports the Xbow chip. Main functions: initializtion,
- * error handling, and GBR.
- */
-
-/*
- * each vertex corresponding to an xbow chip
- * has a "fastinfo" pointer pointing at one
- * of these things.
- */
-typedef struct xbow_soft_s *xbow_soft_t;
-
-struct xbow_soft_s {
- devfs_handle_t conn; /* our connection point */
- devfs_handle_t vhdl; /* xbow's private vertex */
- devfs_handle_t busv; /* the xswitch vertex */
- xbow_t *base; /* PIO pointer to crossbow chip */
- char *name; /* hwgraph name */
-
- xbow_perf_t xbow_perfcnt[XBOW_PERF_COUNTERS];
- xbow_perf_link_t xbow_perflink[MAX_XBOW_PORTS];
- xbow_link_status_t xbow_link_status[MAX_XBOW_PORTS];
- spinlock_t xbow_perf_lock;
- int link_monitor;
- widget_cfg_t *wpio[MAX_XBOW_PORTS]; /* cached PIO pointer */
-
- /* Bandwidth allocation state. Bandwidth values are for the
- * destination port since contention happens there.
- * Implicit mapping from xbow ports (8..f) -> (0..7) array indices.
- */
- spinlock_t xbow_bw_alloc_lock; /* bw allocation lock */
- unsigned long long bw_hiwm[MAX_XBOW_PORTS]; /* hiwater mark values */
- unsigned long long bw_cur_used[MAX_XBOW_PORTS]; /* bw used currently */
-};
-
-#define xbow_soft_set(v,i) hwgraph_fastinfo_set((v), (arbitrary_info_t)(i))
-#define xbow_soft_get(v) ((xbow_soft_t)hwgraph_fastinfo_get((v)))
-
-/*
- * Function Table of Contents
- */
-
-void xbow_mlreset(xbow_t *);
-void xbow_init(void);
-int xbow_attach(devfs_handle_t);
-
-int xbow_open(devfs_handle_t *, int, int, cred_t *);
-int xbow_close(devfs_handle_t, int, int, cred_t *);
-
-int xbow_map(devfs_handle_t, vhandl_t *, off_t, size_t, uint);
-int xbow_unmap(devfs_handle_t, vhandl_t *);
-int xbow_ioctl(devfs_handle_t, int, void *, int, struct cred *, int *);
-
-int xbow_widget_present(xbow_t *, int);
-static int xbow_link_alive(xbow_t *, int);
-devfs_handle_t xbow_widget_lookup(devfs_handle_t, int);
-
-#ifdef LATER
-static void xbow_setwidint(xtalk_intr_t);
-static void xbow_errintr_handler(intr_arg_t);
-#endif
-void xbow_intr_preset(void *, int, xwidgetnum_t, iopaddr_t, xtalk_intr_vector_t);
-
-
-
-void xbow_update_perf_counters(devfs_handle_t);
-xbow_perf_link_t *xbow_get_perf_counters(devfs_handle_t);
-int xbow_enable_perf_counter(devfs_handle_t, int, int, int);
-xbow_link_status_t *xbow_get_llp_status(devfs_handle_t);
-void xbow_update_llp_status(devfs_handle_t);
-
-int xbow_disable_llp_monitor(devfs_handle_t);
-int xbow_enable_llp_monitor(devfs_handle_t);
-int xbow_prio_bw_alloc(devfs_handle_t, xwidgetnum_t, xwidgetnum_t,
- unsigned long long, unsigned long long);
-
-xswitch_reset_link_f xbow_reset_link;
-
-void idbg_xbowregs(int64_t);
-
-xswitch_provider_t xbow_provider =
-{
- xbow_reset_link,
-};
-
-/*
- * xbow_mlreset: called at mlreset time if the
- * platform specific code determines that there is
- * a crossbow in a critical path that must be
- * functional before the driver would normally get
- * the device properly set up.
- *
- * what do we need to do, that the boot prom can
- * not be counted on to have already done, that is
- * generic across all platforms using crossbows?
- */
-/*ARGSUSED */
-void
-xbow_mlreset(xbow_t * xbow)
-{
-}
-
-/*
- * xbow_init: called with the rest of the device
- * driver XXX_init routines. This platform *might*
- * have a Crossbow chip, or even several, but it
- * might have none. Register with the crosstalk
- * generic provider so when we encounter the chip
- * the right magic happens.
- */
-void
-xbow_init(void)
-{
-
-#if DEBUG && ATTACH_DEBUG
- printf("xbow_init\n");
-#endif
-
- xwidget_driver_register(XXBOW_WIDGET_PART_NUM,
- 0, /* XXBOW_WIDGET_MFGR_NUM, */
- "xbow_",
- CDL_PRI_HI); /* attach before friends */
-
- xwidget_driver_register(XBOW_WIDGET_PART_NUM,
- XBOW_WIDGET_MFGR_NUM,
- "xbow_",
- CDL_PRI_HI); /* attach before friends */
-}
-
-#ifdef XBRIDGE_REGS_SIM
-/* xbow_set_simulated_regs: sets xbow regs as needed
- * for powering through the boot
- */
-void
-xbow_set_simulated_regs(xbow_t *xbow, int port)
-{
- /*
- * turn on link
- */
- xbow->xb_link(port).link_status = (1<<31);
- /*
- * and give it a live widget too
- */
- xbow->xb_link(port).link_aux_status = XB_AUX_STAT_PRESENT;
- /*
- * zero the link control reg
- */
- xbow->xb_link(port).link_control = 0x0;
-}
-#endif /* XBRIDGE_REGS_SIM */
-
-/*
- * xbow_attach: the crosstalk provider has
- * determined that there is a crossbow widget
- * present, and has handed us the connection
- * point for that vertex.
- *
- * We not only add our own vertex, but add
- * some "xtalk switch" data to the switch
- * vertex (at the connect point's parent) if
- * it does not have any.
- */
-
-/*ARGSUSED */
-int
-xbow_attach(devfs_handle_t conn)
-{
- /*REFERENCED */
- devfs_handle_t vhdl;
- devfs_handle_t busv;
- xbow_t *xbow;
- xbow_soft_t soft;
- int port;
- xswitch_info_t info;
-#ifdef LATER
- xtalk_intr_t intr_hdl;
- device_desc_t dev_desc;
-#endif
- char devnm[MAXDEVNAME], *s;
- xbowreg_t id;
- int rev;
- int i;
- int xbow_num;
-
-#if DEBUG && ATTACH_DEBUG
-#if defined(SUPPORT_PRINTING_V_FORMAT
- printk("%v: xbow_attach\n", conn);
-#else
- printk("0x%x: xbow_attach\n", conn);
-#endif
-#endif
-
- /*
- * Get a PIO pointer to the base of the crossbow
- * chip.
- */
-#ifdef XBRIDGE_REGS_SIM
- printk("xbow_attach: XBRIDGE_REGS_SIM FIXME: allocating %ld bytes for xbow_s\n", sizeof(xbow_t));
- xbow = (xbow_t *) kmalloc(sizeof(xbow_t), GFP_KERNEL);
- /*
- * turn on ports e and f like in a real live ibrick
- */
- xbow_set_simulated_regs(xbow, 0xe);
- xbow_set_simulated_regs(xbow, 0xf);
-#else
- xbow = (xbow_t *) xtalk_piotrans_addr(conn, 0, 0, sizeof(xbow_t), 0);
-#endif /* XBRIDGE_REGS_SIM */
-
- /*
- * Locate the "switch" vertex: it is the parent
- * of our connection point.
- */
- busv = hwgraph_connectpt_get(conn);
-#if DEBUG && ATTACH_DEBUG
- printk("xbow_attach: Bus Vertex 0x%p, conn 0x%p, xbow register 0x%p wid= 0x%x\n", busv, conn, xbow, *(volatile u32 *)xbow);
-#endif
-
- ASSERT(busv != GRAPH_VERTEX_NONE);
-
- /*
- * Create our private vertex, and connect our
- * driver information to it. This makes it possible
- * for diagnostic drivers to open the crossbow
- * vertex for access to registers.
- */
-
- /*
- * We need to teach xbow drivers to provide the right set of
- * file ops.
- */
- vhdl = NULL;
- vhdl = hwgraph_register(conn, EDGE_LBL_XBOW,
- 0, DEVFS_FL_AUTO_DEVNUM,
- 0, 0,
- S_IFCHR | S_IRUSR | S_IWUSR | S_IRGRP, 0, 0,
- /* &hcl_fops */ (void *)&vhdl, NULL);
- if (!vhdl) {
- printk(KERN_WARNING "xbow_attach: Unable to create char device for xbow conn %p\n",
- (void *)conn);
- }
-
- /*
- * Allocate the soft state structure and attach
- * it to the xbow's vertex
- */
- NEW(soft);
- soft->conn = conn;
- soft->vhdl = vhdl;
- soft->busv = busv;
- soft->base = xbow;
- /* does the universe really need another macro? */
- /* xbow_soft_set(vhdl, (arbitrary_info_t) soft); */
- hwgraph_fastinfo_set(vhdl, (arbitrary_info_t) soft);
-
-#define XBOW_NUM_SUFFIX_FORMAT "[xbow# %d]"
-
- /* Add xbow number as a suffix to the hwgraph name of the xbow.
- * This is helpful while looking at the error/warning messages.
- */
- xbow_num = 0;
-
- /*
- * get the name of this xbow vertex and keep the info.
- * This is needed during errors and interrupts, but as
- * long as we have it, we can use it elsewhere.
- */
- s = dev_to_name(vhdl, devnm, MAXDEVNAME);
- soft->name = kmalloc(strlen(s) + strlen(XBOW_NUM_SUFFIX_FORMAT) + 1,
- GFP_KERNEL);
- sprintf(soft->name,"%s"XBOW_NUM_SUFFIX_FORMAT, s,xbow_num);
-
-#ifdef XBRIDGE_REGS_SIM
- /* my o200/ibrick has id=0x2d002049, but XXBOW_WIDGET_PART_NUM is defined
- * as 0xd000, so I'm using that for the partnum bitfield.
- */
- printk("xbow_attach: XBRIDGE_REGS_SIM FIXME: need xb_wid_id value!!\n");
- id = 0x2d000049;
-#else
- id = xbow->xb_wid_id;
-#endif /* XBRIDGE_REGS_SIM */
- rev = XWIDGET_PART_REV_NUM(id);
-
- /*
- * Print the revision if DEBUG, or SHOW_REVS and kdebug,
- * or the xbow is downrev.
- *
- * If xbow is downrev, make it a WARNING that the
- * Crossbow is DOWNREV: these chips are not good
- * to have around, and the operator should be told.
- */
-#ifdef LATER
-#if !DEBUG
- if (
-#if SHOW_REVS
- (kdebug) ||
-#endif /* SHOW_REVS */
- (rev < XBOW_REV_1_1))
-#endif /* !DEBUG */
- printk("%sCrossbow ASIC: rev %s (code=%d) at %s%s",
- (rev < XBOW_REV_1_1) ? "DOWNREV " : "",
- (rev == XBOW_REV_1_0) ? "1.0" :
- (rev == XBOW_REV_1_1) ? "1.1" :
- (rev == XBOW_REV_1_2) ? "1.2" :
- (rev == XBOW_REV_1_3) ? "1.3" :
- (rev == XBOW_REV_2_0) ? "2.0" :
- (rev == XXBOW_PART_REV_1_0) ? "Xbridge 1.0" :
- (rev == XXBOW_PART_REV_2_0) ? "Xbridge 2.0" :
- "unknown",
- rev, soft->name,
- (rev < XBOW_REV_1_1) ? "" : "\n");
-#endif /* LATER */
- mutex_spinlock_init(&soft->xbow_perf_lock);
- soft->xbow_perfcnt[0].xp_perf_reg = &xbow->xb_perf_ctr_a;
- soft->xbow_perfcnt[1].xp_perf_reg = &xbow->xb_perf_ctr_b;
-
- /* Initialization for GBR bw allocation */
- mutex_spinlock_init(&soft->xbow_bw_alloc_lock);
-
-#define XBOW_8_BIT_PORT_BW_MAX (400 * 1000 * 1000) /* 400 MB/s */
-#define XBOW_16_BIT_PORT_BW_MAX (800 * 1000 * 1000) /* 800 MB/s */
-
- /* Set bandwidth hiwatermark and current values */
- for (i = 0; i < MAX_XBOW_PORTS; i++) {
- soft->bw_hiwm[i] = XBOW_16_BIT_PORT_BW_MAX; /* for now */
- soft->bw_cur_used[i] = 0;
- }
-
- /*
- * Enable xbow error interrupts
- */
- xbow->xb_wid_control = (XB_WID_CTRL_REG_ACC_IE | XB_WID_CTRL_XTALK_IE);
-
- /*
- * take a census of the widgets present,
- * leaving notes at the switch vertex.
- */
- info = xswitch_info_new(busv);
-
- for (port = MAX_PORT_NUM - MAX_XBOW_PORTS;
- port < MAX_PORT_NUM; ++port) {
- if (!xbow_link_alive(xbow, port)) {
-#if DEBUG && XBOW_DEBUG
- printk(KERN_INFO "0x%p link %d is not alive\n",
- busv, port);
-#endif
- continue;
- }
- if (!xbow_widget_present(xbow, port)) {
-#if DEBUG && XBOW_DEBUG
- printk(KERN_INFO "0x%p link %d is alive but no widget is present\n", busv, port);
-#endif
- continue;
- }
-#if DEBUG && XBOW_DEBUG
- printk(KERN_INFO "0x%p link %d has a widget\n",
- busv, port);
-#endif
-
- xswitch_info_link_is_ok(info, port);
- /*
- * Turn some error interrupts on
- * and turn others off. The PROM has
- * some things turned on we don't
- * want to see (bandwidth allocation
- * errors for instance); so if it
- * is not listed here, it is not on.
- */
- xbow->xb_link(port).link_control =
- ( (xbow->xb_link(port).link_control
- /*
- * Turn off these bits; they are non-fatal,
- * but we might want to save some statistics
- * on the frequency of these errors.
- * XXX FIXME XXX
- */
- & ~XB_CTRL_RCV_CNT_OFLOW_IE
- & ~XB_CTRL_XMT_CNT_OFLOW_IE
- & ~XB_CTRL_BNDWDTH_ALLOC_IE
- & ~XB_CTRL_RCV_IE)
- /*
- * These are the ones we want to turn on.
- */
- | (XB_CTRL_ILLEGAL_DST_IE
- | XB_CTRL_OALLOC_IBUF_IE
- | XB_CTRL_XMT_MAX_RTRY_IE
- | XB_CTRL_MAXREQ_TOUT_IE
- | XB_CTRL_XMT_RTRY_IE
- | XB_CTRL_SRC_TOUT_IE) );
- }
-
- xswitch_provider_register(busv, &xbow_provider);
-
- return 0; /* attach successful */
-}
-
-/*ARGSUSED */
-int
-xbow_open(devfs_handle_t *devp, int oflag, int otyp, cred_t *credp)
-{
- return 0;
-
-}
-
-/*ARGSUSED */
-int
-xbow_close(devfs_handle_t dev, int oflag, int otyp, cred_t *crp)
-{
- return 0;
-}
-
-/*ARGSUSED */
-int
-xbow_map(devfs_handle_t dev, vhandl_t *vt, off_t off, size_t len, uint prot)
-{
- devfs_handle_t vhdl = dev_to_vhdl(dev);
- xbow_soft_t soft = xbow_soft_get(vhdl);
- int error;
-
- ASSERT(soft);
- len = ctob(btoc(len));
- /* XXX- this ignores the offset!!! */
- error = v_mapphys(vt, (void *) soft->base, len);
- return error;
-}
-
-/*ARGSUSED */
-int
-xbow_unmap(devfs_handle_t dev, vhandl_t *vt)
-{
- return 0;
-}
-
-/* This contains special-case code for grio. There are plans to make
- * this general sometime in the future, but till then this should
- * be good enough.
- */
-xwidgetnum_t
-xbow_widget_num_get(devfs_handle_t dev)
-{
- devfs_handle_t tdev;
- char devname[MAXDEVNAME];
- xwidget_info_t xwidget_info;
- int i;
-
- vertex_to_name(dev, devname, MAXDEVNAME);
-
- /* If this is a pci controller vertex, traverse up using
- * the ".." links to get to the widget.
- */
- if (strstr(devname, EDGE_LBL_PCI) &&
- strstr(devname, EDGE_LBL_CONTROLLER)) {
- tdev = dev;
- for (i=0; i< 2; i++) {
- if (hwgraph_edge_get(tdev,
- HWGRAPH_EDGELBL_DOTDOT, &tdev) !=
- GRAPH_SUCCESS)
- return XWIDGET_NONE;
- }
-
- if ((xwidget_info = xwidget_info_chk(tdev)) != NULL) {
- return (xwidget_info_id_get(xwidget_info));
- } else {
- return XWIDGET_NONE;
- }
- }
-
- return XWIDGET_NONE;
-}
-
-int
-xbow_ioctl(devfs_handle_t dev,
- int cmd,
- void *arg,
- int flag,
- struct cred *cr,
- int *rvalp)
-{
- devfs_handle_t vhdl;
- int error = 0;
-
-#if defined (DEBUG)
- int rc;
- devfs_handle_t conn;
- struct xwidget_info_s *xwidget_info;
- xbow_soft_t xbow_soft;
-#endif
- *rvalp = 0;
-
- vhdl = dev_to_vhdl(dev);
-#if defined (DEBUG)
- xbow_soft = xbow_soft_get(vhdl);
- conn = xbow_soft->conn;
-
- xwidget_info = xwidget_info_get(conn);
- ASSERT_ALWAYS(xwidget_info != NULL);
-
- rc = xwidget_hwid_is_xswitch(&xwidget_info->w_hwid);
- ASSERT_ALWAYS(rc != 0);
-#endif
- switch (cmd) {
-#ifdef LATER
- case XBOWIOC_PERF_ENABLE:
- case XBOWIOC_PERF_DISABLE:
- {
- struct xbow_perfarg_t xbow_perf_en;
-
- if (!_CAP_CRABLE(cr, CAP_DEVICE_MGT)) {
- error = EPERM;
- break;
- }
- if ((flag & FWRITE) == 0) {
- error = EBADF;
- break;
- }
- if (COPYIN(arg, &xbow_perf_en, sizeof(xbow_perf_en))) {
- error = EFAULT;
- break;
- }
- if (error = xbow_enable_perf_counter(vhdl,
- xbow_perf_en.link,
- (cmd == XBOWIOC_PERF_DISABLE) ? 0 : xbow_perf_en.mode,
- xbow_perf_en.counter)) {
- error = EINVAL;
- break;
- }
- break;
- }
-#endif
-
-#ifdef LATER
- case XBOWIOC_PERF_GET:
- {
- xbow_perf_link_t *xbow_perf_cnt;
-
- if ((flag & FREAD) == 0) {
- error = EBADF;
- break;
- }
- xbow_perf_cnt = xbow_get_perf_counters(vhdl);
- ASSERT_ALWAYS(xbow_perf_cnt != NULL);
-
- if (COPYOUT((void *) xbow_perf_cnt, (void *) arg,
- MAX_XBOW_PORTS * sizeof(xbow_perf_link_t))) {
- error = EFAULT;
- break;
- }
- break;
- }
-#endif
-
- case XBOWIOC_LLP_ERROR_ENABLE:
- if ((error = xbow_enable_llp_monitor(vhdl)) != 0)
- error = EINVAL;
-
- break;
-
- case XBOWIOC_LLP_ERROR_DISABLE:
-
- if ((error = xbow_disable_llp_monitor(vhdl)) != 0)
- error = EINVAL;
-
- break;
-
-#ifdef LATER
- case XBOWIOC_LLP_ERROR_GET:
- {
- xbow_link_status_t *xbow_llp_status;
-
- if ((flag & FREAD) == 0) {
- error = EBADF;
- break;
- }
- xbow_llp_status = xbow_get_llp_status(vhdl);
- ASSERT_ALWAYS(xbow_llp_status != NULL);
-
- if (COPYOUT((void *) xbow_llp_status, (void *) arg,
- MAX_XBOW_PORTS * sizeof(xbow_link_status_t))) {
- error = EFAULT;
- break;
- }
- break;
- }
-#endif
-
-#ifdef LATER
- case GIOCSETBW:
- {
- grio_ioctl_info_t info;
- xwidgetnum_t src_widgetnum, dest_widgetnum;
-
- if (COPYIN(arg, &info, sizeof(grio_ioctl_info_t))) {
- error = EFAULT;
- break;
- }
-#ifdef GRIO_DEBUG
- printf("xbow:: prev_vhdl: %d next_vhdl: %d reqbw: %lld\n",
- info.prev_vhdl, info.next_vhdl, info.reqbw);
-#endif /* GRIO_DEBUG */
-
- src_widgetnum = xbow_widget_num_get(info.prev_vhdl);
- dest_widgetnum = xbow_widget_num_get(info.next_vhdl);
-
- /* Bandwidth allocation is bi-directional. Since bandwidth
- * reservations have already been done at an earlier stage,
- * we cannot fail here for lack of bandwidth.
- */
- xbow_prio_bw_alloc(dev, src_widgetnum, dest_widgetnum,
- 0, info.reqbw);
- xbow_prio_bw_alloc(dev, dest_widgetnum, src_widgetnum,
- 0, info.reqbw);
-
- break;
- }
-
- case GIOCRELEASEBW:
- {
- grio_ioctl_info_t info;
- xwidgetnum_t src_widgetnum, dest_widgetnum;
-
- if (!cap_able(CAP_DEVICE_MGT)) {
- error = EPERM;
- break;
- }
-
- if (COPYIN(arg, &info, sizeof(grio_ioctl_info_t))) {
- error = EFAULT;
- break;
- }
-#ifdef GRIO_DEBUG
- printf("xbow:: prev_vhdl: %d next_vhdl: %d reqbw: %lld\n",
- info.prev_vhdl, info.next_vhdl, info.reqbw);
-#endif /* GRIO_DEBUG */
-
- src_widgetnum = xbow_widget_num_get(info.prev_vhdl);
- dest_widgetnum = xbow_widget_num_get(info.next_vhdl);
-
- /* Bandwidth reservation is bi-directional. Hence, remove
- * bandwidth reservations for both directions.
- */
- xbow_prio_bw_alloc(dev, src_widgetnum, dest_widgetnum,
- info.reqbw, (-1 * info.reqbw));
- xbow_prio_bw_alloc(dev, dest_widgetnum, src_widgetnum,
- info.reqbw, (-1 * info.reqbw));
-
- break;
- }
-#endif
-
- default:
- break;
-
- }
- return error;
-}
-
-/*
- * xbow_widget_present: See if a device is present
- * on the specified port of this crossbow.
- */
-int
-xbow_widget_present(xbow_t * xbow, int port)
-{
- if ( IS_RUNNING_ON_SIMULATOR() ) {
- if ( (port == 14) || (port == 15) ) {
- return 1;
- }
- else {
- return 0;
- }
- }
- else {
- return xbow->xb_link(port).link_aux_status & XB_AUX_STAT_PRESENT;
- }
-}
-
-static int
-xbow_link_alive(xbow_t * xbow, int port)
-{
- xbwX_stat_t xbow_linkstat;
-
- xbow_linkstat.linkstatus = xbow->xb_link(port).link_status;
- return (xbow_linkstat.link_alive);
-}
-
-/*
- * xbow_widget_lookup
- * Lookup the edges connected to the xbow specified, and
- * retrieve the handle corresponding to the widgetnum
- * specified.
- * If not found, return 0.
- */
-devfs_handle_t
-xbow_widget_lookup(devfs_handle_t vhdl,
- int widgetnum)
-{
- xswitch_info_t xswitch_info;
- devfs_handle_t conn;
-
- xswitch_info = xswitch_info_get(vhdl);
- conn = xswitch_info_vhdl_get(xswitch_info, widgetnum);
- return conn;
-}
-
-/*
- * xbow_setwidint: called when xtalk
- * is establishing or migrating our
- * interrupt service.
- */
-#ifdef LATER
-static void
-xbow_setwidint(xtalk_intr_t intr)
-{
- xwidgetnum_t targ = xtalk_intr_target_get(intr);
- iopaddr_t addr = xtalk_intr_addr_get(intr);
- xtalk_intr_vector_t vect = xtalk_intr_vector_get(intr);
- xbow_t *xbow = (xbow_t *) xtalk_intr_sfarg_get(intr);
-
- xbow_intr_preset((void *) xbow, 0, targ, addr, vect);
-}
-#endif /* LATER */
-
-/*
- * xbow_intr_preset: called during mlreset time
- * if the platform specific code needs to route
- * an xbow interrupt before the xtalk infrastructure
- * is available for use.
- *
- * Also called from xbow_setwidint, so we don't
- * replicate the guts of the routine.
- *
- * XXX- probably should be renamed xbow_wid_intr_set or
- * something to reduce confusion.
- */
-/*ARGSUSED3 */
-void
-xbow_intr_preset(void *which_widget,
- int which_widget_intr,
- xwidgetnum_t targ,
- iopaddr_t addr,
- xtalk_intr_vector_t vect)
-{
- xbow_t *xbow = (xbow_t *) which_widget;
-
- xbow->xb_wid_int_upper = ((0xFF000000 & (vect << 24)) |
- (0x000F0000 & (targ << 16)) |
- XTALK_ADDR_TO_UPPER(addr));
- xbow->xb_wid_int_lower = XTALK_ADDR_TO_LOWER(addr);
-}
-
-#define XEM_ADD_STR(s) printk("%s", (s))
-#define XEM_ADD_NVAR(n,v) printk("\t%20s: 0x%x\n", (n), (v))
-#define XEM_ADD_VAR(v) XEM_ADD_NVAR(#v,(v))
-#define XEM_ADD_IOEF(n) if (IOERROR_FIELDVALID(ioe,n)) \
- XEM_ADD_NVAR("ioe." #n, \
- IOERROR_GETVALUE(ioe,n))
-
-#ifdef LATER
-static void
-xem_add_ioe(ioerror_t *ioe)
-{
- XEM_ADD_IOEF(errortype);
- XEM_ADD_IOEF(widgetnum);
- XEM_ADD_IOEF(widgetdev);
- XEM_ADD_IOEF(srccpu);
- XEM_ADD_IOEF(srcnode);
- XEM_ADD_IOEF(errnode);
- XEM_ADD_IOEF(sysioaddr);
- XEM_ADD_IOEF(xtalkaddr);
- XEM_ADD_IOEF(busspace);
- XEM_ADD_IOEF(busaddr);
- XEM_ADD_IOEF(vaddr);
- XEM_ADD_IOEF(memaddr);
- XEM_ADD_IOEF(epc);
- XEM_ADD_IOEF(ef);
-}
-
-#define XEM_ADD_IOE() (xem_add_ioe(ioe))
-#endif /* LATER */
-
-int xbow_xmit_retry_errors = 0;
-
-int
-xbow_xmit_retry_error(xbow_soft_t soft,
- int port)
-{
- xswitch_info_t info;
- devfs_handle_t vhdl;
- widget_cfg_t *wid;
- widgetreg_t id;
- int part;
- int mfgr;
-
- wid = soft->wpio[port - BASE_XBOW_PORT];
- if (wid == NULL) {
- /* If we can't track down a PIO
- * pointer to our widget yet,
- * leave our caller knowing that
- * we are interested in this
- * interrupt if it occurs in
- * the future.
- */
- info = xswitch_info_get(soft->busv);
- if (!info)
- return 1;
- vhdl = xswitch_info_vhdl_get(info, port);
- if (vhdl == GRAPH_VERTEX_NONE)
- return 1;
- wid = (widget_cfg_t *) xtalk_piotrans_addr
- (vhdl, 0, 0, sizeof *wid, 0);
- if (!wid)
- return 1;
- soft->wpio[port - BASE_XBOW_PORT] = wid;
- }
- id = wid->w_id;
- part = XWIDGET_PART_NUM(id);
- mfgr = XWIDGET_MFG_NUM(id);
-
- /* If this thing is not a Bridge,
- * do not activate the WAR, and
- * tell our caller we do not need
- * to be called again.
- */
- if ((part != BRIDGE_WIDGET_PART_NUM) ||
- (mfgr != BRIDGE_WIDGET_MFGR_NUM)) {
- /* FIXME: add Xbridge to the WAR.
- * Shouldn't hurt anything. Later need to
- * check if we can remove this.
- */
- if ((part != XBRIDGE_WIDGET_PART_NUM) ||
- (mfgr != XBRIDGE_WIDGET_MFGR_NUM))
- return 0;
- }
-
- /* count how many times we
- * have picked up after
- * LLP Transmit problems.
- */
- xbow_xmit_retry_errors++;
-
- /* rewrite the control register
- * to fix things up.
- */
- wid->w_control = wid->w_control;
- wid->w_control;
-
- return 1;
-}
-
-void
-xbow_update_perf_counters(devfs_handle_t vhdl)
-{
- xbow_soft_t xbow_soft = xbow_soft_get(vhdl);
- xbow_perf_t *xbow_perf = xbow_soft->xbow_perfcnt;
- xbow_perf_link_t *xbow_plink = xbow_soft->xbow_perflink;
- xbow_perfcount_t perf_reg;
- unsigned long s;
- int link, i;
-
- for (i = 0; i < XBOW_PERF_COUNTERS; i++, xbow_perf++) {
- if (xbow_perf->xp_mode == XBOW_MONITOR_NONE)
- continue;
-
- s = mutex_spinlock(&xbow_soft->xbow_perf_lock);
-
- perf_reg.xb_counter_val = *(xbowreg_t *) xbow_perf->xp_perf_reg;
-
- link = perf_reg.xb_perf.link_select;
-
- (xbow_plink + link)->xlp_cumulative[xbow_perf->xp_curmode] +=
- ((perf_reg.xb_perf.count - xbow_perf->xp_current) & XBOW_COUNTER_MASK);
- xbow_perf->xp_current = perf_reg.xb_perf.count;
-
- mutex_spinunlock(&xbow_soft->xbow_perf_lock, s);
- }
- /* Do port /mode multiplexing here */
-
-#ifdef LATER
- (void) timeout(xbow_update_perf_counters,
- (void *) (__psunsigned_t) vhdl, XBOW_PERF_TIMEOUT);
-#endif
-
-}
-
-xbow_perf_link_t *
-xbow_get_perf_counters(devfs_handle_t vhdl)
-{
- xbow_soft_t xbow_soft = xbow_soft_get(vhdl);
- xbow_perf_link_t *xbow_perf_link = xbow_soft->xbow_perflink;
-
- return xbow_perf_link;
-}
-
-int
-xbow_enable_perf_counter(devfs_handle_t vhdl, int link, int mode, int counter)
-{
- xbow_soft_t xbow_soft = xbow_soft_get(vhdl);
- xbow_perf_t *xbow_perf = xbow_soft->xbow_perfcnt;
- xbow_linkctrl_t xbow_link_ctrl;
- xbow_t *xbow = xbow_soft->base;
- xbow_perfcount_t perf_reg;
- unsigned long s;
- int i;
-
- link -= BASE_XBOW_PORT;
- if ((link < 0) || (link >= MAX_XBOW_PORTS))
- return -1;
-
- if ((mode < XBOW_MONITOR_NONE) || (mode > XBOW_MONITOR_DEST_LINK))
- return -1;
-
- if ((counter < 0) || (counter >= XBOW_PERF_COUNTERS))
- return -1;
-
- s = mutex_spinlock(&xbow_soft->xbow_perf_lock);
-
- if ((xbow_perf + counter)->xp_mode && mode) {
- mutex_spinunlock(&xbow_soft->xbow_perf_lock, s);
- return -1;
- }
- for (i = 0; i < XBOW_PERF_COUNTERS; i++) {
- if (i == counter)
- continue;
- if (((xbow_perf + i)->xp_link == link) &&
- ((xbow_perf + i)->xp_mode)) {
- mutex_spinunlock(&xbow_soft->xbow_perf_lock, s);
- return -1;
- }
- }
- xbow_perf += counter;
-
- xbow_perf->xp_curlink = xbow_perf->xp_link = link;
- xbow_perf->xp_curmode = xbow_perf->xp_mode = mode;
-
- xbow_link_ctrl.xbl_ctrlword = xbow->xb_link_raw[link].link_control;
- xbow_link_ctrl.xb_linkcontrol.perf_mode = mode;
- xbow->xb_link_raw[link].link_control = xbow_link_ctrl.xbl_ctrlword;
-
- perf_reg.xb_counter_val = *(xbowreg_t *) xbow_perf->xp_perf_reg;
- perf_reg.xb_perf.link_select = link;
- *(xbowreg_t *) xbow_perf->xp_perf_reg = perf_reg.xb_counter_val;
- xbow_perf->xp_current = perf_reg.xb_perf.count;
-
-#ifdef LATER
- (void) timeout(xbow_update_perf_counters,
- (void *) (__psunsigned_t) vhdl, XBOW_PERF_TIMEOUT);
-#endif
-
- mutex_spinunlock(&xbow_soft->xbow_perf_lock, s);
-
- return 0;
-}
-
-xbow_link_status_t *
-xbow_get_llp_status(devfs_handle_t vhdl)
-{
- xbow_soft_t xbow_soft = xbow_soft_get(vhdl);
- xbow_link_status_t *xbow_llp_status = xbow_soft->xbow_link_status;
-
- return xbow_llp_status;
-}
-
-void
-xbow_update_llp_status(devfs_handle_t vhdl)
-{
- xbow_soft_t xbow_soft = xbow_soft_get(vhdl);
- xbow_link_status_t *xbow_llp_status = xbow_soft->xbow_link_status;
- xbow_t *xbow;
- xbwX_stat_t lnk_sts;
- xbow_aux_link_status_t aux_sts;
- int link;
- devfs_handle_t xwidget_vhdl;
- char *xwidget_name;
-
- xbow = (xbow_t *) xbow_soft->base;
- for (link = 0; link < MAX_XBOW_PORTS; link++, xbow_llp_status++) {
- /* Get the widget name corresponding the current link.
- * Note : 0 <= link < MAX_XBOW_PORTS(8).
- * BASE_XBOW_PORT(0x8) <= xwidget number < MAX_PORT_NUM (0x10)
- */
- xwidget_vhdl = xbow_widget_lookup(xbow_soft->busv,link+BASE_XBOW_PORT);
- xwidget_name = xwidget_name_get(xwidget_vhdl);
- aux_sts.aux_linkstatus
- = xbow->xb_link_raw[link].link_aux_status;
- lnk_sts.linkstatus = xbow->xb_link_raw[link].link_status_clr;
-
- if (lnk_sts.link_alive == 0)
- continue;
-
- xbow_llp_status->rx_err_count +=
- aux_sts.xb_aux_linkstatus.rx_err_cnt;
-
- xbow_llp_status->tx_retry_count +=
- aux_sts.xb_aux_linkstatus.tx_retry_cnt;
-
- if (lnk_sts.linkstatus & ~(XB_STAT_RCV_ERR | XB_STAT_XMT_RTRY_ERR | XB_STAT_LINKALIVE)) {
-#ifdef LATER
- printk(KERN_WARNING "link %d[%s]: bad status 0x%x\n",
- link, xwidget_name, lnk_sts.linkstatus);
-#endif
- }
- }
-#ifdef LATER
- if (xbow_soft->link_monitor)
- (void) timeout(xbow_update_llp_status,
- (void *) (__psunsigned_t) vhdl, XBOW_STATS_TIMEOUT);
-#endif
-}
-
-int
-xbow_disable_llp_monitor(devfs_handle_t vhdl)
-{
- xbow_soft_t xbow_soft = xbow_soft_get(vhdl);
- int port;
-
- for (port = 0; port < MAX_XBOW_PORTS; port++) {
- xbow_soft->xbow_link_status[port].rx_err_count = 0;
- xbow_soft->xbow_link_status[port].tx_retry_count = 0;
- }
-
- xbow_soft->link_monitor = 0;
- return 0;
-}
-
-int
-xbow_enable_llp_monitor(devfs_handle_t vhdl)
-{
- xbow_soft_t xbow_soft = xbow_soft_get(vhdl);
-
-#ifdef LATER
- (void) timeout(xbow_update_llp_status,
- (void *) (__psunsigned_t) vhdl, XBOW_STATS_TIMEOUT);
-#endif
- xbow_soft->link_monitor = 1;
- return 0;
-}
-
-
-int
-xbow_reset_link(devfs_handle_t xconn_vhdl)
-{
- xwidget_info_t widget_info;
- xwidgetnum_t port;
- xbow_t *xbow;
- xbowreg_t ctrl;
- xbwX_stat_t stat;
- unsigned itick;
- unsigned dtick;
- static int ticks_per_ms = 0;
-
- if (!ticks_per_ms) {
- itick = get_timestamp();
- us_delay(1000);
- ticks_per_ms = get_timestamp() - itick;
- }
- widget_info = xwidget_info_get(xconn_vhdl);
- port = xwidget_info_id_get(widget_info);
-
-#ifdef XBOW_K1PTR /* defined if we only have one xbow ... */
- xbow = XBOW_K1PTR;
-#else
- {
- devfs_handle_t xbow_vhdl;
- xbow_soft_t xbow_soft;
-
- hwgraph_traverse(xconn_vhdl, ".master/xtalk/0/xbow", &xbow_vhdl);
- xbow_soft = xbow_soft_get(xbow_vhdl);
- xbow = xbow_soft->base;
- }
-#endif
-
- /*
- * This requires three PIOs (reset the link, check for the
- * reset, restore the control register for the link) plus
- * 10us to wait for the reset. We allow up to 1ms for the
- * widget to come out of reset before giving up and
- * returning a failure.
- */
- ctrl = xbow->xb_link(port).link_control;
- xbow->xb_link(port).link_reset = 0;
- itick = get_timestamp();
- while (1) {
- stat.linkstatus = xbow->xb_link(port).link_status;
- if (stat.link_alive)
- break;
- dtick = get_timestamp() - itick;
- if (dtick > ticks_per_ms) {
- return -1; /* never came out of reset */
- }
- DELAY(2); /* don't beat on link_status */
- }
- xbow->xb_link(port).link_control = ctrl;
- return 0;
-}
-
-/*
- * Dump xbow registers.
- * input parameter is either a pointer to
- * the xbow chip or the vertex handle for
- * an xbow vertex.
- */
-void
-idbg_xbowregs(int64_t regs)
-{
- xbow_t *xbow;
- int i;
- xb_linkregs_t *link;
-
-#ifdef LATER
- if (dev_is_vertex((devfs_handle_t) regs)) {
- devfs_handle_t vhdl = (devfs_handle_t) regs;
- xbow_soft_t soft = xbow_soft_get(vhdl);
-
- xbow = soft->base;
- } else
-#endif
- {
- xbow = (xbow_t *) regs;
- }
-
-#ifdef LATER
- qprintf("Printing xbow registers starting at 0x%x\n", xbow);
- qprintf("wid %x status %x erruppr %x errlower %x control %x timeout %x\n",
- xbow->xb_wid_id, xbow->xb_wid_stat, xbow->xb_wid_err_upper,
- xbow->xb_wid_err_lower, xbow->xb_wid_control,
- xbow->xb_wid_req_timeout);
- qprintf("intr uppr %x lower %x errcmd %x llp ctrl %x arb_reload %x\n",
- xbow->xb_wid_int_upper, xbow->xb_wid_int_lower,
- xbow->xb_wid_err_cmdword, xbow->xb_wid_llp,
- xbow->xb_wid_arb_reload);
-#endif
-
- for (i = 8; i <= 0xf; i++) {
- link = &xbow->xb_link(i);
-#ifdef LATER
- qprintf("Link %d registers\n", i);
- qprintf("\tctrl %x stat %x arbuppr %x arblowr %x auxstat %x\n",
- link->link_control, link->link_status,
- link->link_arb_upper, link->link_arb_lower,
- link->link_aux_status);
-#endif
- }
-}
-
-
-#define XBOW_ARB_RELOAD_TICKS 25
- /* granularity: 4 MB/s, max: 124 MB/s */
-#define GRANULARITY ((100 * 1000000) / XBOW_ARB_RELOAD_TICKS)
-
-#define XBOW_BYTES_TO_GBR(BYTES_per_s) (int) (BYTES_per_s / GRANULARITY)
-
-#define XBOW_GBR_TO_BYTES(cnt) (bandwidth_t) ((cnt) * GRANULARITY)
-
-#define CEILING_BYTES_TO_GBR(gbr, bytes_per_sec) \
- ((XBOW_GBR_TO_BYTES(gbr) < bytes_per_sec) ? gbr+1 : gbr)
-
-#define XBOW_ARB_GBR_MAX 31
-
-#define ABS(x) ((x > 0) ? (x) : (-1 * x))
- /* absolute value */
-
-int
-xbow_bytes_to_gbr(bandwidth_t old_bytes_per_sec, bandwidth_t bytes_per_sec)
-{
- int gbr_granted;
- int new_total_gbr;
- int change_gbr;
- bandwidth_t new_total_bw;
-
-#ifdef GRIO_DEBUG
- printf("xbow_bytes_to_gbr: old_bytes_per_sec %lld bytes_per_sec %lld\n",
- old_bytes_per_sec, bytes_per_sec);
-#endif /* GRIO_DEBUG */
-
- gbr_granted = CEILING_BYTES_TO_GBR((XBOW_BYTES_TO_GBR(old_bytes_per_sec)),
- old_bytes_per_sec);
- new_total_bw = old_bytes_per_sec + bytes_per_sec;
- new_total_gbr = CEILING_BYTES_TO_GBR((XBOW_BYTES_TO_GBR(new_total_bw)),
- new_total_bw);
-
- change_gbr = new_total_gbr - gbr_granted;
-
-#ifdef GRIO_DEBUG
- printf("xbow_bytes_to_gbr: gbr_granted %d new_total_gbr %d change_gbr %d\n",
- gbr_granted, new_total_gbr, change_gbr);
-#endif /* GRIO_DEBUG */
-
- return (change_gbr);
-}
-
-/* Conversion from GBR to bytes */
-bandwidth_t
-xbow_gbr_to_bytes(int gbr)
-{
- return (XBOW_GBR_TO_BYTES(gbr));
-}
-
-/* Given the vhdl for the desired xbow, the src and dest. widget ids
- * and the req_bw value, this xbow driver entry point accesses the
- * xbow registers and allocates the desired bandwidth if available.
- *
- * If bandwidth allocation is successful, return success else return failure.
- */
-int
-xbow_prio_bw_alloc(devfs_handle_t vhdl,
- xwidgetnum_t src_wid,
- xwidgetnum_t dest_wid,
- unsigned long long old_alloc_bw,
- unsigned long long req_bw)
-{
- xbow_soft_t soft = xbow_soft_get(vhdl);
- volatile xbowreg_t *xreg;
- xbowreg_t mask;
- unsigned long s;
- int error = 0;
- bandwidth_t old_bw_BYTES, req_bw_BYTES;
- xbowreg_t old_xreg;
- int old_bw_GBR, req_bw_GBR, new_bw_GBR;
-
-#ifdef GRIO_DEBUG
- printf("xbow_prio_bw_alloc: vhdl %d src_wid %d dest_wid %d req_bw %lld\n",
- (int) vhdl, (int) src_wid, (int) dest_wid, req_bw);
-#endif
-
- ASSERT(XBOW_WIDGET_IS_VALID(src_wid));
- ASSERT(XBOW_WIDGET_IS_VALID(dest_wid));
-
- s = mutex_spinlock(&soft->xbow_bw_alloc_lock);
-
- /* Get pointer to the correct register */
- xreg = XBOW_PRIO_ARBREG_PTR(soft->base, dest_wid, src_wid);
-
- /* Get mask for GBR count value */
- mask = XB_ARB_GBR_MSK << XB_ARB_GBR_SHFT(src_wid);
-
- req_bw_GBR = xbow_bytes_to_gbr(old_alloc_bw, req_bw);
- req_bw_BYTES = (req_bw_GBR < 0) ? (-1 * xbow_gbr_to_bytes(ABS(req_bw_GBR)))
- : xbow_gbr_to_bytes(req_bw_GBR);
-
-#ifdef GRIO_DEBUG
- printf("req_bw %lld req_bw_BYTES %lld req_bw_GBR %d\n",
- req_bw, req_bw_BYTES, req_bw_GBR);
-#endif /* GRIO_DEBUG */
-
- old_bw_BYTES = soft->bw_cur_used[(int) dest_wid - MAX_XBOW_PORTS];
- old_xreg = *xreg;
- old_bw_GBR = (((*xreg) & mask) >> XB_ARB_GBR_SHFT(src_wid));
-
-#ifdef GRIO_DEBUG
- ASSERT(XBOW_BYTES_TO_GBR(old_bw_BYTES) == old_bw_GBR);
-
- printf("old_bw_BYTES %lld old_bw_GBR %d\n", old_bw_BYTES, old_bw_GBR);
-
- printf("req_bw_BYTES %lld old_bw_BYTES %lld soft->bw_hiwm %lld\n",
- req_bw_BYTES, old_bw_BYTES,
- soft->bw_hiwm[(int) dest_wid - MAX_XBOW_PORTS]);
-
-#endif /* GRIO_DEBUG */
-
- /* Accept the request only if we don't exceed the destination
- * port HIWATER_MARK *AND* the max. link GBR arbitration count
- */
- if (((old_bw_BYTES + req_bw_BYTES) <=
- soft->bw_hiwm[(int) dest_wid - MAX_XBOW_PORTS]) &&
- (req_bw_GBR + old_bw_GBR <= XBOW_ARB_GBR_MAX)) {
-
- new_bw_GBR = (old_bw_GBR + req_bw_GBR);
-
- /* Set this in the xbow link register */
- *xreg = (old_xreg & ~mask) | \
- (new_bw_GBR << XB_ARB_GBR_SHFT(src_wid) & mask);
-
- soft->bw_cur_used[(int) dest_wid - MAX_XBOW_PORTS] =
- xbow_gbr_to_bytes(new_bw_GBR);
- } else {
- error = 1;
- }
-
- mutex_spinunlock(&soft->xbow_bw_alloc_lock, s);
-
- return (error);
-}
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
- * Copyright (C) 1992-1997,2000-2002 Silicon Graphics, Inc. All rights reserved.
+ * Copyright (c) 1992-1997,2000-2003 Silicon Graphics, Inc. All rights reserved.
*/
#include <linux/types.h>
#define NEW(ptr) (ptr = kmalloc(sizeof (*(ptr)), GFP_KERNEL))
#define DEL(ptr) (kfree(ptr))
-int xswitch_devflag = D_MP;
-
/*
* This file provides generic support for Crosstalk
* Switches, in a way that insulates crosstalk providers
#define DEV_FUNC(dev,func) xwidget_to_provider_fns(dev)->func
static xswitch_provider_t *
-xwidget_to_provider_fns(devfs_handle_t xconn)
+xwidget_to_provider_fns(vertex_hdl_t xconn)
{
- devfs_handle_t busv;
+ vertex_hdl_t busv;
xswitch_info_t xswitch_info;
xswitch_provider_t provider_fns;
struct xswitch_info_s {
char *fingerprint;
unsigned census;
- devfs_handle_t vhdl[XSWITCH_CENSUS_PORTS];
- devfs_handle_t master_vhdl[XSWITCH_CENSUS_PORTS];
+ vertex_hdl_t vhdl[XSWITCH_CENSUS_PORTS];
+ vertex_hdl_t master_vhdl[XSWITCH_CENSUS_PORTS];
xswitch_provider_t *xswitch_fns;
};
xswitch_info_t
-xswitch_info_get(devfs_handle_t xwidget)
+xswitch_info_get(vertex_hdl_t xwidget)
{
xswitch_info_t xswitch_info;
xswitch_info = (xswitch_info_t)
hwgraph_fastinfo_get(xwidget);
-#ifdef LATER
- if ((xswitch_info != NULL) &&
- (xswitch_info->fingerprint != xswitch_info_fingerprint))
-#ifdef SUPPORT_PRINTING_V_FORMAT
- PRINT_PANIC("%v xswitch_info_get bad fingerprint", xwidget);
-#else
- PRINT_PANIC("%x xswitch_info_get bad fingerprint", xwidget);
-#endif
-#endif /* LATER */
return (xswitch_info);
}
void
xswitch_info_vhdl_set(xswitch_info_t xswitch_info,
xwidgetnum_t port,
- devfs_handle_t xwidget)
+ vertex_hdl_t xwidget)
{
#if XSWITCH_CENSUS_PORT_MIN
if (port < XSWITCH_CENSUS_PORT_MIN)
xswitch_info->vhdl[port - XSWITCH_CENSUS_PORT_MIN] = xwidget;
}
-devfs_handle_t
+vertex_hdl_t
xswitch_info_vhdl_get(xswitch_info_t xswitch_info,
xwidgetnum_t port)
{
-#ifdef LATER
- if (xswitch_info == NULL)
- PRINT_PANIC("xswitch_info_vhdl_get: null xswitch_info");
-#endif
-
#if XSWITCH_CENSUS_PORT_MIN
if (port < XSWITCH_CENSUS_PORT_MIN)
return GRAPH_VERTEX_NONE;
void
xswitch_info_master_assignment_set(xswitch_info_t xswitch_info,
xwidgetnum_t port,
- devfs_handle_t master_vhdl)
+ vertex_hdl_t master_vhdl)
{
#if XSWITCH_CENSUS_PORT_MIN
if (port < XSWITCH_CENSUS_PORT_MIN)
xswitch_info->master_vhdl[port - XSWITCH_CENSUS_PORT_MIN] = master_vhdl;
}
-devfs_handle_t
+vertex_hdl_t
xswitch_info_master_assignment_get(xswitch_info_t xswitch_info,
xwidgetnum_t port)
{
}
void
-xswitch_info_set(devfs_handle_t xwidget, xswitch_info_t xswitch_info)
+xswitch_info_set(vertex_hdl_t xwidget, xswitch_info_t xswitch_info)
{
xswitch_info->fingerprint = xswitch_info_fingerprint;
hwgraph_fastinfo_set(xwidget, (arbitrary_info_t) xswitch_info);
}
xswitch_info_t
-xswitch_info_new(devfs_handle_t xwidget)
+xswitch_info_new(vertex_hdl_t xwidget)
{
xswitch_info_t xswitch_info;
}
void
-xswitch_provider_register(devfs_handle_t busv,
+xswitch_provider_register(vertex_hdl_t busv,
xswitch_provider_t * xswitch_fns)
{
xswitch_info_t xswitch_info = xswitch_info_get(busv);
}
int
-xswitch_reset_link(devfs_handle_t xconn_vhdl)
+xswitch_reset_link(vertex_hdl_t xconn_vhdl)
{
return DEV_FUNC(xconn_vhdl, reset_link)
(xconn_vhdl);
}
-
-/* Given a vertex handle to the xswitch get its logical
- * id.
- */
-int
-xswitch_id_get(devfs_handle_t xconn_vhdl)
-{
- arbitrary_info_t xbow_num;
- graph_error_t rv;
-
- rv = hwgraph_info_get_LBL(xconn_vhdl,INFO_LBL_XSWITCH_ID,&xbow_num);
- ASSERT(rv == GRAPH_SUCCESS);
- return(xbow_num);
-}
-
-/* Given a vertex handle to the xswitch set its logical
- * id.
- */
-void
-xswitch_id_set(devfs_handle_t xconn_vhdl,int xbow_num)
-{
- graph_error_t rv;
-
- rv = hwgraph_info_add_LBL(xconn_vhdl,INFO_LBL_XSWITCH_ID,
- (arbitrary_info_t)xbow_num);
- ASSERT(rv == GRAPH_SUCCESS);
-}
+++ /dev/null
-/* $Id$
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Copyright (C) 1992 - 1997, 2000-2001 Silicon Graphics, Inc. All rights reserved.
- */
-
-#include <linux/types.h>
-#include <linux/slab.h>
-#include <asm/sn/sgi.h>
-#include <asm/sn/driver.h>
-#include <asm/sn/io.h>
-#include <asm/sn/iograph.h>
-#include <asm/sn/invent.h>
-#include <asm/sn/hcl.h>
-#include <asm/sn/labelcl.h>
-#include <asm/sn/hcl_util.h>
-#include <asm/sn/xtalk/xtalk.h>
-#include <asm/sn/xtalk/xswitch.h>
-#include <asm/sn/xtalk/xwidget.h>
-#include <asm/sn/xtalk/xtalk_private.h>
-
-/*
- * Implement crosstalk provider operations. The xtalk* layer provides a
- * platform-independent interface for crosstalk devices. This layer
- * switches among the possible implementations of a crosstalk adapter.
- *
- * On platforms with only one possible xtalk provider, macros can be
- * set up at the top that cause the table lookups and indirections to
- * completely disappear.
- */
-
-#define NEW(ptr) (ptr = kmalloc(sizeof (*(ptr)), GFP_KERNEL))
-#define DEL(ptr) (kfree(ptr))
-
-char widget_info_fingerprint[] = "widget_info";
-
-cdl_p xtalk_registry = NULL;
-
-#define DEV_FUNC(dev,func) hub_##func
-#define CAST_PIOMAP(x) ((hub_piomap_t)(x))
-#define CAST_DMAMAP(x) ((hub_dmamap_t)(x))
-#define CAST_INTR(x) ((hub_intr_t)(x))
-
-/* =====================================================================
- * Function Table of Contents
- */
-xtalk_piomap_t xtalk_piomap_alloc(devfs_handle_t, device_desc_t, iopaddr_t, size_t, size_t, unsigned);
-void xtalk_piomap_free(xtalk_piomap_t);
-caddr_t xtalk_piomap_addr(xtalk_piomap_t, iopaddr_t, size_t);
-void xtalk_piomap_done(xtalk_piomap_t);
-caddr_t xtalk_piotrans_addr(devfs_handle_t, device_desc_t, iopaddr_t, size_t, unsigned);
-caddr_t xtalk_pio_addr(devfs_handle_t, device_desc_t, iopaddr_t, size_t, xtalk_piomap_t *, unsigned);
-void xtalk_set_early_piotrans_addr(xtalk_early_piotrans_addr_f *);
-caddr_t xtalk_early_piotrans_addr(xwidget_part_num_t, xwidget_mfg_num_t, int, iopaddr_t, size_t, unsigned);
-static caddr_t null_xtalk_early_piotrans_addr(xwidget_part_num_t, xwidget_mfg_num_t, int, iopaddr_t, size_t, unsigned);
-xtalk_dmamap_t xtalk_dmamap_alloc(devfs_handle_t, device_desc_t, size_t, unsigned);
-void xtalk_dmamap_free(xtalk_dmamap_t);
-iopaddr_t xtalk_dmamap_addr(xtalk_dmamap_t, paddr_t, size_t);
-alenlist_t xtalk_dmamap_list(xtalk_dmamap_t, alenlist_t, unsigned);
-void xtalk_dmamap_done(xtalk_dmamap_t);
-iopaddr_t xtalk_dmatrans_addr(devfs_handle_t, device_desc_t, paddr_t, size_t, unsigned);
-alenlist_t xtalk_dmatrans_list(devfs_handle_t, device_desc_t, alenlist_t, unsigned);
-void xtalk_dmamap_drain(xtalk_dmamap_t);
-void xtalk_dmaaddr_drain(devfs_handle_t, iopaddr_t, size_t);
-void xtalk_dmalist_drain(devfs_handle_t, alenlist_t);
-xtalk_intr_t xtalk_intr_alloc(devfs_handle_t, device_desc_t, devfs_handle_t);
-xtalk_intr_t xtalk_intr_alloc_nothd(devfs_handle_t, device_desc_t, devfs_handle_t);
-void xtalk_intr_free(xtalk_intr_t);
-int xtalk_intr_connect(xtalk_intr_t, xtalk_intr_setfunc_t, void *);
-void xtalk_intr_disconnect(xtalk_intr_t);
-devfs_handle_t xtalk_intr_cpu_get(xtalk_intr_t);
-int xtalk_error_handler(devfs_handle_t, int, ioerror_mode_t, ioerror_t *);
-int xtalk_error_devenable(devfs_handle_t, int, int);
-void xtalk_provider_startup(devfs_handle_t);
-void xtalk_provider_shutdown(devfs_handle_t);
-devfs_handle_t xtalk_intr_dev_get(xtalk_intr_t);
-xwidgetnum_t xtalk_intr_target_get(xtalk_intr_t);
-xtalk_intr_vector_t xtalk_intr_vector_get(xtalk_intr_t);
-iopaddr_t xtalk_intr_addr_get(struct xtalk_intr_s *);
-void *xtalk_intr_sfarg_get(xtalk_intr_t);
-devfs_handle_t xtalk_pio_dev_get(xtalk_piomap_t);
-xwidgetnum_t xtalk_pio_target_get(xtalk_piomap_t);
-iopaddr_t xtalk_pio_xtalk_addr_get(xtalk_piomap_t);
-ulong xtalk_pio_mapsz_get(xtalk_piomap_t);
-caddr_t xtalk_pio_kvaddr_get(xtalk_piomap_t);
-devfs_handle_t xtalk_dma_dev_get(xtalk_dmamap_t);
-xwidgetnum_t xtalk_dma_target_get(xtalk_dmamap_t);
-xwidget_info_t xwidget_info_chk(devfs_handle_t);
-xwidget_info_t xwidget_info_get(devfs_handle_t);
-void xwidget_info_set(devfs_handle_t, xwidget_info_t);
-devfs_handle_t xwidget_info_dev_get(xwidget_info_t);
-xwidgetnum_t xwidget_info_id_get(xwidget_info_t);
-devfs_handle_t xwidget_info_master_get(xwidget_info_t);
-xwidgetnum_t xwidget_info_masterid_get(xwidget_info_t);
-xwidget_part_num_t xwidget_info_part_num_get(xwidget_info_t);
-xwidget_mfg_num_t xwidget_info_mfg_num_get(xwidget_info_t);
-char *xwidget_info_name_get(xwidget_info_t);
-void xtalk_init(void);
-void xtalk_provider_register(devfs_handle_t, xtalk_provider_t *);
-void xtalk_provider_unregister(devfs_handle_t);
-xtalk_provider_t *xtalk_provider_fns_get(devfs_handle_t);
-int xwidget_driver_register(xwidget_part_num_t,
- xwidget_mfg_num_t,
- char *, unsigned);
-void xwidget_driver_unregister(char *);
-int xwidget_register(xwidget_hwid_t, devfs_handle_t,
- xwidgetnum_t, devfs_handle_t,
- xwidgetnum_t, async_attach_t);
-int xwidget_unregister(devfs_handle_t);
-void xwidget_reset(devfs_handle_t);
-char *xwidget_name_get(devfs_handle_t);
-#if !defined(DEV_FUNC)
-/*
- * There is more than one possible provider
- * for this platform. We need to examine the
- * master vertex of the current vertex for
- * a provider function structure, and indirect
- * through the appropriately named member.
- */
-#define DEV_FUNC(dev,func) xwidget_to_provider_fns(dev)->func
-#define CAST_PIOMAP(x) ((xtalk_piomap_t)(x))
-#define CAST_DMAMAP(x) ((xtalk_dmamap_t)(x))
-#define CAST_INTR(x) ((xtalk_intr_t)(x))
-
-static xtalk_provider_t *
-xwidget_to_provider_fns(devfs_handle_t xconn)
-{
- xwidget_info_t widget_info;
- xtalk_provider_t *provider_fns;
-
- widget_info = xwidget_info_get(xconn);
- ASSERT(widget_info != NULL);
-
- provider_fns = xwidget_info_pops_get(widget_info);
- ASSERT(provider_fns != NULL);
-
- return (provider_fns);
-}
-#endif
-
-/*
- * Many functions are not passed their vertex
- * information directly; rather, they must
- * dive through a resource map. These macros
- * are available to coordinate this detail.
- */
-#define PIOMAP_FUNC(map,func) DEV_FUNC(map->xp_dev,func)
-#define DMAMAP_FUNC(map,func) DEV_FUNC(map->xd_dev,func)
-#define INTR_FUNC(intr,func) DEV_FUNC(intr_hdl->xi_dev,func)
-
-/* =====================================================================
- * PIO MANAGEMENT
- *
- * For mapping system virtual address space to
- * xtalk space on a specified widget
- */
-
-xtalk_piomap_t
-xtalk_piomap_alloc(devfs_handle_t dev, /* set up mapping for this device */
- device_desc_t dev_desc, /* device descriptor */
- iopaddr_t xtalk_addr, /* map for this xtalk_addr range */
- size_t byte_count,
- size_t byte_count_max, /* maximum size of a mapping */
- unsigned flags)
-{ /* defined in sys/pio.h */
- return (xtalk_piomap_t) DEV_FUNC(dev, piomap_alloc)
- (dev, dev_desc, xtalk_addr, byte_count, byte_count_max, flags);
-}
-
-
-void
-xtalk_piomap_free(xtalk_piomap_t xtalk_piomap)
-{
- PIOMAP_FUNC(xtalk_piomap, piomap_free)
- (CAST_PIOMAP(xtalk_piomap));
-}
-
-
-caddr_t
-xtalk_piomap_addr(xtalk_piomap_t xtalk_piomap, /* mapping resources */
- iopaddr_t xtalk_addr, /* map for this xtalk address */
- size_t byte_count)
-{ /* map this many bytes */
- return PIOMAP_FUNC(xtalk_piomap, piomap_addr)
- (CAST_PIOMAP(xtalk_piomap), xtalk_addr, byte_count);
-}
-
-
-void
-xtalk_piomap_done(xtalk_piomap_t xtalk_piomap)
-{
- PIOMAP_FUNC(xtalk_piomap, piomap_done)
- (CAST_PIOMAP(xtalk_piomap));
-}
-
-
-caddr_t
-xtalk_piotrans_addr(devfs_handle_t dev, /* translate for this device */
- device_desc_t dev_desc, /* device descriptor */
- iopaddr_t xtalk_addr, /* Crosstalk address */
- size_t byte_count, /* map this many bytes */
- unsigned flags)
-{ /* (currently unused) */
- return DEV_FUNC(dev, piotrans_addr)
- (dev, dev_desc, xtalk_addr, byte_count, flags);
-}
-
-caddr_t
-xtalk_pio_addr(devfs_handle_t dev, /* translate for this device */
- device_desc_t dev_desc, /* device descriptor */
- iopaddr_t addr, /* starting address (or offset in window) */
- size_t byte_count, /* map this many bytes */
- xtalk_piomap_t *mapp, /* where to return the map pointer */
- unsigned flags)
-{ /* PIO flags */
- xtalk_piomap_t map = 0;
- caddr_t res;
-
- if (mapp)
- *mapp = 0; /* record "no map used" */
-
- res = xtalk_piotrans_addr
- (dev, dev_desc, addr, byte_count, flags);
- if (res)
- return res; /* xtalk_piotrans worked */
-
- map = xtalk_piomap_alloc
- (dev, dev_desc, addr, byte_count, byte_count, flags);
- if (!map)
- return res; /* xtalk_piomap_alloc failed */
-
- res = xtalk_piomap_addr
- (map, addr, byte_count);
- if (!res) {
- xtalk_piomap_free(map);
- return res; /* xtalk_piomap_addr failed */
- }
- if (mapp)
- *mapp = map; /* pass back map used */
-
- return res; /* xtalk_piomap_addr succeeded */
-}
-
-/* =====================================================================
- * EARLY PIOTRANS SUPPORT
- *
- * There are places where drivers (mgras, for instance)
- * need to get PIO translations before the infrastructure
- * is extended to them (setting up textports, for
- * instance). These drivers should call
- * xtalk_early_piotrans_addr with their xtalk ID
- * information, a sequence number (so we can use the second
- * mgras for instance), and the usual piotrans parameters.
- *
- * Machine specific code should provide an implementation
- * of early_piotrans_addr, and present a pointer to this
- * function to xtalk_set_early_piotrans_addr so it can be
- * used by clients without the clients having to know what
- * platform or what xtalk provider is in use.
- */
-
-static xtalk_early_piotrans_addr_f null_xtalk_early_piotrans_addr;
-
-xtalk_early_piotrans_addr_f *impl_early_piotrans_addr = null_xtalk_early_piotrans_addr;
-
-/* xtalk_set_early_piotrans_addr:
- * specify the early_piotrans_addr implementation function.
- */
-void
-xtalk_set_early_piotrans_addr(xtalk_early_piotrans_addr_f *impl)
-{
- impl_early_piotrans_addr = impl;
-}
-
-/* xtalk_early_piotrans_addr:
- * figure out a PIO address for the "nth" crosstalk widget that
- * matches the specified part and mfgr number. Returns NULL if
- * there is no such widget, or if the requested mapping can not
- * be constructed.
- * Limitations on which crosstalk slots (and busses) are
- * checked, and definitions of the ordering of the search across
- * the crosstalk slots, are defined by the platform.
- */
-caddr_t
-xtalk_early_piotrans_addr(xwidget_part_num_t part_num,
- xwidget_mfg_num_t mfg_num,
- int which,
- iopaddr_t xtalk_addr,
- size_t byte_count,
- unsigned flags)
-{
- return impl_early_piotrans_addr
- (part_num, mfg_num, which, xtalk_addr, byte_count, flags);
-}
-
-/* null_xtalk_early_piotrans_addr:
- * used as the early_piotrans_addr implementation until and
- * unless a real implementation is provided. In DEBUG kernels,
- * we want to know who is calling before the implementation is
- * registered; in non-DEBUG kernels, return NULL representing
- * lack of mapping support.
- */
-/*ARGSUSED */
-static caddr_t
-null_xtalk_early_piotrans_addr(xwidget_part_num_t part_num,
- xwidget_mfg_num_t mfg_num,
- int which,
- iopaddr_t xtalk_addr,
- size_t byte_count,
- unsigned flags)
-{
-#if DEBUG
- PRINT_PANIC("null_xtalk_early_piotrans_addr");
-#endif
- return NULL;
-}
-
-/* =====================================================================
- * DMA MANAGEMENT
- *
- * For mapping from crosstalk space to system
- * physical space.
- */
-
-xtalk_dmamap_t
-xtalk_dmamap_alloc(devfs_handle_t dev, /* set up mappings for this device */
- device_desc_t dev_desc, /* device descriptor */
- size_t byte_count_max, /* max size of a mapping */
- unsigned flags)
-{ /* defined in dma.h */
- return (xtalk_dmamap_t) DEV_FUNC(dev, dmamap_alloc)
- (dev, dev_desc, byte_count_max, flags);
-}
-
-
-void
-xtalk_dmamap_free(xtalk_dmamap_t xtalk_dmamap)
-{
- DMAMAP_FUNC(xtalk_dmamap, dmamap_free)
- (CAST_DMAMAP(xtalk_dmamap));
-}
-
-
-iopaddr_t
-xtalk_dmamap_addr(xtalk_dmamap_t xtalk_dmamap, /* use these mapping resources */
- paddr_t paddr, /* map for this address */
- size_t byte_count)
-{ /* map this many bytes */
- return DMAMAP_FUNC(xtalk_dmamap, dmamap_addr)
- (CAST_DMAMAP(xtalk_dmamap), paddr, byte_count);
-}
-
-
-alenlist_t
-xtalk_dmamap_list(xtalk_dmamap_t xtalk_dmamap, /* use these mapping resources */
- alenlist_t alenlist, /* map this Address/Length List */
- unsigned flags)
-{
- return DMAMAP_FUNC(xtalk_dmamap, dmamap_list)
- (CAST_DMAMAP(xtalk_dmamap), alenlist, flags);
-}
-
-
-void
-xtalk_dmamap_done(xtalk_dmamap_t xtalk_dmamap)
-{
- DMAMAP_FUNC(xtalk_dmamap, dmamap_done)
- (CAST_DMAMAP(xtalk_dmamap));
-}
-
-
-iopaddr_t
-xtalk_dmatrans_addr(devfs_handle_t dev, /* translate for this device */
- device_desc_t dev_desc, /* device descriptor */
- paddr_t paddr, /* system physical address */
- size_t byte_count, /* length */
- unsigned flags)
-{ /* defined in dma.h */
- return DEV_FUNC(dev, dmatrans_addr)
- (dev, dev_desc, paddr, byte_count, flags);
-}
-
-
-alenlist_t
-xtalk_dmatrans_list(devfs_handle_t dev, /* translate for this device */
- device_desc_t dev_desc, /* device descriptor */
- alenlist_t palenlist, /* system address/length list */
- unsigned flags)
-{ /* defined in dma.h */
- return DEV_FUNC(dev, dmatrans_list)
- (dev, dev_desc, palenlist, flags);
-}
-
-void
-xtalk_dmamap_drain(xtalk_dmamap_t map)
-{
- DMAMAP_FUNC(map, dmamap_drain)
- (CAST_DMAMAP(map));
-}
-
-void
-xtalk_dmaaddr_drain(devfs_handle_t dev, paddr_t addr, size_t size)
-{
- DEV_FUNC(dev, dmaaddr_drain)
- (dev, addr, size);
-}
-
-void
-xtalk_dmalist_drain(devfs_handle_t dev, alenlist_t list)
-{
- DEV_FUNC(dev, dmalist_drain)
- (dev, list);
-}
-
-/* =====================================================================
- * INTERRUPT MANAGEMENT
- *
- * Allow crosstalk devices to establish interrupts
- */
-
-/*
- * Allocate resources required for an interrupt as specified in intr_desc.
- * Return resource handle in intr_hdl.
- */
-xtalk_intr_t
-xtalk_intr_alloc(devfs_handle_t dev, /* which Crosstalk device */
- device_desc_t dev_desc, /* device descriptor */
- devfs_handle_t owner_dev)
-{ /* owner of this interrupt */
- return (xtalk_intr_t) DEV_FUNC(dev, intr_alloc)
- (dev, dev_desc, owner_dev);
-}
-
-/*
- * Allocate resources required for an interrupt as specified in dev_desc.
- * Unconditionally setup resources to be non-threaded.
- * Return resource handle in intr_hdl.
- */
-xtalk_intr_t
-xtalk_intr_alloc_nothd(devfs_handle_t dev, /* which Crosstalk device */
- device_desc_t dev_desc, /* device descriptor */
- devfs_handle_t owner_dev) /* owner of this interrupt */
-{
- return (xtalk_intr_t) DEV_FUNC(dev, intr_alloc_nothd)
- (dev, dev_desc, owner_dev);
-}
-
-/*
- * Free resources consumed by intr_alloc.
- */
-void
-xtalk_intr_free(xtalk_intr_t intr_hdl)
-{
- INTR_FUNC(intr_hdl, intr_free)
- (CAST_INTR(intr_hdl));
-}
-
-
-/*
- * Associate resources allocated with a previous xtalk_intr_alloc call with the
- * described handler, arg, name, etc.
- *
- * Returns 0 on success, returns <0 on failure.
- */
-int
-xtalk_intr_connect(xtalk_intr_t intr_hdl, /* xtalk intr resource handle */
- xtalk_intr_setfunc_t setfunc, /* func to set intr hw */
- void *setfunc_arg) /* arg to setfunc */
-{
- return INTR_FUNC(intr_hdl, intr_connect)
- (CAST_INTR(intr_hdl), setfunc, setfunc_arg);
-}
-
-
-/*
- * Disassociate handler with the specified interrupt.
- */
-void
-xtalk_intr_disconnect(xtalk_intr_t intr_hdl)
-{
- INTR_FUNC(intr_hdl, intr_disconnect)
- (CAST_INTR(intr_hdl));
-}
-
-
-/*
- * Return a hwgraph vertex that represents the CPU currently
- * targeted by an interrupt.
- */
-devfs_handle_t
-xtalk_intr_cpu_get(xtalk_intr_t intr_hdl)
-{
- return INTR_FUNC(intr_hdl, intr_cpu_get)
- (CAST_INTR(intr_hdl));
-}
-
-
-/* =====================================================================
- * CONFIGURATION MANAGEMENT
- */
-
-/*
- * Startup a crosstalk provider
- */
-void
-xtalk_provider_startup(devfs_handle_t xtalk_provider)
-{
- DEV_FUNC(xtalk_provider, provider_startup)
- (xtalk_provider);
-}
-
-
-/*
- * Shutdown a crosstalk provider
- */
-void
-xtalk_provider_shutdown(devfs_handle_t xtalk_provider)
-{
- DEV_FUNC(xtalk_provider, provider_shutdown)
- (xtalk_provider);
-}
-
-/*
- * Enable a device on a xtalk widget
- */
-void
-xtalk_widgetdev_enable(devfs_handle_t xconn_vhdl, int devnum)
-{
- DEV_FUNC(xconn_vhdl, widgetdev_enable) (xconn_vhdl, devnum);
-}
-
-/*
- * Shutdown a device on a xtalk widget
- */
-void
-xtalk_widgetdev_shutdown(devfs_handle_t xconn_vhdl, int devnum)
-{
- DEV_FUNC(xconn_vhdl, widgetdev_shutdown) (xconn_vhdl, devnum);
-}
-
-int
-xtalk_dma_enabled(devfs_handle_t xconn_vhdl)
-{
- return DEV_FUNC(xconn_vhdl, dma_enabled) (xconn_vhdl);
-}
-/*
- * Generic crosstalk functions, for use with all crosstalk providers
- * and all crosstalk devices.
- */
-
-/****** Generic crosstalk interrupt interfaces ******/
-devfs_handle_t
-xtalk_intr_dev_get(xtalk_intr_t xtalk_intr)
-{
- return (xtalk_intr->xi_dev);
-}
-
-xwidgetnum_t
-xtalk_intr_target_get(xtalk_intr_t xtalk_intr)
-{
- return (xtalk_intr->xi_target);
-}
-
-xtalk_intr_vector_t
-xtalk_intr_vector_get(xtalk_intr_t xtalk_intr)
-{
- return (xtalk_intr->xi_vector);
-}
-
-iopaddr_t
-xtalk_intr_addr_get(struct xtalk_intr_s *xtalk_intr)
-{
- return (xtalk_intr->xi_addr);
-}
-
-void *
-xtalk_intr_sfarg_get(xtalk_intr_t xtalk_intr)
-{
- return (xtalk_intr->xi_sfarg);
-}
-
-/****** Generic crosstalk pio interfaces ******/
-devfs_handle_t
-xtalk_pio_dev_get(xtalk_piomap_t xtalk_piomap)
-{
- return (xtalk_piomap->xp_dev);
-}
-
-xwidgetnum_t
-xtalk_pio_target_get(xtalk_piomap_t xtalk_piomap)
-{
- return (xtalk_piomap->xp_target);
-}
-
-iopaddr_t
-xtalk_pio_xtalk_addr_get(xtalk_piomap_t xtalk_piomap)
-{
- return (xtalk_piomap->xp_xtalk_addr);
-}
-
-ulong
-xtalk_pio_mapsz_get(xtalk_piomap_t xtalk_piomap)
-{
- return (xtalk_piomap->xp_mapsz);
-}
-
-caddr_t
-xtalk_pio_kvaddr_get(xtalk_piomap_t xtalk_piomap)
-{
- return (xtalk_piomap->xp_kvaddr);
-}
-
-
-/****** Generic crosstalk dma interfaces ******/
-devfs_handle_t
-xtalk_dma_dev_get(xtalk_dmamap_t xtalk_dmamap)
-{
- return (xtalk_dmamap->xd_dev);
-}
-
-xwidgetnum_t
-xtalk_dma_target_get(xtalk_dmamap_t xtalk_dmamap)
-{
- return (xtalk_dmamap->xd_target);
-}
-
-
-/****** Generic crosstalk widget information interfaces ******/
-
-/* xwidget_info_chk:
- * check to see if this vertex is a widget;
- * if so, return its widget_info (if any).
- * if not, return NULL.
- */
-xwidget_info_t
-xwidget_info_chk(devfs_handle_t xwidget)
-{
- arbitrary_info_t ainfo = 0;
-
- hwgraph_info_get_LBL(xwidget, INFO_LBL_XWIDGET, &ainfo);
- return (xwidget_info_t) ainfo;
-}
-
-
-xwidget_info_t
-xwidget_info_get(devfs_handle_t xwidget)
-{
- xwidget_info_t widget_info;
-
- widget_info = (xwidget_info_t)
- hwgraph_fastinfo_get(xwidget);
-
-#ifdef LATER
- if ((widget_info != NULL) &&
- (widget_info->w_fingerprint != widget_info_fingerprint))
-#ifdef SUPPORT_PRINTING_V_FORMAT
- PRINT_PANIC("%v bad xwidget_info", xwidget);
-#else
- PRINT_PANIC("%x bad xwidget_info", xwidget);
-#endif
-#endif /* LATER */
-
- return (widget_info);
-}
-
-void
-xwidget_info_set(devfs_handle_t xwidget, xwidget_info_t widget_info)
-{
- if (widget_info != NULL)
- widget_info->w_fingerprint = widget_info_fingerprint;
-
- hwgraph_fastinfo_set(xwidget, (arbitrary_info_t) widget_info);
-
- /* Also, mark this vertex as an xwidget,
- * and use the widget_info, so xwidget_info_chk
- * can work (and be fairly efficient).
- */
- hwgraph_info_add_LBL(xwidget, INFO_LBL_XWIDGET,
- (arbitrary_info_t) widget_info);
-}
-
-devfs_handle_t
-xwidget_info_dev_get(xwidget_info_t xwidget_info)
-{
- if (xwidget_info == NULL)
- panic("null xwidget_info");
- return (xwidget_info->w_vertex);
-}
-
-xwidgetnum_t
-xwidget_info_id_get(xwidget_info_t xwidget_info)
-{
- if (xwidget_info == NULL)
- panic("null xwidget_info");
- return (xwidget_info->w_id);
-}
-
-
-devfs_handle_t
-xwidget_info_master_get(xwidget_info_t xwidget_info)
-{
- if (xwidget_info == NULL)
- panic("null xwidget_info");
- return (xwidget_info->w_master);
-}
-
-xwidgetnum_t
-xwidget_info_masterid_get(xwidget_info_t xwidget_info)
-{
- if (xwidget_info == NULL)
- panic("null xwidget_info");
- return (xwidget_info->w_masterid);
-}
-
-xwidget_part_num_t
-xwidget_info_part_num_get(xwidget_info_t xwidget_info)
-{
- if (xwidget_info == NULL)
- panic("null xwidget_info");
- return (xwidget_info->w_hwid.part_num);
-}
-
-xwidget_mfg_num_t
-xwidget_info_mfg_num_get(xwidget_info_t xwidget_info)
-{
- if (xwidget_info == NULL)
- panic("null xwidget_info");
- return (xwidget_info->w_hwid.mfg_num);
-}
-/* Extract the widget name from the widget information
- * for the xtalk widget.
- */
-char *
-xwidget_info_name_get(xwidget_info_t xwidget_info)
-{
- if (xwidget_info == NULL)
- panic("null xwidget info");
- return(xwidget_info->w_name);
-}
-/****** Generic crosstalk initialization interfaces ******/
-
-/*
- * One-time initialization needed for systems that support crosstalk.
- */
-void
-xtalk_init(void)
-{
- cdl_p cp;
-
-#if DEBUG && ATTACH_DEBUG
- printf("xtalk_init\n");
-#endif
- /* Allocate the registry.
- * We might already have one.
- * If we don't, go get one.
- * MPness: someone might have
- * set one up for us while we
- * were not looking; use an atomic
- * compare-and-swap to commit to
- * using the new registry if and
- * only if nobody else did first.
- * If someone did get there first,
- * toss the one we allocated back
- * into the pool.
- */
- if (xtalk_registry == NULL) {
- cp = cdl_new(EDGE_LBL_XIO, "part", "mfgr");
- if (!compare_and_swap_ptr((void **) &xtalk_registry, NULL, (void *) cp)) {
- cdl_del(cp);
- }
- }
- ASSERT(xtalk_registry != NULL);
-}
-
-/*
- * Associate a set of xtalk_provider functions with a vertex.
- */
-void
-xtalk_provider_register(devfs_handle_t provider, xtalk_provider_t *xtalk_fns)
-{
- hwgraph_fastinfo_set(provider, (arbitrary_info_t) xtalk_fns);
-}
-
-/*
- * Disassociate a set of xtalk_provider functions with a vertex.
- */
-void
-xtalk_provider_unregister(devfs_handle_t provider)
-{
- hwgraph_fastinfo_set(provider, (arbitrary_info_t)NULL);
-}
-
-/*
- * Obtain a pointer to the xtalk_provider functions for a specified Crosstalk
- * provider.
- */
-xtalk_provider_t *
-xtalk_provider_fns_get(devfs_handle_t provider)
-{
- return ((xtalk_provider_t *) hwgraph_fastinfo_get(provider));
-}
-
-/*
- * Announce a driver for a particular crosstalk part.
- * Returns 0 on success or -1 on failure. Failure occurs if the
- * specified hardware already has a driver.
- */
-/*ARGSUSED4 */
-int
-xwidget_driver_register(xwidget_part_num_t part_num,
- xwidget_mfg_num_t mfg_num,
- char *driver_prefix,
- unsigned flags)
-{
- /* a driver's init routine could call
- * xwidget_driver_register before the
- * system calls xtalk_init; so, we
- * make the call here.
- */
- if (xtalk_registry == NULL)
- xtalk_init();
-
- return cdl_add_driver(xtalk_registry,
- part_num, mfg_num,
- driver_prefix, flags, NULL);
-}
-
-/*
- * Inform xtalk infrastructure that a driver is no longer available for
- * handling any widgets.
- */
-void
-xwidget_driver_unregister(char *driver_prefix)
-{
- /* before a driver calls unregister,
- * it must have called registger; so we
- * can assume we have a registry here.
- */
- ASSERT(xtalk_registry != NULL);
-
- cdl_del_driver(xtalk_registry, driver_prefix, NULL);
-}
-
-/*
- * Call some function with each vertex that
- * might be one of this driver's attach points.
- */
-void
-xtalk_iterate(char *driver_prefix,
- xtalk_iter_f *func)
-{
- ASSERT(xtalk_registry != NULL);
-
- cdl_iterate(xtalk_registry, driver_prefix, (cdl_iter_f *)func);
-}
-
-/*
- * xwidget_register:
- * Register a xtalk device (xwidget) by doing the following.
- * -allocate and initialize xwidget_info data
- * -allocate a hwgraph vertex with name based on widget number (id)
- * -look up the widget's initialization function and call it,
- * or remember the vertex for later initialization.
- *
- */
-int
-xwidget_register(xwidget_hwid_t hwid, /* widget's hardware ID */
- devfs_handle_t widget, /* widget to initialize */
- xwidgetnum_t id, /* widget's target id (0..f) */
- devfs_handle_t master, /* widget's master vertex */
- xwidgetnum_t targetid, /* master's target id (9/a) */
- async_attach_t aa)
-{
- xwidget_info_t widget_info;
- char *s,devnm[MAXDEVNAME];
-
- /* Allocate widget_info and associate it with widget vertex */
- NEW(widget_info);
-
- /* Initialize widget_info */
- widget_info->w_vertex = widget;
- widget_info->w_id = id;
- widget_info->w_master = master;
- widget_info->w_masterid = targetid;
- widget_info->w_hwid = *hwid; /* structure copy */
- widget_info->w_efunc = 0;
- widget_info->w_einfo = 0;
- /*
- * get the name of this xwidget vertex and keep the info.
- * This is needed during errors and interrupts, but as
- * long as we have it, we can use it elsewhere.
- */
- s = dev_to_name(widget,devnm,MAXDEVNAME);
- widget_info->w_name = kmalloc(strlen(s) + 1, GFP_KERNEL);
- strcpy(widget_info->w_name,s);
-
- xwidget_info_set(widget, widget_info);
-
- device_master_set(widget, master);
-
- /* All the driver init routines (including
- * xtalk_init) are called before we get into
- * attaching devices, so we can assume we
- * have a registry here.
- */
- ASSERT(xtalk_registry != NULL);
-
- /*
- * Add pointer to async attach info -- tear down will be done when
- * the particular descendant is done with the info.
- */
- if (aa)
- async_attach_add_info(widget, aa);
-
- return cdl_add_connpt(xtalk_registry, hwid->part_num, hwid->mfg_num,
- widget, 0);
-}
-
-/*
- * xwidget_unregister :
- * Unregister the xtalk device and detach all its hwgraph namespace.
- */
-int
-xwidget_unregister(devfs_handle_t widget)
-{
- xwidget_info_t widget_info;
- xwidget_hwid_t hwid;
-
- /* Make sure that we have valid widget information initialized */
- if (!(widget_info = xwidget_info_get(widget)))
- return(1);
-
- /* Remove the inventory information associated
- * with the widget.
- */
- hwgraph_inventory_remove(widget, -1, -1, -1, -1, -1);
-
- hwid = &(widget_info->w_hwid);
-
- cdl_del_connpt(xtalk_registry, hwid->part_num, hwid->mfg_num,
- widget, 0);
-
- /* Clean out the xwidget information */
- (void)kfree(widget_info->w_name);
- BZERO((void *)widget_info, sizeof(widget_info));
- DEL(widget_info);
-
- return(0);
-}
-
-/*
- * Issue a link reset to a widget.
- */
-void
-xwidget_reset(devfs_handle_t xwidget)
-{
- xswitch_reset_link(xwidget);
-
-}
-
-
-void
-xwidget_gfx_reset(devfs_handle_t xwidget)
-{
- xwidget_info_t info;
-
- xswitch_reset_link(xwidget);
- info = xwidget_info_get(xwidget);
-#ifdef LATER
- ASSERT_ALWAYS(info != NULL);
-#endif
-
- /*
- * Enable this for other architectures once we add widget_reset to the
- * xtalk provider interface.
- */
- DEV_FUNC(xtalk_provider, widget_reset)
- (xwidget_info_master_get(info), xwidget_info_id_get(info));
-}
-
-#define ANON_XWIDGET_NAME "No Name" /* Default Widget Name */
-
-/* Get the canonical hwgraph name of xtalk widget */
-char *
-xwidget_name_get(devfs_handle_t xwidget_vhdl)
-{
- xwidget_info_t info;
-
- /* If we have a bogus widget handle then return
- * a default anonymous widget name.
- */
- if (xwidget_vhdl == GRAPH_VERTEX_NONE)
- return(ANON_XWIDGET_NAME);
- /* Read the widget name stored in the widget info
- * for the widget setup during widget initialization.
- */
- info = xwidget_info_get(xwidget_vhdl);
- ASSERT(info != NULL);
- return(xwidget_info_name_get(info));
-}
-
-/*
- * xtalk_device_shutdown
- * Disable the specified xtalk widget and clean out all the software
- * state associated with it.
- */
-int
-xtalk_device_shutdown(devfs_handle_t xbus_vhdl, xwidgetnum_t widget)
-{
- devfs_handle_t widget_vhdl;
- char edge_name[8];
-
- sprintf(edge_name, "%d", widget);
- if (hwgraph_traverse(xbus_vhdl, edge_name, &widget_vhdl)
- != GRAPH_SUCCESS)
- return(1);
-
- xwidget_unregister(widget_vhdl);
-
- return(0);
-}
EXTRA_CFLAGS := -DLITTLE_ENDIAN
-obj-y := probe.o setup.o sn_asm.o sv.o bte.o iomv.o \
- irq.o mca.o
+obj-y := probe.o setup.o sv.o bte.o irq.o mca.o \
+ idle.o sn2/
-obj-$(CONFIG_IA64_SGI_SN2) += sn2/
-obj-$(CONFIG_IA64_SGI_AUTOTEST) += llsc4.o misctest.o
obj-$(CONFIG_IA64_GENERIC) += machvec.o
obj-$(CONFIG_MODULES) += sn_ksyms.o
-obj-$(CONFIG_IA64_SGI_SN_BRT) += bte_regr_test.o
/*
*
*
- * Copyright (c) 2000-2002 Silicon Graphics, Inc. All Rights Reserved.
+ * Copyright (c) 2000-2003 Silicon Graphics, Inc. All Rights Reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License
#include <asm/sn/arch.h>
#include <asm/sn/sn_cpuid.h>
#include <asm/sn/pda.h>
-#ifdef CONFIG_IA64_SGI_SN2
#include <asm/sn/sn2/shubio.h>
-#endif
#include <asm/nodedata.h>
#include <linux/bootmem.h>
#include <linux/string.h>
#include <linux/sched.h>
-#include <asm/sn/bte_copy.h>
+#include <asm/sn/bte.h>
-int bte_offsets[] = { IIO_IBLS0, IIO_IBLS1 };
+#ifndef L1_CACHE_MASK
+#define L1_CACHE_MASK (L1_CACHE_BYTES - 1)
+#endif
/*
- * bte_init_node(nodepda, cnode)
+ * The base address of each set of bte registers.
+ */
+static int bte_offsets[] = { IIO_IBLS0, IIO_IBLS1 };
+
+
+/************************************************************************
+ * Block Transfer Engine copy related functions.
*
- * Initialize the nodepda structure with BTE base addresses and
- * spinlocks.
+ ***********************************************************************/
+
+
+/*
+ * bte_copy(src, dest, len, mode, notification)
+ *
+ * Use the block transfer engine to move kernel memory from src to dest
+ * using the assigned mode.
+ *
+ * Parameters:
+ * src - physical address of the transfer source.
+ * dest - physical address of the transfer destination.
+ * len - number of bytes to transfer from source to dest.
+ * mode - hardware defined. See reference information
+ * for IBCT0/1 in the SHUB Programmers Reference
+ * notification - kernel virtual address of the notification cache
+ * line. If NULL, the default is used and
+ * the bte_copy is synchronous.
*
- * NOTE: The kernel parameter btetest will cause the initialization
- * code to reserve blocks of physically contiguous memory to be
- * used by the bte test module.
+ * NOTE: This function requires src, dest, and len to
+ * be cacheline aligned.
*/
-void
-bte_init_node(nodepda_t * mynodepda, cnodeid_t cnode)
+bte_result_t
+bte_copy(u64 src, u64 dest, u64 len, u64 mode, void *notification)
{
- int i;
+ int bte_to_use;
+ u64 transfer_size;
+ struct bteinfo_s *bte;
+ bte_result_t bte_status;
+ unsigned long irq_flags;
- /*
- * Indicate that all the block transfer engines on this node
- * are available.
- */
- for (i = 0; i < BTES_PER_NODE; i++) {
-#ifdef CONFIG_IA64_SGI_SN2
- /* >>> Don't know why the 0x1800000L is here. Robin */
- mynodepda->bte_if[i].bte_base_addr =
- (char *)LOCAL_MMR_ADDR(bte_offsets[i] | 0x1800000L);
+ BTE_PRINTK(("bte_copy(0x%lx, 0x%lx, 0x%lx, 0x%lx, 0x%p)\n",
+ src, dest, len, mode, notification));
-#elif CONFIG_IA64_SGI_SN1
- mynodepda->bte_if[i].bte_base_addr =
- (char *)LOCAL_HUB_ADDR(bte_offsets[i]);
-#else
-#error BTE Not defined for this hardware platform.
-#endif
+ if (len == 0) {
+ return BTE_SUCCESS;
+ }
+
+ ASSERT(!((len & L1_CACHE_MASK) ||
+ (src & L1_CACHE_MASK) || (dest & L1_CACHE_MASK)));
+ ASSERT(len < ((BTE_LEN_MASK + 1) << L1_CACHE_SHIFT));
+
+ do {
+ local_irq_save(irq_flags);
+
+ bte_to_use = 0;
+ /* Attempt to lock one of the BTE interfaces. */
+ while ((bte_to_use < BTES_PER_NODE) &&
+ BTE_LOCK_IF_AVAIL(bte_to_use)) {
+ bte_to_use++;
+ }
+
+ if (bte_to_use < BTES_PER_NODE) {
+ break;
+ }
+
+ local_irq_restore(irq_flags);
+
+ if (!(mode & BTE_WACQUIRE)) {
+ return BTEFAIL_NOTAVAIL;
+ }
+
+ /* Wait until a bte is available. */
+ udelay(10);
+ } while (1);
+
+ bte = pda->cpu_bte_if[bte_to_use];
+ BTE_PRINTKV(("Got a lock on bte %d\n", bte_to_use));
- /*
- * Initialize the notification and spinlock
- * so the first transfer can occur.
- */
- mynodepda->bte_if[i].most_rcnt_na =
- &(mynodepda->bte_if[i].notify);
- mynodepda->bte_if[i].notify = 0L;
-#ifdef CONFIG_IA64_SGI_BTE_LOCKING
- spin_lock_init(&mynodepda->bte_if[i].spinlock);
-#endif /* CONFIG_IA64_SGI_BTE_LOCKING */
- mynodepda->bte_if[i].bte_test_buf =
- alloc_bootmem_node(NODE_DATA(cnode), BTE_MAX_XFER);
+ if (notification == NULL) {
+ /* User does not want to be notified. */
+ bte->most_rcnt_na = &bte->notify;
+ } else {
+ bte->most_rcnt_na = notification;
}
-}
+ /* Calculate the number of cache lines to transfer. */
+ transfer_size = ((len >> L1_CACHE_SHIFT) & BTE_LEN_MASK);
+ /* Initialize the notification to a known value. */
+ *bte->most_rcnt_na = -1L;
-/*
- * bte_reset_nasid(nasid_t)
- *
- * Does a soft reset of the BTEs on the specified nasid.
- * This is followed by a one-line transfer from each of the
- * virtual interfaces.
- */
-void
-bte_reset_nasid(nasid_t n)
-{
- ii_ibcr_u_t ibcr;
-
- ibcr.ii_ibcr_regval = REMOTE_HUB_L(n, IIO_IBCR);
- ibcr.ii_ibcr_fld_s.i_soft_reset = 1;
- REMOTE_HUB_S(n, IIO_IBCR, ibcr.ii_ibcr_regval);
-
- /* One line transfer on virtual interface 0 */
- REMOTE_HUB_S(n, IIO_IBLS_0, IBLS_BUSY | 1);
- REMOTE_HUB_S(n, IIO_IBSA_0, TO_PHYS(__pa(&nodepda->bte_cleanup)));
- REMOTE_HUB_S(n, IIO_IBDA_0,
- TO_PHYS(__pa(&nodepda->bte_cleanup[4*L1_CACHE_BYTES])));
- REMOTE_HUB_S(n, IIO_IBNA_0,
- TO_PHYS(__pa(&nodepda->bte_cleanup[4*L1_CACHE_BYTES])));
- REMOTE_HUB_S(n, IIO_IBCT_0, BTE_NOTIFY);
- while (REMOTE_HUB_L(n, IIO_IBLS0)) {
- /* >>> Need some way out in case of hang... */
+ /* Set the status reg busy bit and transfer length */
+ BTE_PRINTKV(("IBLS - HUB_S(0x%p, 0x%lx)\n",
+ BTEREG_LNSTAT_ADDR, IBLS_BUSY | transfer_size));
+ HUB_S(BTEREG_LNSTAT_ADDR, (IBLS_BUSY | transfer_size));
+
+ /* Set the source and destination registers */
+ BTE_PRINTKV(("IBSA - HUB_S(0x%p, 0x%lx)\n", BTEREG_SRC_ADDR,
+ (TO_PHYS(src))));
+ HUB_S(BTEREG_SRC_ADDR, (TO_PHYS(src)));
+ BTE_PRINTKV(("IBDA - HUB_S(0x%p, 0x%lx)\n", BTEREG_DEST_ADDR,
+ (TO_PHYS(dest))));
+ HUB_S(BTEREG_DEST_ADDR, (TO_PHYS(dest)));
+
+ /* Set the notification register */
+ BTE_PRINTKV(("IBNA - HUB_S(0x%p, 0x%lx)\n", BTEREG_NOTIF_ADDR,
+ (TO_PHYS(ia64_tpa((unsigned long)bte->most_rcnt_na)))));
+ HUB_S(BTEREG_NOTIF_ADDR, (TO_PHYS(ia64_tpa((unsigned long)bte->most_rcnt_na))));
+
+
+ /* Initiate the transfer */
+ BTE_PRINTK(("IBCT - HUB_S(0x%p, 0x%lx)\n", BTEREG_CTRL_ADDR,
+ BTE_VALID_MODE(mode)));
+ HUB_S(BTEREG_CTRL_ADDR, BTE_VALID_MODE(mode));
+
+ spin_unlock_irqrestore(&bte->spinlock, irq_flags);
+
+
+ if (notification != NULL) {
+ return BTE_SUCCESS;
}
- /* One line transfer on virtual interface 1 */
- REMOTE_HUB_S(n, IIO_IBLS_1, IBLS_BUSY | 1);
- REMOTE_HUB_S(n, IIO_IBSA_1, TO_PHYS(__pa(nodepda->bte_cleanup)));
- REMOTE_HUB_S(n, IIO_IBDA_1,
- TO_PHYS(__pa(nodepda->bte_cleanup[4 * L1_CACHE_BYTES])));
- REMOTE_HUB_S(n, IIO_IBNA_1,
- TO_PHYS(__pa(nodepda->bte_cleanup[5 * L1_CACHE_BYTES])));
- REMOTE_HUB_S(n, IIO_IBCT_1, BTE_NOTIFY);
- while (REMOTE_HUB_L(n, IIO_IBLS1)) {
- /* >>> Need some way out in case of hang... */
+ while (*bte->most_rcnt_na == -1UL) {
}
-}
-/*
- * bte_init_cpu()
- *
- * Initialize the cpupda structure with pointers to the
- * nodepda bte blocks.
- *
- */
-void
-bte_init_cpu(void)
-{
- pda->cpu_bte_if[0] = &(nodepda->bte_if[1]);
- pda->cpu_bte_if[1] = &(nodepda->bte_if[0]);
+ BTE_PRINTKV((" Delay Done. IBLS = 0x%lx, most_rcnt_na = 0x%lx\n",
+ HUB_L(BTEREG_LNSTAT_ADDR), *bte->most_rcnt_na));
+
+ if (*bte->most_rcnt_na & IBLS_ERROR) {
+ bte_status = *bte->most_rcnt_na & ~IBLS_ERROR;
+ *bte->most_rcnt_na = 0L;
+ } else {
+ bte_status = BTE_SUCCESS;
+ }
+ BTE_PRINTK(("Returning status is 0x%lx and most_rcnt_na is 0x%lx\n",
+ HUB_L(BTEREG_LNSTAT_ADDR), *bte->most_rcnt_na));
+
+ return bte_status;
}
char *bteBlock;
if (len == 0) {
- return (BTE_SUCCESS);
+ return BTE_SUCCESS;
}
-#ifdef CONFIG_IA64_SGI_BTE_LOCKING
-#error bte_unaligned_copy() assumes single BTE selection in bte_copy().
-#else
/* temporary buffer used during unaligned transfers */
- bteBlock = pda->cpu_bte_if[0]->bte_test_buf;
-#endif
+ bteBlock = pda->cpu_bte_if[0]->scratch_buf;
headBcopySrcOffset = src & L1_CACHE_MASK;
destFirstCacheOffset = dest & L1_CACHE_MASK;
headBteLen += footBteLen;
} else if (footBcopyLen > 0) {
rv = bte_copy(footBteSource,
- __pa(bteBlock),
+ ia64_tpa((unsigned long)bteBlock),
footBteLen, mode, NULL);
if (rv != BTE_SUCCESS) {
- return (rv);
+ return rv;
}
memcpy(__va(footBcopyDest),
- (char *)bteBlock, footBcopyLen);
+ (char *) bteBlock, footBcopyLen);
}
} else {
footBcopyLen = 0;
(len - headBcopyLen -
footBcopyLen), mode, NULL);
if (rv != BTE_SUCCESS) {
- return (rv);
+ return rv;
}
}
if (headBcopyLen > 0) {
rv = bte_copy(headBteSource,
- __pa(bteBlock), headBteLen, mode, NULL);
+ ia64_tpa((unsigned long)bteBlock), headBteLen, mode, NULL);
if (rv != BTE_SUCCESS) {
- return (rv);
+ return rv;
}
- memcpy(__va(headBcopyDest), ((char *)bteBlock +
+ memcpy(__va(headBcopyDest), ((char *) bteBlock +
headBcopySrcOffset),
headBcopyLen);
}
- return (BTE_SUCCESS);
+ return BTE_SUCCESS;
+}
+
+
+/************************************************************************
+ * Block Transfer Engine initialization functions.
+ *
+ ***********************************************************************/
+
+
+/*
+ * bte_init_node(nodepda, cnode)
+ *
+ * Initialize the nodepda structure with BTE base addresses and
+ * spinlocks.
+ */
+void
+bte_init_node(nodepda_t * mynodepda, cnodeid_t cnode)
+{
+ int i;
+
+
+ /*
+ * Indicate that all the block transfer engines on this node
+ * are available.
+ */
+
+ /*
+ * Allocate one bte_recover_t structure per node. It holds
+ * the recovery lock for the node. All the bte interface structures
+ * will point at this one bte_recover structure to get the lock.
+ */
+ spin_lock_init(&mynodepda->bte_recovery_lock);
+ init_timer(&mynodepda->bte_recovery_timer);
+ mynodepda->bte_recovery_timer.function = bte_error_handler;
+ mynodepda->bte_recovery_timer.data = (unsigned long) mynodepda;
+
+ for (i = 0; i < BTES_PER_NODE; i++) {
+ /* >>> Don't know why the 0x1800000L is here. Robin */
+ mynodepda->bte_if[i].bte_base_addr =
+ (char *) LOCAL_MMR_ADDR(bte_offsets[i] | 0x1800000L);
+
+ /*
+ * Initialize the notification and spinlock
+ * so the first transfer can occur.
+ */
+ mynodepda->bte_if[i].most_rcnt_na =
+ &(mynodepda->bte_if[i].notify);
+ mynodepda->bte_if[i].notify = 0L;
+ spin_lock_init(&mynodepda->bte_if[i].spinlock);
+
+ mynodepda->bte_if[i].scratch_buf =
+ alloc_bootmem_node(NODE_DATA(cnode), BTE_MAX_XFER);
+ mynodepda->bte_if[i].bte_cnode = cnode;
+ mynodepda->bte_if[i].bte_error_count = 0;
+ mynodepda->bte_if[i].bte_num = i;
+ mynodepda->bte_if[i].cleanup_active = 0;
+ mynodepda->bte_if[i].bh_error = 0;
+ }
+
+}
+
+/*
+ * bte_init_cpu()
+ *
+ * Initialize the cpupda structure with pointers to the
+ * nodepda bte blocks.
+ *
+ */
+void
+bte_init_cpu(void)
+{
+ /* Called by setup.c as each cpu is being added to the nodepda */
+ if (local_node_data->active_cpu_count & 0x1) {
+ pda->cpu_bte_if[0] = &(nodepda->bte_if[0]);
+ pda->cpu_bte_if[1] = &(nodepda->bte_if[1]);
+ } else {
+ pda->cpu_bte_if[0] = &(nodepda->bte_if[1]);
+ pda->cpu_bte_if[1] = &(nodepda->bte_if[0]);
+ }
}
--- /dev/null
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (c) 2001-2003 Silicon Graphics, Inc. All rights reserved.
+ */
+
+#include <linux/config.h>
+#include <asm/sn/leds.h>
+#include <asm/sn/simulator.h>
+
+void snidle(int state) {
+ if (state) {
+ if (pda->idle_flag == 0) {
+ /*
+ * Turn the activity LED off.
+ */
+ set_led_bits(0, LED_CPU_ACTIVITY);
+ }
+
+#ifdef CONFIG_IA64_SGI_SN_SIM
+ if (IS_RUNNING_ON_SIMULATOR())
+ SIMULATOR_SLEEP();
+#endif
+
+ pda->idle_flag = 1;
+ } else {
+ /*
+ * Turn the activity LED on.
+ */
+ set_led_bits(LED_CPU_ACTIVITY, LED_CPU_ACTIVITY);
+
+ pda->idle_flag = 0;
+ }
+}
+++ /dev/null
-/*
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Copyright (C) 2000-2003 Silicon Graphics, Inc. All rights reserved.
- */
-
-#include <asm/io.h>
-#include <linux/module.h>
-
-extern void * sn_io_addr(unsigned long port); /* defined in sn[12]/iomv.c */
-
-/**
- * sn_inb - read a byte from a port
- * @port: port to read from
- *
- * Reads a byte from @port and returns it to the caller.
- */
-unsigned int
-sn_inb (unsigned long port)
-{
- volatile unsigned char *addr = sn_io_addr(port);
- unsigned char ret;
-
- ret = *addr;
- __ia64_mf_a();
- return ret;
-}
-
-/**
- * sn_inw - read a word from a port
- * @port: port to read from
- *
- * Reads a word from @port and returns it to the caller.
- */
-unsigned int
-sn_inw (unsigned long port)
-{
- volatile unsigned short *addr = sn_io_addr(port);
- unsigned short ret;
-
- ret = *addr;
- __ia64_mf_a();
- return ret;
-}
-
-/**
- * sn_inl - read a word from a port
- * @port: port to read from
- *
- * Reads a word from @port and returns it to the caller.
- */
-unsigned int
-sn_inl (unsigned long port)
-{
- volatile unsigned int *addr = sn_io_addr(port);
- unsigned int ret;
-
- ret = *addr;
- __ia64_mf_a();
- return ret;
-}
-
-/**
- * sn_outb - write a byte to a port
- * @port: port to write to
- * @val: value to write
- *
- * Writes @val to @port.
- */
-void
-sn_outb (unsigned char val, unsigned long port)
-{
- volatile unsigned char *addr = sn_io_addr(port);
-
- *addr = val;
-}
-
-/**
- * sn_outw - write a word to a port
- * @port: port to write to
- * @val: value to write
- *
- * Writes @val to @port.
- */
-void
-sn_outw (unsigned short val, unsigned long port)
-{
- volatile unsigned short *addr = sn_io_addr(port);
-
- *addr = val;
-}
-
-/**
- * sn_outl - write a word to a port
- * @port: port to write to
- * @val: value to write
- *
- * Writes @val to @port.
- */
-void
-sn_outl (unsigned int val, unsigned long port)
-{
- volatile unsigned int *addr = sn_io_addr(port);
-
- *addr = val;
-}
-
-EXPORT_SYMBOL(sn_inb);
-EXPORT_SYMBOL(sn_inw);
-EXPORT_SYMBOL(sn_inl);
-EXPORT_SYMBOL(sn_outb);
-EXPORT_SYMBOL(sn_outw);
-EXPORT_SYMBOL(sn_outl);
/*
- * Platform dependent support for SGI SN1
+ * Platform dependent support for SGI SN
*
- * Copyright (c) 2000-2002 Silicon Graphics, Inc. All Rights Reserved.
+ * Copyright (c) 2000-2003 Silicon Graphics, Inc. All Rights Reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License
* http://oss.sgi.com/projects/GenInfo/NoticeExplan
*/
-#include <linux/config.h>
#include <linux/init.h>
#include <linux/sched.h>
#include <asm/current.h>
#include <linux/irq.h>
#include <linux/interrupt.h>
+#include <linux/slab.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/sn/sgi.h>
#include <asm/sn/pci/bridge.h>
#include <asm/sn/pci/pciio.h>
#include <asm/sn/pci/pciio_private.h>
-#ifdef ajmtestintr
#include <asm/sn/pci/pcibr.h>
#include <asm/sn/pci/pcibr_private.h>
-#endif /* ajmtestintr */
#include <asm/sn/sn_cpuid.h>
#include <asm/sn/io.h>
#include <asm/sn/intr.h>
#include <asm/sn/addrs.h>
#include <asm/sn/driver.h>
#include <asm/sn/arch.h>
-#include <asm/sn/nodepda.h>
+#include <asm/sn/pda.h>
+#include <asm/processor.h>
+#include <asm/system.h>
+#include <asm/bitops.h>
int irq_to_bit_pos(int irq);
+static void force_interrupt(int irq);
+extern void pcibr_force_interrupt(pcibr_intr_t intr);
+extern int sn_force_interrupt_flag;
+
+
static unsigned int
sn_startup_irq(unsigned int irq)
static void
sn_ack_irq(unsigned int irq)
{
-#ifdef CONFIG_IA64_SGI_SN1
- int bit = -1;
- unsigned long long intpend_val;
- int subnode;
-#endif
-#ifdef CONFIG_IA64_SGI_SN2
unsigned long event_occurred, mask = 0;
-#endif
int nasid;
irq = irq & 0xff;
nasid = smp_physical_node_id();
-#ifdef CONFIG_IA64_SGI_SN1
- subnode = cpuid_to_subnode(smp_processor_id());
- if (irq == SGI_UART_IRQ) {
- intpend_val = REMOTE_HUB_PI_L(nasid, subnode, PI_INT_PEND0);
- if (intpend_val & (1L<<GFX_INTR_A) ) {
- bit = GFX_INTR_A;
- REMOTE_HUB_PI_CLR_INTR(nasid, subnode, bit);
- }
- if ( intpend_val & (1L<<GFX_INTR_B) ) {
- bit = GFX_INTR_B;
- REMOTE_HUB_PI_CLR_INTR(nasid, subnode, bit);
- }
- if (intpend_val & (1L<<PG_MIG_INTR) ) {
- bit = PG_MIG_INTR;
- REMOTE_HUB_PI_CLR_INTR(nasid, subnode, bit);
- }
- if (intpend_val & (1L<<CC_PEND_A)) {
- bit = CC_PEND_A;
- REMOTE_HUB_PI_CLR_INTR(nasid, subnode, bit);
- }
- if (intpend_val & (1L<<CC_PEND_B)) {
- bit = CC_PEND_B;
- REMOTE_HUB_PI_CLR_INTR(nasid, subnode, bit);
- }
- return;
- }
- bit = irq_to_bit_pos(irq);
- REMOTE_HUB_PI_CLR_INTR(nasid, subnode, bit);
-#endif
-
-#ifdef CONFIG_IA64_SGI_SN2
event_occurred = HUB_L( (unsigned long *)GLOBAL_MMR_ADDR(nasid,SH_EVENT_OCCURRED) );
if (event_occurred & SH_EVENT_OCCURRED_UART_INT_MASK) {
mask |= (1 << SH_EVENT_OCCURRED_UART_INT_SHFT);
mask |= (1 << SH_EVENT_OCCURRED_II_INT1_SHFT);
}
HUB_S((unsigned long *)GLOBAL_MMR_ADDR(nasid, SH_EVENT_OCCURRED_ALIAS), mask );
-#endif
+ __set_bit(irq, (volatile void *)pda->sn_in_service_ivecs);
}
static void
sn_end_irq(unsigned int irq)
{
-#ifdef CONFIG_IA64_SGI_SN1
- unsigned long long intpend_val, mask = 0x70L;
- int subnode;
-#endif
int nasid;
-#ifdef CONFIG_IA64_SGI_SN2
+ int ivec;
unsigned long event_occurred;
-#endif
- irq = irq & 0xff;
-#ifdef CONFIG_IA64_SGI_SN1
- if (irq == SGI_UART_IRQ) {
- nasid = smp_physical_node_id();
- subnode = cpuid_to_subnode(smp_processor_id());
- intpend_val = REMOTE_HUB_PI_L(nasid, subnode, PI_INT_PEND0);
- if (intpend_val & mask) {
- platform_send_ipi(smp_processor_id(), SGI_UART_IRQ, IA64_IPI_DM_INT, 0);
- }
- }
-#endif
-#ifdef CONFIG_IA64_SGI_SN2
- if (irq == SGI_UART_VECTOR) {
+ ivec = irq & 0xff;
+ if (ivec == SGI_UART_VECTOR) {
nasid = smp_physical_node_id();
event_occurred = HUB_L( (unsigned long *)GLOBAL_MMR_ADDR(nasid,SH_EVENT_OCCURRED) );
// If the UART bit is set here, we may have received an interrupt from the
platform_send_ipi(smp_processor_id(), SGI_UART_VECTOR, IA64_IPI_DM_INT, 0);
}
}
-#endif
-
+ __clear_bit(ivec, (volatile void *)pda->sn_in_service_ivecs);
+ if (sn_force_interrupt_flag)
+ force_interrupt(irq);
}
static void
}
-struct hw_interrupt_type irq_type_iosapic_level = {
+struct hw_interrupt_type irq_type_sn = {
"SN hub",
sn_startup_irq,
sn_shutdown_irq,
};
-#define irq_type_sn irq_type_iosapic_level
-struct irq_desc *_sn_irq_desc[NR_CPUS];
-
struct irq_desc *
sn_irq_desc(unsigned int irq) {
- int cpu = irq >> 8;
- irq = irq & 0xff;
+ irq = SN_IVEC_FROM_IRQ(irq);
- return(_sn_irq_desc[cpu] + irq);
+ return(_irq_desc + irq);
}
u8
sn_irq_to_vector(u8 irq) {
- return(irq & 0xff);
-}
-
-int gsi_to_vector(u32 irq) {
- return irq & 0xff;
-}
-
-int gsi_to_irq(u32 irq) {
- return irq & 0xff;
+ return(irq);
}
unsigned int
return (CPU_VECTOR_TO_IRQ(smp_processor_id(), vector));
}
-void *kmalloc(size_t, int);
-
void
sn_irq_init (void)
{
int i;
irq_desc_t *base_desc = _irq_desc;
- for (i=IA64_FIRST_DEVICE_VECTOR; i<NR_IVECS; i++) {
+ for (i=IA64_FIRST_DEVICE_VECTOR; i<NR_IRQS; i++) {
if (base_desc[i].handler == &no_irq_type) {
base_desc[i].handler = &irq_type_sn;
}
}
}
-void
-sn_init_irq_desc(void) {
- int i;
- irq_desc_t *base_desc = _irq_desc, *p;
-
- for (i=0; i < NR_CPUS; i++) {
- p = page_address(alloc_pages_node(local_nodeid, GFP_KERNEL,
- get_order(sizeof(struct irq_desc) * NR_IVECS) ) );
- ASSERT(p);
- memcpy(p, base_desc, sizeof(struct irq_desc) * NR_IVECS);
- _sn_irq_desc[i] = p;
- }
-}
-
-
int
bit_pos_to_irq(int bit) {
#define BIT_TO_IRQ 64
if (bit > 118) bit = 118;
-#ifdef CONFIG_IA64_SGI_SN1
- if (bit >= GFX_INTR_A && bit <= CC_PEND_B) {
- return SGI_UART_IRQ;
- }
-#endif
-
return bit + BIT_TO_IRQ;
}
return bit;
}
-#ifdef ajmtestintr
-
-#include <linux/timer.h>
-struct timer_list intr_test_timer = TIMER_INITIALIZER(NULL, 0, 0);
-int intr_test_icount[NR_IRQS];
-struct intr_test_reg_struct {
- pcibr_soft_t pcibr_soft;
- int slot;
+struct pcibr_intr_list_t {
+ struct pcibr_intr_list_t *next;
+ pcibr_intr_t intr;
};
-struct intr_test_reg_struct intr_test_registered[NR_IRQS];
+
+static struct pcibr_intr_list_t **pcibr_intr_list;
void
-intr_test_handle_timer(unsigned long data) {
+register_pcibr_intr(int irq, pcibr_intr_t intr) {
+ struct pcibr_intr_list_t *p = kmalloc(sizeof(struct pcibr_intr_list_t), GFP_KERNEL);
+ struct pcibr_intr_list_t *list;
+ int cpu = SN_CPU_FROM_IRQ(irq);
+
+ if (pcibr_intr_list == NULL) {
+ pcibr_intr_list = kmalloc(sizeof(struct pcibr_intr_list_t *) * NR_IRQS, GFP_KERNEL);
+ if (pcibr_intr_list == NULL) panic("Could not allocate memory for pcibr_intr_list\n");
+ memset( (void *)pcibr_intr_list, 0, sizeof(struct pcibr_intr_list_t *) * NR_IRQS);
+ }
+ if (pdacpu(cpu)->sn_last_irq < irq) {
+ pdacpu(cpu)->sn_last_irq = irq;
+ }
+ if (pdacpu(cpu)->sn_first_irq > irq) pdacpu(cpu)->sn_first_irq = irq;
+ if (!p) panic("Could not allocate memory for pcibr_intr_list_t\n");
+ if ((list = pcibr_intr_list[irq])) {
+ while (list->next) list = list->next;
+ list->next = p;
+ p->next = NULL;
+ p->intr = intr;
+ } else {
+ pcibr_intr_list[irq] = p;
+ p->next = NULL;
+ p->intr = intr;
+ }
+}
+
+/*
+ * force_polled_int: walk every per-irq chain of registered bridge
+ * interrupts and force each one, so devices still get serviced while
+ * the kernel is polling instead of taking vectored interrupts.
+ */
+void
+force_polled_int(void) {
	int i;
-	bridge_t *bridge;
-
-	for (i=0;i<NR_IRQS;i++) {
-		if (intr_test_registered[i].pcibr_soft) {
-			pcibr_soft_t pcibr_soft = intr_test_registered[i].pcibr_soft;
-			xtalk_intr_t intr = pcibr_soft->bs_intr[intr_test_registered[i].slot].bsi_xtalk_intr;
-			/* send interrupt */
-			bridge = pcibr_soft->bs_base;
-			bridge->b_force_always[intr_test_registered[i].slot].intr = 1;
+	struct pcibr_intr_list_t *p;
+
+	/* the head table is allocated lazily by register_pcibr_intr();
+	 * nothing to scan (and nothing safe to deref) before that */
+	if (pcibr_intr_list == NULL)
+		return;
+
+	for (i=0; i<NR_IRQS;i++) {
+		p = pcibr_intr_list[i];
+		while (p) {
+			if (p->intr){
+				pcibr_force_interrupt(p->intr);
+			}
+			p = p->next;
		}
	}
-	del_timer(&intr_test_timer);
-	intr_test_timer.expires = jiffies + HZ/100;
-	add_timer(&intr_test_timer);
}
-void
-intr_test_set_timer(void) {
-	intr_test_timer.expires = jiffies + HZ/100;
-	intr_test_timer.function = intr_test_handle_timer;
-	add_timer(&intr_test_timer);
+/*
+ * force_interrupt: replay every bridge interrupt registered under @irq
+ * by asking the bridge to raise it again.
+ */
+static void
+force_interrupt(int irq) {
+	struct pcibr_intr_list_t *p;
+
+	/* guard against being called before any registration allocated the
+	 * head table (pcibr_intr_list stays NULL until then) */
+	if (pcibr_intr_list == NULL)
+		return;
+
+	p = pcibr_intr_list[irq];
+	while (p) {
+		if (p->intr) {
+			pcibr_force_interrupt(p->intr);
+		}
+		p = p->next;
+	}
+}
+
+/*
+ * Check for lost interrupts.  If the PIC int_status reg. says that
+ * an interrupt has been sent, but not handled, and the interrupt
+ * is not pending in either the cpu irr regs or in the soft irr regs,
+ * and the interrupt is not in service, then the interrupt may have
+ * been lost.  Force an interrupt on that pin.  It is possible that
+ * the interrupt is in flight, so we may generate a spurious interrupt,
+ * but we should never miss a real lost interrupt.
+ */
+
+static void
+sn_check_intr(int irq, pcibr_intr_t intr) {
+	unsigned long regval;
+	int irr_reg_num;
+	int irr_bit;
+	unsigned long irr_reg = 0;	/* stays 0 if the vector maps outside irr0-3 */
+
+	regval = intr->bi_soft->bs_base->p_int_status_64;
+	/* locate the vector's bit within the four 64-bit IRR registers */
+	irr_reg_num = irq_to_vector(irq) / 64;
+	irr_bit = irq_to_vector(irq) % 64;
+	switch (irr_reg_num) {
+		case 0:
+			irr_reg = ia64_get_irr0();
+			break;
+		case 1:
+			irr_reg = ia64_get_irr1();
+			break;
+		case 2:
+			irr_reg = ia64_get_irr2();
+			break;
+		case 3:
+			irr_reg = ia64_get_irr3();
+			break;
+	}
+	/* not pending in hw irr, soft irr, or in service: possibly lost.
+	 * Only force when the pin was also pending on the previous check
+	 * (bi_last_intr), i.e. it has been stuck for a full interval. */
+	if (!test_bit(irr_bit, &irr_reg) ) {
+		if (!test_bit(irq, pda->sn_soft_irr) ) {
+			if (!test_bit(irq, pda->sn_in_service_ivecs) ) {
+				regval &= 0xff;
+				if (intr->bi_ibits & regval & intr->bi_last_intr) {
+					regval &= ~(intr->bi_ibits & regval);
+					pcibr_force_interrupt(intr);
+				}
+			}
+		}
+	}
+	intr->bi_last_intr = regval;
+}
+/*
+ * sn_lb_int_war_check: scan this cpu's registered irq range
+ * (pda->sn_first_irq .. pda->sn_last_irq) and run the lost-interrupt
+ * check (sn_check_intr) on every bridge interrupt chained to each irq.
+ * sn_first_irq == 0 means nothing was ever registered on this cpu.
+ */
void
-intr_test_register_irq(int irq, pcibr_soft_t pcibr_soft, int slot) {
-	irq = irq & 0xff;
-	intr_test_registered[irq].pcibr_soft = pcibr_soft;
-	intr_test_registered[irq].slot = slot;
+sn_lb_int_war_check(void) {
+	int i;
+
+	if (pda->sn_first_irq == 0) return;
+	for (i=pda->sn_first_irq;
+	    i <= pda->sn_last_irq; i++) {
+		struct pcibr_intr_list_t *p = pcibr_intr_list[i];
+		if (p == NULL) {
+			continue;
+		}
+		while (p) {
+			sn_check_intr(i, p->intr);
+			p = p->next;
+		}
+	}
+}
+
+/*
+ * sn_get_next_bit: find a pending bit in the per-cpu soft irr
+ * (pda->sn_soft_irr[0..3]), clear it, and return its vector number;
+ * returns IA64_SPURIOUS_INT_VECTOR when no bit is pending.
+ */
+static inline int
+sn_get_next_bit(void) {
+	int i;
+	int bit;
+
+	/* scan the high words first so higher-numbered vectors win */
+	for (i = 3; i >= 0; i--) {
+		if (pda->sn_soft_irr[i] != 0) {
+			bit = (i * 64) + __ffs(pda->sn_soft_irr[i]);
+			/* bit is known to be set, so __change_bit clears it */
+			__change_bit(bit, (volatile void *)pda->sn_soft_irr);
+			return(bit);
+		}
+	}
+	return IA64_SPURIOUS_INT_VECTOR;
+}
+/*
+ * sn_set_tpr: set the cpu Task Priority Register.  Vectors inside the
+ * device-vector range are clamped to IA64_LAST_DEVICE_VECTOR; vectors
+ * outside the range are written through unchanged.
+ */
void
-intr_test_handle_intr(int irq, void *junk, struct pt_regs *morejunk) {
-	intr_test_icount[irq]++;
-	printk("RECEIVED %d INTERRUPTS ON IRQ %d\n",intr_test_icount[irq], irq);
+sn_set_tpr(int vector) {
+	if (vector > IA64_LAST_DEVICE_VECTOR || vector < IA64_FIRST_DEVICE_VECTOR) {
+		ia64_set_tpr(vector);
+	} else {
+		ia64_set_tpr(IA64_LAST_DEVICE_VECTOR);
+	}
+}
+
+/*
+ * sn_get_all_ivr: drain the cpu's IVR, latching each delivered vector
+ * into the per-cpu soft irr and EOI-ing it, until the spurious vector
+ * is returned or a vector above the device range is read.
+ */
+static inline void
+sn_get_all_ivr(void) {
+	int vector;
+
+	vector = ia64_get_ivr();
+	while (vector != IA64_SPURIOUS_INT_VECTOR) {
+		__set_bit(vector, (volatile void *)pda->sn_soft_irr);
+		ia64_eoi();
+		/* NOTE(review): stops draining after a non-device vector is
+		 * latched — presumably picked up later; confirm with callers */
+		if (vector > IA64_LAST_DEVICE_VECTOR) return;
+		vector = ia64_get_ivr();
+	}
+}
+
+/*
+ * sn_get_ivr: return the next vector to service.  Prefers bits already
+ * latched in the soft irr; on a miss, drains the hardware IVR into the
+ * soft irr (sn_get_all_ivr) and retries once.  May return
+ * IA64_SPURIOUS_INT_VECTOR if nothing is pending at all.
+ */
+int
+sn_get_ivr(void) {
+	int vector;
+
+	vector = sn_get_next_bit();
+	if (vector == IA64_SPURIOUS_INT_VECTOR) {
+		sn_get_all_ivr();
+		vector = sn_get_next_bit();
+	}
+	return vector;
+}
-#endif /* ajmtestintr */
+++ /dev/null
-/*
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Copyright (C) 2000-2002 Silicon Graphics, Inc. All rights reserved.
- */
-
-#include <linux/config.h>
-#include <linux/kernel.h>
-#include <linux/sched.h>
-#include <linux/init.h>
-#include <linux/interrupt.h>
-#include <linux/smp.h>
-#include <linux/kernel_stat.h>
-#include <linux/mm.h>
-#include <linux/delay.h>
-#include <linux/string.h>
-#include <linux/efi.h>
-#include <asm/page.h>
-#include <linux/threads.h>
-#include <asm/sn/simulator.h>
-#include <asm/sn/leds.h>
-
-#include "llsc4.h"
-
-
-#ifdef STANDALONE
-#include "lock.h"
-#endif
-
-#ifdef INTTEST
-static int inttest=0;
-#endif
-
-#ifdef IA64_SEMFIX_INSN
-#undef IA64_SEMFIX_INSN
-#endif
-#ifdef IA64_SEMFIX
-#undef IA64_SEMFIX
-#endif
-# define IA64_SEMFIX_INSN
-# define IA64_SEMFIX ""
-
-#define NOLOCK 0xdead
-#define BGUARD(linei) (0xbbbb0000 | (linei));
-#define EGUARD(linei) (0xeeee0000 | (linei));
-#define GUARDLINE(v) ((v)&0xffff)
-
-/*
- * Test parameter table for AUTOTEST
- */
-typedef struct {
- int passes;
- int linecount;
- int linepad;
-} autotest_table_t;
-
-autotest_table_t autotest_table[] = {
- {50000000, 2, 0x2b4 },
- {50000000, 16, 0, },
- {50000000, 16, 4, },
- {50000000, 128, 0x44 },
- {50000000, 128, 0x84 },
- {50000000, 128, 0x200 },
- {50000000, 128, 0x204 },
- {50000000, 128, 0x2b4 },
- {50000000, 2, 8*MB+0x2b4 },
- {50000000, 16, 8*MB+0 },
- {50000000, 16, 8*MB+4 },
- {50000000, 128, 8*MB+0x44 },
- {50000000, 128, 8*MB+0x84 },
- {50000000, 128, 8*MB+0x200 },
- {50000000, 128, 8*MB+0x204 },
- {50000000, 128, 8*MB+0x2b4 },
- {0}};
-
-/*
- * Array of virtual addresses available for test purposes.
- */
-
-typedef struct {
- long vstart;
- long vend;
- long nextaddr;
- long nextinit;
- int wrapcount;
-} memmap_t;
-
-#define MAPCHUNKS 128
-memmap_t memmap[MAPCHUNKS];
-int memmapx=0;
-
-typedef struct {
- void *addr;
- long data[16];
- long data_fc[16];
-} capture_line_t;
-
-typedef struct {
- int size;
- void *blockaddr;
- void *shadaddr;
- long blockdata[48];
- long shaddata[48];
- long blockdata_fc[48];
- long shaddata_fc[48];
- long synerr;
-} capture_t;
-
-/*
- * PORTING NOTE: revisit this statement. On hardware we put mbase at 0 and
- * the rest of the tables have to start at 1MB to skip PROM tables.
- */
-#define THREADPRIVATESZ() ((sizeof(threadprivate_t)+511)/512*512)
-#define THREADPRIVATE(t) ((threadprivate_t*)(((long)mbase)+4096+t*THREADPRIVATESZ()))
-
-#define k_capture mbase->sk_capture
-#define k_go mbase->sk_go
-#define k_linecount mbase->sk_linecount
-#define k_passes mbase->sk_passes
-#define k_napticks mbase->sk_napticks
-#define k_stop_on_error mbase->sk_stop_on_error
-#define k_verbose mbase->sk_verbose
-#define k_threadprivate mbase->sk_threadprivate
-#define k_blocks mbase->sk_blocks
-#define k_iter_msg mbase->sk_iter_msg
-#define k_vv mbase->sk_vv
-#define k_linepad mbase->sk_linepad
-#define k_options mbase->sk_options
-#define k_testnumber mbase->sk_testnumber
-#define k_currentpass mbase->sk_currentpass
-
-static long blocks[MAX_LINECOUNT]; /* addresses of data blocks */
-static control_t *mbase;
-static vint initialized=0;
-
-static unsigned int ran_conf_llsc(int);
-static int rerr(capture_t *, char *, void *, void *, int, int, int, int, int, int);
-static void dumpline(void *, char *, char *, void *, void *, int);
-static int checkstop(int, int, uint);
-static void spin(int);
-static void capturedata(capture_t *, uint, void *, void *, int);
-static int randn(uint max, uint *seed);
-static uint zrandom (uint *zranseed);
-static int set_lock(uint *, uint);
-static int clr_lock(uint *, uint);
-static void Speedo(void);
-
-int autotest_enabled=0;
-static int llsctest_number=-1;
-static int errstop_enabled=0;
-static int fail_enabled=0;
-static int l4_opt=0;
-static int selective_trigger=0;
-static int dump_block_addrs_opt=0;
-static lock_t errlock=NOLOCK;
-static private_t init_private[LLSC_MAXCPUS];
-
-static int __init autotest_enable(char *str)
-{
- autotest_enabled = 1;
- return 1;
-}
-static int __init set_llscblkadr(char *str)
-{
- dump_block_addrs_opt = 1;
- return 1;
-}
-static int __init set_llscselt(char *str)
-{
- selective_trigger = 1;
- return 1;
-}
-static int __init set_llsctest(char *str)
-{
- llsctest_number = simple_strtol(str, &str, 10);
- if (llsctest_number < 0 || llsctest_number > 15)
- llsctest_number = -1;
- return 1;
-}
-static int __init set_llscerrstop(char *str)
-{
- errstop_enabled = 1;
- return 1;
-}
-static int __init set_llscfail(char *str)
-{
- fail_enabled = 8;
- return 1;
-}
-static int __init set_llscl4(char *str)
-{
- l4_opt = 1;
- return 1;
-}
-
-static void print_params(void)
-{
- printk ("********* Enter AUTOTEST facility on master cpu *************\n");
- printk (" Test options:\n");
- printk (" llsctest=<n>\t%d\tTest number to run (all = -1)\n", llsctest_number);
- printk (" llscerrstop \t%s\tStop on error\n", errstop_enabled ? "on" : "off");
- printk (" llscfail \t%s\tForce a failure to test the trigger & error messages\n", fail_enabled ? "on" : "off");
- printk (" llscselt \t%s\tSelective triger on failures\n", selective_trigger ? "on" : "off");
- printk (" llscblkadr \t%s\tDump data block addresses\n", dump_block_addrs_opt ? "on" : "off");
- printk (" llscl4 \t%s\tRun only tests that evict from L4\n", l4_opt ? "on" : "off");
- printk (" SEMFIX: %s\n", IA64_SEMFIX);
- printk ("\n");
-}
-__setup("autotest", autotest_enable);
-__setup("llsctest=", set_llsctest);
-__setup("llscerrstop", set_llscerrstop);
-__setup("llscfail", set_llscfail);
-__setup("llscselt", set_llscselt);
-__setup("llscblkadr", set_llscblkadr);
-__setup("llscl4", set_llscl4);
-
-
-
-static inline int
-set_lock(uint *lock, uint id)
-{
- uint old;
- old = cmpxchg_acq(lock, NOLOCK, id);
- return (old == NOLOCK);
-}
-
-static inline int
-clr_lock(uint *lock, uint id)
-{
- uint old;
- old = cmpxchg_rel(lock, id, NOLOCK);
- return (old == id);
-}
-
-static inline void
-init_lock(uint *lock)
-{
- *lock = NOLOCK;
-}
-
-/*------------------------------------------------------------------------+
-| Routine : ran_conf_llsc - ll/sc shared data test |
-| Description: This test checks the coherency of shared data |
-+------------------------------------------------------------------------*/
-static unsigned int
-ran_conf_llsc(int thread)
-{
- private_t pval;
- share_t sval, sval2;
- uint vv, linei, slinei, sharei, pass;
- long t;
- lock_t lockpat;
- share_t *sharecopy;
- long verbose, napticks, passes, linecount, lcount;
- dataline_t *linep, *slinep;
- int s, seed;
- threadprivate_t *tp;
- uint iter_msg, iter_msg_i=0;
- int vv_mask;
- int correct_errors;
- int errs=0;
- int stillbad;
- capture_t capdata;
- private_t *privp;
- share_t *sharep;
-
-
- linecount = k_linecount;
- napticks = k_napticks;
- verbose = k_verbose;
- passes = k_passes;
- iter_msg = k_iter_msg;
- seed = (thread + 1) * 647;
- tp = THREADPRIVATE(thread);
- vv_mask = (k_vv>>((thread%16)*4)) & 0xf;
- correct_errors = k_options&0xff;
-
- memset (&capdata, 0, sizeof(capdata));
- for (linei=0; linei<linecount; linei++)
- tp->private[linei] = thread;
-
- for (pass = 1; passes == 0 || pass < passes; pass++) {
- lockpat = (pass & 0x0fffffff) + (thread <<28);
- if (lockpat == NOLOCK)
- continue;
- tp->threadpasses = pass;
- if (checkstop(thread, pass, lockpat))
- return 0;
- iter_msg_i++;
- if (iter_msg && iter_msg_i > iter_msg) {
- printk("Thread %d, Pass %d\n", thread, pass);
- iter_msg_i = 0;
- }
- lcount = 0;
-
- /*
- * Select line to perform operations on.
- */
- linei = randn(linecount, &seed);
- sharei = randn(2, &seed);
- slinei = (linei + (linecount/2))%linecount; /* I don't like this - fix later */
-
- linep = (dataline_t *)blocks[linei];
- slinep = (dataline_t *)blocks[slinei];
- if (sharei == 0)
- sharecopy = &slinep->share0;
- else
- sharecopy = &slinep->share1;
-
-
- vv = randn(4, &seed);
- if ((vv_mask & (1<<vv)) == 0)
- continue;
-
- if (napticks) {
- t = randn(napticks, &seed);
- udelay(t);
- }
- privp = &linep->private[thread];
- sharep = &linep->share[sharei];
-
- switch(vv) {
- case 0:
- /* Read and verify private count on line. */
- pval = *privp;
- if (verbose)
- printk("Line:%3d, Thread:%d:%d. Val: %x\n", linei, thread, vv, tp->private[linei]);
- if (pval != tp->private[linei]) {
- capturedata(&capdata, pass, privp, NULL, sizeof(*privp));
- stillbad = (*privp != tp->private[linei]);
- if (rerr(&capdata, "Private count", linep, slinep, thread, pass, linei, tp->private[linei], pval, stillbad)) {
- return 1;
- }
- if (correct_errors) {
- tp->private[linei] = *privp;
- }
- errs++;
- }
- break;
-
- case 1:
- /* Read, verify, and increment private count on line. */
- pval = *privp;
- if (verbose)
- printk("Line:%3d, Thread:%d:%d. Val: %x\n", linei, thread, vv, tp->private[linei]);
- if (pval != tp->private[linei]) {
- capturedata(&capdata, pass, privp, NULL, sizeof(*privp));
- stillbad = (*privp != tp->private[linei]);
- if (rerr(&capdata, "Private count & inc", linep, slinep, thread, pass, linei, tp->private[linei], pval, stillbad)) {
- return 1;
- }
- errs++;
- }
- pval = (pval==255) ? 0 : pval+1;
- *privp = pval;
- tp->private[linei] = pval;
- break;
-
- case 2:
- /* Lock line, read and verify shared data. */
- if (verbose)
- printk("Line:%3d, Thread:%d:%d. Val: %x\n", linei, thread, vv, *sharecopy);
- lcount = 0;
- while (LOCK(sharei) != 1) {
- if (checkstop(thread, pass, lockpat))
- return 0;
- if (lcount++>1000000) {
- capturedata(&capdata, pass, LOCKADDR(sharei), NULL, sizeof(lock_t));
- stillbad = (GETLOCK(sharei) != 0);
- rerr(&capdata, "Shared data lock", linep, slinep, thread, pass, linei, 0, GETLOCK(sharei), stillbad);
- return 1;
- }
- if ((lcount&0x3fff) == 0)
- udelay(1000);
- }
-
- sval = *sharep;
- sval2 = *sharecopy;
- if (pass > 12 && thread == 0 && fail_enabled == 1)
- sval++;
- if (sval != sval2) {
- capturedata(&capdata, pass, sharep, sharecopy, sizeof(*sharecopy));
- stillbad = (*sharep != *sharecopy);
- if (!stillbad && *sharep != sval && *sharecopy == sval2)
- stillbad = 2;
- if (rerr(&capdata, "Shared data", linep, slinep, thread, pass, linei, sval2, sval, stillbad)) {
- return 1;
- }
- if (correct_errors)
- *sharep = *sharecopy;
- errs++;
- }
-
-
- if ( (s=UNLOCK(sharei)) != 1) {
- capturedata(&capdata, pass, LOCKADDR(sharei), NULL, 4);
- stillbad = (GETLOCK(sharei) != lockpat);
- if (rerr(&capdata, "Shared data unlock", linep, slinep, thread, pass, linei, lockpat, GETLOCK(sharei), stillbad))
- return 1;
- if (correct_errors)
- ZEROLOCK(sharei);
- errs++;
- }
- break;
-
- case 3:
- /* Lock line, read and verify shared data, modify shared data. */
- if (verbose)
- printk("Line:%3d, Thread:%d:%d. Val: %x\n", linei, thread, vv, *sharecopy);
- lcount = 0;
- while (LOCK(sharei) != 1) {
- if (checkstop(thread, pass, lockpat))
- return 0;
- if (lcount++>1000000) {
- capturedata(&capdata, pass, LOCKADDR(sharei), NULL, sizeof(lock_t));
- stillbad = (GETLOCK(sharei) != 0);
- rerr(&capdata, "Shared data lock & inc", linep, slinep, thread, pass, linei, 0, GETLOCK(sharei), stillbad);
- return 1;
- }
- if ((lcount&0x3fff) == 0)
- udelay(1000);
- }
- sval = *sharep;
- sval2 = *sharecopy;
- if (sval != sval2) {
- capturedata(&capdata, pass, sharep, sharecopy, sizeof(*sharecopy));
- stillbad = (*sharep != *sharecopy);
- if (!stillbad && *sharep != sval && *sharecopy == sval2)
- stillbad = 2;
- if (rerr(&capdata, "Shared data & inc", linep, slinep, thread, pass, linei, sval2, sval, stillbad)) {
- return 1;
- }
- errs++;
- }
-
- *sharep = lockpat;
- *sharecopy = lockpat;
-
-
- if ( (s=UNLOCK(sharei)) != 1) {
- capturedata(&capdata, pass, LOCKADDR(sharei), NULL, 4);
- stillbad = (GETLOCK(sharei) != lockpat);
- if (rerr(&capdata, "Shared data & inc unlock", linep, slinep, thread, pass, linei, thread, GETLOCK(sharei), stillbad))
- return 1;
- if (correct_errors)
- ZEROLOCK(sharei);
- errs++;
- }
- break;
- }
- }
-
- return (errs > 0);
-}
-
-static void
-trigger_la(long val)
-{
- long *p;
-
- p = (long*)0xc0000a0001000020L; /* PI_CPU_NUM */
- *p = val;
-}
-
-static long
-getsynerr(void)
-{
- long err, *errp;
-
- errp = (long*)0xc0000e0000000340L; /* SYN_ERR */
- err = *errp;
- if (err)
- *errp = -1L;
- return (err & ~0x60);
-}
-
-static int
-rerr(capture_t *cap, char *msg, void *lp, void *slp, int thread, int pass, int badlinei, int exp, int found, int stillbad)
-{
- int cpu, i, linei;
- long synerr;
- int selt;
-
-
- selt = selective_trigger && stillbad > 1 &&
- memcmp(cap->blockdata, cap->blockdata_fc, 128) != 0 &&
- memcmp(cap->shaddata, cap->shaddata_fc, 128) == 0;
- if (selt) {
- trigger_la(pass);
- } else if (selective_trigger) {
- k_go = ST_STOP;
- return k_stop_on_error;;
- }
-
- spin(1);
- i = 100;
- while (i && set_lock(&errlock, 1) != 1) {
- spin(1);
- i--;
- }
- printk ("\nDataError!: %-20s, test %ld, thread %d, line:%d, pass %d (0x%x), time %ld expected:%x, found:%x\n",
- msg, k_testnumber, thread, badlinei, pass, pass, jiffies, exp, found);
-
- dumpline (lp, "Corrupted data", "D ", cap->blockaddr, cap->blockdata, cap->size);
-#ifdef ZZZ
- if (memcmp(cap->blockdata, cap->blockdata_fc, 128))
- dumpline (lp, "Corrupted data", "DF", cap->blockaddr, cap->blockdata_fc, cap->size);
-#endif
-
- if (cap->shadaddr) {
- dumpline (slp, "Shadow data", "S ", cap->shadaddr, cap->shaddata, cap->size);
-#ifdef ZZZ
- if (memcmp(cap->shaddata, cap->shaddata_fc, 128))
- dumpline (slp, "Shadow data", "SF", cap->shadaddr, cap->shaddata_fc, cap->size);
-#endif
- }
-
- printk("Threadpasses: ");
- for (cpu=0,i=0; cpu<LLSC_MAXCPUS; cpu++)
- if (k_threadprivate[cpu]->threadpasses) {
- if (i && (i%8) == 0)
- printk("\n : ");
- printk(" %d:0x%x", cpu, k_threadprivate[cpu]->threadpasses);
- i++;
- }
- printk("\n");
-
- for (linei=0; linei<k_linecount; linei++) {
- int slinei, g1linei, g2linei, g1err, g2err, sh0err, sh1err;
- dataline_t *linep, *slinep;
-
- slinei = (linei + (k_linecount/2))%k_linecount;
- linep = (dataline_t *)blocks[linei];
- slinep = (dataline_t *)blocks[slinei];
-
- g1linei = GUARDLINE(linep->guard1);
- g2linei = GUARDLINE(linep->guard2);
- g1err = (g1linei != linei);
- g2err = (g2linei != linei);
- sh0err = (linep->share[0] != slinep->share0);
- sh1err = (linep->share[1] != slinep->share1);
-
- if (g1err || g2err || sh0err || sh1err) {
- printk("Line 0x%lx (%03d), %sG1 0x%lx (%03d), %sG2 0x%lx (%03d), %sSH0 %08x (%08x), %sSH1 %08x (%08x)\n",
- blocks[linei], linei,
- g1err ? "*" : " ", blocks[g1linei], g1linei,
- g2err ? "*" : " ", blocks[g2linei], g2linei,
- sh0err ? "*" : " ", linep->share[0], slinep->share0,
- sh1err ? "*" : " ", linep->share[1], slinep->share1);
-
-
- }
- }
-
- printk("\nData was %sfixed by flushcache\n", (stillbad == 1 ? "**** NOT **** " : " "));
- synerr = getsynerr();
- if (synerr)
- printk("SYNERR: Thread %d, Synerr: 0x%lx\n", thread, synerr);
- spin(2);
- printk("\n\n");
- clr_lock(&errlock, 1);
-
- if (errstop_enabled) {
- local_irq_disable();
- while(1);
- }
- return k_stop_on_error;
-}
-
-
-static void
-dumpline(void *lp, char *str1, char *str2, void *addr, void *data, int size)
-{
- long *p;
- int i, off;
-
- printk("%s at 0x%lx, size %d, block starts at 0x%lx\n", str1, (long)addr, size, (long)lp);
- p = (long*) data;
- for (i=0; i<48; i++, p++) {
- if (i%8 == 0) printk("%2s", i==16 ? str2 : " ");
- printk(" %016lx", *p);
- if ((i&7)==7) printk("\n");
- }
- printk(" ");
- off = (((long)addr) ^ size) & 63L;
- for (i=0; i<off+size; i++) {
- printk("%s", (i>=off) ? "--" : " ");
- if ((i%8) == 7)
- printk(" ");
- }
-
- off = ((long)addr) & 127;
- printk(" (line %d)\n", 2+off/64+1);
-}
-
-
-static int
-randn(uint max, uint *seedp)
-{
- if (max == 1)
- return(0);
- else
- return((int)(zrandom(seedp)>>10) % max);
-}
-
-
-static int
-checkstop(int thread, int pass, uint lockpat)
-{
- long synerr;
-
- if (k_go == ST_RUN)
- return 0;
- if (k_go == ST_STOP)
- return 1;
-
- if (errstop_enabled) {
- local_irq_disable();
- while(1);
- }
- synerr = getsynerr();
- spin(2);
- if (k_go == ST_STOP)
- return 1;
- if (synerr)
- printk("SYNERR: Thread %d, Synerr: 0x%lx\n", thread, synerr);
- return 1;
-}
-
-
-static void
-spin(int j)
-{
- udelay(j * 500000);
-}
-
-static void
-capturedata(capture_t *cap, uint pass, void *blockaddr, void *shadaddr, int size)
-{
-
- if (!selective_trigger)
- trigger_la (pass);
-
- memcpy (cap->blockdata, CACHEALIGN(blockaddr)-128, 3*128);
- if (shadaddr)
- memcpy (cap->shaddata, CACHEALIGN(shadaddr)-128, 3*128);
-
- if (k_stop_on_error) {
- k_go = ST_ERRSTOP;
- }
-
- cap->size = size;
- cap->blockaddr = blockaddr;
- cap->shadaddr = shadaddr;
-
- asm volatile ("fc %0" :: "r"(blockaddr) : "memory");
- ia64_sync_i();
- ia64_srlz_d();
- memcpy (cap->blockdata_fc, CACHEALIGN(blockaddr)-128, 3*128);
-
- if (shadaddr) {
- asm volatile ("fc %0" :: "r"(shadaddr) : "memory");
- ia64_sync_i();
- ia64_srlz_d();
- memcpy (cap->shaddata_fc, CACHEALIGN(shadaddr)-128, 3*128);
- }
-}
-
-int zranmult = 0x48c27395;
-
-static uint
-zrandom (uint *seedp)
-{
- *seedp = (*seedp * zranmult) & 0x7fffffff;
- return (*seedp);
-}
-
-
-void
-set_autotest_params(void)
-{
- static int testnumber=-1;
-
- if (llsctest_number >= 0) {
- testnumber = llsctest_number;
- } else {
- testnumber++;
- if (autotest_table[testnumber].passes == 0) {
- testnumber = 0;
- dump_block_addrs_opt = 0;
- }
- }
- if (testnumber == 0 && l4_opt) testnumber = 9;
-
- k_passes = autotest_table[testnumber].passes;
- k_linepad = autotest_table[testnumber].linepad;
- k_linecount = autotest_table[testnumber].linecount;
- k_testnumber = testnumber;
-
- if (IS_RUNNING_ON_SIMULATOR()) {
- printk ("llsc start test %ld\n", k_testnumber);
- k_passes = 1000;
- }
-}
-
-
-static void
-set_leds(int errs)
-{
- unsigned char leds=0;
-
- /*
- * Leds are:
- * ppppeee-
- * where
- * pppp = test number
- * eee = error count but top bit is stick
- */
-
- leds = ((errs&7)<<1) | ((k_testnumber&15)<<4) | (errs ? 0x08 : 0);
- set_led_bits(leds, LED_MASK_AUTOTEST);
-}
-
-static void
-setup_block_addresses(void)
-{
- int i, stride, memmapi;
- dataline_t *dp;
- long *ip, *ipe;
-
-
- stride = k_linepad + sizeof(dataline_t);
- memmapi = 0;
- for (i=0; i<memmapx; i++) {
- memmap[i].nextaddr = memmap[i].vstart;
- memmap[i].nextinit = memmap[i].vstart;
- memmap[i].wrapcount = 0;
- }
-
- for (i=0; i<k_linecount; i++) {
- blocks[i] = memmap[memmapi].nextaddr;
- dp = (dataline_t*)blocks[i];
- memmap[memmapi].nextaddr += (stride & 0xffff);
- if (memmap[memmapi].nextaddr + sizeof(dataline_t) >= memmap[memmapi].vend) {
- memmap[memmapi].wrapcount++;
- memmap[memmapi].nextaddr = memmap[memmapi].vstart +
- memmap[memmapi].wrapcount * sizeof(dataline_t);
- }
-
- ip = (long*)((memmap[memmapi].nextinit+7)&~7);
- ipe = (long*)(memmap[memmapi].nextaddr+2*sizeof(dataline_t)+8);
- while(ip <= ipe && ip < ((long*)memmap[memmapi].vend-8))
- *ip++ = (long)ip;
- memmap[memmapi].nextinit = (long) ipe;
- dp->guard1 = BGUARD(i);
- dp->guard2 = EGUARD(i);
- dp->lock[0] = dp->lock[1] = NOLOCK;
- dp->share[0] = dp->share0 = 0x1111;
- dp->share[1] = dp->share1 = 0x2222;
- memcpy(dp->private, init_private, LLSC_MAXCPUS*sizeof(private_t));
-
-
- if (stride > 16384) {
- memmapi++;
- if (memmapi == memmapx)
- memmapi = 0;
- }
- }
-
-}
-
-static void
-dump_block_addrs(void)
-{
- int i;
-
- printk("LLSC TestNumber %ld\n", k_testnumber);
-
- for (i=0; i<k_linecount; i++) {
- printk(" %lx", blocks[i]);
- if (i%4 == 3)
- printk("\n");
- }
- printk("\n");
-}
-
-
-static void
-set_thread_state(int cpuid, int state)
-{
- if (k_threadprivate[cpuid]->threadstate == TS_KILLED) {
- set_led_bits(LED_MASK_AUTOTEST, LED_MASK_AUTOTEST);
- while(1);
- }
- k_threadprivate[cpuid]->threadstate = state;
-}
-
-#define MINBLK (16*1024*1024)
-static int
-build_mem_map(unsigned long start, unsigned long end, void *arg)
-{
- long lstart, lend;
- long align = 8*MB;
-
- printk ("LLSC memmap: start 0x%lx, end 0x%lx, (0x%lx - 0x%lx)\n",
- start, end, (long) virt_to_page(start), (long) virt_to_page(end-PAGE_SIZE));
-
- if (memmapx >= MAPCHUNKS || (end-start) < MINBLK)
- return 0;
-
- /*
- * Start in the middle of the range & find the first non-free page in both directions
- * from the midpoint. This is likely to be the bigest free block.
- */
- lend = lstart = start + (end-start)/2;
- while (lend < end && !PageReserved(virt_to_page(lend)) && virt_to_page(lend)->count.counter == 0)
- lend += PAGE_SIZE;
- lend -= PAGE_SIZE;
-
- while (lstart >= start && !PageReserved(virt_to_page(lstart)) && virt_to_page(lstart)->count.counter == 0)
- lstart -= PAGE_SIZE;
- lstart += PAGE_SIZE;
-
- lstart = (lstart + align -1) /align * align;
- end = end / align * align;
- if (lstart >= end)
- return 0;
- printk (" memmap: start 0x%lx, end 0x%lx\n", lstart, end);
-
- memmap[memmapx].vstart = lstart;
- memmap[memmapx].vend = end;
- memmapx++;
- return 0;
-}
-
-void int_test(void);
-
-int
-llsc_main (int cpuid)
-{
- int i, cpu, is_master, repeatcnt=0;
- unsigned int preverr=0, errs=0, pass=0;
- int automode=0;
-
-#ifdef INTTEST
- if (inttest)
- int_test();
-#endif
-
- if (!autotest_enabled)
- return 0;
-
-#ifdef CONFIG_SMP
- is_master = !smp_processor_id();
-#else
- is_master = 1;
-#endif
-
-
- if (is_master) {
- mbase = (control_t*) __get_free_pages(GFP_KERNEL, get_order(4096+THREADPRIVATESZ()*LLSC_MAXCPUS));
- printk("LLSC: mbase 0x%lx\n", (long)mbase);
- print_params();
- if(!IS_RUNNING_ON_SIMULATOR())
- spin(10);
- k_currentpass = 0;
- k_go = ST_IDLE;
- k_passes = DEF_PASSES;
- k_napticks = DEF_NAPTICKS;
- k_stop_on_error = DEF_STOP_ON_ERROR;
- k_verbose = DEF_VERBOSE;
- k_linecount = DEF_LINECOUNT;
- k_iter_msg = DEF_ITER_MSG;
- k_vv = DEF_VV;
- k_linepad = DEF_LINEPAD;
- k_blocks = (void*)blocks;
- efi_memmap_walk(build_mem_map, 0);
-
-#ifdef CONFIG_IA64_SGI_AUTOTEST
- automode = 1;
-#endif
-
- for (i=0; i<LLSC_MAXCPUS; i++) {
- k_threadprivate[i] = THREADPRIVATE(i);
- memset(k_threadprivate[i], 0, sizeof(*k_threadprivate[i]));
- init_private[i] = i;
- }
- mb();
- initialized = 1;
- } else {
- while (initialized == 0)
- udelay(100);
- }
-
-loop:
- if (is_master) {
- if (automode) {
- if (!preverr || repeatcnt++ > 5) {
- set_autotest_params();
- repeatcnt = 0;
- }
- } else {
- while (k_go == ST_IDLE);
- }
-
- k_go = ST_INIT;
- if (k_linecount > MAX_LINECOUNT) k_linecount = MAX_LINECOUNT;
- k_linecount = k_linecount & ~1;
- setup_block_addresses();
- if (!preverr && dump_block_addrs_opt)
- dump_block_addrs();
-
- k_currentpass = pass++;
- k_go = ST_RUN;
- if (fail_enabled)
- fail_enabled--;
-
- } else {
- while (k_go != ST_RUN || k_currentpass != pass);
- pass++;
- }
-
-
- set_leds(errs);
- set_thread_state(cpuid, TS_RUNNING);
-
- errs += ran_conf_llsc(cpuid);
- preverr = (k_go == ST_ERRSTOP);
-
- set_leds(errs);
- set_thread_state(cpuid, TS_STOPPED);
-
- if (is_master) {
- Speedo();
- for (i=0, cpu=0; cpu<LLSC_MAXCPUS; cpu++) {
- while (k_threadprivate[cpu]->threadstate == TS_RUNNING) {
- i++;
- if (i == 10000) {
- k_go = ST_STOP;
- printk (" llsc master stopping test number %ld\n", k_testnumber);
- }
- if (i > 100000) {
- k_threadprivate[cpu]->threadstate = TS_KILLED;
- printk (" llsc: master killing cpuid %d, running test number %ld\n",
- cpu, k_testnumber);
- }
- udelay(1000);
- }
- }
- }
-
- goto loop;
-}
-
-
-static void
-Speedo(void)
-{
- static int i = 0;
-
- switch (++i%4) {
- case 0:
- printk("|\b");
- break;
- case 1:
- printk("\\\b");
- break;
- case 2:
- printk("-\b");
- break;
- case 3:
- printk("/\b");
- break;
- }
-}
-
-#ifdef INTTEST
-
-/* ========================================================================================================
- *
- * Some test code to verify that interrupts work
- *
- * Add the following to the arch/ia64/kernel/smp.c after the comment "Reschedule callback"
- * if (zzzprint_resched) printk(" cpu %d got interrupt\n", smp_processor_id());
- *
- * Enable the code in arch/ia64/sn/sn1/smp.c to print sending IPIs.
- *
- */
-
-static int __init set_inttest(char *str)
-{
- inttest = 1;
- autotest_enabled = 1;
-
- return 1;
-}
-
-__setup("inttest=", set_inttest);
-
-int zzzprint_resched=0;
-
-void
-int_test() {
- int mycpu, cpu;
- static volatile int control_cpu=0;
-
- mycpu = smp_processor_id();
- zzzprint_resched = 2;
-
- printk("Testing cross interrupts\n");
-
- while (control_cpu != smp_num_cpus) {
- if (mycpu == cpu_logical_map(control_cpu)) {
- for (cpu=0; cpu<smp_num_cpus; cpu++) {
- printk("Sending interrupt from %d to %d\n", mycpu, cpu_logical_map(cpu));
- udelay(IS_RUNNING_ON_SIMULATOR ? 10000 : 400000);
- smp_send_reschedule(cpu_logical_map(cpu));
- udelay(IS_RUNNING_ON_SIMULATOR ? 10000 : 400000);
- smp_send_reschedule(cpu_logical_map(cpu));
- udelay(IS_RUNNING_ON_SIMULATOR ? 10000 : 400000);
- }
- control_cpu++;
- }
- }
-
- zzzprint_resched = 1;
-
- if (mycpu == cpu_logical_map(smp_num_cpus-1)) {
- printk("\nTight loop of cpu %d sending ints to cpu 0 (every 100 us)\n", mycpu);
- udelay(IS_RUNNING_ON_SIMULATOR ? 1000 : 1000000);
- __cli();
- while (1) {
- smp_send_reschedule(0);
- udelay(100);
- }
-
- }
-
- while(1);
-}
-#endif
+++ /dev/null
-/*
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Copyright (C) 2000-2002 Silicon Graphics, Inc. All rights reserved.
- */
-
-#ifdef STANDALONE
-#include "lock.h"
-#endif
-
-
-#define DEF_NAPTICKS 0
-#define DEF_PASSES 0
-#define DEF_AUTO_PASSES 1000000
-#define DEF_STOP_ON_ERROR 1
-#define DEF_VERBOSE 0
-#define DEF_LINECOUNT 2
-#define DEF_ITER_MSG 0
-#define DEF_VV 0xffffffff
-#define DEF_LINEPAD 0x234
-
-
-
-#define LLSC_MAXCPUS 64
-#define CACHELINE 64
-#define MAX_LINECOUNT 1024
-#define K 1024
-#define MB (K*K)
-
-
-#define uint unsigned int
-#define ushort unsigned short
-#define uchar unsigned char
-#define vint volatile int
-#define vlong volatile long
-
-#define LOCKADDR(i) &linep->lock[(i)]
-#define LOCK(i) set_lock(LOCKADDR(i), lockpat)
-#define UNLOCK(i) clr_lock(LOCKADDR(i), lockpat)
-#define GETLOCK(i) *LOCKADDR(i)
-#define ZEROLOCK(i) init_lock(LOCKADDR(i))
-
-#define CACHEALIGN(a) ((char*)((long)(a) & ~127L))
-
-typedef uint guard_t;
-typedef uint lock_t;
-typedef uint share_t;
-typedef uchar private_t;
-
-typedef struct {
- guard_t guard1;
- lock_t lock[2];
- share_t share[2];
- private_t private[LLSC_MAXCPUS];
- share_t share0;
- share_t share1;
- guard_t guard2;
-} dataline_t ;
-
-
-#define LINEPAD k_linepad
-#define LINESTRIDE (((sizeof(dataline_t)+CACHELINE-1)/CACHELINE)*CACHELINE + LINEPAD)
-
-
-typedef struct {
- vint threadstate;
- uint threadpasses;
- private_t private[MAX_LINECOUNT];
-} threadprivate_t;
-
-typedef struct {
- vlong sk_go; /* 0=idle, 1=init, 2=run */
- long sk_linecount;
- long sk_passes;
- long sk_napticks;
- long sk_stop_on_error;
- long sk_verbose;
- long sk_iter_msg;
- long sk_vv;
- long sk_linepad;
- long sk_options;
- long sk_testnumber;
- vlong sk_currentpass;
- void *sk_blocks;
- threadprivate_t *sk_threadprivate[LLSC_MAXCPUS];
-} control_t;
-
-/* Run state (k_go) constants */
-#define ST_IDLE 0
-#define ST_INIT 1
-#define ST_RUN 2
-#define ST_STOP 3
-#define ST_ERRSTOP 4
-
-
-/* Threadstate constants */
-#define TS_STOPPED 0
-#define TS_RUNNING 1
-#define TS_KILLED 2
-
-
-
-int llsc_main (int cpuid);
-
/*
- * Copyright (c) 2002 Silicon Graphics, Inc. All Rights Reserved.
+ * Copyright (c) 2002-2003 Silicon Graphics, Inc. All Rights Reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License
* http://oss.sgi.com/projects/GenInfo/NoticeExplan
*/
-#include <linux/config.h>
-
-#ifdef CONFIG_IA64_SGI_SN1
-#define MACHVEC_PLATFORM_NAME sn1
-#define MACHVEC_PLATFORM_HEADER <asm/machvec_sn1.h>
-#else CONFIG_IA64_SGI_SN1
-#define MACHVEC_PLATFORM_NAME sn2
-#define MACHVEC_PLATFORM_HEADER <asm/machvec_sn2.h>
-#else
-#error "unknown platform"
-#endif
-
+#define MACHVEC_PLATFORM_NAME sn2
+#define MACHVEC_PLATFORM_HEADER <asm/machvec_sn2.h>
#include <asm/machvec_init.h>
-#include <asm/io.h>
-#include <linux/pci.h>
-void*
-sn_mk_io_addr_MACRO
-
-dma_addr_t
-sn_pci_map_single_MACRO
-
-int
-sn_pci_map_sg_MACRO
-
-unsigned long
-sn_virt_to_phys_MACRO
-
-void *
-sn_phys_to_virt_MACRO
* File: mca.c
* Purpose: SN specific MCA code.
*
- * Copyright (C) 2001-2002 Silicon Graphics, Inc. All Rights Reserved.
+ * Copyright (C) 2001-2003 Silicon Graphics, Inc. All Rights Reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License
*/
#include <linux/types.h>
-#include <linux/init.h>
-#include <linux/jiffies.h>
-#include <linux/threads.h>
-#include <linux/interrupt.h>
-#include <linux/irq.h>
-#include <linux/smp_lock.h>
-#include <linux/acpi.h>
-#ifdef CONFIG_KDB
-#include <linux/kdb.h>
-#endif
-
-#include <asm/machvec.h>
-#include <asm/page.h>
-#include <asm/ptrace.h>
-#include <asm/system.h>
+#include <linux/kernel.h>
+#include <linux/timer.h>
+#include <asm/mca.h>
#include <asm/sal.h>
#include <asm/sn/sn_sal.h>
-#include <asm/mca.h>
-#include <asm/sn/mca.h>
-
-#include <asm/irq.h>
-#include <asm/hw_irq.h>
-#include <asm/smp.h>
-#include <asm/sn/sn_cpuid.h>
-
-static char *shub_mmr_names[] = {
- "sh_event_occurred",
- "sh_first_error",
- "sh_event_overflow",
-
-/* PI */
- "sh_pi_first_error",
- "sh_pi_error_summary",
- "sh_pi_error_overflow",
-
-/* PI HW */
- "sh_pi_error_detail_1",
- "sh_pi_error_detail_2",
- "sh_pi_hw_time_stamp",
-
-/* PI UCE */
- "sh_pi_uncorrected_detail_1",
- "sh_pi_uncorrected_detail_2",
- "sh_pi_uncorrected_detail_3",
- "sh_pi_uncorrected_detail_4",
- "sh_pi_uncor_time_stamp",
-
-/* PI CE */
- "sh_pi_corrected_detail_1",
- "sh_pi_corrected_detail_2",
- "sh_pi_corrected_detail_3",
- "sh_pi_corrected_detail_4",
- "sh_pi_cor_time_stamp",
-
-/* MD */
- "sh_mem_error_summary",
- "sh_mem_error_overflow",
-/* MD HW */
- "sh_misc_err_hdr_upper",
- "sh_misc_err_hdr_lower",
- "sh_md_dqlp_mmr_xperr_val",
- "sh_md_dqlp_mmr_yperr_val",
- "sh_md_dqrp_mmr_xperr_val",
- "sh_md_dqrp_mmr_yperr_val",
- "sh_md_hw_time_stamp",
-
-/* MD UCE */
- "sh_dir_uc_err_hdr_lower",
- "sh_dir_uc_err_hdr_upper",
- "sh_md_dqlp_mmr_xuerr1",
- "sh_md_dqlp_mmr_xuerr2",
- "sh_md_dqlp_mmr_yuerr1",
- "sh_md_dqlp_mmr_yuerr2",
- "sh_md_dqrp_mmr_xuerr1",
- "sh_md_dqrp_mmr_xuerr2",
- "sh_md_dqrp_mmr_yuerr1",
- "sh_md_dqrp_mmr_yuerr2",
- "sh_md_uncor_time_stamp",
-/* MD CE */
- "sh_dir_cor_err_hdr_lower",
- "sh_dir_cor_err_hdr_upper",
- "sh_md_dqlp_mmr_xcerr1",
- "sh_md_dqlp_mmr_xcerr2",
- "sh_md_dqlp_mmr_ycerr1",
- "sh_md_dqlp_mmr_ycerr2",
- "sh_md_dqrp_mmr_xcerr1",
- "sh_md_dqrp_mmr_xcerr2",
- "sh_md_dqrp_mmr_ycerr1",
- "sh_md_dqrp_mmr_ycerr2",
- "sh_md_cor_time_stamp",
-/* MD CE, UCE */
- "sh_md_dqls_mmr_xamopw_err",
- "sh_md_dqrs_mmr_yamopw_err",
-/* XN */
- "sh_xn_error_summary",
- "sh_xn_first_error",
- "sh_xn_error_overflow",
-
-/* XN HW */
- "sh_xniilb_error_summary",
- "sh_xniilb_first_error",
- "sh_xniilb_error_overflow",
- "sh_xniilb_error_detail_1",
- "sh_xniilb_error_detail_2",
- "sh_xniilb_error_detail_3",
-
- "sh_ni0_error_summary_1",
- "sh_ni0_first_error_1",
- "sh_ni0_error_overflow_1",
-
- "sh_ni0_error_summary_2",
- "sh_ni0_first_error_2",
- "sh_ni0_error_overflow_2",
- "sh_ni0_error_detail_1",
- "sh_ni0_error_detail_2",
- "sh_ni0_error_detail_3",
+/*
+ * Interval for calling SAL to poll for errors that do NOT cause error
+ * interrupts. SAL will raise a CPEI if any errors are present that
+ * need to be logged.
+ */
+#define CPEI_INTERVAL (5*HZ)
- "sh_ni1_error_summary_1",
- "sh_ni1_first_error_1",
- "sh_ni1_error_overflow_1",
- "sh_ni1_error_summary_2",
- "sh_ni1_first_error_2",
- "sh_ni1_error_overflow_2",
+struct timer_list sn_cpei_timer;
+void sn_init_cpei_timer(void);
- "sh_ni1_error_detail_1",
- "sh_ni1_error_detail_2",
- "sh_ni1_error_detail_3",
- "sh_xn_hw_time_stamp",
+/*
+ * print_hook
+ *
+ * This function is the callback routine that SAL calls to log error
+ * info for platform errors.
+ */
+static int
+print_hook(const char *fmt, ...)
+{
+	/* newline != 0 means the previous call ended with '\n', so the
+	 * next chunk starts a fresh output line and gets the "+ " prefix. */
+	static int newline=1;
+	char buf[400], *p;
+	va_list args;
+	int len=0;
-/* XN HW & UCE & SBE */
-	"sh_xnpi_error_summary",
-	"sh_xnpi_first_error",
-	"sh_xnpi_error_overflow",
-	"sh_xnpi_error_detail_1",
-	"sh_xnmd_error_summary",
-	"sh_xnmd_first_error",
-	"sh_xnmd_error_overflow",
-	"sh_xnmd_ecc_err_report",
-	"sh_xnmd_error_detail_1",
+	va_start(args, fmt);
+	if (newline) {
+		strcpy(buf, "+ ");
+		len += 2;
+	}
+	len += vsnprintf(buf+len, sizeof(buf)-len, fmt, args);
+
+	/* Prefix each line with "+ " to be consistent with mca.c. */
+	/*
+	 * NOTE(review): the in-place expansion below is not bounds-checked
+	 * against buf[400]; a formatted message containing many newlines
+	 * could run past the end of buf.  Confirm the sizes SAL actually
+	 * passes before relying on this.
+	 */
+	p = buf;
+	while ((p=strchr(p, '\n')) && *++p != '\0') {
+		memmove(p+2, p, 1+strlen(p));
+		strncpy(p, "+ ", 2);
+		len += 2;
+	}
+	/* p is non-NULL here only when buf ended in '\n'. */
+	newline = (p != 0);
-/* XN UCE */
-	"sh_xn_uncorrected_detail_1",
-	"sh_xn_uncorrected_detail_2",
-	"sh_xn_uncorrected_detail_3",
-	"sh_xn_uncorrected_detail_4",
-	"sh_xn_uncor_time_stamp",
+	va_end(args);
+	printk("%s", buf);
+	return len;
+}
-/* XN CE */
- "sh_xn_corrected_detail_1",
- "sh_xn_corrected_detail_2",
- "sh_xn_corrected_detail_3",
- "sh_xn_corrected_detail_4",
- "sh_xn_cor_time_stamp",
-/* LB HW */
- "sh_lb_error_summary",
- "sh_lb_first_error",
- "sh_lb_error_overflow",
- "sh_lb_error_detail_1",
- "sh_lb_error_detail_2",
- "sh_lb_error_detail_3",
- "sh_lb_error_detail_4",
- "sh_lb_error_detail_5",
- "sh_junk_error_status",
-};
-void
-sal_log_plat_print(int header_len, int sect_len, u8 *p_data, prfunc_t prfunc)
+/*
+ * ia64_sn2_platform_plat_specific_err_print
+ *
+ * Called by the MCA handler to log platform-specific errors.
+ *
+ * All record decoding is delegated to SAL via
+ * ia64_sn_plat_specific_err_print(), with print_hook supplied as the
+ * output callback; header_len and prfunc are unused in this
+ * implementation (the signature is unchanged from the old decoder).
+ */
+void
+ia64_sn2_platform_plat_specific_err_print(int header_len, int sect_len, u8 *p_data, prfunc_t prfunc)
 {
-	sal_log_plat_info_t *sh_info = (sal_log_plat_info_t *) p_data;
-	u64 *mmr_val = (u64 *)&(sh_info->shub_state);
-	char **mmr_name = shub_mmr_names;
-	int mmr_count = sizeof(sal_log_shub_state_t)>>3;
+	/* NOTE(review): p_data - sect_len presumably rewinds to the start of
+	 * the section header that SAL expects — confirm against the SAL
+	 * error-record layout. */
+	ia64_sn_plat_specific_err_print(print_hook, p_data - sect_len);
+}
- while(mmr_count) {
- if(*mmr_val) {
- prfunc("%-40s: %#016lx\n",*mmr_name, *mmr_val);
- }
- mmr_name++;
- mmr_val++;
- mmr_count--;
- }
-}
-void
-sn_cpei_handler(int irq, void *devid, struct pt_regs *regs) {
+static void
+sn_cpei_handler(int irq, void *devid, struct pt_regs *regs)
+{
+ /*
+ * this function's sole purpose is to call SAL when we receive
+ * a CE interrupt from SHUB or when the timer routine decides
+ * we need to call SAL to check for CEs.
+ */
- struct ia64_sal_retval isrv;
-// this function's sole purpose is to call SAL when we receive
-// a CE interrupt from SHUB or when the timer routine decides
-// we need to call SAL to check for CEs.
+ /* CALL SAL_LOG_CE */
- // CALL SAL_LOG_CE
- SAL_CALL(isrv, SN_SAL_LOG_CE, irq, 0, 0, 0, 0, 0, 0);
+ ia64_sn_plat_cpei_handler();
}
-#include <linux/timer.h>
-#define CPEI_INTERVAL (HZ/100)
-struct timer_list sn_cpei_timer = TIMER_INITIALIZER(NULL, 0, 0);
-void sn_init_cpei_timer(void);
-
-void
+static void
sn_cpei_timer_handler(unsigned long dummy) {
- sn_cpei_handler(-1, NULL, NULL);
- del_timer(&sn_cpei_timer);
- sn_cpei_timer.expires = jiffies + CPEI_INTERVAL;
- add_timer(&sn_cpei_timer);
+ sn_cpei_handler(-1, NULL, NULL);
+ mod_timer(&sn_cpei_timer, jiffies + CPEI_INTERVAL);
}
void
sn_init_cpei_timer() {
- sn_cpei_timer.expires = jiffies + CPEI_INTERVAL;
+ sn_cpei_timer.expires = jiffies + CPEI_INTERVAL;
sn_cpei_timer.function = sn_cpei_timer_handler;
add_timer(&sn_cpei_timer);
}
-
-#ifdef ajmtestceintr
-
-struct timer_list sn_ce_timer;
-
-void
-sn_ce_timer_handler(long dummy) {
- unsigned long *pi_ce_error_inject_reg = 0xc00000092fffff00;
-
- *pi_ce_error_inject_reg = 0x0000000000000100;
- del_timer(&sn_ce_timer);
- sn_ce_timer.expires = jiffies + CPEI_INTERVAL;
- add_timer(&sn_ce_timer);
-}
-
-sn_init_ce_timer() {
- sn_ce_timer.expires = jiffies + CPEI_INTERVAL;
- sn_ce_timer.function = sn_ce_timer_handler;
- add_timer(&sn_ce_timer);
-}
-#endif /* ajmtestceintr */
+++ /dev/null
-/*
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Copyright (C) 2000-2002 Silicon Graphics, Inc. All rights reserved.
- */
-
-#include <linux/kernel.h>
-#include <linux/init.h>
-#include <linux/delay.h>
-#include <linux/interrupt.h>
-#include <asm/sn/sn_sal.h>
-#include <asm/sn/sn_cpuid.h>
-#include <asm/processor.h>
-#include <asm/pgtable.h>
-#include <asm/page.h>
-#include <asm/timex.h>
-#include <asm/io.h>
-#include <asm/irq.h>
-#include <asm/sn/intr.h>
-#include <asm/hw_irq.h>
-#include <asm/sn/leds.h>
-
-extern int autotest_enabled;
-long mcatest=0, debug0, debug1, debug2, debug3;
-
-#define HDELAY(t) (IS_RUNNING_ON_SIMULATOR() ? udelay(1) : udelay(t))
-
-/*
- * mcatest
- * mactest contains a decimal number (RPTT) where
- * R - flag, if non zero, run forever
- *
- * P - identifies when to run the test
- * 0 execute test at cpu 0 early init
- * 1 execute test at cpu 0 idle
- * 2 execute test at last (highest numbered) cpu idle
- * 3 execute test on all cpus at idle
- *
- * TT- identifies test to run
- * 01 = MCA via dup TLB dropin
- * 02 = MCA via garbage address
- * 03 = lfetch via garbage address
- * 05 = INIT self
- * 06 = INIT other cpu
- * 07 = INIT non-existent cpu
- * 10 = IPI stress test. Target cpu 0
- * 11 = IPI stress test. Target all cpus
- * 12 = TLB stress test
- * 13 = Park cpu (spinloop)
- * 14 = One shot TLB test with tlb spinlock
- * 15 = One shot TLB test
- * 16 = One shot TLB test sync'ed with RTC
- * 20 = set led to the cpuid & spin.
- * 21 = Try mixed cache/uncached refs & see what happens
- * 22 = Call SAL reboot
- * 23 = Call PAL halt
- */
-static int __init set_mcatest(char *str)
-{
- int val;
- get_option(&str, &val);
- mcatest = val;
- return 1;
-}
-__setup("mcatest=", set_mcatest);
-
-static int __init set_debug0(char *str)
-{
- int val;
- get_option(&str, &val);
- debug0 = val;
- return 1;
-}
-__setup("debug0=", set_debug0);
-
-static int __init set_debug1(char *str)
-{
- int val;
- get_option(&str, &val);
- debug1 = val;
- return 1;
-}
-__setup("debug1=", set_debug1);
-
-static int __init set_debug2(char *str)
-{
- int val;
- get_option(&str, &val);
- debug2 = val;
- return 1;
-}
-__setup("debug2=", set_debug2);
-
-static int __init set_debug3(char *str)
-{
- int val;
- get_option(&str, &val);
- debug3 = val;
- return 1;
-}
-__setup("debug3=", set_debug3);
-
-static volatile int go;
-
-static void
-do_sync(int pos) {
- if (pos != 3)
- return;
- else if (smp_processor_id() == 0)
- go = 1;
- else
- while (!go);
-}
-
-static void
-sgi_mcatest_bkpt(void)
-{
-}
-
-
-/*
- * Optional test
- * pos - 0 called from early init
- * pos - called when cpu about to go idle (fully initialized
- */
-void
-sgi_mcatest(int pos)
-{
- long spos, test, repeat;
- int cpu, curcpu, i, n;
-
- //if (IS_RUNNING_ON_SIMULATOR()) mcatest=1323;
- repeat = mcatest/1000;
- spos = (mcatest/100)%10;
- test = mcatest % 100;
- curcpu = smp_processor_id();
-
- if ( mcatest == 0 || !((pos == 0 && spos == 0) ||
- (pos == 1 && spos == 3) ||
- (pos == 1 && spos == 1 && curcpu == 0) ||
- (pos == 1 && spos == 2 && curcpu == smp_num_cpus-1)))
- return;
-
-again:
- if (test == 1 || test == 2 || test == 3) {
- void zzzmca(int);
- printk("CPU %d: About to cause unexpected MCA\n", curcpu);
- HDELAY(100000);
- sgi_mcatest_bkpt();
- do_sync(spos);
-
- zzzmca(test-1);
-
- HDELAY(100000);
- }
-
- if (test == 4) {
- long result, adrs[] = {0xe0021000009821e0UL, 0xc0003f3000000000UL, 0xc0000081101c0000UL, 0xc00000180e021004UL, 0xc00000180e022004UL, 0xc00000180e023004UL };
- long size[] = {1,2,4,8};
- int r, i, j, k;
-
- for (k=0; k<2; k++) {
- for (i=0; i<6; i++) {
- for (j=0; j<4; j++) {
- printk("Probing 0x%lx, size %ld\n", adrs[i], size[j]);
- result = -1;
- r = ia64_sn_probe_io_slot (adrs[i], size[j], &result);
- printk(" status %d, val 0x%lx\n", r, result);
- udelay(100000);
- }
- }
- }
-
- }
-
- if (test == 5) {
- cpu = curcpu;
- printk("CPU %d: About to send INIT to self (cpu %d)\n", curcpu, cpu);
- HDELAY(100000);
- sgi_mcatest_bkpt();
- do_sync(spos);
-
- platform_send_ipi(cpu, 0, IA64_IPI_DM_INIT, 0);
-
- HDELAY(100000);
- printk("CPU %d: Returned from INIT\n", curcpu);
- }
-
- if (test == 6) {
- cpu = curcpu ^ 1;
- printk("CPU %d: About to send INIT to other cpu (cpu %d)\n", curcpu, cpu);
- HDELAY(100000);
- sgi_mcatest_bkpt();
- do_sync(spos);
-
- platform_send_ipi(cpu, 0, IA64_IPI_DM_INIT, 0);
-
- HDELAY(100000);
- printk("CPU %d: Done\n", curcpu);
- }
-
- if (test == 7) {
- printk("CPU %d: About to send INIT to non-existent cpu\n", curcpu);
- HDELAY(100000);
- sgi_mcatest_bkpt();
- do_sync(spos);
-
- sn_send_IPI_phys(0xffff, 0, IA64_IPI_DM_INIT);
-
- HDELAY(100000);
- printk("CPU %d: Done\n", curcpu);
- }
-
- if (test == 10) {
- n = IS_RUNNING_ON_SIMULATOR() ? 10 : 10000000;
- cpu = 0;
- printk("CPU %d: IPI stress test. Target cpu 0\n", curcpu);
- HDELAY(100000);
- sgi_mcatest_bkpt();
- do_sync(spos);
-
- for (i=0; i<n; i++)
- platform_send_ipi(cpu, IA64_IPI_RESCHEDULE, IA64_IPI_DM_INT, 0);
-
- HDELAY(100000);
- printk("CPU %d: Done\n", curcpu);
- }
-
- if (test == 11) {
- n = IS_RUNNING_ON_SIMULATOR() ? 100 : 10000000;
- printk("CPU %d: IPI stress test. Target all cpus\n", curcpu);
- HDELAY(100000);
- sgi_mcatest_bkpt();
- do_sync(spos);
-
- for (i=0; i<n; i++)
- for (cpu=0; cpu<smp_num_cpus; cpu++)
- if (smp_num_cpus > 2 && cpu != curcpu)
- platform_send_ipi(cpu, IA64_IPI_RESCHEDULE, IA64_IPI_DM_INT, 0);
-
- HDELAY(100000);
- printk("CPU %d: Done\n", curcpu);
- }
-
- if (test == 12) {
- long adr = 0xe002200000000000UL;
- n = IS_RUNNING_ON_SIMULATOR() ? 1000 : 100000;
- printk("CPU %d: TLB flush stress test\n", curcpu);
- HDELAY(100000);
- sgi_mcatest_bkpt();
- do_sync(spos);
-
- for (i=0; i<n; i++)
- platform_global_tlb_purge(adr, adr+25*PAGE_SIZE, 14);
-
- HDELAY(100000);
- printk("CPU %d: Done\n", curcpu);
- }
-
- if (test == 13) {
- printk("CPU %d: Park cpu in spinloop\n", curcpu);
- while(1);
- }
- if (test == 14 || test == 15 || test == 16 || test == 17) {
- long adr = 0xe002200000000000UL;
- static int inited=0;
- if (inited == 0) {
- if (debug0 == 0) debug0 = 1;
- repeat = 1;
- do_sync(spos);
- if (curcpu >= smp_num_cpus-2) {
- printk("Parking cpu %d\n", curcpu);
- local_irq_disable();
- while(1);
- } else {
- printk("Waiting cpu %d\n", curcpu);
- HDELAY(1000000);
- }
- HDELAY(1000000);
- inited = 1;
- }
- if (test == 16 || test == 17) {
- unsigned long t, shift, mask;
- mask = (smp_num_cpus > 16) ? 0x1f : 0xf;
- shift = 25-debug1;
- do {
- t = get_cycles();
- if (IS_RUNNING_ON_SIMULATOR())
- t = (t>>8);
- else
- t = (t>>shift);
- t = t & mask;
- } while (t == curcpu);
- do {
- t = get_cycles();
- if (IS_RUNNING_ON_SIMULATOR())
- t = (t>>8);
- else
- t = (t>>shift);
- t = t & mask;
- } while (t != curcpu);
- }
- if(debug3) printk("CPU %d: One TLB start\n", curcpu);
- if (test != 17) platform_global_tlb_purge(adr, adr+PAGE_SIZE*debug0, 14);
- if(debug3) printk("CPU %d: One TLB flush done\n", curcpu);
- }
- if (test == 20) {
- local_irq_disable();
- set_led_bits(smp_processor_id(), 0xff);
- while(1);
- }
- if (test == 21) {
- extern long ia64_mca_stack[];
- int i, n;
- volatile long *p, *up;
- p = (volatile long*)__imva(ia64_mca_stack);
- up = (volatile long*)(__pa(p) | __IA64_UNCACHED_OFFSET);
-
- if(!IS_RUNNING_ON_SIMULATOR()) printk("ZZZ get data in cache\n");
- for (n=0, i=0; i<100; i++)
- n += *(p+i);
- if(!IS_RUNNING_ON_SIMULATOR()) printk("ZZZ Make uncached refs to same data\n");
- for (n=0, i=0; i<100; i++)
- n += *(up+i);
- if(!IS_RUNNING_ON_SIMULATOR()) printk("ZZZ dirty the data via cached refs\n");
- for (n=0, i=0; i<100; i++)
- *(p+i) = i;
- if(!IS_RUNNING_ON_SIMULATOR()) printk("ZZZ Make uncached refs to same data\n");
- for (n=0, i=0; i<100; i++)
- n += *(up+i);
- if(!IS_RUNNING_ON_SIMULATOR()) printk("ZZZ Flushing cache\n");
- for (n=0, i=0; i<100; i++)
- ia64_fc((void*)(p+i));
- printk("ZZZ done\n");
- }
- if (test == 21) {
- int i;
- volatile long tb, t[10];
- for (i=0; i<10; i++) {
- tb = debug3+ia64_get_itc();
- sgi_mcatest_bkpt();
- t[i] = ia64_get_itc() - tb;
- }
- for (i=0; i<10; i++) {
- printk("ZZZ NULL 0x%lx\n", t[i]);
- }
- for (i=0; i<10; i++) {
- tb = debug3+ia64_get_itc();
- ia64_pal_call_static(PAL_MC_DRAIN, 0, 0, 0, 0);
- t[i] = ia64_get_itc() - tb;
- }
- for (i=0; i<10; i++) {
- printk("ZZZ DRAIN 0x%lx\n", t[i]);
- }
- }
- if (test == 22) {
- extern void machine_restart(char*);
- printk("ZZZ machine_restart\n");
- machine_restart(0);
- }
- if (test == 23) {
- printk("ZZZ ia64_pal_halt_light\n");
- ia64_pal_halt_light();
- }
- if (repeat)
- goto again;
-
-}
/*
* Platform dependent support for IO probing.
*
- * Copyright (c) 2000-2002 Silicon Graphics, Inc. All rights reserved.
+ * Copyright (c) 2000-2003 Silicon Graphics, Inc. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License
/*
- * Copyright (C) 1999,2001-2002 Silicon Graphics, Inc. All rights reserved.
+ * Copyright (C) 1999,2001-2003 Silicon Graphics, Inc. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License
#include <asm/sn/bte.h>
#include <asm/sn/clksupport.h>
#include <asm/sn/sn_sal.h>
-
-#ifdef CONFIG_IA64_SGI_SN2
#include <asm/sn/sn2/shub.h>
-#endif
DEFINE_PER_CPU(struct pda_s, pda_percpu);
+/* Map an ACPI proximity domain to a NASID; on SN2 NASIDs are the even
+ * numbers (nasid = 2*pxm) — TODO(review): confirm against SN2 ACPI tables. */
+#define pxm_to_nasid(pxm) ((pxm)<<1)
+
+#define MAX_PHYS_MEMORY (1UL << 49) /* 512 TB (2^49 bytes) */
+
extern void bte_init_node (nodepda_t *, cnodeid_t);
extern void bte_init_cpu (void);
+extern void sn_timer_init(void);
+extern void (*ia64_mark_idle)(int);
+extern void snidle(int);
unsigned long sn_rtc_cycles_per_second;
-unsigned long sn_rtc_usec_per_cyc;
partid_t sn_partid = -1;
char sn_system_serial_number_string[128];
u64 sn_partition_serial_number;
+short physical_node_map[MAX_PHYSNODE_ID];
+
/*
* This is the address of the RRegs in the HSpace of the global
* master. It is used by a hack in serial.c (serial_[in|out],
* early_printk won't try to access the UART before
* master_node_bedrock_address is properly calculated.
*/
-u64 master_node_bedrock_address = 0UL;
+u64 master_node_bedrock_address;
static void sn_init_pdas(char **);
-extern struct irq_desc *_sn_irq_desc[];
-
-#if defined(CONFIG_IA64_SGI_SN1)
-extern synergy_da_t *Synergy_da_indr[];
-#endif
static nodepda_t *nodepdaindr[MAX_COMPACT_NODES];
-#ifdef CONFIG_IA64_SGI_SN2
-irqpda_t *irqpdaindr[NR_CPUS];
-#endif /* CONFIG_IA64_SGI_SN2 */
+irqpda_t *irqpdaindr;
/*
* running in the simulator. Note that passing zeroes in DRIVE_INFO
* is sufficient (the IDE driver will autodetect the drive geometry).
*/
+#ifdef CONFIG_IA64_GENERIC
+extern char drive_info[4*16];
+#else
char drive_info[4*16];
-
-/**
- * sn_map_nr - return the mem_map entry for a given kernel address
- * @addr: kernel address to query
- *
- * Finds the mem_map entry for the kernel address given. Used by
- * virt_to_page() (asm-ia64/page.h), among other things.
- */
-unsigned long
-sn_map_nr (unsigned long addr)
-{
- return BANK_MAP_NR(addr);
-}
+#endif
/**
* early_sn_setup - early setup routine for SN platforms
*
* Sets up an initial console to aid debugging. Intended primarily
- * for bringup, it's only called if %BRINGUP and %CONFIG_IA64_EARLY_PRINTK
- * are turned on. See start_kernel() in init/main.c.
+ * for bringup. See start_kernel() in init/main.c.
*/
-#if defined(CONFIG_IA64_EARLY_PRINTK)
+#if defined(CONFIG_IA64_EARLY_PRINTK) || defined(CONFIG_IA64_SGI_SN_SIM)
void __init
early_sn_setup(void)
}
if ( IS_RUNNING_ON_SIMULATOR() ) {
-#if defined(CONFIG_IA64_SGI_SN1)
- master_node_bedrock_address = (u64)REMOTE_HSPEC_ADDR(get_nasid(), 0);
-#else
master_node_bedrock_address = (u64)REMOTE_HUB(get_nasid(), SH_JUNK_BUS_UART0);
-#endif
printk(KERN_DEBUG "early_sn_setup: setting master_node_bedrock_address to 0x%lx\n", master_node_bedrock_address);
}
}
-#endif /* CONFIG_IA64_SGI_SN1 */
+#endif /* CONFIG_IA64_EARLY_PRINTK */
#ifdef CONFIG_IA64_MCA
extern int platform_intr_list[];
#endif
extern nasid_t master_nasid;
+static int shub_1_1_found __initdata;
+
+
+/*
+ * sn_check_for_wars
+ *
+ * Set flag for enabling shub specific wars
+ */
+
+static inline int __init
+is_shub_1_1(int nasid)
+{
+ unsigned long id;
+ int rev;
+
+ id = REMOTE_HUB_L(nasid, SH_SHUB_ID);
+ rev = (id & SH_SHUB_ID_REVISION_MASK) >> SH_SHUB_ID_REVISION_SHFT;
+ return rev <= 2;
+}
+
+static void __init
+sn_check_for_wars(void)
+{
+ int cnode;
+
+ for (cnode=0; cnode< numnodes; cnode++)
+ if (is_shub_1_1(cnodeid_to_nasid(cnode)))
+ shub_1_1_found = 1;
+}
+
+
/**
* sn_setup - SN platform setup routine
sn_setup(char **cmdline_p)
{
long status, ticks_per_sec, drift;
- int i;
+ int pxm;
int major = sn_sal_rev_major(), minor = sn_sal_rev_minor();
+ extern void io_sh_swapper(int, int);
+ extern nasid_t get_master_baseio_nasid(void);
+ extern void sn_cpu_init(void);
+
+ MAX_DMA_ADDRESS = PAGE_OFFSET + MAX_PHYS_MEMORY;
+
+ memset(physical_node_map, -1, sizeof(physical_node_map));
+ for (pxm=0; pxm<MAX_PXM_DOMAINS; pxm++)
+ if (pxm_to_nid_map[pxm] != -1)
+ physical_node_map[pxm_to_nasid(pxm)] = pxm_to_nid_map[pxm];
printk("SGI SAL version %x.%02x\n", major, minor);
"%x.%02x\n", SN_SAL_MIN_MAJOR, SN_SAL_MIN_MINOR);
panic("PROM version too old\n");
}
-#ifdef CONFIG_PCI
-#ifdef CONFIG_IA64_SGI_SN2
- {
- extern void io_sh_swapper(int, int);
- io_sh_swapper(get_nasid(), 0);
- }
-#endif
+
+ io_sh_swapper(get_nasid(), 0);
master_nasid = get_nasid();
(void)get_console_nasid();
-#ifndef CONFIG_IA64_SGI_SN1
- {
- extern nasid_t get_master_baseio_nasid(void);
- (void)get_master_baseio_nasid();
- }
-#endif
-#endif /* CONFIG_PCI */
+ (void)get_master_baseio_nasid();
+
status = ia64_sal_freq_base(SAL_FREQ_BASE_REALTIME_CLOCK, &ticks_per_sec, &drift);
if (status != 0 || ticks_per_sec < 100000) {
printk(KERN_WARNING "unable to determine platform RTC clock frequency, guessing.\n");
else
sn_rtc_cycles_per_second = ticks_per_sec;
-#ifdef CONFIG_IA64_SGI_SN1
- /* PROM has wrong value on SN1 */
- sn_rtc_cycles_per_second = 990177;
-#endif
- sn_rtc_usec_per_cyc = ((1000000000UL<<IA64_NSEC_PER_CYC_SHIFT)
- + sn_rtc_cycles_per_second/2) / sn_rtc_cycles_per_second;
-
- for (i=0;i<NR_CPUS;i++)
- _sn_irq_desc[i] = _irq_desc;
-
platform_intr_list[ACPI_INTERRUPT_CPEI] = IA64_PCE_VECTOR;
if ( IS_RUNNING_ON_SIMULATOR() )
{
-#ifdef CONFIG_IA64_SGI_SN2
master_node_bedrock_address = (u64)REMOTE_HUB(get_nasid(), SH_JUNK_BUS_UART0);
-#else
- master_node_bedrock_address = (u64)REMOTE_HSPEC_ADDR(get_nasid(), 0);
-#endif
printk(KERN_DEBUG "sn_setup: setting master_node_bedrock_address to 0x%lx\n",
master_node_bedrock_address);
}
*/
sn_init_pdas(cmdline_p);
+ /*
+ * Check for WARs.
+ */
+ sn_check_for_wars();
+
+ ia64_mark_idle = &snidle;
/*
* For the bootcpu, we do this here. All other cpus will make the
*/
sn_cpu_init();
-
#ifdef CONFIG_SMP
init_smp_config();
#endif
screen_info = sn_screen_info;
- /*
- * Turn off "floating-point assist fault" warnings by default.
- */
- current->thread.flags |= IA64_THREAD_FPEMU_NOPRINT;
+ sn_timer_init();
}
/**
* Make sure that the PDA fits entirely in the same page as the
* cpu_data area.
*/
- if ( (((unsigned long)pda & ~PAGE_MASK) + sizeof(pda_t)) > PAGE_SIZE)
+ if ((((unsigned long)pda & (~PAGE_MASK)) + sizeof(pda_t)) > PAGE_SIZE)
panic("overflow of cpu_data page");
+ memset(pda->cnodeid_to_nasid_table, -1, sizeof(pda->cnodeid_to_nasid_table));
+ for (cnode=0; cnode<numnodes; cnode++)
+ pda->cnodeid_to_nasid_table[cnode] = pxm_to_nasid(nid_to_pxm_map[cnode]);
+
/*
* Allocate & initalize the nodepda for each node.
*/
for (cnode=0; cnode < numnodes; cnode++) {
nodepdaindr[cnode] = alloc_bootmem_node(NODE_DATA(cnode), sizeof(nodepda_t));
memset(nodepdaindr[cnode], 0, sizeof(nodepda_t));
-
-#if defined(CONFIG_IA64_SGI_SN1)
- Synergy_da_indr[cnode * 2] = (synergy_da_t *) alloc_bootmem_node(NODE_DATA(cnode), sizeof(synergy_da_t));
- Synergy_da_indr[cnode * 2 + 1] = (synergy_da_t *) alloc_bootmem_node(NODE_DATA(cnode), sizeof(synergy_da_t));
- memset(Synergy_da_indr[cnode * 2], 0, sizeof(synergy_da_t));
- memset(Synergy_da_indr[cnode * 2 + 1], 0, sizeof(synergy_da_t));
-#endif
}
/*
for (cnode=0; cnode < numnodes; cnode++)
memcpy(nodepdaindr[cnode]->pernode_pdaindr, nodepdaindr, sizeof(nodepdaindr));
-#ifdef CONFIG_PCI
+
/*
* Set up IO related platform-dependent nodepda fields.
* The following routine actually sets up the hubinfo struct
init_platform_nodepda(nodepdaindr[cnode], cnode);
bte_init_node (nodepdaindr[cnode], cnode);
}
-#endif
}
/**
void __init
sn_cpu_init(void)
{
- int cpuid, cpuphyid, nasid, nodeid, slice;
+ int cpuid;
+ int cpuphyid;
+ int nasid;
+ int slice;
+ int cnode, i;
/*
* The boot cpu makes this call again after platform initialization is
cpuid = smp_processor_id();
cpuphyid = ((ia64_get_lid() >> 16) & 0xffff);
nasid = cpu_physical_id_to_nasid(cpuphyid);
- nodeid = cpu_to_node_map[cpuphyid];
+ cnode = nasid_to_cnodeid(nasid);
slice = cpu_physical_id_to_slice(cpuphyid);
-	memset(pda, 0, sizeof(pda_t));
-	pda->p_nodepda = nodepdaindr[nodeid];
+	printk("CPU %d: nasid %d, slice %d, cnode %d\n",
+		smp_processor_id(), nasid, slice, cnode);
+
+	/* sizeof(pda) is the size of the pointer, not the struct; keep
+	 * zeroing the entire per-cpu pda_t as the old code did, so no
+	 * stale fields survive. */
+	memset(pda, 0, sizeof(pda_t));
+	pda->p_nodepda = nodepdaindr[cnode];
+ pda->led_address = (typeof(pda->led_address)) (LED0 + (slice<<LED_CPU_SHIFT));
+ pda->led_state = LED_ALWAYS_SET;
pda->hb_count = HZ/2;
pda->hb_state = 0;
pda->idle_flag = 0;
+ pda->shub_1_1_found = shub_1_1_found;
+
+ memset(pda->cnodeid_to_nasid_table, -1, sizeof(pda->cnodeid_to_nasid_table));
+ for (i=0; i<numnodes; i++)
+ pda->cnodeid_to_nasid_table[i] = pxm_to_nasid(nid_to_pxm_map[i]);
+
+ if (local_node_data->active_cpu_count == 1)
+ nodepda->node_first_cpu = cpuid;
+
+
+
+ /*
+ * We must use different memory allocators for first cpu (bootmem
+ * allocator) than for the other cpus (regular allocator).
+ */
+ if (cpuid == 0)
+ irqpdaindr = alloc_bootmem_node(NODE_DATA(cpuid_to_cnodeid(cpuid)),sizeof(irqpda_t));
+
+ memset(irqpdaindr, 0, sizeof(irqpda_t));
+ irqpdaindr->irq_flags[SGI_PCIBR_ERROR] = SN2_IRQ_SHARED;
+ irqpdaindr->irq_flags[SGI_PCIBR_ERROR] |= SN2_IRQ_RESERVED;
+ irqpdaindr->irq_flags[SGI_II_ERROR] = SN2_IRQ_SHARED;
+ irqpdaindr->irq_flags[SGI_II_ERROR] |= SN2_IRQ_RESERVED;
+
pda->pio_write_status_addr = (volatile unsigned long *)
LOCAL_MMR_ADDR((slice < 2 ? SH_PIO_WRITE_STATUS_0 : SH_PIO_WRITE_STATUS_1 ) );
pda->mem_write_status_addr = (volatile u64 *)
if (nodepda->node_first_cpu == cpuid) {
int buddy_nasid;
- buddy_nasid = cnodeid_to_nasid(local_nodeid == numnodes - 1 ? 0 : local_nodeid + 1);
+ buddy_nasid = cnodeid_to_nasid(numa_node_id() == numnodes-1 ? 0 : numa_node_id()+ 1);
pda->pio_shub_war_cam_addr = (volatile unsigned long*)GLOBAL_MMR_ADDR(nasid, SH_PI_CAM_CONTROL);
}
bte_init_cpu();
}
-
-#ifdef II_PRTE_TLB_WAR
-long iiprt_lock[16*64] __cacheline_aligned; /* allow for NASIDs up to 64 */
-#endif
-
-#ifdef BUS_INT_WAR
-
-#include <asm/hw_irq.h>
-#include <asm/sn/pda.h>
-
-void ia64_handle_irq (ia64_vector vector, struct pt_regs *regs);
-
-static spinlock_t irq_lock = SPIN_LOCK_UNLOCKED;
-
-#define IRQCPU(irq) ((irq)>>8)
-
-void
-sn_add_polled_interrupt(int irq, int interval)
-{
- unsigned long flags, irq_cnt;
- sn_poll_entry_t *irq_list;
-
- irq_list = pdacpu(IRQCPU(irq)).pda_poll_entries;;
-
- spin_lock_irqsave(&irq_lock, flags);
- irq_cnt = pdacpu(IRQCPU(irq)).pda_poll_entry_count;
- irq_list[irq_cnt].irq = irq;
- irq_list[irq_cnt].interval = interval;
- irq_list[irq_cnt].tick = interval;
- pdacpu(IRQCPU(irq)).pda_poll_entry_count++;
- spin_unlock_irqrestore(&irq_lock, flags);
-
-
-}
-
-void
-sn_delete_polled_interrupt(int irq)
-{
- unsigned long flags, i, irq_cnt;
- sn_poll_entry_t *irq_list;
-
- irq_list = pdacpu(IRQCPU(irq)).pda_poll_entries;
-
- spin_lock_irqsave(&irq_lock, flags);
- irq_cnt = pdacpu(IRQCPU(irq)).pda_poll_entry_count;
- for (i=0; i<irq_cnt; i++) {
- if (irq_list[i].irq == irq) {
- irq_list[i] = irq_list[irq_cnt-1];
- pdacpu(IRQCPU(irq)).pda_poll_entry_count--;
- break;
- }
- }
- spin_unlock_irqrestore(&irq_lock, flags);
-}
-
-void
-sn_irq_poll(int cpu, int reason)
-{
- unsigned long flags, i;
- sn_poll_entry_t *irq_list;
-
-
- ia64_handle_irq(IA64_IPI_VECTOR, 0);
-
- if (reason == 0)
- return;
-
- irq_list = pda->pda_poll_entries;
-
- for (i=0; i<pda->pda_poll_entry_count; i++, irq_list++) {
- if (--irq_list->tick <= 0) {
- irq_list->tick = irq_list->interval;
- local_irq_save(flags);
- ia64_handle_irq(irq_to_vector(irq_list->irq), 0);
- local_irq_restore(flags);
- }
- }
-}
-
-#endif
+++ /dev/null
-#
-# arch/ia64/sn/kernel/sn1/Makefile
-#
-# Copyright (C) 1999,2001-2002 Silicon Graphics, Inc. All rights reserved.
-#
-# This program is free software; you can redistribute it and/or modify it
-# under the terms of version 2 of the GNU General Public License
-# as published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it would be useful, but
-# WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
-#
-# Further, this software is distributed without any warranty that it is
-# free of the rightful claim of any third person regarding infringement
-# or the like. Any license provided herein, whether implied or
-# otherwise, applies only to this software file. Patent licenses, if
-# any, provided herein do not apply to combinations of this program with
-# other software, or any other product whatsoever.
-#
-# You should have received a copy of the GNU General Public
-# License along with this program; if not, write the Free Software
-# Foundation, Inc., 59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
-#
-# Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy,
-# Mountain View, CA 94043, or:
-#
-# http://www.sgi.com
-#
-# For further information regarding this notice, see:
-#
-# http://oss.sgi.com/projects/GenInfo/NoticeExplan
-#
-
-
-EXTRA_CFLAGS := -DLITTLE_ENDIAN
-
-.S.s:
- $(CPP) $(AFLAGS) $(AFLAGS_KERNEL) -o $*.s $<
-.S.o:
- $(CC) $(AFLAGS) $(AFLAGS_KERNEL) -c -o $*.o $<
-
-O_TARGET = sn1.o
-
-obj-y = cache.o error.o iomv.o synergy.o sn1_smp.o
+++ /dev/null
-/*
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Copyright (C) 2001-2002 Silicon Graphics, Inc. All rights reserved.
- *
- */
-
-#include <linux/kernel.h>
-#include <asm/pgalloc.h>
-#include <asm/sn/arch.h>
-#include <asm/sn/sn_cpuid.h>
-#include <asm/sn/sn1/synergy.h>
-#include <asm/delay.h>
-
-#ifndef MB
-#define MB (1024*1024)
-#endif
-
-/*
- * Lock for protecting SYN_TAG_DISABLE_WAY.
- * Consider making this a per-FSB lock.
- */
-static spinlock_t flush_lock = SPIN_LOCK_UNLOCKED;
-
-/**
- * sn_flush_all_caches - flush a range of addresses from all caches (incl. L4)
- * @flush_addr: identity mapped region 7 address to start flushing
- * @bytes: number of bytes to flush
- *
- * Flush a range of addresses from all caches including L4. All addresses
- * fully or partially contained within @flush_addr to @flush_addr + @bytes
- * are flushed from the all caches.
- */
-void
-sn_flush_all_caches(long flush_addr, long bytes)
-{
- ulong addr, baddr, eaddr, bitbucket;
- int way, alias;
-
- /*
- * Because of the way synergy implements "fc", this flushes the
- * data from all caches on all cpus & L4's on OTHER FSBs. It also
- * flushes both cpus on the local FSB. It does NOT flush it from
- * the local FSB.
- */
- flush_icache_range(flush_addr, flush_addr+bytes);
-
- /*
- * Memory DIMMs are a minimum of 256MB and start on 256MB
- * boundaries. Convert the start address to an address
- * that is between +0MB & +128 of the same DIMM.
- * Then add 8MB to skip the uncached MinState areas if the address
- * is on the master node.
- */
- if (bytes > SYNERGY_L4_BYTES_PER_WAY)
- bytes = SYNERGY_L4_BYTES_PER_WAY;
- baddr = TO_NODE(smp_physical_node_id(), PAGE_OFFSET + (flush_addr & (128*MB-1)) + 8*MB);
- eaddr = (baddr+bytes+SYNERGY_BLOCK_SIZE-1) & ~(SYNERGY_BLOCK_SIZE-1);
- baddr = baddr & ~(SYNERGY_BLOCK_SIZE-1);
-
- /*
- * Now flush the local synergy.
- */
- spin_lock(&flush_lock);
- for(way=0; way<SYNERGY_L4_WAYS; way++) {
- WRITE_LOCAL_SYNERGY_REG(SYN_TAG_DISABLE_WAY, 0xffL ^ (1L<<way));
- mb();
- for(alias=0; alias < 9; alias++)
- for(addr=baddr; addr<eaddr; addr+=SYNERGY_BLOCK_SIZE)
- bitbucket = *(volatile ulong *)(addr+alias*8*MB);
- mb();
- }
- WRITE_LOCAL_SYNERGY_REG(SYN_TAG_DISABLE_WAY, 0);
- spin_unlock(&flush_lock);
-
-}
-
-
+++ /dev/null
-/*
- * SN1 Platform specific error Support
- *
- * Copyright (C) 2001-2002 Silicon Graphics, Inc. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it would be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
- *
- * Further, this software is distributed without any warranty that it is
- * free of the rightful claim of any third person regarding infringement
- * or the like. Any license provided herein, whether implied or
- * otherwise, applies only to this software file. Patent licenses, if
- * any, provided herein do not apply to combinations of this program with
- * other software, or any other product whatsoever.
- *
- * You should have received a copy of the GNU General Public
- * License along with this program; if not, write the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
- *
- * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy,
- * Mountain View, CA 94043, or:
- *
- * http://www.sgi.com
- *
- * For further information regarding this notice, see:
- *
- * http://oss.sgi.com/projects/GenInfo/NoticeExplan
- */
-
-#include <linux/kernel.h>
-#include <linux/sched.h>
-#include <linux/mm.h>
-
-#include <asm/ptrace.h>
-#include <linux/devfs_fs_kernel.h>
-#include <asm/smp.h>
-#include <asm/sn/sn_cpuid.h>
-#include <asm/sn/sn1/bedrock.h>
-#include <asm/sn/intr.h>
-#include <asm/sn/addrs.h>
-
-/**
- * snia_error_intr_handler - handle SN specific error interrupts
- * @irq: error interrupt received
- * @devid: device causing the interrupt
- * @pt_regs: saved register state
- *
- * This routine is called when certain interrupts occur on SN systems.
- * It will either recover from the situations that caused the interrupt
- * or panic.
- */
-void
-snia_error_intr_handler(int irq, void *devid, struct pt_regs *pt_regs)
-{
- unsigned long long intpend_val;
- unsigned long long bit;
-
- switch (irq) {
- case SGI_UART_IRQ:
- /*
- * This isn't really an error interrupt. We're just
- * here because we have to do something with them.
- * This is probably wrong, and this code will be
- * removed.
- */
- intpend_val = LOCAL_HUB_L(PI_INT_PEND0);
- if ( (bit = ~(1L<<GFX_INTR_A)) ==
- (intpend_val & ~(1L<<GFX_INTR_A)) ) {
- LOCAL_HUB_CLR_INTR(bit);
- return;
- }
- if ( (bit = ~(1L<<GFX_INTR_B)) ==
- (intpend_val & ~(1L<<GFX_INTR_B)) ) {
- LOCAL_HUB_CLR_INTR(bit);
- return;
- }
- if ( (bit = ~(1L<<PG_MIG_INTR)) ==
- (intpend_val & ~(1L<<PG_MIG_INTR)) ) {
- LOCAL_HUB_CLR_INTR(bit);
- return;
- }
- if ( (bit = ~(1L<<UART_INTR)) ==
- (intpend_val & ~(1L<<UART_INTR)) ) {
- LOCAL_HUB_CLR_INTR(bit);
- return;
- }
- if ( (bit = ~(1L<<CC_PEND_A)) ==
- (intpend_val & ~(1L<<CC_PEND_A)) ) {
- LOCAL_HUB_CLR_INTR(bit);
- return;
- }
- if ( (bit = ~(1L<<CC_PEND_B)) ==
- (intpend_val & ~(1L<<CC_PEND_B)) ) {
- LOCAL_HUB_CLR_INTR(bit);
- return;
- }
- printk("Received SGI_UART_IRQ (65), but no intpend0 bits were set???\n");
- return;
- case SGI_HUB_ERROR_IRQ:
- /*
- * These are mostly error interrupts of various
- * sorts. We need to do more than panic here, but
- * what the heck, this is bring up.
- */
- intpend_val = LOCAL_HUB_L(PI_INT_PEND1);
-
- if ( (bit = ~(1L<<XB_ERROR)) ==
- (intpend_val & ~(1L<<XB_ERROR)) ) {
- LOCAL_HUB_CLR_INTR(bit);
- panic("RECEIVED XB_ERROR on cpu %d, cnode %d\n",
- smp_processor_id(),
- cpuid_to_cnodeid(smp_processor_id()));
- }
- if ( (bit = ~(1L<<LB_ERROR)) ==
- (intpend_val & ~(1L<<LB_ERROR)) ) {
- LOCAL_HUB_CLR_INTR(bit);
- panic("RECEIVED LB_ERROR on cpu %d, cnode %d\n",
- smp_processor_id(),
- cpuid_to_cnodeid(smp_processor_id()));
- }
- if ( (bit = ~(1L<<NACK_INT_A)) ==
- (intpend_val & ~(1L<<NACK_INT_A)) ) {
- LOCAL_HUB_CLR_INTR(bit);
- panic("RECEIVED NACK_INT_A on cpu %d, cnode %d\n",
- smp_processor_id(),
- cpuid_to_cnodeid(smp_processor_id()));
- }
- if ( (bit = ~(1L<<NACK_INT_B)) ==
- (intpend_val & ~(1L<<NACK_INT_B)) ) {
- LOCAL_HUB_CLR_INTR(bit);
- panic("RECEIVED NACK_INT_B on cpu %d, cnode %d\n",
- smp_processor_id(),
- cpuid_to_cnodeid(smp_processor_id()));
- }
- if ( (bit = ~(1L<<CLK_ERR_INTR)) ==
- (intpend_val & ~(1L<<CLK_ERR_INTR)) ) {
- LOCAL_HUB_CLR_INTR(bit);
- panic("RECEIVED CLK_ERR_INTR on cpu %d, cnode %d\n",
- smp_processor_id(),
- cpuid_to_cnodeid(smp_processor_id()));
- }
- if ( (bit = ~(1L<<COR_ERR_INTR_A)) ==
- (intpend_val & ~(1L<<COR_ERR_INTR_A)) ) {
- LOCAL_HUB_CLR_INTR(bit);
- panic("RECEIVED COR_ERR_INTR_A on cpu %d, cnode %d\n",
- smp_processor_id(),
- cpuid_to_cnodeid(smp_processor_id()));
- }
- if ( (bit = ~(1L<<COR_ERR_INTR_B)) ==
- (intpend_val & ~(1L<<COR_ERR_INTR_B)) ) {
- LOCAL_HUB_CLR_INTR(bit);
- panic("RECEIVED COR_ERR_INTR_B on cpu %d, cnode %d\n",
- smp_processor_id(),
- cpuid_to_cnodeid(smp_processor_id()));
- }
- if ( (bit = ~(1L<<MD_COR_ERR_INTR)) ==
- (intpend_val & ~(1L<<MD_COR_ERR_INTR)) ) {
- LOCAL_HUB_CLR_INTR(bit);
- panic("RECEIVED MD_COR_ERR_INTR on cpu %d, cnode %d\n",
- smp_processor_id(),
- cpuid_to_cnodeid(smp_processor_id()));
- }
- if ( (bit = ~(1L<<NI_ERROR_INTR)) ==
- (intpend_val & ~(1L<<NI_ERROR_INTR)) ) {
- LOCAL_HUB_CLR_INTR(bit);
- panic("RECEIVED NI_ERROR_INTR on cpu %d, cnode %d\n",
- smp_processor_id(),
- cpuid_to_cnodeid(smp_processor_id()));
- }
- if ( (bit = ~(1L<<MSC_PANIC_INTR)) ==
- (intpend_val & ~(1L<<MSC_PANIC_INTR)) ) {
- LOCAL_HUB_CLR_INTR(bit);
- panic("RECEIVED MSC_PANIC_INTR on cpu %d, cnode %d\n",
- smp_processor_id(),
- cpuid_to_cnodeid(smp_processor_id()));
- }
- printk("Received SGI_XB_ERROR_IRQ (182) but no intpend1 bits are set???\n");
- return;
- default:
- printk("Received invalid irq in snia_error_intr_handler()\n");
- }
-}
+++ /dev/null
-/*
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Copyright (C) 2000-2002 Silicon Graphics, Inc. All rights reserved.
- */
-
-#include <linux/pci.h>
-#include <asm/io.h>
-#include <asm/sn/simulator.h>
-#include <asm/delay.h>
-#include <asm/sn/pda.h>
-
-/**
- * sn_io_addr - convert an in/out port to an i/o address
- * @port: port to convert
- *
- * Legacy in/out instructions are converted to ld/st instructions
- * on IA64. This routine will convert a port number into a valid
- * SN i/o address. Used by sn_in*() and sn_out*().
- */
-void *
-sn_io_addr(unsigned long port)
-{
- if (!IS_RUNNING_ON_SIMULATOR()) {
- return( (void *) (port | __IA64_UNCACHED_OFFSET));
- } else {
- unsigned long io_base;
- unsigned long addr;
-
- /*
- * word align port, but need more than 10 bits
- * for accessing registers in bedrock local block
- * (so we don't do port&0xfff)
- */
- if ((port >= 0x1f0 && port <= 0x1f7) ||
- port == 0x3f6 || port == 0x3f7) {
- io_base = __IA64_UNCACHED_OFFSET | 0x00000FFFFC000000;
- addr = io_base | ((port >> 2) << 12) | (port & 0xfff);
- } else {
- addr = __ia64_get_io_port_base() | ((port >> 2) << 2);
- }
- return(void *) addr;
- }
-}
-
-/**
- * sn1_mmiob - I/O space memory barrier
- *
- * Acts as a memory mapped I/O barrier for platforms that queue writes to
- * I/O space. This ensures that subsequent writes to I/O space arrive after
- * all previous writes. For most ia64 platforms, this is a simple
- * 'mf.a' instruction. For other platforms, mmiob() may have to read
- * a chipset register to ensure ordering.
- *
- * On SN1, we wait for the PIO_WRITE_STATUS Bedrock register to clear.
- */
-void
-sn1_mmiob (void)
-{
- (volatile unsigned long) (*pda.bedrock_rev_id);
- while (!(volatile unsigned long) (*pda.pio_write_status_addr))
- udelay(5);
-}
+++ /dev/null
-/*
- * SN1 Platform specific SMP Support
- *
- * Copyright (C) 2000-2002 Silicon Graphics, Inc. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it would be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
- *
- * Further, this software is distributed without any warranty that it is
- * free of the rightful claim of any third person regarding infringement
- * or the like. Any license provided herein, whether implied or
- * otherwise, applies only to this software file. Patent licenses, if
- * any, provided herein do not apply to combinations of this program with
- * other software, or any other product whatsoever.
- *
- * You should have received a copy of the GNU General Public
- * License along with this program; if not, write the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
- *
- * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy,
- * Mountain View, CA 94043, or:
- *
- * http://www.sgi.com
- *
- * For further information regarding this notice, see:
- *
- * http://oss.sgi.com/projects/GenInfo/NoticeExplan
- */
-
-#include <linux/config.h>
-#include <linux/init.h>
-#include <linux/kernel.h>
-#include <linux/spinlock.h>
-#include <linux/threads.h>
-#include <linux/sched.h>
-#include <linux/smp.h>
-#include <linux/interrupt.h>
-#include <linux/irq.h>
-#include <linux/mmzone.h>
-
-#include <asm/processor.h>
-#include <asm/irq.h>
-#include <asm/sal.h>
-#include <asm/system.h>
-#include <asm/io.h>
-#include <asm/smp.h>
-#include <asm/hw_irq.h>
-#include <asm/current.h>
-#include <asm/delay.h>
-#include <asm/sn/sn_cpuid.h>
-
-/*
- * The following structure is used to pass params thru smp_call_function
- * to other cpus for flushing TLB ranges.
- */
-typedef struct {
- union {
- struct {
- unsigned long start;
- unsigned long end;
- unsigned long nbits;
- unsigned int rid;
- atomic_t unfinished_count;
- } ptc;
- char pad[SMP_CACHE_BYTES];
- };
-} ptc_params_t;
-
-#define NUMPTC 512
-
-static ptc_params_t ptcParamArray[NUMPTC] __attribute__((__aligned__(128)));
-
-/* use separate cache lines on ptcParamsNextByCpu to avoid false sharing */
-static ptc_params_t *ptcParamsNextByCpu[NR_CPUS*16] __attribute__((__aligned__(128)));
-static volatile ptc_params_t *ptcParamsEmpty __cacheline_aligned;
-
-/*REFERENCED*/
-static spinlock_t ptcParamsLock __cacheline_aligned = SPIN_LOCK_UNLOCKED;
-
-static int ptcInit = 0;
-#ifdef PTCDEBUG
-static int ptcParamsAllBusy = 0; /* debugging/statistics */
-static int ptcCountBacklog = 0;
-static int ptcBacklog[NUMPTC+1];
-static char ptcParamsCounts[NR_CPUS][NUMPTC] __attribute__((__aligned__(128)));
-static char ptcParamsResults[NR_CPUS][NUMPTC] __attribute__((__aligned__(128)));
-#endif
-
-/*
- * Make smp_send_flush_tlbsmp_send_flush_tlb() a weak reference,
- * so that we get a clean compile with the ia64 patch without the
- * actual SN1 specific code in arch/ia64/kernel/smp.c.
- */
-extern void smp_send_flush_tlb (void) __attribute((weak));
-
-/*
- * The following table/struct is for remembering PTC coherency domains. It
- * is also used to translate sapicid into cpuids. We don't want to start
- * cpus unless we know their cache domain.
- */
-#ifdef PTC_NOTYET
-sn_sapicid_info_t sn_sapicid_info[NR_CPUS];
-#endif
-
-/**
- * sn1_ptc_l_range - purge local translation cache
- * @start: start of virtual address range
- * @end: end of virtual address range
- * @nbits: specifies number of bytes to purge per instruction (num = 1<<(nbits & 0xfc))
- *
- * Purges the range specified from the local processor's translation cache
- * (as opposed to the translation registers). Note that more than the specified
- * range *may* be cleared from the cache by some processors.
- *
- * This is probably not good enough, but I don't want to try to make it better
- * until I get some statistics on a running system. At a minimum, we should only
- * send IPIs to 1 processor in each TLB domain & have it issue a ptc.g on it's
- * own FSB. Also, we only have to serialize per FSB, not globally.
- *
- * More likely, we will have to do some work to reduce the frequency of calls to
- * this routine.
- */
-static inline void
-sn1_ptc_l_range(unsigned long start, unsigned long end, unsigned long nbits)
-{
- do {
- __asm__ __volatile__ ("ptc.l %0,%1" :: "r"(start), "r"(nbits<<2) : "memory");
- start += (1UL << nbits);
- } while (start < end);
- ia64_srlz_d();
-}
-
-/**
- * sn1_received_flush_tlb - cpu tlb flush routine
- *
- * Flushes the TLB of a given processor.
- */
-void
-sn1_received_flush_tlb(void)
-{
- unsigned long start, end, nbits;
- unsigned int rid, saved_rid;
- int cpu = smp_processor_id();
- int result;
- ptc_params_t *ptcParams;
-
- ptcParams = ptcParamsNextByCpu[cpu*16];
- if (ptcParams == ptcParamsEmpty)
- return;
-
- do {
- start = ptcParams->ptc.start;
- saved_rid = (unsigned int) ia64_get_rr(start);
- end = ptcParams->ptc.end;
- nbits = ptcParams->ptc.nbits;
- rid = ptcParams->ptc.rid;
-
- if (saved_rid != rid) {
- ia64_set_rr(start, (unsigned long)rid);
- ia64_srlz_d();
- }
-
- sn1_ptc_l_range(start, end, nbits);
-
- if (saved_rid != rid)
- ia64_set_rr(start, (unsigned long)saved_rid);
-
- ia64_srlz_i();
-
- result = atomic_dec(&ptcParams->ptc.unfinished_count);
-#ifdef PTCDEBUG
- {
- int i = ptcParams-&ptcParamArray[0];
- ptcParamsResults[cpu][i] = (char) result;
- ptcParamsCounts[cpu][i]++;
- }
-#endif /* PTCDEBUG */
-
- if (++ptcParams == &ptcParamArray[NUMPTC])
- ptcParams = &ptcParamArray[0];
-
- } while (ptcParams != ptcParamsEmpty);
-
- ptcParamsNextByCpu[cpu*16] = ptcParams;
-}
-
-/**
- * sn1_global_tlb_purge - flush a translation cache range on all processors
- * @start: start of virtual address range to flush
- * @end: end of virtual address range
- * @nbits: specifies number of bytes to purge per instruction (num = 1<<(nbits & 0xfc))
- *
- * Flushes the translation cache of all processors from @start to @end.
- */
-void
-sn1_global_tlb_purge (unsigned long start, unsigned long end, unsigned long nbits)
-{
- ptc_params_t *params;
- ptc_params_t *next;
- unsigned long irqflags;
-#ifdef PTCDEBUG
- ptc_params_t *nextnext;
- int backlog = 0;
-#endif
-
- if (smp_num_cpus == 1) {
- sn1_ptc_l_range(start, end, nbits);
- return;
- }
-
- if (in_interrupt()) {
- /*
- * If at interrupt level and cannot get spinlock,
- * then do something useful by flushing own tlbflush queue
- * so as to avoid a possible deadlock.
- */
- while (!spin_trylock(&ptcParamsLock)) {
- local_irq_save(irqflags);
- sn1_received_flush_tlb();
- local_irq_restore(irqflags);
- udelay(10); /* take it easier on the bus */
- }
- } else {
- spin_lock(&ptcParamsLock);
- }
-
- if (!ptcInit) {
- int cpu;
- ptcInit = 1;
- memset(ptcParamArray, 0, sizeof(ptcParamArray));
- ptcParamsEmpty = &ptcParamArray[0];
- for (cpu=0; cpu<NR_CPUS; cpu++)
- ptcParamsNextByCpu[cpu*16] = &ptcParamArray[0];
-
-#ifdef PTCDEBUG
- memset(ptcBacklog, 0, sizeof(ptcBacklog));
- memset(ptcParamsCounts, 0, sizeof(ptcParamsCounts));
- memset(ptcParamsResults, 0, sizeof(ptcParamsResults));
-#endif /* PTCDEBUG */
- }
-
- params = (ptc_params_t *) ptcParamsEmpty;
- next = (ptc_params_t *) ptcParamsEmpty + 1;
- if (next == &ptcParamArray[NUMPTC])
- next = &ptcParamArray[0];
-
-#ifdef PTCDEBUG
- nextnext = next + 1;
- if (nextnext == &ptcParamArray[NUMPTC])
- nextnext = &ptcParamArray[0];
-
- if (ptcCountBacklog) {
- /* quick count of backlog */
- ptc_params_t *ptr;
-
- /* check the current pointer to the beginning */
- ptr = params;
- while(--ptr >= &ptcParamArray[0]) {
- if (atomic_read(&ptr->ptc.unfinished_count) == 0)
- break;
- ++backlog;
- }
-
- if (backlog) {
- /* check the end of the array */
- ptr = &ptcParamArray[NUMPTC];
- while (--ptr > params) {
- if (atomic_read(&ptr->ptc.unfinished_count) == 0)
- break;
- ++backlog;
- }
- }
- ptcBacklog[backlog]++;
- }
-#endif /* PTCDEBUG */
-
- /* wait for the next entry to clear...should be rare */
- if (atomic_read(&next->ptc.unfinished_count) > 0) {
-#ifdef PTCDEBUG
- ptcParamsAllBusy++;
-
- if (atomic_read(&nextnext->ptc.unfinished_count) == 0) {
- if (atomic_read(&next->ptc.unfinished_count) > 0) {
- panic("\nnonzero next zero nextnext %lx %lx\n",
- (long)next, (long)nextnext);
- }
- }
-#endif
-
- /* it could be this cpu that is behind */
- local_irq_save(irqflags);
- sn1_received_flush_tlb();
- local_irq_restore(irqflags);
-
- /* now we know it's not this cpu, so just wait */
- while (atomic_read(&next->ptc.unfinished_count) > 0) {
- barrier();
- }
- }
-
- params->ptc.start = start;
- params->ptc.end = end;
- params->ptc.nbits = nbits;
- params->ptc.rid = (unsigned int) ia64_get_rr(start);
- atomic_set(¶ms->ptc.unfinished_count, smp_num_cpus);
-
- /* The atomic_set above can hit memory *after* the update
- * to ptcParamsEmpty below, which opens a timing window
- * that other cpus can squeeze into!
- */
- mb();
-
- /* everything is ready to process:
- * -- global lock is held
- * -- new entry + 1 is free
- * -- new entry is set up
- * so now:
- * -- update the global next pointer
- * -- unlock the global lock
- * -- send IPI to notify other cpus
- * -- process the data ourselves
- */
- ptcParamsEmpty = next;
- spin_unlock(&ptcParamsLock);
- smp_send_flush_tlb();
-
- local_irq_save(irqflags);
- sn1_received_flush_tlb();
- local_irq_restore(irqflags);
-
- /* Currently we don't think global TLB purges need to be atomic.
- * All CPUs get sent IPIs, so if they haven't done the purge,
- * they're busy with interrupts that are at the IPI level, which is
- * priority 15. We're asserting that any code at that level
- * shouldn't be using user TLB entries. To change this to wait
- * for all the flushes to complete, enable the following code.
- */
-#if defined(SN1_SYNCHRONOUS_GLOBAL_TLB_PURGE) || defined(BUS_INT_WAR)
- /* this code is not tested */
- /* wait for the flush to complete */
- while (atomic_read(¶ms->ptc.unfinished_count) > 0)
- barrier();
-#endif
-}
-
-/**
- * sn_send_IPI_phys - send an IPI to a Nasid and slice
- * @physid: physical cpuid to receive the interrupt.
- * @vector: command to send
- * @delivery_mode: delivery mechanism
- *
- * Sends an IPI (interprocessor interrupt) to the processor specified by
- * @physid
- *
- * @delivery_mode can be one of the following
- *
- * %IA64_IPI_DM_INT - pend an interrupt
- * %IA64_IPI_DM_PMI - pend a PMI
- * %IA64_IPI_DM_NMI - pend an NMI
- * %IA64_IPI_DM_INIT - pend an INIT interrupt
- */
-void
-sn_send_IPI_phys(long physid, int vector, int delivery_mode)
-{
- long *p;
- long nasid, slice;
-
- static int off[4] = {0x1800080, 0x1800088, 0x1a00080, 0x1a00088};
-
-#ifdef BUS_INT_WAR
- if (vector != ap_wakeup_vector) {
- return;
- }
-#endif
-
- nasid = cpu_physical_id_to_nasid(physid);
- slice = cpu_physical_id_to_slice(physid);
-
- p = (long*)(0xc0000a0000000000LL | (nasid<<33) | off[slice]);
-
- mb();
- *p = (delivery_mode << 8) | (vector & 0xff);
-}
-
-
-/**
- * sn1_send_IPI - send an IPI to a processor
- * @cpuid: target of the IPI
- * @vector: command to send
- * @delivery_mode: delivery mechanism
- * @redirect: redirect the IPI?
- *
- * Sends an IPI (interprocessor interrupt) to the processor specified by
- * @cpuid. @delivery_mode can be one of the following
- *
- * %IA64_IPI_DM_INT - pend an interrupt
- * %IA64_IPI_DM_PMI - pend a PMI
- * %IA64_IPI_DM_NMI - pend an NMI
- * %IA64_IPI_DM_INIT - pend an INIT interrupt
- */
-void
-sn1_send_IPI(int cpuid, int vector, int delivery_mode, int redirect)
-{
- long physid;
-
- physid = cpu_physical_id(cpuid);
-
- sn_send_IPI_phys(physid, vector, delivery_mode);
-}
-#ifdef CONFIG_SMP
-
-#ifdef PTC_NOTYET
-static void __init
-process_sal_ptc_domain_info(ia64_sal_ptc_domain_info_t *di, int domain)
-{
- ia64_sal_ptc_domain_proc_entry_t *pe;
- int i, sapicid, cpuid;
-
- pe = __va(di->proc_list);
- for (i=0; i<di->proc_count; i++, pe++) {
- sapicid = id_eid_to_sapicid(pe->id, pe->eid);
- cpuid = cpu_logical_id(sapicid);
- sn_sapicid_info[cpuid].domain = domain;
- sn_sapicid_info[cpuid].sapicid = sapicid;
- }
-}
-
-
-static void __init
-process_sal_desc_ptc(ia64_sal_desc_ptc_t *ptc)
-{
- ia64_sal_ptc_domain_info_t *di;
- int i;
-
- di = __va(ptc->domain_info);
- for (i=0; i<ptc->num_domains; i++, di++) {
- process_sal_ptc_domain_info(di, i);
- }
-}
-#endif /* PTC_NOTYET */
-
-/**
- * init_sn1_smp_config - setup PTC domains per processor
- */
-void __init
-init_sn1_smp_config(void)
-{
- if (!ia64_ptc_domain_info) {
- printk("SMP: Can't find PTC domain info. Forcing UP mode\n");
- smp_num_cpus = 1;
- return;
- }
-
-#ifdef PTC_NOTYET
- memset (sn_sapicid_info, -1, sizeof(sn_sapicid_info));
- process_sal_desc_ptc(ia64_ptc_domain_info);
-#endif
-}
-
-#else /* CONFIG_SMP */
-
-void __init
-init_sn1_smp_config(void)
-{
-
-#ifdef PTC_NOTYET
- sn_sapicid_info[0].sapicid = hard_smp_processor_id();
-#endif
-}
-
-#endif /* CONFIG_SMP */
+++ /dev/null
-/*
- * SN1 Platform specific synergy Support
- *
- * Copyright (C) 2000-2002 Silicon Graphics, Inc. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it would be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
- *
- * Further, this software is distributed without any warranty that it is
- * free of the rightful claim of any third person regarding infringement
- * or the like. Any license provided herein, whether implied or
- * otherwise, applies only to this software file. Patent licenses, if
- * any, provided herein do not apply to combinations of this program with
- * other software, or any other product whatsoever.
- *
- * You should have received a copy of the GNU General Public
- * License along with this program; if not, write the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
- *
- * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy,
- * Mountain View, CA 94043, or:
- *
- * http://www.sgi.com
- *
- * For further information regarding this notice, see:
- *
- * http://oss.sgi.com/projects/GenInfo/NoticeExplan
- */
-
-#include <linux/kernel.h>
-#include <linux/sched.h>
-#include <linux/mm.h>
-#include <linux/spinlock.h>
-#include <linux/proc_fs.h>
-
-#include <asm/ptrace.h>
-#include <linux/devfs_fs_kernel.h>
-#include <asm/smp.h>
-#include <asm/sn/sn_cpuid.h>
-#include <asm/sn/sn1/bedrock.h>
-#include <asm/sn/intr.h>
-#include <asm/sn/addrs.h>
-#include <asm/sn/nodepda.h>
-#include <asm/sn/sn1/synergy.h>
-#include <asm/sn/sndrv.h>
-
-int bit_pos_to_irq(int bit);
-void setclear_mask_b(int irq, int cpuid, int set);
-void setclear_mask_a(int irq, int cpuid, int set);
-void * kmalloc(size_t size, int flags);
-
-static int synergy_perf_initialized = 0;
-
-void
-synergy_intr_alloc(int bit, int cpuid) {
- return;
-}
-
-int
-synergy_intr_connect(int bit,
- int cpuid)
-{
- int irq;
- unsigned is_b;
-
- irq = bit_pos_to_irq(bit);
-
- is_b = (cpuid_to_slice(cpuid)) & 1;
- if (is_b) {
- setclear_mask_b(irq,cpuid,1);
- setclear_mask_a(irq,cpuid, 0);
- } else {
- setclear_mask_a(irq, cpuid, 1);
- setclear_mask_b(irq, cpuid, 0);
- }
- return 0;
-}
-void
-setclear_mask_a(int irq, int cpuid, int set)
-{
- int synergy;
- int nasid;
- int reg_num;
- unsigned long mask;
- unsigned long addr;
- unsigned long reg;
- unsigned long val;
- int my_cnode, my_synergy;
- int target_cnode, target_synergy;
-
- /*
- * Perform some idiot checks ..
- */
- if ( (irq < 0) || (irq > 255) ||
- (cpuid < 0) || (cpuid > 512) ) {
- printk("clear_mask_a: Invalid parameter irq %d cpuid %d\n", irq, cpuid);
- return;
- }
-
- target_cnode = cpuid_to_cnodeid(cpuid);
- target_synergy = cpuid_to_synergy(cpuid);
- my_cnode = cpuid_to_cnodeid(smp_processor_id());
- my_synergy = cpuid_to_synergy(smp_processor_id());
-
- reg_num = irq / 64;
- mask = 1;
- mask <<= (irq % 64);
- switch (reg_num) {
- case 0:
- reg = VEC_MASK0A;
- addr = VEC_MASK0A_ADDR;
- break;
- case 1:
- reg = VEC_MASK1A;
- addr = VEC_MASK1A_ADDR;
- break;
- case 2:
- reg = VEC_MASK2A;
- addr = VEC_MASK2A_ADDR;
- break;
- case 3:
- reg = VEC_MASK3A;
- addr = VEC_MASK3A_ADDR;
- break;
- default:
- reg = addr = 0;
- break;
- }
- if (my_cnode == target_cnode && my_synergy == target_synergy) {
- // local synergy
- val = READ_LOCAL_SYNERGY_REG(addr);
- if (set) {
- val |= mask;
- } else {
- val &= ~mask;
- }
- WRITE_LOCAL_SYNERGY_REG(addr, val);
- val = READ_LOCAL_SYNERGY_REG(addr);
- } else { /* remote synergy */
- synergy = cpuid_to_synergy(cpuid);
- nasid = cpuid_to_nasid(cpuid);
- val = REMOTE_SYNERGY_LOAD(nasid, synergy, reg);
- if (set) {
- val |= mask;
- } else {
- val &= ~mask;
- }
- REMOTE_SYNERGY_STORE(nasid, synergy, reg, val);
- }
-}
-
-void
-setclear_mask_b(int irq, int cpuid, int set)
-{
- int synergy;
- int nasid;
- int reg_num;
- unsigned long mask;
- unsigned long addr;
- unsigned long reg;
- unsigned long val;
- int my_cnode, my_synergy;
- int target_cnode, target_synergy;
-
- /*
- * Perform some idiot checks ..
- */
- if ( (irq < 0) || (irq > 255) ||
- (cpuid < 0) || (cpuid > 512) ) {
- printk("clear_mask_b: Invalid parameter irq %d cpuid %d\n", irq, cpuid);
- return;
- }
-
- target_cnode = cpuid_to_cnodeid(cpuid);
- target_synergy = cpuid_to_synergy(cpuid);
- my_cnode = cpuid_to_cnodeid(smp_processor_id());
- my_synergy = cpuid_to_synergy(smp_processor_id());
-
- reg_num = irq / 64;
- mask = 1;
- mask <<= (irq % 64);
- switch (reg_num) {
- case 0:
- reg = VEC_MASK0B;
- addr = VEC_MASK0B_ADDR;
- break;
- case 1:
- reg = VEC_MASK1B;
- addr = VEC_MASK1B_ADDR;
- break;
- case 2:
- reg = VEC_MASK2B;
- addr = VEC_MASK2B_ADDR;
- break;
- case 3:
- reg = VEC_MASK3B;
- addr = VEC_MASK3B_ADDR;
- break;
- default:
- reg = addr = 0;
- break;
- }
- if (my_cnode == target_cnode && my_synergy == target_synergy) {
- // local synergy
- val = READ_LOCAL_SYNERGY_REG(addr);
- if (set) {
- val |= mask;
- } else {
- val &= ~mask;
- }
- WRITE_LOCAL_SYNERGY_REG(addr, val);
- val = READ_LOCAL_SYNERGY_REG(addr);
- } else { /* remote synergy */
- synergy = cpuid_to_synergy(cpuid);
- nasid = cpuid_to_nasid(cpuid);
- val = REMOTE_SYNERGY_LOAD(nasid, synergy, reg);
- if (set) {
- val |= mask;
- } else {
- val &= ~mask;
- }
- REMOTE_SYNERGY_STORE(nasid, synergy, reg, val);
- }
-}
-
-/*
- * Synergy perf stats. Multiplexed via timer_interrupt.
- */
-
-static int
-synergy_perf_append(uint64_t modesel)
-{
- int cnode;
- nodepda_t *npdap;
- synergy_perf_t *p;
- int checked = 0;
- int err = 0;
- unsigned long flags;
-
- /* bit 45 is enable */
- modesel |= (1UL << 45);
-
- for (cnode=0; cnode < numnodes; cnode++) {
- /* for each node, insert a new synergy_perf entry */
- if ((npdap = NODEPDA(cnode)) == NULL) {
- printk("synergy_perf_append: cnode=%d NODEPDA(cnode)==NULL, nodepda=%p\n", cnode, (void *)nodepda);
- continue;
- }
-
- if (npdap->synergy_perf_enabled) {
- /* user must disable counting to append new events */
- err = -EBUSY;
- break;
- }
-
- if (!checked && npdap->synergy_perf_data != NULL) {
- checked = 1;
- for (p = npdap->synergy_perf_first; ;) {
- if (p->modesel == modesel)
- return 0; /* event already registered */
- if ((p = p->next) == npdap->synergy_perf_first)
- break;
- }
- }
-
- /* XX use kmem_alloc_node() when it is implemented */
- p = (synergy_perf_t *)kmalloc(sizeof(synergy_perf_t), GFP_KERNEL);
- if ((((uint64_t)p) & 7UL) != 0)
- BUG(); /* bad alignment */
- if (p == NULL) {
- err = -ENOMEM;
- break;
- }
- else {
- memset(p, 0, sizeof(synergy_perf_t));
- p->modesel = modesel;
-
- spin_lock_irqsave(&npdap->synergy_perf_lock, flags);
- if (npdap->synergy_perf_data == NULL) {
- /* circular list */
- p->next = p;
- npdap->synergy_perf_first = p;
- npdap->synergy_perf_data = p;
- }
- else {
- p->next = npdap->synergy_perf_data->next;
- npdap->synergy_perf_data->next = p;
- }
- spin_unlock_irqrestore(&npdap->synergy_perf_lock, flags);
- }
- }
-
- return err;
-}
-
-static void
-synergy_perf_set_freq(int freq)
-{
- int cnode;
- nodepda_t *npdap;
-
- for (cnode=0; cnode < numnodes; cnode++) {
- if ((npdap = NODEPDA(cnode)) != NULL)
- npdap->synergy_perf_freq = freq;
- }
-}
-
-static void
-synergy_perf_set_enable(int enable)
-{
- int cnode;
- nodepda_t *npdap;
-
- for (cnode=0; cnode < numnodes; cnode++) {
- if ((npdap = NODEPDA(cnode)) != NULL)
- npdap->synergy_perf_enabled = enable;
- }
- printk("NOTICE: synergy perf counting %sabled on all nodes\n", enable ? "en" : "dis");
-}
-
-static int
-synergy_perf_size(nodepda_t *npdap)
-{
- synergy_perf_t *p;
- int n;
-
- if (npdap->synergy_perf_enabled == 0) {
- /* no stats to return */
- return 0;
- }
-
- spin_lock_irq(&npdap->synergy_perf_lock);
- for (n=0, p = npdap->synergy_perf_first; p;) {
- n++;
- p = p->next;
- if (p == npdap->synergy_perf_first)
- break;
- }
- spin_unlock_irq(&npdap->synergy_perf_lock);
-
- /* bytes == n pairs of {event,counter} */
- return n * 2 * sizeof(uint64_t);
-}
-
-static int
-synergy_perf_ioctl(struct inode *inode, struct file *file,
- unsigned int cmd, unsigned long arg)
-{
- int cnode;
- nodepda_t *npdap;
- synergy_perf_t *p;
- int intarg;
- int fsb;
- uint64_t longarg;
- uint64_t *stats;
- int n;
- devfs_handle_t d;
- arbitrary_info_t info;
-
- if ((d = devfs_get_handle_from_inode(inode)) == NULL)
- return -ENODEV;
- info = hwgraph_fastinfo_get(d);
-
- cnode = SYNERGY_PERF_INFO_CNODE(info);
- fsb = SYNERGY_PERF_INFO_FSB(info);
- npdap = NODEPDA(cnode);
-
- switch (cmd) {
- case SNDRV_GET_SYNERGY_VERSION:
- /* return int, version of data structure for SNDRV_GET_SYNERGYINFO */
- intarg = 1; /* version 1 */
- if (copy_to_user((void *)arg, &intarg, sizeof(intarg)))
- return -EFAULT;
- break;
-
- case SNDRV_GET_INFOSIZE:
- /* return int, sizeof buf needed for SYNERGY_PERF_GET_STATS */
- intarg = synergy_perf_size(npdap);
- if (copy_to_user((void *)arg, &intarg, sizeof(intarg)))
- return -EFAULT;
- break;
-
- case SNDRV_GET_SYNERGYINFO:
- /* return array of event/value pairs, this node only */
- if ((intarg = synergy_perf_size(npdap)) <= 0)
- return -ENODATA;
- if ((stats = (uint64_t *)kmalloc(intarg, GFP_KERNEL)) == NULL)
- return -ENOMEM;
- spin_lock_irq(&npdap->synergy_perf_lock);
- for (n=0, p = npdap->synergy_perf_first; p;) {
- stats[n++] = p->modesel;
- if (p->intervals > 0)
- stats[n++] = p->counts[fsb] * p->total_intervals / p->intervals;
- else
- stats[n++] = 0;
- p = p->next;
- if (p == npdap->synergy_perf_first)
- break;
- }
- spin_unlock_irq(&npdap->synergy_perf_lock);
-
- if (copy_to_user((void *)arg, stats, intarg)) {
- kfree(stats);
- return -EFAULT;
- }
-
- kfree(stats);
- break;
-
- case SNDRV_SYNERGY_APPEND:
- /* reads 64bit event, append synergy perf event to all nodes */
- if (copy_from_user(&longarg, (void *)arg, sizeof(longarg)))
- return -EFAULT;
- return synergy_perf_append(longarg);
- break;
-
- case SNDRV_GET_SYNERGY_STATUS:
- /* return int, 1 if enabled else 0 */
- intarg = npdap->synergy_perf_enabled;
- if (copy_to_user((void *)arg, &intarg, sizeof(intarg)))
- return -EFAULT;
- break;
-
- case SNDRV_SYNERGY_ENABLE:
- /* read int, if true enable counting else disable */
- if (copy_from_user(&intarg, (void *)arg, sizeof(intarg)))
- return -EFAULT;
- synergy_perf_set_enable(intarg);
- break;
-
- case SNDRV_SYNERGY_FREQ:
- /* read int, set jiffies per update */
- if (copy_from_user(&intarg, (void *)arg, sizeof(intarg)))
- return -EFAULT;
- if (intarg < 0 || intarg >= HZ)
- return -EINVAL;
- synergy_perf_set_freq(intarg);
- break;
-
- default:
- printk("Warning: invalid ioctl %d on synergy mon for cnode=%d fsb=%d\n", cmd, cnode, fsb);
- return -EINVAL;
- }
- return(0);
-}
-
-struct file_operations synergy_mon_fops = {
- .ioctl = synergy_perf_ioctl,
-};
-
-void
-synergy_perf_update(int cpu)
-{
- nasid_t nasid;
- cnodeid_t cnode;
- struct nodepda_s *npdap;
-
- /*
- * synergy_perf_initialized is set by synergy_perf_init()
- * which is called last thing by sn_mp_setup(), i.e. well
- * after nodepda has been initialized.
- */
- if (!synergy_perf_initialized)
- return;
-
- cnode = cpuid_to_cnodeid(cpu);
- npdap = NODEPDA(cnode);
-
- if (npdap == NULL || cnode < 0 || cnode >= numnodes)
- /* this should not happen: still in early io init */
- return;
-
-#if 0
- /* use this to check nodepda initialization */
- if (((uint64_t)npdap) & 0x7) {
- printk("\nERROR on cpu %d : cnode=%d, npdap == %p, not aligned\n", cpu, cnode, npdap);
- BUG();
- }
-#endif
-
- if (npdap->synergy_perf_enabled == 0 || npdap->synergy_perf_data == NULL) {
- /* Not enabled, or no events to monitor */
- return;
- }
-
- if (npdap->synergy_inactive_intervals++ % npdap->synergy_perf_freq != 0) {
- /* don't multiplex on every timer interrupt */
- return;
- }
-
- /*
- * Read registers for last interval and increment counters.
- * Hold the per-node synergy_perf_lock so concurrent readers get
- * consistent values.
- */
- spin_lock_irq(&npdap->synergy_perf_lock);
-
- nasid = cpuid_to_nasid(cpu);
- npdap->synergy_active_intervals++;
- npdap->synergy_perf_data->intervals++;
- npdap->synergy_perf_data->total_intervals = npdap->synergy_active_intervals;
-
- npdap->synergy_perf_data->counts[0] += 0xffffffffffUL &
- REMOTE_SYNERGY_LOAD(nasid, 0, PERF_CNTR0_A);
-
- npdap->synergy_perf_data->counts[1] += 0xffffffffffUL &
- REMOTE_SYNERGY_LOAD(nasid, 1, PERF_CNTR0_B);
-
- /* skip to next in circular list */
- npdap->synergy_perf_data = npdap->synergy_perf_data->next;
-
- spin_unlock_irq(&npdap->synergy_perf_lock);
-
- /* set the counter 0 selection modes for both A and B */
- REMOTE_SYNERGY_STORE(nasid, 0, PERF_CNTL0_A, npdap->synergy_perf_data->modesel);
- REMOTE_SYNERGY_STORE(nasid, 1, PERF_CNTL0_B, npdap->synergy_perf_data->modesel);
-
- /* and reset the counter registers to zero */
- REMOTE_SYNERGY_STORE(nasid, 0, PERF_CNTR0_A, 0UL);
- REMOTE_SYNERGY_STORE(nasid, 1, PERF_CNTR0_B, 0UL);
-}
-
-void
-synergy_perf_init(void)
-{
- printk("synergy_perf_init(), counting is initially disabled\n");
- synergy_perf_initialized++;
-}
EXTRA_CFLAGS := -DLITTLE_ENDIAN
-obj-y += cache.o iomv.o ptc_deadlock.o sn2_smp.o sn_proc_fs.o
+obj-y += cache.o io.o ptc_deadlock.o sn2_smp.o sn_proc_fs.o \
+ prominfo_proc.o timer.o
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
- * Copyright (C) 2001-2002 Silicon Graphics, Inc. All rights reserved.
+ * Copyright (C) 2001-2003 Silicon Graphics, Inc. All rights reserved.
*
*/
sn_flush_all_caches(long flush_addr, long bytes)
{
flush_icache_range(flush_addr, flush_addr+bytes);
+ mb();
}
-
-
* we wrap the inlines from asm/ia64/sn/sn2/io.h here.
*/
-#include <linux/config.h>
-#include <linux/types.h>
-
#include <asm/sn/sn2/io.h>
-#ifdef CONFIG_IA64_GENERIC
-
unsigned int
sn_inb (unsigned long port)
{
unsigned long
sn_readq (void *addr)
{
- return __sn_readq (addr)
+ return __sn_readq (addr);
}
asm ("__sn_readw = sn_readw");
asm ("__sn_readl = sn_readl");
asm ("__sn_readq = sn_readq");
-
-#endif /* CONFIG_IA64_GENERIC */
+++ /dev/null
-/*
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Copyright (C) 2000-2003 Silicon Graphics, Inc. All rights reserved.
- */
-
-#include <linux/pci.h>
-#include <linux/module.h>
-#include <asm/io.h>
-#include <asm/delay.h>
-#include <asm/sn/simulator.h>
-#include <asm/sn/pda.h>
-#include <asm/sn/sn_cpuid.h>
-
-/**
- * sn_io_addr - convert an in/out port to an i/o address
- * @port: port to convert
- *
- * Legacy in/out instructions are converted to ld/st instructions
- * on IA64. This routine will convert a port number into a valid
- * SN i/o address. Used by sn_in*() and sn_out*().
- */
-void *
-sn_io_addr(unsigned long port)
-{
- if (!IS_RUNNING_ON_SIMULATOR()) {
- return( (void *) (port | __IA64_UNCACHED_OFFSET));
- } else {
- unsigned long io_base;
- unsigned long addr;
-
- /*
- * word align port, but need more than 10 bits
- * for accessing registers in bedrock local block
- * (so we don't do port&0xfff)
- */
- if ((port >= 0x1f0 && port <= 0x1f7) ||
- port == 0x3f6 || port == 0x3f7) {
- io_base = (0xc000000fcc000000 | ((unsigned long)get_nasid() << 38));
- addr = io_base | ((port >> 2) << 12) | (port & 0xfff);
- } else {
- addr = __ia64_get_io_port_base() | ((port >> 2) << 2);
- }
- return(void *) addr;
- }
-}
-
-EXPORT_SYMBOL(sn_io_addr);
-
-/**
- * sn_mmiob - I/O space memory barrier
- *
- * Acts as a memory mapped I/O barrier for platforms that queue writes to
- * I/O space. This ensures that subsequent writes to I/O space arrive after
- * all previous writes. For most ia64 platforms, this is a simple
- * 'mf.a' instruction. For other platforms, mmiob() may have to read
- * a chipset register to ensure ordering.
- *
- * On SN2, we wait for the PIO_WRITE_STATUS SHub register to clear.
- * See PV 871084 for details about the WAR about zero value.
- *
- */
-void
-sn_mmiob (void)
-{
- while ((((volatile unsigned long) (*pda.pio_write_status_addr)) & SH_PIO_WRITE_STATUS_0_PENDING_WRITE_COUNT_MASK) !=
- SH_PIO_WRITE_STATUS_0_PENDING_WRITE_COUNT_MASK)
- udelay(1);
-}
--- /dev/null
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1999,2001-2003 Silicon Graphics, Inc. All Rights Reserved.
+ *
+ * Module to export the system's Firmware Interface Tables, including
+ * PROM revision numbers, in /proc
+ */
+#include <linux/config.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/proc_fs.h>
+#include <asm/io.h>
+#include <asm/sn/simulator.h>
+
+/* to lookup nasids */
+#include <asm/sn/sn_cpuid.h>
+
+MODULE_DESCRIPTION("PROM version reporting for /proc");
+MODULE_AUTHOR("Chad Talbott");
+MODULE_LICENSE("GPL");
+
+#undef DEBUG_PROMINFO
+
+#define TRACE_PROMINFO
+
+#if defined(DEBUG_PROMINFO)
+# define DPRINTK(x...) printk(KERN_DEBUG x)
+#else
+# define DPRINTK(x...)
+#endif
+
+#if defined(TRACE_PROMINFO) && defined(DEBUG_PROMINFO)
+# if defined(__GNUC__)
+# define TRACE() printk(KERN_DEBUG "%s:%d:%s\n", \
+ __FILE__, __LINE__, __FUNCTION__)
+# else
+# define TRACE() printk(KERN_DEBUG "%s:%d\n", __LINE__, __FILE__)
+# endif
+#else
+# define TRACE()
+#endif
+
+/* Sub-regions determined by bits in Node Offset */
+#define LB_PROM_SPACE 0x0000000700000000ul /* Local LB PROM */
+
+#define FIT_SIGNATURE 0x2020205f5449465ful
+/* Standard Intel FIT entry types */
+#define FIT_ENTRY_FIT_HEADER 0x00 /* FIT header entry */
+#define FIT_ENTRY_PAL_B 0x01 /* PAL_B entry */
+/* Entries 0x02 through 0x0D reserved by Intel */
+#define FIT_ENTRY_PAL_A_PROC 0x0E /* Processor-specific PAL_A entry */
+#define FIT_ENTRY_PAL_A 0x0F /* PAL_A entry, same as... */
+#define FIT_ENTRY_PAL_A_GEN 0x0F /* ...Generic PAL_A entry */
+#define FIT_ENTRY_UNUSED 0x7F /* Unused (reserved by Intel?) */
+/* OEM-defined entries range from 0x10 to 0x7E. */
+#define FIT_ENTRY_SAL_A 0x10 /* SAL_A entry */
+#define FIT_ENTRY_SAL_B 0x11 /* SAL_B entry */
+#define FIT_ENTRY_SALRUNTIME 0x12 /* SAL runtime entry */
+#define FIT_ENTRY_EFI 0x1F /* EFI entry */
+#define FIT_ENTRY_FPSWA 0x20 /* embedded fpswa entry */
+#define FIT_ENTRY_VMLINUX 0x21 /* embedded vmlinux entry */
+
+#define FIT_MAJOR_SHIFT (32 + 8)
+#define FIT_MAJOR_MASK ((1 << 8) - 1)
+#define FIT_MINOR_SHIFT 32
+#define FIT_MINOR_MASK ((1 << 8) - 1)
+
+#define FIT_MAJOR(q) \
+ ((unsigned) ((q) >> FIT_MAJOR_SHIFT) & FIT_MAJOR_MASK)
+#define FIT_MINOR(q) \
+ ((unsigned) ((q) >> FIT_MINOR_SHIFT) & FIT_MINOR_MASK)
+
+#define FIT_TYPE_SHIFT (32 + 16)
+#define FIT_TYPE_MASK ((1 << 7) - 1)
+
+#define FIT_TYPE(q) \
+ ((unsigned) ((q) >> FIT_TYPE_SHIFT) & FIT_TYPE_MASK)
+
+#define FIT_ENTRY(type, maj, min, size) \
+ ((((unsigned long)(maj) & FIT_MAJOR_MASK) << FIT_MAJOR_SHIFT) | \
+ (((unsigned long)(min) & FIT_MINOR_MASK) << FIT_MINOR_SHIFT) | \
+ (((unsigned long)(type) & FIT_TYPE_MASK) << FIT_TYPE_SHIFT) | \
+ (size))
+
+struct fit_type_map_t {
+ unsigned char type;
+ const char *name;
+};
+
+static const struct fit_type_map_t fit_entry_types[] = {
+ { FIT_ENTRY_FIT_HEADER, "FIT Header" },
+ { FIT_ENTRY_PAL_A_GEN, "Generic PAL_A" },
+ { FIT_ENTRY_PAL_A_PROC, "Processor-specific PAL_A" },
+ { FIT_ENTRY_PAL_A, "PAL_A" },
+ { FIT_ENTRY_PAL_B, "PAL_B" },
+ { FIT_ENTRY_SAL_A, "SAL_A" },
+ { FIT_ENTRY_SAL_B, "SAL_B" },
+ { FIT_ENTRY_SALRUNTIME, "SAL runtime" },
+ { FIT_ENTRY_EFI, "EFI" },
+ { FIT_ENTRY_VMLINUX, "Embedded Linux" },
+ { FIT_ENTRY_FPSWA, "Embedded FPSWA" },
+ { FIT_ENTRY_UNUSED, "Unused" },
+ { 0xff, "Error" },
+};
+
+static const char *
+fit_type_name(unsigned char type)
+{
+ struct fit_type_map_t const*mapp;
+
+ for (mapp = fit_entry_types; mapp->type != 0xff; mapp++)
+ if (type == mapp->type)
+ return mapp->name;
+
+ if ((type > FIT_ENTRY_PAL_A) && (type < FIT_ENTRY_UNUSED))
+ return "OEM type";
+ if ((type > FIT_ENTRY_PAL_B) && (type < FIT_ENTRY_PAL_A))
+ return "Reserved";
+
+ return "Unknown type";
+}
+
+/* These two routines read the FIT table directly from the FLASH PROM
+ * on a specific node. The PROM can only be accessed using aligned 64
+ * bit reads, so we do that and then shift and mask the result to get
+ * at each field.
+ */
+static int
+dump_fit_entry(char *page, unsigned long *fentry)
+{
+ unsigned long q1, q2;
+ unsigned type;
+
+ TRACE();
+
+ q1 = readq(fentry);
+ q2 = readq(fentry + 1);
+ type = FIT_TYPE(q2);
+ return sprintf(page, "%02x %-25s %x.%02x %016lx %u\n",
+ type,
+ fit_type_name(type),
+ FIT_MAJOR(q2), FIT_MINOR(q2),
+ q1,
+ /* mult by sixteen to get size in bytes */
+ (unsigned)q2 * 16);
+}
+
+/* We assume that the fit table will be small enough that we can print
+ * the whole thing into one page. (This is true for our default 16kB
+ * pages -- each entry is about 60 chars wide when printed.) I read
+ * somewhere that the maximum size of the FIT is 128 entries, so we're
+ * OK except for 4kB pages (and no one is going to do that on SN
+ * anyway).
+ */
+static int
+dump_fit(char *page, unsigned long *fit)
+{
+ unsigned long qw;
+ int nentries;
+ int fentry;
+ char *p;
+
+ TRACE();
+
+ DPRINTK("dumping fit from %p\n", (void *)fit);
+
+ qw = readq(fit);
+ DPRINTK("FIT signature: %016lx (%.8s)\n", qw, (char *)&qw);
+ if (qw != FIT_SIGNATURE)
+ printk(KERN_WARNING "Unrecognized FIT signature");
+
+ qw = readq(fit + 1);
+ nentries = (unsigned)qw;
+ DPRINTK("number of fit entries: %u\n", nentries);
+ /* check that we won't overflow the page -- see comment above */
+ BUG_ON(nentries * 60 > PAGE_SIZE);
+
+ p = page;
+ for (fentry = 0; fentry < nentries; fentry++)
+ /* each FIT entry is two 64 bit words */
+ p += dump_fit_entry(p, fit + 2 * fentry);
+
+ return p - page;
+}
+
+static int
+dump_version(char *page, unsigned long *fit)
+{
+ int nentries;
+ int fentry;
+ unsigned long qw;
+
+ TRACE();
+
+ nentries = (unsigned)readq(fit + 1);
+ BUG_ON(nentries * 60 > PAGE_SIZE);
+
+ for (fentry = 0; fentry < nentries; fentry++) {
+ qw = readq(fit + 2 * fentry + 1);
+ if (FIT_TYPE(qw) == FIT_ENTRY_SAL_A)
+ return sprintf(page, "%x.%02x\n",
+ FIT_MAJOR(qw), FIT_MINOR(qw));
+ }
+ return 0;
+}
+
+/* same as in proc_misc.c */
+static int
+proc_calc_metrics(char *page, char **start, off_t off, int count, int *eof,
+ int len)
+{
+ if (len <= off+count) *eof = 1;
+ *start = page + off;
+ len -= off;
+ if (len>count) len = count;
+ if (len<0) len = 0;
+ return len;
+}
+
+static int
+read_version_entry(char *page, char **start, off_t off, int count, int *eof,
+ void *data)
+{
+ int len = 0;
+
+ MOD_INC_USE_COUNT;
+ /* data holds the pointer to this node's FIT */
+ len = dump_version(page, (unsigned long *)data);
+ len = proc_calc_metrics(page, start, off, count, eof, len);
+ MOD_DEC_USE_COUNT;
+ return len;
+}
+
+static int
+read_fit_entry(char *page, char **start, off_t off, int count, int *eof,
+ void *data)
+{
+ int len = 0;
+
+ MOD_INC_USE_COUNT;
+ /* data holds the pointer to this node's FIT */
+ len = dump_fit(page, (unsigned long *)data);
+ len = proc_calc_metrics(page, start, off, count, eof, len);
+ MOD_DEC_USE_COUNT;
+
+ return len;
+}
+
+/* this is a fake FIT that's used on the medusa simulator which
+ * doesn't usually run a complete PROM.
+ */
+#ifdef CONFIG_IA64_SGI_SN_SIM
+static unsigned long fakefit[] = {
+ /* this is all we need to satisfy the code below */
+ FIT_SIGNATURE,
+ FIT_ENTRY(FIT_ENTRY_FIT_HEADER, 0x02, 0x60, 2),
+ /* dump something arbitrary for
+ * /proc/sgi_prominfo/nodeX/version */
+ 0xbadbeef00fa3ef17ul,
+ FIT_ENTRY(FIT_ENTRY_SAL_A, 0, 0x99, 0x100)
+};
+#endif
+
+static unsigned long *
+lookup_fit(int nasid)
+{
+ unsigned long *fitp;
+ unsigned long fit_paddr;
+ unsigned long *fit_vaddr;
+
+#ifdef CONFIG_IA64_SGI_SN_SIM
+ if (IS_RUNNING_ON_SIMULATOR())
+ return fakefit;
+#endif
+
+ fitp = (void *)GLOBAL_MMR_ADDR(nasid, LB_PROM_SPACE - 32);
+ DPRINTK("pointer to fit at %p\n", (void *)fitp);
+ fit_paddr = readq(fitp);
+ DPRINTK("fit pointer contains %lx\n", fit_paddr);
+ /* snag just the node-relative offset */
+ fit_paddr &= ~0ul >> (63-35);
+ /* the pointer to the FIT is relative to IA-64 compatibility
+ * space. However, the PROM is mapped at a different offset
+ * in MMR space (both local and global)
+ */
+ fit_paddr += 0x700000000;
+ fit_vaddr = (void *)GLOBAL_MMR_ADDR(nasid, fit_paddr);
+ DPRINTK("fit at %p\n", (void *)fit_vaddr);
+ return fit_vaddr;
+}
+
+/* module entry points */
+int __init prominfo_init(void);
+void __exit prominfo_exit(void);
+
+module_init(prominfo_init);
+module_exit(prominfo_exit);
+
+static struct proc_dir_entry **proc_entries;
+static struct proc_dir_entry *sgi_prominfo_entry;
+
+#define NODE_NAME_LEN 11
+
+int __init
+prominfo_init(void)
+{
+ struct proc_dir_entry **entp;
+ cnodeid_t cnodeid;
+ nasid_t nasid;
+ char name[NODE_NAME_LEN];
+
+ TRACE();
+
+ DPRINTK("running on cpu %d\n", smp_processor_id());
+ DPRINTK("numnodes %d\n", numnodes);
+
+ proc_entries = kmalloc(numnodes * sizeof(struct proc_dir_entry *),
+ GFP_KERNEL);
+
+ sgi_prominfo_entry = proc_mkdir("sgi_prominfo", NULL);
+
+ for (cnodeid = 0, entp = proc_entries;
+ cnodeid < numnodes;
+ cnodeid++, entp++) {
+ sprintf(name, "node%d", cnodeid);
+ *entp = proc_mkdir(name, sgi_prominfo_entry);
+ nasid = cnodeid_to_nasid(cnodeid);
+ create_proc_read_entry(
+ "fit", 0, *entp, read_fit_entry,
+ lookup_fit(nasid));
+ create_proc_read_entry(
+ "version", 0, *entp, read_version_entry,
+ lookup_fit(nasid));
+ }
+
+ return 0;
+}
+
+void __exit
+prominfo_exit(void)
+{
+ struct proc_dir_entry **entp;
+ unsigned cnodeid;
+ char name[NODE_NAME_LEN];
+
+ TRACE();
+
+ for (cnodeid = 0, entp = proc_entries;
+ cnodeid < numnodes;
+ cnodeid++, entp++) {
+ remove_proc_entry("fit", *entp);
+ remove_proc_entry("version", *entp);
+ sprintf(name, "node%d", cnodeid);
+ remove_proc_entry(name, sgi_prominfo_entry);
+ }
+ remove_proc_entry("sgi_prominfo", NULL);
+ kfree(proc_entries);
+}
/*
* SN2 Platform specific SMP Support
*
- * Copyright (C) 2000-2002 Silicon Graphics, Inc. All rights reserved.
+ * Copyright (C) 2000-2003 Silicon Graphics, Inc. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License
return ws;
}
-#ifdef PTCG_WAR
-/*
- * The following structure is used to pass params thru smp_call_function
- * to other cpus for flushing TLB ranges.
- */
-typedef struct {
- unsigned long start;
- unsigned long end;
- unsigned long nbits;
- unsigned int rid;
- atomic_t unfinished_count;
- char fill[96];
-} ptc_params_t;
-
-#define NUMPTC 512
-
-static ptc_params_t ptcParamArray[NUMPTC] __attribute__((__aligned__(128)));
-
-/* use separate cache lines on ptcParamsNextByCpu to avoid false sharing */
-static ptc_params_t *ptcParamsNextByCpu[NR_CPUS*16] __attribute__((__aligned__(128)));
-static volatile ptc_params_t *ptcParamsEmpty __cacheline_aligned;
-
-/*REFERENCED*/
-static spinlock_t ptcParamsLock __cacheline_aligned = SPIN_LOCK_UNLOCKED;
-
-static int ptcInit = 0;
-#ifdef PTCDEBUG
-static int ptcParamsAllBusy = 0; /* debugging/statistics */
-static int ptcCountBacklog = 0;
-static int ptcBacklog[NUMPTC+1];
-static char ptcParamsCounts[NR_CPUS][NUMPTC] __attribute__((__aligned__(128)));
-static char ptcParamsResults[NR_CPUS][NUMPTC] __attribute__((__aligned__(128)));
-#endif
-
-/*
- * Make smp_send_flush_tlbsmp_send_flush_tlb() a weak reference,
- * so that we get a clean compile with the ia64 patch without the
- * actual SN1 specific code in arch/ia64/kernel/smp.c.
- */
-extern void smp_send_flush_tlb (void) __attribute((weak));
-
-
-/**
- * sn1_ptc_l_range - purge local translation cache
- * @start: start of virtual address range
- * @end: end of virtual address range
- * @nbits: specifies number of bytes to purge per instruction (num = 1<<(nbits & 0xfc))
- *
- * Purges the range specified from the local processor's translation cache
- * (as opposed to the translation registers). Note that more than the specified
- * range *may* be cleared from the cache by some processors.
- *
- * This is probably not good enough, but I don't want to try to make it better
- * until I get some statistics on a running system. At a minimum, we should only
- * send IPIs to 1 processor in each TLB domain & have it issue a ptc.g on it's
- * own FSB. Also, we only have to serialize per FSB, not globally.
- *
- * More likely, we will have to do some work to reduce the frequency of calls to
- * this routine.
- */
-static inline void
-sn1_ptc_l_range(unsigned long start, unsigned long end, unsigned long nbits)
-{
- do {
- __asm__ __volatile__ ("ptc.l %0,%1" :: "r"(start), "r"(nbits<<2) : "memory");
- start += (1UL << nbits);
- } while (start < end);
- ia64_srlz_d();
-}
-
-/**
- * sn1_received_flush_tlb - cpu tlb flush routine
- *
- * Flushes the TLB of a given processor.
- */
-void
-sn1_received_flush_tlb(void)
-{
- unsigned long start, end, nbits;
- unsigned int rid, saved_rid;
- int cpu = smp_processor_id();
- int result;
- ptc_params_t *ptcParams;
-
- ptcParams = ptcParamsNextByCpu[cpu*16];
- if (ptcParams == ptcParamsEmpty)
- return;
-
- do {
- start = ptcParams->start;
- saved_rid = (unsigned int) ia64_get_rr(start);
- end = ptcParams->end;
- nbits = ptcParams->nbits;
- rid = ptcParams->rid;
-
- if (saved_rid != rid) {
- ia64_set_rr(start, (unsigned long)rid);
- ia64_srlz_d();
- }
-
- sn1_ptc_l_range(start, end, nbits);
-
- if (saved_rid != rid)
- ia64_set_rr(start, (unsigned long)saved_rid);
-
- ia64_srlz_i();
-
- result = atomic_dec(&ptcParams->unfinished_count);
-#ifdef PTCDEBUG
- {
- int i = ptcParams-&ptcParamArray[0];
- ptcParamsResults[cpu][i] = (char) result;
- ptcParamsCounts[cpu][i]++;
- }
-#endif /* PTCDEBUG */
-
- if (++ptcParams == &ptcParamArray[NUMPTC])
- ptcParams = &ptcParamArray[0];
-
- } while (ptcParams != ptcParamsEmpty);
-
- ptcParamsNextByCpu[cpu*16] = ptcParams;
-}
-
-/**
- * sn1_global_tlb_purge - flush a translation cache range on all processors
- * @start: start of virtual address range to flush
- * @end: end of virtual address range
- * @nbits: specifies number of bytes to purge per instruction (num = 1<<(nbits & 0xfc))
- *
- * Flushes the translation cache of all processors from @start to @end.
- */
-void
-sn1_global_tlb_purge (unsigned long start, unsigned long end, unsigned long nbits)
-{
- ptc_params_t *params;
- ptc_params_t *next;
- unsigned long irqflags;
-#ifdef PTCDEBUG
- ptc_params_t *nextnext;
- int backlog = 0;
-#endif
-
- if (smp_num_cpus == 1) {
- sn1_ptc_l_range(start, end, nbits);
- return;
- }
-
- if (in_interrupt()) {
- /*
- * If at interrupt level and cannot get spinlock,
- * then do something useful by flushing own tlbflush queue
- * so as to avoid a possible deadlock.
- */
- while (!spin_trylock(&ptcParamsLock)) {
- local_irq_save(irqflags);
- sn1_received_flush_tlb();
- local_irq_restore(irqflags);
- udelay(10); /* take it easier on the bus */
- }
- } else {
- spin_lock(&ptcParamsLock);
- }
-
- if (!ptcInit) {
- int cpu;
- ptcInit = 1;
- memset(ptcParamArray, 0, sizeof(ptcParamArray));
- ptcParamsEmpty = &ptcParamArray[0];
- for (cpu=0; cpu<NR_CPUS; cpu++)
- ptcParamsNextByCpu[cpu*16] = &ptcParamArray[0];
-
-#ifdef PTCDEBUG
- memset(ptcBacklog, 0, sizeof(ptcBacklog));
- memset(ptcParamsCounts, 0, sizeof(ptcParamsCounts));
- memset(ptcParamsResults, 0, sizeof(ptcParamsResults));
-#endif /* PTCDEBUG */
- }
-
- params = (ptc_params_t *) ptcParamsEmpty;
- next = (ptc_params_t *) ptcParamsEmpty + 1;
- if (next == &ptcParamArray[NUMPTC])
- next = &ptcParamArray[0];
-
-#ifdef PTCDEBUG
- nextnext = next + 1;
- if (nextnext == &ptcParamArray[NUMPTC])
- nextnext = &ptcParamArray[0];
-
- if (ptcCountBacklog) {
- /* quick count of backlog */
- ptc_params_t *ptr;
-
- /* check the current pointer to the beginning */
- ptr = params;
- while(--ptr >= &ptcParamArray[0]) {
- if (atomic_read(&ptr->unfinished_count) == 0)
- break;
- ++backlog;
- }
-
- if (backlog) {
- /* check the end of the array */
- ptr = &ptcParamArray[NUMPTC];
- while (--ptr > params) {
- if (atomic_read(&ptr->unfinished_count) == 0)
- break;
- ++backlog;
- }
- }
- ptcBacklog[backlog]++;
- }
-#endif /* PTCDEBUG */
-
- /* wait for the next entry to clear...should be rare */
- if (atomic_read(&next->unfinished_count) > 0) {
-#ifdef PTCDEBUG
- ptcParamsAllBusy++;
-
- if (atomic_read(&nextnext->unfinished_count) == 0) {
- if (atomic_read(&next->unfinished_count) > 0) {
- panic("\nnonzero next zero nextnext %lx %lx\n",
- (long)next, (long)nextnext);
- }
- }
-#endif
-
- /* it could be this cpu that is behind */
- local_irq_save(irqflags);
- sn1_received_flush_tlb();
- local_irq_restore(irqflags);
-
- /* now we know it's not this cpu, so just wait */
- while (atomic_read(&next->unfinished_count) > 0) {
- barrier();
- }
- }
-
- params->start = start;
- params->end = end;
- params->nbits = nbits;
- params->rid = (unsigned int) ia64_get_rr(start);
- atomic_set(¶ms->unfinished_count, smp_num_cpus);
-
- /* The atomic_set above can hit memory *after* the update
- * to ptcParamsEmpty below, which opens a timing window
- * that other cpus can squeeze into!
- */
- mb();
-
- /* everything is ready to process:
- * -- global lock is held
- * -- new entry + 1 is free
- * -- new entry is set up
- * so now:
- * -- update the global next pointer
- * -- unlock the global lock
- * -- send IPI to notify other cpus
- * -- process the data ourselves
- */
- ptcParamsEmpty = next;
- spin_unlock(&ptcParamsLock);
- smp_send_flush_tlb();
-
- local_irq_save(irqflags);
- sn1_received_flush_tlb();
- local_irq_restore(irqflags);
-
- /*
- * Since IPIs are polled event (for now), we need to wait til the
- * TLB flush has started.
- * wait for the flush to complete
- */
- while (atomic_read(¶ms->unfinished_count) > 0)
- barrier();
-}
-
-#endif /* PTCG_WAR */
/**
void
sn2_global_tlb_purge (unsigned long start, unsigned long end, unsigned long nbits)
{
- int cnode, mycnode, nasid;
+ int cnode, mycnode, nasid, flushed=0;
volatile unsigned long *ptc0, *ptc1;
unsigned long flags=0, data0, data1;
- /*
- * Special case 1 cpu & 1 node. Use local purges.
- */
-#ifdef PTCG_WAR
- sn1_global_tlb_purge(start, end, nbits);
- return;
-#endif /* PTCG_WAR */
-
data0 = (1UL<<SH_PTC_0_A_SHFT) |
(nbits<<SH_PTC_0_PS_SHFT) |
((ia64_get_rr(start)>>8)<<SH_PTC_0_RID_SHFT) |
ptc0 = (long*)GLOBAL_MMR_PHYS_ADDR(0, SH_PTC_0);
ptc1 = (long*)GLOBAL_MMR_PHYS_ADDR(0, SH_PTC_1);
- mycnode = local_nodeid;
-
- /*
- * For now, we don't want to spin uninterruptibly waiting
- * for the lock. Makes hangs hard to debug.
- */
- local_irq_save(flags);
- while (!spin_trylock(&sn2_global_ptc_lock)) {
- local_irq_restore(flags);
- udelay(1);
- local_irq_save(flags);
- }
+ mycnode = numa_node_id();
+
+ spin_lock_irqsave(&sn2_global_ptc_lock, flags);
do {
data1 = start | (1UL<<SH_PTC_1_START_SHFT);
ptc0 = CHANGE_NASID(nasid, ptc0);
ptc1 = CHANGE_NASID(nasid, ptc1);
pio_atomic_phys_write_mmrs(ptc0, data0, ptc1, data1);
+ flushed = 1;
}
}
- if (wait_piowc() & SH_PIO_WRITE_STATUS_0_WRITE_DEADLOCK_MASK)
+ if (flushed && (wait_piowc() & SH_PIO_WRITE_STATUS_0_WRITE_DEADLOCK_MASK)) {
sn2_ptc_deadlock_recovery(data0, data1);
+ }
start += (1UL << nbits);
ptc1 = (long*)GLOBAL_MMR_PHYS_ADDR(0, SH_PTC_1);
piows = (long*)pda->pio_write_status_addr;
- mycnode = local_nodeid;
+ mycnode = numa_node_id();
for (cnode = 0; cnode < numnodes; cnode++) {
if (is_headless_node(cnode) || cnode == mycnode)
void
sn_send_IPI_phys(long physid, int vector, int delivery_mode)
{
- long nasid, slice;
- long val;
+ long nasid, slice, val;
+ unsigned long flags=0;
volatile long *p;
-#ifdef BUS_INT_WAR
- if (vector != ap_wakeup_vector && delivery_mode == IA64_IPI_DM_INT) {
- return;
- }
-#endif
-
nasid = cpu_physical_id_to_nasid(physid);
slice = cpu_physical_id_to_slice(physid);
(0x000feeUL<<SH_IPI_INT_BASE_SHFT);
mb();
+ if (enable_shub_wars_1_1() ) {
+ spin_lock_irqsave(&sn2_global_ptc_lock, flags);
+ }
pio_phys_write_mmr(p, val);
+ if (enable_shub_wars_1_1() ) {
+ wait_piowc();
+ spin_unlock_irqrestore(&sn2_global_ptc_lock, flags);
+ }
-#ifndef CONFIG_SHUB_1_0_SPECIFIC
- /* doesn't work on shub 1.0 */
- wait_piowc();
-#endif
}
/**
sn_send_IPI_phys(physid, vector, delivery_mode);
}
-
* http://oss.sgi.com/projects/GenInfo/NoticeExplan
*/
#include <linux/config.h>
+#include <asm/uaccess.h>
#ifdef CONFIG_PROC_FS
#include <linux/proc_fs.h>
return sprintf(page, "%d\n", sn_local_partid());
}
-struct proc_dir_entry * sgi_proc_dir = NULL;
+static struct proc_dir_entry * sgi_proc_dir;
void
register_sn_partition_id(void) {
entry->write_proc = sn_force_interrupt_write_proc;
}
}
+
+extern int sn_linkstats_get(char *);
+extern int sn_linkstats_reset(unsigned long);
+
+static int
+sn_linkstats_read_proc(char *page, char **start, off_t off,
+ int count, int *eof, void *data) {
+
+ return sn_linkstats_get(page);
+}
+
+static int
+sn_linkstats_write_proc(struct file *file, const char *buffer,
+ unsigned long count, void *data)
+{
+ char s[64];
+ unsigned long msecs;
+ int e = count;
+
+ if (copy_from_user(s, buffer, count < sizeof(s) ? count : sizeof(s)))
+ e = -EFAULT;
+ else {
+ if (sscanf(s, "%lu", &msecs) != 1 || msecs < 5)
+ /* at least 5 milliseconds between updates */
+ e = -EINVAL;
+ else
+ sn_linkstats_reset(msecs);
+ }
+
+ return e;
+}
+
+void
+register_sn_linkstats(void) {
+ struct proc_dir_entry *entry;
+
+ if (!sgi_proc_dir) {
+ sgi_proc_dir = proc_mkdir("sgi_sn", 0);
+ }
+ entry = create_proc_entry("linkstats", 0444, sgi_proc_dir);
+ if (entry) {
+ entry->nlink = 1;
+ entry->data = 0;
+ entry->read_proc = sn_linkstats_read_proc;
+ entry->write_proc = sn_linkstats_write_proc;
+ }
+}
+
void
register_sn_procfs(void) {
register_sn_partition_id();
register_sn_serial_numbers();
register_sn_force_interrupt();
+ register_sn_linkstats();
}
#endif /* CONFIG_PROC_FS */
--- /dev/null
+/*
+ * linux/arch/ia64/sn/kernel/sn2/timer.c
+ *
+ * Copyright (C) 2003 Silicon Graphics, Inc.
+ * Copyright (C) 2003 Hewlett-Packard Co
+ * David Mosberger <davidm@hpl.hp.com>: updated for new timer-interpolation infrastructure
+ */
+
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/time.h>
+#include <linux/interrupt.h>
+
+#include <asm/hw_irq.h>
+#include <asm/system.h>
+
+#include <asm/sn/leds.h>
+#include <asm/sn/clksupport.h>
+
+
+extern unsigned long sn_rtc_cycles_per_second;
+static volatile unsigned long last_wall_rtc;
+
+static unsigned long rtc_offset; /* updated only when xtime write-lock is held! */
+static long rtc_nsecs_per_cycle;
+static long rtc_per_timer_tick;
+
+static unsigned long
+getoffset(void)
+{
+ return rtc_offset + (GET_RTC_COUNTER() - last_wall_rtc)*rtc_nsecs_per_cycle;
+}
+
+
+static void
+update(long delta_nsec)
+{
+ unsigned long rtc_counter = GET_RTC_COUNTER();
+ unsigned long offset = rtc_offset + (rtc_counter - last_wall_rtc)*rtc_nsecs_per_cycle;
+
+ /* Be careful about signed/unsigned comparisons here: */
+ if (delta_nsec < 0 || (unsigned long) delta_nsec < offset)
+ rtc_offset = offset - delta_nsec;
+ else
+ rtc_offset = 0;
+ last_wall_rtc = rtc_counter;
+}
+
+
+static void
+reset(void)
+{
+ rtc_offset = 0;
+ last_wall_rtc = GET_RTC_COUNTER();
+}
+
+
+static struct time_interpolator sn2_interpolator = {
+ .get_offset = getoffset,
+ .update = update,
+ .reset = reset
+};
+
+void __init
+sn_timer_init(void)
+{
+ sn2_interpolator.frequency = sn_rtc_cycles_per_second;
+ sn2_interpolator.drift = -1; /* unknown */
+ register_time_interpolator(&sn2_interpolator);
+
+ rtc_per_timer_tick = sn_rtc_cycles_per_second / HZ;
+ rtc_nsecs_per_cycle = 1000000000 / sn_rtc_cycles_per_second;
+
+ last_wall_rtc = GET_RTC_COUNTER();
+}
+++ /dev/null
-/*
- * Copyright (c) 2000-2002 Silicon Graphics, Inc. All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it would be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
- *
- * Further, this software is distributed without any warranty that it is
- * free of the rightful claim of any third person regarding infringement
- * or the like. Any license provided herein, whether implied or
- * otherwise, applies only to this software file. Patent licenses, if
- * any, provided herein do not apply to combinations of this program with
- * other software, or any other product whatsoever.
- *
- * You should have received a copy of the GNU General Public
- * License along with this program; if not, write the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
- *
- * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy,
- * Mountain View, CA 94043, or:
- *
- * http://www.sgi.com
- *
- * For further information regarding this notice, see:
- *
- * http://oss.sgi.com/projects/GenInfo/NoticeExplan
- */
-
-#include <linux/config.h>
-#ifdef CONFIG_IA64_SGI_AUTOTEST
-
-// Testing only.
-// Routine will cause MCAs
-// zzzmca(n)
-// n=0 MCA via duplicate TLB dropin
-// n=1 MCA via read of garbage address
-// n=2 MCA via lfetch read of garbage address
-//
-
-#define ITIR(key, ps) ((key<<8) | (ps<<2))
-#define TLB_PAGESIZE 28 // Use 256MB pages for now.
-
- .global zzzmca
- .proc zzzmca
-zzzmca:
- alloc loc4 = ar.pfs,2,8,1,0;;
- cmp.ne p6,p0=r32,r0;;
- movl r2=0x2dead
- movl r3=0x3dead
- movl r15=0x15dead
- movl r16=0x16dead
- movl r31=0x31dead
- movl loc0=0x34beef
- movl loc1=0x35beef
- movl loc2=0x36beef
- movl loc3=0x37beef
- movl out0=0x42beef
-
- movl r20=0x32feed;;
- mov ar32=r20
- movl r20=0x36feed;;
- mov ar36=r20
- movl r20=0x65feed;;
- mov ar65=r20
- movl r20=0x66feed;;
- mov ar66=r20
-
-(p6) br.cond.sptk 1f
-
- rsm 0x2000;;
- srlz.d;
- mov r11 = 5
- mov r3 = ITIR(0,TLB_PAGESIZE);;
- mov cr.itir = r3
- mov r10 = 0;;
- itr.d dtr[r11] = r10;;
- mov r11 = 6
-
- itr.d dtr[r11] = r10;;
- br 9f
-
-1:
- cmp.eq p6,p7=1,r32
-#ifdef CONFIG_IA64_SGI_SN1
- movl r8=0xe00000fe00000048;;
-#else
- movl r8=0xe0007fb000000048;;
-#endif
- (p6) ld8 r9=[r8]
- (p7) lfetch.fault.nt2 [r8]
- ;;
- mf
- ;;
- mf.a
- ;;
- srlz.d
-
-9: mov ar.pfs=loc4
- br.ret.sptk rp
-
- .endp zzzmca
-
-#endif
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
- * Copyright (C) 2000-2002 Silicon Graphics, Inc. All rights reserved.
+ * Copyright (C) 2000-2003 Silicon Graphics, Inc. All rights reserved.
*/
#include <asm/machvec.h>
#include <asm/sn/intr.h>
-#include <asm/sn/arch.h>
#include <linux/mm.h>
-#include <linux/devfs_fs_kernel.h>
-extern devfs_handle_t base_io_scsi_ctlr_vhdl[];
+#include <asm/sn/sgi.h>
+extern vertex_hdl_t base_io_scsi_ctlr_vhdl[];
#include <asm/sn/types.h>
extern cnodeid_t master_node_get(devfs_handle_t vhdl);
#include <asm/sn/arch.h>
EXPORT_SYMBOL(base_io_scsi_ctlr_vhdl);
EXPORT_SYMBOL(master_node_get);
-
-/*
- * symbols referenced by the PCIBA module
- */
-#include <asm/sn/invent.h>
-#include <asm/sn/hack.h>
-#include <asm/sn/hcl.h>
-#include <asm/sn/pci/pciio.h>
-
-devfs_handle_t
-devfn_to_vertex(unsigned char busnum, unsigned int devfn);
-EXPORT_SYMBOL(devfn_to_vertex);
-EXPORT_SYMBOL(hwgraph_vertex_unref);
-EXPORT_SYMBOL(pciio_config_get);
-EXPORT_SYMBOL(pciio_info_slot_get);
-EXPORT_SYMBOL(hwgraph_edge_add);
-EXPORT_SYMBOL(pciio_info_master_get);
-EXPORT_SYMBOL(pciio_info_get);
-
#ifdef CONFIG_IA64_SGI_SN_DEBUG
EXPORT_SYMBOL(__pa_debug);
EXPORT_SYMBOL(__va_debug);
EXPORT_SYMBOL(sn_send_IPI_phys);
/* symbols referenced by partitioning modules */
-#include <asm/sn/bte_copy.h>
+#include <asm/sn/bte.h>
+EXPORT_SYMBOL(bte_copy);
EXPORT_SYMBOL(bte_unaligned_copy);
#include <asm/sal.h>
EXPORT_SYMBOL(ia64_sal);
+EXPORT_SYMBOL(physical_node_map);
-#ifdef CONFIG_IA64_SGI_SN2
#include <asm/sn/sn_sal.h>
EXPORT_SYMBOL(sal_lock);
EXPORT_SYMBOL(sn_partid);
EXPORT_SYMBOL(sn_local_partid);
EXPORT_SYMBOL(sn_system_serial_number_string);
EXPORT_SYMBOL(sn_partition_serial_number);
-#endif
+
+EXPORT_SYMBOL(sn_mmiob);
/* added by tduffy 04.08.01 to fix depmod issues */
#include <linux/mmzone.h>
-#ifdef BUS_INT_WAR
-extern void sn_add_polled_interrupt(int, int);
-extern void sn_delete_polled_interrupt(int);
-EXPORT_SYMBOL(sn_add_polled_interrupt);
-EXPORT_SYMBOL(sn_delete_polled_interrupt);
-#endif
-
extern nasid_t master_nasid;
EXPORT_SYMBOL(master_nasid);
+
+EXPORT_SYMBOL(sn_flush_all_caches);
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
- * Copyright (C) 2000-2002 Silicon Graphics, Inc. All rights reserved
+ * Copyright (C) 2000-2003 Silicon Graphics, Inc. All Rights Reserved.
*
* This implemenation of synchronization variables is heavily based on
* one done by Steve Lord <lord@sgi.com>
+++ /dev/null
-CFLAGS = -g -O2 -Wall $(CPPFLAGS)
-
-TARGET = include/asm-ia64/offsets.h
-
-src = $(obj)
-
-clean-files := print_offsets.s print_offsets offsets.h
-
-$(TARGET): $(obj)/offsets.h
- @if ! cmp -s $(obj)/offsets.h ${TARGET}; then \
- echo -e "*** Updating ${TARGET}..."; \
- cp $(obj)/offsets.h ${TARGET}; \
- else \
- echo "*** ${TARGET} is up to date"; \
- fi
-
-#
-# If we're cross-compiling, we use the cross-compiler to translate
-# print_offsets.c into an assembly file and then awk to translate this
-# file into offsets.h. This avoids having to use a simulator to
-# generate this file. This is based on an idea suggested by Asit
-# Mallick. If we're running natively, we can of course just build
-# print_offsets and run it. --davidm
-#
-
-ifeq ($(CROSS_COMPILE),)
-
-$(obj)/offsets.h: $(obj)/print_offsets
- $(obj)/print_offsets > $(obj)/offsets.h
-
-comma := ,
-
-$(obj)/print_offsets: $(src)/print_offsets.c FORCE
- [ -r $(TARGET) ] || echo "#define IA64_TASK_SIZE 0" > $(TARGET)
- $(CC) $(CFLAGS) -DKBUILD_BASENAME=$(subst $(comma),_,$(subst -,_,$(*F))) \
- $(src)/print_offsets.c -o $@
-
-FORCE:
-
-else
-
-$(obj)/offsets.h: $(obj)/print_offsets.s
- $(AWK) -f $(src)/print_offsets.awk $^ > $@
-
-$(obj)/print_offsets.s: $(src)/print_offsets.c
- [ -r $(TARGET) ] || echo "#define IA64_TASK_SIZE 0" > $(TARGET)
- $(CC) $(CFLAGS) -DKBUILD_BASENAME=$(subst $(comma),_,$(subst -,_,$(*F))) -S $^ -o $@
-
-endif
-
-.PHONY: all modules modules_install
+++ /dev/null
-BEGIN {
- print "#ifndef _ASM_IA64_OFFSETS_H"
- print "#define _ASM_IA64_OFFSETS_H"
- print "/*"
- print " * DO NOT MODIFY"
- print " *"
- print " * This file was generated by arch/ia64/tools/print_offsets.awk."
- print " *"
- print " */"
- print ""
- print "#define CLONE_IDLETASK_BIT 12"
- print "#define CLONE_SETTLS_BIT 19"
-}
-
-# look for .tab:
-# stringz "name"
-# data value
-# sequence
-
-/.*[.]size/ {
- inside_table = 0
-}
-
-/\/\/ end/ {
- inside_table = 0
-}
-
-/.*[.]rodata/ {
- inside_table = 0
-}
-
-{
- if (inside_table) {
- if ($1 == "//") getline;
- name=$2
- getline
- getline
- if ($1 == "//") getline;
- value=$2
- len = length(name)
- name = substr(name, 2, len - 2)
- len -= 2
- if (len == 0)
- print ""
- else {
- len += 8
- if (len >= 40) {
- space=" "
- } else {
- space=""
- while (len < 40) {
- len += 8
- space = space"\t"
- }
- }
- printf("#define %s%s%lu\t/* 0x%lx */\n", name, space, value, value)
- }
- }
-}
-
-/tab:/ {
- inside_table = 1
-}
-
-/tab\#:/ {
- inside_table = 1
-}
-
-END {
- print ""
- print "#endif /* _ASM_IA64_OFFSETS_H */"
-}
+++ /dev/null
-/*
- * Utility to generate asm-ia64/offsets.h.
- *
- * Copyright (C) 1999-2003 Hewlett-Packard Co
- * David Mosberger-Tang <davidm@hpl.hp.com>
- *
- * Note that this file has dual use: when building the kernel
- * natively, the file is translated into a binary and executed. When
- * building the kernel in a cross-development environment, this file
- * gets translated into an assembly file which, in turn, is processed
- * by awk to generate offsets.h. So if you make any changes to this
- * file, be sure to verify that the awk procedure still works (see
- * print_offsets.awk).
- */
-#include <linux/config.h>
-
-#include <linux/sched.h>
-
-#include <asm-ia64/processor.h>
-#include <asm-ia64/ptrace.h>
-#include <asm-ia64/siginfo.h>
-#include <asm-ia64/sigcontext.h>
-
-#include "../kernel/sigframe.h"
-
-#ifdef offsetof
-# undef offsetof
-#endif
-
-/*
- * We _can't_ include the host's standard header file, as those are in
- * potential conflict with the what the Linux kernel declares for the
- * target system.
- */
-extern int printf (const char *, ...);
-
-#define offsetof(type,field) ((char *) &((type *) 0)->field - (char *) 0)
-
-struct
- {
- const char name[256];
- unsigned long value;
- }
-tab[] =
- {
- { "IA64_TASK_SIZE", sizeof (struct task_struct) },
- { "IA64_THREAD_INFO_SIZE", sizeof (struct thread_info) },
- { "IA64_PT_REGS_SIZE", sizeof (struct pt_regs) },
- { "IA64_SWITCH_STACK_SIZE", sizeof (struct switch_stack) },
- { "IA64_SIGINFO_SIZE", sizeof (struct siginfo) },
- { "IA64_CPU_SIZE", sizeof (struct cpuinfo_ia64) },
- { "SIGFRAME_SIZE", sizeof (struct sigframe) },
- { "UNW_FRAME_INFO_SIZE", sizeof (struct unw_frame_info) },
- { "", 0 }, /* spacer */
- { "IA64_TASK_CLEAR_CHILD_TID_OFFSET",offsetof (struct task_struct, clear_child_tid) },
- { "IA64_TASK_GROUP_LEADER_OFFSET", offsetof (struct task_struct, group_leader) },
- { "IA64_TASK_PID_OFFSET", offsetof (struct task_struct, pid) },
- { "IA64_TASK_REAL_PARENT_OFFSET", offsetof (struct task_struct, real_parent) },
- { "IA64_TASK_TGID_OFFSET", offsetof (struct task_struct, tgid) },
- { "IA64_TASK_THREAD_KSP_OFFSET", offsetof (struct task_struct, thread.ksp) },
- { "IA64_TASK_THREAD_ON_USTACK_OFFSET", offsetof (struct task_struct, thread.on_ustack) },
- { "IA64_PT_REGS_CR_IPSR_OFFSET", offsetof (struct pt_regs, cr_ipsr) },
- { "IA64_PT_REGS_CR_IIP_OFFSET", offsetof (struct pt_regs, cr_iip) },
- { "IA64_PT_REGS_CR_IFS_OFFSET", offsetof (struct pt_regs, cr_ifs) },
- { "IA64_PT_REGS_AR_UNAT_OFFSET", offsetof (struct pt_regs, ar_unat) },
- { "IA64_PT_REGS_AR_PFS_OFFSET", offsetof (struct pt_regs, ar_pfs) },
- { "IA64_PT_REGS_AR_RSC_OFFSET", offsetof (struct pt_regs, ar_rsc) },
- { "IA64_PT_REGS_AR_RNAT_OFFSET", offsetof (struct pt_regs, ar_rnat) },
- { "IA64_PT_REGS_AR_BSPSTORE_OFFSET",offsetof (struct pt_regs, ar_bspstore) },
- { "IA64_PT_REGS_PR_OFFSET", offsetof (struct pt_regs, pr) },
- { "IA64_PT_REGS_B6_OFFSET", offsetof (struct pt_regs, b6) },
- { "IA64_PT_REGS_LOADRS_OFFSET", offsetof (struct pt_regs, loadrs) },
- { "IA64_PT_REGS_R1_OFFSET", offsetof (struct pt_regs, r1) },
- { "IA64_PT_REGS_R2_OFFSET", offsetof (struct pt_regs, r2) },
- { "IA64_PT_REGS_R3_OFFSET", offsetof (struct pt_regs, r3) },
- { "IA64_PT_REGS_R12_OFFSET", offsetof (struct pt_regs, r12) },
- { "IA64_PT_REGS_R13_OFFSET", offsetof (struct pt_regs, r13) },
- { "IA64_PT_REGS_R14_OFFSET", offsetof (struct pt_regs, r14) },
- { "IA64_PT_REGS_R15_OFFSET", offsetof (struct pt_regs, r15) },
- { "IA64_PT_REGS_R8_OFFSET", offsetof (struct pt_regs, r8) },
- { "IA64_PT_REGS_R9_OFFSET", offsetof (struct pt_regs, r9) },
- { "IA64_PT_REGS_R10_OFFSET", offsetof (struct pt_regs, r10) },
- { "IA64_PT_REGS_R11_OFFSET", offsetof (struct pt_regs, r11) },
- { "IA64_PT_REGS_R16_OFFSET", offsetof (struct pt_regs, r16) },
- { "IA64_PT_REGS_R17_OFFSET", offsetof (struct pt_regs, r17) },
- { "IA64_PT_REGS_R18_OFFSET", offsetof (struct pt_regs, r18) },
- { "IA64_PT_REGS_R19_OFFSET", offsetof (struct pt_regs, r19) },
- { "IA64_PT_REGS_R20_OFFSET", offsetof (struct pt_regs, r20) },
- { "IA64_PT_REGS_R21_OFFSET", offsetof (struct pt_regs, r21) },
- { "IA64_PT_REGS_R22_OFFSET", offsetof (struct pt_regs, r22) },
- { "IA64_PT_REGS_R23_OFFSET", offsetof (struct pt_regs, r23) },
- { "IA64_PT_REGS_R24_OFFSET", offsetof (struct pt_regs, r24) },
- { "IA64_PT_REGS_R25_OFFSET", offsetof (struct pt_regs, r25) },
- { "IA64_PT_REGS_R26_OFFSET", offsetof (struct pt_regs, r26) },
- { "IA64_PT_REGS_R27_OFFSET", offsetof (struct pt_regs, r27) },
- { "IA64_PT_REGS_R28_OFFSET", offsetof (struct pt_regs, r28) },
- { "IA64_PT_REGS_R29_OFFSET", offsetof (struct pt_regs, r29) },
- { "IA64_PT_REGS_R30_OFFSET", offsetof (struct pt_regs, r30) },
- { "IA64_PT_REGS_R31_OFFSET", offsetof (struct pt_regs, r31) },
- { "IA64_PT_REGS_AR_CCV_OFFSET", offsetof (struct pt_regs, ar_ccv) },
- { "IA64_PT_REGS_AR_FPSR_OFFSET", offsetof (struct pt_regs, ar_fpsr) },
- { "IA64_PT_REGS_B0_OFFSET", offsetof (struct pt_regs, b0) },
- { "IA64_PT_REGS_B7_OFFSET", offsetof (struct pt_regs, b7) },
- { "IA64_PT_REGS_F6_OFFSET", offsetof (struct pt_regs, f6) },
- { "IA64_PT_REGS_F7_OFFSET", offsetof (struct pt_regs, f7) },
- { "IA64_PT_REGS_F8_OFFSET", offsetof (struct pt_regs, f8) },
- { "IA64_PT_REGS_F9_OFFSET", offsetof (struct pt_regs, f9) },
- { "IA64_SWITCH_STACK_CALLER_UNAT_OFFSET", offsetof (struct switch_stack, caller_unat) },
- { "IA64_SWITCH_STACK_AR_FPSR_OFFSET", offsetof (struct switch_stack, ar_fpsr) },
- { "IA64_SWITCH_STACK_F2_OFFSET", offsetof (struct switch_stack, f2) },
- { "IA64_SWITCH_STACK_F3_OFFSET", offsetof (struct switch_stack, f3) },
- { "IA64_SWITCH_STACK_F4_OFFSET", offsetof (struct switch_stack, f4) },
- { "IA64_SWITCH_STACK_F5_OFFSET", offsetof (struct switch_stack, f5) },
- { "IA64_SWITCH_STACK_F10_OFFSET", offsetof (struct switch_stack, f10) },
- { "IA64_SWITCH_STACK_F11_OFFSET", offsetof (struct switch_stack, f11) },
- { "IA64_SWITCH_STACK_F12_OFFSET", offsetof (struct switch_stack, f12) },
- { "IA64_SWITCH_STACK_F13_OFFSET", offsetof (struct switch_stack, f13) },
- { "IA64_SWITCH_STACK_F14_OFFSET", offsetof (struct switch_stack, f14) },
- { "IA64_SWITCH_STACK_F15_OFFSET", offsetof (struct switch_stack, f15) },
- { "IA64_SWITCH_STACK_F16_OFFSET", offsetof (struct switch_stack, f16) },
- { "IA64_SWITCH_STACK_F17_OFFSET", offsetof (struct switch_stack, f17) },
- { "IA64_SWITCH_STACK_F18_OFFSET", offsetof (struct switch_stack, f18) },
- { "IA64_SWITCH_STACK_F19_OFFSET", offsetof (struct switch_stack, f19) },
- { "IA64_SWITCH_STACK_F20_OFFSET", offsetof (struct switch_stack, f20) },
- { "IA64_SWITCH_STACK_F21_OFFSET", offsetof (struct switch_stack, f21) },
- { "IA64_SWITCH_STACK_F22_OFFSET", offsetof (struct switch_stack, f22) },
- { "IA64_SWITCH_STACK_F23_OFFSET", offsetof (struct switch_stack, f23) },
- { "IA64_SWITCH_STACK_F24_OFFSET", offsetof (struct switch_stack, f24) },
- { "IA64_SWITCH_STACK_F25_OFFSET", offsetof (struct switch_stack, f25) },
- { "IA64_SWITCH_STACK_F26_OFFSET", offsetof (struct switch_stack, f26) },
- { "IA64_SWITCH_STACK_F27_OFFSET", offsetof (struct switch_stack, f27) },
- { "IA64_SWITCH_STACK_F28_OFFSET", offsetof (struct switch_stack, f28) },
- { "IA64_SWITCH_STACK_F29_OFFSET", offsetof (struct switch_stack, f29) },
- { "IA64_SWITCH_STACK_F30_OFFSET", offsetof (struct switch_stack, f30) },
- { "IA64_SWITCH_STACK_F31_OFFSET", offsetof (struct switch_stack, f31) },
- { "IA64_SWITCH_STACK_R4_OFFSET", offsetof (struct switch_stack, r4) },
- { "IA64_SWITCH_STACK_R5_OFFSET", offsetof (struct switch_stack, r5) },
- { "IA64_SWITCH_STACK_R6_OFFSET", offsetof (struct switch_stack, r6) },
- { "IA64_SWITCH_STACK_R7_OFFSET", offsetof (struct switch_stack, r7) },
- { "IA64_SWITCH_STACK_B0_OFFSET", offsetof (struct switch_stack, b0) },
- { "IA64_SWITCH_STACK_B1_OFFSET", offsetof (struct switch_stack, b1) },
- { "IA64_SWITCH_STACK_B2_OFFSET", offsetof (struct switch_stack, b2) },
- { "IA64_SWITCH_STACK_B3_OFFSET", offsetof (struct switch_stack, b3) },
- { "IA64_SWITCH_STACK_B4_OFFSET", offsetof (struct switch_stack, b4) },
- { "IA64_SWITCH_STACK_B5_OFFSET", offsetof (struct switch_stack, b5) },
- { "IA64_SWITCH_STACK_AR_PFS_OFFSET", offsetof (struct switch_stack, ar_pfs) },
- { "IA64_SWITCH_STACK_AR_LC_OFFSET", offsetof (struct switch_stack, ar_lc) },
- { "IA64_SWITCH_STACK_AR_UNAT_OFFSET", offsetof (struct switch_stack, ar_unat) },
- { "IA64_SWITCH_STACK_AR_RNAT_OFFSET", offsetof (struct switch_stack, ar_rnat) },
- { "IA64_SWITCH_STACK_AR_BSPSTORE_OFFSET", offsetof (struct switch_stack, ar_bspstore) },
- { "IA64_SWITCH_STACK_PR_OFFSET", offsetof (struct switch_stack, pr) },
- { "IA64_SIGCONTEXT_IP_OFFSET", offsetof (struct sigcontext, sc_ip) },
- { "IA64_SIGCONTEXT_AR_BSP_OFFSET", offsetof (struct sigcontext, sc_ar_bsp) },
- { "IA64_SIGCONTEXT_AR_FPSR_OFFSET", offsetof (struct sigcontext, sc_ar_fpsr) },
- { "IA64_SIGCONTEXT_AR_RNAT_OFFSET", offsetof (struct sigcontext, sc_ar_rnat) },
- { "IA64_SIGCONTEXT_AR_UNAT_OFFSET", offsetof (struct sigcontext, sc_ar_unat) },
- { "IA64_SIGCONTEXT_B0_OFFSET", offsetof (struct sigcontext, sc_br[0]) },
- { "IA64_SIGCONTEXT_CFM_OFFSET", offsetof (struct sigcontext, sc_cfm) },
- { "IA64_SIGCONTEXT_FLAGS_OFFSET", offsetof (struct sigcontext, sc_flags) },
- { "IA64_SIGCONTEXT_FR6_OFFSET", offsetof (struct sigcontext, sc_fr[6]) },
- { "IA64_SIGCONTEXT_PR_OFFSET", offsetof (struct sigcontext, sc_pr) },
- { "IA64_SIGCONTEXT_R12_OFFSET", offsetof (struct sigcontext, sc_gr[12]) },
- { "IA64_SIGCONTEXT_RBS_BASE_OFFSET",offsetof (struct sigcontext, sc_rbs_base) },
- { "IA64_SIGCONTEXT_LOADRS_OFFSET", offsetof (struct sigcontext, sc_loadrs) },
- { "IA64_SIGFRAME_ARG0_OFFSET", offsetof (struct sigframe, arg0) },
- { "IA64_SIGFRAME_ARG1_OFFSET", offsetof (struct sigframe, arg1) },
- { "IA64_SIGFRAME_ARG2_OFFSET", offsetof (struct sigframe, arg2) },
- { "IA64_SIGFRAME_HANDLER_OFFSET", offsetof (struct sigframe, handler) },
- { "IA64_SIGFRAME_SIGCONTEXT_OFFSET", offsetof (struct sigframe, sc) },
- /* for assembly files which can't include sched.h: */
- { "IA64_CLONE_VFORK", CLONE_VFORK },
- { "IA64_CLONE_VM", CLONE_VM },
- /* used by fsys_gettimeofday in arch/ia64/kernel/fsys.S */
- { "IA64_CPUINFO_ITM_DELTA_OFFSET", offsetof (struct cpuinfo_ia64, itm_delta) },
- { "IA64_CPUINFO_ITM_NEXT_OFFSET", offsetof (struct cpuinfo_ia64, itm_next) },
- { "IA64_CPUINFO_NSEC_PER_CYC_OFFSET", offsetof (struct cpuinfo_ia64, nsec_per_cyc) },
- { "IA64_TIMESPEC_TV_NSEC_OFFSET", offsetof (struct timespec, tv_nsec) },
-
-};
-
-static const char *tabs = "\t\t\t\t\t\t\t\t\t\t";
-
-int
-main (int argc, char **argv)
-{
- const char *space;
- int i, num_tabs;
- size_t len;
-
- printf ("#ifndef _ASM_IA64_OFFSETS_H\n");
- printf ("#define _ASM_IA64_OFFSETS_H\n\n");
-
- printf ("/*\n * DO NOT MODIFY\n *\n * This file was generated by "
- "arch/ia64/tools/print_offsets.\n *\n */\n\n");
-
- for (i = 0; i < (int) (sizeof (tab) / sizeof (tab[0])); ++i)
- {
- if (tab[i].name[0] == '\0')
- printf ("\n");
- else
- {
- len = strlen (tab[i].name);
-
- num_tabs = (40 - len) / 8;
- if (num_tabs <= 0)
- space = " ";
- else
- space = strchr(tabs, '\0') - (40 - len) / 8;
-
- printf ("#define %s%s%lu\t/* 0x%lx */\n",
- tab[i].name, space, tab[i].value, tab[i].value);
- }
- }
-
- printf ("\n#define CLONE_IDLETASK_BIT %ld\n", ia64_fls (CLONE_IDLETASK));
- printf ("\n#define CLONE_SETTLS_BIT %ld\n", ia64_fls (CLONE_SETTLS));
-
- printf ("\n#endif /* _ASM_IA64_OFFSETS_H */\n");
- return 0;
-}
#include <asm/cache.h>
#include <asm/ptrace.h>
#include <asm/system.h>
+#include <asm/pgtable.h>
-#define LOAD_OFFSET PAGE_OFFSET
+#define LOAD_OFFSET KERNEL_START + KERNEL_TR_PAGE_SIZE
#include <asm-generic/vmlinux.lds.h>
OUTPUT_FORMAT("elf64-ia64-little")
}
v = PAGE_OFFSET; /* this symbol is here to make debugging easier... */
- phys_start = _start - PAGE_OFFSET;
+ phys_start = _start - LOAD_OFFSET;
. = KERNEL_START;
_text = .;
_stext = .;
- .text : AT(ADDR(.text) - PAGE_OFFSET)
+ .text : AT(ADDR(.text) - LOAD_OFFSET)
{
*(.text.ivt)
*(.text)
}
- .text2 : AT(ADDR(.text2) - PAGE_OFFSET)
+ .text2 : AT(ADDR(.text2) - LOAD_OFFSET)
{ *(.text2) }
#ifdef CONFIG_SMP
- .text.lock : AT(ADDR(.text.lock) - PAGE_OFFSET)
+ .text.lock : AT(ADDR(.text.lock) - LOAD_OFFSET)
{ *(.text.lock) }
#endif
_etext = .;
/* Exception table */
. = ALIGN(16);
- __ex_table : AT(ADDR(__ex_table) - PAGE_OFFSET)
+ __ex_table : AT(ADDR(__ex_table) - LOAD_OFFSET)
{
__start___ex_table = .;
*(__ex_table)
__stop___ex_table = .;
}
- __mckinley_e9_bundles : AT(ADDR(__mckinley_e9_bundles) - PAGE_OFFSET)
+ .data.patch.vtop : AT(ADDR(.data.patch.vtop) - LOAD_OFFSET)
+ {
+ __start___vtop_patchlist = .;
+ *(.data.patch.vtop)
+ __end____vtop_patchlist = .;
+ }
+
+ .data.patch.mckinley_e9 : AT(ADDR(.data.patch.mckinley_e9) - LOAD_OFFSET)
{
__start___mckinley_e9_bundles = .;
- *(__mckinley_e9_bundles)
+ *(.data.patch.mckinley_e9)
__end___mckinley_e9_bundles = .;
}
#if defined(CONFIG_IA64_GENERIC)
/* Machine Vector */
. = ALIGN(16);
- .machvec : AT(ADDR(.machvec) - PAGE_OFFSET)
+ .machvec : AT(ADDR(.machvec) - LOAD_OFFSET)
{
machvec_start = .;
*(.machvec)
/* Unwind info & table: */
. = ALIGN(8);
- .IA_64.unwind_info : AT(ADDR(.IA_64.unwind_info) - PAGE_OFFSET)
+ .IA_64.unwind_info : AT(ADDR(.IA_64.unwind_info) - LOAD_OFFSET)
{ *(.IA_64.unwind_info*) }
- .IA_64.unwind : AT(ADDR(.IA_64.unwind) - PAGE_OFFSET)
+ .IA_64.unwind : AT(ADDR(.IA_64.unwind) - LOAD_OFFSET)
{
ia64_unw_start = .;
*(.IA_64.unwind*)
RODATA
- .opd : AT(ADDR(.opd) - PAGE_OFFSET)
+ .opd : AT(ADDR(.opd) - LOAD_OFFSET)
{ *(.opd) }
/* Initialization code and data: */
. = ALIGN(PAGE_SIZE);
__init_begin = .;
- .init.text : AT(ADDR(.init.text) - PAGE_OFFSET)
+ .init.text : AT(ADDR(.init.text) - LOAD_OFFSET)
{
_sinittext = .;
*(.init.text)
_einittext = .;
}
- .init.data : AT(ADDR(.init.data) - PAGE_OFFSET)
+ .init.data : AT(ADDR(.init.data) - LOAD_OFFSET)
{ *(.init.data) }
- .init.ramfs : AT(ADDR(.init.ramfs) - PAGE_OFFSET)
+ .init.ramfs : AT(ADDR(.init.ramfs) - LOAD_OFFSET)
{
__initramfs_start = .;
*(.init.ramfs)
}
. = ALIGN(16);
- .init.setup : AT(ADDR(.init.setup) - PAGE_OFFSET)
+ .init.setup : AT(ADDR(.init.setup) - LOAD_OFFSET)
{
__setup_start = .;
*(.init.setup)
__setup_end = .;
}
- __param : AT(ADDR(__param) - PAGE_OFFSET)
+ __param : AT(ADDR(__param) - LOAD_OFFSET)
{
__start___param = .;
*(__param)
__stop___param = .;
}
- .initcall.init : AT(ADDR(.initcall.init) - PAGE_OFFSET)
+ .initcall.init : AT(ADDR(.initcall.init) - LOAD_OFFSET)
{
__initcall_start = .;
*(.initcall1.init)
__initcall_end = .;
}
__con_initcall_start = .;
- .con_initcall.init : AT(ADDR(.con_initcall.init) - PAGE_OFFSET)
+ .con_initcall.init : AT(ADDR(.con_initcall.init) - LOAD_OFFSET)
{ *(.con_initcall.init) }
__con_initcall_end = .;
__security_initcall_start = .;
__init_end = .;
/* The initial task and kernel stack */
- .data.init_task : AT(ADDR(.data.init_task) - PAGE_OFFSET)
+ .data.init_task : AT(ADDR(.data.init_task) - LOAD_OFFSET)
{ *(.data.init_task) }
- .data.page_aligned : AT(ADDR(.data.page_aligned) - PAGE_OFFSET)
+ .data.page_aligned : AT(ADDR(.data.page_aligned) - LOAD_OFFSET)
{ *(__special_page_section)
__start_gate_section = .;
- *(.text.gate)
+ *(.data.gate)
__stop_gate_section = .;
}
+ . = ALIGN(PAGE_SIZE); /* make sure the gate page doesn't expose kernel data */
- . = ALIGN(SMP_CACHE_BYTES);
- .data.cacheline_aligned : AT(ADDR(.data.cacheline_aligned) - PAGE_OFFSET)
+ .data.cacheline_aligned : AT(ADDR(.data.cacheline_aligned) - LOAD_OFFSET)
{ *(.data.cacheline_aligned) }
/* Per-cpu data: */
. = ALIGN(PERCPU_PAGE_SIZE);
__phys_per_cpu_start = .;
- .data.percpu PERCPU_ADDR : AT(__phys_per_cpu_start - PAGE_OFFSET)
+ .data.percpu PERCPU_ADDR : AT(__phys_per_cpu_start - LOAD_OFFSET)
{
__per_cpu_start = .;
*(.data.percpu)
}
. = __phys_per_cpu_start + PERCPU_PAGE_SIZE; /* ensure percpu data fits into percpu page size */
- .data : AT(ADDR(.data) - PAGE_OFFSET)
+ .data : AT(ADDR(.data) - LOAD_OFFSET)
{ *(.data) *(.gnu.linkonce.d*) CONSTRUCTORS }
. = ALIGN(16);
__gp = . + 0x200000; /* gp must be 16-byte aligned for exc. table */
- .got : AT(ADDR(.got) - PAGE_OFFSET)
+ .got : AT(ADDR(.got) - LOAD_OFFSET)
{ *(.got.plt) *(.got) }
/* We want the small data sections together, so single-instruction offsets
can access them all, and initialized data all before uninitialized, so
we can shorten the on-disk segment size. */
- .sdata : AT(ADDR(.sdata) - PAGE_OFFSET)
+ .sdata : AT(ADDR(.sdata) - LOAD_OFFSET)
{ *(.sdata) }
_edata = .;
_bss = .;
- .sbss : AT(ADDR(.sbss) - PAGE_OFFSET)
+ .sbss : AT(ADDR(.sbss) - LOAD_OFFSET)
{ *(.sbss) *(.scommon) }
- .bss : AT(ADDR(.bss) - PAGE_OFFSET)
+ .bss : AT(ADDR(.bss) - LOAD_OFFSET)
{ *(.bss) *(COMMON) }
_end = .;
#include <linux/kernel.h>
#include <linux/ctype.h>
#include <linux/string.h>
+#include <linux/irq.h>
#include <asm/io.h>
#include <asm/kdebug.h>
#include <asm/delay.h>
-#include <asm/hw_irq.h>
#include <asm/system.h>
#include <asm/pgtable.h>
#include <asm/tlbflush.h>
irq = acpi_fadt.sci_int;
#ifdef CONFIG_IA64
- irq = gsi_to_vector(irq);
+ int vector;
+
+ vector = acpi_irq_to_vector(irq);
+ if (vector < 0) {
+ printk(KERN_ERR PREFIX "SCI (IRQ%d) not registerd\n", irq);
+ return AE_OK;
+ }
+ irq = vector;
#endif
acpi_irq_irq = irq;
acpi_irq_handler = handler;
{
if (acpi_irq_handler) {
#ifdef CONFIG_IA64
- irq = gsi_to_vector(irq);
+ irq = acpi_irq_to_vector(irq);
#endif
free_irq(irq, acpi_irq);
acpi_irq_handler = NULL;
int result = 0;
int size = 0;
struct pci_bus bus;
+#ifdef CONFIG_IA64
+ struct pci_controller ctrl;
+#endif
if (!value)
return AE_BAD_PARAMETER;
}
bus.number = pci_id->bus;
+#ifdef CONFIG_IA64
+ ctrl.segment = pci_id->segment;
+ bus.sysdata = &ctrl;
+#endif
result = pci_root_ops->read(&bus, PCI_DEVFN(pci_id->device,
pci_id->function),
reg, size, value);
int result = 0;
int size = 0;
struct pci_bus bus;
+#ifdef CONFIG_IA64
+ struct pci_controller ctrl;
+#endif
switch (width) {
case 8:
}
bus.number = pci_id->bus;
+#ifdef CONFIG_IA64
+ ctrl.segment = pci_id->segment;
+ bus.sysdata = &ctrl;
+#endif
result = pci_root_ops->write(&bus, PCI_DEVFN(pci_id->device,
pci_id->function),
reg, size, value);
* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
*/
+#include <linux/config.h>
+
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#ifdef CONFIG_X86_IO_APIC
#include <asm/mpspec.h>
#endif
+#ifdef CONFIG_IOSAPIC
+# include <asm/iosapic.h>
+#endif
#include <acpi/acpi_bus.h>
#include <acpi/acpi_drivers.h>
+#ifdef CONFIG_X86
+# define PCI_SEGMENT(x) 0 /* XXX fix me */
+#endif
+
#define _COMPONENT ACPI_PCI_COMPONENT
ACPI_MODULE_NAME ("pci_irq")
return_VALUE(0);
}
+ entry->irq = entry->link.index;
+
if (!entry->irq && entry->link.handle) {
entry->irq = acpi_pci_link_get_irq(entry->link.handle, entry->link.index);
if (!entry->irq) {
}
}
+#ifdef CONFIG_IA64
+ dev->irq = gsi_to_irq(irq);
+#else
dev->irq = irq;
+#endif
ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Device %s using IRQ %d\n", dev->slot_name, dev->irq));
eisa_set_level_irq(dev->irq);
}
#endif
+#ifdef CONFIG_IOSAPIC
+ if (acpi_irq_model == ACPI_IRQ_MODEL_IOSAPIC)
+ iosapic_enable_intr(dev->irq);
+#endif
return_VALUE(dev->irq);
}
* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
*/
+#include <linux/config.h>
+
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
switch (status) {
case AE_OK:
root->id.segment = (u16) value;
- printk("_SEG exists! Unsupported. Abort.\n");
- BUG();
break;
case AE_NOT_FOUND:
ACPI_DEBUG_PRINT((ACPI_DB_INFO,
* PCI namespace does not get created until this call is made (and
* thus the root bridge's pci_dev does not exist).
*/
+#ifdef CONFIG_X86
root->bus = pcibios_scan_root(root->id.bus);
+#else
+ root->bus = pcibios_scan_root(root->handle,
+ root->id.segment, root->id.bus);
+#endif
if (!root->bus) {
ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
"Bus %02x:%02x not present in PCI namespace\n",
/*
- * HP AGPGART routines.
+ * HP AGPGART routines.
+ * Copyright (C) 2002-2003 Hewlett-Packard Co
+ * Bjorn Helgaas <bjorn_helgaas@hp.com>
*/
+#include <linux/acpi.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/agp_backend.h>
+
+#include <asm/acpi-ext.h>
+
#include "agp.h"
#ifndef log2
#define log2(x) ffz(~(x))
#endif
+#define HP_ZX1_IOC_OFFSET 0x1000 /* ACPI reports SBA, we want IOC */
+
+/* HP ZX1 IOC registers */
+#define HP_ZX1_IBASE 0x300
+#define HP_ZX1_IMASK 0x308
+#define HP_ZX1_PCOM 0x310
+#define HP_ZX1_TCNFG 0x318
+#define HP_ZX1_PDIR_BASE 0x320
+
+/* HP ZX1 LBA registers */
+#define HP_ZX1_AGP_STATUS 0x64
+#define HP_ZX1_AGP_COMMAND 0x68
+
#define HP_ZX1_IOVA_BASE GB(1UL)
#define HP_ZX1_IOVA_SIZE GB(1UL)
#define HP_ZX1_GART_SIZE (HP_ZX1_IOVA_SIZE / 2)
#define HP_ZX1_PDIR_VALID_BIT 0x8000000000000000UL
#define HP_ZX1_IOVA_TO_PDIR(va) ((va - hp_private.iova_base) >> hp_private.io_tlb_shift)
+/* AGP bridge need not be PCI device, but DRM thinks it is. */
+static struct pci_dev fake_bridge_dev;
+
static struct aper_size_info_fixed hp_zx1_sizes[] =
{
{0, 0, 0}, /* filled in by hp_zx1_fetch_size() */
};
static struct _hp_private {
- struct pci_dev *ioc;
- volatile u8 *registers;
+ volatile u8 *ioc_regs;
+ volatile u8 *lba_regs;
u64 *io_pdir; // PDIR for entire IOVA
u64 *gatt; // PDIR just for GART (subset of above)
u64 gatt_entries;
* - IOVA space is 1Gb in size
* - first 512Mb is IOMMU, second 512Mb is GART
*/
- hp->io_tlb_ps = INREG64(hp->registers, HP_ZX1_TCNFG);
+ hp->io_tlb_ps = INREG64(hp->ioc_regs, HP_ZX1_TCNFG);
switch (hp->io_tlb_ps) {
case 0: hp->io_tlb_shift = 12; break;
case 1: hp->io_tlb_shift = 13; break;
hp->io_page_size = 1 << hp->io_tlb_shift;
hp->io_pages_per_kpage = PAGE_SIZE / hp->io_page_size;
- hp->iova_base = INREG64(hp->registers, HP_ZX1_IBASE) & ~0x1;
+ hp->iova_base = INREG64(hp->ioc_regs, HP_ZX1_IBASE) & ~0x1;
hp->gart_base = hp->iova_base + HP_ZX1_IOVA_SIZE - HP_ZX1_GART_SIZE;
hp->gart_size = HP_ZX1_GART_SIZE;
hp->gatt_entries = hp->gart_size / hp->io_page_size;
- hp->io_pdir = phys_to_virt(INREG64(hp->registers, HP_ZX1_PDIR_BASE));
+ hp->io_pdir = phys_to_virt(INREG64(hp->ioc_regs, HP_ZX1_PDIR_BASE));
hp->gatt = &hp->io_pdir[HP_ZX1_IOVA_TO_PDIR(hp->gart_base)];
if (hp->gatt[0] != HP_ZX1_SBA_IOMMU_COOKIE) {
return 0;
}
-static int __init hp_zx1_ioc_owner(u8 ioc_rev)
+static int __init
+hp_zx1_ioc_owner (void)
{
struct _hp_private *hp = &hp_private;
return 0;
}
-static int __init hp_zx1_ioc_init(void)
+static int __init
+hp_zx1_ioc_init (u64 ioc_hpa, u64 lba_hpa)
{
struct _hp_private *hp = &hp_private;
- struct pci_dev *ioc;
- int i;
- u8 ioc_rev;
-
- ioc = pci_find_device(PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_ZX1_IOC, NULL);
- if (!ioc) {
- printk(KERN_ERR PFX "Detected HP ZX1 AGP bridge but no IOC\n");
- return -ENODEV;
- }
- hp->ioc = ioc;
- pci_read_config_byte(ioc, PCI_REVISION_ID, &ioc_rev);
-
- for (i = 0; i < PCI_NUM_RESOURCES; i++) {
- if (pci_resource_flags(ioc, i) == IORESOURCE_MEM) {
- hp->registers = (u8 *) ioremap(pci_resource_start(ioc, i),
- pci_resource_len(ioc, i));
- break;
- }
- }
- if (!hp->registers) {
- printk(KERN_ERR PFX "Detected HP ZX1 AGP bridge but no CSRs\n");
- return -ENODEV;
- }
+ hp->ioc_regs = ioremap(ioc_hpa, 1024);
+ hp->lba_regs = ioremap(lba_hpa, 256);
/*
* If the IOTLB is currently disabled, we can take it over.
* Otherwise, we have to share with sba_iommu.
*/
- hp->io_pdir_owner = (INREG64(hp->registers, HP_ZX1_IBASE) & 0x1) == 0;
+ hp->io_pdir_owner = (INREG64(hp->ioc_regs, HP_ZX1_IBASE) & 0x1) == 0;
if (hp->io_pdir_owner)
- return hp_zx1_ioc_owner(ioc_rev);
+ return hp_zx1_ioc_owner();
return hp_zx1_ioc_shared();
}
-static int hp_zx1_fetch_size(void)
+static int
+hp_zx1_fetch_size(void)
{
int size;
return size;
}
-static int hp_zx1_configure(void)
+static int
+hp_zx1_configure (void)
{
struct _hp_private *hp = &hp_private;
agp_bridge->gart_bus_addr = hp->gart_base;
+#if 0
+ /* ouch!! can't do that with a non-PCI AGP bridge... */
agp_bridge->capndx = pci_find_capability(agp_bridge->dev, PCI_CAP_ID_AGP);
- pci_read_config_dword(agp_bridge->dev,
- agp_bridge->capndx + PCI_AGP_STATUS, &agp_bridge->mode);
+#else
+ agp_bridge->capndx = 0;
+#endif
+ agp_bridge->mode = INREG32(hp->lba_regs, HP_ZX1_AGP_STATUS);
if (hp->io_pdir_owner) {
- OUTREG64(hp->registers, HP_ZX1_PDIR_BASE,
- virt_to_phys(hp->io_pdir));
- OUTREG64(hp->registers, HP_ZX1_TCNFG, hp->io_tlb_ps);
- OUTREG64(hp->registers, HP_ZX1_IMASK, ~(HP_ZX1_IOVA_SIZE - 1));
- OUTREG64(hp->registers, HP_ZX1_IBASE, hp->iova_base | 0x1);
- OUTREG64(hp->registers, HP_ZX1_PCOM,
- hp->iova_base | log2(HP_ZX1_IOVA_SIZE));
- INREG64(hp->registers, HP_ZX1_PCOM);
+ OUTREG64(hp->ioc_regs, HP_ZX1_PDIR_BASE, virt_to_phys(hp->io_pdir));
+ OUTREG64(hp->ioc_regs, HP_ZX1_TCNFG, hp->io_tlb_ps);
+ OUTREG64(hp->ioc_regs, HP_ZX1_IMASK, ~(HP_ZX1_IOVA_SIZE - 1));
+ OUTREG64(hp->ioc_regs, HP_ZX1_IBASE, hp->iova_base | 0x1);
+ OUTREG64(hp->ioc_regs, HP_ZX1_PCOM, hp->iova_base | log2(HP_ZX1_IOVA_SIZE));
+ INREG64(hp->ioc_regs, HP_ZX1_PCOM);
}
return 0;
}
-static void hp_zx1_cleanup(void)
+static void
+hp_zx1_cleanup (void)
{
struct _hp_private *hp = &hp_private;
if (hp->io_pdir_owner)
- OUTREG64(hp->registers, HP_ZX1_IBASE, 0);
- iounmap((void *) hp->registers);
+ OUTREG64(hp->ioc_regs, HP_ZX1_IBASE, 0);
+ iounmap((void *) hp->ioc_regs);
}
-static void hp_zx1_tlbflush(struct agp_memory *mem)
+static void
+hp_zx1_tlbflush (struct agp_memory *mem)
{
struct _hp_private *hp = &hp_private;
- OUTREG64(hp->registers, HP_ZX1_PCOM, hp->gart_base | log2(hp->gart_size));
- INREG64(hp->registers, HP_ZX1_PCOM);
+ OUTREG64(hp->ioc_regs, HP_ZX1_PCOM, hp->gart_base | log2(hp->gart_size));
+ INREG64(hp->ioc_regs, HP_ZX1_PCOM);
}
-static int hp_zx1_create_gatt_table(void)
+static int
+hp_zx1_create_gatt_table (void)
{
struct _hp_private *hp = &hp_private;
int i;
return 0;
}
-static int hp_zx1_free_gatt_table(void)
+static int
+hp_zx1_free_gatt_table (void)
{
struct _hp_private *hp = &hp_private;
return 0;
}
-static int hp_zx1_insert_memory(struct agp_memory *mem, off_t pg_start,
- int type)
+static int
+hp_zx1_insert_memory (struct agp_memory *mem, off_t pg_start, int type)
{
struct _hp_private *hp = &hp_private;
int i, k;
return 0;
}
-static int hp_zx1_remove_memory(struct agp_memory *mem, off_t pg_start,
- int type)
+static int
+hp_zx1_remove_memory (struct agp_memory *mem, off_t pg_start, int type)
{
struct _hp_private *hp = &hp_private;
int i, io_pg_start, io_pg_count;
return 0;
}
-static unsigned long hp_zx1_mask_memory(unsigned long addr, int type)
+static unsigned long
+hp_zx1_mask_memory (unsigned long addr, int type)
{
return HP_ZX1_PDIR_VALID_BIT | addr;
}
+static void
+hp_zx1_enable (u32 mode)
+{
+ struct _hp_private *hp = &hp_private;
+ u32 command;
+
+ command = INREG32(hp->lba_regs, HP_ZX1_AGP_STATUS);
+
+ command = agp_collect_device_status(mode, command);
+ command |= 0x00000100;
+
+ OUTREG32(hp->lba_regs, HP_ZX1_AGP_COMMAND, command);
+
+ agp_device_command(command, 0);
+}
+
struct agp_bridge_driver hp_zx1_driver = {
.owner = THIS_MODULE,
.size_type = FIXED_APER_SIZE,
.tlb_flush = hp_zx1_tlbflush,
.mask_memory = hp_zx1_mask_memory,
.masks = hp_zx1_masks,
- .agp_enable = agp_generic_enable,
+ .agp_enable = hp_zx1_enable,
.cache_flush = global_cache_flush,
.create_gatt_table = hp_zx1_create_gatt_table,
.free_gatt_table = hp_zx1_free_gatt_table,
.cant_use_aperture = 1,
};
-static int __init agp_hp_probe(struct pci_dev *pdev,
- const struct pci_device_id *ent)
+static int __init
+hp_zx1_setup (u64 ioc_hpa, u64 lba_hpa)
{
struct agp_bridge_data *bridge;
int error;
- /* ZX1 LBAs can be either PCI or AGP bridges */
- if (!pci_find_capability(pdev, PCI_CAP_ID_AGP))
- return -ENODEV;
-
- printk(KERN_INFO PFX "Detected HP ZX1 AGP chipset at %s\n",
- pdev->slot_name);
+ printk(KERN_INFO PFX "Detected HP ZX1 AGP chipset (ioc=%lx, lba=%lx)\n", ioc_hpa, lba_hpa);
- error = hp_zx1_ioc_init();
+ error = hp_zx1_ioc_init(ioc_hpa, lba_hpa);
if (error)
return error;
bridge = agp_alloc_bridge();
if (!bridge)
return -ENOMEM;
-
bridge->driver = &hp_zx1_driver;
- bridge->dev = pdev;
- pci_set_drvdata(pdev, bridge);
+ fake_bridge_dev.vendor = PCI_VENDOR_ID_HP;
+ fake_bridge_dev.device = PCI_DEVICE_ID_HP_ZX1_LBA;
+ bridge->dev = &fake_bridge_dev;
+
return agp_add_bridge(bridge);
}
-static void __devexit agp_hp_remove(struct pci_dev *pdev)
+static acpi_status __init
+zx1_gart_probe (acpi_handle obj, u32 depth, void *context, void **ret)
{
- struct agp_bridge_data *bridge = pci_get_drvdata(pdev);
-
- agp_remove_bridge(bridge);
- agp_put_bridge(bridge);
-}
-
-static struct pci_device_id agp_hp_pci_table[] __initdata = {
- {
- .class = (PCI_CLASS_BRIDGE_HOST << 8),
- .class_mask = ~0,
- .vendor = PCI_VENDOR_ID_HP,
- .device = PCI_DEVICE_ID_HP_ZX1_LBA,
- .subvendor = PCI_ANY_ID,
- .subdevice = PCI_ANY_ID,
- },
- { }
-};
+ acpi_handle handle, parent;
+ acpi_status status;
+ struct acpi_buffer buffer;
+ struct acpi_device_info *info;
+ u64 lba_hpa, sba_hpa, length;
+ int match;
+
+ status = hp_acpi_csr_space(obj, &lba_hpa, &length);
+ if (ACPI_FAILURE(status))
+ return 1;
+
+ /* Look for an enclosing IOC scope and find its CSR space */
+ handle = obj;
+ do {
+ buffer.length = ACPI_ALLOCATE_LOCAL_BUFFER;
+ status = acpi_get_object_info(handle, &buffer);
+ if (ACPI_SUCCESS(status)) {
+ /* TBD check _CID also */
+ info = buffer.pointer;
+ info->hardware_id.value[sizeof(info->hardware_id)-1] = '\0';
+ match = (strcmp(info->hardware_id.value, "HWP0001") == 0);
+ ACPI_MEM_FREE(info);
+ if (match) {
+ status = hp_acpi_csr_space(handle, &sba_hpa, &length);
+ if (ACPI_SUCCESS(status))
+ break;
+ else {
+ printk(KERN_ERR PFX "Detected HP ZX1 "
+ "AGP LBA but no IOC.\n");
+ return status;
+ }
+ }
+ }
-MODULE_DEVICE_TABLE(pci, agp_hp_pci_table);
+ status = acpi_get_parent(handle, &parent);
+ handle = parent;
+ } while (ACPI_SUCCESS(status));
-static struct pci_driver agp_hp_pci_driver = {
- .name = "agpgart-hp",
- .id_table = agp_hp_pci_table,
- .probe = agp_hp_probe,
- .remove = agp_hp_remove,
-};
+ if (hp_zx1_setup(sba_hpa + HP_ZX1_IOC_OFFSET, lba_hpa))
+ return 1;
+ return 0;
+}
-static int __init agp_hp_init(void)
+static int __init
+agp_hp_init (void)
{
- return pci_module_init(&agp_hp_pci_driver);
+ acpi_status status;
+
+ status = acpi_get_devices("HWP0003", zx1_gart_probe, "HWP0003 AGP LBA", NULL);
+ if (!(ACPI_SUCCESS(status))) {
+ agp_bridge->type = NOT_SUPPORTED;
+ printk(KERN_INFO PFX "Failed to initialize zx1 AGP.\n");
+ return -ENODEV;
+ }
+ return 0;
}
-static void __exit agp_hp_cleanup(void)
+static void __exit
+agp_hp_cleanup (void)
{
- pci_unregister_driver(&agp_hp_pci_driver);
}
module_init(agp_hp_init);
.name = "agpgart-intel-i460",
.id_table = agp_intel_i460_pci_table,
.probe = agp_intel_i460_probe,
- .remove = agp_intel_i460_remove,
+ .remove = __exit_p(agp_intel_i460_remove),
};
static int __init agp_intel_i460_init(void)
# Makefile for the kernel character device drivers.
#
+obj-y := dummy.o
+
miropcm20-objs := miropcm20-rds-core.o miropcm20-radio.o
obj-$(CONFIG_RADIO_AZTECH) += radio-aztech.o
--- /dev/null
+/* just so the linker knows what kind of object files it's dealing with... */
bttv-risc.o bttv-vbi.o
zoran-objs := zr36120.o zr36120_i2c.o zr36120_mem.o
+obj-y := dummy.o
+
obj-$(CONFIG_VIDEO_DEV) += videodev.o v4l2-common.o v4l1-compat.o
obj-$(CONFIG_VIDEO_BT848) += bttv.o msp3400.o tvaudio.o \
--- /dev/null
+/* just so the linker knows what kind of object files it's dealing with... */
for (i = 0; i < init_length; i++)
outl(init_sequence[i], ioaddr + CSR12);
}
+
+ (void) inl(ioaddr + CSR6); /* flush CSR12 writes */
+ udelay(500); /* Give MII time to recover */
+
tmp_info = get_u16(&misc_info[1]);
if (tmp_info)
tp->advertising[phy_num] = tmp_info | 1;
#define QL1280_TARGET_MODE_SUPPORT 0 /* Target mode support */
#define QL1280_LUN_SUPPORT 0
#define WATCHDOGTIMER 0
-#define MEMORY_MAPPED_IO 0
+#define MEMORY_MAPPED_IO 1
#define DEBUG_QLA1280_INTR 0
#define USE_NVRAM_DEFAULTS 0
#define DEBUG_PRINT_NVRAM 0
/*
* Get memory mapped I/O address.
*/
- pci_read_config_word (ha->pdev, PCI_BASE_ADDRESS_1, &mmapbase);
+ pci_read_config_dword (ha->pdev, PCI_BASE_ADDRESS_1, &mmapbase);
mmapbase &= PCI_BASE_ADDRESS_MEM_MASK;
/*
#ifndef SYM_LINUX_DYNAMIC_DMA_MAPPING
typedef u_long bus_addr_t;
#else
-#if SYM_CONF_DMA_ADDRESSING_MODE > 0
-typedef dma64_addr_t bus_addr_t;
-#else
typedef dma_addr_t bus_addr_t;
#endif
-#endif
/*
* Used by the eh thread to wait for command completion.
a module, say M here and read <file:Documentation/modules.txt>.
If unsure, say N.
-config SERIAL_HCDP
+config SERIAL_8250_ACPI
+ bool "8250/16550 device discovery via ACPI namespace"
+ default y if IA64
+ depends on ACPI_BUS
+ ---help---
+ If you wish to enable serial port discovery via the ACPI
+ namespace, say Y here. If unsure, say N.
+
+config SERIAL_8250_HCDP
bool "8250/16550 device discovery support via EFI HCDP table"
depends on IA64
---help---
serial-8250-$(CONFIG_GSC) += 8250_gsc.o
serial-8250-$(CONFIG_PCI) += 8250_pci.o
serial-8250-$(CONFIG_PNP) += 8250_pnp.o
-serial-8250-$(CONFIG_SERIAL_HCDP) += 8250_hcdp.o
+serial-8250-$(CONFIG_SERIAL_8250_HCDP) += 8250_hcdp.o
+serial-8250-$(CONFIG_SERIAL_8250_ACPI) += 8250_acpi.o
obj-$(CONFIG_SERIAL_CORE) += core.o
obj-$(CONFIG_SERIAL_21285) += 21285.o
#define ACTUAL_NR_IRQS NR_IRQS
#endif
+extern irq_desc_t irq_desc [NR_IRQS];
+
#endif
#define N_TXTOFF(x) 0
#ifdef __KERNEL__
-# include <asm/page.h>
-# define STACK_TOP (0x6000000000000000UL + (1UL << (4*PAGE_SHIFT - 12)) - PAGE_SIZE)
-# define IA64_RBS_BOT (STACK_TOP - 0x80000000L + PAGE_SIZE) /* bottom of reg. backing store */
+#include <asm/ustack.h>
#endif
-
#endif /* _ASM_IA64_A_OUT_H */
/*
* IA-64 specific AGP definitions.
*
- * Copyright (C) 2002 Hewlett-Packard Co
+ * Copyright (C) 2002-2003 Hewlett-Packard Co
* David Mosberger-Tang <davidm@hpl.hp.com>
*/
#define flush_agp_mappings() /* nothing */
#define flush_agp_cache() mb()
-/* Page-protection value to be used for AGP memory mapped into kernel space. */
-#define PAGE_AGP PAGE_KERNEL
-
#endif /* _ASM_IA64_AGP_H */
.section "__ex_table", "a" // declare section & section attributes
.previous
-#if __GNUC__ >= 3
# define EX(y,x...) \
.xdata4 "__ex_table", 99f-., y-.; \
[99:] x
# define EXCLR(y,x...) \
.xdata4 "__ex_table", 99f-., y-.+4; \
[99:] x
-#else
-# define EX(y,x...) \
- .xdata4 "__ex_table", 99f-., y-.; \
- 99: x
-# define EXCLR(y,x...) \
- .xdata4 "__ex_table", 99f-., y-.+4; \
- 99: x
-#endif
+
+/*
+ * Mark instructions that need a load of a virtual address patched to be
+ * a load of a physical address. We use this either in critical performance
+ * path (ivt.S - TLB miss processing) or in places where it might not be
+ * safe to use a "tpa" instruction (mca_asm.S - error recovery).
+ */
+ .section ".data.patch.vtop", "a" // declare section & section attributes
+ .previous
+
+#define LOAD_PHYSICAL(pr, reg, obj) \
+[1:](pr)movl reg = obj; \
+ .xdata4 ".data.patch.vtop", 1b-.
/*
* For now, we always put in the McKinley E9 workaround. On CPUs that don't need it,
*/
#define DO_MCKINLEY_E9_WORKAROUND
#ifdef DO_MCKINLEY_E9_WORKAROUND
- .section "__mckinley_e9_bundles", "a"
+ .section ".data.patch.mckinley_e9", "a"
.previous
/* workaround for Itanium 2 Errata 9: */
# define MCKINLEY_E9_WORKAROUND \
- .xdata4 "__mckinley_e9_bundles", 1f-.; \
+ .xdata4 ".data.patch.mckinley_e9", 1f-.;\
1:{ .mib; \
nop.m 0; \
nop.i 0; \
return (void *) (unsigned long) uptr;
}
+static __inline__ void *
+compat_alloc_user_space (long len)
+{
+ struct pt_regs *regs = ia64_task_regs(current);
+ return (void *) ((regs->r12 & -16) - len);
+}
+
#endif /* _ASM_IA64_COMPAT_H */
*/
#define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x800000000)
+#define PT_IA_64_UNWIND 0x70000001
+
+/* IA-64 relocations: */
+#define R_IA64_NONE 0x00 /* none */
+#define R_IA64_IMM14 0x21 /* symbol + addend, add imm14 */
+#define R_IA64_IMM22 0x22 /* symbol + addend, add imm22 */
+#define R_IA64_IMM64 0x23 /* symbol + addend, mov imm64 */
+#define R_IA64_DIR32MSB 0x24 /* symbol + addend, data4 MSB */
+#define R_IA64_DIR32LSB 0x25 /* symbol + addend, data4 LSB */
+#define R_IA64_DIR64MSB 0x26 /* symbol + addend, data8 MSB */
+#define R_IA64_DIR64LSB 0x27 /* symbol + addend, data8 LSB */
+#define R_IA64_GPREL22 0x2a /* @gprel(sym+add), add imm22 */
+#define R_IA64_GPREL64I 0x2b /* @gprel(sym+add), mov imm64 */
+#define R_IA64_GPREL32MSB 0x2c /* @gprel(sym+add), data4 MSB */
+#define R_IA64_GPREL32LSB 0x2d /* @gprel(sym+add), data4 LSB */
+#define R_IA64_GPREL64MSB 0x2e /* @gprel(sym+add), data8 MSB */
+#define R_IA64_GPREL64LSB 0x2f /* @gprel(sym+add), data8 LSB */
+#define R_IA64_LTOFF22 0x32 /* @ltoff(sym+add), add imm22 */
+#define R_IA64_LTOFF64I 0x33 /* @ltoff(sym+add), mov imm64 */
+#define R_IA64_PLTOFF22 0x3a /* @pltoff(sym+add), add imm22 */
+#define R_IA64_PLTOFF64I 0x3b /* @pltoff(sym+add), mov imm64 */
+#define R_IA64_PLTOFF64MSB 0x3e /* @pltoff(sym+add), data8 MSB */
+#define R_IA64_PLTOFF64LSB 0x3f /* @pltoff(sym+add), data8 LSB */
+#define R_IA64_FPTR64I 0x43 /* @fptr(sym+add), mov imm64 */
+#define R_IA64_FPTR32MSB 0x44 /* @fptr(sym+add), data4 MSB */
+#define R_IA64_FPTR32LSB 0x45 /* @fptr(sym+add), data4 LSB */
+#define R_IA64_FPTR64MSB 0x46 /* @fptr(sym+add), data8 MSB */
+#define R_IA64_FPTR64LSB 0x47 /* @fptr(sym+add), data8 LSB */
+#define R_IA64_PCREL60B 0x48 /* @pcrel(sym+add), brl */
+#define R_IA64_PCREL21B 0x49 /* @pcrel(sym+add), ptb, call */
+#define R_IA64_PCREL21M 0x4a /* @pcrel(sym+add), chk.s */
+#define R_IA64_PCREL21F 0x4b /* @pcrel(sym+add), fchkf */
+#define R_IA64_PCREL32MSB 0x4c /* @pcrel(sym+add), data4 MSB */
+#define R_IA64_PCREL32LSB 0x4d /* @pcrel(sym+add), data4 LSB */
+#define R_IA64_PCREL64MSB 0x4e /* @pcrel(sym+add), data8 MSB */
+#define R_IA64_PCREL64LSB 0x4f /* @pcrel(sym+add), data8 LSB */
+#define R_IA64_LTOFF_FPTR22 0x52 /* @ltoff(@fptr(s+a)), imm22 */
+#define R_IA64_LTOFF_FPTR64I 0x53 /* @ltoff(@fptr(s+a)), imm64 */
+#define R_IA64_LTOFF_FPTR32MSB 0x54 /* @ltoff(@fptr(s+a)), 4 MSB */
+#define R_IA64_LTOFF_FPTR32LSB 0x55 /* @ltoff(@fptr(s+a)), 4 LSB */
+#define R_IA64_LTOFF_FPTR64MSB 0x56 /* @ltoff(@fptr(s+a)), 8 MSB */
+#define R_IA64_LTOFF_FPTR64LSB 0x57 /* @ltoff(@fptr(s+a)), 8 LSB */
+#define R_IA64_SEGREL32MSB 0x5c /* @segrel(sym+add), data4 MSB */
+#define R_IA64_SEGREL32LSB 0x5d /* @segrel(sym+add), data4 LSB */
+#define R_IA64_SEGREL64MSB 0x5e /* @segrel(sym+add), data8 MSB */
+#define R_IA64_SEGREL64LSB 0x5f /* @segrel(sym+add), data8 LSB */
+#define R_IA64_SECREL32MSB 0x64 /* @secrel(sym+add), data4 MSB */
+#define R_IA64_SECREL32LSB 0x65 /* @secrel(sym+add), data4 LSB */
+#define R_IA64_SECREL64MSB 0x66 /* @secrel(sym+add), data8 MSB */
+#define R_IA64_SECREL64LSB 0x67 /* @secrel(sym+add), data8 LSB */
+#define R_IA64_REL32MSB 0x6c /* data 4 + REL */
+#define R_IA64_REL32LSB 0x6d /* data 4 + REL */
+#define R_IA64_REL64MSB 0x6e /* data 8 + REL */
+#define R_IA64_REL64LSB 0x6f /* data 8 + REL */
+#define R_IA64_LTV32MSB 0x74 /* symbol + addend, data4 MSB */
+#define R_IA64_LTV32LSB 0x75 /* symbol + addend, data4 LSB */
+#define R_IA64_LTV64MSB 0x76 /* symbol + addend, data8 MSB */
+#define R_IA64_LTV64LSB 0x77 /* symbol + addend, data8 LSB */
+#define R_IA64_PCREL21BI 0x79 /* @pcrel(sym+add), ptb, call */
+#define R_IA64_PCREL22 0x7a /* @pcrel(sym+add), imm22 */
+#define R_IA64_PCREL64I 0x7b /* @pcrel(sym+add), imm64 */
+#define R_IA64_IPLTMSB 0x80 /* dynamic reloc, imported PLT, MSB */
+#define R_IA64_IPLTLSB 0x81 /* dynamic reloc, imported PLT, LSB */
+#define R_IA64_COPY 0x84 /* dynamic reloc, data copy */
+#define R_IA64_SUB 0x85 /* -symbol + addend, add imm22 */
+#define R_IA64_LTOFF22X 0x86 /* LTOFF22, relaxable. */
+#define R_IA64_LDXMOV 0x87 /* Use of LTOFF22X. */
+#define R_IA64_TPREL14 0x91 /* @tprel(sym+add), add imm14 */
+#define R_IA64_TPREL22 0x92 /* @tprel(sym+add), add imm22 */
+#define R_IA64_TPREL64I 0x93 /* @tprel(sym+add), add imm64 */
+#define R_IA64_TPREL64MSB 0x96 /* @tprel(sym+add), data8 MSB */
+#define R_IA64_TPREL64LSB 0x97 /* @tprel(sym+add), data8 LSB */
+#define R_IA64_LTOFF_TPREL22 0x9a /* @ltoff(@tprel(s+a)), add imm22 */
+#define R_IA64_DTPMOD64MSB 0xa6 /* @dtpmod(sym+add), data8 MSB */
+#define R_IA64_DTPMOD64LSB 0xa7 /* @dtpmod(sym+add), data8 LSB */
+#define R_IA64_LTOFF_DTPMOD22 0xaa /* @ltoff(@dtpmod(s+a)), imm22 */
+#define R_IA64_DTPREL14 0xb1 /* @dtprel(sym+add), imm14 */
+#define R_IA64_DTPREL22 0xb2 /* @dtprel(sym+add), imm22 */
+#define R_IA64_DTPREL64I 0xb3 /* @dtprel(sym+add), imm64 */
+#define R_IA64_DTPREL32MSB 0xb4 /* @dtprel(sym+add), data4 MSB */
+#define R_IA64_DTPREL32LSB 0xb5 /* @dtprel(sym+add), data4 LSB */
+#define R_IA64_DTPREL64MSB 0xb6 /* @dtprel(sym+add), data8 MSB */
+#define R_IA64_DTPREL64LSB 0xb7 /* @dtprel(sym+add), data8 LSB */
+#define R_IA64_LTOFF_DTPREL22 0xba /* @ltoff(@dtprel(s+a)), imm22 */
+
+/* IA-64 specific section flags: */
+#define SHF_IA_64_SHORT 0x10000000 /* section near gp */
/*
* We use (abuse?) this macro to insert the (empty) vm_area that is
* b0-b7
* ip cfm psr
* ar.rsc ar.bsp ar.bspstore ar.rnat
- * ar.ccv ar.unat ar.fpsr ar.pfs ar.lc ar.ec
+ * ar.ccv ar.unat ar.fpsr ar.pfs ar.lc ar.ec ar.csd ar.ssd
*/
#define ELF_NGREG 128 /* we really need just 72 but let's leave some headroom... */
#define ELF_NFPREG 128 /* f0 and f1 could be omitted, but so what... */
#define ELF_PLATFORM 0
/*
- * This should go into linux/elf.h...
+ * Architecture-neutral AT_ values are in the range 0-17. Leave some room for more of
+ * them, start the architecture-specific ones at 32.
*/
#define AT_SYSINFO 32
+#define AT_SYSINFO_EHDR 33
#ifdef __KERNEL__
struct elf64_hdr;
extern void ia64_set_personality (struct elf64_hdr *elf_ex, int ibcs2_interpreter);
#define SET_PERSONALITY(ex, ibcs2) ia64_set_personality(&(ex), ibcs2)
+struct task_struct;
+
extern int dump_task_regs(struct task_struct *, elf_gregset_t *);
extern int dump_task_fpu (struct task_struct *, elf_fpregset_t *);
#define ELF_CORE_COPY_TASK_REGS(tsk, elf_gregs) dump_task_regs(tsk, elf_gregs)
#define ELF_CORE_COPY_FPREGS(tsk, elf_fpregs) dump_task_fpu(tsk, elf_fpregs)
-#ifdef CONFIG_FSYS
-#define ARCH_DLINFO \
-do { \
- extern char syscall_via_epc[], __start_gate_section[]; \
- NEW_AUX_ENT(AT_SYSINFO, GATE_ADDR + (syscall_via_epc - __start_gate_section)); \
+#define GATE_EHDR ((const struct elfhdr *) GATE_ADDR)
+
+#define ARCH_DLINFO \
+do { \
+ extern char __kernel_syscall_via_epc[]; \
+ NEW_AUX_ENT(AT_SYSINFO, __kernel_syscall_via_epc); \
+ NEW_AUX_ENT(AT_SYSINFO_EHDR, (unsigned long) GATE_EHDR); \
+} while (0)
+
+/*
+ * These macros parameterize elf_core_dump in fs/binfmt_elf.c to write out extra segments
+ * containing the gate DSO contents. Dumping its contents makes post-mortem fully
+ * interpretable later without matching up the same kernel and hardware config to see what
+ * IP values meant. Dumping its extra ELF program headers includes all the other
+ * information a debugger needs to easily find how the gate DSO was being used.
+ */
+#define ELF_CORE_EXTRA_PHDRS (GATE_EHDR->e_phnum)
+#define ELF_CORE_WRITE_EXTRA_PHDRS \
+do { \
+ const struct elf_phdr *const gate_phdrs = \
+ (const struct elf_phdr *) (GATE_ADDR + GATE_EHDR->e_phoff); \
+ int i; \
+ Elf64_Off ofs = 0; \
+ for (i = 0; i < GATE_EHDR->e_phnum; ++i) { \
+ struct elf_phdr phdr = gate_phdrs[i]; \
+ if (phdr.p_type == PT_LOAD) { \
+ ofs = phdr.p_offset = offset; \
+ offset += phdr.p_filesz; \
+ } else \
+ phdr.p_offset += ofs; \
+ phdr.p_paddr = 0; /* match other core phdrs */ \
+ DUMP_WRITE(&phdr, sizeof(phdr)); \
+ } \
+} while (0)
+
+#define ELF_CORE_WRITE_EXTRA_DATA \
+do { \
+ const struct elf_phdr *const gate_phdrs = \
+ (const struct elf_phdr *) (GATE_ADDR \
+ + GATE_EHDR->e_phoff); \
+ int i; \
+ for (i = 0; i < GATE_EHDR->e_phnum; ++i) { \
+ if (gate_phdrs[i].p_type == PT_LOAD) \
+ DUMP_WRITE((void *) gate_phdrs[i].p_vaddr, \
+ gate_phdrs[i].p_filesz); \
+ } \
} while (0)
-#endif
#endif /* __KERNEL__ */
#include <linux/config.h>
-#ifdef CONFIG_IA32_SUPPORT
-
-#include <linux/binfmts.h>
-#include <linux/compat.h>
-
-/*
- * 32 bit structures for IA32 support.
- */
-
-#define IA32_PAGE_SHIFT 12 /* 4KB pages */
-#define IA32_PAGE_SIZE (1UL << IA32_PAGE_SHIFT)
-#define IA32_PAGE_MASK (~(IA32_PAGE_SIZE - 1))
-#define IA32_PAGE_ALIGN(addr) (((addr) + IA32_PAGE_SIZE - 1) & IA32_PAGE_MASK)
-#define IA32_CLOCKS_PER_SEC 100 /* Cast in stone for IA32 Linux */
-
-/* sigcontext.h */
-/*
- * As documented in the iBCS2 standard..
- *
- * The first part of "struct _fpstate" is just the
- * normal i387 hardware setup, the extra "status"
- * word is used to save the coprocessor status word
- * before entering the handler.
- */
-struct _fpreg_ia32 {
- unsigned short significand[4];
- unsigned short exponent;
-};
-
-struct _fpxreg_ia32 {
- unsigned short significand[4];
- unsigned short exponent;
- unsigned short padding[3];
-};
-
-struct _xmmreg_ia32 {
- unsigned int element[4];
-};
-
-
-struct _fpstate_ia32 {
- unsigned int cw,
- sw,
- tag,
- ipoff,
- cssel,
- dataoff,
- datasel;
- struct _fpreg_ia32 _st[8];
- unsigned short status;
- unsigned short magic; /* 0xffff = regular FPU data only */
-
- /* FXSR FPU environment */
- unsigned int _fxsr_env[6]; /* FXSR FPU env is ignored */
- unsigned int mxcsr;
- unsigned int reserved;
- struct _fpxreg_ia32 _fxsr_st[8]; /* FXSR FPU reg data is ignored */
- struct _xmmreg_ia32 _xmm[8];
- unsigned int padding[56];
-};
-
-struct sigcontext_ia32 {
- unsigned short gs, __gsh;
- unsigned short fs, __fsh;
- unsigned short es, __esh;
- unsigned short ds, __dsh;
- unsigned int edi;
- unsigned int esi;
- unsigned int ebp;
- unsigned int esp;
- unsigned int ebx;
- unsigned int edx;
- unsigned int ecx;
- unsigned int eax;
- unsigned int trapno;
- unsigned int err;
- unsigned int eip;
- unsigned short cs, __csh;
- unsigned int eflags;
- unsigned int esp_at_signal;
- unsigned short ss, __ssh;
- unsigned int fpstate; /* really (struct _fpstate_ia32 *) */
- unsigned int oldmask;
- unsigned int cr2;
-};
-
-/* user.h */
-/*
- * IA32 (Pentium III/4) FXSR, SSE support
- *
- * Provide support for the GDB 5.0+ PTRACE_{GET|SET}FPXREGS requests for
- * interacting with the FXSR-format floating point environment. Floating
- * point data can be accessed in the regular format in the usual manner,
- * and both the standard and SIMD floating point data can be accessed via
- * the new ptrace requests. In either case, changes to the FPU environment
- * will be reflected in the task's state as expected.
- */
-struct ia32_user_i387_struct {
- int cwd;
- int swd;
- int twd;
- int fip;
- int fcs;
- int foo;
- int fos;
- int st_space[20]; /* 8*10 bytes for each FP-reg = 80 bytes */
-};
-
-struct ia32_user_fxsr_struct {
- unsigned short cwd;
- unsigned short swd;
- unsigned short twd;
- unsigned short fop;
- int fip;
- int fcs;
- int foo;
- int fos;
- int mxcsr;
- int reserved;
- int st_space[32]; /* 8*16 bytes for each FP-reg = 128 bytes */
- int xmm_space[32]; /* 8*16 bytes for each XMM-reg = 128 bytes */
- int padding[56];
-};
-
-/* signal.h */
-#define IA32_SET_SA_HANDLER(ka,handler,restorer) \
- ((ka)->sa.sa_handler = (__sighandler_t) \
- (((unsigned long)(restorer) << 32) \
- | ((handler) & 0xffffffff)))
-#define IA32_SA_HANDLER(ka) ((unsigned long) (ka)->sa.sa_handler & 0xffffffff)
-#define IA32_SA_RESTORER(ka) ((unsigned long) (ka)->sa.sa_handler >> 32)
-
-struct sigaction32 {
- unsigned int sa_handler; /* Really a pointer, but need to deal with 32 bits */
- unsigned int sa_flags;
- unsigned int sa_restorer; /* Another 32 bit pointer */
- compat_sigset_t sa_mask; /* A 32 bit mask */
-};
-
-struct old_sigaction32 {
- unsigned int sa_handler; /* Really a pointer, but need to deal
- with 32 bits */
- compat_old_sigset_t sa_mask; /* A 32 bit mask */
- unsigned int sa_flags;
- unsigned int sa_restorer; /* Another 32 bit pointer */
-};
-
-typedef struct sigaltstack_ia32 {
- unsigned int ss_sp;
- int ss_flags;
- unsigned int ss_size;
-} stack_ia32_t;
-
-struct ucontext_ia32 {
- unsigned int uc_flags;
- unsigned int uc_link;
- stack_ia32_t uc_stack;
- struct sigcontext_ia32 uc_mcontext;
- sigset_t uc_sigmask; /* mask last for extensibility */
-};
-
-struct stat64 {
- unsigned short st_dev;
- unsigned char __pad0[10];
- unsigned int __st_ino;
- unsigned int st_mode;
- unsigned int st_nlink;
- unsigned int st_uid;
- unsigned int st_gid;
- unsigned short st_rdev;
- unsigned char __pad3[10];
- unsigned int st_size_lo;
- unsigned int st_size_hi;
- unsigned int st_blksize;
- unsigned int st_blocks; /* Number 512-byte blocks allocated. */
- unsigned int __pad4; /* future possible st_blocks high bits */
- unsigned int st_atime;
- unsigned int st_atime_nsec;
- unsigned int st_mtime;
- unsigned int st_mtime_nsec;
- unsigned int st_ctime;
- unsigned int st_ctime_nsec;
- unsigned int st_ino_lo;
- unsigned int st_ino_hi;
-};
-
-typedef union sigval32 {
- int sival_int;
- unsigned int sival_ptr;
-} sigval_t32;
-
-typedef struct siginfo32 {
- int si_signo;
- int si_errno;
- int si_code;
-
- union {
- int _pad[((128/sizeof(int)) - 3)];
-
- /* kill() */
- struct {
- unsigned int _pid; /* sender's pid */
- unsigned int _uid; /* sender's uid */
- } _kill;
-
- /* POSIX.1b timers */
- struct {
- timer_t _tid; /* timer id */
- int _overrun; /* overrun count */
- char _pad[sizeof(unsigned int) - sizeof(int)];
- sigval_t32 _sigval; /* same as below */
- int _sys_private; /* not to be passed to user */
- } _timer;
-
- /* POSIX.1b signals */
- struct {
- unsigned int _pid; /* sender's pid */
- unsigned int _uid; /* sender's uid */
- sigval_t32 _sigval;
- } _rt;
-
- /* SIGCHLD */
- struct {
- unsigned int _pid; /* which child */
- unsigned int _uid; /* sender's uid */
- int _status; /* exit code */
- compat_clock_t _utime;
- compat_clock_t _stime;
- } _sigchld;
-
- /* SIGILL, SIGFPE, SIGSEGV, SIGBUS */
- struct {
- unsigned int _addr; /* faulting insn/memory ref. */
- } _sigfault;
-
- /* SIGPOLL */
- struct {
- int _band; /* POLL_IN, POLL_OUT, POLL_MSG */
- int _fd;
- } _sigpoll;
- } _sifields;
-} siginfo_t32;
-
-struct linux32_dirent {
- u32 d_ino;
- u32 d_off;
- u16 d_reclen;
- char d_name[256];
-};
-
-struct old_linux32_dirent {
- u32 d_ino;
- u32 d_offset;
- u16 d_namlen;
- char d_name[1];
-};
-
-/*
- * IA-32 ELF specific definitions for IA-64.
- */
-
-#define _ASM_IA64_ELF_H /* Don't include elf.h */
-
-#include <linux/sched.h>
-#include <asm/processor.h>
-
-/*
- * This is used to ensure we don't load something for the wrong architecture.
- */
-#define elf_check_arch(x) ((x)->e_machine == EM_386)
-
-/*
- * These are used to set parameters in the core dumps.
- */
-#define ELF_CLASS ELFCLASS32
-#define ELF_DATA ELFDATA2LSB
-#define ELF_ARCH EM_386
+#include <asm/ptrace.h>
+#include <asm/signal.h>
-#define IA32_PAGE_OFFSET 0xc0000000
-#define IA32_STACK_TOP IA32_PAGE_OFFSET
-
-/*
- * The system segments (GDT, TSS, LDT) have to be mapped below 4GB so the IA-32 engine can
- * access them.
- */
-#define IA32_GDT_OFFSET (IA32_PAGE_OFFSET)
-#define IA32_TSS_OFFSET (IA32_PAGE_OFFSET + PAGE_SIZE)
-#define IA32_LDT_OFFSET (IA32_PAGE_OFFSET + 2*PAGE_SIZE)
-
-#define USE_ELF_CORE_DUMP
-#define ELF_EXEC_PAGESIZE IA32_PAGE_SIZE
-
-/*
- * This is the location that an ET_DYN program is loaded if exec'ed.
- * Typical use of this is to invoke "./ld.so someprog" to test out a
- * new version of the loader. We need to make sure that it is out of
- * the way of the program that it will "exec", and that there is
- * sufficient room for the brk.
- */
-#define ELF_ET_DYN_BASE (IA32_PAGE_OFFSET/3 + 0x1000000)
-
-void ia64_elf32_init(struct pt_regs *regs);
-#define ELF_PLAT_INIT(_r, load_addr) ia64_elf32_init(_r)
-
-#define elf_addr_t u32
-
-/* ELF register definitions. This is needed for core dump support. */
-
-#define ELF_NGREG 128 /* XXX fix me */
-#define ELF_NFPREG 128 /* XXX fix me */
-
-typedef unsigned long elf_greg_t;
-typedef elf_greg_t elf_gregset_t[ELF_NGREG];
-
-typedef struct {
- unsigned long w0;
- unsigned long w1;
-} elf_fpreg_t;
-typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
-
-/* This macro yields a bitmask that programs can use to figure out
- what instruction set this CPU supports. */
-#define ELF_HWCAP 0
-
-/* This macro yields a string that ld.so will use to load
- implementation specific libraries for optimization. Not terribly
- relevant until we have real hardware to play with... */
-#define ELF_PLATFORM 0
-
-#ifdef __KERNEL__
-# define SET_PERSONALITY(EX,IBCS2) \
- (current->personality = (IBCS2) ? PER_SVR4 : PER_LINUX)
-#endif
-
-#define IA32_EFLAG 0x200
-
-/*
- * IA-32 ELF specific definitions for IA-64.
- */
-
-#define __USER_CS 0x23
-#define __USER_DS 0x2B
-
-#define FIRST_TSS_ENTRY 6
-#define FIRST_LDT_ENTRY (FIRST_TSS_ENTRY+1)
-#define _TSS(n) ((((unsigned long) n)<<4)+(FIRST_TSS_ENTRY<<3))
-#define _LDT(n) ((((unsigned long) n)<<4)+(FIRST_LDT_ENTRY<<3))
-
-#define IA32_SEGSEL_RPL (0x3 << 0)
-#define IA32_SEGSEL_TI (0x1 << 2)
-#define IA32_SEGSEL_INDEX_SHIFT 3
-
-#define IA32_SEG_BASE 16
-#define IA32_SEG_TYPE 40
-#define IA32_SEG_SYS 44
-#define IA32_SEG_DPL 45
-#define IA32_SEG_P 47
-#define IA32_SEG_HIGH_LIMIT 48
-#define IA32_SEG_AVL 52
-#define IA32_SEG_DB 54
-#define IA32_SEG_G 55
-#define IA32_SEG_HIGH_BASE 56
-
-#define IA32_SEG_DESCRIPTOR(base, limit, segtype, nonsysseg, dpl, segpresent, avl, segdb, gran) \
- (((limit) & 0xffff) \
- | (((unsigned long) (base) & 0xffffff) << IA32_SEG_BASE) \
- | ((unsigned long) (segtype) << IA32_SEG_TYPE) \
- | ((unsigned long) (nonsysseg) << IA32_SEG_SYS) \
- | ((unsigned long) (dpl) << IA32_SEG_DPL) \
- | ((unsigned long) (segpresent) << IA32_SEG_P) \
- | ((((unsigned long) (limit) >> 16) & 0xf) << IA32_SEG_HIGH_LIMIT) \
- | ((unsigned long) (avl) << IA32_SEG_AVL) \
- | ((unsigned long) (segdb) << IA32_SEG_DB) \
- | ((unsigned long) (gran) << IA32_SEG_G) \
- | ((((unsigned long) (base) >> 24) & 0xff) << IA32_SEG_HIGH_BASE))
-
-#define SEG_LIM 32
-#define SEG_TYPE 52
-#define SEG_SYS 56
-#define SEG_DPL 57
-#define SEG_P 59
-#define SEG_AVL 60
-#define SEG_DB 62
-#define SEG_G 63
-
-/* Unscramble an IA-32 segment descriptor into the IA-64 format. */
-#define IA32_SEG_UNSCRAMBLE(sd) \
- ( (((sd) >> IA32_SEG_BASE) & 0xffffff) | ((((sd) >> IA32_SEG_HIGH_BASE) & 0xff) << 24) \
- | ((((sd) & 0xffff) | ((((sd) >> IA32_SEG_HIGH_LIMIT) & 0xf) << 16)) << SEG_LIM) \
- | ((((sd) >> IA32_SEG_TYPE) & 0xf) << SEG_TYPE) \
- | ((((sd) >> IA32_SEG_SYS) & 0x1) << SEG_SYS) \
- | ((((sd) >> IA32_SEG_DPL) & 0x3) << SEG_DPL) \
- | ((((sd) >> IA32_SEG_P) & 0x1) << SEG_P) \
- | ((((sd) >> IA32_SEG_AVL) & 0x1) << SEG_AVL) \
- | ((((sd) >> IA32_SEG_DB) & 0x1) << SEG_DB) \
- | ((((sd) >> IA32_SEG_G) & 0x1) << SEG_G))
-
-#define IA32_IOBASE 0x2000000000000000 /* Virtual address for I/O space */
-
-#define IA32_CR0 0x80000001 /* Enable PG and PE bits */
-#define IA32_CR4 0x600 /* MMXEX and FXSR on */
-
-/*
- * IA32 floating point control registers starting values
- */
-
-#define IA32_FSR_DEFAULT 0x55550000 /* set all tag bits */
-#define IA32_FCR_DEFAULT 0x17800000037fUL /* extended precision, all masks */
-
-#define IA32_PTRACE_GETREGS 12
-#define IA32_PTRACE_SETREGS 13
-#define IA32_PTRACE_GETFPREGS 14
-#define IA32_PTRACE_SETFPREGS 15
-#define IA32_PTRACE_GETFPXREGS 18
-#define IA32_PTRACE_SETFPXREGS 19
-
-#define ia32_start_thread(regs,new_ip,new_sp) do { \
- set_fs(USER_DS); \
- ia64_psr(regs)->cpl = 3; /* set user mode */ \
- ia64_psr(regs)->ri = 0; /* clear return slot number */ \
- ia64_psr(regs)->is = 1; /* IA-32 instruction set */ \
- regs->cr_iip = new_ip; \
- regs->ar_rsc = 0xc; /* enforced lazy mode, priv. level 3 */ \
- regs->ar_rnat = 0; \
- regs->loadrs = 0; \
- regs->r12 = new_sp; \
-} while (0)
-
-/*
- * Local Descriptor Table (LDT) related declarations.
- */
-
-#define IA32_LDT_ENTRIES 8192 /* Maximum number of LDT entries supported. */
-#define IA32_LDT_ENTRY_SIZE 8 /* The size of each LDT entry. */
-
-struct ia32_modify_ldt_ldt_s {
- unsigned int entry_number;
- unsigned int base_addr;
- unsigned int limit;
- unsigned int seg_32bit:1;
- unsigned int contents:2;
- unsigned int read_exec_only:1;
- unsigned int limit_in_pages:1;
- unsigned int seg_not_present:1;
- unsigned int useable:1;
-};
-
-struct linux_binprm;
+#ifdef CONFIG_IA32_SUPPORT
+extern void ia32_cpu_init (void);
extern void ia32_gdt_init (void);
-extern void ia32_init_addr_space (struct pt_regs *regs);
-extern int ia32_setup_arg_pages (struct linux_binprm *bprm);
extern int ia32_exception (struct pt_regs *regs, unsigned long isr);
extern int ia32_intercept (struct pt_regs *regs, unsigned long isr);
-extern unsigned long ia32_do_mmap (struct file *, unsigned long, unsigned long, int, int, loff_t);
-extern void ia32_load_segment_descriptors (struct task_struct *task);
-
-#define ia32f2ia64f(dst,src) \
- do { \
- register double f6 asm ("f6"); \
- asm volatile ("ldfe f6=[%2];; stf.spill [%1]=f6" : "=f"(f6): "r"(dst), "r"(src) : "memory"); \
- } while(0)
-
-#define ia64f2ia32f(dst,src) \
- do { \
- register double f6 asm ("f6"); \
- asm volatile ("ldf.fill f6=[%2];; stfe [%1]=f6" : "=f"(f6): "r"(dst), "r"(src) : "memory"); \
- } while(0)
#endif /* !CONFIG_IA32_SUPPORT */
-/* Declare this uncondiontally, so we don't get warnings for unreachable code. */
+/* Declare this unconditionally, so we don't get warnings for unreachable code. */
extern int ia32_setup_frame1 (int sig, struct k_sigaction *ka, siginfo_t *info,
sigset_t *set, struct pt_regs *regs);
# endif /* __KERNEL__ */
+/*
+ * It makes no sense at all to have this BIO_VMERGE_BOUNDARY macro here. Should be
+ * replaced by dma_merge_mask() or something of that sort. Note: the only way
+ * BIO_VMERGE_BOUNDARY is used is to mask off bits. Effectively, our definition gets
+ * expanded into:
+ *
+ * addr & ((ia64_max_iommu_merge_mask + 1) - 1) == (addr & ia64_max_iommu_merge_mask)
+ *
+ * which is precisely what we want.
+ */
+extern unsigned long ia64_max_iommu_merge_mask;
+#define BIO_VMERGE_BOUNDARY (0UL)//(ia64_max_iommu_merge_mask + 1)
+
#endif /* _ASM_IA64_IO_H */
--- /dev/null
+#include <linux/ioctl32.h>
#define IA64_PSR_BITS_TO_CLEAR (IA64_PSR_MFL | IA64_PSR_MFH | IA64_PSR_DB | IA64_PSR_LP | \
IA64_PSR_TB | IA64_PSR_ID | IA64_PSR_DA | IA64_PSR_DD | \
IA64_PSR_SS | IA64_PSR_ED | IA64_PSR_IA)
-#define IA64_PSR_BITS_TO_SET (IA64_PSR_DFH)
+#define IA64_PSR_BITS_TO_SET (IA64_PSR_DFH | IA64_PSR_SP)
#define IA64_PSR_BE (__IA64_UL(1) << IA64_PSR_BE_BIT)
#define IA64_PSR_UP (__IA64_UL(1) << IA64_PSR_UP_BIT)
# include <asm/machvec_dig.h>
# elif defined (CONFIG_IA64_HP_ZX1)
# include <asm/machvec_hpzx1.h>
-# elif defined (CONFIG_IA64_SGI_SN1)
-# include <asm/machvec_sn1.h>
# elif defined (CONFIG_IA64_SGI_SN2)
# include <asm/machvec_sn2.h>
# elif defined (CONFIG_IA64_GENERIC)
u64 imsto_sal_check_ra; /* Return address in SAL_CHECK while going
* back to SAL from OS after MCA handling.
*/
+ u64 pal_min_state; /* from PAL in r17 */
+ u64 proc_state_param; /* from PAL in r18. See SDV 2:268 11.3.2.1 */
} ia64_mca_sal_to_os_state_t;
enum {
* 1. Lop off bits 61 thru 63 in the virtual address
*/
#define DATA_VA_TO_PA(addr) \
- dep addr = 0, addr, 61, 3
+ tpa addr = addr
/*
* This macro converts a data physical address to a virtual address
* Right now for simulation purposes the virtual addresses are
struct pglist_data;
struct ia64_node_data {
+ short active_cpu_count;
short node;
struct pglist_data *pg_data_ptrs[NR_NODES];
struct page *bank_mem_map_base[NR_BANKS];
#define PERCPU_PAGE_SHIFT 16 /* log2() of max. size of per-CPU area */
#define PERCPU_PAGE_SIZE (__IA64_UL_CONST(1) << PERCPU_PAGE_SHIFT)
+#define RGN_MAP_LIMIT ((1UL << (4*PAGE_SHIFT - 12)) - PAGE_SIZE) /* per region addr limit */
+
#ifdef CONFIG_HUGETLB_PAGE
# if defined(CONFIG_HUGETLB_PAGE_SIZE_4GB)
# define HPAGE_SIZE (__IA64_UL_CONST(1) << HPAGE_SHIFT)
# define HPAGE_MASK (~(HPAGE_SIZE - 1))
# define HAVE_ARCH_HUGETLB_UNMAPPED_AREA
+# define ARCH_HAS_VALID_HUGEPAGE_RANGE
#endif /* CONFIG_HUGETLB_PAGE */
#ifdef __ASSEMBLY__
# define htlbpage_to_page(x) ((REGION_NUMBER(x) << 61) \
| (REGION_OFFSET(x) >> (HPAGE_SHIFT-PAGE_SHIFT)))
# define HUGETLB_PAGE_ORDER (HPAGE_SHIFT - PAGE_SHIFT)
-extern int is_invalid_hugepage_range(unsigned long addr, unsigned long len);
-#else
-#define is_invalid_hugepage_range(addr, len) 0
+extern int check_valid_hugepage_range(unsigned long addr, unsigned long len);
#endif
static __inline__ int
--- /dev/null
+#ifndef _ASM_IA64_PATCH_H
+#define _ASM_IA64_PATCH_H
+
+/*
+ * Copyright (C) 2003 Hewlett-Packard Co
+ * David Mosberger-Tang <davidm@hpl.hp.com>
+ *
+ * There are a number of reasons for patching instructions. Rather than duplicating code
+ * all over the place, we put the common stuff here. Reasons for patching: in-kernel
+ * module-loader, virtual-to-physical patch-list, McKinley Errata 9 workaround, and gate
+ * shared library. Undoubtedly, some of these reasons will disappear and others will
+ * be added over time.
+ */
+#include <linux/elf.h>
+#include <linux/types.h>
+
+extern void ia64_patch (u64 insn_addr, u64 mask, u64 val); /* patch any insn slot */
+extern void ia64_patch_imm64 (u64 insn_addr, u64 val); /* patch "movl" w/abs. value*/
+extern void ia64_patch_imm60 (u64 insn_addr, u64 val); /* patch "brl" w/ip-rel value */
+
+extern void ia64_patch_mckinley_e9 (unsigned long start, unsigned long end);
+extern void ia64_patch_vtop (unsigned long start, unsigned long end);
+extern void ia64_patch_gate (void);
+
+#endif /* _ASM_IA64_PATCH_H */
struct pci_dev;
/*
- * The PCI address space does equal the physical memory address space.
- * The networking and block device layers use this boolean for bounce
- * buffer decisions.
+ * PCI_DMA_BUS_IS_PHYS should be set to 1 if there is _necessarily_ a direct correspondence
+ * between device bus addresses and CPU physical addresses. Platforms with a hardware I/O
+ * MMU _must_ turn this off to suppress the bounce buffer handling code in the block and
+ * network device layers. Platforms with separate bus address spaces _must_ turn this off
+ * and provide a device DMA mapping implementation that takes care of the necessary
+ * address translation.
+ *
+ * For now, the ia64 platforms which may have separate/multiple bus address spaces all
+ * have I/O MMUs which support the merging of physically discontiguous buffers, so we can
+ * use that as the sole factor to determine the setting of PCI_DMA_BUS_IS_PHYS.
*/
-#define PCI_DMA_BUS_IS_PHYS (1)
+extern unsigned long ia64_max_iommu_merge_mask;
+#define PCI_DMA_BUS_IS_PHYS (ia64_max_iommu_merge_mask == ~0UL)
static inline void
pcibios_set_master (struct pci_dev *dev)
#define PFM_READ_PMDS 0x03
#define PFM_STOP 0x04
#define PFM_START 0x05
-#define PFM_ENABLE 0x06
-#define PFM_DISABLE 0x07
+#define PFM_ENABLE 0x06 /* obsolete */
+#define PFM_DISABLE 0x07 /* obsolete */
#define PFM_CREATE_CONTEXT 0x08
-#define PFM_DESTROY_CONTEXT 0x09
+#define PFM_DESTROY_CONTEXT 0x09 /* obsolete use close() */
#define PFM_RESTART 0x0a
-#define PFM_PROTECT_CONTEXT 0x0b
+#define PFM_PROTECT_CONTEXT 0x0b /* obsolete */
#define PFM_GET_FEATURES 0x0c
#define PFM_DEBUG 0x0d
-#define PFM_UNPROTECT_CONTEXT 0x0e
+#define PFM_UNPROTECT_CONTEXT 0x0e /* obsolete */
#define PFM_GET_PMC_RESET_VAL 0x0f
-
+#define PFM_LOAD_CONTEXT 0x10
+#define PFM_UNLOAD_CONTEXT 0x11
/*
- * CPU model specific commands (may not be supported on all models)
+ * PMU model specific commands (may not be supported on all PMU models)
*/
#define PFM_WRITE_IBRS 0x20
#define PFM_WRITE_DBRS 0x21
/*
* context flags
*/
-#define PFM_FL_INHERIT_NONE 0x00 /* never inherit a context across fork (default) */
-#define PFM_FL_INHERIT_ONCE 0x01 /* clone pfm_context only once across fork() */
-#define PFM_FL_INHERIT_ALL 0x02 /* always clone pfm_context across fork() */
-#define PFM_FL_NOTIFY_BLOCK 0x04 /* block task on user level notifications */
-#define PFM_FL_SYSTEM_WIDE 0x08 /* create a system wide context */
-#define PFM_FL_EXCL_IDLE 0x20 /* exclude idle task from system wide session */
-#define PFM_FL_UNSECURE 0x40 /* allow unsecure monitoring for non self-monitoring task */
+#define PFM_FL_NOTIFY_BLOCK 0x01 /* block task on user level notifications */
+#define PFM_FL_SYSTEM_WIDE 0x02 /* create a system wide context */
+#define PFM_FL_UNSECURE 0x04 /* allow unsecure monitoring for non self-monitoring task */
+#define PFM_FL_OVFL_NO_MSG 0x80 /* do not post overflow/end messages for notification */
+
+/*
+ * event set flags
+ */
+#define PFM_SETFL_EXCL_IDLE 0x01 /* exclude idle task (syswide only) XXX: DO NOT USE YET */
/*
* PMC flags
*/
#define PFM_REGFL_OVFL_NOTIFY 0x1 /* send notification on overflow */
-#define PFM_REGFL_RANDOM 0x2 /* randomize sampling periods */
+#define PFM_REGFL_RANDOM 0x2 /* randomize sampling interval */
/*
* PMD/PMC/IBR/DBR return flags (ignored on input)
* Those flags are used on output and must be checked in case EAGAIN is returned
* by any of the calls using a pfarg_reg_t or pfarg_dbreg_t structure.
*/
-#define PFM_REG_RETFL_NOTAVAIL (1U<<31) /* set if register is implemented but not available */
-#define PFM_REG_RETFL_EINVAL (1U<<30) /* set if register entry is invalid */
+#define PFM_REG_RETFL_NOTAVAIL (1UL<<31) /* set if register is implemented but not available */
+#define PFM_REG_RETFL_EINVAL (1UL<<30) /* set if register entry is invalid */
#define PFM_REG_RETFL_MASK (PFM_REG_RETFL_NOTAVAIL|PFM_REG_RETFL_EINVAL)
#define PFM_REG_HAS_ERROR(flag) (((flag) & PFM_REG_RETFL_MASK) != 0)
+typedef unsigned char pfm_uuid_t[16]; /* custom sampling buffer identifier type */
+
/*
* Request structure used to define a context
*/
typedef struct {
- unsigned long ctx_smpl_entries; /* how many entries in sampling buffer */
- unsigned long ctx_smpl_regs[4]; /* which pmds to record on overflow */
-
- pid_t ctx_notify_pid; /* which process to notify on overflow */
- int ctx_flags; /* noblock/block, inherit flags */
- void *ctx_smpl_vaddr; /* returns address of BTB buffer */
-
- unsigned long ctx_cpu_mask; /* on which CPU to enable perfmon (systemwide) */
-
- unsigned long reserved[8]; /* for future use */
+ pfm_uuid_t ctx_smpl_buf_id; /* which buffer format to use (if needed) */
+ unsigned long ctx_flags; /* noblock/block */
+ unsigned int ctx_nextra_sets; /* number of extra event sets (you always get 1) */
+ int ctx_fd; /* return arg: unique identification for context */
+ void *ctx_smpl_vaddr; /* return arg: virtual address of sampling buffer, is used */
+ unsigned long ctx_reserved[11]; /* for future use */
} pfarg_context_t;
/*
* Request structure used to write/read a PMC or PMD
*/
typedef struct {
- unsigned int reg_num; /* which register */
- unsigned int reg_flags; /* PMC: notify/don't notify. PMD/PMC: return flags */
- unsigned long reg_value; /* configuration (PMC) or initial value (PMD) */
+ unsigned int reg_num; /* which register */
+ unsigned int reg_set; /* event set for this register */
+
+ unsigned long reg_value; /* initial pmc/pmd value */
+ unsigned long reg_flags; /* input: pmc/pmd flags, return: reg error */
+
+ unsigned long reg_long_reset; /* reset after buffer overflow notification */
+ unsigned long reg_short_reset; /* reset after counter overflow */
- unsigned long reg_long_reset; /* reset after sampling buffer overflow (large) */
- unsigned long reg_short_reset;/* reset after counter overflow (small) */
+ unsigned long reg_reset_pmds[4]; /* which other counters to reset on overflow */
+ unsigned long reg_random_seed; /* seed value when randomization is used */
+ unsigned long reg_random_mask; /* bitmask used to limit random value */
+ unsigned long reg_last_reset_val;/* return: PMD last reset value */
- unsigned long reg_reset_pmds[4]; /* which other counters to reset on overflow */
- unsigned long reg_random_seed; /* seed value when randomization is used */
- unsigned long reg_random_mask; /* bitmask used to limit random value */
- unsigned long reg_last_reset_value;/* last value used to reset the PMD (PFM_READ_PMDS) */
+ unsigned long reg_smpl_pmds[4]; /* which pmds are accessed when PMC overflows */
+ unsigned long reg_smpl_eventid; /* opaque sampling event identifier */
- unsigned long reserved[13]; /* for future use */
+ unsigned long reserved[3]; /* for future use */
} pfarg_reg_t;
typedef struct {
- unsigned int dbreg_num; /* which register */
- unsigned int dbreg_flags; /* dbregs return flags */
- unsigned long dbreg_value; /* configuration (PMC) or initial value (PMD) */
- unsigned long reserved[6];
+ unsigned int dbreg_num; /* which debug register */
+ unsigned int dbreg_set; /* event set for this register */
+ unsigned long dbreg_value; /* value for debug register */
+ unsigned long dbreg_flags; /* return: dbreg error */
+ unsigned long dbreg_reserved[1]; /* for future use */
} pfarg_dbreg_t;
-typedef struct {
+typedef struct {
unsigned int ft_version; /* perfmon: major [16-31], minor [0-15] */
- unsigned int ft_smpl_version;/* sampling format: major [16-31], minor [0-15] */
- unsigned long reserved[4]; /* for future use */
+ unsigned int ft_reserved; /* reserved for future use */
+ unsigned long reserved[4]; /* for future use */
} pfarg_features_t;
-/*
- * This header is at the beginning of the sampling buffer returned to the user.
- * It is exported as Read-Only at this point. It is directly followed by the
- * first record.
- */
typedef struct {
- unsigned int hdr_version; /* contains perfmon version (smpl format diffs) */
- unsigned int reserved;
- unsigned long hdr_entry_size; /* size of one entry in bytes */
- unsigned long hdr_count; /* how many valid entries */
- unsigned long hdr_pmds[4]; /* which pmds are recorded */
-} perfmon_smpl_hdr_t;
+ pid_t load_pid; /* process to load the context into */
+ unsigned int load_set; /* first event set to load */
+ unsigned long load_reserved[2]; /* for future use */
+} pfarg_load_t;
-/*
- * Define the version numbers for both perfmon as a whole and the sampling buffer format.
- */
-#define PFM_VERSION_MAJ 1U
-#define PFM_VERSION_MIN 4U
-#define PFM_VERSION (((PFM_VERSION_MAJ&0xffff)<<16)|(PFM_VERSION_MIN & 0xffff))
+typedef struct {
+ int msg_type; /* generic message header */
+ int msg_ctx_fd; /* generic message header */
+ unsigned long msg_tstamp; /* for perf tuning */
+ unsigned int msg_active_set; /* active set at the time of overflow */
+ unsigned long msg_ovfl_pmds[4]; /* which PMDs overflowed */
+} pfm_ovfl_msg_t;
-#define PFM_SMPL_VERSION_MAJ 1U
-#define PFM_SMPL_VERSION_MIN 0U
-#define PFM_SMPL_VERSION (((PFM_SMPL_VERSION_MAJ&0xffff)<<16)|(PFM_SMPL_VERSION_MIN & 0xffff))
+typedef struct {
+ int msg_type; /* generic message header */
+ int msg_ctx_fd; /* generic message header */
+ unsigned long msg_tstamp; /* for perf tuning */
+} pfm_end_msg_t;
+typedef struct {
+ int msg_type; /* type of the message */
+ int msg_ctx_fd; /* unique identifier for the context */
+ unsigned long msg_tstamp; /* for perf tuning */
+} pfm_gen_msg_t;
-#define PFM_VERSION_MAJOR(x) (((x)>>16) & 0xffff)
-#define PFM_VERSION_MINOR(x) ((x) & 0xffff)
+#define PFM_MSG_OVFL 1 /* an overflow happened */
+#define PFM_MSG_END 2 /* task to which context was attached ended */
+
+typedef union {
+ pfm_ovfl_msg_t pfm_ovfl_msg;
+ pfm_end_msg_t pfm_end_msg;
+ pfm_gen_msg_t pfm_gen_msg;
+} pfm_msg_t;
/*
- * Entry header in the sampling buffer. The header is directly followed
- * with the PMDs saved in increasing index order: PMD4, PMD5, .... How
- * many PMDs are present is determined by the user program during
- * context creation.
- *
- * XXX: in this version of the entry, only up to 64 registers can be
- * recorded. This should be enough for quite some time. Always check
- * sampling format before parsing entries!
- *
- * In the case where multiple counters overflow at the same time, the
- * last_reset_value member indicates the initial value of the PMD with
- * the smallest index. For instance, if PMD2 and PMD5 have overflowed,
- * the last_reset_value member contains the initial value of PMD2.
+ * Define the version numbers for both perfmon as a whole and the sampling buffer format.
*/
-typedef struct {
- int pid; /* identification of process */
- int cpu; /* which cpu was used */
- unsigned long last_reset_value; /* initial value of counter that overflowed */
- unsigned long stamp; /* timestamp */
- unsigned long ip; /* where did the overflow interrupt happened */
- unsigned long regs; /* bitmask of which registers overflowed */
- unsigned long reserved; /* unused */
-} perfmon_smpl_entry_t;
+#define PFM_VERSION_MAJ 2U
+#define PFM_VERSION_MIN 0U
+#define PFM_SMPL_HDR_VERSION_MAJ 2U
+#define PFM_SMPL_HDR_VERSION_MIN 0U
+#define PFM_VERSION (((PFM_VERSION_MAJ&0xffff)<<16)|(PFM_VERSION_MIN & 0xffff))
+#define PFM_VERSION_MAJOR(x) (((x)>>16) & 0xffff)
+#define PFM_VERSION_MINOR(x) ((x) & 0xffff)
-extern long perfmonctl(pid_t pid, int cmd, void *arg, int narg);
+
+/*
+ * miscellaneous architected definitions
+ */
+#define PMU_FIRST_COUNTER 4 /* first counting monitor (PMC/PMD) */
+#define PMU_MAX_PMCS 256 /* maximum architected number of PMC registers */
+#define PMU_MAX_PMDS 256 /* maximum architected number of PMD registers */
#ifdef __KERNEL__
-typedef struct {
- void (*handler)(int irq, void *arg, struct pt_regs *regs);
-} pfm_intr_handler_desc_t;
+extern long perfmonctl(int fd, int cmd, void *arg, int narg);
extern void pfm_save_regs (struct task_struct *);
extern void pfm_load_regs (struct task_struct *);
-extern int pfm_inherit (struct task_struct *, struct pt_regs *);
-extern void pfm_context_exit (struct task_struct *);
-extern void pfm_flush_regs (struct task_struct *);
-extern void pfm_cleanup_notifiers (struct task_struct *);
-extern void pfm_cleanup_owners (struct task_struct *);
+extern void pfm_exit_thread(struct task_struct *);
extern int pfm_use_debug_registers(struct task_struct *);
extern int pfm_release_debug_registers(struct task_struct *);
-extern int pfm_cleanup_smpl_buf(struct task_struct *);
extern void pfm_syst_wide_update_task(struct task_struct *, unsigned long info, int is_ctxswin);
-extern void pfm_ovfl_block_reset(void);
+extern void pfm_inherit(struct task_struct *task, struct pt_regs *regs);
extern void pfm_init_percpu(void);
+extern void pfm_handle_work(void);
-/*
- * hooks to allow VTune/Prospect to cooperate with perfmon.
- * (reserved for system wide monitoring modules only)
+/*
+ * Reset PMD register flags
*/
-extern int pfm_install_alternate_syswide_subsystem(pfm_intr_handler_desc_t *h);
-extern int pfm_remove_alternate_syswide_subsystem(pfm_intr_handler_desc_t *h);
+#define PFM_PMD_NO_RESET 0
+#define PFM_PMD_LONG_RESET 1
+#define PFM_PMD_SHORT_RESET 2
+
+typedef struct {
+ unsigned int notify_user:1; /* notify user program of overflow */
+ unsigned int reset_pmds :2; /* PFM_PMD_NO_RESET, PFM_PMD_LONG_RESET, PFM_PMD_SHORT_RESET */
+ unsigned int block:1; /* block monitored task on kernel exit */
+ unsigned int stop_monitoring:1; /* will mask monitoring via PMCx.plm */
+ unsigned int reserved:26; /* for future use */
+} pfm_ovfl_ctrl_t;
+
+typedef struct {
+ unsigned long ovfl_pmds[4]; /* bitmask of overflowed pmds */
+ unsigned long ovfl_notify[4]; /* bitmask of overflow pmds which asked for notification */
+ unsigned long pmd_value; /* current 64-bit value of 1st pmd which overflowed */
+ unsigned long pmd_last_reset; /* last reset value of 1st pmd which overflowed */
+ unsigned long pmd_eventid; /* eventid associated with 1st pmd which overflowed */
+ unsigned int active_set; /* event set active at the time of the overflow */
+ unsigned int reserved1;
+ unsigned long smpl_pmds[4];
+ unsigned long smpl_pmds_values[PMU_MAX_PMDS];
+ pfm_ovfl_ctrl_t ovfl_ctrl; /* return: perfmon controls to set by handler */
+} pfm_ovfl_arg_t;
+
+
+typedef struct _pfm_buffer_fmt_t {
+ char *fmt_name;
+ pfm_uuid_t fmt_uuid;
+ size_t fmt_arg_size;
+ unsigned long fmt_flags;
+
+ int (*fmt_validate)(struct task_struct *task, unsigned int flags, int cpu, void *arg);
+ int (*fmt_getsize)(struct task_struct *task, unsigned int flags, int cpu, void *arg, unsigned long *size);
+ int (*fmt_init)(struct task_struct *task, void *buf, unsigned int flags, int cpu, void *arg);
+ int (*fmt_handler)(struct task_struct *task, void *buf, pfm_ovfl_arg_t *arg, struct pt_regs *regs);
+ int (*fmt_restart)(struct task_struct *task, pfm_ovfl_ctrl_t *ctrl, void *buf, struct pt_regs *regs);
+ int (*fmt_restart_active)(struct task_struct *task, pfm_ovfl_ctrl_t *ctrl, void *buf, struct pt_regs *regs);
+ int (*fmt_exit)(struct task_struct *task, void *buf, struct pt_regs *regs);
+
+ struct _pfm_buffer_fmt_t *fmt_next;
+ struct _pfm_buffer_fmt_t *fmt_prev;
+} pfm_buffer_fmt_t;
+
+extern int pfm_register_buffer_fmt(pfm_buffer_fmt_t *fmt);
+extern int pfm_unregister_buffer_fmt(pfm_uuid_t uuid);
+
+/*
+ * perfmon interface exported to modules
+ */
+extern long pfm_mod_fast_read_pmds(struct task_struct *, unsigned long mask[4], unsigned long *addr, struct pt_regs *regs);
+extern long pfm_mod_read_pmds(struct task_struct *, pfarg_reg_t *req, unsigned int nreq, struct pt_regs *regs);
+extern long pfm_mod_write_pmcs(struct task_struct *, pfarg_reg_t *req, unsigned int nreq, struct pt_regs *regs);
/*
* describe the content of the local_cpu_date->pfm_syst_info field
*/
-#define PFM_CPUINFO_SYST_WIDE 0x1 /* if set a system wide session exist */
+#define PFM_CPUINFO_SYST_WIDE 0x1 /* if set a system wide session exists */
#define PFM_CPUINFO_DCR_PP 0x2 /* if set the system wide session has started */
#define PFM_CPUINFO_EXCL_IDLE 0x4 /* the system wide session excludes the idle task */
-
#endif /* __KERNEL__ */
#endif /* _ASM_IA64_PERFMON_H */
--- /dev/null
+/*
+ * Copyright (C) 2002-2003 Hewlett-Packard Co
+ * Stephane Eranian <eranian@hpl.hp.com>
+ *
+ * This file implements the default sampling buffer format
+ * for Linux/ia64 perfmon subsystem.
+ */
+#ifndef __PERFMON_DEFAULT_SMPL_H__
+#define __PERFMON_DEFAULT_SMPL_H__ 1
+
+#define PFM_DEFAULT_SMPL_UUID { \
+ 0x4d, 0x72, 0xbe, 0xc0, 0x06, 0x64, 0x41, 0x43, 0x82, 0xb4, 0xd3, 0xfd, 0x27, 0x24, 0x3c, 0x97}
+
+/*
+ * format specific parameters (passed at context creation)
+ */
+typedef struct {
+ unsigned long buf_size; /* size of the buffer in bytes */
+ unsigned long reserved[3]; /* for future use */
+} pfm_default_smpl_arg_t;
+
+/*
+ * combined context+format specific structure. Can be passed
+ * to PFM_CONTEXT_CREATE
+ */
+typedef struct {
+ pfarg_context_t ctx_arg;
+ pfm_default_smpl_arg_t buf_arg;
+} pfm_default_smpl_ctx_arg_t;
+
+/*
+ * This header is at the beginning of the sampling buffer returned to the user.
+ * It is directly followed by the first record.
+ */
+typedef struct {
+ unsigned long hdr_count; /* how many valid entries */
+ void *hdr_cur_pos; /* current position in the buffer */
+ void *hdr_last_pos; /* first byte beyond buffer */
+
+ unsigned long hdr_overflows; /* how many times the buffer overflowed */
+ unsigned long hdr_buf_size; /* how many bytes in the buffer */
+ unsigned int hdr_version; /* contains perfmon version (smpl format diffs) */
+ unsigned int hdr_reserved1; /* for future use */
+ unsigned long hdr_reserved[10]; /* for future use */
+} pfm_default_smpl_hdr_t;
+
+/*
+ * Entry header in the sampling buffer. The header is directly followed
+ * with the PMDs saved in increasing index order: PMD4, PMD5, .... How
+ * many PMDs are present depends on how the session was programmed.
+ *
+ * XXX: in this version of the entry, only up to 64 registers can be
+ * recorded. This should be enough for quite some time. Always check
+ * sampling format before parsing entries!
+ *
+ * In the case where multiple counters overflow at the same time, the
+ * last_reset_value member indicates the initial value of the
+ * overflowed PMD with the smallest index. For instance, if PMD2 and
+ * PMD5 have overflowed, the last_reset_value member contains the
+ * initial value of PMD2.
+ */
+typedef struct {
+ int pid; /* current process at PMU interrupt point */
+ int cpu; /* cpu on which the overflow occurred */
+ unsigned long last_reset_val; /* initial value of 1st overflowed PMD */
+ unsigned long ip; /* where the overflow interrupt happened */
+ unsigned long ovfl_pmds; /* which PMDS registers overflowed (64 max) */
+ unsigned long tstamp; /* ar.itc on the CPU that took the overflow */
+ unsigned int set; /* event set active when overflow occurred */
+ unsigned int reserved1; /* for future use */
+} pfm_default_smpl_entry_t;
+
+#define PFM_DEFAULT_MAX_PMDS 64 /* how many pmds supported by data structures (sizeof(unsigned long)) */
+#define PFM_DEFAULT_MAX_ENTRY_SIZE (sizeof(pfm_default_smpl_entry_t)+(sizeof(unsigned long)*PFM_DEFAULT_MAX_PMDS))
+#define PFM_DEFAULT_SMPL_MIN_BUF_SIZE (sizeof(pfm_default_smpl_hdr_t)+PFM_DEFAULT_MAX_ENTRY_SIZE)
+
+#define PFM_DEFAULT_SMPL_VERSION_MAJ 2U
+#define PFM_DEFAULT_SMPL_VERSION_MIN 0U
+#define PFM_DEFAULT_SMPL_VERSION (((PFM_DEFAULT_SMPL_VERSION_MAJ&0xffff)<<16)|(PFM_DEFAULT_SMPL_VERSION_MIN & 0xffff))
+
+#endif /* __PERFMON_DEFAULT_SMPL_H__ */
extern void check_pgt_cache (void);
-/*
- * IA-64 doesn't have any external MMU info: the page tables contain all the necessary
- * information. However, we use this macro to take care of any (delayed) i-cache flushing
- * that may be necessary.
- */
-static inline void
-update_mmu_cache (struct vm_area_struct *vma, unsigned long vaddr, pte_t pte)
-{
- unsigned long addr;
- struct page *page;
-
- if (!pte_exec(pte))
- return; /* not an executable page... */
-
- page = pte_page(pte);
- /* don't use VADDR: it may not be mapped on this CPU (or may have just been flushed): */
- addr = (unsigned long) page_address(page);
-
- if (test_bit(PG_arch_1, &page->flags))
- return; /* i-cache is already coherent with d-cache */
-
- flush_icache_range(addr, addr + PAGE_SIZE);
- set_bit(PG_arch_1, &page->flags); /* mark page as clean */
-}
-
#endif /* _ASM_IA64_PGALLOC_H */
* This hopefully works with any (fixed) IA-64 page-size, as defined
* in <asm/page.h> (currently 8192).
*
- * Copyright (C) 1998-2002 Hewlett-Packard Co
+ * Copyright (C) 1998-2003 Hewlett-Packard Co
* David Mosberger-Tang <davidm@hpl.hp.com>
*/
#define _PAGE_SIZE_16M 24
#define _PAGE_SIZE_64M 26
#define _PAGE_SIZE_256M 28
+#define _PAGE_SIZE_1G 30
+#define _PAGE_SIZE_4G 32
#define __ACCESS_BITS _PAGE_ED | _PAGE_A | _PAGE_P | _PAGE_MA_WB
#define __DIRTY_BITS_NO_ED _PAGE_A | _PAGE_P | _PAGE_D | _PAGE_MA_WB
#define set_pte(ptep, pteval) (*(ptep) = (pteval))
#define RGN_SIZE (1UL << 61)
-#define RGN_MAP_LIMIT ((1UL << (4*PAGE_SHIFT - 12)) - PAGE_SIZE) /* per region addr limit */
#define RGN_KERNEL 7
-#define VMALLOC_START (0xa000000000000000 + 3*PERCPU_PAGE_SIZE)
+#define VMALLOC_START 0xa000000200000000
#define VMALLOC_VMADDR(x) ((unsigned long)(x))
-#define VMALLOC_END (0xa000000000000000 + (1UL << (4*PAGE_SHIFT - 9)))
+#ifdef CONFIG_VIRTUAL_MEM_MAP
+# define VMALLOC_END_INIT (0xa000000000000000 + (1UL << (4*PAGE_SHIFT - 9)))
+# define VMALLOC_END vmalloc_end
+ extern unsigned long vmalloc_end;
+#else
+# define VMALLOC_END (0xa000000000000000 + (1UL << (4*PAGE_SHIFT - 9)))
+#endif
/* fs/proc/kcore.c */
-#define kc_vaddr_to_offset(v) ((v) - 0xA000000000000000)
-#define kc_offset_to_vaddr(o) ((o) + 0xA000000000000000)
+#define kc_vaddr_to_offset(v) ((v) - 0xa000000000000000)
+#define kc_offset_to_vaddr(o) ((o) + 0xa000000000000000)
/*
* Conversion functions: convert page frame number (pfn) and a protection value to a page
/*
* The following have defined behavior only work if pte_present() is true.
*/
+#define pte_user(pte) ((pte_val(pte) & _PAGE_PL_MASK) == _PAGE_PL_3)
#define pte_read(pte) (((pte_val(pte) & _PAGE_AR_MASK) >> _PAGE_AR_SHIFT) < 6)
#define pte_write(pte) ((unsigned) (((pte_val(pte) & _PAGE_AR_MASK) >> _PAGE_AR_SHIFT) - 2) <= 4)
#define pte_exec(pte) ((pte_val(pte) & _PAGE_AR_RX) != 0)
#define pte_mkdirty(pte) (__pte(pte_val(pte) | _PAGE_D))
/*
- * Macro to make mark a page protection value as "uncacheable". Note
- * that "protection" is really a misnomer here as the protection value
- * contains the memory attribute bits, dirty bits, and various other
- * bits as well.
+ * Macro to mark a page protection value as "uncacheable". Note that "protection" is really a
+ * misnomer here as the protection value contains the memory attribute bits, dirty bits,
+ * and various other bits as well.
*/
#define pgprot_noncached(prot) __pgprot((pgprot_val(prot) & ~_PAGE_MA_MASK) | _PAGE_MA_UC)
* for zero-mapped memory areas etc..
*/
extern unsigned long empty_zero_page[PAGE_SIZE/sizeof(unsigned long)];
-#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))
+extern struct page *zero_page_memmap_ptr;
+#define ZERO_PAGE(vaddr) (zero_page_memmap_ptr)
/* We provide our own get_unmapped_area to cope with VA holes for userland */
#define HAVE_ARCH_UNMAPPED_AREA
typedef pte_t *pte_addr_t;
+/*
+ * IA-64 doesn't have any external MMU info: the page tables contain all the necessary
+ * information. However, we use this routine to take care of any (delayed) i-cache
+ * flushing that may be necessary.
+ */
+extern void update_mmu_cache (struct vm_area_struct *vma, unsigned long vaddr, pte_t pte);
+
+# ifdef CONFIG_VIRTUAL_MEM_MAP
+ /* arch mem_map init routine is needed due to holes in a virtual mem_map */
+# define __HAVE_ARCH_MEMMAP_INIT
+ extern void memmap_init (struct page *start, unsigned long size, int nid, unsigned long zone,
+ unsigned long start_pfn);
+# endif /* CONFIG_VIRTUAL_MEM_MAP */
# endif /* !__ASSEMBLY__ */
/*
*/
#define KERNEL_TR_PAGE_SHIFT _PAGE_SIZE_64M
#define KERNEL_TR_PAGE_SIZE (1 << KERNEL_TR_PAGE_SHIFT)
-#define KERNEL_TR_PAGE_NUM ((KERNEL_START - PAGE_OFFSET) / KERNEL_TR_PAGE_SIZE)
/*
* No page table caches to initialise
*/
#define pgtable_cache_init() do { } while (0)
+/* These tell get_user_pages() that the first gate page is accessible from user-level. */
+#define FIXADDR_USER_START GATE_ADDR
+#define FIXADDR_USER_END (GATE_ADDR + 2*PAGE_SIZE)
+
#endif /* _ASM_IA64_PGTABLE_H */
#include <asm/ptrace.h>
#include <asm/kregs.h>
-#include <asm/types.h>
+#include <asm/ustack.h>
#define IA64_NUM_DBG_REGS 8
/*
#include <linux/cache.h>
#include <linux/compiler.h>
#include <linux/threads.h>
+#include <linux/types.h>
#include <asm/fpu.h>
-#include <asm/offsets.h>
#include <asm/page.h>
#include <asm/percpu.h>
#include <asm/rse.h>
__u64 ksp; /* kernel stack pointer */
__u64 map_base; /* base address for get_unmapped_area() */
__u64 task_size; /* limit for task size */
+ __u64 rbs_bot; /* the base address for the RBS */
int last_fph_cpu; /* CPU that may hold the contents of f32-f127 */
#ifdef CONFIG_IA32_SUPPORT
__u64 fcr; /* IA32 floating pt control reg */
__u64 fir; /* IA32 fp except. instr. reg */
__u64 fdr; /* IA32 fp except. data reg */
- __u64 csd; /* IA32 code selector descriptor */
- __u64 ssd; /* IA32 stack selector descriptor */
__u64 old_k1; /* old value of ar.k1 */
__u64 old_iob; /* old IOBase value */
# define INIT_THREAD_IA32 .eflag = 0, \
.fcr = 0x17800000037fULL, \
.fir = 0, \
.fdr = 0, \
- .csd = 0, \
- .ssd = 0, \
.old_k1 = 0, \
.old_iob = 0,
#else
# define INIT_THREAD_IA32
#endif /* CONFIG_IA32_SUPPORT */
#ifdef CONFIG_PERFMON
- __u64 pmc[IA64_NUM_PMC_REGS];
- __u64 pmd[IA64_NUM_PMD_REGS];
- unsigned long pfm_ovfl_block_reset;/* non-zero if we need to block or reset regs on ovfl */
- void *pfm_context; /* pointer to detailed PMU context */
- atomic_t pfm_notifiers_check; /* when >0, will cleanup ctx_notify_task in tasklist */
- atomic_t pfm_owners_check; /* when >0, will cleanup ctx_owner in tasklist */
- void *pfm_smpl_buf_list; /* list of sampling buffers to vfree */
-# define INIT_THREAD_PM .pmc = {0, }, \
- .pmd = {0, }, \
- .pfm_ovfl_block_reset = 0, \
- .pfm_context = NULL, \
- .pfm_notifiers_check = { 0 }, \
- .pfm_owners_check = { 0 }, \
- .pfm_smpl_buf_list = NULL,
+ __u64 pmcs[IA64_NUM_PMC_REGS];
+ __u64 pmds[IA64_NUM_PMD_REGS];
+ void *pfm_context; /* pointer to detailed PMU context */
+ unsigned long pfm_needs_checking; /* when >0, pending perfmon work on kernel exit */
+# define INIT_THREAD_PM .pmcs = {0UL, }, \
+ .pmds = {0UL, }, \
+ .pfm_context = NULL, \
+ .pfm_needs_checking = 0UL,
#else
# define INIT_THREAD_PM
#endif
.on_ustack = 0, \
.ksp = 0, \
.map_base = DEFAULT_MAP_BASE, \
+ .rbs_bot = DEFAULT_USER_STACK_SIZE, \
.task_size = DEFAULT_TASK_SIZE, \
- .last_fph_cpu = 0, \
+ .last_fph_cpu = -1, \
INIT_THREAD_IA32 \
INIT_THREAD_PM \
.dbr = {0, }, \
regs->cr_iip = new_ip; \
regs->ar_rsc = 0xf; /* eager mode, privilege level 3 */ \
regs->ar_rnat = 0; \
- regs->ar_bspstore = IA64_RBS_BOT; \
+ regs->ar_bspstore = current->thread.rbs_bot; \
regs->ar_fpsr = FPSR_DEFAULT; \
regs->loadrs = 0; \
regs->r8 = current->mm->dumpable; /* set "don't zap registers" flag */ \
regs->r12 = new_sp - 16; /* allocate 16 byte scratch area */ \
- if (unlikely(!current->mm->dumpable)) { \
+ if (unlikely(!current->mm->dumpable)) { \
/* \
* Zap scratch regs to avoid leaking bits between processes with different \
* uid/privileges. \
*/ \
- regs->ar_pfs = 0; \
- regs->pr = 0; \
- /* \
- * XXX fix me: everything below can go away once we stop preserving scratch \
- * regs on a system call. \
- */ \
- regs->b6 = 0; \
- regs->r1 = 0; regs->r2 = 0; regs->r3 = 0; \
- regs->r13 = 0; regs->r14 = 0; regs->r15 = 0; \
- regs->r9 = 0; regs->r11 = 0; \
- regs->r16 = 0; regs->r17 = 0; regs->r18 = 0; regs->r19 = 0; \
- regs->r20 = 0; regs->r21 = 0; regs->r22 = 0; regs->r23 = 0; \
- regs->r24 = 0; regs->r25 = 0; regs->r26 = 0; regs->r27 = 0; \
- regs->r28 = 0; regs->r29 = 0; regs->r30 = 0; regs->r31 = 0; \
- regs->ar_ccv = 0; \
- regs->b0 = 0; regs->b7 = 0; \
- regs->f6.u.bits[0] = 0; regs->f6.u.bits[1] = 0; \
- regs->f7.u.bits[0] = 0; regs->f7.u.bits[1] = 0; \
- regs->f8.u.bits[0] = 0; regs->f8.u.bits[1] = 0; \
- regs->f9.u.bits[0] = 0; regs->f9.u.bits[1] = 0; \
+ regs->ar_pfs = 0; regs->b0 = 0; regs->pr = 0; \
+ regs->r1 = 0; regs->r9 = 0; regs->r11 = 0; regs->r13 = 0; regs->r15 = 0; \
} \
} while (0)
* parent of DEAD_TASK has collected the exist status of the task via
* wait().
*/
-#ifdef CONFIG_PERFMON
- extern void release_thread (struct task_struct *task);
-#else
-# define release_thread(dead_task)
-#endif
+#define release_thread(dead_task)
/* Prepare to copy thread state - unlazy all lazy status */
#define prepare_to_copy(tsk) do { } while (0)
* do_basic_setup() and the timing is such that free_initmem() has
* been called already.
*/
-extern int kernel_thread (int (*fn)(void *), void *arg, unsigned long flags);
+extern pid_t kernel_thread (int (*fn)(void *), void *arg, unsigned long flags);
/* Get wait channel for task P. */
extern unsigned long get_wchan (struct task_struct *p);
}
}
-static inline struct task_struct *
-ia64_get_fpu_owner (void)
-{
- return (struct task_struct *) ia64_get_kr(IA64_KR_FPU_OWNER);
-}
+/*
+ * The following three macros can't be inline functions because we don't have struct
+ * task_struct at this point.
+ */
-static inline void
-ia64_set_fpu_owner (struct task_struct *t)
-{
- ia64_set_kr(IA64_KR_FPU_OWNER, (unsigned long) t);
-}
+/* Return TRUE if task T owns the fph partition of the CPU we're running on. */
+#define ia64_is_local_fpu_owner(t) \
+({ \
+ struct task_struct *__ia64_islfo_task = (t); \
+ (__ia64_islfo_task->thread.last_fph_cpu == smp_processor_id() \
+ && __ia64_islfo_task == (struct task_struct *) ia64_get_kr(IA64_KR_FPU_OWNER)); \
+})
+
+/* Mark task T as owning the fph partition of the CPU we're running on. */
+#define ia64_set_local_fpu_owner(t) do { \
+ struct task_struct *__ia64_slfo_task = (t); \
+ __ia64_slfo_task->thread.last_fph_cpu = smp_processor_id(); \
+ ia64_set_kr(IA64_KR_FPU_OWNER, (unsigned long) __ia64_slfo_task); \
+} while (0)
+
+/* Mark the fph partition of task T as being invalid on all CPUs. */
+#define ia64_drop_fpu(t) ((t)->thread.last_fph_cpu = -1)
extern void __ia64_init_fpu (void);
extern void __ia64_save_fpu (struct ia64_fpreg *fph);
asm volatile ("mov cr.lrr0=%0;; srlz.d" :: "r"(val) : "memory");
}
-#define cpu_relax() barrier()
+static inline void
+ia64_hint_pause (void)
+{
+ asm volatile ("hint @pause" ::: "memory");
+}
+#define cpu_relax() ia64_hint_pause()
static inline void
ia64_set_lrr1 (unsigned long val)
return result;
}
+/*
+ * Take a mapped kernel address and return the equivalent address
+ * in the region 7 identity mapped virtual area.
+ */
+static inline void *
+ia64_imva (void *addr)
+{
+ void *result;
+ asm ("tpa %0=%1" : "=r"(result) : "r"(addr));
+ return __va(result);
+}
+
#define ARCH_HAS_PREFETCH
#define ARCH_HAS_PREFETCHW
#define ARCH_HAS_SPINLOCK_PREFETCH
* Copyright (C) 1998-2003 Hewlett-Packard Co
* David Mosberger-Tang <davidm@hpl.hp.com>
* Stephane Eranian <eranian@hpl.hp.com>
+ * Copyright (C) 2003 Intel Co
+ * Suresh Siddha <suresh.b.siddha@intel.com>
+ * Fenghua Yu <fenghua.yu@intel.com>
+ * Arun Sharma <arun.sharma@intel.com>
*
* 12/07/98 S. Eranian added pt_regs & switch_stack
* 12/21/98 D. Mosberger updated to match latest code
*/
struct pt_regs {
/* The following registers are saved by SAVE_MIN: */
+ unsigned long b6; /* scratch */
+ unsigned long b7; /* scratch */
+
+ unsigned long ar_csd; /* used by cmp8xchg16 (scratch) */
+ unsigned long ar_ssd; /* reserved for future use (scratch) */
+
+ unsigned long r8; /* scratch (return value register 0) */
+ unsigned long r9; /* scratch (return value register 1) */
+ unsigned long r10; /* scratch (return value register 2) */
+ unsigned long r11; /* scratch (return value register 3) */
unsigned long cr_ipsr; /* interrupted task's psr */
unsigned long cr_iip; /* interrupted task's instruction pointer */
unsigned long ar_bspstore; /* RSE bspstore */
unsigned long pr; /* 64 predicate registers (1 bit each) */
- unsigned long b6; /* scratch */
+ unsigned long b0; /* return pointer (bp) */
unsigned long loadrs; /* size of dirty partition << 16 */
unsigned long r1; /* the gp pointer */
- unsigned long r2; /* scratch */
- unsigned long r3; /* scratch */
unsigned long r12; /* interrupted task's memory stack pointer */
unsigned long r13; /* thread pointer */
- unsigned long r14; /* scratch */
+
+ unsigned long ar_fpsr; /* floating point status (preserved) */
unsigned long r15; /* scratch */
- unsigned long r8; /* scratch (return value register 0) */
- unsigned long r9; /* scratch (return value register 1) */
- unsigned long r10; /* scratch (return value register 2) */
- unsigned long r11; /* scratch (return value register 3) */
+ /* The remaining registers are NOT saved for system calls. */
- /* The following registers are saved by SAVE_REST: */
+ unsigned long r14; /* scratch */
+ unsigned long r2; /* scratch */
+ unsigned long r3; /* scratch */
+ /* The following registers are saved by SAVE_REST: */
unsigned long r16; /* scratch */
unsigned long r17; /* scratch */
unsigned long r18; /* scratch */
unsigned long r31; /* scratch */
unsigned long ar_ccv; /* compare/exchange value (scratch) */
- unsigned long ar_fpsr; /* floating point status (preserved) */
- unsigned long b0; /* return pointer (bp) */
- unsigned long b7; /* scratch */
/*
- * Floating point registers that the kernel considers
- * scratch:
+ * Floating point registers that the kernel considers scratch:
*/
struct ia64_fpreg f6; /* scratch */
struct ia64_fpreg f7; /* scratch */
struct ia64_fpreg f8; /* scratch */
struct ia64_fpreg f9; /* scratch */
+ struct ia64_fpreg f10; /* scratch */
+ struct ia64_fpreg f11; /* scratch */
};
/*
struct ia64_fpreg f4; /* preserved */
struct ia64_fpreg f5; /* preserved */
- struct ia64_fpreg f10; /* scratch, but untouched by kernel */
- struct ia64_fpreg f11; /* scratch, but untouched by kernel */
struct ia64_fpreg f12; /* scratch, but untouched by kernel */
struct ia64_fpreg f13; /* scratch, but untouched by kernel */
struct ia64_fpreg f14; /* scratch, but untouched by kernel */
!user_mode(_regs) && user_stack(_task, _regs); \
})
+ /*
+ * System call handlers that, upon successful completion, need to return a negative value
+ * should call force_successful_syscall_return() right before returning. On architectures
+ * where the syscall convention provides for a separate error flag (e.g., alpha, ia64,
+ * ppc{,64}, sparc{,64}, possibly others), this macro can be used to ensure that the error
+ * flag will not get set. On architectures which do not support a separate error flag,
+ * the macro is a no-op and the spurious error condition needs to be filtered out by some
+ * other means (e.g., in user-level, by passing an extra argument to the syscall handler,
+ * or something along those lines).
+ *
+ * On ia64, we can clear the user's pt_regs->r8 to force a successful syscall.
+ */
+# define force_successful_syscall_return() (ia64_task_regs(current)->r8 = 0)
+
struct task_struct; /* forward decl */
struct unw_frame_info; /* forward decl */
extern void ia64_increment_ip (struct pt_regs *pt);
extern void ia64_decrement_ip (struct pt_regs *pt);
-#define force_successful_syscall_return() \
- do { \
- ia64_task_regs(current)->r8 = 0; \
- } while (0)
-
#endif /* !__KERNEL__ */
/* pt_all_user_regs is used for PTRACE_GETREGS PTRACE_SETREGS */
#define _ASM_IA64_PTRACE_OFFSETS_H
/*
- * Copyright (C) 1999 Hewlett-Packard Co
- * Copyright (C) 1999 David Mosberger-Tang <davidm@hpl.hp.com>
+ * Copyright (C) 1999, 2003 Hewlett-Packard Co
+ * David Mosberger-Tang <davidm@hpl.hp.com>
*/
/*
* The "uarea" that can be accessed via PEEKUSER and POKEUSER is a
*
* struct uarea {
* struct ia64_fpreg fph[96]; // f32-f127
- * struct switch_stack sw;
- * struct pt_regs pt;
- * unsigned long rsvd1[712];
+ * unsigned long nat_bits;
+ * unsigned long empty1;
+ * struct ia64_fpreg f2; // f2-f5
+ * :
+ * struct ia64_fpreg f5;
+ * struct ia64_fpreg f10; // f10-f31
+ * :
+ * struct ia64_fpreg f31;
+ * unsigned long r4; // r4-r7
+ * :
+ * unsigned long r7;
+ * unsigned long b1; // b1-b5
+ * :
+ * unsigned long b5;
+ * unsigned long ar_ec;
+ * unsigned long ar_lc;
+ * unsigned long empty2[5];
+ * unsigned long cr_ipsr;
+ * unsigned long cr_iip;
+ * unsigned long cfm;
+ * unsigned long ar_unat;
+ * unsigned long ar_pfs;
+ * unsigned long ar_rsc;
+ * unsigned long ar_rnat;
+ * unsigned long ar_bspstore;
+ * unsigned long pr;
+ * unsigned long b6;
+ * unsigned long ar_bsp;
+ * unsigned long r1;
+ * unsigned long r2;
+ * unsigned long r3;
+ * unsigned long r12;
+ * unsigned long r13;
+ * unsigned long r14;
+ * unsigned long r15;
+ * unsigned long r8;
+ * unsigned long r9;
+ * unsigned long r10;
+ * unsigned long r11;
+ * unsigned long r16;
+ * :
+ * unsigned long r31;
+ * unsigned long ar_ccv;
+ * unsigned long ar_fpsr;
+ * unsigned long b0;
+ * unsigned long b7;
+ * unsigned long f6;
+ * unsigned long f7;
+ * unsigned long f8;
+ * unsigned long f9;
+ * unsigned long ar_csd;
+ * unsigned long ar_ssd;
+ * unsigned long rsvd1[710];
* unsigned long dbr[8];
* unsigned long rsvd2[504];
* unsigned long ibr[8];
#define PT_F125 0x05d0
#define PT_F126 0x05e0
#define PT_F127 0x05f0
-/* switch stack: */
+
#define PT_NAT_BITS 0x0600
#define PT_F2 0x0610
#define PT_AR_EC 0x0800
#define PT_AR_LC 0x0808
-/* pt_regs */
#define PT_CR_IPSR 0x0830
#define PT_CR_IIP 0x0838
#define PT_CFM 0x0840
#define PT_F7 0x0990
#define PT_F8 0x09a0
#define PT_F9 0x09b0
+#define PT_AR_CSD 0x09c0
+#define PT_AR_SSD 0x09c8
#define PT_DBR 0x2000 /* data breakpoint registers */
#define PT_IBR 0x3000 /* instruction breakpoint registers */
* Copyright (C) 1998, 1999 David Mosberger-Tang <davidm@hpl.hp.com>
*/
+#include <asm/ustack.h>
+
#define RLIMIT_CPU 0 /* CPU time in ms */
#define RLIMIT_FSIZE 1 /* Maximum filesize */
#define RLIMIT_DATA 2 /* max data size */
{ RLIM_INFINITY, RLIM_INFINITY }, \
{ RLIM_INFINITY, RLIM_INFINITY }, \
{ RLIM_INFINITY, RLIM_INFINITY }, \
- { _STK_LIM, RLIM_INFINITY }, \
+ { _STK_LIM, DEFAULT_USER_STACK_SIZE }, \
{ 0, RLIM_INFINITY }, \
{ RLIM_INFINITY, RLIM_INFINITY }, \
{ 0, 0 }, \
* (plus examples of platform error info structures from smariset @ Intel)
*/
+#define IA64_SAL_PLATFORM_FEATURE_BUS_LOCK_BIT 0
+#define IA64_SAL_PLATFORM_FEATURE_IRQ_REDIR_HINT_BIT 1
+#define IA64_SAL_PLATFORM_FEATURE_IPI_REDIR_HINT_BIT 2
+#define IA64_SAL_PLATFORM_FEATURE_ITC_DRIFT_BIT 3
+
+#define IA64_SAL_PLATFORM_FEATURE_BUS_LOCK (1<<IA64_SAL_PLATFORM_FEATURE_BUS_LOCK_BIT)
+#define IA64_SAL_PLATFORM_FEATURE_IRQ_REDIR_HINT (1<<IA64_SAL_PLATFORM_FEATURE_IRQ_REDIR_HINT_BIT)
+#define IA64_SAL_PLATFORM_FEATURE_IPI_REDIR_HINT (1<<IA64_SAL_PLATFORM_FEATURE_IPI_REDIR_HINT_BIT)
+#define IA64_SAL_PLATFORM_FEATURE_ITC_DRIFT (1<<IA64_SAL_PLATFORM_FEATURE_ITC_DRIFT_BIT)
+
+#ifndef __ASSEMBLY__
+
#include <linux/spinlock.h>
#include <linux/efi.h>
u8 oem_reserved[8];
} ia64_sal_desc_memory_t;
-#define IA64_SAL_PLATFORM_FEATURE_BUS_LOCK (1 << 0)
-#define IA64_SAL_PLATFORM_FEATURE_IRQ_REDIR_HINT (1 << 1)
-#define IA64_SAL_PLATFORM_FEATURE_IPI_REDIR_HINT (1 << 2)
-#define IA64_SAL_PLATFORM_FEATURE_ITC_DRIFT (1 << 3)
-
typedef struct ia64_sal_desc_platform_feature {
u8 type;
u8 feature_mask;
extern unsigned long sal_platform_features;
+#endif /* __ASSEMBLY__ */
+
#endif /* _ASM_IA64_PAL_H */
unsigned long sc_rbs_base; /* NULL or new base of sighandler's rbs */
unsigned long sc_loadrs; /* see description above */
- unsigned long sc_ar25; /* rsvd for scratch use */
+ unsigned long sc_ar25; /* cmp8xchg16 uses this */
unsigned long sc_ar26; /* rsvd for scratch use */
unsigned long sc_rsvd[12]; /* reserved for future use */
/*
long _band; /* POLL_IN, POLL_OUT, POLL_MSG (XPG requires a "long") */
int _fd;
} _sigpoll;
-
- /* SIGPROF */
- struct {
- pid_t _pid; /* which child */
- uid_t _uid; /* sender's uid */
- unsigned long _pfm_ovfl_counters[4]; /* which PMU counter overflowed */
- } _sigprof;
} _sifields;
} siginfo_t;
#define __ISR_VALID (1 << __ISR_VALID_BIT)
/*
- * si_code values
- * Positive values for kernel-generated signals.
- */
-#ifdef __KERNEL__
-#define __SI_PROF (6 << 16)
-#endif
-
-/*
* SIGILL si_codes
*/
#define ILL_BADIADDR (__SI_FAULT|9) /* unimplemented instruction address */
#undef NSIGTRAP
#define NSIGTRAP 4
-/*
- * SIGPROF si_codes
- */
-#define PROF_OVFL (__SI_PROF|1) /* some counters overflowed */
-
#ifdef __KERNEL__
#include <linux/string.h>
if (from->si_code < 0)
memcpy(to, from, sizeof(siginfo_t));
else
- /* _sigprof is currently the largest know union member */
- memcpy(to, from, 4*sizeof(int) + sizeof(from->_sifields._sigprof));
+ /* _sigchld is currently the largest known union member */
+ memcpy(to, from, 4*sizeof(int) + sizeof(from->_sifields._sigchld));
}
extern int copy_siginfo_from_user(siginfo_t *to, siginfo_t *from);
-
/*
- *
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
- * Copyright (c) 1992-1999,2001 Silicon Graphics, Inc. All rights reserved.
+ * Copyright (c) 1992-1999,2001-2003 Silicon Graphics, Inc. All rights reserved.
*/
-
#ifndef _ASM_IA64_SN_ADDRS_H
#define _ASM_IA64_SN_ADDRS_H
#include <linux/config.h>
-#if defined (CONFIG_IA64_SGI_SN1)
-#include <asm/sn/sn1/addrs.h>
-#elif defined (CONFIG_IA64_SGI_SN2)
#include <asm/sn/sn2/addrs.h>
-#else
-#error <<<BOMB! addrs.h defined only for SN1, or SN2 >>>
-#endif /* !SN1 && !SN2 */
#ifndef __ASSEMBLY__
#include <asm/sn/types.h>
#define PS_UINT_CAST (__psunsigned_t)
#define UINT64_CAST (uint64_t)
-#ifdef CONFIG_IA64_SGI_SN2
#define HUBREG_CAST (volatile mmr_t *)
-#else
-#define HUBREG_CAST (volatile hubreg_t *)
-#endif
#elif __ASSEMBLY__
* node's address space.
*/
-#ifdef CONFIG_IA64_SGI_SN2 /* SN2 has an extra AS field between node offset and node id (nasid) */
#define NODE_OFFSET(_n) (UINT64_CAST (_n) << NASID_SHFT)
-#else
-#define NODE_OFFSET(_n) (UINT64_CAST (_n) << NODE_SIZE_BITS)
-#endif
#define NODE_CAC_BASE(_n) (CAC_BASE + NODE_OFFSET(_n))
#define NODE_HSPEC_BASE(_n) (HSPEC_BASE + NODE_OFFSET(_n))
*/
#define SWIN_SIZE_BITS 24
-#define SWIN_SIZE (UINT64_CAST 1 << 24)
+#define SWIN_SIZE (1UL<<24)
#define SWIN_SIZEMASK (SWIN_SIZE - 1)
#define SWIN_WIDGET_MASK 0xF
* references to the local hub's registers.
*/
-#if defined CONFIG_IA64_SGI_SN1
-#define LREG_BASE (HSPEC_BASE + 0x10000000)
-#define LREG_SIZE 0x8000000 /* 128 MB */
-#define LREG_LIMIT (LREG_BASE + LREG_SIZE)
-#define LBOOT_BASE (LREG_LIMIT)
-#define LBOOT_SIZE 0x8000000 /* 128 MB */
-#define LBOOT_LIMIT (LBOOT_BASE + LBOOT_SIZE)
-#define LBOOT_STRIDE 0x2000000 /* two PROMs, on 32M boundaries */
-#endif
-
#define HUB_REGISTER_WIDGET 1
-#ifdef CONFIG_IA64_SGI_SN2
#define IALIAS_BASE LOCAL_SWIN_BASE(HUB_REGISTER_WIDGET)
-#else
-#define IALIAS_BASE NODE_SWIN_BASE(0, HUB_REGISTER_WIDGET)
-#endif
#define IALIAS_SIZE 0x800000 /* 8 Megabytes */
#define IS_IALIAS(_a) (((_a) >= IALIAS_BASE) && \
((_a) < (IALIAS_BASE + IALIAS_SIZE)))
/*
- * Macro for referring to Hub's RBOOT space
- */
-
-#if defined CONFIG_IA64_SGI_SN1
-
-#define NODE_LREG_BASE(_n) (NODE_HSPEC_BASE(_n) + 0x30000000)
-#define NODE_LREG_LIMIT(_n) (NODE_LREG_BASE(_n) + LREG_SIZE)
-#define RREG_BASE(_n) (NODE_LREG_BASE(_n))
-#define RREG_LIMIT(_n) (NODE_LREG_LIMIT(_n))
-#define RBOOT_SIZE 0x8000000 /* 128 Megabytes */
-#define NODE_RBOOT_BASE(_n) (NODE_HSPEC_BASE(_n) + 0x38000000)
-#define NODE_RBOOT_LIMIT(_n) (NODE_RBOOT_BASE(_n) + RBOOT_SIZE)
-
-#endif
-
-
-/*
* The following macros produce the correct base virtual address for
* the hub registers. The LOCAL_HUB_* macros produce the appropriate
* address for the local registers. The REMOTE_HUB_* macro produce
*/
-#ifdef CONFIG_IA64_SGI_SN2
/*
- * SN2 has II mmr's located inside small window space like SN0 & SN1,
- * but has all other non-II mmr's located at the top of big window
- * space, unlike SN0 & SN1.
+ * SN2 has II mmr's located inside small window space,
+ * while all other non-II mmr's are located at the top of
+ * big window space.
*/
#define LOCAL_HUB_BASE(_x) (LOCAL_MMR_ADDR(_x) | (((~(_x)) & BWIN_TOP)>>8))
#define REMOTE_HUB_BASE(_x) \
#define REMOTE_HUB(_n, _x) \
(HUBREG_CAST (REMOTE_HUB_BASE(_x) | ((((long)(_n))<<NASID_SHFT))))
-#else /* not CONFIG_IA64_SGI_SN2 */
-
-#define LOCAL_HUB(_x) (HUBREG_CAST (IALIAS_BASE + (_x)))
-#define REMOTE_HUB(_n, _x) (HUBREG_CAST (NODE_SWIN_BASE(_n, 1) + \
- 0x800000 + (_x)))
-#endif
-
-#ifdef CONFIG_IA64_SGI_SN1
-#define LOCAL_HSPEC(_x) (HUBREG_CAST (LREG_BASE + (_x)))
-#define REMOTE_HSPEC(_n, _x) (HUBREG_CAST (RREG_BASE(_n) + (_x)))
-#endif /* CONFIG_IA64_SGI_SN1 */
-
/*
* WARNING:
* Otherwise, the recommended approach is to use *_HUB_L() and *_HUB_S().
* They're always safe.
*/
-#ifdef CONFIG_IA64_SGI_SN2
#define LOCAL_HUB_ADDR(_x) \
(((_x) & BWIN_TOP) ? (HUBREG_CAST (LOCAL_MMR_ADDR(_x))) \
: (HUBREG_CAST (IALIAS_BASE + (_x))))
#define REMOTE_HUB_ADDR(_n, _x) \
(((_x) & BWIN_TOP) ? (HUBREG_CAST (GLOBAL_MMR_ADDR(_n, _x))) \
: (HUBREG_CAST (NODE_SWIN_BASE(_n, 1) + 0x800000 + (_x))))
-#else
-#define LOCAL_HUB_ADDR(_x) (HUBREG_CAST (IALIAS_BASE + (_x)))
-#define REMOTE_HUB_ADDR(_n, _x) (HUBREG_CAST (NODE_SWIN_BASE(_n, 1) + \
- 0x800000 + (_x)))
-#endif
-#ifdef CONFIG_IA64_SGI_SN1
-#define REMOTE_HUB_PI_ADDR(_n, _sn, _x) (HUBREG_CAST (NODE_SWIN_BASE(_n, 1) + \
- 0x800000 + PIREG(_x, _sn)))
-#endif
-
-#ifdef CONFIG_IA64_SGI_SN1
-#define LOCAL_HSPEC_ADDR(_x) (HUBREG_CAST (LREG_BASE + (_x)))
-#define REMOTE_HSPEC_ADDR(_n, _x) (HUBREG_CAST (RREG_BASE(_n) + (_x)))
-#endif /* CONFIG_IA64_SGI_SN1 */
#ifndef __ASSEMBLY__
#define REMOTE_HUB_PI_L(_n, _sn, _r) HUB_L(REMOTE_HUB_PI_ADDR((_n), (_sn), (_r)))
#define REMOTE_HUB_PI_S(_n, _sn, _r, _d) HUB_S(REMOTE_HUB_PI_ADDR((_n), (_sn), (_r)), (_d))
-#ifdef CONFIG_IA64_SGI_SN1
-#define LOCAL_HSPEC_L(_r) HUB_L(LOCAL_HSPEC_ADDR(_r))
-#define LOCAL_HSPEC_S(_r, _d) HUB_S(LOCAL_HSPEC_ADDR(_r), (_d))
-#define REMOTE_HSPEC_L(_n, _r) HUB_L(REMOTE_HSPEC_ADDR((_n), (_r)))
-#define REMOTE_HSPEC_S(_n, _r, _d) HUB_S(REMOTE_HSPEC_ADDR((_n), (_r)), (_d))
-#endif /* CONFIG_IA64_SGI_SN1 */
-
#endif /* __ASSEMBLY__ */
/*
#define KLD_KERN_XP(nasid) (KLD_BASE(nasid) + KLI_KERN_XP)
#define KLD_KERN_PARTID(nasid) (KLD_BASE(nasid) + KLI_KERN_PARTID)
-#ifndef CONFIG_IA64_SGI_SN2
-#define KLCONFIG_OFFSET(nasid) KLD_KLCONFIG(nasid)->offset
-#else
-#define KLCONFIG_OFFSET(nasid) \
- ia64_sn_get_klconfig_addr(nasid)
-#endif /* CONFIG_IA64_SGI_SN2 */
+#define KLCONFIG_OFFSET(nasid) ia64_sn_get_klconfig_addr(nasid)
#define KLCONFIG_ADDR(nasid) \
TO_NODE_CAC((nasid), KLCONFIG_OFFSET(nasid))
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
- * Copyright (C) 1992 - 1997, 2000-2002 Silicon Graphics, Inc. All rights reserved.
+ * Copyright (C) 1992 - 1997, 2000-2003 Silicon Graphics, Inc. All rights reserved.
*/
#ifndef _ASM_IA64_SN_ALENLIST_H
#define _ASM_IA64_SN_ALENLIST_H
/*
- *
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
- * Copyright (C) 2000-2001 Silicon Graphics, Inc. All rights reserved.
+ * Copyright (C) 2000-2003 Silicon Graphics, Inc. All rights reserved.
*/
-
/* $Id$
*
* ARCS hardware/memory inventory/configuration and system ID definitions.
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
+ * Copyright (c) 1999,2001-2003 Silicon Graphics, Inc. All Rights Reserved.
* Copyright 1999 Ralf Baechle (ralf@gnu.org)
- * Copyright 1999,2001 Silicon Graphics, Inc.
*/
#ifndef _ASM_SN_ARC_TYPES_H
#define _ASM_SN_ARC_TYPES_H
*
* SGI specific setup.
*
- * Copyright (C) 1995-1997,1999,2001-2002 Silicon Graphics, Inc. All rights reserved.
+ * Copyright (C) 1995-1997,1999,2001-2003 Silicon Graphics, Inc. All rights reserved.
* Copyright (C) 1999 Ralf Baechle (ralf@gnu.org)
*/
#ifndef _ASM_IA64_SN_ARCH_H
#include <linux/mmzone.h>
#include <asm/sn/types.h>
-#if defined(CONFIG_IA64_SGI_SN1)
-#include <asm/sn/sn1/arch.h>
-#elif defined(CONFIG_IA64_SGI_SN2)
#include <asm/sn/sn2/arch.h>
-#endif
-
-#if defined(CONFIG_IA64_SGI_SN1)
-typedef u64 bdrkreg_t;
-#elif defined(CONFIG_IA64_SGI_SN2)
typedef u64 shubreg_t;
-#endif
-
typedef u64 hubreg_t;
typedef u64 mmr_t;
typedef u64 nic_t;
-typedef char cnodeid_t;
#define CNODE_TO_CPU_BASE(_cnode) (NODEPDA(_cnode)->node_first_cpu)
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
- * Copyright (C) 1992 - 1997, 2000-2002 Silicon Graphics, Inc. All rights reserved.
+ * Copyright (C) 1992 - 1997, 2000-2003 Silicon Graphics, Inc. All rights reserved.
*/
/*
/*
*
*
- * Copyright (c) 2000-2002 Silicon Graphics, Inc. All Rights Reserved.
+ * Copyright (c) 2000-2003 Silicon Graphics, Inc. All Rights Reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License
#ifndef _ASM_IA64_SN_BTE_H
#define _ASM_IA64_SN_BTE_H
-#ident "$Revision: 1.1 $"
-
+#include <linux/timer.h>
#include <linux/spinlock.h>
#include <linux/cache.h>
#include <asm/sn/io.h>
+#include <asm/delay.h>
+
+
+/* #define BTE_DEBUG */
+/* #define BTE_DEBUG_VERBOSE */
+
+#ifdef BTE_DEBUG
+# define BTE_PRINTK(x) printk x /* Terse */
+# ifdef BTE_DEBUG_VERBOSE
+# define BTE_PRINTKV(x) printk x /* Verbose */
+# else
+# define BTE_PRINTKV(x)
+# endif /* BTE_DEBUG_VERBOSE */
+#else
+# define BTE_PRINTK(x)
+# define BTE_PRINTKV(x)
+#endif /* BTE_DEBUG */
+
/* BTE status register only supports 16 bits for length field */
#define BTE_LEN_BITS (16)
#define BTE_MAX_XFER ((1 << BTE_LEN_BITS) * L1_CACHE_BYTES)
-/*
- * Constants used in determining the best and worst case transfer
- * times. To help explain the two, the following graph of transfer
- * status vs time may help.
- *
- * active +------------------:-+ :
- * status | : | :
- * idle +__________________:_+=======
- * 0 Time MaxT MinT
- *
- * Therefore, MaxT is the maximum thoeretical rate for transfering
- * the request block (assuming ideal circumstances)
- *
- * MinT is the minimum theoretical rate for transferring the
- * requested block (assuming maximum link distance and contention)
- *
- * The following defines are the inverse of the above. They are
- * used for calculating the MaxT time and MinT time given the
- * number of lines in the transfer.
- */
-#define BTE_MAXT_LINES_PER_SECOND 800
-#define BTE_MINT_LINES_PER_SECOND 600
-
-
/* Define hardware */
#define BTES_PER_NODE 2
+
/* Define hardware modes */
#define BTE_NOTIFY (IBCT_NOTIFY)
#define BTE_NORMAL BTE_NOTIFY
#define BTE_ZERO_FILL (BTE_NOTIFY | IBCT_ZFIL_MODE)
-
/* Use a reserved bit to let the caller specify a wait for any BTE */
#define BTE_WACQUIRE (0x4000)
+/* macro to force the IBCT0 value valid */
+#define BTE_VALID_MODE(x) ((x) & (IBCT_NOTIFY | IBCT_ZFIL_MODE))
+
+
+/*
+ * Handle locking of the bte interfaces.
+ *
+ * All transfers spinlock the interface before setting up the SHUB
+ * registers. Sync transfers hold the lock until all processing is
+ * complete. Async transfers release the lock as soon as the transfer
+ * is initiated.
+ *
+ * To determine if an interface is available, we must check both the
+ * busy bit and the spinlock for that interface.
+ */
+#define BTE_LOCK_IF_AVAIL(_x) (\
+ (*pda->cpu_bte_if[_x]->most_rcnt_na & (IBLS_BUSY | IBLS_ERROR)) && \
+ (!(spin_trylock(&(pda->cpu_bte_if[_x]->spinlock)))) \
+ )
+
+/*
+ * Some macros to simplify reading.
+ * Start with macros to locate the BTE control registers.
+ */
+#define BTEREG_LNSTAT_ADDR ((u64 *)(bte->bte_base_addr))
+#define BTEREG_SRC_ADDR ((u64 *)(bte->bte_base_addr + BTEOFF_SRC))
+#define BTEREG_DEST_ADDR ((u64 *)(bte->bte_base_addr + BTEOFF_DEST))
+#define BTEREG_CTRL_ADDR ((u64 *)(bte->bte_base_addr + BTEOFF_CTRL))
+#define BTEREG_NOTIF_ADDR ((u64 *)(bte->bte_base_addr + BTEOFF_NOTIFY))
+
+
+/* Possible results from bte_copy and bte_unaligned_copy */
+typedef enum {
+ BTE_SUCCESS, /* 0 is success */
+ BTEFAIL_NOTAVAIL, /* BTE not available */
+ BTEFAIL_POISON, /* poison page */
+ BTEFAIL_PROT, /* Protection violation */
+ BTEFAIL_ACCESS, /* access error */
+ BTEFAIL_TOUT, /* Time out */
+ BTEFAIL_XTERR, /* Directory error */ /* NOTE(review): same text as BTEFAIL_DIR below — likely meant "Xtalk error"; confirm against SHUB BTE error codes */
+ BTEFAIL_DIR, /* Directory error */
+ BTEFAIL_ERROR, /* Generic error */
+} bte_result_t;
+
/*
* Structure defining a bte. An instance of this
* This structure contains everything necessary
* to work with a BTE.
*/
-typedef struct bteinfo_s {
+struct bteinfo_s {
u64 volatile notify ____cacheline_aligned;
char *bte_base_addr ____cacheline_aligned;
spinlock_t spinlock;
- u64 ideal_xfr_tmo; /* Time out */
- u64 ideal_xfr_tmo_cnt;
- /* u64 most_recent_src;
- * u64 most_recent_dest;
- * u64 most_recent_len;
- * u64 most_recent_mode; */
+ cnodeid_t bte_cnode; /* cnode */
+ int bte_error_count; /* Number of errors encountered */
+ int bte_num; /* 0 --> BTE0, 1 --> BTE1 */
+ int cleanup_active; /* Interface is locked for cleanup */
+ volatile bte_result_t bh_error; /* error while processing */
u64 volatile *most_rcnt_na;
- void *bte_test_buf;
-} bteinfo_t;
+ void *scratch_buf; /* Node local scratch buffer */
+};
-/* Possible results from bte_copy and bte_unaligned_copy */
-typedef enum {
- BTE_SUCCESS, /* 0 is success */
- BTEFAIL_NOTAVAIL, /* BTE not available */
- BTEFAIL_ERROR, /* Generic error */
- BTEFAIL_DIR /* Diretory error */
-} bte_result_t;
-void bte_reset_nasid(nasid_t);
+/*
+ * Function prototypes (functions defined in bte.c, used elsewhere)
+ */
+extern bte_result_t bte_copy(u64, u64, u64, u64, void *);
+extern bte_result_t bte_unaligned_copy(u64, u64, u64, u64);
+extern void bte_error_handler(unsigned long);
+
+
+/*
+ * The following is the preferred way of calling bte_unaligned_copy.
+ * If the copy is fully cache line aligned, then bte_copy is
+ * used instead (bte_copy is now an extern defined in bte.c, so this
+ * is purely an alignment dispatch). NOTE: bte_copy is called synchronously and does block
+ * until the transfer is complete. In order to get the asynch
+ * version of bte_copy, you must perform this check yourself.
+ */
+#define BTE_UNALIGNED_COPY(src, dest, len, mode) \
+ (((len & L1_CACHE_MASK) || (src & L1_CACHE_MASK) || \
+ (dest & L1_CACHE_MASK)) ? \
+ bte_unaligned_copy(src, dest, len, mode) : \
+ bte_copy(src, dest, len, mode, NULL))
+
-#endif /* _ASM_IA64_SN_BTE_H */
+#endif /* _ASM_IA64_SN_BTE_H */
+++ /dev/null
-/*
- *
- *
- * Copyright (c) 2000-2002 Silicon Graphics, Inc. All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it would be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
- *
- * Further, this software is distributed without any warranty that it is
- * free of the rightful claim of any third person regarding infringement
- * or the like. Any license provided herein, whether implied or
- * otherwise, applies only to this software file. Patent licenses, if
- * any, provided herein do not apply to combinations of this program with
- * other software, or any other product whatsoever.
- *
- * You should have received a copy of the GNU General Public
- * License along with this program; if not, write the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
- *
- * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy,
- * Mountain View, CA 94043, or:
- *
- * http://www.sgi.com
- *
- * For further information regarding this notice, see:
- *
- * http://oss.sgi.com/projects/GenInfo/NoticeExplan
- */
-
-#ifndef _ASM_IA64_SN_BTE_COPY_H
-#define _ASM_IA64_SN_BTE_COPY_H
-
-#ident "$Revision: 1.1 $"
-
-#include <linux/timer.h>
-#include <linux/cache.h>
-#include <asm/sn/bte.h>
-#include <asm/sn/sgi.h>
-#include <asm/sn/pda.h>
-#include <asm/delay.h>
-
-#define L1_CACHE_MASK (L1_CACHE_BYTES - 1)
-
-/*
- * BTE_LOCKING support - When CONFIG_IA64_SGI_BTE_LOCKING is
- * not defined, the bte_copy code supports one bte per cpu in
- * synchronous mode. Even if bte_copy is called with a
- * notify address, the bte will spin and wait for the transfer
- * to complete. By defining the following, spin_locks and
- * busy checks are placed around the initiation of a BTE
- * transfer and multiple bte's per cpu are supported.
- */
-#if 0
-#define CONFIG_IA64_SGI_BTE_LOCKING 1
-#endif
-
-/*
- * Handle locking of the bte interfaces.
- *
- * All transfers spinlock the interface before setting up the SHUB
- * registers. Sync transfers hold the lock until all processing is
- * complete. Async transfers release the lock as soon as the transfer
- * is initiated.
- *
- * To determine if an interface is available, we must check both the
- * busy bit and the spinlock for that interface.
- */
-#define BTE_LOCK_IF_AVAIL(_x) (\
- (*pda.cpu_bte_if[_x]->most_rcnt_na & IBLS_BUSY) && \
- (!(spin_trylock(&(pda.cpu_bte_if[_x]->spinlock)))) \
- )
-
-/*
- * Some macros to simplify reading.
- *
- * Start with macros to locate the BTE control registers.
- */
-
-#define BTEREG_LNSTAT_ADDR ((u64 *)(bte->bte_base_addr))
-#define BTEREG_SRC_ADDR ((u64 *)(bte->bte_base_addr + BTEOFF_SRC))
-#define BTEREG_DEST_ADDR ((u64 *)(bte->bte_base_addr + BTEOFF_DEST))
-#define BTEREG_CTRL_ADDR ((u64 *)(bte->bte_base_addr + BTEOFF_CTRL))
-#define BTEREG_NOTIF_ADDR ((u64 *)(bte->bte_base_addr + BTEOFF_NOTIFY))
-
-/* Some macros to force the IBCT0 value valid. */
-
-#define BTE_VALID_MODES BTE_NOTIFY
-#define BTE_VLD_MODE(x) (x & BTE_VALID_MODES)
-
-// #define BTE_DEBUG
-// #define BTE_DEBUG_VERBOSE
-// #define BTE_TIME
-
-#ifdef BTE_DEBUG
-# define BTE_PRINTK(x) printk x /* Terse */
-# ifdef BTE_DEBUG_VERBOSE
-# define BTE_PRINTKV(x) printk x /* Verbose */
-# else
-# define BTE_PRINTKV(x)
-# endif /* BTE_DEBUG_VERBOSE */
-#else
-# define BTE_PRINTK(x)
-# define BTE_PRINTKV(x)
-#endif /* BTE_DEBUG */
-
-#define BTE_IDEAL_TMO(x) (jiffies + \
- (HZ / BTE_MAXT_LINES_PER_SECOND * x))
-
-#ifdef BTE_TIME
-volatile extern u64 bte_setup_time;
-volatile extern u64 bte_transfer_time;
-volatile extern u64 bte_tear_down_time;
-volatile extern u64 bte_execute_time;
-
-#define BTE_TIME_DECLARE() \
- u64 btcp_strt_tm = 0; \
- u64 btcp_cplt_tm = 0; \
- u64 xfr_strt_tm = 0; \
- u64 xfr_cplt_tm = 0; \
-
-#define BTE_TIME_START() \
- btcp_strt_tm = xfr_strt_tm = xfr_cplt_tm = ia64_get_itc();
-
-#define BTE_TIME_XFR_START() \
- xfr_strt_tm = ia64_get_itc();
-
-#define BTE_TIME_XFR_STOP() \
- xfr_cplt_tm = ia64_get_itc();
-
-#define BTE_TIME_STOP() \
- btcp_cplt_tm = ia64_get_itc(); \
- bte_setup_time = xfr_strt_tm - btcp_strt_tm; \
- bte_transfer_time = xfr_cplt_tm - xfr_strt_tm; \
- bte_tear_down_time = btcp_cplt_tm - xfr_cplt_tm; \
- bte_execute_time = btcp_cplt_tm - btcp_strt_tm; \
-
-#else /* BTE_TIME */
-#define BTE_TIME_DECLARE()
-#define BTE_TIME_START()
-#define BTE_TIME_XFR_START()
-#define BTE_TIME_XFR_STOP()
-#define BTE_TIME_STOP()
-#endif /* BTE_TIME */
-
-/*
- * bte_copy(src, dest, len, mode, notification)
- *
- * use the block transfer engine to move kernel
- * memory from src to dest using the assigned mode.
- *
- * Paramaters:
- * src - physical address of the transfer source.
- * dest - physical address of the transfer destination.
- * len - number of bytes to transfer from source to dest.
- * mode - hardware defined. See reference information
- * for IBCT0/1 in the SHUB Programmers Reference
- * notification - kernel virtual address of the notification cache
- * line. If NULL, the default is used and
- * the bte_copy is synchronous.
- *
- * NOTE: This function requires src, dest, and len to
- * be cache line aligned.
- */
-extern __inline__ bte_result_t
-bte_copy(u64 src, u64 dest, u64 len, u64 mode, void *notification)
-{
-#ifdef CONFIG_IA64_SGI_BTE_LOCKING
- int bte_to_use;
-#endif /* CONFIG_IA64_SGI_BTE_LOCKING */
- u64 transfer_size;
- u64 lines_remaining;
- bteinfo_t *bte;
- BTE_TIME_DECLARE();
-
- BTE_TIME_START();
-
- BTE_PRINTK(("bte_copy (0x%lx, 0x%lx, 0x%lx, 0x%lx, 0x%lx)\n",
- src, dest, len, mode, notification));
-
- if (len == 0) {
- BTE_TIME_STOP();
- return (BTE_SUCCESS);
- }
-
- ASSERT(!((len & L1_CACHE_MASK) ||
- (src & L1_CACHE_MASK) || (dest & L1_CACHE_MASK)));
-
- ASSERT(len < ((BTE_LEN_MASK + 1) << L1_CACHE_SHIFT));
-
-#ifdef CONFIG_IA64_SGI_BTE_LOCKING
- {
- bte_to_use = 0;
-
- /* Attempt to lock one of the BTE interfaces */
- while ((bte_to_use < BTES_PER_NODE) &&
- BTE_LOCK_IF_AVAIL(bte_to_use)) {
-
- bte_to_use++;
- }
-
- if ((bte_to_use >= BTES_PER_NODE) &&
- !(mode & BTE_WACQUIRE)) {
- BTE_TIME_STOP();
- return (BTEFAIL_NOTAVAIL);
- }
-
- /* Wait until a bte is available. */
- }
- while (bte_to_use >= BTES_PER_NODE);
-
- bte = pda.cpu_bte_if[bte_to_use];
- BTE_PRINTKV(("Got a lock on bte %d\n", bte_to_use));
-#else
- /* Assuming one BTE per CPU. */
- bte = pda->cpu_bte_if[0];
-#endif /* CONFIG_IA64_SGI_BTE_LOCKING */
-
- /*
- * The following are removed for optimization but is
- * available in the event that the SHUB exhibits
- * notification problems similar to the hub, bedrock et al.
- *
- * bte->mostRecentSrc = src;
- * bte->mostRecentDest = dest;
- * bte->mostRecentLen = len;
- * bte->mostRecentMode = mode;
- */
- if (notification == NULL) {
- /* User does not want to be notified. */
- bte->most_rcnt_na = &bte->notify;
- } else {
- bte->most_rcnt_na = notification;
- }
-
- /* Calculate the number of cache lines to transfer. */
- transfer_size = ((len >> L1_CACHE_SHIFT) & BTE_LEN_MASK);
-
- BTE_PRINTKV(("Calculated transfer size of %d cache lines\n",
- transfer_size));
-
- /* Initialize the notification to a known value. */
- *bte->most_rcnt_na = -1L;
-
-
- BTE_PRINTKV(("Before, status is 0x%lx and notify is 0x%lx\n",
- HUB_L(BTEREG_LNSTAT_ADDR),
- *bte->most_rcnt_na));
-
- /* Set the status reg busy bit and transfer length */
- BTE_PRINTKV(("IBLS - HUB_S(0x%lx, 0x%lx)\n",
- BTEREG_LNSTAT_ADDR, IBLS_BUSY | transfer_size));
- HUB_S(BTEREG_LNSTAT_ADDR, (IBLS_BUSY | transfer_size));
-
- /* Set the source and destination registers */
- BTE_PRINTKV(("IBSA - HUB_S(0x%lx, 0x%lx)\n", BTEREG_SRC_ADDR,
- (TO_PHYS(src))));
- HUB_S(BTEREG_SRC_ADDR, (TO_PHYS(src)));
- BTE_PRINTKV(("IBDA - HUB_S(0x%lx, 0x%lx)\n", BTEREG_DEST_ADDR,
- (TO_PHYS(dest))));
- HUB_S(BTEREG_DEST_ADDR, (TO_PHYS(dest)));
-
- /* Set the notification register */
- BTE_PRINTKV(("IBNA - HUB_S(0x%lx, 0x%lx)\n", BTEREG_NOTIF_ADDR,
- (TO_PHYS(__pa(bte->most_rcnt_na)))));
- HUB_S(BTEREG_NOTIF_ADDR, (TO_PHYS(__pa(bte->most_rcnt_na))));
-
- /* Initiate the transfer */
- BTE_PRINTKV(("IBCT - HUB_S(0x%lx, 0x%lx)\n", BTEREG_CTRL_ADDR, mode));
- BTE_TIME_XFR_START();
- HUB_S(BTEREG_CTRL_ADDR, BTE_VLD_MODE(mode));
-
- BTE_PRINTKV(("Initiated, status is 0x%lx and notify is 0x%lx\n",
- HUB_L(BTEREG_LNSTAT_ADDR),
- *bte->most_rcnt_na));
-
- if (notification == NULL) {
- /*
- * Calculate our timeout
- *
- * What are we doing here? We are trying to determine
- * the fastest time the BTE could have transfered our
- * block of data. By takine the clock frequency (ticks/sec)
- * divided by the BTE MaxT Transfer Rate (lines/sec)
- * times the transfer size (lines), we get a tick
- * offset from current time that the transfer should
- * complete.
- *
- * Why do this? We are watching for a notification
- * failure from the BTE. This behaviour has been
- * seen in the SN0 and SN1 hardware on rare circumstances
- * and is expected in SN2. By checking at the
- * ideal transfer timeout, we minimize our time
- * delay from hardware completing our request and
- * our detecting the failure.
- */
- bte->ideal_xfr_tmo = BTE_IDEAL_TMO(transfer_size);
-
- while (bte->notify == -1UL) {
- /*
- * Notification Workaround: When the max
- * theoretical time has elapsed, read the hub
- * status register into the notification area.
- * This fakes the shub performing the copy.
- */
- BTE_PRINTKV((" Timing. IBLS = 0x%lx, "
- "notify= 0x%lx\n",
- HUB_L(BTEREG_LNSTAT_ADDR),
- bte->notify));
- if (time_after(jiffies, bte->ideal_xfr_tmo)) {
- lines_remaining = HUB_L(BTEREG_LNSTAT_ADDR) &
- BTE_LEN_MASK;
- bte->ideal_xfr_tmo_cnt++;
- bte->ideal_xfr_tmo =
- BTE_IDEAL_TMO(lines_remaining);
-
- BTE_PRINTKV((" Timeout. cpu %d "
- "IBLS = 0x%lx, "
- "notify= 0x%lx, "
- "Lines remaining = %d. "
- "New timeout = %d.\n",
- smp_processor_id(),
- HUB_L(BTEREG_LNSTAT_ADDR),
- bte->notify,
- lines_remaining,
- bte->ideal_xfr_tmo));
- }
- }
- BTE_PRINTKV((" Delay Done. IBLS = 0x%lx, notify= 0x%lx\n",
- HUB_L(BTEREG_LNSTAT_ADDR),
- bte->notify));
- BTE_TIME_XFR_STOP();
- if (bte->notify & IBLS_ERROR) {
- /* >>> Need to do real error checking. */
- transfer_size = 0;
-
-#ifdef CONFIG_IA64_SGI_BTE_LOCKING
- spin_unlock(&(bte->spinlock));
-#endif /* CONFIG_IA64_SGI_BTE_LOCKING */
- BTE_PRINTKV(("Erroring status is 0x%lx and "
- "notify is 0x%lx\n",
- HUB_L(BTEREG_LNSTAT_ADDR),
- bte->notify));
-
- BTE_TIME_STOP();
- bte->notify = 0L;
- return (BTEFAIL_ERROR);
- }
-
- }
-#ifdef CONFIG_IA64_SGI_BTE_LOCKING
- spin_unlock(&(bte->spinlock));
-#endif /* CONFIG_IA64_SGI_BTE_LOCKING */
- BTE_TIME_STOP();
- BTE_PRINTKV(("Returning status is 0x%lx and notify is 0x%lx\n",
- HUB_L(BTEREG_LNSTAT_ADDR),
- *bte->most_rcnt_na));
-
- return (BTE_SUCCESS);
-}
-
-/*
- * Define the bte_unaligned_copy as an extern.
- */
-extern bte_result_t bte_unaligned_copy(u64, u64, u64, u64);
-
-/*
- * The following is the prefered way of calling bte_unaligned_copy
- * If the copy is fully cache line aligned, then bte_copy is
- * used instead. Since bte_copy is inlined, this saves a call
- * stack. NOTE: bte_copy is called synchronously and does block
- * until the transfer is complete. In order to get the asynch
- * version of bte_copy, you must perform this check yourself.
- */
-#define BTE_UNALIGNED_COPY(src, dest, len, mode) \
- (((len & L1_CACHE_MASK) || (src & L1_CACHE_MASK) || \
- (dest & L1_CACHE_MASK)) ? \
- bte_unaligned_copy(src, dest, len, mode) : \
- bte_copy(src, dest, len, mode, NULL))
-
-#endif /* _ASM_IA64_SN_BTE_COPY_H */
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
- * Copyright (C) 1992 - 1997, 2000-2001 Silicon Graphics, Inc. All rights reserved.
+ * Copyright (C) 1992-1997,2000-2003 Silicon Graphics, Inc. All rights reserved.
*/
#ifndef _ASM_IA64_SN_CDL_H
#define _ASM_IA64_SN_CDL_H
#include <asm/sn/sgi.h>
+struct cdl {
+ int part_num; /* Part number */
+ int mfg_num; /* Part MFG number */
+ int (*attach)(vertex_hdl_t); /* Attach routine */
+};
+
+
/*
* cdl: connection/driver list
*
typedef struct cdl *cdl_p;
/*
- * cdl_itr_f is the type for the functions
- * that are handled by cdl_iterate.
- */
-
-typedef void
-cdl_iter_f (devfs_handle_t vhdl);
-
-/*
- * cdl_drv_f is the type for the functions
- * that are called by cdl_add_driver and
- * cdl_del_driver.
- */
-
-typedef void
-cdl_drv_f (devfs_handle_t vhdl, int key1, int key2, int error);
-
-/*
- * If CDL_PRI_HI is specified in the flags
- * parameter for cdl_add_driver, then that driver's
- * attach routine will be called for future connect
- * points before any (non-CDL_PRI_HI) drivers.
- *
- * The IOC3 driver uses this facility to make sure
- * that the ioc3_attach() function is called before
- * the attach routines of any subdevices.
- *
- * Drivers for bridge-based crosstalk cards that
- * are almost but not quite generic can use it to
- * arrange that their attach() functions get called
- * before the generic bridge drivers, so they can
- * leave behind "hint" structures that will
- * properly configure the generic driver.
- */
-#define CDL_PRI_HI 0x0001
-
-/*
- * cdl_new: construct a new connection/driver list
- *
- * Called once for each "kind" of bus. Returns an
- * opaque cookie representing the particular list
- * that will be operated on by the other calls.
- */
-extern cdl_p cdl_new(char *, char *, char *);
-
-/*
- * cdl_del: destroy a connection/driver list.
- *
- * Releases all dynamically allocated resources
- * associated with the specified list. Forgets what
- * drivers might be involved in this kind of bus,
- * forgets what connection points have been noticed
- * on this kind of bus.
- */
-extern void cdl_del(cdl_p reg);
-
-/*
- * cdl_add_driver: register a device driver
- *
- * Calls the driver's attach routine with all
- * connection points on the list that have the same
- * key information as the driver; call-back the
- * specified function to notify the driver of the
- * attach status for each device. Place the driver
- * on the list so that any connection points
- * discovered in the future that match the driver
- * can be handed off to the driver's attach
- * routine.
- *
- * CDL_PRI_HI may be specified (see above).
- */
-
-extern int cdl_add_driver(cdl_p reg,
- int key1,
- int key2,
- char *prefix,
- int flags,
- cdl_drv_f *func);
-
-/*
- * cdl_del_driver: remove a device driver
- *
- * Calls the driver's detach routine with all
- * connection points on the list that match the
- * driver; call-back the specified function to
- * notify the driver of the detach status for each
- * device. Then forget about the driver. Future
- * calls to cdl_add_connpt with connections that
- * would match this driver no longer trigger calls
- * to the driver's attach routine.
- *
- * NOTE: Yes, I said CONNECTION POINTS, not
- * verticies that the driver has been attached to
- * with hwgraph_driver_add(); this gives the driver
- * a chance to clean up anything it did to the
- * connection point in its attach routine. Also,
- * this is done whether or not the attach routine
- * was successful.
- */
-extern void cdl_del_driver(cdl_p reg,
- char *prefix,
- cdl_drv_f *func);
-
-/*
* cdl_add_connpt: add a connection point
*
* Calls the attach routines of all the drivers on
* the list that match this connection point, in
- * the order that they were added to the list,
- * except that CDL_PRI_HI drivers are called first.
- *
- * Then the vertex is added to the list, so it can
- * be presented to any matching drivers that may be
- * subsequently added to the list.
+ * the order that they were added to the list.
*/
-extern int cdl_add_connpt(cdl_p reg,
- int key1,
+extern int cdl_add_connpt(int key1,
int key2,
- devfs_handle_t conn,
+ vertex_hdl_t conn,
int drv_flags);
-
-/*
- * cdl_del_connpt: delete a connection point
- *
- * Calls the detach routines of all matching
- * drivers for this connection point, in the same
- * order that the attach routines were called; then
- * forgets about this vertex, so drivers added in
- * the future will not be told about it.
- *
- * NOTE: Same caveat here about the detach calls as
- * in the cdl_del_driver() comment above.
- */
-extern int cdl_del_connpt(cdl_p reg,
- int key1,
- int key2,
- devfs_handle_t conn,
- int drv_flags);
-
-/*
- * cdl_iterate: find all verticies in the registry
- * corresponding to the named driver and call them
- * with the specified function (giving the vertex
- * as the parameter).
- */
-
-extern void cdl_iterate(cdl_p reg,
- char *prefix,
- cdl_iter_f *func);
-
-/*
- * An INFO_LBL_ASYNC_ATTACH label is attached to a vertex, pointing to
- * an instance of async_attach_s to indicate that asynchronous
- * attachment may be applied to that device ... if the corresponding
- * driver allows it.
- */
-
-struct async_attach_s {
- struct semaphore async_sema;
- int async_count;
-};
-typedef struct async_attach_s *async_attach_t;
-
-async_attach_t async_attach_new(void);
-void async_attach_free(async_attach_t);
-async_attach_t async_attach_get_info(devfs_handle_t);
-void async_attach_add_info(devfs_handle_t, async_attach_t);
-void async_attach_del_info(devfs_handle_t);
-void async_attach_signal_start(async_attach_t);
-void async_attach_signal_done(async_attach_t);
-void async_attach_waitall(async_attach_t);
-
#endif /* _ASM_IA64_SN_CDL_H */
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
- * Copyright (C) 2000-2002 Silicon Graphics, Inc. All rights reserved.
+ * Copyright (C) 2000-2003 Silicon Graphics, Inc. All rights reserved.
*/
/*
typedef long clkreg_t;
extern unsigned long sn_rtc_cycles_per_second;
-
-#if defined(CONFIG_IA64_SGI_SN1)
-#include <asm/sn/sn1/bedrock.h>
-#include <asm/sn/sn1/hubpi_next.h>
-
-extern nasid_t master_nasid;
-
-#define RTC_MASK (0x007fffffffffffff)
-/* clocks are not synchronized yet on SN1 - used node 0 (problem if no NASID 0) */
-#define RTC_COUNTER_ADDR ((clkreg_t*)REMOTE_HUB_ADDR(master_nasid, PI_RT_COUNTER))
-#define RTC_COMPARE_A_ADDR ((clkreg_t*)REMOTE_HUB_ADDR(master_nasid, PI_RT_COMPARE_A))
-#define RTC_COMPARE_B_ADDR ((clkreg_t*)REMOTE_HUB_ADDR(master_nasid, PI_RT_COMPARE_B))
-#define RTC_INT_PENDING_A_ADDR ((clkreg_t*)REMOTE_HUB_ADDR(master_nasid, PI_RT_INT_PEND_A))
-#define RTC_INT_PENDING_B_ADDR ((clkreg_t*)REMOTE_HUB_ADDR(master_nasid, PI_RT_INT_PEND_B))
-#define RTC_INT_ENABLED_A_ADDR ((clkreg_t*)REMOTE_HUB_ADDR(master_nasid, PI_RT_INT_EN_A))
-#define RTC_INT_ENABLED_B_ADDR ((clkreg_t*)REMOTE_HUB_ADDR(master_nasid, PI_RT_INT_EN_B))
-#else /* !CONFIG_IA64_SGI_SN1 */
#include <asm/sn/addrs.h>
#include <asm/sn/sn2/addrs.h>
#include <asm/sn/sn2/shubio.h>
#include <asm/sn/sn2/shub_mmr.h>
-#define RTC_MASK (SH_RTC_MASK)
#define RTC_COUNTER_ADDR ((clkreg_t*)LOCAL_MMR_ADDR(SH_RTC))
#define RTC_COMPARE_A_ADDR ((clkreg_t*)LOCAL_MMR_ADDR(SH_RTC))
#define RTC_COMPARE_B_ADDR ((clkreg_t*)LOCAL_MMR_ADDR(SH_RTC))
#define RTC_INT_PENDING_B_ADDR ((clkreg_t*)LOCAL_MMR_ADDR(SH_RTC))
#define RTC_INT_ENABLED_A_ADDR ((clkreg_t*)LOCAL_MMR_ADDR(SH_RTC))
#define RTC_INT_ENABLED_B_ADDR ((clkreg_t*)LOCAL_MMR_ADDR(SH_RTC))
-#endif /* CONFIG_IA64_SGI_SN1 */
-
#define GET_RTC_COUNTER() (*RTC_COUNTER_ADDR)
#define rtc_time() GET_RTC_COUNTER()
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
- * Copyright (C) 1992 - 1997, 2000-2001 Silicon Graphics, Inc. All rights reserved.
+ * Copyright (C) 1992-1997,2000-2003 Silicon Graphics, Inc. All rights reserved.
*/
#ifndef _ASM_IA64_SN_DMAMAP_H
#define _ASM_IA64_SN_DMAMAP_H
extern int dma_map(dmamap_t *, caddr_t, int);
extern int dma_map2(dmamap_t *, caddr_t, caddr_t, int);
extern paddr_t dma_mapaddr(dmamap_t *, caddr_t);
-#ifdef LATER
-extern int dma_mapbp(dmamap_t *, buf_t *, int);
-#endif
extern int dma_map_alenlist(dmamap_t *, struct alenlist_s *, size_t);
extern uint ev_kvtoiopnum(caddr_t);
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
- * Copyright (C) 1992 - 1997, 2000-2002 Silicon Graphics, Inc. All rights reserved.
+ * Copyright (C) 1992 - 1997, 2000-2003 Silicon Graphics, Inc. All rights reserved.
*/
#ifndef _ASM_IA64_SN_DRIVER_H
#define _ASM_IA64_SN_DRIVER_H
-#include <linux/devfs_fs_kernel.h>
+#include <asm/sn/sgi.h>
#include <asm/types.h>
/*
/* TBD: allocated badwidth requirements */
/* interrupt description */
- devfs_handle_t intr_target; /* Hardware locator string */
+ vertex_hdl_t intr_target; /* Hardware locator string */
int intr_policy; /* TBD */
ilvl_t intr_swlevel; /* software level for blocking intr */
char *intr_name; /* name of interrupt, if any */
+++ /dev/null
-/* $Id$
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Public interface for reading Atmel EEPROMs via L1 system controllers
- *
- * Copyright (C) 1992 - 1997, 2000-2002 Silicon Graphics, Inc. All rights reserved.
- */
-#ifndef _ASM_IA64_SN_EEPROM_H
-#define _ASM_IA64_SN_EEPROM_H
-
-#include <linux/config.h>
-#include <asm/sn/sgi.h>
-#include <asm/sn/vector.h>
-#include <asm/sn/xtalk/xbow.h>
-#include <asm/sn/pci/bridge.h>
-#include <asm/sn/nic.h>
-
-/*
- * The following structures are an implementation of the EEPROM info
- * areas described in the SN1 EEPROM spec and the IPMI FRU Information
- * Storage definition
- */
-
-/* Maximum lengths for EEPROM fields
- */
-#define EEPROM_PARTNUM_LEN 20
-#define EEPROM_SERNUM_LEN 10
-#define EEPROM_MANUF_NAME_LEN 10
-#define EEPROM_PROD_NAME_LEN 14
-
-
-
-/* The EEPROM "common header", which contains offsets to the other
- * info areas in the EEPROM
- */
-typedef struct eeprom_common_hdr_t
-{
- uchar_t format; /* common header format byte */
- uchar_t internal_use; /* offsets to various info areas */
- uchar_t chassis; /* (in doubleword units) */
- uchar_t board;
- uchar_t product;
- uchar_t multi_record;
- uchar_t pad;
- uchar_t checksum;
-} eeprom_common_hdr_t;
-
-
-/* The chassis (brick) info area
- */
-typedef struct eeprom_chassis_ia_t
-{
- uchar_t format; /* format byte */
- uchar_t length; /* info area length in doublewords */
- uchar_t type; /* chassis type (always 0x17 "rack mount") */
- uchar_t part_num_tl; /* type/length of part number field */
-
- char part_num[EEPROM_PARTNUM_LEN];
- /* ASCII part number */
-
- uchar_t serial_num_tl; /* type/length of serial number field */
-
- char serial_num[EEPROM_SERNUM_LEN];
- /* ASCII serial number */
-
- uchar_t checksum;
-
-} eeprom_chassis_ia_t;
-
-
-/* The board info area
- */
-typedef struct eeprom_board_ia_t
-{
- uchar_t format; /* format byte */
- uchar_t length; /* info area length in doublewords */
- uchar_t language; /* language code, always 0x00 "English" */
- int mfg_date; /* date & time of manufacture, in minutes
- since 0:00 1/1/96 */
- uchar_t manuf_tl; /* type/length of manufacturer name field */
-
- char manuf[EEPROM_MANUF_NAME_LEN];
- /* ASCII manufacturer name */
-
- uchar_t product_tl; /* type/length of product name field */
-
- char product[EEPROM_PROD_NAME_LEN];
- /* ASCII product name */
-
- uchar_t serial_num_tl; /* type/length of board serial number */
-
- char serial_num[EEPROM_SERNUM_LEN];
- /* ASCII serial number */
-
- uchar_t part_num_tl; /* type/length of board part number */
-
- char part_num[EEPROM_PARTNUM_LEN];
- /* ASCII part number */
-
- /*
- * "custom" fields -- see SN1 EEPROM Spec
- */
- uchar_t board_rev_tl; /* type/length of board rev (always 0xC2) */
-
- char board_rev[2]; /* ASCII board revision */
-
- uchar_t eeprom_size_tl; /* type/length of eeprom size field */
- uchar_t eeprom_size; /* size code for eeprom */
- uchar_t temp_waiver_tl; /* type/length of temp waiver field (0xC2) */
- char temp_waiver[2]; /* temp waiver */
-
-
- /*
- * these fields only appear in main boards' EEPROMs
- */
- uchar_t ekey_G_tl; /* type/length of encryption key "G" */
- uint32_t ekey_G; /* encryption key "G" */
- uchar_t ekey_P_tl; /* type/length of encryption key "P" */
- uint32_t ekey_P; /* encryption key "P" */
- uchar_t ekey_Y_tl; /* type/length of encryption key "Y" */
- uint32_t ekey_Y; /* encryption key "Y" */
-
-
- /*
- * these fields are used for I bricks only
- */
- uchar_t mac_addr_tl; /* type/length of MAC address */
- char mac_addr[12]; /* MAC address */
- uchar_t ieee1394_cfg_tl; /* type/length of IEEE 1394 info */
- uchar_t ieee1394_cfg[32]; /* IEEE 1394 config info */
-
-
- /*
- * all boards have a checksum
- */
- uchar_t checksum;
-
-} eeprom_board_ia_t;
-
-/* given a pointer to the three-byte little-endian EEPROM representation
- * of date-of-manufacture, this function translates to a big-endian
- * integer format
- */
-int eeprom_xlate_board_mfr_date( uchar_t *src );
-
-
-/* EEPROM Serial Presence Detect record (used for DIMMs in IP35)
- */
-typedef struct eeprom_spd_t
-{
- /* 0*/ uchar_t spd_used; /* # of bytes written to serial memory by manufacturer */
- /* 1*/ uchar_t spd_size; /* Total # of bytes of SPD memory device */
- /* 2*/ uchar_t mem_type; /* Fundamental memory type (FPM, EDO, SDRAM..) */
- /* 3*/ uchar_t num_rows; /* # of row addresses on this assembly */
- /* 4*/ uchar_t num_cols; /* # Column Addresses on this assembly */
- /* 5*/ uchar_t mod_rows; /* # Module Rows on this assembly */
- /* 6*/ uchar_t data_width[2]; /* Data Width of this assembly (16b little-endian) */
- /* 8*/ uchar_t volt_if; /* Voltage interface standard of this assembly */
- /* 9*/ uchar_t cyc_time; /* SDRAM Cycle time, CL=X (highest CAS latency) */
- /* A*/ uchar_t acc_time; /* SDRAM Access from Clock (highest CAS latency) */
- /* B*/ uchar_t dimm_cfg; /* DIMM Configuration type (non-parity, ECC) */
- /* C*/ uchar_t refresh_rt; /* Refresh Rate/Type */
- /* D*/ uchar_t prim_width; /* Primary SDRAM Width */
- /* E*/ uchar_t ec_width; /* Error Checking SDRAM width */
- /* F*/ uchar_t min_delay; /* Min Clock Delay Back to Back Random Col Address */
- /*10*/ uchar_t burst_len; /* Burst Lengths Supported */
- /*11*/ uchar_t num_banks; /* # of Banks on Each SDRAM Device */
- /*12*/ uchar_t cas_latencies; /* CAS# Latencies Supported */
- /*13*/ uchar_t cs_latencies; /* CS# Latencies Supported */
- /*14*/ uchar_t we_latencies; /* Write Latencies Supported */
- /*15*/ uchar_t mod_attrib; /* SDRAM Module Attributes */
- /*16*/ uchar_t dev_attrib; /* SDRAM Device Attributes: General */
- /*17*/ uchar_t cyc_time2; /* Min SDRAM Cycle time at CL X-1 (2nd highest CAS latency) */
- /*18*/ uchar_t acc_time2; /* SDRAM Access from Clock at CL X-1 (2nd highest CAS latency) */
- /*19*/ uchar_t cyc_time3; /* Min SDRAM Cycle time at CL X-2 (3rd highest CAS latency) */
- /*1A*/ uchar_t acc_time3; /* Max SDRAM Access from Clock at CL X-2 (3nd highest CAS latency) */
- /*1B*/ uchar_t min_row_prechg; /* Min Row Precharge Time (Trp) */
- /*1C*/ uchar_t min_ra_to_ra; /* Min Row Active to Row Active (Trrd) */
- /*1D*/ uchar_t min_ras_to_cas; /* Min RAS to CAS Delay (Trcd) */
- /*1E*/ uchar_t min_ras_pulse; /* Minimum RAS Pulse Width (Tras) */
- /*1F*/ uchar_t row_density; /* Density of each row on module */
- /*20*/ uchar_t ca_setup; /* Command and Address signal input setup time */
- /*21*/ uchar_t ca_hold; /* Command and Address signal input hold time */
- /*22*/ uchar_t d_setup; /* Data signal input setup time */
- /*23*/ uchar_t d_hold; /* Data signal input hold time */
-
- /*24*/ uchar_t pad0[26]; /* unused */
-
- /*3E*/ uchar_t data_rev; /* SPD Data Revision Code */
- /*3F*/ uchar_t checksum; /* Checksum for bytes 0-62 */
- /*40*/ uchar_t jedec_id[8]; /* Manufacturer's JEDEC ID code */
-
- /*48*/ uchar_t mfg_loc; /* Manufacturing Location */
- /*49*/ uchar_t part_num[18]; /* Manufacturer's Part Number */
-
- /*5B*/ uchar_t rev_code[2]; /* Revision Code */
-
- /*5D*/ uchar_t mfg_date[2]; /* Manufacturing Date */
-
- /*5F*/ uchar_t ser_num[4]; /* Assembly Serial Number */
-
- /*63*/ uchar_t manuf_data[27]; /* Manufacturer Specific Data */
-
- /*7E*/ uchar_t intel_freq; /* Intel specification frequency */
- /*7F*/ uchar_t intel_100MHz; /* Intel spec details for 100MHz support */
-
-} eeprom_spd_t;
-
-
-#define EEPROM_SPD_RECORD_MAXLEN 256
-
-typedef union eeprom_spd_u
-{
- eeprom_spd_t fields;
- char bytes[EEPROM_SPD_RECORD_MAXLEN];
-
-} eeprom_spd_u;
-
-
-/* EEPROM board record
- */
-typedef struct eeprom_brd_record_t
-{
- eeprom_chassis_ia_t *chassis_ia;
- eeprom_board_ia_t *board_ia;
- eeprom_spd_u *spd;
-
-} eeprom_brd_record_t;
-
-
-/* End-of-fields marker
- */
-#define EEPROM_EOF 0xc1
-
-
-/* masks for dissecting the type/length bytes
- */
-#define FIELD_FORMAT_MASK 0xc0
-#define FIELD_LENGTH_MASK 0x3f
-
-
-/* field format codes (used in type/length bytes)
- */
-#define FIELD_FORMAT_BINARY 0x00 /* binary format */
-#define FIELD_FORMAT_BCD 0x40 /* BCD */
-#define FIELD_FORMAT_PACKED 0x80 /* packed 6-bit ASCII */
-#define FIELD_FORMAT_ASCII 0xC0 /* 8-bit ASCII */
-
-
-
-
-/* codes specifying brick and board type
- */
-#define C_BRICK 0x100
-
-#define C_PIMM (C_BRICK | 0x10)
-#define C_PIMM_0 (C_PIMM) /* | 0x0 */
-#define C_PIMM_1 (C_PIMM | 0x1)
-
-#define C_DIMM (C_BRICK | 0x20)
-#define C_DIMM_0 (C_DIMM) /* | 0x0 */
-#define C_DIMM_1 (C_DIMM | 0x1)
-#define C_DIMM_2 (C_DIMM | 0x2)
-#define C_DIMM_3 (C_DIMM | 0x3)
-#define C_DIMM_4 (C_DIMM | 0x4)
-#define C_DIMM_5 (C_DIMM | 0x5)
-#define C_DIMM_6 (C_DIMM | 0x6)
-#define C_DIMM_7 (C_DIMM | 0x7)
-
-#define R_BRICK 0x200
-#define R_POWER (R_BRICK | 0x10)
-
-#define VECTOR 0x300 /* used in vector ops when the destination
- * could be a cbrick or an rbrick */
-
-#define IO_BRICK 0x400
-#define IO_POWER (IO_BRICK | 0x10)
-
-#define BRICK_MASK 0xf00
-#define SUBORD_MASK 0xf0 /* AND with component specification; if the
- the result is non-zero, then the component
- is a subordinate board of some kind */
-#define COMPT_MASK 0xf /* if there's more than one instance of a
- particular type of subordinate board, this
- masks out which one we're talking about */
-
-
-
-/* functions & macros for obtaining "NIC-like" strings from EEPROMs
- */
-
-#ifdef CONFIG_IA64_SGI_SN1
-
-int eeprom_str( char *nic_str, nasid_t nasid, int component );
-int vector_eeprom_str( char *nic_str, nasid_t nasid,
- int component, net_vec_t path );
-
-#define CBRICK_EEPROM_STR(s,n) eeprom_str((s),(n),C_BRICK)
-#define IOBRICK_EEPROM_STR(s,n) eeprom_str((s),(n),IO_BRICK)
-#define RBRICK_EEPROM_STR(s,n,p) vector_eeprom_str((s),(n),R_BRICK,p)
-#define VECTOR_EEPROM_STR(s,n,p) vector_eeprom_str((s),(n),VECTOR,p)
-
-#endif /* CONFIG_IA64_SGI_SN1 */
-
-
-/* functions for obtaining formatted records from EEPROMs
- */
-
-int cbrick_eeprom_read( eeprom_brd_record_t *buf, nasid_t nasid,
- int component );
-int iobrick_eeprom_read( eeprom_brd_record_t *buf, nasid_t nasid,
- int component );
-int vector_eeprom_read( eeprom_brd_record_t *buf, nasid_t nasid,
- net_vec_t path, int component );
-
-
-
-/* retrieve the ethernet MAC address for an I-brick
- */
-
-int ibrick_mac_addr_get( nasid_t nasid, char *eaddr );
-
-
-/* error codes
- */
-
-#define EEP_OK 0
-#define EEP_L1 1
-#define EEP_FAIL 2
-#define EEP_BAD_CHECKSUM 3
-#define EEP_NICIFY 4
-#define EEP_PARAM 6
-#define EEP_NOMEM 7
-
-
-
-/* given a hardware graph vertex and an indication of the brick type,
- * brick and board to be read, this functions reads the eeprom and
- * attaches a "NIC"-format string of manufacturing information to the
- * vertex. If the vertex already has the string, just returns the
- * string. If component is not VECTOR or R_BRICK, the path parameter
- * is ignored.
- */
-
-#ifdef LATER
-char *eeprom_vertex_info_set( int component, int nasid, devfs_handle_t v,
- net_vec_t path );
-#endif
-
-
-
-/* We may need to differentiate between an XBridge and other types of
- * bridges during discovery to tell whether the bridge in question
- * is part of an IO brick. The following function reads the WIDGET_ID
- * register of the bridge under examination and returns a positive value
- * if the part and mfg numbers stored there indicate that this widget
- * is an XBridge (and so must be part of a brick).
- */
-#ifdef LATER
-int is_iobrick( int nasid, int widget_num );
-#endif
-
-/* the following macro derives the widget number from the register
- * address passed to it and uses is_iobrick to determine whether
- * the widget in question is part of an SN1 IO brick.
- */
-#define IS_IOBRICK(rg) is_iobrick( NASID_GET((rg)), SWIN_WIDGETNUM((rg)) )
-
-
-
-/* macros for NIC compatibility */
-/* always invoked on "this" cbrick */
-#define HUB_VERTEX_MFG_INFO(v) \
- eeprom_vertex_info_set( C_BRICK, get_nasid(), (v), 0 )
-
-#define BRIDGE_VERTEX_MFG_INFO(v, r) \
- ( IS_IOBRICK((r)) ? eeprom_vertex_info_set \
- ( IO_BRICK, NASID_GET((r)), (v), 0 ) \
- : nic_bridge_vertex_info((v), (r)) )
-
-#endif /* _ASM_IA64_SN_EEPROM_H */
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
- * Copyright (c) 2001-2002 Silicon Graphics, Inc. All rights reserved.
+ * Copyright (c) 2001-2003 Silicon Graphics, Inc. All rights reserved.
*/
#ifndef _ASM_IA64_SN_FETCHOP_H
#ifdef __KERNEL__
/*
- * Initialize a FETCHOP line. The argument should point to the beginning
- * of the line.
- * SN1 - region mask is in word 0, data in word 1
- * SN2 - no region mask. Data in word 0
- */
-#ifdef CONFIG_IA64_SGI_SN1
-#define FETCHOP_INIT_LINE(p) *(p) = 0xffffffffffffffffUL
-#elif CONFIG_IA64_SGI_SN2
-#define FETCHOP_INIT_LINE(p)
-#endif
-
-/*
- * Convert a region 7 (kaddr) address to the address of the fetchop variable
+ * Convert a region 6 (kaddr) address to the address of the fetchop variable
*/
#define FETCHOP_KADDR_TO_MSPEC_ADDR(kaddr) TO_MSPEC(kaddr)
-/*
- * Convert a page struct (page) address to the address of the first
- * fetchop variable in the page
- */
-#define FETCHOP_PAGE_TO_MSPEC_ADDR(page) FETCHOP_KADDR_TO_MSPEC_ADDR(__pa(page_address(page)))
-
/*
* Each Atomic Memory Operation (AMO formerly known as fetchop)
* inconsistency.
*/
typedef struct {
-
-#ifdef CONFIG_IA64_SGI_SN1
- u64 permissions;
-#endif
u64 variable;
-
-#ifdef CONFIG_IA64_SGI_SN1
- u64 unused[6];
-#else
u64 unused[7];
-#endif
-
} AMO_t;
+/*
+ * The following APIs are externalized to the kernel to allocate/free fetchop variables.
+ * fetchop_kalloc_one - Allocate/initialize 1 fetchop variable on the specified cnode.
+ * fetchop_kfree_one - Free a previously allocated fetchop variable
+ */
+
+unsigned long fetchop_kalloc_one(int nid);
+void fetchop_kfree_one(unsigned long maddr);
+
+
#endif /* __KERNEL__ */
#endif /* _ASM_IA64_SN_FETCHOP_H */
+++ /dev/null
-/* $Id$
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Derived from IRIX <sys/SN/gda.h>.
- *
- * Copyright (C) 1992 - 1997, 2000-2001 Silicon Graphics, Inc. All rights reserved.
- *
- * gda.h -- Contains the data structure for the global data area,
- * The GDA contains information communicated between the
- * PROM, SYMMON, and the kernel.
- */
-#ifndef _ASM_IA64_SN_GDA_H
-#define _ASM_IA64_SN_GDA_H
-
-#include <asm/sn/addrs.h>
-#include <asm/sn/sn_cpuid.h>
-
-#define GDA_MAGIC 0x58464552
-
-/*
- * GDA Version History
- *
- * Version # | Change
- * -------------+-------------------------------------------------------
- * 1 | Initial IP27 version
- * 2 | Prom sets g_partid field to the partition number. 0 IS
- * | a valid partition #.
- */
-
-#define GDA_VERSION 2 /* Current GDA version # */
-
-#define G_MAGICOFF 0
-#define G_VERSIONOFF 4
-#define G_PROMOPOFF 6
-#define G_MASTEROFF 8
-#define G_VDSOFF 12
-#define G_HKDNORMOFF 16
-#define G_HKDUTLBOFF 24
-#define G_HKDXUTLBOFF 32
-#define G_PARTIDOFF 40
-#define G_TABLEOFF 128
-
-#ifndef __ASSEMBLY__
-
-typedef struct gda {
- u32 g_magic; /* GDA magic number */
- u16 g_version; /* Version of this structure */
- u16 g_masterid; /* The NASID:CPUNUM of the master cpu */
- u32 g_promop; /* Passes requests from the kernel to prom */
- u32 g_vds; /* Store the virtual dipswitches here */
- void **g_hooked_norm;/* ptr to pda loc for norm hndlr */
- void **g_hooked_utlb;/* ptr to pda loc for utlb hndlr */
- void **g_hooked_xtlb;/* ptr to pda loc for xtlb hndlr */
- int g_partid; /* partition id */
- int g_symmax; /* Max symbols in name table. */
- void *g_dbstab; /* Address of idbg symbol table */
- char *g_nametab; /* Address of idbg name table */
- void *g_ktext_repmask;
- /* Pointer to a mask of nodes with copies
- * of the kernel. */
- char g_padding[56]; /* pad out to 128 bytes */
- nasid_t g_nasidtable[MAX_COMPACT_NODES+1]; /* NASID of each node,
- * indexed by cnodeid.
- */
-} gda_t;
-
-#define GDA ((gda_t*) GDA_ADDR(get_nasid()))
-
-#endif /* __ASSEMBLY__ */
-/*
- * Define: PART_GDA_VERSION
- * Purpose: Define the minimum version of the GDA required, lower
- * revisions assume GDA is NOT set up, and read partition
- * information from the board info.
- */
-#define PART_GDA_VERSION 2
-
-/*
- * The following requests can be sent to the PROM during startup.
- */
-
-#define PROMOP_MAGIC 0x0ead0000
-#define PROMOP_MAGIC_MASK 0x0fff0000
-
-#define PROMOP_BIST_SHIFT 11
-#define PROMOP_BIST_MASK (0x3 << 11)
-
-#define PROMOP_REG PI_ERR_STACK_ADDR_A
-
-#define PROMOP_INVALID (PROMOP_MAGIC | 0x00)
-#define PROMOP_HALT (PROMOP_MAGIC | 0x10)
-#define PROMOP_POWERDOWN (PROMOP_MAGIC | 0x20)
-#define PROMOP_RESTART (PROMOP_MAGIC | 0x30)
-#define PROMOP_REBOOT (PROMOP_MAGIC | 0x40)
-#define PROMOP_IMODE (PROMOP_MAGIC | 0x50)
-
-#define PROMOP_CMD_MASK 0x00f0
-#define PROMOP_OPTIONS_MASK 0xfff0
-
-#define PROMOP_SKIP_DIAGS 0x0100 /* don't bother running diags */
-#define PROMOP_SKIP_MEMINIT 0x0200 /* don't bother initing memory */
-#define PROMOP_SKIP_DEVINIT 0x0400 /* don't bother initing devices */
-#define PROMOP_BIST1 0x0800 /* keep track of which BIST ran */
-#define PROMOP_BIST2 0x1000 /* keep track of which BIST ran */
-
-#endif /* _ASM_IA64_SN_GDA_H */
* GEO_MAX_LEN: The maximum length of a geoid, formatted for printing
*/
-#include <linux/config.h>
-
-#ifdef CONFIG_IA64_SGI_SN2
#include <asm/sn/sn2/geo.h>
-#else
-
-#error <<BOMB! need geo.h for this platform >>
-
-#endif /* !SN2 && ... */
/* Declarations applicable to all platforms */
+++ /dev/null
-/*
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Copyright (C) 2000-2001 Silicon Graphics, Inc. All rights reserved.
- */
-
-
-#ifndef _ASM_IA64_SN_HACK_H
-#define _ASM_IA64_SN_HACK_H
-
-#include <linux/mmzone.h>
-#include <asm/sn/arch.h>
-#include <asm/sn/types.h>
-#include <asm/uaccess.h> /* for copy_??_user */
-
-/******************************************
- * Definitions that do not exist in linux *
- ******************************************/
-
-typedef int cred_t; /* This is for compilation reasons */
-struct cred { int x; };
-
-
-/*
- * Hardware Graph routines that are currently stubbed!
- */
-#include <linux/devfs_fs_kernel.h>
-
-#define DELAY(a)
-
-/************************************************
- * Routines redefined to use linux equivalents. *
- ************************************************/
-
-/* #define FIXME(s) printk("FIXME: [ %s ] in %s at %s:%d\n", s, __FUNCTION__, __FILE__, __LINE__) */
-
-#define FIXME(s)
-
-extern devfs_handle_t dummy_vrtx;
-#define cpuid_to_vertex(cpuid) dummy_vrtx /* (pdaindr[cpuid].pda->p_vertex) */
-
-#define PUTBUF_LOCK(a) { FIXME("PUTBUF_LOCK"); }
-#define PUTBUF_UNLOCK(a) { FIXME("PUTBUF_UNLOCK"); }
-
-typedef int (*splfunc_t)(void);
-
-/* move to stubs.c yet */
-#define dev_to_vhdl(dev) 0
-#define get_timestamp() 0
-#define us_delay(a)
-#define v_mapphys(a,b,c) 0 // printk("Fixme: v_mapphys - soft->base 0x%p\n", b);
-#define splhi() 0
-#define spl7 splhi()
-#define splx(s)
-
-extern void * snia_kmem_alloc_node(register size_t, register int, cnodeid_t);
-extern void * snia_kmem_zalloc(size_t, int);
-extern void * snia_kmem_zalloc_node(register size_t, register int, cnodeid_t );
-extern void * snia_kmem_zone_alloc(register struct zone *, int);
-extern struct zone * snia_kmem_zone_init(register int , char *);
-extern void snia_kmem_zone_free(register struct zone *, void *);
-extern int is_specified(char *);
-extern int cap_able(uint64_t);
-extern int compare_and_swap_ptr(void **, void *, void *);
-
-#endif /* _ASM_IA64_SN_HACK_H */
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
- * Copyright (C) 1992 - 1997, 2000-2001 Silicon Graphics, Inc. All rights reserved.
+ * Copyright (C) 1992-1997,2000-2003 Silicon Graphics, Inc. All rights reserved.
*/
#ifndef _ASM_IA64_SN_HCL_H
#define _ASM_IA64_SN_HCL_H
#include <asm/sn/sgi.h>
-#include <asm/sn/invent.h>
-#include <linux/devfs_fs_kernel.h>
-extern devfs_handle_t hcl_handle; /* HCL driver */
-extern devfs_handle_t hwgraph_root;
-extern devfs_handle_t linux_busnum;
+extern vertex_hdl_t hwgraph_root;
+extern vertex_hdl_t linux_busnum;
typedef long labelcl_info_place_t;
/*
* External declarations of EXPORTED SYMBOLS in hcl.c
*/
-extern devfs_handle_t hwgraph_register(devfs_handle_t, const char *,
+extern vertex_hdl_t hwgraph_register(vertex_hdl_t, const char *,
unsigned int, unsigned int, unsigned int, unsigned int,
umode_t, uid_t, gid_t, struct file_operations *, void *);
-extern int hwgraph_mk_symlink(devfs_handle_t, const char *, unsigned int,
- unsigned int, const char *, unsigned int, devfs_handle_t *, void *);
+extern int hwgraph_mk_symlink(vertex_hdl_t, const char *, unsigned int,
+ unsigned int, const char *, unsigned int, vertex_hdl_t *, void *);
-extern int hwgraph_vertex_destroy(devfs_handle_t);
+extern int hwgraph_vertex_destroy(vertex_hdl_t);
-extern int hwgraph_edge_add(devfs_handle_t, devfs_handle_t, char *);
-extern int hwgraph_edge_get(devfs_handle_t, char *, devfs_handle_t *);
+extern int hwgraph_edge_add(vertex_hdl_t, vertex_hdl_t, char *);
+extern int hwgraph_edge_get(vertex_hdl_t, char *, vertex_hdl_t *);
-extern arbitrary_info_t hwgraph_fastinfo_get(devfs_handle_t);
-extern void hwgraph_fastinfo_set(devfs_handle_t, arbitrary_info_t );
-extern devfs_handle_t hwgraph_mk_dir(devfs_handle_t, const char *, unsigned int, void *);
+extern arbitrary_info_t hwgraph_fastinfo_get(vertex_hdl_t);
+extern void hwgraph_fastinfo_set(vertex_hdl_t, arbitrary_info_t );
+extern vertex_hdl_t hwgraph_mk_dir(vertex_hdl_t, const char *, unsigned int, void *);
-extern int hwgraph_connectpt_set(devfs_handle_t, devfs_handle_t);
-extern devfs_handle_t hwgraph_connectpt_get(devfs_handle_t);
-extern int hwgraph_edge_get_next(devfs_handle_t, char *, devfs_handle_t *, uint *);
-extern graph_error_t hwgraph_edge_remove(devfs_handle_t, char *, devfs_handle_t *);
+extern int hwgraph_connectpt_set(vertex_hdl_t, vertex_hdl_t);
+extern vertex_hdl_t hwgraph_connectpt_get(vertex_hdl_t);
+extern int hwgraph_edge_get_next(vertex_hdl_t, char *, vertex_hdl_t *, uint *);
+extern graph_error_t hwgraph_edge_remove(vertex_hdl_t, char *, vertex_hdl_t *);
-extern graph_error_t hwgraph_traverse(devfs_handle_t, char *, devfs_handle_t *);
+extern graph_error_t hwgraph_traverse(vertex_hdl_t, char *, vertex_hdl_t *);
-extern int hwgraph_vertex_get_next(devfs_handle_t *, devfs_handle_t *);
-extern int hwgraph_inventory_get_next(devfs_handle_t, invplace_t *,
+extern int hwgraph_vertex_get_next(vertex_hdl_t *, vertex_hdl_t *);
+extern int hwgraph_inventory_get_next(vertex_hdl_t, invplace_t *,
inventory_t **);
-extern int hwgraph_inventory_add(devfs_handle_t, int, int, major_t, minor_t, int);
-extern int hwgraph_inventory_remove(devfs_handle_t, int, int, major_t, minor_t, int);
-extern int hwgraph_controller_num_get(devfs_handle_t);
-extern void hwgraph_controller_num_set(devfs_handle_t, int);
-extern int hwgraph_path_ad(devfs_handle_t, char *, devfs_handle_t *);
-extern devfs_handle_t hwgraph_path_to_vertex(char *);
-extern devfs_handle_t hwgraph_path_to_dev(char *);
-extern devfs_handle_t hwgraph_block_device_get(devfs_handle_t);
-extern devfs_handle_t hwgraph_char_device_get(devfs_handle_t);
-extern graph_error_t hwgraph_char_device_add(devfs_handle_t, char *, char *, devfs_handle_t *);
-extern int hwgraph_path_add(devfs_handle_t, char *, devfs_handle_t *);
-extern int hwgraph_info_add_LBL(devfs_handle_t, char *, arbitrary_info_t);
-extern int hwgraph_info_get_LBL(devfs_handle_t, char *, arbitrary_info_t *);
-extern int hwgraph_info_replace_LBL(devfs_handle_t, char *, arbitrary_info_t,
+extern int hwgraph_inventory_add(vertex_hdl_t, int, int, major_t, minor_t, int);
+extern int hwgraph_inventory_remove(vertex_hdl_t, int, int, major_t, minor_t, int);
+extern int hwgraph_controller_num_get(vertex_hdl_t);
+extern void hwgraph_controller_num_set(vertex_hdl_t, int);
+extern int hwgraph_path_ad(vertex_hdl_t, char *, vertex_hdl_t *);
+extern vertex_hdl_t hwgraph_path_to_vertex(char *);
+extern vertex_hdl_t hwgraph_path_to_dev(char *);
+extern vertex_hdl_t hwgraph_block_device_get(vertex_hdl_t);
+extern vertex_hdl_t hwgraph_char_device_get(vertex_hdl_t);
+extern graph_error_t hwgraph_char_device_add(vertex_hdl_t, char *, char *, vertex_hdl_t *);
+extern int hwgraph_path_add(vertex_hdl_t, char *, vertex_hdl_t *);
+extern int hwgraph_info_add_LBL(vertex_hdl_t, char *, arbitrary_info_t);
+extern int hwgraph_info_get_LBL(vertex_hdl_t, char *, arbitrary_info_t *);
+extern int hwgraph_info_replace_LBL(vertex_hdl_t, char *, arbitrary_info_t,
arbitrary_info_t *);
-extern int hwgraph_info_get_exported_LBL(devfs_handle_t, char *, int *, arbitrary_info_t *);
-extern int hwgraph_info_get_next_LBL(devfs_handle_t, char *, arbitrary_info_t *,
+extern int hwgraph_info_get_exported_LBL(vertex_hdl_t, char *, int *, arbitrary_info_t *);
+extern int hwgraph_info_get_next_LBL(vertex_hdl_t, char *, arbitrary_info_t *,
labelcl_info_place_t *);
-
-extern int hwgraph_path_lookup(devfs_handle_t, char *, devfs_handle_t *, char **);
-extern int hwgraph_info_export_LBL(devfs_handle_t, char *, int);
-extern int hwgraph_info_unexport_LBL(devfs_handle_t, char *);
-extern int hwgraph_info_remove_LBL(devfs_handle_t, char *, arbitrary_info_t *);
-extern char * vertex_to_name(devfs_handle_t, char *, uint);
-extern graph_error_t hwgraph_vertex_unref(devfs_handle_t);
-
+extern int hwgraph_path_lookup(vertex_hdl_t, char *, vertex_hdl_t *, char **);
+extern int hwgraph_info_export_LBL(vertex_hdl_t, char *, int);
+extern int hwgraph_info_unexport_LBL(vertex_hdl_t, char *);
+extern int hwgraph_info_remove_LBL(vertex_hdl_t, char *, arbitrary_info_t *);
+extern char * vertex_to_name(vertex_hdl_t, char *, uint);
+extern graph_error_t hwgraph_vertex_unref(vertex_hdl_t);
#endif /* _ASM_IA64_SN_HCL_H */
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
- * Copyright (C) 1992 - 1997, 2000-2002 Silicon Graphics, Inc. All rights reserved.
+ * Copyright (C) 1992 - 1997, 2000-2003 Silicon Graphics, Inc. All rights reserved.
*/
#ifndef _ASM_IA64_SN_HCL_UTIL_H
#include <linux/devfs_fs_kernel.h>
-extern char * dev_to_name(devfs_handle_t, char *, uint);
-extern int device_master_set(devfs_handle_t, devfs_handle_t);
-extern devfs_handle_t device_master_get(devfs_handle_t);
-extern cnodeid_t master_node_get(devfs_handle_t);
-extern cnodeid_t nodevertex_to_cnodeid(devfs_handle_t);
-extern void mark_nodevertex_as_node(devfs_handle_t, cnodeid_t);
+extern char * dev_to_name(vertex_hdl_t, char *, uint);
+extern int device_master_set(vertex_hdl_t, vertex_hdl_t);
+extern vertex_hdl_t device_master_get(vertex_hdl_t);
+extern cnodeid_t master_node_get(vertex_hdl_t);
+extern cnodeid_t nodevertex_to_cnodeid(vertex_hdl_t);
+extern void mark_nodevertex_as_node(vertex_hdl_t, cnodeid_t);
#endif /* _ASM_IA64_SN_HCL_UTIL_H */
+++ /dev/null
-/*
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Copyright (C) 2001 Silicon Graphics, Inc. All rights reserved.
- *
- * SGI Hi Resolution Clock
- *
- * SGI SN platforms provide a high resolution clock that is
- * synchronized across all nodes. The clock can be memory mapped
- * and directly read from user space.
- *
- * Access to the clock is thru the following:
- * (error checking not shown)
- *
- * (Note: should library routines be provided to encapsulate this??)
- *
- * int fd:
- * volatile long *clk;
- *
- * fd = open (HIRES_FULLNAME, O_RDONLY);
- * clk = mmap(0, getpagesize(), PROT_READ, MAP_SHARED, fd, 0);
- * clk += ioctl(fd, HIRES_IOCQGETOFFSET, 0);
- *
- * At this point, clk is a pointer to the high resolution clock.
- *
- * The clock period can be obtained via:
- *
- * long picosec_per_tick;
- * picosec_per_tick = ioctl(fd, HIRES_IOCQGETPICOSEC, 0);
- */
-
-#ifndef _ASM_IA64_SN_HIRES_CLOCK_H
-#define _ASM_IA64_SN_HIRES_CLOCK_H
-
-
-#define HIRES_BASENAME "sgi_hires_clock"
-#define HIRES_FULLNAME "/dev/sgi_hires_clock"
-#define HIRES_IOC_BASE 's'
-
-
-/* Get page offset of hires timer */
-#define HIRES_IOCQGETOFFSET _IO( HIRES_IOC_BASE, 0 )
-
-/* get clock period in picoseconds per tick */
-#define HIRES_IOCQGETPICOSEC _IO( HIRES_IOC_BASE, 1 )
-
-/* get number of significant bits in clock counter */
-#define HIRES_IOCQGETCLOCKBITS _IO( HIRES_IOC_BASE, 2 )
-
-#endif /* _ASM_IA64_SN_HIRES_CLOCK_H */
--- /dev/null
+#ifndef _ASM_IA64_SN_HWGFS_H
+#define _ASM_IA64_SN_HWGFS_H
+
+/* $Id$
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1992 - 1997, 2000-2003 Silicon Graphics, Inc. All rights reserved.
+ */
+
+typedef struct dentry *hwgfs_handle_t;
+
+extern hwgfs_handle_t hwgfs_register(hwgfs_handle_t dir, const char *name,
+ unsigned int flags,
+ unsigned int major, unsigned int minor,
+ umode_t mode, void *ops, void *info);
+extern int hwgfs_mk_symlink(hwgfs_handle_t dir, const char *name,
+ unsigned int flags, const char *link,
+ hwgfs_handle_t *handle, void *info);
+extern hwgfs_handle_t hwgfs_mk_dir(hwgfs_handle_t dir, const char *name,
+ void *info);
+extern void hwgfs_unregister(hwgfs_handle_t de);
+
+extern hwgfs_handle_t hwgfs_find_handle(hwgfs_handle_t dir, const char *name,
+ unsigned int major,unsigned int minor,
+ char type, int traverse_symlinks);
+extern hwgfs_handle_t hwgfs_get_parent(hwgfs_handle_t de);
+extern int hwgfs_generate_path(hwgfs_handle_t de, char *path, int buflen);
+
+extern void *hwgfs_get_info(hwgfs_handle_t de);
+extern int hwgfs_set_info(hwgfs_handle_t de, void *info);
+
+#endif
+++ /dev/null
-#ifndef _ASM_IA64_SN_IDLE_H
-#define _ASM_IA64_SN_IDLE_H
-
-/*
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Copyright (c) 2001-2002 Silicon Graphics, Inc. All rights reserved.
- */
-
-#include <linux/config.h>
-#include <asm/sn/leds.h>
-#include <asm/sn/simulator.h>
-
-static __inline__ void
-snidle(void) {
-#if 0
-#ifdef CONFIG_IA64_SGI_AUTOTEST
- {
- extern int autotest_enabled;
- if (autotest_enabled) {
- extern void llsc_main(int);
- llsc_main(smp_processor_id());
- }
- }
-#endif
-
- if (pda.idle_flag == 0) {
- /*
- * Turn the activity LED off.
- */
- set_led_bits(0, LED_CPU_ACTIVITY);
- }
-
-#ifdef CONFIG_IA64_SGI_SN_SIM
- if (IS_RUNNING_ON_SIMULATOR())
- SIMULATOR_SLEEP();
-#endif
-
- pda.idle_flag = 1;
-#endif
-}
-
-static __inline__ void
-snidleoff(void) {
-#if 0
- /*
- * Turn the activity LED on.
- */
- set_led_bits(LED_CPU_ACTIVITY, LED_CPU_ACTIVITY);
-
- pda.idle_flag = 0;
-#endif
-}
-
-#endif /* _ASM_IA64_SN_IDLE_H */
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
- * Copyright (C) 2000-2001 Silicon Graphics, Inc. All rights reserved.
+ * Copyright (C) 2000-2003 Silicon Graphics, Inc. All rights reserved.
*/
#ifndef _ASM_IA64_SN_IFCONFIG_NET_H
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
- * Copyright (C) 1992 - 1997, 2000-2002 Silicon Graphics, Inc. All rights reserved.
+ * Copyright (C) 1992 - 1997, 2000-2003 Silicon Graphics, Inc. All rights reserved.
*/
#ifndef _ASM_IA64_SN_INTR_H
#define _ASM_IA64_SN_INTR_H
#include <linux/config.h>
-
-#if defined(CONFIG_IA64_SGI_SN1)
-#include <asm/sn/sn1/intr.h>
-#elif defined(CONFIG_IA64_SGI_SN2)
#include <asm/sn/sn2/intr.h>
-#endif
extern void sn_send_IPI_phys(long, int, int);
-#define CPU_VECTOR_TO_IRQ(cpuid,vector) ((cpuid) << 8 | (vector))
+#define CPU_VECTOR_TO_IRQ(cpuid,vector) (vector)
+#define SN_CPU_FROM_IRQ(irq) (0)
+#define SN_IVEC_FROM_IRQ(irq) (irq)
#endif /* _ASM_IA64_SN_INTR_H */
+++ /dev/null
-/* $Id$
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Copyright (C) 1992 - 1997, 2000-2001 Silicon Graphics, Inc. All rights reserved.
- */
-#ifndef _ASM_IA64_SN_INTR_PUBLIC_H
-#define _ASM_IA64_SN_INTR_PUBLIC_H
-
-#include <linux/config.h>
-
-#if defined(CONFIG_IA64_SGI_SN1)
-#include <asm/sn/sn1/intr_public.h>
-#elif defined(CONFIG_IA64_SGI_SN2)
-#endif
-
-#endif /* _ASM_IA64_SN_INTR_PUBLIC_H */
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
- * Copyright (C) 1992 - 1997, 2000-2002 Silicon Graphics, Inc. All rights reserved.
+ * Copyright (C) 1992 - 1997, 2000-2003 Silicon Graphics, Inc. All rights reserved.
*/
#ifndef _ASM_IA64_SN_INVENT_H
#define _ASM_IA64_SN_INVENT_H
#include <linux/types.h>
-#include <linux/devfs_fs_kernel.h>
-
+#include <asm/sn/sgi.h>
/*
* sys/sn/invent.h -- Kernel Hardware Inventory
*
#define minor_t int
#define app32_ptr_t unsigned long
#define graph_vertex_place_t long
-#define GRAPH_VERTEX_NONE ((devfs_handle_t)-1)
+#define GRAPH_VERTEX_NONE ((vertex_hdl_t)-1)
#define GRAPH_EDGE_PLACE_NONE ((graph_edge_place_t)0)
#define GRAPH_INFO_PLACE_NONE ((graph_info_place_t)0)
#define GRAPH_VERTEX_PLACE_NONE ((graph_vertex_place_t)0)
} irix5_inventory_t;
typedef struct invplace_s {
- devfs_handle_t invplace_vhdl; /* current vertex */
- devfs_handle_t invplace_vplace; /* place in vertex list */
+ vertex_hdl_t invplace_vhdl; /* current vertex */
+ vertex_hdl_t invplace_vplace; /* place in vertex list */
inventory_t *invplace_inv; /* place in inv list on vertex */
} invplace_t; /* Magic cookie placeholder in inventory list */
extern int scaninvent(int (*)(inventory_t *, void *), void *);
extern int get_sizeof_inventory(int);
-extern void device_inventory_add( devfs_handle_t device,
+extern void device_inventory_add( vertex_hdl_t device,
int class,
int type,
major_t ctlr,
int state);
-extern inventory_t *device_inventory_get_next( devfs_handle_t device,
+extern inventory_t *device_inventory_get_next( vertex_hdl_t device,
invplace_t *);
-extern void device_controller_num_set( devfs_handle_t,
+extern void device_controller_num_set( vertex_hdl_t,
int);
-extern int device_controller_num_get( devfs_handle_t);
+extern int device_controller_num_get( vertex_hdl_t);
#endif /* __KERNEL__ */
#endif /* _ASM_IA64_SN_INVENT_H */
(_x) : \
(_x) - (HUB_WIDGET_ID_MIN-1)) << 3) )
-#if defined(CONFIG_IA64_SGI_SN1)
-#include <asm/sn/sn1/bedrock.h>
-#include <asm/sn/sn1/hubio.h>
-#include <asm/sn/sn1/hubio_next.h>
-#include <asm/sn/sn1/hubmd.h>
-#include <asm/sn/sn1/hubmd_next.h>
-#include <asm/sn/sn1/hubpi.h>
-#include <asm/sn/sn1/hubpi_next.h>
-#include <asm/sn/sn1/hublb.h>
-#include <asm/sn/sn1/hublb_next.h>
-#include <asm/sn/sn1/hubni.h>
-#include <asm/sn/sn1/hubni_next.h>
-#include <asm/sn/sn1/hubxb.h>
-#include <asm/sn/sn1/hubxb_next.h>
-#include <asm/sn/sn1/hubstat.h>
-#include <asm/sn/sn1/hubdev.h>
-#include <asm/sn/sn1/synergy.h>
-#elif defined(CONFIG_IA64_SGI_SN2)
#include <asm/sn/sn2/shub.h>
#include <asm/sn/sn2/shubio.h>
-#endif
/*
* Used to ensure write ordering (like mb(), but for I/O space)
/*
- * Copyright (c) 2002 Silicon Graphics, Inc. All Rights Reserved.
+ * Copyright (c) 2002-2003 Silicon Graphics, Inc. All Rights Reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License
--- /dev/null
+/*
+ * Copyright (c) 2002-2003 Silicon Graphics, Inc. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it would be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * Further, this software is distributed without any warranty that it is
+ * free of the rightful claim of any third person regarding infringement
+ * or the like. Any license provided herein, whether implied or
+ * otherwise, applies only to this software file. Patent licenses, if
+ * any, provided herein do not apply to combinations of this program with
+ * other software, or any other product whatsoever.
+ *
+ * You should have received a copy of the GNU General Public
+ * License along with this program; if not, write the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
+ *
+ * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy,
+ * Mountain View, CA 94043, or:
+ *
+ * http://www.sgi.com
+ *
+ */
+
+#ifndef _ASM_IA64_SN_IOC4_H
+#define _ASM_IA64_SN_IOC4_H
+
+#if 0
+
+/*
+ * ioc4.h - IOC4 chip header file
+ */
+
+/* Notes:
+ * The IOC4 chip is a 32-bit PCI device that provides 4 serial ports,
+ * an IDE bus interface, a PC keyboard/mouse interface, and a real-time
+ * external interrupt interface.
+ *
+ * It includes an optimized DMA buffer management, and a store-and-forward
+ * buffer RAM.
+ *
+ * All IOC4 registers are 32 bits wide.
+ */
+typedef __uint32_t ioc4reg_t;
+
+/*
+ * PCI Configuration Space Register Address Map, use offset from IOC4 PCI
+ * configuration base such that this can be used for multiple IOC4s
+ */
+#define IOC4_PCI_ID 0x0 /* ID */
+
+#define IOC4_VENDOR_ID_NUM 0x10A9
+#define IOC4_DEVICE_ID_NUM 0x100A
+#define IOC4_ADDRSPACE_MASK 0xfff00000ULL
+
+#define IOC4_PCI_SCR 0x4 /* Status/Command */
+#define IOC4_PCI_REV 0x8 /* Revision */
+#define IOC4_PCI_LAT 0xC /* Latency Timer */
+#define IOC4_PCI_BAR0 0x10 /* IOC4 base address 0 */
+#define IOC4_PCI_SIDV 0x2c /* Subsys ID and vendor */
+#define IOC4_PCI_CAP 0x34 /* Capability pointer */
+#define IOC4_PCI_LATGNTINT 0x3c /* Max_lat, min_gnt, int_pin, int_line */
+
+/*
+ * PCI Memory Space Map
+ */
+#define IOC4_PCI_ERR_ADDR_L 0x000 /* Low Error Address */
+#define IOC4_PCI_ERR_ADDR_VLD (0x1 << 0)
+#define IOC4_PCI_ERR_ADDR_MST_ID_MSK (0xf << 1)
+#define IOC4_PCI_ERR_ADDR_MUL_ERR (0x1 << 5)
+#define IOC4_PCI_ERR_ADDR_ADDR_MSK (0x3ffffff << 6)
+
+/* Master IDs contained in PCI_ERR_ADDR_MST_ID_MSK */
+#define IOC4_MST_ID_S0_TX 0
+#define IOC4_MST_ID_S0_RX 1
+#define IOC4_MST_ID_S1_TX 2
+#define IOC4_MST_ID_S1_RX 3
+#define IOC4_MST_ID_S2_TX 4
+#define IOC4_MST_ID_S2_RX 5
+#define IOC4_MST_ID_S3_TX 6
+#define IOC4_MST_ID_S3_RX 7
+#define IOC4_MST_ID_ATA 8
+
+#define IOC4_PCI_ERR_ADDR_H 0x004 /* High Error Address */
+
+#define IOC4_SIO_IR 0x008 /* SIO Interrupt Register */
+#define IOC4_OTHER_IR 0x00C /* Other Interrupt Register */
+
+/* These registers are read-only for general kernel code. To modify
+ * them use the functions in ioc4.c
+ */
+#define IOC4_SIO_IES_RO 0x010 /* SIO Interrupt Enable Set Reg */
+#define IOC4_OTHER_IES_RO 0x014 /* Other Interrupt Enable Set Reg */
+#define IOC4_SIO_IEC_RO 0x018 /* SIO Interrupt Enable Clear Reg */
+#define IOC4_OTHER_IEC_RO 0x01C /* Other Interrupt Enable Clear Reg */
+
+#define IOC4_SIO_CR 0x020 /* SIO Control Reg */
+#define IOC4_INT_OUT 0x028 /* INT_OUT Reg (realtime interrupt) */
+#define IOC4_GPCR_S 0x030 /* GenericPIO Cntrl Set Register */
+#define IOC4_GPCR_C 0x034 /* GenericPIO Cntrl Clear Register */
+#define IOC4_GPDR 0x038 /* GenericPIO Data Register */
+#define IOC4_GPPR_0 0x040 /* GenericPIO Pin Registers */
+#define IOC4_GPPR_OFF 0x4
+#define IOC4_GPPR(x) (IOC4_GPPR_0+(x)*IOC4_GPPR_OFF)
+
+/* ATAPI Registers */
+#define IOC4_ATA_0 0x100 /* Data w/timing */
+#define IOC4_ATA_1 0x104 /* Error/Features w/timing */
+#define IOC4_ATA_2 0x108 /* Sector Count w/timing */
+#define IOC4_ATA_3 0x10C /* Sector Number w/timing */
+#define IOC4_ATA_4 0x110 /* Cylinder Low w/timing */
+#define IOC4_ATA_5 0x114 /* Cylinder High w/timing */
+#define IOC4_ATA_6 0x118 /* Device/Head w/timing */
+#define IOC4_ATA_7 0x11C /* Status/Command w/timing */
+#define IOC4_ATA_0_AUX 0x120 /* Aux Status/Device Cntrl w/timing */
+#define IOC4_ATA_TIMING 0x140 /* Timing value register 0 */
+#define IOC4_ATA_DMA_PTR_L 0x144 /* Low Memory Pointer to DMA List */
+#define IOC4_ATA_DMA_PTR_H 0x148 /* High Memory Pointer to DMA List */
+#define IOC4_ATA_DMA_ADDR_L 0x14C /* Low Memory DMA Address */
+#define IOC4_ATA_DMA_ADDR_H 0x150 /* High Memory DMA Address */
+#define IOC4_ATA_BC_DEV 0x154 /* DMA Byte Count at Device */
+#define IOC4_ATA_BC_MEM 0x158 /* DMA Byte Count at Memory */
+#define IOC4_ATA_DMA_CTRL 0x15C /* DMA Control/Status */
+
+/* Keyboard and Mouse Registers */
+#define IOC4_KM_CSR 0x200 /* Kbd and Mouse Cntrl/Status Reg */
+#define IOC4_K_RD 0x204 /* Kbd Read Data Register */
+#define IOC4_M_RD 0x208 /* Mouse Read Data Register */
+#define IOC4_K_WD 0x20C /* Kbd Write Data Register */
+#define IOC4_M_WD 0x210 /* Mouse Write Data Register */
+
+/* Serial Port Registers used for DMA mode serial I/O */
+#define IOC4_SBBR01_H 0x300 /* Serial Port Ring Buffers
+ Base Reg High for Channels 0 1*/
+#define IOC4_SBBR01_L 0x304 /* Serial Port Ring Buffers
+ Base Reg Low for Channels 0 1 */
+#define IOC4_SBBR23_H 0x308 /* Serial Port Ring Buffers
+ Base Reg High for Channels 2 3*/
+#define IOC4_SBBR23_L 0x30C /* Serial Port Ring Buffers
+ Base Reg Low for Channels 2 3 */
+
+#define IOC4_SSCR_0 0x310 /* Serial Port 0 Control */
+#define IOC4_STPIR_0 0x314 /* Serial Port 0 TX Produce */
+#define IOC4_STCIR_0 0x318 /* Serial Port 0 TX Consume */
+#define IOC4_SRPIR_0 0x31C /* Serial Port 0 RX Produce */
+#define IOC4_SRCIR_0 0x320 /* Serial Port 0 RX Consume */
+#define IOC4_SRTR_0 0x324 /* Serial Port 0 Receive Timer Reg */
+#define IOC4_SHADOW_0 0x328 /* Serial Port 0 16550 Shadow Reg */
+
+#define IOC4_SSCR_1 0x32C /* Serial Port 1 Control */
+#define IOC4_STPIR_1 0x330 /* Serial Port 1 TX Produce */
+#define IOC4_STCIR_1 0x334 /* Serial Port 1 TX Consume */
+#define IOC4_SRPIR_1 0x338 /* Serial Port 1 RX Produce */
+#define IOC4_SRCIR_1 0x33C /* Serial Port 1 RX Consume */
+#define IOC4_SRTR_1 0x340 /* Serial Port 1 Receive Timer Reg */
+#define IOC4_SHADOW_1 0x344 /* Serial Port 1 16550 Shadow Reg */
+
+#define IOC4_SSCR_2 0x348 /* Serial Port 2 Control */
+#define IOC4_STPIR_2 0x34C /* Serial Port 2 TX Produce */
+#define IOC4_STCIR_2 0x350 /* Serial Port 2 TX Consume */
+#define IOC4_SRPIR_2 0x354 /* Serial Port 2 RX Produce */
+#define IOC4_SRCIR_2 0x358 /* Serial Port 2 RX Consume */
+#define IOC4_SRTR_2 0x35C /* Serial Port 2 Receive Timer Reg */
+#define IOC4_SHADOW_2 0x360 /* Serial Port 2 16550 Shadow Reg */
+
+#define IOC4_SSCR_3 0x364 /* Serial Port 3 Control */
+#define IOC4_STPIR_3 0x368 /* Serial Port 3 TX Produce */
+#define IOC4_STCIR_3 0x36C /* Serial Port 3 TX Consume */
+#define IOC4_SRPIR_3 0x370 /* Serial Port 3 RX Produce */
+#define IOC4_SRCIR_3 0x374 /* Serial Port 3 RX Consume */
+#define IOC4_SRTR_3 0x378 /* Serial Port 3 Receive Timer Reg */
+#define IOC4_SHADOW_3 0x37C /* Serial Port 3 16550 Shadow Reg */
+
+#define IOC4_UART0_BASE 0x380 /* UART 0 */
+#define IOC4_UART1_BASE 0x388 /* UART 1 */
+#define IOC4_UART2_BASE 0x390 /* UART 2 */
+#define IOC4_UART3_BASE 0x398 /* UART 3 */
+
+/* Private page address aliases for usermode mapping */
+#define IOC4_INT_OUT_P 0x04000 /* INT_OUT Reg */
+
+#define IOC4_SSCR_0_P 0x08000 /* Serial Port 0 */
+#define IOC4_STPIR_0_P 0x08004
+#define IOC4_STCIR_0_P 0x08008 /* (read-only) */
+#define IOC4_SRPIR_0_P 0x0800C /* (read-only) */
+#define IOC4_SRCIR_0_P 0x08010
+#define IOC4_SRTR_0_P 0x08014
+#define IOC4_UART_LSMSMCR_0_P 0x08018 /* (read-only) */
+
+#define IOC4_SSCR_1_P 0x0C000 /* Serial Port 1 */
+#define IOC4_STPIR_1_P 0x0C004
+#define IOC4_STCIR_1_P 0x0C008 /* (read-only) */
+#define IOC4_SRPIR_1_P 0x0C00C /* (read-only) */
+#define IOC4_SRCIR_1_P 0x0C010
+#define IOC4_SRTR_1_P 0x0C014
+#define IOC4_UART_LSMSMCR_1_P 0x0C018 /* (read-only) */
+
+#define IOC4_SSCR_2_P 0x10000 /* Serial Port 2 */
+#define IOC4_STPIR_2_P 0x10004
+#define IOC4_STCIR_2_P 0x10008 /* (read-only) */
+#define IOC4_SRPIR_2_P 0x1000C /* (read-only) */
+#define IOC4_SRCIR_2_P 0x10010
+#define IOC4_SRTR_2_P 0x10014
+#define IOC4_UART_LSMSMCR_2_P 0x10018 /* (read-only) */
+
+#define IOC4_SSCR_3_P 0x14000 /* Serial Port 3 */
+#define IOC4_STPIR_3_P 0x14004
+#define IOC4_STCIR_3_P 0x14008 /* (read-only) */
+#define IOC4_SRPIR_3_P 0x1400C /* (read-only) */
+#define IOC4_SRCIR_3_P 0x14010
+#define IOC4_SRTR_3_P 0x14014
+#define IOC4_UART_LSMSMCR_3_P 0x14018 /* (read-only) */
+
+#define IOC4_ALIAS_PAGE_SIZE 0x4000
+
+/* Interrupt types */
+typedef enum ioc4_intr_type_e {
+ ioc4_sio_intr_type,
+ ioc4_other_intr_type,
+ ioc4_num_intr_types
+} ioc4_intr_type_t;
+#define ioc4_first_intr_type ioc4_sio_intr_type
+
+/* Bitmasks for IOC4_SIO_IR, IOC4_SIO_IEC, and IOC4_SIO_IES */
+#define IOC4_SIO_IR_S0_TX_MT 0x00000001 /* Serial port 0 TX empty */
+#define IOC4_SIO_IR_S0_RX_FULL 0x00000002 /* Port 0 RX buf full */
+#define IOC4_SIO_IR_S0_RX_HIGH 0x00000004 /* Port 0 RX hiwat */
+#define IOC4_SIO_IR_S0_RX_TIMER 0x00000008 /* Port 0 RX timeout */
+#define IOC4_SIO_IR_S0_DELTA_DCD 0x00000010 /* Port 0 delta DCD */
+#define IOC4_SIO_IR_S0_DELTA_CTS 0x00000020 /* Port 0 delta CTS */
+#define IOC4_SIO_IR_S0_INT 0x00000040 /* Port 0 pass-thru intr */
+#define IOC4_SIO_IR_S0_TX_EXPLICIT 0x00000080 /* Port 0 explicit TX thru */
+#define IOC4_SIO_IR_S1_TX_MT 0x00000100 /* Serial port 1 */
+#define IOC4_SIO_IR_S1_RX_FULL 0x00000200 /* */
+#define IOC4_SIO_IR_S1_RX_HIGH 0x00000400 /* */
+#define IOC4_SIO_IR_S1_RX_TIMER 0x00000800 /* */
+#define IOC4_SIO_IR_S1_DELTA_DCD 0x00001000 /* */
+#define IOC4_SIO_IR_S1_DELTA_CTS 0x00002000 /* */
+#define IOC4_SIO_IR_S1_INT 0x00004000 /* */
+#define IOC4_SIO_IR_S1_TX_EXPLICIT 0x00008000 /* */
+#define IOC4_SIO_IR_S2_TX_MT 0x00010000 /* Serial port 2 */
+#define IOC4_SIO_IR_S2_RX_FULL 0x00020000 /* */
+#define IOC4_SIO_IR_S2_RX_HIGH 0x00040000 /* */
+#define IOC4_SIO_IR_S2_RX_TIMER 0x00080000 /* */
+#define IOC4_SIO_IR_S2_DELTA_DCD 0x00100000 /* */
+#define IOC4_SIO_IR_S2_DELTA_CTS 0x00200000 /* */
+#define IOC4_SIO_IR_S2_INT 0x00400000 /* */
+#define IOC4_SIO_IR_S2_TX_EXPLICIT 0x00800000 /* */
+#define IOC4_SIO_IR_S3_TX_MT 0x01000000 /* Serial port 3 */
+#define IOC4_SIO_IR_S3_RX_FULL 0x02000000 /* */
+#define IOC4_SIO_IR_S3_RX_HIGH 0x04000000 /* */
+#define IOC4_SIO_IR_S3_RX_TIMER 0x08000000 /* */
+#define IOC4_SIO_IR_S3_DELTA_DCD 0x10000000 /* */
+#define IOC4_SIO_IR_S3_DELTA_CTS 0x20000000 /* */
+#define IOC4_SIO_IR_S3_INT 0x40000000 /* */
+#define IOC4_SIO_IR_S3_TX_EXPLICIT 0x80000000 /* */
+
+/* Per device interrupt masks */
+#define IOC4_SIO_IR_S0 (IOC4_SIO_IR_S0_TX_MT | \
+ IOC4_SIO_IR_S0_RX_FULL | \
+ IOC4_SIO_IR_S0_RX_HIGH | \
+ IOC4_SIO_IR_S0_RX_TIMER | \
+ IOC4_SIO_IR_S0_DELTA_DCD | \
+ IOC4_SIO_IR_S0_DELTA_CTS | \
+ IOC4_SIO_IR_S0_INT | \
+ IOC4_SIO_IR_S0_TX_EXPLICIT)
+#define IOC4_SIO_IR_S1 (IOC4_SIO_IR_S1_TX_MT | \
+ IOC4_SIO_IR_S1_RX_FULL | \
+ IOC4_SIO_IR_S1_RX_HIGH | \
+ IOC4_SIO_IR_S1_RX_TIMER | \
+ IOC4_SIO_IR_S1_DELTA_DCD | \
+ IOC4_SIO_IR_S1_DELTA_CTS | \
+ IOC4_SIO_IR_S1_INT | \
+ IOC4_SIO_IR_S1_TX_EXPLICIT)
+#define IOC4_SIO_IR_S2 (IOC4_SIO_IR_S2_TX_MT | \
+ IOC4_SIO_IR_S2_RX_FULL | \
+ IOC4_SIO_IR_S2_RX_HIGH | \
+ IOC4_SIO_IR_S2_RX_TIMER | \
+ IOC4_SIO_IR_S2_DELTA_DCD | \
+ IOC4_SIO_IR_S2_DELTA_CTS | \
+ IOC4_SIO_IR_S2_INT | \
+ IOC4_SIO_IR_S2_TX_EXPLICIT)
+#define IOC4_SIO_IR_S3 (IOC4_SIO_IR_S3_TX_MT | \
+ IOC4_SIO_IR_S3_RX_FULL | \
+ IOC4_SIO_IR_S3_RX_HIGH | \
+ IOC4_SIO_IR_S3_RX_TIMER | \
+ IOC4_SIO_IR_S3_DELTA_DCD | \
+ IOC4_SIO_IR_S3_DELTA_CTS | \
+ IOC4_SIO_IR_S3_INT | \
+ IOC4_SIO_IR_S3_TX_EXPLICIT)
+
+/* Bitmasks for IOC4_OTHER_IR, IOC4_OTHER_IEC, and IOC4_OTHER_IES */
+#define IOC4_OTHER_IR_ATA_INT 0x00000001 /* ATAPI intr pass-thru */
+#define IOC4_OTHER_IR_ATA_MEMERR 0x00000002 /* ATAPI DMA PCI error */
+#define IOC4_OTHER_IR_S0_MEMERR 0x00000004 /* Port 0 PCI error */
+#define IOC4_OTHER_IR_S1_MEMERR 0x00000008 /* Port 1 PCI error */
+#define IOC4_OTHER_IR_S2_MEMERR 0x00000010 /* Port 2 PCI error */
+#define IOC4_OTHER_IR_S3_MEMERR 0x00000020 /* Port 3 PCI error */
+#define IOC4_OTHER_IR_KBD_INT 0x00000040 /* Kbd/mouse intr */
+#define IOC4_OTHER_IR_ATA_DMAINT 0x00000089 /* ATAPI DMA intr */
+#define IOC4_OTHER_IR_RT_INT 0x00800000 /* RT output pulse */
+#define IOC4_OTHER_IR_GEN_INT1 0x02000000 /* RT input pulse */
+#define IOC4_OTHER_IR_GEN_INT_SHIFT 25
+
+/* Per device interrupt masks */
+#define IOC4_OTHER_IR_ATA (IOC4_OTHER_IR_ATA_INT | \
+ IOC4_OTHER_IR_ATA_MEMERR | \
+ IOC4_OTHER_IR_ATA_DMAINT)
+#define IOC4_OTHER_IR_RT (IOC4_OTHER_IR_RT_INT | IOC4_OTHER_IR_GEN_INT1)
+
+/* Macro to load pending interrupts */
+#define IOC4_PENDING_SIO_INTRS(mem) (PCI_INW(&((mem)->sio_ir)) & \
+ PCI_INW(&((mem)->sio_ies_ro)))
+#define IOC4_PENDING_OTHER_INTRS(mem) (PCI_INW(&((mem)->other_ir)) & \
+ PCI_INW(&((mem)->other_ies_ro)))
+
+/* Bitmasks for IOC4_SIO_CR */
+#define IOC4_SIO_SR_CMD_PULSE 0x00000004 /* Byte bus strobe length */
+#define IOC4_SIO_CR_CMD_PULSE_SHIFT 0
+#define IOC4_SIO_CR_ARB_DIAG 0x00000070 /* Current non-ATA PCI bus
+ requester (ro) */
+#define IOC4_SIO_CR_ARB_DIAG_TX0 0x00000000
+#define IOC4_SIO_CR_ARB_DIAG_RX0 0x00000010
+#define IOC4_SIO_CR_ARB_DIAG_TX1 0x00000020
+#define IOC4_SIO_CR_ARB_DIAG_RX1 0x00000030
+#define IOC4_SIO_CR_ARB_DIAG_TX2 0x00000040
+#define IOC4_SIO_CR_ARB_DIAG_RX2 0x00000050
+#define IOC4_SIO_CR_ARB_DIAG_TX3 0x00000060
+#define IOC4_SIO_CR_ARB_DIAG_RX3 0x00000070
+#define IOC4_SIO_CR_SIO_DIAG_IDLE 0x00000080 /* 0 -> active request among
+ serial ports (ro) */
+#define IOC4_SIO_CR_ATA_DIAG_IDLE 0x00000100 /* 0 -> active request from
+ ATA port */
+#define IOC4_SIO_CR_ATA_DIAG_ACTIVE 0x00000200 /* 1 -> ATA request is winner */
+
+/* Bitmasks for IOC4_INT_OUT */
+#define IOC4_INT_OUT_COUNT 0x0000ffff /* Pulse interval timer */
+#define IOC4_INT_OUT_MODE 0x00070000 /* Mode mask */
+#define IOC4_INT_OUT_MODE_0 0x00000000 /* Set output to 0 */
+#define IOC4_INT_OUT_MODE_1 0x00040000 /* Set output to 1 */
+#define IOC4_INT_OUT_MODE_1PULSE 0x00050000 /* Send 1 pulse */
+#define IOC4_INT_OUT_MODE_PULSES 0x00060000 /* Send 1 pulse every interval */
+#define IOC4_INT_OUT_MODE_SQW 0x00070000 /* Toggle output every interval */
+#define IOC4_INT_OUT_DIAG 0x40000000 /* Diag mode */
+#define IOC4_INT_OUT_INT_OUT 0x80000000 /* Current state of INT_OUT */
+
+/* Time constants for IOC4_INT_OUT */
+#define IOC4_INT_OUT_NS_PER_TICK (15 * 520) /* 15 ns PCI clock, multi=520 */
+#define IOC4_INT_OUT_TICKS_PER_PULSE 3 /* Outgoing pulse lasts 3
+ ticks */
+#define IOC4_INT_OUT_US_TO_COUNT(x) /* Convert uS to a count value */ \
+ (((x) * 10 + IOC4_INT_OUT_NS_PER_TICK / 200) * \
+ 100 / IOC4_INT_OUT_NS_PER_TICK - 1)
+#define IOC4_INT_OUT_COUNT_TO_US(x) /* Convert count value to uS */ \
+ (((x) + 1) * IOC4_INT_OUT_NS_PER_TICK / 1000)
+#define IOC4_INT_OUT_MIN_TICKS 3 /* Min period is width of
+ pulse in "ticks" */
+#define IOC4_INT_OUT_MAX_TICKS IOC4_INT_OUT_COUNT /* Largest possible count */
+
+/* Bitmasks for IOC4_GPCR */
+#define IOC4_GPCR_DIR 0x000000ff /* Tristate pin in or out */
+#define IOC4_GPCR_DIR_PIN(x) (1<<(x)) /* Access one of the DIR bits */
+#define IOC4_GPCR_EDGE 0x0000ff00 /* Extint edge or level
+ sensitive */
+#define IOC4_GPCR_EDGE_PIN(x) (1<<((x)+7 )) /* Access one of the EDGE bits */
+
+/* Values for IOC4_GPCR */
+#define IOC4_GPCR_INT_OUT_EN 0x00100000 /* Enable INT_OUT to pin 0 */
+#define IOC4_GPCR_DIR_SER0_XCVR 0x00000010 /* Port 0 Transceiver select
+ enable */
+#define IOC4_GPCR_DIR_SER1_XCVR 0x00000020 /* Port 1 Transceiver select
+ enable */
+#define IOC4_GPCR_DIR_SER2_XCVR 0x00000040 /* Port 2 Transceiver select
+ enable */
+#define IOC4_GPCR_DIR_SER3_XCVR 0x00000080 /* Port 3 Transceiver select
+ enable */
+
+/* Defs for some of the generic I/O pins */
+#define IOC4_GPCR_UART0_MODESEL 0x10 /* Pin is output to port 0
+ mode sel */
+#define IOC4_GPCR_UART1_MODESEL 0x20 /* Pin is output to port 1
+ mode sel */
+#define IOC4_GPCR_UART2_MODESEL 0x40 /* Pin is output to port 2
+ mode sel */
+#define IOC4_GPCR_UART3_MODESEL 0x80 /* Pin is output to port 3
+ mode sel */
+
+#define IOC4_GPPR_UART0_MODESEL_PIN 4 /* GIO pin controlling
+ uart 0 mode select */
+#define IOC4_GPPR_UART1_MODESEL_PIN 5 /* GIO pin controlling
+ uart 1 mode select */
+#define IOC4_GPPR_UART2_MODESEL_PIN 6 /* GIO pin controlling
+ uart 2 mode select */
+#define IOC4_GPPR_UART3_MODESEL_PIN 7 /* GIO pin controlling
+ uart 3 mode select */
+
+/* Bitmasks for IOC4_ATA_TIMING */
+#define IOC4_ATA_TIMING_ADR_SETUP 0x00000003 /* Clocks of addr set-up */
+#define IOC4_ATA_TIMING_PULSE_WIDTH 0x000001f8 /* Clocks of read or write
+ pulse width */
+#define IOC4_ATA_TIMING_RECOVERY 0x0000fe00 /* Clocks before next read
+ or write */
+#define IOC4_ATA_TIMING_USE_IORDY 0x00010000 /* PIO uses IORDY */
+
+/* Bitmasks for address list elements pointed to by IOC4_ATA_DMA_PTR_<L|H> */
+#define IOC4_ATA_ALE_DMA_ADDRESS 0xfffffffffffffffe
+
+/* Bitmasks for byte count list elements pointed to by IOC4_ATA_DMA_PTR_<L|H> */
+#define IOC4_ATA_BCLE_BYTE_COUNT 0x000000000000fffe
+#define IOC4_ATA_BCLE_LIST_END 0x0000000080000000
+
+/* Bitmasks for IOC4_ATA_BC_<DEV|MEM> */
+#define IOC4_ATA_BC_BYTE_CNT 0x0001fffe /* Byte count */
+
+/* Bitmasks for IOC4_ATA_DMA_CTRL */
+#define IOC4_ATA_DMA_CTRL_STRAT 0x00000001 /* 1 -> start DMA engine */
+#define IOC4_ATA_DMA_CTRL_STOP 0x00000002 /* 1 -> stop DMA engine */
+#define IOC4_ATA_DMA_CTRL_DIR 0x00000004 /* 1 -> ATA bus data copied
+ to memory */
+#define IOC4_ATA_DMA_CTRL_ACTIVE 0x00000008 /* DMA channel is active */
+#define IOC4_ATA_DMA_CTRL_MEM_ERROR 0x00000010 /* DMA engine encountered
+ a PCI error */
+/* Bitmasks for IOC4_KM_CSR */
+#define IOC4_KM_CSR_K_WRT_PEND 0x00000001 /* Kbd port xmitting or resetting */
+#define IOC4_KM_CSR_M_WRT_PEND 0x00000002 /* Mouse port xmitting or resetting */
+#define IOC4_KM_CSR_K_LCB 0x00000004 /* Line Cntrl Bit for last KBD write */
+#define IOC4_KM_CSR_M_LCB 0x00000008 /* Same for mouse */
+#define IOC4_KM_CSR_K_DATA 0x00000010 /* State of kbd data line */
+#define IOC4_KM_CSR_K_CLK 0x00000020 /* State of kbd clock line */
+#define IOC4_KM_CSR_K_PULL_DATA 0x00000040 /* Pull kbd data line low */
+#define IOC4_KM_CSR_K_PULL_CLK 0x00000080 /* Pull kbd clock line low */
+#define IOC4_KM_CSR_M_DATA 0x00000100 /* State of mouse data line */
+#define IOC4_KM_CSR_M_CLK 0x00000200 /* State of mouse clock line */
+#define IOC4_KM_CSR_M_PULL_DATA 0x00000400 /* Pull mouse data line low */
+#define IOC4_KM_CSR_M_PULL_CLK 0x00000800 /* Pull mouse clock line low */
+#define IOC4_KM_CSR_EMM_MODE 0x00001000 /* Emulation mode */
+#define IOC4_KM_CSR_SIM_MODE 0x00002000 /* Clock X8 */
+#define IOC4_KM_CSR_K_SM_IDLE 0x00004000 /* Keyboard is idle */
+#define IOC4_KM_CSR_M_SM_IDLE 0x00008000 /* Mouse is idle */
+#define IOC4_KM_CSR_K_TO 0x00010000 /* Keyboard trying to send/receive */
+#define IOC4_KM_CSR_M_TO 0x00020000 /* Mouse trying to send/receive */
+#define IOC4_KM_CSR_K_TO_EN 0x00040000 /* KM_CSR_K_TO + KM_CSR_K_TO_EN =
+ cause SIO_IR to assert */
+#define IOC4_KM_CSR_M_TO_EN 0x00080000 /* KM_CSR_M_TO + KM_CSR_M_TO_EN =
+ cause SIO_IR to assert */
+#define IOC4_KM_CSR_K_CLAMP_ONE 0x00100000 /* Pull K_CLK low after rec. one char */
+#define IOC4_KM_CSR_M_CLAMP_ONE 0x00200000 /* Pull M_CLK low after rec. one char */
+#define IOC4_KM_CSR_K_CLAMP_THREE \
+ 0x00400000 /* Pull K_CLK low after rec. three chars */
+#define IOC4_KM_CSR_M_CLAMP_THREE \
+ 0x00800000 /* Pull M_CLK low after rec. three char */
+
+/* Bitmasks for IOC4_K_RD and IOC4_M_RD */
+#define IOC4_KM_RD_DATA_2 0x000000ff /* 3rd char recvd since last read */
+#define IOC4_KM_RD_DATA_2_SHIFT 0
+#define IOC4_KM_RD_DATA_1 0x0000ff00 /* 2nd char recvd since last read */
+#define IOC4_KM_RD_DATA_1_SHIFT 8
+#define IOC4_KM_RD_DATA_0 0x00ff0000 /* 1st char recvd since last read */
+#define IOC4_KM_RD_DATA_0_SHIFT 16
+#define IOC4_KM_RD_FRAME_ERR_2 0x01000000 /* Framing or parity error in byte 2 */
+#define IOC4_KM_RD_FRAME_ERR_1 0x02000000 /* Same for byte 1 */
+#define IOC4_KM_RD_FRAME_ERR_0 0x04000000 /* Same for byte 0 */
+
+#define IOC4_KM_RD_KBD_MSE 0x08000000 /* 0 if from kbd, 1 if from mouse */
+#define IOC4_KM_RD_OFLO 0x10000000 /* 4th char recvd before this read */
+#define IOC4_KM_RD_VALID_2 0x20000000 /* DATA_2 valid */
+#define IOC4_KM_RD_VALID_1 0x40000000 /* DATA_1 valid */
+#define IOC4_KM_RD_VALID_0 0x80000000 /* DATA_0 valid */
+#define IOC4_KM_RD_VALID_ALL (IOC4_KM_RD_VALID_0 | IOC4_KM_RD_VALID_1 | \
+ IOC4_KM_RD_VALID_2)
+
+/* Bitmasks for IOC4_K_WD & IOC4_M_WD */
+#define IOC4_KM_WD_WRT_DATA 0x000000ff /* Write to keyboard/mouse port */
+#define IOC4_KM_WD_WRT_DATA_SHIFT 0
+
+/* Bitmasks for serial RX status byte */
+#define IOC4_RXSB_OVERRUN 0x01 /* Char(s) lost */
+#define IOC4_RXSB_PAR_ERR 0x02 /* Parity error */
+#define IOC4_RXSB_FRAME_ERR 0x04 /* Framing error */
+#define IOC4_RXSB_BREAK 0x08 /* Break character */
+#define IOC4_RXSB_CTS 0x10 /* State of CTS */
+#define IOC4_RXSB_DCD 0x20 /* State of DCD */
+#define IOC4_RXSB_MODEM_VALID 0x40 /* DCD, CTS, and OVERRUN are valid */
+#define IOC4_RXSB_DATA_VALID 0x80 /* Data byte, FRAME_ERR PAR_ERR & BREAK valid */
+
+/* Bitmasks for serial TX control byte */
+#define IOC4_TXCB_INT_WHEN_DONE 0x20 /* Interrupt after this byte is sent */
+#define IOC4_TXCB_INVALID 0x00 /* Byte is invalid */
+#define IOC4_TXCB_VALID 0x40 /* Byte is valid */
+#define IOC4_TXCB_MCR 0x80 /* Data<7:0> to modem control register */
+#define IOC4_TXCB_DELAY 0xc0 /* Delay data<7:0> mSec */
+
+/* Bitmasks for IOC4_SBBR_L */
+#define IOC4_SBBR_L_SIZE 0x00000001 /* 0 == 1KB rings, 1 == 4KB rings */
+#define IOC4_SBBR_L_BASE 0xfffff000 /* Lower serial ring base addr */
+
+/* Bitmasks for IOC4_SSCR_<3:0> */
+#define IOC4_SSCR_RX_THRESHOLD 0x000001ff /* Hiwater mark */
+#define IOC4_SSCR_TX_TIMER_BUSY 0x00010000 /* TX timer in progress */
+#define IOC4_SSCR_HFC_EN 0x00020000 /* Hardware flow control enabled */
+#define IOC4_SSCR_RX_RING_DCD 0x00040000 /* Post RX record on delta-DCD */
+#define IOC4_SSCR_RX_RING_CTS 0x00080000 /* Post RX record on delta-CTS */
+#define IOC4_SSCR_DIAG 0x00200000 /* Bypass clock divider for sim */
+#define IOC4_SSCR_RX_DRAIN 0x08000000 /* Drain RX buffer to memory */
+#define IOC4_SSCR_DMA_EN 0x10000000 /* Enable ring buffer DMA */
+#define IOC4_SSCR_DMA_PAUSE 0x20000000 /* Pause DMA */
+#define IOC4_SSCR_PAUSE_STATE 0x40000000 /* Sets when PAUSE takes effect */
+#define IOC4_SSCR_RESET 0x80000000 /* Reset DMA channels */
+
+/* All producer/consumer pointers are the same bitfield */
+#define IOC4_PROD_CONS_PTR_4K 0x00000ff8 /* For 4K buffers */
+#define IOC4_PROD_CONS_PTR_1K 0x000003f8 /* For 1K buffers */
+#define IOC4_PROD_CONS_PTR_OFF 3
+
+/* Bitmasks for IOC4_STPIR_<3:0> */
+/* Reserved for future register definitions */
+
+/* Bitmasks for IOC4_STCIR_<3:0> */
+#define IOC4_STCIR_BYTE_CNT 0x0f000000 /* Bytes in unpacker */
+#define IOC4_STCIR_BYTE_CNT_SHIFT 24
+
+/* Bitmasks for IOC4_SRPIR_<3:0> */
+#define IOC4_SRPIR_BYTE_CNT 0x0f000000 /* Bytes in packer */
+#define IOC4_SRPIR_BYTE_CNT_SHIFT 24
+
+/* Bitmasks for IOC4_SRCIR_<3:0> */
+#define IOC4_SRCIR_ARM 0x80000000 /* Arm RX timer */
+
+/* Bitmasks for IOC4_SHADOW_<3:0> */
+#define IOC4_SHADOW_DR 0x00000001 /* Data ready */
+#define IOC4_SHADOW_OE 0x00000002 /* Overrun error */
+#define IOC4_SHADOW_PE 0x00000004 /* Parity error */
+#define IOC4_SHADOW_FE 0x00000008 /* Framing error */
+#define IOC4_SHADOW_BI 0x00000010 /* Break interrupt */
+#define IOC4_SHADOW_THRE 0x00000020 /* Xmit holding register empty */
+#define IOC4_SHADOW_TEMT 0x00000040 /* Xmit shift register empty */
+#define IOC4_SHADOW_RFCE 0x00000080 /* Char in RX fifo has an error */
+#define IOC4_SHADOW_DCTS 0x00010000 /* Delta clear to send */
+#define IOC4_SHADOW_DDCD 0x00080000 /* Delta data carrier detect */
+#define IOC4_SHADOW_CTS 0x00100000 /* Clear to send */
+#define IOC4_SHADOW_DCD 0x00800000 /* Data carrier detect */
+#define IOC4_SHADOW_DTR 0x01000000 /* Data terminal ready */
+#define IOC4_SHADOW_RTS 0x02000000 /* Request to send */
+#define IOC4_SHADOW_OUT1 0x04000000 /* 16550 OUT1 bit */
+#define IOC4_SHADOW_OUT2 0x08000000 /* 16550 OUT2 bit */
+#define IOC4_SHADOW_LOOP 0x10000000 /* Loopback enabled */
+
+/* Bitmasks for IOC4_SRTR_<3:0> */
+#define IOC4_SRTR_CNT 0x00000fff /* Reload value for RX timer */
+#define IOC4_SRTR_CNT_VAL 0x0fff0000 /* Current value of RX timer */
+#define IOC4_SRTR_CNT_VAL_SHIFT 16
+#define IOC4_SRTR_HZ 16000 /* SRTR clock frequency */
+
+/* Serial port register map used for DMA and PIO serial I/O */
+typedef volatile struct ioc4_serialregs {
+ ioc4reg_t sscr;
+ ioc4reg_t stpir;
+ ioc4reg_t stcir;
+ ioc4reg_t srpir;
+ ioc4reg_t srcir;
+ ioc4reg_t srtr;
+ ioc4reg_t shadow;
+} ioc4_sregs_t;
+
+/* IOC4 UART register map */
+typedef volatile struct ioc4_uartregs {
+ union {
+ char rbr; /* read only, DLAB == 0 */
+ char thr; /* write only, DLAB == 0 */
+ char dll; /* DLAB == 1 */
+ } u1;
+ union {
+ char ier; /* DLAB == 0 */
+ char dlm; /* DLAB == 1 */
+ } u2;
+ union {
+ char iir; /* read only */
+ char fcr; /* write only */
+ } u3;
+ char i4u_lcr;
+ char i4u_mcr;
+ char i4u_lsr;
+ char i4u_msr;
+ char i4u_scr;
+} ioc4_uart_t;
+
+#define i4u_rbr u1.rbr
+#define i4u_thr u1.thr
+#define i4u_dll u1.dll
+#define i4u_ier u2.ier
+#define i4u_dlm u2.dlm
+#define i4u_iir u3.iir
+#define i4u_fcr u3.fcr
+
+/* PCI config space register map */
+typedef volatile struct ioc4_configregs {
+ ioc4reg_t pci_id;
+ ioc4reg_t pci_scr;
+ ioc4reg_t pci_rev;
+ ioc4reg_t pci_lat;
+ ioc4reg_t pci_bar0;
+ ioc4reg_t pci_bar1;
+ ioc4reg_t pci_bar2_not_implemented;
+ ioc4reg_t pci_cis_ptr_not_implemented;
+ ioc4reg_t pci_sidv;
+ ioc4reg_t pci_rom_bar_not_implemented;
+ ioc4reg_t pci_cap;
+ ioc4reg_t pci_rsv;
+ ioc4reg_t pci_latgntint;
+
+ char pci_fill1[0x58 - 0x3c - 4];
+
+ ioc4reg_t pci_pcix;
+ ioc4reg_t pci_pcixstatus;
+} ioc4_cfg_t;
+
+/* PCI memory space register map addressed using pci_bar0 */
+typedef volatile struct ioc4_memregs {
+
+ /* Miscellaneous IOC4 registers */
+ ioc4reg_t pci_err_addr_l;
+ ioc4reg_t pci_err_addr_h;
+ ioc4reg_t sio_ir;
+ ioc4reg_t other_ir;
+
+ /* These registers are read-only for general kernel code. To
+ * modify them use the functions in ioc4.c.
+ */
+ ioc4reg_t sio_ies_ro;
+ ioc4reg_t other_ies_ro;
+ ioc4reg_t sio_iec_ro;
+ ioc4reg_t other_iec_ro;
+ ioc4reg_t sio_cr;
+ ioc4reg_t misc_fill1;
+ ioc4reg_t int_out;
+ ioc4reg_t misc_fill2;
+ ioc4reg_t gpcr_s;
+ ioc4reg_t gpcr_c;
+ ioc4reg_t gpdr;
+ ioc4reg_t misc_fill3;
+ ioc4reg_t gppr_0;
+ ioc4reg_t gppr_1;
+ ioc4reg_t gppr_2;
+ ioc4reg_t gppr_3;
+ ioc4reg_t gppr_4;
+ ioc4reg_t gppr_5;
+ ioc4reg_t gppr_6;
+ ioc4reg_t gppr_7;
+
+ char misc_fill4[0x100 - 0x5C - 4];
+
+ /* ATA/ATAP registers */
+ ioc4reg_t ata_0;
+ ioc4reg_t ata_1;
+ ioc4reg_t ata_2;
+ ioc4reg_t ata_3;
+ ioc4reg_t ata_4;
+ ioc4reg_t ata_5;
+ ioc4reg_t ata_6;
+ ioc4reg_t ata_7;
+ ioc4reg_t ata_aux;
+
+ char ata_fill1[0x140 - 0x120 - 4];
+
+ ioc4reg_t ata_timing;
+ ioc4reg_t ata_dma_ptr_l;
+ ioc4reg_t ata_dma_ptr_h;
+ ioc4reg_t ata_dma_addr_l;
+ ioc4reg_t ata_dma_addr_h;
+ ioc4reg_t ata_bc_dev;
+ ioc4reg_t ata_bc_mem;
+ ioc4reg_t ata_dma_ctrl;
+
+ char ata_fill2[0x200 - 0x15C - 4];
+
+ /* Keyboard and mouse registers */
+ ioc4reg_t km_csr;
+ ioc4reg_t k_rd;
+ ioc4reg_t m_rd;
+ ioc4reg_t k_wd;
+ ioc4reg_t m_wd;
+
+ char km_fill1[0x300 - 0x210 - 4];
+
+ /* Serial port registers used for DMA serial I/O */
+ ioc4reg_t sbbr01_l;
+ ioc4reg_t sbbr01_h;
+ ioc4reg_t sbbr23_l;
+ ioc4reg_t sbbr23_h;
+
+ ioc4_sregs_t port_0;
+ ioc4_sregs_t port_1;
+ ioc4_sregs_t port_2;
+ ioc4_sregs_t port_3;
+
+ ioc4_uart_t uart_0;
+ ioc4_uart_t uart_1;
+ ioc4_uart_t uart_2;
+ ioc4_uart_t uart_3;
+} ioc4_mem_t;
+
+#endif /* 0 */
+
+/*
+ * Bytebus device space
+ */
+#define IOC4_BYTEBUS_DEV0 0x80000L /* Addressed using pci_bar0 */
+#define IOC4_BYTEBUS_DEV1 0xA0000L /* Addressed using pci_bar0 */
+#define IOC4_BYTEBUS_DEV2 0xC0000L /* Addressed using pci_bar0 */
+#define IOC4_BYTEBUS_DEV3 0xE0000L /* Addressed using pci_bar0 */
+
+#if 0
+/* UART clock speed */
+#define IOC4_SER_XIN_CLK 66000000
+
+typedef enum ioc4_subdevs_e {
+ ioc4_subdev_generic,
+ ioc4_subdev_kbms,
+ ioc4_subdev_tty0,
+ ioc4_subdev_tty1,
+ ioc4_subdev_tty2,
+ ioc4_subdev_tty3,
+ ioc4_subdev_rt,
+ ioc4_nsubdevs
+} ioc4_subdev_t;
+
+/* Subdevice disable bits,
+ * from the standard INFO_LBL_SUBDEVS
+ */
+#define IOC4_SDB_TTY0 (1 << ioc4_subdev_tty0)
+#define IOC4_SDB_TTY1 (1 << ioc4_subdev_tty1)
+#define IOC4_SDB_TTY2 (1 << ioc4_subdev_tty2)
+#define IOC4_SDB_TTY3 (1 << ioc4_subdev_tty3)
+#define IOC4_SDB_KBMS (1 << ioc4_subdev_kbms)
+#define IOC4_SDB_RT (1 << ioc4_subdev_rt)
+#define IOC4_SDB_GENERIC (1 << ioc4_subdev_generic)
+
+#define IOC4_ALL_SUBDEVS ((1 << ioc4_nsubdevs) - 1)
+
+#define IOC4_SDB_SERIAL (IOC4_SDB_TTY0 | IOC4_SDB_TTY1 | IOC4_SDB_TTY2 | IOC4_SDB_TTY3)
+
+#define IOC4_STD_SUBDEVS IOC4_ALL_SUBDEVS
+
+#define IOC4_INTA_SUBDEVS (IOC4_SDB_SERIAL | IOC4_SDB_KBMS | IOC4_SDB_RT | IOC4_SDB_GENERIC)
+
+extern int ioc4_subdev_enabled(vertex_hdl_t, ioc4_subdev_t);
+extern void ioc4_subdev_enables(vertex_hdl_t, ulong_t);
+extern void ioc4_subdev_enable(vertex_hdl_t, ioc4_subdev_t);
+extern void ioc4_subdev_disable(vertex_hdl_t, ioc4_subdev_t);
+
+/* Macros to read and write the SIO_IEC and SIO_IES registers (see the
+ * comments in ioc4.c for details on why this is necessary
+ */
+#define IOC4_W_IES 0
+#define IOC4_W_IEC 1
+extern void ioc4_write_ireg(void *, ioc4reg_t, int, ioc4_intr_type_t);
+
+#define IOC4_WRITE_IES(ioc4, val, type) ioc4_write_ireg(ioc4, val, IOC4_W_IES, type)
+#define IOC4_WRITE_IEC(ioc4, val, type) ioc4_write_ireg(ioc4, val, IOC4_W_IEC, type)
+
+typedef void
+ioc4_intr_func_f (intr_arg_t, ioc4reg_t);
+
+typedef void
+ioc4_intr_connect_f (vertex_hdl_t conn_vhdl,
+ ioc4_intr_type_t,
+ ioc4reg_t,
+ ioc4_intr_func_f *,
+ intr_arg_t info,
+ vertex_hdl_t owner_vhdl,
+ vertex_hdl_t intr_dev_vhdl,
+ int (*)(intr_arg_t));
+
+typedef void
+ioc4_intr_disconnect_f (vertex_hdl_t conn_vhdl,
+ ioc4_intr_type_t,
+ ioc4reg_t,
+ ioc4_intr_func_f *,
+ intr_arg_t info,
+ vertex_hdl_t owner_vhdl);
+
+ioc4_intr_disconnect_f ioc4_intr_disconnect;
+ioc4_intr_connect_f ioc4_intr_connect;
+
+extern int ioc4_is_console(vertex_hdl_t conn_vhdl);
+
+extern void ioc4_mlreset(ioc4_cfg_t *, ioc4_mem_t *);
+
+extern intr_func_f ioc4_intr;
+
+extern ioc4_mem_t *ioc4_mem_ptr(void *ioc4_fastinfo);
+
+typedef ioc4_intr_func_f *ioc4_intr_func_t;
+
+#endif /* 0 */
+#endif /* _ASM_IA64_SN_IOC4_H */
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
- * Copyright (C) 1992 - 1997, 2000-2002 Silicon Graphics, Inc. All rights reserved.
+ * Copyright (C) 1992 - 1997, 2000-2003 Silicon Graphics, Inc. All rights reserved.
*/
#ifndef _ASM_IA64_SN_IOERROR_H
#define _ASM_IA64_SN_IOERROR_H
* we have a single structure, and the appropriate fields get filled in
* at each layer.
* - This provides a way to dump all error related information in any layer
- * of error handling (debugging aid).
+ * of error handling (debugging aid).
*
* A second possibility is to allow each layer to define its own error
* data structure, and fill in the proper fields. This has the advantage
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
- * Copyright (C) 1992 - 1997, 2000-2002 Silicon Graphics, Inc. All rights reserved.
+ * Copyright (C) 1992 - 1997, 2000-2003 Silicon Graphics, Inc. All rights reserved.
*/
#ifndef _ASM_IA64_SN_IOERROR_HANDLING_H
#define _ASM_IA64_SN_IOERROR_HANDLING_H
/* Error state interfaces */
#if defined(CONFIG_SGI_IO_ERROR_HANDLING)
-extern error_return_code_t error_state_set(devfs_handle_t,error_state_t);
-extern error_state_t error_state_get(devfs_handle_t);
+extern error_return_code_t error_state_set(vertex_hdl_t,error_state_t);
+extern error_state_t error_state_get(vertex_hdl_t);
#endif
-/* System critical graph interfaces */
-
-extern boolean_t is_sys_critical_vertex(devfs_handle_t);
-extern devfs_handle_t sys_critical_first_child_get(devfs_handle_t);
-extern devfs_handle_t sys_critical_next_child_get(devfs_handle_t);
-extern devfs_handle_t sys_critical_parent_get(devfs_handle_t);
-extern error_return_code_t sys_critical_graph_vertex_add(devfs_handle_t,
- devfs_handle_t new);
-
/* Error action interfaces */
-extern error_return_code_t error_action_set(devfs_handle_t,
+extern error_return_code_t error_action_set(vertex_hdl_t,
error_action_f,
error_context_t,
error_priority_t);
-extern error_return_code_t error_action_perform(devfs_handle_t);
+extern error_return_code_t error_action_perform(vertex_hdl_t);
#define INFO_LBL_ERROR_SKIP_ENV "error_skip_env"
hwgraph_info_remove_LBL(v, INFO_LBL_ERROR_SKIP_ENV, 0)
/* Skip point interfaces */
-extern error_return_code_t error_skip_point_jump(devfs_handle_t, boolean_t);
-extern error_return_code_t error_skip_point_clear(devfs_handle_t);
+extern error_return_code_t error_skip_point_jump(vertex_hdl_t, boolean_t);
+extern error_return_code_t error_skip_point_clear(vertex_hdl_t);
/* REFERENCED */
#if defined(CONFIG_SGI_IO_ERROR_HANDLING)
inline static int
-error_skip_point_mark(devfs_handle_t v)
+error_skip_point_mark(vertex_hdl_t v)
{
label_t *error_env = NULL;
int code = 0;
typedef uint64_t counter_t;
-extern counter_t error_retry_count_get(devfs_handle_t);
-extern error_return_code_t error_retry_count_set(devfs_handle_t,counter_t);
-extern counter_t error_retry_count_increment(devfs_handle_t);
-extern counter_t error_retry_count_decrement(devfs_handle_t);
+extern counter_t error_retry_count_get(vertex_hdl_t);
+extern error_return_code_t error_retry_count_set(vertex_hdl_t,counter_t);
+extern counter_t error_retry_count_increment(vertex_hdl_t);
+extern counter_t error_retry_count_decrement(vertex_hdl_t);
/* Except for the PIO Read error typically the other errors are handled in
* the context of an asynchronous error interrupt.
* thru the calls the io error handling layer.
*/
#if defined(CONFIG_SGI_IO_ERROR_HANDLING)
-extern boolean_t is_device_shutdown(devfs_handle_t);
+extern boolean_t is_device_shutdown(vertex_hdl_t);
#define IS_DEVICE_SHUTDOWN(_d) (is_device_shutdown(_d))
#endif
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
- * Copyright (C) 1992 - 1997, 2000-2001 Silicon Graphics, Inc. All rights reserved.
+ * Copyright (C) 1992-1997,2000-2003 Silicon Graphics, Inc. All rights reserved.
*/
#ifndef _ASM_IA64_SN_IOGRAPH_H
#define _ASM_IA64_SN_IOGRAPH_H
#define EDGE_LBL_IOC3 "ioc3"
#define EDGE_LBL_LUN "lun"
#define EDGE_LBL_LINUX "linux"
-#define EDGE_LBL_LINUX_BUS EDGE_LBL_LINUX "/busnum"
+#define EDGE_LBL_LINUX_BUS EDGE_LBL_LINUX "/bus/pci-x"
#define EDGE_LBL_MACE "mace" /* O2 mace */
#define EDGE_LBL_MACHDEP "machdep" /* Platform depedent devices */
#define EDGE_LBL_MASTER ".master"
#define EDGE_LBL_XBOX_RPS "xbox_rps" /* redundant power supply for xbox unit */
#define EDGE_LBL_IOBRICK "iobrick"
#define EDGE_LBL_PBRICK "Pbrick"
+#define EDGE_LBL_PEBRICK "PEbrick"
+#define EDGE_LBL_PXBRICK "PXbrick"
+#define EDGE_LBL_IXBRICK "IXbrick"
#define EDGE_LBL_IBRICK "Ibrick"
#define EDGE_LBL_XBRICK "Xbrick"
+#define EDGE_LBL_CGBRICK "CGbrick"
#define EDGE_LBL_CPUBUS "cpubus" /* CPU Interfaces (SysAd) */
/* vertex info labels in hwgraph */
#include <asm/sn/xtalk/xbow.h> /* For get MAX_PORT_NUM */
int io_brick_map_widget(int, int);
-int io_path_map_widget(devfs_handle_t);
+int io_path_map_widget(vertex_hdl_t);
/*
* Map a brick's widget number to a meaningful int
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
- * Copyright (C) 1996, 2001 Silicon Graphics, Inc. All rights reserved.
+ * Copyright (C) 1996, 2001-2003 Silicon Graphics, Inc. All rights reserved.
* Copyright (C) 2001 by Ralf Baechle
*/
#ifndef _ASM_IA64_SN_KLCLOCK_H
*
* Derived from IRIX <sys/SN/klconfig.h>.
*
- * Copyright (C) 1992-1997,1999,2001-2002 Silicon Graphics, Inc. All Rights Reserved.
+ * Copyright (C) 1992-1997,1999,2001-2003 Silicon Graphics, Inc. All Rights Reserved.
* Copyright (C) 1999 by Ralf Baechle
*/
#ifndef _ASM_IA64_SN_KLCONFIG_H
#include <asm/sn/xtalk/xtalk.h>
#include <asm/sn/kldir.h>
#include <asm/sn/sn_fru.h>
-
-#ifdef CONFIG_IA64_SGI_SN1
-#include <asm/sn/sn1/hubmd_next.h>
-#endif
-
-#ifdef CONFIG_IA64_SGI_SN2
#include <asm/sn/sn2/shub_md.h>
-#endif
-
-#ifdef CONFIG_IA64_SGI_SN2
#include <asm/sn/geo.h>
-#endif
#define KLCFGINFO_MAGIC 0xbeedbabe
#define KLTYPE_IBRICK (KLCLASS_IOBRICK | 0x1)
#define KLTYPE_PBRICK (KLCLASS_IOBRICK | 0x2)
#define KLTYPE_XBRICK (KLCLASS_IOBRICK | 0x3)
+#define KLTYPE_NBRICK (KLCLASS_IOBRICK | 0x4)
+#define KLTYPE_PEBRICK (KLCLASS_IOBRICK | 0x5)
+#define KLTYPE_PXBRICK (KLCLASS_IOBRICK | 0x6)
+#define KLTYPE_IXBRICK (KLCLASS_IOBRICK | 0x7)
+#define KLTYPE_CGBRICK (KLCLASS_IOBRICK | 0x8)
+
#define KLTYPE_PBRICK_BRIDGE KLTYPE_PBRICK
unsigned char brd_flags; /* Enabled, Disabled etc */
unsigned char brd_slot; /* slot number */
unsigned short brd_debugsw; /* Debug switches */
-#ifdef CONFIG_IA64_SGI_SN2
geoid_t brd_geoid; /* geo id */
-#else
- moduleid_t brd_module; /* module to which it belongs */
-#endif
partid_t brd_partition; /* Partition number */
unsigned short brd_diagval; /* diagnostic value */
unsigned short brd_diagparm; /* diagnostic parameter */
klconf_off_t brd_compts[MAX_COMPTS_PER_BRD]; /* pointers to COMPONENTS */
klconf_off_t brd_errinfo; /* Board's error information */
struct lboard_s *brd_parent; /* Logical parent for this brd */
- devfs_handle_t brd_graph_link; /* vertex hdl to connect extern compts */
+ vertex_hdl_t brd_graph_link; /* vertex hdl to connect extern compts */
confidence_t brd_confidence; /* confidence that the board is bad */
nasid_t brd_owner; /* who owns this board */
unsigned char brd_nic_flags; /* To handle 8 more NICs */
-#ifdef CONFIG_IA64_SGI_SN2
char pad[32]; /* future expansion */
-#endif
char brd_name[32];
} lboard_t;
((_brd)->brd_next ? \
(NODE_OFFSET_TO_LBOARD(NASID_GET(_brd), (_brd)->brd_next)): NULL)
#define KLCF_COMP(_brd, _ndx) \
- (NODE_OFFSET_TO_KLINFO(NASID_GET(_brd), (_brd)->brd_compts[(_ndx)]))
+ ((((_brd)->brd_compts[(_ndx)]) == 0) ? 0 : \
+ (NODE_OFFSET_TO_KLINFO(NASID_GET(_brd), (_brd)->brd_compts[(_ndx)])))
#define KLCF_COMP_ERROR(_brd, _comp) \
(NODE_OFFSET_TO_K0(NASID_GET(_brd), (_comp)->errinfo))
nasid_t port_nasid;
unsigned char port_flag;
klconf_off_t port_offset;
-#ifdef CONFIG_IA64_SGI_SN2
short port_num;
-#endif
} klport_t;
typedef struct klcpu_s { /* CPU */
unsigned short cpu_speed; /* Speed in MHZ */
unsigned short cpu_scachesz; /* secondary cache size in MB */
unsigned short cpu_scachespeed;/* secondary cache speed in MHz */
-#ifdef CONFIG_IA64_SGI_SN2
unsigned long pad;
-#endif
} klcpu_t ;
#define CPU_STRUCT_VERSION 2
typedef struct klhub_s { /* HUB */
klinfo_t hub_info;
uint hub_flags; /* PCFG_HUB_xxx flags */
-#ifdef CONFIG_IA64_SGI_SN2
#define MAX_NI_PORTS 2
klport_t hub_port[MAX_NI_PORTS + 1];/* hub is connected to this */
-#else
- klport_t hub_port; /* hub is connected to this */
-#endif
nic_t hub_box_nic; /* nic of containing box */
klconf_off_t hub_mfg_nic; /* MFG NIC string */
u64 hub_speed; /* Speed of hub in HZ */
-#ifdef CONFIG_IA64_SGI_SN2
moduleid_t hub_io_module; /* attached io module */
unsigned long pad;
-#endif
} klhub_t ;
typedef struct klhub_uart_s { /* HUB */
klinfo_t hubuart_info;
uint hubuart_flags; /* PCFG_HUB_xxx flags */
nic_t hubuart_box_nic; /* nic of containing box */
-#ifdef CONFIG_IA64_SGI_SN2
unsigned long pad;
-#endif
} klhub_uart_t ;
#define MEMORY_STRUCT_VERSION 2
short membnk_dimm_select; /* bank to physical addr mapping*/
short membnk_bnksz[MD_MEM_BANKS]; /* Memory bank sizes */
short membnk_attr;
-#ifdef CONFIG_IA64_SGI_SN2
unsigned long pad;
-#endif
} klmembnk_t ;
#define KLCONFIG_MEMBNK_SIZE(_info, _bank) \
char snum_str[MAX_SERIAL_NUM_SIZE];
unsigned long long snum_int;
} snum;
-#ifdef CONFIG_IA64_SGI_SN2
unsigned long pad;
-#endif
} klmod_serial_num_t;
/* Macros needed to access serial number structure in lboard_t.
klport_t xbow_port_info[MAX_XBOW_LINKS] ; /* Module number */
int xbow_master_hub_link;
/* type of brd connected+component struct ptr+flags */
-#ifdef CONFIG_IA64_SGI_SN2
unsigned long pad;
-#endif
} klxbow_t ;
#define MAX_PCI_SLOTS 8
pci_t pci_specific ; /* PCI Board config info */
klpci_device_t bri_devices[MAX_PCI_DEVS] ; /* PCI IDs */
klconf_off_t bri_mfg_nic ;
-#ifdef CONFIG_IA64_SGI_SN2
unsigned long pad;
-#endif
} klbri_t ;
#define MAX_IOC3_TTY 2
klinfo_t ioc3_enet ;
klconf_off_t ioc3_enet_off ;
klconf_off_t ioc3_kbd_off ;
-#ifdef CONFIG_IA64_SGI_SN2
unsigned long pad;
-#endif
} klioc3_t ;
#define MAX_VME_SLOTS 8
klinfo_t vmeb_info ;
vmeb_t vmeb_specific ;
klconf_off_t vmeb_brdinfo[MAX_VME_SLOTS] ; /* VME Board config info */
-#ifdef CONFIG_IA64_SGI_SN2
unsigned long pad;
-#endif
} klvmeb_t ;
typedef struct klvmed_s { /* VME DEVICE - VME BOARD */
klinfo_t vmed_info ;
vmed_t vmed_specific ;
klconf_off_t vmed_brdinfo[MAX_VME_SLOTS] ; /* VME Board config info */
-#ifdef CONFIG_IA64_SGI_SN2
unsigned long pad;
-#endif
} klvmed_t ;
#define ROUTER_VECTOR_VERS 2
klport_t rou_port[MAX_ROUTER_PORTS + 1] ; /* array index 1 to 6 */
klconf_off_t rou_mfg_nic ; /* MFG NIC string */
u64 rou_vector; /* vector from master node */
-#ifdef CONFIG_IA64_SGI_SN2
unsigned long pad;
-#endif
} klrou_t ;
/*
graphics_t gfx_specific;
klconf_off_t pad0; /* for compatibility with older proms */
klconf_off_t gfx_mfg_nic;
-#ifdef CONFIG_IA64_SGI_SN2
unsigned long pad;
-#endif
} klgfx_t;
typedef struct klxthd_s {
klinfo_t xthd_info ;
klconf_off_t xthd_mfg_nic ; /* MFG NIC string */
-#ifdef CONFIG_IA64_SGI_SN2
unsigned long pad;
-#endif
} klxthd_t ;
typedef struct kltpu_s { /* TPU board */
klinfo_t tpu_info ;
klconf_off_t tpu_mfg_nic ; /* MFG NIC string */
-#ifdef CONFIG_IA64_SGI_SN2
unsigned long pad;
-#endif
} kltpu_t ;
typedef struct klgsn_s { /* GSN board */
scsi_t scsi_specific ;
unsigned char scsi_numdevs ;
klconf_off_t scsi_devinfo[MAX_SCSI_DEVS] ;
-#ifdef CONFIG_IA64_SGI_SN2
unsigned long pad;
-#endif
} klscsi_t ;
typedef struct klscctl_s { /* SCSI Controller */
uint type;
uint scsi_buscnt; /* # busses this cntlr */
void *scsi_bus[2]; /* Pointer to 2 klscsi_t's */
-#ifdef CONFIG_IA64_SGI_SN2
unsigned long pad;
-#endif
} klscctl_t ;
typedef struct klscdev_s { /* SCSI device */
klinfo_t scdev_info ;
struct scsidisk_data *scdev_cfg ; /* driver fills up this */
-#ifdef CONFIG_IA64_SGI_SN2
unsigned long pad;
-#endif
} klscdev_t ;
typedef struct klttydev_s { /* TTY device */
klinfo_t ttydev_info ;
struct terminal_data *ttydev_cfg ; /* driver fills up this */
-#ifdef CONFIG_IA64_SGI_SN2
unsigned long pad;
-#endif
} klttydev_t ;
typedef struct klenetdev_s { /* ENET device */
klinfo_t enetdev_info ;
struct net_data *enetdev_cfg ; /* driver fills up this */
-#ifdef CONFIG_IA64_SGI_SN2
unsigned long pad;
-#endif
} klenetdev_t ;
typedef struct klkbddev_s { /* KBD device */
klinfo_t kbddev_info ;
struct keyboard_data *kbddev_cfg ; /* driver fills up this */
-#ifdef CONFIG_IA64_SGI_SN2
unsigned long pad;
-#endif
} klkbddev_t ;
typedef struct klmsdev_s { /* mouse device */
klinfo_t msdev_info ;
void *msdev_cfg ;
-#ifdef CONFIG_IA64_SGI_SN2
unsigned long pad;
-#endif
} klmsdev_t ;
#define MAX_FDDI_DEVS 10 /* XXX Is this true */
klinfo_t fddi_info ;
fddi_t fddi_specific ;
klconf_off_t fddi_devinfo[MAX_FDDI_DEVS] ;
-#ifdef CONFIG_IA64_SGI_SN2
unsigned long pad;
-#endif
} klfddi_t ;
typedef struct klmio_s { /* MIO */
klinfo_t mio_info ;
mio_t mio_specific ;
-#ifdef CONFIG_IA64_SGI_SN2
unsigned long pad;
-#endif
} klmio_t ;
/*
klinfo_t usb_info; /* controller info */
void *usb_bus; /* handle to usb_bus_t */
uint64_t usb_controller; /* ptr to controller info */
-#ifdef CONFIG_IA64_SGI_SN2
unsigned long pad;
-#endif
} klusb_t ;
typedef union klcomp_s {
extern klcpu_t *nasid_slice_to_cpuinfo(nasid_t, int);
-extern xwidgetnum_t nodevertex_widgetnum_get(devfs_handle_t node_vtx);
-extern devfs_handle_t nodevertex_xbow_peer_get(devfs_handle_t node_vtx);
extern lboard_t *find_gfxpipe(int pipenum);
-extern void setup_gfxpipe_link(devfs_handle_t vhdl,int pipenum);
extern lboard_t *find_lboard_class(lboard_t *start, unsigned char brd_class);
-#ifdef CONFIG_IA64_SGI_SN2
-extern lboard_t *find_lboard_module_class(lboard_t *start, geoid_t geoid,
- unsigned char brd_class);
-#else
-extern lboard_t *find_lboard_module_class(lboard_t *start, moduleid_t mod,
- unsigned char brd_class);
-#endif
extern lboard_t *find_nic_lboard(lboard_t *, nic_t);
extern lboard_t *find_nic_type_lboard(nasid_t, unsigned char, nic_t);
-#ifdef CONFIG_IA64_SGI_SN2
extern lboard_t *find_lboard_modslot(lboard_t *start, geoid_t geoid);
extern lboard_t *find_lboard_module(lboard_t *start, geoid_t geoid);
-extern lboard_t *get_board_name(nasid_t nasid, geoid_t geoid, slotid_t slot, char *name);
-#else
-extern lboard_t *find_lboard_modslot(lboard_t *start, moduleid_t mod, slotid_t slot);
-extern lboard_t *find_lboard_module(lboard_t *start, moduleid_t mod);
-extern lboard_t *get_board_name(nasid_t nasid, moduleid_t mod, slotid_t slot, char *name);
-#endif
extern int config_find_nic_router(nasid_t, nic_t, lboard_t **, klrou_t**);
extern int config_find_nic_hub(nasid_t, nic_t, lboard_t **, klhub_t**);
extern int config_find_xbow(nasid_t, lboard_t **, klxbow_t**);
extern int update_klcfg_cpuinfo(nasid_t, int);
extern void board_to_path(lboard_t *brd, char *path);
-#ifdef CONFIG_IA64_SGI_SN2
extern moduleid_t get_module_id(nasid_t nasid);
-#endif
extern void nic_name_convert(char *old_name, char *new_name);
extern int module_brds(nasid_t nasid, lboard_t **module_brds, int n);
extern lboard_t *brd_from_key(uint64_t key);
*
* Derived from IRIX <sys/SN/kldir.h>, revision 1.21.
*
- * Copyright (C) 1992-1997,1999,2001-2002 Silicon Graphics, Inc. All Rights Reserved.
+ * Copyright (C) 1992-1997,1999,2001-2003 Silicon Graphics, Inc. All Rights Reserved.
* Copyright (C) 1999 by Ralf Baechle
*/
#ifndef _ASM_IA64_SN_KLDIR_H
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
- * Copyright (C) 1992-1997, 2000-2002 Silicon Graphics, Inc. All Rights Reserved.
+ * Copyright (C) 1992-1997, 2000-2003 Silicon Graphics, Inc. All Rights Reserved.
*/
#ifndef _ASM_SN_KSYS_ELSC_H
#define _ASM_SN_KSYS_ELSC_H
#include <linux/config.h>
#include <asm/sn/ksys/l1.h>
-#ifdef CONFIG_IA64_SGI_SN1
-
-#define ELSC_ACP_MAX 86 /* 84+cr+lf */
-#define ELSC_LINE_MAX (ELSC_ACP_MAX - 2)
-
-typedef sc_cq_t elsc_cq_t;
-
-/*
- * ELSC structure passed around as handle
- */
-
-typedef l1sc_t elsc_t;
-
-void elsc_init(elsc_t *e, nasid_t nasid);
-
-int elsc_process(elsc_t *e);
-int elsc_msg_check(elsc_t *e, char *msg, int msg_max);
-int elsc_msg_callback(elsc_t *e,
- void (*callback)(void *callback_data, char *msg),
- void *callback_data);
-char *elsc_errmsg(int code);
-
-int elsc_nvram_write(elsc_t *e, int addr, char *buf, int len);
-int elsc_nvram_read(elsc_t *e, int addr, char *buf, int len);
-int elsc_nvram_magic(elsc_t *e);
-int elsc_command(elsc_t *e, int only_if_message);
-int elsc_parse(elsc_t *e, char *p1, char *p2, char *p3);
-int elsc_ust_write(elsc_t *e, uchar_t c);
-int elsc_ust_read(elsc_t *e, char *c);
-
-
-
-/*
- * System controller commands
- */
-
-int elsc_version(elsc_t *e, char *result);
-int elsc_debug_set(elsc_t *e, u_char byte1, u_char byte2);
-int elsc_debug_get(elsc_t *e, u_char *byte1, u_char *byte2);
-int elsc_module_set(elsc_t *e, int module);
-int elsc_module_get(elsc_t *e);
-int elsc_partition_set(elsc_t *e, int partition);
-int elsc_partition_get(elsc_t *e);
-int elsc_domain_set(elsc_t *e, int domain);
-int elsc_domain_get(elsc_t *e);
-int elsc_cluster_set(elsc_t *e, int cluster);
-int elsc_cluster_get(elsc_t *e);
-int elsc_cell_set(elsc_t *e, int cell);
-int elsc_cell_get(elsc_t *e);
-int elsc_bist_set(elsc_t *e, char bist_status);
-char elsc_bist_get(elsc_t *e);
-int elsc_lock(elsc_t *e, int retry_interval_usec, int timeout_usec, u_char lock_val);
-int elsc_unlock(elsc_t *e);
-int elsc_display_char(elsc_t *e, int led, int chr);
-int elsc_display_digit(elsc_t *e, int led, int num, int l_case);
-int elsc_display_mesg(elsc_t *e, char *chr); /* 8-char input */
-int elsc_password_set(elsc_t *e, char *password); /* 4-char input */
-int elsc_password_get(elsc_t *e, char *password); /* 4-char output */
-int elsc_rpwr_query(elsc_t *e, int is_master);
-int elsc_power_query(elsc_t *e);
-int elsc_power_down(elsc_t *e, int sec);
-int elsc_power_cycle(elsc_t *e);
-int elsc_system_reset(elsc_t *e);
-int elsc_dip_switches(elsc_t *e);
-
-int _elsc_hbt(elsc_t *e, int ival, int rdly);
-
-#define elsc_hbt_enable(e, ival, rdly) _elsc_hbt(e, ival, rdly)
-#define elsc_hbt_disable(e) _elsc_hbt(e, 0, 0)
-#define elsc_hbt_send(e) _elsc_hbt(e, 0, 1)
-
-elsc_t *get_elsc(void);
-
-#endif /* CONFIG_IA64_SGI_SN1 */
-
-
/*
* Error codes
*
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
- * Copyright (C) 1992-1997,2000-2002 Silicon Graphics, Inc. All Rights Reserved.
+ * Copyright (C) 1992-1997,2000-2003 Silicon Graphics, Inc. All Rights Reserved.
*/
#ifndef _ASM_SN_KSYS_L1_H
#include <asm/atomic.h>
#include <asm/sn/sv.h>
-
-#ifdef CONFIG_IA64_SGI_SN1
-
-#define BRL1_QSIZE 128 /* power of 2 is more efficient */
-#define BRL1_BUFSZ 264 /* needs to be large enough
- * to hold 2 flags, escaped
- * CRC, type/subchannel byte,
- * and escaped payload
- */
-
-#define BRL1_IQS 32
-#define BRL1_OQS 4
-
-
-typedef struct sc_cq_s {
- u_char buf[BRL1_QSIZE];
- int ipos, opos, tent_next;
-} sc_cq_t;
-
-/* An l1sc_t struct can be associated with the local (C-brick) L1 or an L1
- * on an R-brick. In the R-brick case, the l1sc_t records a vector path
- * to the R-brick's junk bus UART. In the C-brick case, we just use the
- * following flag to denote the local uart.
- *
- * This value can't be confused with a network vector because the least-
- * significant nibble of a network vector cannot be greater than 8.
- */
-#define BRL1_LOCALHUB_UART ((net_vec_t)0xf)
-
-/* L1<->Bedrock reserved subchannels */
-
-/* console channels */
-#define SC_CONS_CPU0 0x00
-#define SC_CONS_CPU1 0x01
-#define SC_CONS_CPU2 0x02
-#define SC_CONS_CPU3 0x03
-
-#define L1_ELSCUART_SUBCH(p) (p)
-#define L1_ELSCUART_CPU(ch) (ch)
-
-#define SC_CONS_SYSTEM CPUS_PER_NODE
-
-/* mapping subchannels to queues */
-#define MAP_IQ(s) (s)
-#define MAP_OQ(s) (s)
-
-#define BRL1_NUM_SUBCHANS 32
-#define BRL1_CMD_SUBCH 16
-#define BRL1_EVENT_SUBCH (BRL1_NUM_SUBCHANS - 1)
-#define BRL1_SUBCH_RSVD 0
-#define BRL1_SUBCH_FREE (-1)
-
-/* constants for L1 hwgraph vertex info */
-#define CBRICK_L1 (__psint_t)1
-#define IOBRICK_L1 (__psint_t)2
-#define RBRICK_L1 (__psint_t)3
-
-
-struct l1sc_s;
-/* Saved off interrupt frame */
-typedef struct brl1_intr_frame {
- int bf_irq; /* irq received */
- void *bf_dev_id; /* device information */
- struct pt_regs *bf_regs; /* register frame */
-} brl1_intr_frame_t;
-
-typedef void (*brl1_notif_t)(int, void *, struct pt_regs *, struct l1sc_s *, int);
-typedef int (*brl1_uartf_t)(struct l1sc_s *);
-
-/* structure for controlling a subchannel */
-typedef struct brl1_sch_s {
- int use; /* if this subchannel is free,
- * use == BRL1_SUBCH_FREE */
- uint target; /* type, rack and slot of component to
- * which this subchannel is directed */
- atomic_t packet_arrived; /* true if packet arrived on
- * this subchannel */
- sc_cq_t * iqp; /* input queue for this subchannel */
- sv_t arrive_sv; /* used to wait for a packet */
- spinlock_t data_lock; /* synchronize access to input queues and
- * other fields of the brl1_sch_s struct */
- brl1_notif_t tx_notify; /* notify higher layer that transmission may
- * continue */
- brl1_notif_t rx_notify; /* notify higher layer that a packet has been
- * received */
- brl1_intr_frame_t irq_frame; /* saved off irq information */
-} brl1_sch_t;
-
-/* br<->l1 protocol states */
-#define BRL1_IDLE 0
-#define BRL1_FLAG 1
-#define BRL1_HDR 2
-#define BRL1_BODY 3
-#define BRL1_ESC 4
-#define BRL1_RESET 7
-
-
-/*
- * l1sc_t structure-- tracks protocol state, open subchannels, etc.
- */
-typedef struct l1sc_s {
- nasid_t nasid; /* nasid with which this instance
- * of the structure is associated */
- moduleid_t modid; /* module id of this brick */
- u_char verbose; /* non-zero if elscuart routines should
- * prefix output */
- net_vec_t uart; /* vector path to UART, or BRL1_LOCALUART */
- int sent; /* number of characters sent */
- int send_len; /* number of characters in send buf */
- brl1_uartf_t putc_f; /* pointer to UART putc function */
- brl1_uartf_t getc_f; /* pointer to UART getc function */
-
- spinlock_t send_lock; /* arbitrates send synchronization */
- spinlock_t recv_lock; /* arbitrates uart receive access */
- spinlock_t subch_lock; /* arbitrates subchannel allocation */
- cpuid_t intr_cpu; /* cpu that receives L1 interrupts */
-
- u_char send_in_use; /* non-zero if send buffer contains an
- * unsent or partially-sent packet */
- u_char fifo_space; /* current depth of UART send FIFO */
-
- u_char brl1_state; /* current state of the receive side */
- u_char brl1_last_hdr; /* last header byte received */
-
- char send[BRL1_BUFSZ]; /* send buffer */
-
- int sol; /* "start of line" (see elscuart routines) */
- int cons_listen; /* non-zero if the elscuart interface should
- * also check the system console subchannel */
- brl1_sch_t subch[BRL1_NUM_SUBCHANS];
- /* subchannels provided by link */
-
- sc_cq_t garbage_q; /* a place to put unsolicited packets */
- sc_cq_t oq[BRL1_OQS]; /* elscuart output queues */
-} l1sc_t;
-
-
-/* error codes */
-#define BRL1_VALID 0
-#define BRL1_FULL_Q (-1)
-#define BRL1_CRC (-2)
-#define BRL1_PROTOCOL (-3)
-#define BRL1_NO_MESSAGE (-4)
-#define BRL1_LINK (-5)
-#define BRL1_BUSY (-6)
-
-#define SC_SUCCESS BRL1_VALID
-#define SC_NMSG BRL1_NO_MESSAGE
-#define SC_BUSY BRL1_BUSY
-#define SC_NOPEN (-7)
-#define SC_BADSUBCH (-8)
-#define SC_TIMEDOUT (-9)
-#define SC_NSUBCH (-10)
-
-#endif /* CONFIG_IA64_SGI_SN1 */
-
/* L1 Target Addresses */
/*
* L1 commands and responses use source/target addresses that are
* id (L1 functionality is divided into several independent "tasks"
* that can each receive command requests and transmit responses)
*/
-#ifdef CONFIG_IA64_SGI_SN1
-#define L1_ADDR_TYPE_SHFT 28
-#define L1_ADDR_TYPE_MASK 0xF0000000
-#else
-#define L1_ADDR_TYPE_SHFT 8
-#define L1_ADDR_TYPE_MASK 0xFF00
-#endif /* CONFIG_IA64_SGI_SN1 */
#define L1_ADDR_TYPE_L1 0x00 /* L1 system controller */
#define L1_ADDR_TYPE_L2 0x01 /* L2 system controller */
#define L1_ADDR_TYPE_L3 0x02 /* L3 system controller */
#define L1_ADDR_TYPE_CBRICK 0x03 /* attached C brick */
#define L1_ADDR_TYPE_IOBRICK 0x04 /* attached I/O brick */
-
-#ifdef CONFIG_IA64_SGI_SN1
-#define L1_ADDR_RACK_SHFT 18
-#define L1_ADDR_RACK_MASK 0x0FFC0000
-#define L1_ADDR_RACK_LOCAL 0x3ff /* local brick's rack */
-#else
-#define L1_ADDR_RACK_SHFT 16
-#define L1_ADDR_RACK_MASK 0xFFFF00
-#define L1_ADDR_RACK_LOCAL 0xffff /* local brick's rack */
-#endif /* CONFIG_IA64_SGI_SN1 */
-
-#ifdef CONFIG_IA64_SGI_SN1
-#define L1_ADDR_BAY_SHFT 12
-#define L1_ADDR_BAY_MASK 0x0003F000
-#define L1_ADDR_BAY_LOCAL 0x3f /* local brick's bay */
-#else
-#define L1_ADDR_BAY_SHFT 0
-#define L1_ADDR_BAY_MASK 0xFF
-#define L1_ADDR_BAY_LOCAL 0xff /* local brick's bay */
-#endif /* CONFIG_IA64_SGI_SN1 */
-
#define L1_ADDR_TASK_SHFT 0
#define L1_ADDR_TASK_MASK 0x0000001F
#define L1_ADDR_TASK_INVALID 0x00 /* invalid task */
#define L1_BRICKTYPE_X 0x58 /* X */
#define L1_BRICKTYPE_X2 0x59 /* Y */
#define L1_BRICKTYPE_N 0x4e /* N */
+#define L1_BRICKTYPE_PE 0x25 /* % */
#define L1_BRICKTYPE_PX 0x23 /* # */
+#define L1_BRICKTYPE_IX 0x3d /* = */
/* EEPROM codes (for the "read EEPROM" request) */
/* c brick */
#define bzero(d, n) memset((d), 0, (n))
-#ifdef CONFIG_IA64_SGI_SN1
-
-#define SC_EVENT_CLASS_MASK ((unsigned short)0xff00)
-
-/* public interfaces to L1 system controller */
-
-int sc_open( l1sc_t *sc, uint target );
-int sc_close( l1sc_t *sc, int ch );
-int sc_construct_msg( l1sc_t *sc, int ch,
- char *msg, int msg_len,
- uint addr_task, short req_code,
- int req_nargs, ... );
-int sc_interpret_resp( char *resp, int resp_nargs, ... );
-int sc_send( l1sc_t *sc, int ch, char *msg, int len, int wait );
-int sc_recv( l1sc_t *sc, int ch, char *msg, int *len, uint64_t block );
-int sc_command( l1sc_t *sc, int ch, char *cmd, char *resp, int *len );
-int sc_command_kern( l1sc_t *sc, int ch, char *cmd, char *resp, int *len );
-int sc_poll( l1sc_t *sc, int ch );
-void sc_init( l1sc_t *sc, nasid_t nasid, net_vec_t uart );
-void sc_intr_enable( l1sc_t *sc );
-
-int elsc_rack_bay_get(l1sc_t *e, uint *rack, uint *bay);
-int elsc_rack_bay_type_get(l1sc_t *e, uint *rack,
- uint *bay, uint *brick_type);
-int elsc_cons_subch(l1sc_t *e, uint ch);
-int elsc_cons_node(l1sc_t *e);
-int elsc_display_line(l1sc_t *e, char *line, int lnum);
-
-extern l1sc_t *get_elsc( void );
-#define get_l1sc get_elsc
-#define get_master_l1sc get_l1sc
-
-int iobrick_rack_bay_type_get( l1sc_t *sc, uint *rack,
- uint *bay, uint *brick_type );
-int iobrick_module_get( l1sc_t *sc );
-int iobrick_pci_slot_pwr( l1sc_t *sc, int bus, int slot, int up );
-int iobrick_pci_bus_pwr( l1sc_t *sc, int bus, int up );
-int iobrick_sc_version( l1sc_t *sc, char *result );
-#else
int elsc_display_line(nasid_t nasid, char *line, int lnum);
int iobrick_rack_bay_type_get( nasid_t nasid, uint *rack,
uint *bay, uint *brick_type );
int iobrick_module_get( nasid_t nasid );
-#endif /* CONFIG_IA64_SGI_SN1 */
#endif /* _ASM_SN_KSYS_L1_H */
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
- * Copyright (C) 1992 - 1997, 2000-2002 Silicon Graphics, Inc. All rights reserved.
+ * Copyright (C) 1992 - 1997, 2000-2003 Silicon Graphics, Inc. All rights reserved.
*/
#ifndef _ASM_IA64_SN_LABELCL_H
#define _ASM_IA64_SN_LABELCL_H
-#include <asm/sn/hcl.h>
-
#define LABELCL_MAGIC 0x4857434c /* 'HWLC' */
#define LABEL_LENGTH_MAX 256 /* Includes NULL char */
#define INFO_DESC_PRIVATE (-1) /* default */
extern labelcl_info_t *labelcl_info_create(void);
extern int labelcl_info_destroy(labelcl_info_t *);
-extern int labelcl_info_add_LBL(struct devfs_entry *, char *, arb_info_desc_t, arbitrary_info_t);
-extern int labelcl_info_remove_LBL(struct devfs_entry *, char *, arb_info_desc_t *, arbitrary_info_t *);
-extern int labelcl_info_replace_LBL(struct devfs_entry *, char *, arb_info_desc_t,
+extern int labelcl_info_add_LBL(vertex_hdl_t, char *, arb_info_desc_t, arbitrary_info_t);
+extern int labelcl_info_remove_LBL(vertex_hdl_t, char *, arb_info_desc_t *, arbitrary_info_t *);
+extern int labelcl_info_replace_LBL(vertex_hdl_t, char *, arb_info_desc_t,
arbitrary_info_t, arb_info_desc_t *, arbitrary_info_t *);
-extern int labelcl_info_get_LBL(struct devfs_entry *, char *, arb_info_desc_t *,
+extern int labelcl_info_get_LBL(vertex_hdl_t, char *, arb_info_desc_t *,
arbitrary_info_t *);
-extern int labelcl_info_get_next_LBL(struct devfs_entry *, char *, arb_info_desc_t *,
+extern int labelcl_info_get_next_LBL(vertex_hdl_t, char *, arb_info_desc_t *,
arbitrary_info_t *, labelcl_info_place_t *);
-extern int labelcl_info_replace_IDX(struct devfs_entry *, int, arbitrary_info_t,
+extern int labelcl_info_replace_IDX(vertex_hdl_t, int, arbitrary_info_t,
arbitrary_info_t *);
-extern int labelcl_info_connectpt_set(struct devfs_entry *, struct devfs_entry *);
-extern int labelcl_info_get_IDX(struct devfs_entry *, int, arbitrary_info_t *);
-extern struct devfs_entry *device_info_connectpt_get(struct devfs_entry *);
+extern int labelcl_info_connectpt_set(vertex_hdl_t, vertex_hdl_t);
+extern int labelcl_info_get_IDX(vertex_hdl_t, int, arbitrary_info_t *);
+extern vertex_hdl_t device_info_connectpt_get(vertex_hdl_t);
#endif /* _ASM_IA64_SN_LABELCL_H */
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
- * Copyright (C) 2000-2002 Silicon Graphics, Inc. All rights reserved.
+ * Copyright (C) 2000-2003 Silicon Graphics, Inc. All rights reserved.
*/
#include <linux/config.h>
#include <asm/sn/addrs.h>
#include <asm/sn/sn_cpuid.h>
#include <asm/sn/pda.h>
-
-#ifdef CONFIG_IA64_SGI_SN1
-#define LED0 0xc0000b00100000c0LL
-#define LED_CPU_SHIFT 3
-#else
#include <asm/sn/sn2/shub.h>
+
#define LED0 (LOCAL_MMR_ADDR(SH_REAL_JUNK_BUS_LED0))
#define LED_CPU_SHIFT 16
-#endif
#define LED_CPU_HEARTBEAT 0x01
#define LED_CPU_ACTIVITY 0x02
-#ifdef LED_WAR
-#define LED_ALWAYS_SET 0x64 /* SN2 hw workaround: always set 0x60 */
-#define LED_MASK_AUTOTEST 0x9e
-#else /* LED_WAR */
#define LED_ALWAYS_SET 0x00
-#define LED_MASK_AUTOTEST 0xfe
-#endif /* LED_WAR */
/*
* Basic macros for flashing the LEDS on an SGI, SN1.
static __inline__ void
set_led_bits(u8 value, u8 mask)
{
-#if 0
- pda.led_state = (pda.led_state & ~mask) | (value & mask);
-#ifdef CONFIG_IA64_SGI_SN1
- *pda.led_address = (long) pda.led_state;
-#else
- *pda.led_address = (short) pda.led_state;
-#endif
-#endif
+ pda->led_state = (pda->led_state & ~mask) | (value & mask);
+ *pda->led_address = (short) pda->led_state;
}
#endif /* _ASM_IA64_SN_LEDS_H */
+++ /dev/null
-/*
- * File: mca.h
- * Purpose: Machine check handling specific to the SN platform defines
- *
- * Copyright (C) 2001-2002 Silicon Graphics, Inc. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it would be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
- *
- * Further, this software is distributed without any warranty that it is
- * free of the rightful claim of any third person regarding infringement
- * or the like. Any license provided herein, whether implied or
- * otherwise, applies only to this software file. Patent licenses, if
- * any, provided herein do not apply to combinations of this program with
- * other software, or any other product whatsoever.
- *
- * You should have received a copy of the GNU General Public
- * License along with this program; if not, write the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
- *
- * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy,
- * Mountain View, CA 94043, or:
- *
- * http://www.sgi.com
- *
- * For further information regarding this notice, see:
- *
- * http://oss.sgi.com/projects/GenInfo/NoticeExplan
- */
-
-#include <linux/config.h>
-#include <linux/types.h>
-#include <asm/sal.h>
-#include <asm/mca.h>
-
-#ifdef CONFIG_IA64_SGI_SN
-
-typedef u64 __uint64_t;
-
-typedef struct {
- __uint64_t sh_event_occurred;
- __uint64_t sh_first_error;
- __uint64_t sh_event_overflow;
- __uint64_t sh_pi_first_error;
- __uint64_t sh_pi_error_summary;
- __uint64_t sh_pi_error_overflow;
- __uint64_t sh_pi_error_detail_1;
- __uint64_t sh_pi_error_detail_2;
- __uint64_t sh_pi_hw_time_stamp;
- __uint64_t sh_pi_uncorrected_detail_1;
- __uint64_t sh_pi_uncorrected_detail_2;
- __uint64_t sh_pi_uncorrected_detail_3;
- __uint64_t sh_pi_uncorrected_detail_4;
- __uint64_t sh_pi_uncor_time_stamp;
- __uint64_t sh_pi_corrected_detail_1;
- __uint64_t sh_pi_corrected_detail_2;
- __uint64_t sh_pi_corrected_detail_3;
- __uint64_t sh_pi_corrected_detail_4;
- __uint64_t sh_pi_cor_time_stamp;
- __uint64_t sh_mem_error_summary;
- __uint64_t sh_mem_error_overflow;
- __uint64_t sh_misc_err_hdr_lower;
- __uint64_t sh_misc_err_hdr_upper;
- __uint64_t sh_dir_uc_err_hdr_lower;
- __uint64_t sh_dir_uc_err_hdr_upper;
- __uint64_t sh_dir_cor_err_hdr_lower;
- __uint64_t sh_dir_cor_err_hdr_upper;
- __uint64_t sh_mem_error_mask;
- __uint64_t sh_md_uncor_time_stamp;
- __uint64_t sh_md_cor_time_stamp;
- __uint64_t sh_md_hw_time_stamp;
- __uint64_t sh_xn_error_summary;
- __uint64_t sh_xn_first_error;
- __uint64_t sh_xn_error_overflow;
- __uint64_t sh_xniilb_error_summary;
- __uint64_t sh_xniilb_first_error;
- __uint64_t sh_xniilb_error_overflow;
- __uint64_t sh_xniilb_error_detail_1;
- __uint64_t sh_xniilb_error_detail_2;
- __uint64_t sh_xniilb_error_detail_3;
- __uint64_t sh_xnpi_error_summary;
- __uint64_t sh_xnpi_first_error;
- __uint64_t sh_xnpi_error_overflow;
- __uint64_t sh_xnpi_error_detail_1;
- __uint64_t sh_xnmd_error_summary;
- __uint64_t sh_xnmd_first_error;
- __uint64_t sh_xnmd_error_overflow;
- __uint64_t sh_xnmd_ecc_err_report;
- __uint64_t sh_xnmd_error_detail_1;
- __uint64_t sh_lb_error_summary;
- __uint64_t sh_lb_first_error;
- __uint64_t sh_lb_error_overflow;
- __uint64_t sh_lb_error_detail_1;
- __uint64_t sh_lb_error_detail_2;
- __uint64_t sh_lb_error_detail_3;
- __uint64_t sh_lb_error_detail_4;
- __uint64_t sh_lb_error_detail_5;
-} sal_log_shub_state_t;
-
-typedef struct {
-sal_log_section_hdr_t header;
- struct
- {
- __uint64_t err_status : 1,
- guid : 1,
- oem_data : 1,
- reserved : 61;
- } valid;
- __uint64_t err_status;
- efi_guid_t guid;
- __uint64_t shub_nic;
- sal_log_shub_state_t shub_state;
-} sal_log_plat_info_t;
-
-
-extern void sal_log_plat_print(int header_len, int sect_len, u8 *p_data, prfunc_t prfunc);
-
-#ifdef platform_plat_specific_err_print
-#undef platform_plat_specific_err_print
-#endif
-#define platform_plat_specific_err_print sal_log_plat_print
-
-#endif /* CONFIG_IA64_SGI_SN */
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
- * Copyright (c) 2001-2002 Silicon Graphics, Inc. All rights reserved.
+ * Copyright (c) 2001-2003 Silicon Graphics, Inc. All rights reserved.
*
* Helper file for the SN implementation of mmtimers
*
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
- * Copyright (C) 1992 - 1997, 2000-2002 Silicon Graphics, Inc. All rights reserved.
+ * Copyright (C) 1992 - 1997, 2000-2003 Silicon Graphics, Inc. All rights reserved.
*/
#ifndef _ASM_IA64_SN_MODULE_H
#define _ASM_IA64_SN_MODULE_H
#define MODULE_FORMAT_BRIEF 1
#define MODULE_FORMAT_LONG 2
-
-#ifdef CONFIG_IA64_SGI_SN2
-
/*
* Module id format
*
#define MODULE_NBRICK 7
#define MODULE_PEBRICK 8
#define MODULE_PXBRICK 9
+#define MODULE_IXBRICK 10
/*
* Moduleid_t comparison macros
((_m2)&(MODULE_RACK_MASK|MODULE_BPOS_MASK)))
#define MODULE_MATCH(_m1, _m2) (MODULE_CMP((_m1),(_m2)) == 0)
-
-#else
-#if defined(CONFIG_IA64_SGI_SN1) || defined(CONFIG_IA64_GENERIC)
-
-/*
- * Module id format
- *
- * 15-12 Brick type (enumerated)
- * 11-6 Rack ID (encoded class, group, number)
- * 5-0 Brick position in rack (0-63)
- */
-/*
- * Macros for getting the brick type
- */
-#define MODULE_BTYPE_MASK 0xf000
-#define MODULE_BTYPE_SHFT 12
-#define MODULE_GET_BTYPE(_m) (((_m) & MODULE_BTYPE_MASK) >> MODULE_BTYPE_SHFT)
-#define MODULE_BT_TO_CHAR(_b) (brick_types[(_b)])
-#define MODULE_GET_BTCHAR(_m) (MODULE_BT_TO_CHAR(MODULE_GET_BTYPE(_m)))
-
-/*
- * Macros for getting the rack ID.
- */
-#define MODULE_RACK_MASK 0x0fc0
-#define MODULE_RACK_SHFT 6
-#define MODULE_GET_RACK(_m) (((_m) & MODULE_RACK_MASK) >> MODULE_RACK_SHFT)
-
-/*
- * Macros for getting the brick position
- */
-#define MODULE_BPOS_MASK 0x003f
-#define MODULE_BPOS_SHFT 0
-#define MODULE_GET_BPOS(_m) (((_m) & MODULE_BPOS_MASK) >> MODULE_BPOS_SHFT)
-
-/*
- * Macros for constructing moduleid_t's
- */
-#define RBT_TO_MODULE(_r, _b, _t) ((_r) << MODULE_RACK_SHFT | \
- (_b) << MODULE_BPOS_SHFT | \
- (_t) << MODULE_BTYPE_SHFT)
-
-/*
- * Macros for encoding and decoding rack IDs
- * A rack number consists of three parts:
- * class 1 bit, 0==CPU/mixed, 1==I/O
- * group 2 bits for CPU/mixed, 3 bits for I/O
- * number 3 bits for CPU/mixed, 2 bits for I/O (1 based)
- */
-#define RACK_GROUP_BITS(_r) (RACK_GET_CLASS(_r) ? 3 : 2)
-#define RACK_NUM_BITS(_r) (RACK_GET_CLASS(_r) ? 2 : 3)
-
-#define RACK_CLASS_MASK(_r) 0x20
-#define RACK_CLASS_SHFT(_r) 5
-#define RACK_GET_CLASS(_r) \
- (((_r) & RACK_CLASS_MASK(_r)) >> RACK_CLASS_SHFT(_r))
-#define RACK_ADD_CLASS(_r, _c) \
- ((_r) |= (_c) << RACK_CLASS_SHFT(_r) & RACK_CLASS_MASK(_r))
-
-#define RACK_GROUP_SHFT(_r) RACK_NUM_BITS(_r)
-#define RACK_GROUP_MASK(_r) \
- ( (((unsigned)1<<RACK_GROUP_BITS(_r)) - 1) << RACK_GROUP_SHFT(_r) )
-#define RACK_GET_GROUP(_r) \
- (((_r) & RACK_GROUP_MASK(_r)) >> RACK_GROUP_SHFT(_r))
-#define RACK_ADD_GROUP(_r, _g) \
- ((_r) |= (_g) << RACK_GROUP_SHFT(_r) & RACK_GROUP_MASK(_r))
-
-#define RACK_NUM_SHFT(_r) 0
-#define RACK_NUM_MASK(_r) \
- ( (((unsigned)1<<RACK_NUM_BITS(_r)) - 1) << RACK_NUM_SHFT(_r) )
-#define RACK_GET_NUM(_r) \
- ( (((_r) & RACK_NUM_MASK(_r)) >> RACK_NUM_SHFT(_r)) + 1 )
-#define RACK_ADD_NUM(_r, _n) \
- ((_r) |= ((_n) - 1) << RACK_NUM_SHFT(_r) & RACK_NUM_MASK(_r))
-
-/*
- * Brick type definitions
- */
-#define MAX_BRICK_TYPES 16 /* 1 << (MODULE_RACK_SHFT - MODULE_BTYPE_SHFT */
-
-extern char brick_types[];
-
-#define MODULE_CBRICK 0
-#define MODULE_RBRICK 1
-#define MODULE_IBRICK 2
-#define MODULE_KBRICK 3
-#define MODULE_XBRICK 4
-#define MODULE_DBRICK 5
-#define MODULE_PBRICK 6
-#define MODULE_NBRICK 7
-#define MODULE_PEBRICK 8
-#define MODULE_PXBRICK 9
-
-/*
- * Moduleid_t comparison macros
- */
-/* Don't compare the brick type: only the position is significant */
-#define MODULE_CMP(_m1, _m2) (((_m1)&(MODULE_RACK_MASK|MODULE_BPOS_MASK)) -\
- ((_m2)&(MODULE_RACK_MASK|MODULE_BPOS_MASK)))
-#define MODULE_MATCH(_m1, _m2) (MODULE_CMP((_m1),(_m2)) == 0)
-
-#else
-
-/*
- * Some code that uses this macro will not be conditionally compiled.
- */
-#define MODULE_GET_BTCHAR(_m) ('?')
-#define MODULE_CMP(_m1, _m2) ((_m1) - (_m2))
-#define MODULE_MATCH(_m1, _m2) (MODULE_CMP((_m1),(_m2)) == 0)
-
-#endif /* SN1 */
-#endif /* SN2 */
-
typedef struct module_s module_t;
struct module_s {
/* List of nodes in this module */
cnodeid_t nodes[MODULE_MAX_NODES];
-#ifdef CONFIG_IA64_SGI_SN2
geoid_t geoid[MODULE_MAX_NODES];
struct {
char moduleid[8];
} io[MODULE_MAX_NODES];
-#endif
int nodecnt; /* Number of nodes in array */
-
/* Fields for Module System Controller */
int mesgpend; /* Message pending */
int shutdown; /* Shutdown in progress */
struct semaphore thdcnt; /* Threads finished counter */
-
-#ifdef CONFIG_IA64_SGI_SN1
- elsc_t elsc;
- spinlock_t elsclock;
-#endif
time_t intrhist[MODULE_HIST_CNT];
int histptr;
extern module_t *module_lookup(moduleid_t id);
-#if defined(CONFIG_IA64_SGI_SN1)
-extern elsc_t *get_elsc(void);
-#endif
-
extern int get_kmod_info(cmoduleid_t cmod,
module_info_t *mod_info);
extern int get_kmod_sys_snum(cmoduleid_t cmod,
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
- * Copyright (c) 2001 Silicon Graphics, Inc. All rights reserved.
+ * Copyright (c) 2001-2003 Silicon Graphics, Inc. All rights reserved.
*/
+++ /dev/null
-/* $Id$
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Copyright (C) 1992 - 1997, 2000-2002 Silicon Graphics, Inc. All rights reserved.
- */
-#ifndef _ASM_IA64_SN_NIC_H
-#define _ASM_IA64_SN_NIC_H
-
-#include <asm/types.h>
-#include <asm/sn/types.h>
-#include <linux/devfs_fs_kernel.h>
-
-#define MCR_DATA(x) ((int) ((x) & 1))
-#define MCR_DONE(x) ((x) & 2)
-#define MCR_PACK(pulse, sample) ((pulse) << 10 | (sample) << 2)
-
-typedef __psunsigned_t nic_data_t;
-
-typedef int
-nic_access_f(nic_data_t data,
- int pulse, int sample, int delay);
-
-typedef nic_access_f *nic_access_t;
-
-typedef struct nic_vmce_s *nic_vmce_t;
-typedef void nic_vmc_func(devfs_handle_t v);
-
-/*
- * PRIVATE data for Dallas NIC
- */
-
-typedef struct nic_state_t {
- nic_access_t access;
- nic_data_t data;
- int last_disc;
- int done;
- int bit_index;
- int disc_marker;
- uchar_t bits[64];
-} nic_state_t;
-
-/*
- * Public interface for Dallas NIC
- *
- *
- * Access Routine
- *
- * nic_setup requires an access routine that pulses the NIC line for a
- * specified duration, samples the NIC line after a specified duration,
- * then delays for a third specified duration (for precharge).
- *
- * This general scheme allows us to access NICs through any medium
- * (e.g. hub regs, bridge regs, vector writes, system ctlr commands).
- *
- * The access routine should return the sample value 0 or 1, or if an
- * error occurs, return a negative error code. Negative error codes from
- * the access routine will abort the NIC operation and be propagated
- * through out of the top-level NIC call.
- */
-
-#define NIC_OK 0
-#define NIC_DONE 1
-#define NIC_FAIL 2
-#define NIC_BAD_CRC 3
-#define NIC_NOT_PRESENT 4
-#define NIC_REDIR_LOOP 5
-#define NIC_PARAM 6
-#define NIC_NOMEM 7
-
-uint64_t nic_get_phase_bits(void);
-
-extern int nic_setup(nic_state_t *ns,
- nic_access_t access,
- nic_data_t data);
-
-extern int nic_next(nic_state_t *ns,
- char *serial,
- char *family,
- char *crc);
-
-extern int nic_read_one_page(nic_state_t *ns,
- char *family,
- char *serial,
- char *crc,
- int start,
- uchar_t *redirect,
- uchar_t *byte);
-
-extern int nic_read_mfg(nic_state_t *ns,
- char *family,
- char *serial,
- char *crc,
- uchar_t *pageA,
- uchar_t *pageB);
-
-extern int nic_info_get(nic_access_t access,
- nic_data_t data,
- char *info);
-
-extern int nic_item_info_get(char *buf, char *item, char **item_info);
-
-nic_access_f nic_access_mcr32;
-
-extern char *nic_vertex_info_get(devfs_handle_t v);
-
-extern char *nic_vertex_info_set(nic_access_t access,
- nic_data_t data,
- devfs_handle_t v);
-
-extern int nic_vertex_info_match(devfs_handle_t vertex,
- char *name);
-
-extern char *nic_bridge_vertex_info(devfs_handle_t vertex,
- nic_data_t data);
-extern char *nic_hq4_vertex_info(devfs_handle_t vertex,
- nic_data_t data);
-extern char *nic_ioc3_vertex_info(devfs_handle_t vertex,
- nic_data_t data,
- int32_t *gpcr_s);
-
-extern char *nic_hub_vertex_info(devfs_handle_t vertex);
-
-extern nic_vmce_t nic_vmc_add(char *, nic_vmc_func *);
-extern void nic_vmc_del(nic_vmce_t);
-
-#endif /* _ASM_IA64_SN_NIC_H */
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
- * Copyright (C) 1992 - 1997, 2000-2002 Silicon Graphics, Inc. All rights reserved.
+ * Copyright (C) 1992 - 1997, 2000-2003 Silicon Graphics, Inc. All rights reserved.
*/
#ifndef _ASM_IA64_SN_NODEPDA_H
#define _ASM_IA64_SN_NODEPDA_H
#include <linux/config.h>
+#include <asm/sn/sgi.h>
#include <asm/irq.h>
#include <asm/sn/intr.h>
#include <asm/sn/router.h>
-#if defined(CONFIG_IA64_SGI_SN1)
-#include <asm/sn/sn1/synergy.h>
-#endif
#include <asm/sn/pda.h>
#include <asm/sn/module.h>
#include <asm/sn/bte.h>
-#if defined(CONFIG_IA64_SGI_SN1)
-#include <asm/sn/sn1/hubstat.h>
-#endif
-
/*
* NUMA Node-Specific Data structures are defined in this file.
* In particular, this is the location of the node PDA.
*/
/*
- * Subnode PDA structures. Each node needs a few data structures that
- * correspond to the PIs on the HUB chip that supports the node.
- */
-#if defined(CONFIG_IA64_SGI_SN1)
-struct subnodepda_s {
- intr_vecblk_t intr_dispatch0;
- intr_vecblk_t intr_dispatch1;
-};
-
-typedef struct subnodepda_s subnode_pda_t;
-
-
-struct synergy_perf_s;
-#endif
-
-
-/*
* Node-specific data structure.
*
* One of these structures is allocated on each node of a NUMA system.
/* the second cpu on a node is */
/* node_first_cpu+1. */
- devfs_handle_t xbow_vhdl;
+ vertex_hdl_t xbow_vhdl;
nasid_t xbow_peer; /* NASID of our peer hub on xbow */
struct semaphore xbow_sema; /* Sema for xbow synchronization */
slotid_t slotdesc;
-#ifdef CONFIG_IA64_SGI_SN2
geoid_t geoid;
-#else
- moduleid_t module_id; /* Module ID (redundant local copy) */
-#endif
module_t *module; /* Pointer to containing module */
xwidgetnum_t basew_id;
- devfs_handle_t basew_xc;
+ vertex_hdl_t basew_xc;
int hubticks;
int num_routers; /* XXX not setup! Total routers in the system */
char *hwg_node_name; /* hwgraph node name */
- devfs_handle_t node_vertex; /* Hwgraph vertex for this node */
+ vertex_hdl_t node_vertex; /* Hwgraph vertex for this node */
void *pdinfo; /* Platform-dependent per-node info */
/*
* The BTEs on this node are shared by the local cpus
*/
- bteinfo_t bte_if[BTES_PER_NODE]; /* Virtual Interface */
- char bte_cleanup[5 * L1_CACHE_BYTES] ____cacheline_aligned;
-
-#if defined(CONFIG_IA64_SGI_SN1)
- subnode_pda_t snpda[NUM_SUBNODES];
- /*
- * New extended memory reference counters
- */
- void *migr_refcnt_counterbase;
- void *migr_refcnt_counterbuffer;
- size_t migr_refcnt_cbsize;
- int migr_refcnt_numsets;
- hubstat_t hubstats;
- int synergy_perf_enabled;
- int synergy_perf_freq;
- spinlock_t synergy_perf_lock;
- uint64_t synergy_inactive_intervals;
- uint64_t synergy_active_intervals;
- struct synergy_perf_s *synergy_perf_data;
- struct synergy_perf_s *synergy_perf_first; /* reporting consistency .. */
-#endif /* CONFIG_IA64_SGI_SN1 */
+ struct bteinfo_s bte_if[BTES_PER_NODE]; /* Virtual Interface */
+ struct timer_list bte_recovery_timer;
+ spinlock_t bte_recovery_lock;
/*
* Array of pointers to the nodepdas for each node.
typedef struct nodepda_s nodepda_t;
-#ifdef CONFIG_IA64_SGI_SN2
-#define NR_IVECS 256
struct irqpda_s {
int num_irq_used;
- char irq_flags[NR_IVECS];
+ char irq_flags[NR_IRQS];
+ struct pci_dev *device_dev[NR_IRQS];
+ char share_count[NR_IRQS];
+ struct pci_dev *current;
};
typedef struct irqpda_s irqpda_t;
-#endif /* CONFIG_IA64_SGI_SN2 */
-
-
/*
* Access Functions for node PDA.
#define nodepda pda->p_nodepda /* Ptr to this node's PDA */
#define NODEPDA(cnode) (nodepda->pernode_pdaindr[cnode])
-#if defined(CONFIG_IA64_SGI_SN1)
-#define subnodepda pda.p_subnodepda /* Ptr to this node's subnode PDA */
-#define SUBNODEPDA(cnode,sn) (&(NODEPDA(cnode)->snpda[sn]))
-#define SNPDA(npda,sn) (&(npda)->snpda[sn])
-#endif
-
/*
* Macros to access data structures inside nodepda
*/
-#ifdef CONFIG_IA64_SGI_SN2
#define NODE_MODULEID(cnode) geo_module((NODEPDA(cnode)->geoid))
-#else
-#define NODE_MODULEID(cnode) (NODEPDA(cnode)->module_id)
-#endif
#define NODE_SLOTID(cnode) (NODEPDA(cnode)->slotdesc)
* Check if given a compact node id the corresponding node has all the
* cpus disabled.
*/
-#define is_headless_node(cnode) 0 /*((cnode == CNODEID_NONE) || \
- (node_data(cnode)->active_cpu_count == 0)) */
+#define is_headless_node(cnode) ((cnode == CNODEID_NONE) || \
+ (node_data(cnode)->active_cpu_count == 0))
/*
* Check if given a node vertex handle the corresponding node has all the
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
- * Copyright (C) 1992 - 1997, 2000-2001 Silicon Graphics, Inc. All rights reserved.
+ * Copyright (c) 1992-1997,2000-2003 Silicon Graphics, Inc. All rights reserved.
*/
#ifndef _ASM_SN_PCI_BRIDGE_H
#define _ASM_SN_PCI_BRIDGE_H
#include <linux/config.h>
#include <asm/sn/xtalk/xwidget.h>
-#ifndef CONFIG_IA64_SGI_SN1
#include <asm/sn/pci/pic.h>
extern int io_get_sh_swapper(nasid_t);
#define BRIDGE_REG_SET32(reg) \
*(volatile uint32_t *) (((uint64_t)reg)^4)
-#endif /* CONFIG_IA64_SGI_SN1 */
/* I/O page size */
* Generated from Bridge spec dated 04oct95
*/
-#ifndef CONFIG_IA64_SGI_SN1
/*
* pic_widget_cfg_s is a local definition of widget_cfg_t but with
} b_external_flash;
} bridge_t;
-#else /* CONFIG_IA64_SGI_SN1 */
-
-
-typedef volatile struct bridge_s {
-
- /* Local Registers 0x000000-0x00FFFF */
-
- /* standard widget configuration 0x000000-0x000057 */
- widget_cfg_t b_widget; /* 0x000000 */
-
- /* helper fieldnames for accessing bridge widget */
-
-#define b_wid_id b_widget.w_id
-#define b_wid_stat b_widget.w_status
-#define b_wid_err_upper b_widget.w_err_upper_addr
-#define b_wid_err_lower b_widget.w_err_lower_addr
-#define b_wid_control b_widget.w_control
-#define b_wid_req_timeout b_widget.w_req_timeout
-#define b_wid_int_upper b_widget.w_intdest_upper_addr
-#define b_wid_int_lower b_widget.w_intdest_lower_addr
-#define b_wid_err_cmdword b_widget.w_err_cmd_word
-#define b_wid_llp b_widget.w_llp_cfg
-#define b_wid_tflush b_widget.w_tflush
-
- /*
- * we access these through synergy unswizzled space, so the address
- * gets twiddled (i.e. references to 0x4 actually go to 0x0 and vv.)
- * That's why we put the register first and filler second.
- */
- /* bridge-specific widget configuration 0x000058-0x00007F */
- bridgereg_t b_wid_aux_err; /* 0x00005C */
- bridgereg_t _pad_000058;
-
- bridgereg_t b_wid_resp_upper; /* 0x000064 */
- bridgereg_t _pad_000060;
-
- bridgereg_t b_wid_resp_lower; /* 0x00006C */
- bridgereg_t _pad_000068;
-
- bridgereg_t b_wid_tst_pin_ctrl; /* 0x000074 */
- bridgereg_t _pad_000070;
-
- bridgereg_t _pad_000078[2];
-
- /* PMU & Map 0x000080-0x00008F */
- bridgereg_t b_dir_map; /* 0x000084 */
- bridgereg_t _pad_000080;
- bridgereg_t _pad_000088[2];
-
- /* SSRAM 0x000090-0x00009F */
- bridgereg_t b_ram_perr_or_map_fault;/* 0x000094 */
- bridgereg_t _pad_000090;
-#define b_ram_perr b_ram_perr_or_map_fault /* Bridge */
-#define b_map_fault b_ram_perr_or_map_fault /* Xbridge */
- bridgereg_t _pad_000098[2];
-
- /* Arbitration 0x0000A0-0x0000AF */
- bridgereg_t b_arb; /* 0x0000A4 */
- bridgereg_t _pad_0000A0;
- bridgereg_t _pad_0000A8[2];
-
- /* Number In A Can 0x0000B0-0x0000BF */
- bridgereg_t b_nic; /* 0x0000B4 */
- bridgereg_t _pad_0000B0;
- bridgereg_t _pad_0000B8[2];
-
- /* PCI/GIO 0x0000C0-0x0000FF */
- bridgereg_t b_bus_timeout; /* 0x0000C4 */
- bridgereg_t _pad_0000C0;
-#define b_pci_bus_timeout b_bus_timeout
-
- bridgereg_t b_pci_cfg; /* 0x0000CC */
- bridgereg_t _pad_0000C8;
-
- bridgereg_t b_pci_err_upper; /* 0x0000D4 */
- bridgereg_t _pad_0000D0;
-
- bridgereg_t b_pci_err_lower; /* 0x0000DC */
- bridgereg_t _pad_0000D8;
- bridgereg_t _pad_0000E0[8];
-#define b_gio_err_lower b_pci_err_lower
-#define b_gio_err_upper b_pci_err_upper
-
- /* Interrupt 0x000100-0x0001FF */
- bridgereg_t b_int_status; /* 0x000104 */
- bridgereg_t _pad_000100;
-
- bridgereg_t b_int_enable; /* 0x00010C */
- bridgereg_t _pad_000108;
-
- bridgereg_t b_int_rst_stat; /* 0x000114 */
- bridgereg_t _pad_000110;
-
- bridgereg_t b_int_mode; /* 0x00011C */
- bridgereg_t _pad_000118;
-
- bridgereg_t b_int_device; /* 0x000124 */
- bridgereg_t _pad_000120;
-
- bridgereg_t b_int_host_err; /* 0x00012C */
- bridgereg_t _pad_000128;
-
- struct {
- bridgereg_t addr; /* 0x0001{34,,,6C} */
- bridgereg_t __pad; /* 0x0001{30,,,68} */
- } b_int_addr[8]; /* 0x000130 */
-
- bridgereg_t b_err_int_view; /* 0x000174 */
- bridgereg_t _pad_000170;
-
- bridgereg_t b_mult_int; /* 0x00017c */
- bridgereg_t _pad_000178;
-
- struct {
- bridgereg_t intr; /* 0x0001{84,,,BC} */
- bridgereg_t __pad; /* 0x0001{80,,,B8} */
- } b_force_always[8]; /* 0x000180 */
-
- struct {
- bridgereg_t intr; /* 0x0001{C4,,,FC} */
- bridgereg_t __pad; /* 0x0001{C0,,,F8} */
- } b_force_pin[8]; /* 0x0001C0 */
-
- /* Device 0x000200-0x0003FF */
- struct {
- bridgereg_t reg; /* 0x0002{04,,,3C} */
- bridgereg_t __pad; /* 0x0002{00,,,38} */
- } b_device[8]; /* 0x000200 */
-
- struct {
- bridgereg_t reg; /* 0x0002{44,,,7C} */
- bridgereg_t __pad; /* 0x0002{40,,,78} */
- } b_wr_req_buf[8]; /* 0x000240 */
-
- struct {
- bridgereg_t reg; /* 0x0002{84,,,8C} */
- bridgereg_t __pad; /* 0x0002{80,,,88} */
- } b_rrb_map[2]; /* 0x000280 */
-#define b_even_resp b_rrb_map[0].reg /* 0x000284 */
-#define b_odd_resp b_rrb_map[1].reg /* 0x00028C */
-
- bridgereg_t b_resp_status; /* 0x000294 */
- bridgereg_t _pad_000290;
-
- bridgereg_t b_resp_clear; /* 0x00029C */
- bridgereg_t _pad_000298;
-
- bridgereg_t _pad_0002A0[24];
-
- /* Xbridge only */
- struct {
- bridgereg_t upper; /* 0x0003{04,,,F4} */
- bridgereg_t __pad1; /* 0x0003{00,,,F0} */
- bridgereg_t lower; /* 0x0003{0C,,,FC} */
- bridgereg_t __pad2; /* 0x0003{08,,,F8} */
- } b_buf_addr_match[16];
-
- /* Performance Monitor Registers (even only) */
- struct {
- bridgereg_t flush_w_touch; /* 0x000404,,,5C4 */
- bridgereg_t __pad1; /* 0x000400,,,5C0 */
-
- bridgereg_t flush_wo_touch; /* 0x00040C,,,5CC */
- bridgereg_t __pad2; /* 0x000408,,,5C8 */
-
- bridgereg_t inflight; /* 0x000414,,,5D4 */
- bridgereg_t __pad3; /* 0x000410,,,5D0 */
-
- bridgereg_t prefetch; /* 0x00041C,,,5DC */
- bridgereg_t __pad4; /* 0x000418,,,5D8 */
-
- bridgereg_t total_pci_retry; /* 0x000424,,,5E4 */
- bridgereg_t __pad5; /* 0x000420,,,5E0 */
-
- bridgereg_t max_pci_retry; /* 0x00042C,,,5EC */
- bridgereg_t __pad6; /* 0x000428,,,5E8 */
-
- bridgereg_t max_latency; /* 0x000434,,,5F4 */
- bridgereg_t __pad7; /* 0x000430,,,5F0 */
-
- bridgereg_t clear_all; /* 0x00043C,,,5FC */
- bridgereg_t __pad8; /* 0x000438,,,5F8 */
- } b_buf_count[8];
-
- char _pad_000600[0x010000 - 0x000600];
-
- /*
- * The Xbridge has 1024 internal ATE's and the Bridge has 128.
- * Make enough room for the Xbridge ATE's and depend on runtime
- * checks to limit access to bridge ATE's.
- */
-
- /* Internal Address Translation Entry RAM 0x010000-0x011fff */
- union {
- bridge_ate_t wr; /* write-only */
- struct {
- bridgereg_t rd; /* read-only */
- bridgereg_t _p_pad;
- } hi;
- } b_int_ate_ram[XBRIDGE_INTERNAL_ATES];
-
-#define b_int_ate_ram_lo(idx) b_int_ate_ram[idx+512].hi.rd
-
- /* the xbridge read path for internal ates starts at 0x12000.
- * I don't believe we ever try to read the ates.
- */
- /* Internal Address Translation Entry RAM LOW 0x012000-0x013fff */
- struct {
- bridgereg_t rd;
- bridgereg_t _p_pad;
- } xb_int_ate_ram_lo[XBRIDGE_INTERNAL_ATES];
-
- char _pad_014000[0x20000 - 0x014000];
-
- /* PCI Device Configuration Spaces 0x020000-0x027FFF */
- union { /* make all access sizes available. */
- uchar_t c[0x1000 / 1];
- uint16_t s[0x1000 / 2];
- uint32_t l[0x1000 / 4];
- uint64_t d[0x1000 / 8];
- union {
- uchar_t c[0x100 / 1];
- uint16_t s[0x100 / 2];
- uint32_t l[0x100 / 4];
- uint64_t d[0x100 / 8];
- } f[8];
- } b_type0_cfg_dev[8]; /* 0x020000 */
-
- /* PCI Type 1 Configuration Space 0x028000-0x028FFF */
- union { /* make all access sizes available. */
- uchar_t c[0x1000 / 1];
- uint16_t s[0x1000 / 2];
- uint32_t l[0x1000 / 4];
- uint64_t d[0x1000 / 8];
- union {
- uchar_t c[0x100 / 1];
- uint16_t s[0x100 / 2];
- uint32_t l[0x100 / 4];
- uint64_t d[0x100 / 8];
- } f[8];
- } b_type1_cfg; /* 0x028000-0x029000 */
-
- char _pad_029000[0x007000]; /* 0x029000-0x030000 */
-
- /* PCI Interrupt Acknowledge Cycle 0x030000 */
- union {
- uchar_t c[8 / 1];
- uint16_t s[8 / 2];
- uint32_t l[8 / 4];
- uint64_t d[8 / 8];
- } b_pci_iack; /* 0x030000 */
-
- uchar_t _pad_030007[0x04fff8]; /* 0x030008-0x07FFFF */
-
- /* External Address Translation Entry RAM 0x080000-0x0FFFFF */
- bridge_ate_t b_ext_ate_ram[0x10000];
-
- /* Reserved 0x100000-0x1FFFFF */
- char _pad_100000[0x200000-0x100000];
-
- /* PCI/GIO Device Spaces 0x200000-0xBFFFFF */
- union { /* make all access sizes available. */
- uchar_t c[0x100000 / 1];
- uint16_t s[0x100000 / 2];
- uint32_t l[0x100000 / 4];
- uint64_t d[0x100000 / 8];
- } b_devio_raw[10]; /* 0x200000 */
-
- /* b_devio macro is a bit strange; it reflects the
- * fact that the Bridge ASIC provides 2M for the
- * first two DevIO windows and 1M for the other six.
- */
-#define b_devio(n) b_devio_raw[((n)<2)?(n*2):(n+2)]
-
- /* External Flash Proms 1,0 0xC00000-0xFFFFFF */
- union { /* make all access sizes available. */
- uchar_t c[0x400000 / 1]; /* read-only */
- uint16_t s[0x400000 / 2]; /* read-write */
- uint32_t l[0x400000 / 4]; /* read-only */
- uint64_t d[0x400000 / 8]; /* read-only */
- } b_external_flash; /* 0xC00000 */
-} bridge_t;
-
-#endif /* CONFIG_IA64_SGI_SN1 */
-
-
#define berr_field berr_un.berr_st
#endif /* __ASSEMBLY__ */
#define BRIDGE_ISR_ERRORS \
(BRIDGE_ISR_LINK_ERROR|BRIDGE_ISR_PCIBUS_ERROR| \
BRIDGE_ISR_XTALK_ERROR|BRIDGE_ISR_SSRAM_PERR| \
- BRIDGE_ISR_PMU_ESIZE_FAULT|PIC_ISR_PCIX_ARB_ERR| \
- PIC_ISR_INT_RAM_PERR)
+ BRIDGE_ISR_PMU_ESIZE_FAULT|PIC_ISR_INT_RAM_PERR)
/*
* List of Errors which are fatal and kill the sytem
#define BRIDGE_TMO_PCI_RETRY_CNT_MAX 0x3ff
-#ifdef SN0
-/*
- * The NASID should be shifted by this amount and stored into the
- * interrupt(x) register.
- */
-#define BRIDGE_INT_ADDR_NASID_SHFT 8
-
-/*
- * The BRIDGE_INT_ADDR_DEST_IO bit should be set to send an interrupt to
- * memory.
- */
-#define BRIDGE_INT_ADDR_DEST_IO (1 << 17)
-#define BRIDGE_INT_ADDR_DEST_MEM 0
-#define BRIDGE_INT_ADDR_MASK (1 << 17)
-#endif
-
/* Bridge device(x) register bits definition */
#define BRIDGE_DEV_ERR_LOCK_EN (1ull << 28)
#define BRIDGE_DEV_PAGE_CHK_DIS (1ull << 27)
#define BRIDGE_PCI_IO_LIMIT BRIDGE_PCIIO_XTALK_ALIAS_LIMIT
/*
+ * Macros for Xtalk to Bridge bus (PCI) PIO
+ * refer to section 5.2.1 Figure 4 of the "PCI Interface Chip (PIC) Volume II
+ * Programmer's Reference" (Revision 0.8 as of this writing).
+ *
+ * These are PIC bridge specific. A separate set of macros was defined
+ * because PIC deviates from Bridge/Xbridge by not supporting a big-window
+ * alias for PCI I/O space, and also redefines XTALK addresses
+ * 0x0000C0000000L and 0x000100000000L to be PCI MEM aliases for the second
+ * bus.
+ */
+
+/* XTALK addresses that map into PIC Bridge Bus addr space */
+#define PICBRIDGE0_PIO32_XTALK_ALIAS_BASE 0x000040000000L
+#define PICBRIDGE0_PIO32_XTALK_ALIAS_LIMIT 0x00007FFFFFFFL
+#define PICBRIDGE0_PIO64_XTALK_ALIAS_BASE 0x000080000000L
+#define PICBRIDGE0_PIO64_XTALK_ALIAS_LIMIT 0x0000BFFFFFFFL
+#define PICBRIDGE1_PIO32_XTALK_ALIAS_BASE 0x0000C0000000L
+#define PICBRIDGE1_PIO32_XTALK_ALIAS_LIMIT 0x0000FFFFFFFFL
+#define PICBRIDGE1_PIO64_XTALK_ALIAS_BASE 0x000100000000L
+#define PICBRIDGE1_PIO64_XTALK_ALIAS_LIMIT 0x00013FFFFFFFL
+
+/* XTALK addresses that map into PCI addresses */
+#define PICBRIDGE0_PCI_MEM32_BASE PICBRIDGE0_PIO32_XTALK_ALIAS_BASE
+#define PICBRIDGE0_PCI_MEM32_LIMIT PICBRIDGE0_PIO32_XTALK_ALIAS_LIMIT
+#define PICBRIDGE0_PCI_MEM64_BASE PICBRIDGE0_PIO64_XTALK_ALIAS_BASE
+#define PICBRIDGE0_PCI_MEM64_LIMIT PICBRIDGE0_PIO64_XTALK_ALIAS_LIMIT
+#define PICBRIDGE1_PCI_MEM32_BASE PICBRIDGE1_PIO32_XTALK_ALIAS_BASE
+#define PICBRIDGE1_PCI_MEM32_LIMIT PICBRIDGE1_PIO32_XTALK_ALIAS_LIMIT
+#define PICBRIDGE1_PCI_MEM64_BASE PICBRIDGE1_PIO64_XTALK_ALIAS_BASE
+#define PICBRIDGE1_PCI_MEM64_LIMIT PICBRIDGE1_PIO64_XTALK_ALIAS_LIMIT
+
+/*
* Macros for Bridge bus (PCI/GIO) to Xtalk DMA
*/
/* Bridge Bus DMA addresses */
#define ATE_SWAP_ON(x) ((x) |= (1 << ATE_SWAPSHIFT))
#define ATE_SWAP_OFF(x) ((x) &= ~(1 << ATE_SWAPSHIFT))
-#define is_xbridge(bridge) IS_XBRIDGE(bridge->b_wid_id)
-#define is_pic(bridge) IS_PIC_BRIDGE(bridge->b_wid_id)
-
/* extern declarations */
#ifndef __ASSEMBLY__
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
- * Copyright (C) 1992 - 1997, 2000-2002 Silicon Graphics, Inc. All rights reserved.
+ * Copyright (C) 1992 - 1997, 2000-2003 Silicon Graphics, Inc. All rights reserved.
*/
#ifndef _ASM_SN_PCI_CVLINK_H
#define _ASM_SN_PCI_CVLINK_H
#include <asm/sn/types.h>
-#include <asm/sn/hack.h>
#include <asm/sn/sgi.h>
#include <asm/sn/driver.h>
#include <asm/sn/iograph.h>
(((struct sn_widget_sysdata *)((pci_bus)->sysdata))->vhdl)
struct sn_widget_sysdata {
- devfs_handle_t vhdl;
+ vertex_hdl_t vhdl;
};
struct sn_device_sysdata {
- devfs_handle_t vhdl;
+ vertex_hdl_t vhdl;
int isa64;
int isPIC;
volatile unsigned int *dma_buf_sync;
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
- * Copyright (C) 1992 - 1997, 2000-2001 Silicon Graphics, Inc. All rights reserved.
+ * Copyright (c) 1992-1997,2000-2003 Silicon Graphics, Inc. All rights reserved.
*/
#ifndef _ASM_SN_PCI_PCI_DEFS_H
#define _ASM_SN_PCI_PCI_DEFS_H
#ifndef __ASSEMBLY__
+#ifdef LITTLE_ENDIAN
+
+/*
+ * PCI config space definition
+ */
+typedef volatile struct pci_cfg_s {
+ uint16_t vendor_id;
+ uint16_t dev_id;
+ uint16_t cmd;
+ uint16_t status;
+ uchar_t rev;
+ uchar_t prog_if;
+ uchar_t sub_class;
+ uchar_t class;
+ uchar_t line_size;
+ uchar_t lt;
+ uchar_t hdr_type;
+ uchar_t bist;
+ uint32_t bar[6];
+ uint32_t cardbus;
+ uint16_t subsys_vendor_id;
+ uint16_t subsys_dev_id;
+ uint32_t exp_rom;
+ uint32_t res[2];
+ uchar_t int_line;
+ uchar_t int_pin;
+ uchar_t min_gnt;
+ uchar_t max_lat;
+} pci_cfg_t;
+
+/*
+ * PCI Type 1 config space definition for PCI to PCI Bridges (PPBs)
+ */
+typedef volatile struct pci_cfg1_s {
+ uint16_t vendor_id;
+ uint16_t dev_id;
+ uint16_t cmd;
+ uint16_t status;
+ uchar_t rev;
+ uchar_t prog_if;
+ uchar_t sub_class;
+ uchar_t class;
+ uchar_t line_size;
+ uchar_t lt;
+ uchar_t hdr_type;
+ uchar_t bist;
+ uint32_t bar[2];
+ uchar_t pri_bus_num;
+ uchar_t snd_bus_num;
+ uchar_t sub_bus_num;
+ uchar_t slt;
+ uchar_t io_base;
+ uchar_t io_limit;
+ uint16_t snd_status;
+ uint16_t mem_base;
+ uint16_t mem_limit;
+ uint16_t pmem_base;
+ uint16_t pmem_limit;
+ uint32_t pmem_base_upper;
+ uint32_t pmem_limit_upper;
+ uint16_t io_base_upper;
+ uint16_t io_limit_upper;
+ uint32_t res;
+ uint32_t exp_rom;
+ uchar_t int_line;
+ uchar_t int_pin;
+ uint16_t ppb_control;
+
+} pci_cfg1_t;
+
+/*
+ * PCI-X Capability
+ */
+typedef volatile struct cap_pcix_cmd_reg_s {
+ uint16_t data_parity_enable: 1,
+ enable_relaxed_order: 1,
+ max_mem_read_cnt: 2,
+ max_split: 3,
+ reserved1: 9;
+} cap_pcix_cmd_reg_t;
+
+typedef volatile struct cap_pcix_stat_reg_s {
+ uint32_t func_num: 3,
+ dev_num: 5,
+ bus_num: 8,
+ bit64_device: 1,
+ mhz133_capable: 1,
+ split_complt_discard: 1,
+ unexpect_split_complt: 1,
+ device_complex: 1,
+ max_mem_read_cnt: 2,
+ max_out_split: 3,
+ max_cum_read: 3,
+ split_complt_err: 1,
+ reserved1: 2;
+} cap_pcix_stat_reg_t;
+
+typedef volatile struct cap_pcix_type0_s {
+ uchar_t pcix_cap_id;
+ uchar_t pcix_cap_nxt;
+ cap_pcix_cmd_reg_t pcix_type0_command;
+ cap_pcix_stat_reg_t pcix_type0_status;
+} cap_pcix_type0_t;
+
+#else
+
/*
* PCI config space definition
*/
uchar_t int_line;
} pci_cfg1_t;
+
+
/*
* PCI-X Capability
*/
cap_pcix_stat_reg_t pcix_type0_status;
} cap_pcix_type0_t;
+#endif
#endif /* __ASSEMBLY__ */
#endif /* _ASM_SN_PCI_PCI_DEFS_H */
+++ /dev/null
-/*
- * This file is subject to the terms and conditions of the GNU General
- * Public License. See the file "COPYING" in the main directory of
- * this archive for more details.
- *
- * Copyright (C) 1997, 2001 Silicon Graphics, Inc. All rights reserved.
- *
- */
-
-#ifndef _ASM_SN_PCI_PCIBA_H
-#define _ASM_SN_PCI_PCIBA_H
-
-#include <linux/ioctl.h>
-#include <linux/types.h>
-#include <linux/pci.h>
-
-/* for application compatibility with IRIX (why do I bother?) */
-
-#ifndef __KERNEL__
-typedef u_int8_t uint8_t;
-typedef u_int16_t uint16_t;
-typedef u_int32_t uint32_t;
-#endif
-
-#define PCI_CFG_VENDOR_ID PCI_VENDOR_ID
-#define PCI_CFG_COMMAND PCI_COMMAND
-#define PCI_CFG_REV_ID PCI_REVISION_ID
-#define PCI_CFG_HEADER_TYPE PCI_HEADER_TYPE
-#define PCI_CFG_BASE_ADDR(n) PCI_BASE_ADDRESS_##n
-
-
-/* /hw/.../pci/[slot]/config accepts ioctls to read
- * and write specific registers as follows:
- *
- * "t" is the native type (char, short, uint32, uint64)
- * to read from CFG space; results will be arranged in
- * byte significance (ie. first byte from PCI is lowest
- * or last byte in result).
- *
- * "r" is the byte offset in PCI CFG space of the first
- * byte of the register (it's least significant byte,
- * in the little-endian PCI numbering). This can actually
- * be as much as 16 bits wide, and is intended to match
- * the layout of a "Type 1 Configuration Space" address:
- * the register number in the low eight bits, then three
- * bits for the function number and five bits for the
- * slot number.
- */
-#define PCIIOCCFGRD(t,r) _IOR(0,(r),t)
-#define PCIIOCCFGWR(t,r) _IOW(0,(r),t)
-
-/* Some common config register access commands.
- * Use these as examples of how to construct
- * values for other registers you want to access.
- */
-
-/* PCIIOCGETID: arg is ptr to 32-bit int,
- * returns the 32-bit ID value with VENDOR
- * in the bottom 16 bits and DEVICE in the top.
- */
-#define PCIIOCGETID PCIIOCCFGRD(uint32_t,PCI_CFG_VENDOR_ID)
-
-/* PCIIOCSETCMD: arg is ptr to a 16-bit short,
- * which will be written to the CMD register.
- */
-#define PCIIOCSETCMD PCIIOCCFGWR(uint16_t,PCI_CFG_COMMAND)
-
-/* PCIIOCGETREV: arg is ptr to an 8-bit char,
- * which will get the 8-bit revision number.
- */
-#define PCIIOCGETREV PCIIOCCFGRD(uint8_t,PCI_CFG_REV_ID)
-
-/* PCIIOCGETHTYPE: arg is ptr to an 8-bit char,
- * which will get the 8-bit header type.
- */
-#define PCIIOCGETHTYPE PCIIOCCFGRD(uint8_t,PCI_CFG_HEADER_TYPE)
-
-/* PCIIOCGETBASE(n): arg is ptr to a 32-bit int,
- * which will get the value of the BASE<n> register.
- */
-
-/* FIXME chadt: this doesn't tell me whether or not this will work
- with non-constant 'n.' */
-#define PCIIOCGETBASE(n) PCIIOCCFGRD(uint32_t,PCI_CFG_BASE_ADDR(n))
-
-
-/* /hw/.../pci/[slot]/dma accepts ioctls to allocate
- * and free physical memory for use in user-triggered
- * DMA operations.
- */
-#define PCIIOCDMAALLOC _IOWR(0,1,uint64_t)
-#define PCIIOCDMAFREE _IOW(0,1,uint64_t)
-
-/* pio cache-mode ioctl defines. current only uncached accelerated */
-#define PCIBA_CACHE_MODE_SET 1
-#define PCIBA_CACHE_MODE_CLEAR 2
-#ifdef PIOMAP_UNC_ACC
-#define PCIBA_UNCACHED_ACCEL PIOMAP_UNC_ACC
-#endif
-
-/* The parameter for PCIIOCDMAALLOC needs to contain
- * both the size of the request and the flag values
- * to be used in setting up the DMA.
- *
-
-FIXME chadt: gonna have to revisit this: what flags would an IRIXer like to
- have available?
-
- * Any flags normally useful in pciio_dmamap
- * or pciio_dmatrans function calls can6 be used here. */
-#define PCIIOCDMAALLOC_REQUEST_PACK(flags,size) \
- ((((uint64_t)(flags))<<32)| \
- (((uint64_t)(size))&0xFFFFFFFF))
-
-
-#ifdef __KERNEL__
-extern int pciba_init(void);
-#endif
-
-
-#endif /* _ASM_SN_PCI_PCIBA_H */
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
- * Copyright (C) 1992 - 1997, 2000-2001 Silicon Graphics, Inc. All rights reserved.
+ * Copyright (C) 1992-1997,2000-2003 Silicon Graphics, Inc. All rights reserved.
*/
#ifndef _ASM_SN_PCI_PCIBR_H
#define _ASM_SN_PCI_PCIBR_H
* code and part number registered by pcibr_init().
*/
-extern void pcibr_init(void);
-
-extern int pcibr_attach(devfs_handle_t);
+extern int pcibr_attach(vertex_hdl_t);
/* =====================================================================
* bus provider function table
* smarts on the part of the compilation system).
*/
-extern pcibr_piomap_t pcibr_piomap_alloc(devfs_handle_t dev,
+extern pcibr_piomap_t pcibr_piomap_alloc(vertex_hdl_t dev,
device_desc_t dev_desc,
pciio_space_t space,
iopaddr_t pci_addr,
extern void pcibr_piomap_done(pcibr_piomap_t piomap);
-extern caddr_t pcibr_piotrans_addr(devfs_handle_t dev,
+extern caddr_t pcibr_piotrans_addr(vertex_hdl_t dev,
device_desc_t dev_desc,
pciio_space_t space,
iopaddr_t pci_addr,
size_t byte_count,
unsigned flags);
-extern iopaddr_t pcibr_piospace_alloc(devfs_handle_t dev,
+extern iopaddr_t pcibr_piospace_alloc(vertex_hdl_t dev,
device_desc_t dev_desc,
pciio_space_t space,
size_t byte_count,
size_t alignment);
-extern void pcibr_piospace_free(devfs_handle_t dev,
+extern void pcibr_piospace_free(vertex_hdl_t dev,
pciio_space_t space,
iopaddr_t pciaddr,
size_t byte_count);
-extern pcibr_dmamap_t pcibr_dmamap_alloc(devfs_handle_t dev,
+extern pcibr_dmamap_t pcibr_dmamap_alloc(vertex_hdl_t dev,
device_desc_t dev_desc,
size_t byte_count_max,
unsigned flags);
* (This node id can be different for each PCI bus.)
*/
-extern cnodeid_t pcibr_get_dmatrans_node(devfs_handle_t pconn_vhdl);
+extern cnodeid_t pcibr_get_dmatrans_node(vertex_hdl_t pconn_vhdl);
-extern iopaddr_t pcibr_dmatrans_addr(devfs_handle_t dev,
+extern iopaddr_t pcibr_dmatrans_addr(vertex_hdl_t dev,
device_desc_t dev_desc,
paddr_t paddr,
size_t byte_count,
unsigned flags);
-extern alenlist_t pcibr_dmatrans_list(devfs_handle_t dev,
+extern alenlist_t pcibr_dmatrans_list(vertex_hdl_t dev,
device_desc_t dev_desc,
alenlist_t palenlist,
unsigned flags);
extern void pcibr_dmamap_drain(pcibr_dmamap_t map);
-extern void pcibr_dmaaddr_drain(devfs_handle_t vhdl,
+extern void pcibr_dmaaddr_drain(vertex_hdl_t vhdl,
paddr_t addr,
size_t bytes);
-extern void pcibr_dmalist_drain(devfs_handle_t vhdl,
+extern void pcibr_dmalist_drain(vertex_hdl_t vhdl,
alenlist_t list);
typedef unsigned pcibr_intr_ibit_f(pciio_info_t info,
pciio_intr_line_t lines);
-extern void pcibr_intr_ibit_set(devfs_handle_t, pcibr_intr_ibit_f *);
+extern void pcibr_intr_ibit_set(vertex_hdl_t, pcibr_intr_ibit_f *);
-extern pcibr_intr_t pcibr_intr_alloc(devfs_handle_t dev,
+extern pcibr_intr_t pcibr_intr_alloc(vertex_hdl_t dev,
device_desc_t dev_desc,
pciio_intr_line_t lines,
- devfs_handle_t owner_dev);
+ vertex_hdl_t owner_dev);
extern void pcibr_intr_free(pcibr_intr_t intr);
-#ifdef CONFIG_IA64_SGI_SN1
-extern int pcibr_intr_connect(pcibr_intr_t intr);
-#else
extern int pcibr_intr_connect(pcibr_intr_t intr, intr_func_t, intr_arg_t);
-#endif
extern void pcibr_intr_disconnect(pcibr_intr_t intr);
-extern devfs_handle_t pcibr_intr_cpu_get(pcibr_intr_t intr);
+extern vertex_hdl_t pcibr_intr_cpu_get(pcibr_intr_t intr);
-extern void pcibr_provider_startup(devfs_handle_t pcibr);
+extern void pcibr_provider_startup(vertex_hdl_t pcibr);
-extern void pcibr_provider_shutdown(devfs_handle_t pcibr);
+extern void pcibr_provider_shutdown(vertex_hdl_t pcibr);
-extern int pcibr_reset(devfs_handle_t dev);
+extern int pcibr_reset(vertex_hdl_t dev);
-extern int pcibr_write_gather_flush(devfs_handle_t dev);
+extern int pcibr_write_gather_flush(vertex_hdl_t dev);
-extern pciio_endian_t pcibr_endian_set(devfs_handle_t dev,
+extern pciio_endian_t pcibr_endian_set(vertex_hdl_t dev,
pciio_endian_t device_end,
pciio_endian_t desired_end);
-extern pciio_priority_t pcibr_priority_set(devfs_handle_t dev,
+extern pciio_priority_t pcibr_priority_set(vertex_hdl_t dev,
pciio_priority_t device_prio);
-extern uint64_t pcibr_config_get(devfs_handle_t conn,
+extern uint64_t pcibr_config_get(vertex_hdl_t conn,
unsigned reg,
unsigned size);
-extern void pcibr_config_set(devfs_handle_t conn,
+extern void pcibr_config_set(vertex_hdl_t conn,
unsigned reg,
unsigned size,
uint64_t value);
-extern int pcibr_error_devenable(devfs_handle_t pconn_vhdl,
+extern int pcibr_error_devenable(vertex_hdl_t pconn_vhdl,
int error_code);
-#ifdef PIC_LATER
-extern pciio_slot_t pcibr_error_extract(devfs_handle_t pcibr_vhdl,
- pciio_space_t *spacep,
- iopaddr_t *addrp);
-#endif
-
-extern int pcibr_wrb_flush(devfs_handle_t pconn_vhdl);
-extern int pcibr_rrb_check(devfs_handle_t pconn_vhdl,
+extern int pcibr_wrb_flush(vertex_hdl_t pconn_vhdl);
+extern int pcibr_rrb_check(vertex_hdl_t pconn_vhdl,
int *count_vchan0,
int *count_vchan1,
int *count_reserved,
int *count_pool);
-#ifndef CONFIG_IA64_SGI_SN1
-extern int pcibr_alloc_all_rrbs(devfs_handle_t vhdl, int even_odd,
+extern int pcibr_alloc_all_rrbs(vertex_hdl_t vhdl, int even_odd,
int dev_1_rrbs, int virt1,
int dev_2_rrbs, int virt2,
int dev_3_rrbs, int virt3,
int dev_4_rrbs, int virt4);
-#endif
typedef void
-rrb_alloc_funct_f (devfs_handle_t xconn_vhdl,
+rrb_alloc_funct_f (vertex_hdl_t xconn_vhdl,
int *vendor_list);
typedef rrb_alloc_funct_f *rrb_alloc_funct_t;
-void pcibr_set_rrb_callback(devfs_handle_t xconn_vhdl,
+void pcibr_set_rrb_callback(vertex_hdl_t xconn_vhdl,
rrb_alloc_funct_f *func);
-extern int pcibr_device_unregister(devfs_handle_t);
-extern int pcibr_dma_enabled(devfs_handle_t);
+extern int pcibr_device_unregister(vertex_hdl_t);
+extern int pcibr_dma_enabled(vertex_hdl_t);
/*
* Bridge-specific flags that can be set via pcibr_device_flags_set
* and cleared via pcibr_device_flags_clear. Other flags are
* "flags" are defined above. NOTE: this includes turning
* things *OFF* as well as turning them *ON* ...
*/
-extern int pcibr_device_flags_set(devfs_handle_t dev,
+extern int pcibr_device_flags_set(vertex_hdl_t dev,
pcibr_device_flags_t flags);
/*
* <0 on failure, which occurs when we're unable to allocate any
* buffers to a channel that desires at least one buffer.
*/
-extern int pcibr_rrb_alloc(devfs_handle_t pconn_vhdl,
+extern int pcibr_rrb_alloc(vertex_hdl_t pconn_vhdl,
int *count_vchan0,
int *count_vchan1);
extern xwidget_intr_preset_f pcibr_xintr_preset;
-extern void pcibr_hints_fix_rrbs(devfs_handle_t);
-extern void pcibr_hints_dualslot(devfs_handle_t, pciio_slot_t, pciio_slot_t);
-extern void pcibr_hints_subdevs(devfs_handle_t, pciio_slot_t, ulong);
-extern void pcibr_hints_handsoff(devfs_handle_t);
+extern void pcibr_hints_fix_rrbs(vertex_hdl_t);
+extern void pcibr_hints_dualslot(vertex_hdl_t, pciio_slot_t, pciio_slot_t);
+extern void pcibr_hints_subdevs(vertex_hdl_t, pciio_slot_t, ulong);
+extern void pcibr_hints_handsoff(vertex_hdl_t);
-#ifdef CONFIG_IA64_SGI_SN1
-typedef unsigned pcibr_intr_bits_f(pciio_info_t, pciio_intr_line_t);
-#else
typedef unsigned pcibr_intr_bits_f(pciio_info_t, pciio_intr_line_t, int);
-#endif
-extern void pcibr_hints_intr_bits(devfs_handle_t, pcibr_intr_bits_f *);
+extern void pcibr_hints_intr_bits(vertex_hdl_t, pcibr_intr_bits_f *);
-extern int pcibr_asic_rev(devfs_handle_t);
+extern int pcibr_asic_rev(vertex_hdl_t);
#endif /* __ASSEMBLY__ */
#endif /* #if defined(__KERNEL__) */
short resp_bs_bridge_mode;
int resp_has_host;
char resp_host_slot;
- devfs_handle_t resp_slot_conn;
+ vertex_hdl_t resp_slot_conn;
char resp_slot_conn_name[MAXDEVNAME];
int resp_slot_status;
int resp_l1_bus_num;
bridgereg_t resp_b_int_device;
bridgereg_t resp_b_int_enable;
bridgereg_t resp_b_int_host;
-#ifndef CONFIG_IA64_SGI_SN1
picreg_t resp_p_int_enable;
picreg_t resp_p_int_host;
-#endif
struct pcibr_slot_func_info_resp_s {
int resp_f_status;
char resp_f_slot_name[MAXDEVNAME];
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
- * Copyright (C) 1992 - 1997, 2000-2002 Silicon Graphics, Inc. All rights reserved.
+ * Copyright (C) 1992 - 1997, 2000-2003 Silicon Graphics, Inc. All rights reserved.
*/
#ifndef _ASM_SN_PCI_PCIBR_PRIVATE_H
#define _ASM_SN_PCI_PCIBR_PRIVATE_H
*/
#include <linux/config.h>
+#include <linux/pci.h>
#include <asm/sn/pci/pcibr.h>
#include <asm/sn/pci/pciio_private.h>
#include <asm/sn/ksys/l1.h>
cfg_p pcibr_func_config_addr(bridge_t *, pciio_bus_t bus, pciio_slot_t, pciio_function_t, int);
unsigned pcibr_slot_config_get(bridge_t *, pciio_slot_t, int);
unsigned pcibr_func_config_get(bridge_t *, pciio_slot_t, pciio_function_t, int);
-void pcibr_debug(uint32_t, devfs_handle_t, char *, ...);
+void pcibr_debug(uint32_t, vertex_hdl_t, char *, ...);
void pcibr_slot_config_set(bridge_t *, pciio_slot_t, int, unsigned);
void pcibr_func_config_set(bridge_t *, pciio_slot_t, pciio_function_t, int,
unsigned);
unsigned bi_ibits; /* which Bridge interrupt bit(s) */
pcibr_soft_t bi_soft; /* shortcut to soft info */
struct pcibr_intr_cbuf_s bi_ibuf; /* circular buffer of wrap ptrs */
+ unsigned bi_last_intr; /* For Shub lb lost intr. bug */
};
struct pcibr_intr_wrap_s {
pcibr_soft_t iw_soft; /* which bridge */
volatile bridgereg_t *iw_stat; /* ptr to b_int_status */
-#ifdef CONFIG_IA64_SGI_SN1
- bridgereg_t iw_intr; /* bit in b_int_status */
-#else
bridgereg_t iw_ibit; /* bit in b_int_status */
-#endif
pcibr_intr_list_t iw_list; /* ghostbusters! */
int iw_hdlrcnt; /* running handler count */
int iw_shared; /* if Bridge bit is shared */
#define PCIBR_BRIDGETYPE_PIC 2
#define IS_XBRIDGE_SOFT(ps) (ps->bs_bridge_type == PCIBR_BRIDGETYPE_XBRIDGE)
#define IS_PIC_SOFT(ps) (ps->bs_bridge_type == PCIBR_BRIDGETYPE_PIC)
+#define IS_PIC_BUSNUM_SOFT(ps, bus) \
+ (IS_PIC_SOFT(ps) && ((ps)->bs_busnum == (bus)))
#define IS_BRIDGE_SOFT(ps) (ps->bs_bridge_type == PCIBR_BRIDGETYPE_BRIDGE)
#define IS_XBRIDGE_OR_PIC_SOFT(ps) (IS_XBRIDGE_SOFT(ps) || IS_PIC_SOFT(ps))
*/
struct pcibr_soft_s {
- devfs_handle_t bs_conn; /* xtalk connection point */
- devfs_handle_t bs_vhdl; /* vertex owned by pcibr */
+ vertex_hdl_t bs_conn; /* xtalk connection point */
+ vertex_hdl_t bs_vhdl; /* vertex owned by pcibr */
uint64_t bs_int_enable; /* Mask of enabled intrs */
bridge_t *bs_base; /* PIO pointer to Bridge chip */
char *bs_name; /* hw graph name */
xwidgetnum_t bs_xid; /* Bridge's xtalk ID number */
- devfs_handle_t bs_master; /* xtalk master vertex */
+ vertex_hdl_t bs_master; /* xtalk master vertex */
xwidgetnum_t bs_mxid; /* master's xtalk ID number */
pciio_slot_t bs_first_slot; /* first existing slot */
pciio_slot_t bs_last_slot; /* last existing slot */
short bs_int_ate_size; /* number of internal ates */
short bs_bridge_type; /* see defines above */
short bs_bridge_mode; /* see defines above */
-#ifdef CONFIG_IA64_SGI_SN1
-#define bs_xbridge bs_bridge_type
-#endif
int bs_rev_num; /* revision number of Bridge */
/* bs_dma_flags are the forced dma flags used on all DMAs. Used for
*/
unsigned bs_dma_flags; /* forced DMA flags */
-#ifdef CONFIG_IA64_SGI_SN1
- l1sc_t *bs_l1sc; /* io brick l1 system cntr */
-#endif
moduleid_t bs_moduleid; /* io brick moduleid */
short bs_bricktype; /* io brick type */
*/
spinlock_t bs_lock;
- devfs_handle_t bs_noslot_conn; /* NO-SLOT connection point */
+ vertex_hdl_t bs_noslot_conn; /* NO-SLOT connection point */
pcibr_info_t bs_noslot_info;
struct pcibr_soft_slot_s {
/* information we keep about each CFG slot */
*/
int has_host;
pciio_slot_t host_slot;
- devfs_handle_t slot_conn;
+ vertex_hdl_t slot_conn;
/* PCI Hot-Plug status word */
int slot_status;
int bs_rrb_avail[2];
int bs_rrb_res[8];
int bs_rrb_res_dflt[8];
-#ifdef CONFIG_IA64_SGI_SN1
- int bs_rrb_valid[16];
- int bs_rrb_valid_dflt[16];
-#else
int bs_rrb_valid[8][4];
int bs_rrb_valid_dflt[8][4];
-#endif
struct {
/* Each Bridge interrupt bit has a single XIO
* interrupt channel allocated.
#ifdef LATER
toid_t bserr_toutid; /* Timeout started by errintr */
#endif /* LATER */
- iopaddr_t bserr_addr; /* Address where error occurred */
+ iopaddr_t bserr_addr; /* Address where error occurred */
uint64_t bserr_intstat; /* interrupts active at error dump */
} bs_errinfo;
* in Megabytes), and they generally tend to take once and never
* release.
*/
-#ifdef CONFIG_IA64_SGI_SN1
- struct br_pcisp_info {
- iopaddr_t pci_io_base;
- iopaddr_t pci_io_last;
- iopaddr_t pci_swin_base;
- iopaddr_t pci_swin_last;
- iopaddr_t pci_mem_base;
- iopaddr_t pci_mem_last;
- } bs_spinfo;
-#endif /* CONFIG_IA64_SGI_SN1 */
struct pciio_win_map_s bs_io_win_map; /* I/O addr space */
struct pciio_win_map_s bs_swin_map; /* Small window addr space */
struct pciio_win_map_s bs_mem_win_map; /* Memory addr space */
pcibr_intr_bits_f *ph_intr_bits; /* map PCI INT[ABCD] to Bridge Int(n) */
};
-extern int pcibr_prefetch_enable_rev, pcibr_wg_enable_rev;
-
/*
* Number of bridge non-fatal error interrupts we can see before
* we decide to disable that interrupt.
#define NEW(ptr) NEWA(ptr,1)
#define DEL(ptr) DELA(ptr,1)
-#ifndef CONFIG_IA64_SGI_SN1
/*
* Additional PIO spaces per slot are
* recorded in this structure.
iopaddr_t start; /* Starting address of the PIO space */
size_t count; /* size of PIO space */
};
-#endif /* CONFIG_IA64_SGI_SN1 */
/* Use io spin locks. This ensures that all the PIO writes from a particular
* CPU to a particular IO device are synched before the start of the next
#define pcibr_unlock(pcibr_soft, s)
#endif /* PCI_LATER */
-#ifndef CONFIG_IA64_SGI_SN1
#define PCIBR_VALID_SLOT(ps, s) (s < PCIBR_NUM_SLOTS(ps))
#define PCIBR_D64_BASE_UNSET (0xFFFFFFFFFFFFFFFF)
#define PCIBR_D32_BASE_UNSET (0xFFFFFFFF)
-#endif
#define INFO_LBL_PCIBR_ASIC_REV "_pcibr_asic_rev"
#define PCIBR_SOFT_LIST 1
struct pcibr_list_s {
pcibr_list_p bl_next;
pcibr_soft_t bl_soft;
- devfs_handle_t bl_vhdl;
+ vertex_hdl_t bl_vhdl;
};
#endif /* PCIBR_SOFT_LIST */
+
+/* Devices per widget: 2 buses, 2 slots per bus, 8 functions per slot. */
+#define DEV_PER_WIDGET (2*2*8)
+
+struct sn_flush_device_list {
+ int bus;
+ int pin;
+ struct bar_list {
+ unsigned long start;
+ unsigned long end;
+ } bar_list[PCI_ROM_RESOURCE];
+ unsigned long force_int_addr;
+ volatile unsigned long flush_addr;
+ spinlock_t flush_lock;
+};
+
+struct sn_flush_nasid_entry {
+ struct sn_flush_device_list **widget_p;
+ unsigned long iio_itte1;
+ unsigned long iio_itte2;
+ unsigned long iio_itte3;
+ unsigned long iio_itte4;
+ unsigned long iio_itte5;
+ unsigned long iio_itte6;
+ unsigned long iio_itte7;
+};
+
#endif /* _ASM_SN_PCI_PCIBR_PRIVATE_H */
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
- * Copyright (C) 1992 - 1997, 2000-2002 Silicon Graphics, Inc. All rights reserved.
+ * Copyright (C) 1992 - 1997, 2000-2003 Silicon Graphics, Inc. All rights reserved.
*/
#ifndef _ASM_SN_PCI_PCIIO_H
#define _ASM_SN_PCI_PCIIO_H
#define PCIIO_PIOMAP_WIN(n) (0x8+(n))
typedef pciio_piomap_t
-pciio_piomap_alloc_f (devfs_handle_t dev, /* set up mapping for this device */
+pciio_piomap_alloc_f (vertex_hdl_t dev, /* set up mapping for this device */
device_desc_t dev_desc, /* device descriptor */
pciio_space_t space, /* which address space */
iopaddr_t pcipio_addr, /* starting address */
pciio_piomap_done_f (pciio_piomap_t pciio_piomap);
typedef caddr_t
-pciio_piotrans_addr_f (devfs_handle_t dev, /* translate for this device */
+pciio_piotrans_addr_f (vertex_hdl_t dev, /* translate for this device */
device_desc_t dev_desc, /* device descriptor */
pciio_space_t space, /* which address space */
iopaddr_t pciio_addr, /* starting address */
unsigned flags);
typedef caddr_t
-pciio_pio_addr_f (devfs_handle_t dev, /* translate for this device */
+pciio_pio_addr_f (vertex_hdl_t dev, /* translate for this device */
device_desc_t dev_desc, /* device descriptor */
pciio_space_t space, /* which address space */
iopaddr_t pciio_addr, /* starting address */
unsigned flags);
typedef iopaddr_t
-pciio_piospace_alloc_f (devfs_handle_t dev, /* PIO space for this device */
+pciio_piospace_alloc_f (vertex_hdl_t dev, /* PIO space for this device */
device_desc_t dev_desc, /* Device descriptor */
pciio_space_t space, /* which address space */
size_t byte_count, /* Number of bytes of space */
size_t alignment); /* Alignment of allocation */
typedef void
-pciio_piospace_free_f (devfs_handle_t dev, /* Device freeing space */
+pciio_piospace_free_f (vertex_hdl_t dev, /* Device freeing space */
pciio_space_t space, /* Which space is freed */
iopaddr_t pci_addr, /* Address being freed */
size_t size); /* Size freed */
/* DMA MANAGEMENT */
typedef pciio_dmamap_t
-pciio_dmamap_alloc_f (devfs_handle_t dev, /* set up mappings for this device */
+pciio_dmamap_alloc_f (vertex_hdl_t dev, /* set up mappings for this device */
device_desc_t dev_desc, /* device descriptor */
size_t byte_count_max, /* max size of a mapping */
unsigned flags); /* defined in dma.h */
paddr_t paddr, /* map for this address */
size_t byte_count); /* map this many bytes */
-typedef alenlist_t
-pciio_dmamap_list_f (pciio_dmamap_t dmamap, /* use these mapping resources */
- alenlist_t alenlist, /* map this address/length list */
- unsigned flags);
-
typedef void
pciio_dmamap_done_f (pciio_dmamap_t dmamap);
typedef iopaddr_t
-pciio_dmatrans_addr_f (devfs_handle_t dev, /* translate for this device */
+pciio_dmatrans_addr_f (vertex_hdl_t dev, /* translate for this device */
device_desc_t dev_desc, /* device descriptor */
paddr_t paddr, /* system physical address */
size_t byte_count, /* length */
unsigned flags); /* defined in dma.h */
-typedef alenlist_t
-pciio_dmatrans_list_f (devfs_handle_t dev, /* translate for this device */
- device_desc_t dev_desc, /* device descriptor */
- alenlist_t palenlist, /* system address/length list */
- unsigned flags); /* defined in dma.h */
-
typedef void
pciio_dmamap_drain_f (pciio_dmamap_t map);
typedef void
-pciio_dmaaddr_drain_f (devfs_handle_t vhdl,
+pciio_dmaaddr_drain_f (vertex_hdl_t vhdl,
paddr_t addr,
size_t bytes);
typedef void
-pciio_dmalist_drain_f (devfs_handle_t vhdl,
+pciio_dmalist_drain_f (vertex_hdl_t vhdl,
alenlist_t list);
/* INTERRUPT MANAGEMENT */
typedef pciio_intr_t
-pciio_intr_alloc_f (devfs_handle_t dev, /* which PCI device */
+pciio_intr_alloc_f (vertex_hdl_t dev, /* which PCI device */
device_desc_t dev_desc, /* device descriptor */
pciio_intr_line_t lines, /* which line(s) will be used */
- devfs_handle_t owner_dev); /* owner of this intr */
+ vertex_hdl_t owner_dev); /* owner of this intr */
typedef void
pciio_intr_free_f (pciio_intr_t intr_hdl);
-#ifdef CONFIG_IA64_SGI_SN1
-typedef int
-pciio_intr_connect_f (pciio_intr_t intr_hdl); /* pciio intr resource handle */
-#else
typedef int
pciio_intr_connect_f (pciio_intr_t intr_hdl, intr_func_t intr_func, intr_arg_t intr_arg); /* pciio intr resource handle */
-#endif
typedef void
pciio_intr_disconnect_f (pciio_intr_t intr_hdl);
-typedef devfs_handle_t
+typedef vertex_hdl_t
pciio_intr_cpu_get_f (pciio_intr_t intr_hdl); /* pciio intr resource handle */
/* CONFIGURATION MANAGEMENT */
typedef void
-pciio_provider_startup_f (devfs_handle_t pciio_provider);
+pciio_provider_startup_f (vertex_hdl_t pciio_provider);
typedef void
-pciio_provider_shutdown_f (devfs_handle_t pciio_provider);
+pciio_provider_shutdown_f (vertex_hdl_t pciio_provider);
typedef int
-pciio_reset_f (devfs_handle_t conn); /* pci connection point */
+pciio_reset_f (vertex_hdl_t conn); /* pci connection point */
typedef int
-pciio_write_gather_flush_f (devfs_handle_t dev); /* Device flushing buffers */
+pciio_write_gather_flush_f (vertex_hdl_t dev); /* Device flushing buffers */
typedef pciio_endian_t /* actual endianness */
-pciio_endian_set_f (devfs_handle_t dev, /* specify endianness for this device */
+pciio_endian_set_f (vertex_hdl_t dev, /* specify endianness for this device */
pciio_endian_t device_end, /* endianness of device */
pciio_endian_t desired_end); /* desired endianness */
typedef pciio_priority_t
-pciio_priority_set_f (devfs_handle_t pcicard,
+pciio_priority_set_f (vertex_hdl_t pcicard,
pciio_priority_t device_prio);
typedef uint64_t
-pciio_config_get_f (devfs_handle_t conn, /* pci connection point */
+pciio_config_get_f (vertex_hdl_t conn, /* pci connection point */
unsigned reg, /* register byte offset */
unsigned size); /* width in bytes (1..4) */
typedef void
-pciio_config_set_f (devfs_handle_t conn, /* pci connection point */
+pciio_config_set_f (vertex_hdl_t conn, /* pci connection point */
unsigned reg, /* register byte offset */
unsigned size, /* width in bytes (1..4) */
uint64_t value); /* value to store */
typedef int
-pciio_error_devenable_f (devfs_handle_t pconn_vhdl, int error_code);
+pciio_error_devenable_f (vertex_hdl_t pconn_vhdl, int error_code);
typedef pciio_slot_t
-pciio_error_extract_f (devfs_handle_t vhdl,
+pciio_error_extract_f (vertex_hdl_t vhdl,
pciio_space_t *spacep,
iopaddr_t *addrp);
typedef void
-pciio_driver_reg_callback_f (devfs_handle_t conn,
+pciio_driver_reg_callback_f (vertex_hdl_t conn,
int key1,
int key2,
int error);
typedef void
-pciio_driver_unreg_callback_f (devfs_handle_t conn, /* pci connection point */
+pciio_driver_unreg_callback_f (vertex_hdl_t conn, /* pci connection point */
int key1,
int key2,
int error);
typedef int
-pciio_device_unregister_f (devfs_handle_t conn);
+pciio_device_unregister_f (vertex_hdl_t conn);
typedef int
-pciio_dma_enabled_f (devfs_handle_t conn);
+pciio_dma_enabled_f (vertex_hdl_t conn);
/*
* Adapters that provide a PCI interface adhere to this software interface.
pciio_dmamap_alloc_f *dmamap_alloc;
pciio_dmamap_free_f *dmamap_free;
pciio_dmamap_addr_f *dmamap_addr;
- pciio_dmamap_list_f *dmamap_list;
pciio_dmamap_done_f *dmamap_done;
pciio_dmatrans_addr_f *dmatrans_addr;
- pciio_dmatrans_list_f *dmatrans_list;
pciio_dmamap_drain_f *dmamap_drain;
pciio_dmaaddr_drain_f *dmaaddr_drain;
pciio_dmalist_drain_f *dmalist_drain;
extern pciio_dmamap_alloc_f pciio_dmamap_alloc;
extern pciio_dmamap_free_f pciio_dmamap_free;
extern pciio_dmamap_addr_f pciio_dmamap_addr;
-extern pciio_dmamap_list_f pciio_dmamap_list;
extern pciio_dmamap_done_f pciio_dmamap_done;
extern pciio_dmatrans_addr_f pciio_dmatrans_addr;
-extern pciio_dmatrans_list_f pciio_dmatrans_list;
extern pciio_dmamap_drain_f pciio_dmamap_drain;
extern pciio_dmaaddr_drain_f pciio_dmaaddr_drain;
extern pciio_dmalist_drain_f pciio_dmalist_drain;
unsigned flags);
extern void
-pciio_error_register (devfs_handle_t pconn, /* which slot */
+pciio_error_register (vertex_hdl_t pconn, /* which slot */
error_handler_f *efunc, /* function to call */
error_handler_arg_t einfo); /* first parameter */
extern void pciio_driver_unregister(char *driver_prefix);
-typedef void pciio_iter_f(devfs_handle_t pconn); /* a connect point */
-
-extern void pciio_iterate(char *driver_prefix,
- pciio_iter_f *func);
+typedef void pciio_iter_f(vertex_hdl_t pconn); /* a connect point */
/* Interfaces used by PCI Bus Providers to talk to
* the Generic PCI layer.
*/
-extern devfs_handle_t
-pciio_device_register (devfs_handle_t connectpt, /* vertex at center of bus */
- devfs_handle_t master, /* card's master ASIC (pci provider) */
+extern vertex_hdl_t
+pciio_device_register (vertex_hdl_t connectpt, /* vertex at center of bus */
+ vertex_hdl_t master, /* card's master ASIC (pci provider) */
pciio_slot_t slot, /* card's slot (0..?) */
pciio_function_t func, /* card's func (0..?) */
pciio_vendor_id_t vendor, /* card's vendor number */
pciio_device_id_t device); /* card's device number */
extern void
-pciio_device_unregister(devfs_handle_t connectpt);
+pciio_device_unregister(vertex_hdl_t connectpt);
extern pciio_info_t
pciio_device_info_new (pciio_info_t pciio_info, /* preallocated info struct */
- devfs_handle_t master, /* card's master ASIC (pci provider) */
+ vertex_hdl_t master, /* card's master ASIC (pci provider) */
pciio_slot_t slot, /* card's slot (0..?) */
pciio_function_t func, /* card's func (0..?) */
pciio_vendor_id_t vendor, /* card's vendor number */
extern void
pciio_device_info_free(pciio_info_t pciio_info);
-extern devfs_handle_t
+extern vertex_hdl_t
pciio_device_info_register(
- devfs_handle_t connectpt, /* vertex at center of bus */
+ vertex_hdl_t connectpt, /* vertex at center of bus */
pciio_info_t pciio_info); /* details about conn point */
extern void
pciio_device_info_unregister(
- devfs_handle_t connectpt, /* vertex at center of bus */
+ vertex_hdl_t connectpt, /* vertex at center of bus */
pciio_info_t pciio_info); /* details about conn point */
extern int
pciio_device_attach(
- devfs_handle_t pcicard, /* vertex created by pciio_device_register */
+ vertex_hdl_t pcicard, /* vertex created by pciio_device_register */
int drv_flags);
extern int
pciio_device_detach(
- devfs_handle_t pcicard, /* vertex created by pciio_device_register */
+ vertex_hdl_t pcicard, /* vertex created by pciio_device_register */
int drv_flags);
size_t size); /* size of free range */
/* allocate window from mapping resource */
-#ifdef CONFIG_IA64_SGI_SN1
-extern iopaddr_t
-pciio_device_win_alloc(pciio_win_map_t win_map, /* win map */
- pciio_win_alloc_t win_alloc, /* opaque allocation cookie */
- size_t size, /* size of allocation */
- size_t align); /* alignment of allocation */
-#else
extern iopaddr_t
pciio_device_win_alloc(pciio_win_map_t win_map, /* win map */
pciio_win_alloc_t win_alloc, /* opaque allocation cookie */
size_t start, /* start unit, or 0 */
size_t size, /* size of allocation */
size_t align); /* alignment of allocation */
-#endif
/* free previously allocated window */
extern void
*/
/* Generic PCI interrupt interfaces */
-extern devfs_handle_t pciio_intr_dev_get(pciio_intr_t pciio_intr);
-extern devfs_handle_t pciio_intr_cpu_get(pciio_intr_t pciio_intr);
+extern vertex_hdl_t pciio_intr_dev_get(pciio_intr_t pciio_intr);
+extern vertex_hdl_t pciio_intr_cpu_get(pciio_intr_t pciio_intr);
/* Generic PCI pio interfaces */
-extern devfs_handle_t pciio_pio_dev_get(pciio_piomap_t pciio_piomap);
+extern vertex_hdl_t pciio_pio_dev_get(pciio_piomap_t pciio_piomap);
extern pciio_slot_t pciio_pio_slot_get(pciio_piomap_t pciio_piomap);
extern pciio_space_t pciio_pio_space_get(pciio_piomap_t pciio_piomap);
extern iopaddr_t pciio_pio_pciaddr_get(pciio_piomap_t pciio_piomap);
extern caddr_t pciio_pio_kvaddr_get(pciio_piomap_t pciio_piomap);
/* Generic PCI dma interfaces */
-extern devfs_handle_t pciio_dma_dev_get(pciio_dmamap_t pciio_dmamap);
+extern vertex_hdl_t pciio_dma_dev_get(pciio_dmamap_t pciio_dmamap);
/* Register/unregister PCI providers and get implementation handle */
-extern void pciio_provider_register(devfs_handle_t provider, pciio_provider_t *pciio_fns);
-extern void pciio_provider_unregister(devfs_handle_t provider);
-extern pciio_provider_t *pciio_provider_fns_get(devfs_handle_t provider);
+extern void pciio_provider_register(vertex_hdl_t provider, pciio_provider_t *pciio_fns);
+extern void pciio_provider_unregister(vertex_hdl_t provider);
+extern pciio_provider_t *pciio_provider_fns_get(vertex_hdl_t provider);
/* Generic pci slot information access interface */
-extern pciio_info_t pciio_info_chk(devfs_handle_t vhdl);
-extern pciio_info_t pciio_info_get(devfs_handle_t vhdl);
-extern pciio_info_t pciio_hostinfo_get(devfs_handle_t vhdl);
-extern void pciio_info_set(devfs_handle_t vhdl, pciio_info_t widget_info);
-extern devfs_handle_t pciio_info_dev_get(pciio_info_t pciio_info);
-extern devfs_handle_t pciio_info_hostdev_get(pciio_info_t pciio_info);
+extern pciio_info_t pciio_info_chk(vertex_hdl_t vhdl);
+extern pciio_info_t pciio_info_get(vertex_hdl_t vhdl);
+extern pciio_info_t pciio_hostinfo_get(vertex_hdl_t vhdl);
+extern void pciio_info_set(vertex_hdl_t vhdl, pciio_info_t widget_info);
+extern vertex_hdl_t pciio_info_dev_get(pciio_info_t pciio_info);
+extern vertex_hdl_t pciio_info_hostdev_get(pciio_info_t pciio_info);
extern pciio_bus_t pciio_info_bus_get(pciio_info_t pciio_info);
extern pciio_slot_t pciio_info_slot_get(pciio_info_t pciio_info);
extern pciio_function_t pciio_info_function_get(pciio_info_t pciio_info);
extern pciio_vendor_id_t pciio_info_vendor_id_get(pciio_info_t pciio_info);
extern pciio_device_id_t pciio_info_device_id_get(pciio_info_t pciio_info);
-extern devfs_handle_t pciio_info_master_get(pciio_info_t pciio_info);
+extern vertex_hdl_t pciio_info_master_get(pciio_info_t pciio_info);
extern arbitrary_info_t pciio_info_mfast_get(pciio_info_t pciio_info);
extern pciio_provider_t *pciio_info_pops_get(pciio_info_t pciio_info);
extern error_handler_f *pciio_info_efunc_get(pciio_info_t);
extern iopaddr_t pciio_info_rom_base_get(pciio_info_t);
extern size_t pciio_info_rom_size_get(pciio_info_t);
extern int pciio_info_type1_get(pciio_info_t);
-extern int pciio_error_handler(devfs_handle_t, int, ioerror_mode_t, ioerror_t *);
-extern int pciio_dma_enabled(devfs_handle_t);
+extern int pciio_error_handler(vertex_hdl_t, int, ioerror_mode_t, ioerror_t *);
+extern int pciio_dma_enabled(vertex_hdl_t);
#endif /* C or C++ */
#endif /* _ASM_SN_PCI_PCIIO_H */
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
- * Copyright (C) 1992 - 1997, 2000-2002 Silicon Graphics, Inc. All rights reserved.
+ * Copyright (C) 1992 - 1997, 2000-2003 Silicon Graphics, Inc. All rights reserved.
*/
#ifndef _ASM_SN_PCI_PCIIO_PRIVATE_H
#define _ASM_SN_PCI_PCIIO_PRIVATE_H
*/
struct pciio_piomap_s {
unsigned pp_flags; /* PCIIO_PIOMAP flags */
- devfs_handle_t pp_dev; /* associated pci card */
+ vertex_hdl_t pp_dev; /* associated pci card */
pciio_slot_t pp_slot; /* which slot the card is in */
pciio_space_t pp_space; /* which address space */
iopaddr_t pp_pciaddr; /* starting offset of mapping */
*/
struct pciio_dmamap_s {
unsigned pd_flags; /* PCIIO_DMAMAP flags */
- devfs_handle_t pd_dev; /* associated pci card */
+ vertex_hdl_t pd_dev; /* associated pci card */
pciio_slot_t pd_slot; /* which slot the card is in */
};
struct pciio_intr_s {
unsigned pi_flags; /* PCIIO_INTR flags */
- devfs_handle_t pi_dev; /* associated pci card */
+ vertex_hdl_t pi_dev; /* associated pci card */
device_desc_t pi_dev_desc; /* override device descriptor */
pciio_intr_line_t pi_lines; /* which interrupt line(s) */
intr_func_t pi_func; /* handler function (when connected) */
struct pciio_info_s {
char *c_fingerprint;
- devfs_handle_t c_vertex; /* back pointer to vertex */
+ vertex_hdl_t c_vertex; /* back pointer to vertex */
pciio_bus_t c_bus; /* which bus the card is in */
pciio_slot_t c_slot; /* which slot the card is in */
pciio_function_t c_func; /* which func (on multi-func cards) */
pciio_vendor_id_t c_vendor; /* PCI card "vendor" code */
pciio_device_id_t c_device; /* PCI card "device" code */
- devfs_handle_t c_master; /* PCI bus provider */
+ vertex_hdl_t c_master; /* PCI bus provider */
arbitrary_info_t c_mfast; /* cached fastinfo from c_master */
pciio_provider_t *c_pops; /* cached provider from c_master */
error_handler_f *c_efunc; /* error handling function */
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
- * Copyright (C) 1992 - 1997, 2000-2002 Silicon Graphics, Inc. All rights reserved.
+ * Copyright (C) 1992 - 1997, 2000-2003 Silicon Graphics, Inc. All rights reserved.
*/
#ifndef _ASM_IA64_SN_PDA_H
#define _ASM_IA64_SN_PDA_H
* all SN per-cpu data structures.
*/
-#ifdef BUS_INT_WAR
-#define POLL_ENTRIES 50
-typedef struct {
- int irq;
- int interval;
- short tick;
-} sn_poll_entry_t;
-#endif
-
typedef struct pda_s {
/* Having a pointer in the begining of PDA tends to increase
/*
* Support for SN LEDs
*/
-#ifdef CONFIG_IA64_SGI_SN1
- volatile long *led_address;
-#else
volatile short *led_address;
-#endif
u8 led_state;
u8 hb_state; /* supports blinking heartbeat leds */
+ u8 shub_1_1_found;
unsigned int hb_count;
unsigned int idle_flag;
-#ifdef CONFIG_IA64_SGI_SN2
- struct irqpda_s *p_irqpda; /* Pointer to CPU irq data */
-#endif
volatile unsigned long *bedrock_rev_id;
volatile unsigned long *pio_write_status_addr;
volatile unsigned long *pio_shub_war_cam_addr;
volatile unsigned long *mem_write_status_addr;
- bteinfo_t *cpu_bte_if[BTES_PER_NODE]; /* cpu interface order */
+ struct bteinfo_s *cpu_bte_if[BTES_PER_NODE]; /* cpu interface order */
-#ifdef BUS_INT_WAR
- sn_poll_entry_t pda_poll_entries[POLL_ENTRIES];
- int pda_poll_entry_count;
-#endif
+ unsigned long sn_soft_irr[4];
+ unsigned long sn_in_service_ivecs[4];
+ short cnodeid_to_nasid_table[NR_NODES];
+ int sn_lb_int_war_ticks;
+ int sn_last_irq;
+ int sn_first_irq;
} pda_t;
#define pdacpu(cpu) (&per_cpu(pda_percpu, cpu))
+/*
+ * Use this macro to test if shub 1.1 wars should be enabled
+ */
+#define enable_shub_wars_1_1() (pda->shub_1_1_found)
#endif /* _ASM_IA64_SN_PDA_H */
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
- * Copyright (C) 1992 - 1997, 2000-2002 Silicon Graphics, Inc. All rights reserved.
+ * Copyright (C) 1992 - 1997, 2000-2003 Silicon Graphics, Inc. All rights reserved.
*/
#ifndef _ASM_IA64_SN_PIO_H
#define _ASM_IA64_SN_PIO_H
typedef struct piomap {
uint pio_bus;
uint pio_adap;
-#ifdef LATER
- iospace_t pio_iospace;
-#endif
int pio_flag;
int pio_reg;
char pio_name[7]; /* to identify the mapped device */
struct piomap *pio_next; /* dlist to link active piomap's */
struct piomap *pio_prev; /* for debug and error reporting */
-#ifdef LATER
- void (*pio_errfunc)(); /* Pointer to an error function */
- /* Used only for piomaps allocated
- * in user level vme driver */
-#endif
iopaddr_t pio_iopmask; /* valid iop address bit mask */
iobush_t pio_bushandle; /* bus-level handle */
} piomap_t;
/*
- * pio_mapalloc() - allocates a handle that specifies a mapping from kernel
- * virtual to io space. The returned handle piomap is used
- * with the access functions to make sure that the mapping
- * to the iospace exists.
- * pio_mapfree() - frees the mapping as specified in the piomap handle.
- * pio_mapaddr() - returns the kv address that maps to piomap'ed io address.
- */
-#ifdef LATER
-extern piomap_t *pio_mapalloc(uint,uint,iospace_t*,int,char*);
-extern void pio_mapfree(piomap_t*);
-extern caddr_t pio_mapaddr(piomap_t*,iopaddr_t);
-extern piomap_t *pio_ioaddr(int, iobush_t, iopaddr_t, piomap_t *);
-
-/*
- * PIO access functions.
- */
-extern int pio_badaddr(piomap_t*,iopaddr_t,int);
-extern int pio_badaddr_val(piomap_t*,iopaddr_t,int,void*);
-extern int pio_wbadaddr(piomap_t*,iopaddr_t,int);
-extern int pio_wbadaddr_val(piomap_t*,iopaddr_t,int,int);
-extern int pio_bcopyin(piomap_t*,iopaddr_t,void *,int, int, int);
-extern int pio_bcopyout(piomap_t*,iopaddr_t,void *,int, int, int);
-
-
-/*
- * PIO RMW functions using piomap.
- */
-extern void pio_orb_rmw(piomap_t*, iopaddr_t, unsigned char);
-extern void pio_orh_rmw(piomap_t*, iopaddr_t, unsigned short);
-extern void pio_orw_rmw(piomap_t*, iopaddr_t, unsigned long);
-extern void pio_andb_rmw(piomap_t*, iopaddr_t, unsigned char);
-extern void pio_andh_rmw(piomap_t*, iopaddr_t, unsigned short);
-extern void pio_andw_rmw(piomap_t*, iopaddr_t, unsigned long);
-
-
-/*
- * Old RMW function interface
- */
-extern void orb_rmw(volatile void*, unsigned int);
-extern void orh_rmw(volatile void*, unsigned int);
-extern void orw_rmw(volatile void*, unsigned int);
-extern void andb_rmw(volatile void*, unsigned int);
-extern void andh_rmw(volatile void*, unsigned int);
-extern void andw_rmw(volatile void*, unsigned int);
-#endif /* LATER */
-
-
-/*
* piomap_t type defines
*/
+++ /dev/null
-/*
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Copyright (C) 2001-2002 Silicon Graphics, Inc. All rights reserved.
- */
-
-
-#include <linux/config.h>
-
-#ifndef _ASM_IA64_PIO_FLUSH_H
-#define _ASM_IA64_PIO_FLUSH_H
-
-/*
- * This macro flushes all outstanding PIOs performed by this cpu to the
- * intended destination SHUB. This in essence ensures that all PIO's
- * issues by this cpu has landed at it's destination.
- *
- * This macro expects the caller:
- * 1. The thread is locked.
- * 2. All prior PIO operations has been fenced.
- *
- */
-
-#if defined (CONFIG_IA64_SGI_SN)
-
-#include <asm/sn/pda.h>
-
-#if defined (CONFIG_IA64_SGI_SN2)
-
-#define PIO_FLUSH() \
- { \
- while ( !((volatile unsigned long) (*pda.pio_write_status_addr)) & 0x8000000000000000) { \
- udelay(5); \
- } \
- __ia64_mf_a(); \
- }
-
-#elif defined (CONFIG_IA64_SGI_SN1)
-
-/*
- * For SN1 we need to first read any local Bedrock's MMR and then poll on the
- * Synergy MMR.
- */
-#define PIO_FLUSH() \
- { \
- (volatile unsigned long) (*pda.bedrock_rev_id); \
- while (!(volatile unsigned long) (*pda.pio_write_status_addr)) { \
- udelay(5); \
- } \
- __ia64_mf_a(); \
- }
-#endif
-#else
-/*
- * For all ARCHITECTURE type, this is a NOOP.
- */
-
-#define PIO_FLUSH()
-
-#endif
-
-#endif /* _ASM_IA64_PIO_FLUSH_H */
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
- * Copyright (C) 1992 - 1997, 2000-2002 Silicon Graphics, Inc. All rights reserved.
+ * Copyright (C) 1992 - 1997, 2000-2003 Silicon Graphics, Inc. All rights reserved.
*/
#ifndef _ASM_IA64_SN_PRIO_H
#define _ASM_IA64_SN_PRIO_H
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
- * Copyright (C) 1992 - 1997, 2000-2002 Silicon Graphics, Inc. All rights reserved.
+ * Copyright (C) 1992 - 1997, 2000-2003 Silicon Graphics, Inc. All rights reserved.
*/
#ifndef _ASM_IA64_SN_ROUTER_H
slotid_t ri_slotnum; /* Which slot are we in? */
router_reg_t ri_glbl_parms[GLBL_PARMS_REGS];
/* Global parms0&1 register contents*/
- devfs_handle_t ri_vertex; /* hardware graph vertex */
+ vertex_hdl_t ri_vertex; /* hardware graph vertex */
router_reg_t ri_prot_conf; /* protection config. register */
int64_t ri_per_minute; /* Ticks per minute */
* the bottom of the structure, below the user stuff.
*/
char ri_hist_type; /* histogram type */
- devfs_handle_t ri_guardian; /* guardian node for the router */
+ vertex_hdl_t ri_guardian; /* guardian node for the router */
int64_t ri_last_print; /* When did we last print */
char ri_print; /* Should we print */
char ri_just_blink; /* Should we blink the LEDs */
* Router info hanging in the nodepda
*/
typedef struct nodepda_router_info_s {
- devfs_handle_t router_vhdl; /* vertex handle of the router */
+ vertex_hdl_t router_vhdl; /* vertex handle of the router */
short router_port; /* port thru which we entered */
short router_portmask;
moduleid_t router_module; /* module in which router is there */
*/
struct {
/* vertex handle for the router */
- devfs_handle_t vhdl;
+ vertex_hdl_t vhdl;
/* guardian for this router */
- devfs_handle_t guard;
+ vertex_hdl_t guard;
/* vector router from the guardian to the router */
net_vec_t vec;
} k_elt;
int router_reg_read(router_info_t *rip, int regno, router_reg_t *val);
int router_reg_write(router_info_t *rip, int regno, router_reg_t val);
-int router_get_info(devfs_handle_t routerv, router_info_t *, int);
-int router_init(cnodeid_t cnode,int writeid, nodepda_router_info_t *npda_rip);
+int router_get_info(vertex_hdl_t routerv, router_info_t *, int);
int router_set_leds(router_info_t *rip);
void router_print_state(router_info_t *rip, int level,
void (*pf)(int, char *, ...),int print_where);
int probe_routers(void);
void get_routername(unsigned char brd_type,char *rtrname);
-void router_guardians_set(devfs_handle_t hwgraph_root);
+void router_guardians_set(vertex_hdl_t hwgraph_root);
int router_hist_reselect(router_info_t *, int64_t);
#endif /* __ASSEMBLY__ */
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
- * Copyright (C) 2000-2002 Silicon Graphics, Inc. All rights reserved.
+ * Copyright (C) 2000-2003 Silicon Graphics, Inc. All rights reserved.
*/
#include <asm/uaccess.h> /* for copy_??_user */
#include <linux/mm.h>
#include <linux/devfs_fs_kernel.h>
+#include <linux/fs.h>
+#include <asm/sn/hwgfs.h>
+
+typedef hwgfs_handle_t vertex_hdl_t;
typedef int64_t __psint_t; /* needed by klgraph.c */
typedef enum { B_FALSE, B_TRUE } boolean_t;
-#define ctob(x) ((uint64_t)(x)*NBPC)
-#define btoc(x) (((uint64_t)(x)+(NBPC-1))/NBPC)
-
/*
** Possible return values from graph routines.
typedef uint64_t vhandl_t;
-#ifndef NBPP
-#define NBPP 4096
-#endif
-
-#ifndef D_MP
-#define D_MP 1
-#endif
+#define NBPP PAGE_SIZE
+#define _PAGESZ PAGE_SIZE
#ifndef MAXDEVNAME
#define MAXDEVNAME 256
#endif
-#ifndef NBPC
-#define NBPC 0
-#endif
-
-#ifndef _PAGESZ
-#define _PAGESZ 4096
-#endif
-
-typedef uint64_t mrlock_t; /* needed by devsupport.c */
-
#define HUB_PIO_CONVEYOR 0x1
#define CNODEID_NONE ((cnodeid_t)-1)
#define XTALK_PCI_PART_NUM "030-1275-"
#define COPYIN(a, b, c) copy_from_user(b,a,c)
#define COPYOUT(a, b, c) copy_to_user(b,a,c)
-#define kvtophys(x) (alenaddr_t) (x)
-#define POFFMASK (NBPP - 1)
-#define poff(X) ((__psunsigned_t)(X) & POFFMASK)
-
#define BZERO(a,b) memset(a, 0, b)
#define kern_malloc(x) kmalloc(x, GFP_KERNEL)
#define PRINT_PANIC panic
-#ifdef CONFIG_SMP
-#define cpu_enabled(cpu) (test_bit(cpu, &cpu_online_map))
-#else
-#define cpu_enabled(cpu) (1)
-#endif
-
/* print_register() defs */
/*
extern void print_register(unsigned long long, struct reg_desc *);
-#include <asm/sn/hack.h> /* for now */
+/******************************************
+ * Definitions that do not exist in linux *
+ ******************************************/
+
+#define DELAY(a)
+
+/************************************************
+ * Routines redefined to use linux equivalents. *
+ ************************************************/
+
+/* #define FIXME(s) printk("FIXME: [ %s ] in %s at %s:%d\n", s, __FUNCTION__, __FILE__, __LINE__) */
+
+#define FIXME(s)
+
+/* move to stubs.c yet */
+#define dev_to_vhdl(dev) 0
+#define get_timestamp() 0
+#define us_delay(a)
+#define v_mapphys(a,b,c) 0 // printk("Fixme: v_mapphys - soft->base 0x%p\n", b);
+#define splhi() 0
+#define splx(s)
+
+extern void * snia_kmem_alloc_node(register size_t, register int, cnodeid_t);
+extern void * snia_kmem_zalloc(size_t, int);
+extern void * snia_kmem_zalloc_node(register size_t, register int, cnodeid_t );
+extern int is_specified(char *);
#endif /* _ASM_IA64_SN_SGI_H */
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
- * Copyright (C) 2000-2001 Silicon Graphics, Inc. All rights reserved.
+ * Copyright (C) 2000-2003 Silicon Graphics, Inc. All rights reserved.
*/
#include <linux/config.h>
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
- * Copyright (C) 1992 - 1997, 2000-2001 Silicon Graphics, Inc. All rights reserved.
+ * Copyright (C) 1992-1997,2000-2003 Silicon Graphics, Inc. All rights reserved.
*/
#ifndef _ASM_IA64_SN_SLOTNUM_H
#define _ASM_IA64_SN_SLOTNUM_H
-#include <linux/config.h>
typedef unsigned char slotid_t;
-#if defined (CONFIG_IA64_SGI_SN1)
-#include <asm/sn/sn1/slotnum.h>
-#elif defined (CONFIG_IA64_SGI_SN2)
#include <asm/sn/sn2/slotnum.h>
-#else
-
-#error <<BOMB! slotnum defined only for SN0 and SN1 >>
-
-#endif /* !CONFIG_IA64_SGI_SN1 */
#endif /* _ASM_IA64_SN_SLOTNUM_H */
+++ /dev/null
-/* $Id$
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Copyright (C) 1992 - 1997, 2000-2002 Silicon Graphics, Inc. All rights reserved.
- */
-
-#ifndef _ASM_IA64_SN_SN1_ADDRS_H
-#define _ASM_IA64_SN_SN1_ADDRS_H
-
-#include <linux/config.h>
-
-#ifdef CONFIG_IA64_SGI_SN1
-/*
- * SN1 (on a TRex) Address map
- *
- * This file contains a set of definitions and macros which are used
- * to reference into the major address spaces (CAC, HSPEC, IO, MSPEC,
- * and UNCAC) used by the SN1 architecture. It also contains addresses
- * for "major" statically locatable PROM/Kernel data structures, such as
- * the partition table, the configuration data structure, etc.
- * We make an implicit assumption that the processor using this file
- * follows the R12K's provisions for specifying uncached attributes;
- * should this change, the base registers may very well become processor-
- * dependent.
- *
- * For more information on the address spaces, see the "Local Resources"
- * chapter of the Hub specification.
- *
- * NOTE: This header file is included both by C and by assembler source
- * files. Please bracket any language-dependent definitions
- * appropriately.
- */
-
-
-/*
- * Some of the macros here need to be casted to appropriate types when used
- * from C. They definitely must not be casted from assembly language so we
- * use some new ANSI preprocessor stuff to paste these on where needed.
- */
-
-#define CACHEABLE_MEM_SPACE 0xe000000000000000
-#define CAC_BASE CACHEABLE_MEM_SPACE
-#define HSPEC_BASE 0xc0000b0000000000
-#define HSPEC_SWIZ_BASE 0xc000030000000000
-#define IO_BASE 0xc0000a0000000000
-#define IO_SWIZ_BASE 0xc000020000000000
-#define MSPEC_BASE 0xc000090000000000
-#define UNCAC_BASE 0xc000000000000000
-#define TO_PHYS_MASK 0x000000ffffffffff
-
-#define TO_PHYS(x) ( ((x) & TO_PHYS_MASK))
-#define TO_CAC(x) (CAC_BASE | ((x) & TO_PHYS_MASK))
-#define TO_UNCAC(x) (UNCAC_BASE | ((x) & TO_PHYS_MASK))
-#define TO_MSPEC(x) (MSPEC_BASE | ((x) & TO_PHYS_MASK))
-#define TO_HSPEC(x) (HSPEC_BASE | ((x) & TO_PHYS_MASK))
-
-
-/*
- * The following couple of definitions will eventually need to be variables,
- * since the amount of address space assigned to each node depends on
- * whether the system is running in N-mode (more nodes with less memory)
- * or M-mode (fewer nodes with more memory). We expect that it will
- * be a while before we need to make this decision dynamically, though,
- * so for now we just use defines bracketed by an ifdef.
- */
-
-#if defined(N_MODE)
-
-#define NODE_SIZE_BITS 32
-#define BWIN_SIZE_BITS 28
-
-#define NASID_BITS 8
-#define NASID_BITMASK (0xffLL)
-#define NASID_SHFT 32
-#define NASID_META_BITS 1
-#define NASID_LOCAL_BITS 7
-
-#define BDDIR_UPPER_MASK (UINT64_CAST 0x1ffffff << 4)
-#define BDECC_UPPER_MASK (UINT64_CAST 0x1fffffff )
-
-#else /* !defined(N_MODE), assume that M-mode is desired */
-
-#define NODE_SIZE_BITS 33
-#define BWIN_SIZE_BITS 29
-
-#define NASID_BITMASK (0x7fLL)
-#define NASID_BITS 7
-#define NASID_SHFT 33
-#define NASID_META_BITS 0
-#define NASID_LOCAL_BITS 7
-
-#define BDDIR_UPPER_MASK (UINT64_CAST 0x3ffffff << 4)
-#define BDECC_UPPER_MASK (UINT64_CAST 0x3fffffff)
-
-#endif /* defined(N_MODE) */
-
-#define NODE_ADDRSPACE_SIZE (UINT64_CAST 1 << NODE_SIZE_BITS)
-
-#define NASID_MASK (UINT64_CAST NASID_BITMASK << NASID_SHFT)
-#define NASID_GET(_pa) (int) ((UINT64_CAST (_pa) >> \
- NASID_SHFT) & NASID_BITMASK)
-
-#ifndef __ASSEMBLY__
-#define NODE_SWIN_BASE(nasid, widget) \
- ((widget == 0) ? NODE_BWIN_BASE((nasid), SWIN0_BIGWIN) \
- : RAW_NODE_SWIN_BASE(nasid, widget))
-#else
-#define NODE_SWIN_BASE(nasid, widget) \
- (NODE_IO_BASE(nasid) + (UINT64_CAST (widget) << SWIN_SIZE_BITS))
-#endif /* __ASSEMBLY__ */
-
-/*
- * The following definitions pertain to the IO special address
- * space. They define the location of the big and little windows
- * of any given node.
- */
-
-#define BWIN_INDEX_BITS 3
-#define BWIN_SIZE (UINT64_CAST 1 << BWIN_SIZE_BITS)
-#define BWIN_SIZEMASK (BWIN_SIZE - 1)
-#define BWIN_WIDGET_MASK 0x7
-#define NODE_BWIN_BASE0(nasid) (NODE_IO_BASE(nasid) + BWIN_SIZE)
-#define NODE_BWIN_BASE(nasid, bigwin) (NODE_BWIN_BASE0(nasid) + \
- (UINT64_CAST (bigwin) << BWIN_SIZE_BITS))
-
-#define BWIN_WIDGETADDR(addr) ((addr) & BWIN_SIZEMASK)
-#define BWIN_WINDOWNUM(addr) (((addr) >> BWIN_SIZE_BITS) & BWIN_WIDGET_MASK)
-/*
- * Verify if addr belongs to large window address of node with "nasid"
- *
- *
- * NOTE: "addr" is expected to be XKPHYS address, and NOT physical
- * address
- *
- *
- */
-
-#define NODE_BWIN_ADDR(nasid, addr) \
- (((addr) >= NODE_BWIN_BASE0(nasid)) && \
- ((addr) < (NODE_BWIN_BASE(nasid, HUB_NUM_BIG_WINDOW) + \
- BWIN_SIZE)))
-
-/*
- * The following define the major position-independent aliases used
- * in SN1.
- * CALIAS -- Varies in size, points to the first n bytes of memory
- * on the reader's node.
- */
-
-#define CALIAS_BASE CAC_BASE
-
-
-
-#define BRIDGE_REG_PTR(_base, _off) ((volatile bridgereg_t *) \
- ((__psunsigned_t)(_base) + (__psunsigned_t)(_off)))
-
-#define SN0_WIDGET_BASE(_nasid, _wid) (NODE_SWIN_BASE((_nasid), (_wid)))
-
-
-
-/*
- * needed by symmon so it needs to be outside #if PROM
- * (see also POD_ELSCSIZE)
- */
-#define IP27PROM_ELSC_BASE_A PHYS_TO_K0(0x020e0000)
-#define IP27PROM_ELSC_BASE_B PHYS_TO_K0(0x020e0800)
-#define IP27PROM_ELSC_BASE_C PHYS_TO_K0(0x020e1000)
-#define IP27PROM_ELSC_BASE_D PHYS_TO_K0(0x020e1800)
-#define IP27PROM_ELSC_SHFT 11
-#define IP27PROM_ELSC_SIZE (1 << IP27PROM_ELSC_SHFT)
-
-#define FREEMEM_BASE PHYS_TO_K0(0x4000000)
-
-#define IO6PROM_STACK_SHFT 14 /* stack per cpu */
-#define IO6PROM_STACK_SIZE (1 << IO6PROM_STACK_SHFT)
-
-
-#define KL_UART_BASE LOCAL_HSPEC(HSPEC_UART_0) /* base of UART regs */
-#define KL_UART_CMD LOCAL_HSPEC(HSPEC_UART_0) /* UART command reg */
-#define KL_UART_DATA LOCAL_HSPEC(HSPEC_UART_1) /* UART data reg */
-
-#if !__ASSEMBLY__
-/* Address 0x400 to 0x1000 ualias points to cache error eframe + misc
- * CACHE_ERR_SP_PTR could either contain an address to the stack, or
- * the stack could start at CACHE_ERR_SP_PTR
- */
-#define CACHE_ERR_EFRAME 0x400
-
-#define CACHE_ERR_ECCFRAME (CACHE_ERR_EFRAME + EF_SIZE)
-#define CACHE_ERR_SP_PTR (0x1000 - 32) /* why -32? TBD */
-#define CACHE_ERR_IBASE_PTR (0x1000 - 40)
-#define CACHE_ERR_SP (CACHE_ERR_SP_PTR - 16)
-#define CACHE_ERR_AREA_SIZE (ARCS_SPB_OFFSET - CACHE_ERR_EFRAME)
-
-#endif /* !__ASSEMBLY__ */
-
-
-
-#define _ARCSPROM
-
-#ifdef _STANDALONE
-
-/*
- * The PROM needs to pass the device base address and the
- * device pci cfg space address to the device drivers during
- * install. The COMPONENT->Key field is used for this purpose.
- * Macros needed by SN1 device drivers to convert the
- * COMPONENT->Key field to the respective base address.
- * Key field looks as follows:
- *
- * +----------------------------------------------------+
- * |devnasid | widget |pciid |hubwidid|hstnasid | adap |
- * | 2 | 1 | 1 | 1 | 2 | 1 |
- * +----------------------------------------------------+
- * | | | | | | |
- * 64 48 40 32 24 8 0
- *
- * These are used by standalone drivers till the io infrastructure
- * is in place.
- */
-
-#ifndef __ASSEMBLY__
-
-#define uchar unsigned char
-
-#define KEY_DEVNASID_SHFT 48
-#define KEY_WIDID_SHFT 40
-#define KEY_PCIID_SHFT 32
-#define KEY_HUBWID_SHFT 24
-#define KEY_HSTNASID_SHFT 8
-
-#define MK_SN0_KEY(nasid, widid, pciid) \
- ((((__psunsigned_t)nasid)<< KEY_DEVNASID_SHFT |\
- ((__psunsigned_t)widid) << KEY_WIDID_SHFT) |\
- ((__psunsigned_t)pciid) << KEY_PCIID_SHFT)
-
-#define ADD_HUBWID_KEY(key,hubwid)\
- (key|=((__psunsigned_t)hubwid << KEY_HUBWID_SHFT))
-
-#define ADD_HSTNASID_KEY(key,hstnasid)\
- (key|=((__psunsigned_t)hstnasid << KEY_HSTNASID_SHFT))
-
-#define GET_DEVNASID_FROM_KEY(key) ((short)(key >> KEY_DEVNASID_SHFT))
-#define GET_WIDID_FROM_KEY(key) ((uchar)(key >> KEY_WIDID_SHFT))
-#define GET_PCIID_FROM_KEY(key) ((uchar)(key >> KEY_PCIID_SHFT))
-#define GET_HUBWID_FROM_KEY(key) ((uchar)(key >> KEY_HUBWID_SHFT))
-#define GET_HSTNASID_FROM_KEY(key) ((short)(key >> KEY_HSTNASID_SHFT))
-
-#define PCI_64_TARGID_SHFT 60
-
-#define GET_PCIBASE_FROM_KEY(key) (NODE_SWIN_BASE(GET_DEVNASID_FROM_KEY(key),\
- GET_WIDID_FROM_KEY(key))\
- | BRIDGE_DEVIO(GET_PCIID_FROM_KEY(key)))
-
-#define GET_PCICFGBASE_FROM_KEY(key) \
- (NODE_SWIN_BASE(GET_DEVNASID_FROM_KEY(key),\
- GET_WIDID_FROM_KEY(key))\
- | BRIDGE_TYPE0_CFG_DEV(GET_PCIID_FROM_KEY(key)))
-
-#define GET_WIDBASE_FROM_KEY(key) \
- (NODE_SWIN_BASE(GET_DEVNASID_FROM_KEY(key),\
- GET_WIDID_FROM_KEY(key)))
-
-#define PUT_INSTALL_STATUS(c,s) c->Revision = s
-#define GET_INSTALL_STATUS(c) c->Revision
-
-#endif /* __ASSEMBLY__ */
-
-#endif /* _STANDALONE */
-#endif /* CONFIG_IA64_SGI_SN1 */
-
-#endif /* _ASM_IA64_SN_SN1_ADDRS_H */
+++ /dev/null
-/* $Id$
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Copyright (C) 1992 - 1997, 2000-2001 Silicon Graphics, Inc. All rights reserved.
- */
-#ifndef _ASM_IA64_SN_SN1_ARCH_H
-#define _ASM_IA64_SN_SN1_ARCH_H
-
-#if defined(N_MODE)
-#error "ERROR constants defined only for M-mode"
-#endif
-
-#include <linux/threads.h>
-#include <asm/types.h>
-
-#define CPUS_PER_NODE 4 /* CPUs on a single hub */
-#define CPUS_PER_SUBNODE 2 /* CPUs on a single hub PI */
-
-/*
- * This is the maximum number of NASIDS that can be present in a system.
- * This include ALL nodes in ALL partitions connected via NUMALINK.
- * (Highest NASID plus one.)
- */
-#define MAX_NASIDS 128
-
-/*
- * This is the maximum number of nodes that can be part of a kernel.
- * Effectively, it's the maximum number of compact node ids (cnodeid_t).
- * This is not necessarily the same as MAX_NASIDS.
- */
-#define MAX_COMPACT_NODES 128
-
-/*
- * MAX_REGIONS refers to the maximum number of hardware partitioned regions.
- */
-#define MAX_REGIONS 64
-#define MAX_NONPREMIUM_REGIONS 16
-#define MAX_PREMIUM_REGIONS MAX_REGIONS
-
-/*
- * Slot constants for IP35
- */
-
-#define MAX_MEM_SLOTS 8 /* max slots per node */
-
-#if defined(N_MODE)
-#error "N-mode not supported"
-#endif
-
-#define SLOT_SHIFT (30)
-#define SLOT_MIN_MEM_SIZE (64*1024*1024)
-
-
-/*
- * MAX_PARITIONS refers to the maximum number of logically defined
- * partitions the system can support.
- */
-#define MAX_PARTITIONS MAX_REGIONS
-
-
-#define NASID_MASK_BYTES ((MAX_NASIDS + 7) / 8)
-
-/*
- * New stuff in here from Irix sys/pfdat.h.
- */
-#define SLOT_PFNSHIFT (SLOT_SHIFT - PAGE_SHIFT)
-#define PFN_NASIDSHFT (NASID_SHFT - PAGE_SHIFT)
-#define slot_getbasepfn(node,slot) (mkpfn(COMPACT_TO_NASID_NODEID(node), slot<<SLOT_PFNSHIFT))
-#define mkpfn(nasid, off) (((pfn_t)(nasid) << PFN_NASIDSHFT) | (off))
-
-
-
-/*
- * two PIs per bedrock, two CPUs per PI
- */
-#define NUM_SUBNODES 2
-#define SUBNODE_SHFT 1
-#define SUBNODE_MASK (0x1 << SUBNODE_SHFT)
-#define LOCALCPU_SHFT 0
-#define LOCALCPU_MASK (0x1 << LOCALCPU_SHFT)
-#define SUBNODE(slice) (((slice) & SUBNODE_MASK) >> SUBNODE_SHFT)
-#define LOCALCPU(slice) (((slice) & LOCALCPU_MASK) >> LOCALCPU_SHFT)
-#define TO_SLICE(subn, local) (((subn) << SUBNODE_SHFT) | \
- ((local) << LOCALCPU_SHFT))
-
-#endif /* _ASM_IA64_SN_SN1_ARCH_H */
+++ /dev/null
-/* $Id$
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Copyright (C) 1992 - 1997, 2000-2002 Silicon Graphics, Inc. All rights reserved.
- */
-
-#ifndef _ASM_IA64_SN_SN1_BEDROCK_H
-#define _ASM_IA64_SN_SN1_BEDROCK_H
-
-
-/* The secret password; used to release protection */
-#define HUB_PASSWORD 0x53474972756c6573ull
-
-#define CHIPID_HUB 0x3012
-#define CHIPID_ROUTER 0x3017
-
-#define BEDROCK_REV_1_0 1
-#define BEDROCK_REV_1_1 2
-
-#define MAX_HUB_PATH 80
-
-#include <asm/sn/arch.h>
-#include <asm/sn/sn1/addrs.h>
-#include <asm/sn/sn1/hubpi.h>
-#include <asm/sn/sn1/hubmd.h>
-#include <asm/sn/sn1/hubio.h>
-#include <asm/sn/sn1/hubni.h>
-#include <asm/sn/sn1/hublb.h>
-#include <asm/sn/sn1/hubxb.h>
-#include <asm/sn/sn1/hubpi_next.h>
-#include <asm/sn/sn1/hubmd_next.h>
-#include <asm/sn/sn1/hubio_next.h>
-#include <asm/sn/sn1/hubni_next.h>
-#include <asm/sn/sn1/hublb_next.h>
-#include <asm/sn/sn1/hubxb_next.h>
-
-/* Translation of uncached attributes */
-#define UATTR_HSPEC 0
-#define UATTR_IO 1
-#define UATTR_MSPEC 2
-#define UATTR_UNCAC 3
-
-#if __ASSEMBLY__
-
-/*
- * Get nasid into register, r (uses at)
- */
-#define GET_NASID_ASM(r) \
- dli r, LOCAL_HUB_ADDR(LB_REV_ID); \
- ld r, (r); \
- and r, LRI_NODEID_MASK; \
- dsrl r, LRI_NODEID_SHFT
-
-#endif /* __ASSEMBLY__ */
-
-#ifndef __ASSEMBLY__
-
-#include <asm/sn/xtalk/xwidget.h>
-
-/* hub-as-widget iograph info, labelled by INFO_LBL_XWIDGET */
-typedef struct v_hub_s *v_hub_t;
-typedef uint64_t rtc_time_t;
-
-struct nodepda_s;
-int hub_check_pci_equiv(void *addra, void *addrb);
-void capture_hub_stats(cnodeid_t, struct nodepda_s *);
-void init_hub_stats(cnodeid_t, struct nodepda_s *);
-
-#endif /* __ASSEMBLY__ */
-
-#endif /* _ASM_IA64_SN_SN1_BEDROCK_H */
+++ /dev/null
-/* $Id$
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Copyright (C) 1992 - 1997, 2000-2001 Silicon Graphics, Inc. All rights reserved.
- */
-
-#ifndef _ASM_IA64_SN_SN1_HUBDEV_H
-#define _ASM_IA64_SN_SN1_HUBDEV_H
-
-extern void hubdev_init(void);
-extern void hubdev_register(int (*attach_method)(devfs_handle_t));
-extern int hubdev_unregister(int (*attach_method)(devfs_handle_t));
-extern int hubdev_docallouts(devfs_handle_t hub);
-
-extern caddr_t hubdev_prombase_get(devfs_handle_t hub);
-extern cnodeid_t hubdev_cnodeid_get(devfs_handle_t hub);
-
-#endif /* _ASM_IA64_SN_SN1_HUBDEV_H */
+++ /dev/null
-/* $Id$
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Copyright (C) 1992 - 1997, 2000-2001 Silicon Graphics, Inc. All rights reserved.
- */
-
-/************************************************************************
- * *
- * WARNING!!! WARNING!!! WARNING!!! WARNING!!! WARNING!!! *
- * *
- * This file is created by an automated script. Any (minimal) changes *
- * made manually to this file should be made with care. *
- * *
- * MAKE ALL ADDITIONS TO THE END OF THIS FILE *
- * *
- ************************************************************************/
-
-
-#ifndef _ASM_IA64_SN_SN1_HUBIO_H
-#define _ASM_IA64_SN_SN1_HUBIO_H
-
-
-#define IIO_WID 0x00400000 /*
- * Crosstalk Widget
- * Identification This
- * register is also
- * accessible from
- * Crosstalk at
- * address 0x0.
- */
-
-
-
-#define IIO_WSTAT 0x00400008 /*
- * Crosstalk Widget
- * Status
- */
-
-
-
-#define IIO_WCR 0x00400020 /*
- * Crosstalk Widget
- * Control Register
- */
-
-
-
-#define IIO_ILAPR 0x00400100 /*
- * IO Local Access
- * Protection Register
- */
-
-
-
-#define IIO_ILAPO 0x00400108 /*
- * IO Local Access
- * Protection Override
- */
-
-
-
-#define IIO_IOWA 0x00400110 /*
- * IO Outbound Widget
- * Access
- */
-
-
-
-#define IIO_IIWA 0x00400118 /*
- * IO Inbound Widget
- * Access
- */
-
-
-
-#define IIO_IIDEM 0x00400120 /*
- * IO Inbound Device
- * Error Mask
- */
-
-
-
-#define IIO_ILCSR 0x00400128 /*
- * IO LLP Control and
- * Status Register
- */
-
-
-
-#define IIO_ILLR 0x00400130 /* IO LLP Log Register */
-
-
-
-#define IIO_IIDSR 0x00400138 /*
- * IO Interrupt
- * Destination
- */
-
-
-
-#define IIO_IGFX0 0x00400140 /*
- * IO Graphics
- * Node-Widget Map 0
- */
-
-
-
-#define IIO_IGFX1 0x00400148 /*
- * IO Graphics
- * Node-Widget Map 1
- */
-
-
-
-#define IIO_ISCR0 0x00400150 /*
- * IO Scratch Register
- * 0
- */
-
-
-
-#define IIO_ISCR1 0x00400158 /*
- * IO Scratch Register
- * 1
- */
-
-
-
-#define IIO_ITTE1 0x00400160 /*
- * IO Translation
- * Table Entry 1
- */
-
-
-
-#define IIO_ITTE2 0x00400168 /*
- * IO Translation
- * Table Entry 2
- */
-
-
-
-#define IIO_ITTE3 0x00400170 /*
- * IO Translation
- * Table Entry 3
- */
-
-
-
-#define IIO_ITTE4 0x00400178 /*
- * IO Translation
- * Table Entry 4
- */
-
-
-
-#define IIO_ITTE5 0x00400180 /*
- * IO Translation
- * Table Entry 5
- */
-
-
-
-#define IIO_ITTE6 0x00400188 /*
- * IO Translation
- * Table Entry 6
- */
-
-
-
-#define IIO_ITTE7 0x00400190 /*
- * IO Translation
- * Table Entry 7
- */
-
-
-
-#define IIO_IPRB0 0x00400198 /* IO PRB Entry 0 */
-
-
-
-#define IIO_IPRB8 0x004001A0 /* IO PRB Entry 8 */
-
-
-
-#define IIO_IPRB9 0x004001A8 /* IO PRB Entry 9 */
-
-
-
-#define IIO_IPRBA 0x004001B0 /* IO PRB Entry A */
-
-
-
-#define IIO_IPRBB 0x004001B8 /* IO PRB Entry B */
-
-
-
-#define IIO_IPRBC 0x004001C0 /* IO PRB Entry C */
-
-
-
-#define IIO_IPRBD 0x004001C8 /* IO PRB Entry D */
-
-
-
-#define IIO_IPRBE 0x004001D0 /* IO PRB Entry E */
-
-
-
-#define IIO_IPRBF 0x004001D8 /* IO PRB Entry F */
-
-
-
-#define IIO_IXCC 0x004001E0 /*
- * IO Crosstalk Credit
- * Count Timeout
- */
-
-
-
-#define IIO_IMEM 0x004001E8 /*
- * IO Miscellaneous
- * Error Mask
- */
-
-
-
-#define IIO_IXTT 0x004001F0 /*
- * IO Crosstalk
- * Timeout Threshold
- */
-
-
-
-#define IIO_IECLR 0x004001F8 /*
- * IO Error Clear
- * Register
- */
-
-
-
-#define IIO_IBCR 0x00400200 /*
- * IO BTE Control
- * Register
- */
-
-
-
-#define IIO_IXSM 0x00400208 /*
- * IO Crosstalk
- * Spurious Message
- */
-
-
-
-#define IIO_IXSS 0x00400210 /*
- * IO Crosstalk
- * Spurious Sideband
- */
-
-
-
-#define IIO_ILCT 0x00400218 /* IO LLP Channel Test */
-
-
-
-#define IIO_IIEPH1 0x00400220 /*
- * IO Incoming Error
- * Packet Header, Part
- * 1
- */
-
-
-
-#define IIO_IIEPH2 0x00400228 /*
- * IO Incoming Error
- * Packet Header, Part
- * 2
- */
-
-
-
-#define IIO_IPCA 0x00400300 /*
- * IO PRB Counter
- * Adjust
- */
-
-
-
-#define IIO_IPRTE0 0x00400308 /*
- * IO PIO Read Address
- * Table Entry 0
- */
-
-
-
-#define IIO_IPRTE1 0x00400310 /*
- * IO PIO Read Address
- * Table Entry 1
- */
-
-
-
-#define IIO_IPRTE2 0x00400318 /*
- * IO PIO Read Address
- * Table Entry 2
- */
-
-
-
-#define IIO_IPRTE3 0x00400320 /*
- * IO PIO Read Address
- * Table Entry 3
- */
-
-
-
-#define IIO_IPRTE4 0x00400328 /*
- * IO PIO Read Address
- * Table Entry 4
- */
-
-
-
-#define IIO_IPRTE5 0x00400330 /*
- * IO PIO Read Address
- * Table Entry 5
- */
-
-
-
-#define IIO_IPRTE6 0x00400338 /*
- * IO PIO Read Address
- * Table Entry 6
- */
-
-
-
-#define IIO_IPRTE7 0x00400340 /*
- * IO PIO Read Address
- * Table Entry 7
- */
-
-
-
-#define IIO_IPDR 0x00400388 /*
- * IO PIO Deallocation
- * Register
- */
-
-
-
-#define IIO_ICDR 0x00400390 /*
- * IO CRB Entry
- * Deallocation
- * Register
- */
-
-
-
-#define IIO_IFDR 0x00400398 /*
- * IO IOQ FIFO Depth
- * Register
- */
-
-
-
-#define IIO_IIAP 0x004003A0 /*
- * IO IIQ Arbitration
- * Parameters
- */
-
-
-
-#define IIO_ICMR 0x004003A8 /*
- * IO CRB Management
- * Register
- */
-
-
-
-#define IIO_ICCR 0x004003B0 /*
- * IO CRB Control
- * Register
- */
-
-
-
-#define IIO_ICTO 0x004003B8 /* IO CRB Timeout */
-
-
-
-#define IIO_ICTP 0x004003C0 /*
- * IO CRB Timeout
- * Prescalar
- */
-
-
-
-#define IIO_ICRB0_A 0x00400400 /* IO CRB Entry 0_A */
-
-
-
-#define IIO_ICRB0_B 0x00400408 /* IO CRB Entry 0_B */
-
-
-
-#define IIO_ICRB0_C 0x00400410 /* IO CRB Entry 0_C */
-
-
-
-#define IIO_ICRB0_D 0x00400418 /* IO CRB Entry 0_D */
-
-
-
-#define IIO_ICRB1_A 0x00400420 /* IO CRB Entry 1_A */
-
-
-
-#define IIO_ICRB1_B 0x00400428 /* IO CRB Entry 1_B */
-
-
-
-#define IIO_ICRB1_C 0x00400430 /* IO CRB Entry 1_C */
-
-
-
-#define IIO_ICRB1_D 0x00400438 /* IO CRB Entry 1_D */
-
-
-
-#define IIO_ICRB2_A 0x00400440 /* IO CRB Entry 2_A */
-
-
-
-#define IIO_ICRB2_B 0x00400448 /* IO CRB Entry 2_B */
-
-
-
-#define IIO_ICRB2_C 0x00400450 /* IO CRB Entry 2_C */
-
-
-
-#define IIO_ICRB2_D 0x00400458 /* IO CRB Entry 2_D */
-
-
-
-#define IIO_ICRB3_A 0x00400460 /* IO CRB Entry 3_A */
-
-
-
-#define IIO_ICRB3_B 0x00400468 /* IO CRB Entry 3_B */
-
-
-
-#define IIO_ICRB3_C 0x00400470 /* IO CRB Entry 3_C */
-
-
-
-#define IIO_ICRB3_D 0x00400478 /* IO CRB Entry 3_D */
-
-
-
-#define IIO_ICRB4_A 0x00400480 /* IO CRB Entry 4_A */
-
-
-
-#define IIO_ICRB4_B 0x00400488 /* IO CRB Entry 4_B */
-
-
-
-#define IIO_ICRB4_C 0x00400490 /* IO CRB Entry 4_C */
-
-
-
-#define IIO_ICRB4_D 0x00400498 /* IO CRB Entry 4_D */
-
-
-
-#define IIO_ICRB5_A 0x004004A0 /* IO CRB Entry 5_A */
-
-
-
-#define IIO_ICRB5_B 0x004004A8 /* IO CRB Entry 5_B */
-
-
-
-#define IIO_ICRB5_C 0x004004B0 /* IO CRB Entry 5_C */
-
-
-
-#define IIO_ICRB5_D 0x004004B8 /* IO CRB Entry 5_D */
-
-
-
-#define IIO_ICRB6_A 0x004004C0 /* IO CRB Entry 6_A */
-
-
-
-#define IIO_ICRB6_B 0x004004C8 /* IO CRB Entry 6_B */
-
-
-
-#define IIO_ICRB6_C 0x004004D0 /* IO CRB Entry 6_C */
-
-
-
-#define IIO_ICRB6_D 0x004004D8 /* IO CRB Entry 6_D */
-
-
-
-#define IIO_ICRB7_A 0x004004E0 /* IO CRB Entry 7_A */
-
-
-
-#define IIO_ICRB7_B 0x004004E8 /* IO CRB Entry 7_B */
-
-
-
-#define IIO_ICRB7_C 0x004004F0 /* IO CRB Entry 7_C */
-
-
-
-#define IIO_ICRB7_D 0x004004F8 /* IO CRB Entry 7_D */
-
-
-
-#define IIO_ICRB8_A 0x00400500 /* IO CRB Entry 8_A */
-
-
-
-#define IIO_ICRB8_B 0x00400508 /* IO CRB Entry 8_B */
-
-
-
-#define IIO_ICRB8_C 0x00400510 /* IO CRB Entry 8_C */
-
-
-
-#define IIO_ICRB8_D 0x00400518 /* IO CRB Entry 8_D */
-
-
-
-#define IIO_ICRB9_A 0x00400520 /* IO CRB Entry 9_A */
-
-
-
-#define IIO_ICRB9_B 0x00400528 /* IO CRB Entry 9_B */
-
-
-
-#define IIO_ICRB9_C 0x00400530 /* IO CRB Entry 9_C */
-
-
-
-#define IIO_ICRB9_D 0x00400538 /* IO CRB Entry 9_D */
-
-
-
-#define IIO_ICRBA_A 0x00400540 /* IO CRB Entry A_A */
-
-
-
-#define IIO_ICRBA_B 0x00400548 /* IO CRB Entry A_B */
-
-
-
-#define IIO_ICRBA_C 0x00400550 /* IO CRB Entry A_C */
-
-
-
-#define IIO_ICRBA_D 0x00400558 /* IO CRB Entry A_D */
-
-
-
-#define IIO_ICRBB_A 0x00400560 /* IO CRB Entry B_A */
-
-
-
-#define IIO_ICRBB_B 0x00400568 /* IO CRB Entry B_B */
-
-
-
-#define IIO_ICRBB_C 0x00400570 /* IO CRB Entry B_C */
-
-
-
-#define IIO_ICRBB_D 0x00400578 /* IO CRB Entry B_D */
-
-
-
-#define IIO_ICRBC_A 0x00400580 /* IO CRB Entry C_A */
-
-
-
-#define IIO_ICRBC_B 0x00400588 /* IO CRB Entry C_B */
-
-
-
-#define IIO_ICRBC_C 0x00400590 /* IO CRB Entry C_C */
-
-
-
-#define IIO_ICRBC_D 0x00400598 /* IO CRB Entry C_D */
-
-
-
-#define IIO_ICRBD_A 0x004005A0 /* IO CRB Entry D_A */
-
-
-
-#define IIO_ICRBD_B 0x004005A8 /* IO CRB Entry D_B */
-
-
-
-#define IIO_ICRBD_C 0x004005B0 /* IO CRB Entry D_C */
-
-
-
-#define IIO_ICRBD_D 0x004005B8 /* IO CRB Entry D_D */
-
-
-
-#define IIO_ICRBE_A 0x004005C0 /* IO CRB Entry E_A */
-
-
-
-#define IIO_ICRBE_B 0x004005C8 /* IO CRB Entry E_B */
-
-
-
-#define IIO_ICRBE_C 0x004005D0 /* IO CRB Entry E_C */
-
-
-
-#define IIO_ICRBE_D 0x004005D8 /* IO CRB Entry E_D */
-
-
-
-#define IIO_ICSML 0x00400600 /*
- * IO CRB Spurious
- * Message Low
- */
-
-
-
-#define IIO_ICSMH 0x00400608 /*
- * IO CRB Spurious
- * Message High
- */
-
-
-
-#define IIO_IDBSS 0x00400610 /*
- * IO Debug Submenu
- * Select
- */
-
-
-
-#define IIO_IBLS0 0x00410000 /*
- * IO BTE Length
- * Status 0
- */
-
-
-
-#define IIO_IBSA0 0x00410008 /*
- * IO BTE Source
- * Address 0
- */
-
-
-
-#define IIO_IBDA0 0x00410010 /*
- * IO BTE Destination
- * Address 0
- */
-
-
-
-#define IIO_IBCT0 0x00410018 /*
- * IO BTE Control
- * Terminate 0
- */
-
-
-
-#define IIO_IBNA0 0x00410020 /*
- * IO BTE Notification
- * Address 0
- */
-
-
-
-#define IIO_IBIA0 0x00410028 /*
- * IO BTE Interrupt
- * Address 0
- */
-
-
-
-#define IIO_IBLS1 0x00420000 /*
- * IO BTE Length
- * Status 1
- */
-
-
-
-#define IIO_IBSA1 0x00420008 /*
- * IO BTE Source
- * Address 1
- */
-
-
-
-#define IIO_IBDA1 0x00420010 /*
- * IO BTE Destination
- * Address 1
- */
-
-
-
-#define IIO_IBCT1 0x00420018 /*
- * IO BTE Control
- * Terminate 1
- */
-
-
-
-#define IIO_IBNA1 0x00420020 /*
- * IO BTE Notification
- * Address 1
- */
-
-
-
-#define IIO_IBIA1 0x00420028 /*
- * IO BTE Interrupt
- * Address 1
- */
-
-
-
-#define IIO_IPCR 0x00430000 /*
- * IO Performance
- * Control
- */
-
-
-
-#define IIO_IPPR 0x00430008 /*
- * IO Performance
- * Profiling
- */
-
-
-
-
-
-#ifndef __ASSEMBLY__
-
-/************************************************************************
- * *
- * Description: This register echoes some information from the *
- * LB_REV_ID register. It is available through Crosstalk as described *
- * above. The REV_NUM and MFG_NUM fields receive their values from *
- * the REVISION and MANUFACTURER fields in the LB_REV_ID register. *
- * The PART_NUM field's value is the Crosstalk device ID number that *
- * Steve Miller assigned to the Bedrock chip. *
- * *
- ************************************************************************/
-
-
-
-
-#ifdef LITTLE_ENDIAN
-
-typedef union ii_wid_u {
- bdrkreg_t ii_wid_regval;
- struct {
- bdrkreg_t w_rsvd_1 : 1;
- bdrkreg_t w_mfg_num : 11;
- bdrkreg_t w_part_num : 16;
- bdrkreg_t w_rev_num : 4;
- bdrkreg_t w_rsvd : 32;
- } ii_wid_fld_s;
-} ii_wid_u_t;
-
-#else
-
-typedef union ii_wid_u {
- bdrkreg_t ii_wid_regval;
- struct {
- bdrkreg_t w_rsvd : 32;
- bdrkreg_t w_rev_num : 4;
- bdrkreg_t w_part_num : 16;
- bdrkreg_t w_mfg_num : 11;
- bdrkreg_t w_rsvd_1 : 1;
- } ii_wid_fld_s;
-} ii_wid_u_t;
-
-#endif
-
-
-
-
-/************************************************************************
- * *
- * The fields in this register are set upon detection of an error *
- * and cleared by various mechanisms, as explained in the *
- * description. *
- * *
- ************************************************************************/
-
-
-
-
-#ifdef LITTLE_ENDIAN
-
-typedef union ii_wstat_u {
- bdrkreg_t ii_wstat_regval;
- struct {
- bdrkreg_t w_pending : 4;
- bdrkreg_t w_xt_crd_to : 1;
- bdrkreg_t w_xt_tail_to : 1;
- bdrkreg_t w_rsvd_3 : 3;
- bdrkreg_t w_tx_mx_rty : 1;
- bdrkreg_t w_rsvd_2 : 6;
- bdrkreg_t w_llp_tx_cnt : 8;
- bdrkreg_t w_rsvd_1 : 8;
- bdrkreg_t w_crazy : 1;
- bdrkreg_t w_rsvd : 31;
- } ii_wstat_fld_s;
-} ii_wstat_u_t;
-
-#else
-
-typedef union ii_wstat_u {
- bdrkreg_t ii_wstat_regval;
- struct {
- bdrkreg_t w_rsvd : 31;
- bdrkreg_t w_crazy : 1;
- bdrkreg_t w_rsvd_1 : 8;
- bdrkreg_t w_llp_tx_cnt : 8;
- bdrkreg_t w_rsvd_2 : 6;
- bdrkreg_t w_tx_mx_rty : 1;
- bdrkreg_t w_rsvd_3 : 3;
- bdrkreg_t w_xt_tail_to : 1;
- bdrkreg_t w_xt_crd_to : 1;
- bdrkreg_t w_pending : 4;
- } ii_wstat_fld_s;
-} ii_wstat_u_t;
-
-#endif
-
-
-
-
-/************************************************************************
- * *
- * Description: This is a read-write enabled register. It controls *
- * various aspects of the Crosstalk flow control. *
- * *
- ************************************************************************/
-
-
-
-
-#ifdef LITTLE_ENDIAN
-
-typedef union ii_wcr_u {
- bdrkreg_t ii_wcr_regval;
- struct {
- bdrkreg_t w_wid : 4;
- bdrkreg_t w_tag : 1;
- bdrkreg_t w_rsvd_1 : 8;
- bdrkreg_t w_dst_crd : 3;
- bdrkreg_t w_f_bad_pkt : 1;
- bdrkreg_t w_dir_con : 1;
- bdrkreg_t w_e_thresh : 5;
- bdrkreg_t w_rsvd : 41;
- } ii_wcr_fld_s;
-} ii_wcr_u_t;
-
-#else
-
-typedef union ii_wcr_u {
- bdrkreg_t ii_wcr_regval;
- struct {
- bdrkreg_t w_rsvd : 41;
- bdrkreg_t w_e_thresh : 5;
- bdrkreg_t w_dir_con : 1;
- bdrkreg_t w_f_bad_pkt : 1;
- bdrkreg_t w_dst_crd : 3;
- bdrkreg_t w_rsvd_1 : 8;
- bdrkreg_t w_tag : 1;
- bdrkreg_t w_wid : 4;
- } ii_wcr_fld_s;
-} ii_wcr_u_t;
-
-#endif
-
-
-
-
-/************************************************************************
- * *
- * Description: This register's value is a bit vector that guards *
- * access to local registers within the II as well as to external *
- * Crosstalk widgets. Each bit in the register corresponds to a *
- * particular region in the system; a region consists of one, two or *
- * four nodes (depending on the value of the REGION_SIZE field in the *
- * LB_REV_ID register, which is documented in Section 8.3.1.1). The *
- * protection provided by this register applies to PIO read *
- * operations as well as PIO write operations. The II will perform a *
- * PIO read or write request only if the bit for the requestor's *
- * region is set; otherwise, the II will not perform the requested *
- * operation and will return an error response. When a PIO read or *
- * write request targets an external Crosstalk widget, then not only *
- * must the bit for the requestor's region be set in the ILAPR, but *
- * also the target widget's bit in the IOWA register must be set in *
- * order for the II to perform the requested operation; otherwise, *
- * the II will return an error response. Hence, the protection *
- * provided by the IOWA register supplements the protection provided *
- * by the ILAPR for requests that target external Crosstalk widgets. *
- * This register itself can be accessed only by the nodes whose *
- * region ID bits are enabled in this same register. It can also be *
- * accessed through the IAlias space by the local processors. *
- * The reset value of this register allows access by all nodes. *
- * *
- ************************************************************************/
-
-
-
-
-typedef union ii_ilapr_u {
- bdrkreg_t ii_ilapr_regval;
- struct {
- bdrkreg_t i_region : 64;
- } ii_ilapr_fld_s;
-} ii_ilapr_u_t;
-
-
-
-
-/************************************************************************
- * *
- * Description: A write to this register of the 64-bit value *
- * "SGIrules" in ASCII, will cause the bit in the ILAPR register *
- * corresponding to the region of the requestor to be set (allow *
- * access). A write of any other value will be ignored. Access *
- * protection for this register is "SGIrules". *
- * This register can also be accessed through the IAlias space. *
- * However, this access will not change the access permissions in the *
- * ILAPR. *
- * *
- ************************************************************************/
-
-
-
-
-#ifdef LITTLE_ENDIAN
-
-typedef union ii_ilapo_u {
- bdrkreg_t ii_ilapo_regval;
- struct {
- bdrkreg_t i_io_ovrride : 9;
- bdrkreg_t i_rsvd : 55;
- } ii_ilapo_fld_s;
-} ii_ilapo_u_t;
-
-#else
-
-typedef union ii_ilapo_u {
- bdrkreg_t ii_ilapo_regval;
- struct {
- bdrkreg_t i_rsvd : 55;
- bdrkreg_t i_io_ovrride : 9;
- } ii_ilapo_fld_s;
-} ii_ilapo_u_t;
-
-#endif
-
-
-
-
-/************************************************************************
- * *
- * This register qualifies all the PIO and Graphics writes launched *
- * from the Bedrock towards a widget. *
- * *
- ************************************************************************/
-
-
-
-
-#ifdef LITTLE_ENDIAN
-
-typedef union ii_iowa_u {
- bdrkreg_t ii_iowa_regval;
- struct {
- bdrkreg_t i_w0_oac : 1;
- bdrkreg_t i_rsvd_1 : 7;
- bdrkreg_t i_wx_oac : 8;
- bdrkreg_t i_rsvd : 48;
- } ii_iowa_fld_s;
-} ii_iowa_u_t;
-
-#else
-
-typedef union ii_iowa_u {
- bdrkreg_t ii_iowa_regval;
- struct {
- bdrkreg_t i_rsvd : 48;
- bdrkreg_t i_wx_oac : 8;
- bdrkreg_t i_rsvd_1 : 7;
- bdrkreg_t i_w0_oac : 1;
- } ii_iowa_fld_s;
-} ii_iowa_u_t;
-
-#endif
-
-
-
-
-/************************************************************************
- * *
- * Description: This register qualifies all the requests launched *
- * from a widget towards the Bedrock. This register is intended to be *
- * used by software in case of misbehaving widgets. *
- * *
- * *
- ************************************************************************/
-
-
-
-
-#ifdef LITTLE_ENDIAN
-
-typedef union ii_iiwa_u {
- bdrkreg_t ii_iiwa_regval;
- struct {
- bdrkreg_t i_w0_iac : 1;
- bdrkreg_t i_rsvd_1 : 7;
- bdrkreg_t i_wx_iac : 8;
- bdrkreg_t i_rsvd : 48;
- } ii_iiwa_fld_s;
-} ii_iiwa_u_t;
-
-#else
-
-typedef union ii_iiwa_u {
- bdrkreg_t ii_iiwa_regval;
- struct {
- bdrkreg_t i_rsvd : 48;
- bdrkreg_t i_wx_iac : 8;
- bdrkreg_t i_rsvd_1 : 7;
- bdrkreg_t i_w0_iac : 1;
- } ii_iiwa_fld_s;
-} ii_iiwa_u_t;
-
-#endif
-
-
-
-
-/************************************************************************
- * *
- * Description: This register qualifies all the operations launched *
- * from a widget towards the Bedrock. It allows individual access *
- * control for up to 8 devices per widget. A device refers to *
- * individual DMA master hosted by a widget. *
- * The bits in each field of this register are cleared by the Bedrock *
- * upon detection of an error which requires the device to be *
- * disabled. These fields assume that 0=TNUM=7 (i.e., Bridge-centric *
- * Crosstalk). Whether or not a device has access rights to this *
- * Bedrock is determined by an AND of the device enable bit in the *
- * appropriate field of this register and the corresponding bit in *
- * the Wx_IAC field (for the widget which this device belongs to). *
- * The bits in this field are set by writing a 1 to them. Incoming *
- * replies from Crosstalk are not subject to this access control *
- * mechanism. *
- * *
- ************************************************************************/
-
-
-
-
-#ifdef LITTLE_ENDIAN
-
-typedef union ii_iidem_u {
- bdrkreg_t ii_iidem_regval;
- struct {
- bdrkreg_t i_w8_dxs : 8;
- bdrkreg_t i_w9_dxs : 8;
- bdrkreg_t i_wa_dxs : 8;
- bdrkreg_t i_wb_dxs : 8;
- bdrkreg_t i_wc_dxs : 8;
- bdrkreg_t i_wd_dxs : 8;
- bdrkreg_t i_we_dxs : 8;
- bdrkreg_t i_wf_dxs : 8;
- } ii_iidem_fld_s;
-} ii_iidem_u_t;
-
-#else
-
-typedef union ii_iidem_u {
- bdrkreg_t ii_iidem_regval;
- struct {
- bdrkreg_t i_wf_dxs : 8;
- bdrkreg_t i_we_dxs : 8;
- bdrkreg_t i_wd_dxs : 8;
- bdrkreg_t i_wc_dxs : 8;
- bdrkreg_t i_wb_dxs : 8;
- bdrkreg_t i_wa_dxs : 8;
- bdrkreg_t i_w9_dxs : 8;
- bdrkreg_t i_w8_dxs : 8;
- } ii_iidem_fld_s;
-} ii_iidem_u_t;
-
-#endif
-
-
-
-
-/************************************************************************
- * *
- * This register contains the various programmable fields necessary *
- * for controlling and observing the LLP signals. *
- * *
- ************************************************************************/
-
-
-
-
-#ifdef LITTLE_ENDIAN
-
-typedef union ii_ilcsr_u {
- bdrkreg_t ii_ilcsr_regval;
- struct {
- bdrkreg_t i_nullto : 6;
- bdrkreg_t i_rsvd_4 : 2;
- bdrkreg_t i_wrmrst : 1;
- bdrkreg_t i_rsvd_3 : 1;
- bdrkreg_t i_llp_en : 1;
- bdrkreg_t i_bm8 : 1;
- bdrkreg_t i_llp_stat : 2;
- bdrkreg_t i_remote_power : 1;
- bdrkreg_t i_rsvd_2 : 1;
- bdrkreg_t i_maxrtry : 10;
- bdrkreg_t i_d_avail_sel : 2;
- bdrkreg_t i_rsvd_1 : 4;
- bdrkreg_t i_maxbrst : 10;
- bdrkreg_t i_rsvd : 22;
-
- } ii_ilcsr_fld_s;
-} ii_ilcsr_u_t;
-
-#else
-
-typedef union ii_ilcsr_u {
- bdrkreg_t ii_ilcsr_regval;
- struct {
- bdrkreg_t i_rsvd : 22;
- bdrkreg_t i_maxbrst : 10;
- bdrkreg_t i_rsvd_1 : 4;
- bdrkreg_t i_d_avail_sel : 2;
- bdrkreg_t i_maxrtry : 10;
- bdrkreg_t i_rsvd_2 : 1;
- bdrkreg_t i_remote_power : 1;
- bdrkreg_t i_llp_stat : 2;
- bdrkreg_t i_bm8 : 1;
- bdrkreg_t i_llp_en : 1;
- bdrkreg_t i_rsvd_3 : 1;
- bdrkreg_t i_wrmrst : 1;
- bdrkreg_t i_rsvd_4 : 2;
- bdrkreg_t i_nullto : 6;
- } ii_ilcsr_fld_s;
-} ii_ilcsr_u_t;
-
-#endif
-
-
-
-
-/************************************************************************
- * *
- * This is simply a status registers that monitors the LLP error *
- * rate. *
- * *
- ************************************************************************/
-
-
-
-
-#ifdef LITTLE_ENDIAN
-
-typedef union ii_illr_u {
- bdrkreg_t ii_illr_regval;
- struct {
- bdrkreg_t i_sn_cnt : 16;
- bdrkreg_t i_cb_cnt : 16;
- bdrkreg_t i_rsvd : 32;
- } ii_illr_fld_s;
-} ii_illr_u_t;
-
-#else
-
-typedef union ii_illr_u {
- bdrkreg_t ii_illr_regval;
- struct {
- bdrkreg_t i_rsvd : 32;
- bdrkreg_t i_cb_cnt : 16;
- bdrkreg_t i_sn_cnt : 16;
- } ii_illr_fld_s;
-} ii_illr_u_t;
-
-#endif
-
-
-
-
-/************************************************************************
- * *
- * Description: All II-detected non-BTE error interrupts are *
- * specified via this register. *
- * NOTE: The PI interrupt register address is hardcoded in the II. If *
- * PI_ID==0, then the II sends an interrupt request (Duplonet PWRI *
- * packet) to address offset 0x0180_0090 within the local register *
- * address space of PI0 on the node specified by the NODE field. If *
- * PI_ID==1, then the II sends the interrupt request to address *
- * offset 0x01A0_0090 within the local register address space of PI1 *
- * on the node specified by the NODE field. *
- * *
- ************************************************************************/
-
-
-
-
-#ifdef LITTLE_ENDIAN
-
-typedef union ii_iidsr_u {
- bdrkreg_t ii_iidsr_regval;
- struct {
- bdrkreg_t i_level : 7;
- bdrkreg_t i_rsvd_4 : 1;
- bdrkreg_t i_pi_id : 1;
- bdrkreg_t i_node : 8;
- bdrkreg_t i_rsvd_3 : 7;
- bdrkreg_t i_enable : 1;
- bdrkreg_t i_rsvd_2 : 3;
- bdrkreg_t i_int_sent : 1;
- bdrkreg_t i_rsvd_1 : 3;
- bdrkreg_t i_pi0_forward_int : 1;
- bdrkreg_t i_pi1_forward_int : 1;
- bdrkreg_t i_rsvd : 30;
- } ii_iidsr_fld_s;
-} ii_iidsr_u_t;
-
-#else
-
-typedef union ii_iidsr_u {
- bdrkreg_t ii_iidsr_regval;
- struct {
- bdrkreg_t i_rsvd : 30;
- bdrkreg_t i_pi1_forward_int : 1;
- bdrkreg_t i_pi0_forward_int : 1;
- bdrkreg_t i_rsvd_1 : 3;
- bdrkreg_t i_int_sent : 1;
- bdrkreg_t i_rsvd_2 : 3;
- bdrkreg_t i_enable : 1;
- bdrkreg_t i_rsvd_3 : 7;
- bdrkreg_t i_node : 8;
- bdrkreg_t i_pi_id : 1;
- bdrkreg_t i_rsvd_4 : 1;
- bdrkreg_t i_level : 7;
- } ii_iidsr_fld_s;
-} ii_iidsr_u_t;
-
-#endif
-
-
-
-
-/************************************************************************
- * *
- * There are two instances of this register. This register is used *
- * for matching up the incoming responses from the graphics widget to *
- * the processor that initiated the graphics operation. The *
- * write-responses are converted to graphics credits and returned to *
- * the processor so that the processor interface can manage the flow *
- * control. *
- * *
- ************************************************************************/
-
-
-
-
-#ifdef LITTLE_ENDIAN
-
-typedef union ii_igfx0_u {
- bdrkreg_t ii_igfx0_regval;
- struct {
- bdrkreg_t i_w_num : 4;
- bdrkreg_t i_pi_id : 1;
- bdrkreg_t i_n_num : 8;
- bdrkreg_t i_rsvd_1 : 3;
- bdrkreg_t i_p_num : 1;
- bdrkreg_t i_rsvd : 47;
- } ii_igfx0_fld_s;
-} ii_igfx0_u_t;
-
-#else
-
-typedef union ii_igfx0_u {
- bdrkreg_t ii_igfx0_regval;
- struct {
- bdrkreg_t i_rsvd : 47;
- bdrkreg_t i_p_num : 1;
- bdrkreg_t i_rsvd_1 : 3;
- bdrkreg_t i_n_num : 8;
- bdrkreg_t i_pi_id : 1;
- bdrkreg_t i_w_num : 4;
- } ii_igfx0_fld_s;
-} ii_igfx0_u_t;
-
-#endif
-
-
-
-
-/************************************************************************
- * *
- * There are two instances of this register. This register is used *
- * for matching up the incoming responses from the graphics widget to *
- * the processor that initiated the graphics operation. The *
- * write-responses are converted to graphics credits and returned to *
- * the processor so that the processor interface can manage the flow *
- * control. *
- * *
- ************************************************************************/
-
-
-
-
-#ifdef LITTLE_ENDIAN
-
-typedef union ii_igfx1_u {
- bdrkreg_t ii_igfx1_regval;
- struct {
- bdrkreg_t i_w_num : 4;
- bdrkreg_t i_pi_id : 1;
- bdrkreg_t i_n_num : 8;
- bdrkreg_t i_rsvd_1 : 3;
- bdrkreg_t i_p_num : 1;
- bdrkreg_t i_rsvd : 47;
- } ii_igfx1_fld_s;
-} ii_igfx1_u_t;
-
-#else
-
-typedef union ii_igfx1_u {
- bdrkreg_t ii_igfx1_regval;
- struct {
- bdrkreg_t i_rsvd : 47;
- bdrkreg_t i_p_num : 1;
- bdrkreg_t i_rsvd_1 : 3;
- bdrkreg_t i_n_num : 8;
- bdrkreg_t i_pi_id : 1;
- bdrkreg_t i_w_num : 4;
- } ii_igfx1_fld_s;
-} ii_igfx1_u_t;
-
-#endif
-
-
-
-
-/************************************************************************
- * *
- * There are two instances of this registers. These registers are *
- * used as scratch registers for software use. *
- * *
- ************************************************************************/
-
-
-
-
-typedef union ii_iscr0_u {
- bdrkreg_t ii_iscr0_regval;
- struct {
- bdrkreg_t i_scratch : 64;
- } ii_iscr0_fld_s;
-} ii_iscr0_u_t;
-
-
-
-
-/************************************************************************
- * *
- * There are two instances of this registers. These registers are *
- * used as scratch registers for software use. *
- * *
- ************************************************************************/
-
-
-
-
-typedef union ii_iscr1_u {
- bdrkreg_t ii_iscr1_regval;
- struct {
- bdrkreg_t i_scratch : 64;
- } ii_iscr1_fld_s;
-} ii_iscr1_u_t;
-
-
-
-
-/************************************************************************
- * *
- * Description: There are seven instances of translation table entry *
- * registers. Each register maps a Bedrock Big Window to a 48-bit *
- * address on Crosstalk. *
- * For M-mode (128 nodes, 8 GBytes/node), SysAD[31:29] (Big Window *
- * number) are used to select one of these 7 registers. The Widget *
- * number field is then derived from the W_NUM field for synthesizing *
- * a Crosstalk packet. The 5 bits of OFFSET are concatenated with *
- * SysAD[28:0] to form Crosstalk[33:0]. The upper Crosstalk[47:34] *
- * are padded with zeros. Although the maximum Crosstalk space *
- * addressable by the Bedrock is thus the lower 16 GBytes per widget *
- * (M-mode), however only <SUP >7</SUP>/<SUB >32nds</SUB> of this *
- * space can be accessed. *
- * For the N-mode (256 nodes, 4 GBytes/node), SysAD[30:28] (Big *
- * Window number) are used to select one of these 7 registers. The *
- * Widget number field is then derived from the W_NUM field for *
- * synthesizing a Crosstalk packet. The 5 bits of OFFSET are *
- * concatenated with SysAD[27:0] to form Crosstalk[33:0]. The IOSP *
- * field is used as Crosstalk[47], and remainder of the Crosstalk *
- * address bits (Crosstalk[46:34]) are always zero. While the maximum *
- * Crosstalk space addressable by the Bedrock is thus the lower *
- * 8-GBytes per widget (N-mode), only <SUP >7</SUP>/<SUB >32nds</SUB> *
- * of this space can be accessed. *
- * *
- ************************************************************************/
-
-
-
-
-#ifdef LITTLE_ENDIAN
-
-typedef union ii_itte1_u {
- bdrkreg_t ii_itte1_regval;
- struct {
- bdrkreg_t i_offset : 5;
- bdrkreg_t i_rsvd_1 : 3;
- bdrkreg_t i_w_num : 4;
- bdrkreg_t i_iosp : 1;
- bdrkreg_t i_rsvd : 51;
- } ii_itte1_fld_s;
-} ii_itte1_u_t;
-
-#else
-
-typedef union ii_itte1_u {
- bdrkreg_t ii_itte1_regval;
- struct {
- bdrkreg_t i_rsvd : 51;
- bdrkreg_t i_iosp : 1;
- bdrkreg_t i_w_num : 4;
- bdrkreg_t i_rsvd_1 : 3;
- bdrkreg_t i_offset : 5;
- } ii_itte1_fld_s;
-} ii_itte1_u_t;
-
-#endif
-
-
-
-
-/************************************************************************
- * *
- * Description: There are seven instances of translation table entry *
- * registers. Each register maps a Bedrock Big Window to a 48-bit *
- * address on Crosstalk. *
- * For M-mode (128 nodes, 8 GBytes/node), SysAD[31:29] (Big Window *
- * number) are used to select one of these 7 registers. The Widget *
- * number field is then derived from the W_NUM field for synthesizing *
- * a Crosstalk packet. The 5 bits of OFFSET are concatenated with *
- * SysAD[28:0] to form Crosstalk[33:0]. The upper Crosstalk[47:34] *
- * are padded with zeros. Although the maximum Crosstalk space *
- * addressable by the Bedrock is thus the lower 16 GBytes per widget *
- * (M-mode), however only <SUP >7</SUP>/<SUB >32nds</SUB> of this *
- * space can be accessed. *
- * For the N-mode (256 nodes, 4 GBytes/node), SysAD[30:28] (Big *
- * Window number) are used to select one of these 7 registers. The *
- * Widget number field is then derived from the W_NUM field for *
- * synthesizing a Crosstalk packet. The 5 bits of OFFSET are *
- * concatenated with SysAD[27:0] to form Crosstalk[33:0]. The IOSP *
- * field is used as Crosstalk[47], and remainder of the Crosstalk *
- * address bits (Crosstalk[46:34]) are always zero. While the maximum *
- * Crosstalk space addressable by the Bedrock is thus the lower *
- * 8-GBytes per widget (N-mode), only <SUP >7</SUP>/<SUB >32nds</SUB> *
- * of this space can be accessed. *
- * *
- ************************************************************************/
-
-
-
-
-#ifdef LITTLE_ENDIAN
-
-typedef union ii_itte2_u {
- bdrkreg_t ii_itte2_regval;
- struct {
- bdrkreg_t i_offset : 5;
- bdrkreg_t i_rsvd_1 : 3;
- bdrkreg_t i_w_num : 4;
- bdrkreg_t i_iosp : 1;
- bdrkreg_t i_rsvd : 51;
- } ii_itte2_fld_s;
-} ii_itte2_u_t;
-
-#else
-typedef union ii_itte2_u {
- bdrkreg_t ii_itte2_regval;
- struct {
- bdrkreg_t i_rsvd : 51;
- bdrkreg_t i_iosp : 1;
- bdrkreg_t i_w_num : 4;
- bdrkreg_t i_rsvd_1 : 3;
- bdrkreg_t i_offset : 5;
- } ii_itte2_fld_s;
-} ii_itte2_u_t;
-
-#endif
-
-
-
-
-/************************************************************************
- * *
- * Description: There are seven instances of translation table entry *
- * registers. Each register maps a Bedrock Big Window to a 48-bit *
- * address on Crosstalk. *
- * For M-mode (128 nodes, 8 GBytes/node), SysAD[31:29] (Big Window *
- * number) are used to select one of these 7 registers. The Widget *
- * number field is then derived from the W_NUM field for synthesizing *
- * a Crosstalk packet. The 5 bits of OFFSET are concatenated with *
- * SysAD[28:0] to form Crosstalk[33:0]. The upper Crosstalk[47:34] *
- * are padded with zeros. Although the maximum Crosstalk space *
- * addressable by the Bedrock is thus the lower 16 GBytes per widget *
- * (M-mode), however only <SUP >7</SUP>/<SUB >32nds</SUB> of this *
- * space can be accessed. *
- * For the N-mode (256 nodes, 4 GBytes/node), SysAD[30:28] (Big *
- * Window number) are used to select one of these 7 registers. The *
- * Widget number field is then derived from the W_NUM field for *
- * synthesizing a Crosstalk packet. The 5 bits of OFFSET are *
- * concatenated with SysAD[27:0] to form Crosstalk[33:0]. The IOSP *
- * field is used as Crosstalk[47], and remainder of the Crosstalk *
- * address bits (Crosstalk[46:34]) are always zero. While the maximum *
- * Crosstalk space addressable by the Bedrock is thus the lower *
- * 8-GBytes per widget (N-mode), only <SUP >7</SUP>/<SUB >32nds</SUB> *
- * of this space can be accessed. *
- * *
- ************************************************************************/
-
-
-
-
-#ifdef LITTLE_ENDIAN
-
-typedef union ii_itte3_u {
- bdrkreg_t ii_itte3_regval;
- struct {
- bdrkreg_t i_offset : 5;
- bdrkreg_t i_rsvd_1 : 3;
- bdrkreg_t i_w_num : 4;
- bdrkreg_t i_iosp : 1;
- bdrkreg_t i_rsvd : 51;
- } ii_itte3_fld_s;
-} ii_itte3_u_t;
-
-#else
-
-typedef union ii_itte3_u {
- bdrkreg_t ii_itte3_regval;
- struct {
- bdrkreg_t i_rsvd : 51;
- bdrkreg_t i_iosp : 1;
- bdrkreg_t i_w_num : 4;
- bdrkreg_t i_rsvd_1 : 3;
- bdrkreg_t i_offset : 5;
- } ii_itte3_fld_s;
-} ii_itte3_u_t;
-
-#endif
-
-
-
-
-/************************************************************************
- * *
- * Description: There are seven instances of translation table entry *
- * registers. Each register maps a Bedrock Big Window to a 48-bit *
- * address on Crosstalk. *
- * For M-mode (128 nodes, 8 GBytes/node), SysAD[31:29] (Big Window *
- * number) are used to select one of these 7 registers. The Widget *
- * number field is then derived from the W_NUM field for synthesizing *
- * a Crosstalk packet. The 5 bits of OFFSET are concatenated with *
- * SysAD[28:0] to form Crosstalk[33:0]. The upper Crosstalk[47:34] *
- * are padded with zeros. Although the maximum Crosstalk space *
- * addressable by the Bedrock is thus the lower 16 GBytes per widget *
- * (M-mode), however only <SUP >7</SUP>/<SUB >32nds</SUB> of this *
- * space can be accessed. *
- * For the N-mode (256 nodes, 4 GBytes/node), SysAD[30:28] (Big *
- * Window number) are used to select one of these 7 registers. The *
- * Widget number field is then derived from the W_NUM field for *
- * synthesizing a Crosstalk packet. The 5 bits of OFFSET are *
- * concatenated with SysAD[27:0] to form Crosstalk[33:0]. The IOSP *
- * field is used as Crosstalk[47], and remainder of the Crosstalk *
- * address bits (Crosstalk[46:34]) are always zero. While the maximum *
- * Crosstalk space addressable by the Bedrock is thus the lower *
- * 8-GBytes per widget (N-mode), only <SUP >7</SUP>/<SUB >32nds</SUB> *
- * of this space can be accessed. *
- * *
- ************************************************************************/
-
-
-
-
-#ifdef LITTLE_ENDIAN
-
-typedef union ii_itte4_u {
- bdrkreg_t ii_itte4_regval;
- struct {
- bdrkreg_t i_offset : 5;
- bdrkreg_t i_rsvd_1 : 3;
- bdrkreg_t i_w_num : 4;
- bdrkreg_t i_iosp : 1;
- bdrkreg_t i_rsvd : 51;
- } ii_itte4_fld_s;
-} ii_itte4_u_t;
-
-#else
-
-typedef union ii_itte4_u {
- bdrkreg_t ii_itte4_regval;
- struct {
- bdrkreg_t i_rsvd : 51;
- bdrkreg_t i_iosp : 1;
- bdrkreg_t i_w_num : 4;
- bdrkreg_t i_rsvd_1 : 3;
- bdrkreg_t i_offset : 5;
- } ii_itte4_fld_s;
-} ii_itte4_u_t;
-
-#endif
-
-
-
-
-/************************************************************************
- * *
- * Description: There are seven instances of translation table entry *
- * registers. Each register maps a Bedrock Big Window to a 48-bit *
- * address on Crosstalk. *
- * For M-mode (128 nodes, 8 GBytes/node), SysAD[31:29] (Big Window *
- * number) are used to select one of these 7 registers. The Widget *
- * number field is then derived from the W_NUM field for synthesizing *
- * a Crosstalk packet. The 5 bits of OFFSET are concatenated with *
- * SysAD[28:0] to form Crosstalk[33:0]. The upper Crosstalk[47:34] *
- * are padded with zeros. Although the maximum Crosstalk space *
- * addressable by the Bedrock is thus the lower 16 GBytes per widget *
- * (M-mode), however only <SUP >7</SUP>/<SUB >32nds</SUB> of this *
- * space can be accessed. *
- * For the N-mode (256 nodes, 4 GBytes/node), SysAD[30:28] (Big *
- * Window number) are used to select one of these 7 registers. The *
- * Widget number field is then derived from the W_NUM field for *
- * synthesizing a Crosstalk packet. The 5 bits of OFFSET are *
- * concatenated with SysAD[27:0] to form Crosstalk[33:0]. The IOSP *
- * field is used as Crosstalk[47], and remainder of the Crosstalk *
- * address bits (Crosstalk[46:34]) are always zero. While the maximum *
- * Crosstalk space addressable by the Bedrock is thus the lower *
- * 8-GBytes per widget (N-mode), only <SUP >7</SUP>/<SUB >32nds</SUB> *
- * of this space can be accessed. *
- * *
- ************************************************************************/
-
-
-
-
-#ifdef LITTLE_ENDIAN
-
-typedef union ii_itte5_u {
- bdrkreg_t ii_itte5_regval;
- struct {
- bdrkreg_t i_offset : 5;
- bdrkreg_t i_rsvd_1 : 3;
- bdrkreg_t i_w_num : 4;
- bdrkreg_t i_iosp : 1;
- bdrkreg_t i_rsvd : 51;
- } ii_itte5_fld_s;
-} ii_itte5_u_t;
-
-#else
-
-typedef union ii_itte5_u {
- bdrkreg_t ii_itte5_regval;
- struct {
- bdrkreg_t i_rsvd : 51;
- bdrkreg_t i_iosp : 1;
- bdrkreg_t i_w_num : 4;
- bdrkreg_t i_rsvd_1 : 3;
- bdrkreg_t i_offset : 5;
- } ii_itte5_fld_s;
-} ii_itte5_u_t;
-
-#endif
-
-
-
-
-/************************************************************************
- * *
- * Description: There are seven instances of translation table entry *
- * registers. Each register maps a Bedrock Big Window to a 48-bit *
- * address on Crosstalk. *
- * For M-mode (128 nodes, 8 GBytes/node), SysAD[31:29] (Big Window *
- * number) are used to select one of these 7 registers. The Widget *
- * number field is then derived from the W_NUM field for synthesizing *
- * a Crosstalk packet. The 5 bits of OFFSET are concatenated with *
- * SysAD[28:0] to form Crosstalk[33:0]. The upper Crosstalk[47:34] *
- * are padded with zeros. Although the maximum Crosstalk space *
- * addressable by the Bedrock is thus the lower 16 GBytes per widget *
- * (M-mode), however only <SUP >7</SUP>/<SUB >32nds</SUB> of this *
- * space can be accessed. *
- * For the N-mode (256 nodes, 4 GBytes/node), SysAD[30:28] (Big *
- * Window number) are used to select one of these 7 registers. The *
- * Widget number field is then derived from the W_NUM field for *
- * synthesizing a Crosstalk packet. The 5 bits of OFFSET are *
- * concatenated with SysAD[27:0] to form Crosstalk[33:0]. The IOSP *
- * field is used as Crosstalk[47], and remainder of the Crosstalk *
- * address bits (Crosstalk[46:34]) are always zero. While the maximum *
- * Crosstalk space addressable by the Bedrock is thus the lower *
- * 8-GBytes per widget (N-mode), only <SUP >7</SUP>/<SUB >32nds</SUB> *
- * of this space can be accessed. *
- * *
- ************************************************************************/
-
-
-
-
-#ifdef LITTLE_ENDIAN
-
-typedef union ii_itte6_u {
- bdrkreg_t ii_itte6_regval;
- struct {
- bdrkreg_t i_offset : 5;
- bdrkreg_t i_rsvd_1 : 3;
- bdrkreg_t i_w_num : 4;
- bdrkreg_t i_iosp : 1;
- bdrkreg_t i_rsvd : 51;
- } ii_itte6_fld_s;
-} ii_itte6_u_t;
-
-#else
-
-typedef union ii_itte6_u {
- bdrkreg_t ii_itte6_regval;
- struct {
- bdrkreg_t i_rsvd : 51;
- bdrkreg_t i_iosp : 1;
- bdrkreg_t i_w_num : 4;
- bdrkreg_t i_rsvd_1 : 3;
- bdrkreg_t i_offset : 5;
- } ii_itte6_fld_s;
-} ii_itte6_u_t;
-
-#endif
-
-
-
-
-/************************************************************************
- * *
- * Description: There are seven instances of translation table entry *
- * registers. Each register maps a Bedrock Big Window to a 48-bit *
- * address on Crosstalk. *
- * For M-mode (128 nodes, 8 GBytes/node), SysAD[31:29] (Big Window *
- * number) are used to select one of these 7 registers. The Widget *
- * number field is then derived from the W_NUM field for synthesizing *
- * a Crosstalk packet. The 5 bits of OFFSET are concatenated with *
- * SysAD[28:0] to form Crosstalk[33:0]. The upper Crosstalk[47:34] *
- * are padded with zeros. Although the maximum Crosstalk space *
- * addressable by the Bedrock is thus the lower 16 GBytes per widget *
- * (M-mode), however only <SUP >7</SUP>/<SUB >32nds</SUB> of this *
- * space can be accessed. *
- * For the N-mode (256 nodes, 4 GBytes/node), SysAD[30:28] (Big *
- * Window number) are used to select one of these 7 registers. The *
- * Widget number field is then derived from the W_NUM field for *
- * synthesizing a Crosstalk packet. The 5 bits of OFFSET are *
- * concatenated with SysAD[27:0] to form Crosstalk[33:0]. The IOSP *
- * field is used as Crosstalk[47], and remainder of the Crosstalk *
- * address bits (Crosstalk[46:34]) are always zero. While the maximum *
- * Crosstalk space addressable by the Bedrock is thus the lower *
- * 8-GBytes per widget (N-mode), only <SUP >7</SUP>/<SUB >32nds</SUB> *
- * of this space can be accessed. *
- * *
- ************************************************************************/
-
-
-
-
-#ifdef LITTLE_ENDIAN
-
-typedef union ii_itte7_u {
- bdrkreg_t ii_itte7_regval;
- struct {
- bdrkreg_t i_offset : 5;
- bdrkreg_t i_rsvd_1 : 3;
- bdrkreg_t i_w_num : 4;
- bdrkreg_t i_iosp : 1;
- bdrkreg_t i_rsvd : 51;
- } ii_itte7_fld_s;
-} ii_itte7_u_t;
-
-#else
-
-typedef union ii_itte7_u {
- bdrkreg_t ii_itte7_regval;
- struct {
- bdrkreg_t i_rsvd : 51;
- bdrkreg_t i_iosp : 1;
- bdrkreg_t i_w_num : 4;
- bdrkreg_t i_rsvd_1 : 3;
- bdrkreg_t i_offset : 5;
- } ii_itte7_fld_s;
-} ii_itte7_u_t;
-
-#endif
-
-
-
-
-/************************************************************************
- * *
- * Description: There are 9 instances of this register, one per *
- * actual widget in this implementation of Bedrock and Crossbow. *
- * Note: Crossbow only has ports for Widgets 8 through F, widget 0 *
- * refers to Crossbow's internal space. *
- * This register contains the state elements per widget that are *
- * necessary to manage the PIO flow control on Crosstalk and on the *
- * Router Network. See the PIO Flow Control chapter for a complete *
- * description of this register *
- * The SPUR_WR bit requires some explanation. When this register is *
- * written, the new value of the C field is captured in an internal *
- * register so the hardware can remember what the programmer wrote *
- * into the credit counter. The SPUR_WR bit sets whenever the C field *
- * increments above this stored value, which indicates that there *
- * have been more responses received than requests sent. The SPUR_WR *
- * bit cannot be cleared until a value is written to the IPRBx *
- * register; the write will correct the C field and capture its new *
- * value in the internal register. Even if IECLR[E_PRB_x] is set, the *
- * SPUR_WR bit will persist if IPRBx hasn't yet been written. *
- * . *
- * *
- ************************************************************************/
-
-
-
-
-#ifdef LITTLE_ENDIAN
-
-typedef union ii_iprb0_u {
- bdrkreg_t ii_iprb0_regval;
- struct {
- bdrkreg_t i_c : 8;
- bdrkreg_t i_na : 14;
- bdrkreg_t i_rsvd_2 : 2;
- bdrkreg_t i_nb : 14;
- bdrkreg_t i_rsvd_1 : 2;
- bdrkreg_t i_m : 2;
- bdrkreg_t i_f : 1;
- bdrkreg_t i_of_cnt : 5;
- bdrkreg_t i_error : 1;
- bdrkreg_t i_rd_to : 1;
- bdrkreg_t i_spur_wr : 1;
- bdrkreg_t i_spur_rd : 1;
- bdrkreg_t i_rsvd : 11;
- bdrkreg_t i_mult_err : 1;
- } ii_iprb0_fld_s;
-} ii_iprb0_u_t;
-
-#else
-
-typedef union ii_iprb0_u {
- bdrkreg_t ii_iprb0_regval;
- struct {
- bdrkreg_t i_mult_err : 1;
- bdrkreg_t i_rsvd : 11;
- bdrkreg_t i_spur_rd : 1;
- bdrkreg_t i_spur_wr : 1;
- bdrkreg_t i_rd_to : 1;
- bdrkreg_t i_error : 1;
- bdrkreg_t i_of_cnt : 5;
- bdrkreg_t i_f : 1;
- bdrkreg_t i_m : 2;
- bdrkreg_t i_rsvd_1 : 2;
- bdrkreg_t i_nb : 14;
- bdrkreg_t i_rsvd_2 : 2;
- bdrkreg_t i_na : 14;
- bdrkreg_t i_c : 8;
- } ii_iprb0_fld_s;
-} ii_iprb0_u_t;
-
-#endif
-
-
-
-
-/************************************************************************
- * *
- * Description: There are 9 instances of this register, one per *
- * actual widget in this implementation of Bedrock and Crossbow. *
- * Note: Crossbow only has ports for Widgets 8 through F, widget 0 *
- * refers to Crossbow's internal space. *
- * This register contains the state elements per widget that are *
- * necessary to manage the PIO flow control on Crosstalk and on the *
- * Router Network. See the PIO Flow Control chapter for a complete *
- * description of this register *
- * The SPUR_WR bit requires some explanation. When this register is *
- * written, the new value of the C field is captured in an internal *
- * register so the hardware can remember what the programmer wrote *
- * into the credit counter. The SPUR_WR bit sets whenever the C field *
- * increments above this stored value, which indicates that there *
- * have been more responses received than requests sent. The SPUR_WR *
- * bit cannot be cleared until a value is written to the IPRBx *
- * register; the write will correct the C field and capture its new *
- * value in the internal register. Even if IECLR[E_PRB_x] is set, the *
- * SPUR_WR bit will persist if IPRBx hasn't yet been written. *
- * . *
- * *
- ************************************************************************/
-
-
-
-
-#ifdef LITTLE_ENDIAN
-
-typedef union ii_iprb8_u {
- bdrkreg_t ii_iprb8_regval;
- struct {
- bdrkreg_t i_c : 8;
- bdrkreg_t i_na : 14;
- bdrkreg_t i_rsvd_2 : 2;
- bdrkreg_t i_nb : 14;
- bdrkreg_t i_rsvd_1 : 2;
- bdrkreg_t i_m : 2;
- bdrkreg_t i_f : 1;
- bdrkreg_t i_of_cnt : 5;
- bdrkreg_t i_error : 1;
- bdrkreg_t i_rd_to : 1;
- bdrkreg_t i_spur_wr : 1;
- bdrkreg_t i_spur_rd : 1;
- bdrkreg_t i_rsvd : 11;
- bdrkreg_t i_mult_err : 1;
- } ii_iprb8_fld_s;
-} ii_iprb8_u_t;
-
-#else
-
-
-typedef union ii_iprb8_u {
- bdrkreg_t ii_iprb8_regval;
- struct {
- bdrkreg_t i_mult_err : 1;
- bdrkreg_t i_rsvd : 11;
- bdrkreg_t i_spur_rd : 1;
- bdrkreg_t i_spur_wr : 1;
- bdrkreg_t i_rd_to : 1;
- bdrkreg_t i_error : 1;
- bdrkreg_t i_of_cnt : 5;
- bdrkreg_t i_f : 1;
- bdrkreg_t i_m : 2;
- bdrkreg_t i_rsvd_1 : 2;
- bdrkreg_t i_nb : 14;
- bdrkreg_t i_rsvd_2 : 2;
- bdrkreg_t i_na : 14;
- bdrkreg_t i_c : 8;
- } ii_iprb8_fld_s;
-} ii_iprb8_u_t;
-
-#endif
-
-
-
-
-/************************************************************************
- * *
- * Description: There are 9 instances of this register, one per *
- * actual widget in this implementation of Bedrock and Crossbow. *
- * Note: Crossbow only has ports for Widgets 8 through F, widget 0 *
- * refers to Crossbow's internal space. *
- * This register contains the state elements per widget that are *
- * necessary to manage the PIO flow control on Crosstalk and on the *
- * Router Network. See the PIO Flow Control chapter for a complete *
- * description of this register *
- * The SPUR_WR bit requires some explanation. When this register is *
- * written, the new value of the C field is captured in an internal *
- * register so the hardware can remember what the programmer wrote *
- * into the credit counter. The SPUR_WR bit sets whenever the C field *
- * increments above this stored value, which indicates that there *
- * have been more responses received than requests sent. The SPUR_WR *
- * bit cannot be cleared until a value is written to the IPRBx *
- * register; the write will correct the C field and capture its new *
- * value in the internal register. Even if IECLR[E_PRB_x] is set, the *
- * SPUR_WR bit will persist if IPRBx hasn't yet been written. *
- * . *
- * *
- ************************************************************************/
-
-
-
-
-#ifdef LITTLE_ENDIAN
-
-typedef union ii_iprb9_u {
- bdrkreg_t ii_iprb9_regval;
- struct {
- bdrkreg_t i_c : 8;
- bdrkreg_t i_na : 14;
- bdrkreg_t i_rsvd_2 : 2;
- bdrkreg_t i_nb : 14;
- bdrkreg_t i_rsvd_1 : 2;
- bdrkreg_t i_m : 2;
- bdrkreg_t i_f : 1;
- bdrkreg_t i_of_cnt : 5;
- bdrkreg_t i_error : 1;
- bdrkreg_t i_rd_to : 1;
- bdrkreg_t i_spur_wr : 1;
- bdrkreg_t i_spur_rd : 1;
- bdrkreg_t i_rsvd : 11;
- bdrkreg_t i_mult_err : 1;
- } ii_iprb9_fld_s;
-} ii_iprb9_u_t;
-
-#else
-
-typedef union ii_iprb9_u {
- bdrkreg_t ii_iprb9_regval;
- struct {
- bdrkreg_t i_mult_err : 1;
- bdrkreg_t i_rsvd : 11;
- bdrkreg_t i_spur_rd : 1;
- bdrkreg_t i_spur_wr : 1;
- bdrkreg_t i_rd_to : 1;
- bdrkreg_t i_error : 1;
- bdrkreg_t i_of_cnt : 5;
- bdrkreg_t i_f : 1;
- bdrkreg_t i_m : 2;
- bdrkreg_t i_rsvd_1 : 2;
- bdrkreg_t i_nb : 14;
- bdrkreg_t i_rsvd_2 : 2;
- bdrkreg_t i_na : 14;
- bdrkreg_t i_c : 8;
- } ii_iprb9_fld_s;
-} ii_iprb9_u_t;
-
-#endif
-
-
-
-/************************************************************************
- * *
- * Description: There are 9 instances of this register, one per *
- * actual widget in this implementation of Bedrock and Crossbow. *
- * Note: Crossbow only has ports for Widgets 8 through F, widget 0 *
- * refers to Crossbow's internal space. *
- * This register contains the state elements per widget that are *
- * necessary to manage the PIO flow control on Crosstalk and on the *
- * Router Network. See the PIO Flow Control chapter for a complete *
- * description of this register *
- * The SPUR_WR bit requires some explanation. When this register is *
- * written, the new value of the C field is captured in an internal *
- * register so the hardware can remember what the programmer wrote *
- * into the credit counter. The SPUR_WR bit sets whenever the C field *
- * increments above this stored value, which indicates that there *
- * have been more responses received than requests sent. The SPUR_WR *
- * bit cannot be cleared until a value is written to the IPRBx *
- * register; the write will correct the C field and capture its new *
- * value in the internal register. Even if IECLR[E_PRB_x] is set, the *
- * SPUR_WR bit will persist if IPRBx hasn't yet been written. *
- * . *
- * *
- ************************************************************************/
-
-
-
-
-#ifdef LITTLE_ENDIAN
-
-typedef union ii_iprba_u {
- bdrkreg_t ii_iprba_regval;
- struct {
- bdrkreg_t i_c : 8;
- bdrkreg_t i_na : 14;
- bdrkreg_t i_rsvd_2 : 2;
- bdrkreg_t i_nb : 14;
- bdrkreg_t i_rsvd_1 : 2;
- bdrkreg_t i_m : 2;
- bdrkreg_t i_f : 1;
- bdrkreg_t i_of_cnt : 5;
- bdrkreg_t i_error : 1;
- bdrkreg_t i_rd_to : 1;
- bdrkreg_t i_spur_wr : 1;
- bdrkreg_t i_spur_rd : 1;
- bdrkreg_t i_rsvd : 11;
- bdrkreg_t i_mult_err : 1;
- } ii_iprba_fld_s;
-} ii_iprba_u_t;
-
-#else
-
-typedef union ii_iprba_u {
- bdrkreg_t ii_iprba_regval;
- struct {
- bdrkreg_t i_mult_err : 1;
- bdrkreg_t i_rsvd : 11;
- bdrkreg_t i_spur_rd : 1;
- bdrkreg_t i_spur_wr : 1;
- bdrkreg_t i_rd_to : 1;
- bdrkreg_t i_error : 1;
- bdrkreg_t i_of_cnt : 5;
- bdrkreg_t i_f : 1;
- bdrkreg_t i_m : 2;
- bdrkreg_t i_rsvd_1 : 2;
- bdrkreg_t i_nb : 14;
- bdrkreg_t i_rsvd_2 : 2;
- bdrkreg_t i_na : 14;
- bdrkreg_t i_c : 8;
- } ii_iprba_fld_s;
-} ii_iprba_u_t;
-
-#endif
-
-
-
-
-/************************************************************************
- * *
- * Description: There are 9 instances of this register, one per *
- * actual widget in this implementation of Bedrock and Crossbow. *
- * Note: Crossbow only has ports for Widgets 8 through F, widget 0 *
- * refers to Crossbow's internal space. *
- * This register contains the state elements per widget that are *
- * necessary to manage the PIO flow control on Crosstalk and on the *
- * Router Network. See the PIO Flow Control chapter for a complete *
- * description of this register *
- * The SPUR_WR bit requires some explanation. When this register is *
- * written, the new value of the C field is captured in an internal *
- * register so the hardware can remember what the programmer wrote *
- * into the credit counter. The SPUR_WR bit sets whenever the C field *
- * increments above this stored value, which indicates that there *
- * have been more responses received than requests sent. The SPUR_WR *
- * bit cannot be cleared until a value is written to the IPRBx *
- * register; the write will correct the C field and capture its new *
- * value in the internal register. Even if IECLR[E_PRB_x] is set, the *
- * SPUR_WR bit will persist if IPRBx hasn't yet been written. *
- * . *
- * *
- ************************************************************************/
-
-
-
-
-#ifdef LITTLE_ENDIAN
-
-typedef union ii_iprbb_u {
- bdrkreg_t ii_iprbb_regval;
- struct {
- bdrkreg_t i_c : 8;
- bdrkreg_t i_na : 14;
- bdrkreg_t i_rsvd_2 : 2;
- bdrkreg_t i_nb : 14;
- bdrkreg_t i_rsvd_1 : 2;
- bdrkreg_t i_m : 2;
- bdrkreg_t i_f : 1;
- bdrkreg_t i_of_cnt : 5;
- bdrkreg_t i_error : 1;
- bdrkreg_t i_rd_to : 1;
- bdrkreg_t i_spur_wr : 1;
- bdrkreg_t i_spur_rd : 1;
- bdrkreg_t i_rsvd : 11;
- bdrkreg_t i_mult_err : 1;
- } ii_iprbb_fld_s;
-} ii_iprbb_u_t;
-
-#else
-
-typedef union ii_iprbb_u {
- bdrkreg_t ii_iprbb_regval;
- struct {
- bdrkreg_t i_mult_err : 1;
- bdrkreg_t i_rsvd : 11;
- bdrkreg_t i_spur_rd : 1;
- bdrkreg_t i_spur_wr : 1;
- bdrkreg_t i_rd_to : 1;
- bdrkreg_t i_error : 1;
- bdrkreg_t i_of_cnt : 5;
- bdrkreg_t i_f : 1;
- bdrkreg_t i_m : 2;
- bdrkreg_t i_rsvd_1 : 2;
- bdrkreg_t i_nb : 14;
- bdrkreg_t i_rsvd_2 : 2;
- bdrkreg_t i_na : 14;
- bdrkreg_t i_c : 8;
- } ii_iprbb_fld_s;
-} ii_iprbb_u_t;
-
-#endif
-
-
-
-
-/************************************************************************
- * *
- * Description: There are 9 instances of this register, one per *
- * actual widget in this implementation of Bedrock and Crossbow. *
- * Note: Crossbow only has ports for Widgets 8 through F, widget 0 *
- * refers to Crossbow's internal space. *
- * This register contains the state elements per widget that are *
- * necessary to manage the PIO flow control on Crosstalk and on the *
- * Router Network. See the PIO Flow Control chapter for a complete *
- * description of this register *
- * The SPUR_WR bit requires some explanation. When this register is *
- * written, the new value of the C field is captured in an internal *
- * register so the hardware can remember what the programmer wrote *
- * into the credit counter. The SPUR_WR bit sets whenever the C field *
- * increments above this stored value, which indicates that there *
- * have been more responses received than requests sent. The SPUR_WR *
- * bit cannot be cleared until a value is written to the IPRBx *
- * register; the write will correct the C field and capture its new *
- * value in the internal register. Even if IECLR[E_PRB_x] is set, the *
- * SPUR_WR bit will persist if IPRBx hasn't yet been written. *
- * . *
- * *
- ************************************************************************/
-
-
-
-
-#ifdef LITTLE_ENDIAN
-
-typedef union ii_iprbc_u {
- bdrkreg_t ii_iprbc_regval;
- struct {
- bdrkreg_t i_c : 8;
- bdrkreg_t i_na : 14;
- bdrkreg_t i_rsvd_2 : 2;
- bdrkreg_t i_nb : 14;
- bdrkreg_t i_rsvd_1 : 2;
- bdrkreg_t i_m : 2;
- bdrkreg_t i_f : 1;
- bdrkreg_t i_of_cnt : 5;
- bdrkreg_t i_error : 1;
- bdrkreg_t i_rd_to : 1;
- bdrkreg_t i_spur_wr : 1;
- bdrkreg_t i_spur_rd : 1;
- bdrkreg_t i_rsvd : 11;
- bdrkreg_t i_mult_err : 1;
- } ii_iprbc_fld_s;
-} ii_iprbc_u_t;
-
-#else
-
-typedef union ii_iprbc_u {
- bdrkreg_t ii_iprbc_regval;
- struct {
- bdrkreg_t i_mult_err : 1;
- bdrkreg_t i_rsvd : 11;
- bdrkreg_t i_spur_rd : 1;
- bdrkreg_t i_spur_wr : 1;
- bdrkreg_t i_rd_to : 1;
- bdrkreg_t i_error : 1;
- bdrkreg_t i_of_cnt : 5;
- bdrkreg_t i_f : 1;
- bdrkreg_t i_m : 2;
- bdrkreg_t i_rsvd_1 : 2;
- bdrkreg_t i_nb : 14;
- bdrkreg_t i_rsvd_2 : 2;
- bdrkreg_t i_na : 14;
- bdrkreg_t i_c : 8;
- } ii_iprbc_fld_s;
-} ii_iprbc_u_t;
-
-#endif
-
-
-
-
-/************************************************************************
- * *
- * Description: There are 9 instances of this register, one per *
- * actual widget in this implementation of Bedrock and Crossbow. *
- * Note: Crossbow only has ports for Widgets 8 through F, widget 0 *
- * refers to Crossbow's internal space. *
- * This register contains the state elements per widget that are *
- * necessary to manage the PIO flow control on Crosstalk and on the *
- * Router Network. See the PIO Flow Control chapter for a complete *
- * description of this register *
- * The SPUR_WR bit requires some explanation. When this register is *
- * written, the new value of the C field is captured in an internal *
- * register so the hardware can remember what the programmer wrote *
- * into the credit counter. The SPUR_WR bit sets whenever the C field *
- * increments above this stored value, which indicates that there *
- * have been more responses received than requests sent. The SPUR_WR *
- * bit cannot be cleared until a value is written to the IPRBx *
- * register; the write will correct the C field and capture its new *
- * value in the internal register. Even if IECLR[E_PRB_x] is set, the *
- * SPUR_WR bit will persist if IPRBx hasn't yet been written. *
- * . *
- * *
- ************************************************************************/
-
-
-
-
-#ifdef LITTLE_ENDIAN
-
-typedef union ii_iprbd_u {
- bdrkreg_t ii_iprbd_regval;
- struct {
- bdrkreg_t i_c : 8;
- bdrkreg_t i_na : 14;
- bdrkreg_t i_rsvd_2 : 2;
- bdrkreg_t i_nb : 14;
- bdrkreg_t i_rsvd_1 : 2;
- bdrkreg_t i_m : 2;
- bdrkreg_t i_f : 1;
- bdrkreg_t i_of_cnt : 5;
- bdrkreg_t i_error : 1;
- bdrkreg_t i_rd_to : 1;
- bdrkreg_t i_spur_wr : 1;
- bdrkreg_t i_spur_rd : 1;
- bdrkreg_t i_rsvd : 11;
- bdrkreg_t i_mult_err : 1;
- } ii_iprbd_fld_s;
-} ii_iprbd_u_t;
-
-#else
-
-typedef union ii_iprbd_u {
- bdrkreg_t ii_iprbd_regval;
- struct {
- bdrkreg_t i_mult_err : 1;
- bdrkreg_t i_rsvd : 11;
- bdrkreg_t i_spur_rd : 1;
- bdrkreg_t i_spur_wr : 1;
- bdrkreg_t i_rd_to : 1;
- bdrkreg_t i_error : 1;
- bdrkreg_t i_of_cnt : 5;
- bdrkreg_t i_f : 1;
- bdrkreg_t i_m : 2;
- bdrkreg_t i_rsvd_1 : 2;
- bdrkreg_t i_nb : 14;
- bdrkreg_t i_rsvd_2 : 2;
- bdrkreg_t i_na : 14;
- bdrkreg_t i_c : 8;
- } ii_iprbd_fld_s;
-} ii_iprbd_u_t;
-
-#endif
-
-
-
-
-/************************************************************************
- * *
- * Description: There are 9 instances of this register, one per *
- * actual widget in this implementation of Bedrock and Crossbow. *
- * Note: Crossbow only has ports for Widgets 8 through F, widget 0 *
- * refers to Crossbow's internal space. *
- * This register contains the state elements per widget that are *
- * necessary to manage the PIO flow control on Crosstalk and on the *
- * Router Network. See the PIO Flow Control chapter for a complete *
- * description of this register *
- * The SPUR_WR bit requires some explanation. When this register is *
- * written, the new value of the C field is captured in an internal *
- * register so the hardware can remember what the programmer wrote *
- * into the credit counter. The SPUR_WR bit sets whenever the C field *
- * increments above this stored value, which indicates that there *
- * have been more responses received than requests sent. The SPUR_WR *
- * bit cannot be cleared until a value is written to the IPRBx *
- * register; the write will correct the C field and capture its new *
- * value in the internal register. Even if IECLR[E_PRB_x] is set, the *
- * SPUR_WR bit will persist if IPRBx hasn't yet been written. *
- * . *
- * *
- ************************************************************************/
-
-
-
-
-#ifdef LITTLE_ENDIAN
-
-typedef union ii_iprbe_u {
- bdrkreg_t ii_iprbe_regval;
- struct {
- bdrkreg_t i_c : 8;
- bdrkreg_t i_na : 14;
- bdrkreg_t i_rsvd_2 : 2;
- bdrkreg_t i_nb : 14;
- bdrkreg_t i_rsvd_1 : 2;
- bdrkreg_t i_m : 2;
- bdrkreg_t i_f : 1;
- bdrkreg_t i_of_cnt : 5;
- bdrkreg_t i_error : 1;
- bdrkreg_t i_rd_to : 1;
- bdrkreg_t i_spur_wr : 1;
- bdrkreg_t i_spur_rd : 1;
- bdrkreg_t i_rsvd : 11;
- bdrkreg_t i_mult_err : 1;
- } ii_iprbe_fld_s;
-} ii_iprbe_u_t;
-
-#else
-
-typedef union ii_iprbe_u {
- bdrkreg_t ii_iprbe_regval;
- struct {
- bdrkreg_t i_mult_err : 1;
- bdrkreg_t i_rsvd : 11;
- bdrkreg_t i_spur_rd : 1;
- bdrkreg_t i_spur_wr : 1;
- bdrkreg_t i_rd_to : 1;
- bdrkreg_t i_error : 1;
- bdrkreg_t i_of_cnt : 5;
- bdrkreg_t i_f : 1;
- bdrkreg_t i_m : 2;
- bdrkreg_t i_rsvd_1 : 2;
- bdrkreg_t i_nb : 14;
- bdrkreg_t i_rsvd_2 : 2;
- bdrkreg_t i_na : 14;
- bdrkreg_t i_c : 8;
- } ii_iprbe_fld_s;
-} ii_iprbe_u_t;
-
-#endif
-
-
-
-
-/************************************************************************
- * *
- * Description: There are 9 instances of this register, one per *
- * actual widget in this implementation of Bedrock and Crossbow. *
- * Note: Crossbow only has ports for Widgets 8 through F, widget 0 *
- * refers to Crossbow's internal space. *
- * This register contains the state elements per widget that are *
- * necessary to manage the PIO flow control on Crosstalk and on the *
- * Router Network. See the PIO Flow Control chapter for a complete *
- * description of this register *
- * The SPUR_WR bit requires some explanation. When this register is *
- * written, the new value of the C field is captured in an internal *
- * register so the hardware can remember what the programmer wrote *
- * into the credit counter. The SPUR_WR bit sets whenever the C field *
- * increments above this stored value, which indicates that there *
- * have been more responses received than requests sent. The SPUR_WR *
- * bit cannot be cleared until a value is written to the IPRBx *
- * register; the write will correct the C field and capture its new *
- * value in the internal register. Even if IECLR[E_PRB_x] is set, the *
- * SPUR_WR bit will persist if IPRBx hasn't yet been written. *
- * . *
- * *
- ************************************************************************/
-
-
-
-
-#ifdef LITTLE_ENDIAN
-
-typedef union ii_iprbf_u {
- bdrkreg_t ii_iprbf_regval;
- struct {
- bdrkreg_t i_c : 8;
- bdrkreg_t i_na : 14;
- bdrkreg_t i_rsvd_2 : 2;
- bdrkreg_t i_nb : 14;
- bdrkreg_t i_rsvd_1 : 2;
- bdrkreg_t i_m : 2;
- bdrkreg_t i_f : 1;
- bdrkreg_t i_of_cnt : 5;
- bdrkreg_t i_error : 1;
- bdrkreg_t i_rd_to : 1;
- bdrkreg_t i_spur_wr : 1;
- bdrkreg_t i_spur_rd : 1;
- bdrkreg_t i_rsvd : 11;
- bdrkreg_t i_mult_err : 1;
- } ii_iprbe_fld_s;
-} ii_iprbf_u_t;
-
-#else
-
-typedef union ii_iprbf_u {
- bdrkreg_t ii_iprbf_regval;
- struct {
- bdrkreg_t i_mult_err : 1;
- bdrkreg_t i_rsvd : 11;
- bdrkreg_t i_spur_rd : 1;
- bdrkreg_t i_spur_wr : 1;
- bdrkreg_t i_rd_to : 1;
- bdrkreg_t i_error : 1;
- bdrkreg_t i_of_cnt : 5;
- bdrkreg_t i_f : 1;
- bdrkreg_t i_m : 2;
- bdrkreg_t i_rsvd_1 : 2;
- bdrkreg_t i_nb : 14;
- bdrkreg_t i_rsvd_2 : 2;
- bdrkreg_t i_na : 14;
- bdrkreg_t i_c : 8;
- } ii_iprbf_fld_s;
-} ii_iprbf_u_t;
-
-#endif
-
-
-
-
-/************************************************************************
- * *
- * This register specifies the timeout value to use for monitoring *
- * Crosstalk credits which are used outbound to Crosstalk. An *
- * internal counter called the Crosstalk Credit Timeout Counter *
- * increments every 128 II clocks. The counter starts counting *
- * anytime the credit count drops below a threshold, and resets to *
- * zero (stops counting) anytime the credit count is at or above the *
- * threshold. The threshold is 1 credit in direct connect mode and 2 *
- * in Crossbow connect mode. When the internal Crosstalk Credit *
- * Timeout Counter reaches the value programmed in this register, a *
- * Crosstalk Credit Timeout has occurred. The internal counter is not *
- * readable from software, and stops counting at its maximum value, *
- * so it cannot cause more than one interrupt. *
- * *
- ************************************************************************/
-
-
-
-
-#ifdef LITTLE_ENDIAN
-
-typedef union ii_ixcc_u {
- bdrkreg_t ii_ixcc_regval;
- struct {
- bdrkreg_t i_time_out : 26;
- bdrkreg_t i_rsvd : 38;
- } ii_ixcc_fld_s;
-} ii_ixcc_u_t;
-
-#else
-
-typedef union ii_ixcc_u {
- bdrkreg_t ii_ixcc_regval;
- struct {
- bdrkreg_t i_rsvd : 38;
- bdrkreg_t i_time_out : 26;
- } ii_ixcc_fld_s;
-} ii_ixcc_u_t;
-
-#endif
-
-
-
-/************************************************************************
- * *
- * Description: This register qualifies all the PIO and DMA *
- * operations launched from widget 0 towards the Bedrock. In *
- * addition, it also qualifies accesses by the BTE streams. *
- * The bits in each field of this register are cleared by the Bedrock *
- * upon detection of an error which requires widget 0 or the BTE *
- * streams to be terminated. Whether or not widget x has access *
- * rights to this Bedrock is determined by an AND of the device *
- * enable bit in the appropriate field of this register and bit 0 in *
- * the Wx_IAC field. The bits in this field are set by writing a 1 to *
- * them. Incoming replies from Crosstalk are not subject to this *
- * access control mechanism. *
- * *
- ************************************************************************/
-
-
-
-
-#ifdef LITTLE_ENDIAN
-
-typedef union ii_imem_u {
- bdrkreg_t ii_imem_regval;
- struct {
- bdrkreg_t i_w0_esd : 1;
- bdrkreg_t i_rsvd_3 : 3;
- bdrkreg_t i_b0_esd : 1;
- bdrkreg_t i_rsvd_2 : 3;
- bdrkreg_t i_b1_esd : 1;
- bdrkreg_t i_rsvd_1 : 3;
- bdrkreg_t i_clr_precise : 1;
- bdrkreg_t i_rsvd : 51;
- } ii_imem_fld_s;
-} ii_imem_u_t;
-
-#else
-
-typedef union ii_imem_u {
- bdrkreg_t ii_imem_regval;
- struct {
- bdrkreg_t i_rsvd : 51;
- bdrkreg_t i_clr_precise : 1;
- bdrkreg_t i_rsvd_1 : 3;
- bdrkreg_t i_b1_esd : 1;
- bdrkreg_t i_rsvd_2 : 3;
- bdrkreg_t i_b0_esd : 1;
- bdrkreg_t i_rsvd_3 : 3;
- bdrkreg_t i_w0_esd : 1;
- } ii_imem_fld_s;
-} ii_imem_u_t;
-
-#endif
-
-
-
-
-/************************************************************************
- * *
- * Description: This register specifies the timeout value to use for *
- * monitoring Crosstalk tail flits coming into the Bedrock in the *
- * TAIL_TO field. An internal counter associated with this register *
- * is incremented every 128 II internal clocks (7 bits). The counter *
- * starts counting anytime a header micropacket is received and stops *
- * counting (and resets to zero) any time a micropacket with a Tail *
- * bit is received. Once the counter reaches the threshold value *
- * programmed in this register, it generates an interrupt to the *
- * processor that is programmed into the IIDSR. The counter saturates *
- * (does not roll over) at its maximum value, so it cannot cause *
- * another interrupt until after it is cleared. *
- * The register also contains the Read Response Timeout values. The *
- * Prescalar is 23 bits, and counts II clocks. An internal counter *
- * increments on every II clock and when it reaches the value in the *
- * Prescalar field, all IPRTE registers with their valid bits set *
- * have their Read Response timers bumped. Whenever any of them match *
- * the value in the RRSP_TO field, a Read Response Timeout has *
- * occurred, and error handling occurs as described in the Error *
- * Handling section of this document. *
- * *
- ************************************************************************/
-
-
-
-
-#ifdef LITTLE_ENDIAN
-
-typedef union ii_ixtt_u {
- bdrkreg_t ii_ixtt_regval;
- struct {
- bdrkreg_t i_tail_to : 26;
- bdrkreg_t i_rsvd_1 : 6;
- bdrkreg_t i_rrsp_ps : 23;
- bdrkreg_t i_rrsp_to : 5;
- bdrkreg_t i_rsvd : 4;
- } ii_ixtt_fld_s;
-} ii_ixtt_u_t;
-
-#else
-
-typedef union ii_ixtt_u {
- bdrkreg_t ii_ixtt_regval;
- struct {
- bdrkreg_t i_rsvd : 4;
- bdrkreg_t i_rrsp_to : 5;
- bdrkreg_t i_rrsp_ps : 23;
- bdrkreg_t i_rsvd_1 : 6;
- bdrkreg_t i_tail_to : 26;
- } ii_ixtt_fld_s;
-} ii_ixtt_u_t;
-
-#endif
-
-
-
-
-/************************************************************************
- * *
- * Writing a 1 to the fields of this register clears the appropriate *
- * error bits in other areas of Bedrock_II. Note that when the *
- * E_PRB_x bits are used to clear error bits in PRB registers, *
- * SPUR_RD and SPUR_WR may persist, because they require additional *
- * action to clear them. See the IPRBx and IXSS Register *
- * specifications. *
- * *
- ************************************************************************/
-
-
-
-
-#ifdef LITTLE_ENDIAN
-
-typedef union ii_ieclr_u {
- bdrkreg_t ii_ieclr_regval;
- struct {
- bdrkreg_t i_e_prb_0 : 1;
- bdrkreg_t i_rsvd : 7;
- bdrkreg_t i_e_prb_8 : 1;
- bdrkreg_t i_e_prb_9 : 1;
- bdrkreg_t i_e_prb_a : 1;
- bdrkreg_t i_e_prb_b : 1;
- bdrkreg_t i_e_prb_c : 1;
- bdrkreg_t i_e_prb_d : 1;
- bdrkreg_t i_e_prb_e : 1;
- bdrkreg_t i_e_prb_f : 1;
- bdrkreg_t i_e_crazy : 1;
- bdrkreg_t i_e_bte_0 : 1;
- bdrkreg_t i_e_bte_1 : 1;
- bdrkreg_t i_reserved_1 : 9;
- bdrkreg_t i_ii_internal : 1;
- bdrkreg_t i_spur_rd_hdr : 1;
- bdrkreg_t i_pi0_forward_int : 1;
- bdrkreg_t i_pi1_forward_int : 1;
- bdrkreg_t i_reserved : 32;
- } ii_ieclr_fld_s;
-} ii_ieclr_u_t;
-
-#else
-
-typedef union ii_ieclr_u {
- bdrkreg_t ii_ieclr_regval;
- struct {
- bdrkreg_t i_reserved : 32;
- bdrkreg_t i_pi1_forward_int : 1;
- bdrkreg_t i_pi0_forward_int : 1;
- bdrkreg_t i_spur_rd_hdr : 1;
- bdrkreg_t i_ii_internal : 1;
- bdrkreg_t i_reserved_1 : 9;
- bdrkreg_t i_e_bte_1 : 1;
- bdrkreg_t i_e_bte_0 : 1;
- bdrkreg_t i_e_crazy : 1;
- bdrkreg_t i_e_prb_f : 1;
- bdrkreg_t i_e_prb_e : 1;
- bdrkreg_t i_e_prb_d : 1;
- bdrkreg_t i_e_prb_c : 1;
- bdrkreg_t i_e_prb_b : 1;
- bdrkreg_t i_e_prb_a : 1;
- bdrkreg_t i_e_prb_9 : 1;
- bdrkreg_t i_e_prb_8 : 1;
- bdrkreg_t i_rsvd : 7;
- bdrkreg_t i_e_prb_0 : 1;
- } ii_ieclr_fld_s;
-} ii_ieclr_u_t;
-
-#endif
-
-
-
-
-
-/************************************************************************
- * *
- * This register controls both BTEs. SOFT_RESET is intended for *
- * recovery after an error. COUNT controls the total number of CRBs *
- * that both BTEs (combined) can use, which affects total BTE *
- * bandwidth. *
- * *
- ************************************************************************/
-
-
-
-
-#ifdef LITTLE_ENDIAN
-
-typedef union ii_ibcr_u {
- bdrkreg_t ii_ibcr_regval;
- struct {
- bdrkreg_t i_count : 4;
- bdrkreg_t i_rsvd_1 : 4;
- bdrkreg_t i_soft_reset : 1;
- bdrkreg_t i_rsvd : 55;
- } ii_ibcr_fld_s;
-} ii_ibcr_u_t;
-
-#else
-
-typedef union ii_ibcr_u {
- bdrkreg_t ii_ibcr_regval;
- struct {
- bdrkreg_t i_rsvd : 55;
- bdrkreg_t i_soft_reset : 1;
- bdrkreg_t i_rsvd_1 : 4;
- bdrkreg_t i_count : 4;
- } ii_ibcr_fld_s;
-} ii_ibcr_u_t;
-
-#endif
-
-
-
-
-/************************************************************************
- * *
- * This register contains the header of a spurious read response *
- * received from Crosstalk. A spurious read response is defined as a *
- * read response received by II from a widget for which (1) the SIDN *
- * has a value between 1 and 7, inclusive (II never sends requests to *
- * these widgets (2) there is no valid IPRTE register which *
- * corresponds to the TNUM, or (3) the widget indicated in SIDN is *
- * not the same as the widget recorded in the IPRTE register *
- * referenced by the TNUM. If this condition is true, and if the *
- * IXSS[VALID] bit is clear, then the header of the spurious read *
- * response is capture in IXSM and IXSS, and IXSS[VALID] is set. The *
- * errant header is thereby captured, and no further spurious read *
- * respones are captured until IXSS[VALID] is cleared by setting the *
- * appropriate bit in IECLR.Everytime a spurious read response is *
- * detected, the SPUR_RD bit of the PRB corresponding to the incoming *
- * message's SIDN field is set. This always happens, regarless of *
- * whether a header is captured. The programmer should check *
- * IXSM[SIDN] to determine which widget sent the spurious response, *
- * because there may be more than one SPUR_RD bit set in the PRB *
- * registers. The widget indicated by IXSM[SIDN] was the first *
- * spurious read response to be received since the last time *
- * IXSS[VALID] was clear. The SPUR_RD bit of the corresponding PRB *
- * will be set. Any SPUR_RD bits in any other PRB registers indicate *
- * spurious messages from other widets which were detected after the *
- * header was captured.. *
- * *
- ************************************************************************/
-
-
-
-
-#ifdef LITTLE_ENDIAN
-
-typedef union ii_ixsm_u {
- bdrkreg_t ii_ixsm_regval;
- struct {
- bdrkreg_t i_byte_en : 32;
- bdrkreg_t i_reserved : 1;
- bdrkreg_t i_tag : 3;
- bdrkreg_t i_alt_pactyp : 4;
- bdrkreg_t i_bo : 1;
- bdrkreg_t i_error : 1;
- bdrkreg_t i_vbpm : 1;
- bdrkreg_t i_gbr : 1;
- bdrkreg_t i_ds : 2;
- bdrkreg_t i_ct : 1;
- bdrkreg_t i_tnum : 5;
- bdrkreg_t i_pactyp : 4;
- bdrkreg_t i_sidn : 4;
- bdrkreg_t i_didn : 4;
- } ii_ixsm_fld_s;
-} ii_ixsm_u_t;
-
-#else
-
-typedef union ii_ixsm_u {
- bdrkreg_t ii_ixsm_regval;
- struct {
- bdrkreg_t i_didn : 4;
- bdrkreg_t i_sidn : 4;
- bdrkreg_t i_pactyp : 4;
- bdrkreg_t i_tnum : 5;
- bdrkreg_t i_ct : 1;
- bdrkreg_t i_ds : 2;
- bdrkreg_t i_gbr : 1;
- bdrkreg_t i_vbpm : 1;
- bdrkreg_t i_error : 1;
- bdrkreg_t i_bo : 1;
- bdrkreg_t i_alt_pactyp : 4;
- bdrkreg_t i_tag : 3;
- bdrkreg_t i_reserved : 1;
- bdrkreg_t i_byte_en : 32;
- } ii_ixsm_fld_s;
-} ii_ixsm_u_t;
-
-#endif
-
-
-
-
-/************************************************************************
- * *
- * This register contains the sideband bits of a spurious read *
- * response received from Crosstalk. *
- * *
- ************************************************************************/
-
-
-
-
-#ifdef LITTLE_ENDIAN
-
-typedef union ii_ixss_u {
- bdrkreg_t ii_ixss_regval;
- struct {
- bdrkreg_t i_sideband : 8;
- bdrkreg_t i_rsvd : 55;
- bdrkreg_t i_valid : 1;
- } ii_ixss_fld_s;
-} ii_ixss_u_t;
-
-#else
-
-typedef union ii_ixss_u {
- bdrkreg_t ii_ixss_regval;
- struct {
- bdrkreg_t i_valid : 1;
- bdrkreg_t i_rsvd : 55;
- bdrkreg_t i_sideband : 8;
- } ii_ixss_fld_s;
-} ii_ixss_u_t;
-
-#endif
-
-
-
-
-/************************************************************************
- * *
- * This register enables software to access the II LLP's test port. *
- * Refer to the LLP 2.5 documentation for an explanation of the test *
- * port. Software can write to this register to program the values *
- * for the control fields (TestErrCapture, TestClear, TestFlit, *
- * TestMask and TestSeed). Similarly, software can read from this *
- * register to obtain the values of the test port's status outputs *
- * (TestCBerr, TestValid and TestData). *
- * *
- ************************************************************************/
-
-
-
-
-#ifdef LITTLE_ENDIAN
-
-typedef union ii_ilct_u {
- bdrkreg_t ii_ilct_regval;
- struct {
- bdrkreg_t i_test_seed : 20;
- bdrkreg_t i_test_mask : 8;
- bdrkreg_t i_test_data : 20;
- bdrkreg_t i_test_valid : 1;
- bdrkreg_t i_test_cberr : 1;
- bdrkreg_t i_test_flit : 3;
- bdrkreg_t i_test_clear : 1;
- bdrkreg_t i_test_err_capture : 1;
- bdrkreg_t i_rsvd : 9;
- } ii_ilct_fld_s;
-} ii_ilct_u_t;
-
-#else
-
-typedef union ii_ilct_u {
- bdrkreg_t ii_ilct_regval;
- struct {
- bdrkreg_t i_rsvd : 9;
- bdrkreg_t i_test_err_capture : 1;
- bdrkreg_t i_test_clear : 1;
- bdrkreg_t i_test_flit : 3;
- bdrkreg_t i_test_cberr : 1;
- bdrkreg_t i_test_valid : 1;
- bdrkreg_t i_test_data : 20;
- bdrkreg_t i_test_mask : 8;
- bdrkreg_t i_test_seed : 20;
- } ii_ilct_fld_s;
-} ii_ilct_u_t;
-
-#endif
-
-
-
-
-/************************************************************************
- * *
- * If the II detects an illegal incoming Duplonet packet (request or *
- * reply) when VALID==0 in the IIEPH1 register, then it saves the *
- * contents of the packet's header flit in the IIEPH1 and IIEPH2 *
- * registers, sets the VALID bit in IIEPH1, clears the OVERRUN bit, *
- * and assigns a value to the ERR_TYPE field which indicates the *
- * specific nature of the error. The II recognizes four different *
- * types of errors: short request packets (ERR_TYPE==2), short reply *
- * packets (ERR_TYPE==3), long request packets (ERR_TYPE==4) and long *
- * reply packets (ERR_TYPE==5). The encodings for these types of *
- * errors were chosen to be consistent with the same types of errors *
- * indicated by the ERR_TYPE field in the LB_ERROR_HDR1 register (in *
- * the LB unit). If the II detects an illegal incoming Duplonet *
- * packet when VALID==1 in the IIEPH1 register, then it merely sets *
- * the OVERRUN bit to indicate that a subsequent error has happened, *
- * and does nothing further. *
- * *
- ************************************************************************/
-
-
-
-
-#ifdef LITTLE_ENDIAN
-
-typedef union ii_iieph1_u {
- bdrkreg_t ii_iieph1_regval;
- struct {
- bdrkreg_t i_command : 7;
- bdrkreg_t i_rsvd_5 : 1;
- bdrkreg_t i_suppl : 11;
- bdrkreg_t i_rsvd_4 : 1;
- bdrkreg_t i_source : 11;
- bdrkreg_t i_rsvd_3 : 1;
- bdrkreg_t i_err_type : 4;
- bdrkreg_t i_rsvd_2 : 4;
- bdrkreg_t i_overrun : 1;
- bdrkreg_t i_rsvd_1 : 3;
- bdrkreg_t i_valid : 1;
- bdrkreg_t i_rsvd : 19;
- } ii_iieph1_fld_s;
-} ii_iieph1_u_t;
-
-#else
-
-typedef union ii_iieph1_u {
- bdrkreg_t ii_iieph1_regval;
- struct {
- bdrkreg_t i_rsvd : 19;
- bdrkreg_t i_valid : 1;
- bdrkreg_t i_rsvd_1 : 3;
- bdrkreg_t i_overrun : 1;
- bdrkreg_t i_rsvd_2 : 4;
- bdrkreg_t i_err_type : 4;
- bdrkreg_t i_rsvd_3 : 1;
- bdrkreg_t i_source : 11;
- bdrkreg_t i_rsvd_4 : 1;
- bdrkreg_t i_suppl : 11;
- bdrkreg_t i_rsvd_5 : 1;
- bdrkreg_t i_command : 7;
- } ii_iieph1_fld_s;
-} ii_iieph1_u_t;
-
-#endif
-
-
-
-
-/************************************************************************
- * *
- * This register holds the Address field from the header flit of an *
- * incoming erroneous Duplonet packet, along with the tail bit which *
- * accompanied this header flit. This register is essentially an *
- * extension of IIEPH1. Two registers were necessary because the 64 *
- * bits available in only a single register were insufficient to *
- * capture the entire header flit of an erroneous packet. *
- * *
- ************************************************************************/
-
-
-
-
-#ifdef LITTLE_ENDIAN
-
-typedef union ii_iieph2_u {
- bdrkreg_t ii_iieph2_regval;
- struct {
- bdrkreg_t i_address : 38;
- bdrkreg_t i_rsvd_1 : 2;
- bdrkreg_t i_tail : 1;
- bdrkreg_t i_rsvd : 23;
- } ii_iieph2_fld_s;
-} ii_iieph2_u_t;
-
-#else
-
-typedef union ii_iieph2_u {
- bdrkreg_t ii_iieph2_regval;
- struct {
- bdrkreg_t i_rsvd : 23;
- bdrkreg_t i_tail : 1;
- bdrkreg_t i_rsvd_1 : 2;
- bdrkreg_t i_address : 38;
- } ii_iieph2_fld_s;
-} ii_iieph2_u_t;
-
-#endif
-
-
-
-
-/************************************************************************
- * *
- * A write to this register causes a particular field in the *
- * corresponding widget's PRB entry to be adjusted up or down by 1. *
- * This counter should be used when recovering from error and reset *
- * conditions. Note that software would be capable of causing *
- * inadvertent overflow or underflow of these counters. *
- * *
- ************************************************************************/
-
-
-
-
-#ifdef LITTLE_ENDIAN
-
-typedef union ii_ipca_u {
- bdrkreg_t ii_ipca_regval;
- struct {
- bdrkreg_t i_wid : 4;
- bdrkreg_t i_adjust : 1;
- bdrkreg_t i_rsvd_1 : 3;
- bdrkreg_t i_field : 2;
- bdrkreg_t i_rsvd : 54;
- } ii_ipca_fld_s;
-} ii_ipca_u_t;
-
-#else
-
-typedef union ii_ipca_u {
- bdrkreg_t ii_ipca_regval;
- struct {
- bdrkreg_t i_rsvd : 54;
- bdrkreg_t i_field : 2;
- bdrkreg_t i_rsvd_1 : 3;
- bdrkreg_t i_adjust : 1;
- bdrkreg_t i_wid : 4;
- } ii_ipca_fld_s;
-} ii_ipca_u_t;
-
-#endif
-
-
-
-
-/************************************************************************
- * *
- * There are 8 instances of this register. This register contains *
- * the information that the II has to remember once it has launched a *
- * PIO Read operation. The contents are used to form the correct *
- * Router Network packet and direct the Crosstalk reply to the *
- * appropriate processor. *
- * *
- ************************************************************************/
-
-
-
-
-#ifdef LITTLE_ENDIAN
-
-typedef union ii_iprte0_u {
- bdrkreg_t ii_iprte0_regval;
- struct {
- bdrkreg_t i_rsvd_1 : 3;
- bdrkreg_t i_addr : 38;
- bdrkreg_t i_init : 3;
- bdrkreg_t i_source : 8;
- bdrkreg_t i_rsvd : 2;
- bdrkreg_t i_widget : 4;
- bdrkreg_t i_to_cnt : 5;
- bdrkreg_t i_vld : 1;
- } ii_iprte0_fld_s;
-} ii_iprte0_u_t;
-
-#else
-
-typedef union ii_iprte0_u {
- bdrkreg_t ii_iprte0_regval;
- struct {
- bdrkreg_t i_vld : 1;
- bdrkreg_t i_to_cnt : 5;
- bdrkreg_t i_widget : 4;
- bdrkreg_t i_rsvd : 2;
- bdrkreg_t i_source : 8;
- bdrkreg_t i_init : 3;
- bdrkreg_t i_addr : 38;
- bdrkreg_t i_rsvd_1 : 3;
- } ii_iprte0_fld_s;
-} ii_iprte0_u_t;
-
-#endif
-
-
-
-
-/************************************************************************
- * *
- * There are 8 instances of this register. This register contains *
- * the information that the II has to remember once it has launched a *
- * PIO Read operation. The contents are used to form the correct *
- * Router Network packet and direct the Crosstalk reply to the *
- * appropriate processor. *
- * *
- ************************************************************************/
-
-
-
-
-#ifdef LITTLE_ENDIAN
-
-typedef union ii_iprte1_u {
- bdrkreg_t ii_iprte1_regval;
- struct {
- bdrkreg_t i_rsvd_1 : 3;
- bdrkreg_t i_addr : 38;
- bdrkreg_t i_init : 3;
- bdrkreg_t i_source : 8;
- bdrkreg_t i_rsvd : 2;
- bdrkreg_t i_widget : 4;
- bdrkreg_t i_to_cnt : 5;
- bdrkreg_t i_vld : 1;
- } ii_iprte1_fld_s;
-} ii_iprte1_u_t;
-
-#else
-
-typedef union ii_iprte1_u {
- bdrkreg_t ii_iprte1_regval;
- struct {
- bdrkreg_t i_vld : 1;
- bdrkreg_t i_to_cnt : 5;
- bdrkreg_t i_widget : 4;
- bdrkreg_t i_rsvd : 2;
- bdrkreg_t i_source : 8;
- bdrkreg_t i_init : 3;
- bdrkreg_t i_addr : 38;
- bdrkreg_t i_rsvd_1 : 3;
- } ii_iprte1_fld_s;
-} ii_iprte1_u_t;
-
-#endif
-
-
-
-
-/************************************************************************
- * *
- * There are 8 instances of this register. This register contains *
- * the information that the II has to remember once it has launched a *
- * PIO Read operation. The contents are used to form the correct *
- * Router Network packet and direct the Crosstalk reply to the *
- * appropriate processor. *
- * *
- ************************************************************************/
-
-
-
-
-#ifdef LITTLE_ENDIAN
-
-typedef union ii_iprte2_u {
- bdrkreg_t ii_iprte2_regval;
- struct {
- bdrkreg_t i_rsvd_1 : 3;
- bdrkreg_t i_addr : 38;
- bdrkreg_t i_init : 3;
- bdrkreg_t i_source : 8;
- bdrkreg_t i_rsvd : 2;
- bdrkreg_t i_widget : 4;
- bdrkreg_t i_to_cnt : 5;
- bdrkreg_t i_vld : 1;
- } ii_iprte2_fld_s;
-} ii_iprte2_u_t;
-
-#else
-
-typedef union ii_iprte2_u {
- bdrkreg_t ii_iprte2_regval;
- struct {
- bdrkreg_t i_vld : 1;
- bdrkreg_t i_to_cnt : 5;
- bdrkreg_t i_widget : 4;
- bdrkreg_t i_rsvd : 2;
- bdrkreg_t i_source : 8;
- bdrkreg_t i_init : 3;
- bdrkreg_t i_addr : 38;
- bdrkreg_t i_rsvd_1 : 3;
- } ii_iprte2_fld_s;
-} ii_iprte2_u_t;
-
-#endif
-
-
-
-
-/************************************************************************
- * *
- * There are 8 instances of this register. This register contains *
- * the information that the II has to remember once it has launched a *
- * PIO Read operation. The contents are used to form the correct *
- * Router Network packet and direct the Crosstalk reply to the *
- * appropriate processor. *
- * *
- ************************************************************************/
-
-
-
-
-#ifdef LITTLE_ENDIAN
-
-typedef union ii_iprte3_u {
- bdrkreg_t ii_iprte3_regval;
- struct {
- bdrkreg_t i_rsvd_1 : 3;
- bdrkreg_t i_addr : 38;
- bdrkreg_t i_init : 3;
- bdrkreg_t i_source : 8;
- bdrkreg_t i_rsvd : 2;
- bdrkreg_t i_widget : 4;
- bdrkreg_t i_to_cnt : 5;
- bdrkreg_t i_vld : 1;
- } ii_iprte3_fld_s;
-} ii_iprte3_u_t;
-
-#else
-
-typedef union ii_iprte3_u {
- bdrkreg_t ii_iprte3_regval;
- struct {
- bdrkreg_t i_vld : 1;
- bdrkreg_t i_to_cnt : 5;
- bdrkreg_t i_widget : 4;
- bdrkreg_t i_rsvd : 2;
- bdrkreg_t i_source : 8;
- bdrkreg_t i_init : 3;
- bdrkreg_t i_addr : 38;
- bdrkreg_t i_rsvd_1 : 3;
- } ii_iprte3_fld_s;
-} ii_iprte3_u_t;
-
-#endif
-
-
-
-
-/************************************************************************
- * *
- * There are 8 instances of this register. This register contains *
- * the information that the II has to remember once it has launched a *
- * PIO Read operation. The contents are used to form the correct *
- * Router Network packet and direct the Crosstalk reply to the *
- * appropriate processor. *
- * *
- ************************************************************************/
-
-
-
-
-#ifdef LITTLE_ENDIAN
-
-typedef union ii_iprte4_u {
- bdrkreg_t ii_iprte4_regval;
- struct {
- bdrkreg_t i_rsvd_1 : 3;
- bdrkreg_t i_addr : 38;
- bdrkreg_t i_init : 3;
- bdrkreg_t i_source : 8;
- bdrkreg_t i_rsvd : 2;
- bdrkreg_t i_widget : 4;
- bdrkreg_t i_to_cnt : 5;
- bdrkreg_t i_vld : 1;
- } ii_iprte4_fld_s;
-} ii_iprte4_u_t;
-
-#else
-
-typedef union ii_iprte4_u {
- bdrkreg_t ii_iprte4_regval;
- struct {
- bdrkreg_t i_vld : 1;
- bdrkreg_t i_to_cnt : 5;
- bdrkreg_t i_widget : 4;
- bdrkreg_t i_rsvd : 2;
- bdrkreg_t i_source : 8;
- bdrkreg_t i_init : 3;
- bdrkreg_t i_addr : 38;
- bdrkreg_t i_rsvd_1 : 3;
- } ii_iprte4_fld_s;
-} ii_iprte4_u_t;
-
-#endif
-
-
-
-
-/************************************************************************
- * *
- * There are 8 instances of this register. This register contains *
- * the information that the II has to remember once it has launched a *
- * PIO Read operation. The contents are used to form the correct *
- * Router Network packet and direct the Crosstalk reply to the *
- * appropriate processor. *
- * *
- ************************************************************************/
-
-
-
-
-#ifdef LITTLE_ENDIAN
-
-typedef union ii_iprte5_u {
- bdrkreg_t ii_iprte5_regval;
- struct {
- bdrkreg_t i_rsvd_1 : 3;
- bdrkreg_t i_addr : 38;
- bdrkreg_t i_init : 3;
- bdrkreg_t i_source : 8;
- bdrkreg_t i_rsvd : 2;
- bdrkreg_t i_widget : 4;
- bdrkreg_t i_to_cnt : 5;
- bdrkreg_t i_vld : 1;
- } ii_iprte5_fld_s;
-} ii_iprte5_u_t;
-
-#else
-
-typedef union ii_iprte5_u {
- bdrkreg_t ii_iprte5_regval;
- struct {
- bdrkreg_t i_vld : 1;
- bdrkreg_t i_to_cnt : 5;
- bdrkreg_t i_widget : 4;
- bdrkreg_t i_rsvd : 2;
- bdrkreg_t i_source : 8;
- bdrkreg_t i_init : 3;
- bdrkreg_t i_addr : 38;
- bdrkreg_t i_rsvd_1 : 3;
- } ii_iprte5_fld_s;
-} ii_iprte5_u_t;
-
-#endif
-
-
-
-
-/************************************************************************
- * *
- * There are 8 instances of this register. This register contains *
- * the information that the II has to remember once it has launched a *
- * PIO Read operation. The contents are used to form the correct *
- * Router Network packet and direct the Crosstalk reply to the *
- * appropriate processor. *
- * *
- ************************************************************************/
-
-
-
-
-#ifdef LITTLE_ENDIAN
-
-typedef union ii_iprte6_u {
- bdrkreg_t ii_iprte6_regval;
- struct {
- bdrkreg_t i_rsvd_1 : 3;
- bdrkreg_t i_addr : 38;
- bdrkreg_t i_init : 3;
- bdrkreg_t i_source : 8;
- bdrkreg_t i_rsvd : 2;
- bdrkreg_t i_widget : 4;
- bdrkreg_t i_to_cnt : 5;
- bdrkreg_t i_vld : 1;
- } ii_iprte6_fld_s;
-} ii_iprte6_u_t;
-
-#else
-
-typedef union ii_iprte6_u {
- bdrkreg_t ii_iprte6_regval;
- struct {
- bdrkreg_t i_vld : 1;
- bdrkreg_t i_to_cnt : 5;
- bdrkreg_t i_widget : 4;
- bdrkreg_t i_rsvd : 2;
- bdrkreg_t i_source : 8;
- bdrkreg_t i_init : 3;
- bdrkreg_t i_addr : 38;
- bdrkreg_t i_rsvd_1 : 3;
- } ii_iprte6_fld_s;
-} ii_iprte6_u_t;
-
-#endif
-
-
-
-
-/************************************************************************
- * *
- * There are 8 instances of this register. This register contains *
- * the information that the II has to remember once it has launched a *
- * PIO Read operation. The contents are used to form the correct *
- * Router Network packet and direct the Crosstalk reply to the *
- * appropriate processor. *
- * *
- ************************************************************************/
-
-
-
-
-#ifdef LITTLE_ENDIAN
-
-typedef union ii_iprte7_u {
- bdrkreg_t ii_iprte7_regval;
- struct {
- bdrkreg_t i_rsvd_1 : 3;
- bdrkreg_t i_addr : 38;
- bdrkreg_t i_init : 3;
- bdrkreg_t i_source : 8;
- bdrkreg_t i_rsvd : 2;
- bdrkreg_t i_widget : 4;
- bdrkreg_t i_to_cnt : 5;
- bdrkreg_t i_vld : 1;
- } ii_iprte7_fld_s;
-} ii_iprte7_u_t;
-
-#else
-
-typedef union ii_iprte7_u {
- bdrkreg_t ii_iprte7_regval;
- struct {
- bdrkreg_t i_vld : 1;
- bdrkreg_t i_to_cnt : 5;
- bdrkreg_t i_widget : 4;
- bdrkreg_t i_rsvd : 2;
- bdrkreg_t i_source : 8;
- bdrkreg_t i_init : 3;
- bdrkreg_t i_addr : 38;
- bdrkreg_t i_rsvd_1 : 3;
- } ii_iprte7_fld_s;
-} ii_iprte7_u_t;
-
-#endif
-
-
-
-
-/************************************************************************
- * *
- * Description: Bedrock_II contains a feature which did not exist in *
- * the Hub which automatically cleans up after a Read Response *
- * timeout, including deallocation of the IPRTE and recovery of IBuf *
- * space. The inclusion of this register in Bedrock is for backward *
- * compatibility *
- * A write to this register causes an entry from the table of *
- * outstanding PIO Read Requests to be freed and returned to the *
- * stack of free entries. This register is used in handling the *
- * timeout errors that result in a PIO Reply never returning from *
- * Crosstalk. *
- * Note that this register does not affect the contents of the IPRTE *
- * registers. The Valid bits in those registers have to be *
- * specifically turned off by software. *
- * *
- ************************************************************************/
-
-
-
-
-#ifdef LITTLE_ENDIAN
-
-typedef union ii_ipdr_u {
- bdrkreg_t ii_ipdr_regval;
- struct {
- bdrkreg_t i_te : 3;
- bdrkreg_t i_rsvd_1 : 1;
- bdrkreg_t i_pnd : 1;
- bdrkreg_t i_init_rpcnt : 1;
- bdrkreg_t i_rsvd : 58;
- } ii_ipdr_fld_s;
-} ii_ipdr_u_t;
-
-#else
-
-typedef union ii_ipdr_u {
- bdrkreg_t ii_ipdr_regval;
- struct {
- bdrkreg_t i_rsvd : 58;
- bdrkreg_t i_init_rpcnt : 1;
- bdrkreg_t i_pnd : 1;
- bdrkreg_t i_rsvd_1 : 1;
- bdrkreg_t i_te : 3;
- } ii_ipdr_fld_s;
-} ii_ipdr_u_t;
-
-#endif
-
-
-
-
-/************************************************************************
- * *
- * A write to this register causes a CRB entry to be returned to the *
- * queue of free CRBs. The entry should have previously been cleared *
- * (mark bit) via backdoor access to the pertinent CRB entry. This *
- * register is used in the last step of handling the errors that are *
- * captured and marked in CRB entries. Briefly: 1) first error for *
- * DMA write from a particular device, and first error for a *
- * particular BTE stream, lead to a marked CRB entry, and processor *
- * interrupt, 2) software reads the error information captured in the *
- * CRB entry, and presumably takes some corrective action, 3) *
- * software clears the mark bit, and finally 4) software writes to *
- * the ICDR register to return the CRB entry to the list of free CRB *
- * entries. *
- * *
- ************************************************************************/
-
-
-
-
-#ifdef LITTLE_ENDIAN
-
-typedef union ii_icdr_u {
- bdrkreg_t ii_icdr_regval;
- struct {
- bdrkreg_t i_crb_num : 4;
- bdrkreg_t i_pnd : 1;
- bdrkreg_t i_rsvd : 59;
- } ii_icdr_fld_s;
-} ii_icdr_u_t;
-
-#else
-
-typedef union ii_icdr_u {
- bdrkreg_t ii_icdr_regval;
- struct {
- bdrkreg_t i_rsvd : 59;
- bdrkreg_t i_pnd : 1;
- bdrkreg_t i_crb_num : 4;
- } ii_icdr_fld_s;
-} ii_icdr_u_t;
-
-#endif
-
-
-
-
-/************************************************************************
- * *
- * This register provides debug access to two FIFOs inside of II. *
- * Both IOQ_MAX* fields of this register contain the instantaneous *
- * depth (in units of the number of available entries) of the *
- * associated IOQ FIFO. A read of this register will return the *
- * number of free entries on each FIFO at the time of the read. So *
- * when a FIFO is idle, the associated field contains the maximum *
- * depth of the FIFO. This register is writable for debug reasons *
- * and is intended to be written with the maximum desired FIFO depth *
- * while the FIFO is idle. Software must assure that II is idle when *
- * this register is written. If there are any active entries in any *
- * of these FIFOs when this register is written, the results are *
- * undefined. *
- * *
- ************************************************************************/
-
-
-
-
-#ifdef LITTLE_ENDIAN
-
-typedef union ii_ifdr_u {
- bdrkreg_t ii_ifdr_regval;
- struct {
- bdrkreg_t i_ioq_max_rq : 7;
- bdrkreg_t i_set_ioq_rq : 1;
- bdrkreg_t i_ioq_max_rp : 7;
- bdrkreg_t i_set_ioq_rp : 1;
- bdrkreg_t i_rsvd : 48;
- } ii_ifdr_fld_s;
-} ii_ifdr_u_t;
-
-#else
-
-typedef union ii_ifdr_u {
- bdrkreg_t ii_ifdr_regval;
- struct {
- bdrkreg_t i_rsvd : 48;
- bdrkreg_t i_set_ioq_rp : 1;
- bdrkreg_t i_ioq_max_rp : 7;
- bdrkreg_t i_set_ioq_rq : 1;
- bdrkreg_t i_ioq_max_rq : 7;
- } ii_ifdr_fld_s;
-} ii_ifdr_u_t;
-
-#endif
-
-
-
-
-/************************************************************************
- * *
- * This register allows the II to become sluggish in removing *
- * messages from its inbound queue (IIQ). This will cause messages to *
- * back up in either virtual channel. Disabling the "molasses" mode *
- * subsequently allows the II to be tested under stress. In the *
- * sluggish ("Molasses") mode, the localized effects of congestion *
- * can be observed. *
- * *
- ************************************************************************/
-
-
-
-
-#ifdef LITTLE_ENDIAN
-
-typedef union ii_iiap_u {
- bdrkreg_t ii_iiap_regval;
- struct {
- bdrkreg_t i_rq_mls : 6;
- bdrkreg_t i_rsvd_1 : 2;
- bdrkreg_t i_rp_mls : 6;
- bdrkreg_t i_rsvd : 50;
- } ii_iiap_fld_s;
-} ii_iiap_u_t;
-
-#else
-
-typedef union ii_iiap_u {
- bdrkreg_t ii_iiap_regval;
- struct {
- bdrkreg_t i_rsvd : 50;
- bdrkreg_t i_rp_mls : 6;
- bdrkreg_t i_rsvd_1 : 2;
- bdrkreg_t i_rq_mls : 6;
- } ii_iiap_fld_s;
-} ii_iiap_u_t;
-
-#endif
-
-
-
-
-/************************************************************************
- * *
- * This register allows several parameters of CRB operation to be *
- * set. Note that writing to this register can have catastrophic side *
- * effects, if the CRB is not quiescent, i.e. if the CRB is *
- * processing protocol messages when the write occurs. *
- * *
- ************************************************************************/
-
-
-
-
-#ifdef LITTLE_ENDIAN
-
-typedef union ii_icmr_u {
- bdrkreg_t ii_icmr_regval;
- struct {
- bdrkreg_t i_sp_msg : 1;
- bdrkreg_t i_rd_hdr : 1;
- bdrkreg_t i_rsvd_4 : 2;
- bdrkreg_t i_c_cnt : 4;
- bdrkreg_t i_rsvd_3 : 4;
- bdrkreg_t i_clr_rqpd : 1;
- bdrkreg_t i_clr_rppd : 1;
- bdrkreg_t i_rsvd_2 : 2;
- bdrkreg_t i_fc_cnt : 4;
- bdrkreg_t i_crb_vld : 15;
- bdrkreg_t i_crb_mark : 15;
- bdrkreg_t i_rsvd_1 : 2;
- bdrkreg_t i_precise : 1;
- bdrkreg_t i_rsvd : 11;
- } ii_icmr_fld_s;
-} ii_icmr_u_t;
-
-#else
-
-typedef union ii_icmr_u {
- bdrkreg_t ii_icmr_regval;
- struct {
- bdrkreg_t i_rsvd : 11;
- bdrkreg_t i_precise : 1;
- bdrkreg_t i_rsvd_1 : 2;
- bdrkreg_t i_crb_mark : 15;
- bdrkreg_t i_crb_vld : 15;
- bdrkreg_t i_fc_cnt : 4;
- bdrkreg_t i_rsvd_2 : 2;
- bdrkreg_t i_clr_rppd : 1;
- bdrkreg_t i_clr_rqpd : 1;
- bdrkreg_t i_rsvd_3 : 4;
- bdrkreg_t i_c_cnt : 4;
- bdrkreg_t i_rsvd_4 : 2;
- bdrkreg_t i_rd_hdr : 1;
- bdrkreg_t i_sp_msg : 1;
- } ii_icmr_fld_s;
-} ii_icmr_u_t;
-
-#endif
-
-
-
-
-/************************************************************************
- * *
- * This register allows control of the table portion of the CRB *
- * logic via software. Control operations from this register have *
- * priority over all incoming Crosstalk or BTE requests. *
- * *
- ************************************************************************/
-
-
-
-
-#ifdef LITTLE_ENDIAN
-
-typedef union ii_iccr_u {
- bdrkreg_t ii_iccr_regval;
- struct {
- bdrkreg_t i_crb_num : 4;
- bdrkreg_t i_rsvd_1 : 4;
- bdrkreg_t i_cmd : 8;
- bdrkreg_t i_pending : 1;
- bdrkreg_t i_rsvd : 47;
- } ii_iccr_fld_s;
-} ii_iccr_u_t;
-
-#else
-
-typedef union ii_iccr_u {
- bdrkreg_t ii_iccr_regval;
- struct {
- bdrkreg_t i_rsvd : 47;
- bdrkreg_t i_pending : 1;
- bdrkreg_t i_cmd : 8;
- bdrkreg_t i_rsvd_1 : 4;
- bdrkreg_t i_crb_num : 4;
- } ii_iccr_fld_s;
-} ii_iccr_u_t;
-
-#endif
-
-
-
-
-/************************************************************************
- * *
- * This register allows the maximum timeout value to be programmed. *
- * *
- ************************************************************************/
-
-
-
-
-#ifdef LITTLE_ENDIAN
-
-typedef union ii_icto_u {
- bdrkreg_t ii_icto_regval;
- struct {
- bdrkreg_t i_timeout : 8;
- bdrkreg_t i_rsvd : 56;
- } ii_icto_fld_s;
-} ii_icto_u_t;
-
-#else
-
-typedef union ii_icto_u {
- bdrkreg_t ii_icto_regval;
- struct {
- bdrkreg_t i_rsvd : 56;
- bdrkreg_t i_timeout : 8;
- } ii_icto_fld_s;
-} ii_icto_u_t;
-
-#endif
-
-
-
-
-/************************************************************************
- * *
- * This register allows the timeout prescalar to be programmed. An *
- * internal counter is associated with this register. When the *
- * internal counter reaches the value of the PRESCALE field, the *
- * timer registers in all valid CRBs are incremented (CRBx_D[TIMEOUT] *
- * field). The internal counter resets to zero, and then continues *
- * counting. *
- * *
- ************************************************************************/
-
-
-
-
-#ifdef LITTLE_ENDIAN
-
-typedef union ii_ictp_u {
- bdrkreg_t ii_ictp_regval;
- struct {
- bdrkreg_t i_prescale : 24;
- bdrkreg_t i_rsvd : 40;
- } ii_ictp_fld_s;
-} ii_ictp_u_t;
-
-#else
-
-typedef union ii_ictp_u {
- bdrkreg_t ii_ictp_regval;
- struct {
- bdrkreg_t i_rsvd : 40;
- bdrkreg_t i_prescale : 24;
- } ii_ictp_fld_s;
-} ii_ictp_u_t;
-
-#endif
-
-
-
-
-/************************************************************************
- * *
- * Description: There are 15 CRB Entries (ICRB0 to ICRBE) that are *
- * used for Crosstalk operations (both cacheline and partial *
- * operations) or BTE/IO. Because the CRB entries are very wide, four *
- * registers (_A to _D) are required to read and write each entry. *
- * The CRB Entry registers can be conceptualized as rows and columns *
- * (illustrated in the table above). Each row contains the 4 *
- * registers required for a single CRB Entry. The first doubleword *
- * (column) for each entry is labeled A, and the second doubleword *
- * (higher address) is labeled B, the third doubleword is labeled C, *
- * and the fourth doubleword is labeled D. All CRB entries have their *
- * addresses on a quarter cacheline aligned boundary. *
- * Upon reset, only the following fields are initialized: valid *
- * (VLD), priority count, timeout, timeout valid, and context valid. *
- * All other bits should be cleared by software before use (after *
- * recovering any potential error state from before the reset). *
- * The following four tables summarize the format for the four *
- * registers that are used for each ICRB# Entry. *
- * *
- ************************************************************************/
-
-
-
-
-#ifdef LITTLE_ENDIAN
-
-typedef union ii_icrb0_a_u {
- bdrkreg_t ii_icrb0_a_regval;
- struct {
- bdrkreg_t ia_iow : 1;
- bdrkreg_t ia_vld : 1;
- bdrkreg_t ia_addr : 38;
- bdrkreg_t ia_tnum : 5;
- bdrkreg_t ia_sidn : 4;
- bdrkreg_t ia_xt_err : 1;
- bdrkreg_t ia_mark : 1;
- bdrkreg_t ia_ln_uce : 1;
- bdrkreg_t ia_errcode : 3;
- bdrkreg_t ia_error : 1;
- bdrkreg_t ia_stall__bte_1 : 1;
- bdrkreg_t ia_stall__bte_0 : 1;
- bdrkreg_t ia_rsvd : 6;
- } ii_icrb0_a_fld_s;
-} ii_icrb0_a_u_t;
-
-#else
-
-typedef union ii_icrb0_a_u {
- bdrkreg_t ii_icrb0_a_regval;
- struct {
- bdrkreg_t ia_rsvd : 6;
- bdrkreg_t ia_stall__bte_0 : 1;
- bdrkreg_t ia_stall__bte_1 : 1;
- bdrkreg_t ia_error : 1;
- bdrkreg_t ia_errcode : 3;
- bdrkreg_t ia_ln_uce : 1;
- bdrkreg_t ia_mark : 1;
- bdrkreg_t ia_xt_err : 1;
- bdrkreg_t ia_sidn : 4;
- bdrkreg_t ia_tnum : 5;
- bdrkreg_t ia_addr : 38;
- bdrkreg_t ia_vld : 1;
- bdrkreg_t ia_iow : 1;
- } ii_icrb0_a_fld_s;
-} ii_icrb0_a_u_t;
-
-#endif
-
-
-
-
-/************************************************************************
- * *
- * Description: There are 15 CRB Entries (ICRB0 to ICRBE) that are *
- * used for Crosstalk operations (both cacheline and partial *
- * operations) or BTE/IO. Because the CRB entries are very wide, four *
- * registers (_A to _D) are required to read and write each entry. *
- * *
- ************************************************************************/
-
-
-
-
-
-#ifdef LITTLE_ENDIAN
-
-typedef union ii_icrb0_b_u {
- bdrkreg_t ii_icrb0_b_regval;
- struct {
- bdrkreg_t ib_stall__intr : 1;
- bdrkreg_t ib_stall_ib : 1;
- bdrkreg_t ib_intvn : 1;
- bdrkreg_t ib_wb : 1;
- bdrkreg_t ib_hold : 1;
- bdrkreg_t ib_ack : 1;
- bdrkreg_t ib_resp : 1;
- bdrkreg_t ib_ack_cnt : 11;
- bdrkreg_t ib_rsvd_1 : 7;
- bdrkreg_t ib_exc : 5;
- bdrkreg_t ib_init : 3;
- bdrkreg_t ib_imsg : 8;
- bdrkreg_t ib_imsgtype : 2;
- bdrkreg_t ib_use_old : 1;
- bdrkreg_t ib_source : 12;
- bdrkreg_t ib_size : 2;
- bdrkreg_t ib_ct : 1;
- bdrkreg_t ib_bte_num : 1;
- bdrkreg_t ib_rsvd : 4;
- } ii_icrb0_b_fld_s;
-} ii_icrb0_b_u_t;
-
-#else
-
-typedef union ii_icrb0_b_u {
- bdrkreg_t ii_icrb0_b_regval;
- struct {
- bdrkreg_t ib_rsvd : 4;
- bdrkreg_t ib_bte_num : 1;
- bdrkreg_t ib_ct : 1;
- bdrkreg_t ib_size : 2;
- bdrkreg_t ib_source : 12;
- bdrkreg_t ib_use_old : 1;
- bdrkreg_t ib_imsgtype : 2;
- bdrkreg_t ib_imsg : 8;
- bdrkreg_t ib_init : 3;
- bdrkreg_t ib_exc : 5;
- bdrkreg_t ib_rsvd_1 : 7;
- bdrkreg_t ib_ack_cnt : 11;
- bdrkreg_t ib_resp : 1;
- bdrkreg_t ib_ack : 1;
- bdrkreg_t ib_hold : 1;
- bdrkreg_t ib_wb : 1;
- bdrkreg_t ib_intvn : 1;
- bdrkreg_t ib_stall_ib : 1;
- bdrkreg_t ib_stall__intr : 1;
- } ii_icrb0_b_fld_s;
-} ii_icrb0_b_u_t;
-
-#endif
-
-
-
-
-/************************************************************************
- * *
- * Description: There are 15 CRB Entries (ICRB0 to ICRBE) that are *
- * used for Crosstalk operations (both cacheline and partial *
- * operations) or BTE/IO. Because the CRB entries are very wide, four *
- * registers (_A to _D) are required to read and write each entry. *
- * *
- ************************************************************************/
-
-
-
-
-#ifdef LITTLE_ENDIAN
-
-typedef union ii_icrb0_c_u {
- bdrkreg_t ii_icrb0_c_regval;
- struct {
- bdrkreg_t ic_gbr : 1;
- bdrkreg_t ic_resprqd : 1;
- bdrkreg_t ic_bo : 1;
- bdrkreg_t ic_suppl : 12;
- bdrkreg_t ic_pa_be : 34;
- bdrkreg_t ic_bte_op : 1;
- bdrkreg_t ic_pr_psc : 4;
- bdrkreg_t ic_pr_cnt : 4;
- bdrkreg_t ic_sleep : 1;
- bdrkreg_t ic_rsvd : 5;
- } ii_icrb0_c_fld_s;
-} ii_icrb0_c_u_t;
-
-#else
-
-typedef union ii_icrb0_c_u {
- bdrkreg_t ii_icrb0_c_regval;
- struct {
- bdrkreg_t ic_rsvd : 5;
- bdrkreg_t ic_sleep : 1;
- bdrkreg_t ic_pr_cnt : 4;
- bdrkreg_t ic_pr_psc : 4;
- bdrkreg_t ic_bte_op : 1;
- bdrkreg_t ic_pa_be : 34;
- bdrkreg_t ic_suppl : 12;
- bdrkreg_t ic_bo : 1;
- bdrkreg_t ic_resprqd : 1;
- bdrkreg_t ic_gbr : 1;
- } ii_icrb0_c_fld_s;
-} ii_icrb0_c_u_t;
-
-#endif
-
-
-
-/************************************************************************
- * *
- * Description: There are 15 CRB Entries (ICRB0 to ICRBE) that are *
- * used for Crosstalk operations (both cacheline and partial *
- * operations) or BTE/IO. Because the CRB entries are very wide, four *
- * registers (_A to _D) are required to read and write each entry. *
- * *
- ************************************************************************/
-
-
-
-
-#ifdef LITTLE_ENDIAN
-
-typedef union ii_icrb0_d_u {
- bdrkreg_t ii_icrb0_d_regval;
- struct {
- bdrkreg_t id_timeout : 8;
- bdrkreg_t id_context : 15;
- bdrkreg_t id_rsvd_1 : 1;
- bdrkreg_t id_tvld : 1;
- bdrkreg_t id_cvld : 1;
- bdrkreg_t id_rsvd : 38;
- } ii_icrb0_d_fld_s;
-} ii_icrb0_d_u_t;
-
-#else
-
-typedef union ii_icrb0_d_u {
- bdrkreg_t ii_icrb0_d_regval;
- struct {
- bdrkreg_t id_rsvd : 38;
- bdrkreg_t id_cvld : 1;
- bdrkreg_t id_tvld : 1;
- bdrkreg_t id_rsvd_1 : 1;
- bdrkreg_t id_context : 15;
- bdrkreg_t id_timeout : 8;
- } ii_icrb0_d_fld_s;
-} ii_icrb0_d_u_t;
-
-#endif
-
-
-
-
-/************************************************************************
- * *
- * This register contains the lower 64 bits of the header of the *
- * spurious message captured by II. Valid when the SP_MSG bit in ICMR *
- * register is set. *
- * *
- ************************************************************************/
-
-
-
-
-#ifdef LITTLE_ENDIAN
-
-typedef union ii_icsml_u {
- bdrkreg_t ii_icsml_regval;
- struct {
- bdrkreg_t i_tt_addr : 38;
- bdrkreg_t i_tt_ack_cnt : 11;
- bdrkreg_t i_newsuppl_ex : 11;
- bdrkreg_t i_reserved : 3;
- bdrkreg_t i_overflow : 1;
- } ii_icsml_fld_s;
-} ii_icsml_u_t;
-
-#else
-
-typedef union ii_icsml_u {
- bdrkreg_t ii_icsml_regval;
- struct {
- bdrkreg_t i_overflow : 1;
- bdrkreg_t i_reserved : 3;
- bdrkreg_t i_newsuppl_ex : 11;
- bdrkreg_t i_tt_ack_cnt : 11;
- bdrkreg_t i_tt_addr : 38;
- } ii_icsml_fld_s;
-} ii_icsml_u_t;
-
-#endif
-
-
-
-
-/************************************************************************
- * *
- * This register contains the microscopic state, all the inputs to *
- * the protocol table, captured with the spurious message. Valid when *
- * the SP_MSG bit in the ICMR register is set. *
- * *
- ************************************************************************/
-
-
-
-
-#ifdef LITTLE_ENDIAN
-
-typedef union ii_icsmh_u {
- bdrkreg_t ii_icsmh_regval;
- struct {
- bdrkreg_t i_tt_vld : 1;
- bdrkreg_t i_xerr : 1;
- bdrkreg_t i_ft_cwact_o : 1;
- bdrkreg_t i_ft_wact_o : 1;
- bdrkreg_t i_ft_active_o : 1;
- bdrkreg_t i_sync : 1;
- bdrkreg_t i_mnusg : 1;
- bdrkreg_t i_mnusz : 1;
- bdrkreg_t i_plusz : 1;
- bdrkreg_t i_plusg : 1;
- bdrkreg_t i_tt_exc : 5;
- bdrkreg_t i_tt_wb : 1;
- bdrkreg_t i_tt_hold : 1;
- bdrkreg_t i_tt_ack : 1;
- bdrkreg_t i_tt_resp : 1;
- bdrkreg_t i_tt_intvn : 1;
- bdrkreg_t i_g_stall_bte1 : 1;
- bdrkreg_t i_g_stall_bte0 : 1;
- bdrkreg_t i_g_stall_il : 1;
- bdrkreg_t i_g_stall_ib : 1;
- bdrkreg_t i_tt_imsg : 8;
- bdrkreg_t i_tt_imsgtype : 2;
- bdrkreg_t i_tt_use_old : 1;
- bdrkreg_t i_tt_respreqd : 1;
- bdrkreg_t i_tt_bte_num : 1;
- bdrkreg_t i_cbn : 1;
- bdrkreg_t i_match : 1;
- bdrkreg_t i_rpcnt_lt_34 : 1;
- bdrkreg_t i_rpcnt_ge_34 : 1;
- bdrkreg_t i_rpcnt_lt_18 : 1;
- bdrkreg_t i_rpcnt_ge_18 : 1;
- bdrkreg_t i_rpcnt_lt_2 : 1;
- bdrkreg_t i_rpcnt_ge_2 : 1;
- bdrkreg_t i_rqcnt_lt_18 : 1;
- bdrkreg_t i_rqcnt_ge_18 : 1;
- bdrkreg_t i_rqcnt_lt_2 : 1;
- bdrkreg_t i_rqcnt_ge_2 : 1;
- bdrkreg_t i_tt_device : 7;
- bdrkreg_t i_tt_init : 3;
- bdrkreg_t i_reserved : 5;
- } ii_icsmh_fld_s;
-} ii_icsmh_u_t;
-
-#else
-
-typedef union ii_icsmh_u {
- bdrkreg_t ii_icsmh_regval;
- struct {
- bdrkreg_t i_reserved : 5;
- bdrkreg_t i_tt_init : 3;
- bdrkreg_t i_tt_device : 7;
- bdrkreg_t i_rqcnt_ge_2 : 1;
- bdrkreg_t i_rqcnt_lt_2 : 1;
- bdrkreg_t i_rqcnt_ge_18 : 1;
- bdrkreg_t i_rqcnt_lt_18 : 1;
- bdrkreg_t i_rpcnt_ge_2 : 1;
- bdrkreg_t i_rpcnt_lt_2 : 1;
- bdrkreg_t i_rpcnt_ge_18 : 1;
- bdrkreg_t i_rpcnt_lt_18 : 1;
- bdrkreg_t i_rpcnt_ge_34 : 1;
- bdrkreg_t i_rpcnt_lt_34 : 1;
- bdrkreg_t i_match : 1;
- bdrkreg_t i_cbn : 1;
- bdrkreg_t i_tt_bte_num : 1;
- bdrkreg_t i_tt_respreqd : 1;
- bdrkreg_t i_tt_use_old : 1;
- bdrkreg_t i_tt_imsgtype : 2;
- bdrkreg_t i_tt_imsg : 8;
- bdrkreg_t i_g_stall_ib : 1;
- bdrkreg_t i_g_stall_il : 1;
- bdrkreg_t i_g_stall_bte0 : 1;
- bdrkreg_t i_g_stall_bte1 : 1;
- bdrkreg_t i_tt_intvn : 1;
- bdrkreg_t i_tt_resp : 1;
- bdrkreg_t i_tt_ack : 1;
- bdrkreg_t i_tt_hold : 1;
- bdrkreg_t i_tt_wb : 1;
- bdrkreg_t i_tt_exc : 5;
- bdrkreg_t i_plusg : 1;
- bdrkreg_t i_plusz : 1;
- bdrkreg_t i_mnusz : 1;
- bdrkreg_t i_mnusg : 1;
- bdrkreg_t i_sync : 1;
- bdrkreg_t i_ft_active_o : 1;
- bdrkreg_t i_ft_wact_o : 1;
- bdrkreg_t i_ft_cwact_o : 1;
- bdrkreg_t i_xerr : 1;
- bdrkreg_t i_tt_vld : 1;
- } ii_icsmh_fld_s;
-} ii_icsmh_u_t;
-
-#endif
-
-
-/************************************************************************
- * *
- * The Bedrock DEBUG unit provides a 3-bit selection signal to the *
- * II unit, thus allowing a choice of one set of debug signal outputs *
- * from a menu of 8 options. Each option is limited to 32 bits in *
- * size. There are more signals of interest than can be accommodated *
- * in this 8*32 framework, so the IDBSS register has been defined to *
- * extend the range of choices available. For each menu option *
- * available to the DEBUG unit, the II provides a "submenu" of *
- * several options. The value of the SUBMENU field in the IDBSS *
- * register selects the desired submenu. Hence, the particular debug *
- * signals provided by the II are determined by the 3-bit selection *
- * signal from the DEBUG unit and the value of the SUBMENU field *
- * within the IDBSS register. For a detailed description of the *
- * available menus and submenus for II debug signals, refer to the *
- * documentation in ii_interface.doc.. *
- * *
- ************************************************************************/
-
-
-
-
-#ifdef LIITLE_ENDIAN
-
-typedef union ii_idbss_u {
- bdrkreg_t ii_idbss_regval;
- struct {
- bdrkreg_t i_submenu : 3;
- bdrkreg_t i_rsvd : 61;
- } ii_idbss_fld_s;
-} ii_idbss_u_t;
-
-#else
-
-typedef union ii_idbss_u {
- bdrkreg_t ii_idbss_regval;
- struct {
- bdrkreg_t i_rsvd : 61;
- bdrkreg_t i_submenu : 3;
- } ii_idbss_fld_s;
-} ii_idbss_u_t;
-
-#endif
-
-
-
-
-/************************************************************************
- * *
- * Description: This register is used to set up the length for a *
- * transfer and then to monitor the progress of that transfer. This *
- * register needs to be initialized before a transfer is started. A *
- * legitimate write to this register will set the Busy bit, clear the *
- * Error bit, and initialize the length to the value desired. *
- * While the transfer is in progress, hardware will decrement the *
- * length field with each successful block that is copied. Once the *
- * transfer completes, hardware will clear the Busy bit. The length *
- * field will also contain the number of cache lines left to be *
- * transferred. *
- * *
- ************************************************************************/
-
-
-
-
-#ifdef LIITLE_ENDIAN
-
-typedef union ii_ibls0_u {
- bdrkreg_t ii_ibls0_regval;
- struct {
- bdrkreg_t i_length : 16;
- bdrkreg_t i_error : 1;
- bdrkreg_t i_rsvd_1 : 3;
- bdrkreg_t i_busy : 1;
- bdrkreg_t i_rsvd : 43;
- } ii_ibls0_fld_s;
-} ii_ibls0_u_t;
-
-#else
-
-typedef union ii_ibls0_u {
- bdrkreg_t ii_ibls0_regval;
- struct {
- bdrkreg_t i_rsvd : 43;
- bdrkreg_t i_busy : 1;
- bdrkreg_t i_rsvd_1 : 3;
- bdrkreg_t i_error : 1;
- bdrkreg_t i_length : 16;
- } ii_ibls0_fld_s;
-} ii_ibls0_u_t;
-
-#endif
-
-
-
-
-/************************************************************************
- * *
- * This register should be loaded before a transfer is started. The *
- * address to be loaded in bits 39:0 is the 40-bit TRex+ physical *
- * address as described in Section 1.3, Figure2 and Figure3. Since *
- * the bottom 7 bits of the address are always taken to be zero, BTE *
- * transfers are always cacheline-aligned. *
- * *
- ************************************************************************/
-
-
-
-
-#ifdef LITTLE_ENDIAN
-
-typedef union ii_ibsa0_u {
- bdrkreg_t ii_ibsa0_regval;
- struct {
- bdrkreg_t i_rsvd_1 : 7;
- bdrkreg_t i_addr : 33;
- bdrkreg_t i_rsvd : 24;
- } ii_ibsa0_fld_s;
-} ii_ibsa0_u_t;
-
-#else
-
-typedef union ii_ibsa0_u {
- bdrkreg_t ii_ibsa0_regval;
- struct {
- bdrkreg_t i_rsvd : 24;
- bdrkreg_t i_addr : 33;
- bdrkreg_t i_rsvd_1 : 7;
- } ii_ibsa0_fld_s;
-} ii_ibsa0_u_t;
-
-#endif
-
-
-
-
-/************************************************************************
- * *
- * This register should be loaded before a transfer is started. The *
- * address to be loaded in bits 39:0 is the 40-bit TRex+ physical *
- * address as described in Section 1.3, Figure2 and Figure3. Since *
- * the bottom 7 bits of the address are always taken to be zero, BTE *
- * transfers are always cacheline-aligned. *
- * *
- ************************************************************************/
-
-
-
-
-#ifdef LITTLE_ENDIAN
-
-typedef union ii_ibda0_u {
- bdrkreg_t ii_ibda0_regval;
- struct {
- bdrkreg_t i_rsvd_1 : 7;
- bdrkreg_t i_addr : 33;
- bdrkreg_t i_rsvd : 24;
- } ii_ibda0_fld_s;
-} ii_ibda0_u_t;
-
-#else
-
-typedef union ii_ibda0_u {
- bdrkreg_t ii_ibda0_regval;
- struct {
- bdrkreg_t i_rsvd : 24;
- bdrkreg_t i_addr : 33;
- bdrkreg_t i_rsvd_1 : 7;
- } ii_ibda0_fld_s;
-} ii_ibda0_u_t;
-
-#endif
-
-
-
-
-/************************************************************************
- * *
- * Writing to this register sets up the attributes of the transfer *
- * and initiates the transfer operation. Reading this register has *
- * the side effect of terminating any transfer in progress. Note: *
- * stopping a transfer midstream could have an adverse impact on the *
- * other BTE. If a BTE stream has to be stopped (due to error *
- * handling for example), both BTE streams should be stopped and *
- * their transfers discarded. *
- * *
- ************************************************************************/
-
-
-
-
-#ifdef LITTLE_ENDIAN
-
-typedef union ii_ibct0_u {
- bdrkreg_t ii_ibct0_regval;
- struct {
- bdrkreg_t i_zerofill : 1;
- bdrkreg_t i_rsvd_2 : 3;
- bdrkreg_t i_notify : 1;
- bdrkreg_t i_rsvd_1 : 3;
- bdrkreg_t i_poison : 1;
- bdrkreg_t i_rsvd : 55;
- } ii_ibct0_fld_s;
-} ii_ibct0_u_t;
-
-#else
-
-typedef union ii_ibct0_u {
- bdrkreg_t ii_ibct0_regval;
- struct {
- bdrkreg_t i_rsvd : 55;
- bdrkreg_t i_poison : 1;
- bdrkreg_t i_rsvd_1 : 3;
- bdrkreg_t i_notify : 1;
- bdrkreg_t i_rsvd_2 : 3;
- bdrkreg_t i_zerofill : 1;
- } ii_ibct0_fld_s;
-} ii_ibct0_u_t;
-
-#endif
-
-
-
-
-/************************************************************************
- * *
- * This register contains the address to which the WINV is sent. *
- * This address has to be cache line aligned. *
- * *
- ************************************************************************/
-
-
-
-
-#ifdef LITTLE_ENDIAN
-
-typedef union ii_ibna0_u {
- bdrkreg_t ii_ibna0_regval;
- struct {
- bdrkreg_t i_rsvd_1 : 7;
- bdrkreg_t i_addr : 33;
- bdrkreg_t i_rsvd : 24;
- } ii_ibna0_fld_s;
-} ii_ibna0_u_t;
-
-#else
-
-typedef union ii_ibna0_u {
- bdrkreg_t ii_ibna0_regval;
- struct {
- bdrkreg_t i_rsvd : 24;
- bdrkreg_t i_addr : 33;
- bdrkreg_t i_rsvd_1 : 7;
- } ii_ibna0_fld_s;
-} ii_ibna0_u_t;
-
-#endif
-
-
-
-
-/************************************************************************
- * *
- * This register contains the programmable level as well as the node *
- * ID and PI unit of the processor to which the interrupt will be *
- * sent. *
- * *
- ************************************************************************/
-
-
-
-
-#ifdef LITTLE_ENDIAN
-
-typedef union ii_ibia0_u {
- bdrkreg_t ii_ibia0_regval;
- struct {
- bdrkreg_t i_pi_id : 1;
- bdrkreg_t i_node_id : 8;
- bdrkreg_t i_rsvd_1 : 7;
- bdrkreg_t i_level : 7;
- bdrkreg_t i_rsvd : 41;
- } ii_ibia0_fld_s;
-} ii_ibia0_u_t;
-
-#else
-
-typedef union ii_ibia0_u {
- bdrkreg_t ii_ibia0_regval;
- struct {
- bdrkreg_t i_rsvd : 41;
- bdrkreg_t i_level : 7;
- bdrkreg_t i_rsvd_1 : 7;
- bdrkreg_t i_node_id : 8;
- bdrkreg_t i_pi_id : 1;
- } ii_ibia0_fld_s;
-} ii_ibia0_u_t;
-
-#endif
-
-
-
-
-/************************************************************************
- * *
- * Description: This register is used to set up the length for a *
- * transfer and then to monitor the progress of that transfer. This *
- * register needs to be initialized before a transfer is started. A *
- * legitimate write to this register will set the Busy bit, clear the *
- * Error bit, and initialize the length to the value desired. *
- * While the transfer is in progress, hardware will decrement the *
- * length field with each successful block that is copied. Once the *
- * transfer completes, hardware will clear the Busy bit. The length *
- * field will also contain the number of cache lines left to be *
- * transferred. *
- * *
- ************************************************************************/
-
-
-
-
-#ifdef LITTLE_ENDIAN
-
-typedef union ii_ibls1_u {
- bdrkreg_t ii_ibls1_regval;
- struct {
- bdrkreg_t i_length : 16;
- bdrkreg_t i_error : 1;
- bdrkreg_t i_rsvd_1 : 3;
- bdrkreg_t i_busy : 1;
- bdrkreg_t i_rsvd : 43;
- } ii_ibls1_fld_s;
-} ii_ibls1_u_t;
-
-#else
-
-typedef union ii_ibls1_u {
- bdrkreg_t ii_ibls1_regval;
- struct {
- bdrkreg_t i_rsvd : 43;
- bdrkreg_t i_busy : 1;
- bdrkreg_t i_rsvd_1 : 3;
- bdrkreg_t i_error : 1;
- bdrkreg_t i_length : 16;
- } ii_ibls1_fld_s;
-} ii_ibls1_u_t;
-
-#endif
-
-
-
-
-/************************************************************************
- * *
- * This register should be loaded before a transfer is started. The *
- * address to be loaded in bits 39:0 is the 40-bit TRex+ physical *
- * address as described in Section 1.3, Figure2 and Figure3. Since *
- * the bottom 7 bits of the address are always taken to be zero, BTE *
- * transfers are always cacheline-aligned. *
- * *
- ************************************************************************/
-
-
-
-
-#ifdef LITTLE_ENDIAN
-
-typedef union ii_ibsa1_u {
- bdrkreg_t ii_ibsa1_regval;
- struct {
- bdrkreg_t i_rsvd_1 : 7;
- bdrkreg_t i_addr : 33;
- bdrkreg_t i_rsvd : 24;
- } ii_ibsa1_fld_s;
-} ii_ibsa1_u_t;
-
-#else
-
-typedef union ii_ibsa1_u {
- bdrkreg_t ii_ibsa1_regval;
- struct {
- bdrkreg_t i_rsvd : 24;
- bdrkreg_t i_addr : 33;
- bdrkreg_t i_rsvd_1 : 7;
- } ii_ibsa1_fld_s;
-} ii_ibsa1_u_t;
-
-#endif
-
-
-
-
-/************************************************************************
- * *
- * This register should be loaded before a transfer is started. The *
- * address to be loaded in bits 39:0 is the 40-bit TRex+ physical *
- * address as described in Section 1.3, Figure2 and Figure3. Since *
- * the bottom 7 bits of the address are always taken to be zero, BTE *
- * transfers are always cacheline-aligned. *
- * *
- ************************************************************************/
-
-
-
-
-#ifdef LITTLE_ENDIAN
-
-typedef union ii_ibda1_u {
- bdrkreg_t ii_ibda1_regval;
- struct {
- bdrkreg_t i_rsvd_1 : 7;
- bdrkreg_t i_addr : 33;
- bdrkreg_t i_rsvd : 24;
- } ii_ibda1_fld_s;
-} ii_ibda1_u_t;
-
-#else
-
-typedef union ii_ibda1_u {
- bdrkreg_t ii_ibda1_regval;
- struct {
- bdrkreg_t i_rsvd : 24;
- bdrkreg_t i_addr : 33;
- bdrkreg_t i_rsvd_1 : 7;
- } ii_ibda1_fld_s;
-} ii_ibda1_u_t;
-
-#endif
-
-
-
-
-/************************************************************************
- * *
- * Writing to this register sets up the attributes of the transfer *
- * and initiates the transfer operation. Reading this register has *
- * the side effect of terminating any transfer in progress. Note: *
- * stopping a transfer midstream could have an adverse impact on the *
- * other BTE. If a BTE stream has to be stopped (due to error *
- * handling for example), both BTE streams should be stopped and *
- * their transfers discarded. *
- * *
- ************************************************************************/
-
-
-
-
-#ifdef LITTLE_ENDIAN
-
-typedef union ii_ibct1_u {
- bdrkreg_t ii_ibct1_regval;
- struct {
- bdrkreg_t i_zerofill : 1;
- bdrkreg_t i_rsvd_2 : 3;
- bdrkreg_t i_notify : 1;
- bdrkreg_t i_rsvd_1 : 3;
- bdrkreg_t i_poison : 1;
- bdrkreg_t i_rsvd : 55;
- } ii_ibct1_fld_s;
-} ii_ibct1_u_t;
-
-#else
-
-typedef union ii_ibct1_u {
- bdrkreg_t ii_ibct1_regval;
- struct {
- bdrkreg_t i_rsvd : 55;
- bdrkreg_t i_poison : 1;
- bdrkreg_t i_rsvd_1 : 3;
- bdrkreg_t i_notify : 1;
- bdrkreg_t i_rsvd_2 : 3;
- bdrkreg_t i_zerofill : 1;
- } ii_ibct1_fld_s;
-} ii_ibct1_u_t;
-
-#endif
-
-
-
-
-/************************************************************************
- * *
- * This register contains the address to which the WINV is sent. *
- * This address has to be cache line aligned. *
- * *
- ************************************************************************/
-
-
-
-
-#ifdef LITTLE_ENDIAN
-
-typedef union ii_ibna1_u {
- bdrkreg_t ii_ibna1_regval;
- struct {
- bdrkreg_t i_rsvd_1 : 7;
- bdrkreg_t i_addr : 33;
- bdrkreg_t i_rsvd : 24;
- } ii_ibna1_fld_s;
-} ii_ibna1_u_t;
-
-#else
-
-typedef union ii_ibna1_u {
- bdrkreg_t ii_ibna1_regval;
- struct {
- bdrkreg_t i_rsvd : 24;
- bdrkreg_t i_addr : 33;
- bdrkreg_t i_rsvd_1 : 7;
- } ii_ibna1_fld_s;
-} ii_ibna1_u_t;
-
-#endif
-
-
-
-
-/************************************************************************
- * *
- * This register contains the programmable level as well as the node *
- * ID and PI unit of the processor to which the interrupt will be *
- * sent. *
- * *
- ************************************************************************/
-
-
-
-
-#ifdef LITTLE_ENDIAN
-
-typedef union ii_ibia1_u {
- bdrkreg_t ii_ibia1_regval;
- struct {
- bdrkreg_t i_pi_id : 1;
- bdrkreg_t i_node_id : 8;
- bdrkreg_t i_rsvd_1 : 7;
- bdrkreg_t i_level : 7;
- bdrkreg_t i_rsvd : 41;
- } ii_ibia1_fld_s;
-} ii_ibia1_u_t;
-
-#else
-
-typedef union ii_ibia1_u {
- bdrkreg_t ii_ibia1_regval;
- struct {
- bdrkreg_t i_rsvd : 41;
- bdrkreg_t i_level : 7;
- bdrkreg_t i_rsvd_1 : 7;
- bdrkreg_t i_node_id : 8;
- bdrkreg_t i_pi_id : 1;
- } ii_ibia1_fld_s;
-} ii_ibia1_u_t;
-
-#endif
-
-
-
-
-/************************************************************************
- * *
- * This register defines the resources that feed information into *
- * the two performance counters located in the IO Performance *
- * Profiling Register. There are 17 different quantities that can be *
- * measured. Given these 17 different options, the two performance *
- * counters have 15 of them in common; menu selections 0 through 0xE *
- * are identical for each performance counter. As for the other two *
- * options, one is available from one performance counter and the *
- * other is available from the other performance counter. Hence, the *
- * II supports all 17*16=272 possible combinations of quantities to *
- * measure. *
- * *
- ************************************************************************/
-
-
-
-
-#ifdef LITTLE_ENDIAN
-
-typedef union ii_ipcr_u {
- bdrkreg_t ii_ipcr_regval;
- struct {
- bdrkreg_t i_ippr0_c : 4;
- bdrkreg_t i_ippr1_c : 4;
- bdrkreg_t i_icct : 8;
- bdrkreg_t i_rsvd : 48;
- } ii_ipcr_fld_s;
-} ii_ipcr_u_t;
-
-#else
-
-typedef union ii_ipcr_u {
- bdrkreg_t ii_ipcr_regval;
- struct {
- bdrkreg_t i_rsvd : 48;
- bdrkreg_t i_icct : 8;
- bdrkreg_t i_ippr1_c : 4;
- bdrkreg_t i_ippr0_c : 4;
- } ii_ipcr_fld_s;
-} ii_ipcr_u_t;
-
-#endif
-
-
-
-
-/************************************************************************
- * *
- * *
- * *
- ************************************************************************/
-
-
-
-
-#ifdef LITTLE_ENDIAN
-
-typedef union ii_ippr_u {
- bdrkreg_t ii_ippr_regval;
- struct {
- bdrkreg_t i_ippr0 : 32;
- bdrkreg_t i_ippr1 : 32;
- } ii_ippr_fld_s;
-} ii_ippr_u_t;
-
-#else
-
-typedef union ii_ippr_u {
- bdrkreg_t ii_ippr_regval;
- struct {
- bdrkreg_t i_ippr1 : 32;
- bdrkreg_t i_ippr0 : 32;
- } ii_ippr_fld_s;
-} ii_ippr_u_t;
-
-#endif
-
-
-
-
-
-
-#endif /* __ASSEMBLY__ */
-
-/************************************************************************
- * *
- * The following defines which were not formed into structures are *
- * probably indentical to another register, and the name of the *
- * register is provided against each of these registers. This *
- * information needs to be checked carefully *
- * *
- * IIO_ICRB1_A IIO_ICRB0_A *
- * IIO_ICRB1_B IIO_ICRB0_B *
- * IIO_ICRB1_C IIO_ICRB0_C *
- * IIO_ICRB1_D IIO_ICRB0_D *
- * IIO_ICRB2_A IIO_ICRB0_A *
- * IIO_ICRB2_B IIO_ICRB0_B *
- * IIO_ICRB2_C IIO_ICRB0_C *
- * IIO_ICRB2_D IIO_ICRB0_D *
- * IIO_ICRB3_A IIO_ICRB0_A *
- * IIO_ICRB3_B IIO_ICRB0_B *
- * IIO_ICRB3_C IIO_ICRB0_C *
- * IIO_ICRB3_D IIO_ICRB0_D *
- * IIO_ICRB4_A IIO_ICRB0_A *
- * IIO_ICRB4_B IIO_ICRB0_B *
- * IIO_ICRB4_C IIO_ICRB0_C *
- * IIO_ICRB4_D IIO_ICRB0_D *
- * IIO_ICRB5_A IIO_ICRB0_A *
- * IIO_ICRB5_B IIO_ICRB0_B *
- * IIO_ICRB5_C IIO_ICRB0_C *
- * IIO_ICRB5_D IIO_ICRB0_D *
- * IIO_ICRB6_A IIO_ICRB0_A *
- * IIO_ICRB6_B IIO_ICRB0_B *
- * IIO_ICRB6_C IIO_ICRB0_C *
- * IIO_ICRB6_D IIO_ICRB0_D *
- * IIO_ICRB7_A IIO_ICRB0_A *
- * IIO_ICRB7_B IIO_ICRB0_B *
- * IIO_ICRB7_C IIO_ICRB0_C *
- * IIO_ICRB7_D IIO_ICRB0_D *
- * IIO_ICRB8_A IIO_ICRB0_A *
- * IIO_ICRB8_B IIO_ICRB0_B *
- * IIO_ICRB8_C IIO_ICRB0_C *
- * IIO_ICRB8_D IIO_ICRB0_D *
- * IIO_ICRB9_A IIO_ICRB0_A *
- * IIO_ICRB9_B IIO_ICRB0_B *
- * IIO_ICRB9_C IIO_ICRB0_C *
- * IIO_ICRB9_D IIO_ICRB0_D *
- * IIO_ICRBA_A IIO_ICRB0_A *
- * IIO_ICRBA_B IIO_ICRB0_B *
- * IIO_ICRBA_C IIO_ICRB0_C *
- * IIO_ICRBA_D IIO_ICRB0_D *
- * IIO_ICRBB_A IIO_ICRB0_A *
- * IIO_ICRBB_B IIO_ICRB0_B *
- * IIO_ICRBB_C IIO_ICRB0_C *
- * IIO_ICRBB_D IIO_ICRB0_D *
- * IIO_ICRBC_A IIO_ICRB0_A *
- * IIO_ICRBC_B IIO_ICRB0_B *
- * IIO_ICRBC_C IIO_ICRB0_C *
- * IIO_ICRBC_D IIO_ICRB0_D *
- * IIO_ICRBD_A IIO_ICRB0_A *
- * IIO_ICRBD_B IIO_ICRB0_B *
- * IIO_ICRBD_C IIO_ICRB0_C *
- * IIO_ICRBD_D IIO_ICRB0_D *
- * IIO_ICRBE_A IIO_ICRB0_A *
- * IIO_ICRBE_B IIO_ICRB0_B *
- * IIO_ICRBE_C IIO_ICRB0_C *
- * IIO_ICRBE_D IIO_ICRB0_D *
- * *
- ************************************************************************/
-
-
-/************************************************************************
- * *
- * MAKE ALL ADDITIONS AFTER THIS LINE *
- * *
- ************************************************************************/
-
-
-
-
-
-#endif /* _ASM_IA64_SN_SN1_HUBIO_H */
+++ /dev/null
-/* $Id$
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Copyright (C) 1992 - 1997, 2000-2002 Silicon Graphics, Inc. All rights reserved.
- */
-#ifndef _ASM_IA64_SN_SN1_HUBIO_NEXT_H
-#define _ASM_IA64_SN_SN1_HUBIO_NEXT_H
-
-/*
- * Slightly friendlier names for some common registers.
- */
-#define IIO_WIDGET IIO_WID /* Widget identification */
-#define IIO_WIDGET_STAT IIO_WSTAT /* Widget status register */
-#define IIO_WIDGET_CTRL IIO_WCR /* Widget control register */
-#define IIO_PROTECT IIO_ILAPR /* IO interface protection */
-#define IIO_PROTECT_OVRRD IIO_ILAPO /* IO protect override */
-#define IIO_OUTWIDGET_ACCESS IIO_IOWA /* Outbound widget access */
-#define IIO_INWIDGET_ACCESS IIO_IIWA /* Inbound widget access */
-#define IIO_INDEV_ERR_MASK IIO_IIDEM /* Inbound device error mask */
-#define IIO_LLP_CSR IIO_ILCSR /* LLP control and status */
-#define IIO_LLP_LOG IIO_ILLR /* LLP log */
-#define IIO_XTALKCC_TOUT IIO_IXCC /* Xtalk credit count timeout*/
-#define IIO_XTALKTT_TOUT IIO_IXTT /* Xtalk tail timeout */
-#define IIO_IO_ERR_CLR IIO_IECLR /* IO error clear */
-#define IIO_IGFX_0 IIO_IGFX0
-#define IIO_IGFX_1 IIO_IGFX1
-#define IIO_IBCT_0 IIO_IBCT0
-#define IIO_IBCT_1 IIO_IBCT1
-#define IIO_IBLS_0 IIO_IBLS0
-#define IIO_IBLS_1 IIO_IBLS1
-#define IIO_IBSA_0 IIO_IBSA0
-#define IIO_IBSA_1 IIO_IBSA1
-#define IIO_IBDA_0 IIO_IBDA0
-#define IIO_IBDA_1 IIO_IBDA1
-#define IIO_IBNA_0 IIO_IBNA0
-#define IIO_IBNA_1 IIO_IBNA1
-#define IIO_IBIA_0 IIO_IBIA0
-#define IIO_IBIA_1 IIO_IBIA1
-#define IIO_IOPRB_0 IIO_IPRB0
-#define IIO_PRTE_0 IIO_IPRTE0 /* PIO Read address table entry 0 */
-#define IIO_PRTE(_x) (IIO_PRTE_0 + (8 * (_x)))
-#define IIO_NUM_IPRBS (9)
-#define IIO_WIDPRTE(x) IIO_PRTE(((x) - 8)) /* widget ID to its PRTE num */
-
-#define IIO_LLP_CSR_IS_UP 0x00002000
-#define IIO_LLP_CSR_LLP_STAT_MASK 0x00003000
-#define IIO_LLP_CSR_LLP_STAT_SHFT 12
-
-#define IIO_LLP_CB_MAX 0xffff /* in ILLR CB_CNT, Max Check Bit errors */
-#define IIO_LLP_SN_MAX 0xffff /* in ILLR SN_CNT, Max Sequence Number errors */
-
-/* key to IIO_PROTECT_OVRRD */
-#define IIO_PROTECT_OVRRD_KEY 0x53474972756c6573ull /* "SGIrules" */
-
-/* BTE register names */
-#define IIO_BTE_STAT_0 IIO_IBLS_0 /* Also BTE length/status 0 */
-#define IIO_BTE_SRC_0 IIO_IBSA_0 /* Also BTE source address 0 */
-#define IIO_BTE_DEST_0 IIO_IBDA_0 /* Also BTE dest. address 0 */
-#define IIO_BTE_CTRL_0 IIO_IBCT_0 /* Also BTE control/terminate 0 */
-#define IIO_BTE_NOTIFY_0 IIO_IBNA_0 /* Also BTE notification 0 */
-#define IIO_BTE_INT_0 IIO_IBIA_0 /* Also BTE interrupt 0 */
-#define IIO_BTE_OFF_0 0 /* Base offset from BTE 0 regs. */
-#define IIO_BTE_OFF_1 (IIO_IBLS_1 - IIO_IBLS_0) /* Offset from base to BTE 1 */
-
-/* BTE register offsets from base */
-#define BTEOFF_STAT 0
-#define BTEOFF_SRC (IIO_BTE_SRC_0 - IIO_BTE_STAT_0)
-#define BTEOFF_DEST (IIO_BTE_DEST_0 - IIO_BTE_STAT_0)
-#define BTEOFF_CTRL (IIO_BTE_CTRL_0 - IIO_BTE_STAT_0)
-#define BTEOFF_NOTIFY (IIO_BTE_NOTIFY_0 - IIO_BTE_STAT_0)
-#define BTEOFF_INT (IIO_BTE_INT_0 - IIO_BTE_STAT_0)
-
-
-/* names used in hub_diags.c; carried over from SN0 */
-#define IIO_BASE_BTE0 IIO_IBLS_0
-#define IIO_BASE_BTE1 IIO_IBLS_1
-
-/*
- * Macro which takes the widget number, and returns the
- * IO PRB address of that widget.
- * value _x is expected to be a widget number in the range
- * 0, 8 - 0xF
- */
-#define IIO_IOPRB(_x) (IIO_IOPRB_0 + ( ( (_x) < HUB_WIDGET_ID_MIN ? \
- (_x) : \
- (_x) - (HUB_WIDGET_ID_MIN-1)) << 3) )
-
-
-/* GFX Flow Control Node/Widget Register */
-#define IIO_IGFX_W_NUM_BITS 4 /* size of widget num field */
-#define IIO_IGFX_W_NUM_MASK ((1<<IIO_IGFX_W_NUM_BITS)-1)
-#define IIO_IGFX_W_NUM_SHIFT 0
-#define IIO_IGFX_PI_NUM_BITS 1 /* size of PI num field */
-#define IIO_IGFX_PI_NUM_MASK ((1<<IIO_IGFX_PI_NUM_BITS)-1)
-#define IIO_IGFX_PI_NUM_SHIFT 4
-#define IIO_IGFX_N_NUM_BITS 8 /* size of node num field */
-#define IIO_IGFX_N_NUM_MASK ((1<<IIO_IGFX_N_NUM_BITS)-1)
-#define IIO_IGFX_N_NUM_SHIFT 5
-#define IIO_IGFX_P_NUM_BITS 1 /* size of processor num field */
-#define IIO_IGFX_P_NUM_MASK ((1<<IIO_IGFX_P_NUM_BITS)-1)
-#define IIO_IGFX_P_NUM_SHIFT 16
-#define IIO_IGFX_INIT(widget, pi, node, cpu) (\
- (((widget) & IIO_IGFX_W_NUM_MASK) << IIO_IGFX_W_NUM_SHIFT) | \
- (((pi) & IIO_IGFX_PI_NUM_MASK)<< IIO_IGFX_PI_NUM_SHIFT)| \
- (((node) & IIO_IGFX_N_NUM_MASK) << IIO_IGFX_N_NUM_SHIFT) | \
- (((cpu) & IIO_IGFX_P_NUM_MASK) << IIO_IGFX_P_NUM_SHIFT))
-
-
-/* Scratch registers (all bits available) */
-#define IIO_SCRATCH_REG0 IIO_ISCR0
-#define IIO_SCRATCH_REG1 IIO_ISCR1
-#define IIO_SCRATCH_MASK 0xffffffffffffffff
-
-#define IIO_SCRATCH_BIT0_0 0x0000000000000001
-#define IIO_SCRATCH_BIT0_1 0x0000000000000002
-#define IIO_SCRATCH_BIT0_2 0x0000000000000004
-#define IIO_SCRATCH_BIT0_3 0x0000000000000008
-#define IIO_SCRATCH_BIT0_4 0x0000000000000010
-#define IIO_SCRATCH_BIT0_5 0x0000000000000020
-#define IIO_SCRATCH_BIT0_6 0x0000000000000040
-#define IIO_SCRATCH_BIT0_7 0x0000000000000080
-#define IIO_SCRATCH_BIT0_8 0x0000000000000100
-#define IIO_SCRATCH_BIT0_9 0x0000000000000200
-#define IIO_SCRATCH_BIT0_A 0x0000000000000400
-
-#define IIO_SCRATCH_BIT1_0 0x0000000000000001
-#define IIO_SCRATCH_BIT1_1 0x0000000000000002
-/* IO Translation Table Entries */
-#define IIO_NUM_ITTES 7 /* ITTEs numbered 0..6 */
- /* Hw manuals number them 1..7! */
-/*
- * IIO_IMEM Register fields.
- */
-#define IIO_IMEM_W0ESD 0x1 /* Widget 0 shut down due to error */
-#define IIO_IMEM_B0ESD (1 << 4) /* BTE 0 shut down due to error */
-#define IIO_IMEM_B1ESD (1 << 8) /* BTE 1 Shut down due to error */
-
-/*
- * As a permanent workaround for a bug in the PI side of the hub, we've
- * redefined big window 7 as small window 0.
- XXX does this still apply for SN1??
- */
-#define HUB_NUM_BIG_WINDOW (IIO_NUM_ITTES - 1)
-
-/*
- * Use the top big window as a surrogate for the first small window
- */
-#define SWIN0_BIGWIN HUB_NUM_BIG_WINDOW
-
-#define IIO_NUM_PRTES 8 /* Total number of PRB table entries */
-
-#define ILCSR_WARM_RESET 0x100
-
-/*
- * CRB manipulation macros
- * The CRB macros are slightly complicated, since there are up to
- * four registers associated with each CRB entry.
- */
-#define IIO_NUM_CRBS 15 /* Number of CRBs */
-#define IIO_NUM_NORMAL_CRBS 12 /* Number of regular CRB entries */
-#define IIO_NUM_PC_CRBS 4 /* Number of partial cache CRBs */
-#define IIO_ICRB_OFFSET 8
-#define IIO_ICRB_0 IIO_ICRB0_A
-#define IIO_ICRB_ADDR_SHFT 2 /* Shift to get proper address */
-/* XXX - This is now tuneable:
- #define IIO_FIRST_PC_ENTRY 12
- */
-
-#define IIO_ICRB_A(_x) (IIO_ICRB_0 + (4 * IIO_ICRB_OFFSET * (_x)))
-#define IIO_ICRB_B(_x) (IIO_ICRB_A(_x) + 1*IIO_ICRB_OFFSET)
-#define IIO_ICRB_C(_x) (IIO_ICRB_A(_x) + 2*IIO_ICRB_OFFSET)
-#define IIO_ICRB_D(_x) (IIO_ICRB_A(_x) + 3*IIO_ICRB_OFFSET)
-
-#define TNUM_TO_WIDGET_DEV(_tnum) (_tnum & 0x7)
-
-/*
- * values for "ecode" field
- */
-#define IIO_ICRB_ECODE_DERR 0 /* Directory error due to IIO access */
-#define IIO_ICRB_ECODE_PERR 1 /* Poison error on IO access */
-#define IIO_ICRB_ECODE_WERR 2 /* Write error by IIO access
- * e.g. WINV to a Read only line. */
-#define IIO_ICRB_ECODE_AERR 3 /* Access error caused by IIO access */
-#define IIO_ICRB_ECODE_PWERR 4 /* Error on partial write */
-#define IIO_ICRB_ECODE_PRERR 5 /* Error on partial read */
-#define IIO_ICRB_ECODE_TOUT 6 /* CRB timeout before deallocating */
-#define IIO_ICRB_ECODE_XTERR 7 /* Incoming xtalk pkt had error bit */
-
-/*
- * Number of credits Hub widget has while sending req/response to
- * xbow.
- * Value of 3 is required by Xbow 1.1
- * We may be able to increase this to 4 with Xbow 1.2.
- */
-#define HUBII_XBOW_CREDIT 3
-#define HUBII_XBOW_REV2_CREDIT 4
-
-/*
- * Number of credits that xtalk devices should use when communicating
- * with a Bedrock (depth of Bedrock's queue).
- */
-#define HUB_CREDIT 4
-
-/*
- * Some IIO_PRB fields
- */
-#define IIO_PRB_MULTI_ERR (1LL << 63)
-#define IIO_PRB_SPUR_RD (1LL << 51)
-#define IIO_PRB_SPUR_WR (1LL << 50)
-#define IIO_PRB_RD_TO (1LL << 49)
-#define IIO_PRB_ERROR (1LL << 48)
-
-/*************************************************************************
-
- Some of the IIO field masks and shifts are defined here.
- This is in order to maintain compatibility in SN0 and SN1 code
-
-**************************************************************************/
-
-/*
- * ICMR register fields
- * (Note: the IIO_ICMR_P_CNT and IIO_ICMR_PC_VLD from Hub are not
- * present in Bedrock)
- */
-
-#define IIO_ICMR_CRB_VLD_SHFT 20
-#define IIO_ICMR_CRB_VLD_MASK (0x7fffUL << IIO_ICMR_CRB_VLD_SHFT)
-
-#define IIO_ICMR_FC_CNT_SHFT 16
-#define IIO_ICMR_FC_CNT_MASK (0xf << IIO_ICMR_FC_CNT_SHFT)
-
-#define IIO_ICMR_C_CNT_SHFT 4
-#define IIO_ICMR_C_CNT_MASK (0xf << IIO_ICMR_C_CNT_SHFT)
-
-#define IIO_ICMR_PRECISE (1UL << 52)
-#define IIO_ICMR_CLR_RPPD (1UL << 13)
-#define IIO_ICMR_CLR_RQPD (1UL << 12)
-
-/*
- * IIO PIO Deallocation register field masks : (IIO_IPDR)
- XXX present but not needed in bedrock? See the manual.
- */
-#define IIO_IPDR_PND (1 << 4)
-
-/*
- * IIO CRB deallocation register field masks: (IIO_ICDR)
- */
-#define IIO_ICDR_PND (1 << 4)
-
-/*
- * IO BTE Length/Status (IIO_IBLS) register bit field definitions
- */
-#define IBLS_BUSY (0x1 << 20)
-#define IBLS_ERROR_SHFT 16
-#define IBLS_ERROR (0x1 << IBLS_ERROR_SHFT)
-#define IBLS_LENGTH_MASK 0xffff
-
-/*
- * IO BTE Control/Terminate register (IBCT) register bit field definitions
- */
-#define IBCT_POISON (0x1 << 8)
-#define IBCT_NOTIFY (0x1 << 4)
-#define IBCT_ZFIL_MODE (0x1 << 0)
-
-/*
- * IIO Incoming Error Packet Header (IIO_IIEPH1/IIO_IIEPH2)
- */
-#define IIEPH1_VALID (1 << 44)
-#define IIEPH1_OVERRUN (1 << 40)
-#define IIEPH1_ERR_TYPE_SHFT 32
-#define IIEPH1_ERR_TYPE_MASK 0xf
-#define IIEPH1_SOURCE_SHFT 20
-#define IIEPH1_SOURCE_MASK 11
-#define IIEPH1_SUPPL_SHFT 8
-#define IIEPH1_SUPPL_MASK 11
-#define IIEPH1_CMD_SHFT 0
-#define IIEPH1_CMD_MASK 7
-
-#define IIEPH2_TAIL (1 << 40)
-#define IIEPH2_ADDRESS_SHFT 0
-#define IIEPH2_ADDRESS_MASK 38
-
-#define IIEPH1_ERR_SHORT_REQ 2
-#define IIEPH1_ERR_SHORT_REPLY 3
-#define IIEPH1_ERR_LONG_REQ 4
-#define IIEPH1_ERR_LONG_REPLY 5
-
-/*
- * IO Error Clear register bit field definitions
- */
-#define IECLR_PI1_FWD_INT (1 << 31) /* clear PI1_FORWARD_INT in iidsr */
-#define IECLR_PI0_FWD_INT (1 << 30) /* clear PI0_FORWARD_INT in iidsr */
-#define IECLR_SPUR_RD_HDR (1 << 29) /* clear valid bit in ixss reg */
-#define IECLR_BTE1 (1 << 18) /* clear bte error 1 */
-#define IECLR_BTE0 (1 << 17) /* clear bte error 0 */
-#define IECLR_CRAZY (1 << 16) /* clear crazy bit in wstat reg */
-#define IECLR_PRB_F (1 << 15) /* clear err bit in PRB_F reg */
-#define IECLR_PRB_E (1 << 14) /* clear err bit in PRB_E reg */
-#define IECLR_PRB_D (1 << 13) /* clear err bit in PRB_D reg */
-#define IECLR_PRB_C (1 << 12) /* clear err bit in PRB_C reg */
-#define IECLR_PRB_B (1 << 11) /* clear err bit in PRB_B reg */
-#define IECLR_PRB_A (1 << 10) /* clear err bit in PRB_A reg */
-#define IECLR_PRB_9 (1 << 9) /* clear err bit in PRB_9 reg */
-#define IECLR_PRB_8 (1 << 8) /* clear err bit in PRB_8 reg */
-#define IECLR_PRB_0 (1 << 0) /* clear err bit in PRB_0 reg */
-
-/*
- * IIO CRB control register Fields: IIO_ICCR
- */
-#define IIO_ICCR_PENDING (0x10000)
-#define IIO_ICCR_CMD_MASK (0xFF)
-#define IIO_ICCR_CMD_SHFT (7)
-#define IIO_ICCR_CMD_NOP (0x0) /* No Op */
-#define IIO_ICCR_CMD_WAKE (0x100) /* Reactivate CRB entry and process */
-#define IIO_ICCR_CMD_TIMEOUT (0x200) /* Make CRB timeout & mark invalid */
-#define IIO_ICCR_CMD_EJECT (0x400) /* Contents of entry written to memory
- * via a WB
- */
-#define IIO_ICCR_CMD_FLUSH (0x800)
-
-/*
- *
- * CRB Register description.
- *
- * WARNING * WARNING * WARNING * WARNING * WARNING * WARNING * WARNING
- * WARNING * WARNING * WARNING * WARNING * WARNING * WARNING * WARNING
- * WARNING * WARNING * WARNING * WARNING * WARNING * WARNING * WARNING
- * WARNING * WARNING * WARNING * WARNING * WARNING * WARNING * WARNING
- * WARNING * WARNING * WARNING * WARNING * WARNING * WARNING * WARNING
- *
- * Many of the fields in CRB are status bits used by hardware
- * for implementation of the protocol. It's very dangerous to
- * mess around with the CRB registers.
- *
- * It's OK to read the CRB registers and try to make sense out of the
- * fields in CRB.
- *
- * Updating CRB requires all activities in Hub IIO to be quiesced.
- * otherwise, a write to CRB could corrupt other CRB entries.
- * CRBs are here only as a back door peek to hub IIO's status.
- * Quiescing implies no dmas no PIOs
- * either directly from the cpu or from sn0net.
- * this is not something that can be done easily. So, AVOID updating
- * CRBs.
- */
-
-#ifndef __ASSEMBLY__
-
-/*
- * Easy access macros for CRBs, all 4 registers (A-D)
- */
-typedef ii_icrb0_a_u_t icrba_t; /* what it was called on SN0/hub */
-#define a_error ii_icrb0_a_fld_s.ia_error
-#define a_ecode ii_icrb0_a_fld_s.ia_errcode
-#define a_lnetuce ii_icrb0_a_fld_s.ia_ln_uce
-#define a_mark ii_icrb0_a_fld_s.ia_mark
-#define a_xerr ii_icrb0_a_fld_s.ia_xt_err
-#define a_sidn ii_icrb0_a_fld_s.ia_sidn
-#define a_tnum ii_icrb0_a_fld_s.ia_tnum
-#define a_addr ii_icrb0_a_fld_s.ia_addr
-#define a_valid ii_icrb0_a_fld_s.ia_vld
-#define a_iow ii_icrb0_a_fld_s.ia_iow
-#define a_regvalue ii_icrb0_a_regval
-
-typedef ii_icrb0_b_u_t icrbb_t;
-#define b_btenum ii_icrb0_b_fld_s.ib_bte_num
-#define b_cohtrans ii_icrb0_b_fld_s.ib_ct
-#define b_xtsize ii_icrb0_b_fld_s.ib_size
-#define b_source ii_icrb0_b_fld_s.ib_source
-#define b_imsgtype ii_icrb0_b_fld_s.ib_imsgtype
-#define b_imsg ii_icrb0_b_fld_s.ib_imsg
-#define b_initiator ii_icrb0_b_fld_s.ib_init
-#define b_regvalue ii_icrb0_b_regval
-
-typedef ii_icrb0_c_u_t icrbc_t;
-#define c_pricnt ii_icrb0_c_fld_s.ic_pr_cnt
-#define c_pripsc ii_icrb0_c_fld_s.ic_pr_psc
-#define c_bteop ii_icrb0_c_fld_s.ic_bte_op
-#define c_bteaddr ii_icrb0_c_fld_s.ic_pa_be /* ic_pa_be fld has 2 names*/
-#define c_benable ii_icrb0_c_fld_s.ic_pa_be /* ic_pa_be fld has 2 names*/
-#define c_suppl ii_icrb0_c_fld_s.ic_suppl
-#define c_barrop ii_icrb0_c_fld_s.ic_bo
-#define c_doresp ii_icrb0_c_fld_s.ic_resprqd
-#define c_gbr ii_icrb0_c_fld_s.ic_gbr
-#define c_regvalue ii_icrb0_c_regval
-
-typedef ii_icrb0_d_u_t icrbd_t;
-#define icrbd_ctxtvld ii_icrb0_d_fld_s.id_cvld
-#define icrbd_toutvld ii_icrb0_d_fld_s.id_tvld
-#define icrbd_context ii_icrb0_d_fld_s.id_context
-#define d_regvalue ii_icrb0_d_regval
-
-#endif /* __ASSEMBLY__ */
-
-/* Number of widgets supported by hub */
-#define HUB_NUM_WIDGET 9
-#define HUB_WIDGET_ID_MIN 0x8
-#define HUB_WIDGET_ID_MAX 0xf
-
-#define HUB_WIDGET_PART_NUM 0xc110
-#define MAX_HUBS_PER_XBOW 2
-
-#ifndef __ASSEMBLY__
-/* A few more #defines for backwards compatibility */
-#define iprb_t ii_iprb0_u_t
-#define iprb_regval ii_iprb0_regval
-#define iprb_mult_err ii_iprb0_fld_s.i_mult_err
-#define iprb_spur_rd ii_iprb0_fld_s.i_spur_rd
-#define iprb_spur_wr ii_iprb0_fld_s.i_spur_wr
-#define iprb_rd_to ii_iprb0_fld_s.i_rd_to
-#define iprb_ovflow ii_iprb0_fld_s.i_of_cnt
-#define iprb_error ii_iprb0_fld_s.i_error
-#define iprb_ff ii_iprb0_fld_s.i_f
-#define iprb_mode ii_iprb0_fld_s.i_m
-#define iprb_bnakctr ii_iprb0_fld_s.i_nb
-#define iprb_anakctr ii_iprb0_fld_s.i_na
-#define iprb_xtalkctr ii_iprb0_fld_s.i_c
-#endif
-
-#define LNK_STAT_WORKING 0x2
-
-#define IIO_WSTAT_ECRAZY (1ULL << 32) /* Hub gone crazy */
-#define IIO_WSTAT_TXRETRY (1ULL << 9) /* Hub Tx Retry timeout */
-#define IIO_WSTAT_TXRETRY_MASK (0x7F) /* should be 0xFF?? */
-#define IIO_WSTAT_TXRETRY_SHFT (16)
-#define IIO_WSTAT_TXRETRY_CNT(w) (((w) >> IIO_WSTAT_TXRETRY_SHFT) & \
- IIO_WSTAT_TXRETRY_MASK)
-
-/* Number of II perf. counters we can multiplex at once */
-
-#define IO_PERF_SETS 32
-
-#if __KERNEL__
-#ifndef __ASSEMBLY__
-/* XXX moved over from SN/SN0/hubio.h -- each should be checked for SN1 */
-#include <asm/sn/alenlist.h>
-#include <asm/sn/dmamap.h>
-#include <asm/sn/driver.h>
-#include <asm/sn/xtalk/xtalk.h>
-
-/* Bit for the widget in inbound access register */
-#define IIO_IIWA_WIDGET(_w) ((uint64_t)(1ULL << _w))
-/* Bit for the widget in outbound access register */
-#define IIO_IOWA_WIDGET(_w) ((uint64_t)(1ULL << _w))
-
-/* NOTE: The following define assumes that we are going to get
- * widget numbers from 8 thru F and the device numbers within
- * widget from 0 thru 7.
- */
-#define IIO_IIDEM_WIDGETDEV_MASK(w, d) ((uint64_t)(1ULL << (8 * ((w) - 8) + (d))))
-
-/* IO Interrupt Destination Register */
-#define IIO_IIDSR_SENT_SHIFT 28
-#define IIO_IIDSR_SENT_MASK 0x10000000
-#define IIO_IIDSR_ENB_SHIFT 24
-#define IIO_IIDSR_ENB_MASK 0x01000000
-#define IIO_IIDSR_NODE_SHIFT 8
-#define IIO_IIDSR_NODE_MASK 0x0000ff00
-#define IIO_IIDSR_PI_ID_SHIFT 8
-#define IIO_IIDSR_PI_ID_MASK 0x00000010
-#define IIO_IIDSR_LVL_SHIFT 0
-#define IIO_IIDSR_LVL_MASK 0x0000007f
-
-/* Xtalk timeout threshhold register (IIO_IXTT) */
-#define IXTT_RRSP_TO_SHFT 55 /* read response timeout */
-#define IXTT_RRSP_TO_MASK (0x1FULL << IXTT_RRSP_TO_SHFT)
-#define IXTT_RRSP_PS_SHFT 32 /* read responsed TO prescalar */
-#define IXTT_RRSP_PS_MASK (0x7FFFFFULL << IXTT_RRSP_PS_SHFT)
-#define IXTT_TAIL_TO_SHFT 0 /* tail timeout counter threshold */
-#define IXTT_TAIL_TO_MASK (0x3FFFFFFULL << IXTT_TAIL_TO_SHFT)
-
-/*
- * The IO LLP control status register and widget control register
- */
-
-#ifdef LITTLE_ENDIAN
-
-typedef union hubii_wcr_u {
- uint64_t wcr_reg_value;
- struct {
- uint64_t wcr_widget_id: 4, /* LLP crossbar credit */
- wcr_tag_mode: 1, /* Tag mode */
- wcr_rsvd1: 8, /* Reserved */
- wcr_xbar_crd: 3, /* LLP crossbar credit */
- wcr_f_bad_pkt: 1, /* Force bad llp pkt enable */
- wcr_dir_con: 1, /* widget direct connect */
- wcr_e_thresh: 5, /* elasticity threshold */
- wcr_rsvd: 41; /* unused */
- } wcr_fields_s;
-} hubii_wcr_t;
-
-#else
-
-typedef union hubii_wcr_u {
- uint64_t wcr_reg_value;
- struct {
- uint64_t wcr_rsvd: 41, /* unused */
- wcr_e_thresh: 5, /* elasticity threshold */
- wcr_dir_con: 1, /* widget direct connect */
- wcr_f_bad_pkt: 1, /* Force bad llp pkt enable */
- wcr_xbar_crd: 3, /* LLP crossbar credit */
- wcr_rsvd1: 8, /* Reserved */
- wcr_tag_mode: 1, /* Tag mode */
- wcr_widget_id: 4; /* LLP crossbar credit */
- } wcr_fields_s;
-} hubii_wcr_t;
-
-#endif
-
-#define iwcr_dir_con wcr_fields_s.wcr_dir_con
-
-/* The structures below are defined to extract and modify the ii
-performance registers */
-
-/* io_perf_sel allows the caller to specify what tests will be
- performed */
-#ifdef LITTLE_ENDIAN
-
-typedef union io_perf_sel {
- uint64_t perf_sel_reg;
- struct {
- uint64_t perf_ippr0 : 4,
- perf_ippr1 : 4,
- perf_icct : 8,
- perf_rsvd : 48;
- } perf_sel_bits;
-} io_perf_sel_t;
-
-#else
-
-typedef union io_perf_sel {
- uint64_t perf_sel_reg;
- struct {
- uint64_t perf_rsvd : 48,
- perf_icct : 8,
- perf_ippr1 : 4,
- perf_ippr0 : 4;
- } perf_sel_bits;
-} io_perf_sel_t;
-
-#endif
-
-/* io_perf_cnt is to extract the count from the hub registers. Due to
- hardware problems there is only one counter, not two. */
-
-#ifdef LITTLE_ENDIAN
-
-typedef union io_perf_cnt {
- uint64_t perf_cnt;
- struct {
- uint64_t perf_cnt : 20,
- perf_rsvd2 : 12,
- perf_rsvd1 : 32;
- } perf_cnt_bits;
-
-} io_perf_cnt_t;
-
-#else
-
-typedef union io_perf_cnt {
- uint64_t perf_cnt;
- struct {
- uint64_t perf_rsvd1 : 32,
- perf_rsvd2 : 12,
- perf_cnt : 20;
- } perf_cnt_bits;
-
-} io_perf_cnt_t;
-
-#endif
-
-#ifdef LITTLE_ENDIAN
-
-typedef union iprte_a {
- bdrkreg_t entry;
- struct {
- bdrkreg_t i_rsvd_1 : 3;
- bdrkreg_t i_addr : 38;
- bdrkreg_t i_init : 3;
- bdrkreg_t i_source : 8;
- bdrkreg_t i_rsvd : 2;
- bdrkreg_t i_widget : 4;
- bdrkreg_t i_to_cnt : 5;
- bdrkreg_t i_vld : 1;
- } iprte_fields;
-} iprte_a_t;
-
-#else
-
-typedef union iprte_a {
- bdrkreg_t entry;
- struct {
- bdrkreg_t i_vld : 1;
- bdrkreg_t i_to_cnt : 5;
- bdrkreg_t i_widget : 4;
- bdrkreg_t i_rsvd : 2;
- bdrkreg_t i_source : 8;
- bdrkreg_t i_init : 3;
- bdrkreg_t i_addr : 38;
- bdrkreg_t i_rsvd_1 : 3;
- } iprte_fields;
-} iprte_a_t;
-
-#endif
-
-/* PIO MANAGEMENT */
-typedef struct hub_piomap_s *hub_piomap_t;
-
-extern hub_piomap_t
-hub_piomap_alloc(devfs_handle_t dev, /* set up mapping for this device */
- device_desc_t dev_desc, /* device descriptor */
- iopaddr_t xtalk_addr, /* map for this xtalk_addr range */
- size_t byte_count,
- size_t byte_count_max, /* maximum size of a mapping */
- unsigned flags); /* defined in sys/pio.h */
-
-extern void hub_piomap_free(hub_piomap_t hub_piomap);
-
-extern caddr_t
-hub_piomap_addr(hub_piomap_t hub_piomap, /* mapping resources */
- iopaddr_t xtalk_addr, /* map for this xtalk addr */
- size_t byte_count); /* map this many bytes */
-
-extern void
-hub_piomap_done(hub_piomap_t hub_piomap);
-
-extern caddr_t
-hub_piotrans_addr( devfs_handle_t dev, /* translate to this device */
- device_desc_t dev_desc, /* device descriptor */
- iopaddr_t xtalk_addr, /* Crosstalk address */
- size_t byte_count, /* map this many bytes */
- unsigned flags); /* (currently unused) */
-
-/* DMA MANAGEMENT */
-typedef struct hub_dmamap_s *hub_dmamap_t;
-
-extern hub_dmamap_t
-hub_dmamap_alloc( devfs_handle_t dev, /* set up mappings for dev */
- device_desc_t dev_desc, /* device descriptor */
- size_t byte_count_max, /* max size of a mapping */
- unsigned flags); /* defined in dma.h */
-
-extern void
-hub_dmamap_free(hub_dmamap_t dmamap);
-
-extern iopaddr_t
-hub_dmamap_addr( hub_dmamap_t dmamap, /* use mapping resources */
- paddr_t paddr, /* map for this address */
- size_t byte_count); /* map this many bytes */
-
-extern alenlist_t
-hub_dmamap_list( hub_dmamap_t dmamap, /* use mapping resources */
- alenlist_t alenlist, /* map this Addr/Length List */
- unsigned flags);
-
-extern void
-hub_dmamap_done( hub_dmamap_t dmamap); /* done w/ mapping resources */
-
-extern iopaddr_t
-hub_dmatrans_addr( devfs_handle_t dev, /* translate for this device */
- device_desc_t dev_desc, /* device descriptor */
- paddr_t paddr, /* system physical address */
- size_t byte_count, /* length */
- unsigned flags); /* defined in dma.h */
-
-extern alenlist_t
-hub_dmatrans_list( devfs_handle_t dev, /* translate for this device */
- device_desc_t dev_desc, /* device descriptor */
- alenlist_t palenlist, /* system addr/length list */
- unsigned flags); /* defined in dma.h */
-
-extern void
-hub_dmamap_drain( hub_dmamap_t map);
-
-extern void
-hub_dmaaddr_drain( devfs_handle_t vhdl,
- paddr_t addr,
- size_t bytes);
-
-extern void
-hub_dmalist_drain( devfs_handle_t vhdl,
- alenlist_t list);
-
-
-/* INTERRUPT MANAGEMENT */
-typedef struct hub_intr_s *hub_intr_t;
-
-extern hub_intr_t
-hub_intr_alloc( devfs_handle_t dev, /* which device */
- device_desc_t dev_desc, /* device descriptor */
- devfs_handle_t owner_dev); /* owner of this interrupt */
-
-extern hub_intr_t
-hub_intr_alloc_nothd(devfs_handle_t dev, /* which device */
- device_desc_t dev_desc, /* device descriptor */
- devfs_handle_t owner_dev); /* owner of this interrupt */
-
-extern void
-hub_intr_free(hub_intr_t intr_hdl);
-
-extern int
-hub_intr_connect( hub_intr_t intr_hdl, /* xtalk intr resource hndl */
- xtalk_intr_setfunc_t setfunc,
- /* func to set intr hw */
- void *setfunc_arg); /* arg to setfunc */
-
-extern void
-hub_intr_disconnect(hub_intr_t intr_hdl);
-
-extern devfs_handle_t
-hub_intr_cpu_get(hub_intr_t intr_hdl);
-
-/* CONFIGURATION MANAGEMENT */
-
-extern void
-hub_provider_startup(devfs_handle_t hub);
-
-extern void
-hub_provider_shutdown(devfs_handle_t hub);
-
-#define HUB_PIO_CONVEYOR 0x1 /* PIO in conveyor belt mode */
-#define HUB_PIO_FIRE_N_FORGET 0x2 /* PIO in fire-and-forget mode */
-
-/* Flags that make sense to hub_widget_flags_set */
-#define HUB_WIDGET_FLAGS ( \
- HUB_PIO_CONVEYOR | \
- HUB_PIO_FIRE_N_FORGET \
- )
-
-
-typedef int hub_widget_flags_t;
-
-/* Set the PIO mode for a widget. These two functions perform the
- * same operation, but hub_device_flags_set() takes a hardware graph
- * vertex while hub_widget_flags_set() takes a nasid and widget
- * number. In most cases, hub_device_flags_set() should be used.
- */
-extern int hub_widget_flags_set(nasid_t nasid,
- xwidgetnum_t widget_num,
- hub_widget_flags_t flags);
-
-/* Depending on the flags set take the appropriate actions */
-extern int hub_device_flags_set(devfs_handle_t widget_dev,
- hub_widget_flags_t flags);
-
-
-/* Error Handling. */
-extern int hub_ioerror_handler(devfs_handle_t, int, int, struct io_error_s *);
-extern int kl_ioerror_handler(cnodeid_t, cnodeid_t, cpuid_t,
- int, paddr_t, caddr_t, ioerror_mode_t);
-extern void hub_widget_reset(devfs_handle_t, xwidgetnum_t);
-extern int hub_error_devenable(devfs_handle_t, int, int);
-extern void hub_widgetdev_enable(devfs_handle_t, int);
-extern void hub_widgetdev_shutdown(devfs_handle_t, int);
-extern int hub_dma_enabled(devfs_handle_t);
-
-#endif /* __ASSEMBLY__ */
-#endif /* _KERNEL */
-#endif /* _ASM_IA64_SN_SN1_HUBIO_NEXT_H */
+++ /dev/null
-/* $Id$
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Copyright (C) 1992 - 1997, 2000-2001 Silicon Graphics, Inc. All rights reserved.
- */
-
-/************************************************************************
- * *
- * WARNING!!! WARNING!!! WARNING!!! WARNING!!! WARNING!!! *
- * *
- * This file is created by an automated script. Any (minimal) changes *
- * made manually to this file should be made with care. *
- * *
- * MAKE ALL ADDITIONS TO THE END OF THIS FILE *
- * *
- ************************************************************************/
-
-
-#ifndef _ASM_IA64_SN_SN1_HUBLB_H
-#define _ASM_IA64_SN_SN1_HUBLB_H
-
-
-#define LB_REV_ID 0x00600000 /*
- * Bedrock Revision
- * and ID
- */
-
-
-
-#define LB_CPU_PERMISSION 0x00604000 /*
- * CPU PIO access
- * permission bits
- */
-
-
-
-#define LB_CPU_PERM_OVRRD 0x00604008 /*
- * CPU PIO access
- * permission bit
- * override
- */
-
-
-
-#define LB_IO_PERMISSION 0x00604010 /*
- * IO PIO access
- * permission bits
- */
-
-
-
-#define LB_SOFT_RESET 0x00604018 /*
- * Soft reset the
- * Bedrock chip
- */
-
-
-
-#define LB_REGION_PRESENT 0x00604020 /*
- * Regions Present for
- * Invalidates
- */
-
-
-
-#define LB_NODES_ABSENT 0x00604028 /*
- * Nodes Absent for
- * Invalidates
- */
-
-
-
-#define LB_MICROLAN_CTL 0x00604030 /*
- * Microlan Control
- * (NIC)
- */
-
-
-
-#define LB_ERROR_BITS 0x00604040 /*
- * Local Block error
- * bits
- */
-
-
-
-#define LB_ERROR_MASK_CLR 0x00604048 /*
- * Bit mask write to
- * clear error bits
- */
-
-
-
-#define LB_ERROR_HDR1 0x00604050 /*
- * Source, Suppl and
- * Cmd fields
- */
-
-
-
-#define LB_ERROR_HDR2 0x00604058 /*
- * Address field from
- * first error
- */
-
-
-
-#define LB_ERROR_DATA 0x00604060 /*
- * Data flit (if any)
- * from first error
- */
-
-
-
-#define LB_DEBUG_SELECT 0x00604100 /*
- * Choice of debug
- * signals from chip
- */
-
-
-
-#define LB_DEBUG_PINS 0x00604108 /*
- * Value on the chip's
- * debug pins
- */
-
-
-
-#define LB_RT_LOCAL_CTRL 0x00604200 /*
- * Local generation of
- * real-time clock
- */
-
-
-
-#define LB_RT_FILTER_CTRL 0x00604208 /*
- * Control of
- * filtering of global
- * clock
- */
-
-
-
-#define LB_SCRATCH_REG0 0x00608000 /* Scratch Register 0 */
-
-
-
-#define LB_SCRATCH_REG1 0x00608008 /* Scratch Register 1 */
-
-
-
-#define LB_SCRATCH_REG2 0x00608010 /* Scratch Register 2 */
-
-
-
-#define LB_SCRATCH_REG3 0x00608018 /* Scratch Register 3 */
-
-
-
-#define LB_SCRATCH_REG4 0x00608020 /* Scratch Register 4 */
-
-
-
-#define LB_SCRATCH_REG0_WZ 0x00608040 /*
- * Scratch Register 0
- * (WZ alias)
- */
-
-
-
-#define LB_SCRATCH_REG1_WZ 0x00608048 /*
- * Scratch Register 1
- * (WZ alias)
- */
-
-
-
-#define LB_SCRATCH_REG2_WZ 0x00608050 /*
- * Scratch Register 2
- * (WZ alias)
- */
-
-
-
-#define LB_SCRATCH_REG3_RZ 0x00608058 /*
- * Scratch Register 3
- * (RZ alias)
- */
-
-
-
-#define LB_SCRATCH_REG4_RZ 0x00608060 /*
- * Scratch Register 4
- * (RZ alias)
- */
-
-
-
-#define LB_VECTOR_PARMS 0x0060C000 /*
- * Vector PIO
- * parameters
- */
-
-
-
-#define LB_VECTOR_ROUTE 0x0060C008 /*
- * Vector PIO Vector
- * Route
- */
-
-
-
-#define LB_VECTOR_DATA 0x0060C010 /*
- * Vector PIO Write
- * Data
- */
-
-
-
-#define LB_VECTOR_STATUS 0x0060C020 /*
- * Vector PIO Return
- * Status
- */
-
-
-
-#define LB_VECTOR_RETURN 0x0060C028 /*
- * Vector PIO Return
- * Route
- */
-
-
-
-#define LB_VECTOR_READ_DATA 0x0060C030 /*
- * Vector PIO Read
- * Data
- */
-
-
-
-#define LB_VECTOR_STATUS_CLEAR 0x0060C038 /*
- * Clear Vector PIO
- * Return Status
- */
-
-
-
-
-
-#ifndef __ASSEMBLY__
-
-/************************************************************************
- * *
- * Description: This register contains information that allows *
- * exploratory software to probe for chip type. This is also the *
- * register that sets this node's ID and the size of each region *
- * (which affects the maximum possible system size). IBM assigns the *
- * values for the REVISION, PART_NUMBER and MANUFACTURER fields, in *
- * accordance with the IEEE 1149.1 standard; SGI is not at liberty to *
- * unilaterally change the values of these fields. *
- * . *
- * *
- ************************************************************************/
-
-
-
-
-#ifdef LITTLE_ENDIAN
-
-typedef union lb_rev_id_u {
- bdrkreg_t lb_rev_id_regval;
- struct {
- bdrkreg_t ri_reserved_2 : 1;
- bdrkreg_t ri_manufacturer : 11;
- bdrkreg_t ri_part_number : 16;
- bdrkreg_t ri_revision : 4;
- bdrkreg_t ri_node_id : 8;
- bdrkreg_t ri_reserved_1 : 6;
- bdrkreg_t ri_region_size : 2;
- bdrkreg_t ri_reserved : 16;
- } lb_rev_id_fld_s;
-} lb_rev_id_u_t;
-
-#else
-
-typedef union lb_rev_id_u {
- bdrkreg_t lb_rev_id_regval;
- struct {
- bdrkreg_t ri_reserved : 16;
- bdrkreg_t ri_region_size : 2;
- bdrkreg_t ri_reserved_1 : 6;
- bdrkreg_t ri_node_id : 8;
- bdrkreg_t ri_revision : 4;
- bdrkreg_t ri_part_number : 16;
- bdrkreg_t ri_manufacturer : 11;
- bdrkreg_t ri_reserved_2 : 1;
- } lb_rev_id_fld_s;
-} lb_rev_id_u_t;
-
-#endif
-
-
-
-
-/************************************************************************
- * *
- * This register contains the PI-access-rights bit-vector for the *
- * LB, NI, XB and MD portions of the Bedrock local register space. If *
- * a bit in the bit-vector is set, the region corresponding to that *
- * bit has read/write permission on the LB, NI, XB and MD local *
- * registers. If the bit is clear, that region has no write access to *
- * the local registers and no read access if the read will cause any *
- * state change. If a write or a read with side effects is attempted *
- * by a PI in a region for which access is restricted, the LB will *
- * not perform the operation and will send back a reply which *
- * indicates an error. *
- * *
- ************************************************************************/
-
-
-
-
-typedef union lb_cpu_permission_u {
- bdrkreg_t lb_cpu_permission_regval;
- struct {
- bdrkreg_t cp_cpu_access : 64;
- } lb_cpu_permission_fld_s;
-} lb_cpu_permission_u_t;
-
-
-
-
-/************************************************************************
- * *
- * A write to this register of the 64-bit value "SGIrules" will *
- * cause the bit in the LB_CPU_PROTECT register corresponding to the *
- * region of the requester to be set. *
- * *
- ************************************************************************/
-
-
-
-
-typedef union lb_cpu_perm_ovrrd_u {
- bdrkreg_t lb_cpu_perm_ovrrd_regval;
- struct {
- bdrkreg_t cpo_cpu_perm_ovr : 64;
- } lb_cpu_perm_ovrrd_fld_s;
-} lb_cpu_perm_ovrrd_u_t;
-
-
-
-
-/************************************************************************
- * *
- * This register contains the II-access-rights bit-vector for the *
- * LB, NI, XB and MD portions of the Bedrock local register space. If *
- * a bit in the bit-vector is set, the region corresponding to that *
- * bit has read/write permission on the LB, NI, XB and MD local *
- * registers. If the bit is clear, then that region has no write *
- * access to the local registers and no read access if the read *
- * results in any state change. If a write or a read with side *
- * effects is attempted by an II in a region for which access is *
- * restricted, the LB will not perform the operation and will send *
- * back a reply which indicates an error. *
- * *
- ************************************************************************/
-
-
-
-
-typedef union lb_io_permission_u {
- bdrkreg_t lb_io_permission_regval;
- struct {
- bdrkreg_t ip_io_permission : 64;
- } lb_io_permission_fld_s;
-} lb_io_permission_u_t;
-
-
-
-
-/************************************************************************
- * *
- * A write to this bit resets the Bedrock chip with a soft reset. *
- * *
- ************************************************************************/
-
-
-
-
-#ifdef LITTLE_ENDIAN
-
-typedef union lb_soft_reset_u {
- bdrkreg_t lb_soft_reset_regval;
- struct {
- bdrkreg_t sr_soft_reset : 1;
- bdrkreg_t sr_reserved : 63;
- } lb_soft_reset_fld_s;
-} lb_soft_reset_u_t;
-
-#else
-
-typedef union lb_soft_reset_u {
- bdrkreg_t lb_soft_reset_regval;
- struct {
- bdrkreg_t sr_reserved : 63;
- bdrkreg_t sr_soft_reset : 1;
- } lb_soft_reset_fld_s;
-} lb_soft_reset_u_t;
-
-#endif
-
-
-
-/************************************************************************
- * *
- * This register indicates which regions are present and capable of *
- * receiving an invalidate (INVAL) request. The LB samples this *
- * register at the start of processing each LINVAL. When an LINVAL *
- * indicates that a particular PI unit might hold a shared copy of a *
- * cache block but this PI is in a region which is not present (i.e., *
- * its bit in LB_REGION_PRESENT is clear), then the LB sends an IVACK *
- * reply packet on behalf of this PI. The REGION_SIZE field in the *
- * LB_REV_ID register determines the number of nodes per region (and *
- * hence, the number of PI units which share a common bit in the *
- * LB_REGION_PRESENT register). *
- * *
- ************************************************************************/
-
-
-
-
-typedef union lb_region_present_u {
- bdrkreg_t lb_region_present_regval;
- struct {
- bdrkreg_t rp_present_bits : 64;
- } lb_region_present_fld_s;
-} lb_region_present_u_t;
-
-
-
-
-/************************************************************************
- * *
- * Description: This register indicates which nodes are absent and *
- * not capable of receiving an invalidate (INVAL) request. The LB *
- * samples this register at the start of processing each LINVAL. When *
- * an LINVAL indicates that a particular PI unit might hold a shared *
- * copy of a cache block but this PI unit's node is not present *
- * (i.e., its node ID is listed in the LB_NODES_ABSENT register), *
- * then the LB sends an IVACK reply packet on behalf of this PI. *
- * *
- * *
- ************************************************************************/
-
-
-
-
-#ifdef LITTLE_ENDIAN
-
-typedef union lb_nodes_absent_u {
- bdrkreg_t lb_nodes_absent_regval;
- struct {
- bdrkreg_t na_node_0 : 8;
- bdrkreg_t na_reserved_3 : 7;
- bdrkreg_t na_node_0_valid : 1;
- bdrkreg_t na_node_1 : 8;
- bdrkreg_t na_reserved_2 : 7;
- bdrkreg_t na_node_1_valid : 1;
- bdrkreg_t na_node_2 : 8;
- bdrkreg_t na_reserved_1 : 7;
- bdrkreg_t na_node_2_valid : 1;
- bdrkreg_t na_node_3 : 8;
- bdrkreg_t na_reserved : 7;
- bdrkreg_t na_node_3_valid : 1;
- } lb_nodes_absent_fld_s;
-} lb_nodes_absent_u_t;
-
-#else
-
-typedef union lb_nodes_absent_u {
- bdrkreg_t lb_nodes_absent_regval;
- struct {
- bdrkreg_t na_node_3_valid : 1;
- bdrkreg_t na_reserved : 7;
- bdrkreg_t na_node_3 : 8;
- bdrkreg_t na_node_2_valid : 1;
- bdrkreg_t na_reserved_1 : 7;
- bdrkreg_t na_node_2 : 8;
- bdrkreg_t na_node_1_valid : 1;
- bdrkreg_t na_reserved_2 : 7;
- bdrkreg_t na_node_1 : 8;
- bdrkreg_t na_node_0_valid : 1;
- bdrkreg_t na_reserved_3 : 7;
- bdrkreg_t na_node_0 : 8;
- } lb_nodes_absent_fld_s;
-} lb_nodes_absent_u_t;
-
-#endif
-
-
-
-
-/************************************************************************
- * *
- * This register provides access to the Number-In-a-Can add-only *
- * serial PROM that is used to store node board serial number and *
- * configuration information. (Refer to NIC datasheet Dallas 1990A *
- * that is viewable at *
- * URL::http://www.dalsemi.com/DocControl/PDFs/pdfindex.html). Data *
- * comes from this interface LSB first. *
- * *
- ************************************************************************/
-
-
-
-
-#ifdef LITTLE_ENDIAN
-
-typedef union lb_microlan_ctl_u {
- bdrkreg_t lb_microlan_ctl_regval;
- struct {
- bdrkreg_t mc_rd_data : 1;
- bdrkreg_t mc_done : 1;
- bdrkreg_t mc_sample : 8;
- bdrkreg_t mc_pulse : 10;
- bdrkreg_t mc_clkdiv_phi0 : 7;
- bdrkreg_t mc_clkdiv_phi1 : 7;
- bdrkreg_t mc_reserved : 30;
- } lb_microlan_ctl_fld_s;
-} lb_microlan_ctl_u_t;
-
-#else
-
-typedef union lb_microlan_ctl_u {
- bdrkreg_t lb_microlan_ctl_regval;
- struct {
- bdrkreg_t mc_reserved : 30;
- bdrkreg_t mc_clkdiv_phi1 : 7;
- bdrkreg_t mc_clkdiv_phi0 : 7;
- bdrkreg_t mc_pulse : 10;
- bdrkreg_t mc_sample : 8;
- bdrkreg_t mc_done : 1;
- bdrkreg_t mc_rd_data : 1;
- } lb_microlan_ctl_fld_s;
-} lb_microlan_ctl_u_t;
-
-#endif
-
-
-
-
-/************************************************************************
- * *
- * Description: This register contains the LB error status bits. *
- * Whenever a particular type of error occurs, the LB sets its bit in *
- * this register so that software will be aware that such an event *
- * has happened. Reads from this register are non-destructive and the *
- * contents of this register remain intact across reset operations. *
- * Whenever any of these bits is set, the LB will assert its *
- * interrupt request output signals that go to the PI units. *
- * Software can simulate the occurrence of an error by first writing *
- * appropriate values into the LB_ERROR_HDR1, LB_ERROR_HDR2 and *
- * LB_ERROR_DATA registers, and then writing to the LB_ERROR_BITS *
- * register to set the error bits in a particular way. Setting one or *
- * more error bits will cause the LB to interrupt a processor and *
- * invoke error-handling software. *
- * *
- ************************************************************************/
-
-
-
-
-#ifdef LITTLE_ENDIAN
-
-typedef union lb_error_bits_u {
- bdrkreg_t lb_error_bits_regval;
- struct {
- bdrkreg_t eb_rq_bad_cmd : 1;
- bdrkreg_t eb_rp_bad_cmd : 1;
- bdrkreg_t eb_rq_short : 1;
- bdrkreg_t eb_rp_short : 1;
- bdrkreg_t eb_rq_long : 1;
- bdrkreg_t eb_rp_long : 1;
- bdrkreg_t eb_rq_bad_data : 1;
- bdrkreg_t eb_rp_bad_data : 1;
- bdrkreg_t eb_rq_bad_addr : 1;
- bdrkreg_t eb_rq_bad_linval : 1;
- bdrkreg_t eb_gclk_drop : 1;
- bdrkreg_t eb_reserved : 53;
- } lb_error_bits_fld_s;
-} lb_error_bits_u_t;
-
-#else
-
-typedef union lb_error_bits_u {
- bdrkreg_t lb_error_bits_regval;
- struct {
- bdrkreg_t eb_reserved : 53;
- bdrkreg_t eb_gclk_drop : 1;
- bdrkreg_t eb_rq_bad_linval : 1;
- bdrkreg_t eb_rq_bad_addr : 1;
- bdrkreg_t eb_rp_bad_data : 1;
- bdrkreg_t eb_rq_bad_data : 1;
- bdrkreg_t eb_rp_long : 1;
- bdrkreg_t eb_rq_long : 1;
- bdrkreg_t eb_rp_short : 1;
- bdrkreg_t eb_rq_short : 1;
- bdrkreg_t eb_rp_bad_cmd : 1;
- bdrkreg_t eb_rq_bad_cmd : 1;
- } lb_error_bits_fld_s;
-} lb_error_bits_u_t;
-
-#endif
-
-
-
-
-/************************************************************************
- * *
- * This register lets software clear some of the bits in the *
- * LB_ERROR_BITS register without affecting other bits. Essentially, *
- * it provides bit mask functionality. When software writes to the *
- * LB_ERROR_MASK_CLR register, the bits which are set in the data *
- * value indicate which bits are to be cleared in LB_ERROR_BITS. If a *
- * bit is clear in the data value written to the LB_ERROR_MASK_CLR *
- * register, then its corresponding bit in the LB_ERROR_BITS register *
- * is not affected. Hence, software can atomically clear any subset *
- * of the error bits in the LB_ERROR_BITS register. *
- * *
- ************************************************************************/
-
-
-
-
-#ifdef LITTLE_ENDIAN
-
-typedef union lb_error_mask_clr_u {
- bdrkreg_t lb_error_mask_clr_regval;
- struct {
- bdrkreg_t emc_clr_rq_bad_cmd : 1;
- bdrkreg_t emc_clr_rp_bad_cmd : 1;
- bdrkreg_t emc_clr_rq_short : 1;
- bdrkreg_t emc_clr_rp_short : 1;
- bdrkreg_t emc_clr_rq_long : 1;
- bdrkreg_t emc_clr_rp_long : 1;
- bdrkreg_t emc_clr_rq_bad_data : 1;
- bdrkreg_t emc_clr_rp_bad_data : 1;
- bdrkreg_t emc_clr_rq_bad_addr : 1;
- bdrkreg_t emc_clr_rq_bad_linval : 1;
- bdrkreg_t emc_clr_gclk_drop : 1;
- bdrkreg_t emc_reserved : 53;
- } lb_error_mask_clr_fld_s;
-} lb_error_mask_clr_u_t;
-
-#else
-
-typedef union lb_error_mask_clr_u {
- bdrkreg_t lb_error_mask_clr_regval;
- struct {
- bdrkreg_t emc_reserved : 53;
- bdrkreg_t emc_clr_gclk_drop : 1;
- bdrkreg_t emc_clr_rq_bad_linval : 1;
- bdrkreg_t emc_clr_rq_bad_addr : 1;
- bdrkreg_t emc_clr_rp_bad_data : 1;
- bdrkreg_t emc_clr_rq_bad_data : 1;
- bdrkreg_t emc_clr_rp_long : 1;
- bdrkreg_t emc_clr_rq_long : 1;
- bdrkreg_t emc_clr_rp_short : 1;
- bdrkreg_t emc_clr_rq_short : 1;
- bdrkreg_t emc_clr_rp_bad_cmd : 1;
- bdrkreg_t emc_clr_rq_bad_cmd : 1;
- } lb_error_mask_clr_fld_s;
-} lb_error_mask_clr_u_t;
-
-#endif
-
-
-
-
-/************************************************************************
- * *
- * If the LB detects an error when VALID==0 in the LB_ERROR_HDR1 *
- * register, then it saves the contents of the offending packet's *
- * header flit in the LB_ERROR_HDR1 and LB_ERROR_HDR2 registers, sets *
- * the VALID bit in LB_ERROR_HDR1 and clears the OVERRUN bit in *
- * LB_ERROR_HDR1 (and it will also set the corresponding bit in the *
- * LB_ERROR_BITS register). The ERR_TYPE field indicates specifically *
- * what kind of error occurred. Its encoding corresponds to the bit *
- * positions in the LB_ERROR_BITS register (e.g., ERR_TYPE==5 *
- * indicates a RP_LONG error). If an error (of any type except *
- * GCLK_DROP) subsequently happens while VALID==1, then the LB sets *
- * the OVERRUN bit in LB_ERROR_HDR1. This register is not relevant *
- * when a GCLK_DROP error occurs; the LB does not even attempt to *
- * change the ERR_TYPE, VALID or OVERRUN field when a GCLK_DROP error *
- * happens. *
- * *
- ************************************************************************/
-
-
-
-
-#ifdef LITTLE_ENDIAN
-
-typedef union lb_error_hdr1_u {
- bdrkreg_t lb_error_hdr1_regval;
- struct {
- bdrkreg_t eh_command : 7;
- bdrkreg_t eh_reserved_5 : 1;
- bdrkreg_t eh_suppl : 11;
- bdrkreg_t eh_reserved_4 : 1;
- bdrkreg_t eh_source : 11;
- bdrkreg_t eh_reserved_3 : 1;
- bdrkreg_t eh_err_type : 4;
- bdrkreg_t eh_reserved_2 : 4;
- bdrkreg_t eh_overrun : 1;
- bdrkreg_t eh_reserved_1 : 3;
- bdrkreg_t eh_valid : 1;
- bdrkreg_t eh_reserved : 19;
- } lb_error_hdr1_fld_s;
-} lb_error_hdr1_u_t;
-
-#else
-
-typedef union lb_error_hdr1_u {
- bdrkreg_t lb_error_hdr1_regval;
- struct {
- bdrkreg_t eh_reserved : 19;
- bdrkreg_t eh_valid : 1;
- bdrkreg_t eh_reserved_1 : 3;
- bdrkreg_t eh_overrun : 1;
- bdrkreg_t eh_reserved_2 : 4;
- bdrkreg_t eh_err_type : 4;
- bdrkreg_t eh_reserved_3 : 1;
- bdrkreg_t eh_source : 11;
- bdrkreg_t eh_reserved_4 : 1;
- bdrkreg_t eh_suppl : 11;
- bdrkreg_t eh_reserved_5 : 1;
- bdrkreg_t eh_command : 7;
- } lb_error_hdr1_fld_s;
-} lb_error_hdr1_u_t;
-
-#endif
-
-
-
-
-/************************************************************************
- * *
- * Contents of the Address field from header flit of first packet *
- * that causes an error. This register is not relevant when a *
- * GCLK_DROP error occurs. *
- * *
- ************************************************************************/
-
-
-
-
-#ifdef LITTLE_ENDIAN
-
-typedef union lb_error_hdr2_u {
- bdrkreg_t lb_error_hdr2_regval;
- struct {
- bdrkreg_t eh_address : 38;
- bdrkreg_t eh_reserved : 26;
- } lb_error_hdr2_fld_s;
-} lb_error_hdr2_u_t;
-
-#else
-
-typedef union lb_error_hdr2_u {
- bdrkreg_t lb_error_hdr2_regval;
- struct {
- bdrkreg_t eh_reserved : 26;
- bdrkreg_t eh_address : 38;
- } lb_error_hdr2_fld_s;
-} lb_error_hdr2_u_t;
-
-#endif
-
-
-
-
-/************************************************************************
- * *
- * Description: This register accompanies the LB_ERROR_HDR1 and *
- * LB_ERROR_HDR2 registers. The LB updates the value in this *
- * register when an incoming packet with a data flit causes an error *
- * while VALID==0 in the LB_ERROR_HDR1 register. This register *
- * retains the contents of the data flit from the incoming packet *
- * that caused the error. This register is relevant for the following *
- * types of errors: *
- * <UL > *
- * <UL > *
- * <UL > *
- * <UL > *
- * <UL > *
- * <LI >RQ_BAD_LINVAL for a LINVAL request. *
- * <LI >RQ_BAD_ADDR for a normal or vector PIO request. *
- * <LI >RP_BAD_DATA for a vector PIO reply. *
- * <LI >RQ_BAD DATA for an incoming request with data. *
- * <LI >RP_LONG for a vector PIO reply. *
- * <LI >RQ_LONG for an incoming request with expected data. *
- * <BLOCKQUOTE > *
- * In the case of RQ_BAD_LINVAL, the register retains the 64-bit data *
- * value that followed the header flit. In the case of RQ_BAD_ADDR *
- * or RQ_BAD_DATA, the register retains the incoming packet's 64-bit *
- * data value (i.e., 2nd flit in the packet for a normal PIO write or *
- * an LINVAL, 3rd flit for a vector PIO read or write). In the case *
- * of RP_BAD_DATA, the register retains the 64-bit data value in the *
- * 3rd flit of the packet. When a RP_LONG or RQ_LONG error occurs, *
- * the LB loads the LB_ERROR_DATA register with the contents of the *
- * expected data flit (i.e., the 3rd flit in the packet for a vector *
- * PIO request or reply, the 2nd flit for other packets), if any. The *
- * contents of the LB_ERROR_DATA register are undefined after a *
- * RP_SHORT, RQ_SHORT, RP_BAD_CMD or RQ_BAD_CMD error. The contents *
- * of the LB_ERROR_DATA register are also undefined after an incoming *
- * normal PIO read request which encounters a RQ_LONG error. *
- * *
- ************************************************************************/
-
-
-
-
-typedef union lb_error_data_u {
- bdrkreg_t lb_error_data_regval;
- struct {
- bdrkreg_t ed_data : 64;
- } lb_error_data_fld_s;
-} lb_error_data_u_t;
-
-
-
-
-/************************************************************************
- * *
- * This register enables software to control what internal Bedrock *
- * signals are visible on the chip's debug pins. The LB provides the *
- * 6-bit value in this register to Bedrock's DEBUG unit. The JTAG *
- * unit provides a similar 6-bit selection input to the DEBUG unit, *
- * along with another signal that tells the DEBUG unit whether to use *
- * the selection signal from the LB or the JTAG unit. For a *
- * description of the menu of choices for debug signals, refer to the *
- * documentation for the DEBUG unit. *
- * *
- ************************************************************************/
-
-
-
-
-#ifdef LITTLE_ENDIAN
-
-typedef union lb_debug_select_u {
- bdrkreg_t lb_debug_select_regval;
- struct {
- bdrkreg_t ds_debug_sel : 6;
- bdrkreg_t ds_reserved : 58;
- } lb_debug_select_fld_s;
-} lb_debug_select_u_t;
-
-#else
-
-typedef union lb_debug_select_u {
- bdrkreg_t lb_debug_select_regval;
- struct {
- bdrkreg_t ds_reserved : 58;
- bdrkreg_t ds_debug_sel : 6;
- } lb_debug_select_fld_s;
-} lb_debug_select_u_t;
-
-#endif
-
-
-
-
-/************************************************************************
- * *
- * A PIO read from this register returns the 32-bit value that is *
- * currently on the Bedrock chip's debug pins. This register allows *
- * software to observe debug pin output values which do not change *
- * frequently (i.e., they remain constant over a period of many *
- * cycles). *
- * *
- ************************************************************************/
-
-
-
-
-#ifdef LITTLE_ENDIAN
-
-typedef union lb_debug_pins_u {
- bdrkreg_t lb_debug_pins_regval;
- struct {
- bdrkreg_t dp_debug_pins : 32;
- bdrkreg_t dp_reserved : 32;
- } lb_debug_pins_fld_s;
-} lb_debug_pins_u_t;
-
-#else
-
-typedef union lb_debug_pins_u {
- bdrkreg_t lb_debug_pins_regval;
- struct {
- bdrkreg_t dp_reserved : 32;
- bdrkreg_t dp_debug_pins : 32;
- } lb_debug_pins_fld_s;
-} lb_debug_pins_u_t;
-
-#endif
-
-
-
-
-/************************************************************************
- * *
- * The LB unit provides the PI0 and PI1 units with a real-time clock *
- * signal. The LB can generate this signal itself, based on the *
- * Bedrock chip's system clock which the LB receives as an input. *
- * Alternatively, the LB can filter a global clock signal which it *
- * receives as an input and provide the filtered version to PI0 and *
- * PI1. The user can program the LB_RT_LOCAL_CTRL register to choose *
- * the source of the real-time clock. If the user chooses to generate *
- * the real-time clock internally within the LB, then the user can *
- * specify the period for the real-time clock signal. *
- * *
- ************************************************************************/
-
-
-
-
-#ifdef LITTLE_ENDIAN
-
-typedef union lb_rt_local_ctrl_u {
- bdrkreg_t lb_rt_local_ctrl_regval;
- struct {
- bdrkreg_t rlc_gclk_enable : 1;
- bdrkreg_t rlc_reserved_4 : 3;
- bdrkreg_t rlc_max_count : 10;
- bdrkreg_t rlc_reserved_3 : 2;
- bdrkreg_t rlc_gclk_counter : 10;
- bdrkreg_t rlc_reserved_2 : 2;
- bdrkreg_t rlc_gclk : 1;
- bdrkreg_t rlc_reserved_1 : 3;
- bdrkreg_t rlc_use_internal : 1;
- bdrkreg_t rlc_reserved : 31;
- } lb_rt_local_ctrl_fld_s;
-} lb_rt_local_ctrl_u_t;
-
-#else
-
-typedef union lb_rt_local_ctrl_u {
- bdrkreg_t lb_rt_local_ctrl_regval;
- struct {
- bdrkreg_t rlc_reserved : 31;
- bdrkreg_t rlc_use_internal : 1;
- bdrkreg_t rlc_reserved_1 : 3;
- bdrkreg_t rlc_gclk : 1;
- bdrkreg_t rlc_reserved_2 : 2;
- bdrkreg_t rlc_gclk_counter : 10;
- bdrkreg_t rlc_reserved_3 : 2;
- bdrkreg_t rlc_max_count : 10;
- bdrkreg_t rlc_reserved_4 : 3;
- bdrkreg_t rlc_gclk_enable : 1;
- } lb_rt_local_ctrl_fld_s;
-} lb_rt_local_ctrl_u_t;
-
-#endif
-
-
-
-
-/************************************************************************
- * *
- * When the value of the USE_INTERNAL field in the LB_RT_LOCAL_CTRL *
- * register is 0, the LB filters an incoming global clock signal and *
- * provides the result to PI0 and PI1 for their real-time clock *
- * inputs. The LB can perform either simple filtering or complex *
- * filtering, depending on the value of the MASK_ENABLE bit. For the *
- * simple filtering option, the LB merely removes glitches from the *
- * incoming global clock; if the global clock goes high (or low) for *
- * only a single cycle, the LB considers it to be a glitch and does *
- * not pass it through to PI0 and PI1. For the complex filtering *
- * option, the LB expects positive edges on the incoming global clock *
- * to be spaced at fairly regular intervals and it looks for them at *
- * these times; the LB keeps track of unexpected or missing positive *
- * edges, and it generates an edge itself whenever the incoming *
- * global clock apparently misses an edge. For each filtering option, *
- * the real-time clock which the LB provides to PI0 and PI1 is not *
- * necessarily a square wave; when a positive edge happens, the *
- * real-time clock stays high for (2*MAX_COUNT+1-OFFSET)/2 cycles of *
- * the LB's system clock, and then is low until the next positive *
- * edge. *
- * *
- ************************************************************************/
-
-
-
-
-#ifdef LITTLE_ENDIAN
-
-typedef union lb_rt_filter_ctrl_u {
- bdrkreg_t lb_rt_filter_ctrl_regval;
- struct {
- bdrkreg_t rfc_offset : 5;
- bdrkreg_t rfc_reserved_4 : 3;
- bdrkreg_t rfc_mask_counter : 12;
- bdrkreg_t rfc_mask_enable : 1;
- bdrkreg_t rfc_reserved_3 : 3;
- bdrkreg_t rfc_dropout_counter : 10;
- bdrkreg_t rfc_reserved_2 : 2;
- bdrkreg_t rfc_dropout_thresh : 10;
- bdrkreg_t rfc_reserved_1 : 2;
- bdrkreg_t rfc_error_counter : 10;
- bdrkreg_t rfc_reserved : 6;
- } lb_rt_filter_ctrl_fld_s;
-} lb_rt_filter_ctrl_u_t;
-
-#else
-
-typedef union lb_rt_filter_ctrl_u {
- bdrkreg_t lb_rt_filter_ctrl_regval;
- struct {
- bdrkreg_t rfc_reserved : 6;
- bdrkreg_t rfc_error_counter : 10;
- bdrkreg_t rfc_reserved_1 : 2;
- bdrkreg_t rfc_dropout_thresh : 10;
- bdrkreg_t rfc_reserved_2 : 2;
- bdrkreg_t rfc_dropout_counter : 10;
- bdrkreg_t rfc_reserved_3 : 3;
- bdrkreg_t rfc_mask_enable : 1;
- bdrkreg_t rfc_mask_counter : 12;
- bdrkreg_t rfc_reserved_4 : 3;
- bdrkreg_t rfc_offset : 5;
- } lb_rt_filter_ctrl_fld_s;
-} lb_rt_filter_ctrl_u_t;
-
-#endif
-
-
-
-
-/************************************************************************
- * *
- * This register is a scratch register that is reset to 0x0. At the *
- * normal address, the register is a simple storage location. At the *
- * Write-If-Zero address, the register accepts a new value from a *
- * write operation only if the current value is zero. *
- * *
- ************************************************************************/
-
-
-
-
-typedef union lb_scratch_reg0_u {
- bdrkreg_t lb_scratch_reg0_regval;
- struct {
- bdrkreg_t sr_scratch_bits : 64;
- } lb_scratch_reg0_fld_s;
-} lb_scratch_reg0_u_t;
-
-
-
-
-/************************************************************************
- * *
- * These registers are scratch registers that are not reset. At a *
- * register's normal address, it is a simple storage location. At a *
- * register's Write-If-Zero address, it accepts a new value from a *
- * write operation only if the current value is zero. *
- * *
- ************************************************************************/
-
-
-
-
-typedef union lb_scratch_reg1_u {
- bdrkreg_t lb_scratch_reg1_regval;
- struct {
- bdrkreg_t sr_scratch_bits : 64;
- } lb_scratch_reg1_fld_s;
-} lb_scratch_reg1_u_t;
-
-
-
-
-/************************************************************************
- * *
- * These registers are scratch registers that are not reset. At a *
- * register's normal address, it is a simple storage location. At a *
- * register's Write-If-Zero address, it accepts a new value from a *
- * write operation only if the current value is zero. *
- * *
- ************************************************************************/
-
-
-
-
-typedef union lb_scratch_reg2_u {
- bdrkreg_t lb_scratch_reg2_regval;
- struct {
- bdrkreg_t sr_scratch_bits : 64;
- } lb_scratch_reg2_fld_s;
-} lb_scratch_reg2_u_t;
-
-
-
-
-/************************************************************************
- * *
- * These one-bit registers are scratch registers. At a register's *
- * normal address, it is a simple storage location. At a register's *
- * Read-Set-If-Zero address, it returns the original contents and *
- * sets the bit if the original value is zero. *
- * *
- ************************************************************************/
-
-
-
-
-#ifdef LITTLE_ENDIAN
-
-typedef union lb_scratch_reg3_u {
- bdrkreg_t lb_scratch_reg3_regval;
- struct {
- bdrkreg_t sr_scratch_bit : 1;
- bdrkreg_t sr_reserved : 63;
- } lb_scratch_reg3_fld_s;
-} lb_scratch_reg3_u_t;
-
-#else
-
-typedef union lb_scratch_reg3_u {
- bdrkreg_t lb_scratch_reg3_regval;
- struct {
- bdrkreg_t sr_reserved : 63;
- bdrkreg_t sr_scratch_bit : 1;
- } lb_scratch_reg3_fld_s;
-} lb_scratch_reg3_u_t;
-
-#endif
-
-
-
-
-/************************************************************************
- * *
- * These one-bit registers are scratch registers. At a register's *
- * normal address, it is a simple storage location. At a register's *
- * Read-Set-If-Zero address, it returns the original contents and *
- * sets the bit if the original value is zero. *
- * *
- ************************************************************************/
-
-
-
-
-#ifdef LITTLE_ENDIAN
-
-typedef union lb_scratch_reg4_u {
- bdrkreg_t lb_scratch_reg4_regval;
- struct {
- bdrkreg_t sr_scratch_bit : 1;
- bdrkreg_t sr_reserved : 63;
- } lb_scratch_reg4_fld_s;
-} lb_scratch_reg4_u_t;
-
-#else
-
-typedef union lb_scratch_reg4_u {
- bdrkreg_t lb_scratch_reg4_regval;
- struct {
- bdrkreg_t sr_reserved : 63;
- bdrkreg_t sr_scratch_bit : 1;
- } lb_scratch_reg4_fld_s;
-} lb_scratch_reg4_u_t;
-
-#endif
-
-
-
-
-/************************************************************************
- * *
- * This register is a scratch register that is reset to 0x0. At the *
- * normal address, the register is a simple storage location. At the *
- * Write-If-Zero address, the register accepts a new value from a *
- * write operation only if the current value is zero. *
- * *
- ************************************************************************/
-
-
-
-
-typedef union lb_scratch_reg0_wz_u {
- bdrkreg_t lb_scratch_reg0_wz_regval;
- struct {
- bdrkreg_t srw_scratch_bits : 64;
- } lb_scratch_reg0_wz_fld_s;
-} lb_scratch_reg0_wz_u_t;
-
-
-
-
-/************************************************************************
- * *
- * These registers are scratch registers that are not reset. At a *
- * register's normal address, it is a simple storage location. At a *
- * register's Write-If-Zero address, it accepts a new value from a *
- * write operation only if the current value is zero. *
- * *
- ************************************************************************/
-
-
-
-
-typedef union lb_scratch_reg1_wz_u {
- bdrkreg_t lb_scratch_reg1_wz_regval;
- struct {
- bdrkreg_t srw_scratch_bits : 64;
- } lb_scratch_reg1_wz_fld_s;
-} lb_scratch_reg1_wz_u_t;
-
-
-
-
-/************************************************************************
- * *
- * These registers are scratch registers that are not reset. At a *
- * register's normal address, it is a simple storage location. At a *
- * register's Write-If-Zero address, it accepts a new value from a *
- * write operation only if the current value is zero. *
- * *
- ************************************************************************/
-
-
-
-
-typedef union lb_scratch_reg2_wz_u {
- bdrkreg_t lb_scratch_reg2_wz_regval;
- struct {
- bdrkreg_t srw_scratch_bits : 64;
- } lb_scratch_reg2_wz_fld_s;
-} lb_scratch_reg2_wz_u_t;
-
-
-
-
-/************************************************************************
- * *
- * These one-bit registers are scratch registers. At a register's *
- * normal address, it is a simple storage location. At a register's *
- * Read-Set-If-Zero address, it returns the original contents and *
- * sets the bit if the original value is zero. *
- * *
- ************************************************************************/
-
-
-
-
-#ifdef LITTLE_ENDIAN
-
-typedef union lb_scratch_reg3_rz_u {
- bdrkreg_t lb_scratch_reg3_rz_regval;
- struct {
- bdrkreg_t srr_scratch_bit : 1;
- bdrkreg_t srr_reserved : 63;
- } lb_scratch_reg3_rz_fld_s;
-} lb_scratch_reg3_rz_u_t;
-
-#else
-
-typedef union lb_scratch_reg3_rz_u {
- bdrkreg_t lb_scratch_reg3_rz_regval;
- struct {
- bdrkreg_t srr_reserved : 63;
- bdrkreg_t srr_scratch_bit : 1;
- } lb_scratch_reg3_rz_fld_s;
-} lb_scratch_reg3_rz_u_t;
-
-#endif
-
-
-
-
-/************************************************************************
- * *
- * These one-bit registers are scratch registers. At a register's *
- * normal address, it is a simple storage location. At a register's *
- * Read-Set-If-Zero address, it returns the original contents and *
- * sets the bit if the original value is zero. *
- * *
- ************************************************************************/
-
-
-
-
-#ifdef LITTLE_ENDIAN
-
-typedef union lb_scratch_reg4_rz_u {
- bdrkreg_t lb_scratch_reg4_rz_regval;
- struct {
- bdrkreg_t srr_scratch_bit : 1;
- bdrkreg_t srr_reserved : 63;
- } lb_scratch_reg4_rz_fld_s;
-} lb_scratch_reg4_rz_u_t;
-
-#else
-
-typedef union lb_scratch_reg4_rz_u {
- bdrkreg_t lb_scratch_reg4_rz_regval;
- struct {
- bdrkreg_t srr_reserved : 63;
- bdrkreg_t srr_scratch_bit : 1;
- } lb_scratch_reg4_rz_fld_s;
-} lb_scratch_reg4_rz_u_t;
-
-#endif
-
-
-
-
-/************************************************************************
- * *
- * Description: This register contains vector PIO parameters. A *
- * write to this register triggers the LB to send out a vector PIO *
- * request packet. Immediately after servicing a write request to the *
- * LB_VECTOR_PARMS register, the LB sends back a reply (i.e., the LB *
- * doesn't wait for the vector PIO operation to finish first). Three *
- * LB registers provide the contents for an outgoing vector PIO *
- * request packet. Software should wait until the BUSY bit in *
- * LB_VECTOR_PARMS is clear and then initialize all three of these *
- * registers before initiating a vector PIO operation. The three *
- * vector PIO registers are: *
- * LB_VECTOR_ROUTE *
- * LB_VECTOR_DATA *
- * LB_VECTOR_PARMS (should be written last) *
- * *
- ************************************************************************/
-
-
-
-
-#ifdef LITTLE_ENDIAN
-
-typedef union lb_vector_parms_u {
- bdrkreg_t lb_vector_parms_regval;
- struct {
- bdrkreg_t vp_type : 1;
- bdrkreg_t vp_reserved_2 : 2;
- bdrkreg_t vp_address : 21;
- bdrkreg_t vp_reserved_1 : 8;
- bdrkreg_t vp_write_id : 8;
- bdrkreg_t vp_pio_id : 11;
- bdrkreg_t vp_reserved : 12;
- bdrkreg_t vp_busy : 1;
- } lb_vector_parms_fld_s;
-} lb_vector_parms_u_t;
-
-#else
-
-typedef union lb_vector_parms_u {
- bdrkreg_t lb_vector_parms_regval;
- struct {
- bdrkreg_t vp_busy : 1;
- bdrkreg_t vp_reserved : 12;
- bdrkreg_t vp_pio_id : 11;
- bdrkreg_t vp_write_id : 8;
- bdrkreg_t vp_reserved_1 : 8;
- bdrkreg_t vp_address : 21;
- bdrkreg_t vp_reserved_2 : 2;
- bdrkreg_t vp_type : 1;
- } lb_vector_parms_fld_s;
-} lb_vector_parms_u_t;
-
-#endif
-
-
-
-
-/************************************************************************
- * *
- * This register contains the vector PIO route. This is one of the 3 *
- * vector PIO control registers. *
- * *
- ************************************************************************/
-
-
-
-
-typedef union lb_vector_route_u {
- bdrkreg_t lb_vector_route_regval;
- struct {
- bdrkreg_t vr_vector : 64;
- } lb_vector_route_fld_s;
-} lb_vector_route_u_t;
-
-
-
-
-/************************************************************************
- * *
- * This register contains the vector PIO write data. This is one of *
- * the 3 vector PIO control registers. The contents of this register *
- * also provide the data value to be sent in outgoing vector PIO read *
- * requests and vector PIO write replies. *
- * *
- ************************************************************************/
-
-
-
-
-typedef union lb_vector_data_u {
- bdrkreg_t lb_vector_data_regval;
- struct {
- bdrkreg_t vd_write_data : 64;
- } lb_vector_data_fld_s;
-} lb_vector_data_u_t;
-
-
-
-
-/************************************************************************
- * *
- * Description: This register contains the vector PIO return status. *
- * Software should clear this register before launching a vector PIO *
- * request from the LB. The LB will not modify this register's value *
- * if an incoming reply packet encounters any kind of error. If an *
- * incoming reply packet does not encounter an error but the *
- * STATUS_VALID bit is already set, then the LB sets the OVERRUN bit *
- * and leaves the other fields unchanged. The LB updates the values *
- * of the SOURCE, PIO_ID, WRITE_ID, ADDRESS and TYPE fields only if *
- * an incoming vector PIO reply packet does not encounter an error *
- * and the STATUS_VALID bit is clear; at the same time, the LB sets *
- * the STATUS_VALID bit and will also update the LB_VECTOR_RETURN and *
- * LB_VECTOR_READ_DATA registers. *
- * *
- * *
- ************************************************************************/
-
-
-
-
-#ifdef LITTLE_ENDIAN
-
-typedef union lb_vector_status_u {
- bdrkreg_t lb_vector_status_regval;
- struct {
- bdrkreg_t vs_type : 3;
- bdrkreg_t vs_address : 21;
- bdrkreg_t vs_reserved : 8;
- bdrkreg_t vs_write_id : 8;
- bdrkreg_t vs_pio_id : 11;
- bdrkreg_t vs_source : 11;
- bdrkreg_t vs_overrun : 1;
- bdrkreg_t vs_status_valid : 1;
- } lb_vector_status_fld_s;
-} lb_vector_status_u_t;
-
-#else
-
-typedef union lb_vector_status_u {
- bdrkreg_t lb_vector_status_regval;
- struct {
- bdrkreg_t vs_status_valid : 1;
- bdrkreg_t vs_overrun : 1;
- bdrkreg_t vs_source : 11;
- bdrkreg_t vs_pio_id : 11;
- bdrkreg_t vs_write_id : 8;
- bdrkreg_t vs_reserved : 8;
- bdrkreg_t vs_address : 21;
- bdrkreg_t vs_type : 3;
- } lb_vector_status_fld_s;
-} lb_vector_status_u_t;
-
-#endif
-
-
-
-
-/************************************************************************
- * *
- * This register contains the return vector PIO route. The LB will *
- * not modify this register's value if an incoming reply packet *
- * encounters any kind of error. The LB also will not modify this *
- * register's value if the STATUS_VALID bit in the LB_VECTOR_STATUS *
- * register is set when it receives an incoming vector PIO reply. The *
- * LB stores an incoming vector PIO reply packet's vector route flit *
- * in this register only if the packet does not encounter an error *
- * and the STATUS_VALID bit is clear. *
- * *
- ************************************************************************/
-
-
-
-
-typedef union lb_vector_return_u {
- bdrkreg_t lb_vector_return_regval;
- struct {
- bdrkreg_t vr_return_vector : 64;
- } lb_vector_return_fld_s;
-} lb_vector_return_u_t;
-
-
-
-
-/************************************************************************
- * *
- * This register contains the vector PIO read data, if any. The LB *
- * will not modify this register's value if an incoming reply packet *
- * encounters any kind of error. The LB also will not modify this *
- * register's value if the STATUS_VALID bit in the LB_VECTOR_STATUS *
- * register is set when it receives an incoming vector PIO reply. The *
- * LB stores an incoming vector PIO reply packet's data flit in this *
- * register only if the packet does not encounter an error and the *
- * STATUS_VALID bit is clear. *
- * *
- ************************************************************************/
-
-
-
-
-typedef union lb_vector_read_data_u {
- bdrkreg_t lb_vector_read_data_regval;
- struct {
- bdrkreg_t vrd_read_data : 64;
- } lb_vector_read_data_fld_s;
-} lb_vector_read_data_u_t;
-
-
-
-
-/************************************************************************
- * *
- * Description: This register contains the vector PIO return status. *
- * Software should clear this register before launching a vector PIO *
- * request from the LB. The LB will not modify this register's value *
- * if an incoming reply packet encounters any kind of error. If an *
- * incoming reply packet does not encounter an error but the *
- * STATUS_VALID bit is already set, then the LB sets the OVERRUN bit *
- * and leaves the other fields unchanged. The LB updates the values *
- * of the SOURCE, PIO_ID, WRITE_ID, ADDRESS and TYPE fields only if *
- * an incoming vector PIO reply packet does not encounter an error *
- * and the STATUS_VALID bit is clear; at the same time, the LB sets *
- * the STATUS_VALID bit and will also update the LB_VECTOR_RETURN and *
- * LB_VECTOR_READ_DATA registers. *
- * *
- * *
- ************************************************************************/
-
-
-
-
-#ifdef LITTLE_ENDIAN
-
-typedef union lb_vector_status_clear_u {
- bdrkreg_t lb_vector_status_clear_regval;
- struct {
- bdrkreg_t vsc_type : 3;
- bdrkreg_t vsc_address : 21;
- bdrkreg_t vsc_reserved : 8;
- bdrkreg_t vsc_write_id : 8;
- bdrkreg_t vsc_pio_id : 11;
- bdrkreg_t vsc_source : 11;
- bdrkreg_t vsc_overrun : 1;
- bdrkreg_t vsc_status_valid : 1;
- } lb_vector_status_clear_fld_s;
-} lb_vector_status_clear_u_t;
-
-#else
-
-typedef union lb_vector_status_clear_u {
- bdrkreg_t lb_vector_status_clear_regval;
- struct {
- bdrkreg_t vsc_status_valid : 1;
- bdrkreg_t vsc_overrun : 1;
- bdrkreg_t vsc_source : 11;
- bdrkreg_t vsc_pio_id : 11;
- bdrkreg_t vsc_write_id : 8;
- bdrkreg_t vsc_reserved : 8;
- bdrkreg_t vsc_address : 21;
- bdrkreg_t vsc_type : 3;
- } lb_vector_status_clear_fld_s;
-} lb_vector_status_clear_u_t;
-
-#endif
-
-
-
-
-
-
-#endif /* __ASSEMBLY__ */
-
-/************************************************************************
- * *
- * MAKE ALL ADDITIONS AFTER THIS LINE *
- * *
- ************************************************************************/
-
-
-
-
-
-#endif /* _ASM_IA64_SN_SN1_HUBLB_H */
+++ /dev/null
-/* $Id$
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Copyright (C) 1992 - 1997, 2000-2001 Silicon Graphics, Inc. All rights reserved.
- */
-#ifndef _ASM_IA64_SN_SN1_HUBLB_NEXT_H
-#define _ASM_IA64_SN_SN1_HUBLB_NEXT_H
-
-/**********************************************************************
-
- This contains some mask and shift values for LB defined as required
- for compatibility.
-
- **********************************************************************/
-
-#define LRI_SYSTEM_SIZE_SHFT 46
-#define LRI_SYSTEM_SIZE_MASK (UINT64_CAST 0x3 << LRI_SYSTEM_SIZE_SHFT)
-#define LRI_NODEID_SHFT 32
-#define LRI_NODEID_MASK (UINT64_CAST 0xff << LRI_NODEID_SHFT)/* Node ID */
-#define LRI_CHIPID_SHFT 12
-#define LRI_CHIPID_MASK (UINT64_CAST 0xffff << LRI_CHIPID_SHFT) /* should be 0x3012 */
-#define LRI_REV_SHFT 28
-#define LRI_REV_MASK (UINT64_CAST 0xf << LRI_REV_SHFT)/* Chip revision */
-
-/* Values for LRI_SYSTEM_SIZE */
-#define SYSTEM_SIZE_INVALID 0x3
-#define SYSTEM_SIZE_NMODE 0x2
-#define SYSTEM_SIZE_COARSE 0x1
-#define SYSTEM_SIZE_SMALL 0x0
-
-/* In fine mode, each node is a region. In coarse mode, there are
- * 2 nodes per region. In N-mode, there are 4 nodes per region. */
-#define NASID_TO_FINEREG_SHFT 0
-#define NASID_TO_COARSEREG_SHFT 1
-#define NASID_TO_NMODEREG_SHFT 2
-
-#define LR_LOCALRESET (UINT64_CAST 1)
-/*
- * LB_VECTOR_PARMS mask and shift definitions.
- * TYPE may be any of the first four PIOTYPEs defined under NI_VECTOR_STATUS.
- */
-
-#define LVP_BUSY (UINT64_CAST 1 << 63)
-#define LVP_PIOID_SHFT 40
-#define LVP_PIOID_MASK (UINT64_CAST 0x7ff << 40)
-#define LVP_WRITEID_SHFT 32
-#define LVP_WRITEID_MASK (UINT64_CAST 0xff << 32)
-#define LVP_ADDRESS_MASK (UINT64_CAST 0xfffff8) /* Bits 23:3 */
-#define LVP_TYPE_SHFT 0
-#define LVP_TYPE_MASK (UINT64_CAST 0x3)
-
-/* LB_VECTOR_STATUS mask and shift definitions */
-
-#define LVS_VALID (UINT64_CAST 1 << 63)
-#define LVS_OVERRUN (UINT64_CAST 1 << 62)
-#define LVS_TARGET_SHFT 51
-#define LVS_TARGET_MASK (UINT64_CAST 0x7ff << 51)
-#define LVS_PIOID_SHFT 40
-#define LVS_PIOID_MASK (UINT64_CAST 0x7ff << 40)
-#define LVS_WRITEID_SHFT 32
-#define LVS_WRITEID_MASK (UINT64_CAST 0xff << 32)
-#define LVS_ADDRESS_MASK (UINT64_CAST 0xfffff8) /* Bits 23:3 */
-#define LVS_TYPE_SHFT 0
-#define LVS_TYPE_MASK (UINT64_CAST 0x7)
-#define LVS_ERROR_MASK (UINT64_CAST 0x4) /* bit set means error */
-
-/* LB_RT_LOCAL_CTRL mask and shift definitions */
-
-#define LRLC_USE_INT_SHFT 32
-#define LRLC_USE_INT_MASK (UINT64_CAST 1 << 32)
-#define LRLC_USE_INT (UINT64_CAST 1 << 32)
-#define LRLC_GCLK_SHFT 28
-#define LRLC_GCLK_MASK (UINT64_CAST 1 << 28)
-#define LRLC_GCLK (UINT64_CAST 1 << 28)
-#define LRLC_GCLK_COUNT_SHFT 16
-#define LRLC_GCLK_COUNT_MASK (UINT64_CAST 0x3ff << 16)
-#define LRLC_MAX_COUNT_SHFT 4
-#define LRLC_MAX_COUNT_MASK (UINT64_CAST 0x3ff << 4)
-#define LRLC_GCLK_EN_SHFT 0
-#define LRLC_GCLK_EN_MASK (UINT64_CAST 1)
-#define LRLC_GCLK_EN (UINT64_CAST 1)
-
-/* LB_NODES_ABSENT mask and shift definitions */
-#define LNA_VALID_SHFT 15
-#define LNA_VALID_MASK (UINT64_CAST 1 << LNA_VALID_SHFT)
-#define LNA_VALID (UINT64_CAST 1 << LNA_VALID_SHFT)
-#define LNA_NODE_SHFT 0
-#define LNA_NODE_MASK (UINT64_CAST 0xff << LNA_NODE_SHFT)
-
-/* LB_NODES_ABSENT has 4 identical sub-registers, on 16-bit boundaries */
-#define LNA_ENTRY_SHFT 16
-#define LNA_MAX_ENTRIES 4
-#define LNA_ADD(_reg, _n) ((_reg) = (_reg) << LNA_ENTRY_SHFT | \
- LNA_VALID | (_n) << LNA_NODE_SHFT)
-
-#define PIOTYPE_READ 0 /* VECTOR_PARMS and VECTOR_STATUS */
-#define PIOTYPE_WRITE 1 /* VECTOR_PARMS and VECTOR_STATUS */
-#define PIOTYPE_UNDEFINED 2 /* VECTOR_PARMS and VECTOR_STATUS */
-/* XXX IP35 doesn't support vector exchange: scr. regs. do locks directly */
-#define PIOTYPE_EXCHANGE 3 /* VECTOR_PARMS and VECTOR_STATUS */
-#define PIOTYPE_ADDR_ERR 4 /* VECTOR_STATUS only */
-#define PIOTYPE_CMD_ERR 5 /* VECTOR_STATUS only */
-#define PIOTYPE_PROT_ERR 6 /* VECTOR_STATUS only */
-#define PIOTYPE_UNKNOWN 7 /* VECTOR_STATUS only */
-
-#endif /* _ASM_IA64_SN_SN1_HUBLB_NEXT_H */
+++ /dev/null
-/* $Id$
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Copyright (C) 1992 - 1997, 2000-2001 Silicon Graphics, Inc. All rights reserved.
- */
-#ifndef _ASM_IA64_SN_SN1_HUBMD_H
-#define _ASM_IA64_SN_SN1_HUBMD_H
-
-
-/************************************************************************
- * *
- * WARNING!!! WARNING!!! WARNING!!! WARNING!!! WARNING!!! *
- * *
- * This file is created by an automated script. Any (minimal) changes *
- * made manually to this file should be made with care. *
- * *
- * MAKE ALL ADDITIONS TO THE END OF THIS FILE *
- * *
- ************************************************************************/
-
-
-#define MD_CURRENT_CELL 0x00780000 /*
- * BDDIR, LREG, LBOOT,
- * RREG, RBOOT
- * protection and mask
- * for using Local
- * Access protection.
- */
-
-
-
-#define MD_MEMORY_CONFIG 0x00780008 /*
- * Memory/Directory
- * DIMM control
- */
-
-
-
-#define MD_ARBITRATION_CONTROL 0x00780010 /*
- * Arbitration
- * Parameters
- */
-
-
-
-#define MD_MIG_CONFIG 0x00780018 /*
- * Page Migration
- * control
- */
-
-
-
-#define MD_FANDOP_CAC_STAT0 0x00780020 /*
- * Fetch-and-op cache
- * 0 status
- */
-
-
-
-#define MD_FANDOP_CAC_STAT1 0x00780028 /*
- * Fetch-and-op cache
- * 1 status
- */
-
-
-
-#define MD_MISC0_ERROR 0x00780040 /*
- * Miscellaneous MD
- * error
- */
-
-
-
-#define MD_MISC1_ERROR 0x00780048 /*
- * Miscellaneous MD
- * error
- */
-
-
-
-#define MD_MISC1_ERROR_CLR 0x00780058 /*
- * Miscellaneous MD
- * error clear
- */
-
-
-
-#define MD_OUTGOING_RP_QUEUE_SIZE 0x00780060 /*
- * MD outgoing reply
- * queues sizing
- */
-
-
-
-#define MD_PERF_SEL0 0x00790000 /*
- * Selects events
- * monitored by
- * MD_PERF_CNT0
- */
-
-
-
-#define MD_PERF_SEL1 0x00790008 /*
- * Selects events
- * monitored by
- * MD_PERF_CNT1
- */
-
-
-
-#define MD_PERF_CNT0 0x00790010 /*
- * Performance counter
- * 0
- */
-
-
-
-#define MD_PERF_CNT1 0x00790018 /*
- * Performance counter
- * 1
- */
-
-
-
-#define MD_REFRESH_CONTROL 0x007A0000 /*
- * Memory/Directory
- * refresh control
- */
-
-
-
-#define MD_JUNK_BUS_TIMING 0x007A0008 /* Junk Bus Timing */
-
-
-
-#define MD_LED0 0x007A0010 /* Reads of 8-bit LED0 */
-
-
-
-#define MD_LED1 0x007A0018 /* Reads of 8-bit LED1 */
-
-
-
-#define MD_LED2 0x007A0020 /* Reads of 8-bit LED2 */
-
-
-
-#define MD_LED3 0x007A0028 /* Reads of 8-bit LED3 */
-
-
-
-#define MD_BIST_CTL 0x007A0030 /*
- * BIST general
- * control
- */
-
-
-
-#define MD_BIST_DATA 0x007A0038 /*
- * BIST initial data
- * pattern and
- * variation control
- */
-
-
-
-#define MD_BIST_AB_ERR_ADDR 0x007A0040 /* BIST error address */
-
-
-
-#define MD_BIST_STATUS 0x007A0048 /* BIST status */
-
-
-
-#define MD_IB_DEBUG 0x007A0060 /* IB debug select */
-
-
-
-#define MD_DIR_CONFIG 0x007C0000 /*
- * Directory mode
- * control
- */
-
-
-
-#define MD_DIR_ERROR 0x007C0010 /*
- * Directory DIMM
- * error
- */
-
-
-
-#define MD_DIR_ERROR_CLR 0x007C0018 /*
- * Directory DIMM
- * error clear
- */
-
-
-
-#define MD_PROTOCOL_ERROR 0x007C0020 /*
- * Directory protocol
- * error
- */
-
-
-
-#define MD_PROTOCOL_ERR_CLR 0x007C0028 /*
- * Directory protocol
- * error clear
- */
-
-
-
-#define MD_MIG_CANDIDATE 0x007C0030 /*
- * Page migration
- * candidate
- */
-
-
-
-#define MD_MIG_CANDIDATE_CLR 0x007C0038 /*
- * Page migration
- * candidate clear
- */
-
-
-
-#define MD_MIG_DIFF_THRESH 0x007C0040 /*
- * Page migration
- * count difference
- * threshold
- */
-
-
-
-#define MD_MIG_VALUE_THRESH 0x007C0048 /*
- * Page migration
- * count absolute
- * threshold
- */
-
-
-
-#define MD_OUTGOING_RQ_QUEUE_SIZE 0x007C0050 /*
- * MD outgoing request
- * queues sizing
- */
-
-
-
-#define MD_BIST_DB_ERR_DATA 0x007C0058 /*
- * BIST directory
- * error data
- */
-
-
-
-#define MD_DB_DEBUG 0x007C0060 /* DB debug select */
-
-
-
-#define MD_MB_ECC_CONFIG 0x007E0000 /*
- * Data ECC
- * Configuration
- */
-
-
-
-#define MD_MEM_ERROR 0x007E0010 /* Memory DIMM error */
-
-
-
-#define MD_MEM_ERROR_CLR 0x007E0018 /*
- * Memory DIMM error
- * clear
- */
-
-
-
-#define MD_BIST_MB_ERR_DATA_0 0x007E0020 /*
- * BIST memory error
- * data
- */
-
-
-
-#define MD_BIST_MB_ERR_DATA_1 0x007E0028 /*
- * BIST memory error
- * data
- */
-
-
-
-#define MD_BIST_MB_ERR_DATA_2 0x007E0030 /*
- * BIST memory error
- * data
- */
-
-
-
-#define MD_BIST_MB_ERR_DATA_3 0x007E0038 /*
- * BIST memory error
- * data
- */
-
-
-
-#define MD_MB_DEBUG 0x007E0040 /* MB debug select */
-
-
-
-
-
-#ifndef __ASSEMBLY__
-
-/************************************************************************
- * *
- * Description: This register shows which regions are in the current *
- * cell. If a region has its bit set in this register, then it uses *
- * the Local Access protection in the directory instead of the *
- * separate per-region protection (which would cause a small *
- * performance penalty). In addition, writeback and write reply *
- * commands from outside the current cell will always check the *
- * directory protection before writing data to memory. Writeback and *
- * write reply commands from inside the current cell will write *
- * memory regardless of the protection value. *
- * This register is also used as the access-rights bit-vector for *
- * most of the ASIC-special (HSpec) portion of the address space. It *
- * covers the BDDIR, LREG, LBOOT, RREG, and RBOOT spaces. It does not *
- * cover the UALIAS and BDECC spaces, as they are covered by the *
- * protection in the directory. If a bit in the bit-vector is set, *
- * the region corresponding to that bit has read/write permission on *
- * these spaces. If the bit is clear, then that region has read-only *
- * access to these spaces (except for LREG/RREG which have no access *
- * when the bit is clear). *
- * The granularity of a region is set by the REGION_SIZE register in *
- * the NI local register space. *
- * NOTE: This means that no processor outside the current cell can *
- * write into the BDDIR, LREG, LBOOT, RREG, or RBOOT spaces. *
- * *
- ************************************************************************/
-
-
-
-
-typedef union md_current_cell_u {
- bdrkreg_t md_current_cell_regval;
- struct {
- bdrkreg_t cc_hspec_prot : 64;
- } md_current_cell_fld_s;
-} md_current_cell_u_t;
-
-
-
-
-/************************************************************************
- * *
- * Description: This register contains three sets of information. *
- * The first set describes the size and configuration of DIMMs that *
- * are plugged into a system, the second set controls which set of *
- * protection checks are performed on each access and the third set *
- * controls various DDR SDRAM timing parameters. *
- * In order to config a DIMM bank, three fields must be initialized: *
- * BANK_SIZE, DRAM_WIDTH, and BANK_ENABLE. The BANK_SIZE field sets *
- * the address range that the MD unit will accept for that DIMM bank. *
- * All addresses larger than the specified size will return errors on *
- * access. In order to read from a DIMM bank, Bedrock must know *
- * whether or not the bank contains x4 or x8/x16 DRAM. The operating *
- * system must query the System Controller for this information and *
- * then set the DRAM_WIDTH field accordingly. The BANK_ENABLE field *
- * can be used to individually enable the two physical banks located *
- * on each DIMM bank. *
- * The contents of this register are preserved through soft-resets. *
- * *
- ************************************************************************/
-
-
-
-
-#ifdef LITTLE_ENDIAN
-
-typedef union md_memory_config_u {
- bdrkreg_t md_memory_config_regval;
- struct {
- bdrkreg_t mc_dimm0_bank_enable : 2;
- bdrkreg_t mc_reserved_7 : 1;
- bdrkreg_t mc_dimm0_dram_width : 1;
- bdrkreg_t mc_dimm0_bank_size : 4;
- bdrkreg_t mc_dimm1_bank_enable : 2;
- bdrkreg_t mc_reserved_6 : 1;
- bdrkreg_t mc_dimm1_dram_width : 1;
- bdrkreg_t mc_dimm1_bank_size : 4;
- bdrkreg_t mc_dimm2_bank_enable : 2;
- bdrkreg_t mc_reserved_5 : 1;
- bdrkreg_t mc_dimm2_dram_width : 1;
- bdrkreg_t mc_dimm2_bank_size : 4;
- bdrkreg_t mc_dimm3_bank_enable : 2;
- bdrkreg_t mc_reserved_4 : 1;
- bdrkreg_t mc_dimm3_dram_width : 1;
- bdrkreg_t mc_dimm3_bank_size : 4;
- bdrkreg_t mc_dimm0_sel : 2;
- bdrkreg_t mc_reserved_3 : 10;
- bdrkreg_t mc_cc_enable : 1;
- bdrkreg_t mc_io_prot_en : 1;
- bdrkreg_t mc_io_prot_ignore : 1;
- bdrkreg_t mc_cpu_prot_ignore : 1;
- bdrkreg_t mc_db_neg_edge : 1;
- bdrkreg_t mc_phase_delay : 1;
- bdrkreg_t mc_delay_mux_sel : 2;
- bdrkreg_t mc_sample_time : 2;
- bdrkreg_t mc_reserved_2 : 2;
- bdrkreg_t mc_mb_neg_edge : 3;
- bdrkreg_t mc_reserved_1 : 1;
- bdrkreg_t mc_rcd_config : 1;
- bdrkreg_t mc_rp_config : 1;
- bdrkreg_t mc_reserved : 2;
- } md_memory_config_fld_s;
-} md_memory_config_u_t;
-
-#else
-
-typedef union md_memory_config_u {
- bdrkreg_t md_memory_config_regval;
- struct {
- bdrkreg_t mc_reserved : 2;
- bdrkreg_t mc_rp_config : 1;
- bdrkreg_t mc_rcd_config : 1;
- bdrkreg_t mc_reserved_1 : 1;
- bdrkreg_t mc_mb_neg_edge : 3;
- bdrkreg_t mc_reserved_2 : 2;
- bdrkreg_t mc_sample_time : 2;
- bdrkreg_t mc_delay_mux_sel : 2;
- bdrkreg_t mc_phase_delay : 1;
- bdrkreg_t mc_db_neg_edge : 1;
- bdrkreg_t mc_cpu_prot_ignore : 1;
- bdrkreg_t mc_io_prot_ignore : 1;
- bdrkreg_t mc_io_prot_en : 1;
- bdrkreg_t mc_cc_enable : 1;
- bdrkreg_t mc_reserved_3 : 10;
- bdrkreg_t mc_dimm0_sel : 2;
- bdrkreg_t mc_dimm3_bank_size : 4;
- bdrkreg_t mc_dimm3_dram_width : 1;
- bdrkreg_t mc_reserved_4 : 1;
- bdrkreg_t mc_dimm3_bank_enable : 2;
- bdrkreg_t mc_dimm2_bank_size : 4;
- bdrkreg_t mc_dimm2_dram_width : 1;
- bdrkreg_t mc_reserved_5 : 1;
- bdrkreg_t mc_dimm2_bank_enable : 2;
- bdrkreg_t mc_dimm1_bank_size : 4;
- bdrkreg_t mc_dimm1_dram_width : 1;
- bdrkreg_t mc_reserved_6 : 1;
- bdrkreg_t mc_dimm1_bank_enable : 2;
- bdrkreg_t mc_dimm0_bank_size : 4;
- bdrkreg_t mc_dimm0_dram_width : 1;
- bdrkreg_t mc_reserved_7 : 1;
- bdrkreg_t mc_dimm0_bank_enable : 2;
- } md_memory_config_fld_s;
-} md_memory_config_u_t;
-
-#endif
-
-
-
-
-
-
-#ifdef LITTLE_ENDIAN
-
-typedef union md_arbitration_control_u {
- bdrkreg_t md_arbitration_control_regval;
- struct {
- bdrkreg_t ac_reply_guar : 4;
- bdrkreg_t ac_write_guar : 4;
- bdrkreg_t ac_reserved : 56;
- } md_arbitration_control_fld_s;
-} md_arbitration_control_u_t;
-
-#else
-
-typedef union md_arbitration_control_u {
- bdrkreg_t md_arbitration_control_regval;
- struct {
- bdrkreg_t ac_reserved : 56;
- bdrkreg_t ac_write_guar : 4;
- bdrkreg_t ac_reply_guar : 4;
- } md_arbitration_control_fld_s;
-} md_arbitration_control_u_t;
-
-#endif
-
-
-
-
-/************************************************************************
- * *
- * Contains page migration control fields. *
- * *
- ************************************************************************/
-
-
-
-
-#ifdef LITTLE_ENDIAN
-
-typedef union md_mig_config_u {
- bdrkreg_t md_mig_config_regval;
- struct {
- bdrkreg_t mc_mig_interval : 10;
- bdrkreg_t mc_reserved_2 : 6;
- bdrkreg_t mc_mig_node_mask : 8;
- bdrkreg_t mc_reserved_1 : 8;
- bdrkreg_t mc_mig_enable : 1;
- bdrkreg_t mc_reserved : 31;
- } md_mig_config_fld_s;
-} md_mig_config_u_t;
-
-#else
-
-typedef union md_mig_config_u {
- bdrkreg_t md_mig_config_regval;
- struct {
- bdrkreg_t mc_reserved : 31;
- bdrkreg_t mc_mig_enable : 1;
- bdrkreg_t mc_reserved_1 : 8;
- bdrkreg_t mc_mig_node_mask : 8;
- bdrkreg_t mc_reserved_2 : 6;
- bdrkreg_t mc_mig_interval : 10;
- } md_mig_config_fld_s;
-} md_mig_config_u_t;
-
-#endif
-
-
-
-
-/************************************************************************
- * *
- * Each register contains the valid bit and address of the entry in *
- * the fetch-and-op for cache 0 (or 1). *
- * *
- ************************************************************************/
-
-
-
-
-#ifdef LITTLE_ENDIAN
-
-typedef union md_fandop_cac_stat0_u {
- bdrkreg_t md_fandop_cac_stat0_regval;
- struct {
- bdrkreg_t fcs_reserved_1 : 6;
- bdrkreg_t fcs_addr : 27;
- bdrkreg_t fcs_reserved : 30;
- bdrkreg_t fcs_valid : 1;
- } md_fandop_cac_stat0_fld_s;
-} md_fandop_cac_stat0_u_t;
-
-#else
-
-typedef union md_fandop_cac_stat0_u {
- bdrkreg_t md_fandop_cac_stat0_regval;
- struct {
- bdrkreg_t fcs_valid : 1;
- bdrkreg_t fcs_reserved : 30;
- bdrkreg_t fcs_addr : 27;
- bdrkreg_t fcs_reserved_1 : 6;
- } md_fandop_cac_stat0_fld_s;
-} md_fandop_cac_stat0_u_t;
-
-#endif
-
-
-
-
-/************************************************************************
- * *
- * Each register contains the valid bit and address of the entry in *
- * the fetch-and-op for cache 0 (or 1). *
- * *
- ************************************************************************/
-
-
-
-
-#ifdef LITTLE_ENDIAN
-
-typedef union md_fandop_cac_stat1_u {
- bdrkreg_t md_fandop_cac_stat1_regval;
- struct {
- bdrkreg_t fcs_reserved_1 : 6;
- bdrkreg_t fcs_addr : 27;
- bdrkreg_t fcs_reserved : 30;
- bdrkreg_t fcs_valid : 1;
- } md_fandop_cac_stat1_fld_s;
-} md_fandop_cac_stat1_u_t;
-
-#else
-
-typedef union md_fandop_cac_stat1_u {
- bdrkreg_t md_fandop_cac_stat1_regval;
- struct {
- bdrkreg_t fcs_valid : 1;
- bdrkreg_t fcs_reserved : 30;
- bdrkreg_t fcs_addr : 27;
- bdrkreg_t fcs_reserved_1 : 6;
- } md_fandop_cac_stat1_fld_s;
-} md_fandop_cac_stat1_u_t;
-
-#endif
-
-
-
-
-/************************************************************************
- * *
- * Description: Contains a number of fields to capture various *
- * random memory/directory errors. For each 2-bit field, the LSB *
- * indicates that additional information has been captured for the *
- * error and the MSB indicates overrun, thus: *
- * x1: bits 51...0 of this register contain additional information *
- * for the message that caused this error *
- * 1x: overrun occurred *
- * *
- ************************************************************************/
-
-
-
-
-#ifdef LITTLE_ENDIAN
-
-typedef union md_misc0_error_u {
- bdrkreg_t md_misc0_error_regval;
- struct {
- bdrkreg_t me_command : 7;
- bdrkreg_t me_reserved_4 : 1;
- bdrkreg_t me_source : 11;
- bdrkreg_t me_reserved_3 : 1;
- bdrkreg_t me_suppl : 11;
- bdrkreg_t me_reserved_2 : 1;
- bdrkreg_t me_virtual_channel : 2;
- bdrkreg_t me_reserved_1 : 2;
- bdrkreg_t me_tail : 1;
- bdrkreg_t me_reserved : 11;
- bdrkreg_t me_xb_error : 4;
- bdrkreg_t me_bad_partial_data : 2;
- bdrkreg_t me_missing_dv : 2;
- bdrkreg_t me_short_pack : 2;
- bdrkreg_t me_long_pack : 2;
- bdrkreg_t me_ill_msg : 2;
- bdrkreg_t me_ill_revision : 2;
- } md_misc0_error_fld_s;
-} md_misc0_error_u_t;
-
-#else
-
-typedef union md_misc0_error_u {
- bdrkreg_t md_misc0_error_regval;
- struct {
- bdrkreg_t me_ill_revision : 2;
- bdrkreg_t me_ill_msg : 2;
- bdrkreg_t me_long_pack : 2;
- bdrkreg_t me_short_pack : 2;
- bdrkreg_t me_missing_dv : 2;
- bdrkreg_t me_bad_partial_data : 2;
- bdrkreg_t me_xb_error : 4;
- bdrkreg_t me_reserved : 11;
- bdrkreg_t me_tail : 1;
- bdrkreg_t me_reserved_1 : 2;
- bdrkreg_t me_virtual_channel : 2;
- bdrkreg_t me_reserved_2 : 1;
- bdrkreg_t me_suppl : 11;
- bdrkreg_t me_reserved_3 : 1;
- bdrkreg_t me_source : 11;
- bdrkreg_t me_reserved_4 : 1;
- bdrkreg_t me_command : 7;
- } md_misc0_error_fld_s;
-} md_misc0_error_u_t;
-
-#endif
-
-
-
-
-/************************************************************************
- * *
- * Address for error captured in MISC0_ERROR. Error valid bits are *
- * repeated in both MISC0_ERROR and MISC1_ERROR (allowing them to be *
- * read sequentially without missing any errors). *
- * *
- ************************************************************************/
-
-
-
-
-#ifdef LITTLE_ENDIAN
-
-typedef union md_misc1_error_u {
- bdrkreg_t md_misc1_error_regval;
- struct {
- bdrkreg_t me_reserved_1 : 3;
- bdrkreg_t me_address : 38;
- bdrkreg_t me_reserved : 7;
- bdrkreg_t me_xb_error : 4;
- bdrkreg_t me_bad_partial_data : 2;
- bdrkreg_t me_missing_dv : 2;
- bdrkreg_t me_short_pack : 2;
- bdrkreg_t me_long_pack : 2;
- bdrkreg_t me_ill_msg : 2;
- bdrkreg_t me_ill_revision : 2;
- } md_misc1_error_fld_s;
-} md_misc1_error_u_t;
-
-#else
-
-typedef union md_misc1_error_u {
- bdrkreg_t md_misc1_error_regval;
- struct {
- bdrkreg_t me_ill_revision : 2;
- bdrkreg_t me_ill_msg : 2;
- bdrkreg_t me_long_pack : 2;
- bdrkreg_t me_short_pack : 2;
- bdrkreg_t me_missing_dv : 2;
- bdrkreg_t me_bad_partial_data : 2;
- bdrkreg_t me_xb_error : 4;
- bdrkreg_t me_reserved : 7;
- bdrkreg_t me_address : 38;
- bdrkreg_t me_reserved_1 : 3;
- } md_misc1_error_fld_s;
-} md_misc1_error_u_t;
-
-#endif
-
-
-
-
-/************************************************************************
- * *
- * Address for error captured in MISC0_ERROR. Error valid bits are *
- * repeated in both MISC0_ERROR and MISC1_ERROR (allowing them to be *
- * read sequentially without missing any errors). *
- * *
- ************************************************************************/
-
-
-
-
-#ifdef LITTLE_ENDIAN
-
-typedef union md_misc1_error_clr_u {
- bdrkreg_t md_misc1_error_clr_regval;
- struct {
- bdrkreg_t mec_reserved_1 : 3;
- bdrkreg_t mec_address : 38;
- bdrkreg_t mec_reserved : 7;
- bdrkreg_t mec_xb_error : 4;
- bdrkreg_t mec_bad_partial_data : 2;
- bdrkreg_t mec_missing_dv : 2;
- bdrkreg_t mec_short_pack : 2;
- bdrkreg_t mec_long_pack : 2;
- bdrkreg_t mec_ill_msg : 2;
- bdrkreg_t mec_ill_revision : 2;
- } md_misc1_error_clr_fld_s;
-} md_misc1_error_clr_u_t;
-
-#else
-
-typedef union md_misc1_error_clr_u {
- bdrkreg_t md_misc1_error_clr_regval;
- struct {
- bdrkreg_t mec_ill_revision : 2;
- bdrkreg_t mec_ill_msg : 2;
- bdrkreg_t mec_long_pack : 2;
- bdrkreg_t mec_short_pack : 2;
- bdrkreg_t mec_missing_dv : 2;
- bdrkreg_t mec_bad_partial_data : 2;
- bdrkreg_t mec_xb_error : 4;
- bdrkreg_t mec_reserved : 7;
- bdrkreg_t mec_address : 38;
- bdrkreg_t mec_reserved_1 : 3;
- } md_misc1_error_clr_fld_s;
-} md_misc1_error_clr_u_t;
-
-#endif
-
-
-
-
-/************************************************************************
- * *
- * Description: The MD no longer allows for arbitrarily sizing the *
- * reply queues, so all of the fields in this register are read-only *
- * and contain the reset default value of 12 for the MOQHs (for *
- * headers) and 24 for the MOQDs (for data). *
- * Reading from this register returns the values currently held in *
- * the MD's credit counters. Writing to the register resets the *
- * counters to the default reset values specified in the table below. *
- * *
- ************************************************************************/
-
-
-
-
-#ifdef LITTLE_ENDIAN
-
-typedef union md_outgoing_rp_queue_size_u {
- bdrkreg_t md_outgoing_rp_queue_size_regval;
- struct {
- bdrkreg_t orqs_reserved_6 : 8;
- bdrkreg_t orqs_moqh_p0_rp_size : 4;
- bdrkreg_t orqs_reserved_5 : 4;
- bdrkreg_t orqs_moqh_p1_rp_size : 4;
- bdrkreg_t orqs_reserved_4 : 4;
- bdrkreg_t orqs_moqh_np_rp_size : 4;
- bdrkreg_t orqs_reserved_3 : 4;
- bdrkreg_t orqs_moqd_pi0_rp_size : 5;
- bdrkreg_t orqs_reserved_2 : 3;
- bdrkreg_t orqs_moqd_pi1_rp_size : 5;
- bdrkreg_t orqs_reserved_1 : 3;
- bdrkreg_t orqs_moqd_np_rp_size : 5;
- bdrkreg_t orqs_reserved : 11;
- } md_outgoing_rp_queue_size_fld_s;
-} md_outgoing_rp_queue_size_u_t;
-
-#else
-
-typedef union md_outgoing_rp_queue_size_u {
- bdrkreg_t md_outgoing_rp_queue_size_regval;
- struct {
- bdrkreg_t orqs_reserved : 11;
- bdrkreg_t orqs_moqd_np_rp_size : 5;
- bdrkreg_t orqs_reserved_1 : 3;
- bdrkreg_t orqs_moqd_pi1_rp_size : 5;
- bdrkreg_t orqs_reserved_2 : 3;
- bdrkreg_t orqs_moqd_pi0_rp_size : 5;
- bdrkreg_t orqs_reserved_3 : 4;
- bdrkreg_t orqs_moqh_np_rp_size : 4;
- bdrkreg_t orqs_reserved_4 : 4;
- bdrkreg_t orqs_moqh_p1_rp_size : 4;
- bdrkreg_t orqs_reserved_5 : 4;
- bdrkreg_t orqs_moqh_p0_rp_size : 4;
- bdrkreg_t orqs_reserved_6 : 8;
- } md_outgoing_rp_queue_size_fld_s;
-} md_outgoing_rp_queue_size_u_t;
-
-#endif
-
-
-
-
-
-
-#ifdef LITTLE_ENDIAN
-
-typedef union md_perf_sel0_u {
- bdrkreg_t md_perf_sel0_regval;
- struct {
- bdrkreg_t ps_cnt_mode : 2;
- bdrkreg_t ps_reserved_2 : 2;
- bdrkreg_t ps_activity : 4;
- bdrkreg_t ps_source : 7;
- bdrkreg_t ps_reserved_1 : 1;
- bdrkreg_t ps_channel : 4;
- bdrkreg_t ps_command : 40;
- bdrkreg_t ps_reserved : 3;
- bdrkreg_t ps_interrupt : 1;
- } md_perf_sel0_fld_s;
-} md_perf_sel0_u_t;
-
-#else
-
-typedef union md_perf_sel0_u {
- bdrkreg_t md_perf_sel0_regval;
- struct {
- bdrkreg_t ps_interrupt : 1;
- bdrkreg_t ps_reserved : 3;
- bdrkreg_t ps_command : 40;
- bdrkreg_t ps_channel : 4;
- bdrkreg_t ps_reserved_1 : 1;
- bdrkreg_t ps_source : 7;
- bdrkreg_t ps_activity : 4;
- bdrkreg_t ps_reserved_2 : 2;
- bdrkreg_t ps_cnt_mode : 2;
- } md_perf_sel0_fld_s;
-} md_perf_sel0_u_t;
-
-#endif
-
-
-
-
-#ifdef LITTLE_ENDIAN
-
-typedef union md_perf_sel1_u {
- bdrkreg_t md_perf_sel1_regval;
- struct {
- bdrkreg_t ps_cnt_mode : 2;
- bdrkreg_t ps_reserved_2 : 2;
- bdrkreg_t ps_activity : 4;
- bdrkreg_t ps_source : 7;
- bdrkreg_t ps_reserved_1 : 1;
- bdrkreg_t ps_channel : 4;
- bdrkreg_t ps_command : 40;
- bdrkreg_t ps_reserved : 3;
- bdrkreg_t ps_interrupt : 1;
- } md_perf_sel1_fld_s;
-} md_perf_sel1_u_t;
-
-#else
-
-typedef union md_perf_sel1_u {
- bdrkreg_t md_perf_sel1_regval;
- struct {
- bdrkreg_t ps_interrupt : 1;
- bdrkreg_t ps_reserved : 3;
- bdrkreg_t ps_command : 40;
- bdrkreg_t ps_channel : 4;
- bdrkreg_t ps_reserved_1 : 1;
- bdrkreg_t ps_source : 7;
- bdrkreg_t ps_activity : 4;
- bdrkreg_t ps_reserved_2 : 2;
- bdrkreg_t ps_cnt_mode : 2;
- } md_perf_sel1_fld_s;
-} md_perf_sel1_u_t;
-
-#endif
-
-
-
-
-/************************************************************************
- * *
- * Performance counter. *
- * *
- ************************************************************************/
-
-
-
-
-#ifdef LITTLE_ENDIAN
-
-typedef union md_perf_cnt0_u {
- bdrkreg_t md_perf_cnt0_regval;
- struct {
- bdrkreg_t pc_perf_cnt : 41;
- bdrkreg_t pc_reserved : 23;
- } md_perf_cnt0_fld_s;
-} md_perf_cnt0_u_t;
-
-#else
-
-typedef union md_perf_cnt0_u {
- bdrkreg_t md_perf_cnt0_regval;
- struct {
- bdrkreg_t pc_reserved : 23;
- bdrkreg_t pc_perf_cnt : 41;
- } md_perf_cnt0_fld_s;
-} md_perf_cnt0_u_t;
-
-#endif
-
-
-
-
-/************************************************************************
- * *
- * Performance counter. *
- * *
- ************************************************************************/
-
-
-
-
-#ifdef LITTLE_ENDIAN
-
-typedef union md_perf_cnt1_u {
- bdrkreg_t md_perf_cnt1_regval;
- struct {
- bdrkreg_t pc_perf_cnt : 41;
- bdrkreg_t pc_reserved : 23;
- } md_perf_cnt1_fld_s;
-} md_perf_cnt1_u_t;
-
-#else
-
-typedef union md_perf_cnt1_u {
- bdrkreg_t md_perf_cnt1_regval;
- struct {
- bdrkreg_t pc_reserved : 23;
- bdrkreg_t pc_perf_cnt : 41;
- } md_perf_cnt1_fld_s;
-} md_perf_cnt1_u_t;
-
-#endif
-
-
-
-
-/************************************************************************
- * *
- * Description: This register contains the control for *
- * memory/directory refresh. Once the MEMORY_CONFIG register contains *
- * the correct DIMM information, the hardware takes care of *
- * refreshing all the banks in the system. Therefore, the value in *
- * the counter threshold is corresponds exactly to the refresh value *
- * required by the SDRAM parts (expressed in Bedrock clock cycles). *
- * The refresh will execute whenever there is a free cycle and there *
- * are still banks that have not been refreshed in the current *
- * window. If the window expires with banks still waiting to be *
- * refreshed, all other transactions are halted until the banks are *
- * refreshed. *
- * The upper order bit contains an enable, which may be needed for *
- * correct initialization of the DIMMs (according to the specs, the *
- * first operation to the DIMMs should be a mode register write, not *
- * a refresh, so this bit is cleared on reset) and is also useful for *
- * diagnostic purposes. *
- * For the SDRAM parts used by Bedrock, 4096 refreshes need to be *
- * issued during every 64 ms window, resulting in a refresh threshold *
- * of 3125 Bedrock cycles. *
- * The ENABLE and CNT_THRESH fields of this register are preserved *
- * through soft-resets. *
- * *
- ************************************************************************/
-
-
-
-
-#ifdef LITTLE_ENDIAN
-
-typedef union md_refresh_control_u {
- bdrkreg_t md_refresh_control_regval;
- struct {
- bdrkreg_t rc_cnt_thresh : 12;
- bdrkreg_t rc_counter : 12;
- bdrkreg_t rc_reserved : 39;
- bdrkreg_t rc_enable : 1;
- } md_refresh_control_fld_s;
-} md_refresh_control_u_t;
-
-#else
-
-typedef union md_refresh_control_u {
- bdrkreg_t md_refresh_control_regval;
- struct {
- bdrkreg_t rc_enable : 1;
- bdrkreg_t rc_reserved : 39;
- bdrkreg_t rc_counter : 12;
- bdrkreg_t rc_cnt_thresh : 12;
- } md_refresh_control_fld_s;
-} md_refresh_control_u_t;
-
-#endif
-
-
-
-
-/************************************************************************
- * *
- * This register controls the read and write timing for Flash PROM, *
- * UART and Synergy junk bus devices. *
- * *
- ************************************************************************/
-
-
-
-
-#ifdef LITTLE_ENDIAN
-
-typedef union md_junk_bus_timing_u {
- bdrkreg_t md_junk_bus_timing_regval;
- struct {
- bdrkreg_t jbt_fprom_setup_hold : 8;
- bdrkreg_t jbt_fprom_enable : 8;
- bdrkreg_t jbt_uart_setup_hold : 8;
- bdrkreg_t jbt_uart_enable : 8;
- bdrkreg_t jbt_synergy_setup_hold : 8;
- bdrkreg_t jbt_synergy_enable : 8;
- bdrkreg_t jbt_reserved : 16;
- } md_junk_bus_timing_fld_s;
-} md_junk_bus_timing_u_t;
-
-#else
-
-typedef union md_junk_bus_timing_u {
- bdrkreg_t md_junk_bus_timing_regval;
- struct {
- bdrkreg_t jbt_reserved : 16;
- bdrkreg_t jbt_synergy_enable : 8;
- bdrkreg_t jbt_synergy_setup_hold : 8;
- bdrkreg_t jbt_uart_enable : 8;
- bdrkreg_t jbt_uart_setup_hold : 8;
- bdrkreg_t jbt_fprom_enable : 8;
- bdrkreg_t jbt_fprom_setup_hold : 8;
- } md_junk_bus_timing_fld_s;
-} md_junk_bus_timing_u_t;
-
-#endif
-
-
-
-
-/************************************************************************
- * *
- * Each of these addresses allows the value on one 8-bit bank of *
- * LEDs to be read. *
- * *
- ************************************************************************/
-
-
-
-
-#ifdef LITTLE_ENDIAN
-
-typedef union md_led0_u {
- bdrkreg_t md_led0_regval;
- struct {
- bdrkreg_t l_data : 8;
- bdrkreg_t l_reserved : 56;
- } md_led0_fld_s;
-} md_led0_u_t;
-
-#else
-
-typedef union md_led0_u {
- bdrkreg_t md_led0_regval;
- struct {
- bdrkreg_t l_reserved : 56;
- bdrkreg_t l_data : 8;
- } md_led0_fld_s;
-} md_led0_u_t;
-
-#endif
-
-
-
-
-/************************************************************************
- * *
- * Each of these addresses allows the value on one 8-bit bank of *
- * LEDs to be read. *
- * *
- ************************************************************************/
-
-
-
-
-#ifdef LITTLE_ENDIAN
-
-typedef union md_led1_u {
- bdrkreg_t md_led1_regval;
- struct {
- bdrkreg_t l_data : 8;
- bdrkreg_t l_reserved : 56;
- } md_led1_fld_s;
-} md_led1_u_t;
-
-#else
-
-typedef union md_led1_u {
- bdrkreg_t md_led1_regval;
- struct {
- bdrkreg_t l_reserved : 56;
- bdrkreg_t l_data : 8;
- } md_led1_fld_s;
-} md_led1_u_t;
-
-#endif
-
-
-
-
-/************************************************************************
- * *
- * Each of these addresses allows the value on one 8-bit bank of *
- * LEDs to be read. *
- * *
- ************************************************************************/
-
-
-
-
-#ifdef LITTLE_ENDIAN
-
-typedef union md_led2_u {
- bdrkreg_t md_led2_regval;
- struct {
- bdrkreg_t l_data : 8;
- bdrkreg_t l_reserved : 56;
- } md_led2_fld_s;
-} md_led2_u_t;
-
-#else
-
-typedef union md_led2_u {
- bdrkreg_t md_led2_regval;
- struct {
- bdrkreg_t l_reserved : 56;
- bdrkreg_t l_data : 8;
- } md_led2_fld_s;
-} md_led2_u_t;
-
-#endif
-
-
-
-
-/************************************************************************
- * *
- * Each of these addresses allows the value on one 8-bit bank of *
- * LEDs to be read. *
- * *
- ************************************************************************/
-
-
-
-
-#ifdef LITTLE_ENDIAN
-
-typedef union md_led3_u {
- bdrkreg_t md_led3_regval;
- struct {
- bdrkreg_t l_data : 8;
- bdrkreg_t l_reserved : 56;
- } md_led3_fld_s;
-} md_led3_u_t;
-
-#else
-
-typedef union md_led3_u {
- bdrkreg_t md_led3_regval;
- struct {
- bdrkreg_t l_reserved : 56;
- bdrkreg_t l_data : 8;
- } md_led3_fld_s;
-} md_led3_u_t;
-
-#endif
-
-
-
-
-/************************************************************************
- * *
- * Core control for the BIST function. Start and stop BIST at any *
- * time. *
- * *
- ************************************************************************/
-
-
-
-
-#ifdef LITTLE_ENDIAN
-
-typedef union md_bist_ctl_u {
- bdrkreg_t md_bist_ctl_regval;
- struct {
- bdrkreg_t bc_bist_start : 1;
- bdrkreg_t bc_bist_stop : 1;
- bdrkreg_t bc_bist_reset : 1;
- bdrkreg_t bc_reserved_1 : 1;
- bdrkreg_t bc_bank_num : 1;
- bdrkreg_t bc_dimm_num : 2;
- bdrkreg_t bc_reserved : 57;
- } md_bist_ctl_fld_s;
-} md_bist_ctl_u_t;
-
-#else
-
-typedef union md_bist_ctl_u {
- bdrkreg_t md_bist_ctl_regval;
- struct {
- bdrkreg_t bc_reserved : 57;
- bdrkreg_t bc_dimm_num : 2;
- bdrkreg_t bc_bank_num : 1;
- bdrkreg_t bc_reserved_1 : 1;
- bdrkreg_t bc_bist_reset : 1;
- bdrkreg_t bc_bist_stop : 1;
- bdrkreg_t bc_bist_start : 1;
- } md_bist_ctl_fld_s;
-} md_bist_ctl_u_t;
-
-#endif
-
-
-
-
-/************************************************************************
- * *
- * Contain the initial BIST data nibble and the 4-bit data control *
- * field.. *
- * *
- ************************************************************************/
-
-
-
-#ifdef LITTLE_ENDIAN
-
-typedef union md_bist_data_u {
- bdrkreg_t md_bist_data_regval;
- struct {
- bdrkreg_t bd_bist_data : 4;
- bdrkreg_t bd_bist_nibble : 1;
- bdrkreg_t bd_bist_byte : 1;
- bdrkreg_t bd_bist_cycle : 1;
- bdrkreg_t bd_bist_write : 1;
- bdrkreg_t bd_reserved : 56;
- } md_bist_data_fld_s;
-} md_bist_data_u_t;
-
-#else
-
-typedef union md_bist_data_u {
- bdrkreg_t md_bist_data_regval;
- struct {
- bdrkreg_t bd_reserved : 56;
- bdrkreg_t bd_bist_write : 1;
- bdrkreg_t bd_bist_cycle : 1;
- bdrkreg_t bd_bist_byte : 1;
- bdrkreg_t bd_bist_nibble : 1;
- bdrkreg_t bd_bist_data : 4;
- } md_bist_data_fld_s;
-} md_bist_data_u_t;
-
-#endif
-
-
-
-
-/************************************************************************
- * *
- * Captures the BIST error address and indicates whether it is an MB *
- * error or DB error. *
- * *
- ************************************************************************/
-
-
-
-
-#ifdef LITTLE_ENDIAN
-
-typedef union md_bist_ab_err_addr_u {
- bdrkreg_t md_bist_ab_err_addr_regval;
- struct {
- bdrkreg_t baea_be_db_cas_addr : 15;
- bdrkreg_t baea_reserved_3 : 1;
- bdrkreg_t baea_be_mb_cas_addr : 15;
- bdrkreg_t baea_reserved_2 : 1;
- bdrkreg_t baea_be_ras_addr : 15;
- bdrkreg_t baea_reserved_1 : 1;
- bdrkreg_t baea_bist_mb_error : 1;
- bdrkreg_t baea_bist_db_error : 1;
- bdrkreg_t baea_reserved : 14;
- } md_bist_ab_err_addr_fld_s;
-} md_bist_ab_err_addr_u_t;
-
-#else
-
-typedef union md_bist_ab_err_addr_u {
- bdrkreg_t md_bist_ab_err_addr_regval;
- struct {
- bdrkreg_t baea_reserved : 14;
- bdrkreg_t baea_bist_db_error : 1;
- bdrkreg_t baea_bist_mb_error : 1;
- bdrkreg_t baea_reserved_1 : 1;
- bdrkreg_t baea_be_ras_addr : 15;
- bdrkreg_t baea_reserved_2 : 1;
- bdrkreg_t baea_be_mb_cas_addr : 15;
- bdrkreg_t baea_reserved_3 : 1;
- bdrkreg_t baea_be_db_cas_addr : 15;
- } md_bist_ab_err_addr_fld_s;
-} md_bist_ab_err_addr_u_t;
-
-#endif
-
-
-
-/************************************************************************
- * *
- * Contains information on BIST progress and memory bank currently *
- * under BIST. *
- * *
- ************************************************************************/
-
-
-
-
-#ifdef LITTLE_ENDIAN
-
-typedef union md_bist_status_u {
- bdrkreg_t md_bist_status_regval;
- struct {
- bdrkreg_t bs_bist_passed : 1;
- bdrkreg_t bs_bist_done : 1;
- bdrkreg_t bs_reserved : 62;
- } md_bist_status_fld_s;
-} md_bist_status_u_t;
-
-#else
-
-typedef union md_bist_status_u {
- bdrkreg_t md_bist_status_regval;
- struct {
- bdrkreg_t bs_reserved : 62;
- bdrkreg_t bs_bist_done : 1;
- bdrkreg_t bs_bist_passed : 1;
- } md_bist_status_fld_s;
-} md_bist_status_u_t;
-
-#endif
-
-
-
-
-/************************************************************************
- * *
- * Contains 3 bits that allow the selection of IB debug information *
- * at the debug port (see design specification for available debug *
- * information). *
- * *
- ************************************************************************/
-
-
-
-
-#ifdef LITTLE_ENDIAN
-
-typedef union md_ib_debug_u {
- bdrkreg_t md_ib_debug_regval;
- struct {
- bdrkreg_t id_ib_debug_sel : 2;
- bdrkreg_t id_reserved : 62;
- } md_ib_debug_fld_s;
-} md_ib_debug_u_t;
-
-#else
-
-typedef union md_ib_debug_u {
- bdrkreg_t md_ib_debug_regval;
- struct {
- bdrkreg_t id_reserved : 62;
- bdrkreg_t id_ib_debug_sel : 2;
- } md_ib_debug_fld_s;
-} md_ib_debug_u_t;
-
-#endif
-
-
-
-
-/************************************************************************
- * *
- * Contains the directory specific mode bits. The contents of this *
- * register are preserved through soft-resets. *
- * *
- ************************************************************************/
-
-
-
-
-#ifdef LITTLE_ENDIAN
-
-typedef union md_dir_config_u {
- bdrkreg_t md_dir_config_regval;
- struct {
- bdrkreg_t dc_dir_flavor : 1;
- bdrkreg_t dc_ignore_dir_ecc : 1;
- bdrkreg_t dc_reserved : 62;
- } md_dir_config_fld_s;
-} md_dir_config_u_t;
-
-#else
-
-typedef union md_dir_config_u {
- bdrkreg_t md_dir_config_regval;
- struct {
- bdrkreg_t dc_reserved : 62;
- bdrkreg_t dc_ignore_dir_ecc : 1;
- bdrkreg_t dc_dir_flavor : 1;
- } md_dir_config_fld_s;
-} md_dir_config_u_t;
-
-#endif
-
-
-
-
-/************************************************************************
- * *
- * Description: Contains information on uncorrectable and *
- * correctable directory ECC errors, along with protection ECC *
- * errors. The priority of ECC errors latched is: uncorrectable *
- * directory, protection error, correctable directory. Thus the valid *
- * bits signal: *
- * 1xxx: uncorrectable directory ECC error (UCE) *
- * 01xx: access protection double bit error (AE) *
- * 001x: correctable directory ECC error (CE) *
- * 0001: access protection correctable error (ACE) *
- * If the UCE valid bit is set, the address field contains a pointer *
- * to the Hspec address of the offending directory entry, the *
- * syndrome field contains the bad syndrome, and the UCE overrun bit *
- * indicates whether multiple double-bit errors were received. *
- * If the UCE valid bit is clear but the AE valid bit is set, the *
- * address field contains a pointer to the Hspec address of the *
- * offending protection entry, the Bad Protection field contains the *
- * 4-bit bad protection value, the PROT_INDEX field shows which of *
- * the 8 protection values in the word was bad and the AE overrun bit *
- * indicates whether multiple AE errors were received. *
- * If the UCE and AE valid bits are clear, but the CE valid bit is *
- * set, the address field contains a pointer to the Hspec address of *
- * the offending directory entry, the syndrome field contains the bad *
- * syndrome, and the CE overrun bit indicates whether multiple *
- * single-bit errors were received. *
- * *
- ************************************************************************/
-
-
-
-
-#ifdef LITTLE_ENDIAN
-
-typedef union md_dir_error_u {
- bdrkreg_t md_dir_error_regval;
- struct {
- bdrkreg_t de_reserved_3 : 3;
- bdrkreg_t de_hspec_addr : 30;
- bdrkreg_t de_reserved_2 : 7;
- bdrkreg_t de_bad_syn : 7;
- bdrkreg_t de_reserved_1 : 1;
- bdrkreg_t de_bad_protect : 4;
- bdrkreg_t de_prot_index : 3;
- bdrkreg_t de_reserved : 1;
- bdrkreg_t de_ace_overrun : 1;
- bdrkreg_t de_ce_overrun : 1;
- bdrkreg_t de_ae_overrun : 1;
- bdrkreg_t de_uce_overrun : 1;
- bdrkreg_t de_ace_valid : 1;
- bdrkreg_t de_ce_valid : 1;
- bdrkreg_t de_ae_valid : 1;
- bdrkreg_t de_uce_valid : 1;
- } md_dir_error_fld_s;
-} md_dir_error_u_t;
-
-#else
-
-typedef union md_dir_error_u {
- bdrkreg_t md_dir_error_regval;
- struct {
- bdrkreg_t de_uce_valid : 1;
- bdrkreg_t de_ae_valid : 1;
- bdrkreg_t de_ce_valid : 1;
- bdrkreg_t de_ace_valid : 1;
- bdrkreg_t de_uce_overrun : 1;
- bdrkreg_t de_ae_overrun : 1;
- bdrkreg_t de_ce_overrun : 1;
- bdrkreg_t de_ace_overrun : 1;
- bdrkreg_t de_reserved : 1;
- bdrkreg_t de_prot_index : 3;
- bdrkreg_t de_bad_protect : 4;
- bdrkreg_t de_reserved_1 : 1;
- bdrkreg_t de_bad_syn : 7;
- bdrkreg_t de_reserved_2 : 7;
- bdrkreg_t de_hspec_addr : 30;
- bdrkreg_t de_reserved_3 : 3;
- } md_dir_error_fld_s;
-} md_dir_error_u_t;
-
-#endif
-
-
-
-
-/************************************************************************
- * *
- * Description: Contains information on uncorrectable and *
- * correctable directory ECC errors, along with protection ECC *
- * errors. The priority of ECC errors latched is: uncorrectable *
- * directory, protection error, correctable directory. Thus the valid *
- * bits signal: *
- * 1xxx: uncorrectable directory ECC error (UCE) *
- * 01xx: access protection double bit error (AE) *
- * 001x: correctable directory ECC error (CE) *
- * 0001: access protection correctable error (ACE) *
- * If the UCE valid bit is set, the address field contains a pointer *
- * to the Hspec address of the offending directory entry, the *
- * syndrome field contains the bad syndrome, and the UCE overrun bit *
- * indicates whether multiple double-bit errors were received. *
- * If the UCE valid bit is clear but the AE valid bit is set, the *
- * address field contains a pointer to the Hspec address of the *
- * offending protection entry, the Bad Protection field contains the *
- * 4-bit bad protection value, the PROT_INDEX field shows which of *
- * the 8 protection values in the word was bad and the AE overrun bit *
- * indicates whether multiple AE errors were received. *
- * If the UCE and AE valid bits are clear, but the CE valid bit is *
- * set, the address field contains a pointer to the Hspec address of *
- * the offending directory entry, the syndrome field contains the bad *
- * syndrome, and the CE overrun bit indicates whether multiple *
- * single-bit errors were received. *
- * *
- ************************************************************************/
-
-
-
-
-#ifdef LITTLE_ENDIAN
-
-typedef union md_dir_error_clr_u {
- bdrkreg_t md_dir_error_clr_regval;
- struct {
- bdrkreg_t dec_reserved_3 : 3;
- bdrkreg_t dec_hspec_addr : 30;
- bdrkreg_t dec_reserved_2 : 7;
- bdrkreg_t dec_bad_syn : 7;
- bdrkreg_t dec_reserved_1 : 1;
- bdrkreg_t dec_bad_protect : 4;
- bdrkreg_t dec_prot_index : 3;
- bdrkreg_t dec_reserved : 1;
- bdrkreg_t dec_ace_overrun : 1;
- bdrkreg_t dec_ce_overrun : 1;
- bdrkreg_t dec_ae_overrun : 1;
- bdrkreg_t dec_uce_overrun : 1;
- bdrkreg_t dec_ace_valid : 1;
- bdrkreg_t dec_ce_valid : 1;
- bdrkreg_t dec_ae_valid : 1;
- bdrkreg_t dec_uce_valid : 1;
- } md_dir_error_clr_fld_s;
-} md_dir_error_clr_u_t;
-
-#else
-
-typedef union md_dir_error_clr_u {
- bdrkreg_t md_dir_error_clr_regval;
- struct {
- bdrkreg_t dec_uce_valid : 1;
- bdrkreg_t dec_ae_valid : 1;
- bdrkreg_t dec_ce_valid : 1;
- bdrkreg_t dec_ace_valid : 1;
- bdrkreg_t dec_uce_overrun : 1;
- bdrkreg_t dec_ae_overrun : 1;
- bdrkreg_t dec_ce_overrun : 1;
- bdrkreg_t dec_ace_overrun : 1;
- bdrkreg_t dec_reserved : 1;
- bdrkreg_t dec_prot_index : 3;
- bdrkreg_t dec_bad_protect : 4;
- bdrkreg_t dec_reserved_1 : 1;
- bdrkreg_t dec_bad_syn : 7;
- bdrkreg_t dec_reserved_2 : 7;
- bdrkreg_t dec_hspec_addr : 30;
- bdrkreg_t dec_reserved_3 : 3;
- } md_dir_error_clr_fld_s;
-} md_dir_error_clr_u_t;
-
-#endif
-
-
-
-
-/************************************************************************
- * *
- * Contains information on requests that encounter no valid protocol *
- * table entry. *
- * *
- ************************************************************************/
-
-
-
-
-#ifdef LITTLE_ENDIAN
-
-typedef union md_protocol_error_u {
- bdrkreg_t md_protocol_error_regval;
- struct {
- bdrkreg_t pe_overrun : 1;
- bdrkreg_t pe_pointer_me : 1;
- bdrkreg_t pe_reserved_1 : 1;
- bdrkreg_t pe_address : 30;
- bdrkreg_t pe_reserved : 1;
- bdrkreg_t pe_ptr1_btmbits : 3;
- bdrkreg_t pe_dir_format : 2;
- bdrkreg_t pe_dir_state : 3;
- bdrkreg_t pe_priority : 1;
- bdrkreg_t pe_access : 1;
- bdrkreg_t pe_msg_type : 8;
- bdrkreg_t pe_initiator : 11;
- bdrkreg_t pe_valid : 1;
- } md_protocol_error_fld_s;
-} md_protocol_error_u_t;
-
-#else
-
-typedef union md_protocol_error_u {
- bdrkreg_t md_protocol_error_regval;
- struct {
- bdrkreg_t pe_valid : 1;
- bdrkreg_t pe_initiator : 11;
- bdrkreg_t pe_msg_type : 8;
- bdrkreg_t pe_access : 1;
- bdrkreg_t pe_priority : 1;
- bdrkreg_t pe_dir_state : 3;
- bdrkreg_t pe_dir_format : 2;
- bdrkreg_t pe_ptr1_btmbits : 3;
- bdrkreg_t pe_reserved : 1;
- bdrkreg_t pe_address : 30;
- bdrkreg_t pe_reserved_1 : 1;
- bdrkreg_t pe_pointer_me : 1;
- bdrkreg_t pe_overrun : 1;
- } md_protocol_error_fld_s;
-} md_protocol_error_u_t;
-
-#endif
-
-
-
-
-/************************************************************************
- * *
- * Contains information on requests that encounter no valid protocol *
- * table entry. *
- * *
- ************************************************************************/
-
-
-
-
-#ifdef LITTLE_ENDIAN
-
-typedef union md_protocol_err_clr_u {
- bdrkreg_t md_protocol_err_clr_regval;
- struct {
- bdrkreg_t pec_overrun : 1;
- bdrkreg_t pec_pointer_me : 1;
- bdrkreg_t pec_reserved_1 : 1;
- bdrkreg_t pec_address : 30;
- bdrkreg_t pec_reserved : 1;
- bdrkreg_t pec_ptr1_btmbits : 3;
- bdrkreg_t pec_dir_format : 2;
- bdrkreg_t pec_dir_state : 3;
- bdrkreg_t pec_priority : 1;
- bdrkreg_t pec_access : 1;
- bdrkreg_t pec_msg_type : 8;
- bdrkreg_t pec_initiator : 11;
- bdrkreg_t pec_valid : 1;
- } md_protocol_err_clr_fld_s;
-} md_protocol_err_clr_u_t;
-
-#else
-
-typedef union md_protocol_err_clr_u {
- bdrkreg_t md_protocol_err_clr_regval;
- struct {
- bdrkreg_t pec_valid : 1;
- bdrkreg_t pec_initiator : 11;
- bdrkreg_t pec_msg_type : 8;
- bdrkreg_t pec_access : 1;
- bdrkreg_t pec_priority : 1;
- bdrkreg_t pec_dir_state : 3;
- bdrkreg_t pec_dir_format : 2;
- bdrkreg_t pec_ptr1_btmbits : 3;
- bdrkreg_t pec_reserved : 1;
- bdrkreg_t pec_address : 30;
- bdrkreg_t pec_reserved_1 : 1;
- bdrkreg_t pec_pointer_me : 1;
- bdrkreg_t pec_overrun : 1;
- } md_protocol_err_clr_fld_s;
-} md_protocol_err_clr_u_t;
-
-#endif
-
-
-
-
-/************************************************************************
- * *
- * Contains the address of the page and the requestor which caused a *
- * migration threshold to be exceeded. Also contains the type of *
- * threshold exceeded and an overrun bit. For Value mode type *
- * interrupts, it indicates whether the local or the remote counter *
- * triggered the interrupt. Unlike most registers, when the overrun *
- * bit is set the register contains information on the most recent *
- * (the last) migration candidate. *
- * *
- ************************************************************************/
-
-
-
-
-#ifdef LITTLE_ENDIAN
-
-typedef union md_mig_candidate_u {
- bdrkreg_t md_mig_candidate_regval;
- struct {
- bdrkreg_t mc_address : 21;
- bdrkreg_t mc_initiator : 11;
- bdrkreg_t mc_overrun : 1;
- bdrkreg_t mc_type : 1;
- bdrkreg_t mc_local : 1;
- bdrkreg_t mc_reserved : 28;
- bdrkreg_t mc_valid : 1;
- } md_mig_candidate_fld_s;
-} md_mig_candidate_u_t;
-
-#else
-
-typedef union md_mig_candidate_u {
- bdrkreg_t md_mig_candidate_regval;
- struct {
- bdrkreg_t mc_valid : 1;
- bdrkreg_t mc_reserved : 28;
- bdrkreg_t mc_local : 1;
- bdrkreg_t mc_type : 1;
- bdrkreg_t mc_overrun : 1;
- bdrkreg_t mc_initiator : 11;
- bdrkreg_t mc_address : 21;
- } md_mig_candidate_fld_s;
-} md_mig_candidate_u_t;
-
-#endif
-
-
-
-
-/************************************************************************
- * *
- * Contains the address of the page and the requestor which caused a *
- * migration threshold to be exceeded. Also contains the type of *
- * threshold exceeded and an overrun bit. For Value mode type *
- * interrupts, it indicates whether the local or the remote counter *
- * triggered the interrupt. Unlike most registers, when the overrun *
- * bit is set the register contains information on the most recent *
- * (the last) migration candidate. *
- * *
- ************************************************************************/
-
-
-
-
-#ifdef LITTLE_ENDIAN
-
-typedef union md_mig_candidate_clr_u {
- bdrkreg_t md_mig_candidate_clr_regval;
- struct {
- bdrkreg_t mcc_address : 21;
- bdrkreg_t mcc_initiator : 11;
- bdrkreg_t mcc_overrun : 1;
- bdrkreg_t mcc_type : 1;
- bdrkreg_t mcc_local : 1;
- bdrkreg_t mcc_reserved : 28;
- bdrkreg_t mcc_valid : 1;
- } md_mig_candidate_clr_fld_s;
-} md_mig_candidate_clr_u_t;
-
-#else
-
-typedef union md_mig_candidate_clr_u {
- bdrkreg_t md_mig_candidate_clr_regval;
- struct {
- bdrkreg_t mcc_valid : 1;
- bdrkreg_t mcc_reserved : 28;
- bdrkreg_t mcc_local : 1;
- bdrkreg_t mcc_type : 1;
- bdrkreg_t mcc_overrun : 1;
- bdrkreg_t mcc_initiator : 11;
- bdrkreg_t mcc_address : 21;
- } md_mig_candidate_clr_fld_s;
-} md_mig_candidate_clr_u_t;
-
-#endif
-
-
-
-
-/************************************************************************
- * *
- * Controls the generation of page-migration interrupts and loading *
- * of the MIGRATION_CANDIDATE register for pages which are using the *
- * difference between the requestor and home counts. If the *
- * difference is greater-than or equal to than the threshold *
- * contained in the register, and the valid bit is set, the migration *
- * candidate is loaded (and an interrupt generated if enabled by the *
- * page migration mode). *
- * *
- ************************************************************************/
-
-
-
-
-#ifdef LITTLE_ENDIAN
-
-typedef union md_mig_diff_thresh_u {
- bdrkreg_t md_mig_diff_thresh_regval;
- struct {
- bdrkreg_t mdt_threshold : 15;
- bdrkreg_t mdt_reserved_1 : 17;
- bdrkreg_t mdt_th_action : 3;
- bdrkreg_t mdt_sat_action : 3;
- bdrkreg_t mdt_reserved : 25;
- bdrkreg_t mdt_valid : 1;
- } md_mig_diff_thresh_fld_s;
-} md_mig_diff_thresh_u_t;
-
-#else
-
-typedef union md_mig_diff_thresh_u {
- bdrkreg_t md_mig_diff_thresh_regval;
- struct {
- bdrkreg_t mdt_valid : 1;
- bdrkreg_t mdt_reserved : 25;
- bdrkreg_t mdt_sat_action : 3;
- bdrkreg_t mdt_th_action : 3;
- bdrkreg_t mdt_reserved_1 : 17;
- bdrkreg_t mdt_threshold : 15;
- } md_mig_diff_thresh_fld_s;
-} md_mig_diff_thresh_u_t;
-
-#endif
-
-
-
-
-/************************************************************************
- * *
- * Controls the generation of page-migration interrupts and loading *
- * of the MIGRATION_CANDIDATE register for pages that are using the *
- * absolute value of the requestor count. If the value is *
- * greater-than or equal to the threshold contained in the register, *
- * and the register valid bit is set, the migration candidate is *
- * loaded and an interrupt generated. For the value mode of page *
- * migration, there are two variations. In the first variation, *
- * interrupts are only generated when the remote counter reaches the *
- * threshold, not when the local counter reaches the threshold. In *
- * the second mode, both the local counter and the remote counter *
- * generate interrupts if they reach the threshold. This second mode *
- * is useful for performance monitoring, to track the number of local *
- * and remote references to a page. LOCAL_INT determines whether we *
- * will generate interrupts when the local counter reaches the *
- * threshold. *
- * *
- ************************************************************************/
-
-
-
-
-#ifdef LITTLE_ENDIAN
-
-typedef union md_mig_value_thresh_u {
- bdrkreg_t md_mig_value_thresh_regval;
- struct {
- bdrkreg_t mvt_threshold : 15;
- bdrkreg_t mvt_reserved_1 : 17;
- bdrkreg_t mvt_th_action : 3;
- bdrkreg_t mvt_sat_action : 3;
- bdrkreg_t mvt_reserved : 24;
- bdrkreg_t mvt_local_int : 1;
- bdrkreg_t mvt_valid : 1;
- } md_mig_value_thresh_fld_s;
-} md_mig_value_thresh_u_t;
-
-#else
-
-typedef union md_mig_value_thresh_u {
- bdrkreg_t md_mig_value_thresh_regval;
- struct {
- bdrkreg_t mvt_valid : 1;
- bdrkreg_t mvt_local_int : 1;
- bdrkreg_t mvt_reserved : 24;
- bdrkreg_t mvt_sat_action : 3;
- bdrkreg_t mvt_th_action : 3;
- bdrkreg_t mvt_reserved_1 : 17;
- bdrkreg_t mvt_threshold : 15;
- } md_mig_value_thresh_fld_s;
-} md_mig_value_thresh_u_t;
-
-#endif
-
-
-
-
-/************************************************************************
- * *
- * Contains the controls for the sizing of the three MOQH request *
- * queues. The maximum (and default) value is 4. Queue sizes are in *
- * flits. One header equals one flit. *
- * *
- ************************************************************************/
-
-
-
-
-#ifdef LITTLE_ENDIAN
-
-typedef union md_outgoing_rq_queue_size_u {
- bdrkreg_t md_outgoing_rq_queue_size_regval;
- struct {
- bdrkreg_t orqs_reserved_3 : 8;
- bdrkreg_t orqs_moqh_p0_rq_size : 3;
- bdrkreg_t orqs_reserved_2 : 5;
- bdrkreg_t orqs_moqh_p1_rq_size : 3;
- bdrkreg_t orqs_reserved_1 : 5;
- bdrkreg_t orqs_moqh_np_rq_size : 3;
- bdrkreg_t orqs_reserved : 37;
- } md_outgoing_rq_queue_size_fld_s;
-} md_outgoing_rq_queue_size_u_t;
-
-#else
-
-typedef union md_outgoing_rq_queue_size_u {
- bdrkreg_t md_outgoing_rq_queue_size_regval;
- struct {
- bdrkreg_t orqs_reserved : 37;
- bdrkreg_t orqs_moqh_np_rq_size : 3;
- bdrkreg_t orqs_reserved_1 : 5;
- bdrkreg_t orqs_moqh_p1_rq_size : 3;
- bdrkreg_t orqs_reserved_2 : 5;
- bdrkreg_t orqs_moqh_p0_rq_size : 3;
- bdrkreg_t orqs_reserved_3 : 8;
- } md_outgoing_rq_queue_size_fld_s;
-} md_outgoing_rq_queue_size_u_t;
-
-#endif
-
-
-
-
-/************************************************************************
- * *
- * Contains the 32-bit directory word failing BIST. *
- * *
- ************************************************************************/
-
-
-
-
-#ifdef LITTLE_ENDIAN
-
-typedef union md_bist_db_err_data_u {
- bdrkreg_t md_bist_db_err_data_regval;
- struct {
- bdrkreg_t bded_db_er_d : 32;
- bdrkreg_t bded_reserved : 32;
- } md_bist_db_err_data_fld_s;
-} md_bist_db_err_data_u_t;
-
-#else
-
-typedef union md_bist_db_err_data_u {
- bdrkreg_t md_bist_db_err_data_regval;
- struct {
- bdrkreg_t bded_reserved : 32;
- bdrkreg_t bded_db_er_d : 32;
- } md_bist_db_err_data_fld_s;
-} md_bist_db_err_data_u_t;
-
-#endif
-
-
-
-/************************************************************************
- * *
- * Contains 2 bits that allow the selection of DB debug information *
- * at the debug port (see the design specification for descrition of *
- * the available debug information). *
- * *
- ************************************************************************/
-
-
-
-
-#ifdef LITTLE_ENDIAN
-
-typedef union md_db_debug_u {
- bdrkreg_t md_db_debug_regval;
- struct {
- bdrkreg_t dd_db_debug_sel : 2;
- bdrkreg_t dd_reserved : 62;
- } md_db_debug_fld_s;
-} md_db_debug_u_t;
-
-#else
-
-typedef union md_db_debug_u {
- bdrkreg_t md_db_debug_regval;
- struct {
- bdrkreg_t dd_reserved : 62;
- bdrkreg_t dd_db_debug_sel : 2;
- } md_db_debug_fld_s;
-} md_db_debug_u_t;
-
-#endif
-
-
-
-
-/************************************************************************
- * *
- * Contains the IgnoreECC bit. When this bit is set, all ECC errors *
- * are ignored. ECC bits will still be generated on writebacks. *
- * *
- ************************************************************************/
-
-
-
-
-#ifdef LITTLE_ENDIAN
-
-typedef union md_mb_ecc_config_u {
- bdrkreg_t md_mb_ecc_config_regval;
- struct {
- bdrkreg_t mec_ignore_dataecc : 1;
- bdrkreg_t mec_reserved : 63;
- } md_mb_ecc_config_fld_s;
-} md_mb_ecc_config_u_t;
-
-#else
-
-typedef union md_mb_ecc_config_u {
- bdrkreg_t md_mb_ecc_config_regval;
- struct {
- bdrkreg_t mec_reserved : 63;
- bdrkreg_t mec_ignore_dataecc : 1;
- } md_mb_ecc_config_fld_s;
-} md_mb_ecc_config_u_t;
-
-#endif
-
-
-
-
-/************************************************************************
- * *
- * Description: Contains information on read memory errors (both *
- * correctable and uncorrectable) and write memory errors (always *
- * uncorrectable). The errors are prioritized as follows: *
- * highest: uncorrectable read error (READ_UCE) *
- * middle: write error (WRITE_UCE) *
- * lowest: correctable read error (READ_CE) *
- * Each type of error maintains a two-bit valid/overrun field *
- * (READ_UCE, WRITE_UCE, or READ_CE). Bit 0 of each two-bit field *
- * corresponds to the valid bit, and bit 1 of each two-bit field *
- * corresponds to the overrun bit. *
- * The rule for the valid bit is that it gets set whenever that error *
- * occurs, regardless of whether a higher priority error has occurred. *
- * The rule for the overrun bit is that it gets set whenever we are *
- * unable to record the address information for this particular *
- * error, due to a previous error of the same or higher priority. *
- * Note that the syndrome and address information always corresponds *
- * to the earliest, highest priority error. *
- * Finally, the UCE_DIFF_ADDR bit is set whenever there have been *
- * several uncorrectable errors, to different cache line addresses. *
- * If all the UCEs were to the same cache line address, then *
- * UCE_DIFF_ADDR will be 0. This allows the operating system to *
- * detect the case where a UCE error is read exclusively, and then *
- * written back by the processor. If the bit is 0, it indicates that *
- * no information has been lost about UCEs on other cache lines. In *
- * particular, partial writes do a read modify write of the cache *
- * line. A UCE read error will be set when the cache line is read, *
- * and a UCE write error will occur when the cache line is written *
- * back, but the UCE_DIFF_ADDR will not be set. *
- * *
- ************************************************************************/
-
-
-
-
-#ifdef LITTLE_ENDIAN
-
-typedef union md_mem_error_u {
- bdrkreg_t md_mem_error_regval;
- struct {
- bdrkreg_t me_reserved_5 : 3;
- bdrkreg_t me_address : 30;
- bdrkreg_t me_reserved_4 : 7;
- bdrkreg_t me_bad_syn : 8;
- bdrkreg_t me_reserved_3 : 4;
- bdrkreg_t me_read_ce : 2;
- bdrkreg_t me_reserved_2 : 2;
- bdrkreg_t me_write_uce : 2;
- bdrkreg_t me_reserved_1 : 2;
- bdrkreg_t me_read_uce : 2;
- bdrkreg_t me_reserved : 1;
- bdrkreg_t me_uce_diff_addr : 1;
- } md_mem_error_fld_s;
-} md_mem_error_u_t;
-
-#else
-
-typedef union md_mem_error_u {
- bdrkreg_t md_mem_error_regval;
- struct {
- bdrkreg_t me_uce_diff_addr : 1;
- bdrkreg_t me_reserved : 1;
- bdrkreg_t me_read_uce : 2;
- bdrkreg_t me_reserved_1 : 2;
- bdrkreg_t me_write_uce : 2;
- bdrkreg_t me_reserved_2 : 2;
- bdrkreg_t me_read_ce : 2;
- bdrkreg_t me_reserved_3 : 4;
- bdrkreg_t me_bad_syn : 8;
- bdrkreg_t me_reserved_4 : 7;
- bdrkreg_t me_address : 30;
- bdrkreg_t me_reserved_5 : 3;
- } md_mem_error_fld_s;
-} md_mem_error_u_t;
-
-#endif
-
-
-
-
-/************************************************************************
- * *
- * Description: Contains information on read memory errors (both *
- * correctable and uncorrectable) and write memory errors (always *
- * uncorrectable). The errors are prioritized as follows: *
- * highest: uncorrectable read error (READ_UCE) *
- * middle: write error (WRITE_UCE) *
- * lowest: correctable read error (READ_CE) *
- * Each type of error maintains a two-bit valid/overrun field *
- * (READ_UCE, WRITE_UCE, or READ_CE). Bit 0 of each two-bit field *
- * corresponds to the valid bit, and bit 1 of each two-bit field *
- * corresponds to the overrun bit. *
- * The rule for the valid bit is that it gets set whenever that error *
- * occurs, regardless of whether a higher priority error has occurred. *
- * The rule for the overrun bit is that it gets set whenever we are *
- * unable to record the address information for this particular *
- * error, due to a previous error of the same or higher priority. *
- * Note that the syndrome and address information always corresponds *
- * to the earliest, highest priority error. *
- * Finally, the UCE_DIFF_ADDR bit is set whenever there have been *
- * several uncorrectable errors, to different cache line addresses. *
- * If all the UCEs were to the same cache line address, then *
- * UCE_DIFF_ADDR will be 0. This allows the operating system to *
- * detect the case where a UCE error is read exclusively, and then *
- * written back by the processor. If the bit is 0, it indicates that *
- * no information has been lost about UCEs on other cache lines. In *
- * particular, partial writes do a read modify write of the cache *
- * line. A UCE read error will be set when the cache line is read, *
- * and a UCE write error will occur when the cache line is written *
- * back, but the UCE_DIFF_ADDR will not be set. *
- * *
- ************************************************************************/
-
-
-
-
-#ifdef LITTLE_ENDIAN
-
-typedef union md_mem_error_clr_u {
- bdrkreg_t md_mem_error_clr_regval;
- struct {
- bdrkreg_t mec_reserved_5 : 3;
- bdrkreg_t mec_address : 30;
- bdrkreg_t mec_reserved_4 : 7;
- bdrkreg_t mec_bad_syn : 8;
- bdrkreg_t mec_reserved_3 : 4;
- bdrkreg_t mec_read_ce : 2;
- bdrkreg_t mec_reserved_2 : 2;
- bdrkreg_t mec_write_uce : 2;
- bdrkreg_t mec_reserved_1 : 2;
- bdrkreg_t mec_read_uce : 2;
- bdrkreg_t mec_reserved : 1;
- bdrkreg_t mec_uce_diff_addr : 1;
- } md_mem_error_clr_fld_s;
-} md_mem_error_clr_u_t;
-
-#else
-
-typedef union md_mem_error_clr_u {
- bdrkreg_t md_mem_error_clr_regval;
- struct {
- bdrkreg_t mec_uce_diff_addr : 1;
- bdrkreg_t mec_reserved : 1;
- bdrkreg_t mec_read_uce : 2;
- bdrkreg_t mec_reserved_1 : 2;
- bdrkreg_t mec_write_uce : 2;
- bdrkreg_t mec_reserved_2 : 2;
- bdrkreg_t mec_read_ce : 2;
- bdrkreg_t mec_reserved_3 : 4;
- bdrkreg_t mec_bad_syn : 8;
- bdrkreg_t mec_reserved_4 : 7;
- bdrkreg_t mec_address : 30;
- bdrkreg_t mec_reserved_5 : 3;
- } md_mem_error_clr_fld_s;
-} md_mem_error_clr_u_t;
-
-#endif
-
-
-
-
-/************************************************************************
- * *
- * Contains one-quarter of the error memory line failing BIST. *
- * *
- ************************************************************************/
-
-
-
-
-#ifdef LITTLE_ENDIAN
-
-typedef union md_bist_mb_err_data_0_u {
- bdrkreg_t md_bist_mb_err_data_0_regval;
- struct {
- bdrkreg_t bmed0_mb_er_d : 36;
- bdrkreg_t bmed0_reserved : 28;
- } md_bist_mb_err_data_0_fld_s;
-} md_bist_mb_err_data_0_u_t;
-
-#else
-
-typedef union md_bist_mb_err_data_0_u {
- bdrkreg_t md_bist_mb_err_data_0_regval;
- struct {
- bdrkreg_t bmed0_reserved : 28;
- bdrkreg_t bmed0_mb_er_d : 36;
- } md_bist_mb_err_data_0_fld_s;
-} md_bist_mb_err_data_0_u_t;
-
-#endif
-
-
-
-
-/************************************************************************
- * *
- * Contains one-quarter of the error memory line failing BIST. *
- * *
- ************************************************************************/
-
-
-
-
-#ifdef LITTLE_ENDIAN
-
-typedef union md_bist_mb_err_data_1_u {
- bdrkreg_t md_bist_mb_err_data_1_regval;
- struct {
- bdrkreg_t bmed1_mb_er_d : 36;
- bdrkreg_t bmed1_reserved : 28;
- } md_bist_mb_err_data_1_fld_s;
-} md_bist_mb_err_data_1_u_t;
-
-#else
-
-typedef union md_bist_mb_err_data_1_u {
- bdrkreg_t md_bist_mb_err_data_1_regval;
- struct {
- bdrkreg_t bmed1_reserved : 28;
- bdrkreg_t bmed1_mb_er_d : 36;
- } md_bist_mb_err_data_1_fld_s;
-} md_bist_mb_err_data_1_u_t;
-
-#endif
-
-
-
-
-/************************************************************************
- * *
- * Contains one-quarter of the error memory line failing BIST. *
- * *
- ************************************************************************/
-
-
-
-
-#ifdef LITTLE_ENDIAN
-
-typedef union md_bist_mb_err_data_2_u {
- bdrkreg_t md_bist_mb_err_data_2_regval;
- struct {
- bdrkreg_t bmed2_mb_er_d : 36;
- bdrkreg_t bmed2_reserved : 28;
- } md_bist_mb_err_data_2_fld_s;
-} md_bist_mb_err_data_2_u_t;
-
-#else
-
-typedef union md_bist_mb_err_data_2_u {
- bdrkreg_t md_bist_mb_err_data_2_regval;
- struct {
- bdrkreg_t bmed2_reserved : 28;
- bdrkreg_t bmed2_mb_er_d : 36;
- } md_bist_mb_err_data_2_fld_s;
-} md_bist_mb_err_data_2_u_t;
-
-#endif
-
-
-
-
-/************************************************************************
- * *
- * Contains one-quarter of the error memory line failing BIST. *
- * *
- ************************************************************************/
-
-
-
-
-#ifdef LITTLE_ENDIAN
-
-typedef union md_bist_mb_err_data_3_u {
- bdrkreg_t md_bist_mb_err_data_3_regval;
- struct {
- bdrkreg_t bmed3_mb_er_d : 36;
- bdrkreg_t bmed3_reserved : 28;
- } md_bist_mb_err_data_3_fld_s;
-} md_bist_mb_err_data_3_u_t;
-
-#else
-
-typedef union md_bist_mb_err_data_3_u {
- bdrkreg_t md_bist_mb_err_data_3_regval;
- struct {
- bdrkreg_t bmed3_reserved : 28;
- bdrkreg_t bmed3_mb_er_d : 36;
- } md_bist_mb_err_data_3_fld_s;
-} md_bist_mb_err_data_3_u_t;
-
-#endif
-
-
-
-
-/************************************************************************
- * *
- * Contains 1 bit that allow the selection of MB debug information *
- * at the debug port (see the design specification for the available *
- * debug information). *
- * *
- ************************************************************************/
-
-
-
-
-#ifdef LITTLE_ENDIAN
-
-typedef union md_mb_debug_u {
- bdrkreg_t md_mb_debug_regval;
- struct {
- bdrkreg_t md_mb_debug_sel : 1;
- bdrkreg_t md_reserved : 63;
- } md_mb_debug_fld_s;
-} md_mb_debug_u_t;
-
-#else
-
-typedef union md_mb_debug_u {
- bdrkreg_t md_mb_debug_regval;
- struct {
- bdrkreg_t md_reserved : 63;
- bdrkreg_t md_mb_debug_sel : 1;
- } md_mb_debug_fld_s;
-} md_mb_debug_u_t;
-
-#endif
-
-
-
-
-
-
-#endif /* __ASSEMBLY__ */
-
-/************************************************************************
- * *
- * MAKE ALL ADDITIONS AFTER THIS LINE *
- * *
- ************************************************************************/
-
-
-
-
-#endif /* _ASM_IA64_SN_SN1_HUBMD_H */
+++ /dev/null
-/* $Id$
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Copyright (C) 1992 - 1997, 2000-2002 Silicon Graphics, Inc. All rights reserved.
- */
-#ifndef _ASM_IA64_SN_SN1_HUBMD_NEXT_H
-#define _ASM_IA64_SN_SN1_HUBMD_NEXT_H
-
-/* XXX moved over from SN/SN0/hubmd.h -- each should be checked for SN1 */
-/* In fact, most of this stuff is wrong. Some is correct, such as
- * MD_PAGE_SIZE and MD_PAGE_NUM_SHFT.
- */
-
-#define MD_PERF_COUNTERS 6
-#define MD_PERF_SETS 6
-
-#define MD_SIZE_EMPTY 0
-#define MD_SIZE_64MB 1
-#define MD_SIZE_128MB 2
-#define MD_SIZE_256MB 3
-#define MD_SIZE_512MB 4
-#define MD_SIZE_1GB 5
-
-#define MD_SIZE_BYTES(size) ((size) == 0 ? 0 : 0x2000000L << (size))
-#define MD_SIZE_MBYTES(size) ((size) == 0 ? 0 : 0x20 << (size))
-#define MD_NUM_ENABLED(_x) ((_x & 0x1) + ((_x >> 1) & 0x1) + \
- ((_x >> 2) & 0x1) + ((_x >> 3) & 0x1))
-
-
-/* Hardware page size and shift */
-
-#define MD_PAGE_SIZE 16384 /* Page size in bytes */
-#define MD_PAGE_NUM_SHFT 14 /* Address to page number shift */
-
-#define MMC_IO_PROT (UINT64_CAST 1 << 45)
-
-/* Register offsets from LOCAL_HUB or REMOTE_HUB */
-#define MD_PERF_SEL 0x210000 /* Select perf monitor events */
-
-/* MD_MIG_VALUE_THRESH bit definitions */
-
-#define MD_MIG_VALUE_THRES_VALID_MASK (UINT64_CAST 0x1 << 63)
-#define MD_MIG_VALUE_THRES_VALUE_MASK (UINT64_CAST 0xfffff)
-
-/* MD_MIG_CANDIDATE bit definitions */
-
-#define MD_MIG_CANDIDATE_VALID_MASK (UINT64_CAST 0x1 << 63)
-#define MD_MIG_CANDIDATE_VALID_SHFT 63
-#define MD_MIG_CANDIDATE_TYPE_MASK (UINT64_CAST 0x1 << 30)
-#define MD_MIG_CANDIDATE_TYPE_SHFT 30
-#define MD_MIG_CANDIDATE_OVERRUN_MASK (UINT64_CAST 0x1 << 29)
-#define MD_MIG_CANDIDATE_OVERRUN_SHFT 29
-#define MD_MIG_CANDIDATE_NODEID_MASK (UINT64_CAST 0x1ff << 20)
-#define MD_MIG_CANDIDATE_NODEID_SHFT 20
-#define MD_MIG_CANDIDATE_ADDR_MASK (UINT64_CAST 0x3ffff)
-
-
-/* XXX protection and migration are completely revised on SN1. On
- SN0, the reference count and protection fields were accessed in the
- same word, but on SN1 they reside at different addresses. The
- users of these macros will need to be rewritten. Also, the MD page
- size is 16K on SN1 but 4K on SN0. */
-
-/* Premium SIMM protection entry shifts and masks. */
-
-#define MD_PPROT_SHFT 0 /* Prot. field */
-#define MD_PPROT_MASK 0xf
-#define MD_PPROT_REFCNT_SHFT 5 /* Reference count */
-#define MD_PPROT_REFCNT_WIDTH 0x7ffff
-#define MD_PPROT_REFCNT_MASK (MD_PPROT_REFCNT_WIDTH << 5)
-
-#define MD_PPROT_IO_SHFT 8 /* I/O Prot field */
-
-/* Standard SIMM protection entry shifts and masks. */
-
-#define MD_SPROT_SHFT 0 /* Prot. field */
-#define MD_SPROT_MASK 0xf
-#define MD_SPROT_IO_SHFT 8
-#define MD_SPROT_REFCNT_SHFT 5 /* Reference count */
-#define MD_SPROT_REFCNT_WIDTH 0x7ff
-#define MD_SPROT_REFCNT_MASK (MD_SPROT_REFCNT_WIDTH << 5)
-
-/* Migration modes used in protection entries */
-
-#define MD_PROT_MIGMD_IREL (UINT64_CAST 0x3 << 3)
-#define MD_PROT_MIGMD_IABS (UINT64_CAST 0x2 << 3)
-#define MD_PROT_MIGMD_PREL (UINT64_CAST 0x1 << 3)
-#define MD_PROT_MIGMD_OFF (UINT64_CAST 0x0 << 3)
-
-/*
- * Operations on Memory/Directory DIMM control register
- */
-
-#define DIRTYPE_PREMIUM 1
-#define DIRTYPE_STANDARD 0
-
-/*
- * Operations on page migration count difference and absolute threshold
- * registers
- */
-
-#define MD_MIG_VALUE_THRESH_GET(region) ( \
- REMOTE_HUB_L((region), MD_MIG_VALUE_THRESH) & \
- MD_MIG_VALUE_THRES_VALUE_MASK)
-
-#define MD_MIG_VALUE_THRESH_SET(region, value) ( \
- REMOTE_HUB_S((region), MD_MIG_VALUE_THRESH, \
- MD_MIG_VALUE_THRES_VALID_MASK | (value)))
-
-#define MD_MIG_VALUE_THRESH_ENABLE(region) ( \
- REMOTE_HUB_S((region), MD_MIG_VALUE_THRESH, \
- REMOTE_HUB_L((region), MD_MIG_VALUE_THRESH) \
- | MD_MIG_VALUE_THRES_VALID_MASK))
-
-/*
- * Operations on page migration candidate register
- */
-
-#define MD_MIG_CANDIDATE_GET(my_region_id) ( \
- REMOTE_HUB_L((my_region_id), MD_MIG_CANDIDATE_CLR))
-
-#define MD_MIG_CANDIDATE_HWPFN(value) ((value) & MD_MIG_CANDIDATE_ADDR_MASK)
-
-#define MD_MIG_CANDIDATE_NODEID(value) ( \
- ((value) & MD_MIG_CANDIDATE_NODEID_MASK) >> MD_MIG_CANDIDATE_NODEID_SHFT)
-
-#define MD_MIG_CANDIDATE_TYPE(value) ( \
- ((value) & MD_MIG_CANDIDATE_TYPE_MASK) >> MD_MIG_CANDIDATE_TYPE_SHFT)
-
-#define MD_MIG_CANDIDATE_VALID(value) ( \
- ((value) & MD_MIG_CANDIDATE_VALID_MASK) >> MD_MIG_CANDIDATE_VALID_SHFT)
-
-/*
- * Macros to retrieve fields in the protection entry
- */
-
-/* for Premium SIMM */
-#define MD_PPROT_REFCNT_GET(value) ( \
- ((value) & MD_PPROT_REFCNT_MASK) >> MD_PPROT_REFCNT_SHFT)
-
-/* for Standard SIMM */
-#define MD_SPROT_REFCNT_GET(value) ( \
- ((value) & MD_SPROT_REFCNT_MASK) >> MD_SPROT_REFCNT_SHFT)
-
-#ifndef __ASSEMBLY__
-#ifdef LITTLE_ENDIAN
-
-typedef union md_perf_sel {
- uint64_t perf_sel_reg;
- struct {
- uint64_t perf_sel : 3,
- perf_en : 1,
- perf_rsvd : 60;
- } perf_sel_bits;
-} md_perf_sel_t;
-
-#else
-
-typedef union md_perf_sel {
- uint64_t perf_sel_reg;
- struct {
- uint64_t perf_rsvd : 60,
- perf_en : 1,
- perf_sel : 3;
- } perf_sel_bits;
-} md_perf_sel_t;
-
-#endif
-#endif /* __ASSEMBLY__ */
-
-
-/* Like SN0, SN1 supports a mostly-flat address space with 8
- CPU-visible, evenly spaced, contiguous regions, or "software
- banks". On SN1, software bank n begins at addresses n * 1GB,
- 0 <= n < 8.
-
- Physically (and very unlike SN0), each SN1 node board contains 8
- dimm sockets, arranged as 4 "DIMM banks" of 2 dimms each. DIMM
- size and width (x4/x8) is assigned per dimm bank. Each DIMM bank
- consists of 2 "physical banks", one on the front sides of the 2
- DIMMs and the other on the back sides. Therefore a node has a
- total of 8 ( = 4 * 2) physical banks. They are collectively
- referred to as "locational banks", since the locational bank number
- depends on the physical location of the DIMMs on the board.
-
- Dimm bank 0, Phys bank 0a (locational bank 0a)
- Slot D0 ----------------------------------------------
- Dimm bank 0, Phys bank 1a (locational bank 1a)
-
- Dimm bank 1, Phys bank 0a (locational bank 2a)
- Slot D1 ----------------------------------------------
- Dimm bank 1, Phys bank 1a (locational bank 3a)
-
- Dimm bank 2, Phys bank 0a (locational bank 4a)
- Slot D2 ----------------------------------------------
- Dimm bank 2, Phys bank 1a (locational bank 5a)
-
- Dimm bank 3, Phys bank 0a (locational bank 6a)
- Slot D3 ----------------------------------------------
- Dimm bank 3, Phys bank 1a (locational bank 7a)
-
- Dimm bank 0, Phys bank 0b (locational bank 0b)
- Slot D4 ----------------------------------------------
- Dimm bank 0, Phys bank 1b (locational bank 1b)
-
- Dimm bank 1, Phys bank 0b (locational bank 2b)
- Slot D5 ----------------------------------------------
- Dimm bank 1, Phys bank 1b (locational bank 3b)
-
- Dimm bank 2, Phys bank 0b (locational bank 4b)
- Slot D6 ----------------------------------------------
- Dimm bank 2, Phys bank 1b (locational bank 5b)
-
- Dimm bank 3, Phys bank 0b (locational bank 6b)
- Slot D7 ----------------------------------------------
- Dimm bank 3, Phys bank 1b (locational bank 7b)
-
- Since bank size is assigned per DIMM bank, each pair of locational
- banks must have the same size. However, they may be
- enabled/disabled individually.
-
- The locational banks map to the software banks via the dimm0_sel
- field in MD_MEMORY_CONFIG. When the field is 0 (the usual case),
- the mapping is direct: eg. locational bank 1 (dimm bank 0,
- physical bank 1, which is the back side of the first DIMM pair)
- corresponds to software bank 1, at node offset 1GB. More
- generally, locational bank = software bank XOR dimm0_sel.
-
- All the PROM's data structures (promlog variables, klconfig, etc.)
- track memory by the locational bank number. The kernel usually
- tracks memory by the software bank number.
- memsupport.c:slot_psize_compute() performs the mapping.
-
- (Note: the terms "locational bank" and "software bank" are not
- offical in any way, but I've tried to make the PROM use them
- consistently -- bjj.)
- */
-
-#define MD_MEM_BANKS 8
-#define MD_MEM_DIMM_BANKS 4
-#define MD_BANK_SHFT 30 /* log2(1 GB) */
-#define MD_BANK_MASK (UINT64_CAST 0x7 << 30)
-#define MD_BANK_SIZE (UINT64_CAST 1 << MD_BANK_SHFT) /* 1 GB */
-#define MD_BANK_OFFSET(_b) (UINT64_CAST (_b) << MD_BANK_SHFT)
-#define MD_BANK_GET(addr) (((addr) & MD_BANK_MASK) >> MD_BANK_SHFT)
-#define MD_BANK_TO_DIMM_BANK(_b) (( (_b) >> 1) & 0x3)
-#define MD_BANK_TO_PHYS_BANK(_b) (( (_b) >> 0) & 0x1)
-#define MD_DIMM_BANK_GET(addr) MD_BANK_TO_DIMM_BANK(MD_BANK_GET(addr))
-#define MD_PHYS_BANK_GET(addr) MD_BANK_TO_PHYS_BANK(MD_BANK_GET(addr))
-
-
-/* Split an MD pointer (or message source & suppl. fields) into node, device */
-
-#define MD_PTR_NODE_SHFT 3
-#define MD_PTR_DEVICE_MASK 0x7
-#define MD_PTR_SUBNODE0_MASK 0x1
-#define MD_PTR_SUBNODE1_MASK 0x4
-
-
-/**********************************************************************
-
- Backdoor protection and page counter structures
-
-**********************************************************************/
-
-/* Protection entries and page counters are interleaved at 4 separate
- addresses, 0x10 apart. Software must read/write all four. */
-
-#define BD_ITLV_COUNT 4
-#define BD_ITLV_STRIDE 0x10
-
-/* Protection entries */
-
-/* (these macros work for standard (_rgn < 32) or premium DIMMs) */
-#define MD_PROT_SHFT(_rgn, _io) ((((_rgn) & 0x20) >> 2 | \
- ((_rgn) & 0x01) << 2 | \
- ((_io) & 0x1) << 1) * 8)
-#define MD_PROT_MASK(_rgn, _io) (0xff << MD_PROT_SHFT(_rgn, _io))
-#define MD_PROT_GET(_val, _rgn, _io) \
- (((_val) & MD_PROT_MASK(_rgn, _io)) >> MD_PROT_SHFT(_rgn, _io))
-
-/* Protection field values */
-
-#define MD_PROT_RW (UINT64_CAST 0xff)
-#define MD_PROT_RO (UINT64_CAST 0x0f)
-#define MD_PROT_NO (UINT64_CAST 0x00)
-
-
-
-
-/**********************************************************************
-
- Directory format structures
-
-***********************************************************************/
-
-#ifndef __ASSEMBLY__
-
-/* Standard Directory Entries */
-
-#ifdef LITTLE_ENDIAN
-
-struct md_sdir_pointer_fmt { /* exclusive, busy shared/excl, wait, poisoned */
- bdrkreg_t sdp_format : 2;
- bdrkreg_t sdp_state : 3;
- bdrkreg_t sdp_priority : 3;
- bdrkreg_t sdp_pointer1 : 8;
- bdrkreg_t sdp_ecc : 6;
- bdrkreg_t sdp_locprot : 1;
- bdrkreg_t sdp_reserved : 1;
- bdrkreg_t sdp_crit_word_off : 3;
- bdrkreg_t sdp_pointer2 : 5;
- bdrkreg_t sdp_fill : 32;
-};
-
-#else
-
-struct md_sdir_pointer_fmt { /* exclusive, busy shared/excl, wait, poisoned */
- bdrkreg_t sdp_fill : 32;
- bdrkreg_t sdp_pointer2 : 5;
- bdrkreg_t sdp_crit_word_off : 3;
- bdrkreg_t sdp_reserved : 1;
- bdrkreg_t sdp_locprot : 1;
- bdrkreg_t sdp_ecc : 6;
- bdrkreg_t sdp_pointer1 : 8;
- bdrkreg_t sdp_priority : 3;
- bdrkreg_t sdp_state : 3;
- bdrkreg_t sdp_format : 2;
-};
-
-#endif
-
-#ifdef LITTLE_ENDIAN
-
-struct md_sdir_fine_fmt { /* shared (fine) */
- bdrkreg_t sdf_format : 2;
- bdrkreg_t sdf_tag1 : 3;
- bdrkreg_t sdf_tag2 : 3;
- bdrkreg_t sdf_vector1 : 8;
- bdrkreg_t sdf_ecc : 6;
- bdrkreg_t sdf_locprot : 1;
- bdrkreg_t sdf_tag2valid : 1;
- bdrkreg_t sdf_vector2 : 8;
- bdrkreg_t sdf_fill : 32;
-};
-
-#else
-
-struct md_sdir_fine_fmt { /* shared (fine) */
- bdrkreg_t sdf_fill : 32;
- bdrkreg_t sdf_vector2 : 8;
- bdrkreg_t sdf_tag2valid : 1;
- bdrkreg_t sdf_locprot : 1;
- bdrkreg_t sdf_ecc : 6;
- bdrkreg_t sdf_vector1 : 8;
- bdrkreg_t sdf_tag2 : 3;
- bdrkreg_t sdf_tag1 : 3;
- bdrkreg_t sdf_format : 2;
-};
-
-#endif
-
-#ifdef LITTLE_ENDIAN
-
-struct md_sdir_coarse_fmt { /* shared (coarse) */
- bdrkreg_t sdc_format : 2;
- bdrkreg_t sdc_reserved_1 : 6;
- bdrkreg_t sdc_vector_a : 8;
- bdrkreg_t sdc_ecc : 6;
- bdrkreg_t sdc_locprot : 1;
- bdrkreg_t sdc_reserved : 1;
- bdrkreg_t sdc_vector_b : 8;
- bdrkreg_t sdc_fill : 32;
-};
-
-#else
-
-struct md_sdir_coarse_fmt { /* shared (coarse) */
- bdrkreg_t sdc_fill : 32;
- bdrkreg_t sdc_vector_b : 8;
- bdrkreg_t sdc_reserved : 1;
- bdrkreg_t sdc_locprot : 1;
- bdrkreg_t sdc_ecc : 6;
- bdrkreg_t sdc_vector_a : 8;
- bdrkreg_t sdc_reserved_1 : 6;
- bdrkreg_t sdc_format : 2;
-};
-
-#endif
-
-typedef union md_sdir {
- /* The 32 bits of standard directory, in bits 31:0 */
- uint64_t sd_val;
- struct md_sdir_pointer_fmt sdp_fmt;
- struct md_sdir_fine_fmt sdf_fmt;
- struct md_sdir_coarse_fmt sdc_fmt;
-} md_sdir_t;
-
-
-/* Premium Directory Entries */
-
-#ifdef LITTLE_ENDIAN
-
-struct md_pdir_pointer_fmt { /* exclusive, busy shared/excl, wait, poisoned */
- bdrkreg_t pdp_format : 2;
- bdrkreg_t pdp_state : 3;
- bdrkreg_t pdp_priority : 3;
- bdrkreg_t pdp_pointer1_a : 8;
- bdrkreg_t pdp_reserved_4 : 6;
- bdrkreg_t pdp_pointer1_b : 3;
- bdrkreg_t pdp_reserved_3 : 7;
- bdrkreg_t pdp_ecc_a : 6;
- bdrkreg_t pdp_locprot : 1;
- bdrkreg_t pdp_reserved_2 : 1;
- bdrkreg_t pdp_crit_word_off : 3;
- bdrkreg_t pdp_pointer2_a : 5;
- bdrkreg_t pdp_ecc_b : 1;
- bdrkreg_t pdp_reserved_1 : 5;
- bdrkreg_t pdp_pointer2_b : 3;
- bdrkreg_t pdp_reserved : 7;
-};
-
-#else
-
-struct md_pdir_pointer_fmt { /* exclusive, busy shared/excl, wait, poisoned */
- bdrkreg_t pdp_reserved : 7;
- bdrkreg_t pdp_pointer2_b : 3;
- bdrkreg_t pdp_reserved_1 : 5;
- bdrkreg_t pdp_ecc_b : 1;
- bdrkreg_t pdp_pointer2_a : 5;
- bdrkreg_t pdp_crit_word_off : 3;
- bdrkreg_t pdp_reserved_2 : 1;
- bdrkreg_t pdp_locprot : 1;
- bdrkreg_t pdp_ecc_a : 6;
- bdrkreg_t pdp_reserved_3 : 7;
- bdrkreg_t pdp_pointer1_b : 3;
- bdrkreg_t pdp_reserved_4 : 6;
- bdrkreg_t pdp_pointer1_a : 8;
- bdrkreg_t pdp_priority : 3;
- bdrkreg_t pdp_state : 3;
- bdrkreg_t pdp_format : 2;
-};
-
-#endif
-
-#ifdef LITTLE_ENDIAN
-
-struct md_pdir_fine_fmt { /* shared (fine) */
- bdrkreg_t pdf_format : 2;
- bdrkreg_t pdf_tag1_a : 3;
- bdrkreg_t pdf_tag2_a : 3;
- bdrkreg_t pdf_vector1_a : 8;
- bdrkreg_t pdf_reserved_1 : 6;
- bdrkreg_t pdf_tag1_b : 2;
- bdrkreg_t pdf_vector1_b : 8;
- bdrkreg_t pdf_ecc_a : 6;
- bdrkreg_t pdf_locprot : 1;
- bdrkreg_t pdf_tag2valid : 1;
- bdrkreg_t pdf_vector2_a : 8;
- bdrkreg_t pdf_ecc_b : 1;
- bdrkreg_t pdf_reserved : 5;
- bdrkreg_t pdf_tag2_b : 2;
- bdrkreg_t pdf_vector2_b : 8;
-};
-
-#else
-
-struct md_pdir_fine_fmt { /* shared (fine) */
- bdrkreg_t pdf_vector2_b : 8;
- bdrkreg_t pdf_tag2_b : 2;
- bdrkreg_t pdf_reserved : 5;
- bdrkreg_t pdf_ecc_b : 1;
- bdrkreg_t pdf_vector2_a : 8;
- bdrkreg_t pdf_tag2valid : 1;
- bdrkreg_t pdf_locprot : 1;
- bdrkreg_t pdf_ecc_a : 6;
- bdrkreg_t pdf_vector1_b : 8;
- bdrkreg_t pdf_tag1_b : 2;
- bdrkreg_t pdf_reserved_1 : 6;
- bdrkreg_t pdf_vector1_a : 8;
- bdrkreg_t pdf_tag2_a : 3;
- bdrkreg_t pdf_tag1_a : 3;
- bdrkreg_t pdf_format : 2;
-};
-
-#endif
-
-#ifdef LITTLE_ENDIAN
-
-struct md_pdir_sparse_fmt { /* shared (sparse) */
- bdrkreg_t pds_format : 2;
- bdrkreg_t pds_column_a : 6;
- bdrkreg_t pds_row_a : 8;
- bdrkreg_t pds_column_b : 16;
- bdrkreg_t pds_ecc_a : 6;
- bdrkreg_t pds_locprot : 1;
- bdrkreg_t pds_reserved_1 : 1;
- bdrkreg_t pds_row_b : 8;
- bdrkreg_t pds_ecc_b : 1;
- bdrkreg_t pds_column_c : 10;
- bdrkreg_t pds_reserved : 5;
-};
-
-#else
-
-struct md_pdir_sparse_fmt { /* shared (sparse) */
- bdrkreg_t pds_reserved : 5;
- bdrkreg_t pds_column_c : 10;
- bdrkreg_t pds_ecc_b : 1;
- bdrkreg_t pds_row_b : 8;
- bdrkreg_t pds_reserved_1 : 1;
- bdrkreg_t pds_locprot : 1;
- bdrkreg_t pds_ecc_a : 6;
- bdrkreg_t pds_column_b : 16;
- bdrkreg_t pds_row_a : 8;
- bdrkreg_t pds_column_a : 6;
- bdrkreg_t pds_format : 2;
-};
-
-#endif
-
-typedef union md_pdir {
- /* The 64 bits of premium directory */
- uint64_t pd_val;
- struct md_pdir_pointer_fmt pdp_fmt;
- struct md_pdir_fine_fmt pdf_fmt;
- struct md_pdir_sparse_fmt pds_fmt;
-} md_pdir_t;
-
-#endif /* __ASSEMBLY__ */
-
-
-/**********************************************************************
-
- The defines for backdoor directory and backdoor ECC.
-
-***********************************************************************/
-
-/* Directory formats, for each format's "format" field */
-
-#define MD_FORMAT_UNOWNED (UINT64_CAST 0x0) /* 00 */
-#define MD_FORMAT_POINTER (UINT64_CAST 0x1) /* 01 */
-#define MD_FORMAT_SHFINE (UINT64_CAST 0x2) /* 10 */
-#define MD_FORMAT_SHCOARSE (UINT64_CAST 0x3) /* 11 */
- /* Shared coarse (standard) and shared sparse (premium) both use fmt 0x3 */
-
-
-/*
- * Cacheline state values.
- *
- * These are really *software* notions of the "state" of a cacheline; but the
- * actual values have been carefully chosen to align with some hardware values!
- * The MD_FMT_ST_TO_STATE macro is used to convert from hardware format/state
- * pairs in the directory entried into one of these cacheline state values.
- */
-
-#define MD_DIR_EXCLUSIVE (UINT64_CAST 0x0) /* ptr format, hw-defined */
-#define MD_DIR_UNOWNED (UINT64_CAST 0x1) /* format=0 */
-#define MD_DIR_SHARED (UINT64_CAST 0x2) /* format=2,3 */
-#define MD_DIR_BUSY_SHARED (UINT64_CAST 0x4) /* ptr format, hw-defined */
-#define MD_DIR_BUSY_EXCL (UINT64_CAST 0x5) /* ptr format, hw-defined */
-#define MD_DIR_WAIT (UINT64_CAST 0x6) /* ptr format, hw-defined */
-#define MD_DIR_POISONED (UINT64_CAST 0x7) /* ptr format, hw-defined */
-
-#ifndef __ASSEMBLY__
-
-/* Convert format and state fields into a single "cacheline state" value, defined above */
-
-#define MD_FMT_ST_TO_STATE(fmt, state) \
- ((fmt) == MD_FORMAT_POINTER ? (state) : \
- (fmt) == MD_FORMAT_UNOWNED ? MD_DIR_UNOWNED : \
- MD_DIR_SHARED)
-#define MD_DIR_STATE(x) MD_FMT_ST_TO_STATE(MD_DIR_FORMAT(x), MD_DIR_STVAL(x))
-
-#endif /* __ASSEMBLY__ */
-
-
-
-/* Directory field shifts and masks */
-
-/* Standard */
-
-#define MD_SDIR_FORMAT_SHFT 0 /* All formats */
-#define MD_SDIR_FORMAT_MASK (0x3 << 0)
-#define MD_SDIR_STATE_SHFT 2 /* Pointer fmt. only */
-#define MD_SDIR_STATE_MASK (0x7 << 2)
-
-/* Premium */
-
-#define MD_PDIR_FORMAT_SHFT 0 /* All formats */
-#define MD_PDIR_FORMAT_MASK (0x3 << 0)
-#define MD_PDIR_STATE_SHFT 2 /* Pointer fmt. only */
-#define MD_PDIR_STATE_MASK (0x7 << 2)
-
-/* Generic */
-
-#define MD_FORMAT_SHFT 0 /* All formats */
-#define MD_FORMAT_MASK (0x3 << 0)
-#define MD_STATE_SHFT 2 /* Pointer fmt. only */
-#define MD_STATE_MASK (0x7 << 2)
-
-
-/* Special shifts to reconstruct fields from the _a and _b parts */
-
-/* Standard: only shared coarse has split fields */
-
-#define MD_SDC_VECTORB_SHFT 8 /* eg: sdc_vector_a is 8 bits */
-
-/* Premium: pointer, shared fine, shared sparse */
-
-#define MD_PDP_POINTER1A_MASK 0xFF
-#define MD_PDP_POINTER1B_SHFT 8
-#define MD_PDP_POINTER2B_SHFT 5
-#define MD_PDP_ECCB_SHFT 6
-
-#define MD_PDF_VECTOR1B_SHFT 8
-#define MD_PDF_VECTOR2B_SHFT 8
-#define MD_PDF_TAG1B_SHFT 3
-#define MD_PDF_TAG2B_SHFT 3
-#define MD_PDF_ECC_SHFT 6
-
-#define MD_PDS_ROWB_SHFT 8
-#define MD_PDS_COLUMNB_SHFT 6
-#define MD_PDS_COLUMNC_SHFT (MD_PDS_COLUMNB_SHFT + 16)
-#define MD_PDS_ECC_SHFT 6
-
-
-
-/*
- * Directory/protection/counter initialization values, premium and standard
- */
-
-#define MD_PDIR_INIT 0
-#define MD_PDIR_INIT_CNT 0
-#define MD_PDIR_INIT_PROT 0
-
-#define MD_SDIR_INIT 0
-#define MD_SDIR_INIT_CNT 0
-#define MD_SDIR_INIT_PROT 0
-
-#define MD_PDIR_MASK 0xffffffffffffffff
-#define MD_SDIR_MASK 0xffffffff
-
-/* When premium mode is on for probing but standard directory memory
- is installed, the valid directory bits depend on the phys. bank */
-#define MD_PDIR_PROBE_MASK(pb) 0xffffffffffffffff
-#define MD_SDIR_PROBE_MASK(pb) (0xffff0000ffff << ((pb) ? 16 : 0))
-
-
-/*
- * Misc. field extractions and conversions
- */
-
-/* Convert an MD pointer (or message source, supplemental fields) */
-
-#define MD_PTR_NODE(x) ((x) >> MD_PTR_NODE_SHFT)
-#define MD_PTR_DEVICE(x) ((x) & MD_PTR_DEVICE_MASK)
-#define MD_PTR_SLICE(x) (((x) & MD_PTR_SUBNODE0_MASK) | \
- ((x) & MD_PTR_SUBNODE1_MASK) >> 1)
-#define MD_PTR_OWNER_CPU(x) (! ((x) & 2))
-#define MD_PTR_OWNER_IO(x) ((x) & 2)
-
-/* Extract format and raw state from a directory entry */
-
-#define MD_DIR_FORMAT(x) ((x) >> MD_SDIR_FORMAT_SHFT & \
- MD_SDIR_FORMAT_MASK >> MD_SDIR_FORMAT_SHFT)
-#define MD_DIR_STVAL(x) ((x) >> MD_SDIR_STATE_SHFT & \
- MD_SDIR_STATE_MASK >> MD_SDIR_STATE_SHFT)
-
-/* Mask & Shift to get HSPEC_ADDR from MD DIR_ERROR register */
-#define ERROR_ADDR_SHFT 3
-#define ERROR_HSPEC_SHFT 3
-#define DIR_ERR_HSPEC_MASK 0x1fffffff8
-
-/*
- * DIR_ERR* and MEM_ERR* defines are used to avoid ugly
- * #ifdefs for SN0 and SN1 in memerror.c code. See SN0/hubmd.h
- * for corresponding SN0 definitions.
- */
-#define md_dir_error_t md_dir_error_u_t
-#define md_mem_error_t md_mem_error_u_t
-#define derr_reg md_dir_error_regval
-#define merr_reg md_mem_error_regval
-
-#define DIR_ERR_UCE_VALID dir_err.md_dir_error_fld_s.de_uce_valid
-#define DIR_ERR_AE_VALID dir_err.md_dir_error_fld_s.de_ae_valid
-#define DIR_ERR_BAD_SYN dir_err.md_dir_error_fld_s.de_bad_syn
-#define DIR_ERR_CE_OVERRUN dir_err.md_dir_error_fld_s.de_ce_overrun
-#define MEM_ERR_ADDRESS mem_err.md_mem_error_fld_s.me_address
- /* BRINGUP Can the overrun bit be set without the valid bit? */
-#define MEM_ERR_CE_OVERRUN (mem_err.md_mem_error_fld_s.me_read_ce >> 1)
-#define MEM_ERR_BAD_SYN mem_err.md_mem_error_fld_s.me_bad_syn
-#define MEM_ERR_UCE_VALID (mem_err.md_mem_error_fld_s.me_read_uce & 1)
-
-
-
-/*********************************************************************
-
- We have the shift and masks of various fields defined below.
-
- *********************************************************************/
-
-/* MD_REFRESH_CONTROL fields */
-
-#define MRC_ENABLE_SHFT 63
-#define MRC_ENABLE_MASK (UINT64_CAST 1 << 63)
-#define MRC_ENABLE (UINT64_CAST 1 << 63)
-#define MRC_COUNTER_SHFT 12
-#define MRC_COUNTER_MASK (UINT64_CAST 0xfff << 12)
-#define MRC_CNT_THRESH_MASK 0xfff
-#define MRC_RESET_DEFAULTS (UINT64_CAST 0x800)
-
-/* MD_DIR_CONFIG fields */
-
-#define MDC_DIR_PREMIUM (UINT64_CAST 1 << 0)
-#define MDC_IGNORE_ECC_SHFT 1
-#define MDC_IGNORE_ECC_MASK (UINT64_CAST 1 << 1)
-
-/* MD_MEMORY_CONFIG fields */
-
-#define MMC_RP_CONFIG_SHFT 61
-#define MMC_RP_CONFIG_MASK (UINT64_CAST 1 << 61)
-#define MMC_RCD_CONFIG_SHFT 60
-#define MMC_RCD_CONFIG_MASK (UINT64_CAST 1 << 60)
-#define MMC_MB_NEG_EDGE_SHFT 56
-#define MMC_MB_NEG_EDGE_MASK (UINT64_CAST 0x7 << 56)
-#define MMC_SAMPLE_TIME_SHFT 52
-#define MMC_SAMPLE_TIME_MASK (UINT64_CAST 0x3 << 52)
-#define MMC_DELAY_MUX_SEL_SHFT 50
-#define MMC_DELAY_MUX_SEL_MASK (UINT64_CAST 0x3 << 50)
-#define MMC_PHASE_DELAY_SHFT 49
-#define MMC_PHASE_DELAY_MASK (UINT64_CAST 1 << 49)
-#define MMC_DB_NEG_EDGE_SHFT 48
-#define MMC_DB_NEG_EDGE_MASK (UINT64_CAST 1 << 48)
-#define MMC_CPU_PROT_IGNORE_SHFT 47
-#define MMC_CPU_PROT_IGNORE_MASK (UINT64_CAST 1 << 47)
-#define MMC_IO_PROT_IGNORE_SHFT 46
-#define MMC_IO_PROT_IGNORE_MASK (UINT64_CAST 1 << 46)
-#define MMC_IO_PROT_EN_SHFT 45
-#define MMC_IO_PROT_EN_MASK (UINT64_CAST 1 << 45)
-#define MMC_CC_ENABLE_SHFT 44
-#define MMC_CC_ENABLE_MASK (UINT64_CAST 1 << 44)
-#define MMC_DIMM0_SEL_SHFT 32
-#define MMC_DIMM0_SEL_MASK (UINT64_CAST 0x3 << 32)
-#define MMC_DIMM_SIZE_SHFT(_dimm) ((_dimm << 3) + 4)
-#define MMC_DIMM_SIZE_MASK(_dimm) (UINT64_CAST 0xf << MMC_DIMM_SIZE_SHFT(_dimm))
-#define MMC_DIMM_WIDTH_SHFT(_dimm) ((_dimm << 3) + 3)
-#define MMC_DIMM_WIDTH_MASK(_dimm) (UINT64_CAST 0x1 << MMC_DIMM_WIDTH_SHFT(_dimm))
-#define MMC_DIMM_BANKS_SHFT(_dimm) (_dimm << 3)
-#define MMC_DIMM_BANKS_MASK(_dimm) (UINT64_CAST 0x3 << MMC_DIMM_BANKS_SHFT(_dimm))
-#define MMC_BANK_ALL_MASK 0xffffffffLL
-/* Default values for write-only bits in MD_MEMORY_CONFIG */
-#define MMC_DEFAULT_BITS (UINT64_CAST 0x7 << MMC_MB_NEG_EDGE_SHFT)
-
-/* MD_MB_ECC_CONFIG fields */
-
-#define MEC_IGNORE_ECC (UINT64_CAST 0x1 << 0)
-
-/* MD_BIST_DATA fields */
-
-#define MBD_BIST_WRITE (UINT64_CAST 1 << 7)
-#define MBD_BIST_CYCLE (UINT64_CAST 1 << 6)
-#define MBD_BIST_BYTE (UINT64_CAST 1 << 5)
-#define MBD_BIST_NIBBLE (UINT64_CAST 1 << 4)
-#define MBD_BIST_DATA_MASK 0xf
-
-/* MD_BIST_CTL fields */
-
-#define MBC_DIMM_SHFT 5
-#define MBC_DIMM_MASK (UINT64_CAST 0x3 << 5)
-#define MBC_BANK_SHFT 4
-#define MBC_BANK_MASK (UINT64_CAST 0x1 << 4)
-#define MBC_BIST_RESET (UINT64_CAST 0x1 << 2)
-#define MBC_BIST_STOP (UINT64_CAST 0x1 << 1)
-#define MBC_BIST_START (UINT64_CAST 0x1 << 0)
-
-#define MBC_GO(dimm, bank) \
- (((dimm) << MBC_DIMM_SHFT) & MBC_DIMM_MASK | \
- ((bank) << MBC_BANK_SHFT) & MBC_BANK_MASK | \
- MBC_BIST_START)
-
-/* MD_BIST_STATUS fields */
-
-#define MBS_BIST_DONE (UINT64_CAST 0X1 << 1)
-#define MBS_BIST_PASSED (UINT64_CAST 0X1 << 0)
-
-/* MD_JUNK_BUS_TIMING fields */
-
-#define MJT_SYNERGY_ENABLE_SHFT 40
-#define MJT_SYNERGY_ENABLE_MASK (UINT64_CAST 0Xff << MJT_SYNERGY_ENABLE_SHFT)
-#define MJT_SYNERGY_SETUP_SHFT 32
-#define MJT_SYNERGY_SETUP_MASK (UINT64_CAST 0Xff << MJT_SYNERGY_SETUP_SHFT)
-#define MJT_UART_ENABLE_SHFT 24
-#define MJT_UART_ENABLE_MASK (UINT64_CAST 0Xff << MJT_UART_ENABLE_SHFT)
-#define MJT_UART_SETUP_SHFT 16
-#define MJT_UART_SETUP_MASK (UINT64_CAST 0Xff << MJT_UART_SETUP_SHFT)
-#define MJT_FPROM_ENABLE_SHFT 8
-#define MJT_FPROM_ENABLE_MASK (UINT64_CAST 0Xff << MJT_FPROM_ENABLE_SHFT)
-#define MJT_FPROM_SETUP_SHFT 0
-#define MJT_FPROM_SETUP_MASK (UINT64_CAST 0Xff << MJT_FPROM_SETUP_SHFT)
-
-#define MEM_ERROR_VALID_CE 1
-
-
-/* MD_FANDOP_CAC_STAT0, MD_FANDOP_CAC_STAT1 addr field shift */
-
-#define MFC_ADDR_SHFT 6
-
-#endif /* _ASM_IA64_SN_SN1_HUBMD_NEXT_H */
+++ /dev/null
-/* $Id$
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Copyright (C) 1992 - 1997, 2000-2001 Silicon Graphics, Inc. All rights reserved.
- */
-#ifndef _ASM_IA64_SN_SN1_HUBNI_H
-#define _ASM_IA64_SN_SN1_HUBNI_H
-
-
-/************************************************************************
- * *
- * WARNING!!! WARNING!!! WARNING!!! WARNING!!! WARNING!!! *
- * *
- * This file is created by an automated script. Any (minimal) changes *
- * made manually to this file should be made with care. *
- * *
- * MAKE ALL ADDITIONS TO THE END OF THIS FILE *
- * *
- ************************************************************************/
-
-#define NI_PORT_STATUS 0x00680000 /* LLP Status */
-
-
-
-#define NI_PORT_RESET 0x00680008 /*
- * Reset the Network
- * Interface
- */
-
-
-
-#define NI_RESET_ENABLE 0x00680010 /* Warm Reset Enable */
-
-
-
-#define NI_DIAG_PARMS 0x00680018 /*
- * Diagnostic
- * Parameters
- */
-
-
-
-#define NI_CHANNEL_CONTROL 0x00680020 /*
- * Virtual channel
- * control
- */
-
-
-
-#define NI_CHANNEL_TEST 0x00680028 /* LLP Test Control. */
-
-
-
-#define NI_PORT_PARMS 0x00680030 /* LLP Parameters */
-
-
-
-#define NI_CHANNEL_AGE 0x00680038 /*
- * Network age
- * injection control
- */
-
-
-
-#define NI_PORT_ERRORS 0x00680100 /* Errors */
-
-
-
-#define NI_PORT_HEADER_A 0x00680108 /*
- * Error Header first
- * half
- */
-
-
-
-#define NI_PORT_HEADER_B 0x00680110 /*
- * Error Header second
- * half
- */
-
-
-
-#define NI_PORT_SIDEBAND 0x00680118 /* Error Sideband */
-
-
-
-#define NI_PORT_ERROR_CLEAR 0x00680120 /*
- * Clear the Error
- * bits
- */
-
-
-
-#define NI_LOCAL_TABLE_0 0x00681000 /*
- * Base of Local
- * Mapping Table 0-127
- */
-
-
-
-#define NI_LOCAL_TABLE_1 0x00681008 /*
- * Base of Local
- * Mapping Table 0-127
- */
-
-
-
-#define NI_LOCAL_TABLE_2 0x00681010 /*
- * Base of Local
- * Mapping Table 0-127
- */
-
-
-
-#define NI_LOCAL_TABLE_3 0x00681018 /*
- * Base of Local
- * Mapping Table 0-127
- */
-
-
-
-#define NI_LOCAL_TABLE_4 0x00681020 /*
- * Base of Local
- * Mapping Table 0-127
- */
-
-
-
-#define NI_LOCAL_TABLE_5 0x00681028 /*
- * Base of Local
- * Mapping Table 0-127
- */
-
-
-
-#define NI_LOCAL_TABLE_6 0x00681030 /*
- * Base of Local
- * Mapping Table 0-127
- */
-
-
-
-#define NI_LOCAL_TABLE_7 0x00681038 /*
- * Base of Local
- * Mapping Table 0-127
- */
-
-
-
-#define NI_LOCAL_TABLE_8 0x00681040 /*
- * Base of Local
- * Mapping Table 0-127
- */
-
-
-
-#define NI_LOCAL_TABLE_9 0x00681048 /*
- * Base of Local
- * Mapping Table 0-127
- */
-
-
-
-#define NI_LOCAL_TABLE_10 0x00681050 /*
- * Base of Local
- * Mapping Table 0-127
- */
-
-
-
-#define NI_LOCAL_TABLE_11 0x00681058 /*
- * Base of Local
- * Mapping Table 0-127
- */
-
-
-
-#define NI_LOCAL_TABLE_12 0x00681060 /*
- * Base of Local
- * Mapping Table 0-127
- */
-
-
-
-#define NI_LOCAL_TABLE_13 0x00681068 /*
- * Base of Local
- * Mapping Table 0-127
- */
-
-
-
-#define NI_LOCAL_TABLE_14 0x00681070 /*
- * Base of Local
- * Mapping Table 0-127
- */
-
-
-
-#define NI_LOCAL_TABLE_15 0x00681078 /*
- * Base of Local
- * Mapping Table 0-127
- */
-
-
-
-#define NI_LOCAL_TABLE_16 0x00681080 /*
- * Base of Local
- * Mapping Table 0-127
- */
-
-
-
-#define NI_LOCAL_TABLE_17 0x00681088 /*
- * Base of Local
- * Mapping Table 0-127
- */
-
-
-
-#define NI_LOCAL_TABLE_18 0x00681090 /*
- * Base of Local
- * Mapping Table 0-127
- */
-
-
-
-#define NI_LOCAL_TABLE_19 0x00681098 /*
- * Base of Local
- * Mapping Table 0-127
- */
-
-
-
-#define NI_LOCAL_TABLE_20 0x006810A0 /*
- * Base of Local
- * Mapping Table 0-127
- */
-
-
-
-#define NI_LOCAL_TABLE_21 0x006810A8 /*
- * Base of Local
- * Mapping Table 0-127
- */
-
-
-
-#define NI_LOCAL_TABLE_22 0x006810B0 /*
- * Base of Local
- * Mapping Table 0-127
- */
-
-
-
-#define NI_LOCAL_TABLE_23 0x006810B8 /*
- * Base of Local
- * Mapping Table 0-127
- */
-
-
-
-#define NI_LOCAL_TABLE_24 0x006810C0 /*
- * Base of Local
- * Mapping Table 0-127
- */
-
-
-
-#define NI_LOCAL_TABLE_25 0x006810C8 /*
- * Base of Local
- * Mapping Table 0-127
- */
-
-
-
-#define NI_LOCAL_TABLE_26 0x006810D0 /*
- * Base of Local
- * Mapping Table 0-127
- */
-
-
-
-#define NI_LOCAL_TABLE_27 0x006810D8 /*
- * Base of Local
- * Mapping Table 0-127
- */
-
-
-
-#define NI_LOCAL_TABLE_28 0x006810E0 /*
- * Base of Local
- * Mapping Table 0-127
- */
-
-
-
-#define NI_LOCAL_TABLE_29 0x006810E8 /*
- * Base of Local
- * Mapping Table 0-127
- */
-
-
-
-#define NI_LOCAL_TABLE_30 0x006810F0 /*
- * Base of Local
- * Mapping Table 0-127
- */
-
-
-
-#define NI_LOCAL_TABLE_31 0x006810F8 /*
- * Base of Local
- * Mapping Table 0-127
- */
-
-
-
-#define NI_LOCAL_TABLE_32 0x00681100 /*
- * Base of Local
- * Mapping Table 0-127
- */
-
-
-
-#define NI_LOCAL_TABLE_33 0x00681108 /*
- * Base of Local
- * Mapping Table 0-127
- */
-
-
-
-#define NI_LOCAL_TABLE_34 0x00681110 /*
- * Base of Local
- * Mapping Table 0-127
- */
-
-
-
-#define NI_LOCAL_TABLE_35 0x00681118 /*
- * Base of Local
- * Mapping Table 0-127
- */
-
-
-
-#define NI_LOCAL_TABLE_36 0x00681120 /*
- * Base of Local
- * Mapping Table 0-127
- */
-
-
-
-#define NI_LOCAL_TABLE_37 0x00681128 /*
- * Base of Local
- * Mapping Table 0-127
- */
-
-
-
-#define NI_LOCAL_TABLE_38 0x00681130 /*
- * Base of Local
- * Mapping Table 0-127
- */
-
-
-
-#define NI_LOCAL_TABLE_39 0x00681138 /*
- * Base of Local
- * Mapping Table 0-127
- */
-
-
-
-#define NI_LOCAL_TABLE_40 0x00681140 /*
- * Base of Local
- * Mapping Table 0-127
- */
-
-
-
-#define NI_LOCAL_TABLE_41 0x00681148 /*
- * Base of Local
- * Mapping Table 0-127
- */
-
-
-
-#define NI_LOCAL_TABLE_42 0x00681150 /*
- * Base of Local
- * Mapping Table 0-127
- */
-
-
-
-#define NI_LOCAL_TABLE_43 0x00681158 /*
- * Base of Local
- * Mapping Table 0-127
- */
-
-
-
-#define NI_LOCAL_TABLE_44 0x00681160 /*
- * Base of Local
- * Mapping Table 0-127
- */
-
-
-
-#define NI_LOCAL_TABLE_45 0x00681168 /*
- * Base of Local
- * Mapping Table 0-127
- */
-
-
-
-#define NI_LOCAL_TABLE_46 0x00681170 /*
- * Base of Local
- * Mapping Table 0-127
- */
-
-
-
-#define NI_LOCAL_TABLE_47 0x00681178 /*
- * Base of Local
- * Mapping Table 0-127
- */
-
-
-
-#define NI_LOCAL_TABLE_48 0x00681180 /*
- * Base of Local
- * Mapping Table 0-127
- */
-
-
-
-#define NI_LOCAL_TABLE_49 0x00681188 /*
- * Base of Local
- * Mapping Table 0-127
- */
-
-
-
-#define NI_LOCAL_TABLE_50 0x00681190 /*
- * Base of Local
- * Mapping Table 0-127
- */
-
-
-
-#define NI_LOCAL_TABLE_51 0x00681198 /*
- * Base of Local
- * Mapping Table 0-127
- */
-
-
-
-#define NI_LOCAL_TABLE_52 0x006811A0 /*
- * Base of Local
- * Mapping Table 0-127
- */
-
-
-
-#define NI_LOCAL_TABLE_53 0x006811A8 /*
- * Base of Local
- * Mapping Table 0-127
- */
-
-
-
-#define NI_LOCAL_TABLE_54 0x006811B0 /*
- * Base of Local
- * Mapping Table 0-127
- */
-
-
-
-#define NI_LOCAL_TABLE_55 0x006811B8 /*
- * Base of Local
- * Mapping Table 0-127
- */
-
-
-
-#define NI_LOCAL_TABLE_56 0x006811C0 /*
- * Base of Local
- * Mapping Table 0-127
- */
-
-
-
-#define NI_LOCAL_TABLE_57 0x006811C8 /*
- * Base of Local
- * Mapping Table 0-127
- */
-
-
-
-#define NI_LOCAL_TABLE_58 0x006811D0 /*
- * Base of Local
- * Mapping Table 0-127
- */
-
-
-
-#define NI_LOCAL_TABLE_59 0x006811D8 /*
- * Base of Local
- * Mapping Table 0-127
- */
-
-
-
-#define NI_LOCAL_TABLE_60 0x006811E0 /*
- * Base of Local
- * Mapping Table 0-127
- */
-
-
-
-#define NI_LOCAL_TABLE_61 0x006811E8 /*
- * Base of Local
- * Mapping Table 0-127
- */
-
-
-
-#define NI_LOCAL_TABLE_62 0x006811F0 /*
- * Base of Local
- * Mapping Table 0-127
- */
-
-
-
-#define NI_LOCAL_TABLE_63 0x006811F8 /*
- * Base of Local
- * Mapping Table 0-127
- */
-
-
-
-#define NI_LOCAL_TABLE_64 0x00681200 /*
- * Base of Local
- * Mapping Table 0-127
- */
-
-
-
-#define NI_LOCAL_TABLE_65 0x00681208 /*
- * Base of Local
- * Mapping Table 0-127
- */
-
-
-
-#define NI_LOCAL_TABLE_66 0x00681210 /*
- * Base of Local
- * Mapping Table 0-127
- */
-
-
-
-#define NI_LOCAL_TABLE_67 0x00681218 /*
- * Base of Local
- * Mapping Table 0-127
- */
-
-
-
-#define NI_LOCAL_TABLE_68 0x00681220 /*
- * Base of Local
- * Mapping Table 0-127
- */
-
-
-
-#define NI_LOCAL_TABLE_69 0x00681228 /*
- * Base of Local
- * Mapping Table 0-127
- */
-
-
-
-#define NI_LOCAL_TABLE_70 0x00681230 /*
- * Base of Local
- * Mapping Table 0-127
- */
-
-
-
-#define NI_LOCAL_TABLE_71 0x00681238 /*
- * Base of Local
- * Mapping Table 0-127
- */
-
-
-
-#define NI_LOCAL_TABLE_72 0x00681240 /*
- * Base of Local
- * Mapping Table 0-127
- */
-
-
-
-#define NI_LOCAL_TABLE_73 0x00681248 /*
- * Base of Local
- * Mapping Table 0-127
- */
-
-
-
-#define NI_LOCAL_TABLE_74 0x00681250 /*
- * Base of Local
- * Mapping Table 0-127
- */
-
-
-
-#define NI_LOCAL_TABLE_75 0x00681258 /*
- * Base of Local
- * Mapping Table 0-127
- */
-
-
-
-#define NI_LOCAL_TABLE_76 0x00681260 /*
- * Base of Local
- * Mapping Table 0-127
- */
-
-
-
-#define NI_LOCAL_TABLE_77 0x00681268 /*
- * Base of Local
- * Mapping Table 0-127
- */
-
-
-
-#define NI_LOCAL_TABLE_78 0x00681270 /*
- * Base of Local
- * Mapping Table 0-127
- */
-
-
-
-#define NI_LOCAL_TABLE_79 0x00681278 /*
- * Base of Local
- * Mapping Table 0-127
- */
-
-
-
-#define NI_LOCAL_TABLE_80 0x00681280 /*
- * Base of Local
- * Mapping Table 0-127
- */
-
-
-
-#define NI_LOCAL_TABLE_81 0x00681288 /*
- * Base of Local
- * Mapping Table 0-127
- */
-
-
-
-#define NI_LOCAL_TABLE_82 0x00681290 /*
- * Base of Local
- * Mapping Table 0-127
- */
-
-
-
-#define NI_LOCAL_TABLE_83 0x00681298 /*
- * Base of Local
- * Mapping Table 0-127
- */
-
-
-
-#define NI_LOCAL_TABLE_84 0x006812A0 /*
- * Base of Local
- * Mapping Table 0-127
- */
-
-
-
-#define NI_LOCAL_TABLE_85 0x006812A8 /*
- * Base of Local
- * Mapping Table 0-127
- */
-
-
-
-#define NI_LOCAL_TABLE_86 0x006812B0 /*
- * Base of Local
- * Mapping Table 0-127
- */
-
-
-
-#define NI_LOCAL_TABLE_87 0x006812B8 /*
- * Base of Local
- * Mapping Table 0-127
- */
-
-
-
-#define NI_LOCAL_TABLE_88 0x006812C0 /*
- * Base of Local
- * Mapping Table 0-127
- */
-
-
-
-#define NI_LOCAL_TABLE_89 0x006812C8 /*
- * Base of Local
- * Mapping Table 0-127
- */
-
-
-
-#define NI_LOCAL_TABLE_90 0x006812D0 /*
- * Base of Local
- * Mapping Table 0-127
- */
-
-
-
-#define NI_LOCAL_TABLE_91 0x006812D8 /*
- * Base of Local
- * Mapping Table 0-127
- */
-
-
-
-#define NI_LOCAL_TABLE_92 0x006812E0 /*
- * Base of Local
- * Mapping Table 0-127
- */
-
-
-
-#define NI_LOCAL_TABLE_93 0x006812E8 /*
- * Base of Local
- * Mapping Table 0-127
- */
-
-
-
-#define NI_LOCAL_TABLE_94 0x006812F0 /*
- * Base of Local
- * Mapping Table 0-127
- */
-
-
-
-#define NI_LOCAL_TABLE_95 0x006812F8 /*
- * Base of Local
- * Mapping Table 0-127
- */
-
-
-
-#define NI_LOCAL_TABLE_96 0x00681300 /*
- * Base of Local
- * Mapping Table 0-127
- */
-
-
-
-#define NI_LOCAL_TABLE_97 0x00681308 /*
- * Base of Local
- * Mapping Table 0-127
- */
-
-
-
-#define NI_LOCAL_TABLE_98 0x00681310 /*
- * Base of Local
- * Mapping Table 0-127
- */
-
-
-
-#define NI_LOCAL_TABLE_99 0x00681318 /*
- * Base of Local
- * Mapping Table 0-127
- */
-
-
-
-#define NI_LOCAL_TABLE_100 0x00681320 /*
- * Base of Local
- * Mapping Table 0-127
- */
-
-
-
-#define NI_LOCAL_TABLE_101 0x00681328 /*
- * Base of Local
- * Mapping Table 0-127
- */
-
-
-
-#define NI_LOCAL_TABLE_102 0x00681330 /*
- * Base of Local
- * Mapping Table 0-127
- */
-
-
-
-#define NI_LOCAL_TABLE_103 0x00681338 /*
- * Base of Local
- * Mapping Table 0-127
- */
-
-
-
-#define NI_LOCAL_TABLE_104 0x00681340 /*
- * Base of Local
- * Mapping Table 0-127
- */
-
-
-
-#define NI_LOCAL_TABLE_105 0x00681348 /*
- * Base of Local
- * Mapping Table 0-127
- */
-
-
-
-#define NI_LOCAL_TABLE_106 0x00681350 /*
- * Base of Local
- * Mapping Table 0-127
- */
-
-
-
-#define NI_LOCAL_TABLE_107 0x00681358 /*
- * Base of Local
- * Mapping Table 0-127
- */
-
-
-
-#define NI_LOCAL_TABLE_108 0x00681360 /*
- * Base of Local
- * Mapping Table 0-127
- */
-
-
-
-#define NI_LOCAL_TABLE_109 0x00681368 /*
- * Base of Local
- * Mapping Table 0-127
- */
-
-
-
-#define NI_LOCAL_TABLE_110 0x00681370 /*
- * Base of Local
- * Mapping Table 0-127
- */
-
-
-
-#define NI_LOCAL_TABLE_111 0x00681378 /*
- * Base of Local
- * Mapping Table 0-127
- */
-
-
-
-#define NI_LOCAL_TABLE_112 0x00681380 /*
- * Base of Local
- * Mapping Table 0-127
- */
-
-
-
-#define NI_LOCAL_TABLE_113 0x00681388 /*
- * Base of Local
- * Mapping Table 0-127
- */
-
-
-
-#define NI_LOCAL_TABLE_114 0x00681390 /*
- * Base of Local
- * Mapping Table 0-127
- */
-
-
-
-#define NI_LOCAL_TABLE_115 0x00681398 /*
- * Base of Local
- * Mapping Table 0-127
- */
-
-
-
-#define NI_LOCAL_TABLE_116 0x006813A0 /*
- * Base of Local
- * Mapping Table 0-127
- */
-
-
-
-#define NI_LOCAL_TABLE_117 0x006813A8 /*
- * Base of Local
- * Mapping Table 0-127
- */
-
-
-
-#define NI_LOCAL_TABLE_118 0x006813B0 /*
- * Base of Local
- * Mapping Table 0-127
- */
-
-
-
-#define NI_LOCAL_TABLE_119 0x006813B8 /*
- * Base of Local
- * Mapping Table 0-127
- */
-
-
-
-#define NI_LOCAL_TABLE_120 0x006813C0 /*
- * Base of Local
- * Mapping Table 0-127
- */
-
-
-
-#define NI_LOCAL_TABLE_121 0x006813C8 /*
- * Base of Local
- * Mapping Table 0-127
- */
-
-
-
-#define NI_LOCAL_TABLE_122 0x006813D0 /*
- * Base of Local
- * Mapping Table 0-127
- */
-
-
-
-#define NI_LOCAL_TABLE_123 0x006813D8 /*
- * Base of Local
- * Mapping Table 0-127
- */
-
-
-
-#define NI_LOCAL_TABLE_124 0x006813E0 /*
- * Base of Local
- * Mapping Table 0-127
- */
-
-
-
-#define NI_LOCAL_TABLE_125 0x006813E8 /*
- * Base of Local
- * Mapping Table 0-127
- */
-
-
-
-#define NI_LOCAL_TABLE_126 0x006813F0 /*
- * Base of Local
- * Mapping Table 0-127
- */
-
-
-
-#define NI_LOCAL_TABLE_127 0x006813F8 /*
- * Base of Local
- * Mapping Table 0-127
- */
-
-
-
-#define NI_GLOBAL_TABLE 0x00682000 /*
- * Base of Global
- * Mapping Table
- */
-
-
-
-
-
-#ifndef __ASSEMBLY__
-
-/************************************************************************
- * *
- * This register describes the LLP status. *
- * *
- ************************************************************************/
-
-
-
-
-#ifdef LITTLE_ENDIAN
-
-typedef union ni_port_status_u {
- bdrkreg_t ni_port_status_regval;
- struct {
- bdrkreg_t ps_port_status : 2;
- bdrkreg_t ps_remote_power : 1;
- bdrkreg_t ps_rsvd : 61;
- } ni_port_status_fld_s;
-} ni_port_status_u_t;
-
-#else
-
-typedef union ni_port_status_u {
- bdrkreg_t ni_port_status_regval;
- struct {
- bdrkreg_t ps_rsvd : 61;
- bdrkreg_t ps_remote_power : 1;
- bdrkreg_t ps_port_status : 2;
- } ni_port_status_fld_s;
-} ni_port_status_u_t;
-
-#endif
-
-
-
-
-/************************************************************************
- * *
- * Writing this register issues a reset to the network interface. *
- * *
- ************************************************************************/
-
-
-
-
-#ifdef LITTLE_ENDIAN
-
-typedef union ni_port_reset_u {
- bdrkreg_t ni_port_reset_regval;
- struct {
- bdrkreg_t pr_link_reset_out : 1;
- bdrkreg_t pr_port_reset : 1;
- bdrkreg_t pr_local_reset : 1;
- bdrkreg_t pr_rsvd : 61;
- } ni_port_reset_fld_s;
-} ni_port_reset_u_t;
-
-#else
-
-typedef union ni_port_reset_u {
- bdrkreg_t ni_port_reset_regval;
- struct {
- bdrkreg_t pr_rsvd : 61;
- bdrkreg_t pr_local_reset : 1;
- bdrkreg_t pr_port_reset : 1;
- bdrkreg_t pr_link_reset_out : 1;
- } ni_port_reset_fld_s;
-} ni_port_reset_u_t;
-
-#endif
-
-
-
-/************************************************************************
- * *
- * This register contains the warm reset enable bit. *
- * *
- ************************************************************************/
-
-
-
-
-#ifdef LITTLE_ENDIAN
-
-typedef union ni_reset_enable_u {
- bdrkreg_t ni_reset_enable_regval;
- struct {
- bdrkreg_t re_reset_ok : 1;
- bdrkreg_t re_rsvd : 63;
- } ni_reset_enable_fld_s;
-} ni_reset_enable_u_t;
-
-#else
-
-typedef union ni_reset_enable_u {
- bdrkreg_t ni_reset_enable_regval;
- struct {
- bdrkreg_t re_rsvd : 63;
- bdrkreg_t re_reset_ok : 1;
- } ni_reset_enable_fld_s;
-} ni_reset_enable_u_t;
-
-#endif
-
-
-
-
-/************************************************************************
- * *
- * This register contains parameters for diagnostics. *
- * *
- ************************************************************************/
-
-
-
-
-#ifdef LITTLE_ENDIAN
-
-typedef union ni_diag_parms_u {
- bdrkreg_t ni_diag_parms_regval;
- struct {
- bdrkreg_t dp_send_data_error : 1;
- bdrkreg_t dp_port_disable : 1;
- bdrkreg_t dp_send_err_off : 1;
- bdrkreg_t dp_rsvd : 61;
- } ni_diag_parms_fld_s;
-} ni_diag_parms_u_t;
-
-#else
-
-typedef union ni_diag_parms_u {
- bdrkreg_t ni_diag_parms_regval;
- struct {
- bdrkreg_t dp_rsvd : 61;
- bdrkreg_t dp_send_err_off : 1;
- bdrkreg_t dp_port_disable : 1;
- bdrkreg_t dp_send_data_error : 1;
- } ni_diag_parms_fld_s;
-} ni_diag_parms_u_t;
-
-#endif
-
-
-
-
-/************************************************************************
- * *
- * This register contains the virtual channel selection control for *
- * outgoing messages from the Bedrock. *
- * *
- ************************************************************************/
-
-
-
-
-#ifdef LITTLE_ENDIAN
-
-typedef union ni_channel_control_u {
- bdrkreg_t ni_channel_control_regval;
- struct {
- bdrkreg_t cc_vch_one_request : 1;
- bdrkreg_t cc_vch_two_request : 1;
- bdrkreg_t cc_vch_nine_request : 1;
- bdrkreg_t cc_vch_vector_request : 1;
- bdrkreg_t cc_vch_one_reply : 1;
- bdrkreg_t cc_vch_two_reply : 1;
- bdrkreg_t cc_vch_nine_reply : 1;
- bdrkreg_t cc_vch_vector_reply : 1;
- bdrkreg_t cc_send_vch_sel : 1;
- bdrkreg_t cc_rsvd : 55;
- } ni_channel_control_fld_s;
-} ni_channel_control_u_t;
-
-#else
-
-typedef union ni_channel_control_u {
- bdrkreg_t ni_channel_control_regval;
- struct {
- bdrkreg_t cc_rsvd : 55;
- bdrkreg_t cc_send_vch_sel : 1;
- bdrkreg_t cc_vch_vector_reply : 1;
- bdrkreg_t cc_vch_nine_reply : 1;
- bdrkreg_t cc_vch_two_reply : 1;
- bdrkreg_t cc_vch_one_reply : 1;
- bdrkreg_t cc_vch_vector_request : 1;
- bdrkreg_t cc_vch_nine_request : 1;
- bdrkreg_t cc_vch_two_request : 1;
- bdrkreg_t cc_vch_one_request : 1;
- } ni_channel_control_fld_s;
-} ni_channel_control_u_t;
-
-#endif
-
-
-
-
-/************************************************************************
- * *
- * This register allows access to the LLP test logic. *
- * *
- ************************************************************************/
-
-
-
-
-#ifdef LITTLE_ENDIAN
-
-typedef union ni_channel_test_u {
- bdrkreg_t ni_channel_test_regval;
- struct {
- bdrkreg_t ct_testseed : 20;
- bdrkreg_t ct_testmask : 8;
- bdrkreg_t ct_testdata : 20;
- bdrkreg_t ct_testvalid : 1;
- bdrkreg_t ct_testcberr : 1;
- bdrkreg_t ct_testflit : 3;
- bdrkreg_t ct_testclear : 1;
- bdrkreg_t ct_testerrcapture : 1;
- bdrkreg_t ct_rsvd : 9;
- } ni_channel_test_fld_s;
-} ni_channel_test_u_t;
-
-#else
-
-typedef union ni_channel_test_u {
- bdrkreg_t ni_channel_test_regval;
- struct {
- bdrkreg_t ct_rsvd : 9;
- bdrkreg_t ct_testerrcapture : 1;
- bdrkreg_t ct_testclear : 1;
- bdrkreg_t ct_testflit : 3;
- bdrkreg_t ct_testcberr : 1;
- bdrkreg_t ct_testvalid : 1;
- bdrkreg_t ct_testdata : 20;
- bdrkreg_t ct_testmask : 8;
- bdrkreg_t ct_testseed : 20;
- } ni_channel_test_fld_s;
-} ni_channel_test_u_t;
-
-#endif
-
-
-
-
-/************************************************************************
- * *
- * This register contains LLP port parameters and enables for the *
- * capture of header data. *
- * *
- ************************************************************************/
-
-
-
-
-#ifdef LITTLE_ENDIAN
-
-typedef union ni_port_parms_u {
- bdrkreg_t ni_port_parms_regval;
- struct {
- bdrkreg_t pp_max_burst : 10;
- bdrkreg_t pp_null_timeout : 6;
- bdrkreg_t pp_max_retry : 10;
- bdrkreg_t pp_d_avail_sel : 2;
- bdrkreg_t pp_rsvd_1 : 1;
- bdrkreg_t pp_first_err_enable : 1;
- bdrkreg_t pp_squash_err_enable : 1;
- bdrkreg_t pp_vch_err_enable : 4;
- bdrkreg_t pp_rsvd : 29;
- } ni_port_parms_fld_s;
-} ni_port_parms_u_t;
-
-#else
-
-typedef union ni_port_parms_u {
- bdrkreg_t ni_port_parms_regval;
- struct {
- bdrkreg_t pp_rsvd : 29;
- bdrkreg_t pp_vch_err_enable : 4;
- bdrkreg_t pp_squash_err_enable : 1;
- bdrkreg_t pp_first_err_enable : 1;
- bdrkreg_t pp_rsvd_1 : 1;
- bdrkreg_t pp_d_avail_sel : 2;
- bdrkreg_t pp_max_retry : 10;
- bdrkreg_t pp_null_timeout : 6;
- bdrkreg_t pp_max_burst : 10;
- } ni_port_parms_fld_s;
-} ni_port_parms_u_t;
-
-#endif
-
-
-
-
-/************************************************************************
- * *
- * This register contains the age at which request and reply packets *
- * are injected into the network. This feature allows replies to be *
- * given a higher fixed priority than requests, which can be *
- * important in some network saturation situations. *
- * *
- ************************************************************************/
-
-
-
-
-#ifdef LITTLE_ENDIAN
-
-typedef union ni_channel_age_u {
- bdrkreg_t ni_channel_age_regval;
- struct {
- bdrkreg_t ca_request_inject_age : 8;
- bdrkreg_t ca_reply_inject_age : 8;
- bdrkreg_t ca_rsvd : 48;
- } ni_channel_age_fld_s;
-} ni_channel_age_u_t;
-
-#else
-
-typedef union ni_channel_age_u {
- bdrkreg_t ni_channel_age_regval;
- struct {
- bdrkreg_t ca_rsvd : 48;
- bdrkreg_t ca_reply_inject_age : 8;
- bdrkreg_t ca_request_inject_age : 8;
- } ni_channel_age_fld_s;
-} ni_channel_age_u_t;
-
-#endif
-
-
-
-
-/************************************************************************
- * *
- * This register contains latched LLP port and problematic message *
- * errors. The contents are the same information as the *
- * NI_PORT_ERROR_CLEAR register, but, in this register read accesses *
- * are non-destructive. Bits [52:24] assert the NI interrupt. *
- * *
- ************************************************************************/
-
-
-
-
-#ifdef LITTLE_ENDIAN
-
-typedef union ni_port_errors_u {
- bdrkreg_t ni_port_errors_regval;
- struct {
- bdrkreg_t pe_sn_error_count : 8;
- bdrkreg_t pe_cb_error_count : 8;
- bdrkreg_t pe_retry_count : 8;
- bdrkreg_t pe_tail_timeout : 4;
- bdrkreg_t pe_fifo_overflow : 4;
- bdrkreg_t pe_external_short : 4;
- bdrkreg_t pe_external_long : 4;
- bdrkreg_t pe_external_bad_header : 4;
- bdrkreg_t pe_internal_short : 4;
- bdrkreg_t pe_internal_long : 4;
- bdrkreg_t pe_link_reset_in : 1;
- bdrkreg_t pe_rsvd : 11;
- } ni_port_errors_fld_s;
-} ni_port_errors_u_t;
-
-#else
-
-typedef union ni_port_errors_u {
- bdrkreg_t ni_port_errors_regval;
- struct {
- bdrkreg_t pe_rsvd : 11;
- bdrkreg_t pe_link_reset_in : 1;
- bdrkreg_t pe_internal_long : 4;
- bdrkreg_t pe_internal_short : 4;
- bdrkreg_t pe_external_bad_header : 4;
- bdrkreg_t pe_external_long : 4;
- bdrkreg_t pe_external_short : 4;
- bdrkreg_t pe_fifo_overflow : 4;
- bdrkreg_t pe_tail_timeout : 4;
- bdrkreg_t pe_retry_count : 8;
- bdrkreg_t pe_cb_error_count : 8;
- bdrkreg_t pe_sn_error_count : 8;
- } ni_port_errors_fld_s;
-} ni_port_errors_u_t;
-
-#endif
-
-
-
-
-/************************************************************************
- * *
- * This register provides the sideband data associated with the *
- * NI_PORT_HEADER registers and also additional data for error *
- * processing. This register is not cleared on reset. *
- * *
- ************************************************************************/
-
-
-
-
-#ifdef LITTLE_ENDIAN
-
-typedef union ni_port_sideband_u {
- bdrkreg_t ni_port_sideband_regval;
- struct {
- bdrkreg_t ps_sideband : 8;
- bdrkreg_t ps_bad_dest : 1;
- bdrkreg_t ps_bad_prexsel : 1;
- bdrkreg_t ps_rcv_error : 1;
- bdrkreg_t ps_bad_message : 1;
- bdrkreg_t ps_squash : 1;
- bdrkreg_t ps_sn_status : 1;
- bdrkreg_t ps_cb_status : 1;
- bdrkreg_t ps_send_error : 1;
- bdrkreg_t ps_vch_active : 4;
- bdrkreg_t ps_rsvd : 44;
- } ni_port_sideband_fld_s;
-} ni_port_sideband_u_t;
-
-#else
-
-typedef union ni_port_sideband_u {
- bdrkreg_t ni_port_sideband_regval;
- struct {
- bdrkreg_t ps_rsvd : 44;
- bdrkreg_t ps_vch_active : 4;
- bdrkreg_t ps_send_error : 1;
- bdrkreg_t ps_cb_status : 1;
- bdrkreg_t ps_sn_status : 1;
- bdrkreg_t ps_squash : 1;
- bdrkreg_t ps_bad_message : 1;
- bdrkreg_t ps_rcv_error : 1;
- bdrkreg_t ps_bad_prexsel : 1;
- bdrkreg_t ps_bad_dest : 1;
- bdrkreg_t ps_sideband : 8;
- } ni_port_sideband_fld_s;
-} ni_port_sideband_u_t;
-
-#endif
-
-
-
-
-/************************************************************************
- * *
- * This register contains latched LLP port and problematic message *
- * errors. The contents are the same information as the *
- * NI_PORT_ERROR_CLEAR register, but, in this register read accesses *
- * are non-destructive. Bits [52:24] assert the NI interrupt. *
- * *
- ************************************************************************/
-
-
-
-
-#ifdef LITTLE_ENDIAN
-
-typedef union ni_port_error_clear_u {
- bdrkreg_t ni_port_error_clear_regval;
- struct {
- bdrkreg_t pec_sn_error_count : 8;
- bdrkreg_t pec_cb_error_count : 8;
- bdrkreg_t pec_retry_count : 8;
- bdrkreg_t pec_tail_timeout : 4;
- bdrkreg_t pec_fifo_overflow : 4;
- bdrkreg_t pec_external_short : 4;
- bdrkreg_t pec_external_long : 4;
- bdrkreg_t pec_external_bad_header : 4;
- bdrkreg_t pec_internal_short : 4;
- bdrkreg_t pec_internal_long : 4;
- bdrkreg_t pec_link_reset_in : 1;
- bdrkreg_t pec_rsvd : 11;
- } ni_port_error_clear_fld_s;
-} ni_port_error_clear_u_t;
-
-#else
-
-typedef union ni_port_error_clear_u {
- bdrkreg_t ni_port_error_clear_regval;
- struct {
- bdrkreg_t pec_rsvd : 11;
- bdrkreg_t pec_link_reset_in : 1;
- bdrkreg_t pec_internal_long : 4;
- bdrkreg_t pec_internal_short : 4;
- bdrkreg_t pec_external_bad_header : 4;
- bdrkreg_t pec_external_long : 4;
- bdrkreg_t pec_external_short : 4;
- bdrkreg_t pec_fifo_overflow : 4;
- bdrkreg_t pec_tail_timeout : 4;
- bdrkreg_t pec_retry_count : 8;
- bdrkreg_t pec_cb_error_count : 8;
- bdrkreg_t pec_sn_error_count : 8;
- } ni_port_error_clear_fld_s;
-} ni_port_error_clear_u_t;
-
-#endif
-
-
-
-
-/************************************************************************
- * *
- * Lookup table for the next hop's exit port. The table entry *
- * selection is based on the 7-bit LocalCube routing destination. *
- * *
- ************************************************************************/
-
-
-
-
-#ifdef LITTLE_ENDIAN
-
-typedef union ni_local_table_0_u {
- bdrkreg_t ni_local_table_0_regval;
- struct {
- bdrkreg_t lt0_next_exit_port : 4;
- bdrkreg_t lt0_next_vch_lsb : 1;
- bdrkreg_t lt0_rsvd : 59;
- } ni_local_table_0_fld_s;
-} ni_local_table_0_u_t;
-
-#else
-
-typedef union ni_local_table_0_u {
- bdrkreg_t ni_local_table_0_regval;
- struct {
- bdrkreg_t lt0_rsvd : 59;
- bdrkreg_t lt0_next_vch_lsb : 1;
- bdrkreg_t lt0_next_exit_port : 4;
- } ni_local_table_0_fld_s;
-} ni_local_table_0_u_t;
-
-#endif
-
-
-
-
-/************************************************************************
- * *
- * Lookup table for the next hop's exit port. The table entry *
- * selection is based on the 7-bit LocalCube routing destination. *
- * *
- ************************************************************************/
-
-
-
-
-#ifdef LITTLE_ENDIAN
-
-typedef union ni_local_table_127_u {
- bdrkreg_t ni_local_table_127_regval;
- struct {
- bdrkreg_t lt1_next_exit_port : 4;
- bdrkreg_t lt1_next_vch_lsb : 1;
- bdrkreg_t lt1_rsvd : 59;
- } ni_local_table_127_fld_s;
-} ni_local_table_127_u_t;
-
-#else
-
-typedef union ni_local_table_127_u {
- bdrkreg_t ni_local_table_127_regval;
- struct {
- bdrkreg_t lt1_rsvd : 59;
- bdrkreg_t lt1_next_vch_lsb : 1;
- bdrkreg_t lt1_next_exit_port : 4;
- } ni_local_table_127_fld_s;
-} ni_local_table_127_u_t;
-
-#endif
-
-
-
-
-/************************************************************************
- * *
- * Lookup table for the next hop's exit port. The table entry *
- * selection is based on the 1-bit MetaCube routing destination. *
- * *
- ************************************************************************/
-
-
-
-
-#ifdef LITTLE_ENDIAN
-
-typedef union ni_global_table_u {
- bdrkreg_t ni_global_table_regval;
- struct {
- bdrkreg_t gt_next_exit_port : 4;
- bdrkreg_t gt_next_vch_lsb : 1;
- bdrkreg_t gt_rsvd : 59;
- } ni_global_table_fld_s;
-} ni_global_table_u_t;
-
-#else
-
-typedef union ni_global_table_u {
- bdrkreg_t ni_global_table_regval;
- struct {
- bdrkreg_t gt_rsvd : 59;
- bdrkreg_t gt_next_vch_lsb : 1;
- bdrkreg_t gt_next_exit_port : 4;
- } ni_global_table_fld_s;
-} ni_global_table_u_t;
-
-#endif
-
-
-
-
-
-
-#endif /* __ASSEMBLY__ */
-
-/************************************************************************
- * *
- * The following defines which were not formed into structures are *
- * probably indentical to another register, and the name of the *
- * register is provided against each of these registers. This *
- * information needs to be checked carefully *
- * *
- * NI_LOCAL_TABLE_1 NI_LOCAL_TABLE_0 *
- * NI_LOCAL_TABLE_2 NI_LOCAL_TABLE_0 *
- * NI_LOCAL_TABLE_3 NI_LOCAL_TABLE_0 *
- * NI_LOCAL_TABLE_4 NI_LOCAL_TABLE_0 *
- * NI_LOCAL_TABLE_5 NI_LOCAL_TABLE_0 *
- * NI_LOCAL_TABLE_6 NI_LOCAL_TABLE_0 *
- * NI_LOCAL_TABLE_7 NI_LOCAL_TABLE_0 *
- * NI_LOCAL_TABLE_8 NI_LOCAL_TABLE_0 *
- * NI_LOCAL_TABLE_9 NI_LOCAL_TABLE_0 *
- * NI_LOCAL_TABLE_10 NI_LOCAL_TABLE_0 *
- * NI_LOCAL_TABLE_11 NI_LOCAL_TABLE_0 *
- * NI_LOCAL_TABLE_12 NI_LOCAL_TABLE_0 *
- * NI_LOCAL_TABLE_13 NI_LOCAL_TABLE_0 *
- * NI_LOCAL_TABLE_14 NI_LOCAL_TABLE_0 *
- * NI_LOCAL_TABLE_15 NI_LOCAL_TABLE_0 *
- * NI_LOCAL_TABLE_16 NI_LOCAL_TABLE_0 *
- * NI_LOCAL_TABLE_17 NI_LOCAL_TABLE_0 *
- * NI_LOCAL_TABLE_18 NI_LOCAL_TABLE_0 *
- * NI_LOCAL_TABLE_19 NI_LOCAL_TABLE_0 *
- * NI_LOCAL_TABLE_20 NI_LOCAL_TABLE_0 *
- * NI_LOCAL_TABLE_21 NI_LOCAL_TABLE_0 *
- * NI_LOCAL_TABLE_22 NI_LOCAL_TABLE_0 *
- * NI_LOCAL_TABLE_23 NI_LOCAL_TABLE_0 *
- * NI_LOCAL_TABLE_24 NI_LOCAL_TABLE_0 *
- * NI_LOCAL_TABLE_25 NI_LOCAL_TABLE_0 *
- * NI_LOCAL_TABLE_26 NI_LOCAL_TABLE_0 *
- * NI_LOCAL_TABLE_27 NI_LOCAL_TABLE_0 *
- * NI_LOCAL_TABLE_28 NI_LOCAL_TABLE_0 *
- * NI_LOCAL_TABLE_29 NI_LOCAL_TABLE_0 *
- * NI_LOCAL_TABLE_30 NI_LOCAL_TABLE_0 *
- * NI_LOCAL_TABLE_31 NI_LOCAL_TABLE_0 *
- * NI_LOCAL_TABLE_32 NI_LOCAL_TABLE_0 *
- * NI_LOCAL_TABLE_33 NI_LOCAL_TABLE_0 *
- * NI_LOCAL_TABLE_34 NI_LOCAL_TABLE_0 *
- * NI_LOCAL_TABLE_35 NI_LOCAL_TABLE_0 *
- * NI_LOCAL_TABLE_36 NI_LOCAL_TABLE_0 *
- * NI_LOCAL_TABLE_37 NI_LOCAL_TABLE_0 *
- * NI_LOCAL_TABLE_38 NI_LOCAL_TABLE_0 *
- * NI_LOCAL_TABLE_39 NI_LOCAL_TABLE_0 *
- * NI_LOCAL_TABLE_40 NI_LOCAL_TABLE_0 *
- * NI_LOCAL_TABLE_41 NI_LOCAL_TABLE_0 *
- * NI_LOCAL_TABLE_42 NI_LOCAL_TABLE_0 *
- * NI_LOCAL_TABLE_43 NI_LOCAL_TABLE_0 *
- * NI_LOCAL_TABLE_44 NI_LOCAL_TABLE_0 *
- * NI_LOCAL_TABLE_45 NI_LOCAL_TABLE_0 *
- * NI_LOCAL_TABLE_46 NI_LOCAL_TABLE_0 *
- * NI_LOCAL_TABLE_47 NI_LOCAL_TABLE_0 *
- * NI_LOCAL_TABLE_48 NI_LOCAL_TABLE_0 *
- * NI_LOCAL_TABLE_49 NI_LOCAL_TABLE_0 *
- * NI_LOCAL_TABLE_50 NI_LOCAL_TABLE_0 *
- * NI_LOCAL_TABLE_51 NI_LOCAL_TABLE_0 *
- * NI_LOCAL_TABLE_52 NI_LOCAL_TABLE_0 *
- * NI_LOCAL_TABLE_53 NI_LOCAL_TABLE_0 *
- * NI_LOCAL_TABLE_54 NI_LOCAL_TABLE_0 *
- * NI_LOCAL_TABLE_55 NI_LOCAL_TABLE_0 *
- * NI_LOCAL_TABLE_56 NI_LOCAL_TABLE_0 *
- * NI_LOCAL_TABLE_57 NI_LOCAL_TABLE_0 *
- * NI_LOCAL_TABLE_58 NI_LOCAL_TABLE_0 *
- * NI_LOCAL_TABLE_59 NI_LOCAL_TABLE_0 *
- * NI_LOCAL_TABLE_60 NI_LOCAL_TABLE_0 *
- * NI_LOCAL_TABLE_61 NI_LOCAL_TABLE_0 *
- * NI_LOCAL_TABLE_62 NI_LOCAL_TABLE_0 *
- * NI_LOCAL_TABLE_63 NI_LOCAL_TABLE_0 *
- * NI_LOCAL_TABLE_64 NI_LOCAL_TABLE_0 *
- * NI_LOCAL_TABLE_65 NI_LOCAL_TABLE_0 *
- * NI_LOCAL_TABLE_66 NI_LOCAL_TABLE_0 *
- * NI_LOCAL_TABLE_67 NI_LOCAL_TABLE_0 *
- * NI_LOCAL_TABLE_68 NI_LOCAL_TABLE_0 *
- * NI_LOCAL_TABLE_69 NI_LOCAL_TABLE_0 *
- * NI_LOCAL_TABLE_70 NI_LOCAL_TABLE_0 *
- * NI_LOCAL_TABLE_71 NI_LOCAL_TABLE_0 *
- * NI_LOCAL_TABLE_72 NI_LOCAL_TABLE_0 *
- * NI_LOCAL_TABLE_73 NI_LOCAL_TABLE_0 *
- * NI_LOCAL_TABLE_74 NI_LOCAL_TABLE_0 *
- * NI_LOCAL_TABLE_75 NI_LOCAL_TABLE_0 *
- * NI_LOCAL_TABLE_76 NI_LOCAL_TABLE_0 *
- * NI_LOCAL_TABLE_77 NI_LOCAL_TABLE_0 *
- * NI_LOCAL_TABLE_78 NI_LOCAL_TABLE_0 *
- * NI_LOCAL_TABLE_79 NI_LOCAL_TABLE_0 *
- * NI_LOCAL_TABLE_80 NI_LOCAL_TABLE_0 *
- * NI_LOCAL_TABLE_81 NI_LOCAL_TABLE_0 *
- * NI_LOCAL_TABLE_82 NI_LOCAL_TABLE_0 *
- * NI_LOCAL_TABLE_83 NI_LOCAL_TABLE_0 *
- * NI_LOCAL_TABLE_84 NI_LOCAL_TABLE_0 *
- * NI_LOCAL_TABLE_85 NI_LOCAL_TABLE_0 *
- * NI_LOCAL_TABLE_86 NI_LOCAL_TABLE_0 *
- * NI_LOCAL_TABLE_87 NI_LOCAL_TABLE_0 *
- * NI_LOCAL_TABLE_88 NI_LOCAL_TABLE_0 *
- * NI_LOCAL_TABLE_89 NI_LOCAL_TABLE_0 *
- * NI_LOCAL_TABLE_90 NI_LOCAL_TABLE_0 *
- * NI_LOCAL_TABLE_91 NI_LOCAL_TABLE_0 *
- * NI_LOCAL_TABLE_92 NI_LOCAL_TABLE_0 *
- * NI_LOCAL_TABLE_93 NI_LOCAL_TABLE_0 *
- * NI_LOCAL_TABLE_94 NI_LOCAL_TABLE_0 *
- * NI_LOCAL_TABLE_95 NI_LOCAL_TABLE_0 *
- * NI_LOCAL_TABLE_96 NI_LOCAL_TABLE_0 *
- * NI_LOCAL_TABLE_97 NI_LOCAL_TABLE_0 *
- * NI_LOCAL_TABLE_98 NI_LOCAL_TABLE_0 *
- * NI_LOCAL_TABLE_99 NI_LOCAL_TABLE_0 *
- * NI_LOCAL_TABLE_100 NI_LOCAL_TABLE_0 *
- * NI_LOCAL_TABLE_101 NI_LOCAL_TABLE_0 *
- * NI_LOCAL_TABLE_102 NI_LOCAL_TABLE_0 *
- * NI_LOCAL_TABLE_103 NI_LOCAL_TABLE_0 *
- * NI_LOCAL_TABLE_104 NI_LOCAL_TABLE_0 *
- * NI_LOCAL_TABLE_105 NI_LOCAL_TABLE_0 *
- * NI_LOCAL_TABLE_106 NI_LOCAL_TABLE_0 *
- * NI_LOCAL_TABLE_107 NI_LOCAL_TABLE_0 *
- * NI_LOCAL_TABLE_108 NI_LOCAL_TABLE_0 *
- * NI_LOCAL_TABLE_109 NI_LOCAL_TABLE_0 *
- * NI_LOCAL_TABLE_110 NI_LOCAL_TABLE_0 *
- * NI_LOCAL_TABLE_111 NI_LOCAL_TABLE_0 *
- * NI_LOCAL_TABLE_112 NI_LOCAL_TABLE_0 *
- * NI_LOCAL_TABLE_113 NI_LOCAL_TABLE_0 *
- * NI_LOCAL_TABLE_114 NI_LOCAL_TABLE_0 *
- * NI_LOCAL_TABLE_115 NI_LOCAL_TABLE_0 *
- * NI_LOCAL_TABLE_116 NI_LOCAL_TABLE_0 *
- * NI_LOCAL_TABLE_117 NI_LOCAL_TABLE_0 *
- * NI_LOCAL_TABLE_118 NI_LOCAL_TABLE_0 *
- * NI_LOCAL_TABLE_119 NI_LOCAL_TABLE_0 *
- * NI_LOCAL_TABLE_120 NI_LOCAL_TABLE_0 *
- * NI_LOCAL_TABLE_121 NI_LOCAL_TABLE_0 *
- * NI_LOCAL_TABLE_122 NI_LOCAL_TABLE_0 *
- * NI_LOCAL_TABLE_123 NI_LOCAL_TABLE_0 *
- * NI_LOCAL_TABLE_124 NI_LOCAL_TABLE_0 *
- * NI_LOCAL_TABLE_125 NI_LOCAL_TABLE_0 *
- * NI_LOCAL_TABLE_126 NI_LOCAL_TABLE_0 *
- * *
- ************************************************************************/
-
-
-/************************************************************************
- * *
- * The following defines were not formed into structures *
- * *
- * This could be because the document did not contain details of the *
- * register, or because the automated script did not recognize the *
- * register details in the documentation. If these register need *
- * structure definition, please create them manually *
- * *
- * NI_PORT_HEADER_A 0x680108 *
- * NI_PORT_HEADER_B 0x680110 *
- * *
- ************************************************************************/
-
-
-/************************************************************************
- * *
- * MAKE ALL ADDITIONS AFTER THIS LINE *
- * *
- ************************************************************************/
-
-
-
-
-
-#endif /* _ASM_IA64_SN_SN1_HUBNI_H */
+++ /dev/null
-/* $Id$
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Copyright (C) 1992 - 1997, 2000-2001 Silicon Graphics, Inc. All rights reserved.
- */
-#ifndef _ASM_IA64_SN_SN1_HUBNI_NEXT_H
-#define _ASM_IA64_SN_SN1_HUBNI_NEXT_H
-
-#define NI_LOCAL_ENTRIES 128
-#define NI_META_ENTRIES 1
-
-#define NI_LOCAL_TABLE(_x) (NI_LOCAL_TABLE_0 + (8 * (_x)))
-#define NI_META_TABLE(_x) (NI_GLOBAL_TABLE + (8 * (_x)))
-
-/**************************************************************
-
- Masks and shifts for NI registers are defined below.
-
-**************************************************************/
-
-#define NPS_LINKUP_SHFT 1
-#define NPS_LINKUP_MASK (UINT64_CAST 0x1 << 1)
-
-
-#define NPR_LOCALRESET (UINT64_CAST 1 << 2) /* Reset loc. bdrck */
-#define NPR_PORTRESET (UINT64_CAST 1 << 1) /* Send warm reset */
-#define NPR_LINKRESET (UINT64_CAST 1 << 0) /* Send link reset */
-
-/* NI_DIAG_PARMS bit definitions */
-#define NDP_SENDERROR (UINT64_CAST 1 << 0) /* Send data error */
-#define NDP_PORTDISABLE (UINT64_CAST 1 << 1) /* Port disable */
-#define NDP_SENDERROFF (UINT64_CAST 1 << 2) /* Disable send error recovery */
-
-
-/* NI_PORT_ERROR mask and shift definitions (some are not present in SN0) */
-
-#define NPE_LINKRESET (UINT64_CAST 1 << 52)
-#define NPE_INTLONG_SHFT 48
-#define NPE_INTLONG_MASK (UINT64_CAST 0xf << NPE_INTLONG_SHFT)
-#define NPE_INTSHORT_SHFT 44
-#define NPE_INTSHORT_MASK (UINT64_CAST 0xf << NPE_INTSHORT_SHFT)
-#define NPE_EXTBADHEADER_SHFT 40
-#define NPE_EXTBADHEADER_MASK (UINT64_CAST 0xf << NPE_EXTBADHEADER_SHFT)
-#define NPE_EXTLONG_SHFT 36
-#define NPE_EXTLONG_MASK (UINT64_CAST 0xf << NPE_EXTLONG_SHFT)
-#define NPE_EXTSHORT_SHFT 32
-#define NPE_EXTSHORT_MASK (UINT64_CAST 0xf << NPE_EXTSHORT_SHFT)
-#define NPE_FIFOOVFLOW_SHFT 28
-#define NPE_FIFOOVFLOW_MASK (UINT64_CAST 0xf << NPE_FIFOOVFLOW_SHFT)
-#define NPE_TAILTO_SHFT 24
-#define NPE_TAILTO_MASK (UINT64_CAST 0xf << NPE_TAILTO_SHFT)
-#define NPE_RETRYCOUNT_SHFT 16
-#define NPE_RETRYCOUNT_MASK (UINT64_CAST 0xff << NPE_RETRYCOUNT_SHFT)
-#define NPE_CBERRCOUNT_SHFT 8
-#define NPE_CBERRCOUNT_MASK (UINT64_CAST 0xff << NPE_CBERRCOUNT_SHFT)
-#define NPE_SNERRCOUNT_SHFT 0
-#define NPE_SNERRCOUNT_MASK (UINT64_CAST 0xff << NPE_SNERRCOUNT_SHFT)
-
-#define NPE_COUNT_MAX 0xff
-
-#define NPE_FATAL_ERRORS (NPE_LINKRESET | NPE_INTLONG_MASK |\
- NPE_INTSHORT_MASK | NPE_EXTBADHEADER_MASK |\
- NPE_EXTLONG_MASK | NPE_EXTSHORT_MASK |\
- NPE_FIFOOVFLOW_MASK | NPE_TAILTO_MASK)
-
-#ifndef __ASSEMBLY__
-/* NI_PORT_HEADER[AB] registers (not automatically generated) */
-
-#ifdef LITTLE_ENDIAN
-
-typedef union ni_port_header_a_u {
- bdrkreg_t ni_port_header_a_regval;
- struct {
- bdrkreg_t pha_v : 1;
- bdrkreg_t pha_age : 8;
- bdrkreg_t pha_direction : 4;
- bdrkreg_t pha_destination : 8;
- bdrkreg_t pha_reserved_1 : 3;
- bdrkreg_t pha_command : 8;
- bdrkreg_t pha_prexsel : 3;
- bdrkreg_t pha_address_b : 27;
- bdrkreg_t pha_reserved : 2;
- } ni_port_header_a_fld_s;
-} ni_port_header_a_u_t;
-
-#else
-
-typedef union ni_port_header_a_u {
- bdrkreg_t ni_port_header_a_regval;
- struct {
- bdrkreg_t pha_reserved : 2;
- bdrkreg_t pha_address_b : 27;
- bdrkreg_t pha_prexsel : 3;
- bdrkreg_t pha_command : 8;
- bdrkreg_t pha_reserved_1 : 3;
- bdrkreg_t pha_destination : 8;
- bdrkreg_t pha_direction : 4;
- bdrkreg_t pha_age : 8;
- bdrkreg_t pha_v : 1;
- } ni_port_header_a_fld_s;
-} ni_port_header_a_u_t;
-
-#endif
-
-#ifdef LITTLE_ENDIAN
-
-typedef union ni_port_header_b_u {
- bdrkreg_t ni_port_header_b_regval;
- struct {
- bdrkreg_t phb_supplemental : 11;
- bdrkreg_t phb_reserved_2 : 5;
- bdrkreg_t phb_source : 11;
- bdrkreg_t phb_reserved_1 : 8;
- bdrkreg_t phb_address_a : 3;
- bdrkreg_t phb_address_c : 8;
- bdrkreg_t phb_reserved : 18;
- } ni_port_header_b_fld_s;
-} ni_port_header_b_u_t;
-
-#else
-
-typedef union ni_port_header_b_u {
- bdrkreg_t ni_port_header_b_regval;
- struct {
- bdrkreg_t phb_reserved : 18;
- bdrkreg_t phb_address_c : 8;
- bdrkreg_t phb_address_a : 3;
- bdrkreg_t phb_reserved_1 : 8;
- bdrkreg_t phb_source : 11;
- bdrkreg_t phb_reserved_2 : 5;
- bdrkreg_t phb_supplemental : 11;
- } ni_port_header_b_fld_s;
-} ni_port_header_b_u_t;
-
-#endif
-#endif
-
-/* NI_RESET_ENABLE mask definitions */
-
-#define NRE_RESETOK (UINT64_CAST 1) /* Let LLP reset bedrock */
-
-/* NI PORT_ERRORS, Max number of RETRY_COUNT, Check Bit, and Sequence */
-/* Number errors (8 bit counters that do not wrap). */
-#define NI_LLP_RETRY_MAX 0xff
-#define NI_LLP_CB_MAX 0xff
-#define NI_LLP_SN_MAX 0xff
-
-/* NI_PORT_PARMS shift and mask definitions */
-
-#define NPP_VCH_ERR_EN_SHFT 31
-#define NPP_VCH_ERR_EN_MASK (0xf << NPP_VCH_ERR_EN_SHFT)
-#define NPP_SQUASH_ERR_EN_SHFT 30
-#define NPP_SQUASH_ERR_EN_MASK (0x1 << NPP_SQUASH_ERR_EN_SHFT)
-#define NPP_FIRST_ERR_EN_SHFT 29
-#define NPP_FIRST_ERR_EN_MASK (0x1 << NPP_FIRST_ERR_EN_SHFT)
-#define NPP_D_AVAIL_SEL_SHFT 26
-#define NPP_D_AVAIL_SEL_MASK (0x3 << NPP_D_AVAIL_SEL_SHFT)
-#define NPP_MAX_RETRY_SHFT 16
-#define NPP_MAX_RETRY_MASK (0x3ff << NPP_MAX_RETRY_SHFT)
-#define NPP_NULL_TIMEOUT_SHFT 10
-#define NPP_NULL_TIMEOUT_MASK (0x3f << NPP_NULL_TIMEOUT_SHFT)
-#define NPP_MAX_BURST_SHFT 0
-#define NPP_MAX_BURST_MASK (0x3ff << NPP_MAX_BURST_SHFT)
-
-#define NPP_RESET_DEFAULTS (0xf << NPP_VCH_ERR_EN_SHFT | \
- 0x1 << NPP_FIRST_ERR_EN_SHFT | \
- 0x3ff << NPP_MAX_RETRY_SHFT | \
- 0x6 << NPP_NULL_TIMEOUT_SHFT | \
- 0x3f0 << NPP_MAX_BURST_SHFT)
-
-#endif /* _ASM_IA64_SN_SN1_HUBNI_NEXT_H */
+++ /dev/null
-/* $Id$
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Copyright (C) 1992 - 1997, 2000-2001 Silicon Graphics, Inc. All rights reserved.
- */
-#ifndef _ASM_IA64_SN_SN1_HUBPI_H
-#define _ASM_IA64_SN_SN1_HUBPI_H
-
-/************************************************************************
- * *
- * WARNING!!! WARNING!!! WARNING!!! WARNING!!! WARNING!!! *
- * *
- * This file is created by an automated script. Any (minimal) changes *
- * made manually to this file should be made with care. *
- * *
- * MAKE ALL ADDITIONS TO THE END OF THIS FILE *
- * *
- ************************************************************************/
-
-
-#define PI_CPU_PROTECT 0x00000000 /* CPU Protection */
-
-
-
-#define PI_PROT_OVRRD 0x00000008 /*
- * Clear CPU
- * Protection bit in
- * CPU_PROTECT
- */
-
-
-
-#define PI_IO_PROTECT 0x00000010 /*
- * Interrupt Pending
- * Protection for IO
- * access
- */
-
-
-
-#define PI_REGION_PRESENT 0x00000018 /* Region present */
-
-
-
-#define PI_CPU_NUM 0x00000020 /* CPU Number ID */
-
-
-
-#define PI_CALIAS_SIZE 0x00000028 /* Cached Alias Size */
-
-
-
-#define PI_MAX_CRB_TIMEOUT 0x00000030 /*
- * Maximum Timeout for
- * CRB
- */
-
-
-
-#define PI_CRB_SFACTOR 0x00000038 /*
- * Scale Factor for
- * CRB Timeout
- */
-
-
-
-#define PI_CPU_PRESENT_A 0x00000040 /*
- * CPU Present for
- * CPU_A
- */
-
-
-
-#define PI_CPU_PRESENT_B 0x00000048 /*
- * CPU Present for
- * CPU_B
- */
-
-
-
-#define PI_CPU_ENABLE_A 0x00000050 /*
- * CPU Enable for
- * CPU_A
- */
-
-
-
-#define PI_CPU_ENABLE_B 0x00000058 /*
- * CPU Enable for
- * CPU_B
- */
-
-
-
-#define PI_REPLY_LEVEL 0x00010060 /*
- * Reply FIFO Priority
- * Control
- */
-
-
-
-#define PI_GFX_CREDIT_MODE 0x00020068 /*
- * Graphics Credit
- * Mode
- */
-
-
-
-#define PI_NMI_A 0x00000070 /*
- * Non-maskable
- * Interrupt to CPU A
- */
-
-
-
-#define PI_NMI_B 0x00000078 /*
- * Non-maskable
- * Interrupt to CPU B
- */
-
-
-
-#define PI_INT_PEND_MOD 0x00000090 /*
- * Interrupt Pending
- * Modify
- */
-
-
-
-#define PI_INT_PEND0 0x00000098 /* Interrupt Pending 0 */
-
-
-
-#define PI_INT_PEND1 0x000000A0 /* Interrupt Pending 1 */
-
-
-
-#define PI_INT_MASK0_A 0x000000A8 /*
- * Interrupt Mask 0
- * for CPU A
- */
-
-
-
-#define PI_INT_MASK1_A 0x000000B0 /*
- * Interrupt Mask 1
- * for CPU A
- */
-
-
-
-#define PI_INT_MASK0_B 0x000000B8 /*
- * Interrupt Mask 0
- * for CPU B
- */
-
-
-
-#define PI_INT_MASK1_B 0x000000C0 /*
- * Interrupt Mask 1
- * for CPU B
- */
-
-
-
-#define PI_CC_PEND_SET_A 0x000000C8 /*
- * CC Interrupt
- * Pending for CPU A
- */
-
-
-
-#define PI_CC_PEND_SET_B 0x000000D0 /*
- * CC Interrupt
- * Pending for CPU B
- */
-
-
-
-#define PI_CC_PEND_CLR_A 0x000000D8 /*
- * CPU to CPU
- * Interrupt Pending
- * Clear for CPU A
- */
-
-
-
-#define PI_CC_PEND_CLR_B 0x000000E0 /*
- * CPU to CPU
- * Interrupt Pending
- * Clear for CPU B
- */
-
-
-
-#define PI_CC_MASK 0x000000E8 /*
- * Mask of both
- * CC_PENDs
- */
-
-
-
-#define PI_INT_PEND1_REMAP 0x000000F0 /*
- * Remap Interrupt
- * Pending
- */
-
-
-
-#define PI_RT_COUNTER 0x00030100 /* Real Time Counter */
-
-
-
-#define PI_RT_COMPARE_A 0x00000108 /* Real Time Compare A */
-
-
-
-#define PI_RT_COMPARE_B 0x00000110 /* Real Time Compare B */
-
-
-
-#define PI_PROFILE_COMPARE 0x00000118 /* Profiling Compare */
-
-
-
-#define PI_RT_INT_PEND_A 0x00000120 /*
- * RT interrupt
- * pending
- */
-
-
-
-#define PI_RT_INT_PEND_B 0x00000128 /*
- * RT interrupt
- * pending
- */
-
-
-
-#define PI_PROF_INT_PEND_A 0x00000130 /*
- * Profiling interrupt
- * pending
- */
-
-
-
-#define PI_PROF_INT_PEND_B 0x00000138 /*
- * Profiling interrupt
- * pending
- */
-
-
-
-#define PI_RT_INT_EN_A 0x00000140 /* RT Interrupt Enable */
-
-
-
-#define PI_RT_INT_EN_B 0x00000148 /* RT Interrupt Enable */
-
-
-
-#define PI_PROF_INT_EN_A 0x00000150 /*
- * Profiling Interrupt
- * Enable
- */
-
-
-
-#define PI_PROF_INT_EN_B 0x00000158 /*
- * Profiling Interrupt
- * Enable
- */
-
-
-
-#define PI_DEBUG_SEL 0x00000160 /* PI Debug Select */
-
-
-
-#define PI_INT_PEND_MOD_ALIAS 0x00000180 /*
- * Interrupt Pending
- * Modify
- */
-
-
-
-#define PI_PERF_CNTL_A 0x00040200 /*
- * Performance Counter
- * Control A
- */
-
-
-
-#define PI_PERF_CNTR0_A 0x00040208 /*
- * Performance Counter
- * 0 A
- */
-
-
-
-#define PI_PERF_CNTR1_A 0x00040210 /*
- * Performance Counter
- * 1 A
- */
-
-
-
-#define PI_PERF_CNTL_B 0x00050200 /*
- * Performance Counter
- * Control B
- */
-
-
-
-#define PI_PERF_CNTR0_B 0x00050208 /*
- * Performance Counter
- * 0 B
- */
-
-
-
-#define PI_PERF_CNTR1_B 0x00050210 /*
- * Performance Counter
- * 1 B
- */
-
-
-
-#define PI_GFX_PAGE_A 0x00000300 /* Graphics Page */
-
-
-
-#define PI_GFX_CREDIT_CNTR_A 0x00000308 /*
- * Graphics Credit
- * Counter
- */
-
-
-
-#define PI_GFX_BIAS_A 0x00000310 /* TRex+ BIAS */
-
-
-
-#define PI_GFX_INT_CNTR_A 0x00000318 /*
- * Graphics Interrupt
- * Counter
- */
-
-
-
-#define PI_GFX_INT_CMP_A 0x00000320 /*
- * Graphics Interrupt
- * Compare
- */
-
-
-
-#define PI_GFX_PAGE_B 0x00000328 /* Graphics Page */
-
-
-
-#define PI_GFX_CREDIT_CNTR_B 0x00000330 /*
- * Graphics Credit
- * Counter
- */
-
-
-
-#define PI_GFX_BIAS_B 0x00000338 /* TRex+ BIAS */
-
-
-
-#define PI_GFX_INT_CNTR_B 0x00000340 /*
- * Graphics Interrupt
- * Counter
- */
-
-
-
-#define PI_GFX_INT_CMP_B 0x00000348 /*
- * Graphics Interrupt
- * Compare
- */
-
-
-
-#define PI_ERR_INT_PEND_WR 0x000003F8 /*
- * Error Interrupt
- * Pending (Writable)
- */
-
-
-
-#define PI_ERR_INT_PEND 0x00000400 /*
- * Error Interrupt
- * Pending
- */
-
-
-
-#define PI_ERR_INT_MASK_A 0x00000408 /*
- * Error Interrupt
- * Mask CPU_A
- */
-
-
-
-#define PI_ERR_INT_MASK_B 0x00000410 /*
- * Error Interrupt
- * Mask CPU_B
- */
-
-
-
-#define PI_ERR_STACK_ADDR_A 0x00000418 /*
- * Error Stack Address
- * Pointer
- */
-
-
-
-#define PI_ERR_STACK_ADDR_B 0x00000420 /*
- * Error Stack Address
- * Pointer
- */
-
-
-
-#define PI_ERR_STACK_SIZE 0x00000428 /* Error Stack Size */
-
-
-
-#define PI_ERR_STATUS0_A 0x00000430 /* Error Status 0 */
-
-
-
-#define PI_ERR_STATUS0_A_CLR 0x00000438 /* Error Status 0 */
-
-
-
-#define PI_ERR_STATUS1_A 0x00000440 /* Error Status 1 */
-
-
-
-#define PI_ERR_STATUS1_A_CLR 0x00000448 /* Error Status 1 */
-
-
-
-#define PI_ERR_STATUS0_B 0x00000450 /* Error Status 0 */
-
-
-
-#define PI_ERR_STATUS0_B_CLR 0x00000458 /* Error Status 0 */
-
-
-
-#define PI_ERR_STATUS1_B 0x00000460 /* Error Status 1 */
-
-
-
-#define PI_ERR_STATUS1_B_CLR 0x00000468 /* Error Status 1 */
-
-
-
-#define PI_SPOOL_CMP_A 0x00000470 /* Spool Compare */
-
-
-
-#define PI_SPOOL_CMP_B 0x00000478 /* Spool Compare */
-
-
-
-#define PI_CRB_TIMEOUT_A 0x00000480 /*
- * CRB entries which
- * have timed out but
- * are still valid
- */
-
-
-
-#define PI_CRB_TIMEOUT_B 0x00000488 /*
- * CRB entries which
- * have timed out but
- * are still valid
- */
-
-
-
-#define PI_SYSAD_ERRCHK_EN 0x00000490 /*
- * enables
- * sysad/cmd/state
- * error checking
- */
-
-
-
-#define PI_FORCE_BAD_CHECK_BIT_A 0x00000498 /*
- * force SysAD Check
- * Bit error
- */
-
-
-
-#define PI_FORCE_BAD_CHECK_BIT_B 0x000004A0 /*
- * force SysAD Check
- * Bit error
- */
-
-
-
-#define PI_NACK_CNT_A 0x000004A8 /*
- * consecutive NACK
- * counter
- */
-
-
-
-#define PI_NACK_CNT_B 0x000004B0 /*
- * consecutive NACK
- * counter
- */
-
-
-
-#define PI_NACK_CMP 0x000004B8 /* NACK count compare */
-
-
-
-#define PI_SPOOL_MASK 0x000004C0 /* Spool error mask */
-
-
-
-#define PI_SPURIOUS_HDR_0 0x000004C8 /* Spurious Error 0 */
-
-
-
-#define PI_SPURIOUS_HDR_1 0x000004D0 /* Spurious Error 1 */
-
-
-
-#define PI_ERR_INJECT 0x000004D8 /*
- * SysAD bus error
- * injection
- */
-
-
-
-
-
-#ifndef __ASSEMBLY__
-
-/************************************************************************
- * *
- * Description: This read/write register determines on a *
- * bit-per-region basis whether incoming CPU-initiated PIO Read and *
- * Write to local PI registers are allowed. If access is allowed, the *
- * PI's response to a partial read is a PRPLY message, and the *
- * response to a partial write is a PACK message. If access is not *
- * allowed, the PI's response to a partial read is a PRERR message, *
- * and the response to a partial write is a PWERR message. *
- * This register is not reset by a soft reset. *
- * *
- ************************************************************************/
-
-
-
-
-typedef union pi_cpu_protect_u {
- bdrkreg_t pi_cpu_protect_regval;
- struct {
- bdrkreg_t cp_cpu_protect : 64;
- } pi_cpu_protect_fld_s;
-} pi_cpu_protect_u_t;
-
-
-
-
-/************************************************************************
- * *
- * A write with a special data pattern allows any CPU to set its *
- * region's bit in CPU_PROTECT. This register has data pattern *
- * protection. *
- * *
- ************************************************************************/
-
-
-
-
-typedef union pi_prot_ovrrd_u {
- bdrkreg_t pi_prot_ovrrd_regval;
- struct {
- bdrkreg_t po_prot_ovrrd : 64;
- } pi_prot_ovrrd_fld_s;
-} pi_prot_ovrrd_u_t;
-
-
-
-
-/************************************************************************
- * *
- * Description: This read/write register determines on a *
- * bit-per-region basis whether incoming IO-initiated interrupts are *
- * allowed to set bits in INT_PEND0 and INT_PEND1. If access is *
- * allowed, the PI's response to a partial read is a PRPLY message, *
- * and the response to a partial write is a PACK message. If access *
- * is not allowed, the PI's response to a partial read is a PRERR *
- * message, and the response to a partial write is a PWERR message. *
- * This register is not reset by a soft reset. *
- * *
- ************************************************************************/
-
-
-
-
-typedef union pi_io_protect_u {
- bdrkreg_t pi_io_protect_regval;
- struct {
- bdrkreg_t ip_io_protect : 64;
- } pi_io_protect_fld_s;
-} pi_io_protect_u_t;
-
-
-
-
-/************************************************************************
- * *
- * Description: This read/write register determines on a *
- * bit-per-region basis whether read access from a local processor to *
- * the region is permissible. For example, setting a bit to 0 *
- * prevents speculative reads to that non-existent node. If a read *
- * request to a non-present region occurs, an ERR response is issued *
- * to the TRex+ (no PI error registers are modified). It is up to *
- * software to load this register with the proper contents. *
- * Region-present checking is only done for coherent read requests - *
- * partial reads/writes will be issued to a non-present region. The *
- * setting of these bits does not affect a node's access to its *
- * CALIAS space. *
- * This register is not reset by a soft reset. *
- * *
- ************************************************************************/
-
-
-
-
-typedef union pi_region_present_u {
- bdrkreg_t pi_region_present_regval;
- struct {
- bdrkreg_t rp_region_present : 64;
- } pi_region_present_fld_s;
-} pi_region_present_u_t;
-
-
-
-
-/************************************************************************
- * *
- * A read to the location will allow a CPU to identify itself as *
- * either CPU_A or CPU_B, and will indicate whether the CPU is *
- * connected to PI 0 or PI 1. *
- * *
- ************************************************************************/
-
-
-
-
-#ifdef LITTLE_ENDIAN
-
-typedef union pi_cpu_num_u {
- bdrkreg_t pi_cpu_num_regval;
- struct {
- bdrkreg_t cn_cpu_num : 1;
- bdrkreg_t cn_pi_id : 1;
- bdrkreg_t cn_rsvd : 62;
- } pi_cpu_num_fld_s;
-} pi_cpu_num_u_t;
-
-#else
-
-typedef union pi_cpu_num_u {
- bdrkreg_t pi_cpu_num_regval;
- struct {
- bdrkreg_t cn_rsvd : 62;
- bdrkreg_t cn_pi_id : 1;
- bdrkreg_t cn_cpu_num : 1;
- } pi_cpu_num_fld_s;
-} pi_cpu_num_u_t;
-
-#endif
-
-
-
-
-/************************************************************************
- * *
- * Description: This read/write location determines the size of the *
- * Calias Space. *
- * This register is not reset by a soft reset. *
- * NOTE: For predictable behavior, all Calias spaces in a system must *
- * be set to the same size. *
- * *
- ************************************************************************/
-
-
-
-
-#ifdef LITTLE_ENDIAN
-
-typedef union pi_calias_size_u {
- bdrkreg_t pi_calias_size_regval;
- struct {
- bdrkreg_t cs_calias_size : 4;
- bdrkreg_t cs_rsvd : 60;
- } pi_calias_size_fld_s;
-} pi_calias_size_u_t;
-
-#else
-
-typedef union pi_calias_size_u {
- bdrkreg_t pi_calias_size_regval;
- struct {
- bdrkreg_t cs_rsvd : 60;
- bdrkreg_t cs_calias_size : 4;
- } pi_calias_size_fld_s;
-} pi_calias_size_u_t;
-
-#endif
-
-
-
-
-/************************************************************************
- * *
- * This Read/Write location determines at which value (increment) *
- * the CRB Timeout Counters cause a timeout error to occur. See *
- * Section 3.4.2.2, "Time-outs in RRB and WRB" in the *
- * Processor Interface chapter, volume 1 of this document for more *
- * details. *
- * *
- ************************************************************************/
-
-
-
-
-#ifdef LITTLE_ENDIAN
-
-typedef union pi_max_crb_timeout_u {
- bdrkreg_t pi_max_crb_timeout_regval;
- struct {
- bdrkreg_t mct_max_timeout : 8;
- bdrkreg_t mct_rsvd : 56;
- } pi_max_crb_timeout_fld_s;
-} pi_max_crb_timeout_u_t;
-
-#else
-
-typedef union pi_max_crb_timeout_u {
- bdrkreg_t pi_max_crb_timeout_regval;
- struct {
- bdrkreg_t mct_rsvd : 56;
- bdrkreg_t mct_max_timeout : 8;
- } pi_max_crb_timeout_fld_s;
-} pi_max_crb_timeout_u_t;
-
-#endif
-
-
-
-
-/************************************************************************
- * *
- * This Read/Write location determines how often a valid CRB's *
- * Timeout Counter is incremented. See Section 3.4.2.2, *
- * "Time-outs in RRB and WRB" in the Processor Interface *
- * chapter, volume 1 of this document for more details. *
- * *
- ************************************************************************/
-
-
-
-
-#ifdef LITTLE_ENDIAN
-
-typedef union pi_crb_sfactor_u {
- bdrkreg_t pi_crb_sfactor_regval;
- struct {
- bdrkreg_t cs_sfactor : 24;
- bdrkreg_t cs_rsvd : 40;
- } pi_crb_sfactor_fld_s;
-} pi_crb_sfactor_u_t;
-
-#else
-
-typedef union pi_crb_sfactor_u {
- bdrkreg_t pi_crb_sfactor_regval;
- struct {
- bdrkreg_t cs_rsvd : 40;
- bdrkreg_t cs_sfactor : 24;
- } pi_crb_sfactor_fld_s;
-} pi_crb_sfactor_u_t;
-
-#endif
-
-
-
-
-/************************************************************************
- * *
- * There is one of these registers for each CPU. The PI sets this *
- * bit when it sees the first transaction initiated by the associated *
- * CPU. *
- * *
- ************************************************************************/
-
-
-
-
-#ifdef LITTLE_ENDIAN
-
-typedef union pi_cpu_present_a_u {
- bdrkreg_t pi_cpu_present_a_regval;
- struct {
- bdrkreg_t cpa_cpu_present : 1;
- bdrkreg_t cpa_rsvd : 63;
- } pi_cpu_present_a_fld_s;
-} pi_cpu_present_a_u_t;
-
-#else
-
-typedef union pi_cpu_present_a_u {
- bdrkreg_t pi_cpu_present_a_regval;
- struct {
- bdrkreg_t cpa_rsvd : 63;
- bdrkreg_t cpa_cpu_present : 1;
- } pi_cpu_present_a_fld_s;
-} pi_cpu_present_a_u_t;
-
-#endif
-
-
-
-
-/************************************************************************
- * *
- * There is one of these registers for each CPU. The PI sets this *
- * bit when it sees the first transaction initiated by the associated *
- * CPU. *
- * *
- ************************************************************************/
-
-
-
-
-#ifdef LITTLE_ENDIAN
-
-typedef union pi_cpu_present_b_u {
- bdrkreg_t pi_cpu_present_b_regval;
- struct {
- bdrkreg_t cpb_cpu_present : 1;
- bdrkreg_t cpb_rsvd : 63;
- } pi_cpu_present_b_fld_s;
-} pi_cpu_present_b_u_t;
-
-#else
-
-typedef union pi_cpu_present_b_u {
- bdrkreg_t pi_cpu_present_b_regval;
- struct {
- bdrkreg_t cpb_rsvd : 63;
- bdrkreg_t cpb_cpu_present : 1;
- } pi_cpu_present_b_fld_s;
-} pi_cpu_present_b_u_t;
-
-#endif
-
-
-
-
-/************************************************************************
- * *
- * Description: There is one of these registers for each CPU. This *
- * Read/Write location determines whether the associated CPU is *
- * enabled to issue external requests. When this bit is zero for a *
- * processor, the PI ignores SysReq_L from that processor, and so *
- * never grants it the bus. *
- * This register is not reset by a soft reset. *
- * *
- ************************************************************************/
-
-
-
-
-#ifdef LITTLE_ENDIAN
-
-typedef union pi_cpu_enable_a_u {
- bdrkreg_t pi_cpu_enable_a_regval;
- struct {
- bdrkreg_t cea_cpu_enable : 1;
- bdrkreg_t cea_rsvd : 63;
- } pi_cpu_enable_a_fld_s;
-} pi_cpu_enable_a_u_t;
-
-#else
-
-typedef union pi_cpu_enable_a_u {
- bdrkreg_t pi_cpu_enable_a_regval;
- struct {
- bdrkreg_t cea_rsvd : 63;
- bdrkreg_t cea_cpu_enable : 1;
- } pi_cpu_enable_a_fld_s;
-} pi_cpu_enable_a_u_t;
-
-#endif
-
-
-
-
-/************************************************************************
- * *
- * Description: There is one of these registers for each CPU. This *
- * Read/Write location determines whether the associated CPU is *
- * enabled to issue external requests. When this bit is zero for a *
- * processor, the PI ignores SysReq_L from that processor, and so *
- * never grants it the bus. *
- * This register is not reset by a soft reset. *
- * *
- ************************************************************************/
-
-
-
-
-#ifdef LITTLE_ENDIAN
-
-typedef union pi_cpu_enable_b_u {
- bdrkreg_t pi_cpu_enable_b_regval;
- struct {
- bdrkreg_t ceb_cpu_enable : 1;
- bdrkreg_t ceb_rsvd : 63;
- } pi_cpu_enable_b_fld_s;
-} pi_cpu_enable_b_u_t;
-
-#else
-
-typedef union pi_cpu_enable_b_u {
- bdrkreg_t pi_cpu_enable_b_regval;
- struct {
- bdrkreg_t ceb_rsvd : 63;
- bdrkreg_t ceb_cpu_enable : 1;
- } pi_cpu_enable_b_fld_s;
-} pi_cpu_enable_b_u_t;
-
-#endif
-
-
-
-
-/************************************************************************
- * *
- * There is one of these registers for each CPU. A write to this *
- * location will cause an NMI to be issued to the CPU. *
- * *
- ************************************************************************/
-
-
-
-
-typedef union pi_nmi_a_u {
- bdrkreg_t pi_nmi_a_regval;
- struct {
- bdrkreg_t na_nmi_cpu : 64;
- } pi_nmi_a_fld_s;
-} pi_nmi_a_u_t;
-
-
-
-
-/************************************************************************
- * *
- * There is one of these registers for each CPU. A write to this *
- * location will cause an NMI to be issued to the CPU. *
- * *
- ************************************************************************/
-
-
-
-
-typedef union pi_nmi_b_u {
- bdrkreg_t pi_nmi_b_regval;
- struct {
- bdrkreg_t nb_nmi_cpu : 64;
- } pi_nmi_b_fld_s;
-} pi_nmi_b_u_t;
-
-
-
-
-/************************************************************************
- * *
- * A write to this register allows a single bit in the INT_PEND0 or *
- * INT_PEND1 registers to be set or cleared. If 6 is clear, a bit is *
- * modified in INT_PEND0, while if 6 is set, a bit is modified in *
- * INT_PEND1. The value in 5:0 (ranging from 63 to 0) will determine *
- * which bit in the register is effected. The value of 8 will *
- * determine whether the desired bit is set (8=1) or cleared (8=0). *
- * This is the only register which is accessible by IO issued PWRI *
- * command and is protected through the IO_PROTECT register. If the *
- * region bit in the IO_PROTECT is not set then a WERR reply is *
- * issued. CPU access is controlled through CPU_PROTECT. The contents *
- * of this register are masked with the contents of INT_MASK_A *
- * (INT_MASK_B) to determine whether an L2 interrupt is issued to *
- * CPU_A (CPU_B). *
- * *
- ************************************************************************/
-
-
-
-
-#ifdef LITTLE_ENDIAN
-
-typedef union pi_int_pend_mod_u {
- bdrkreg_t pi_int_pend_mod_regval;
- struct {
- bdrkreg_t ipm_bit_select : 6;
- bdrkreg_t ipm_reg_select : 1;
- bdrkreg_t ipm_rsvd_1 : 1;
- bdrkreg_t ipm_value : 1;
- bdrkreg_t ipm_rsvd : 55;
- } pi_int_pend_mod_fld_s;
-} pi_int_pend_mod_u_t;
-
-#else
-
-typedef union pi_int_pend_mod_u {
- bdrkreg_t pi_int_pend_mod_regval;
- struct {
- bdrkreg_t ipm_rsvd : 55;
- bdrkreg_t ipm_value : 1;
- bdrkreg_t ipm_rsvd_1 : 1;
- bdrkreg_t ipm_reg_select : 1;
- bdrkreg_t ipm_bit_select : 6;
- } pi_int_pend_mod_fld_s;
-} pi_int_pend_mod_u_t;
-
-#endif
-
-
-
-
-/************************************************************************
- * *
- * This read-only register provides information about interrupts *
- * that are currently pending. The interrupts in this register map to *
- * interrupt level 2 (L2). The GFX_INT_A/B bits are set by hardware *
- * but must be cleared by software. *
- * *
- ************************************************************************/
-
-
-
-
-#ifdef LITTLE_ENDIAN
-
-typedef union pi_int_pend0_u {
- bdrkreg_t pi_int_pend0_regval;
- struct {
- bdrkreg_t ip_int_pend0_lo : 1;
- bdrkreg_t ip_gfx_int_a : 1;
- bdrkreg_t ip_gfx_int_b : 1;
- bdrkreg_t ip_page_migration : 1;
- bdrkreg_t ip_uart_ucntrl : 1;
- bdrkreg_t ip_or_cc_pend_a : 1;
- bdrkreg_t ip_or_cc_pend_b : 1;
- bdrkreg_t ip_int_pend0_hi : 57;
- } pi_int_pend0_fld_s;
-} pi_int_pend0_u_t;
-
-#else
-
-typedef union pi_int_pend0_u {
- bdrkreg_t pi_int_pend0_regval;
- struct {
- bdrkreg_t ip_int_pend0_hi : 57;
- bdrkreg_t ip_or_cc_pend_b : 1;
- bdrkreg_t ip_or_cc_pend_a : 1;
- bdrkreg_t ip_uart_ucntrl : 1;
- bdrkreg_t ip_page_migration : 1;
- bdrkreg_t ip_gfx_int_b : 1;
- bdrkreg_t ip_gfx_int_a : 1;
- bdrkreg_t ip_int_pend0_lo : 1;
- } pi_int_pend0_fld_s;
-} pi_int_pend0_u_t;
-
-#endif
-
-
-
-
-/************************************************************************
- * *
- * This read-only register provides information about interrupts *
- * that are currently pending. The interrupts in this register map to *
- * interrupt level 3 (L3), unless remapped by the INT_PEND1_REMAP *
- * register. The SYS_COR_ERR_A/B, RTC_DROP_OUT, and NACK_INT_A/B bits *
- * are set by hardware but must be cleared by software. The *
- * SYSTEM_SHUTDOWN, NI_ERROR, LB_ERROR and XB_ERROR bits just reflect *
- * the value of other logic, and cannot be changed by PI register *
- * writes. *
- * *
- ************************************************************************/
-
-
-
-
-#ifdef LITTLE_ENDIAN
-
-typedef union pi_int_pend1_u {
- bdrkreg_t pi_int_pend1_regval;
- struct {
- bdrkreg_t ip_int_pend1 : 54;
- bdrkreg_t ip_xb_error : 1;
- bdrkreg_t ip_lb_error : 1;
- bdrkreg_t ip_nack_int_a : 1;
- bdrkreg_t ip_nack_int_b : 1;
- bdrkreg_t ip_perf_cntr_oflow : 1;
- bdrkreg_t ip_sys_cor_err_b : 1;
- bdrkreg_t ip_sys_cor_err_a : 1;
- bdrkreg_t ip_md_corr_error : 1;
- bdrkreg_t ip_ni_error : 1;
- bdrkreg_t ip_system_shutdown : 1;
- } pi_int_pend1_fld_s;
-} pi_int_pend1_u_t;
-
-#else
-
-typedef union pi_int_pend1_u {
- bdrkreg_t pi_int_pend1_regval;
- struct {
- bdrkreg_t ip_system_shutdown : 1;
- bdrkreg_t ip_ni_error : 1;
- bdrkreg_t ip_md_corr_error : 1;
- bdrkreg_t ip_sys_cor_err_a : 1;
- bdrkreg_t ip_sys_cor_err_b : 1;
- bdrkreg_t ip_perf_cntr_oflow : 1;
- bdrkreg_t ip_nack_int_b : 1;
- bdrkreg_t ip_nack_int_a : 1;
- bdrkreg_t ip_lb_error : 1;
- bdrkreg_t ip_xb_error : 1;
- bdrkreg_t ip_int_pend1 : 54;
- } pi_int_pend1_fld_s;
-} pi_int_pend1_u_t;
-
-#endif
-
-
-
-
-/************************************************************************
- * *
- * This read/write register masks the contents of INT_PEND0 to *
- * determine whether an L2 interrupt (bit 10 of the processor's Cause *
- * register) is sent to CPU_A if the same bit in the INT_PEND0 *
- * register is also set. Only one processor in a Bedrock should *
- * enable the PAGE_MIGRATION bit/interrupt. *
- * *
- ************************************************************************/
-
-
-
-
-#ifdef LITTLE_ENDIAN
-
-typedef union pi_int_mask0_a_u {
- bdrkreg_t pi_int_mask0_a_regval;
- struct {
- bdrkreg_t ima_int_mask0_lo : 1;
- bdrkreg_t ima_gfx_int_a : 1;
- bdrkreg_t ima_gfx_int_b : 1;
- bdrkreg_t ima_page_migration : 1;
- bdrkreg_t ima_uart_ucntrl : 1;
- bdrkreg_t ima_or_ccp_mask_a : 1;
- bdrkreg_t ima_or_ccp_mask_b : 1;
- bdrkreg_t ima_int_mask0_hi : 57;
- } pi_int_mask0_a_fld_s;
-} pi_int_mask0_a_u_t;
-
-#else
-
-typedef union pi_int_mask0_a_u {
- bdrkreg_t pi_int_mask0_a_regval;
- struct {
- bdrkreg_t ima_int_mask0_hi : 57;
- bdrkreg_t ima_or_ccp_mask_b : 1;
- bdrkreg_t ima_or_ccp_mask_a : 1;
- bdrkreg_t ima_uart_ucntrl : 1;
- bdrkreg_t ima_page_migration : 1;
- bdrkreg_t ima_gfx_int_b : 1;
- bdrkreg_t ima_gfx_int_a : 1;
- bdrkreg_t ima_int_mask0_lo : 1;
- } pi_int_mask0_a_fld_s;
-} pi_int_mask0_a_u_t;
-
-#endif
-
-
-
-
-/************************************************************************
- * *
- * This read/write register masks the contents of INT_PEND1 to *
- * determine whether an interrupt should be sent. Bits 63:32 always *
- * generate an L3 interrupt (bit 11 of the processor's Cause *
- * register) is sent to CPU_A if the same bit in the INT_PEND1 *
- * register is set. Bits 31:0 can generate either an L3 or L2 *
- * interrupt, depending on the value of INT_PEND1_REMAP[3:0]. Only *
- * one processor in a Bedrock should enable the NI_ERROR, LB_ERROR, *
- * XB_ERROR and MD_CORR_ERROR bits. *
- * *
- ************************************************************************/
-
-
-
-
-typedef union pi_int_mask1_a_u {
- bdrkreg_t pi_int_mask1_a_regval;
- struct {
- bdrkreg_t ima_int_mask1 : 64;
- } pi_int_mask1_a_fld_s;
-} pi_int_mask1_a_u_t;
-
-
-
-
-/************************************************************************
- * *
- * This read/write register masks the contents of INT_PEND0 to *
- * determine whether an L2 interrupt (bit 10 of the processor's Cause *
- * register) is sent to CPU_B if the same bit in the INT_PEND0 *
- * register is also set. Only one processor in a Bedrock should *
- * enable the PAGE_MIGRATION bit/interrupt. *
- * *
- ************************************************************************/
-
-
-
-
-#ifdef LITTLE_ENDIAN
-
-typedef union pi_int_mask0_b_u {
- bdrkreg_t pi_int_mask0_b_regval;
- struct {
- bdrkreg_t imb_int_mask0_lo : 1;
- bdrkreg_t imb_gfx_int_a : 1;
- bdrkreg_t imb_gfx_int_b : 1;
- bdrkreg_t imb_page_migration : 1;
- bdrkreg_t imb_uart_ucntrl : 1;
- bdrkreg_t imb_or_ccp_mask_a : 1;
- bdrkreg_t imb_or_ccp_mask_b : 1;
- bdrkreg_t imb_int_mask0_hi : 57;
- } pi_int_mask0_b_fld_s;
-} pi_int_mask0_b_u_t;
-
-#else
-
-typedef union pi_int_mask0_b_u {
- bdrkreg_t pi_int_mask0_b_regval;
- struct {
- bdrkreg_t imb_int_mask0_hi : 57;
- bdrkreg_t imb_or_ccp_mask_b : 1;
- bdrkreg_t imb_or_ccp_mask_a : 1;
- bdrkreg_t imb_uart_ucntrl : 1;
- bdrkreg_t imb_page_migration : 1;
- bdrkreg_t imb_gfx_int_b : 1;
- bdrkreg_t imb_gfx_int_a : 1;
- bdrkreg_t imb_int_mask0_lo : 1;
- } pi_int_mask0_b_fld_s;
-} pi_int_mask0_b_u_t;
-
-#endif
-
-
-
-
-/************************************************************************
- * *
- * This read/write register masks the contents of INT_PEND1 to *
- * determine whether an interrupt should be sent. Bits 63:32 always *
- * generate an L3 interrupt (bit 11 of the processor's Cause *
- * register) is sent to CPU_B if the same bit in the INT_PEND1 *
- * register is set. Bits 31:0 can generate either an L3 or L2 *
- * interrupt, depending on the value of INT_PEND1_REMAP[3:0]. Only *
- * one processor in a Bedrock should enable the NI_ERROR, LB_ERROR, *
- * XB_ERROR and MD_CORR_ERROR bits. *
- * *
- ************************************************************************/
-
-
-
-
-typedef union pi_int_mask1_b_u {
- bdrkreg_t pi_int_mask1_b_regval;
- struct {
- bdrkreg_t imb_int_mask1 : 64;
- } pi_int_mask1_b_fld_s;
-} pi_int_mask1_b_u_t;
-
-
-
-
-/************************************************************************
- * *
- * There is one of these registers for each CPU. These registers do *
- * not have access protection. A store to this location by a CPU will *
- * cause the bit corresponding to the source's region to be set in *
- * CC_PEND_A (or CC_PEND_B). The contents of CC_PEND_A (or CC_PEND_B) *
- * determines on a bit-per-region basis whether a CPU-to-CPU *
- * interrupt is pending CPU_A (or CPU_B). *
- * *
- ************************************************************************/
-
-
-
-
-typedef union pi_cc_pend_set_a_u {
- bdrkreg_t pi_cc_pend_set_a_regval;
- struct {
- bdrkreg_t cpsa_cc_pend : 64;
- } pi_cc_pend_set_a_fld_s;
-} pi_cc_pend_set_a_u_t;
-
-
-
-
-/************************************************************************
- * *
- * There is one of these registers for each CPU. These registers do *
- * not have access protection. A store to this location by a CPU will *
- * cause the bit corresponding to the source's region to be set in *
- * CC_PEND_A (or CC_PEND_B). The contents of CC_PEND_A (or CC_PEND_B) *
- * determines on a bit-per-region basis whether a CPU-to-CPU *
- * interrupt is pending CPU_A (or CPU_B). *
- * *
- ************************************************************************/
-
-
-
-
-typedef union pi_cc_pend_set_b_u {
- bdrkreg_t pi_cc_pend_set_b_regval;
- struct {
- bdrkreg_t cpsb_cc_pend : 64;
- } pi_cc_pend_set_b_fld_s;
-} pi_cc_pend_set_b_u_t;
-
-
-
-
-/************************************************************************
- * *
- * There is one of these registers for each CPU. Reading this *
- * location will return the contents of CC_PEND_A (or CC_PEND_B). *
- * Writing this location will clear the bits corresponding to which *
- * data bits are driven high during the store; therefore, storing all *
- * ones would clear all bits. *
- * *
- ************************************************************************/
-
-
-
-
-typedef union pi_cc_pend_clr_a_u {
- bdrkreg_t pi_cc_pend_clr_a_regval;
- struct {
- bdrkreg_t cpca_cc_pend : 64;
- } pi_cc_pend_clr_a_fld_s;
-} pi_cc_pend_clr_a_u_t;
-
-
-
-
-/************************************************************************
- * *
- * There is one of these registers for each CPU. Reading this *
- * location will return the contents of CC_PEND_A (or CC_PEND_B). *
- * Writing this location will clear the bits corresponding to which *
- * data bits are driven high during the store; therefore, storing all *
- * ones would clear all bits. *
- * *
- ************************************************************************/
-
-
-
-
-typedef union pi_cc_pend_clr_b_u {
- bdrkreg_t pi_cc_pend_clr_b_regval;
- struct {
- bdrkreg_t cpcb_cc_pend : 64;
- } pi_cc_pend_clr_b_fld_s;
-} pi_cc_pend_clr_b_u_t;
-
-
-
-
-/************************************************************************
- * *
- * This read/write register masks the contents of both CC_PEND_A and *
- * CC_PEND_B. *
- * *
- ************************************************************************/
-
-
-
-
-typedef union pi_cc_mask_u {
- bdrkreg_t pi_cc_mask_regval;
- struct {
- bdrkreg_t cm_cc_mask : 64;
- } pi_cc_mask_fld_s;
-} pi_cc_mask_u_t;
-
-
-
-
-/************************************************************************
- * *
- * This read/write register redirects INT_PEND1[31:0] from L3 to L2 *
- * interrupt level.Bit 4 in this register is used to enable error *
- * interrupt forwarding to the II. When this bit is set, if any of *
- * the three memory interrupts (correctable error, uncorrectable *
- * error, or page migration), or the NI, LB or XB error interrupts *
- * are set, the PI_II_ERROR_INT wire will be asserted. When this wire *
- * is asserted, the II will send an interrupt to the node specified *
- * in its IIDSR (Interrupt Destination Register). This allows these *
- * interrupts to be forwarded to another node. *
- * *
- ************************************************************************/
-
-
-
-
-#ifdef LITTLE_ENDIAN
-
-typedef union pi_int_pend1_remap_u {
- bdrkreg_t pi_int_pend1_remap_regval;
- struct {
- bdrkreg_t ipr_remap_0 : 1;
- bdrkreg_t ipr_remap_1 : 1;
- bdrkreg_t ipr_remap_2 : 1;
- bdrkreg_t ipr_remap_3 : 1;
- bdrkreg_t ipr_error_forward : 1;
- bdrkreg_t ipr_reserved : 59;
- } pi_int_pend1_remap_fld_s;
-} pi_int_pend1_remap_u_t;
-
-#else
-
-typedef union pi_int_pend1_remap_u {
- bdrkreg_t pi_int_pend1_remap_regval;
- struct {
- bdrkreg_t ipr_reserved : 59;
- bdrkreg_t ipr_error_forward : 1;
- bdrkreg_t ipr_remap_3 : 1;
- bdrkreg_t ipr_remap_2 : 1;
- bdrkreg_t ipr_remap_1 : 1;
- bdrkreg_t ipr_remap_0 : 1;
- } pi_int_pend1_remap_fld_s;
-} pi_int_pend1_remap_u_t;
-
-#endif
-
-
-
-
-/************************************************************************
- * *
- * There is one of these registers for each CPU. When the real time *
- * counter (RT_Counter) is equal to the value in this register, the *
- * RT_INT_PEND register is set, which causes a Level-4 interrupt to *
- * be sent to the processor. *
- * *
- ************************************************************************/
-
-
-
-
-#ifdef LITTLE_ENDIAN
-
-typedef union pi_rt_compare_a_u {
- bdrkreg_t pi_rt_compare_a_regval;
- struct {
- bdrkreg_t rca_rt_compare : 55;
- bdrkreg_t rca_rsvd : 9;
- } pi_rt_compare_a_fld_s;
-} pi_rt_compare_a_u_t;
-
-#else
-
-typedef union pi_rt_compare_a_u {
- bdrkreg_t pi_rt_compare_a_regval;
- struct {
- bdrkreg_t rca_rsvd : 9;
- bdrkreg_t rca_rt_compare : 55;
- } pi_rt_compare_a_fld_s;
-} pi_rt_compare_a_u_t;
-
-#endif
-
-
-
-
-/************************************************************************
- * *
- * There is one of these registers for each CPU. When the real time *
- * counter (RT_Counter) is equal to the value in this register, the *
- * RT_INT_PEND register is set, which causes a Level-4 interrupt to *
- * be sent to the processor. *
- * *
- ************************************************************************/
-
-
-
-
-#ifdef LITTLE_ENDIAN
-
-typedef union pi_rt_compare_b_u {
- bdrkreg_t pi_rt_compare_b_regval;
- struct {
- bdrkreg_t rcb_rt_compare : 55;
- bdrkreg_t rcb_rsvd : 9;
- } pi_rt_compare_b_fld_s;
-} pi_rt_compare_b_u_t;
-
-#else
-
-typedef union pi_rt_compare_b_u {
- bdrkreg_t pi_rt_compare_b_regval;
- struct {
- bdrkreg_t rcb_rsvd : 9;
- bdrkreg_t rcb_rt_compare : 55;
- } pi_rt_compare_b_fld_s;
-} pi_rt_compare_b_u_t;
-
-#endif
-
-
-
-
-/************************************************************************
- * *
- * When the least significant 32 bits of the real time counter *
- * (RT_Counter) are equal to the value in this register, the *
- * PROF_INT_PEND_A and PROF_INT_PEND_B registers are set to 0x1. *
- * *
- ************************************************************************/
-
-
-
-
-#ifdef LITTLE_ENDIAN
-
-typedef union pi_profile_compare_u {
- bdrkreg_t pi_profile_compare_regval;
- struct {
- bdrkreg_t pc_profile_compare : 32;
- bdrkreg_t pc_rsvd : 32;
- } pi_profile_compare_fld_s;
-} pi_profile_compare_u_t;
-
-#else
-
-typedef union pi_profile_compare_u {
- bdrkreg_t pi_profile_compare_regval;
- struct {
- bdrkreg_t pc_rsvd : 32;
- bdrkreg_t pc_profile_compare : 32;
- } pi_profile_compare_fld_s;
-} pi_profile_compare_u_t;
-
-#endif
-
-
-
-
-/************************************************************************
- * *
- * There is one of these registers for each CPU. If the bit in the *
- * corresponding RT_INT_EN_A/B register is set, the processor's level *
- * 5 interrupt is set to the value of the RTC_INT_PEND bit in this *
- * register. Storing any value to this location will clear the *
- * RTC_INT_PEND bit in the register. *
- * *
- ************************************************************************/
-
-
-
-
-#ifdef LITTLE_ENDIAN
-
-typedef union pi_rt_int_pend_a_u {
- bdrkreg_t pi_rt_int_pend_a_regval;
- struct {
- bdrkreg_t ripa_rtc_int_pend : 1;
- bdrkreg_t ripa_rsvd : 63;
- } pi_rt_int_pend_a_fld_s;
-} pi_rt_int_pend_a_u_t;
-
-#else
-
-typedef union pi_rt_int_pend_a_u {
- bdrkreg_t pi_rt_int_pend_a_regval;
- struct {
- bdrkreg_t ripa_rsvd : 63;
- bdrkreg_t ripa_rtc_int_pend : 1;
- } pi_rt_int_pend_a_fld_s;
-} pi_rt_int_pend_a_u_t;
-
-#endif
-
-
-
-
-/************************************************************************
- * *
- * There is one of these registers for each CPU. If the bit in the *
- * corresponding RT_INT_EN_A/B register is set, the processor's level *
- * 5 interrupt is set to the value of the RTC_INT_PEND bit in this *
- * register. Storing any value to this location will clear the *
- * RTC_INT_PEND bit in the register. *
- * *
- ************************************************************************/
-
-
-
-
-#ifdef LITTLE_ENDIAN
-
-typedef union pi_rt_int_pend_b_u {
- bdrkreg_t pi_rt_int_pend_b_regval;
- struct {
- bdrkreg_t ripb_rtc_int_pend : 1;
- bdrkreg_t ripb_rsvd : 63;
- } pi_rt_int_pend_b_fld_s;
-} pi_rt_int_pend_b_u_t;
-
-#else
-
-typedef union pi_rt_int_pend_b_u {
- bdrkreg_t pi_rt_int_pend_b_regval;
- struct {
- bdrkreg_t ripb_rsvd : 63;
- bdrkreg_t ripb_rtc_int_pend : 1;
- } pi_rt_int_pend_b_fld_s;
-} pi_rt_int_pend_b_u_t;
-
-#endif
-
-
-
-
-/************************************************************************
- * *
- * There is one of these registers for each CPU. Both registers are *
- * set when the PROFILE_COMPARE register is equal to bits [31:0] of *
- * the RT_Counter. If the bit in the corresponding PROF_INT_EN_A/B *
- * register is set, the processor's level 5 interrupt is set to the *
- * value of the PROF_INT_PEND bit in this register. Storing any value *
- * to this location will clear the PROF_INT_PEND bit in the register. *
- * The reason for having A and B versions of this register is that *
- * they need to be cleared independently. *
- * *
- ************************************************************************/
-
-
-
-
-#ifdef LITTLE_ENDIAN
-
-typedef union pi_prof_int_pend_a_u {
- bdrkreg_t pi_prof_int_pend_a_regval;
- struct {
- bdrkreg_t pipa_prof_int_pend : 1;
- bdrkreg_t pipa_rsvd : 63;
- } pi_prof_int_pend_a_fld_s;
-} pi_prof_int_pend_a_u_t;
-
-#else
-
-typedef union pi_prof_int_pend_a_u {
- bdrkreg_t pi_prof_int_pend_a_regval;
- struct {
- bdrkreg_t pipa_rsvd : 63;
- bdrkreg_t pipa_prof_int_pend : 1;
- } pi_prof_int_pend_a_fld_s;
-} pi_prof_int_pend_a_u_t;
-
-#endif
-
-
-
-
-/************************************************************************
- * *
- * There is one of these registers for each CPU. Both registers are *
- * set when the PROFILE_COMPARE register is equal to bits [31:0] of *
- * the RT_Counter. If the bit in the corresponding PROF_INT_EN_A/B *
- * register is set, the processor's level 5 interrupt is set to the *
- * value of the PROF_INT_PEND bit in this register. Storing any value *
- * to this location will clear the PROF_INT_PEND bit in the register. *
- * The reason for having A and B versions of this register is that *
- * they need to be cleared independently. *
- * *
- ************************************************************************/
-
-
-
-
-#ifdef LITTLE_ENDIAN
-
-typedef union pi_prof_int_pend_b_u {
- bdrkreg_t pi_prof_int_pend_b_regval;
- struct {
- bdrkreg_t pipb_prof_int_pend : 1;
- bdrkreg_t pipb_rsvd : 63;
- } pi_prof_int_pend_b_fld_s;
-} pi_prof_int_pend_b_u_t;
-
-#else
-
-typedef union pi_prof_int_pend_b_u {
- bdrkreg_t pi_prof_int_pend_b_regval;
- struct {
- bdrkreg_t pipb_rsvd : 63;
- bdrkreg_t pipb_prof_int_pend : 1;
- } pi_prof_int_pend_b_fld_s;
-} pi_prof_int_pend_b_u_t;
-
-#endif
-
-
-
-
-/************************************************************************
- * *
- * There is one of these registers for each CPU. Enables RTC *
- * interrupt to the associated CPU. *
- * *
- ************************************************************************/
-
-
-
-
-#ifdef LITTLE_ENDIAN
-
-typedef union pi_rt_int_en_a_u {
- bdrkreg_t pi_rt_int_en_a_regval;
- struct {
- bdrkreg_t riea_rtc_int_en : 1;
- bdrkreg_t riea_rsvd : 63;
- } pi_rt_int_en_a_fld_s;
-} pi_rt_int_en_a_u_t;
-
-#else
-
-typedef union pi_rt_int_en_a_u {
- bdrkreg_t pi_rt_int_en_a_regval;
- struct {
- bdrkreg_t riea_rsvd : 63;
- bdrkreg_t riea_rtc_int_en : 1;
- } pi_rt_int_en_a_fld_s;
-} pi_rt_int_en_a_u_t;
-
-#endif
-
-
-
-
-/************************************************************************
- * *
- * There is one of these registers for each CPU. Enables RTC *
- * interrupt to the associated CPU. *
- * *
- ************************************************************************/
-
-
-
-
-#ifdef LITTLE_ENDIAN
-
-typedef union pi_rt_int_en_b_u {
- bdrkreg_t pi_rt_int_en_b_regval;
- struct {
- bdrkreg_t rieb_rtc_int_en : 1;
- bdrkreg_t rieb_rsvd : 63;
- } pi_rt_int_en_b_fld_s;
-} pi_rt_int_en_b_u_t;
-
-#else
-
-typedef union pi_rt_int_en_b_u {
- bdrkreg_t pi_rt_int_en_b_regval;
- struct {
- bdrkreg_t rieb_rsvd : 63;
- bdrkreg_t rieb_rtc_int_en : 1;
- } pi_rt_int_en_b_fld_s;
-} pi_rt_int_en_b_u_t;
-
-#endif
-
-
-
-
-/************************************************************************
- * *
- * There is one of these registers for each CPU. Enables profiling *
- * interrupt to the associated CPU. *
- * *
- ************************************************************************/
-
-
-
-
-#ifdef LITTLE_ENDIAN
-
-typedef union pi_prof_int_en_a_u {
- bdrkreg_t pi_prof_int_en_a_regval;
- struct {
- bdrkreg_t piea_prof_int_en : 1;
- bdrkreg_t piea_rsvd : 63;
- } pi_prof_int_en_a_fld_s;
-} pi_prof_int_en_a_u_t;
-
-#else
-
-typedef union pi_prof_int_en_a_u {
- bdrkreg_t pi_prof_int_en_a_regval;
- struct {
- bdrkreg_t piea_rsvd : 63;
- bdrkreg_t piea_prof_int_en : 1;
- } pi_prof_int_en_a_fld_s;
-} pi_prof_int_en_a_u_t;
-
-#endif
-
-
-
-
-/************************************************************************
- * *
- * There is one of these registers for each CPU. Enables profiling *
- * interrupt to the associated CPU. *
- * *
- ************************************************************************/
-
-
-
-
-#ifdef LITTLE_ENDIAN
-
-typedef union pi_prof_int_en_b_u {
- bdrkreg_t pi_prof_int_en_b_regval;
- struct {
- bdrkreg_t pieb_prof_int_en : 1;
- bdrkreg_t pieb_rsvd : 63;
- } pi_prof_int_en_b_fld_s;
-} pi_prof_int_en_b_u_t;
-
-#else
-
-typedef union pi_prof_int_en_b_u {
- bdrkreg_t pi_prof_int_en_b_regval;
- struct {
- bdrkreg_t pieb_rsvd : 63;
- bdrkreg_t pieb_prof_int_en : 1;
- } pi_prof_int_en_b_fld_s;
-} pi_prof_int_en_b_u_t;
-
-#endif
-
-
-
-
-/************************************************************************
- * *
- * This register controls operation of the debug data from the PI, *
- * along with Debug_Sel[2:0] from the Debug module. For some values *
- * of Debug_Sel[2:0], the B_SEL bit selects whether the debug bits *
- * are looking at the processor A or processor B logic. The remaining *
- * bits select which signal(s) are ORed to create DebugData bits 31 *
- * and 30 for all of the PI debug selections. *
- * *
- ************************************************************************/
-
-
-
-
-#ifdef LITTLE_ENDIAN
-
-typedef union pi_debug_sel_u {
- bdrkreg_t pi_debug_sel_regval;
- struct {
- bdrkreg_t ds_low_t5cc_a : 1;
- bdrkreg_t ds_low_t5cc_b : 1;
- bdrkreg_t ds_low_totcc_a : 1;
- bdrkreg_t ds_low_totcc_b : 1;
- bdrkreg_t ds_low_reqcc_a : 1;
- bdrkreg_t ds_low_reqcc_b : 1;
- bdrkreg_t ds_low_rplcc_a : 1;
- bdrkreg_t ds_low_rplcc_b : 1;
- bdrkreg_t ds_low_intcc : 1;
- bdrkreg_t ds_low_perf_inc_a_0 : 1;
- bdrkreg_t ds_low_perf_inc_a_1 : 1;
- bdrkreg_t ds_low_perf_inc_b_0 : 1;
- bdrkreg_t ds_low_perf_inc_b_1 : 1;
- bdrkreg_t ds_high_t5cc_a : 1;
- bdrkreg_t ds_high_t5cc_b : 1;
- bdrkreg_t ds_high_totcc_a : 1;
- bdrkreg_t ds_high_totcc_b : 1;
- bdrkreg_t ds_high_reqcc_a : 1;
- bdrkreg_t ds_high_reqcc_b : 1;
- bdrkreg_t ds_high_rplcc_a : 1;
- bdrkreg_t ds_high_rplcc_b : 1;
- bdrkreg_t ds_high_intcc : 1;
- bdrkreg_t ds_high_perf_inc_a_0 : 1;
- bdrkreg_t ds_high_perf_inc_a_1 : 1;
- bdrkreg_t ds_high_perf_inc_b_0 : 1;
- bdrkreg_t ds_high_perf_inc_b_1 : 1;
- bdrkreg_t ds_b_sel : 1;
- bdrkreg_t ds_rsvd : 37;
- } pi_debug_sel_fld_s;
-} pi_debug_sel_u_t;
-
-#else
-
-typedef union pi_debug_sel_u {
- bdrkreg_t pi_debug_sel_regval;
- struct {
- bdrkreg_t ds_rsvd : 37;
- bdrkreg_t ds_b_sel : 1;
- bdrkreg_t ds_high_perf_inc_b_1 : 1;
- bdrkreg_t ds_high_perf_inc_b_0 : 1;
- bdrkreg_t ds_high_perf_inc_a_1 : 1;
- bdrkreg_t ds_high_perf_inc_a_0 : 1;
- bdrkreg_t ds_high_intcc : 1;
- bdrkreg_t ds_high_rplcc_b : 1;
- bdrkreg_t ds_high_rplcc_a : 1;
- bdrkreg_t ds_high_reqcc_b : 1;
- bdrkreg_t ds_high_reqcc_a : 1;
- bdrkreg_t ds_high_totcc_b : 1;
- bdrkreg_t ds_high_totcc_a : 1;
- bdrkreg_t ds_high_t5cc_b : 1;
- bdrkreg_t ds_high_t5cc_a : 1;
- bdrkreg_t ds_low_perf_inc_b_1 : 1;
- bdrkreg_t ds_low_perf_inc_b_0 : 1;
- bdrkreg_t ds_low_perf_inc_a_1 : 1;
- bdrkreg_t ds_low_perf_inc_a_0 : 1;
- bdrkreg_t ds_low_intcc : 1;
- bdrkreg_t ds_low_rplcc_b : 1;
- bdrkreg_t ds_low_rplcc_a : 1;
- bdrkreg_t ds_low_reqcc_b : 1;
- bdrkreg_t ds_low_reqcc_a : 1;
- bdrkreg_t ds_low_totcc_b : 1;
- bdrkreg_t ds_low_totcc_a : 1;
- bdrkreg_t ds_low_t5cc_b : 1;
- bdrkreg_t ds_low_t5cc_a : 1;
- } pi_debug_sel_fld_s;
-} pi_debug_sel_u_t;
-
-#endif
-
-
-/************************************************************************
- * *
- * A write to this register allows a single bit in the INT_PEND0 or *
- * INT_PEND1 registers to be set or cleared. If 6 is clear, a bit is *
- * modified in INT_PEND0, while if 6 is set, a bit is modified in *
- * INT_PEND1. The value in 5:0 (ranging from 63 to 0) will determine *
- * which bit in the register is effected. The value of 8 will *
- * determine whether the desired bit is set (8=1) or cleared (8=0). *
- * This is the only register which is accessible by IO issued PWRI *
- * command and is protected through the IO_PROTECT register. If the *
- * region bit in the IO_PROTECT is not set then a WERR reply is *
- * issued. CPU access is controlled through CPU_PROTECT. The contents *
- * of this register are masked with the contents of INT_MASK_A *
- * (INT_MASK_B) to determine whether an L2 interrupt is issued to *
- * CPU_A (CPU_B). *
- * *
- ************************************************************************/
-
-
-
-
-#ifdef LITTLE_ENDIAN
-
-typedef union pi_int_pend_mod_alias_u {
- bdrkreg_t pi_int_pend_mod_alias_regval;
- struct {
- bdrkreg_t ipma_bit_select : 6;
- bdrkreg_t ipma_reg_select : 1;
- bdrkreg_t ipma_rsvd_1 : 1;
- bdrkreg_t ipma_value : 1;
- bdrkreg_t ipma_rsvd : 55;
- } pi_int_pend_mod_alias_fld_s;
-} pi_int_pend_mod_alias_u_t;
-
-#else
-
-typedef union pi_int_pend_mod_alias_u {
- bdrkreg_t pi_int_pend_mod_alias_regval;
- struct {
- bdrkreg_t ipma_rsvd : 55;
- bdrkreg_t ipma_value : 1;
- bdrkreg_t ipma_rsvd_1 : 1;
- bdrkreg_t ipma_reg_select : 1;
- bdrkreg_t ipma_bit_select : 6;
- } pi_int_pend_mod_alias_fld_s;
-} pi_int_pend_mod_alias_u_t;
-
-#endif
-
-
-
-
-/************************************************************************
- * *
- * There is one of these registers for each CPU. This register *
- * specifies the value of the Graphics Page. Uncached writes into the *
- * Graphics Page (with uncached attribute of IO) are done with GFXWS *
- * commands rather than the normal PWRI commands. GFXWS commands are *
- * tracked with the graphics credit counters. *
- * *
- ************************************************************************/
-
-
-
-
-#ifdef LITTLE_ENDIAN
-
-typedef union pi_gfx_page_a_u {
- bdrkreg_t pi_gfx_page_a_regval;
- struct {
- bdrkreg_t gpa_rsvd_1 : 17;
- bdrkreg_t gpa_gfx_page_addr : 23;
- bdrkreg_t gpa_en_gfx_page : 1;
- bdrkreg_t gpa_rsvd : 23;
- } pi_gfx_page_a_fld_s;
-} pi_gfx_page_a_u_t;
-
-#else
-
-typedef union pi_gfx_page_a_u {
- bdrkreg_t pi_gfx_page_a_regval;
- struct {
- bdrkreg_t gpa_rsvd : 23;
- bdrkreg_t gpa_en_gfx_page : 1;
- bdrkreg_t gpa_gfx_page_addr : 23;
- bdrkreg_t gpa_rsvd_1 : 17;
- } pi_gfx_page_a_fld_s;
-} pi_gfx_page_a_u_t;
-
-#endif
-
-
-
-
-/************************************************************************
- * *
- * There is one of these registers for each CPU. This register *
- * counts graphics credits. This counter is decremented for each *
- * doubleword sent to graphics with GFXWS or GFXWL commands. It is *
- * incremented for each doubleword acknowledge from graphics. When *
- * this counter has a smaller value than the GFX_BIAS register, *
- * SysWrRdy_L is deasserted, an interrupt is sent to the processor, *
- * and SysWrRdy_L is allowed to be asserted again. This is the basic *
- * mechanism for flow-controlling graphics writes. *
- * *
- ************************************************************************/
-
-
-
-
-#ifdef LITTLE_ENDIAN
-
-typedef union pi_gfx_credit_cntr_a_u {
- bdrkreg_t pi_gfx_credit_cntr_a_regval;
- struct {
- bdrkreg_t gcca_gfx_credit_cntr : 12;
- bdrkreg_t gcca_rsvd : 52;
- } pi_gfx_credit_cntr_a_fld_s;
-} pi_gfx_credit_cntr_a_u_t;
-
-#else
-
-typedef union pi_gfx_credit_cntr_a_u {
- bdrkreg_t pi_gfx_credit_cntr_a_regval;
- struct {
- bdrkreg_t gcca_rsvd : 52;
- bdrkreg_t gcca_gfx_credit_cntr : 12;
- } pi_gfx_credit_cntr_a_fld_s;
-} pi_gfx_credit_cntr_a_u_t;
-
-#endif
-
-
-
-
-/************************************************************************
- * *
- * There is one of these registers for each CPU. When the graphics *
- * credit counter is less than or equal to this value, a flow control *
- * interrupt is sent. *
- * *
- ************************************************************************/
-
-
-
-
-#ifdef LITTLE_ENDIAN
-
-typedef union pi_gfx_bias_a_u {
- bdrkreg_t pi_gfx_bias_a_regval;
- struct {
- bdrkreg_t gba_gfx_bias : 12;
- bdrkreg_t gba_rsvd : 52;
- } pi_gfx_bias_a_fld_s;
-} pi_gfx_bias_a_u_t;
-
-#else
-
-typedef union pi_gfx_bias_a_u {
- bdrkreg_t pi_gfx_bias_a_regval;
- struct {
- bdrkreg_t gba_rsvd : 52;
- bdrkreg_t gba_gfx_bias : 12;
- } pi_gfx_bias_a_fld_s;
-} pi_gfx_bias_a_u_t;
-
-#endif
-
-
-
-
-/************************************************************************
- * *
- * Description: There is one of these registers for each CPU. When *
- * this counter reaches the value of the GFX_INT_CMP register, an *
- * interrupt is sent to the associated processor. At each clock *
- * cycle, the value in this register can be changed by any one of the *
- * following actions: *
- * - Written by software. *
- * - Loaded with the value of GFX_INT_CMP, when an interrupt, NMI, or *
- * soft reset occurs, thus preventing an additional interrupt. *
- * - Zeroed, when the GFX_CREDIT_CNTR rises above the bias value. *
- * - Incremented (by one at each clock) for each clock that the *
- * GFX_CREDIT_CNTR is less than or equal to zero. *
- * *
- ************************************************************************/
-
-
-
-
-#ifdef LITTLE_ENDIAN
-
-typedef union pi_gfx_int_cntr_a_u {
- bdrkreg_t pi_gfx_int_cntr_a_regval;
- struct {
- bdrkreg_t gica_gfx_int_cntr : 26;
- bdrkreg_t gica_rsvd : 38;
- } pi_gfx_int_cntr_a_fld_s;
-} pi_gfx_int_cntr_a_u_t;
-
-#else
-
-typedef union pi_gfx_int_cntr_a_u {
- bdrkreg_t pi_gfx_int_cntr_a_regval;
- struct {
- bdrkreg_t gica_rsvd : 38;
- bdrkreg_t gica_gfx_int_cntr : 26;
- } pi_gfx_int_cntr_a_fld_s;
-} pi_gfx_int_cntr_a_u_t;
-
-#endif
-
-
-
-
-/************************************************************************
- * *
- * There is one of these registers for each CPU. The value in this *
- * register is loaded into the GFX_INT_CNTR register when an *
- * interrupt, NMI, or soft reset is sent to the processor. The value *
- * in this register is compared to the value of GFX_INT_CNTR and an *
- * interrupt is sent when they become equal. *
- * *
- ************************************************************************/
-
-
-
-
-#ifdef LINUX
-
-typedef union pi_gfx_int_cmp_a_u {
- bdrkreg_t pi_gfx_int_cmp_a_regval;
- struct {
- bdrkreg_t gica_gfx_int_cmp : 26;
- bdrkreg_t gica_rsvd : 38;
- } pi_gfx_int_cmp_a_fld_s;
-} pi_gfx_int_cmp_a_u_t;
-
-#else
-
-typedef union pi_gfx_int_cmp_a_u {
- bdrkreg_t pi_gfx_int_cmp_a_regval;
- struct {
- bdrkreg_t gica_rsvd : 38;
- bdrkreg_t gica_gfx_int_cmp : 26;
- } pi_gfx_int_cmp_a_fld_s;
-} pi_gfx_int_cmp_a_u_t;
-
-#endif
-
-
-
-
-/************************************************************************
- * *
- * There is one of these registers for each CPU. This register *
- * specifies the value of the Graphics Page. Uncached writes into the *
- * Graphics Page (with uncached attribute of IO) are done with GFXWS *
- * commands rather than the normal PWRI commands. GFXWS commands are *
- * tracked with the graphics credit counters. *
- * *
- ************************************************************************/
-
-
-
-
-#ifdef LITTLE_ENDIAN
-
-typedef union pi_gfx_page_b_u {
- bdrkreg_t pi_gfx_page_b_regval;
- struct {
- bdrkreg_t gpb_rsvd_1 : 17;
- bdrkreg_t gpb_gfx_page_addr : 23;
- bdrkreg_t gpb_en_gfx_page : 1;
- bdrkreg_t gpb_rsvd : 23;
- } pi_gfx_page_b_fld_s;
-} pi_gfx_page_b_u_t;
-
-#else
-
-typedef union pi_gfx_page_b_u {
- bdrkreg_t pi_gfx_page_b_regval;
- struct {
- bdrkreg_t gpb_rsvd : 23;
- bdrkreg_t gpb_en_gfx_page : 1;
- bdrkreg_t gpb_gfx_page_addr : 23;
- bdrkreg_t gpb_rsvd_1 : 17;
- } pi_gfx_page_b_fld_s;
-} pi_gfx_page_b_u_t;
-
-#endif
-
-
-
-
-/************************************************************************
- * *
- * There is one of these registers for each CPU. This register *
- * counts graphics credits. This counter is decremented for each *
- * doubleword sent to graphics with GFXWS or GFXWL commands. It is *
- * incremented for each doubleword acknowledge from graphics. When *
- * this counter has a smaller value than the GFX_BIAS register, *
- * SysWrRdy_L is deasserted, an interrupt is sent to the processor, *
- * and SysWrRdy_L is allowed to be asserted again. This is the basic *
- * mechanism for flow-controlling graphics writes. *
- * *
- ************************************************************************/
-
-
-
-
-#ifdef LITTLE_ENDIAN
-
-typedef union pi_gfx_credit_cntr_b_u {
- bdrkreg_t pi_gfx_credit_cntr_b_regval;
- struct {
- bdrkreg_t gccb_gfx_credit_cntr : 12;
- bdrkreg_t gccb_rsvd : 52;
- } pi_gfx_credit_cntr_b_fld_s;
-} pi_gfx_credit_cntr_b_u_t;
-
-#else
-
-typedef union pi_gfx_credit_cntr_b_u {
- bdrkreg_t pi_gfx_credit_cntr_b_regval;
- struct {
- bdrkreg_t gccb_rsvd : 52;
- bdrkreg_t gccb_gfx_credit_cntr : 12;
- } pi_gfx_credit_cntr_b_fld_s;
-} pi_gfx_credit_cntr_b_u_t;
-
-#endif
-
-
-
-
-/************************************************************************
- * *
- * There is one of these registers for each CPU. When the graphics *
- * credit counter is less than or equal to this value, a flow control *
- * interrupt is sent. *
- * *
- ************************************************************************/
-
-
-
-
-#ifdef LITTLE_ENDIAN
-
-typedef union pi_gfx_bias_b_u {
- bdrkreg_t pi_gfx_bias_b_regval;
- struct {
- bdrkreg_t gbb_gfx_bias : 12;
- bdrkreg_t gbb_rsvd : 52;
- } pi_gfx_bias_b_fld_s;
-} pi_gfx_bias_b_u_t;
-
-#else
-
-typedef union pi_gfx_bias_b_u {
- bdrkreg_t pi_gfx_bias_b_regval;
- struct {
- bdrkreg_t gbb_rsvd : 52;
- bdrkreg_t gbb_gfx_bias : 12;
- } pi_gfx_bias_b_fld_s;
-} pi_gfx_bias_b_u_t;
-
-#endif
-
-
-
-
-/************************************************************************
- * *
- * Description: There is one of these registers for each CPU. When *
- * this counter reaches the value of the GFX_INT_CMP register, an *
- * interrupt is sent to the associated processor. At each clock *
- * cycle, the value in this register can be changed by any one of the *
- * following actions: *
- * - Written by software. *
- * - Loaded with the value of GFX_INT_CMP, when an interrupt, NMI, or *
- * soft reset occurs, thus preventing an additional interrupt. *
- * - Zeroed, when the GFX_CREDIT_CNTR rises above the bias value. *
- * - Incremented (by one at each clock) for each clock that the *
- * GFX_CREDIT_CNTR is less than or equal to zero. *
- * *
- ************************************************************************/
-
-
-
-
-#ifdef LITTLE_ENDIAN
-
-typedef union pi_gfx_int_cntr_b_u {
- bdrkreg_t pi_gfx_int_cntr_b_regval;
- struct {
- bdrkreg_t gicb_gfx_int_cntr : 26;
- bdrkreg_t gicb_rsvd : 38;
- } pi_gfx_int_cntr_b_fld_s;
-} pi_gfx_int_cntr_b_u_t;
-
-#else
-
-typedef union pi_gfx_int_cntr_b_u {
- bdrkreg_t pi_gfx_int_cntr_b_regval;
- struct {
- bdrkreg_t gicb_rsvd : 38;
- bdrkreg_t gicb_gfx_int_cntr : 26;
- } pi_gfx_int_cntr_b_fld_s;
-} pi_gfx_int_cntr_b_u_t;
-
-#endif
-
-
-
-
-/************************************************************************
- * *
- * There is one of these registers for each CPU. The value in this *
- * register is loaded into the GFX_INT_CNTR register when an *
- * interrupt, NMI, or soft reset is sent to the processor. The value *
- * in this register is compared to the value of GFX_INT_CNTR and an *
- * interrupt is sent when they become equal. *
- * *
- ************************************************************************/
-
-
-
-
-#ifdef LITTLE_ENDIAN
-
-typedef union pi_gfx_int_cmp_b_u {
- bdrkreg_t pi_gfx_int_cmp_b_regval;
- struct {
- bdrkreg_t gicb_gfx_int_cmp : 26;
- bdrkreg_t gicb_rsvd : 38;
- } pi_gfx_int_cmp_b_fld_s;
-} pi_gfx_int_cmp_b_u_t;
-
-#else
-
-typedef union pi_gfx_int_cmp_b_u {
- bdrkreg_t pi_gfx_int_cmp_b_regval;
- struct {
- bdrkreg_t gicb_rsvd : 38;
- bdrkreg_t gicb_gfx_int_cmp : 26;
- } pi_gfx_int_cmp_b_fld_s;
-} pi_gfx_int_cmp_b_u_t;
-
-#endif
-
-
-
-
-/************************************************************************
- * *
- * Description: A read of this register returns all sources of *
- * Bedrock Error Interrupts. Storing to the write-with-clear location *
- * clears any bit for which a one appears on the data bus. Storing to *
- * the writable location does a direct write to all unreserved bits *
- * (except for MEM_UNC). *
- * In Synergy mode, the processor that is the source of the command *
- * that got an error is independent of the A or B SysAD bus. So in *
- * Synergy mode, Synergy provides the source processor number in bit *
- * 52 of the SysAD bus in all commands. The PI saves this in the RRB *
- * or WRB entry, and uses that value to determine which error bit (A *
- * or B) to set, as well as which ERR_STATUS and spool registers to *
- * use, for all error types in this register that are specified as an *
- * error to CPU_A or CPU_B. *
- * This register is not cleared at reset. *
- * *
- ************************************************************************/
-
-
-
-
-#ifdef LITTLE_ENDIAN
-
-typedef union pi_err_int_pend_wr_u {
- bdrkreg_t pi_err_int_pend_wr_regval;
- struct {
- bdrkreg_t eipw_spool_comp_b : 1;
- bdrkreg_t eipw_spool_comp_a : 1;
- bdrkreg_t eipw_spurious_b : 1;
- bdrkreg_t eipw_spurious_a : 1;
- bdrkreg_t eipw_wrb_terr_b : 1;
- bdrkreg_t eipw_wrb_terr_a : 1;
- bdrkreg_t eipw_wrb_werr_b : 1;
- bdrkreg_t eipw_wrb_werr_a : 1;
- bdrkreg_t eipw_sysstate_par_b : 1;
- bdrkreg_t eipw_sysstate_par_a : 1;
- bdrkreg_t eipw_sysad_data_ecc_b : 1;
- bdrkreg_t eipw_sysad_data_ecc_a : 1;
- bdrkreg_t eipw_sysad_addr_ecc_b : 1;
- bdrkreg_t eipw_sysad_addr_ecc_a : 1;
- bdrkreg_t eipw_syscmd_data_par_b : 1;
- bdrkreg_t eipw_syscmd_data_par_a : 1;
- bdrkreg_t eipw_syscmd_addr_par_b : 1;
- bdrkreg_t eipw_syscmd_addr_par_a : 1;
- bdrkreg_t eipw_spool_err_b : 1;
- bdrkreg_t eipw_spool_err_a : 1;
- bdrkreg_t eipw_ue_uncached_b : 1;
- bdrkreg_t eipw_ue_uncached_a : 1;
- bdrkreg_t eipw_sysstate_tag_b : 1;
- bdrkreg_t eipw_sysstate_tag_a : 1;
- bdrkreg_t eipw_mem_unc : 1;
- bdrkreg_t eipw_sysad_bad_data_b : 1;
- bdrkreg_t eipw_sysad_bad_data_a : 1;
- bdrkreg_t eipw_ue_cached_b : 1;
- bdrkreg_t eipw_ue_cached_a : 1;
- bdrkreg_t eipw_pkt_len_err_b : 1;
- bdrkreg_t eipw_pkt_len_err_a : 1;
- bdrkreg_t eipw_irb_err_b : 1;
- bdrkreg_t eipw_irb_err_a : 1;
- bdrkreg_t eipw_irb_timeout_b : 1;
- bdrkreg_t eipw_irb_timeout_a : 1;
- bdrkreg_t eipw_rsvd : 29;
- } pi_err_int_pend_wr_fld_s;
-} pi_err_int_pend_wr_u_t;
-
-#else
-
-typedef union pi_err_int_pend_wr_u {
- bdrkreg_t pi_err_int_pend_wr_regval;
- struct {
- bdrkreg_t eipw_rsvd : 29;
- bdrkreg_t eipw_irb_timeout_a : 1;
- bdrkreg_t eipw_irb_timeout_b : 1;
- bdrkreg_t eipw_irb_err_a : 1;
- bdrkreg_t eipw_irb_err_b : 1;
- bdrkreg_t eipw_pkt_len_err_a : 1;
- bdrkreg_t eipw_pkt_len_err_b : 1;
- bdrkreg_t eipw_ue_cached_a : 1;
- bdrkreg_t eipw_ue_cached_b : 1;
- bdrkreg_t eipw_sysad_bad_data_a : 1;
- bdrkreg_t eipw_sysad_bad_data_b : 1;
- bdrkreg_t eipw_mem_unc : 1;
- bdrkreg_t eipw_sysstate_tag_a : 1;
- bdrkreg_t eipw_sysstate_tag_b : 1;
- bdrkreg_t eipw_ue_uncached_a : 1;
- bdrkreg_t eipw_ue_uncached_b : 1;
- bdrkreg_t eipw_spool_err_a : 1;
- bdrkreg_t eipw_spool_err_b : 1;
- bdrkreg_t eipw_syscmd_addr_par_a : 1;
- bdrkreg_t eipw_syscmd_addr_par_b : 1;
- bdrkreg_t eipw_syscmd_data_par_a : 1;
- bdrkreg_t eipw_syscmd_data_par_b : 1;
- bdrkreg_t eipw_sysad_addr_ecc_a : 1;
- bdrkreg_t eipw_sysad_addr_ecc_b : 1;
- bdrkreg_t eipw_sysad_data_ecc_a : 1;
- bdrkreg_t eipw_sysad_data_ecc_b : 1;
- bdrkreg_t eipw_sysstate_par_a : 1;
- bdrkreg_t eipw_sysstate_par_b : 1;
- bdrkreg_t eipw_wrb_werr_a : 1;
- bdrkreg_t eipw_wrb_werr_b : 1;
- bdrkreg_t eipw_wrb_terr_a : 1;
- bdrkreg_t eipw_wrb_terr_b : 1;
- bdrkreg_t eipw_spurious_a : 1;
- bdrkreg_t eipw_spurious_b : 1;
- bdrkreg_t eipw_spool_comp_a : 1;
- bdrkreg_t eipw_spool_comp_b : 1;
- } pi_err_int_pend_wr_fld_s;
-} pi_err_int_pend_wr_u_t;
-
-#endif
-
-
-
-
-/************************************************************************
- * *
- * Description: A read of this register returns all sources of *
- * Bedrock Error Interrupts. Storing to the write-with-clear location *
- * clears any bit for which a one appears on the data bus. Storing to *
- * the writable location does a direct write to all unreserved bits *
- * (except for MEM_UNC). *
- * In Synergy mode, the processor that is the source of the command *
- * that got an error is independent of the A or B SysAD bus. So in *
- * Synergy mode, Synergy provides the source processor number in bit *
- * 52 of the SysAD bus in all commands. The PI saves this in the RRB *
- * or WRB entry, and uses that value to determine which error bit (A *
- * or B) to set, as well as which ERR_STATUS and spool registers to *
- * use, for all error types in this register that are specified as an *
- * error to CPU_A or CPU_B. *
- * This register is not cleared at reset. *
- * *
- ************************************************************************/
-
-
-
-
-#ifdef LITTLE_ENDIAN
-
-typedef union pi_err_int_pend_u {
- bdrkreg_t pi_err_int_pend_regval;
- struct {
- bdrkreg_t eip_spool_comp_b : 1;
- bdrkreg_t eip_spool_comp_a : 1;
- bdrkreg_t eip_spurious_b : 1;
- bdrkreg_t eip_spurious_a : 1;
- bdrkreg_t eip_wrb_terr_b : 1;
- bdrkreg_t eip_wrb_terr_a : 1;
- bdrkreg_t eip_wrb_werr_b : 1;
- bdrkreg_t eip_wrb_werr_a : 1;
- bdrkreg_t eip_sysstate_par_b : 1;
- bdrkreg_t eip_sysstate_par_a : 1;
- bdrkreg_t eip_sysad_data_ecc_b : 1;
- bdrkreg_t eip_sysad_data_ecc_a : 1;
- bdrkreg_t eip_sysad_addr_ecc_b : 1;
- bdrkreg_t eip_sysad_addr_ecc_a : 1;
- bdrkreg_t eip_syscmd_data_par_b : 1;
- bdrkreg_t eip_syscmd_data_par_a : 1;
- bdrkreg_t eip_syscmd_addr_par_b : 1;
- bdrkreg_t eip_syscmd_addr_par_a : 1;
- bdrkreg_t eip_spool_err_b : 1;
- bdrkreg_t eip_spool_err_a : 1;
- bdrkreg_t eip_ue_uncached_b : 1;
- bdrkreg_t eip_ue_uncached_a : 1;
- bdrkreg_t eip_sysstate_tag_b : 1;
- bdrkreg_t eip_sysstate_tag_a : 1;
- bdrkreg_t eip_mem_unc : 1;
- bdrkreg_t eip_sysad_bad_data_b : 1;
- bdrkreg_t eip_sysad_bad_data_a : 1;
- bdrkreg_t eip_ue_cached_b : 1;
- bdrkreg_t eip_ue_cached_a : 1;
- bdrkreg_t eip_pkt_len_err_b : 1;
- bdrkreg_t eip_pkt_len_err_a : 1;
- bdrkreg_t eip_irb_err_b : 1;
- bdrkreg_t eip_irb_err_a : 1;
- bdrkreg_t eip_irb_timeout_b : 1;
- bdrkreg_t eip_irb_timeout_a : 1;
- bdrkreg_t eip_rsvd : 29;
- } pi_err_int_pend_fld_s;
-} pi_err_int_pend_u_t;
-
-#else
-
-typedef union pi_err_int_pend_u {
- bdrkreg_t pi_err_int_pend_regval;
- struct {
- bdrkreg_t eip_rsvd : 29;
- bdrkreg_t eip_irb_timeout_a : 1;
- bdrkreg_t eip_irb_timeout_b : 1;
- bdrkreg_t eip_irb_err_a : 1;
- bdrkreg_t eip_irb_err_b : 1;
- bdrkreg_t eip_pkt_len_err_a : 1;
- bdrkreg_t eip_pkt_len_err_b : 1;
- bdrkreg_t eip_ue_cached_a : 1;
- bdrkreg_t eip_ue_cached_b : 1;
- bdrkreg_t eip_sysad_bad_data_a : 1;
- bdrkreg_t eip_sysad_bad_data_b : 1;
- bdrkreg_t eip_mem_unc : 1;
- bdrkreg_t eip_sysstate_tag_a : 1;
- bdrkreg_t eip_sysstate_tag_b : 1;
- bdrkreg_t eip_ue_uncached_a : 1;
- bdrkreg_t eip_ue_uncached_b : 1;
- bdrkreg_t eip_spool_err_a : 1;
- bdrkreg_t eip_spool_err_b : 1;
- bdrkreg_t eip_syscmd_addr_par_a : 1;
- bdrkreg_t eip_syscmd_addr_par_b : 1;
- bdrkreg_t eip_syscmd_data_par_a : 1;
- bdrkreg_t eip_syscmd_data_par_b : 1;
- bdrkreg_t eip_sysad_addr_ecc_a : 1;
- bdrkreg_t eip_sysad_addr_ecc_b : 1;
- bdrkreg_t eip_sysad_data_ecc_a : 1;
- bdrkreg_t eip_sysad_data_ecc_b : 1;
- bdrkreg_t eip_sysstate_par_a : 1;
- bdrkreg_t eip_sysstate_par_b : 1;
- bdrkreg_t eip_wrb_werr_a : 1;
- bdrkreg_t eip_wrb_werr_b : 1;
- bdrkreg_t eip_wrb_terr_a : 1;
- bdrkreg_t eip_wrb_terr_b : 1;
- bdrkreg_t eip_spurious_a : 1;
- bdrkreg_t eip_spurious_b : 1;
- bdrkreg_t eip_spool_comp_a : 1;
- bdrkreg_t eip_spool_comp_b : 1;
- } pi_err_int_pend_fld_s;
-} pi_err_int_pend_u_t;
-
-#endif
-
-
-
-
-
-/************************************************************************
- * *
- * There is one of these registers for each CPU. This read/write *
- * register masks the contents of ERR_INT_PEND to determine which *
- * conditions cause a Level-6 interrupt to CPU_A or CPU_B. A bit set *
- * allows the interrupt. Only one processor in a Bedrock should *
- * enable the Memory/Directory Uncorrectable Error bit. *
- * *
- ************************************************************************/
-
-
-
-
-#ifdef LITTLE_ENDIAN
-
-typedef union pi_err_int_mask_a_u {
- bdrkreg_t pi_err_int_mask_a_regval;
- struct {
- bdrkreg_t eima_mask : 35;
- bdrkreg_t eima_rsvd : 29;
- } pi_err_int_mask_a_fld_s;
-} pi_err_int_mask_a_u_t;
-
-#else
-
-typedef union pi_err_int_mask_a_u {
- bdrkreg_t pi_err_int_mask_a_regval;
- struct {
- bdrkreg_t eima_rsvd : 29;
- bdrkreg_t eima_mask : 35;
- } pi_err_int_mask_a_fld_s;
-} pi_err_int_mask_a_u_t;
-
-#endif
-
-
-
-
-/************************************************************************
- * *
- * There is one of these registers for each CPU. This read/write *
- * register masks the contents of ERR_INT_PEND to determine which *
- * conditions cause a Level-6 interrupt to CPU_A or CPU_B. A bit set *
- * allows the interrupt. Only one processor in a Bedrock should *
- * enable the Memory/Directory Uncorrectable Error bit. *
- * *
- ************************************************************************/
-
-
-
-
-#ifdef LITTLE_ENDIAN
-
-typedef union pi_err_int_mask_b_u {
- bdrkreg_t pi_err_int_mask_b_regval;
- struct {
- bdrkreg_t eimb_mask : 35;
- bdrkreg_t eimb_rsvd : 29;
- } pi_err_int_mask_b_fld_s;
-} pi_err_int_mask_b_u_t;
-
-#else
-
-typedef union pi_err_int_mask_b_u {
- bdrkreg_t pi_err_int_mask_b_regval;
- struct {
- bdrkreg_t eimb_rsvd : 29;
- bdrkreg_t eimb_mask : 35;
- } pi_err_int_mask_b_fld_s;
-} pi_err_int_mask_b_u_t;
-
-#endif
-
-
-
-
-/************************************************************************
- * *
- * Description: There is one of these registers for each CPU. This *
- * register is the address of the next write to the error stack. This *
- * register is incremented after each such write. Only the low N bits *
- * are incremented, where N is defined by the size of the error stack *
- * specified in the ERR_STACK_SIZE register. *
- * This register is not reset by a soft reset. *
- * *
- ************************************************************************/
-
-
-
-
-#ifdef LITTLE_ENDIAN
-
-typedef union pi_err_stack_addr_a_u {
- bdrkreg_t pi_err_stack_addr_a_regval;
- struct {
- bdrkreg_t esaa_rsvd_1 : 3;
- bdrkreg_t esaa_addr : 30;
- bdrkreg_t esaa_rsvd : 31;
- } pi_err_stack_addr_a_fld_s;
-} pi_err_stack_addr_a_u_t;
-
-#else
-
-typedef union pi_err_stack_addr_a_u {
- bdrkreg_t pi_err_stack_addr_a_regval;
- struct {
- bdrkreg_t esaa_rsvd : 31;
- bdrkreg_t esaa_addr : 30;
- bdrkreg_t esaa_rsvd_1 : 3;
- } pi_err_stack_addr_a_fld_s;
-} pi_err_stack_addr_a_u_t;
-
-#endif
-
-
-
-
-/************************************************************************
- * *
- * Description: There is one of these registers for each CPU. This *
- * register is the address of the next write to the error stack. This *
- * register is incremented after each such write. Only the low N bits *
- * are incremented, where N is defined by the size of the error stack *
- * specified in the ERR_STACK_SIZE register. *
- * This register is not reset by a soft reset. *
- * *
- ************************************************************************/
-
-
-
-
-#ifdef LITTLE_ENDIAN
-
-typedef union pi_err_stack_addr_b_u {
- bdrkreg_t pi_err_stack_addr_b_regval;
- struct {
- bdrkreg_t esab_rsvd_1 : 3;
- bdrkreg_t esab_addr : 30;
- bdrkreg_t esab_rsvd : 31;
- } pi_err_stack_addr_b_fld_s;
-} pi_err_stack_addr_b_u_t;
-
-#else
-
-typedef union pi_err_stack_addr_b_u {
- bdrkreg_t pi_err_stack_addr_b_regval;
- struct {
- bdrkreg_t esab_rsvd : 31;
- bdrkreg_t esab_addr : 30;
- bdrkreg_t esab_rsvd_1 : 3;
- } pi_err_stack_addr_b_fld_s;
-} pi_err_stack_addr_b_u_t;
-
-#endif
-
-
-
-
-/************************************************************************
- * *
- * Description: Sets the size (number of 64-bit entries) in the *
- * error stack that is spooled to local memory when an error occurs. *
- * Table16 defines the format of each entry in the spooled error *
- * stack. *
- * This register is not reset by a soft reset. *
- * *
- ************************************************************************/
-
-
-
-
-#ifdef LITTLE_ENDIAN
-
-typedef union pi_err_stack_size_u {
- bdrkreg_t pi_err_stack_size_regval;
- struct {
- bdrkreg_t ess_size : 4;
- bdrkreg_t ess_rsvd : 60;
- } pi_err_stack_size_fld_s;
-} pi_err_stack_size_u_t;
-
-#else
-
-typedef union pi_err_stack_size_u {
- bdrkreg_t pi_err_stack_size_regval;
- struct {
- bdrkreg_t ess_rsvd : 60;
- bdrkreg_t ess_size : 4;
- } pi_err_stack_size_fld_s;
-} pi_err_stack_size_u_t;
-
-#endif
-
-
-
-
-/************************************************************************
- * *
- * This register is not cleared at reset. Writing this register with *
- * the Write-clear address (with any data) clears both the *
- * ERR_STATUS0_A and ERR_STATUS1_A registers. *
- * *
- ************************************************************************/
-
-
-
-
-#ifdef LITTLE_ENDIAN
-
-typedef union pi_err_status0_a_u {
- bdrkreg_t pi_err_status0_a_regval;
- struct {
- bdrkreg_t esa_error_type : 3;
- bdrkreg_t esa_proc_req_num : 3;
- bdrkreg_t esa_supplemental : 11;
- bdrkreg_t esa_cmd : 8;
- bdrkreg_t esa_addr : 37;
- bdrkreg_t esa_over_run : 1;
- bdrkreg_t esa_valid : 1;
- } pi_err_status0_a_fld_s;
-} pi_err_status0_a_u_t;
-
-#else
-
-typedef union pi_err_status0_a_u {
- bdrkreg_t pi_err_status0_a_regval;
- struct {
- bdrkreg_t esa_valid : 1;
- bdrkreg_t esa_over_run : 1;
- bdrkreg_t esa_addr : 37;
- bdrkreg_t esa_cmd : 8;
- bdrkreg_t esa_supplemental : 11;
- bdrkreg_t esa_proc_req_num : 3;
- bdrkreg_t esa_error_type : 3;
- } pi_err_status0_a_fld_s;
-} pi_err_status0_a_u_t;
-
-#endif
-
-
-
-
-/************************************************************************
- * *
- * This register is not cleared at reset. Writing this register with *
- * the Write-clear address (with any data) clears both the *
- * ERR_STATUS0_A and ERR_STATUS1_A registers. *
- * *
- ************************************************************************/
-
-
-
-
-#ifdef LITTLE_ENDIAN
-
-typedef union pi_err_status0_a_clr_u {
- bdrkreg_t pi_err_status0_a_clr_regval;
- struct {
- bdrkreg_t esac_error_type : 3;
- bdrkreg_t esac_proc_req_num : 3;
- bdrkreg_t esac_supplemental : 11;
- bdrkreg_t esac_cmd : 8;
- bdrkreg_t esac_addr : 37;
- bdrkreg_t esac_over_run : 1;
- bdrkreg_t esac_valid : 1;
- } pi_err_status0_a_clr_fld_s;
-} pi_err_status0_a_clr_u_t;
-
-#else
-
-typedef union pi_err_status0_a_clr_u {
- bdrkreg_t pi_err_status0_a_clr_regval;
- struct {
- bdrkreg_t esac_valid : 1;
- bdrkreg_t esac_over_run : 1;
- bdrkreg_t esac_addr : 37;
- bdrkreg_t esac_cmd : 8;
- bdrkreg_t esac_supplemental : 11;
- bdrkreg_t esac_proc_req_num : 3;
- bdrkreg_t esac_error_type : 3;
- } pi_err_status0_a_clr_fld_s;
-} pi_err_status0_a_clr_u_t;
-
-#endif
-
-
-
-
-/************************************************************************
- * *
- * This register is not cleared at reset. Writing this register with *
- * the Write-clear address (with any data) clears both the *
- * ERR_STATUS0_A and ERR_STATUS1_A registers. *
- * *
- ************************************************************************/
-
-
-
-
-#ifdef LITTLE_ENDIAN
-
-typedef union pi_err_status1_a_u {
- bdrkreg_t pi_err_status1_a_regval;
- struct {
- bdrkreg_t esa_spool_count : 21;
- bdrkreg_t esa_time_out_count : 8;
- bdrkreg_t esa_inval_count : 10;
- bdrkreg_t esa_crb_num : 3;
- bdrkreg_t esa_wrb : 1;
- bdrkreg_t esa_e_bits : 2;
- bdrkreg_t esa_t_bit : 1;
- bdrkreg_t esa_i_bit : 1;
- bdrkreg_t esa_h_bit : 1;
- bdrkreg_t esa_w_bit : 1;
- bdrkreg_t esa_a_bit : 1;
- bdrkreg_t esa_r_bit : 1;
- bdrkreg_t esa_v_bit : 1;
- bdrkreg_t esa_p_bit : 1;
- bdrkreg_t esa_source : 11;
- } pi_err_status1_a_fld_s;
-} pi_err_status1_a_u_t;
-
-#else
-
-typedef union pi_err_status1_a_u {
- bdrkreg_t pi_err_status1_a_regval;
- struct {
- bdrkreg_t esa_source : 11;
- bdrkreg_t esa_p_bit : 1;
- bdrkreg_t esa_v_bit : 1;
- bdrkreg_t esa_r_bit : 1;
- bdrkreg_t esa_a_bit : 1;
- bdrkreg_t esa_w_bit : 1;
- bdrkreg_t esa_h_bit : 1;
- bdrkreg_t esa_i_bit : 1;
- bdrkreg_t esa_t_bit : 1;
- bdrkreg_t esa_e_bits : 2;
- bdrkreg_t esa_wrb : 1;
- bdrkreg_t esa_crb_num : 3;
- bdrkreg_t esa_inval_count : 10;
- bdrkreg_t esa_time_out_count : 8;
- bdrkreg_t esa_spool_count : 21;
- } pi_err_status1_a_fld_s;
-} pi_err_status1_a_u_t;
-
-#endif
-
-
-
-
-/************************************************************************
- * *
- * This register is not cleared at reset. Writing this register with *
- * the Write-clear address (with any data) clears both the *
- * ERR_STATUS0_A and ERR_STATUS1_A registers. *
- * *
- ************************************************************************/
-
-
-
-
-#ifdef LITTLE_ENDIAN
-
-typedef union pi_err_status1_a_clr_u {
- bdrkreg_t pi_err_status1_a_clr_regval;
- struct {
- bdrkreg_t esac_spool_count : 21;
- bdrkreg_t esac_time_out_count : 8;
- bdrkreg_t esac_inval_count : 10;
- bdrkreg_t esac_crb_num : 3;
- bdrkreg_t esac_wrb : 1;
- bdrkreg_t esac_e_bits : 2;
- bdrkreg_t esac_t_bit : 1;
- bdrkreg_t esac_i_bit : 1;
- bdrkreg_t esac_h_bit : 1;
- bdrkreg_t esac_w_bit : 1;
- bdrkreg_t esac_a_bit : 1;
- bdrkreg_t esac_r_bit : 1;
- bdrkreg_t esac_v_bit : 1;
- bdrkreg_t esac_p_bit : 1;
- bdrkreg_t esac_source : 11;
- } pi_err_status1_a_clr_fld_s;
-} pi_err_status1_a_clr_u_t;
-
-#else
-
-typedef union pi_err_status1_a_clr_u {
- bdrkreg_t pi_err_status1_a_clr_regval;
- struct {
- bdrkreg_t esac_source : 11;
- bdrkreg_t esac_p_bit : 1;
- bdrkreg_t esac_v_bit : 1;
- bdrkreg_t esac_r_bit : 1;
- bdrkreg_t esac_a_bit : 1;
- bdrkreg_t esac_w_bit : 1;
- bdrkreg_t esac_h_bit : 1;
- bdrkreg_t esac_i_bit : 1;
- bdrkreg_t esac_t_bit : 1;
- bdrkreg_t esac_e_bits : 2;
- bdrkreg_t esac_wrb : 1;
- bdrkreg_t esac_crb_num : 3;
- bdrkreg_t esac_inval_count : 10;
- bdrkreg_t esac_time_out_count : 8;
- bdrkreg_t esac_spool_count : 21;
- } pi_err_status1_a_clr_fld_s;
-} pi_err_status1_a_clr_u_t;
-
-#endif
-
-
-
-
-/************************************************************************
- * *
- * This register is not cleared at reset. Writing this register with *
- * the Write-clear address (with any data) clears both the *
- * ERR_STATUS0_B and ERR_STATUS1_B registers. *
- * *
- ************************************************************************/
-
-
-
-
-#ifdef LITTLE_ENDIAN
-
-typedef union pi_err_status0_b_u {
- bdrkreg_t pi_err_status0_b_regval;
- struct {
- bdrkreg_t esb_error_type : 3;
- bdrkreg_t esb_proc_request_number : 3;
- bdrkreg_t esb_supplemental : 11;
- bdrkreg_t esb_cmd : 8;
- bdrkreg_t esb_addr : 37;
- bdrkreg_t esb_over_run : 1;
- bdrkreg_t esb_valid : 1;
- } pi_err_status0_b_fld_s;
-} pi_err_status0_b_u_t;
-
-#else
-
-typedef union pi_err_status0_b_u {
- bdrkreg_t pi_err_status0_b_regval;
- struct {
- bdrkreg_t esb_valid : 1;
- bdrkreg_t esb_over_run : 1;
- bdrkreg_t esb_addr : 37;
- bdrkreg_t esb_cmd : 8;
- bdrkreg_t esb_supplemental : 11;
- bdrkreg_t esb_proc_request_number : 3;
- bdrkreg_t esb_error_type : 3;
- } pi_err_status0_b_fld_s;
-} pi_err_status0_b_u_t;
-
-#endif
-
-
-
-
-/************************************************************************
- * *
- * This register is not cleared at reset. Writing this register with *
- * the Write-clear address (with any data) clears both the *
- * ERR_STATUS0_B and ERR_STATUS1_B registers. *
- * *
- ************************************************************************/
-
-
-
-
-#ifdef LITTLE_ENDIAN
-
-typedef union pi_err_status0_b_clr_u {
- bdrkreg_t pi_err_status0_b_clr_regval;
- struct {
- bdrkreg_t esbc_error_type : 3;
- bdrkreg_t esbc_proc_request_number : 3;
- bdrkreg_t esbc_supplemental : 11;
- bdrkreg_t esbc_cmd : 8;
- bdrkreg_t esbc_addr : 37;
- bdrkreg_t esbc_over_run : 1;
- bdrkreg_t esbc_valid : 1;
- } pi_err_status0_b_clr_fld_s;
-} pi_err_status0_b_clr_u_t;
-
-#else
-
-typedef union pi_err_status0_b_clr_u {
- bdrkreg_t pi_err_status0_b_clr_regval;
- struct {
- bdrkreg_t esbc_valid : 1;
- bdrkreg_t esbc_over_run : 1;
- bdrkreg_t esbc_addr : 37;
- bdrkreg_t esbc_cmd : 8;
- bdrkreg_t esbc_supplemental : 11;
- bdrkreg_t esbc_proc_request_number : 3;
- bdrkreg_t esbc_error_type : 3;
- } pi_err_status0_b_clr_fld_s;
-} pi_err_status0_b_clr_u_t;
-
-#endif
-
-
-
-
-/************************************************************************
- * *
- * This register is not cleared at reset. Writing this register with *
- * the Write-clear address (with any data) clears both the *
- * ERR_STATUS0_B and ERR_STATUS1_B registers. *
- * *
- ************************************************************************/
-
-
-
-
-#ifdef LITTLE_ENDIAN
-
-typedef union pi_err_status1_b_u {
- bdrkreg_t pi_err_status1_b_regval;
- struct {
- bdrkreg_t esb_spool_count : 21;
- bdrkreg_t esb_time_out_count : 8;
- bdrkreg_t esb_inval_count : 10;
- bdrkreg_t esb_crb_num : 3;
- bdrkreg_t esb_wrb : 1;
- bdrkreg_t esb_e_bits : 2;
- bdrkreg_t esb_t_bit : 1;
- bdrkreg_t esb_i_bit : 1;
- bdrkreg_t esb_h_bit : 1;
- bdrkreg_t esb_w_bit : 1;
- bdrkreg_t esb_a_bit : 1;
- bdrkreg_t esb_r_bit : 1;
- bdrkreg_t esb_v_bit : 1;
- bdrkreg_t esb_p_bit : 1;
- bdrkreg_t esb_source : 11;
- } pi_err_status1_b_fld_s;
-} pi_err_status1_b_u_t;
-
-#else
-
-typedef union pi_err_status1_b_u {
- bdrkreg_t pi_err_status1_b_regval;
- struct {
- bdrkreg_t esb_source : 11;
- bdrkreg_t esb_p_bit : 1;
- bdrkreg_t esb_v_bit : 1;
- bdrkreg_t esb_r_bit : 1;
- bdrkreg_t esb_a_bit : 1;
- bdrkreg_t esb_w_bit : 1;
- bdrkreg_t esb_h_bit : 1;
- bdrkreg_t esb_i_bit : 1;
- bdrkreg_t esb_t_bit : 1;
- bdrkreg_t esb_e_bits : 2;
- bdrkreg_t esb_wrb : 1;
- bdrkreg_t esb_crb_num : 3;
- bdrkreg_t esb_inval_count : 10;
- bdrkreg_t esb_time_out_count : 8;
- bdrkreg_t esb_spool_count : 21;
- } pi_err_status1_b_fld_s;
-} pi_err_status1_b_u_t;
-
-#endif
-
-
-
-
-/************************************************************************
- * *
- * This register is not cleared at reset. Writing this register with *
- * the Write-clear address (with any data) clears both the *
- * ERR_STATUS0_B and ERR_STATUS1_B registers. *
- * *
- ************************************************************************/
-
-
-
-
-#ifdef LITTLE_ENDIAN
-
-typedef union pi_err_status1_b_clr_u {
- bdrkreg_t pi_err_status1_b_clr_regval;
- struct {
- bdrkreg_t esbc_spool_count : 21;
- bdrkreg_t esbc_time_out_count : 8;
- bdrkreg_t esbc_inval_count : 10;
- bdrkreg_t esbc_crb_num : 3;
- bdrkreg_t esbc_wrb : 1;
- bdrkreg_t esbc_e_bits : 2;
- bdrkreg_t esbc_t_bit : 1;
- bdrkreg_t esbc_i_bit : 1;
- bdrkreg_t esbc_h_bit : 1;
- bdrkreg_t esbc_w_bit : 1;
- bdrkreg_t esbc_a_bit : 1;
- bdrkreg_t esbc_r_bit : 1;
- bdrkreg_t esbc_v_bit : 1;
- bdrkreg_t esbc_p_bit : 1;
- bdrkreg_t esbc_source : 11;
- } pi_err_status1_b_clr_fld_s;
-} pi_err_status1_b_clr_u_t;
-
-#else
-
-typedef union pi_err_status1_b_clr_u {
- bdrkreg_t pi_err_status1_b_clr_regval;
- struct {
- bdrkreg_t esbc_source : 11;
- bdrkreg_t esbc_p_bit : 1;
- bdrkreg_t esbc_v_bit : 1;
- bdrkreg_t esbc_r_bit : 1;
- bdrkreg_t esbc_a_bit : 1;
- bdrkreg_t esbc_w_bit : 1;
- bdrkreg_t esbc_h_bit : 1;
- bdrkreg_t esbc_i_bit : 1;
- bdrkreg_t esbc_t_bit : 1;
- bdrkreg_t esbc_e_bits : 2;
- bdrkreg_t esbc_wrb : 1;
- bdrkreg_t esbc_crb_num : 3;
- bdrkreg_t esbc_inval_count : 10;
- bdrkreg_t esbc_time_out_count : 8;
- bdrkreg_t esbc_spool_count : 21;
- } pi_err_status1_b_clr_fld_s;
-} pi_err_status1_b_clr_u_t;
-
-#endif
-
-
-
-
-/************************************************************************
- * *
- * There is one of these registers for each CPU. *
- * *
- ************************************************************************/
-
-
-
-
-#ifdef LITTLE_ENDIAN
-
-typedef union pi_spool_cmp_a_u {
- bdrkreg_t pi_spool_cmp_a_regval;
- struct {
- bdrkreg_t sca_compare : 20;
- bdrkreg_t sca_rsvd : 44;
- } pi_spool_cmp_a_fld_s;
-} pi_spool_cmp_a_u_t;
-
-#else
-
-typedef union pi_spool_cmp_a_u {
- bdrkreg_t pi_spool_cmp_a_regval;
- struct {
- bdrkreg_t sca_rsvd : 44;
- bdrkreg_t sca_compare : 20;
- } pi_spool_cmp_a_fld_s;
-} pi_spool_cmp_a_u_t;
-
-#endif
-
-
-
-
-/************************************************************************
- * *
- * There is one of these registers for each CPU. *
- * *
- ************************************************************************/
-
-
-
-
-#ifdef LITTLE_ENDIAN
-
-typedef union pi_spool_cmp_b_u {
- bdrkreg_t pi_spool_cmp_b_regval;
- struct {
- bdrkreg_t scb_compare : 20;
- bdrkreg_t scb_rsvd : 44;
- } pi_spool_cmp_b_fld_s;
-} pi_spool_cmp_b_u_t;
-
-#else
-
-typedef union pi_spool_cmp_b_u {
- bdrkreg_t pi_spool_cmp_b_regval;
- struct {
- bdrkreg_t scb_rsvd : 44;
- bdrkreg_t scb_compare : 20;
- } pi_spool_cmp_b_fld_s;
-} pi_spool_cmp_b_u_t;
-
-#endif
-
-
-
-
-/************************************************************************
- * *
- * There is one of these registers for each CPU. A timeout can be *
- * forced by writing one(s). *
- * *
- ************************************************************************/
-
-
-
-
-#ifdef LITTLE_ENDIAN
-
-typedef union pi_crb_timeout_a_u {
- bdrkreg_t pi_crb_timeout_a_regval;
- struct {
- bdrkreg_t cta_rrb : 4;
- bdrkreg_t cta_wrb : 8;
- bdrkreg_t cta_rsvd : 52;
- } pi_crb_timeout_a_fld_s;
-} pi_crb_timeout_a_u_t;
-
-#else
-
-typedef union pi_crb_timeout_a_u {
- bdrkreg_t pi_crb_timeout_a_regval;
- struct {
- bdrkreg_t cta_rsvd : 52;
- bdrkreg_t cta_wrb : 8;
- bdrkreg_t cta_rrb : 4;
- } pi_crb_timeout_a_fld_s;
-} pi_crb_timeout_a_u_t;
-
-#endif
-
-
-
-
-/************************************************************************
- * *
- * There is one of these registers for each CPU. A timeout can be *
- * forced by writing one(s). *
- * *
- ************************************************************************/
-
-
-
-
-#ifdef LITTLE_ENDIAN
-
-typedef union pi_crb_timeout_b_u {
- bdrkreg_t pi_crb_timeout_b_regval;
- struct {
- bdrkreg_t ctb_rrb : 4;
- bdrkreg_t ctb_wrb : 8;
- bdrkreg_t ctb_rsvd : 52;
- } pi_crb_timeout_b_fld_s;
-} pi_crb_timeout_b_u_t;
-
-#else
-
-typedef union pi_crb_timeout_b_u {
- bdrkreg_t pi_crb_timeout_b_regval;
- struct {
- bdrkreg_t ctb_rsvd : 52;
- bdrkreg_t ctb_wrb : 8;
- bdrkreg_t ctb_rrb : 4;
- } pi_crb_timeout_b_fld_s;
-} pi_crb_timeout_b_u_t;
-
-#endif
-
-
-
-
-/************************************************************************
- * *
- * This register controls error checking and forwarding of SysAD *
- * errors. *
- * *
- ************************************************************************/
-
-
-
-
-#ifdef LITTLE_ENDIAN
-
-typedef union pi_sysad_errchk_en_u {
- bdrkreg_t pi_sysad_errchk_en_regval;
- struct {
- bdrkreg_t see_ecc_gen_en : 1;
- bdrkreg_t see_qual_gen_en : 1;
- bdrkreg_t see_sadp_chk_en : 1;
- bdrkreg_t see_cmdp_chk_en : 1;
- bdrkreg_t see_state_chk_en : 1;
- bdrkreg_t see_qual_chk_en : 1;
- bdrkreg_t see_rsvd : 58;
- } pi_sysad_errchk_en_fld_s;
-} pi_sysad_errchk_en_u_t;
-
-#else
-
-typedef union pi_sysad_errchk_en_u {
- bdrkreg_t pi_sysad_errchk_en_regval;
- struct {
- bdrkreg_t see_rsvd : 58;
- bdrkreg_t see_qual_chk_en : 1;
- bdrkreg_t see_state_chk_en : 1;
- bdrkreg_t see_cmdp_chk_en : 1;
- bdrkreg_t see_sadp_chk_en : 1;
- bdrkreg_t see_qual_gen_en : 1;
- bdrkreg_t see_ecc_gen_en : 1;
- } pi_sysad_errchk_en_fld_s;
-} pi_sysad_errchk_en_u_t;
-
-#endif
-
-
-
-
-/************************************************************************
- * *
- * There is one of these registers for each CPU. If any bit in this *
- * register is set, then whenever reply data arrives with the UE *
- * (uncorrectable error) indication set, the check-bits that are *
- * generated and sent to the SysAD will be inverted corresponding to *
- * the bits set in the register. This will also prevent the assertion *
- * of the data quality indicator. *
- * *
- ************************************************************************/
-
-
-
-
-#ifdef LITTLE_ENDIAN
-
-typedef union pi_force_bad_check_bit_a_u {
- bdrkreg_t pi_force_bad_check_bit_a_regval;
- struct {
- bdrkreg_t fbcba_bad_check_bit : 8;
- bdrkreg_t fbcba_rsvd : 56;
- } pi_force_bad_check_bit_a_fld_s;
-} pi_force_bad_check_bit_a_u_t;
-
-#else
-
-typedef union pi_force_bad_check_bit_a_u {
- bdrkreg_t pi_force_bad_check_bit_a_regval;
- struct {
- bdrkreg_t fbcba_rsvd : 56;
- bdrkreg_t fbcba_bad_check_bit : 8;
- } pi_force_bad_check_bit_a_fld_s;
-} pi_force_bad_check_bit_a_u_t;
-
-#endif
-
-
-
-
-/************************************************************************
- * *
- * There is one of these registers for each CPU. If any bit in this *
- * register is set, then whenever reply data arrives with the UE *
- * (uncorrectable error) indication set, the check-bits that are *
- * generated and sent to the SysAD will be inverted corresponding to *
- * the bits set in the register. This will also prevent the assertion *
- * of the data quality indicator. *
- * *
- ************************************************************************/
-
-
-
-
-#ifdef LITTLE_ENDIAN
-
-typedef union pi_force_bad_check_bit_b_u {
- bdrkreg_t pi_force_bad_check_bit_b_regval;
- struct {
- bdrkreg_t fbcbb_bad_check_bit : 8;
- bdrkreg_t fbcbb_rsvd : 56;
- } pi_force_bad_check_bit_b_fld_s;
-} pi_force_bad_check_bit_b_u_t;
-
-#else
-
-typedef union pi_force_bad_check_bit_b_u {
- bdrkreg_t pi_force_bad_check_bit_b_regval;
- struct {
- bdrkreg_t fbcbb_rsvd : 56;
- bdrkreg_t fbcbb_bad_check_bit : 8;
- } pi_force_bad_check_bit_b_fld_s;
-} pi_force_bad_check_bit_b_u_t;
-
-#endif
-
-
-
-
-/************************************************************************
- * *
- * There is one of these registers for each CPU. When a counter is *
- * enabled, it increments each time a DNACK reply is received. The *
- * counter is cleared when any other reply is received. The register *
- * is cleared when the CNT_EN bit is zero. If a DNACK reply is *
- * received when the counter equals the value in the NACK_CMP *
- * register, the counter is cleared, an error response is sent to the *
- * CPU instead of a nack response, and the NACK_INT_A/B bit is set in *
- * INT_PEND1. *
- * *
- ************************************************************************/
-
-
-
-
-#ifdef LITTLE_ENDIAN
-
-typedef union pi_nack_cnt_a_u {
- bdrkreg_t pi_nack_cnt_a_regval;
- struct {
- bdrkreg_t nca_nack_cnt : 20;
- bdrkreg_t nca_cnt_en : 1;
- bdrkreg_t nca_rsvd : 43;
- } pi_nack_cnt_a_fld_s;
-} pi_nack_cnt_a_u_t;
-
-#else
-
-typedef union pi_nack_cnt_a_u {
- bdrkreg_t pi_nack_cnt_a_regval;
- struct {
- bdrkreg_t nca_rsvd : 43;
- bdrkreg_t nca_cnt_en : 1;
- bdrkreg_t nca_nack_cnt : 20;
- } pi_nack_cnt_a_fld_s;
-} pi_nack_cnt_a_u_t;
-
-#endif
-
-
-
-
-/************************************************************************
- * *
- * There is one of these registers for each CPU. When a counter is *
- * enabled, it increments each time a DNACK reply is received. The *
- * counter is cleared when any other reply is received. The register *
- * is cleared when the CNT_EN bit is zero. If a DNACK reply is *
- * received when the counter equals the value in the NACK_CMP *
- * register, the counter is cleared, an error response is sent to the *
- * CPU instead of a nack response, and the NACK_INT_A/B bit is set in *
- * INT_PEND1. *
- * *
- ************************************************************************/
-
-
-
-
-#ifdef LITTLE_ENDIAN
-
-typedef union pi_nack_cnt_b_u {
- bdrkreg_t pi_nack_cnt_b_regval;
- struct {
- bdrkreg_t ncb_nack_cnt : 20;
- bdrkreg_t ncb_cnt_en : 1;
- bdrkreg_t ncb_rsvd : 43;
- } pi_nack_cnt_b_fld_s;
-} pi_nack_cnt_b_u_t;
-
-#else
-
-typedef union pi_nack_cnt_b_u {
- bdrkreg_t pi_nack_cnt_b_regval;
- struct {
- bdrkreg_t ncb_rsvd : 43;
- bdrkreg_t ncb_cnt_en : 1;
- bdrkreg_t ncb_nack_cnt : 20;
- } pi_nack_cnt_b_fld_s;
-} pi_nack_cnt_b_u_t;
-
-#endif
-
-
-
-
-/************************************************************************
- * *
- * The setting of this register affects both CPUs on this PI. *
- * *
- ************************************************************************/
-
-
-
-
-#ifdef LITTLE_ENDIAN
-
-typedef union pi_nack_cmp_u {
- bdrkreg_t pi_nack_cmp_regval;
- struct {
- bdrkreg_t nc_nack_cmp : 20;
- bdrkreg_t nc_rsvd : 44;
- } pi_nack_cmp_fld_s;
-} pi_nack_cmp_u_t;
-
-#else
-
-typedef union pi_nack_cmp_u {
- bdrkreg_t pi_nack_cmp_regval;
- struct {
- bdrkreg_t nc_rsvd : 44;
- bdrkreg_t nc_nack_cmp : 20;
- } pi_nack_cmp_fld_s;
-} pi_nack_cmp_u_t;
-
-#endif
-
-
-
-
-/************************************************************************
- * *
- * This register controls which errors are spooled. When a bit in *
- * this register is set, the corresponding error is spooled. The *
- * setting of this register affects both CPUs on this PI. *
- * *
- ************************************************************************/
-
-
-
-
-#ifdef LITTLE_ENDIAN
-
-typedef union pi_spool_mask_u {
- bdrkreg_t pi_spool_mask_regval;
- struct {
- bdrkreg_t sm_access_err : 1;
- bdrkreg_t sm_uncached_err : 1;
- bdrkreg_t sm_dir_err : 1;
- bdrkreg_t sm_timeout_err : 1;
- bdrkreg_t sm_poison_err : 1;
- bdrkreg_t sm_nack_oflow_err : 1;
- bdrkreg_t sm_rsvd : 58;
- } pi_spool_mask_fld_s;
-} pi_spool_mask_u_t;
-
-#else
-
-typedef union pi_spool_mask_u {
- bdrkreg_t pi_spool_mask_regval;
- struct {
- bdrkreg_t sm_rsvd : 58;
- bdrkreg_t sm_nack_oflow_err : 1;
- bdrkreg_t sm_poison_err : 1;
- bdrkreg_t sm_timeout_err : 1;
- bdrkreg_t sm_dir_err : 1;
- bdrkreg_t sm_uncached_err : 1;
- bdrkreg_t sm_access_err : 1;
- } pi_spool_mask_fld_s;
-} pi_spool_mask_u_t;
-
-#endif
-
-
-
-
-/************************************************************************
- * *
- * This register is not cleared at reset. When the VALID bit is *
- * zero, this register (along with SPURIOUS_HDR_1) will capture the *
- * header of an incoming spurious message received from the XBar. A *
- * spurious message is a message that does not match up with any of *
- * the CRB entries. This is a read/write register, so it is cleared *
- * by writing of all zeros. *
- * *
- ************************************************************************/
-
-
-
-
-#ifdef LITTLE_ENDIAN
-
-typedef union pi_spurious_hdr_0_u {
- bdrkreg_t pi_spurious_hdr_0_regval;
- struct {
- bdrkreg_t sh0_prev_valid_b : 1;
- bdrkreg_t sh0_prev_valid_a : 1;
- bdrkreg_t sh0_rsvd : 4;
- bdrkreg_t sh0_supplemental : 11;
- bdrkreg_t sh0_cmd : 8;
- bdrkreg_t sh0_addr : 37;
- bdrkreg_t sh0_tail : 1;
- bdrkreg_t sh0_valid : 1;
- } pi_spurious_hdr_0_fld_s;
-} pi_spurious_hdr_0_u_t;
-
-#else
-
-typedef union pi_spurious_hdr_0_u {
- bdrkreg_t pi_spurious_hdr_0_regval;
- struct {
- bdrkreg_t sh0_valid : 1;
- bdrkreg_t sh0_tail : 1;
- bdrkreg_t sh0_addr : 37;
- bdrkreg_t sh0_cmd : 8;
- bdrkreg_t sh0_supplemental : 11;
- bdrkreg_t sh0_rsvd : 4;
- bdrkreg_t sh0_prev_valid_a : 1;
- bdrkreg_t sh0_prev_valid_b : 1;
- } pi_spurious_hdr_0_fld_s;
-} pi_spurious_hdr_0_u_t;
-
-#endif
-
-
-
-
-/************************************************************************
- * *
- * This register is not cleared at reset. When the VALID bit in *
- * SPURIOUS_HDR_0 is zero, this register (along with SPURIOUS_HDR_0) *
- * will capture the header of an incoming spurious message received *
- * from the XBar. A spurious message is a message that does not match *
- * up with any of the CRB entries. This is a read/write register, so *
- * it is cleared by writing of all zeros. *
- * *
- ************************************************************************/
-
-
-
-
-#ifdef LITTLE_ENDIAN
-
-typedef union pi_spurious_hdr_1_u {
- bdrkreg_t pi_spurious_hdr_1_regval;
- struct {
- bdrkreg_t sh1_rsvd : 53;
- bdrkreg_t sh1_source : 11;
- } pi_spurious_hdr_1_fld_s;
-} pi_spurious_hdr_1_u_t;
-
-#else
-
-typedef union pi_spurious_hdr_1_u {
- bdrkreg_t pi_spurious_hdr_1_regval;
- struct {
- bdrkreg_t sh1_source : 11;
- bdrkreg_t sh1_rsvd : 53;
- } pi_spurious_hdr_1_fld_s;
-} pi_spurious_hdr_1_u_t;
-
-#endif
-
-
-
-
-/************************************************************************
- * *
- * Description: This register controls the injection of errors in *
- * outbound SysAD transfers. When a write sets a bit in this *
- * register, the PI logic is "armed" to inject that error. At the *
- * first transfer of the specified type, the error is injected and *
- * the bit in this register is cleared. Writing to this register does *
- * not cause a transaction to occur. A bit in this register will *
- * remain set until a transaction of the specified type occurs as a *
- * result of normal system activity. This register can be polled to *
- * determine if an error has been injected or is still "armed". *
- * This register does not control injection of data quality bad *
- * indicator on a data cycle. This type of error can be created by *
- * reading from a memory location that has an uncorrectable ECC *
- * error. *
- * *
- ************************************************************************/
-
-
-
-
-#ifdef LITTLE_ENDIAN
-
-typedef union pi_err_inject_u {
- bdrkreg_t pi_err_inject_regval;
- struct {
- bdrkreg_t ei_cmd_syscmd_par_a : 1;
- bdrkreg_t ei_data_syscmd_par_a : 1;
- bdrkreg_t ei_cmd_sysad_corecc_a : 1;
- bdrkreg_t ei_data_sysad_corecc_a : 1;
- bdrkreg_t ei_cmd_sysad_uncecc_a : 1;
- bdrkreg_t ei_data_sysad_uncecc_a : 1;
- bdrkreg_t ei_sysresp_par_a : 1;
- bdrkreg_t ei_reserved_1 : 25;
- bdrkreg_t ei_cmd_syscmd_par_b : 1;
- bdrkreg_t ei_data_syscmd_par_b : 1;
- bdrkreg_t ei_cmd_sysad_corecc_b : 1;
- bdrkreg_t ei_data_sysad_corecc_b : 1;
- bdrkreg_t ei_cmd_sysad_uncecc_b : 1;
- bdrkreg_t ei_data_sysad_uncecc_b : 1;
- bdrkreg_t ei_sysresp_par_b : 1;
- bdrkreg_t ei_reserved : 25;
- } pi_err_inject_fld_s;
-} pi_err_inject_u_t;
-
-#else
-
-typedef union pi_err_inject_u {
- bdrkreg_t pi_err_inject_regval;
- struct {
- bdrkreg_t ei_reserved : 25;
- bdrkreg_t ei_sysresp_par_b : 1;
- bdrkreg_t ei_data_sysad_uncecc_b : 1;
- bdrkreg_t ei_cmd_sysad_uncecc_b : 1;
- bdrkreg_t ei_data_sysad_corecc_b : 1;
- bdrkreg_t ei_cmd_sysad_corecc_b : 1;
- bdrkreg_t ei_data_syscmd_par_b : 1;
- bdrkreg_t ei_cmd_syscmd_par_b : 1;
- bdrkreg_t ei_reserved_1 : 25;
- bdrkreg_t ei_sysresp_par_a : 1;
- bdrkreg_t ei_data_sysad_uncecc_a : 1;
- bdrkreg_t ei_cmd_sysad_uncecc_a : 1;
- bdrkreg_t ei_data_sysad_corecc_a : 1;
- bdrkreg_t ei_cmd_sysad_corecc_a : 1;
- bdrkreg_t ei_data_syscmd_par_a : 1;
- bdrkreg_t ei_cmd_syscmd_par_a : 1;
- } pi_err_inject_fld_s;
-} pi_err_inject_u_t;
-
-#endif
-
-
-
-
-/************************************************************************
- * *
- * This Read/Write location determines at what point the TRex+ is *
- * stopped from issuing requests, based on the number of entries in *
- * the incoming reply FIFO. When the number of entries in the Reply *
- * FIFO is greater than the value of this register, the PI will *
- * deassert both SysWrRdy and SysRdRdy to both processors. The Reply *
- * FIFO has a depth of 0x3F entries, so setting this register to 0x3F *
- * effectively disables this feature, allowing requests to be issued *
- * always. Setting this register to 0x00 effectively lowers the *
- * TRex+'s priority below the reply FIFO, disabling TRex+ requests *
- * any time there is an entry waiting in the incoming FIFO.This *
- * register is in its own 64KB page so that it can be mapped to user *
- * space. *
- * *
- ************************************************************************/
-
-
-
-
-#ifdef LITTLE_ENDIAN
-
-typedef union pi_reply_level_u {
- bdrkreg_t pi_reply_level_regval;
- struct {
- bdrkreg_t rl_reply_level : 6;
- bdrkreg_t rl_rsvd : 58;
- } pi_reply_level_fld_s;
-} pi_reply_level_u_t;
-
-#else
-
-typedef union pi_reply_level_u {
- bdrkreg_t pi_reply_level_regval;
- struct {
- bdrkreg_t rl_rsvd : 58;
- bdrkreg_t rl_reply_level : 6;
- } pi_reply_level_fld_s;
-} pi_reply_level_u_t;
-
-#endif
-
-
-
-
-/************************************************************************
- * *
- * This register is used to change the graphics credit counter *
- * operation from "Doubleword" mode to "Transaction" mode. This *
- * register is in its own 64KB page so that it can be mapped to user *
- * space. *
- * *
- ************************************************************************/
-
-
-
-
-#ifdef LITTLE_ENDIAN
-
-typedef union pi_gfx_credit_mode_u {
- bdrkreg_t pi_gfx_credit_mode_regval;
- struct {
- bdrkreg_t gcm_trans_mode : 1;
- bdrkreg_t gcm_rsvd : 63;
- } pi_gfx_credit_mode_fld_s;
-} pi_gfx_credit_mode_u_t;
-
-#else
-
-typedef union pi_gfx_credit_mode_u {
- bdrkreg_t pi_gfx_credit_mode_regval;
- struct {
- bdrkreg_t gcm_rsvd : 63;
- bdrkreg_t gcm_trans_mode : 1;
- } pi_gfx_credit_mode_fld_s;
-} pi_gfx_credit_mode_u_t;
-
-#endif
-
-
-
-/************************************************************************
- * *
- * This location contains a 55-bit read/write counter that wraps to *
- * zero when the maximum value is reached. This counter is *
- * incremented at each rising edge of the global clock (GCLK). This *
- * register is in its own 64KB page so that it can be mapped to user *
- * space. *
- * *
- ************************************************************************/
-
-
-
-
-#ifdef LITTLE_ENDIAN
-
-typedef union pi_rt_counter_u {
- bdrkreg_t pi_rt_counter_regval;
- struct {
- bdrkreg_t rc_count : 55;
- bdrkreg_t rc_rsvd : 9;
- } pi_rt_counter_fld_s;
-} pi_rt_counter_u_t;
-
-#else
-
-typedef union pi_rt_counter_u {
- bdrkreg_t pi_rt_counter_regval;
- struct {
- bdrkreg_t rc_rsvd : 9;
- bdrkreg_t rc_count : 55;
- } pi_rt_counter_fld_s;
-} pi_rt_counter_u_t;
-
-#endif
-
-
-
-
-/************************************************************************
- * *
- * This register controls the performance counters for one CPU. *
- * There are two counters for each CPU. Each counter can be *
- * configured to count a variety of events. The performance counter *
- * registers for each processor are in their own 64KB page so that *
- * they can be mapped to user space. *
- * *
- ************************************************************************/
-
-
-
-
-#ifdef LITTLE_ENDIAN
-
-typedef union pi_perf_cntl_a_u {
- bdrkreg_t pi_perf_cntl_a_regval;
- struct {
- bdrkreg_t pca_cntr_0_select : 28;
- bdrkreg_t pca_cntr_0_mode : 3;
- bdrkreg_t pca_cntr_0_enable : 1;
- bdrkreg_t pca_cntr_1_select : 28;
- bdrkreg_t pca_cntr_1_mode : 3;
- bdrkreg_t pca_cntr_1_enable : 1;
- } pi_perf_cntl_a_fld_s;
-} pi_perf_cntl_a_u_t;
-
-#else
-
-typedef union pi_perf_cntl_a_u {
- bdrkreg_t pi_perf_cntl_a_regval;
- struct {
- bdrkreg_t pca_cntr_1_enable : 1;
- bdrkreg_t pca_cntr_1_mode : 3;
- bdrkreg_t pca_cntr_1_select : 28;
- bdrkreg_t pca_cntr_0_enable : 1;
- bdrkreg_t pca_cntr_0_mode : 3;
- bdrkreg_t pca_cntr_0_select : 28;
- } pi_perf_cntl_a_fld_s;
-} pi_perf_cntl_a_u_t;
-
-#endif
-
-
-
-
-/************************************************************************
- * *
- * This register accesses the performance counter 0 for each CPU. *
- * Each performance counter is 40-bits wide. On overflow, It wraps to *
- * zero, sets the overflow bit in this register, and sets the *
- * PERF_CNTR_OFLOW bit in the INT_PEND1 register. *
- * *
- ************************************************************************/
-
-
-
-
-#ifdef LITTLE_ENDIAN
-
-typedef union pi_perf_cntr0_a_u {
- bdrkreg_t pi_perf_cntr0_a_regval;
- struct {
- bdrkreg_t pca_count_value : 40;
- bdrkreg_t pca_overflow : 1;
- bdrkreg_t pca_rsvd : 23;
- } pi_perf_cntr0_a_fld_s;
-} pi_perf_cntr0_a_u_t;
-
-#else
-
-typedef union pi_perf_cntr0_a_u {
- bdrkreg_t pi_perf_cntr0_a_regval;
- struct {
- bdrkreg_t pca_rsvd : 23;
- bdrkreg_t pca_overflow : 1;
- bdrkreg_t pca_count_value : 40;
- } pi_perf_cntr0_a_fld_s;
-} pi_perf_cntr0_a_u_t;
-
-#endif
-
-
-
-
-/************************************************************************
- * *
- * This register accesses the performance counter 1for each CPU. *
- * Each performance counter is 40-bits wide. On overflow, It wraps to *
- * zero, sets the overflow bit in this register, and sets the *
- * PERF_CNTR_OFLOW bit in the INT_PEND1 register. *
- * *
- ************************************************************************/
-
-
-
-
-#ifdef LITTLE_ENDIAN
-
-typedef union pi_perf_cntr1_a_u {
- bdrkreg_t pi_perf_cntr1_a_regval;
- struct {
- bdrkreg_t pca_count_value : 40;
- bdrkreg_t pca_overflow : 1;
- bdrkreg_t pca_rsvd : 23;
- } pi_perf_cntr1_a_fld_s;
-} pi_perf_cntr1_a_u_t;
-
-#else
-
-typedef union pi_perf_cntr1_a_u {
- bdrkreg_t pi_perf_cntr1_a_regval;
- struct {
- bdrkreg_t pca_rsvd : 23;
- bdrkreg_t pca_overflow : 1;
- bdrkreg_t pca_count_value : 40;
- } pi_perf_cntr1_a_fld_s;
-} pi_perf_cntr1_a_u_t;
-
-#endif
-
-
-
-
-
-/************************************************************************
- * *
- * This register controls the performance counters for one CPU. *
- * There are two counters for each CPU. Each counter can be *
- * configured to count a variety of events. The performance counter *
- * registers for each processor are in their own 64KB page so that *
- * they can be mapped to user space. *
- * *
- ************************************************************************/
-
-
-
-
-#ifdef LITTLE_ENDIAN
-
-typedef union pi_perf_cntl_b_u {
- bdrkreg_t pi_perf_cntl_b_regval;
- struct {
- bdrkreg_t pcb_cntr_0_select : 28;
- bdrkreg_t pcb_cntr_0_mode : 3;
- bdrkreg_t pcb_cntr_0_enable : 1;
- bdrkreg_t pcb_cntr_1_select : 28;
- bdrkreg_t pcb_cntr_1_mode : 3;
- bdrkreg_t pcb_cntr_1_enable : 1;
- } pi_perf_cntl_b_fld_s;
-} pi_perf_cntl_b_u_t;
-
-#else
-
-typedef union pi_perf_cntl_b_u {
- bdrkreg_t pi_perf_cntl_b_regval;
- struct {
- bdrkreg_t pcb_cntr_1_enable : 1;
- bdrkreg_t pcb_cntr_1_mode : 3;
- bdrkreg_t pcb_cntr_1_select : 28;
- bdrkreg_t pcb_cntr_0_enable : 1;
- bdrkreg_t pcb_cntr_0_mode : 3;
- bdrkreg_t pcb_cntr_0_select : 28;
- } pi_perf_cntl_b_fld_s;
-} pi_perf_cntl_b_u_t;
-
-#endif
-
-
-
-
-/************************************************************************
- * *
- * This register accesses the performance counter 0 for each CPU. *
- * Each performance counter is 40-bits wide. On overflow, It wraps to *
- * zero, sets the overflow bit in this register, and sets the *
- * PERF_CNTR_OFLOW bit in the INT_PEND1 register. *
- * *
- ************************************************************************/
-
-
-
-
-#ifdef LITTLE_ENDIAN
-
-typedef union pi_perf_cntr0_b_u {
- bdrkreg_t pi_perf_cntr0_b_regval;
- struct {
- bdrkreg_t pcb_count_value : 40;
- bdrkreg_t pcb_overflow : 1;
- bdrkreg_t pcb_rsvd : 23;
- } pi_perf_cntr0_b_fld_s;
-} pi_perf_cntr0_b_u_t;
-
-#else
-
-typedef union pi_perf_cntr0_b_u {
- bdrkreg_t pi_perf_cntr0_b_regval;
- struct {
- bdrkreg_t pcb_rsvd : 23;
- bdrkreg_t pcb_overflow : 1;
- bdrkreg_t pcb_count_value : 40;
- } pi_perf_cntr0_b_fld_s;
-} pi_perf_cntr0_b_u_t;
-
-#endif
-
-
-
-
-/************************************************************************
- * *
- * This register accesses the performance counter 1for each CPU. *
- * Each performance counter is 40-bits wide. On overflow, It wraps to *
- * zero, sets the overflow bit in this register, and sets the *
- * PERF_CNTR_OFLOW bit in the INT_PEND1 register. *
- * *
- ************************************************************************/
-
-
-
-
-#ifdef LITTLE_ENDIAN
-
-typedef union pi_perf_cntr1_b_u {
- bdrkreg_t pi_perf_cntr1_b_regval;
- struct {
- bdrkreg_t pcb_count_value : 40;
- bdrkreg_t pcb_overflow : 1;
- bdrkreg_t pcb_rsvd : 23;
- } pi_perf_cntr1_b_fld_s;
-} pi_perf_cntr1_b_u_t;
-
-#else
-
-typedef union pi_perf_cntr1_b_u {
- bdrkreg_t pi_perf_cntr1_b_regval;
- struct {
- bdrkreg_t pcb_rsvd : 23;
- bdrkreg_t pcb_overflow : 1;
- bdrkreg_t pcb_count_value : 40;
- } pi_perf_cntr1_b_fld_s;
-} pi_perf_cntr1_b_u_t;
-
-#endif
-
-
-
-
-
-
-#endif /* __ASSEMBLY__ */
-
-/************************************************************************
- * *
- * MAKE ALL ADDITIONS AFTER THIS LINE *
- * *
- ************************************************************************/
-
-
-#define PI_GFX_OFFSET (PI_GFX_PAGE_B - PI_GFX_PAGE_A)
-#define PI_GFX_PAGE_ENABLE 0x0000010000000000LL
-
-
-#endif /* _ASM_IA64_SN_SN1_HUBPI_H */
+++ /dev/null
-/* $Id$
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Copyright (C) 1992 - 1997, 2000-2001 Silicon Graphics, Inc. All rights reserved.
- */
-#ifndef _ASM_IA64_SN_SN1_HUBPI_NEXT_H
-#define _ASM_IA64_SN_SN1_HUBPI_NEXT_H
-
-
-/* define for remote PI_1 space. It is always half of a node_addressspace
- * from PI_0. The normal REMOTE_HUB space for PI registers access
- * the PI_0 space, unless they are qualified by PI_1.
- */
-#define PI_0(x) (x)
-#define PI_1(x) ((x) + 0x200000)
-#define PIREG(x,sn) ((sn) ? PI_1(x) : PI_0(x))
-
-#define PI_MIN_STACK_SIZE 4096 /* For figuring out the size to set */
-#define PI_STACK_SIZE_SHFT 12 /* 4k */
-
-#define PI_STACKADDR_OFFSET (PI_ERR_STACK_ADDR_B - PI_ERR_STACK_ADDR_A)
-#define PI_ERRSTAT_OFFSET (PI_ERR_STATUS0_B - PI_ERR_STATUS0_A)
-#define PI_RDCLR_OFFSET (PI_ERR_STATUS0_A_RCLR - PI_ERR_STATUS0_A)
-/* these macros are correct, but fix their users to understand two PIs
- and 4 CPUs (slices) per bedrock */
-#define PI_INT_MASK_OFFSET (PI_INT_MASK0_B - PI_INT_MASK0_A)
-#define PI_INT_SET_OFFSET (PI_CC_PEND_CLR_B - PI_CC_PEND_CLR_A)
-#define PI_NMI_OFFSET (PI_NMI_B - PI_NMI_A)
-
-#define ERR_STACK_SIZE_BYTES(_sz) \
- ((_sz) ? (PI_MIN_STACK_SIZE << ((_sz) - 1)) : 0)
-
-#define PI_CRB_STS_P (1 << 9) /* "P" (partial word read/write) bit */
-#define PI_CRB_STS_V (1 << 8) /* "V" (valid) bit */
-#define PI_CRB_STS_R (1 << 7) /* "R" (response data sent to CPU) */
-#define PI_CRB_STS_A (1 << 6) /* "A" (data ack. received) bit */
-#define PI_CRB_STS_W (1 << 5) /* "W" (waiting for write compl.) */
-#define PI_CRB_STS_H (1 << 4) /* "H" (gathering invalidates) bit */
-#define PI_CRB_STS_I (1 << 3) /* "I" (targ. inbound invalidate) */
-#define PI_CRB_STS_T (1 << 2) /* "T" (targ. inbound intervention) */
-#define PI_CRB_STS_E (0x3) /* "E" (coherent read type) */
-
-/* When the "P" bit is set in the sk_crb_sts field of an error stack
- * entry, the "R," "A," "H," and "I" bits are actually bits 6..3 of
- * the address. This macro extracts those address bits and shifts
- * them to their proper positions, ready to be ORed in to the rest of
- * the address (which is calculated as sk_addr << 7).
- */
-#define PI_CRB_STS_ADDR_BITS(sts) \
- ((sts) & (PI_CRB_STS_I | PI_CRB_STS_H) | \
- ((sts) & (PI_CRB_STS_A | PI_CRB_STS_R)) >> 1)
-
-#ifndef __ASSEMBLY__
-/*
- * format of error stack and error status registers.
- */
-
-#ifdef LITTLE_ENDIAN
-
-struct err_stack_format {
- uint64_t sk_err_type: 3, /* error type */
- sk_suppl : 3, /* lowest 3 bit of supplemental */
- sk_t5_req : 3, /* RRB T5 request number */
- sk_crb_num : 3, /* WRB (0 to 7) or RRB (0 to 4) */
- sk_rw_rb : 1, /* RRB == 0, WRB == 1 */
- sk_crb_sts : 10, /* status from RRB or WRB */
- sk_cmd : 8, /* message command */
- sk_addr : 33; /* address */
-};
-
-#else
-
-struct err_stack_format {
- uint64_t sk_addr : 33, /* address */
- sk_cmd : 8, /* message command */
- sk_crb_sts : 10, /* status from RRB or WRB */
- sk_rw_rb : 1, /* RRB == 0, WRB == 1 */
- sk_crb_num : 3, /* WRB (0 to 7) or RRB (0 to 4) */
- sk_t5_req : 3, /* RRB T5 request number */
- sk_suppl : 3, /* lowest 3 bit of supplemental */
- sk_err_type: 3; /* error type */
-};
-
-#endif
-
-typedef union pi_err_stack {
- uint64_t pi_stk_word;
- struct err_stack_format pi_stk_fmt;
-} pi_err_stack_t;
-
-/* Simplified version of pi_err_status0_a_u_t (PI_ERR_STATUS0_A) */
-#ifdef LITTLE_ENDIAN
-
-struct err_status0_format {
- uint64_t s0_err_type : 3, /* Encoded error cause */
- s0_proc_req_num : 3, /* Request number for RRB only */
- s0_supplemental : 11, /* ncoming message sup field */
- s0_cmd : 8, /* Incoming message command */
- s0_addr : 37, /* Address */
- s0_over_run : 1, /* Subsequent errors spooled */
- s0_valid : 1; /* error is valid */
-};
-
-#else
-
-struct err_status0_format {
- uint64_t s0_valid : 1, /* error is valid */
- s0_over_run : 1, /* Subsequent errors spooled */
- s0_addr : 37, /* Address */
- s0_cmd : 8, /* Incoming message command */
- s0_supplemental : 11, /* ncoming message sup field */
- s0_proc_req_num : 3, /* Request number for RRB only */
- s0_err_type : 3; /* Encoded error cause */
-};
-
-#endif
-
-
-typedef union pi_err_stat0 {
- uint64_t pi_stat0_word;
- struct err_status0_format pi_stat0_fmt;
-} pi_err_stat0_t;
-
-/* Simplified version of pi_err_status1_a_u_t (PI_ERR_STATUS1_A) */
-
-#ifdef LITTLE_ENDIAN
-
-struct err_status1_format {
- uint64_t s1_spl_cnt : 21, /* number spooled to memory */
- s1_to_cnt : 8, /* crb timeout counter */
- s1_inval_cnt:10, /* signed invalidate counter RRB */
- s1_crb_num : 3, /* WRB (0 to 7) or RRB (0 to 4) */
- s1_rw_rb : 1, /* RRB == 0, WRB == 1 */
- s1_crb_sts : 10, /* status from RRB or WRB */
- s1_src : 11; /* message source */
-};
-
-#else
-
-struct err_status1_format {
- uint64_t s1_src : 11, /* message source */
- s1_crb_sts : 10, /* status from RRB or WRB */
- s1_rw_rb : 1, /* RRB == 0, WRB == 1 */
- s1_crb_num : 3, /* WRB (0 to 7) or RRB (0 to 4) */
- s1_inval_cnt:10, /* signed invalidate counter RRB */
- s1_to_cnt : 8, /* crb timeout counter */
- s1_spl_cnt : 21; /* number spooled to memory */
-};
-
-#endif
-
-typedef union pi_err_stat1 {
- uint64_t pi_stat1_word;
- struct err_status1_format pi_stat1_fmt;
-} pi_err_stat1_t;
-#endif
-
-/* Error stack types (sk_err_type) for reads: */
-#define PI_ERR_RD_AERR 0 /* Read Access Error */
-#define PI_ERR_RD_PRERR 1 /* Uncached Partitial Read */
-#define PI_ERR_RD_DERR 2 /* Directory Error */
-#define PI_ERR_RD_TERR 3 /* read timeout */
-#define PI_ERR_RD_PERR 4 /* Poison Access Violation */
-#define PI_ERR_RD_NACK 5 /* Excessive NACKs */
-#define PI_ERR_RD_RDE 6 /* Response Data Error */
-#define PI_ERR_RD_PLERR 7 /* Packet Length Error */
-/* Error stack types (sk_err_type) for writes: */
-#define PI_ERR_WR_WERR 0 /* Write Access Error */
-#define PI_ERR_WR_PWERR 1 /* Uncached Write Error */
-#define PI_ERR_WR_TERR 3 /* write timeout */
-#define PI_ERR_WR_RDE 6 /* Response Data Error */
-#define PI_ERR_WR_PLERR 7 /* Packet Length Error */
-
-
-/* For backwards compatibility */
-#define PI_RT_COUNT PI_RT_COUNTER /* Real Time Counter */
-#define PI_RT_EN_A PI_RT_INT_EN_A /* RT int for CPU A enable */
-#define PI_RT_EN_B PI_RT_INT_EN_B /* RT int for CPU B enable */
-#define PI_PROF_EN_A PI_PROF_INT_EN_A /* PROF int for CPU A enable */
-#define PI_PROF_EN_B PI_PROF_INT_EN_B /* PROF int for CPU B enable */
-#define PI_RT_PEND_A PI_RT_INT_PEND_A /* RT interrupt pending */
-#define PI_RT_PEND_B PI_RT_INT_PEND_B /* RT interrupt pending */
-#define PI_PROF_PEND_A PI_PROF_INT_PEND_A /* Profiling interrupt pending */
-#define PI_PROF_PEND_B PI_PROF_INT_PEND_B /* Profiling interrupt pending */
-
-
-/* Bits in PI_SYSAD_ERRCHK_EN */
-#define PI_SYSAD_ERRCHK_ECCGEN 0x01 /* Enable ECC generation */
-#define PI_SYSAD_ERRCHK_QUALGEN 0x02 /* Enable data quality signal gen. */
-#define PI_SYSAD_ERRCHK_SADP 0x04 /* Enable SysAD parity checking */
-#define PI_SYSAD_ERRCHK_CMDP 0x08 /* Enable SysCmd parity checking */
-#define PI_SYSAD_ERRCHK_STATE 0x10 /* Enable SysState parity checking */
-#define PI_SYSAD_ERRCHK_QUAL 0x20 /* Enable data quality checking */
-#define PI_SYSAD_CHECK_ALL 0x3f /* Generate and check all signals. */
-
-/* CALIAS values */
-#define PI_CALIAS_SIZE_0 0
-#define PI_CALIAS_SIZE_4K 1
-#define PI_CALIAS_SIZE_8K 2
-#define PI_CALIAS_SIZE_16K 3
-#define PI_CALIAS_SIZE_32K 4
-#define PI_CALIAS_SIZE_64K 5
-#define PI_CALIAS_SIZE_128K 6
-#define PI_CALIAS_SIZE_256K 7
-#define PI_CALIAS_SIZE_512K 8
-#define PI_CALIAS_SIZE_1M 9
-#define PI_CALIAS_SIZE_2M 10
-#define PI_CALIAS_SIZE_4M 11
-#define PI_CALIAS_SIZE_8M 12
-#define PI_CALIAS_SIZE_16M 13
-#define PI_CALIAS_SIZE_32M 14
-#define PI_CALIAS_SIZE_64M 15
-
-/* Fields in PI_ERR_STATUS0_[AB] */
-#define PI_ERR_ST0_VALID_MASK 0x8000000000000000
-#define PI_ERR_ST0_VALID_SHFT 63
-
-/* Fields in PI_SPURIOUS_HDR_0 */
-#define PI_SPURIOUS_HDR_VALID_MASK 0x8000000000000000
-#define PI_SPURIOUS_HDR_VALID_SHFT 63
-
-/* Fields in PI_NACK_CNT_A/B */
-#define PI_NACK_CNT_EN_SHFT 20
-#define PI_NACK_CNT_EN_MASK 0x100000
-#define PI_NACK_CNT_MASK 0x0fffff
-#define PI_NACK_CNT_MAX 0x0fffff
-
-/* Bits in PI_ERR_INT_PEND */
-#define PI_ERR_SPOOL_CMP_B 0x000000001 /* Spool end hit high water */
-#define PI_ERR_SPOOL_CMP_A 0x000000002
-#define PI_ERR_SPUR_MSG_B 0x000000004 /* Spurious message intr. */
-#define PI_ERR_SPUR_MSG_A 0x000000008
-#define PI_ERR_WRB_TERR_B 0x000000010 /* WRB TERR */
-#define PI_ERR_WRB_TERR_A 0x000000020
-#define PI_ERR_WRB_WERR_B 0x000000040 /* WRB WERR */
-#define PI_ERR_WRB_WERR_A 0x000000080
-#define PI_ERR_SYSSTATE_B 0x000000100 /* SysState parity error */
-#define PI_ERR_SYSSTATE_A 0x000000200
-#define PI_ERR_SYSAD_DATA_B 0x000000400 /* SysAD data parity error */
-#define PI_ERR_SYSAD_DATA_A 0x000000800
-#define PI_ERR_SYSAD_ADDR_B 0x000001000 /* SysAD addr parity error */
-#define PI_ERR_SYSAD_ADDR_A 0x000002000
-#define PI_ERR_SYSCMD_DATA_B 0x000004000 /* SysCmd data parity error */
-#define PI_ERR_SYSCMD_DATA_A 0x000008000
-#define PI_ERR_SYSCMD_ADDR_B 0x000010000 /* SysCmd addr parity error */
-#define PI_ERR_SYSCMD_ADDR_A 0x000020000
-#define PI_ERR_BAD_SPOOL_B 0x000040000 /* Error spooling to memory */
-#define PI_ERR_BAD_SPOOL_A 0x000080000
-#define PI_ERR_UNCAC_UNCORR_B 0x000100000 /* Uncached uncorrectable */
-#define PI_ERR_UNCAC_UNCORR_A 0x000200000
-#define PI_ERR_SYSSTATE_TAG_B 0x000400000 /* SysState tag parity error */
-#define PI_ERR_SYSSTATE_TAG_A 0x000800000
-#define PI_ERR_MD_UNCORR 0x001000000 /* Must be cleared in MD */
-#define PI_ERR_SYSAD_BAD_DATA_B 0x002000000 /* SysAD Data quality bad */
-#define PI_ERR_SYSAD_BAD_DATA_A 0x004000000
-#define PI_ERR_UE_CACHED_B 0x008000000 /* UE during cached load */
-#define PI_ERR_UE_CACHED_A 0x010000000
-#define PI_ERR_PKT_LEN_ERR_B 0x020000000 /* Xbar data too long/short */
-#define PI_ERR_PKT_LEN_ERR_A 0x040000000
-#define PI_ERR_IRB_ERR_B 0x080000000 /* Protocol error */
-#define PI_ERR_IRB_ERR_A 0x100000000
-#define PI_ERR_IRB_TIMEOUT_B 0x200000000 /* IRB_B got a timeout */
-#define PI_ERR_IRB_TIMEOUT_A 0x400000000
-
-#define PI_ERR_CLEAR_ALL_A 0x554aaaaaa
-#define PI_ERR_CLEAR_ALL_B 0x2aa555555
-
-
-/*
- * The following three macros define all possible error int pends.
- */
-
-#define PI_FATAL_ERR_CPU_A (PI_ERR_IRB_TIMEOUT_A | \
- PI_ERR_IRB_ERR_A | \
- PI_ERR_PKT_LEN_ERR_A | \
- PI_ERR_SYSSTATE_TAG_A | \
- PI_ERR_BAD_SPOOL_A | \
- PI_ERR_SYSCMD_ADDR_A | \
- PI_ERR_SYSCMD_DATA_A | \
- PI_ERR_SYSAD_ADDR_A | \
- PI_ERR_SYSAD_DATA_A | \
- PI_ERR_SYSSTATE_A)
-
-#define PI_MISC_ERR_CPU_A (PI_ERR_UE_CACHED_A | \
- PI_ERR_SYSAD_BAD_DATA_A| \
- PI_ERR_UNCAC_UNCORR_A | \
- PI_ERR_WRB_WERR_A | \
- PI_ERR_WRB_TERR_A | \
- PI_ERR_SPUR_MSG_A | \
- PI_ERR_SPOOL_CMP_A)
-
-#define PI_FATAL_ERR_CPU_B (PI_ERR_IRB_TIMEOUT_B | \
- PI_ERR_IRB_ERR_B | \
- PI_ERR_PKT_LEN_ERR_B | \
- PI_ERR_SYSSTATE_TAG_B | \
- PI_ERR_BAD_SPOOL_B | \
- PI_ERR_SYSCMD_ADDR_B | \
- PI_ERR_SYSCMD_DATA_B | \
- PI_ERR_SYSAD_ADDR_B | \
- PI_ERR_SYSAD_DATA_B | \
- PI_ERR_SYSSTATE_B)
-
-#define PI_MISC_ERR_CPU_B (PI_ERR_UE_CACHED_B | \
- PI_ERR_SYSAD_BAD_DATA_B| \
- PI_ERR_UNCAC_UNCORR_B | \
- PI_ERR_WRB_WERR_B | \
- PI_ERR_WRB_TERR_B | \
- PI_ERR_SPUR_MSG_B | \
- PI_ERR_SPOOL_CMP_B)
-
-#define PI_ERR_GENERIC (PI_ERR_MD_UNCORR)
-
-/* Values for PI_MAX_CRB_TIMEOUT and PI_CRB_SFACTOR */
-#define PMCT_MAX 0xff
-#define PCS_MAX 0xffffff
-
-/* pi_err_status0_a_u_t address shift */
-#define ERR_STAT0_ADDR_SHFT 3
-
-/* PI error read/write bit (RRB == 0, WRB == 1) */
-/* pi_err_status1_a_u_t.pi_err_status1_a_fld_s.esa_wrb */
-#define PI_ERR_RRB 0
-#define PI_ERR_WRB 1
-
-/* Error stack address shift, for use with pi_stk_fmt.sk_addr */
-#define ERR_STK_ADDR_SHFT 3
-
-#endif /* _ASM_IA64_SN_SN1_HUBPI_NEXT_H */
+++ /dev/null
-/*
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Copyright (C) 1992 - 1997, 2000-2001 Silicon Graphics, Inc. All rights reserved.
- */
-#ifndef _ASM_IA64_SN_SN1_HUBSPC_H
-#define _ASM_IA64_SN_SN1_HUBSPC_H
-
-typedef enum {
- HUBSPC_REFCOUNTERS,
- HUBSPC_PROM
-} hubspc_subdevice_t;
-
-
-/*
- * Reference Counters
- */
-
-extern int refcounters_attach(devfs_handle_t hub);
-
-#endif /* _ASM_IA64_SN_SN1_HUBSPC_H */
+++ /dev/null
-/*
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Copyright (C) 2000 - 2001 Silicon Graphics, Inc. All rights reserved.
- */
-
-#ifndef _ASM_IA64_SN_SN1_HUBSTAT_H
-#define _ASM_IA64_SN_SN1_HUBSTAT_H
-
-typedef int64_t hub_count_t;
-
-#define HUBSTAT_VERSION 1
-
-typedef struct hubstat_s {
- char hs_version; /* structure version */
- cnodeid_t hs_cnode; /* cnode of this hub */
- nasid_t hs_nasid; /* Nasid of same */
- int64_t hs_timebase; /* Time of first sample */
- int64_t hs_timestamp; /* Time of last sample */
- int64_t hs_per_minute; /* Ticks per minute */
-
- union {
- hubreg_t hs_niu_stat_rev_id; /* SN0: Status rev ID */
- hubreg_t hs_niu_port_status; /* SN1: Port status */
- } hs_niu;
-
- hub_count_t hs_ni_retry_errors; /* Total retry errors */
- hub_count_t hs_ni_sn_errors; /* Total sn errors */
- hub_count_t hs_ni_cb_errors; /* Total cb errors */
- int hs_ni_overflows; /* NI count overflows */
- hub_count_t hs_ii_sn_errors; /* Total sn errors */
- hub_count_t hs_ii_cb_errors; /* Total cb errors */
- int hs_ii_overflows; /* II count overflows */
-
- /*
- * Anything below this comment is intended for kernel internal-use
- * only and may be changed at any time.
- *
- * Any members that contain pointers or are conditionally compiled
- * need to be below here also.
- */
- int64_t hs_last_print; /* When we last printed */
- char hs_print; /* Should we print */
-
- char *hs_name; /* This hub's name */
- unsigned char hs_maint; /* Should we print to availmon */
-} hubstat_t;
-
-#define hs_ni_stat_rev_id hs_niu.hs_niu_stat_rev_id
-#define hs_ni_port_status hs_niu.hs_niu_port_status
-
-extern struct file_operations hub_mon_fops;
-
-#endif /* _ASM_IA64_SN_SN1_HUBSTAT_H */
+++ /dev/null
-/* $Id$
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Copyright (C) 1992 - 1997, 2000-2001 Silicon Graphics, Inc. All rights reserved.
- */
-#ifndef _ASM_IA64_SN_SN1_HUBXB_H
-#define _ASM_IA64_SN_SN1_HUBXB_H
-
-/************************************************************************
- * *
- * WARNING!!! WARNING!!! WARNING!!! WARNING!!! WARNING!!! *
- * *
- * This file is created by an automated script. Any (minimal) changes *
- * made manually to this file should be made with care. *
- * *
- * MAKE ALL ADDITIONS TO THE END OF THIS FILE *
- * *
- ************************************************************************/
-
-
-#define XB_PARMS 0x00700000 /*
- * Controls
- * crossbar-wide
- * parameters.
- */
-
-
-
-#define XB_SLOW_GNT 0x00700008 /*
- * Controls wavefront
- * arbiter grant
- * frequency, used to
- * slow XB grants
- */
-
-
-
-#define XB_SPEW_CONTROL 0x00700010 /*
- * Controls spew
- * settings (debug
- * only).
- */
-
-
-
-#define XB_IOQ_ARB_TRIGGER 0x00700018 /*
- * Controls IOQ
- * trigger level
- */
-
-
-
-#define XB_FIRST_ERROR 0x00700090 /*
- * Records the first
- * crossbar error
- * seen.
- */
-
-
-
-#define XB_POQ0_ERROR 0x00700020 /*
- * POQ0 error
- * register.
- */
-
-
-
-#define XB_PIQ0_ERROR 0x00700028 /*
- * PIQ0 error
- * register.
- */
-
-
-
-#define XB_POQ1_ERROR 0x00700030 /*
- * POQ1 error
- * register.
- */
-
-
-
-#define XB_PIQ1_ERROR 0x00700038 /*
- * PIQ1 error
- * register.
- */
-
-
-
-#define XB_MP0_ERROR 0x00700040 /*
- * MOQ for PI0 error
- * register.
- */
-
-
-
-#define XB_MP1_ERROR 0x00700048 /*
- * MOQ for PI1 error
- * register.
- */
-
-
-
-#define XB_MMQ_ERROR 0x00700050 /*
- * MOQ for misc. (LB,
- * NI, II) error
- * register.
- */
-
-
-
-#define XB_MIQ_ERROR 0x00700058 /*
- * MIQ error register,
- * addtional MIQ
- * errors are logged
- * in MD "Input
- * Error
- * Registers".
- */
-
-
-
-#define XB_NOQ_ERROR 0x00700060 /* NOQ error register. */
-
-
-
-#define XB_NIQ_ERROR 0x00700068 /* NIQ error register. */
-
-
-
-#define XB_IOQ_ERROR 0x00700070 /* IOQ error register. */
-
-
-
-#define XB_IIQ_ERROR 0x00700078 /* IIQ error register. */
-
-
-
-#define XB_LOQ_ERROR 0x00700080 /* LOQ error register. */
-
-
-
-#define XB_LIQ_ERROR 0x00700088 /* LIQ error register. */
-
-
-
-#define XB_DEBUG_DATA_CTL 0x00700098 /*
- * Debug Datapath
- * Select
- */
-
-
-
-#define XB_DEBUG_ARB_CTL 0x007000A0 /*
- * XB master debug
- * control
- */
-
-
-
-#define XB_POQ0_ERROR_CLEAR 0x00700120 /*
- * Clears
- * XB_POQ0_ERROR
- * register.
- */
-
-
-
-#define XB_PIQ0_ERROR_CLEAR 0x00700128 /*
- * Clears
- * XB_PIQ0_ERROR
- * register.
- */
-
-
-
-#define XB_POQ1_ERROR_CLEAR 0x00700130 /*
- * Clears
- * XB_POQ1_ERROR
- * register.
- */
-
-
-
-#define XB_PIQ1_ERROR_CLEAR 0x00700138 /*
- * Clears
- * XB_PIQ1_ERROR
- * register.
- */
-
-
-
-#define XB_MP0_ERROR_CLEAR 0x00700140 /*
- * Clears XB_MP0_ERROR
- * register.
- */
-
-
-
-#define XB_MP1_ERROR_CLEAR 0x00700148 /*
- * Clears XB_MP1_ERROR
- * register.
- */
-
-
-
-#define XB_MMQ_ERROR_CLEAR 0x00700150 /*
- * Clears XB_MMQ_ERROR
- * register.
- */
-
-
-
-#define XB_XM_MIQ_ERROR_CLEAR 0x00700158 /*
- * Clears XB_MIQ_ERROR
- * register
- */
-
-
-
-#define XB_NOQ_ERROR_CLEAR 0x00700160 /*
- * Clears XB_NOQ_ERROR
- * register.
- */
-
-
-
-#define XB_NIQ_ERROR_CLEAR 0x00700168 /*
- * Clears XB_NIQ_ERROR
- * register.
- */
-
-
-
-#define XB_IOQ_ERROR_CLEAR 0x00700170 /*
- * Clears XB_IOQ
- * _ERROR register.
- */
-
-
-
-#define XB_IIQ_ERROR_CLEAR 0x00700178 /*
- * Clears XB_IIQ
- * _ERROR register.
- */
-
-
-
-#define XB_LOQ_ERROR_CLEAR 0x00700180 /*
- * Clears XB_LOQ_ERROR
- * register.
- */
-
-
-
-#define XB_LIQ_ERROR_CLEAR 0x00700188 /*
- * Clears XB_LIQ_ERROR
- * register.
- */
-
-
-
-#define XB_FIRST_ERROR_CLEAR 0x00700190 /*
- * Clears
- * XB_FIRST_ERROR
- * register
- */
-
-
-
-
-
-#ifndef __ASSEMBLY__
-
-/************************************************************************
- * *
- * Access to parameters which control various aspects of the *
- * crossbar's operation. *
- * *
- ************************************************************************/
-
-
-
-
-#ifdef LITTLE_ENDIAN
-
-typedef union xb_parms_u {
- bdrkreg_t xb_parms_regval;
- struct {
- bdrkreg_t p_byp_en : 1;
- bdrkreg_t p_rsrvd_1 : 3;
- bdrkreg_t p_age_wrap : 8;
- bdrkreg_t p_deadlock_to_wrap : 20;
- bdrkreg_t p_tail_to_wrap : 20;
- bdrkreg_t p_rsrvd : 12;
- } xb_parms_fld_s;
-} xb_parms_u_t;
-
-#else
-
-typedef union xb_parms_u {
- bdrkreg_t xb_parms_regval;
- struct {
- bdrkreg_t p_rsrvd : 12;
- bdrkreg_t p_tail_to_wrap : 20;
- bdrkreg_t p_deadlock_to_wrap : 20;
- bdrkreg_t p_age_wrap : 8;
- bdrkreg_t p_rsrvd_1 : 3;
- bdrkreg_t p_byp_en : 1;
- } xb_parms_fld_s;
-} xb_parms_u_t;
-
-#endif
-
-
-
-
-/************************************************************************
- * *
- * Sets the period of wavefront grants given to each unit. The *
- * register's value corresponds to the number of cycles between each *
- * wavefront grant opportunity given to the requesting unit. If set *
- * to 0xF, no grants are given to this unit. If set to 0xE, the unit *
- * is granted at the slowest rate (sometimes called "molasses mode"). *
- * This feature can be used to apply backpressure to a unit's output *
- * queue(s). The setting does not affect bypass grants. *
- * *
- ************************************************************************/
-
-
-
-
-#ifdef LITTLE_ENDIAN
-
-typedef union xb_slow_gnt_u {
- bdrkreg_t xb_slow_gnt_regval;
- struct {
- bdrkreg_t sg_lb_slow_gnt : 4;
- bdrkreg_t sg_ii_slow_gnt : 4;
- bdrkreg_t sg_ni_slow_gnt : 4;
- bdrkreg_t sg_mmq_slow_gnt : 4;
- bdrkreg_t sg_mp1_slow_gnt : 4;
- bdrkreg_t sg_mp0_slow_gnt : 4;
- bdrkreg_t sg_pi1_slow_gnt : 4;
- bdrkreg_t sg_pi0_slow_gnt : 4;
- bdrkreg_t sg_rsrvd : 32;
- } xb_slow_gnt_fld_s;
-} xb_slow_gnt_u_t;
-
-#else
-
-typedef union xb_slow_gnt_u {
- bdrkreg_t xb_slow_gnt_regval;
- struct {
- bdrkreg_t sg_rsrvd : 32;
- bdrkreg_t sg_pi0_slow_gnt : 4;
- bdrkreg_t sg_pi1_slow_gnt : 4;
- bdrkreg_t sg_mp0_slow_gnt : 4;
- bdrkreg_t sg_mp1_slow_gnt : 4;
- bdrkreg_t sg_mmq_slow_gnt : 4;
- bdrkreg_t sg_ni_slow_gnt : 4;
- bdrkreg_t sg_ii_slow_gnt : 4;
- bdrkreg_t sg_lb_slow_gnt : 4;
- } xb_slow_gnt_fld_s;
-} xb_slow_gnt_u_t;
-
-#endif
-
-
-
-
-/************************************************************************
- * *
- * Enables snooping of internal crossbar traffic by spewing all *
- * traffic across a selected crossbar point to the PI1 port. Only one *
- * bit should be set at any one time, and any bit set will preclude *
- * using the P1 for anything but a debug connection. *
- * *
- ************************************************************************/
-
-
-
-
-#ifdef LITTLE_ENDIAN
-
-typedef union xb_spew_control_u {
- bdrkreg_t xb_spew_control_regval;
- struct {
- bdrkreg_t sc_snoop_liq : 1;
- bdrkreg_t sc_snoop_iiq : 1;
- bdrkreg_t sc_snoop_niq : 1;
- bdrkreg_t sc_snoop_miq : 1;
- bdrkreg_t sc_snoop_piq0 : 1;
- bdrkreg_t sc_snoop_loq : 1;
- bdrkreg_t sc_snoop_ioq : 1;
- bdrkreg_t sc_snoop_noq : 1;
- bdrkreg_t sc_snoop_mmq : 1;
- bdrkreg_t sc_snoop_mp0 : 1;
- bdrkreg_t sc_snoop_poq0 : 1;
- bdrkreg_t sc_rsrvd : 53;
- } xb_spew_control_fld_s;
-} xb_spew_control_u_t;
-
-#else
-
-typedef union xb_spew_control_u {
- bdrkreg_t xb_spew_control_regval;
- struct {
- bdrkreg_t sc_rsrvd : 53;
- bdrkreg_t sc_snoop_poq0 : 1;
- bdrkreg_t sc_snoop_mp0 : 1;
- bdrkreg_t sc_snoop_mmq : 1;
- bdrkreg_t sc_snoop_noq : 1;
- bdrkreg_t sc_snoop_ioq : 1;
- bdrkreg_t sc_snoop_loq : 1;
- bdrkreg_t sc_snoop_piq0 : 1;
- bdrkreg_t sc_snoop_miq : 1;
- bdrkreg_t sc_snoop_niq : 1;
- bdrkreg_t sc_snoop_iiq : 1;
- bdrkreg_t sc_snoop_liq : 1;
- } xb_spew_control_fld_s;
-} xb_spew_control_u_t;
-
-#endif
-
-
-
-
-/************************************************************************
- * *
- * Number of clocks the IOQ will wait before beginning XB *
- * arbitration. This is set so that the slower IOQ data rate can *
- * catch up up with the XB data rate in the IOQ buffer. *
- * *
- ************************************************************************/
-
-
-
-
-#ifdef LITTLE_ENDIAN
-
-typedef union xb_ioq_arb_trigger_u {
- bdrkreg_t xb_ioq_arb_trigger_regval;
- struct {
- bdrkreg_t iat_ioq_arb_trigger : 4;
- bdrkreg_t iat_rsrvd : 60;
- } xb_ioq_arb_trigger_fld_s;
-} xb_ioq_arb_trigger_u_t;
-
-#else
-
-typedef union xb_ioq_arb_trigger_u {
- bdrkreg_t xb_ioq_arb_trigger_regval;
- struct {
- bdrkreg_t iat_rsrvd : 60;
- bdrkreg_t iat_ioq_arb_trigger : 4;
- } xb_ioq_arb_trigger_fld_s;
-} xb_ioq_arb_trigger_u_t;
-
-#endif
-
-
-
-
-/************************************************************************
- * *
- * Records errors seen by POQ0.Can be written to test software, will *
- * cause an interrupt. *
- * *
- ************************************************************************/
-
-
-
-
-#ifdef LITTLE_ENDIAN
-
-typedef union xb_poq0_error_u {
- bdrkreg_t xb_poq0_error_regval;
- struct {
- bdrkreg_t pe_invalid_xsel : 2;
- bdrkreg_t pe_rsrvd_3 : 2;
- bdrkreg_t pe_overflow : 2;
- bdrkreg_t pe_rsrvd_2 : 2;
- bdrkreg_t pe_underflow : 2;
- bdrkreg_t pe_rsrvd_1 : 2;
- bdrkreg_t pe_tail_timeout : 2;
- bdrkreg_t pe_unused : 6;
- bdrkreg_t pe_rsrvd : 44;
- } xb_poq0_error_fld_s;
-} xb_poq0_error_u_t;
-
-#else
-
-typedef union xb_poq0_error_u {
- bdrkreg_t xb_poq0_error_regval;
- struct {
- bdrkreg_t pe_rsrvd : 44;
- bdrkreg_t pe_unused : 6;
- bdrkreg_t pe_tail_timeout : 2;
- bdrkreg_t pe_rsrvd_1 : 2;
- bdrkreg_t pe_underflow : 2;
- bdrkreg_t pe_rsrvd_2 : 2;
- bdrkreg_t pe_overflow : 2;
- bdrkreg_t pe_rsrvd_3 : 2;
- bdrkreg_t pe_invalid_xsel : 2;
- } xb_poq0_error_fld_s;
-} xb_poq0_error_u_t;
-
-#endif
-
-
-
-
-/************************************************************************
- * *
- * Records errors seen by PIQ0. Note that the PIQ/PI interface *
- * precludes PIQ underflow. *
- * *
- ************************************************************************/
-
-
-
-
-#ifdef LITTLE_ENDIAN
-
-typedef union xb_piq0_error_u {
- bdrkreg_t xb_piq0_error_regval;
- struct {
- bdrkreg_t pe_overflow : 2;
- bdrkreg_t pe_rsrvd_1 : 2;
- bdrkreg_t pe_deadlock_timeout : 2;
- bdrkreg_t pe_rsrvd : 58;
- } xb_piq0_error_fld_s;
-} xb_piq0_error_u_t;
-
-#else
-
-typedef union xb_piq0_error_u {
- bdrkreg_t xb_piq0_error_regval;
- struct {
- bdrkreg_t pe_rsrvd : 58;
- bdrkreg_t pe_deadlock_timeout : 2;
- bdrkreg_t pe_rsrvd_1 : 2;
- bdrkreg_t pe_overflow : 2;
- } xb_piq0_error_fld_s;
-} xb_piq0_error_u_t;
-
-#endif
-
-
-
-
-/************************************************************************
- * *
- * Records errors seen by MP0 queue (the MOQ for processor 0). Since *
- * the xselect is decoded on the MD/MOQ interface, no invalid xselect *
- * errors are possible. *
- * *
- ************************************************************************/
-
-
-
-
-#ifdef LITTLE_ENDIAN
-
-typedef union xb_mp0_error_u {
- bdrkreg_t xb_mp0_error_regval;
- struct {
- bdrkreg_t me_rsrvd_3 : 4;
- bdrkreg_t me_overflow : 2;
- bdrkreg_t me_rsrvd_2 : 2;
- bdrkreg_t me_underflow : 2;
- bdrkreg_t me_rsrvd_1 : 2;
- bdrkreg_t me_tail_timeout : 2;
- bdrkreg_t me_rsrvd : 50;
- } xb_mp0_error_fld_s;
-} xb_mp0_error_u_t;
-
-#else
-
-typedef union xb_mp0_error_u {
- bdrkreg_t xb_mp0_error_regval;
- struct {
- bdrkreg_t me_rsrvd : 50;
- bdrkreg_t me_tail_timeout : 2;
- bdrkreg_t me_rsrvd_1 : 2;
- bdrkreg_t me_underflow : 2;
- bdrkreg_t me_rsrvd_2 : 2;
- bdrkreg_t me_overflow : 2;
- bdrkreg_t me_rsrvd_3 : 4;
- } xb_mp0_error_fld_s;
-} xb_mp0_error_u_t;
-
-#endif
-
-
-
-
-/************************************************************************
- * *
- * Records errors seen by MIQ. *
- * *
- ************************************************************************/
-
-
-
-#ifdef LITTLE_ENDIAN
-
-typedef union xb_miq_error_u {
- bdrkreg_t xb_miq_error_regval;
- struct {
- bdrkreg_t me_rsrvd_1 : 4;
- bdrkreg_t me_deadlock_timeout : 4;
- bdrkreg_t me_rsrvd : 56;
- } xb_miq_error_fld_s;
-} xb_miq_error_u_t;
-
-#else
-
-typedef union xb_miq_error_u {
- bdrkreg_t xb_miq_error_regval;
- struct {
- bdrkreg_t me_rsrvd : 56;
- bdrkreg_t me_deadlock_timeout : 4;
- bdrkreg_t me_rsrvd_1 : 4;
- } xb_miq_error_fld_s;
-} xb_miq_error_u_t;
-
-#endif
-
-
-
-
-/************************************************************************
- * *
- * Records errors seen by NOQ. *
- * *
- ************************************************************************/
-
-
-
-
-#ifdef LITTLE_ENDIAN
-
-typedef union xb_noq_error_u {
- bdrkreg_t xb_noq_error_regval;
- struct {
- bdrkreg_t ne_rsvd : 4;
- bdrkreg_t ne_overflow : 4;
- bdrkreg_t ne_underflow : 4;
- bdrkreg_t ne_tail_timeout : 4;
- bdrkreg_t ne_rsrvd : 48;
- } xb_noq_error_fld_s;
-} xb_noq_error_u_t;
-
-#else
-
-typedef union xb_noq_error_u {
- bdrkreg_t xb_noq_error_regval;
- struct {
- bdrkreg_t ne_rsrvd : 48;
- bdrkreg_t ne_tail_timeout : 4;
- bdrkreg_t ne_underflow : 4;
- bdrkreg_t ne_overflow : 4;
- bdrkreg_t ne_rsvd : 4;
- } xb_noq_error_fld_s;
-} xb_noq_error_u_t;
-
-#endif
-
-
-
-
-/************************************************************************
- * *
- * Records errors seen by LOQ. *
- * *
- ************************************************************************/
-
-
-
-
-#ifdef LITTLE_ENDIAN
-
-typedef union xb_loq_error_u {
- bdrkreg_t xb_loq_error_regval;
- struct {
- bdrkreg_t le_invalid_xsel : 2;
- bdrkreg_t le_rsrvd_1 : 6;
- bdrkreg_t le_underflow : 2;
- bdrkreg_t le_rsvd : 2;
- bdrkreg_t le_tail_timeout : 2;
- bdrkreg_t le_rsrvd : 50;
- } xb_loq_error_fld_s;
-} xb_loq_error_u_t;
-
-#else
-
-typedef union xb_loq_error_u {
- bdrkreg_t xb_loq_error_regval;
- struct {
- bdrkreg_t le_rsrvd : 50;
- bdrkreg_t le_tail_timeout : 2;
- bdrkreg_t le_rsvd : 2;
- bdrkreg_t le_underflow : 2;
- bdrkreg_t le_rsrvd_1 : 6;
- bdrkreg_t le_invalid_xsel : 2;
- } xb_loq_error_fld_s;
-} xb_loq_error_u_t;
-
-#endif
-
-
-
-
-/************************************************************************
- * *
- * Records errors seen by LIQ. Note that the LIQ only records errors *
- * for the request channel. The reply channel can never deadlock or *
- * overflow because it does not have hardware flow control. *
- * *
- ************************************************************************/
-
-
-
-
-#ifdef LITTLE_ENDIAN
-
-typedef union xb_liq_error_u {
- bdrkreg_t xb_liq_error_regval;
- struct {
- bdrkreg_t le_overflow : 1;
- bdrkreg_t le_rsrvd_1 : 3;
- bdrkreg_t le_deadlock_timeout : 1;
- bdrkreg_t le_rsrvd : 59;
- } xb_liq_error_fld_s;
-} xb_liq_error_u_t;
-
-#else
-
-typedef union xb_liq_error_u {
- bdrkreg_t xb_liq_error_regval;
- struct {
- bdrkreg_t le_rsrvd : 59;
- bdrkreg_t le_deadlock_timeout : 1;
- bdrkreg_t le_rsrvd_1 : 3;
- bdrkreg_t le_overflow : 1;
- } xb_liq_error_fld_s;
-} xb_liq_error_u_t;
-
-#endif
-
-
-
-
-/************************************************************************
- * *
- * First error is latched whenever the Valid bit is clear and an *
- * error occurs. Any valid bit on in this register causes an *
- * interrupt to PI0 and PI1. This interrupt bit will persist until *
- * the specific error register to capture the error is cleared, then *
- * the FIRST_ERROR register is cleared (in that oder.) The *
- * FIRST_ERROR register is not writable, but will be set when any of *
- * the corresponding error registers are written by software. *
- * *
- ************************************************************************/
-
-
-
-
-#ifdef LITTLE_ENDIAN
-
-typedef union xb_first_error_u {
- bdrkreg_t xb_first_error_regval;
- struct {
- bdrkreg_t fe_type : 4;
- bdrkreg_t fe_channel : 4;
- bdrkreg_t fe_source : 4;
- bdrkreg_t fe_valid : 1;
- bdrkreg_t fe_rsrvd : 51;
- } xb_first_error_fld_s;
-} xb_first_error_u_t;
-
-#else
-
-typedef union xb_first_error_u {
- bdrkreg_t xb_first_error_regval;
- struct {
- bdrkreg_t fe_rsrvd : 51;
- bdrkreg_t fe_valid : 1;
- bdrkreg_t fe_source : 4;
- bdrkreg_t fe_channel : 4;
- bdrkreg_t fe_type : 4;
- } xb_first_error_fld_s;
-} xb_first_error_u_t;
-
-#endif
-
-
-
-
-/************************************************************************
- * *
- * Controls DEBUG_DATA mux setting. Allows user to watch the output *
- * of any OQ or input of any IQ on the DEBUG port. Note that bits *
- * 13:0 are one-hot. If more than one bit is set in [13:0], the debug *
- * output is undefined. Details on the debug output lines can be *
- * found in the XB chapter of the Bedrock Interface Specification. *
- * *
- ************************************************************************/
-
-
-
-
-#ifdef LITTLE_ENDIAN
-
-typedef union xb_debug_data_ctl_u {
- bdrkreg_t xb_debug_data_ctl_regval;
- struct {
- bdrkreg_t ddc_observe_liq_traffic : 1;
- bdrkreg_t ddc_observe_iiq_traffic : 1;
- bdrkreg_t ddc_observe_niq_traffic : 1;
- bdrkreg_t ddc_observe_miq_traffic : 1;
- bdrkreg_t ddc_observe_piq1_traffic : 1;
- bdrkreg_t ddc_observe_piq0_traffic : 1;
- bdrkreg_t ddc_observe_loq_traffic : 1;
- bdrkreg_t ddc_observe_ioq_traffic : 1;
- bdrkreg_t ddc_observe_noq_traffic : 1;
- bdrkreg_t ddc_observe_mp1_traffic : 1;
- bdrkreg_t ddc_observe_mp0_traffic : 1;
- bdrkreg_t ddc_observe_mmq_traffic : 1;
- bdrkreg_t ddc_observe_poq1_traffic : 1;
- bdrkreg_t ddc_observe_poq0_traffic : 1;
- bdrkreg_t ddc_observe_source_field : 1;
- bdrkreg_t ddc_observe_lodata : 1;
- bdrkreg_t ddc_rsrvd : 48;
- } xb_debug_data_ctl_fld_s;
-} xb_debug_data_ctl_u_t;
-
-#else
-
-typedef union xb_debug_data_ctl_u {
- bdrkreg_t xb_debug_data_ctl_regval;
- struct {
- bdrkreg_t ddc_rsrvd : 48;
- bdrkreg_t ddc_observe_lodata : 1;
- bdrkreg_t ddc_observe_source_field : 1;
- bdrkreg_t ddc_observe_poq0_traffic : 1;
- bdrkreg_t ddc_observe_poq1_traffic : 1;
- bdrkreg_t ddc_observe_mmq_traffic : 1;
- bdrkreg_t ddc_observe_mp0_traffic : 1;
- bdrkreg_t ddc_observe_mp1_traffic : 1;
- bdrkreg_t ddc_observe_noq_traffic : 1;
- bdrkreg_t ddc_observe_ioq_traffic : 1;
- bdrkreg_t ddc_observe_loq_traffic : 1;
- bdrkreg_t ddc_observe_piq0_traffic : 1;
- bdrkreg_t ddc_observe_piq1_traffic : 1;
- bdrkreg_t ddc_observe_miq_traffic : 1;
- bdrkreg_t ddc_observe_niq_traffic : 1;
- bdrkreg_t ddc_observe_iiq_traffic : 1;
- bdrkreg_t ddc_observe_liq_traffic : 1;
- } xb_debug_data_ctl_fld_s;
-} xb_debug_data_ctl_u_t;
-
-#endif
-
-
-
-
-/************************************************************************
- * *
- * Controls debug mux setting for XB Input/Output Queues and *
- * Arbiter. Can select one of the following values. Details on the *
- * debug output lines can be found in the XB chapter of the Bedrock *
- * Interface Specification. *
- * *
- ************************************************************************/
-
-
-
-
-#ifdef LITTLE_ENDIAN
-
-typedef union xb_debug_arb_ctl_u {
- bdrkreg_t xb_debug_arb_ctl_regval;
- struct {
- bdrkreg_t dac_xb_debug_select : 3;
- bdrkreg_t dac_rsrvd : 61;
- } xb_debug_arb_ctl_fld_s;
-} xb_debug_arb_ctl_u_t;
-
-#else
-
-typedef union xb_debug_arb_ctl_u {
- bdrkreg_t xb_debug_arb_ctl_regval;
- struct {
- bdrkreg_t dac_rsrvd : 61;
- bdrkreg_t dac_xb_debug_select : 3;
- } xb_debug_arb_ctl_fld_s;
-} xb_debug_arb_ctl_u_t;
-
-#endif
-
-
-
-
-/************************************************************************
- * *
- * Records errors seen by POQ0.Can be written to test software, will *
- * cause an interrupt. *
- * *
- ************************************************************************/
-
-
-
-
-#ifdef LITTLE_ENDIAN
-
-typedef union xb_poq0_error_clear_u {
- bdrkreg_t xb_poq0_error_clear_regval;
- struct {
- bdrkreg_t pec_invalid_xsel : 2;
- bdrkreg_t pec_rsrvd_3 : 2;
- bdrkreg_t pec_overflow : 2;
- bdrkreg_t pec_rsrvd_2 : 2;
- bdrkreg_t pec_underflow : 2;
- bdrkreg_t pec_rsrvd_1 : 2;
- bdrkreg_t pec_tail_timeout : 2;
- bdrkreg_t pec_unused : 6;
- bdrkreg_t pec_rsrvd : 44;
- } xb_poq0_error_clear_fld_s;
-} xb_poq0_error_clear_u_t;
-
-#else
-
-typedef union xb_poq0_error_clear_u {
- bdrkreg_t xb_poq0_error_clear_regval;
- struct {
- bdrkreg_t pec_rsrvd : 44;
- bdrkreg_t pec_unused : 6;
- bdrkreg_t pec_tail_timeout : 2;
- bdrkreg_t pec_rsrvd_1 : 2;
- bdrkreg_t pec_underflow : 2;
- bdrkreg_t pec_rsrvd_2 : 2;
- bdrkreg_t pec_overflow : 2;
- bdrkreg_t pec_rsrvd_3 : 2;
- bdrkreg_t pec_invalid_xsel : 2;
- } xb_poq0_error_clear_fld_s;
-} xb_poq0_error_clear_u_t;
-
-#endif
-
-
-
-
-/************************************************************************
- * *
- * Records errors seen by PIQ0. Note that the PIQ/PI interface *
- * precludes PIQ underflow. *
- * *
- ************************************************************************/
-
-
-
-
-#ifdef LITTLE_ENDIAN
-
-typedef union xb_piq0_error_clear_u {
- bdrkreg_t xb_piq0_error_clear_regval;
- struct {
- bdrkreg_t pec_overflow : 2;
- bdrkreg_t pec_rsrvd_1 : 2;
- bdrkreg_t pec_deadlock_timeout : 2;
- bdrkreg_t pec_rsrvd : 58;
- } xb_piq0_error_clear_fld_s;
-} xb_piq0_error_clear_u_t;
-
-#else
-
-typedef union xb_piq0_error_clear_u {
- bdrkreg_t xb_piq0_error_clear_regval;
- struct {
- bdrkreg_t pec_rsrvd : 58;
- bdrkreg_t pec_deadlock_timeout : 2;
- bdrkreg_t pec_rsrvd_1 : 2;
- bdrkreg_t pec_overflow : 2;
- } xb_piq0_error_clear_fld_s;
-} xb_piq0_error_clear_u_t;
-
-#endif
-
-
-
-
-/************************************************************************
- * *
- * Records errors seen by MP0 queue (the MOQ for processor 0). Since *
- * the xselect is decoded on the MD/MOQ interface, no invalid xselect *
- * errors are possible. *
- * *
- ************************************************************************/
-
-
-
-
-#ifdef LITTLE_ENDIAN
-
-typedef union xb_mp0_error_clear_u {
- bdrkreg_t xb_mp0_error_clear_regval;
- struct {
- bdrkreg_t mec_rsrvd_3 : 4;
- bdrkreg_t mec_overflow : 2;
- bdrkreg_t mec_rsrvd_2 : 2;
- bdrkreg_t mec_underflow : 2;
- bdrkreg_t mec_rsrvd_1 : 2;
- bdrkreg_t mec_tail_timeout : 2;
- bdrkreg_t mec_rsrvd : 50;
- } xb_mp0_error_clear_fld_s;
-} xb_mp0_error_clear_u_t;
-
-#else
-
-typedef union xb_mp0_error_clear_u {
- bdrkreg_t xb_mp0_error_clear_regval;
- struct {
- bdrkreg_t mec_rsrvd : 50;
- bdrkreg_t mec_tail_timeout : 2;
- bdrkreg_t mec_rsrvd_1 : 2;
- bdrkreg_t mec_underflow : 2;
- bdrkreg_t mec_rsrvd_2 : 2;
- bdrkreg_t mec_overflow : 2;
- bdrkreg_t mec_rsrvd_3 : 4;
- } xb_mp0_error_clear_fld_s;
-} xb_mp0_error_clear_u_t;
-
-#endif
-
-
-
-
-/************************************************************************
- * *
- * Records errors seen by MIQ. *
- * *
- ************************************************************************/
-
-
-
-
-#ifdef LITTLE_ENDIAN
-
-typedef union xb_xm_miq_error_clear_u {
- bdrkreg_t xb_xm_miq_error_clear_regval;
- struct {
- bdrkreg_t xmec_rsrvd_1 : 4;
- bdrkreg_t xmec_deadlock_timeout : 4;
- bdrkreg_t xmec_rsrvd : 56;
- } xb_xm_miq_error_clear_fld_s;
-} xb_xm_miq_error_clear_u_t;
-
-#else
-
-typedef union xb_xm_miq_error_clear_u {
- bdrkreg_t xb_xm_miq_error_clear_regval;
- struct {
- bdrkreg_t xmec_rsrvd : 56;
- bdrkreg_t xmec_deadlock_timeout : 4;
- bdrkreg_t xmec_rsrvd_1 : 4;
- } xb_xm_miq_error_clear_fld_s;
-} xb_xm_miq_error_clear_u_t;
-
-#endif
-
-
-
-
-/************************************************************************
- * *
- * Records errors seen by NOQ. *
- * *
- ************************************************************************/
-
-
-
-
-#ifdef LITTLE_ENDIAN
-
-typedef union xb_noq_error_clear_u {
- bdrkreg_t xb_noq_error_clear_regval;
- struct {
- bdrkreg_t nec_rsvd : 4;
- bdrkreg_t nec_overflow : 4;
- bdrkreg_t nec_underflow : 4;
- bdrkreg_t nec_tail_timeout : 4;
- bdrkreg_t nec_rsrvd : 48;
- } xb_noq_error_clear_fld_s;
-} xb_noq_error_clear_u_t;
-
-#else
-
-typedef union xb_noq_error_clear_u {
- bdrkreg_t xb_noq_error_clear_regval;
- struct {
- bdrkreg_t nec_rsrvd : 48;
- bdrkreg_t nec_tail_timeout : 4;
- bdrkreg_t nec_underflow : 4;
- bdrkreg_t nec_overflow : 4;
- bdrkreg_t nec_rsvd : 4;
- } xb_noq_error_clear_fld_s;
-} xb_noq_error_clear_u_t;
-
-#endif
-
-
-
-
-/************************************************************************
- * *
- * Records errors seen by LOQ. *
- * *
- ************************************************************************/
-
-
-
-
-#ifdef LITTLE_ENDIAN
-
-typedef union xb_loq_error_clear_u {
- bdrkreg_t xb_loq_error_clear_regval;
- struct {
- bdrkreg_t lec_invalid_xsel : 2;
- bdrkreg_t lec_rsrvd_1 : 6;
- bdrkreg_t lec_underflow : 2;
- bdrkreg_t lec_rsvd : 2;
- bdrkreg_t lec_tail_timeout : 2;
- bdrkreg_t lec_rsrvd : 50;
- } xb_loq_error_clear_fld_s;
-} xb_loq_error_clear_u_t;
-
-#else
-
-typedef union xb_loq_error_clear_u {
- bdrkreg_t xb_loq_error_clear_regval;
- struct {
- bdrkreg_t lec_rsrvd : 50;
- bdrkreg_t lec_tail_timeout : 2;
- bdrkreg_t lec_rsvd : 2;
- bdrkreg_t lec_underflow : 2;
- bdrkreg_t lec_rsrvd_1 : 6;
- bdrkreg_t lec_invalid_xsel : 2;
- } xb_loq_error_clear_fld_s;
-} xb_loq_error_clear_u_t;
-
-#endif
-
-
-
-
-/************************************************************************
- * *
- * Records errors seen by LIQ. Note that the LIQ only records errors *
- * for the request channel. The reply channel can never deadlock or *
- * overflow because it does not have hardware flow control. *
- * *
- ************************************************************************/
-
-
-
-
-#ifdef LITTLE_ENDIAN
-
-typedef union xb_liq_error_clear_u {
- bdrkreg_t xb_liq_error_clear_regval;
- struct {
- bdrkreg_t lec_overflow : 1;
- bdrkreg_t lec_rsrvd_1 : 3;
- bdrkreg_t lec_deadlock_timeout : 1;
- bdrkreg_t lec_rsrvd : 59;
- } xb_liq_error_clear_fld_s;
-} xb_liq_error_clear_u_t;
-
-#else
-
-typedef union xb_liq_error_clear_u {
- bdrkreg_t xb_liq_error_clear_regval;
- struct {
- bdrkreg_t lec_rsrvd : 59;
- bdrkreg_t lec_deadlock_timeout : 1;
- bdrkreg_t lec_rsrvd_1 : 3;
- bdrkreg_t lec_overflow : 1;
- } xb_liq_error_clear_fld_s;
-} xb_liq_error_clear_u_t;
-
-#endif
-
-
-
-
-/************************************************************************
- * *
- * First error is latched whenever the Valid bit is clear and an *
- * error occurs. Any valid bit on in this register causes an *
- * interrupt to PI0 and PI1. This interrupt bit will persist until *
- * the specific error register to capture the error is cleared, then *
- * the FIRST_ERROR register is cleared (in that oder.) The *
- * FIRST_ERROR register is not writable, but will be set when any of *
- * the corresponding error registers are written by software. *
- * *
- ************************************************************************/
-
-
-
-
-#ifdef LITTLE_ENDIAN
-
-typedef union xb_first_error_clear_u {
- bdrkreg_t xb_first_error_clear_regval;
- struct {
- bdrkreg_t fec_type : 4;
- bdrkreg_t fec_channel : 4;
- bdrkreg_t fec_source : 4;
- bdrkreg_t fec_valid : 1;
- bdrkreg_t fec_rsrvd : 51;
- } xb_first_error_clear_fld_s;
-} xb_first_error_clear_u_t;
-
-#else
-
-typedef union xb_first_error_clear_u {
- bdrkreg_t xb_first_error_clear_regval;
- struct {
- bdrkreg_t fec_rsrvd : 51;
- bdrkreg_t fec_valid : 1;
- bdrkreg_t fec_source : 4;
- bdrkreg_t fec_channel : 4;
- bdrkreg_t fec_type : 4;
- } xb_first_error_clear_fld_s;
-} xb_first_error_clear_u_t;
-
-#endif
-
-
-
-
-
-
-#endif /* __ASSEMBLY__ */
-
-/************************************************************************
- * *
- * The following defines were not formed into structures *
- * *
- * This could be because the document did not contain details of the *
- * register, or because the automated script did not recognize the *
- * register details in the documentation. If these register need *
- * structure definition, please create them manually *
- * *
- * XB_POQ1_ERROR 0x700030 *
- * XB_PIQ1_ERROR 0x700038 *
- * XB_MP1_ERROR 0x700048 *
- * XB_MMQ_ERROR 0x700050 *
- * XB_NIQ_ERROR 0x700068 *
- * XB_IOQ_ERROR 0x700070 *
- * XB_IIQ_ERROR 0x700078 *
- * XB_POQ1_ERROR_CLEAR 0x700130 *
- * XB_PIQ1_ERROR_CLEAR 0x700138 *
- * XB_MP1_ERROR_CLEAR 0x700148 *
- * XB_MMQ_ERROR_CLEAR 0x700150 *
- * XB_NIQ_ERROR_CLEAR 0x700168 *
- * XB_IOQ_ERROR_CLEAR 0x700170 *
- * XB_IIQ_ERROR_CLEAR 0x700178 *
- * *
- ************************************************************************/
-
-
-/************************************************************************
- * *
- * MAKE ALL ADDITIONS AFTER THIS LINE *
- * *
- ************************************************************************/
-
-
-
-
-
-#endif /* _ASM_IA64_SN_SN1_HUBXB_H */
+++ /dev/null
-/* $Id$
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Copyright (C) 1992 - 1997, 2000-2001 Silicon Graphics, Inc. All rights reserved.
- */
-#ifndef _ASM_IA64_SN_SN1_HUBXB_NEXT_H
-#define _ASM_IA64_SN_SN1_HUBXB_NEXT_H
-
-/* XB_FIRST_ERROR fe_source field encoding */
-#define XVE_SOURCE_POQ0 0xf /* 1111 */
-#define XVE_SOURCE_PIQ0 0xe /* 1110 */
-#define XVE_SOURCE_POQ1 0xd /* 1101 */
-#define XVE_SOURCE_PIQ1 0xc /* 1100 */
-#define XVE_SOURCE_MP0 0xb /* 1011 */
-#define XVE_SOURCE_MP1 0xa /* 1010 */
-#define XVE_SOURCE_MMQ 0x9 /* 1001 */
-#define XVE_SOURCE_MIQ 0x8 /* 1000 */
-#define XVE_SOURCE_NOQ 0x7 /* 0111 */
-#define XVE_SOURCE_NIQ 0x6 /* 0110 */
-#define XVE_SOURCE_IOQ 0x5 /* 0101 */
-#define XVE_SOURCE_IIQ 0x4 /* 0100 */
-#define XVE_SOURCE_LOQ 0x3 /* 0011 */
-#define XVE_SOURCE_LIQ 0x2 /* 0010 */
-
-/* XB_PARMS fields */
-#define XBP_RESET_DEFAULTS 0x0008000080000021LL
-#define XBP_ACTIVE_DEFAULTS 0x00080000fffff021LL
-
-#endif /* _ASM_IA64_SN_SN1_HUBXB_NEXT_H */
+++ /dev/null
-/*
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Copyright (C) 1992 - 1997, 2000-2001 Silicon Graphics, Inc. All rights reserved.
- */
-#ifndef _ASM_IA64_SN_SN1_HWCNTRS_H
-#define _ASM_IA64_SN_SN1_HWCNTRS_H
-
-
-typedef uint64_t refcnt_t;
-
-#define SN0_REFCNT_MAX_COUNTERS 64
-
-typedef struct sn0_refcnt_set {
- refcnt_t refcnt[SN0_REFCNT_MAX_COUNTERS];
- uint64_t flags;
- uint64_t reserved[4];
-} sn0_refcnt_set_t;
-
-typedef struct sn0_refcnt_buf {
- sn0_refcnt_set_t refcnt_set;
- uint64_t paddr;
- uint64_t page_size;
- cnodeid_t cnodeid; /* cnodeid + pad[3] use 64 bits */
- uint16_t pad[3];
- uint64_t reserved[4];
-} sn0_refcnt_buf_t;
-
-typedef struct sn0_refcnt_args {
- uint64_t vaddr;
- uint64_t len;
- sn0_refcnt_buf_t* buf;
- uint64_t reserved[4];
-} sn0_refcnt_args_t;
-
-/*
- * Info needed by the user level program
- * to mmap the refcnt buffer
- */
-
-#define RCB_INFO_GET 1
-#define RCB_SLOT_GET 2
-
-typedef struct rcb_info {
- uint64_t rcb_len; /* total refcnt buffer len in bytes */
-
- int rcb_sw_sets; /* number of sw counter sets in buffer */
- int rcb_sw_counters_per_set; /* sw counters per set -- num_compact_nodes */
- int rcb_sw_counter_size; /* sizeof(refcnt_t) -- size of sw cntr */
-
- int rcb_base_pages; /* number of base pages in node */
- int rcb_base_page_size; /* sw base page size */
- uint64_t rcb_base_paddr; /* base physical address for this node */
-
- int rcb_cnodeid; /* cnodeid for this node */
- int rcb_granularity; /* hw page size used for counter sets */
- uint rcb_hw_counter_max; /* max hwcounter count (width mask) */
- int rcb_diff_threshold; /* current node differential threshold */
- int rcb_abs_threshold; /* current node absolute threshold */
- int rcb_num_slots; /* physmem slots */
-
- int rcb_reserved[512];
-
-} rcb_info_t;
-
-typedef struct rcb_slot {
- uint64_t base;
- uint64_t size;
-} rcb_slot_t;
-
-#if defined(__KERNEL__)
-typedef struct sn0_refcnt_args_32 {
- uint64_t vaddr;
- uint64_t len;
- app32_ptr_t buf;
- uint64_t reserved[4];
-} sn0_refcnt_args_32_t;
-
-/* Defines and Macros */
-/* A set of reference counts are for 4k bytes of physical memory */
-#define NBPREFCNTP 0x1000
-#define BPREFCNTPSHIFT 12
-#define bytes_to_refcntpages(x) (((__psunsigned_t)(x)+(NBPREFCNTP-1))>>BPREFCNTPSHIFT)
-#define refcntpage_offset(x) ((__psunsigned_t)(x)&((NBPP-1)&~(NBPREFCNTP-1)))
-#define align_to_refcntpage(x) ((__psunsigned_t)(x)&(~(NBPREFCNTP-1)))
-
-extern void migr_refcnt_read(sn0_refcnt_buf_t*);
-extern void migr_refcnt_read_extended(sn0_refcnt_buf_t*);
-extern int migr_refcnt_enabled(void);
-
-#endif /* __KERNEL__ */
-
-#endif /* _ASM_IA64_SN_SN1_HWCNTRS_H */
+++ /dev/null
-/* $Id: intr.h,v 1.1 2002/02/28 17:31:25 marcelo Exp $
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Copyright (C) 1992 - 1997, 2000-2002 Silicon Graphics, Inc. All rights reserved.
- */
-#ifndef _ASM_IA64_SN_SN1_INTR_H
-#define _ASM_IA64_SN_SN1_INTR_H
-
-/* Subnode wildcard */
-#define SUBNODE_ANY (-1)
-
-/* Number of interrupt levels associated with each interrupt register. */
-#define N_INTPEND_BITS 64
-
-#define INT_PEND0_BASELVL 0
-#define INT_PEND1_BASELVL 64
-
-#define N_INTPENDJUNK_BITS 8
-#define INTPENDJUNK_CLRBIT 0x80
-
-#include <asm/sn/intr_public.h>
-#include <asm/sn/driver.h>
-#include <asm/sn/xtalk/xtalk.h>
-#include <asm/sn/hack.h>
-
-#ifndef __ASSEMBLY__
-#define II_NAMELEN 24
-
-/*
- * Dispatch table entry - contains information needed to call an interrupt
- * routine.
- */
-typedef struct intr_vector_s {
- intr_func_t iv_func; /* Interrupt handler function */
- intr_func_t iv_prefunc; /* Interrupt handler prologue func */
- void *iv_arg; /* Argument to pass to handler */
- cpuid_t iv_mustruncpu; /* Where we must run. */
-} intr_vector_t;
-
-/* Interrupt information table. */
-typedef struct intr_info_s {
- xtalk_intr_setfunc_t ii_setfunc; /* Function to set the interrupt
- * destination and level register.
- * It returns 0 (success) or an
- * error code.
- */
- void *ii_cookie; /* arg passed to setfunc */
- devfs_handle_t ii_owner_dev; /* device that owns this intr */
- char ii_name[II_NAMELEN]; /* Name of this intr. */
- int ii_flags; /* informational flags */
-} intr_info_t;
-
-
-#define THD_CREATED 0x00000001 /*
- * We've created a thread for this
- * interrupt.
- */
-
-/*
- * Bits for ii_flags:
- */
-#define II_UNRESERVE 0
-#define II_RESERVE 1 /* Interrupt reserved. */
-#define II_INUSE 2 /* Interrupt connected */
-#define II_ERRORINT 4 /* INterrupt is an error condition */
-#define II_THREADED 8 /* Interrupt handler is threaded. */
-
-/*
- * Interrupt level wildcard
- */
-#define INTRCONNECT_ANYBIT (-1)
-
-/*
- * This structure holds information needed both to call and to maintain
- * interrupts. The two are in separate arrays for the locality benefits.
- * Since there's only one set of vectors per hub chip (but more than one
- * CPU, the lock to change the vector tables must be here rather than in
- * the PDA.
- */
-
-typedef struct intr_vecblk_s {
- intr_vector_t vectors[N_INTPEND_BITS]; /* information needed to
- call an intr routine. */
- intr_info_t info[N_INTPEND_BITS]; /* information needed only
- to maintain interrupts. */
- spinlock_t vector_lock; /* Lock for this and the
- masks in the PDA. */
- splfunc_t vector_spl; /* vector_lock req'd spl */
- int vector_state; /* Initialized to zero.
- Set to INTR_INITED
- by hubintr_init.
- */
- int vector_count; /* Number of vectors
- * reserved.
- */
- int cpu_count[CPUS_PER_SUBNODE]; /* How many interrupts are
- * connected to each CPU
- */
- int ithreads_enabled; /* Are interrupt threads
- * initialized on this node.
- * and block?
- */
-} intr_vecblk_t;
-
-/* Possible values for vector_state: */
-#define VECTOR_UNINITED 0
-#define VECTOR_INITED 1
-#define VECTOR_SET 2
-
-#define hub_intrvect0 private.p_intmasks.dispatch0->vectors
-#define hub_intrvect1 private.p_intmasks.dispatch1->vectors
-#define hub_intrinfo0 private.p_intmasks.dispatch0->info
-#define hub_intrinfo1 private.p_intmasks.dispatch1->info
-
-/*
- * Macros to manipulate the interrupt register on the calling hub chip.
- */
-
-#define LOCAL_HUB_SEND_INTR(_level) LOCAL_HUB_S(PI_INT_PEND_MOD, \
- (0x100|(_level)))
-#define REMOTE_HUB_PI_SEND_INTR(_hub, _sn, _level) \
- REMOTE_HUB_PI_S((_hub), _sn, PI_INT_PEND_MOD, (0x100|(_level)))
-
-#define REMOTE_CPU_SEND_INTR(_cpuid, _level) \
- REMOTE_HUB_PI_S(cpuid_to_nasid(_cpuid), \
- SUBNODE(cpuid_to_slice(_cpuid)), \
- PI_INT_PEND_MOD, (0x100|(_level)))
-
-/*
- * When clearing the interrupt, make sure this clear does make it
- * to the hub. Otherwise we could end up losing interrupts.
- * We do an uncached load of the int_pend0 register to ensure this.
- */
-
-#define LOCAL_HUB_CLR_INTR(_level) \
- LOCAL_HUB_S(PI_INT_PEND_MOD, (_level)), \
- LOCAL_HUB_L(PI_INT_PEND0)
-#define REMOTE_HUB_PI_CLR_INTR(_hub, _sn, _level) \
- REMOTE_HUB_PI_S((_hub), (_sn), PI_INT_PEND_MOD, (_level)), \
- REMOTE_HUB_PI_L((_hub), (_sn), PI_INT_PEND0)
-
-/* Special support for use by gfx driver only. Supports special gfx hub interrupt. */
-extern void install_gfxintr(cpuid_t cpu, ilvl_t swlevel, intr_func_t intr_func, void *intr_arg);
-
-void setrtvector(intr_func_t func);
-
-/*
- * Interrupt blocking
- */
-extern void intr_block_bit(cpuid_t cpu, int bit);
-extern void intr_unblock_bit(cpuid_t cpu, int bit);
-
-#endif /* __ASSEMBLY__ */
-
-/*
- * Hard-coded interrupt levels:
- */
-
-/*
- * L0 = SW1
- * L1 = SW2
- * L2 = INT_PEND0
- * L3 = INT_PEND1
- * L4 = RTC
- * L5 = Profiling Timer
- * L6 = Hub Errors
- * L7 = Count/Compare (T5 counters)
- */
-
-
-/* INT_PEND0 hard-coded bits. */
-#ifdef DEBUG_INTR_TSTAMP
-/* hard coded interrupt level for interrupt latency test interrupt */
-#define CPU_INTRLAT_B 62
-#define CPU_INTRLAT_A 61
-#endif
-
-/* Hardcoded bits required by software. */
-#define MSC_MESG_INTR 9
-#define CPU_ACTION_B 8
-#define CPU_ACTION_A 7
-
-/* These are determined by hardware: */
-#define CC_PEND_B 6
-#define CC_PEND_A 5
-#define UART_INTR 4
-#define PG_MIG_INTR 3
-#define GFX_INTR_B 2
-#define GFX_INTR_A 1
-#define RESERVED_INTR 0
-
-/* INT_PEND1 hard-coded bits: */
-#define MSC_PANIC_INTR 63
-#define NI_ERROR_INTR 62
-#define MD_COR_ERR_INTR 61
-#define COR_ERR_INTR_B 60
-#define COR_ERR_INTR_A 59
-#define CLK_ERR_INTR 58
-
-# define NACK_INT_B 57
-# define NACK_INT_A 56
-# define LB_ERROR 55
-# define XB_ERROR 54
-
-#define BRIDGE_ERROR_INTR 53 /* Setup by PROM to catch Bridge Errors */
-
-#define IP27_INTR_0 52 /* Reserved for PROM use */
-#define IP27_INTR_1 51 /* (do not use in Kernel) */
-#define IP27_INTR_2 50
-#define IP27_INTR_3 49
-#define IP27_INTR_4 48
-#define IP27_INTR_5 47
-#define IP27_INTR_6 46
-#define IP27_INTR_7 45
-
-#define TLB_INTR_B 44 /* used for tlb flush random */
-#define TLB_INTR_A 43
-
-#define LLP_PFAIL_INTR_B 42 /* see ml/SN/SN0/sysctlr.c */
-#define LLP_PFAIL_INTR_A 41
-
-#define NI_BRDCAST_ERR_B 40
-#define NI_BRDCAST_ERR_A 39
-
-# define IO_ERROR_INTR 38 /* set up by prom */
-# define DEBUG_INTR_B 37 /* used by symmon to stop all cpus */
-# define DEBUG_INTR_A 36
-
-// These aren't strictly accurate or complete. See the
-// Synergy Spec. for details.
-#define SGI_UART_IRQ (65)
-#define SGI_HUB_ERROR_IRQ (182)
-
-#endif /* _ASM_IA64_SN_SN1_INTR_H */
+++ /dev/null
-/* $Id: intr_public.h,v 1.1 2002/02/28 17:31:25 marcelo Exp $
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Copyright (C) 1992 - 1997, 2000-2001 Silicon Graphics, Inc. All rights reserved.
- */
-#ifndef _ASM_IA64_SN_SN1_INTR_PUBLIC_H
-#define _ASM_IA64_SN_SN1_INTR_PUBLIC_H
-
-/* REMEMBER: If you change these, the whole world needs to be recompiled.
- * It would also require changing the hubspl.s code and SN0/intr.c
- * Currently, the spl code has no support for multiple INTPEND1 masks.
- */
-
-#define N_INTPEND0_MASKS 1
-#define N_INTPEND1_MASKS 1
-
-#define INTPEND0_MAXMASK (N_INTPEND0_MASKS - 1)
-#define INTPEND1_MAXMASK (N_INTPEND1_MASKS - 1)
-
-#ifndef __ASSEMBLY__
-#include <asm/sn/arch.h>
-
-struct intr_vecblk_s; /* defined in asm/sn/intr.h */
-
-/*
- * The following are necessary to create the illusion of a CEL
- * on the IP27 hub. We'll add more priority levels soon, but for
- * now, any interrupt in a particular band effectively does an spl.
- * These must be in the PDA since they're different for each processor.
- * Users of this structure must hold the vector_lock in the appropriate vector
- * block before modifying the mask arrays. There's only one vector block
- * for each Hub so a lock in the PDA wouldn't be adequate.
- */
-typedef struct hub_intmasks_s {
- /*
- * The masks are stored with the lowest-priority (most inclusive)
- * in the lowest-numbered masks (i.e., 0, 1, 2...).
- */
- /* INT_PEND0: */
- hubreg_t intpend0_masks[N_INTPEND0_MASKS];
- /* INT_PEND1: */
- hubreg_t intpend1_masks[N_INTPEND1_MASKS];
- /* INT_PEND0: */
- struct intr_vecblk_s *dispatch0;
- /* INT_PEND1: */
- struct intr_vecblk_s *dispatch1;
-} hub_intmasks_t;
-
-#endif /* __ASSEMBLY__ */
-#endif /* _ASM_IA64_SN_SN1_INTR_PUBLIC_H */
+++ /dev/null
-/* $Id$
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Copyright (C) 1992 - 1997, 2000-2002 Silicon Graphics, Inc. All rights reserved.
- */
-
-#ifndef _ASM_IA64_SN_SN1_IP27CONFIG_H
-#define _ASM_IA64_SN_SN1_IP27CONFIG_H
-
-
-/*
- * Structure: ip27config_s
- * Typedef: ip27config_t
- * Purpose: Maps out the region of the boot prom used to define
- * configuration information.
- * Notes: Corresponds to ip27config structure found in start.s.
- * Fields are ulong where possible to facilitate IP27 PROM fetches.
- */
-
-#define CONFIG_INFO_OFFSET 0x60
-
-#define IP27CONFIG_ADDR (LBOOT_BASE + \
- CONFIG_INFO_OFFSET)
-#define IP27CONFIG_ADDR_NODE(n) (NODE_RBOOT_BASE(n) + \
- CONFIG_INFO_OFFSET)
-
-/* Offset to the config_type field within local ip27config structure */
-#define CONFIG_FLAGS_ADDR (IP27CONFIG_ADDR + 72)
-/* Offset to the config_type field in the ip27config structure on
- * node with nasid n
- */
-#define CONFIG_FLAGS_ADDR_NODE(n) (IP27CONFIG_ADDR_NODE(n) + 72)
-
-/* Meaning of each valid bit in the config flags
- * None are currently defined
- */
-
-/* Meaning of each mach_type value
- */
-#define SN1_MACH_TYPE 0
-
-/*
- * Since 800 ns works well with various HUB frequencies, (such as 360,
- * 380, 390, and 400 MHZ), we now use 800ns rtc cycle time instead of
- * 1 microsec.
- */
-#define IP27_RTC_FREQ 1250 /* 800ns cycle time */
-
-#ifndef __ASSEMBLY__
-
-typedef struct ip27config_s { /* KEEP IN SYNC w/ start.s & below */
- uint time_const; /* Time constant */
- uint r10k_mode; /* R10k boot mode bits */
-
- uint64_t magic; /* CONFIG_MAGIC */
-
- uint64_t freq_cpu; /* Hz */
- uint64_t freq_hub; /* Hz */
- uint64_t freq_rtc; /* Hz */
-
- uint ecc_enable; /* ECC enable flag */
- uint fprom_cyc; /* FPROM_CYC speed control */
-
- uint mach_type; /* Inidicate IP27 (0) or Sn00 (1) */
-
- uint check_sum_adj; /* Used after config hdr overlay */
- /* to make the checksum 0 again */
- uint flash_count; /* Value incr'd on each PROM flash */
- uint fprom_wr; /* FPROM_WR speed control */
-
- uint pvers_vers; /* Prom version number */
- uint pvers_rev; /* Prom revision number */
- uint config_type; /* To support special configurations
- * (none currently defined)
- */
-} ip27config_t;
-
-typedef struct {
- uint r10k_mode; /* R10k boot mode bits */
- uint freq_cpu; /* Hz */
- uint freq_hub; /* Hz */
- char fprom_cyc; /* FPROM_CYC speed control */
- char mach_type; /* IP35(0) is only type defined */
- char fprom_wr; /* FPROM_WR speed control */
-} config_modifiable_t;
-
-#define IP27CONFIG (*(ip27config_t *) IP27CONFIG_ADDR)
-#define IP27CONFIG_NODE(n) (*(ip27config_t *) IP27CONFIG_ADDR_NODE(n))
-#define SN00 0 /* IP35 has no Speedo equivalent */
-
-/* Get the config flags from local ip27config */
-#define CONFIG_FLAGS (*(uint *) (CONFIG_FLAGS_ADDR))
-
-/* Get the config flags from ip27config on the node
- * with nasid n
- */
-#define CONFIG_FLAGS_NODE(n) (*(uint *) (CONFIG_FLAGS_ADDR_NODE(n)))
-
-/* Macro to check if the local ip27config indicates a config
- * of 12 p 4io
- */
-#define CONFIG_12P4I (0) /* IP35 has no 12p4i equivalent */
-
-/* Macro to check if the ip27config on node with nasid n
- * indicates a config of 12 p 4io
- */
-#define CONFIG_12P4I_NODE(n) (0)
-
-#endif /* __ASSEMBLY__ */
-
-#if __ASSEMBLY__
- .struct 0 /* KEEP IN SYNC WITH C structure */
-
-ip27c_time_const: .word 0
-ip27c_r10k_mode: .word 0
-
-ip27c_magic: .dword 0
-
-ip27c_freq_cpu: .dword 0
-ip27c_freq_hub: .dword 0
-ip27c_freq_rtc: .dword 0
-
-ip27c_ecc_enable: .word 1
-ip27c_fprom_cyc: .word 0
-
-ip27c_mach_type: .word 0
-ip27c_check_sum_adj: .word 0
-
-ip27c_flash_count: .word 0
-ip27c_fprom_wr: .word 0
-
-ip27c_pvers_vers: .word 0
-ip27c_pvers_rev: .word 0
-
-ip27c_config_type: .word 0 /* To recognize special configs */
-#endif /* __ASSEMBLY__ */
-
-/*
- * R10000 Configuration Cycle - These define the SYSAD values used
- * during the reset cycle.
- */
-
-#define IP27C_R10000_KSEG0CA_SHFT 0
-#define IP27C_R10000_KSEG0CA_MASK (7 << IP27C_R10000_KSEG0CA_SHFT)
-#define IP27C_R10000_KSEG0CA(_B) ((_B) << IP27C_R10000_KSEG0CA_SHFT)
-
-#define IP27C_R10000_DEVNUM_SHFT 3
-#define IP27C_R10000_DEVNUM_MASK (3 << IP27C_R10000_DEVNUM_SHFT)
-#define IP27C_R10000_DEVNUM(_B) ((_B) << IP27C_R10000_DEVNUM_SHFT)
-
-#define IP27C_R10000_CRPT_SHFT 5
-#define IP27C_R10000_CRPT_MASK (1 << IP27C_R10000_CRPT_SHFT)
-#define IP27C_R10000_CPRT(_B) ((_B)<<IP27C_R10000_CRPT_SHFT)
-
-#define IP27C_R10000_PER_SHFT 6
-#define IP27C_R10000_PER_MASK (1 << IP27C_R10000_PER_SHFT)
-#define IP27C_R10000_PER(_B) ((_B) << IP27C_R10000_PER_SHFT)
-
-#define IP27C_R10000_PRM_SHFT 7
-#define IP27C_R10000_PRM_MASK (3 << IP27C_R10000_PRM_SHFT)
-#define IP27C_R10000_PRM(_B) ((_B) << IP27C_R10000_PRM_SHFT)
-
-#define IP27C_R10000_SCD_SHFT 9
-#define IP27C_R10000_SCD_MASK (0xf << IP27C_R10000_SCD_MASK)
-#define IP27C_R10000_SCD(_B) ((_B) << IP27C_R10000_SCD_SHFT)
-
-#define IP27C_R10000_SCBS_SHFT 13
-#define IP27C_R10000_SCBS_MASK (1 << IP27C_R10000_SCBS_SHFT)
-#define IP27C_R10000_SCBS(_B) (((_B)) << IP27C_R10000_SCBS_SHFT)
-
-#define IP27C_R10000_SCCE_SHFT 14
-#define IP27C_R10000_SCCE_MASK (1 << IP27C_R10000_SCCE_SHFT)
-#define IP27C_R10000_SCCE(_B) ((_B) << IP27C_R10000_SCCE_SHFT)
-
-#define IP27C_R10000_ME_SHFT 15
-#define IP27C_R10000_ME_MASK (1 << IP27C_R10000_ME_SHFT)
-#define IP27C_R10000_ME(_B) ((_B) << IP27C_R10000_ME_SHFT)
-
-#define IP27C_R10000_SCS_SHFT 16
-#define IP27C_R10000_SCS_MASK (7 << IP27C_R10000_SCS_SHFT)
-#define IP27C_R10000_SCS(_B) ((_B) << IP27C_R10000_SCS_SHFT)
-
-#define IP27C_R10000_SCCD_SHFT 19
-#define IP27C_R10000_SCCD_MASK (7 << IP27C_R10000_SCCD_SHFT)
-#define IP27C_R10000_SCCD(_B) ((_B) << IP27C_R10000_SCCD_SHFT)
-
-#define IP27C_R10000_DDR_SHFT 23
-#define IP27C_R10000_DDR_MASK (1 << IP27C_R10000_DDR_SHFT)
-#define IP27C_R10000_DDR(_B) ((_B) << IP27C_R10000_DDR_SHFT)
-
-#define IP27C_R10000_SCCT_SHFT 25
-#define IP27C_R10000_SCCT_MASK (0xf << IP27C_R10000_SCCT_SHFT)
-#define IP27C_R10000_SCCT(_B) ((_B) << IP27C_R10000_SCCT_SHFT)
-
-#define IP27C_R10000_ODSC_SHFT 29
-#define IP27C_R10000_ODSC_MASK (1 << IP27C_R10000_ODSC_SHFT)
-#define IP27C_R10000_ODSC(_B) ((_B) << IP27C_R10000_ODSC_SHFT)
-
-#define IP27C_R10000_ODSYS_SHFT 30
-#define IP27C_R10000_ODSYS_MASK (1 << IP27C_R10000_ODSYS_SHFT)
-#define IP27C_R10000_ODSYS(_B) ((_B) << IP27C_R10000_ODSYS_SHFT)
-
-#define IP27C_R10000_CTM_SHFT 31
-#define IP27C_R10000_CTM_MASK (1 << IP27C_R10000_CTM_SHFT)
-#define IP27C_R10000_CTM(_B) ((_B) << IP27C_R10000_CTM_SHFT)
-
-#define IP27C_MHZ(x) (1000000 * (x))
-#define IP27C_KHZ(x) (1000 * (x))
-#define IP27C_MB(x) ((x) << 20)
-
-/*
- * PROM Configurations
- */
-
-#define CONFIG_MAGIC 0x69703237636f6e66
-
-/* The high 32 bits of the "mode bits". Bits 7..0 contain one more
- * than the number of 5ms clocks in the 100ms "long delay" intervals
- * of the TRex reset sequence. Bit 8 is the "synergy mode" bit.
- */
-#define CONFIG_TIME_CONST 0x15
-
-#define CONFIG_ECC_ENABLE 1
-#define CONFIG_CHECK_SUM_ADJ 0
-#define CONFIG_DEFAULT_FLASH_COUNT 0
-
-/*
- * Some promICEs have trouble if CONFIG_FPROM_SETUP is too low.
- * The nominal value for 100 MHz hub is 5, for 200MHz bedrock is 16.
- * any update to the below should also reflected in the logic in
- * IO7prom/flashprom.c function _verify_config_info and _fill_in_config_info
- */
-
-/* default junk bus timing values to use */
-#define CONFIG_SYNERGY_ENABLE 0xff
-#define CONFIG_SYNERGY_SETUP 0xff
-#define CONFIG_UART_ENABLE 0x0c
-#define CONFIG_UART_SETUP 0x02
-#define CONFIG_FPROM_ENABLE 0x10
-#define CONFIG_FPROM_SETUP 0x10
-
-#define CONFIG_FREQ_RTC IP27C_KHZ(IP27_RTC_FREQ)
-
-#ifndef __ASSEMBLY__
-
-/* we are going to define all the known configs is a table
- * for building hex images we will pull out the particular
- * slice we care about by using the IP27_CONFIG_XX_XX as
- * entries into the table
- * to keep the table of reasonable size we only include the
- * values that differ across configurations
- * please note then that this makes assumptions about what
- * will and will not change across configurations
- */
-
-/* these numbers are as the are ordered in the table below */
-#define IP27_CONFIG_UNKNOWN (-1)
-#define IP27_CONFIG_SN1_1MB_200_400_200_TABLE 0
-#define IP27_CONFIG_SN00_4MB_100_200_133_TABLE 1
-#define IP27_CONFIG_SN1_4MB_200_400_267_TABLE 2
-#define IP27_CONFIG_SN1_8MB_200_500_250_TABLE 3
-#define IP27_CONFIG_SN1_8MB_200_400_267_TABLE 4
-#define IP27_CONFIG_SN1_4MB_180_360_240_TABLE 5
-#define NUMB_IP_CONFIGS 6
-
-#ifdef DEF_IP_CONFIG_TABLE
-/*
- * N.B.: A new entry needs to be added here everytime a new config is added
- * The table is indexed by the PIMM PSC value
- */
-
-static int psc_to_flash_config[] = {
- IP27_CONFIG_SN1_4MB_200_400_267_TABLE, /* 0x0 */
- IP27_CONFIG_SN1_8MB_200_500_250_TABLE, /* 0x1 */
- IP27_CONFIG_SN1_8MB_200_400_267_TABLE, /* 0x2 */
- IP27_CONFIG_UNKNOWN, /* 0x3 */
- IP27_CONFIG_UNKNOWN, /* 0x4 */
- IP27_CONFIG_UNKNOWN, /* 0x5 */
- IP27_CONFIG_UNKNOWN, /* 0x6 */
- IP27_CONFIG_UNKNOWN, /* 0x7 */
- IP27_CONFIG_SN1_4MB_180_360_240_TABLE, /* 0x8 */
- IP27_CONFIG_UNKNOWN, /* 0x9 */
- IP27_CONFIG_UNKNOWN, /* 0xa */
- IP27_CONFIG_UNKNOWN, /* 0xb */
- IP27_CONFIG_UNKNOWN, /* 0xc */
- IP27_CONFIG_UNKNOWN, /* 0xd */
- IP27_CONFIG_SN00_4MB_100_200_133_TABLE, /* 0xe O200 PIMM for bringup */
- IP27_CONFIG_UNKNOWN /* 0xf == PIMM not installed */
-};
-
-static config_modifiable_t ip_config_table[NUMB_IP_CONFIGS] = {
-/* the 1MB_200_400_200 values (Generic settings, will work for any config.) */
-{
- (IP27C_R10000_KSEG0CA(5) + \
- IP27C_R10000_DEVNUM(0) + \
- IP27C_R10000_CPRT(0) + \
- IP27C_R10000_PER(0) + \
- IP27C_R10000_PRM(3) + \
- IP27C_R10000_SCD(3) + \
- IP27C_R10000_SCBS(1) + \
- IP27C_R10000_SCCE(0) + \
- IP27C_R10000_ME(1) + \
- IP27C_R10000_SCS(1) + \
- IP27C_R10000_SCCD(3) + \
- IP27C_R10000_SCCT(9) + \
- IP27C_R10000_ODSC(0) + \
- IP27C_R10000_ODSYS(1) + \
- IP27C_R10000_CTM(0)),
- IP27C_MHZ(400),
- IP27C_MHZ(200),
- CONFIG_FPROM_SETUP,
- SN1_MACH_TYPE,
- CONFIG_FPROM_ENABLE
-},
-
-/* the 4MB_100_200_133 values (O200 PIMM w/translation board, PSC 0xe)
- * (SysAD at 100MHz (SCD=3), and bedrock core at 200 MHz) */
-{
- /* ODSYS == 0 means HSTL1 on SysAD bus; other PIMMs use HSTL2 */
- (IP27C_R10000_KSEG0CA(5) + \
- IP27C_R10000_DEVNUM(0) + \
- IP27C_R10000_CPRT(0) + \
- IP27C_R10000_PER(0) + \
- IP27C_R10000_PRM(3) + \
- IP27C_R10000_SCD(3) + \
- IP27C_R10000_SCBS(1) + \
- IP27C_R10000_SCCE(0) + \
- IP27C_R10000_ME(1) + \
- IP27C_R10000_SCS(3) + \
- IP27C_R10000_SCCD(2) + \
- IP27C_R10000_SCCT(9) + \
- IP27C_R10000_ODSC(0) + \
- IP27C_R10000_ODSYS(0) + \
- IP27C_R10000_CTM(0)),
- IP27C_MHZ(200),
- IP27C_MHZ(200),
- CONFIG_FPROM_SETUP,
- SN1_MACH_TYPE,
- CONFIG_FPROM_ENABLE
-},
-
-/* 4MB_200_400_267 values (R12KS, 3.7ns, LWR, 030-1602-001, PSC 0x0) */
-{
- (IP27C_R10000_KSEG0CA(5) + \
- IP27C_R10000_DEVNUM(0) + \
- IP27C_R10000_CPRT(0) + \
- IP27C_R10000_PER(0) + \
- IP27C_R10000_PRM(3) + \
- IP27C_R10000_SCD(3) + \
- IP27C_R10000_SCBS(1) + \
- IP27C_R10000_SCCE(0) + \
- IP27C_R10000_ME(1) + \
- IP27C_R10000_SCS(3) + \
- IP27C_R10000_SCCD(2) + \
- IP27C_R10000_SCCT(0xa) + \
- IP27C_R10000_ODSC(0) + \
- IP27C_R10000_ODSYS(1) + \
- IP27C_R10000_CTM(0)),
- IP27C_MHZ(400),
- IP27C_MHZ(200),
- CONFIG_FPROM_SETUP,
- SN1_MACH_TYPE,
- CONFIG_FPROM_ENABLE
-},
-
-/* 8MB_200_500_250 values (R14K, 4.0ns, DDR1, 030-1520-001, PSC 0x1) */
-{
- (IP27C_R10000_KSEG0CA(5) + \
- IP27C_R10000_DEVNUM(0) + \
- IP27C_R10000_CPRT(0) + \
- IP27C_R10000_PER(0) + \
- IP27C_R10000_PRM(3) + \
- IP27C_R10000_SCD(4) + \
- IP27C_R10000_SCBS(1) + \
- IP27C_R10000_SCCE(0) + \
- IP27C_R10000_ME(1) + \
- IP27C_R10000_SCS(4) + \
- IP27C_R10000_DDR(1) + \
- IP27C_R10000_SCCD(3) + \
- IP27C_R10000_SCCT(0xa) + \
- IP27C_R10000_ODSC(0) + \
- IP27C_R10000_ODSYS(1) + \
- IP27C_R10000_CTM(0)),
- IP27C_MHZ(500),
- IP27C_MHZ(200),
- CONFIG_FPROM_SETUP,
- SN1_MACH_TYPE,
- CONFIG_FPROM_ENABLE
-},
-
-/* 8MB_200_400_267 values (R12KS, 3.7ns, LWR, 030-1616-001, PSC 0x2) */
-{
- (IP27C_R10000_KSEG0CA(5) + \
- IP27C_R10000_DEVNUM(0) + \
- IP27C_R10000_CPRT(0) + \
- IP27C_R10000_PER(0) + \
- IP27C_R10000_PRM(3) + \
- IP27C_R10000_SCD(3) + \
- IP27C_R10000_SCBS(1) + \
- IP27C_R10000_SCCE(0) + \
- IP27C_R10000_ME(1) + \
- IP27C_R10000_SCS(4) + \
- IP27C_R10000_SCCD(2) + \
- IP27C_R10000_SCCT(0xa) + \
- IP27C_R10000_ODSC(0) + \
- IP27C_R10000_ODSYS(1) + \
- IP27C_R10000_CTM(0)),
- IP27C_MHZ(400),
- IP27C_MHZ(200),
- CONFIG_FPROM_SETUP,
- SN1_MACH_TYPE,
- CONFIG_FPROM_ENABLE
-},
-
-/* 4MB_180_360_240 values (R12KS, 3.7ns, LWR, 030-1627-001, PSC 0x8)
- * (SysAD at 180 MHz (SCD=3, the fastest possible), bedrock core at 200MHz) */
-{
- (IP27C_R10000_KSEG0CA(5) + \
- IP27C_R10000_DEVNUM(0) + \
- IP27C_R10000_CPRT(0) + \
- IP27C_R10000_PER(0) + \
- IP27C_R10000_PRM(3) + \
- IP27C_R10000_SCD(3) + \
- IP27C_R10000_SCBS(1) + \
- IP27C_R10000_SCCE(0) + \
- IP27C_R10000_ME(1) + \
- IP27C_R10000_SCS(3) + \
- IP27C_R10000_SCCD(2) + \
- IP27C_R10000_SCCT(9) + \
- IP27C_R10000_ODSC(0) + \
- IP27C_R10000_ODSYS(1) + \
- IP27C_R10000_CTM(0)),
- IP27C_MHZ(360),
- IP27C_MHZ(200),
- CONFIG_FPROM_SETUP,
- SN1_MACH_TYPE,
- CONFIG_FPROM_ENABLE
-},
-
-};
-#else
-extern config_modifiable_t ip_config_table[];
-#endif /* DEF_IP27_CONFIG_TABLE */
-
-#ifdef IP27_CONFIG_SN00_4MB_100_200_133
-#define CONFIG_CPU_MODE ip_config_table[IP27_CONFIG_SN00_4MB_100_200_133_TABLE].r10k_mode
-#define CONFIG_FREQ_CPU ip_config_table[IP27_CONFIG_SN00_4MB_100_200_133_TABLE].freq_cpu
-#define CONFIG_FREQ_HUB ip_config_table[IP27_CONFIG_SN00_4MB_100_200_133_TABLE].freq_hub
-#define CONFIG_FPROM_CYC ip_config_table[IP27_CONFIG_SN00_4MB_100_200_133_TABLE].fprom_cyc
-#define CONFIG_MACH_TYPE ip_config_table[IP27_CONFIG_SN00_4MB_100_200_133_TABLE].mach_type
-#define CONFIG_FPROM_WR ip_config_table[IP27_CONFIG_SN00_4MB_100_200_133_TABLE].fprom_wr
-#endif /* IP27_CONFIG_SN00_4MB_100_200_133 */
-
-#ifdef IP27_CONFIG_SN1_1MB_200_400_200
-#define CONFIG_CPU_MODE ip_config_table[IP27_CONFIG_SN1_1MB_200_400_200_TABLE].r10k_mode
-#define CONFIG_FREQ_CPU ip_config_table[IP27_CONFIG_SN1_1MB_200_400_200_TABLE].freq_cpu
-#define CONFIG_FREQ_HUB ip_config_table[IP27_CONFIG_SN1_1MB_200_400_200_TABLE].freq_hub
-#define CONFIG_FPROM_CYC ip_config_table[IP27_CONFIG_SN1_1MB_200_400_200_TABLE].fprom_cyc
-#define CONFIG_MACH_TYPE ip_config_table[IP27_CONFIG_SN1_1MB_200_400_200_TABLE].mach_type
-#define CONFIG_FPROM_WR ip_config_table[IP27_CONFIG_SN1_1MB_200_400_200_TABLE].fprom_wr
-#endif /* IP27_CONFIG_SN1_1MB_200_400_200 */
-
-#ifdef IP27_CONFIG_SN1_4MB_200_400_267
-#define CONFIG_CPU_MODE ip_config_table[IP27_CONFIG_SN1_4MB_200_400_267_TABLE].r10k_mode
-#define CONFIG_FREQ_CPU ip_config_table[IP27_CONFIG_SN1_4MB_200_400_267_TABLE].freq_cpu
-#define CONFIG_FREQ_HUB ip_config_table[IP27_CONFIG_SN1_4MB_200_400_267_TABLE].freq_hub
-#define CONFIG_FPROM_CYC ip_config_table[IP27_CONFIG_SN1_4MB_200_400_267_TABLE].fprom_cyc
-#define CONFIG_MACH_TYPE ip_config_table[IP27_CONFIG_SN1_4MB_200_400_267_TABLE].mach_type
-#define CONFIG_FPROM_WR ip_config_table[IP27_CONFIG_SN1_4MB_200_400_267_TABLE].fprom_wr
-#endif /* IP27_CONFIG_SN1_4MB_200_400_267 */
-
-#ifdef IP27_CONFIG_SN1_8MB_200_500_250
-#define CONFIG_CPU_MODE ip_config_table[IP27_CONFIG_SN1_8MB_200_500_250_TABLE].r10k_mode
-#define CONFIG_FREQ_CPU ip_config_table[IP27_CONFIG_SN1_8MB_200_500_250_TABLE].freq_cpu
-#define CONFIG_FREQ_HUB ip_config_table[IP27_CONFIG_SN1_8MB_200_500_250_TABLE].freq_hub
-#define CONFIG_FPROM_CYC ip_config_table[IP27_CONFIG_SN1_8MB_200_500_250_TABLE].fprom_cyc
-#define CONFIG_MACH_TYPE ip_config_table[IP27_CONFIG_SN1_8MB_200_500_250_TABLE].mach_type
-#define CONFIG_FPROM_WR ip_config_table[IP27_CONFIG_SN1_8MB_200_500_250_TABLE].fprom_wr
-#endif /* IP27_CONFIG_SN1_8MB_200_500_250 */
-
-#ifdef IP27_CONFIG_SN1_8MB_200_400_267
-#define CONFIG_CPU_MODE ip_config_table[IP27_CONFIG_SN1_8MB_200_400_267_TABLE].r10k_mode
-#define CONFIG_FREQ_CPU ip_config_table[IP27_CONFIG_SN1_8MB_200_400_267_TABLE].freq_cpu
-#define CONFIG_FREQ_HUB ip_config_table[IP27_CONFIG_SN1_8MB_200_400_267_TABLE].freq_hub
-#define CONFIG_FPROM_CYC ip_config_table[IP27_CONFIG_SN1_8MB_200_400_267_TABLE].fprom_cyc
-#define CONFIG_MACH_TYPE ip_config_table[IP27_CONFIG_SN1_8MB_200_400_267_TABLE].mach_type
-#define CONFIG_FPROM_WR ip_config_table[IP27_CONFIG_SN1_8MB_200_400_267_TABLE].fprom_wr
-#endif /* IP27_CONFIG_SN1_8MB_200_400_267 */
-
-#ifdef IP27_CONFIG_SN1_4MB_180_360_240
-#define CONFIG_CPU_MODE ip_config_table[IP27_CONFIG_SN1_4MB_180_360_240_TABLE].r10k_mode
-#define CONFIG_FREQ_CPU ip_config_table[IP27_CONFIG_SN1_4MB_180_360_240_TABLE].freq_cpu
-#define CONFIG_FREQ_HUB ip_config_table[IP27_CONFIG_SN1_4MB_180_360_240_TABLE].freq_hub
-#define CONFIG_FPROM_CYC ip_config_table[IP27_CONFIG_SN1_4MB_180_360_240_TABLE].fprom_cyc
-#define CONFIG_MACH_TYPE ip_config_table[IP27_CONFIG_SN1_4MB_180_360_240_TABLE].mach_type
-#define CONFIG_FPROM_WR ip_config_table[IP27_CONFIG_SN1_4MB_180_360_240_TABLE].fprom_wr
-#endif /* IP27_CONFIG_SN1_4MB_180_360_240 */
-
-#endif /* __ASSEMBLY__ */
-
-#if __ASSEMBLY__
-
-/* these need to be in here since we need assembly definitions
- * for building hex images (as required by start.s)
- */
-#ifdef IP27_CONFIG_SN00_4MB_100_200_133
-#define BRINGUP_PRM_VAL 3
-#define CONFIG_CPU_MODE \
- (IP27C_R10000_KSEG0CA(5) + \
- IP27C_R10000_DEVNUM(0) + \
- IP27C_R10000_CPRT(0) + \
- IP27C_R10000_PER(0) + \
- IP27C_R10000_PRM(BRINGUP_PRM_VAL) + \
- IP27C_R10000_SCD(3) + \
- IP27C_R10000_SCBS(1) + \
- IP27C_R10000_SCCE(0) + \
- IP27C_R10000_ME(1) + \
- IP27C_R10000_SCS(3) + \
- IP27C_R10000_SCCD(2) + \
- IP27C_R10000_SCCT(9) + \
- IP27C_R10000_ODSC(0) + \
- IP27C_R10000_ODSYS(0) + \
- IP27C_R10000_CTM(0))
-#define CONFIG_FREQ_CPU IP27C_MHZ(200)
-#define CONFIG_FREQ_HUB IP27C_MHZ(200)
-#define CONFIG_FPROM_CYC CONFIG_FPROM_SETUP
-#define CONFIG_MACH_TYPE SN1_MACH_TYPE
-#define CONFIG_FPROM_WR CONFIG_FPROM_ENABLE
-#endif /* IP27_CONFIG_SN00_4MB_100_200_133 */
-
-#ifdef IP27_CONFIG_SN1_1MB_200_400_200
-#define CONFIG_CPU_MODE \
- (IP27C_R10000_KSEG0CA(5) + \
- IP27C_R10000_DEVNUM(0) + \
- IP27C_R10000_CPRT(0) + \
- IP27C_R10000_PER(0) + \
- IP27C_R10000_PRM(3) + \
- IP27C_R10000_SCD(3) + \
- IP27C_R10000_SCBS(1) + \
- IP27C_R10000_SCCE(0) + \
- IP27C_R10000_ME(1) + \
- IP27C_R10000_SCS(1) + \
- IP27C_R10000_SCCD(3) + \
- IP27C_R10000_SCCT(9) + \
- IP27C_R10000_ODSC(0) + \
- IP27C_R10000_ODSYS(1) + \
- IP27C_R10000_CTM(0))
-#define CONFIG_FREQ_CPU IP27C_MHZ(400)
-#define CONFIG_FREQ_HUB IP27C_MHZ(200)
-#define CONFIG_FPROM_CYC CONFIG_FPROM_SETUP
-#define CONFIG_MACH_TYPE SN1_MACH_TYPE
-#define CONFIG_FPROM_WR CONFIG_FPROM_ENABLE
-#endif /* IP27_CONFIG_SN1_1MB_200_400_200 */
-
-#ifdef IP27_CONFIG_SN1_4MB_200_400_267
-#define CONFIG_CPU_MODE \
- (IP27C_R10000_KSEG0CA(5) + \
- IP27C_R10000_DEVNUM(0) + \
- IP27C_R10000_CPRT(0) + \
- IP27C_R10000_PER(0) + \
- IP27C_R10000_PRM(3) + \
- IP27C_R10000_SCD(3) + \
- IP27C_R10000_SCBS(1) + \
- IP27C_R10000_SCCE(0) + \
- IP27C_R10000_ME(1) + \
- IP27C_R10000_SCS(3) + \
- IP27C_R10000_SCCD(2) + \
- IP27C_R10000_SCCT(0xa) + \
- IP27C_R10000_ODSC(0) + \
- IP27C_R10000_ODSYS(1) + \
- IP27C_R10000_CTM(0))
-#define CONFIG_FREQ_CPU IP27C_MHZ(400)
-#define CONFIG_FREQ_HUB IP27C_MHZ(200)
-#define CONFIG_FPROM_CYC CONFIG_FPROM_SETUP
-#define CONFIG_MACH_TYPE SN1_MACH_TYPE
-#define CONFIG_FPROM_WR CONFIG_FPROM_ENABLE
-#endif /* IP27_CONFIG_SN1_4MB_200_400_267 */
-
-#ifdef IP27_CONFIG_SN1_8MB_200_500_250
-#define CONFIG_CPU_MODE \
- (IP27C_R10000_KSEG0CA(5) + \
- IP27C_R10000_DEVNUM(0) + \
- IP27C_R10000_CPRT(0) + \
- IP27C_R10000_PER(0) + \
- IP27C_R10000_PRM(3) + \
- IP27C_R10000_SCD(4) + \
- IP27C_R10000_SCBS(1) + \
- IP27C_R10000_SCCE(0) + \
- IP27C_R10000_ME(1) + \
- IP27C_R10000_SCS(4) + \
- IP27C_R10000_SCCD(3) + \
- IP27C_R10000_DDR(1) + \
- IP27C_R10000_SCCT(0xa) + \
- IP27C_R10000_ODSC(0) + \
- IP27C_R10000_ODSYS(1) + \
- IP27C_R10000_CTM(0))
-#define CONFIG_FREQ_CPU IP27C_MHZ(500)
-#define CONFIG_FREQ_HUB IP27C_MHZ(200)
-#define CONFIG_FPROM_CYC CONFIG_FPROM_SETUP
-#define CONFIG_MACH_TYPE SN1_MACH_TYPE
-#define CONFIG_FPROM_WR CONFIG_FPROM_ENABLE
-#endif /* IP27_CONFIG_SN1_8MB_200_500_250 */
-
-#ifdef IP27_CONFIG_SN1_8MB_200_400_267
-#define CONFIG_CPU_MODE \
- (IP27C_R10000_KSEG0CA(5) + \
- IP27C_R10000_DEVNUM(0) + \
- IP27C_R10000_CPRT(0) + \
- IP27C_R10000_PER(0) + \
- IP27C_R10000_PRM(3) + \
- IP27C_R10000_SCD(3) + \
- IP27C_R10000_SCBS(1) + \
- IP27C_R10000_SCCE(0) + \
- IP27C_R10000_ME(1) + \
- IP27C_R10000_SCS(4) + \
- IP27C_R10000_SCCD(2) + \
- IP27C_R10000_SCCT(0xa) + \
- IP27C_R10000_ODSC(0) + \
- IP27C_R10000_ODSYS(1) + \
- IP27C_R10000_CTM(0))
-#define CONFIG_FREQ_CPU IP27C_MHZ(400)
-#define CONFIG_FREQ_HUB IP27C_MHZ(200)
-#define CONFIG_FPROM_CYC CONFIG_FPROM_SETUP
-#define CONFIG_MACH_TYPE SN1_MACH_TYPE
-#define CONFIG_FPROM_WR CONFIG_FPROM_ENABLE
-#endif /* IP27_CONFIG_SN1_8MB_200_400_267 */
-
-#ifdef IP27_CONFIG_SN1_4MB_180_360_240
-#define CONFIG_CPU_MODE \
- (IP27C_R10000_KSEG0CA(5) + \
- IP27C_R10000_DEVNUM(0) + \
- IP27C_R10000_CPRT(0) + \
- IP27C_R10000_PER(0) + \
- IP27C_R10000_PRM(3) + \
- IP27C_R10000_SCD(3) + \
- IP27C_R10000_SCBS(1) + \
- IP27C_R10000_SCCE(0) + \
- IP27C_R10000_ME(1) + \
- IP27C_R10000_SCS(3) + \
- IP27C_R10000_SCCD(2) + \
- IP27C_R10000_SCCT(9) + \
- IP27C_R10000_ODSC(0) + \
- IP27C_R10000_ODSYS(1) + \
- IP27C_R10000_CTM(0))
-#define CONFIG_FREQ_CPU IP27C_MHZ(360)
-#define CONFIG_FREQ_HUB IP27C_MHZ(200)
-#define CONFIG_FPROM_CYC CONFIG_FPROM_SETUP
-#define CONFIG_MACH_TYPE SN1_MACH_TYPE
-#define CONFIG_FPROM_WR CONFIG_FPROM_ENABLE
-#endif /* IP27_CONFIG_SN1_4MB_180_360_240 */
-
-#endif /* __ASSEMBLY__ */
-
-#endif /* _ASM_IA64_SN_SN1_IP27CONFIG_H */
+++ /dev/null
-/*
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Copyright (C) 1992 - 1997, 2000-2001 Silicon Graphics, Inc. All rights reserved.
- */
-#ifndef _ASM_IA64_SN_SN1_MEM_REFCNT_H
-#define _ASM_IA64_SN_SN1_MEM_REFCNT_H
-
-extern int mem_refcnt_attach(devfs_handle_t hub);
-extern int mem_refcnt_open(devfs_handle_t *devp, mode_t oflag, int otyp, cred_t *crp);
-extern int mem_refcnt_close(devfs_handle_t dev, int oflag, int otyp, cred_t *crp);
-extern int mem_refcnt_mmap(devfs_handle_t dev, vhandl_t *vt, off_t off, size_t len, uint prot);
-extern int mem_refcnt_unmap(devfs_handle_t dev, vhandl_t *vt);
-extern int mem_refcnt_ioctl(devfs_handle_t dev,
- int cmd,
- void *arg,
- int mode,
- cred_t *cred_p,
- int *rvalp);
-
-
-#endif /* _ASM_IA64_SN_SN1_MEM_REFCNT_H */
+++ /dev/null
-#ifndef _ASM_IA64_SN_MMZONE_SN1_H
-#define _ASM_IA64_SN_MMZONE_SN1_H
-
-/*
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Copyright (c) 2000-2002 Silicon Graphics, Inc. All rights reserved.
- */
-
-#include <linux/config.h>
-
-
-/*
- * SGI SN1 Arch defined values
- *
- * An SN1 physical address is broken down as follows:
- *
- * +-----------------------------------------+
- * | | | | node offset |
- * | unused | AS | node |-------------------|
- * | | | | cn | clump offset |
- * +-----------------------------------------+
- * 6 4 4 4 3 3 3 3 2 0
- * 3 4 3 0 9 3 2 0 9 0
- *
- * bits 63-44 Unused - must be zero
- * bits 43-40 Address space ID. Cached memory has a value of 0.
- * Chipset & IO addresses have non-zero values.
- * bits 39-33 Node number. Note that some configurations do NOT
- * have a node zero.
- * bits 32-0 Node offset.
- *
- * The node offset can be further broken down as:
- * bits 32-30 Clump (bank) number.
- * bits 29-0 Clump (bank) offset.
- *
- * A node consists of up to 8 clumps (banks) of memory. A clump may be empty, or may be
- * populated with a single contiguous block of memory starting at clump
- * offset 0. The size of the block is (2**n) * 64MB, where 0<n<5.
- *
- *
- * NOTE: This file exports symbols prefixed with "PLAT_". Symbols prefixed with
- * "SN_" are intended for internal use only and should not be used in
- * any platform independent code.
- *
- * This file is also responsible for exporting the following definitions:
- * cnodeid_t Define a compact node id.
- */
-
-typedef signed short cnodeid_t;
-
-#define SN1_BANKS_PER_NODE 8
-#define SN1_NODE_SIZE (8UL*1024*1024*1024) /* 8 GB per node */
-#define SN1_BANK_SIZE (SN1_NODE_SIZE/SN1_BANKS_PER_NODE)
-#define SN1_NODE_SHIFT 33
-#define SN1_NODE_MASK 0x7fUL
-#define SN1_NODE_OFFSET_MASK (SN1_NODE_SIZE-1)
-#define SN1_NODE_NUMBER(addr) (((unsigned long)(addr) >> SN1_NODE_SHIFT) & SN1_NODE_MASK)
-#define SN1_NODE_CLUMP_NUMBER(addr) (((unsigned long)(addr) >>30) & 7)
-#define SN1_NODE_OFFSET(addr) (((unsigned long)(addr)) & SN1_NODE_OFFSET_MASK)
-#define SN1_KADDR(nasid, offset) (((unsigned long)(nasid)<<SN1_NODE_SHIFT) | (offset) | PAGE_OFFSET)
-
-
-#define PLAT_MAX_NODE_NUMBER 128 /* Maximum node number +1 */
-#define PLAT_MAX_COMPACT_NODES 128 /* Maximum number of nodes in SSI */
-
-#define PLAT_MAX_PHYS_MEMORY (1UL << 40)
-
-
-
-/*
- * On the SN platforms, a clump is the same as a memory bank.
- */
-#define PLAT_CLUMPS_PER_NODE SN1_BANKS_PER_NODE
-#define PLAT_CLUMP_OFFSET(addr) ((unsigned long)(addr) & 0x3fffffffUL)
-#define PLAT_CLUMPSIZE (SN1_NODE_SIZE/PLAT_CLUMPS_PER_NODE)
-#define PLAT_MAXCLUMPS (PLAT_CLUMPS_PER_NODE*PLAT_MAX_COMPACT_NODES)
-
-
-
-
-/*
- * PLAT_VALID_MEM_KADDR returns a boolean to indicate if a kaddr is potentially a
- * valid cacheable identity mapped RAM memory address.
- * Note that the RAM may or may not actually be present!!
- */
-#define SN1_VALID_KERN_ADDR_MASK 0xffffff0000000000UL
-#define SN1_VALID_KERN_ADDR_VALUE 0xe000000000000000UL
-#define PLAT_VALID_MEM_KADDR(kaddr) (((unsigned long)(kaddr) & SN1_VALID_KERN_ADDR_MASK) == SN1_VALID_KERN_ADDR_VALUE)
-
-
-
-/*
- * Memory is conceptually divided into chunks. A chunk is either
- * completely present, or else the kernel assumes it is completely
- * absent. Each node consists of a number of possibly discontiguous chunks.
- */
-#define SN1_CHUNKSHIFT 26 /* 64 MB */
-#define PLAT_CHUNKSIZE (1UL << SN1_CHUNKSHIFT)
-#define PLAT_CHUNKNUM(addr) (((addr) & (PLAT_MAX_PHYS_MEMORY-1)) >> SN1_CHUNKSHIFT)
-
-
-/*
- * Given a kaddr, find the nid (compact nodeid)
- */
-#ifdef CONFIG_IA64_SGI_SN_DEBUG
-#define DISCONBUG(kaddr) panic("DISCONTIG BUG: line %d, %s. kaddr 0x%lx", \
- __LINE__, __FILE__, (long)(kaddr))
-
-#define KVADDR_TO_NID(kaddr) ({long _ktn=(long)(kaddr); \
- kern_addr_valid(_ktn) ? \
- local_node_data->physical_node_map[SN1_NODE_NUMBER(_ktn)] :\
- (DISCONBUG(_ktn), 0UL);})
-#else
-#define KVADDR_TO_NID(kaddr) (local_node_data->physical_node_map[SN1_NODE_NUMBER(kaddr)])
-#endif
-
-
-
-/*
- * Given a kaddr, find the index into the clump_mem_map_base array of the page struct entry
- * for the first page of the clump.
- */
-#define PLAT_CLUMP_MEM_MAP_INDEX(kaddr) ({long _kmmi=(long)(kaddr); \
- KVADDR_TO_NID(_kmmi) * PLAT_CLUMPS_PER_NODE + \
- SN1_NODE_CLUMP_NUMBER(_kmmi);})
-
-
-/*
- * Calculate a "goal" value to be passed to __alloc_bootmem_node for allocating structures on
- * nodes so that they don't alias to the same line in the cache as the previous allocated structure.
- * This macro takes an address of the end of previous allocation, rounds it to a page boundary &
- * changes the node number.
- */
-#define PLAT_BOOTMEM_ALLOC_GOAL(cnode,kaddr) __pa(SN1_KADDR(PLAT_PXM_TO_PHYS_NODE_NUMBER(nid_to_pxm_map[cnode]), \
- (SN1_NODE_OFFSET(kaddr) + PAGE_SIZE - 1) >> PAGE_SHIFT << PAGE_SHIFT))
-
-
-
-
-/*
- * Convert a proximity domain number (from the ACPI tables) into a physical node number.
- */
-
-#define PLAT_PXM_TO_PHYS_NODE_NUMBER(pxm) (pxm)
-
-#endif /* _ASM_IA64_SN_MMZONE_SN1_H */
+++ /dev/null
-/* $Id$
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Copyright (C) 1992 - 1997, 2000-2001 Silicon Graphics, Inc. All rights reserved.
- */
-
-#ifndef _ASM_IA64_SN_SN1_SLOTNUM_H
-#define _ASM_IA64_SN_SN1_SLOTNUM_H
-
-#define SLOTNUM_MAXLENGTH 16
-
-/*
- * This file attempts to define a slot number space across all slots.
- *
- * Node slots
- * Router slots
- * Crosstalk slots
- *
- * Other slots are children of their parent crosstalk slot:
- * PCI slots
- * VME slots
- *
- * The PCI class has been added since the XBridge ASIC on SN-MIPS
- * has built-in PCI bridges (2). On IBricks, widget E & F serve
- * PCI busses, and on PBricks all widgets serve as PCI busses
- * with the use of the super-bridge mode of the XBridge ASIC.
- */
-
-#define SLOTNUM_NODE_CLASS 0x00 /* Node */
-#define SLOTNUM_ROUTER_CLASS 0x10 /* Router */
-#define SLOTNUM_XTALK_CLASS 0x20 /* Xtalk */
-#define SLOTNUM_MIDPLANE_CLASS 0x30 /* Midplane */
-#define SLOTNUM_XBOW_CLASS 0x40 /* Xbow */
-#define SLOTNUM_KNODE_CLASS 0x50 /* Kego node */
-#define SLOTNUM_PCI_CLASS 0x60 /* PCI widgets on XBridge */
-#define SLOTNUM_INVALID_CLASS 0xf0 /* Invalid */
-
-#define SLOTNUM_CLASS_MASK 0xf0
-#define SLOTNUM_SLOT_MASK 0x0f
-
-#define SLOTNUM_GETCLASS(_sn) ((_sn) & SLOTNUM_CLASS_MASK)
-#define SLOTNUM_GETSLOT(_sn) ((_sn) & SLOTNUM_SLOT_MASK)
-
-/* This determines module to pnode mapping. */
-/* NODESLOTS_PER_MODULE has changed from 4 to 6
- * to support the 12P 4IO configuration. This change
- * helps in minimum number of changes to code which
- * depend on the number of node boards within a module.
- */
-#define NODESLOTS_PER_MODULE 6
-#define NODESLOTS_PER_MODULE_SHFT 2
-
-#define HIGHEST_I2C_VISIBLE_NODESLOT 4
-#define RTRSLOTS_PER_MODULE 2
-
-#if __KERNEL__
-#include <asm/sn/xtalk/xtalk.h>
-
-extern slotid_t xbwidget_to_xtslot(int crossbow, int widget);
-extern slotid_t hub_slotbits_to_slot(slotid_t slotbits);
-extern slotid_t hub_slot_to_crossbow(slotid_t hub_slot);
-extern slotid_t router_slotbits_to_slot(slotid_t slotbits);
-extern slotid_t get_node_slotid(nasid_t nasid);
-extern slotid_t get_my_slotid(void);
-extern slotid_t get_node_crossbow(nasid_t);
-extern xwidgetnum_t hub_slot_to_widget(slotid_t);
-extern void get_slotname(slotid_t, char *);
-extern void get_my_slotname(char *);
-extern slotid_t get_widget_slotnum(int xbow, int widget);
-extern void get_widget_slotname(int, int, char *);
-extern void router_slotbits_to_slotname(int, char *);
-extern slotid_t meta_router_slotbits_to_slot(slotid_t) ;
-extern slotid_t hub_slot_get(void);
-
-extern int node_can_talk_to_elsc(void);
-
-extern int slot_to_widget(int) ;
-#define MAX_IO_SLOT_NUM 12
-#define MAX_NODE_SLOT_NUM 4
-#define MAX_ROUTER_SLOTNUM 2
-
-#endif /* __KERNEL__ */
-
-#endif /* _ASM_IA64_SN_SN1_SLOTNUM_H */
+++ /dev/null
-/* $Id: sn_private.h,v 1.1 2002/02/28 17:31:25 marcelo Exp $
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Copyright (C) 1992 - 1997, 2000-2001 Silicon Graphics, Inc. All rights reserved.
- */
-#ifndef _ASM_IA64_SN_SN1_SN_PRIVATE_H
-#define _ASM_IA64_SN_SN1_SN_PRIVATE_H
-
-#include <asm/sn/nodepda.h>
-#include <asm/sn/xtalk/xwidget.h>
-#include <asm/sn/xtalk/xtalk_private.h>
-
-extern nasid_t master_nasid;
-
-/* promif.c */
-#ifdef LATER
-extern cpuid_t cpu_node_probe(cpumask_t *cpumask, int *numnodes);
-#endif
-extern void he_arcs_set_vectors(void);
-extern void mem_init(void);
-#ifdef LATER
-extern int cpu_enabled(cpuid_t);
-#endif
-extern void cpu_unenable(cpuid_t);
-extern nasid_t get_lowest_nasid(void);
-extern __psunsigned_t get_master_bridge_base(void);
-extern void set_master_bridge_base(void);
-extern int check_nasid_equiv(nasid_t, nasid_t);
-extern nasid_t get_console_nasid(void);
-extern char get_console_pcislot(void);
-#ifdef LATER
-extern void intr_init_vecblk(nodepda_t *npda, cnodeid_t, int);
-#endif
-
-extern int is_master_nasid_widget(nasid_t test_nasid, xwidgetnum_t test_wid);
-
-/* memsupport.c */
-extern void poison_state_alter_range(__psunsigned_t start, int len, int poison);
-extern int memory_present(paddr_t);
-extern int memory_read_accessible(paddr_t);
-extern int memory_write_accessible(paddr_t);
-extern void memory_set_access(paddr_t, int, int);
-extern void show_dir_state(paddr_t, void (*)(char *, ...));
-extern void check_dir_state(nasid_t, int, void (*)(char *, ...));
-extern void set_dir_owner(paddr_t, int);
-extern void set_dir_state(paddr_t, int);
-extern void set_dir_state_POISONED(paddr_t);
-extern void set_dir_state_UNOWNED(paddr_t);
-extern int is_POISONED_dir_state(paddr_t);
-extern int is_UNOWNED_dir_state(paddr_t);
-extern void get_dir_ent(paddr_t paddr, int *state,
- uint64_t *vec_ptr, hubreg_t *elo);
-
-/* intr.c */
-extern int intr_reserve_level(cpuid_t cpu, int level, int err, devfs_handle_t owner_dev, char *name);
-extern void intr_unreserve_level(cpuid_t cpu, int level);
-extern int intr_connect_level(cpuid_t cpu, int bit, ilvl_t mask_no,
- intr_func_t intr_prefunc);
-extern int intr_disconnect_level(cpuid_t cpu, int bit);
-extern cpuid_t intr_heuristic(devfs_handle_t dev, device_desc_t dev_desc,
- int req_bit,int intr_resflags,devfs_handle_t owner_dev,
- char *intr_name,int *resp_bit);
-extern void intr_block_bit(cpuid_t cpu, int bit);
-extern void intr_unblock_bit(cpuid_t cpu, int bit);
-extern void setrtvector(intr_func_t);
-extern void install_cpuintr(cpuid_t cpu);
-extern void install_dbgintr(cpuid_t cpu);
-extern void install_tlbintr(cpuid_t cpu);
-extern void hub_migrintr_init(cnodeid_t /*cnode*/);
-extern int cause_intr_connect(int level, intr_func_t handler, uint intr_spl_mask);
-extern int cause_intr_disconnect(int level);
-extern void intr_reserve_hardwired(cnodeid_t);
-extern void intr_clear_all(nasid_t);
-extern void intr_dumpvec(cnodeid_t cnode, void (*pf)(char *, ...));
-
-/* error_dump.c */
-extern char *hub_rrb_err_type[];
-extern char *hub_wrb_err_type[];
-
-void nmi_dump(void);
-void install_cpu_nmi_handler(int slice);
-
-/* klclock.c */
-extern void hub_rtc_init(cnodeid_t);
-
-/* bte.c */
-void bte_lateinit(void);
-void bte_wait_for_xfer_completion(void *);
-
-/* klgraph.c */
-void klhwg_add_all_nodes(devfs_handle_t);
-void klhwg_add_all_modules(devfs_handle_t);
-
-/* klidbg.c */
-void install_klidbg_functions(void);
-
-/* klnuma.c */
-extern void replicate_kernel_text(int numnodes);
-extern __psunsigned_t get_freemem_start(cnodeid_t cnode);
-extern void setup_replication_mask(int maxnodes);
-
-/* init.c */
-extern cnodeid_t get_compact_nodeid(void); /* get compact node id */
-extern void init_platform_nodepda(nodepda_t *npda, cnodeid_t node);
-extern void init_platform_pda(cpuid_t cpu);
-extern void per_cpu_init(void);
-#ifdef LATER
-extern cpumask_t boot_cpumask;
-#endif
-extern int is_fine_dirmode(void);
-extern void update_node_information(cnodeid_t);
-
-#ifdef LATER
-/* clksupport.c */
-extern void early_counter_intr(eframe_t *);
-#endif
-
-/* hubio.c */
-extern void hubio_init(void);
-extern void hub_merge_clean(nasid_t nasid);
-extern void hub_set_piomode(nasid_t nasid, int conveyor);
-
-/* huberror.c */
-extern void hub_error_init(cnodeid_t);
-extern void dump_error_spool(cpuid_t cpu, void (*pf)(char *, ...));
-extern void hubni_error_handler(char *, int);
-extern int check_ni_errors(void);
-
-/* Used for debugger to signal upper software a breakpoint has taken place */
-
-extern void *debugger_update;
-extern __psunsigned_t debugger_stopped;
-
-/*
- * IP27 piomap, created by hub_pio_alloc.
- * xtalk_info MUST BE FIRST, since this structure is cast to a
- * xtalk_piomap_s by generic xtalk routines.
- */
-struct hub_piomap_s {
- struct xtalk_piomap_s hpio_xtalk_info;/* standard crosstalk pio info */
- devfs_handle_t hpio_hub; /* which hub's mapping registers are set up */
- short hpio_holdcnt; /* count of current users of bigwin mapping */
- char hpio_bigwin_num;/* if big window map, which one */
- int hpio_flags; /* defined below */
-};
-/* hub_piomap flags */
-#define HUB_PIOMAP_IS_VALID 0x1
-#define HUB_PIOMAP_IS_BIGWINDOW 0x2
-#define HUB_PIOMAP_IS_FIXED 0x4
-
-#define hub_piomap_xt_piomap(hp) (&hp->hpio_xtalk_info)
-#define hub_piomap_hub_v(hp) (hp->hpio_hub)
-#define hub_piomap_winnum(hp) (hp->hpio_bigwin_num)
-
-#if TBD
- /* Ensure that hpio_xtalk_info is first */
- #assert (&(((struct hub_piomap_s *)0)->hpio_xtalk_info) == 0)
-#endif
-
-
-/*
- * IP27 dmamap, created by hub_pio_alloc.
- * xtalk_info MUST BE FIRST, since this structure is cast to a
- * xtalk_dmamap_s by generic xtalk routines.
- */
-struct hub_dmamap_s {
- struct xtalk_dmamap_s hdma_xtalk_info;/* standard crosstalk dma info */
- devfs_handle_t hdma_hub; /* which hub we go through */
- int hdma_flags; /* defined below */
-};
-/* hub_dmamap flags */
-#define HUB_DMAMAP_IS_VALID 0x1
-#define HUB_DMAMAP_USED 0x2
-#define HUB_DMAMAP_IS_FIXED 0x4
-
-#if TBD
- /* Ensure that hdma_xtalk_info is first */
- #assert (&(((struct hub_dmamap_s *)0)->hdma_xtalk_info) == 0)
-#endif
-
-/*
- * IP27 interrupt handle, created by hub_intr_alloc.
- * xtalk_info MUST BE FIRST, since this structure is cast to a
- * xtalk_intr_s by generic xtalk routines.
- */
-struct hub_intr_s {
- struct xtalk_intr_s i_xtalk_info; /* standard crosstalk intr info */
- ilvl_t i_swlevel; /* software level for blocking intr */
- cpuid_t i_cpuid; /* which cpu */
- int i_bit; /* which bit */
- int i_flags;
-};
-/* flag values */
-#define HUB_INTR_IS_ALLOCED 0x1 /* for debug: allocated */
-#define HUB_INTR_IS_CONNECTED 0x4 /* for debug: connected to a software driver */
-
-#if TBD
- /* Ensure that i_xtalk_info is first */
- #assert (&(((struct hub_intr_s *)0)->i_xtalk_info) == 0)
-#endif
-
-
-/* IP27 hub-specific information stored under INFO_LBL_HUB_INFO */
-/* TBD: IP27-dependent stuff currently in nodepda.h should be here */
-typedef struct hubinfo_s {
- nodepda_t *h_nodepda; /* pointer to node's private data area */
- cnodeid_t h_cnodeid; /* compact nodeid */
- nasid_t h_nasid; /* nasid */
-
- /* structures for PIO management */
- xwidgetnum_t h_widgetid; /* my widget # (as viewed from xbow) */
- struct hub_piomap_s h_small_window_piomap[HUB_WIDGET_ID_MAX+1];
- sv_t h_bwwait; /* wait for big window to free */
- spinlock_t h_bwlock; /* guard big window piomap's */
- spinlock_t h_crblock; /* gaurd CRB error handling */
- int h_num_big_window_fixed; /* count number of FIXED maps */
- struct hub_piomap_s h_big_window_piomap[HUB_NUM_BIG_WINDOW];
- hub_intr_t hub_ii_errintr;
-} *hubinfo_t;
-
-#define hubinfo_get(vhdl, infoptr) ((void)hwgraph_info_get_LBL \
- (vhdl, INFO_LBL_NODE_INFO, (arbitrary_info_t *)infoptr))
-
-#define hubinfo_set(vhdl, infoptr) (void)hwgraph_info_add_LBL \
- (vhdl, INFO_LBL_NODE_INFO, (arbitrary_info_t)infoptr)
-
-#define hubinfo_to_hubv(hinfo, hub_v) (hinfo->h_nodepda->node_vertex)
-
-/*
- * Hub info PIO map access functions.
- */
-#define hubinfo_bwin_piomap_get(hinfo, win) \
- (&hinfo->h_big_window_piomap[win])
-#define hubinfo_swin_piomap_get(hinfo, win) \
- (&hinfo->h_small_window_piomap[win])
-
-/* IP27 cpu-specific information stored under INFO_LBL_CPU_INFO */
-/* TBD: IP27-dependent stuff currently in pda.h should be here */
-typedef struct cpuinfo_s {
-#ifdef LATER
- pda_t *ci_cpupda; /* pointer to CPU's private data area */
-#endif
- cpuid_t ci_cpuid; /* CPU ID */
-} *cpuinfo_t;
-
-#define cpuinfo_get(vhdl, infoptr) ((void)hwgraph_info_get_LBL \
- (vhdl, INFO_LBL_CPU_INFO, (arbitrary_info_t *)infoptr))
-
-#define cpuinfo_set(vhdl, infoptr) (void)hwgraph_info_add_LBL \
- (vhdl, INFO_LBL_CPU_INFO, (arbitrary_info_t)infoptr)
-
-/* Special initialization function for xswitch vertices created during startup. */
-extern void xswitch_vertex_init(devfs_handle_t xswitch);
-
-extern xtalk_provider_t hub_provider;
-
-/* du.c */
-int ducons_write(char *buf, int len);
-
-/* memerror.c */
-
-extern void install_eccintr(cpuid_t cpu);
-extern void memerror_get_stats(cnodeid_t cnode,
- int *bank_stats, int *bank_stats_max);
-extern void probe_md_errors(nasid_t);
-/* sysctlr.c */
-extern void sysctlr_init(void);
-extern void sysctlr_power_off(int sdonly);
-extern void sysctlr_keepalive(void);
-
-#define valid_cpuid(_x) (((_x) >= 0) && ((_x) < maxcpus))
-
-/* Useful definitions to get the memory dimm given a physical
- * address.
- */
-#define paddr_dimm(_pa) ((_pa & MD_BANK_MASK) >> MD_BANK_SHFT)
-#define paddr_cnode(_pa) (NASID_TO_COMPACT_NODEID(NASID_GET(_pa)))
-extern void membank_pathname_get(paddr_t,char *);
-
-/* To redirect the output into the error buffer */
-#define errbuf_print(_s) printf("#%s",_s)
-
-extern void crbx(nasid_t nasid, void (*pf)(char *, ...));
-void bootstrap(void);
-
-/* sndrv.c */
-extern int sndrv_attach(devfs_handle_t vertex);
-
-#endif /* _ASM_IA64_SN_SN1_SN_PRIVATE_H */
+++ /dev/null
-#ifndef _ASM_IA64_SN_SN1_SYNERGY_H
-#define _ASM_IA64_SN_SN1_SYNERGY_H
-
-#include <asm/io.h>
-#include <asm/sn/hcl.h>
-#include <asm/sn/addrs.h>
-#include <asm/sn/intr_public.h>
-
-
-/*
- * Definitions for the synergy asic driver
- *
- * These are for SGI platforms only.
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Copyright (c) 2000-2002 Silicon Graphics, Inc. All rights reserved.
- */
-
-
-#define SYNERGY_L4_BYTES (64UL*1024*1024)
-#define SYNERGY_L4_WAYS 8
-#define SYNERGY_L4_BYTES_PER_WAY (SYNERGY_L4_BYTES/SYNERGY_L4_WAYS)
-#define SYNERGY_BLOCK_SIZE 512UL
-
-
-#define SSPEC_BASE (0xe0000000000UL)
-#define LB_REG_BASE (SSPEC_BASE + 0x0)
-
-#define VEC_MASK3A_ADDR (0x2a0 + LB_REG_BASE + __IA64_UNCACHED_OFFSET)
-#define VEC_MASK3B_ADDR (0x2a8 + LB_REG_BASE + __IA64_UNCACHED_OFFSET)
-#define VEC_MASK3A (0x2a0)
-#define VEC_MASK3B (0x2a8)
-
-#define VEC_MASK2A_ADDR (0x2b0 + LB_REG_BASE + __IA64_UNCACHED_OFFSET)
-#define VEC_MASK2B_ADDR (0x2b8 + LB_REG_BASE + __IA64_UNCACHED_OFFSET)
-#define VEC_MASK2A (0x2b0)
-#define VEC_MASK2B (0x2b8)
-
-#define VEC_MASK1A_ADDR (0x2c0 + LB_REG_BASE + __IA64_UNCACHED_OFFSET)
-#define VEC_MASK1B_ADDR (0x2c8 + LB_REG_BASE + __IA64_UNCACHED_OFFSET)
-#define VEC_MASK1A (0x2c0)
-#define VEC_MASK1B (0x2c8)
-
-#define VEC_MASK0A_ADDR (0x2d0 + LB_REG_BASE + __IA64_UNCACHED_OFFSET)
-#define VEC_MASK0B_ADDR (0x2d8 + LB_REG_BASE + __IA64_UNCACHED_OFFSET)
-#define VEC_MASK0A (0x2d0)
-#define VEC_MASK0B (0x2d8)
-
-#define GBL_PERF_A_ADDR (0x330 + LB_REG_BASE + __IA64_UNCACHED_OFFSET)
-#define GBL_PERF_B_ADDR (0x338 + LB_REG_BASE + __IA64_UNCACHED_OFFSET)
-
-#define WRITE_LOCAL_SYNERGY_REG(addr, value) __synergy_out(addr, value)
-
-#define HSPEC_SYNERGY0_0 0x04000000 /* Synergy0 Registers */
-#define HSPEC_SYNERGY1_0 0x05000000 /* Synergy1 Registers */
-#define HS_SYNERGY_STRIDE (HSPEC_SYNERGY1_0 - HSPEC_SYNERGY0_0)
-#define REMOTE_HSPEC(_n, _x) (HUBREG_CAST (RREG_BASE(_n) + (_x)))
-
-#define RREG_BASE(_n) (NODE_LREG_BASE(_n))
-#define NODE_LREG_BASE(_n) (NODE_HSPEC_BASE(_n) + 0x30000000)
-#define NODE_HSPEC_BASE(_n) (HSPEC_BASE + NODE_OFFSET(_n))
-#ifndef HSPEC_BASE
-#define HSPEC_BASE (SYN_UNCACHED_SPACE | HSPEC_BASE_SYN)
-#endif
-#define SYN_UNCACHED_SPACE 0xc000000000000000
-#define HSPEC_BASE_SYN 0x00000b0000000000
-#define NODE_OFFSET(_n) (UINT64_CAST (_n) << NODE_SIZE_BITS)
-#define NODE_SIZE_BITS 33
-
-#define SYN_TAG_DISABLE_WAY (SSPEC_BASE+0xae0)
-
-
-#define RSYN_REG_OFFSET(fsb, reg) (((fsb) ? HSPEC_SYNERGY1_0 : HSPEC_SYNERGY0_0) | (reg))
-
-#define REMOTE_SYNERGY_LOAD(nasid, fsb, reg) __remote_synergy_in(nasid, fsb, reg)
-#define REMOTE_SYNERGY_STORE(nasid, fsb, reg, val) __remote_synergy_out(nasid, fsb, reg, val)
-
-static inline uint64_t
-__remote_synergy_in(int nasid, int fsb, uint64_t reg) {
- volatile uint64_t *addr;
-
- addr = (uint64_t *)(RREG_BASE(nasid) + RSYN_REG_OFFSET(fsb, reg));
- return (*addr);
-}
-
-static inline void
-__remote_synergy_out(int nasid, int fsb, uint64_t reg, uint64_t value) {
- volatile uint64_t *addr;
-
- addr = (uint64_t *)(RREG_BASE(nasid) + RSYN_REG_OFFSET(fsb, (reg<<2)));
- *(addr+0) = value >> 48;
- *(addr+1) = value >> 32;
- *(addr+2) = value >> 16;
- *(addr+3) = value;
- __ia64_mf_a();
-}
-
-/* XX this doesn't make a lot of sense. Which fsb? */
-static inline void
-__synergy_out(unsigned long addr, unsigned long value)
-{
- volatile unsigned long *adr = (unsigned long *)
- (addr | __IA64_UNCACHED_OFFSET);
-
- *adr = value;
- __ia64_mf_a();
-}
-
-#define READ_LOCAL_SYNERGY_REG(addr) __synergy_in(addr)
-
-/* XX this doesn't make a lot of sense. Which fsb? */
-static inline unsigned long
-__synergy_in(unsigned long addr)
-{
- unsigned long ret, *adr = (unsigned long *)
- (addr | __IA64_UNCACHED_OFFSET);
-
- ret = *adr;
- __ia64_mf_a();
- return ret;
-}
-
-struct sn1_intr_action {
- void (*handler)(int, void *, struct pt_regs *);
- void *intr_arg;
- unsigned long flags;
- struct sn1_intr_action * next;
-};
-
-typedef struct synergy_da_s {
- hub_intmasks_t s_intmasks;
-}synergy_da_t;
-
-struct sn1_cnode_action_list {
- spinlock_t action_list_lock;
- struct sn1_intr_action *action_list;
-};
-
-/*
- * ioctl cmds for node/hub/synergy/[01]/mon for synergy
- * perf monitoring are defined in sndrv.h
- */
-
-/* multiplex the counters every 10 timer interrupts */
-#define SYNERGY_PERF_FREQ_DEFAULT 10
-
-/* macros for synergy "mon" device ioctl handler */
-#define SYNERGY_PERF_INFO(_s, _f) (arbitrary_info_t)(((_s) << 16)|(_f))
-#define SYNERGY_PERF_INFO_CNODE(_x) (cnodeid_t)(((uint64_t)_x) >> 16)
-#define SYNERGY_PERF_INFO_FSB(_x) (((uint64_t)_x) & 1)
-
-/* synergy perf control registers */
-#define PERF_CNTL0_A 0xab0UL /* control A on FSB0 */
-#define PERF_CNTL0_B 0xab8UL /* control B on FSB0 */
-#define PERF_CNTL1_A 0xac0UL /* control A on FSB1 */
-#define PERF_CNTL1_B 0xac8UL /* control B on FSB1 */
-
-/* synergy perf counters */
-#define PERF_CNTR0_A 0xad0UL /* counter A on FSB0 */
-#define PERF_CNTR0_B 0xad8UL /* counter B on FSB0 */
-#define PERF_CNTR1_A 0xaf0UL /* counter A on FSB1 */
-#define PERF_CNTR1_B 0xaf8UL /* counter B on FSB1 */
-
-/* Synergy perf data. Each nodepda keeps a list of these */
-struct synergy_perf_s {
- uint64_t intervals; /* count of active intervals for this event */
- uint64_t total_intervals;/* snapshot of total intervals */
- uint64_t modesel; /* mode and sel bits, both A and B registers */
- struct synergy_perf_s *next; /* next in circular linked list */
- uint64_t counts[2]; /* [0] is synergy-A counter, [1] synergy-B counter */
-};
-
-typedef struct synergy_perf_s synergy_perf_t;
-
-typedef struct synergy_info_s synergy_info_t;
-
-extern void synergy_perf_init(void);
-extern void synergy_perf_update(int);
-extern struct file_operations synergy_mon_fops;
-
-#endif /* _ASM_IA64_SN_SN1_SYNERGY_H */
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
- * Copyright (c) 2001-2002 Silicon Graphics, Inc. All rights reserved.
+ * Copyright (c) 2001-2003 Silicon Graphics, Inc. All rights reserved.
*/
#ifndef _ASM_IA64_SN_SN2_ADDRS_H
#define LOCAL_MEM_SPACE 0xc000010000000000 /* Local Memory space */
#define GLOBAL_MMR_SPACE 0xc000000800000000 /* Global MMR space */
#define GLOBAL_PHYS_MMR_SPACE 0x0000000800000000 /* Global Physical MMR space */
-#define GET_SPACE 0xc000001000000000 /* GET space */
+#define GET_SPACE 0xe000001000000000 /* GET space */
#define AMO_SPACE 0xc000002000000000 /* AMO space */
#define CACHEABLE_MEM_SPACE 0xe000003000000000 /* Cacheable memory space */
#define UNCACHED 0xc000000000000000 /* UnCacheable memory space */
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
- * Copyright (C) 1992 - 1997, 2000-2002 Silicon Graphics, Inc. All rights reserved.
+ * Copyright (C) 1992 - 1997, 2000-2003 Silicon Graphics, Inc. All rights reserved.
*/
#ifndef _ASM_IA64_SN_SN2_ARCH_H
#define _ASM_IA64_SN_SN2_ARCH_H
#define NASID_MASK_BYTES ((MAX_NASIDS + 7) / 8)
+#define CNASID_MASK_BYTES (NASID_MASK_BYTES / 2)
/*
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
- * Copyright (C) 1992 - 1997, 2000-2002 Silicon Graphics, Inc. All rights reserved.
+ * Copyright (C) 1992 - 1997, 2000-2003 Silicon Graphics, Inc. All rights reserved.
*/
#ifndef _ASM_IA64_SN_SN2_INTR_H
#define _ASM_IA64_SN_SN2_INTR_H
// These two IRQ's are used by partitioning.
#define SGI_XPC_ACTIVATE (0x30)
+#define SGI_II_ERROR (0x31)
+#define SGI_XBOW_ERROR (0x32)
+#define SGI_PCIBR_ERROR (0x33)
#define SGI_XPC_NOTIFY (0xe7)
-#define IA64_SN2_FIRST_DEVICE_VECTOR (0x31)
+#define IA64_SN2_FIRST_DEVICE_VECTOR (0x34)
#define IA64_SN2_LAST_DEVICE_VECTOR (0xe6)
#define SN2_IRQ_RESERVED (0x1)
#define SN2_IRQ_CONNECTED (0x2)
+#define SN2_IRQ_SHARED (0x4)
#define SN2_IRQ_PER_HUB (2048)
unsigned char ret;
ret = *addr;
- sn_dma_flush((unsigned long)addr);
__sn_mf_a();
+ sn_dma_flush((unsigned long)addr);
return ret;
}
unsigned short ret;
ret = *addr;
- sn_dma_flush((unsigned long)addr);
__sn_mf_a();
+ sn_dma_flush((unsigned long)addr);
return ret;
}
unsigned int ret;
ret = *addr;
- sn_dma_flush((unsigned long)addr);
__sn_mf_a();
+ sn_dma_flush((unsigned long)addr);
return ret;
}
unsigned char val;
val = *(volatile unsigned char *)addr;
+ __sn_mf_a();
sn_dma_flush((unsigned long)addr);
return val;
}
unsigned short val;
val = *(volatile unsigned short *)addr;
+ __sn_mf_a();
sn_dma_flush((unsigned long)addr);
return val;
}
unsigned int val;
val = *(volatile unsigned int *) addr;
+ __sn_mf_a();
sn_dma_flush((unsigned long)addr);
return val;
}
unsigned long val;
val = *(volatile unsigned long *) addr;
+ __sn_mf_a();
sn_dma_flush((unsigned long)addr);
return val;
}
+++ /dev/null
-#ifndef _ASM_IA64_SN_MMZONE_SN2_H
-#define _ASM_IA64_SN_MMZONE_SN2_H
-
-/*
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Copyright (c) 2000-2002 Silicon Graphics, Inc. All rights reserved.
- */
-
-#include <linux/config.h>
-
-
-/*
- * SGI SN2 Arch defined values
- *
- * An SN2 physical address is broken down as follows:
- *
- * +-----------------------------------------+
- * | | | | node offset |
- * | unused | node | AS |-------------------|
- * | | | | cn | clump offset |
- * +-----------------------------------------+
- * 6 4 4 3 3 3 3 3 3 0
- * 3 9 8 8 7 6 5 4 3 0
- *
- * bits 63-49 Unused - must be zero
- * bits 48-38 Node number. Note that some configurations do NOT
- * have a node zero.
- * bits 37-36 Address space ID. Cached memory has a value of 3 (!!!).
- * Chipset & IO addresses have other values.
- * (Yikes!! The hardware folks hate us...)
- * bits 35-0 Node offset.
- *
- * The node offset can be further broken down as:
- * bits 35-34 Clump (bank) number.
- * bits 33-0 Clump (bank) offset.
- *
- * A node consists of up to 4 clumps (banks) of memory. A clump may be empty, or may be
- * populated with a single contiguous block of memory starting at clump
- * offset 0. The size of the block is (2**n) * 64MB, where 0<n<9.
- *
- * Important notes:
- * - IO space addresses are embedded with the range of valid memory addresses.
- * - All cached memory addresses have bits 36 & 37 set to 1's.
- * - There is no physical address 0.
- *
- * NOTE: This file exports symbols prefixed with "PLAT_". Symbols prefixed with
- * "SN_" are intended for internal use only and should not be used in
- * any platform independent code.
- *
- * This file is also responsible for exporting the following definitions:
- * cnodeid_t Define a compact node id.
- */
-
-typedef signed short cnodeid_t;
-
-#define SN2_BANKS_PER_NODE 4
-#define SN2_NODE_SIZE (64UL*1024*1024*1024) /* 64GB per node */
-#define SN2_BANK_SIZE (SN2_NODE_SIZE/SN2_BANKS_PER_NODE)
-#define SN2_NODE_SHIFT 38
-#define SN2_NODE_MASK 0x7ffUL
-#define SN2_NODE_OFFSET_MASK (SN2_NODE_SIZE-1)
-#define SN2_NODE_NUMBER(addr) (((unsigned long)(addr) >> SN2_NODE_SHIFT) & SN2_NODE_MASK)
-#define SN2_NODE_CLUMP_NUMBER(kaddr) (((unsigned long)(kaddr) >>34) & 3)
-#define SN2_NODE_OFFSET(addr) (((unsigned long)(addr)) & SN2_NODE_OFFSET_MASK)
-#define SN2_KADDR(nasid, offset) (((unsigned long)(nasid)<<SN2_NODE_SHIFT) | (offset) | SN2_PAGE_OFFSET)
-#define SN2_PAGE_OFFSET 0xe000003000000000UL /* Cacheable memory space */
-
-
-#define PLAT_MAX_NODE_NUMBER 2048 /* Maximum node number + 1 */
-#define PLAT_MAX_COMPACT_NODES 128 /* Maximum number of nodes in SSI system */
-
-#define PLAT_MAX_PHYS_MEMORY (1UL << 49)
-
-
-
-/*
- * On the SN platforms, a clump is the same as a memory bank.
- */
-#define PLAT_CLUMPS_PER_NODE SN2_BANKS_PER_NODE
-#define PLAT_CLUMP_OFFSET(addr) ((unsigned long)(addr) & 0x3ffffffffUL)
-#define PLAT_CLUMPSIZE (SN2_NODE_SIZE/PLAT_CLUMPS_PER_NODE)
-#define PLAT_MAXCLUMPS (PLAT_CLUMPS_PER_NODE * PLAT_MAX_COMPACT_NODES)
-
-
-
-/*
- * PLAT_VALID_MEM_KADDR returns a boolean to indicate if a kaddr is potentially a
- * valid cacheable identity mapped RAM memory address.
- * Note that the RAM may or may not actually be present!!
- */
-#define SN2_VALID_KERN_ADDR_MASK 0xffff003000000000UL
-#define SN2_VALID_KERN_ADDR_VALUE 0xe000003000000000UL
-#define PLAT_VALID_MEM_KADDR(kaddr) (((unsigned long)(kaddr) & SN2_VALID_KERN_ADDR_MASK) == SN2_VALID_KERN_ADDR_VALUE)
-
-
-
-/*
- * Memory is conceptually divided into chunks. A chunk is either
- * completely present, or else the kernel assumes it is completely
- * absent. Each node consists of a number of possibly contiguous chunks.
- */
-#define SN2_CHUNKSHIFT 25 /* 32 MB */
-#define PLAT_CHUNKSIZE (1UL << SN2_CHUNKSHIFT)
-#define PLAT_CHUNKNUM(addr) ({unsigned long _p=(unsigned long)(addr); \
- (((_p&SN2_NODE_MASK)>>2) | \
- (_p&SN2_NODE_OFFSET_MASK)) >>SN2_CHUNKSHIFT;})
-
-/*
- * Given a kaddr, find the nid (compact nodeid)
- */
-#ifdef CONFIG_IA64_SGI_SN_DEBUG
-#define DISCONBUG(kaddr) panic("DISCONTIG BUG: line %d, %s. kaddr 0x%lx", \
- __LINE__, __FILE__, (long)(kaddr))
-
-#define KVADDR_TO_NID(kaddr) ({long _ktn=(long)(kaddr); \
- kern_addr_valid(_ktn) ? \
- local_node_data->physical_node_map[SN2_NODE_NUMBER(_ktn)] : \
- (DISCONBUG(_ktn), 0UL);})
-#else
-#define KVADDR_TO_NID(kaddr) (local_node_data->physical_node_map[SN2_NODE_NUMBER(kaddr)])
-#endif
-
-
-
-/*
- * Given a kaddr, find the index into the clump_mem_map_base array of the page struct entry
- * for the first page of the clump.
- */
-#define PLAT_CLUMP_MEM_MAP_INDEX(kaddr) ({long _kmmi=(long)(kaddr); \
- KVADDR_TO_NID(_kmmi) * PLAT_CLUMPS_PER_NODE + \
- SN2_NODE_CLUMP_NUMBER(_kmmi);})
-
-
-
-/*
- * Calculate a "goal" value to be passed to __alloc_bootmem_node for allocating structures on
- * nodes so that they don't alias to the same line in the cache as the previous allocated structure.
- * This macro takes an address of the end of previous allocation, rounds it to a page boundary &
- * changes the node number.
- */
-#define PLAT_BOOTMEM_ALLOC_GOAL(cnode,kaddr) __pa(SN2_KADDR(PLAT_PXM_TO_PHYS_NODE_NUMBER(nid_to_pxm_map[cnode]), \
- (SN2_NODE_OFFSET(kaddr) + PAGE_SIZE - 1) >> PAGE_SHIFT << PAGE_SHIFT))
-
-
-
-
-/*
- * Convert a proximity domain number (from the ACPI tables) into a physical node number.
- * Note: on SN2, the promity domain number is the same as bits [8:1] of the NASID. The following
- * algorithm relies on:
- * - bit 0 of the NASID for cpu nodes is always 0
- * - bits [10:9] of all NASIDs in a partition are always the same
- * - hard_smp_processor_id return the SAPIC of the current cpu &
- * bits 0..11 contain the NASID.
- *
- * All of this complexity is because MS architectually limited proximity domain numbers to
- * 8 bits.
- */
-
-#define PLAT_PXM_TO_PHYS_NODE_NUMBER(pxm) (((pxm)<<1) | (hard_smp_processor_id() & 0x300))
-
-#endif /* _ASM_IA64_SN_MMZONE_SN2_H */
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
- * Copyright (c) 2001 Silicon Graphics, Inc. All rights reserved.
+ * Copyright (c) 2001-2003 Silicon Graphics, Inc. All rights reserved.
*/
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
- * Copyright (c) 2001, 2002 Silicon Graphics, Inc. All rights reserved.
+ * Copyright (c) 2001, 2002-2003 Silicon Graphics, Inc. All rights reserved.
*/
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
- * Copyright (c) 2001 Silicon Graphics, Inc. All rights reserved.
+ * Copyright (c) 2001-2003 Silicon Graphics, Inc. All rights reserved.
*/
/* Real-time Clock */
/* ==================================================================== */
-#define SH_RTC 0x00000001101c0000
-#define SH_RTC_MASK 0x007fffffffffffff
+#define SH_RTC 0x00000001101c0000UL
+#define SH_RTC_MASK 0x007fffffffffffffUL
#define SH_RTC_INIT 0x0000000000000000
/* SH_RTC_REAL_TIME_CLOCK */
/* Description: Real-time Clock */
#define SH_RTC_REAL_TIME_CLOCK_SHFT 0
-#define SH_RTC_REAL_TIME_CLOCK_MASK 0x007fffffffffffff
+#define SH_RTC_REAL_TIME_CLOCK_MASK 0x007fffffffffffffUL
/* ==================================================================== */
/* Register "SH_SCRATCH0" */
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
- * Copyright (c) 2001-2002 Silicon Graphics, Inc. All rights reserved.
+ * Copyright (c) 2001-2003 Silicon Graphics, Inc. All rights reserved.
*/
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
- * Copyright (C) 1992 - 1997, 2000-2002 Silicon Graphics, Inc. All rights reserved.
+ * Copyright (C) 1992 - 1997, 2000-2003 Silicon Graphics, Inc. All rights reserved.
*/
#ifndef _ASM_IA64_SN_SN2_SHUBIO_H
/* Scratch registers (all bits available) */
#define IIO_SCRATCH_REG0 IIO_ISCR0
#define IIO_SCRATCH_REG1 IIO_ISCR1
-#define IIO_SCRATCH_MASK 0xffffffffffffffff
-
-#define IIO_SCRATCH_BIT0_0 0x0000000000000001
-#define IIO_SCRATCH_BIT0_1 0x0000000000000002
-#define IIO_SCRATCH_BIT0_2 0x0000000000000004
-#define IIO_SCRATCH_BIT0_3 0x0000000000000008
-#define IIO_SCRATCH_BIT0_4 0x0000000000000010
-#define IIO_SCRATCH_BIT0_5 0x0000000000000020
-#define IIO_SCRATCH_BIT0_6 0x0000000000000040
-#define IIO_SCRATCH_BIT0_7 0x0000000000000080
-#define IIO_SCRATCH_BIT0_8 0x0000000000000100
-#define IIO_SCRATCH_BIT0_9 0x0000000000000200
-#define IIO_SCRATCH_BIT0_A 0x0000000000000400
-
-#define IIO_SCRATCH_BIT1_0 0x0000000000000001
-#define IIO_SCRATCH_BIT1_1 0x0000000000000002
+#define IIO_SCRATCH_MASK 0xffffffffffffffffUL
+
+#define IIO_SCRATCH_BIT0_0 0x0000000000000001UL
+#define IIO_SCRATCH_BIT0_1 0x0000000000000002UL
+#define IIO_SCRATCH_BIT0_2 0x0000000000000004UL
+#define IIO_SCRATCH_BIT0_3 0x0000000000000008UL
+#define IIO_SCRATCH_BIT0_4 0x0000000000000010UL
+#define IIO_SCRATCH_BIT0_5 0x0000000000000020UL
+#define IIO_SCRATCH_BIT0_6 0x0000000000000040UL
+#define IIO_SCRATCH_BIT0_7 0x0000000000000080UL
+#define IIO_SCRATCH_BIT0_8 0x0000000000000100UL
+#define IIO_SCRATCH_BIT0_9 0x0000000000000200UL
+#define IIO_SCRATCH_BIT0_A 0x0000000000000400UL
+
+#define IIO_SCRATCH_BIT1_0 0x0000000000000001UL
+#define IIO_SCRATCH_BIT1_1 0x0000000000000002UL
/* IO Translation Table Entries */
#define IIO_NUM_ITTES 7 /* ITTEs numbered 0..6 */
/* Hw manuals number them 1..7! */
/*
* IIO_IMEM Register fields.
*/
-#define IIO_IMEM_W0ESD 0x1 /* Widget 0 shut down due to error */
-#define IIO_IMEM_B0ESD (1 << 4) /* BTE 0 shut down due to error */
-#define IIO_IMEM_B1ESD (1 << 8) /* BTE 1 Shut down due to error */
+#define IIO_IMEM_W0ESD 0x1UL /* Widget 0 shut down due to error */
+#define IIO_IMEM_B0ESD (1UL << 4) /* BTE 0 shut down due to error */
+#define IIO_IMEM_B1ESD (1UL << 8) /* BTE 1 Shut down due to error */
/*
* As a permanent workaround for a bug in the PI side of the shub, we've
/*
* IO BTE Length/Status (IIO_IBLS) register bit field definitions
*/
-#define IBLS_BUSY (0x1 << 20)
+#define IBLS_BUSY (0x1UL << 20)
#define IBLS_ERROR_SHFT 16
-#define IBLS_ERROR (0x1 << IBLS_ERROR_SHFT)
+#define IBLS_ERROR (0x1UL << IBLS_ERROR_SHFT)
#define IBLS_LENGTH_MASK 0xffff
/*
* IO BTE Control/Terminate register (IBCT) register bit field definitions
*/
-#define IBCT_POISON (0x1 << 8)
-#define IBCT_NOTIFY (0x1 << 4)
-#define IBCT_ZFIL_MODE (0x1 << 0)
+#define IBCT_POISON (0x1UL << 8)
+#define IBCT_NOTIFY (0x1UL << 4)
+#define IBCT_ZFIL_MODE (0x1UL << 0)
/*
* IIO Incoming Error Packet Header (IIO_IIEPH1/IIO_IIEPH2)
*/
-#define IIEPH1_VALID (1 << 44)
-#define IIEPH1_OVERRUN (1 << 40)
+#define IIEPH1_VALID (1UL << 44)
+#define IIEPH1_OVERRUN (1UL << 40)
#define IIEPH1_ERR_TYPE_SHFT 32
#define IIEPH1_ERR_TYPE_MASK 0xf
#define IIEPH1_SOURCE_SHFT 20
#define IIEPH1_CMD_SHFT 0
#define IIEPH1_CMD_MASK 7
-#define IIEPH2_TAIL (1 << 40)
+#define IIEPH2_TAIL (1UL << 40)
#define IIEPH2_ADDRESS_SHFT 0
#define IIEPH2_ADDRESS_MASK 38
/*
* IO Error Clear register bit field definitions
*/
-#define IECLR_PI1_FWD_INT (1 << 31) /* clear PI1_FORWARD_INT in iidsr */
-#define IECLR_PI0_FWD_INT (1 << 30) /* clear PI0_FORWARD_INT in iidsr */
-#define IECLR_SPUR_RD_HDR (1 << 29) /* clear valid bit in ixss reg */
-#define IECLR_BTE1 (1 << 18) /* clear bte error 1 */
-#define IECLR_BTE0 (1 << 17) /* clear bte error 0 */
-#define IECLR_CRAZY (1 << 16) /* clear crazy bit in wstat reg */
-#define IECLR_PRB_F (1 << 15) /* clear err bit in PRB_F reg */
-#define IECLR_PRB_E (1 << 14) /* clear err bit in PRB_E reg */
-#define IECLR_PRB_D (1 << 13) /* clear err bit in PRB_D reg */
-#define IECLR_PRB_C (1 << 12) /* clear err bit in PRB_C reg */
-#define IECLR_PRB_B (1 << 11) /* clear err bit in PRB_B reg */
-#define IECLR_PRB_A (1 << 10) /* clear err bit in PRB_A reg */
-#define IECLR_PRB_9 (1 << 9) /* clear err bit in PRB_9 reg */
-#define IECLR_PRB_8 (1 << 8) /* clear err bit in PRB_8 reg */
-#define IECLR_PRB_0 (1 << 0) /* clear err bit in PRB_0 reg */
+#define IECLR_PI1_FWD_INT (1UL << 31) /* clear PI1_FORWARD_INT in iidsr */
+#define IECLR_PI0_FWD_INT (1UL << 30) /* clear PI0_FORWARD_INT in iidsr */
+#define IECLR_SPUR_RD_HDR (1UL << 29) /* clear valid bit in ixss reg */
+#define IECLR_BTE1 (1UL << 18) /* clear bte error 1 */
+#define IECLR_BTE0 (1UL << 17) /* clear bte error 0 */
+#define IECLR_CRAZY (1UL << 16) /* clear crazy bit in wstat reg */
+#define IECLR_PRB_F (1UL << 15) /* clear err bit in PRB_F reg */
+#define IECLR_PRB_E (1UL << 14) /* clear err bit in PRB_E reg */
+#define IECLR_PRB_D (1UL << 13) /* clear err bit in PRB_D reg */
+#define IECLR_PRB_C (1UL << 12) /* clear err bit in PRB_C reg */
+#define IECLR_PRB_B (1UL << 11) /* clear err bit in PRB_B reg */
+#define IECLR_PRB_A (1UL << 10) /* clear err bit in PRB_A reg */
+#define IECLR_PRB_9 (1UL << 9) /* clear err bit in PRB_9 reg */
+#define IECLR_PRB_8 (1UL << 8) /* clear err bit in PRB_8 reg */
+#define IECLR_PRB_0 (1UL << 0) /* clear err bit in PRB_0 reg */
/*
* IIO CRB control register Fields: IIO_ICCR
typedef struct hub_piomap_s *hub_piomap_t;
extern hub_piomap_t
-hub_piomap_alloc(devfs_handle_t dev, /* set up mapping for this device */
+hub_piomap_alloc(vertex_hdl_t dev, /* set up mapping for this device */
device_desc_t dev_desc, /* device descriptor */
iopaddr_t xtalk_addr, /* map for this xtalk_addr range */
size_t byte_count,
hub_piomap_done(hub_piomap_t hub_piomap);
extern caddr_t
-hub_piotrans_addr( devfs_handle_t dev, /* translate to this device */
+hub_piotrans_addr( vertex_hdl_t dev, /* translate to this device */
device_desc_t dev_desc, /* device descriptor */
iopaddr_t xtalk_addr, /* Crosstalk address */
size_t byte_count, /* map this many bytes */
typedef struct hub_dmamap_s *hub_dmamap_t;
extern hub_dmamap_t
-hub_dmamap_alloc( devfs_handle_t dev, /* set up mappings for dev */
+hub_dmamap_alloc( vertex_hdl_t dev, /* set up mappings for dev */
device_desc_t dev_desc, /* device descriptor */
size_t byte_count_max, /* max size of a mapping */
unsigned flags); /* defined in dma.h */
hub_dmamap_done( hub_dmamap_t dmamap); /* done w/ mapping resources */
extern iopaddr_t
-hub_dmatrans_addr( devfs_handle_t dev, /* translate for this device */
+hub_dmatrans_addr( vertex_hdl_t dev, /* translate for this device */
device_desc_t dev_desc, /* device descriptor */
paddr_t paddr, /* system physical address */
size_t byte_count, /* length */
unsigned flags); /* defined in dma.h */
extern alenlist_t
-hub_dmatrans_list( devfs_handle_t dev, /* translate for this device */
+hub_dmatrans_list( vertex_hdl_t dev, /* translate for this device */
device_desc_t dev_desc, /* device descriptor */
alenlist_t palenlist, /* system addr/length list */
unsigned flags); /* defined in dma.h */
hub_dmamap_drain( hub_dmamap_t map);
extern void
-hub_dmaaddr_drain( devfs_handle_t vhdl,
+hub_dmaaddr_drain( vertex_hdl_t vhdl,
paddr_t addr,
size_t bytes);
extern void
-hub_dmalist_drain( devfs_handle_t vhdl,
+hub_dmalist_drain( vertex_hdl_t vhdl,
alenlist_t list);
typedef struct hub_intr_s *hub_intr_t;
extern hub_intr_t
-hub_intr_alloc( devfs_handle_t dev, /* which device */
+hub_intr_alloc( vertex_hdl_t dev, /* which device */
device_desc_t dev_desc, /* device descriptor */
- devfs_handle_t owner_dev); /* owner of this interrupt */
+ vertex_hdl_t owner_dev); /* owner of this interrupt */
extern hub_intr_t
-hub_intr_alloc_nothd(devfs_handle_t dev, /* which device */
+hub_intr_alloc_nothd(vertex_hdl_t dev, /* which device */
device_desc_t dev_desc, /* device descriptor */
- devfs_handle_t owner_dev); /* owner of this interrupt */
+ vertex_hdl_t owner_dev); /* owner of this interrupt */
extern void
hub_intr_free(hub_intr_t intr_hdl);
extern void
hub_intr_disconnect(hub_intr_t intr_hdl);
-extern devfs_handle_t
-hub_intr_cpu_get(hub_intr_t intr_hdl);
/* CONFIGURATION MANAGEMENT */
extern void
-hub_provider_startup(devfs_handle_t hub);
+hub_provider_startup(vertex_hdl_t hub);
extern void
-hub_provider_shutdown(devfs_handle_t hub);
+hub_provider_shutdown(vertex_hdl_t hub);
#define HUB_PIO_CONVEYOR 0x1 /* PIO in conveyor belt mode */
#define HUB_PIO_FIRE_N_FORGET 0x2 /* PIO in fire-and-forget mode */
typedef int hub_widget_flags_t;
-/* Set the PIO mode for a widget. These two functions perform the
- * same operation, but hub_device_flags_set() takes a hardware graph
- * vertex while hub_widget_flags_set() takes a nasid and widget
- * number. In most cases, hub_device_flags_set() should be used.
- */
+/* Set the PIO mode for a widget. */
extern int hub_widget_flags_set(nasid_t nasid,
xwidgetnum_t widget_num,
hub_widget_flags_t flags);
-/* Depending on the flags set take the appropriate actions */
-extern int hub_device_flags_set(devfs_handle_t widget_dev,
- hub_widget_flags_t flags);
-
-
/* Error Handling. */
-extern int hub_ioerror_handler(devfs_handle_t, int, int, struct io_error_s *);
+extern int hub_ioerror_handler(vertex_hdl_t, int, int, struct io_error_s *);
extern int kl_ioerror_handler(cnodeid_t, cnodeid_t, cpuid_t,
int, paddr_t, caddr_t, ioerror_mode_t);
-extern void hub_widget_reset(devfs_handle_t, xwidgetnum_t);
-extern int hub_error_devenable(devfs_handle_t, int, int);
-extern void hub_widgetdev_enable(devfs_handle_t, int);
-extern void hub_widgetdev_shutdown(devfs_handle_t, int);
-extern int hub_dma_enabled(devfs_handle_t);
-
-/* hubdev */
-extern void hubdev_init(void);
-extern void hubdev_register(int (*attach_method)(devfs_handle_t));
-extern int hubdev_unregister(int (*attach_method)(devfs_handle_t));
-extern int hubdev_docallouts(devfs_handle_t hub);
-
-extern caddr_t hubdev_prombase_get(devfs_handle_t hub);
-extern cnodeid_t hubdev_cnodeid_get(devfs_handle_t hub);
+extern int hub_error_devenable(vertex_hdl_t, int, int);
+extern int hub_dma_enabled(vertex_hdl_t);
#endif /* __ASSEMBLY__ */
#endif /* _KERNEL */
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
- * Copyright (c) 1992 - 1997,2001 Silicon Graphics, Inc. All rights reserved.
+ * Copyright (c) 1992-1997,2001-2003 Silicon Graphics, Inc. All rights reserved.
*/
#ifndef _ASM_IA64_SN_SN2_SLOTNUM_H
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
- * Copyright (C) 1992 - 1997, 2000-2002 Silicon Graphics, Inc. All rights reserved.
+ * Copyright (C) 1992 - 1997, 2000-2003 Silicon Graphics, Inc. All rights reserved.
*/
#ifndef _ASM_IA64_SN_SN2_SN_PRIVATE_H
#define _ASM_IA64_SN_SN2_SN_PRIVATE_H
#endif
/* intr.c */
-extern int intr_reserve_level(cpuid_t cpu, int level, int err, devfs_handle_t owner_dev, char *name);
+extern int intr_reserve_level(cpuid_t cpu, int level, int err, vertex_hdl_t owner_dev, char *name);
extern void intr_unreserve_level(cpuid_t cpu, int level);
extern int intr_connect_level(cpuid_t cpu, int bit, ilvl_t mask_no,
intr_func_t intr_prefunc);
extern int intr_disconnect_level(cpuid_t cpu, int bit);
-extern cpuid_t intr_heuristic(devfs_handle_t dev, device_desc_t dev_desc,
- int req_bit,int intr_resflags,devfs_handle_t owner_dev,
+extern cpuid_t intr_heuristic(vertex_hdl_t dev, device_desc_t dev_desc,
+ int req_bit,int intr_resflags,vertex_hdl_t owner_dev,
char *intr_name,int *resp_bit);
extern void intr_block_bit(cpuid_t cpu, int bit);
extern void intr_unblock_bit(cpuid_t cpu, int bit);
void bte_wait_for_xfer_completion(void *);
/* klgraph.c */
-void klhwg_add_all_nodes(devfs_handle_t);
-void klhwg_add_all_modules(devfs_handle_t);
+void klhwg_add_all_nodes(vertex_hdl_t);
+void klhwg_add_all_modules(vertex_hdl_t);
/* klidbg.c */
void install_klidbg_functions(void);
/* init.c */
extern cnodeid_t get_compact_nodeid(void); /* get compact node id */
extern void init_platform_nodepda(nodepda_t *npda, cnodeid_t node);
-extern void init_platform_pda(cpuid_t cpu);
extern void per_cpu_init(void);
extern int is_fine_dirmode(void);
extern void update_node_information(cnodeid_t);
*/
struct hub_piomap_s {
struct xtalk_piomap_s hpio_xtalk_info;/* standard crosstalk pio info */
- devfs_handle_t hpio_hub; /* which shub's mapping registers are set up */
+ vertex_hdl_t hpio_hub; /* which shub's mapping registers are set up */
short hpio_holdcnt; /* count of current users of bigwin mapping */
char hpio_bigwin_num;/* if big window map, which one */
int hpio_flags; /* defined below */
*/
struct hub_dmamap_s {
struct xtalk_dmamap_s hdma_xtalk_info;/* standard crosstalk dma info */
- devfs_handle_t hdma_hub; /* which shub we go through */
+ vertex_hdl_t hdma_hub; /* which shub we go through */
int hdma_flags; /* defined below */
};
/* shub_dmamap flags */
(vhdl, INFO_LBL_CPU_INFO, (arbitrary_info_t)infoptr)
/* Special initialization function for xswitch vertices created during startup. */
-extern void xswitch_vertex_init(devfs_handle_t xswitch);
+extern void xswitch_vertex_init(vertex_hdl_t xswitch);
extern xtalk_provider_t hub_provider;
void bootstrap(void);
/* sndrv.c */
-extern int sndrv_attach(devfs_handle_t vertex);
+extern int sndrv_attach(vertex_hdl_t vertex);
#endif /* _ASM_IA64_SN_SN2_SN_PRIVATE_H */
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
- * Copyright (C) 2000-2002 Silicon Graphics, Inc. All rights reserved.
+ * Copyright (C) 2000-2003 Silicon Graphics, Inc. All rights reserved.
*/
#include <asm/sn/types.h>
#include <asm/current.h>
#include <asm/nodedata.h>
+#include <asm/sn/pda.h>
/*
*
* LID - processor defined register (see PRM V2).
*
- * On SN1
- * 31:24 - id Contains the NASID
- * 23:16 - eid Contains 0-3 to identify the cpu on the node
- * bit 17 - synergy number
- * bit 16 - FSB slot number
* On SN2
* 31:28 - id Contains 0-3 to identify the cpu on the node
* 27:16 - eid Contains the NASID
*
* The following assumes the following mappings for LID register values:
*
- * The macros convert between cpu physical ids & slice/fsb/synergy/nasid/cnodeid.
+ * The macros convert between cpu physical ids & slice/nasid/cnodeid.
* These terms are described below:
*
*
+ * Brick
* ----- ----- ----- ----- CPU
- * | 0 | | 1 | | 2 | | 3 | SLICE
+ * | 0 | | 1 | | 0 | | 1 | SLICE
* ----- ----- ----- -----
* | | | |
* | | | |
- * 0 | | 1 0 | | 1 FSB SLOT
+ * 0 | | 2 0 | | 2 FSB SLOT
* ------- -------
* | |
* | |
- * ------- -------
- * | | | |
- * | 0 | | 1 | SYNERGY (SN1 only)
- * | | | |
- * ------- -------
* | |
- * | |
- * -------------------------------
- * | |
- * | BEDROCK / SHUB | NASID (0..MAX_NASIDS)
- * | | CNODEID (0..num_compact_nodes-1)
- * | |
- * | |
- * -------------------------------
- * |
+ * ------------ -------------
+ * | | | |
+ * | SHUB | | SHUB | NASID (0..MAX_NASIDS)
+ * | |----- | | CNODEID (0..num_compact_nodes-1)
+ * | | | |
+ * | | | |
+ * ------------ -------------
+ * | |
+ *
*
*/
#define cpu_physical_id(cpuid) ((ia64_get_lid() >> 16) & 0xffff)
#endif
-#ifdef CONFIG_IA64_SGI_SN1
/*
* macros for some of these exist in sn/addrs.h & sn/arch.h, etc. However,
* trying #include these files here causes circular dependencies.
*/
-#define cpu_physical_id_to_nasid(cpi) ((cpi) >> 8)
-#define cpu_physical_id_to_synergy(cpi) (((cpi) >> 1) & 1)
-#define cpu_physical_id_to_fsb_slot(cpi) ((cpi) & 1)
-#define cpu_physical_id_to_slice(cpi) ((cpi) & 3)
-#define get_nasid() ((ia64_get_lid() >> 24))
-#define get_slice() ((ia64_get_lid() >> 16) & 3)
-#define get_node_number(addr) (((unsigned long)(addr)>>33) & 0x7f)
-#else
#define cpu_physical_id_to_nasid(cpi) ((cpi) &0xfff)
#define cpu_physical_id_to_slice(cpi) ((cpi>>12) & 3)
#define get_nasid() ((ia64_get_lid() >> 16) & 0xfff)
#define get_slice() ((ia64_get_lid() >> 28) & 0xf)
#define get_node_number(addr) (((unsigned long)(addr)>>38) & 0x7ff)
-#endif
/*
* NOTE: id & eid refer to Intel's definitions of the LID register
#define nasid_slice_to_cpuid(nasid,slice) (cpu_logical_id(nasid_slice_to_cpu_physical_id((nasid),(slice))))
-#ifdef CONFIG_IA64_SGI_SN1
-#define nasid_slice_to_cpu_physical_id(nasid, slice) (((nasid)<<8) | (slice))
-#else
#define nasid_slice_to_cpu_physical_id(nasid, slice) (((slice)<<12) | (nasid))
-#endif
/*
* The following table/struct is used for managing PTC coherency domains.
} sn_sapicid_info_t;
extern sn_sapicid_info_t sn_sapicid_info[]; /* indexed by cpuid */
+extern short physical_node_map[]; /* indexed by nasid to get cnode */
-
-#ifdef CONFIG_IA64_SGI_SN1
-/*
- * cpuid_to_fsb_slot - convert a cpuid to the fsb slot number that it is in.
- * (there are 2 cpus per FSB. This function returns 0 or 1)
- */
-#define cpuid_to_fsb_slot(cpuid) (cpu_physical_id_to_fsb_slot(cpu_physical_id(cpuid)))
-
-
-/*
- * cpuid_to_synergy - convert a cpuid to the synergy that it resides on
- * (there are 2 synergies per node. Function returns 0 or 1 to
- * specify which synergy the cpu is on)
- */
-#define cpuid_to_synergy(cpuid) (cpu_physical_id_to_synergy(cpu_physical_id(cpuid)))
-
-#endif
-
/*
* cpuid_to_slice - convert a cpuid to the slice that it resides on
* There are 4 cpus per node. This function returns 0 .. 3)
/*
* cpuid_to_cnodeid - convert a cpuid to the cnode that it resides on
*/
-#define cpuid_to_cnodeid(cpuid) (local_node_data->physical_node_map[cpuid_to_nasid(cpuid)])
+#define cpuid_to_cnodeid(cpuid) (physical_node_map[cpuid_to_nasid(cpuid)])
/*
* Just extract the NASID from the pointer.
*
*/
-#define cnodeid_to_nasid(cnodeid) (get_node_number(local_node_data->pg_data_ptrs[cnodeid]))
+#define cnodeid_to_nasid(cnodeid) pda->cnodeid_to_nasid_table[cnodeid]
/*
* nasid_to_cnodeid - convert a NASID to a cnodeid
*/
-#define nasid_to_cnodeid(nasid) (nasid) /* (local_node_data->physical_node_map[nasid]) */
+#define nasid_to_cnodeid(nasid) (physical_node_map[nasid])
/*
#define cpuid_to_subnode(cpuid) ((cpuid_to_slice(cpuid)<2) ? 0 : 1)
-/*
- * cpuid_to_localslice - convert a cpuid to a local slice
- * slice 0 & 2 are local slice 0
- * slice 1 & 3 are local slice 1
- */
-#define cpuid_to_localslice(cpuid) (cpuid_to_slice(cpuid) & 1)
-
-
#define smp_physical_node_id() (cpuid_to_nasid(smp_processor_id()))
-/*
- * cnodeid_to_cpuid - convert a cnode to a cpuid of a cpu on the node.
- * returns -1 if no cpus exist on the node
- */
-extern int cnodeid_to_cpuid(int cnode);
-
-
#endif /* _ASM_IA64_SN_SN_CPUID_H */
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
- * Copyright (C) 1992 - 1997, 1999-2001 Silicon Graphics, Inc.
- * All rights reserved.
+ * Copyright (C) 1992-1997,1999-2003 Silicon Graphics, Inc. All rights reserved.
*/
#ifndef _ASM_IA64_SN_SN_FRU_H
#define _ASM_IA64_SN_SN_FRU_H
+++ /dev/null
-/*
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Copyright (C) 2001-2002 Silicon Graphics, Inc. All rights reserved.
- */
-
-
-#ifndef _ASM_IA64_SN_SN_PIO_WRITE_SYNC_H
-#define _ASM_IA64_SN_SN_PIO_WRITE_SYNC_H
-
-#include <linux/config.h>
-#ifdef CONFIG_IA64_SGI_SN2
-#include <asm/sn/sn_cpuid.h>
-#include <asm/sn/sn2/addrs.h>
-#include <asm/sn/sn2/shub_mmr.h>
-#include <asm/sn/sn2/shub_mmr_t.h>
-
-/*
- * This macro flushes all outstanding PIOs performed by this cpu to the
- * intended destination SHUB. This in essence ensures that all PIO's
- * issues by this cpu has landed at it's destination.
- *
- * This macro expects the caller:
- * 1. The thread is locked.
- * 2. All prior PIO operations has been fenced with __ia64_mf_a().
- *
- * The expectation is that get_slice() will return either 0 or 2.
- * When we have multi-core cpu's, the expectation is get_slice() will
- * return either 0,1 or 2,3.
- */
-
-#define SN_PIO_WRITE_SYNC \
- { \
- volatile unsigned long sn_pio_writes_done; \
- do { \
- sn_pio_writes_done = (volatile unsigned long) (SH_PIO_WRITE_STATUS_0_WRITES_OK_MASK & HUB_L( (unsigned long *)GLOBAL_MMR_ADDR(get_nasid(), (get_slice() < 2) ? SH_PIO_WRITE_STATUS_0 : SH_PIO_WRITE_STATUS_1 ))); \
- } while (!sn_pio_writes_done); \
- __ia64_mf_a(); \
- }
-#else
-
-/*
- * For all ARCHITECTURE type, this is a NOOP.
- */
-
-#define SN_PIO_WRITE_SYNC
-
-#endif
-
-#endif /* _ASM_IA64_SN_SN_PIO_WRITE_SYNC_H */
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
- * Copyright (C) 1992 - 1997, 2000-2001 Silicon Graphics, Inc. All rights reserved.
+ * Copyright (C) 1992-1997,2000-2003 Silicon Graphics, Inc. All rights reserved.
*/
#ifndef _ASM_IA64_SN_SN_PRIVATE_H
#define _ASM_IA64_SN_SN_PRIVATE_H
#include <asm/sn/xtalk/xwidget.h>
#include <asm/sn/xtalk/xtalk_private.h>
-#if defined(CONFIG_IA64_SGI_SN1)
-#include <asm/sn/sn1/sn_private.h>
-#elif defined(CONFIG_IA64_SGI_SN2)
#include <asm/sn/sn2/sn_private.h>
-#endif
#endif /* _ASM_IA64_SN_SN_PRIVATE_H */
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
- * Copyright (c) 2000-2002 Silicon Graphics, Inc. All rights reserved.
+ * Copyright (c) 2000-2003 Silicon Graphics, Inc. All rights reserved.
*/
#define SN_SAL_CONSOLE_POLL 0x02000026
#define SN_SAL_CONSOLE_INTR 0x02000027
#define SN_SAL_CONSOLE_PUTB 0x02000028
+#define SN_SAL_CONSOLE_XMIT_CHARS 0x0200002a
+#define SN_SAL_CONSOLE_READC 0x0200002b
#define SN_SAL_SYSCTL_MODID_GET 0x02000031
#define SN_SAL_SYSCTL_GET 0x02000032
#define SN_SAL_SYSCTL_IOBRICK_MODULE_GET 0x02000033
#define SN_SAL_SYSCTL_PARTITION_GET 0x0200003a
#define SN_SAL_SYSTEM_POWER_DOWN 0x0200003b
#define SN_SAL_GET_MASTER_BASEIO_NASID 0x0200003c
+#define SN_SAL_COHERENCE 0x0200003d
+#define SN_SAL_SYSCTL_FRU_CAPTURE 0x0200003f
/*
* Service-specific constants
*/
-#define SAL_CONSOLE_INTR_IN 0 /* manipulate input interrupts */
-#define SAL_CONSOLE_INTR_OUT 1 /* manipulate output low-water
- * interrupts
- */
+
+/* Console interrupt manipulation */
+ /* action codes */
#define SAL_CONSOLE_INTR_OFF 0 /* turn the interrupt off */
#define SAL_CONSOLE_INTR_ON 1 /* turn the interrupt on */
+#define SAL_CONSOLE_INTR_STATUS 2 /* retrieve the interrupt status */
+ /* interrupt specification & status return codes */
+#define SAL_CONSOLE_INTR_XMIT 1 /* output interrupt */
+#define SAL_CONSOLE_INTR_RECV 2 /* input interrupt */
/*
* Specify the minimum PROM revsion required for this kernel.
* Note that they're stored in hex format...
*/
-#ifdef CONFIG_IA64_SGI_SN1
-#define SN_SAL_MIN_MAJOR 0x0
-#define SN_SAL_MIN_MINOR 0x03 /* SN1 PROMs are stuck at rev 0.03 */
-#elif defined(CONFIG_IA64_SGI_SN2)
-#define SN_SAL_MIN_MAJOR 0x0
-#define SN_SAL_MIN_MINOR 0x11
-#else
-#error "must specify which PROM revisions this kernel needs"
-#endif /* CONFIG_IA64_SGI_SN1 */
+#define SN_SAL_MIN_MAJOR 0x1 /* SN2 kernels need at least PROM 1.0 */
+#define SN_SAL_MIN_MINOR 0x0
u64 ia64_sn_probe_io_slot(long paddr, long size, void *data_ptr);
{
struct ia64_sal_retval ret_stuff;
- ret_stuff.status = (uint64_t)0;
- ret_stuff.v0 = (uint64_t)0;
- ret_stuff.v1 = (uint64_t)0;
- ret_stuff.v2 = (uint64_t)0;
+ ret_stuff.status = 0;
+ ret_stuff.v0 = 0;
+ ret_stuff.v1 = 0;
+ ret_stuff.v2 = 0;
SAL_CALL(ret_stuff, SN_SAL_GET_MASTER_NASID, 0, 0, 0, 0, 0, 0, 0);
if (ret_stuff.status < 0)
{
struct ia64_sal_retval ret_stuff;
- ret_stuff.status = (uint64_t)0;
- ret_stuff.v0 = (uint64_t)0;
- ret_stuff.v1 = (uint64_t)0;
- ret_stuff.v2 = (uint64_t)0;
+ ret_stuff.status = 0;
+ ret_stuff.v0 = 0;
+ ret_stuff.v1 = 0;
+ ret_stuff.v2 = 0;
SAL_CALL(ret_stuff, SN_SAL_GET_MASTER_BASEIO_NASID, 0, 0, 0, 0, 0, 0, 0);
if (ret_stuff.status < 0)
extern u64 klgraph_addr[];
int cnodeid;
- cnodeid = 0 /* nasid_to_cnodeid(nasid) */;
+ cnodeid = nasid_to_cnodeid(nasid);
if (klgraph_addr[cnodeid] == 0) {
- ret_stuff.status = (uint64_t)0;
- ret_stuff.v0 = (uint64_t)0;
- ret_stuff.v1 = (uint64_t)0;
- ret_stuff.v2 = (uint64_t)0;
+ ret_stuff.status = 0;
+ ret_stuff.v0 = 0;
+ ret_stuff.v1 = 0;
+ ret_stuff.v2 = 0;
SAL_CALL(ret_stuff, SN_SAL_GET_KLCONFIG_ADDR, (u64)nasid, 0, 0, 0, 0, 0, 0);
/*
{
struct ia64_sal_retval ret_stuff;
- ret_stuff.status = (uint64_t)0;
- ret_stuff.v0 = (uint64_t)0;
- ret_stuff.v1 = (uint64_t)0;
- ret_stuff.v2 = (uint64_t)0;
- __SAL_CALL(ret_stuff, SN_SAL_CONSOLE_GETC, 0, 0, 0, 0, 0, 0, 0);
+ ret_stuff.status = 0;
+ ret_stuff.v0 = 0;
+ ret_stuff.v1 = 0;
+ ret_stuff.v2 = 0;
+ SAL_CALL_NOLOCK(ret_stuff, SN_SAL_CONSOLE_GETC, 0, 0, 0, 0, 0, 0, 0);
/* character is in 'v0' */
*ch = (int)ret_stuff.v0;
}
/*
+ * Read a character from the SAL console device, after a previous interrupt
+ * or poll operation has informed us that a character is available
+ * to be read.
+ */
+static inline u64
+ia64_sn_console_readc(void)
+{
+ struct ia64_sal_retval ret_stuff;
+
+ ret_stuff.status = 0;
+ ret_stuff.v0 = 0;
+ ret_stuff.v1 = 0;
+ ret_stuff.v2 = 0;
+ SAL_CALL_NOLOCK(ret_stuff, SN_SAL_CONSOLE_READC, 0, 0, 0, 0, 0, 0, 0);
+
+ /* character is in 'v0' */
+ return ret_stuff.v0;
+}
+
+/*
* Sends the given character to the console.
*/
static inline u64
{
struct ia64_sal_retval ret_stuff;
- ret_stuff.status = (uint64_t)0;
- ret_stuff.v0 = (uint64_t)0;
- ret_stuff.v1 = (uint64_t)0;
- ret_stuff.v2 = (uint64_t)0;
- __SAL_CALL(ret_stuff, SN_SAL_CONSOLE_PUTC, (uint64_t)ch, 0, 0, 0, 0, 0, 0);
+ ret_stuff.status = 0;
+ ret_stuff.v0 = 0;
+ ret_stuff.v1 = 0;
+ ret_stuff.v2 = 0;
+ SAL_CALL_NOLOCK(ret_stuff, SN_SAL_CONSOLE_PUTC, (uint64_t)ch, 0, 0, 0, 0, 0, 0);
return ret_stuff.status;
}
* Sends the given buffer to the console.
*/
static inline u64
-ia64_sn_console_putb(char *buf, int len)
+ia64_sn_console_putb(const char *buf, int len)
{
struct ia64_sal_retval ret_stuff;
- ret_stuff.status = (uint64_t)0;
- ret_stuff.v0 = (uint64_t)0;
- ret_stuff.v1 = (uint64_t)0;
- ret_stuff.v2 = (uint64_t)0;
- __SAL_CALL(ret_stuff, SN_SAL_CONSOLE_PUTB, (uint64_t)buf, (uint64_t)len, 0, 0, 0, 0, 0);
+ ret_stuff.status = 0;
+ ret_stuff.v0 = 0;
+ ret_stuff.v1 = 0;
+ ret_stuff.v2 = 0;
+ SAL_CALL_NOLOCK(ret_stuff, SN_SAL_CONSOLE_PUTB, (uint64_t)buf, (uint64_t)len, 0, 0, 0, 0, 0);
- return ret_stuff.status;
+ if ( ret_stuff.status == 0 ) {
+ return ret_stuff.v0;
+ }
+ return (u64)0;
}
/*
{
struct ia64_sal_retval ret_stuff;
- ret_stuff.status = (uint64_t)0;
- ret_stuff.v0 = (uint64_t)0;
- ret_stuff.v1 = (uint64_t)0;
- ret_stuff.v2 = (uint64_t)0;
- __SAL_CALL(ret_stuff, SN_SAL_PRINT_ERROR, (uint64_t)hook, (uint64_t)rec, 0, 0, 0, 0, 0);
+ ret_stuff.status = 0;
+ ret_stuff.v0 = 0;
+ ret_stuff.v1 = 0;
+ ret_stuff.v2 = 0;
+ SAL_CALL_NOLOCK(ret_stuff, SN_SAL_PRINT_ERROR, (uint64_t)hook, (uint64_t)rec, 0, 0, 0, 0, 0);
return ret_stuff.status;
}
{
struct ia64_sal_retval ret_stuff;
- ret_stuff.status = (uint64_t)0;
- ret_stuff.v0 = (uint64_t)0;
- ret_stuff.v1 = (uint64_t)0;
- ret_stuff.v2 = (uint64_t)0;
- __SAL_CALL(ret_stuff, SN_SAL_LOG_CE, 0, 0, 0, 0, 0, 0, 0);
+ ret_stuff.status = 0;
+ ret_stuff.v0 = 0;
+ ret_stuff.v1 = 0;
+ ret_stuff.v2 = 0;
+ SAL_CALL_NOLOCK(ret_stuff, SN_SAL_LOG_CE, 0, 0, 0, 0, 0, 0, 0);
return ret_stuff.status;
}
{
struct ia64_sal_retval ret_stuff;
- ret_stuff.status = (uint64_t)0;
- ret_stuff.v0 = (uint64_t)0;
- ret_stuff.v1 = (uint64_t)0;
- ret_stuff.v2 = (uint64_t)0;
- __SAL_CALL(ret_stuff, SN_SAL_CONSOLE_POLL, 0, 0, 0, 0, 0, 0, 0);
+ ret_stuff.status = 0;
+ ret_stuff.v0 = 0;
+ ret_stuff.v1 = 0;
+ ret_stuff.v2 = 0;
+ SAL_CALL_NOLOCK(ret_stuff, SN_SAL_CONSOLE_POLL, 0, 0, 0, 0, 0, 0, 0);
/* result is in 'v0' */
*result = (int)ret_stuff.v0;
}
/*
+ * Checks console interrupt status
+ */
+static inline u64
+ia64_sn_console_intr_status(void)
+{
+ struct ia64_sal_retval ret_stuff;
+
+ ret_stuff.status = 0;
+ ret_stuff.v0 = 0;
+ ret_stuff.v1 = 0;
+ ret_stuff.v2 = 0;
+ SAL_CALL_NOLOCK(ret_stuff, SN_SAL_CONSOLE_INTR,
+ 0, SAL_CONSOLE_INTR_STATUS,
+ 0, 0, 0, 0, 0);
+
+ if (ret_stuff.status == 0) {
+ return ret_stuff.v0;
+ }
+
+ return 0;
+}
+
+/*
+ * Enable an interrupt on the SAL console device.
+ */
+static inline void
+ia64_sn_console_intr_enable(uint64_t intr)
+{
+ struct ia64_sal_retval ret_stuff;
+
+ ret_stuff.status = 0;
+ ret_stuff.v0 = 0;
+ ret_stuff.v1 = 0;
+ ret_stuff.v2 = 0;
+ SAL_CALL_NOLOCK(ret_stuff, SN_SAL_CONSOLE_INTR,
+ intr, SAL_CONSOLE_INTR_ON,
+ 0, 0, 0, 0, 0);
+}
+
+/*
+ * Disable an interrupt on the SAL console device.
+ */
+static inline void
+ia64_sn_console_intr_disable(uint64_t intr)
+{
+ struct ia64_sal_retval ret_stuff;
+
+ ret_stuff.status = 0;
+ ret_stuff.v0 = 0;
+ ret_stuff.v1 = 0;
+ ret_stuff.v2 = 0;
+ SAL_CALL_NOLOCK(ret_stuff, SN_SAL_CONSOLE_INTR,
+ intr, SAL_CONSOLE_INTR_OFF,
+ 0, 0, 0, 0, 0);
+}
+
+/*
+ * Sends a character buffer to the console asynchronously.
+ */
+static inline u64
+ia64_sn_console_xmit_chars(char *buf, int len)
+{
+ struct ia64_sal_retval ret_stuff;
+
+ ret_stuff.status = 0;
+ ret_stuff.v0 = 0;
+ ret_stuff.v1 = 0;
+ ret_stuff.v2 = 0;
+ SAL_CALL_NOLOCK(ret_stuff, SN_SAL_CONSOLE_XMIT_CHARS,
+ (uint64_t)buf, (uint64_t)len,
+ 0, 0, 0, 0, 0);
+
+ if (ret_stuff.status == 0) {
+ return ret_stuff.v0;
+ }
+
+ return 0;
+}
+
+/*
* Returns the iobrick module Id
*/
static inline u64
{
struct ia64_sal_retval ret_stuff;
- ret_stuff.status = (uint64_t)0;
- ret_stuff.v0 = (uint64_t)0;
- ret_stuff.v1 = (uint64_t)0;
- ret_stuff.v2 = (uint64_t)0;
- SAL_CALL(ret_stuff, SN_SAL_SYSCTL_IOBRICK_MODULE_GET, nasid, 0, 0, 0, 0, 0, 0);
+ ret_stuff.status = 0;
+ ret_stuff.v0 = 0;
+ ret_stuff.v1 = 0;
+ ret_stuff.v2 = 0;
+ SAL_CALL_NOLOCK(ret_stuff, SN_SAL_SYSCTL_IOBRICK_MODULE_GET, nasid, 0, 0, 0, 0, 0, 0);
/* result is in 'v0' */
*result = (int)ret_stuff.v0;
ia64_sn_sys_serial_get(char *buf)
{
struct ia64_sal_retval ret_stuff;
- SAL_CALL(ret_stuff, SN_SAL_SYS_SERIAL_GET, buf, 0, 0, 0, 0, 0, 0);
+ SAL_CALL_NOLOCK(ret_stuff, SN_SAL_SYS_SERIAL_GET, buf, 0, 0, 0, 0, 0, 0);
return ret_stuff.status;
}
return ((partid_t)ret_stuff.v0);
}
-#ifdef CONFIG_IA64_SGI_SN2
/*
* Returns the partition id of the current processor.
*/
}
}
-#endif /* CONFIG_IA64_SGI_SN2 */
+/*
+ * Change or query the coherence domain for this partition. Each cpu-based
+ * nasid is represented by a bit in an array of 64-bit words:
+ * 0 = not in this partition's coherency domain
+ * 1 = in this partition's coherency domain
+ * It is not possible for the local system's nasids to be removed from
+ * the coherency domain.
+ *
+ * new_domain = set the coherence domain to the given nasids
+ * old_domain = return the current coherence domain
+ */
+static inline int
+sn_change_coherence(u64 *new_domain, u64 *old_domain)
+{
+ struct ia64_sal_retval ret_stuff;
+ SAL_CALL(ret_stuff, SN_SAL_COHERENCE, new_domain, old_domain, 0, 0,
+ 0, 0, 0);
+ return ret_stuff.status;
+}
/*
* Turns off system power.
/* never returns */
}
+/**
+ * ia64_sn_fru_capture - tell the system controller to capture hw state
+ *
+ * This routine will call the SAL which will tell the system controller(s)
+ * to capture hw mmr information from each SHub in the system.
+ */
+static inline u64
+ia64_sn_fru_capture(void)
+{
+ struct ia64_sal_retval isrv;
+ SAL_CALL(isrv, SN_SAL_SYSCTL_FRU_CAPTURE, 0, 0, 0, 0, 0, 0, 0);
+ if (isrv.status)
+ return 0;
+ return isrv.v0;
+}
#endif /* _ASM_IA64_SN_SN_SAL_H */
+++ /dev/null
-/*
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Copyright (C) 2000-2001 Silicon Graphics, Inc.
- */
-#ifndef _ASM_IA64_SN_SNCONFIG_H
-#define _ASM_IA64_SN_SNCONFIG_H
-
-#include <linux/config.h>
-
-#if defined(CONFIG_IA64_SGI_SN1)
-#include <asm/sn/sn1/ip27config.h>
-#elif defined(CONFIG_IA64_SGI_SN2)
-#endif
-
-#endif /* _ASM_IA64_SN_SNCONFIG_H */
+/*
+ * Copyright (c) 2002-2003 Silicon Graphics, Inc. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it would be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * Further, this software is distributed without any warranty that it is
+ * free of the rightful claim of any third person regarding infringement
+ * or the like. Any license provided herein, whether implied or
+ * otherwise, applies only to this software file. Patent licenses, if
+ * any, provided herein do not apply to combinations of this program with
+ * other software, or any other product whatsoever.
+ *
+ * You should have received a copy of the GNU General Public
+ * License along with this program; if not, write the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
+ *
+ * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy,
+ * Mountain View, CA 94043, or:
+ *
+ * http://www.sgi.com
+ *
+ * For further information regarding this notice, see:
+ *
+ * http://oss.sgi.com/projects/GenInfo/NoticeExplan
+ */
+
#ifndef _ASM_IA64_SN_SNDRV_H
#define _ASM_IA64_SN_SNDRV_H
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
- * Copyright (C) 2000-2001 Silicon Graphics, Inc. All rights reserved
+ * Copyright (C) 2000-2003 Silicon Graphics, Inc. All Rights Reserved.
*
* This implemenation of synchronization variables is heavily based on
* one done by Steve Lord <lord@sgi.com>
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
- * Copyright (C) 1992 - 1997, 2000-2002 Silicon Graphics, Inc. All rights reserved.
+ * Copyright (C) 1992 - 1997, 2000-2003 Silicon Graphics, Inc. All rights reserved.
*/
#ifndef _ASM_IA64_SN_SYSTEMINFO_H
#define _ASM_IA64_SN_SYSTEMINFO_H
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
- * Copyright (C) 1999,2001-2002 Silicon Graphics, Inc. All Rights Reserved.
+ * Copyright (C) 1999,2001-2003 Silicon Graphics, Inc. All Rights Reserved.
* Copyright (C) 1999 by Ralf Baechle
*/
#ifndef _ASM_IA64_SN_TYPES_H
#include <linux/types.h>
typedef unsigned long cpuid_t;
-typedef unsigned long cpumask_t;
typedef signed short nasid_t; /* node id in numa-as-id space */
typedef signed char partid_t; /* partition ID type */
-#ifdef CONFIG_IA64_SGI_SN2
typedef unsigned int moduleid_t; /* user-visible module number type */
typedef unsigned int cmoduleid_t; /* kernel compact module id type */
-#else
-typedef signed short moduleid_t; /* user-visible module number type */
-typedef signed short cmoduleid_t; /* kernel compact module id type */
-#endif
typedef signed char slabid_t;
typedef unsigned char clusterid_t; /* Clusterid of the cell */
typedef unsigned char uchar_t;
typedef unsigned long paddr_t;
typedef unsigned long pfn_t;
+typedef short cnodeid_t;
#endif /* _ASM_IA64_SN_TYPES_H */
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
- * Copyright (C) 1992 - 1997, 2000-2001 Silicon Graphics, Inc. All rights reserved.
+ * Copyright (C) 1992-1997,2000-2003 Silicon Graphics, Inc. All rights reserved.
*/
#ifndef _ASM_IA64_SN_UART16550_H
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
- * Copyright (C) 1992 - 1997, 2000-2002 Silicon Graphics, Inc. All rights reserved.
+ * Copyright (C) 1992 - 1997, 2000-2003 Silicon Graphics, Inc. All rights reserved.
*/
#ifndef _ASM_IA64_SN_VECTOR_H
#define _ASM_IA64_SN_VECTOR_H
#include <linux/config.h>
-#include <asm/sn/arch.h>
#define NET_VEC_NULL ((net_vec_t) 0)
#define NET_VEC_BAD ((net_vec_t) -1)
-#ifdef RTL
-
-#define VEC_POLLS_W 16 /* Polls before write times out */
-#define VEC_POLLS_R 16 /* Polls before read times out */
-#define VEC_POLLS_X 16 /* Polls before exch times out */
-
-#define VEC_RETRIES_W 1 /* Retries before write fails */
-#define VEC_RETRIES_R 1 /* Retries before read fails */
-#define VEC_RETRIES_X 1 /* Retries before exch fails */
-
-#else /* RTL */
-
#define VEC_POLLS_W 128 /* Polls before write times out */
#define VEC_POLLS_R 128 /* Polls before read times out */
#define VEC_POLLS_X 128 /* Polls before exch times out */
#define VEC_RETRIES_R 8 /* Retries before read fails */
#define VEC_RETRIES_X 4 /* Retries before exch fails */
-#endif /* RTL */
-
-#if defined(CONFIG_IA64_SGI_SN1) || defined(CONFIG_IA64_GENERIC)
-#define VECTOR_PARMS LB_VECTOR_PARMS
-#define VECTOR_ROUTE LB_VECTOR_ROUTE
-#define VECTOR_DATA LB_VECTOR_DATA
-#define VECTOR_STATUS LB_VECTOR_STATUS
-#define VECTOR_RETURN LB_VECTOR_RETURN
-#define VECTOR_READ_DATA LB_VECTOR_READ_DATA
-#define VECTOR_STATUS_CLEAR LB_VECTOR_STATUS_CLEAR
-#define VP_PIOID_SHFT LVP_PIOID_SHFT
-#define VP_PIOID_MASK LVP_PIOID_MASK
-#define VP_WRITEID_SHFT LVP_WRITEID_SHFT
-#define VP_WRITEID_MASK LVP_WRITEID_MASK
-#define VP_ADDRESS_MASK LVP_ADDRESS_MASK
-#define VP_TYPE_SHFT LVP_TYPE_SHFT
-#define VP_TYPE_MASK LVP_TYPE_MASK
-#define VS_VALID LVS_VALID
-#define VS_OVERRUN LVS_OVERRUN
-#define VS_TARGET_SHFT LVS_TARGET_SHFT
-#define VS_TARGET_MASK LVS_TARGET_MASK
-#define VS_PIOID_SHFT LVS_PIOID_SHFT
-#define VS_PIOID_MASK LVS_PIOID_MASK
-#define VS_WRITEID_SHFT LVS_WRITEID_SHFT
-#define VS_WRITEID_MASK LVS_WRITEID_MASK
-#define VS_ADDRESS_MASK LVS_ADDRESS_MASK
-#define VS_TYPE_SHFT LVS_TYPE_SHFT
-#define VS_TYPE_MASK LVS_TYPE_MASK
-#define VS_ERROR_MASK LVS_ERROR_MASK
-#endif
-
#define NET_ERROR_NONE 0 /* No error */
#define NET_ERROR_HARDWARE (-1) /* Hardware error */
#define NET_ERROR_OVERRUN (-2) /* Extra response(s) */
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
- * Copyright (C) 1992 - 1997, 2000-2001 Silicon Graphics, Inc.
- * Copyright (C) 2000 by Colin Ngam
+ * Copyright (C) 1992-1997,2000-2003 Silicon Graphics, Inc. All Rights Reserved.
*/
#ifndef _ASM_SN_SN_XTALK_XBOW_H
#define _ASM_SN_SN_XTALK_XBOW_H
/* XBOW_WID_ARB_RELOAD */
#define XBOW_WID_ARB_RELOAD_INT 0x3f /* GBR reload interval */
-
-#ifdef CONFIG_IA64_SGI_SN1
-#define nasid_has_xbridge(nasid) \
- (XWIDGET_PART_NUM(XWIDGET_ID_READ(nasid, 0)) == XXBOW_WIDGET_PART_NUM)
-#endif
-
#define IS_XBRIDGE_XBOW(wid) \
(XWIDGET_PART_NUM(wid) == XXBOW_WIDGET_PART_NUM && \
XWIDGET_MFG_NUM(wid) == XXBOW_WIDGET_MFGR_NUM)
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
- * Copyright (C) 1992-1997,2000-2002 Silicon Graphics, Inc. All Rights Reserved.
+ * Copyright (C) 1992-1997,2000-2003 Silicon Graphics, Inc. All Rights Reserved.
*/
#ifndef _ASM_SN_XTALK_XBOW_INFO_H
#define _ASM_SN_XTALK_XBOW_INFO_H
volatile uint32_t *xp_perf_reg;
} xbow_perf_t;
-extern void xbow_update_perf_counters(devfs_handle_t);
-extern xbow_perf_link_t *xbow_get_perf_counters(devfs_handle_t);
-extern int xbow_enable_perf_counter(devfs_handle_t, int, int, int);
+extern void xbow_update_perf_counters(vertex_hdl_t);
+extern xbow_perf_link_t *xbow_get_perf_counters(vertex_hdl_t);
+extern int xbow_enable_perf_counter(vertex_hdl_t, int, int, int);
#define XBOWIOC_PERF_ENABLE 1
#define XBOWIOC_PERF_DISABLE 2
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
- * Copyright (C) 1992-1997,2000-2002 Silicon Graphics, Inc. All Rights Reserved.
+ * Copyright (C) 1992-1997,2000-2003 Silicon Graphics, Inc. All Rights Reserved.
*/
#ifndef _ASM_SN_XTALK_XSWITCH_H
#define _ASM_SN_XTALK_XSWITCH_H
typedef struct xswitch_info_s *xswitch_info_t;
typedef int
- xswitch_reset_link_f(devfs_handle_t xconn);
+ xswitch_reset_link_f(vertex_hdl_t xconn);
typedef struct xswitch_provider_s {
xswitch_reset_link_f *reset_link;
} xswitch_provider_t;
-extern void xswitch_provider_register(devfs_handle_t sw_vhdl, xswitch_provider_t * xsw_fns);
+extern void xswitch_provider_register(vertex_hdl_t sw_vhdl, xswitch_provider_t * xsw_fns);
xswitch_reset_link_f xswitch_reset_link;
-extern xswitch_info_t xswitch_info_new(devfs_handle_t vhdl);
+extern xswitch_info_t xswitch_info_new(vertex_hdl_t vhdl);
extern void xswitch_info_link_is_ok(xswitch_info_t xswitch_info,
xwidgetnum_t port);
extern void xswitch_info_vhdl_set(xswitch_info_t xswitch_info,
xwidgetnum_t port,
- devfs_handle_t xwidget);
+ vertex_hdl_t xwidget);
extern void xswitch_info_master_assignment_set(xswitch_info_t xswitch_info,
xwidgetnum_t port,
- devfs_handle_t master_vhdl);
+ vertex_hdl_t master_vhdl);
-extern xswitch_info_t xswitch_info_get(devfs_handle_t vhdl);
+extern xswitch_info_t xswitch_info_get(vertex_hdl_t vhdl);
extern int xswitch_info_link_ok(xswitch_info_t xswitch_info,
xwidgetnum_t port);
-extern devfs_handle_t xswitch_info_vhdl_get(xswitch_info_t xswitch_info,
+extern vertex_hdl_t xswitch_info_vhdl_get(xswitch_info_t xswitch_info,
xwidgetnum_t port);
-extern devfs_handle_t xswitch_info_master_assignment_get(xswitch_info_t xswitch_info,
+extern vertex_hdl_t xswitch_info_master_assignment_get(xswitch_info_t xswitch_info,
xwidgetnum_t port);
-extern int xswitch_id_get(devfs_handle_t vhdl);
-extern void xswitch_id_set(devfs_handle_t vhdl,int xbow_num);
-
#endif /* __ASSEMBLY__ */
#endif /* _ASM_SN_XTALK_XSWITCH_H */
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
- * Copyright (C) 1992-1997, 2000-2002 Silicon Graphics, Inc. All Rights Reserved.
+ * Copyright (C) 1992-1997, 2000-2003 Silicon Graphics, Inc. All Rights Reserved.
*/
#ifndef _ASM_SN_XTALK_XTALK_H
#define _ASM_SN_XTALK_XTALK_H
#include <linux/config.h>
+#include "asm/sn/sgi.h"
+
+
/*
* xtalk.h -- platform-independent crosstalk interface
*/
/* PIO MANAGEMENT */
typedef xtalk_piomap_t
-xtalk_piomap_alloc_f (devfs_handle_t dev, /* set up mapping for this device */
+xtalk_piomap_alloc_f (vertex_hdl_t dev, /* set up mapping for this device */
device_desc_t dev_desc, /* device descriptor */
iopaddr_t xtalk_addr, /* map for this xtalk_addr range */
size_t byte_count,
xtalk_piomap_done_f (xtalk_piomap_t xtalk_piomap);
typedef caddr_t
-xtalk_piotrans_addr_f (devfs_handle_t dev, /* translate for this device */
+xtalk_piotrans_addr_f (vertex_hdl_t dev, /* translate for this device */
device_desc_t dev_desc, /* device descriptor */
iopaddr_t xtalk_addr, /* Crosstalk address */
size_t byte_count, /* map this many bytes */
unsigned flags); /* (currently unused) */
extern caddr_t
-xtalk_pio_addr (devfs_handle_t dev, /* translate for this device */
+xtalk_pio_addr (vertex_hdl_t dev, /* translate for this device */
device_desc_t dev_desc, /* device descriptor */
iopaddr_t xtalk_addr, /* Crosstalk address */
size_t byte_count, /* map this many bytes */
typedef struct xtalk_dmamap_s *xtalk_dmamap_t;
typedef xtalk_dmamap_t
-xtalk_dmamap_alloc_f (devfs_handle_t dev, /* set up mappings for this device */
+xtalk_dmamap_alloc_f (vertex_hdl_t dev, /* set up mappings for this device */
device_desc_t dev_desc, /* device descriptor */
size_t byte_count_max, /* max size of a mapping */
unsigned flags); /* defined in dma.h */
xtalk_dmamap_done_f (xtalk_dmamap_t dmamap);
typedef iopaddr_t
-xtalk_dmatrans_addr_f (devfs_handle_t dev, /* translate for this device */
+xtalk_dmatrans_addr_f (vertex_hdl_t dev, /* translate for this device */
device_desc_t dev_desc, /* device descriptor */
paddr_t paddr, /* system physical address */
size_t byte_count, /* length */
unsigned flags);
typedef alenlist_t
-xtalk_dmatrans_list_f (devfs_handle_t dev, /* translate for this device */
+xtalk_dmatrans_list_f (vertex_hdl_t dev, /* translate for this device */
device_desc_t dev_desc, /* device descriptor */
alenlist_t palenlist, /* system address/length list */
unsigned flags);
xtalk_dmamap_drain_f (xtalk_dmamap_t map); /* drain this map's channel */
typedef void
-xtalk_dmaaddr_drain_f (devfs_handle_t vhdl, /* drain channel from this device */
+xtalk_dmaaddr_drain_f (vertex_hdl_t vhdl, /* drain channel from this device */
paddr_t addr, /* to this physical address */
size_t bytes); /* for this many bytes */
typedef void
-xtalk_dmalist_drain_f (devfs_handle_t vhdl, /* drain channel from this device */
+xtalk_dmalist_drain_f (vertex_hdl_t vhdl, /* drain channel from this device */
alenlist_t list); /* for this set of physical blocks */
xtalk_intr_setfunc_f (xtalk_intr_t intr_hdl); /* interrupt handle */
typedef xtalk_intr_t
-xtalk_intr_alloc_f (devfs_handle_t dev, /* which crosstalk device */
+xtalk_intr_alloc_f (vertex_hdl_t dev, /* which crosstalk device */
device_desc_t dev_desc, /* device descriptor */
- devfs_handle_t owner_dev); /* owner of this intr */
+ vertex_hdl_t owner_dev); /* owner of this intr */
typedef void
xtalk_intr_free_f (xtalk_intr_t intr_hdl);
-#ifdef CONFIG_IA64_SGI_SN1
-typedef int
-xtalk_intr_connect_f (xtalk_intr_t intr_hdl, /* xtalk intr resource handle */
- xtalk_intr_setfunc_f *setfunc, /* func to set intr hw */
- void *setfunc_arg); /* arg to setfunc */
-#else
typedef int
xtalk_intr_connect_f (xtalk_intr_t intr_hdl, /* xtalk intr resource handle */
intr_func_t intr_func, /* xtalk intr handler */
void *intr_arg, /* arg to intr handler */
xtalk_intr_setfunc_f *setfunc, /* func to set intr hw */
void *setfunc_arg); /* arg to setfunc */
-#endif
typedef void
xtalk_intr_disconnect_f (xtalk_intr_t intr_hdl);
-typedef devfs_handle_t
+typedef vertex_hdl_t
xtalk_intr_cpu_get_f (xtalk_intr_t intr_hdl); /* xtalk intr resource handle */
/* CONFIGURATION MANAGEMENT */
typedef void
-xtalk_provider_startup_f (devfs_handle_t xtalk_provider);
+xtalk_provider_startup_f (vertex_hdl_t xtalk_provider);
typedef void
-xtalk_provider_shutdown_f (devfs_handle_t xtalk_provider);
+xtalk_provider_shutdown_f (vertex_hdl_t xtalk_provider);
typedef void
-xtalk_widgetdev_enable_f (devfs_handle_t, int);
+xtalk_widgetdev_enable_f (vertex_hdl_t, int);
typedef void
-xtalk_widgetdev_shutdown_f (devfs_handle_t, int);
+xtalk_widgetdev_shutdown_f (vertex_hdl_t, int);
typedef int
-xtalk_dma_enabled_f (devfs_handle_t);
+xtalk_dma_enabled_f (vertex_hdl_t);
/* Error Management */
typedef int
-xtalk_error_devenable_f (devfs_handle_t xconn_vhdl,
+xtalk_error_devenable_f (vertex_hdl_t xconn_vhdl,
int devnum,
int error_code);
xtalk_intr_free_f *intr_free;
xtalk_intr_connect_f *intr_connect;
xtalk_intr_disconnect_f *intr_disconnect;
- xtalk_intr_cpu_get_f *intr_cpu_get;
/* CONFIGURATION MANAGEMENT */
xtalk_provider_startup_f *provider_startup;
/* error management */
-extern int xtalk_error_handler(devfs_handle_t,
+extern int xtalk_error_handler(vertex_hdl_t,
int,
ioerror_mode_t,
ioerror_t *);
#define XTALK_INTR_VECTOR_NONE (xtalk_intr_vector_t)0
/* Generic crosstalk interrupt interfaces */
-extern devfs_handle_t xtalk_intr_dev_get(xtalk_intr_t xtalk_intr);
+extern vertex_hdl_t xtalk_intr_dev_get(xtalk_intr_t xtalk_intr);
extern xwidgetnum_t xtalk_intr_target_get(xtalk_intr_t xtalk_intr);
extern xtalk_intr_vector_t xtalk_intr_vector_get(xtalk_intr_t xtalk_intr);
extern iopaddr_t xtalk_intr_addr_get(xtalk_intr_t xtalk_intr);
-extern devfs_handle_t xtalk_intr_cpu_get(xtalk_intr_t xtalk_intr);
+extern vertex_hdl_t xtalk_intr_cpu_get(xtalk_intr_t xtalk_intr);
extern void *xtalk_intr_sfarg_get(xtalk_intr_t xtalk_intr);
/* Generic crosstalk pio interfaces */
-extern devfs_handle_t xtalk_pio_dev_get(xtalk_piomap_t xtalk_piomap);
+extern vertex_hdl_t xtalk_pio_dev_get(xtalk_piomap_t xtalk_piomap);
extern xwidgetnum_t xtalk_pio_target_get(xtalk_piomap_t xtalk_piomap);
extern iopaddr_t xtalk_pio_xtalk_addr_get(xtalk_piomap_t xtalk_piomap);
extern size_t xtalk_pio_mapsz_get(xtalk_piomap_t xtalk_piomap);
extern caddr_t xtalk_pio_kvaddr_get(xtalk_piomap_t xtalk_piomap);
/* Generic crosstalk dma interfaces */
-extern devfs_handle_t xtalk_dma_dev_get(xtalk_dmamap_t xtalk_dmamap);
+extern vertex_hdl_t xtalk_dma_dev_get(xtalk_dmamap_t xtalk_dmamap);
extern xwidgetnum_t xtalk_dma_target_get(xtalk_dmamap_t xtalk_dmamap);
/* Register/unregister Crosstalk providers and get implementation handle */
extern void xtalk_set_early_piotrans_addr(xtalk_early_piotrans_addr_f *);
-extern void xtalk_provider_register(devfs_handle_t provider, xtalk_provider_t *xtalk_fns);
-extern void xtalk_provider_unregister(devfs_handle_t provider);
-extern xtalk_provider_t *xtalk_provider_fns_get(devfs_handle_t provider);
+extern void xtalk_provider_register(vertex_hdl_t provider, xtalk_provider_t *xtalk_fns);
+extern void xtalk_provider_unregister(vertex_hdl_t provider);
+extern xtalk_provider_t *xtalk_provider_fns_get(vertex_hdl_t provider);
/* Crosstalk Switch generic layer, for use by initialization code */
-extern void xswitch_census(devfs_handle_t xswitchv);
-extern void xswitch_init_widgets(devfs_handle_t xswitchv);
+extern void xswitch_census(vertex_hdl_t xswitchv);
+extern void xswitch_init_widgets(vertex_hdl_t xswitchv);
/* early init interrupt management */
typedef xtalk_intr_setfunc_f *xtalk_intr_setfunc_t;
-typedef void xtalk_iter_f(devfs_handle_t vhdl);
+typedef void xtalk_iter_f(vertex_hdl_t vhdl);
extern void xtalk_iterate(char *prefix, xtalk_iter_f *func);
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
- * Copyright (C) 1992-1997, 2000-2002 Silicon Graphics, Inc. All Rights Reserved.
+ * Copyright (C) 1992-1997, 2000-2003 Silicon Graphics, Inc. All Rights Reserved.
*/
#ifndef _ASM_SN_XTALK_XTALK_PRIVATE_H
#define _ASM_SN_XTALK_XTALK_PRIVATE_H
* All Crosstalk providers set up PIO using this information.
*/
struct xtalk_piomap_s {
- devfs_handle_t xp_dev; /* a requestor of this mapping */
+ vertex_hdl_t xp_dev; /* a requestor of this mapping */
xwidgetnum_t xp_target; /* target (node's widget number) */
iopaddr_t xp_xtalk_addr; /* which crosstalk addr is mapped */
size_t xp_mapsz; /* size of this mapping */
* All Crosstalk providers set up DMA using this information.
*/
struct xtalk_dmamap_s {
- devfs_handle_t xd_dev; /* a requestor of this mapping */
+ vertex_hdl_t xd_dev; /* a requestor of this mapping */
xwidgetnum_t xd_target; /* target (node's widget number) */
};
* All Crosstalk providers set up interrupts using this information.
*/
struct xtalk_intr_s {
- devfs_handle_t xi_dev; /* requestor of this intr */
+ vertex_hdl_t xi_dev; /* requestor of this intr */
xwidgetnum_t xi_target; /* master's widget number */
xtalk_intr_vector_t xi_vector; /* 8-bit interrupt vector */
iopaddr_t xi_addr; /* xtalk address to generate intr */
*/
struct xwidget_info_s {
char *w_fingerprint;
- devfs_handle_t w_vertex; /* back pointer to vertex */
+ vertex_hdl_t w_vertex; /* back pointer to vertex */
xwidgetnum_t w_id; /* widget id */
struct xwidget_hwid_s w_hwid; /* hardware identification (part/rev/mfg) */
- devfs_handle_t w_master; /* CACHED widget's master */
+ vertex_hdl_t w_master; /* CACHED widget's master */
xwidgetnum_t w_masterid; /* CACHED widget's master's widgetnum */
error_handler_f *w_efunc; /* error handling function */
error_handler_arg_t w_einfo; /* first parameter for efunc */
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
- * Copyright (C) 1992 - 1997, 2000-2001 Silicon Graphics, Inc.
- * Copyright (C) 2000 by Colin Ngam
+ * Copyright (C) 1992-1997,2000-2003 Silicon Graphics, Inc. All Rights Reserved.
*/
#ifndef _ASM_SN_XTALK_XTALKADDRS_H
#define _ASM_SN_XTALK_XTALKADDRS_H
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
- * Copyright (C) 1992 - 1997, 2000-2001 Silicon Graphics, Inc.
- * Copyright (C) 2000 by Colin Ngam
+ * Copyright (C) 1992-1997,2000-2003 Silicon Graphics, Inc. All Rights Reserved.
*/
#ifndef __ASM_SN_XTALK_XWIDGET_H__
#define __ASM_SN_XTALK_XWIDGET_H__
extern void xwidget_driver_unregister(char *driver_prefix);
extern int xwidget_register(struct xwidget_hwid_s *hwid,
- devfs_handle_t dev,
+ vertex_hdl_t dev,
xwidgetnum_t id,
- devfs_handle_t master,
- xwidgetnum_t targetid,
- async_attach_t aa);
+ vertex_hdl_t master,
+ xwidgetnum_t targetid);
-extern int xwidget_unregister(devfs_handle_t);
+extern int xwidget_unregister(vertex_hdl_t);
-extern void xwidget_reset(devfs_handle_t xwidget);
-extern void xwidget_gfx_reset(devfs_handle_t xwidget);
-extern char *xwidget_name_get(devfs_handle_t xwidget);
+extern void xwidget_reset(vertex_hdl_t xwidget);
+extern void xwidget_gfx_reset(vertex_hdl_t xwidget);
+extern char *xwidget_name_get(vertex_hdl_t xwidget);
/* Generic crosstalk widget information access interface */
-extern xwidget_info_t xwidget_info_chk(devfs_handle_t widget);
-extern xwidget_info_t xwidget_info_get(devfs_handle_t widget);
-extern void xwidget_info_set(devfs_handle_t widget, xwidget_info_t widget_info);
-extern devfs_handle_t xwidget_info_dev_get(xwidget_info_t xwidget_info);
+extern xwidget_info_t xwidget_info_chk(vertex_hdl_t widget);
+extern xwidget_info_t xwidget_info_get(vertex_hdl_t widget);
+extern void xwidget_info_set(vertex_hdl_t widget, xwidget_info_t widget_info);
+extern vertex_hdl_t xwidget_info_dev_get(xwidget_info_t xwidget_info);
extern xwidgetnum_t xwidget_info_id_get(xwidget_info_t xwidget_info);
extern int xwidget_info_type_get(xwidget_info_t xwidget_info);
extern int xwidget_info_state_get(xwidget_info_t xwidget_info);
-extern devfs_handle_t xwidget_info_master_get(xwidget_info_t xwidget_info);
+extern vertex_hdl_t xwidget_info_master_get(xwidget_info_t xwidget_info);
extern xwidgetnum_t xwidget_info_masterid_get(xwidget_info_t xwidget_info);
extern xwidget_part_num_t xwidget_info_part_num_get(xwidget_info_t xwidget_info);
extern xwidget_rev_num_t xwidget_info_rev_num_get(xwidget_info_t xwidget_info);
* carefully coded to touch only those registers that spin_lock() marks "clobbered".
*/
-#define IA64_SPINLOCK_CLOBBERS "ar.pfs", "p14", "r28", "r29", "r30", "b6", "memory"
+#define IA64_SPINLOCK_CLOBBERS "ar.ccv", "ar.pfs", "p14", "r28", "r29", "r30", "b6", "memory"
static inline void
_raw_spin_lock (spinlock_t *lock)
while (unlikely(ia64_fetchadd(1, (int *) __read_lock_ptr, "acq") < 0)) { \
ia64_fetchadd(-1, (int *) __read_lock_ptr, "rel"); \
while (*(volatile int *)__read_lock_ptr < 0) \
- barrier(); \
- \
+ cpu_relax(); \
} \
} while (0)
#include <asm/pal.h>
#include <asm/percpu.h>
-#define KERNEL_START (PAGE_OFFSET + 68*1024*1024)
-
-/* 0xa000000000000000 - 0xa000000000000000+PERCPU_MAX_SIZE remain unmapped */
+/* 0xa000000000000000 - 0xa000000000000000+PERCPU_PAGE_SIZE remain unmapped */
#define PERCPU_ADDR (0xa000000000000000 + PERCPU_PAGE_SIZE)
#define GATE_ADDR (0xa000000000000000 + 2*PERCPU_PAGE_SIZE)
+#define KERNEL_START 0xa000000100000000
#ifndef __ASSEMBLY__
|| IS_IA32_PROCESS(ia64_task_regs(t)) || PERFMON_IS_SYSWIDE())
#define __switch_to(prev,next,last) do { \
- struct task_struct *__fpu_owner = ia64_get_fpu_owner(); \
if (IA64_HAS_EXTRA_STATE(prev)) \
ia64_save_extra(prev); \
if (IA64_HAS_EXTRA_STATE(next)) \
ia64_load_extra(next); \
- ia64_psr(ia64_task_regs(next))->dfh = \
- !(__fpu_owner == (next) && ((next)->thread.last_fph_cpu == smp_processor_id())); \
+ ia64_psr(ia64_task_regs(next))->dfh = !ia64_is_local_fpu_owner(next); \
(last) = ia64_switch_to((next)); \
} while (0)
ia64_psr(ia64_task_regs(prev))->mfh = 0; \
(prev)->thread.flags |= IA64_THREAD_FPH_VALID; \
__ia64_save_fpu((prev)->thread.fph); \
- (prev)->thread.last_fph_cpu = smp_processor_id(); \
} \
__switch_to(prev, next, last); \
} while (0)
#define finish_arch_switch(rq, prev) spin_unlock_irq(&(prev)->switch_lock)
#define task_running(rq, p) ((rq)->curr == (p) || spin_is_locked(&(p)->switch_lock))
+#define ia64_platform_is(x) (strcmp(x, platform_name) == 0)
+
#endif /* __KERNEL__ */
#endif /* __ASSEMBLY__ */
#include <asm/processor.h>
#include <asm/ptrace.h>
-#define TI_EXEC_DOMAIN 0x00
-#define TI_FLAGS 0x08
-#define TI_CPU 0x0c
-#define TI_ADDR_LIMIT 0x10
-#define TI_PRE_COUNT 0x18
+#define TI_TASK 0x00
+#define TI_EXEC_DOMAIN 0x08
+#define TI_FLAGS 0x10
+#define TI_CPU 0x14
+#define TI_ADDR_LIMIT 0x18
+#define TI_PRE_COUNT 0x20
+#define TI_RESTART_BLOCK 0x28
#define PREEMPT_ACTIVE_BIT 30
#define PREEMPT_ACTIVE (1 << PREEMPT_ACTIVE_BIT)
* without having to do pointer masking.
*/
struct thread_info {
+ struct task_struct *task; /* XXX not really needed, except for dup_task_struct() */
struct exec_domain *exec_domain;/* execution domain */
__u32 flags; /* thread_info flags (see TIF_*) */
__u32 cpu; /* current CPU */
struct restart_block restart_block;
};
-#define INIT_THREAD_SIZE /* tell sched.h not to declare the thread_union */
#define THREAD_SIZE KERNEL_STACK_SIZE
-#define INIT_THREAD_INFO(ti) \
+#define INIT_THREAD_INFO(tsk) \
{ \
+ .task = &tsk, \
.exec_domain = &default_exec_domain, \
.flags = 0, \
.cpu = 0, \
}
/* how to get the thread information struct from C */
-#define current_thread_info() ((struct thread_info *) ((char *) current + IA64_TASK_SIZE))
+#define current_thread_info() ((struct thread_info *) ((char *) current + IA64_TASK_SIZE))
+#define alloc_thread_info(tsk) ((struct thread_info *) ((char *) (tsk) + IA64_TASK_SIZE))
#define free_thread_info(ti) /* nothing */
+#define __HAVE_ARCH_TASK_STRUCT_ALLOCATOR
+#define alloc_task_struct() ((task_t *)__get_free_pages(GFP_KERNEL, KERNEL_STACK_SIZE_ORDER))
+#define free_task_struct(tsk) free_pages((unsigned long) (tsk), KERNEL_STACK_SIZE_ORDER)
+
#endif /* !__ASSEMBLY */
/*
#define _ASM_IA64_TIMEX_H
/*
- * Copyright (C) 1998-2001 Hewlett-Packard Co
+ * Copyright (C) 1998-2001, 2003 Hewlett-Packard Co
* David Mosberger-Tang <davidm@hpl.hp.com>
*/
/*
typedef unsigned long cycles_t;
/*
- * Something low processor frequency like 100Mhz but
- * yet multiple of HZ to avoid truncation in some formulas.
+ * For performance reasons, we don't want to define CLOCK_TICK_RATE as
+ * local_cpu_data->itc_rate. Fortunately, we don't have to, either: according to George
+ * Anzinger, 1/CLOCK_TICK_RATE is taken as the resolution of the timer clock. The time
+ * calculation assumes that you will use enough of these so that your tick size <= 1/HZ.
+ * If the calculation shows that your CLOCK_TICK_RATE can not supply exactly 1/HZ ticks,
+ * the actual value is calculated and used to update the wall clock each jiffie. Setting
+ * the CLOCK_TICK_RATE to x*HZ ensures that the calculation will find no errors. Hence we
+ * pick a multiple of HZ which gives us a (totally virtual) CLOCK_TICK_RATE of about
+ * 100MHz.
*/
#define CLOCK_TICK_RATE (HZ * 100000UL)
};
/* Users of the generic TLB shootdown code must declare this storage space. */
-extern struct mmu_gather mmu_gathers[NR_CPUS];
+DECLARE_PER_CPU(struct mmu_gather, mmu_gathers);
/*
* Flush the TLB for address range START to END and, if not in fast mode, release the
static inline struct mmu_gather *
tlb_gather_mmu (struct mm_struct *mm, unsigned int full_mm_flush)
{
- struct mmu_gather *tlb = &mmu_gathers[smp_processor_id()];
+ struct mmu_gather *tlb = &per_cpu(mmu_gathers, smp_processor_id());
tlb->mm = mm;
/*
*/
#define node_to_cpumask(node) (node_to_cpu_mask[node])
-#else
-#define cpu_to_node(cpu) (0)
-#define node_to_cpumask(node) (phys_cpu_present_map)
-#endif
-
/*
* Returns the number of the node containing MemBlk 'memblk'
*/
/* Cross-node load balancing interval. */
#define NODE_BALANCE_RATE 10
+void build_cpu_to_node_map(void);
+
+#endif /* CONFIG_NUMA */
+
+#include <asm-generic/topology.h>
+
#endif /* _ASM_IA64_TOPOLOGY_H */
#define __NR_sys_clock_getres 1255
#define __NR_sys_clock_nanosleep 1256
+#ifdef __KERNEL__
+
+#define NR_syscalls 256 /* length of syscall table */
+
#if !defined(__ASSEMBLY__) && !defined(ASSEMBLER)
extern long __ia64_syscall (long a0, long a1, long a2, long a3, long a4, long nr);
/*
* "Conditional" syscalls
*
- * What we want is __attribute__((weak,alias("sys_ni_syscall"))),
- * but it doesn't work on all toolchains, so we just do it by hand
+ * What we want is __attribute__((weak,alias("sys_ni_syscall"))), but it doesn't work on
+ * all toolchains, so we just do it by hand. Note, this macro can only be used in the
+ * file which defines sys_ni_syscall, i.e., in kernel/sys.c.
*/
#define cond_syscall(x) asm(".weak\t" #x "\n\t.set\t" #x ",sys_ni_syscall");
#endif /* !__ASSEMBLY__ */
+#endif /* __KERNEL__ */
#endif /* _ASM_IA64_UNISTD_H */
UNW_AR_EC,
UNW_AR_FPSR,
UNW_AR_RSC,
- UNW_AR_CCV
+ UNW_AR_CCV,
+ UNW_AR_CSD,
+ UNW_AR_SSD
};
/*
* Initialize unwind support.
*/
extern void unw_init (void);
-extern void unw_create_gate_table (void);
extern void *unw_add_unwind_table (const char *name, unsigned long segment_base, unsigned long gp,
const void *table_start, const void *table_end);
--- /dev/null
+#ifndef _ASM_IA64_USTACK_H
+#define _ASM_IA64_USTACK_H
+
+/*
+ * Constants for the user stack size
+ */
+
+#include <asm/page.h>
+
+/* The absolute hard limit for stack size is 1/2 of the mappable space in the region */
+#define MAX_USER_STACK_SIZE (RGN_MAP_LIMIT/2)
+/* Make a default stack size of 2GB */
+#define DEFAULT_USER_STACK_SIZE (1UL << 31)
+#define STACK_TOP (0x6000000000000000UL + RGN_MAP_LIMIT)
+
+#endif /* _ASM_IA64_USTACK_H */
extern void xor_ia64_5(unsigned long, unsigned long *, unsigned long *,
unsigned long *, unsigned long *, unsigned long *);
-asm ("
- .text
-
- // Assume L2 memory latency of 6 cycles.
-
- .proc xor_ia64_2
-xor_ia64_2:
- .prologue
- .fframe 0
- { .mii
- .save ar.pfs, r31
- alloc r31 = ar.pfs, 3, 0, 13, 16
- .save ar.lc, r30
- mov r30 = ar.lc
- .save pr, r29
- mov r29 = pr
- ;;
- }
- .body
- { .mii
- mov r8 = in1
- mov ar.ec = 6 + 2
- shr in0 = in0, 3
- ;;
- }
- { .mmi
- adds in0 = -1, in0
- mov r16 = in1
- mov r17 = in2
- ;;
- }
- { .mii
- mov ar.lc = in0
- mov pr.rot = 1 << 16
- ;;
- }
- .rotr s1[6+1], s2[6+1], d[2]
- .rotp p[6+2]
-0: { .mmi
-(p[0]) ld8.nta s1[0] = [r16], 8
-(p[0]) ld8.nta s2[0] = [r17], 8
-(p[6]) xor d[0] = s1[6], s2[6]
- }
- { .mfb
-(p[6+1]) st8.nta [r8] = d[1], 8
- nop.f 0
- br.ctop.dptk.few 0b
- ;;
- }
- { .mii
- mov ar.lc = r30
- mov pr = r29, -1
- }
- { .bbb
- br.ret.sptk.few rp
- }
- .endp xor_ia64_2
-
- .proc xor_ia64_3
-xor_ia64_3:
- .prologue
- .fframe 0
- { .mii
- .save ar.pfs, r31
- alloc r31 = ar.pfs, 4, 0, 20, 24
- .save ar.lc, r30
- mov r30 = ar.lc
- .save pr, r29
- mov r29 = pr
- ;;
- }
- .body
- { .mii
- mov r8 = in1
- mov ar.ec = 6 + 2
- shr in0 = in0, 3
- ;;
- }
- { .mmi
- adds in0 = -1, in0
- mov r16 = in1
- mov r17 = in2
- ;;
- }
- { .mii
- mov r18 = in3
- mov ar.lc = in0
- mov pr.rot = 1 << 16
- ;;
- }
- .rotr s1[6+1], s2[6+1], s3[6+1], d[2]
- .rotp p[6+2]
-0: { .mmi
-(p[0]) ld8.nta s1[0] = [r16], 8
-(p[0]) ld8.nta s2[0] = [r17], 8
-(p[6]) xor d[0] = s1[6], s2[6]
- ;;
- }
- { .mmi
-(p[0]) ld8.nta s3[0] = [r18], 8
-(p[6+1]) st8.nta [r8] = d[1], 8
-(p[6]) xor d[0] = d[0], s3[6]
- }
- { .bbb
- br.ctop.dptk.few 0b
- ;;
- }
- { .mii
- mov ar.lc = r30
- mov pr = r29, -1
- }
- { .bbb
- br.ret.sptk.few rp
- }
- .endp xor_ia64_3
-
- .proc xor_ia64_4
-xor_ia64_4:
- .prologue
- .fframe 0
- { .mii
- .save ar.pfs, r31
- alloc r31 = ar.pfs, 5, 0, 27, 32
- .save ar.lc, r30
- mov r30 = ar.lc
- .save pr, r29
- mov r29 = pr
- ;;
- }
- .body
- { .mii
- mov r8 = in1
- mov ar.ec = 6 + 2
- shr in0 = in0, 3
- ;;
- }
- { .mmi
- adds in0 = -1, in0
- mov r16 = in1
- mov r17 = in2
- ;;
- }
- { .mii
- mov r18 = in3
- mov ar.lc = in0
- mov pr.rot = 1 << 16
- }
- { .mfb
- mov r19 = in4
- ;;
- }
- .rotr s1[6+1], s2[6+1], s3[6+1], s4[6+1], d[2]
- .rotp p[6+2]
-0: { .mmi
-(p[0]) ld8.nta s1[0] = [r16], 8
-(p[0]) ld8.nta s2[0] = [r17], 8
-(p[6]) xor d[0] = s1[6], s2[6]
- }
- { .mmi
-(p[0]) ld8.nta s3[0] = [r18], 8
-(p[0]) ld8.nta s4[0] = [r19], 8
-(p[6]) xor r20 = s3[6], s4[6]
- ;;
- }
- { .mib
-(p[6+1]) st8.nta [r8] = d[1], 8
-(p[6]) xor d[0] = d[0], r20
- br.ctop.dptk.few 0b
- ;;
- }
- { .mii
- mov ar.lc = r30
- mov pr = r29, -1
- }
- { .bbb
- br.ret.sptk.few rp
- }
- .endp xor_ia64_4
-
- .proc xor_ia64_5
-xor_ia64_5:
- .prologue
- .fframe 0
- { .mii
- .save ar.pfs, r31
- alloc r31 = ar.pfs, 6, 0, 34, 40
- .save ar.lc, r30
- mov r30 = ar.lc
- .save pr, r29
- mov r29 = pr
- ;;
- }
- .body
- { .mii
- mov r8 = in1
- mov ar.ec = 6 + 2
- shr in0 = in0, 3
- ;;
- }
- { .mmi
- adds in0 = -1, in0
- mov r16 = in1
- mov r17 = in2
- ;;
- }
- { .mii
- mov r18 = in3
- mov ar.lc = in0
- mov pr.rot = 1 << 16
- }
- { .mib
- mov r19 = in4
- mov r20 = in5
- ;;
- }
- .rotr s1[6+1], s2[6+1], s3[6+1], s4[6+1], s5[6+1], d[2]
- .rotp p[6+2]
-0: { .mmi
-(p[0]) ld8.nta s1[0] = [r16], 8
-(p[0]) ld8.nta s2[0] = [r17], 8
-(p[6]) xor d[0] = s1[6], s2[6]
- }
- { .mmi
-(p[0]) ld8.nta s3[0] = [r18], 8
-(p[0]) ld8.nta s4[0] = [r19], 8
-(p[6]) xor r21 = s3[6], s4[6]
- ;;
- }
- { .mmi
-(p[0]) ld8.nta s5[0] = [r20], 8
-(p[6+1]) st8.nta [r8] = d[1], 8
-(p[6]) xor d[0] = d[0], r21
- ;;
- }
- { .mfb
-(p[6]) xor d[0] = d[0], s5[6]
- nop.f 0
- br.ctop.dptk.few 0b
- ;;
- }
- { .mii
- mov ar.lc = r30
- mov pr = r29, -1
- }
- { .bbb
- br.ret.sptk.few rp
- }
- .endp xor_ia64_5
-");
-
static struct xor_block_template xor_block_ia64 = {
- name: "ia64",
- do_2: xor_ia64_2,
- do_3: xor_ia64_3,
- do_4: xor_ia64_4,
- do_5: xor_ia64_5,
+ .name = "ia64",
+ .do_2 = xor_ia64_2,
+ .do_3 = xor_ia64_3,
+ .do_4 = xor_ia64_4,
+ .do_5 = xor_ia64_5,
};
#define XOR_TRY_TEMPLATES xor_speed(&xor_block_ia64)
{
}
+extern irq_desc_t irq_desc [NR_IRQS];
+
#endif /* _ASM_HW_IRQ_H */
Everything of consequence is in arch/alpha/kernel/irq_impl.h,
to be used only in arch/alpha/kernel/. */
+
+extern irq_desc_t irq_desc [NR_IRQS];
#include <asm/irq.h>
+extern irq_desc_t irq_desc [NR_IRQS];
+
#endif
struct hw_interrupt_type;
static inline void hw_resend_irq(struct hw_interrupt_type *h, unsigned int i) {}
+extern irq_desc_t irq_desc [NR_IRQS];
+
#endif /* _PPC_HW_IRQ_H */
#endif /* __KERNEL__ */
#include <linux/config.h>
#include <linux/kernel.h>
+#include <linux/irq.h>
#include <asm/processor.h>
#include <asm/atomic.h>
-#include <asm/hw_irq.h>
/*
* Memory barrier.
struct hw_interrupt_type;
static inline void hw_resend_irq(struct hw_interrupt_type *h, unsigned int i) {}
+extern irq_desc_t irq_desc [NR_IRQS];
+
#endif /* _PPC64_HW_IRQ_H */
#endif /* __KERNEL__ */
static __inline__ void sh_do_profile (unsigned long pc) {/*Not implemented yet*/}
static __inline__ void hw_resend_irq(struct hw_interrupt_type *h, unsigned int i) { /* Nothing to do */ }
+
+extern irq_desc_t irq_desc [NR_IRQS];
+
#endif /* __ASM_SH_HW_IRQ_H */
static inline void hw_resend_irq(struct hw_interrupt_type *h, unsigned int i)
{}
+extern irq_desc_t irq_desc [NR_IRQS];
+
#endif
{
}
+extern irq_desc_t irq_desc [NR_IRQS];
+
#endif /* __V850_HW_IRQ_H__ */
static inline void hw_resend_irq(struct hw_interrupt_type *h, unsigned int i) {}
#endif
+extern irq_desc_t irq_desc [NR_IRQS];
+
#endif
#endif /* _ASM_HW_IRQ_H */
#include <linux/config.h>
#include <linux/fs.h>
+#include <linux/mm.h>
+
#include <asm/cacheflush.h>
#ifdef CONFIG_HIGHMEM
*
* Pad this out to 32 bytes for cache and indexing reasons.
*/
-typedef struct {
+typedef struct irq_desc {
unsigned int status; /* IRQ status */
hw_irq_controller *handler;
struct irqaction *action; /* IRQ action list */
spinlock_t lock;
} ____cacheline_aligned irq_desc_t;
+#ifndef CONFIG_IA64
extern irq_desc_t irq_desc [NR_IRQS];
+#endif
#include <asm/hw_irq.h> /* the arch dependent stuff */
nfs_size_to_loff_t(__u64 size)
{
loff_t maxsz = (((loff_t) ULONG_MAX) << PAGE_CACHE_SHIFT) + PAGE_CACHE_SIZE - 1;
- if (size > maxsz)
+ if (size > (__u64) maxsz)
return maxsz;
return (loff_t) size;
}
return rc;
}
+/*
+ * PCI domain support. Sometimes called PCI segment (eg by ACPI),
+ * a PCI domain is defined to be a set of PCI busses which share
+ * configuration space.
+ */
+#ifndef CONFIG_PCI_DOMAINS
+static inline int pci_domain_nr(struct pci_bus *bus) { return 0; }
+#endif
+
#endif /* !CONFIG_PCI */
/* these helpers provide future and backwards compatibility
#define PCIPCI_VSFX 16
#define PCIPCI_ALIMAGIK 32
-/*
- * PCI domain support. Sometimes called PCI segment (eg by ACPI),
- * a PCI domain is defined to be a set of PCI busses which share
- * configuration space.
- */
-
-#ifndef CONFIG_PCI_DOMAINS
-static inline int pci_domain_nr(struct pci_bus *bus) { return 0; }
-#endif
-
#endif /* __KERNEL__ */
#endif /* LINUX_PCI_H */
#define PCI_DEVICE_ID_HP_DIVA_TOSCA1 0x1049
#define PCI_DEVICE_ID_HP_DIVA_TOSCA2 0x104A
#define PCI_DEVICE_ID_HP_DIVA_MAESTRO 0x104B
+#define PCI_DEVICE_ID_HP_REO_SBA 0x10f0
+#define PCI_DEVICE_ID_HP_REO_IOC 0x10f1
#define PCI_DEVICE_ID_HP_VISUALIZE_FXE 0x108b
#define PCI_DEVICE_ID_HP_DIVA_HALFDOME 0x1223
#define PCI_DEVICE_ID_HP_DIVA_KEYSTONE 0x1226
*/
extern struct exec_domain default_exec_domain;
-#ifndef INIT_THREAD_SIZE
-# define INIT_THREAD_SIZE 2048*sizeof(long)
-#endif
+#ifndef __HAVE_ARCH_TASK_STRUCT_ALLOCATOR
+# ifndef INIT_THREAD_SIZE
+# define INIT_THREAD_SIZE 2048*sizeof(long)
+# endif
union thread_union {
struct thread_info thread_info;
};
extern union thread_union init_thread_union;
+
+#endif /* !__HAVE_ARCH_TASK_STRUCT_ALLOCATOR */
+
extern struct task_struct init_task;
extern struct mm_struct init_mm;
* This assumes that the non-page part of an rpc reply will fit
* in a page - NFSd ensures this. lockd also has no trouble.
*/
-#define RPCSVC_MAXPAGES ((RPCSVC_MAXPAYLOAD+PAGE_SIZE-1)/PAGE_SIZE + 1)
+#define RPCSVC_MAXPAGES ((RPCSVC_MAXPAYLOAD+PAGE_SIZE-1)/PAGE_SIZE + 2)
static inline u32 svc_getu32(struct iovec *iov)
{
KERN_PIDMAX=55, /* int: PID # limit */
KERN_CORE_PATTERN=56, /* string: pattern for core-file names */
KERN_PANIC_ON_OOPS=57, /* int: whether we will panic on an oops */
+ KERN_CACHEDECAYTICKS=58, /* ulong: value for cache_decay_ticks (EXPERIMENTAL!) */
};
if (clone_flags & CLONE_CHILD_SETTID)
p->set_child_tid = child_tidptr;
+ else
+ p->set_child_tid = NULL;
/*
* Clear TID on mm_release()?
*/
if (clone_flags & CLONE_CHILD_CLEARTID)
p->clear_child_tid = child_tidptr;
+ else
+ p->clear_child_tid = NULL;
/*
* Syscall tracing should be turned off in the child regardless
/* init task, for moving kthread roots - ought to export a function ?? */
EXPORT_SYMBOL(init_task);
-EXPORT_SYMBOL(init_thread_union);
EXPORT_SYMBOL(tasklist_lock);
EXPORT_SYMBOL(find_task_by_pid);
__call_console_drivers(start, end);
}
}
+#ifdef CONFIG_IA64_EARLY_PRINTK
+ if (!console_drivers) {
+ void early_printk (const char *str, size_t len);
+ early_printk(&LOG_BUF(start), end - start);
+ }
+#endif
}
/*
* for us.
*/
spin_lock_irqsave(&logbuf_lock, flags);
+#ifdef CONFIG_IA64_EARLY_PRINTK
+ con_start = log_end;
+#else
con_start = log_start;
+#endif
spin_unlock_irqrestore(&logbuf_lock, flags);
}
release_console_sem();
tty->driver->write(tty, 0, msg, strlen(msg));
return;
}
+
+#ifdef CONFIG_IA64_EARLY_PRINTK
+
+#include <asm/io.h>
+
+# ifdef CONFIG_IA64_EARLY_PRINTK_VGA
+
+
+#define VGABASE ((char *)0xc0000000000b8000)
+#define VGALINES 24
+#define VGACOLS 80
+
+static int current_ypos = VGALINES, current_xpos = 0;
+
+/*
+ * Emit LEN bytes of STR straight into VGA text-mode memory (no console
+ * driver involved).  '\n' advances to a new line, '\r' is swallowed,
+ * everything else is stored as a character/attribute pair (attribute
+ * 0x07 = grey on black).  When the cursor falls off the bottom the
+ * whole screen is scrolled up one row via readw/writew copies.
+ */
+static void
+early_printk_vga (const char *str, size_t len)
+{
+ char c;
+ int i, k, j;
+
+ while (len-- > 0) {
+ c = *str++;
+ if (current_ypos >= VGALINES) {
+ /* scroll 1 line up */
+ for (k = 1, j = 0; k < VGALINES; k++, j++) {
+ for (i = 0; i < VGACOLS; i++) {
+ writew(readw(VGABASE + 2*(VGACOLS*k + i)),
+ VGABASE + 2*(VGACOLS*j + i));
+ }
+ }
+ /* blank the freed bottom row; 0x720 = space with attribute 0x07 */
+ for (i = 0; i < VGACOLS; i++) {
+ writew(0x720, VGABASE + 2*(VGACOLS*j + i));
+ }
+ current_ypos = VGALINES-1;
+ }
+ if (c == '\n') {
+ current_xpos = 0;
+ current_ypos++;
+ } else if (c != '\r') {
+ /* each cell is 2 bytes: low byte character, high byte attribute */
+ writew(((0x7 << 8) | (unsigned short) c),
+ VGABASE + 2*(VGACOLS*current_ypos + current_xpos++));
+ if (current_xpos >= VGACOLS) {
+ /* wrap long lines onto the next row */
+ current_xpos = 0;
+ current_ypos++;
+ }
+ }
+ }
+}
+
+# endif /* CONFIG_IA64_EARLY_PRINTK_VGA */
+
+# ifdef CONFIG_IA64_EARLY_PRINTK_UART
+
+#include <linux/serial_reg.h>
+#include <asm/system.h>
+
+/*
+ * Emit LEN bytes of STR to a polled 8250-style UART, bypassing the tty
+ * layer entirely.  The UART's physical base comes from the HCDP ACPI
+ * table when available, otherwise from the compile-time
+ * CONFIG_IA64_EARLY_PRINTK_UART_BASE fallback; it is ioremap()ed once
+ * and cached in a static pointer for all later calls.  If no base can
+ * be determined (or the mapping fails) output is silently dropped.
+ */
+static void early_printk_uart(const char *str, size_t len)
+{
+ static char *uart = NULL;
+ unsigned long uart_base;
+ char c;
+
+ if (!uart) {
+ uart_base = 0;
+# ifdef CONFIG_SERIAL_8250_HCDP
+ {
+ extern unsigned long hcdp_early_uart(void);
+ uart_base = hcdp_early_uart();
+ }
+# endif
+# if CONFIG_IA64_EARLY_PRINTK_UART_BASE
+ if (!uart_base)
+ uart_base = CONFIG_IA64_EARLY_PRINTK_UART_BASE;
+# endif
+ if (!uart_base)
+ return;
+
+ /* map enough for the 8250 register window; never unmapped */
+ uart = ioremap(uart_base, 64);
+ if (!uart)
+ return;
+ }
+
+ while (len-- > 0) {
+ c = *str++;
+ /* wait for the transmitter to drain before each byte */
+ while ((readb(uart + UART_LSR) & UART_LSR_TEMT) == 0)
+ cpu_relax(); /* spin */
+
+ writeb(c, uart + UART_TX);
+
+ /* terminals expect CRLF line endings */
+ if (c == '\n')
+ writeb('\r', uart + UART_TX);
+ }
+}
+
+# endif /* CONFIG_IA64_EARLY_PRINTK_UART */
+
+#ifdef CONFIG_IA64_EARLY_PRINTK_SGI_SN
+extern int sn_sal_console_out(const char *str, int len);
+#endif
+
+/*
+ * Boot-time console output before any real console driver registers.
+ * Fans the message out to every early-printk backend that was compiled
+ * in (UART, VGA text memory, SGI SN SAL console).
+ */
+void early_printk(const char *str, size_t len)
+{
+#ifdef CONFIG_IA64_EARLY_PRINTK_UART
+ early_printk_uart(str, len);
+#endif
+#ifdef CONFIG_IA64_EARLY_PRINTK_VGA
+ early_printk_vga(str, len);
+#endif
+#ifdef CONFIG_IA64_EARLY_PRINTK_SGI_SN
+ sn_sal_console_out(str, len);
+#endif
+}
+
+#endif /* CONFIG_IA64_EARLY_PRINTK */
? -EFAULT : 0;
}
-#if !defined(__ia64__) && !defined(CONFIG_V850)
+#if (!defined(__ia64__) && !defined(CONFIG_V850)) || defined(CONFIG_COMPAT)
/*
* Back compatibility for getrlimit. Needed for some apps.
.mode = 0644,
.proc_handler = &proc_dointvec,
},
+#ifdef CONFIG_SMP
+ {
+ .ctl_name = KERN_CACHEDECAYTICKS,
+ .procname = "cache_decay_ticks",
+ .data = &cache_decay_ticks,
+ .maxlen = sizeof(cache_decay_ticks),
+ .mode = 0644,
+ .proc_handler = &proc_doulongvec_minmax,
+ },
+#endif
{ .ctl_name = 0 }
};
}
pmd = pmd_offset(dir, 0);
pgd_clear(dir);
- for (j = 0; j < PTRS_PER_PMD ; j++)
+ for (j = 0; j < PTRS_PER_PMD ; j++) {
+ prefetchw(pmd + j + PREFETCH_STRIDE/sizeof(*pmd));
free_one_pmd(tlb, pmd+j);
+ }
pmd_free_tlb(tlb, pmd);
}
clean-files := initramfs_data.cpio.gz
-LDFLAGS_initramfs_data.o := $(LDFLAGS_BLOB) -r -T
-
-$(obj)/initramfs_data.o: $(src)/initramfs_data.scr \
- $(obj)/initramfs_data.cpio.gz FORCE
- $(call if_changed,ld)
+$(obj)/initramfs_data.S: $(obj)/initramfs_data.cpio.gz
+ echo '.section ".init.ramfs", "a"' > $@
+ od -v -An -t x1 -w8 $^ | cut -c2- | sed -e s"/ /,0x/g" -e s"/^/.byte 0x"/ >> $@
# initramfs-y are the programs which will be copied into the CPIO
# archive. Currently, the filenames are hardcoded in gen_init_cpio,