o util-linux 2.10o # fdformat --version
o modutils 2.4.2 # insmod -V
o e2fsprogs 1.19 # tune2fs
-o reiserfsprogs 3.x.0b # reiserfsck 2>&1|grep reiserfsprogs
+o reiserfsprogs 3.x.0d # reiserfsck 2>&1|grep reiserfsprogs
o pcmcia-cs 3.1.21 # cardmgr -V
o PPP 2.4.0 # pppd --version
o isdn4k-utils 3.1pre1 # isdnctrl 2>&1|grep version
Reiserfsprogs
-------------
-o <ftp://ftp.namesys.com/pub/reiserfsprogs/reiserfs_utils-3.6.25-fsck-3.x.0b.tar.gz>
+o <ftp://ftp.namesys.com/pub/reiserfsprogs/reiserfsprogs-3.x.0d.tar.gz>
LVM toolset
-----------
If you want to compile it as a module, say M here and read
Documentation/modules.txt. If unsure, say `N'.
+TCPMSS target support
+CONFIG_IP_NF_TARGET_TCPMSS
+ This option adds a `TCPMSS' target, which allows you to alter the
+ MSS value of TCP SYN packets, to control the maximum size for that
+ connection (usually limiting it to your outgoing interface's MTU
+ minus 40).
+
+ This is used to overcome criminally braindead ISPs or servers which
+ block ICMP Fragmentation Needed packets. The symptoms of this
+ problem are that everything works fine from your Linux
+ firewall/router, but machines behind it can never exchange large
+ packets:
+ 1) Web browsers connect, then hang with no data received.
+ 2) Small mail works fine, but large emails hang.
+ 3) ssh works fine, but scp hangs after initial handshaking.
+
+ Workaround: activate this option and add a rule to your firewall
+ configuration like:
+
+ iptables -A FORWARD -p tcp --tcp-flags SYN,RST SYN \
+ -j TCPMSS --clamp-mss-to-pmtu
+
+ If you want to compile it as a module, say M here and read
+ Documentation/modules.txt. If unsure, say `N'.
+
+tcpmss match support
+CONFIG_IP_NF_MATCH_TCPMSS
+ This option adds a `tcpmss' match, which allows you to examine the
+ MSS value of TCP SYN packets, which controls the maximum packet size
+ for that connection.
+
+ If you want to compile it as a module, say M here and read
+ Documentation/modules.txt. If unsure, say `N'.
+
LOG target support
CONFIG_IP_NF_TARGET_LOG
This option adds a `LOG' target, which allows you to create rules in
If you want to compile it as a module, say M here and read
Documentation/modules.txt. If unsure, say `N'.
+IP6 tables support (required for filtering/masq/NAT)
+CONFIG_IP6_NF_IPTABLES
+ ip6tables is a general, extensible packet identification framework.
+ Currently only the packet filtering and packet mangling subsystem
+ for IPv6 use this, but connection tracking is going to follow.
+ Say 'Y' or 'M' here if you want to use either of those.
+
+ If you want to compile it as a module, say M here and read
+ Documentation/modules.txt. If unsure, say `N'.
+
+IPv6 limit match support
+CONFIG_IP6_NF_MATCH_LIMIT
+ limit matching allows you to control the rate at which a rule can be
+ matched: mainly useful in combination with the LOG target ("LOG
+ target support", below) and to avoid some Denial of Service attacks.
+
+ If you want to compile it as a module, say M here and read
+ Documentation/modules.txt. If unsure, say `N'.
+
+MAC address match support
+CONFIG_IP6_NF_MATCH_MAC
+ mac matching allows you to match packets based on the source
+ ethernet address of the packet.
+
+ If you want to compile it as a module, say M here and read
+ Documentation/modules.txt. If unsure, say `N'.
+
+netfilter mark match support
+CONFIG_IP6_NF_MATCH_MARK
+ Netfilter mark matching allows you to match packets based on the
+ `nfmark' value in the packet. This can be set by the MARK target
+ (see below).
+
+ If you want to compile it as a module, say M here and read
+ Documentation/modules.txt. If unsure, say `N'.
+
+Packet filtering
+CONFIG_IP6_NF_FILTER
+ Packet filtering defines a table `filter', which has a series of
+ rules for simple packet filtering at local input, forwarding and
+ local output. See the man page for iptables(8).
+
+ If you want to compile it as a module, say M here and read
+ Documentation/modules.txt. If unsure, say `N'.
+
+Packet mangling
+CONFIG_IP6_NF_MANGLE
+ This option adds a `mangle' table to iptables: see the man page for
+ iptables(8). This table is used for various packet alterations
+ which can affect how the packet is routed.
+
+ If you want to compile it as a module, say M here and read
+ Documentation/modules.txt. If unsure, say `N'.
+
+MARK target support
+CONFIG_IP6_NF_TARGET_MARK
+ This option adds a `MARK' target, which allows you to create rules
+ in the `mangle' table which alter the netfilter mark (nfmark) field
+ associated with the packet prior to routing. This can change
+ the routing method (see `IP: use netfilter MARK value as routing
+ key') and can also be used by other subsystems to change their
+ behavior.
+
+ If you want to compile it as a module, say M here and read
+ Documentation/modules.txt. If unsure, say `N'.
+
+
TCP Explicit Congestion Notification support
CONFIG_INET_ECN
Explicit Congestion Notification (ECN) allows routers to notify
if (! pci_dma_supported(pdev, 0x00ffffff))
goto ignore_this_device;
+When DMA is possible for a given mask, the PCI layer must be informed of the
+mask for later allocation operations on the device. This is achieved by
+setting the dma_mask member of the pci_dev structure, like so:
+
+#define MY_HW_DMA_MASK 0x00ffffff
+
+ if (! pci_dma_supported(pdev, MY_HW_DMA_MASK))
+ goto ignore_this_device;
+
+ pdev->dma_mask = MY_HW_DMA_MASK;
+
+A helper function is provided which performs this common code sequence:
+
+ int pci_set_dma_mask(struct pci_dev *pdev, dma_addr_t device_mask)
+
+Unlike pci_dma_supported(), this returns -EIO when the PCI layer will not be
+able to DMA with addresses restricted by that mask, and returns 0 when DMA
+transfers are possible. If the call succeeds, the dma_mask will have been
+updated so that your driver need not worry about it.
+
There is a case which we are aware of at this time, which is worth
mentioning in this documentation. If your device supports multiple
functions (for example a sound card provides playback and record
functions) and the various different functions have _different_
DMA addressing limitations, you may wish to probe each mask and
-only provide the functionality which the machine can handle.
+only provide the functionality which the machine can handle. It
+is important that the last call to pci_set_dma_mask() be for the
+most specific mask.
+
Here is pseudo-code showing how this might be done:
#define PLAYBACK_ADDRESS_BITS 0xffffffff
struct pci_dev *pdev;
...
- if (pci_dma_supported(pdev, PLAYBACK_ADDRESS_BITS)) {
+ if (!pci_set_dma_mask(pdev, PLAYBACK_ADDRESS_BITS)) {
card->playback_enabled = 1;
} else {
card->playback_enabled = 0;
printk(KERN_WARN "%s: Playback disabled due to DMA limitations.\n",
card->name);
}
- if (pci_dma_supported(pdev, RECORD_ADDRESS_BITS)) {
+ if (!pci_set_dma_mask(pdev, RECORD_ADDRESS_BITS)) {
card->record_enabled = 1;
} else {
card->record_enabled = 0;
(C) 1997-1998 Caldera, Inc.
(C) 1998 James Banks
-(C) 1999-2000 Torben Mathiasen <tmm@image.dk, torben.mathiasen@compaq.com>
+(C) 1999-2001 Torben Mathiasen <tmm@image.dk, torben.mathiasen@compaq.com>
-For driver information/updates visit http://tlan.kernel.dk
+For driver information/updates visit http://opensource.compaq.com
-TLAN driver for Linux, version 1.8a
+TLAN driver for Linux, version 1.14a
README
speeds with kernel-parameters.
ether=0,0,0x12,0,eth0 will force link to 100Mbps Half-Duplex.
+ 7. If you have more than one tlan adapter in your system, you can
+ use the above options on a per adapter basis. To force a 100Mbit/HD
+ link with your eth1 adapter use:
+
+ insmod tlan speed=0,100 duplex=0,1
+
+ Now eth0 will use auto-neg and eth1 will be forced to 100Mbit/HD.
+ Note that the tlan driver supports a maximum of 8 adapters.
+
+
III. Things to try if you have problems.
1. Make sure your card's PCI id is among those listed in
section I, above.
There is also a tlan mailing list which you can join by sending "subscribe tlan"
in the body of an email to majordomo@vuser.vu.union.edu.
-There is also a tlan website at http://tlan.kernel.dk
+There is also a tlan website at http://opensource.compaq.com
This document describes the usage and errata of the 3Com "Vortex" device
driver for Linux, 3c59x.c.
-The driver was written by Donald Becker <becker@cesdis.gsfc.nasa.gov>
+The driver was written by Donald Becker <becker@scyld.com>
Don is no longer the prime maintainer of this version of the driver.
Please report problems to one or more of:
3c900 Boomerang 10baseT
3c900 Boomerang 10Mbps Combo
3c900 Cyclone 10Mbps TPO
+ 3c900B Cyclone 10Mbps T
3c900 Cyclone 10Mbps Combo
3c900 Cyclone 10Mbps TPC
3c900B-FL Cyclone 10base-FL
full_duplex=N1,N2,N3...
Similar to bit 9 of 'options'. Forces the corresponding card into
- full-duplex mode.
+ full-duplex mode. Please use this in preference to the `options'
+ parameter.
+
+ In fact, please don't use this at all! You're better off getting
+ autonegotiation working properly.
flow_ctrl=N1,N2,N3...
is exceeded the interrupt service routine gives up and generates a
warning message "eth0: Too much work in interrupt".
+hw_checksums=N1,N2,N3,...
+
+ Recent 3com NICs are able to generate IPv4, TCP and UDP checksums
+ in hardware. Linux has used the Rx checksumming for a long time.
+ The "zero copy" patch which is planned for the 2.4 kernel series
+ allows you to make use of the NIC's DMA scatter/gather and transmit
+ checksumming as well.
+
+ The driver is set up so that, when the zerocopy patch is applied,
+ all Tornado and Cyclone devices will use S/G and Tx checksums.
+
+ This module parameter has been provided so you can override this
+ decision. If you think that Tx checksums are causing a problem, you
+ may disable the feature with `hw_checksums=0'.
+
+ If you think your NIC should be performing Tx checksumming and the
+ driver isn't enabling it, you can force the use of hardware Tx
+ checksumming with `hw_checksums=1'.
+
+ The driver drops a message in the logfiles to indicate whether or
+ not it is using hardware scatter/gather and hardware Tx checksums.
+
+ Scatter/gather and hardware checksums provide considerable
+ performance improvement for the sendfile() system call, but a small
+ decrease in throughput for send(). There is no effect upon receive
+ efficiency.
+
compaq_ioaddr=N
compaq_irq=N
compaq_device_id=N
decides that the transmitter has become stuck and needs to be reset.
This is mainly for debugging purposes, although it may be advantageous
to increase this value on LANs which have very high collision rates.
- The default value is 400 (0.4 seconds).
+ The default value is 5000 (5.0 seconds).
+
+enable_wol=N1,N2,N3,...
+
+ Enable Wake-on-LAN support for the relevant interface. Donald
+ Becker's `ether-wake' application may be used to wake suspended
+ machines.
+
+
+Media selection
+---------------
+
+A number of the older NICs such as the 3c590 and 3c900 series have
+10base2 and AUI interfaces.
+
+Prior to January, 2001 this driver would autoselect the 10base2 or AUI
+port if it didn't detect activity on the 10baseT port. It would then
+get stuck on the 10base2 port and a driver reload was necessary to
+switch back to 10baseT. This behaviour could not be prevented with a
+module option override.
+
+Later (current) versions of the driver _do_ support locking of the
+media type. So if you load the driver module with
+
+ modprobe 3c59x options=0
+
+it will permanently select the 10baseT port. Automatic selection of
+other media types does not occur.
+
Additional resources
--------------------
Additional documentation is available at Don Becker's Linux Drivers site:
- http://cesdis.gsfc.nasa.gov/linux/drivers/vortex.html
+ http://www.scyld.com/network/vortex.html
Donald Becker's driver development site:
- http://www.scyld.com
- http://cesdis.gsfc.nasa.gov/linux/
+ http://www.scyld.com/network
-Don's vortex-diag program is useful for inspecting the NIC's state:
+Donald's vortex-diag program is useful for inspecting the NIC's state:
http://www.scyld.com/diag/#pci-diags
- http://cesdis.gsfc.nasa.gov/linux/diag/vortex-diag.c
-Don's mii-diag program may be used for inspecting and manipulating the
-NIC's Media Independent Interface subsystem:
+Donald's mii-diag program may be used for inspecting and manipulating
+the NIC's Media Independent Interface subsystem:
http://www.scyld.com/diag/#mii-diag
- http://cesdis.gsfc.nasa.gov/linux/diag/#mii-diag
+
+Donald's wake-on-LAN page:
+
+ http://www.scyld.com/expert/wake-on-lan.html
3Com's documentation for many NICs, including the ones supported by
this driver is available at
http://support.3com.com/partners/developer/developer_form.html
-A detailed changelog for the modifications which were made for 2.3
-series kernel is available at
+3Com's DOS-based application for setting up the NICs EEPROMs:
+
+ ftp://ftp.3com.com/pub/nic/3c90x/3c90xx2.exe
+
+Driver updates and a detailed changelog for the modifications which
+were made for the 2.3/2.4 series kernel is available at
http://www.uow.edu.au/~andrewm/linux/#3c59x-2.3
mode. Otherwise, the negotiation fails. This has been an issue
we've noticed for a while but haven't had the time to track down.
+ Cisco switches (Jeff Busch <jbusch@deja.com>)
+
+ My "standard config" for ports to which PC's/servers connect directly:
+
+ interface FastEthernet0/N
+ description machinename
+ load-interval 30
+ spanning-tree portfast
+
+ If autonegotiation is a problem, you may need to specify "speed
+ 100" and "duplex full" as well (or "speed 10" and "duplex half").
+
+ WARNING: DO NOT hook up hubs/switches/bridges to these
+ specially-configured ports! The switch will become very confused.
+
Reporting and diagnosing problems
---------------------------------
VERSION = 2
PATCHLEVEL = 4
SUBLEVEL = 3
-EXTRAVERSION =-pre2
+EXTRAVERSION =-pre3
KERNELRELEASE=$(VERSION).$(PATCHLEVEL).$(SUBLEVEL)$(EXTRAVERSION)
CONFIG_SHELL := $(shell if [ -x "$$BASH" ]; then echo $$BASH; \
else if [ -x /bin/bash ]; then echo /bin/bash; \
else echo sh; fi ; fi)
-TOPDIR := $(shell if [ "$$PWD" != "" ]; then echo $$PWD; else pwd; fi)
+TOPDIR := $(shell /bin/pwd)
HPATH = $(TOPDIR)/include
FINDHPATH = $(HPATH)/asm $(HPATH)/linux $(HPATH)/scsi $(HPATH)/net
#
ALL_MOBJS = $(filter-out $(obj-y), $(obj-m))
ifneq "$(strip $(ALL_MOBJS))" ""
-MOD_DESTDIR ?= $(shell $(CONFIG_SHELL) $(TOPDIR)/scripts/pathdown.sh)
+MOD_DESTDIR := $(shell $(CONFIG_SHELL) $(TOPDIR)/scripts/pathdown.sh)
endif
unexport MOD_DIRS
#include <linux/config.h>
#include <asm/assembler.h>
+#include <asm/constants.h>
#include <asm/errno.h>
#include <asm/hardware.h>
-#include "../lib/constants.h"
-
.macro zero_fp
-#ifdef CONFIG_FRAME_POINTER
+#ifndef CONFIG_NO_FRAME_POINTER
mov fp, #0
#endif
.endm
#include <linux/linkage.h>
#include <asm/assembler.h>
+#include <asm/constants.h>
#include <asm/errno.h>
#include <asm/hardware.h>
#include <asm/arch/irqs.h>
#include <asm/proc-fns.h>
-#include "../lib/constants.h"
#ifndef MODE_SVC
#define MODE_SVC 0x13
#endif
.macro zero_fp
-#ifdef CONFIG_FRAME_POINTER
+#ifndef CONFIG_NO_FRAME_POINTER
mov fp, #0
#endif
.endm
#else
wfs_mask_data: .word 0x0e200110 @ WFS/RFS
.word 0x0fef0fff
- .word 0x0d0d0100 @ LDF [sp]/STF [sp]
- .word 0x0d0b0100 @ LDF [fp]/STF [fp]
- .word 0x0f0f0f00
+ .word 0x0d000100 @ LDF [sp]/STF [sp]
+ .word 0x0d000100 @ LDF [fp]/STF [fp]
+ .word 0x0f000f00
/* We get here if an undefined instruction happens and the floating
* point emulator is not present. If the offending instruction was
sys_mmap2:
#if PAGE_SHIFT > 12
tst r5, #PGOFF_MASK
- moveq r5, r5, lsr #PGOFF_SHIFT
+ moveq r5, r5, lsr #PAGE_SHIFT - 12
streq r5, [sp, #4]
beq do_mmap2
mov r0, #-EINVAL
endif
include $(TOPDIR)/Rules.make
-
-constants.h: getconsdata.o extractconstants.pl
- $(PERL) extractconstants.pl $(OBJDUMP) > $@
-
-getconsdata.o: getconsdata.c
- $(CC) $(CFLAGS) -c getconsdata.c
*/
#include <linux/linkage.h>
#include <asm/assembler.h>
-#include "constants.h"
+#include <asm/constants.h>
.text
.align 5
#include <linux/linkage.h>
#include <asm/assembler.h>
#include <asm/errno.h>
-#include "constants.h"
+#include <asm/constants.h>
.text
+++ /dev/null
-#!/usr/bin/perl
-
-$OBJDUMP=$ARGV[0];
-
-sub swapdata {
- local ($num) = @_;
-
- return substr($num, 6, 2).substr($num, 4, 2).substr ($num, 2, 2).substr ($num, 0, 2);
-}
-
-open (DATA, $OBJDUMP.' --full-contents --section=.data getconsdata.o | grep \'^ 00\' |') ||
- die ('Cant objdump!');
-while (<DATA>) {
- ($addr, $data0, $data1, $data2, $data3) = split (' ');
- $dat[hex($addr)] = hex(&swapdata($data0));
- $dat[hex($addr)+4] = hex(&swapdata($data1));
- $dat[hex($addr)+8] = hex(&swapdata($data2));
- $dat[hex($addr)+12] = hex(&swapdata($data3));
-}
-close (DATA);
-
-open (DATA, $OBJDUMP.' --syms getconsdata.o |') || die ('Cant objdump!');
-while (<DATA>) {
- /elf32/ && ( $elf = 1 );
- /a.out/ && ( $aout = 1 );
- next if ($aout && ! / 07 /);
- next if ($elf && ! (/^0*0...... g/ && /.data/));
- next if (!$aout && !$elf);
-
- if ($aout) {
- ($addr, $flags, $sect, $a1, $a2, $a3, $name) = split (' ');
- $nam[hex($addr)] = substr($name, 1);
- }
- if ($elf) {
- chomp;
- $addr = substr ($_, 0, index($_, " "));
- $name = substr ($_, rindex($_, " ") + 1);
- $nam[hex($addr)] = $name;
- }
-}
-close (DATA);
-
-print "/*\n * *** This file is automatically generated from getconsdata.c. Do not edit! ***\n */\n";
-for ($i = 0; $i < hex($addr)+4; $i += 4) {
- print "#define $nam[$i] $dat[$i]\n";
-}
+++ /dev/null
-/*
- * linux/arch/arm/lib/getconsdata.c
- *
- * Copyright (C) 1995-2000 Russell King
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-#include <linux/config.h>
-#include <linux/sched.h>
-#include <linux/mm.h>
-
-#include <asm/pgtable.h>
-#include <asm/uaccess.h>
-
-/*
- * Make sure that the compiler and target are compatible
- */
-#if (defined(__APCS_32__) && defined(CONFIG_CPU_26))
-#error Your compiler targets APCS-32 but this kernel requires APCS-26.
-#endif
-#if (defined(__APCS_26__) && defined(CONFIG_CPU_32))
-#error Your compiler targets APCS-26 but this kernel requires APCS-32.
-#endif
-
-#undef PAGE_READONLY
-
-#define OFF_TSK(n) (unsigned long)&(((struct task_struct *)0)->n)
-#define OFF_MM(n) (unsigned long)&(((struct mm_struct *)0)->n)
-
-unsigned long TSK_SIGPENDING = OFF_TSK(sigpending);
-unsigned long TSK_ADDR_LIMIT = OFF_TSK(addr_limit);
-unsigned long TSK_NEED_RESCHED = OFF_TSK(need_resched);
-unsigned long TSK_PTRACE = OFF_TSK(ptrace);
-unsigned long TSK_USED_MATH = OFF_TSK(used_math);
-
-unsigned long TSS_SAVE = OFF_TSK(thread.save);
-unsigned long TSS_FPESAVE = OFF_TSK(thread.fpstate.soft.save);
-#ifdef CONFIG_CPU_32
-unsigned long TSS_DOMAIN = OFF_TSK(thread.domain);
-#endif
-
-#ifdef _PAGE_PRESENT
-unsigned long PAGE_PRESENT = _PAGE_PRESENT;
-#endif
-#ifdef _PAGE_RW
-unsigned long PAGE_RW = _PAGE_RW;
-#endif
-#ifdef _PAGE_USER
-unsigned long PAGE_USER = _PAGE_USER;
-#endif
-#ifdef _PAGE_ACCESSED
-unsigned long PAGE_ACCESSED = _PAGE_ACCESSED;
-#endif
-#ifdef _PAGE_DIRTY
-unsigned long PAGE_DIRTY = _PAGE_DIRTY;
-#endif
-#ifdef _PAGE_READONLY
-unsigned long PAGE_READONLY = _PAGE_READONLY;
-#endif
-#ifdef _PAGE_NOT_USER
-unsigned long PAGE_NOT_USER = _PAGE_NOT_USER;
-#endif
-#ifdef _PAGE_OLD
-unsigned long PAGE_OLD = _PAGE_OLD;
-#endif
-#ifdef _PAGE_CLEAN
-unsigned long PAGE_CLEAN = _PAGE_CLEAN;
-#endif
-
-#ifdef PTE_TYPE_SMALL
-unsigned long HPTE_TYPE_SMALL = PTE_TYPE_SMALL;
-unsigned long HPTE_AP_READ = PTE_AP_READ;
-unsigned long HPTE_AP_WRITE = PTE_AP_WRITE;
-#endif
-
-#ifdef L_PTE_PRESENT
-unsigned long LPTE_PRESENT = L_PTE_PRESENT;
-unsigned long LPTE_YOUNG = L_PTE_YOUNG;
-unsigned long LPTE_BUFFERABLE = L_PTE_BUFFERABLE;
-unsigned long LPTE_CACHEABLE = L_PTE_CACHEABLE;
-unsigned long LPTE_USER = L_PTE_USER;
-unsigned long LPTE_WRITE = L_PTE_WRITE;
-unsigned long LPTE_EXEC = L_PTE_EXEC;
-unsigned long LPTE_DIRTY = L_PTE_DIRTY;
-#endif
-
-unsigned long PAGE_SZ = PAGE_SIZE;
-
-unsigned long KSWI_BASE = 0x900000;
-unsigned long KSWI_SYS_BASE = 0x9f0000;
-unsigned long SYS_ERROR0 = 0x9f0000;
-unsigned long PGOFF_SHIFT = PAGE_SHIFT - 12;
-unsigned long PGOFF_MASK = (1 << (PAGE_SHIFT - 12)) - 1;
*/
#include <linux/linkage.h>
#include <asm/assembler.h>
-#include "constants.h"
.text
*
* Extra MM routines for RiscPC architecture
*/
+#include <linux/types.h>
#include <linux/init.h>
#include <asm/hardware.h>
*/
#include <linux/linkage.h>
#include <asm/assembler.h>
+#include <asm/constants.h>
#include <asm/procinfo.h>
-#include "../lib/constants.h"
/*
* MEMC workhorse code. It's both a horse which things it's a pig.
*/
#include <linux/linkage.h>
#include <asm/assembler.h>
+#include <asm/constants.h>
#include <asm/procinfo.h>
-#include "../lib/constants.h"
/*
* Function: arm6_7_cache_clean_invalidate_all (void)
ENTRY(cpu_arm7_cache_clean_invalidate_all)
ENTRY(cpu_arm6_cache_clean_invalidate_range)
ENTRY(cpu_arm7_cache_clean_invalidate_range)
-ENTRY(cpu_arm6_invalidate_icache_range)
-ENTRY(cpu_arm7_invalidate_icache_range)
-ENTRY(cpu_arm6_invalidate_icache_page)
-ENTRY(cpu_arm7_invalidate_icache_page)
+ENTRY(cpu_arm6_icache_invalidate_range)
+ENTRY(cpu_arm7_icache_invalidate_range)
+ENTRY(cpu_arm6_icache_invalidate_page)
+ENTRY(cpu_arm7_icache_invalidate_page)
ENTRY(cpu_arm6_dcache_clean_range)
ENTRY(cpu_arm7_dcache_clean_range)
ENTRY(cpu_arm6_dcache_invalidate_range)
.word cpu_arm6_dcache_clean_entry
/* icache */
- .word cpu_arm6_invalidate_icache_range
- .word cpu_arm6_invalidate_icache_page
+ .word cpu_arm6_icache_invalidate_range
+ .word cpu_arm6_icache_invalidate_page
/* tlb */
.word cpu_arm6_tlb_invalidate_all
.word cpu_arm7_dcache_clean_entry
/* icache */
- .word cpu_arm7_invalidate_icache_range
- .word cpu_arm7_invalidate_icache_page
+ .word cpu_arm7_icache_invalidate_range
+ .word cpu_arm7_icache_invalidate_page
/* tlb */
.word cpu_arm7_tlb_invalidate_all
*/
#include <linux/linkage.h>
#include <asm/assembler.h>
+#include <asm/constants.h>
#include <asm/procinfo.h>
#include <asm/hardware.h>
-#include "../lib/constants.h"
/*
* Function: arm720_cache_clean_invalidate_all (void)
#include <linux/linkage.h>
#include <linux/config.h>
#include <asm/assembler.h>
+#include <asm/constants.h>
#include <asm/procinfo.h>
#include <asm/hardware.h>
-#include "../lib/constants.h"
/*
* This is the maximum size of an area which will be invalidated
*/
#include <linux/linkage.h>
#include <asm/assembler.h>
+#include <asm/constants.h>
#include <asm/procinfo.h>
#include <asm/hardware.h>
-#include "../lib/constants.h"
/* This is the maximum size of an area which will be flushed. If the area
* is larger than this, then we flush the whole cache
Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
-#include "../lib/constants.h"
+#include <asm/constants.h>
/* This is the kernel's entry point into the floating point emulator.
It is called from the kernel with code similar to this:
--- /dev/null
+#
+# linux/arch/arm/tools/Makefile
+#
+# Copyright (C) 2001 Russell King
+#
+
+all: $(TOPDIR)/include/asm-arm/mach-types.h \
+ $(TOPDIR)/include/asm-arm/constants.h
+
+$(TOPDIR)/include/asm-arm/mach-types.h: mach-types gen-mach-types
+ awk -f gen-mach-types mach-types > $@
+
+# Generate the constants.h header file using the compiler. We get
+# the compiler to spit out assembly code, and then mundge it into
+# what we want.
+
+$(TOPDIR)/include/asm-arm/constants.h: constants-hdr getconstants.c
+ $(CC) $(CFLAGS) -S -o - getconstants.c | \
+ sed 's/^\(#define .* \)#\(.*\)/\1\2/;/^#define/!d' | \
+ cat constants-hdr - > $@.tmp
+ cmp $@.tmp $@ >/dev/null 2>&1 || mv $@.tmp $@; $(RM) $@.tmp
+
+# Build our dependencies, and then generate the constants and
+# mach-types header files. If we do it now, mkdep will pick
+# the dependencies up later on when it runs through the other
+# directories
+
+dep:
+ $(TOPDIR)/scripts/mkdep getconstants.c | sed s,getconstants.o,$(TOPDIR)/include/asm-arm/constants.h, > .depend
+ $(MAKE) all
+
+.PHONY: all dep
+
+ifneq ($(wildcard .depend),)
+include .depend
+endif
--- /dev/null
+/*
+ * This file is automatically generated from arch/arm/tools/getconstants.c.
+ * Do not edit! Only include this file in assembly (.S) files!
+ */
+
--- /dev/null
+/*
+ * linux/arch/arm/tools/getconstants.c
+ *
+ * Copyright (C) 1995-2001 Russell King
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/config.h>
+#include <linux/sched.h>
+#include <linux/mm.h>
+
+#include <asm/pgtable.h>
+#include <asm/uaccess.h>
+
+/*
+ * Make sure that the compiler and target are compatible
+ */
+#if (defined(__APCS_32__) && defined(CONFIG_CPU_26))
+#error Your compiler targets APCS-32 but this kernel requires APCS-26.
+#endif
+#if (defined(__APCS_26__) && defined(CONFIG_CPU_32))
+#error Your compiler targets APCS-26 but this kernel requires APCS-32.
+#endif
+
+#define OFF_TSK(n) (unsigned long)&(((struct task_struct *)0)->n)
+
+#define DEFN(name,off) asm("\n#define "name" %0" :: "I" (off))
+
+void func(void)
+{
+DEFN("TSK_SIGPENDING", OFF_TSK(sigpending));
+DEFN("TSK_ADDR_LIMIT", OFF_TSK(addr_limit));
+DEFN("TSK_NEED_RESCHED", OFF_TSK(need_resched));
+DEFN("TSK_PTRACE", OFF_TSK(ptrace));
+DEFN("TSK_USED_MATH", OFF_TSK(used_math));
+
+DEFN("TSS_SAVE", OFF_TSK(thread.save));
+DEFN("TSS_FPESAVE", OFF_TSK(thread.fpstate.soft.save));
+
+#ifdef CONFIG_CPU_32
+DEFN("TSS_DOMAIN", OFF_TSK(thread.domain));
+
+DEFN("HPTE_TYPE_SMALL", PTE_TYPE_SMALL);
+DEFN("HPTE_AP_READ", PTE_AP_READ);
+DEFN("HPTE_AP_WRITE", PTE_AP_WRITE);
+
+DEFN("LPTE_PRESENT", L_PTE_PRESENT);
+DEFN("LPTE_YOUNG", L_PTE_YOUNG);
+DEFN("LPTE_BUFFERABLE", L_PTE_BUFFERABLE);
+DEFN("LPTE_CACHEABLE", L_PTE_CACHEABLE);
+DEFN("LPTE_USER", L_PTE_USER);
+DEFN("LPTE_WRITE", L_PTE_WRITE);
+DEFN("LPTE_EXEC", L_PTE_EXEC);
+DEFN("LPTE_DIRTY", L_PTE_DIRTY);
+#endif
+
+#ifdef CONFIG_CPU_26
+DEFN("PAGE_PRESENT", _PAGE_PRESENT);
+DEFN("PAGE_READONLY", _PAGE_READONLY);
+DEFN("PAGE_NOT_USER", _PAGE_NOT_USER);
+DEFN("PAGE_OLD", _PAGE_OLD);
+DEFN("PAGE_CLEAN", _PAGE_CLEAN);
+#endif
+
+DEFN("PAGE_SZ", PAGE_SIZE);
+
+DEFN("KSWI_BASE", 0x900000);
+DEFN("KSWI_SYS_BASE", 0x9f0000);
+DEFN("SYS_ERROR0", 0x9f0000);
+}
# To add an entry into this database, please see Documentation/arm/README,
# or contact rmk@arm.linux.org.uk
#
-# Last update: Mon Nov 20 22:59:11 2000
+# Last update: Fri Feb 9 22:27:32 2001
#
# machine_is_xxx CONFIG_xxxx MACH_TYPE_xxx number
#
netport SA1100_NETPORT NETPORT 38
pangolin SA1100_PANGOLIN PANGOLIN 39
yopy SA1100_YOPY YOPY 40
-sa1100 SA1100_SA1100 SA1100 41
-huw_webpanel ARCH_HUW_WEBPANEL HUW_WEBPANEL 42
+coolidge SA1100_COOLIDGE COOLIDGE 41
+huw_webpanel SA1100_HUW_WEBPANEL HUW_WEBPANEL 42
spotme ARCH_SPOTME SPOTME 43
freebird ARCH_FREEBIRD FREEBIRD 44
ti925 ARCH_TI925 TI925 45
riscstation ARCH_RISCSTATION RISCSTATION 46
cavy SA1100_CAVY CAVY 47
+jornada720 SA1100_JORNADA720 JORNADA720 48
+omnimeter SA1100_OMNIMETER OMNIMETER 49
+edb7211 ARCH_EDB7211 EDB7211 50
+citygo SA1100_CITYGO CITYGO 51
+pfs168 SA1100_PFS168 PFS168 52
+spot SA1100_SPOT SPOT 53
+flexanet ARCH_FLEXANET FLEXANET 54
+webpal ARCH_WEBPAL WEBPAL 55
+linpda SA1100_LINPDA LINPDA 56
+anakin ARCH_ANAKIN ANAKIN 57
# The following are unallocated
empeg SA1100_EMPEG EMPEG
*(.glue_7)
*(.glue_7t)
*(.kstrtab)
- . = ALIGN(16);
- __start___ex_table = .; /* Exception table */
+ *(.got) /* Global offset table */
+
+ _etext = .; /* End of text section */
+ }
+
+ . = ALIGN(16);
+ __ex_table : { /* Exception table */
+ __start___ex_table = .;
*(__ex_table)
__stop___ex_table = .;
+ }
- __start___ksymtab = .; /* Kernel symbol table */
+ __ksymtab : { /* Kernel symbol table */
+ __start___ksymtab = .;
*(__ksymtab)
__stop___ksymtab = .;
-
- *(.got) /* Global offset table */
-
- _etext = .; /* End of text section */
}
. = ALIGN(8192);
CONFIG_DRM_RADEON=y
# CONFIG_DRM_I810 is not set
# CONFIG_DRM_MGA is not set
-CONFIG_PCMCIA_SERIAL=y
#
-# PCMCIA character device support
+# PCMCIA character devices
#
# CONFIG_PCMCIA_SERIAL_CS is not set
-# CONFIG_PCMCIA_SERIAL_CB is not set
#
# Multimedia devices
}
}
+#if 0
+/* Our bus code shouldn't need this fixup any more. Delete once verified */
/*
* Compaq host bridges -- Find and scan all secondary buses.
* This time registers 0xc8 and 0xc9.
printk("PCI: Compaq host bridge: last bus %02x\n", busno2);
}
}
+#endif
static void __init pci_fixup_umc_ide(struct pci_dev *d)
{
pcibios_max_latency = 32;
}
+static void __init pci_fixup_via_acpi(struct pci_dev *d)
+{
+ /*
+ * VIA ACPI device: IRQ line in PCI config byte 0x42
+ */
+ u8 irq;
+ pci_read_config_byte(d, 0x42, &irq);
+ irq &= 0x0f;
+ if (irq && (irq != 2))
+ d->irq = irq;
+}
+
+static void __init pci_fixup_piix4_acpi(struct pci_dev *d)
+{
+ /*
+ * PIIX4 ACPI device: hardwired IRQ9
+ */
+ d->irq = 9;
+}
+
static void __init pci_fixup_vt8363(struct pci_dev *d)
{
/*
- * VIA VT8363 host bridge has broken feature 'PCI Master Read
- * Caching'. It caches more than is good for it, sometimes
- * serving the bus master with stale data. Some BIOSes enable
- * it by default, so we disable it.
+ * The VIA bridge will corrupt disks without these settings.
*/
u8 tmp;
+ pci_read_config_byte(d, 0x54, &tmp);
+ if(tmp & (1<<2)) {
+ printk("PCI: Bus master Pipeline request disabled\n");
+ pci_write_config_byte(d, 0x54, tmp & ~(1<<2));
+ }
pci_read_config_byte(d, 0x70, &tmp);
- if(tmp & 4) {
- printk("PCI: Bus master read caching disabled\n");
- pci_write_config_byte(d, 0x70, tmp & ~4);
+ if(tmp & (1<<3)) {
+ printk("PCI: Disabled enhanced CPU to PCI writes\n");
+ pci_write_config_byte(d, 0x70, tmp & ~(1<<3));
+ }
+ pci_read_config_byte(d, 0x71, &tmp);
+ if(tmp & (1<<3)) {
+ printk("PCI: Bursting cornercase bug worked around\n");
+ pci_write_config_byte(d, 0x71, tmp | (1<<3));
+ }
+ pci_read_config_byte(d, 0x76, &tmp);
+ if(tmp & (1<<7)) {
+ printk("PCI: Post Write Fail set to Retry\n");
+ pci_write_config_byte(d, 0x76, tmp & ~(1<<7));
}
}
struct pci_fixup pcibios_fixups[] = {
{ PCI_FIXUP_HEADER, PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82451NX, pci_fixup_i450nx },
{ PCI_FIXUP_HEADER, PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82454GX, pci_fixup_i450gx },
+#if 0
+/* Until we get proper handling pray the BIOS gets it right */
{ PCI_FIXUP_HEADER, PCI_VENDOR_ID_SERVERWORKS, PCI_DEVICE_ID_SERVERWORKS_HE, pci_fixup_serverworks },
{ PCI_FIXUP_HEADER, PCI_VENDOR_ID_SERVERWORKS, PCI_DEVICE_ID_SERVERWORKS_LE, pci_fixup_serverworks },
{ PCI_FIXUP_HEADER, PCI_VENDOR_ID_SERVERWORKS, PCI_DEVICE_ID_SERVERWORKS_CMIC_HE, pci_fixup_serverworks },
+#endif
+#if 0
+/* Our bus code shouldn't need this fixup any more. Delete once verified */
{ PCI_FIXUP_HEADER, PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_6010, pci_fixup_compaq },
+#endif
{ PCI_FIXUP_HEADER, PCI_VENDOR_ID_UMC, PCI_DEVICE_ID_UMC_UM8886BF, pci_fixup_umc_ide },
{ PCI_FIXUP_HEADER, PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_5513, pci_fixup_ide_trash },
{ PCI_FIXUP_HEADER, PCI_ANY_ID, PCI_ANY_ID, pci_fixup_ide_bases },
{ PCI_FIXUP_HEADER, PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_5597, pci_fixup_latency },
{ PCI_FIXUP_HEADER, PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_5598, pci_fixup_latency },
- { PCI_FIXUP_HEADER, PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8363_0, pci_fixup_vt8363 },
+ { PCI_FIXUP_HEADER, PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C586_3, pci_fixup_via_acpi },
+ { PCI_FIXUP_HEADER, PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C686_4, pci_fixup_via_acpi },
+ { PCI_FIXUP_HEADER, PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8363_0, pci_fixup_vt8363 },
+ { PCI_FIXUP_HEADER, PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82371AB_3, pci_fixup_piix4_acpi },
{ 0 }
};
if (request == PTRACE_ATTACH) {
if (child == current)
goto out_tsk;
- if ((!child->dumpable ||
- (current->uid != child->euid) ||
+ if(((current->uid != child->euid) ||
(current->uid != child->suid) ||
(current->uid != child->uid) ||
(current->gid != child->egid) ||
(!cap_issubset(child->cap_permitted, current->cap_permitted)) ||
(current->gid != child->gid)) && !capable(CAP_SYS_PTRACE))
goto out_tsk;
+ rmb();
+ if (!child->dumpable && !capable(CAP_SYS_PTRACE))
+ goto out_tsk;
/* the same process cannot be attached many times */
if (child->ptrace & PT_PTRACED)
goto out_tsk;
* in2 number of bytes to copy
*
* Outputs:
- * ret0 0 in case of sucess. The number of bytes NOT copied in
+ * ret0 0 in case of success. The number of bytes NOT copied in
* case of error.
*
* Copyright (C) 2000 Hewlett-Packard Co
;;
sub ret0=ret0,tmp // length=now - back -1
mov ar.pfs=saved_pfs // because of ar.ec, restore no matter what
- br.ret.sptk.few rp // end of sucessful recovery code
+ br.ret.sptk.few rp // end of successful recovery code
END(strlen)
;;
sub ret0=ret0,tmp // length=now - back -1
mov ar.pfs=saved_pfs // because of ar.ec, restore no matter what
- br.ret.sptk.few rp // end of sucessful recovery code
+ br.ret.sptk.few rp // end of successful recovery code
//
// We failed even on the normal load (called from exception handler)
while( (r = _elscuart_poll( sc )) == 0 );
if( r < 0 ) {
- /* some error occured */
+ /* some error occurred */
return r;
}
* above.
*
* Need to set the D_INTR_ISERR flag
- * in the dev_desc used for alocating the
+ * in the dev_desc used for allocating the
* error interrupt, so our interrupt will
* be properly routed and prioritized.
*
} else
xio_port = pcibr_dmamap->bd_xio_port;
- /* If this DMA is to an addres that
+ /* If this DMA is to an address that
* refers back to this Bridge chip,
* reduce it back to the correct
* PCI MEM address.
*
* CAUTION: Resetting bit BRIDGE_IRR_PCI_GRP_CLR, acknowledges
* a group of interrupts. If while handling this error,
- * some other error has occured, that would be
+ * some other error has occurred, that would be
* implicitly cleared by this write.
* Need a way to ensure we don't inadvertently clear some
* other errors.
dn_irqs[irq-160].handler(irq,dn_irqs[irq-160].dev_id,fp);
}
else {
- printk("spurious irq %d occured\n",irq);
+ printk("spurious irq %d occurred\n",irq);
}
*(volatile unsigned char *)(pica)=0x20;
# in INEX2. #
# #
# A10. Or in INEX. #
-# If INEX is set, round error occured. This is #
+# If INEX is set, round error occurred. This is #
# compensated for by 'or-ing' in the INEX2 flag to #
# the lsb of Y. #
# #
fmul.x %fp1,%fp0 # calculate X * SCALE -> Y to fp0
# A10. Or in INEX.
-# If INEX is set, round error occured. This is compensated
+# If INEX is set, round error occurred. This is compensated
# for by 'or-ing' in the INEX2 flag to the lsb of Y.
#
# Register usage:
# fmovm.l &0x0,-(%sp) # save no fpregs
# PROLOGUE END ##########################################################
- mov.w %cc,MUL64_CC(%a6) # save incomming ccodes
+ mov.w %cc,MUL64_CC(%a6) # save incoming ccodes
mov.l 0x8(%a6),%d0 # store multiplier in d0
beq.w mulu64_zero # handle zero separately
# fmovm.l &0x0,-(%sp) # save no fpregs
# PROLOGUE END ##########################################################
- mov.w %cc,MUL64_CC(%a6) # save incomming ccodes
+ mov.w %cc,MUL64_CC(%a6) # save incoming ccodes
mov.l 0x8(%a6),%d0 # store multiplier in d0
beq.b mulu64_zero # handle zero separately
# assert LOCKE* for the final write operation. #
# (13)Exit. #
# #
-# The algorithm is actually implemented slightly diferently #
+# The algorithm is actually implemented slightly differently #
# depending on the size of the operation and the misalignment of the #
# operand. A misaligned operand must be written in aligned chunks or #
# else the BUSCR register control gets confused. #
# in INEX2. #
# #
# A10. Or in INEX. #
-# If INEX is set, round error occured. This is #
+# If INEX is set, round error occurred. This is #
# compensated for by 'or-ing' in the INEX2 flag to #
# the lsb of Y. #
# #
fmul.x %fp1,%fp0 # calculate X * SCALE -> Y to fp0
# A10. Or in INEX.
-# If INEX is set, round error occured. This is compensated
+# If INEX is set, round error occurred. This is compensated
# for by 'or-ing' in the INEX2 flag to the lsb of Y.
#
# Register usage:
* dev - device.
* i - resource.
*
- * Result: 0 if successfull.
+ * Result: 0 if successful.
*/
int __init pcibios_assign_resource(struct pci_dev *dev, int i)
#ifdef CONFIG_MVME16x
is_not_mvme16x(L(gvtdone))
- /* Need to get the BRD_ID info to diferentiate between 162, 167,
+ /* Need to get the BRD_ID info to differentiate between 162, 167,
* etc. This is available as a BI_VME_BRDINFO tag with later
* versions of VMELILO and TFTPLILO, otherwise we call the Bug.
*/
/*
* Do we need these probe functions on the m68k?
*
- * ... may be usefull with ISA devices
+ * ... may be useful with ISA devices
*/
unsigned long probe_irq_on (void)
{
- exact keypress/release sequence
- 'showkey -s' run on q40, non-X session
- 'showkey -s' run on a PC, non-X session
- - AT codes as displayed by the q40 debuging ROM
+ - AT codes as displayed by the q40 debugging ROM
btw if the showkey output from PC and Q40 doesn't differ then you have some
classic configuration problem - don't send me anything in this case
mach_max_dma_address = 32*1024*1024; /* no DMA at all, but ide-scsi requires it.. */
-/* userfull for early debuging stages writes kernel messages into SRAM */
+/* useful for early debugging stages; writes kernel messages into SRAM */
if (!strncmp( m68k_debug_device,"mem",3 ))
{
add_interrupt_randomness(irq);
__cli();
} else {
- printk("do_IRQ: Unregistered IRQ (0x%X) occured\n", irq);
+ printk("do_IRQ: Unregistered IRQ (0x%X) occurred\n", irq);
}
unmask_irq(irq);
irq_exit(cpu);
char ibuf [4096];
int remaining, cur, count;
- /* Go the the start of the ELF symbol table... */
+ /* Go to the start of the ELF symbol table... */
if (lseek (in, offset, SEEK_SET) < 0)
{
perror ("copy: lseek");
extern int (*prom_printf)(char *, ...);
#endif
-volatile unsigned long mem_err = 0; /* So we know an error occured */
+volatile unsigned long mem_err = 0; /* So we know an error occurred */
extern char _end;
/* $Id: indy_sc.c,v 1.14 2000/03/25 22:35:07 ralf Exp $
*
- * indy_sc.c: Indy cache managment functions.
+ * indy_sc.c: Indy cache management functions.
*
* Copyright (C) 1997 Ralf Baechle (ralf@gnu.org),
* derived from r4xx0.c by David S. Miller (dm@engr.sgi.com).
*tcwp = (SGINT_TCWORD_CNT2 | SGINT_TCWORD_CALL | SGINT_TCWORD_MSWST);
/* Return the difference, this is how far the r4k counter increments
- * for every 1/HZ seconds. We round off the the nearest 1 MHz of
+ * for every 1/HZ seconds. We round off to the nearest 1 MHz of
* master clock (= 1000000 / 100 / 2 = 5000 count).
*/
return ((ct1 - ct0) / 5000) * 5000;
/*
* Unlock any spinlocks which will prevent us from getting the
- * message out (timerlist_lock is aquired through the
+ * message out (timerlist_lock is acquired through the
* console unblank code)
*/
void bust_spinlocks(void)
/* $Id: ip22-sc.c,v 1.2 1999/12/04 03:59:01 ralf Exp $
*
- * indy_sc.c: Indy cache managment functions.
+ * indy_sc.c: Indy cache management functions.
*
* Copyright (C) 1997 Ralf Baechle (ralf@gnu.org),
* derived from r4xx0.c by David S. Miller (dm@engr.sgi.com).
extern void mackbd_init_hw(void);
extern unsigned char mackbd_sysrq_xlate[128];
-kdev_t boot_dev;
+extern kdev_t boot_dev;
extern PTE *Hash, *Hash_end;
extern unsigned long Hash_size, Hash_mask;
void *boot_host;
int boot_target;
int boot_part;
-kdev_t boot_dev;
+extern kdev_t boot_dev;
void __init
pmac_init2(void)
-# $Id: config.in,v 1.130 2001/01/18 04:47:44 davem Exp $
+# $Id: config.in,v 1.133 2001/03/07 00:44:36 davem Exp $
# For a description of the syntax of this configuration file,
# see the Configure script.
#
fi
endmenu
-source drivers/message/fusion/Config.in
-
source drivers/fc4/Config.in
if [ "$CONFIG_PCI" = "y" -a "$CONFIG_EXPERIMENTAL" = "y" ]; then
# CONFIG_MD_RAID1 is not set
# CONFIG_MD_RAID5 is not set
# CONFIG_BLK_DEV_LVM is not set
-# CONFIG_LVM_PROC_FS is not set
# CONFIG_BLK_DEV_RAM is not set
# CONFIG_BLK_DEV_INITRD is not set
CONFIG_SCSI_QLOGICPTI=m
CONFIG_SCSI_AIC7XXX=m
CONFIG_AIC7XXX_CMDS_PER_DEVICE=253
-CONFIG_AIC7XXX_RESET_DEALY=5000
+CONFIG_AIC7XXX_RESET_DELAY=5000
CONFIG_SCSI_AIC7XXX_OLD=m
-CONFIG_AIC7XXX_OLD_TAGGED_QUEUEING=y
+CONFIG_AIC7XXX_OLD_TCQ_ON_BY_DEFAULT=y
CONFIG_AIC7XXX_OLD_CMDS_PER_DEVICE=8
CONFIG_AIC7XXX_OLD_PROC_STATS=y
CONFIG_SCSI_NCR53C8XX=m
-# $Id: Makefile,v 1.63 2000/12/14 22:57:25 davem Exp $
+# $Id: Makefile,v 1.64 2001/02/28 05:59:45 davem Exp $
# Makefile for the linux kernel.
#
# Note! Dependencies are done automagically by 'make dep', which also
power.o sbus.o iommu_common.o sparc64_ksyms.o
obj-$(CONFIG_PCI) += ebus.o pci_common.o pci_iommu.o \
- pci_psycho.o pci_sabre.o
+ pci_psycho.o pci_sabre.o pci_schizo.o
obj-$(CONFIG_SMP) += smp.o trampoline.o
obj-$(CONFIG_SPARC32_COMPAT) += sys32.o sys_sparc32.o signal32.o ioctl32.o
obj-$(CONFIG_BINFMT_ELF32) += binfmt_elf32.o
{ 0x22, 0x10, 0, "UltraSparc II integrated FPU"},
{ 0x17, 0x11, 0, "UltraSparc II integrated FPU"},
{ 0x17, 0x12, 0, "UltraSparc IIi integrated FPU"},
+ { 0x17, 0x13, 0, "UltraSparc IIe integrated FPU"},
{ 0x17, 0x14, 0, "UltraSparc III integrated FPU"},
};
{ 0x22, 0x10, "TI UltraSparc II (BlackBird)"},
{ 0x17, 0x11, "TI UltraSparc II (BlackBird)"},
{ 0x17, 0x12, "TI UltraSparc IIi"},
- { 0x17, 0x14, "TI UltraSparc III (Cheetah)"}, /* A guess... */
+ { 0x17, 0x13, "TI UltraSparc IIe"},
+ { 0x3e, 0x14, "TI UltraSparc III (Cheetah)"},
};
#define NSPARCCHIPS (sizeof(linux_sparc_chips)/sizeof(struct cpu_iu_info))
-/* $Id: ebus.c,v 1.54 2001/02/13 01:16:44 davem Exp $
+/* $Id: ebus.c,v 1.57 2001/02/28 03:28:55 davem Exp $
* ebus.c: PCI to EBus bridge device.
*
* Copyright (C) 1997 Eddie C. Dost (ecd@skynet.be)
struct pci_controller_info *p = pbm->parent;
if (ebus_intmap_match(dev->bus, preg, &irqs[i]) != -1) {
- dev->irqs[i] = p->irq_build(p,
+ dev->irqs[i] = p->irq_build(pbm,
dev->bus->self,
irqs[i]);
} else {
struct pci_controller_info *p = pbm->parent;
if (ebus_intmap_match(dev->bus, ®s[0], &irqs[i]) != -1) {
- dev->irqs[i] = p->irq_build(p,
+ dev->irqs[i] = p->irq_build(pbm,
dev->bus->self,
irqs[i]);
} else {
-/* $Id: head.S,v 1.65 2000/05/09 17:40:13 davem Exp $
+/* $Id: head.S,v 1.67 2001/03/04 18:31:00 davem Exp $
* head.S: Initial boot code for the Sparc64 port of Linux.
*
* Copyright (C) 1996,1997 David S. Miller (davem@caip.rutgers.edu)
#include <asm/signal.h>
#include <asm/processor.h>
#include <asm/lsu.h>
+#include <asm/dcr.h>
+#include <asm/dcu.h>
#include <asm/head.h>
#include <asm/ttable.h>
* PROM entry point is on %o4
*/
sparc64_boot:
+ rdpr %ver, %g1
+ sethi %hi(0x003e0014), %g5
+ srlx %g1, 32, %g1
+ or %g5, %lo(0x003e0014), %g5
+ cmp %g1, %g5
+ bne,pt %icc, spitfire_boot
+ nop
+
+cheetah_boot:
+ mov DCR_BPE | DCR_RPE | DCR_SI | DCR_MS, %g1
+ wr %g1, %asr18
+
+ sethi %uhi(DCU_ME | DCU_RE | DCU_PE | DCU_HPE | DCU_SPE | DCU_SL | DCU_WE), %g5
+ or %g5, %ulo(DCU_ME | DCU_RE | DCU_PE | DCU_HPE | DCU_SPE | DCU_SL | DCU_WE), %g5
+ sllx %g5, 32, %g5
+ ldxa [%g0] ASI_DCU_CONTROL_REG, %g1
+ or %g1, %g5, %g1
+ stxa %g5, [%g0] ASI_DCU_CONTROL_REG
+ membar #Sync
+
+ wrpr %g0, (PSTATE_PRIV|PSTATE_PEF|PSTATE_IE), %pstate
+ wr %g0, 0, %fprs
+
+ /* Just like for Spitfire, we probe itlb-2 for a mapping which
+ * matches our current %pc. We take the physical address in
+ * that mapping and use it to make our own.
+ */
+
+ /* %g5 holds the tlb data */
+ sethi %uhi(_PAGE_VALID | _PAGE_SZ4MB), %g5
+ sllx %g5, 32, %g5
+ or %g5, (_PAGE_CP | _PAGE_CV | _PAGE_P | _PAGE_L | _PAGE_W | _PAGE_G), %g5
+
+ /* Put PADDR tlb data mask into %g3. */
+ sethi %uhi(_PAGE_PADDR), %g3
+ or %g3, %ulo(_PAGE_PADDR), %g3
+ sllx %g3, 32, %g3
+ sethi %hi(_PAGE_PADDR), %g7
+ or %g7, %lo(_PAGE_PADDR), %g7
+ or %g3, %g7, %g3
+
+ set 2 << 16, %l0 /* TLB entry walker. */
+ set 0x1fff, %l2 /* Page mask. */
+ rd %pc, %l3
+ andn %l3, %l2, %g2 /* vaddr comparator */
+
+1: ldxa [%l0] ASI_ITLB_TAG_READ, %g1
+ membar #Sync
+ andn %g1, %l2, %g1
+ cmp %g1, %g2
+ be,pn %xcc, cheetah_got_tlbentry
+ nop
+ and %l0, (127 << 3), %g1
+ cmp %g1, (127 << 3)
+ blu,pt %xcc, 1b
+ add %l0, (1 << 3), %l0
+
+cheetah_got_tlbentry:
+ ldxa [%l0] ASI_ITLB_DATA_ACCESS, %g1
+ membar #Sync
+ and %g1, %g3, %g1
+ sub %g1, %g2, %g1
+ or %g5, %g1, %g5
+
+ /* Clear out any KERNBASE area entries. */
+ set 2 << 16, %l0
+ sethi %hi(KERNBASE), %g3
+ sethi %hi(KERNBASE<<1), %g7
+ mov TLB_TAG_ACCESS, %l7
+
+ /* First, check ITLB */
+1: ldxa [%l0] ASI_ITLB_TAG_READ, %g1
+ membar #Sync
+ andn %g1, %l2, %g1
+ cmp %g1, %g3
+ blu,pn %xcc, 2f
+ cmp %g1, %g7
+ bgeu,pn %xcc, 2f
+ nop
+ stxa %g0, [%l7] ASI_IMMU
+ membar #Sync
+ stxa %g0, [%l0] ASI_ITLB_DATA_ACCESS
+ membar #Sync
+
+2: and %l0, (127 << 3), %g1
+ cmp %g1, (127 << 3)
+ blu,pt %xcc, 1b
+ add %l0, (1 << 3), %l0
+
+ /* Next, check DTLB */
+ set 2 << 16, %l0
+1: ldxa [%l0] ASI_DTLB_TAG_READ, %g1
+ membar #Sync
+ andn %g1, %l2, %g1
+ cmp %g1, %g3
+ blu,pn %xcc, 2f
+ cmp %g1, %g7
+ bgeu,pn %xcc, 2f
+ nop
+ stxa %g0, [%l7] ASI_DMMU
+ membar #Sync
+ stxa %g0, [%l0] ASI_DTLB_DATA_ACCESS
+ membar #Sync
+
+2: and %l0, (511 << 3), %g1
+ cmp %g1, (511 << 3)
+ blu,pt %xcc, 1b
+ add %l0, (1 << 3), %l0
+
+ /* Now lock the TTE we created into ITLB-0 and DTLB-0,
+ * entry 15.
+ */
+ sethi %hi(KERNBASE), %g3
+ set (0 << 16) | (15 << 3), %g7
+ stxa %g3, [%l7] ASI_DMMU
+ membar #Sync
+ stxa %g5, [%g7] ASI_DTLB_DATA_ACCESS
+ membar #Sync
+ stxa %g3, [%l7] ASI_IMMU
+ membar #Sync
+ stxa %g5, [%g7] ASI_ITLB_DATA_ACCESS
+ membar #Sync
+ flush %g3
+ membar #Sync
+ ba,pt %xcc, 1f
+ nop
+
+1: set sun4u_init, %g2
+ jmpl %g2 + %g0, %g0
+ nop
+
+spitfire_boot:
/* Typically PROM has already enabled both MMU's and both on-chip
* caches, but we do it here anyway just to be paranoid.
*/
wrpr %g0, (PSTATE_PRIV|PSTATE_PEF|PSTATE_IE), %pstate
wr %g0, 0, %fprs
-create_mappings:
+spitfire_create_mappings:
/* %g5 holds the tlb data */
sethi %uhi(_PAGE_VALID | _PAGE_SZ4MB), %g5
sllx %g5, 32, %g5
*/
/* Put PADDR tlb data mask into %g3. */
- sethi %uhi(_PAGE_PADDR), %g3
- or %g3, %ulo(_PAGE_PADDR), %g3
+ sethi %uhi(_PAGE_PADDR_SF), %g3
+ or %g3, %ulo(_PAGE_PADDR_SF), %g3
sllx %g3, 32, %g3
- sethi %hi(_PAGE_PADDR), %g7
- or %g7, %lo(_PAGE_PADDR), %g7
+ sethi %hi(_PAGE_PADDR_SF), %g7
+ or %g7, %lo(_PAGE_PADDR_SF), %g7
or %g3, %g7, %g3
/* Walk through entire ITLB, looking for entry which maps
nop
andn %g1, %l2, %g1 /* Get vaddr */
cmp %g1, %g2
- be,a,pn %xcc, got_tlbentry
+ be,a,pn %xcc, spitfire_got_tlbentry
ldxa [%l0] ASI_ITLB_DATA_ACCESS, %g1
+ /* XXX Spitfire dependency... */
cmp %l0, (63 << 3)
blu,pt %xcc, 1b
add %l0, (1 << 3), %l0
-got_tlbentry:
+spitfire_got_tlbentry:
/* Nops here again, perhaps Cheetah/Blackbird are better behaved... */
nop
nop
stxa %g0, [%l7] ASI_IMMU
stxa %g0, [%l0] ASI_ITLB_DATA_ACCESS
2:
+ /* XXX Spitfire dependency... */
cmp %l0, (63 << 3)
blu,pt %xcc, 1b
add %l0, (1 << 3), %l0
stxa %g0, [%l7] ASI_DMMU
stxa %g0, [%l0] ASI_DTLB_DATA_ACCESS
2:
+ /* XXX Spitfire dependency... */
cmp %l0, (63 << 3)
blu,pt %xcc, 1b
add %l0, (1 << 3), %l0
*/
sethi %hi(KERNBASE), %g3
+ /* XXX Spitfire dependency... */
mov (63 << 3), %g7
stxa %g3, [%l7] ASI_DMMU /* KERNBASE into TLB TAG */
stxa %g5, [%g7] ASI_DTLB_DATA_ACCESS /* TTE into TLB DATA */
stxa %g3, [%g2] ASI_IMMU
stxa %g3, [%g2] ASI_DMMU
+ /* XXX Spitfire dependency... */
mov (63 << 3), %g7
ldxa [%g7] ASI_ITLB_DATA_ACCESS, %g1
andn %g1, (_PAGE_G), %g1
/* Set fixed globals used by dTLB miss handler. */
#define KERN_HIGHBITS ((_PAGE_VALID | _PAGE_SZ4MB) ^ 0xfffff80000000000)
#define KERN_LOWBITS (_PAGE_CP | _PAGE_CV | _PAGE_P | _PAGE_W)
-#ifdef THIS_IS_CHEETAH
-#error Dave, make sure you took care of other issues in rest of sparc64 code...
-#define VPTE_BASE 0xffe0000000000000
-#else /* Spitfire/Blackbird */
-#define VPTE_BASE 0xfffffffe00000000
-#endif
+
+#define VPTE_BASE_CHEETAH 0xffe0000000000000
+#define VPTE_BASE_SPITFIRE 0xfffffffe00000000
+
mov TSB_REG, %g1
stxa %g0, [%g1] ASI_DMMU
membar #Sync
or %g2, %ulo(KERN_HIGHBITS), %g2
sllx %g2, 32, %g2
or %g2, KERN_LOWBITS, %g2
- sethi %uhi(VPTE_BASE), %g3
- or %g3, %ulo(VPTE_BASE), %g3
- sllx %g3, 32, %g3
+
+ rdpr %ver, %g3
+ sethi %hi(0x003e0014), %g7
+ srlx %g3, 32, %g3
+ or %g7, %lo(0x003e0014), %g7
+ cmp %g3, %g7
+ bne,pt %icc, 1f
+ nop
+
+ sethi %uhi(VPTE_BASE_CHEETAH), %g3
+ or %g3, %ulo(VPTE_BASE_CHEETAH), %g3
+ ba,pt %xcc, 2f
+ sllx %g3, 32, %g3
+1:
+ sethi %uhi(VPTE_BASE_SPITFIRE), %g3
+ or %g3, %ulo(VPTE_BASE_SPITFIRE), %g3
+ sllx %g3, 32, %g3
+
+2:
clr %g7
#undef KERN_HIGHBITS
#undef KERN_LOWBITS
-/* $Id: pci.c,v 1.21 2001/01/10 18:22:59 davem Exp $
+/* $Id: pci.c,v 1.22 2001/02/28 05:59:45 davem Exp $
* pci.c: UltraSparc PCI controller support.
*
* Copyright (C) 1997, 1998, 1999 David S. Miller (davem@redhat.com)
/* Probe for all PCI controllers in the system. */
extern void sabre_init(int);
extern void psycho_init(int);
-#if 0
extern void schizo_init(int);
-#endif
static struct {
char *model_name;
{ "SUNW,sabre", sabre_init },
{ "pci108e,a000", sabre_init },
{ "SUNW,psycho", psycho_init },
- { "pci108e,8000", psycho_init }
-#if 0
+ { "pci108e,8000", psycho_init },
{ "SUNW,schizo", schizo_init },
{ "pci108e,8001", schizo_init }
-#endif
};
#define PCI_NUM_CONTROLLER_TYPES (sizeof(pci_controller_table) / \
sizeof(pci_controller_table[0]))
-/* $Id: pci_common.c,v 1.13 2001/02/13 01:16:44 davem Exp $
+/* $Id: pci_common.c,v 1.14 2001/02/28 03:28:55 davem Exp $
* pci_common.c: PCI controller common support.
*
* Copyright (C) 1999 David S. Miller (davem@redhat.com)
/* Fully specified already? */
if (((prom_irq & PCI_IRQ_IGN) >> 6) == portid) {
- pdev->irq = p->irq_build(p, pdev, prom_irq);
+ pdev->irq = p->irq_build(pbm, pdev, prom_irq);
goto have_irq;
}
/* An onboard device? (bit 5 set) */
if ((prom_irq & PCI_IRQ_INO) & 0x20) {
- pdev->irq = p->irq_build(p, pdev, (portid << 6 | prom_irq));
+ pdev->irq = p->irq_build(pbm, pdev, (portid << 6 | prom_irq));
goto have_irq;
}
/* Can we find a matching entry in the interrupt-map? */
if (pci_intmap_match(pdev, &prom_irq)) {
- pdev->irq = p->irq_build(p, pdev, (portid << 6) | prom_irq);
+ pdev->irq = p->irq_build(pbm, pdev, (portid << 6) | prom_irq);
goto have_irq;
}
}
slot = slot << 2;
- pdev->irq = p->irq_build(p, pdev,
+ pdev->irq = p->irq_build(pbm, pdev,
((portid << 6) & PCI_IRQ_IGN) |
(bus | slot | line));
}
pci_fixup_irq(pbm, pci_bus_b(walk));
}
-#undef DEBUG_BUSMASTERING
-
static void pdev_setup_busmastering(struct pci_dev *pdev, int is_66mhz)
{
u16 cmd;
u8 hdr_type, min_gnt, ltimer;
-#ifdef DEBUG_BUSMASTERING
- printk("PCI: Checking DEV(%s), ", pdev->name);
-#endif
-
pci_read_config_word(pdev, PCI_COMMAND, &cmd);
cmd |= PCI_COMMAND_MASTER;
pci_write_config_word(pdev, PCI_COMMAND, cmd);
* mastering so we have nothing to do here.
*/
pci_read_config_word(pdev, PCI_COMMAND, &cmd);
- if ((cmd & PCI_COMMAND_MASTER) == 0) {
-#ifdef DEBUG_BUSMASTERING
- printk("no bus mastering...\n");
-#endif
+ if ((cmd & PCI_COMMAND_MASTER) == 0)
return;
- }
/* Set correct cache line size, 64-byte on all
* Sparc64 PCI systems. Note that the value is
* measured in 32-bit words.
*/
-#ifdef DEBUG_BUSMASTERING
- printk("set cachelinesize, ");
-#endif
pci_write_config_byte(pdev, PCI_CACHE_LINE_SIZE,
64 / sizeof(u32));
pci_read_config_byte(pdev, PCI_HEADER_TYPE, &hdr_type);
hdr_type &= ~0x80;
- if (hdr_type != PCI_HEADER_TYPE_NORMAL) {
-#ifdef DEBUG_BUSMASTERING
- printk("hdr_type=%x, exit\n", hdr_type);
-#endif
+ if (hdr_type != PCI_HEADER_TYPE_NORMAL)
return;
- }
/* If the latency timer is already programmed with a non-zero
* value, assume whoever set it (OBP or whoever) knows what
* they are doing.
*/
pci_read_config_byte(pdev, PCI_LATENCY_TIMER, <imer);
- if (ltimer != 0) {
-#ifdef DEBUG_BUSMASTERING
- printk("ltimer was %x, exit\n", ltimer);
-#endif
+ if (ltimer != 0)
return;
- }
/* XXX Since I'm tipping off the min grant value to
* XXX choose a suitable latency timer value, I also
}
pci_write_config_byte(pdev, PCI_LATENCY_TIMER, ltimer);
-#ifdef DEBUG_BUSMASTERING
- printk("set ltimer to %x\n", ltimer);
-#endif
}
void pci_determine_66mhz_disposition(struct pci_pbm_info *pbm,
-/* $Id: pci_psycho.c,v 1.19 2001/02/13 01:16:44 davem Exp $
+/* $Id: pci_psycho.c,v 1.21 2001/02/28 03:28:55 davem Exp $
* pci_psycho.c: PSYCHO/U2P specific PCI controller support.
*
* Copyright (C) 1997, 1998, 1999 David S. Miller (davem@caipfs.rutgers.edu)
* ---------------------------------------------------------
*/
#define PSYCHO_CONFIG_BASE(PBM) \
- ((PBM)->parent->config_space | (1UL << 24))
+ ((PBM)->config_space | (1UL << 24))
#define PSYCHO_CONFIG_ENCODE(BUS, DEVFN, REG) \
(((unsigned long)(BUS) << 16) | \
((unsigned long)(DEVFN) << 8) | \
return ret;
}
-static unsigned int __init psycho_irq_build(struct pci_controller_info *p,
+static unsigned int __init psycho_irq_build(struct pci_pbm_info *pbm,
struct pci_dev *pdev,
unsigned int ino)
{
+ struct pci_controller_info *p = pbm->parent;
struct ino_bucket *bucket;
unsigned long imap, iclr;
unsigned long imap_off, iclr_off;
#define PSYCHO_PCIERR_B_INO 0x31
static void __init psycho_register_error_handlers(struct pci_controller_info *p)
{
+ struct pci_pbm_info *pbm = &p->pbm_A; /* arbitrary */
unsigned long base = p->controller_regs;
unsigned int irq, portid = p->portid;
u64 tmp;
/* Build IRQs and register handlers. */
- irq = psycho_irq_build(p, NULL, (portid << 6) | PSYCHO_UE_INO);
+ irq = psycho_irq_build(pbm, NULL, (portid << 6) | PSYCHO_UE_INO);
if (request_irq(irq, psycho_ue_intr,
SA_SHIRQ, "PSYCHO UE", p) < 0) {
prom_printf("PSYCHO%d: Cannot register UE interrupt.\n",
prom_halt();
}
- irq = psycho_irq_build(p, NULL, (portid << 6) | PSYCHO_CE_INO);
+ irq = psycho_irq_build(pbm, NULL, (portid << 6) | PSYCHO_CE_INO);
if (request_irq(irq, psycho_ce_intr,
SA_SHIRQ, "PSYCHO CE", p) < 0) {
prom_printf("PSYCHO%d: Cannot register CE interrupt.\n",
prom_halt();
}
- irq = psycho_irq_build(p, NULL, (portid << 6) | PSYCHO_PCIERR_A_INO);
+ irq = psycho_irq_build(pbm, NULL, (portid << 6) | PSYCHO_PCIERR_A_INO);
if (request_irq(irq, psycho_pcierr_intr,
SA_SHIRQ, "PSYCHO PCIERR", &p->pbm_A) < 0) {
prom_printf("PSYCHO%d(PBMA): Cannot register PciERR interrupt.\n",
prom_halt();
}
- irq = psycho_irq_build(p, NULL, (portid << 6) | PSYCHO_PCIERR_B_INO);
+ irq = psycho_irq_build(pbm, NULL, (portid << 6) | PSYCHO_PCIERR_B_INO);
if (request_irq(irq, psycho_pcierr_intr,
SA_SHIRQ, "PSYCHO PCIERR", &p->pbm_B) < 0) {
prom_printf("PSYCHO%d(PBMB): Cannot register PciERR interrupt.\n",
printk("PCI: Found PSYCHO, control regs at %016lx\n",
p->controller_regs);
- p->config_space = pr_regs[2].phys_addr + PSYCHO_CONFIGSPACE;
- printk("PSYCHO: PCI config space at %016lx\n", p->config_space);
+ p->pbm_A.config_space = p->pbm_B.config_space =
+ (pr_regs[2].phys_addr + PSYCHO_CONFIGSPACE);
+ printk("PSYCHO: Shared PCI config space at %016lx\n",
+ p->pbm_A.config_space);
/*
* Psycho's PCI MEM space is mapped to a 2GB aligned area, so
-/* $Id: pci_sabre.c,v 1.23 2001/02/13 01:16:44 davem Exp $
+/* $Id: pci_sabre.c,v 1.25 2001/02/28 03:28:55 davem Exp $
* pci_sabre.c: Sabre specific PCI controller support.
*
* Copyright (C) 1997, 1998, 1999 David S. Miller (davem@caipfs.rutgers.edu)
* ---------------------------------------------------------
*/
#define SABRE_CONFIG_BASE(PBM) \
- ((PBM)->parent->config_space | (1UL << 24))
+ ((PBM)->config_space | (1UL << 24))
#define SABRE_CONFIG_ENCODE(BUS, DEVFN, REG) \
(((unsigned long)(BUS) << 16) | \
((unsigned long)(DEVFN) << 8) | \
return ret;
}
-static unsigned int __init sabre_irq_build(struct pci_controller_info *p,
+static unsigned int __init sabre_irq_build(struct pci_pbm_info *pbm,
struct pci_dev *pdev,
unsigned int ino)
{
+ struct pci_controller_info *p = pbm->parent;
struct ino_bucket *bucket;
unsigned long imap, iclr;
unsigned long imap_off, iclr_off;
#define SABRE_PCIERR_INO 0x30
static void __init sabre_register_error_handlers(struct pci_controller_info *p)
{
+ struct pci_pbm_info *pbm = &p->pbm_A; /* arbitrary */
unsigned long base = p->controller_regs;
unsigned long irq, portid = p->portid;
u64 tmp;
(SABRE_UEAFSR_PDRD | SABRE_UEAFSR_PDWR |
SABRE_UEAFSR_SDRD | SABRE_UEAFSR_SDWR |
SABRE_UEAFSR_SDTE | SABRE_UEAFSR_PDTE));
- irq = sabre_irq_build(p, NULL, (portid << 6) | SABRE_UE_INO);
+ irq = sabre_irq_build(pbm, NULL, (portid << 6) | SABRE_UE_INO);
if (request_irq(irq, sabre_ue_intr,
SA_SHIRQ, "SABRE UE", p) < 0) {
prom_printf("SABRE%d: Cannot register UE interrupt.\n",
sabre_write(base + SABRE_CE_AFSR,
(SABRE_CEAFSR_PDRD | SABRE_CEAFSR_PDWR |
SABRE_CEAFSR_SDRD | SABRE_CEAFSR_SDWR));
- irq = sabre_irq_build(p, NULL, (portid << 6) | SABRE_CE_INO);
+ irq = sabre_irq_build(pbm, NULL, (portid << 6) | SABRE_CE_INO);
if (request_irq(irq, sabre_ce_intr,
SA_SHIRQ, "SABRE CE", p) < 0) {
prom_printf("SABRE%d: Cannot register CE interrupt.\n",
prom_halt();
}
- irq = sabre_irq_build(p, NULL, (portid << 6) | SABRE_PCIERR_INO);
+ irq = sabre_irq_build(pbm, NULL, (portid << 6) | SABRE_PCIERR_INO);
if (request_irq(irq, sabre_pcierr_intr,
SA_SHIRQ, "SABRE PCIERR", p) < 0) {
prom_printf("SABRE%d: Cannot register PciERR interrupt.\n",
SABRE_PCICTRL_ARBPARK | SABRE_PCICTRL_AEN));
/* Now map in PCI config space for entire SABRE. */
- p->config_space = p->controller_regs + SABRE_CONFIGSPACE;
- printk("SABRE: PCI config space at %016lx\n", p->config_space);
+ p->pbm_A.config_space = p->pbm_B.config_space =
+ (p->controller_regs + SABRE_CONFIGSPACE);
+ printk("SABRE: Shared PCI config space at %016lx\n",
+ p->pbm_A.config_space);
err = prom_getproperty(pnode, "virtual-dma",
(char *)&vdma[0], sizeof(vdma));
-/* $Id: pci_schizo.c,v 1.3 2001/02/13 01:16:44 davem Exp $
+/* $Id: pci_schizo.c,v 1.8 2001/03/01 08:05:32 davem Exp $
* pci_schizo.c: SCHIZO specific PCI controller support.
*
* Copyright (C) 2001 David S. Miller (davem@redhat.com)
#include "pci_impl.h"
+/* All SCHIZO registers are 64-bits. The following accessor
+ * routines are how they are accessed. The REG parameter
+ * is a physical address.
+ */
+#define schizo_read(__reg) \
+({ u64 __ret; \
+ __asm__ __volatile__("ldxa [%1] %2, %0" \
+ : "=r" (__ret) \
+ : "r" (__reg), "i" (ASI_PHYS_BYPASS_EC_E) \
+ : "memory"); \
+ __ret; \
+})
+#define schizo_write(__reg, __val) \
+ __asm__ __volatile__("stxa %0, [%1] %2" \
+ : /* no outputs */ \
+ : "r" (__val), "r" (__reg), \
+ "i" (ASI_PHYS_BYPASS_EC_E))
+
+/* This is a convention that at least Excalibur and Merlin
+ * follow. I suppose the SCHIZO used in Starcat and friends
+ * will do similar.
+ *
+ * The only way I could see this changing is if the newlink
+ * block requires more space in Schizo's address space than
+ * they predicted, thus requiring an address space reorg when
+ * the newer Schizo is taped out.
+ *
+ * These offsets look weird because p->controller_regs holds the
+ * second PROM register property minus 0x10000, which is the
+ * base of the Safari and UPA64S registers of SCHIZO.
+ */
+#define SCHIZO_PBM_A_REGS_OFF (0x600000UL - 0x400000UL)
+#define SCHIZO_PBM_B_REGS_OFF (0x700000UL - 0x400000UL)
+
+/* Streaming buffer control register. */
+#define SCHIZO_STRBUF_CTRL_LPTR 0x00000000000000f0UL /* LRU Lock Pointer */
+#define SCHIZO_STRBUF_CTRL_LENAB 0x0000000000000008UL /* LRU Lock Enable */
+#define SCHIZO_STRBUF_CTRL_RRDIS 0x0000000000000004UL /* Rerun Disable */
+#define SCHIZO_STRBUF_CTRL_DENAB 0x0000000000000002UL /* Diagnostic Mode Enable */
+#define SCHIZO_STRBUF_CTRL_ENAB 0x0000000000000001UL /* Streaming Buffer Enable */
+
+/* IOMMU control register. */
+#define SCHIZO_IOMMU_CTRL_RESV 0xfffffffff9000000 /* Reserved */
+#define SCHIZO_IOMMU_CTRL_XLTESTAT 0x0000000006000000 /* Translation Error Status */
+#define SCHIZO_IOMMU_CTRL_XLTEERR 0x0000000001000000 /* Translation Error encountered */
+#define SCHIZO_IOMMU_CTRL_LCKEN 0x0000000000800000 /* Enable translation locking */
+#define SCHIZO_IOMMU_CTRL_LCKPTR 0x0000000000780000 /* Translation lock pointer */
+#define SCHIZO_IOMMU_CTRL_TSBSZ 0x0000000000070000 /* TSB Size */
+#define SCHIZO_IOMMU_TSBSZ_1K 0x0000000000000000 /* TSB Table 1024 8-byte entries */
+#define SCHIZO_IOMMU_TSBSZ_2K 0x0000000000010000 /* TSB Table 2048 8-byte entries */
+#define SCHIZO_IOMMU_TSBSZ_4K 0x0000000000020000 /* TSB Table 4096 8-byte entries */
+#define SCHIZO_IOMMU_TSBSZ_8K 0x0000000000030000 /* TSB Table 8192 8-byte entries */
+#define SCHIZO_IOMMU_TSBSZ_16K 0x0000000000040000 /* TSB Table 16k 8-byte entries */
+#define SCHIZO_IOMMU_TSBSZ_32K 0x0000000000050000 /* TSB Table 32k 8-byte entries */
+#define SCHIZO_IOMMU_TSBSZ_64K 0x0000000000060000 /* TSB Table 64k 8-byte entries */
+#define SCHIZO_IOMMU_TSBSZ_128K 0x0000000000070000 /* TSB Table 128k 8-byte entries */
+#define SCHIZO_IOMMU_CTRL_RESV2 0x000000000000fff8 /* Reserved */
+#define SCHIZO_IOMMU_CTRL_TBWSZ 0x0000000000000004 /* Assumed page size, 0=8k 1=64k */
+#define SCHIZO_IOMMU_CTRL_DENAB 0x0000000000000002 /* Diagnostic mode enable */
+#define SCHIZO_IOMMU_CTRL_ENAB 0x0000000000000001 /* IOMMU Enable */
+
+/* Schizo config space address format is nearly identical to
+ * that of PSYCHO:
+ *
+ * 32 24 23 16 15 11 10 8 7 2 1 0
+ * ---------------------------------------------------------
+ * |0 0 0 0 0 0 0 0 0| bus | device | function | reg | 0 0 |
+ * ---------------------------------------------------------
+ */
+#define SCHIZO_CONFIG_BASE(PBM) ((PBM)->config_space)
+#define SCHIZO_CONFIG_ENCODE(BUS, DEVFN, REG) \
+ (((unsigned long)(BUS) << 16) | \
+ ((unsigned long)(DEVFN) << 8) | \
+ ((unsigned long)(REG)))
+
+static void *schizo_pci_config_mkaddr(struct pci_pbm_info *pbm,
+ unsigned char bus,
+ unsigned int devfn,
+ int where)
+{
+ if (!pbm)
+ return NULL;
+ return (void *)
+ (SCHIZO_CONFIG_BASE(pbm) |
+ SCHIZO_CONFIG_ENCODE(bus, devfn, where));
+}
+
+/* 4 slots on pbm A, and 6 slots on pbm B. In both cases
+ * slot 0 is the SCHIZO host bridge itself.
+ */
+static int schizo_out_of_range(struct pci_pbm_info *pbm,
+ unsigned char bus,
+ unsigned char devfn)
+{
+ return ((pbm->parent == 0) ||
+ ((pbm == &pbm->parent->pbm_B) &&
+ (bus == pbm->pci_first_busno) &&
+ PCI_SLOT(devfn) > 6) ||
+ ((pbm == &pbm->parent->pbm_A) &&
+ (bus == pbm->pci_first_busno) &&
+ PCI_SLOT(devfn) > 4));
+}
+
+/* SCHIZO PCI configuration space accessors. */
+
static int schizo_read_byte(struct pci_dev *dev, int where, u8 *value)
{
- /* IMPLEMENT ME */
+ struct pci_pbm_info *pbm = pci_bus2pbm[dev->bus->number];
+ unsigned char bus = dev->bus->number;
+ unsigned int devfn = dev->devfn;
+ u8 *addr;
+
+ *value = 0xff;
+ addr = schizo_pci_config_mkaddr(pbm, bus, devfn, where);
+ if (!addr)
+ return PCIBIOS_SUCCESSFUL;
+
+ if (schizo_out_of_range(pbm, bus, devfn))
+ return PCIBIOS_SUCCESSFUL;
+ pci_config_read8(addr, value);
+ return PCIBIOS_SUCCESSFUL;
}
static int schizo_read_word(struct pci_dev *dev, int where, u16 *value)
{
- /* IMPLEMENT ME */
+ struct pci_pbm_info *pbm = pci_bus2pbm[dev->bus->number];
+ unsigned char bus = dev->bus->number;
+ unsigned int devfn = dev->devfn;
+ u16 *addr;
+
+ *value = 0xffff;
+ addr = schizo_pci_config_mkaddr(pbm, bus, devfn, where);
+ if (!addr)
+ return PCIBIOS_SUCCESSFUL;
+
+ if (schizo_out_of_range(pbm, bus, devfn))
+ return PCIBIOS_SUCCESSFUL;
+
+ if (where & 0x01) {
+ printk("pcibios_read_config_word: misaligned reg [%x]\n",
+ where);
+ return PCIBIOS_SUCCESSFUL;
+ }
+ pci_config_read16(addr, value);
+ return PCIBIOS_SUCCESSFUL;
}
static int schizo_read_dword(struct pci_dev *dev, int where, u32 *value)
{
- /* IMPLEMENT ME */
+ struct pci_pbm_info *pbm = pci_bus2pbm[dev->bus->number];
+ unsigned char bus = dev->bus->number;
+ unsigned int devfn = dev->devfn;
+ u32 *addr;
+
+ *value = 0xffffffff;
+ addr = schizo_pci_config_mkaddr(pbm, bus, devfn, where);
+ if (!addr)
+ return PCIBIOS_SUCCESSFUL;
+
+ if (schizo_out_of_range(pbm, bus, devfn))
+ return PCIBIOS_SUCCESSFUL;
+
+ if (where & 0x03) {
+ printk("pcibios_read_config_dword: misaligned reg [%x]\n",
+ where);
+ return PCIBIOS_SUCCESSFUL;
+ }
+
+ pci_config_read32(addr, value);
+ return PCIBIOS_SUCCESSFUL;
}
static int schizo_write_byte(struct pci_dev *dev, int where, u8 value)
{
- /* IMPLEMENT ME */
+ struct pci_pbm_info *pbm = pci_bus2pbm[dev->bus->number];
+ unsigned char bus = dev->bus->number;
+ unsigned int devfn = dev->devfn;
+ u8 *addr;
+
+ addr = schizo_pci_config_mkaddr(pbm, bus, devfn, where);
+ if (!addr)
+ return PCIBIOS_SUCCESSFUL;
+
+ if (schizo_out_of_range(pbm, bus, devfn))
+ return PCIBIOS_SUCCESSFUL;
+
+ pci_config_write8(addr, value);
+ return PCIBIOS_SUCCESSFUL;
}
static int schizo_write_word(struct pci_dev *dev, int where, u16 value)
{
- /* IMPLEMENT ME */
+ struct pci_pbm_info *pbm = pci_bus2pbm[dev->bus->number];
+ unsigned char bus = dev->bus->number;
+ unsigned int devfn = dev->devfn;
+ u16 *addr;
+
+ addr = schizo_pci_config_mkaddr(pbm, bus, devfn, where);
+ if (!addr)
+ return PCIBIOS_SUCCESSFUL;
+
+ if (schizo_out_of_range(pbm, bus, devfn))
+ return PCIBIOS_SUCCESSFUL;
+
+ if (where & 0x01) {
+ printk("pcibios_write_config_word: misaligned reg [%x]\n",
+ where);
+ return PCIBIOS_SUCCESSFUL;
+ }
+ pci_config_write16(addr, value);
+ return PCIBIOS_SUCCESSFUL;
}
static int schizo_write_dword(struct pci_dev *dev, int where, u32 value)
{
-	/* IMPLEMENT ME */
+	struct pci_pbm_info *pbm = pci_bus2pbm[dev->bus->number];
+	unsigned char bus = dev->bus->number;
+	unsigned int devfn = dev->devfn;
+	u32 *addr;
+
+	addr = schizo_pci_config_mkaddr(pbm, bus, devfn, where);
+	if (!addr)	/* no config space mapping for this device */
+		return PCIBIOS_SUCCESSFUL;
+
+	if (schizo_out_of_range(pbm, bus, devfn))
+		return PCIBIOS_SUCCESSFUL;	/* silently drop the write */
+
+	if (where & 0x03) {	/* dword writes must be 4-byte aligned */
+		printk("pcibios_write_config_dword: misaligned reg [%x]\n",
+		       where);
+		return PCIBIOS_SUCCESSFUL;
+	}
+	pci_config_write32(addr, value);
+	return PCIBIOS_SUCCESSFUL;
}
static struct pci_ops schizo_ops = {
schizo_write_dword
};
-static void __init schizo_scan_bus(struct pci_controller_info *p)
+/* SCHIZO interrupt mapping support.  Unlike Psycho, for this controller the
+ * imap/iclr registers are per-PBM, laid out as one 8-byte register per INO.
+ */
+#define SCHIZO_IMAP_BASE	0x1000UL
+#define SCHIZO_ICLR_BASE	0x1400UL
+
+static unsigned long schizo_imap_offset(unsigned long ino)
+{
+	return SCHIZO_IMAP_BASE + (ino * 8UL);	/* 8 bytes per INO */
+}
+
+static unsigned long schizo_iclr_offset(unsigned long ino)
 {
-	/* IMPLEMENT ME */
+	return SCHIZO_ICLR_BASE + (ino * 8UL);	/* 8 bytes per INO */
 }
-static unsigned int __init schizo_irq_build(struct pci_controller_info *p,
+/* PCI SCHIZO INO number to Sparc PIL level.  This table only matters for
+ * INOs which will not have an associated PCI device struct, ie. onboard
+ * EBUS devices and PCI controller internal error interrupts; a zero entry
+ * means the PIL is derived from the device's PCI class (schizo_ino_to_pil).
+ */
+static unsigned char schizo_pil_table[] = {
+/*0x00*/0, 0, 0, 0,	/* PCI slot 0  Int A, B, C, D */
+/*0x04*/0, 0, 0, 0,	/* PCI slot 1  Int A, B, C, D */
+/*0x08*/0, 0, 0, 0,	/* PCI slot 2  Int A, B, C, D */
+/*0x0c*/0, 0, 0, 0,	/* PCI slot 3  Int A, B, C, D */
+/*0x10*/0, 0, 0, 0,	/* PCI slot 4  Int A, B, C, D */
+/*0x14*/0, 0, 0, 0,	/* PCI slot 5  Int A, B, C, D */
+/*0x18*/0, 0, 0, 0,	/* PCI slot 6  Int A, B, C, D */
+/*0x1c*/8,		/* Parallel */
+/*0x1d*/0,		/* UNKNOWN */
+/*0x1e*/0,		/* UNKNOWN */
+/*0x1f*/0,		/* UNKNOWN */
+/*0x20*/13,		/* Audio Record */
+/*0x21*/14,		/* Audio Playback */
+/*0x22*/12,		/* Serial */
+/*0x23*/2,		/* EBUS I2C */
+/*0x24*/10,		/* RTC Clock */
+/*0x25*/11,		/* Floppy */
+/*0x26*/0,		/* UNKNOWN */
+/*0x27*/0,		/* UNKNOWN */
+/*0x28*/0,		/* UNKNOWN */
+/*0x29*/0,		/* UNKNOWN */
+/*0x2a*/10,		/* UPA 1 */
+/*0x2b*/10,		/* UPA 2 */
+/*0x2c*/0,		/* UNKNOWN */
+/*0x2d*/0,		/* UNKNOWN */
+/*0x2e*/0,		/* UNKNOWN */
+/*0x2f*/0,		/* UNKNOWN */
+/*0x30*/15,		/* Uncorrectable ECC */
+/*0x31*/15,		/* Correctable ECC */
+/*0x32*/15,		/* PCI Bus A Error */
+/*0x33*/15,		/* PCI Bus B Error */
+/*0x34*/15,		/* Safari Bus Error */
+/*0x35*/0,		/* Reserved */
+/*0x36*/0,		/* Reserved */
+/*0x37*/0,		/* Reserved */
+/*0x38*/0,		/* Reserved for NewLink */
+/*0x39*/0,		/* Reserved for NewLink */
+/*0x3a*/0,		/* Reserved for NewLink */
+/*0x3b*/0,		/* Reserved for NewLink */
+/*0x3c*/0,		/* Reserved for NewLink */
+/*0x3d*/0,		/* Reserved for NewLink */
+/*0x3e*/0,		/* Reserved for NewLink */
+/*0x3f*/0,		/* Reserved for NewLink */
+};
+
+static int __init schizo_ino_to_pil(struct pci_dev *pdev, unsigned int ino)
+{
+	int ret;
+
+	ret = schizo_pil_table[ino];
+	if (ret == 0 && pdev == NULL) {
+		ret = 1;	/* no table entry and no device: lowest PIL */
+	} else if (ret == 0) {
+		/* No table entry: pick a PIL based on the device's
+		 * base class.  Each case must break, otherwise we
+		 * fall through to default and always return 1.
+		 */
+		switch ((pdev->class >> 16) & 0x0f) {
+		case PCI_BASE_CLASS_STORAGE:
+			ret = 4;
+			break;
+		case PCI_BASE_CLASS_NETWORK:
+			ret = 6;
+			break;
+		case PCI_BASE_CLASS_DISPLAY:
+			ret = 9;
+			break;
+		case PCI_BASE_CLASS_MULTIMEDIA:
+		case PCI_BASE_CLASS_MEMORY:
+		case PCI_BASE_CLASS_BRIDGE:
+			ret = 10;
+			break;
+		default:
+			ret = 1;
+		};
+	}
+
+	return ret;
+}
+
+static unsigned int __init schizo_irq_build(struct pci_pbm_info *pbm,
					    struct pci_dev *pdev,
					    unsigned int ino)
{
-	/* IMPLEMENT ME */
+	struct pci_controller_info *p = pbm->parent;
+	struct ino_bucket *bucket;
+	unsigned long imap, iclr, pbm_off;
+	unsigned long imap_off, iclr_off;
+	int pil, inofixup = 0;
+
+	if (pbm == &p->pbm_A)	/* imap/iclr regs live in the owning PBM's block */
+		pbm_off = SCHIZO_PBM_A_REGS_OFF;
+	else
+		pbm_off = SCHIZO_PBM_B_REGS_OFF;
+
+	ino &= PCI_IRQ_INO;
+	imap_off = schizo_imap_offset(ino);
+
+	/* Now build the IRQ bucket. */
+	pil = schizo_ino_to_pil(pdev, ino);
+	imap = p->controller_regs + pbm_off + imap_off;
+	imap += 4;	/* NOTE(review): presumably the low half of the 64-bit reg -- confirm */
+
+	iclr_off = schizo_iclr_offset(ino);
+	iclr = p->controller_regs + pbm_off + iclr_off;
+	iclr += 4;	/* same +4 adjustment as imap above */
+
+	if ((ino & 0x20) == 0)
+		inofixup = ino & 0x03;	/* slot INOs (< 0x20): fix up by INTx line */
+
+	bucket = __bucket(build_irq(pil, inofixup, iclr, imap));
+	bucket->flags |= IBF_PCI;
+
+	return __irq(bucket);
+}
+
+/* SCHIZO error handling support. */
+enum schizo_error_type {
+	UE_ERR, CE_ERR, PCI_ERR, SAFARI_ERR
+};
+
+static spinlock_t stc_buf_lock = SPIN_LOCK_UNLOCKED;	/* protects the 3 snapshot buffers below */
+static unsigned long stc_error_buf[128];	/* STC error reg snapshot */
+static unsigned long stc_tag_buf[16];		/* STC tag reg snapshot */
+static unsigned long stc_line_buf[16];		/* STC line reg snapshot */
+
+#define SCHIZO_STC_ERR 0xb800UL /* --> 0xba00 */
+#define SCHIZO_STC_TAG 0xba00UL /* --> 0xba80 */
+#define SCHIZO_STC_LINE 0xbb00UL /* --> 0xbb80 */
+
+#define SCHIZO_STCERR_WRITE 0x2UL
+#define SCHIZO_STCERR_READ 0x1UL
+
+#define SCHIZO_STCTAG_PPN 0x3fffffff00000000UL
+#define SCHIZO_STCTAG_VPN 0x00000000ffffe000UL
+#define SCHIZO_STCTAG_VALID 0x8000000000000000UL
+#define SCHIZO_STCTAG_READ 0x4000000000000000UL
+
+#define SCHIZO_STCLINE_LINDX 0x0000000007800000UL
+#define SCHIZO_STCLINE_SPTR 0x000000000007e000UL
+#define SCHIZO_STCLINE_LADDR 0x0000000000001fc0UL
+#define SCHIZO_STCLINE_EPTR 0x000000000000003fUL
+#define SCHIZO_STCLINE_VALID 0x0000000000600000UL
+#define SCHIZO_STCLINE_FOFN 0x0000000000180000UL
+
+static void __schizo_check_stc_error_pbm(struct pci_pbm_info *pbm,
+					 enum schizo_error_type type)	/* 'type' currently unused here */
+{
+	struct pci_controller_info *p = pbm->parent;
+	struct pci_strbuf *strbuf = &pbm->stc;
+	unsigned long regbase = p->controller_regs;
+	unsigned long err_base, tag_base, line_base;
+	u64 control;
+	char pbm_name = (pbm == &p->pbm_A ? 'A' : 'B');
+	int i;
+
+	if (pbm == &p->pbm_A)
+		regbase += SCHIZO_PBM_A_REGS_OFF;
+	else
+		regbase += SCHIZO_PBM_B_REGS_OFF;
+
+	err_base = regbase + SCHIZO_STC_ERR;
+	tag_base = regbase + SCHIZO_STC_TAG;
+	line_base = regbase + SCHIZO_STC_LINE;
+
+	spin_lock(&stc_buf_lock);
+
+	/* This is __REALLY__ dangerous.  When we put the
+	 * streaming buffer into diagnostic mode to probe
+	 * its tags and error status, we _must_ clear all
+	 * of the line tag valid bits before re-enabling
+	 * the streaming buffer.  If any dirty data lives
+	 * in the STC when we do this, we will end up
+	 * invalidating it before it has a chance to reach
+	 * main memory.
+	 */
+	control = schizo_read(strbuf->strbuf_control);
+	schizo_write(strbuf->strbuf_control,
+		     (control | SCHIZO_STRBUF_CTRL_DENAB));
+	for (i = 0; i < 128; i++) {
+		unsigned long val;
+
+		val = schizo_read(err_base + (i * 8UL));
+		schizo_write(err_base + (i * 8UL), 0UL);	/* clear as we sample */
+		stc_error_buf[i] = val;
+	}
+	for (i = 0; i < 16; i++) {
+		stc_tag_buf[i] = schizo_read(tag_base + (i * 8UL));
+		stc_line_buf[i] = schizo_read(line_base + (i * 8UL));
+		schizo_write(tag_base + (i * 8UL), 0UL);
+		schizo_write(line_base + (i * 8UL), 0UL);
+	}
+
+	/* OK, state is logged, exit diagnostic mode. */
+	schizo_write(strbuf->strbuf_control, control);
+
+	for (i = 0; i < 16; i++) {
+		int j, saw_error, first, last;
+
+		saw_error = 0;
+		first = i * 8;	/* 8 error entries per line tag */
+		last = first + 8;
+		for (j = first; j < last; j++) {
+			unsigned long errval = stc_error_buf[j];
+			if (errval != 0) {
+				saw_error++;
+				printk("SCHIZO%d: PBM-%c STC_ERR(%d)[wr(%d)rd(%d)]\n",
+				       p->index, pbm_name,
+				       j,
+				       (errval & SCHIZO_STCERR_WRITE) ? 1 : 0,
+				       (errval & SCHIZO_STCERR_READ) ? 1 : 0);
+			}
+		}
+		if (saw_error != 0) {
+			unsigned long tagval = stc_tag_buf[i];
+			unsigned long lineval = stc_line_buf[i];
+			printk("SCHIZO%d: PBM-%c STC_TAG(%d)[PA(%016lx)VA(%08lx)V(%d)R(%d)]\n",
+			       p->index, pbm_name,
+			       i,
+			       ((tagval & SCHIZO_STCTAG_PPN) >> 19UL),
+			       (tagval & SCHIZO_STCTAG_VPN),
+			       ((tagval & SCHIZO_STCTAG_VALID) ? 1 : 0),
+			       ((tagval & SCHIZO_STCTAG_READ) ? 1 : 0));
+
+			/* XXX Should spit out per-bank error information... -DaveM */
+			printk("SCHIZO%d: PBM-%c STC_LINE(%d)[LIDX(%lx)SP(%lx)LADDR(%lx)EP(%lx)"
+			       "V(%d)FOFN(%d)]\n",
+			       p->index, pbm_name,
+			       i,
+			       ((lineval & SCHIZO_STCLINE_LINDX) >> 23UL),
+			       ((lineval & SCHIZO_STCLINE_SPTR) >> 13UL),
+			       ((lineval & SCHIZO_STCLINE_LADDR) >> 6UL),
+			       ((lineval & SCHIZO_STCLINE_EPTR) >> 0UL),
+			       ((lineval & SCHIZO_STCLINE_VALID) ? 1 : 0),
+			       ((lineval & SCHIZO_STCLINE_FOFN) ? 1 : 0));
+		}
+	}
+
+	spin_unlock(&stc_buf_lock);
+}
+
+/* IOMMU is per-PBM in Schizo, so interrogate both for anonymous
+ * controller level errors.
+ */
+
+#define SCHIZO_IOMMU_TAG 0xa580UL
+#define SCHIZO_IOMMU_DATA 0xa600UL
+
+#define SCHIZO_IOMMU_TAG_CTXT 0x0000001ffe000000UL
+#define SCHIZO_IOMMU_TAG_ERRSTS 0x0000000001800000UL
+#define SCHIZO_IOMMU_TAG_ERR 0x0000000000400000UL
+#define SCHIZO_IOMMU_TAG_WRITE 0x0000000000200000UL
+#define SCHIZO_IOMMU_TAG_STREAM 0x0000000000100000UL
+#define SCHIZO_IOMMU_TAG_SIZE 0x0000000000080000UL
+#define SCHIZO_IOMMU_TAG_VPAGE 0x000000000007ffffUL
+
+#define SCHIZO_IOMMU_DATA_VALID 0x0000000100000000UL
+#define SCHIZO_IOMMU_DATA_CACHE 0x0000000040000000UL
+#define SCHIZO_IOMMU_DATA_PPAGE 0x000000003fffffffUL
+
+static void schizo_check_iommu_error_pbm(struct pci_pbm_info *pbm,
+					 enum schizo_error_type type)
+{
+	struct pci_controller_info *p = pbm->parent;
+	struct pci_iommu *iommu = pbm->iommu;
+	unsigned long iommu_tag[16];
+	unsigned long iommu_data[16];
+	unsigned long flags;
+	u64 control;
+	char pbm_name = (pbm == &p->pbm_A ? 'A' : 'B');
+	int i;
+
+	spin_lock_irqsave(&iommu->lock, flags);
+	control = schizo_read(iommu->iommu_control);
+	if (control & SCHIZO_IOMMU_CTRL_XLTEERR) {
+		unsigned long base;
+		char *type_string;
+
+		/* Clear the error encountered bit. */
+		control &= ~SCHIZO_IOMMU_CTRL_XLTEERR;
+		schizo_write(iommu->iommu_control, control);
+
+		switch((control & SCHIZO_IOMMU_CTRL_XLTESTAT) >> 25UL) {
+		case 0:
+			type_string = "Protection Error";
+			break;
+		case 1:
+			type_string = "Invalid Error";
+			break;
+		case 2:
+			type_string = "TimeOut Error";
+			break;
+		case 3:
+		default:
+			type_string = "ECC Error";
+			break;
+		};
+		printk("SCHIZO%d: PBM-%c IOMMU Error, type[%s]\n",
+		       p->index, pbm_name, type_string);
+
+		/* Put the IOMMU into diagnostic mode and probe
+		 * its TLB for entries with error status.
+		 *
+		 * It is very possible for another DVMA to occur
+		 * while we do this probe, and corrupt the system
+		 * further.  But we are so screwed at this point
+		 * that we are likely to crash hard anyways, so
+		 * get as much diagnostic information to the
+		 * console as we can.
+		 */
+		schizo_write(iommu->iommu_control,
+			     control | SCHIZO_IOMMU_CTRL_DENAB);
+
+		base = p->controller_regs;
+		if (pbm == &p->pbm_A)	/* IOMMU diag regs are per-PBM */
+			base += SCHIZO_PBM_A_REGS_OFF;
+		else
+			base += SCHIZO_PBM_B_REGS_OFF;
+
+		for (i = 0; i < 16; i++) {
+			iommu_tag[i] =
+				schizo_read(base + SCHIZO_IOMMU_TAG + (i * 8UL));
+			iommu_data[i] =
+				schizo_read(base + SCHIZO_IOMMU_DATA + (i * 8UL));
+
+			/* Now clear out the entry. */
+			schizo_write(base + SCHIZO_IOMMU_TAG + (i * 8UL), 0);
+			schizo_write(base + SCHIZO_IOMMU_DATA + (i * 8UL), 0);
+		}
+
+		/* Leave diagnostic mode. */
+		schizo_write(iommu->iommu_control, control);
+
+		for (i = 0; i < 16; i++) {
+			unsigned long tag, data;
+
+			tag = iommu_tag[i];
+			if (!(tag & SCHIZO_IOMMU_TAG_ERR))	/* only report errored entries */
+				continue;
+
+			data = iommu_data[i];
+			switch((tag & SCHIZO_IOMMU_TAG_ERRSTS) >> 23UL) {
+			case 0:
+				type_string = "Protection Error";
+				break;
+			case 1:
+				type_string = "Invalid Error";
+				break;
+			case 2:
+				type_string = "TimeOut Error";
+				break;
+			case 3:
+			default:
+				type_string = "ECC Error";
+				break;
+			};
+			printk("SCHIZO%d: PBM-%c IOMMU TAG(%d)[error(%s) ctx(%x) wr(%d) str(%d) "
+			       "sz(%dK) vpg(%08lx)]\n",
+			       p->index, pbm_name, i, type_string,
+			       (int)((tag & SCHIZO_IOMMU_TAG_CTXT) >> 25UL),
+			       ((tag & SCHIZO_IOMMU_TAG_WRITE) ? 1 : 0),
+			       ((tag & SCHIZO_IOMMU_TAG_STREAM) ? 1 : 0),
+			       ((tag & SCHIZO_IOMMU_TAG_SIZE) ? 64 : 8),
+			       (tag & SCHIZO_IOMMU_TAG_VPAGE) << PAGE_SHIFT);
+			printk("SCHIZO%d: PBM-%c IOMMU DATA(%d)[valid(%d) cache(%d) ppg(%016lx)]\n",
+			       p->index, pbm_name, i,
+			       ((data & SCHIZO_IOMMU_DATA_VALID) ? 1 : 0),
+			       ((data & SCHIZO_IOMMU_DATA_CACHE) ? 1 : 0),
+			       (data & SCHIZO_IOMMU_DATA_PPAGE) << PAGE_SHIFT);
+		}
+	}
+	__schizo_check_stc_error_pbm(pbm, type);	/* always probe the STC too */
+	spin_unlock_irqrestore(&iommu->lock, flags);
+}
+
+static void schizo_check_iommu_error(struct pci_controller_info *p,
+				     enum schizo_error_type type)
+{
+	schizo_check_iommu_error_pbm(&p->pbm_A, type);	/* IOMMU is per-PBM on Schizo */
+	schizo_check_iommu_error_pbm(&p->pbm_B, type);
+}
+
+/* Uncorrectable ECC error status gathering. */
+#define SCHIZO_UE_AFSR 0x10030UL
+#define SCHIZO_UE_AFAR 0x10038UL
+
+#define SCHIZO_UEAFSR_PPIO 0x8000000000000000UL
+#define SCHIZO_UEAFSR_PDRD 0x4000000000000000UL
+#define SCHIZO_UEAFSR_PDWR 0x2000000000000000UL
+#define SCHIZO_UEAFSR_SPIO 0x1000000000000000UL
+#define SCHIZO_UEAFSR_SDMA 0x0800000000000000UL
+#define SCHIZO_UEAFSR_ERRPNDG 0x0300000000000000UL
+#define SCHIZO_UEAFSR_BMSK 0x000003ff00000000UL
+#define SCHIZO_UEAFSR_QOFF 0x00000000c0000000UL
+#define SCHIZO_UEAFSR_AID 0x000000001f000000UL
+#define SCHIZO_UEAFSR_PARTIAL 0x0000000000800000UL
+#define SCHIZO_UEAFSR_OWNEDIN 0x0000000000400000UL
+#define SCHIZO_UEAFSR_MTAGSYND 0x00000000000f0000UL
+#define SCHIZO_UEAFSR_MTAG 0x000000000000e000UL
+#define SCHIZO_UEAFSR_ECCSYND 0x00000000000001ffUL
+
+static void schizo_ue_intr(int irq, void *dev_id, struct pt_regs *regs)
+{
+	struct pci_controller_info *p = dev_id;
+	unsigned long afsr_reg = p->controller_regs + SCHIZO_UE_AFSR;
+	unsigned long afar_reg = p->controller_regs + SCHIZO_UE_AFAR;
+	unsigned long afsr, afar, error_bits;
+	int reported, limit;
+
+	/* Latch uncorrectable error status. */
+	afar = schizo_read(afar_reg);
+
+	/* If either of the error pending bits are set in the
+	 * AFSR, the error status is being actively updated by
+	 * the hardware and we must re-read to get a clean value.
+	 */
+	limit = 1000;	/* bounded spin; bail even if still pending */
+	do {
+		afsr = schizo_read(afsr_reg);
+	} while ((afsr & SCHIZO_UEAFSR_ERRPNDG) != 0 && --limit);
+
+	/* Clear the primary/secondary error status bits. */
+	error_bits = afsr &
+		(SCHIZO_UEAFSR_PPIO | SCHIZO_UEAFSR_PDRD | SCHIZO_UEAFSR_PDWR |
+		 SCHIZO_UEAFSR_SPIO | SCHIZO_UEAFSR_SDMA);
+	schizo_write(afsr_reg, error_bits);	/* presumably write-to-clear -- per AFSR convention */
+
+	/* Log the error. */
+	printk("SCHIZO%d: Uncorrectable Error, primary error type[%s]\n",
+	       p->index,
+	       (((error_bits & SCHIZO_UEAFSR_PPIO) ?
+		 "PIO" :
+		 ((error_bits & SCHIZO_UEAFSR_PDRD) ?
+		  "DMA Read" :
+		  ((error_bits & SCHIZO_UEAFSR_PDWR) ?
+		   "DMA Write" : "???")))));
+	printk("SCHIZO%d: bytemask[%04lx] qword_offset[%lx] SAFARI_AID[%02lx]\n",
+	       p->index,
+	       (afsr & SCHIZO_UEAFSR_BMSK) >> 32UL,
+	       (afsr & SCHIZO_UEAFSR_QOFF) >> 30UL,
+	       (afsr & SCHIZO_UEAFSR_AID) >> 24UL);
+	printk("SCHIZO%d: partial[%d] owned_in[%d] mtag[%lx] mtag_synd[%lx] ecc_sync[%lx]\n",
+	       p->index,
+	       (afsr & SCHIZO_UEAFSR_PARTIAL) ? 1 : 0,
+	       (afsr & SCHIZO_UEAFSR_OWNEDIN) ? 1 : 0,
+	       (afsr & SCHIZO_UEAFSR_MTAG) >> 13UL,
+	       (afsr & SCHIZO_UEAFSR_MTAGSYND) >> 16UL,
+	       (afsr & SCHIZO_UEAFSR_ECCSYND) >> 0UL);
+	printk("SCHIZO%d: UE AFAR [%016lx]\n", p->index, afar);
+	printk("SCHIZO%d: UE Secondary errors [", p->index);
+	reported = 0;
+	if (afsr & SCHIZO_UEAFSR_SPIO) {
+		reported++;
+		printk("(PIO)");
+	}
+	if (afsr & SCHIZO_UEAFSR_SDMA) {
+		reported++;
+		printk("(DMA)");
+	}
+	if (!reported)
+		printk("(none)");
+	printk("]\n");
+
+	/* Interrogate IOMMU for error status. */
+	schizo_check_iommu_error(p, UE_ERR);
+}
+
+#define SCHIZO_CE_AFSR 0x10040UL
+#define SCHIZO_CE_AFAR 0x10048UL
+
+#define SCHIZO_CEAFSR_PPIO 0x8000000000000000UL
+#define SCHIZO_CEAFSR_PDRD 0x4000000000000000UL
+#define SCHIZO_CEAFSR_PDWR 0x2000000000000000UL
+#define SCHIZO_CEAFSR_SPIO 0x1000000000000000UL
+#define SCHIZO_CEAFSR_SDMA 0x0800000000000000UL
+#define SCHIZO_CEAFSR_ERRPNDG 0x0300000000000000UL
+#define SCHIZO_CEAFSR_BMSK 0x000003ff00000000UL
+#define SCHIZO_CEAFSR_QOFF 0x00000000c0000000UL
+#define SCHIZO_CEAFSR_AID 0x000000001f000000UL
+#define SCHIZO_CEAFSR_PARTIAL 0x0000000000800000UL
+#define SCHIZO_CEAFSR_OWNEDIN 0x0000000000400000UL
+#define SCHIZO_CEAFSR_MTAGSYND 0x00000000000f0000UL
+#define SCHIZO_CEAFSR_MTAG 0x000000000000e000UL
+#define SCHIZO_CEAFSR_ECCSYND 0x00000000000001ffUL
+
+static void schizo_ce_intr(int irq, void *dev_id, struct pt_regs *regs)
+{
+	struct pci_controller_info *p = dev_id;
+	unsigned long afsr_reg = p->controller_regs + SCHIZO_CE_AFSR;
+	unsigned long afar_reg = p->controller_regs + SCHIZO_CE_AFAR;
+	unsigned long afsr, afar, error_bits;
+	int reported, limit;
+
+	/* Latch error status. */
+	afar = schizo_read(afar_reg);
+
+	/* If either of the error pending bits are set in the
+	 * AFSR, the error status is being actively updated by
+	 * the hardware and we must re-read to get a clean value.
+	 */
+	limit = 1000;
+	do {
+		afsr = schizo_read(afsr_reg);
+	} while ((afsr & SCHIZO_CEAFSR_ERRPNDG) != 0 && --limit);
+
+	/* Clear primary/secondary error status bits.  Use the CE
+	 * AFSR field definitions here (identical layout to UE).
+	 */
+	error_bits = afsr &
+		(SCHIZO_CEAFSR_PPIO | SCHIZO_CEAFSR_PDRD | SCHIZO_CEAFSR_PDWR |
+		 SCHIZO_CEAFSR_SPIO | SCHIZO_CEAFSR_SDMA);
+	schizo_write(afsr_reg, error_bits);
+
+	/* Log the error. */
+	printk("SCHIZO%d: Correctable Error, primary error type[%s]\n",
+	       p->index,
+	       (((error_bits & SCHIZO_CEAFSR_PPIO) ?
+		 "PIO" :
+		 ((error_bits & SCHIZO_CEAFSR_PDRD) ?
+		  "DMA Read" :
+		  ((error_bits & SCHIZO_CEAFSR_PDWR) ?
+		   "DMA Write" : "???")))));
+
+	/* XXX Use syndrome and afar to print out module string just like
+	 * XXX UDB CE trap handler does... -DaveM
+	 */
+	printk("SCHIZO%d: bytemask[%04lx] qword_offset[%lx] SAFARI_AID[%02lx]\n",
+	       p->index,
+	       (afsr & SCHIZO_CEAFSR_BMSK) >> 32UL,
+	       (afsr & SCHIZO_CEAFSR_QOFF) >> 30UL,
+	       (afsr & SCHIZO_CEAFSR_AID) >> 24UL);
+	printk("SCHIZO%d: partial[%d] owned_in[%d] mtag[%lx] mtag_synd[%lx] ecc_sync[%lx]\n",
+	       p->index,
+	       (afsr & SCHIZO_CEAFSR_PARTIAL) ? 1 : 0,
+	       (afsr & SCHIZO_CEAFSR_OWNEDIN) ? 1 : 0,
+	       (afsr & SCHIZO_CEAFSR_MTAG) >> 13UL,
+	       (afsr & SCHIZO_CEAFSR_MTAGSYND) >> 16UL,
+	       (afsr & SCHIZO_CEAFSR_ECCSYND) >> 0UL);
+	printk("SCHIZO%d: CE AFAR [%016lx]\n", p->index, afar);
+	printk("SCHIZO%d: CE Secondary errors [", p->index);
+	reported = 0;
+	if (afsr & SCHIZO_CEAFSR_SPIO) {
+		reported++;
+		printk("(PIO)");
+	}
+	if (afsr & SCHIZO_CEAFSR_SDMA) {
+		reported++;
+		printk("(DMA)");
+	}
+	if (!reported)
+		printk("(none)");
+	printk("]\n");
+}
+
+#define SCHIZO_PCI_AFSR 0x2010UL
+#define SCHIZO_PCI_AFAR 0x2018UL
+
+#define SCHIZO_PCIAFSR_PMA 0x8000000000000000UL
+#define SCHIZO_PCIAFSR_PTA 0x4000000000000000UL
+#define SCHIZO_PCIAFSR_PRTRY 0x2000000000000000UL
+#define SCHIZO_PCIAFSR_PPERR 0x1000000000000000UL
+#define SCHIZO_PCIAFSR_PTTO 0x0800000000000000UL
+#define SCHIZO_PCIAFSR_PUNUS 0x0400000000000000UL
+#define SCHIZO_PCIAFSR_SMA 0x0200000000000000UL
+#define SCHIZO_PCIAFSR_STA 0x0100000000000000UL
+#define SCHIZO_PCIAFSR_SRTRY 0x0080000000000000UL
+#define SCHIZO_PCIAFSR_SPERR 0x0040000000000000UL
+#define SCHIZO_PCIAFSR_STTO 0x0020000000000000UL
+#define SCHIZO_PCIAFSR_SUNUS 0x0010000000000000UL
+#define SCHIZO_PCIAFSR_BMSK 0x000003ff00000000UL
+#define SCHIZO_PCIAFSR_BLK 0x0000000080000000UL
+#define SCHIZO_PCIAFSR_CFG 0x0000000040000000UL
+#define SCHIZO_PCIAFSR_MEM 0x0000000020000000UL
+#define SCHIZO_PCIAFSR_IO 0x0000000010000000UL
+
+static void schizo_pcierr_intr(int irq, void *dev_id, struct pt_regs *regs)
+{
+	struct pci_pbm_info *pbm = dev_id;	/* dev_id is the faulting PBM */
+	struct pci_controller_info *p = pbm->parent;
+	unsigned long afsr_reg, afar_reg, base;
+	unsigned long afsr, afar, error_bits;
+	int reported;
+	char pbm_name;
+
+	base = p->controller_regs;
+	if (pbm == &pbm->parent->pbm_A) {
+		base += SCHIZO_PBM_A_REGS_OFF;
+		pbm_name = 'A';
+	} else {
+		base += SCHIZO_PBM_B_REGS_OFF;
+		pbm_name = 'B';
+	}
+
+	afsr_reg = base + SCHIZO_PCI_AFSR;
+	afar_reg = base + SCHIZO_PCI_AFAR;
+
+	/* Latch error status. */
+	afar = schizo_read(afar_reg);
+	afsr = schizo_read(afsr_reg);	/* note: no ERRPNDG re-read loop here */
+
+	/* Clear primary/secondary error status bits. */
+	error_bits = afsr &
+		(SCHIZO_PCIAFSR_PMA | SCHIZO_PCIAFSR_PTA |
+		 SCHIZO_PCIAFSR_PRTRY | SCHIZO_PCIAFSR_PPERR |
+		 SCHIZO_PCIAFSR_PTTO | SCHIZO_PCIAFSR_PUNUS |
+		 SCHIZO_PCIAFSR_SMA | SCHIZO_PCIAFSR_STA |
+		 SCHIZO_PCIAFSR_SRTRY | SCHIZO_PCIAFSR_SPERR |
+		 SCHIZO_PCIAFSR_STTO | SCHIZO_PCIAFSR_SUNUS);
+	schizo_write(afsr_reg, error_bits);
+
+	/* Log the error. */
+	printk("SCHIZO%d: PBM-%c PCI Error, primary error type[%s]\n",
+	       p->index, pbm_name,
+	       (((error_bits & SCHIZO_PCIAFSR_PMA) ?
+		 "Master Abort" :
+		 ((error_bits & SCHIZO_PCIAFSR_PTA) ?
+		  "Target Abort" :
+		  ((error_bits & SCHIZO_PCIAFSR_PRTRY) ?
+		   "Excessive Retries" :
+		   ((error_bits & SCHIZO_PCIAFSR_PPERR) ?
+		    "Parity Error" :
+		    ((error_bits & SCHIZO_PCIAFSR_PTTO) ?
+		     "Timeout" :
+		     ((error_bits & SCHIZO_PCIAFSR_PUNUS) ?
+		      "Bus Unusable" : "???"))))))));
+	printk("SCHIZO%d: PBM-%c bytemask[%04lx] was_block(%d) space(%s)\n",
+	       p->index, pbm_name,
+	       (afsr & SCHIZO_PCIAFSR_BMSK) >> 32UL,
+	       (afsr & SCHIZO_PCIAFSR_BLK) ? 1 : 0,
+	       ((afsr & SCHIZO_PCIAFSR_CFG) ?
+		"Config" :
+		((afsr & SCHIZO_PCIAFSR_MEM) ?
+		 "Memory" :
+		 ((afsr & SCHIZO_PCIAFSR_IO) ?
+		  "I/O" : "???"))));
+	printk("SCHIZO%d: PBM-%c PCI AFAR [%016lx]\n",
+	       p->index, pbm_name, afar);
+	printk("SCHIZO%d: PBM-%c PCI Secondary errors [",
+	       p->index, pbm_name);
+	reported = 0;
+	if (afsr & SCHIZO_PCIAFSR_SMA) {
+		reported++;
+		printk("(Master Abort)");
+	}
+	if (afsr & SCHIZO_PCIAFSR_STA) {
+		reported++;
+		printk("(Target Abort)");
+	}
+	if (afsr & SCHIZO_PCIAFSR_SRTRY) {
+		reported++;
+		printk("(Excessive Retries)");
+	}
+	if (afsr & SCHIZO_PCIAFSR_SPERR) {
+		reported++;
+		printk("(Parity Error)");
+	}
+	if (afsr & SCHIZO_PCIAFSR_STTO) {
+		reported++;
+		printk("(Timeout)");
+	}
+	if (afsr & SCHIZO_PCIAFSR_SUNUS) {
+		reported++;
+		printk("(Bus Unusable)");
+	}
+	if (!reported)
+		printk("(none)");
+	printk("]\n");
+
+	/* For the error types shown, scan PBM's PCI bus for devices
+	 * which have logged that error type.
+	 */
+
+	/* If we see a Target Abort, this could be the result of an
+	 * IOMMU translation error of some sort.  It is extremely
+	 * useful to log this information as usually it indicates
+	 * a bug in the IOMMU support code or a PCI device driver.
+	 */
+	if (error_bits & (SCHIZO_PCIAFSR_PTA | SCHIZO_PCIAFSR_STA)) {
+		schizo_check_iommu_error(p, PCI_ERR);
+		pci_scan_for_target_abort(p, pbm, pbm->pci_bus);
+	}
+	if (error_bits & (SCHIZO_PCIAFSR_PMA | SCHIZO_PCIAFSR_SMA))
+		pci_scan_for_master_abort(p, pbm, pbm->pci_bus);
+
+	/* For excessive retries, the PBM will abort the device
+	 * and there is no way to specifically check for excessive
+	 * retries in the config space status registers.  So what
+	 * we hope is that we'll catch it via the master/target
+	 * abort events.
+	 */
+
+	if (error_bits & (SCHIZO_PCIAFSR_PPERR | SCHIZO_PCIAFSR_SPERR))
+		pci_scan_for_parity_error(p, pbm, pbm->pci_bus);
+}
+
+#define SCHIZO_SAFARI_ERRLOG 0x10018UL
+
+#define SAFARI_ERRLOG_ERROUT 0x8000000000000000UL
+
+#define SAFARI_ERROR_BADCMD 0x4000000000000000UL
+#define SAFARI_ERROR_SSMDIS 0x2000000000000000UL
+#define SAFARI_ERROR_BADMA 0x1000000000000000UL
+#define SAFARI_ERROR_BADMB 0x0800000000000000UL
+#define SAFARI_ERROR_BADMC 0x0400000000000000UL
+#define SAFARI_ERROR_CPU1PS 0x0000000000002000UL
+#define SAFARI_ERROR_CPU1PB 0x0000000000001000UL
+#define SAFARI_ERROR_CPU0PS 0x0000000000000800UL
+#define SAFARI_ERROR_CPU0PB 0x0000000000000400UL
+#define SAFARI_ERROR_CIQTO 0x0000000000000200UL
+#define SAFARI_ERROR_LPQTO 0x0000000000000100UL
+#define SAFARI_ERROR_SFPQTO 0x0000000000000080UL
+#define SAFARI_ERROR_UFPQTO 0x0000000000000040UL
+#define SAFARI_ERROR_APERR 0x0000000000000020UL
+#define SAFARI_ERROR_UNMAP 0x0000000000000010UL
+#define SAFARI_ERROR_BUSERR 0x0000000000000004UL
+#define SAFARI_ERROR_TIMEOUT 0x0000000000000002UL
+#define SAFARI_ERROR_ILL 0x0000000000000001UL
+
+/* We only expect UNMAP errors here. The rest of the Safari errors
+ * are marked fatal and thus cause a system reset.
+ */
+static void schizo_safarierr_intr(int irq, void *dev_id, struct pt_regs *regs)
+{
+	struct pci_controller_info *p = dev_id;
+	u64 errlog;
+
+	errlog = schizo_read(p->controller_regs + SCHIZO_SAFARI_ERRLOG);
+	schizo_write(p->controller_regs + SCHIZO_SAFARI_ERRLOG,
+		     errlog & ~(SAFARI_ERRLOG_ERROUT));	/* ack by dropping ERROUT */
+
+	if (!(errlog & SAFARI_ERROR_UNMAP)) {	/* only UNMAP is routed here */
+		printk("SCHIZO%d: Unexpected Safari error interrupt, errlog[%016lx]\n",
+		       p->index, errlog);
+		return;
+	}
+
+	printk("SCHIZO%d: Safari interrupt, UNMAPPED error, interrogating IOMMUs.\n",
+	       p->index);
+	schizo_check_iommu_error(p, SAFARI_ERR);
+}
+
+/* Nearly identical to PSYCHO equivalents... */
+#define SCHIZO_ECC_CTRL 0x10020UL
+#define SCHIZO_ECCCTRL_EE 0x8000000000000000 /* Enable ECC Checking */
+#define SCHIZO_ECCCTRL_UE 0x4000000000000000 /* Enable UE Interrupts */
+#define SCHIZO_ECCCTRL_CE 0x2000000000000000 /* Enable CE INterrupts */
+
+#define SCHIZO_SAFARI_ERRCTRL 0x10008UL
+#define SCHIZO_SAFERRCTRL_EN 0x8000000000000000UL
+#define SCHIZO_SAFARI_IRQCTRL 0x10010UL
+#define SCHIZO_SAFIRQCTRL_EN 0x8000000000000000UL
+
+#define SCHIZO_UE_INO 0x30 /* Uncorrectable ECC error */
+#define SCHIZO_CE_INO 0x31 /* Correctable ECC error */
+#define SCHIZO_PCIERR_A_INO 0x32 /* PBM A PCI bus error */
+#define SCHIZO_PCIERR_B_INO 0x33 /* PBM B PCI bus error */
+#define SCHIZO_SERR_INO 0x34 /* Safari interface error */
+
+#define SCHIZO_PCIA_CTRL (SCHIZO_PBM_A_REGS_OFF + 0x2000UL)
+#define SCHIZO_PCIB_CTRL (SCHIZO_PBM_B_REGS_OFF + 0x2000UL)
+#define SCHIZO_PCICTRL_SBH_ERR (1UL << 35UL)
+#define SCHIZO_PCICTRL_SERR (1UL << 34UL)
+#define SCHIZO_PCICTRL_SBH_INT (1UL << 18UL)
+#define SCHIZO_PCICTRL_EEN (1UL << 17UL)
+
+/* XXX It is not entirely clear if I need to enable the PCI controller interrupts
+ * XXX in both PBMs, the documentation is very vague about this point. For now
+ * XXX I'll just enable it on PBM A but this needs to be verified! -DaveM
+ */
+static void __init schizo_register_error_handlers(struct pci_controller_info *p)
+{
+	struct pci_pbm_info *pbm = &p->pbm_A; /* XXX verify me XXX */
+	unsigned long base = p->controller_regs;
+	unsigned int irq, portid = p->portid;
+	u64 tmp;
+
+	/* Build IRQs and register handlers. */
+	irq = schizo_irq_build(pbm, NULL, (portid << 6) | SCHIZO_UE_INO);
+	if (request_irq(irq, schizo_ue_intr,
+			SA_SHIRQ, "SCHIZO UE", p) < 0) {
+		prom_printf("SCHIZO%d: Cannot register UE interrupt.\n",
+			    p->index);
+		prom_halt();
+	}
+
+	irq = schizo_irq_build(pbm, NULL, (portid << 6) | SCHIZO_CE_INO);
+	if (request_irq(irq, schizo_ce_intr,
+			SA_SHIRQ, "SCHIZO CE", p) < 0) {
+		prom_printf("SCHIZO%d: Cannot register CE interrupt.\n",
+			    p->index);
+		prom_halt();
+	}
+
+	irq = schizo_irq_build(pbm, NULL, (portid << 6) | SCHIZO_PCIERR_A_INO);
+	if (request_irq(irq, schizo_pcierr_intr,
+			SA_SHIRQ, "SCHIZO PCIERR", &p->pbm_A) < 0) {
+		prom_printf("SCHIZO%d(PBMA): Cannot register PciERR interrupt.\n",
+			    p->index);
+		prom_halt();
+	}
+
+	irq = schizo_irq_build(pbm, NULL, (portid << 6) | SCHIZO_PCIERR_B_INO);
+	if (request_irq(irq, schizo_pcierr_intr,
+			SA_SHIRQ, "SCHIZO PCIERR", &p->pbm_B) < 0) {
+		prom_printf("SCHIZO%d(PBMB): Cannot register PciERR interrupt.\n",
+			    p->index);
+		prom_halt();
+	}
+
+	/* Safari errors are controller-level, not PBM-specific. */
+	irq = schizo_irq_build(pbm, NULL, (portid << 6) | SCHIZO_SERR_INO);
+	if (request_irq(irq, schizo_safarierr_intr,
+			SA_SHIRQ, "SCHIZO SERR", p) < 0) {
+		prom_printf("SCHIZO%d: Cannot register SafariERR interrupt.\n",
+			    p->index);
+		prom_halt();
+	}
+
+	/* Enable UE and CE interrupts for controller. */
+	schizo_write(base + SCHIZO_ECC_CTRL,
+		     (SCHIZO_ECCCTRL_EE |
+		      SCHIZO_ECCCTRL_UE |
+		      SCHIZO_ECCCTRL_CE));
+
+	/* Enable PCI Error interrupts and clear error
+	 * bits for each PBM.
+	 *
+	 * XXX More error bits should be cleared, this is
+	 * XXX just the stuff which is identical on Psycho. -DaveM
+	 */
+	tmp = schizo_read(base + SCHIZO_PCIA_CTRL);
+	tmp |= (SCHIZO_PCICTRL_SBH_ERR |
+		SCHIZO_PCICTRL_SERR |
+		SCHIZO_PCICTRL_SBH_INT |
+		SCHIZO_PCICTRL_EEN);
+	schizo_write(base + SCHIZO_PCIA_CTRL, tmp);
+
+	tmp = schizo_read(base + SCHIZO_PCIB_CTRL);
+	tmp |= (SCHIZO_PCICTRL_SBH_ERR |
+		SCHIZO_PCICTRL_SERR |
+		SCHIZO_PCICTRL_SBH_INT |
+		SCHIZO_PCICTRL_EEN);
+	schizo_write(base + SCHIZO_PCIB_CTRL, tmp);
+
+	/* Make all Safari error conditions fatal except unmapped errors
+	 * which we make generate interrupts.
+	 */
+	schizo_write(base + SCHIZO_SAFARI_ERRCTRL,
+		     (SCHIZO_SAFERRCTRL_EN |
+		      (SAFARI_ERROR_BADCMD | SAFARI_ERROR_SSMDIS |
+		       SAFARI_ERROR_BADMA | SAFARI_ERROR_BADMB |
+		       SAFARI_ERROR_BADMC | SAFARI_ERROR_CPU1PS |
+		       SAFARI_ERROR_CPU1PB | SAFARI_ERROR_CPU0PS |
+		       SAFARI_ERROR_CPU0PB | SAFARI_ERROR_CIQTO |
+		       SAFARI_ERROR_LPQTO | SAFARI_ERROR_SFPQTO |
+		       SAFARI_ERROR_UFPQTO | SAFARI_ERROR_APERR |
+		       SAFARI_ERROR_BUSERR | SAFARI_ERROR_TIMEOUT |
+		       SAFARI_ERROR_ILL)));
+
+	schizo_write(base + SCHIZO_SAFARI_IRQCTRL,
+		     (SCHIZO_SAFIRQCTRL_EN | (SAFARI_ERROR_UNMAP)));
+}
+
+/* We have to do the config space accesses by hand, thus... */
+#define PBM_BRIDGE_BUS		0x40
+#define PBM_BRIDGE_SUBORDINATE	0x41
+static void __init pbm_renumber(struct pci_pbm_info *pbm, u8 orig_busno)
+{
+	u8 *addr, busno;
+	int nbus;
+
+	busno = pci_highest_busnum;	/* first free global PCI bus number */
+	nbus = pbm->pci_last_busno - pbm->pci_first_busno;
+
+	addr = schizo_pci_config_mkaddr(pbm, orig_busno,
+					0, PBM_BRIDGE_BUS);
+	pci_config_write8(addr, busno);
+	addr = schizo_pci_config_mkaddr(pbm, busno,
+					0, PBM_BRIDGE_SUBORDINATE);
+	pci_config_write8(addr, busno + nbus);
+
+	pbm->pci_first_busno = busno;
+	pbm->pci_last_busno = busno + nbus;
+	pci_highest_busnum = busno + nbus + 1;
+
+	do {
+		pci_bus2pbm[busno++] = pbm;
+	} while (nbus--);	/* covers first..last inclusive (nbus+1 buses) */
+}
+
+/* We have to do the config space accesses by hand here since
+ * the pci_bus2pbm array is not ready yet.
+ */
+static void __init pbm_pci_bridge_renumber(struct pci_pbm_info *pbm,
+					   u8 busno)
+{
+	u32 devfn, l, class;
+	u8 hdr_type;
+	int is_multi = 0;
+
+	for(devfn = 0; devfn < 0xff; ++devfn) {	/* NOTE(review): stops at 0xfe, skipping devfn 0xff -- confirm intended */
+		u32 *dwaddr;
+		u8 *baddr;
+
+		if (PCI_FUNC(devfn) != 0 && is_multi == 0)	/* skip funcs of single-function devs */
+			continue;
+
+		/* Anything there? */
+		dwaddr = schizo_pci_config_mkaddr(pbm, busno, devfn, PCI_VENDOR_ID);
+		l = 0xffffffff;
+		pci_config_read32(dwaddr, &l);
+		if (l == 0xffffffff || l == 0x00000000 ||
+		    l == 0x0000ffff || l == 0xffff0000) {
+			is_multi = 0;
+			continue;
+		}
+
+		baddr = schizo_pci_config_mkaddr(pbm, busno, devfn, PCI_HEADER_TYPE);
+		pci_config_read8(baddr, &hdr_type);
+		if (PCI_FUNC(devfn) == 0)
+			is_multi = hdr_type & 0x80;	/* multi-function bit */
+
+		dwaddr = schizo_pci_config_mkaddr(pbm, busno, devfn, PCI_CLASS_REVISION);
+		class = 0xffffffff;
+		pci_config_read32(dwaddr, &class);
+		if ((class >> 16) == PCI_CLASS_BRIDGE_PCI) {
+			u32 buses = 0xffffffff;
+
+			dwaddr = schizo_pci_config_mkaddr(pbm, busno, devfn,
+							  PCI_PRIMARY_BUS);
+			pci_config_read32(dwaddr, &buses);
+			pbm_pci_bridge_renumber(pbm, (buses >> 8) & 0xff);	/* recurse on secondary bus */
+			buses &= 0xff000000;	/* zero primary/secondary/subordinate */
+			pci_config_write32(dwaddr, buses);
+		}
+	}
+}
+
+static void __init pbm_bridge_reconfigure(struct pci_controller_info *p)
+{
+	struct pci_pbm_info *pbm;
+	u8 *addr;
+
+	/* Clear out primary/secondary/subordinate bus numbers on
+	 * all PCI-to-PCI bridges under each PBM.  The generic bus
+	 * probing will fix them up.
+	 */
+	pbm_pci_bridge_renumber(&p->pbm_B, p->pbm_B.pci_first_busno);
+	pbm_pci_bridge_renumber(&p->pbm_A, p->pbm_A.pci_first_busno);
+
+	/* Move PBM A out of the way. */
+	pbm = &p->pbm_A;
+	addr = schizo_pci_config_mkaddr(pbm, pbm->pci_first_busno,
+					0, PBM_BRIDGE_BUS);
+	pci_config_write8(addr, 0xff);	/* park PBM A at bus 0xff for now */
+	addr = schizo_pci_config_mkaddr(pbm, 0xff,
+					0, PBM_BRIDGE_SUBORDINATE);
+	pci_config_write8(addr, 0xff);
+
+	/* Now we can safely renumber both PBMs. */
+	pbm_renumber(&p->pbm_B, p->pbm_B.pci_first_busno);
+	pbm_renumber(&p->pbm_A, 0xff);	/* A was parked at 0xff above */
+}
+
+static void __init pbm_config_busmastering(struct pci_pbm_info *pbm)
+{
+	u8 *addr;
+
+	/* Set cache-line size to 64 bytes, this is actually
+	 * a nop but I do it for completeness.
+	 */
+	addr = schizo_pci_config_mkaddr(pbm, pbm->pci_first_busno,
+					0, PCI_CACHE_LINE_SIZE);
+	pci_config_write8(addr, 64 / sizeof(u32));	/* register counts dwords */
+
+	/* Set PBM latency timer to 64 PCI clocks. */
+	addr = schizo_pci_config_mkaddr(pbm, pbm->pci_first_busno,
+					0, PCI_LATENCY_TIMER);
+	pci_config_write8(addr, 64);
+}
+
+static void __init pbm_scan_bus(struct pci_controller_info *p,
+				struct pci_pbm_info *pbm)	/* probe + fixups for one PBM */
+{
+	pbm->pci_bus = pci_scan_bus(pbm->pci_first_busno,
+				    p->pci_ops,
+				    pbm);	/* presumably the sysdata cookie */
+	pci_fill_in_pbm_cookies(pbm->pci_bus, pbm, pbm->prom_node);
+	pci_record_assignments(pbm, pbm->pci_bus);
+	pci_assign_unassigned(pbm, pbm->pci_bus);
+	pci_fixup_irq(pbm, pbm->pci_bus);
+	pci_determine_66mhz_disposition(pbm, pbm->pci_bus);
+	pci_setup_busmastering(pbm, pbm->pci_bus);
+}
+
+static void __init schizo_scan_bus(struct pci_controller_info *p)
+{
+	pbm_bridge_reconfigure(p);
+	pbm_config_busmastering(&p->pbm_B);
+	p->pbm_B.is_66mhz_capable = 0;	/* NOTE(review): B assumed 33MHz -- confirm */
+	pbm_config_busmastering(&p->pbm_A);
+	p->pbm_A.is_66mhz_capable = 1;	/* NOTE(review): A assumed 66MHz capable -- confirm */
+	pbm_scan_bus(p, &p->pbm_B);
+	pbm_scan_bus(p, &p->pbm_A);
+
+	/* After the PCI bus scan is complete, we can register
+	 * the error interrupt handlers.
+	 */
+	schizo_register_error_handlers(p);
}
static void __init schizo_base_address_update(struct pci_dev *pdev, int resource)
{
- /* IMPLEMENT ME */
+ struct pcidev_cookie *pcp = pdev->sysdata;
+ struct pci_pbm_info *pbm = pcp->pbm;
+ struct resource *res, *root;
+ u32 reg;
+ int where, size, is_64bit;
+
+ res = &pdev->resource[resource];
+ where = PCI_BASE_ADDRESS_0 + (resource * 4);
+
+ is_64bit = 0;
+ if (res->flags & IORESOURCE_IO)
+ root = &pbm->io_space;
+ else {
+ root = &pbm->mem_space;
+ if ((res->flags & PCI_BASE_ADDRESS_MEM_TYPE_MASK)
+ == PCI_BASE_ADDRESS_MEM_TYPE_64)
+ is_64bit = 1;
+ }
+
+ size = res->end - res->start;
+	pci_read_config_dword(pdev, where, &reg);
+ reg = ((reg & size) |
+ (((u32)(res->start - root->start)) & ~size));
+ pci_write_config_dword(pdev, where, reg);
+
+ /* This knows that the upper 32-bits of the address
+ * must be zero. Our PCI common layer enforces this.
+ */
+ if (is_64bit)
+ pci_write_config_dword(pdev, where + 4, 0);
}
static void __init schizo_resource_adjust(struct pci_dev *pdev,
struct resource *res,
struct resource *root)
{
- /* IMPLEMENT ME */
+ res->start += root->start;
+ res->end += root->start;
+}
+
+/* Interrogate Safari match/mask registers to figure out where
+ * PCI MEM, I/O, and Config space are for this PCI bus module.
+ */
+
+#define SCHIZO_PCI_A_MEM_MATCH 0x00040UL
+#define SCHIZO_PCI_A_MEM_MASK 0x00048UL
+#define SCHIZO_PCI_A_IO_MATCH 0x00050UL
+#define SCHIZO_PCI_A_IO_MASK 0x00058UL
+#define SCHIZO_PCI_B_MEM_MATCH 0x00060UL
+#define SCHIZO_PCI_B_MEM_MASK 0x00068UL
+#define SCHIZO_PCI_B_IO_MATCH 0x00070UL
+#define SCHIZO_PCI_B_IO_MASK 0x00078UL
+
+/* VAL must be non-zero. */
+static unsigned long strip_to_lowest_bit_set(unsigned long val)
+{
+ unsigned long tmp;
+
+ tmp = 1UL;
+ while (!(tmp & val))
+ tmp <<= 1UL;
+
+ return tmp;
+}
+
+static void schizo_determine_mem_io_space(struct pci_pbm_info *pbm,
+ int is_pbm_a, unsigned long reg_base)
+{
+ u64 mem_match, mem_mask;
+ u64 io_match;
+	u64 a, b;
+
+ if (is_pbm_a) {
+ mem_match = reg_base + SCHIZO_PCI_A_MEM_MATCH;
+ io_match = reg_base + SCHIZO_PCI_A_IO_MATCH;
+ } else {
+ mem_match = reg_base + SCHIZO_PCI_B_MEM_MATCH;
+ io_match = reg_base + SCHIZO_PCI_B_IO_MATCH;
+ }
+ mem_mask = mem_match + 0x8UL;
+
+ a = schizo_read(mem_match) & ~0x8000000000000000UL;
+ b = strip_to_lowest_bit_set(schizo_read(mem_mask));
+
+ /* It should be 2GB in size. */
+ pbm->mem_space.start = a;
+ pbm->mem_space.end = a + (b - 1UL);
+ pbm->mem_space.flags = IORESOURCE_MEM;
+
+ /* This 32MB area is divided into two pieces. The first
+ * 16MB is Config space, the next 16MB is I/O space.
+ */
+
+ a = schizo_read(io_match) & ~0x8000000000000000UL;
+ pbm->config_space = a;
+ printk("SCHIZO PBM%c: Local PCI config space at %016lx\n",
+ (is_pbm_a ? 'A' : 'B'), pbm->config_space);
+
+ a += (16UL * 1024UL * 1024UL);
+ pbm->io_space.start = a;
+ pbm->io_space.end = a + ((16UL * 1024UL * 1024UL) - 1UL);
+ pbm->io_space.flags = IORESOURCE_IO;
+}
+
+static void __init pbm_register_toplevel_resources(struct pci_controller_info *p,
+ struct pci_pbm_info *pbm)
+{
+ char *name = pbm->name;
+
+ sprintf(name, "SCHIZO%d PBM%c",
+ p->index,
+ (pbm == &p->pbm_A ? 'A' : 'B'));
+ pbm->io_space.name = pbm->mem_space.name = name;
+
+ request_resource(&ioport_resource, &pbm->io_space);
+ request_resource(&iomem_resource, &pbm->mem_space);
+}
+
+#define SCHIZO_STRBUF_CONTROL_A (SCHIZO_PBM_A_REGS_OFF + 0x02800UL)
+#define SCHIZO_STRBUF_FLUSH_A (SCHIZO_PBM_A_REGS_OFF + 0x02808UL)
+#define SCHIZO_STRBUF_FSYNC_A (SCHIZO_PBM_A_REGS_OFF + 0x02810UL)
+#define SCHIZO_STRBUF_CTXFLUSH_A (SCHIZO_PBM_A_REGS_OFF + 0x02818UL)
+#define SCHIZO_STRBUF_CTXMATCH_A (SCHIZO_PBM_A_REGS_OFF + 0x10000UL)
+
+#define SCHIZO_STRBUF_CONTROL_B (SCHIZO_PBM_B_REGS_OFF + 0x02800UL)
+#define SCHIZO_STRBUF_FLUSH_B (SCHIZO_PBM_B_REGS_OFF + 0x02808UL)
+#define SCHIZO_STRBUF_FSYNC_B (SCHIZO_PBM_B_REGS_OFF + 0x02810UL)
+#define SCHIZO_STRBUF_CTXFLUSH_B (SCHIZO_PBM_B_REGS_OFF + 0x02818UL)
+#define SCHIZO_STRBUF_CTXMATCH_B (SCHIZO_PBM_B_REGS_OFF + 0x10000UL)
+
+static void schizo_pbm_strbuf_init(struct pci_controller_info *p,
+ struct pci_pbm_info *pbm,
+ int is_pbm_a)
+{
+ unsigned long base = p->controller_regs;
+ u64 control;
+
+ /* SCHIZO has context flushing. */
+ if (is_pbm_a) {
+ pbm->stc.strbuf_control = base + SCHIZO_STRBUF_CONTROL_A;
+ pbm->stc.strbuf_pflush = base + SCHIZO_STRBUF_FLUSH_A;
+ pbm->stc.strbuf_fsync = base + SCHIZO_STRBUF_FSYNC_A;
+ pbm->stc.strbuf_ctxflush = base + SCHIZO_STRBUF_CTXFLUSH_A;
+ pbm->stc.strbuf_ctxmatch_base = base + SCHIZO_STRBUF_CTXMATCH_A;
+ } else {
+ pbm->stc.strbuf_control = base + SCHIZO_STRBUF_CONTROL_B;
+ pbm->stc.strbuf_pflush = base + SCHIZO_STRBUF_FLUSH_B;
+ pbm->stc.strbuf_fsync = base + SCHIZO_STRBUF_FSYNC_B;
+ pbm->stc.strbuf_ctxflush = base + SCHIZO_STRBUF_CTXFLUSH_B;
+ pbm->stc.strbuf_ctxmatch_base = base + SCHIZO_STRBUF_CTXMATCH_B;
+ }
+
+ pbm->stc.strbuf_flushflag = (volatile unsigned long *)
+ ((((unsigned long)&pbm->stc.__flushflag_buf[0])
+ + 63UL)
+ & ~63UL);
+ pbm->stc.strbuf_flushflag_pa = (unsigned long)
+ __pa(pbm->stc.strbuf_flushflag);
+
+ /* Turn off LRU locking and diag mode, enable the
+ * streaming buffer and leave the rerun-disable
+ * setting however OBP set it.
+ */
+ control = schizo_read(pbm->stc.strbuf_control);
+ control &= ~(SCHIZO_STRBUF_CTRL_LPTR |
+ SCHIZO_STRBUF_CTRL_LENAB |
+ SCHIZO_STRBUF_CTRL_DENAB);
+ control |= SCHIZO_STRBUF_CTRL_ENAB;
+ schizo_write(pbm->stc.strbuf_control, control);
+
+ pbm->stc.strbuf_enabled = 1;
+}
+
+#define SCHIZO_IOMMU_CONTROL_A (SCHIZO_PBM_A_REGS_OFF + 0x00200UL)
+#define SCHIZO_IOMMU_TSBBASE_A (SCHIZO_PBM_A_REGS_OFF + 0x00208UL)
+#define SCHIZO_IOMMU_FLUSH_A (SCHIZO_PBM_A_REGS_OFF + 0x00210UL)
+#define SCHIZO_IOMMU_CTXFLUSH_A (SCHIZO_PBM_A_REGS_OFF + 0x00218UL)
+#define SCHIZO_IOMMU_TAG_A (SCHIZO_PBM_A_REGS_OFF + 0x0a580UL)
+#define SCHIZO_IOMMU_DATA_A (SCHIZO_PBM_A_REGS_OFF + 0x0a600UL)
+#define SCHIZO_IOMMU_CONTROL_B (SCHIZO_PBM_B_REGS_OFF + 0x00200UL)
+#define SCHIZO_IOMMU_TSBBASE_B (SCHIZO_PBM_B_REGS_OFF + 0x00208UL)
+#define SCHIZO_IOMMU_FLUSH_B (SCHIZO_PBM_B_REGS_OFF + 0x00210UL)
+#define SCHIZO_IOMMU_CTXFLUSH_B (SCHIZO_PBM_B_REGS_OFF + 0x00218UL)
+#define SCHIZO_IOMMU_TAG_B (SCHIZO_PBM_B_REGS_OFF + 0x0a580UL)
+#define SCHIZO_IOMMU_DATA_B (SCHIZO_PBM_B_REGS_OFF + 0x0a600UL)
+
+static void schizo_pbm_iommu_init(struct pci_controller_info *p,
+ struct pci_pbm_info *pbm,
+ int is_pbm_a)
+{
+ struct pci_iommu *iommu = pbm->iommu;
+ unsigned long tsbbase, i, tagbase, database;
+ u64 control;
+
+ /* Setup initial software IOMMU state. */
+ spin_lock_init(&iommu->lock);
+ iommu->iommu_cur_ctx = 0;
+
+ /* Register addresses, SCHIZO has iommu ctx flushing. */
+ if (is_pbm_a) {
+ iommu->iommu_control = p->controller_regs + SCHIZO_IOMMU_CONTROL_A;
+ iommu->iommu_tsbbase = p->controller_regs + SCHIZO_IOMMU_TSBBASE_A;
+ iommu->iommu_flush = p->controller_regs + SCHIZO_IOMMU_FLUSH_A;
+ iommu->iommu_ctxflush = p->controller_regs + SCHIZO_IOMMU_CTXFLUSH_A;
+ } else {
+ iommu->iommu_control = p->controller_regs + SCHIZO_IOMMU_CONTROL_B;
+ iommu->iommu_tsbbase = p->controller_regs + SCHIZO_IOMMU_TSBBASE_B;
+ iommu->iommu_flush = p->controller_regs + SCHIZO_IOMMU_FLUSH_B;
+ iommu->iommu_ctxflush = p->controller_regs + SCHIZO_IOMMU_CTXFLUSH_B;
+ }
+
+ /* We use the main control/status register of SCHIZO as the write
+ * completion register.
+ */
+ iommu->write_complete_reg = p->controller_regs + 0x10000UL;
+
+ /*
+ * Invalidate TLB Entries.
+ */
+ control = schizo_read(iommu->iommu_control);
+ control |= SCHIZO_IOMMU_CTRL_DENAB;
+ schizo_write(iommu->iommu_control, control);
+
+ if (is_pbm_a)
+ tagbase = SCHIZO_IOMMU_TAG_A, database = SCHIZO_IOMMU_DATA_A;
+ else
+ tagbase = SCHIZO_IOMMU_TAG_B, database = SCHIZO_IOMMU_DATA_B;
+ for(i = 0; i < 16; i++) {
+ schizo_write(p->controller_regs + tagbase + (i * 8UL), 0);
+ schizo_write(p->controller_regs + database + (i * 8UL), 0);
+ }
+
+ /* Leave diag mode enabled for full-flushing done
+ * in pci_iommu.c
+ */
+
+ /* Using assumed page size 8K with 128K entries we need 1MB iommu page
+ * table (128K ioptes * 8 bytes per iopte). This is
+ * page order 7 on UltraSparc.
+ */
+ tsbbase = __get_free_pages(GFP_KERNEL, 7);
+ if (!tsbbase) {
+ prom_printf("SCHIZO_IOMMU: Error, gfp(tsb) failed.\n");
+ prom_halt();
+ }
+ iommu->page_table = (iopte_t *)tsbbase;
+ iommu->page_table_sz_bits = 17;
+ iommu->page_table_map_base = 0xc0000000;
+ iommu->dma_addr_mask = 0xffffffff;
+ memset((char *)tsbbase, 0, PAGE_SIZE << 7);
+
+ /* We start with no consistent mappings. */
+ iommu->lowest_consistent_map =
+ 1 << (iommu->page_table_sz_bits - PBM_LOGCLUSTERS);
+
+ for (i = 0; i < PBM_NCLUSTERS; i++) {
+ iommu->alloc_info[i].flush = 0;
+ iommu->alloc_info[i].next = 0;
+ }
+
+ schizo_write(iommu->iommu_tsbbase, __pa(tsbbase));
+
+ control = schizo_read(iommu->iommu_control);
+ control &= ~(SCHIZO_IOMMU_CTRL_TSBSZ | SCHIZO_IOMMU_CTRL_TBWSZ);
+ control |= (SCHIZO_IOMMU_TSBSZ_128K | SCHIZO_IOMMU_CTRL_ENAB);
+ schizo_write(iommu->iommu_control, control);
}
static void schizo_pbm_init(struct pci_controller_info *p,
int prom_node, int is_pbm_a)
{
- /* IMPLEMENT ME */
+ unsigned int busrange[2];
+ struct pci_pbm_info *pbm;
+ int err;
+
+ if (is_pbm_a)
+ pbm = &p->pbm_A;
+ else
+ pbm = &p->pbm_B;
+
+ schizo_determine_mem_io_space(pbm, is_pbm_a, p->controller_regs);
+ pbm_register_toplevel_resources(p, pbm);
+
+ pbm->parent = p;
+ pbm->prom_node = prom_node;
+ prom_getstring(prom_node, "name",
+ pbm->prom_name,
+ sizeof(pbm->prom_name));
+
+ err = prom_getproperty(prom_node, "ranges",
+ (char *) pbm->pbm_ranges,
+ sizeof(pbm->pbm_ranges));
+ if (err != -1)
+ pbm->num_pbm_ranges =
+ (err / sizeof(struct linux_prom_pci_ranges));
+ else
+ pbm->num_pbm_ranges = 0;
+
+ err = prom_getproperty(prom_node, "interrupt-map",
+ (char *)pbm->pbm_intmap,
+ sizeof(pbm->pbm_intmap));
+ if (err != -1) {
+ pbm->num_pbm_intmap = (err / sizeof(struct linux_prom_pci_intmap));
+ err = prom_getproperty(prom_node, "interrupt-map-mask",
+ (char *)&pbm->pbm_intmask,
+ sizeof(pbm->pbm_intmask));
+ if (err == -1) {
+ prom_printf("SCHIZO-PBM: Fatal error, no "
+ "interrupt-map-mask.\n");
+ prom_halt();
+ }
+ } else {
+ pbm->num_pbm_intmap = 0;
+ memset(&pbm->pbm_intmask, 0, sizeof(pbm->pbm_intmask));
+ }
+
+ err = prom_getproperty(prom_node, "bus-range",
+ (char *)&busrange[0],
+ sizeof(busrange));
+ if (err == 0 || err == -1) {
+ prom_printf("SCHIZO-PBM: Fatal error, no bus-range.\n");
+ prom_halt();
+ }
+ pbm->pci_first_busno = busrange[0];
+ pbm->pci_last_busno = busrange[1];
+
+ schizo_pbm_iommu_init(p, pbm, is_pbm_a);
+ schizo_pbm_strbuf_init(p, pbm, is_pbm_a);
+}
+
+static void schizo_controller_hwinit(struct pci_controller_info *p)
+{
+ unsigned long pbm_a_base, pbm_b_base;
+ u64 tmp;
+
+ pbm_a_base = p->controller_regs + SCHIZO_PBM_A_REGS_OFF;
+ pbm_b_base = p->controller_regs + SCHIZO_PBM_B_REGS_OFF;
+
+ /* Set IRQ retry to infinity. */
+ schizo_write(pbm_a_base + 0x1a00UL, 0xff);
+ schizo_write(pbm_b_base + 0x1a00UL, 0xff);
+
+ /* Enable arbiter for all PCI slots. */
+ tmp = schizo_read(pbm_a_base + 0x2000UL);
+ tmp |= 0x3fUL;
+ schizo_write(pbm_a_base + 0x2000UL, tmp);
+
+ tmp = schizo_read(pbm_b_base + 0x2000UL);
+ tmp |= 0x3fUL;
+ schizo_write(pbm_b_base + 0x2000UL, tmp);
}
void __init schizo_init(int node)
struct linux_prom64_registers pr_regs[3];
struct pci_controller_info *p;
struct pci_iommu *iommu;
+ unsigned long flags;
u32 portid;
int is_pbm_a, err;
p->resource_adjust = schizo_resource_adjust;
p->pci_ops = &schizo_ops;
-pbm_init:
/* Three OBP regs:
* 1) PBM controller regs
* 2) Schizo front-end controller regs (same for both PBMs)
prom_halt();
}
- /* XXX Read REG base, record in controller/pbm structures. */
-
- /* XXX Report controller to console. */
+ p->controller_regs = pr_regs[1].phys_addr - 0x10000UL;
+ printk("PCI: Found SCHIZO, control regs at %016lx\n",
+ p->controller_regs);
- /* XXX Setup pci_memspace_mask */
+ /* Like PSYCHO we have a 2GB aligned area for memory space. */
+ pci_memspace_mask = 0x7fffffffUL;
- /* XXX Init core controller and IOMMU */
+ /* Init core controller. */
+ schizo_controller_hwinit(p);
- is_pbm_a = XXX; /* Figure out this test */
+ is_pbm_a = ((pr_regs[0].phys_addr & 0x00700000) == 0x00600000);
schizo_pbm_init(p, node, is_pbm_a);
}
-/* $Id: setup.c,v 1.59 2001/02/13 01:16:44 davem Exp $
+/* $Id: setup.c,v 1.62 2001/03/03 10:34:45 davem Exp $
* linux/arch/sparc64/kernel/setup.c
*
* Copyright (C) 1995,1996 David S. Miller (davem@caip.rutgers.edu)
"r" (PRIMARY_CONTEXT), "i" (ASI_DMMU));
/*
- * Locked down tlb entry 63.
+ * Locked down tlb entry.
*/
- tte = spitfire_get_dtlb_data(63);
+ if (tlb_type == spitfire)
+ tte = spitfire_get_dtlb_data(SPITFIRE_HIGHEST_LOCKED_TLBENT);
+ else if (tlb_type == cheetah)
+ tte = cheetah_get_ldtlb_data(CHEETAH_HIGHEST_LOCKED_TLBENT);
+
res = PROM_TRUE;
goto done;
}
unsigned long tte;
tte = args[3];
- prom_printf("%lx ", (tte & _PAGE_SOFT2) >> 50);
+ prom_printf("%lx ", (tte & 0x07FC000000000000) >> 50);
args[2] = 2;
args[args[1] + 3] = 0;
-/* $Id: trampoline.S,v 1.12 1999/12/15 15:45:12 davem Exp $
+/* $Id: trampoline.S,v 1.14 2001/03/04 18:31:00 davem Exp $
* trampoline.S: Jump start slave processors on sparc64.
*
* Copyright (C) 1997 David S. Miller (davem@caip.rutgers.edu)
#include <asm/head.h>
#include <asm/asi.h>
#include <asm/lsu.h>
+#include <asm/dcr.h>
+#include <asm/dcu.h>
#include <asm/pstate.h>
#include <asm/page.h>
#include <asm/pgtable.h>
sparc64_cpu_startup:
flushw
- mov (LSU_CONTROL_IC | LSU_CONTROL_DC | LSU_CONTROL_IM | LSU_CONTROL_DM), %g1
- stxa %g1, [%g0] ASI_LSU_CONTROL
- membar #Sync
+ rdpr %ver, %g1
+ sethi %hi(0x003e0014), %g5
+ srlx %g1, 32, %g1
+ or %g5, %lo(0x003e0014), %g5
+ cmp %g1, %g5
+ bne,pt %icc, spitfire_startup
+ nop
+
+cheetah_startup:
+ mov DCR_BPE | DCR_RPE | DCR_SI | DCR_MS, %g1
+ wr %g1, %asr18
+
+ sethi %uhi(DCU_ME | DCU_RE | DCU_PE | DCU_HPE | DCU_SPE | DCU_SL | DCU_WE), %g5
+ or %g5, %ulo(DCU_ME | DCU_RE | DCU_PE | DCU_HPE | DCU_SPE | DCU_SL | DCU_WE), %g5
+ sllx %g5, 32, %g5
+	ldxa		[%g0] ASI_DCU_CONTROL_REG, %g1
+	or		%g1, %g5, %g1
+	stxa		%g1, [%g0] ASI_DCU_CONTROL_REG
+ membar #Sync
+
+ ba,pt %xcc, startup_continue
+ nop
+
+spitfire_startup:
+ mov (LSU_CONTROL_IC | LSU_CONTROL_DC | LSU_CONTROL_IM | LSU_CONTROL_DM), %g1
+ stxa %g1, [%g0] ASI_LSU_CONTROL
+ membar #Sync
- wrpr %g0, 15, %pil
- wr %g0, 0, %tick_cmpr
+startup_continue:
+ wrpr %g0, 15, %pil
+ wr %g0, 0, %tick_cmpr
/* Call OBP by hand to lock KERNBASE into i/d tlbs. */
- mov %o0, %l0
-
- sethi %hi(prom_entry_lock), %g2
-1: ldstub [%g2 + %lo(prom_entry_lock)], %g1
- brnz,pn %g1, 1b
- membar #StoreLoad | #StoreStore
-
- sethi %hi(p1275buf), %g2
- or %g2, %lo(p1275buf), %g2
- ldx [%g2 + 0x10], %l2
- mov %sp, %l1
- add %l2, -(192 + 128), %sp
+ mov %o0, %l0
+
+ sethi %hi(prom_entry_lock), %g2
+1: ldstub [%g2 + %lo(prom_entry_lock)], %g1
+ brnz,pn %g1, 1b
+ membar #StoreLoad | #StoreStore
+
+ sethi %hi(p1275buf), %g2
+ or %g2, %lo(p1275buf), %g2
+ ldx [%g2 + 0x10], %l2
+ mov %sp, %l1
+ add %l2, -(192 + 128), %sp
flushw
- sethi %hi(call_method), %g2
- or %g2, %lo(call_method), %g2
- stx %g2, [%sp + 2047 + 128 + 0x00]
- mov 5, %g2
- stx %g2, [%sp + 2047 + 128 + 0x08]
- mov 1, %g2
- stx %g2, [%sp + 2047 + 128 + 0x10]
- sethi %hi(itlb_load), %g2
- or %g2, %lo(itlb_load), %g2
- stx %g2, [%sp + 2047 + 128 + 0x18]
- sethi %hi(mmu_ihandle_cache), %g2
- lduw [%g2 + %lo(mmu_ihandle_cache)], %g2
- stx %g2, [%sp + 2047 + 128 + 0x20]
- sethi %hi(KERNBASE), %g2
- stx %g2, [%sp + 2047 + 128 + 0x28]
- sethi %hi(kern_locked_tte_data), %g2
- ldx [%g2 + %lo(kern_locked_tte_data)], %g2
- stx %g2, [%sp + 2047 + 128 + 0x30]
- mov 63, %g2
- stx %g2, [%sp + 2047 + 128 + 0x38]
- sethi %hi(p1275buf), %g2
- or %g2, %lo(p1275buf), %g2
- ldx [%g2 + 0x08], %o1
- call %o1
- add %sp, (2047 + 128), %o0
-
- sethi %hi(call_method), %g2
- or %g2, %lo(call_method), %g2
- stx %g2, [%sp + 2047 + 128 + 0x00]
- mov 5, %g2
- stx %g2, [%sp + 2047 + 128 + 0x08]
- mov 1, %g2
- stx %g2, [%sp + 2047 + 128 + 0x10]
- sethi %hi(dtlb_load), %g2
- or %g2, %lo(dtlb_load), %g2
- stx %g2, [%sp + 2047 + 128 + 0x18]
- sethi %hi(mmu_ihandle_cache), %g2
- lduw [%g2 + %lo(mmu_ihandle_cache)], %g2
- stx %g2, [%sp + 2047 + 128 + 0x20]
- sethi %hi(KERNBASE), %g2
- stx %g2, [%sp + 2047 + 128 + 0x28]
- sethi %hi(kern_locked_tte_data), %g2
- ldx [%g2 + %lo(kern_locked_tte_data)], %g2
- stx %g2, [%sp + 2047 + 128 + 0x30]
- mov 63, %g2
- stx %g2, [%sp + 2047 + 128 + 0x38]
- sethi %hi(p1275buf), %g2
- or %g2, %lo(p1275buf), %g2
- ldx [%g2 + 0x08], %o1
- call %o1
- add %sp, (2047 + 128), %o0
-
- sethi %hi(prom_entry_lock), %g2
- stb %g0, [%g2 + %lo(prom_entry_lock)]
- membar #StoreStore | #StoreLoad
-
- mov %l1, %sp
+ sethi %hi(call_method), %g2
+ or %g2, %lo(call_method), %g2
+ stx %g2, [%sp + 2047 + 128 + 0x00]
+ mov 5, %g2
+ stx %g2, [%sp + 2047 + 128 + 0x08]
+ mov 1, %g2
+ stx %g2, [%sp + 2047 + 128 + 0x10]
+ sethi %hi(itlb_load), %g2
+ or %g2, %lo(itlb_load), %g2
+ stx %g2, [%sp + 2047 + 128 + 0x18]
+ sethi %hi(mmu_ihandle_cache), %g2
+ lduw [%g2 + %lo(mmu_ihandle_cache)], %g2
+ stx %g2, [%sp + 2047 + 128 + 0x20]
+ sethi %hi(KERNBASE), %g2
+ stx %g2, [%sp + 2047 + 128 + 0x28]
+ sethi %hi(kern_locked_tte_data), %g2
+ ldx [%g2 + %lo(kern_locked_tte_data)], %g2
+ stx %g2, [%sp + 2047 + 128 + 0x30]
+
+ rdpr %ver, %g1
+ sethi %hi(0x003e0014), %g5
+ srlx %g1, 32, %g1
+ or %g5, %lo(0x003e0014), %g5
+ cmp %g1, %g5
+ bne,a,pt %icc, 1f
+ mov 63, %g2
+ mov 15, %g2
+1:
+ stx %g2, [%sp + 2047 + 128 + 0x38]
+ sethi %hi(p1275buf), %g2
+ or %g2, %lo(p1275buf), %g2
+ ldx [%g2 + 0x08], %o1
+ call %o1
+ add %sp, (2047 + 128), %o0
+
+ sethi %hi(call_method), %g2
+ or %g2, %lo(call_method), %g2
+ stx %g2, [%sp + 2047 + 128 + 0x00]
+ mov 5, %g2
+ stx %g2, [%sp + 2047 + 128 + 0x08]
+ mov 1, %g2
+ stx %g2, [%sp + 2047 + 128 + 0x10]
+ sethi %hi(dtlb_load), %g2
+ or %g2, %lo(dtlb_load), %g2
+ stx %g2, [%sp + 2047 + 128 + 0x18]
+ sethi %hi(mmu_ihandle_cache), %g2
+ lduw [%g2 + %lo(mmu_ihandle_cache)], %g2
+ stx %g2, [%sp + 2047 + 128 + 0x20]
+ sethi %hi(KERNBASE), %g2
+ stx %g2, [%sp + 2047 + 128 + 0x28]
+ sethi %hi(kern_locked_tte_data), %g2
+ ldx [%g2 + %lo(kern_locked_tte_data)], %g2
+ stx %g2, [%sp + 2047 + 128 + 0x30]
+
+ rdpr %ver, %g1
+ sethi %hi(0x003e0014), %g5
+ srlx %g1, 32, %g1
+ or %g5, %lo(0x003e0014), %g5
+ cmp %g1, %g5
+ bne,a,pt %icc, 1f
+ mov 63, %g2
+ mov 15, %g2
+1:
+
+ stx %g2, [%sp + 2047 + 128 + 0x38]
+ sethi %hi(p1275buf), %g2
+ or %g2, %lo(p1275buf), %g2
+ ldx [%g2 + 0x08], %o1
+ call %o1
+ add %sp, (2047 + 128), %o0
+
+ sethi %hi(prom_entry_lock), %g2
+ stb %g0, [%g2 + %lo(prom_entry_lock)]
+ membar #StoreStore | #StoreLoad
+
+ mov %l1, %sp
flushw
- mov %l0, %o0
+ mov %l0, %o0
- wrpr %g0, (PSTATE_PRIV | PSTATE_PEF), %pstate
- wr %g0, 0, %fprs
+ wrpr %g0, (PSTATE_PRIV | PSTATE_PEF), %pstate
+ wr %g0, 0, %fprs
- sethi %uhi(PAGE_OFFSET), %g4
- sllx %g4, 32, %g4
+ sethi %uhi(PAGE_OFFSET), %g4
+ sllx %g4, 32, %g4
/* XXX Buggy PROM... */
- srl %o0, 0, %o0
- ldx [%o0], %g6
+ srl %o0, 0, %o0
+ ldx [%o0], %g6
- wr %g0, ASI_P, %asi
+ wr %g0, ASI_P, %asi
- mov PRIMARY_CONTEXT, %g7
- stxa %g0, [%g7] ASI_DMMU
- membar #Sync
- mov SECONDARY_CONTEXT, %g7
- stxa %g0, [%g7] ASI_DMMU
- membar #Sync
+ mov PRIMARY_CONTEXT, %g7
+ stxa %g0, [%g7] ASI_DMMU
+ membar #Sync
+ mov SECONDARY_CONTEXT, %g7
+ stxa %g0, [%g7] ASI_DMMU
+ membar #Sync
- mov 1, %g5
- sllx %g5, (PAGE_SHIFT + 1), %g5
- sub %g5, (REGWIN_SZ + STACK_BIAS), %g5
- add %g6, %g5, %sp
- mov 0, %fp
+ mov 1, %g5
+ sllx %g5, (PAGE_SHIFT + 1), %g5
+ sub %g5, (REGWIN_SZ + STACK_BIAS), %g5
+ add %g6, %g5, %sp
+ mov 0, %fp
- wrpr %g0, 0, %wstate
- wrpr %g0, 0, %tl
+ wrpr %g0, 0, %wstate
+ wrpr %g0, 0, %tl
/* Setup the trap globals, then we can resurface. */
- rdpr %pstate, %o1
- mov %g6, %o2
- wrpr %o1, PSTATE_AG, %pstate
- sethi %hi(sparc64_ttable_tl0), %g5
- wrpr %g5, %tba
- mov %o2, %g6
-
- wrpr %o1, PSTATE_MG, %pstate
+ rdpr %pstate, %o1
+ mov %g6, %o2
+ wrpr %o1, PSTATE_AG, %pstate
+ sethi %hi(sparc64_ttable_tl0), %g5
+ wrpr %g5, %tba
+ mov %o2, %g6
+
+ wrpr %o1, PSTATE_MG, %pstate
#define KERN_HIGHBITS ((_PAGE_VALID | _PAGE_SZ4MB) ^ 0xfffff80000000000)
#define KERN_LOWBITS (_PAGE_CP | _PAGE_CV | _PAGE_P | _PAGE_W)
-#ifdef THIS_IS_CHEETAH
-#error Dave, make sure you took care of other issues in rest of sparc64 code...
-#define VPTE_BASE 0xffe0000000000000
-#else /* Spitfire/Blackbird */
-#define VPTE_BASE 0xfffffffe00000000
-#endif
- mov TSB_REG, %g1
- stxa %g0, [%g1] ASI_DMMU
- membar #Sync
- mov TLB_SFSR, %g1
- sethi %uhi(KERN_HIGHBITS), %g2
- or %g2, %ulo(KERN_HIGHBITS), %g2
- sllx %g2, 32, %g2
- or %g2, KERN_LOWBITS, %g2
- sethi %uhi(VPTE_BASE), %g3
- or %g3, %ulo(VPTE_BASE), %g3
- sllx %g3, 32, %g3
+
+#define VPTE_BASE_CHEETAH 0xffe0000000000000
+#define VPTE_BASE_SPITFIRE 0xfffffffe00000000
+
+ mov TSB_REG, %g1
+ stxa %g0, [%g1] ASI_DMMU
+ membar #Sync
+ mov TLB_SFSR, %g1
+ sethi %uhi(KERN_HIGHBITS), %g2
+ or %g2, %ulo(KERN_HIGHBITS), %g2
+ sllx %g2, 32, %g2
+ or %g2, KERN_LOWBITS, %g2
+
+ rdpr %ver, %g3
+ sethi %hi(0x003e0014), %g7
+ srlx %g3, 32, %g3
+ or %g7, %lo(0x003e0014), %g7
+ cmp %g3, %g7
+ bne,pt %icc, 1f
+ nop
+
+ sethi %uhi(VPTE_BASE_CHEETAH), %g3
+ or %g3, %ulo(VPTE_BASE_CHEETAH), %g3
+ ba,pt %xcc, 2f
+ sllx %g3, 32, %g3
+1:
+ sethi %uhi(VPTE_BASE_SPITFIRE), %g3
+ or %g3, %ulo(VPTE_BASE_SPITFIRE), %g3
+ sllx %g3, 32, %g3
+
+2:
clr %g7
#undef KERN_HIGHBITS
#undef KERN_LOWBITS
#undef VPTE_BASE
/* Setup interrupt globals, we are always SMP. */
- wrpr %o1, PSTATE_IG, %pstate
+ wrpr %o1, PSTATE_IG, %pstate
/* Get our UPA MID. */
- lduw [%o2 + AOFF_task_processor], %g1
- sethi %hi(cpu_data), %g5
- or %g5, %lo(cpu_data), %g5
+ lduw [%o2 + AOFF_task_processor], %g1
+ sethi %hi(cpu_data), %g5
+ or %g5, %lo(cpu_data), %g5
/* In theory this is: &(cpu_data[this_upamid].irq_worklists[0]) */
- sllx %g1, 7, %g1
- add %g5, %g1, %g1
- add %g1, 64, %g6
+ sllx %g1, 7, %g1
+ add %g5, %g1, %g1
+ add %g1, 64, %g6
- wrpr %g0, 0, %wstate
- or %o1, PSTATE_IE, %o1
- wrpr %o1, 0, %pstate
+ wrpr %g0, 0, %wstate
+ or %o1, PSTATE_IE, %o1
+ wrpr %o1, 0, %pstate
- call prom_set_trap_table
- sethi %hi(sparc64_ttable_tl0), %o0
+ call prom_set_trap_table
+ sethi %hi(sparc64_ttable_tl0), %o0
- call smp_callin
+ call smp_callin
nop
- call cpu_idle
- mov 0, %o0
- call cpu_panic
+ call cpu_idle
+ mov 0, %o0
+ call cpu_panic
nop
-1: b,a,pt %xcc, 1b
+1: b,a,pt %xcc, 1b
.align 8
sparc64_cpu_startup_end:
-/* $Id: init.c,v 1.162 2001/02/13 01:16:44 davem Exp $
+/* $Id: init.c,v 1.164 2001/03/03 10:34:45 davem Exp $
* arch/sparc64/mm/init.c
*
* Copyright (C) 1996-1999 David S. Miller (davem@caip.rutgers.edu)
/* Ugly, but necessary... -DaveM */
unsigned long phys_base;
+enum ultra_tlb_layout tlb_type = spitfire;
+
/* get_new_mmu_context() uses "cache + 1". */
spinlock_t ctx_alloc_lock = SPIN_LOCK_UNLOCKED;
unsigned long tlb_context_cache = CTX_FIRST_VERSION - 1;
{
int freed = 0;
- if(pgtable_cache_size > high) {
+ if (pgtable_cache_size > high) {
do {
#ifdef CONFIG_SMP
- if(pgd_quicklist)
+ if (pgd_quicklist)
free_pgd_slow(get_pgd_fast()), freed++;
#endif
- if(pte_quicklist[0])
+ if (pte_quicklist[0])
free_pte_slow(get_pte_fast(0)), freed++;
- if(pte_quicklist[1])
+ if (pte_quicklist[1])
free_pte_slow(get_pte_fast(1)), freed++;
- } while(pgtable_cache_size > low);
+ } while (pgtable_cache_size > low);
}
#ifndef CONFIG_SMP
if (pgd_cache_size > high / 4) {
int mmu_info(char *buf)
{
- /* We'll do the rest later to make it nice... -DaveM */
-#if 0
- if (this_is_cheetah)
- sprintf(buf, "MMU Type\t: One bad ass cpu\n");
+ if (tlb_type == cheetah)
+ return sprintf(buf, "MMU Type\t: Cheetah\n");
+ else if (tlb_type == spitfire)
+ return sprintf(buf, "MMU Type\t: Spitfire\n");
else
-#endif
- return sprintf(buf, "MMU Type\t: Spitfire\n");
+ return sprintf(buf, "MMU Type\t: ???\n");
}
struct linux_prom_translation {
for (vaddr = trans[i].virt;
vaddr < trans[i].virt + trans[i].size;
vaddr += PAGE_SIZE) {
+ unsigned long val;
+
pgdp = pgd_offset(&init_mm, vaddr);
if (pgd_none(*pgdp)) {
pmdp = __alloc_bootmem(PMD_TABLE_SIZE,
pmd_set(pmdp, ptep);
}
ptep = pte_offset(pmdp, vaddr);
- set_pte (ptep, __pte(trans[i].data | _PAGE_MODIFIED));
+
+ val = trans[i].data;
+
+ /* Clear diag TTE bits. */
+ if (tlb_type == spitfire)
+ val &= ~0x0003fe0000000000UL;
+
+ set_pte (ptep, __pte(val | _PAGE_MODIFIED));
trans[i].data += PAGE_SIZE;
}
}
: "r" (0),
"r" (PRIMARY_CONTEXT), "i" (ASI_DMMU));
- phys_page = spitfire_get_dtlb_data(63) & _PAGE_PADDR;
+ switch (tlb_type) {
+ default:
+ case spitfire:
+ phys_page = spitfire_get_dtlb_data(sparc64_highest_locked_tlbent());
+ break;
+
+ case cheetah:
+ phys_page = cheetah_get_ldtlb_data(sparc64_highest_locked_tlbent());
+ break;
+ };
+
+ phys_page &= _PAGE_PADDR;
phys_page += ((unsigned long)&prom_boot_page -
(unsigned long)&empty_zero_page);
- /* Lock this into i/d tlb entry 59 */
- __asm__ __volatile__(
- "stxa %%g0, [%2] %3\n\t"
- "stxa %0, [%1] %4\n\t"
- "membar #Sync\n\t"
- "flush %%g6\n\t"
- "stxa %%g0, [%2] %5\n\t"
- "stxa %0, [%1] %6\n\t"
- "membar #Sync\n\t"
- "flush %%g6"
- : : "r" (phys_page | _PAGE_VALID | _PAGE_SZ8K | _PAGE_CP |
- _PAGE_CV | _PAGE_P | _PAGE_L | _PAGE_W),
- "r" (59 << 3), "r" (TLB_TAG_ACCESS),
- "i" (ASI_DMMU), "i" (ASI_DTLB_DATA_ACCESS),
- "i" (ASI_IMMU), "i" (ASI_ITLB_DATA_ACCESS)
- : "memory");
+ if (tlb_type == spitfire) {
+ /* Lock this into i/d tlb entry 59 */
+ __asm__ __volatile__(
+ "stxa %%g0, [%2] %3\n\t"
+ "stxa %0, [%1] %4\n\t"
+ "membar #Sync\n\t"
+ "flush %%g6\n\t"
+ "stxa %%g0, [%2] %5\n\t"
+ "stxa %0, [%1] %6\n\t"
+ "membar #Sync\n\t"
+ "flush %%g6"
+ : : "r" (phys_page | _PAGE_VALID | _PAGE_SZ8K | _PAGE_CP |
+ _PAGE_CV | _PAGE_P | _PAGE_L | _PAGE_W),
+ "r" (59 << 3), "r" (TLB_TAG_ACCESS),
+ "i" (ASI_DMMU), "i" (ASI_DTLB_DATA_ACCESS),
+ "i" (ASI_IMMU), "i" (ASI_ITLB_DATA_ACCESS)
+ : "memory");
+ } else if (tlb_type == cheetah) {
+ /* Lock this into i/d tlb-0 entry 11 */
+ __asm__ __volatile__(
+ "stxa %%g0, [%2] %3\n\t"
+ "stxa %0, [%1] %4\n\t"
+ "membar #Sync\n\t"
+ "flush %%g6\n\t"
+ "stxa %%g0, [%2] %5\n\t"
+ "stxa %0, [%1] %6\n\t"
+ "membar #Sync\n\t"
+ "flush %%g6"
+ : : "r" (phys_page | _PAGE_VALID | _PAGE_SZ8K | _PAGE_CP |
+ _PAGE_CV | _PAGE_P | _PAGE_L | _PAGE_W),
+ "r" ((0 << 16) | (11 << 3)), "r" (TLB_TAG_ACCESS),
+ "i" (ASI_DMMU), "i" (ASI_DTLB_DATA_ACCESS),
+ "i" (ASI_IMMU), "i" (ASI_ITLB_DATA_ACCESS)
+ : "memory");
+ } else {
+ /* Implement me :-) */
+ BUG();
+ }
tte_vaddr = (unsigned long) &empty_zero_page;
: "r" (0),
"r" (PRIMARY_CONTEXT), "i" (ASI_DMMU));
- kern_locked_tte_data = tte_data = spitfire_get_dtlb_data(63);
+ if (tlb_type == spitfire)
+ tte_data = spitfire_get_dtlb_data(sparc64_highest_locked_tlbent());
+ else
+ tte_data = cheetah_get_ldtlb_data(sparc64_highest_locked_tlbent());
+
+ kern_locked_tte_data = tte_data;
remap_func = (void *) ((unsigned long) &prom_remap -
(unsigned long) &prom_boot_page);
: "r" (0),
"r" (PRIMARY_CONTEXT), "i" (ASI_DMMU));
- remap_func(spitfire_get_dtlb_data(63) & _PAGE_PADDR,
+ remap_func((tlb_type == spitfire ?
+ (spitfire_get_dtlb_data(sparc64_highest_locked_tlbent()) & _PAGE_PADDR) :
+ (cheetah_get_ldtlb_data(sparc64_highest_locked_tlbent()) & _PAGE_PADDR)),
(unsigned long) &empty_zero_page,
prom_get_mmu_ihandle());
spitfire_flush_itlb_nucleus_page(0x0);
/* Now lock us back into the TLBs via OBP. */
- prom_dtlb_load(63, tte_data, tte_vaddr);
- prom_itlb_load(63, tte_data, tte_vaddr);
+ prom_dtlb_load(sparc64_highest_locked_tlbent(), tte_data, tte_vaddr);
+ prom_itlb_load(sparc64_highest_locked_tlbent(), tte_data, tte_vaddr);
/* Re-read translations property. */
if ((n = prom_getproperty(node, "translations", (char *)trans, tsz)) == -1) {
int i;
/* Only DTLB must be checked for VPTE entries. */
- for(i = 0; i < 63; i++) {
- unsigned long tag;
-
- /* Spitfire Errata #32 workaround */
- __asm__ __volatile__("stxa %0, [%1] %2\n\t"
- "flush %%g6"
- : /* No outputs */
- : "r" (0),
- "r" (PRIMARY_CONTEXT), "i" (ASI_DMMU));
-
- tag = spitfire_get_dtlb_tag(i);
- if(((tag & ~(PAGE_MASK)) == 0) &&
- ((tag & (PAGE_MASK)) >= prom_reserved_base)) {
- __asm__ __volatile__("stxa %%g0, [%0] %1"
- : /* no outputs */
- : "r" (TLB_TAG_ACCESS), "i" (ASI_DMMU));
- membar("#Sync");
- spitfire_put_dtlb_data(i, 0x0UL);
- membar("#Sync");
+ if (tlb_type == spitfire) {
+ for (i = 0; i < 63; i++) {
+ unsigned long tag;
+
+ /* Spitfire Errata #32 workaround */
+ __asm__ __volatile__("stxa %0, [%1] %2\n\t"
+ "flush %%g6"
+ : /* No outputs */
+ : "r" (0),
+ "r" (PRIMARY_CONTEXT), "i" (ASI_DMMU));
+
+ tag = spitfire_get_dtlb_tag(i);
+ if (((tag & ~(PAGE_MASK)) == 0) &&
+ ((tag & (PAGE_MASK)) >= prom_reserved_base)) {
+ __asm__ __volatile__("stxa %%g0, [%0] %1"
+ : /* no outputs */
+ : "r" (TLB_TAG_ACCESS), "i" (ASI_DMMU));
+ membar("#Sync");
+ spitfire_put_dtlb_data(i, 0x0UL);
+ membar("#Sync");
+ }
+ }
+ } else if (tlb_type == cheetah) {
+ for (i = 0; i < 511; i++) {
+ unsigned long tag = cheetah_get_dtlb_tag(i);
+
+ if ((tag & ~PAGE_MASK) == 0 &&
+ (tag & PAGE_MASK) >= prom_reserved_base) {
+ __asm__ __volatile__("stxa %%g0, [%0] %1"
+ : /* no outputs */
+ : "r" (TLB_TAG_ACCESS), "i" (ASI_DMMU));
+ membar("#Sync");
+ cheetah_put_dtlb_data(i, 0x0UL);
+ membar("#Sync");
+ }
}
+ } else {
+ /* Implement me :-) */
+ BUG();
}
}
unsigned long tlb_tag;
unsigned long tlb_data;
};
-struct prom_tlb_entry prom_itlb[8], prom_dtlb[8];
+struct prom_tlb_entry prom_itlb[16], prom_dtlb[16];
void prom_world(int enter)
{
__flush_nucleus_vptes();
/* Install PROM world. */
- for (i = 0; i < 8; i++) {
+ for (i = 0; i < 16; i++) {
if (prom_dtlb[i].tlb_ent != -1) {
__asm__ __volatile__("stxa %0, [%1] %2"
: : "r" (prom_dtlb[i].tlb_tag), "r" (TLB_TAG_ACCESS),
"i" (ASI_DMMU));
membar("#Sync");
- spitfire_put_dtlb_data(prom_dtlb[i].tlb_ent,
- prom_dtlb[i].tlb_data);
+ if (tlb_type == spitfire)
+ spitfire_put_dtlb_data(prom_dtlb[i].tlb_ent,
+ prom_dtlb[i].tlb_data);
+ else if (tlb_type == cheetah)
+ cheetah_put_ldtlb_data(prom_dtlb[i].tlb_ent,
+ prom_dtlb[i].tlb_data);
membar("#Sync");
}
-
if (prom_itlb[i].tlb_ent != -1) {
__asm__ __volatile__("stxa %0, [%1] %2"
- : : "r" (prom_itlb[i].tlb_tag), "r" (TLB_TAG_ACCESS),
- "i" (ASI_IMMU));
+ : : "r" (prom_itlb[i].tlb_tag),
+ "r" (TLB_TAG_ACCESS),
+ "i" (ASI_IMMU));
membar("#Sync");
- spitfire_put_itlb_data(prom_itlb[i].tlb_ent,
- prom_itlb[i].tlb_data);
+ if (tlb_type == spitfire)
+ spitfire_put_itlb_data(prom_itlb[i].tlb_ent,
+ prom_itlb[i].tlb_data);
+ else if (tlb_type == cheetah)
+ cheetah_put_litlb_data(prom_itlb[i].tlb_ent,
+ prom_itlb[i].tlb_data);
membar("#Sync");
}
}
} else {
- for (i = 0; i < 8; i++) {
+ for (i = 0; i < 16; i++) {
if (prom_dtlb[i].tlb_ent != -1) {
__asm__ __volatile__("stxa %%g0, [%0] %1"
: : "r" (TLB_TAG_ACCESS), "i" (ASI_DMMU));
membar("#Sync");
- spitfire_put_dtlb_data(prom_dtlb[i].tlb_ent, 0x0UL);
+ if (tlb_type == spitfire)
+ spitfire_put_dtlb_data(prom_dtlb[i].tlb_ent, 0x0UL);
+ else
+ cheetah_put_ldtlb_data(prom_dtlb[i].tlb_ent, 0x0UL);
membar("#Sync");
}
if (prom_itlb[i].tlb_ent != -1) {
__asm__ __volatile__("stxa %%g0, [%0] %1"
- : : "r" (TLB_TAG_ACCESS), "i" (ASI_IMMU));
+ : : "r" (TLB_TAG_ACCESS),
+ "i" (ASI_IMMU));
membar("#Sync");
- spitfire_put_itlb_data(prom_itlb[i].tlb_ent, 0x0UL);
+ if (tlb_type == spitfire)
+ spitfire_put_itlb_data(prom_itlb[i].tlb_ent, 0x0UL);
+ else
+ cheetah_put_litlb_data(prom_itlb[i].tlb_ent, 0x0UL);
membar("#Sync");
}
}
* UNDOCUMENTED!!!!!! Thanks S(t)un!
*/
if (save_p) {
- for(i = 0; i < 8; i++) {
- prom_dtlb[i].tlb_ent = -1;
+ for (i = 0; i < 16; i++) {
prom_itlb[i].tlb_ent = -1;
+ prom_dtlb[i].tlb_ent = -1;
}
}
- for(i = 0; i < 63; i++) {
- unsigned long data;
-
-
- /* Spitfire Errata #32 workaround */
- __asm__ __volatile__("stxa %0, [%1] %2\n\t"
- "flush %%g6"
- : /* No outputs */
- : "r" (0),
- "r" (PRIMARY_CONTEXT), "i" (ASI_DMMU));
-
- data = spitfire_get_dtlb_data(i);
- if((data & (_PAGE_L|_PAGE_VALID)) == (_PAGE_L|_PAGE_VALID)) {
- unsigned long tag;
+ if (tlb_type == spitfire) {
+ for (i = 0; i < SPITFIRE_HIGHEST_LOCKED_TLBENT; i++) {
+ unsigned long data;
/* Spitfire Errata #32 workaround */
__asm__ __volatile__("stxa %0, [%1] %2\n\t"
: "r" (0),
"r" (PRIMARY_CONTEXT), "i" (ASI_DMMU));
- tag = spitfire_get_dtlb_tag(i);
- if(save_p) {
- prom_dtlb[dtlb_seen].tlb_ent = i;
- prom_dtlb[dtlb_seen].tlb_tag = tag;
- prom_dtlb[dtlb_seen].tlb_data = data;
- }
- __asm__ __volatile__("stxa %%g0, [%0] %1"
- : : "r" (TLB_TAG_ACCESS), "i" (ASI_DMMU));
- membar("#Sync");
- spitfire_put_dtlb_data(i, 0x0UL);
- membar("#Sync");
+ data = spitfire_get_dtlb_data(i);
+ if ((data & (_PAGE_L|_PAGE_VALID)) == (_PAGE_L|_PAGE_VALID)) {
+ unsigned long tag;
+
+ /* Spitfire Errata #32 workaround */
+ __asm__ __volatile__("stxa %0, [%1] %2\n\t"
+ "flush %%g6"
+ : /* No outputs */
+ : "r" (0),
+ "r" (PRIMARY_CONTEXT), "i" (ASI_DMMU));
+
+ tag = spitfire_get_dtlb_tag(i);
+ if (save_p) {
+ prom_dtlb[dtlb_seen].tlb_ent = i;
+ prom_dtlb[dtlb_seen].tlb_tag = tag;
+ prom_dtlb[dtlb_seen].tlb_data = data;
+ }
+ __asm__ __volatile__("stxa %%g0, [%0] %1"
+ : : "r" (TLB_TAG_ACCESS), "i" (ASI_DMMU));
+ membar("#Sync");
+ spitfire_put_dtlb_data(i, 0x0UL);
+ membar("#Sync");
- dtlb_seen++;
- if(dtlb_seen > 7)
- break;
+ dtlb_seen++;
+ if (dtlb_seen > 15)
+ break;
+ }
}
- }
- for(i = 0; i < 63; i++) {
- unsigned long data;
-
- /* Spitfire Errata #32 workaround */
- __asm__ __volatile__("stxa %0, [%1] %2\n\t"
- "flush %%g6"
- : /* No outputs */
- : "r" (0),
- "r" (PRIMARY_CONTEXT), "i" (ASI_DMMU));
-
- data = spitfire_get_itlb_data(i);
- if((data & (_PAGE_L|_PAGE_VALID)) == (_PAGE_L|_PAGE_VALID)) {
- unsigned long tag;
+
+ for (i = 0; i < SPITFIRE_HIGHEST_LOCKED_TLBENT; i++) {
+ unsigned long data;
/* Spitfire Errata #32 workaround */
__asm__ __volatile__("stxa %0, [%1] %2\n\t"
: "r" (0),
"r" (PRIMARY_CONTEXT), "i" (ASI_DMMU));
- tag = spitfire_get_itlb_tag(i);
- if(save_p) {
- prom_itlb[itlb_seen].tlb_ent = i;
- prom_itlb[itlb_seen].tlb_tag = tag;
- prom_itlb[itlb_seen].tlb_data = data;
+ data = spitfire_get_itlb_data(i);
+ if ((data & (_PAGE_L|_PAGE_VALID)) == (_PAGE_L|_PAGE_VALID)) {
+ unsigned long tag;
+
+ /* Spitfire Errata #32 workaround */
+ __asm__ __volatile__("stxa %0, [%1] %2\n\t"
+ "flush %%g6"
+ : /* No outputs */
+ : "r" (0),
+ "r" (PRIMARY_CONTEXT), "i" (ASI_DMMU));
+
+ tag = spitfire_get_itlb_tag(i);
+ if (save_p) {
+ prom_itlb[itlb_seen].tlb_ent = i;
+ prom_itlb[itlb_seen].tlb_tag = tag;
+ prom_itlb[itlb_seen].tlb_data = data;
+ }
+ __asm__ __volatile__("stxa %%g0, [%0] %1"
+ : : "r" (TLB_TAG_ACCESS), "i" (ASI_IMMU));
+ membar("#Sync");
+ spitfire_put_itlb_data(i, 0x0UL);
+ membar("#Sync");
+
+ itlb_seen++;
+ if (itlb_seen > 15)
+ break;
}
- __asm__ __volatile__("stxa %%g0, [%0] %1"
- : : "r" (TLB_TAG_ACCESS), "i" (ASI_IMMU));
- membar("#Sync");
- spitfire_put_itlb_data(i, 0x0UL);
- membar("#Sync");
+ }
+ } else if (tlb_type == cheetah) {
+ for (i = 0; i < CHEETAH_HIGHEST_LOCKED_TLBENT; i++) {
+ unsigned long data;
+
+ data = cheetah_get_ldtlb_data(i);
+ if ((data & (_PAGE_L|_PAGE_VALID)) == (_PAGE_L|_PAGE_VALID)) {
+ unsigned long tag;
+
+ tag = cheetah_get_ldtlb_tag(i);
+ if (save_p) {
+ prom_dtlb[dtlb_seen].tlb_ent = i;
+ prom_dtlb[dtlb_seen].tlb_tag = tag;
+ prom_dtlb[dtlb_seen].tlb_data = data;
+ }
+ __asm__ __volatile__("stxa %%g0, [%0] %1"
+ : : "r" (TLB_TAG_ACCESS), "i" (ASI_DMMU));
+ membar("#Sync");
+ cheetah_put_ldtlb_data(i, 0x0UL);
+ membar("#Sync");
- itlb_seen++;
- if(itlb_seen > 7)
- break;
+ dtlb_seen++;
+ if (dtlb_seen > 15)
+ break;
+ }
+ }
+
+ for (i = 0; i < CHEETAH_HIGHEST_LOCKED_TLBENT; i++) {
+ unsigned long data;
+
+ data = cheetah_get_litlb_data(i);
+ if ((data & (_PAGE_L|_PAGE_VALID)) == (_PAGE_L|_PAGE_VALID)) {
+ unsigned long tag;
+
+ tag = cheetah_get_litlb_tag(i);
+ if (save_p) {
+ prom_itlb[itlb_seen].tlb_ent = i;
+ prom_itlb[itlb_seen].tlb_tag = tag;
+ prom_itlb[itlb_seen].tlb_data = data;
+ }
+ __asm__ __volatile__("stxa %%g0, [%0] %1"
+ : : "r" (TLB_TAG_ACCESS), "i" (ASI_IMMU));
+ membar("#Sync");
+ cheetah_put_litlb_data(i, 0x0UL);
+ membar("#Sync");
+
+ itlb_seen++;
+ if (itlb_seen > 15)
+ break;
+ }
}
+ } else {
+ /* Implement me :-) */
+ BUG();
}
if (save_p)
prom_ditlb_set = 1;
{
int i;
- for (i = 0; i < 8; i++) {
+ for (i = 0; i < 16; i++) {
if (prom_dtlb[i].tlb_ent != -1) {
__asm__ __volatile__("stxa %0, [%1] %2"
: : "r" (prom_dtlb[i].tlb_tag), "r" (TLB_TAG_ACCESS),
"i" (ASI_DMMU));
membar("#Sync");
- spitfire_put_dtlb_data(prom_dtlb[i].tlb_ent,
- prom_dtlb[i].tlb_data);
+ if (tlb_type == spitfire)
+ spitfire_put_dtlb_data(prom_dtlb[i].tlb_ent,
+ prom_dtlb[i].tlb_data);
+ else if (tlb_type == cheetah)
+ cheetah_put_ldtlb_data(prom_dtlb[i].tlb_ent,
+ prom_dtlb[i].tlb_data);
membar("#Sync");
}
if (prom_itlb[i].tlb_ent != -1) {
__asm__ __volatile__("stxa %0, [%1] %2"
- : : "r" (prom_itlb[i].tlb_tag), "r" (TLB_TAG_ACCESS),
- "i" (ASI_IMMU));
+ : : "r" (prom_itlb[i].tlb_tag),
+ "r" (TLB_TAG_ACCESS),
+ "i" (ASI_IMMU));
membar("#Sync");
- spitfire_put_itlb_data(prom_itlb[i].tlb_ent,
- prom_itlb[i].tlb_data);
+ if (tlb_type == spitfire)
+ spitfire_put_itlb_data(prom_itlb[i].tlb_ent,
+ prom_itlb[i].tlb_data);
+ else
+ cheetah_put_litlb_data(prom_itlb[i].tlb_ent,
+ prom_itlb[i].tlb_data);
membar("#Sync");
}
}
unsigned long va;
flushw_all();
- for(va = 0; va < (PAGE_SIZE << 1); va += 32)
+ for (va = 0; va < (PAGE_SIZE << 1); va += 32)
spitfire_put_icache_tag(va, 0x0);
}
"wrpr %0, %1, %%pstate"
: "=r" (pstate)
: "i" (PSTATE_IE));
- for(i = 0; i < 64; i++) {
- /* Spitfire Errata #32 workaround */
- __asm__ __volatile__("stxa %0, [%1] %2\n\t"
- "flush %%g6"
- : /* No outputs */
- : "r" (0),
- "r" (PRIMARY_CONTEXT), "i" (ASI_DMMU));
-
- if(!(spitfire_get_dtlb_data(i) & _PAGE_L)) {
- __asm__ __volatile__("stxa %%g0, [%0] %1"
- : /* no outputs */
- : "r" (TLB_TAG_ACCESS), "i" (ASI_DMMU));
- membar("#Sync");
- spitfire_put_dtlb_data(i, 0x0UL);
- membar("#Sync");
- }
+ if (tlb_type == spitfire) {
+ for (i = 0; i < 64; i++) {
+ /* Spitfire Errata #32 workaround */
+ __asm__ __volatile__("stxa %0, [%1] %2\n\t"
+ "flush %%g6"
+ : /* No outputs */
+ : "r" (0),
+ "r" (PRIMARY_CONTEXT), "i" (ASI_DMMU));
- /* Spitfire Errata #32 workaround */
- __asm__ __volatile__("stxa %0, [%1] %2\n\t"
- "flush %%g6"
- : /* No outputs */
- : "r" (0),
- "r" (PRIMARY_CONTEXT), "i" (ASI_DMMU));
-
- if(!(spitfire_get_itlb_data(i) & _PAGE_L)) {
- __asm__ __volatile__("stxa %%g0, [%0] %1"
- : /* no outputs */
- : "r" (TLB_TAG_ACCESS), "i" (ASI_IMMU));
- membar("#Sync");
- spitfire_put_itlb_data(i, 0x0UL);
- membar("#Sync");
+ if (!(spitfire_get_dtlb_data(i) & _PAGE_L)) {
+ __asm__ __volatile__("stxa %%g0, [%0] %1"
+ : /* no outputs */
+ : "r" (TLB_TAG_ACCESS), "i" (ASI_DMMU));
+ membar("#Sync");
+ spitfire_put_dtlb_data(i, 0x0UL);
+ membar("#Sync");
+ }
+
+ /* Spitfire Errata #32 workaround */
+ __asm__ __volatile__("stxa %0, [%1] %2\n\t"
+ "flush %%g6"
+ : /* No outputs */
+ : "r" (0),
+ "r" (PRIMARY_CONTEXT), "i" (ASI_DMMU));
+
+ if (!(spitfire_get_itlb_data(i) & _PAGE_L)) {
+ __asm__ __volatile__("stxa %%g0, [%0] %1"
+ : /* no outputs */
+ : "r" (TLB_TAG_ACCESS), "i" (ASI_IMMU));
+ membar("#Sync");
+ spitfire_put_itlb_data(i, 0x0UL);
+ membar("#Sync");
+ }
}
+ } else if (tlb_type == cheetah) {
+ cheetah_flush_dtlb_all();
+ cheetah_flush_itlb_all();
}
__asm__ __volatile__("wrpr %0, 0, %%pstate"
: : "r" (pstate));
mmu_context_bmap[1] = 0;
mmu_context_bmap[2] = 0;
mmu_context_bmap[3] = 0;
- for(i = 4; i < CTX_BMAP_SLOTS; i += 4) {
+ for (i = 4; i < CTX_BMAP_SLOTS; i += 4) {
mmu_context_bmap[i + 0] = 0;
mmu_context_bmap[i + 1] = 0;
mmu_context_bmap[i + 2] = 0;
pmd_t *pmd;
pmd = (pmd_t *) __get_free_page(GFP_KERNEL);
- if(pmd) {
+ if (pmd) {
memset(pmd, 0, PAGE_SIZE);
pgd_set(pgd, pmd);
return pmd + offset;
{
int slot;
- printk ("Contents of itlb: ");
- for (slot = 0; slot < 14; slot++) printk (" ");
- printk ("%2x:%016lx,%016lx\n", 0, spitfire_get_itlb_tag(0), spitfire_get_itlb_data(0));
- for (slot = 1; slot < 64; slot+=3) {
- printk ("%2x:%016lx,%016lx %2x:%016lx,%016lx %2x:%016lx,%016lx\n",
- slot, spitfire_get_itlb_tag(slot), spitfire_get_itlb_data(slot),
- slot+1, spitfire_get_itlb_tag(slot+1), spitfire_get_itlb_data(slot+1),
- slot+2, spitfire_get_itlb_tag(slot+2), spitfire_get_itlb_data(slot+2));
- }
+ if (tlb_type == spitfire) {
+ printk ("Contents of itlb: ");
+ for (slot = 0; slot < 14; slot++) printk (" ");
+ printk ("%2x:%016lx,%016lx\n",
+ 0,
+ spitfire_get_itlb_tag(0), spitfire_get_itlb_data(0));
+ for (slot = 1; slot < 64; slot+=3) {
+ printk ("%2x:%016lx,%016lx %2x:%016lx,%016lx %2x:%016lx,%016lx\n",
+ slot,
+ spitfire_get_itlb_tag(slot), spitfire_get_itlb_data(slot),
+ slot+1,
+ spitfire_get_itlb_tag(slot+1), spitfire_get_itlb_data(slot+1),
+ slot+2,
+ spitfire_get_itlb_tag(slot+2), spitfire_get_itlb_data(slot+2));
+ }
+ } else if (tlb_type == cheetah) {
+ printk ("Contents of itlb0:\n");
+ for (slot = 0; slot < 16; slot+=2) {
+ printk ("%2x:%016lx,%016lx %2x:%016lx,%016lx\n",
+ slot,
+ cheetah_get_litlb_tag(slot), cheetah_get_litlb_data(slot),
+ slot+1,
+ cheetah_get_litlb_tag(slot+1), cheetah_get_litlb_data(slot+1));
+ }
+ printk ("Contents of itlb2:\n");
+ for (slot = 0; slot < 128; slot+=2) {
+ printk ("%2x:%016lx,%016lx %2x:%016lx,%016lx\n",
+ slot,
+ cheetah_get_itlb_tag(slot), cheetah_get_itlb_data(slot),
+ slot+1,
+ cheetah_get_itlb_tag(slot+1), cheetah_get_itlb_data(slot+1));
+ }
+ }
}
void sparc_ultra_dump_dtlb(void)
{
int slot;
- printk ("Contents of dtlb: ");
- for (slot = 0; slot < 14; slot++) printk (" ");
- printk ("%2x:%016lx,%016lx\n", 0, spitfire_get_dtlb_tag(0),
- spitfire_get_dtlb_data(0));
- for (slot = 1; slot < 64; slot+=3) {
- printk ("%2x:%016lx,%016lx %2x:%016lx,%016lx %2x:%016lx,%016lx\n",
- slot, spitfire_get_dtlb_tag(slot), spitfire_get_dtlb_data(slot),
- slot+1, spitfire_get_dtlb_tag(slot+1), spitfire_get_dtlb_data(slot+1),
- slot+2, spitfire_get_dtlb_tag(slot+2), spitfire_get_dtlb_data(slot+2));
- }
+ if (tlb_type == spitfire) {
+ printk ("Contents of dtlb: ");
+ for (slot = 0; slot < 14; slot++) printk (" ");
+ printk ("%2x:%016lx,%016lx\n", 0,
+ spitfire_get_dtlb_tag(0), spitfire_get_dtlb_data(0));
+ for (slot = 1; slot < 64; slot+=3) {
+ printk ("%2x:%016lx,%016lx %2x:%016lx,%016lx %2x:%016lx,%016lx\n",
+ slot,
+ spitfire_get_dtlb_tag(slot), spitfire_get_dtlb_data(slot),
+ slot+1,
+ spitfire_get_dtlb_tag(slot+1), spitfire_get_dtlb_data(slot+1),
+ slot+2,
+ spitfire_get_dtlb_tag(slot+2), spitfire_get_dtlb_data(slot+2));
+ }
+ } else if (tlb_type == cheetah) {
+ printk ("Contents of dtlb0:\n");
+ for (slot = 0; slot < 16; slot+=2) {
+ printk ("%2x:%016lx,%016lx %2x:%016lx,%016lx\n",
+ slot,
+ cheetah_get_ldtlb_tag(slot), cheetah_get_ldtlb_data(slot),
+ slot+1,
+ cheetah_get_ldtlb_tag(slot+1), cheetah_get_ldtlb_data(slot+1));
+ }
+ printk ("Contents of dtlb2:\n");
+ for (slot = 0; slot < 512; slot+=2) {
+ printk ("%2x:%016lx,%016lx %2x:%016lx,%016lx\n",
+ slot,
+ cheetah_get_dtlb_tag(slot), cheetah_get_dtlb_data(slot),
+ slot+1,
+ cheetah_get_dtlb_tag(slot+1), cheetah_get_dtlb_data(slot+1));
+ }
+ }
}
extern unsigned long cmdline_memory_size;
pt = phys_base | _PAGE_VALID | _PAGE_SZ4MB;
pt |= _PAGE_CP | _PAGE_CV | _PAGE_P | _PAGE_L | _PAGE_W;
__save_and_cli(flags);
- __asm__ __volatile__("
- stxa %1, [%0] %3
- stxa %2, [%5] %4
- membar #Sync
- flush %%g6
- nop
- nop
- nop"
- : /* No outputs */
- : "r" (TLB_TAG_ACCESS), "r" (alias_base), "r" (pt),
- "i" (ASI_DMMU), "i" (ASI_DTLB_DATA_ACCESS), "r" (61 << 3)
- : "memory");
- if (((unsigned long)&_end) >= KERNBASE + 0x340000) {
- second_alias_page = alias_base + 0x400000;
+ if (tlb_type == spitfire) {
__asm__ __volatile__("
stxa %1, [%0] %3
stxa %2, [%5] %4
nop
nop"
: /* No outputs */
- : "r" (TLB_TAG_ACCESS), "r" (second_alias_page), "r" (pt + 0x400000),
- "i" (ASI_DMMU), "i" (ASI_DTLB_DATA_ACCESS), "r" (60 << 3)
+ : "r" (TLB_TAG_ACCESS), "r" (alias_base), "r" (pt),
+ "i" (ASI_DMMU), "i" (ASI_DTLB_DATA_ACCESS), "r" (61 << 3)
: "memory");
+ if (((unsigned long)&_end) >= KERNBASE + 0x340000) {
+ second_alias_page = alias_base + 0x400000;
+ __asm__ __volatile__("
+ stxa %1, [%0] %3
+ stxa %2, [%5] %4
+ membar #Sync
+ flush %%g6
+ nop
+ nop
+ nop"
+ : /* No outputs */
+ : "r" (TLB_TAG_ACCESS), "r" (second_alias_page), "r" (pt + 0x400000),
+ "i" (ASI_DMMU), "i" (ASI_DTLB_DATA_ACCESS), "r" (60 << 3)
+ : "memory");
+ }
+ } else if (tlb_type == cheetah) {
+ __asm__ __volatile__("
+ stxa %1, [%0] %3
+ stxa %2, [%5] %4
+ membar #Sync
+ flush %%g6
+ nop
+ nop
+ nop"
+ : /* No outputs */
+ : "r" (TLB_TAG_ACCESS), "r" (alias_base), "r" (pt),
+ "i" (ASI_DMMU), "i" (ASI_DTLB_DATA_ACCESS), "r" ((0<<16) | (13<<3))
+ : "memory");
+ if (((unsigned long)&_end) >= KERNBASE + 0x340000) {
+ second_alias_page = alias_base + 0x400000;
+ __asm__ __volatile__("
+ stxa %1, [%0] %3
+ stxa %2, [%5] %4
+ membar #Sync
+ flush %%g6
+ nop
+ nop
+ nop"
+ : /* No outputs */
+ : "r" (TLB_TAG_ACCESS), "r" (second_alias_page), "r" (pt + 0x400000),
+ "i" (ASI_DMMU), "i" (ASI_DTLB_DATA_ACCESS), "r" ((0<<16) | (12<<3))
+ : "memory");
+ }
}
__restore_flags(flags);
-/* $Id: ultra.S,v 1.48 2000/11/06 06:59:04 davem Exp $
+/* $Id: ultra.S,v 1.49 2001/03/02 03:12:00 davem Exp $
* ultra.S: Don't expand these all over the place...
*
* Copyright (C) 1997, 2000 David S. Miller (davem@redhat.com)
/*IC5*/ rdpr %pstate, %g1
wrpr %g1, PSTATE_IE, %pstate
mov TLB_TAG_ACCESS, %g3
+ /* XXX Spitfire dependency... */
mov (62 << 3), %g2
/* Spitfire Errata #32 workaround. */
stx %g0, [%g4 + %lo(errata32_hwbug)]
2: add %g2, 1, %g2
+ /* XXX Spitfire dependency... */
cmp %g2, 63
ble,pt %icc, 1b
sll %g2, 3, %g3
* PARAMETERS: Event Event that generated an SCI.
*
* RETURN: Number of SCI's for requested event since last time
- * Sci_occured() was called for this event.
+ * Sci_occurred() was called for this event.
*
* DESCRIPTION: Checks to see if SCI has been generated from requested source
* since the last time this function was called.
/*
* Debugger threading model
* Use single threaded if the entire subsystem is contained in an application
- * Use multiple threaded when the the subsystem is running in the kernel.
+ * Use multiple threaded when the subsystem is running in the kernel.
*
* By default the model is single threaded if ACPI_APPLICATION is set,
* multi-threaded if ACPI_APPLICATION is not set.
* FUNCTION: Acpi_aml_store_object_to_index
*
* PARAMETERS: *Val_desc - Value to be stored
- * *Node - Named object to recieve the value
+ * *Node - Named object to receive the value
*
* RETURN: Status
*
* FUNCTION: Acpi_aml_store_object_to_node
*
* PARAMETERS: *Source_desc - Value to be stored
- * *Node - Named object to recieve the value
+ * *Node - Named object to receive the value
*
* RETURN: Status
*
}
else {
- /* Count of successfull INIs */
+ /* Count of successful INIs */
info->num_INI++;
}
lo->lo_device);
}
-static int lo_send(struct loop_device *lo, char *data, int len, loff_t pos)
+static int lo_send(struct loop_device *lo, struct buffer_head *bh, int bsize,
+ loff_t pos)
{
struct file *file = lo->lo_backing_file; /* kudos to NFsckingS */
struct address_space *mapping = file->f_dentry->d_inode->i_mapping;
struct address_space_operations *aops = mapping->a_ops;
struct page *page;
- char *kaddr;
+ char *kaddr, *data;
unsigned long index;
unsigned size, offset;
+ int len;
index = pos >> PAGE_CACHE_SHIFT;
offset = pos & (PAGE_CACHE_SIZE - 1);
+ len = bh->b_size;
+ data = bh->b_data;
while (len > 0) {
- int IV = index * (PAGE_CACHE_SIZE/lo->lo_blksize) + offset/lo->lo_blksize;
- IV >>= 2;
+ int IV = index * (PAGE_CACHE_SIZE/bsize) + offset/bsize;
size = PAGE_CACHE_SIZE - offset;
if (size > len)
size = len;
struct lo_read_data {
struct loop_device *lo;
char *data;
+ int bsize;
};
static int lo_read_actor(read_descriptor_t * desc, struct page *page, unsigned long offset, unsigned long size)
unsigned long count = desc->count;
struct lo_read_data *p = (struct lo_read_data*)desc->buf;
struct loop_device *lo = p->lo;
- int IV = page->index * (PAGE_CACHE_SIZE/lo->lo_blksize) + offset/lo->lo_blksize;
-
- IV >>= 2;
+ int IV = page->index * (PAGE_CACHE_SIZE/p->bsize) + offset/p->bsize;
if (size > count)
size = count;
return size;
}
-static int lo_receive(struct loop_device *lo, char *data, int len, loff_t pos)
+static int lo_receive(struct loop_device *lo, struct buffer_head *bh, int bsize,
+ loff_t pos)
{
struct lo_read_data cookie;
read_descriptor_t desc;
struct file *file;
cookie.lo = lo;
- cookie.data = data;
+ cookie.data = bh->b_data;
+ cookie.bsize = bsize;
desc.written = 0;
- desc.count = len;
+ desc.count = bh->b_size;
desc.buf = (char*)&cookie;
desc.error = 0;
spin_lock_irq(&lo->lo_lock);
return desc.error;
}
+static inline int loop_get_bs(struct loop_device *lo)
+{
+ int bs = 0;
+
+ if (blksize_size[MAJOR(lo->lo_device)])
+ bs = blksize_size[MAJOR(lo->lo_device)][MINOR(lo->lo_device)];
+ if (!bs)
+ bs = BLOCK_SIZE;
+
+ return bs;
+}
+
+static inline unsigned long loop_get_iv(struct loop_device *lo,
+ unsigned long sector)
+{
+ int bs = loop_get_bs(lo);
+ unsigned long offset, IV;
+
+ IV = sector / (bs >> 9) + lo->lo_offset / bs;
+ offset = ((sector % (bs >> 9)) << 9) + lo->lo_offset % bs;
+ if (offset >= bs)
+ IV++;
+
+ return IV;
+}
+
static int do_bh_filebacked(struct loop_device *lo, struct buffer_head *bh, int rw)
{
loff_t pos;
pos = ((loff_t) bh->b_rsector << 9) + lo->lo_offset;
if (rw == WRITE)
- ret = lo_send(lo, bh->b_data, bh->b_size, pos);
+ ret = lo_send(lo, bh, loop_get_bs(lo), pos);
else
- ret = lo_receive(lo, bh->b_data, bh->b_size, pos);
+ ret = lo_receive(lo, bh, loop_get_bs(lo), pos);
return ret;
}
lo->lo_bh = lo->lo_bhtail = bh;
spin_unlock_irqrestore(&lo->lo_lock, flags);
- atomic_inc(&lo->lo_pending);
up(&lo->lo_bh_mutex);
}
memset(bh, 0, sizeof(*bh));
bh->b_size = rbh->b_size;
- bh->b_dev = rbh->b_dev;
+ bh->b_dev = rbh->b_rdev;
spin_lock_irq(&lo->lo_lock);
bh->b_rdev = lo->lo_device;
spin_unlock_irq(&lo->lo_lock);
spin_lock_irq(&lo->lo_lock);
if (lo->lo_state != Lo_bound)
goto inactive;
+ atomic_inc(&lo->lo_pending);
spin_unlock_irq(&lo->lo_lock);
if (rw == WRITE) {
rbh = create_bounce(rw, rbh);
#endif
- if (lo->lo_blksize != rbh->b_size)
- lo->lo_blksize = rbh->b_size;
-
/*
* file backed, queue for loop_thread to handle
*/
*/
bh = loop_get_buffer(lo, rbh);
bh->b_private = rbh;
- IV = (bh->b_rsector / (lo->lo_blksize >> 9));
- IV += lo->lo_offset / lo->lo_blksize;
- IV >>= 2;
+ IV = loop_get_iv(lo, bh->b_rsector);
if (rw == WRITE) {
set_bit(BH_Dirty, &bh->b_state);
if (lo_do_transfer(lo, WRITE, bh->b_data, rbh->b_data, bh->b_size, IV))
return 0;
err:
+ if (atomic_dec_and_test(&lo->lo_pending))
+ up(&lo->lo_bh_mutex);
loop_put_buffer(bh);
out:
buffer_IO_error(rbh);
bh->b_end_io(bh, !ret);
} else {
struct buffer_head *rbh = bh->b_private;
- unsigned long IV;
-
- IV = (bh->b_rsector / (bh->b_size >> 9));
- IV += lo->lo_offset / bh->b_size;
- IV >>= 2;
+ unsigned long IV = loop_get_iv(lo, rbh->b_rsector);
ret = lo_do_transfer(lo, READ, bh->b_data, rbh->b_data,
bh->b_size, IV);
for (;;) {
down_interruptible(&lo->lo_bh_mutex);
+ /*
+ * could be upped because of tear-down, not because of
+ * pending work
+ */
if (!atomic_read(&lo->lo_pending))
break;
bh = loop_get_bh(lo);
- atomic_dec(&lo->lo_pending);
if (!bh) {
- printk("missing bh\n");
+ printk("loop: missing bh\n");
continue;
}
loop_handle_bh(lo, bh);
+
+ /*
+ * upped both for pending work and tear-down, lo_pending
+ * will hit zero then
+ */
+ if (atomic_dec_and_test(&lo->lo_pending))
+ break;
}
up(&lo->lo_sem);
|| !(lo_file->f_mode & FMODE_WRITE))
lo_flags |= LO_FLAGS_READ_ONLY;
- set_device_ro(dev, (lo_flags & LO_FLAGS_READ_ONLY)!=0);
+ set_device_ro(dev, (lo_flags & LO_FLAGS_READ_ONLY) != 0);
lo->lo_device = lo_device;
lo->lo_flags = lo_flags;
if (!bs)
bs = BLOCK_SIZE;
- lo->lo_blksize = bs;
set_blocksize(dev, bs);
lo->lo_bh = lo->lo_bhtail = NULL;
* 97-9-13 Cosmetic changes
* 98-5-13 Attempt to make 64-bit-clean on 64-bit machines
* 99-1-11 Attempt to make 64-bit-clean on 32-bit machines <ankry@mif.pg.gda.pl>
+ * 01-2-27 Fix to store proper blockcount for kernel (calculated using
+ * BLOCK_SIZE_BITS, not device blocksize) <aga@permonline.ru>
*
* possible FIXME: make set_sock / set_blksize / set_size / do_it one syscall
* why not: would need verify_area and friends, would share yet another
nbd_blksize_bits[dev]++;
temp >>= 1;
}
- nbd_sizes[dev] = nbd_bytesizes[dev] >> nbd_blksize_bits[dev];
- nbd_bytesizes[dev] = nbd_sizes[dev] << nbd_blksize_bits[dev];
+ nbd_bytesizes[dev] &= ~(nbd_blksizes[dev]-1);
+ nbd_sizes[dev] = nbd_bytesizes[dev] >> BLOCK_SIZE_BITS;
return 0;
case NBD_SET_SIZE:
- nbd_sizes[dev] = arg >> nbd_blksize_bits[dev];
- nbd_bytesizes[dev] = nbd_sizes[dev] << nbd_blksize_bits[dev];
+ nbd_bytesizes[dev] = arg & ~(nbd_blksizes[dev]-1);
+ nbd_sizes[dev] = nbd_bytesizes[dev] >> BLOCK_SIZE_BITS;
return 0;
case NBD_SET_SIZE_BLOCKS:
- nbd_sizes[dev] = arg;
- nbd_bytesizes[dev] = ((u64) arg) << nbd_blksize_bits[dev];
+ nbd_bytesizes[dev] = ((u64) arg) << nbd_blksize_bits[dev];
+ nbd_sizes[dev] = nbd_bytesizes[dev] >> BLOCK_SIZE_BITS;
return 0;
case NBD_DO_IT:
if (!lo->file)
nbd_blksizes[i] = 1024;
nbd_blksize_bits[i] = 10;
nbd_bytesizes[i] = 0x7ffffc00; /* 2GB */
- nbd_sizes[i] = nbd_bytesizes[i] >> nbd_blksize_bits[i];
+ nbd_sizes[i] = nbd_bytesizes[i] >> BLOCK_SIZE_BITS;
register_disk(NULL, MKDEV(MAJOR_NR,i), 1, &nbd_fops,
nbd_bytesizes[i]>>9);
}
fi
tristate ' Berkshire Products PC Watchdog' CONFIG_PCWATCHDOG
tristate ' Acquire SBC Watchdog Timer' CONFIG_ACQUIRE_WDT
+ tristate ' Advantech SBC Watchdog Timer' CONFIG_ADVANTECH_WDT
tristate ' SBC-60XX Watchdog Timer' CONFIG_60XX_WDT
tristate ' Mixcom Watchdog' CONFIG_MIXCOMWD
tristate ' Intel i810 TCO timer / Watchdog' CONFIG_I810_TCO
tristate ' NetWinder WB83C977 watchdog' CONFIG_977_WATCHDOG
fi
fi
+ tristate ' ZF MachZ Watchdog' CONFIG_MACHZ_WDT
fi
endmenu
fi
endmenu
-tristate '/dev/agpgart (AGP Support)' CONFIG_AGP $CONFIG_DRM_AGP
+dep_tristate '/dev/agpgart (AGP Support)' CONFIG_AGP $CONFIG_DRM_AGP
if [ "$CONFIG_AGP" != "n" ]; then
bool ' Intel 440LX/BX/GX and I815/I840/I850 support' CONFIG_AGP_INTEL
bool ' Intel I810/I815 (on-board) support' CONFIG_AGP_I810
obj-$(CONFIG_PCWATCHDOG) += pcwd.o
obj-$(CONFIG_ACQUIRE_WDT) += acquirewdt.o
+obj-$(CONFIG_ADVANTECH_WDT) += advantechwdt.o
obj-$(CONFIG_MIXCOMWD) += mixcomwd.o
obj-$(CONFIG_60XX_WDT) += sbc60xxwdt.o
obj-$(CONFIG_WDT) += wdt.o
obj-$(CONFIG_21285_WATCHDOG) += wdt285.o
obj-$(CONFIG_977_WATCHDOG) += wdt977.o
obj-$(CONFIG_I810_TCO) += i810-tco.o
+obj-$(CONFIG_MACHZ_WDT) += machzwd.o
obj-$(CONFIG_SOFT_WATCHDOG) += softdog.o
--- /dev/null
+/*
+ * Advantech Single Board Computer WDT driver for Linux 2.4.x
+ *
+ * (c) Copyright 2000-2001 Marek Michalkiewicz <marekm@linux.org.pl>
+ *
+ * Based on acquirewdt.c which is based on wdt.c.
+ * Original copyright messages:
+ *
+ * (c) Copyright 1996 Alan Cox <alan@redhat.com>, All Rights Reserved.
+ * http://www.redhat.com
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ * Neither Alan Cox nor CymruNet Ltd. admit liability nor provide
+ * warranty for any of this software. This material is provided
+ * "AS-IS" and at no charge.
+ *
+ * (c) Copyright 1995 Alan Cox <alan@redhat.com>
+ *
+ */
+
+#include <linux/config.h>
+#include <linux/module.h>
+#include <linux/version.h>
+#include <linux/types.h>
+#include <linux/errno.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/miscdevice.h>
+#include <linux/watchdog.h>
+#include <linux/slab.h>
+#include <linux/ioport.h>
+#include <linux/fcntl.h>
+#include <asm/io.h>
+#include <asm/uaccess.h>
+#include <asm/system.h>
+#include <linux/notifier.h>
+#include <linux/reboot.h>
+#include <linux/init.h>
+#include <linux/spinlock.h>
+#include <linux/smp_lock.h>
+
+static int advwdt_is_open;
+static spinlock_t advwdt_lock;
+
+/*
+ * You must set these - there is no sane way to probe for this board.
+ *
+ * To enable or restart, write the timeout value in seconds (1 to 63)
+ * to I/O port WDT_START. To disable, read I/O port WDT_STOP.
+ * Both are 0x443 for most boards (tested on a PCA-6276VE-00B1), but
+ * check your manual (at least the PCA-6159 seems to be different -
+ * the manual says WDT_STOP is 0x43, not 0x443).
+ * (0x43 is also a write-only control register for the 8254 timer!)
+ *
+ * TODO: module parameters to set the I/O port addresses and NOWAYOUT
+ * option at load time.
+ */
+
+#define WDT_STOP 0x443
+#define WDT_START 0x443
+
+#define WD_TIMO 60 /* 1 minute */
+
+/*
+ * Kernel methods.
+ */
+
+static void
+advwdt_ping(void)
+{
+ /* Write a watchdog value */
+ outb_p(WD_TIMO, WDT_START);
+}
+
+static ssize_t
+advwdt_write(struct file *file, const char *buf, size_t count, loff_t *ppos)
+{
+ /* Can't seek (pwrite) on this device */
+ if (ppos != &file->f_pos)
+ return -ESPIPE;
+
+ if (count) {
+ advwdt_ping();
+ return 1;
+ }
+ return 0;
+}
+
+static ssize_t
+advwdt_read(struct file *file, char *buf, size_t count, loff_t *ppos)
+{
+ return -EINVAL;
+}
+
+static int
+advwdt_ioctl(struct inode *inode, struct file *file, unsigned int cmd,
+ unsigned long arg)
+{
+ static struct watchdog_info ident = {
+ WDIOF_KEEPALIVEPING, 1, "Advantech WDT"
+ };
+
+ switch (cmd) {
+ case WDIOC_GETSUPPORT:
+ if (copy_to_user((struct watchdog_info *)arg, &ident, sizeof(ident)))
+ return -EFAULT;
+ break;
+
+ case WDIOC_GETSTATUS:
+ if (copy_to_user((int *)arg, &advwdt_is_open, sizeof(int)))
+ return -EFAULT;
+ break;
+
+ case WDIOC_KEEPALIVE:
+ advwdt_ping();
+ break;
+
+ default:
+ return -ENOIOCTLCMD;
+ }
+ return 0;
+}
+
+static int
+advwdt_open(struct inode *inode, struct file *file)
+{
+ switch (MINOR(inode->i_rdev)) {
+ case WATCHDOG_MINOR:
+ spin_lock(&advwdt_lock);
+ if (advwdt_is_open) {
+ spin_unlock(&advwdt_lock);
+ return -EBUSY;
+ }
+ /*
+ * Activate
+ */
+
+ advwdt_is_open = 1;
+ advwdt_ping();
+ spin_unlock(&advwdt_lock);
+ return 0;
+ default:
+ return -ENODEV;
+ }
+}
+
+static int
+advwdt_close(struct inode *inode, struct file *file)
+{
+ lock_kernel();
+ if (MINOR(inode->i_rdev) == WATCHDOG_MINOR) {
+ spin_lock(&advwdt_lock);
+#ifndef CONFIG_WATCHDOG_NOWAYOUT
+ inb_p(WDT_STOP);
+#endif
+ advwdt_is_open = 0;
+ spin_unlock(&advwdt_lock);
+ }
+ unlock_kernel();
+ return 0;
+}
+
+/*
+ * Notifier for system down
+ */
+
+static int
+advwdt_notify_sys(struct notifier_block *this, unsigned long code,
+ void *unused)
+{
+ if (code == SYS_DOWN || code == SYS_HALT) {
+ /* Turn the WDT off */
+ inb_p(WDT_STOP);
+ }
+ return NOTIFY_DONE;
+}
+
+/*
+ * Kernel Interfaces
+ */
+
+static struct file_operations advwdt_fops = {
+ owner: THIS_MODULE,
+ read: advwdt_read,
+ write: advwdt_write,
+ ioctl: advwdt_ioctl,
+ open: advwdt_open,
+ release: advwdt_close,
+};
+
+static struct miscdevice advwdt_miscdev = {
+ WATCHDOG_MINOR,
+ "watchdog",
+ &advwdt_fops
+};
+
+/*
+ * The WDT needs to learn about soft shutdowns in order to
+ * turn the timebomb registers off.
+ */
+
+static struct notifier_block advwdt_notifier = {
+ advwdt_notify_sys,
+ NULL,
+ 0
+};
+
+static int __init
+advwdt_init(void)
+{
+ printk("WDT driver for Advantech single board computer initialising.\n");
+
+ spin_lock_init(&advwdt_lock);
+ misc_register(&advwdt_miscdev);
+#if WDT_START != WDT_STOP
+ request_region(WDT_STOP, 1, "Advantech WDT");
+#endif
+ request_region(WDT_START, 1, "Advantech WDT");
+ register_reboot_notifier(&advwdt_notifier);
+ return 0;
+}
+
+static void __exit
+advwdt_exit(void)
+{
+ misc_deregister(&advwdt_miscdev);
+ unregister_reboot_notifier(&advwdt_notifier);
+#if WDT_START != WDT_STOP
+ release_region(WDT_STOP,1);
+#endif
+ release_region(WDT_START,1);
+}
+
+module_init(advwdt_init);
+module_exit(advwdt_exit);
+
+/* end of advantechwdt.c */
return 0;
}
-static void __exit agp_frontend_cleanup(void)
+void __exit agp_frontend_cleanup(void)
{
misc_deregister(&agp_miscdev);
}
+
--- /dev/null
+/*
+ * MachZ ZF-Logic Watchdog Timer driver for Linux
+ *
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ * The author does NOT admit liability nor provide warranty for
+ * any of this software. This material is provided "AS-IS" in
+ * the hope that it may be useful for others.
+ *
+ * Author: Fernando Fuganti <fuganti@conectiva.com.br>
+ *
+ * Based on sbc60xxwdt.c by Jakob Oestergaard
+ *
+ *
+ * We have two timers (wd#1, wd#2) driven by a 32 KHz clock with the
+ * following periods:
+ * wd#1 - 2 seconds;
+ * wd#2 - 7.2 ms;
+ * After the expiration of wd#1, it can generate a NMI, SCI, SMI, or
+ * a system RESET and it starts wd#2 that unconditionally will RESET
+ * the system when the counter reaches zero.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/version.h>
+#include <linux/types.h>
+#include <linux/errno.h>
+#include <linux/kernel.h>
+#include <linux/timer.h>
+#include <linux/sched.h>
+#include <linux/miscdevice.h>
+#include <linux/watchdog.h>
+#include <linux/malloc.h>
+#include <linux/ioport.h>
+#include <linux/fcntl.h>
+#include <linux/smp_lock.h>
+#include <asm/io.h>
+#include <asm/uaccess.h>
+#include <asm/system.h>
+#include <linux/notifier.h>
+#include <linux/reboot.h>
+#include <linux/init.h>
+
+
+/* ports */
+#define ZF_IOBASE 0x218
+#define INDEX 0x218
+#define DATA_B 0x219
+#define DATA_W 0x21A
+#define DATA_D 0x21A
+
+/* indexes */ /* size */
+#define ZFL_VERSION 0x02 /* 16 */
+#define CONTROL 0x10 /* 16 */
+#define STATUS 0x12 /* 8 */
+#define COUNTER_1 0x0C /* 16 */
+#define COUNTER_2 0x0E /* 8 */
+#define PULSE_LEN 0x0F /* 8 */
+
+/* controls */
+#define ENABLE_WD1 0x0001
+#define ENABLE_WD2 0x0002
+#define RESET_WD1 0x0010
+#define RESET_WD2 0x0020
+#define GEN_SCI 0x0100
+#define GEN_NMI 0x0200
+#define GEN_SMI 0x0400
+#define GEN_RESET 0x0800
+
+
+/* utilities */
+
+#define WD1 0
+#define WD2 1
+
+#define zf_writew(port, data) { outb(port, INDEX); outw(data, DATA_W); }
+#define zf_writeb(port, data) { outb(port, INDEX); outb(data, DATA_B); }
+#define zf_get_ZFL_version() zf_readw(ZFL_VERSION)
+
+
+/*
+ * Read a 16-bit ZF-Logic register: select it by writing the index to
+ * the INDEX port, then read the word back from the DATA_W port.
+ */
+static unsigned short zf_readw(unsigned char port)
+{
+ outb(port, INDEX);
+ return inw(DATA_W);
+}
+
+/*
+ * Read an 8-bit ZF-Logic register via the INDEX/DATA_B port pair.
+ * Note: declared to return unsigned short even though only a byte is
+ * read; harmless since inb() yields 0..255.
+ */
+static unsigned short zf_readb(unsigned char port)
+{
+ outb(port, INDEX);
+ return inb(DATA_B);
+}
+
+
+MODULE_AUTHOR("Fernando Fuganti <fuganti@conectiva.com.br>");
+MODULE_DESCRIPTION("MachZ ZF-Logic Watchdog driver");
+MODULE_PARM(action, "i");
+MODULE_PARM_DESC(action, "after watchdog resets, generate: 0 = RESET(*) 1 = SMI 2 = NMI 3 = SCI");
+
+#define PFX "machzwd"
+
+static struct watchdog_info zf_info = {
+ options: WDIOF_KEEPALIVEPING,
+ firmware_version: 1,
+ identity: "ZF-Logic watchdog"
+};
+
+
+/*
+ * action refers to action taken when watchdog resets
+ * 0 = GEN_RESET
+ * 1 = GEN_SMI
+ * 2 = GEN_NMI
+ * 3 = GEN_SCI
+ * defaults to GEN_RESET (0)
+ */
+static int action = 0;
+static int zf_action = GEN_RESET;
+static int zf_is_open = 0;
+static int zf_expect_close = 0;
+static spinlock_t zf_lock;
+static struct timer_list zf_timer;
+static unsigned long next_heartbeat = 0;
+
+
+/* timeout for user land heart beat (10 seconds) */
+#define ZF_USER_TIMEO (HZ*10)
+
+/* timeout for hardware watchdog (~500ms) */
+#define ZF_HW_TIMEO (HZ/2)
+
+/* number of ticks on WD#1 (driven by a 32KHz clock, 2s) */
+#define ZF_CTIMEOUT 0xffff
+
+#ifndef ZF_DEBUG
+# define dprintk(format, args...)
+#else
+# define dprintk(format, args...) printk(KERN_DEBUG PFX ":" __FUNCTION__ ":%d: " format, __LINE__ , ## args)
+#endif
+
+
+/* STATUS register functions */
+
+/* Read the 8-bit STATUS register. */
+static inline unsigned char zf_get_status(void)
+{
+ return zf_readb(STATUS);
+}
+
+/* Write the 8-bit STATUS register. */
+static inline void zf_set_status(unsigned char new)
+{
+ zf_writeb(STATUS, new);
+}
+
+
+/* CONTROL register functions */
+
+/* Read the 16-bit CONTROL register (ENABLE_/RESET_/GEN_ bits). */
+static inline unsigned short zf_get_control(void)
+{
+ return zf_readw(CONTROL);
+}
+
+/* Write the 16-bit CONTROL register. */
+static inline void zf_set_control(unsigned short new)
+{
+ zf_writew(CONTROL, new);
+}
+
+
+/* WD#? counter functions */
+/*
+ * Just get current counter value
+ */
+
+/*
+ * Return the current counter value of watchdog n (WD1: 16-bit,
+ * WD2: 8-bit); 0 for an unknown watchdog number.
+ * NOTE(review): unlike the other helpers this one is not `static`,
+ * which leaks the symbol into the global namespace — confirm whether
+ * that is intentional.
+ */
+inline unsigned short zf_get_timer(unsigned char n)
+{
+ switch(n){
+ case WD1:
+ return zf_readw(COUNTER_1);
+ case WD2:
+ return zf_readb(COUNTER_2);
+ default:
+ return 0;
+ }
+}
+
+/*
+ * Just set counter value
+ */
+
+/*
+ * Load a new counter value into watchdog n. WD2's counter is 8-bit,
+ * so the value is clamped to 0xff for it.
+ * NOTE(review): the WD1 case has no break and falls through into the
+ * WD2 case, so setting WD1 also reloads WD2's counter — this looks
+ * deliberate (WD2 is the backup timer) but should be confirmed.
+ */
+static inline void zf_set_timer(unsigned short new, unsigned char n)
+{
+ switch(n){
+ case WD1:
+ zf_writew(COUNTER_1, new);
+ case WD2:
+ zf_writeb(COUNTER_2, new > 0xff ? 0xff : new);
+ default:
+ return;
+ }
+}
+
+/*
+ * stop hardware timer
+ */
+/*
+ * Stop the watchdog: cancel the internal keepalive timer and clear
+ * both hardware enable bits in the CONTROL register.
+ */
+static void zf_timer_off(void)
+{
+ unsigned int ctrl_reg = 0;
+
+ /* stop internal ping */
+ del_timer(&zf_timer);
+
+ /* stop watchdog timer */
+ ctrl_reg = zf_get_control();
+ /* NOTE(review): the OR below is immediately undone by the AND-clear
+ * on the next line, so it has no effect — likely leftover code. */
+ ctrl_reg |= (ENABLE_WD1|ENABLE_WD2); /* disable wd1 and wd2 */
+ ctrl_reg &= ~(ENABLE_WD1|ENABLE_WD2);
+ zf_set_control(ctrl_reg);
+
+ printk(PFX ": Watchdog timer is now disabled\n");
+}
+
+
+/*
+ * start hardware timer
+ */
+/*
+ * Arm the watchdog: program the reset pulse length and WD1 counter,
+ * start the internal kernel timer that services the hardware every
+ * ZF_HW_TIMEO, give userland a fresh ZF_USER_TIMEO heartbeat window,
+ * and enable WD1 together with the configured expiry action
+ * (zf_action: RESET/SMI/NMI/SCI).
+ */
+static void zf_timer_on(void)
+{
+ unsigned int ctrl_reg = 0;
+
+ zf_writeb(PULSE_LEN, 0xff);
+
+ zf_set_timer(ZF_CTIMEOUT, WD1);
+
+ /* user land ping */
+ next_heartbeat = jiffies + ZF_USER_TIMEO;
+
+ /* start the timer for internal ping */
+ zf_timer.expires = jiffies + ZF_HW_TIMEO;
+
+ add_timer(&zf_timer);
+
+ /* start watchdog timer */
+ ctrl_reg = zf_get_control();
+ ctrl_reg |= (ENABLE_WD1|zf_action);
+ zf_set_control(ctrl_reg);
+
+ printk(PFX ": Watchdog timer is now enabled\n");
+}
+
+
+/*
+ * Internal keepalive, run from zf_timer every ZF_HW_TIMEO. Reloads
+ * WD2 (the short backup timer) unconditionally; then, while the
+ * userland heartbeat is still fresh, pulses RESET_WD1 (0 -> 1 -> 0)
+ * to reload WD1 and re-arms itself. Once userland stops writing,
+ * the re-arm is skipped and the hardware is left to expire and take
+ * the configured action.
+ */
+static void zf_ping(unsigned long data)
+{
+ unsigned int ctrl_reg = 0;
+
+ zf_writeb(COUNTER_2, 0xff);
+
+ if(time_before(jiffies, next_heartbeat)){
+
+ dprintk("time_before: %ld\n", next_heartbeat - jiffies);
+
+ /*
+ * reset event is activated by transition from 0 to 1 on
+ * RESET_WD1 bit and we assume that it is already zero...
+ */
+ ctrl_reg = zf_get_control();
+ ctrl_reg |= RESET_WD1;
+ zf_set_control(ctrl_reg);
+
+ /* ...and nothing changes until here */
+ ctrl_reg &= ~(RESET_WD1);
+ zf_set_control(ctrl_reg);
+
+ zf_timer.expires = jiffies + ZF_HW_TIMEO;
+ add_timer(&zf_timer);
+ }else{
+ printk(PFX ": I will reset your machine\n");
+ }
+}
+
+/*
+ * Userland heartbeat: any write refreshes next_heartbeat. Unless
+ * CONFIG_WATCHDOG_NOWAYOUT is set, the data is scanned for the magic
+ * close character 'V', which arms zf_expect_close so that a following
+ * release actually stops the watchdog (see zf_close).
+ * Returns 1 if any data was written, 0 for an empty write.
+ */
+static ssize_t zf_write(struct file *file, const char *buf, size_t count,
+ loff_t *ppos)
+{
+ /* Can't seek (pwrite) on this device */
+ if (ppos != &file->f_pos)
+ return -ESPIPE;
+
+ /* See if we got the magic character */
+ if(count){
+
+/*
+ * no need to check for close confirmation
+ * no way to disable watchdog ;)
+ */
+#ifndef CONFIG_WATCHDOG_NOWAYOUT
+ size_t ofs;
+
+ /*
+ * note: just in case someone wrote the magic character
+ * five months ago...
+ */
+ zf_expect_close = 0;
+
+ /* now scan */
+ /* NOTE(review): buf is a userspace pointer dereferenced
+ * directly rather than via get_user()/copy_from_user();
+ * confirm this is safe on all targeted architectures. */
+ for(ofs = 0; ofs != count; ofs++){
+ if(buf[ofs] == 'V'){
+ zf_expect_close = 1;
+ dprintk("zf_expect_close 1\n");
+ }
+ }
+#endif
+ /*
+ * Well, anyhow someone wrote to us,
+ * we should return that favour
+ */
+ next_heartbeat = jiffies + ZF_USER_TIMEO;
+ dprintk("user ping at %ld\n", jiffies);
+
+ return 1;
+ }
+
+ return 0;
+}
+
+/* Reading the watchdog device is not supported. */
+static ssize_t zf_read(struct file *file, char *buf, size_t count,
+ loff_t *ppos)
+{
+ return -EINVAL;
+}
+
+
+
+/*
+ * Watchdog ioctl interface:
+ * WDIOC_GETSUPPORT - copy zf_info (capabilities) to userland
+ * WDIOC_GETSTATUS - copy an int status to userland
+ * WDIOC_KEEPALIVE - service the hardware immediately via zf_ping()
+ * Anything else returns -ENOIOCTLCMD.
+ * NOTE(review): GETSTATUS returns the zf_is_open flag rather than a
+ * WDIOF_* status word — confirm against the watchdog API.
+ */
+static int zf_ioctl(struct inode *inode, struct file *file, unsigned int cmd,
+ unsigned long arg)
+{
+ int ret;
+
+ switch(cmd){
+ case WDIOC_GETSUPPORT:
+ ret = copy_to_user((struct watchdog_info *)arg,
+ &zf_info, sizeof(zf_info));
+ if(ret)
+ return -EFAULT;
+ break;
+
+ case WDIOC_GETSTATUS:
+ ret = copy_to_user((int *)arg, &zf_is_open,
+ sizeof(int));
+ if(ret)
+ return -EFAULT;
+ break;
+
+ case WDIOC_KEEPALIVE:
+ zf_ping(0);
+ break;
+
+ default:
+ return -ENOIOCTLCMD;
+ }
+
+ return 0;
+}
+
+/*
+ * Open the watchdog device. Only one opener is allowed at a time
+ * (zf_is_open guarded by zf_lock); a successful open arms the
+ * hardware via zf_timer_on(). With NOWAYOUT the module refcount is
+ * pinned so the driver can never be unloaded once opened.
+ */
+static int zf_open(struct inode *inode, struct file *file)
+{
+ switch(MINOR(inode->i_rdev)){
+ case WATCHDOG_MINOR:
+ spin_lock(&zf_lock);
+ if(zf_is_open){
+ spin_unlock(&zf_lock);
+ return -EBUSY;
+ }
+
+#ifdef CONFIG_WATCHDOG_NOWAYOUT
+ MOD_INC_USE_COUNT;
+#endif
+ zf_is_open = 1;
+
+ spin_unlock(&zf_lock);
+
+ zf_timer_on();
+
+ return 0;
+ default:
+ return -ENODEV;
+ }
+}
+
+/*
+ * Release the watchdog device. If the magic character was written
+ * (zf_expect_close) the hardware is stopped cleanly; otherwise only
+ * the internal kernel timer is cancelled, leaving the hardware armed
+ * so the machine will still reset — the "magic close" protection
+ * against the daemon being killed.
+ */
+static int zf_close(struct inode *inode, struct file *file)
+{
+ if(MINOR(inode->i_rdev) == WATCHDOG_MINOR){
+
+ if(zf_expect_close){
+ zf_timer_off();
+ } else {
+ del_timer(&zf_timer);
+ printk(PFX ": device file closed unexpectedly. Will not stop the WDT!\n");
+ }
+
+ spin_lock(&zf_lock);
+ zf_is_open = 0;
+ spin_unlock(&zf_lock);
+
+ zf_expect_close = 0;
+ }
+
+ return 0;
+}
+
+/*
+ * Notifier for system down
+ */
+
+/*
+ * Reboot notifier: disarm the watchdog on shutdown or halt so the
+ * hardware does not fire while the system is going down.
+ */
+static int zf_notify_sys(struct notifier_block *this, unsigned long code,
+ void *unused)
+{
+ if(code == SYS_DOWN || code == SYS_HALT){
+ zf_timer_off();
+ }
+
+ return NOTIFY_DONE;
+}
+
+
+
+
+static struct file_operations zf_fops = {
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,3,34)
+ owner: THIS_MODULE,
+#endif
+ read: zf_read,
+ write: zf_write,
+ ioctl: zf_ioctl,
+ open: zf_open,
+ release: zf_close,
+};
+
+static struct miscdevice zf_miscdev = {
+ WATCHDOG_MINOR,
+ "watchdog",
+ &zf_fops
+};
+
+
+/*
+ * The device needs to learn about soft shutdowns in order to
+ * turn the timebomb registers off.
+ */
+static struct notifier_block zf_notifier = {
+ zf_notify_sys,
+ NULL,
+ 0
+};
+
+/*
+ * Log the configured expiry action; act indexes str[] and must be
+ * 0..3 (validated by zf_init before calling).
+ */
+static void __init zf_show_action(int act)
+{
+ char *str[] = { "RESET", "SMI", "NMI", "SCI" };
+
+ printk(PFX ": Watchdog using action = %s\n", str[act]);
+}
+
+/*
+ * Module init: probe for the ZF-Logic chip, validate the `action`
+ * module parameter, register the watchdog misc device, reserve the
+ * I/O region, hook the reboot notifier and prepare (but not start)
+ * the internal keepalive timer. Returns 0 or a negative errno;
+ * error paths unwind in reverse order via the goto labels.
+ */
+int __init zf_init(void)
+{
+ int ret;
+
+ printk(PFX ": MachZ ZF-Logic Watchdog driver initializing.\n");
+
+ ret = zf_get_ZFL_version();
+ printk("%#x\n", ret);
+ /*
+ * 0x0000 / 0xffff are the floating-bus readings seen when no
+ * ZF-Logic hardware is present. The previous test used
+ * "ret != 0xffff", which rejected every valid version id and
+ * accepted only the no-hardware value — inverted logic.
+ */
+ if((!ret) || (ret == 0xffff)){
+ printk(PFX ": no ZF-Logic found\n");
+ return -ENODEV;
+ }
+
+ /* map action 0..3 onto GEN_RESET/GEN_SMI/GEN_NMI/GEN_SCI by
+ * shifting GEN_RESET right; out-of-range values fall back to 0 */
+ if((action <= 3) && (action >= 0)){
+ zf_action = zf_action>>action;
+ } else
+ action = 0;
+
+ zf_show_action(action);
+
+ spin_lock_init(&zf_lock);
+
+ ret = misc_register(&zf_miscdev);
+ if (ret){
+ printk(KERN_ERR "can't misc_register on minor=%d\n",
+ WATCHDOG_MINOR);
+ goto out;
+ }
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,3,3)
+ if(check_region(ZF_IOBASE, 3)){
+#else
+ if(!request_region(ZF_IOBASE, 3, "MachZ ZFL WDT")){
+#endif
+
+ printk(KERN_ERR "cannot reserve I/O ports at %d\n",
+ ZF_IOBASE);
+ ret = -EBUSY;
+ goto no_region;
+ }
+
+/* pre-2.3.3 kernels: check_region()+request_region() pair, and no
+ * __exit section support, hence the empty compatibility define */
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,3,3)
+ request_region(ZF_IOBASE, 3, "MachZ ZFL WDT");
+#define __exit
+#endif
+
+ ret = register_reboot_notifier(&zf_notifier);
+ if(ret){
+ printk(KERN_ERR "can't register reboot notifier (err=%d)\n",
+ ret);
+ goto no_reboot;
+ }
+
+ zf_set_status(0);
+ zf_set_control(0);
+
+ /* this is the timer that will do the hard work */
+ init_timer(&zf_timer);
+ zf_timer.function = zf_ping;
+ zf_timer.data = 0;
+
+ return 0;
+
+no_reboot:
+ release_region(ZF_IOBASE, 3);
+no_region:
+ misc_deregister(&zf_miscdev);
+out:
+ return ret;
+}
+
+
+/*
+ * Module unload: stop the watchdog hardware and internal timer, then
+ * release the misc device, reboot notifier and I/O region.
+ */
+void __exit zf_exit(void)
+{
+ zf_timer_off();
+
+ misc_deregister(&zf_miscdev);
+ unregister_reboot_notifier(&zf_notifier);
+ release_region(ZF_IOBASE, 3);
+}
+
+module_init(zf_init);
+module_exit(zf_exit);
# PCMCIA character device configuration
#
-if [ "$CONFIG_SERIAL" = "n" ]; then
- define_tristate CONFIG_PCMCIA_SERIAL n
-else
- if [ "$CONFIG_SERIAL" = "m" -o "$CONFIG_PCMCIA" = "m" ]; then
- define_tristate CONFIG_PCMCIA_SERIAL m
- else
- define_tristate CONFIG_PCMCIA_SERIAL y
- fi
-fi
-
-if [ "$CONFIG_PCMCIA_SERIAL" != "n" ]; then
- mainmenu_option next_comment
- comment 'PCMCIA character device support'
+mainmenu_option next_comment
+comment 'PCMCIA character devices'
- dep_tristate 'PCMCIA serial device support' CONFIG_PCMCIA_SERIAL_CS $CONFIG_PCMCIA_SERIAL
- if [ "$CONFIG_CARDBUS" = "y" ]; then
- dep_tristate 'CardBus serial device support' CONFIG_PCMCIA_SERIAL_CB $CONFIG_PCMCIA_SERIAL
- fi
+dep_tristate 'PCMCIA serial device support' CONFIG_PCMCIA_SERIAL_CS $CONFIG_SERIAL
+if [ "$CONFIG_PCMCIA_SERIAL_CS" = "y" ]; then
+ define_bool CONFIG_PCMCIA_CHRDEV y
+fi
- if [ "$CONFIG_PCMCIA_SERIAL_CS" = "y" -o \
- "$CONFIG_PCMCIA_SERIAL_CB" = "y" ]; then
- define_bool CONFIG_PCMCIA_CHRDEV y
- fi
+endmenu
- endmenu
-fi
obj- :=
obj-$(CONFIG_PCMCIA_SERIAL_CS) += serial_cs.o
-obj-$(CONFIG_PCMCIA_SERIAL_CB) += serial_cb.o
include $(TOPDIR)/Rules.make
+++ /dev/null
-/*======================================================================
-
- A driver for CardBus serial devices
-
- serial_cb.c 1.20 2000/08/07 19:02:03
-
- Copyright 1998, 1999 by Donald Becker and David Hinds
-
- This software may be used and distributed according to the terms
- of the GNU General Public License, incorporated herein by reference.
- All other rights reserved.
-
- This driver is an activator for CardBus serial cards, as
- found on multifunction (e.g. Ethernet and Modem) CardBus cards.
-
- Donald Becker may be reached as becker@CESDIS.edu, or C/O
- USRA Center of Excellence in Space Data and Information Sciences
- Code 930.5, NASA Goddard Space Flight Center, Greenbelt MD 20771
- David Hinds may be reached at dahinds@users.sourceforge.net
-
-======================================================================*/
-
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/init.h>
-#include <linux/sched.h>
-#include <linux/slab.h>
-#include <linux/string.h>
-#include <linux/tty.h>
-#include <linux/serial.h>
-#include <linux/major.h>
-#include <linux/pci.h>
-#include <asm/io.h>
-
-#include <pcmcia/driver_ops.h>
-
-#ifdef PCMCIA_DEBUG
-static int pc_debug = PCMCIA_DEBUG;
-MODULE_PARM(pc_debug, "i");
-#define DEBUG(n, args...) if (pc_debug>(n)) printk(KERN_DEBUG args)
-static char *version =
-"serial_cb.c 1.20 2000/08/07 19:02:03 (David Hinds)";
-#else
-#define DEBUG(n, args...)
-#endif
-
-/*======================================================================
-
- Card-specific configuration hacks
-
-======================================================================*/
-
-static void device_setup(struct pci_dev *pdev, u_int ioaddr)
-{
- u_short a, b;
-
- a = pdev->subsystem_vendor;
- b = pdev->subsystem_device;
- if (((a == 0x13a2) && (b == 0x8007)) ||
- ((a == 0x1420) && (b == 0x8003))) {
- /* Ositech, Psion 83c175-based cards */
- DEBUG(0, " 83c175 NVCTL_m = 0x%4.4x.\n", inl(ioaddr+0x80));
- outl(0x4C00, ioaddr + 0x80);
- outl(0x4C80, ioaddr + 0x80);
- }
- DEBUG(0, " modem registers are %2.2x %2.2x %2.2x "
- "%2.2x %2.2x %2.2x %2.2x %2.2x %2.2x.\n",
- inb(ioaddr + 0), inb(ioaddr + 1), inb(ioaddr + 2),
- inb(ioaddr + 3), inb(ioaddr + 4), inb(ioaddr + 5),
- inb(ioaddr + 6), inb(ioaddr + 7), inb(ioaddr + 8));
-}
-
-/*======================================================================
-
- serial_attach() creates a serial device "instance" and registers
- it with the kernel serial driver, and serial_detach() unregisters
- an instance.
-
-======================================================================*/
-
-static dev_node_t *serial_attach(dev_locator_t *loc)
-{
- u_int io;
- u_char irq;
- int line;
- struct serial_struct serial;
- struct pci_dev *pdev;
- dev_node_t *node;
-
- MOD_INC_USE_COUNT;
-
- if (loc->bus != LOC_PCI) goto err_out;
- pdev = pci_find_slot (loc->b.pci.bus, loc->b.pci.devfn);
- if (!pdev) goto err_out;
- if (pci_enable_device(pdev)) goto err_out;
-
- printk(KERN_INFO "serial_attach(bus %d, fn %d)\n", pdev->bus->number, pdev->devfn);
- io = pci_resource_start (pdev, 0);
- irq = pdev->irq;
- if (!(pci_resource_flags(pdev, 0) & IORESOURCE_IO)) {
- printk(KERN_NOTICE "serial_cb: PCI base address 0 is not IO\n");
- goto err_out;
- }
- device_setup(pdev, io);
- memset(&serial, 0, sizeof(serial));
- serial.port = io; serial.irq = irq;
- serial.flags = ASYNC_SKIP_TEST | ASYNC_SHARE_IRQ;
-
- /* Some devices seem to need extra time */
- __set_current_state(TASK_UNINTERRUPTIBLE);
- schedule_timeout(HZ/50);
-
- line = register_serial(&serial);
- if (line < 0) {
- printk(KERN_NOTICE "serial_cb: register_serial() at 0x%04x, "
- "irq %d failed\n", serial.port, serial.irq);
- goto err_out;
- }
-
- node = kmalloc(sizeof(dev_node_t), GFP_KERNEL);
- if (!node)
- goto err_out_unregister;
- sprintf(node->dev_name, "ttyS%d", line);
- node->major = TTY_MAJOR; node->minor = 0x40 + line;
- node->next = NULL;
- return node;
-
-err_out_unregister:
- unregister_serial(line);
-err_out:
- MOD_DEC_USE_COUNT;
- return NULL;
-}
-
-static void serial_detach(dev_node_t *node)
-{
- DEBUG(0, "serial_detach(ttyS%02d)\n", node->minor - 0x40);
- unregister_serial(node->minor - 0x40);
- kfree(node);
- MOD_DEC_USE_COUNT;
-}
-
-/*====================================================================*/
-
-struct driver_operations serial_ops = {
- "serial_cb", serial_attach, NULL, NULL, serial_detach
-};
-
-static int __init init_serial_cb(void)
-{
- DEBUG(0, "%s\n", version);
- register_driver(&serial_ops);
- return 0;
-}
-
-static void __exit exit_serial_cb(void)
-{
- DEBUG(0, "serial_cb: unloading\n");
- unregister_driver(&serial_ops);
-}
-
-module_init(init_serial_cb);
-module_exit(exit_serial_cb);
#endif
#define NEW_MODULES
-#ifdef LOCAL_ROCKET_H /* We're building standalone */
-#define MODULE
-#endif
-
-#if defined(NEW_MODULES) && defined(LOCAL_ROCKET_H)
-#ifdef MODVERSIONS
-#include <linux/modversions.h>
-#endif
-#else /* !NEW_MODULES */
-#ifdef MODVERSIONS
-#define MODULE
-#endif
-#endif /* NEW_MODULES */
#include <linux/module.h>
#include <linux/errno.h>
* 7/00: fix some returns on failure not using MOD_DEC_USE_COUNT.
* Arnaldo Carvalho de Melo <acme@conectiva.com.br>
*
- * 10/00: add in optional hardware flow control for serial console.
- * Kanoj Sarcar <kanoj@sgi.com>
+ * 10/00: add in optional software flow control for serial console.
+ * Kanoj Sarcar <kanoj@sgi.com> (Modified by Theodore Ts'o)
*
- * This module exports the following rs232 io functions:
- *
- * int rs_init(void);
*/
-static char *serial_version = "5.02";
-static char *serial_revdate = "2000-08-09";
+static char *serial_version = "5.05";
+static char *serial_revdate = "2000-12-13";
/*
* Serial driver configuration section. Here are the various options:
#include <linux/ptrace.h>
#include <linux/ioport.h>
#include <linux/mm.h>
-#include <linux/malloc.h>
+#include <linux/slab.h>
#if (LINUX_VERSION_CODE >= 131343)
#include <linux/init.h>
#endif
#define NR_PCI_BOARDS 8
static struct pci_board_inst serial_pci_board[NR_PCI_BOARDS];
-static int serial_pci_board_idx;
#ifndef IS_PCI_REGION_IOPORT
#define IS_PCI_REGION_IOPORT(dev, r) (pci_resource_flags((dev), (r)) & \
{
struct tty_struct *tty = info->tty;
unsigned char ch;
- int ignored = 0;
struct async_icount *icount;
+ int max_count = 256;
icount = &info->state->icount;
do {
icount->overrun++;
/*
- * Now check to see if character should be
- * ignored, and mask off conditions which
- * should be ignored.
+ * Mask off conditions which should be ignored.
*/
- if (*status & info->ignore_status_mask) {
- if (++ignored > 100)
- break;
- goto ignore_char;
- }
*status &= info->read_status_mask;
#ifdef CONFIG_SERIAL_CONSOLE
*tty->flip.flag_buf_ptr = TTY_PARITY;
else if (*status & UART_LSR_FE)
*tty->flip.flag_buf_ptr = TTY_FRAME;
- if (*status & UART_LSR_OE) {
- /*
- * Overrun is special, since it's
- * reported immediately, and doesn't
- * affect the current character
- */
- tty->flip.count++;
- tty->flip.flag_buf_ptr++;
- tty->flip.char_buf_ptr++;
- *tty->flip.flag_buf_ptr = TTY_OVERRUN;
- if (tty->flip.count >= TTY_FLIPBUF_SIZE)
- goto ignore_char;
- }
}
#if defined(CONFIG_SERIAL_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ)
if (break_pressed && info->line == sercons.index) {
break_pressed = 0;
}
#endif
- tty->flip.flag_buf_ptr++;
- tty->flip.char_buf_ptr++;
- tty->flip.count++;
+ if ((*status & info->ignore_status_mask) == 0) {
+ tty->flip.flag_buf_ptr++;
+ tty->flip.char_buf_ptr++;
+ tty->flip.count++;
+ }
+ if ((*status & UART_LSR_OE) &&
+ (tty->flip.count < TTY_FLIPBUF_SIZE)) {
+ /*
+ * Overrun is special, since it's reported
+ * immediately, and doesn't affect the current
+ * character
+ */
+ *tty->flip.flag_buf_ptr = TTY_OVERRUN;
+ tty->flip.count++;
+ tty->flip.flag_buf_ptr++;
+ tty->flip.char_buf_ptr++;
+ }
ignore_char:
*status = serial_inp(info, UART_LSR);
- } while (*status & UART_LSR_DR);
+ } while ((*status & UART_LSR_DR) && (max_count-- > 0));
#if (LINUX_VERSION_CODE > 131394) /* 2.1.66 */
tty_flip_buffer_push(tty);
#else
- queue_task(&tty->flip.tqueue, &tq_timer);
+ queue_task_irq_off(&tty->flip.tqueue, &tq_timer);
#endif
}
end_mark = info;
goto next;
}
+#ifdef SERIAL_DEBUG_INTR
+ printk("IIR = %x...", serial_in(info, UART_IIR));
+#endif
end_mark = 0;
info->last_active = jiffies;
#endif
break;
}
+#ifdef SERIAL_DEBUG_INTR
+ printk("IIR = %x...", serial_in(info, UART_IIR));
+#endif
} while (!(serial_in(info, UART_IIR) & UART_IIR_NO_INT));
info->last_active = jiffies;
#ifdef CONFIG_SERIAL_MULTIPORT
*/
if (!(info->flags & ASYNC_BUGGY_UART) &&
(serial_inp(info, UART_LSR) == 0xff)) {
- printk("LSR safety check engaged!\n");
+ printk("ttyS%d: LSR safety check engaged!\n", state->line);
if (capable(CAP_SYS_ADMIN)) {
if (info->tty)
set_bit(TTY_IO_ERROR, &info->tty->flags);
/* Arrange to enter sleep mode */
serial_outp(info, UART_LCR, 0xBF);
serial_outp(info, UART_EFR, UART_EFR_ECB);
+ serial_outp(info, UART_LCR, 0);
serial_outp(info, UART_IER, UART_IERX_SLEEP);
+ serial_outp(info, UART_LCR, 0xBF);
+ serial_outp(info, UART_EFR, 0);
serial_outp(info, UART_LCR, 0);
}
if (info->state->type == PORT_16750) {
if (timeout && time_after(jiffies, orig_jiffies + timeout))
break;
}
- set_current_state(TASK_RUNNING);
#ifdef SERIAL_DEBUG_RS_WAIT_UNTIL_SENT
printk("lsr = %d (jiff=%lu)...done\n", lsr, jiffies);
#endif
info->magic = SERIAL_MAGIC;
info->port = state->port;
info->flags = state->flags;
+ info->hub6 = state->hub6;
+ info->io_type = state->io_type;
+ info->iomem_base = state->iomem_base;
+ info->iomem_reg_shift = state->iomem_reg_shift;
info->quot = 0;
info->tty = 0;
}
#if defined(ENABLE_SERIAL_PCI) || defined(ENABLE_SERIAL_PNP)
-static void __init printk_pnp_dev_id(unsigned short vendor,
+static void __devinit printk_pnp_dev_id(unsigned short vendor,
unsigned short device)
{
printk("%c%c%c%x%x%x%x",
/*
* Common enabler code shared by both PCI and ISAPNP probes
*/
-static void __init start_pci_pnp_board(struct pci_dev *dev,
+static void __devinit start_pci_pnp_board(struct pci_dev *dev,
struct pci_board *board)
{
int k, line;
if (board->init_fn && ((board->init_fn)(dev, board, 1) != 0))
return;
-#ifdef MODULE
/*
* Register the serial board in the array if we need to
- * shutdown the board on a module unload.
+ * shutdown the board on a module unload or card removal
*/
if (DEACTIVATE_FUNC(dev) || board->init_fn) {
- if (serial_pci_board_idx >= NR_PCI_BOARDS)
+ for (k=0; k < NR_PCI_BOARDS; k++)
+ if (serial_pci_board[k].dev == 0)
+ break;
+ if (k >= NR_PCI_BOARDS)
return;
- serial_pci_board[serial_pci_board_idx].board = *board;
- serial_pci_board[serial_pci_board_idx].dev = dev;
- serial_pci_board_idx++;
+ serial_pci_board[k].board = *board;
+ serial_pci_board[k].dev = dev;
}
-#endif
base_baud = board->base_baud;
if (!base_baud)
if (line < 0)
break;
rs_table[line].baud_base = base_baud;
+ rs_table[line].dev = dev;
}
}
#endif /* ENABLE_SERIAL_PCI || ENABLE_SERIAL_PNP */
*/
static int
#ifndef MODULE
-__init
+__devinit
#endif
pci_plx9050_fn(struct pci_dev *dev, struct pci_board *board, int enable)
{
static int
#ifndef MODULE
-__init
+__devinit
#endif
pci_siig10x_fn(struct pci_dev *dev, struct pci_board *board, int enable)
{
static int
#ifndef MODULE
-__init
+__devinit
#endif
pci_siig20x_fn(struct pci_dev *dev, struct pci_board *board, int enable)
{
/* Added for EKF Intel i960 serial boards */
static int
#ifndef MODULE
-__init
+__devinit
#endif
pci_inteli960ni_fn(struct pci_dev *dev,
struct pci_board *board,
static int
#ifndef MODULE
-__init
+__devinit
#endif
pci_timedia_fn(struct pci_dev *dev, struct pci_board *board, int enable)
{
SPCI_FL_BASE0, 1, 520833,
64, 3, NULL, 0x300 },
#endif
+#if 0 /* PCI_DEVICE_ID_DCI_PCCOM8 ? */
+ { PCI_VENDOR_ID_DCI, PCI_DEVICE_ID_DCI_PCCOM8,
+ PCI_ANY_ID, PCI_ANY_ID,
+ SPCI_FL_BASE3, 8, 115200,
+ 8 },
+#endif
/* Generic serial board */
{ 0, 0,
0, 0,
return 1;
}
+/*
+ * PCI probe callback: match the device against the pci_boards table
+ * by vendor/device/subvendor/subdevice (PCI_ANY_ID wildcards). If no
+ * table entry matches, fall back to class-based guessing; if a table
+ * entry matched AND guessing would also have worked, log a request to
+ * report the redundant table entry. Finally hand the board to
+ * start_pci_pnp_board(). Returns 0 or -ENODEV.
+ * NOTE(review): on the no-match path serial_pci_guess_board() writes
+ * into *board, i.e. into the static table's terminating entry —
+ * confirm that mutation of the sentinel is intended here.
+ */
+static int __devinit serial_init_one(struct pci_dev *dev,
+ const struct pci_device_id *ent)
+{
+ struct pci_board *board, tmp;
+
+ for (board = pci_boards; board->vendor; board++) {
+ if (board->vendor != (unsigned short) PCI_ANY_ID &&
+ dev->vendor != board->vendor)
+ continue;
+ if (board->device != (unsigned short) PCI_ANY_ID &&
+ dev->device != board->device)
+ continue;
+ if (board->subvendor != (unsigned short) PCI_ANY_ID &&
+ pci_get_subvendor(dev) != board->subvendor)
+ continue;
+ if (board->subdevice != (unsigned short) PCI_ANY_ID &&
+ pci_get_subdevice(dev) != board->subdevice)
+ continue;
+ break;
+ }
+
+ if (board->vendor == 0 && serial_pci_guess_board(dev, board))
+ return -ENODEV;
+ else if (serial_pci_guess_board(dev, &tmp) == 0) {
+ printk(KERN_INFO "Redundant entry in serial pci_table. "
+ "Please send the output of\n"
+ "lspci -vv, this message (%d,%d,%d,%d)\n"
+ "and the manufacturer and name of "
+ "serial board or modem board\n"
+ "to serial-pci-info@lists.sourceforge.net.\n",
+ dev->vendor, dev->device,
+ pci_get_subvendor(dev), pci_get_subdevice(dev));
+ }
+
+ start_pci_pnp_board(dev, board);
+
+ return 0;
+}
+
+/*
+ * PCI remove callback (hot-unplug / driver unload): unregister every
+ * serial port whose rs_table entry was claimed by this pci_dev, then
+ * run the board-specific shutdown (init_fn with enable=0 and any
+ * deactivate hook) and free the serial_pci_board slot.
+ */
+static void __devexit serial_remove_one(struct pci_dev *dev)
+{
+ int i;
+
+ /*
+ * Iterate through all of the ports finding those that belong
+ * to this PCI device.
+ */
+ for(i = 0; i < NR_PORTS; i++) {
+ if (rs_table[i].dev != dev)
+ continue;
+ unregister_serial(i);
+ rs_table[i].dev = 0;
+ }
+ /*
+ * Now execute any board-specific shutdown procedure
+ */
+ for (i=0; i < NR_PCI_BOARDS; i++) {
+ struct pci_board_inst *brd = &serial_pci_board[i];
+
+ if (serial_pci_board[i].dev != dev)
+ continue;
+ if (brd->board.init_fn)
+ (brd->board.init_fn)(brd->dev, &brd->board, 0);
+ if (DEACTIVATE_FUNC(brd->dev))
+ (DEACTIVATE_FUNC(brd->dev))(brd->dev);
+ serial_pci_board[i].dev = 0;
+ }
+}
+
+
+static struct pci_device_id serial_pci_tbl[] __devinitdata = {
+ { PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,
+ PCI_CLASS_COMMUNICATION_SERIAL << 8, 0xffff00, },
+ { 0, }
+};
+
+MODULE_DEVICE_TABLE(pci, serial_pci_tbl);
+
+static struct pci_driver serial_pci_driver = {
+ name: "serial",
+ probe: serial_init_one,
+ remove: serial_remove_one,
+ id_table: serial_pci_tbl,
+};
/*
* Accept a maximum of eight boards
*
*/
-static void __init probe_serial_pci(void)
+static void __devinit probe_serial_pci(void)
{
- struct pci_dev *dev = NULL;
- struct pci_board *board;
-
#ifdef SERIAL_DEBUG_PCI
printk(KERN_DEBUG "Entered probe_serial_pci()\n");
#endif
-
- pci_for_each_dev(dev) {
- for (board = pci_boards; board->vendor; board++) {
- if (board->vendor != (unsigned short) PCI_ANY_ID &&
- dev->vendor != board->vendor)
- continue;
- if (board->device != (unsigned short) PCI_ANY_ID &&
- dev->device != board->device)
- continue;
- if (board->subvendor != (unsigned short) PCI_ANY_ID &&
- pci_get_subvendor(dev) != board->subvendor)
- continue;
- if (board->subdevice != (unsigned short) PCI_ANY_ID &&
- pci_get_subdevice(dev) != board->subdevice)
- continue;
- break;
- }
-
- if (board->vendor == 0 && serial_pci_guess_board(dev, board))
- continue;
-
- start_pci_pnp_board(dev, board);
- }
-
+
+ /* Register all PCI serial devices. Null out
+ * the driver name upon failure, as a signal
+ * not to attempt to unregister the driver later.
+ */
+ if (pci_module_init (&serial_pci_driver) != 0)
+ serial_pci_driver.name[0] = 0;
+
#ifdef SERIAL_DEBUG_PCI
printk(KERN_DEBUG "Leaving probe_serial_pci() (probe finished)\n");
#endif
unsigned short device;
};
-static struct pnp_board pnp_devices[] __initdata = {
+static struct pnp_board pnp_devices[] __devinitdata = {
/* Archtek America Corp. */
/* Archtek SmartLink Modem 3334BT Plug & Play */
{ ISAPNP_VENDOR('A', 'A', 'C'), ISAPNP_DEVICE(0x000F) },
irq->map = map;
}
-static char *modem_names[] __initdata = {
+static char *modem_names[] __devinitdata = {
"MODEM", "Modem", "modem", "FAX", "Fax", "fax",
"56K", "56k", "K56", "33.6", "28.8", "14.4",
"33,600", "28,800", "14,400", "33.600", "28.800", "14.400",
"33600", "28800", "14400", "V.90", "V.34", "V.32", 0
};
-static int __init check_name(char *name)
+static int __devinit check_name(char *name)
{
char **tmp = modem_names;
return 1;
}
-static void __init probe_serial_pnp(void)
+static void __devinit probe_serial_pnp(void)
{
struct pci_dev *dev = NULL;
struct pnp_board *pnp_board;
}
/*
- * This is for use by architectures that know their serial port
+ * This is for use by architectures that know their serial console
* attributes only at run time. Not to be invoked after rs_init().
*/
int __init early_serial_setup(struct serial_struct *req)
(rs_table[i].iomem_base == req->iomem_base))
break;
}
+#ifdef __i386__
+ if (i == NR_PORTS) {
+ for (i = 4; i < NR_PORTS; i++)
+ if ((rs_table[i].type == PORT_UNKNOWN) &&
+ (rs_table[i].count == 0))
+ break;
+ }
+#endif
if (i == NR_PORTS) {
for (i = 0; i < NR_PORTS; i++)
if ((rs_table[i].type == PORT_UNKNOWN) &&
#endif
}
#if defined(ENABLE_SERIAL_PCI) || defined(ENABLE_SERIAL_PNP)
- for (i=0; i < serial_pci_board_idx; i++) {
+ for (i=0; i < NR_PCI_BOARDS; i++) {
struct pci_board_inst *brd = &serial_pci_board[i];
-
+
+ if (serial_pci_board[i].dev == 0)
+ continue;
if (brd->board.init_fn)
(brd->board.init_fn)(brd->dev, &brd->board, 0);
-
if (DEACTIVATE_FUNC(brd->dev))
(DEACTIVATE_FUNC(brd->dev))(brd->dev);
}
tmp_buf = NULL;
free_page(pg);
}
+
+#ifdef ENABLE_SERIAL_PCI
+ if (serial_pci_driver.name[0])
+ pci_unregister_driver (&serial_pci_driver);
+#endif
}
module_init(rs_init);
if (--tmout == 0)
break;
} while((status & BOTH_EMPTY) != BOTH_EMPTY);
- if (info->flags & ASYNC_NO_FLOW)
- return;
- tmout = 1000000;
- while (--tmout && ((serial_in(info, UART_MSR) & UART_MSR_CTS) == 0));
+
+ /* Wait for flow control if necessary */
+ if (info->flags & ASYNC_CONS_FLOW) {
+ tmout = 1000000;
+ while (--tmout &&
+ ((serial_in(info, UART_MSR) & UART_MSR_CTS) == 0));
+ }
}
}
/*
- * Setup initial baud/bits/parity/flow. We do two things here:
+ * Setup initial baud/bits/parity/flow control. We do two things here:
* - construct a cflag setting for the first rs_open()
* - initialize the serial port
* Return non-zero if we didn't find a serial port.
s++;
if (*s) parity = *s++;
if (*s) bits = *s++ - '0';
- if ((*s) && (!strcmp(s, "rtscts")))
- doflow = 1;
+ if (*s) doflow = (*s++ == 'r');
}
/*
* Divisor, bytesize and parity
*/
state = rs_table + co->index;
- if (doflow == 0)
- state->flags |= ASYNC_NO_FLOW;
+ if (doflow)
+ state->flags |= ASYNC_CONS_FLOW;
info = &async_sercons;
info->magic = SERIAL_MAGIC;
info->state = state;
#if 0
/* Removed superuser check: Sysops can use the permissions on the device
file to restrict access. Recommendation: Root only. (root.root 600) */
- if (!suser ()) {
+ if (!capable(CAP_SYS_ADMIN)) {
return -EPERM;
}
#endif
*/
#define VERSION(ver,rel,seq) (((ver)<<16) | ((rel)<<8) | (seq))
-#define BREAKPOINT() asm(" int $3");
+#if defined(__i386__)
+# define BREAKPOINT() asm(" int $3");
+#else
+# define BREAKPOINT() { }
+#endif
#define MAX_ISA_DEVICES 10
#define MAX_PCI_DEVICES 10
#endif
#ifdef CONFIG_SYNCLINK_SYNCPPP
-#include "../net/wan/syncppp.h"
+#include <net/syncppp.h>
#endif
#include <asm/segment.h>
status = info->rx_buffer_list[0].status;
if ( status & (BIT8 + BIT3 + BIT1) ) {
- /* receive error has occured */
+ /* receive error has occurred */
rc = FALSE;
} else {
if ( memcmp( info->tx_buffer_list[0].virt_addr ,
/* returns:
* 1 if the device acknowledged
* 0 if the device did not ack
- * -ETIMEDOUT if an error occured (while raising the scl line)
+ * -ETIMEDOUT if an error occurred (while raising the scl line)
*/
static int i2c_outb(struct i2c_adapter *i2c_adap, char c)
{
* reads, writes as well as 10bit-addresses.
* returns:
* 0 everything went okay, the chip ack'ed
- * -x an error occured (like: -EREMOTEIO if the device did not answer, or
+ * -x an error occurred (like: -EREMOTEIO if the device did not answer, or
* -ETIMEDOUT, for example if the lines are stuck...)
*/
static inline int bit_doAddress(struct i2c_adapter *i2c_adap,
/* the remaining 8 bit address */
ret = i2c_outb(i2c_adap,msg->addr & 0x7f);
if (ret != 1) {
- /* the chip did not ack / xmission error occured */
+ /* the chip did not ack / xmission error occurred */
printk("died at 2nd address code.\n");
return -EREMOTEIO;
}
if ((handler = hwgroup->handler) == NULL) {
/*
- * Either a marginal timeout occured
+ * Either a marginal timeout occurred
* (got the interrupt just as timer expired),
* or we were "sleeping" to give other devices a chance.
* Either way, we don't really want to complain about anything.
void ide_delay_50ms (void)
{
#ifndef CONFIG_BLK_DEV_IDECS
- unsigned long timeout = jiffies + ((HZ + 19)/20) + 1;
- while (0 < (signed long)(timeout - jiffies));
+ mdelay(50);
#else
__set_current_state(TASK_UNINTERRUPTIBLE);
schedule_timeout(HZ/20);
/*
* promise_write() transfers a block of one or more sectors of data to a
- * drive as part of a disk write operation. All but 4 sectors are transfered
+ * drive as part of a disk write operation. All but 4 sectors are transferred
* in the first attempt, then the interface is polled (nicely!) for completion
- * before the final 4 sectors are transfered. There is no interrupt generated
+ * before the final 4 sectors are transferred. There is no interrupt generated
* on writes (at least on the DC4030VL-2), we just have to poll for NOT BUSY.
*/
static ide_startstop_t promise_write (ide_drive_t *drive)
/*
* Based on settings done by AMI BIOS
- * (might be usefull if drive is not registered in CMOS for any reason).
+ * (might be useful if drive is not registered in CMOS for any reason).
*/
static void piix_tune_drive (ide_drive_t *drive, byte pio)
{
/*
* Based on settings done by AMI BIOS
- * (might be usefull if drive is not registered in CMOS for any reason).
+ * (might be useful if drive is not registered in CMOS for any reason).
*/
static void slc90e66_tune_drive (ide_drive_t *drive, byte pio)
{
#define PCI_DEVICE_ID_NEC_UPD72871 0x00ce
#endif
-#ifndef PCI_DEVICE_ID_APPLE_UNI_N_FW
-#define PCI_DEVICE_ID_APPLE_UNI_N_FW 0x0018
-#endif
-
#ifndef PCI_DEVICE_ID_ALI_OHCI1394_M5251
#define PCI_DEVICE_ID_ALI_OHCI1394_M5251 0x5251
#endif
/* returns card type + card ID (for bt878-based ones)
for possible values see lines below beginning with #define BTTV_UNKNOWN
- returns negative value if error ocurred
+ returns negative value if error occurred
*/
extern int bttv_get_cardinfo(unsigned int card, int *type, int *cardid);
/* sets GPOE register (BT848_GPIO_OUT_EN) to new value:
data | (current_GPOE_value & ~mask)
- returns negative value if error ocurred
+ returns negative value if error occurred
*/
extern int bttv_gpio_enable(unsigned int card,
unsigned long mask, unsigned long data);
/* fills data with GPDATA register contents
- returns negative value if error ocurred
+ returns negative value if error occurred
*/
extern int bttv_read_gpio(unsigned int card, unsigned long *data);
/* sets GPDATA register to new value:
(data & mask) | (current_GPDATA_value & ~mask)
- returns negative value if error ocurred
+ returns negative value if error occurred
*/
extern int bttv_write_gpio(unsigned int card,
unsigned long mask, unsigned long data);
in interrupt handler if BT848_INT_GPINT bit is set - this queue is activated
(wake_up_interruptible) and following call to the function bttv_read_gpio
should return new value of GPDATA,
- returns NULL value if error ocurred or queue is not available
+ returns NULL value if error occurred or queue is not available
WARNING: because there is no buffer for GPIO data, one MUST
process data ASAP
*/
/* i2c */
#define I2C_CLIENTS_MAX 8
+extern struct i2c_algo_bit_data bttv_i2c_algo_template;
+extern struct i2c_adapter bttv_i2c_adap_template;
+extern struct i2c_client bttv_i2c_client_template;
extern void bttv_bit_setscl(void *data, int state);
extern void bttv_bit_setsda(void *data, int state);
extern void bttv_call_i2c_clients(struct bttv *btv, unsigned int cmd, void *arg);
-#define MAX_KMALLOC_MEM (512*1024)
/*
buz - Iomega Buz driver version 1.0
or set in in a VIDIOCSFBUF ioctl
*/
-static unsigned long vidmem = 0; /* Video memory base address */
+static unsigned long vidmem; /* Video memory base address (default 0) */
/* Special purposes only: */
-static int triton = 0; /* 0=no, 1=yes */
-static int natoma = 0; /* 0=no, 1=yes */
+static int triton; /* 0=no (default), 1=yes */
+static int natoma; /* 0=no (default), 1=yes */
/*
Number and size of grab buffers for Video 4 Linux
Default input and video norm at startup of the driver.
*/
-static int default_input = 0; /* 0=Composite, 1=S-VHS */
-static int default_norm = 0; /* 0=PAL, 1=NTSC */
+static int default_input; /* 0=Composite (default), 1=S-VHS */
+static int default_norm; /* 0=PAL (default), 1=NTSC */
MODULE_PARM(vidmem, "i");
MODULE_PARM(triton, "i");
* Allocate the V4L grab buffers
*
* These have to be pysically contiguous.
- * If v4l_bufsize <= MAX_KMALLOC_MEM we use kmalloc
+ * If v4l_bufsize <= KMALLOC_MAXSIZE we use kmalloc
*/
static int v4l_fbuffer_alloc(struct zoran *zr)
if (zr->v4l_gbuf[i].fbuffer)
printk(KERN_WARNING "%s: v4l_fbuffer_alloc: buffer %d allready allocated ?\n", zr->name, i);
- if (v4l_bufsize <= MAX_KMALLOC_MEM) {
+ if (v4l_bufsize <= KMALLOC_MAXSIZE) {
/* Use kmalloc */
mem = (unsigned char *) kmalloc(v4l_bufsize, GFP_KERNEL);
/*
* Allocate the MJPEG grab buffers.
*
- * If the requested buffer size is smaller than MAX_KMALLOC_MEM,
+ * If the requested buffer size is smaller than KMALLOC_MAXSIZE,
* kmalloc is used to request a physically contiguous area,
* else we allocate the memory in framgents with get_free_page.
*
* (RJ: This statement is from Dave Perks' original driver,
* I could never check it because I have a zr36067)
* The driver cares about this because it reduces the buffer
- * size to MAX_KMALLOC_MEM in that case (which forces contiguous allocation).
+ * size to KMALLOC_MAXSIZE in that case (which forces contiguous allocation).
*
* RJ: The contents grab buffers needs never be accessed in the driver.
* Therefore there is no need to allocate them with vmalloc in order
/* Decide if we should alloc contiguous or fragmented memory */
/* This has to be identical in jpg_fbuffer_alloc and jpg_fbuffer_free */
- alloc_contig = (zr->jpg_bufsize < MAX_KMALLOC_MEM);
+ alloc_contig = (zr->jpg_bufsize < KMALLOC_MAXSIZE);
for (i = 0; i < zr->jpg_nbufs; i++) {
if (zr->jpg_gbuf[i].frag_tab)
/* Decide if we should alloc contiguous or fragmented memory */
/* This has to be identical in jpg_fbuffer_alloc and jpg_fbuffer_free */
- alloc_contig = (zr->jpg_bufsize < MAX_KMALLOC_MEM);
+ alloc_contig = (zr->jpg_bufsize < KMALLOC_MAXSIZE);
for (i = 0; i < zr->jpg_nbufs; i++) {
if (!zr->jpg_gbuf[i].frag_tab)
DEBUG(printk(BUZ_DEBUG "-%u: i2c detach %02x\n", zr->id, id));
}
-static struct i2c_bus zoran_i2c_bus_template =
-{
- "zr36057",
- I2C_BUSID_BT848,
- NULL,
-
- SPIN_LOCK_UNLOCKED,
+static struct i2c_bus zoran_i2c_bus_template = {
+ name: "zr36057",
+ id: I2C_BUSID_BT848,
+ bus_lock: SPIN_LOCK_UNLOCKED,
- attach_inform,
- detach_inform,
+ attach_inform: attach_inform,
+ detach_inform: detach_inform,
- i2c_setlines,
- i2c_getdataline,
- NULL,
- NULL,
+ i2c_setlines: i2c_setlines,
+ i2c_getdataline: i2c_getdataline,
};
return -EBUSY;
}
- MOD_INC_USE_COUNT;
return 0;
}
jpg_fbuffer_free(zr);
zr->jpg_nbufs = 0;
- MOD_DEC_USE_COUNT;
DEBUG(printk(KERN_INFO ": zoran_close done\n"));
}
#endif
#endif
- /* Check for vaild parameters */
+ /* Check for valid parameters */
if (vw.width < BUZ_MIN_WIDTH || vw.height < BUZ_MIN_HEIGHT ||
vw.width > BUZ_MAX_WIDTH || vw.height > BUZ_MAX_HEIGHT) {
return -EINVAL;
/* br.size is limited by 1 page for the stat_com tables to a Maximum of 2 MB */
if (br.size > (512 * 1024))
br.size = (512 * 1024); /* 512 K should be enough */
- if (zr->need_contiguous && br.size > MAX_KMALLOC_MEM)
- br.size = MAX_KMALLOC_MEM;
+ if (zr->need_contiguous && br.size > KMALLOC_MAXSIZE)
+ br.size = KMALLOC_MAXSIZE;
zr->jpg_nbufs = br.count;
zr->jpg_bufsize = br.size;
static struct video_device zoran_template =
{
+ owner: THIS_MODULE,
name: BUZ_NAME,
type: VID_TYPE_CAPTURE | VID_TYPE_OVERLAY | VID_TYPE_CLIPPING | VID_TYPE_FRAMERAM |
VID_TYPE_SCALES | VID_TYPE_SUBCAPTURE,
zr->zr36057_mem = ioremap(zr->zr36057_adr, 0x1000);
if (!zr->zr36057_mem) {
printk(KERN_ERR "%s: ioremap failed\n", zr->name);
- /* XXX handle error */
+ break;
}
/* set PCI latency timer */
DECLARE_MUTEX_LOCKED(sem);
struct msp3400c *msp;
struct i2c_client *c;
- int rev1,rev2,i;
+ int rev1,rev2=0,i;
client_template.adapter = adap;
client_template.addr = addr;
(int)msp3400c_read(client, I2C_MSP3400C_DFP, 0x1c));
break;
#endif
- default:
+ default:;
/* nothing */
}
return 0;
#define WRD_COUNT 4
int el2_probe(struct net_device *dev);
-int el2_pio_probe(struct net_device *dev);
-int el2_probe1(struct net_device *dev, int ioaddr);
+static int el2_pio_probe(struct net_device *dev);
+static int el2_probe1(struct net_device *dev, int ioaddr);
/* A zero-terminated list of I/O addresses to be probed in PIO mode. */
static unsigned int netcard_portlist[] __initdata =
/* Try all of the locations that aren't obviously empty. This touches
a lot of locations, and is much riskier than the code above. */
-int __init
+static int __init
el2_pio_probe(struct net_device *dev)
{
int i;
/* Probe for the Etherlink II card at I/O port base IOADDR,
returning non-zero on success. If found, set the station
address and memory parameters in DEVICE. */
-int __init
+static int __init
el2_probe1(struct net_device *dev, int ioaddr)
{
int i, iobase_reg, membase_reg, saved_406, wordlength, retval;
static unsigned version_printed;
unsigned long vendor_id;
+ /* FIXME: code reads ioaddr + 0x400, we request ioaddr + 16 */
if (!request_region(ioaddr, EL2_IO_EXTENT, dev->name))
return -EBUSY;
}
#endif /* EL2MEMTEST */
- dev->mem_end = dev->rmem_end = dev->mem_start + EL2_MEMSIZE;
+ if (dev->mem_start)
+ dev->mem_end = dev->rmem_end = dev->mem_start + EL2_MEMSIZE;
if (wordlength) { /* No Tx pages to skip over to get to Rx */
dev->rmem_start = dev->mem_start;
#include <asm/irq.h>
#ifdef EL3_DEBUG
-int el3_debug = EL3_DEBUG;
+static int el3_debug = EL3_DEBUG;
#else
-int el3_debug = 2;
+static int el3_debug = 2;
#endif
/* To minimize the size of the driver source I only define operating
int id;
};
-struct el3_mca_adapters_struct el3_mca_adapters[] = {
+static struct el3_mca_adapters_struct el3_mca_adapters[] = {
{ "3Com 3c529 EtherLink III (10base2)", 0x627c },
{ "3Com 3c529 EtherLink III (10baseT)", 0x627d },
{ "3Com 3c529 EtherLink III (test mode)", 0x62db },
{ "3Com 3c529 EtherLink III (TP)", 0x62f7 },
{ NULL, 0 },
};
-#endif
+#endif /* CONFIG_MCA */
-#ifdef __ISAPNP__
-struct el3_isapnp_adapters_struct {
- unsigned short vendor, function;
- char *name;
-};
-static struct el3_isapnp_adapters_struct el3_isapnp_adapters[] = {
- {ISAPNP_VENDOR('T', 'C', 'M'), ISAPNP_FUNCTION(0x5090), "3Com Etherlink III (TP)"},
- {ISAPNP_VENDOR('T', 'C', 'M'), ISAPNP_FUNCTION(0x5091), "3Com Etherlink III"},
- {ISAPNP_VENDOR('T', 'C', 'M'), ISAPNP_FUNCTION(0x5094), "3Com Etherlink III (combo)"},
- {ISAPNP_VENDOR('T', 'C', 'M'), ISAPNP_FUNCTION(0x5095), "3Com Etherlink III (TPO)"},
- {ISAPNP_VENDOR('T', 'C', 'M'), ISAPNP_FUNCTION(0x5098), "3Com Etherlink III (TPC)"},
- {ISAPNP_VENDOR('P', 'N', 'P'), ISAPNP_FUNCTION(0x80f8), "3Com Etherlink III compatible"},
- {0, }
+#ifdef CONFIG_ISAPNP
+static struct isapnp_device_id el3_isapnp_adapters[] = {
+ { ISAPNP_ANY_ID, ISAPNP_ANY_ID,
+ ISAPNP_VENDOR('T', 'C', 'M'), ISAPNP_FUNCTION(0x5090),
+ (long) "3Com Etherlink III (TP)" },
+ { ISAPNP_ANY_ID, ISAPNP_ANY_ID,
+ ISAPNP_VENDOR('T', 'C', 'M'), ISAPNP_FUNCTION(0x5091),
+ (long) "3Com Etherlink III" },
+ { ISAPNP_ANY_ID, ISAPNP_ANY_ID,
+ ISAPNP_VENDOR('T', 'C', 'M'), ISAPNP_FUNCTION(0x5094),
+ (long) "3Com Etherlink III (combo)" },
+ { ISAPNP_ANY_ID, ISAPNP_ANY_ID,
+ ISAPNP_VENDOR('T', 'C', 'M'), ISAPNP_FUNCTION(0x5095),
+ (long) "3Com Etherlink III (TPO)" },
+ { ISAPNP_ANY_ID, ISAPNP_ANY_ID,
+ ISAPNP_VENDOR('T', 'C', 'M'), ISAPNP_FUNCTION(0x5098),
+ (long) "3Com Etherlink III (TPC)" },
+ { ISAPNP_ANY_ID, ISAPNP_ANY_ID,
+ ISAPNP_VENDOR('P', 'N', 'P'), ISAPNP_FUNCTION(0x80f8),
+ (long) "3Com Etherlink III compatible" },
+ { } /* terminate list */
};
+
+MODULE_DEVICE_TABLE(isapnp, el3_isapnp_adapters);
+
static u16 el3_isapnp_phys_addr[8][3];
-#endif /* CONFIG_ISAPNP */
-#ifdef __ISAPNP__
static int nopnp;
-#endif
+#endif /* CONFIG_ISAPNP */
int el3_probe(struct net_device *dev)
{
u16 phys_addr[3];
static int current_tag = 0;
int mca_slot = -1;
-#ifdef __ISAPNP__
+#ifdef CONFIG_ISAPNP
static int pnp_cards = 0;
-#endif /* __ISAPNP__ */
+#endif /* CONFIG_ISAPNP */
if (dev) SET_MODULE_OWNER(dev);
}
#endif /* CONFIG_MCA */
-#ifdef __ISAPNP__
+#ifdef CONFIG_ISAPNP
if (nopnp == 1)
goto no_pnp;
}
}
no_pnp:
-#endif /* __ISAPNP__ */
+#endif /* CONFIG_ISAPNP */
/* Select an open I/O location at 0x1*0 to do contention select. */
for ( ; id_port < 0x200; id_port += 0x10) {
phys_addr[i] = htons(id_read_eeprom(i));
}
-#ifdef __ISAPNP__
+#ifdef CONFIG_ISAPNP
if (nopnp == 0) {
/* The ISA PnP 3c509 cards respond to the ID sequence.
This check is needed in order not to register them twice. */
}
}
}
-#endif /* __ISAPNP__ */
+#endif /* CONFIG_ISAPNP */
{
unsigned int iobase = id_read_eeprom(8);
MODULE_PARM(irq,"1-8i");
MODULE_PARM(xcvr,"1-8i");
MODULE_PARM(max_interrupt_work, "i");
-#ifdef __ISAPNP__
+#ifdef CONFIG_ISAPNP
MODULE_PARM(nopnp, "i");
#endif
#define CORKSCREW_TOTAL_SIZE 0x20
#ifdef DRIVER_DEBUG
-int corkscrew_debug = DRIVER_DEBUG;
+static int corkscrew_debug = DRIVER_DEBUG;
#else
-int corkscrew_debug = 1;
+static int corkscrew_debug = 1;
#endif
#define CORKSCREW_ID 10
{ "Default", 0, 0xFF, XCVR_10baseT, 10000},
};
-#ifdef __ISAPNP__
-struct corkscrew_isapnp_adapters_struct {
- unsigned short vendor, function;
- char *name;
-};
-struct corkscrew_isapnp_adapters_struct corkscrew_isapnp_adapters[] = {
- {ISAPNP_VENDOR('T', 'C', 'M'), ISAPNP_FUNCTION(0x5051), "3Com Fast EtherLink ISA"},
- {0, }
-};
-int corkscrew_isapnp_phys_addr[3] = {
- 0, 0, 0
+#ifdef CONFIG_ISAPNP
+static struct isapnp_device_id corkscrew_isapnp_adapters[] = {
+ { ISAPNP_ANY_ID, ISAPNP_ANY_ID,
+ ISAPNP_VENDOR('T', 'C', 'M'), ISAPNP_FUNCTION(0x5051),
+ (long) "3Com Fast EtherLink ISA" },
+ { } /* terminate list */
};
+MODULE_DEVICE_TABLE(isapnp, corkscrew_isapnp_adapters);
+
+static int corkscrew_isapnp_phys_addr[3];
+
static int nopnp;
-#endif
+#endif /* CONFIG_ISAPNP */
static int corkscrew_scan(struct net_device *dev);
static struct net_device *corkscrew_found_device(struct net_device *dev,
{
int cards_found = 0;
static int ioaddr;
-#ifdef __ISAPNP__
+#ifdef CONFIG_ISAPNP
short i;
static int pnp_cards = 0;
#endif
-#ifdef __ISAPNP__
+#ifdef CONFIG_ISAPNP
if(nopnp == 1)
goto no_pnp;
for(i=0; corkscrew_isapnp_adapters[i].vendor != 0; i++) {
}
}
no_pnp:
-#endif /* not __ISAPNP__ */
+#endif /* CONFIG_ISAPNP */
/* Check all locations on the ISA bus -- evil! */
for (ioaddr = 0x100; ioaddr < 0x400; ioaddr += 0x20) {
int irq;
-#ifdef __ISAPNP__
+#ifdef CONFIG_ISAPNP
/* Make sure this was not already picked up by isapnp */
if(ioaddr == corkscrew_isapnp_phys_addr[0]) continue;
if(ioaddr == corkscrew_isapnp_phys_addr[1]) continue;
if(ioaddr == corkscrew_isapnp_phys_addr[2]) continue;
-#endif
+#endif /* CONFIG_ISAPNP */
if (check_region(ioaddr, CORKSCREW_TOTAL_SIZE))
continue;
/* Check the resource configuration for a matching ioaddr. */
#ifdef VORTEX_BUS_MASTER
if (status & DMADone) {
outw(0x1000, ioaddr + Wn7_MasterStatus); /* Ack the event. */
- dev_kfree_skb_irq(lp->tx_skb); /* Release the transfered buffer */
+ dev_kfree_skb_irq(lp->tx_skb); /* Release the transferred buffer */
netif_wake_queue(dev);
}
#endif
LK1.1.11 13 Nov 2000 andrewm
- Dump MOD_INC/DEC_USE_COUNT, use SET_MODULE_OWNER
- LK1.1.12 1 Jan 2001 andrewm
+ LK1.1.12 1 Jan 2001 andrewm (2.4.0-pre1)
- Call pci_enable_device before we request our IRQ (Tobias Ringstrom)
- Add 3c590 PCI latency timer hack to vortex_probe1 (from 0.99Ra)
- Added extended wait_for_completion for the 3c905CX.
- Add HAS_NWAY to 3cSOHO100-TX (Brett Frankenberger)
- Don't free skbs we don't own on oom path in vortex_open().
+ LK1.1.13 27 Jan 2001
+ - Added explicit `medialock' flag so we can truly
+ lock the media type down with `options'.
+ - "check ioremap return and some tidbits" (Arnaldo Carvalho de Melo <acme@conectiva.com.br>)
+ - Added and used EEPROM_NORESET for 3c556B PM resumes.
+ - Fixed leakage of vp->rx_ring.
+ - Break out separate HAS_HWCKSM device capability flag.
+ - Kill vp->tx_full (ANK)
+ - Merge zerocopy fragment handling (ANK?)
+
+ LK1.1.14 15 Feb 2001
+ - Enable WOL. Can be turned on with `enable_wol' module option.
+ - EISA and PCI initialisation fixes (jgarzik, Manfred Spraul)
+ - If a device's internalconfig register reports it has NWAY,
+ use it, even if autoselect is enabled.
+
- See http://www.uow.edu.au/~andrewm/linux/#3c59x-2.3 for more details.
- Also see Documentation/networking/vortex.txt
*/
/*
- * FIXME: This driver _could_ support MTU changing, but doesn't. See Don's hamaci.c implementation
+ * FIXME: This driver _could_ support MTU changing, but doesn't. See Don's hamachi.c implementation
* as well as other drivers
*
* NOTE: If you make 'vortex_debug' a constant (#define vortex_debug 0) the driver shrinks by 2k
/* Maximum events (Rx packets, etc.) to handle at each interrupt. */
static int max_interrupt_work = 32;
/* Tx timeout interval (millisecs) */
-static int watchdog = 400;
+static int watchdog = 5000;
/* Allow aggregation of Tx interrupts. Saves CPU load at the cost
* of possible Tx stalls if the system is blocking interrupts
* somewhere else. Undefine this to disable.
- * AKPM 26 April 2000: enabling this still gets vestigial Tx timeouts
- * in a heavily loaded (collision-prone) 10BaseT LAN. Should be OK with
- * switched Ethernet.
- * AKPM 24May00: vestigial timeouts have been removed by later fixes.
*/
#define tx_interrupt_mitigation 1
static int vortex_debug = 1;
#endif
-/* Some values here only for performance evaluation and path-coverage
- debugging. */
-static int rx_nocopy = 0, rx_copy = 0, queued_packet = 0, rx_csumhits;
-
#ifndef __OPTIMIZE__
#error You must compile this file with the correct options!
#error See the last lines of the source file.
#include <linux/delay.h>
static char version[] __devinitdata =
-"3c59x.c:LK1.1.12 06 Jan 2000 Donald Becker and others. http://www.scyld.com/network/vortex.html " "$Revision: 1.102.2.46 $\n";
+"3c59x.c:LK1.1.13 27 Jan 2001 Donald Becker and others. http://www.scyld.com/network/vortex.html\n";
MODULE_AUTHOR("Donald Becker <becker@scyld.com>");
MODULE_DESCRIPTION("3Com 3c59x/3c90x/3c575 series Vortex/Boomerang/Cyclone driver");
MODULE_PARM(debug, "i");
MODULE_PARM(options, "1-" __MODULE_STRING(8) "i");
MODULE_PARM(full_duplex, "1-" __MODULE_STRING(8) "i");
+MODULE_PARM(hw_checksums, "1-" __MODULE_STRING(8) "i");
MODULE_PARM(flow_ctrl, "1-" __MODULE_STRING(8) "i");
+MODULE_PARM(enable_wol, "1-" __MODULE_STRING(8) "i");
MODULE_PARM(rx_copybreak, "i");
MODULE_PARM(max_interrupt_work, "i");
MODULE_PARM(compaq_ioaddr, "i");
EEPROM_8BIT=0x10, /* AKPM: Uses 0x230 as the base bitmaps for EEPROM reads */
HAS_PWR_CTRL=0x20, HAS_MII=0x40, HAS_NWAY=0x80, HAS_CB_FNS=0x100,
INVERT_MII_PWR=0x200, INVERT_LED_PWR=0x400, MAX_COLLISION_RESET=0x800,
- EEPROM_OFFSET=0x1000 };
+ EEPROM_OFFSET=0x1000, EEPROM_NORESET=0x2000, HAS_HWCKSM=0x4000 };
enum vortex_chips {
CH_3C590 = 0,
{"3c900 Boomerang 10Mbps Combo",
PCI_USES_IO|PCI_USES_MASTER, IS_BOOMERANG, 64, },
{"3c900 Cyclone 10Mbps TPO", /* AKPM: from Don's 0.99M */
- PCI_USES_IO|PCI_USES_MASTER, IS_CYCLONE|HAS_NWAY, 128, },
+ PCI_USES_IO|PCI_USES_MASTER, IS_CYCLONE|HAS_NWAY|HAS_HWCKSM, 128, },
{"3c900 Cyclone 10Mbps Combo",
- PCI_USES_IO|PCI_USES_MASTER, IS_CYCLONE, 128, },
+ PCI_USES_IO|PCI_USES_MASTER, IS_CYCLONE|HAS_HWCKSM, 128, },
{"3c900 Cyclone 10Mbps TPC", /* AKPM: from Don's 0.99M */
- PCI_USES_IO|PCI_USES_MASTER, IS_CYCLONE, 128, },
+ PCI_USES_IO|PCI_USES_MASTER, IS_CYCLONE|HAS_HWCKSM, 128, },
{"3c900B-FL Cyclone 10base-FL",
- PCI_USES_IO|PCI_USES_MASTER, IS_CYCLONE, 128, },
+ PCI_USES_IO|PCI_USES_MASTER, IS_CYCLONE|HAS_HWCKSM, 128, },
{"3c905 Boomerang 100baseTx",
PCI_USES_IO|PCI_USES_MASTER, IS_BOOMERANG|HAS_MII, 64, },
{"3c905 Boomerang 100baseT4",
PCI_USES_IO|PCI_USES_MASTER, IS_BOOMERANG|HAS_MII, 64, },
{"3c905B Cyclone 100baseTx",
- PCI_USES_IO|PCI_USES_MASTER, IS_CYCLONE|HAS_NWAY, 128, },
+ PCI_USES_IO|PCI_USES_MASTER, IS_CYCLONE|HAS_NWAY|HAS_HWCKSM, 128, },
{"3c905B Cyclone 10/100/BNC",
- PCI_USES_IO|PCI_USES_MASTER, IS_CYCLONE|HAS_NWAY, 128, },
+ PCI_USES_IO|PCI_USES_MASTER, IS_CYCLONE|HAS_NWAY|HAS_HWCKSM, 128, },
{"3c905B-FX Cyclone 100baseFx",
- PCI_USES_IO|PCI_USES_MASTER, IS_CYCLONE, 128, },
+ PCI_USES_IO|PCI_USES_MASTER, IS_CYCLONE|HAS_HWCKSM, 128, },
{"3c905C Tornado",
- PCI_USES_IO|PCI_USES_MASTER, IS_TORNADO|HAS_NWAY, 128, },
+ PCI_USES_IO|PCI_USES_MASTER, IS_TORNADO|HAS_NWAY|HAS_HWCKSM, 128, },
{"3c980 Cyclone",
- PCI_USES_IO|PCI_USES_MASTER, IS_CYCLONE, 128, },
+ PCI_USES_IO|PCI_USES_MASTER, IS_CYCLONE|HAS_HWCKSM, 128, },
{"3c980 10/100 Base-TX NIC(Python-T)",
- PCI_USES_IO|PCI_USES_MASTER, IS_CYCLONE, 128, },
+ PCI_USES_IO|PCI_USES_MASTER, IS_CYCLONE|HAS_HWCKSM, 128, },
{"3cSOHO100-TX Hurricane",
- PCI_USES_IO|PCI_USES_MASTER, IS_CYCLONE|HAS_NWAY, 128, },
+ PCI_USES_IO|PCI_USES_MASTER, IS_CYCLONE|HAS_NWAY|HAS_HWCKSM, 128, },
{"3c555 Laptop Hurricane",
- PCI_USES_IO|PCI_USES_MASTER, IS_CYCLONE|EEPROM_8BIT, 128, },
+ PCI_USES_IO|PCI_USES_MASTER, IS_CYCLONE|EEPROM_8BIT|HAS_HWCKSM, 128, },
{"3c556 Laptop Tornado",
- PCI_USES_IO|PCI_USES_MASTER, IS_TORNADO|HAS_NWAY|EEPROM_8BIT|HAS_CB_FNS|INVERT_MII_PWR, 128, },
+ PCI_USES_IO|PCI_USES_MASTER, IS_TORNADO|HAS_NWAY|EEPROM_8BIT|HAS_CB_FNS|INVERT_MII_PWR|
+ HAS_HWCKSM, 128, },
{"3c556B Laptop Hurricane",
- PCI_USES_IO|PCI_USES_MASTER, IS_TORNADO|HAS_NWAY|EEPROM_OFFSET|HAS_CB_FNS|INVERT_MII_PWR, 128, },
+ PCI_USES_IO|PCI_USES_MASTER, IS_TORNADO|HAS_NWAY|EEPROM_OFFSET|HAS_CB_FNS|INVERT_MII_PWR|
+ EEPROM_NORESET|HAS_HWCKSM, 128, },
{"3c575 [Megahertz] 10/100 LAN CardBus",
- PCI_USES_IO|PCI_USES_MASTER, IS_BOOMERANG|HAS_MII|EEPROM_8BIT, 128, },
+ PCI_USES_IO|PCI_USES_MASTER, IS_BOOMERANG|HAS_MII|EEPROM_8BIT, 128, },
{"3c575 Boomerang CardBus",
PCI_USES_IO|PCI_USES_MASTER, IS_BOOMERANG|HAS_MII|EEPROM_8BIT, 128, },
{"3CCFE575BT Cyclone CardBus",
- PCI_USES_IO|PCI_USES_MASTER, IS_CYCLONE|HAS_NWAY|HAS_CB_FNS|EEPROM_8BIT|INVERT_LED_PWR, 128, },
+ PCI_USES_IO|PCI_USES_MASTER, IS_CYCLONE|HAS_NWAY|HAS_CB_FNS|EEPROM_8BIT|
+ INVERT_LED_PWR|HAS_HWCKSM, 128, },
{"3CCFE575CT Tornado CardBus",
- PCI_USES_IO|PCI_USES_MASTER, IS_TORNADO|HAS_NWAY|HAS_CB_FNS|EEPROM_8BIT|INVERT_MII_PWR|MAX_COLLISION_RESET, 128, },
+ PCI_USES_IO|PCI_USES_MASTER, IS_TORNADO|HAS_NWAY|HAS_CB_FNS|EEPROM_8BIT|INVERT_MII_PWR|
+ MAX_COLLISION_RESET|HAS_HWCKSM, 128, },
{"3CCFE656 Cyclone CardBus",
- PCI_USES_IO|PCI_USES_MASTER, IS_CYCLONE|HAS_NWAY|HAS_CB_FNS|EEPROM_8BIT|INVERT_MII_PWR|INVERT_LED_PWR, 128, },
+ PCI_USES_IO|PCI_USES_MASTER, IS_CYCLONE|HAS_NWAY|HAS_CB_FNS|EEPROM_8BIT|INVERT_MII_PWR|
+ INVERT_LED_PWR|HAS_HWCKSM, 128, },
{"3CCFEM656B Cyclone+Winmodem CardBus",
- PCI_USES_IO|PCI_USES_MASTER, IS_CYCLONE|HAS_NWAY|HAS_CB_FNS|EEPROM_8BIT|INVERT_MII_PWR|INVERT_LED_PWR, 128, },
+ PCI_USES_IO|PCI_USES_MASTER, IS_CYCLONE|HAS_NWAY|HAS_CB_FNS|EEPROM_8BIT|INVERT_MII_PWR|
+ INVERT_LED_PWR|HAS_HWCKSM, 128, },
{"3CXFEM656C Tornado+Winmodem CardBus", /* From pcmcia-cs-3.1.5 */
- PCI_USES_IO|PCI_USES_MASTER, IS_TORNADO|HAS_NWAY|HAS_CB_FNS|EEPROM_8BIT|INVERT_MII_PWR|MAX_COLLISION_RESET, 128, },
+ PCI_USES_IO|PCI_USES_MASTER, IS_TORNADO|HAS_NWAY|HAS_CB_FNS|EEPROM_8BIT|INVERT_MII_PWR|
+ MAX_COLLISION_RESET|HAS_HWCKSM, 128, },
{"3c450 HomePNA Tornado", /* AKPM: from Don's 0.99Q */
- PCI_USES_IO|PCI_USES_MASTER, IS_TORNADO|HAS_NWAY, 128, },
+ PCI_USES_IO|PCI_USES_MASTER, IS_TORNADO|HAS_NWAY|HAS_HWCKSM, 128, },
{0,}, /* 0 terminated list. */
};
IPChksumValid=1<<29, TCPChksumValid=1<<30, UDPChksumValid=1<<31,
};
+#ifdef MAX_SKB_FRAGS
+#define DO_ZEROCOPY 1
+#else
+#define DO_ZEROCOPY 0
+#endif
+
struct boom_tx_desc {
u32 next; /* Last entry points to 0. */
s32 status; /* bits 0:12 length, others see below. */
- u32 addr;
- s32 length;
+#if DO_ZEROCOPY
+ struct {
+ u32 addr;
+ s32 length;
+ } frag[1+MAX_SKB_FRAGS];
+#else
+ u32 addr;
+ s32 length;
+#endif
};
/* Values for the Tx status entry. */
struct pci_dev *pdev;
char *cb_fn_base; /* CardBus function status addr space. */
+ /* Some values here only for performance evaluation and path-coverage */
+ int rx_nocopy, rx_copy, queued_packet, rx_csumhits;
+ int card_idx;
+
/* The remainder are related to chip state, mostly media selection. */
struct timer_list timer; /* Media selection timer. */
struct timer_list rx_oom_timer; /* Rx skb allocation retry timer */
full_bus_master_tx:1, full_bus_master_rx:2, /* Boomerang */
flow_ctrl:1, /* Use 802.3x flow control (PAUSE only) */
partner_flow_ctrl:1, /* Partner supports flow control */
- tx_full:1,
has_nway:1,
+ enable_wol:1, /* Wake-on-LAN is enabled */
open:1,
+ medialock:1,
must_free_region:1; /* Flag: if zero, Cardbus owns the I/O region */
int drv_flags;
u16 status_enable;
#define MAX_UNITS 8
static int options[MAX_UNITS] = { -1, -1, -1, -1, -1, -1, -1, -1,};
static int full_duplex[MAX_UNITS] = {-1, -1, -1, -1, -1, -1, -1, -1};
+static int hw_checksums[MAX_UNITS] = {-1, -1, -1, -1, -1, -1, -1, -1};
static int flow_ctrl[MAX_UNITS] = {-1, -1, -1, -1, -1, -1, -1, -1};
+static int enable_wol[MAX_UNITS] = {-1, -1, -1, -1, -1, -1, -1, -1};
/* #define dev_alloc_skb dev_alloc_skb_debug */
}
rc = vortex_probe1(NULL, ioaddr, inw(ioaddr + 0xC88) >> 12,
- EISA_TBL_OFFSET,
- vortex_cards_found);
+ EISA_TBL_OFFSET, vortex_cards_found);
if (rc == 0)
vortex_cards_found++;
else
}
dev = init_etherdev(NULL, sizeof(*vp));
+ retval = -ENOMEM;
if (!dev) {
printk (KERN_ERR PFX "unable to allocate etherdev, aborting\n");
- retval = -ENOMEM;
goto out;
}
SET_MODULE_OWNER(dev);
vp->drv_flags = vci->drv_flags;
vp->has_nway = (vci->drv_flags & HAS_NWAY) ? 1 : 0;
vp->io_size = vci->io_size;
+ vp->card_idx = card_idx;
/* module list only for EISA devices */
if (pdev == NULL) {
vp->rx_ring = pci_alloc_consistent(pdev, sizeof(struct boom_rx_desc) * RX_RING_SIZE
+ sizeof(struct boom_tx_desc) * TX_RING_SIZE,
&vp->rx_ring_dma);
- if (vp->rx_ring == 0) {
- retval = -ENOMEM;
+ retval = -ENOMEM;
+ if (vp->rx_ring == 0)
goto free_region;
- }
vp->tx_ring = (struct boom_tx_desc *)(vp->rx_ring + RX_RING_SIZE);
vp->tx_ring_dma = vp->rx_ring_dma + sizeof(struct boom_rx_desc) * RX_RING_SIZE;
/* if we are a PCI driver, we store info in pdev->driver_data
* instead of a module list */
if (pdev)
- pdev->driver_data = dev;
+ pci_set_drvdata(pdev, dev);
/* The lower four bits are the media type. */
if (dev->mem_start) {
vp->media_override = 7;
if (option >= 0) {
vp->media_override = ((option & 7) == 2) ? 0 : option & 15;
+ if (vp->media_override != 7)
+ vp->medialock = 1;
vp->full_duplex = (option & 0x200) ? 1 : 0;
vp->bus_master = (option & 16) ? 1 : 0;
}
vp->full_duplex = 1;
if (flow_ctrl[card_idx] > 0)
vp->flow_ctrl = 1;
+ if (enable_wol[card_idx] > 0)
+ vp->enable_wol = 1;
}
vp->force_fd = vp->full_duplex;
EL3WINDOW(4);
step = (inb(ioaddr + Wn4_NetDiag) & 0x1e) >> 1;
- printk(KERN_INFO " product code '%c%c' rev %02x.%d date %02d-"
+ printk(KERN_INFO " product code %02x%02x rev %02x.%d date %02d-"
"%02d-%02d\n", eeprom[6]&0xff, eeprom[6]>>8, eeprom[0x14],
step, (eeprom[4]>>5) & 15, eeprom[4] & 31, eeprom[4]>>9);
unsigned short n;
fn_st_addr = pci_resource_start (pdev, 2);
- if (fn_st_addr)
+ if (fn_st_addr) {
vp->cb_fn_base = ioremap(fn_st_addr, 128);
+ retval = -ENOMEM;
+ if (!vp->cb_fn_base)
+ goto free_ring;
+ }
printk(KERN_INFO "%s: CardBus functions mapped %8.8lx->%p\n",
dev->name, fn_st_addr, vp->cb_fn_base);
EL3WINDOW(2);
XCVR(config) > XCVR_ExtMII ? "<invalid transceiver>" :
media_tbl[XCVR(config)].name);
vp->default_media = XCVR(config);
+ if (vp->default_media == XCVR_NWAY)
+ vp->has_nway = 1;
vp->autoselect = AUTOSELECT(config);
}
}
}
- if (vp->capabilities & CapPwrMgmt)
+ if (pdev && vp->enable_wol && (vp->capabilities & CapPwrMgmt))
acpi_set_WOL(dev);
if (vp->capabilities & CapBusMaster) {
/* The 3c59x-specific entries in the device structure. */
dev->open = vortex_open;
- dev->hard_start_xmit = vp->full_bus_master_tx ?
- boomerang_start_xmit : vortex_start_xmit;
+ if (vp->full_bus_master_tx) {
+ dev->hard_start_xmit = boomerang_start_xmit;
+#ifndef CONFIG_HIGHMEM
+ /* Actually, it still should work with iommu. */
+ dev->features |= NETIF_F_SG;
+#endif
+ if (((hw_checksums[card_idx] == -1) && (vp->drv_flags & HAS_HWCKSM)) ||
+ (hw_checksums[card_idx] == 1)) {
+ dev->features |= NETIF_F_IP_CSUM;
+ }
+ } else {
+ dev->hard_start_xmit = vortex_start_xmit;
+ }
+
+ if (vortex_debug > 0) {
+ printk(KERN_INFO "%s: scatter/gather %sabled. h/w checksums %sabled\n",
+ dev->name,
+ (dev->features & NETIF_F_SG) ? "en":"dis",
+ (dev->features & NETIF_F_IP_CSUM) ? "en":"dis");
+ }
+
dev->stop = vortex_close;
dev->get_stats = vortex_get_stats;
dev->do_ioctl = vortex_ioctl;
dev->set_multicast_list = set_rx_mode;
dev->tx_timeout = vortex_tx_timeout;
dev->watchdog_timeo = (watchdog * HZ) / 1000;
-// publish_netdev(dev);
return 0;
+free_ring:
+ pci_free_consistent(pdev,
+ sizeof(struct boom_rx_desc) * RX_RING_SIZE
+ + sizeof(struct boom_tx_desc) * TX_RING_SIZE,
+ vp->rx_ring,
+ vp->rx_ring_dma);
free_region:
if (vp->must_free_region)
release_region(ioaddr, vci->io_size);
-// withdraw_netdev(dev);
unregister_netdev(dev);
kfree (dev);
printk(KERN_ERR PFX "vortex_probe1 fails. Returns %d\n", retval);
return retval;
}
-static void wait_for_completion(struct net_device *dev, int cmd)
+static void
+wait_for_completion(struct net_device *dev, int cmd)
{
int i;
/* OK, that didn't work. Do it the slow way. One second */
for (i = 0; i < 100000; i++) {
if (!(inw(dev->base_addr + EL3_STATUS) & CmdInProgress)) {
- printk(KERN_INFO "%s: command 0x%04x took %d usecs! Please tell andrewm@uow.edu.au\n",
+ if (vortex_debug > 1)
+ printk(KERN_INFO "%s: command 0x%04x took %d usecs\n",
dev->name, cmd, i * 10);
return;
}
long ioaddr = dev->base_addr;
struct vortex_private *vp = (struct vortex_private *)dev->priv;
unsigned int config;
- int i, device_id;
+ int i;
+
+ if (vp->pdev && vp->enable_wol) /* AKPM: test not needed? */
+ pci_set_power_state(vp->pdev, 0); /* Go active */
- if (vp->pdev)
- device_id = vp->pdev->device;
- else
- device_id = 0x5900; /* EISA */
-
/* Before initializing select the active media port. */
EL3WINDOW(3);
config = inl(ioaddr + Wn3_Config);
if (vp->media_override != 7) {
- if (vortex_debug > 1)
- printk(KERN_INFO "%s: Media override to transceiver %d (%s).\n",
- dev->name, vp->media_override,
- media_tbl[vp->media_override].name);
+ printk(KERN_INFO "%s: Media override to transceiver %d (%s).\n",
+ dev->name, vp->media_override,
+ media_tbl[vp->media_override].name);
dev->if_port = vp->media_override;
} else if (vp->autoselect) {
if (vp->has_nway) {
- printk(KERN_INFO "%s: using NWAY autonegotiation\n", dev->name);
+ printk(KERN_INFO "%s: using NWAY device table, not %d\n", dev->name, dev->if_port);
dev->if_port = XCVR_NWAY;
} else {
/* Find first available media type, starting with 100baseTx. */
while (! (vp->available_media & media_tbl[dev->if_port].mask))
dev->if_port = media_tbl[dev->if_port].next;
printk(KERN_INFO "%s: first available media type: %s\n",
- dev->name,
- media_tbl[dev->if_port].name);
+ dev->name, media_tbl[dev->if_port].name);
}
} else {
dev->if_port = vp->default_media;
vp->full_duplex = vp->force_fd;
config = BFINS(config, dev->if_port, 20, 4);
-//AKPM if (!vp->has_nway)
- {
- if (vortex_debug > 6)
- printk(KERN_DEBUG "vortex_up(): writing 0x%x to InternalConfig\n",
- config);
- outl(config, ioaddr + Wn3_Config);
- }
+ if (vortex_debug > 6)
+ printk(KERN_DEBUG "vortex_up(): writing 0x%x to InternalConfig\n", config);
+ outl(config, ioaddr + Wn3_Config);
if (dev->if_port == XCVR_MII || dev->if_port == XCVR_NWAY) {
int mii_reg1, mii_reg5;
vp->partner_flow_ctrl = ((mii_reg5 & 0x0400) != 0);
if (vortex_debug > 1)
printk(KERN_INFO "%s: MII #%d status %4.4x, link partner capability %4.4x,"
- " setting %s-duplex.\n", dev->name, vp->phys[0],
- mii_reg1, mii_reg5, vp->full_duplex ? "full" : "half");
+ " info1 %04x, setting %s-duplex.\n",
+ dev->name, vp->phys[0],
+ mii_reg1, mii_reg5,
+ vp->info1, ((vp->info1 & 0x8000) || vp->full_duplex) ? "full" : "half");
EL3WINDOW(3);
}
int i;
int retval;
+ if (vp->pdev && vp->enable_wol) /* AKPM: test not needed? */
+ pci_set_power_state(vp->pdev, 0); /* Go active */
+
/* Use the now-standard shared IRQ implementation. */
if ((retval = request_irq(dev->irq, vp->full_bus_master_rx ?
&boomerang_interrupt : &vortex_interrupt, SA_SHIRQ, dev->name, dev))) {
vortex_up(dev);
vp->open = 1;
- vp->tx_full = 0;
return 0;
out_free_irq:
return retval;
}
-static void vortex_timer(unsigned long data)
+static void
+vortex_timer(unsigned long data)
{
struct net_device *dev = (struct net_device *)data;
struct vortex_private *vp = (struct vortex_private *)dev->priv;
printk(KERN_DEBUG "dev->watchdog_timeo=%d\n", dev->watchdog_timeo);
}
+ if (vp->medialock)
+ goto leave_media_alone;
disable_irq(dev->irq);
old_window = inw(ioaddr + EL3_CMD) >> 13;
EL3WINDOW(4);
dev->name, vp->full_duplex ? "full" : "half",
vp->phys[0], mii_reg5);
/* Set the full-duplex bit. */
- EL3WINDOW(3); /* AKPM: this was missing from 2.3.99 3c59x.c! */
+ EL3WINDOW(3);
outw( (vp->full_duplex ? 0x20 : 0) |
(dev->mtu > 1500 ? 0x40 : 0) |
((vp->full_duplex && vp->flow_ctrl && vp->partner_flow_ctrl) ? 0x100 : 0),
EL3WINDOW(old_window);
enable_irq(dev->irq);
+leave_media_alone:
if (vortex_debug > 2)
printk(KERN_DEBUG "%s: Media selection timer finished, %s.\n",
dev->name, media_tbl[dev->if_port].name);
/* Bad idea here.. but we might as well handle a few events. */
{
/*
- * AKPM: block interrupts because vortex_interrupt
- * does a bare spin_lock()
+ * Block interrupts because vortex_interrupt does a bare spin_lock()
*/
unsigned long flags;
local_irq_save(flags);
vp->stats.tx_errors++;
if (vp->full_bus_master_tx) {
- if (vortex_debug > 0)
- printk(KERN_DEBUG "%s: Resetting the Tx ring pointer.\n",
- dev->name);
+ printk(KERN_DEBUG "%s: Resetting the Tx ring pointer.\n", dev->name);
if (vp->cur_tx - vp->dirty_tx > 0 && inl(ioaddr + DownListPtr) == 0)
outl(vp->tx_ring_dma + (vp->dirty_tx % TX_RING_SIZE) * sizeof(struct boom_tx_desc),
ioaddr + DownListPtr);
- if (vp->tx_full && (vp->cur_tx - vp->dirty_tx <= TX_RING_SIZE - 1)) {
- vp->tx_full = 0;
+ if (vp->cur_tx - vp->dirty_tx < TX_RING_SIZE)
netif_wake_queue (dev);
- }
- if (vp->tx_full)
- netif_stop_queue (dev);
if (vp->drv_flags & IS_BOOMERANG)
outb(PKT_BUF_SZ>>8, ioaddr + TxFreeThreshold);
outw(DownUnstall, ioaddr + EL3_CMD);
dev->name, vp->cur_tx);
}
- if (vp->tx_full) {
+ if (vp->cur_tx - vp->dirty_tx >= TX_RING_SIZE) {
if (vortex_debug > 0)
- printk(KERN_WARNING "%s: Tx Ring full, refusing to send buffer.\n",
+ printk(KERN_WARNING "%s: BUG! Tx Ring full, refusing to send buffer.\n",
dev->name);
+ netif_stop_queue(dev);
return 1;
}
+
vp->tx_skbuff[entry] = skb;
+
vp->tx_ring[entry].next = 0;
+#if DO_ZEROCOPY
+ if (skb->ip_summed != CHECKSUM_HW)
+ vp->tx_ring[entry].status = cpu_to_le32(skb->len | TxIntrUploaded);
+ else
+ vp->tx_ring[entry].status = cpu_to_le32(skb->len | TxIntrUploaded | AddTCPChksum);
+
+ if (!skb_shinfo(skb)->nr_frags) {
+ vp->tx_ring[entry].frag[0].addr = cpu_to_le32(pci_map_single(vp->pdev, skb->data,
+ skb->len, PCI_DMA_TODEVICE));
+ vp->tx_ring[entry].frag[0].length = cpu_to_le32(skb->len | LAST_FRAG);
+ } else {
+ int i;
+
+ vp->tx_ring[entry].frag[0].addr = cpu_to_le32(pci_map_single(vp->pdev, skb->data,
+ skb->len-skb->data_len, PCI_DMA_TODEVICE));
+ vp->tx_ring[entry].frag[0].length = cpu_to_le32(skb->len-skb->data_len);
+
+ for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+ skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
+
+ vp->tx_ring[entry].frag[i+1].addr =
+ cpu_to_le32(pci_map_single(vp->pdev,
+ (void*)page_address(frag->page) + frag->page_offset,
+ frag->size, PCI_DMA_TODEVICE));
+
+ if (i == skb_shinfo(skb)->nr_frags-1)
+ vp->tx_ring[entry].frag[i+1].length = cpu_to_le32(frag->size|LAST_FRAG);
+ else
+ vp->tx_ring[entry].frag[i+1].length = cpu_to_le32(frag->size);
+ }
+ }
+#else
vp->tx_ring[entry].addr = cpu_to_le32(pci_map_single(vp->pdev, skb->data, skb->len, PCI_DMA_TODEVICE));
vp->tx_ring[entry].length = cpu_to_le32(skb->len | LAST_FRAG);
vp->tx_ring[entry].status = cpu_to_le32(skb->len | TxIntrUploaded);
+#endif
spin_lock_irqsave(&vp->lock, flags);
/* Wait for the stall to complete. */
prev_entry->next = cpu_to_le32(vp->tx_ring_dma + entry * sizeof(struct boom_tx_desc));
if (inl(ioaddr + DownListPtr) == 0) {
outl(vp->tx_ring_dma + entry * sizeof(struct boom_tx_desc), ioaddr + DownListPtr);
- queued_packet++;
+ vp->queued_packet++;
}
vp->cur_tx++;
if (vp->cur_tx - vp->dirty_tx > TX_RING_SIZE - 1) {
- vp->tx_full = 1;
netif_stop_queue (dev);
} else { /* Clear previous interrupt enable. */
#if defined(tx_interrupt_mitigation)
+ /* Dubious. If in boomeang_interrupt "faster" cyclone ifdef
+ * were selected, this would corrupt DN_COMPLETE. No?
+ */
prev_entry->status &= cpu_to_le32(~TxIntrUploaded);
#endif
- /* netif_start_queue (dev); */ /* AKPM: redundant? */
}
outw(DownUnstall, ioaddr + EL3_CMD);
spin_unlock_irqrestore(&vp->lock, flags);
vp->deferred = 0;
}
- if (status == 0xffff) /* AKPM: h/w no longer present (hotplug)? */
+ if (status == 0xffff) /* h/w no longer present (hotplug)? */
goto handler_exit;
if (vortex_debug > 4)
netif_wake_queue(dev);
} else { /* Interrupt when FIFO has room for max-sized packet. */
outw(SetTxThreshold + (1536>>2), ioaddr + EL3_CMD);
- netif_stop_queue(dev); /* AKPM: This is new */
+ netif_stop_queue(dev);
}
}
}
if ((status & IntLatch) == 0)
goto handler_exit; /* No interrupt: shared IRQs can cause this */
- if (status == 0xffff) { /* AKPM: h/w no longer present (hotplug)? */
+ if (status == 0xffff) { /* h/w no longer present (hotplug)? */
if (vortex_debug > 1)
printk(KERN_DEBUG "boomerang_interrupt(1): status = 0xffff\n");
goto handler_exit;
if (vp->tx_skbuff[entry]) {
struct sk_buff *skb = vp->tx_skbuff[entry];
-
+#if DO_ZEROCOPY
+ int i;
+ for (i=0; i<=skb_shinfo(skb)->nr_frags; i++)
+ pci_unmap_single(vp->pdev,
+ le32_to_cpu(vp->tx_ring[entry].frag[i].addr),
+ le32_to_cpu(vp->tx_ring[entry].frag[i].length)&0xFFF,
+ PCI_DMA_TODEVICE);
+#else
pci_unmap_single(vp->pdev,
le32_to_cpu(vp->tx_ring[entry].addr), skb->len, PCI_DMA_TODEVICE);
+#endif
dev_kfree_skb_irq(skb);
vp->tx_skbuff[entry] = 0;
} else {
dirty_tx++;
}
vp->dirty_tx = dirty_tx;
- if (vp->tx_full && (vp->cur_tx - dirty_tx <= TX_RING_SIZE - 1)) {
+ if (vp->cur_tx - dirty_tx <= TX_RING_SIZE - 1) {
if (vortex_debug > 6)
- printk(KERN_DEBUG "boomerang_interrupt: clearing tx_full\n");
- vp->tx_full = 0;
+ printk(KERN_DEBUG "boomerang_interrupt: wake queue\n");
netif_wake_queue (dev);
}
}
memcpy(skb_put(skb, pkt_len),
vp->rx_skbuff[entry]->tail,
pkt_len);
- rx_copy++;
+ vp->rx_copy++;
} else {
/* Pass up the skbuff already on the Rx ring. */
skb = vp->rx_skbuff[entry];
vp->rx_skbuff[entry] = NULL;
skb_put(skb, pkt_len);
pci_unmap_single(vp->pdev, dma, PKT_BUF_SZ, PCI_DMA_FROMDEVICE);
- rx_nocopy++;
+ vp->rx_nocopy++;
}
skb->protocol = eth_type_trans(skb, dev);
{ /* Use hardware checksum info. */
(csum_bits == (IPChksumValid | TCPChksumValid) ||
csum_bits == (IPChksumValid | UDPChksumValid))) {
skb->ip_summed = CHECKSUM_UNNECESSARY;
- rx_csumhits++;
+ vp->rx_csumhits++;
}
}
netif_rx(skb);
if (vp->full_bus_master_tx)
outl(0, ioaddr + DownListPtr);
- if (vp->capabilities & CapPwrMgmt)
+ if (vp->pdev && vp->enable_wol && (vp->capabilities & CapPwrMgmt))
acpi_set_WOL(dev);
}
dev->name, inw(ioaddr + EL3_STATUS), inb(ioaddr + TxStatus));
printk(KERN_DEBUG "%s: vortex close stats: rx_nocopy %d rx_copy %d"
" tx_queued %d Rx pre-checksummed %d.\n",
- dev->name, rx_nocopy, rx_copy, queued_packet, rx_csumhits);
+ dev->name, vp->rx_nocopy, vp->rx_copy, vp->queued_packet, vp->rx_csumhits);
}
+#if DO_ZEROCOPY
+ if ( vp->rx_csumhits &&
+ ((vp->drv_flags & HAS_HWCKSM) == 0) &&
+ (hw_checksums[vp->card_idx] == -1)) {
+ printk(KERN_WARNING "%s supports hardware checksums, and we're not using them!\n", dev->name);
+ printk(KERN_WARNING "Please see http://www.uow.edu.au/~andrewm/zerocopy.html\n");
+ }
+#endif
+
free_irq(dev->irq, dev);
if (vp->full_bus_master_rx) { /* Free Boomerang bus master Rx buffers. */
}
}
if (vp->full_bus_master_tx) { /* Free Boomerang bus master Tx buffers. */
- for (i = 0; i < TX_RING_SIZE; i++)
+ for (i = 0; i < TX_RING_SIZE; i++) {
if (vp->tx_skbuff[i]) {
struct sk_buff *skb = vp->tx_skbuff[i];
-
+#if DO_ZEROCOPY
+ int k;
+
+ for (k=0; k<=skb_shinfo(skb)->nr_frags; k++)
+ pci_unmap_single(vp->pdev,
+ le32_to_cpu(vp->tx_ring[i].frag[k].addr),
+ le32_to_cpu(vp->tx_ring[i].frag[k].length)&0xFFF,
+ PCI_DMA_TODEVICE);
+#else
pci_unmap_single(vp->pdev, le32_to_cpu(vp->tx_ring[i].addr), skb->len, PCI_DMA_TODEVICE);
+#endif
dev_kfree_skb(skb);
vp->tx_skbuff[i] = 0;
}
+ }
}
vp->open = 0;
int i;
int stalled = inl(ioaddr + PktStatus) & 0x04; /* Possible racy. But it's only debug stuff */
- wait_for_completion(dev, DownStall);
- printk(KERN_ERR " Flags; bus-master %d, full %d; dirty %d(%d) "
- "current %d(%d).\n",
- vp->full_bus_master_tx, vp->tx_full,
+ printk(KERN_ERR " Flags; bus-master %d, dirty %d(%d) current %d(%d)\n",
+ vp->full_bus_master_tx,
vp->dirty_tx, vp->dirty_tx % TX_RING_SIZE,
vp->cur_tx, vp->cur_tx % TX_RING_SIZE);
printk(KERN_ERR " Transmit list %8.8x vs. %p.\n",
inl(ioaddr + DownListPtr),
&vp->tx_ring[vp->dirty_tx % TX_RING_SIZE]);
+ wait_for_completion(dev, DownStall);
for (i = 0; i < TX_RING_SIZE; i++) {
printk(KERN_ERR " %d: @%p length %8.8x status %8.8x\n", i,
&vp->tx_ring[i],
+#if DO_ZEROCOPY
+ le32_to_cpu(vp->tx_ring[i].frag[0].length),
+#else
le32_to_cpu(vp->tx_ring[i].length),
+#endif
le32_to_cpu(vp->tx_ring[i].status));
}
if (!stalled)
vp->stats.tx_bytes += (up & 0xf0) << 12;
}
- /* We change back to window 7 (not 1) with the Vortex. */
- /* AKPM: the previous comment is obsolete - we switch back to the old window */
EL3WINDOW(old_window >> 13);
return;
}
struct vortex_private *vp = (struct vortex_private *)dev->priv;
long ioaddr = dev->base_addr;
- /* AKPM: This kills the 905 */
- if (vortex_debug > 1) {
- printk(KERN_INFO PFX "Wake-on-LAN functions disabled\n");
- }
- return;
-
/* Power up on: 1==Downloaded Filter, 2==Magic Packets, 4==Link Status. */
EL3WINDOW(7);
outw(2, ioaddr + 0x0c);
outw(SetRxFilter|RxStation|RxMulticast|RxBroadcast, ioaddr + EL3_CMD);
outw(RxEnable, ioaddr + EL3_CMD);
/* Change the power state to D3; RxEnable doesn't take effect. */
- pci_write_config_word(vp->pdev, 0xe0, 0x8103);
+ pci_set_power_state(vp->pdev, 0x8103);
}
static void __devexit vortex_remove_one (struct pci_dev *pdev)
{
- struct net_device *dev = pdev->driver_data;
+ struct net_device *dev = pci_get_drvdata(pdev);
struct vortex_private *vp;
if (!dev) {
BUG();
}
- vp = (void *)(dev->priv);
+ vp = dev->priv;
/* AKPM: FIXME: we should have
* if (vp->cb_fn_base) iounmap(vp->cb_fn_base);
* here
*/
unregister_netdev(dev);
- outw(TotalReset, dev->base_addr + EL3_CMD);
+ /* Should really use wait_for_completion() here */
+ outw((vp->drv_flags & EEPROM_NORESET) ? (TotalReset|0x10) : TotalReset, dev->base_addr + EL3_CMD);
+ pci_free_consistent(pdev,
+ sizeof(struct boom_rx_desc) * RX_RING_SIZE
+ + sizeof(struct boom_tx_desc) * TX_RING_SIZE,
+ vp->rx_ring,
+ vp->rx_ring_dma);
if (vp->must_free_region)
release_region(dev->base_addr, vp->io_size);
kfree(dev);
static struct pci_driver vortex_driver = {
- name: "3c575_cb",
+ name: "3c59x",
probe: vortex_init_one,
remove: vortex_remove_one,
suspend: vortex_suspend,
static int __init vortex_init (void)
{
- int rc;
-
- rc = pci_module_init(&vortex_driver);
- if (rc < 0) {
- rc = vortex_eisa_init();
- if (rc > 0)
- vortex_have_eisa = 1;
- } else {
+ int pci_rc, eisa_rc;
+
+ pci_rc = pci_module_init(&vortex_driver);
+ eisa_rc = vortex_eisa_init();
+
+ if (pci_rc == 0)
vortex_have_pci = 1;
- }
+ if (eisa_rc > 0)
+ vortex_have_eisa = 1;
- return rc;
+ return (vortex_have_pci + vortex_have_eisa) ? 0 : -ENODEV;
}
module_init(vortex_init);
module_exit(vortex_cleanup);
-
\f
/*
* Local variables:
init_waitqueue_head (&tp->thr_wait);
init_MUTEX_LOCKED (&tp->thr_exited);
- pdev->driver_data = dev;
+ pci_set_drvdata(pdev, dev);
printk (KERN_INFO "%s: %s at 0x%lx, "
"%2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x, "
static void __devexit rtl8139_remove_one (struct pci_dev *pdev)
{
- struct net_device *dev = pdev->driver_data;
+ struct net_device *dev = pci_get_drvdata (pdev);
struct rtl8139_private *np;
DPRINTK ("ENTER\n");
kfree (dev);
- pdev->driver_data = NULL;
+ pci_set_drvdata (pdev, NULL);
DPRINTK ("EXIT\n");
}
void *ioaddr,
int status, int link_changed)
{
- printk (KERN_DEBUG "%s: Abnormal interrupt, status %8.8x.\n",
- dev->name, status);
+ DPRINTK ("%s: Abnormal interrupt, status %8.8x.\n",
+ dev->name, status);
assert (dev != NULL);
assert (tp != NULL);
static void rtl8139_suspend (struct pci_dev *pdev)
{
- struct net_device *dev = pdev->driver_data;
+ struct net_device *dev = pci_get_drvdata (pdev);
struct rtl8139_private *tp = dev->priv;
void *ioaddr = tp->mmio_addr;
unsigned long flags;
static void rtl8139_resume (struct pci_dev *pdev)
{
- struct net_device *dev = pdev->driver_data;
+ struct net_device *dev = pci_get_drvdata (pdev);
netif_device_attach (dev);
rtl8139_hw_start (dev);
*/
-static const char *version = "82596.c $Revision: 1.4 $\n";
-
#include <linux/config.h>
#include <linux/module.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
+static char version[] __initdata =
+ "82596.c $Revision: 1.4 $\n";
+
/* DEBUG flags
*/
spinlock_t lock;
};
-char init_setup[] =
+static char init_setup[] =
{
0x8E, /* length, prefetch on */
0xC8, /* fifo to 8, monitor off */
/* this is easy the ethernet interface can only be at 0x300 */
/* first check nothing is already registered here */
- if (check_region(ioaddr, I596_TOTAL_SIZE)) {
+ if (!request_region(ioaddr, I596_TOTAL_SIZE, dev->name)) {
printk("82596: IO address 0x%04x in use\n", ioaddr);
- return -ENODEV;
+ return -EBUSY;
}
for (i = 0; i < 8; i++) {
/* checksum is a multiple of 0x100, got this wrong first time
some machines have 0x100, some 0x200. The DOS driver doesn't
- even bother with the checksum */
+ even bother with the checksum.
+ Some other boards trip the checksum.. but then appear as
+ ether address 0. Trap these - AC */
- if (checksum % 0x100)
- return -ENODEV;
-
- /* Some other boards trip the checksum.. but then appear as
- * ether address 0. Trap these - AC */
-
- if (memcmp(eth_addr, "\x00\x00\x49", 3) != 0)
- return -ENODEV;
-
- if (!request_region(ioaddr, I596_TOTAL_SIZE, "i596"))
+ if ((checksum % 0x100) ||
+ (memcmp(eth_addr, "\x00\x00\x49", 3) != 0)) {
+ release_region(ioaddr, I596_TOTAL_SIZE);
return -ENODEV;
+ }
dev->base_addr = ioaddr;
dev->irq = 10;
tristate ' ICL EtherTeam 16i/32 support' CONFIG_ETH16I
tristate ' NE2000/NE1000 support' CONFIG_NE2000
if [ "$CONFIG_OBSOLETE" = "y" ]; then
- tristate ' SEEQ8005 support (EXPERIMENTAL)' CONFIG_SEEQ8005
+ dep_tristate ' SEEQ8005 support (EXPERIMENTAL)' CONFIG_SEEQ8005 $CONFIG_EXPERIMENTAL
+ fi
+ if [ "$CONFIG_OBSOLETE" = "y" ]; then
+ tristate ' SK_G16 support' CONFIG_SK_G16
fi
- tristate ' SK_G16 support' CONFIG_SK_G16
fi
if [ "$CONFIG_MCA" = "y" ]; then
tristate ' SKnet MCA support' CONFIG_SKMC
bool 'FDDI driver support' CONFIG_FDDI
if [ "$CONFIG_FDDI" = "y" ]; then
- dep_tristate ' Digital DEFEA and DEFPA adapter support' CONFIG_DEFXX $CONFIG_PCI
+ if [ "$CONFIG_PCI" = "y" -o "$CONFIG_EISA" = "y" ]; then
+ tristate ' Digital DEFEA and DEFPA adapter support' CONFIG_DEFXX
+ fi
tristate ' SysKonnect FDDI PCI support' CONFIG_SKFP
fi
if [ "$CONFIG_EXPERIMENTAL" = "y" ]; then
if [ "$CONFIG_INET" = "y" ]; then
bool 'HIPPI driver support (EXPERIMENTAL)' CONFIG_HIPPI
- if [ "$CONFIG_HIPPI" = "y" ]; then
+ if [ "$CONFIG_HIPPI" = "y" -a "$CONFIG_PCI" = "y" ]; then
tristate ' Essential RoadRunner HIPPI PCI adapter support' CONFIG_ROADRUNNER
if [ "$CONFIG_ROADRUNNER" != "n" ]; then
bool ' Use large TX/RX rings' CONFIG_ROADRUNNER_LARGE_RINGS
fi
if [ "$CONFIG_EXPERIMENTAL" = "y" ]; then
- tristate 'Red Creek Hardware VPN (EXPERIMENTAL)' CONFIG_RCPCI
+ dep_tristate 'Red Creek Hardware VPN (EXPERIMENTAL)' CONFIG_RCPCI $CONFIG_PCI
tristate 'Traffic Shaper (EXPERIMENTAL)' CONFIG_SHAPER
fi
#define tigonFwRodata 0
#else
/* Generated by genfw.c */
-u32 tigonFwText[(MAX_TEXT_LEN/4) + 1] __initdata = {
+static u32 tigonFwText[(MAX_TEXT_LEN/4) + 1] __initdata = {
0x10000003,
0x0, 0xd, 0xd, 0x3c1d0001,
0x8fbd5c54, 0x3a0f021, 0x3c100000, 0x26104000,
0x3c010001, 0x220821, 0xac317e30, 0x8fbf0024,
0x8fb40020, 0x8fb3001c, 0x8fb20018, 0x8fb10014,
0x8fb00010, 0x3e00008, 0x27bd0028, 0x0 };
-u32 tigonFwRodata[(MAX_RODATA_LEN/4) + 1] __initdata = {
+static u32 tigonFwRodata[(MAX_RODATA_LEN/4) + 1] __initdata = {
0x24486561, 0x6465723a, 0x202f7072,
0x6f6a6563, 0x74732f72, 0x63732f73, 0x772f6765,
0x2f2e2f6e, 0x69632f66, 0x772f636f, 0x6d6d6f6e,
0x0, 0x14c38, 0x14c38, 0x14b80,
0x14bc4, 0x14c38, 0x14c38, 0x0,
0x0, 0x0 };
-u32 tigonFwData[(MAX_DATA_LEN/4) + 1] __initdata = {
+static u32 tigonFwData[(MAX_DATA_LEN/4) + 1] __initdata = {
0x416c7465,
0x6f6e2041, 0x63654e49, 0x43205600, 0x416c7465,
0x6f6e2041, 0x63654e49, 0x43205600, 0x42424242,
#define tigon2FwSbssLen 0xcc
#define tigon2FwBssAddr 0x00016f50
#define tigon2FwBssLen 0x20c0
-u32 tigon2FwText[(MAX_TEXT_LEN/4) + 1] __initdata = {
+static u32 tigon2FwText[(MAX_TEXT_LEN/4) + 1] __initdata = {
0x0,
0x10000003, 0x0, 0xd, 0xd,
0x3c1d0001, 0x8fbd6d20, 0x3a0f021, 0x3c100000,
0x24020001, 0x8f430328, 0x1021, 0x24630001,
0x3e00008, 0xaf430328, 0x3e00008, 0x0,
0x0, 0x0, 0x0, 0x0 };
-u32 tigon2FwRodata[(MAX_RODATA_LEN/4) + 1] __initdata = {
+static u32 tigon2FwRodata[(MAX_RODATA_LEN/4) + 1] __initdata = {
0x24486561, 0x6465723a, 0x202f7072,
0x6f6a6563, 0x74732f72, 0x63732f73, 0x772f6765,
0x2f2e2f6e, 0x69632f66, 0x77322f63, 0x6f6d6d6f,
0x14ed8, 0x14b8c, 0x14bd8, 0x14c24,
0x14ed8, 0x7365746d, 0x61636163, 0x74000000,
0x0, 0x0 };
-u32 tigon2FwData[(MAX_DATA_LEN/4) + 1] __initdata = {
+static u32 tigon2FwData[(MAX_DATA_LEN/4) + 1] __initdata = {
0x1,
0x1, 0x1, 0xc001fc, 0x3ffc,
0xc00000, 0x416c7465, 0x6f6e2041, 0x63654e49,
dep_tristate 'ARCnet COM90xx (IO mapped) chipset driver' CONFIG_ARCNET_COM90xxIO $CONFIG_ARCNET
dep_tristate 'ARCnet COM90xx (RIM I) chipset driver' CONFIG_ARCNET_RIM_I $CONFIG_ARCNET
dep_tristate 'ARCnet COM20020 chipset driver' CONFIG_ARCNET_COM20020 $CONFIG_ARCNET
- if [ "$CONFIG_ARCNET_COM20020" != "n" ]; then
- dep_tristate ' Support for COM20020 on ISA' CONFIG_ARCNET_COM20020_ISA $CONFIG_ARCNET_COM20020 $CONFIG_ARCNET
- dep_tristate ' Support for COM20020 on PCI' CONFIG_ARCNET_COM20020_PCI $CONFIG_ARCNET_COM20020 $CONFIG_ARCNET
- fi
+ dep_tristate ' Support for COM20020 on ISA' CONFIG_ARCNET_COM20020_ISA $CONFIG_ARCNET_COM20020 $CONFIG_ISA
+ dep_tristate ' Support for COM20020 on PCI' CONFIG_ARCNET_COM20020_PCI $CONFIG_ARCNET_COM20020 $CONFIG_PCI
fi
endmenu
if (!dev)
return err;
lp = dev->priv = kmalloc(sizeof(struct arcnet_local), GFP_KERNEL);
- if (!lp)
- return -ENOMEM;
+ if (!lp) {
+ err = -ENOMEM;
+ goto out_dev;
+ }
memset(lp, 0, sizeof(struct arcnet_local));
pdev->driver_data = dev;
if (check_region(ioaddr, ARCNET_TOTAL_SIZE)) {
BUGMSG(D_INIT, "IO region %xh-%xh already allocated.\n",
ioaddr, ioaddr + ARCNET_TOTAL_SIZE - 1);
- return -EBUSY;
+ err = -EBUSY;
+ goto out_priv;
}
if (ASTATUS() == 0xFF) {
BUGMSG(D_NORMAL, "IO address %Xh was reported by PCI BIOS, "
"but seems empty!\n", ioaddr);
- return -EIO;
+ err = -EIO;
+ goto out_priv;
}
- if (com20020_check(dev))
- return -EIO;
+ if (com20020_check(dev)) {
+ err = -EIO;
+ goto out_priv;
+ }
+
+ if ((err = com20020_found(dev, SA_SHIRQ)) != 0)
+ goto out_priv;
+
+ return 0;
- return com20020_found(dev, SA_SHIRQ);
+out_priv:
+ kfree(dev->priv);
+out_dev:
+ kfree(dev);
+ return err;
}
static void __devexit com20020pci_remove(struct pci_dev *pdev)
return "type A672T";
}
}
-#ifdef ARLAN_DEBUGING
+#ifdef ARLAN_DEBUGGING
static void arlan_print_diagnostic_info(struct net_device *dev)
{
int i;
DEBUGSHM(4, "arlan configuredStatus = %d \n", arlan->configuredStatusFlag, u_char);
DEBUGSHM(4, "arlan driver diagnostic: 0x%2x\n", arlan->diagnosticInfo, u_char);
- /* issue nop command - no interupt */
+ /* issue nop command - no interrupt */
arlan_command(dev, ARLAN_COMMAND_NOOP);
if (arlan_command(dev, ARLAN_COMMAND_WAIT_NOW) != 0)
return -1;
#define CTBLN(num,card,nam) \
{num , #nam, &(arlan_conf[card].nam), \
sizeof(int), 0600, NULL, &proc_dointvec}
-#ifdef ARLAN_DEBUGING
+#ifdef ARLAN_DEBUGGING
#define ARLAN_PROC_DEBUG_ENTRIES {48, "entry_exit_debug", &arlan_entry_and_exit_debug, \
sizeof(int), 0600, NULL, &proc_dointvec},\
static int async = 1;
static int tx_queue_len = 1;
static int arlan_EEPROM_bad;
-static int arlan_entry_and_exit_debug;
-#ifdef ARLAN_DEBUGING
+#ifdef ARLAN_DEBUGGING
static int arlan_entry_debug;
static int arlan_exit_debug;
MODULE_PARM(arlan_EEPROM_bad, "i");
EXPORT_SYMBOL(arlan_device);
+EXPORT_SYMBOL(arlan_conf);
EXPORT_SYMBOL(last_arlan);
return ((long long) timev.tv_sec * 1000000 + timev.tv_usec);
};
-#ifdef ARLAN_ENTRY_EXIT_DEBUGING
+#ifdef ARLAN_ENTRY_EXIT_DEBUGGING
#define ARLAN_DEBUG_ENTRY(name) \
{\
struct timeval timev;\
}
-#ifdef ARLAN_DEBUGING
+#ifdef ARLAN_DEBUGGING
static void arlan_print_registers(struct net_device *dev, int line)
{
}
/* we reach here if multicast filtering is on and packet
* is multicast and not for receive */
- goto end_of_interupt;
+ goto end_of_interrupt;
}
}
#endif // ARLAN_MULTICAST
break;
default:
- printk(KERN_ERR "arlan intr: recieved unknown status\n");
+ printk(KERN_ERR "arlan intr: received unknown status\n");
priv->stats.rx_crc_errors++;
break;
}
return 0;
}
-#ifdef ARLAN_DEBUGING
+#ifdef ARLAN_DEBUGGING
static long alignLong(volatile u_char * ptr)
{
long ret;
#include <linux/etherdevice.h>
-//#define ARLAN_DEBUGING 1
+//#define ARLAN_DEBUGGING 1
#define ARLAN_PROC_INTERFACE
#define MAX_ARLANS 4 /* not more than 4 ! */
extern char * siteName;
extern int arlan_entry_debug;
extern int arlan_exit_debug;
-extern int arlan_entry_and_exit_debug;
extern int testMemory;
extern const char* arlan_version;
extern int arlan_command(struct net_device * dev, int command);
#define IFDEBUG( L ) if ( (L) & arlan_debug )
#define ARLAN_FAKE_HDR_LEN 12
-#ifdef ARLAN_DEBUGING
+#ifdef ARLAN_DEBUGGING
#define DEBUG 1
- #define ARLAN_ENTRY_EXIT_DEBUGING 1
+ #define ARLAN_ENTRY_EXIT_DEBUGGING 1
#define ARLAN_DEBUG(a,b) printk(KERN_DEBUG a, b)
#else
#define ARLAN_DEBUG(a,b)
int tx_queue_len;
};
-struct arlan_conf_stru arlan_conf[MAX_ARLANS];
+extern struct arlan_conf_stru arlan_conf[MAX_ARLANS];
struct TxParam
{
dev->trans_start = jiffies;
netif_wake_queue(dev);
np->stats.tx_errors++;
- return;
}
static int atp_send_packet(struct sk_buff *skb, struct net_device *dev)
static void bond_set_multicast_list(struct net_device *master)
{
- bonding_t *bond = master->priv;
- slave_t *slave;
-
- for (slave = bond->next; slave != (slave_t*)bond; slave = slave->next) {
- slave->dev->mc_list = master->mc_list;
- slave->dev->mc_count = master->mc_count;
- slave->dev->flags = master->flags;
- slave->dev->set_multicast_list(slave->dev);
- }
}
static int bond_enslave(struct net_device *master, struct net_device *dev)
: Use SET_MODULE_OWNER()
: Tidied up strange request_irq() abuse in net_open().
-*/
-
-static char version[] =
-"cs89x0.c: v2.4.0-test11-pre4 Russell Nelson <nelson@crynwr.com>, Andrew Morton <andrewm@uow.edu.au>\n";
-
-/* ======================= end of configuration ======================= */
+ Andrew Morton : Kernel 2.4.3-pre1
+ : Request correct number of pages for DMA (Hugh Dickens)
+ : Select PP_ChipID _after_ unregister_netdev in cleanup_module()
+ : because unregister_netdev() calls get_stats.
+ : Make `version[]' __initdata
+ : Uninlined the read/write reg/word functions.
+*/
/* Always include 'config.h' first in case the user wants to turn on
or override something. */
#include <linux/in.h>
#include <linux/slab.h>
#include <linux/string.h>
+#include <linux/init.h>
#include <asm/system.h>
#include <asm/bitops.h>
#include <asm/io.h>
#include "cs89x0.h"
+static char version[] __initdata =
+"cs89x0.c: v2.4.3-pre1 Russell Nelson <nelson@crynwr.com>, Andrew Morton <andrewm@uow.edu.au>\n";
+
/* First, a few definitions that the brave might change.
A zero-terminated list of I/O addresses to be probed. Some special flags..
Addr & 1 = Read back the address port, look for signature and reset
SET_MODULE_OWNER(dev);
if (net_debug)
- printk("cs89x0:cs89x0_probe()\n");
+ printk("cs89x0:cs89x0_probe(0x%x)\n", base_addr);
if (base_addr > 0x1ff) /* Check a single specified location. */
return cs89x0_probe1(dev, base_addr);
return -ENODEV;
}
-extern int inline
+static int
readreg(struct net_device *dev, int portno)
{
outw(portno, dev->base_addr + ADD_PORT);
return inw(dev->base_addr + DATA_PORT);
}
-extern void inline
+static void
writereg(struct net_device *dev, int portno, int value)
{
outw(portno, dev->base_addr + ADD_PORT);
outw(value, dev->base_addr + DATA_PORT);
}
-extern int inline
+static int
readword(struct net_device *dev, int portno)
{
return inw(dev->base_addr + portno);
}
-extern void inline
+static void
writeword(struct net_device *dev, int portno, int value)
{
outw(value, dev->base_addr + portno);
lp = (struct net_local *)dev->priv;
/* Grab the region so we can find another board if autoIRQ fails. */
- if (!request_region(ioaddr, NETCARD_IO_EXTENT, dev->name)) {
+ if (!request_region(ioaddr & ~3, NETCARD_IO_EXTENT, dev->name)) {
+ printk(KERN_ERR "%s: request_region(0x%x, 0x%x) failed\n",
+ dev->name, ioaddr, NETCARD_IO_EXTENT);
retval = -EBUSY;
goto out1;
}
expect to find the EISA signature word. An IO with a base of 0x3
will skip the test for the ADD_PORT. */
if (ioaddr & 1) {
+ if (net_debug > 1)
+ printk(KERN_INFO "%s: odd ioaddr 0x%x\n", dev->name, ioaddr);
if ((ioaddr & 2) != 2)
if ((inw((ioaddr & ~3)+ ADD_PORT) & ADD_MASK) != ADD_SIG) {
+ printk(KERN_ERR "%s: bad signature 0x%x\n",
+ dev->name, inw((ioaddr & ~3)+ ADD_PORT));
retval = -ENODEV;
goto out2;
}
ioaddr &= ~3;
outw(PP_ChipID, ioaddr + ADD_PORT);
}
+printk("PP_addr=0x%x\n", inw(ioaddr + ADD_PORT));
if (inw(ioaddr + DATA_PORT) != CHIP_EISA_ID_SIG) {
+ printk(KERN_ERR "%s: incorrect signature 0x%x\n",
+ dev->name, inw(ioaddr + DATA_PORT));
retval = -ENODEV;
goto out2;
}
lp->adapter_cnf |= A_CNF_AUI | A_CNF_10B_T |
A_CNF_MEDIA_AUI | A_CNF_MEDIA_10B_T | A_CNF_MEDIA_AUTO;
+ if (net_debug > 1)
+ printk(KERN_INFO "%s: PP_LineCTL=0x%x, adapter_cnf=0x%x\n",
+ dev->name, i, lp->adapter_cnf);
+
/* IRQ. Other chips already probe, see below. */
if (lp->chip_type == CS8900)
lp->isa_config = readreg(dev, PP_CS8900_ISAINT) & INT_NO_MASK;
dev->dev_addr[i*2] = eeprom_buff[i];
dev->dev_addr[i*2+1] = eeprom_buff[i] >> 8;
}
+ if (net_debug > 1)
+ printk(KERN_DEBUG "%s: new adapter_cnf: 0%x\n",
+ dev->name, lp->adapter_cnf);
}
/* allow them to force multiple transceivers. If they force multiple, autosense */
else if (lp->force & FORCE_BNC) {lp->adapter_cnf |= A_CNF_MEDIA_10B_2; }
}
+ if (net_debug > 1)
+ printk(KERN_DEBUG "%s: after force 0x%x, adapter_cnf=0x%x\n",
+ dev->name, lp->force, lp->adapter_cnf);
+
/* FIXME: We don't let you set dc-dc polarity or low RX squelch from the command line: add it here */
/* FIXME: We don't let you set the IMM bit from the command line: add it to lp->auto_neg_cnf here */
printk("cs89x0_probe1() successful\n");
return 0;
out2:
- release_region(ioaddr, NETCARD_IO_EXTENT);
+ release_region(ioaddr & ~3, NETCARD_IO_EXTENT);
out1:
kfree(dev->priv);
dev->priv = 0;
if (lp->isa_config & ANY_ISA_DMA) {
unsigned long flags;
lp->dma_buff = (unsigned char *)__get_dma_pages(GFP_KERNEL,
- (lp->dmasize * 1024) / PAGE_SIZE);
+ get_order(lp->dmasize * 1024));
if (!lp->dma_buff) {
printk(KERN_ERR "%s: cannot get %dK memory for DMA\n", dev->name, lp->dmasize);
static void release_dma_buff(struct net_local *lp)
{
if (lp->dma_buff) {
- free_pages((unsigned long)(lp->dma_buff), (lp->dmasize * 1024) / PAGE_SIZE);
+ free_pages((unsigned long)(lp->dma_buff), get_order(lp->dmasize * 1024));
lp->dma_buff = 0;
}
}
void
cleanup_module(void)
{
- outw(PP_ChipID, dev_cs89x0.base_addr + ADD_PORT);
if (dev_cs89x0.priv != NULL) {
/* Free up the private structure, or leak memory :-) */
unregister_netdev(&dev_cs89x0);
+ outw(PP_ChipID, dev_cs89x0.base_addr + ADD_PORT);
kfree(dev_cs89x0.priv);
dev_cs89x0.priv = NULL; /* gets re-allocated by cs89x0_probe1 */
/* If we don't do this, we can't re-insmod it later. */
static int autoconf_media(struct net_device *dev);
static void create_packet(struct net_device *dev, char *frame, int len);
-static void de4x5_us_delay(u32 usec);
-static void de4x5_ms_delay(u32 msec);
static void load_packet(struct net_device *dev, char *buf, u32 flags, struct sk_buff *skb);
static int dc21040_autoconf(struct net_device *dev);
static int dc21041_autoconf(struct net_device *dev);
#define RESET_DE4X5 {\
int i;\
i=inl(DE4X5_BMR);\
- de4x5_ms_delay(1);\
+ mdelay(1);\
outl(i | BMR_SWR, DE4X5_BMR);\
- de4x5_ms_delay(1);\
+ mdelay(1);\
outl(i, DE4X5_BMR);\
- de4x5_ms_delay(1);\
- for (i=0;i<5;i++) {inl(DE4X5_BMR); de4x5_ms_delay(1);}\
- de4x5_ms_delay(1);\
+ mdelay(1);\
+ for (i=0;i<5;i++) {inl(DE4X5_BMR); mdelay(1);}\
+ mdelay(1);\
}
#define PHY_HARD_RESET {\
pcibios_write_config_byte(lp->bus_num, lp->device << 3,
PCI_CFDA_PSM, WAKEUP);
}
- de4x5_ms_delay(10);
+ mdelay(10);
RESET_DE4X5;
/* Push up the protocol stack */
skb->protocol=eth_type_trans(skb,dev);
+ de4x5_local_stats(dev, skb->data, pkt_len);
netif_rx(skb);
/* Update stats */
dev->last_rx = jiffies;
lp->stats.rx_packets++;
lp->stats.rx_bytes += pkt_len;
- de4x5_local_stats(dev, skb->data, pkt_len);
}
}
for (walk = walk->next; walk != &dev->bus_list; walk = walk->next) {
struct pci_dev *this_dev = pci_dev_b(walk);
+ /* Skip the pci_bus list entry */
+ if (list_entry(walk, struct pci_bus, devices) == dev->bus) continue;
+
pb = this_dev->bus->number;
vendor = this_dev->vendor;
device = this_dev->device << 8;
outl(csr14, DE4X5_STRR);
outl(csr13, DE4X5_SICR);
- de4x5_ms_delay(10);
+ mdelay(10);
return;
}
}
/*
-** Known delay in microseconds
-*/
-static void
-de4x5_us_delay(u32 usec)
-{
- udelay(usec);
-
- return;
-}
-
-/*
-** Known delay in milliseconds, in millisecond steps.
-*/
-static void
-de4x5_ms_delay(u32 msec)
-{
- u_int i;
-
- for (i=0; i<msec; i++) {
- de4x5_us_delay(1000);
- }
-
- return;
-}
-
-
-/*
** Look for a particular board name in the EISA configuration space
*/
static int
for (i=0; i<6; i++, a <<= 1) {
srom_latch(command | ((a & 0x80) ? DT_IN : 0), addr);
}
- de4x5_us_delay(1);
+ udelay(1);
i = (getfrom_srom(addr) >> 3) & 0x01;
sendto_srom((command & 0x0000ff00) | DT_CS, addr);
while (!((getfrom_srom(addr) >> 3) & 0x01)) {
- de4x5_ms_delay(1);
+ mdelay(1);
}
sendto_srom(command & 0x0000ff00, addr);
switch(state) {
case WAKEUP:
outb(WAKEUP, PCI_CFPM);
- de4x5_ms_delay(10);
+ mdelay(10);
break;
case SNOOZE:
case WAKEUP:
pcibios_write_config_byte(lp->bus_num, lp->device << 3,
PCI_CFDA_PSM, WAKEUP);
- de4x5_ms_delay(10);
+ mdelay(10);
break;
case SNOOZE:
#define STS_LNF 0x00001000 /* Link Fail */
#define STS_FD 0x00000800 /* Full-Duplex Short Frame Received */
#define STS_TM 0x00000800 /* Timer Expired (DC21041) */
-#define STS_ETI 0x00000400 /* Early Transmit Interupt */
+#define STS_ETI 0x00000400 /* Early Transmit Interrupt */
#define STS_AT 0x00000400 /* AUI/TP Pin */
#define STS_RWT 0x00000200 /* Receive Watchdog Time-Out */
#define STS_RPS 0x00000100 /* Receive Process Stopped */
printk(KERN_WARNING "%s: No tx-buffer available!\n", dev->name);
restore_flags(flags);
return 1;
- break;
}
de620_write_block(dev, buffer, len);
/* PCI board */
bp->bus_type = DFX_BUS_TYPE_PCI;
bp->pci_dev = pdev;
- pdev->driver_data = dev;
+ pci_set_drvdata (pdev, dev);
pci_set_master (pdev);
}
static void __devexit dfx_remove_one (struct pci_dev *pdev)
{
- struct net_device *dev = pdev->driver_data;
+ struct net_device *dev = pci_get_drvdata(pdev);
dfx_remove_one_pci_or_eisa(pdev, dev);
+ pci_set_drvdata(pdev, NULL);
}
static struct pci_device_id dfx_pci_tbl[] __devinitdata = {
0.52 16-Oct-00 Fixes for 2.3 io memory accesses
Fix show-stopper (ints left masked) in depca_interrupt
by <peterd@pnd-pc.demon.co.uk>
+ 0.53 12-Jan-01 Release resources on failure, bss tidbits
+ by acme@conectiva.com.br
=========================================================================
*/
-static const char *version = "depca.c:v0.51 1999/6/27 davies@maniac.ultranet.com\n";
-
#include <linux/config.h>
#include <linux/module.h>
#include "depca.h"
+static char version[] __initdata =
+ "depca.c:v0.53 2001/1/12 davies@maniac.ultranet.com\n";
+
#ifdef DEPCA_DEBUG
static int depca_debug = DEPCA_DEBUG;
#else
#define DEPCA_RAM_BASE_ADDRESSES {0xc0000,0xd0000,0xe0000,0x00000}
#define DEPCA_IO_PORTS {0x300, 0x200, 0}
#define DEPCA_TOTAL_SIZE 0x10
-static short mem_chkd = 0;
+static short mem_chkd;
/*
** Adapter ID for the MCA EtherWORKS DE210/212 adapter
static int num_depcas, num_eth;
static int mem; /* For loadable module assignment
use insmod mem=0x????? .... */
-static char *adapter_name = '\0'; /* If no PROM when loadable module
+static char *adapter_name; /* = '\0'; If no PROM when loadable module
use insmod adapter_name=DE??? ...
+ bss initializes this to zero
*/
/*
** Miscellaneous defines...
int tmp = num_depcas, status = -ENODEV;
u_long iobase = dev->base_addr;
+ SET_MODULE_OWNER(dev);
+
if ((iobase == 0) && loading_module){
printk("Autoprobing is not supported when loading a module based driver.\n");
status = -EIO;
printk(", h/w address ");
status = get_hw_addr(dev);
- for (i=0; i<ETH_ALEN - 1; i++) { /* get the ethernet address */
- printk("%2.2x:", dev->dev_addr[i]);
- }
- printk("%2.2x", dev->dev_addr[i]);
-
if (status != 0) {
printk(" which has an Ethernet PROM CRC error.\n");
return -ENXIO;
}
+ for (i=0; i<ETH_ALEN - 1; i++) { /* get the ethernet address */
+ printk("%2.2x:", dev->dev_addr[i]);
+ }
+ printk("%2.2x", dev->dev_addr[i]);
/* Set up the maximum amount of network RAM(kB) */
netRAM = ((adapter != DEPCA) ? 64 : 48);
lp->mca_slot = mca_slot;
lp->lock = SPIN_LOCK_UNLOCKED;
sprintf(lp->adapter_name,"%s (%s)", name, dev->name);
+ status = -EBUSY;
if (!request_region(ioaddr, DEPCA_TOTAL_SIZE, lp->adapter_name)) {
printk(KERN_ERR "depca: I/O resource 0x%x @ 0x%lx busy\n",
DEPCA_TOTAL_SIZE, ioaddr);
- return -EBUSY;
+ goto out_priv;
}
/* Initialisation Block */
lp->sh_mem = ioremap(mem_start, mem_len);
+ status = -EIO;
if (lp->sh_mem == NULL) {
printk(KERN_ERR "depca: cannot remap ISA memory, aborting\n");
- return -EIO;
+ goto out_region;
}
lp->device_ram_start = mem_start & LA_MASK;
outw(INEA | INIT, DEPCA_DATA);
irqnum = autoirq_report(1);
+ status = -ENXIO;
if (!irqnum) {
printk(" and failed to detect IRQ line.\n");
- status = -ENXIO;
+ goto out_region;
} else {
- for (dev->irq=0,i=0; (depca_irq[i]) && (!dev->irq); i++) {
+ for (dev->irq=0,i=0; (depca_irq[i]) && (!dev->irq); i++)
if (irqnum == depca_irq[i]) {
dev->irq = irqnum;
printk(" and uses IRQ%d.\n", dev->irq);
}
- }
+ status = -ENXIO;
if (!dev->irq) {
printk(" but incorrect IRQ line detected.\n");
- status = -ENXIO;
+ goto out_region;
}
}
#endif /* MODULE */
printk(" and assigned IRQ%d.\n", dev->irq);
}
- if (!status) {
- if (depca_debug > 1) {
- printk(version);
- }
-
- /* The DEPCA-specific entries in the device structure. */
- dev->open = &depca_open;
- dev->hard_start_xmit = &depca_start_xmit;
- dev->stop = &depca_close;
- dev->get_stats = &depca_get_stats;
- dev->set_multicast_list = &set_multicast_list;
- dev->do_ioctl = &depca_ioctl;
- dev->tx_timeout = depca_tx_timeout;
- dev->watchdog_timeo = TX_TIMEOUT;
-
- dev->mem_start = 0;
-
- /* Fill in the generic field of the device structure. */
- ether_setup(dev);
- } else { /* Incorrectly initialised hardware */
- release_region(ioaddr, DEPCA_TOTAL_SIZE);
- if (dev->priv) {
- kfree(dev->priv);
- dev->priv = NULL;
- }
+ if (depca_debug > 1) {
+ printk(version);
}
+ /* The DEPCA-specific entries in the device structure. */
+ dev->open = &depca_open;
+ dev->hard_start_xmit = &depca_start_xmit;
+ dev->stop = &depca_close;
+ dev->get_stats = &depca_get_stats;
+ dev->set_multicast_list = &set_multicast_list;
+ dev->do_ioctl = &depca_ioctl;
+ dev->tx_timeout = depca_tx_timeout;
+ dev->watchdog_timeo = TX_TIMEOUT;
+
+ dev->mem_start = 0;
+
+ /* Fill in the generic field of the device structure. */
+ ether_setup(dev);
+ return 0;
+out_region:
+ release_region(ioaddr, DEPCA_TOTAL_SIZE);
+out_priv:
+ kfree(dev->priv);
+ dev->priv = NULL;
return status;
}
printk("nicsr: 0x%02x\n",inb(DEPCA_NICSR));
}
}
-
- MOD_INC_USE_COUNT;
-
return status;
}
** Free the associated irq
*/
free_irq(dev->irq, dev);
-
- MOD_DEC_USE_COUNT;
-
return 0;
}
tmp.addr[i] = dev->dev_addr[i];
}
ioc->len = ETH_ALEN;
- if (verify_area(VERIFY_WRITE, (void *)ioc->data, ioc->len)) return -EFAULT;
- copy_to_user(ioc->data, tmp.addr, ioc->len);
+ if (copy_to_user(ioc->data, tmp.addr, ioc->len))
+ return -EFAULT;
break;
case DEPCA_SET_HWADDR: /* Set the hardware address */
if (!capable(CAP_NET_ADMIN)) return -EPERM;
- if (verify_area(VERIFY_READ, (void *)ioc->data, ETH_ALEN)) return -EFAULT;
- copy_from_user(tmp.addr,ioc->data,ETH_ALEN);
+ if (copy_from_user(tmp.addr, ioc->data, ETH_ALEN))
+ return -EFAULT;
for (i=0; i<ETH_ALEN; i++) {
dev->dev_addr[i] = tmp.addr[i];
}
case DEPCA_GET_MCA: /* Get the multicast address table */
ioc->len = (HASH_TABLE_LEN >> 3);
- if (verify_area(VERIFY_WRITE, ioc->data, ioc->len)) return -EFAULT;
- copy_to_user(ioc->data, lp->init_block.mcast_table, ioc->len);
+ if (copy_to_user(ioc->data, lp->init_block.mcast_table, ioc->len))
+ return -EFAULT;
break;
case DEPCA_SET_MCA: /* Set a multicast address */
if (!capable(CAP_NET_ADMIN)) return -EPERM;
- if (verify_area(VERIFY_READ, ioc->data, ETH_ALEN*ioc->len)) return -EFAULT;
- copy_from_user(tmp.addr, ioc->data, ETH_ALEN * ioc->len);
+ if (copy_from_user(tmp.addr, ioc->data, ETH_ALEN * ioc->len))
+ return -EFAULT;
set_multicast_list(dev);
break;
case DEPCA_GET_STATS: /* Get the driver statistics */
cli();
ioc->len = sizeof(lp->pktStats);
- if (verify_area(VERIFY_WRITE, ioc->data, ioc->len)) {
- status = -EFAULT;
- } else {
- copy_to_user(ioc->data, &lp->pktStats, ioc->len);
- }
+ if (copy_to_user(ioc->data, &lp->pktStats, ioc->len))
+ status = -EFAULT;
sti();
break;
tmp.sval[i++] = inw(DEPCA_DATA);
memcpy(&tmp.sval[i], &lp->init_block, sizeof(struct depca_init));
ioc->len = i+sizeof(struct depca_init);
- if (verify_area(VERIFY_WRITE, ioc->data, ioc->len)) return -EFAULT;
- copy_to_user(ioc->data, tmp.addr, ioc->len);
+ if (copy_to_user(ioc->data, tmp.addr, ioc->len))
+ return -EFAULT;
break;
default:
}
#ifdef MODULE
-static struct net_device thisDepca = {
- "", /* device name is inserted by /linux/drivers/net/net_init.c */
- 0, 0, 0, 0,
- 0x200, 7, /* I/O address, IRQ */
- 0, 0, 0, NULL, depca_probe
-};
-
+static struct net_device thisDepca;
static int irq=7; /* EDIT THESE LINE FOR YOUR CONFIGURATION */
static int io=0x200; /* Or use the irq= io= options to insmod */
MODULE_PARM(irq, "i");
{
thisDepca.irq=irq;
thisDepca.base_addr=io;
+ thisDepca.init = depca_probe;
if (register_netdev(&thisDepca) != 0)
return -EIO;
* into the kernel.
* - Better handling of multicast addresses.
*
+ * Fixes:
+ * Arnaldo Carvalho de Melo <acme@conectiva.com.br> - 11/01/2001
+ * - fix dgrs_found_device wrt checking kmalloc return and
+ * rollbacking the partial steps of the whole process when
+ * one of the devices can't be allocated. Fix SET_MODULE_OWNER
+ * on the loop to use devN instead of repeated calls to dev.
+ *
+ * davej <davej@suse.de> - 9/2/2001
+ * - Enable PCI device before reading ioaddr/irq
+ *
*/
-static char *version = "$Id: dgrs.c,v 1.13 2000/06/06 04:07:00 rick Exp $";
-
-#include <linux/version.h>
#include <linux/module.h>
-
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/string.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/ioport.h>
-#include <linux/malloc.h>
+#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/init.h>
-#include <asm/bitops.h>
-#include <asm/io.h>
-#include <asm/byteorder.h>
-
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
-#include <linux/types.h>
+#include <asm/bitops.h>
+#include <asm/io.h>
+#include <asm/byteorder.h>
+#include <asm/uaccess.h>
-/*
- * API changed at linux version 2.1.0
- */
-#if LINUX_VERSION_CODE >= 0x20100
- #include <asm/uaccess.h>
- #define IOREMAP(ADDR, LEN) ioremap(ADDR, LEN)
- #define IOUNMAP(ADDR) iounmap(ADDR)
- #define COPY_FROM_USER(DST,SRC,LEN) copy_from_user(DST,SRC,LEN)
- #define COPY_TO_USER(DST,SRC,LEN) copy_to_user(DST,SRC,LEN)
-#else
- #include <linux/bios32.h>
- #define IOREMAP(ADDR, LEN) vremap(ADDR, LEN)
- #define IOUNMAP(ADDR) vfree(ADDR)
- #define COPY_FROM_USER(DST,SRC,LEN) memcpy_fromfs(DST,SRC,LEN)
- #define COPY_TO_USER(DST,SRC,LEN) memcpy_tofs(DST,SRC,LEN)
-#endif
+static char version[] __initdata =
+ "$Id: dgrs.c,v 1.13 2000/06/06 04:07:00 rick Exp $";
/*
* DGRS include files
#include "dgrs_asstruct.h"
#include "dgrs_bcomm.h"
-#if LINUX_VERSION_CODE >= 0x20400
static struct pci_device_id dgrs_pci_tbl[] __initdata = {
{ SE6_PCI_VENDOR_ID, SE6_PCI_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID, },
{ } /* Terminating entry */
};
MODULE_DEVICE_TABLE(pci, dgrs_pci_tbl);
-#endif /* LINUX_VERSION_CODE >= 0x20400 */
/*
* Firmware. Compiled separately for local compilation,
* "Space.c" variables, now settable from module interface
* Use the name below, minus the "dgrs_" prefix. See init_module().
*/
-int dgrs_debug = 1;
-int dgrs_dma = 1;
-int dgrs_spantree = -1;
-int dgrs_hashexpire = -1;
-uchar dgrs_ipaddr[4] = { 0xff, 0xff, 0xff, 0xff};
-uchar dgrs_iptrap[4] = { 0xff, 0xff, 0xff, 0xff};
-__u32 dgrs_ipxnet = -1;
-int dgrs_nicmode = 0;
+static int dgrs_debug = 1;
+static int dgrs_dma = 1;
+static int dgrs_spantree = -1;
+static int dgrs_hashexpire = -1;
+static uchar dgrs_ipaddr[4] = { 0xff, 0xff, 0xff, 0xff};
+static uchar dgrs_iptrap[4] = { 0xff, 0xff, 0xff, 0xff};
+static __u32 dgrs_ipxnet = -1;
+static int dgrs_nicmode = 0;
/*
* Chain of device structures
*/
-static struct net_device *dgrs_root_dev = NULL;
+static struct net_device *dgrs_root_dev;
/*
* Private per-board data structure (dev->priv)
/*
* Now map the DMA registers into our virtual space
*/
- priv0->vplxdma = (ulong *) IOREMAP (priv0->plxdma, 256);
+ priv0->vplxdma = (ulong *) ioremap (priv0->plxdma, 256);
if (!priv0->vplxdma)
{
printk("%s: can't *remap() the DMA regs\n", dev0->name);
if (cmd != DGRSIOCTL)
return -EINVAL;
- if(COPY_FROM_USER(&ioc, ifr->ifr_data, sizeof(DGRS_IOCTL)))
+ if(copy_from_user(&ioc, ifr->ifr_data, sizeof(DGRS_IOCTL)))
return -EFAULT;
switch (ioc.cmd)
case DGRS_GETMEM:
if (ioc.len != sizeof(ulong))
return -EINVAL;
- if(COPY_TO_USER(ioc.data, &devN->mem_start, ioc.len))
+ if(copy_to_user(ioc.data, &devN->mem_start, ioc.len))
return -EFAULT;
return (0);
case DGRS_SETFILTER:
if (ioc.len)
{
- if(COPY_FROM_USER(S2HN(privN->bcomm->bc_filter_area),
+ if(copy_from_user(S2HN(privN->bcomm->bc_filter_area),
ioc.data, ioc.len))
return -EFAULT;
privN->bcomm->bc_filter_cmd = BC_FILTER_SET;
/*
* Map in the dual port memory
*/
- priv0->vmem = IOREMAP(dev0->mem_start, 2048*1024);
+ priv0->vmem = ioremap(dev0->mem_start, 2048*1024);
if (!priv0->vmem)
{
printk("%s: cannot map in board memory\n", dev0->name);
memcpy(priv0->vmem, dgrs_code, dgrs_ncode); /* Load code */
if (memcmp(priv0->vmem, dgrs_code, dgrs_ncode))
{
- IOUNMAP(priv0->vmem);
+ iounmap(priv0->vmem);
priv0->vmem = NULL;
printk("%s: download compare failed\n", dev0->name);
return -ENXIO;
)
{
DGRS_PRIV *priv;
- struct net_device *dev;
+ struct net_device *dev, *aux;
/* Allocate and fill new device structure. */
int dev_size = sizeof(struct net_device) + sizeof(DGRS_PRIV);
- int i;
+ int i, ret;
dev = (struct net_device *) kmalloc(dev_size, GFP_KERNEL);
+
+ if (!dev)
+ return -ENOMEM;
+
memset(dev, 0, dev_size);
dev->priv = ((void *)dev) + sizeof(struct net_device);
priv = (DGRS_PRIV *)dev->priv;
dev->init = dgrs_probe1;
SET_MODULE_OWNER(dev);
ether_setup(dev);
- priv->next_dev = dgrs_root_dev;
- dgrs_root_dev = dev;
if (register_netdev(dev) != 0)
return -EIO;
+ priv->next_dev = dgrs_root_dev;
+ dgrs_root_dev = dev;
+
if ( !dgrs_nicmode )
return (0); /* Switch mode, we are done */
/* Allocate new dev and priv structures */
devN = (struct net_device *) kmalloc(dev_size, GFP_KERNEL);
/* Make it an exact copy of dev[0]... */
+ ret = -ENOMEM;
+ if (!devN)
+ goto fail;
memcpy(devN, dev, dev_size);
devN->priv = ((void *)devN) + sizeof(struct net_device);
privN = (DGRS_PRIV *)devN->priv;
devN->irq = 0;
/* ... and base MAC address off address of 1st port */
devN->dev_addr[5] += i;
- privN->chan = i+1;
- priv->devtbl[i] = devN;
devN->init = dgrs_initclone;
- SET_MODULE_OWNER(dev);
+ SET_MODULE_OWNER(devN);
ether_setup(devN);
+ ret = -EIO;
+ if (register_netdev(devN)) {
+ kfree(devN);
+ goto fail;
+ }
+ privN->chan = i+1;
+ priv->devtbl[i] = devN;
privN->next_dev = dgrs_root_dev;
dgrs_root_dev = devN;
- if (register_netdev(devN) != 0)
- return -EIO;
}
- return (0);
+ return 0;
+fail: aux = priv->next_dev;
+ while (dgrs_root_dev != aux) {
+ struct net_device *d = dgrs_root_dev;
+
+ dgrs_root_dev = ((DGRS_PRIV *)d->priv)->next_dev;
+ unregister_netdev(d);
+ kfree(d);
+ }
+ return ret;
}
/*
while ((pdev = pci_find_device(SE6_PCI_VENDOR_ID, SE6_PCI_DEVICE_ID, pdev)) != NULL)
{
+ /*
+ * Get and check the bus-master and latency values.
+ * Some PCI BIOSes fail to set the master-enable bit,
+ * and the latency timer must be set to the maximum
+ * value to avoid data corruption that occurs when the
+ * timer expires during a transfer. Yes, it's a bug.
+ */
+ if (pci_enable_device(pdev))
+ continue;
+ pci_set_master(pdev);
+
plxreg = pci_resource_start (pdev, 0);
io = pci_resource_start (pdev, 1);
mem = pci_resource_start (pdev, 2);
pci_read_config_dword(pdev, 0x30, &plxdma);
plxdma &= ~15;
- /*
- * Get and check the bus-master and latency values.
- * Some PCI BIOSes fail to set the master-enable bit,
- * and the latency timer must be set to the maximum
- * value to avoid data corruption that occurs when the
- * timer expires during a transfer. Yes, it's a bug.
- */
- if (pci_enable_device(pdev))
- continue;
- pci_set_master(pdev);
-
dgrs_found_device(io, mem, irq, plxreg, plxdma);
cards_found++;
if (dgrs_debug)
{
- printk("dgrs: SW=%s FW=Build %d %s\n",
- version, dgrs_firmnum, dgrs_firmdate);
+ printk(KERN_INFO "dgrs: SW=%s FW=Build %d %s\nFW Version=%s\n",
+ version, dgrs_firmnum, dgrs_firmdate, dgrs_firmver);
}
/*
proc_reset(priv->devtbl[0], 1);
if (priv->vmem)
- IOUNMAP(priv->vmem);
+ iounmap(priv->vmem);
if (priv->vplxdma)
- IOUNMAP((uchar *) priv->vplxdma);
+ iounmap((uchar *) priv->vplxdma);
release_region(dgrs_root_dev->base_addr, 256);
-int dgrs_firmnum = 550;
-char dgrs_firmver[] = "$Version$";
-char dgrs_firmdate[] = "11/16/96 03:45:15";
-unsigned char dgrs_code[] __initdata = {
+static int dgrs_firmnum = 550;
+static char dgrs_firmver[] = "$Version$";
+static char dgrs_firmdate[] = "11/16/96 03:45:15";
+static unsigned char dgrs_code[] __initdata = {
213,5,192,8,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,64,40,35,41,
109,46,99,0,114,99,0,0,48,120,0,0,
0,0,0,0,0,0,0,0,0,0,0,0
} ;
-int dgrs_ncode = 119520 ;
+static int dgrs_ncode = 119520 ;
#define DMFE_100MFD 5
#define DMFE_AUTO 8
-#define DMFE_TIMER_WUT jiffies+(HZ*2)/2 /* timer wakeup time : 1 second */
+#define DMFE_TIMER_WUT (HZ) /* timer wakeup time : 1 second */
#define DMFE_TX_TIMEOUT ((HZ*3)/2) /* tx packet time-out time 1.5 s" */
-#define DMFE_DBUG(dbug_now, msg, vaule) if (dmfe_debug || dbug_now) printk("DBUG: %s %x\n", msg, vaule)
+#define DMFE_DBUG(dbug_now, msg, vaule) \
+ if (dmfe_debug || dbug_now) \
+ printk("DBUG: %s %x\n", msg, vaule)
-#define DELAY_5US udelay(5) /* udelay scale 1 usec */
-
-#define DELAY_1US udelay(1) /* udelay scale 1 usec */
-
-#define SHOW_MEDIA_TYPE(mode) printk(KERN_WARNING "dmfe: Change Speed to %sMhz %s duplex\n",mode & 1 ?"100":"10", mode & 4 ? "full":"half");
+#define SHOW_MEDIA_TYPE(mode) \
+ printk(KERN_WARNING "dmfe: Change Speed to %sMhz %s duplex\n", \
+ mode & 1 ? "100" : "10", \
+ mode & 4 ? "full":"half");
/* CR9 definition: SROM/MII */
#define PHY_DATA_0 0x00000
#define MDCLKH 0x10000
-#define SROM_CLK_WRITE(data, ioaddr) outl(data|CR9_SROM_READ|CR9_SRCS,ioaddr);DELAY_5US;outl(data|CR9_SROM_READ|CR9_SRCS|CR9_SRCLK,ioaddr);DELAY_5US;outl(data|CR9_SROM_READ|CR9_SRCS,ioaddr);DELAY_5US;
-
-#define __CHK_IO_SIZE(pci_id, dev_rev) ( ((pci_id)==PCI_DM9132_ID) || ((dev_rev) >= 0x02000030) ) ? DM9102A_IO_SIZE: DM9102_IO_SIZE
+#define SROM_CLK_WRITE(data, ioaddr) \
+ outl(data | CR9_SROM_READ | CR9_SRCS , ioaddr); \
+ udelay(5); \
+ outl(data | CR9_SROM_READ | CR9_SRCS | CR9_SRCLK, ioaddr); \
+ udelay(5); \
+ outl(data | CR9_SROM_READ | CR9_SRCS , ioaddr); \
+ udelay(5);
+
+#define __CHK_IO_SIZE(pci_id, dev_rev) \
+ ( ((pci_id)==PCI_DM9132_ID) || ((dev_rev) >= 0x02000030) ) ? \
+ DM9102A_IO_SIZE : DM9102_IO_SIZE
#define CHK_IO_SIZE(pci_dev, dev_rev) \
__CHK_IO_SIZE(((pci_dev)->device << 16) | (pci_dev)->vendor, dev_rev)
}
db = dev->priv;
- pdev->driver_data = dev;
+ pci_set_drvdata(pdev, dev);
db->chip_id = ent->driver_data;
db->ioaddr = pci_iobase;
static void __exit dmfe_remove_one (struct pci_dev *pdev)
{
- struct net_device *dev = pdev->driver_data;
+ struct net_device *dev = pci_get_drvdata(pdev);
struct dmfe_board_info *db;
DMFE_DBUG(0, "dmfe_remove_one()", 0);
release_region(dev->base_addr, CHK_IO_SIZE(pdev, db->chip_revision));
kfree(dev); /* free board information */
+ pci_set_drvdata(pdev, NULL);
+
DMFE_DBUG(0, "dmfe_remove_one() exit", 0);
}
/* set and active a timer process */
init_timer(&db->timer);
- db->timer.expires = DMFE_TIMER_WUT;
+ db->timer.expires = jiffies + DMFE_TIMER_WUT;
db->timer.data = (unsigned long) dev;
db->timer.function = &dmfe_timer;
add_timer(&db->timer);
/* Reset DM910x board : need 32 PCI clock to complete */
outl(DM910X_RESET, ioaddr + DCR0); /* RESET MAC */
- DELAY_5US;
+ udelay(5);
outl(db->cr0_data, ioaddr + DCR0);
outl(0x180, ioaddr + DCR12); /* Let bit 7 output port */
/* Reset & stop DM910X board */
outl(DM910X_RESET, ioaddr + DCR0);
- DELAY_5US;
+ udelay(5);
/* deleted timer */
del_timer_sync(&db->timer);
*/
DMFE_DBUG(0, "Warn!! Warn!! Tx/Rx moniotr step1", db->tx_packet_cnt);
dmfe_dynamic_reset(dev);
- db->timer.expires = DMFE_TIMER_WUT;
+ db->timer.expires = jiffies + DMFE_TIMER_WUT;
add_timer(&db->timer);
return;
}
allocated_rx_buffer(db);
/* Timer active again */
- db->timer.expires = DMFE_TIMER_WUT;
+ db->timer.expires = jiffies + DMFE_TIMER_WUT;
add_timer(&db->timer);
}
cr6_tmp = cr6_data & ~0x2002; /* stop Tx/Rx */
outl(cr6_tmp, ioaddr + DCR6);
- DELAY_5US;
+ udelay(5);
outl(cr6_data, ioaddr + DCR6);
cr6_tmp = inl(ioaddr + DCR6);
/* printk("CR6 update %x ", cr6_tmp); */
for (i = 16; i > 0; i--) {
outl(CR9_SROM_READ | CR9_SRCS | CR9_SRCLK, cr9_ioaddr);
- DELAY_5US;
+ udelay(5);
srom_data = (srom_data << 1) | ((inl(cr9_ioaddr) & CR9_CRDOUT) ? 1 : 0);
outl(CR9_SROM_READ | CR9_SRCS, cr9_ioaddr);
- DELAY_5US;
+ udelay(5);
}
outl(CR9_SROM_READ, cr9_ioaddr);
static void dmfe_sense_speed(struct dmfe_board_info *db)
{
int i;
- u16 phy_mode;
+ u16 phy_mode = 0;
for (i = 1000; i; i--) {
- DELAY_5US;
+ udelay(5);
phy_mode = phy_read(db->ioaddr, db->phy_addr, 1, db->chip_id);
if ((phy_mode & 0x24) == 0x24)
break;
static void phy_write_1bit(u32 ioaddr, u32 phy_data)
{
outl(phy_data, ioaddr); /* MII Clock Low */
- DELAY_1US;
+ udelay(1);
outl(phy_data | MDCLKH, ioaddr); /* MII Clock High */
- DELAY_1US;
+ udelay(1);
outl(phy_data, ioaddr); /* MII Clock Low */
- DELAY_1US;
+ udelay(1);
}
/*
u16 phy_data;
outl(0x50000, ioaddr);
- DELAY_1US;
+ udelay(1);
phy_data = (inl(ioaddr) >> 19) & 0x1;
outl(0x40000, ioaddr);
- DELAY_1US;
+ udelay(1);
return phy_data;
}
This is a compatibility hardware problem.
Versions:
+ 0.12c fixing some problems with old cards (aris, 01/08/2001)
0.12b misc fixes (aris, 06/26/2000)
0.12a port of version 0.12a of 2.2.x kernels to 2.3.x
(aris (aris@conectiva.com.br), 05/19/2000)
*/
static const char *version =
- "eepro.c: v0.12b 04/26/2000 aris@conectiva.com.br\n";
+ "eepro.c: v0.12c 01/08/2000 aris@conectiva.com.br\n";
#include <linux/module.h>
/* set diagnose flag */
#define eepro_diag(ioaddr) outb(DIAGNOSE_CMD, ioaddr)
+#ifdef ANSWER_TX_AND_RX /* experimental way of handling interrupts */
/* ack for rx/tx int */
#define eepro_ack_rxtx(ioaddr) outb (RX_INT | TX_INT, ioaddr + STATUS_REG)
+#endif
/* ack for rx int */
#define eepro_ack_rx(ioaddr) outb (RX_INT, ioaddr + STATUS_REG)
return -ENODEV;
}
-void printEEPROMInfo(short ioaddr, struct net_device *dev)
+static void printEEPROMInfo(short ioaddr, struct net_device *dev)
{
unsigned short Word;
int i,j;
}
if (dev->irq < 2) {
printk(" Duh! illegal interrupt vector stored in EEPROM.\n");
- return -ENODEV;
+ kfree(dev->priv);
+ return -ENODEV;
} else
if (dev->irq==2)
|| (irq2dev_map[dev->irq] = dev) == 0) &&
(irq2dev_map[dev->irq]!=dev)) {
/* printk("%s: IRQ map wrong\n", dev->name); */
+ free_irq(dev->irq, dev);
return -EAGAIN;
}
#endif
}
eepro_sel_reset(ioaddr);
+ SLOW_DOWN;
+ SLOW_DOWN;
lp->tx_start = lp->tx_end = XMT_LOWER_LIMIT << 8;
lp->tx_last = 0;
while (((status = inb(ioaddr + STATUS_REG)) & 0x06) && (boguscount--))
{
switch (status & (RX_INT | TX_INT)) {
+#ifdef ANSWER_TX_AND_RX
case (RX_INT | TX_INT):
eepro_ack_rxtx(ioaddr);
break;
+#endif
case RX_INT:
eepro_ack_rx(ioaddr);
break;
/* Get the received packets */
eepro_rx(dev);
+#ifndef ANSWER_TX_AND_RX
+ continue;
+#endif
}
if (status & TX_INT) {
if (net_debug > 4)
/* Re-enable RX and TX interrupts */
eepro_en_int(ioaddr);
}
- eepro_complete_selreset(ioaddr);
+ if (lp->eepro == LAN595FX_10ISA) {
+ eepro_complete_selreset(ioaddr);
+ }
+ else
+ eepro_en_rx(ioaddr);
}
/* The horrible routine to read a word from the serial EEPROM. */
printk(KERN_DEBUG "%s: exiting hardware_send_packet routine.\n", dev->name);
return;
}
- netif_stop_queue(dev);
+ if (lp->eepro == LAN595FX_10ISA)
+ netif_stop_queue(dev);
+
if (net_debug > 5)
printk(KERN_DEBUG "%s: exiting hardware_send_packet routine.\n", dev->name);
}
skb->protocol = eth_type_trans(skb,dev);
netif_rx(skb);
+ dev->last_rx = jiffies;
lp->stats.rx_packets++;
}
xmt_status = inw(ioaddr+IO_PORT);
if ((xmt_status & TX_DONE_BIT) == 0) {
- udelay(40);
- boguscount--;
- continue;
+ if (lp->eepro == LAN595FX_10ISA) {
+ udelay(40);
+ boguscount--;
+ continue;
+ }
+ else
+ break;
}
xmt_status = inw(ioaddr+IO_PORT);
* interrupt again for tx. in other words: tx timeout what will take
* a lot of time to happen, so we'll do a complete selreset.
*/
- if (!boguscount)
+ if (!boguscount && lp->eepro == LAN595FX_10ISA)
eepro_complete_selreset(ioaddr);
}
+#ifdef MODULE
+
#define MAX_EEPRO 8
static struct net_device dev_eepro[MAX_EEPRO];
};
static int autodetect;
-static int n_eepro = 0;
+static int n_eepro;
/* For linux 2.1.xx */
MODULE_AUTHOR("Pascal Dupuis <dupuis@lei.ucl.ac.be> for the 2.1 stuff (locking,...)");
MODULE_PARM(mem, "1-" __MODULE_STRING(MAX_EEPRO) "i");
MODULE_PARM(autodetect, "1-" __MODULE_STRING(1) "i");
-#ifdef MODULE
-
int
init_module(void)
{
ep->rx_ring = (struct epic_rx_desc *)ring_space;
ep->rx_ring_dma = ring_dma;
- if (dev->mem_start && dev->mem_start != ~0) {
+ if (dev->mem_start) {
option = dev->mem_start;
duplex = (dev->mem_start & 16) ? 1 : 0;
} else if (card_idx >= 0 && card_idx < MAX_UNITS) {
break;
}
- /* This looks wierd and it is. At least the BayCom USCC doesn't
+ /* This looks weird and it is. At least the BayCom USCC doesn't
* use the Interrupt Daisy Chain, thus we'll have to start
* all over again to be sure not to miss an interrupt from
* (any of) the other chip(s)...
#define TCREG_POWC 0x4000 /* timer start out of window detect */
#define TCREG_CRCI 0x2000 /* inhibit CRC generation */
#define TCREG_EXDIS 0x1000 /* disable excessive deferral timer */
-#define TCREG_EXD 0x0400 /* excessive deferral occured */
-#define TCREG_DEF 0x0200 /* single deferral occured */
+#define TCREG_EXD 0x0400 /* excessive deferral occurred */
+#define TCREG_DEF 0x0200 /* single deferral occurred */
#define TCREG_NCRS 0x0100 /* no carrier detected */
#define TCREG_CRSL 0x0080 /* carrier lost */
-#define TCREG_EXC 0x0040 /* excessive collisions occured */
-#define TCREG_OWC 0x0020 /* out of window collision occured */
+#define TCREG_EXC 0x0040 /* excessive collisions occurred */
+#define TCREG_OWC 0x0020 /* out of window collision occurred */
#define TCREG_PMB 0x0008 /* packet monitored bad */
#define TCREG_FU 0x0004 /* FIFO underrun */
#define TCREG_BCM 0x0002 /* byte count mismatch of fragments */
/* Interrupt Mask Register */
#define SONIC_IMREG 0x08
-#define IMREG_BREN 0x4000 /* interrupt when bus retry occured */
+#define IMREG_BREN 0x4000 /* interrupt when bus retry occurred */
#define IMREG_HBLEN 0x2000 /* interrupt when heartbeat lost */
#define IMREG_LCDEN 0x1000 /* interrupt when CAM loaded */
#define IMREG_PINTEN 0x0800 /* interrupt when PINT in TDA set */
/* Interrupt Status Register */
#define SONIC_ISREG 0x0a
-#define ISREG_BR 0x4000 /* bus retry occured */
+#define ISREG_BR 0x4000 /* bus retry occurred */
#define ISREG_HBL 0x2000 /* heartbeat lost */
#define ISREG_LCD 0x1000 /* CAM loaded */
#define ISREG_PINT 0x0800 /* PINT in TDA set */
Forward ported v1.14 to 2.1.129, merged the PCI and misc changes from
the 2.1 version of the old driver - Alan Cox
+
+ Get rid of check_region, check kmalloc return in lance_probe1
+ Arnaldo Carvalho de Melo <acme@conectiva.com.br> - 11/01/2001
*/
static const char *version = "lance.c:v1.15ac 1999/11/13 dplatt@3do.com, becker@cesdis.gsfc.nasa.gov\n";
static int lance_probe1(struct net_device *dev, int ioaddr, int irq, int options);
#ifdef LANCE_DEBUG
-int lance_debug = LANCE_DEBUG;
+static int lance_debug = LANCE_DEBUG;
#else
-int lance_debug = 1;
+static int lance_debug = 1;
#endif
/*
MODULE_PARM(io, "1-" __MODULE_STRING(MAX_CARDS) "i");
MODULE_PARM(dma, "1-" __MODULE_STRING(MAX_CARDS) "i");
MODULE_PARM(irq, "1-" __MODULE_STRING(MAX_CARDS) "i");
+MODULE_PARM(lance_debug, "i");
int init_module(void)
{
for (port = lance_portlist; *port; port++) {
int ioaddr = *port;
+ struct resource *r = request_region(ioaddr, LANCE_TOTAL_SIZE,
+ "lance-probe");
- if ( check_region(ioaddr, LANCE_TOTAL_SIZE) == 0) {
+ if (r) {
/* Detect "normal" 0x57 0x57 and the NI6510EB 0x52 0x44
signatures w/ minimal I/O reads */
char offset15, offset14 = inb(ioaddr + 14);
if ((offset14 == 0x52 || offset14 == 0x57) &&
- ((offset15 = inb(ioaddr + 15)) == 0x57 || offset15 == 0x44)) {
+ ((offset15 = inb(ioaddr + 15)) == 0x57 ||
+ offset15 == 0x44)) {
result = lance_probe1(dev, ioaddr, 0, 0);
- if ( !result ) return 0;
+ if (!result) {
+ struct lance_private *lp = dev->priv;
+ int ver = lp->chip_version;
+
+ r->name = chip_table[ver].name;
+ return 0;
+ }
}
+ release_resource(r);
}
}
return -ENODEV;
printk(" %2.2x", dev->dev_addr[i] = inb(ioaddr + i));
dev->base_addr = ioaddr;
- request_region(ioaddr, LANCE_TOTAL_SIZE, chip_table[lance_version].name);
-
/* Make certain the data structures used by the LANCE are aligned and DMAble. */
lp = (struct lance_private *)(((unsigned long)kmalloc(sizeof(*lp)+7,
- GFP_DMA | GFP_KERNEL)+7) & ~7);
+ GFP_DMA | GFP_KERNEL)+7) & ~7);
if(lp==NULL)
return -ENODEV;
if (lance_debug > 6) printk(" (#0x%05lx)", (unsigned long)lp);
dev->priv = lp;
lp->name = chipname;
lp->rx_buffs = (unsigned long)kmalloc(PKT_BUF_SZ*RX_RING_SIZE,
- GFP_DMA | GFP_KERNEL);
- if (lance_need_isa_bounce_buffers)
+ GFP_DMA | GFP_KERNEL);
+ if (!lp->rx_buffs)
+ goto out_lp;
+ if (lance_need_isa_bounce_buffers) {
lp->tx_bounce_buffs = kmalloc(PKT_BUF_SZ*TX_RING_SIZE,
- GFP_DMA | GFP_KERNEL);
- else
+ GFP_DMA | GFP_KERNEL);
+ if (!lp->tx_bounce_buffs)
+ goto out_rx;
+ } else
lp->tx_bounce_buffs = NULL;
lp->chip_version = lance_version;
dev->watchdog_timeo = TX_TIMEOUT;
return 0;
+out_rx: kfree((void*)lp->rx_buffs);
+out_lp: kfree(lp);
+ return -ENOMEM;
}
static int
I/O space and NuBus interrupts for these cards, but neglected to
provide anything even remotely resembling a NuBus ROM. Therefore we
have to probe for them in a brain-damaged ISA-like fashion.
+
+ Arnaldo Carvalho de Melo <acme@conectiva.com.br> - 11/01/2001
+ check kmalloc and release the allocated memory on failure in
+ mac89x0_probe and in init_module
+ use save_flags/restore_flags in net_get_stat, not just cli/sti
*/
static char *version =
anywhere else until we have a really good reason to do so. */
int __init mac89x0_probe(struct net_device *dev)
{
- static int once_is_enough = 0;
+ static int once_is_enough;
struct net_local *lp;
- static unsigned version_printed = 0;
+ static unsigned version_printed;
int i, slot;
unsigned rev_type = 0;
unsigned long ioaddr;
/* Initialize the net_device structure. */
if (dev->priv == NULL) {
dev->priv = kmalloc(sizeof(struct net_local), GFP_KERNEL);
+ if (!dev->priv)
+ return -ENOMEM;
memset(dev->priv, 0, sizeof(struct net_local));
}
lp = (struct net_local *)dev->priv;
/* Try to read the MAC address */
if ((readreg(dev, PP_SelfST) & (EEPROM_PRESENT | EEPROM_OK)) == 0) {
printk("\nmac89x0: No EEPROM, giving up now.\n");
+ kfree(dev->priv);
+ dev->priv = NULL;
return -ENODEV;
} else {
for (i = 0; i < ETH_ALEN; i += 2) {
net_get_stats(struct net_device *dev)
{
struct net_local *lp = (struct net_local *)dev->priv;
+ unsigned long flags;
+ save_flags(flags);
cli();
/* Update the statistics from the device registers. */
lp->stats.rx_missed_errors += (readreg(dev, PP_RxMiss) >> 6);
lp->stats.collisions += (readreg(dev, PP_TxCol) >> 6);
- sti();
+ restore_flags(flags);
return &lp->stats;
}
int
init_module(void)
{
- struct net_local *lp;
-
net_debug = debug;
dev_cs89x0.init = mac89x0_probe;
dev_cs89x0.priv = kmalloc(sizeof(struct net_local), GFP_KERNEL);
+ if (!dev_cs89x0.priv)
+ return -ENOMEM;
memset(dev_cs89x0.priv, 0, sizeof(struct net_local));
- lp = (struct net_local *)dev_cs89x0.priv;
if (register_netdev(&dev_cs89x0) != 0) {
printk(KERN_WARNING "mac89x0.c: No card found\n");
+ kfree(dev_cs89x0.priv);
return -ENXIO;
}
return 0;
#include "sonic.h"
-static int sonic_debug = 0;
-static int sonic_version_printed = 0;
+static int sonic_debug;
+static int sonic_version_printed;
extern int macsonic_probe(struct net_device* dev);
extern int mac_onboard_sonic_probe(struct net_device* dev);
int __init mac_onboard_sonic_probe(struct net_device* dev)
{
/* Bwahahaha */
- static int once_is_more_than_enough = 0;
+ static int once_is_more_than_enough;
struct sonic_local* lp;
int i;
int __init mac_nubus_sonic_probe(struct net_device* dev)
{
- static int slots = 0;
+ static int slots;
struct nubus_dev* ndev = NULL;
struct sonic_local* lp;
unsigned long base_addr, prom_addr;
#ifdef MODULE
static char namespace[16] = "";
-static struct net_device dev_macsonic = {
- NULL,
- 0, 0, 0, 0,
- 0, 0,
- 0, 0, 0, NULL, NULL };
+static struct net_device dev_macsonic;
MODULE_PARM(sonic_debug, "i");
};
#endif
-static struct { unsigned short vendor, function; char *name; }
-isapnp_clone_list[] __initdata = {
- {ISAPNP_VENDOR('E','D','I'), ISAPNP_FUNCTION(0x0216), "NN NE2000" },
- {ISAPNP_VENDOR('P','N','P'), ISAPNP_FUNCTION(0x80d6), "Generic PNP" },
- {0,}
+static struct isapnp_device_id isapnp_clone_list[] __initdata = {
+ { ISAPNP_ANY_ID, ISAPNP_ANY_ID,
+ ISAPNP_VENDOR('E','D','I'), ISAPNP_FUNCTION(0x0216),
+ (long) "NN NE2000" },
+ { ISAPNP_ANY_ID, ISAPNP_ANY_ID,
+ ISAPNP_VENDOR('P','N','P'), ISAPNP_FUNCTION(0x80d6),
+ (long) "Generic PNP" },
+ { } /* terminate list */
};
+MODULE_DEVICE_TABLE(isapnp, isapnp_clone_list);
+
#ifdef SUPPORT_NE_BAD_CLONES
/* A list of bad clones that we none-the-less recognize. */
static struct { const char *name8, *name16; unsigned char SAprefix[4];}
dev->base_addr = idev->resource[0].start;
dev->irq = idev->irq_resource[0].start;
printk(KERN_INFO "ne.c: ISAPnP reports %s at i/o %#lx, irq %d.\n",
- isapnp_clone_list[i].name,
+ (char *) isapnp_clone_list[i].driver_data,
dev->base_addr, dev->irq);
if (ne_probe1(dev, dev->base_addr) != 0) { /* Shouldn't happen. */
}
if ((status & IS_DMA_INT) == 0) {
- PRINTK((KERN_DEBUG "%s: DMA complete (???)\n", dev->name));
+ PRINTK((KERN_DEBUG "%s: DMA complete (?)\n", dev->name));
outb(0, IE_DMA_RST); /* Reset DMA int */
}
unsigned short cbl_offset; /* pointeroffset, command block list */
unsigned short rfa_offset; /* pointeroffset, receive frame area */
unsigned short crc_errs; /* CRC-Error counter */
- unsigned short aln_errs; /* allignmenterror counter */
+ unsigned short aln_errs; /* alignment error counter */
unsigned short rsc_errs; /* Resourceerror counter */
unsigned short ovrn_errs; /* OVerrunerror counter */
};
tp->mmio_addr = ioaddr;
tp->lock = SPIN_LOCK_UNLOCKED;
- pdev->driver_data = dev;
+ pci_set_drvdata(pdev, dev);
tp->phys[0] = 32;
static void __devexit netdrv_remove_one (struct pci_dev *pdev)
{
- struct net_device *dev = pdev->driver_data;
+ struct net_device *dev = pci_get_drvdata (pdev);
struct netdrv_private *np;
DPRINTK ("ENTER\n");
kfree (dev);
- pdev->driver_data = NULL;
+ pci_set_drvdata (pdev, NULL);
pci_power_off (pdev, -1);
static void netdrv_suspend (struct pci_dev *pdev)
{
- struct net_device *dev = pdev->driver_data;
+ struct net_device *dev = pci_get_drvdata (pdev);
struct netdrv_private *tp = (struct netdrv_private *) dev->priv;
void *ioaddr = tp->mmio_addr;
unsigned long flags;
static void netdrv_resume (struct pci_dev *pdev)
{
- struct net_device *dev = pdev->driver_data;
+ struct net_device *dev = pci_get_drvdata (pdev);
pci_power_on (pdev);
netif_device_attach (dev);
err = 0;
break;
- default:
+ default:;
};
return err;
typedef void (*PFNTXCALLBACK)(U32 Status,
U16 PcktCount,
PU32 BufferContext,
- U16 AdaterID);
+ struct net_device *);
/*
** type PFNRXCALLBACK
static void __exit rcpci45_remove_one(struct pci_dev *pdev)
{
- struct net_device *dev = pdev->driver_data;
+ struct net_device *dev = pci_get_drvdata(pdev);
PDPA pDpa = dev->priv;
if (!dev) {
kfree(pDpa->pPab);
kfree(pDpa);
kfree(dev);
+ pci_set_drvdata(pdev, NULL);
}
static int RCinit(struct net_device *dev)
pci_start = pci_resource_start(pdev,0);
pci_len = pci_resource_len(pdev,0);
- pdev->driver_data = dev;
+ pci_set_drvdata(pdev, dev);
pDpa = dev->priv;
pDpa->id = card_idx;
static inline int sb1000_rx(struct net_device *dev);
static inline void sb1000_error_dpc(struct net_device *dev);
+static struct isapnp_device_id id_table[] = {
+ { ISAPNP_ANY_ID, ISAPNP_ANY_ID,
+ ISAPNP_VENDOR('G','I','C'), ISAPNP_FUNCTION(0x1000), 0 },
+ {0}
+};
+
+MODULE_DEVICE_TABLE(isapnp, id_table);
+
/* probe for SB1000 using Plug-n-Play mechanism */
int
sb1000_probe(struct net_device *dev)
#define RCNTCFG_INIT (HPCDMA_OWN | HPCDMA_EORP | HPCDMA_XIE)
#define RCNTINFO_INIT (RCNTCFG_INIT | (PKT_BUF_SZ & HPCDMA_BCNT))
-static void seeq_init_ring(struct net_device *dev)
+static int seeq_init_ring(struct net_device *dev)
{
struct sgiseeq_private *sp = (struct sgiseeq_private *) dev->priv;
volatile struct sgiseeq_init_block *ib = &sp->srings;
unsigned long buffer;
buffer = (unsigned long) kmalloc(PKT_BUF_SZ, GFP_KERNEL);
+ if (!buffer)
+ return -ENOMEM;
ib->tx_desc[i].buf_vaddr = KSEG1ADDR(buffer);
ib->tx_desc[i].tdma.pbuf = PHYSADDR(buffer);
// flush_cache_all();
unsigned long buffer;
buffer = (unsigned long) kmalloc(PKT_BUF_SZ, GFP_KERNEL);
+ if (!buffer)
+ return -ENOMEM;
ib->rx_desc[i].buf_vaddr = KSEG1ADDR(buffer);
ib->rx_desc[i].rdma.pbuf = PHYSADDR(buffer);
// flush_cache_all();
ib->rx_desc[i].rdma.cntinfo = (RCNTINFO_INIT);
}
ib->rx_desc[i - 1].rdma.cntinfo |= (HPCDMA_EOR);
+ return 0;
}
#ifdef DEBUG
#define TSTAT_INIT_EDLC ((TSTAT_INIT_SEEQ) | SEEQ_TCMD_RB2)
#define RDMACFG_INIT (HPC3_ERXDCFG_FRXDC | HPC3_ERXDCFG_FEOP | HPC3_ERXDCFG_FIRQ)
-static void init_seeq(struct net_device *dev, struct sgiseeq_private *sp,
+static int init_seeq(struct net_device *dev, struct sgiseeq_private *sp,
volatile struct sgiseeq_regs *sregs)
{
volatile struct hpc3_ethregs *hregs = sp->hregs;
+ int err;
reset_hpc3_and_seeq(hregs, sregs);
- seeq_init_ring(dev);
+ err = seeq_init_ring(dev);
+ if (err)
+ return err;
/* Setup to field the proper interrupt types. */
if (sp->is_edlc) {
hregs->tx_ndptr = PHYSADDR(&sp->srings.tx_desc[0]);
seeq_go(sp, hregs, sregs);
+ return 0;
}
static inline void record_rx_errors(struct sgiseeq_private *sp,
struct sgiseeq_private *sp = (struct sgiseeq_private *)dev->priv;
volatile struct sgiseeq_regs *sregs = sp->sregs;
unsigned long flags;
+ int err;
save_flags(flags); cli();
if (request_irq(dev->irq, sgiseeq_interrupt, 0, sgiseeqstr, (void *) dev)) {
return -EAGAIN;
}
- init_seeq(dev, sp, sregs);
+ err = init_seeq(dev, sp, sregs);
+ if (err)
+ return err;
netif_start_queue(dev);
restore_flags(flags);
{
struct sgiseeq_private *sp = (struct sgiseeq_private *) dev->priv;
volatile struct sgiseeq_regs *sregs = sp->sregs;
+ int err;
- init_seeq(dev, sp, sregs);
+ err = init_seeq(dev, sp, sregs);
+ if (err)
+ return err;
dev->trans_start = jiffies;
netif_wake_queue(dev);
goto err_out_region;
}
- pci_dev->driver_data = net_dev;
+ pci_set_drvdata(pci_dev, net_dev);
pci_dev->dma_mask = SIS900_DMA_MASK;
/* The SiS900-specific entries in the device structure. */
static void __devexit sis900_remove(struct pci_dev *pci_dev)
{
- struct net_device *net_dev = pci_dev->driver_data;
+ struct net_device *net_dev = pci_get_drvdata(pci_dev);
unregister_netdev(net_dev);
release_region(net_dev->base_addr, SIS900_TOTAL_SIZE);
kfree(net_dev);
+ pci_set_drvdata(pci_dev, NULL);
}
#define SIS900_MODULE_NAME "sis900"
* Transmit descriptor polling was not reenabled after SkGePortInit.
*
* Revision 1.16 1999/07/27 15:17:29 cgoos
- * Added some "\n" in output strings (removed while debuging...).
+ * Added some "\n" in output strings (removed while debugging...).
*
* Revision 1.15 1999/07/23 12:09:30 cgoos
* Performance optimization, rx checksumming, large frame support.
#define VER_STRING "3.05"
-/* for debuging on x86 only */
+/* for debugging on x86 only */
/* #define BREAKPOINT() asm(" int $3"); */
/* use of a transmit complete interrupt */
} /* FreeResources */
+static struct pci_device_id skge_pci_tbl[] __initdata = {
+ { PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_GE, PCI_ANY_ID, PCI_ANY_ID, },
+ { } /* Terminating entry */
+};
+MODULE_DEVICE_TABLE(pci, skge_pci_tbl);
+
MODULE_AUTHOR("Christoph Goos <cgoos@syskonnect.de>");
MODULE_DESCRIPTION("SysKonnect SK-NET Gigabit Ethernet SK-98xx driver");
MODULE_PARM(AutoNeg_A, "1-" __MODULE_STRING(SK_MAX_CARD_PARAM) "s");
* Description:
* This function is called if an ioctl is issued on the device.
* There are three subfunction for reading, writing and test-writing
- * the private MIB data structure (usefull for SysKonnect-internal tools).
+ * the private MIB data structure (useful for SysKonnect-internal tools).
*
* Returns:
* 0, if everything is ok
extern void enable_tx_irq(struct s_smc *smc, u_short queue);
extern void mac_drv_clear_txd(struct s_smc *smc);
+static struct pci_device_id skfddi_pci_tbl[] __initdata = {
+ { PCI_VENDOR_ID_SK, PCI_DEVICE_ID_SK_FP, PCI_ANY_ID, PCI_ANY_ID, },
+ { } /* Terminating entry */
+};
+MODULE_DEVICE_TABLE(pci, skfddi_pci_tbl);
// Define module-wide (static) variables
-static int num_boards = 0; /* total number of adapters configured */
-static int num_fddi = 0;
-static int autoprobed = 0;
+static int num_boards; /* total number of adapters configured */
+static int num_fddi;
+static int autoprobed;
#ifdef MODULE
int init_module(void);
static struct net_device *unlink_modules(struct net_device *p);
static int loading_module = 1;
#else
-static int loading_module = 0;
+static int loading_module;
#endif // MODULE
#ifdef DRIVERDEBUG
* This function is called by the hardware dependent module.
* It allocates the memory for the RxD and TxD descriptors.
*
- * This memory must be non-cached, non-movable and non-swapable.
+ * This memory must be non-cached, non-movable and non-swappable.
* This memory should start at a physical page boundary.
* Args
* smc - A pointer to the SMT context struct.
#include <net/checksum.h>
#include <asm/unaligned.h>
-int last_retran;
-
static unsigned char *encode(unsigned char *cp, unsigned short n);
static long decode(unsigned char **cpp);
static unsigned char * put16(unsigned char *cp, unsigned short x);
ip = (struct iphdr *) icp;
/* Bail if this packet isn't TCP, or is an IP fragment */
- if(ip->protocol != IPPROTO_TCP || (ntohs(ip->frag_off) & 0x1fff) ||
- (ip->frag_off & 32)){
+ if (ip->protocol != IPPROTO_TCP || (ntohs(ip->frag_off) & 0x3fff)) {
/* Send as regular IP */
if(ip->protocol != IPPROTO_TCP)
comp->sls_o_nontcp++;
*/
oth = &cs->cs_tcp;
- if(last_retran
- || ip->version != cs->cs_ip.version || ip->ihl != cs->cs_ip.ihl
+ if(ip->version != cs->cs_ip.version || ip->ihl != cs->cs_ip.ihl
|| ip->tos != cs->cs_ip.tos
- || (ip->frag_off & 64) != (cs->cs_ip.frag_off & 64)
+ || (ip->frag_off & htons(0x4000)) != (cs->cs_ip.frag_off & htons(0x4000))
|| ip->ttl != cs->cs_ip.ttl
|| th->doff != cs->cs_tcp.doff
|| (ip->ihl > 5 && memcmp(ip+1,cs->cs_ipopt,((ip->ihl)-5)*4) != 0)
int new_rx, new_tx; /* The next free ring entry */
int old_tx, old_rx; /* ring entry to be processed */
struct net_device_stats stats;
-/* These two must be ints for set_bit() */
- int tx_full;
- int lock;
+/* These two must be longs for set_bit() */
+ long tx_full;
+ long lock;
};
/* I/O register access macros */
}
init_etherdev( dev, sizeof(struct lance_private) );
- if (!dev->priv)
+ if (!dev->priv) {
dev->priv = kmalloc( sizeof(struct lance_private), GFP_KERNEL );
+ if (!dev->priv)
+ return 0;
+ }
lp = (struct lance_private *)dev->priv;
MEM = (struct lance_memory *)sun3_dvma_malloc(sizeof(struct
lance_memory));
-/* $Id: sunbmac.c,v 1.23 2001/01/20 03:36:40 davem Exp $
+/* $Id: sunbmac.c,v 1.25 2001/02/18 08:10:21 davem Exp $
* sunbmac.c: Driver for Sparc BigMAC 100baseT ethernet adapters.
*
* Copyright (C) 1997, 1998, 1999 David S. Miller (davem@redhat.com)
static int bigmac_open(struct net_device *dev)
{
struct bigmac *bp = (struct bigmac *) dev->priv;
- int res;
+ int ret;
- if (request_irq(dev->irq, &bigmac_interrupt,
- SA_SHIRQ, "BIG MAC", (void *) bp)) {
+ ret = request_irq(dev->irq, &bigmac_interrupt, SA_SHIRQ, dev->name, bp);
+ if (ret) {
printk(KERN_ERR "BIGMAC: Can't order irq %d to go.\n", dev->irq);
- return -EAGAIN;
+ return ret;
}
init_timer(&bp->bigmac_timer);
- res = bigmac_init(bp, 0);
- if (!res) {
- MOD_INC_USE_COUNT;
- }
- return res;
+ return bigmac_init(bp, 0);
}
static int bigmac_close(struct net_device *dev)
bigmac_stop(bp);
bigmac_clean_rings(bp);
free_irq(dev->irq, (void *)bp);
- MOD_DEC_USE_COUNT;
return 0;
}
int i;
/* Get a new device struct for this interface. */
- dev = init_etherdev(0, sizeof(struct bigmac));
+ dev = init_etherdev(NULL, sizeof(struct bigmac));
+ if (!dev)
+ return -ENOMEM;
+ SET_MODULE_OWNER(dev);
if (version_printed++ == 0)
printk(KERN_INFO "%s", version);
printk("\n");
/* Setup softc, with backpointers to QEC and BigMAC SBUS device structs. */
- bp = (struct bigmac *) dev->priv;
+ bp = dev->priv;
bp->qec_sdev = qec_sdev;
bp->bigmac_sdev = qec_sdev->child;
\f
static int netdev_open(struct net_device *dev)
{
- struct netdev_private *np = (struct netdev_private *)dev->priv;
+ struct netdev_private *np = dev->priv;
long ioaddr = dev->base_addr;
int i;
static void check_duplex(struct net_device *dev)
{
- struct netdev_private *np = (struct netdev_private *)dev->priv;
+ struct netdev_private *np = dev->priv;
long ioaddr = dev->base_addr;
int mii_reg5 = mdio_read(dev, np->phys[0], 5);
int negotiated = mii_reg5 & np->advertising;
static void netdev_timer(unsigned long data)
{
struct net_device *dev = (struct net_device *)data;
- struct netdev_private *np = (struct netdev_private *)dev->priv;
+ struct netdev_private *np = dev->priv;
long ioaddr = dev->base_addr;
int next_tick = 10*HZ;
static void tx_timeout(struct net_device *dev)
{
- struct netdev_private *np = (struct netdev_private *)dev->priv;
+ struct netdev_private *np = dev->priv;
long ioaddr = dev->base_addr;
printk(KERN_WARNING "%s: Transmit timed out, status %2.2x,"
/* Initialize the Rx and Tx rings, along with various 'dev' bits. */
static void init_ring(struct net_device *dev)
{
- struct netdev_private *np = (struct netdev_private *)dev->priv;
+ struct netdev_private *np = dev->priv;
int i;
np->tx_full = 0;
static int start_tx(struct sk_buff *skb, struct net_device *dev)
{
- struct netdev_private *np = (struct netdev_private *)dev->priv;
+ struct netdev_private *np = dev->priv;
struct netdev_desc *txdesc;
unsigned entry;
int boguscnt = max_interrupt_work;
ioaddr = dev->base_addr;
- np = (struct netdev_private *)dev->priv;
+ np = dev->priv;
spin_lock(&np->lock);
do {
for clarity and better register allocation. */
static int netdev_rx(struct net_device *dev)
{
- struct netdev_private *np = (struct netdev_private *)dev->priv;
+ struct netdev_private *np = dev->priv;
int entry = np->cur_rx % RX_RING_SIZE;
int boguscnt = np->dirty_rx + RX_RING_SIZE - np->cur_rx;
static void netdev_error(struct net_device *dev, int intr_status)
{
long ioaddr = dev->base_addr;
- struct netdev_private *np = (struct netdev_private *)dev->priv;
+ struct netdev_private *np = dev->priv;
if (intr_status & IntrDrvRqst) {
/* Stop the down counter and turn interrupts back on. */
static struct net_device_stats *get_stats(struct net_device *dev)
{
long ioaddr = dev->base_addr;
- struct netdev_private *np = (struct netdev_private *)dev->priv;
+ struct netdev_private *np = dev->priv;
int i;
/* We should lock this segment of code for SMP eventually, although
static int netdev_close(struct net_device *dev)
{
long ioaddr = dev->base_addr;
- struct netdev_private *np = (struct netdev_private *)dev->priv;
+ struct netdev_private *np = dev->priv;
int i;
netif_stop_queue(dev);
static void __devexit sundance_remove1 (struct pci_dev *pdev)
{
- struct net_device *dev = pdev->driver_data;
+ struct net_device *dev = pci_get_drvdata(pdev);
/* No need to check MOD_IN_USE, as sys_delete_module() checks. */
while (dev) {
- struct netdev_private *np = (void *)(dev->priv);
unregister_netdev(dev);
pci_release_regions(pdev);
#ifndef USE_IO_OPS
kfree(dev);
}
- pdev->driver_data = NULL;
+ pci_set_drvdata(pdev, NULL);
}
static struct pci_driver sundance_driver = {
-/* $Id: sunlance.c,v 1.105 2000/10/22 16:08:38 davem Exp $
+/* $Id: sunlance.c,v 1.107 2001/02/18 08:10:21 davem Exp $
* lance.c: Linux/Sparc/Lance driver
*
* Written 1995, 1996 by Miguel de Icaza
sbus_writew(LE_C0_INEA | LE_C0_TDMD, lp->lregs + RDP);
}
- if (!status)
- MOD_INC_USE_COUNT;
-
return status;
}
STOP_LANCE(lp);
free_irq(dev->irq, (void *) dev);
- MOD_DEC_USE_COUNT;
return 0;
}
}
lp->dev = dev;
+ SET_MODULE_OWNER(dev);
dev->open = &lance_open;
dev->stop = &lance_close;
dev->hard_start_xmit = &lance_start_xmit;
-/* $Id: sunqe.c,v 1.47 2000/10/22 16:08:38 davem Exp $
+/* $Id: sunqe.c,v 1.50 2001/02/18 08:10:21 davem Exp $
* sunqe.c: Sparc QuadEthernet 10baseT SBUS card driver.
* Once again I am out to prove that every ethernet
* controller out there can be most efficiently programmed
len, 0);
skb->protocol = eth_type_trans(skb, qep->dev);
netif_rx(skb);
- dev->last_rx = jiffies;
+ qep->dev->last_rx = jiffies;
qep->net_stats.rx_packets++;
qep->net_stats.rx_bytes += len;
}
static int qe_open(struct net_device *dev)
{
struct sunqe *qep = (struct sunqe *) dev->priv;
- int res;
qep->mconfig = (MREGS_MCONFIG_TXENAB |
MREGS_MCONFIG_RXENAB |
MREGS_MCONFIG_MBAENAB);
- res = qe_init(qep, 0);
- if (!res)
- MOD_INC_USE_COUNT;
-
- return res;
+ return qe_init(qep, 0);
}
static int qe_close(struct net_device *dev)
struct sunqe *qep = (struct sunqe *) dev->priv;
qe_stop(qep);
- MOD_DEC_USE_COUNT;
return 0;
}
}
for (i = 0; i < 4; i++) {
+ SET_MODULE_OWNER(qe_devs[i]);
qe_devs[i]->open = qe_open;
qe_devs[i]->stop = qe_close;
qe_devs[i]->hard_start_xmit = qe_start_xmit;
mainmenu_option next_comment
comment 'Token Ring devices'
-bool 'Token Ring driver support' CONFIG_TR
+# So far, we only have PCI, ISA, and MCA token ring devices
+if [ "$CONFIG_PCI" = "y" -o "$CONFIG_ISA" = "y" -o "$CONFIG_MCA" = "y" ]; then
+ bool 'Token Ring driver support' CONFIG_TR
+else
+ define_bool CONFIG_TR n
+fi
+
if [ "$CONFIG_TR" != "n" ]; then
- dep_tristate ' IBM Tropic chipset based adapter support' CONFIG_IBMTR $CONFIG_TR
- dep_tristate ' IBM Olympic chipset PCI adapter support' CONFIG_IBMOL $CONFIG_TR
- dep_tristate ' IBM Lanstreamer chipset PCI adapter support' CONFIG_IBMLS $CONFIG_TR
- dep_tristate ' Generic TMS380 Token Ring ISA/PCI adapter support' CONFIG_TMS380TR $CONFIG_TR
+ if [ "$CONFIG_ISA" = "y" -o "$CONFIG_MCA" = "y" ]; then
+ tristate ' IBM Tropic chipset based adapter support' CONFIG_IBMTR
+ fi
+ dep_tristate ' IBM Olympic chipset PCI adapter support' CONFIG_IBMOL $CONFIG_TR $CONFIG_PCI
+ dep_tristate ' IBM Lanstreamer chipset PCI adapter support' CONFIG_IBMLS $CONFIG_TR $CONFIG_PCI
+ tristate ' Generic TMS380 Token Ring ISA/PCI adapter support' CONFIG_TMS380TR
if [ "$CONFIG_TMS380TR" != "n" ]; then
- dep_tristate ' Generic TMS380 PCI support' CONFIG_TMSPCI $CONFIG_TMS380TR
- dep_tristate ' Generic TMS380 ISA support' CONFIG_TMSISA $CONFIG_TMS380TR
- dep_tristate ' Madge Smart 16/4 PCI Mk2 support' CONFIG_ABYSS $CONFIG_TMS380TR
- if [ "$CONFIG_MCA" = "y" ]; then
- dep_tristate ' Madge Smart 16/4 Ringnode MicroChannel' CONFIG_MADGEMC $CONFIG_TMS380TR
- fi
+ dep_tristate ' Generic TMS380 PCI support' CONFIG_TMSPCI $CONFIG_PCI
+ dep_tristate ' Generic TMS380 ISA support' CONFIG_TMSISA $CONFIG_ISA
+ dep_tristate ' Madge Smart 16/4 PCI Mk2 support' CONFIG_ABYSS $CONFIG_PCI
+ dep_tristate ' Madge Smart 16/4 Ringnode MicroChannel' CONFIG_MADGEMC $CONFIG_MCA
+ fi
+ if [ "$CONFIG_ISA" = "y" -o "$CONFIG_MCA" = "y" ]; then
+ tristate ' SMC ISA/MCA adapter support' CONFIG_SMCTR
fi
- dep_tristate ' SMC ISA/MCA adapter support' CONFIG_SMCTR $CONFIG_TR
fi
endmenu
/*
* olympic.c (c) 1999 Peter De Schrijver All Rights Reserved
- * 1999 Mike Phillips (phillim@amtrak.com)
+ * 1999 Mike Phillips (mikep@linuxtr.net)
*
* Linux driver for IBM PCI tokenring cards based on the Pit/Pit-Phy/Olympic
* chipset.
* Fixing the hardware descriptors was another matter,
* because they weren't going through read[wl](), there all
* the results had to be in memory in le32 values. kdaaker
- *
+ * 12/23/00 - Added minimal Cardbus support (Thanks Donald).
*
* To Do:
- *
+ * Complete full Cardbus / hot-swap support.
+ *
* If Problems do Occur
* Most problems can be rectified by either closing and opening the interface
* (ifconfig down and up) or rmmod and insmod'ing the driver (a bit difficult
*/
static char *version =
-"Olympic.c v0.5.0 3/10/00 - Peter De Schrijver & Mike Phillips" ;
+"Olympic.c v0.5.C 12/23/00 - Peter De Schrijver & Mike Phillips" ;
static struct pci_device_id olympic_pci_tbl[] __initdata = {
{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_TR_WAKE, PCI_ANY_ID, PCI_ANY_ID, },
if (pci_enable_device(pci_device))
continue;
- /* These lines are needed by the PowerPC, it appears
-that these flags
- * are not being set properly for the PPC, this may
-well be fixed with
- * the new PCI code */
+ /* These lines are needed by the PowerPC, it appears that these flags
+ * are not being set properly for the PPC, this may well be fixed with
+ * the new PCI code */
pci_read_config_word(pci_device, PCI_COMMAND, &pci_command);
pci_command |= PCI_COMMAND_IO | PCI_COMMAND_MEMORY;
pci_write_config_word(pci_device, PCI_COMMAND,pci_command);
#endif
dev->irq=pci_device->irq;
dev->base_addr=pci_resource_start(pci_device, 0);
- dev->init=&olympic_init;
+ dev->init=&olympic_init; /* AKPM: Not needed */
olympic_priv->olympic_card_name = (char *)pci_device->resource[0].name ;
olympic_priv->olympic_mmio =
ioremap(pci_resource_start(pci_device,1),256);
olympic_priv->olympic_message_level = message_level[card_no] ;
if(olympic_init(dev)==-1) {
- unregister_netdevice(dev);
kfree(dev->priv);
return 0;
}
spin_lock_init(&olympic_priv->olympic_lock) ;
+ /* Needed for cardbus */
+ if(!(readl(olympic_mmio+BCTL) & BCTL_MODE_INDICATOR))
+ writel(readl(olympic_priv->olympic_mmio+FERMASK)|FERMASK_INT_BIT, olympic_mmio+FERMASK);
+
#if OLYMPIC_DEBUG
printk("BCTL: %x\n",readl(olympic_mmio+BCTL));
printk("GPR: %x\n",readw(olympic_mmio+GPR));
/*
* olympic.h (c) 1999 Peter De Schrijver All Rights Reserved
- * 1999 Mike Phillips (phillim@amtrak.com)
+ * 1999 Mike Phillips (mikep@linuxtr.net)
*
* Linux driver for IBM PCI tokenring cards based on the olympic and the PIT/PHY chipset.
*
#define BCTL 0x70
#define BCTL_SOFTRESET (1<<15)
#define BCTL_MIMREB (1<<6)
+#define BCTL_MODE_INDICATOR (1<<5)
#define GPR 0x4a
#define GPR_OPTI_BF (1<<6)
#define TXSTATQCNT_2 0xe4
#define TXCSA_1 0xc8
#define TXCSA_2 0xe8
+/* Cardbus */
+#define FERMASK 0xf4
+#define FERMASK_INT_BIT (1<<15)
#define OLYMPIC_IO_SPACE 256
smctr_clear_trc_reset(ioaddr);
mdelay(200); /* ~2 ms */
- /* Remove any latched interrupts that occured prior to reseting the
+ /* Remove any latched interrupts that occurred prior to resetting the
* adapter or possibily caused by line glitches due to the reset.
*/
outb(tp->trc_mask | CSR_CLRTINT | CSR_CLRCBUSY, ioaddr + CSR);
/* OPEN Options (high-low) */
#define WRAP_INTERFACE 0x0080 /* Inserting omitted for test
* purposes; transmit data appears
- * as receive data. (usefull for
+ * as receive data. (useful for
* testing; change: CLOSE necessary)
*/
#define DISABLE_HARD_ERROR 0x0040 /* On HARD_ERROR & TRANSMIT_BEACON
#include <linux/netdevice.h>
#include <linux/timer.h>
#include <asm/io.h>
+#include <asm/irq.h>
#include <asm/processor.h> /* Processor type for cache alignment. */
#include <asm/bitops.h>
#include <asm/io.h>
+#include <asm/irq.h>
/* These identify the driver base version and may not be removed. */
static char version1[] __devinitdata =
np->drv_flags = via_rhine_chip_info[chip_id].drv_flags;
np->pdev = pdev;
- if (dev->mem_start && dev->mem_start != ~0)
+ if (dev->mem_start)
option = dev->mem_start;
/* The lower four bits are the media type. */
dev->tx_timeout = via_rhine_tx_timeout;
dev->watchdog_timeo = TX_TIMEOUT;
- pdev->driver_data = dev;
+ pci_set_drvdata(pdev, dev);
if (np->drv_flags & CanHaveMII) {
int phy, phy_idx = 0;
static void via_rhine_tx_timeout (struct net_device *dev)
{
- struct netdev_private *np = (struct netdev_private *) dev->priv;
+ struct netdev_private *np = dev->priv;
long ioaddr = dev->base_addr;
printk (KERN_WARNING "%s: Transmit timed out, status %4.4x, PHY status "
static void __devexit via_rhine_remove_one (struct pci_dev *pdev)
{
- struct net_device *dev = pdev->driver_data;
+ struct net_device *dev = pci_get_drvdata(pdev);
struct netdev_private *np = (struct netdev_private *)(dev->priv);
unregister_netdev(dev);
np->rx_ring, np->rx_ring_dma);
kfree(dev);
+
+ pci_set_drvdata(pdev, NULL);
}
fi
dep_tristate ' Support for Frame Relay on MultiGate boards' CONFIG_COMX_PROTO_FR $CONFIG_COMX
fi
+#
+# The Etinc driver has not been tested as non-modular yet.
+#
+
+ dep_tristate ' Etinc PCISYNC serial board support (EXPERIMENTAL)' CONFIG_DSCC4 m
#
# Lan Media's board. Currently 1000, 1200, 5200, 5245
dep_tristate ' SyncLink HDLC/SYNCPPP support' CONFIG_SYNCLINK_SYNCPPP m
+ tristate ' Generic HDLC driver' CONFIG_HDLC
+ if [ "$CONFIG_HDLC" != "n" ]; then
+ bool ' Synchronous Point-to-Point Protocol (PPP) support' CONFIG_HDLC_PPP
+ if [ "$CONFIG_LAPB" = "m" -a "$CONFIG_HDLC" = "m" -o "$CONFIG_LAPB" = "y" ]; then
+ bool ' X.25 protocol support' CONFIG_HDLC_X25
+ else
+ comment ' X.25/LAPB support is disabled'
+ fi
+ dep_tristate ' SDL RISCom/N2 support' CONFIG_N2 $CONFIG_HDLC
+ dep_tristate ' Moxa C101 support' CONFIG_C101 $CONFIG_HDLC
+ fi
+
tristate ' Frame relay DLCI support' CONFIG_DLCI
if [ "$CONFIG_DLCI" != "n" ]; then
int ' Max open DLCI' CONFIG_DLCI_COUNT 24
O_TARGET := wan.o
-export-objs = z85230.o syncppp.o comx.o sdladrv.o cycx_drv.o
+export-objs = z85230.o syncppp.o comx.o sdladrv.o cycx_drv.o hdlc.o
list-multi = wanpipe.o cyclomx.o
wanpipe-objs = sdlamain.o $(wanpipe-y)
obj-$(CONFIG_COMX_PROTO_LAPB) += comx-proto-lapb.o
obj-$(CONFIG_COMX_PROTO_FR) += comx-proto-fr.o
obj-$(CONFIG_COSA) += syncppp.o cosa.o
+obj-$(CONFIG_DSCC4) += dscc4.o
obj-$(CONFIG_LANMEDIA) += syncppp.o
obj-$(CONFIG_SYNCLINK_SYNCPPP) += syncppp.o
obj-$(CONFIG_X25_ASY) += x25_asy.o
obj-$(CONFIG_CYCLADES_SYNC) += cycx_drv.o cyclomx.o
obj-$(CONFIG_LAPBETHER) += lapbether.o
obj-$(CONFIG_SBNI) += sbni.o
+obj-$(CONFIG_HDLC) += hdlc.o
+obj-$(CONFIG_HDLC_PPP) += syncppp.o
+obj-$(CONFIG_N2) += n2.o
+obj-$(CONFIG_C101) += c101.o
include $(TOPDIR)/Rules.make
--- /dev/null
+/*
+ * Moxa C101 synchronous serial card driver for Linux
+ *
+ * Copyright (C) 2000 Krzysztof Halasa <khc@pm.waw.pl>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * For information see http://hq.pm.waw.pl/hdlc/
+ *
+ * Sources of information:
+ * Hitachi HD64570 SCA User's Manual
+ * Moxa C101 User's Manual
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+#include <linux/string.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/netdevice.h>
+#include <linux/hdlc.h>
+#include <linux/delay.h>
+#include <asm/io.h>
+
+#include "hd64570.h"
+
+#define DEBUG_RINGS
+/* #define DEBUG_PKT */
+
+static const char* version = "Moxa C101 driver revision: 1.02 for Linux 2.4";
+static const char* devname = "C101";
+
+#define C101_PAGE 0x1D00
+#define C101_DTR 0x1E00
+#define C101_SCA 0x1F00
+#define C101_WINDOW_SIZE 0x2000
+#define C101_MAPPED_RAM_SIZE 0x4000
+
+#define RAM_SIZE (256 * 1024)
+#define CLOCK_BASE 9830400 /* 9.8304 MHz */
+#define PAGE0_ALWAYS_MAPPED
+
+static char *hw; /* pointer to hw=xxx command line string */
+
+
+typedef struct card_s {
+ hdlc_device hdlc; /* HDLC device struct - must be first */
+ spinlock_t lock; /* TX lock */
+ int clkmode; /* clock mode */
+ int clkrate; /* clock speed */
+ int line; /* loopback only */
+ u8 *win0base; /* ISA window base address */
+ u32 phy_winbase; /* ISA physical base address */
+ u16 buff_offset; /* offset of first buffer of first channel */
+ u8 rxs, txs, tmc; /* SCA registers */
+ u8 irq; /* IRQ (3-15) */
+ u8 ring_buffers; /* number of buffers in a ring */
+ u8 page;
+
+ u8 rxin; /* rx ring buffer 'in' pointer */
+ u8 txin; /* tx ring buffer 'in' and 'last' pointers */
+ u8 txlast;
+ u8 rxpart; /* partial frame received, next frame invalid */
+
+ struct card_s *next_card;
+}card_t;
+
+typedef card_t port_t;
+
+
+#define sca_in(reg, card) readb((card)->win0base + C101_SCA + (reg))
+#define sca_out(value, reg, card) writeb(value, (card)->win0base + C101_SCA + (reg))
+#define sca_inw(reg, card) readw((card)->win0base + C101_SCA + (reg))
+#define sca_outw(value, reg, card) writew(value, (card)->win0base + C101_SCA + (reg))
+
+#define port_to_card(port) (port)
+#define log_node(port) (0)
+#define phy_node(port) (0)
+#define winsize(card) (C101_WINDOW_SIZE)
+#define win0base(card) ((card)->win0base)
+#define winbase(card) ((card)->win0base + 0x2000)
+#define get_port(card, port) ((port) == 0 ? (card) : NULL)
+
+
+static inline u8 sca_get_page(card_t *card)
+{
+ return card->page;
+}
+
+static inline void openwin(card_t *card, u8 page)
+{
+ card->page = page;
+ writeb(page, card->win0base + C101_PAGE);
+}
+
+
+#define close_windows(card) {} /* no hardware support */
+
+
+#include "hd6457x.c"
+
+
+static int c101_set_clock(port_t *port, int value)
+{
+ u8 msci = get_msci(port);
+ u8 rxs = port->rxs & CLK_BRG_MASK;
+ u8 txs = port->txs & CLK_BRG_MASK;
+
+ switch(value) {
+ case CLOCK_EXT:
+ rxs |= CLK_LINE_RX; /* RXC input */
+ txs |= CLK_LINE_TX; /* TXC input */
+ break;
+
+ case CLOCK_INT:
+ rxs |= CLK_BRG_RX; /* TX clock */
+ txs |= CLK_RXCLK_TX; /* BRG output */
+ break;
+
+ case CLOCK_TXINT:
+ rxs |= CLK_LINE_RX; /* RXC input */
+ txs |= CLK_BRG_TX; /* BRG output */
+ break;
+
+ case CLOCK_TXFROMRX:
+ rxs |= CLK_LINE_RX; /* RXC input */
+ txs |= CLK_RXCLK_TX; /* RX clock */
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ port->rxs = rxs;
+ port->txs = txs;
+ sca_out(rxs, msci + RXS, port);
+ sca_out(txs, msci + TXS, port);
+ port->clkmode = value;
+ return 0;
+}
+
+
+static int c101_open(hdlc_device *hdlc)
+{
+ port_t *port = hdlc_to_port(hdlc);
+
+ MOD_INC_USE_COUNT;
+ writeb(1, port->win0base + C101_DTR);
+ sca_out(0, MSCI1_OFFSET + CTL, port); /* RTS uses ch#2 output */
+ sca_open(hdlc);
+ c101_set_clock(port, port->clkmode);
+ return 0;
+}
+
+
+static void c101_close(hdlc_device *hdlc)
+{
+ port_t *port = hdlc_to_port(hdlc);
+
+ sca_close(hdlc);
+ writeb(0, port->win0base + C101_DTR);
+ sca_out(CTL_NORTS, MSCI1_OFFSET + CTL, port);
+ MOD_DEC_USE_COUNT;
+}
+
+
+static int c101_ioctl(hdlc_device *hdlc, struct ifreq *ifr, int cmd)
+{
+ int value = ifr->ifr_ifru.ifru_ivalue;
+ int result = 0;
+ port_t *port = hdlc_to_port(hdlc);
+
+ if(!capable(CAP_NET_ADMIN))
+ return -EPERM;
+
+ switch(cmd) {
+ case HDLCSCLOCK:
+ result = c101_set_clock(port, value);
+ case HDLCGCLOCK:
+ value = port->clkmode;
+ break;
+
+ case HDLCSCLOCKRATE:
+ port->clkrate = value;
+ sca_set_clock(port);
+ case HDLCGCLOCKRATE:
+ value = port->clkrate;
+ break;
+
+ case HDLCSLINE:
+ result = sca_set_loopback(port, value);
+ case HDLCGLINE:
+ value = port->line;
+ break;
+
+#ifdef DEBUG_RINGS
+ case HDLCRUN:
+ sca_dump_rings(hdlc);
+ return 0;
+#endif /* DEBUG_RINGS */
+
+ default:
+ return -EINVAL;
+ }
+
+ ifr->ifr_ifru.ifru_ivalue = value;
+ return result;
+}
+
+
+
+static void c101_destroy_card(card_t *card)
+{
+ if (card->irq)
+ free_irq(card->irq, card);
+
+ if (card->win0base) {
+ iounmap(card->win0base);
+ release_mem_region(card->phy_winbase, C101_MAPPED_RAM_SIZE);
+ }
+
+ kfree(card);
+}
+
+
+
+static int c101_run(unsigned long irq, unsigned long winbase)
+{
+ card_t *card;
+ int result;
+
+ if (irq<3 || irq>15 || irq == 6) /* FIXME */ {
+ printk(KERN_ERR "c101: invalid IRQ value\n");
+ return -ENODEV;
+ }
+
+ if (winbase < 0xC0000 || winbase > 0xDFFFF || (winbase & 0x3FFF) !=0) {
+ printk(KERN_ERR "c101: invalid RAM value\n");
+ return -ENODEV;
+ }
+
+ card = kmalloc(sizeof(card_t), GFP_KERNEL);
+ if (card == NULL) {
+ printk(KERN_ERR "c101: unable to allocate memory\n");
+ return -ENOBUFS;
+ }
+ memset(card, 0, sizeof(card_t));
+
+ if (request_irq(irq, sca_intr, 0, devname, card)) {
+ printk(KERN_ERR "c101: could not allocate IRQ\n");
+ c101_destroy_card(card);
+ return(-EBUSY);
+ }
+ card->irq = irq;
+
+ if (!request_mem_region(winbase, C101_MAPPED_RAM_SIZE, devname)) {
+ printk(KERN_ERR "c101: could not request RAM window\n");
+ c101_destroy_card(card);
+ return(-EBUSY);
+ }
+ card->phy_winbase = winbase;
+ card->win0base = ioremap(winbase, C101_MAPPED_RAM_SIZE);
+ if (!card->win0base) {
+ printk(KERN_ERR "c101: could not map I/O address\n");
+ c101_destroy_card(card);
+ return -EBUSY;
+ }
+
+ /* 2 rings required for 1 port */
+ card->ring_buffers = (RAM_SIZE -C101_WINDOW_SIZE) / (2 * HDLC_MAX_MRU);
+ printk(KERN_DEBUG "c101: using %u packets rings\n",card->ring_buffers);
+
+ card->buff_offset = C101_WINDOW_SIZE; /* Bytes 1D00-1FFF reserved */
+
+ readb(card->win0base + C101_PAGE); /* Resets SCA? */
+ udelay(100);
+ writeb(0, card->win0base + C101_PAGE);
+ writeb(0, card->win0base + C101_DTR); /* Power-up for RAM? */
+
+ sca_init(card, 0);
+
+ spin_lock_init(&card->lock);
+ hdlc_to_dev(&card->hdlc)->irq = irq;
+ hdlc_to_dev(&card->hdlc)->mem_start = winbase;
+ hdlc_to_dev(&card->hdlc)->mem_end = winbase + C101_MAPPED_RAM_SIZE - 1;
+ hdlc_to_dev(&card->hdlc)->tx_queue_len = 50;
+ card->hdlc.ioctl = c101_ioctl;
+ card->hdlc.open = c101_open;
+ card->hdlc.close = c101_close;
+ card->hdlc.xmit = sca_xmit;
+
+ result = register_hdlc_device(&card->hdlc);
+ if (result) {
+ printk(KERN_WARNING "c101: unable to register hdlc device\n");
+ c101_destroy_card(card);
+ return result;
+ }
+
+ sca_init_sync_port(card); /* Set up C101 memory */
+
+ *new_card = card;
+ new_card = &card->next_card;
+ return 0;
+}
+
+
+
+static int __init c101_init(void)
+{
+ if (hw == NULL) {
+#ifdef MODULE
+ printk(KERN_INFO "c101: no card initialized\n");
+#endif
+ return -ENOSYS; /* no parameters specified, abort */
+ }
+
+ printk(KERN_INFO "%s\n", version);
+
+ do {
+ unsigned long irq, ram;
+
+ irq = simple_strtoul(hw, &hw, 0);
+
+ if (*hw++ != ',')
+ break;
+ ram = simple_strtoul(hw, &hw, 0);
+
+ if (*hw == ':' || *hw == '\x0')
+ c101_run(irq, ram);
+
+ if (*hw == '\x0')
+ return 0;
+ }while(*hw++ == ':');
+
+ printk(KERN_ERR "c101: invalid hardware parameters\n");
+ return first_card ? 0 : -ENOSYS;
+}
+
+
+#ifndef MODULE
+static int __init c101_setup(char *str)
+{
+ hw = str;
+ return 1;
+}
+
+__setup("c101=", c101_setup);
+#endif
+
+
+static void __exit c101_cleanup(void)
+{
+ card_t *card = first_card;
+
+ while (card) {
+ card_t *ptr = card;
+ card = card->next_card;
+ unregister_hdlc_device(&ptr->hdlc);
+ c101_destroy_card(ptr);
+ }
+}
+
+
+module_init(c101_init);
+module_exit(c101_cleanup);
+
+MODULE_AUTHOR("Krzysztof Halasa <khc@pm.waw.pl>");
+MODULE_DESCRIPTION("Moxa C101 serial port driver");
+MODULE_PARM(hw, "s"); /* hw=irq,ram:irq,... */
+EXPORT_NO_SYMBOLS;
#include <asm/uaccess.h>
#include <linux/init.h>
-#include "syncppp.h"
+#include <net/syncppp.h>
#include "comx.h"
MODULE_AUTHOR("Author: Gergely Madarasz <gorgo@itc.hu>");
#error For now, COMX really needs the /proc filesystem
#endif
+#include <net/syncppp.h>
#include "comx.h"
-#include "syncppp.h"
MODULE_AUTHOR("Gergely Madarasz <gorgo@itc.hu>");
MODULE_DESCRIPTION("Common code for the COMX synchronous serial adapters");
#include <asm/dma.h>
#include <asm/byteorder.h>
-#include "syncppp.h"
+#include <net/syncppp.h>
#include "cosa.h"
/* Linux version stuff */
#define COSA_MTU 2000 /* FIXME: I don't know this exactly */
-#undef DEBUG_DATA 1 /* Dump the data read or written to the channel */
-#undef DEBUG_IRQS 1 /* Print the message when the IRQ is received */
-#undef DEBUG_IO 1 /* Dump the I/O traffic */
+#undef DEBUG_DATA //1 /* Dump the data read or written to the channel */
+#undef DEBUG_IRQS //1 /* Print the message when the IRQ is received */
+#undef DEBUG_IO //1 /* Dump the I/O traffic */
#define TX_TIMEOUT (5*HZ)
chan->stats.rx_bytes += chan->cosa->rxsize;
netif_rx(chan->rx_skb);
chan->rx_skb = 0;
- chan->pppdev.dev->trans_start = jiffies;
+ chan->pppdev.dev->last_rx = jiffies;
return 0;
}
up(&chan->rsem);
if (copy_to_user(buf, kbuf, count)) {
- kfree(buf);
+ kfree(kbuf);
return -EFAULT;
}
kfree(kbuf);
/* ---------- I/O debugging routines ---------- */
/*
* These routines can be used to monitor COSA/SRP I/O and to printk()
- * the data being transfered on the data and status I/O port in a
+ * the data being transferred on the data and status I/O port in a
* readable way.
*/
#define SR_RX_INT_ENA 0x80 /* receiver interrupt enable bit */
/* status register - input bits */
-#define SR_USR_RQ 0x20 /* user interupt request pending */
+#define SR_USR_RQ 0x20 /* user interrupt request pending */
#define SR_TX_RDY 0x40 /* transmitter empty (ready) */
#define SR_RX_RDY 0x80 /* receiver data ready */
--- /dev/null
+/*
+ * drivers/net/wan/dscc4/dscc4_main.c: a DSCC4 HDLC driver for Linux
+ *
+ * This software may be used and distributed according to the terms of the
+ * GNU Public License.
+ *
+ * The author may be reached as romieu@cogenit.fr.
+ * Specific bug reports/Asian food will be welcome.
+ *
+ * Special thanks to the nice people at CS-Telecom for the hardware and the
+ * access to the test/measure tools.
+ *
+ *
+ * Theory of Operation
+ *
+ * I. Board Compatibility
+ *
+ * This device driver is designed for the Siemens PEB20534 4 ports serial
+ * controller as found on Etinc PCISYNC cards. The documentation for the
+ * chipset is available at http://www.infineon.com:
+ * - Data Sheet "DSCC4, DMA Supported Serial Communication Controller with
+ * 4 Channels, PEB 20534 Version 2.1, PEF 20534 Version 2.1";
+ * - Application Hint "Management of DSCC4 on-chip FIFO resources".
+ * Jens David has built an adapter based on the same chipset. Take a look
+ * at http://www.afthd.tu-darmstadt.de/~dg1kjd/pciscc4 for a specific
+ * driver.
+ * Sample code (2 revisions) is available at Infineon.
+ *
+ * II. Board-specific settings
+ *
+ * Pcisync can transmit some clock signal to the outside world on the
+ * *first two* ports provided you put a quartz and a line driver on it and
+ * remove the jumpers. The operation is described on Etinc web site. If you
+ * go DCE on these ports, don't forget to use an adequate cable.
+ *
+ * Sharing of the PCI interrupt line for this board is possible.
+ *
+ * III. Driver operation
+ *
+ * The rx/tx operations are based on a linked list of descriptor. I haven't
+ * tried the start/stop descriptor method as this one looks like the cheapest
+ * in terms of PCI manipulation.
+ *
+ * Tx direction
+ * Once the data section of the current descriptor processed, the next linked
+ * descriptor is loaded if the HOLD bit isn't set in the current descriptor.
+ * If HOLD is met, the transmission is stopped until the host unsets it and
+ * signals the change via TxPOLL.
+ * When the tx ring is full, the xmit routine issues a call to netdev_stop.
+ * The device is supposed to be enabled again during an ALLS irq (we could
+ * use HI but as it's easy to lose events, it's fscked).
+ *
+ * Rx direction
+ * The received frames aren't supposed to span over multiple receiving areas.
+ * I may implement it some day but it isn't the highest ranked item.
+ *
+ * IV. Notes
+ * The chipset is buggy. Typically, under some specific load patterns (I
+ * wouldn't call them "high"), the irq queues and the descriptors look like
+ * some event has been lost. Even assuming some fancy PCI feature, it won't
+ * explain the reproducible missing "C" bit in the descriptors. Faking an
+ * irq in the periodic timer isn't really elegant but at least it seems
+ * reliable.
+ * The current error (XDU, RFO) recovery code is untested.
+ * So far, RDO takes its RX channel down and the right sequence to enable it
+ * again is still a mystery. If RDO happens, plan a reboot. More details
+ * in the code (NB: as this happens, TX still works).
+ * Don't mess the cables during operation, especially on DTE ports. I don't
+ * suggest it for DCE either but at least one can get some messages instead
+ * of a complete instant freeze.
+ * Tests are done on Rev. 20 of the silicon. The RDO handling changes with
+ * the documentation/chipset releases. An on-line errata would be welcome.
+ *
+ * TODO:
+ * - some trivial errors lurk,
+ * - the stats are fscked,
+ * - use polling at high irq/s,
+ * - performance analysis,
+ * - endianness.
+ *
+ */
+
+#include <linux/version.h>
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/errno.h>
+#include <linux/ioport.h>
+#include <linux/pci.h>
+#include <linux/kernel.h>
+#include <linux/mm.h>
+
+#include <asm/system.h>
+#include <asm/cache.h>
+#include <asm/byteorder.h>
+#include <asm/uaccess.h>
+#include <asm/io.h>
+#include <asm/irq.h>
+
+#include <linux/init.h>
+#include <linux/string.h>
+
+#include <linux/if_arp.h>
+#include <linux/netdevice.h>
+#include <linux/skbuff.h>
+#include <linux/delay.h>
+#include <net/syncppp.h>
+#include <linux/hdlc.h>
+
+/* Version */
+static const char * version = "$Id: dscc4.c,v 1.130 2001/02/25 15:27:34 romieu Exp $\n";
+static int debug;
+
+
+/* Module parameters */
+MODULE_AUTHOR("Maintainer: Francois Romieu <romieu@cogenit.fr>");
+MODULE_DESCRIPTION("Siemens PEB20534 PCI Controller");
+MODULE_PARM(debug,"i");
+
+/* Structures */
+struct TxFD {
+ u32 state;
+ u32 next;
+ u32 data;
+ u32 complete;
+ u32 jiffies; /* more hack to come :o) */
+};
+
+struct RxFD {
+ u32 state1;
+ u32 next;
+ u32 data;
+ u32 state2;
+ u32 end;
+};
+
+#define DEBUG
+#define DEBUG_PARANOID
+#define TX_RING_SIZE 32
+#define RX_RING_SIZE 32
+#define IRQ_RING_SIZE 64 /* Keep it A multiple of 32 */
+#define TX_TIMEOUT (HZ/10)
+#define BRR_DIVIDER_MAX 64*0x00008000
+#define dev_per_card 4
+
+#define SOURCE_ID(flags) ((flags >> 28 ) & 0x03)
+#define TO_SIZE(state) ((state >> 16) & 0x1fff)
+#define TO_STATE(len) cpu_to_le32((len & TxSizeMax) << 16)
+#define RX_MAX(len) ((((len) >> 5) + 1) << 5)
+#define SCC_REG_START(id) SCC_START+(id)*SCC_OFFSET
+
+#undef DEBUG
+
+struct dscc4_pci_priv {
+ u32 *iqcfg;
+ int cfg_cur;
+ spinlock_t lock;
+ struct pci_dev *pdev;
+
+ struct net_device *root;
+ dma_addr_t iqcfg_dma;
+ u32 xtal_hz;
+};
+
+struct dscc4_dev_priv {
+ struct sk_buff *rx_skbuff[RX_RING_SIZE];
+ struct sk_buff *tx_skbuff[TX_RING_SIZE];
+
+ struct RxFD *rx_fd;
+ struct TxFD *tx_fd;
+ u32 *iqrx;
+ u32 *iqtx;
+
+ u32 rx_current;
+ u32 tx_current;
+ u32 iqrx_current;
+ u32 iqtx_current;
+
+ u32 tx_dirty;
+ int bad_tx_frame;
+ int bad_rx_frame;
+ int rx_needs_refill;
+
+ dma_addr_t tx_fd_dma;
+ dma_addr_t rx_fd_dma;
+ dma_addr_t iqtx_dma;
+ dma_addr_t iqrx_dma;
+
+ struct net_device_stats stats;
+ struct timer_list timer;
+
+ struct dscc4_pci_priv *pci_priv;
+ spinlock_t lock;
+
+ int dev_id;
+ u32 flags;
+ u32 timer_help;
+ u32 hi_expected;
+
+ struct hdlc_device_struct hdlc;
+ int usecount;
+};
+
+/* GLOBAL registers definitions */
+#define GCMDR 0x00
+#define GSTAR 0x04
+#define GMODE 0x08
+#define IQLENR0 0x0C
+#define IQLENR1 0x10
+#define IQRX0 0x14
+#define IQTX0 0x24
+#define IQCFG 0x3c
+#define FIFOCR1 0x44
+#define FIFOCR2 0x48
+#define FIFOCR3 0x4c
+#define FIFOCR4 0x34
+#define CH0CFG 0x50
+#define CH0BRDA 0x54
+#define CH0BTDA 0x58
+
+/* SCC registers definitions */
+#define SCC_START 0x0100
+#define SCC_OFFSET 0x80
+#define CMDR 0x00
+#define STAR 0x04
+#define CCR0 0x08
+#define CCR1 0x0c
+#define CCR2 0x10
+#define BRR 0x2C
+#define RLCR 0x40
+#define IMR 0x54
+#define ISR 0x58
+
+/* Bit masks */
+#define IntRxScc0 0x10000000
+#define IntTxScc0 0x01000000
+
+#define TxPollCmd 0x00000400
+#define RxActivate 0x08000000
+#define MTFi 0x04000000
+#define Rdr 0x00400000
+#define Rdt 0x00200000
+#define Idr 0x00100000
+#define Idt 0x00080000
+#define TxSccRes 0x01000000
+#define RxSccRes 0x00010000
+#define TxSizeMax 0x1ffc
+#define RxSizeMax 0x1ffc
+
+#define Ccr0ClockMask 0x0000003f
+#define Ccr1LoopMask 0x00000200
+#define BrrExpMask 0x00000f00
+#define BrrMultMask 0x0000003f
+#define EncodingMask 0x00700000
+#define Hold 0x40000000
+#define SccBusy 0x10000000
+#define FrameOk (FrameVfr | FrameCrc)
+#define FrameVfr 0x80
+#define FrameRdo 0x40
+#define FrameCrc 0x20
+#define FrameAborted 0x00000200
+#define FrameEnd 0x80000000
+#define DataComplete 0x40000000
+#define LengthCheck 0x00008000
+#define SccEvt 0x02000000
+#define NoAck 0x00000200
+#define Action 0x00000001
+#define HiDesc 0x20000000
+
+/* SCC events */
+#define RxEvt 0xf0000000
+#define TxEvt 0x0f000000
+#define Alls 0x00040000
+#define Xdu 0x00010000
+#define Xmr 0x00002000
+#define Xpr 0x00001000
+#define Rdo 0x00000080
+#define Rfs 0x00000040
+#define Rfo 0x00000002
+#define Flex 0x00000001
+
+/* DMA core events */
+#define Cfg 0x00200000
+#define Hi 0x00040000
+#define Fi 0x00020000
+#define Err 0x00010000
+#define Arf 0x00000002
+#define ArAck 0x00000001
+
+/* Misc */
+#define NeedIDR 0x00000001
+#define NeedIDT 0x00000002
+#define RdoSet 0x00000004
+
+/* Functions prototypes */
+static __inline__ void dscc4_rx_irq(struct dscc4_pci_priv *, struct net_device *);
+static __inline__ void dscc4_tx_irq(struct dscc4_pci_priv *, struct net_device *);
+static int dscc4_found1(struct pci_dev *, unsigned long ioaddr);
+static int dscc4_init_one(struct pci_dev *, const struct pci_device_id *ent);
+static int dscc4_open(struct net_device *);
+static int dscc4_start_xmit(struct sk_buff *, struct net_device *);
+static int dscc4_close(struct net_device *);
+static int dscc4_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
+static int dscc4_change_mtu(struct net_device *dev, int mtu);
+static int dscc4_init_ring(struct net_device *);
+static void dscc4_release_ring(struct dscc4_dev_priv *);
+static void dscc4_timer(unsigned long);
+static void dscc4_tx_timeout(struct net_device *);
+static void dscc4_irq(int irq, void *dev_id, struct pt_regs *ptregs);
+static struct net_device_stats *dscc4_get_stats(struct net_device *);
+static int dscc4_attach_hdlc_device(struct net_device *);
+static void dscc4_unattach_hdlc_device(struct net_device *);
+static int dscc4_hdlc_open(struct hdlc_device_struct *);
+static void dscc4_hdlc_close(struct hdlc_device_struct *);
+static int dscc4_hdlc_ioctl(struct hdlc_device_struct *, struct ifreq *, int);
+static int dscc4_hdlc_xmit(hdlc_device *, struct sk_buff *);
+#ifdef EXPERIMENTAL_POLLING
+static int dscc4_tx_poll(struct dscc4_dev_priv *, struct net_device *);
+#endif
+
+void inline reset_TxFD(struct TxFD *tx_fd) {
+ /* FIXME: test with the last arg (size specification) = 0 */
+ tx_fd->state = FrameEnd | Hold | 0x00100000;
+ tx_fd->complete = 0x00000000;
+}
+
+void inline dscc4_release_ring_skbuff(struct sk_buff **p, int n)
+{
+ for(; n > 0; n--) {
+ if (*p)
+ dev_kfree_skb(*p);
+ p++;
+ }
+}
+
+static void dscc4_release_ring(struct dscc4_dev_priv *dpriv)
+{
+ struct pci_dev *pdev = dpriv->pci_priv->pdev;
+
+ pci_free_consistent(pdev, TX_RING_SIZE*sizeof(struct TxFD),
+ dpriv->tx_fd, dpriv->tx_fd_dma);
+ pci_free_consistent(pdev, RX_RING_SIZE*sizeof(struct RxFD),
+ dpriv->rx_fd, dpriv->rx_fd_dma);
+ dscc4_release_ring_skbuff(dpriv->tx_skbuff, TX_RING_SIZE);
+ dscc4_release_ring_skbuff(dpriv->rx_skbuff, RX_RING_SIZE);
+}
+
+void inline try_get_rx_skb(struct dscc4_dev_priv *priv, int cur, struct net_device *dev)
+{
+ struct sk_buff *skb;
+
+ skb = dev_alloc_skb(RX_MAX(dev->mtu+2));
+ priv->rx_skbuff[cur] = skb;
+ if (!skb) {
+ priv->rx_fd[cur--].data = (u32) NULL;
+ priv->rx_fd[cur%RX_RING_SIZE].state1 |= Hold;
+ priv->rx_needs_refill++;
+ return;
+ }
+ skb->dev = dev;
+ skb->protocol = htons(ETH_P_IP);
+ skb->mac.raw = skb->data;
+ priv->rx_fd[cur].data = pci_map_single(priv->pci_priv->pdev, skb->data,
+ skb->len, PCI_DMA_FROMDEVICE);
+}
+
+/*
+ * IRQ/thread/whatever safe
+ */
+static int dscc4_wait_ack_cec(u32 ioaddr, struct net_device *dev, char *msg)
+{
+ s16 i = 0;
+
+ while (readl(ioaddr + STAR) & SccBusy) {
+ if (i++ < 0) {
+ printk(KERN_ERR "%s: %s timeout\n", dev->name, msg);
+ return -1;
+ }
+ }
+ printk(KERN_DEBUG "%s: %s ack (%d try)\n", dev->name, msg, i);
+ return 0;
+}
+
+static int dscc4_do_action(struct net_device *dev, char *msg)
+{
+ unsigned long ioaddr = dev->base_addr;
+ u32 state;
+ s16 i;
+
+ writel(Action, ioaddr + GCMDR);
+ ioaddr += GSTAR;
+ for (i = 0; i >= 0; i++) {
+ state = readl(ioaddr);
+ if (state & Arf) {
+ printk(KERN_ERR "%s: %s failed\n", dev->name, msg);
+ writel(Arf, ioaddr);
+ return -1;
+ } else if (state & ArAck) {
+ printk(KERN_DEBUG "%s: %s ack (%d try)\n",
+ dev->name, msg, i);
+ writel(ArAck, ioaddr);
+ return 0;
+ }
+ }
+ printk(KERN_ERR "%s: %s timeout\n", dev->name, msg);
+ return -1;
+}
+
+static __inline__ int dscc4_xpr_ack(struct dscc4_dev_priv *dpriv)
+{
+ int cur;
+ s16 i;
+
+ cur = dpriv->iqtx_current%IRQ_RING_SIZE;
+ for (i = 0; i >= 0; i++) {
+ if (!(dpriv->flags & (NeedIDR | NeedIDT)) ||
+ (dpriv->iqtx[cur] & Xpr))
+ return 0;
+ }
+ printk(KERN_ERR "%s: %s timeout\n", "dscc4", "XPR");
+ return -1;
+}
+
+static __inline__ void dscc4_rx_skb(struct dscc4_dev_priv *dpriv, int cur,
+ struct RxFD *rx_fd, struct net_device *dev)
+{
+ struct pci_dev *pdev = dpriv->pci_priv->pdev;
+ struct sk_buff *skb;
+ int pkt_len;
+
+ skb = dpriv->rx_skbuff[cur];
+ pkt_len = TO_SIZE(rx_fd->state2);
+ pci_dma_sync_single(pdev, rx_fd->data, pkt_len, PCI_DMA_FROMDEVICE);
+ if((skb->data[pkt_len - 1] & FrameOk) == FrameOk) {
+ pci_unmap_single(pdev, rx_fd->data, skb->len, PCI_DMA_FROMDEVICE);
+ dpriv->stats.rx_packets++;
+ dpriv->stats.rx_bytes += pkt_len;
+ skb->tail += pkt_len;
+ skb->len = pkt_len;
+ if (netif_running(hdlc_to_dev(&dpriv->hdlc)))
+ hdlc_netif_rx(&dpriv->hdlc, skb);
+ else
+ netif_rx(skb);
+ try_get_rx_skb(dpriv, cur, dev);
+ } else {
+ if(skb->data[pkt_len - 1] & FrameRdo)
+ dpriv->stats.rx_fifo_errors++;
+ else if(!(skb->data[pkt_len - 1] | ~FrameCrc))
+ dpriv->stats.rx_crc_errors++;
+ else if(!(skb->data[pkt_len - 1] | ~FrameVfr))
+ dpriv->stats.rx_length_errors++;
+ else
+ dpriv->stats.rx_errors++;
+ }
+ rx_fd->state1 |= Hold;
+ rx_fd->state2 = 0x00000000;
+ rx_fd->end = 0xbabeface;
+ if (!rx_fd->data)
+ return;
+ rx_fd--;
+ if (!cur)
+ rx_fd += RX_RING_SIZE;
+ rx_fd->state1 &= ~Hold;
+}
+
+static int __init dscc4_init_one (struct pci_dev *pdev,
+ const struct pci_device_id *ent)
+{
+ struct dscc4_pci_priv *priv;
+ struct dscc4_dev_priv *dpriv;
+ int i;
+ static int cards_found = 0;
+ unsigned long ioaddr;
+
+ printk(KERN_DEBUG "%s", version);
+
+ if (pci_enable_device(pdev))
+ goto err_out;
+ if (!request_mem_region(pci_resource_start(pdev, 0),
+ pci_resource_len(pdev, 0), "registers")) {
+ printk (KERN_ERR "dscc4: can't reserve MMIO region (regs)\n");
+ goto err_out;
+ }
+ if (!request_mem_region(pci_resource_start(pdev, 1),
+ pci_resource_len(pdev, 1), "LBI interface")) {
+ printk (KERN_ERR "dscc4: can't reserve MMIO region (lbi)\n");
+ goto err_out_free_mmio_region0;
+ }
+ ioaddr = (unsigned long)ioremap(pci_resource_start(pdev, 0),
+ pci_resource_len(pdev, 0));
+ if (!ioaddr) {
+ printk(KERN_ERR "dscc4: cannot remap MMIO region %lx @ %lx\n",
+ pci_resource_len(pdev, 0), pci_resource_start(pdev, 0));
+ goto err_out_free_mmio_region;
+ }
+ printk(KERN_DEBUG "Siemens DSCC4, MMIO at %#lx (regs), %#lx (lbi), IRQ %d.\n",
+ pci_resource_start(pdev, 0),
+ pci_resource_start(pdev, 1), pdev->irq);
+
+ /* High PCI latency useless. Cf app. note. */
+ pci_write_config_byte(pdev, PCI_LATENCY_TIMER, 0x10);
+ pci_set_master(pdev);
+
+ if (dscc4_found1(pdev, ioaddr))
+ goto err_out_iounmap;
+
+ priv = (struct dscc4_pci_priv *)pci_get_drvdata(pdev);
+
+ if (request_irq(pdev->irq, &dscc4_irq, SA_SHIRQ, "dscc4", priv->root)) {
+ printk(KERN_WARNING "dscc4: IRQ %d is busy\n", pdev->irq);
+ goto err_out_iounmap;
+ }
+ priv->pdev = pdev;
+
+ /* power up/little endian/dma core controlled via hold bit */
+ writel(0x00000000, ioaddr + GMODE);
+ /* Shared interrupt queue */
+ {
+ u32 bits;
+
+ bits = (IRQ_RING_SIZE >> 5) - 1;
+ bits |= bits << 4;
+ bits |= bits << 8;
+ bits |= bits << 16;
+ writel(bits, ioaddr + IQLENR0);
+ }
+ /* Global interrupt queue */
+ writel((u32)(((IRQ_RING_SIZE >> 5) - 1) << 20), ioaddr + IQLENR1);
+ priv->iqcfg = (u32 *) pci_alloc_consistent(pdev,
+ IRQ_RING_SIZE*sizeof(u32), &priv->iqcfg_dma);
+ if (!priv->iqcfg)
+ goto err_out_free_irq;
+ writel(priv->iqcfg_dma, ioaddr + IQCFG);
+
+ /*
+ * SCC 0-3 private rx/tx irq structures
+ * IQRX/TXi needs to be set soon. Learned it the hard way...
+ */
+ for(i = 0; i < dev_per_card; i++) {
+ dpriv = (struct dscc4_dev_priv *)(priv->root + i)->priv;
+ dpriv->iqtx = (u32 *) pci_alloc_consistent(pdev,
+ IRQ_RING_SIZE*sizeof(u32), &dpriv->iqtx_dma);
+ if (!dpriv->iqtx)
+ goto err_out_free_iqtx;
+ writel(dpriv->iqtx_dma, ioaddr + IQTX0 + i*4);
+ }
+ for(i = 0; i < dev_per_card; i++) {
+ dpriv = (struct dscc4_dev_priv *)(priv->root + i)->priv;
+ dpriv->iqrx = (u32 *) pci_alloc_consistent(pdev,
+ IRQ_RING_SIZE*sizeof(u32), &dpriv->iqrx_dma);
+ if (!dpriv->iqrx)
+ goto err_out_free_iqrx;
+ writel(dpriv->iqrx_dma, ioaddr + IQRX0 + i*4);
+ }
+
+ /*
+ * Cf application hint. Beware of hard-lock condition on
+ * threshold .
+ */
+ writel(0x42104000, ioaddr + FIFOCR1);
+ //writel(0x9ce69800, ioaddr + FIFOCR2);
+ writel(0xdef6d800, ioaddr + FIFOCR2);
+ //writel(0x11111111, ioaddr + FIFOCR4);
+ writel(0x18181818, ioaddr + FIFOCR4);
+ // FIXME: should depend on the chipset revision
+ writel(0x0000000e, ioaddr + FIFOCR3);
+
+ writel(0xff200001, ioaddr + GCMDR);
+
+ cards_found++;
+ return 0;
+
+err_out_free_iqrx:
+ while (--i >= 0) {
+ dpriv = (struct dscc4_dev_priv *)(priv->root + i)->priv;
+ pci_free_consistent(pdev, IRQ_RING_SIZE*sizeof(u32),
+ dpriv->iqrx, dpriv->iqrx_dma);
+ }
+ i = dev_per_card;
+err_out_free_iqtx:
+ while (--i >= 0) {
+ dpriv = (struct dscc4_dev_priv *)(priv->root + i)->priv;
+ pci_free_consistent(pdev, IRQ_RING_SIZE*sizeof(u32),
+ dpriv->iqtx, dpriv->iqtx_dma);
+ }
+ pci_free_consistent(pdev, IRQ_RING_SIZE*sizeof(u32), priv->iqcfg,
+ priv->iqcfg_dma);
+err_out_free_irq:
+ free_irq(pdev->irq, priv->root);
+err_out_iounmap:
+ iounmap ((void *)ioaddr);
+err_out_free_mmio_region:
+ release_mem_region(pci_resource_start(pdev, 1),
+ pci_resource_len(pdev, 1));
+err_out_free_mmio_region0:
+ release_mem_region(pci_resource_start(pdev, 0),
+ pci_resource_len(pdev, 0));
+err_out:
+ return -ENODEV;
+};
+
+static int dscc4_found1(struct pci_dev *pdev, unsigned long ioaddr)
+{
+ struct dscc4_pci_priv *ppriv;
+ struct dscc4_dev_priv *dpriv;
+ struct net_device *dev;
+ int i = 0;
+
+ dpriv = (struct dscc4_dev_priv *)
+ kmalloc(dev_per_card*sizeof(struct dscc4_dev_priv), GFP_KERNEL);
+ if (!dpriv) {
+ printk(KERN_ERR "dscc4: can't allocate data\n");
+ goto err_out;
+ }
+ memset(dpriv, 0, dev_per_card*sizeof(struct dscc4_dev_priv));
+
+ dev = (struct net_device *)
+ kmalloc(dev_per_card*sizeof(struct net_device), GFP_KERNEL);
+ if (!dev) {
+ printk(KERN_ERR "dscc4: can't allocate net_device\n");
+ goto err_dealloc_priv;
+ }
+ memset(dev, 0, dev_per_card*sizeof(struct net_device));
+
+ ppriv = (struct dscc4_pci_priv *)
+ kmalloc(sizeof(struct dscc4_pci_priv), GFP_KERNEL);
+ if (!ppriv) {
+ printk(KERN_ERR "dscc4: can't allocate pci private data.\n");
+ goto err_dealloc_dev;
+ }
+ memset(ppriv, 0, sizeof(struct dscc4_pci_priv));
+
+ for (i = 0; i < dev_per_card; i++) {
+ struct dscc4_dev_priv *p;
+ struct net_device *d;
+
+ d = dev + i;
+ d->base_addr = ioaddr;
+ d->init = NULL;
+ d->irq = pdev->irq;
+ /* The card adds the crc */
+ d->type = ARPHRD_RAWHDLC;
+ d->open = dscc4_open;
+ d->stop = dscc4_close;
+ d->hard_start_xmit = dscc4_start_xmit;
+ d->set_multicast_list = NULL;
+ d->do_ioctl = dscc4_ioctl;
+ d->get_stats = dscc4_get_stats;
+ d->change_mtu = dscc4_change_mtu;
+ d->mtu = HDLC_MAX_MTU;
+ d->flags = IFF_MULTICAST|IFF_POINTOPOINT|IFF_NOARP;
+ d->tx_timeout = dscc4_tx_timeout;
+ d->watchdog_timeo = TX_TIMEOUT;
+
+ p = dpriv + i;
+ p->dev_id = i;
+ p->pci_priv = ppriv;
+ spin_lock_init(&p->lock);
+ d->priv = p;
+
+ if (dev_alloc_name(d, "scc%d")<0) {
+ printk(KERN_ERR "dev_alloc_name failed for scc.\n");
+ goto err_dealloc_dev;
+ }
+ if (register_netdev(d)) {
+ printk(KERN_ERR "%s: register_netdev != 0.\n", d->name);
+ goto err_dealloc_dev;
+ }
+ dscc4_attach_hdlc_device(d);
+ SET_MODULE_OWNER(d);
+ }
+ ppriv->root = dev;
+ ppriv->pdev = pdev;
+ spin_lock_init(&ppriv->lock);
+ pdev->driver_data = ppriv;
+ pci_set_drvdata(pdev, ppriv);
+ return 0;
+
+err_dealloc_dev:
+ while (--i >= 0)
+ unregister_netdev(dev + i);
+ kfree(dev);
+err_dealloc_priv:
+ kfree(dpriv);
+err_out:
+ return -1;
+};
+
+static void dscc4_timer(unsigned long data)
+{
+ struct net_device *dev = (struct net_device *)data;
+ struct dscc4_dev_priv *dpriv;
+ struct dscc4_pci_priv *ppriv;
+
+ dpriv = dev->priv;
+ if (netif_queue_stopped(dev) &&
+ ((jiffies - dev->trans_start) > TX_TIMEOUT)) {
+ ppriv = dpriv->pci_priv;
+ if (dpriv->iqtx[dpriv->iqtx_current%IRQ_RING_SIZE]) {
+ u32 flags;
+
+ printk(KERN_DEBUG "%s: pending events\n", dev->name);
+ dev->trans_start = jiffies;
+ spin_lock_irqsave(&ppriv->lock, flags);
+ dscc4_tx_irq(ppriv, dev);
+ spin_unlock_irqrestore(&ppriv->lock, flags);
+ } else {
+ struct TxFD *tx_fd;
+ struct sk_buff *skb;
+ int i,j;
+
+ printk(KERN_DEBUG "%s: missing events\n", dev->name);
+ i = dpriv->tx_dirty%TX_RING_SIZE;
+ j = dpriv->tx_current - dpriv->tx_dirty;
+ dpriv->stats.tx_dropped += j;
+ while(j--) {
+ skb = dpriv->tx_skbuff[i];
+ tx_fd = dpriv->tx_fd + i;
+ if (skb) {
+ dpriv->tx_skbuff[i] = NULL;
+ pci_unmap_single(ppriv->pdev, tx_fd->data, skb->len,
+ PCI_DMA_TODEVICE);
+ dev_kfree_skb_irq(skb);
+ } else
+ printk(KERN_INFO "%s: hardware on drugs!\n", dev->name);
+ tx_fd->data = 0; /* DEBUG */
+ tx_fd->complete &= ~DataComplete;
+ i++;
+ i %= TX_RING_SIZE;
+ }
+ dpriv->tx_dirty = dpriv->tx_current;
+ dev->trans_start = jiffies;
+ netif_wake_queue(dev);
+ printk(KERN_DEBUG "%s: re-enabled\n", dev->name);
+ }
+ }
+ dpriv->timer.expires = jiffies + TX_TIMEOUT;
+ add_timer(&dpriv->timer);
+}
+
+static void dscc4_tx_timeout(struct net_device *dev)
+{
+ /* FIXME: something is missing there */
+};
+
+static int dscc4_open(struct net_device *dev)
+{
+ struct dscc4_dev_priv *dpriv = (struct dscc4_dev_priv *)dev->priv;
+ struct dscc4_pci_priv *ppriv;
+ u32 ioaddr = 0;
+
+ MOD_INC_USE_COUNT;
+
+ ppriv = dpriv->pci_priv;
+
+ if (dscc4_init_ring(dev))
+ goto err_out;
+
+ ioaddr = dev->base_addr + SCC_REG_START(dpriv->dev_id);
+
+ /* FIXME: VIS */
+ writel(readl(ioaddr + CCR0) | 0x80001000, ioaddr + CCR0);
+
+ writel(LengthCheck | (dev->mtu >> 5), ioaddr + RLCR);
+
+ /* no address recognition/crc-CCITT/cts enabled */
+ writel(readl(ioaddr + CCR1) | 0x021c8000, ioaddr + CCR1);
+
+ /* Ccr2.Rac = 0 */
+ writel(0x00050008 & ~RxActivate, ioaddr + CCR2);
+
+#ifdef EXPERIMENTAL_POLLING
+ writel(0xfffeef7f, ioaddr + IMR); /* Interrupt mask */
+#else
+ /* Don't mask RDO. Ever. */
+ //writel(0xfffaef7f, ioaddr + IMR); /* Interrupt mask */
+ writel(0xfffaef7e, ioaddr + IMR); /* Interrupt mask */
+#endif
+ /* IDT+IDR during XPR */
+ dpriv->flags = NeedIDR | NeedIDT;
+
+ /*
+ * The following is a bit paranoid...
+ *
+ * NB: the datasheet "...CEC will stay active if the SCC is in
+ * power-down mode or..." and CCR2.RAC = 1 are two different
+ * situations.
+ */
+ if (readl(ioaddr + STAR) & SccBusy) {
+ printk(KERN_ERR "%s busy. Try later\n", dev->name);
+ goto err_free_ring;
+ }
+ writel(TxSccRes | RxSccRes, ioaddr + CMDR);
+
+ /* ... the following isn't */
+ if (dscc4_wait_ack_cec(ioaddr, dev, "Cec"))
+ goto err_free_ring;
+
+ /*
+ * I would expect XPR near CE completion (before ? after ?).
+ * At worst, this code won't see a late XPR and people
+ * will have to re-issue an ifconfig (this is harmless).
+ * WARNING, a really missing XPR usually means a hardware
+ * reset is needed. Suggestions anyone ?
+ */
+ if (dscc4_xpr_ack(dpriv))
+ goto err_free_ring;
+
+ netif_start_queue(dev);
+
+ init_timer(&dpriv->timer);
+ dpriv->timer.expires = jiffies + 10*HZ;
+ dpriv->timer.data = (unsigned long)dev;
+ dpriv->timer.function = &dscc4_timer;
+ add_timer(&dpriv->timer);
+ netif_carrier_on(dev);
+
+ return 0;
+
+err_free_ring:
+ dscc4_release_ring(dpriv);
+err_out:
+ MOD_DEC_USE_COUNT;
+ return -EAGAIN;
+}
+
+#ifdef EXPERIMENTAL_POLLING
+static int dscc4_tx_poll(struct dscc4_dev_priv *dpriv, struct net_device *dev)
+{
+ /* FIXME: it's gonna be easy (TM), for sure */
+}
+#endif /* EXPERIMENTAL_POLLING */
+
+static int dscc4_start_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ struct dscc4_dev_priv *dpriv = dev->priv;
+ struct dscc4_pci_priv *ppriv;
+ struct TxFD *tx_fd;
+ int cur, next;
+
+ ppriv = dpriv->pci_priv;
+ cur = dpriv->tx_current++%TX_RING_SIZE;
+ next = dpriv->tx_current%TX_RING_SIZE;
+ dpriv->tx_skbuff[next] = skb;
+ tx_fd = dpriv->tx_fd + next;
+ tx_fd->state = FrameEnd | Hold | TO_STATE(skb->len & TxSizeMax);
+ tx_fd->data = pci_map_single(ppriv->pdev, skb->data, skb->len,
+ PCI_DMA_TODEVICE);
+ tx_fd->complete = 0x00000000;
+ mb(); // FIXME: suppress ?
+
+#ifdef EXPERIMENTAL_POLLING
+ spin_lock(&dpriv->lock);
+ while(dscc4_tx_poll(dpriv, dev));
+ spin_unlock(&dpriv->lock);
+#endif
+ /*
+ * I know there's a window for a race in the following lines but
+ * dscc4_timer will take good care of it. The chipset eats events
+ * (especially the net_dev re-enabling ones) thus there is no
+ * reason to try and be smart.
+ */
+ if ((dpriv->tx_dirty + 16) < dpriv->tx_current) {
+ netif_stop_queue(dev);
+ dpriv->hi_expected = 2;
+ }
+ tx_fd = dpriv->tx_fd + cur;
+ tx_fd->state &= ~Hold;
+ mb(); // FIXME: suppress ?
+
+ /*
+ * One may avoid some pci transactions during intense TX periods.
+ * Not sure it's worth the pain...
+ */
+ writel((TxPollCmd << dpriv->dev_id) | NoAck, dev->base_addr + GCMDR);
+ dev->trans_start = jiffies;
+ return 0;
+}
+
+static int dscc4_close(struct net_device *dev)
+{
+ struct dscc4_dev_priv *dpriv = (struct dscc4_dev_priv *)dev->priv;
+ u32 ioaddr = dev->base_addr;
+ int dev_id;
+
+ del_timer_sync(&dpriv->timer);
+ netif_stop_queue(dev);
+
+ dev_id = dpriv->dev_id;
+
+ writel(0x00050000, ioaddr + SCC_REG_START(dev_id) + CCR2);
+ writel(MTFi|Rdr|Rdt, ioaddr + CH0CFG + dev_id*0x0c); /* Reset Rx/Tx */
+ writel(0x00000001, ioaddr + GCMDR);
+
+ dscc4_release_ring(dpriv);
+
+ MOD_DEC_USE_COUNT;
+ return 0;
+}
+
+static int dscc4_set_clock(struct net_device *dev, u32 *bps, u32 *state)
+{
+ struct dscc4_dev_priv *dpriv = (struct dscc4_dev_priv *)dev->priv;
+ u32 brr;
+
+ *state &= ~Ccr0ClockMask;
+ if (*bps) { /* DCE */
+ u32 n = 0, m = 0, divider;
+ int xtal;
+
+ xtal = dpriv->pci_priv->xtal_hz;
+ if (!xtal)
+ return -1;
+ divider = xtal / *bps;
+ if (divider > BRR_DIVIDER_MAX) {
+ divider >>= 4;
+ *state |= 0x00000036; /* Clock mode 6b (BRG/16) */
+ } else
+ *state |= 0x00000037; /* Clock mode 7b (BRG) */
+ if (divider >> 22) {
+ n = 63;
+ m = 15;
+ } else if (divider) {
+ /* Extraction of the 6 highest weighted bits */
+ m = 0;
+ while (0xffffffc0 & divider) {
+ m++;
+ divider >>= 1;
+ }
+ n = divider;
+ }
+ brr = (m << 8) | n;
+ divider = n << m;
+ if (!(*state & 0x00000001)) /* Clock mode 6b */
+ divider <<= 4;
+ *bps = xtal / divider;
+ } else { /* DTE */
+ /*
+ * "state" already reflects Clock mode 0a.
+ * Nothing more to be done
+ */
+ brr = 0;
+ }
+ writel(brr, dev->base_addr + BRR + SCC_REG_START(dpriv->dev_id));
+
+ return 0;
+}
+
+#ifdef LATER_PLEASE
+/*
+ * -*- [RFC] Configuring Synchronous Interfaces in Linux -*-
+ */
+
+// FIXME: MEDIA already defined in linux/hdlc.h
+#define HDLC_MEDIA_V35 0
+#define HDLC_MEDIA_RS232 1
+#define HDLC_MEDIA_X21 2
+#define HDLC_MEDIA_E1 3
+#define HDLC_MEDIA_HSSI 4
+
+#define HDLC_CODING_NRZ 0
+#define HDLC_CODING_NRZI 1
+#define HDLC_CODING_FM0 2
+#define HDLC_CODING_FM1 3
+#define HDLC_CODING_MANCHESTER 4
+
+#define HDLC_CRC_NONE 0
+#define HDLC_CRC_16 1
+#define HDLC_CRC_32 2
+#define HDLC_CRC_CCITT 3
+
+/* RFC: add the crc reset value ? */
+struct hdlc_physical {
+ u8 media;
+ u8 coding;
+ u32 rate;
+ u8 crc;
+ u8 crc_siz; /* 2 or 4 bytes */
+ u8 shared_flags; /* Discouraged on the DSCC4 */
+};
+
+// FIXME: PROTO already defined in linux/hdlc.h
+#define HDLC_PROTO_RAW 0
+#define HDLC_PROTO_FR 1
+#define HDLC_PROTO_X25 2
+#define HDLC_PROTO_PPP 3
+#define HDLC_PROTO_CHDLC 4
+
+struct hdlc_protocol {
+ u8 proto;
+
+ union {
+ } u;
+};
+
+struct screq {
+ u16 media_group;
+
+ union {
+ struct hdlc_physical hdlc_phy;
+ struct hdlc_protocol hdlc_proto;
+ } u;
+};
+
+// FIXME: go sub-module
+static struct {
+ u16 coding;
+ u16 bits;
+} map[] = {
+ {HDLC_CODING_NRZ, 0x00},
+ {HDLC_CODING_NRZI, 0x20},
+ {HDLC_CODING_FM0, 0x40},
+ {HDLC_CODING_FM1, 0x50},
+ {HDLC_CODING_MANCHESTER, 0x60},
+ {65535, 0x00}
+};
+#endif /* LATER_PLEASE */
+
+static int dscc4_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
+{
+ struct dscc4_dev_priv *dpriv = dev->priv;
+ u32 state, ioaddr;
+
+ if (dev->flags & IFF_UP)
+ return -EBUSY;
+
+ switch (cmd) {
+ /* Set built-in quartz frequency */
+ case SIOCDEVPRIVATE: {
+ u32 hz;
+
+ hz = ifr->ifr_ifru.ifru_ivalue;
+ if (hz >= 33000000) /* 33 MHz */
+ return -EOPNOTSUPP;
+ dpriv->pci_priv->xtal_hz = hz;
+ return 0;
+ }
+ /* Set/unset loopback */
+ case SIOCDEVPRIVATE+1: {
+ u32 flags;
+
+ ioaddr = dev->base_addr + CCR1 +
+ SCC_REG_START(dpriv->dev_id);
+ state = readl(ioaddr);
+ flags = ifr->ifr_ifru.ifru_ivalue;
+ if (flags & 0x00000001) {
+ printk(KERN_DEBUG "%s: loopback\n", dev->name);
+ state |= 0x00000100;
+ } else {
+ printk(KERN_DEBUG "%s: normal\n", dev->name);
+ state &= ~0x00000100;
+ }
+ writel(state, ioaddr);
+ return 0;
+ }
+
+#ifdef LATER_PLEASE
+ case SIOCDEVPRIVATE+2: {
+ {
+ struct screq scr;
+
+ err = copy_from_user(&scr, ifr->ifr_ifru.ifru_data, sizeof(struct screq));
+ if (err)
+ return err;
+ do {
+ if (scr.u.hdlc_phy.coding == map[i].coding)
+ break;
+ } while (map[++i].coding != 65535);
+ if (!map[i].coding)
+ return -EOPNOTSUPP;
+
+ ioaddr = dev->base_addr + CCR0 +
+ SCC_REG_START(dpriv->dev_id);
+ state = readl(ioaddr) & ~EncodingMask;
+ state |= (u32)map[i].bits << 16;
+ writel(state, ioaddr);
+ printk("state: %08x\n", state); /* DEBUG */
+ return 0;
+ }
+ case SIOCDEVPRIVATE+3: {
+ struct screq *scr = (struct screq *)ifr->ifr_ifru.ifru_data;
+
+ ioaddr = dev->base_addr + CCR0 +
+ SCC_REG_START(dpriv->dev_id);
+ state = (readl(ioaddr) & EncodingMask) >> 16;
+ do {
+ if (state == map[i].bits)
+ break;
+ } while (map[++i].coding);
+ return put_user(map[i].coding, (u16 *)scr->u.hdlc_phy.coding);
+ }
+#endif /* LATER_PLEASE */
+
+ case HDLCSCLOCKRATE:
+ {
+ u32 state, bps;
+
+ bps = ifr->ifr_ifru.ifru_ivalue;
+ ioaddr = dev->base_addr + CCR0 +
+ SCC_REG_START(dpriv->dev_id);
+ state = readl(ioaddr);
+ if(dscc4_set_clock(dev, &bps, &state) < 0)
+ return -EOPNOTSUPP;
+ if (bps) { /* DCE */
+ printk(KERN_DEBUG "%s: generated RxClk (DCE)\n",
+ dev->name);
+ ifr->ifr_ifru.ifru_ivalue = bps;
+ } else { /* DTE */
+ state = 0x80001000;
+ printk(KERN_DEBUG "%s: external RxClk (DTE)\n",
+ dev->name);
+ }
+ writel(state, ioaddr);
+ return 0;
+ }
+ case HDLCGCLOCKRATE: {
+ u32 brr;
+ int bps;
+
+ brr = readl(dev->base_addr + BRR +
+ SCC_REG_START(dpriv->dev_id));
+ bps = dpriv->pci_priv->xtal_hz >> (brr >> 8);
+ bps /= (brr & 0x3f) + 1;
+ ifr->ifr_ifru.ifru_ivalue = bps;
+ return 0;
+ }
+
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+/*
+ * Change the interface MTU.
+ * Rejects out-of-range values and refuses to change the MTU while the
+ * interface is up.  Returns 0 on success, -EINVAL or -EBUSY on failure.
+ */
+static int dscc4_change_mtu(struct net_device *dev, int mtu)
+{
+	/* FIXME: chainsaw coded... (bounds below are ad-hoc) */
+	if ((mtu <= 3) || (mtu > 65531))
+		return -EINVAL;
+	if(dev->flags & IFF_UP)
+		return -EBUSY;
+	dev->mtu = mtu;
+	return(0);
+}
+
+/*
+ * Top-level interrupt handler for the whole card.
+ * Reads and acknowledges GSTAR, then dispatches Rx/Tx events to the
+ * per-channel service routines for every device on the card.  The
+ * entire handler runs under priv->lock (see FIXME below).
+ */
+static void dscc4_irq(int irq, void *dev_instance, struct pt_regs *ptregs)
+{
+	struct net_device *dev = dev_instance;
+	struct dscc4_pci_priv *priv;
+	u32 ioaddr, state;
+	unsigned long flags;
+	int i;
+
+	priv = ((struct dscc4_dev_priv *)dev->priv)->pci_priv;
+	/*
+	 * FIXME: shorten the protected area (set some bit telling we're
+	 * in an interrupt or increment some work-to-do counter etc...)
+	 */
+	spin_lock_irqsave(&priv->lock, flags);
+
+	ioaddr = dev->base_addr;
+
+	/* Global status register: zero means the irq is not for us */
+	state = readl(ioaddr + GSTAR);
+	if (!state)
+		goto out;
+	/* ack every reported event at once */
+	writel(state, ioaddr + GSTAR);
+
+	if (state & Arf) {
+		printk(KERN_ERR "%s: failure (Arf). Harass the maintener\n",
+		       dev->name);
+		goto out;
+	}
+	state &= ~ArAck;
+	if (state & Cfg) {
+		if (debug)
+			printk(KERN_DEBUG "CfgIV\n");
+		/* check the outcome of the last configuration command */
+		if (priv->iqcfg[priv->cfg_cur++%IRQ_RING_SIZE] & Arf)
+			printk(KERN_ERR "%s: %s failed\n", dev->name, "CFG");
+		if (!(state &= ~Cfg))
+			goto out;
+	}
+	/* Rx/Tx events are serviced for every channel of the card */
+	if (state & RxEvt) {
+		i = dev_per_card - 1;
+		do {
+			dscc4_rx_irq(priv, dev + i);
+		} while (--i >= 0);
+		state &= ~RxEvt;
+	}
+	if (state & TxEvt) {
+		i = dev_per_card - 1;
+		do {
+			dscc4_tx_irq(priv, dev + i);
+		} while (--i >= 0);
+		state &= ~TxEvt;
+	}
+out:
+	spin_unlock_irqrestore(&priv->lock, flags);
+}
+
+/*
+ * Tx interrupt service routine for one channel.
+ * Drains the Tx interrupt queue (iqtx): completes transmitted skbs on
+ * Alls, handles Xdu (data underrun), Xmr and Xpr (which triggers the
+ * deferred IDT/IDR ring (re)initialization).  Called from dscc4_irq
+ * with ppriv->lock held.
+ */
+static __inline__ void dscc4_tx_irq(struct dscc4_pci_priv *ppriv,
+				    struct net_device *dev)
+{
+	struct dscc4_dev_priv *dpriv = dev->priv;
+	u32 state;
+	int cur, loop = 0;
+
+try:
+	cur = dpriv->iqtx_current%IRQ_RING_SIZE;
+	state = dpriv->iqtx[cur];
+	if (!state) {
+#ifdef DEBUG
+		if (loop > 1)
+			printk(KERN_DEBUG "%s: Tx irq loop=%d\n", dev->name, loop);
+#endif
+		/* queue drained: restart Tx if enough room has been made */
+		if (loop && netif_queue_stopped(dev))
+			if ((dpriv->tx_dirty + 8) >= dpriv->tx_current)
+				netif_wake_queue(dev);
+		return;
+	}
+	loop++;
+	dpriv->iqtx[cur] = 0;
+	dpriv->iqtx_current++;
+
+#ifdef DEBUG_PARANOID
+	if (SOURCE_ID(state) != dpriv->dev_id) {
+		printk(KERN_DEBUG "%s (Tx): Source Id=%d, state=%08x\n",
+		       dev->name, SOURCE_ID(state), state );
+		return;
+	}
+	if (state & 0x0df80c01) {
+		printk(KERN_DEBUG "%s (Tx): state=%08x (UFO alert)\n",
+		       dev->name, state);
+		return;
+	}
+#endif
+	// state &= 0x0fffffff; /* Tracking the analyzed bits */
+	if (state & SccEvt) {
+		if (state & Alls) {
+			/* "All sent": reclaim the oldest pending descriptor */
+			struct TxFD *tx_fd;
+			struct sk_buff *skb;
+
+			cur = dpriv->tx_dirty%TX_RING_SIZE;
+			tx_fd = dpriv->tx_fd + cur;
+
+			skb = dpriv->tx_skbuff[cur];
+
+			/* XXX: hideous kludge - to be removed "later" */
+			if (!skb) {
+				printk(KERN_ERR "%s: NULL skb in tx_irq at index %d\n", dev->name, cur);
+				goto try;
+			}
+			dpriv->tx_dirty++; // MUST be after skb test
+
+			/* Happens sometime. Don't know what triggers it */
+			if (!(tx_fd->complete & DataComplete)) {
+				u32 ioaddr, isr;
+
+				ioaddr = dev->base_addr +
+					 SCC_REG_START(dpriv->dev_id) + ISR;
+				isr = readl(ioaddr);
+				printk(KERN_DEBUG
+				       "%s: DataComplete=0 cur=%d isr=%08x state=%08x\n",
+				       dev->name, cur, isr, state);
+				writel(isr, ioaddr);
+				dpriv->stats.tx_dropped++;
+			} else {
+				tx_fd->complete &= ~DataComplete;
+				/* only a frame-end descriptor counts as a packet */
+				if (tx_fd->state & FrameEnd) {
+					dpriv->stats.tx_packets++;
+					dpriv->stats.tx_bytes += skb->len;
+				}
+			}
+
+			dpriv->tx_skbuff[cur] = NULL;
+			pci_unmap_single(ppriv->pdev, tx_fd->data, skb->len,
+					 PCI_DMA_TODEVICE);
+			tx_fd->data = 0; /* DEBUG */
+			dev_kfree_skb_irq(skb);
+{ // DEBUG: re-Hold the last reclaimed descriptor
+			cur = (dpriv->tx_dirty-1)%TX_RING_SIZE;
+			tx_fd = dpriv->tx_fd + cur;
+			tx_fd->state |= Hold;
+}
+			if (!(state &= ~Alls))
+				goto try;
+		}
+		/*
+		 * Transmit Data Underrun
+		 */
+		if (state & Xdu) {
+			printk(KERN_ERR "dscc4: XDU. Contact maintainer\n");
+			dpriv->flags = NeedIDT;
+			/* Tx reset */
+			writel(MTFi | Rdt,
+			       dev->base_addr + 0x0c*dpriv->dev_id + CH0CFG);
+			writel(0x00000001, dev->base_addr + GCMDR);
+			return;
+		}
+		if (state & Xmr) {
+			/* Frame needs to be sent again - FIXME */
+			//dscc4_start_xmit(dpriv->tx_skbuff[dpriv->tx_dirty], dev);
+			if (!(state &= ~0x00002000)) /* DEBUG */
+				goto try;
+		}
+		if (state & Xpr) {
+			unsigned long ioaddr = dev->base_addr;
+			unsigned long scc_offset;
+			u32 scc_addr;
+
+			scc_offset = ioaddr + SCC_REG_START(dpriv->dev_id);
+			scc_addr = ioaddr + 0x0c*dpriv->dev_id;
+			if (readl(scc_offset + STAR) & SccBusy)
+				printk(KERN_DEBUG "%s busy. Fatal\n",
+				       dev->name);
+			/*
+			 * Keep this order: IDT before IDR
+			 */
+			if (dpriv->flags & NeedIDT) {
+				writel(MTFi | Idt, scc_addr + CH0CFG);
+				writel(dpriv->tx_fd_dma +
+				       (dpriv->tx_dirty%TX_RING_SIZE)*
+				       sizeof(struct TxFD), scc_addr + CH0BTDA);
+				if(dscc4_do_action(dev, "IDT"))
+					goto err_xpr;
+				dpriv->flags &= ~NeedIDT;
+				mb();
+			}
+			if (dpriv->flags & NeedIDR) {
+				writel(MTFi | Idr, scc_addr + CH0CFG);
+				writel(dpriv->rx_fd_dma +
+				       (dpriv->rx_current%RX_RING_SIZE)*
+				       sizeof(struct RxFD), scc_addr + CH0BRDA);
+				if(dscc4_do_action(dev, "IDR"))
+					goto err_xpr;
+				dpriv->flags &= ~NeedIDR;
+				mb();
+				/* Activate receiver and misc */
+				writel(0x08050008, scc_offset + CCR2);
+			}
+		err_xpr:
+			if (!(state &= ~Xpr))
+				goto try;
+		}
+	} else { /* ! SccEvt */
+		if (state & Hi) {
+#ifdef EXPERIMENTAL_POLLING
+			while(!dscc4_tx_poll(dpriv, dev));
+#endif
+			state &= ~Hi;
+		}
+		/*
+		 * FIXME: it may be avoided. Re-re-re-read the manual.
+		 */
+		if (state & Err) {
+			printk(KERN_ERR "%s: Tx ERR\n", dev->name);
+			dpriv->stats.tx_errors++;
+			state &= ~Err;
+		}
+	}
+	goto try;
+}
+
+/*
+ * Rx interrupt service routine for one channel.
+ * Drains the Rx interrupt queue (iqrx): passes completed frames up on
+ * Fi, refills held descriptors after errors, and attempts recovery
+ * from Receive Data Overflow (Rdo).  Called from dscc4_irq with
+ * priv->lock held.
+ */
+static __inline__ void dscc4_rx_irq(struct dscc4_pci_priv *priv, struct net_device *dev)
+{
+	struct dscc4_dev_priv *dpriv = dev->priv;
+	u32 state;
+	int cur;
+
+try:
+	cur = dpriv->iqrx_current%IRQ_RING_SIZE;
+	state = dpriv->iqrx[cur];
+	if (!state)
+		return;
+	dpriv->iqrx[cur] = 0;
+	dpriv->iqrx_current++;
+
+#ifdef DEBUG_PARANOID
+	if (SOURCE_ID(state) != dpriv->dev_id) {
+		printk(KERN_DEBUG "%s (Rx): Source Id=%d, state=%08x\n",
+		       dev->name, SOURCE_ID(state), state);
+		goto try;
+	}
+	if (state & 0x0df80c01) {
+		printk(KERN_DEBUG "%s (Rx): state=%08x (UFO alert)\n",
+		       dev->name, state);
+		goto try;
+	}
+#endif
+	if (!(state & SccEvt)){
+		struct RxFD *rx_fd;
+
+		state &= 0x00ffffff;
+		if (state & Err) { /* Hold or reset */
+			printk(KERN_DEBUG "%s (Rx): ERR\n", dev->name);
+			cur = dpriv->rx_current;
+			rx_fd = dpriv->rx_fd + cur;
+			/*
+			 * Presume we're not facing a DMAC receiver reset.
+			 * As We use the rx size-filtering feature of the
+			 * DSCC4, the beginning of a new frame is waiting in
+			 * the rx fifo. I bet a Receive Data Overflow will
+			 * happen most of time but let's try and avoid it.
+			 * Btw (as for RDO) if one experiences ERR whereas
+			 * the system looks rather idle, there may be a
+			 * problem with latency. In this case, increasing
+			 * RX_RING_SIZE may help.
+			 */
+			/* re-arm every held descriptor that still needs an skb */
+			while (dpriv->rx_needs_refill) {
+				while(!(rx_fd->state1 & Hold)) {
+					rx_fd++;
+					cur++;
+					if (!(cur = cur%RX_RING_SIZE))
+						rx_fd = dpriv->rx_fd;
+				}
+				dpriv->rx_needs_refill--;
+				try_get_rx_skb(dpriv, cur, dev);
+				if (!rx_fd->data)
+					goto try;
+				rx_fd->state1 &= ~Hold;
+				rx_fd->state2 = 0x00000000;
+				rx_fd->end = 0xbabeface;
+			}
+			goto try;
+		}
+		if (state & Fi) {
+			/* frame complete: hand it to the stack */
+			cur = dpriv->rx_current%RX_RING_SIZE;
+			rx_fd = dpriv->rx_fd + cur;
+			dscc4_rx_skb(dpriv, cur, rx_fd, dev);
+			dpriv->rx_current++;
+			goto try;
+		}
+		if (state & Hi ) { /* HI bit */
+			state &= ~Hi;
+			goto try;
+		}
+	} else { /* ! SccEvt */
+#ifdef DEBUG_PARANOIA
+		int i;
+		static struct {
+			u32 mask;
+			const char *irq_name;
+		} evts[] = {
+			{ 0x00008000, "TIN"},
+			{ 0x00004000, "CSC"},
+			{ 0x00000020, "RSC"},
+			{ 0x00000010, "PCE"},
+			{ 0x00000008, "PLLA"},
+			{ 0x00000004, "CDSC"},
+			{ 0, NULL}
+		};
+#endif /* DEBUG_PARANOIA */
+		state &= 0x00ffffff;
+#ifdef DEBUG_PARANOIA
+		for (i = 0; evts[i].irq_name; i++) {
+			if (state & evts[i].mask) {
+				printk(KERN_DEBUG "dscc4(%s): %s\n",
+				       dev->name, evts[i].irq_name);
+				if (!(state &= ~evts[i].mask))
+					goto try;
+			}
+		}
+#endif /* DEBUG_PARANOIA */
+		/*
+		 * Receive Data Overflow (FIXME: untested)
+		 */
+		if (state & Rdo) {
+			u32 ioaddr, scc_offset, scc_addr;
+			struct RxFD *rx_fd;
+			int cur;
+
+			//if (debug)
+			//	dscc4_rx_dump(dpriv);
+			ioaddr = dev->base_addr;
+			scc_addr = ioaddr + 0x0c*dpriv->dev_id;
+			scc_offset = ioaddr + SCC_REG_START(dpriv->dev_id);
+
+			/* stop the receiver while we sort things out */
+			writel(readl(scc_offset + CCR2) & ~RxActivate,
+			       scc_offset + CCR2);
+			/*
+			 * This has no effect. Why ?
+			 * ORed with TxSccRes, one sees the CFG ack (for
+			 * the TX part only).
+			 */
+			writel(RxSccRes, scc_offset + CMDR);
+			dpriv->flags |= RdoSet;
+
+			/*
+			 * Let's try and save something in the received data.
+			 * rx_current must be incremented at least once to
+			 * avoid HOLD in the BRDA-to-be-pointed desc.
+			 */
+			do {
+				cur = dpriv->rx_current++%RX_RING_SIZE;
+				rx_fd = dpriv->rx_fd + cur;
+				if (!(rx_fd->state2 & DataComplete))
+					break;
+				if (rx_fd->state2 & FrameAborted) {
+					dpriv->stats.rx_over_errors++;
+					rx_fd->state1 |= Hold;
+					rx_fd->state2 = 0x00000000;
+					rx_fd->end = 0xbabeface;
+				} else
+					dscc4_rx_skb(dpriv, cur, rx_fd, dev);
+			} while (1);
+
+			if (debug) {
+				if (dpriv->flags & RdoSet)
+					printk(KERN_DEBUG
+					       "dscc4: no RDO in Rx data\n");
+			}
+#ifdef DSCC4_RDO_EXPERIMENTAL_RECOVERY
+			/*
+			 * FIXME: must the reset be this violent ?
+			 */
+			writel(dpriv->rx_fd_dma +
+			       (dpriv->rx_current%RX_RING_SIZE)*
+			       sizeof(struct RxFD), scc_addr + CH0BRDA);
+			writel(MTFi|Rdr|Idr, scc_addr + CH0CFG);
+			if(dscc4_do_action(dev, "RDR")) {
+				printk(KERN_ERR "%s: RDO recovery failed(%s)\n",
+				       dev->name, "RDR");
+				goto rdo_end;
+			}
+			writel(MTFi|Idr, scc_addr + CH0CFG);
+			if(dscc4_do_action(dev, "IDR")) {
+				printk(KERN_ERR "%s: RDO recovery failed(%s)\n",
+				       dev->name, "IDR");
+				goto rdo_end;
+			}
+		rdo_end:
+#endif
+			/* re-enable the receiver */
+			writel(readl(scc_offset + CCR2) | RxActivate,
+			       scc_offset + CCR2);
+			goto try;
+		}
+		/* These will be used later */
+		if (state & Rfs) {
+			if (!(state &= ~Rfs))
+				goto try;
+		}
+		if (state & Rfo) {
+			if (!(state &= ~Rfo))
+				goto try;
+		}
+		if (state & Flex) {
+			if (!(state &= ~Flex))
+				goto try;
+		}
+	}
+}
+
+/*
+ * Allocate and initialize the Tx and Rx descriptor rings (DMA
+ * consistent memory) for one channel, including the dummy Tx skb the
+ * chip requires in the first descriptor.
+ * Returns 0 on success, -1 on allocation failure.
+ */
+static int dscc4_init_ring(struct net_device *dev)
+{
+	struct dscc4_dev_priv *dpriv = (struct dscc4_dev_priv *)dev->priv;
+	struct TxFD *tx_fd;
+	struct RxFD *rx_fd;
+	int i;
+
+	tx_fd = (struct TxFD *) pci_alloc_consistent(dpriv->pci_priv->pdev,
+		TX_RING_SIZE*sizeof(struct TxFD), &dpriv->tx_fd_dma);
+	if (!tx_fd)
+		goto err_out;
+	rx_fd = (struct RxFD *) pci_alloc_consistent(dpriv->pci_priv->pdev,
+		RX_RING_SIZE*sizeof(struct RxFD), &dpriv->rx_fd_dma);
+	if (!rx_fd)
+		goto err_free_dma_tx;
+
+	dpriv->tx_fd = tx_fd;
+	dpriv->rx_fd = rx_fd;
+	dpriv->rx_current = 0;
+	dpriv->tx_current = 0;
+	dpriv->tx_dirty = 0;
+
+	/* the dma core of the dscc4 will be locked on the first desc */
+	for(i = 0; i < TX_RING_SIZE; ) {
+		reset_TxFD(tx_fd);
+	        /* FIXME: NULL should be ok - to be tried */
+	        tx_fd->data = dpriv->tx_fd_dma;
+	        dpriv->tx_skbuff[i] = NULL;
+		i++;
+		tx_fd->next = (u32)(dpriv->tx_fd_dma + i*sizeof(struct TxFD));
+		tx_fd++;
+	}
+	/* close the Tx ring */
+	(--tx_fd)->next = (u32)dpriv->tx_fd_dma;
+{
+	/*
+	 * XXX: I would expect the following to work for the first descriptor
+	 * (tx_fd->state = 0xc0000000)
+	 * - Hold=1 (don't try and branch to the next descriptor);
+	 * - No=0 (I want an empty data section, i.e. size=0);
+	 * - Fe=1 (required by No=0 or we got an Err irq and must reset).
+	 * Alas, it fails (and locks solid). Thus the introduction of a dummy
+	 * skb to avoid No=0 (choose one: Ugly [ ] Tasteless [ ] VMS [ ]).
+	 * TODO: fiddle the tx threshold when time permits.
+	 */
+	struct sk_buff *skb;
+
+	skb = dev_alloc_skb(32);
+	if (!skb)
+		/* NOTE(review): the rx ring allocated above is not freed on
+		 * this failure path - confirm and plug the leak */
+		goto err_free_dma_tx;
+	skb->len = 32;
+	memset(skb->data, 0xaa, 16);
+	tx_fd -= (TX_RING_SIZE - 1);
+	tx_fd->state = 0xc0000000;
+	tx_fd->state |= ((u32)(skb->len & TxSizeMax)) << 16;
+	tx_fd->data = pci_map_single(dpriv->pci_priv->pdev, skb->data,
+				     skb->len, PCI_DMA_TODEVICE);
+	dpriv->tx_skbuff[0] = skb;
+}
+	for (i = 0; i < RX_RING_SIZE;) {
+		/* size set by the host. Multiple of 4 bytes please */
+	        rx_fd->state1 = HiDesc; /* Hi, no Hold */
+	        rx_fd->state2 = 0x00000000;
+	        rx_fd->end = 0xbabeface;
+	        rx_fd->state1 |= ((u32)(dev->mtu & RxSizeMax)) << 16;
+		try_get_rx_skb(dpriv, i, dev);
+		i++;
+		rx_fd->next = (u32)(dpriv->rx_fd_dma + i*sizeof(struct RxFD));
+		rx_fd++;
+	}
+	/* close the Rx ring and hold the DMA engine on the last desc */
+	(--rx_fd)->next = (u32)dpriv->rx_fd_dma;
+	rx_fd->state1 |= 0x40000000; /* Hold */
+
+	return 0;
+
+err_free_dma_tx:
+	pci_free_consistent(dpriv->pci_priv->pdev, TX_RING_SIZE*sizeof(*tx_fd),
+			    tx_fd, dpriv->tx_fd_dma);
+err_out:
+	return -1;
+}
+
+/* Return the per-channel statistics kept in the device private data. */
+static struct net_device_stats *dscc4_get_stats(struct net_device *dev)
+{
+	struct dscc4_dev_priv *priv = (struct dscc4_dev_priv *)dev->priv;
+
+	return &priv->stats;
+}
+
+/*
+ * PCI hot-unplug / module removal: tear down one card.
+ * Releases the irq, the interrupt queues, every channel's hdlc
+ * attachment and netdev registration, then the shared structures and
+ * the PCI memory regions.
+ */
+static void __exit dscc4_remove_one(struct pci_dev *pdev)
+{
+	struct dscc4_pci_priv *ppriv;
+	struct net_device *root;
+	int i;
+
+	ppriv = pci_get_drvdata(pdev);
+	root = ppriv->root;
+
+	free_irq(pdev->irq, root);
+	pci_free_consistent(pdev, IRQ_RING_SIZE*sizeof(u32), ppriv->iqcfg,
+			    ppriv->iqcfg_dma);
+	for (i=0; i < dev_per_card; i++) {
+		struct dscc4_dev_priv *dpriv;
+		struct net_device *dev;
+
+		dev = ppriv->root + i;
+		dscc4_unattach_hdlc_device(dev);
+
+		dpriv = (struct dscc4_dev_priv *)dev->priv;
+		/* NOTE(review): the iq rings are freed before
+		 * unregister_netdev() below - confirm nothing can still be
+		 * running on the device at this point */
+		pci_free_consistent(pdev, IRQ_RING_SIZE*sizeof(u32),
+				    dpriv->iqrx, dpriv->iqrx_dma);
+		pci_free_consistent(pdev, IRQ_RING_SIZE*sizeof(u32),
+				    dpriv->iqtx, dpriv->iqtx_dma);
+		unregister_netdev(dev);
+	}
+	kfree(root->priv);
+
+	iounmap((void *)root->base_addr);
+	kfree(root);
+
+	kfree(ppriv);
+
+	release_mem_region(pci_resource_start(pdev, 1),
+			   pci_resource_len(pdev, 1));
+	release_mem_region(pci_resource_start(pdev, 0),
+			   pci_resource_len(pdev, 0));
+}
+
+/*
+ * hdlc-layer ioctl hook: recover the net_device stashed in
+ * hdlc->netdev.base_addr (see dscc4_attach_hdlc_device) and forward.
+ */
+static int dscc4_hdlc_ioctl(struct hdlc_device_struct *hdlc, struct ifreq *ifr, int cmd)
+{
+	struct net_device *dev = (struct net_device *)hdlc->netdev.base_addr;
+	int result;
+
+	/* FIXME: locking ? */
+	result = dscc4_ioctl(dev, ifr, cmd);
+	return result;
+}
+
+/*
+ * hdlc-layer open hook: open the underlying device unless it is
+ * already running.
+ */
+static int dscc4_hdlc_open(struct hdlc_device_struct *hdlc)
+{
+	struct net_device *dev = (struct net_device *)(hdlc->netdev.base_addr);
+
+	if (netif_running(dev)) {
+		printk(KERN_DEBUG "%s: already running\n", dev->name); // DEBUG
+		return 0;
+	}
+	return dscc4_open(dev);
+}
+
+/* hdlc-layer transmit hook: forward the skb to the device xmit path. */
+static int dscc4_hdlc_xmit(hdlc_device *hdlc, struct sk_buff *skb)
+{
+	struct net_device *dev = (struct net_device *)hdlc->netdev.base_addr;
+
+	return dscc4_start_xmit(skb, dev);
+}
+
+/* hdlc-layer close hook: drop the use count taken at attach time. */
+static void dscc4_hdlc_close(struct hdlc_device_struct *hdlc)
+{
+	struct net_device *dev = (struct net_device *)hdlc->netdev.base_addr;
+	struct dscc4_dev_priv *dpriv;
+
+	dpriv = dev->priv;
+	--dpriv->usecount;
+}
+
+/* Operated under dev lock */
+static int dscc4_attach_hdlc_device(struct net_device *dev)
+{
+ struct dscc4_dev_priv *dpriv = dev->priv;
+ struct hdlc_device_struct *hdlc;
+ int result;
+
+ hdlc = &dpriv->hdlc;
+ /* XXX: Don't look at the next line */
+ hdlc->netdev.base_addr = (unsigned long)dev;
+ // FIXME: set hdlc->set_mode ?
+ hdlc->open = dscc4_hdlc_open;
+ hdlc->close = dscc4_hdlc_close;
+ hdlc->ioctl = dscc4_hdlc_ioctl;
+ hdlc->xmit = dscc4_hdlc_xmit;
+
+ result = register_hdlc_device(hdlc);
+ if (!result)
+ dpriv->usecount++;
+ return result;
+}
+
+/* Operated under dev lock */
+/* Operated under dev lock.  Undo dscc4_attach_hdlc_device(). */
+static void dscc4_unattach_hdlc_device(struct net_device *dev)
+{
+	struct dscc4_dev_priv *dpriv = dev->priv;
+
+	unregister_hdlc_device(&dpriv->hdlc);
+	dpriv->usecount--;
+}
+
+/* PCI IDs this driver binds to: the Siemens DSCC4 (PEB20534). */
+static struct pci_device_id dscc4_pci_tbl[] __devinitdata = {
+	{ PCI_VENDOR_ID_SIEMENS, PCI_DEVICE_ID_SIEMENS_DSCC4,
+	        PCI_ANY_ID, PCI_ANY_ID, },
+	{ 0,}
+};
+MODULE_DEVICE_TABLE(pci, dscc4_pci_tbl);
+
+/* PCI driver glue (2.4-era designated initializer syntax). */
+static struct pci_driver dscc4_driver = {
+	name:           "dscc4",
+	id_table:       dscc4_pci_tbl,
+	probe:          dscc4_init_one,
+	remove:         dscc4_remove_one,
+};
+
+/* Module entry point: register with the PCI subsystem. */
+static int __init dscc4_init_module(void)
+{
+	return pci_module_init(&dscc4_driver);
+}
+
+/* Module exit point: unregister the PCI driver. */
+static void __exit dscc4_cleanup_module(void)
+{
+	pci_unregister_driver(&dscc4_driver);
+}
+
+/* Wire the init/exit functions into the module loader. */
+module_init(dscc4_init_module);
+module_exit(dscc4_cleanup_module);
--- /dev/null
+#ifndef __HD64570_H
+#define __HD64570_H
+
+/* SCA HD64570 register definitions - all addresses for mode 0 (8086 MPU)
+   and 1 (64180 MPU). For modes 2 and 3, XOR the address with 0x01.
+
+   Source: HD64570 SCA User's Manual
+*/
+
+
+
+/* SCA Control Registers */
+#define LPR    0x00		/* Low Power */
+
+/* Wait controller registers */
+#define PABR0  0x02		/* Physical Address Boundary 0 */
+#define PABR1  0x03		/* Physical Address Boundary 1 */
+#define WCRL   0x04		/* Wait Control L */
+#define WCRM   0x05		/* Wait Control M */
+#define WCRH   0x06		/* Wait Control H */
+
+#define PCR    0x08		/* DMA Priority Control */
+#define DMER   0x09		/* DMA Master Enable */
+
+
+/* Interrupt registers */
+#define ISR0   0x10		/* Interrupt Status 0 */
+#define ISR1   0x11		/* Interrupt Status 1 */
+#define ISR2   0x12		/* Interrupt Status 2 */
+
+#define IER0   0x14		/* Interrupt Enable 0 */
+#define IER1   0x15		/* Interrupt Enable 1 */
+#define IER2   0x16		/* Interrupt Enable 2 */
+
+#define ITCR   0x18		/* Interrupt Control */
+#define IVR    0x1A		/* Interrupt Vector */
+#define IMVR   0x1C		/* Interrupt Modified Vector */
+
+
+
+/* MSCI channel (port) 0 registers - offset 0x20
+   MSCI channel (port) 1 registers - offset 0x40 */
+
+#define MSCI0_OFFSET 0x20
+#define MSCI1_OFFSET 0x40
+
+#define TRBL   0x00		/* TX/RX buffer L */ 
+#define TRBH   0x01		/* TX/RX buffer H */ 
+#define ST0    0x02		/* Status 0 */
+#define ST1    0x03		/* Status 1 */
+#define ST2    0x04		/* Status 2 */
+#define ST3    0x05		/* Status 3 */
+#define FST    0x06		/* Frame Status */
+#define IE0    0x08		/* Interrupt Enable 0 */
+#define IE1    0x09		/* Interrupt Enable 1 */
+#define IE2    0x0A		/* Interrupt Enable 2 */
+#define FIE    0x0B		/* Frame Interrupt Enable */
+#define CMD    0x0C		/* Command */
+#define MD0    0x0E		/* Mode 0 */
+#define MD1    0x0F		/* Mode 1 */
+#define MD2    0x10		/* Mode 2 */
+#define CTL    0x11		/* Control */
+#define SA0    0x12		/* Sync/Address 0 */
+#define SA1    0x13		/* Sync/Address 1 */
+#define IDL    0x14		/* Idle Pattern */
+#define TMC    0x15		/* Time Constant */
+#define RXS    0x16		/* RX Clock Source */
+#define TXS    0x17		/* TX Clock Source */
+#define TRC0   0x18		/* TX Ready Control 0 */ 
+#define TRC1   0x19		/* TX Ready Control 1 */ 
+#define RRC    0x1A		/* RX Ready Control */ 
+#define CST0   0x1C		/* Current Status 0 */
+#define CST1   0x1D		/* Current Status 1 */
+
+
+/* Timer channel 0 (port 0 RX) registers - offset 0x60
+   Timer channel 1 (port 0 TX) registers - offset 0x68
+   Timer channel 2 (port 1 RX) registers - offset 0x70
+   Timer channel 3 (port 1 TX) registers - offset 0x78
+*/
+
+#define TIMER0RX_OFFSET 0x60
+#define TIMER0TX_OFFSET 0x68
+#define TIMER1RX_OFFSET 0x70
+#define TIMER1TX_OFFSET 0x78
+
+#define TCNTL  0x00		/* Up-counter L */
+#define TCNTH  0x01		/* Up-counter H */
+#define TCONRL 0x02		/* Constant L */
+#define TCONRH 0x03		/* Constant H */
+#define TCSR   0x04		/* Control/Status */
+#define TEPR   0x05		/* Expand Prescale */
+
+
+
+/* DMA channel 0 (port 0 RX) registers - offset 0x80
+   DMA channel 1 (port 0 TX) registers - offset 0xA0
+   DMA channel 2 (port 1 RX) registers - offset 0xC0
+   DMA channel 3 (port 1 TX) registers - offset 0xE0
+*/
+
+#define DMAC0RX_OFFSET 0x80
+#define DMAC0TX_OFFSET 0xA0
+#define DMAC1RX_OFFSET 0xC0
+#define DMAC1TX_OFFSET 0xE0
+
+#define BARL   0x00		/* Buffer Address L (chained block) */
+#define BARH   0x01		/* Buffer Address H (chained block) */
+#define BARB   0x02		/* Buffer Address B (chained block) */
+
+#define DARL   0x00		/* RX Destination Addr L (single block) */
+#define DARH   0x01		/* RX Destination Addr H (single block) */
+#define DARB   0x02		/* RX Destination Addr B (single block) */
+
+#define SARL   0x04		/* TX Source Address L (single block) */
+#define SARH   0x05		/* TX Source Address H (single block) */
+#define SARB   0x06		/* TX Source Address B (single block) */
+
+#define CPB    0x06		/* Chain Pointer Base (chained block) */
+
+#define CDAL   0x08		/* Current Descriptor Addr L (chained block) */
+#define CDAH   0x09		/* Current Descriptor Addr H (chained block) */
+#define EDAL   0x0A		/* Error Descriptor Addr L (chained block) */
+#define EDAH   0x0B		/* Error Descriptor Addr H (chained block) */
+#define BFLL   0x0C		/* RX Receive Buffer Length L (chained block)*/
+#define BFLH   0x0D		/* RX Receive Buffer Length H (chained block)*/
+#define BCRL   0x0E		/* Byte Count L */
+#define BCRH   0x0F		/* Byte Count H */
+#define DSR    0x10		/* DMA Status */
+#define DSR_RX(node) (DSR + (node ? DMAC1RX_OFFSET : DMAC0RX_OFFSET))
+#define DSR_TX(node) (DSR + (node ? DMAC1TX_OFFSET : DMAC0TX_OFFSET))
+#define DMR    0x11		/* DMA Mode */
+#define DMR_RX(node) (DMR + (node ? DMAC1RX_OFFSET : DMAC0RX_OFFSET))
+#define DMR_TX(node) (DMR + (node ? DMAC1TX_OFFSET : DMAC0TX_OFFSET))
+#define FCT    0x13		/* Frame End Interrupt Counter */
+#define FCT_RX(node) (FCT + (node ? DMAC1RX_OFFSET : DMAC0RX_OFFSET))
+#define FCT_TX(node) (FCT + (node ? DMAC1TX_OFFSET : DMAC0TX_OFFSET))
+#define DIR    0x14		/* DMA Interrupt Enable */
+#define DIR_RX(node) (DIR + (node ? DMAC1RX_OFFSET : DMAC0RX_OFFSET))
+#define DIR_TX(node) (DIR + (node ? DMAC1TX_OFFSET : DMAC0TX_OFFSET))
+#define DCR    0x15		/* DMA Command */
+#define DCR_RX(node) (DCR + (node ? DMAC1RX_OFFSET : DMAC0RX_OFFSET))
+#define DCR_TX(node) (DCR + (node ? DMAC1TX_OFFSET : DMAC0TX_OFFSET))
+
+
+
+
+/* Descriptor Structure */
+
+typedef struct {
+	u16 cp;			/* Chain Pointer */
+	u32 bp;			/* Buffer Pointer (24 bits) */
+	u16 len;		/* Data Length */
+	u8 stat;		/* Status */
+	u8 unused2;
+}__attribute__ ((packed)) pkt_desc;
+
+
+/* Packet Descriptor Status bits */
+
+#define ST_TX_EOM     0x80	/* End of frame */
+#define ST_TX_EOT     0x01	/* End of transmission */
+
+#define ST_RX_EOM     0x80	/* End of frame */
+#define ST_RX_SHORT   0x40	/* Short frame */
+#define ST_RX_ABORT   0x20	/* Abort */
+#define ST_RX_RESBIT  0x10	/* Residual bit */
+#define ST_RX_OVERRUN 0x08	/* Overrun */
+#define ST_RX_CRC     0x04	/* CRC */
+
+#define ST_ERROR_MASK 0x7C
+
+#define DIR_EOTE      0x80      /* Transfer completed */
+#define DIR_EOME      0x40      /* Frame Transfer Completed (chained-block) */
+#define DIR_BOFE      0x20      /* Buffer Overflow/Underflow (chained-block)*/
+#define DIR_COFE      0x10      /* Counter Overflow (chained-block) */
+
+
+#define DSR_EOT       0x80      /* Transfer completed */
+#define DSR_EOM       0x40      /* Frame Transfer Completed (chained-block) */
+#define DSR_BOF       0x20      /* Buffer Overflow/Underflow (chained-block)*/
+#define DSR_COF       0x10      /* Counter Overflow (chained-block) */
+#define DSR_DE        0x02	/* DMA Enable */
+#define DSR_DWE       0x01      /* DMA Write Disable */
+
+/* DMA Master Enable Register (DMER) bits */
+#define DMER_DME      0x80	/* DMA Master Enable */
+
+
+#define CMD_RESET     0x21	/* Reset Channel */
+#define CMD_TX_ENABLE 0x02	/* Start transmitter */
+#define CMD_RX_ENABLE 0x12	/* Start receiver */
+
+#define MD0_HDLC      0x80	/* Bit-sync HDLC mode */
+#define MD0_CRC_ENA   0x04	/* Enable CRC code calculation */
+#define MD0_CRC_CCITT 0x02	/* CCITT CRC instead of CRC-16 */
+#define MD0_CRC_PR1   0x01	/* Initial all-ones instead of all-zeros */
+
+#define MD0_CRC_NONE  0x00
+#define MD0_CRC_16_0  0x04
+#define MD0_CRC_16    0x05
+#define MD0_CRC_ITU_0 0x06
+#define MD0_CRC_ITU   0x07
+
+#define MD2_NRZI	  0x20	/* NRZI mode */
+#define MD2_LOOPBACK	  0x03	/* Local data Loopback */
+
+#define CTL_NORTS	  0x01
+#define CTL_IDLE	  0x10	/* Transmit an idle pattern */
+#define CTL_UDRNC	  0x20	/* Idle after CRC or FCS+flag transmission */
+
+#define ST0_TXRDY	  0x02	/* TX ready */
+#define ST0_RXRDY	  0x01	/* RX ready */
+
+#define ST1_UDRN	  0x80	/* MSCI TX underrun */
+
+#define ST3_CTS	          0x08	/* modem input - /CTS */
+#define ST3_DCD	          0x04	/* modem input - /DCD */
+
+#define IE0_TXINT	  0x80	/* TX INT MSCI interrupt enable */
+#define IE1_UDRN	  0x80	/* TX underrun MSCI interrupt enable */
+
+#define DCR_ABORT	  0x01	/* Software abort command */
+#define DCR_CLEAR_EOF	  0x02	/* Clear EOF interrupt */
+
+/* TX and RX Clock Source - RXS and TXS */
+#define CLK_BRG_MASK	  0x0F
+#define CLK_LINE_RX	  0x00	/* TX/RX clock line input */
+#define CLK_LINE_TX	  0x00	/* TX/RX line input */
+#define CLK_BRG_RX	  0x40	/* internal baud rate generator */
+#define CLK_BRG_TX	  0x40	/* internal baud rate generator */
+#define CLK_RXCLK_TX	  0x60	/* TX clock from RX clock */
+
+#endif
--- /dev/null
+/*
+ * Hitachi SCA HD64570 and HD64572 common driver for Linux
+ *
+ * Copyright (C) 1998-2000 Krzysztof Halasa <khc@pm.waw.pl>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * Sources of information:
+ * Hitachi HD64570 SCA User's Manual
+ * Hitachi HD64572 SCA-II User's Manual
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/sched.h>
+#include <linux/types.h>
+#include <linux/fcntl.h>
+#include <linux/interrupt.h>
+#include <linux/in.h>
+#include <linux/string.h>
+#include <linux/timer.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/ioport.h>
+
+#include <asm/system.h>
+#include <asm/bitops.h>
+#include <asm/uaccess.h>
+#include <asm/io.h>
+
+#include <linux/netdevice.h>
+#include <linux/skbuff.h>
+
+#include <linux/hdlc.h>
+
+#if (!defined (__HD64570_H) && !defined (__HD64572_H)) || \
+ (defined (__HD64570_H) && defined (__HD64572_H))
+#error Either hd64570.h or hd64572.h must be included
+#endif
+
+
+/* Singly-linked list of all probed cards; new_card points at the tail
+   link so the next card can be appended in O(1). */
+static card_t *first_card;
+static card_t **new_card = &first_card;
+
+
+/* Maximum events to handle at each interrupt - should I increase it? */
+#define INTR_WORK 4
+
+/* Per-port register block selectors: port 0 vs port 1 offsets */
+#define get_msci(port)	  (phy_node(port) ?   MSCI1_OFFSET :   MSCI0_OFFSET)
+#define get_dmac_rx(port) (phy_node(port) ? DMAC1RX_OFFSET : DMAC0RX_OFFSET)
+#define get_dmac_tx(port) (phy_node(port) ? DMAC1TX_OFFSET : DMAC0TX_OFFSET)
+
+/* Summary interrupt-status bits as returned by sca_intr_status() */
+#define SCA_INTR_MSCI(node)    (node ? 0x10 : 0x01)
+#define SCA_INTR_DMAC_RX(node) (node ? 0x20 : 0x02)
+#define SCA_INTR_DMAC_TX(node) (node ? 0x40 : 0x04)
+
+#ifdef __HD64570_H /* HD64570 */
+/* HD64570 uses 16-bit descriptor addresses */
+#define sca_outa(value, reg, card)	sca_outw(value, reg, card)
+#define sca_ina(reg, card)		sca_inw(reg, card)
+#define writea(value, ptr)		writew(value, ptr)
+
+/*
+ * Collapse the chip's ISR0/ISR1 status bytes into the driver's
+ * SCA_INTR_* summary bitmask (per port, MSCI + DMAC RX/TX).
+ */
+static inline int sca_intr_status(card_t *card)
+{
+	u8 isr0 = sca_in(ISR0, card);
+	u8 isr1 = sca_in(ISR1, card);
+	u8 result = 0;
+
+	if (isr1 & 0x03) result |= SCA_INTR_DMAC_RX(0);
+	if (isr1 & 0x0C) result |= SCA_INTR_DMAC_TX(0);
+	if (isr1 & 0x30) result |= SCA_INTR_DMAC_RX(1);
+	if (isr1 & 0xC0) result |= SCA_INTR_DMAC_TX(1);
+	if (isr0 & 0x0F) result |= SCA_INTR_MSCI(0);
+	if (isr0 & 0xF0) result |= SCA_INTR_MSCI(1);
+
+	return result;
+}
+
+#else /* HD64572 */
+/* HD64572 (SCA-II) uses 32-bit descriptor addresses */
+#define sca_outa(value, reg, card)	sca_outl(value, reg, card)
+#define sca_ina(reg, card)		sca_inl(reg, card)
+#define writea(value, ptr)		writel(value, ptr)
+
+
+/*
+ * Same as above for the SCA-II, whose ISR0 is a single 32-bit register.
+ */
+static inline int sca_intr_status(card_t *card)
+{
+	u32 isr0 = sca_inl(ISR0, card);
+	u8 result = 0;
+
+	if (isr0 & 0x0000000F) result |= SCA_INTR_DMAC_RX(0);
+	if (isr0 & 0x000000F0) result |= SCA_INTR_DMAC_TX(0);
+	if (isr0 & 0x00000F00) result |= SCA_INTR_DMAC_RX(1);
+	if (isr0 & 0x0000F000) result |= SCA_INTR_DMAC_TX(1);
+	if (isr0 & 0x003E0000) result |= SCA_INTR_MSCI(0);
+	if (isr0 & 0x3E000000) result |= SCA_INTR_MSCI(1);
+
+	return result;
+}
+
+#endif /* HD64570 vs HD64572 */
+
+
+
+
+/* port_t embeds the hdlc_device as its first member, so a plain cast
+   converts one to the other. */
+static inline port_t* hdlc_to_port(hdlc_device *hdlc)
+{
+	return (port_t*)hdlc;
+}
+
+
+
+/* Map a net_device back to its owning port via the hdlc layer. */
+static inline port_t* dev_to_port(struct net_device *dev)
+{
+	return hdlc_to_port(dev_to_hdlc(dev));
+}
+
+
+
+/* Next descriptor index, wrapping at the card's ring size. */
+static inline u8 next_desc(port_t *port, u8 desc)
+{
+	return (desc + 1) % port_to_card(port)->ring_buffers;
+}
+
+
+
+/*
+ * Byte offset of a descriptor within the card's descriptor table.
+ * Layout: per logical port, the RX ring then the TX ring.
+ */
+static inline u16 desc_offset(port_t *port, u8 desc, u8 transmit)
+{
+	/* Descriptor offset always fits in 16 bits */
+	u8 buffs = port_to_card(port)->ring_buffers;
+	return ((log_node(port) * 2 + transmit) * buffs + (desc % buffs)) *
+		sizeof(pkt_desc);
+}
+
+
+
+/*
+ * CPU-visible address of a descriptor.  Descriptors live in window
+ * page 0; the caller must have page 0 selected unless it is
+ * permanently mapped.
+ */
+static inline pkt_desc* desc_address(port_t *port, u8 desc, u8 transmit)
+{
+#ifdef PAGE0_ALWAYS_MAPPED
+	return (pkt_desc*)(win0base(port_to_card(port))
+			   + desc_offset(port, desc, transmit));
+#else
+	return (pkt_desc*)(winbase(port_to_card(port))
+			   + desc_offset(port, desc, transmit));
+#endif
+}
+
+
+
+/*
+ * Card-memory offset of the data buffer backing a descriptor.
+ * Buffers start at buff_offset and are HDLC_MAX_MRU bytes each,
+ * laid out in the same ring order as the descriptors.
+ */
+static inline u32 buffer_offset(port_t *port, u8 desc, u8 transmit)
+{
+	u8 buffs = port_to_card(port)->ring_buffers;
+	return port_to_card(port)->buff_offset +
+		((log_node(port) * 2 + transmit) * buffs + (desc % buffs)) *
+		(u32)HDLC_MAX_MRU;
+}
+
+
+
+/*
+ * Initialize one port for synchronous (HDLC) operation: build the RX
+ * and TX descriptor chains in card memory, reset both DMA controllers
+ * and program them into chained multi-frame mode, then start RX DMA.
+ * TX DMA is left stopped until there is something to send.
+ */
+static void sca_init_sync_port(port_t *port)
+{
+	card_t *card = port_to_card(port);
+	u8 transmit, i;
+	u16 dmac, buffs = card->ring_buffers;
+
+	port->rxin = 0;
+	port->txin = 0;
+	port->txlast = 0;
+
+#if !defined(PAGE0_ALWAYS_MAPPED) && !defined(ALL_PAGES_ALWAYS_MAPPED)
+	openwin(card, 0);
+#endif
+
+	for (transmit = 0; transmit < 2; transmit++) {
+		/* build the circular descriptor chain in card memory */
+		for (i = 0; i < buffs; i++) {
+			pkt_desc* desc = desc_address(port, i, transmit);
+			u16 chain_off = desc_offset(port, i + 1, transmit);
+			u32 buff_off = buffer_offset(port, i, transmit);
+
+			writea(chain_off, &desc->cp);
+			writel(buff_off, &desc->bp);
+			writew(0, &desc->len);
+			writeb(0, &desc->stat);
+		}
+
+		dmac = transmit ? get_dmac_tx(port) : get_dmac_rx(port);
+		/* DMA disable - to halt state */
+		sca_out(0, transmit ? DSR_TX(phy_node(port)) :
+			DSR_RX(phy_node(port)), card);
+		/* software ABORT - to initial state */
+		sca_out(DCR_ABORT, transmit ? DCR_TX(phy_node(port)) :
+			DCR_RX(phy_node(port)), card);
+
+#ifdef __HD64570_H
+		sca_out(0, dmac + CPB, card); /* pointer base */
+#endif
+		/* current desc addr */
+		sca_outa(desc_offset(port, 0, transmit), dmac + CDAL, card);
+		if (!transmit)
+			sca_outa(desc_offset(port, buffs - 1, transmit),
+				 dmac + EDAL, card);
+		else
+			sca_outa(desc_offset(port, 0, transmit), dmac + EDAL,
+				 card);
+
+		/* clear frame end interrupt counter */
+		sca_out(DCR_CLEAR_EOF, transmit ? DCR_TX(phy_node(port)) :
+			DCR_RX(phy_node(port)), card);
+
+		if (!transmit) { /* Receive */
+			/* set buffer length */
+			sca_outw(HDLC_MAX_MRU, dmac + BFLL, card);
+			/* Chain mode, Multi-frame */
+			sca_out(0x14, DMR_RX(phy_node(port)), card);
+			sca_out(DIR_EOME | DIR_BOFE, DIR_RX(phy_node(port)),
+				card);
+			/* DMA enable */
+			sca_out(DSR_DE, DSR_RX(phy_node(port)), card);
+		} else {	/* Transmit */
+			/* Chain mode, Multi-frame */
+			sca_out(0x14, DMR_TX(phy_node(port)), card);
+			/* enable underflow interrupts */
+			sca_out(DIR_BOFE, DIR_TX(phy_node(port)), card);
+		}
+	}
+}
+
+
+
+/* MSCI interrupt service */
+static inline void sca_msci_intr(port_t *port)
+{
+ u16 msci = get_msci(port);
+ card_t* card = port_to_card(port);
+ u8 stat = sca_in(msci + ST1, card); /* read MSCI ST1 status */
+
+ /* printk(KERN_DEBUG "MSCI INT: ST1=%02X ILAR=%02X\n",
+ stat, sca_in(ILAR, card)); */
+
+ /* Reset MSCI TX underrun status bit */
+ sca_out(stat & ST1_UDRN, msci + ST1, card);
+
+ if (stat & ST1_UDRN) {
+ port->hdlc.stats.tx_errors++; /* TX Underrun error detected */
+ port->hdlc.stats.tx_fifo_errors++;
+ }
+}
+
+
+
+/*
+ * Copy one received frame out of card memory into a fresh skb and
+ * hand it to the hdlc layer.  Handles a frame that straddles a window
+ * page boundary by copying in two chunks.  On skb allocation failure
+ * the frame is dropped (rx_dropped).
+ */
+static inline void sca_rx(card_t *card, port_t *port, pkt_desc *desc,
+			  u8 rxin)
+{
+	struct sk_buff *skb;
+	u16 len;
+	u32 buff;
+#ifndef ALL_PAGES_ALWAYS_MAPPED
+	u32 maxlen;
+	u8 page;
+#endif
+
+	len = readw(&desc->len);
+	skb = dev_alloc_skb(len);
+	if (!skb) {
+		port->hdlc.stats.rx_dropped++;
+		return;
+	}
+
+	buff = buffer_offset(port, rxin, 0);
+#ifndef ALL_PAGES_ALWAYS_MAPPED
+	page = buff / winsize(card);
+	buff = buff % winsize(card);
+	maxlen = winsize(card) - buff;
+
+	openwin(card, page);
+
+	if (len > maxlen) {
+		/* frame crosses a window boundary: copy in two pieces */
+		memcpy_fromio(skb->data, winbase(card) + buff, maxlen);
+		openwin(card, page + 1);
+		memcpy_fromio(skb->data + maxlen, winbase(card), len - maxlen);
+	} else
+#endif
+		memcpy_fromio(skb->data, winbase(card) + buff, len);
+
+#if !defined(PAGE0_ALWAYS_MAPPED) && !defined(ALL_PAGES_ALWAYS_MAPPED)
+	/* select pkt_desc table page back */
+	openwin(card, 0);
+#endif
+	skb_put(skb, len);
+#ifdef DEBUG_PKT
+	printk(KERN_DEBUG "%s RX(%i):", hdlc_to_name(&port->hdlc), skb->len);
+	debug_frame(skb);
+#endif
+	port->hdlc.stats.rx_packets++;
+	port->hdlc.stats.rx_bytes += skb->len;
+	hdlc_netif_rx(&port->hdlc, skb);
+}
+
+
+
+/* Receive DMA interrupt service.
+   Acknowledges RX DMA status, then walks the RX descriptor ring from
+   port->rxin up to the chip's current descriptor (CDAL), passing good
+   frames to sca_rx() and accounting errors.  Finally re-enables RX DMA. */
+static inline void sca_rx_intr(port_t *port)
+{
+	u16 dmac = get_dmac_rx(port);
+	card_t *card = port_to_card(port);
+	u8 stat = sca_in(DSR_RX(phy_node(port)), card); /* read DMA Status */
+	struct net_device_stats *stats = &port->hdlc.stats;
+
+	/* Reset DSR status bits */
+	sca_out((stat & (DSR_EOT | DSR_EOM | DSR_BOF | DSR_COF)) | DSR_DWE,
+		DSR_RX(phy_node(port)), card);
+
+	if (stat & DSR_BOF)
+		stats->rx_over_errors++; /* Dropped one or more frames */
+
+	while (1) {
+		u32 desc_off = desc_offset(port, port->rxin, 0);
+		pkt_desc *desc;
+		u32 cda = sca_ina(dmac + CDAL, card);
+
+		if (cda == desc_off)
+			break;	/* No frame received */
+
+#ifdef __HD64572_H
+		if (cda == desc_off + 8)
+			break;	/* SCA-II updates CDA in 2 steps */
+#endif
+
+		desc = desc_address(port, port->rxin, 0);
+		stat = readb(&desc->stat);
+		if (!(stat & ST_RX_EOM))
+			port->rxpart = 1; /* partial frame received */
+		else if ((stat & ST_ERROR_MASK) || port->rxpart) {
+			/* bad frame, or final fragment of an oversized one */
+			stats->rx_errors++;
+			if (stat & ST_RX_OVERRUN) stats->rx_fifo_errors++;
+			else if ((stat & (ST_RX_SHORT | ST_RX_ABORT |
+					  ST_RX_RESBIT)) || port->rxpart)
+				stats->rx_frame_errors++;
+			else if (stat & ST_RX_CRC) stats->rx_crc_errors++;
+			if (stat & ST_RX_EOM)
+				port->rxpart = 0; /* received last fragment */
+		} else
+			sca_rx(card, port, desc, port->rxin);
+
+		/* Set new error descriptor address */
+		sca_outa(desc_off, dmac + EDAL, card);
+		port->rxin = next_desc(port, port->rxin);
+	}
+
+	/* make sure RX DMA is enabled */
+	sca_out(DSR_DE, DSR_RX(phy_node(port)), card);
+}
+
+
+
+/* Transmit DMA interrupt service.
+   Acknowledges TX DMA status, reclaims completed TX descriptors
+   (txlast .. chip's CDAL), updates statistics and wakes the queue.
+   Runs under port->lock to serialize with sca_xmit(). */
+static inline void sca_tx_intr(port_t *port)
+{
+	u16 dmac = get_dmac_tx(port);
+	card_t* card = port_to_card(port);
+	u8 stat;
+
+	spin_lock(&port->lock);
+
+	stat = sca_in(DSR_TX(phy_node(port)), card); /* read DMA Status */
+
+	/* Reset DSR status bits */
+	sca_out((stat & (DSR_EOT | DSR_EOM | DSR_BOF | DSR_COF)) | DSR_DWE,
+		DSR_TX(phy_node(port)), card);
+
+	while (1) {
+		u32 desc_off = desc_offset(port, port->txlast, 1);
+		pkt_desc *desc;
+		u16 len;
+
+		if (sca_ina(dmac + CDAL, card) == desc_off)
+			break;	/* Transmitter is/will_be sending this frame */
+
+		desc = desc_address(port, port->txlast, 1);
+		len = readw(&desc->len);
+
+		port->hdlc.stats.tx_packets++;
+		port->hdlc.stats.tx_bytes += len;
+		writeb(0, &desc->stat);	/* Free descriptor */
+
+		port->txlast = (port->txlast + 1) %
+			port_to_card(port)->ring_buffers;
+	}
+
+	netif_wake_queue(hdlc_to_dev(&port->hdlc));
+	spin_unlock(&port->lock);
+}
+
+
+
+/* Card interrupt handler.  dev_id is the card_t passed to request_irq().
+   Dispatches MSCI/RX-DMA/TX-DMA events to both channels, bounded by
+   INTR_WORK iterations to avoid livelock on a stuck interrupt source. */
+static void sca_intr(int irq, void* dev_id, struct pt_regs *regs)
+{
+	card_t *card = dev_id;
+	int boguscnt = INTR_WORK;
+	int i;
+	u8 stat;
+
+#ifndef ALL_PAGES_ALWAYS_MAPPED
+	u8 page = sca_get_page(card);	/* remember currently mapped page */
+#endif
+
+	while((stat = sca_intr_status(card)) != 0) {
+		for (i = 0; i < 2; i++) {
+			port_t *port = get_port(card, i);
+			if (port) {
+				if (stat & SCA_INTR_MSCI(i))
+					sca_msci_intr(port);
+
+				if (stat & SCA_INTR_DMAC_RX(i))
+					sca_rx_intr(port);
+
+				if (stat & SCA_INTR_DMAC_TX(i))
+					sca_tx_intr(port);
+			}
+
+			if (--boguscnt < 0) {
+				/* port may be NULL here (unused channel) -
+				   don't dereference it for the message */
+				printk(KERN_ERR "%s: too much work at "
+				       "interrupt\n",
+				       port ? hdlc_to_name(&port->hdlc)
+				       : "SCA");
+				goto exit;
+			}
+		}
+	}
+
+ exit:
+#ifndef ALL_PAGES_ALWAYS_MAPPED
+	openwin(card, page);		/* Restore original page */
+#endif
+	return;	/* a label must precede a statement - required when the
+		   openwin() call above is compiled out */
+}
+
+
+
+/* Enable or disable MSCI local loopback on the port.
+   line is LINE_DEFAULT (normal) or LINE_LOOPBACK; returns 0 or -EINVAL.
+   The MSCI register offset returned by get_msci() is held in a u16,
+   consistent with sca_msci_intr() - a u8 would truncate the offset
+   (the SCA-II register space is wider than 8 bits). */
+static inline int sca_set_loopback(port_t *port, int line)
+{
+	card_t* card = port_to_card(port);
+	u16 msci = get_msci(port);	/* was u8 - truncated the offset */
+	u8 md2 = sca_in(msci + MD2, card);
+
+	switch(line) {
+	case LINE_DEFAULT:
+		md2 &= ~MD2_LOOPBACK;
+		port->line &= ~LINE_LOOPBACK;
+		break;
+
+	case LINE_LOOPBACK:
+		md2 |= MD2_LOOPBACK;
+		port->line |= LINE_LOOPBACK;
+		break;
+
+	default:
+		return -EINVAL;
+	}
+
+	sca_out(md2, msci + MD2, card);
+	return 0;
+}
+
+
+
+/* Program the baud-rate generator for the port.
+   Computes BR (2^BR divider) and TMC (time constant) so that
+   Baud Rate = CLOCK_BASE / TMC / 2^BR, stores the achieved rate back
+   into port->clkrate, and writes TMC/RXS/TXS to the MSCI.
+   The MSCI offset is held in a u16, consistent with sca_msci_intr()
+   (a u8 would truncate the register offset). */
+static void sca_set_clock(port_t *port)
+{
+	card_t *card = port_to_card(port);
+	u16 msci = get_msci(port);	/* was u8 - truncated the offset */
+	unsigned int tmc, br = 10, brv = 1024;
+
+	if (port->clkrate > 0) {
+		/* Try lower br for better accuracy*/
+		do {
+			br--;
+			brv >>= 1; /* brv = 2^9 = 512 max in specs */
+
+			/* Baud Rate = CLOCK_BASE / TMC / 2^BR */
+			tmc = CLOCK_BASE / (brv * port->clkrate);
+		}while(br > 1 && tmc <= 128);
+
+		if (tmc < 1) {
+			tmc = 1;
+			br = 0;	/* For baud=CLOCK_BASE we use tmc=1 br=0 */
+			brv = 1;
+		} else if (tmc > 255)
+			tmc = 256; /* tmc=0 means 256 - low baud rates */
+
+		port->clkrate = CLOCK_BASE / (brv * tmc);
+	} else {
+		br = 9; /* Minimum clock rate */
+		tmc = 256;	/* 8bit = 0 */
+		port->clkrate = CLOCK_BASE / (256 * 512);
+	}
+
+	port->rxs = (port->rxs & ~CLK_BRG_MASK) | br;
+	port->txs = (port->txs & ~CLK_BRG_MASK) | br;
+	port->tmc = tmc;
+
+	/* baud divisor - time constant*/
+#ifdef __HD64570_H
+	sca_out(port->tmc, msci + TMC, card);
+#else
+	sca_out(port->tmc, msci + TMCR, card);
+	sca_out(port->tmc, msci + TMCT, card);
+#endif
+
+	/* Set BRG bits */
+	sca_out(port->rxs, msci + RXS, card);
+	sca_out(port->txs, msci + TXS, card);
+}
+
+
+
+/* Put the MSCI channel into HDLC mode and (re)start it.
+   idle - transmit idle pattern between frames, crc - MD0 CRC mode bits,
+   nrzi - NRZI line coding.  Also sets DMA activation thresholds,
+   enables MSCI/DMA interrupts, restores TMC/RXS/TXS and enables TX/RX.
+   The MSCI offset is held in a u16, consistent with sca_msci_intr()
+   (a u8 would truncate the register offset). */
+static void sca_set_hdlc_mode(port_t *port, u8 idle, u8 crc, u8 nrzi)
+{
+	card_t* card = port_to_card(port);
+	u16 msci = get_msci(port);	/* was u8 - truncated the offset */
+	u8 md2 = (nrzi ? MD2_NRZI : 0) |
+		((port->line & LINE_LOOPBACK) ? MD2_LOOPBACK : 0);
+	u8 ctl = (idle ? CTL_IDLE : 0);
+#ifdef __HD64572_H
+	ctl |= CTL_URCT | CTL_URSKP; /* Skip the rest of underrun frame */
+#endif
+
+	sca_out(CMD_RESET, msci + CMD, card);
+	sca_out(MD0_HDLC | crc, msci + MD0, card);
+	sca_out(0x00, msci + MD1, card); /* no address field check */
+	sca_out(md2, msci + MD2, card);
+	sca_out(0x7E, msci + IDL, card); /* flag character 0x7E */
+	sca_out(ctl, msci + CTL, card);
+
+#ifdef __HD64570_H
+	/* Allow at least 8 bytes before requesting RX DMA operation */
+	/* TX with higher priority and possibly with shorter transfers */
+	sca_out(0x07, msci + RRC, card); /* +1=RXRDY/DMA activation condition*/
+	sca_out(0x10, msci + TRC0, card); /* = TXRDY/DMA activation condition*/
+	sca_out(0x14, msci + TRC1, card); /* +1=TXRDY/DMA deactiv condition */
+#else
+	sca_out(0x0F, msci + RNR, card); /* +1=RX DMA activation condition */
+	/* Setting than to larger value may cause Illegal Access */
+	sca_out(0x20, msci + TNR0, card); /* =TX DMA activation condition */
+	sca_out(0x30, msci + TNR1, card); /* +1=TX DMA deactivation condition*/
+	sca_out(0x04, msci + TCR, card); /* =Critical TX DMA activ condition */
+#endif
+
+
+#ifdef __HD64570_H
+	/* MSCI TX INT IRQ enable */
+	sca_out(IE0_TXINT, msci + IE0, card);
+	sca_out(IE1_UDRN, msci + IE1, card); /* TX underrun IRQ */
+	sca_out(sca_in(IER0, card) | (phy_node(port) ? 0x80 : 0x08),
+		IER0, card);
+	/* DMA IRQ enable */
+	sca_out(sca_in(IER1, card) | (phy_node(port) ? 0xF0 : 0x0F),
+		IER1, card);
+#else
+	/* MSCI TX INT and underrrun IRQ enable */
+	sca_outl(IE0_TXINT | IE0_UDRN, msci + IE0, card);
+	/* DMA & MSCI IRQ enable */
+	sca_outl(sca_in(IER0, card) |
+		 (phy_node(port) ? 0x02006600 : 0x00020066), IER0, card);
+#endif
+
+#ifdef __HD64570_H
+	sca_out(port->tmc, msci + TMC, card); /* Restore registers */
+#else
+	sca_out(port->tmc, msci + TMCR, card);
+	sca_out(port->tmc, msci + TMCT, card);
+#endif
+	sca_out(port->rxs, msci + RXS, card);
+	sca_out(port->txs, msci + TXS, card);
+	sca_out(CMD_TX_ENABLE, msci + CMD, card);
+	sca_out(CMD_RX_ENABLE, msci + CMD, card);
+}
+
+
+
+#ifdef DEBUG_RINGS
+/* Debug helper: dump RX/TX descriptor rings, DMA status and MSCI
+   registers for a port to the kernel log.  Temporarily maps window
+   page 0 (where the descriptor table lives) and restores the caller's
+   page before returning. */
+static void sca_dump_rings(hdlc_device *hdlc)
+{
+	port_t *port = hdlc_to_port(hdlc);
+	card_t *card = port_to_card(port);
+	u16 cnt;
+#if !defined(PAGE0_ALWAYS_MAPPED) && !defined(ALL_PAGES_ALWAYS_MAPPED)
+	u8 page;
+#endif
+
+#if !defined(PAGE0_ALWAYS_MAPPED) && !defined(ALL_PAGES_ALWAYS_MAPPED)
+	page = sca_get_page(card);
+	openwin(card, 0);
+#endif
+
+	printk(KERN_ERR "RX ring: CDA=%u EDA=%u DSR=%02X in=%u "
+	       "%sactive",
+	       sca_ina(get_dmac_rx(port) + CDAL, card),
+	       sca_ina(get_dmac_rx(port) + EDAL, card),
+	       sca_in(DSR_RX(phy_node(port)), card),
+	       port->rxin,
+	       sca_in(DSR_RX(phy_node(port)), card) & DSR_DE?"":"in");
+	for (cnt = 0; cnt<port_to_card(port)->ring_buffers; cnt++)
+		printk(" %02X",
+		       readb(&(desc_address(port, cnt, 0)->stat)));
+
+	printk("\n" KERN_ERR "TX ring: CDA=%u EDA=%u DSR=%02X in=%u "
+	       "last=%u %sactive",
+	       sca_ina(get_dmac_tx(port) + CDAL, card),
+	       sca_ina(get_dmac_tx(port) + EDAL, card),
+	       sca_in(DSR_TX(phy_node(port)), card), port->txin,
+	       port->txlast,
+	       sca_in(DSR_TX(phy_node(port)), card) & DSR_DE ? "" : "in");
+
+	for (cnt = 0; cnt<port_to_card(port)->ring_buffers; cnt++)
+		printk(" %02X",
+		       readb(&(desc_address(port, cnt, 1)->stat)));
+	printk("\n");
+
+	printk(KERN_ERR "MSCI: MD: %02x %02x %02x, "
+	       "ST: %02x %02x %02x %02x"
+#ifdef __HD64572_H
+	       " %02x"
+#endif
+	       ", FST: %02x CST: %02x %02x\n",
+	       sca_in(get_msci(port) + MD0, card),
+	       sca_in(get_msci(port) + MD1, card),
+	       sca_in(get_msci(port) + MD2, card),
+	       sca_in(get_msci(port) + ST0, card),
+	       sca_in(get_msci(port) + ST1, card),
+	       sca_in(get_msci(port) + ST2, card),
+	       sca_in(get_msci(port) + ST3, card),
+#ifdef __HD64572_H
+	       sca_in(get_msci(port) + ST4, card),
+#endif
+	       sca_in(get_msci(port) + FST, card),
+	       sca_in(get_msci(port) + CST0, card),
+	       sca_in(get_msci(port) + CST1, card));
+
+#ifdef __HD64572_H
+	printk(KERN_ERR "ILAR: %02x\n", sca_in(ILAR, card));
+#endif
+
+#if !defined(PAGE0_ALWAYS_MAPPED) && !defined(ALL_PAGES_ALWAYS_MAPPED)
+	openwin(card, page); /* Restore original page */
+#endif
+}
+#endif /* DEBUG_RINGS */
+
+
+
+/* Bring the port up: program HDLC mode (idle flags, ITU CRC, NRZ)
+   and start the transmit queue. */
+static void sca_open(hdlc_device *hdlc)
+{
+	port_t *port = hdlc_to_port(hdlc);
+
+	sca_set_hdlc_mode(port, 1, MD0_CRC_ITU, 0);
+	netif_start_queue(hdlc_to_dev(hdlc));
+}
+
+
+/* Bring the port down: stop the queue and reset the MSCI channel. */
+static void sca_close(hdlc_device *hdlc)
+{
+	port_t *port = hdlc_to_port(hdlc);
+
+	/* reset channel */
+	netif_stop_queue(hdlc_to_dev(hdlc));
+	sca_out(CMD_RESET, get_msci(port) + CMD, port_to_card(port));
+}
+
+
+
+/* Transmit one frame.  Copies skb data into the on-card TX buffer for
+   descriptor txin (possibly split across two window pages), marks the
+   descriptor ready, advances EDAL and kicks TX DMA.
+   Returns 0 on success, 1 to ask the stack to requeue the packet.
+   Keeps a one-descriptor gap so the ring never appears full to the chip. */
+static int sca_xmit(hdlc_device *hdlc, struct sk_buff *skb)
+{
+	port_t *port = hdlc_to_port(hdlc);
+	struct net_device *dev = hdlc_to_dev(hdlc);
+	card_t *card = port_to_card(port);
+	pkt_desc *desc;
+	u32 buff, len;
+#ifndef ALL_PAGES_ALWAYS_MAPPED
+	u8 page;
+	u32 maxlen;
+#endif
+
+	spin_lock_irq(&port->lock);
+
+	desc = desc_address(port, port->txin + 1, 1);
+	if (readb(&desc->stat)) { /* allow 1 packet gap */
+		/* should never happen - previous xmit should stop queue */
+#ifdef DEBUG_PKT
+		printk(KERN_DEBUG "%s: transmitter buffer full\n", dev->name);
+#endif
+		netif_stop_queue(dev);
+		spin_unlock_irq(&port->lock);
+		return 1;	/* request packet to be queued */
+	}
+
+#ifdef DEBUG_PKT
+	printk(KERN_DEBUG "%s TX(%i):", hdlc_to_name(hdlc), skb->len);
+	debug_frame(skb);
+#endif
+
+	desc = desc_address(port, port->txin, 1);
+	buff = buffer_offset(port, port->txin, 1);
+	len = skb->len;
+#ifndef ALL_PAGES_ALWAYS_MAPPED
+	page = buff / winsize(card);	/* window page holding the buffer */
+	buff = buff % winsize(card);
+	maxlen = winsize(card) - buff;	/* bytes before the page boundary */
+
+	openwin(card, page);
+	if (len > maxlen) {
+		/* frame crosses the window boundary - copy in two parts */
+		memcpy_toio(winbase(card) + buff, skb->data, maxlen);
+		openwin(card, page + 1);
+		memcpy_toio(winbase(card), skb->data + maxlen, len - maxlen);
+	}
+	else
+#endif
+		memcpy_toio(winbase(card) + buff, skb->data, len);
+
+#if !defined(PAGE0_ALWAYS_MAPPED) && !defined(ALL_PAGES_ALWAYS_MAPPED)
+	openwin(card, 0);	/* select pkt_desc table page back */
+#endif
+	writew(len, &desc->len);
+	writeb(ST_TX_EOM, &desc->stat);	/* hand descriptor to the chip */
+	dev->trans_start = jiffies;
+
+	port->txin = next_desc(port, port->txin);
+	sca_outa(desc_offset(port, port->txin, 1),
+		 get_dmac_tx(port) + EDAL, card);
+
+	sca_out(DSR_DE, DSR_TX(phy_node(port)), card); /* Enable TX DMA */
+
+	desc = desc_address(port, port->txin + 1, 1);
+	if (readb(&desc->stat)) /* allow 1 packet gap */
+		netif_stop_queue(hdlc_to_dev(&port->hdlc));
+
+	spin_unlock_irq(&port->lock);
+
+	dev_kfree_skb(skb);
+	return 0;
+}
+
+
+/* One-time SCA chip initialization: program bus wait states, set DMA
+   priority, disable DMA interrupts, halt all four DMA channels and
+   finally enable the DMA master. */
+static void sca_init(card_t *card, int wait_states)
+{
+	sca_out(wait_states, WCRL, card); /* Wait Control */
+	sca_out(wait_states, WCRM, card);
+	sca_out(wait_states, WCRH, card);
+
+	sca_out(0, DMER, card);	/* DMA Master disable */
+	sca_out(0x03, PCR, card); /* DMA priority */
+	sca_out(0, IER1, card);	/* DMA interrupt disable */
+	sca_out(0, DSR_RX(0), card); /* DMA disable - to halt state */
+	sca_out(0, DSR_TX(0), card);
+	sca_out(0, DSR_RX(1), card);
+	sca_out(0, DSR_TX(1), card);
+	sca_out(DMER_DME, DMER, card); /* DMA Master enable */
+}
--- /dev/null
+/*
+ * Generic HDLC support routines for Linux
+ *
+ * Copyright (C) 1999, 2000 Krzysztof Halasa <khc@pm.waw.pl>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * Current status:
+ * - this is work in progress
+ * - not heavily tested on SMP
+ * - currently supported:
+ * * raw IP-in-HDLC
+ * * Cisco HDLC
+ * * Frame Relay with ANSI or CCITT LMI (both user and network side)
+ * * PPP (using syncppp.c)
+ * * X.25
+ *
+ * Use sethdlc utility to set line parameters, protocol and PVCs
+ */
+
+#include <linux/config.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/poll.h>
+#include <linux/sched.h>
+#include <linux/errno.h>
+#include <linux/if_arp.h>
+#include <linux/init.h>
+#include <linux/skbuff.h>
+#include <linux/pkt_sched.h>
+#include <linux/inetdevice.h>
+#include <linux/lapb.h>
+#include <linux/rtnetlink.h>
+#include <linux/hdlc.h>
+
+/* #define DEBUG_PKT */
+/* #define DEBUG_HARD_HEADER */
+/* #define DEBUG_FECN */
+/* #define DEBUG_BECN */
+
+static const char* version = "HDLC support module revision 1.02 for Linux 2.4";
+
+
+#define CISCO_MULTICAST 0x8F /* Cisco multicast address */
+#define CISCO_UNICAST 0x0F /* Cisco unicast address */
+#define CISCO_KEEPALIVE 0x8035 /* Cisco keepalive protocol */
+#define CISCO_SYS_INFO 0x2000 /* Cisco interface/system info */
+#define CISCO_ADDR_REQ 0 /* Cisco address request */
+#define CISCO_ADDR_REPLY 1 /* Cisco address reply */
+#define CISCO_KEEPALIVE_REQ 2 /* Cisco keepalive request */
+
+static int hdlc_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd);
+
+/********************************************************
+ *
+ * Cisco HDLC support
+ *
+ *******************************************************/
+
+/* Build a Cisco HDLC header in front of the packet.
+   Keepalives use the multicast address, everything else unicast.
+   Returns the header length pushed (sizeof(hdlc_header)). */
+static int cisco_hard_header(struct sk_buff *skb, struct net_device *dev,
+			     u16 type, void *daddr, void *saddr,
+			     unsigned int len)
+{
+	hdlc_header *data;
+#ifdef DEBUG_HARD_HEADER
+	printk(KERN_DEBUG "%s: cisco_hard_header called\n", dev->name);
+#endif
+
+	skb_push(skb, sizeof(hdlc_header));
+	data = (hdlc_header*)skb->data;
+	if (type == CISCO_KEEPALIVE)
+		data->address = CISCO_MULTICAST;
+	else
+		data->address = CISCO_UNICAST;
+	data->control = 0;
+	data->protocol = htons(type);	/* stored in network byte order */
+
+	return sizeof(hdlc_header);
+}
+
+
+
+/* Build and transmit a Cisco keepalive/control packet of the given
+   type with parameters par1/par2.  Allocation failure is logged and
+   the packet silently skipped (best effort - keepalives repeat). */
+static void cisco_keepalive_send(hdlc_device *hdlc, u32 type,
+				 u32 par1, u32 par2)
+{
+	struct sk_buff *skb;
+	cisco_packet *data;
+
+	skb = dev_alloc_skb(sizeof(hdlc_header)+sizeof(cisco_packet));
+	if (!skb) {
+		printk(KERN_WARNING "%s: Memory squeeze on cisco_keepalive_send()\n",
+			       hdlc_to_name(hdlc));
+		return;
+	}
+	skb_reserve(skb, 4);
+	cisco_hard_header(skb, hdlc_to_dev(hdlc), CISCO_KEEPALIVE,
+			  NULL, NULL, 0);
+	data = (cisco_packet*)skb->tail;
+
+	data->type = htonl(type);
+	data->par1 = htonl(par1);
+	data->par2 = htonl(par2);
+	data->rel = 0xFFFF;	/* reliability field - always "good" */
+	data->time = htonl(jiffies * 1000 / HZ);	/* uptime in ms */
+
+	skb_put(skb, sizeof(cisco_packet));
+	skb->priority = TC_PRIO_CONTROL;
+	skb->dev = hdlc_to_dev(hdlc);
+
+	dev_queue_xmit(skb);
+}
+
+
+
+/* Receive path for Cisco HDLC mode.
+   Validates the header, passes IP/IPX/IPv6 payloads up the stack and
+   handles the Cisco keepalive protocol (address request/reply and
+   keepalive sequence tracking for link-state detection). */
+static void cisco_netif(hdlc_device *hdlc, struct sk_buff *skb)
+{
+	hdlc_header *data = (hdlc_header*)skb->data;
+	cisco_packet *cisco_data;
+	struct in_device *in_dev;
+	u32 addr, mask;
+
+	if (skb->len<sizeof(hdlc_header))
+		goto rx_error;
+
+	if (data->address != CISCO_MULTICAST &&
+	    data->address != CISCO_UNICAST)
+		goto rx_error;
+
+	/* data still points at the header - skb_pull() only advances
+	   skb->data, the memory stays valid */
+	skb_pull(skb, sizeof(hdlc_header));
+
+	switch(ntohs(data->protocol)) {
+	case ETH_P_IP:
+	case ETH_P_IPX:
+	case ETH_P_IPV6:
+		skb->protocol = data->protocol;	/* already network order */
+		skb->dev = hdlc_to_dev(hdlc);
+		netif_rx(skb);
+		return;
+
+	case CISCO_SYS_INFO:
+		/* Packet is not needed, drop it. */
+		dev_kfree_skb_any(skb);
+		return;
+
+	case CISCO_KEEPALIVE:
+		if (skb->len != CISCO_PACKET_LEN &&
+		    skb->len != CISCO_BIG_PACKET_LEN) {
+			printk(KERN_INFO "%s: Invalid length of Cisco "
+			       "control packet (%d bytes)\n",
+			       hdlc_to_name(hdlc), skb->len);
+			goto rx_error;
+		}
+
+		cisco_data = (cisco_packet*)skb->data;
+
+		switch(ntohl (cisco_data->type)) {
+		case CISCO_ADDR_REQ: /* Stolen from syncppp.c :-) */
+			in_dev = hdlc_to_dev(hdlc)->ip_ptr;
+			addr = 0;
+			mask = ~0; /* is the mask correct? */
+
+			if (in_dev != NULL) {
+				struct in_ifaddr **ifap = &in_dev->ifa_list;
+
+				/* find the address bound to this label */
+				while (*ifap != NULL) {
+					if (strcmp(hdlc_to_name(hdlc),
+						   (*ifap)->ifa_label) == 0) {
+						addr = (*ifap)->ifa_local;
+						mask = (*ifap)->ifa_mask;
+						break;
+					}
+					ifap = &(*ifap)->ifa_next;
+				}
+
+				cisco_keepalive_send(hdlc, CISCO_ADDR_REPLY,
+						     addr, mask);
+			}
+			dev_kfree_skb_any(skb);
+			return;
+
+		case CISCO_ADDR_REPLY:
+			printk(KERN_INFO "%s: Unexpected Cisco IP address "
+			       "reply\n", hdlc_to_name(hdlc));
+			goto rx_error;
+
+		case CISCO_KEEPALIVE_REQ:
+			hdlc->lmi.rxseq = ntohl(cisco_data->par1);
+			/* peer echoed our sequence - link is alive */
+			if (ntohl(cisco_data->par2) == hdlc->lmi.txseq) {
+				hdlc->lmi.last_poll = jiffies;
+				if (!(hdlc->lmi.state & LINK_STATE_RELIABLE)) {
+					u32 sec, min, hrs, days;
+					sec = ntohl(cisco_data->time) / 1000;
+					min = sec / 60; sec -= min * 60;
+					hrs = min / 60; min -= hrs * 60;
+					days = hrs / 24; hrs -= days * 24;
+					printk(KERN_INFO "%s: Link up (peer "
+					       "uptime %ud%uh%um%us)\n",
+					       hdlc_to_name(hdlc), days, hrs,
+					       min, sec);
+				}
+				hdlc->lmi.state |= LINK_STATE_RELIABLE;
+			}
+
+			dev_kfree_skb_any(skb);
+			return;
+		} /* switch(keepalive type) */
+	} /* switch(protocol) */
+
+	/* NOTE(review): data->protocol is big-endian here, so %x prints
+	   the byte-swapped value on little-endian - confirm intended */
+	printk(KERN_INFO "%s: Unsupported protocol %x\n", hdlc_to_name(hdlc),
+	       data->protocol);
+	dev_kfree_skb_any(skb);
+	return;
+
+ rx_error:
+	hdlc->stats.rx_errors++; /* Mark error */
+	dev_kfree_skb_any(skb);
+}
+
+
+
+/* Periodic Cisco keepalive timer (runs every T391 seconds).
+   Declares the link down if no reply was seen within T392 seconds,
+   sends the next keepalive request and re-arms itself. */
+static void cisco_timer(unsigned long arg)
+{
+	hdlc_device *hdlc = (hdlc_device*)arg;
+
+	if ((hdlc->lmi.state & LINK_STATE_RELIABLE) &&
+	    (jiffies - hdlc->lmi.last_poll >= hdlc->lmi.T392 * HZ)) {
+		hdlc->lmi.state &= ~LINK_STATE_RELIABLE;
+		printk(KERN_INFO "%s: Link down\n", hdlc_to_name(hdlc));
+	}
+
+	cisco_keepalive_send(hdlc, CISCO_KEEPALIVE_REQ, ++hdlc->lmi.txseq,
+			     hdlc->lmi.rxseq);
+	hdlc->timer.expires = jiffies + hdlc->lmi.T391*HZ;
+
+	hdlc->timer.function = cisco_timer;
+	hdlc->timer.data = arg;
+	add_timer(&hdlc->timer);
+}
+
+
+
+/******************************************************************
+ *
+ * generic Frame Relay routines
+ *
+ *****************************************************************/
+
+
+/* Build a Frame Relay (Q.922/RFC 1490 style) header for the packet.
+   IP, IPv6 and LMI get a 4-byte header with a direct NLPID; anything
+   else is SNAP-encapsulated with a 10-byte header carrying ethertype.
+   daddr is the 2-byte Q.922 address (DLCI); defaults to dev->broadcast.
+   Returns the number of header bytes pushed. */
+static int fr_hard_header(struct sk_buff *skb, struct net_device *dev,
+			  u16 type, void *daddr, void *saddr, unsigned int len)
+{
+	u16 head_len;
+
+	if (!daddr)
+		daddr = dev->broadcast;
+
+#ifdef DEBUG_HARD_HEADER
+	printk(KERN_DEBUG "%s: fr_hard_header called\n", dev->name);
+#endif
+
+	switch(type) {
+	case ETH_P_IP:
+		head_len = 4;
+		skb_push(skb, head_len);
+		skb->data[3] = NLPID_IP;
+		break;
+
+	case ETH_P_IPV6:
+		head_len = 4;
+		skb_push(skb, head_len);
+		skb->data[3] = NLPID_IPV6;
+		break;
+
+	case LMI_PROTO:
+		head_len = 4;
+		skb_push(skb, head_len);
+		skb->data[3] = LMI_PROTO;
+		break;
+
+	default:
+		/* SNAP encapsulation: pad, NLPID_SNAP, OUI 00-00-00,
+		   then the 16-bit ethertype */
+		head_len = 10;
+		skb_push(skb, head_len);
+		skb->data[3] = FR_PAD;
+		skb->data[4] = NLPID_SNAP;
+		skb->data[5] = FR_PAD;
+		skb->data[6] = FR_PAD;
+		skb->data[7] = FR_PAD;
+		skb->data[8] = type>>8;
+		skb->data[9] = (u8)type;
+	}
+
+	/* bytes 0-1: Q.922 address, byte 2: UI control field */
+	memcpy(skb->data, daddr, 2);
+	skb->data[2] = FR_UI;
+
+	return head_len;
+}
+
+
+
+/* Log a PVC state transition: "active"/"inactive", with " new" appended
+   for a PVC that was just announced. */
+static inline void fr_log_dlci_active(pvc_device *pvc)
+{
+	printk(KERN_INFO "%s: %sactive%s\n", pvc_to_name(pvc),
+	       pvc->state & PVC_STATE_ACTIVE ? "" : "in",
+	       pvc->state & PVC_STATE_NEW ? " new" : "");
+}
+
+
+
+/* Next LMI sequence number: increments modulo 256 but skips 0,
+   since 0 is reserved to mean "no sequence received". */
+static inline u8 fr_lmi_nextseq(u8 x)
+{
+	x++;
+	return x ? x : 1;
+}
+
+
+
+/* Build and transmit an LMI status message.
+   DTE side sends a status enquiry; DCE side sends a status reply.
+   When fullrep is set (and we are DCE), one PVC status information
+   element (stat_len bytes) is appended per configured PVC. */
+static void fr_lmi_send(hdlc_device *hdlc, int fullrep)
+{
+	struct sk_buff *skb;
+	pvc_device *pvc = hdlc->first_pvc;
+	int len = mode_is(hdlc, MODE_FR_ANSI) ? LMI_ANSI_LENGTH : LMI_LENGTH;
+	int stat_len = 3;
+	u8 *data;
+	int i = 0;
+
+	if (mode_is(hdlc, MODE_DCE) && fullrep) {
+		len += hdlc->pvc_count * (2 + stat_len);
+		if (len > HDLC_MAX_MTU) {
+			printk(KERN_WARNING "%s: Too many PVCs while sending "
+			       "LMI full report\n", hdlc_to_name(hdlc));
+			return;
+		}
+	}
+
+	skb = dev_alloc_skb(len);
+	if (!skb) {
+		printk(KERN_WARNING "%s: Memory squeeze on fr_lmi_send()\n",
+		       hdlc_to_name(hdlc));
+		return;
+	}
+	memset(skb->data, 0, len);
+	skb_reserve(skb, 4);
+	fr_hard_header(skb, hdlc_to_dev(hdlc), LMI_PROTO, NULL, NULL, 0);
+	data = skb->tail;
+	data[i++] = LMI_CALLREF;
+	data[i++] = mode_is(hdlc, MODE_DCE) ? LMI_STATUS : LMI_STATUS_ENQUIRY;
+	if (mode_is(hdlc, MODE_FR_ANSI))
+		data[i++] = LMI_ANSI_LOCKSHIFT;
+	data[i++] = mode_is(hdlc, MODE_FR_CCITT) ? LMI_CCITT_REPTYPE :
+		LMI_REPTYPE;
+	data[i++] = LMI_REPT_LEN;
+	data[i++] = fullrep ? LMI_FULLREP : LMI_INTEGRITY;
+
+	/* link integrity verification IE: our TX seq and last RX seq */
+	data[i++] = mode_is(hdlc, MODE_FR_CCITT) ? LMI_CCITT_ALIVE : LMI_ALIVE;
+	data[i++] = LMI_INTEG_LEN;
+	data[i++] = hdlc->lmi.txseq = fr_lmi_nextseq(hdlc->lmi.txseq);
+	data[i++] = hdlc->lmi.rxseq;
+
+	if (mode_is(hdlc, MODE_DCE) && fullrep) {
+		while (pvc) {
+			data[i++] = mode_is(hdlc, MODE_FR_CCITT) ?
+				LMI_CCITT_PVCSTAT:LMI_PVCSTAT;
+			data[i++] = stat_len;
+
+			/* mark a just-brought-up PVC as "new" once the
+			   link is reliable */
+			if ((hdlc->lmi.state & LINK_STATE_RELIABLE) &&
+			    (pvc->netdev.flags & IFF_UP) &&
+			    !(pvc->state & (PVC_STATE_ACTIVE|PVC_STATE_NEW))) {
+				pvc->state |= PVC_STATE_NEW;
+				fr_log_dlci_active(pvc);
+			}
+
+			dlci_to_status(hdlc, netdev_dlci(&pvc->netdev),
+				       data+i, pvc->state);
+			i += stat_len;
+			pvc = pvc->next;
+		}
+	}
+
+	skb_put(skb, i);
+	skb->priority = TC_PRIO_CONTROL;
+	skb->dev = hdlc_to_dev(hdlc);
+
+	dev_queue_xmit(skb);
+}
+
+
+
+/* Periodic LMI timer.
+   DCE: link is reliable while enquiries keep arriving within T392.
+   DTE: tracks the last N393 polls in a bitmap of errors; the link is
+   reliable when fewer than N392 of them failed, and a new enquiry
+   (full report every N391 polls) is sent each T391 interval.
+   On a reliability change all PVCs are deactivated and, when the link
+   comes back, a full status report is requested. */
+static void fr_timer(unsigned long arg)
+{
+	hdlc_device *hdlc = (hdlc_device*)arg;
+	int i, cnt = 0, reliable;
+	u32 list;
+
+	if (mode_is(hdlc, MODE_DCE))
+		reliable = (jiffies - hdlc->lmi.last_poll < hdlc->lmi.T392*HZ);
+	else {
+		hdlc->lmi.last_errors <<= 1; /* Shift the list */
+		if (hdlc->lmi.state & LINK_STATE_REQUEST) {
+			/* previous enquiry was never answered */
+			printk(KERN_INFO "%s: No LMI status reply received\n",
+			       hdlc_to_name(hdlc));
+			hdlc->lmi.last_errors |= 1;
+		}
+
+		for (i = 0, list = hdlc->lmi.last_errors; i < hdlc->lmi.N393;
+		     i++, list >>= 1)
+			cnt += (list & 1); /* errors count */
+
+		reliable = (cnt < hdlc->lmi.N392);
+	}
+
+	if ((hdlc->lmi.state & LINK_STATE_RELIABLE) !=
+	    (reliable ? LINK_STATE_RELIABLE : 0)) {
+		pvc_device *pvc = hdlc->first_pvc;
+
+		while (pvc) {/* Deactivate all PVCs */
+			pvc->state &= ~(PVC_STATE_NEW | PVC_STATE_ACTIVE);
+			pvc = pvc->next;
+		}
+
+		hdlc->lmi.state ^= LINK_STATE_RELIABLE;
+		printk(KERN_INFO "%s: Link %sreliable\n", hdlc_to_name(hdlc),
+		       reliable ? "" : "un");
+
+		if (reliable) {
+			hdlc->lmi.N391cnt = 0; /* Request full status */
+			hdlc->lmi.state |= LINK_STATE_CHANGED;
+		}
+	}
+
+	if (mode_is(hdlc, MODE_DCE))
+		hdlc->timer.expires = jiffies + hdlc->lmi.T392*HZ;
+	else {
+		if (hdlc->lmi.N391cnt)
+			hdlc->lmi.N391cnt--;
+
+		fr_lmi_send(hdlc, hdlc->lmi.N391cnt == 0);
+
+		hdlc->lmi.state |= LINK_STATE_REQUEST;
+		hdlc->timer.expires = jiffies + hdlc->lmi.T391*HZ;
+	}
+
+	hdlc->timer.function = fr_timer;
+	hdlc->timer.data = arg;
+	add_timer(&hdlc->timer);
+}
+
+
+
+/* Parse a received LMI status message.
+   Returns 0 on success, 1 on any parse/validation error (the caller
+   counts it as an RX error).  DCE side answers with a status reply;
+   DTE side applies the full-report PVC states to its PVC list. */
+static int fr_lmi_recv(hdlc_device *hdlc, struct sk_buff *skb)
+{
+	int stat_len;
+	pvc_device *pvc;
+	int reptype = -1, error;
+	u8 rxseq, txseq;
+	int i;
+
+	if (skb->len < (mode_is(hdlc, MODE_FR_ANSI) ?
+			LMI_ANSI_LENGTH : LMI_LENGTH)) {
+		printk(KERN_INFO "%s: Short LMI frame\n", hdlc_to_name(hdlc));
+		return 1;
+	}
+
+	/* NOTE(review): the check reads the msgtype at data[5] but the
+	   diagnostic prints data[2] (the control field) - looks like the
+	   wrong byte is logged; confirm and fix in a follow-up */
+	if (skb->data[5] != (!mode_is(hdlc, MODE_DCE) ?
+			     LMI_STATUS : LMI_STATUS_ENQUIRY)) {
+		printk(KERN_INFO "%s: LMI msgtype=%x, Not LMI status %s\n",
+		       hdlc_to_name(hdlc), skb->data[2],
+		       mode_is(hdlc, MODE_DCE) ? "enquiry" : "reply");
+		return 1;
+	}
+
+	/* ANSI variant carries an extra lockshift octet before the IEs */
+	i = mode_is(hdlc, MODE_FR_ANSI) ? 7 : 6;
+
+	if (skb->data[i] !=
+	    (mode_is(hdlc, MODE_FR_CCITT) ? LMI_CCITT_REPTYPE : LMI_REPTYPE)) {
+		printk(KERN_INFO "%s: Not a report type=%x\n",
+		       hdlc_to_name(hdlc), skb->data[i]);
+		return 1;
+	}
+	i++;
+
+	i++;			/* Skip length field */
+
+	reptype = skb->data[i++];
+
+	if (skb->data[i]!=
+	    (mode_is(hdlc, MODE_FR_CCITT) ? LMI_CCITT_ALIVE : LMI_ALIVE)) {
+		printk(KERN_INFO "%s: Unsupported status element=%x\n",
+		       hdlc_to_name(hdlc), skb->data[i]);
+		return 1;
+	}
+	i++;
+
+	i++;			/* Skip length field */
+
+	hdlc->lmi.rxseq = skb->data[i++]; /* TX sequence from peer */
+	rxseq = skb->data[i++];	/* Should confirm our sequence */
+
+	txseq = hdlc->lmi.txseq;
+
+	if (mode_is(hdlc, MODE_DCE)) {
+		if (reptype != LMI_FULLREP && reptype != LMI_INTEGRITY) {
+			printk(KERN_INFO "%s: Unsupported report type=%x\n",
+			       hdlc_to_name(hdlc), reptype);
+			return 1;
+		}
+	}
+
+	error = 0;
+	if (!(hdlc->lmi.state & LINK_STATE_RELIABLE))
+		error = 1;
+
+	if (rxseq == 0 || rxseq != txseq) {
+		hdlc->lmi.N391cnt = 0; /* Ask for full report next time */
+		error = 1;
+	}
+
+	if (mode_is(hdlc, MODE_DCE)) {
+		if ((hdlc->lmi.state & LINK_STATE_FULLREP_SENT) && !error) {
+/* Stop sending full report - the last one has been confirmed by DTE */
+			hdlc->lmi.state &= ~LINK_STATE_FULLREP_SENT;
+			pvc = hdlc->first_pvc;
+			while (pvc) {
+				if (pvc->state & PVC_STATE_NEW) {
+					pvc->state &= ~PVC_STATE_NEW;
+					pvc->state |= PVC_STATE_ACTIVE;
+					fr_log_dlci_active(pvc);
+
+/* Tell DTE that new PVC is now active */
+					hdlc->lmi.state |= LINK_STATE_CHANGED;
+				}
+				pvc = pvc->next;
+			}
+		}
+
+		if (hdlc->lmi.state & LINK_STATE_CHANGED) {
+			reptype = LMI_FULLREP;
+			hdlc->lmi.state |= LINK_STATE_FULLREP_SENT;
+			hdlc->lmi.state &= ~LINK_STATE_CHANGED;
+		}
+
+		fr_lmi_send(hdlc, reptype == LMI_FULLREP ? 1 : 0);
+		return 0;
+	}
+
+	/* DTE */
+
+	if (reptype != LMI_FULLREP || error)
+		return 0;
+
+	stat_len = 3;
+	pvc = hdlc->first_pvc;
+
+	/* reset proposed states, then fill them from the report */
+	while (pvc) {
+		pvc->newstate = 0;
+		pvc = pvc->next;
+	}
+
+	while (skb->len >= i + 2 + stat_len) {
+		u16 dlci;
+		u8 state = 0;
+
+		if (skb->data[i] != (mode_is(hdlc, MODE_FR_CCITT) ?
+				     LMI_CCITT_PVCSTAT : LMI_PVCSTAT)) {
+			printk(KERN_WARNING "%s: Invalid PVCSTAT ID: %x\n",
+			       hdlc_to_name(hdlc), skb->data[i]);
+			return 1;
+		}
+		i++;
+
+		if (skb->data[i] != stat_len) {
+			printk(KERN_WARNING "%s: Invalid PVCSTAT length: %x\n",
+			       hdlc_to_name(hdlc), skb->data[i]);
+			return 1;
+		}
+		i++;
+
+		dlci = status_to_dlci(hdlc, skb->data+i, &state);
+		pvc = find_pvc(hdlc, dlci);
+
+		if (pvc)
+			pvc->newstate = state;
+		else if (state == PVC_STATE_NEW)
+			printk(KERN_INFO "%s: new PVC available, DLCI=%u\n",
+			       hdlc_to_name(hdlc), dlci);
+
+		i += stat_len;
+	}
+
+	/* commit the proposed states, logging every transition */
+	pvc = hdlc->first_pvc;
+
+	while (pvc) {
+		if (pvc->newstate == PVC_STATE_NEW)
+			pvc->newstate = PVC_STATE_ACTIVE;
+
+		pvc->newstate |= (pvc->state &
+				  ~(PVC_STATE_NEW|PVC_STATE_ACTIVE));
+		if (pvc->state != pvc->newstate) {
+			pvc->state = pvc->newstate;
+			fr_log_dlci_active(pvc);
+		}
+		pvc = pvc->next;
+	}
+
+	/* Next full report after N391 polls */
+	hdlc->lmi.N391cnt = hdlc->lmi.N391;
+
+	return 0;
+}
+
+
+
+/* Receive path for Frame Relay mode.
+   Validates the Q.922 header, dispatches LMI frames to fr_lmi_recv(),
+   then demultiplexes traffic to the matching PVC device: direct NLPID
+   for IP/IPv6, SNAP-encapsulated ARP; tracks FECN/BECN congestion
+   bits.  Anything else is counted and dropped. */
+static void fr_netif(hdlc_device *hdlc, struct sk_buff *skb)
+{
+	fr_hdr *fh = (fr_hdr*)skb->data;
+	u8 *data = skb->data;
+	u16 dlci;
+	pvc_device *pvc;
+
+	if (skb->len<4 || fh->ea1 || data[2] != FR_UI)
+		goto rx_error;
+
+	dlci = q922_to_dlci(skb->data);
+
+	if (dlci == LMI_DLCI) {
+		if (data[3] == LMI_PROTO) {
+			if (fr_lmi_recv(hdlc, skb))
+				goto rx_error;
+			else {
+				/* No request pending */
+				hdlc->lmi.state &= ~LINK_STATE_REQUEST;
+				hdlc->lmi.last_poll = jiffies;
+				dev_kfree_skb_any(skb);
+				return;
+			}
+		}
+
+		printk(KERN_INFO "%s: Received non-LMI frame with LMI DLCI\n",
+		       hdlc_to_name(hdlc));
+		goto rx_error;
+	}
+
+	pvc = find_pvc(hdlc, dlci);
+	if (!pvc) {
+#ifdef DEBUG_PKT
+		printk(KERN_INFO "%s: No PVC for received frame's DLCI %d\n",
+		       hdlc_to_name(hdlc), dlci);
+#endif
+		goto rx_error;
+	}
+
+	if ((pvc->netdev.flags & IFF_UP) == 0) {
+#ifdef DEBUG_PKT
+		printk(KERN_INFO "%s: PVC for received frame's DLCI %d is down\n",
+		       hdlc_to_name(hdlc), dlci);
+#endif
+		goto rx_error;
+	}
+
+	pvc->stats.rx_packets++; /* PVC traffic */
+	pvc->stats.rx_bytes += skb->len;
+
+	/* log FECN/BECN congestion-bit transitions and mirror them
+	   into the PVC state */
+	if ((pvc->state & PVC_STATE_FECN) != (fh->fecn ? PVC_STATE_FECN : 0)) {
+#ifdef DEBUG_FECN
+		printk(KERN_DEBUG "%s: FECN O%s\n", pvc_to_name(pvc),
+		       fh->fecn ? "N" : "FF");
+#endif
+		pvc->state ^= PVC_STATE_FECN;
+	}
+
+	if ((pvc->state & PVC_STATE_BECN) != (fh->becn ? PVC_STATE_BECN : 0)) {
+#ifdef DEBUG_FECN
+		printk(KERN_DEBUG "%s: BECN O%s\n", pvc_to_name(pvc),
+		       fh->becn ? "N" : "FF");
+#endif
+		pvc->state ^= PVC_STATE_BECN;
+	}
+
+	if (pvc->state & PVC_STATE_BECN)
+		pvc->stats.rx_compressed++;	/* BECN counter */
+
+	if (data[3] == NLPID_IP) {
+		skb_pull(skb, 4); /* Remove 4-byte header (hdr, UI, NLPID) */
+		skb->protocol = htons(ETH_P_IP);
+		skb->dev = &pvc->netdev;
+		netif_rx(skb);
+		return;
+	}
+
+
+	if (data[3] == NLPID_IPV6) {
+		skb_pull(skb, 4); /* Remove 4-byte header (hdr, UI, NLPID) */
+		skb->protocol = htons(ETH_P_IPV6);
+		skb->dev = &pvc->netdev;
+		netif_rx(skb);
+		return;
+	}
+
+	if (data[3] == FR_PAD && data[4] == NLPID_SNAP && data[5] == FR_PAD &&
+	    data[6] == FR_PAD && data[7] == FR_PAD &&
+	    ((data[8]<<8) | data[9]) == ETH_P_ARP) {
+		skb_pull(skb, 10);
+		skb->protocol = htons(ETH_P_ARP);
+		skb->dev = &pvc->netdev;
+		netif_rx(skb);
+		return;
+	}
+
+	/* fixed typo: "Unusupported" -> "Unsupported" (matches the
+	   analogous message in cisco_netif) */
+	printk(KERN_INFO "%s: Unsupported protocol %x\n",
+	       hdlc_to_name(hdlc), data[3]);
+	dev_kfree_skb_any(skb);
+	return;
+
+ rx_error:
+	hdlc->stats.rx_errors++; /* Mark error */
+	dev_kfree_skb_any(skb);
+}
+
+
+
+/* Start FR or Cisco keepalive operation: reset LMI state and arm the
+   periodic timer (fr_timer or cisco_timer depending on mode). */
+static void fr_cisco_open(hdlc_device *hdlc)
+{
+	hdlc->lmi.state = LINK_STATE_CHANGED;
+	hdlc->lmi.txseq = hdlc->lmi.rxseq = 0;
+	hdlc->lmi.last_errors = 0xFFFFFFFF;	/* all recent polls "failed" */
+	hdlc->lmi.N391cnt = 0;
+
+	init_timer(&hdlc->timer);
+	hdlc->timer.expires = jiffies + HZ; /* First poll after 1 second */
+	hdlc->timer.function = mode_is(hdlc, MODE_FR) ? fr_timer : cisco_timer;
+	hdlc->timer.data = (unsigned long)hdlc;
+	add_timer(&hdlc->timer);
+}
+
+
+
+/* Stop FR/Cisco operation: kill the LMI/keepalive timer and shut down
+   every PVC attached to this FRAD. */
+static void fr_cisco_close(hdlc_device *hdlc)
+{
+	pvc_device *pvc = hdlc->first_pvc;
+
+	del_timer_sync(&hdlc->timer);
+
+	while(pvc) {		/* NULL in Cisco mode */
+		dev_close(&pvc->netdev); /* Shutdown all PVCs for this FRAD */
+		pvc = pvc->next;
+	}
+}
+
+
+
+/******************************************************************
+ *
+ * generic HDLC routines
+ *
+ *****************************************************************/
+
+
+
+/* Set a new MTU on the HDLC device; valid range is 68..HDLC_MAX_MTU. */
+static int hdlc_change_mtu(struct net_device *dev, int new_mtu)
+{
+	if ((new_mtu < 68) || (new_mtu > HDLC_MAX_MTU))
+		return -EINVAL;
+	dev->mtu = new_mtu;
+	return 0;
+}
+
+
+
+/********************************************************
+ *
+ * PVC device routines
+ *
+ *******************************************************/
+
+/* Open a PVC device.  Fails with -EIO unless the master HDLC device
+   is up; clears statistics/state, gives hardware drivers a chance to
+   accept the PVC, and flags an LMI status change. */
+static int pvc_open(struct net_device *dev)
+{
+	pvc_device *pvc = dev_to_pvc(dev);
+	int result = 0;
+
+	if ((hdlc_to_dev(pvc->master)->flags & IFF_UP) == 0)
+		return -EIO;	/* Master must be UP in order to activate PVC */
+
+	memset(&(pvc->stats), 0, sizeof(struct net_device_stats));
+	pvc->state = 0;
+
+	if (!mode_is(pvc->master, MODE_SOFT) && pvc->master->open_pvc)
+		result = pvc->master->open_pvc(pvc);
+	if (result)
+		return result;
+
+	pvc->master->lmi.state |= LINK_STATE_CHANGED;
+	return 0;
+}
+
+
+
+/* Close a PVC device: clear its state, notify a hardware driver if
+   present, and flag an LMI status change on the master. */
+static int pvc_close(struct net_device *dev)
+{
+	pvc_device *pvc = dev_to_pvc(dev);
+	pvc->state = 0;
+
+	if (!mode_is(pvc->master, MODE_SOFT) && pvc->master->close_pvc)
+		pvc->master->close_pvc(pvc);
+
+	pvc->master->lmi.state |= LINK_STATE_CHANGED;
+	return 0;
+}
+
+
+
+/* Transmit on a PVC: forward to the master HDLC device while the PVC
+   is active, otherwise count the packet as dropped.  Always returns 0
+   (the skb is consumed either way). */
+static int pvc_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+	pvc_device *pvc = dev_to_pvc(dev);
+
+	if (pvc->state & PVC_STATE_ACTIVE) {
+		skb->dev = hdlc_to_dev(pvc->master);
+		pvc->stats.tx_bytes += skb->len;
+		pvc->stats.tx_packets++;
+		if (pvc->state & PVC_STATE_FECN)
+			pvc->stats.tx_compressed++; /* TX Congestion counter */
+		dev_queue_xmit(skb);
+	} else {
+		pvc->stats.tx_dropped++;
+		dev_kfree_skb(skb);
+	}
+
+	return 0;
+}
+
+
+
+/* get_stats hook for a pvcN device: per-PVC counters live in pvc_device. */
+static struct net_device_stats *pvc_get_stats(struct net_device *dev)
+{
+	pvc_device *pvc = dev_to_pvc(dev);
+	return &pvc->stats;
+}
+
+
+
+/* change_mtu hook for a pvcN device: same 68..HDLC_MAX_MTU bounds as
+ * the master device (see hdlc_change_mtu). */
+static int pvc_change_mtu(struct net_device *dev, int new_mtu)
+{
+	if ((new_mtu < 68) || (new_mtu > HDLC_MAX_MTU))
+		return -EINVAL;
+	dev->mtu = new_mtu;
+	return 0;
+}
+
+
+
+/* Unregister and free every PVC netdevice on this FRAD, then reset the
+ * list head and count.  Caller must hold the RTNL semaphore since
+ * unregister_netdevice() is used (not the locked unregister_netdev). */
+static void destroy_pvc_list(hdlc_device *hdlc)
+{
+	pvc_device *pvc = hdlc->first_pvc;
+	while(pvc) {
+		pvc_device *next = pvc->next; /* save link before kfree */
+		unregister_netdevice(&pvc->netdev);
+		kfree(pvc);
+		pvc = next;
+	}
+
+	hdlc->first_pvc = NULL;	/* All PVCs destroyed */
+	hdlc->pvc_count = 0;
+	hdlc->lmi.state |= LINK_STATE_CHANGED;
+}
+
+
+
+/********************************************************
+ *
+ * X.25 protocol support routines
+ *
+ *******************************************************/
+
+#ifdef CONFIG_HDLC_X25
+/* These functions are callbacks called by LAPB layer */
+
+/* LAPB event -> X.25 layer notification.
+ * Builds a one-byte pseudo-frame whose single byte is the event code
+ * (1 = connected, 2 = disconnected, see callers below) and feeds it up
+ * the stack as an ETH_P_X25 packet; `reason' is currently unused. */
+void x25_connect_disconnect(void *token, int reason, int code)
+{
+	hdlc_device *hdlc = token;
+	struct sk_buff *skb;
+	unsigned char *ptr;
+
+	if ((skb = dev_alloc_skb(1)) == NULL) {
+		printk(KERN_ERR "%s: out of memory\n", hdlc_to_name(hdlc));
+		return;	/* event is silently lost on allocation failure */
+	}
+
+	ptr = skb_put(skb, 1);
+	*ptr = code;
+
+	skb->dev = hdlc_to_dev(hdlc);
+	skb->protocol = htons(ETH_P_X25);
+	skb->mac.raw = skb->data;
+	skb->pkt_type = PACKET_HOST;
+
+	netif_rx(skb);
+}
+
+/* LAPB "connected" callback: report event code 1 to the X.25 layer. */
+void x25_connected(void *token, int reason)
+{
+	x25_connect_disconnect(token, reason, 1);
+}
+
+/* LAPB "disconnected" callback: report event code 2 to the X.25 layer. */
+void x25_disconnected(void *token, int reason)
+{
+	x25_connect_disconnect(token, reason, 2);
+}
+
+
+/* LAPB data-received callback: prepend the 0 (= data) pseudo-header
+ * byte expected by the X.25 socket layer and deliver the frame upward
+ * as an ETH_P_X25 packet.  Returns the netif_rx() result. */
+int x25_data_indication(void *token, struct sk_buff *skb)
+{
+	hdlc_device *hdlc = token;
+	unsigned char *ptr;
+
+	ptr = skb_push(skb, 1);
+	*ptr = 0;
+
+	skb->dev = hdlc_to_dev(hdlc);
+	skb->protocol = htons(ETH_P_X25);
+	skb->mac.raw = skb->data;
+	skb->pkt_type = PACKET_HOST;
+
+	return netif_rx(skb);
+}
+
+
+/* LAPB transmit callback: hand the ready LAPB frame to the hardware
+ * driver.  The xmit return value cannot be propagated through this
+ * void callback, hence the apologetic comment. */
+void x25_data_transmit(void *token, struct sk_buff *skb)
+{
+	hdlc_device *hdlc = token;
+	hdlc->xmit(hdlc, skb); /* Ignore return value :-( */
+}
+#endif /* CONFIG_HDLC_X25 */
+
+
+/********************************************************
+ *
+ * HDLC device routines
+ *
+ *******************************************************/
+
+/* net_device open hook for the master hdlcX device.
+ * Starts the protocol layer that matches the configured mode (software
+ * FR/Cisco keepalives, syncppp, or LAPB registration), then opens the
+ * hardware via hdlc->open().  If the hardware open fails, the protocol
+ * setup done above is rolled back symmetrically. */
+static int hdlc_open(struct net_device *dev)
+{
+	hdlc_device *hdlc = dev_to_hdlc(dev);
+	int result;
+
+	if (hdlc->mode == MODE_NONE)
+		return -ENOSYS;	/* no protocol configured yet (HDLCSMODE) */
+
+	memset(&(hdlc->stats), 0, sizeof(struct net_device_stats));
+
+	if (mode_is(hdlc, MODE_FR | MODE_SOFT) ||
+	    mode_is(hdlc, MODE_CISCO | MODE_SOFT))
+		fr_cisco_open(hdlc);
+#ifdef CONFIG_HDLC_PPP
+	else if (mode_is(hdlc, MODE_PPP | MODE_SOFT)) {
+		sppp_attach(&hdlc->pppdev);
+		/* sppp_attach nukes them. We don't need syncppp's ioctl */
+		dev->do_ioctl = hdlc_ioctl;
+		hdlc->pppdev.sppp.pp_flags &= ~PP_CISCO;
+		dev->type = ARPHRD_PPP;
+		result = sppp_open(dev);
+		if (result) {
+			sppp_detach(dev);
+			return result;
+		}
+	}
+#endif
+#ifdef CONFIG_HDLC_X25
+	else if (mode_is(hdlc, MODE_X25)) {
+		struct lapb_register_struct cb;
+
+		cb.connect_confirmation = x25_connected;
+		cb.connect_indication = x25_connected;
+		cb.disconnect_confirmation = x25_disconnected;
+		cb.disconnect_indication = x25_disconnected;
+		cb.data_indication = x25_data_indication;
+		cb.data_transmit = x25_data_transmit;
+
+		result = lapb_register(hdlc, &cb);
+		if (result != LAPB_OK)
+			return result;
+	}
+#endif
+	result = hdlc->open(hdlc);	/* bring up the hardware */
+	if (result) {
+		/* undo the protocol setup performed above */
+		if (mode_is(hdlc, MODE_FR | MODE_SOFT) ||
+		    mode_is(hdlc, MODE_CISCO | MODE_SOFT))
+			fr_cisco_close(hdlc);
+#ifdef CONFIG_HDLC_PPP
+		else if (mode_is(hdlc, MODE_PPP | MODE_SOFT)) {
+			sppp_close(dev);
+			sppp_detach(dev);
+			/* restore fields syncppp overwrote */
+			dev->rebuild_header = NULL;
+			dev->change_mtu = hdlc_change_mtu;
+			dev->mtu = HDLC_MAX_MTU;
+			dev->hard_header_len = 16;
+		}
+#endif
+#ifdef CONFIG_HDLC_X25
+		else if (mode_is(hdlc, MODE_X25))
+			lapb_unregister(hdlc);
+#endif
+	}
+
+	return result;
+}
+
+
+
+/* net_device stop hook: shut the hardware down first, then tear down
+ * the software protocol layer (mirror image of hdlc_open). */
+static int hdlc_close(struct net_device *dev)
+{
+	hdlc_device *hdlc = dev_to_hdlc(dev);
+
+	hdlc->close(hdlc);
+
+	if (mode_is(hdlc, MODE_FR | MODE_SOFT) ||
+	    mode_is(hdlc, MODE_CISCO | MODE_SOFT))
+		fr_cisco_close(hdlc);
+#ifdef CONFIG_HDLC_PPP
+	else if (mode_is(hdlc, MODE_PPP | MODE_SOFT)) {
+		sppp_close(dev);
+		sppp_detach(dev);
+		/* restore fields syncppp overwrote */
+		dev->rebuild_header = NULL;
+		dev->change_mtu = hdlc_change_mtu;
+		dev->mtu = HDLC_MAX_MTU;
+		dev->hard_header_len = 16;
+	}
+#endif
+#ifdef CONFIG_HDLC_X25
+	else if (mode_is(hdlc, MODE_X25))
+		lapb_unregister(hdlc);
+#endif
+	return 0;
+}
+
+
+
+/*
+ * hard_start_xmit hook for the master hdlcX device.
+ *
+ * In software X.25 mode the first byte of skb->data is a pseudo-header
+ * selecting an action: 0 = data (strip the byte, hand to LAPB),
+ * 1 = connect request, 2 = disconnect request.  Control frames (and
+ * unknown codes) are consumed here.  In every other mode the frame goes
+ * straight to the hardware driver's xmit routine.
+ */
+static int hdlc_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+	hdlc_device *hdlc = dev_to_hdlc(dev);
+
+#ifdef CONFIG_HDLC_X25
+	if (mode_is(hdlc, MODE_X25 | MODE_SOFT)) {
+		int result;
+
+
+		/* X.25 to LAPB */
+		switch (skb->data[0]) {
+		case 0:		/* Data to be transmitted */
+			skb_pull(skb, 1);
+			if ((result = lapb_data_request(hdlc, skb)) != LAPB_OK)
+				dev_kfree_skb(skb);
+			return 0;	/* LAPB owns the skb on success */
+
+		case 1:
+			if ((result = lapb_connect_request(hdlc))!= LAPB_OK) {
+				if (result == LAPB_CONNECTED) {
+					/* Send connect confirm. msg to level 3 */
+					x25_connected(hdlc, 0);
+				} else {
+					printk(KERN_ERR "%s: LAPB connect "
+					       "request failed, error code = "
+					       "%i\n", hdlc_to_name(hdlc),
+					       result);
+				}
+			}
+			break;
+
+		case 2:
+			if ((result=lapb_disconnect_request(hdlc))!=LAPB_OK) {
+				if (result == LAPB_NOTCONNECTED) {
+					/* Send disconnect confirm. msg to level 3 */
+					x25_disconnected(hdlc, 0);
+				} else {
+					printk(KERN_ERR "%s: LAPB disconnect "
+					       "request failed, error code = "
+					       "%i\n", hdlc_to_name(hdlc),
+					       result);
+				}
+			}
+			break;
+
+		default:	/* to be defined */
+			break;	/* a label must precede a statement - a bare
+				   "default:" before '}' fails to compile */
+		}
+
+		dev_kfree_skb(skb);	/* control frame consumed */
+		return 0;
+	} /* MODE_X25 */
+#endif /* CONFIG_HDLC_X25 */
+
+	return hdlc->xmit(hdlc, skb);
+}
+
+
+
+/* Receive entry point called by hardware drivers.
+ * skb contains a raw HDLC frame (both hard- and software modes);
+ * dispatch it to the protocol handler selected by hdlc->mode.  Frames
+ * that no handler accepts are counted as rx_errors and freed. */
+void hdlc_netif_rx(hdlc_device *hdlc, struct sk_buff *skb)
+{
+/* skb contains raw HDLC frame, in both hard- and software modes */
+	skb->mac.raw = skb->data;
+
+	switch(hdlc->mode & MODE_MASK) {
+	case MODE_HDLC:
+		/* raw mode: assume IP payload and push straight up */
+		skb->protocol = htons(ETH_P_IP);
+		skb->dev = hdlc_to_dev(hdlc);
+		netif_rx(skb);
+		return;
+
+	case MODE_FR:
+		fr_netif(hdlc, skb);
+		return;
+
+	case MODE_CISCO:
+		cisco_netif(hdlc, skb);
+		return;
+
+#ifdef CONFIG_HDLC_PPP
+	case MODE_PPP:
+#if 0
+		sppp_input(hdlc_to_dev(hdlc), skb);
+#else
+		/* let the protocol dispatcher route it to syncppp */
+		skb->protocol = htons(ETH_P_WAN_PPP);
+		skb->dev = hdlc_to_dev(hdlc);
+		netif_rx(skb);
+#endif
+		return;
+#endif
+#ifdef CONFIG_HDLC_X25
+	case MODE_X25:
+		skb->dev = hdlc_to_dev(hdlc);
+		if (lapb_data_received(hdlc, skb) == LAPB_OK)
+			return;
+		break;	/* LAPB refused the frame - drop below */
+#endif
+	}
+
+	hdlc->stats.rx_errors++;
+	dev_kfree_skb_any(skb);
+}
+
+
+
+/* get_stats hook for the master hdlcX device. */
+static struct net_device_stats *hdlc_get_stats(struct net_device *dev)
+{
+	return &dev_to_hdlc(dev)->stats;
+}
+
+
+
+/* HDLCSMODE ioctl worker: select the protocol mode for an hdlcX device.
+ * First tries hardware (card-assisted) protocol support; if the card
+ * refuses, falls back to "host software" mode - the card is put in raw
+ * HDLC mode and the protocol is run by this module.  Requires
+ * CAP_NET_ADMIN and the interface to be down; resets stats and PVCs. */
+static int hdlc_set_mode(hdlc_device *hdlc, int mode)
+{
+	int result = -1;	/* Default to soft modes */
+	struct net_device *dev = hdlc_to_dev(hdlc);
+
+	if(!capable(CAP_NET_ADMIN))
+		return -EPERM;
+
+	if(dev->flags & IFF_UP)
+		return -EBUSY;
+
+	dev->addr_len = 0;
+	dev->hard_header = NULL;
+	hdlc->mode = MODE_NONE;
+
+	if (!(mode & MODE_SOFT))	/* try hardware protocol first */
+		switch(mode & MODE_MASK) {
+		case MODE_HDLC:
+			result = hdlc->set_mode ?
+				hdlc->set_mode(hdlc, MODE_HDLC) : 0;
+			break;
+
+		case MODE_CISCO: /* By card */
+#ifdef CONFIG_HDLC_PPP
+		case MODE_PPP:
+#endif
+#ifdef CONFIG_HDLC_X25
+		case MODE_X25:
+#endif
+		case MODE_FR:
+			result = hdlc->set_mode ?
+				hdlc->set_mode(hdlc, mode) : -ENOSYS;
+			break;
+
+		default:
+			return -EINVAL;
+		}
+
+	if (result) {
+		mode |= MODE_SOFT; /* Try "host software" protocol */
+
+		switch(mode & MODE_MASK) {
+		case MODE_CISCO:
+			dev->hard_header = cisco_hard_header;
+			break;
+
+#ifdef CONFIG_HDLC_PPP
+		case MODE_PPP:
+			break;
+#endif
+#ifdef CONFIG_HDLC_X25
+		case MODE_X25:
+			break;
+#endif
+
+		case MODE_FR:
+			dev->hard_header = fr_hard_header;
+			dev->addr_len = 2;	/* Q.922 address */
+			*(u16*)dev->dev_addr = htons(LMI_DLCI);
+			dlci_to_q922(dev->broadcast, LMI_DLCI);
+			break;
+
+		default:
+			return -EINVAL;
+		}
+
+		/* soft protocol: card runs plain raw HDLC underneath */
+		result = hdlc->set_mode ?
+			hdlc->set_mode(hdlc, MODE_HDLC) : 0;
+	}
+
+	if (result)
+		return result;
+
+	hdlc->mode = mode;
+	switch(mode & MODE_MASK) {	/* ARP hardware type per protocol */
+#ifdef CONFIG_HDLC_PPP
+	case MODE_PPP:   dev->type = ARPHRD_PPP;     break;
+#endif
+#ifdef CONFIG_HDLC_X25
+	case MODE_X25:   dev->type = ARPHRD_X25;     break;
+#endif
+	case MODE_FR:    dev->type = ARPHRD_FRAD;    break;
+	case MODE_CISCO: dev->type = ARPHRD_CISCO;   break;
+	default:         dev->type = ARPHRD_RAWHDLC;
+	}
+
+	memset(&(hdlc->stats), 0, sizeof(struct net_device_stats));
+	destroy_pvc_list(hdlc);	/* old-mode PVCs are meaningless now */
+	return 0;
+}
+
+
+
+/* HDLCPVC ioctl worker: create (dlci > 0) or delete (dlci < 0) a
+ * Frame-Relay PVC netdevice on this FRAD.  Requires CAP_NET_ADMIN and
+ * FR mode; DLCI must be 1..1023 (10 bits, 0 reserved for LMI).
+ * Caller holds the RTNL semaphore (register/unregister_netdevice). */
+static int hdlc_fr_pvc(hdlc_device *hdlc, int dlci)
+{
+	pvc_device **pvc_p = &hdlc->first_pvc;
+	pvc_device *pvc;
+	int result, create = 1;	/* Create or delete PVC */
+
+	if(!capable(CAP_NET_ADMIN))
+		return -EPERM;
+
+	if(dlci<0) {		/* negative DLCI means "delete" */
+		dlci = -dlci;
+		create = 0;
+	}
+
+	if(dlci <= 0 || dlci >= 1024)
+		return -EINVAL;	/* Only 10 bits for DLCI, DLCI=0 is reserved */
+
+	if(!mode_is(hdlc, MODE_FR))
+		return -EINVAL;	/* Only meaningfull on FR */
+
+	/* find the list slot holding (or to hold) this DLCI */
+	while(*pvc_p) {
+		if (netdev_dlci(&(*pvc_p)->netdev) == dlci)
+			break;
+		pvc_p = &(*pvc_p)->next;
+	}
+
+	if (create) {		/* Create PVC */
+		if (*pvc_p != NULL)
+			return -EEXIST;
+
+		pvc = *pvc_p = kmalloc(sizeof(pvc_device), GFP_KERNEL);
+		if (!pvc) {
+			printk(KERN_WARNING "%s: Memory squeeze on "
+			       "hdlc_fr_pvc()\n", hdlc_to_name(hdlc));
+			return -ENOBUFS;
+		}
+		memset(pvc, 0, sizeof(pvc_device));
+
+		/* wire up the embedded net_device */
+		pvc->netdev.hard_start_xmit = pvc_xmit;
+		pvc->netdev.get_stats = pvc_get_stats;
+		pvc->netdev.open = pvc_open;
+		pvc->netdev.stop = pvc_close;
+		pvc->netdev.change_mtu = pvc_change_mtu;
+		pvc->netdev.mtu = HDLC_MAX_MTU;
+
+		pvc->netdev.type = ARPHRD_DLCI;
+		pvc->netdev.hard_header_len = 16;
+		pvc->netdev.hard_header = fr_hard_header;
+		pvc->netdev.tx_queue_len = 0;	/* frames requeue on master */
+		pvc->netdev.flags = IFF_POINTOPOINT;
+
+		dev_init_buffers(&pvc->netdev);
+
+		pvc->master = hdlc;
+		*(u16*)pvc->netdev.dev_addr = htons(dlci);
+		dlci_to_q922(pvc->netdev.broadcast, dlci);
+		pvc->netdev.addr_len = 2;	/* Q.922 address */
+		pvc->netdev.irq = hdlc_to_dev(hdlc)->irq;
+
+		result = dev_alloc_name(&pvc->netdev, "pvc%d");
+		if (result < 0) {
+			kfree(pvc);
+			*pvc_p = NULL;	/* unlink the failed entry */
+			return result;
+		}
+
+		if (register_netdevice(&pvc->netdev) != 0) {
+			kfree(pvc);
+			*pvc_p = NULL;
+			return -EIO;
+		}
+
+		/* hardware-assisted FR: let the card know about the PVC */
+		if (!mode_is(hdlc, MODE_SOFT) && hdlc->create_pvc) {
+			result = hdlc->create_pvc(pvc);
+			if (result) {
+				unregister_netdevice(&pvc->netdev);
+				kfree(pvc);
+				*pvc_p = NULL;
+				return result;
+			}
+		}
+
+		hdlc->lmi.state |= LINK_STATE_CHANGED;
+		hdlc->pvc_count++;
+		return 0;
+	}
+
+	if (*pvc_p == NULL)	/* Delete PVC */
+		return -ENOENT;
+
+	pvc = *pvc_p;
+
+	if (pvc->netdev.flags & IFF_UP)
+		return -EBUSY;	/* PVC in use */
+
+	if (!mode_is(hdlc, MODE_SOFT) && hdlc->destroy_pvc)
+		hdlc->destroy_pvc(pvc);
+
+	hdlc->lmi.state |= LINK_STATE_CHANGED;
+	hdlc->pvc_count--;
+	*pvc_p = pvc->next;	/* unlink before freeing */
+	unregister_netdevice(&pvc->netdev);
+	kfree(pvc);
+	return 0;
+}
+
+
+
+/* do_ioctl hook for the master hdlcX device.
+ * Handles the generic-HDLC ioctls (get/set mode, PVC management) and
+ * forwards anything else to the hardware driver's ioctl, if present. */
+static int hdlc_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
+{
+	hdlc_device *hdlc = dev_to_hdlc(dev);
+
+	switch(cmd) {
+	case HDLCGMODE:
+		ifr->ifr_ifru.ifru_ivalue = hdlc->mode;
+		return 0;
+
+	case HDLCSMODE:
+		return hdlc_set_mode(hdlc, ifr->ifr_ifru.ifru_ivalue);
+
+	case HDLCPVC:
+		return hdlc_fr_pvc(hdlc, ifr->ifr_ifru.ifru_ivalue);
+
+	default:
+		if (hdlc->ioctl != NULL)
+			return hdlc->ioctl(hdlc, ifr, cmd);
+	}
+
+	return -EINVAL;
+}
+
+
+
+/* net_device init hook: fill in the generic-HDLC method table and
+ * defaults for a freshly registered hdlcX device. */
+static int hdlc_init(struct net_device *dev)
+{
+	hdlc_device *hdlc = dev_to_hdlc(dev);
+
+	memset(&(hdlc->stats), 0, sizeof(struct net_device_stats));
+
+	dev->get_stats = hdlc_get_stats;
+	dev->open = hdlc_open;
+	dev->stop = hdlc_close;
+	dev->hard_start_xmit = hdlc_xmit;
+	dev->do_ioctl = hdlc_ioctl;
+	dev->change_mtu = hdlc_change_mtu;
+	dev->mtu = HDLC_MAX_MTU;
+
+	dev->type = ARPHRD_RAWHDLC;	/* until a mode is configured */
+	dev->hard_header_len = 16;
+
+	dev->flags = IFF_POINTOPOINT | IFF_NOARP;
+
+	dev_init_buffers(dev);
+	return 0;
+}
+
+
+
+/* Exported API for hardware drivers: register an hdlc_device with the
+ * generic layer.  Sets up syncppp back-pointers, default LMI timers/
+ * counters per ANSI T1.617 Annex D, allocates the hdlcN name and
+ * registers the netdevice.  Pins this module while devices exist. */
+int register_hdlc_device(hdlc_device *hdlc)
+{
+	int result;
+	struct net_device *dev = hdlc_to_dev(hdlc);
+
+	dev->init = hdlc_init;
+	dev->priv = &hdlc->syncppp_ptr;	/* what sppp_attach expects */
+	hdlc->syncppp_ptr = &hdlc->pppdev;
+	hdlc->pppdev.dev = dev;
+	hdlc->mode = MODE_NONE;
+	hdlc->lmi.T391 = 10;	/* polling verification timer */
+	hdlc->lmi.T392 = 15;	/* link integrity verification polling timer */
+	hdlc->lmi.N391 = 6;	/* full status polling counter */
+	hdlc->lmi.N392 = 3;	/* error threshold */
+	hdlc->lmi.N393 = 4;	/* monitored events count */
+
+	result = dev_alloc_name(dev, "hdlc%d");
+	if (result<0)
+		return result;
+
+	result = register_netdev(dev);
+	if (result != 0)
+		return -EIO;
+
+	dev_init_buffers(dev);
+	MOD_INC_USE_COUNT;
+	return 0;
+}
+
+
+
+/* Exported API: tear down all PVCs and unregister the hdlcX device,
+ * releasing the module reference taken in register_hdlc_device(). */
+void unregister_hdlc_device(hdlc_device *hdlc)
+{
+	destroy_pvc_list(hdlc);
+	unregister_netdev(hdlc_to_dev(hdlc));
+	MOD_DEC_USE_COUNT;
+}
+
+MODULE_AUTHOR("Krzysztof Halasa <khc@pm.waw.pl>");
+MODULE_DESCRIPTION("HDLC support module");
+
+EXPORT_SYMBOL(hdlc_netif_rx);
+EXPORT_SYMBOL(register_hdlc_device);
+EXPORT_SYMBOL(unregister_hdlc_device);
+
+/* Module init: nothing to set up - just announce the driver version.
+ * Devices appear later via register_hdlc_device() from card drivers. */
+static int __init hdlc_module_init(void)
+{
+	printk(KERN_INFO "%s\n", version);
+	return 0;
+}
+
+
+module_init(hdlc_module_init);
#include <asm/io.h>
#include <asm/dma.h>
#include <asm/byteorder.h>
-#include "syncppp.h"
+#include <net/syncppp.h>
#include "z85230.h"
static int dma;
{
/* Drop the CRC - its not a good idea to try and negotiate it ;) */
skb_trim(skb, skb->len-2);
- skb->protocol=htons(ETH_P_WAN_PPP);
+ skb->protocol=__constant_htons(ETH_P_WAN_PPP);
skb->mac.raw=skb->data;
skb->dev=c->netdevice;
/*
* it right now.
*/
netif_rx(skb);
+ c->netdevice->last_rx = jiffies;
}
/*
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
-#include "../syncppp.h"
+#include <net/syncppp.h>
#include <linux/inet.h>
#if LINUX_VERSION_CODE >= 0x20200
#define ARPHRD_HDLC 513
#endif
-#ifdef MODULE
-#ifdef MODVERSIONS
-#include <linux/modversions.h>
-#endif
#include <linux/module.h>
-#else
-#define MOD_INC_USE_COUNT
-#define MOD_DEC_USE_COUNT
-#endif
#define DRIVER_MAJOR_VERSION 1
#define DRIVER_MINOR_VERSION 34
/*
* Most functions mess with the structure
- * Disable interupts while we do the polling
+ * Disable interrupts while we do the polling
*/
spin_lock_irqsave(&sc->lmc_lock, flags);
udelay(50);
/*
- * Clear reset and activate programing lines
+ * Clear reset and activate programming lines
* Reset: Input
* DP: Input
* Clock: Output
sc->lmc_gpio |= LMC_GEP_DATA; /* Data is 1 */
break;
default:
- printk(KERN_WARNING "%s Bad data in xilinx programing data at %d, got %d wanted 0 or 1\n", dev->name, pos, data[pos]);
+ printk(KERN_WARNING "%s Bad data in xilinx programming data at %d, got %d wanted 0 or 1\n", dev->name, pos, data[pos]);
sc->lmc_gpio |= LMC_GEP_DATA; /* Assume it's 1 */
}
sc->lmc_gpio &= ~LMC_GEP_CLK; /* Clock to zero */
udelay(1);
}
if((LMC_CSR_READ(sc, csr_gp) & LMC_GEP_INIT) == 0){
- printk(KERN_WARNING "%s: Reprograming FAILED. Needs to be reprogramed. (corrupted data)\n", dev->name);
+ printk(KERN_WARNING "%s: Reprogramming FAILED. Needs to be reprogrammed. (corrupted data)\n", dev->name);
}
else if((LMC_CSR_READ(sc, csr_gp) & LMC_GEP_DP) == 0){
- printk(KERN_WARNING "%s: Reprograming FAILED. Needs to be reprogramed. (done)\n", dev->name);
+ printk(KERN_WARNING "%s: Reprogramming FAILED. Needs to be reprogrammed. (done)\n", dev->name);
}
else {
- printk(KERN_DEBUG "%s: Done reprograming Xilinx, %d bits, good luck!\n", dev->name, pos);
+ printk(KERN_DEBUG "%s: Done reprogramming Xilinx, %d bits, good luck!\n", dev->name, pos);
}
lmc_gpio_mkinput(sc, 0xff);
if(sc->check != 0xBEAFCAFE){
printk("LMC: Corrupt net_device stuct, breaking out\n");
+ spin_unlock_irqrestore(&sc->lmc_lock, flags);
return;
}
/* Make sure the tx jabber and rx watchdog are off,
- * and the transmit and recieve processes are running.
+ * and the transmit and receive processes are running.
*/
LMC_CSR_WRITE (sc, csr_15, 0x00000011);
if(sc->failed_recv_alloc == 1){
/*
* We failed to alloc mem in the
- * interupt halder, go through the rings
+ * interrupt handler, go through the rings
* and rebuild them
*/
sc->failed_recv_alloc = 0;
/* Stop Tx and Rx on the chip */
csr6 = LMC_CSR_READ (sc, csr_command);
csr6 &= ~LMC_DEC_ST; /* Turn off the Transmission bit */
- csr6 &= ~LMC_DEC_SR; /* Turn off the Recieve bit */
+ csr6 &= ~LMC_DEC_SR; /* Turn off the Receive bit */
LMC_CSR_WRITE (sc, csr_command, csr6);
dev->flags &= ~IFF_RUNNING;
spin_lock(&sc->lmc_lock);
/*
- * Read the csr to find what interupts we have (if any)
+ * Read the csr to find what interrupts we have (if any)
*/
csr = LMC_CSR_READ (sc, csr_status);
/* always go through this loop at least once */
while (csr & sc->lmc_intrmask) {
/*
- * Clear interupt bits, we handle all case below
+ * Clear interrupt bits, we handle all case below
*/
LMC_CSR_WRITE (sc, csr_status, csr);
}
if (csr & TULIP_STS_RXINTR){
- lmc_trace(dev, "rx interupt");
+ lmc_trace(dev, "rx interrupt");
lmc_rx (dev);
}
/*
* Get current csr status to make sure
- * we've cleared all interupts
+ * we've cleared all interrupts
*/
csr = LMC_CSR_READ (sc, csr_status);
} /* end interrupt loop */
spin_unlock(&sc->lmc_lock);
lmc_trace(dev, "lmc_interrupt out");
-
- return;
}
static int lmc_start_xmit (struct sk_buff *skb, struct net_device *dev) /*fold00*/
sc->lmc_rxq[i] = nsb;
nsb->dev = dev;
sc->lmc_rxring[i].buffer1 = virt_to_bus (nsb->tail);
- /* Transfered to 21140 below */
+ /* Transferred to 21140 below */
}
else {
/*
* We've run out of memory, stop trying to allocate
- * memory and exit the interupt handler
+ * memory and exit the interrupt handler
*
* The chip may run out of receivers and stop
* in which care we'll try to allocate the buffer
lmc_trace(sc->lmc_device, "lmc_softreset in");
- /* Initialize the recieve rings and buffers. */
+ /* Initialize the receive rings and buffers. */
sc->lmc_txfull = 0;
sc->lmc_next_rx = 0;
sc->lmc_next_tx = 0;
/*
* Setup each one of the receiver buffers
- * allocate an skbuff for each one, setup the the descriptor table
+ * allocate an skbuff for each one, setup the descriptor table
* and point each buffer at the next one
*/
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
-#include "../syncppp.h"
+#include <net/syncppp.h>
#include <linux/inet.h>
#if LINUX_VERSION_CODE >= 0x20200
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
-#include "../syncppp.h"
+#include <net/syncppp.h>
#include <linux/inet.h>
#include <linux/tqueue.h>
#include <linux/proc_fs.h>
* compiled without referencing any of the sync ppp routines.
*/
#ifdef SPPPSTUB
-#define SYNC_PPP_init() (void)0
#define SPPP_detach(d) (void)0
#define SPPP_open(d) 0
#define SPPP_reopen(d) (void)0
#define SPPP_do_ioctl(d,i,c) -EOPNOTSUPP
#else
#if LINUX_VERSION_CODE < 0x20363
-#define SYNC_PPP_init sync_ppp_init
#define SPPP_attach(x) sppp_attach((struct ppp_device *)(x)->lmc_device)
#define SPPP_detach(x) sppp_detach((x)->lmc_device)
#define SPPP_open(x) sppp_open((x)->lmc_device)
#define SPPP_close(x) sppp_close((x)->lmc_device)
#define SPPP_do_ioctl(x, y, z) sppp_do_ioctl((x)->lmc_device, (y), (z))
#else
-#define SYNC_PPP_init sync_ppp_init
#define SPPP_attach(x) sppp_attach((x)->pd)
#define SPPP_detach(x) sppp_detach((x)->pd->dev)
#define SPPP_open(x) sppp_open((x)->pd->dev)
#endif
#endif
-static int lmc_first_ppp_load = 0;
-
// init
void lmc_proto_init(lmc_softc_t *sc) /*FOLD00*/
{
lmc_trace(sc->lmc_device, "lmc_proto_init in");
switch(sc->if_type){
case LMC_PPP:
- if(lmc_first_ppp_load == 0)
-#ifndef MODULE
- SYNC_PPP_init();
-#endif
#if LINUX_VERSION_CODE >= 0x20363
sc->pd = kmalloc(sizeof(struct ppp_device), GFP_KERNEL);
+ if (!sc->pd) {
+ printk("lmc_proto_init(): kmalloc failure!\n");
+ return;
+ }
sc->pd->dev = sc->lmc_device;
#endif
sc->if_ptr = sc->pd;
#define TULIP_STS_NORMALINTR 0x00010000L /* (RW) Normal Interrupt */
#define TULIP_STS_ABNRMLINTR 0x00008000L /* (RW) Abnormal Interrupt */
-#define TULIP_STS_ERI 0x00004000L /* (RW) Early Receive Interupt */
+#define TULIP_STS_ERI 0x00004000L /* (RW) Early Receive Interrupt */
#define TULIP_STS_SYSERROR 0x00002000L /* (RW) System Error */
#define TULIP_STS_GTE 0x00000800L /* (RW) General Pupose Timer Exp */
-#define TULIP_STS_ETI 0x00000400L /* (RW) Early Transmit Interupt */
+#define TULIP_STS_ETI 0x00000400L /* (RW) Early Transmit Interrupt */
#define TULIP_STS_RXWT 0x00000200L /* (RW) Receiver Watchdog Timeout */
#define TULIP_STS_RXSTOPPED 0x00000100L /* (RW) Receiver Process Stopped */
#define TULIP_STS_RXNOBUF 0x00000080L /* (RW) Receive Buf Unavail */
--- /dev/null
+/*
+ * SDL Inc. RISCom/N2 synchronous serial card driver for Linux
+ *
+ * Copyright (C) 1998-2000 Krzysztof Halasa <khc@pm.waw.pl>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * For information see http://hq.pm.waw.pl/hdlc/
+ *
+ * Note: integrated CSU/DSU/DDS are not supported by this driver
+ *
+ * Sources of information:
+ * Hitachi HD64570 SCA User's Manual
+ * SDL Inc. PPP/HDLC/CISCO driver
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/sched.h>
+#include <linux/types.h>
+#include <linux/fcntl.h>
+#include <linux/in.h>
+#include <linux/string.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/ioport.h>
+#include <linux/netdevice.h>
+#include <linux/hdlc.h>
+#include <asm/io.h>
+#include "hd64570.h"
+
+#define DEBUG_RINGS
+/* #define DEBUG_PKT */
+
+static const char* version = "SDL RISCom/N2 driver revision: 1.02 for Linux 2.4";
+static const char* devname = "RISCom/N2";
+
+#define USE_WINDOWSIZE 16384
+#define USE_BUS16BITS 1
+#define CLOCK_BASE 9830400 /* 9.8304 MHz */
+
+#define N2_IOPORTS 0x10
+
+static char *hw = NULL; /* pointer to hw=xxx command line string */
+
+/* RISCom/N2 Board Registers */
+
+/* PC Control Register */
+#define N2_PCR 0
+#define PCR_RUNSCA 1 /* Run 64570 */
+#define PCR_VPM 2 /* Enable VPM - needed if using RAM above 1 MB */
+#define PCR_ENWIN 4 /* Open window */
+#define PCR_BUS16 8 /* 16-bit bus */
+
+
+/* Memory Base Address Register */
+#define N2_BAR 2
+
+
+/* Page Scan Register */
+#define N2_PSR 4
+#define WIN16K 0x00
+#define WIN32K 0x20
+#define WIN64K 0x40
+#define PSR_WINBITS 0x60
+#define PSR_DMAEN 0x80
+#define PSR_PAGEBITS 0x0F
+
+
+/* Modem Control Reg */
+#define N2_MCR 6
+#define CLOCK_OUT_PORT1 0x80
+#define CLOCK_OUT_PORT0 0x40
+#define TX422_PORT1 0x20
+#define TX422_PORT0 0x10
+#define DSR_PORT1 0x08
+#define DSR_PORT0 0x04
+#define DTR_PORT1 0x02
+#define DTR_PORT0 0x01
+
+
+/* Per-port state: one of the (up to) two synchronous serial ports on a
+ * RISCom/N2 card.  The embedded hdlc_device MUST stay first so generic
+ * HDLC code can cast between the two. */
+typedef struct port_s {
+	hdlc_device hdlc;	/* HDLC device struct - must be first */
+	struct card_s *card;
+	spinlock_t lock;	/* TX lock */
+	int clkmode;		/* clock mode */
+	int clkrate;		/* clock rate */
+	int line;		/* loopback only */
+	u8 rxs, txs, tmc;	/* SCA registers */
+	u8 valid;		/* port enabled */
+	u8 phy_node;		/* physical port # - 0 or 1 */
+	u8 log_node;		/* logical port # */
+	u8 rxin;		/* rx ring buffer 'in' pointer */
+	u8 txin;		/* tx ring buffer 'in' and 'last' pointers */
+	u8 txlast;
+	u8 rxpart;		/* partial frame received, next frame invalid*/
+}port_t;
+
+
+
+/* Per-card state; cards are kept on a singly linked list so the module
+ * exit path can destroy them all. */
+typedef struct card_s {
+	u8 *winbase;		/* ISA window base address */
+	u32 phy_winbase;	/* ISA physical base address */
+	u32 ram_size;		/* number of bytes */
+	u16 io;			/* IO Base address */
+	u16 buff_offset;	/* offset of first buffer of first channel */
+	u8 irq;			/* IRQ (3-15) */
+	u8 ring_buffers;	/* number of buffers in a ring */
+
+	port_t ports[2];
+	struct card_s *next_card;
+}card_t;
+
+
+
+#define sca_reg(reg, card) (0x8000 | (card)->io | \
+ ((reg) & 0x0F) | (((reg) & 0xF0) << 6))
+#define sca_in(reg, card) inb(sca_reg(reg, card))
+#define sca_out(value, reg, card) outb(value, sca_reg(reg, card))
+#define sca_inw(reg, card) inw(sca_reg(reg, card))
+#define sca_outw(value, reg, card) outw(value, sca_reg(reg, card))
+
+#define port_to_card(port) ((port)->card)
+#define log_node(port) ((port)->log_node)
+#define phy_node(port) ((port)->phy_node)
+#define winsize(card) (USE_WINDOWSIZE)
+#define winbase(card) ((card)->winbase)
+#define get_port(card, port) ((card)->ports[port].valid ? \
+ &(card)->ports[port] : NULL)
+
+
+
+/* Return the RAM page currently selected in the Page Scan Register. */
+static __inline__ u8 sca_get_page(card_t *card)
+{
+	return inb(card->io + N2_PSR) & PSR_PAGEBITS;
+}
+
+
+/* Select RAM page `page' in the ISA memory window, preserving the
+ * non-page bits of the Page Scan Register. */
+static __inline__ void openwin(card_t *card, u8 page)
+{
+	u8 psr = inb(card->io + N2_PSR);
+	outb((psr & ~PSR_PAGEBITS) | page, card->io + N2_PSR);
+}
+
+
+/* Disable the ISA memory window (clear ENWIN in the PC Control Reg). */
+static __inline__ void close_windows(card_t *card)
+{
+	outb(inb(card->io + N2_PCR) & ~PCR_ENWIN, card->io + N2_PCR);
+}
+
+
+#include "hd6457x.c"
+
+
+
+/* Program the clock source for one port: board-level clock-output bit
+ * in the Modem Control Register plus the SCA RXS/TXS clock-source
+ * fields.  Returns -EINVAL for an unknown mode. */
+static int n2_set_clock(port_t *port, int value)
+{
+	card_t *card = port->card;
+	int io = card->io;
+	u8 mcr = inb(io + N2_MCR);
+	u8 msci = get_msci(port);
+	u8 rxs = port->rxs & CLK_BRG_MASK;	/* keep BRG divider bits */
+	u8 txs = port->txs & CLK_BRG_MASK;
+
+	switch(value) {
+	case CLOCK_EXT:		/* both clocks from the line */
+		mcr &= port->phy_node ? ~CLOCK_OUT_PORT1 : ~CLOCK_OUT_PORT0;
+		rxs |= CLK_LINE_RX;	/* RXC input */
+		txs |= CLK_LINE_TX;	/* TXC input */
+		break;
+
+	case CLOCK_INT:		/* both clocks generated internally */
+		mcr |= port->phy_node ? CLOCK_OUT_PORT1 : CLOCK_OUT_PORT0;
+		rxs |= CLK_BRG_RX;	/* BRG output */
+		txs |= CLK_RXCLK_TX;	/* RX clock */
+		break;
+
+	case CLOCK_TXINT:	/* RX from line, TX from BRG */
+		mcr |= port->phy_node ? CLOCK_OUT_PORT1 : CLOCK_OUT_PORT0;
+		rxs |= CLK_LINE_RX;	/* RXC input */
+		txs |= CLK_BRG_TX;	/* BRG output */
+		break;
+
+	case CLOCK_TXFROMRX:	/* TX clocked by recovered RX clock */
+		mcr |= port->phy_node ? CLOCK_OUT_PORT1 : CLOCK_OUT_PORT0;
+		rxs |= CLK_LINE_RX;	/* RXC input */
+		txs |= CLK_RXCLK_TX;	/* RX clock */
+		break;
+
+	default:
+		return -EINVAL;
+	}
+
+	outb(mcr, io + N2_MCR);
+	port->rxs = rxs;	/* cache for later reprogramming */
+	port->txs = txs;
+	sca_out(rxs, msci + RXS, card);
+	sca_out(txs, msci + TXS, card);
+	port->clkmode = value;
+	return 0;
+}
+
+
+
+/* hdlc->open callback: raise DTR, open the ISA memory window, enable
+ * DMA, start the SCA and reprogram the stored clock mode.  Takes a
+ * module reference for the lifetime of the open port. */
+static int n2_open(hdlc_device *hdlc)
+{
+	port_t *port = hdlc_to_port(hdlc);
+	int io = port->card->io;
+	u8 mcr = inb(io + N2_MCR) | (port->phy_node ? TX422_PORT1 : TX422_PORT0);
+
+	MOD_INC_USE_COUNT;
+	mcr &= port->phy_node ? ~DTR_PORT1 : ~DTR_PORT0; /* set DTR ON */
+	outb(mcr, io + N2_MCR);
+
+	outb(inb(io + N2_PCR) | PCR_ENWIN, io + N2_PCR); /* open window */
+	outb(inb(io + N2_PSR) | PSR_DMAEN, io + N2_PSR); /* enable dma */
+	sca_open(hdlc);
+	n2_set_clock(port, port->clkmode);
+	return 0;
+}
+
+
+
+/* hdlc->close callback: stop the SCA, drop DTR and release the module
+ * reference taken in n2_open(). */
+static void n2_close(hdlc_device *hdlc)
+{
+	port_t *port = hdlc_to_port(hdlc);
+	int io = port->card->io;
+	u8 mcr = inb(io+N2_MCR) | (port->phy_node ? TX422_PORT1 : TX422_PORT0);
+
+	sca_close(hdlc);
+	mcr |= port->phy_node ? DTR_PORT1 : DTR_PORT0; /* set DTR OFF */
+	outb(mcr, io + N2_MCR);
+	MOD_DEC_USE_COUNT;
+}
+
+
+
+/* hdlc->ioctl callback: clock mode/rate and loopback get/set, plus a
+ * debug ring dump.  Each "set" case deliberately falls through to the
+ * matching "get" so the current value is reported back to the caller. */
+static int n2_ioctl(hdlc_device *hdlc, struct ifreq *ifr, int cmd)
+{
+	int value = ifr->ifr_ifru.ifru_ivalue;
+	int result = 0;
+	port_t *port = hdlc_to_port(hdlc);
+
+	if(!capable(CAP_NET_ADMIN))
+		return -EPERM;
+
+	switch(cmd) {
+	case HDLCSCLOCK:
+		result = n2_set_clock(port, value);
+		/* fall through - report resulting mode */
+	case HDLCGCLOCK:
+		value = port->clkmode;
+		break;
+
+	case HDLCSCLOCKRATE:
+		port->clkrate = value;
+		sca_set_clock(port);
+		/* fall through - report resulting rate */
+	case HDLCGCLOCKRATE:
+		value = port->clkrate;
+		break;
+
+	case HDLCSLINE:
+		result = sca_set_loopback(port, value);
+		/* fall through - report resulting line mode */
+	case HDLCGLINE:
+		value = port->line;
+		break;
+
+#ifdef DEBUG_RINGS
+	case HDLCRUN:
+		sca_dump_rings(hdlc);
+		return 0;
+#endif /* DEBUG_RINGS */
+
+	default:
+		return -EINVAL;
+	}
+
+	ifr->ifr_ifru.ifru_ivalue = value;
+	return result;
+}
+
+
+
+/* Probe and test the card's RAM through the 16 KB window.
+ * For each page: verify it is distinct addressable memory (write page
+ * number, read back, check page 0 unchanged), then run byte and 16-bit
+ * read/write patterns over the whole window.  Returns the number of
+ * good pages found, or 0 if any memory test failed. */
+static u8 n2_count_page(card_t *card)
+{
+	u8 page;
+	int i, bcount = USE_WINDOWSIZE, wcount = USE_WINDOWSIZE/2;
+	u16 *dp = (u16*)card->winbase;
+	u8 *bp = (u8*)card->winbase;
+	u8 psr = inb(card->io + N2_PSR) & PSR_WINBITS;
+
+
+	for (page = 0; page < 16; page++) {
+		outb(psr | page, card->io + N2_PSR);	/* select a page */
+		writeb(page, dp);
+		if (readb(dp) != page)
+			break;	/* If can't read back, no good memory */
+
+		outb(psr, card->io + N2_PSR);	/* goto page 0 */
+		if (readb(dp))
+			break;	/* If page 0 changed, then wrapped around */
+
+		outb(psr | page, card->io + N2_PSR);	/* select page again */
+
+		/* first do byte tests */
+		for (i = 0; i < bcount; i++)
+			writeb(i, bp + i);
+		for (i = 0; i < bcount; i++)
+			if (readb(bp + i) != (i & 0xff))
+				return 0;
+
+		for (i = 0; i < bcount; i++)
+			writeb(~i, bp + i);
+		for (i = 0; i < bcount; i++)
+			if (readb(bp + i) != (~i & 0xff))
+				return 0;
+
+		/* next do 16-bit tests */
+		for (i = 0; i < wcount; i++)
+			writew(0x55AA, dp + i);
+		for (i = 0; i < wcount; i++)
+			if (readw(dp + i) != 0x55AA)
+				return 0;
+
+		for (i = 0; i < wcount; i++)
+			writew(0xAA55, dp + i);
+		for (i = 0; i < wcount; i++)
+			if (readw(dp + i) != 0xAA55)
+				return 0;
+
+		/* re-tag the page for the wrap-around check above */
+		for (i = 0; i < wcount; i++)
+			writew(page, dp + i);
+	}
+
+	return page;
+}
+
+
+
+/* Release everything a (possibly partially initialized) card holds:
+ * registered HDLC ports, IRQ, mapped RAM window, I/O region, and the
+ * card structure itself.  Safe to call from any n2_run() error path
+ * because each resource is checked before release. */
+static void n2_destroy_card(card_t *card)
+{
+	int cnt;
+
+	for (cnt = 0; cnt < 2; cnt++)
+		if (card->ports[cnt].card)	/* set only after register */
+			unregister_hdlc_device(&card->ports[cnt].hdlc);
+
+	if (card->irq)
+		free_irq(card->irq, card);
+
+	if (card->winbase) {
+		iounmap(card->winbase);
+		release_mem_region(card->phy_winbase, USE_WINDOWSIZE);
+	}
+
+	if (card->io)
+		release_region(card->io, N2_IOPORTS);
+	kfree(card);
+}
+
+
+
+/* Probe and bring up one RISCom/N2 card.
+ * Validates the io/irq/RAM-window parameters, claims the resources,
+ * sizes and tests the on-board RAM, partitions it into RX/TX rings,
+ * starts the SCA and registers an hdlcX device for each enabled port.
+ * On success the card is appended to the driver's card list.
+ * NOTE(review): `new_card' and `first_card' are file-scope variables
+ * declared outside this excerpt - confirm against the full file. */
+static int n2_run(unsigned long io, unsigned long irq, unsigned long winbase,
+		  long valid0, long valid1)
+{
+	card_t *card;
+	u8 cnt, pcr;
+
+	if (io < 0x200 || io > 0x3FF || (io % N2_IOPORTS) != 0) {
+		printk(KERN_ERR "n2: invalid I/O port value\n");
+		return -ENODEV;
+	}
+
+	if (irq < 3 || irq > 15 || irq == 6) /* FIXME */ {
+		printk(KERN_ERR "n2: invalid IRQ value\n");
+		return -ENODEV;
+	}
+
+	if (winbase < 0xA0000 || winbase > 0xFFFFF || (winbase & 0xFFF) != 0) {
+		printk(KERN_ERR "n2: invalid RAM value\n");
+		return -ENODEV;
+	}
+
+	card = kmalloc(sizeof(card_t), GFP_KERNEL);
+	if (card == NULL) {
+		printk(KERN_ERR "n2: unable to allocate memory\n");
+		return -ENOBUFS;
+	}
+	memset(card, 0, sizeof(card_t));
+
+	if (!request_region(io, N2_IOPORTS, devname)) {
+		printk(KERN_ERR "n2: I/O port region in use\n");
+		n2_destroy_card(card);
+		return -EBUSY;
+	}
+	card->io = io;
+
+	if (request_irq(irq, &sca_intr, 0, devname, card)) {
+		printk(KERN_ERR "n2: could not allocate IRQ\n");
+		n2_destroy_card(card);
+		return(-EBUSY);
+	}
+	card->irq = irq;
+
+	if (!request_mem_region(winbase, USE_WINDOWSIZE, devname)) {
+		printk(KERN_ERR "n2: could not request RAM window\n");
+		n2_destroy_card(card);
+		return(-EBUSY);
+	}
+	card->phy_winbase = winbase;
+	card->winbase = ioremap(winbase, USE_WINDOWSIZE);
+
+	outb(0, io + N2_PCR);		/* reset board control */
+	outb(winbase >> 12, io + N2_BAR);	/* window physical address */
+
+	switch (USE_WINDOWSIZE) {
+	case 16384:
+		outb(WIN16K, io + N2_PSR);
+		break;
+
+	case 32768:
+		outb(WIN32K, io + N2_PSR);
+		break;
+
+	case 65536:
+		outb(WIN64K, io + N2_PSR);
+		break;
+
+	default:
+		printk(KERN_ERR "n2: invalid window size\n");
+		n2_destroy_card(card);
+		return -ENODEV;
+	}
+
+	pcr = PCR_ENWIN | PCR_VPM | (USE_BUS16BITS ? PCR_BUS16 : 0);
+	outb(pcr, io + N2_PCR);
+
+	cnt = n2_count_page(card);	/* size and test on-board RAM */
+	if (!cnt) {
+		printk(KERN_ERR "n2: memory test failed.\n");
+		n2_destroy_card(card);
+		return -EIO;
+	}
+
+	card->ram_size = cnt * USE_WINDOWSIZE;
+
+	/* 4 rings required for 2 ports, 2 rings for one port */
+	card->ring_buffers = card->ram_size /
+		((valid0 + valid1) * 2 * (sizeof(pkt_desc) + HDLC_MAX_MRU));
+
+	card->buff_offset = (valid0 + valid1) * 2 * (sizeof(pkt_desc))
+		* card->ring_buffers;
+
+	printk(KERN_DEBUG "n2: RISCom/N2 %u KB RAM, IRQ%u, "
+	       "using %u packets rings\n", card->ram_size / 1024, card->irq,
+	       card->ring_buffers);
+
+	pcr |= PCR_RUNSCA;		/* run SCA */
+	outb(pcr, io + N2_PCR);
+	outb(0, io + N2_MCR);
+
+	sca_init(card, 0);
+	for (cnt = 0; cnt < 2; cnt++) {
+		port_t *port = &card->ports[cnt];
+
+		if ((cnt == 0 && !valid0) || (cnt == 1 && !valid1))
+			continue;	/* port disabled on command line */
+
+		port->phy_node = cnt;
+		port->valid = 1;
+
+		if ((cnt == 1) && valid0)
+			port->log_node = 1;
+
+		spin_lock_init(&port->lock);
+		hdlc_to_dev(&port->hdlc)->irq = irq;
+		hdlc_to_dev(&port->hdlc)->mem_start = winbase;
+		hdlc_to_dev(&port->hdlc)->mem_end = winbase + USE_WINDOWSIZE-1;
+		hdlc_to_dev(&port->hdlc)->tx_queue_len = 50;
+		port->hdlc.ioctl = n2_ioctl;
+		port->hdlc.open = n2_open;
+		port->hdlc.close = n2_close;
+		port->hdlc.xmit = sca_xmit;
+
+		if (register_hdlc_device(&port->hdlc)) {
+			printk(KERN_WARNING "n2: unable to register hdlc "
+			       "device\n");
+			n2_destroy_card(card);
+			return -ENOBUFS;
+		}
+		port->card = card;	/* marks the port as registered */
+		sca_init_sync_port(port);	/* Set up SCA memory */
+
+		printk(KERN_INFO "%s: RISCom/N2 node %d\n",
+		       hdlc_to_name(&port->hdlc), port->phy_node);
+	}
+
+	/* append to the driver's card list (tail pointer idiom) */
+	*new_card = card;
+	new_card = &card->next_card;
+
+	return 0;
+}
+
+
+
+/* Module init: parse the hw=io,irq,ram,ports[:io,irq,...] parameter
+ * string and call n2_run() once per card description.  Returns 0 if at
+ * least one card came up (first_card non-NULL), -ENOSYS otherwise. */
+static int __init n2_init(void)
+{
+	if (hw==NULL) {
+#ifdef MODULE
+		printk(KERN_INFO "n2: no card initialized\n");
+#endif
+		return -ENOSYS;	/* no parameters specified, abort */
+	}
+
+	printk(KERN_INFO "%s\n", version);
+
+	do {
+		unsigned long io, irq, ram;
+		long valid[2] = { 0, 0 }; /* Default = both ports disabled */
+
+		io = simple_strtoul(hw, &hw, 0);
+
+		if (*hw++ != ',')
+			break;	/* malformed - fall through to error */
+		irq = simple_strtoul(hw, &hw, 0);
+
+		if (*hw++ != ',')
+			break;
+		ram = simple_strtoul(hw, &hw, 0);
+
+		if (*hw++ != ',')
+			break;
+		while(1) {	/* ports list: digits '0' and/or '1' */
+			if (*hw == '0' && !valid[0])
+				valid[0] = 1; /* Port 0 enabled */
+			else if (*hw == '1' && !valid[1])
+				valid[1] = 1; /* Port 1 enabled */
+			else
+				break;
+			hw++;
+		}
+
+		if (!valid[0] && !valid[1])
+			break;	/* at least one port must be used */
+
+		if (*hw == ':' || *hw == '\x0')
+			n2_run(io, irq, ram, valid[0], valid[1]);
+
+		if (*hw == '\x0')
+			return 0;	/* end of parameter string */
+	}while(*hw++ == ':');	/* ':' separates card descriptions */
+
+	printk(KERN_ERR "n2: invalid hardware parameters\n");
+	return first_card ? 0 : -ENOSYS;
+}
+
+
+#ifndef MODULE
+/* Built-in (non-module) case: capture the "n2=..." kernel command-line
+ * string for n2_init() to parse later. */
+static int __init n2_setup(char *str)
+{
+	hw = str;
+	return 1;
+}
+
+__setup("n2=", n2_setup);
+#endif
+
+
+/* Module exit: walk the card list and destroy every card, saving the
+ * next pointer before each card structure is freed. */
+static void __exit n2_cleanup(void)
+{
+	card_t *card = first_card;
+
+	while (card) {
+		card_t *ptr = card;
+		card = card->next_card;
+		n2_destroy_card(ptr);
+	}
+}
+
+
+module_init(n2_init);
+module_exit(n2_cleanup);
+
+MODULE_AUTHOR("Krzysztof Halasa <khc@pm.waw.pl>");
+MODULE_DESCRIPTION("RISCom/N2 serial port driver");
+MODULE_PARM(hw, "s"); /* hw=io,irq,ram,ports:io,irq,... */
+EXPORT_NO_SYMBOLS;
if(test_and_set_bit(0, (void*)&card->wandev.critical)) {
- printk(KERN_INFO "%s: Critical in if_send: %x\n",
+ printk(KERN_INFO "%s: Critical in if_send: %lx\n",
card->wandev.name,card->wandev.critical);
++card->wandev.stats.tx_dropped;
#ifdef LINUX_2_1
*/
if(card->hw.type != SDLA_S514) {
if(test_and_set_bit(0, (void*)&card->wandev.critical)) {
- printk(KERN_INFO "%s: Critical while in ISR: %x\n",
+ printk(KERN_INFO "%s: Critical while in ISR: %lx\n",
card->devname, card->wandev.critical);
card->in_isr = 0;
return;
++card->statistics.isr_intr_test;
break;
- case FR_INTR_DLC: /* Event interrupt occured */
+ case FR_INTR_DLC: /* Event interrupt occurred */
mbox->cmd.command = FR_READ_STATUS;
mbox->cmd.length = 0;
err = sdla_exec(mbox) ? mbox->cmd.result : CMD_TIMEOUT;
/*============================================================================
* Receive interrupt handler.
* When a receive interrupt occurs do the following:
- * 1- Find the structure for the dlci that the interrupt occured on
+ * 1- Find the structure for the dlci that the interrupt occurred on
* 2- If it doesn't exist then print appropriate msg and goto step 8.
* 3- If it exist then copy data to a skb.
* 4- If skb contains Sangoma UDP data then process them
if (test_and_set_bit(0, (void*)&card->wandev.critical)) {
- printk(KERN_INFO "%s: Critical in if_send: %x\n",
+ printk(KERN_INFO "%s: Critical in if_send: %lx\n",
card->wandev.name,card->wandev.critical);
dev_kfree_skb(skb);
card->devname);
++card->wandev.stats.rx_dropped;
++ppp_priv_area->rx_intr_stat.rx_intr_no_socket;
-
- dev_kfree_skb(skb);
}
} else {
}
ppp_priv_area->udp_pkt_lgth = 0;
-
- return;
}
/*=============================================================================
card->devname, err);
return err;
}else
- printk (KERN_INFO "%s: PPP Deleting dynamic route %s successfuly\n",
+ printk (KERN_INFO "%s: PPP Deleting dynamic route %s successfully\n",
card->devname, in_ntoa(ip_addr));
*
* Notes:
* 1. When allocating a socket buffer, if M-bit is set then more data is
- * comming and we have to allocate buffer for the maximum IP packet size
+ * coming and we have to allocate buffer for the maximum IP packet size
* expected on this channel.
* 2. If something goes wrong and X.25 packet has to be dropped (e.g. no
* socket buffers available) the whole packet sequence must be discarded.
memcpy(bufptr, rxmb->data, len);
if (qdm & 0x01)
- return; /* more data is comming */
+ return; /* more data is coming */
dev->last_rx = jiffies; /* timestamp */
chan->rx_skb = NULL; /* dequeue packet */
{
wanpipe_set_state(card, WAN_CONNECTED);
x25_set_intr_mode(card, 0x83); /* enable Rx interrupts */
- status->imask &= ~0x2; /* mask Tx interupts */
+ status->imask &= ~0x2; /* mask Tx interrupts */
}
else if ((jiffies - card->state_tick) > CONNECT_TIMEOUT)
disconnect(card);
* Note: All data must be explicitly initialized!!!
*/
+static struct pci_device_id sdladrv_pci_tbl[] __initdata = {
+ { V3_VENDOR_ID, V3_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID, },
+ { } /* Terminating entry */
+};
+MODULE_DEVICE_TABLE(pci, sdladrv_pci_tbl);
+
/* private data */
static char modname[] = "sdladrv";
static char fullname[] = "SDLA Support Module";
* o verify user buffer
* o copy adapter memory image to user buffer
*
- * Note: when dumping memory, this routine switches curent dual-port memory
+ * Note: when dumping memory, this routine switches current dual-port memory
* vector, so care must be taken to avoid racing conditions.
*/
static int ioctl_dump (sdla_t* card, sdla_dump_t* u_dump)
#include <asm/io.h>
#include <asm/dma.h>
#include <asm/byteorder.h>
-#include "syncppp.h"
+#include <net/syncppp.h>
#include "z85230.h"
* it right now.
*/
netif_rx(skb);
+ c->netdevice->last_rx = jiffies;
}
/*
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/sched.h>
+#include <linux/init.h>
#include <linux/if_arp.h>
#include <linux/skbuff.h>
#include <linux/route.h>
#include <linux/pkt_sched.h>
#include <asm/byteorder.h>
#include <linux/spinlock.h>
-#include "syncppp.h"
+#include <net/syncppp.h>
#define MAXALIVECNT 6 /* max. alive packets */
static int debug = 0;
-MODULE_PARM(debug,"1i");
/*
* Interface down stub
skb->dev=dev;
skb->mac.raw=skb->data;
-
+
if (dev->flags & IFF_RUNNING)
{
/* Count received bytes, add FCS and one flag */
};
-void sync_ppp_init(void)
+
+static int __init sync_ppp_init(void)
{
+ if(debug)
+ debug=PP_DEBUG;
printk(KERN_INFO "Cronyx Ltd, Synchronous PPP and CISCO HDLC (c) 1994\n");
printk(KERN_INFO "Linux port (c) 1998 Building Number Three Ltd & Jan \"Yenya\" Kasprzak.\n");
spin_lock_init(&spppq_lock);
sppp_packet_type.type=htons(ETH_P_WAN_PPP);
dev_add_pack(&sppp_packet_type);
-}
-
-#ifdef MODULE
-
-int init_module(void)
-{
- if(debug)
- debug=PP_DEBUG;
- sync_ppp_init();
return 0;
}
-void cleanup_module(void)
+
+static void __exit sync_ppp_cleanup(void)
{
dev_remove_pack(&sppp_packet_type);
}
-#endif
+module_init(sync_ppp_init);
+module_exit(sync_ppp_cleanup);
+MODULE_PARM(debug,"1i");
+++ /dev/null
-/*
- * Defines for synchronous PPP/Cisco link level subroutines.
- *
- * Copyright (C) 1994 Cronyx Ltd.
- * Author: Serge Vakulenko, <vak@zebub.msk.su>
- *
- * This software is distributed with NO WARRANTIES, not even the implied
- * warranties for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
- *
- * Authors grant any other persons or organizations permission to use
- * or modify this software as long as this message is kept with the software,
- * all derivative works or modified versions.
- *
- * Version 1.7, Wed Jun 7 22:12:02 MSD 1995
- *
- *
- *
- */
-
-#ifndef _SYNCPPP_H_
-#define _SYNCPPP_H_ 1
-
-#ifdef __KERNEL__
-struct slcp {
- u16 state; /* state machine */
- u32 magic; /* local magic number */
- u_char echoid; /* id of last keepalive echo request */
- u_char confid; /* id of last configuration request */
-};
-
-struct sipcp {
- u16 state; /* state machine */
- u_char confid; /* id of last configuration request */
-};
-
-struct sppp
-{
- struct sppp * pp_next; /* next interface in keepalive list */
- u32 pp_flags; /* use Cisco protocol instead of PPP */
- u16 pp_alivecnt; /* keepalive packets counter */
- u16 pp_loopcnt; /* loopback detection counter */
- u32 pp_seq; /* local sequence number */
- u32 pp_rseq; /* remote sequence number */
- struct slcp lcp; /* LCP params */
- struct sipcp ipcp; /* IPCP params */
- u32 ibytes,obytes; /* Bytes in/out */
- u32 ipkts,opkts; /* Packets in/out */
- struct timer_list pp_timer;
- struct net_device *pp_if;
- char pp_link_state; /* Link status */
-};
-
-struct ppp_device
-{
- struct net_device *dev; /* Network device pointer */
- struct sppp sppp; /* Synchronous PPP */
-};
-
-#define sppp_of(dev) \
- (&((struct ppp_device *)(*(unsigned long *)((dev)->priv)))->sppp)
-
-#define PP_KEEPALIVE 0x01 /* use keepalive protocol */
-#define PP_CISCO 0x02 /* use Cisco protocol instead of PPP */
-#define PP_TIMO 0x04 /* cp_timeout routine active */
-#define PP_DEBUG 0x08
-
-#define PPP_MTU 1500 /* max. transmit unit */
-
-#define LCP_STATE_CLOSED 0 /* LCP state: closed (conf-req sent) */
-#define LCP_STATE_ACK_RCVD 1 /* LCP state: conf-ack received */
-#define LCP_STATE_ACK_SENT 2 /* LCP state: conf-ack sent */
-#define LCP_STATE_OPENED 3 /* LCP state: opened */
-
-#define IPCP_STATE_CLOSED 0 /* IPCP state: closed (conf-req sent) */
-#define IPCP_STATE_ACK_RCVD 1 /* IPCP state: conf-ack received */
-#define IPCP_STATE_ACK_SENT 2 /* IPCP state: conf-ack sent */
-#define IPCP_STATE_OPENED 3 /* IPCP state: opened */
-
-#define SPPP_LINK_DOWN 0 /* link down - no keepalive */
-#define SPPP_LINK_UP 1 /* link is up - keepalive ok */
-
-void sppp_attach (struct ppp_device *pd);
-void sppp_detach (struct net_device *dev);
-void sppp_input (struct net_device *dev, struct sk_buff *m);
-int sppp_do_ioctl (struct net_device *dev, struct ifreq *ifr, int cmd);
-struct sk_buff *sppp_dequeue (struct net_device *dev);
-int sppp_isempty (struct net_device *dev);
-void sppp_flush (struct net_device *dev);
-int sppp_open (struct net_device *dev);
-int sppp_reopen (struct net_device *dev);
-int sppp_close (struct net_device *dev);
-void sync_ppp_init (void);
-#endif
-
-#define SPPPIOCCISCO (SIOCDEVPRIVATE)
-#define SPPPIOCPPP (SIOCDEVPRIVATE+1)
-#define SPPPIOCDEBUG (SIOCDEVPRIVATE+2)
-
-#endif /* _SYNCPPP_H_ */
#include <linux/if_arp.h>
#include <linux/delay.h>
#include <linux/ioport.h>
+#include <linux/init.h>
#include <asm/dma.h>
#include <asm/io.h>
#define RT_LOCK
#define RT_UNLOCK
#include <linux/spinlock.h>
+#include <net/syncppp.h>
#include "z85230.h"
-#include "syncppp.h"
static spinlock_t z8530_buffer_lock = SPIN_LOCK_UNLOCKED;
* 5uS delay rule.
*/
-extern __inline__ int z8530_read_port(unsigned long p)
+static inline int z8530_read_port(unsigned long p)
{
u8 r=inb(Z8530_PORT_OF(p));
if(p&Z8530_PORT_SLEEP) /* gcc should figure this out efficiently ! */
*/
-extern __inline__ void z8530_write_port(unsigned long p, u8 d)
+static inline void z8530_write_port(unsigned long p, u8 d)
{
outb(d,Z8530_PORT_OF(p));
if(p&Z8530_PORT_SLEEP)
* operation.
*/
-extern inline u8 read_zsreg(struct z8530_channel *c, u8 reg)
+static inline u8 read_zsreg(struct z8530_channel *c, u8 reg)
{
u8 r;
unsigned long flags;
* have all the 5uS delays to worry about.
*/
-extern inline u8 read_zsdata(struct z8530_channel *c)
+static inline u8 read_zsdata(struct z8530_channel *c)
{
u8 r;
r=z8530_read_port(c->dataio);
* being fast to access.
*/
-extern inline void write_zsreg(struct z8530_channel *c, u8 reg, u8 val)
+static inline void write_zsreg(struct z8530_channel *c, u8 reg, u8 val)
{
unsigned long flags;
save_flags(flags);
* Write directly to the control register on the Z8530
*/
-extern inline void write_zsctrl(struct z8530_channel *c, u8 val)
+static inline void write_zsctrl(struct z8530_channel *c, u8 val)
{
z8530_write_port(c->ctrlio, val);
}
*/
-extern inline void write_zsdata(struct z8530_channel *c, u8 val)
+static inline void write_zsdata(struct z8530_channel *c, u8 val)
{
z8530_write_port(c->dataio, val);
}
* z8530_status - Handle a PIO status exception
* @chan: Z8530 channel to process
*
- * A status event occured in PIO synchronous mode. There are several
+ * A status event occurred in PIO synchronous mode. There are several
* reasons the chip will bother us here. A transmit underrun means we
* failed to feed the chip fast enough and just broke a packet. A DCD
* change is a line up or down. We communicate that back to the protocol
* z8530_dma_status - Handle a DMA status exception
* @chan: Z8530 channel to process
*
- * A status event occured on the Z8530. We receive these for two reasons
+ * A status event occurred on the Z8530. We receive these for two reasons
* when in DMA mode. Firstly if we finished a packet transfer we get one
* and kick the next packet out. Secondly we may see a DCD change and
* have to poke the protocol layer.
* thing can only DMA within a 64K block not across the edges of it.
*/
-extern inline int spans_boundary(struct sk_buff *skb)
+static inline int spans_boundary(struct sk_buff *skb)
{
unsigned long a=(unsigned long)skb->data;
a^=(a+skb->len);
EXPORT_SYMBOL(z8530_get_stats);
-#ifdef MODULE
-
/*
* Module support
*/
-
-int init_module(void)
+static const char banner[] __initdata = KERN_INFO "Generic Z85C30/Z85230 interface driver v0.02\n";
+
+static int __init z85230_init_driver(void)
{
- printk(KERN_INFO "Generic Z85C30/Z85230 interface driver v0.02\n");
+ printk(banner);
return 0;
}
+module_init(z85230_init_driver);
-void cleanup_module(void)
+static void __exit z85230_cleanup_driver(void)
{
}
-
-#endif
+module_exit(z85230_cleanup_driver);
#undef DEBUG_RX_INFO /* header of the received packet */
#undef DEBUG_RX_FAIL /* Normal failure conditions */
#define DEBUG_RX_ERROR /* Unexpected conditions */
-#undef DEBUG_PACKET_DUMP 32 /* Dump packet on the screen. */
+
+#undef DEBUG_PACKET_DUMP /* Dump packet on the screen if defined to 32. */
#undef DEBUG_IOCTL_TRACE /* misc. call by Linux */
#undef DEBUG_IOCTL_INFO /* various debugging info */
#define DEBUG_IOCTL_ERROR /* what's going wrong */
static void mdio_write(struct net_device *dev, int phy_id, int location, int value)
{
- struct netdev_private *np = (struct netdev_private *)dev->priv;
+ struct netdev_private *np = dev->priv;
long mdio_addr = dev->base_addr + MIICtrl;
int mii_cmd = (0x5002 << 16) | (phy_id << 23) | (location<<18) | value;
int i;
\f
static int netdev_open(struct net_device *dev)
{
- struct netdev_private *np = (struct netdev_private *)dev->priv;
+ struct netdev_private *np = dev->priv;
long ioaddr = dev->base_addr;
int i;
static void check_duplex(struct net_device *dev)
{
- struct netdev_private *np = (struct netdev_private *)dev->priv;
+ struct netdev_private *np = dev->priv;
int mii_reg5 = mdio_read(dev, np->phys[0], 5);
int negotiated = mii_reg5 & np->advertising;
int duplex;
static void netdev_timer(unsigned long data)
{
struct net_device *dev = (struct net_device *)data;
- struct netdev_private *np = (struct netdev_private *)dev->priv;
+ struct netdev_private *np = dev->priv;
long ioaddr = dev->base_addr;
int next_tick = 10*HZ;
int old_csr6 = np->csr6;
static void init_rxtx_rings(struct net_device *dev)
{
- struct netdev_private *np = (struct netdev_private *)dev->priv;
+ struct netdev_private *np = dev->priv;
int i;
np->rx_head_desc = &np->rx_ring[0];
static void init_registers(struct net_device *dev)
{
- struct netdev_private *np = (struct netdev_private *)dev->priv;
+ struct netdev_private *np = dev->priv;
long ioaddr = dev->base_addr;
int i;
static void tx_timeout(struct net_device *dev)
{
- struct netdev_private *np = (struct netdev_private *)dev->priv;
+ struct netdev_private *np = dev->priv;
long ioaddr = dev->base_addr;
printk(KERN_WARNING "%s: Transmit timed out, status %8.8x,"
/* Initialize the Rx and Tx rings, along with various 'dev' bits. */
static int alloc_ring(struct net_device *dev)
{
- struct netdev_private *np = (struct netdev_private *)dev->priv;
+ struct netdev_private *np = dev->priv;
np->rx_buf_sz = (dev->mtu <= 1500 ? PKT_BUF_SZ : dev->mtu + 32);
static int start_tx(struct sk_buff *skb, struct net_device *dev)
{
- struct netdev_private *np = (struct netdev_private *)dev->priv;
+ struct netdev_private *np = dev->priv;
unsigned entry;
int len1, len2;
static void intr_handler(int irq, void *dev_instance, struct pt_regs *rgs)
{
struct net_device *dev = (struct net_device *)dev_instance;
- struct netdev_private *np = (struct netdev_private *)dev->priv;
+ struct netdev_private *np = dev->priv;
long ioaddr = dev->base_addr;
int work_limit = max_interrupt_work;
for clarity and better register allocation. */
static int netdev_rx(struct net_device *dev)
{
- struct netdev_private *np = (struct netdev_private *)dev->priv;
+ struct netdev_private *np = dev->priv;
int entry = np->cur_rx % RX_RING_SIZE;
int work_limit = np->dirty_rx + RX_RING_SIZE - np->cur_rx;
static void netdev_error(struct net_device *dev, int intr_status)
{
long ioaddr = dev->base_addr;
- struct netdev_private *np = (struct netdev_private *)dev->priv;
+ struct netdev_private *np = dev->priv;
if (debug > 2)
printk(KERN_DEBUG "%s: Abnormal event, %8.8x.\n",
static struct net_device_stats *get_stats(struct net_device *dev)
{
long ioaddr = dev->base_addr;
- struct netdev_private *np = (struct netdev_private *)dev->priv;
+ struct netdev_private *np = dev->priv;
/* The chip only need report frame silently dropped. */
if (netif_running(dev))
static void set_rx_mode(struct net_device *dev)
{
- struct netdev_private *np = (struct netdev_private *)dev->priv;
+ struct netdev_private *np = dev->priv;
long ioaddr = dev->base_addr;
u32 mc_filter[2]; /* Multicast hash filter */
u32 rx_mode;
static int netdev_close(struct net_device *dev)
{
long ioaddr = dev->base_addr;
- struct netdev_private *np = (struct netdev_private *)dev->priv;
+ struct netdev_private *np = dev->priv;
int i;
netif_stop_queue(dev);
np->chip_id = chip_idx;
np->drv_flags = drv_flags;
- if (dev->mem_start && dev->mem_start != ~0)
+ if (dev->mem_start)
option = dev->mem_start;
/* The lower four bits are the media type. */
* Andrea Arcangeli
*/
+#undef DEBUG /* undef for production */
+
#include <linux/module.h>
#include <linux/init.h>
#include <linux/sched.h>
#include <asm/parport_gsc.h>
-#undef DEBUG /* undef for production */
-
-#ifdef DEBUG
-#define DPRINTK printk
-#else
-#define DPRINTK(stuff...)
-#endif
+MODULE_AUTHOR("Helge Deller <deller@gmx.de>");
+MODULE_DESCRIPTION("HP-PARISC PC-style parallel port driver");
+MODULE_SUPPORTED_DEVICE("integrated PC-style parallel port");
/*
static void parport_gsc_interrupt(int irq, void *dev_id, struct pt_regs *regs)
{
- DPRINTK(__FILE__ ": got IRQ\n");
parport_generic_irq(irq, (struct parport *) dev_id, regs);
}
void parport_gsc_write_data(struct parport *p, unsigned char d)
{
- DPRINTK(__FILE__ ": write (0x%02x) %c \n", d, d);
parport_writeb (d, DATA (p));
}
unsigned char parport_gsc_read_data(struct parport *p)
{
-#ifdef DEBUG
unsigned char c = parport_readb (DATA (p));
- DPRINTK(__FILE__ ": read (0x%02x) %c\n", c,c);
return c;
-#else
- return parport_readb (DATA (p));
-#endif
}
void parport_gsc_write_control(struct parport *p, unsigned char d)
/* Take this out when drivers have adapted to the newer interface. */
if (d & 0x20) {
- printk (KERN_DEBUG "%s (%s): use data_reverse for this!\n",
- p->name, p->cad->name);
+ pr_debug("%s (%s): use data_reverse for this!\n",
+ p->name, p->cad->name);
parport_gsc_data_reverse (p);
}
/* Take this out when drivers have adapted to the newer interface. */
if (mask & 0x20) {
- printk (KERN_DEBUG "%s (%s): use data_%s for this!\n",
+ pr_debug("%s (%s): use data_%s for this!\n",
p->name, p->cad->name,
(val & 0x20) ? "reverse" : "forward");
if (val & 0x20)
void parport_gsc_inc_use_count(void)
{
-#ifdef MODULE
MOD_INC_USE_COUNT;
-#endif
}
void parport_gsc_dec_use_count(void)
{
-#ifdef MODULE
MOD_DEC_USE_COUNT;
-#endif
}
struct parport tmp;
struct parport *p = &tmp;
+#if 1
+#warning Take this out when region handling works again, <deller@gmx.de>
+#else
if (check_region(base, 3))
return NULL;
+#endif
priv = kmalloc (sizeof (struct parport_gsc_private), GFP_KERNEL);
if (!priv) {
static int __initdata parport_count;
-static int __init
-parport_init_chip(struct hp_device *d, struct pa_iodc_driver *dri)
+static int __init parport_init_chip(struct hp_device *d, struct pa_iodc_driver *dri)
{
unsigned long port;
int irq;
{ 0 }
};
-int __init
-parport_gsc_init ( void )
+int __init parport_gsc_init(void)
{
parport_count = 0;
register_driver(parport_drivers_for);
- return parport_count;
+ return 0;
}
-/* Exported symbols. */
-EXPORT_NO_SYMBOLS;
-
-#ifdef MODULE
-
-MODULE_AUTHOR("Helge Deller <deller@gmx.de>");
-MODULE_DESCRIPTION("HP-PARISC PC-style parallel port driver");
-MODULE_SUPPORTED_DEVICE("integrated PC-style parallel port");
-
-int init_module(void)
+static int __init parport_gsc_init_module(void)
{
- return !parport_gsc_init ();
+ return !parport_gsc_init();
}
-void cleanup_module(void)
+static void __exit parport_gsc_exit_module(void)
{
struct parport *p = parport_enumerate(), *tmp;
while (p) {
p = tmp;
}
}
-#endif
+
+EXPORT_NO_SYMBOLS;
+
+module_init(parport_gsc_init_module);
+module_exit(parport_gsc_exit_module);
pcibios_set_master(dev);
}
+int
+pci_set_dma_mask(struct pci_dev *dev, dma_addr_t mask)
+{
+ if(! pci_dma_supported(dev, mask))
+ return -EIO;
+
+ dev->dma_mask = mask;
+
+ return 0;
+}
+
+
/*
* Translate the low bits of the PCI base
* to the resource type
EXPORT_SYMBOL(pci_find_slot);
EXPORT_SYMBOL(pci_find_subsys);
EXPORT_SYMBOL(pci_set_master);
+EXPORT_SYMBOL(pci_set_dma_mask);
EXPORT_SYMBOL(pci_set_power_state);
EXPORT_SYMBOL(pci_assign_resource);
EXPORT_SYMBOL(pci_register_driver);
1de1 3904 DC390F Ultra Wide SCSI Controller
0012 53c895a
0020 53c1010 Ultra3 SCSI Adapter
+ 0021 53c1010 66MHz Ultra3 SCSI Adapter
008f 53c875J
1092 8000 FirePort 40 SCSI Controller
1092 8760 FirePort 40 Dual SCSI Host Adapter
109e Brooktree Corporation
0350 Bt848 TV with DMA push
0351 Bt849A Video capture
- 036c Bt879(??) Video Capture
+ 036c Bt879(?) Video Capture
13e9 0070 Win/TV (Video Section)
036e Bt878
0070 13eb WinTV/GO
# 1507 HTEC Ltd
# Commented out because there are no known HTEC chips and 1507 is already
# used by mistake by Motorola (see vendor ID 1057)
-1507 Motorola ?? / HTEC
+1507 Motorola ? / HTEC
0001 MPC105 [Eagle]
0002 MPC106 [Grackle]
0003 MPC8240 [Kahlua]
270b Xantel Corporation
270f Chaintech Computer Co. Ltd
2711 AVID Technology Inc.
-2a15 3D Vision(???)
+2a15 3D Vision(?)
3000 Hansol Electronics Inc.
3142 Post Impression Systems.
3388 Hint Corp
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/timer.h>
+#include <linux/proc_fs.h>
#define IN_CARD_SERVICES
#include <pcmcia/cs_types.h>
#include <linux/slab.h>
#include <linux/ioport.h>
#include <linux/timer.h>
+#include <linux/proc_fs.h>
#include <asm/irq.h>
#include <asm/io.h>
base, base+num-1);
bad = fail = 0;
step = (num < 0x20000) ? 0x2000 : ((num>>4) & ~0x1fff);
- for (i = base; i < base+num; i = j + step) {
+ for (i = j = base; i < base+num; i = j + step) {
if (!fail) {
for (j = i; j < base+num; j += step)
if ((check_mem_resource(j, step) == 0) && is_valid(j))
#include <linux/timer.h>
#include <linux/ioport.h>
#include <linux/delay.h>
+#include <linux/proc_fs.h>
#include <pcmcia/version.h>
#include <pcmcia/cs_types.h>
MODULE_PARM_DESC (iucv,
"Specify the userids associated with iucv0-iucv9:\n"
"iucv=userid1,userid2,...,userid10\n");
-#ifdef MODVERSIONS
-#include <linux/modversions.h>
-#endif
#else
#define MOD_INC_USE_COUNT
#define MOD_DEC_USE_COUNT
change_speed = ((port->flags & ASYNC_SPD_MASK) !=
(tmp.flags & ASYNC_SPD_MASK));
- if (!suser()) {
+ if (!capable(CAP_SYS_ADMIN)) {
if ((tmp.close_delay != port->close_delay) ||
(tmp.closing_wait != port->closing_wait) ||
((tmp.flags & ~ASYNC_USR_MASK) !=
-/* $Id: flash.c,v 1.22 2001/02/13 01:17:00 davem Exp $
+/* $Id: flash.c,v 1.23 2001/03/02 06:32:40 davem Exp $
* flash.c: Allow mmap access to the OBP Flash, for OBP updates.
*
* Copyright (C) 1997 Eddie C. Dost (ecd@skynet.be)
size_t count, loff_t *ppos)
{
unsigned long p = file->f_pos;
+ int i;
if (count > flash.read_size - p)
count = flash.read_size - p;
- if (copy_to_user(buf, flash.read_base + p, count) < 0)
- return -EFAULT;
+ for (i = 0; i < count; i++) {
+ u8 data = readb(flash.read_base + p + i);
+ if (put_user(data, buf))
+ return -EFAULT;
+ buf++;
+ }
file->f_pos += count;
return count;
vfc_capture_poll(dev);
break;
case DIAGMODE:
- if(suser()) {
+ if(capable(CAP_SYS_ADMIN)) {
vfc_lock_device(dev);
dev->control_reg |= VFC_CONTROL_DIAGMODE;
sbus_writel(dev->control_reg, &dev->regs->control);
+Mon Feb 12 22:30 2001 Gerard Roudier (groudier@club-internet.fr)
+ * version ncr53c8xx-3.4.3
+ - Call pci_enable_device() as AC wants this to be done.
+ - Get both the BAR cookies actual and PCI BAR values.
+ (see Changelog.sym53c8xx rev. 1.7.3 for details)
+ - Merge changes for linux-2.4 that declare the host template
+ in the driver object also when the driver is statically
+ linked with the kernel.
+
+Sun Sep 24 21:30 2000 Gerard Roudier (groudier@club-internet.fr)
+ * version ncr53c8xx-3.4.2
+ - See Changelog.sym53c8xx, driver version 1.7.2.
+
+Wed Jul 26 23:30 2000 Gerard Roudier (groudier@club-internet.fr)
+ * version ncr53c8xx-3.4.1
+ - Provide OpenFirmware path through the proc FS on PPC.
+ - Remove trailing argument #2 from a couple of #undefs.
+
+Sun Jul 09 16:30 2000 Gerard Roudier (groudier@club-internet.fr)
+ * version ncr53c8xx-3.4.0
+ - Remove the PROFILE C and SCRIPTS code.
+ This facility was not that useful and thus was no longer
+ desirable given the increasing complexity of the driver code.
+ - Merges from FreeBSD sym-1.6.2 driver:
+ * Clarify memory barriers needed by the driver for architectures
+ that implement a weak memory ordering.
+ - General cleanup:
+ Move definitions for barriers and IO/MMIO operations to the
+ sym53c8xx_defs.h header files. They are now shared by
+ both drivers.
+ Use SCSI_NCR_IOMAPPED instead of NCR_IOMAPPED.
+
Thu May 11 12:30 2000 Pam Delaney (pam.delaney@lsil.com)
* revision 3.3b
+Sun Mar 4 18:30 2001 Gerard Roudier (groudier@club-internet.fr)
+ * version sym53c8xx-1.7.3a
+ - Fix an issue in the ncr_int_udc() (unexpected disconnect)
+ handling. If the DSA didn't match a CCB, a bad write to
+ memory could happen.
+
+Mon Feb 12 22:30 2001 Gerard Roudier (groudier@club-internet.fr)
+ * version sym53c8xx-1.7.3
+ - Support for hppa.
+ Tiny patch sent to me by Robert Hirst.
+ - Tiny patch for ia64 sent to me by Pamela Delaney.
+
+Tue Feb 6 13:30 2001 Gerard Roudier (groudier@club-internet.fr)
+ * version sym53c8xx-1.7.3-pre1
+ - Call pci_enable_device() as AC wants this to be done.
+ - Get both the BAR cookies used by CPU and actual PCI BAR
+ values used from SCRIPTS. Recent PCI chips are able to
+ access themselves using internal cycles, but they compare
+ BAR values to destination address to make decision.
+ Earlier chips simply use PCI transactions to access IO
+ registers from SCRIPTS.
+ The bus_dvma_to_mem() interface that reverses the actual
+ PCI BAR value from the BAR cookie is now useless.
+ This point had been discussed at the list and the solution
+ got approved by PCI code maintainer (Martin Mares).
+ - Merge changes for linux-2.4 that declare the host template
+ in the driver object also when the driver is statically
+ linked with the kernel.
+ - Increase SCSI message size up to 12 bytes, given that 8
+ bytes was not enough for the PPR message (fix).
+ - Add field 'maxoffs_st' (max offset for ST data transfers).
+ The C1010 supports offset 62 in DT mode but only 31 in
+ ST mode, so 2 different values for the max SCSI offset
+ are needed. Replace the obviously wrong masking of the
+ offset against 0x1f for ST mode by a lowering to
+ maxoffs_st of the SCSI offset in ST mode.
+ - Refine a work-around for the C1010-66. Revision 1 does
+ not require extra cycles in DT DATA OUT phase.
+ - Add a missing endian-ization (abrt_tbl.addr).
+ - Minor clean-up in the np structure for fields accessed
+ from SCRIPTS that requires special alignments.
+
+Sun Sep 24 21:30 2000 Gerard Roudier (groudier@club-internet.fr)
+ * version sym53c8xx-1.7.2
+ - Remove the hack for PPC added in previous driver version.
+ - Add FE_DAC feature bit to distinguish between 64 bit PCI
+ addressing (FE_DAC) and 64 bit PCI interface (FE_64BIT).
+ - Get rid of the boot command line "ultra:" argument.
+ This parameter wasn't that clever since we can use "sync:"
+ for Ultra/Ultra2 settings, and for Ultra3 we may want to
+ pass PPR options (for now only DT clocking).
+ - Add FE_VARCLK feature bit that indicates that SCSI clock
+ frequency may vary depending on board design and thus,
+ the driver should try to evaluate the SCSI clock.
+ - Simplify the way the driver determine the SCSI clock:
+ ULTRA3 -> 160 MHz, ULTRA2 -> 80 MHz otherwise 40 MHz.
+ Measure the SCSI clock frequency if FE_VARCLK is set.
+ - Remove FE_CLK80 feature bit that got useless.
+ - Add support for the SYM53C875A (Pamela Delaney).
+
+Wed Jul 26 23:30 2000 Gerard Roudier (groudier@club-internet.fr)
+ * version sym53c8xx-1.7.1
+ - Provide OpenFirmware path through the proc FS on PPC.
+ - Download of on-chip SRAM using memcpy_toio() doesn't work
+ on PPC. Restore previous method (MEMORY MOVE from SCRIPTS).
+ - Remove trailing argument #2 from a couple of #undefs.
+
+Sun Jul 09 16:30 2000 Gerard Roudier (groudier@club-internet.fr)
+ * version sym53c8xx-1.7.0
+ - Remove the PROFILE C and SCRIPTS code.
+ This facility was not that useful and thus was no longer
+ desirable given the increasing complexity of the driver code.
+ - Merges from FreeBSD sym-1.6.2 driver:
+ * Clarify memory barriers needed by the driver for architectures
+ that implement a weak memory ordering.
+ * Simpler handling of illegal phases and data overrun from
+ SCRIPTS. These errors are now immediately reported to
+ the C code by an interrupt.
+ * Sync the residual handling code with sym-1.6.2 and now
+ report `resid' to user for linux version >= 2.3.99
+ - General cleanup:
+ Move definitions for barriers and IO/MMIO operations to the
+ sym53c8xx_defs.h header files. They are now shared by
+ both drivers.
+ Remove unused options that claimed to optimize for the 896.
+ In fact, they were not that clever. :)
+ Use SCSI_NCR_IOMAPPED instead of NCR_IOMAPPED.
+ Remove a couple of unused fields from data structures.
+
Thu May 11 12:40 2000 Pam Delaney (pam.delaney@lsil.com)
* version sym53c8xx-1.6b
- Merged version.
obj-$(CONFIG_SCSI_DEBUG) += scsi_debug.o
obj-$(CONFIG_SCSI_FCAL) += fcal.o
+ifeq ($(CONFIG_ARCH_ACORN),y)
+mod-subdirs += ../acorn/scsi
+subdir-y += ../acorn/scsi
+obj-y += ../acorn/scsi/acorn-scsi.o
+endif
+
obj-$(CONFIG_CHR_DEV_ST) += st.o
obj-$(CONFIG_CHR_DEV_OSST) += osst.o
obj-$(CONFIG_BLK_DEV_SD) += sd_mod.o
obj-$(CONFIG_BLK_DEV_SR) += sr_mod.o
obj-$(CONFIG_CHR_DEV_SG) += sg.o
-
-
scsi_mod-objs := scsi.o hosts.o scsi_ioctl.o constants.o \
scsicam.o scsi_proc.o scsi_error.o \
scsi_obsolete.o scsi_queue.o scsi_lib.o \
* with ESP_CMD_DMA ...
*/
- /* figure out how much needs to be transfered */
+ /* figure out how much needs to be transferred */
hmuch = SCptr->SCp.this_residual;
ESPDATA(("hmuch<%d> pio ", hmuch));
esp->current_transfer_size = hmuch;
/* check int. status */
if (esp->ireg & ESP_INTR_DC) {
/* disconnect */
- ESPDATA(("disconnect; %d transfered ... ", i));
+ ESPDATA(("disconnect; %d transferred ... ", i));
break;
} else if (esp->ireg & ESP_INTR_FDONE) {
/* function done */
- ESPDATA(("function done; %d transfered ... ", i));
+ ESPDATA(("function done; %d transferred ... ", i));
break;
}
/* XXX fixme: bail out on stall */
if (fifo_stuck > 10) {
/* we're stuck */
- ESPDATA(("fifo stall; %d transfered ... ", i));
+ ESPDATA(("fifo stall; %d transferred ... ", i));
break;
}
}
if (thisphase == in_dataout)
hmuch += fifocnt; /* stuck?? adjust data pointer ...*/
- /* tell do_data_finale how much was transfered */
+ /* tell do_data_finale how much was transferred */
esp->current_transfer_size -= hmuch;
/* still not completely sure on this one ... */
if(TESTLO(DMASTAT, DFIFOEMP)) {
int data_count = (DATA_LEN - CURRENT_SC->resid) - GETSTCNT();
- DPRINTK(debug_datao, DEBUG_LEAD "datao: %d bytes to resend (%d written, %d transfered)\n",
+ DPRINTK(debug_datao, DEBUG_LEAD "datao: %d bytes to resend (%d written, %d transferred)\n",
CMDINFO(CURRENT_SC),
data_count,
DATA_LEN-CURRENT_SC->resid,
/* Boards 3,4 slots are reserved for ISAPnP/MCA scans */
-static unsigned int bases[MAXBOARDS] = {0x330, 0x334, 0, 0};
+static unsigned int bases[MAXBOARDS] __initdata = {0x330, 0x334, 0, 0};
-/* set by aha1542_setup according to the command line */
+/* set by aha1542_setup according to the command line; they also may
+ be marked __initdata, but require zero initializers then */
static int setup_called[MAXBOARDS];
static int setup_buson[MAXBOARDS];
static int setup_busoff[MAXBOARDS];
-static int setup_dmaspeed[MAXBOARDS] = { -1, -1, -1, -1 };
+static int setup_dmaspeed[MAXBOARDS] __initdata = { -1, -1, -1, -1 };
-static char *setup_str[MAXBOARDS];
+static char *setup_str[MAXBOARDS] __initdata;
/*
* LILO/Module params: aha1542=<PORTBASE>[,<BUSON>,<BUSOFF>[,<DMASPEED>]]
*/
#if defined(MODULE)
-int isapnp=0;
+int isapnp = 0;
int aha1542[] = {0x330, 11, 4, -1};
MODULE_PARM(aha1542, "1-4i");
MODULE_PARM(isapnp, "i");
-static struct isapnp_device_id id_table[] __devinitdata = {
+static struct isapnp_device_id id_table[] __initdata = {
{
ISAPNP_ANY_ID, ISAPNP_ANY_ID,
ISAPNP_VENDOR('A', 'D', 'P'), ISAPNP_FUNCTION(0x1542),
MODULE_DEVICE_TABLE(isapnp, id_table);
#else
-int isapnp=1;
+static int isapnp = 1;
#endif
#define BIOS_TRANSLATION_1632 0 /* Used by some old 1542A boards */
return scsierr | (hosterr << 16);
}
-static int aha1542_test_port(int bse, struct Scsi_Host *shpnt)
+static int __init aha1542_test_port(int bse, struct Scsi_Host *shpnt)
{
unchar inquiry_cmd[] = {CMD_INQUIRY};
unchar inquiry_result[4];
};
}
-int aha1542_queuecommand(Scsi_Cmnd * SCpnt, void (*done) (Scsi_Cmnd *))
+static int aha1542_queuecommand(Scsi_Cmnd * SCpnt, void (*done) (Scsi_Cmnd *))
{
unchar ahacmd = CMD_START_SCSI;
unchar direction;
SCpnt->SCp.Status++;
}
-int aha1542_command(Scsi_Cmnd * SCpnt)
+static int aha1542_command(Scsi_Cmnd * SCpnt)
{
DEB(printk("aha1542_command: ..calling aha1542_queuecommand\n"));
aha1542_intr_reset(bse);
}
-static int aha1542_getconfig(int base_io, unsigned char *irq_level, unsigned char *dma_chan, unsigned char *scsi_id)
+static int __init aha1542_getconfig(int base_io, unsigned char *irq_level, unsigned char *dma_chan, unsigned char *scsi_id)
{
unchar inquiry_cmd[] = {CMD_RETCONF};
unchar inquiry_result[3];
}
/* Query the board to find out if it is a 1542 or a 1740, or whatever. */
-static int aha1542_query(int base_io, int *transl)
+static int __init aha1542_query(int base_io, int *transl)
{
unchar inquiry_cmd[] = {CMD_INQUIRY};
unchar inquiry_result[4];
#endif
/* return non-zero on detection */
-int aha1542_detect(Scsi_Host_Template * tpnt)
+static int __init aha1542_detect(Scsi_Host_Template * tpnt)
{
unsigned char dma_chan;
unsigned char irq_level;
#ifdef MODULE
bases[0] = aha1542[0];
- setup_buson[0]=aha1542[1];
+ setup_buson[0] = aha1542[1];
setup_busoff[0] = aha1542[2];
{
int atbt = -1;
return 0;
}
-int aha1542_abort(Scsi_Cmnd * SCpnt)
+static int aha1542_abort(Scsi_Cmnd * SCpnt)
{
/*
* This is a device reset. This is handled by sending a special command
* to the device.
*/
-int aha1542_dev_reset(Scsi_Cmnd * SCpnt)
+static int aha1542_dev_reset(Scsi_Cmnd * SCpnt)
{
unsigned long flags;
struct mailbox *mb;
#endif /* ERIC_neverdef */
}
-int aha1542_bus_reset(Scsi_Cmnd * SCpnt)
+static int aha1542_bus_reset(Scsi_Cmnd * SCpnt)
{
int i;
return FAILED;
}
-int aha1542_host_reset(Scsi_Cmnd * SCpnt)
+static int aha1542_host_reset(Scsi_Cmnd * SCpnt)
{
int i;
* These are the old error handling routines. They are only temporarily
* here while we play with the new error handling code.
*/
-int aha1542_old_abort(Scsi_Cmnd * SCpnt)
+static int aha1542_old_abort(Scsi_Cmnd * SCpnt)
{
#if 0
unchar ahacmd = CMD_START_SCSI;
For a first go, we assume that the 1542 notifies us with all of the
pending commands (it does implement soft reset, after all). */
-int aha1542_old_reset(Scsi_Cmnd * SCpnt, unsigned int reset_flags)
+static int aha1542_old_reset(Scsi_Cmnd * SCpnt, unsigned int reset_flags)
{
unchar ahacmd = CMD_START_SCSI;
int i;
#include "sd.h"
-int aha1542_biosparam(Scsi_Disk * disk, kdev_t dev, int *ip)
+static int aha1542_biosparam(Scsi_Disk * disk, kdev_t dev, int *ip)
{
int translation_algorithm;
int size = disk->capacity;
/* REQUEST SENSE */
};
-int aha1542_detect(Scsi_Host_Template *);
-int aha1542_command(Scsi_Cmnd *);
-int aha1542_queuecommand(Scsi_Cmnd *, void (*done)(Scsi_Cmnd *));
-int aha1542_abort(Scsi_Cmnd * SCpnt);
-int aha1542_bus_reset(Scsi_Cmnd * SCpnt);
-int aha1542_dev_reset(Scsi_Cmnd * SCpnt);
-int aha1542_host_reset(Scsi_Cmnd * SCpnt);
-extern int aha1542_old_abort(Scsi_Cmnd * SCpnt);
-int aha1542_old_reset(Scsi_Cmnd *, unsigned int);
-int aha1542_biosparam(Disk *, kdev_t, int*);
+static int aha1542_detect(Scsi_Host_Template *);
+static int aha1542_command(Scsi_Cmnd *);
+static int aha1542_queuecommand(Scsi_Cmnd *, void (*done)(Scsi_Cmnd *));
+static int aha1542_abort(Scsi_Cmnd * SCpnt);
+static int aha1542_bus_reset(Scsi_Cmnd * SCpnt);
+static int aha1542_dev_reset(Scsi_Cmnd * SCpnt);
+static int aha1542_host_reset(Scsi_Cmnd * SCpnt);
+static int aha1542_old_abort(Scsi_Cmnd * SCpnt);
+static int aha1542_old_reset(Scsi_Cmnd *, unsigned int);
+static int aha1542_biosparam(Disk *, kdev_t, int*);
#define AHA1542_MAILBOXES 8
#define AHA1542_SCATTER 16
if(*cmd == REQUEST_SENSE)
{
+#if 0
+ /* scsi_request_sense() provides a buffer of size 256,
+ so there is no reason to expect equality */
+
if (bufflen != sizeof(SCpnt->sense_buffer))
{
printk("Wrong buffer length supplied for request sense (%d)\n",
bufflen);
}
+#endif
SCpnt->result = 0;
done(SCpnt);
return 0;
* check/allocate region code, but this may change at some point,
* so we go through the motions.
*/
- if (check_region(slotbase, SLOTSIZE)) /* See if in use */
+ if (!request_region(slotbase, SLOTSIZE, "aha1740")) /* See if in use */
continue;
if (!aha1740_test_port(slotbase))
- continue;
+ goto err_release;
aha1740_getconfig(slotbase,&irq_level,&translation);
if ((inb(G2STAT(slotbase)) &
(G2STAT_MBXOUT|G2STAT_BUSY)) != G2STAT_MBXOUT)
DEB(printk("aha1740_detect: enable interrupt channel %d\n",irq_level));
if (request_irq(irq_level,aha1740_intr_handle,0,"aha1740",NULL)) {
printk("Unable to allocate IRQ for adaptec controller.\n");
- continue;
+ goto err_release;
}
shpnt = scsi_register(tpnt, sizeof(struct aha1740_hostdata));
if(shpnt == NULL)
- {
- free_irq(irq_level, NULL);
- continue;
- }
- request_region(slotbase, SLOTSIZE, "aha1740");
+ goto err_free_irq;
+
shpnt->base = 0;
shpnt->io_port = slotbase;
shpnt->n_io_port = SLOTSIZE;
host->translation = translation;
aha_host[irq_level - 9] = shpnt;
count++;
+ continue;
+
+ err_free_irq:
+ free_irq(irq_level, aha1740_intr_handle);
+ err_release:
+ release_region(slotbase, SLOTSIZE);
}
return count;
}
{
uint32_t command;
u_long base;
+#ifdef MMAPIO
u_long start;
u_long base_page;
u_long base_offset;
+#endif
uint8_t *maddr;
command = ahc_pci_read_config(ahc->dev_softc, PCIR_COMMAND, 4);
#endif
#define mb() \
__asm__ __volatile__("mb": : :"memory")
+#elif defined(__sparc__)
+#define MMAPIO
+/* The default mb() define does what this driver wants. -DaveM */
#endif
static __inline uint8_t ahc_inb(struct ahc_softc * ahc, long port);
.SUFFIXES= .l .y .c
$(PROG): $(SRCS)
- $(CC) $(AICASM_CFLAGS) $(SRCS) -o $(PROG)
+ $(HOSTCC) $(AICASM_CFLAGS) $(SRCS) -o $(PROG)
clean:
rm -f $(CLEANFILES) $(PROG)
*
* 4. When this function is left, the address pointer (start_addr) is
* converted to a physical address. Because it points one byte
- * further than the last transfered byte, it can point outside the
+ * further than the last transferred byte, it can point outside the
* current page. If virt_to_phys() is called with this address we
* might get an access error. Therefore virt_to_phys() is called with
* start_addr - 1 if the count has reached zero. The result is
/* This int is actually "pseudo-slow", i.e. it acts like a slow
* interrupt after having cleared the pending flag for the DMA
* interrupt. */
- request_irq(IRQ_TT_MFP_SCSI, scsi_tt_intr, IRQ_TYPE_SLOW,
- "SCSI NCR5380", scsi_tt_intr);
+ if (request_irq(IRQ_TT_MFP_SCSI, scsi_tt_intr, IRQ_TYPE_SLOW,
+ "SCSI NCR5380", scsi_tt_intr)) {
+ printk(KERN_ERR "atari_scsi_detect: cannot allocate irq %d, aborting",IRQ_TT_MFP_SCSI);
+ scsi_unregister(atari_scsi_host);
+ atari_stram_free(atari_dma_buffer);
+ atari_dma_buffer = 0;
+ return 0;
+ }
tt_mfp.active_edge |= 0x80; /* SCSI int on L->H */
#ifdef REAL_DMA
tt_scsi_dma.dma_ctrl = 0;
atari_dma_residual = 0;
-#endif /* REAL_DMA */
-#ifdef REAL_DMA
#ifdef CONFIG_TT_DMA_EMUL
if (MACH_IS_HADES) {
- request_irq(IRQ_AUTO_2, hades_dma_emulator,
- IRQ_TYPE_PRIO, "Hades DMA emulator",
- hades_dma_emulator);
+ if (request_irq(IRQ_AUTO_2, hades_dma_emulator,
+ IRQ_TYPE_PRIO, "Hades DMA emulator",
+ hades_dma_emulator)) {
+ printk(KERN_ERR "atari_scsi_detect: cannot allocate irq %d, aborting (MACH_IS_HADES)",IRQ_AUTO_2);
+ free_irq(IRQ_TT_MFP_SCSI, scsi_tt_intr);
+ scsi_unregister(atari_scsi_host);
+ atari_stram_free(atari_dma_buffer);
+ atari_dma_buffer = 0;
+ return 0;
+ }
}
#endif
if (MACH_IS_MEDUSA || MACH_IS_HADES) {
* the rest data bug is fixed, this can be lowered to 1.
*/
atari_read_overruns = 4;
- }
-#endif
-
+ }
+#endif /*REAL_DMA*/
}
else { /* ! IS_A_TT */
volatile unsigned char cmd_buffer[16];
/* This is where all commands are put
- * before they are transfered to the ESP chip
+ * before they are transferred to the ESP chip
* via PIO.
*/
esp_write(eregs->esp_cfg1, (ESP_CONFIG1_PENABLE | 7));
udelay(5);
- if(esp_read(eregs->esp_cfg1) != (ESP_CONFIG1_PENABLE | 7)){
- esp_deallocate(esp);
- scsi_unregister(esp->ehost);
- release_mem_region(board+REAL_BLZ1230_ESP_ADDR,
- sizeof(struct ESP_regs));
- return 0; /* Bail out if address did not hold data */
- }
+ if(esp_read(eregs->esp_cfg1) != (ESP_CONFIG1_PENABLE | 7))
+ goto err_out;
/* Do command transfer with programmed I/O */
esp->do_pio_cmds = 1;
esp->irq = IRQ_AMIGA_PORTS;
esp->slot = board+REAL_BLZ1230_ESP_ADDR;
- request_irq(IRQ_AMIGA_PORTS, esp_intr, SA_SHIRQ,
- "Blizzard 1230 SCSI IV", esp_intr);
+ if (request_irq(IRQ_AMIGA_PORTS, esp_intr, SA_SHIRQ,
+ "Blizzard 1230 SCSI IV", esp_intr))
+ goto err_out;
/* Figure out our scsi ID on the bus */
esp->scsi_id = 7;
}
}
return 0;
+
+ err_out:
+ scsi_unregister(esp->ehost);
+ esp_deallocate(esp);
+ release_mem_region(board+REAL_BLZ1230_ESP_ADDR,
+ sizeof(struct ESP_regs));
+ return 0;
}
/************************************************************* DMA Functions */
int board; /* Use NCR53c400, Ricoh, etc. extensions ? */
} overrides
#ifdef GENERIC_NCR5380_OVERRIDE
- [] __initdata = GENERIC_NCR5380_OVERRIDE
+ [] __initdata = GENERIC_NCR5380_OVERRIDE;
#else
[1] __initdata = {{0,},};
#endif
MODULE_PARM(ncr_53c400a, "i");
MODULE_PARM(dtc_3181e, "i");
+#else
+
+static int __init do_NCR5380_setup(char *str)
+{
+ int ints[10];
+
+ get_options(str, sizeof(ints)/sizeof(int), ints);
+ generic_NCR5380_setup(str,ints);
+
+ return 1;
+}
+
+static int __init do_NCR53C400_setup(char *str)
+{
+ int ints[10];
+
+ get_options(str, sizeof(ints)/sizeof(int), ints);
+ generic_NCR53C400_setup(str,ints);
+
+ return 1;
+}
+
+static int __init do_NCR53C400A_setup(char *str)
+{
+ int ints[10];
+
+ get_options(str, sizeof(ints)/sizeof(int), ints);
+ generic_NCR53C400A_setup(str,ints);
+
+ return 1;
+}
+
+static int __init do_DTC3181E_setup(char *str)
+{
+ int ints[10];
+
+ get_options(str, sizeof(ints)/sizeof(int), ints);
+ generic_DTC3181E_setup(str,ints);
+
+ return 1;
+}
+
+__setup("ncr5380=", do_NCR5380_setup);
+__setup("ncr53c400=", do_NCR53C400_setup);
+__setup("ncr53c400a=", do_NCR53C400A_setup);
+__setup("dtc3181e=", do_DTC3181E_setup);
+
+static struct isapnp_device_id id_table[] __devinitdata = {
+ {
+ ISAPNP_ANY_ID, ISAPNP_ANY_ID,
+ ISAPNP_VENDOR('D','T','C'), ISAPNP_FUNCTION(0x436e),
+ 0
+ },
+ {0}
+};
+
+MODULE_DEVICE_TABLE(isapnp, id_table);
+
#endif
*
* Linux MegaRAID device driver
*
- * Copyright 1999 American Megatrends Inc.
+ * Copyright 2001 American Megatrends Inc.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*
- * Version : 1.07b
- *
+ * Version : v1.14g (Feb 5, 2001)
+ *
* Description: Linux device driver for AMI MegaRAID controller
*
- * Supported controllers: MegaRAID 418, 428, 438, 466, 762, 467, 490
- *
+ * Supported controllers: MegaRAID 418, 428, 438, 466, 762, 467, 471, 490
+ * 493.
* History:
*
* Version 0.90:
* Original source contributed by Dell; integrated it into the kernel and
* cleaned up some things. Added support for 438/466 controllers.
- *
* Version 0.91:
* Aligned mailbox area on 16-byte boundry.
* Added schedule() at the end to properly clean up.
* 8 Oct 98 Alan Cox <alan.cox@linux.org>
*
* Merged with 2.1.131 source tree.
- * 12 Dec 98 K. Baranowski <kgb@knm.org.pl>
+ * 12 Dec 98 K. Baranowski <kgb@knm.org.pl>
*
* Version 0.93:
- * Added support for vendor specific ioctl commands (0x80+xxh)
+ * Added support for vendor specific ioctl commands (M_RD_IOCTL_CMD+xxh)
* Changed some fields in MEGARAID struct to better values.
* Added signature check for Rp controllers under 2.0 kernels
* Changed busy-wait loop to be time-based
*
* Version 0.97:
* Changed megaraid_command to use wait_queue.
- * Fixed bug of undesirably detecting HP onboard controllers which
- * are disabled.
- *
+ *
* Version 1.00:
* Checks to see if an irq ocurred while in isr, and runs through
* routine again.
*
* Version 1.01:
* Fixed bug in mega_cmd_done() for megamgr control commands,
- * the host_byte in the result code from the scsi request to
- * scsi midlayer is set to DID_BAD_TARGET when adapter's
- * returned codes are 0xF0 and 0xF4.
+ * the host_byte in the result code from the scsi request to
+ * scsi midlayer is set to DID_BAD_TARGET when adapter's
+ * returned codes are 0xF0 and 0xF4.
*
* Version 1.02:
* Fixed the tape drive bug by extending the adapter timeout value
- * for passthrough command to 60 seconds in mega_build_cmd().
+ * for passthrough command to 60 seconds in mega_build_cmd().
*
* Version 1.03:
* Fixed Madrona support.
* Added driver version printout at driver loadup time
*
* Version 1.04
- * Added code for 40 ld FW support.
+ * Added code for 40 ld FW support.
* Added new ioctl command 0x81 to support NEW_READ/WRITE_CONFIG with
* data area greater than 4 KB, which is the upper bound for data
* tranfer through scsi_ioctl interface.
* Fixed the problem of unnecessary aborts in the abort entry point, which
* also enables the driver to handle large amount of I/O requests for
* long duration of time.
- *
+ * Version 1.06
+ * Intel Release
* Version 1.07
* Removed the usage of uaccess.h file for kernel versions less than
* 2.0.36, as this file is not present in those versions.
*
- * Version 1.07b
- * The MegaRAID 466 cards with 3.00 firmware lockup and seem to very
- * occasionally hang. We check such cards and report them. You can
- * get firmware upgrades to flash the board to 3.10 for free.
+ * Version 108
+ * Modified mega_ioctl so that 40LD megamanager would run
+ * Made some changes for 2.3.XX compilation , esp wait structures
+ * Code merge between 1.05 and 1.06 .
+ * Bug fixed problem with ioctl interface for concurrency between
+ * 8ld and 40ld firmware
+ * Removed the flawed semaphore logic for handling new config command
+ * Added support for building own scatter / gather list for big user
+ * mode buffers
+ * Added /proc file system support ,so that information is available in
+ * human readable format
+ *
+ * Version 1a08
+ * Changes for IA64 kernels. Checked for CONFIG_PROC_FS flag
+ *
+ * Version 1b08
+ * Include file changes.
+ * Version 1b08b
+ * Change PCI ID value for the 471 card, use #defines when searching
+ * for megaraid cards.
+ *
+ * Version 1.10
+ *
+ * I) Changes made to make following ioctl commands work in 0x81 interface
+ * a)DCMD_DELETE_LOGDRV
+ * b)DCMD_GET_DISK_CONFIG
+ * c)DCMD_DELETE_DRIVEGROUP
+ * d)NC_SUBOP_ENQUIRY3
+ * e)DCMD_CHANGE_LDNO
+ * f)DCMD_CHANGE_LOOPID
+ * g)DCMD_FC_READ_NVRAM_CONFIG
+ * h)DCMD_WRITE_CONFIG
+ * II) Added mega_build_kernel_sg function
+ * III)Firmware flashing option added
+ *
+ * Version 1.10a
+ *
+ * I)Dell updates included in the source code.
+ * Note: This change is not tested due to the unavailability of IA64 kernel
+ * and it is in the #ifdef DELL_MODIFICATION macro which is not defined
+ *
+ * Version 1.10b
+ *
+ * I)In M_RD_IOCTL_CMD_NEW command the wrong way of copying the data
+ * to the user address corrected
+ *
+ * Version 1.10c
+ *
+ * I) DCMD_GET_DISK_CONFIG opcode updated for the firmware changes.
+ *
+ * Version 1.11
+ * I) Version number changed from 1.10c to 1.11
+ * II) DCMD_WRITE_CONFIG(0x0D) command in the driver changed from
+ * scatter/gather list mode to direct pointer mode..
+ * Fixed bug of undesirably detecting HP onboard controllers which
+ * are disabled.
+ *
+ * Version 1.12 (Sep 21, 2000)
+ *
+ * I. Changes have been made for Dynamic DMA mapping in IA64 platform.
+ * To enable all these changes define M_RD_DYNAMIC_DMA_SUPPORT in megaraid.h
+ * II. Got rid of windows mode comments
+ * III. Removed unwanted code segments
+ * IV. Fixed bug of HP onboard controller information (commented with
+ * MEGA_HP_FIX)
+ *
+ * Version 1a12
+ * I. reboot notifier and new ioctl changes ported from 1c09
+ *
+ * Version 1b12
+ * I. Changes in new ioctl interface routines ( Nov 06, 2000 )
+ *
+ * Version 1c12
+ * I. Changes in new ioctl interface routines ( Nov 07, 2000 )
+ *
+ * Version 1d12
+ * I. Compilation error under kernel 2.4.0 for 32-bit machine in mega_ioctl
+ *
+ * Version 1e12, 1f12
+ * 1. Fixes for pci_map_single, pci_alloc_consistent along with mailbox
+ * alignment
+ *
+ * Version 1.13beta
+ * Added Support for Full 64bit address space support. If firmware
+ * supports 64bit, it goes to 64 bit mode even on x86 32bit
+ * systems. Data Corruption Issues while running on test9 kernel
+ * on IA64 systems. This issue not seen on test11 on x86 system
+ *
+ * Version 1.13c
+ * 1. Resolved Memory Leak when using M_RD_IOCTL_CMD interface
+ * 2. Resolved Queuing problem when MailBox Blocks
+ * 3. Added unregister_reboot_notifier support
+ *
+ * Version 1.13d
+ * Experimental changes in interfacing with the controller in ISR
+ *
+ * Version 1.13e
+ * Fixed Broken 2.2.XX compilation changes + misc changes
+ *
+ * Version 1.13f to 1.13i
+ * misc changes + code clean up
+ * Cleaned up the ioctl code and added set_mbox_xfer_addr()
+ * Support for START_DEV (6)
+ *
+ * Version 1.13j
+ * Moved some code to megaraid.h file, replaced some hard coded values
+ * with respective macros. Changed some functions to static
+ *
+ * Version 1.13k
+ * Only some indentation correction to 1.13j
+ *
+ * Version 1.13l , 1.13m, 1.13n, 1.13o
+ * Minor indentation changes + misc changes
+ *
+ * Version 1.13q
+ * Padded the new uioctl_t MIMD structure for maintaining alignment
+ * and size across 32 / 64 bit platforms
+ * Changed the way MIMD IOCTL interface used virt_to_bus() to use pci
+ * memory location
+ *
+ * Version 1.13r
+ * 2.4.xx SCSI Changes.
+ *
+ * Version 1.13s
+ * Stats counter fixes
+ * Temporary fix for some 64 bit firmwares in 2.4.XX kernels
+ *
+ * Version 1.13t
+ * Support for 64bit version of READ/WRITE/VIEW DISK CONFIG
+ *
+ * Version 1.14
+ * Did away with MEGADEV_IOCTL flag. It is now standard part of driver
+ * without need for a special #define flag
+ * Disabled old scsi ioctl path for kernel versions > 2.3.xx. This is due
+ * to the nature in which the new scsi code queues a new scsi command to
+ * controller during SCSI IO Completion
+ * Driver now checks for sub-system vendor id before taking ownership of
+ * the controller
+ *
+ * Version 1.14a
+ * Added Host re-ordering
+ *
+ * Version 1.14b
+ * Corrected some issue which caused the older cards not to work
+ *
+ * Version 1.14c
+ * IOCTL changes for not handling the non-64bit firmwares under 2.4.XX
+ * kernel
+ *
+ * Version 1.14d
+ * Fixed Various MIMD Synchronization Issues
+ *
+ * Version 1.14e
+ * Fixed the error handling during card initialization
+ *
+ * Version 1.14f
+ * Multiple invocations of mimd phase I ioctl stalls the cpu. Replaced
+ * spinlock with semaphore(mutex)
+ *
+ * Version 1.14g
+ * Fixed running out of scbs issues while running MIMD apps under heavy IO
+ *
+ * Version 1.14g-ac - 02/03/01
+ * Reformatted to Linux format so I could compare to old one and cross
+ * check bug fixes
+ * Re fixed the assorted missing 'static' cases
+ * Removed some unneeded version checks
+ * Cleaned up some of the VERSION checks in the code
+ * Left 2.0 support but removed 2.1.x support.
+ * Collected much of the compat glue into one spot
*
* BUGS:
* Some older 2.1 kernels (eg. 2.1.90) have a bug in pci.c that
*
* Timeout period for upper scsi layer, i.e. SD_TIMEOUT in
* /drivers/scsi/sd.c, is too short for this controller. SD_TIMEOUT
- * value must be increased to (30 * HZ) otherwise false timeouts
+ * value must be increased to (30 * HZ) otherwise false timeouts
* will occur in the upper layer.
*
+ * Never set skip_id. The existing PCI code the megaraid uses fails
+ * to properly check the vendor subid in some cases. Setting this then
+ * makes it steal other i960's and crashes some boxes
+ *
+ * Far too many ifdefs for versions.
+ *
*===================================================================*/
-#define CRLFSTR "\n"
-#define IOCTL_CMD_NEW 0x81
-
-#define MEGARAID_VERSION "v107 (December 22, 1999)"
-
-
+#include <linux/config.h>
#include <linux/version.h>
-
-#ifdef MODULE
#include <linux/module.h>
-
-char kernel_version[] = UTS_RELEASE;
-
-MODULE_AUTHOR ("American Megatrends Inc.");
-MODULE_DESCRIPTION ("AMI MegaRAID driver");
-#endif
-
-#include <linux/init.h>
#include <linux/types.h>
#include <linux/errno.h>
+#include <linux/string.h>
#include <linux/kernel.h>
#include <linux/ioport.h>
#include <linux/fcntl.h>
#include <linux/wait.h>
#include <linux/tqueue.h>
#include <linux/interrupt.h>
+#include <linux/mm.h>
+#include <asm/pgtable.h>
+#include <linux/sched.h>
#include <linux/stat.h>
+#include <linux/malloc.h> /* for kmalloc() */
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,1,0) /* 0x20100 */
+#include <linux/bios32.h>
+#else
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,3,0) /* 0x20300 */
+#include <asm/spinlock.h>
+#else
#include <linux/spinlock.h>
+#endif
+#endif
#include <asm/io.h>
#include <asm/irq.h>
-#if LINUX_VERSION_CODE > 0x020024
+
+#if LINUX_VERSION_CODE > KERNEL_VERSION(2,0,24) /* 0x020024 */
#include <asm/uaccess.h>
#endif
+/*
+ * These header files are required for Shutdown Notification routines
+ */
+#include <linux/notifier.h>
+#include <linux/reboot.h>
+#include <linux/init.h>
+
#include "sd.h"
#include "scsi.h"
#include "hosts.h"
#include "megaraid.h"
-/*================================================================
- *
- * #Defines
- *
+/*
+ *================================================================
+ * #Defines
*================================================================
*/
#define MAX_SERBUF 160
#define COM_BASE 0x2f8
-
-u32 RDINDOOR (mega_host_config * megaCfg)
+static ulong RDINDOOR (mega_host_config * megaCfg)
{
- return readl (megaCfg->base + 0x20);
+ return readl (megaCfg->base + 0x20);
}
-void WRINDOOR (mega_host_config * megaCfg, u32 value)
+static void WRINDOOR (mega_host_config * megaCfg, ulong value)
{
- writel (value, megaCfg->base + 0x20);
+ writel (value, megaCfg->base + 0x20);
}
-u32 RDOUTDOOR (mega_host_config * megaCfg)
+static ulong RDOUTDOOR (mega_host_config * megaCfg)
{
- return readl (megaCfg->base + 0x2C);
+ return readl (megaCfg->base + 0x2C);
}
-void WROUTDOOR (mega_host_config * megaCfg, u32 value)
+static void WROUTDOOR (mega_host_config * megaCfg, ulong value)
{
- writel (value, megaCfg->base + 0x2C);
+ writel (value, megaCfg->base + 0x2C);
}
-/*================================================================
- *
- * Function prototypes
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,4,0) /* 0x020100 */
+
+/*
+ * Linux 2.4 and higher
*
- *================================================================
+ * No driver private lock
+ * Use the io_request_lock not cli/sti
+ * queue task is a simple api without irq forms
*/
-static int __init megaraid_setup(char *);
-
-static int megaIssueCmd (mega_host_config * megaCfg,
- u_char * mboxData,
- mega_scb * scb,
- int intr);
-static int mega_build_sglist (mega_host_config * megaCfg, mega_scb * scb,
- u32 * buffer, u32 * length);
-
-static int mega_busyWaitMbox(mega_host_config *);
-static void mega_runpendq (mega_host_config *);
-static void mega_rundoneq (mega_host_config *);
-static void mega_cmd_done (mega_host_config *, mega_scb *, int);
-static mega_scb *mega_ioctl (mega_host_config * megaCfg, Scsi_Cmnd * SCpnt);
-static inline void mega_freeSgList(mega_host_config *megaCfg);
-static void mega_Convert8ldTo40ld( mega_RAIDINQ *inquiry,
- mega_Enquiry3 *enquiry3,
- megaRaidProductInfo *productInfo );
-
#include <linux/smp.h>
+#define cpuid smp_processor_id()
+
+char kernel_version[] = UTS_RELEASE;
+MODULE_AUTHOR ("American Megatrends Inc.");
+MODULE_DESCRIPTION ("AMI MegaRAID driver");
+
+#define DRIVER_LOCK_T
+#define DRIVER_LOCK_INIT(p)
+#define DRIVER_LOCK(p)
+#define DRIVER_UNLOCK(p)
+#define IO_LOCK_T unsigned long io_flags = 0;
+#define IO_LOCK spin_lock_irqsave(&io_request_lock,io_flags);
+#define IO_UNLOCK spin_unlock_irqrestore(&io_request_lock,io_flags);
+
+#define queue_task_irq(a,b) queue_task(a,b)
+#define queue_task_irq_off(a,b) queue_task(a,b)
+#elif LINUX_VERSION_CODE >= KERNEL_VERSION(2,2,0) /* 0x020200 */
+/*
+ * Linux 2.2 and higher
+ *
+ * No driver private lock
+ * Use the io_request_lock not cli/sti
+ * No pci region api
+ * queue_task is now a single simple API
+ */
+
+#include <linux/smp.h>
#define cpuid smp_processor_id()
+
+char kernel_version[] = UTS_RELEASE;
+MODULE_AUTHOR ("American Megatrends Inc.");
+MODULE_DESCRIPTION ("AMI MegaRAID driver");
+
#define DRIVER_LOCK_T
#define DRIVER_LOCK_INIT(p)
#define DRIVER_LOCK(p)
#define IO_LOCK spin_lock_irqsave(&io_request_lock,io_flags);
#define IO_UNLOCK spin_unlock_irqrestore(&io_request_lock,io_flags);
+#define pci_free_consistent(a,b,c,d)
+#define pci_unmap_single(a,b,c,d,e)
+
+#define init_MUTEX_LOCKED(x) ((x)=MUTEX_LOCKED)
+#define init_MUTEX(x) ((x)=MUTEX)
+
+#define queue_task_irq(a,b) queue_task(a,b)
+#define queue_task_irq_off(a,b) queue_task(a,b)
+
+#define DECLARE_WAIT_QUEUE_HEAD(x) struct wait_queue *x = NULL
+#else
+
+/*
+ * Linux 2.0 macros. Here we have to provide some of our own
+ * functionality. We also only work little endian 32bit.
+ * Again no pci_alloc/free api
+ * IO_LOCK/IO_LOCK_T were never used in 2.0 so now are empty
+ */
+
+#define cpuid 0
+#define DRIVER_LOCK_T long cpu_flags;
+#define DRIVER_LOCK_INIT(p)
+#define DRIVER_LOCK(p) \
+ save_flags(cpu_flags); \
+ cli();
+#define DRIVER_UNLOCK(p) \
+ restore_flags(cpu_flags);
+#define IO_LOCK_T
+#define IO_LOCK(p)
+#define IO_UNLOCK(p)
+#define le32_to_cpu(x) (x)
+#define cpu_to_le32(x) (x)
+
+#define pci_free_consistent(a,b,c,d)
+#define pci_unmap_single(a,b,c,d,e)
+
+#define init_MUTEX_LOCKED(x) ((x)=MUTEX_LOCKED)
+#define init_MUTEX(x) ((x)=MUTEX)
+
+/*
+ * 2.0 lacks spinlocks, iounmap/ioremap
+ */
+
+#define ioremap vremap
+#define iounmap vfree
+
+ /* simulate spin locks */
+typedef struct {
+ volatile char lock;
+} spinlock_t;
+
+#define spin_lock_init(x) { (x)->lock = 0;}
+#define spin_lock_irqsave(x,flags) { while ((x)->lock) barrier();\
+ (x)->lock=1; save_flags(flags);\
+ cli();}
+#define spin_unlock_irqrestore(x,flags) { (x)->lock=0; restore_flags(flags);}
+
+#define DECLARE_WAIT_QUEUE_HEAD(x) struct wait_queue *x = NULL
+
+#endif
+
+
+
/* set SERDEBUG to 1 to enable serial debugging */
#define SERDEBUG 0
#if SERDEBUG
static void ser_init (void);
static void ser_puts (char *str);
static void ser_putc (char c);
-static int ser_printk (const char *fmt,...);
+static int ser_printk (const char *fmt, ...);
#endif
-/*================================================================
- *
+#ifdef CONFIG_PROC_FS
+#define COPY_BACK if (offset > megaCfg->procidx) { \
+ *eof = TRUE; \
+ megaCfg->procidx = 0; \
+ megaCfg->procbuf[0] = 0; \
+ return 0;} \
+ if ((count + offset) > megaCfg->procidx) { \
+ count = megaCfg->procidx - offset; \
+ *eof = TRUE; } \
+ memcpy(page, &megaCfg->procbuf[offset], count); \
+ megaCfg->procidx = 0; \
+ megaCfg->procbuf[0] = 0;
+#endif
+
+/*
+ * ================================================================
* Global variables
- *
*================================================================
*/
/* Use "megaraid=skipXX" as LILO option to prohibit driver from scanning
XX scsi id on each channel. Used for Madrona motherboard, where SAF_TE
processor id cannot be scanned */
+
+static char *megaraid;
+#if LINUX_VERSION_CODE > KERNEL_VERSION(2,1,0) /* 0x20100 */
#ifdef MODULE
-static char *megaraid = NULL;
-MODULE_PARM(megaraid, "s");
+MODULE_PARM (megaraid, "s");
#endif
-static int skip_id;
-
+#endif
+static int skip_id = -1;
static int numCtlrs = 0;
-static mega_host_config *megaCtlrs[FC_MAX_CHANNELS] = {0};
+static mega_host_config *megaCtlrs[FC_MAX_CHANNELS] = { 0 };
+static struct proc_dir_entry *mega_proc_dir_entry;
#if DEBUG
static u32 maxCmdTime = 0;
#endif
static mega_scb *pLastScb = NULL;
+static struct notifier_block mega_notifier = {
+ megaraid_reboot_notify,
+ NULL,
+ 0
+};
+
+/* For controller re-ordering */
+struct mega_hbas mega_hbas[MAX_CONTROLLERS];
+
+/*
+ * The File Operations structure for the serial/ioctl interface of the driver
+ */
+/* For controller re-ordering */
+
+static struct file_operations megadev_fops = {
+ ioctl:megadev_ioctl_entry,
+ open:megadev_open,
+ release:megadev_close,
+};
+
+/*
+ * Array to structures for storing the information about the controllers. This
+ * information is sent to the user level applications, when they do an ioctl
+ * for this information.
+ */
+static struct mcontroller mcontroller[MAX_CONTROLLERS];
+
+/* The current driver version */
+static u32 driver_ver = 114;
+/* major number used by the device for character interface */
+static int major;
+
+static struct semaphore mimd_ioctl_sem;
+static struct semaphore mimd_entry_mtx;
#if SERDEBUG
-static spinlock_t serial_lock = SPIN_LOCK_UNLOCKED;
+volatile static spinlock_t serial_lock;
+#endif
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,4,0) /* 0x20300 */
+static struct proc_dir_entry proc_scsi_megaraid = {
+ PROC_SCSI_MEGARAID, 8, "megaraid",
+ S_IFDIR | S_IRUGO | S_IXUGO, 2
+};
+#endif
+
+#ifdef CONFIG_PROC_FS
+extern struct proc_dir_entry proc_root;
#endif
+
#if SERDEBUG
static char strbuf[MAX_SERBUF + 1];
-static void ser_init ()
+static void ser_init (void)
{
- unsigned port = COM_BASE;
-
- outb (0x80, port + 3);
- outb (0, port + 1);
- /* 9600 Baud, if 19200: outb(6,port) */
- outb (12, port);
- outb (3, port + 3);
- outb (0, port + 1);
+ unsigned port = COM_BASE;
+
+ outb (0x80, port + 3);
+ outb (0, port + 1);
+ /* 9600 Baud, if 19200: outb(6,port) */
+ outb (12, port);
+ outb (3, port + 3);
+ outb (0, port + 1);
}
static void ser_puts (char *str)
{
- char *ptr;
+ char *ptr;
- ser_init ();
- for (ptr = str; *ptr; ++ptr)
- ser_putc (*ptr);
+ ser_init ();
+ for (ptr = str; *ptr; ++ptr)
+ ser_putc (*ptr);
}
static void ser_putc (char c)
{
- unsigned port = COM_BASE;
-
- while ((inb (port + 5) & 0x20) == 0);
- outb (c, port);
- if (c == 0x0a) {
- while ((inb (port + 5) & 0x20) == 0);
- outb (0x0d, port);
- }
+ unsigned port = COM_BASE;
+
+ while ((inb (port + 5) & 0x20) == 0) ;
+ outb (c, port);
+ if (c == 0x0a) {
+ while ((inb (port + 5) & 0x20) == 0) ;
+ outb (0x0d, port);
+ }
}
-static int ser_printk (const char *fmt,...)
+static int ser_printk (const char *fmt, ...)
{
- va_list args;
- int i;
- long flags;
-
- spin_lock_irqsave(&serial_lock,flags);
- va_start (args, fmt);
- i = vsprintf (strbuf, fmt, args);
- ser_puts (strbuf);
- va_end (args);
- spin_unlock_irqrestore(&serial_lock,flags);
-
- return i;
+ va_list args;
+ int i;
+ long flags;
+
+ spin_lock_irqsave (&serial_lock, flags);
+ va_start (args, fmt);
+ i = vsprintf (strbuf, fmt, args);
+ ser_puts (strbuf);
+ va_end (args);
+ spin_unlock_irqrestore (&serial_lock, flags);
+
+ return i;
}
#define TRACE(a) { ser_printk a;}
#define TRACE(A)
#endif
+#define TRACE1(a)
+
static void callDone (Scsi_Cmnd * SCpnt)
{
- if (SCpnt->result) {
- TRACE (("*** %.08lx %.02x <%d.%d.%d> = %x\n", SCpnt->serial_number,
- SCpnt->cmnd[0], SCpnt->channel, SCpnt->target, SCpnt->lun,
- SCpnt->result));
- }
- SCpnt->scsi_done (SCpnt);
+ if (SCpnt->result) {
+ TRACE (("*** %.08lx %.02x <%d.%d.%d> = %x\n",
+ SCpnt->serial_number, SCpnt->cmnd[0], SCpnt->channel,
+ SCpnt->target, SCpnt->lun, SCpnt->result));
+ }
+ SCpnt->scsi_done (SCpnt);
}
/*-------------------------------------------------------------------------
* Free a SCB structure
*=======================
*/
-static void mega_freeSCB (mega_host_config *megaCfg, mega_scb * pScb)
+static void mega_freeSCB (mega_host_config * megaCfg, mega_scb * pScb)
{
- mega_scb *pScbtmp;
-
- if ((pScb == NULL) || (pScb->idx >= 0xFE)) {
- return ;
- }
-
- /* Unlink from pending queue */
-
- if(pScb == megaCfg->qPendingH) {
- if(megaCfg->qPendingH == megaCfg->qPendingT )
- megaCfg->qPendingH = megaCfg->qPendingT = NULL;
- else {
- megaCfg->qPendingH = megaCfg->qPendingH->next;
- }
- megaCfg->qPcnt--;
- }
- else {
- for(pScbtmp=megaCfg->qPendingH; pScbtmp; pScbtmp=pScbtmp->next) {
- if (pScbtmp->next == pScb) {
- pScbtmp->next = pScb->next;
- if(pScb == megaCfg->qPendingT) {
- megaCfg->qPendingT = pScbtmp;
- }
- megaCfg->qPcnt--;
- break;
- }
- }
- }
-
- /* Link back into free list */
- pScb->state = SCB_FREE;
- pScb->SCpnt = NULL;
-
- if(megaCfg->qFreeH == (mega_scb *) NULL ) {
- megaCfg->qFreeH = megaCfg->qFreeT = pScb;
- }
- else {
- megaCfg->qFreeT->next = pScb;
- megaCfg->qFreeT = pScb;
- }
- megaCfg->qFreeT->next = NULL;
- megaCfg->qFcnt++;
+ mega_scb *pScbtmp;
+
+ if ((pScb == NULL) || (pScb->idx >= 0xFE)) {
+ return;
+ }
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,4,0)
+ switch (pScb->dma_type) {
+ case M_RD_DMA_TYPE_NONE:
+ break;
+ case M_RD_PTHRU_WITH_BULK_DATA:
+ pci_unmap_single (megaCfg->dev, pScb->dma_h_bulkdata,
+ pScb->pthru->dataxferlen,
+ pScb->dma_direction);
+ break;
+ case M_RD_PTHRU_WITH_SGLIST:
+ {
+ int count;
+ for (count = 0; count < pScb->sglist_count; count++) {
+ pci_unmap_single (megaCfg->dev,
+ pScb->dma_h_sglist[count],
+ pScb->sgList[count].length,
+ pScb->dma_direction);
+
+ }
+ break;
+ }
+ case M_RD_BULK_DATA_ONLY:
+ pci_unmap_single (megaCfg->dev,
+ pScb->dma_h_bulkdata,
+ pScb->iDataSize, pScb->dma_direction);
+
+ break;
+ case M_RD_SGLIST_ONLY:
+ pci_unmap_sg (megaCfg->dev,
+ pScb->SCpnt->request_buffer,
+ pScb->SCpnt->use_sg, pScb->dma_direction);
+ break;
+ default:
+ break;
+ }
+#endif
+
+ /* Unlink from pending queue */
+ if (pScb == megaCfg->qPendingH) {
+
+ if (megaCfg->qPendingH == megaCfg->qPendingT)
+ megaCfg->qPendingH = megaCfg->qPendingT = NULL;
+ else
+ megaCfg->qPendingH = megaCfg->qPendingH->next;
+
+ megaCfg->qPcnt--;
+
+ } else {
+ for (pScbtmp = megaCfg->qPendingH; pScbtmp;
+ pScbtmp = pScbtmp->next) {
+
+ if (pScbtmp->next == pScb) {
+
+ pScbtmp->next = pScb->next;
+
+ if (pScb == megaCfg->qPendingT) {
+ megaCfg->qPendingT = pScbtmp;
+ }
+
+ megaCfg->qPcnt--;
+ break;
+ }
+ }
+ }
+
+ /* Link back into free list */
+ pScb->state = SCB_FREE;
+ pScb->SCpnt = NULL;
+
+ if (megaCfg->qFreeH == (mega_scb *) NULL) {
+ megaCfg->qFreeH = megaCfg->qFreeT = pScb;
+ } else {
+ megaCfg->qFreeT->next = pScb;
+ megaCfg->qFreeT = pScb;
+ }
+
+ megaCfg->qFreeT->next = NULL;
+ megaCfg->qFcnt++;
}
* Allocate a SCB structure
*===========================
*/
-static mega_scb * mega_allocateSCB (mega_host_config * megaCfg, Scsi_Cmnd * SCpnt)
+static mega_scb *mega_allocateSCB (mega_host_config * megaCfg, Scsi_Cmnd * SCpnt)
{
- mega_scb *pScb;
+ mega_scb *pScb;
- /* Unlink command from Free List */
- if ((pScb = megaCfg->qFreeH) != NULL) {
- megaCfg->qFreeH = pScb->next;
- megaCfg->qFcnt--;
-
- pScb->isrcount = jiffies;
- pScb->next = NULL;
- pScb->state = SCB_ACTIVE;
- pScb->SCpnt = SCpnt;
+ /* Unlink command from Free List */
+ if ((pScb = megaCfg->qFreeH) != NULL) {
+ megaCfg->qFreeH = pScb->next;
+ megaCfg->qFcnt--;
- return pScb;
- }
+ pScb->isrcount = jiffies;
+ pScb->next = NULL;
+ pScb->state = SCB_ACTIVE;
+ pScb->SCpnt = SCpnt;
- printk (KERN_WARNING "Megaraid: Could not allocate free SCB!!!\n");
-
- return NULL;
-}
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,4,0)
+ pScb->dma_type = M_RD_DMA_TYPE_NONE;
+#endif
-/*================================================
- * Initialize SCB structures
- *================================================
- */
-static int mega_initSCB (mega_host_config * megaCfg)
-{
- int idx;
+ return pScb;
+ }
- megaCfg->qFreeH = NULL;
- megaCfg->qFcnt = 0;
-#if DEBUG
-if(megaCfg->max_cmds >= MAX_COMMANDS) {
-printk("megaraid:ctlr max cmds = %x : MAX_CMDS = %x", megaCfg->max_cmds, MAX_COMMANDS);
-}
-#endif
+ printk (KERN_WARNING "Megaraid: Could not allocate free SCB!!!\n");
- for (idx = megaCfg->max_cmds-1; idx >= 0; idx--) {
- megaCfg->scbList[idx].idx = idx;
- megaCfg->scbList[idx].sgList = kmalloc(sizeof(mega_sglist) * MAX_SGLIST,
- GFP_ATOMIC | GFP_DMA);
- if (megaCfg->scbList[idx].sgList == NULL) {
- printk(KERN_WARNING "Can't allocate sglist for id %d\n",idx);
- mega_freeSgList(megaCfg);
- return -1;
- }
-
- if (idx < MAX_COMMANDS) {
- /* Link to free list */
- mega_freeSCB(megaCfg, &megaCfg->scbList[idx]);
- }
- }
- return 0;
+ return NULL;
}
-/* Run through the list of completed requests */
-static void mega_rundoneq (mega_host_config *megaCfg)
+/* Run through the list of completed requests and finish it */
+static void mega_rundoneq (mega_host_config * megaCfg)
{
- Scsi_Cmnd *SCpnt;
+ Scsi_Cmnd *SCpnt;
- while ((SCpnt = megaCfg->qCompletedH) != NULL) {
- megaCfg->qCompletedH = (Scsi_Cmnd *)SCpnt->host_scribble;
- megaCfg->qCcnt--;
+ while ((SCpnt = megaCfg->qCompletedH) != NULL) {
+ megaCfg->qCompletedH = (Scsi_Cmnd *) SCpnt->host_scribble;
+ megaCfg->qCcnt--;
- SCpnt->host_scribble = (unsigned char *) NULL ; // XC : sep 14
- /* Callback */
- callDone (SCpnt);
- }
- megaCfg->qCompletedH = megaCfg->qCompletedT = NULL;
+ SCpnt->host_scribble = (unsigned char *) NULL; /* XC : sep 14 */
+ /* Callback */
+ callDone (SCpnt);
+ }
+
+ megaCfg->qCompletedH = megaCfg->qCompletedT = NULL;
}
/*
* Runs through the list of pending requests
* Assumes that mega_lock spin_lock has been acquired.
*/
-static void mega_runpendq(mega_host_config *megaCfg)
+static int mega_runpendq (mega_host_config * megaCfg)
{
- mega_scb *pScb;
-
- /* Issue any pending commands to the card */
- for(pScb=megaCfg->qPendingH; pScb; pScb=pScb->next) {
- if (pScb->state == SCB_ACTIVE) {
- if(megaIssueCmd(megaCfg, pScb->mboxData, pScb, 1))
- return;
- }
- }
+ mega_scb *pScb;
+ int rc;
+
+ /* Issue any pending commands to the card */
+ for (pScb = megaCfg->qPendingH; pScb; pScb = pScb->next) {
+ if (pScb->state == SCB_ACTIVE) {
+ if ((rc =
+ megaIssueCmd (megaCfg, pScb->mboxData, pScb, 1)) == -1)
+ return rc;
+ }
+ }
+ return 0;
}
/* Add command to the list of completed requests */
-static void
-mega_cmd_done (mega_host_config * megaCfg, mega_scb * pScb,
- int status)
+
+static void mega_cmd_done (mega_host_config * megaCfg, mega_scb * pScb, int status)
{
- int islogical;
- Scsi_Cmnd *SCpnt;
- mega_passthru *pthru;
- mega_mailbox *mbox;
-
- if (pScb == NULL) {
- TRACE(("NULL pScb in mega_cmd_done!"));
- printk("NULL pScb in mega_cmd_done!");
- }
-
- SCpnt = pScb->SCpnt;
- pthru = &pScb->pthru;
- mbox = (mega_mailbox *) &pScb->mboxData;
-
- if (SCpnt == NULL) {
- TRACE(("NULL SCpnt in mega_cmd_done!"));
- TRACE(("pScb->idx = ",pScb->idx));
- TRACE(("pScb->state = ",pScb->state));
- TRACE(("pScb->state = ",pScb->state));
- printk("megaraid:Problem...!\n");
- while(1);
- }
-
- islogical = (SCpnt->channel == megaCfg->host->max_channel);
-
- if (SCpnt->cmnd[0] == INQUIRY &&
- ((((u_char *) SCpnt->request_buffer)[0] & 0x1F) == TYPE_DISK) &&
- !islogical) {
- status = 0xF0;
- }
-
-/* clear result; otherwise, success returns corrupt value */
- SCpnt->result = 0;
-
-if ((SCpnt->cmnd[0] & 0x80) ) {/* i.e. ioctl cmd such as 0x80, 0x81 of megamgr*/
- switch (status) {
- case 0xF0:
- case 0xF4:
- SCpnt->result=(DID_BAD_TARGET<<16)|status;
- break;
- default:
- SCpnt->result|=status;
- }/*end of switch*/
-}
-else{
- /* Convert MegaRAID status to Linux error code */
- switch (status) {
- case 0x00: /* SUCCESS , i.e. SCSI_STATUS_GOOD*/
- SCpnt->result |= (DID_OK << 16);
- break;
- case 0x02: /* ERROR_ABORTED, i.e. SCSI_STATUS_CHECK_CONDITION */
- /*set sense_buffer and result fields*/
- if( mbox->cmd==MEGA_MBOXCMD_PASSTHRU ){
- memcpy( SCpnt->sense_buffer , pthru->reqsensearea, 14);
- SCpnt->result = (DRIVER_SENSE<<24)|(DID_ERROR << 16)|status;
- }
- else{
- SCpnt->sense_buffer[0]=0x70;
- SCpnt->sense_buffer[2]=ABORTED_COMMAND;
- SCpnt->result |= (CHECK_CONDITION << 1);
- }
- break;
- case 0x08: /* ERR_DEST_DRIVE_FAILED, i.e. SCSI_STATUS_BUSY */
- SCpnt->result |= (DID_BUS_BUSY << 16)|status;
- break;
- default:
- SCpnt->result |= (DID_BAD_TARGET << 16)|status;
- break;
- }
- }
- if ( SCpnt->cmnd[0]!=IOCTL_CMD_NEW )
- /* not IOCTL_CMD_NEW SCB, freeSCB()*/
- /* For IOCTL_CMD_NEW SCB, delay freeSCB() in megaraid_queue()
- * after copy data back to user space*/
- mega_freeSCB(megaCfg, pScb);
-
- /* Add Scsi_Command to end of completed queue */
- if( megaCfg->qCompletedH == NULL ) {
- megaCfg->qCompletedH = megaCfg->qCompletedT = SCpnt;
- }
- else {
- megaCfg->qCompletedT->host_scribble = (unsigned char *) SCpnt;
- megaCfg->qCompletedT = SCpnt;
- }
- megaCfg->qCompletedT->host_scribble = (unsigned char *) NULL;
- megaCfg->qCcnt++;
+ int islogical;
+ Scsi_Cmnd *SCpnt;
+ mega_passthru *pthru;
+ mega_mailbox *mbox;
+
+ if (pScb == NULL) {
+ TRACE (("NULL pScb in mega_cmd_done!"));
+ printk(KERN_CRIT "NULL pScb in mega_cmd_done!");
+ }
+
+ SCpnt = pScb->SCpnt;
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,4,0)
+ pthru = pScb->pthru;
+#else
+ pthru = &pScb->pthru;
+#endif
+
+ mbox = (mega_mailbox *) & pScb->mboxData;
+
+ if (SCpnt == NULL) {
+ TRACE (("NULL SCpnt in mega_cmd_done!"));
+ TRACE (("pScb->idx = ", pScb->idx));
+ TRACE (("pScb->state = ", pScb->state));
+ TRACE (("pScb->state = ", pScb->state));
+ panic(KERN_ERR "megaraid:Problem...!\n");
+ }
+
+ islogical = (SCpnt->channel == megaCfg->host->max_channel);
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,4,0)
+ /* Special Case to handle PassThrough->XferAddrress > 4GB */
+ switch (SCpnt->cmnd[0]) {
+ case INQUIRY:
+ case READ_CAPACITY:
+ memcpy (SCpnt->request_buffer,
+ pScb->bounce_buffer, SCpnt->request_bufflen);
+ break;
+ }
+#endif
+
+ mega_freeSCB (megaCfg, pScb);
+
+ if (SCpnt->cmnd[0] == INQUIRY && ((((u_char *) SCpnt->request_buffer)[0] & 0x1F) == TYPE_DISK) && !islogical) {
+ status = 0xF0;
+ }
+
+ /* clear result; otherwise, success returns corrupt value */
+ SCpnt->result = 0;
+
+ if ((SCpnt->cmnd[0] & M_RD_IOCTL_CMD)) { /* i.e. ioctl cmd such as M_RD_IOCTL_CMD, M_RD_IOCTL_CMD_NEW of megamgr */
+ switch (status) {
+ case 2:
+ case 0xF0:
+ case 0xF4:
+ SCpnt->result = (DID_BAD_TARGET << 16) | status;
+ break;
+ default:
+ SCpnt->result |= status;
+ } /*end of switch */
+ } else {
+ /* Convert MegaRAID status to Linux error code */
+ switch (status) {
+ case 0x00: /* SUCCESS , i.e. SCSI_STATUS_GOOD */
+ SCpnt->result |= (DID_OK << 16);
+ break;
+
+ case 0x02: /* ERROR_ABORTED, i.e. SCSI_STATUS_CHECK_CONDITION */
+
+ /*set sense_buffer and result fields */
+ if (mbox->cmd == MEGA_MBOXCMD_PASSTHRU) {
+ memcpy (SCpnt->sense_buffer, pthru->reqsensearea, 14);
+ SCpnt->result = (DRIVER_SENSE << 24) | (DID_ERROR << 16) | status;
+ } else {
+ SCpnt->sense_buffer[0] = 0x70;
+ SCpnt->sense_buffer[2] = ABORTED_COMMAND;
+ SCpnt->result |= (CHECK_CONDITION << 1);
+ }
+ break;
+
+ case 0x08: /* ERR_DEST_DRIVE_FAILED, i.e. SCSI_STATUS_BUSY */
+ SCpnt->result |= (DID_BUS_BUSY << 16) | status;
+ break;
+
+ default:
+ SCpnt->result |= (DID_BAD_TARGET << 16) | status;
+ break;
+ }
+ }
+
+ /* Add Scsi_Command to end of completed queue */
+ if (megaCfg->qCompletedH == NULL) {
+ megaCfg->qCompletedH = megaCfg->qCompletedT = SCpnt;
+ } else {
+ megaCfg->qCompletedT->host_scribble = (unsigned char *) SCpnt;
+ megaCfg->qCompletedT = SCpnt;
+ }
+
+ megaCfg->qCompletedT->host_scribble = (unsigned char *) NULL;
+ megaCfg->qCcnt++;
}
/*-------------------------------------------------------------------
* If NULL is returned, the scsi_done function MUST have been called
*
*-------------------------------------------------------------------*/
-static mega_scb * mega_build_cmd (mega_host_config * megaCfg,
- Scsi_Cmnd * SCpnt)
-{
- mega_scb *pScb;
- mega_mailbox *mbox;
- mega_passthru *pthru;
- long seg;
- char islogical;
- char lun = SCpnt->lun;
-
- if ((SCpnt->cmnd[0] == 0x80) || (SCpnt->cmnd[0] == IOCTL_CMD_NEW) ) /* ioctl */
- return mega_ioctl (megaCfg, SCpnt);
-
- islogical = (SCpnt->channel == megaCfg->host->max_channel);
-
- if (!islogical && lun != 0) {
- SCpnt->result = (DID_BAD_TARGET << 16);
- callDone (SCpnt);
- return NULL;
- }
-
- if (!islogical && SCpnt->target == skip_id) {
- SCpnt->result = (DID_BAD_TARGET << 16);
- callDone (SCpnt);
- return NULL;
- }
-
- if ( islogical ) {
- lun = (SCpnt->target * 8) + lun;
- if ( lun > FC_MAX_LOGICAL_DRIVES ){
- SCpnt->result = (DID_BAD_TARGET << 16);
- callDone (SCpnt);
- return NULL;
- }
- }
- /*-----------------------------------------------------
- *
- * Logical drive commands
- *
- *-----------------------------------------------------*/
- if (islogical) {
- switch (SCpnt->cmnd[0]) {
- case TEST_UNIT_READY:
- memset (SCpnt->request_buffer, 0, SCpnt->request_bufflen);
- SCpnt->result = (DID_OK << 16);
- callDone (SCpnt);
- return NULL;
-
- case MODE_SENSE:
- memset (SCpnt->request_buffer, 0, SCpnt->cmnd[4]);
- SCpnt->result = (DID_OK << 16);
- callDone (SCpnt);
- return NULL;
-
- case READ_CAPACITY:
- case INQUIRY:
- /* Allocate a SCB and initialize passthru */
- if ((pScb = mega_allocateSCB (megaCfg, SCpnt)) == NULL) {
- SCpnt->result = (DID_ERROR << 16);
- callDone (SCpnt);
- return NULL;
- }
- pthru = &pScb->pthru;
- mbox = (mega_mailbox *) & pScb->mboxData;
-
- memset (mbox, 0, sizeof (pScb->mboxData));
- memset (pthru, 0, sizeof (mega_passthru));
- pthru->timeout = 0;
- pthru->ars = 1;
- pthru->reqsenselen = 14;
- pthru->islogical = 1;
- pthru->logdrv = lun;
- pthru->cdblen = SCpnt->cmd_len;
- pthru->dataxferaddr = virt_to_bus (SCpnt->request_buffer);
- pthru->dataxferlen = SCpnt->request_bufflen;
- memcpy (pthru->cdb, SCpnt->cmnd, SCpnt->cmd_len);
-
- /* Initialize mailbox area */
- mbox->cmd = MEGA_MBOXCMD_PASSTHRU;
- mbox->xferaddr = virt_to_bus (pthru);
-
- return pScb;
-
- case READ_6:
- case WRITE_6:
- case READ_10:
- case WRITE_10:
- /* Allocate a SCB and initialize mailbox */
- if ((pScb = mega_allocateSCB (megaCfg, SCpnt)) == NULL) {
- SCpnt->result = (DID_ERROR << 16);
- callDone (SCpnt);
- return NULL;
- }
- mbox = (mega_mailbox *) & pScb->mboxData;
-
- memset (mbox, 0, sizeof (pScb->mboxData));
- mbox->logdrv = lun;
- mbox->cmd = (*SCpnt->cmnd == READ_6 || *SCpnt->cmnd == READ_10) ?
- MEGA_MBOXCMD_LREAD : MEGA_MBOXCMD_LWRITE;
-
- /* 6-byte */
- if (*SCpnt->cmnd == READ_6 || *SCpnt->cmnd == WRITE_6) {
- mbox->numsectors =
- (u32) SCpnt->cmnd[4];
- mbox->lba =
- ((u32) SCpnt->cmnd[1] << 16) |
- ((u32) SCpnt->cmnd[2] << 8) |
- (u32) SCpnt->cmnd[3];
- mbox->lba &= 0x1FFFFF;
- }
-
- /* 10-byte */
- if (*SCpnt->cmnd == READ_10 || *SCpnt->cmnd == WRITE_10) {
- mbox->numsectors =
- (u32) SCpnt->cmnd[8] |
- ((u32) SCpnt->cmnd[7] << 8);
- mbox->lba =
- ((u32) SCpnt->cmnd[2] << 24) |
- ((u32) SCpnt->cmnd[3] << 16) |
- ((u32) SCpnt->cmnd[4] << 8) |
- (u32) SCpnt->cmnd[5];
- }
-
- /* Calculate Scatter-Gather info */
- mbox->numsgelements = mega_build_sglist (megaCfg, pScb,
- (u32 *) & mbox->xferaddr,
- (u32 *) & seg);
-
- return pScb;
-
- default:
- SCpnt->result = (DID_BAD_TARGET << 16);
- callDone (SCpnt);
- return NULL;
- }
- }
- /*-----------------------------------------------------
- *
- * Passthru drive commands
- *
- *-----------------------------------------------------*/
- else {
- /* Allocate a SCB and initialize passthru */
- if ((pScb = mega_allocateSCB (megaCfg, SCpnt)) == NULL) {
- SCpnt->result = (DID_ERROR << 16);
- callDone (SCpnt);
- return NULL;
- }
- pthru = &pScb->pthru;
- mbox = (mega_mailbox *) pScb->mboxData;
-
- memset (mbox, 0, sizeof (pScb->mboxData));
- memset (pthru, 0, sizeof (mega_passthru));
- pthru->timeout = 2; /*set adapter timeout value to 10 min. for tape drive*/
- /* 0=6sec/1=60sec/2=10min/3=3hrs */
- pthru->ars = 1;
- pthru->reqsenselen = 14;
- pthru->islogical = 0;
- pthru->channel = (megaCfg->flag & BOARD_40LD) ? 0 : SCpnt->channel;
- pthru->target = (megaCfg->flag & BOARD_40LD) ? /*BOARD_40LD*/
- (SCpnt->channel<<4)|SCpnt->target : SCpnt->target;
- pthru->cdblen = SCpnt->cmd_len;
- memcpy (pthru->cdb, SCpnt->cmnd, SCpnt->cmd_len);
-
- pthru->numsgelements = mega_build_sglist (megaCfg, pScb,
- (u32 *) & pthru->dataxferaddr,
- (u32 *) & pthru->dataxferlen);
-
- /* Initialize mailbox */
- mbox->cmd = MEGA_MBOXCMD_PASSTHRU;
- mbox->xferaddr = virt_to_bus (pthru);
-
- return pScb;
- }
- return NULL;
-}
-/*--------------------------------------------------------------------
- * build RAID commands for controller, passed down through ioctl()
- *--------------------------------------------------------------------*/
-static mega_scb * mega_ioctl (mega_host_config * megaCfg, Scsi_Cmnd * SCpnt)
+static mega_scb *mega_build_cmd (mega_host_config * megaCfg, Scsi_Cmnd * SCpnt)
{
- mega_scb *pScb;
- mega_ioctl_mbox *mbox;
- mega_mailbox *mailbox;
- mega_passthru *pthru;
- u8 *mboxdata;
- long seg;
- unsigned char *data = (unsigned char *)SCpnt->request_buffer;
- int i;
-
- if ((pScb = mega_allocateSCB (megaCfg, SCpnt)) == NULL) {
- SCpnt->result = (DID_ERROR << 16);
- callDone (SCpnt);
- return NULL;
- }
-
- mboxdata = (u8 *) & pScb->mboxData;
- mbox = (mega_ioctl_mbox *) & pScb->mboxData;
- mailbox = (mega_mailbox *) & pScb->mboxData;
- memset (mailbox, 0, sizeof (pScb->mboxData));
-
- if (data[0] == 0x03) { /* passthrough command */
- unsigned char cdblen = data[2];
- pthru = &pScb->pthru;
- memset (pthru, 0, sizeof (mega_passthru));
- pthru->islogical = (data[cdblen+3] & 0x80) ? 1:0;
- pthru->timeout = data[cdblen+3] & 0x07;
- pthru->reqsenselen = 14;
- pthru->ars = (data[cdblen+3] & 0x08) ? 1:0;
- pthru->logdrv = data[cdblen+4];
- pthru->channel = data[cdblen+5];
- pthru->target = data[cdblen+6];
- pthru->cdblen = cdblen;
- memcpy (pthru->cdb, &data[3], cdblen);
-
- mailbox->cmd = MEGA_MBOXCMD_PASSTHRU;
- mailbox->xferaddr = virt_to_bus (pthru);
-
- pthru->numsgelements = mega_build_sglist (megaCfg, pScb,
- (u32 *) & pthru->dataxferaddr,
- (u32 *) & pthru->dataxferlen);
-
- for (i=0;i<(SCpnt->request_bufflen-cdblen-7);i++) {
- data[i] = data[i+cdblen+7];
- }
-
- return pScb;
- }
- /* else normal (nonpassthru) command */
-
-#if LINUX_VERSION_CODE > 0x020024
-/*
- * usage of the function copy from user is used in case of data more than
- * 4KB. This is used only with adapters which supports more than 8 logical
- * drives. This feature is disabled on kernels earlier or same as 2.0.36
- * as the uaccess.h file is not available with those kernels.
- */
+ mega_scb *pScb;
+ mega_mailbox *mbox;
+ mega_passthru *pthru;
+ long seg;
+ char islogical;
+ char lun = SCpnt->lun;
+
+ if ((SCpnt->cmnd[0] == MEGADEVIOC))
+ return megadev_doioctl (megaCfg, SCpnt);
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,3,0)
+ if ((SCpnt->cmnd[0] == M_RD_IOCTL_CMD)
+ || (SCpnt->cmnd[0] == M_RD_IOCTL_CMD_NEW))
+ return mega_ioctl (megaCfg, SCpnt); /* Handle IOCTL command */
+#endif
- if (SCpnt->cmnd[0] == IOCTL_CMD_NEW) {
- /* use external data area for large xfers */
- /* If cmnd[0] is set to IOCTL_CMD_NEW then *
- * cmnd[4..7] = external user buffer *
- * cmnd[8..11] = length of buffer *
- * */
- char *kern_area;
- char *user_area = *((char **)&SCpnt->cmnd[4]);
- u32 xfer_size = *((u32 *)&SCpnt->cmnd[8]);
- if (verify_area(VERIFY_READ, user_area, xfer_size)) {
- printk("megaraid: Got bad user address.\n");
- SCpnt->result = (DID_ERROR << 16);
- callDone (SCpnt);
- return NULL;
- }
- kern_area = kmalloc(xfer_size, GFP_ATOMIC | GFP_DMA);
- if (kern_area == NULL) {
- printk("megaraid: Couldn't allocate kernel mem.\n");
- SCpnt->result = (DID_ERROR << 16);
- callDone (SCpnt);
- return NULL;
- }
- copy_from_user(kern_area,user_area,xfer_size);
- pScb->kern_area = kern_area;
- }
-#endif
-
- mbox->cmd = data[0];
- mbox->channel = data[1];
- mbox->param = data[2];
- mbox->pad[0] = data[3];
- mbox->logdrv = data[4];
-
- if(SCpnt->cmnd[0] == IOCTL_CMD_NEW) {
- if(data[0]==DCMD_FC_CMD){ /*i.e. 0xA1, then override some mbox data */
- *(mboxdata+0) = data[0]; /*mailbox byte 0: DCMD_FC_CMD*/
- *(mboxdata+2) = data[2]; /*sub command*/
- *(mboxdata+3) = 0; /*number of elements in SG list*/
- mbox->xferaddr /*i.e. mboxdata byte 0x8 to 0xb*/
- = virt_to_bus(pScb->kern_area);
- }
- else{
- mbox->xferaddr = virt_to_bus(pScb->kern_area);
- mbox->numsgelements = 0;
- }
- }
- else {
-
- mbox->numsgelements = mega_build_sglist (megaCfg, pScb,
- (u32 *) & mbox->xferaddr,
- (u32 *) & seg);
-
- for (i=0;i<(SCpnt->request_bufflen-6);i++) {
- data[i] = data[i+6];
- }
- }
-
- return (pScb);
-}
+ islogical = (SCpnt->channel == megaCfg->host->max_channel);
-#if DEBUG
-static void showMbox(mega_scb *pScb)
-{
- mega_mailbox *mbox;
+ if (!islogical && lun != 0) {
+ SCpnt->result = (DID_BAD_TARGET << 16);
+ callDone (SCpnt);
+ return NULL;
+ }
- if (pScb == NULL) return;
+ if (!islogical && SCpnt->target == skip_id) {
+ SCpnt->result = (DID_BAD_TARGET << 16);
+ callDone (SCpnt);
+ return NULL;
+ }
- mbox = (mega_mailbox *)pScb->mboxData;
- printk("%u cmd:%x id:%x #scts:%x lba:%x addr:%x logdrv:%x #sg:%x\n",
- pScb->SCpnt->pid,
- mbox->cmd, mbox->cmdid, mbox->numsectors,
- mbox->lba, mbox->xferaddr, mbox->logdrv,
- mbox->numsgelements);
-}
+ if (islogical) {
+ lun = (SCpnt->target * 8) + lun;
+ if (lun > FC_MAX_LOGICAL_DRIVES) {
+ SCpnt->result = (DID_BAD_TARGET << 16);
+ callDone (SCpnt);
+ return NULL;
+ }
+ }
+ /*-----------------------------------------------------
+ *
+ * Logical drive commands
+ *
+ *-----------------------------------------------------*/
+ if (islogical) {
+ switch (SCpnt->cmnd[0]) {
+ case TEST_UNIT_READY:
+ memset (SCpnt->request_buffer, 0, SCpnt->request_bufflen);
+ SCpnt->result = (DID_OK << 16);
+ callDone (SCpnt);
+ return NULL;
+
+ case MODE_SENSE:
+ memset (SCpnt->request_buffer, 0, SCpnt->cmnd[4]);
+ SCpnt->result = (DID_OK << 16);
+ callDone (SCpnt);
+ return NULL;
+
+ case READ_CAPACITY:
+ case INQUIRY:
+ /* Allocate a SCB and initialize passthru */
+ if ((pScb = mega_allocateSCB (megaCfg, SCpnt)) == NULL) {
+ SCpnt->result = (DID_ERROR << 16);
+ callDone (SCpnt);
+ return NULL;
+ }
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,4,0)
+ pthru = pScb->pthru;
+#else
+ pthru = &pScb->pthru;
#endif
-#if DEBUG
-static unsigned int cum_time = 0;
-static unsigned int cum_time_cnt = 0;
+ mbox = (mega_mailbox *) & pScb->mboxData;
+ memset (mbox, 0, sizeof (pScb->mboxData));
+ memset (pthru, 0, sizeof (mega_passthru));
+ pthru->timeout = 0;
+ pthru->ars = 1;
+ pthru->reqsenselen = 14;
+ pthru->islogical = 1;
+ pthru->logdrv = lun;
+ pthru->cdblen = SCpnt->cmd_len;
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,4,0)
+ /*Not sure about the direction */
+ pScb->dma_direction = PCI_DMA_BIDIRECTIONAL;
+ pScb->dma_type = M_RD_PTHRU_WITH_BULK_DATA;
+
+#if 0
+/* Normal Code w/o the need for bounce buffer */
+ pScb->dma_h_bulkdata
+ = pci_map_single (megaCfg->dev,
+ SCpnt->request_buffer,
+ SCpnt->request_bufflen,
+ pScb->dma_direction);
+
+ pthru->dataxferaddr = pScb->dma_h_bulkdata;
+#else
+/* Special Code to use bounce buffer for READ_CAPA/INQ */
+ pthru->dataxferaddr = pScb->dma_bounce_buffer;
+ pScb->dma_type = M_RD_DMA_TYPE_NONE;
#endif
-/*--------------------------------------------------------------------
- * Interrupt service routine
- *--------------------------------------------------------------------*/
-static void megaraid_isr (int irq, void *devp, struct pt_regs *regs)
-{
-#if LINUX_VERSION_CODE >= 0x20100
- IO_LOCK_T
-#endif
- mega_host_config *megaCfg;
- u_char byte, idx, sIdx, tmpBox[MAILBOX_SIZE];
- u32 dword=0;
- mega_mailbox *mbox;
- mega_scb *pScb;
- u_char qCnt, qStatus;
- u_char completed[MAX_FIRMWARE_STATUS];
- Scsi_Cmnd *SCpnt;
-
- megaCfg = (mega_host_config *) devp;
- mbox = (mega_mailbox *)tmpBox;
-
- if (megaCfg->host->irq == irq) {
- if (megaCfg->flag & IN_ISR) {
- printk(KERN_ERR "ISR called reentrantly!!\n");
- }
-
- megaCfg->flag |= IN_ISR;
-
- if (mega_busyWaitMbox(megaCfg)) {
- printk(KERN_WARNING "Error: mailbox busy in isr!\n");
- }
-
- /* Check if a valid interrupt is pending */
- if (megaCfg->flag & BOARD_QUARTZ) {
- dword = RDOUTDOOR (megaCfg);
- if (dword != 0x10001234) {
- /* Spurious interrupt */
- megaCfg->flag &= ~IN_ISR;
- return;
- }
- }
- else {
- byte = READ_PORT (megaCfg->host->io_port, INTR_PORT);
- if ((byte & VALID_INTR_BYTE) == 0) {
- /* Spurious interrupt */
- megaCfg->flag &= ~IN_ISR;
- return;
- }
- WRITE_PORT (megaCfg->host->io_port, INTR_PORT, byte);
- }
-
- for(idx=0;idx<MAX_FIRMWARE_STATUS;idx++ ) completed[idx] = 0;
-
- IO_LOCK;
-
- qCnt = 0xff;
- while ((qCnt = megaCfg->mbox->numstatus) == 0xFF)
- ;
-
- qStatus = 0xff;
- while ((qStatus = megaCfg->mbox->status) == 0xFF)
- ;
-
- /* Get list of completed requests */
- for (idx = 0; idx<qCnt; idx++) {
- while ((sIdx = megaCfg->mbox->completed[idx]) == 0xFF) {
- printk("p");
- }
- completed[idx] = sIdx;
- sIdx = 0xFF;
- }
-
- if (megaCfg->flag & BOARD_QUARTZ) {
- WROUTDOOR (megaCfg, dword);
- /* Acknowledge interrupt */
- WRINDOOR (megaCfg, virt_to_bus (megaCfg->mbox) | 0x2);
- while (RDINDOOR (megaCfg) & 0x02);
- }
- else {
- CLEAR_INTR (megaCfg->host->io_port);
- }
-
-#if DEBUG
- if(qCnt >= MAX_FIRMWARE_STATUS) {
- printk("megaraid_isr: cmplt=%d ", qCnt);
- }
+#else
+ pthru->dataxferaddr =
+ virt_to_bus (SCpnt->request_buffer);
#endif
- for (idx = 0; idx < qCnt; idx++) {
- sIdx = completed[idx];
- if ((sIdx > 0) && (sIdx <= MAX_COMMANDS)) {
- pScb = &megaCfg->scbList[sIdx - 1];
+ pthru->dataxferlen = SCpnt->request_bufflen;
+ memcpy (pthru->cdb, SCpnt->cmnd, SCpnt->cmd_len);
- /* ASSERT(pScb->state == SCB_ISSUED); */
+ /* Initialize mailbox area */
+ mbox->cmd = MEGA_MBOXCMD_PASSTHRU;
-#if DEBUG
- if (((jiffies) - pScb->isrcount) > maxCmdTime) {
- maxCmdTime = (jiffies) - pScb->isrcount;
- printk("megaraid_isr : cmd time = %u\n", maxCmdTime);
- }
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,4,0)
+ mbox->xferaddr = pScb->dma_passthruhandle64;
+ TRACE1 (("M_RD_PTHRU_WITH_BULK_DATA Enabled \n"));
+#else
+ mbox->xferaddr = virt_to_bus (pthru);
+#endif
+ return pScb;
+
+ case READ_6:
+ case WRITE_6:
+ case READ_10:
+ case WRITE_10:
+ /* Allocate a SCB and initialize mailbox */
+ if ((pScb = mega_allocateSCB (megaCfg, SCpnt)) == NULL) {
+ SCpnt->result = (DID_ERROR << 16);
+ callDone (SCpnt);
+ return NULL;
+ }
+ mbox = (mega_mailbox *) & pScb->mboxData;
+
+ memset (mbox, 0, sizeof (pScb->mboxData));
+ mbox->logdrv = lun;
+
+ if (megaCfg->flag & BOARD_64BIT) {
+ mbox->cmd = (*SCpnt->cmnd == READ_6
+ || *SCpnt->cmnd ==
+ READ_10) ? MEGA_MBOXCMD_LREAD64 :
+ MEGA_MBOXCMD_LWRITE64;
+ } else {
+ mbox->cmd = (*SCpnt->cmnd == READ_6
+ || *SCpnt->cmnd ==
+ READ_10) ? MEGA_MBOXCMD_LREAD :
+ MEGA_MBOXCMD_LWRITE;
+ }
+
+ /* 6-byte */
+ if (*SCpnt->cmnd == READ_6 || *SCpnt->cmnd == WRITE_6) {
+ mbox->numsectors = (u32) SCpnt->cmnd[4];
+ mbox->lba =
+ ((u32) SCpnt->cmnd[1] << 16) |
+ ((u32) SCpnt->cmnd[2] << 8) |
+ (u32) SCpnt->cmnd[3];
+ mbox->lba &= 0x1FFFFF;
+
+ if (*SCpnt->cmnd == READ_6) {
+ megaCfg->nReads[(int) lun]++;
+ megaCfg->nReadBlocks[(int) lun] +=
+ mbox->numsectors;
+ } else {
+ megaCfg->nWrites[(int) lun]++;
+ megaCfg->nWriteBlocks[(int) lun] +=
+ mbox->numsectors;
+ }
+ }
+
+ /* 10-byte */
+ if (*SCpnt->cmnd == READ_10 || *SCpnt->cmnd == WRITE_10) {
+ mbox->numsectors =
+ (u32) SCpnt->cmnd[8] |
+ ((u32) SCpnt->cmnd[7] << 8);
+ mbox->lba =
+ ((u32) SCpnt->cmnd[2] << 24) |
+ ((u32) SCpnt->cmnd[3] << 16) |
+ ((u32) SCpnt->cmnd[4] << 8) |
+ (u32) SCpnt->cmnd[5];
+
+ if (*SCpnt->cmnd == READ_10) {
+ megaCfg->nReads[(int) lun]++;
+ megaCfg->nReadBlocks[(int) lun] +=
+ mbox->numsectors;
+ } else {
+ megaCfg->nWrites[(int) lun]++;
+ megaCfg->nWriteBlocks[(int) lun] +=
+ mbox->numsectors;
+ }
+ }
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,4,0)
+ if (*SCpnt->cmnd == READ_6 || *SCpnt->cmnd == READ_10) {
+ pScb->dma_direction = PCI_DMA_FROMDEVICE;
+ } else { /*WRITE_6 or WRITE_10 */
+ pScb->dma_direction = PCI_DMA_TODEVICE;
+ }
#endif
-/*
- * Assuming that the scsi command, for which an abort request was received
- * earlier has completed.
- */
- if (pScb->state == SCB_ABORTED) {
- SCpnt = pScb->SCpnt;
- }
- if (pScb->state == SCB_RESET) {
- SCpnt = pScb->SCpnt;
- mega_freeSCB (megaCfg, pScb);
- SCpnt->result = (DID_RESET << 16) ;
- if( megaCfg->qCompletedH == NULL ) {
- megaCfg->qCompletedH = megaCfg->qCompletedT = SCpnt;
- }
- else {
- megaCfg->qCompletedT->host_scribble = (unsigned char *) SCpnt;
- megaCfg->qCompletedT = SCpnt;
- }
- megaCfg->qCompletedT->host_scribble = (unsigned char *) NULL;
- megaCfg->qCcnt++;
- continue;
- }
-
- if (*(pScb->SCpnt->cmnd)==IOCTL_CMD_NEW)
- { /* external user buffer */
- up(&pScb->sem);
- }
- /* Mark command as completed */
- mega_cmd_done(megaCfg, pScb, qStatus);
-
- }
- else {
- printk(KERN_ERR "megaraid: wrong cmd id completed from firmware:id=%x\n",sIdx);
- }
- }
-
- mega_rundoneq(megaCfg);
-
- megaCfg->flag &= ~IN_ISR;
-
- /* Loop through any pending requests */
- mega_runpendq(megaCfg);
-#if LINUX_VERSION_CODE >= 0x20100
- IO_UNLOCK;
-#endif
-
- }
-}
+
+ /* Calculate Scatter-Gather info */
+ mbox->numsgelements = mega_build_sglist (megaCfg, pScb,
+ (u32 *) &
+ mbox->xferaddr,
+ (u32 *) & seg);
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,4,0)
+ pScb->iDataSize = seg;
+
+ if (mbox->numsgelements) {
+ pScb->dma_type = M_RD_SGLIST_ONLY;
+ TRACE1 (("M_RD_SGLIST_ONLY Enabled \n"));
+ } else {
+ pScb->dma_type = M_RD_BULK_DATA_ONLY;
+ TRACE1 (("M_RD_BULK_DATA_ONLY Enabled \n"));
+ }
+#endif
+
+ return pScb;
+ default:
+ SCpnt->result = (DID_BAD_TARGET << 16);
+ callDone (SCpnt);
+ return NULL;
+ }
+ }
+ /*-----------------------------------------------------
+ *
+ * Passthru drive commands
+ *
+ *-----------------------------------------------------*/
+ else {
+ /* Allocate a SCB and initialize passthru */
+ if ((pScb = mega_allocateSCB (megaCfg, SCpnt)) == NULL) {
+ SCpnt->result = (DID_ERROR << 16);
+ callDone (SCpnt);
+ return NULL;
+ }
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,4,0)
+ pthru = pScb->pthru;
+#else
+ pthru = &pScb->pthru;
+#endif
+ mbox = (mega_mailbox *) pScb->mboxData;
+
+ memset (mbox, 0, sizeof (pScb->mboxData));
+ memset (pthru, 0, sizeof (mega_passthru));
+
+ /* set adapter timeout value to 10 min. for tape drive */
+ /* 0=6sec/1=60sec/2=10min/3=3hrs */
+ pthru->timeout = 2;
+ pthru->ars = 1;
+ pthru->reqsenselen = 14;
+ pthru->islogical = 0;
+ pthru->channel =
+ (megaCfg->flag & BOARD_40LD) ? 0 : SCpnt->channel;
+ pthru->target = (megaCfg->flag & BOARD_40LD) ? /*BOARD_40LD */
+ (SCpnt->channel << 4) | SCpnt->target : SCpnt->target;
+ pthru->cdblen = SCpnt->cmd_len;
+
+ memcpy (pthru->cdb, SCpnt->cmnd, SCpnt->cmd_len);
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,4,0)
+ /* Not sure about the direction */
+ pScb->dma_direction = PCI_DMA_BIDIRECTIONAL;
+
+ /* Special Code for Handling READ_CAPA/ INQ using bounce buffers */
+ switch (SCpnt->cmnd[0]) {
+ case INQUIRY:
+ case READ_CAPACITY:
+ pthru->numsgelements = 0;
+ pthru->dataxferaddr = pScb->dma_bounce_buffer;
+ pthru->dataxferlen = SCpnt->request_bufflen;
+ break;
+ default:
+ pthru->numsgelements = mega_build_sglist (megaCfg, pScb,
+ (u32 *) &
+ pthru->
+ dataxferaddr,
+ (u32 *) &
+ pthru->
+ dataxferlen);
+ break;
+ }
+#else
+ pthru->numsgelements = mega_build_sglist (megaCfg, pScb,
+ (u32 *) & pthru->
+ dataxferaddr,
+ (u32 *) & pthru->
+ dataxferlen);
+#endif
+
+ /* Initialize mailbox */
+ mbox->cmd = MEGA_MBOXCMD_PASSTHRU;
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,4,0)
+ mbox->xferaddr = pScb->dma_passthruhandle64;
+
+ if (pthru->numsgelements) {
+ pScb->dma_type = M_RD_PTHRU_WITH_SGLIST;
+ TRACE1 (("M_RD_PTHRU_WITH_SGLIST Enabled \n"));
+ } else {
+				pScb->dma_type = M_RD_PTHRU_WITH_BULK_DATA;
+ TRACE1 (("M_RD_PTHRU_WITH_BULK_DATA Enabled \n"));
+ }
+#else
+ mbox->xferaddr = virt_to_bus (pthru);
+#endif
+
+ return pScb;
+ }
+ return NULL;
+}
+
+/* Handle Driver Level IOCTLs
+ * Return value of 0 indicates this function could not handle , so continue
+ * processing
+*/
+
+static int mega_driver_ioctl (mega_host_config * megaCfg, Scsi_Cmnd * SCpnt)
+{
+ unsigned char *data = (unsigned char *) SCpnt->request_buffer;
+ mega_driver_info driver_info;
+
+ /* If this is not our command dont do anything */
+ if (SCpnt->cmnd[0] != M_RD_DRIVER_IOCTL_INTERFACE)
+ return 0;
+
+ switch (SCpnt->cmnd[1]) {
+ case GET_DRIVER_INFO:
+ if (SCpnt->request_bufflen < sizeof (driver_info)) {
+ SCpnt->result = DID_BAD_TARGET << 16;
+ callDone (SCpnt);
+ return 1;
+ }
+
+ driver_info.size = sizeof (driver_info) - sizeof (int);
+ driver_info.version = MEGARAID_IOCTL_VERSION;
+ memcpy (data, &driver_info, sizeof (driver_info));
+ break;
+ default:
+ SCpnt->result = DID_BAD_TARGET << 16;
+ }
+
+ callDone (SCpnt);
+ return 1;
+}
+
+static void inline set_mbox_xfer_addr (mega_host_config * megaCfg, mega_scb * pScb,
+ mega_ioctl_mbox * mbox, u32 direction)
+{
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,4,0)
+ switch (direction) {
+ case TO_DEVICE:
+ pScb->dma_direction = PCI_DMA_TODEVICE;
+ break;
+ case FROM_DEVICE:
+ pScb->dma_direction = PCI_DMA_FROMDEVICE;
+ break;
+ case FROMTO_DEVICE:
+ pScb->dma_direction = PCI_DMA_BIDIRECTIONAL;
+ break;
+ }
+
+ pScb->dma_h_bulkdata
+ = pci_map_single (megaCfg->dev,
+ pScb->buff_ptr,
+ pScb->iDataSize, pScb->dma_direction);
+ mbox->xferaddr = pScb->dma_h_bulkdata;
+ pScb->dma_type = M_RD_BULK_DATA_ONLY;
+ TRACE1 (("M_RD_BULK_DATA_ONLY Enabled \n"));
+#else
+ mbox->xferaddr = virt_to_bus (pScb->buff_ptr);
+#endif
+}
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,3,0)
+
+/*--------------------------------------------------------------------
+ * build RAID commands for controller, passed down through ioctl()
+ *--------------------------------------------------------------------*/
+static mega_scb *mega_ioctl (mega_host_config * megaCfg, Scsi_Cmnd * SCpnt)
+{
+ mega_scb *pScb;
+ mega_ioctl_mbox *mbox;
+ mega_mailbox *mailbox;
+ mega_passthru *pthru;
+ u8 *mboxdata;
+ long seg, i = 0;
+ unsigned char *data = (unsigned char *) SCpnt->request_buffer;
+
+ if ((pScb = mega_allocateSCB (megaCfg, SCpnt)) == NULL) {
+ SCpnt->result = (DID_ERROR << 16);
+ callDone (SCpnt);
+ return NULL;
+ }
+ pthru = &pScb->pthru;
+
+ mboxdata = (u8 *) & pScb->mboxData;
+ mbox = (mega_ioctl_mbox *) & pScb->mboxData;
+ mailbox = (mega_mailbox *) & pScb->mboxData;
+ memset (mailbox, 0, sizeof (pScb->mboxData));
+
+ if (data[0] == 0x03) { /* passthrough command */
+ unsigned char cdblen = data[2];
+ memset (pthru, 0, sizeof (mega_passthru));
+ pthru->islogical = (data[cdblen + 3] & 0x80) ? 1 : 0;
+ pthru->timeout = data[cdblen + 3] & 0x07;
+ pthru->reqsenselen = 14;
+ pthru->ars = (data[cdblen + 3] & 0x08) ? 1 : 0;
+ pthru->logdrv = data[cdblen + 4];
+ pthru->channel = data[cdblen + 5];
+ pthru->target = data[cdblen + 6];
+ pthru->cdblen = cdblen;
+ memcpy (pthru->cdb, &data[3], cdblen);
+
+ mailbox->cmd = MEGA_MBOXCMD_PASSTHRU;
+
+
+ pthru->numsgelements = mega_build_sglist (megaCfg, pScb,
+ (u32 *) & pthru->
+ dataxferaddr,
+ (u32 *) & pthru->
+ dataxferlen);
+
+ mailbox->xferaddr = virt_to_bus (pthru);
+
+ for (i = 0; i < (SCpnt->request_bufflen - cdblen - 7); i++) {
+ data[i] = data[i + cdblen + 7];
+ }
+ return pScb;
+ }
+ /* else normal (nonpassthru) command */
+
+#if LINUX_VERSION_CODE > KERNEL_VERSION(2,0,24) /*0x020024 */
+ /*
+ *usage of the function copy from user is used in case of data more than
+ *4KB.This is used only with adapters which supports more than 8 logical
+ * drives.This feature is disabled on kernels earlier or same as 2.0.36
+ * as the uaccess.h file is not available with those kernels.
+ */
+
+ if (SCpnt->cmnd[0] == M_RD_IOCTL_CMD_NEW) {
+ /* use external data area for large xfers */
+ /* If cmnd[0] is set to M_RD_IOCTL_CMD_NEW then *
+ * cmnd[4..7] = external user buffer *
+ * cmnd[8..11] = length of buffer *
+ * */
+ char *user_area = *((char **) &SCpnt->cmnd[4]);
+ u32 xfer_size = *((u32 *) & SCpnt->cmnd[8]);
+ switch (data[0]) {
+ case FW_FIRE_WRITE:
+ case FW_FIRE_FLASH:
+ if ((ulong) user_area & (PAGE_SIZE - 1)) {
+ printk
+ ("megaraid:user address not aligned on 4K boundary.Error.\n");
+ SCpnt->result = (DID_ERROR << 16);
+ callDone (SCpnt);
+ return NULL;
+ }
+ break;
+ default:
+ break;
+ }
+
+ if (!(pScb->buff_ptr = kmalloc (xfer_size, GFP_KERNEL))) {
+ printk
+ ("megaraid: Insufficient mem for M_RD_IOCTL_CMD_NEW.\n");
+ SCpnt->result = (DID_ERROR << 16);
+ callDone (SCpnt);
+ return NULL;
+ }
+
+ copy_from_user (pScb->buff_ptr, user_area, xfer_size);
+ pScb->iDataSize = xfer_size;
+
+ switch (data[0]) {
+ case DCMD_FC_CMD:
+ switch (data[1]) {
+ case DCMD_FC_READ_NVRAM_CONFIG:
+ case DCMD_GET_DISK_CONFIG:
+ {
+ if ((ulong) pScb->
+ buff_ptr & (PAGE_SIZE - 1)) {
+ printk
+ ("megaraid:user address not sufficient Error.\n");
+ SCpnt->result =
+ (DID_ERROR << 16);
+ callDone (SCpnt);
+ return NULL;
+ }
+
+ /*building SG list */
+ mega_build_kernel_sg (pScb->buff_ptr,
+ xfer_size,
+ pScb, mbox);
+ break;
+ }
+ default:
+ break;
+ } /*switch (data[1]) */
+ break;
+ }
+
+ }
+#endif
+
+ mbox->cmd = data[0];
+ mbox->channel = data[1];
+ mbox->param = data[2];
+ mbox->pad[0] = data[3];
+ mbox->logdrv = data[4];
+
+ if (SCpnt->cmnd[0] == M_RD_IOCTL_CMD_NEW) {
+ switch (data[0]) {
+ case FW_FIRE_WRITE:
+ mbox->cmd = FW_FIRE_WRITE;
+ mbox->channel = data[1]; /* Current Block Number */
+ set_mbox_xfer_addr (megaCfg, pScb, mbox, TO_DEVICE);
+ mbox->numsgelements = 0;
+ break;
+ case FW_FIRE_FLASH:
+ mbox->cmd = FW_FIRE_FLASH;
+ mbox->channel = data[1] | 0x80; /* Origin */
+ set_mbox_xfer_addr (megaCfg, pScb, mbox, TO_DEVICE);
+ mbox->numsgelements = 0;
+ break;
+ case DCMD_FC_CMD:
+ *(mboxdata + 0) = data[0]; /*mailbox byte 0: DCMD_FC_CMD */
+ *(mboxdata + 2) = data[1]; /*sub command */
+ switch (data[1]) {
+ case DCMD_FC_READ_NVRAM_CONFIG:
+ case DCMD_FC_READ_NVRAM_CONFIG_64:
+ /* number of elements in SG list */
+ *(mboxdata + 3) = mbox->numsgelements;
+ if (megaCfg->flag & BOARD_64BIT)
+ *(mboxdata + 2) =
+ DCMD_FC_READ_NVRAM_CONFIG_64;
+ break;
+ case DCMD_WRITE_CONFIG:
+ case DCMD_WRITE_CONFIG_64:
+ if (megaCfg->flag & BOARD_64BIT)
+ *(mboxdata + 2) = DCMD_WRITE_CONFIG_64;
+ set_mbox_xfer_addr (megaCfg, pScb, mbox,
+ TO_DEVICE);
+ mbox->numsgelements = 0;
+ break;
+ case DCMD_GET_DISK_CONFIG:
+ case DCMD_GET_DISK_CONFIG_64:
+ if (megaCfg->flag & BOARD_64BIT)
+ *(mboxdata + 2) =
+ DCMD_GET_DISK_CONFIG_64;
+ *(mboxdata + 3) = data[2]; /*number of elements in SG list */
+ /*nr of elements in SG list */
+ *(mboxdata + 4) = mbox->numsgelements;
+ break;
+ case DCMD_DELETE_LOGDRV:
+ case DCMD_DELETE_DRIVEGROUP:
+ case NC_SUBOP_ENQUIRY3:
+ *(mboxdata + 3) = data[2];
+ set_mbox_xfer_addr (megaCfg, pScb, mbox,
+ FROMTO_DEVICE);
+ mbox->numsgelements = 0;
+ break;
+ case DCMD_CHANGE_LDNO:
+ case DCMD_CHANGE_LOOPID:
+ *(mboxdata + 3) = data[2];
+ *(mboxdata + 4) = data[3];
+ set_mbox_xfer_addr (megaCfg, pScb, mbox,
+ TO_DEVICE);
+ mbox->numsgelements = 0;
+ break;
+ default:
+ set_mbox_xfer_addr (megaCfg, pScb, mbox,
+ FROMTO_DEVICE);
+ mbox->numsgelements = 0;
+ break;
+ } /*switch */
+ break;
+ default:
+ set_mbox_xfer_addr (megaCfg, pScb, mbox, FROMTO_DEVICE);
+ mbox->numsgelements = 0;
+ break;
+ }
+ } else {
+
+ mbox->numsgelements = mega_build_sglist (megaCfg, pScb,
+ (u32 *) & mbox->
+ xferaddr,
+ (u32 *) & seg);
+
+ /* Handling some of the fw special commands */
+ switch (data[0]) {
+ case 6: /* START_DEV */
+ mbox->xferaddr = *((u32 *) & data[i + 6]);
+ break;
+ default:
+ break;
+ }
+
+ for (i = 0; i < (SCpnt->request_bufflen - 6); i++) {
+ data[i] = data[i + 6];
+ }
+ }
+
+ return (pScb);
+}
+
+
+static void mega_build_kernel_sg (char *barea, ulong xfersize, mega_scb * pScb, mega_ioctl_mbox * mbox)
+{
+ ulong i, buffer_area, len, end, end_page, x, idx = 0;
+
+ buffer_area = (ulong) barea;
+ i = buffer_area;
+ end = buffer_area + xfersize;
+ end_page = (end) & ~(PAGE_SIZE - 1);
+
+ do {
+ len = PAGE_SIZE - (i % PAGE_SIZE);
+ x = pScb->sgList[idx].address =
+ virt_to_bus ((volatile void *) i);
+ pScb->sgList[idx].length = len;
+ i += len;
+ idx++;
+ } while (i < end_page);
+
+	/* end and i are ulong: an unsigned difference can never be < 0,
+	 * so compare directly to detect the "walked past the end" case. */
+	if (end < i) {
+		printk ("megaraid:Error in user address\n");
+	}
+
+ if (end - i) {
+ pScb->sgList[idx].address = virt_to_bus ((volatile void *) i);
+ pScb->sgList[idx].length = end - i;
+ idx++;
+ }
+ mbox->xferaddr = virt_to_bus (pScb->sgList);
+ mbox->numsgelements = idx;
+}
+
+#endif /* KERNEL_VERSION(2,3,0) */
+
+#if DEBUG
+static unsigned int cum_time = 0;
+static unsigned int cum_time_cnt = 0;
+
+static void showMbox (mega_scb * pScb)
+{
+ mega_mailbox *mbox;
+
+ if (pScb == NULL)
+ return;
+
+ mbox = (mega_mailbox *) pScb->mboxData;
+ printk ("%u cmd:%x id:%x #scts:%x lba:%x addr:%x logdrv:%x #sg:%x\n",
+ pScb->SCpnt->pid,
+ mbox->cmd, mbox->cmdid, mbox->numsectors,
+ mbox->lba, mbox->xferaddr, mbox->logdrv, mbox->numsgelements);
+}
+
+#endif
+
+/*--------------------------------------------------------------------
+ * Interrupt service routine
+ *--------------------------------------------------------------------*/
+static void megaraid_isr (int irq, void *devp, struct pt_regs *regs)
+{
+ IO_LOCK_T
+ mega_host_config * megaCfg;
+ u_char byte, idx, sIdx, tmpBox[MAILBOX_SIZE];
+ u32 dword = 0;
+ mega_mailbox *mbox;
+ mega_scb *pScb;
+ u_char qCnt, qStatus;
+ u_char completed[MAX_FIRMWARE_STATUS];
+ Scsi_Cmnd *SCpnt;
+
+ megaCfg = (mega_host_config *) devp;
+ mbox = (mega_mailbox *) tmpBox;
+
+ if (megaCfg->host->irq == irq) {
+ if (megaCfg->flag & IN_ISR) {
+ TRACE (("ISR called reentrantly!!\n"));
+ printk ("ISR called reentrantly!!\n");
+ }
+ megaCfg->flag |= IN_ISR;
+
+ if (mega_busyWaitMbox (megaCfg)) {
+ printk (KERN_WARNING "Error: mailbox busy in isr!\n");
+ }
+
+ /* Check if a valid interrupt is pending */
+ if (megaCfg->flag & BOARD_QUARTZ) {
+ dword = RDOUTDOOR (megaCfg);
+ if (dword != 0x10001234) {
+ /* Spurious interrupt */
+ megaCfg->flag &= ~IN_ISR;
+ return;
+ }
+ } else {
+ byte = READ_PORT (megaCfg->host->io_port, INTR_PORT);
+ if ((byte & VALID_INTR_BYTE) == 0) {
+ /* Spurious interrupt */
+ megaCfg->flag &= ~IN_ISR;
+ return;
+ }
+ WRITE_PORT (megaCfg->host->io_port, INTR_PORT, byte);
+ }
+
+ for (idx = 0; idx < MAX_FIRMWARE_STATUS; idx++)
+ completed[idx] = 0;
+
+ IO_LOCK;
+
+ megaCfg->nInterrupts++;
+ qCnt = 0xff;
+ while ((qCnt = megaCfg->mbox->numstatus) == 0xFF) ;
+
+ qStatus = 0xff;
+ while ((qStatus = megaCfg->mbox->status) == 0xFF) ;
+
+ /* Get list of completed requests */
+ for (idx = 0; idx < qCnt; idx++) {
+ while ((sIdx = megaCfg->mbox->completed[idx]) == 0xFF) {
+ printk ("p");
+ }
+ completed[idx] = sIdx;
+ sIdx = 0xFF;
+ }
+
+ if (megaCfg->flag & BOARD_QUARTZ) {
+ WROUTDOOR (megaCfg, dword);
+ /* Acknowledge interrupt */
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,4,0)
+ /* In this case mbox contains physical address */
+#if 0
+ WRINDOOR (megaCfg, megaCfg->adjdmahandle64 | 0x2);
+#else
+ WRINDOOR (megaCfg, 0x2);
+#endif
+
+#else
+
+#if 0
+ WRINDOOR (megaCfg, virt_to_bus (megaCfg->mbox) | 0x2);
+#else
+ WRINDOOR (megaCfg, 0x2);
+#endif
+
+#endif
+
+#if 0
+ while (RDINDOOR (megaCfg) & 0x02) ;
+#endif
+ } else {
+ CLEAR_INTR (megaCfg->host->io_port);
+ }
+
+#if DEBUG
+ if (qCnt >= MAX_FIRMWARE_STATUS) {
+ printk ("megaraid_isr: cmplt=%d ", qCnt);
+ }
+#endif
+
+ for (idx = 0; idx < qCnt; idx++) {
+ sIdx = completed[idx];
+ if ((sIdx > 0) && (sIdx <= MAX_COMMANDS)) {
+ pScb = &megaCfg->scbList[sIdx - 1];
+
+ /* ASSERT(pScb->state == SCB_ISSUED); */
+
+#if DEBUG
+ if (((jiffies) - pScb->isrcount) > maxCmdTime) {
+ maxCmdTime = (jiffies) - pScb->isrcount;
+ printk
+ ("megaraid_isr : cmd time = %u\n",
+ maxCmdTime);
+ }
+#endif
+ /*
+ * Assuming that the scsi command, for which
+ * an abort request was received earlier, has
+ * completed.
+ */
+ if (pScb->state == SCB_ABORTED) {
+ SCpnt = pScb->SCpnt;
+ }
+ if (pScb->state == SCB_RESET) {
+ SCpnt = pScb->SCpnt;
+ mega_freeSCB (megaCfg, pScb);
+ SCpnt->result = (DID_RESET << 16);
+ if (megaCfg->qCompletedH == NULL) {
+ megaCfg->qCompletedH =
+ megaCfg->qCompletedT =
+ SCpnt;
+ } else {
+ megaCfg->qCompletedT->
+ host_scribble =
+ (unsigned char *) SCpnt;
+ megaCfg->qCompletedT = SCpnt;
+ }
+ megaCfg->qCompletedT->host_scribble =
+ (unsigned char *) NULL;
+ megaCfg->qCcnt++;
+ continue;
+ }
+
+ /* We don't want the ISR routine to touch M_RD_IOCTL_CMD_NEW commands, so
+ * don't mark them as complete, instead we pop their semaphore so
+ * that the queue routine can finish them off
+ */
+ if (pScb->SCpnt->cmnd[0] == M_RD_IOCTL_CMD_NEW) {
+ /* save the status byte for the queue routine to use */
+ pScb->SCpnt->result = qStatus;
+ up (&pScb->ioctl_sem);
+ } else {
+ /* Mark command as completed */
+ mega_cmd_done (megaCfg, pScb, qStatus);
+ }
+ } else {
+ printk
+ ("megaraid: wrong cmd id completed from firmware:id=%x\n",
+ sIdx);
+ }
+ }
+
+ mega_rundoneq (megaCfg);
+
+ megaCfg->flag &= ~IN_ISR;
+ /* Loop through any pending requests */
+ mega_runpendq (megaCfg);
+ IO_UNLOCK;
+
+ }
+
+}
/*==================================================*/
/* Wait until the controller's mailbox is available */
/*==================================================*/
+
static int mega_busyWaitMbox (mega_host_config * megaCfg)
{
- mega_mailbox *mbox = (mega_mailbox *) megaCfg->mbox;
- long counter;
-
- for (counter = 0; counter < 10000; counter++) {
- if (!mbox->busy) {
- return 0;
- }
- udelay (100);
- barrier();
- }
- return -1; /* give up after 1 second */
+ mega_mailbox *mbox = (mega_mailbox *) megaCfg->mbox;
+ long counter;
+
+ for (counter = 0; counter < 10000; counter++) {
+ if (!mbox->busy) {
+ return 0;
+ }
+ udelay (100);
+ barrier ();
+ }
+ return -1; /* give up after 1 second */
}
/*=====================================================
* -1: the command was not actually issued out
* othercases:
* intr==0, return ScsiStatus, i.e. mbox->status
- * intr==1, return 0
+ * intr==1, return 0
*=====================================================
*/
-static int megaIssueCmd (mega_host_config * megaCfg,
- u_char * mboxData,
- mega_scb * pScb,
- int intr)
+static int megaIssueCmd (mega_host_config * megaCfg, u_char * mboxData,
+ mega_scb * pScb, int intr)
{
- mega_mailbox *mbox = (mega_mailbox *) megaCfg->mbox;
- u_char byte;
- u32 cmdDone;
- u32 phys_mbox;
- u8 retval=-1;
+ volatile mega_mailbox *mbox = (mega_mailbox *) megaCfg->mbox;
- mboxData[0x1] = (pScb ? pScb->idx + 1: 0x0); /* Set cmdid */
- mboxData[0xF] = 1; /* Set busy */
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,4,0)
+ volatile mega_mailbox64 *mbox64 = (mega_mailbox64 *) megaCfg->mbox64;
+#endif
- phys_mbox = virt_to_bus (megaCfg->mbox);
+ u_char byte;
+
+#ifdef __LP64__
+ u64 phys_mbox;
+#else
+ u32 phys_mbox;
+#endif
+ u8 retval = -1;
+
+ mboxData[0x1] = (pScb ? pScb->idx + 1 : 0xFE); /* Set cmdid */
+ mboxData[0xF] = 1; /* Set busy */
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,4,0)
+ /* In this case mbox contains physical address */
+ phys_mbox = megaCfg->adjdmahandle64;
+#else
+ phys_mbox = virt_to_bus (megaCfg->mbox);
+#endif
#if DEBUG
- showMbox(pScb);
+	showMbox (pScb);
#endif
- /* Wait until mailbox is free */
- if (mega_busyWaitMbox (megaCfg)) {
- printk("Blocked mailbox......!!\n");
- udelay(1000);
+ /* Wait until mailbox is free */
+ if (mega_busyWaitMbox (megaCfg)) {
+ printk ("Blocked mailbox......!!\n");
+ udelay (1000);
#if DEBUG
- showMbox(pLastScb);
-#endif
-
- /* Abort command */
- if (pScb == NULL) {
- TRACE(("NULL pScb in megaIssue\n"));
- printk("NULL pScb in megaIssue\n");
- }
- mega_cmd_done (megaCfg, pScb, 0x08);
- return -1;
- }
-
- pLastScb = pScb;
-
- /* Copy mailbox data into host structure */
- megaCfg->mbox64->xferSegment = 0;
- memcpy (mbox, mboxData, 16);
-
- /* Kick IO */
- if (intr) {
-
- /* Issue interrupt (non-blocking) command */
- if (megaCfg->flag & BOARD_QUARTZ) {
- mbox->mraid_poll = 0;
- mbox->mraid_ack = 0;
- WRINDOOR (megaCfg, phys_mbox | 0x1);
- }
- else {
- ENABLE_INTR (megaCfg->host->io_port);
- ISSUE_COMMAND (megaCfg->host->io_port);
- }
- pScb->state = SCB_ISSUED;
-
- retval=0;
- }
- else { /* Issue non-ISR (blocking) command */
- disable_irq(megaCfg->host->irq);
- if (megaCfg->flag & BOARD_QUARTZ) {
- mbox->mraid_poll = 0;
- mbox->mraid_ack = 0;
- WRINDOOR (megaCfg, phys_mbox | 0x1);
-
- while ((cmdDone = RDOUTDOOR (megaCfg)) != 0x10001234);
- WROUTDOOR (megaCfg, cmdDone);
-
- if (pScb) {
- mega_cmd_done (megaCfg, pScb, mbox->status);
- }
-
- WRINDOOR (megaCfg, phys_mbox | 0x2);
- while (RDINDOOR (megaCfg) & 0x2);
-
- }
- else {
- DISABLE_INTR (megaCfg->host->io_port);
- ISSUE_COMMAND (megaCfg->host->io_port);
-
- while (!((byte = READ_PORT (megaCfg->host->io_port, INTR_PORT)) & INTR_VALID));
- WRITE_PORT (megaCfg->host->io_port, INTR_PORT, byte);
-
- ENABLE_INTR (megaCfg->host->io_port);
- CLEAR_INTR (megaCfg->host->io_port);
-
- if (pScb) {
- mega_cmd_done (megaCfg, pScb, mbox->status);
- }
- else {
- TRACE (("Error: NULL pScb!\n"));
- }
- }
- enable_irq(megaCfg->host->irq);
- retval=mbox->status;
- }
+ showMbox (pLastScb);
+#endif
+
+ /* Abort command */
+ if (pScb == NULL) {
+ TRACE (("NULL pScb in megaIssue\n"));
+ printk ("NULL pScb in megaIssue\n");
+ }
+ mega_cmd_done (megaCfg, pScb, 0x08);
+ return -1;
+ }
+
+ pLastScb = pScb;
+
+ /* Copy mailbox data into host structure */
+ megaCfg->mbox64->xferSegment_lo = 0;
+ megaCfg->mbox64->xferSegment_hi = 0;
+
+ memcpy ((char *) mbox, mboxData, 16);
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,4,0)
+ switch (mboxData[0]) {
+ case MEGA_MBOXCMD_LREAD64:
+ case MEGA_MBOXCMD_LWRITE64:
+ mbox64->xferSegment_lo = mbox->xferaddr;
+ mbox64->xferSegment_hi = 0;
+ mbox->xferaddr = 0xFFFFFFFF;
+ break;
+ }
+#endif
+
+ /* Kick IO */
+ if (intr) {
+ /* Issue interrupt (non-blocking) command */
+ if (megaCfg->flag & BOARD_QUARTZ) {
+ mbox->mraid_poll = 0;
+ mbox->mraid_ack = 0;
+
+ WRINDOOR (megaCfg, phys_mbox | 0x1);
+ } else {
+ ENABLE_INTR (megaCfg->host->io_port);
+ ISSUE_COMMAND (megaCfg->host->io_port);
+ }
+ pScb->state = SCB_ISSUED;
+
+ retval = 0;
+ } else { /* Issue non-ISR (blocking) command */
+ disable_irq (megaCfg->host->irq);
+ if (megaCfg->flag & BOARD_QUARTZ) {
+ mbox->mraid_poll = 0;
+ mbox->mraid_ack = 0;
+ mbox->numstatus = 0xFF;
+ mbox->status = 0xFF;
+ WRINDOOR (megaCfg, phys_mbox | 0x1);
+
+ while (mbox->numstatus == 0xFF) ;
+ while (mbox->status == 0xFF) ;
+ while (mbox->mraid_poll != 0x77) ;
+ mbox->mraid_poll = 0;
+ mbox->mraid_ack = 0x77;
+
+ /* while ((cmdDone = RDOUTDOOR (megaCfg)) != 0x10001234);
+ WROUTDOOR (megaCfg, cmdDone); */
+
+ if (pScb) {
+ mega_cmd_done (megaCfg, pScb, mbox->status);
+ }
+
+ WRINDOOR (megaCfg, phys_mbox | 0x2);
+ while (RDINDOOR (megaCfg) & 0x2) ;
+
+ } else {
+ DISABLE_INTR (megaCfg->host->io_port);
+ ISSUE_COMMAND (megaCfg->host->io_port);
+
+ while (!
+ ((byte =
+ READ_PORT (megaCfg->host->io_port,
+ INTR_PORT)) & INTR_VALID)) ;
+ WRITE_PORT (megaCfg->host->io_port, INTR_PORT, byte);
+
+ ENABLE_INTR (megaCfg->host->io_port);
+ CLEAR_INTR (megaCfg->host->io_port);
+
+ if (pScb) {
+ mega_cmd_done (megaCfg, pScb, mbox->status);
+ } else {
+ TRACE (("Error: NULL pScb!\n"));
+ }
+ }
+ enable_irq (megaCfg->host->irq);
+ retval = mbox->status;
+ }
#if DEBUG
- while (mega_busyWaitMbox (megaCfg)) {
- printk("Blocked mailbox on exit......!\n");
- udelay(1000);
- }
+ while (mega_busyWaitMbox (megaCfg)) {
+ printk(KERN_ERR "Blocked mailbox on exit......!\n");
+ udelay (1000);
+ }
#endif
- return retval;
+ return retval;
}
/*-------------------------------------------------------------------
* Copies data to SGLIST
*-------------------------------------------------------------------*/
-static int mega_build_sglist (mega_host_config * megaCfg, mega_scb * scb,
- u32 * buffer, u32 * length)
+/* Note:
+ For 64 bit cards, we need a minimum of one SG element for read/write
+*/
+
+static int
+mega_build_sglist (mega_host_config * megaCfg, mega_scb * scb,
+ u32 * buffer, u32 * length)
{
- struct scatterlist *sgList;
- int idx;
-
- /* Scatter-gather not used */
- if (scb->SCpnt->use_sg == 0) {
- *buffer = virt_to_bus (scb->SCpnt->request_buffer);
- *length = (u32) scb->SCpnt->request_bufflen;
- return 0;
- }
-
- sgList = (struct scatterlist *) scb->SCpnt->request_buffer;
- if (scb->SCpnt->use_sg == 1) {
- *buffer = virt_to_bus (sgList[0].address);
- *length = (u32) sgList[0].length;
- return 0;
- }
-
- /* Copy Scatter-Gather list info into controller structure */
- for (idx = 0; idx < scb->SCpnt->use_sg; idx++) {
- scb->sgList[idx].address = virt_to_bus (sgList[idx].address);
- scb->sgList[idx].length = (u32) sgList[idx].length;
- }
-
- /* Reset pointer and length fields */
- *buffer = virt_to_bus (scb->sgList);
- *length = 0;
-
- /* Return count of SG requests */
- return scb->SCpnt->use_sg;
+ struct scatterlist *sgList;
+ int idx;
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,4,0)
+ int sgcnt;
+#endif
+
+ mega_mailbox *mbox = NULL;
+
+ mbox = (mega_mailbox *) scb->mboxData;
+ /* Scatter-gather not used */
+ if (scb->SCpnt->use_sg == 0) {
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,4,0)
+ scb->dma_h_bulkdata = pci_map_single (megaCfg->dev,
+ scb->SCpnt->request_buffer,
+ scb->SCpnt->request_bufflen,
+ scb->dma_direction);
+ /* We need to handle special commands like READ64, WRITE64
+ as they need a minimum of 1 SG irrespective of actaully SG
+ */
+ if ((megaCfg->flag & BOARD_64BIT) &&
+ ((mbox->cmd == MEGA_MBOXCMD_LREAD64) ||
+ (mbox->cmd == MEGA_MBOXCMD_LWRITE64))) {
+ scb->sg64List[0].address = scb->dma_h_bulkdata;
+ scb->sg64List[0].length = scb->SCpnt->request_bufflen;
+ *buffer = scb->dma_sghandle64;
+ *length = 0;
+ scb->sglist_count = 1;
+ return 1;
+ } else {
+ *buffer = scb->dma_h_bulkdata;
+ *length = (u32) scb->SCpnt->request_bufflen;
+ }
+#else
+ *buffer = virt_to_bus (scb->SCpnt->request_buffer);
+ *length = (u32) scb->SCpnt->request_bufflen;
+#endif
+ return 0;
+ }
+
+ sgList = (struct scatterlist *) scb->SCpnt->request_buffer;
+
+ if (scb->SCpnt->use_sg == 1) {
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,4,0)
+ scb->dma_h_bulkdata = pci_map_single (megaCfg->dev,
+ sgList[0].address,
+ sgList[0].length, scb->dma_direction);
+
+ if ((megaCfg->flag & BOARD_64BIT) &&
+ ((mbox->cmd == MEGA_MBOXCMD_LREAD64) ||
+ (mbox->cmd == MEGA_MBOXCMD_LWRITE64))) {
+ scb->sg64List[0].address = scb->dma_h_bulkdata;
+ scb->sg64List[0].length = scb->SCpnt->request_bufflen;
+ *buffer = scb->dma_sghandle64;
+ *length = 0;
+ scb->sglist_count = 1;
+ return 1;
+ } else {
+ *buffer = scb->dma_h_bulkdata;
+ *length = (u32) sgList[0].length;
+ }
+#else
+ *buffer = virt_to_bus (sgList[0].address);
+ *length = (u32) sgList[0].length;
+#endif
+
+ return 0;
+ }
+
+ /* Copy Scatter-Gather list info into controller structure */
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,4,0)
+ sgcnt = pci_map_sg (megaCfg->dev,
+ sgList, scb->SCpnt->use_sg, scb->dma_direction);
+
+ /* Determine the validity of the new count */
+ if (sgcnt == 0)
+ printk ("pci_map_sg returned zero!!! ");
+
+ for (idx = 0; idx < sgcnt; idx++, sgList++) {
+
+ if ((megaCfg->flag & BOARD_64BIT) &&
+ ((mbox->cmd == MEGA_MBOXCMD_LREAD64) ||
+ (mbox->cmd == MEGA_MBOXCMD_LWRITE64))) {
+ scb->sg64List[idx].address = sg_dma_address (sgList);
+ scb->sg64List[idx].length = sg_dma_len (sgList);
+ } else {
+ scb->sgList[idx].address = sg_dma_address (sgList);
+ scb->sgList[idx].length = sg_dma_len (sgList);
+ }
+
+ }
+
+#else
+ for (idx = 0; idx < scb->SCpnt->use_sg; idx++) {
+ scb->sgList[idx].address = virt_to_bus (sgList[idx].address);
+ scb->sgList[idx].length = (u32) sgList[idx].length;
+ }
+#endif
+
+ /* Reset pointer and length fields */
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,4,0)
+ *buffer = scb->dma_sghandle64;
+ scb->sglist_count = scb->SCpnt->use_sg;
+#else
+ *buffer = virt_to_bus (scb->sgList);
+#endif
+ *length = 0;
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,4,0)
+ /* Return count of SG requests */
+ return sgcnt;
+#else
+ /* Return count of SG requests */
+ return scb->SCpnt->use_sg;
+#endif
}
/*--------------------------------------------------------------------
* 10 01 numstatus byte
* 11 01 status byte
*--------------------------------------------------------------------*/
-static int mega_register_mailbox (mega_host_config * megaCfg, u32 paddr)
+static int
+mega_register_mailbox (mega_host_config * megaCfg, u32 paddr)
{
- /* align on 16-byte boundry */
- megaCfg->mbox = &megaCfg->mailbox64.mailbox;
- megaCfg->mbox = (mega_mailbox *) ((((u32) megaCfg->mbox) + 16) & 0xfffffff0);
- megaCfg->mbox64 = (mega_mailbox64 *) (megaCfg->mbox - 4);
- paddr = (paddr + 4 + 16) & 0xfffffff0;
-
- /* Register mailbox area with the firmware */
- if (!(megaCfg->flag & BOARD_QUARTZ)) {
- WRITE_PORT (megaCfg->host->io_port, MBOX_PORT0, paddr & 0xFF);
- WRITE_PORT (megaCfg->host->io_port, MBOX_PORT1, (paddr >> 8) & 0xFF);
- WRITE_PORT (megaCfg->host->io_port, MBOX_PORT2, (paddr >> 16) & 0xFF);
- WRITE_PORT (megaCfg->host->io_port, MBOX_PORT3, (paddr >> 24) & 0xFF);
- WRITE_PORT (megaCfg->host->io_port, ENABLE_MBOX_REGION, ENABLE_MBOX_BYTE);
-
- CLEAR_INTR (megaCfg->host->io_port);
- ENABLE_INTR (megaCfg->host->io_port);
- }
- return 0;
-}
+ /* align on 16-byte boundry */
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,4,0)
+ megaCfg->mbox = &megaCfg->mailbox64ptr->mailbox;
+#else
+ megaCfg->mbox = &megaCfg->mailbox64.mailbox;
+#endif
+
+#ifdef __LP64__
+ megaCfg->mbox = (mega_mailbox *) ((((u64) megaCfg->mbox) + 16) & ((u64) (-1) ^ 0x0F));
+ megaCfg->adjdmahandle64 = (megaCfg->dma_handle64 + 16) & ((u64) (-1) ^ 0x0F);
+ megaCfg->mbox64 = (mega_mailbox64 *) ((u_char *) megaCfg->mbox - sizeof (u64));
+ paddr = (paddr + 4 + 16) & ((u64) (-1) ^ 0x0F);
+#else
+ megaCfg->mbox
+ = (mega_mailbox *) ((((u32) megaCfg->mbox) + 16) & 0xFFFFFFF0);
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,4,0)
+ megaCfg->adjdmahandle64 = ((megaCfg->dma_handle64 + 16) & 0xFFFFFFF0);
+#endif
+
+ megaCfg->mbox64 = (mega_mailbox64 *) ((u_char *) megaCfg->mbox - 8);
+ paddr = (paddr + 4 + 16) & 0xFFFFFFF0;
+#endif
+
+ /* Register mailbox area with the firmware */
+ if (!(megaCfg->flag & BOARD_QUARTZ)) {
+ WRITE_PORT (megaCfg->host->io_port, MBOX_PORT0, paddr & 0xFF);
+ WRITE_PORT (megaCfg->host->io_port, MBOX_PORT1,
+ (paddr >> 8) & 0xFF);
+ WRITE_PORT (megaCfg->host->io_port, MBOX_PORT2,
+ (paddr >> 16) & 0xFF);
+ WRITE_PORT (megaCfg->host->io_port, MBOX_PORT3,
+ (paddr >> 24) & 0xFF);
+ WRITE_PORT (megaCfg->host->io_port, ENABLE_MBOX_REGION,
+ ENABLE_MBOX_BYTE);
+
+ CLEAR_INTR (megaCfg->host->io_port);
+ ENABLE_INTR (megaCfg->host->io_port);
+ }
+ return 0;
+}
/*---------------------------------------------------------------------------
* mega_Convert8ldTo40ld() -- takes all info in AdapterInquiry structure and
* puts it into ProductInfo and Enquiry3 structures for later use
*---------------------------------------------------------------------------*/
-static void mega_Convert8ldTo40ld( mega_RAIDINQ *inquiry,
- mega_Enquiry3 *enquiry3,
- megaRaidProductInfo *productInfo )
+static void mega_Convert8ldTo40ld (mega_RAIDINQ * inquiry,
+ mega_Enquiry3 * enquiry3,
+ megaRaidProductInfo * productInfo)
{
- int i;
-
- productInfo->MaxConcCmds = inquiry->AdpInfo.MaxConcCmds;
- enquiry3->rbldRate = inquiry->AdpInfo.RbldRate;
- productInfo->SCSIChanPresent = inquiry->AdpInfo.ChanPresent;
- for (i=0;i<4;i++) {
- productInfo->FwVer[i] = inquiry->AdpInfo.FwVer[i];
- productInfo->BiosVer[i] = inquiry->AdpInfo.BiosVer[i];
- }
- enquiry3->cacheFlushInterval = inquiry->AdpInfo.CacheFlushInterval;
- productInfo->DramSize = inquiry->AdpInfo.DramSize;
-
- enquiry3->numLDrv = inquiry->LogdrvInfo.NumLDrv;
- for (i=0;i<MAX_LOGICAL_DRIVES;i++) {
- enquiry3->lDrvSize[i] = inquiry->LogdrvInfo.LDrvSize[i];
- enquiry3->lDrvProp[i] = inquiry->LogdrvInfo.LDrvProp[i];
- enquiry3->lDrvState[i] = inquiry->LogdrvInfo.LDrvState[i];
- }
-
- for (i=0;i<(MAX_PHYSICAL_DRIVES);i++) {
- enquiry3->pDrvState[i] = inquiry->PhysdrvInfo.PDrvState[i];
- }
-}
+ int i;
+ productInfo->MaxConcCmds = inquiry->AdpInfo.MaxConcCmds;
+ enquiry3->rbldRate = inquiry->AdpInfo.RbldRate;
+ productInfo->SCSIChanPresent = inquiry->AdpInfo.ChanPresent;
+
+ for (i = 0; i < 4; i++) {
+ productInfo->FwVer[i] = inquiry->AdpInfo.FwVer[i];
+ productInfo->BiosVer[i] = inquiry->AdpInfo.BiosVer[i];
+ }
+ enquiry3->cacheFlushInterval = inquiry->AdpInfo.CacheFlushInterval;
+ productInfo->DramSize = inquiry->AdpInfo.DramSize;
+
+ enquiry3->numLDrv = inquiry->LogdrvInfo.NumLDrv;
+
+ for (i = 0; i < MAX_LOGICAL_DRIVES; i++) {
+ enquiry3->lDrvSize[i] = inquiry->LogdrvInfo.LDrvSize[i];
+ enquiry3->lDrvProp[i] = inquiry->LogdrvInfo.LDrvProp[i];
+ enquiry3->lDrvState[i]
+ = inquiry->LogdrvInfo.LDrvState[i];
+ }
+
+ for (i = 0; i < (MAX_PHYSICAL_DRIVES); i++) {
+ enquiry3->pDrvState[i]
+ = inquiry->PhysdrvInfo.PDrvState[i];
+ }
+}
/*-------------------------------------------------------------------
* Issue an adapter info query to the controller
*-------------------------------------------------------------------*/
static int mega_i_query_adapter (mega_host_config * megaCfg)
{
- mega_Enquiry3 *enquiry3Pnt;
- mega_mailbox *mbox;
- u_char mboxData[16];
- u32 paddr;
- u8 retval;
+ mega_Enquiry3 *enquiry3Pnt;
+ mega_mailbox *mbox;
+ u_char mboxData[16];
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,4,0)
+ dma_addr_t raid_inq_dma_handle = 0, prod_info_dma_handle = 0, enquiry3_dma_handle = 0;
+#endif
+ u8 retval;
+
+ /* Initialize adapter inquiry mailbox */
- /* Initialize adapter inquiry mailbox*/
- paddr = virt_to_bus (megaCfg->mega_buffer);
- mbox = (mega_mailbox *) mboxData;
+ mbox = (mega_mailbox *) mboxData;
- memset ((void *) megaCfg->mega_buffer, 0, sizeof (megaCfg->mega_buffer));
- memset (mbox, 0, 16);
+ memset ((void *) megaCfg->mega_buffer, 0,
+ sizeof (megaCfg->mega_buffer));
+ memset (mbox, 0, 16);
/*
- * Try to issue Enquiry3 command
- * if not suceeded, then issue MEGA_MBOXCMD_ADAPTERINQ command and
+ * Try to issue Enquiry3 command
+	 * if not succeeded, then issue MEGA_MBOXCMD_ADAPTERINQ command and
* update enquiry3 structure
*/
- mbox->xferaddr = virt_to_bus ( (void*) megaCfg->mega_buffer);
- /* Initialize mailbox databuffer addr */
- enquiry3Pnt = (mega_Enquiry3 *) megaCfg->mega_buffer;
- /* point mega_Enguiry3 to the data buf */
-
- mboxData[0]=FC_NEW_CONFIG ; /* i.e. mbox->cmd=0xA1 */
- mboxData[2]=NC_SUBOP_ENQUIRY3; /* i.e. 0x0F */
- mboxData[3]=ENQ3_GET_SOLICITED_FULL; /* i.e. 0x02 */
-
- /* Issue a blocking command to the card */
- if ( (retval=megaIssueCmd(megaCfg, mboxData, NULL, 0)) != 0 )
- { /* the adapter does not support 40ld*/
-
- mega_RAIDINQ adapterInquiryData;
- mega_RAIDINQ *adapterInquiryPnt = &adapterInquiryData;
-
- mbox->xferaddr = virt_to_bus ( (void*) adapterInquiryPnt);
-
- mbox->cmd = MEGA_MBOXCMD_ADAPTERINQ; /*issue old 0x05 command to adapter*/
- /* Issue a blocking command to the card */;
- retval=megaIssueCmd (megaCfg, mboxData, NULL, 0);
-
- /*update Enquiry3 and ProductInfo structures with mega_RAIDINQ structure*/
- mega_Convert8ldTo40ld( adapterInquiryPnt,
- enquiry3Pnt,
- (megaRaidProductInfo * ) &megaCfg->productInfo );
-
- }
- else{ /* adapter supports 40ld */
- megaCfg->flag |= BOARD_40LD;
-
- /*get productInfo, which is static information and will be unchanged*/
- mbox->xferaddr = virt_to_bus ( (void*) &megaCfg->productInfo );
-
- mboxData[0]=FC_NEW_CONFIG ; /* i.e. mbox->cmd=0xA1 */
- mboxData[2]=NC_SUBOP_PRODUCT_INFO; /* i.e. 0x0E */
-
- if( (retval=megaIssueCmd(megaCfg, mboxData, NULL, 0)) != 0 )
- printk("ami:Product_info (0x0E) cmd failed with error: %d\n", retval);
-
- }
-
- megaCfg->host->max_channel = megaCfg->productInfo.SCSIChanPresent;
- megaCfg->host->max_id = 16; /* max targets per channel */
- /*(megaCfg->flag & BOARD_40LD)?FC_MAX_TARGETS_PER_CHANNEL:MAX_TARGET+1;*/
- megaCfg->host->max_lun = /* max lun */
- (megaCfg->flag & BOARD_40LD) ? FC_MAX_LOGICAL_DRIVES : MAX_LOGICAL_DRIVES;
- megaCfg->host->cmd_per_lun = MAX_CMD_PER_LUN;
-
- megaCfg->numldrv = enquiry3Pnt->numLDrv;
- megaCfg->max_cmds = megaCfg->productInfo.MaxConcCmds;
- if(megaCfg->max_cmds > MAX_COMMANDS) megaCfg->max_cmds = MAX_COMMANDS - 1;
-
- megaCfg->host->can_queue = megaCfg->max_cmds;
-
- if (megaCfg->host->can_queue >= MAX_COMMANDS) {
- megaCfg->host->can_queue = MAX_COMMANDS-1;
- }
-
-#ifdef HP /* use HP firmware and bios version encoding */
- sprintf (megaCfg->fwVer, "%c%d%d.%d%d",
- megaCfg->productInfo.FwVer[2],
- megaCfg->productInfo.FwVer[1] >> 8,
- megaCfg->productInfo.FwVer[1] & 0x0f,
- megaCfg->productInfo.FwVer[2] >> 8,
- megaCfg->productInfo.FwVer[2] & 0x0f);
- sprintf (megaCfg->biosVer, "%c%d%d.%d%d",
- megaCfg->productInfo.BiosVer[2],
- megaCfg->productInfo.BiosVer[1] >> 8,
- megaCfg->productInfo.BiosVer[1] & 0x0f,
- megaCfg->productInfo.BiosVer[2] >> 8,
- megaCfg->productInfo.BiosVer[2] & 0x0f);
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,4,0)
+ enquiry3_dma_handle = pci_map_single (megaCfg->dev,
+ (void *) megaCfg->mega_buffer,
+ (2 * 1024L), PCI_DMA_FROMDEVICE);
+
+ mbox->xferaddr = enquiry3_dma_handle;
+#else
+ /*Taken care */
+ mbox->xferaddr = virt_to_bus ((void *) megaCfg->mega_buffer);
+#endif
+
+ /* Initialize mailbox databuffer addr */
+ enquiry3Pnt = (mega_Enquiry3 *) megaCfg->mega_buffer;
+	/* point mega_Enquiry3 to the data buf */
+
+ mboxData[0] = FC_NEW_CONFIG; /* i.e. mbox->cmd=0xA1 */
+ mboxData[2] = NC_SUBOP_ENQUIRY3; /* i.e. 0x0F */
+ mboxData[3] = ENQ3_GET_SOLICITED_FULL; /* i.e. 0x02 */
+
+ /* Issue a blocking command to the card */
+ if ((retval = megaIssueCmd (megaCfg, mboxData, NULL, 0)) != 0) { /* the adapter does not support 40ld */
+ mega_RAIDINQ adapterInquiryData;
+ mega_RAIDINQ *adapterInquiryPnt = &adapterInquiryData;
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,4,0)
+ raid_inq_dma_handle = pci_map_single (megaCfg->dev,
+ (void *) adapterInquiryPnt,
+ sizeof (mega_RAIDINQ),
+ PCI_DMA_FROMDEVICE);
+ mbox->xferaddr = raid_inq_dma_handle;
+#else
+ /*taken care */
+ mbox->xferaddr = virt_to_bus ((void *) adapterInquiryPnt);
+#endif
+
+ mbox->cmd = MEGA_MBOXCMD_ADAPTERINQ; /*issue old 0x05 command to adapter */
+ /* Issue a blocking command to the card */ ;
+ retval = megaIssueCmd (megaCfg, mboxData, NULL, 0);
+
+ pci_unmap_single (megaCfg->dev,
+ raid_inq_dma_handle,
+ sizeof (mega_RAIDINQ), PCI_DMA_FROMDEVICE);
+
+ /*update Enquiry3 and ProductInfo structures with mega_RAIDINQ structure*/
+ mega_Convert8ldTo40ld (adapterInquiryPnt,
+ enquiry3Pnt,
+ (megaRaidProductInfo *) & megaCfg->
+ productInfo);
+
+ } else { /* adapter supports 40ld */
+ megaCfg->flag |= BOARD_40LD;
+
+ pci_unmap_single (megaCfg->dev,
+ enquiry3_dma_handle,
+ (2 * 1024L), PCI_DMA_FROMDEVICE);
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,4,0)
+/*get productInfo, which is static information and will be unchanged*/
+ prod_info_dma_handle
+ = pci_map_single (megaCfg->dev,
+ (void *) &megaCfg->productInfo,
+ sizeof (megaRaidProductInfo),
+ PCI_DMA_FROMDEVICE);
+ mbox->xferaddr = prod_info_dma_handle;
+#else
+ /*taken care */
+ mbox->xferaddr = virt_to_bus ((void *) &megaCfg->productInfo);
+#endif
+
+ mboxData[0] = FC_NEW_CONFIG; /* i.e. mbox->cmd=0xA1 */
+ mboxData[2] = NC_SUBOP_PRODUCT_INFO; /* i.e. 0x0E */
+
+ if ((retval = megaIssueCmd (megaCfg, mboxData, NULL, 0)) != 0)
+ printk ("ami:Product_info cmd failed with error: %d\n",
+ retval);
+
+ pci_unmap_single (megaCfg->dev,
+ prod_info_dma_handle,
+ sizeof (megaRaidProductInfo),
+ PCI_DMA_FROMDEVICE);
+ }
+
+ megaCfg->host->max_channel = megaCfg->productInfo.SCSIChanPresent;
+ megaCfg->host->max_id = 16; /* max targets per channel */
+ /*(megaCfg->flag & BOARD_40LD)?FC_MAX_TARGETS_PER_CHANNEL:MAX_TARGET+1; */
+ megaCfg->host->max_lun = /* max lun */
+ (megaCfg->
+ flag & BOARD_40LD) ? FC_MAX_LOGICAL_DRIVES : MAX_LOGICAL_DRIVES;
+ megaCfg->host->cmd_per_lun = MAX_CMD_PER_LUN;
+
+ megaCfg->numldrv = enquiry3Pnt->numLDrv;
+ megaCfg->max_cmds = megaCfg->productInfo.MaxConcCmds;
+ if (megaCfg->max_cmds > MAX_COMMANDS)
+ megaCfg->max_cmds = MAX_COMMANDS - 1;
+
+ megaCfg->host->can_queue = megaCfg->max_cmds - 1;
+
+#if 0
+ if (megaCfg->host->can_queue >= MAX_COMMANDS) {
+ megaCfg->host->can_queue = MAX_COMMANDS - 16;
+ }
+#endif
+
+#ifdef MEGA_HP_FIX /* use HP firmware and bios version encoding */
+ sprintf (megaCfg->fwVer, "%c%d%d.%d%d",
+ megaCfg->productInfo.FwVer[2],
+ megaCfg->productInfo.FwVer[1] >> 8,
+ megaCfg->productInfo.FwVer[1] & 0x0f,
+ megaCfg->productInfo.FwVer[2] >> 8,
+ megaCfg->productInfo.FwVer[2] & 0x0f);
+ sprintf (megaCfg->biosVer, "%c%d%d.%d%d",
+ megaCfg->productInfo.BiosVer[2],
+ megaCfg->productInfo.BiosVer[1] >> 8,
+ megaCfg->productInfo.BiosVer[1] & 0x0f,
+ megaCfg->productInfo.BiosVer[2] >> 8,
+ megaCfg->productInfo.BiosVer[2] & 0x0f);
#else
- memcpy (megaCfg->fwVer, (void *)megaCfg->productInfo.FwVer, 4);
+ memcpy (megaCfg->fwVer, (char *) megaCfg->productInfo.FwVer, 4);
megaCfg->fwVer[4] = 0;
- memcpy (megaCfg->biosVer, (void *)megaCfg->productInfo.BiosVer, 4);
+ memcpy (megaCfg->biosVer, (char *) megaCfg->productInfo.BiosVer, 4);
megaCfg->biosVer[4] = 0;
#endif
- printk ("megaraid: [%s:%s] detected %d logical drives" CRLFSTR,
- megaCfg->fwVer,
- megaCfg->biosVer,
- megaCfg->numldrv);
+ printk (KERN_INFO "megaraid: [%s:%s] detected %d logical drives" M_RD_CRLFSTR,
+ megaCfg->fwVer, megaCfg->biosVer, megaCfg->numldrv);
+	/*
+	 * It should be safe to unmap here, since no further DMA transactions
+	 * are required after this point
+	 */
return 0;
}
/*----------------------------------------------------------
* Returns data to be displayed in /proc/scsi/megaraid/X
*----------------------------------------------------------*/
-int megaraid_proc_info (char *buffer, char **start, off_t offset,
+
+static int megaraid_proc_info (char *buffer, char **start, off_t offset,
int length, int host_no, int inout)
{
- *start = buffer;
- return 0;
+ *start = buffer;
+ return 0;
}
-int mega_findCard (Scsi_Host_Template * pHostTmpl,
- u16 pciVendor, u16 pciDev,
- long flag)
+static int mega_findCard (Scsi_Host_Template * pHostTmpl,
+ u16 pciVendor, u16 pciDev, long flag)
{
- mega_host_config *megaCfg;
- struct Scsi_Host *host;
- u_char megaIrq;
- u32 megaBase;
- u16 numFound = 0;
-
- struct pci_dev *pdev = NULL;
-
- while ((pdev = pci_find_device (pciVendor, pciDev, pdev))) {
- if (pci_enable_device(pdev))
- continue;
- if ((flag & BOARD_QUARTZ) && (skip_id == -1)) {
- u16 magic;
- pci_read_config_word(pdev, PCI_CONF_AMISIG, &magic);
- if ((magic != AMI_SIGNATURE) && (magic != AMI_SIGNATURE_471))
- continue; /* not an AMI board */
- }
- printk (KERN_INFO "megaraid: found 0x%4.04x:0x%4.04x: in %s\n",
- pciVendor,
- pciDev,
- pdev->slot_name);
-
- /* Read the base port and IRQ from PCI */
- megaBase = pci_resource_start (pdev, 0);
- megaIrq = pdev->irq;
-
- if (flag & BOARD_QUARTZ)
- megaBase = (long) ioremap (megaBase, 128);
- else
- megaBase += 0x10;
-
- /* Initialize SCSI Host structure */
- host = scsi_register (pHostTmpl, sizeof (mega_host_config));
- if(host == NULL)
- continue;
- megaCfg = (mega_host_config *) host->hostdata;
- memset (megaCfg, 0, sizeof (mega_host_config));
-
- printk ("scsi%d : Found a MegaRAID controller at 0x%x, IRQ: %d" CRLFSTR,
- host->host_no, (u_int) megaBase, megaIrq);
-
- /* Copy resource info into structure */
- megaCfg->qCompletedH = NULL;
- megaCfg->qCompletedT = NULL;
- megaCfg->qPendingH = NULL;
- megaCfg->qPendingT = NULL;
- megaCfg->qFreeH = NULL;
- megaCfg->qFreeT = NULL;
- megaCfg->qFcnt = 0;
- megaCfg->qPcnt = 0;
- megaCfg->qCcnt = 0;
- megaCfg->flag = flag;
- megaCfg->host = host;
- megaCfg->base = megaBase;
- megaCfg->host->irq = megaIrq;
- megaCfg->host->io_port = megaBase;
- megaCfg->host->n_io_port = 16;
- megaCfg->host->unique_id = (pdev->bus->number << 8) | pdev->devfn;
- megaCtlrs[numCtlrs++] = megaCfg;
- if (flag != BOARD_QUARTZ) {
- /* Request our IO Range */
- if (request_region (megaBase, 16, "megaraid")) {
- printk (KERN_WARNING "megaraid: Couldn't register I/O range!" CRLFSTR);
- scsi_unregister (host);
- continue;
- }
- }
-
- /* Request our IRQ */
- if (request_irq (megaIrq, megaraid_isr, SA_SHIRQ,
- "megaraid", megaCfg)) {
- printk (KERN_WARNING "megaraid: Couldn't register IRQ %d!" CRLFSTR,
- megaIrq);
- scsi_unregister (host);
- continue;
- }
-
- mega_register_mailbox (megaCfg, virt_to_bus ((void *) &megaCfg->mailbox64));
- mega_i_query_adapter (megaCfg);
-
- if (flag == BOARD_QUARTZ) {
- /* Check to see if this is a Dell PERC RAID controller model 466 */
- u16 subsysid, subsysvid;
-#if LINUX_VERSION_CODE < 0x20100
- pcibios_read_config_word (pciBus, pciDevFun,
- PCI_SUBSYSTEM_VENDOR_ID,
- &subsysvid);
- pcibios_read_config_word (pciBus, pciDevFun,
- PCI_SUBSYSTEM_ID,
- &subsysid);
+ mega_host_config *megaCfg = NULL;
+ struct Scsi_Host *host = NULL;
+ u_char pciBus, pciDevFun, megaIrq;
+
+ u16 magic;
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,4,0)
+ u32 magic64;
+#endif
+
+ int i;
+
+#ifdef __LP64__
+ u64 megaBase;
+#else
+ u32 megaBase;
+#endif
+
+ u16 pciIdx = 0;
+ u16 numFound = 0;
+ u16 subsysid, subsysvid;
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,1,0) /* 0x20100 */
+ while (!pcibios_find_device
+ (pciVendor, pciDev, pciIdx, &pciBus, &pciDevFun)) {
+#else
+
+#if LINUX_VERSION_CODE > KERNEL_VERSION(2,3,0) /*0x20300 */
+ struct pci_dev *pdev = NULL;
+#else
+ struct pci_dev *pdev = pci_devices;
+#endif
+
+ while ((pdev = pci_find_device (pciVendor, pciDev, pdev))) {
+ if (pci_enable_device (pdev))
+ continue;
+ pciBus = pdev->bus->number;
+ pciDevFun = pdev->devfn;
+#endif
+ if ((flag & BOARD_QUARTZ) && (skip_id == -1)) {
+ pcibios_read_config_word (pciBus, pciDevFun,
+ PCI_CONF_AMISIG, &magic);
+ if ((magic != AMI_SIGNATURE)
+ && (magic != AMI_SIGNATURE_471)) {
+ pciIdx++;
+ continue; /* not an AMI board */
+ }
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,4,0)
+ pcibios_read_config_dword (pciBus, pciDevFun,
+ PCI_CONF_AMISIG64, &magic64);
+
+ if (magic64 == AMI_64BIT_SIGNATURE)
+ flag |= BOARD_64BIT;
+#endif
+ }
+
+		/* TODO: make this more modular so that in future we don't have
+		   to add a special case for each firmware */
+
+ if (flag & BOARD_QUARTZ) {
+ /* Check to see if this is a Dell PERC RAID controller model 466 */
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,1,0) /* 0x20100 */
+ pcibios_read_config_word (pciBus, pciDevFun,
+ PCI_SUBSYSTEM_VENDOR_ID,
+ &subsysvid);
+ pcibios_read_config_word (pciBus, pciDevFun,
+ PCI_SUBSYSTEM_ID, &subsysid);
+#else
+ pci_read_config_word (pdev,
+ PCI_SUBSYSTEM_VENDOR_ID,
+ &subsysvid);
+ pci_read_config_word (pdev,
+ PCI_SUBSYSTEM_ID, &subsysid);
+#endif
+ if ((subsysid == 0x1111) && (subsysvid == 0x1111) &&
+ (!strcmp (megaCfg->fwVer, "3.00")
+ || !strcmp (megaCfg->fwVer, "3.01"))) {
+ printk (KERN_WARNING
+ "megaraid: Your card is a Dell PERC 2/SC RAID controller with firmware\n"
+ "megaraid: 3.00 or 3.01. This driver is known to have corruption issues\n"
+ "megaraid: with those firmware versions on this specific card. In order\n"
+ "megaraid: to protect your data, please upgrade your firmware to version\n"
+ "megaraid: 3.10 or later, available from the Dell Technical Support web\n"
+ "megaraid: site at\n"
+ "http://support.dell.com/us/en/filelib/download/index.asp?fileid=2940\n");
+ continue;
+ }
+
+			/* If we don't detect one of these valid subsystem
+			   vendor IDs, we refuse to load the driver
+			   PART of PC200X compliance
+			 */
+
+ if ((subsysvid != AMI_SUBSYS_ID)
+ && (subsysvid != DELL_SUBSYS_ID)
+ && (subsysvid != HP_SUBSYS_ID))
+ continue;
+ }
+
+ printk (KERN_INFO
+ "megaraid: found 0x%4.04x:0x%4.04x:idx %d:bus %d:slot %d:func %d\n",
+ pciVendor, pciDev, pciIdx, pciBus, PCI_SLOT (pciDevFun),
+ PCI_FUNC (pciDevFun));
+ /* Read the base port and IRQ from PCI */
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,1,0) /* 0x20100 */
+ pcibios_read_config_dword (pciBus, pciDevFun,
+ PCI_BASE_ADDRESS_0,
+ (u_int *) & megaBase);
+ pcibios_read_config_byte (pciBus, pciDevFun,
+ PCI_INTERRUPT_LINE, &megaIrq);
+#elif LINUX_VERSION_CODE < KERNEL_VERSION(2,3,0) /*0x20300 */
+ megaBase = pdev->base_address[0];
+ megaIrq = pdev->irq;
+#else
+
+ megaBase = pci_resource_start (pdev, 0);
+ megaIrq = pdev->irq;
+#endif
+
+ pciIdx++;
+
+ if (flag & BOARD_QUARTZ) {
+ megaBase = (long) ioremap (megaBase, 128);
+ if (!megaBase)
+ continue;
+ } else
+ megaBase += 0x10;
+
+ /* Initialize SCSI Host structure */
+ host = scsi_register (pHostTmpl, sizeof (mega_host_config));
+ if (!host)
+ goto err_unmap;
+
+ megaCfg = (mega_host_config *) host->hostdata;
+ memset (megaCfg, 0, sizeof (mega_host_config));
+
+ printk (KERN_INFO "scsi%d : Found a MegaRAID controller at 0x%x, IRQ: %d"
+ M_RD_CRLFSTR, host->host_no, (u_int) megaBase, megaIrq);
+
+ if (flag & BOARD_64BIT)
+ printk (KERN_INFO "scsi%d : Enabling 64 bit support\n",
+ host->host_no);
+
+ /* Copy resource info into structure */
+ megaCfg->qCompletedH = NULL;
+ megaCfg->qCompletedT = NULL;
+ megaCfg->qPendingH = NULL;
+ megaCfg->qPendingT = NULL;
+ megaCfg->qFreeH = NULL;
+ megaCfg->qFreeT = NULL;
+ megaCfg->qFcnt = 0;
+ megaCfg->qPcnt = 0;
+ megaCfg->qCcnt = 0;
+ megaCfg->lock_free = SPIN_LOCK_UNLOCKED;
+ megaCfg->lock_pend = SPIN_LOCK_UNLOCKED;
+ megaCfg->lock_scsicmd = SPIN_LOCK_UNLOCKED;
+ megaCfg->flag = flag;
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,4,0)
+ megaCfg->dev = pdev;
+#endif
+ megaCfg->host = host;
+ megaCfg->base = megaBase;
+ megaCfg->host->irq = megaIrq;
+ megaCfg->host->io_port = megaBase;
+ megaCfg->host->n_io_port = 16;
+ megaCfg->host->unique_id = (pciBus << 8) | pciDevFun;
+ megaCtlrs[numCtlrs] = megaCfg;
+
+ if (!(flag & BOARD_QUARTZ)) {
+ /* Request our IO Range */
+ if (!request_region(megaBase, 16, "megaraid")) {
+ printk (KERN_WARNING "megaraid: Couldn't register I/O range!" M_RD_CRLFSTR);
+ goto err_unregister;
+ }
+ }
+
+ /* Request our IRQ */
+ if (request_irq (megaIrq, megaraid_isr, SA_SHIRQ,
+ "megaraid", megaCfg)) {
+ printk (KERN_WARNING
+ "megaraid: Couldn't register IRQ %d!\n",
+ megaIrq);
+ goto err_release;
+ }
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,4,0)
+ /*
+ * unmap while releasing the driver, Is it required to be
+ * PCI_DMA_BIDIRECTIONAL
+ */
+
+ megaCfg->mailbox64ptr
+ = pci_alloc_consistent (megaCfg->dev,
+ sizeof (mega_mailbox64),
+ &(megaCfg->dma_handle64));
+
+ mega_register_mailbox (megaCfg,
+ virt_to_bus ((void *) megaCfg->
+ mailbox64ptr));
#else
- pci_read_config_word (pdev, PCI_SUBSYSTEM_VENDOR_ID, &subsysvid);
- pci_read_config_word (pdev, PCI_SUBSYSTEM_ID, &subsysid);
-#endif
- if ( (subsysid == 0x1111) && (subsysvid == 0x1111) &&
- (!strcmp(megaCfg->fwVer,"3.00") || !strcmp(megaCfg->fwVer,"3.01"))) {
- printk(KERN_WARNING
-"megaraid: Your card is a Dell PERC 2/SC RAID controller with firmware\n"
-"megaraid: 3.00 or 3.01. This driver is known to have corruption issues\n"
-"megaraid: with those firmware versions on this specific card. In order\n"
-"megaraid: to protect your data, please upgrade your firmware to version\n"
-"megaraid: 3.10 or later, available from the Dell Technical Support web\n"
-"megaraid: site at\n"
-"http://support.dell.com/us/en/filelib/download/index.asp?fileid=2940\n");
- megaraid_release (host);
-#ifdef MODULE
- continue;
+ /*Taken care */
+ mega_register_mailbox (megaCfg,
+ virt_to_bus ((void *) &megaCfg->
+ mailbox64));
+#endif
+
+ mega_i_query_adapter (megaCfg);
+
+ if (mega_is_bios_enabled (megaCfg)) {
+ mega_hbas[numCtlrs].is_bios_enabled = 1;
+ }
+ mega_hbas[numCtlrs].hostdata_addr = megaCfg;
+
+ /* Initialize SCBs */
+ if (mega_init_scb (megaCfg)) {
+ pci_free_consistent (megaCfg->dev,
+ sizeof (mega_mailbox64),
+ (void *) megaCfg->mailbox64ptr,
+ megaCfg->dma_handle64);
+ scsi_unregister (host);
+ continue;
+ }
+
+ /*
+ * Fill in the structure which needs to be passed back to the
+ * application when it does an ioctl() for controller related
+ * information.
+ */
+
+ i = numCtlrs;
+ numCtlrs++;
+
+ mcontroller[i].base = megaBase;
+ mcontroller[i].irq = megaIrq;
+ mcontroller[i].numldrv = megaCfg->numldrv;
+ mcontroller[i].pcibus = pciBus;
+ mcontroller[i].pcidev = pciDev;
+ mcontroller[i].pcifun = PCI_FUNC (pciDevFun);
+ mcontroller[i].pciid = pciIdx;
+ mcontroller[i].pcivendor = pciVendor;
+ mcontroller[i].pcislot = PCI_SLOT (pciDevFun);
+ mcontroller[i].uid = (pciBus << 8) | pciDevFun;
+
+ numFound++;
+
+ /* Set the Mode of addressing to 64 bit */
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,4,0)
+ if ((megaCfg->flag & BOARD_64BIT) && BITS_PER_LONG == 64)
+#ifdef __LP64__
+ pdev->dma_mask = 0xffffffffffffffff;
#else
- while(1) schedule_timeout(1 * HZ);
-#endif
- }
- }
-
- /* Initialize SCBs */
- if (mega_initSCB (megaCfg)) {
- megaraid_release (host);
- continue;
- }
-
- numFound++;
- }
- return numFound;
+ pdev->dma_mask = 0xffffffff;
+#endif
+#endif
+ continue;
+ err_release:
+ if (flag & BOARD_QUARTZ)
+ release_region (megaBase, 16);
+ err_unregister:
+ scsi_unregister (host);
+ err_unmap:
+ if (flag & BOARD_QUARTZ)
+ iounmap ((void *) megaBase);
+ }
+ return numFound;
}
/*---------------------------------------------------------
* Detects if a megaraid controller exists in this system
*---------------------------------------------------------*/
+
int megaraid_detect (Scsi_Host_Template * pHostTmpl)
{
- int count = 0;
+ int ctlridx = 0, count = 0;
-#ifdef MODULE
- if (megaraid)
- megaraid_setup(megaraid);
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,3,0) /*0x20300 */
+ pHostTmpl->proc_dir = &proc_scsi_megaraid;
+#else
+ pHostTmpl->proc_name = "megaraid";
#endif
- pHostTmpl->proc_name = "megaraid";
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,1,0) /* 0x20100 */
+ if (!pcibios_present ()) {
+ printk (KERN_WARNING "megaraid: PCI bios not present."
+ M_RD_CRLFSTR);
+ return 0;
+ }
+#endif
+ skip_id = -1;
+ if (megaraid && !strncmp (megaraid, "skip", strlen ("skip"))) {
+ if (megaraid[4] != '\0') {
+ skip_id = megaraid[4] - '0';
+ if (megaraid[5] != '\0') {
+ skip_id = (skip_id * 10) + (megaraid[5] - '0');
+ }
+ }
+ skip_id = (skip_id > 15) ? -1 : skip_id;
+ }
+
+ printk (KERN_INFO "megaraid: " MEGARAID_VERSION M_RD_CRLFSTR);
- printk ("megaraid: " MEGARAID_VERSION CRLFSTR);
+ memset (mega_hbas, 0, sizeof (mega_hbas));
- count += mega_findCard (pHostTmpl, 0x101E, 0x9010, 0);
- count += mega_findCard (pHostTmpl, 0x101E, 0x9060, 0);
- count += mega_findCard (pHostTmpl, 0x8086, 0x1960, BOARD_QUARTZ);
+ count += mega_findCard (pHostTmpl, PCI_VENDOR_ID_AMI,
+ PCI_DEVICE_ID_AMI_MEGARAID, 0);
+ count += mega_findCard (pHostTmpl, PCI_VENDOR_ID_AMI,
+ PCI_DEVICE_ID_AMI_MEGARAID2, 0);
+ count += mega_findCard (pHostTmpl, 0x8086,
+ PCI_DEVICE_ID_AMI_MEGARAID3, BOARD_QUARTZ);
+ count += mega_findCard (pHostTmpl, PCI_VENDOR_ID_AMI,
+ PCI_DEVICE_ID_AMI_MEGARAID3, BOARD_QUARTZ);
- return count;
+ mega_reorder_hosts ();
+
+#ifdef CONFIG_PROC_FS
+ if (count) {
+#if LINUX_VERSION_CODE > KERNEL_VERSION(2,3,0) /*0x20300 */
+ mega_proc_dir_entry = proc_mkdir ("megaraid", &proc_root);
+#else
+ mega_proc_dir_entry = create_proc_entry ("megaraid",
+ S_IFDIR | S_IRUGO |
+ S_IXUGO, &proc_root);
+#endif
+ if (!mega_proc_dir_entry)
+ printk ("megaraid: failed to create megaraid root\n");
+ else
+ for (ctlridx = 0; ctlridx < count; ctlridx++)
+ mega_create_proc_entry (ctlridx,
+ mega_proc_dir_entry);
+ }
+#endif
+
+ /*
+	 * Register the driver as a character device, for applications to access
+ * it for ioctls.
+ * Ideally, this should go in the init_module() routine, but since it is
+ * hidden in the file "scsi_module.c" ( included in the end ), we define
+ * it here
+ * First argument (major) to register_chrdev implies a dynamic major
+ * number allocation.
+ */
+ major = register_chrdev (0, "megadev", &megadev_fops);
+
+ /*
+ * Register the Shutdown Notification hook in kernel
+ */
+ if (register_reboot_notifier (&mega_notifier)) {
+ printk ("MegaRAID Shutdown routine not registered!!\n");
+ }
+ init_MUTEX (&mimd_entry_mtx);
+
+ return count;
}
/*---------------------------------------------------------------------
* Release the controller's resources
*---------------------------------------------------------------------*/
-int megaraid_release (struct Scsi_Host *pSHost)
+static int megaraid_release (struct Scsi_Host *pSHost)
{
- mega_host_config *megaCfg;
- mega_mailbox *mbox;
- u_char mboxData[16];
+ mega_host_config *megaCfg;
+ mega_mailbox *mbox;
+ u_char mboxData[16];
+ int i;
- megaCfg = (mega_host_config *) pSHost->hostdata;
- mbox = (mega_mailbox *) mboxData;
+ megaCfg = (mega_host_config *) pSHost->hostdata;
+ mbox = (mega_mailbox *) mboxData;
- /* Flush cache to disk */
- memset (mbox, 0, 16);
- mboxData[0] = 0xA;
+ /* Flush cache to disk */
+ memset (mbox, 0, 16);
+ mboxData[0] = 0xA;
- free_irq (megaCfg->host->irq, megaCfg);/* Must be freed first, otherwise
- extra interrupt is generated */
+ free_irq (megaCfg->host->irq, megaCfg); /* Must be freed first, otherwise
+ extra interrupt is generated */
- /* Issue a blocking (interrupts disabled) command to the card */
- megaIssueCmd (megaCfg, mboxData, NULL, 0);
+ /* Issue a blocking (interrupts disabled) command to the card */
+ megaIssueCmd (megaCfg, mboxData, NULL, 0);
- /* Free our resources */
- if (megaCfg->flag & BOARD_QUARTZ) {
- iounmap ((void *) megaCfg->base);
- }
- else {
- release_region (megaCfg->host->io_port, 16);
- }
+ /* Free our resources */
+ if (megaCfg->flag & BOARD_QUARTZ) {
+ iounmap ((void *) megaCfg->base);
+ } else {
+ release_region (megaCfg->host->io_port, 16);
+ }
+
+ mega_freeSgList (megaCfg);
+ pci_free_consistent (megaCfg->dev,
+ sizeof (mega_mailbox64),
+ (void *) megaCfg->mailbox64ptr,
+ megaCfg->dma_handle64);
+ scsi_unregister (pSHost);
+
+#ifdef CONFIG_PROC_FS
+ if (megaCfg->controller_proc_dir_entry) {
+ remove_proc_entry ("stat", megaCfg->controller_proc_dir_entry);
+ remove_proc_entry ("status",
+ megaCfg->controller_proc_dir_entry);
+ remove_proc_entry ("config",
+ megaCfg->controller_proc_dir_entry);
+ remove_proc_entry ("mailbox",
+ megaCfg->controller_proc_dir_entry);
+ for (i = 0; i < numCtlrs; i++) {
+ char buf[12] = { 0 };
+ sprintf (buf, "%d", i);
+ remove_proc_entry (buf, mega_proc_dir_entry);
+ }
+ remove_proc_entry ("megaraid", &proc_root);
+ }
+#endif
- mega_freeSgList(megaCfg);
- scsi_unregister (pSHost);
+ /*
+ * Unregister the character device interface to the driver. Ideally this
+ * should have been done in cleanup_module routine. Since this is hidden
+ * in file "scsi_module.c", we do it here.
+ * major is the major number of the character device returned by call to
+ * register_chrdev() routine.
+ */
+ unregister_chrdev (major, "megadev");
+ unregister_reboot_notifier (&mega_notifier);
- return 0;
+ return 0;
}
-static inline void mega_freeSgList(mega_host_config *megaCfg)
+static int mega_is_bios_enabled (mega_host_config * megacfg)
{
- int i;
+ mega_mailbox *mboxpnt;
+ unsigned char mbox[16];
+ int ret;
+
+ mboxpnt = (mega_mailbox *) mbox;
+
+ memset (mbox, 0, sizeof (mbox));
+ memset ((void *) megacfg->mega_buffer,
+ 0, sizeof (megacfg->mega_buffer));
- for (i = 0; i < megaCfg->max_cmds; i++) {
- if (megaCfg->scbList[i].sgList)
- kfree (megaCfg->scbList[i].sgList); /* free sgList */
- }
+ /*
+	 * issue command to find out if the BIOS is enabled for this controller
+ */
+ mbox[0] = IS_BIOS_ENABLED;
+ mbox[2] = GET_BIOS;
+
+ mboxpnt->xferaddr = virt_to_bus ((void *) megacfg->mega_buffer);
+
+ ret = megaIssueCmd (megacfg, mbox, NULL, 0);
+
+ return (*(char *) megacfg->mega_buffer);
+}
+
+static void mega_reorder_hosts (void)
+{
+ struct Scsi_Host *shpnt;
+ struct Scsi_Host *shone;
+ struct Scsi_Host *shtwo;
+ mega_host_config *boot_host;
+ int i;
+
+ /*
+	 * Find the (first) host which has its BIOS enabled
+ */
+ boot_host = NULL;
+ for (i = 0; i < MAX_CONTROLLERS; i++) {
+ if (mega_hbas[i].is_bios_enabled) {
+ boot_host = mega_hbas[i].hostdata_addr;
+ break;
+ }
+ }
+
+ if (boot_host == NULL) {
+ printk (KERN_WARNING "megaraid: no BIOS enabled.\n");
+ return;
+ }
+
+ /*
+ * Traverse through the list of SCSI hosts for our HBA locations
+ */
+ shone = shtwo = NULL;
+ for (shpnt = scsi_hostlist; shpnt; shpnt = shpnt->next) {
+ /* Is it one of ours? */
+ for (i = 0; i < MAX_CONTROLLERS; i++) {
+ if ((mega_host_config *) shpnt->hostdata ==
+ mega_hbas[i].hostdata_addr) {
+				/* Does this one have BIOS enabled */
+ if (mega_hbas[i].hostdata_addr == boot_host) {
+
+ /* Are we first */
+ if (shtwo == NULL) /* Yes! */
+ return;
+ else { /* :-( */
+ shone = shpnt;
+ }
+ } else {
+ if (!shtwo) {
+					/* were we here before? exchange first */
+ shtwo = shpnt;
+ }
+ }
+ break;
+ }
+ }
+ /*
+ * Have we got the boot host and one which does not have the bios
+ * enabled.
+ */
+ if (shone && shtwo)
+ break;
+ }
+ if (shone && shtwo) {
+ mega_swap_hosts (shone, shtwo);
+ }
+
+ return;
+}
+
+static void mega_swap_hosts (struct Scsi_Host *shone, struct Scsi_Host *shtwo)
+{
+ struct Scsi_Host *prevtoshtwo;
+ struct Scsi_Host *prevtoshone;
+ struct Scsi_Host *save = NULL;;
+
+ /* Are these two nodes adjacent */
+ if (shtwo->next == shone) {
+
+ if (shtwo == scsi_hostlist && shone->next == NULL) {
+
+ /* just two nodes */
+ scsi_hostlist = shone;
+ shone->next = shtwo;
+ shtwo->next = NULL;
+ } else if (shtwo == scsi_hostlist) {
+ /* first two nodes of the list */
+
+ scsi_hostlist = shone;
+ shtwo->next = shone->next;
+ scsi_hostlist->next = shtwo;
+ } else if (shone->next == NULL) {
+ /* last two nodes of the list */
+
+ prevtoshtwo = scsi_hostlist;
+
+ while (prevtoshtwo->next != shtwo)
+ prevtoshtwo = prevtoshtwo->next;
+
+ prevtoshtwo->next = shone;
+ shone->next = shtwo;
+ shtwo->next = NULL;
+ } else {
+ prevtoshtwo = scsi_hostlist;
+
+ while (prevtoshtwo->next != shtwo)
+ prevtoshtwo = prevtoshtwo->next;
+
+ prevtoshtwo->next = shone;
+ shtwo->next = shone->next;
+ shone->next = shtwo;
+ }
+
+ } else if (shtwo == scsi_hostlist && shone->next == NULL) {
+ /* shtwo at head, shone at tail, not adjacent */
+
+ prevtoshone = scsi_hostlist;
+
+ while (prevtoshone->next != shone)
+ prevtoshone = prevtoshone->next;
+
+ scsi_hostlist = shone;
+ shone->next = shtwo->next;
+ prevtoshone->next = shtwo;
+ shtwo->next = NULL;
+ } else if (shtwo == scsi_hostlist && shone->next != NULL) {
+ /* shtwo at head, shone is not at tail */
+
+ prevtoshone = scsi_hostlist;
+ while (prevtoshone->next != shone)
+ prevtoshone = prevtoshone->next;
+
+ scsi_hostlist = shone;
+ prevtoshone->next = shtwo;
+ save = shtwo->next;
+ shtwo->next = shone->next;
+ shone->next = save;
+ } else if (shone->next == NULL) {
+ /* shtwo not at head, shone at tail */
+
+ prevtoshtwo = scsi_hostlist;
+ prevtoshone = scsi_hostlist;
+
+ while (prevtoshtwo->next != shtwo)
+ prevtoshtwo = prevtoshtwo->next;
+ while (prevtoshone->next != shone)
+ prevtoshone = prevtoshone->next;
+
+ prevtoshtwo->next = shone;
+ shone->next = shtwo->next;
+ prevtoshone->next = shtwo;
+ shtwo->next = NULL;
+
+ } else {
+ prevtoshtwo = scsi_hostlist;
+ prevtoshone = scsi_hostlist;
+ save = NULL;;
+
+ while (prevtoshtwo->next != shtwo)
+ prevtoshtwo = prevtoshtwo->next;
+ while (prevtoshone->next != shone)
+ prevtoshone = prevtoshone->next;
+
+ prevtoshtwo->next = shone;
+ save = shone->next;
+ shone->next = shtwo->next;
+ prevtoshone->next = shtwo;
+ shtwo->next = save;
+ }
+ return;
+}
+
+static inline void mega_freeSgList (mega_host_config * megaCfg)
+{
+ int i;
+
+ for (i = 0; i < megaCfg->max_cmds; i++) {
+ if (megaCfg->scbList[i].sgList)
+ pci_free_consistent (megaCfg->dev,
+ sizeof (mega_64sglist) *
+ MAX_SGLIST,
+ megaCfg->scbList[i].sgList,
+ megaCfg->scbList[i].
+ dma_sghandle64);
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,4,0) /* 0x020400 */
+ kfree (megaCfg->scbList[i].sgList); /* free sgList */
+#endif
+ }
}
/*----------------------------------------------
- * Get information about the card/driver
+ * Get information about the card/driver
*----------------------------------------------*/
-const char * megaraid_info (struct Scsi_Host *pSHost)
+static const char *megaraid_info (struct Scsi_Host *pSHost)
{
- static char buffer[512];
- mega_host_config *megaCfg;
-
- megaCfg = (mega_host_config *) pSHost->hostdata;
-
- sprintf (buffer, "AMI MegaRAID %s %d commands %d targs %d chans %d luns",
- megaCfg->fwVer,
- megaCfg->productInfo.MaxConcCmds,
- megaCfg->host->max_id,
- megaCfg->host->max_channel,
- megaCfg->host->max_lun);
- return buffer;
+ static char buffer[512];
+ mega_host_config *megaCfg;
+
+ megaCfg = (mega_host_config *) pSHost->hostdata;
+
+ sprintf (buffer,
+ "AMI MegaRAID %s %d commands %d targs %d chans %d luns",
+ megaCfg->fwVer, megaCfg->productInfo.MaxConcCmds,
+ megaCfg->host->max_id, megaCfg->host->max_channel,
+ megaCfg->host->max_lun);
+ return buffer;
}
/*-----------------------------------------------------------------
* 0E 01 reserved
* 0F 01 mailbox busy
* 10 01 numstatus byte
- * 11 01 status byte
+ * 11 01 status byte
*-----------------------------------------------------------------*/
-int megaraid_queue (Scsi_Cmnd * SCpnt, void (*pktComp) (Scsi_Cmnd *))
+static int megaraid_queue (Scsi_Cmnd * SCpnt, void (*pktComp) (Scsi_Cmnd *))
{
- DRIVER_LOCK_T
- mega_host_config *megaCfg;
- mega_scb *pScb;
-
- megaCfg = (mega_host_config *) SCpnt->host->hostdata;
- DRIVER_LOCK(megaCfg);
-
- if (!(megaCfg->flag & (1L << SCpnt->channel))) {
- if (SCpnt->channel < SCpnt->host->max_channel)
- printk (/*KERN_INFO*/ "scsi%d: scanning channel %c for devices.\n",
- megaCfg->host->host_no,
- SCpnt->channel + '1');
- else
- printk(/*KERN_INFO*/ "scsi%d: scanning virtual channel for logical drives.\n", megaCfg->host->host_no);
-
- megaCfg->flag |= (1L << SCpnt->channel);
- }
-
- SCpnt->scsi_done = pktComp;
-
- /* If driver in abort or reset.. cancel this command */
- if (megaCfg->flag & IN_ABORT) {
- SCpnt->result = (DID_ABORT << 16);
- /* Add Scsi_Command to end of completed queue */
- if( megaCfg->qCompletedH == NULL ) {
- megaCfg->qCompletedH = megaCfg->qCompletedT = SCpnt;
- }
- else {
- megaCfg->qCompletedT->host_scribble = (unsigned char *) SCpnt;
- megaCfg->qCompletedT = SCpnt;
- }
- megaCfg->qCompletedT->host_scribble = (unsigned char *) NULL;
- megaCfg->qCcnt++;
-
- DRIVER_UNLOCK(megaCfg);
- return 0;
- }
- else if (megaCfg->flag & IN_RESET) {
- SCpnt->result = (DID_RESET << 16);
- /* Add Scsi_Command to end of completed queue */
- if( megaCfg->qCompletedH == NULL ) {
- megaCfg->qCompletedH = megaCfg->qCompletedT = SCpnt;
- }
- else {
- megaCfg->qCompletedT->host_scribble = (unsigned char *) SCpnt;
- megaCfg->qCompletedT = SCpnt;
- }
- megaCfg->qCompletedT->host_scribble = (unsigned char *) NULL;
- megaCfg->qCcnt++;
-
- DRIVER_UNLOCK(megaCfg);
- return 0;
- }
-
- megaCfg->flag |= IN_QUEUE;
- /* Allocate and build a SCB request */
- if ((pScb = mega_build_cmd (megaCfg, SCpnt)) != NULL) {
- /*build SCpnt for IOCTL_CMD_NEW cmd in mega_ioctl()*/
- /* Add SCB to the head of the pending queue */
- /* Add SCB to the head of the pending queue */
- if( megaCfg->qPendingH == NULL ) {
- megaCfg->qPendingH = megaCfg->qPendingT = pScb;
- }
- else {
- megaCfg->qPendingT->next = pScb;
- megaCfg->qPendingT = pScb;
- }
- megaCfg->qPendingT->next = NULL;
- megaCfg->qPcnt++;
-
- mega_runpendq(megaCfg);
-
-#if LINUX_VERSION_CODE > 0x020024
- if ( SCpnt->cmnd[0]==IOCTL_CMD_NEW )
- { /* user data from external user buffer */
- char *user_area;
- u32 xfer_size;
-
- init_MUTEX_LOCKED(&pScb->sem);
- down(&pScb->sem);
-
- user_area = *((char **)&pScb->SCpnt->cmnd[4]);
- xfer_size = *((u32 *)&pScb->SCpnt->cmnd[8]);
-
- copy_to_user(user_area,pScb->kern_area,xfer_size);
-
- kfree(pScb->kern_area);
-
- mega_freeSCB(megaCfg, pScb);
- }
-#endif
- }
-
- megaCfg->flag &= ~IN_QUEUE;
- DRIVER_UNLOCK(megaCfg);
-
- return 0;
+ DRIVER_LOCK_T mega_host_config * megaCfg;
+ mega_scb *pScb;
+ char *user_area = NULL;
+
+ megaCfg = (mega_host_config *) SCpnt->host->hostdata;
+ DRIVER_LOCK (megaCfg);
+
+ if (!(megaCfg->flag & (1L << SCpnt->channel))) {
+ if (SCpnt->channel < SCpnt->host->max_channel)
+ printk ( /*KERN_INFO */
+ "scsi%d: scanning channel %c for devices.\n",
+ megaCfg->host->host_no, SCpnt->channel + '1');
+ else
+ printk ( /*KERN_INFO */
+ "scsi%d: scanning virtual channel for logical drives.\n",
+ megaCfg->host->host_no);
+
+ megaCfg->flag |= (1L << SCpnt->channel);
+ }
+
+ SCpnt->scsi_done = pktComp;
+
+ if (mega_driver_ioctl (megaCfg, SCpnt))
+ return 0;
+
+ /* If driver in abort or reset.. cancel this command */
+ if (megaCfg->flag & IN_ABORT) {
+ SCpnt->result = (DID_ABORT << 16);
+ /* Add Scsi_Command to end of completed queue */
+ if (megaCfg->qCompletedH == NULL) {
+ megaCfg->qCompletedH = megaCfg->qCompletedT = SCpnt;
+ } else {
+ megaCfg->qCompletedT->host_scribble =
+ (unsigned char *) SCpnt;
+ megaCfg->qCompletedT = SCpnt;
+ }
+ megaCfg->qCompletedT->host_scribble = (unsigned char *) NULL;
+ megaCfg->qCcnt++;
+
+ DRIVER_UNLOCK (megaCfg);
+ return 0;
+ } else if (megaCfg->flag & IN_RESET) {
+ SCpnt->result = (DID_RESET << 16);
+ /* Add Scsi_Command to end of completed queue */
+ if (megaCfg->qCompletedH == NULL) {
+ megaCfg->qCompletedH = megaCfg->qCompletedT = SCpnt;
+ } else {
+ megaCfg->qCompletedT->host_scribble =
+ (unsigned char *) SCpnt;
+ megaCfg->qCompletedT = SCpnt;
+ }
+ megaCfg->qCompletedT->host_scribble = (unsigned char *) NULL;
+ megaCfg->qCcnt++;
+
+ DRIVER_UNLOCK (megaCfg);
+ return 0;
+ }
+
+ megaCfg->flag |= IN_QUEUE;
+ /* Allocate and build a SCB request */
+ if ((pScb = mega_build_cmd (megaCfg, SCpnt)) != NULL) {
+ /*build SCpnt for M_RD_IOCTL_CMD_NEW cmd in mega_ioctl() */
+ /* Add SCB to the head of the pending queue */
+ /* Add SCB to the head of the pending queue */
+ if (megaCfg->qPendingH == NULL) {
+ megaCfg->qPendingH = megaCfg->qPendingT = pScb;
+ } else {
+ megaCfg->qPendingT->next = pScb;
+ megaCfg->qPendingT = pScb;
+ }
+ megaCfg->qPendingT->next = NULL;
+ megaCfg->qPcnt++;
+
+ if (mega_runpendq (megaCfg) == -1) {
+ DRIVER_UNLOCK (megaCfg);
+ return 0;
+ }
+
+ if (pScb->SCpnt->cmnd[0] == M_RD_IOCTL_CMD_NEW) {
+ init_MUTEX_LOCKED (&pScb->ioctl_sem);
+ spin_unlock_irq (&io_request_lock);
+ down (&pScb->ioctl_sem);
+ user_area = *((char **) &pScb->SCpnt->cmnd[4]);
+ if (copy_to_user
+ (user_area, pScb->buff_ptr, pScb->iDataSize)) {
+ printk
+ ("megaraid: Error copying ioctl return value to user buffer.\n");
+ pScb->SCpnt->result = (DID_ERROR << 16);
+ }
+ spin_lock_irq (&io_request_lock);
+ DRIVER_LOCK (megaCfg);
+ kfree (pScb->buff_ptr);
+ pScb->buff_ptr = NULL;
+ mega_cmd_done (megaCfg, pScb, pScb->SCpnt->result);
+ mega_rundoneq (megaCfg);
+ mega_runpendq (megaCfg);
+ DRIVER_UNLOCK (megaCfg);
+ }
+
+ megaCfg->flag &= ~IN_QUEUE;
+
+ }
+
+ DRIVER_UNLOCK (megaCfg);
+ return 0;
}
/*----------------------------------------------------------------------
*----------------------------------------------------------------------*/
volatile static int internal_done_flag = 0;
volatile static int internal_done_errcode = 0;
-static DECLARE_WAIT_QUEUE_HEAD(internal_wait);
+
+static DECLARE_WAIT_QUEUE_HEAD (internal_wait);
static void internal_done (Scsi_Cmnd * SCpnt)
{
- internal_done_errcode = SCpnt->result;
- internal_done_flag++;
- wake_up(&internal_wait);
+ internal_done_errcode = SCpnt->result;
+ internal_done_flag++;
+ wake_up (&internal_wait);
}
/* shouldn't be used, but included for completeness */
-int megaraid_command (Scsi_Cmnd * SCpnt)
+static int megaraid_command (Scsi_Cmnd * SCpnt)
{
- internal_done_flag = 0;
+ internal_done_flag = 0;
- /* Queue command, and wait until it has completed */
- megaraid_queue (SCpnt, internal_done);
+ /* Queue command, and wait until it has completed */
+ megaraid_queue (SCpnt, internal_done);
- while (!internal_done_flag) {
- interruptible_sleep_on(&internal_wait);
- }
+ while (!internal_done_flag) {
+ interruptible_sleep_on (&internal_wait);
+ }
- return internal_done_errcode;
+ return internal_done_errcode;
}
/*---------------------------------------------------------------------
* Abort a previous SCSI request
*---------------------------------------------------------------------*/
-int
-megaraid_abort (Scsi_Cmnd * SCpnt)
+static int megaraid_abort (Scsi_Cmnd * SCpnt)
{
- mega_host_config *megaCfg;
- int rc; //, idx;
- mega_scb *pScb;
+ mega_host_config *megaCfg;
+ int rc; /*, idx; */
+ mega_scb *pScb;
- rc = SCSI_ABORT_NOT_RUNNING;
+ rc = SCSI_ABORT_NOT_RUNNING;
- megaCfg = (mega_host_config *) SCpnt->host->hostdata;
+ megaCfg = (mega_host_config *) SCpnt->host->hostdata;
- megaCfg->flag |= IN_ABORT;
+ megaCfg->flag |= IN_ABORT;
- for(pScb=megaCfg->qPendingH; pScb; pScb=pScb->next) {
- if (pScb->SCpnt == SCpnt) {
- /* Found an aborting command */
+ for (pScb = megaCfg->qPendingH; pScb; pScb = pScb->next) {
+ if (pScb->SCpnt == SCpnt) {
+ /* Found an aborting command */
#if DEBUG
- showMbox(pScb);
+ showMbox (pScb);
#endif
-/*
- * If the command is queued to be issued to the firmware, abort the scsi cmd,
- * If the command is already aborted in a previous call to the _abort entry
- * point, return SCSI_ABORT_SNOOZE, suggesting a reset.
- * If the command is issued to the firmware, which might complete after
- * some time, we will mark the scb as aborted, and return to the mid layer,
- * that abort could not be done.
- * In the ISR, when this command actually completes, we will perform a normal
- * completion.
- *
- * Oct 27, 1999
- */
+ /*
+ * If the command is queued to be issued to the firmware, abort the scsi cmd,
+ * If the command is already aborted in a previous call to the _abort entry
+ * point, return SCSI_ABORT_SNOOZE, suggesting a reset.
+ * If the command is issued to the firmware, which might complete after
+ * some time, we will mark the scb as aborted, and return to the mid layer,
+ * that abort could not be done.
+ * In the ISR, when this command actually completes, we will perform a normal
+ * completion.
+ *
+ * Oct 27, 1999
+ */
+
+ switch (pScb->state) {
+ case SCB_ABORTED: /* Already aborted */
+ rc = SCSI_ABORT_SNOOZE;
+ break;
+ case SCB_ISSUED: /* Waiting on ISR result */
+ rc = SCSI_ABORT_NOT_RUNNING;
+ pScb->state = SCB_ABORTED;
+ break;
+ case SCB_ACTIVE: /* still on the pending queue */
+ mega_freeSCB (megaCfg, pScb);
+ SCpnt->result = (DID_ABORT << 16);
+ if (megaCfg->qCompletedH == NULL) {
+ megaCfg->qCompletedH =
+ megaCfg->qCompletedT = SCpnt;
+ } else {
+ megaCfg->qCompletedT->host_scribble =
+ (unsigned char *) SCpnt;
+ megaCfg->qCompletedT = SCpnt;
+ }
+ megaCfg->qCompletedT->host_scribble =
+ (unsigned char *) NULL;
+ megaCfg->qCcnt++;
+ rc = SCSI_ABORT_SUCCESS;
+ break;
+ default:
+ printk
+ ("megaraid_abort: unknown command state!!\n");
+ rc = SCSI_ABORT_NOT_RUNNING;
+ break;
+ }
+ break;
+ }
+ }
- switch(pScb->state) {
- case SCB_ABORTED: /* Already aborted */
- rc = SCSI_ABORT_SNOOZE;
- break;
- case SCB_ISSUED: /* Waiting on ISR result */
- rc = SCSI_ABORT_NOT_RUNNING;
- pScb->state = SCB_ABORTED;
- break;
- case SCB_ACTIVE: /* still on the pending queue */
- mega_freeSCB (megaCfg, pScb);
- SCpnt->result = (DID_ABORT << 16) ;
- if( megaCfg->qCompletedH == NULL ) {
- megaCfg->qCompletedH = megaCfg->qCompletedT = SCpnt;
- }
- else {
- megaCfg->qCompletedT->host_scribble = (unsigned char *) SCpnt;
- megaCfg->qCompletedT = SCpnt;
- }
- megaCfg->qCompletedT->host_scribble = (unsigned char *) NULL;
- megaCfg->qCcnt++;
- rc = SCSI_ABORT_SUCCESS;
- break;
- default:
- printk("megaraid_abort: unknown command state!!\n");
- rc = SCSI_ABORT_NOT_RUNNING;
- break;
- }
- break;
- }
- }
-
- megaCfg->flag &= ~IN_ABORT;
+ megaCfg->flag &= ~IN_ABORT;
#if DEBUG
-if(megaCfg->flag & IN_QUEUE) printk("ma:flag is in queue\n");
-if(megaCfg->qCompletedH == NULL) printk("ma:qchead == null\n");
+ if (megaCfg->flag & IN_QUEUE)
+ printk ("ma:flag is in queue\n");
+ if (megaCfg->qCompletedH == NULL)
+ printk ("ma:qchead == null\n");
#endif
-
-/*
- * This is required here to complete any completed requests to be communicated
- * over to the mid layer.
- * Calling just mega_rundoneq() did not work.
- */
-if(megaCfg->qCompletedH) {
- SCpnt = megaCfg->qCompletedH;
- megaCfg->qCompletedH = (Scsi_Cmnd *)SCpnt->host_scribble;
- megaCfg->qCcnt--;
-
- SCpnt->host_scribble = (unsigned char *) NULL ;
- /* Callback */
- callDone (SCpnt);
-}
- mega_rundoneq(megaCfg);
- return rc;
+ /*
+ * This is required here to complete any completed requests to be communicated
+ * over to the mid layer.
+ * Calling just mega_rundoneq() did not work.
+ */
+ if (megaCfg->qCompletedH) {
+ SCpnt = megaCfg->qCompletedH;
+ megaCfg->qCompletedH = (Scsi_Cmnd *) SCpnt->host_scribble;
+ megaCfg->qCcnt--;
+
+ SCpnt->host_scribble = (unsigned char *) NULL;
+ /* Callback */
+ callDone (SCpnt);
+ }
+ mega_rundoneq (megaCfg);
+
+ return rc;
}
/*---------------------------------------------------------------------
* Reset a previous SCSI request
*---------------------------------------------------------------------*/
-int megaraid_reset (Scsi_Cmnd * SCpnt, unsigned int rstflags)
+
+static int megaraid_reset (Scsi_Cmnd * SCpnt, unsigned int rstflags)
+{
+ mega_host_config *megaCfg;
+ int idx;
+ int rc;
+ mega_scb *pScb;
+
+ rc = SCSI_RESET_NOT_RUNNING;
+ megaCfg = (mega_host_config *) SCpnt->host->hostdata;
+
+ megaCfg->flag |= IN_RESET;
+
+ printk
+ ("megaraid_RESET: %.08lx cmd=%.02x <c=%d.t=%d.l=%d>, flag = %x\n",
+ SCpnt->serial_number, SCpnt->cmnd[0], SCpnt->channel,
+ SCpnt->target, SCpnt->lun, rstflags);
+
+ TRACE (("RESET: %.08lx %.02x <%d.%d.%d>\n",
+ SCpnt->serial_number, SCpnt->cmnd[0], SCpnt->channel,
+ SCpnt->target, SCpnt->lun));
+
+ /*
+ * Walk list of SCBs for any that are still outstanding
+ */
+ for (idx = 0; idx < megaCfg->max_cmds; idx++) {
+ if (megaCfg->scbList[idx].state != SCB_FREE) {
+ SCpnt = megaCfg->scbList[idx].SCpnt;
+ pScb = &megaCfg->scbList[idx];
+ if (SCpnt != NULL) {
+ pScb->state = SCB_RESET;
+ break;
+ }
+ }
+ }
+
+ megaCfg->flag &= ~IN_RESET;
+
+ mega_rundoneq (megaCfg);
+ return rc;
+}
+
+#ifdef CONFIG_PROC_FS
+/* Following code handles /proc fs */
+static int proc_printf (mega_host_config * megaCfg, const char *fmt, ...)
+{
+ va_list args;
+ int i;
+
+ if (megaCfg->procidx > PROCBUFSIZE)
+ return 0;
+
+ va_start (args, fmt);
+ i = vsprintf ((megaCfg->procbuf + megaCfg->procidx), fmt, args);
+ va_end (args);
+
+ megaCfg->procidx += i;
+ return i;
+}
+
+static int proc_read_config (char *page, char **start, off_t offset,
+ int count, int *eof, void *data)
+{
+
+ mega_host_config *megaCfg = (mega_host_config *) data;
+
+ *start = page;
+
+ if (megaCfg->productInfo.ProductName[0] != 0)
+ proc_printf (megaCfg, "%s\n", megaCfg->productInfo.ProductName);
+
+ proc_printf (megaCfg, "Controller Type: ");
+
+ if (megaCfg->flag & BOARD_QUARTZ)
+ proc_printf (megaCfg, "438/466/467/471/493\n");
+ else
+ proc_printf (megaCfg, "418/428/434\n");
+
+ if (megaCfg->flag & BOARD_40LD)
+ proc_printf (megaCfg,
+ "Controller Supports 40 Logical Drives\n");
+
+ if (megaCfg->flag & BOARD_64BIT)
+ proc_printf (megaCfg,
+ "Controller / Driver uses 64 bit memory addressing\n");
+
+ proc_printf (megaCfg, "Base = %08x, Irq = %d, ", megaCfg->base,
+ megaCfg->host->irq);
+
+ proc_printf (megaCfg, "Logical Drives = %d, Channels = %d\n",
+ megaCfg->numldrv, megaCfg->productInfo.SCSIChanPresent);
+
+ proc_printf (megaCfg, "Version =%s:%s, DRAM = %dMb\n",
+ megaCfg->fwVer, megaCfg->biosVer,
+ megaCfg->productInfo.DramSize);
+
+ proc_printf (megaCfg,
+ "Controller Queue Depth = %d, Driver Queue Depth = %d\n",
+ megaCfg->productInfo.MaxConcCmds, megaCfg->max_cmds);
+ COPY_BACK;
+ return count;
+}
+
+static int proc_read_stat (char *page, char **start, off_t offset,
+ int count, int *eof, void *data)
+{
+ int i;
+ mega_host_config *megaCfg = (mega_host_config *) data;
+
+ *start = page;
+
+ proc_printf (megaCfg, "Statistical Information for this controller\n");
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,4,0) /* 0x020100 */
+ proc_printf (megaCfg, "Interrupts Collected = %Lu\n",
+ megaCfg->nInterrupts);
+#else
+ proc_printf (megaCfg, "Interrupts Collected = %u\n",
+ (u32) megaCfg->nInterrupts);
+#endif
+
+ for (i = 0; i < megaCfg->numldrv; i++) {
+ proc_printf (megaCfg, "Logical Drive %d:\n", i);
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,4,0)
+ proc_printf (megaCfg,
+ "\tReads Issued = %Lu, Writes Issued = %Lu\n",
+ megaCfg->nReads[i], megaCfg->nWrites[i]);
+
+ proc_printf (megaCfg,
+ "\tSectors Read = %Lu, Sectors Written = %Lu\n\n",
+ megaCfg->nReadBlocks[i], megaCfg->nWriteBlocks[i]);
+#else
+ proc_printf (megaCfg,
+ "\tReads Issued = %10u, Writes Issued = %10u\n",
+ (u32) megaCfg->nReads[i],
+ (u32) megaCfg->nWrites[i]);
+
+ proc_printf (megaCfg,
+ "\tSectors Read = %10u, Sectors Written = %10u\n\n",
+ (u32) megaCfg->nReadBlocks[i],
+ (u32) megaCfg->nWriteBlocks[i]);
+#endif
+
+ }
+
+ COPY_BACK;
+ return count;
+}
+
+static int proc_read_status (char *page, char **start, off_t offset,
+ int count, int *eof, void *data)
{
- mega_host_config *megaCfg;
- int idx;
- int rc;
- mega_scb *pScb;
-
- rc = SCSI_RESET_NOT_RUNNING;
- megaCfg = (mega_host_config *) SCpnt->host->hostdata;
-
- megaCfg->flag |= IN_RESET;
-
- printk ("megaraid_RESET: %.08lx cmd=%.02x <c=%d.t=%d.l=%d>, flag = %x\n",
- SCpnt->serial_number, SCpnt->cmnd[0], SCpnt->channel, SCpnt->target,
- SCpnt->lun, rstflags);
-
- TRACE (("RESET: %.08lx %.02x <%d.%d.%d>\n",
- SCpnt->serial_number, SCpnt->cmnd[0], SCpnt->channel, SCpnt->target,
- SCpnt->lun));
-
- /*
- * Walk list of SCBs for any that are still outstanding
- */
- for (idx = 0; idx < megaCfg->max_cmds; idx++) {
- if (megaCfg->scbList[idx].state != SCB_FREE) {
- SCpnt = megaCfg->scbList[idx].SCpnt;
- pScb = &megaCfg->scbList[idx];
- if (SCpnt != NULL) {
- pScb->state = SCB_RESET;
- break;
- }
- }
- }
-
- megaCfg->flag &= ~IN_RESET;
-
- mega_rundoneq(megaCfg);
- return rc;
+ mega_host_config *megaCfg = (mega_host_config *) data;
+ *start = page;
+
+ proc_printf (megaCfg, "TBD\n");
+ COPY_BACK;
+ return count;
+}
+
+static int proc_read_mbox (char *page, char **start, off_t offset,
+ int count, int *eof, void *data)
+{
+
+ mega_host_config *megaCfg = (mega_host_config *) data;
+ volatile mega_mailbox *mbox = megaCfg->mbox;
+
+ *start = page;
+
+ proc_printf (megaCfg, "Contents of Mail Box Structure\n");
+ proc_printf (megaCfg, " Fw Command = 0x%02x\n", mbox->cmd);
+ proc_printf (megaCfg, " Cmd Sequence = 0x%02x\n", mbox->cmdid);
+ proc_printf (megaCfg, " No of Sectors= %04d\n", mbox->numsectors);
+ proc_printf (megaCfg, " LBA = 0x%02x\n", mbox->lba);
+ proc_printf (megaCfg, " DTA = 0x%08x\n", mbox->xferaddr);
+ proc_printf (megaCfg, " Logical Drive= 0x%02x\n", mbox->logdrv);
+ proc_printf (megaCfg, " No of SG Elmt= 0x%02x\n", mbox->numsgelements);
+ proc_printf (megaCfg, " Busy = %01x\n", mbox->busy);
+ proc_printf (megaCfg, " Status = 0x%02x\n", mbox->status);
+
+ /* proc_printf(megaCfg, "Dump of MailBox\n");
+ for (i = 0; i < 16; i++)
+ proc_printf(megaCfg, "%02x ",*(mbox + i));
+
+ proc_printf(megaCfg, "\n\nNumber of Status = %02d\n",mbox->numstatus);
+
+ for (i = 0; i < 46; i++) {
+ proc_printf(megaCfg,"%02d ",*(mbox + 16 + i));
+ if (i%16)
+ proc_printf(megaCfg,"\n");
+ }
+
+ if (!mbox->numsgelements) {
+ dta = phys_to_virt(mbox->xferaddr);
+ for (i = 0; i < mbox->numsgelements; i++)
+ if (dta) {
+ proc_printf(megaCfg,"Addr = %08x\n", (ulong)*(dta + i)); proc_printf(megaCfg,"Length = %08x\n",
+ (ulong)*(dta + i + 4));
+ }
+ }*/
+ COPY_BACK;
+ return count;
+}
+
+#if LINUX_VERSION_CODE > KERNEL_VERSION(2,3,0) /*0x20300 */
+#define CREATE_READ_PROC(string, fxn) create_proc_read_entry(string, \
+ S_IRUSR | S_IFREG,\
+ controller_proc_dir_entry,\
+ fxn, megaCfg)
+#else
+#define CREATE_READ_PROC(string, fxn) create_proc_read_entry(string,S_IRUSR | S_IFREG, controller_proc_dir_entry, fxn, megaCfg)
+
+static struct proc_dir_entry *
+create_proc_read_entry (const char *string,
+ int mode,
+ struct proc_dir_entry *parent,
+ read_proc_t * fxn, mega_host_config * megaCfg)
+{
+ struct proc_dir_entry *temp = NULL;
+
+ temp = kmalloc (sizeof (struct proc_dir_entry), GFP_KERNEL);
+ if (!temp)
+ return NULL;
+ memset (temp, 0, sizeof (struct proc_dir_entry));
+
+ if ((temp->name = kmalloc (strlen (string) + 1, GFP_KERNEL)) == NULL) {
+ kfree (temp);
+ return NULL;
+ }
+
+ strcpy ((char *) temp->name, string);
+ temp->namelen = strlen (string);
+ temp->mode = mode; /*S_IFREG | S_IRUSR */ ;
+ temp->data = (void *) megaCfg;
+ temp->read_proc = fxn;
+ proc_register (parent, temp);
+ return temp;
}
+#endif
+
+static void mega_create_proc_entry (int index, struct proc_dir_entry *parent)
+{
+ u_char string[64] = { 0 };
+ mega_host_config *megaCfg = megaCtlrs[index];
+ struct proc_dir_entry *controller_proc_dir_entry = NULL;
+
+ sprintf (string, "%d", index);
+
+#if LINUX_VERSION_CODE > KERNEL_VERSION(2,3,0) /*0x20300 */
+ controller_proc_dir_entry =
+ megaCfg->controller_proc_dir_entry = proc_mkdir (string, parent);
+#else
+ controller_proc_dir_entry =
+ megaCfg->controller_proc_dir_entry =
+ create_proc_entry (string, S_IFDIR | S_IRUGO | S_IXUGO, parent);
+#endif
+
+ if (!controller_proc_dir_entry)
+ printk ("\nmegaraid: proc_mkdir failed\n");
+ else {
+ megaCfg->proc_read =
+ CREATE_READ_PROC ("config", proc_read_config);
+ megaCfg->proc_status =
+ CREATE_READ_PROC ("status", proc_read_status);
+ megaCfg->proc_stat = CREATE_READ_PROC ("stat", proc_read_stat);
+ megaCfg->proc_mbox =
+ CREATE_READ_PROC ("mailbox", proc_read_mbox);
+ }
+
+}
+#endif /* CONFIG_PROC_FS */
/*-------------------------------------------------------------
* Return the disk geometry for a particular disk
* geom[1] = sectors
* geom[2] = cylinders
*-------------------------------------------------------------*/
-int megaraid_biosparam (Disk * disk, kdev_t dev, int *geom)
+static int megaraid_biosparam (Disk * disk, kdev_t dev, int *geom)
+{
+ int heads, sectors, cylinders;
+ mega_host_config *megaCfg;
+
+ /* Get pointer to host config structure */
+ megaCfg = (mega_host_config *) disk->device->host->hostdata;
+
+ /* Default heads (64) & sectors (32) */
+ heads = 64;
+ sectors = 32;
+ cylinders = disk->capacity / (heads * sectors);
+
+ /* Handle extended translation size for logical drives > 1Gb */
+ if (disk->capacity >= 0x200000) {
+ heads = 255;
+ sectors = 63;
+ cylinders = disk->capacity / (heads * sectors);
+ }
+
+ /* return result */
+ geom[0] = heads;
+ geom[1] = sectors;
+ geom[2] = cylinders;
+
+ return 0;
+}
+
+/*
+ * This routine will be called when the user has done a forced shutdown on the
+ * system. Flush the Adapter cache, that's the most we can do.
+ */
+static int megaraid_reboot_notify (struct notifier_block *this, unsigned long code,
+ void *unused)
+{
+ struct Scsi_Host *pSHost;
+ mega_host_config *megaCfg;
+ mega_mailbox *mbox;
+ u_char mboxData[16];
+ int i;
+
+ if (code == SYS_DOWN || code == SYS_HALT) {
+ for (i = 0; i < numCtlrs; i++) {
+ pSHost = megaCtlrs[i]->host;
+
+ megaCfg = (mega_host_config *) pSHost->hostdata;
+ mbox = (mega_mailbox *) mboxData;
+
+ /* Flush cache to disk */
+ memset (mbox, 0, 16);
+ mboxData[0] = 0xA;
+
+ /*
+ * Free irq, otherwise extra interrupt is generated
+ */
+ free_irq (megaCfg->host->irq, megaCfg);
+
+ /*
+ * Issue a blocking (interrupts disabled) command to
+ * the card
+ */
+ megaIssueCmd (megaCfg, mboxData, NULL, 0);
+ }
+ }
+ return NOTIFY_DONE;
+}
+
+static int mega_init_scb (mega_host_config * megacfg)
+{
+ int idx;
+
+#if DEBUG
+ if (megacfg->max_cmds >= MAX_COMMANDS) {
+ printk ("megaraid:ctlr max cmds = %x : MAX_CMDS = %x",
+ megacfg->max_cmds, MAX_COMMANDS);
+ }
+#endif
+
+ for (idx = megacfg->max_cmds - 1; idx >= 0; idx--) {
+
+ megacfg->scbList[idx].idx = idx;
+
+ /*
+ * ISR will make this flag zero to indicate the command has been
+ * completed. This is only for user ioctl calls. Rest of the driver
+ * and the mid-layer operations are not connected with this flag.
+ */
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,4,0)
+ megacfg->scbList[idx].sgList =
+ pci_alloc_consistent (megacfg->dev,
+ sizeof (mega_64sglist) * MAX_SGLIST,
+ &(megacfg->scbList[idx].
+ dma_sghandle64));
+
+ megacfg->scbList[idx].sg64List =
+ (mega_64sglist *) megacfg->scbList[idx].sgList;
+#else
+ megacfg->scbList[idx].sgList = kmalloc (sizeof (mega_sglist) * MAX_SGLIST, GFP_ATOMIC | GFP_DMA);
+#endif
+
+ if (megacfg->scbList[idx].sgList == NULL) {
+ printk (KERN_WARNING
+ "Can't allocate sglist for id %d\n", idx);
+ mega_freeSgList (megacfg);
+ return -1;
+ }
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,4,0)
+ megacfg->scbList[idx].pthru = pci_alloc_consistent (megacfg->dev,
+ sizeof (mega_passthru),
+ &(megacfg->scbList[idx].
+ dma_passthruhandle64));
+
+ if (megacfg->scbList[idx].pthru == NULL) {
+ printk (KERN_WARNING
+ "Can't allocate passthru for id %d\n", idx);
+ }
+ /*
+ * Allocate a 256 Byte Bounce Buffer for handling INQ/RD_CAPA
+ */
+ megacfg->scbList[idx].bounce_buffer = pci_alloc_consistent (megacfg->dev,
+ 256,
+ &(megacfg->scbList[idx].
+ dma_bounce_buffer));
+
+ if (!megacfg->scbList[idx].bounce_buffer)
+ printk
+ ("megaraid: allocation for bounce buffer failed\n");
+
+ megacfg->scbList[idx].dma_type = M_RD_DMA_TYPE_NONE;
+#endif
+
+ if (idx < MAX_COMMANDS) {
+ /*
+ * Link to free list
+ * lock not required since we are loading the driver, so no
+ * commands possible right now.
+ */
+ enq_scb_freelist (megacfg, &megacfg->scbList[idx],
+ NO_LOCK, INTR_ENB);
+
+ }
+ }
+
+ return 0;
+}
+
+/*
+ * Enqueues a SCB
+ */
+static void enq_scb_freelist (mega_host_config * megacfg, mega_scb * scb, int lock,
+ int intr)
+{
+
+ if (lock == INTERNAL_LOCK || intr == INTR_DIS) {
+ if (intr == INTR_DIS)
+ spin_lock_irq (&megacfg->lock_free);
+ else
+ spin_lock (&megacfg->lock_free);
+ }
+
+ scb->state = SCB_FREE;
+ scb->SCpnt = NULL;
+
+ if (megacfg->qFreeH == (mega_scb *) NULL) {
+ megacfg->qFreeH = megacfg->qFreeT = scb;
+ } else {
+ megacfg->qFreeT->next = scb;
+ megacfg->qFreeT = scb;
+ }
+
+ megacfg->qFreeT->next = NULL;
+ megacfg->qFcnt++;
+
+ if (lock == INTERNAL_LOCK || intr == INTR_DIS) {
+ if (intr == INTR_DIS)
+ spin_unlock_irq (&megacfg->lock_free);
+ else
+ spin_unlock (&megacfg->lock_free);
+ }
+}
+
+/*
+ * Routines for the character/ioctl interface to the driver
+ */
+static int megadev_open (struct inode *inode, struct file *filep)
{
- int heads, sectors, cylinders;
- mega_host_config *megaCfg;
-
- /* Get pointer to host config structure */
- megaCfg = (mega_host_config *) disk->device->host->hostdata;
-
- /* Default heads (64) & sectors (32) */
- heads = 64;
- sectors = 32;
- cylinders = disk->capacity / (heads * sectors);
-
- /* Handle extended translation size for logical drives > 1Gb */
- if (disk->capacity >= 0x200000) {
- heads = 255;
- sectors = 63;
- cylinders = disk->capacity / (heads * sectors);
- }
-
- /* return result */
- geom[0] = heads;
- geom[1] = sectors;
- geom[2] = cylinders;
-
- return 0;
+ MOD_INC_USE_COUNT;
+ return 0; /* success */
}
-static int __init megaraid_setup(char *str)
+static int megadev_ioctl_entry (struct inode *inode, struct file *filep,
+ unsigned int cmd, unsigned long arg)
{
- skip_id = -1;
- if (str && !strncmp(str, "skip", strlen("skip"))) {
- if (str[4] != '\0') {
- skip_id = str[4] - '0';
- if (str[5] != '\0') {
- skip_id = (skip_id * 10) + (str[5] - '0');
- }
- }
- skip_id = (skip_id > 15) ? -1 : skip_id;
- }
- return 1;
+ int ret = -1;
+
+ /*
+ * We do not allow parallel ioctls to the driver as of now.
+ */
+ down (&mimd_entry_mtx);
+ ret = megadev_ioctl (inode, filep, cmd, arg);
+ up (&mimd_entry_mtx);
+
+ return ret;
+
}
-__setup("megaraid=", megaraid_setup);
+static int megadev_ioctl (struct inode *inode, struct file *filep,
+ unsigned int cmd, unsigned long arg)
+{
+ int adapno;
+ kdev_t dev;
+ u32 inlen;
+ struct uioctl_t ioc;
+ char *kphysaddr = NULL;
+ int nadap = numCtlrs;
+ int npages;
+ u8 opcode;
+ int order = 0;
+ u32 outlen;
+ int ret;
+ u8 subopcode;
+ Scsi_Cmnd *scsicmd;
+ struct Scsi_Host *shpnt;
+ char *uaddr;
+ struct uioctl_t *uioc;
+ IO_LOCK_T;
+
+ if (!inode || !(dev = inode->i_rdev))
+ return -EINVAL;
+
+ if (_IOC_TYPE (cmd) != MEGAIOC_MAGIC)
+ return (-EINVAL);
+
+ /*
+ * We do not transfer more than IOCTL_MAX_DATALEN (see megaraid.h) with
+	 * this interface. If the user needs to transfer more than this, he should
+ * use 0x81 command op-code.
+ */
+
+ /*
+ * Get the user ioctl structure
+ */
+ ret = verify_area (VERIFY_WRITE, (char *) arg, sizeof (struct uioctl_t));
+
+ if (ret)
+ return ret;
+
+ if(copy_from_user (&ioc, (char *) arg, sizeof (struct uioctl_t)))
+ return -EFAULT;
+
+ /*
+ * The first call the applications should make is to find out the number
+ * of controllers in the system. The next logical call should be for
+ * getting the list of controllers in the system as detected by the
+ * driver.
+ */
+
+ /*
+ * Get the opcode and subopcode for the commands
+ */
+ opcode = ioc.ui.fcs.opcode;
+ subopcode = ioc.ui.fcs.subopcode;
+
+ switch (opcode) {
+ case M_RD_DRIVER_IOCTL_INTERFACE:
+ switch (subopcode) {
+ case MEGAIOC_QDRVRVER: /* Query driver version */
+ put_user (driver_ver, (u32 *) ioc.data);
+ return 0;
+
+ case MEGAIOC_QNADAP: /* Get # of adapters */
+ put_user (nadap, (int *) ioc.data);
+ return nadap;
+
+ case MEGAIOC_QADAPINFO: /* Get adapter information */
+ /*
+ * which adapter?
+ */
+ adapno = ioc.ui.fcs.adapno;
+
+ /*
+ * The adapter numbers do not start with 0, at least in
+ * the user space. This is just to make sure, 0 is not the
+ * default value which will refer to adapter 1. So the
+ * user needs to make use of macros MKADAP() and GETADAP()
+ * (See megaraid.h) while making ioctl() call.
+ */
+ adapno = GETADAP (adapno);
+
+ if (adapno >= numCtlrs)
+ return (-ENODEV);
+
+ ret = verify_area (VERIFY_WRITE,
+ ioc.data,
+ sizeof (struct mcontroller));
+ if (ret)
+ return ret;
+
+ /*
+ * Copy struct mcontroller to user area
+ */
+ copy_to_user (ioc.data,
+ mcontroller + adapno,
+ sizeof (struct mcontroller));
+ return 0;
+
+ default:
+ return (-EINVAL);
+
+ } /* inner switch */
+ break;
+
+ case M_RD_IOCTL_CMD_NEW:
+ /* which adapter? */
+ adapno = ioc.ui.fcs.adapno;
+
+ /* See comment above: MEGAIOC_QADAPINFO */
+ adapno = GETADAP (adapno);
+
+ if (adapno >= numCtlrs)
+ return (-ENODEV);
+
+ /* Check for zero length buffer. */
+ if (!ioc.ui.fcs.length)
+ return -EINVAL;
+
+ /* save the user address */
+ uaddr = ioc.ui.fcs.buffer;
+/*
+* For M_RD_IOCTL_CMD_NEW commands, the fields outlen and inlen of uioctl_t
+* structure are treated as flags. If outlen is 1, the data is
+* transferred from the device and if inlen is 1, the data is
+* transferred to the device.
+*/
+ outlen = ioc.outlen;
+ inlen = ioc.inlen;
+#if 0
+ if (inlen && outlen)
+ return -EINVAL;
+#endif
+ if (outlen) {
+ ret = verify_area (VERIFY_WRITE,
+ (char *) ioc.ui.fcs.buffer,
+ ioc.ui.fcs.length);
+ if (ret)
+ return ret;
+ } else if (inlen) {
+ ret = verify_area (VERIFY_READ,
+ (char *) ioc.ui.fcs.buffer,
+ ioc.ui.fcs.length);
+
+ if (ret)
+ return ret;
+ }
+
+ /* How many pages required of size PAGE_SIZE */
+ npages = ioc.ui.fcs.length / PAGE_SIZE;
+ /* Do we need one more? */
+
+ if (ioc.ui.fcs.length % PAGE_SIZE)
+ npages++;
+
+ /* ioctl does not support data xfer > 32KB */
+ if (npages == 1)
+ order = 0;
+ else if (npages == 2)
+ order = 1;
+ else if (npages <= 4)
+ order = 2;
+ else if (npages <= 8)
+ order = 3;
+ else
+ return -EINVAL;
+
+ if (outlen || inlen) {
+ /*
+ * Allocate kernel space for npages.
+ *
+ * Since we need the memory for DMA, it needs to be physically
+			 * contiguous. __get_free_pages() returns consecutive free pages
+ * in kernel space.
+ * Note: We don't do __get_dma_pages(), since for PCI devices,
+			 * the DMA memory is not restricted to 16M, which is ensured
+ * by __get_dma_pages()
+ */
+
+ if ((kphysaddr = (char *) __get_free_pages (GFP_KERNEL,
+ order)) ==
+ 0) {
+ printk (KERN_INFO
+ "megaraid:allocation failed\n");
+ return -ENOMEM;
+ }
+
+ memset (kphysaddr, 0, npages * PAGE_SIZE);
+ ioc.ui.fcs.buffer = kphysaddr;
+
+ if (inlen) {
+ /* copyin the user data */
+ copy_from_user (kphysaddr,
+ (char *) uaddr,
+ ioc.ui.fcs.length);
+ }
+ }
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,4,0)
+ scsicmd = (Scsi_Cmnd *) kmalloc (sizeof (Scsi_Cmnd),
+ GFP_KERNEL | GFP_DMA);
+ memset (scsicmd, 0, sizeof (Scsi_Cmnd));
+#else
+ scsicmd = (Scsi_Cmnd *) scsi_init_malloc (sizeof (Scsi_Cmnd),
+ GFP_ATOMIC | GFP_DMA);
+#endif
+ if (!scsicmd) {
+ if (kphysaddr)
+ free_pages ((unsigned long) kphysaddr, order);
+ return -ENOMEM;
+ }
+
+ scsicmd->host = NULL;
+
+ /*
+ * Find this host
+ */
+ for (shpnt = scsi_hostlist; shpnt; shpnt = shpnt->next) {
+ if (shpnt->hostdata ==
+ (unsigned long *) megaCtlrs[adapno])
+ scsicmd->host = shpnt;
+ }
+
+ if (scsicmd->host == NULL) {
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,4,0)
+ kfree (scsicmd);
+#else
+ scsi_init_free ((char *) scsicmd, sizeof (Scsi_Cmnd));
+#endif
+ if (kphysaddr)
+ free_pages ((unsigned long) kphysaddr, order);
+ return -ENODEV;
+ }
+
+ scsicmd->cmnd[0] = MEGADEVIOC;
+ scsicmd->request_buffer = (void *) &ioc;
+
+ init_MUTEX_LOCKED (&mimd_ioctl_sem);
+
+ IO_LOCK;
+ megaraid_queue (scsicmd, megadev_ioctl_done);
+
+ IO_UNLOCK;
+
+ down (&mimd_ioctl_sem);
+
+ if (!scsicmd->result && outlen) {
+ copy_to_user (uaddr, kphysaddr, ioc.ui.fcs.length);
+ }
+
+ /*
+ * copyout the result
+ */
+ uioc = (struct uioctl_t *) arg;
+
+ if (ioc.mbox[0] == MEGA_MBOXCMD_PASSTHRU) {
+ put_user (scsicmd->result, &uioc->pthru.scsistatus);
+ } else {
+ put_user (1, &uioc->mbox[16]); /* numstatus */
+ /* status */
+ put_user (scsicmd->result, &uioc->mbox[17]);
+ }
+
+ if (kphysaddr) {
+ free_pages ((ulong) kphysaddr, order);
+ }
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,4,0) /*0x20400 */
+ kfree (scsicmd);
+#else
+ scsi_init_free ((char *) scsicmd, sizeof (Scsi_Cmnd));
+#endif
+
+ return ret;
+
+ case M_RD_IOCTL_CMD:
+ /* which adapter? */
+ adapno = ioc.ui.fcs.adapno;
+ /* See comment above: MEGAIOC_QADAPINFO */
+ adapno = GETADAP (adapno);
+
+ if (adapno >= numCtlrs)
+ return (-ENODEV);
+
+ /* save the user address */
+ uaddr = ioc.data;
+ outlen = ioc.outlen;
+ inlen = ioc.inlen;
+
+ if ((outlen >= IOCTL_MAX_DATALEN)
+ || (inlen >= IOCTL_MAX_DATALEN))
+ return (-EINVAL);
+
+ if (outlen) {
+ ret = verify_area (VERIFY_WRITE, ioc.data, outlen);
+ if (ret)
+ return ret;
+ } else if (inlen) {
+ ret = verify_area (VERIFY_READ, ioc.data, inlen);
+
+ if (ret)
+ return ret;
+ }
+
+ if (outlen || inlen) {
+ /*
+ * Allocate a page of kernel space.
+ */
+ if ((kphysaddr =
+ (char *) __get_free_pages (GFP_KERNEL, 0)) == 0) {
+
+ printk (KERN_INFO
+ "megaraid:allocation failed\n");
+ return -ENOMEM;
+ }
+
+ memset (kphysaddr, 0, PAGE_SIZE);
+ ioc.data = kphysaddr;
+
+ if (inlen) {
+ if (ioc.mbox[0] == MEGA_MBOXCMD_PASSTHRU) {
+ /* copyin the user data */
+ copy_from_user (kphysaddr,
+ uaddr,
+ ioc.pthru.dataxferlen);
+ } else {
+ copy_from_user (kphysaddr,
+ uaddr, inlen);
+ }
+ }
+ }
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,4,0) /* 0x020400 */
+ scsicmd = (Scsi_Cmnd *) kmalloc (sizeof (Scsi_Cmnd),
+ GFP_KERNEL | GFP_DMA);
+ memset (scsicmd, 0, sizeof (Scsi_Cmnd));
+#else
+ scsicmd = (Scsi_Cmnd *) scsi_init_malloc (sizeof (Scsi_Cmnd),
+ GFP_ATOMIC | GFP_DMA);
+#endif
+
+ if (!scsicmd) {
+ if (kphysaddr)
+ free_pages ((unsigned long) kphysaddr, 0);
+ return -ENOMEM;
+ }
+
+ scsicmd->host = NULL;
+
+ /*
+ * Find this host in the hostlist
+ */
+ for (shpnt = scsi_hostlist; shpnt; shpnt = shpnt->next) {
+ if (shpnt->hostdata ==
+ (unsigned long *) megaCtlrs[adapno])
+ scsicmd->host = shpnt;
+ }
+
+ if (scsicmd->host == NULL) {
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,4,0)
+ kfree (scsicmd);
+#else
+ scsi_init_free ((char *) scsicmd, sizeof (Scsi_Cmnd));
+#endif
+ if (kphysaddr)
+ free_pages ((unsigned long) kphysaddr, 0);
+
+ return -ENODEV;
+ }
+
+ scsicmd->cmnd[0] = MEGADEVIOC;
+ scsicmd->request_buffer = (void *) &ioc;
+
+ init_MUTEX_LOCKED (&mimd_ioctl_sem);
+
+ IO_LOCK;
+ megaraid_queue (scsicmd, megadev_ioctl_done);
+
+ IO_UNLOCK;
+ down (&mimd_ioctl_sem);
+
+ if (!scsicmd->result && outlen) {
+ if (ioc.mbox[0] == MEGA_MBOXCMD_PASSTHRU) {
+ copy_to_user (uaddr,
+ kphysaddr, ioc.pthru.dataxferlen);
+ } else {
+ copy_to_user (uaddr, kphysaddr, outlen);
+ }
+ }
+
+ /*
+ * copyout the result
+ */
+ uioc = (struct uioctl_t *) arg;
+
+ if (ioc.mbox[0] == MEGA_MBOXCMD_PASSTHRU) {
+ put_user (scsicmd->result, &uioc->pthru.scsistatus);
+ } else {
+ put_user (1, &uioc->mbox[16]); /* numstatus */
+ /* status */
+ put_user (scsicmd->result, &uioc->mbox[17]);
+ }
+
+ if (kphysaddr)
+ free_pages ((unsigned long) kphysaddr, 0);
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,4,0)
+ kfree (scsicmd);
+#else
+ scsi_init_free ((char *) scsicmd, sizeof (Scsi_Cmnd));
+#endif
+ return ret;
+
+ default:
+ return (-EINVAL);
+
+ } /* Outer switch */
+
+ return 0;
+}
+
+static void
+megadev_ioctl_done (Scsi_Cmnd * sc)
+{
+ up (&mimd_ioctl_sem);
+}
+
+static mega_scb *
+megadev_doioctl (mega_host_config * megacfg, Scsi_Cmnd * sc)
+{
+ u8 cmd;
+ struct uioctl_t *ioc = NULL;
+ mega_mailbox *mbox = NULL;
+ mega_ioctl_mbox *mboxioc = NULL;
+ struct mbox_passthru *mboxpthru = NULL;
+ mega_scb *scb = NULL;
+ mega_passthru *pthru = NULL;
+
+ if ((scb = mega_allocateSCB (megacfg, sc)) == NULL) {
+ sc->result = (DID_ERROR << 16);
+ callDone (sc);
+ return NULL;
+ }
+
+ ioc = (struct uioctl_t *) sc->request_buffer;
+
+ memcpy (scb->mboxData, ioc->mbox, sizeof (scb->mboxData));
+
+ /* The generic mailbox */
+ mbox = (mega_mailbox *) ioc->mbox;
+
+ /*
+ * Get the user command
+ */
+ cmd = ioc->mbox[0];
+
+ switch (cmd) {
+ case MEGA_MBOXCMD_PASSTHRU:
+ /*
+ * prepare the SCB with information from the user ioctl structure
+ */
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,4,0)
+ pthru = scb->pthru;
+#else
+ pthru = &scb->pthru;
+#endif
+ memcpy (pthru, &ioc->pthru, sizeof (mega_passthru));
+ mboxpthru = (struct mbox_passthru *) scb->mboxData;
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,4,0)
+ if (megacfg->flag & BOARD_64BIT) {
+ /* This is just a sample with one element
+			 * This if executes only on 2.4 kernels
+ */
+ mboxpthru->dataxferaddr = scb->dma_passthruhandle64;
+ scb->sg64List[0].address =
+ pci_map_single (megacfg->dev,
+ ioc->data,
+ 4096, PCI_DMA_BIDIRECTIONAL);
+ scb->sg64List[0].length = 4096; // TODO: Check this
+ pthru->dataxferaddr = scb->dma_sghandle64;
+ pthru->numsgelements = 1;
+ mboxpthru->cmd = 0xC3;
+ } else {
+ mboxpthru->dataxferaddr = scb->dma_passthruhandle64;
+ pthru->dataxferaddr =
+ pci_map_single (megacfg->dev,
+ ioc->data,
+ 4096, PCI_DMA_BIDIRECTIONAL);
+ pthru->numsgelements = 0;
+ }
+
+#else
+ {
+ mboxpthru->dataxferaddr = virt_to_bus (&scb->pthru);
+ pthru->dataxferaddr = virt_to_bus (ioc->data);
+ pthru->numsgelements = 0;
+ }
+#endif
+
+ pthru->reqsenselen = 14;
+ break;
+
+ default: /* Normal command */
+ mboxioc = (mega_ioctl_mbox *) scb->mboxData;
+
+ if (ioc->ui.fcs.opcode == M_RD_IOCTL_CMD_NEW) {
+ scb->buff_ptr = ioc->ui.fcs.buffer;
+ scb->iDataSize = ioc->ui.fcs.length;
+ } else {
+ scb->buff_ptr = ioc->data;
+ scb->iDataSize = 4096; // TODO:check it
+ }
+
+ set_mbox_xfer_addr (megacfg, scb, mboxioc, FROMTO_DEVICE);
+ mboxioc->numsgelements = 0;
+ break;
+ }
+
+ return scb;
+}
+
+static int
+megadev_close (struct inode *inode, struct file *filep)
+{
+#ifdef MODULE
+ MOD_DEC_USE_COUNT;
+#endif
+ return 0;
+}
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,4,0)
static Scsi_Host_Template driver_template = MEGARAID;
+#include "scsi_module.c"
+#else
+#ifdef MODULE
+Scsi_Host_Template driver_template = MEGARAID;
#include "scsi_module.c"
+#endif /* MODULE */
+#endif /* LINUX VERSION 2.4.XX test */
#include <linux/version.h>
#endif
-#define IN_ISR 0x80000000L
-#define IN_ABORT 0x40000000L
-#define IN_RESET 0x20000000L
-#define IN_QUEUE 0x10000000L
-#define BOARD_QUARTZ 0x08000000L
-#define BOARD_40LD 0x04000000L
-
-#ifndef HOSTS_C
+#define IN_ISR 0x80000000L
+#define IN_ABORT 0x40000000L
+#define IN_RESET 0x20000000L
+#define IN_QUEUE 0x10000000L
+
+#define BOARD_QUARTZ 0x08000000L
+#define BOARD_40LD 0x04000000L
+#define BOARD_64BIT 0x02000000L
+
#define SCB_FREE 0x0
#define SCB_ACTIVE 0x1
#define SCB_WAITQ 0x2
#define SCB_COMPLETE 0x4
#define SCB_ABORTED 0x5
#define SCB_RESET 0x6
-#endif
-#define MEGA_CMD_TIMEOUT 10
+#define M_RD_CRLFSTR "\n"
+#define M_RD_IOCTL_CMD 0x80
+#define M_RD_IOCTL_CMD_NEW 0x81
+#define M_RD_DRIVER_IOCTL_INTERFACE 0x82
+
+#define MEGARAID_VERSION "v1.14g (Release Date: Feb 5, 2001; 11:42)"
+#define MEGARAID_IOCTL_VERSION 114
+
+/* Methods */
+#define GET_DRIVER_INFO 0x1
+
+#define MEGA_CMD_TIMEOUT 10
/* Feel free to fiddle with these.. max values are:
SGLIST 0..26
COMMANDS 0..253
CMDPERLUN 0..63
*/
-#define MAX_SGLIST 0x1A
-#define MAX_COMMANDS 127
-#define MAX_CMD_PER_LUN 63
+
+#define MAX_SGLIST 0x1A
+#define MAX_COMMANDS 127
+#define MAX_CMD_PER_LUN 63
#define MAX_FIRMWARE_STATUS 46
#define MAX_LOGICAL_DRIVES 8
-#define MAX_CHANNEL 5
-#define MAX_TARGET 15
+#define MAX_CHANNEL 5
+#define MAX_TARGET 15
#define MAX_PHYSICAL_DRIVES MAX_CHANNEL*MAX_TARGET
#define INQUIRY_DATA_SIZE 0x24
-#define MAX_CDB_LEN 0x0A
+#define MAX_CDB_LEN 0x0A
#define MAX_REQ_SENSE_LEN 0x20
-#define INTR_VALID 0x40
+#define INTR_VALID 0x40
+
+/* Direction Macros for MBOX Data direction */
+#define TO_DEVICE 0x0
+#define FROM_DEVICE 0x1
+#define FROMTO_DEVICE 0x2
/* Mailbox commands */
#define MEGA_MBOXCMD_LREAD 0x01
#define MEGA_MBOXCMD_LWRITE 0x02
+#define MEGA_MBOXCMD_LREAD64 0xA7
+#define MEGA_MBOXCMD_LWRITE64 0xA8
#define MEGA_MBOXCMD_PASSTHRU 0x03
#define MEGA_MBOXCMD_ADAPTERINQ 0x05
/* Offsets into Mailbox */
-#define COMMAND_PORT 0x00
-#define COMMAND_ID_PORT 0x01
-#define SG_LIST_PORT0 0x08
-#define SG_LIST_PORT1 0x09
-#define SG_LIST_PORT2 0x0a
-#define SG_LIST_PORT3 0x0b
-#define SG_ELEMENT_PORT 0x0d
-#define NO_FIRED_PORT 0x0f
+#define COMMAND_PORT 0x00
+#define COMMAND_ID_PORT 0x01
+#define SG_LIST_PORT0 0x08
+#define SG_LIST_PORT1 0x09
+#define SG_LIST_PORT2 0x0a
+#define SG_LIST_PORT3 0x0b
+#define SG_ELEMENT_PORT 0x0d
+#define NO_FIRED_PORT 0x0f
/* I/O Port offsets */
-#define I_CMD_PORT 0x00
-#define I_ACK_PORT 0x00
-#define I_TOGGLE_PORT 0x01
-#define INTR_PORT 0x0a
-
-#define MAILBOX_SIZE (sizeof(mega_mailbox)-16)
-#define MBOX_BUSY_PORT 0x00
-#define MBOX_PORT0 0x04
-#define MBOX_PORT1 0x05
-#define MBOX_PORT2 0x06
-#define MBOX_PORT3 0x07
-#define ENABLE_MBOX_REGION 0x0B
+#define I_CMD_PORT 0x00
+#define I_ACK_PORT 0x00
+#define I_TOGGLE_PORT 0x01
+#define INTR_PORT 0x0a
+
+#define MAILBOX_SIZE (sizeof(mega_mailbox)-16)
+#define MBOX_BUSY_PORT 0x00
+#define MBOX_PORT0 0x04
+#define MBOX_PORT1 0x05
+#define MBOX_PORT2 0x06
+#define MBOX_PORT3 0x07
+#define ENABLE_MBOX_REGION 0x0B
/* I/O Port Values */
-#define ISSUE_BYTE 0x10
-#define ACK_BYTE 0x08
-#define ENABLE_INTR_BYTE 0xc0
-#define DISABLE_INTR_BYTE 0x00
-#define VALID_INTR_BYTE 0x40
-#define MBOX_BUSY_BYTE 0x10
-#define ENABLE_MBOX_BYTE 0x00
+#define ISSUE_BYTE 0x10
+#define ACK_BYTE 0x08
+#define ENABLE_INTR_BYTE 0xc0
+#define DISABLE_INTR_BYTE 0x00
+#define VALID_INTR_BYTE 0x40
+#define MBOX_BUSY_BYTE 0x10
+#define ENABLE_MBOX_BYTE 0x00
/* Setup some port macros here */
-#define WRITE_MAILBOX(base,offset,value) *(base+offset)=value
-#define READ_MAILBOX(base,offset) *(base+offset)
-
-#define WRITE_PORT(base,offset,value) outb_p(value,base+offset)
-#define READ_PORT(base,offset) inb_p(base+offset)
-
-#define ISSUE_COMMAND(base) WRITE_PORT(base,I_CMD_PORT,ISSUE_BYTE)
-#define CLEAR_INTR(base) WRITE_PORT(base,I_ACK_PORT,ACK_BYTE)
-#define ENABLE_INTR(base) WRITE_PORT(base,I_TOGGLE_PORT,ENABLE_INTR_BYTE)
-#define DISABLE_INTR(base) WRITE_PORT(base,I_TOGGLE_PORT,DISABLE_INTR_BYTE)
-
-/* Define AMI's PCI codes */
-#undef PCI_VENDOR_ID_AMI
-#undef PCI_DEVICE_ID_AMI_MEGARAID
-
-#ifndef PCI_VENDOR_ID_AMI
-#define PCI_VENDOR_ID_AMI 0x101E
-#define PCI_DEVICE_ID_AMI_MEGARAID 0x9010
-#endif
-
-#define PCI_CONF_BASE_ADDR_OFFSET 0x10
-#define PCI_CONF_IRQ_OFFSET 0x3c
-#define PCI_CONF_AMISIG 0xa0
-#define AMI_SIGNATURE 0x3344
-#define AMI_SIGNATURE_471 0xCCCC
-
-#if LINUX_VERSION_CODE < 0x20100
+#define WRITE_MAILBOX(base,offset,value) *(base+offset)=value
+#define READ_MAILBOX(base,offset) *(base+offset)
+
+#define WRITE_PORT(base,offset,value) outb_p(value,base+offset)
+#define READ_PORT(base,offset) inb_p(base+offset)
+
+#define ISSUE_COMMAND(base) WRITE_PORT(base,I_CMD_PORT,ISSUE_BYTE)
+#define CLEAR_INTR(base) WRITE_PORT(base,I_ACK_PORT,ACK_BYTE)
+#define ENABLE_INTR(base) WRITE_PORT(base,I_TOGGLE_PORT,ENABLE_INTR_BYTE)
+#define DISABLE_INTR(base) WRITE_PORT(base,I_TOGGLE_PORT,DISABLE_INTR_BYTE)
+
+/* Special Adapter Commands */
+#define FW_FIRE_WRITE 0x2C
+#define FW_FIRE_FLASH 0x2D
+
+#define FC_NEW_CONFIG 0xA1
+#define DCMD_FC_CMD 0xA1
+#define DCMD_FC_PROCEED 0x02
+#define DCMD_DELETE_LOGDRV 0x03
+#define DCMD_FC_READ_NVRAM_CONFIG 0x04
+#define DCMD_FC_READ_NVRAM_CONFIG_64 0xC0
+#define DCMD_FC_READ_FINAL_CONFIG 0x05
+#define DCMD_GET_DISK_CONFIG 0x06
+#define DCMD_GET_DISK_CONFIG_64 0xC2
+#define DCMD_CHANGE_LDNO 0x07
+#define DCMD_COMPACT_CONFIG 0x08
+#define DCMD_DELETE_DRIVEGROUP 0x09
+#define DCMD_GET_LOOPID_INFO 0x0A
+#define DCMD_CHANGE_LOOPID 0x0B
+#define DCMD_GET_NUM_SCSI_CHANS 0x0C
+#define DCMD_WRITE_CONFIG 0x0D
+#define DCMD_WRITE_CONFIG_64 0xC1
+
+#define NC_SUBOP_PRODUCT_INFO 0x0E
+#define NC_SUBOP_ENQUIRY3 0x0F
+#define ENQ3_GET_SOLICITED_NOTIFY_ONLY 0x01
+#define ENQ3_GET_SOLICITED_FULL 0x02
+#define ENQ3_GET_UNSOLICITED 0x03
+
+#define PCI_CONF_BASE_ADDR_OFFSET 0x10
+#define PCI_CONF_IRQ_OFFSET 0x3c
+#define PCI_CONF_AMISIG 0xa0
+#define PCI_CONF_AMISIG64 0xa4
+
+/* Sub-System Vendor ID sorted on alphabetical order*/
+#define AMI_SUBSYS_ID 0x101E
+#define DELL_SUBSYS_ID 0x1028
+#define HP_SUBSYS_ID 0x103C
+
+#define AMI_SIGNATURE 0x3344
+#define AMI_SIGNATURE_471 0xCCCC
+#define AMI_64BIT_SIGNATURE 0x0299
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,1,0) /*0x20100 */
#define MEGARAID \
- { NULL, /* Next */\
- NULL, /* Usage Count Pointer */\
- NULL, /* /proc Directory Entry */\
- megaraid_proc_info, /* /proc Info Function */\
- "MegaRAID", /* Driver Name */\
- megaraid_detect, /* Detect Host Adapter */\
- megaraid_release, /* Release Host Adapter */\
- megaraid_info, /* Driver Info Function */\
- megaraid_command, /* Command Function */\
- megaraid_queue, /* Queue Command Function */\
- megaraid_abort, /* Abort Command Function */\
- megaraid_reset, /* Reset Command Function */\
- NULL, /* Slave Attach Function */\
- megaraid_biosparam, /* Disk BIOS Parameters */\
- MAX_COMMANDS, /* # of cmds that can be\
- outstanding at any time */\
- 7, /* HBA Target ID */\
- MAX_SGLIST, /* Scatter/Gather Table Size */\
- MAX_CMD_PER_LUN, /* SCSI Commands per LUN */\
- 0, /* Present */\
- 0, /* Default Unchecked ISA DMA */\
- ENABLE_CLUSTERING } /* Enable Clustering */
+ { NULL, /* Next */\
+ NULL, /* Usage Count Pointer */\
+ NULL, /* proc Directory Entry */\
+ megaraid_proc_info, /* proc Info Function */\
+ "MegaRAID", /* Driver Name */\
+ megaraid_detect, /* Detect Host Adapter */\
+ megaraid_release, /* Release Host Adapter */\
+ megaraid_info, /* Driver Info Function */\
+ megaraid_command, /* Command Function */\
+ megaraid_queue, /* Queue Command Function */\
+ megaraid_abort, /* Abort Command Function */\
+ megaraid_reset, /* Reset Command Function */\
+ NULL, /* Slave Attach Function */\
+ megaraid_biosparam, /* Disk BIOS Parameters */\
+ MAX_COMMANDS, /* # of cmds that can be\
+ outstanding at any time */\
+ 7, /* HBA Target ID */\
+ MAX_SGLIST, /* Scatter/Gather Table Size */\
+ MAX_CMD_PER_LUN, /* SCSI Commands per LUN */\
+ 0, /* Present */\
+ 0, /* Default Unchecked ISA DMA */\
+ ENABLE_CLUSTERING } /* Enable Clustering */
#else
#define MEGARAID \
{\
- name: "MegaRAID", /* Driver Name */\
- proc_info: megaraid_proc_info, /* /proc driver info */\
- detect: megaraid_detect, /* Detect Host Adapter */\
- release: megaraid_release, /* Release Host Adapter */\
- info: megaraid_info, /* Driver Info Function */\
- command: megaraid_command, /* Command Function */\
- queuecommand: megaraid_queue, /* Queue Command Function */\
- abort: megaraid_abort, /* Abort Command Function */\
- reset: megaraid_reset, /* Reset Command Function */\
- bios_param: megaraid_biosparam, /* Disk BIOS Parameters */\
- can_queue: MAX_COMMANDS, /* Can Queue */\
- this_id: 7, /* HBA Target ID */\
- sg_tablesize: MAX_SGLIST, /* Scatter/Gather Table Size */\
- cmd_per_lun: MAX_CMD_PER_LUN, /* SCSI Commands per LUN */\
- present: 0, /* Present */\
- unchecked_isa_dma:0, /* Default Unchecked ISA DMA */\
- use_clustering: ENABLE_CLUSTERING /* Enable Clustering */\
+ name: "MegaRAID", /* Driver Name */\
+ proc_info: megaraid_proc_info, /* /proc driver info */\
+ detect: megaraid_detect, /* Detect Host Adapter */\
+ release: megaraid_release, /* Release Host Adapter */\
+ info: megaraid_info, /* Driver Info Function */\
+ command: megaraid_command, /* Command Function */\
+ queuecommand: megaraid_queue, /* Queue Command Function */\
+ abort: megaraid_abort, /* Abort Command Function */\
+ reset: megaraid_reset, /* Reset Command Function */\
+ bios_param: megaraid_biosparam, /* Disk BIOS Parameters */\
+ can_queue: MAX_COMMANDS, /* Can Queue */\
+ this_id: 7, /* HBA Target ID */\
+ sg_tablesize: MAX_SGLIST, /* Scatter/Gather Table Size */\
+ cmd_per_lun: MAX_CMD_PER_LUN, /* SCSI Commands per LUN */\
+ present: 0, /* Present */\
+ unchecked_isa_dma: 0, /* Default Unchecked ISA DMA */\
+ use_clustering: ENABLE_CLUSTERING /* Enable Clustering */\
}
#endif
-
/***********************************************************************
* Structure Declarations for the Firmware supporting 40 Logical Drives
* and 256 Physical Drives.
***********************************************************************/
-#define FC_MAX_LOGICAL_DRIVES 40
-#define FC_MAX_LOG_DEVICES FC_MAX_LOGICAL_DRIVES
-#define FC_MAX_SPAN_DEPTH 8
-#define FC_MAX_ROW_SIZE 32
-
-#define FC_MAX_CHANNELS 16
-#define FC_MAX_TARGETS_PER_CHANNEL 16
-#define FC_MAX_PHYSICAL_DEVICES 256
-
-#define FC_NEW_CONFIG 0xA1
-#define DCMD_FC_CMD 0xA1
- #define NC_SUBOP_PRODUCT_INFO 0x0E
- #define NC_SUBOP_ENQUIRY3 0x0F
- #define ENQ3_GET_SOLICITED_NOTIFY_ONLY 0x01
- #define ENQ3_GET_SOLICITED_FULL 0x02
- #define ENQ3_GET_UNSOLICITED 0x03
+#define FC_MAX_LOGICAL_DRIVES 40
+#define FC_MAX_LOG_DEVICES FC_MAX_LOGICAL_DRIVES
+#define FC_MAX_SPAN_DEPTH 8
+#define FC_MAX_ROW_SIZE 32
+#define FC_MAX_CHANNELS 16
+#define FC_MAX_TARGETS_PER_CHANNEL 16
+#define FC_MAX_PHYSICAL_DEVICES 256
/********************************************
- * PRODUCT_INFO Strucure
+ * PRODUCT_INFO
********************************************/
#define SIG_40LOG_32STR_8SPN 0x00282008
-/*
+/*
* Utilities declare this strcture size as 1024 bytes. So more fields can
* be added in future.
*/
-struct MRaidProductInfo
-{
- u32 DataSize; /* current size in bytes (not including resvd) */
- u32 ConfigSignature;
- /* Current value is 0x00282008
- * 0x28=MAX_LOGICAL_DRIVES,
- * 0x20=Number of stripes and
- * 0x08=Number of spans */
- u8 FwVer[16]; /* printable ASCI string */
- u8 BiosVer[16]; /* printable ASCI string */
- u8 ProductName[80]; /* printable ASCI string */
-
- u8 MaxConcCmds; /* Max. concurrent commands supported */
- u8 SCSIChanPresent; /* Number of SCSI Channels detected */
- u8 FCLoopPresent; /* Number of Fibre Loops detected */
- u8 memType; /* EDO, FPM, SDRAM etc */
-
- u32 signature;
- u16 DramSize; /* In terms of MB */
- u16 subSystemID;
-
- u16 subSystemVendorID;
- u8 numNotifyCounters;
- u8 pad1k[889]; /* 135 + 889 resvd = 1024 total size */
-}__attribute__((packed));
+struct MRaidProductInfo {
+ u32 DataSize; /* current size in bytes (not including resvd) */
+ u32 ConfigSignature;
+ /* Current value is 0x00282008
+ * 0x28=MAX_LOGICAL_DRIVES,
+ * 0x20=Number of stripes and
+ * 0x08=Number of spans */
+	u8 FwVer[16];		/* printable ASCII string */
+	u8 BiosVer[16];		/* printable ASCII string */
+	u8 ProductName[80];	/* printable ASCII string */
+
+ u8 MaxConcCmds; /* Max. concurrent commands supported */
+ u8 SCSIChanPresent; /* Number of SCSI Channels detected */
+ u8 FCLoopPresent; /* Number of Fibre Loops detected */
+ u8 memType; /* EDO, FPM, SDRAM etc */
+
+ u32 signature;
+ u16 DramSize; /* In terms of MB */
+ u16 subSystemID;
+
+ u16 subSystemVendorID;
+ u8 numNotifyCounters;
+ u8 pad1k[889]; /* 135 + 889 resvd = 1024 total size */
+} __attribute__ ((packed));
typedef struct MRaidProductInfo megaRaidProductInfo;
/********************************************
- * Standard ENQUIRY Strucure
+ * Standard ENQUIRY
********************************************/
-struct FC_ADP_INFO
-{
- u8 MaxConcCmds; /* Max. concurrent commands supported. */
- u8 RbldRate; /* Rebuild Rate. Varies from 0%-100% */
- u8 MaxTargPerChan; /* Max. Targets supported per chan. */
- u8 ChanPresent; /* No. of Chans present on this adapter. */
- u8 FwVer[4]; /* Firmware version. */
- u16 AgeOfFlash; /* No. of times FW has been downloaded. */
- u8 ChipSetValue; /* Contents of 0xC0000832 */
- u8 DramSize; /* In terms of MB */
- u8 CacheFlushInterval; /* In terms of Seconds */
- u8 BiosVersion[4];
- u8 BoardType;
- u8 sense_alert;
- u8 write_config_count; /* Increase with evry configuration change */
- u8 drive_inserted_count; /* Increase with every drive inserted */
- u8 inserted_drive; /* Channel: Id of inserted drive */
- u8 battery_status;
- /*
- BIT 0 : battery module missing
- BIT 1 : VBAD
- BIT 2 : temp high
- BIT 3 : battery pack missing
- BIT 4,5 : 00 - charge complete
- 01 - fast charge in prog
- 10 - fast charge fail
- 11 - undefined
- BIt 6 : counter > 1000
- Bit 7 : undefined
- */
- u8 dec_fault_bus_info; /* was resvd */
-}__attribute__((packed));
-
-struct FC_LDRV_INFO
-{
- u8 NumLDrv; /* No. of Log. Drvs configured. */
- u8 recon_state[FC_MAX_LOGICAL_DRIVES/8];
- /* bit field for State of reconstruct */
- u16 LDrvOpStatus[FC_MAX_LOGICAL_DRIVES/8];
- /* bit field Status of Long Operations. */
-
- u32 LDrvSize[FC_MAX_LOGICAL_DRIVES]; /* Size of each log. Drv. */
- u8 LDrvProp[FC_MAX_LOGICAL_DRIVES];
- u8 LDrvState[FC_MAX_LOGICAL_DRIVES]; /* State of Logical Drives. */
-}__attribute__((packed));
+struct FC_ADP_INFO {
+ u8 MaxConcCmds; /* Max. concurrent commands supported. */
+ u8 RbldRate; /* Rebuild Rate. Varies from 0%-100% */
+ u8 MaxTargPerChan; /* Max. Targets supported per chan. */
+ u8 ChanPresent; /* No. of Chans present on this adapter. */
+ u8 FwVer[4]; /* Firmware version. */
+ u16 AgeOfFlash; /* No. of times FW has been downloaded. */
+ u8 ChipSetValue; /* Contents of 0xC0000832 */
+ u8 DramSize; /* In terms of MB */
+ u8 CacheFlushInterval; /* In terms of Seconds */
+ u8 BiosVersion[4];
+ u8 BoardType;
+ u8 sense_alert;
+	u8 write_config_count;	/* Increase with every configuration change */
+ u8 drive_inserted_count;/* Increase with every drive inserted */
+ u8 inserted_drive; /* Channel: Id of inserted drive */
+ u8 battery_status;
+ /*
+ BIT 0 : battery module missing
+ BIT 1 : VBAD
+ BIT 2 : temp high
+ BIT 3 : battery pack missing
+ BIT 4,5 : 00 - charge complete
+ 01 - fast charge in prog
+ 10 - fast charge fail
+ 11 - undefined
+	   Bit 6 : counter > 1000
+ Bit 7 : undefined
+ */
+ u8 dec_fault_bus_info; /* was resvd */
+} __attribute__ ((packed));
+
+struct FC_LDRV_INFO {
+ u8 NumLDrv; /* No. of Log. Drvs configured. */
+ u8 recon_state[FC_MAX_LOGICAL_DRIVES / 8];
+ /* bit field for State of reconstruct */
+ u16 LDrvOpStatus[FC_MAX_LOGICAL_DRIVES / 8];
+ /* bit field Status of Long Operations. */
+
+ u32 LDrvSize[FC_MAX_LOGICAL_DRIVES]; /* Size of each log. Drv. */
+ u8 LDrvProp[FC_MAX_LOGICAL_DRIVES];
+ u8 LDrvState[FC_MAX_LOGICAL_DRIVES]; /* State of Logical Drives. */
+} __attribute__ ((packed));
#define PREVSTAT_MASK 0xf0
#define CURRSTAT_MASK 0x0f
-struct FC_PDRV_INFO
-{
- u8 PDrvState[FC_MAX_PHYSICAL_DEVICES]; /* State of Phys Drvs. */
-}__attribute__((packed));
+struct FC_PDRV_INFO {
+ u8 PDrvState[FC_MAX_PHYSICAL_DEVICES]; /* State of Phys Drvs. */
+} __attribute__ ((packed));
+struct FC_AdapterInq {
+ struct FC_ADP_INFO AdpInfo;
+ struct FC_LDRV_INFO LogdrvInfo;
+ struct FC_PDRV_INFO PhysdrvInfo;
+} __attribute__ ((packed));
-struct FC_AdapterInq
-{
- struct FC_ADP_INFO AdpInfo;
- struct FC_LDRV_INFO LogdrvInfo;
- struct FC_PDRV_INFO PhysdrvInfo;
-}__attribute__((packed));
-
-
-typedef struct FC_AdapterInq mega_RAIDINQ_FC;
+typedef struct FC_AdapterInq mega_RAIDINQ_FC;
/********************************************
- * NOTIFICATION Strucure
+ * NOTIFICATION
********************************************/
#define MAX_NOTIFY_SIZE 0x80
#define CUR_NOTIFY_SIZE sizeof(struct MegaRAID_Notify)
-/*
+/*
* Utilities declare this strcture size as ?? bytes. So more fields can
* be added in future.
*/
-struct MegaRAID_Notify
-{
- u32 globalCounter; /* Any change increments this counter */
-
- u8 paramCounter; /* Indicates any params changed */
- u8 paramId; /* Param modified - defined below */
- u16 paramVal; /* New val of last param modified */
-
- u8 writeConfigCounter; /* write config occurred */
- u8 writeConfigRsvd[3];
-
- u8 ldrvOpCounter; /* Indicates ldrv op started/completed */
- u8 ldrvOpId; /* ldrv num */
- u8 ldrvOpCmd; /* ldrv operation - defined below */
- u8 ldrvOpStatus; /* status of the operation */
-
- u8 ldrvStateCounter; /* Indicates change of ldrv state */
- u8 ldrvStateId; /* ldrv num */
- u8 ldrvStateNew; /* New state */
- u8 ldrvStateOld; /* old state */
-
- u8 pdrvStateCounter; /* Indicates change of ldrv state */
- u8 pdrvStateId; /* pdrv id */
- u8 pdrvStateNew; /* New state */
- u8 pdrvStateOld; /* old state */
-
- u8 pdrvFmtCounter; /* Indicates pdrv format started/over */
- u8 pdrvFmtId; /* pdrv id */
- u8 pdrvFmtVal; /* format started/over */
- u8 pdrvFmtRsvd;
-
- u8 targXferCounter; /* Indicates SCSI-2 Xfer rate change */
- u8 targXferId; /* pdrv Id */
- u8 targXferVal; /* new Xfer params of last pdrv */
- u8 targXferRsvd;
-
- u8 fcLoopIdChgCounter; /* Indicates loopid changed */
- u8 fcLoopIdPdrvId; /* pdrv id */
- u8 fcLoopId0; /* loopid on fc loop 0 */
- u8 fcLoopId1; /* loopid on fc loop 1 */
-
- u8 fcLoopStateCounter; /* Indicates loop state changed */
- u8 fcLoopState0; /* state of fc loop 0 */
- u8 fcLoopState1; /* state of fc loop 1 */
- u8 fcLoopStateRsvd;
-}__attribute__((packed));
-
+struct MegaRAID_Notify {
+ u32 globalCounter; /* Any change increments this counter */
+
+ u8 paramCounter; /* Indicates any params changed */
+ u8 paramId; /* Param modified - defined below */
+ u16 paramVal; /* New val of last param modified */
+
+ u8 writeConfigCounter; /* write config occurred */
+ u8 writeConfigRsvd[3];
+
+ u8 ldrvOpCounter; /* Indicates ldrv op started/completed */
+ u8 ldrvOpId; /* ldrv num */
+ u8 ldrvOpCmd; /* ldrv operation - defined below */
+ u8 ldrvOpStatus; /* status of the operation */
+
+ u8 ldrvStateCounter; /* Indicates change of ldrv state */
+ u8 ldrvStateId; /* ldrv num */
+ u8 ldrvStateNew; /* New state */
+ u8 ldrvStateOld; /* old state */
+
+ u8 pdrvStateCounter; /* Indicates change of ldrv state */
+ u8 pdrvStateId; /* pdrv id */
+ u8 pdrvStateNew; /* New state */
+ u8 pdrvStateOld; /* old state */
+
+ u8 pdrvFmtCounter; /* Indicates pdrv format started/over */
+ u8 pdrvFmtId; /* pdrv id */
+ u8 pdrvFmtVal; /* format started/over */
+ u8 pdrvFmtRsvd;
+
+ u8 targXferCounter; /* Indicates SCSI-2 Xfer rate change */
+ u8 targXferId; /* pdrv Id */
+ u8 targXferVal; /* new Xfer params of last pdrv */
+ u8 targXferRsvd;
+
+ u8 fcLoopIdChgCounter; /* Indicates loopid changed */
+ u8 fcLoopIdPdrvId; /* pdrv id */
+ u8 fcLoopId0; /* loopid on fc loop 0 */
+ u8 fcLoopId1; /* loopid on fc loop 1 */
+
+ u8 fcLoopStateCounter; /* Indicates loop state changed */
+ u8 fcLoopState0; /* state of fc loop 0 */
+ u8 fcLoopState1; /* state of fc loop 1 */
+ u8 fcLoopStateRsvd;
+} __attribute__ ((packed));
/********************************************
* PARAM IDs in Notify struct
********************************************/
-#define PARAM_RBLD_RATE 0x01
+#define PARAM_RBLD_RATE 0x01
/*--------------------------------------
- * Param val =
- * byte 0: new rbld rate
+ * Param val =
+ * byte 0: new rbld rate
*--------------------------------------*/
#define PARAM_CACHE_FLUSH_INTERVAL 0x02
/*--------------------------------------
- * Param val =
+ * Param val =
* byte 0: new cache flush interval
*--------------------------------------*/
-#define PARAM_SENSE_ALERT 0x03
+#define PARAM_SENSE_ALERT 0x03
/*--------------------------------------
- * Param val =
+ * Param val =
* byte 0: last pdrv id causing chkcond
*--------------------------------------*/
-#define PARAM_DRIVE_INSERTED 0x04
+#define PARAM_DRIVE_INSERTED 0x04
/*--------------------------------------
- * Param val =
+ * Param val =
* byte 0: last pdrv id inserted
*--------------------------------------*/
-#define PARAM_BATTERY_STATUS 0x05
+#define PARAM_BATTERY_STATUS 0x05
/*--------------------------------------
- * Param val =
+ * Param val =
* byte 0: battery status
*--------------------------------------*/
/********************************************
* Ldrv operation cmd in Notify struct
********************************************/
-#define LDRV_CMD_CHKCONSISTANCY 0x01
-#define LDRV_CMD_INITIALIZE 0x02
-#define LDRV_CMD_RECONSTRUCTION 0x03
+#define LDRV_CMD_CHKCONSISTANCY 0x01
+#define LDRV_CMD_INITIALIZE 0x02
+#define LDRV_CMD_RECONSTRUCTION 0x03
/********************************************
* Ldrv operation status in Notify struct
********************************************/
-#define LDRV_OP_SUCCESS 0x00
-#define LDRV_OP_FAILED 0x01
-#define LDRV_OP_ABORTED 0x02
-#define LDRV_OP_CORRECTED 0x03
-#define LDRV_OP_STARTED 0x04
-
+#define LDRV_OP_SUCCESS 0x00
+#define LDRV_OP_FAILED 0x01
+#define LDRV_OP_ABORTED 0x02
+#define LDRV_OP_CORRECTED 0x03
+#define LDRV_OP_STARTED 0x04
/********************************************
* Raid Logical drive states.
********************************************/
-#define RDRV_OFFLINE 0
-#define RDRV_DEGRADED 1
-#define RDRV_OPTIMAL 2
-#define RDRV_DELETED 3
+#define RDRV_OFFLINE 0
+#define RDRV_DEGRADED 1
+#define RDRV_OPTIMAL 2
+#define RDRV_DELETED 3
/*******************************************
* Physical drive states.
*******************************************/
-#define PDRV_UNCNF 0
-#define PDRV_ONLINE 3
-#define PDRV_FAILED 4
-#define PDRV_RBLD 5
-/* #define PDRV_HOTSPARE 6 */
+#define PDRV_UNCNF 0
+#define PDRV_ONLINE 3
+#define PDRV_FAILED 4
+#define PDRV_RBLD 5
/*******************************************
* Formal val in Notify struct
*******************************************/
-#define PDRV_FMT_START 0x01
-#define PDRV_FMT_OVER 0x02
+#define PDRV_FMT_START 0x01
+#define PDRV_FMT_OVER 0x02
/********************************************
* FC Loop State in Notify Struct
********************************************/
-#define ENQ_FCLOOP_FAILED 0
-#define ENQ_FCLOOP_ACTIVE 1
-#define ENQ_FCLOOP_TRANSIENT 2
-
+#define ENQ_FCLOOP_FAILED 0
+#define ENQ_FCLOOP_ACTIVE 1
+#define ENQ_FCLOOP_TRANSIENT 2
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,4,0)
+#define M_RD_DMA_TYPE_NONE 0xFFFF
+#define M_RD_PTHRU_WITH_BULK_DATA 0x0001
+#define M_RD_PTHRU_WITH_SGLIST 0x0002
+#define M_RD_BULK_DATA_ONLY 0x0004
+#define M_RD_SGLIST_ONLY 0x0008
+#endif
/********************************************
- * ENQUIRY3 Strucure
+ * ENQUIRY3
********************************************/
-/*
+/*
* Utilities declare this strcture size as 1024 bytes. So more fields can
* be added in future.
*/
-struct MegaRAID_Enquiry3
-{
- u32 dataSize; /* current size in bytes (not including resvd) */
+struct MegaRAID_Enquiry3 {
+ u32 dataSize; /* current size in bytes (not including resvd) */
- struct MegaRAID_Notify notify;
+ struct MegaRAID_Notify notify;
- u8 notifyRsvd[MAX_NOTIFY_SIZE - CUR_NOTIFY_SIZE];
+ u8 notifyRsvd[MAX_NOTIFY_SIZE - CUR_NOTIFY_SIZE];
- u8 rbldRate; /* Rebuild rate (0% - 100%) */
- u8 cacheFlushInterval; /* In terms of Seconds */
- u8 senseAlert;
- u8 driveInsertedCount; /* drive insertion count */
+ u8 rbldRate; /* Rebuild rate (0% - 100%) */
+ u8 cacheFlushInterval; /* In terms of Seconds */
+ u8 senseAlert;
+ u8 driveInsertedCount; /* drive insertion count */
- u8 batteryStatus;
- u8 numLDrv; /* No. of Log Drives configured */
- u8 reconState[FC_MAX_LOGICAL_DRIVES/8]; /* State of reconstruct */
- u16 lDrvOpStatus[FC_MAX_LOGICAL_DRIVES/8]; /* log. Drv Status */
+ u8 batteryStatus;
+ u8 numLDrv; /* No. of Log Drives configured */
+ u8 reconState[FC_MAX_LOGICAL_DRIVES / 8]; /* State of reconstruct */
+ u16 lDrvOpStatus[FC_MAX_LOGICAL_DRIVES / 8]; /* log. Drv Status */
- u32 lDrvSize[FC_MAX_LOGICAL_DRIVES]; /* Size of each log. Drv */
- u8 lDrvProp[FC_MAX_LOGICAL_DRIVES];
- u8 lDrvState[FC_MAX_LOGICAL_DRIVES]; /* State of Logical Drives */
- u8 pDrvState[FC_MAX_PHYSICAL_DEVICES]; /* State of Phys. Drvs. */
- u16 physDrvFormat[FC_MAX_PHYSICAL_DEVICES/16];
+ u32 lDrvSize[FC_MAX_LOGICAL_DRIVES]; /* Size of each log. Drv */
+ u8 lDrvProp[FC_MAX_LOGICAL_DRIVES];
+ u8 lDrvState[FC_MAX_LOGICAL_DRIVES]; /* State of Logical Drives */
+ u8 pDrvState[FC_MAX_PHYSICAL_DEVICES]; /* State of Phys. Drvs. */
+ u16 physDrvFormat[FC_MAX_PHYSICAL_DEVICES / 16];
- u8 targXfer[80]; /* phys device transfer rate */
- u8 pad1k[263]; /* 761 + 263reserved = 1024 bytes total size */
-}__attribute__((packed));
+ u8 targXfer[80]; /* phys device transfer rate */
+	u8 pad1k[263];		/* 761 + 263 reserved = 1024 bytes total size */
+} __attribute__ ((packed));
typedef struct MegaRAID_Enquiry3 mega_Enquiry3;
/* Structures */
typedef struct _mega_ADP_INFO {
- u8 MaxConcCmds;
- u8 RbldRate;
- u8 MaxTargPerChan;
- u8 ChanPresent;
- u8 FwVer[4];
- u16 AgeOfFlash;
- u8 ChipSetValue;
- u8 DramSize;
- u8 CacheFlushInterval;
- u8 BiosVer[4];
- u8 resvd[7];
+ u8 MaxConcCmds;
+ u8 RbldRate;
+ u8 MaxTargPerChan;
+ u8 ChanPresent;
+ u8 FwVer[4];
+ u16 AgeOfFlash;
+ u8 ChipSetValue;
+ u8 DramSize;
+ u8 CacheFlushInterval;
+ u8 BiosVer[4];
+ u8 resvd[7];
} mega_ADP_INFO;
typedef struct _mega_LDRV_INFO {
- u8 NumLDrv;
- u8 resvd[3];
- u32 LDrvSize[MAX_LOGICAL_DRIVES];
- u8 LDrvProp[MAX_LOGICAL_DRIVES];
- u8 LDrvState[MAX_LOGICAL_DRIVES];
+ u8 NumLDrv;
+ u8 resvd[3];
+ u32 LDrvSize[MAX_LOGICAL_DRIVES];
+ u8 LDrvProp[MAX_LOGICAL_DRIVES];
+ u8 LDrvState[MAX_LOGICAL_DRIVES];
} mega_LDRV_INFO;
typedef struct _mega_PDRV_INFO {
- u8 PDrvState[MAX_PHYSICAL_DRIVES];
- u8 resvd;
+ u8 PDrvState[MAX_PHYSICAL_DRIVES];
+ u8 resvd;
} mega_PDRV_INFO;
-// RAID inquiry: Mailbox command 0x5
+/* RAID inquiry: Mailbox command 0x5 */
typedef struct _mega_RAIDINQ {
- mega_ADP_INFO AdpInfo;
- mega_LDRV_INFO LogdrvInfo;
- mega_PDRV_INFO PhysdrvInfo;
+ mega_ADP_INFO AdpInfo;
+ mega_LDRV_INFO LogdrvInfo;
+ mega_PDRV_INFO PhysdrvInfo;
} mega_RAIDINQ;
-// Passthrough command: Mailbox command 0x3
+/* Passthrough command: Mailbox command 0x3 */
typedef struct mega_passthru {
- u8 timeout:3; /* 0=6sec/1=60sec/2=10min/3=3hrs */
- u8 ars:1;
- u8 reserved:3;
- u8 islogical:1;
- u8 logdrv; /* if islogical == 1 */
- u8 channel; /* if islogical == 0 */
- u8 target; /* if islogical == 0 */
- u8 queuetag; /* unused */
- u8 queueaction; /* unused */
- u8 cdb[MAX_CDB_LEN];
- u8 cdblen;
- u8 reqsenselen;
- u8 reqsensearea[MAX_REQ_SENSE_LEN];
- u8 numsgelements;
- u8 scsistatus;
- u32 dataxferaddr;
- u32 dataxferlen;
+ u8 timeout:3; /* 0=6sec/1=60sec/2=10min/3=3hrs */
+ u8 ars:1;
+ u8 reserved:3;
+ u8 islogical:1;
+ u8 logdrv; /* if islogical == 1 */
+ u8 channel; /* if islogical == 0 */
+ u8 target; /* if islogical == 0 */
+ u8 queuetag; /* unused */
+ u8 queueaction; /* unused */
+ u8 cdb[MAX_CDB_LEN];
+ u8 cdblen;
+ u8 reqsenselen;
+ u8 reqsensearea[MAX_REQ_SENSE_LEN];
+ u8 numsgelements;
+ u8 scsistatus;
+ u32 dataxferaddr;
+ u32 dataxferlen;
} mega_passthru;
struct _mega_mailbox {
- /* 0x0 */ u8 cmd;
- /* 0x1 */ u8 cmdid;
- /* 0x2 */ u16 numsectors;
- /* 0x4 */ u32 lba;
- /* 0x8 */ u32 xferaddr;
- /* 0xC */ u8 logdrv;
- /* 0xD */ u8 numsgelements;
- /* 0xE */ u8 resvd;
- /* 0xF */ u8 busy;
- /* 0x10 */ u8 numstatus;
- /* 0x11 */ u8 status;
- /* 0x12 */ u8 completed[46];
- u8 mraid_poll;
- u8 mraid_ack;
- u8 pad[16]; /* for alignment purposes */
-}__attribute__((packed));
+ /* 0x0 */ u8 cmd;
+ /* 0x1 */ u8 cmdid;
+ /* 0x2 */ u16 numsectors;
+ /* 0x4 */ u32 lba;
+ /* 0x8 */ u32 xferaddr;
+ /* 0xC */ u8 logdrv;
+ /* 0xD */ u8 numsgelements;
+ /* 0xE */ u8 resvd;
+ /* 0xF */ u8 busy;
+ /* 0x10 */ u8 numstatus;
+ /* 0x11 */ u8 status;
+ /* 0x12 */ u8 completed[46];
+ volatile u8 mraid_poll;
+ volatile u8 mraid_ack;
+ u8 pad[16]; /* for alignment purposes */
+} __attribute__ ((packed));
typedef struct _mega_mailbox mega_mailbox;
typedef struct {
- u32 xferSegment; /* for 64-bit controllers */
- mega_mailbox mailbox;
+ u32 xferSegment_lo;
+ u32 xferSegment_hi;
+ mega_mailbox mailbox;
} mega_mailbox64;
typedef struct _mega_ioctl_mbox {
- /* 0x0 */ u8 cmd;
- /* 0x1 */ u8 cmdid;
- /* 0x2 */ u8 channel;
- /* 0x3 */ u8 param;
- /* 0x4 */ u8 pad[4];
- /* 0x8 */ u32 xferaddr;
- /* 0xC */ u8 logdrv;
- /* 0xD */ u8 numsgelements;
- /* 0xE */ u8 resvd;
- /* 0xF */ u8 busy;
- /* 0x10 */ u8 numstatus;
- /* 0x11 */ u8 status;
- /* 0x12 */ u8 completed[46];
- u8 mraid_poll;
- u8 mraid_ack;
- u8 malign[16];
+ /* 0x0 */ u8 cmd;
+ /* 0x1 */ u8 cmdid;
+ /* 0x2 */ u8 channel;
+ /* 0x3 */ u8 param;
+ /* 0x4 */ u8 pad[4];
+ /* 0x8 */ u32 xferaddr;
+ /* 0xC */ u8 logdrv;
+ /* 0xD */ u8 numsgelements;
+ /* 0xE */ u8 resvd;
+ /* 0xF */ u8 busy;
+ /* 0x10 */ u8 numstatus;
+ /* 0x11 */ u8 status;
+ /* 0x12 */ u8 completed[46];
+ u8 mraid_poll;
+ u8 mraid_ack;
+ u8 malign[16];
} mega_ioctl_mbox;
+typedef struct _mega_64sglist32 {
+ u64 address;
+ u32 length;
+} __attribute__ ((packed)) mega_64sglist;
+
typedef struct _mega_sglist {
- u32 address;
- u32 length;
+ u32 address;
+ u32 length;
} mega_sglist;
/* Queued command data */
typedef struct _mega_scb mega_scb;
struct _mega_scb {
- int idx;
- u32 state;
- u32 isrcount;
- u8 mboxData[16];
- mega_passthru pthru;
- Scsi_Cmnd *SCpnt;
- mega_sglist *sgList;
- char *kern_area; /* Only used for large ioctl xfers */
- struct wait_queue *ioctl_wait;
- struct semaphore sem;
- mega_scb *next;
+ int idx;
+ u32 state;
+ u32 isrcount;
+ u8 mboxData[16];
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,4,0)
+ u32 dma_type;
+	dma_addr_t dma_h_bulkdata;	/*Dma handle for bulk data transfer */
+ u32 dma_direction; /*Dma direction */
+ dma_addr_t dma_h_sgdata; /*Dma handle for the sglist structure */
+ dma_addr_t dma_h_sglist[MAX_SGLIST]; /*Dma handle for all SGL elements */
+ u8 sglist_count;
+ dma_addr_t dma_sghandle64;
+ dma_addr_t dma_passthruhandle64;
+ dma_addr_t dma_bounce_buffer;
+ u8 *bounce_buffer;
+#endif
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,4,0)
+ mega_passthru *pthru;
+#else
+ mega_passthru pthru;
+#endif
+
+ Scsi_Cmnd *SCpnt;
+ mega_sglist *sgList;
+ mega_64sglist *sg64List;
+ struct semaphore ioctl_sem;
+ void *buff_ptr;
+ u32 iDataSize;
+ mega_scb *next;
};
+/* internal locking by the queue manipulating routines */
+#define INTERNAL_LOCK 0
+/* external locking by the queue manipulating routines */
+#define EXTERNAL_LOCK 1
+#define NO_LOCK 2
+#define INTR_ENB 0 /* do not disable interrupt while manipulating */
+#define INTR_DIS 1 /* disable interrupt while manipulating */
+
/* Per-controller data */
typedef struct _mega_host_config {
- u8 numldrv;
- u32 flag;
- u32 base;
-
- mega_scb *qFreeH;
- mega_scb *qFreeT;
- mega_scb *qPendingH;
- mega_scb *qPendingT;
-
- Scsi_Cmnd *qCompletedH;
- Scsi_Cmnd *qCompletedT;
- u32 qFcnt;
- u32 qPcnt;
- u32 qCcnt;
-
- u32 nReads[FC_MAX_LOGICAL_DRIVES];
- u32 nWrites[FC_MAX_LOGICAL_DRIVES];
-
- /* Host adapter parameters */
- u8 fwVer[7];
- u8 biosVer[7];
-
- struct Scsi_Host *host;
-
- volatile mega_mailbox64 *mbox64; /* ptr to beginning of 64-bit mailbox */
- volatile mega_mailbox *mbox; /* ptr to beginning of standard mailbox */
- volatile mega_mailbox64 mailbox64;
-#if 0
- volatile union {
- u8 generic_buffer[2 * 1024L];
- mega_RAIDINQ adapterInfoData;
- mega_Enquiry3 enquiry3Data;
- }mega_buffer;
+ u8 numldrv;
+ u32 flag;
+
+#ifdef __LP64__
+ u64 base;
#else
- volatile u8 mega_buffer[2*1024L];
+ u32 base;
+#endif
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,4,0)
+ dma_addr_t dma_handle64, adjdmahandle64;
+ struct pci_dev *dev;
#endif
- volatile megaRaidProductInfo productInfo;
- u8 max_cmds;
- mega_scb scbList[MAX_COMMANDS];
+ mega_scb *qFreeH;
+ mega_scb *qFreeT;
+ spinlock_t lock_free;
+
+ mega_scb *qPendingH;
+ mega_scb *qPendingT;
+ spinlock_t lock_pend;
+
+ Scsi_Cmnd *qCompletedH;
+ Scsi_Cmnd *qCompletedT;
+ spinlock_t lock_scsicmd;
+
+ u32 qFcnt;
+ u32 qPcnt;
+ u32 qCcnt;
+
+ unsigned long nReads[FC_MAX_LOGICAL_DRIVES];
+ unsigned long nReadBlocks[FC_MAX_LOGICAL_DRIVES];
+ unsigned long nWrites[FC_MAX_LOGICAL_DRIVES];
+ unsigned long nWriteBlocks[FC_MAX_LOGICAL_DRIVES];
+ unsigned long nInterrupts;
+ /* Host adapter parameters */
+ u8 fwVer[7];
+ u8 biosVer[7];
+
+ struct Scsi_Host *host;
+
+ volatile mega_mailbox64 *mbox64; /* ptr to beginning of 64-bit mailbox */
+ volatile mega_mailbox *mbox; /* ptr to beginning of standard mailbox */
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,4,0)
+/* ptr to beginning of 64-bit mailbox */
+ volatile mega_mailbox64 *mailbox64ptr;
+#else
+ volatile mega_mailbox64 mailbox64;
+#endif
+
+ volatile u8 mega_buffer[2 * 1024L];
+ volatile megaRaidProductInfo productInfo;
+
+ u8 max_cmds;
+ mega_scb scbList[MAX_COMMANDS];
+
+#define PROCBUFSIZE 4096
+ char procbuf[PROCBUFSIZE];
+ int procidx;
+ struct proc_dir_entry *controller_proc_dir_entry;
+ struct proc_dir_entry *proc_read, *proc_stat, *proc_status, *proc_mbox;
} mega_host_config;
-const char *megaraid_info(struct Scsi_Host *);
-int megaraid_detect(Scsi_Host_Template *);
-int megaraid_release(struct Scsi_Host *);
-int megaraid_command(Scsi_Cmnd *);
-int megaraid_abort(Scsi_Cmnd *);
-int megaraid_reset(Scsi_Cmnd *, unsigned int);
-int megaraid_queue(Scsi_Cmnd *, void (*done) (Scsi_Cmnd *));
-int megaraid_biosparam(Disk *, kdev_t, int *);
-int megaraid_proc_info(char *buffer, char **start, off_t offset,
- int length, int hostno, int inout);
+typedef struct _driver_info {
+ int size;
+ ulong version;
+} mega_driver_info;
+
+/*
+ * User ioctl structure.
+ * This structure will be used for Traditional Method ioctl interface
+ * commands (M_RD_IOCTL_CMD), Alternate Buffer Method (M_RD_IOCTL_CMD_NEW)
+ * ioctl commands and the Driver ioctls(M_RD_DRIVER_IOCTL_INTERFACE).
+ * The Driver ioctl interface handles the commands at
+ * the driver level, without being sent to the card.
+ */
+#define MEGADEVIOC 0x84
+
+/* system call imposed limit. Change accordingly */
+#define IOCTL_MAX_DATALEN 4096
+
+#pragma pack(1)
+struct uioctl_t {
+ u32 inlen;
+ u32 outlen;
+ union {
+ u8 fca[16];
+ struct {
+ u8 opcode;
+ u8 subopcode;
+ u16 adapno;
+#if BITS_PER_LONG == 32
+ u8 *buffer;
+ u8 pad[4];
+#endif
+#if BITS_PER_LONG == 64
+ u8 *buffer;
+#endif
+ u32 length;
+ } fcs;
+ } ui;
+ u8 mbox[18]; /* 16 bytes + 2 status bytes */
+ mega_passthru pthru;
+#if BITS_PER_LONG == 32
+ char *data; /* buffer <= 4096 for 0x80 commands */
+ char pad[4];
+#endif
+#if BITS_PER_LONG == 64
+ char *data;
+#endif
+};
+#pragma pack()
+
+/*
+ * struct mcontroller is used to pass information about the controllers in the
+ * system. It is up to the application how to use the information. We are passing
+ * as much info about the cards as possible and useful. Before issuing the
+ * call to find information about the cards, the application needs to issue a
+ * ioctl first to find out the number of controllers in the system.
+ */
+#define MAX_CONTROLLERS 32
+
+struct mcontroller {
+ u64 base;
+ u8 irq;
+ u8 numldrv;
+ u8 pcibus;
+ u16 pcidev;
+ u8 pcifun;
+ u16 pciid;
+ u16 pcivendor;
+ u8 pcislot;
+ u32 uid;
+};
+
+struct mbox_passthru {
+ u8 cmd;
+ u8 cmdid;
+ u16 pad1;
+ u32 pad2;
+ u32 dataxferaddr;
+ u8 pad3;
+ u8 pad4;
+ u8 rsvd;
+ u8 mboxbusy;
+ u8 nstatus;
+ u8 status;
+};
+
+/*
+ * Defines for Driver IOCTL interface, Op-code:M_RD_DRIVER_IOCTL_INTERFACE
+ */
+#define MEGAIOC_MAGIC 'm'
+#define MEGAIOCCMD _IOWR(MEGAIOC_MAGIC, 0) /* Mega IOCTL command */
+
+#define MEGAIOC_QNADAP 'm' /* Query # of adapters */
+#define MEGAIOC_QDRVRVER 'e' /* Query driver version */
+#define MEGAIOC_QADAPINFO 'g' /* Query adapter information */
+#define MKADAP(adapno) (MEGAIOC_MAGIC << 8 | (adapno) )
+#define GETADAP(mkadap) ( (mkadap) ^ MEGAIOC_MAGIC << 8 )
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,3,0) /*0x20300 */
+extern struct proc_dir_entry proc_scsi_megaraid;
+#endif
+
+/* For Host Re-Ordering */
+#define MAX_CONTROLLERS 32
+
+struct mega_hbas {
+ int is_bios_enabled;
+ mega_host_config *hostdata_addr;
+};
+
+#define IS_BIOS_ENABLED 0x62
+#define GET_BIOS 0x01
+
+/*================================================================
+ *
+ * Function prototypes
+ *
+ *================================================================
+ */
+static const char *megaraid_info (struct Scsi_Host *);
+int megaraid_detect (Scsi_Host_Template *);
+static int megaraid_release (struct Scsi_Host *);
+static int megaraid_command (Scsi_Cmnd *);
+static int megaraid_abort (Scsi_Cmnd *);
+static int megaraid_reset (Scsi_Cmnd *, unsigned int);
+static int megaraid_queue (Scsi_Cmnd *, void (*done) (Scsi_Cmnd *));
+static int megaraid_biosparam (Disk *, kdev_t, int *);
+static int megaraid_proc_info (char *buffer, char **start, off_t offset,
+ int length, int hostno, int inout);
+
+static int megaIssueCmd (mega_host_config * megaCfg, u_char * mboxData,
+ mega_scb * scb, int intr);
+static int mega_build_sglist (mega_host_config * megaCfg, mega_scb * scb,
+ u32 * buffer, u32 * length);
+static int mega_busyWaitMbox (mega_host_config *);
+static int mega_runpendq (mega_host_config *);
+static void mega_rundoneq (mega_host_config *);
+static void mega_cmd_done (mega_host_config *, mega_scb *, int);
+static inline void mega_freeSgList (mega_host_config * megaCfg);
+static void mega_Convert8ldTo40ld (mega_RAIDINQ * inquiry,
+ mega_Enquiry3 * enquiry3,
+ megaRaidProductInfo * productInfo);
+
+static int megaraid_reboot_notify (struct notifier_block *,
+ unsigned long, void *);
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,3,0)
+static mega_scb *mega_ioctl (mega_host_config * megaCfg, Scsi_Cmnd * SCpnt);
+static void mega_build_kernel_sg (char *barea, ulong xfersize, mega_scb * pScb,
+ mega_ioctl_mbox * mbox);
+#endif
+
+static int megadev_open (struct inode *, struct file *);
+static int megadev_ioctl_entry (struct inode *, struct file *,
+ unsigned int, unsigned long);
+static int megadev_ioctl (struct inode *, struct file *,
+ unsigned int, unsigned long);
+static mega_scb *megadev_doioctl (mega_host_config *, Scsi_Cmnd *);
+static int megadev_close (struct inode *, struct file *);
+static void megadev_ioctl_done (Scsi_Cmnd *);
+static int mega_init_scb (mega_host_config *);
+static void enq_scb_freelist (mega_host_config *, mega_scb *,
+ int lock, int intr);
+
+static int mega_is_bios_enabled (mega_host_config *);
+static void mega_reorder_hosts (void);
+static void mega_swap_hosts (struct Scsi_Host *, struct Scsi_Host *);
+
+static void mega_create_proc_entry (int index, struct proc_dir_entry *);
#endif
*/
/*
-** May 11 2000, version 3.3b
-**
** Supported SCSI-II features:
** Synchronous negotiation
** Wide negotiation (depends on the NCR Chip)
/*
** Name and version of the driver
*/
-#define SCSI_NCR_DRIVER_NAME "ncr53c8xx - version 3.3b"
+#define SCSI_NCR_DRIVER_NAME "ncr53c8xx-3.4.3-20010212"
#define SCSI_NCR_DEBUG_FLAGS (0)
#include <linux/errno.h>
#include <linux/pci.h>
#include <linux/string.h>
-#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/ioport.h>
#include <linux/time.h>
** Donnot compile integrity checking code for Linux-2.3.0
** and above since SCSI data structures are not ready yet.
*/
-#if LINUX_VERSION_CODE < LinuxVersionCode(2,3,0)
+/* #if LINUX_VERSION_CODE < LinuxVersionCode(2,3,0) */
+#if 0
#define SCSI_NCR_INTEGRITY_CHECKING
#endif
#define UC_SETORDER 13
#define UC_SETWIDE 14
#define UC_SETFLAG 15
-#define UC_CLEARPROF 16
#define UC_SETVERBOSE 17
#define UF_TRACE (0x01)
#define UF_NODISC (0x02)
#define UF_NOSCAN (0x04)
-/*---------------------------------------
-**
-** Timestamps for profiling
-**
-**---------------------------------------
-*/
-
-#ifdef SCSI_NCR_PROFILE_SUPPORT
-
-struct tstamp {
- u_long start;
- u_long end;
- u_long command;
- u_long status;
- u_long disconnect;
- u_long reselect;
-};
-
-/*
-** profiling data (per device)
-*/
-
-struct profile {
- u_long num_trans;
- u_long num_kbytes;
- u_long rest_bytes;
- u_long num_disc;
- u_long num_break;
- u_long num_int;
- u_long num_fly;
- u_long ms_setup;
- u_long ms_data;
- u_long ms_disc;
- u_long ms_post;
-};
-#endif
-
/*========================================================================
**
** Declaration of structs: target control block
*/
ccb_p cp;
-#ifdef SCSI_NCR_PROFILE_SUPPORT
- /*----------------------------------------------------------------
- ** Space for some timestamps to gather profiling data.
- **----------------------------------------------------------------
- */
- struct tstamp stamp;
-#endif
-
/*----------------------------------------------------------------
** Status fields.
**----------------------------------------------------------------
*/
struct ncr_reg regdump; /* Register dump */
u_long regtime; /* Time it has been done */
-#ifdef SCSI_NCR_PROFILE_SUPPORT
- struct profile profile; /* Profiling data */
- u_int disc_phys; /* Disconnection counters */
- u_int disc_ref;
-#endif
/*----------------------------------------------------------------
** Miscellaneous buffers accessed by the scripts-processor.
** Fields that should be removed or changed.
**----------------------------------------------------------------
*/
-#ifdef SCSI_NCR_PROFILE_SUPPORT
- u_long ktime; /* Copy of kernel time */
-#endif
struct ccb *ccb; /* Global CCB */
struct usrcmd user; /* Command from user */
u_char release_stage; /* Synchronisation stage on release */
ncrcmd send_ident [ 9];
ncrcmd prepare [ 6];
ncrcmd prepare2 [ 7];
-#ifdef SCSI_NCR_PROFILE_SUPPORT
- ncrcmd command [ 9];
-#else
ncrcmd command [ 6];
-#endif
ncrcmd dispatch [ 32];
ncrcmd clrack [ 4];
ncrcmd no_data [ 17];
-#ifdef SCSI_NCR_PROFILE_SUPPORT
- ncrcmd status [ 11];
-#else
ncrcmd status [ 8];
-#endif
ncrcmd msg_in [ 2];
ncrcmd msg_in2 [ 16];
ncrcmd msg_bad [ 4];
#endif
ncrcmd save_dp [ 7];
ncrcmd restore_dp [ 5];
-#ifdef SCSI_NCR_PROFILE_SUPPORT
- ncrcmd disconnect [ 28];
-#else
ncrcmd disconnect [ 17];
-#endif
ncrcmd msg_out [ 9];
ncrcmd msg_out_done [ 7];
ncrcmd idle [ 2];
ncrcmd reselect [ 8];
ncrcmd reselected [ 8];
ncrcmd resel_dsa [ 6];
-#ifdef SCSI_NCR_PROFILE_SUPPORT
- ncrcmd loadpos1 [ 7];
-#else
ncrcmd loadpos1 [ 4];
-#endif
ncrcmd resel_lun [ 6];
ncrcmd resel_tag [ 6];
ncrcmd jump_to_nexus [ 4];
static int ncr_ic_nego(ncb_p np, ccb_p cp, Scsi_Cmnd *cmd, u_char *msgptr);
#endif
-#ifdef SCSI_NCR_PROFILE_SUPPORT
-static void ncb_profile (ncb_p np, ccb_p cp);
-#endif
-
static void ncr_script_copy_and_bind
(ncb_p np, ncrcmd *src, ncrcmd *dst, int len);
static void ncr_script_fill (struct script * scr, struct scripth * scripth);
PADDR (dispatch),
}/*-------------------------< COMMAND >--------------------*/,{
-#ifdef SCSI_NCR_PROFILE_SUPPORT
- /*
- ** ... set a timestamp ...
- */
- SCR_COPY (sizeof (u_long)),
- NADDR (ktime),
- NADDR (header.stamp.command),
-#endif
/*
** ... and send the command
*/
PADDR (no_data),
}/*-------------------------< STATUS >--------------------*/,{
-#ifdef SCSI_NCR_PROFILE_SUPPORT
- /*
- ** set the timestamp.
- */
- SCR_COPY (sizeof (u_long)),
- NADDR (ktime),
- NADDR (header.stamp.status),
-#endif
/*
** get the status
*/
*/
SCR_WAIT_DISC,
0,
-#ifdef SCSI_NCR_PROFILE_SUPPORT
- /*
- ** Profiling:
- ** Set a time stamp,
- ** and count the disconnects.
- */
- SCR_COPY (sizeof (u_long)),
- NADDR (ktime),
- NADDR (header.stamp.disconnect),
- SCR_COPY (4),
- NADDR (disc_phys),
- RADDR (scratcha),
- SCR_REG_REG (scratcha, SCR_ADD, 0x01),
- 0,
- SCR_COPY (4),
- RADDR (scratcha),
- NADDR (disc_phys),
-#endif
/*
** Status is: DISCONNECTED.
*/
}/*-------------------------< LOADPOS1 >-------------------*/,{
0,
NADDR (header),
-#ifdef SCSI_NCR_PROFILE_SUPPORT
- /*
- ** Set a time stamp for this reselection
- */
- SCR_COPY (sizeof (u_long)),
- NADDR (ktime),
- NADDR (header.stamp.reselect),
-#endif
/*
** The DSA contains the data structure address.
*/
switch (old & RELOC_MASK) {
case RELOC_REGISTER:
- new = (old & ~RELOC_MASK)
- + pcivtobus(np->paddr);
+ new = (old & ~RELOC_MASK) + np->paddr;
break;
case RELOC_LABEL:
new = (old & ~RELOC_MASK) + np->p_script;
np->maxwide = (np->features & FE_WIDE)? 1 : 0;
- /*
- ** Get the frequency of the chip's clock.
- ** Find the right value for scntl3.
- */
+ /*
+ * Guess the frequency of the chip's clock.
+ */
+ if (np->features & (FE_ULTRA3 | FE_ULTRA2))
+ np->clock_khz = 160000;
+ else if (np->features & FE_ULTRA)
+ np->clock_khz = 80000;
+ else
+ np->clock_khz = 40000;
+ /*
+ * Get the clock multiplier factor.
+ */
if (np->features & FE_QUAD)
np->multiplier = 4;
else if (np->features & FE_DBLR)
else
np->multiplier = 1;
- np->clock_khz = (np->features & FE_CLK80)? 80000 : 40000;
- np->clock_khz *= np->multiplier;
-
- if (np->clock_khz != 40000)
+ /*
+ * Measure SCSI clock frequency for chips
+ * it may vary from assumed one.
+ */
+ if (np->features & FE_VARCLK)
ncr_getclock(np, np->multiplier);
/*
np->paddr = device->slot.base;
np->paddr2 = (np->features & FE_RAM)? device->slot.base_2 : 0;
-#ifndef NCR_IOMAPPED
- np->vaddr = remap_pci_mem((u_long) np->paddr, (u_long) 128);
+#ifndef SCSI_NCR_IOMAPPED
+ np->vaddr = remap_pci_mem(device->slot.base_c, (u_long) 128);
if (!np->vaddr) {
printk(KERN_ERR
"%s: can't map memory mapped IO region\n",ncr_name(np));
np->reg = (struct ncr_reg*) np->vaddr;
-#endif /* !defined NCR_IOMAPPED */
+#endif /* !defined SCSI_NCR_IOMAPPED */
/*
** Try to map the controller chip into iospace.
instance->this_id = np->myaddr;
instance->max_id = np->maxwide ? 16 : 8;
instance->max_lun = SCSI_NCR_MAX_LUN;
-#ifndef NCR_IOMAPPED
+#ifndef SCSI_NCR_IOMAPPED
#if LINUX_VERSION_CODE >= LinuxVersionCode(2,3,29)
instance->base = (unsigned long) np->reg;
#else
np->scripth = np->scripth0;
np->p_scripth = vtobus(np->scripth);
- np->p_script = (np->paddr2) ?
- pcivtobus(np->paddr2) : vtobus(np->script0);
+ np->p_script = (np->paddr2) ? np->paddr2 : vtobus(np->script0);
ncr_script_copy_and_bind (np, (ncrcmd *) &script0, (ncrcmd *) np->script0, sizeof(struct script));
ncr_script_copy_and_bind (np, (ncrcmd *) &scripth0, (ncrcmd *) np->scripth0, sizeof(struct scripth));
printk(KERN_INFO "%s: detaching...\n", ncr_name(np));
if (!np)
goto unregister;
-#ifndef NCR_IOMAPPED
+#ifndef SCSI_NCR_IOMAPPED
if (np->vaddr) {
#ifdef DEBUG_NCR53C8XX
printk(KERN_DEBUG "%s: releasing memory mapped IO region %lx[%d]\n", ncr_name(np), (u_long) np->vaddr, 128);
#endif
unmap_pci_mem((vm_offset_t) np->vaddr, (u_long) 128);
}
-#endif /* !NCR_IOMAPPED */
+#endif /* !SCSI_NCR_IOMAPPED */
if (np->base_io) {
#ifdef DEBUG_NCR53C8XX
printk(KERN_DEBUG "%s: releasing IO region %x[%d]\n", ncr_name(np), np->base_io, 128);
**
**---------------------------------------------
*/
- if (cmd->cmnd[0] == 0 && (tp->usrflag & UF_NOSCAN)) {
+ if ((cmd->cmnd[0] == 0 || cmd->cmnd[0] == 0x12) &&
+ (tp->usrflag & UF_NOSCAN)) {
tp->usrflag &= ~UF_NOSCAN;
return DID_BAD_TARGET;
}
}
#endif
- /*---------------------------------------------------
- **
- ** timestamp
- **
- **----------------------------------------------------
- */
-#ifdef SCSI_NCR_PROFILE_SUPPORT
- bzero (&cp->phys.header.stamp, sizeof (struct tstamp));
- cp->phys.header.stamp.start = jiffies;
-#endif
-
-
/*----------------------------------------------------
**
** Build the identify / tag / sdtr message
** Release Memory mapped IO region and IO mapped region
*/
-#ifndef NCR_IOMAPPED
+#ifndef SCSI_NCR_IOMAPPED
#ifdef DEBUG_NCR53C8XX
printk("%s: releasing memory mapped IO region %lx[%d]\n", ncr_name(np), (u_long) np->vaddr, 128);
#endif
unmap_pci_mem((vm_offset_t) np->vaddr, (u_long) 128);
-#endif /* !NCR_IOMAPPED */
+#endif /* !SCSI_NCR_IOMAPPED */
#ifdef DEBUG_NCR53C8XX
printk("%s: releasing IO region %x[%d]\n", ncr_name(np), np->base_io, 128);
return;
/*
- ** timestamp
- ** Optional, spare some CPU time
+ ** Print minimal debug information.
*/
-#ifdef SCSI_NCR_PROFILE_SUPPORT
- ncb_profile (np, cp);
-#endif
if (DEBUG_FLAGS & DEBUG_TINY)
printk ("CCB=%lx STAT=%x/%x\n", (unsigned long)cp,
/*
** Start script processor.
*/
- MEMORY_BARRIER();
if (np->paddr2) {
if (bootverbose)
printk ("%s: Downloading SCSI SCRIPTS.\n",
ncr_name(np));
OUTL (nc_scratcha, vtobus(np->script0));
- OUTL (nc_dsp, NCB_SCRIPTH_PHYS (np, start_ram));
+ OUTL_DSP (NCB_SCRIPTH_PHYS (np, start_ram));
}
else
- OUTL (nc_dsp, NCB_SCRIPT_PHYS (np, start));
+ OUTL_DSP (NCB_SCRIPT_PHYS (np, start));
}
/*==========================================================
tp->usrflag = np->user.data;
};
break;
-
-#ifdef SCSI_NCR_PROFILE_SUPPORT
- case UC_CLEARPROF:
- bzero(&np->profile, sizeof(np->profile));
- break;
-#endif
}
np->user.cmd=0;
}
return;
}
-#ifdef SCSI_NCR_PROFILE_SUPPORT
- np->ktime = thistime;
- np->timer.expires = ktime_get(1);
-#else
np->timer.expires = ktime_get(SCSI_NCR_TIMER_INTERVAL);
-#endif
add_timer(&np->timer);
/*
** block ncr interrupts
*/
np->lasttime = thistime;
-
-#ifdef SCSI_NCR_PROFILE_SUPPORT
- /*
- ** Reset profile data to avoid ugly overflow
- ** (Limited to 1024 GB for 32 bit architecture)
- */
- if (np->profile.num_kbytes > (~0UL >> 2))
- bzero(&np->profile, sizeof(np->profile));
-#endif
}
#ifdef SCSI_NCR_BROKEN_INTR
OUTB (nc_istat, (istat & SIGP) | INTF);
istat = INB (nc_istat);
if (DEBUG_FLAGS & DEBUG_TINY) printk ("F ");
-#ifdef SCSI_NCR_PROFILE_SUPPORT
- np->profile.num_fly++;
-#endif
ncr_wakeup_done (np);
};
if (!(istat & (SIP|DIP)))
return;
-#ifdef SCSI_NCR_PROFILE_SUPPORT
- np->profile.num_int++;
-#endif
-
if (istat & CABRT)
OUTB (nc_istat, CABRT);
ncr_name(np), istat, dstat, sist);
return;
}
- OUTONB (nc_dcntl, (STD|NOCOM));
+ OUTONB_STD ();
return;
};
if (sist & UDC) {
printk ("%s: unexpected disconnect\n", ncr_name(np));
OUTB (HS_PRT, HS_UNEXPECTED);
- OUTL (nc_dsp, NCB_SCRIPT_PHYS (np, cleanup));
+ OUTL_DSP (NCB_SCRIPT_PHYS (np, cleanup));
return;
};
** repair start queue and jump to start point.
*/
- OUTL (nc_dsp, NCB_SCRIPTH_PHYS (np, sto_restart));
+ OUTL_DSP (NCB_SCRIPTH_PHYS (np, sto_restart));
return;
}
OUTB (nc_stest3, TE|CSF); /* clear scsi fifo */
np->msgout[0] = msg;
- OUTL (nc_dsp, jmp);
+ OUTL_DSP (jmp);
return 1;
reset_all:
** fake the return address (to the patch).
** and restart script processor at dispatcher.
*/
-#ifdef SCSI_NCR_PROFILE_SUPPORT
- np->profile.num_break++;
-#endif
OUTL (nc_temp, newtmp);
- OUTL (nc_dsp, NCB_SCRIPT_PHYS (np, dispatch));
+ OUTL_DSP (NCB_SCRIPT_PHYS (np, dispatch));
return;
/*
}
if (nxtdsp) {
- OUTL (nc_dsp, nxtdsp);
+ OUTL_DSP (nxtdsp);
return;
}
ncr_put_start_queue(np, cp);
if (disc_cnt)
INB (nc_ctest2); /* Clear SIGP */
- OUTL (nc_dsp, NCB_SCRIPT_PHYS (np, reselect));
+ OUTL_DSP (NCB_SCRIPT_PHYS (np, reselect));
return;
case S_TERMINATED:
case S_CHECK_COND:
ncr_put_start_queue(np, cp);
- OUTL (nc_dsp, NCB_SCRIPT_PHYS (np, start));
+ OUTL_DSP (NCB_SCRIPT_PHYS (np, start));
return;
}
out:
- OUTONB (nc_dcntl, (STD|NOCOM));
+ OUTONB_STD ();
return;
}
** We just assume lun=0, 1 CCB, no tag.
*/
if (tp->lp[0]) {
- OUTL (nc_dsp, scr_to_cpu(tp->lp[0]->jump_ccb[0]));
+ OUTL_DSP (scr_to_cpu(tp->lp[0]->jump_ccb[0]));
return;
}
case SIR_RESEL_BAD_TARGET: /* Will send a TARGET RESET message */
** Answer wasn't acceptable.
*/
ncr_setsync (np, cp, 0, 0xe0);
- OUTL (nc_dsp, NCB_SCRIPT_PHYS (np, msg_bad));
+ OUTL_DSP (NCB_SCRIPT_PHYS (np, msg_bad));
} else {
/*
** Answer is ok.
*/
ncr_setsync (np, cp, scntl3, (fak<<5)|ofs);
- OUTL (nc_dsp, NCB_SCRIPT_PHYS (np, clrack));
+ OUTL_DSP (NCB_SCRIPT_PHYS (np, clrack));
};
return;
}
if (!ofs) {
- OUTL (nc_dsp, NCB_SCRIPT_PHYS (np, msg_bad));
+ OUTL_DSP (NCB_SCRIPT_PHYS (np, msg_bad));
return;
}
np->msgin [0] = M_NOOP;
** Answer wasn't acceptable.
*/
ncr_setwide (np, cp, 0, 1);
- OUTL (nc_dsp, NCB_SCRIPT_PHYS (np, msg_bad));
+ OUTL_DSP (NCB_SCRIPT_PHYS (np, msg_bad));
} else {
/*
** Answer is ok.
*/
ncr_setwide (np, cp, wide, 1);
- OUTL (nc_dsp, NCB_SCRIPT_PHYS (np, clrack));
+ OUTL_DSP (NCB_SCRIPT_PHYS (np, clrack));
};
return;
};
out:
- OUTONB (nc_dcntl, (STD|NOCOM));
+ OUTONB_STD ();
}
/*==========================================================
}
-#define ncr_reg_bus_addr(r) \
- (pcivtobus(np->paddr) + offsetof (struct ncr_reg, r))
+#define ncr_reg_bus_addr(r) (np->paddr + offsetof (struct ncr_reg, r))
/*------------------------------------------------------------------------
** Initialize the fixed part of a CCB structure.
**==========================================================
*/
-#ifndef NCR_IOMAPPED
+#ifndef SCSI_NCR_IOMAPPED
static int __init ncr_regtest (struct ncb* np)
{
register volatile u_int32 data;
{
u_int32 ncr_rd, ncr_wr, ncr_bk, host_rd, host_wr, pc;
int i, err=0;
-#ifndef NCR_IOMAPPED
+#ifndef SCSI_NCR_IOMAPPED
if (np->reg) {
err |= ncr_regtest (np);
if (err) return (err);
/*
** Start script (exchange values)
*/
- OUTL (nc_dsp, pc);
+ OUTL_DSP (pc);
/*
** Wait 'til done (with timeout)
*/
/*==========================================================
**
**
-** Profiling the drivers and targets performance.
-**
-**
-**==========================================================
-*/
-
-#ifdef SCSI_NCR_PROFILE_SUPPORT
-
-/*
-** Compute the difference in jiffies ticks.
-*/
-
-#define ncr_delta(from, to) \
- ( ((to) && (from))? (to) - (from) : -1 )
-
-#define PROFILE cp->phys.header.stamp
-static void ncb_profile (ncb_p np, ccb_p cp)
-{
- long co, st, en, di, re, post, work, disc;
- u_int diff;
-
- PROFILE.end = jiffies;
-
- st = ncr_delta (PROFILE.start,PROFILE.status);
- if (st<0) return; /* status not reached */
-
- co = ncr_delta (PROFILE.start,PROFILE.command);
- if (co<0) return; /* command not executed */
-
- en = ncr_delta (PROFILE.start,PROFILE.end),
- di = ncr_delta (PROFILE.start,PROFILE.disconnect),
- re = ncr_delta (PROFILE.start,PROFILE.reselect);
- post = en - st;
-
- /*
- ** @PROFILE@ Disconnect time invalid if multiple disconnects
- */
-
- if (di>=0) disc = re - di; else disc = 0;
-
- work = (st - co) - disc;
-
- diff = (scr_to_cpu(np->disc_phys) - np->disc_ref) & 0xff;
- np->disc_ref += diff;
-
- np->profile.num_trans += 1;
- if (cp->cmd) {
- np->profile.num_kbytes += (cp->cmd->request_bufflen >> 10);
- np->profile.rest_bytes += (cp->cmd->request_bufflen & (0x400-1));
- if (np->profile.rest_bytes >= 0x400) {
- ++np->profile.num_kbytes;
- np->profile.rest_bytes -= 0x400;
- }
- }
- np->profile.num_disc += diff;
- np->profile.ms_setup += co;
- np->profile.ms_data += work;
- np->profile.ms_disc += disc;
- np->profile.ms_post += post;
-}
-#undef PROFILE
-
-#endif /* SCSI_NCR_PROFILE_SUPPORT */
-
-/*==========================================================
-**
-**
** Device lookup.
**
** @GENSCSI@ should be integrated to scsiconf.c
uc->cmd = UC_SETDEBUG;
else if ((arg_len = is_keyword(ptr, len, "setflag")) != 0)
uc->cmd = UC_SETFLAG;
- else if ((arg_len = is_keyword(ptr, len, "clearprof")) != 0)
- uc->cmd = UC_CLEARPROF;
else
arg_len = 0;
#ifdef SCSI_NCR_USER_INFO_SUPPORT
/*
-** Copy formatted profile information into the input buffer.
+** Copy formatted information into the input buffer.
*/
-#define to_ms(t) ((t) * 1000 / HZ)
-
static int ncr_host_info(ncb_p np, char *ptr, off_t offset, int len)
{
struct info_str info;
driver_setup.debug, driver_setup.verbose);
}
-#ifdef SCSI_NCR_PROFILE_SUPPORT
- copy_info(&info, "Profiling information:\n");
- copy_info(&info, " %-12s = %lu\n", "num_trans",np->profile.num_trans);
- copy_info(&info, " %-12s = %lu\n", "num_kbytes",np->profile.num_kbytes);
- copy_info(&info, " %-12s = %lu\n", "num_disc", np->profile.num_disc);
- copy_info(&info, " %-12s = %lu\n", "num_break",np->profile.num_break);
- copy_info(&info, " %-12s = %lu\n", "num_int", np->profile.num_int);
- copy_info(&info, " %-12s = %lu\n", "num_fly", np->profile.num_fly);
- copy_info(&info, " %-12s = %lu\n", "ms_setup", to_ms(np->profile.ms_setup));
- copy_info(&info, " %-12s = %lu\n", "ms_data", to_ms(np->profile.ms_data));
- copy_info(&info, " %-12s = %lu\n", "ms_disc", to_ms(np->profile.ms_disc));
- copy_info(&info, " %-12s = %lu\n", "ms_post", to_ms(np->profile.ms_post));
-#endif
-
return info.pos > info.offset? info.pos - info.offset : 0;
}
** Module stuff
*/
+#if LINUX_VERSION_CODE >= LinuxVersionCode(2,4,0)
static Scsi_Host_Template driver_template = NCR53C8XX;
#include "scsi_module.c"
+#elif defined(MODULE)
+Scsi_Host_Template driver_template = NCR53C8XX;
+#include "scsi_module.c"
+#endif
** Used by hosts.c and ncr53c8xx.c with module configuration.
*/
+#if (LINUX_VERSION_CODE >= 0x020400) || defined(HOSTS_C) || defined(MODULE)
+
#include <scsi/scsicam.h>
int ncr53c8xx_abort(Scsi_Cmnd *);
#endif /* LINUX_VERSION_CODE */
+#endif /* defined(HOSTS_C) || defined(MODULE) */
+
#endif /* NCR53C8XX_H */
#endif
/* found a adapter */
host = scsi_register(template, sizeof(scsi_qla_host_t));
+ if (!host) {
+ printk(KERN_WARNING "qla1280: Failed to register host, aborting.\n");
+ return 0;
+ }
ha = (scsi_qla_host_t *) host->hostdata;
/* Clear our data area */
for( j =0, cp = (char *)ha; j < sizeof(scsi_qla_host_t); j++)
CMD_HANDLE(cmd) = (unsigned char *)handle;
/* Bookkeeping information */
- sp->r_start = jiffies; /* time the request was recieved */
+ sp->r_start = jiffies; /* time the request was received */
sp->u_start = 0;
/* add the command to our queue */
continue;
host = scsi_register(tmpt, sizeof(struct isp1020_hostdata));
+ if (!host)
+ continue;
+
hostdata = (struct isp1020_hostdata *) host->hostdata;
memset(hostdata, 0, sizeof(struct isp1020_hostdata));
if (isp1020_init(host))
goto fail_and_unregister;
-
+
if (isp1020_reset_hardware(host)
#if USE_NVRAM_DEFAULTS
|| isp1020_get_defaults(host)
|| isp1020_set_defaults(host)
#endif /* USE_NVRAM_DEFAULTS */
|| isp1020_load_parameters(host)) {
- iounmap((void *)hostdata->memaddr);
- release_region(host->io_port, 0xff);
- goto fail_and_unregister;
+ goto fail_uninit;
}
host->this_id = hostdata->host_param.initiator_scsi_id;
{
printk("qlogicisp : interrupt %d already in use\n",
host->irq);
- iounmap((void *)hostdata->memaddr);
- release_region(host->io_port, 0xff);
- goto fail_and_unregister;
+ goto fail_uninit;
}
isp_outw(0x0, host, PCI_SEMAPHORE);
hosts++;
continue;
+ fail_uninit:
+ iounmap((void *)hostdata->memaddr);
+ release_region(host->io_port, 0xff);
fail_and_unregister:
if (hostdata->res_cpu)
pci_free_consistent(hostdata->pci_dev,
sh->io_port = io_base;
- if (check_region(sh->io_port, 0xff)) {
+ if (!request_region(sh->io_port, 0xff, "qlogicisp")) {
printk("qlogicisp : i/o region 0x%lx-0x%lx already "
"in use\n",
sh->io_port, sh->io_port + 0xff);
return 1;
}
- request_region(sh->io_port, 0xff, "qlogicisp");
-
if ((command & PCI_COMMAND_MEMORY) &&
((mem_flags & 1) == 0)) {
mem_base = (u_long) ioremap(mem_base, PAGE_SIZE);
+ if (!mem_base) {
+ printk("qlogicisp : i/o remapping failed.\n");
+ goto out_release;
+ }
hostdata->memaddr = mem_base;
} else {
- if (command & PCI_COMMAND_IO && (io_flags & 3) != 1)
- {
- printk("qlogicisp : i/o mapping is disabled\n");
- release_region(sh->io_port, 0xff);
- return 1;
+ if (command & PCI_COMMAND_IO && (io_flags & 3) != 1) {
+ printk("qlogicisp : i/o mapping is disabled\n");
+ goto out_release;
}
hostdata->memaddr = 0; /* zero to signify no i/o mapping */
mem_base = 0;
printk("qlogicisp : can't decode %s address space 0x%lx\n",
(io_base ? "I/O" : "MEM"),
(io_base ? io_base : mem_base));
- iounmap((void *)hostdata->memaddr);
- release_region(sh->io_port, 0xff);
- return 1;
+ goto out_unmap;
}
hostdata->revision = revision;
&hostdata->res_dma);
if (hostdata->res_cpu == NULL) {
printk("qlogicisp : can't allocate response queue\n");
- return 1;
+ goto out_unmap;
}
hostdata->req_cpu = pci_alloc_consistent(hostdata->pci_dev,
hostdata->res_cpu,
hostdata->res_dma);
printk("qlogicisp : can't allocate request queue\n");
- return 1;
+ goto out_unmap;
}
LEAVE("isp1020_init");
return 0;
+
+out_unmap:
+ iounmap((void *)hostdata->memaddr);
+out_release:
+ release_region(sh->io_port, 0xff);
+ return 1;
}
sizeof (cmd_buffer));
esp->irq = 2;
- request_irq(esp->irq, esp_intr, SA_INTERRUPT, "SUN3X SCSI", NULL);
+ if (request_irq(esp->irq, esp_intr, SA_INTERRUPT,
+ "SUN3X SCSI", NULL)) {
+ esp_deallocate(esp);
+ return 0;
+ }
esp->scsi_id = 7;
esp->diff = 0;
#define sym53c416_base_2 sym53c416_2
#define sym53c416_base_3 sym53c416_3
-static unsigned short sym53c416_base = 0;
-static unsigned int sym53c416_irq = 0;
-static unsigned short sym53c416_base_1 = 0;
-static unsigned int sym53c416_irq_1 = 0;
-static unsigned short sym53c416_base_2 = 0;
-static unsigned int sym53c416_irq_2 = 0;
-static unsigned short sym53c416_base_3 = 0;
-static unsigned int sym53c416_irq_3 = 0;
+static unsigned int sym53c416_base[2] = {0,0};
+static unsigned int sym53c416_base_1[2] = {0,0};
+static unsigned int sym53c416_base_2[2] = {0,0};
+static unsigned int sym53c416_base_3[2] = {0,0};
#endif
ints[0] = 2;
if(sym53c416_base)
{
- ints[1] = sym53c416_base;
- ints[2] = sym53c416_irq;
+ ints[1] = sym53c416_base[0];
+ ints[2] = sym53c416_base[1];
sym53c416_setup(NULL, ints);
}
if(sym53c416_base_1)
{
- ints[1] = sym53c416_base_1;
- ints[2] = sym53c416_irq_1;
+ ints[1] = sym53c416_base_1[0];
+ ints[2] = sym53c416_base_1[1];
sym53c416_setup(NULL, ints);
}
if(sym53c416_base_2)
{
- ints[1] = sym53c416_base_2;
- ints[2] = sym53c416_irq_2;
+ ints[1] = sym53c416_base_2[0];
+ ints[2] = sym53c416_base_2[1];
sym53c416_setup(NULL, ints);
}
if(sym53c416_base_3)
{
- ints[1] = sym53c416_base_3;
- ints[2] = sym53c416_irq_3;
+ ints[1] = sym53c416_base_3[0];
+ ints[2] = sym53c416_base_3[1];
sym53c416_setup(NULL, ints);
}
#endif
*/
/*
-** May 11 2000, sym53c8xx 1.6b
-**
** Supported SCSI features:
** Synchronous data transfers
** Wide16 SCSI BUS
/*
** Name and version of the driver
*/
-#define SCSI_NCR_DRIVER_NAME "sym53c8xx - version 1.6b"
-
-/* #define DEBUG_896R1 */
-#define SCSI_NCR_OPTIMIZE_896
-/* #define SCSI_NCR_OPTIMIZE_896_1 */
+#define SCSI_NCR_DRIVER_NAME "sym53c8xx-1.7.3a-20010304"
#define SCSI_NCR_DEBUG_FLAGS (0)
#define LinuxVersionCode(v, p, s) (((v)<<16)+((p)<<8)+(s))
-#include <linux/config.h>
#ifdef MODULE
#include <linux/module.h>
#endif
#include <linux/errno.h>
#include <linux/pci.h>
#include <linux/string.h>
-#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/ioport.h>
#include <linux/time.h>
** Donnot compile integrity checking code for Linux-2.3.0
** and above since SCSI data structures are not ready yet.
*/
-#if LINUX_VERSION_CODE < LinuxVersionCode(2,3,0)
+/* #if LINUX_VERSION_CODE < LinuxVersionCode(2,3,0) */
+#if 0
#define SCSI_NCR_INTEGRITY_CHECKING
#endif
/*==========================================================
**
-** On x86 architecture, write buffers management does
-** not reorder writes to memory. So, using compiler
-** optimization barriers is enough to guarantee some
-** ordering when the CPU is writing data accessed by
-** the NCR.
-** On Alpha architecture, explicit memory barriers have
-** to be used.
-** Other architectures are defaulted to mb() macro if
-** defined, otherwise use compiler barrier.
-**
-**==========================================================
-*/
-
-#if defined(__i386__)
-#define MEMORY_BARRIER() barrier()
-#elif defined(__alpha__)
-#define MEMORY_BARRIER() mb()
-#else
-# ifdef mb
-# define MEMORY_BARRIER() mb()
-# else
-# define MEMORY_BARRIER() barrier()
-# endif
-#endif
-
-/*==========================================================
-**
** Configuration and Debugging
**
**==========================================================
#define SCR_SG_SIZE (2)
/*
-** Io mapped or memory mapped.
-*/
-
-#if defined(SCSI_NCR_IOMAPPED) || defined(SCSI_NCR_PCI_MEM_NOT_SUPPORTED)
-#define NCR_IOMAPPED
-#endif
-
-/*
** other
*/
#define PciDeviceId(d) (d)->device
#define PciIrqLine(d) (d)->irq
-#if LINUX_VERSION_CODE > LinuxVersionCode(2,3,12)
-
-static int __init
-pci_get_base_address(struct pci_dev *pdev, int index, u_long *base)
+static u_long __init
+pci_get_base_cookie(struct pci_dev *pdev, int index)
{
- *base = pdev->resource[index].start;
- if ((pdev->resource[index].flags & 0x7) == 0x4)
- ++index;
- return ++index;
-}
+ u_long base;
+
+#if LINUX_VERSION_CODE > LinuxVersionCode(2,3,12)
+ base = pdev->resource[index].start;
#else
-static int __init
+ base = pdev->base_address[index];
+#if BITS_PER_LONG > 32
+ if ((base & 0x7) == 0x4)
+ *base |= (((u_long)pdev->base_address[++index]) << 32);
+#endif
+#endif
+ return (base & ~0x7ul);
+}
+
+static int __init
pci_get_base_address(struct pci_dev *pdev, int index, u_long *base)
{
- *base = pdev->base_address[index++];
- if ((*base & 0x7) == 0x4) {
+ u32 tmp;
+#define PCI_BAR_OFFSET(index) (PCI_BASE_ADDRESS_0 + (index<<2))
+
+ pci_read_config_dword(pdev, PCI_BAR_OFFSET(index), &tmp);
+ *base = tmp;
+ ++index;
+ if ((tmp & 0x7) == 0x4) {
#if BITS_PER_LONG > 32
- *base |= (((u_long)pdev->base_address[index]) << 32);
+ pci_read_config_dword(pdev, PCI_BAR_OFFSET(index), &tmp);
+ *base |= (((u_long)tmp) << 32);
#endif
++index;
}
return index;
+#undef PCI_BAR_OFFSET
}
-#endif
#else /* Incomplete emulation of current PCI code for pre-2.2 kernels */
}
return offset;
}
+static u_long __init
+pci_get_base_cookie(struct pci_dev *pdev, int offset)
+{
+ u_long base;
+
+ (void) pci_get_base_address(dev, offset, &base);
+
+ return base;
+}
#endif /* LINUX_VERSION_CODE >= LinuxVersionCode(2,2,0) */
+/* Does not make sense in earlier kernels */
+#if LINUX_VERSION_CODE < LinuxVersionCode(2,4,0)
+#define pci_enable_device(pdev) (0)
+#endif
+
/*==========================================================
**
** Debugging tags
#ifdef __sparc__
# include <asm/irq.h>
-# define pcivtobus(p) bus_dvma_to_mem(p)
# define memcpy_to_pci(a, b, c) memcpy_toio((a), (b), (c))
#elif defined(__alpha__)
-# define pcivtobus(p) ((p) & 0xfffffffful)
-# define memcpy_to_pci(a, b, c) memcpy_toio((a), (b), (c))
-#elif defined(CONFIG_PPC)
-# define pcivtobus(p) phys_to_bus(p)
# define memcpy_to_pci(a, b, c) memcpy_toio((a), (b), (c))
#else /* others */
-# define pcivtobus(p) (p)
# define memcpy_to_pci(a, b, c) memcpy_toio((a), (b), (c))
#endif
u_long base;
u_long base_2;
u_long io_port;
+ u_long base_c;
+ u_long base_2_c;
int irq;
/* port and reg fields to use INB, OUTB macros */
u_long base_io;
/*==========================================================
**
-** Big/Little endian support.
-**
-**==========================================================
-*/
-
-/*
-** If the NCR uses big endian addressing mode over the
-** PCI, actual io register addresses for byte and word
-** accesses must be changed according to lane routing.
-** Btw, ncr_offb() and ncr_offw() macros only apply to
-** constants and so donnot generate bloated code.
-*/
-
-#if defined(SCSI_NCR_BIG_ENDIAN)
-
-#define ncr_offb(o) (((o)&~3)+((~((o)&3))&3))
-#define ncr_offw(o) (((o)&~3)+((~((o)&3))&2))
-
-#else
-
-#define ncr_offb(o) (o)
-#define ncr_offw(o) (o)
-
-#endif
-
-/*
-** If the CPU and the NCR use same endian-ness adressing,
-** no byte reordering is needed for script patching.
-** Macro cpu_to_scr() is to be used for script patching.
-** Macro scr_to_cpu() is to be used for getting a DWORD
-** from the script.
-*/
-
-#if defined(__BIG_ENDIAN) && !defined(SCSI_NCR_BIG_ENDIAN)
-
-#define cpu_to_scr(dw) cpu_to_le32(dw)
-#define scr_to_cpu(dw) le32_to_cpu(dw)
-
-#elif defined(__LITTLE_ENDIAN) && defined(SCSI_NCR_BIG_ENDIAN)
-
-#define cpu_to_scr(dw) cpu_to_be32(dw)
-#define scr_to_cpu(dw) be32_to_cpu(dw)
-
-#else
-
-#define cpu_to_scr(dw) (dw)
-#define scr_to_cpu(dw) (dw)
-
-#endif
-
-/*==========================================================
-**
-** Access to the controller chip.
-**
-** If NCR_IOMAPPED is defined, the driver will use
-** normal IOs instead of the MEMORY MAPPED IO method
-** recommended by PCI specifications.
-** If all PCI bridges, host brigdes and architectures
-** would have been correctly designed for PCI, this
-** option would be useless.
-**
-**==========================================================
-*/
-
-/*
-** If the CPU and the NCR use same endian-ness adressing,
-** no byte reordering is needed for accessing chip io
-** registers. Functions suffixed by '_raw' are assumed
-** to access the chip over the PCI without doing byte
-** reordering. Functions suffixed by '_l2b' are
-** assumed to perform little-endian to big-endian byte
-** reordering, those suffixed by '_b2l' blah, blah,
-** blah, ...
-*/
-
-#if defined(NCR_IOMAPPED)
-
-/*
-** IO mapped only input / ouput
-*/
-
-#define INB_OFF(o) inb (np->base_io + ncr_offb(o))
-#define OUTB_OFF(o, val) outb ((val), np->base_io + ncr_offb(o))
-
-#if defined(__BIG_ENDIAN) && !defined(SCSI_NCR_BIG_ENDIAN)
-
-#define INW_OFF(o) inw_l2b (np->base_io + ncr_offw(o))
-#define INL_OFF(o) inl_l2b (np->base_io + (o))
-
-#define OUTW_OFF(o, val) outw_b2l ((val), np->base_io + ncr_offw(o))
-#define OUTL_OFF(o, val) outl_b2l ((val), np->base_io + (o))
-
-#elif defined(__LITTLE_ENDIAN) && defined(SCSI_NCR_BIG_ENDIAN)
-
-#define INW_OFF(o) inw_b2l (np->base_io + ncr_offw(o))
-#define INL_OFF(o) inl_b2l (np->base_io + (o))
-
-#define OUTW_OFF(o, val) outw_l2b ((val), np->base_io + ncr_offw(o))
-#define OUTL_OFF(o, val) outl_l2b ((val), np->base_io + (o))
-
-#else
-
-#define INW_OFF(o) inw_raw (np->base_io + ncr_offw(o))
-#define INL_OFF(o) inl_raw (np->base_io + (o))
-
-#define OUTW_OFF(o, val) outw_raw ((val), np->base_io + ncr_offw(o))
-#define OUTL_OFF(o, val) outl_raw ((val), np->base_io + (o))
-
-#endif /* ENDIANs */
-
-#else /* defined NCR_IOMAPPED */
-
-/*
-** MEMORY mapped IO input / output
-*/
-
-#define INB_OFF(o) readb((char *)np->reg + ncr_offb(o))
-#define OUTB_OFF(o, val) writeb((val), (char *)np->reg + ncr_offb(o))
-
-#if defined(__BIG_ENDIAN) && !defined(SCSI_NCR_BIG_ENDIAN)
-
-#define INW_OFF(o) readw_l2b((char *)np->reg + ncr_offw(o))
-#define INL_OFF(o) readl_l2b((char *)np->reg + (o))
-
-#define OUTW_OFF(o, val) writew_b2l((val), (char *)np->reg + ncr_offw(o))
-#define OUTL_OFF(o, val) writel_b2l((val), (char *)np->reg + (o))
-
-#elif defined(__LITTLE_ENDIAN) && defined(SCSI_NCR_BIG_ENDIAN)
-
-#define INW_OFF(o) readw_b2l((char *)np->reg + ncr_offw(o))
-#define INL_OFF(o) readl_b2l((char *)np->reg + (o))
-
-#define OUTW_OFF(o, val) writew_l2b((val), (char *)np->reg + ncr_offw(o))
-#define OUTL_OFF(o, val) writel_l2b((val), (char *)np->reg + (o))
-
-#else
-
-#define INW_OFF(o) readw_raw((char *)np->reg + ncr_offw(o))
-#define INL_OFF(o) readl_raw((char *)np->reg + (o))
-
-#define OUTW_OFF(o, val) writew_raw((val), (char *)np->reg + ncr_offw(o))
-#define OUTL_OFF(o, val) writel_raw((val), (char *)np->reg + (o))
-
-#endif
-
-#endif /* defined NCR_IOMAPPED */
-
-#define INB(r) INB_OFF (offsetof(struct ncr_reg,r))
-#define INW(r) INW_OFF (offsetof(struct ncr_reg,r))
-#define INL(r) INL_OFF (offsetof(struct ncr_reg,r))
-
-#define OUTB(r, val) OUTB_OFF (offsetof(struct ncr_reg,r), (val))
-#define OUTW(r, val) OUTW_OFF (offsetof(struct ncr_reg,r), (val))
-#define OUTL(r, val) OUTL_OFF (offsetof(struct ncr_reg,r), (val))
-
-/*
-** Set bit field ON, OFF
-*/
-
-#define OUTONB(r, m) OUTB(r, INB(r) | (m))
-#define OUTOFFB(r, m) OUTB(r, INB(r) & ~(m))
-#define OUTONW(r, m) OUTW(r, INW(r) | (m))
-#define OUTOFFW(r, m) OUTW(r, INW(r) & ~(m))
-#define OUTONL(r, m) OUTL(r, INL(r) | (m))
-#define OUTOFFL(r, m) OUTL(r, INL(r) & ~(m))
-
-
-/*==========================================================
-**
** Command control block states.
**
**==========================================================
#define SIR_MSG_OUT_DONE (19)
#define SIR_AUTO_SENSE_DONE (20)
#define SIR_DUMMY_INTERRUPT (21)
-#define SIR_MAX (21)
+#define SIR_DATA_OVERRUN (22)
+#define SIR_BAD_PHASE (23)
+#define SIR_MAX (23)
/*==========================================================
**
#define UC_SETORDER 13
#define UC_SETWIDE 14
#define UC_SETFLAG 15
-#define UC_CLEARPROF 16
#define UC_SETVERBOSE 17
#define UC_RESETDEV 18
#define UC_CLEARDEV 19
#define UF_NODISC (0x02)
#define UF_NOSCAN (0x04)
-#ifdef SCSI_NCR_PROFILE_SUPPORT
-/*
-** profiling data (per host)
-*/
-
-struct profile {
- u_long num_trans;
- u_long num_disc;
- u_long num_disc0;
- u_long num_break;
- u_long num_int;
- u_long num_fly;
- u_long num_kbytes;
-#if 000
- u_long num_br1k;
- u_long num_br2k;
- u_long num_br4k;
- u_long num_br8k;
- u_long num_brnk;
-#endif
-};
-#endif
-
/*========================================================================
**
** Declaration of structs: target control block
ccb_p nego_cp;
/*----------------------------------------------------------------
- ** statistical data
- **----------------------------------------------------------------
- */
- u_long transfers;
- u_long bytes;
-
- /*----------------------------------------------------------------
** negotiation of wide and synch transfer and device quirks.
** sval, wval and uval are read from SCRIPTS and so have alignment
** constraints.
**----------------------------------------------------------------
*/
-/*0*/ u_char minsync;
+/*0*/ u_char uval;
/*1*/ u_char sval;
-/*2*/ u_short period;
-/*0*/ u_char maxoffs;
-/*1*/ u_char quirks;
-/*2*/ u_char widedone;
+/*2*/ u_char filler2;
/*3*/ u_char wval;
-/*0*/ u_char uval;
+ u_short period;
+ u_char minsync;
+ u_char maxoffs;
+ u_char quirks;
+ u_char widedone;
#ifdef SCSI_NCR_INTEGRITY_CHECKING
u_char ic_min_sync;
** Status fields.
**----------------------------------------------------------------
*/
- u_char scr_st[4]; /* script status */
u_char status[4]; /* host status */
};
/*
** The status bytes are used by the host and the script processor.
**
-** The last four bytes (status[4]) are copied to the scratchb register
+** The four bytes (status[4]) are copied to the scratchb register
** (declared as scr0..scr3 in ncr_reg.h) just after the select/reselect,
** and copied back just after disconnecting.
** Inside the script the XX_REG are used.
-**
-** The first four bytes (scr_st[4]) are used inside the script by
-** "LOAD/STORE" commands.
-** Because source and destination must have the same alignment
-** in a DWORD, the fields HAVE to be at the choosen offsets.
-** xerr_st 0 (0x34) scratcha
-** sync_st 1 (0x05) sxfer
-** wide_st 3 (0x03) scntl3
*/
/*
*/
#define HF_DATA_ST (1u<<7)
-/*
-** First four bytes (script)
-*/
-#define xerr_st header.scr_st[0]
-#define sync_st header.scr_st[1]
-#define nego_st header.scr_st[2]
-#define wide_st header.scr_st[3]
-
-/*
-** First four bytes (host)
-*/
-#define xerr_status phys.xerr_st
-#define nego_status phys.nego_st
-
/*==========================================================
**
** Declaration of structs: Data structure block
struct pm_ctx pm0;
struct pm_ctx pm1;
-
- /*
- ** Extra bytes count transferred
- ** in case of data overrun.
- */
- u_int32 extra_bytes;
-
-#ifdef SCSI_NCR_PROFILE_SUPPORT
- /*
- ** Disconnection counter
- */
- u_int32 num_disc;
-#endif
};
** a SDTR or WDTR message is appended.
**----------------------------------------------------------------
*/
- u_char scsi_smsg [8];
- u_char scsi_smsg2[8];
+ u_char scsi_smsg [12];
+ u_char scsi_smsg2[12];
+
+ /*----------------------------------------------------------------
+	 ** Miscellaneous status fields.
+ **----------------------------------------------------------------
+ */
+ u_char nego_status; /* Negotiation status */
+ u_char xerr_status; /* Extended error flags */
+ u_int32 extra_bytes; /* Extraneous bytes transferred */
/*----------------------------------------------------------------
** Saved info for auto-sense
u_char minsync; /* Minimum sync period factor */
u_char maxsync; /* Maximum sync period factor */
u_char maxoffs; /* Max scsi offset */
+ u_char maxoffs_st; /* Max scsi offset in ST mode */
u_char multiplier; /* Clock multiplier (1,2,4) */
u_char clock_divn; /* Number of clock divisors */
u_long clock_khz; /* SCSI clock frequency in KHz */
*/
struct ncr_reg regdump; /* Register dump */
u_long regtime; /* Time it has been done */
-#ifdef SCSI_NCR_PROFILE_SUPPORT
- struct profile profile; /* Profiling data */
-#endif
/*----------------------------------------------------------------
** Miscellaneous buffers accessed by the scripts-processor.
**----------------------------------------------------------------
*/
struct usrcmd user; /* Command from user */
- u_char release_stage; /* Synchronisation stage on release */
+ volatile u_char release_stage; /* Synchronisation stage on release */
/*----------------------------------------------------------------
** Fields that are used (primarily) for integrity check
ncrcmd select2 [ 2];
#endif
ncrcmd command [ 2];
- ncrcmd dispatch [ 30];
+ ncrcmd dispatch [ 28];
ncrcmd sel_no_cmd [ 10];
ncrcmd init [ 6];
ncrcmd clrack [ 4];
ncrcmd datai_done [ 26];
ncrcmd datao_done [ 12];
ncrcmd ign_i_w_r_msg [ 4];
-#ifdef SCSI_NCR_PROFILE_SUPPORT
- ncrcmd datai_phase [ 4];
-#else
ncrcmd datai_phase [ 2];
-#endif
ncrcmd datao_phase [ 4];
ncrcmd msg_in [ 2];
ncrcmd msg_in2 [ 10];
ncrcmd done_end [ 2];
ncrcmd save_dp [ 8];
ncrcmd restore_dp [ 4];
-#ifdef SCSI_NCR_PROFILE_SUPPORT
- ncrcmd disconnect [ 32];
-#else
ncrcmd disconnect [ 20];
-#endif
#ifdef SCSI_NCR_IARB_SUPPORT
ncrcmd idle [ 4];
#else
ncrcmd nego_bad_phase [ 4];
ncrcmd msg_out [ 4];
ncrcmd msg_out_done [ 4];
- ncrcmd data_ovrun [ 18];
- ncrcmd data_ovrun1 [ 20];
+ ncrcmd data_ovrun [ 2];
+ ncrcmd data_ovrun1 [ 22];
+ ncrcmd data_ovrun2 [ 8];
ncrcmd abort_resel [ 16];
ncrcmd resend_ident [ 4];
ncrcmd ident_break [ 4];
#ifdef SCSI_NCR_INTEGRITY_CHECKING
static int ncr_ic_nego(ncb_p np, ccb_p cp, Scsi_Cmnd *cmd, u_char *msgptr);
#endif
-#ifdef SCSI_NCR_PROFILE_SUPPORT
-static void ncb_profile (ncb_p np, ccb_p cp);
-#endif
static void ncr_script_copy_and_bind
(ncb_p np, ncrcmd *src, ncrcmd *dst, int len);
static void ncr_script_fill (struct script * scr, struct scripth * scripth);
SCR_JUMP ^ IFTRUE (IF (SCR_MSG_OUT)),
PADDRH (msg_out),
/*
- * Set the extended error flag.
+ * Discard as many illegal phases as
+ * required and tell the C code about.
*/
- SCR_REG_REG (HF_REG, SCR_OR, HF_EXT_ERR),
- 0,
-
- /*
- ** Discard one illegal phase byte, if required.
- */
- SCR_LOAD_REL (scratcha, 1),
- offsetof (struct ccb, xerr_status),
- SCR_REG_REG (scratcha, SCR_OR, XE_BAD_PHASE),
- 0,
- SCR_STORE_REL (scratcha, 1),
- offsetof (struct ccb, xerr_status),
- SCR_JUMPR ^ IFFALSE (IF (SCR_ILG_OUT)),
- 8,
+ SCR_JUMPR ^ IFFALSE (WHEN (SCR_ILG_OUT)),
+ 16,
SCR_MOVE_ABS (1) ^ SCR_ILG_OUT,
NADDR (scratch),
- SCR_JUMPR ^ IFFALSE (IF (SCR_ILG_IN)),
- 8,
+ SCR_JUMPR ^ IFTRUE (WHEN (SCR_ILG_OUT)),
+ -16,
+ SCR_JUMPR ^ IFFALSE (WHEN (SCR_ILG_IN)),
+ 16,
SCR_MOVE_ABS (1) ^ SCR_ILG_IN,
NADDR (scratch),
+ SCR_JUMPR ^ IFTRUE (WHEN (SCR_ILG_IN)),
+ -16,
+ SCR_INT,
+ SIR_BAD_PHASE,
SCR_JUMP,
PADDR (dispatch),
-
}/*---------------------< SEL_NO_CMD >----------------------*/,{
/*
** The target does not switch to command
PADDR (clrack),
}/*-------------------------< DATAI_PHASE >------------------*/,{
-#ifdef SCSI_NCR_PROFILE_SUPPORT
- SCR_REG_REG (QU_REG, SCR_OR, HF_DATA_ST),
- 0,
-#endif
SCR_RETURN,
0,
}/*-------------------------< DATAO_PHASE >------------------*/,{
*/
SCR_WAIT_DISC,
0,
-#ifdef SCSI_NCR_PROFILE_SUPPORT
- /*
- ** Count the disconnects.
- ** Disconnect without DATA PHASE having been
- ** entered are counted in bits 8..15.
- */
- SCR_LOAD_REL (scratcha, 4),
- offsetof (struct ccb, phys.num_disc),
- SCR_FROM_REG (QU_REG),
- 0,
- SCR_JUMPR ^ IFTRUE (MASK (HF_DATA_ST, HF_DATA_ST)),
- 8,
- SCR_REG_REG (scratcha1, SCR_ADD, 0x01),
- 0,
- SCR_REG_REG (scratcha, SCR_ADD, 0x01),
- 0,
- SCR_STORE_REL (scratcha, 4),
- offsetof (struct ccb, phys.num_disc),
-#endif
/*
** Status is: DISCONNECTED.
*/
SCR_JUMP,
PADDR (dispatch),
-}/*-------------------------< DATA_OVRUN >--------------------*/,{
+}/*-------------------------< DATA_OVRUN >-----------------------*/,{
+ /*
+ * Use scratcha to count the extra bytes.
+ */
+ SCR_LOAD_ABS (scratcha, 4),
+ PADDRH (zero),
+}/*-------------------------< DATA_OVRUN1 >----------------------*/,{
/*
* The target may want to transfer too much data.
*
SCR_CHMOV_ABS (1) ^ SCR_DATA_OUT,
NADDR (scratch),
SCR_JUMP,
- PADDRH (data_ovrun1),
+ PADDRH (data_ovrun2),
/*
* If WSR is set, clear this condition, and
* count this byte.
SCR_REG_REG (scntl2, SCR_OR, WSR),
0,
SCR_JUMP,
- PADDRH (data_ovrun1),
+ PADDRH (data_ovrun2),
/*
* Finally check against DATA IN phase.
- * Jump to dispatcher if not so.
+ * Signal data overrun to the C code
+ * and jump to dispatcher if not so.
* Read 1 byte otherwise and count it.
*/
- SCR_JUMP ^ IFFALSE (IF (SCR_DATA_IN)),
+ SCR_JUMPR ^ IFTRUE (WHEN (SCR_DATA_IN)),
+ 16,
+ SCR_INT,
+ SIR_DATA_OVERRUN,
+ SCR_JUMP,
PADDR (dispatch),
SCR_CHMOV_ABS (1) ^ SCR_DATA_IN,
NADDR (scratch),
-}/*-------------------------< DATA_OVRUN1 >--------------------*/,{
- /*
- * Set the extended error flag.
- */
- SCR_REG_REG (HF_REG, SCR_OR, HF_EXT_ERR),
- 0,
- SCR_LOAD_REL (scratcha, 1),
- offsetof (struct ccb, xerr_status),
- SCR_REG_REG (scratcha, SCR_OR, XE_EXTRA_DATA),
- 0,
- SCR_STORE_REL (scratcha, 1),
- offsetof (struct ccb, xerr_status),
+}/*-------------------------< DATA_OVRUN2 >----------------------*/,{
/*
* Count this byte.
* This will allow to return a negative
* residual to user.
*/
- SCR_LOAD_REL (scratcha, 4),
- offsetof (struct ccb, phys.extra_bytes),
SCR_REG_REG (scratcha, SCR_ADD, 0x01),
0,
SCR_REG_REG (scratcha1, SCR_ADDC, 0),
0,
SCR_REG_REG (scratcha2, SCR_ADDC, 0),
0,
- SCR_STORE_REL (scratcha, 4),
- offsetof (struct ccb, phys.extra_bytes),
/*
* .. and repeat as required.
*/
SCR_JUMP,
- PADDRH (data_ovrun),
+ PADDRH (data_ovrun1),
}/*-------------------------< ABORT_RESEL >----------------*/,{
SCR_SET (SCR_ATN),
switch (old & RELOC_MASK) {
case RELOC_REGISTER:
- new = (old & ~RELOC_MASK) + pcivtobus(np->base_ba);
+ new = (old & ~RELOC_MASK) + np->base_ba;
break;
case RELOC_LABEL:
new = (old & ~RELOC_MASK) + np->p_script;
np->maxwide = (np->features & FE_WIDE)? 1 : 0;
- /*
- ** Get the frequency of the chip's clock.
- ** Find the right value for scntl3.
- */
+ /*
+ * Guess the frequency of the chip's clock.
+ */
+ if (np->features & (FE_ULTRA3 | FE_ULTRA2))
+ np->clock_khz = 160000;
+ else if (np->features & FE_ULTRA)
+ np->clock_khz = 80000;
+ else
+ np->clock_khz = 40000;
+ /*
+ * Get the clock multiplier factor.
+ */
if (np->features & FE_QUAD)
np->multiplier = 4;
else if (np->features & FE_DBLR)
else
np->multiplier = 1;
- np->clock_khz = (np->features & FE_CLK80)? 80000 : 40000;
- np->clock_khz *= np->multiplier;
-
- if (np->clock_khz != 40000)
+ /*
+	 * Measure the SCSI clock frequency for chips where
+	 * it may vary from the assumed one.
+ */
+ if (np->features & FE_VARCLK)
ncr_getclock(np, np->multiplier);
/*
/*
* Fix up. If sync. factor is 10 (160000Khz clock) and chip
* supports ultra3, then min. sync. period 12.5ns and the factor is 9
+ * Also keep track of the maximum offset in ST mode which may differ
+ * from the maximum offset in DT mode. For now hardcoded to 31.
*/
- if ((np->minsync == 10) && (np->features & FE_ULTRA3))
- np->minsync = 9;
+ if (np->features & FE_ULTRA3) {
+ if (np->minsync == 10)
+ np->minsync = 9;
+ np->maxoffs_st = 31;
+ }
+ else
+ np->maxoffs_st = np->maxoffs;
/*
* Check against chip SCSI standard support (SCSI-2,ULTRA,ULTRA2).
/*
** 64 bit (53C895A or 53C896) ?
*/
- if (np->features & FE_64BIT)
+ if (np->features & FE_DAC)
#ifdef SCSI_NCR_USE_64BIT_DAC
np->rv_ccntl1 |= (XTIMOD | EXTIBMV);
#else
np->rv_dmode |= BOF; /* Burst Opcode Fetch */
if (np->features & FE_ERMP)
np->rv_dmode |= ERMP; /* Enable Read Multiple */
-#ifdef SCSI_NCR_OPTIMIZE_896
+#if 1
if ((np->features & FE_PFEN) && !np->base2_ba)
#else
if (np->features & FE_PFEN)
np->base_ws = (np->features & FE_IO256)? 256 : 128;
np->base2_ba = (np->features & FE_RAM)? device->slot.base_2 : 0;
-#ifndef NCR_IOMAPPED
- np->base_va = remap_pci_mem(np->base_ba, np->base_ws);
+#ifndef SCSI_NCR_IOMAPPED
+ np->base_va = remap_pci_mem(device->slot.base_c, np->base_ws);
if (!np->base_va) {
printk(KERN_ERR "%s: can't map PCI MMIO region\n",ncr_name(np));
goto attach_error;
np->reg = (struct ncr_reg *) np->base_va;
-#endif /* !defined NCR_IOMAPPED */
+#endif /* !defined SCSI_NCR_IOMAPPED */
/*
** If on-chip RAM is used, make sure SCRIPTS isn't too large.
np->p_scripth0 = np->p_scripth;
if (np->base2_ba) {
- np->p_script = pcivtobus(np->base2_ba);
+ np->p_script = np->base2_ba;
if (np->features & FE_RAM8K) {
np->base2_ws = 8192;
np->p_scripth = np->p_script + 4096;
else
np->base2_ws = 4096;
#ifndef SCSI_NCR_PCI_MEM_NOT_SUPPORTED
- np->base2_va = remap_pci_mem(np->base2_ba, np->base2_ws);
+ np->base2_va =
+ remap_pci_mem(device->slot.base_2_c, np->base2_ws);
if (!np->base2_va) {
printk(KERN_ERR "%s: can't map PCI MEMORY region\n",
ncr_name(np));
/*
** Patch the script to provide an extra clock cycle on
** data out phase - 53C1010_66MHz part only.
+ ** (Fixed in rev. 1 of the chip)
*/
- if (np->device_id == PCI_DEVICE_ID_LSI_53C1010_66){
+ if (np->device_id == PCI_DEVICE_ID_LSI_53C1010_66 &&
+ np->revision_id < 1){
np->script0->datao_phase[0] =
cpu_to_scr(SCR_REG_REG(scntl4, SCR_OR, 0x0c));
}
if (np->device_id == PCI_DEVICE_ID_NCR_53C896 &&
np->revision_id <= 0x1 && (np->features & FE_NOPM)) {
np->scatter = ncr_scatter_896R1;
-#ifndef SCSI_NCR_PROFILE_SUPPORT
-#define XXX 0
-#else
-#define XXX 2
-#endif
- np->script0->datai_phase[XXX] = cpu_to_scr(SCR_JUMP);
- np->script0->datai_phase[XXX+1] =
+ np->script0->datai_phase[0] = cpu_to_scr(SCR_JUMP);
+ np->script0->datai_phase[1] =
cpu_to_scr(NCB_SCRIPTH_PHYS (np, tweak_pmj));
np->script0->datao_phase[0] = cpu_to_scr(SCR_JUMP);
np->script0->datao_phase[1] =
cpu_to_scr(NCB_SCRIPTH_PHYS (np, tweak_pmj));
-#undef XXX
}
else
#ifdef DEBUG_896R1
instance->this_id = np->myaddr;
instance->max_id = np->maxwide ? 16 : 8;
instance->max_lun = MAX_LUN;
-#ifndef NCR_IOMAPPED
+#ifndef SCSI_NCR_IOMAPPED
#if LINUX_VERSION_CODE >= LinuxVersionCode(2,3,29)
instance->base = (unsigned long) np->reg;
#else
new_period = 0x0A;
cmd->ic_nego_width = 1;
new_width = 1;
- new_offset &= 0x1f;
}
}
- else if (new_period > 0x09)
- new_offset &= 0x1f;
+ if (!options_byte && new_offset > np->maxoffs_st)
+ new_offset = np->maxoffs_st;
nego = NS_PPR;
tp->ic_min_sync = 0x0A;
new_period = 0x0A;
}
+ if (new_offset > np->maxoffs_st)
+ new_offset = np->maxoffs_st;
nego = NS_SYNC;
msgptr[msglen++] = M_EXTENDED;
msgptr[msglen++] = 3;
msgptr[msglen++] = M_X_SYNC_REQ;
msgptr[msglen++] = new_period;
- msgptr[msglen++] = new_offset & 0x1f;
+ msgptr[msglen++] = new_offset;
}
else
cmd->ic_nego_sync = 0;
if ( (factor==9) && offset) {
if (!width) {
factor = 0x0A;
- offset &= 0x1f;
}
else
last_byte = 0x02;
}
- else if (factor > 0x09)
- offset &= 0x1f;
+ if (!last_byte && offset > np->maxoffs_st)
+ offset = np->maxoffs_st;
msgptr[msglen++] = M_EXTENDED;
msgptr[msglen++] = 6;
factor = 0x0A;
tp->minsync = 0x0A;
}
+ if (offset > np->maxoffs_st)
+ offset = np->maxoffs_st;
msgptr[msglen++] = M_EXTENDED;
msgptr[msglen++] = 3;
msgptr[msglen++] = M_X_SYNC_REQ;
msgptr[msglen++] = factor;
- msgptr[msglen++] = offset & 0x1f;
+ msgptr[msglen++] = offset;
break;
case NS_WIDE:
msgptr[msglen++] = M_EXTENDED;
**
**---------------------------------------------
*/
- if (cmd->cmnd[0] == 0 && (tp->usrflag & UF_NOSCAN)) {
+ if ((cmd->cmnd[0] == 0 || cmd->cmnd[0] == 0x12) &&
+ (tp->usrflag & UF_NOSCAN)) {
tp->usrflag &= ~UF_NOSCAN;
return DID_BAD_TARGET;
}
}
#endif
-#ifdef SCSI_NCR_PROFILE_SUPPORT
- cp->phys.num_disc = 0;
-#endif
-
/*----------------------------------------------------
**
** Build the identify / tag / sdtr message
cp->host_status = cp->nego_status ? HS_NEGOTIATE : HS_BUSY;
cp->scsi_status = S_ILLEGAL;
cp->xerr_status = 0;
- cp->phys.extra_bytes = 0;
+ cp->extra_bytes = 0;
/*
** extreme data pointer.
return;
/*
- ** Gather profiling data
+ ** Print some debugging info.
*/
-#ifdef SCSI_NCR_PROFILE_SUPPORT
- ncb_profile (np, cp);
-#endif
if (DEBUG_FLAGS & DEBUG_TINY)
printk ("CCB=%lx STAT=%x/%x\n", (unsigned long)cp,
}
else {
cp->resid = 0;
- if (cp->phys.header.lastp != cp->phys.header.goalp)
+ if (cp->xerr_status ||
+ cp->phys.header.lastp != cp->phys.header.goalp)
cp->resid = ncr_compute_residual(np, cp);
}
printk ("ERROR: cmd=%x host_status=%x scsi_status=%x "
"data_len=%d residual=%d\n",
cmd->cmnd[0], cp->host_status, cp->scsi_status,
- cp->data_len, -cp->resid);
+ cp->data_len, cp->resid);
}
}
+#if LINUX_VERSION_CODE >= LinuxVersionCode(2,3,99)
+ /*
+ ** Move residual byte count to user structure.
+ */
+ cmd->resid = cp->resid;
+#endif
/*
** Check the status.
*/
(char *) cmd->request_buffer);
}
- tp->bytes += cp->data_len;
- tp->transfers ++;
-
/*
** If tags was reduced due to queue full,
** increase tags if 1000 good status received.
/*
** The NCR has completed CCBs.
** Look at the DONE QUEUE.
+**
+** On architectures that may reorder LOAD/STORE operations,
+** a memory barrier may be needed after the reading of the
+** so-called `flag' and prior to dealing with the data.
*/
int ncr_wakeup_done (ncb_p np)
{
cp = ncr_ccb_from_dsa(np, dsa);
if (cp) {
+ MEMORY_BARRIER();
ncr_complete (np, cp);
++n;
}
OUTB(nc_aipcntl1, (1<<3));
/*
- ** If 64 bit (895A/896/1010/1010_66) write the CCNTL1 register to
- ** enable 40 bit address table indirect addressing for MOVE.
- ** Also write CCNTL0 if 64 bit chip, since this register seems
- ** to only be used by 64 bit cores.
+ ** Write CCNTL0/CCNTL1 for chips capable of 64 bit addressing
+ ** and/or hardware phase mismatch, since only such chips
+ ** seem to support those IO registers.
*/
- if (np->features & FE_64BIT) {
+ if (np->features & (FE_DAC | FE_NOPM)) {
OUTB (nc_ccntl0, np->rv_ccntl0);
OUTB (nc_ccntl1, np->rv_ccntl1);
}
** For platforms that may not support PCI memory mapping,
** we use a simple SCRIPTS that performs MEMORY MOVEs.
*/
- MEMORY_BARRIER();
if (np->base2_ba) {
if (bootverbose)
printk ("%s: Downloading SCSI SCRIPTS.\n",
np->istat_sem = 0;
OUTL (nc_dsa, np->p_ncb);
- OUTL (nc_dsp, phys);
+ OUTL_DSP (phys);
}
/*==========================================================
else if (tp->period < 2000) scsi = "FAST-10";
else scsi = "FAST-5";
- printk ("%s %sSCSI %d.%d MB/s (%d ns, offset %d)\n", scsi,
+ printk ("%s %sSCSI %d.%d MB/s (%d.%d ns, offset %d)\n", scsi,
tp->widedone > 1 ? "WIDE " : "",
- mb10 / 10, mb10 % 10, tp->period / 10, offset);
+ mb10 / 10, mb10 % 10, tp->period / 10, tp->period % 10,
+ offset);
} else
printk ("%sasynchronous.\n", tp->widedone > 1 ? "wide " : "");
next:
else if (tp->period < 2000) scsi = "FAST-10";
else scsi = "FAST-5";
- printk ("%s %sSCSI %d.%d MB/s (%d ns, offset %d)\n", scsi,
+ printk ("%s %sSCSI %d.%d MB/s (%d.%d ns, offset %d)\n", scsi,
tp->widedone > 1 ? "WIDE " : "",
- mb10 / 10, mb10 % 10, tp->period / 10, offset);
+ mb10 / 10, mb10 % 10, tp->period / 10, tp->period % 10,
+ offset);
} else
printk ("%sasynchronous.\n", tp->widedone > 1 ? "wide " : "");
next:
np->verbose = np->user.data;
break;
-#ifdef SCSI_NCR_PROFILE_SUPPORT
- case UC_CLEARPROF:
- bzero(&np->profile, sizeof(np->profile));
- break;
-#endif
default:
/*
** We assume that other commands apply to targets.
u_short sist;
int i;
-#ifdef SCSI_NCR_OPTIMIZE_896_1
- /*
- ** This optimization when used with a 896 that handles
- ** phase mismatch from the SCRIPTS allows to only do
- ** PCI memory writes transactions from the CPU and so to
- ** take advantage of PCI posted writes.
- ** Who wants his 500 MHz CPU to wait several micro-seconds
- ** for the PCI BUS to be granted when this can be avoided?
- ** I don't, even for my slow 233 MHz PII. :-)
- **
- ** We assume we have been called for command completion.
- ** If no completion found, go with normal handling.
- ** Ordering is ensured by the SCRIPTS performing a read
- ** from main memory prior to raising INTFLY.
- ** We have to raise SIGP since the chip may be currently
- ** going to a wait reselect instruction. IMO, SIGP should
- ** not be clearable in ISTAT since it can be polled and
- ** cleared by reading CTEST2. This tiny chip misdesign is a
- ** penalty here.
- **
- ** The MA interrupt and interrupt sharing may also have
- ** adverse effects on this optimization, so we only want
- ** to use it if it is enabled by user.
- ** (BTW, this optimization seems to even have some goodness
- ** with my 895 that unfortunately suffers of the MA int.).
- */
- if (driver_setup.optimize & 1) {
- OUTB(nc_istat, (INTF | SIGP | np->istat_sem));
- if (ncr_wakeup_done (np)) {
-#ifdef SCSI_NCR_PROFILE_SUPPORT
- ++np->profile.num_fly;
-#endif
- return;
- }
- }
-#endif /* SCSI_NCR_OPTIMIZE_896_1 */
-
/*
** interrupt on the fly ?
**
- ** For bridges that donnot flush posted writes
- ** in the reverse direction on read, a dummy read
- ** may help not to miss completions.
+ ** A `dummy read' is needed to ensure that the
+ ** clear of the INTF flag reaches the device
+ ** before the scanning of the DONE queue.
*/
istat = INB (nc_istat);
if (istat & INTF) {
OUTB (nc_istat, (istat & SIGP) | INTF | np->istat_sem);
-#ifdef SCSI_NCR_PCIQ_MAY_NOT_FLUSH_PW_UPSTREAM
istat = INB (nc_istat); /* DUMMY READ */
-#endif
if (DEBUG_FLAGS & DEBUG_TINY) printk ("F ");
(void)ncr_wakeup_done (np);
-#ifdef SCSI_NCR_PROFILE_SUPPORT
- ++np->profile.num_fly;
-#endif
};
if (!(istat & (SIP|DIP)))
return;
-#ifdef SCSI_NCR_PROFILE_SUPPORT
- ++np->profile.num_int;
-#endif
-
#if 0 /* We should never get this one */
if (istat & CABRT)
OUTB (nc_istat, CABRT);
(unsigned)INL(nc_dsp),
(unsigned)INL(nc_dbc));
+ /*
+ ** On paper, a memory barrier may be needed here.
+ ** And since we are paranoid ... :)
+ */
+ MEMORY_BARRIER();
+
/*========================================================
** First, interrupts we want to service cleanly.
**
if (sist & PAR) ncr_int_par (np, sist);
else if (sist & MA) ncr_int_ma (np);
else if (dstat & SIR) ncr_int_sir (np);
- else if (dstat & SSI) OUTONB (nc_dcntl, (STD|NOCOM));
+ else if (dstat & SSI) OUTONB_STD ();
else goto unknown_int;
return;
};
OUTL (nc_dsa, DSA_INVALID);
OUTB (nc_ctest3, np->rv_ctest3 | CLF); /* clear dma fifo */
OUTB (nc_stest3, TE|CSF); /* clear scsi fifo */
- OUTL (nc_dsp, NCB_SCRIPT_PHYS (np, start));
+ OUTL_DSP (NCB_SCRIPT_PHYS (np, start));
}
else
goto reset_all;
{
u_int32 dsa = INL (nc_dsa);
ccb_p cp = ncr_ccb_from_dsa(np, dsa);
- tcb_p tp = &np->target[cp->target];
/*
* Fix Up. Some disks respond to a PPR negotation with
* Disable ppr negotiation if this is first time
* tried ppr negotiation.
*/
-
- if (tp->ppr_negotiation == 1)
- tp->ppr_negotiation = 0;
+ if (cp) {
+ tcb_p tp = &np->target[cp->target];
+ if (tp->ppr_negotiation == 1)
+ tp->ppr_negotiation = 0;
+ }
printk ("%s: unexpected disconnect\n", ncr_name(np));
ncr_recover_scsi_int(np, HS_UNEXPECTED);
if ((phase == 1) || (phase == 5)) {
/* Phase mismatch handled by SCRIPTS */
if (dsp == NCB_SCRIPTH_PHYS (np, pm_handle))
- OUTL (nc_dsp, dsp);
+ OUTL_DSP (dsp);
/* Phase mismatch handled by the C code */
else if (sist & MA)
ncr_int_ma (np);
/* No phase mismatch occurred */
else {
OUTL (nc_temp, dsp);
- OUTL (nc_dsp, NCB_SCRIPT_PHYS (np, dispatch));
+ OUTL_DSP (NCB_SCRIPT_PHYS (np, dispatch));
}
}
else
- OUTL (nc_dsp, NCB_SCRIPT_PHYS (np, clrack));
+ OUTL_DSP (NCB_SCRIPT_PHYS (np, clrack));
return;
reset_all:
struct pm_ctx *pm;
ccb_p cp;
-#ifdef SCSI_NCR_PROFILE_SUPPORT
- ++np->profile.num_break;
-#endif
-
dsp = INL (nc_dsp);
dbc = INL (nc_dbc);
dsa = INL (nc_dsa);
** raising the MA interrupt for interrupted INPUT phases.
** For DATA IN phase, we will check for the SWIDE later.
*/
- if ( !(((cmd & 7) == 1) || ((cmd & 7) == 5) ) ) {
+ if ((cmd & 7) != 1 && (cmd & 7) != 5) {
u_int32 dfifo;
u_char ss0, ss2;
/*
** check cmd against assumed interrupted script command.
+	**	In DT data phase, the MOVE instruction does not carry
+	**	bit 4 of the phase.
*/
- if (cmd != (scr_to_cpu(vdsp[0]) >> 24)) {
+ if (((cmd & 2) ? cmd : (cmd & ~4)) != (scr_to_cpu(vdsp[0]) >> 24)) {
PRINT_ADDR(cp->cmd);
printk ("internal error: cmd=%02x != %02x=(vdsp[0] >> 24)\n",
(unsigned)cmd, (unsigned)scr_to_cpu(vdsp[0]) >> 24);
*/
OUTL (nc_temp, newcmd);
- OUTL (nc_dsp, nxtdsp);
+ OUTL_DSP (nxtdsp);
return;
/*
}
if (nxtdsp) {
- OUTL (nc_dsp, nxtdsp);
+ OUTL_DSP (nxtdsp);
return;
}
/*
** Now we can restart the SCRIPTS processor safely.
*/
- MEMORY_BARRIER();
- OUTL (nc_dsp, NCB_SCRIPT_PHYS (np, start));
+ OUTL_DSP (NCB_SCRIPT_PHYS (np, start));
switch(s_status) {
default:
cp->host_status = HS_BUSY;
cp->scsi_status = S_ILLEGAL;
cp->xerr_status = 0;
- cp->phys.extra_bytes = 0;
+ cp->extra_bytes = 0;
cp->host_flags &= (HF_PM_TO_C|HF_DATA_IN);
break;
np->abrt_sel.sel_sxfer = tp->sval;
np->abrt_sel.sel_scntl4 = tp->uval;
OUTL(nc_dsa, np->p_ncb);
- OUTL (nc_dsp, NCB_SCRIPTH_PHYS (np, sel_for_abort));
+ OUTL_DSP (NCB_SCRIPTH_PHYS (np, sel_for_abort));
return;
}
target = (INB (nc_sdid) & 0xf);
tp = &np->target[target];
- np->abrt_tbl.addr = vtobus(np->abrt_msg);
+ np->abrt_tbl.addr = cpu_to_scr(vtobus(np->abrt_msg));
/*
** If the target is to be reset, prepare a
/*
** Let the SCRIPTS processor continue.
*/
- OUTONB (nc_dcntl, (STD|NOCOM));
+ OUTONB_STD ();
}
out_ok:
OUTL (nc_temp, dp_scr);
- OUTL (nc_dsp, NCB_SCRIPT_PHYS (np, clrack));
+ OUTL_DSP (NCB_SCRIPT_PHYS (np, clrack));
return;
out_reject:
- OUTL (nc_dsp, NCB_SCRIPTH_PHYS (np, msg_bad));
+ OUTL_DSP (NCB_SCRIPTH_PHYS (np, msg_bad));
}
*/
if (cp->xerr_status & (XE_EXTRA_DATA|XE_SODL_UNRUN|XE_SWIDE_OVRUN)) {
if (cp->xerr_status & XE_EXTRA_DATA)
- resid -= scr_to_cpu(cp->phys.extra_bytes);
+ resid -= cp->extra_bytes;
if (cp->xerr_status & XE_SODL_UNRUN)
++resid;
if (cp->xerr_status & XE_SWIDE_OVRUN)
/*
- ** If all data has been transferred,
- ** there is no residual.
+ ** If SCRIPTS reaches its goal point, then
+	**	there is no additional residual.
*/
if (cp->phys.header.lastp == cp->phys.header.goalp)
- return 0;
+ return resid;
/*
** If the last data pointer is data_io (direction
** taken place.
*/
if (cp->phys.header.lastp == NCB_SCRIPTH_PHYS (np, data_io))
- return -cp->data_len;
-
- /*
- ** If the device asked for more data than available,
- ** return a positive residual value.
- */
- if (cp->phys.extra_bytes)
- return scr_to_cpu(cp->phys.extra_bytes);
+ return cp->data_len;
/*
- ** Evaluate the pointer saved on message COMPLETE.
- ** According to our alchemy:), the extreme data
- ** pointer will also be updated if needed.
- ** On error, assume no data transferred (this may
- ** happen if the data direction is unknown).
+ ** If no data transfer occurs, or if the data
+ ** pointer is weird, return full residual.
*/
- tmp = cpu_to_scr(cp->phys.header.lastp);
- if (ncr_evaluate_dp(np, cp, tmp, &dp_ofs) < 0)
- return -cp->data_len;
+ if (cp->startp == cp->phys.header.lastp ||
+ ncr_evaluate_dp(np, cp, scr_to_cpu(cp->phys.header.lastp),
+ &dp_ofs) < 0) {
+ return cp->data_len;
+ }
/*
** We are now full comfortable in the computation
** of the data residual (2's complement).
*/
dp_sgmin = MAX_SCATTER - cp->segments;
- resid = cp->ext_ofs;
+ resid = -cp->ext_ofs;
for (dp_sg = cp->ext_sg; dp_sg < MAX_SCATTER; ++dp_sg) {
tmp = scr_to_cpu(cp->phys.data[dp_sg].size);
- resid -= (tmp & 0xffffff);
+ resid += (tmp & 0xffffff);
}
/*
{chg = 1; per = np->minsync;}
if (per < tp->minsync)
{chg = 1; per = tp->minsync;}
+ if (ofs > np->maxoffs_st)
+ {chg = 1; ofs = np->maxoffs_st;}
if (ofs > tp->maxoffs)
{chg = 1; ofs = tp->maxoffs;}
** Answer wasn't acceptable.
*/
ncr_setsync (np, cp, 0, 0xe0, 0);
- OUTL (nc_dsp, NCB_SCRIPTH_PHYS (np, msg_bad));
+ OUTL_DSP (NCB_SCRIPTH_PHYS (np, msg_bad));
} else {
/*
** Answer is ok.
else
ncr_setsync (np, cp, scntl3, ofs, scntl4);
- OUTL (nc_dsp, NCB_SCRIPT_PHYS (np, clrack));
+ OUTL_DSP (NCB_SCRIPT_PHYS (np, clrack));
};
return;
np->msgin [0] = M_NOOP;
if (!ofs)
- OUTL (nc_dsp, NCB_SCRIPTH_PHYS (np, msg_bad));
+ OUTL_DSP (NCB_SCRIPTH_PHYS (np, msg_bad));
else
- OUTL (nc_dsp, NCB_SCRIPTH_PHYS (np, sdtr_resp));
+ OUTL_DSP (NCB_SCRIPTH_PHYS (np, sdtr_resp));
}
/*==========================================================
** Answer wasn't acceptable.
*/
ncr_setwide (np, cp, 0, 1);
- OUTL (nc_dsp, NCB_SCRIPTH_PHYS (np, msg_bad));
+ OUTL_DSP (NCB_SCRIPTH_PHYS (np, msg_bad));
} else {
/*
** Answer is ok.
*/
ncr_setwide (np, cp, wide, 1);
- OUTL (nc_dsp, NCB_SCRIPT_PHYS (np, clrack));
+ OUTL_DSP (NCB_SCRIPT_PHYS (np, clrack));
};
return;
ncr_print_msg(cp, "wide msgout", np->msgout);
}
- OUTL (nc_dsp, NCB_SCRIPTH_PHYS (np, wdtr_resp));
+ OUTL_DSP (NCB_SCRIPTH_PHYS (np, wdtr_resp));
}
/*==========================================================
**
else if (( (per > 0x09) && dt) )
chg = 2;
+ /* Not acceptable since beyond controller limit */
+ if (!dt && ofs > np->maxoffs_st)
+ {chg = 2; ofs = np->maxoffs_st;}
if (DEBUG_FLAGS & DEBUG_NEGO) {
PRINT_ADDR(cp->cmd);
tp->widedone = 0;
}
ncr_setsyncwide (np, cp, 0, 0xe0, 0, 0);
- OUTL (nc_dsp, NCB_SCRIPTH_PHYS (np, msg_bad));
+ OUTL_DSP (NCB_SCRIPTH_PHYS (np, msg_bad));
} else {
/*
** Answer is ok.
else
ncr_setsyncwide (np, cp, scntl3, ofs, scntl4, wth);
- OUTL (nc_dsp, NCB_SCRIPT_PHYS (np, clrack));
+ OUTL_DSP (NCB_SCRIPT_PHYS (np, clrack));
};
return;
if ((per == 0x09) && ofs && (!wth || !dt)) {
per = 0x0A;
dt = 0;
- ofs &= 0x1f;
}
else if ( (per > 0x09) && dt) {
dt = 0;
- ofs &= 0x1f;
}
+ if (!dt && ofs > np->maxoffs_st)
+ ofs = np->maxoffs_st;
if ((np->device_id != PCI_DEVICE_ID_LSI_53C1010) &&
(np->device_id != PCI_DEVICE_ID_LSI_53C1010_66))
np->msgin [0] = M_NOOP;
if (!ofs)
- OUTL (nc_dsp, NCB_SCRIPTH_PHYS (np, msg_bad));
+ OUTL_DSP (NCB_SCRIPTH_PHYS (np, msg_bad));
else
- OUTL (nc_dsp, NCB_SCRIPTH_PHYS (np, ppr_resp));
+ OUTL_DSP (NCB_SCRIPTH_PHYS (np, ppr_resp));
}
*/
if (tp->l0p) {
OUTL (nc_dsa, scr_to_cpu(tp->l0p->tasktbl[0]));
- OUTL (nc_dsp, NCB_SCRIPT_PHYS (np, resel_go));
+ OUTL_DSP (NCB_SCRIPT_PHYS (np, resel_go));
return;
}
/*
}
goto out;
/*
+	**	The device wants us to transfer more data than
+ ** expected or in the wrong direction.
+ ** The number of extra bytes is in scratcha.
+ ** It is a data overrun condition.
+ */
+ case SIR_DATA_OVERRUN:
+ if (cp) {
+ OUTONB (HF_PRT, HF_EXT_ERR);
+ cp->xerr_status |= XE_EXTRA_DATA;
+ cp->extra_bytes += INL (nc_scratcha);
+ }
+ goto out;
+ /*
+ ** The device switched to an illegal phase (4/5).
+ */
+ case SIR_BAD_PHASE:
+ if (cp) {
+ OUTONB (HF_PRT, HF_EXT_ERR);
+ cp->xerr_status |= XE_BAD_PHASE;
+ }
+ goto out;
+ /*
** We received a message.
*/
case SIR_MSG_RECEIVED:
*/
case SIR_MSG_WEIRD:
ncr_print_msg(cp, "WEIRD message received", np->msgin);
- OUTL (nc_dsp, NCB_SCRIPTH_PHYS (np, msg_weird));
+ OUTL_DSP (NCB_SCRIPTH_PHYS (np, msg_weird));
return;
/*
** Negotiation failed.
};
out:
- OUTONB (nc_dcntl, (STD|NOCOM));
+ OUTONB_STD ();
return;
out_reject:
- OUTL (nc_dsp, NCB_SCRIPTH_PHYS (np, msg_bad));
+ OUTL_DSP (NCB_SCRIPTH_PHYS (np, msg_bad));
return;
out_clrack:
- OUTL (nc_dsp, NCB_SCRIPT_PHYS (np, clrack));
+ OUTL_DSP (NCB_SCRIPT_PHYS (np, clrack));
return;
-out_stuck:;
+out_stuck:
}
**==========================================================
*/
-#ifndef NCR_IOMAPPED
+#ifndef SCSI_NCR_IOMAPPED
static int __init ncr_regtest (struct ncb* np)
{
register volatile u_int32 data;
{
u_int32 ncr_rd, ncr_wr, ncr_bk, host_rd, host_wr, pc;
int i, err=0;
-#ifndef NCR_IOMAPPED
+#ifndef SCSI_NCR_IOMAPPED
if (np->reg) {
err |= ncr_regtest (np);
if (err) return (err);
** Start script (exchange values)
*/
OUTL (nc_dsa, np->p_ncb);
- OUTL (nc_dsp, pc);
+ OUTL_DSP (pc);
/*
** Wait 'til done (with timeout)
*/
/*==========================================================
**
-**
-** Profiling the drivers and targets performance.
-**
-**
-**==========================================================
-*/
-
-#ifdef SCSI_NCR_PROFILE_SUPPORT
-
-static void ncb_profile (ncb_p np, ccb_p cp)
-{
- int num_disc = (cp->phys.num_disc & 0xff);
- int num_disc0 = (cp->phys.num_disc >> 8);
-
- ++np->profile.num_trans;
- np->profile.num_disc += num_disc;
- np->profile.num_disc0 += num_disc0;
- np->profile.num_kbytes += (cp->data_len >> 10);
-#if 000
- if (num_disc > num_disc0) {
- if (cp->data_len <= 1024)
- np->profile.num_br1k += (num_disc - num_disc0);
- else if (cp->data_len <= 2048)
- np->profile.num_br2k += (num_disc - num_disc0);
- else if (cp->data_len <= 4096)
- np->profile.num_br4k += (num_disc - num_disc0);
- else if (cp->data_len <= 8192)
- np->profile.num_br8k += (num_disc - num_disc0);
- else
- np->profile.num_brnk += (num_disc - num_disc0);
- }
-#endif
-}
-
-#endif /* SCSI_NCR_PROFILE_SUPPORT */
-
-/*==========================================================
-**
** Determine the ncr's clock frequency.
** This is essential for the negotiation
** of the synchronous transfer rate.
/*
* adjust for prescaler, and convert into KHz
- * scale values derived empirically. C1010 uses
- * different dividers
+ * scale values derived empirically.
*/
-#if 0
- if (np->device_id == PCI_DEVICE_ID_LSI_53C1010)
- f = ms ? ((1 << gen) * 2866 ) / ms : 0;
- else
-#endif
f = ms ? ((1 << gen) * 4340) / ms : 0;
if (bootverbose >= 2)
}
/*
- ** If multiplier not found but a C1010, assume a mult of 4.
** If multiplier not found or scntl3 not 7,5,3,
** reset chip and get frequency from general purpose timer.
** Otherwise trust scntl3 BIOS setting.
*/
- if ((np->device_id == PCI_DEVICE_ID_LSI_53C1010) ||
- (np->device_id == PCI_DEVICE_ID_LSI_53C1010_66)) {
- f1=40000;
- np->multiplier = mult;
- if (bootverbose >= 2)
- printk ("%s: clock multiplier assumed\n", ncr_name(np));
- }
- else if (np->multiplier != mult || (scntl3 & 7) < 3 || !(scntl3 & 1)) {
+ if (np->multiplier != mult || (scntl3 & 7) < 3 || !(scntl3 & 1)) {
OUTB (nc_stest1, 0); /* make sure doubler is OFF */
f1 = ncr_getfreq (np);
#define OPT_SCSI_PARITY 3
#define OPT_DISCONNECTION 4
#define OPT_SPECIAL_FEATURES 5
-#define OPT_ULTRA_SCSI 6
+#define OPT_RESERVED_1 6
#define OPT_FORCE_SYNC_NEGO 7
#define OPT_REVERSE_PROBE 8
#define OPT_DEFAULT_SYNC 9
static char setup_token[] __initdata =
"tags:" "mpar:"
"spar:" "disc:"
- "specf:" "ultra:"
+ "specf:" "_rsvd1:"
"fsn:" "revprob:"
"sync:" "verb:"
"debug:" "burst:"
case OPT_SPECIAL_FEATURES:
driver_setup.special_features = val;
break;
- case OPT_ULTRA_SCSI:
- driver_setup.ultra_scsi = val;
- break;
case OPT_FORCE_SYNC_NEGO:
driver_setup.force_sync_nego = val;
break;
static void __init ncr_print_driver_setup(void)
{
#define YesNo(y) y ? 'y' : 'n'
- printk (NAME53C8XX ": setup=disc:%c,specf:%d,ultra:%d,tags:%d,sync:%d,"
+ printk (NAME53C8XX ": setup=disc:%c,specf:%d,tags:%d,sync:%d,"
"burst:%d,wide:%c,diff:%d,revprob:%c,buschk:0x%x\n",
YesNo(driver_setup.disconnection),
driver_setup.special_features,
- driver_setup.ultra_scsi,
driver_setup.default_tags,
driver_setup.default_sync,
driver_setup.burst_max,
++j;
continue;
}
+ if (pci_enable_device(pcidev)) /* @!*!$&*!%-*#;! */
+ continue;
/* Some HW as the HP LH4 may report twice PCI devices */
for (i = 0; i < count ; i++) {
if (devtbl[i].slot.bus == PciBusNumber(pcidev) &&
u_char pci_fix_up = driver_setup.pci_fix_up;
u_char revision;
u_int irq;
- u_long base, base_2, io_port;
+ u_long base, base_c, base_2, base_2_c, io_port;
int i;
ncr_chip *chip;
vendor_id = PciVendorId(pdev);
device_id = PciDeviceId(pdev);
irq = PciIrqLine(pdev);
- i = 0;
- i = pci_get_base_address(pdev, i, &io_port);
- i = pci_get_base_address(pdev, i, &base);
- (void) pci_get_base_address(pdev, i, &base_2);
+
+ i = pci_get_base_address(pdev, 0, &io_port);
+ io_port = pci_get_base_cookie(pdev, 0);
+
+ base_c = pci_get_base_cookie(pdev, i);
+ i = pci_get_base_address(pdev, i, &base);
+
+ base_2_c = pci_get_base_cookie(pdev, i);
+ (void) pci_get_base_address(pdev, i, &base_2);
pci_read_config_word(pdev, PCI_COMMAND, &command);
pci_read_config_byte(pdev, PCI_CLASS_REVISION, &revision);
** This controller sets value 0x52414944 at RAM end - 16.
*/
#if defined(__i386__) && !defined(SCSI_NCR_PCI_MEM_NOT_SUPPORTED)
- if (chip && (base_2 & PCI_BASE_ADDRESS_MEM_MASK)) {
+ if (chip && (base_2_c & PCI_BASE_ADDRESS_MEM_MASK)) {
unsigned int ram_size, ram_val;
u_long ram_ptr;
else
ram_size = 4096;
- ram_ptr = remap_pci_mem(base_2 & PCI_BASE_ADDRESS_MEM_MASK,
+ ram_ptr = remap_pci_mem(base_2_c & PCI_BASE_ADDRESS_MEM_MASK,
ram_size);
if (ram_ptr) {
ram_val = readl_raw(ram_ptr + ram_size - 16);
** from attaching devices from the both drivers.
** If you have a better idea, let me know.
*/
-/* #ifdef NCR_IOMAPPED */
+/* #ifdef SCSI_NCR_IOMAPPED */
#if 1
if (!(command & PCI_COMMAND_IO)) {
printk(NAME53C8XX ": I/O base address (0x%lx) disabled.\n",
base &= PCI_BASE_ADDRESS_MEM_MASK;
base_2 &= PCI_BASE_ADDRESS_MEM_MASK;
-/* #ifdef NCR_IOMAPPED */
+/* #ifdef SCSI_NCR_IOMAPPED */
#if 1
if (io_port && check_region (io_port, 128)) {
printk(NAME53C8XX ": IO region 0x%lx[0..127] is in use\n",
if (!io_port)
return -1;
#endif
-#ifndef NCR_IOMAPPED
+#ifndef SCSI_NCR_IOMAPPED
if (!base) {
printk(NAME53C8XX ": MMIO base address disabled.\n");
return -1;
}
}
- if (driver_setup.ultra_scsi < 3 && (chip->features & FE_ULTRA3)) {
- chip->features |= FE_ULTRA2;
- chip->features &= ~FE_ULTRA3;
- }
- if (driver_setup.ultra_scsi < 2 && (chip->features & FE_ULTRA2)) {
- chip->features |= FE_ULTRA;
- chip->features &= ~FE_ULTRA2;
- }
- if (driver_setup.ultra_scsi < 1)
- chip->features &= ~FE_ULTRA;
-
- if (!driver_setup.max_wide)
- chip->features &= ~FE_WIDE;
-
- /*
- * C1010 Ultra3 support requires 16 bit data transfers.
- */
- if (!driver_setup.max_wide && (chip->features & FE_ULTRA3)) {
- chip->features |= FE_ULTRA2;
- chip->features |= ~FE_ULTRA3;
- }
-
/*
** Some features are required to be enabled in order to
** work around some chip problems. :) ;)
device->slot.device_fn = PciDeviceFn(pdev);
device->slot.base = base;
device->slot.base_2 = base_2;
+ device->slot.base_c = base_c;
+ device->slot.base_2_c = base_2_c;
device->slot.io_port = io_port;
device->slot.irq = irq;
device->attach_done = 0;
/*
** Get access to chip IO registers
*/
-#ifdef NCR_IOMAPPED
+#ifdef SCSI_NCR_IOMAPPED
request_region(devp->slot.io_port, 128, NAME53C8XX);
devp->slot.base_io = devp->slot.io_port;
#else
- devp->slot.reg = (struct ncr_reg *) remap_pci_mem(devp->slot.base, 128);
+ devp->slot.reg =
+ (struct ncr_reg *) remap_pci_mem(devp->slot.base_c, 128);
if (!devp->slot.reg)
return;
#endif
/*
** Release access to chip IO registers
*/
-#ifdef NCR_IOMAPPED
+#ifdef SCSI_NCR_IOMAPPED
release_region(devp->slot.base_io, 128);
#else
unmap_pci_mem((u_long) devp->slot.reg, 128ul);
/*=========================================================================
** Proc file system stuff
**
-** A read operation returns profile information.
+** A read operation returns adapter information.
** A write operation is a control command.
** The string is parsed in the driver code and the command is passed
** to the ncr_usercmd() function.
uc->cmd = UC_RESETDEV;
else if ((arg_len = is_keyword(ptr, len, "cleardev")) != 0)
uc->cmd = UC_CLEARDEV;
-#ifdef SCSI_NCR_PROFILE_SUPPORT
- else if ((arg_len = is_keyword(ptr, len, "clearprof")) != 0)
- uc->cmd = UC_CLEARPROF;
-#endif
else
arg_len = 0;
}
/*
-** Copy formatted profile information into the input buffer.
+** Copy formatted information into the input buffer.
*/
-#define to_ms(t) ((t) * 1000 / HZ)
-
static int ncr_host_info(ncb_p np, char *ptr, off_t offset, int len)
{
struct info_str info;
driver_setup.debug, driver_setup.verbose);
}
-#ifdef SCSI_NCR_PROFILE_SUPPORT
- copy_info(&info, "Profiling information:\n");
- copy_info(&info, " %-12s = %lu\n", "num_fly", np->profile.num_fly);
- copy_info(&info, " %-12s = %lu\n", "num_trans",np->profile.num_trans);
- copy_info(&info, " %-12s = %lu\n", "num_disc", np->profile.num_disc);
- copy_info(&info, " %-12s = %lu\n", "num_disc0",np->profile.num_disc0);
- copy_info(&info, " %-12s = %lu\n", "num_break",np->profile.num_break);
-#if 000
- copy_info(&info, " %-12s = %lu\n", "num_br1k",np->profile.num_br1k);
- copy_info(&info, " %-12s = %lu\n", "num_br2k",np->profile.num_br2k);
- copy_info(&info, " %-12s = %lu\n", "num_br4k",np->profile.num_br4k);
- copy_info(&info, " %-12s = %lu\n", "num_br8k",np->profile.num_br8k);
- copy_info(&info, " %-12s = %lu\n", "num_brnk",np->profile.num_brnk);
-#endif
- copy_info(&info, " %-12s = %lu\n", "num_int", np->profile.num_int);
- copy_info(&info, " %-12s = %lu\n","num_kbytes",np->profile.num_kbytes);
-#endif
-
return info.pos > info.offset? info.pos - info.offset : 0;
}
/*
** Entry point of the scsi proc fs of the driver.
-** - func = 0 means read (returns profile data)
+** - func = 0 means read (returns adapter infos)
** - func = 1 means write (parse user control command)
*/
return retv;
}
-#undef SET_BIT /* 0 */
-#undef CLR_BIT /* 1 */
-#undef SET_CLK /* 2 */
-#undef CLR_CLK /* 3 */
+#undef SET_BIT
+#undef CLR_BIT
+#undef SET_CLK
+#undef CLR_CLK
/*
* Try reading Symbios NVRAM.
** Module stuff
*/
+#if LINUX_VERSION_CODE >= LinuxVersionCode(2,4,0)
static Scsi_Host_Template driver_template = SYM53C8XX;
#include "scsi_module.c"
+#elif defined(MODULE)
+Scsi_Host_Template driver_template = SYM53C8XX;
+#include "scsi_module.c"
+#endif
** Used by hosts.c and sym53c8xx.c with module configuration.
*/
+#if (LINUX_VERSION_CODE >= 0x020400) || defined(HOSTS_C) || defined(MODULE)
+
#include <scsi/scsicam.h>
int sym53c8xx_abort(Scsi_Cmnd *);
#endif /* LINUX_VERSION_CODE */
+#endif /* defined(HOSTS_C) || defined(MODULE) */
+
#endif /* SYM53C8XX_H */
/*==========================================================
**
-** Io mapped versus memory mapped.
-**
-**==========================================================
-*/
-
-#if defined(SCSI_NCR_IOMAPPED) || defined(SCSI_NCR_PCI_MEM_NOT_SUPPORTED)
-#define NCR_IOMAPPED
-#endif
-
-/*==========================================================
-**
** Miscallaneous defines.
**
**==========================================================
/*==========================================================
**
-** On x86 architecture, write buffers management does
-** not reorder writes to memory. So, using compiler
-** optimization barriers is enough to guarantee some
-** ordering when the CPU is writing data accessed by
-** the NCR.
-** On Alpha architecture, explicit memory barriers have
-** to be used.
-** Other architectures are defaulted to mb() macro if
-** defined, otherwise use compiler barrier.
-**
-**==========================================================
-*/
-
-#if defined(__i386__)
-#define MEMORY_BARRIER() barrier()
-#elif defined(__alpha__)
-#define MEMORY_BARRIER() mb()
-#else
-# ifdef mb
-# define MEMORY_BARRIER() mb()
-# else
-# define MEMORY_BARRIER() barrier()
-# endif
-#endif
-
-/*==========================================================
-**
** Simple Wrapper to kernel PCI bus interface.
**
** This wrapper allows to get rid of old kernel PCI
#define PciDeviceId(d) (d)->device
#define PciIrqLine(d) (d)->irq
-#if LINUX_VERSION_CODE > LinuxVersionCode(2,3,12)
-
-static int __init
-pci_get_base_address(struct pci_dev *pdev, int index, u_long *base)
+static u_long __init
+pci_get_base_cookie(struct pci_dev *pdev, int index)
{
- *base = pdev->resource[index].start;
- if ((pdev->resource[index].flags & 0x7) == 0x4)
- ++index;
- return ++index;
-}
+ u_long base;
+
+#if LINUX_VERSION_CODE > LinuxVersionCode(2,3,12)
+ base = pdev->resource[index].start;
#else
-static int __init
+ base = pdev->base_address[index];
+#if BITS_PER_LONG > 32
+ if ((base & 0x7) == 0x4)
+ *base |= (((u_long)pdev->base_address[++index]) << 32);
+#endif
+#endif
+ return (base & ~0x7ul);
+}
+
+static int __init
pci_get_base_address(struct pci_dev *pdev, int index, u_long *base)
{
- *base = pdev->base_address[index++];
- if ((*base & 0x7) == 0x4) {
+ u32 tmp;
+#define PCI_BAR_OFFSET(index) (PCI_BASE_ADDRESS_0 + (index<<2))
+
+ pci_read_config_dword(pdev, PCI_BAR_OFFSET(index), &tmp);
+ *base = tmp;
+ ++index;
+ if ((tmp & 0x7) == 0x4) {
#if BITS_PER_LONG > 32
- *base |= (((u_long)pdev->base_address[index]) << 32);
+ pci_read_config_dword(pdev, PCI_BAR_OFFSET(index), &tmp);
+ *base |= (((u_long)tmp) << 32);
#endif
++index;
}
return index;
+#undef PCI_BAR_OFFSET
}
-#endif
#else /* Incomplete emulation of current PCI code for pre-2.2 kernels */
}
return offset;
}
+static u_long __init
+pci_get_base_cookie(struct pci_dev *pdev, int offset)
+{
+ u_long base;
+
+ (void) pci_get_base_address(dev, offset, &base);
+
+ return base;
+}
#endif /* LINUX_VERSION_CODE >= LinuxVersionCode(2,2,0) */
+/* Does not make sense in earlier kernels */
+#if LINUX_VERSION_CODE < LinuxVersionCode(2,4,0)
+#define pci_enable_device(pdev) (0)
+#endif
+
/*==========================================================
**
** SMP threading.
#ifdef __sparc__
# include <asm/irq.h>
-# define pcivtobus(p) bus_dvma_to_mem(p)
# define memcpy_to_pci(a, b, c) memcpy_toio((a), (b), (c))
#elif defined(__alpha__)
-# define pcivtobus(p) ((p) & 0xfffffffful)
# define memcpy_to_pci(a, b, c) memcpy_toio((a), (b), (c))
#else /* others */
-# define pcivtobus(p) (p)
# define memcpy_to_pci(a, b, c) memcpy_toio((a), (b), (c))
#endif
-#if (defined(SCSI_NCR_NVRAM_SUPPORT) && !defined(NCR_IOMAPPED)) || \
- (defined(__i386__) && !defined(SCSI_NCR_PCI_MEM_NOT_SUPPORTED))
+#ifndef SCSI_NCR_PCI_MEM_NOT_SUPPORTED
static u_long __init remap_pci_mem(u_long base, u_long size)
{
u_long page_base = ((u_long) base) & PAGE_MASK;
#define initverbose (driver_setup.verbose)
#define bootverbose (np->verbose)
-/*==========================================================
-**
-** Big/Little endian support.
-**
-** If the NCR uses big endian addressing mode over the
-** PCI, actual io register addresses for byte and word
-** accesses must be changed according to lane routing.
-** Btw, ncr_offb() and ncr_offw() macros only apply to
-** constants and so donnot generate bloated code.
-**
-** If the CPU and the NCR use same endian-ness adressing,
-** no byte reordering is needed for script patching.
-** Macro cpu_to_scr() is to be used for script patching.
-** Macro scr_to_cpu() is to be used for getting a DWORD
-** from the script.
-**
-**==========================================================
-*/
-
-#if defined(SCSI_NCR_BIG_ENDIAN)
-
-#define ncr_offb(o) (((o)&~3)+((~((o)&3))&3))
-#define ncr_offw(o) (((o)&~3)+((~((o)&3))&2))
-
-#else
-
-#define ncr_offb(o) (o)
-#define ncr_offw(o) (o)
-
-#endif
-
-#if defined(__BIG_ENDIAN) && !defined(SCSI_NCR_BIG_ENDIAN)
-
-#define cpu_to_scr(dw) cpu_to_le32(dw)
-#define scr_to_cpu(dw) le32_to_cpu(dw)
-
-#elif defined(__LITTLE_ENDIAN) && defined(SCSI_NCR_BIG_ENDIAN)
-
-#define cpu_to_scr(dw) cpu_to_be32(dw)
-#define scr_to_cpu(dw) be32_to_cpu(dw)
-
-#else
-
-#define cpu_to_scr(dw) (dw)
-#define scr_to_cpu(dw) (dw)
-
-#endif
-
-/*==========================================================
-**
-** Access to the controller chip.
-**
-** If NCR_IOMAPPED is defined, the driver will use
-** normal IOs instead of the MEMORY MAPPED IO method
-** recommended by PCI specifications.
-** If all PCI bridges, host brigdes and architectures
-** would have been correctly designed for PCI, this
-** option would be useless.
-**
-** If the CPU and the NCR use same endian-ness adressing,
-** no byte reordering is needed for accessing chip io
-** registers. Functions suffixed by '_raw' are assumed
-** to access the chip over the PCI without doing byte
-** reordering. Functions suffixed by '_l2b' are
-** assumed to perform little-endian to big-endian byte
-** reordering, those suffixed by '_b2l' blah, blah,
-** blah, ...
-**
-**==========================================================
-*/
-
-#if defined(NCR_IOMAPPED)
-
-/*
-** IO mapped only input / ouput
-*/
-
-#define INB_OFF(o) inb (np->base_io + ncr_offb(o))
-#define OUTB_OFF(o, val) outb ((val), np->base_io + ncr_offb(o))
-
-#if defined(__BIG_ENDIAN) && !defined(SCSI_NCR_BIG_ENDIAN)
-
-#define INW_OFF(o) inw_l2b (np->base_io + ncr_offw(o))
-#define INL_OFF(o) inl_l2b (np->base_io + (o))
-
-#define OUTW_OFF(o, val) outw_b2l ((val), np->base_io + ncr_offw(o))
-#define OUTL_OFF(o, val) outl_b2l ((val), np->base_io + (o))
-
-#elif defined(__LITTLE_ENDIAN) && defined(SCSI_NCR_BIG_ENDIAN)
-
-#define INW_OFF(o) inw_b2l (np->base_io + ncr_offw(o))
-#define INL_OFF(o) inl_b2l (np->base_io + (o))
-
-#define OUTW_OFF(o, val) outw_l2b ((val), np->base_io + ncr_offw(o))
-#define OUTL_OFF(o, val) outl_l2b ((val), np->base_io + (o))
-
-#else
-
-#define INW_OFF(o) inw_raw (np->base_io + ncr_offw(o))
-#define INL_OFF(o) inl_raw (np->base_io + (o))
-
-#define OUTW_OFF(o, val) outw_raw ((val), np->base_io + ncr_offw(o))
-#define OUTL_OFF(o, val) outl_raw ((val), np->base_io + (o))
-
-#endif /* ENDIANs */
-
-#else /* defined NCR_IOMAPPED */
-
-/*
-** MEMORY mapped IO input / output
-*/
-
-#define INB_OFF(o) readb((char *)np->reg + ncr_offb(o))
-#define OUTB_OFF(o, val) writeb((val), (char *)np->reg + ncr_offb(o))
-
-#if defined(__BIG_ENDIAN) && !defined(SCSI_NCR_BIG_ENDIAN)
-
-#define INW_OFF(o) readw_l2b((char *)np->reg + ncr_offw(o))
-#define INL_OFF(o) readl_l2b((char *)np->reg + (o))
-
-#define OUTW_OFF(o, val) writew_b2l((val), (char *)np->reg + ncr_offw(o))
-#define OUTL_OFF(o, val) writel_b2l((val), (char *)np->reg + (o))
-
-#elif defined(__LITTLE_ENDIAN) && defined(SCSI_NCR_BIG_ENDIAN)
-
-#define INW_OFF(o) readw_b2l((char *)np->reg + ncr_offw(o))
-#define INL_OFF(o) readl_b2l((char *)np->reg + (o))
-
-#define OUTW_OFF(o, val) writew_l2b((val), (char *)np->reg + ncr_offw(o))
-#define OUTL_OFF(o, val) writel_l2b((val), (char *)np->reg + (o))
-
-#else
-
-#define INW_OFF(o) readw_raw((char *)np->reg + ncr_offw(o))
-#define INL_OFF(o) readl_raw((char *)np->reg + (o))
-
-#define OUTW_OFF(o, val) writew_raw((val), (char *)np->reg + ncr_offw(o))
-#define OUTL_OFF(o, val) writel_raw((val), (char *)np->reg + (o))
-
-#endif
-
-#endif /* defined NCR_IOMAPPED */
-
-#define INB(r) INB_OFF (offsetof(struct ncr_reg,r))
-#define INW(r) INW_OFF (offsetof(struct ncr_reg,r))
-#define INL(r) INL_OFF (offsetof(struct ncr_reg,r))
-
-#define OUTB(r, val) OUTB_OFF (offsetof(struct ncr_reg,r), (val))
-#define OUTW(r, val) OUTW_OFF (offsetof(struct ncr_reg,r), (val))
-#define OUTL(r, val) OUTL_OFF (offsetof(struct ncr_reg,r), (val))
-
-/*
-** Set bit field ON, OFF
-*/
-
-#define OUTONB(r, m) OUTB(r, INB(r) | (m))
-#define OUTOFFB(r, m) OUTB(r, INB(r) & ~(m))
-#define OUTONW(r, m) OUTW(r, INW(r) | (m))
-#define OUTOFFW(r, m) OUTW(r, INW(r) & ~(m))
-#define OUTONL(r, m) OUTL(r, INL(r) | (m))
-#define OUTOFFL(r, m) OUTL(r, INL(r) & ~(m))
-
/*==========================================================
**
u_long base;
u_long base_2;
u_long io_port;
+ u_long base_c;
+ u_long base_2_c;
int irq;
/* port and reg fields to use INB, OUTB macros */
u_long base_io;
return retv;
}
-#undef SET_BIT
-#undef CLR_BIT
-#undef SET_CLK
-#undef CLR_CLK
+#undef SET_BIT
+#undef CLR_BIT
+#undef SET_CLK
+#undef CLR_CLK
/*
* Try reading Symbios NVRAM.
/*
** Get access to chip IO registers
*/
-#ifdef NCR_IOMAPPED
+#ifdef SCSI_NCR_IOMAPPED
request_region(devp->slot.io_port, 128, NAME53C8XX);
devp->slot.base_io = devp->slot.io_port;
#else
- devp->slot.reg = (struct ncr_reg *) remap_pci_mem(devp->slot.base, 128);
+ devp->slot.reg =
+ (struct ncr_reg *) remap_pci_mem(devp->slot.base_c, 128);
if (!devp->slot.reg)
return;
#endif
/*
** Release access to chip IO registers
*/
-#ifdef NCR_IOMAPPED
+#ifdef SCSI_NCR_IOMAPPED
release_region(devp->slot.base_io, 128);
#else
unmap_pci_mem((u_long) devp->slot.reg, 128ul);
#define OPT_SCSI_PARITY 3
#define OPT_DISCONNECTION 4
#define OPT_SPECIAL_FEATURES 5
-#define OPT_ULTRA_SCSI 6
+#define OPT_UNUSED_1 6
#define OPT_FORCE_SYNC_NEGO 7
#define OPT_REVERSE_PROBE 8
#define OPT_DEFAULT_SYNC 9
case OPT_SPECIAL_FEATURES:
driver_setup.special_features = val;
break;
- case OPT_ULTRA_SCSI:
- driver_setup.ultra_scsi = val;
- break;
case OPT_FORCE_SYNC_NEGO:
driver_setup.force_sync_nego = val;
break;
static void __init ncr_print_driver_setup(void)
{
#define YesNo(y) y ? 'y' : 'n'
- printk (NAME53C8XX ": setup=disc:%c,specf:%d,ultra:%d,tags:%d,sync:%d,"
+ printk (NAME53C8XX ": setup=disc:%c,specf:%d,tags:%d,sync:%d,"
"burst:%d,wide:%c,diff:%d,revprob:%c,buschk:0x%x\n",
YesNo(driver_setup.disconnection),
driver_setup.special_features,
- driver_setup.ultra_scsi,
driver_setup.default_tags,
driver_setup.default_sync,
driver_setup.burst_max,
u_char pci_fix_up = driver_setup.pci_fix_up;
u_char revision;
u_int irq;
- u_long base, base_2, io_port;
+ u_long base, base_c, base_2, base_2_c, io_port;
int i;
ncr_chip *chip;
vendor_id = PciVendorId(pdev);
device_id = PciDeviceId(pdev);
irq = PciIrqLine(pdev);
- i = 0;
- i = pci_get_base_address(pdev, i, &io_port);
- i = pci_get_base_address(pdev, i, &base);
- (void) pci_get_base_address(pdev, i, &base_2);
+
+ i = pci_get_base_address(pdev, 0, &io_port);
+ io_port = pci_get_base_cookie(pdev, 0);
+
+ base_c = pci_get_base_cookie(pdev, i);
+ i = pci_get_base_address(pdev, i, &base);
+
+ base_2_c = pci_get_base_cookie(pdev, i);
+ (void) pci_get_base_address(pdev, i, &base_2);
pci_read_config_word(pdev, PCI_COMMAND, &command);
pci_read_config_byte(pdev, PCI_CLASS_REVISION, &revision);
** This controller sets value 0x52414944 at RAM end - 16.
*/
#if defined(__i386__) && !defined(SCSI_NCR_PCI_MEM_NOT_SUPPORTED)
- if (chip && (base_2 & PCI_BASE_ADDRESS_MEM_MASK)) {
+ if (chip && (base_2_c & PCI_BASE_ADDRESS_MEM_MASK)) {
unsigned int ram_size, ram_val;
u_long ram_ptr;
else
ram_size = 4096;
- ram_ptr = remap_pci_mem(base_2 & PCI_BASE_ADDRESS_MEM_MASK,
+ ram_ptr = remap_pci_mem(base_2_c & PCI_BASE_ADDRESS_MEM_MASK,
ram_size);
if (ram_ptr) {
ram_val = readl_raw(ram_ptr + ram_size - 16);
** from attaching devices from the both drivers.
** If you have a better idea, let me know.
*/
-/* #ifdef NCR_IOMAPPED */
+/* #ifdef SCSI_NCR_IOMAPPED */
#if 1
if (!(command & PCI_COMMAND_IO)) {
printk(NAME53C8XX ": I/O base address (0x%lx) disabled.\n",
base &= PCI_BASE_ADDRESS_MEM_MASK;
base_2 &= PCI_BASE_ADDRESS_MEM_MASK;
-/* #ifdef NCR_IOMAPPED */
+/* #ifdef SCSI_NCR_IOMAPPED */
#if 1
if (io_port && check_region (io_port, 128)) {
printk(NAME53C8XX ": IO region 0x%lx[0..127] is in use\n",
if (!io_port)
return -1;
#endif
-#ifndef NCR_IOMAPPED
+#ifndef SCSI_NCR_IOMAPPED
if (!base) {
printk(NAME53C8XX ": MMIO base address disabled.\n");
return -1;
if (driver_setup.special_features & 4)
chip->features &= ~FE_NOPM;
}
- if (driver_setup.ultra_scsi < 2 && (chip->features & FE_ULTRA2)) {
- chip->features |= FE_ULTRA;
- chip->features &= ~FE_ULTRA2;
- }
- if (driver_setup.ultra_scsi < 1)
- chip->features &= ~FE_ULTRA;
- if (!driver_setup.max_wide)
- chip->features &= ~FE_WIDE;
/*
** Some features are required to be enabled in order to
device->slot.device_fn = PciDeviceFn(pdev);
device->slot.base = base;
device->slot.base_2 = base_2;
+ device->slot.base_c = base_c;
+ device->slot.base_2_c = base_2_c;
device->slot.io_port = io_port;
device->slot.irq = irq;
device->attach_done = 0;
++j;
continue;
}
+ if (pci_enable_device(pcidev)) /* @!*!$&*!%-*#;! */
+ continue;
/* Some HW as the HP LH4 may report twice PCI devices */
for (i = 0; i < count ; i++) {
if (devtbl[i].slot.bus == PciBusNumber(pcidev) &&
#define SCSI_NCR_DEBUG_INFO_SUPPORT
#define SCSI_NCR_PCI_FIX_UP_SUPPORT
#ifdef SCSI_NCR_PROC_INFO_SUPPORT
-# ifdef CONFIG_SCSI_NCR53C8XX_PROFILE
-# define SCSI_NCR_PROFILE_SUPPORT
-# endif
# define SCSI_NCR_USER_COMMAND_SUPPORT
# define SCSI_NCR_USER_INFO_SUPPORT
#endif
*/
#define SCSI_NCR_SETUP_SPECIAL_FEATURES (3)
-/*
- * For Ultra2 and Ultra3 SCSI support allow 80Mhz synchronous data transfers.
- * Value means:
- * 0 - Ultra speeds disabled
- * 1 - Ultra enabled (Maximum 20Mtrans/sec)
- * 2 - Ultra2 enabled (Maximum 40Mtrans/sec)
- * 3 - Ultra3 enabled (Maximum 80Mtrans/sec)
- *
- * Use boot options sym53c8xx=ultra:3 to enable Ultra3 support.
- */
-
-#define SCSI_NCR_SETUP_ULTRA_SCSI (3)
#define SCSI_NCR_MAX_SYNC (80)
/*
#endif
/*
- * Use normal IO if configured. Forced for alpha and ppc.
+ * Use normal IO if configured. Forced for alpha and powerpc.
+ * Powerpc fails copying to on-chip RAM using memcpy_toio().
*/
#if defined(CONFIG_SCSI_NCR53C8XX_IOMAPPED)
#define SCSI_NCR_IOMAPPED
-#elif defined(__alpha__) || defined(__powerpc__)
+#elif defined(__alpha__)
#define SCSI_NCR_IOMAPPED
+#elif defined(__powerpc__)
+#define SCSI_NCR_IOMAPPED
+#define SCSI_NCR_PCI_MEM_NOT_SUPPORTED
#elif defined(__sparc__)
#undef SCSI_NCR_IOMAPPED
#endif
/*
+ * Should we enable DAC cycles on Sparc64 platform?
+ * Until further investigation we do not enable it
+ * at the moment.
+ * We may want to enable it for __ia64__ (untested)
+ */
+#if defined(__ia64__)
+# if !defined(SCSI_NCR_USE_64BIT_DAC)
+# define SCSI_NCR_USE_64BIT_DAC
+# endif
+#else
+# undef SCSI_NCR_USE_64BIT_DAC
+#endif
+
+/*
* Immediate arbitration
*/
#if defined(CONFIG_SCSI_NCR53C8XX_IARB)
#define ktime_add(a, o) ((a) + (u_long)(o))
#define ktime_sub(a, o) ((a) - (u_long)(o))
+
/*
-** IO functions definition for big/little endian support.
-** For now, the NCR is only supported in little endian addressing mode,
-** and big endian byte ordering is only supported for the PPC.
-** MMIO is not used on PPC.
-*/
+ * IO functions definition for big/little endian CPU support.
+ * For now, the NCR is only supported in little endian addressing mode,
+ */
#ifdef __BIG_ENDIAN
#error "BIG ENDIAN byte ordering needs kernel version >= 2.1.0"
#endif
-#if defined(__powerpc__)
#define inw_l2b inw
#define inl_l2b inl
#define outw_b2l outw
#define outl_b2l outl
-#elif defined(__sparc__)
+
+#define readb_raw readb
+#define writeb_raw writeb
+
+#if defined(__hppa__)
+#define readw_l2b(a) le16_to_cpu(readw(a))
+#define readl_l2b(a) le32_to_cpu(readl(a))
+#define writew_b2l(v,a) writew(cpu_to_le16(v),a)
+#define writel_b2l(v,a) writel(cpu_to_le32(v),a)
+#else /* Other big-endian */
#define readw_l2b readw
#define readl_l2b readl
#define writew_b2l writew
#define writel_b2l writel
-#else
-#error "Support for BIG ENDIAN is only available for PowerPC and SPARC"
#endif
#else /* little endian */
-#if defined(__i386__) /* i386 implements full FLAT memory/MMIO model */
#define inw_raw inw
#define inl_raw inl
#define outw_raw outw
#define outl_raw outl
+
+#if defined(__i386__) /* i386 implements full FLAT memory/MMIO model */
#define readb_raw(a) (*(volatile unsigned char *) (a))
#define readw_raw(a) (*(volatile unsigned short *) (a))
#define readl_raw(a) (*(volatile unsigned int *) (a))
#define writew_raw(b,a) ((*(volatile unsigned short *) (a)) = (b))
#define writel_raw(b,a) ((*(volatile unsigned int *) (a)) = (b))
-#else /* Other little-endian (for now alpha) */
-#define inw_raw inw
-#define inl_raw inl
-#define outw_raw outw
-#define outl_raw outl
+#else /* Other little-endian */
+#define readb_raw readb
#define readw_raw readw
#define readl_raw readl
+#define writeb_raw writeb
#define writew_raw writew
#define writel_raw writel
#error "The NCR in BIG ENDIAN addressing mode is not (yet) supported"
#endif
+
+/*
+ * IA32 architecture does not reorder STORES and prevents
+ * LOADS from passing STORES. It is called `program order'
+ * by Intel and allows device drivers to deal with memory
+ * ordering by only ensuring that the code is not reordered
+ * by the compiler when ordering is required.
+ * Other architectures implement a weaker ordering that
+ * requires memory barriers (and also IO barriers when they
+ * make sense) to be used.
+ * We want to be paranoid for ppc and ia64. :)
+ */
+
+#if defined __i386__
+#define MEMORY_BARRIER() do { ; } while(0)
+#elif defined __powerpc__
+#define MEMORY_BARRIER() __asm__ volatile("eieio; sync" : : : "memory")
+#elif defined __ia64__
+#define MEMORY_BARRIER() __asm__ volatile("mf.a; mf" : : : "memory")
+#else
+#define MEMORY_BARRIER() mb()
+#endif
+
+
+/*
+ * If the NCR uses big endian addressing mode over the
+ * PCI, actual io register addresses for byte and word
+ * accesses must be changed according to lane routing.
+ * Btw, ncr_offb() and ncr_offw() macros only apply to
+ * constants and so do not generate bloated code.
+ */
+
+#if defined(SCSI_NCR_BIG_ENDIAN)
+
+#define ncr_offb(o) (((o)&~3)+((~((o)&3))&3))
+#define ncr_offw(o) (((o)&~3)+((~((o)&3))&2))
+
+#else
+
+#define ncr_offb(o) (o)
+#define ncr_offw(o) (o)
+
+#endif
+
+/*
+ * If the CPU and the NCR use same endian-ness addressing,
+ * no byte reordering is needed for script patching.
+ * Macro cpu_to_scr() is to be used for script patching.
+ * Macro scr_to_cpu() is to be used for getting a DWORD
+ * from the script.
+ */
+
+#if defined(__BIG_ENDIAN) && !defined(SCSI_NCR_BIG_ENDIAN)
+
+#define cpu_to_scr(dw) cpu_to_le32(dw)
+#define scr_to_cpu(dw) le32_to_cpu(dw)
+
+#elif defined(__LITTLE_ENDIAN) && defined(SCSI_NCR_BIG_ENDIAN)
+
+#define cpu_to_scr(dw) cpu_to_be32(dw)
+#define scr_to_cpu(dw) be32_to_cpu(dw)
+
+#else
+
+#define cpu_to_scr(dw) (dw)
+#define scr_to_cpu(dw) (dw)
+
+#endif
+
+/*
+ * Access to the controller chip.
+ *
+ * If SCSI_NCR_IOMAPPED is defined, the driver will use
+ * normal IOs instead of the MEMORY MAPPED IO method
+ * recommended by PCI specifications.
+ * If all PCI bridges, host bridges and architectures
+ * would have been correctly designed for PCI, this
+ * option would be useless.
+ *
+ * If the CPU and the NCR use same endian-ness addressing,
+ * no byte reordering is needed for accessing chip io
+ * registers. Functions suffixed by '_raw' are assumed
+ * to access the chip over the PCI without doing byte
+ * reordering. Functions suffixed by '_l2b' are
+ * assumed to perform little-endian to big-endian byte
+ * reordering, those suffixed by '_b2l' blah, blah,
+ * blah, ...
+ */
+
+#if defined(SCSI_NCR_IOMAPPED)
+
+/*
+ * IO mapped only input / output
+ */
+
+#define INB_OFF(o) inb (np->base_io + ncr_offb(o))
+#define OUTB_OFF(o, val) outb ((val), np->base_io + ncr_offb(o))
+
+#if defined(__BIG_ENDIAN) && !defined(SCSI_NCR_BIG_ENDIAN)
+
+#define INW_OFF(o) inw_l2b (np->base_io + ncr_offw(o))
+#define INL_OFF(o) inl_l2b (np->base_io + (o))
+
+#define OUTW_OFF(o, val) outw_b2l ((val), np->base_io + ncr_offw(o))
+#define OUTL_OFF(o, val) outl_b2l ((val), np->base_io + (o))
+
+#elif defined(__LITTLE_ENDIAN) && defined(SCSI_NCR_BIG_ENDIAN)
+
+#define INW_OFF(o) inw_b2l (np->base_io + ncr_offw(o))
+#define INL_OFF(o) inl_b2l (np->base_io + (o))
+
+#define OUTW_OFF(o, val) outw_l2b ((val), np->base_io + ncr_offw(o))
+#define OUTL_OFF(o, val) outl_l2b ((val), np->base_io + (o))
+
+#else
+
+#define INW_OFF(o) inw_raw (np->base_io + ncr_offw(o))
+#define INL_OFF(o) inl_raw (np->base_io + (o))
+
+#define OUTW_OFF(o, val) outw_raw ((val), np->base_io + ncr_offw(o))
+#define OUTL_OFF(o, val) outl_raw ((val), np->base_io + (o))
+
+#endif /* ENDIANs */
+
+#else /* defined SCSI_NCR_IOMAPPED */
+
+/*
+ * MEMORY mapped IO input / output
+ */
+
+#define INB_OFF(o) readb_raw((char *)np->reg + ncr_offb(o))
+#define OUTB_OFF(o, val) writeb_raw((val), (char *)np->reg + ncr_offb(o))
+
+#if defined(__BIG_ENDIAN) && !defined(SCSI_NCR_BIG_ENDIAN)
+
+#define INW_OFF(o) readw_l2b((char *)np->reg + ncr_offw(o))
+#define INL_OFF(o) readl_l2b((char *)np->reg + (o))
+
+#define OUTW_OFF(o, val) writew_b2l((val), (char *)np->reg + ncr_offw(o))
+#define OUTL_OFF(o, val) writel_b2l((val), (char *)np->reg + (o))
+
+#elif defined(__LITTLE_ENDIAN) && defined(SCSI_NCR_BIG_ENDIAN)
+
+#define INW_OFF(o) readw_b2l((char *)np->reg + ncr_offw(o))
+#define INL_OFF(o) readl_b2l((char *)np->reg + (o))
+
+#define OUTW_OFF(o, val) writew_l2b((val), (char *)np->reg + ncr_offw(o))
+#define OUTL_OFF(o, val) writel_l2b((val), (char *)np->reg + (o))
+
+#else
+
+#define INW_OFF(o) readw_raw((char *)np->reg + ncr_offw(o))
+#define INL_OFF(o) readl_raw((char *)np->reg + (o))
+
+#define OUTW_OFF(o, val) writew_raw((val), (char *)np->reg + ncr_offw(o))
+#define OUTL_OFF(o, val) writel_raw((val), (char *)np->reg + (o))
+
+#endif
+
+#endif /* defined SCSI_NCR_IOMAPPED */
+
+#define INB(r) INB_OFF (offsetof(struct ncr_reg,r))
+#define INW(r) INW_OFF (offsetof(struct ncr_reg,r))
+#define INL(r) INL_OFF (offsetof(struct ncr_reg,r))
+
+#define OUTB(r, val) OUTB_OFF (offsetof(struct ncr_reg,r), (val))
+#define OUTW(r, val) OUTW_OFF (offsetof(struct ncr_reg,r), (val))
+#define OUTL(r, val) OUTL_OFF (offsetof(struct ncr_reg,r), (val))
+
+/*
+ * Set bit field ON, OFF
+ */
+
+#define OUTONB(r, m) OUTB(r, INB(r) | (m))
+#define OUTOFFB(r, m) OUTB(r, INB(r) & ~(m))
+#define OUTONW(r, m) OUTW(r, INW(r) | (m))
+#define OUTOFFW(r, m) OUTW(r, INW(r) & ~(m))
+#define OUTONL(r, m) OUTL(r, INL(r) | (m))
+#define OUTOFFL(r, m) OUTL(r, INL(r) & ~(m))
+
+/*
+ * We normally want the chip to have a consistent view
+ * of driver internal data structures when we restart it.
+ * Thus these macros.
+ */
+#define OUTL_DSP(v) \
+ do { \
+ MEMORY_BARRIER(); \
+ OUTL (nc_dsp, (v)); \
+ } while (0)
+
+#define OUTONB_STD() \
+ do { \
+ MEMORY_BARRIER(); \
+ OUTONB (nc_dcntl, (STD|NOCOM)); \
+ } while (0)
+
+
/*
** NCR53C8XX Device Ids
*/
#define PCI_DEVICE_ID_NCR_53C895A 0x12
#endif
+#ifndef PCI_DEVICE_ID_NCR_53C875A
+#define PCI_DEVICE_ID_NCR_53C875A 0x13
+#endif
+
#ifndef PCI_DEVICE_ID_NCR_53C1510D
#define PCI_DEVICE_ID_NCR_53C1510D 0xa
#endif
#define FE_PFEN (1<<12) /* Prefetch enable */
#define FE_LDSTR (1<<13) /* Load/Store supported */
#define FE_RAM (1<<14) /* On chip RAM present */
-#define FE_CLK80 (1<<15) /* Board clock is 80 MHz */
+#define FE_VARCLK	(1<<15)    /* SCSI clock may vary */
#define FE_RAM8K (1<<16) /* On chip RAM sized 8Kb */
-#define FE_64BIT (1<<17) /* Supports 64-bit addressing */
+#define FE_64BIT (1<<17) /* Have a 64-bit PCI interface */
#define FE_IO256 (1<<18) /* Requires full 256 bytes in PCI space */
#define FE_NOPM (1<<19) /* Scripts handles phase mismatch */
#define FE_LEDC (1<<20) /* Hardware control of LED */
#define FE_DIFF (1<<21) /* Support Differential SCSI */
#define FE_ULTRA3 (1<<22) /* Ultra-3 80Mtrans/sec */
#define FE_66MHZ (1<<23) /* 66MHz PCI Support */
+#define FE_DAC (1<<24) /* Support DAC cycles (64 bit addressing) */
#define FE_CACHE_SET (FE_ERL|FE_CLSE|FE_WRIE|FE_ERMP)
#define FE_SCSI_SET (FE_WIDE|FE_ULTRA|FE_ULTRA2|FE_DBLR|FE_QUAD|F_CLK80)
FE_WIDE|FE_CACHE0_SET|FE_BOF|FE_DFS|FE_LDSTR|FE_PFEN|FE_RAM|FE_DIFF} \
, \
{PCI_DEVICE_ID_NCR_53C860, 0xff, "860", 4, 8, 5, \
- FE_ULTRA|FE_CLK80|FE_CACHE_SET|FE_BOF|FE_LDSTR|FE_PFEN} \
+ FE_ULTRA|FE_CACHE_SET|FE_BOF|FE_LDSTR|FE_PFEN} \
, \
{PCI_DEVICE_ID_NCR_53C875, 0x01, "875", 6, 16, 5, \
- FE_WIDE|FE_ULTRA|FE_CLK80|FE_CACHE0_SET|FE_BOF|FE_DFS|FE_LDSTR|FE_PFEN|\
- FE_RAM|FE_DIFF} \
- , \
- {PCI_DEVICE_ID_NCR_53C875, 0x0f, "875", 6, 16, 5, \
- FE_WIDE|FE_ULTRA|FE_DBLR|FE_CACHE0_SET|FE_BOF|FE_DFS|FE_LDSTR|FE_PFEN| \
- FE_RAM|FE_DIFF} \
- , \
- {PCI_DEVICE_ID_NCR_53C875, 0x1f, "876", 6, 16, 5, \
- FE_WIDE|FE_ULTRA|FE_DBLR|FE_CACHE0_SET|FE_BOF|FE_DFS|FE_LDSTR|FE_PFEN| \
- FE_RAM|FE_DIFF} \
- , \
- {PCI_DEVICE_ID_NCR_53C875, 0x2f, "875E", 6, 16, 5, \
- FE_WIDE|FE_ULTRA|FE_DBLR|FE_CACHE0_SET|FE_BOF|FE_DFS|FE_LDSTR|FE_PFEN| \
- FE_RAM|FE_DIFF} \
+ FE_WIDE|FE_ULTRA|FE_CACHE0_SET|FE_BOF|FE_DFS|FE_LDSTR|FE_PFEN| \
+ FE_RAM|FE_DIFF|FE_VARCLK} \
, \
- {PCI_DEVICE_ID_NCR_53C875, 0xff, "876", 6, 16, 5, \
+ {PCI_DEVICE_ID_NCR_53C875, 0xff, "875", 6, 16, 5, \
FE_WIDE|FE_ULTRA|FE_DBLR|FE_CACHE0_SET|FE_BOF|FE_DFS|FE_LDSTR|FE_PFEN| \
- FE_RAM|FE_DIFF} \
+ FE_RAM|FE_DIFF|FE_VARCLK} \
, \
{PCI_DEVICE_ID_NCR_53C875J,0xff, "875J", 6, 16, 5, \
FE_WIDE|FE_ULTRA|FE_DBLR|FE_CACHE0_SET|FE_BOF|FE_DFS|FE_LDSTR|FE_PFEN| \
- FE_RAM} \
+ FE_RAM|FE_VARCLK} \
, \
{PCI_DEVICE_ID_NCR_53C885, 0xff, "885", 6, 16, 5, \
FE_WIDE|FE_ULTRA|FE_DBLR|FE_CACHE0_SET|FE_BOF|FE_DFS|FE_LDSTR|FE_PFEN| \
- FE_RAM|FE_DIFF} \
+ FE_RAM|FE_DIFF|FE_VARCLK} \
, \
{PCI_DEVICE_ID_NCR_53C895, 0xff, "895", 6, 31, 7, \
FE_WIDE|FE_ULTRA2|FE_QUAD|FE_CACHE_SET|FE_BOF|FE_DFS|FE_LDSTR|FE_PFEN| \
, \
{PCI_DEVICE_ID_NCR_53C896, 0xff, "896", 6, 31, 7, \
FE_WIDE|FE_ULTRA2|FE_QUAD|FE_CACHE_SET|FE_BOF|FE_DFS|FE_LDSTR|FE_PFEN| \
- FE_RAM|FE_RAM8K|FE_64BIT|FE_IO256|FE_NOPM|FE_LEDC} \
+ FE_RAM|FE_RAM8K|FE_64BIT|FE_DAC|FE_IO256|FE_NOPM|FE_LEDC} \
, \
{PCI_DEVICE_ID_NCR_53C895A, 0xff, "895a", 6, 31, 7, \
FE_WIDE|FE_ULTRA2|FE_QUAD|FE_CACHE_SET|FE_BOF|FE_DFS|FE_LDSTR|FE_PFEN| \
- FE_RAM|FE_RAM8K|FE_64BIT|FE_IO256|FE_NOPM|FE_LEDC} \
+ FE_RAM|FE_RAM8K|FE_DAC|FE_IO256|FE_NOPM|FE_LEDC} \
+ , \
+ {PCI_DEVICE_ID_NCR_53C875A, 0xff, "875a", 6, 31, 7, \
+ FE_WIDE|FE_ULTRA|FE_QUAD|FE_CACHE_SET|FE_BOF|FE_DFS|FE_LDSTR|FE_PFEN| \
+ FE_RAM|FE_DAC|FE_IO256|FE_NOPM|FE_LEDC} \
, \
{PCI_DEVICE_ID_NCR_53C1510D, 0xff, "1510D", 7, 31, 7, \
FE_WIDE|FE_ULTRA2|FE_QUAD|FE_CACHE_SET|FE_BOF|FE_DFS|FE_LDSTR|FE_PFEN| \
FE_RAM|FE_IO256} \
, \
- {PCI_DEVICE_ID_LSI_53C1010, 0xff, "1010", 6, 62, 7, \
+ {PCI_DEVICE_ID_LSI_53C1010, 0xff, "1010-33", 6, 62, 7, \
FE_WIDE|FE_QUAD|FE_CACHE_SET|FE_BOF|FE_DFS|FE_LDSTR|FE_PFEN| \
- FE_RAM|FE_RAM8K|FE_64BIT|FE_IO256|FE_NOPM|FE_LEDC|FE_ULTRA3} \
+ FE_RAM|FE_RAM8K|FE_64BIT|FE_DAC|FE_IO256|FE_NOPM|FE_LEDC|FE_ULTRA3} \
, \
- {PCI_DEVICE_ID_LSI_53C1010_66, 0xff, "1010_66", 6, 62, 7, \
+ {PCI_DEVICE_ID_LSI_53C1010_66, 0xff, "1010-66", 6, 62, 7, \
FE_WIDE|FE_QUAD|FE_CACHE_SET|FE_BOF|FE_DFS|FE_LDSTR|FE_PFEN| \
- FE_RAM|FE_RAM8K|FE_64BIT|FE_IO256|FE_NOPM|FE_LEDC|FE_ULTRA3|FE_66MHZ} \
+ FE_RAM|FE_RAM8K|FE_64BIT|FE_DAC|FE_IO256|FE_NOPM|FE_LEDC|FE_ULTRA3| \
+ FE_66MHZ} \
}
/*
u_char scsi_parity;
u_char disconnection;
u_char special_features;
- u_char ultra_scsi;
u_char force_sync_nego;
u_char reverse_probe;
u_char pci_fix_up;
SCSI_NCR_SETUP_SCSI_PARITY, \
SCSI_NCR_SETUP_DISCONNECTION, \
SCSI_NCR_SETUP_SPECIAL_FEATURES, \
- SCSI_NCR_SETUP_ULTRA_SCSI, \
SCSI_NCR_SETUP_FORCE_SYNC_NEGO, \
0, \
0, \
1, \
- 1, \
+ 0, \
SCSI_NCR_SETUP_DEFAULT_TAGS, \
SCSI_NCR_SETUP_DEFAULT_SYNC, \
- 0x0200, \
+ 0x00, \
7, \
SCSI_NCR_SETUP_LED_PIN, \
1, \
0, \
0, \
0, \
- 0, \
1, \
2, \
0, \
# Prompt user for primary drivers.
-dep_tristate ' C-Media PCI (CMI8338/8378)' CONFIG_SOUND_CMPCI $CONFIG_SOUND
+dep_tristate ' C-Media PCI (CMI8338/8378)' CONFIG_SOUND_CMPCI $CONFIG_SOUND $CONFIG_PCI
if [ "$CONFIG_SOUND_CMPCI" = "y" -o "$CONFIG_SOUND_CMPCI" = "m" ]; then
bool ' Enable S/PDIF loop for CMI8738' CONFIG_SOUND_CMPCI_SPDIFLOOP
bool ' Enable 4 channel mode for CMI8738' CONFIG_SOUND_CMPCI_4CH
* Added __init to gus_midi_init()
*/
-#include "linux/init.h"
+#include <linux/init.h>
#include "sound_config.h"
#include "gus.h"
/* maxinum number of AC97 codecs connected, AC97 2.0 defined 4 */
#define NR_AC97 2
+/* Please note that an 8bit mono stream is not valid on this card, you must have a 16bit */
+/* stream at a minimum for this card to be happy */
static const unsigned sample_size[] = { 1, 2, 2, 4 };
-static const unsigned sample_shift[] = { 0, 1, 1, 2 };
+/* Samples are 16bit values, so we are shifting to a word, not to a byte, hence shift */
+/* values are one less than might be expected */
+static const unsigned sample_shift[] = { -1, 0, 0, 1 };
enum {
ICH82801AA = 0,
unsigned char fmt, enable;
/* hardware channel */
- struct i810_channel *channel;
+ struct i810_channel *read_channel;
+ struct i810_channel *write_channel;
/* OSS buffer management stuff */
void *rawbuf;
/* our buffer acts like a circular ring */
unsigned hwptr; /* where dma last started, updated by update_ptr */
unsigned swptr; /* where driver last clear/filled, updated by read/write */
- int count; /* bytes to be comsumed or been generated by dma machine */
+ int count; /* bytes to be consumed or been generated by dma machine */
unsigned total_bytes; /* total bytes dmaed by hardware */
unsigned error; /* number of over/underruns */
/* Function support */
struct i810_channel *(*alloc_pcm_channel)(struct i810_card *);
struct i810_channel *(*alloc_rec_pcm_channel)(struct i810_card *);
+ struct i810_channel *(*alloc_rec_mic_channel)(struct i810_card *);
void (*free_pcm_channel)(struct i810_card *, int chan);
};
if(card->channel[1].used==1)
return NULL;
card->channel[1].used=1;
- card->channel[1].offset = 0;
- card->channel[1].port = 0x10;
- card->channel[1].num=1;
return &card->channel[1];
}
if(card->channel[0].used==1)
return NULL;
card->channel[0].used=1;
- card->channel[0].offset = 0;
- card->channel[0].port = 0x00;
- card->channel[1].num=0;
return &card->channel[0];
}
+static struct i810_channel *i810_alloc_rec_mic_channel(struct i810_card *card)
+{
+ if(card->channel[2].used==1)
+ return NULL;
+ card->channel[2].used=1;
+ return &card->channel[2];
+}
+
static void i810_free_pcm_channel(struct i810_card *card, int channel)
{
card->channel[channel].used=0;
static unsigned int i810_set_dac_rate(struct i810_state * state, unsigned int rate)
{
struct dmabuf *dmabuf = &state->dmabuf;
- u32 dacp;
+ u32 dacp, new_rate;
struct ac97_codec *codec=state->card->ac97_codec[0];
if(!(state->card->ac97_features&0x0001))
rate = 48000;
if (rate < 8000)
rate = 8000;
+ dmabuf->rate = rate;
/*
* Adjust for misclocked crap
rate = ( rate * clocking)/48000;
- /* Analog codecs can go lower via magic registers but others
- might not */
-
- if(rate < 8000)
- rate = 8000;
-
if(rate != i810_ac97_get(codec, AC97_PCM_FRONT_DAC_RATE))
{
/* Power down the DAC */
i810_ac97_set(codec, AC97_POWER_CONTROL, dacp|0x0200);
/* Load the rate and read the effective rate */
i810_ac97_set(codec, AC97_PCM_FRONT_DAC_RATE, rate);
- rate=i810_ac97_get(codec, AC97_PCM_FRONT_DAC_RATE);
+ new_rate=i810_ac97_get(codec, AC97_PCM_FRONT_DAC_RATE);
/* Power it back up */
i810_ac97_set(codec, AC97_POWER_CONTROL, dacp);
+ if(new_rate != rate) {
+ dmabuf->rate = (new_rate * 48000)/clocking;
+ rate = new_rate;
+ }
}
- rate=(rate * 48000) / clocking;
- dmabuf->rate = rate;
#ifdef DEBUG
- printk("i810_audio: called i810_set_dac_rate : rate = %d\n", rate);
+ printk("i810_audio: called i810_set_dac_rate : rate = %d/%d\n", dmabuf->rate, rate);
#endif
-
- return rate;
+ return dmabuf->rate;
}
/* set recording sample rate */
static unsigned int i810_set_adc_rate(struct i810_state * state, unsigned int rate)
{
struct dmabuf *dmabuf = &state->dmabuf;
- u32 dacp;
+ u32 dacp, new_rate;
struct ac97_codec *codec=state->card->ac97_codec[0];
if(!(state->card->ac97_features&0x0001))
rate = 48000;
if (rate < 8000)
rate = 8000;
+ dmabuf->rate = rate;
/*
* Adjust for misclocked crap
rate = ( rate * clocking)/48000;
- /* Analog codecs can go lower via magic registers but others
- might not */
-
- if(rate < 8000)
- rate = 8000;
-
if(rate != i810_ac97_get(codec, AC97_PCM_LR_DAC_RATE))
{
/* Power down the ADC */
i810_ac97_set(codec, AC97_POWER_CONTROL, dacp|0x0100);
/* Load the rate and read the effective rate */
i810_ac97_set(codec, AC97_PCM_LR_DAC_RATE, rate);
- rate=i810_ac97_get(codec, AC97_PCM_LR_DAC_RATE);
+ new_rate=i810_ac97_get(codec, AC97_PCM_LR_DAC_RATE);
/* Power it back up */
i810_ac97_set(codec, AC97_POWER_CONTROL, dacp);
+ if(new_rate != rate) {
+ dmabuf->rate = (new_rate * 48000)/clocking;
+ rate = new_rate;
+ }
}
- rate = (rate * 48000) / clocking;
- dmabuf->rate = rate;
#ifdef DEBUG
- printk("i810_audio: called i810_set_adc_rate : rate = %d\n", rate);
+ printk("i810_audio: called i810_set_adc_rate : rate = %d/%d\n", dmabuf->rate, rate);
#endif
- return rate;
+ return dmabuf->rate;
}
/* prepare channel attributes for playback */
{
struct dmabuf *dmabuf = &state->dmabuf;
unsigned int civ, offset;
- struct i810_channel *c = dmabuf->channel;
+ struct i810_channel *c;
if (!dmabuf->enable)
return 0;
+ if (dmabuf->enable & DAC_RUNNING)
+ c = dmabuf->write_channel;
+ else if (dmabuf->enable & ADC_RUNNING)
+ c = dmabuf->read_channel;
+ else {
+ printk("i810_audio: invalid dmabuf->enable state in get_dma_addr\n");
+ return 0;
+ }
do {
civ = inb(state->card->iobase+c->port+OFF_CIV);
offset = (civ + 1) * (dmabuf->dmasize/SG_LEN) -
return offset;
}
-static void resync_dma_ptrs(struct i810_state *state)
+static void resync_dma_ptrs(struct i810_state *state, int rec)
{
struct dmabuf *dmabuf = &state->dmabuf;
- struct i810_channel *c = dmabuf->channel;
+ struct i810_channel *c;
int offset;
-
+
+ if(rec) {
+ c = dmabuf->read_channel;
+ } else {
+ c = dmabuf->write_channel;
+ }
offset = inb(state->card->iobase+c->port+OFF_CIV);
offset *= (dmabuf->dmasize/SG_LEN);
spin_lock_irqsave(&card->lock, flags);
if ((dmabuf->mapped || dmabuf->count > 0) && dmabuf->ready) {
- if(!(dmabuf->enable&DAC_RUNNING))
- {
- dmabuf->enable |= DAC_RUNNING;
- outb((1<<4) | 1<<2 | 1, card->iobase + PO_CR);
- }
+ dmabuf->enable |= DAC_RUNNING;
+ outb((1<<4) | 1<<2 | 1, card->iobase + PO_CR);
}
spin_unlock_irqrestore(&card->lock, flags);
}
-#define DMABUF_DEFAULTORDER (15-PAGE_SHIFT)
+#define DMABUF_DEFAULTORDER (16-PAGE_SHIFT)
#define DMABUF_MINORDER 1
/* allocate DMA buffer, playback and recording buffer should be allocated seperately */
static int alloc_dmabuf(struct i810_state *state)
{
struct dmabuf *dmabuf = &state->dmabuf;
- void *rawbuf;
+ void *rawbuf= NULL;
int order;
struct page *page, *pend;
static int prog_dmabuf(struct i810_state *state, unsigned rec)
{
struct dmabuf *dmabuf = &state->dmabuf;
+ struct i810_channel *c;
struct sg_item *sg;
unsigned bytepersec;
unsigned bufsize;
int i;
spin_lock_irqsave(&state->card->lock, flags);
- resync_dma_ptrs(state);
+ resync_dma_ptrs(state, rec);
dmabuf->total_bytes = 0;
dmabuf->count = dmabuf->error = 0;
spin_unlock_irqrestore(&state->card->lock, flags);
return ret;
/* FIXME: figure out all this OSS fragment stuff */
- bytepersec = dmabuf->rate << sample_shift[dmabuf->fmt];
+	/* sample_shift is for 16 bit samples, add an extra shift for bytes */
+ bytepersec = dmabuf->rate << (sample_shift[dmabuf->fmt] + 1);
bufsize = PAGE_SIZE << dmabuf->buforder;
if (dmabuf->ossfragshift) {
if ((1000 << dmabuf->ossfragshift) < bytepersec)
memset(dmabuf->rawbuf, (dmabuf->fmt & I810_FMT_16BIT) ? 0 : 0x80,
dmabuf->dmasize);
- /*
- * Now set up the ring
- */
-
- sg=&dmabuf->channel->sg[0];
fragsize = bufsize / SG_LEN;
-
/*
- * Load up 32 sg entries and take an interrupt at half
- * way (we might want more interrupts later..)
+ * Now set up the ring
*/
+ if(dmabuf->read_channel)
+ c = dmabuf->read_channel;
+ else
+ c = dmabuf->write_channel;
+ while(c != NULL) {
+ sg=&c->sg[0];
+ /*
+ * Load up 32 sg entries and take an interrupt at half
+ * way (we might want more interrupts later..)
+ */
- for(i=0;i<32;i++)
- {
- sg->busaddr=virt_to_bus(dmabuf->rawbuf+fragsize*i);
- sg->control=(fragsize>>1);
- sg->control|=CON_IOC;
- sg++;
- }
+ for(i=0;i<32;i++)
+ {
+ sg->busaddr=virt_to_bus(dmabuf->rawbuf+fragsize*i);
+ sg->control=(fragsize>>sample_shift[dmabuf->fmt]);
+ sg->control|=CON_IOC;
+ sg++;
+ }
+ spin_lock_irqsave(&state->card->lock, flags);
+ outb(2, state->card->iobase+c->port+OFF_CR); /* reset DMA machine */
+ outl(virt_to_bus(&c->sg[0]), state->card->iobase+c->port+OFF_BDBAR);
+ outb(31, state->card->iobase+c->port+OFF_LVI);
+ outb(0, state->card->iobase+c->port+OFF_CIV);
- spin_lock_irqsave(&state->card->lock, flags);
- outb(2, state->card->iobase+dmabuf->channel->port+OFF_CR); /* reset DMA machine */
- outl(virt_to_bus(&dmabuf->channel->sg[0]), state->card->iobase+dmabuf->channel->port+OFF_BDBAR);
- outb(16, state->card->iobase+dmabuf->channel->port+OFF_LVI);
- outb(0, state->card->iobase+dmabuf->channel->port+OFF_CIV);
+ if (c == dmabuf->read_channel) {
+ i810_rec_setup(state);
+ } else {
+ i810_play_setup(state);
+ }
+ spin_unlock_irqrestore(&state->card->lock, flags);
- if (rec) {
- i810_rec_setup(state);
- } else {
- i810_play_setup(state);
+ if(c != dmabuf->write_channel)
+ c = dmabuf->write_channel;
+ else
+ c = NULL;
}
- spin_unlock_irqrestore(&state->card->lock, flags);
-
+
/* set the ready flag for the dma buffer */
dmabuf->ready = 1;
#ifdef DEBUG
- printk("i810_audio: prog_dmabuf, sample rate = %d, format = %d, numfrag = %d, "
+ printk("i810_audio: prog_dmabuf, sample rate = %d, format = %d,\n\tnumfrag = %d, "
"fragsize = %d dmasize = %d\n",
dmabuf->rate, dmabuf->fmt, dmabuf->numfrag,
dmabuf->fragsize, dmabuf->dmasize);
{
int i;
-// printk("CHANNEL IRQ .. ");
for(i=0;i<NR_HW_CH;i++)
{
struct i810_state *state = card->states[i];
continue;
if(!state->dmabuf.ready)
continue;
- c=state->dmabuf.channel;
+ if(state->dmabuf.enable & DAC_RUNNING)
+ c=state->dmabuf.write_channel;
+ else
+ c=state->dmabuf.read_channel;
port+=c->port;
-// printk("PORT %lX (", port);
-
status = inw(port + OFF_SR);
-// printk("ST%d ", status);
-
- if(status & DMA_INT_LVI)
- {
- /* Back to the start */
-// printk("LVI - STOP");
- outb((inb(port+OFF_CIV)-1)&31, port+OFF_LVI);
- i810_update_ptr(state);
- outb(0, port + OFF_CR);
- }
if(status & DMA_INT_COMPLETE)
{
int x;
/* Keep the card chasing its tail */
outb(x=((inb(port+OFF_CIV)-1)&31), port+OFF_LVI);
i810_update_ptr(state);
-// printk("COMP%d ",x);
}
-// printk(")");
+ if(status & DMA_INT_LVI)
+ {
+ /* Back to the start */
+ i810_update_ptr(state);
+ outb(0, port + OFF_CR);
+ }
outw(status & DMA_INT_MASK, port + OFF_SR);
}
-// printk("\n");
}
+static u32 jiff = 0;
+static u32 jiff_count = 0;
+
static void i810_interrupt(int irq, void *dev_id, struct pt_regs *regs)
{
struct i810_card *card = (struct i810_card *)dev_id;
spin_lock(&card->lock);
status = inl(card->iobase + GLOB_STA);
+
if(!(status & INT_MASK))
{
spin_unlock(&card->lock);
return; /* not for us */
}
-// printk("Interrupt %X: ", status);
if(status & (INT_PO|INT_PI|INT_MC))
i810_channel_interrupt(card);
return -ESPIPE;
if (dmabuf->mapped)
return -ENXIO;
+ if (dmabuf->enable & DAC_RUNNING)
+ return -ENODEV;
+ if (!dmabuf->read_channel) {
+ dmabuf->ready = 0;
+ dmabuf->read_channel = state->card->alloc_rec_pcm_channel(state->card);
+ if (!dmabuf->read_channel) {
+ return -ENODEV;
+ }
+ }
if (!dmabuf->ready && (ret = prog_dmabuf(state, 1)))
return ret;
if (!access_ok(VERIFY_WRITE, buffer, count))
return -ESPIPE;
if (dmabuf->mapped)
return -ENXIO;
+ if (dmabuf->enable & ADC_RUNNING)
+ return -ENODEV;
+ if (!dmabuf->write_channel) {
+ dmabuf->ready = 0;
+ dmabuf->write_channel = state->card->alloc_pcm_channel(state->card);
+ if(!dmabuf->write_channel)
+ return -ENODEV;
+ }
if (!dmabuf->ready && (ret = prog_dmabuf(state, 0)))
return ret;
if (!access_ok(VERIFY_READ, buffer, count))
return ret;
}
/* Not strictly correct but works */
- tmo = (dmabuf->dmasize * HZ) / (dmabuf->rate * 2);
- tmo >>= sample_shift[dmabuf->fmt];
+ tmo = dmabuf->rate << (sample_shift[dmabuf->fmt] + 1);
+ tmo = dmabuf->dmasize * HZ / tmo;
/* There are two situations when sleep_on_timeout returns, one is when
the interrupt is serviced correctly and the process is waked up by
ISR ON TIME. Another is when timeout is expired, which means that
unsigned int mask = 0;
if (file->f_mode & FMODE_WRITE) {
+ if (!dmabuf->write_channel)
+ return 0;
if (!dmabuf->ready && prog_dmabuf(state, 0))
return 0;
poll_wait(file, &dmabuf->wait, wait);
- }
- if (file->f_mode & FMODE_READ) {
+ } else {
+		// don't do both read and write paths or we won't get woken up properly
+ // when we have a file with both permissions
+ if (!dmabuf->read_channel)
+ return 0;
if (!dmabuf->ready && prog_dmabuf(state, 1))
return 0;
poll_wait(file, &dmabuf->wait, wait);
}
spin_lock_irqsave(&state->card->lock, flags);
i810_update_ptr(state);
- if (file->f_mode & FMODE_READ) {
+ if (file->f_mode & FMODE_READ && dmabuf->enable & ADC_RUNNING) {
if (dmabuf->count >= (signed)dmabuf->fragsize)
mask |= POLLIN | POLLRDNORM;
}
- if (file->f_mode & FMODE_WRITE) {
+ if (file->f_mode & FMODE_WRITE && dmabuf->enable & DAC_RUNNING) {
if (dmabuf->mapped) {
if (dmabuf->count >= (signed)dmabuf->fragsize)
mask |= POLLOUT | POLLWRNORM;
int ret = -EINVAL;
unsigned long size;
+ /*
+ * Until we figure out a few problems
+ */
+
lock_kernel();
if (vma->vm_flags & VM_WRITE) {
+ if (!dmabuf->write_channel && (dmabuf->write_channel = state->card->alloc_pcm_channel(state->card)) == NULL)
+ goto out;
if ((ret = prog_dmabuf(state, 0)) != 0)
goto out;
} else if (vma->vm_flags & VM_READ) {
+ if (!dmabuf->read_channel && (dmabuf->read_channel = state->card->alloc_rec_pcm_channel(state->card)) == NULL)
+ goto out;
if ((ret = prog_dmabuf(state, 1)) != 0)
goto out;
} else
goto out;
dmabuf->mapped = 1;
ret = 0;
+#ifdef DEBUG
+ printk("i810_audio: mmap'ed %d bytes of data space\n", size);
+#endif
out:
unlock_kernel();
return ret;
stop_dac(state);
synchronize_irq();
dmabuf->ready = 0;
- resync_dma_ptrs(state);
+ resync_dma_ptrs(state, 0);
dmabuf->swptr = dmabuf->hwptr = 0;
dmabuf->count = dmabuf->total_bytes = 0;
}
if (file->f_mode & FMODE_READ) {
stop_adc(state);
synchronize_irq();
- resync_dma_ptrs(state);
+ resync_dma_ptrs(state, 1);
dmabuf->ready = 0;
dmabuf->swptr = dmabuf->hwptr = 0;
dmabuf->count = dmabuf->total_bytes = 0;
if (file->f_mode & FMODE_WRITE) {
stop_dac(state);
dmabuf->ready = 0;
- dmabuf->fmt = I810_FMT_STEREO;
+ dmabuf->fmt |= I810_FMT_STEREO;
}
if (file->f_mode & FMODE_READ) {
stop_adc(state);
dmabuf->ready = 0;
- dmabuf->fmt = I810_FMT_STEREO;
+ dmabuf->fmt |= I810_FMT_STEREO;
}
return 0;
case SNDCTL_DSP_GETBLKSIZE:
if (file->f_mode & FMODE_WRITE) {
- if ((val = prog_dmabuf(state, 0)))
+ if (!dmabuf->ready && (val = prog_dmabuf(state, 0)))
return val;
return put_user(dmabuf->fragsize, (int *)arg);
}
if (file->f_mode & FMODE_READ) {
- if ((val = prog_dmabuf(state, 1)))
+ if (!dmabuf->ready && (val = prog_dmabuf(state, 1)))
return val;
return put_user(dmabuf->fragsize, (int *)arg);
}
if (file->f_mode & FMODE_WRITE) {
stop_dac(state);
dmabuf->ready = 0;
+ dmabuf->fmt |= I810_FMT_16BIT;
}
if (file->f_mode & FMODE_READ) {
stop_adc(state);
dmabuf->ready = 0;
+ dmabuf->fmt |= I810_FMT_16BIT;
}
}
return put_user(AFMT_S16_LE, (int *)arg);
if (val != 1 && val != 2 && val != 4)
return -EINVAL;
dmabuf->subdivision = val;
+ dmabuf->ready = 0;
return 0;
case SNDCTL_DSP_SETFRAGMENT:
dmabuf->ossfragshift = 15;
if (dmabuf->ossmaxfrags < 4)
dmabuf->ossmaxfrags = 4;
+ dmabuf->ready = 0;
return 0;
case SNDCTL_DSP_GETOSPACE:
if (!(file->f_mode & FMODE_WRITE))
return -EINVAL;
- if (!dmabuf->enable && (val = prog_dmabuf(state, 0)) != 0)
+ if (!dmabuf->ready && (val = prog_dmabuf(state, 0)) != 0)
return val;
spin_lock_irqsave(&state->card->lock, flags);
i810_update_ptr(state);
case SNDCTL_DSP_GETISPACE:
if (!(file->f_mode & FMODE_READ))
return -EINVAL;
- if (!dmabuf->enable && (val = prog_dmabuf(state, 1)) != 0)
+ if (!dmabuf->ready && (val = prog_dmabuf(state, 1)) != 0)
return val;
spin_lock_irqsave(&state->card->lock, flags);
i810_update_ptr(state);
case SNDCTL_DSP_GETTRIGGER:
val = 0;
- if (file->f_mode & FMODE_READ && dmabuf->enable)
+ if (file->f_mode & FMODE_READ && dmabuf->enable & ADC_RUNNING)
val |= PCM_ENABLE_INPUT;
- if (file->f_mode & FMODE_WRITE && dmabuf->enable)
+ if (file->f_mode & FMODE_WRITE && dmabuf->enable & DAC_RUNNING)
val |= PCM_ENABLE_OUTPUT;
return put_user(val, (int *)arg);
case SNDCTL_DSP_SETTRIGGER:
if (get_user(val, (int *)arg))
return -EFAULT;
- if (file->f_mode & FMODE_READ) {
- if (val & PCM_ENABLE_INPUT) {
- if (!dmabuf->ready && (ret = prog_dmabuf(state, 1)))
- return ret;
- start_adc(state);
- } else
- stop_adc(state);
+ if (file->f_mode & FMODE_READ && val & PCM_ENABLE_INPUT) {
+ if (dmabuf->enable & DAC_RUNNING)
+ return -ENODEV;
+ if (!dmabuf->read_channel) {
+ dmabuf->ready = 0;
+ dmabuf->read_channel = state->card->alloc_rec_pcm_channel(state->card);
+ if (!dmabuf->read_channel)
+ return -ENODEV;
+ }
+ if (!dmabuf->ready && (ret = prog_dmabuf(state, 1)))
+ return ret;
+ start_adc(state);
}
- if (file->f_mode & FMODE_WRITE) {
- if (val & PCM_ENABLE_OUTPUT) {
- if (!dmabuf->ready && (ret = prog_dmabuf(state, 0)))
- return ret;
- start_dac(state);
- } else
- stop_dac(state);
+ if (file->f_mode & FMODE_WRITE && val & PCM_ENABLE_OUTPUT) {
+ if (dmabuf->enable & ADC_RUNNING)
+ return -ENODEV;
+ if (!dmabuf->write_channel) {
+ dmabuf->ready = 0;
+ dmabuf->write_channel = state->card->alloc_pcm_channel(state->card);
+ if (!dmabuf->write_channel)
+ return -ENODEV;
+ }
+ if (!dmabuf->ready && (ret = prog_dmabuf(state, 0)))
+ return ret;
+ start_dac(state);
}
return 0;
return -ENODEV;
found_virt:
- /* found a free virtual channel, allocate hardware channels */
- if(file->f_mode & FMODE_READ)
- dmabuf->channel = card->alloc_rec_pcm_channel(card);
- else
- dmabuf->channel = card->alloc_pcm_channel(card);
-
- if (dmabuf->channel == NULL) {
- kfree (card->states[i]);
- card->states[i] = NULL;;
- return -ENODEV;
- }
-
/* initialize the virtual channel */
state->virt = i;
state->card = card;
init_MUTEX(&state->open_sem);
file->private_data = state;
+ /* allocate hardware channels */
+ if(file->f_mode & FMODE_READ) {
+ if((dmabuf->read_channel = card->alloc_rec_pcm_channel(card)) == NULL) {
+ kfree (card->states[i]);
+ card->states[i] = NULL;;
+ return -ENODEV;
+ }
+ i810_set_adc_rate(state, 48000);
+ }
+ if(file->f_mode & FMODE_WRITE) {
+ if((dmabuf->write_channel = card->alloc_pcm_channel(card)) == NULL) {
+ kfree (card->states[i]);
+ card->states[i] = NULL;;
+ return -ENODEV;
+ }
+ i810_set_dac_rate(state, 48000);
+ }
+
down(&state->open_sem);
/* set default sample format. According to OSS Programmer's Guide /dev/dsp
should be default to unsigned 8-bits, mono, with sample rate 8kHz and
/dev/dspW will accept 16-bits sample */
- if (file->f_mode & FMODE_WRITE) {
- dmabuf->fmt &= ~I810_FMT_MASK;
- dmabuf->fmt |= I810_FMT_16BIT;
- dmabuf->ossfragshift = 0;
- dmabuf->ossmaxfrags = 0;
- dmabuf->subdivision = 0;
- i810_set_dac_rate(state, 48000);
- }
-
- if (file->f_mode & FMODE_READ) {
- dmabuf->fmt &= ~I810_FMT_MASK;
- dmabuf->fmt |= I810_FMT_16BIT;
- dmabuf->ossfragshift = 0;
- dmabuf->ossmaxfrags = 0;
- dmabuf->subdivision = 0;
- i810_set_adc_rate(state, 48000);
- }
+ dmabuf->fmt &= ~I810_FMT_MASK;
+ dmabuf->fmt |= I810_FMT_16BIT;
+ dmabuf->ossfragshift = 0;
+ dmabuf->ossmaxfrags = 0;
+ dmabuf->subdivision = 0;
state->open_mode |= file->f_mode & (FMODE_READ | FMODE_WRITE);
up(&state->open_sem);
lock_kernel();
if (file->f_mode & FMODE_WRITE) {
- i810_clear_tail(state);
- drain_dac(state, file->f_flags & O_NONBLOCK);
}
/* stop DMA state machine and free DMA buffers/channels */
down(&state->open_sem);
- if (file->f_mode & FMODE_WRITE) {
+ if (dmabuf->enable & DAC_RUNNING) {
+ i810_clear_tail(state);
+ drain_dac(state, file->f_flags & O_NONBLOCK);
stop_dac(state);
dealloc_dmabuf(state);
- state->card->free_pcm_channel(state->card, dmabuf->channel->num);
}
- if (file->f_mode & FMODE_READ) {
+ if(dmabuf->enable & ADC_RUNNING) {
stop_adc(state);
dealloc_dmabuf(state);
- state->card->free_pcm_channel(state->card, dmabuf->channel->num);
+ }
+ if (file->f_mode & FMODE_WRITE) {
+ state->card->free_pcm_channel(state->card, dmabuf->write_channel->num);
+ }
+ if (file->f_mode & FMODE_READ) {
+ state->card->free_pcm_channel(state->card, dmabuf->read_channel->num);
}
/* we're covered by the open_sem */
up(&state->open_sem);
- kfree(state->card->states[state->virt]);
state->card->states[state->virt] = NULL;
+ kfree(state);
unlock_kernel();
return 0;
for (card = devs; card != NULL; card = card->next)
for (i = 0; i < NR_AC97; i++)
if (card->ac97_codec[i] != NULL &&
- card->ac97_codec[i]->dev_mixer == minor)
- goto match;
-
- if (!card)
- return -ENODEV;
-
- match:
- file->private_data = card->ac97_codec[i];
-
- return 0;
+ card->ac97_codec[i]->dev_mixer == minor) {
+ file->private_data = card->ac97_codec[i];
+ return 0;
+ }
+ return -ENODEV;
}
static int i810_ioctl_mixdev(struct inode *inode, struct file *file, unsigned int cmd,
printk(KERN_WARNING "i810_audio: only 48Khz playback available.\n");
else
{
- /* Enable variable rate mode */
- i810_ac97_set(codec, AC97_EXTENDED_STATUS, 9);
- i810_ac97_set(codec,AC97_EXTENDED_STATUS,
- i810_ac97_get(codec, AC97_EXTENDED_STATUS)|0xE800);
/* power up everything, modify this when implementing power saving */
i810_ac97_set(codec, AC97_POWER_CONTROL,
i810_ac97_get(codec, AC97_POWER_CONTROL) & ~0x7f00);
schedule_timeout(HZ/20);
}
+ /* Enable variable rate mode */
+ i810_ac97_set(codec, AC97_EXTENDED_STATUS, 9);
+ i810_ac97_set(codec,AC97_EXTENDED_STATUS,
+ i810_ac97_get(codec, AC97_EXTENDED_STATUS)|0xE800);
+
if(!(i810_ac97_get(codec, AC97_EXTENDED_STATUS)&1))
{
printk(KERN_WARNING "i810_audio: Codec refused to allow VRA, using 48Khz only.\n");
card->alloc_pcm_channel = i810_alloc_pcm_channel;
card->alloc_rec_pcm_channel = i810_alloc_rec_pcm_channel;
+ card->alloc_rec_mic_channel = i810_alloc_rec_mic_channel;
card->free_pcm_channel = i810_free_pcm_channel;
+ card->channel[0].offset = 0;
+ card->channel[0].port = 0x00;
+ card->channel[0].num=0;
+ card->channel[1].offset = 0;
+ card->channel[1].port = 0x10;
+ card->channel[1].num=1;
+ card->channel[2].offset = 0;
+ card->channel[2].port = 0x20;
+ card->channel[2].num=2;
/* claim our iospace and irq */
request_region(card->iobase, 64, card_names[pci_id->driver_data]);
remove: i810_remove,
};
+static void __init i810_configure_clocking (void)
+{
+ struct i810_card *card;
+ struct i810_state *state;
+ struct dmabuf *dmabuf;
+ unsigned int i, offset, new_offset;
+ unsigned long flags;
+
+ card = devs;
+ /* We could try to set the clocking for multiple cards, but can you even have
+ * more than one i810 in a machine? Besides, clocking is global, so unless
+ * someone actually thinks more than one i810 in a machine is possible and
+ * decides to rewrite that little bit, setting the rate for more than one card
+ * is a waste of time.
+ */
+ if(card != NULL) {
+ state = card->states[0] = (struct i810_state *)
+ kmalloc(sizeof(struct i810_state), GFP_KERNEL);
+ if (state == NULL)
+ return;
+ memset(state, 0, sizeof(struct i810_state));
+ dmabuf = &state->dmabuf;
+
+ dmabuf->write_channel = card->alloc_pcm_channel(card);
+ state->virt = 0;
+ state->card = card;
+ state->magic = I810_STATE_MAGIC;
+ init_waitqueue_head(&dmabuf->wait);
+ init_MUTEX(&state->open_sem);
+ dmabuf->fmt = I810_FMT_STEREO | I810_FMT_16BIT;
+ i810_set_dac_rate(state, 48000);
+ if(prog_dmabuf(state, 0) != 0) {
+ goto config_out_nodmabuf;
+ }
+ if(dmabuf->dmasize < 16384) {
+ goto config_out;
+ }
+ dmabuf->count = dmabuf->dmasize;
+ save_flags(flags);
+ cli();
+ start_dac(state);
+ offset = i810_get_dma_addr(state);
+ mdelay(50);
+ new_offset = i810_get_dma_addr(state);
+ stop_dac(state);
+ outb(2,card->iobase+dmabuf->write_channel->port+OFF_CR);
+ restore_flags(flags);
+ i = new_offset - offset;
+ printk("i810_audio: %d bytes in 50 milliseconds\n", i);
+ i = i / 4 * 20;
+ if (i > 48500 || i < 47500) {
+ clocking = clocking * clocking / i;
+ printk("i810_audio: setting clocking to %d to compensate\n", clocking);
+ }
+config_out_nodmabuf:
+ dealloc_dmabuf(state);
+config_out:
+ state->card->free_pcm_channel(state->card,state->dmabuf.write_channel->num);
+ kfree(state);
+ card->states[0] = NULL;
+ }
+}
+
static int __init i810_init_module (void)
{
if (!pci_present()) /* No PCI bus in this machine! */
return -ENODEV;
- if(ftsodell==1)
- clocking=41194;
-
printk(KERN_INFO "Intel 810 + AC97 Audio, version "
DRIVER_VERSION ", " __TIME__ " " __DATE__ "\n");
pci_unregister_driver(&i810_pci_driver);
return -ENODEV;
}
+ if(ftsodell != 0) {
+ printk("i810_audio: ftsodell is now a deprecated option.\n");
+ }
+ if(clocking == 48000) {
+ i810_configure_clocking();
+ }
return 0;
}
#include "sound_config.h"
#include "sound_firmware.h"
#ifdef MSND_CLASSIC
+# ifndef __alpha__
# define SLOWIO
+# endif
#endif
#include "msnd.h"
#ifdef MSND_CLASSIC
return 0;
}
-#ifdef MODULE
static void __exit unload_multisound(void)
{
release_region(dev.io, dev.numio);
unregister_sound_dsp(dev.dsp_minor);
msnd_unregister(&dev);
}
-#endif
#ifndef MSND_CLASSIC
static int ymf_open(struct inode *inode, struct file *file)
{
struct list_head *list;
- ymfpci_t *unit;
+ ymfpci_t *unit = NULL;
int minor;
struct ymf_state *state;
int err;
extern struct semaphore us_list_semaphore;
/* The structure which defines our driver */
-struct usb_driver usb_storage_driver;
+extern struct usb_driver usb_storage_driver;
/* Function to fill an inquiry response. See usb.c for details */
extern void fill_inquiry_response(struct us_data *us,
/*
* drivers/video/clgenfb.c - driver for Cirrus Logic chipsets
*
- * Copyright 1999,2000 Jeff Garzik <jgarzik@mandrakesoft.com>
+ * Copyright 1999-2001 Jeff Garzik <jgarzik@mandrakesoft.com>
*
* Contributors (thanks, all!)
*
USHORT SetFlag,RVBHCFACT,RVBHCMAX,VGAVT,VGAHT,VT,HT,VGAVDE,VGAHDE;
USHORT VDE,HDE,RVBHRS,NewFlickerMode,RY1COE,RY2COE,RY3COE,RY4COE;
-;USHORT LCDResInfo,LCDTypeInfo,LCDInfo;
+extern USHORT LCDResInfo,LCDTypeInfo,LCDInfo;
USHORT VCLKLen;
USHORT LCDHDES,LCDVDES;
error = bprm->file->f_op->read(bprm->file, (char *)text_addr,
ex.a_text+ex.a_data, &pos);
- if (error < 0) {
+ if ((signed long)error < 0) {
send_sig(SIGKILL, current, 0);
return error;
}
tsk->session = 1;
tsk->pgrp = 1;
- strcpy(tsk->comm, "kupdate");
+ strcpy(tsk->comm, "kupdated");
/* sigstop and sigcont will stop and wakeup kupdate */
spin_lock_irq(&tsk->sigmask_lock);
ChangeLog for smbfs.
+2001-03-06 Urban Widmark <urban@teststation.com>
+
+ * cache.c: d_add on hashed dentries corrupts d_hash list and
+ causes loops in d_lookup. Inherited bug. :)
+ * inode.c: tail -f fix for non-readonly opened files
+ (related to the smb_proc_open change).
+ * inode.c: tail -f fix for fast size changes with the same mtime.
+
+2001-03-02 Michael Kockelkorn <m.kockelkorn@biodata.com>
+
+ * proc.c: fix smb_proc_open to allow open being called more than once
+ with different modes (O_RDONLY -> O_WRONLY) without closing.
+
2001-02-10 Urban Widmark <urban@teststation.com>
- * dir.c: replace non-bigmem safe cache with cache code from ncpfs
- and fix some other bigmem bugs in smbfs.
+ * dir.c, cache.c: replace non-bigmem safe cache with cache code
+ from ncpfs and fix some other bigmem bugs in smbfs.
* inode.c: root dentry not properly initialized
* proc.c, sock.c: adjust max parameters & max data to follow max_xmit
lots of servers were having find_next trouble with this.
struct inode *newino, *inode = dentry->d_inode;
struct smb_cache_control ctl = *ctrl;
int valid = 0;
+ int hashed = 0;
ino_t ino = 0;
qname->hash = full_name_hash(qname->name, qname->len);
newdent = d_alloc(dentry, qname);
if (!newdent)
goto end_advance;
- } else
+ } else {
+ hashed = 1;
memcpy((char *) newdent->d_name.name, qname->name,
newdent->d_name.len);
+ }
if (!newdent->d_inode) {
smb_renew_times(newdent);
newino = smb_iget(inode->i_sb, entry);
if (newino) {
smb_new_dentry(newdent);
- d_add(newdent, newino);
+ d_instantiate(newdent, newino);
+ if (!hashed)
+ d_rehash(newdent);
}
} else
smb_set_inode_attr(newdent->d_inode, entry);
struct smb_fattr fattr;
error = smb_proc_getattr(dentry, &fattr);
- if (!error)
- {
+ if (!error) {
smb_renew_times(dentry);
/*
* Check whether the type part of the mode changed,
* and don't update the attributes if it did.
*/
- if ((inode->i_mode & S_IFMT) == (fattr.f_mode & S_IFMT))
+ if ((inode->i_mode & S_IFMT) == (fattr.f_mode & S_IFMT)) {
smb_set_inode_attr(inode, &fattr);
- else
- {
+ } else {
/*
* Big trouble! The inode has become a new object,
* so any operations attempted on it are invalid.
struct smb_sb_info *s = server_from_dentry(dentry);
struct inode *inode = dentry->d_inode;
time_t last_time;
+ loff_t last_sz;
int error = 0;
DEBUG1("smb_revalidate_inode\n");
- /*
- * If this is a file opened with write permissions,
- * the inode will be up-to-date.
- */
lock_kernel();
- if (S_ISREG(inode->i_mode) && smb_is_open(inode)) {
- if (inode->u.smbfs_i.access != SMB_O_RDONLY)
- goto out;
- }
/*
* Check whether we've recently refreshed the inode.
/*
* Save the last modified time, then refresh the inode.
- * (Note: a size change should have a different mtime.)
+ * (Note: a size change should have a different mtime,
+ * or same mtime but different size.)
*/
last_time = inode->i_mtime;
+ last_sz = inode->i_size;
error = smb_refresh_inode(dentry);
- if (error || inode->i_mtime != last_time) {
+ if (error || inode->i_mtime != last_time || inode->i_size != last_sz) {
VERBOSE("%s/%s changed, old=%ld, new=%ld\n",
DENTRY_PATH(dentry),
(long) last_time, (long) inode->i_mtime);
#if 0
/* FIXME: why is this code not in? below we fix it so that a caller
wanting RO doesn't get RW. smb_revalidate_inode does some
- optimization based on access mode. tail -f needs it to be correct. */
+ optimization based on access mode. tail -f needs it to be correct.
+
+ We must open rw since we don't do the open if called a second time
+ with different 'wish'. Is that not supported by smb servers? */
if (!(wish & (O_WRONLY | O_RDWR)))
mode = read_only;
#endif
/* smb_vwv2 has mtime */
/* smb_vwv4 has size */
ino->u.smbfs_i.access = (WVAL(server->packet, smb_vwv6) & SMB_ACCMASK);
- if (!(wish & (O_WRONLY | O_RDWR)))
- ino->u.smbfs_i.access = SMB_O_RDONLY;
ino->u.smbfs_i.open = server->generation;
out:
int result;
result = -ENOENT;
- if (!inode)
- {
+ if (!inode) {
printk(KERN_ERR "smb_open: no inode for dentry %s/%s\n",
DENTRY_PATH(dentry));
goto out;
}
- if (!smb_is_open(inode))
- {
+ if (!smb_is_open(inode)) {
struct smb_sb_info *server = SMB_SERVER(inode);
smb_lock_server(server);
result = 0;
if (!smb_is_open(inode))
result = smb_proc_open(server, dentry, wish);
smb_unlock_server(server);
- if (result)
- {
+ if (result) {
PARANOIA("%s/%s open failed, result=%d\n",
DENTRY_PATH(dentry), result);
goto out;
frag_to_free = tmp;
free_count = uspi->s_fpb;
}
-next2:
+next2:;
}
if (free_count > 0)
}
inode->i_blocks -= uspi->s_nspb;
mark_inode_dirty(inode);
-next:
+next:;
}
if (free_count > 0) {
#include <linux/umsdos_fs.h>
#include <linux/dcache.h>
#include <linux/pagemap.h>
-
-#include <asm/delay.h>
+#include <linux/delay.h>
static void copy_entry(struct umsdos_dirent *p, struct umsdos_dirent *q)
{
/* max length of hostname */
#define MAXHOSTNAMELEN 64
+#ifdef __KERNEL__
+# define CLOCKS_PER_SEC HZ
+#endif
+
#endif
/* FIXME: this is not correct */
#define kern_addr_valid(addr) (1)
-#define io_remap_page_range remap_page_range
-
#include <asm-generic/pgtable.h>
#endif /* !__ASSEMBLY__ */
/* We use 33-bit arithmetic here... */
#define __range_ok(addr,size) ({ \
unsigned long flag, sum; \
- __asm__ __volatile__("adds %1, %2, %3; sbcccs %1, %1, %0; movcc %0, #0" \
+ __asm__("adds %1, %2, %3; sbcccs %1, %1, %0; movcc %0, #0" \
: "=&r" (flag), "=&r" (sum) \
: "r" (addr), "Ir" (size), "0" (current->addr_limit) \
: "cc"); \
#define __addr_ok(addr) ({ \
unsigned long flag; \
- __asm__ __volatile__("cmp %2, %0; movlo %0, #0" \
+ __asm__("cmp %2, %0; movlo %0, #0" \
: "=&r" (flag) \
: "0" (current->addr_limit), "r" (addr) \
: "cc"); \
#define __put_user_asm_half(x,addr,err) \
({ \
unsigned long __temp = (unsigned long)(x); \
- __asm__ __volatile__( \
- "1: strbt %1,[%3],#0\n" \
- "2: strbt %2,[%4],#0\n" \
- "3:\n" \
- " .section .fixup,\"ax\"\n" \
- " .align 2\n" \
- "4: mov %0, %5\n" \
- " b 3b\n" \
- " .previous\n" \
- " .section __ex_table,\"a\"\n" \
- " .align 3\n" \
- " .long 1b, 4b\n" \
- " .long 2b, 4b\n" \
- " .previous" \
- : "=r" (err) \
- : "r" (__temp), "r" (__temp >> 8), \
- "r" (addr), "r" ((int)(addr) + 1), \
- "i" (-EFAULT), "0" (err)); \
+ __put_user_asm_byte(__temp, addr, err); \
+ __put_user_asm_byte(__temp >> 8, (int)(addr) + 1, err); \
})
#define __put_user_asm_word(x,addr,err) \
#define __get_user_asm_half(x,addr,err) \
({ \
- unsigned long __temp; \
- __asm__ __volatile__( \
- "1: ldrbt %1,[%3],#0\n" \
- "2: ldrbt %2,[%4],#0\n" \
- " orr %1, %1, %2, lsl #8\n" \
- "3:\n" \
- " .section .fixup,\"ax\"\n" \
- " .align 2\n" \
- "4: mov %0, %5\n" \
- " mov %1, #0\n" \
- " b 3b\n" \
- " .previous\n" \
- " .section __ex_table,\"a\"\n" \
- " .align 3\n" \
- " .long 1b, 4b\n" \
- " .long 2b, 4b\n" \
- " .previous" \
- : "=r" (err), "=r" (x), "=&r" (__temp) \
- : "r" (addr), "r" ((int)(addr) + 1), \
- "i" (-EFAULT), "0" (err)); \
+ unsigned long __b1, __b2; \
+ __get_user_asm_byte(__b1, addr, err); \
+ __get_user_asm_byte(__b2, (int)(addr) + 1, err); \
+ (x) = __b1 | (__b2 << 8); \
})
#ifdef CONFIG_CPU_32
# define CPU_INCLUDE_NAME "asm/cpu-multi32.h"
-# ifdef CONFIG_CPU_ARM6
+# ifdef CONFIG_CPU_ARM610
# ifdef CPU_NAME
# undef MULTI_CPU
# define MULTI_CPU
# define CPU_NAME arm6
# endif
# endif
-# ifdef CONFIG_CPU_ARM7
+# ifdef CONFIG_CPU_ARM710
# ifdef CPU_NAME
# undef MULTI_CPU
# define MULTI_CPU
# define CPU_NAME arm7
# endif
# endif
-# ifdef CONFIG_CPU_ARM720
+# ifdef CONFIG_CPU_ARM720T
# ifdef CPU_NAME
# undef MULTI_CPU
# define MULTI_CPU
# define CPU_NAME arm720
# endif
# endif
-# ifdef CONFIG_CPU_ARM920
+# ifdef CONFIG_CPU_ARM920T
# ifdef CPU_NAME
# undef MULTI_CPU
# define MULTI_CPU
#define __put_user_nocheck(x,ptr,size) \
({ \
long __pu_err = 0; \
- __put_user_size((x),(ptr),(size),__pu_err); \
+ __typeof__(*(ptr)) *__pu_addr = (ptr); \
+ __put_user_size((x),__pu_addr,(size),__pu_err); \
__pu_err; \
})
static inline pid_t waitpid(pid_t pid, int *wait_stat, int options)
{
- extern long sys_wait4(int, int *, int, struct rusage *);
return sys_wait4((int)pid, wait_stat, options, NULL);
}
static inline pid_t wait(int * wait_stat)
{
- extern long sys_wait4(int, int *, int, struct rusage *);
return sys_wait4(-1, wait_stat, 0, NULL);
}
+/*
+ * linux/include/asm-arm/xor.h
+ *
+ * Copyright (C) 2001 Russell King
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
#include <asm-generic/xor.h>
+
+#define __XOR(a1, a2) a1 ^= a2
+
+#define GET_BLOCK_2(dst) \
+ __asm__("ldmia %0, {%1, %2}" \
+ : "=r" (dst), "=r" (a1), "=r" (a2) \
+ : "0" (dst))
+
+#define GET_BLOCK_4(dst) \
+ __asm__("ldmia %0, {%1, %2, %3, %4}" \
+ : "=r" (dst), "=r" (a1), "=r" (a2), "=r" (a3), "=r" (a4) \
+ : "0" (dst))
+
+#define XOR_BLOCK_2(src) \
+ __asm__("ldmia %0!, {%1, %2}" \
+ : "=r" (src), "=r" (b1), "=r" (b2) \
+ : "0" (src)); \
+ __XOR(a1, b1); __XOR(a2, b2);
+
+#define XOR_BLOCK_4(src) \
+ __asm__("ldmia %0!, {%1, %2, %3, %4}" \
+ : "=r" (src), "=r" (b1), "=r" (b2), "=r" (b3), "=r" (b4) \
+ : "0" (src)); \
+ __XOR(a1, b1); __XOR(a2, b2); __XOR(a3, b3); __XOR(a4, b4)
+
+#define PUT_BLOCK_2(dst) \
+ __asm__ __volatile__("stmia %0!, {%2, %3}" \
+ : "=r" (dst) \
+ : "0" (dst), "r" (a1), "r" (a2))
+
+#define PUT_BLOCK_4(dst) \
+ __asm__ __volatile__("stmia %0!, {%2, %3, %4, %5}" \
+ : "=r" (dst) \
+ : "0" (dst), "r" (a1), "r" (a2), "r" (a3), "r" (a4))
+
+static void
+xor_arm4regs_2(unsigned long bytes, unsigned long *p1, unsigned long *p2)
+{
+ unsigned int lines = bytes / sizeof(unsigned long) / 4;
+ register unsigned int a1 __asm__("r4");
+ register unsigned int a2 __asm__("r5");
+ register unsigned int a3 __asm__("r6");
+ register unsigned int a4 __asm__("r7");
+ register unsigned int b1 __asm__("r8");
+ register unsigned int b2 __asm__("r9");
+ register unsigned int b3 __asm__("ip");
+ register unsigned int b4 __asm__("lr");
+
+ do {
+ GET_BLOCK_4(p1);
+ XOR_BLOCK_4(p2);
+ PUT_BLOCK_4(p1);
+ } while (--lines);
+}
+
+static void
+xor_arm4regs_3(unsigned long bytes, unsigned long *p1, unsigned long *p2,
+ unsigned long *p3)
+{
+ unsigned int lines = bytes / sizeof(unsigned long) / 4;
+ register unsigned int a1 __asm__("r4");
+ register unsigned int a2 __asm__("r5");
+ register unsigned int a3 __asm__("r6");
+ register unsigned int a4 __asm__("r7");
+ register unsigned int b1 __asm__("r8");
+ register unsigned int b2 __asm__("r9");
+ register unsigned int b3 __asm__("ip");
+ register unsigned int b4 __asm__("lr");
+
+ do {
+ GET_BLOCK_4(p1);
+ XOR_BLOCK_4(p2);
+ XOR_BLOCK_4(p3);
+ PUT_BLOCK_4(p1);
+ } while (--lines);
+}
+
+static void
+xor_arm4regs_4(unsigned long bytes, unsigned long *p1, unsigned long *p2,
+ unsigned long *p3, unsigned long *p4)
+{
+ unsigned int lines = bytes / sizeof(unsigned long) / 2;
+ register unsigned int a1 __asm__("r8");
+ register unsigned int a2 __asm__("r9");
+ register unsigned int b1 __asm__("ip");
+ register unsigned int b2 __asm__("lr");
+
+ do {
+ GET_BLOCK_2(p1);
+ XOR_BLOCK_2(p2);
+ XOR_BLOCK_2(p3);
+ XOR_BLOCK_2(p4);
+ PUT_BLOCK_2(p1);
+ } while (--lines);
+}
+
+static void
+xor_arm4regs_5(unsigned long bytes, unsigned long *p1, unsigned long *p2,
+ unsigned long *p3, unsigned long *p4, unsigned long *p5)
+{
+ unsigned int lines = bytes / sizeof(unsigned long) / 2;
+ register unsigned int a1 __asm__("r8");
+ register unsigned int a2 __asm__("r9");
+ register unsigned int b1 __asm__("ip");
+ register unsigned int b2 __asm__("lr");
+
+ do {
+ GET_BLOCK_2(p1);
+ XOR_BLOCK_2(p2);
+ XOR_BLOCK_2(p3);
+ XOR_BLOCK_2(p4);
+ XOR_BLOCK_2(p5);
+ PUT_BLOCK_2(p1);
+ } while (--lines);
+}
+
+static struct xor_block_template xor_block_arm4regs = {
+ name: "arm4regs",
+ do_2: xor_arm4regs_2,
+ do_3: xor_arm4regs_3,
+ do_4: xor_arm4regs_4,
+ do_5: xor_arm4regs_5,
+};
+
+#undef XOR_TRY_TEMPLATES
+#define XOR_TRY_TEMPLATES \
+ do { \
+ xor_speed(&xor_block_arm4regs); \
+ xor_speed(&xor_block_8regs); \
+ xor_speed(&xor_block_32regs); \
+ } while (0)
* highmem.h: virtual kernel memory mappings for high memory
*
* Used in CONFIG_HIGHMEM systems for memory pages which
- * are not addressable by direct kernel virtual adresses.
+ * are not addressable by direct kernel virtual addresses.
*
* Copyright (C) 1999 Gerhard Wichert, Siemens AG
* Gerhard.Wichert@pdb.siemens.de
*
*
* Redesigned the x86 32-bit VM architecture to deal with
- * up to 16 Terrabyte physical memory. With current x86 CPUs
+ * up to 16 Terabyte physical memory. With current x86 CPUs
* we now support up to 64 Gigabytes physical RAM.
*
* Copyright (C) 1999 Ingo Molnar <mingo@redhat.com>
*/
extern inline int pci_dma_supported(struct pci_dev *hwdev, dma_addr_t mask)
{
+ /*
+ * we fall back to GFP_DMA when the mask isn't all 1s,
+ * so we can't guarantee allocations that must be
+ * within a tighter range than GFP_DMA..
+ */
+ if(mask < 0x00ffffff)
+ return 0;
+
return 1;
}
ds:2, /* Data size */
gbr:1, /* GBR enable */
vbpm:1, /* VBPM message */
- error:1, /* Error occured */
+ error:1, /* Error occurred */
barr:1, /* Barrier op */
rsvd:8;
} berr_st;
#define BRIDGE_INT_ADDR(x) (BRIDGE_INT_ADDR0+(x)*BRIDGE_INT_ADDR_OFF)
#define BRIDGE_INT_VIEW 0x000174 /* Interrupt view */
-#define BRIDGE_MULTIPLE_INT 0x00017c /* Multiple interrupt occured */
+#define BRIDGE_MULTIPLE_INT 0x00017c /* Multiple interrupt occurred */
#define BRIDGE_FORCE_ALWAYS0 0x000184 /* Force an interrupt (always)*/
#define BRIDGE_FORCE_ALWAYS_OFF 0x000008 /* Force Always offset */
#ifdef IRIX
toid_t bserr_toutid; /* Timeout started by errintr */
#endif
- iopaddr_t bserr_addr; /* Address where error occured */
+ iopaddr_t bserr_addr; /* Address where error occurred */
bridgereg_t bserr_intstat; /* interrupts active at error time */
} bs_errinfo;
* corresponds to the valid bit, and bit 1 of each two-bit field *
* corresponds to the overrun bit. *
* The rule for the valid bit is that it gets set whenever that error *
- * occurs, regardless of whether a higher priority error has occured. *
+ * occurs, regardless of whether a higher priority error has occurred. *
* The rule for the overrun bit is that it gets set whenever we are *
* unable to record the address information for this particular *
* error, due to a previous error of the same or higher priority. *
* corresponds to the valid bit, and bit 1 of each two-bit field *
* corresponds to the overrun bit. *
* The rule for the valid bit is that it gets set whenever that error *
- * occurs, regardless of whether a higher priority error has occured. *
+ * occurs, regardless of whether a higher priority error has occurred. *
* The rule for the overrun bit is that it gets set whenever we are *
* unable to record the address information for this particular *
* error, due to a previous error of the same or higher priority. *
#define MD_SDIR_MASK 0xffffffff
/* When premium mode is on for probing but standard directory memory
- is installed, the vaild directory bits depend on the phys. bank */
+ is installed, the valid directory bits depend on the phys. bank */
#define MD_PDIR_PROBE_MASK(pb) 0xffffffffffffffff
#define MD_SDIR_PROBE_MASK(pb) (0xffff0000ffff << ((pb) ? 16 : 0))
typedef union xbw0_status_u {
xbowreg_t statusword;
struct {
- uint32_t mult_err:1, /* Multiple error occured */
+ uint32_t mult_err:1, /* Multiple error occurred */
connect_tout:1, /* Connection timeout */
xtalk_err:1, /* Xtalk pkt with error bit */
/* End of Xbridge only */
/* End of Xbridge only */
xtalk_err:1, /* Xtalk pkt with error bit */
connect_tout:1, /* Connection timeout */
- mult_err:1; /* Multiple error occured */
+ mult_err:1; /* Multiple error occurred */
} xbw0_stfield;
} xbw0_status_t;
} console_info;
__u16 num_pci_vectors; /* number of ACPI derived PCI IRQ's*/
__u64 pci_vectors; /* physical address of PCI data (pci_vector_struct)*/
- __u64 fpswa; /* physical address of the the fpswa interface */
+ __u64 fpswa; /* physical address of the fpswa interface */
__u64 initrd_start;
__u64 initrd_size;
} ia64_boot_param;
-/* $Id: asi.h,v 1.1 1996/11/20 12:59:45 davem Exp $ */
+/* $Id: asi.h,v 1.2 2001/03/01 21:28:37 davem Exp $ */
#ifndef _SPARC64_ASI_H
#define _SPARC64_ASI_H
#define ASI_PNFL 0x8a /* Primary, no fault, little endian */
#define ASI_SNFL 0x8b /* Secondary, no fault, little endian */
-/* SpitFire extended ASIs. */
+/* SpitFire and later extended ASIs. The "(III)" marker designates
+ * UltraSparc-III specific ASIs.
+ */
#define ASI_PHYS_USE_EC 0x14 /* PADDR, E-cachable */
#define ASI_PHYS_BYPASS_EC_E 0x15 /* PADDR, E-cachable, E-bit */
#define ASI_PHYS_USE_EC_L 0x1c /* PADDR, E-cachable, little endian */
#define ASI_PHYS_BYPASS_EC_E_L 0x1d /* PADDR, E-cachable, E-bit, little endian */
#define ASI_NUCLEUS_QUAD_LDD 0x24 /* Cachable, qword load */
#define ASI_NUCLEUS_QUAD_LDD_L 0x2c /* Cachable, qword load, little endian */
+#define ASI_PCACHE_DATA_STATUS 0x30 /* (III) PCache data status RAM diag */
+#define ASI_PCACHE_DATA 0x31 /* (III) PCache data RAM diag */
+#define ASI_PCACHE_TAG 0x32 /* (III) PCache tag RAM diag */
+#define ASI_PCACHE_SNOOP_TAG 0x33 /* (III) PCache snoop tag RAM diag */
+#define ASI_WCACHE_VALID_BITS 0x38 /* (III) WCache Valid Bits diag */
+#define ASI_WCACHE_DATA 0x39 /* (III) WCache data RAM diag */
+#define ASI_WCACHE_TAG 0x3a /* (III) WCache tag RAM diag */
+#define ASI_WCACHE_SNOOP_TAG 0x3b /* (III) WCache snoop tag RAM diag */
+#define ASI_DCACHE_INVALIDATE 0x42 /* (III) DCache Invalidate diag */
+#define ASI_DCACHE_UTAG 0x43 /* (III) DCache uTag diag */
+#define ASI_DCACHE_SNOOP_TAG 0x44 /* (III) DCache snoop tag RAM diag */
#define ASI_LSU_CONTROL 0x45 /* Load-store control unit */
+#define ASI_DCU_CONTROL_REG 0x45 /* (III) DCache Unit Control Register */
#define ASI_DCACHE_DATA 0x46 /* Data cache data-ram diag access */
#define ASI_DCACHE_TAG 0x47 /* Data cache tag/valid ram diag access */
#define ASI_INTR_DISPATCH_STAT 0x48 /* IRQ vector dispatch status */
#define ASI_INTR_RECEIVE 0x49 /* IRQ vector receive status */
#define ASI_UPA_CONFIG 0x4a /* UPA config space */
+#define ASI_SAFARI_CONFIG 0x4a /* (III) Safari Config Register */
+#define ASI_SAFARI_ADDRESS 0x4a /* (III) Safari Address Register */
#define ASI_ESTATE_ERROR_EN 0x4b /* E-cache error enable space */
#define ASI_AFSR 0x4c /* Async fault status register */
#define ASI_AFAR 0x4d /* Async fault address register */
#define ASI_DTLB_DATA_ACCESS 0x5d /* Data-MMU TLB data access register */
#define ASI_DTLB_TAG_READ 0x5e /* Data-MMU TLB tag read register */
#define ASI_DMMU_DEMAP 0x5f /* Data-MMU TLB demap */
+#define ASI_IIU_INST_TRAP 0x60 /* (III) Instruction Breakpoint register */
#define ASI_IC_INSTR 0x66 /* Insn cache instrucion ram diag access */
#define ASI_IC_TAG 0x67 /* Insn cache tag/valid ram diag access */
+#define ASI_IC_STAG 0x68 /* (III) Insn cache snoop tag ram diag */
#define ASI_IC_PRE_DECODE 0x6e /* Insn cache pre-decode ram diag access */
#define ASI_IC_NEXT_FIELD 0x6f /* Insn cache next-field ram diag access */
+#define ASI_BRPRED_ARRAY 0x6f /* (III) Branch Prediction RAM diag */
#define ASI_BLK_AIUP 0x70 /* Primary, user, block load/store */
#define ASI_BLK_AIUS 0x71 /* Secondary, user, block load/store */
+#define ASI_EC_DATA 0x74 /* (III) E-cache data staging register */
+#define ASI_EC_CTRL 0x75 /* (III) E-cache control register */
#define ASI_EC_W 0x76 /* E-cache diag write access */
#define ASI_UDB_ERROR_W 0x77 /* External UDB error registers write */
#define ASI_UDB_CONTROL_W 0x77 /* External UDB control registers write */
#define ASI_UDB_INTR_W 0x77 /* External UDB IRQ vector dispatch write */
+#define ASI_INTR_DATAN_W 0x77 /* (III) Outgoing irq vector data reg N */
+#define ASI_INTR_DISPATCH_W 0x77 /* (III) Interrupt vector dispatch */
#define ASI_BLK_AIUPL 0x78 /* Primary, user, little, blk ld/st */
#define ASI_BLK_AIUSL 0x79 /* Secondary, user, little, blk ld/st */
#define ASI_EC_R 0x7e /* E-cache diag read access */
#define ASI_UDBH_CONTROL_R 0x7f /* External UDB control registers read hi */
#define ASI_UDBL_CONTROL_R 0x7f /* External UDB control registers read low */
#define ASI_UDB_INTR_R 0x7f /* External UDB IRQ vector dispatch read */
+#define ASI_INTR_DATAN_R 0x7f /* (III) Incoming irq vector data reg N */
#define ASI_PST8_P 0xc0 /* Primary, 8 8-bit, partial */
#define ASI_PST8_S 0xc1 /* Secondary, 8 8-bit, partial */
#define ASI_PST16_P 0xc2 /* Primary, 4 16-bit, partial */
--- /dev/null
+/* $Id: dcr.h,v 1.3 2001/03/01 23:23:33 davem Exp $ */
+#ifndef _SPARC64_DCR_H
+#define _SPARC64_DCR_H
+
+/* UltraSparc-III Dispatch Control Register, ASR 0x12 */
+#define DCR_BPE 0x0000000000000020 /* Branch Predict Enable */
+#define DCR_RPE 0x0000000000000010 /* Return Address Prediction Enable*/
+#define DCR_SI 0x0000000000000008 /* Single Instruction Disable */
+#define DCR_MS 0x0000000000000001 /* Multi-Scalar dispatch */
+
+#endif /* _SPARC64_DCR_H */
--- /dev/null
+/* $Id: dcu.h,v 1.2 2001/03/01 23:23:33 davem Exp $ */
+#ifndef _SPARC64_DCU_H
+#define _SPARC64_DCU_H
+
+/* UltraSparc-III Data Cache Unit Control Register */
+#define DCU_CP 0x0002000000000000 /* Physical Cache Enable w/o mmu*/
+#define DCU_CV 0x0001000000000000 /* Virtual Cache Enable w/o mmu */
+#define DCU_ME 0x0000800000000000 /* NC-store Merging Enable */
+#define DCU_RE 0x0000400000000000 /* RAW bypass Enable */
+#define DCU_PE 0x0000200000000000 /* PCache Enable */
+#define DCU_HPE 0x0000100000000000 /* HW prefetch Enable */
+#define DCU_SPE 0x0000080000000000 /* SW prefetch Enable */
+#define DCU_SL 0x0000040000000000 /* Secondary load steering Enab */
+#define DCU_WE 0x0000020000000000 /* WCache enable */
+#define DCU_PM 0x000001fe00000000 /* PA Watchpoint Byte Mask */
+#define DCU_VM 0x00000001fe000000 /* VA Watchpoint Byte Mask */
+#define DCU_PR 0x0000000001000000 /* PA Watchpoint Read Enable */
+#define DCU_PW 0x0000000000800000 /* PA Watchpoint Write Enable */
+#define DCU_VR 0x0000000000400000 /* VA Watchpoint Read Enable */
+#define DCU_VW 0x0000000000200000 /* VA Watchpoint Write Enable */
+#define DCU_DM 0x0000000000000008 /* DMMU Enable */
+#define DCU_IM 0x0000000000000004 /* IMMU Enable */
+#define DCU_DC 0x0000000000000002 /* Data Cache Enable */
+#define DCU_IC 0x0000000000000001 /* Instruction Cache Enable */
+
+#endif /* _SPARC64_DCU_H */
-/* $Id: pbm.h,v 1.23 2001/01/11 16:26:45 davem Exp $
+/* $Id: pbm.h,v 1.25 2001/02/28 03:28:55 davem Exp $
* pbm.h: UltraSparc PCI controller software state.
*
* Copyright (C) 1997, 1998, 1999 David S. Miller (davem@redhat.com)
struct resource io_space;
struct resource mem_space;
+ /* Base of PCI Config space, can be per-PBM or shared. */
+ unsigned long config_space;
+
/* State of 66MHz capabilities on this PBM. */
int is_66mhz_capable;
int all_devs_66mhz;
/* List of all PCI controllers. */
struct pci_controller_info *next;
- /* Physical address base of controller registers
- * and PCI config space.
- */
+ /* Physical address base of controller registers. */
unsigned long controller_regs;
- unsigned long config_space;
/* Opaque 32-bit system bus Port ID. */
u32 portid;
/* Operations which are controller specific. */
void (*scan_bus)(struct pci_controller_info *);
- unsigned int (*irq_build)(struct pci_controller_info *, struct pci_dev *, unsigned int);
+ unsigned int (*irq_build)(struct pci_pbm_info *, struct pci_dev *, unsigned int);
void (*base_address_update)(struct pci_dev *, int);
void (*resource_adjust)(struct pci_dev *, struct resource *, struct resource *);
-/* $Id: pgalloc.h,v 1.14 2000/12/09 04:15:24 anton Exp $ */
+/* $Id: pgalloc.h,v 1.15 2001/03/04 18:31:00 davem Exp $ */
#ifndef _SPARC64_PGALLOC_H
#define _SPARC64_PGALLOC_H
#endif /* ! CONFIG_SMP */
-/* This will change for Cheetah and later chips. */
-#define VPTE_BASE 0xfffffffe00000000
+#define VPTE_BASE_SPITFIRE 0xfffffffe00000000
+#define VPTE_BASE_CHEETAH 0xffe0000000000000
extern __inline__ void flush_tlb_pgtables(struct mm_struct *mm, unsigned long start,
unsigned long end)
{
/* Note the signed type. */
- long s = start, e = end;
+ long s = start, e = end, vpte_base;
if (s > e)
/* Nobody should call us with start below VM hole and end above.
See if it is really true. */
s &= PMD_MASK;
e = (e + PMD_SIZE - 1) & PMD_MASK;
#endif
+ vpte_base = (tlb_type == spitfire ?
+ VPTE_BASE_SPITFIRE :
+ VPTE_BASE_CHEETAH);
flush_tlb_range(mm,
- VPTE_BASE + (s >> (PAGE_SHIFT - 3)),
- VPTE_BASE + (e >> (PAGE_SHIFT - 3)));
+ vpte_base + (s >> (PAGE_SHIFT - 3)),
+ vpte_base + (e >> (PAGE_SHIFT - 3)));
}
/* Page table allocation/freeing. */
-/* $Id: pgtable.h,v 1.135 2000/11/08 04:49:24 davem Exp $
+/* $Id: pgtable.h,v 1.137 2001/03/02 03:12:01 davem Exp $
* pgtable.h: SpitFire page table operations.
*
* Copyright 1996,1997 David S. Miller (davem@caip.rutgers.edu)
#endif /* !(__ASSEMBLY__) */
-/* SpitFire TTE bits. */
+/* Spitfire/Cheetah TTE bits. */
#define _PAGE_VALID 0x8000000000000000 /* Valid TTE */
#define _PAGE_R 0x8000000000000000 /* Used to keep ref bit up to date */
#define _PAGE_SZ4MB 0x6000000000000000 /* 4MB Page */
#define _PAGE_SZ8K 0x0000000000000000 /* 8K Page */
#define _PAGE_NFO 0x1000000000000000 /* No Fault Only */
#define _PAGE_IE 0x0800000000000000 /* Invert Endianness */
-#define _PAGE_SOFT2 0x07FC000000000000 /* Second set of software bits */
-#define _PAGE_DIAG 0x0003FE0000000000 /* Diagnostic TTE bits */
-#define _PAGE_PADDR 0x000001FFFFFFE000 /* Physical Address bits [40:13] */
-#define _PAGE_SOFT 0x0000000000001F80 /* First set of software bits */
+#define _PAGE_SN 0x0000800000000000 /* Snoop */
+#define _PAGE_PADDR_SF 0x000001FFFFFFE000 /* (Spitfire) Phys Address [40:13] */
+#define _PAGE_PADDR 0x000007FFFFFFE000 /* (Cheetah) Phys Address [42:13] */
+#define _PAGE_SOFT 0x0000000000001F80 /* Software bits */
#define _PAGE_L 0x0000000000000040 /* Locked TTE */
#define _PAGE_CP 0x0000000000000020 /* Cacheable in Physical Cache */
#define _PAGE_CV 0x0000000000000010 /* Cacheable in Virtual Cache */
-/* $Id: spitfire.h,v 1.10 2000/10/06 13:10:29 anton Exp $
+/* $Id: spitfire.h,v 1.11 2001/03/03 10:34:45 davem Exp $
* spitfire.h: SpitFire/BlackBird/Cheetah inline MMU operations.
*
* Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu)
#ifndef __ASSEMBLY__
+enum ultra_tlb_layout {
+ spitfire = 0,
+ cheetah = 1
+};
+
+extern enum ultra_tlb_layout tlb_type;
+
+#define SPITFIRE_HIGHEST_LOCKED_TLBENT (64 - 1)
+#define CHEETAH_HIGHEST_LOCKED_TLBENT (16 - 1)
+
+#define sparc64_highest_locked_tlbent() \
+ (tlb_type == spitfire ? \
+ SPITFIRE_HIGHEST_LOCKED_TLBENT : \
+ CHEETAH_HIGHEST_LOCKED_TLBENT)
+
extern __inline__ unsigned long spitfire_get_isfsr(void)
{
unsigned long ret;
__asm__ __volatile__("ldxa [%1] %2, %0"
: "=r" (data)
: "r" (entry << 3), "i" (ASI_DTLB_DATA_ACCESS));
+
+ /* Clear TTE diag bits. */
+ data &= ~0x0003fe0000000000UL;
+
return data;
}
__asm__ __volatile__("ldxa [%1] %2, %0"
: "=r" (data)
: "r" (entry << 3), "i" (ASI_ITLB_DATA_ACCESS));
+
+ /* Clear TTE diag bits. */
+ data &= ~0x0003fe0000000000UL;
+
return data;
}
: "r" (page | 0x20), "i" (ASI_IMMU_DEMAP));
}
+/* Cheetah has "all non-locked" tlb flushes. */
+extern __inline__ void cheetah_flush_dtlb_all(void)
+{
+ __asm__ __volatile__("stxa %%g0, [%0] %1"
+ : /* No outputs */
+ : "r" (0x80), "i" (ASI_DMMU_DEMAP));
+}
+
+extern __inline__ void cheetah_flush_itlb_all(void)
+{
+ __asm__ __volatile__("stxa %%g0, [%0] %1"
+ : /* No outputs */
+ : "r" (0x80), "i" (ASI_IMMU_DEMAP));
+}
+
+/* Cheetah has a 4-tlb layout so direct access is a bit different.
+ * The first two TLBs are fully associative, hold 16 entries, and are
+ * used only for locked and >8K sized translations. One exists for
+ * data accesses and one for instruction accesses.
+ *
+ * The third TLB is for data accesses to 8K non-locked translations, is
+ * 2 way associative, and holds 512 entries. The fourth TLB is for
+ * instruction accesses to 8K non-locked translations, is 2 way
+ * associative, and holds 128 entries.
+ */
+extern __inline__ unsigned long cheetah_get_ldtlb_data(int entry)
+{
+ unsigned long data;
+
+ __asm__ __volatile__("ldxa [%1] %2, %0"
+ : "=r" (data)
+ : "r" ((0 << 16) | (entry << 3)),
+ "i" (ASI_DTLB_DATA_ACCESS));
+
+ return data;
+}
+
+extern __inline__ unsigned long cheetah_get_litlb_data(int entry)
+{
+ unsigned long data;
+
+ __asm__ __volatile__("ldxa [%1] %2, %0"
+ : "=r" (data)
+ : "r" ((0 << 16) | (entry << 3)),
+ "i" (ASI_ITLB_DATA_ACCESS));
+
+ return data;
+}
+
+extern __inline__ unsigned long cheetah_get_ldtlb_tag(int entry)
+{
+ unsigned long tag;
+
+ __asm__ __volatile__("ldxa [%1] %2, %0"
+ : "=r" (tag)
+ : "r" ((0 << 16) | (entry << 3)),
+ "i" (ASI_DTLB_TAG_READ));
+
+ return tag;
+}
+
+extern __inline__ unsigned long cheetah_get_litlb_tag(int entry)
+{
+ unsigned long tag;
+
+ __asm__ __volatile__("ldxa [%1] %2, %0"
+ : "=r" (tag)
+ : "r" ((0 << 16) | (entry << 3)),
+ "i" (ASI_ITLB_TAG_READ));
+
+ return tag;
+}
+
+extern __inline__ void cheetah_put_ldtlb_data(int entry, unsigned long data)
+{
+ __asm__ __volatile__("stxa %0, [%1] %2"
+ : /* No outputs */
+ : "r" (data),
+ "r" ((0 << 16) | (entry << 3)),
+ "i" (ASI_DTLB_DATA_ACCESS));
+}
+
+extern __inline__ void cheetah_put_litlb_data(int entry, unsigned long data)
+{
+ __asm__ __volatile__("stxa %0, [%1] %2"
+ : /* No outputs */
+ : "r" (data),
+ "r" ((0 << 16) | (entry << 3)),
+ "i" (ASI_ITLB_DATA_ACCESS));
+}
+
+extern __inline__ unsigned long cheetah_get_dtlb_data(int entry)
+{
+ unsigned long data;
+
+ __asm__ __volatile__("ldxa [%1] %2, %0"
+ : "=r" (data)
+ : "r" ((2 << 16) | (entry << 3)), "i" (ASI_DTLB_DATA_ACCESS));
+
+ return data;
+}
+
+extern __inline__ unsigned long cheetah_get_dtlb_tag(int entry)
+{
+ unsigned long tag;
+
+ __asm__ __volatile__("ldxa [%1] %2, %0"
+ : "=r" (tag)
+ : "r" ((2 << 16) | (entry << 3)), "i" (ASI_DTLB_TAG_READ));
+ return tag;
+}
+
+extern __inline__ void cheetah_put_dtlb_data(int entry, unsigned long data)
+{
+ __asm__ __volatile__("stxa %0, [%1] %2"
+ : /* No outputs */
+ : "r" (data),
+ "r" ((2 << 16) | (entry << 3)),
+ "i" (ASI_DTLB_DATA_ACCESS));
+}
+
+extern __inline__ unsigned long cheetah_get_itlb_data(int entry)
+{
+ unsigned long data;
+
+ __asm__ __volatile__("ldxa [%1] %2, %0"
+ : "=r" (data)
+ : "r" ((2 << 16) | (entry << 3)),
+ "i" (ASI_ITLB_DATA_ACCESS));
+
+ return data;
+}
+
+extern __inline__ unsigned long cheetah_get_itlb_tag(int entry)
+{
+ unsigned long tag;
+
+ __asm__ __volatile__("ldxa [%1] %2, %0"
+ : "=r" (tag)
+ : "r" ((2 << 16) | (entry << 3)), "i" (ASI_ITLB_TAG_READ));
+ return tag;
+}
+
+extern __inline__ void cheetah_put_itlb_data(int entry, unsigned long data)
+{
+ __asm__ __volatile__("stxa %0, [%1] %2"
+ : /* No outputs */
+ : "r" (data), "r" ((2 << 16) | (entry << 3)),
+ "i" (ASI_ITLB_DATA_ACCESS));
+}
+
#endif /* !(__ASSEMBLY__) */
#endif /* !(_SPARC64_SPITFIRE_H) */
}
-#endif
\ No newline at end of file
+#endif
--- /dev/null
+/*
+ * Generic HDLC support routines for Linux
+ *
+ * Copyright (C) 1999, 2000 Krzysztof Halasa <khc@pm.waw.pl>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef __HDLC_H
+#define __HDLC_H
+
+/* Ioctls - to be changed */
+#define HDLCGSLOTMAP (0x89F4) /* E1/T1 slot bitmap */
+#define HDLCGCLOCK (0x89F5) /* clock sources */
+#define HDLCGCLOCKRATE (0x89F6) /* clock rate */
+#define HDLCGMODE (0x89F7) /* internal to hdlc.c - protocol used */
+#define HDLCGLINE (0x89F8) /* physical interface */
+#define HDLCSSLOTMAP (0x89F9)
+#define HDLCSCLOCK (0x89FA)
+#define HDLCSCLOCKRATE (0x89FB)
+#define HDLCSMODE (0x89FC) /* internal to hdlc.c - select protocol */
+#define HDLCPVC (0x89FD) /* internal to hdlc.c - create/delete PVC */
+#define HDLCSLINE (0x89FE)
+#define HDLCRUN (0x89FF) /* Download firmware and run board */
+
+/* Modes */
+#define MODE_NONE 0x00000000 /* Not initialized */
+#define MODE_DCE 0x00000080 /* DCE */
+#define MODE_HDLC 0x00000100 /* Raw HDLC frames */
+#define MODE_CISCO 0x00000200
+#define MODE_PPP 0x00000400
+#define MODE_FR 0x00000800 /* Any LMI */
+#define MODE_FR_ANSI 0x00000801
+#define MODE_FR_CCITT 0x00000802
+#define MODE_X25 0x00001000
+#define MODE_MASK 0x0000FF00
+#define MODE_SOFT 0x80000000 /* Driver modes, using hardware HDLC */
+
+/* Lines */
+#define LINE_DEFAULT 0x00000000
+#define LINE_V35 0x00000001
+#define LINE_RS232 0x00000002
+#define LINE_X21 0x00000003
+#define LINE_T1 0x00000004
+#define LINE_E1 0x00000005
+#define LINE_MASK 0x000000FF
+#define LINE_LOOPBACK 0x80000000 /* On-card loopback */
+
+#define CLOCK_EXT 0 /* External TX and RX clock - DTE */
+#define CLOCK_INT 1 /* Internal TX and RX clock - DCE */
+#define CLOCK_TXINT 2 /* Internal TX and external RX clock */
+#define CLOCK_TXFROMRX 3 /* TX clock derived from external RX clock */
+
+
+#define HDLC_MAX_MTU 1500 /* Ethernet 1500 bytes */
+#define HDLC_MAX_MRU (HDLC_MAX_MTU + 10) /* max 10 bytes for FR */
+
+#ifdef __KERNEL__
+
+#include <linux/skbuff.h>
+#include <linux/netdevice.h>
+#include <net/syncppp.h>
+
+#define MAXLEN_LMISTAT 20 /* max size of status enquiry frame */
+
+#define LINK_STATE_RELIABLE 0x01
+#define LINK_STATE_REQUEST 0x02 /* full stat sent (DCE) / req pending (DTE) */
+#define LINK_STATE_CHANGED 0x04 /* change in PVCs state, send full report */
+#define LINK_STATE_FULLREP_SENT 0x08 /* full report sent */
+
+#define PVC_STATE_NEW 0x01
+#define PVC_STATE_ACTIVE 0x02
+#define PVC_STATE_FECN 0x08 /* FECN condition */
+#define PVC_STATE_BECN 0x10 /* BECN condition */
+
+
+#define FR_UI 0x03
+#define FR_PAD 0x00
+
+#define NLPID_IP 0xCC
+#define NLPID_IPV6 0x8E
+#define NLPID_SNAP 0x80
+#define NLPID_PAD 0x00
+#define NLPID_Q933 0x08
+
+
+#define LMI_DLCI 0 /* LMI DLCI */
+#define LMI_PROTO 0x08
+#define LMI_CALLREF 0x00 /* Call Reference */
+#define LMI_ANSI_LOCKSHIFT 0x95 /* ANSI lockshift */
+#define LMI_REPTYPE 1 /* report type */
+#define LMI_CCITT_REPTYPE 0x51
+#define LMI_ALIVE 3 /* keep alive */
+#define LMI_CCITT_ALIVE 0x53
+#define LMI_PVCSTAT 7 /* pvc status */
+#define LMI_CCITT_PVCSTAT 0x57
+#define LMI_FULLREP 0 /* full report */
+#define LMI_INTEGRITY 1 /* link integrity report */
+#define LMI_SINGLE 2 /* single pvc report */
+#define LMI_STATUS_ENQUIRY 0x75
+#define LMI_STATUS 0x7D /* reply */
+
+#define LMI_REPT_LEN 1 /* report type element length */
+#define LMI_INTEG_LEN 2 /* link integrity element length */
+
+#define LMI_LENGTH 13 /* standard LMI frame length */
+#define LMI_ANSI_LENGTH 14
+
+
+
+typedef struct {
+ unsigned ea1 : 1;
+ unsigned cr : 1;
+ unsigned dlcih: 6;
+
+ unsigned ea2 : 1;
+ unsigned de : 1;
+ unsigned becn : 1;
+ unsigned fecn : 1;
+ unsigned dlcil: 4;
+}__attribute__ ((packed)) fr_hdr;
+
+
+
+typedef struct { /* Used in Cisco and PPP mode */
+ u8 address;
+ u8 control;
+ u16 protocol;
+}__attribute__ ((packed)) hdlc_header;
+
+
+
+typedef struct {
+ u32 type; /* code */
+ u32 par1;
+ u32 par2;
+ u16 rel; /* reliability */
+ u32 time;
+}__attribute__ ((packed)) cisco_packet;
+#define CISCO_PACKET_LEN 18
+#define CISCO_BIG_PACKET_LEN 20
+
+
+
+typedef struct pvc_device_struct {
+ struct net_device netdev; /* PVC net device - must be first */
+ struct net_device_stats stats;
+ struct hdlc_device_struct *master;
+ struct pvc_device_struct *next;
+
+ u8 state;
+ u8 newstate;
+}pvc_device;
+
+
+
+typedef struct {
+ u32 last_errors; /* last errors bit list */
+ int last_poll; /* ! */
+ u8 T391; /* ! link integrity verification polling timer */
+ u8 T392; /* ! polling verification timer */
+ u8 N391; /* full status polling counter */
+ u8 N392; /* error threshold */
+ u8 N393; /* monitored events count */
+ u8 N391cnt;
+
+ u8 state; /* ! */
+ u32 txseq; /* ! TX sequence number - Cisco uses 4 bytes */
+ u32 rxseq; /* ! RX sequence number */
+}fr_lmi; /* ! means used in Cisco HDLC as well */
+
+
+typedef struct hdlc_device_struct {
+ /* to be initialized by hardware driver: */
+ struct net_device netdev; /* master net device - must be first */
+ struct net_device_stats stats;
+
+ struct ppp_device pppdev;
+ struct ppp_device *syncppp_ptr;
+
+ /* set_mode may be NULL if HDLC-only board */
+ int (*set_mode)(struct hdlc_device_struct *hdlc, int mode);
+ int (*open)(struct hdlc_device_struct *hdlc);
+ void (*close)(struct hdlc_device_struct *hdlc);
+ int (*xmit)(struct hdlc_device_struct *hdlc, struct sk_buff *skb);
+ int (*ioctl)(struct hdlc_device_struct *hdlc, struct ifreq *ifr,
+ int cmd);
+
+ /* Only in "hardware" FR modes etc. - may be NULL */
+ int (*create_pvc)(pvc_device *pvc);
+ void (*destroy_pvc)(pvc_device *pvc);
+ int (*open_pvc)(pvc_device *pvc);
+ void (*close_pvc)(pvc_device *pvc);
+
+ /* for hdlc.c internal use only */
+ pvc_device *first_pvc;
+ u16 pvc_count;
+ int mode;
+
+ struct timer_list timer;
+ fr_lmi lmi;
+}hdlc_device;
+
+
+int register_hdlc_device(hdlc_device *hdlc);
+void unregister_hdlc_device(hdlc_device *hdlc);
+void hdlc_netif_rx(hdlc_device *hdlc, struct sk_buff *skb);
+
+
+extern __inline__ struct net_device* hdlc_to_dev(hdlc_device *hdlc)
+{
+ return &hdlc->netdev;
+}
+
+
+extern __inline__ hdlc_device* dev_to_hdlc(struct net_device *dev)
+{
+ return (hdlc_device*)dev;
+}
+
+
+extern __inline__ struct net_device* pvc_to_dev(pvc_device *pvc)
+{
+ return &pvc->netdev;
+}
+
+
+extern __inline__ pvc_device* dev_to_pvc(struct net_device *dev)
+{
+ return (pvc_device*)dev;
+}
+
+
+extern __inline__ const char *hdlc_to_name(hdlc_device *hdlc)
+{
+ return hdlc_to_dev(hdlc)->name;
+}
+
+
+extern __inline__ const char *pvc_to_name(pvc_device *pvc)
+{
+ return pvc_to_dev(pvc)->name;
+}
+
+
+extern __inline__ u16 status_to_dlci(hdlc_device *hdlc, u8 *status, u8 *state)
+{
+ *state &= ~(PVC_STATE_ACTIVE | PVC_STATE_NEW);
+ if (status[2] & 0x08)
+ *state |= PVC_STATE_NEW;
+ else if (status[2] & 0x02)
+ *state |= PVC_STATE_ACTIVE;
+
+ return ((status[0] & 0x3F)<<4) | ((status[1] & 0x78)>>3);
+}
+
+
+extern __inline__ void dlci_to_status(hdlc_device *hdlc, u16 dlci, u8 *status,
+ u8 state)
+{
+ status[0] = (dlci>>4) & 0x3F;
+ status[1] = ((dlci<<3) & 0x78) | 0x80;
+ status[2] = 0x80;
+
+ if (state & PVC_STATE_NEW)
+ status[2] |= 0x08;
+ else if (state & PVC_STATE_ACTIVE)
+ status[2] |= 0x02;
+}
+
+
+
+extern __inline__ u16 netdev_dlci(struct net_device *dev)
+{
+ return ntohs(*(u16*)dev->dev_addr);
+}
+
+
+
+extern __inline__ u16 q922_to_dlci(u8 *hdr)
+{
+ return ((hdr[0] & 0xFC)<<2) | ((hdr[1] & 0xF0)>>4);
+}
+
+
+
+extern __inline__ void dlci_to_q922(u8 *hdr, u16 dlci)
+{
+ hdr[0] = (dlci>>2) & 0xFC;
+ hdr[1] = ((dlci<<4) & 0xF0) | 0x01;
+}
+
+
+
+extern __inline__ int mode_is(hdlc_device *hdlc, int mask)
+{
+ return (hdlc->mode & mask) == mask;
+}
+
+
+
+extern __inline__ pvc_device* find_pvc(hdlc_device *hdlc, u16 dlci)
+{
+ pvc_device *pvc=hdlc->first_pvc;
+
+ while (pvc) {
+ if (netdev_dlci(&pvc->netdev) == dlci)
+ return pvc;
+ pvc=pvc->next;
+ }
+
+ return NULL;
+}
+
+
+
+extern __inline__ void debug_frame(const struct sk_buff *skb)
+{
+ int i;
+
+ for (i=0; i<skb->len; i++) {
+ if (i == 100) {
+ printk("...\n");
+ return;
+ }
+ printk(" %02X", skb->data[i]);
+ }
+ printk("\n");
+}
+
+
+#endif /* __KERNEL__ */
+#endif /* __HDLC_H */
/****************/
/* error values */
/****************/
-#define ERR_NONE 0 /* no error occured */
+#define ERR_NONE 0 /* no error occurred */
#define ERR_ALREADY_BOOT 1000 /* we are already booting */
#define EPOF_BAD_MAGIC 1001 /* bad magic in POF header */
#define ERR_BOARD_DPRAM 1002 /* board DPRAM failed */
* corresponding header files.
*/
/* -> bit-adapter specific ioctls */
-#define I2C_RETRIES 0x0701 /* number times a device adress should */
+#define I2C_RETRIES 0x0701 /* number times a device address should */
/* be polled when not acknowledging */
#define I2C_TIMEOUT 0x0702 /* set timeout - call with int */
#define I2C_FUNCS 0x0705 /* Get the adapter functionality */
#define I2C_RDWR 0x0707 /* Combined R/W transfer (one stop only)*/
#if 0
-#define I2C_ACK_TEST 0x0710 /* See if a slave is at a specific adress */
+#define I2C_ACK_TEST 0x0710 /* See if a slave is at a specific address */
#endif
#define I2C_SMBUS 0x0720 /* SMBus-level access */
* (c) Copyright 1999, Thomas Davis, tadavis@lbl.gov
*
* This software may be used and distributed according to the terms
- * of the GNU Public License, incorporated herein by reference.
+ * of the GNU General Public License, incorporated herein by reference.
*
*/
*
*
* This software may be used and distributed according to the terms
- * of the GNU Public License, incorporated herein by reference.
+ * of the GNU General Public License, incorporated herein by reference.
*
* The author may be reached as simon@ncm.com, or C/O
* NCM
int register_frad(const char *name);
int unregister_frad(const char *name);
-int (*dlci_ioctl_hook)(unsigned int, void *);
+extern int (*dlci_ioctl_hook)(unsigned int, void *);
-#endif __KERNEL__
+#endif /* __KERNEL__ */
#endif /* CONFIG_DLCI || CONFIG_DLCI_MODULE */
void *key_data;
char key_reserved[48]; /* for use by the filter modules */
- int lo_blksize;
int old_gfp_mask;
spinlock_t lo_lock;
return lo->transfer(lo, cmd, rbuf, lbuf, size, rblock);
}
-
-/*
- * used to throttle loop_thread so bdflush/kswapd doesn't go nuts
- */
-#define LOOP_MAX_BUFFERS 2048
-
#endif /* __KERNEL__ */
/*
* http://www.pap-philips.de
* -----------------------------------------------------------
* This software may be used and distributed according to the terms of
- * the GNU Public License, incorporated herein by reference.
+ * the GNU General Public License, incorporated herein by reference.
*
* Author:
* L. Haag
--- /dev/null
+#ifndef _IPT_TCPMSS_H
+#define _IPT_TCPMSS_H
+
+struct ipt_tcpmss_info {
+ u_int16_t mss;
+};
+
+#define IPT_TCPMSS_CLAMP_PMTU 0xffff
+
+#endif /*_IPT_TCPMSS_H*/
--- /dev/null
+#ifndef _IPT_TCPMSS_MATCH_H
+#define _IPT_TCPMSS_MATCH_H
+
+struct ipt_tcpmss_match_info {
+ u_int16_t mss_min, mss_max;
+ u_int8_t invert;
+};
+
+#endif /*_IPT_TCPMSS_MATCH_H*/
int pci_enable_device(struct pci_dev *dev);
void pci_set_master(struct pci_dev *dev);
+int pci_set_dma_mask(struct pci_dev *dev, dma_addr_t mask);
int pci_set_power_state(struct pci_dev *dev, int state);
int pci_assign_resource(struct pci_dev *dev, int i);
/* pg.h (c) 1998 Grant R. Guenther <grant@torque.net>
- Under the terms of the GNU public license
+ Under the terms of the GNU General Public License
pg.h defines the user interface to the generic ATAPI packet
#define ASYNC_CHECK_CD 0x02000000 /* i.e., CLOCAL */
#define ASYNC_SHARE_IRQ 0x01000000 /* for multifunction cards
--- no longer used */
-#define ASYNC_NO_FLOW 0x00800000 /* No flow control serial console */
+#define ASYNC_CONS_FLOW 0x00800000 /* flow control for console */
#define ASYNC_INTERNAL_FLAGS 0xFF800000 /* Internal flags */
/* Allow complicated architectures to specify rs_table[] at run time */
extern int early_serial_setup(struct serial_struct *req);
+
#endif /* __KERNEL__ */
#endif /* _LINUX_SERIAL_H */
struct termios callout_termios;
int io_type;
struct async_struct *info;
+ struct pci_dev *dev;
};
struct async_struct {
* These register definitions are for the 16C950
*/
#define UART_ASR 0x01 /* Additional Status Register */
-#define UART_RFL 0x03 /* Transmitter FIFO level */
-#define UART_TFL 0x04 /* Receiver FIFO level */
+#define UART_RFL 0x03 /* Receiver FIFO level */
+#define UART_TFL 0x04 /* Transmitter FIFO level */
#define UART_ICR 0x05 /* Index Control Register */
/* The 16950 ICR registers */
* Copyright (C) 1998-2000 by Microgate Corporation
*
* Redistribution of this file is permitted under
- * the terms of the GNU Public License (GPL)
+ * the terms of the GNU General Public License (GPL)
*/
#ifndef _SYNCLINK_H_
--- /dev/null
+/*
+ * Defines for synchronous PPP/Cisco link level subroutines.
+ *
+ * Copyright (C) 1994 Cronyx Ltd.
+ * Author: Serge Vakulenko, <vak@zebub.msk.su>
+ *
+ * This software is distributed with NO WARRANTIES, not even the implied
+ * warranties for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * Authors grant any other persons or organizations permission to use
+ * or modify this software as long as this message is kept with the software,
+ * all derivative works or modified versions.
+ *
+ * Version 1.7, Wed Jun 7 22:12:02 MSD 1995
+ *
+ *
+ *
+ */
+
+#ifndef _SYNCPPP_H_
+#define _SYNCPPP_H_ 1
+
+#ifdef __KERNEL__
+struct slcp {
+ u16 state; /* state machine */
+ u32 magic; /* local magic number */
+ u_char echoid; /* id of last keepalive echo request */
+ u_char confid; /* id of last configuration request */
+};
+
+struct sipcp {
+ u16 state; /* state machine */
+ u_char confid; /* id of last configuration request */
+};
+
+struct sppp
+{
+ struct sppp * pp_next; /* next interface in keepalive list */
+ u32 pp_flags; /* use Cisco protocol instead of PPP */
+ u16 pp_alivecnt; /* keepalive packets counter */
+ u16 pp_loopcnt; /* loopback detection counter */
+ u32 pp_seq; /* local sequence number */
+ u32 pp_rseq; /* remote sequence number */
+ struct slcp lcp; /* LCP params */
+ struct sipcp ipcp; /* IPCP params */
+ u32 ibytes,obytes; /* Bytes in/out */
+ u32 ipkts,opkts; /* Packets in/out */
+ struct timer_list pp_timer;
+ struct net_device *pp_if;
+ char pp_link_state; /* Link status */
+};
+
+struct ppp_device
+{
+ struct net_device *dev; /* Network device pointer */
+ struct sppp sppp; /* Synchronous PPP */
+};
+
+#define sppp_of(dev) \
+ (&((struct ppp_device *)(*(unsigned long *)((dev)->priv)))->sppp)
+
+#define PP_KEEPALIVE 0x01 /* use keepalive protocol */
+#define PP_CISCO 0x02 /* use Cisco protocol instead of PPP */
+#define PP_TIMO 0x04 /* cp_timeout routine active */
+#define PP_DEBUG 0x08
+
+#define PPP_MTU 1500 /* max. transmit unit */
+
+#define LCP_STATE_CLOSED 0 /* LCP state: closed (conf-req sent) */
+#define LCP_STATE_ACK_RCVD 1 /* LCP state: conf-ack received */
+#define LCP_STATE_ACK_SENT 2 /* LCP state: conf-ack sent */
+#define LCP_STATE_OPENED 3 /* LCP state: opened */
+
+#define IPCP_STATE_CLOSED 0 /* IPCP state: closed (conf-req sent) */
+#define IPCP_STATE_ACK_RCVD 1 /* IPCP state: conf-ack received */
+#define IPCP_STATE_ACK_SENT 2 /* IPCP state: conf-ack sent */
+#define IPCP_STATE_OPENED 3 /* IPCP state: opened */
+
+#define SPPP_LINK_DOWN 0 /* link down - no keepalive */
+#define SPPP_LINK_UP 1 /* link is up - keepalive ok */
+
+void sppp_attach (struct ppp_device *pd);
+void sppp_detach (struct net_device *dev);
+void sppp_input (struct net_device *dev, struct sk_buff *m);
+int sppp_do_ioctl (struct net_device *dev, struct ifreq *ifr, int cmd);
+struct sk_buff *sppp_dequeue (struct net_device *dev);
+int sppp_isempty (struct net_device *dev);
+void sppp_flush (struct net_device *dev);
+int sppp_open (struct net_device *dev);
+int sppp_reopen (struct net_device *dev);
+int sppp_close (struct net_device *dev);
+#endif
+
+#define SPPPIOCCISCO (SIOCDEVPRIVATE)
+#define SPPPIOCPPP (SIOCDEVPRIVATE+1)
+#define SPPPIOCDEBUG (SIOCDEVPRIVATE+2)
+
+#endif /* _SYNCPPP_H_ */
int pm_active;
-static spinlock_t pm_devs_lock = SPIN_LOCK_UNLOCKED;
+/*
+ * Locking notes:
+ * pm_devs_lock can be a semaphore providing pm ops are not called
+ * from an interrupt handler (already a bad idea so no change here). Each
+ * change must be protected so that an unlink of an entry doesn't clash
+ * with a pm send - which is permitted to sleep in the current architecture
+ *
+ * Module unloads clashing with pm events now work out safely, the module
+ * unload path will block until the event has been sent. It may well block
+ * until a resume but that will be fine.
+ */
+
+static DECLARE_MUTEX(pm_devs_lock);
static LIST_HEAD(pm_devs);
/**
{
struct pm_dev *dev = kmalloc(sizeof(struct pm_dev), GFP_KERNEL);
if (dev) {
- unsigned long flags;
-
memset(dev, 0, sizeof(*dev));
dev->type = type;
dev->id = id;
dev->callback = callback;
- spin_lock_irqsave(&pm_devs_lock, flags);
+ down(&pm_devs_lock);
list_add(&dev->entry, &pm_devs);
- spin_unlock_irqrestore(&pm_devs_lock, flags);
+ up(&pm_devs_lock);
}
return dev;
}
void pm_unregister(struct pm_dev *dev)
{
if (dev) {
- unsigned long flags;
-
- spin_lock_irqsave(&pm_devs_lock, flags);
+ down(&pm_devs_lock);
list_del(&dev->entry);
- spin_unlock_irqrestore(&pm_devs_lock, flags);
+ up(&pm_devs_lock);
+
+ kfree(dev);
+ }
+}
+static void __pm_unregister(struct pm_dev *dev)
+{
+ if (dev) {
+ list_del(&dev->entry);
kfree(dev);
}
}
if (!callback)
return;
+ down(&pm_devs_lock);
entry = pm_devs.next;
while (entry != &pm_devs) {
struct pm_dev *dev = list_entry(entry, struct pm_dev, entry);
entry = entry->next;
if (dev->callback == callback)
- pm_unregister(dev);
+ __pm_unregister(dev);
}
+ up(&pm_devs_lock);
}
/**
*
* BUGS: what stops two power management requests occuring in parallel
* and conflicting.
+ *
+ * WARNING: Calling pm_send directly is not generally recommended, in
+ * particular there is no locking against the pm_dev going away. The
+ * caller must maintain all needed locking or have 'inside knowledge'
+ * on the safety. Also remember that this function is not locked against
+ * pm_unregister. This means that you must handle SMP races on callback
+ * execution and unload yourself.
*/
int pm_send(struct pm_dev *dev, pm_request_t rqst, void *data)
* during the processing of this request are restored to their
* previous state.
*
+ * WARNING: This function takes the pm_devs_lock. The lock is not dropped until
+ * the callbacks have completed. This prevents races against pm locking
+ * functions, races against module unload pm_unregister code. It does
+ * mean however that you must not issue pm_ functions within the callback
+ * or you will deadlock and users will hate you.
+ *
* Zero is returned on success. If a suspend fails then the status
* from the device that vetoes the suspend is returned.
*
int pm_send_all(pm_request_t rqst, void *data)
{
- struct list_head *entry = pm_devs.next;
+ struct list_head *entry;
+
+ down(&pm_devs_lock);
+ entry = pm_devs.next;
while (entry != &pm_devs) {
struct pm_dev *dev = list_entry(entry, struct pm_dev, entry);
if (dev->callback) {
*/
if (rqst == PM_SUSPEND)
pm_undo_all(dev);
+ up(&pm_devs_lock);
return status;
}
}
entry = entry->next;
}
+ up(&pm_devs_lock);
return 0;
}
* of the list.
*
* To search from the beginning pass %NULL as the @from value.
+ *
+ * The caller MUST hold the pm_devs_lock lock when calling this
+ * function. The instant that the lock is dropped all pointers returned
+ * may become invalid.
*/
struct pm_dev *pm_find(pm_dev_t type, struct pm_dev *from)
wake_up_process(p);
}
+/**
+ * schedule_timeout - sleep until timeout
+ * @timeout: timeout value in jiffies
+ *
+ * Make the current task sleep until @timeout jiffies have
+ * elapsed. The routine will return immediately unless
+ * the current task state has been set (see set_current_state()).
+ *
+ * You can set the task state as follows -
+ *
+ * %TASK_UNINTERRUPTIBLE - at least @timeout jiffies are guaranteed to
+ * pass before the routine returns. The routine will return 0
+ *
+ * %TASK_INTERRUPTIBLE - the routine may return early if a signal is
+ * delivered to the current task. In this case the remaining time
+ * in jiffies will be returned, or 0 if the timer expired in time
+ *
+ * The current task state is guaranteed to be TASK_RUNNING when this
+ * routine returns.
+ *
+ * Specifying a @timeout value of %MAX_SCHEDULE_TIMEOUT will schedule
+ * the CPU away without a bound on the timeout. In this case the return
+ * value will be %MAX_SCHEDULE_TIMEOUT.
+ *
+ * In all cases the return value is guaranteed to be non-negative.
+ */
signed long schedule_timeout(signed long timeout)
{
struct timer_list timer;
}
default:
del_from_runqueue(prev);
- case TASK_RUNNING:
+ case TASK_RUNNING:;
}
prev->need_resched = 0;
return 0;
}
+static void deferred_cad(void *dummy)
+{
+ notifier_call_chain(&reboot_notifier_list, SYS_RESTART, NULL);
+ machine_restart(NULL);
+}
+
/*
* This function gets called by ctrl-alt-del - ie the keyboard interrupt.
* As it's called within an interrupt, it may NOT sync: the only choice
*/
void ctrl_alt_del(void)
{
- if (C_A_D) {
- notifier_call_chain(&reboot_notifier_list, SYS_RESTART, NULL);
- machine_restart(NULL);
- } else
+ static struct tq_struct cad_tq = {
+ routine: deferred_cad,
+ };
+
+ if (C_A_D)
+ schedule_task(&cad_tq);
+ else
kill_proc(1, SIGINT, 1);
}
{
int old_rgid = current->gid;
int old_egid = current->egid;
+ int new_rgid = old_rgid;
+ int new_egid = old_egid;
if (rgid != (gid_t) -1) {
if ((old_rgid == rgid) ||
(current->egid==rgid) ||
capable(CAP_SETGID))
- current->gid = rgid;
+ new_rgid = rgid;
else
return -EPERM;
}
(current->egid == egid) ||
(current->sgid == egid) ||
capable(CAP_SETGID))
- current->fsgid = current->egid = egid;
+ new_egid = egid;
else {
- current->gid = old_rgid;
return -EPERM;
}
}
+ if (new_egid != old_egid)
+ {
+ current->dumpable = 0;
+ wmb();
+ }
if (rgid != (gid_t) -1 ||
(egid != (gid_t) -1 && egid != old_rgid))
- current->sgid = current->egid;
- current->fsgid = current->egid;
- if (current->egid != old_egid)
- current->dumpable = 0;
+ current->sgid = new_egid;
+ current->fsgid = new_egid;
+ current->egid = new_egid;
+ current->gid = new_rgid;
return 0;
}
int old_egid = current->egid;
if (capable(CAP_SETGID))
+ {
+ if(old_egid != gid)
+ {
+ current->dumpable=0;
+ wmb();
+ }
current->gid = current->egid = current->sgid = current->fsgid = gid;
+ }
else if ((gid == current->gid) || (gid == current->sgid))
+ {
+ if(old_egid != gid)
+ {
+ current->dumpable=0;
+ wmb();
+ }
current->egid = current->fsgid = gid;
+ }
else
return -EPERM;
-
- if (current->egid != old_egid)
- current->dumpable = 0;
return 0;
}
}
}
-static int set_user(uid_t new_ruid)
+static int set_user(uid_t new_ruid, int dumpclear)
{
struct user_struct *new_user, *old_user;
atomic_dec(&old_user->processes);
atomic_inc(&new_user->processes);
+ if(dumpclear)
+ {
+ current->dumpable = 0;
+ wmb();
+ }
current->uid = new_ruid;
current->user = new_user;
free_uid(old_user);
return -EPERM;
}
- if (new_ruid != old_ruid && set_user(new_ruid) < 0)
+ if (new_ruid != old_ruid && set_user(new_ruid, new_euid != old_euid) < 0)
return -EAGAIN;
+ if (new_euid != old_euid)
+ {
+ current->dumpable=0;
+ wmb();
+ }
current->fsuid = current->euid = new_euid;
if (ruid != (uid_t) -1 ||
(euid != (uid_t) -1 && euid != old_ruid))
current->suid = current->euid;
current->fsuid = current->euid;
- if (current->euid != old_euid)
- current->dumpable = 0;
if (!issecure(SECURE_NO_SETUID_FIXUP)) {
cap_emulate_setxuid(old_ruid, old_euid, old_suid);
asmlinkage long sys_setuid(uid_t uid)
{
int old_euid = current->euid;
- int old_ruid, old_suid, new_ruid;
+ int old_ruid, old_suid, new_ruid, new_suid;
old_ruid = new_ruid = current->uid;
old_suid = current->suid;
+ new_suid = old_suid;
+
if (capable(CAP_SETUID)) {
- if (uid != old_ruid && set_user(uid) < 0)
+ if (uid != old_ruid && set_user(uid, old_euid != uid) < 0)
return -EAGAIN;
- current->suid = uid;
- } else if ((uid != current->uid) && (uid != current->suid))
+ new_suid = uid;
+ } else if ((uid != current->uid) && (uid != new_suid))
return -EPERM;
- current->fsuid = current->euid = uid;
-
if (old_euid != uid)
+ {
current->dumpable = 0;
+ wmb();
+ }
+ current->fsuid = current->euid = uid;
+ current->suid = new_suid;
if (!issecure(SECURE_NO_SETUID_FIXUP)) {
cap_emulate_setxuid(old_ruid, old_euid, old_suid);
return -EPERM;
}
if (ruid != (uid_t) -1) {
- if (ruid != current->uid && set_user(ruid) < 0)
+ if (ruid != current->uid && set_user(ruid, euid != current->euid) < 0)
return -EAGAIN;
}
if (euid != (uid_t) -1) {
if (euid != current->euid)
+ {
current->dumpable = 0;
+ wmb();
+ }
current->euid = euid;
current->fsuid = euid;
}
*/
asmlinkage long sys_setresgid(gid_t rgid, gid_t egid, gid_t sgid)
{
- if (!capable(CAP_SETGID)) {
+ if (!capable(CAP_SETGID)) {
if ((rgid != (gid_t) -1) && (rgid != current->gid) &&
(rgid != current->egid) && (rgid != current->sgid))
return -EPERM;
(sgid != current->egid) && (sgid != current->sgid))
return -EPERM;
}
- if (rgid != (gid_t) -1)
- current->gid = rgid;
if (egid != (gid_t) -1) {
if (egid != current->egid)
+ {
current->dumpable = 0;
+ wmb();
+ }
current->egid = egid;
current->fsgid = egid;
}
+ if (rgid != (gid_t) -1)
+ current->gid = rgid;
if (sgid != (gid_t) -1)
current->sgid = sgid;
return 0;
if (uid == current->uid || uid == current->euid ||
uid == current->suid || uid == current->fsuid ||
capable(CAP_SETUID))
+ {
+ if (uid != old_fsuid)
+ {
+ current->dumpable = 0;
+ wmb();
+ }
current->fsuid = uid;
- if (current->fsuid != old_fsuid)
- current->dumpable = 0;
+ }
/* We emulate fsuid by essentially doing a scaled-down version
* of what we did in setresuid and friends. However, we only
if (gid == current->gid || gid == current->egid ||
gid == current->sgid || gid == current->fsgid ||
capable(CAP_SETGID))
+ {
+ if (gid != old_fsgid)
+ {
+ current->dumpable = 0;
+ wmb();
+ }
current->fsgid = gid;
- if (current->fsgid != old_fsgid)
- current->dumpable = 0;
-
+ }
return old_fsgid;
}
* Little mods for all variable to reside either into rodata or bss segments
* by marking constant variables with 'const' and initializing all the others
* at run-time only. This allows for the kernel uncompressor to run
- * directly from Flash or ROM memory on embeded systems.
+ * directly from Flash or ROM memory on embedded systems.
*/
/*
#include <linux/ctype.h>
#ifndef __HAVE_ARCH_STRNICMP
+/**
+ * strnicmp - Case insensitive, length-limited string comparison
+ * @s1: One string
+ * @s2: The other string
+ * @len: the maximum number of characters to compare
+ */
int strnicmp(const char *s1, const char *s2, size_t len)
{
/* Yes, Virginia, it had better be unsigned */
char * ___strtok;
#ifndef __HAVE_ARCH_STRCPY
+/**
+ * strcpy - Copy a %NUL terminated string
+ * @dest: Where to copy the string to
+ * @src: Where to copy the string from
+ */
char * strcpy(char * dest,const char *src)
{
char *tmp = dest;
#endif
#ifndef __HAVE_ARCH_STRNCPY
+/**
+ * strncpy - Copy a length-limited, %NUL-terminated string
+ * @dest: Where to copy the string to
+ * @src: Where to copy the string from
+ * @count: The maximum number of bytes to copy
+ *
+ * Note that unlike userspace strncpy, this does not %NUL-pad the buffer.
+ * However, the result is not %NUL-terminated if the source exceeds
+ * @count bytes.
+ */
char * strncpy(char * dest,const char *src,size_t count)
{
char *tmp = dest;
#endif
#ifndef __HAVE_ARCH_STRCAT
+/**
+ * strcat - Append one %NUL-terminated string to another
+ * @dest: The string to be appended to
+ * @src: The string to append to it
+ */
char * strcat(char * dest, const char * src)
{
char *tmp = dest;
#endif
#ifndef __HAVE_ARCH_STRNCAT
+/**
+ * strncat - Append a length-limited, %NUL-terminated string to another
+ * @dest: The string to be appended to
+ * @src: The string to append to it
+ * @count: The maximum numbers of bytes to copy
+ *
+ * Note that in contrast to strncpy, strncat ensures the result is
+ * terminated.
+ */
char * strncat(char *dest, const char *src, size_t count)
{
char *tmp = dest;
#endif
#ifndef __HAVE_ARCH_STRCMP
+/**
+ * strcmp - Compare two strings
+ * @cs: One string
+ * @ct: Another string
+ */
int strcmp(const char * cs,const char * ct)
{
register signed char __res;
#endif
#ifndef __HAVE_ARCH_STRNCMP
+/**
+ * strncmp - Compare two length-limited strings
+ * @cs: One string
+ * @ct: Another string
+ * @count: The maximum number of bytes to compare
+ */
int strncmp(const char * cs,const char * ct,size_t count)
{
register signed char __res = 0;
#endif
#ifndef __HAVE_ARCH_STRCHR
+/**
+ * strchr - Find the first occurrence of a character in a string
+ * @s: The string to be searched
+ * @c: The character to search for
+ */
char * strchr(const char * s, int c)
{
for(; *s != (char) c; ++s)
#endif
#ifndef __HAVE_ARCH_STRRCHR
+/**
+ * strrchr - Find the last occurrence of a character in a string
+ * @s: The string to be searched
+ * @c: The character to search for
+ */
char * strrchr(const char * s, int c)
{
const char *p = s + strlen(s);
#endif
#ifndef __HAVE_ARCH_STRLEN
+/**
+ * strlen - Find the length of a string
+ * @s: The string to be sized
+ */
size_t strlen(const char * s)
{
const char *sc;
#endif
#ifndef __HAVE_ARCH_STRNLEN
+/**
+ * strnlen - Find the length of a length-limited string
+ * @s: The string to be sized
+ * @count: The maximum number of bytes to search
+ */
size_t strnlen(const char * s, size_t count)
{
const char *sc;
#endif
#ifndef __HAVE_ARCH_STRSPN
+/**
+ * strspn - Calculate the length of the initial substring of @s which only
+ * contains letters in @accept
+ * @s: The string to be searched
+ * @accept: The string to search for
+ */
size_t strspn(const char *s, const char *accept)
{
const char *p;
#endif
#ifndef __HAVE_ARCH_STRPBRK
+/**
+ * strpbrk - Find the first occurrence of a set of characters
+ * @cs: The string to be searched
+ * @ct: The characters to search for
+ */
char * strpbrk(const char * cs,const char * ct)
{
const char *sc1,*sc2;
#endif
#ifndef __HAVE_ARCH_STRTOK
+/**
+ * strtok - Split a string into tokens
+ * @s: The string to be searched
+ * @ct: The characters to search for
+ *
+ * WARNING: strtok is deprecated, use strsep instead.
+ */
char * strtok(char * s,const char * ct)
{
char *sbegin, *send;
#endif
#ifndef __HAVE_ARCH_STRSEP
-
+/**
+ * strsep - Split a string into tokens
+ * @s: The string to be searched
+ * @ct: The characters to search for
+ *
+ * strsep() updates @s to point after the token, ready for the next call.
+ */
char * strsep(char **s, const char * ct)
{
char *sbegin=*s;
#endif
#ifndef __HAVE_ARCH_MEMSET
+/**
+ * memset - Fill a region of memory with the given value
+ * @s: Pointer to the start of the area.
+ * @c: The byte to fill the area with
+ * @count: The size of the area.
+ */
void * memset(void * s,int c,size_t count)
{
char *xs = (char *) s;
#endif
#ifndef __HAVE_ARCH_BCOPY
+/**
+ * bcopy - Copy one area of memory to another
+ * @src: Where to copy from
+ * @dest: Where to copy to
+ * @count: The size of the area.
+ *
+ * When using copies for I/O remember that bcopy and memcpy are entitled
+ * to do out of order writes and may well do exactly that.
+ *
+ * Note that this is the same as memcpy, with the arguments reversed. memcpy
+ * is the standard, bcopy is a legacy BSD function.
+ */
char * bcopy(const char * src, char * dest, int count)
{
char *tmp = dest;
#endif
#ifndef __HAVE_ARCH_MEMCPY
+/**
+ * memcpy - Copy one area of memory to another
+ * @dest: Where to copy to
+ * @src: Where to copy from
+ * @count: The size of the area.
+ *
+ * When using copies for I/O remember that bcopy and memcpy are entitled
+ * to do out of order writes and may well do exactly that.
+ */
void * memcpy(void * dest,const void *src,size_t count)
{
char *tmp = (char *) dest, *s = (char *) src;
#endif
#ifndef __HAVE_ARCH_MEMMOVE
+/**
+ * memmove - Copy one area of memory to another
+ * @dest: Where to copy to
+ * @src: Where to copy from
+ * @count: The size of the area.
+ *
+ * memmove copes with overlapping areas.
+ */
void * memmove(void * dest,const void *src,size_t count)
{
char *tmp, *s;
#endif
#ifndef __HAVE_ARCH_MEMCMP
+/**
+ * memcmp - Compare two areas of memory
+ * @cs: One area of memory
+ * @ct: Another area of memory
+ * @count: The size of the area.
+ */
int memcmp(const void * cs,const void * ct,size_t count)
{
const unsigned char *su1, *su2;
}
#endif
-/*
- * find the first occurrence of byte 'c', or 1 past the area if none
- */
#ifndef __HAVE_ARCH_MEMSCAN
+/**
+ * memscan - Find a character in an area of memory.
+ * @addr: The memory area
+ * @c: The byte to search for
+ * @size: The size of the area.
+ *
+ * returns the address of the first occurrence of @c, or 1 byte past
+ * the area if @c is not found
+ */
void * memscan(void * addr, int c, size_t size)
{
unsigned char * p = (unsigned char *) addr;
#endif
#ifndef __HAVE_ARCH_STRSTR
+/**
+ * strstr - Find the first substring in a %NUL terminated string
+ * @s1: The string to be searched
+ * @s2: The string to search for
+ */
char * strstr(const char * s1,const char * s2)
{
int l1, l2;
#endif
#ifndef __HAVE_ARCH_MEMCHR
+/**
+ * memchr - Find a character in an area of memory.
+ * @s: The memory area
+ * @c: The byte to search for
+ * @n: The size of the area.
+ *
+ * returns the address of the first occurrence of @c, or %NULL
+ * if @c is not found
+ */
void *memchr(const void *s, int c, size_t n)
{
const unsigned char *p = s;
#include <asm/div64.h>
+/**
+ * simple_strtoul - convert a string to an unsigned long
+ * @cp: The start of the string
+ * @endp: A pointer to the end of the parsed string will be placed here
+ * @base: The number base to use
+ */
unsigned long simple_strtoul(const char *cp,char **endp,unsigned int base)
{
unsigned long result = 0,value;
return result;
}
+/**
+ * simple_strtol - convert a string to a signed long
+ * @cp: The start of the string
+ * @endp: A pointer to the end of the parsed string will be placed here
+ * @base: The number base to use
+ */
long simple_strtol(const char *cp,char **endp,unsigned int base)
{
if(*cp=='-')
return simple_strtoul(cp,endp,base);
}
+/**
+ * simple_strtoull - convert a string to an unsigned long long
+ * @cp: The start of the string
+ * @endp: A pointer to the end of the parsed string will be placed here
+ * @base: The number base to use
+ */
unsigned long long simple_strtoull(const char *cp,char **endp,unsigned int base)
{
unsigned long long result = 0,value;
return result;
}
+/**
+ * simple_strtoll - convert a string to a signed long long
+ * @cp: The start of the string
+ * @endp: A pointer to the end of the parsed string will be placed here
+ * @base: The number base to use
+ */
long long simple_strtoll(const char *cp,char **endp,unsigned int base)
{
if(*cp=='-')
return str;
}
-/* Forward decl. needed for IP address printing stuff... */
-int sprintf(char * buf, const char *fmt, ...);
-
+/**
+ * vsprintf - Format a string and place it in a buffer
+ * @buf: The buffer to place the result into
+ * @fmt: The format string to use
+ * @args: Arguments for the format string
+ *
+ * Call this function if you are already dealing with a va_list.
+ * You probably want sprintf instead.
+ */
int vsprintf(char *buf, const char *fmt, va_list args)
{
int len;
return str-buf;
}
+/**
+ * sprintf - Format a string and place it in a buffer
+ * @buf: The buffer to place the result into
+ * @fmt: The format string to use
+ * @args: Arguments for the format string
+ */
int sprintf(char * buf, const char *fmt, ...)
{
va_list args;
mainmenu_option next_comment
comment 'QoS and/or fair queueing'
-bool 'QoS and/or fair queueing (EXPERIMENTAL)' CONFIG_NET_SCHED
+bool 'QoS and/or fair queueing' CONFIG_NET_SCHED
if [ "$CONFIG_NET_SCHED" = "y" ]; then
source net/sched/Config.in
fi
#define DPRINTK(format,args...)
#endif
-struct net_bridge_fdb_entry *(*br_fdb_get_hook)(struct net_bridge *br,
+extern struct net_bridge_fdb_entry *(*br_fdb_get_hook)(struct net_bridge *br,
unsigned char *addr);
-void (*br_fdb_put_hook)(struct net_bridge_fdb_entry *ent);
+extern void (*br_fdb_put_hook)(struct net_bridge_fdb_entry *ent);
#define DUMP_PACKETS 0 /* 0 = None,
dep_tristate ' netfilter MARK match support' CONFIG_IP_NF_MATCH_MARK $CONFIG_IP_NF_IPTABLES
dep_tristate ' Multiple port match support' CONFIG_IP_NF_MATCH_MULTIPORT $CONFIG_IP_NF_IPTABLES
dep_tristate ' TOS match support' CONFIG_IP_NF_MATCH_TOS $CONFIG_IP_NF_IPTABLES
+ dep_tristate ' tcpmss match support' CONFIG_IP_NF_MATCH_TCPMSS $CONFIG_IP_NF_IPTABLES
if [ "$CONFIG_IP_NF_CONNTRACK" != "n" ]; then
dep_tristate ' Connection state match support' CONFIG_IP_NF_MATCH_STATE $CONFIG_IP_NF_CONNTRACK $CONFIG_IP_NF_IPTABLES
fi
# If they want FTP, set to $CONFIG_IP_NF_NAT (m or y),
# or $CONFIG_IP_NF_FTP (m or y), whichever is weaker. Argh.
if [ "$CONFIG_IP_NF_FTP" = "m" ]; then
- define_tristate CONFIG_IP_NF_NAT_FTP m
+ define_tristate CONFIG_IP_NF_NAT_FTP m
else
if [ "$CONFIG_IP_NF_FTP" = "y" ]; then
define_tristate CONFIG_IP_NF_NAT_FTP $CONFIG_IP_NF_NAT
dep_tristate ' MARK target support' CONFIG_IP_NF_TARGET_MARK $CONFIG_IP_NF_MANGLE
fi
dep_tristate ' LOG target support' CONFIG_IP_NF_TARGET_LOG $CONFIG_IP_NF_IPTABLES
+ dep_tristate ' TCPMSS target support' CONFIG_IP_NF_TARGET_TCPMSS $CONFIG_IP_NF_IPTABLES
fi
# Backwards compatibility modules: only if you don't build in the others.
obj-$(CONFIG_IP_NF_MATCH_TOS) += ipt_tos.o
obj-$(CONFIG_IP_NF_MATCH_STATE) += ipt_state.o
obj-$(CONFIG_IP_NF_MATCH_UNCLEAN) += ipt_unclean.o
+obj-$(CONFIG_IP_NF_MATCH_TCPMSS) += ipt_tcpmss.o
# targets
obj-$(CONFIG_IP_NF_TARGET_REJECT) += ipt_REJECT.o
obj-$(CONFIG_IP_NF_TARGET_MASQUERADE) += ipt_MASQUERADE.o
obj-$(CONFIG_IP_NF_TARGET_REDIRECT) += ipt_REDIRECT.o
obj-$(CONFIG_IP_NF_TARGET_LOG) += ipt_LOG.o
+obj-$(CONFIG_IP_NF_TARGET_TCPMSS) += ipt_TCPMSS.o
# backwards compatibility
obj-$(CONFIG_IP_NF_COMPAT_IPCHAINS) += ipchains.o
--- /dev/null
+/*
+ * This is a module which is used for setting the MSS option in TCP packets.
+ *
+ * Copyright (c) 2000 Marc Boucher
+ */
+#include <linux/module.h>
+#include <linux/skbuff.h>
+
+#include <linux/ip.h>
+#include <net/tcp.h>
+
+#include <linux/netfilter_ipv4/ip_tables.h>
+#include <linux/netfilter_ipv4/ipt_TCPMSS.h>
+
+#if 0
+#define DEBUGP printk
+#else
+#define DEBUGP(format, args...)
+#endif
+
+static u_int16_t
+cheat_check(u_int32_t oldvalinv, u_int32_t newval, u_int16_t oldcheck)
+{
+ u_int32_t diffs[] = { oldvalinv, newval };
+ return csum_fold(csum_partial((char *)diffs, sizeof(diffs),
+ oldcheck^0xFFFF));
+}
+
+static inline unsigned int
+optlen(const u_int8_t *opt, unsigned int offset)
+{
+ /* Beware zero-length options: make finite progress */
+ if (opt[offset] <= TCPOPT_NOP || opt[offset+1] == 0) return 1;
+ else return opt[offset+1];
+}
+
+static unsigned int
+ipt_tcpmss_target(struct sk_buff **pskb,
+ unsigned int hooknum,
+ const struct net_device *in,
+ const struct net_device *out,
+ const void *targinfo,
+ void *userinfo)
+{
+ const struct ipt_tcpmss_info *tcpmssinfo = targinfo;
+ struct tcphdr *tcph;
+ struct iphdr *iph = (*pskb)->nh.iph;
+ u_int16_t tcplen, newtotlen, oldval, newmss;
+ unsigned int i;
+ u_int8_t *opt;
+
+ tcplen = (*pskb)->len - iph->ihl*4;
+
+ tcph = (void *)iph + iph->ihl*4;
+
+	/* Since it passed flags test in tcp match, we know it is
+ not a fragment, and has data >= tcp header length. SYN
+ packets should not contain data: if they did, then we risk
+ running over MTU, sending Frag Needed and breaking things
+ badly. --RR */
+ if (tcplen != tcph->doff*4) {
+ if (net_ratelimit())
+ printk(KERN_ERR
+ "ipt_tcpmss_target: bad length (%d bytes)\n",
+ (*pskb)->len);
+ return NF_DROP;
+ }
+
+ if(tcpmssinfo->mss == IPT_TCPMSS_CLAMP_PMTU) {
+ if(!(*pskb)->dst) {
+ if (net_ratelimit())
+ printk(KERN_ERR
+ "ipt_tcpmss_target: no dst?! can't determine path-MTU\n");
+ return NF_DROP; /* or IPT_CONTINUE ?? */
+ }
+
+ if((*pskb)->dst->pmtu <= (sizeof(struct iphdr) + sizeof(struct tcphdr))) {
+ if (net_ratelimit())
+ printk(KERN_ERR
+ "ipt_tcpmss_target: unknown or invalid path-MTU (%d)\n", (*pskb)->dst->pmtu);
+ return NF_DROP; /* or IPT_CONTINUE ?? */
+ }
+
+ newmss = (*pskb)->dst->pmtu - sizeof(struct iphdr) - sizeof(struct tcphdr);
+ } else
+ newmss = tcpmssinfo->mss;
+
+ opt = (u_int8_t *)tcph;
+ for (i = sizeof(struct tcphdr); i < tcph->doff*4; i += optlen(opt, i)){
+ if ((opt[i] == TCPOPT_MSS) &&
+ ((tcph->doff*4 - i) >= TCPOLEN_MSS) &&
+ (opt[i+1] == TCPOLEN_MSS)) {
+ u_int16_t oldmss;
+
+ oldmss = (opt[i+2] << 8) | opt[i+3];
+
+ if((tcpmssinfo->mss == IPT_TCPMSS_CLAMP_PMTU) &&
+ (oldmss <= newmss))
+ return IPT_CONTINUE;
+
+ opt[i+2] = (newmss & 0xff00) >> 8;
+ opt[i+3] = (newmss & 0x00ff);
+
+ tcph->check = cheat_check(htons(oldmss)^0xFFFF,
+ htons(newmss),
+ tcph->check);
+
+ DEBUGP(KERN_INFO "ipt_tcpmss_target: %u.%u.%u.%u:%hu"
+ "->%u.%u.%u.%u:%hu changed TCP MSS option"
+ " (from %u to %u)\n",
+ NIPQUAD((*pskb)->nh.iph->saddr),
+ ntohs(tcph->source),
+ NIPQUAD((*pskb)->nh.iph->daddr),
+ ntohs(tcph->dest),
+ oldmss, newmss);
+ goto retmodified;
+ }
+ }
+
+ /*
+ * MSS Option not found ?! add it..
+ */
+ if (skb_tailroom((*pskb)) < TCPOLEN_MSS) {
+ struct sk_buff *newskb;
+
+ newskb = skb_copy_expand(*pskb, skb_headroom(*pskb),
+ TCPOLEN_MSS, GFP_ATOMIC);
+ if (!newskb) {
+ if (net_ratelimit())
+ printk(KERN_ERR "ipt_tcpmss_target:"
+ " unable to allocate larger skb\n");
+ return NF_DROP;
+ }
+
+ kfree_skb(*pskb);
+ *pskb = newskb;
+ iph = (*pskb)->nh.iph;
+ tcph = (void *)iph + iph->ihl*4;
+ }
+
+ skb_put((*pskb), TCPOLEN_MSS);
+
+ opt = (u_int8_t *)tcph + sizeof(struct tcphdr);
+ memmove(opt + TCPOLEN_MSS, opt, tcplen - sizeof(struct tcphdr));
+
+ tcph->check = cheat_check(htons(tcplen) ^ 0xFFFF,
+ htons(tcplen + TCPOLEN_MSS), tcph->check);
+ tcplen += TCPOLEN_MSS;
+
+ opt[0] = TCPOPT_MSS;
+ opt[1] = TCPOLEN_MSS;
+ opt[2] = (newmss & 0xff00) >> 8;
+ opt[3] = (newmss & 0x00ff);
+
+ tcph->check = cheat_check(~0, *((u_int32_t *)opt), tcph->check);
+
+ oldval = ((u_int16_t *)tcph)[6];
+ tcph->doff += TCPOLEN_MSS/4;
+ tcph->check = cheat_check(oldval ^ 0xFFFF,
+ ((u_int16_t *)tcph)[6], tcph->check);
+
+ newtotlen = htons(ntohs(iph->tot_len) + TCPOLEN_MSS);
+ iph->check = cheat_check(iph->tot_len ^ 0xFFFF,
+ newtotlen, iph->check);
+ iph->tot_len = newtotlen;
+
+ DEBUGP(KERN_INFO "ipt_tcpmss_target: %u.%u.%u.%u:%hu"
+ "->%u.%u.%u.%u:%hu added TCP MSS option (%u)\n",
+ NIPQUAD((*pskb)->nh.iph->saddr),
+ ntohs(tcph->source),
+ NIPQUAD((*pskb)->nh.iph->daddr),
+ ntohs(tcph->dest),
+ newmss);
+
+ retmodified:
+ /* If we had a hardware checksum before, it's now invalid */
+ (*pskb)->ip_summed = CHECKSUM_NONE;
+ (*pskb)->nfcache |= NFC_UNKNOWN | NFC_ALTERED;
+ return IPT_CONTINUE;
+}
+
+#define TH_SYN 0x02
+
+static inline int find_syn_match(const struct ipt_entry_match *m)
+{
+ const struct ipt_tcp *tcpinfo = (const struct ipt_tcp *)m->data;
+
+ if (strcmp(m->u.kernel.match->name, "tcp") == 0
+ && (tcpinfo->flg_cmp & TH_SYN)
+ && !(tcpinfo->invflags & IPT_TCP_INV_FLAGS))
+ return 1;
+
+ return 0;
+}
+
+/* Must specify -p tcp --syn/--tcp-flags SYN */
+static int
+ipt_tcpmss_checkentry(const char *tablename,
+ const struct ipt_entry *e,
+ void *targinfo,
+ unsigned int targinfosize,
+ unsigned int hook_mask)
+{
+ const struct ipt_tcpmss_info *tcpmssinfo = targinfo;
+
+ if (targinfosize != IPT_ALIGN(sizeof(struct ipt_tcpmss_info))) {
+ DEBUGP("ipt_tcpmss_checkentry: targinfosize %u != %u\n",
+ targinfosize, IPT_ALIGN(sizeof(struct ipt_tcpmss_info)));
+ return 0;
+ }
+
+
+ if((tcpmssinfo->mss == IPT_TCPMSS_CLAMP_PMTU) &&
+ ((hook_mask & ~((1 << NF_IP_FORWARD)
+ | (1 << NF_IP_LOCAL_OUT)
+ | (1 << NF_IP_POST_ROUTING))) != 0)) {
+ printk("TCPMSS: path-MTU clamping only supported in FORWARD, OUTPUT and POSTROUTING hooks\n");
+ return 0;
+ }
+
+ if (e->ip.proto == IPPROTO_TCP
+ && !(e->ip.invflags & IPT_INV_PROTO)
+ && IPT_MATCH_ITERATE(e, find_syn_match))
+ return 1;
+
+ printk("TCPMSS: Only works on TCP SYN packets\n");
+ return 0;
+}
+
+static struct ipt_target ipt_tcpmss_reg
+= { { NULL, NULL }, "TCPMSS",
+ ipt_tcpmss_target, ipt_tcpmss_checkentry, NULL, THIS_MODULE };
+
+static int __init init(void)
+{
+ return ipt_register_target(&ipt_tcpmss_reg);
+}
+
+static void __exit fini(void)
+{
+ ipt_unregister_target(&ipt_tcpmss_reg);
+}
+
+module_init(init);
+module_exit(fini);
--- /dev/null
+/* Kernel module to match TCP MSS values. */
+#include <linux/module.h>
+#include <linux/skbuff.h>
+#include <net/tcp.h>
+
+#include <linux/netfilter_ipv4/ipt_tcpmss.h>
+#include <linux/netfilter_ipv4/ip_tables.h>
+
+#define TH_SYN 0x02
+
+/* Returns 1 if the mss option is set and matched by the range, 0 otherwise */
+static inline int
+mssoption_match(u_int16_t min, u_int16_t max,
+ const struct tcphdr *tcp,
+ u_int16_t datalen,
+ int invert,
+ int *hotdrop)
+{
+ unsigned int i;
+ const u_int8_t *opt = (u_int8_t *)tcp;
+
+ /* If we don't have the whole header, drop packet. */
+ if (tcp->doff * 4 > datalen) {
+ *hotdrop = 1;
+ return 0;
+ }
+
+ for (i = sizeof(struct tcphdr); i < tcp->doff * 4; ) {
+ if ((opt[i] == TCPOPT_MSS)
+ && ((tcp->doff * 4 - i) >= TCPOLEN_MSS)
+ && (opt[i+1] == TCPOLEN_MSS)) {
+ u_int16_t mssval;
+
+ mssval = (opt[i+2] << 8) | opt[i+3];
+
+ return (mssval >= min && mssval <= max) ^ invert;
+ }
+ if (opt[i] < 2) i++;
+ else i += opt[i+1]?:1;
+ }
+
+ return invert;
+}
+
+static int
+match(const struct sk_buff *skb,
+ const struct net_device *in,
+ const struct net_device *out,
+ const void *matchinfo,
+ int offset,
+ const void *hdr,
+ u_int16_t datalen,
+ int *hotdrop)
+{
+ const struct ipt_tcpmss_match_info *info = matchinfo;
+ const struct tcphdr *tcph = (void *)skb->nh.iph + skb->nh.iph->ihl*4;
+
+ return mssoption_match(info->mss_min, info->mss_max, tcph,
+ skb->len - skb->nh.iph->ihl*4,
+ info->invert, hotdrop);
+}
+
+static inline int find_syn_match(const struct ipt_entry_match *m)
+{
+ const struct ipt_tcp *tcpinfo = (const struct ipt_tcp *)m->data;
+
+ if (strcmp(m->u.kernel.match->name, "tcp") == 0
+ && (tcpinfo->flg_cmp & TH_SYN)
+ && !(tcpinfo->invflags & IPT_TCP_INV_FLAGS))
+ return 1;
+
+ return 0;
+}
+
+static int
+checkentry(const char *tablename,
+ const struct ipt_ip *ip,
+ void *matchinfo,
+ unsigned int matchsize,
+ unsigned int hook_mask)
+{
+ if (matchsize != IPT_ALIGN(sizeof(struct ipt_tcpmss_match_info)))
+ return 0;
+
+ /* Must specify -p tcp */
+ if (ip->proto != IPPROTO_TCP || (ip->invflags & IPT_INV_PROTO)) {
+ printk("tcpmss: Only works on TCP packets\n");
+ return 0;
+ }
+
+ return 1;
+}
+
+static struct ipt_match tcpmss_match
+= { { NULL, NULL }, "tcpmss", &match, &checkentry, NULL, THIS_MODULE };
+
+static int __init init(void)
+{
+ return ipt_register_match(&tcpmss_match);
+}
+
+static void __exit fini(void)
+{
+ ipt_unregister_match(&tcpmss_match);
+}
+
+module_init(init);
+module_exit(fini);
spin_lock_bh(&ipx_interfaces_lock);
for (i = ipx_interfaces; i;) {
tmp = i->if_next;
- if (i->if_dev == dev)
+ if (i->if_dev == dev) {
if (event == NETDEV_UP)
ipxitf_hold(i);
else
__ipxitf_put(i);
+ }
i = tmp;
}
spin_unlock_bh(&ipx_interfaces_lock);
#
# Makefile for the Linux Traffic Control Unit.
#
-# Note! Dependencies are done automagically by 'make dep', which also
-# removes any old dependencies. DON'T put your own dependencies here
-# unless it's something special (ie not a .c file).
-#
-# Note 2! The CFLAGS definition is now in the main makefile...
O_TARGET := sched.o
obj-y := sch_generic.o
-ifeq ($(CONFIG_NET_SCHED), y)
-
-obj-y += sch_api.o sch_fifo.o
-
-ifeq ($(CONFIG_NET_ESTIMATOR), y)
-obj-y += estimator.o
-endif
-
-ifeq ($(CONFIG_NET_CLS), y)
-obj-y += cls_api.o
-
-ifeq ($(CONFIG_NET_CLS_POLICE), y)
-obj-y += police.o
-endif
-
-endif
-
-ifeq ($(CONFIG_NET_SCH_INGRESS), y)
-obj-y += sch_ingress.o
-else
- ifeq ($(CONFIG_NET_SCH_INGRESS), m)
- obj-m += sch_ingress.o
- endif
-endif
-
-ifeq ($(CONFIG_NET_SCH_CBQ), y)
-obj-y += sch_cbq.o
-else
- ifeq ($(CONFIG_NET_SCH_CBQ), m)
- obj-m += sch_cbq.o
- endif
-endif
-
-ifeq ($(CONFIG_NET_SCH_CSZ), y)
-obj-y += sch_csz.o
-else
- ifeq ($(CONFIG_NET_SCH_CSZ), m)
- obj-m += sch_csz.o
- endif
-endif
-
-ifeq ($(CONFIG_NET_SCH_HPFQ), y)
-obj-y += sch_hpfq.o
-else
- ifeq ($(CONFIG_NET_SCH_HPFQ), m)
- obj-m += sch_hpfq.o
- endif
-endif
-
-ifeq ($(CONFIG_NET_SCH_HFSC), y)
-obj-y += sch_hfsc.o
-else
- ifeq ($(CONFIG_NET_SCH_HFSC), m)
- obj-m += sch_hfsc.o
- endif
-endif
-
-
-ifeq ($(CONFIG_NET_SCH_SFQ), y)
-obj-y += sch_sfq.o
-else
- ifeq ($(CONFIG_NET_SCH_SFQ), m)
- obj-m += sch_sfq.o
- endif
-endif
-
-ifeq ($(CONFIG_NET_SCH_RED), y)
-obj-y += sch_red.o
-else
- ifeq ($(CONFIG_NET_SCH_RED), m)
- obj-m += sch_red.o
- endif
-endif
-
-ifeq ($(CONFIG_NET_SCH_TBF), y)
-obj-y += sch_tbf.o
-else
- ifeq ($(CONFIG_NET_SCH_TBF), m)
- obj-m += sch_tbf.o
- endif
-endif
-
-ifeq ($(CONFIG_NET_SCH_PRIO), y)
-obj-y += sch_prio.o
-else
- ifeq ($(CONFIG_NET_SCH_PRIO), m)
- obj-m += sch_prio.o
- endif
-endif
-
-ifeq ($(CONFIG_NET_SCH_TEQL), y)
-obj-y += sch_teql.o
-else
- ifeq ($(CONFIG_NET_SCH_TEQL), m)
- obj-m += sch_teql.o
- endif
-endif
-
-ifeq ($(CONFIG_NET_SCH_GRED), y)
-obj-y += sch_gred.o
-else
- ifeq ($(CONFIG_NET_SCH_GRED), m)
- obj-m += sch_gred.o
- endif
-endif
-
-ifeq ($(CONFIG_NET_SCH_DSMARK), y)
-obj-y += sch_dsmark.o
-else
- ifeq ($(CONFIG_NET_SCH_DSMARK), m)
- obj-m += sch_dsmark.o
- endif
-endif
-
-ifeq ($(CONFIG_NET_CLS_TCINDEX), y)
-obj-y += cls_tcindex.o
-else
- ifeq ($(CONFIG_NET_CLS_TCINDEX), m)
- obj-m += cls_tcindex.o
- endif
-endif
-
-ifeq ($(CONFIG_NET_SCH_ATM), y)
-obj-y += sch_atm.o
-endif
-
-ifeq ($(CONFIG_NET_CLS_U32), y)
-obj-y += cls_u32.o
-else
- ifeq ($(CONFIG_NET_CLS_U32), m)
- obj-m += cls_u32.o
- endif
-endif
-
-ifeq ($(CONFIG_NET_CLS_RSVP), y)
-obj-y += cls_rsvp.o
-else
- ifeq ($(CONFIG_NET_CLS_RSVP), m)
- obj-m += cls_rsvp.o
- endif
-endif
-
-ifeq ($(CONFIG_NET_CLS_RSVP6), y)
-obj-y += cls_rsvp6.o
-else
- ifeq ($(CONFIG_NET_CLS_RSVP6), m)
- obj-m += cls_rsvp6.o
- endif
-endif
-
-ifeq ($(CONFIG_NET_CLS_ROUTE4), y)
-obj-y += cls_route.o
-else
- ifeq ($(CONFIG_NET_CLS_ROUTE4), m)
- obj-m += cls_route.o
- endif
-endif
-
-ifeq ($(CONFIG_NET_CLS_FW), y)
-obj-y += cls_fw.o
-else
- ifeq ($(CONFIG_NET_CLS_FW), m)
- obj-m += cls_fw.o
- endif
-endif
-endif
+obj-$(CONFIG_NET_SCHED) += sch_api.o sch_fifo.o
+obj-$(CONFIG_NET_ESTIMATOR) += estimator.o
+obj-$(CONFIG_NET_CLS) += cls_api.o
+obj-$(CONFIG_NET_CLS_POLICE) += police.o
+obj-$(CONFIG_NET_SCH_INGRESS) += sch_ingress.o
+obj-$(CONFIG_NET_SCH_CBQ) += sch_cbq.o
+obj-$(CONFIG_NET_SCH_CSZ) += sch_csz.o
+obj-$(CONFIG_NET_SCH_HPFQ) += sch_hpfq.o
+obj-$(CONFIG_NET_SCH_HFSC) += sch_hfsc.o
+obj-$(CONFIG_NET_SCH_SFQ) += sch_sfq.o
+obj-$(CONFIG_NET_SCH_RED) += sch_red.o
+obj-$(CONFIG_NET_SCH_TBF) += sch_tbf.o
+obj-$(CONFIG_NET_SCH_PRIO) += sch_prio.o
+obj-$(CONFIG_NET_SCH_TEQL) += sch_teql.o
+obj-$(CONFIG_NET_SCH_GRED) += sch_gred.o
+obj-$(CONFIG_NET_SCH_DSMARK) += sch_dsmark.o
+obj-$(CONFIG_NET_CLS_TCINDEX) += cls_tcindex.o
+obj-$(CONFIG_NET_SCH_ATM) += sch_atm.o
+obj-$(CONFIG_NET_CLS_U32) += cls_u32.o
+obj-$(CONFIG_NET_CLS_RSVP) += cls_rsvp.o
+obj-$(CONFIG_NET_CLS_RSVP6) += cls_rsvp6.o
+obj-$(CONFIG_NET_CLS_ROUTE4) += cls_route.o
+obj-$(CONFIG_NET_CLS_FW) += cls_fw.o
include $(TOPDIR)/Rules.make
static void tcindex_walk(struct tcf_proto *tp, struct tcf_walker *walker)
{
struct tcindex_data *p = PRIV(tp);
- struct tcindex_filter *f;
+ struct tcindex_filter *f,*next;
int i;
DPRINTK("tcindex_walk(tp %p,walker %p),p %p\n",tp,walker,p);
if (!p->h)
return;
for (i = 0; i < p->hash; i++) {
- for (f = p->h[i]; f; f = f->next) {
+ for (f = p->h[i]; f; f = next) {
+ next = f->next;
if (walker->count >= walker->skip) {
if (walker->fn(tp,(unsigned long) &f->result,
walker) < 0) {
}
}
- q->wd_expires = delay;
+ q->wd_expires = base_delay;
}
}
extern void unix_sysctl_register(void);
extern void unix_sysctl_unregister(void);
#else
-static inline unix_sysctl_register() {};
-static inline unix_sysctl_unregister() {};
+static inline void unix_sysctl_register(void) {}
+static inline void unix_sysctl_unregister(void) {}
#endif
static const char banner[] __initdata = KERN_INFO "NET4: Unix domain sockets 1.0/SMP for Linux NET4.0.\n";