Merge branch 'for-next' of git://git.kernel.org/pub/scm/linux/kernel/git/jikos/trivial
author Linus Torvalds <torvalds@linux-foundation.org>
Thu, 13 Jan 2011 18:05:56 +0000 (10:05 -0800)
committer Linus Torvalds <torvalds@linux-foundation.org>
Thu, 13 Jan 2011 18:05:56 +0000 (10:05 -0800)
* 'for-next' of git://git.kernel.org/pub/scm/linux/kernel/git/jikos/trivial: (43 commits)
  Documentation/trace/events.txt: Remove obsolete sched_signal_send.
  writeback: fix global_dirty_limits comment runtime -> real-time
  ppc: fix comment typo singal -> signal
  drivers: fix comment typo diable -> disable.
  m68k: fix comment typo diable -> disable.
  wireless: comment typo fix diable -> disable.
  media: comment typo fix diable -> disable.
  remove doc for obsolete dynamic-printk kernel-parameter
  remove extraneous 'is' from Documentation/iostats.txt
  Fix spelling milisec -> ms in snd_ps3 module parameter description
  Fix spelling mistakes in comments
  Revert conflicting V4L changes
  i7core_edac: fix typos in comments
  mm/rmap.c: fix comment
  sound, ca0106: Fix assignment to 'channel'.
  hrtimer: fix a typo in comment
  init/Kconfig: fix typo
  anon_inodes: fix wrong function name in comment
  fix comment typos concerning "consistent"
  poll: fix a typo in comment
  ...

Fix up trivial conflicts in:
 - drivers/net/wireless/iwlwifi/iwl-core.c (moved to iwl-legacy.c)
 - fs/ext4/ext4.h

Also fix missed 'diabled' typo in drivers/net/bnx2x/bnx2x.h while at it.

112 files changed:
Documentation/kernel-parameters.txt
Documentation/networking/dccp.txt
Documentation/powerpc/booting-without-of.txt
MAINTAINERS
arch/arm/common/it8152.c
arch/arm/mach-imx/pm-imx27.c
arch/arm/mach-msm/io.c
arch/arm/mach-omap1/pm.c
arch/arm/mach-omap2/cpuidle34xx.c
arch/arm/mach-omap2/pm24xx.c
arch/arm/mach-omap2/pm34xx.c
arch/arm/mach-omap2/pm44xx.c
arch/arm/mach-omap2/serial.c
arch/arm/mach-pxa/sharpsl_pm.c
arch/arm/mm/flush.c
arch/arm/plat-mxc/include/mach/irqs.h
arch/arm/plat-omap/include/plat/omap_hwmod.h
arch/blackfin/mach-bf537/include/mach/defBF534.h
arch/blackfin/mach-common/pm.c
arch/x86/include/asm/processor.h
arch/x86/kernel/head_32.S
arch/x86/platform/mrst/early_printk_mrst.c
drivers/ata/libata-core.c
drivers/base/bus.c
drivers/base/power/main.c
drivers/edac/edac_core.h
drivers/gpu/drm/nouveau/nouveau_bios.c
drivers/gpu/drm/radeon/atombios.h
drivers/infiniband/hw/cxgb4/iw_cxgb4.h
drivers/infiniband/hw/qib/qib_iba7322.c
drivers/input/serio/Kconfig
drivers/input/touchscreen/Kconfig
drivers/media/video/cx25840/cx25840-ir.c
drivers/media/video/omap/omap_vout.c
drivers/media/video/saa7164/saa7164-core.c
drivers/media/video/via-camera.c
drivers/mmc/host/Kconfig
drivers/net/bnx2x/bnx2x.h
drivers/net/bnx2x/bnx2x_main.c
drivers/net/bnx2x/bnx2x_reg.h
drivers/net/bonding/bond_3ad.c
drivers/net/cxgb3/t3_hw.c
drivers/net/e1000/e1000_hw.h
drivers/net/e1000/e1000_main.c
drivers/net/e1000e/82571.c
drivers/net/e1000e/ich8lan.c
drivers/net/e1000e/phy.c
drivers/net/eepro.c
drivers/net/ixgbe/ixgbe_82599.c
drivers/net/ll_temac_main.c
drivers/net/tehuti.c
drivers/net/tun.c
drivers/net/vxge/vxge-traffic.h
drivers/net/wan/dscc4.c
drivers/net/wimax/i2400m/driver.c
drivers/net/wimax/i2400m/i2400m.h
drivers/net/wireless/ath/ath5k/reg.h
drivers/net/wireless/iwlwifi/iwl-agn.c
drivers/net/wireless/iwlwifi/iwl-legacy.c
drivers/net/wireless/iwlwifi/iwl-sta.c
drivers/net/wireless/rt2x00/rt2x00mac.c
drivers/net/wireless/wl1251/wl1251.h
drivers/net/wireless/wl12xx/acx.h
drivers/net/wireless/wl12xx/wl12xx.h
drivers/platform/x86/acer-wmi.c
drivers/platform/x86/eeepc-laptop.c
drivers/platform/x86/fujitsu-laptop.c
drivers/platform/x86/sony-laptop.c
drivers/platform/x86/thinkpad_acpi.c
drivers/s390/net/lcs.c
drivers/s390/scsi/zfcp_cfdc.c
drivers/scsi/bfa/bfa_fcpim.c
drivers/scsi/bfa/bfa_fcs_lport.c
drivers/scsi/libfc/fc_fcp.c
drivers/scsi/lpfc/lpfc_attr.c
drivers/scsi/lpfc/lpfc_hbadisc.c
drivers/scsi/lpfc/lpfc_init.c
drivers/scsi/lpfc/lpfc_sli.c
drivers/scsi/scsi_sysfs.c
drivers/staging/olpc_dcon/olpc_dcon.c
drivers/telephony/ixj.c
drivers/usb/gadget/imx_udc.c
drivers/usb/gadget/langwell_udc.c
drivers/usb/musb/musb_gadget.c
drivers/video/aty/atyfb_base.c
fs/anon_inodes.c
fs/coda/inode.c
fs/ext4/ext4.h
fs/ext4/extents.c
fs/ext4/inode.c
fs/jbd2/transaction.c
fs/xfs/linux-2.6/xfs_super.c
include/linux/suspend.h
init/Kconfig
kernel/hrtimer.c
kernel/perf_event.c
kernel/power/hibernate.c
kernel/power/suspend.c
kernel/sched.c
kernel/sysctl_binary.c
kernel/time/clocksource.c
lib/nlattr.c
mm/page-writeback.c
mm/percpu.c
net/Kconfig
net/core/dev.c
net/decnet/dn_dev.c
net/ipv4/tcp_output.c
net/ipv6/af_inet6.c
scripts/mod/modpost.c
security/apparmor/include/match.h
sound/soc/codecs/max98088.c

@@@ -403,10 -403,6 +403,10 @@@ and is between 256 and 4096 characters
        bttv.pll=       See Documentation/video4linux/bttv/Insmod-options
        bttv.tuner=     and Documentation/video4linux/bttv/CARDLIST
  
 +      bulk_remove=off [PPC]  This parameter disables the use of the pSeries
 +                      firmware feature for flushing multiple hpte entries
 +                      at a time.
 +
        c101=           [NET] Moxa C101 synchronous serial card
  
        cachesize=      [BUGS=X86-32] Override level 2 CPU cache size detection.
  
        dscc4.setup=    [NET]
  
-       dynamic_printk  Enables pr_debug()/dev_dbg() calls if
-                       CONFIG_DYNAMIC_PRINTK_DEBUG has been enabled.
-                       These can also be switched on/off via
-                       <debugfs>/dynamic_printk/modules
        earlycon=       [KNL] Output early console device and options.
                uart[8250],io,<addr>[,options]
                uart[8250],mmio,<addr>[,options]
                             controller
        i8042.nopnp     [HW] Don't use ACPIPnP / PnPBIOS to discover KBD/AUX
                             controllers
 +      i8042.notimeout [HW] Ignore timeout condition signalled by controller
        i8042.reset     [HW] Reset the controller during init and cleanup
        i8042.unlock    [HW] Unlock (ignore) the keylock
  
        mtdparts=       [MTD]
                        See drivers/mtd/cmdlinepart.c.
  
 +      multitce=off    [PPC]  This parameter disables the use of the pSeries
 +                      firmware feature for updating multiple TCE entries
 +                      at a time.
 +
        onenand.bdry=   [HW,MTD] Flex-OneNAND Boundary Configuration
  
                        Format: [die0_boundary][,die0_lock][,die1_boundary][,die1_lock]
  
        nmi_watchdog=   [KNL,BUGS=X86] Debugging features for SMP kernels
                        Format: [panic,][num]
 -                      Valid num: 0,1,2
 +                      Valid num: 0
                        0 - turn nmi_watchdog off
 -                      1 - use the IO-APIC timer for the NMI watchdog
 -                      2 - use the local APIC for the NMI watchdog using
 -                      a performance counter. Note: This will use one
 -                      performance counter and the local APIC's performance
 -                      vector.
                        When panic is specified, panic when an NMI watchdog
                        timeout occurs.
                        This is useful when you use a panic=... timeout and
                        need the box quickly up again.
 -                      Instead of 1 and 2 it is possible to use the following
 -                      symbolic names: lapic and ioapic
 -                      Example: nmi_watchdog=2 or nmi_watchdog=panic,lapic
  
        netpoll.carrier_timeout=
                        [NET] Specifies amount of time (in seconds) that
        noapic          [SMP,APIC] Tells the kernel to not make use of any
                        IOAPICs that may be present in the system.
  
 +      noautogroup     Disable scheduler automatic task group creation.
 +
        nobats          [PPC] Do not use BATs for mapping kernel lowmem
                        on "Classic" PPC cores.
  
  
        nousb           [USB] Disable the USB subsystem
  
 -      nowatchdog      [KNL] Disable the lockup detector.
 +      nowatchdog      [KNL] Disable the lockup detector (NMI watchdog).
  
        nowb            [ARM]
  
                        to facilitate early boot debugging.
                        See also Documentation/trace/events.txt
  
 -      tsc=            Disable clocksource-must-verify flag for TSC.
 +      tsc=            Disable clocksource stability checks for TSC.
                        Format: <string>
                        [x86] reliable: mark tsc clocksource as reliable, this
 -                      disables clocksource verification at runtime.
 -                      Used to enable high-resolution timer mode on older
 -                      hardware, and in virtualized environment.
 +                      disables clocksource verification at runtime, as well
 +                      as the stability checks done at bootup. Used to enable
 +                      high-resolution timer mode on older hardware, and in
 +                      virtualized environments.
                        [x86] noirqtime: Do not use TSC to do irq accounting.
                        Used to run time disable IRQ_TIME_ACCOUNTING on any
                        platforms where RDTSC is slow and this accounting
@@@ -38,35 -38,15 +38,35 @@@ The Linux DCCP implementation does not 
  specified in RFCs 4340...42.
  
  The known bugs are at:
-       http://linux-net.osdl.org/index.php/TODO#DCCP
+       http://www.linuxfoundation.org/collaborate/workgroups/networking/todo#DCCP
  
  For more up-to-date versions of the DCCP implementation, please consider using
  the experimental DCCP test tree; instructions for checking this out are on:
- http://linux-net.osdl.org/index.php/DCCP_Testing#Experimental_DCCP_source_tree
+ http://www.linuxfoundation.org/collaborate/workgroups/networking/dccp_testing#Experimental_DCCP_source_tree
  
  
  Socket options
  ==============
 +DCCP_SOCKOPT_QPOLICY_ID sets the dequeuing policy for outgoing packets. It takes
 +a policy ID as argument and can only be set before the connection (i.e. changes
 +during an established connection are not supported). Currently, two policies are
 +defined: the "simple" policy (DCCPQ_POLICY_SIMPLE), which does nothing special,
 +and a priority-based variant (DCCPQ_POLICY_PRIO). The latter allows passing a
 +u32 priority value as ancillary data to sendmsg(), where higher numbers indicate
 +a higher packet priority (similar to SO_PRIORITY). This ancillary data needs to
 +be formatted using a cmsg(3) message header filled in as follows:
 +      cmsg->cmsg_level = SOL_DCCP;
 +      cmsg->cmsg_type  = DCCP_SCM_PRIORITY;
 +      cmsg->cmsg_len   = CMSG_LEN(sizeof(uint32_t));  /* or CMSG_LEN(4) */
 +
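As a minimal sketch (assuming an already-connected DCCP socket sk, with
<sys/socket.h>, <string.h>, <stdint.h> and <linux/dccp.h> included; error
handling elided), one packet can be sent with an attached priority like this:

	uint32_t prio = 42;
	char buf[] = "payload";
	char cbuf[CMSG_SPACE(sizeof(prio))];
	struct iovec iov = { .iov_base = buf, .iov_len = sizeof(buf) };
	struct msghdr msg = { .msg_iov = &iov, .msg_iovlen = 1,
			      .msg_control = cbuf, .msg_controllen = sizeof(cbuf) };
	struct cmsghdr *cmsg = CMSG_FIRSTHDR(&msg);

	/* fill in the ancillary data exactly as described above */
	cmsg->cmsg_level = SOL_DCCP;
	cmsg->cmsg_type  = DCCP_SCM_PRIORITY;
	cmsg->cmsg_len   = CMSG_LEN(sizeof(prio));
	memcpy(CMSG_DATA(cmsg), &prio, sizeof(prio));
	sendmsg(sk, &msg, 0);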
 +DCCP_SOCKOPT_QPOLICY_TXQLEN sets the maximum length of the output queue. A zero
 +value is always interpreted as unbounded queue length. If different from zero,
 +the interpretation of this parameter depends on the current dequeuing policy
 +(see above): the "simple" policy will enforce a fixed queue size by returning
 +EAGAIN, whereas the "prio" policy enforces a fixed queue length by dropping the
 +lowest-priority packet first. The default value for this parameter is
 +initialised from /proc/sys/net/dccp/default/tx_qlen.
 +
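A corresponding sketch for selecting the policy and bounding the queue
(again illustrative only; sk is a not-yet-connected DCCP socket and return
values are left unchecked):

	int policy = DCCPQ_POLICY_PRIO;
	int qlen   = 16;

	/* the qpolicy must be chosen before the connection is established */
	setsockopt(sk, SOL_DCCP, DCCP_SOCKOPT_QPOLICY_ID, &policy, sizeof(policy));
	/* non-zero: drop lowest-priority packets once 16 packets are queued */
	setsockopt(sk, SOL_DCCP, DCCP_SOCKOPT_QPOLICY_TXQLEN, &qlen, sizeof(qlen));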
  DCCP_SOCKOPT_SERVICE sets the service. The specification mandates use of
  service codes (RFC 4340, sec. 8.1.2); if this socket option is not set,
  the socket will fall back to 0 (which means that no meaningful service code
@@@ -167,7 -147,6 +167,7 @@@ rx_ccid = 
  seq_window = 100
        The initial sequence window (sec. 7.5.2) of the sender. This influences
        the local ackno validity and the remote seqno validity windows (7.5.1).
 +      Values in the range Wmin = 32 (RFC 4340, 7.5.2) up to 2^32-1 can be set.
  
  tx_qlen = 5
        The size of the transmit buffer in packets. A value of 0 corresponds
@@@ -131,7 -131,7 +131,7 @@@ order to avoid the degeneration that ha
  point and the way a new platform should be added to the kernel. The
  legacy iSeries platform breaks those rules as it predates this scheme,
  but no new board support will be accepted in the main tree that
 -doesn't follows them properly.  In addition, since the advent of the
 +doesn't follow them properly.  In addition, since the advent of the
  arch/powerpc merged architecture for ppc32 and ppc64, new 32-bit
  platforms and 32-bit platforms which move into arch/powerpc will be
  required to use these rules as well.
@@@ -1025,7 -1025,7 +1025,7 @@@ dtc source code can be found a
  
  WARNING: This version is still in early development stage; the
  resulting device-tree "blobs" have not yet been validated with the
 -kernel. The current generated bloc lacks a useful reserve map (it will
 +kernel. The current generated block lacks a useful reserve map (it will
  be fixed to generate an empty one, it's up to the bootloader to fill
  it up) among others. The error handling needs work, bugs are lurking,
  etc...
@@@ -1098,7 -1098,7 +1098,7 @@@ supported currently at the toplevel
                                   * an arbitrary array of bytes
                                   */
  
-   childnode@addresss {        /* define a child node named "childnode"
+   childnode@address { /* define a child node named "childnode"
                                   * whose unit name is "childnode at
                                 * address"
                                   */
diff --combined MAINTAINERS
@@@ -166,8 -166,9 +166,8 @@@ F: drivers/serial/8250
  F:    include/linux/serial_8250.h
  
  8390 NETWORK DRIVERS [WD80x3/SMC-ELITE, SMC-ULTRA, NE2000, 3C503, etc.]
 -M:    Paul Gortmaker <p_gortmaker@yahoo.com>
  L:    netdev@vger.kernel.org
 -S:    Maintained
 +S:    Orphan / Obsolete
  F:    drivers/net/*8390*
  F:    drivers/net/ax88796.c
  
@@@ -285,41 -286,6 +285,41 @@@ L:       linux-parisc@vger.kernel.or
  S:    Maintained
  F:    sound/pci/ad1889.*
  
 +AD525X ANALOG DEVICES DIGITAL POTENTIOMETERS DRIVER
 +M:    Michael Hennerich <michael.hennerich@analog.com>
 +L:    device-driver-devel@blackfin.uclinux.org
 +W:    http://wiki-analog.com/AD5254
 +S:    Supported
 +F:    drivers/misc/ad525x_dpot.c
 +
 +AD5398 CURRENT REGULATOR DRIVER (AD5398/AD5821)
 +M:    Michael Hennerich <michael.hennerich@analog.com>
 +L:    device-driver-devel@blackfin.uclinux.org
 +W:    http://wiki-analog.com/AD5398
 +S:    Supported
 +F:    drivers/regulator/ad5398.c
 +
 +AD714X CAPACITANCE TOUCH SENSOR DRIVER (AD7142/3/7/8/7A)
 +M:    Michael Hennerich <michael.hennerich@analog.com>
 +L:    device-driver-devel@blackfin.uclinux.org
 +W:    http://wiki-analog.com/AD7142
 +S:    Supported
 +F:    drivers/input/misc/ad714x.c
 +
 +AD7877 TOUCHSCREEN DRIVER
 +M:    Michael Hennerich <michael.hennerich@analog.com>
 +L:    device-driver-devel@blackfin.uclinux.org
 +W:    http://wiki-analog.com/AD7877
 +S:    Supported
 +F:    drivers/input/touchscreen/ad7877.c
 +
 +AD7879 TOUCHSCREEN DRIVER (AD7879/AD7889)
 +M:    Michael Hennerich <michael.hennerich@analog.com>
 +L:    device-driver-devel@blackfin.uclinux.org
 +W:    http://wiki-analog.com/AD7879
 +S:    Supported
 +F:    drivers/input/touchscreen/ad7879.c
 +
  ADM1025 HARDWARE MONITOR DRIVER
  M:    Jean Delvare <khali@linux-fr.org>
  L:    lm-sensors@lm-sensors.org
@@@ -339,32 -305,6 +339,32 @@@ W:       http://linuxwireless.org
  S:    Orphan
  F:    drivers/net/wireless/adm8211.*
  
 +ADP5520 BACKLIGHT DRIVER WITH IO EXPANDER (ADP5520/ADP5501)
 +M:    Michael Hennerich <michael.hennerich@analog.com>
 +L:    device-driver-devel@blackfin.uclinux.org
 +W:    http://wiki-analog.com/ADP5520
 +S:    Supported
 +F:    drivers/mfd/adp5520.c
 +F:    drivers/video/backlight/adp5520_bl.c
 +F:    drivers/led/leds-adp5520.c
 +F:    drivers/gpio/adp5520-gpio.c
 +F:    drivers/input/keyboard/adp5520-keys.c
 +
 +ADP5588 QWERTY KEYPAD AND IO EXPANDER DRIVER (ADP5588/ADP5587)
 +M:    Michael Hennerich <michael.hennerich@analog.com>
 +L:    device-driver-devel@blackfin.uclinux.org
 +W:    http://wiki-analog.com/ADP5588
 +S:    Supported
 +F:    drivers/input/keyboard/adp5588-keys.c
 +F:    drivers/gpio/adp5588-gpio.c
 +
 +ADP8860 BACKLIGHT DRIVER (ADP8860/ADP8861/ADP8863)
 +M:    Michael Hennerich <michael.hennerich@analog.com>
 +L:    device-driver-devel@blackfin.uclinux.org
 +W:    http://wiki-analog.com/ADP8860
 +S:    Supported
 +F:    drivers/video/backlight/adp8860_bl.c
 +
  ADT746X FAN DRIVER
  M:    Colin Leroy <colin@colino.net>
  S:    Maintained
@@@ -377,13 -317,6 +377,13 @@@ S:       Maintaine
  F:    Documentation/hwmon/adt7475
  F:    drivers/hwmon/adt7475.c
  
 +ADXL34X THREE-AXIS DIGITAL ACCELEROMETER DRIVER (ADXL345/ADXL346)
 +M:    Michael Hennerich <michael.hennerich@analog.com>
 +L:    device-driver-devel@blackfin.uclinux.org
 +W:    http://wiki-analog.com/ADXL345
 +S:    Supported
 +F:    drivers/input/misc/adxl34x.c
 +
  ADVANSYS SCSI DRIVER
  M:    Matthew Wilcox <matthew@wil.cx>
  L:    linux-scsi@vger.kernel.org
@@@ -472,7 -405,7 +472,7 @@@ S: Supporte
  F:    drivers/usb/gadget/amd5536udc.*
  
  AMD GEODE PROCESSOR/CHIPSET SUPPORT
 -P:    Jordan Crouse
 +P:    Andres Salomon <dilinger@queued.net>
  L:    linux-geode@lists.infradead.org (moderated for non-subscribers)
  W:    http://www.amd.com/us-en/ConnectivitySolutions/TechnicalResources/0,,50_2334_2452_11363,00.html
  S:    Supported
@@@ -496,6 -429,7 +496,6 @@@ S: Supporte
  F:    arch/x86/kernel/microcode_amd.c
  
  AMS (Apple Motion Sensor) DRIVER
 -M:    Stelian Pop <stelian@popies.net>
  M:    Michael Hanselmann <linux-kernel@hansmi.ch>
  S:    Supported
  F:    drivers/macintosh/ams/
@@@ -507,23 -441,17 +507,23 @@@ L:      linux-rdma@vger.kernel.or
  S:    Maintained
  F:    drivers/infiniband/hw/amso1100/
  
 -ANALOG DEVICES INC ASOC DRIVERS
 -L:    uclinux-dist-devel@blackfin.uclinux.org
 +ANALOG DEVICES INC ASOC CODEC DRIVERS
 +L:    device-driver-devel@blackfin.uclinux.org
  L:    alsa-devel@alsa-project.org (moderated for non-subscribers)
 -W:    http://blackfin.uclinux.org/
 +W:    http://wiki-analog.com/
  S:    Supported
 -F:    sound/soc/blackfin/*
  F:    sound/soc/codecs/ad1*
  F:    sound/soc/codecs/adau*
  F:    sound/soc/codecs/adav*
  F:    sound/soc/codecs/ssm*
  
 +ANALOG DEVICES INC ASOC DRIVERS
 +L:    uclinux-dist-devel@blackfin.uclinux.org
 +L:    alsa-devel@alsa-project.org (moderated for non-subscribers)
 +W:    http://blackfin.uclinux.org/
 +S:    Supported
 +F:    sound/soc/blackfin/*
 +
  AOA (Apple Onboard Audio) ALSA DRIVER
  M:    Johannes Berg <johannes@sipsolutions.net>
  L:    linuxppc-dev@lists.ozlabs.org
@@@ -864,14 -792,11 +864,14 @@@ S:      Maintaine
  
  ARM/NOMADIK ARCHITECTURE
  M:    Alessandro Rubini <rubini@unipv.it>
 +M:    Linus Walleij <linus.walleij@stericsson.com>
  M:    STEricsson <STEricsson_nomadik_linux@list.st.com>
  L:    linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
  S:    Maintained
  F:    arch/arm/mach-nomadik/
  F:    arch/arm/plat-nomadik/
 +F:    drivers/i2c/busses/i2c-nomadik.c
 +T:    git git://git.kernel.org/pub/scm/linux/kernel/git/linusw/linux-stericsson.git
  
  ARM/OPENMOKO NEO FREERUNNER (GTA02) MACHINE SUPPORT
  M:    Nelson Castillo <arhuaco@freaks-unidos.net>
@@@ -1073,24 -998,12 +1073,24 @@@ F:    drivers/i2c/busses/i2c-stu300.
  F:    drivers/rtc/rtc-coh901331.c
  F:    drivers/watchdog/coh901327_wdt.c
  F:    drivers/dma/coh901318*
 +F:    drivers/mfd/ab3100*
 +F:    drivers/rtc/rtc-ab3100.c
 +F:    drivers/rtc/rtc-coh901331.c
 +T:    git git://git.kernel.org/pub/scm/linux/kernel/git/linusw/linux-stericsson.git
  
 -ARM/U8500 ARM ARCHITECTURE
 +ARM/Ux500 ARM ARCHITECTURE
  M:    Srinidhi Kasagar <srinidhi.kasagar@stericsson.com>
 +M:    Linus Walleij <linus.walleij@stericsson.com>
  L:    linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
  S:    Maintained
  F:    arch/arm/mach-ux500/
 +F:    drivers/dma/ste_dma40*
 +F:    drivers/mfd/ab3550*
 +F:    drivers/mfd/abx500*
 +F:    drivers/mfd/ab8500*
 +F:    drivers/mfd/stmpe*
 +F:    drivers/rtc/rtc-ab8500.c
 +T:    git git://git.kernel.org/pub/scm/linux/kernel/git/linusw/linux-stericsson.git
  
  ARM/VFP SUPPORT
  M:    Russell King <linux@arm.linux.org.uk>
@@@ -1167,12 -1080,6 +1167,12 @@@ S:    Supporte
  F:    Documentation/aoe/
  F:    drivers/block/aoe/
  
 +ATHEROS ATH GENERIC UTILITIES
 +M:    "Luis R. Rodriguez" <lrodriguez@atheros.com>
 +L:    linux-wireless@vger.kernel.org
 +S:    Supported
 +F:    drivers/net/wireless/ath/*
 +
  ATHEROS ATH5K WIRELESS DRIVER
  M:    Jiri Slaby <jirislaby@gmail.com>
  M:    Nick Kossifidis <mickflemm@gmail.com>
@@@ -1351,15 -1258,6 +1351,15 @@@ S:    Maintaine
  F:    drivers/video/backlight/
  F:    include/linux/backlight.h
  
 +BATMAN ADVANCED
 +M:    Marek Lindner <lindner_marek@yahoo.de>
 +M:    Simon Wunderlich <siwu@hrz.tu-chemnitz.de>
 +M:    Sven Eckelmann <sven@narfation.org>
 +L:    b.a.t.m.a.n@lists.open-mesh.org
 +W:    http://www.open-mesh.org/
 +S:    Maintained
 +F:    net/batman-adv/
 +
  BAYCOM/HDLCDRV DRIVERS FOR AX.25
  M:    Thomas Sailer <t.sailer@alumni.ethz.ch>
  L:    linux-hams@vger.kernel.org
@@@ -1496,9 -1394,7 +1496,9 @@@ F:      drivers/net/tg3.
  BROADCOM BRCM80211 IEEE802.11n WIRELESS DRIVER
  M:    Brett Rudley <brudley@broadcom.com>
  M:    Henry Ptasinski <henryp@broadcom.com>
 -M:    Nohee Ko <noheek@broadcom.com>
 +M:    Dowan Kim <dowan@broadcom.com>
 +M:    Roland Vossen <rvossen@broadcom.com>
 +M:    Arend van Spriel <arend@broadcom.com>
  L:    linux-wireless@vger.kernel.org
  S:    Supported
  F:    drivers/staging/brcm80211/
@@@ -1784,8 -1680,7 +1784,8 @@@ S:      Maintaine
  F:    drivers/usb/atm/cxacru.c
  
  CONFIGFS
 -M:    Joel Becker <joel.becker@oracle.com>
 +M:    Joel Becker <jlbec@evilplan.org>
 +T:    git git://git.kernel.org/pub/scm/linux/kernel/git/jlbec/configfs.git
  S:    Supported
  F:    fs/configfs/
  F:    include/linux/configfs.h
@@@ -2007,7 -1902,7 +2007,7 @@@ F:      drivers/scsi/dc395x.
  DCCP PROTOCOL
  M:    Arnaldo Carvalho de Melo <acme@ghostprotocols.net>
  L:    dccp@vger.kernel.org
- W:    http://linux-net.osdl.org/index.php/DCCP
+ W:    http://www.linuxfoundation.org/collaborate/workgroups/networking/dccp
  S:    Maintained
  F:    include/linux/dccp.h
  F:    include/linux/tfrc.h
@@@ -2347,14 -2242,6 +2347,14 @@@ W:    http://acpi4asus.sf.ne
  S:    Maintained
  F:    drivers/platform/x86/eeepc-laptop.c
  
 +EEEPC WMI EXTRAS DRIVER
 +M:    Corentin Chary <corentincj@iksaif.net>
 +L:    acpi4asus-user@lists.sourceforge.net
 +L:    platform-driver-x86@vger.kernel.org
 +W:    http://acpi4asus.sf.net
 +S:    Maintained
 +F:    drivers/platform/x86/eeepc-wmi.c
 +
  EFIFB FRAMEBUFFER DRIVER
  L:    linux-fbdev@vger.kernel.org
  M:    Peter Jones <pjones@redhat.com>
@@@ -2429,7 -2316,7 +2429,7 @@@ ETHERNET BRIDG
  M:    Stephen Hemminger <shemminger@linux-foundation.org>
  L:    bridge@lists.linux-foundation.org
  L:    netdev@vger.kernel.org
- W:    http://www.linux-foundation.org/en/Net:Bridge
+ W:    http://www.linuxfoundation.org/en/Net:Bridge
  S:    Maintained
  F:    include/linux/netfilter_bridge/
  F:    net/bridge/
@@@ -2692,14 -2579,6 +2692,14 @@@ S:    Supporte
  F:    drivers/i2c/busses/i2c-gpio.c
  F:    include/linux/i2c-gpio.h
  
 +GENERIC GPIO I2C MULTIPLEXER DRIVER
 +M:    Peter Korsgaard <peter.korsgaard@barco.com>
 +L:    linux-i2c@vger.kernel.org
 +S:    Supported
 +F:    drivers/i2c/muxes/gpio-i2cmux.c
 +F:    include/linux/gpio-i2cmux.h
 +F:    Documentation/i2c/muxes/gpio-i2cmux
 +
  GENERIC HDLC (WAN) DRIVERS
  M:    Krzysztof Halasa <khc@pm.waw.pl>
  W:    http://www.kernel.org/pub/linux/utils/net/hdlc/
@@@ -2918,10 -2797,6 +2918,10 @@@ M:    Thomas Gleixner <tglx@linutronix.de
  S:    Maintained
  F:    Documentation/timers/
  F:    kernel/hrtimer.c
 +F:    kernel/time/clockevents.c
 +F:    kernel/time/tick*.*
 +F:    kernel/time/timer_*.c
 +F:   include/linux/clockevents.h
  F:    include/linux/hrtimer.h
  
  HIGH-SPEED SCC DRIVER FOR AX.25
@@@ -3157,10 -3032,8 +3157,10 @@@ F:    drivers/input
  INPUT MULTITOUCH (MT) PROTOCOL
  M:    Henrik Rydberg <rydberg@euromail.se>
  L:    linux-input@vger.kernel.org
 +T:    git git://git.kernel.org/pub/scm/linux/kernel/git/rydberg/input-mt.git
  S:    Maintained
  F:    Documentation/input/multi-touch-protocol.txt
 +F:    drivers/input/input-mt.c
  K:    \b(ABS|SYN)_MT_
  
  INTEL IDLE DRIVER
@@@ -3247,8 -3120,6 +3247,8 @@@ M:      Alex Duyck <alexander.h.duyck@intel.
  M:    John Ronciak <john.ronciak@intel.com>
  L:    e1000-devel@lists.sourceforge.net
  W:    http://e1000.sourceforge.net/
 +T:    git git://git.kernel.org/pub/scm/linux/kernel/git/jkirsher/net-2.6.git
 +T:    git git://git.kernel.org/pub/scm/linux/kernel/git/jkirsher/net-next-2.6.git
  S:    Supported
  F:    Documentation/networking/e100.txt
  F:    Documentation/networking/e1000.txt
@@@ -4092,8 -3963,9 +4092,8 @@@ F:      include/linux/module.
  F:    kernel/module.c
  
  MOTION EYE VAIO PICTUREBOOK CAMERA DRIVER
 -M:    Stelian Pop <stelian@popies.net>
  W:    http://popies.net/meye/
 -S:    Maintained
 +S:    Orphan
  F:    Documentation/video4linux/meye.txt
  F:    drivers/media/video/meye.*
  F:    include/linux/meye.h
@@@ -4359,7 -4231,6 +4359,7 @@@ NILFS2 FILESYSTE
  M:    KONISHI Ryusuke <konishi.ryusuke@lab.ntt.co.jp>
  L:    linux-nilfs@vger.kernel.org
  W:    http://www.nilfs.org/en/
 +T:    git git://git.kernel.org/pub/scm/linux/kernel/git/ryusuke/nilfs2.git
  S:    Supported
  F:    Documentation/filesystems/nilfs2.txt
  F:    fs/nilfs2/
@@@ -4381,11 -4252,11 +4381,11 @@@ F:   Documentation/scsi/NinjaSCSI.tx
  F:    drivers/scsi/nsp32*
  
  NTFS FILESYSTEM
 -M:    Anton Altaparmakov <aia21@cantab.net>
 +M:    Anton Altaparmakov <anton@tuxera.com>
  L:    linux-ntfs-dev@lists.sourceforge.net
 -W:    http://www.linux-ntfs.org/
 +W:    http://www.tuxera.com/
  T:    git git://git.kernel.org/pub/scm/linux/kernel/git/aia21/ntfs-2.6.git
 -S:    Maintained
 +S:    Supported
  F:    Documentation/filesystems/ntfs.txt
  F:    fs/ntfs/
  
@@@ -4457,20 -4328,6 +4457,20 @@@ M:    Deepak Saxena <dsaxena@plexity.net
  S:    Maintained
  F:    drivers/char/hw_random/omap-rng.c
  
 +OMAP HWMOD SUPPORT
 +M:    Benoît Cousson <b-cousson@ti.com>
 +M:    Paul Walmsley <paul@pwsan.com>
 +L:    linux-omap@vger.kernel.org
 +S:    Maintained
 +F:    arch/arm/mach-omap2/omap_hwmod.c
 +F:    arch/arm/plat-omap/include/plat/omap_hwmod.h
 +
 +OMAP HWMOD DATA FOR OMAP4-BASED DEVICES
 +M:    Benoît Cousson <b-cousson@ti.com>
 +L:    linux-omap@vger.kernel.org
 +S:    Maintained
 +F:    arch/arm/mach-omap2/omap_hwmod_44xx_data.c
 +
  OMAP USB SUPPORT
  M:    Felipe Balbi <balbi@ti.com>
  M:    David Brownell <dbrownell@users.sourceforge.net>
@@@ -4548,7 -4405,7 +4548,7 @@@ F:      include/linux/oprofile.
  
  ORACLE CLUSTER FILESYSTEM 2 (OCFS2)
  M:    Mark Fasheh <mfasheh@suse.com>
 -M:    Joel Becker <joel.becker@oracle.com>
 +M:    Joel Becker <jlbec@evilplan.org>
  L:    ocfs2-devel@oss.oracle.com (moderated for non-subscribers)
  W:    http://oss.oracle.com/projects/ocfs2/
  T:    git git://git.kernel.org/pub/scm/linux/kernel/git/jlbec/ocfs2.git
@@@ -4633,7 -4490,7 +4633,7 @@@ M:      Jeremy Fitzhardinge <jeremy@xensourc
  M:    Chris Wright <chrisw@sous-sol.org>
  M:    Alok Kataria <akataria@vmware.com>
  M:    Rusty Russell <rusty@rustcorp.com.au>
- L:    virtualization@lists.osdl.org
+ L:    virtualization@lists.linux-foundation.org
  S:    Supported
  F:    Documentation/ia64/paravirt_ops.txt
  F:    arch/*/kernel/paravirt*
@@@ -4733,7 -4590,7 +4733,7 @@@ F:      drivers/pcmcia
  F:    include/pcmcia/
  
  PCNET32 NETWORK DRIVER
 -M:    Don Fry <pcnet32@verizon.net>
 +M:    Don Fry <pcnet32@frontier.com>
  L:    netdev@vger.kernel.org
  S:    Maintained
  F:    drivers/net/pcnet32.c
@@@ -4745,16 -4602,6 +4745,16 @@@ S:    Maintaine
  F:    crypto/pcrypt.c
  F:    include/crypto/pcrypt.h
  
 +PER-CPU MEMORY ALLOCATOR
 +M:    Tejun Heo <tj@kernel.org>
 +M:    Christoph Lameter <cl@linux-foundation.org>
 +L:    linux-kernel@vger.kernel.org
 +T:    git git://git.kernel.org/pub/scm/linux/kernel/git/tj/percpu.git
 +S:    Maintained
 +F:    include/linux/percpu*.h
 +F:    mm/percpu*.c
 +F:    arch/*/include/asm/percpu.h
 +
  PER-TASK DELAY ACCOUNTING
  M:    Balbir Singh <balbir@linux.vnet.ibm.com>
  S:    Maintained
@@@ -4765,7 -4612,7 +4765,7 @@@ PERFORMANCE EVENTS SUBSYSTE
  M:    Peter Zijlstra <a.p.zijlstra@chello.nl>
  M:    Paul Mackerras <paulus@samba.org>
  M:    Ingo Molnar <mingo@elte.hu>
 -M:    Arnaldo Carvalho de Melo <acme@redhat.com>
 +M:    Arnaldo Carvalho de Melo <acme@ghostprotocols.net>
  S:    Supported
  F:    kernel/perf_event*.c
  F:    include/linux/perf_event.h
@@@ -5131,6 -4978,11 +5131,6 @@@ F:     kernel/rcu
  F:    kernel/srcu*
  X:    kernel/rcutorture.c
  
 -REAL TIME CLOCK DRIVER (LEGACY)
 -M:    Paul Gortmaker <p_gortmaker@yahoo.com>
 -S:    Maintained
 -F:    drivers/char/rtc.c
 -
  REAL TIME CLOCK (RTC) SUBSYSTEM
  M:    Alessandro Zummo <a.zummo@towertech.it>
  L:    rtc-linux@googlegroups.com
@@@ -5185,7 -5037,7 +5185,7 @@@ L:      linux-wireless@vger.kernel.or
  W:    http://linuxwireless.org/
  T:    git git://git.kernel.org/pub/scm/linux/kernel/git/linville/wireless-testing.git
  S:    Maintained
 -F:    drivers/net/wireless/rtl818x/rtl8180*
 +F:    drivers/net/wireless/rtl818x/rtl8180/
  
  RTL8187 WIRELESS DRIVER
  M:    Herton Ronaldo Krzesinski <herton@mandriva.com.br>
@@@ -5195,17 -5047,7 +5195,17 @@@ L:    linux-wireless@vger.kernel.or
  W:    http://linuxwireless.org/
  T:    git git://git.kernel.org/pub/scm/linux/kernel/git/linville/wireless-testing.git
  S:    Maintained
 -F:    drivers/net/wireless/rtl818x/rtl8187*
 +F:    drivers/net/wireless/rtl818x/rtl8187/
 +
 +RTL8192CE WIRELESS DRIVER
 +M:    Larry Finger <Larry.Finger@lwfinger.net>
 +M:    Chaoming Li <chaoming_li@realsil.com.cn>
 +L:    linux-wireless@vger.kernel.org
 +W:    http://linuxwireless.org/
 +T:    git git://git.kernel.org/pub/scm/linux/kernel/git/linville/wireless-testing.git
 +S:    Maintained
 +F:    drivers/net/wireless/rtlwifi/
 +F:    drivers/net/wireless/rtlwifi/rtl8192ce/
  
  S3 SAVAGE FRAMEBUFFER DRIVER
  M:    Antonino Daplas <adaplas@gmail.com>
@@@ -5285,18 -5127,6 +5285,18 @@@ L:    alsa-devel@alsa-project.org (moderat
  S:    Supported
  F:    sound/soc/s3c24xx
  
 +TIMEKEEPING, NTP
 +M:    John Stultz <johnstul@us.ibm.com>
 +M:    Thomas Gleixner <tglx@linutronix.de>
 +S:    Supported
 +F:    include/linux/clocksource.h
 +F:    include/linux/time.h
 +F:    include/linux/timex.h
 +F:    include/linux/timekeeping.h
 +F:    kernel/time/clocksource.c
 +F:    kernel/time/time*.c
 +F:    kernel/time/ntp.c
 +
  TLG2300 VIDEO4LINUX-2 DRIVER
  M:    Huang Shijie <shijie8@gmail.com>
  M:    Kang Yong <kangyong@telegent.com>
@@@ -5867,6 -5697,12 +5867,6 @@@ M:     Ion Badulescu <ionut@badula.org
  S:    Odd Fixes
  F:    drivers/net/starfire*
  
 -STRADIS MPEG-2 DECODER DRIVER
 -M:    Nathan Laredo <laredo@gnu.org>
 -W:    http://www.stradis.com/
 -S:    Maintained
 -F:    drivers/media/video/stradis.c
 -
  SUN3/3X
  M:    Sam Creasey <sammy@sammy.net>
  W:    http://sammy.net/sun3/
@@@ -6016,8 -5852,7 +6016,8 @@@ F:      drivers/net/tlan.
  TOMOYO SECURITY MODULE
  M:    Kentaro Takeda <takedakn@nttdata.co.jp>
  M:    Tetsuo Handa <penguin-kernel@I-love.SAKURA.ne.jp>
 -L:    tomoyo-users-en@lists.sourceforge.jp (subscribers-only, for developers and users in English)
 +L:    tomoyo-dev-en@lists.sourceforge.jp (subscribers-only, for developers in English)
 +L:    tomoyo-users-en@lists.sourceforge.jp (subscribers-only, for users in English)
  L:    tomoyo-dev@lists.sourceforge.jp (subscribers-only, for developers in Japanese)
  L:    tomoyo-users@lists.sourceforge.jp (subscribers-only, for users in Japanese)
  W:    http://tomoyo.sourceforge.jp/
@@@ -6530,7 -6365,7 +6530,7 @@@ F:      include/linux/virtio_console.
  VIRTIO HOST (VHOST)
  M:    "Michael S. Tsirkin" <mst@redhat.com>
  L:    kvm@vger.kernel.org
- L:    virtualization@lists.osdl.org
+ L:    virtualization@lists.linux-foundation.org
  L:    netdev@vger.kernel.org
  S:    Maintained
  F:    drivers/vhost/
@@@ -6580,7 -6415,7 +6580,7 @@@ F:      net/8021q
  
  VLYNQ BUS
  M:    Florian Fainelli <florian@openwrt.org>
 -L:    openwrt-devel@lists.openwrt.org
 +L:    openwrt-devel@lists.openwrt.org (subscribers-only)
  S:    Maintained
  F:    drivers/vlynq/vlynq.c
  F:    include/linux/vlynq.h
@@@ -6800,7 -6635,7 +6800,7 @@@ XEN HYPERVISOR INTERFAC
  M:    Jeremy Fitzhardinge <jeremy.fitzhardinge@citrix.com>
  M:    Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
  L:    xen-devel@lists.xensource.com (moderated for non-subscribers)
- L:    virtualization@lists.osdl.org
+ L:    virtualization@lists.linux-foundation.org
  S:    Supported
  F:    arch/x86/xen/
  F:    drivers/*/xen-*front.c
diff --combined arch/arm/common/it8152.c
@@@ -236,7 -236,7 +236,7 @@@ static struct resource it8152_mem = 
  
  /*
   * The following functions are needed for DMA bouncing.
-  * ITE8152 chip can addrees up to 64MByte, so all the devices
+  * ITE8152 chip can address up to 64MByte, so all the devices
   * connected to ITE8152 (PCI and USB) should have limited DMA window
   */
  
@@@ -352,4 -352,3 +352,4 @@@ struct pci_bus * __init it8152_pci_scan
        return pci_scan_bus(nr, &it8152_ops, sys);
  }
  
 +EXPORT_SYMBOL(dma_set_coherent_mask);
@@@ -32,16 -32,13 +32,16 @@@ static int mx27_suspend_enter(suspend_s
        return 0;
  }
  
- static struct platform_suspend_ops mx27_suspend_ops = {
+ static const struct platform_suspend_ops mx27_suspend_ops = {
        .enter = mx27_suspend_enter,
        .valid = suspend_valid_only_mem,
  };
  
  static int __init mx27_pm_init(void)
  {
 +      if (!cpu_is_mx27())
 +              return 0;
 +
        suspend_set_ops(&mx27_suspend_ops);
        return 0;
  }
diff --combined arch/arm/mach-msm/io.c
@@@ -105,7 -105,6 +105,7 @@@ static struct map_desc msm8x60_io_desc[
        MSM_DEVICE(QGIC_DIST),
        MSM_DEVICE(QGIC_CPU),
        MSM_DEVICE(TMR),
 +      MSM_DEVICE(TMR0),
        MSM_DEVICE(ACC),
        MSM_DEVICE(GCC),
  };
@@@ -154,7 -153,7 +154,7 @@@ __msm_ioremap(unsigned long phys_addr, 
  {
        if (mtype == MT_DEVICE) {
                /* The peripherals in the 88000000 - D0000000 range
-                * are only accessable by type MT_DEVICE_NONSHARED.
+                * are only accessible by type MT_DEVICE_NONSHARED.
                 * Adjust mtype as necessary to make this "just work."
                 */
                if ((phys_addr >= 0x88000000) && (phys_addr < 0xD0000000))
        return __arm_ioremap_caller(phys_addr, size, mtype,
                __builtin_return_address(0));
  }
 +EXPORT_SYMBOL(__msm_ioremap);
diff --combined arch/arm/mach-omap1/pm.c
@@@ -647,7 -647,7 +647,7 @@@ static struct irqaction omap_wakeup_ir
  
  
  
- static struct platform_suspend_ops omap_pm_ops ={
+ static const struct platform_suspend_ops omap_pm_ops = {
        .prepare        = omap_pm_prepare,
        .enter          = omap_pm_enter,
        .finish         = omap_pm_finish,
@@@ -661,9 -661,6 +661,9 @@@ static int __init omap_pm_init(void
        int error;
  #endif
  
 +      if (!cpu_class_is_omap1())
 +              return -ENODEV;
 +
        printk("Power Management for TI OMAP.\n");
  
        /*
@@@ -27,8 -27,8 +27,8 @@@
  
  #include <plat/prcm.h>
  #include <plat/irqs.h>
 -#include <plat/powerdomain.h>
 -#include <plat/clockdomain.h>
 +#include "powerdomain.h"
 +#include "clockdomain.h"
  #include <plat/serial.h>
  
  #include "pm.h"
@@@ -252,7 -252,7 +252,7 @@@ static int omap3_enter_idle_bm(struct c
         * FIXME: we currently manage device-specific idle states
         *        for PER and CORE in combination with CPU-specific
         *        idle states.  This is wrong, and device-specific
-        *        idle managment needs to be separated out into 
+        *        idle management needs to be separated out into 
         *        its own code.
         */
  
@@@ -293,26 -293,25 +293,26 @@@ select_state
  DEFINE_PER_CPU(struct cpuidle_device, omap3_idle_dev);
  
  /**
 - * omap3_cpuidle_update_states - Update the cpuidle states.
 + * omap3_cpuidle_update_states() - Update the cpuidle states
 + * @mpu_deepest_state:        Enable states up to and including this for the mpu domain
 + * @core_deepest_state:       Enable states up to and including this for the core domain
   *
 - * Currently, this function toggles the validity of idle states based upon
 - * the flag 'enable_off_mode'. When the flag is set all states are valid.
 - * Else, states leading to OFF state set to be invalid.
 +This goes through the list of available states and marks each C state as
 +valid or invalid, based on the deepest state that can be achieved for the
 +given domain
   */
 -void omap3_cpuidle_update_states(void)
 +void omap3_cpuidle_update_states(u32 mpu_deepest_state, u32 core_deepest_state)
  {
        int i;
  
        for (i = OMAP3_STATE_C1; i < OMAP3_MAX_STATES; i++) {
                struct omap3_processor_cx *cx = &omap3_power_states[i];
  
 -              if (enable_off_mode) {
 +              if ((cx->mpu_state >= mpu_deepest_state) &&
 +                  (cx->core_state >= core_deepest_state)) {
                        cx->valid = 1;
                } else {
 -                      if ((cx->mpu_state == PWRDM_POWER_OFF) ||
 -                              (cx->core_state == PWRDM_POWER_OFF))
 -                              cx->valid = 0;
 +                      cx->valid = 0;
                }
        }
  }
@@@ -453,18 -452,6 +453,18 @@@ void omap_init_power_states(void
        omap3_power_states[OMAP3_STATE_C7].core_state = PWRDM_POWER_OFF;
        omap3_power_states[OMAP3_STATE_C7].flags = CPUIDLE_FLAG_TIME_VALID |
                                CPUIDLE_FLAG_CHECK_BM;
 +
 +      /*
 +       * Erratum i583: implementation for ES rev < Es1.2 on 3630. We cannot
 +       * enable OFF mode in a stable form for previous revisions.
 +       * we disable C7 state as a result.
 +       */
 +      if (IS_PM34XX_ERRATUM(PM_SDRC_WAKEUP_ERRATUM_i583)) {
 +              omap3_power_states[OMAP3_STATE_C7].valid = 0;
 +              cpuidle_params_table[OMAP3_STATE_C7].valid = 0;
 +              WARN_ONCE(1, "%s: core off state C7 disabled due to i583\n",
 +                              __func__);
 +      }
  }
  
  struct cpuidle_driver omap3_idle_driver = {
@@@ -517,10 -504,7 +517,10 @@@ int __init omap3_idle_init(void
                return -EINVAL;
        dev->state_count = count;
  
 -      omap3_cpuidle_update_states();
 +      if (enable_off_mode)
 +              omap3_cpuidle_update_states(PWRDM_POWER_OFF, PWRDM_POWER_OFF);
 +      else
 +              omap3_cpuidle_update_states(PWRDM_POWER_RET, PWRDM_POWER_RET);
  
        if (cpuidle_register_device(dev)) {
                printk(KERN_ERR "%s: CPUidle register device failed\n",
  #include <plat/dma.h>
  #include <plat/board.h>
  
 -#include "prm.h"
 +#include "prm2xxx_3xxx.h"
  #include "prm-regbits-24xx.h"
 -#include "cm.h"
 +#include "cm2xxx_3xxx.h"
  #include "cm-regbits-24xx.h"
  #include "sdrc.h"
  #include "pm.h"
  #include "control.h"
  
 -#include <plat/powerdomain.h>
 -#include <plat/clockdomain.h>
 +#include "powerdomain.h"
 +#include "clockdomain.h"
  
  #ifdef CONFIG_SUSPEND
  static suspend_state_t suspend_state = PM_SUSPEND_ON;
@@@ -79,8 -79,8 +79,8 @@@ static int omap2_fclks_active(void
  {
        u32 f1, f2;
  
 -      f1 = cm_read_mod_reg(CORE_MOD, CM_FCLKEN1);
 -      f2 = cm_read_mod_reg(CORE_MOD, OMAP24XX_CM_FCLKEN2);
 +      f1 = omap2_cm_read_mod_reg(CORE_MOD, CM_FCLKEN1);
 +      f2 = omap2_cm_read_mod_reg(CORE_MOD, OMAP24XX_CM_FCLKEN2);
  
        /* Ignore UART clocks.  These are handled by UART core (serial.c) */
        f1 &= ~(OMAP24XX_EN_UART1_MASK | OMAP24XX_EN_UART2_MASK);
@@@ -105,9 -105,9 +105,9 @@@ static void omap2_enter_full_retention(
  
        /* Clear old wake-up events */
        /* REVISIT: These write to reserved bits? */
 -      prm_write_mod_reg(0xffffffff, CORE_MOD, PM_WKST1);
 -      prm_write_mod_reg(0xffffffff, CORE_MOD, OMAP24XX_PM_WKST2);
 -      prm_write_mod_reg(0xffffffff, WKUP_MOD, PM_WKST);
 +      omap2_prm_write_mod_reg(0xffffffff, CORE_MOD, PM_WKST1);
 +      omap2_prm_write_mod_reg(0xffffffff, CORE_MOD, OMAP24XX_PM_WKST2);
 +      omap2_prm_write_mod_reg(0xffffffff, WKUP_MOD, PM_WKST);
  
        /*
         * Set MPU powerdomain's next power state to RETENTION;
        l = omap_ctrl_readl(OMAP2_CONTROL_DEVCONF0) | OMAP24XX_USBSTANDBYCTRL;
        omap_ctrl_writel(l, OMAP2_CONTROL_DEVCONF0);
  
 -      omap2_gpio_prepare_for_idle(PWRDM_POWER_RET);
 +      omap2_gpio_prepare_for_idle(0);
  
        if (omap2_pm_debug) {
                omap2_pm_dump(0, 0, 0);
@@@ -167,30 -167,30 +167,30 @@@ no_sleep
        clk_enable(osc_ck);
  
        /* clear CORE wake-up events */
 -      prm_write_mod_reg(0xffffffff, CORE_MOD, PM_WKST1);
 -      prm_write_mod_reg(0xffffffff, CORE_MOD, OMAP24XX_PM_WKST2);
 +      omap2_prm_write_mod_reg(0xffffffff, CORE_MOD, PM_WKST1);
 +      omap2_prm_write_mod_reg(0xffffffff, CORE_MOD, OMAP24XX_PM_WKST2);
  
        /* wakeup domain events - bit 1: GPT1, bit5 GPIO */
 -      prm_clear_mod_reg_bits(0x4 | 0x1, WKUP_MOD, PM_WKST);
 +      omap2_prm_clear_mod_reg_bits(0x4 | 0x1, WKUP_MOD, PM_WKST);
  
        /* MPU domain wake events */
 -      l = prm_read_mod_reg(OCP_MOD, OMAP2_PRCM_IRQSTATUS_MPU_OFFSET);
 +      l = omap2_prm_read_mod_reg(OCP_MOD, OMAP2_PRCM_IRQSTATUS_MPU_OFFSET);
        if (l & 0x01)
 -              prm_write_mod_reg(0x01, OCP_MOD,
 +              omap2_prm_write_mod_reg(0x01, OCP_MOD,
                                  OMAP2_PRCM_IRQSTATUS_MPU_OFFSET);
        if (l & 0x20)
 -              prm_write_mod_reg(0x20, OCP_MOD,
 +              omap2_prm_write_mod_reg(0x20, OCP_MOD,
                                  OMAP2_PRCM_IRQSTATUS_MPU_OFFSET);
  
        /* Mask future PRCM-to-MPU interrupts */
 -      prm_write_mod_reg(0x0, OCP_MOD, OMAP2_PRCM_IRQSTATUS_MPU_OFFSET);
 +      omap2_prm_write_mod_reg(0x0, OCP_MOD, OMAP2_PRCM_IRQSTATUS_MPU_OFFSET);
  }
  
  static int omap2_i2c_active(void)
  {
        u32 l;
  
 -      l = cm_read_mod_reg(CORE_MOD, CM_FCLKEN1);
 +      l = omap2_cm_read_mod_reg(CORE_MOD, CM_FCLKEN1);
        return l & (OMAP2420_EN_I2C2_MASK | OMAP2420_EN_I2C1_MASK);
  }
  
@@@ -201,13 -201,13 +201,13 @@@ static int omap2_allow_mpu_retention(vo
        u32 l;
  
        /* Check for MMC, UART2, UART1, McSPI2, McSPI1 and DSS1. */
 -      l = cm_read_mod_reg(CORE_MOD, CM_FCLKEN1);
 +      l = omap2_cm_read_mod_reg(CORE_MOD, CM_FCLKEN1);
        if (l & (OMAP2420_EN_MMC_MASK | OMAP24XX_EN_UART2_MASK |
                 OMAP24XX_EN_UART1_MASK | OMAP24XX_EN_MCSPI2_MASK |
                 OMAP24XX_EN_MCSPI1_MASK | OMAP24XX_EN_DSS1_MASK))
                return 0;
        /* Check for UART3. */
 -      l = cm_read_mod_reg(CORE_MOD, OMAP24XX_CM_FCLKEN2);
 +      l = omap2_cm_read_mod_reg(CORE_MOD, OMAP24XX_CM_FCLKEN2);
        if (l & OMAP24XX_EN_UART3_MASK)
                return 0;
        if (sti_console_enabled)
@@@ -230,18 -230,18 +230,18 @@@ static void omap2_enter_mpu_retention(v
         * it is in retention mode. */
        if (omap2_allow_mpu_retention()) {
                /* REVISIT: These write to reserved bits? */
 -              prm_write_mod_reg(0xffffffff, CORE_MOD, PM_WKST1);
 -              prm_write_mod_reg(0xffffffff, CORE_MOD, OMAP24XX_PM_WKST2);
 -              prm_write_mod_reg(0xffffffff, WKUP_MOD, PM_WKST);
 +              omap2_prm_write_mod_reg(0xffffffff, CORE_MOD, PM_WKST1);
 +              omap2_prm_write_mod_reg(0xffffffff, CORE_MOD, OMAP24XX_PM_WKST2);
 +              omap2_prm_write_mod_reg(0xffffffff, WKUP_MOD, PM_WKST);
  
                /* Try to enter MPU retention */
 -              prm_write_mod_reg((0x01 << OMAP_POWERSTATE_SHIFT) |
 +              omap2_prm_write_mod_reg((0x01 << OMAP_POWERSTATE_SHIFT) |
                                  OMAP_LOGICRETSTATE_MASK,
                                  MPU_MOD, OMAP2_PM_PWSTCTRL);
        } else {
                /* Block MPU retention */
  
 -              prm_write_mod_reg(OMAP_LOGICRETSTATE_MASK, MPU_MOD,
 +              omap2_prm_write_mod_reg(OMAP_LOGICRETSTATE_MASK, MPU_MOD,
                                                 OMAP2_PM_PWSTCTRL);
                only_idle = 1;
        }
@@@ -299,11 -299,16 +299,11 @@@ out
        local_irq_enable();
  }
  
 +#ifdef CONFIG_SUSPEND
  static int omap2_pm_begin(suspend_state_t state)
  {
 -      suspend_state = state;
 -      return 0;
 -}
 -
 -static int omap2_pm_prepare(void)
 -{
 -      /* We cannot sleep in idle until we have resumed */
        disable_hlt();
 +      suspend_state = state;
        return 0;
  }
  
@@@ -311,9 -316,9 +311,9 @@@ static int omap2_pm_suspend(void
  {
        u32 wken_wkup, mir1;
  
 -      wken_wkup = prm_read_mod_reg(WKUP_MOD, PM_WKEN);
 +      wken_wkup = omap2_prm_read_mod_reg(WKUP_MOD, PM_WKEN);
        wken_wkup &= ~OMAP24XX_EN_GPT1_MASK;
 -      prm_write_mod_reg(wken_wkup, WKUP_MOD, PM_WKEN);
 +      omap2_prm_write_mod_reg(wken_wkup, WKUP_MOD, PM_WKEN);
  
        /* Mask GPT1 */
        mir1 = omap_readl(0x480fe0a4);
        omap2_enter_full_retention();
  
        omap_writel(mir1, 0x480fe0a4);
 -      prm_write_mod_reg(wken_wkup, WKUP_MOD, PM_WKEN);
 +      omap2_prm_write_mod_reg(wken_wkup, WKUP_MOD, PM_WKEN);
  
        return 0;
  }
@@@ -344,21 -349,24 +344,21 @@@ static int omap2_pm_enter(suspend_state
        return ret;
  }
  
  static void omap2_pm_end(void)
  {
        suspend_state = PM_SUSPEND_ON;
 +      enable_hlt();
  }
  
- static struct platform_suspend_ops omap_pm_ops = {
+ static const struct platform_suspend_ops omap_pm_ops = {
        .begin          = omap2_pm_begin,
 -      .prepare        = omap2_pm_prepare,
        .enter          = omap2_pm_enter,
        .end            = omap2_pm_end,
        .valid          = suspend_valid_only_mem,
  };
 +#else
 +static const struct platform_suspend_ops __initdata omap_pm_ops;
 +#endif /* CONFIG_SUSPEND */
  
  /* XXX This function should be shareable between OMAP2xxx and OMAP3 */
  static int __init clkdms_setup(struct clockdomain *clkdm, void *unused)
@@@ -380,7 -388,7 +380,7 @@@ static void __init prcm_setup_regs(void
        struct powerdomain *pwrdm;
  
        /* Enable autoidle */
 -      prm_write_mod_reg(OMAP24XX_AUTOIDLE_MASK, OCP_MOD,
 +      omap2_prm_write_mod_reg(OMAP24XX_AUTOIDLE_MASK, OCP_MOD,
                          OMAP2_PRCM_SYSCONFIG_OFFSET);
  
        /*
        clkdm_add_wkdep(mpu_clkdm, wkup_clkdm);
  
        /* Enable clock autoidle for all domains */
 -      cm_write_mod_reg(OMAP24XX_AUTO_CAM_MASK |
 -                       OMAP24XX_AUTO_MAILBOXES_MASK |
 -                       OMAP24XX_AUTO_WDT4_MASK |
 -                       OMAP2420_AUTO_WDT3_MASK |
 -                       OMAP24XX_AUTO_MSPRO_MASK |
 -                       OMAP2420_AUTO_MMC_MASK |
 -                       OMAP24XX_AUTO_FAC_MASK |
 -                       OMAP2420_AUTO_EAC_MASK |
 -                       OMAP24XX_AUTO_HDQ_MASK |
 -                       OMAP24XX_AUTO_UART2_MASK |
 -                       OMAP24XX_AUTO_UART1_MASK |
 -                       OMAP24XX_AUTO_I2C2_MASK |
 -                       OMAP24XX_AUTO_I2C1_MASK |
 -                       OMAP24XX_AUTO_MCSPI2_MASK |
 -                       OMAP24XX_AUTO_MCSPI1_MASK |
 -                       OMAP24XX_AUTO_MCBSP2_MASK |
 -                       OMAP24XX_AUTO_MCBSP1_MASK |
 -                       OMAP24XX_AUTO_GPT12_MASK |
 -                       OMAP24XX_AUTO_GPT11_MASK |
 -                       OMAP24XX_AUTO_GPT10_MASK |
 -                       OMAP24XX_AUTO_GPT9_MASK |
 -                       OMAP24XX_AUTO_GPT8_MASK |
 -                       OMAP24XX_AUTO_GPT7_MASK |
 -                       OMAP24XX_AUTO_GPT6_MASK |
 -                       OMAP24XX_AUTO_GPT5_MASK |
 -                       OMAP24XX_AUTO_GPT4_MASK |
 -                       OMAP24XX_AUTO_GPT3_MASK |
 -                       OMAP24XX_AUTO_GPT2_MASK |
 -                       OMAP2420_AUTO_VLYNQ_MASK |
 -                       OMAP24XX_AUTO_DSS_MASK,
 -                       CORE_MOD, CM_AUTOIDLE1);
 -      cm_write_mod_reg(OMAP24XX_AUTO_UART3_MASK |
 -                       OMAP24XX_AUTO_SSI_MASK |
 -                       OMAP24XX_AUTO_USB_MASK,
 -                       CORE_MOD, CM_AUTOIDLE2);
 -      cm_write_mod_reg(OMAP24XX_AUTO_SDRC_MASK |
 -                       OMAP24XX_AUTO_GPMC_MASK |
 -                       OMAP24XX_AUTO_SDMA_MASK,
 -                       CORE_MOD, CM_AUTOIDLE3);
 -      cm_write_mod_reg(OMAP24XX_AUTO_PKA_MASK |
 -                       OMAP24XX_AUTO_AES_MASK |
 -                       OMAP24XX_AUTO_RNG_MASK |
 -                       OMAP24XX_AUTO_SHA_MASK |
 -                       OMAP24XX_AUTO_DES_MASK,
 -                       CORE_MOD, OMAP24XX_CM_AUTOIDLE4);
 -
 -      cm_write_mod_reg(OMAP2420_AUTO_DSP_IPI_MASK, OMAP24XX_DSP_MOD,
 -                       CM_AUTOIDLE);
 +      omap2_cm_write_mod_reg(OMAP24XX_AUTO_CAM_MASK |
 +                             OMAP24XX_AUTO_MAILBOXES_MASK |
 +                             OMAP24XX_AUTO_WDT4_MASK |
 +                             OMAP2420_AUTO_WDT3_MASK |
 +                             OMAP24XX_AUTO_MSPRO_MASK |
 +                             OMAP2420_AUTO_MMC_MASK |
 +                             OMAP24XX_AUTO_FAC_MASK |
 +                             OMAP2420_AUTO_EAC_MASK |
 +                             OMAP24XX_AUTO_HDQ_MASK |
 +                             OMAP24XX_AUTO_UART2_MASK |
 +                             OMAP24XX_AUTO_UART1_MASK |
 +                             OMAP24XX_AUTO_I2C2_MASK |
 +                             OMAP24XX_AUTO_I2C1_MASK |
 +                             OMAP24XX_AUTO_MCSPI2_MASK |
 +                             OMAP24XX_AUTO_MCSPI1_MASK |
 +                             OMAP24XX_AUTO_MCBSP2_MASK |
 +                             OMAP24XX_AUTO_MCBSP1_MASK |
 +                             OMAP24XX_AUTO_GPT12_MASK |
 +                             OMAP24XX_AUTO_GPT11_MASK |
 +                             OMAP24XX_AUTO_GPT10_MASK |
 +                             OMAP24XX_AUTO_GPT9_MASK |
 +                             OMAP24XX_AUTO_GPT8_MASK |
 +                             OMAP24XX_AUTO_GPT7_MASK |
 +                             OMAP24XX_AUTO_GPT6_MASK |
 +                             OMAP24XX_AUTO_GPT5_MASK |
 +                             OMAP24XX_AUTO_GPT4_MASK |
 +                             OMAP24XX_AUTO_GPT3_MASK |
 +                             OMAP24XX_AUTO_GPT2_MASK |
 +                             OMAP2420_AUTO_VLYNQ_MASK |
 +                             OMAP24XX_AUTO_DSS_MASK,
 +                             CORE_MOD, CM_AUTOIDLE1);
 +      omap2_cm_write_mod_reg(OMAP24XX_AUTO_UART3_MASK |
 +                             OMAP24XX_AUTO_SSI_MASK |
 +                             OMAP24XX_AUTO_USB_MASK,
 +                             CORE_MOD, CM_AUTOIDLE2);
 +      omap2_cm_write_mod_reg(OMAP24XX_AUTO_SDRC_MASK |
 +                             OMAP24XX_AUTO_GPMC_MASK |
 +                             OMAP24XX_AUTO_SDMA_MASK,
 +                             CORE_MOD, CM_AUTOIDLE3);
 +      omap2_cm_write_mod_reg(OMAP24XX_AUTO_PKA_MASK |
 +                             OMAP24XX_AUTO_AES_MASK |
 +                             OMAP24XX_AUTO_RNG_MASK |
 +                             OMAP24XX_AUTO_SHA_MASK |
 +                             OMAP24XX_AUTO_DES_MASK,
 +                             CORE_MOD, OMAP24XX_CM_AUTOIDLE4);
 +
 +      omap2_cm_write_mod_reg(OMAP2420_AUTO_DSP_IPI_MASK, OMAP24XX_DSP_MOD,
 +                             CM_AUTOIDLE);
  
        /* Put DPLL and both APLLs into autoidle mode */
 -      cm_write_mod_reg((0x03 << OMAP24XX_AUTO_DPLL_SHIFT) |
 -                       (0x03 << OMAP24XX_AUTO_96M_SHIFT) |
 -                       (0x03 << OMAP24XX_AUTO_54M_SHIFT),
 -                       PLL_MOD, CM_AUTOIDLE);
 -
 -      cm_write_mod_reg(OMAP24XX_AUTO_OMAPCTRL_MASK |
 -                       OMAP24XX_AUTO_WDT1_MASK |
 -                       OMAP24XX_AUTO_MPU_WDT_MASK |
 -                       OMAP24XX_AUTO_GPIOS_MASK |
 -                       OMAP24XX_AUTO_32KSYNC_MASK |
 -                       OMAP24XX_AUTO_GPT1_MASK,
 -                       WKUP_MOD, CM_AUTOIDLE);
 +      omap2_cm_write_mod_reg((0x03 << OMAP24XX_AUTO_DPLL_SHIFT) |
 +                             (0x03 << OMAP24XX_AUTO_96M_SHIFT) |
 +                             (0x03 << OMAP24XX_AUTO_54M_SHIFT),
 +                             PLL_MOD, CM_AUTOIDLE);
 +
 +      omap2_cm_write_mod_reg(OMAP24XX_AUTO_OMAPCTRL_MASK |
 +                             OMAP24XX_AUTO_WDT1_MASK |
 +                             OMAP24XX_AUTO_MPU_WDT_MASK |
 +                             OMAP24XX_AUTO_GPIOS_MASK |
 +                             OMAP24XX_AUTO_32KSYNC_MASK |
 +                             OMAP24XX_AUTO_GPT1_MASK,
 +                             WKUP_MOD, CM_AUTOIDLE);
  
        /* REVISIT: Configure number of 32 kHz clock cycles for sys_clk
         * stabilisation */
 -      prm_write_mod_reg(15 << OMAP_SETUP_TIME_SHIFT, OMAP24XX_GR_MOD,
 -                        OMAP2_PRCM_CLKSSETUP_OFFSET);
 +      omap2_prm_write_mod_reg(15 << OMAP_SETUP_TIME_SHIFT, OMAP24XX_GR_MOD,
 +                              OMAP2_PRCM_CLKSSETUP_OFFSET);
  
        /* Configure automatic voltage transition */
 -      prm_write_mod_reg(2 << OMAP_SETUP_TIME_SHIFT, OMAP24XX_GR_MOD,
 -                        OMAP2_PRCM_VOLTSETUP_OFFSET);
 -      prm_write_mod_reg(OMAP24XX_AUTO_EXTVOLT_MASK |
 -                        (0x1 << OMAP24XX_SETOFF_LEVEL_SHIFT) |
 -                        OMAP24XX_MEMRETCTRL_MASK |
 -                        (0x1 << OMAP24XX_SETRET_LEVEL_SHIFT) |
 -                        (0x0 << OMAP24XX_VOLT_LEVEL_SHIFT),
 -                        OMAP24XX_GR_MOD, OMAP2_PRCM_VOLTCTRL_OFFSET);
 +      omap2_prm_write_mod_reg(2 << OMAP_SETUP_TIME_SHIFT, OMAP24XX_GR_MOD,
 +                              OMAP2_PRCM_VOLTSETUP_OFFSET);
 +      omap2_prm_write_mod_reg(OMAP24XX_AUTO_EXTVOLT_MASK |
 +                              (0x1 << OMAP24XX_SETOFF_LEVEL_SHIFT) |
 +                              OMAP24XX_MEMRETCTRL_MASK |
 +                              (0x1 << OMAP24XX_SETRET_LEVEL_SHIFT) |
 +                              (0x0 << OMAP24XX_VOLT_LEVEL_SHIFT),
 +                              OMAP24XX_GR_MOD, OMAP2_PRCM_VOLTCTRL_OFFSET);
  
        /* Enable wake-up events */
 -      prm_write_mod_reg(OMAP24XX_EN_GPIOS_MASK | OMAP24XX_EN_GPT1_MASK,
 -                        WKUP_MOD, PM_WKEN);
 +      omap2_prm_write_mod_reg(OMAP24XX_EN_GPIOS_MASK | OMAP24XX_EN_GPT1_MASK,
 +                              WKUP_MOD, PM_WKEN);
  }
  
  static int __init omap2_pm_init(void)
                return -ENODEV;
  
        printk(KERN_INFO "Power Management for OMAP2 initializing\n");
 -      l = prm_read_mod_reg(OCP_MOD, OMAP2_PRCM_REVISION_OFFSET);
 +      l = omap2_prm_read_mod_reg(OCP_MOD, OMAP2_PRCM_REVISION_OFFSET);
        printk(KERN_INFO "PRCM revision %d.%d\n", (l >> 4) & 0x0f, l & 0x0f);
  
        /* Look up important powerdomains */
@@@ -31,8 -31,8 +31,8 @@@
  #include <linux/console.h>
  
  #include <plat/sram.h>
 -#include <plat/clockdomain.h>
 -#include <plat/powerdomain.h>
 +#include "clockdomain.h"
 +#include "powerdomain.h"
  #include <plat/serial.h>
  #include <plat/sdrc.h>
  #include <plat/prcm.h>
  
  #include <asm/tlbflush.h>
  
 -#include "cm.h"
 +#include "cm2xxx_3xxx.h"
  #include "cm-regbits-34xx.h"
  #include "prm-regbits-34xx.h"
  
 -#include "prm.h"
 +#include "prm2xxx_3xxx.h"
  #include "pm.h"
  #include "sdrc.h"
  #include "control.h"
@@@ -68,9 -68,6 +68,9 @@@ static inline bool is_suspending(void
  #define OMAP343X_TABLE_VALUE_OFFSET      0xc0
  #define OMAP343X_CONTROL_REG_VALUE_OFFSET  0xc8
  
 +/* pm34xx errata defined in pm.h */
 +u16 pm34xx_errata;
 +
  struct power_state {
        struct powerdomain *pwrdm;
        u32 next_state;
@@@ -105,12 -102,12 +105,12 @@@ static void omap3_enable_io_chain(void
        int timeout = 0;
  
        if (omap_rev() >= OMAP3430_REV_ES3_1) {
 -              prm_set_mod_reg_bits(OMAP3430_EN_IO_CHAIN_MASK, WKUP_MOD,
 +              omap2_prm_set_mod_reg_bits(OMAP3430_EN_IO_CHAIN_MASK, WKUP_MOD,
                                     PM_WKEN);
                /* Do a readback to assure write has been done */
 -              prm_read_mod_reg(WKUP_MOD, PM_WKEN);
 +              omap2_prm_read_mod_reg(WKUP_MOD, PM_WKEN);
  
 -              while (!(prm_read_mod_reg(WKUP_MOD, PM_WKEN) &
 +              while (!(omap2_prm_read_mod_reg(WKUP_MOD, PM_WKEN) &
                         OMAP3430_ST_IO_CHAIN_MASK)) {
                        timeout++;
                        if (timeout > 1000) {
                                       "activation failed.\n");
                                return;
                        }
 -                      prm_set_mod_reg_bits(OMAP3430_ST_IO_CHAIN_MASK,
 +                      omap2_prm_set_mod_reg_bits(OMAP3430_ST_IO_CHAIN_MASK,
                                             WKUP_MOD, PM_WKEN);
                }
        }
  static void omap3_disable_io_chain(void)
  {
        if (omap_rev() >= OMAP3430_REV_ES3_1)
 -              prm_clear_mod_reg_bits(OMAP3430_EN_IO_CHAIN_MASK, WKUP_MOD,
 +              omap2_prm_clear_mod_reg_bits(OMAP3430_EN_IO_CHAIN_MASK, WKUP_MOD,
                                       PM_WKEN);
  }
  
  static void omap3_core_save_context(void)
  {
 -      u32 control_padconf_off;
 -
 -      /* Save the padconf registers */
 -      control_padconf_off = omap_ctrl_readl(OMAP343X_CONTROL_PADCONF_OFF);
 -      control_padconf_off |= START_PADCONF_SAVE;
 -      omap_ctrl_writel(control_padconf_off, OMAP343X_CONTROL_PADCONF_OFF);
 -      /* wait for the save to complete */
 -      while (!(omap_ctrl_readl(OMAP343X_CONTROL_GENERAL_PURPOSE_STATUS)
 -                      & PADCONF_SAVE_DONE))
 -              udelay(1);
 +      omap3_ctrl_save_padconf();
  
        /*
         * Force write last pad into memory, as this can fail in some
 -       * cases according to erratas 1.157, 1.185
 +       * cases according to errata 1.157, 1.185
         */
        omap_ctrl_writel(omap_ctrl_readl(OMAP343X_PADCONF_ETK_D14),
                OMAP343X_CONTROL_MEM_WKUP + 0x2a0);
@@@ -212,27 -218,27 +212,27 @@@ static int prcm_clear_mod_irqs(s16 modu
                OMAP3430ES2_PM_MPUGRPSEL3 : OMAP3430_PM_MPUGRPSEL;
        int c = 0;
  
 -      wkst = prm_read_mod_reg(module, wkst_off);
 -      wkst &= prm_read_mod_reg(module, grpsel_off);
 +      wkst = omap2_prm_read_mod_reg(module, wkst_off);
 +      wkst &= omap2_prm_read_mod_reg(module, grpsel_off);
        if (wkst) {
 -              iclk = cm_read_mod_reg(module, iclk_off);
 -              fclk = cm_read_mod_reg(module, fclk_off);
 +              iclk = omap2_cm_read_mod_reg(module, iclk_off);
 +              fclk = omap2_cm_read_mod_reg(module, fclk_off);
                while (wkst) {
                        clken = wkst;
 -                      cm_set_mod_reg_bits(clken, module, iclk_off);
 +                      omap2_cm_set_mod_reg_bits(clken, module, iclk_off);
                        /*
                         * For USBHOST, we don't know whether HOST1 or
                         * HOST2 woke us up, so enable both f-clocks
                         */
                        if (module == OMAP3430ES2_USBHOST_MOD)
                                clken |= 1 << OMAP3430ES2_EN_USBHOST2_SHIFT;
 -                      cm_set_mod_reg_bits(clken, module, fclk_off);
 -                      prm_write_mod_reg(wkst, module, wkst_off);
 -                      wkst = prm_read_mod_reg(module, wkst_off);
 +                      omap2_cm_set_mod_reg_bits(clken, module, fclk_off);
 +                      omap2_prm_write_mod_reg(wkst, module, wkst_off);
 +                      wkst = omap2_prm_read_mod_reg(module, wkst_off);
                        c++;
                }
 -              cm_write_mod_reg(iclk, module, iclk_off);
 -              cm_write_mod_reg(fclk, module, fclk_off);
 +              omap2_cm_write_mod_reg(iclk, module, iclk_off);
 +              omap2_cm_write_mod_reg(fclk, module, fclk_off);
        }
  
        return c;
@@@ -275,9 -281,9 +275,9 @@@ static irqreturn_t prcm_interrupt_handl
        u32 irqenable_mpu, irqstatus_mpu;
        int c = 0;
  
 -      irqenable_mpu = prm_read_mod_reg(OCP_MOD,
 +      irqenable_mpu = omap2_prm_read_mod_reg(OCP_MOD,
                                         OMAP3_PRM_IRQENABLE_MPU_OFFSET);
 -      irqstatus_mpu = prm_read_mod_reg(OCP_MOD,
 +      irqstatus_mpu = omap2_prm_read_mod_reg(OCP_MOD,
                                         OMAP3_PRM_IRQSTATUS_MPU_OFFSET);
        irqstatus_mpu &= irqenable_mpu;
  
                             "no code to handle it (%08x)\n", irqstatus_mpu);
                }
  
 -              prm_write_mod_reg(irqstatus_mpu, OCP_MOD,
 +              omap2_prm_write_mod_reg(irqstatus_mpu, OCP_MOD,
                                        OMAP3_PRM_IRQSTATUS_MPU_OFFSET);
  
 -              irqstatus_mpu = prm_read_mod_reg(OCP_MOD,
 +              irqstatus_mpu = omap2_prm_read_mod_reg(OCP_MOD,
                                        OMAP3_PRM_IRQSTATUS_MPU_OFFSET);
                irqstatus_mpu &= irqenable_mpu;
  
@@@ -351,7 -357,6 +351,7 @@@ void omap_sram_idle(void
        int mpu_next_state = PWRDM_POWER_ON;
        int per_next_state = PWRDM_POWER_ON;
        int core_next_state = PWRDM_POWER_ON;
 +      int per_going_off;
        int core_prev_state, per_prev_state;
        u32 sdrc_pwr = 0;
  
        if (omap3_has_io_wakeup() &&
            (per_next_state < PWRDM_POWER_ON ||
             core_next_state < PWRDM_POWER_ON)) {
 -              prm_set_mod_reg_bits(OMAP3430_EN_IO_MASK, WKUP_MOD, PM_WKEN);
 +              omap2_prm_set_mod_reg_bits(OMAP3430_EN_IO_MASK, WKUP_MOD, PM_WKEN);
                omap3_enable_io_chain();
        }
  
  
        /* PER */
        if (per_next_state < PWRDM_POWER_ON) {
 +              per_going_off = (per_next_state == PWRDM_POWER_OFF) ? 1 : 0;
                omap_uart_prepare_idle(2);
                omap_uart_prepare_idle(3);
 -              omap2_gpio_prepare_for_idle(per_next_state);
 +              omap2_gpio_prepare_for_idle(per_going_off);
                if (per_next_state == PWRDM_POWER_OFF)
                                omap3_per_save_context();
        }
                omap_uart_prepare_idle(1);
                if (core_next_state == PWRDM_POWER_OFF) {
                        omap3_core_save_context();
 -                      omap3_prcm_save_context();
 +                      omap3_cm_save_context();
                }
        }
  
        /*
        * On EMU/HS devices the ROM code restores an SDRC value
        * from scratchpad which has automatic self refresh on timeout
 -      * of AUTO_CNT = 1 enabled. This takes care of errata 1.142.
 +      * of AUTO_CNT = 1 enabled. This takes care of erratum ID i443.
        * Hence store/restore the SDRC_POWER register here.
        */
        if (omap_rev() >= OMAP3430_REV_ES3_0 &&
                core_prev_state = pwrdm_read_prev_pwrst(core_pwrdm);
                if (core_prev_state == PWRDM_POWER_OFF) {
                        omap3_core_restore_context();
 -                      omap3_prcm_restore_context();
 +                      omap3_cm_restore_context();
                        omap3_sram_restore_context();
                        omap2_sms_restore_context();
                }
                omap_uart_resume_idle(0);
                omap_uart_resume_idle(1);
                if (core_next_state == PWRDM_POWER_OFF)
 -                      prm_clear_mod_reg_bits(OMAP3430_AUTO_OFF_MASK,
 +                      omap2_prm_clear_mod_reg_bits(OMAP3430_AUTO_OFF_MASK,
                                               OMAP3430_GR_MOD,
                                               OMAP3_PRM_VOLTCTRL_OFFSET);
        }
@@@ -488,8 -492,7 +488,8 @@@ console_still_active
        if (omap3_has_io_wakeup() &&
            (per_next_state < PWRDM_POWER_ON ||
             core_next_state < PWRDM_POWER_ON)) {
 -              prm_clear_mod_reg_bits(OMAP3430_EN_IO_MASK, WKUP_MOD, PM_WKEN);
 +              omap2_prm_clear_mod_reg_bits(OMAP3430_EN_IO_MASK, WKUP_MOD,
 +                                           PM_WKEN);
                omap3_disable_io_chain();
        }
  
@@@ -526,6 -529,12 +526,6 @@@ out
  }
  
  #ifdef CONFIG_SUSPEND
 -static int omap3_pm_prepare(void)
 -{
 -      disable_hlt();
 -      return 0;
 -}
 -
  static int omap3_pm_suspend(void)
  {
        struct power_state *pwrst;
@@@ -588,10 -597,14 +588,10 @@@ static int omap3_pm_enter(suspend_state
        return ret;
  }
  
  /* Hooks to enable / disable UART interrupts during suspend */
  static int omap3_pm_begin(suspend_state_t state)
  {
 +      disable_hlt();
        suspend_state = state;
        omap_uart_enable_irqs(0);
        return 0;
@@@ -601,14 -614,15 +601,14 @@@ static void omap3_pm_end(void
  {
        suspend_state = PM_SUSPEND_ON;
        omap_uart_enable_irqs(1);
 +      enable_hlt();
        return;
  }
  
- static struct platform_suspend_ops omap_pm_ops = {
+ static const struct platform_suspend_ops omap_pm_ops = {
        .begin          = omap3_pm_begin,
        .end            = omap3_pm_end,
 -      .prepare        = omap3_pm_prepare,
        .enter          = omap3_pm_enter,
 -      .finish         = omap3_pm_finish,
        .valid          = suspend_valid_only_mem,
  };
  #endif /* CONFIG_SUSPEND */
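The prepare/finish hooks are folded into begin/end here (the OMAP4 hunk below follows suit); a minimal sketch of the resulting convention, assuming the standard platform_suspend_ops prototypes and hypothetical my_* names:

	static int my_pm_begin(suspend_state_t state)
	{
		disable_hlt();	/* keep the idle loop out of hlt during suspend */
		return 0;
	}

	static void my_pm_end(void)
	{
		enable_hlt();	/* re-allow hlt once the suspend cycle is over */
	}

	static int my_pm_enter(suspend_state_t state)
	{
		/* platform-specific suspend entry would go here */
		return 0;
	}

	static const struct platform_suspend_ops my_pm_ops = {
		.begin	= my_pm_begin,
		.end	= my_pm_end,
		.enter	= my_pm_enter,
		.valid	= suspend_valid_only_mem,
	};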
  static void __init omap3_iva_idle(void)
  {
        /* ensure IVA2 clock is disabled */
 -      cm_write_mod_reg(0, OMAP3430_IVA2_MOD, CM_FCLKEN);
 +      omap2_cm_write_mod_reg(0, OMAP3430_IVA2_MOD, CM_FCLKEN);
  
        /* if no clock activity, nothing else to do */
 -      if (!(cm_read_mod_reg(OMAP3430_IVA2_MOD, OMAP3430_CM_CLKSTST) &
 +      if (!(omap2_cm_read_mod_reg(OMAP3430_IVA2_MOD, OMAP3430_CM_CLKSTST) &
              OMAP3430_CLKACTIVITY_IVA2_MASK))
                return;
  
        /* Reset IVA2 */
 -      prm_write_mod_reg(OMAP3430_RST1_IVA2_MASK |
 +      omap2_prm_write_mod_reg(OMAP3430_RST1_IVA2_MASK |
                          OMAP3430_RST2_IVA2_MASK |
                          OMAP3430_RST3_IVA2_MASK,
                          OMAP3430_IVA2_MOD, OMAP2_RM_RSTCTRL);
  
        /* Enable IVA2 clock */
 -      cm_write_mod_reg(OMAP3430_CM_FCLKEN_IVA2_EN_IVA2_MASK,
 +      omap2_cm_write_mod_reg(OMAP3430_CM_FCLKEN_IVA2_EN_IVA2_MASK,
                         OMAP3430_IVA2_MOD, CM_FCLKEN);
  
        /* Set IVA2 boot mode to 'idle' */
                         OMAP343X_CONTROL_IVA2_BOOTMOD);
  
        /* Un-reset IVA2 */
 -      prm_write_mod_reg(0, OMAP3430_IVA2_MOD, OMAP2_RM_RSTCTRL);
 +      omap2_prm_write_mod_reg(0, OMAP3430_IVA2_MOD, OMAP2_RM_RSTCTRL);
  
        /* Disable IVA2 clock */
 -      cm_write_mod_reg(0, OMAP3430_IVA2_MOD, CM_FCLKEN);
 +      omap2_cm_write_mod_reg(0, OMAP3430_IVA2_MOD, CM_FCLKEN);
  
        /* Reset IVA2 */
 -      prm_write_mod_reg(OMAP3430_RST1_IVA2_MASK |
 +      omap2_prm_write_mod_reg(OMAP3430_RST1_IVA2_MASK |
                          OMAP3430_RST2_IVA2_MASK |
                          OMAP3430_RST3_IVA2_MASK,
                          OMAP3430_IVA2_MOD, OMAP2_RM_RSTCTRL);
@@@ -679,10 -693,10 +679,10 @@@ static void __init omap3_d2d_idle(void
        omap_ctrl_writew(padconf, OMAP3_PADCONF_SAD2D_IDLEACK);
  
        /* reset modem */
 -      prm_write_mod_reg(OMAP3430_RM_RSTCTRL_CORE_MODEM_SW_RSTPWRON_MASK |
 +      omap2_prm_write_mod_reg(OMAP3430_RM_RSTCTRL_CORE_MODEM_SW_RSTPWRON_MASK |
                          OMAP3430_RM_RSTCTRL_CORE_MODEM_SW_RST_MASK,
                          CORE_MOD, OMAP2_RM_RSTCTRL);
 -      prm_write_mod_reg(0, CORE_MOD, OMAP2_RM_RSTCTRL);
 +      omap2_prm_write_mod_reg(0, CORE_MOD, OMAP2_RM_RSTCTRL);
  }
  
  static void __init prcm_setup_regs(void)
  
        /* XXX Reset all wkdeps. This should be done when initializing
         * powerdomains */
 -      prm_write_mod_reg(0, OMAP3430_IVA2_MOD, PM_WKDEP);
 -      prm_write_mod_reg(0, MPU_MOD, PM_WKDEP);
 -      prm_write_mod_reg(0, OMAP3430_DSS_MOD, PM_WKDEP);
 -      prm_write_mod_reg(0, OMAP3430_NEON_MOD, PM_WKDEP);
 -      prm_write_mod_reg(0, OMAP3430_CAM_MOD, PM_WKDEP);
 -      prm_write_mod_reg(0, OMAP3430_PER_MOD, PM_WKDEP);
 +      omap2_prm_write_mod_reg(0, OMAP3430_IVA2_MOD, PM_WKDEP);
 +      omap2_prm_write_mod_reg(0, MPU_MOD, PM_WKDEP);
 +      omap2_prm_write_mod_reg(0, OMAP3430_DSS_MOD, PM_WKDEP);
 +      omap2_prm_write_mod_reg(0, OMAP3430_NEON_MOD, PM_WKDEP);
 +      omap2_prm_write_mod_reg(0, OMAP3430_CAM_MOD, PM_WKDEP);
 +      omap2_prm_write_mod_reg(0, OMAP3430_PER_MOD, PM_WKDEP);
        if (omap_rev() > OMAP3430_REV_ES1_0) {
 -              prm_write_mod_reg(0, OMAP3430ES2_SGX_MOD, PM_WKDEP);
 -              prm_write_mod_reg(0, OMAP3430ES2_USBHOST_MOD, PM_WKDEP);
 +              omap2_prm_write_mod_reg(0, OMAP3430ES2_SGX_MOD, PM_WKDEP);
 +              omap2_prm_write_mod_reg(0, OMAP3430ES2_USBHOST_MOD, PM_WKDEP);
        } else
 -              prm_write_mod_reg(0, GFX_MOD, PM_WKDEP);
 +              omap2_prm_write_mod_reg(0, GFX_MOD, PM_WKDEP);
  
        /*
         * Enable interface clock autoidle for all modules.
         * Note that in the long run this should be done by clockfw
         */
 -      cm_write_mod_reg(
 +      omap2_cm_write_mod_reg(
                OMAP3430_AUTO_MODEM_MASK |
                OMAP3430ES2_AUTO_MMC3_MASK |
                OMAP3430ES2_AUTO_ICR_MASK |
                OMAP3430_AUTO_SSI_MASK,
                CORE_MOD, CM_AUTOIDLE1);
  
 -      cm_write_mod_reg(
 +      omap2_cm_write_mod_reg(
                OMAP3430_AUTO_PKA_MASK |
                OMAP3430_AUTO_AES1_MASK |
                OMAP3430_AUTO_RNG_MASK |
                CORE_MOD, CM_AUTOIDLE2);
  
        if (omap_rev() > OMAP3430_REV_ES1_0) {
 -              cm_write_mod_reg(
 +              omap2_cm_write_mod_reg(
                        OMAP3430_AUTO_MAD2D_MASK |
                        OMAP3430ES2_AUTO_USBTLL_MASK,
                        CORE_MOD, CM_AUTOIDLE3);
        }
  
 -      cm_write_mod_reg(
 +      omap2_cm_write_mod_reg(
                OMAP3430_AUTO_WDT2_MASK |
                OMAP3430_AUTO_WDT1_MASK |
                OMAP3430_AUTO_GPIO1_MASK |
                OMAP3430_AUTO_GPT1_MASK,
                WKUP_MOD, CM_AUTOIDLE);
  
 -      cm_write_mod_reg(
 +      omap2_cm_write_mod_reg(
                OMAP3430_AUTO_DSS_MASK,
                OMAP3430_DSS_MOD,
                CM_AUTOIDLE);
  
 -      cm_write_mod_reg(
 +      omap2_cm_write_mod_reg(
                OMAP3430_AUTO_CAM_MASK,
                OMAP3430_CAM_MOD,
                CM_AUTOIDLE);
  
 -      cm_write_mod_reg(
 +      omap2_cm_write_mod_reg(
                omap3630_auto_uart4_mask |
                OMAP3430_AUTO_GPIO6_MASK |
                OMAP3430_AUTO_GPIO5_MASK |
                CM_AUTOIDLE);
  
        if (omap_rev() > OMAP3430_REV_ES1_0) {
 -              cm_write_mod_reg(
 +              omap2_cm_write_mod_reg(
                        OMAP3430ES2_AUTO_USBHOST_MASK,
                        OMAP3430ES2_USBHOST_MOD,
                        CM_AUTOIDLE);
         * Set all plls to autoidle. This is needed until autoidle is
         * enabled by clockfw
         */
 -      cm_write_mod_reg(1 << OMAP3430_AUTO_IVA2_DPLL_SHIFT,
 +      omap2_cm_write_mod_reg(1 << OMAP3430_AUTO_IVA2_DPLL_SHIFT,
                         OMAP3430_IVA2_MOD, CM_AUTOIDLE2);
 -      cm_write_mod_reg(1 << OMAP3430_AUTO_MPU_DPLL_SHIFT,
 +      omap2_cm_write_mod_reg(1 << OMAP3430_AUTO_MPU_DPLL_SHIFT,
                         MPU_MOD,
                         CM_AUTOIDLE2);
 -      cm_write_mod_reg((1 << OMAP3430_AUTO_PERIPH_DPLL_SHIFT) |
 +      omap2_cm_write_mod_reg((1 << OMAP3430_AUTO_PERIPH_DPLL_SHIFT) |
                         (1 << OMAP3430_AUTO_CORE_DPLL_SHIFT),
                         PLL_MOD,
                         CM_AUTOIDLE);
 -      cm_write_mod_reg(1 << OMAP3430ES2_AUTO_PERIPH2_DPLL_SHIFT,
 +      omap2_cm_write_mod_reg(1 << OMAP3430ES2_AUTO_PERIPH2_DPLL_SHIFT,
                         PLL_MOD,
                         CM_AUTOIDLE2);
  
         * sys_clkreq. In the long run clock framework should
         * take care of this.
         */
 -      prm_rmw_mod_reg_bits(OMAP_AUTOEXTCLKMODE_MASK,
 +      omap2_prm_rmw_mod_reg_bits(OMAP_AUTOEXTCLKMODE_MASK,
                             1 << OMAP_AUTOEXTCLKMODE_SHIFT,
                             OMAP3430_GR_MOD,
                             OMAP3_PRM_CLKSRC_CTRL_OFFSET);
  
       /* setup wakeup source */
 -      prm_write_mod_reg(OMAP3430_EN_IO_MASK | OMAP3430_EN_GPIO1_MASK |
 +      omap2_prm_write_mod_reg(OMAP3430_EN_IO_MASK | OMAP3430_EN_GPIO1_MASK |
                          OMAP3430_EN_GPT1_MASK | OMAP3430_EN_GPT12_MASK,
                          WKUP_MOD, PM_WKEN);
        /* No need to write EN_IO, that is always enabled */
 -      prm_write_mod_reg(OMAP3430_GRPSEL_GPIO1_MASK |
 +      omap2_prm_write_mod_reg(OMAP3430_GRPSEL_GPIO1_MASK |
                          OMAP3430_GRPSEL_GPT1_MASK |
                          OMAP3430_GRPSEL_GPT12_MASK,
                          WKUP_MOD, OMAP3430_PM_MPUGRPSEL);
       /* For some reason IO doesn't generate a wakeup event even if
        * it is selected to the MPU wakeup group */
 -      prm_write_mod_reg(OMAP3430_IO_EN_MASK | OMAP3430_WKUP_EN_MASK,
 +      omap2_prm_write_mod_reg(OMAP3430_IO_EN_MASK | OMAP3430_WKUP_EN_MASK,
                          OCP_MOD, OMAP3_PRM_IRQENABLE_MPU_OFFSET);
  
        /* Enable PM_WKEN to support DSS LPR */
 -      prm_write_mod_reg(OMAP3430_PM_WKEN_DSS_EN_DSS_MASK,
 +      omap2_prm_write_mod_reg(OMAP3430_PM_WKEN_DSS_EN_DSS_MASK,
                                OMAP3430_DSS_MOD, PM_WKEN);
  
        /* Enable wakeups in PER */
 -      prm_write_mod_reg(omap3630_en_uart4_mask |
 +      omap2_prm_write_mod_reg(omap3630_en_uart4_mask |
                          OMAP3430_EN_GPIO2_MASK | OMAP3430_EN_GPIO3_MASK |
                          OMAP3430_EN_GPIO4_MASK | OMAP3430_EN_GPIO5_MASK |
                          OMAP3430_EN_GPIO6_MASK | OMAP3430_EN_UART3_MASK |
                          OMAP3430_EN_MCBSP4_MASK,
                          OMAP3430_PER_MOD, PM_WKEN);
        /* and allow them to wake up MPU */
 -      prm_write_mod_reg(omap3630_grpsel_uart4_mask |
 +      omap2_prm_write_mod_reg(omap3630_grpsel_uart4_mask |
                          OMAP3430_GRPSEL_GPIO2_MASK |
                          OMAP3430_GRPSEL_GPIO3_MASK |
                          OMAP3430_GRPSEL_GPIO4_MASK |
                          OMAP3430_PER_MOD, OMAP3430_PM_MPUGRPSEL);
  
        /* Don't attach IVA interrupts */
 -      prm_write_mod_reg(0, WKUP_MOD, OMAP3430_PM_IVAGRPSEL);
 -      prm_write_mod_reg(0, CORE_MOD, OMAP3430_PM_IVAGRPSEL1);
 -      prm_write_mod_reg(0, CORE_MOD, OMAP3430ES2_PM_IVAGRPSEL3);
 -      prm_write_mod_reg(0, OMAP3430_PER_MOD, OMAP3430_PM_IVAGRPSEL);
 +      omap2_prm_write_mod_reg(0, WKUP_MOD, OMAP3430_PM_IVAGRPSEL);
 +      omap2_prm_write_mod_reg(0, CORE_MOD, OMAP3430_PM_IVAGRPSEL1);
 +      omap2_prm_write_mod_reg(0, CORE_MOD, OMAP3430ES2_PM_IVAGRPSEL3);
 +      omap2_prm_write_mod_reg(0, OMAP3430_PER_MOD, OMAP3430_PM_IVAGRPSEL);
  
        /* Clear any pending 'reset' flags */
 -      prm_write_mod_reg(0xffffffff, MPU_MOD, OMAP2_RM_RSTST);
 -      prm_write_mod_reg(0xffffffff, CORE_MOD, OMAP2_RM_RSTST);
 -      prm_write_mod_reg(0xffffffff, OMAP3430_PER_MOD, OMAP2_RM_RSTST);
 -      prm_write_mod_reg(0xffffffff, OMAP3430_EMU_MOD, OMAP2_RM_RSTST);
 -      prm_write_mod_reg(0xffffffff, OMAP3430_NEON_MOD, OMAP2_RM_RSTST);
 -      prm_write_mod_reg(0xffffffff, OMAP3430_DSS_MOD, OMAP2_RM_RSTST);
 -      prm_write_mod_reg(0xffffffff, OMAP3430ES2_USBHOST_MOD, OMAP2_RM_RSTST);
 +      omap2_prm_write_mod_reg(0xffffffff, MPU_MOD, OMAP2_RM_RSTST);
 +      omap2_prm_write_mod_reg(0xffffffff, CORE_MOD, OMAP2_RM_RSTST);
 +      omap2_prm_write_mod_reg(0xffffffff, OMAP3430_PER_MOD, OMAP2_RM_RSTST);
 +      omap2_prm_write_mod_reg(0xffffffff, OMAP3430_EMU_MOD, OMAP2_RM_RSTST);
 +      omap2_prm_write_mod_reg(0xffffffff, OMAP3430_NEON_MOD, OMAP2_RM_RSTST);
 +      omap2_prm_write_mod_reg(0xffffffff, OMAP3430_DSS_MOD, OMAP2_RM_RSTST);
 +      omap2_prm_write_mod_reg(0xffffffff, OMAP3430ES2_USBHOST_MOD, OMAP2_RM_RSTST);
  
        /* Clear any pending PRCM interrupts */
 -      prm_write_mod_reg(0, OCP_MOD, OMAP3_PRM_IRQSTATUS_MPU_OFFSET);
 +      omap2_prm_write_mod_reg(0, OCP_MOD, OMAP3_PRM_IRQSTATUS_MPU_OFFSET);
  
        omap3_iva_idle();
        omap3_d2d_idle();
@@@ -911,29 -925,12 +911,29 @@@ void omap3_pm_off_mode_enable(int enabl
                state = PWRDM_POWER_RET;
  
  #ifdef CONFIG_CPU_IDLE
 -      omap3_cpuidle_update_states();
 +      /*
 +       * Erratum i583: implementation for ES rev < ES1.2 on 3630. We cannot
 +       * enable OFF mode in a stable form for earlier revisions, so restrict
 +       * to RET instead.
 +       */
 +      if (IS_PM34XX_ERRATUM(PM_SDRC_WAKEUP_ERRATUM_i583))
 +              omap3_cpuidle_update_states(state, PWRDM_POWER_RET);
 +      else
 +              omap3_cpuidle_update_states(state, state);
  #endif
  
        list_for_each_entry(pwrst, &pwrst_list, node) {
 -              pwrst->next_state = state;
 -              omap_set_pwrdm_state(pwrst->pwrdm, state);
 +              if (IS_PM34XX_ERRATUM(PM_SDRC_WAKEUP_ERRATUM_i583) &&
 +                              pwrst->pwrdm == core_pwrdm &&
 +                              state == PWRDM_POWER_OFF) {
 +                      pwrst->next_state = PWRDM_POWER_RET;
 +                      WARN_ONCE(1,
 +                              "%s: Core OFF disabled due to erratum i583\n",
 +                              __func__);
 +              } else {
 +                      pwrst->next_state = state;
 +              }
 +              omap_set_pwrdm_state(pwrst->pwrdm, pwrst->next_state);
        }
  }
  
@@@ -1005,17 -1002,6 +1005,17 @@@ void omap_push_sram_idle(void
                                save_secure_ram_context_sz);
  }
  
 +static void __init pm_errata_configure(void)
 +{
 +      if (cpu_is_omap3630()) {
 +              pm34xx_errata |= PM_RTA_ERRATUM_i608;
 +              /* Enable the l2 cache toggling in sleep logic */
 +              enable_omap3630_toggle_l2_on_restore();
 +              if (omap_rev() < OMAP3630_REV_ES1_2)
 +                      pm34xx_errata |= PM_SDRC_WAKEUP_ERRATUM_i583;
 +      }
 +}
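pm34xx_errata is a plain bitmask; IS_PM34XX_ERRATUM() is presumably a simple mask test along these lines (the real definitions live in pm.h):

	/* Assumed flag layout and test macro: */
	#define PM_RTA_ERRATUM_i608		(1 << 0)
	#define PM_SDRC_WAKEUP_ERRATUM_i583	(1 << 1)
	#define IS_PM34XX_ERRATUM(id)		(pm34xx_errata & (id))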
 +
  static int __init omap3_pm_init(void)
  {
        struct power_state *pwrst, *tmp;
        if (!cpu_is_omap34xx())
                return -ENODEV;
  
 +      pm_errata_configure();
 +
        printk(KERN_ERR "Power Management for TI OMAP3.\n");
  
        /* XXX prcm_setup_regs needs to be before enabling hw
        pm_idle = omap3_pm_idle;
        omap3_idle_init();
  
 +      /*
 +       * RTA is disabled during initialization as per erratum i608.
 +       * It is safer to disable RTA in the bootloader, but we would like
 +       * to be doubly sure here and prevent any mishaps.
 +       */
 +      if (IS_PM34XX_ERRATUM(PM_RTA_ERRATUM_i608))
 +              omap3630_ctrl_disable_rta();
 +
        clkdm_add_wkdep(neon_clkdm, mpu_clkdm);
        if (omap_type() != OMAP2_DEVICE_TYPE_GP) {
                omap3_secure_ram_storage =
@@@ -16,7 -16,7 +16,7 @@@
  #include <linux/err.h>
  #include <linux/slab.h>
  
 -#include <plat/powerdomain.h>
 +#include "powerdomain.h"
  #include <mach/omap4-common.h>
  
  struct power_state {
  static LIST_HEAD(pwrst_list);
  
  #ifdef CONFIG_SUSPEND
 -static int omap4_pm_prepare(void)
 -{
 -      disable_hlt();
 -      return 0;
 -}
 -
  static int omap4_pm_suspend(void)
  {
        do_wfi();
@@@ -53,22 -59,28 +53,22 @@@ static int omap4_pm_enter(suspend_state
        return ret;
  }
  
 -static void omap4_pm_finish(void)
 -{
 -      enable_hlt();
 -      return;
 -}
 -
  static int omap4_pm_begin(suspend_state_t state)
  {
 +      disable_hlt();
        return 0;
  }
  
  static void omap4_pm_end(void)
  {
 +      enable_hlt();
        return;
  }
  
- static struct platform_suspend_ops omap_pm_ops = {
+ static const struct platform_suspend_ops omap_pm_ops = {
        .begin          = omap4_pm_begin,
        .end            = omap4_pm_end,
 -      .prepare        = omap4_pm_prepare,
        .enter          = omap4_pm_enter,
 -      .finish         = omap4_pm_finish,
        .valid          = suspend_valid_only_mem,
  };
  #endif /* CONFIG_SUSPEND */
  #include <plat/omap_hwmod.h>
  #include <plat/omap_device.h>
  
 -#include "prm.h"
 +#include "prm2xxx_3xxx.h"
  #include "pm.h"
 -#include "cm.h"
 +#include "cm2xxx_3xxx.h"
  #include "prm-regbits-34xx.h"
  #include "control.h"
 +#include "mux.h"
  
  #define UART_OMAP_NO_EMPTY_FIFO_READ_IP_REV   0x52
  #define UART_OMAP_WER         0x17    /* Wake-up enable register */
@@@ -107,16 -106,21 +107,16 @@@ struct omap_uart_state 
  static LIST_HEAD(uart_list);
  static u8 num_uarts;
  
 -/*
 - * Since these idle/enable hooks are used in the idle path itself
 - * which has interrupts disabled, use the non-locking versions of
 - * the hwmod enable/disable functions.
 - */
  static int uart_idle_hwmod(struct omap_device *od)
  {
 -      _omap_hwmod_idle(od->hwmods[0]);
 +      omap_hwmod_idle(od->hwmods[0]);
  
        return 0;
  }
  
  static int uart_enable_hwmod(struct omap_device *od)
  {
 -      _omap_hwmod_enable(od->hwmods[0]);
 +      omap_hwmod_enable(od->hwmods[0]);
  
        return 0;
  }
@@@ -165,9 -169,9 +165,9 @@@ static inline void serial_write_reg(str
  
  static inline void __init omap_uart_reset(struct omap_uart_state *uart)
  {
 -      serial_write_reg(uart, UART_OMAP_MDR1, 0x07);
 +      serial_write_reg(uart, UART_OMAP_MDR1, UART_OMAP_MDR1_DISABLE);
        serial_write_reg(uart, UART_OMAP_SCR, 0x08);
 -      serial_write_reg(uart, UART_OMAP_MDR1, 0x00);
 +      serial_write_reg(uart, UART_OMAP_MDR1, UART_OMAP_MDR1_16X_MODE);
  }
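The magic MDR1/LCR numbers are replaced by named constants here; the values assumed behind the names (per include/linux/serial_reg.h) are:

	#define UART_OMAP_MDR1_16X_MODE	0x00	/* UART 16x mode */
	#define UART_OMAP_MDR1_DISABLE	0x07	/* mode select: disabled */
	#define UART_LCR_CONF_MODE_A	0x80	/* configuration mode A */
	#define UART_LCR_CONF_MODE_B	0xBF	/* configuration mode B */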
  
  #if defined(CONFIG_PM) && defined(CONFIG_ARCH_OMAP3)
@@@ -215,7 -219,7 +215,7 @@@ static void omap_uart_save_context(stru
                return;
  
        lcr = serial_read_reg(uart, UART_LCR);
 -      serial_write_reg(uart, UART_LCR, 0xBF);
 +      serial_write_reg(uart, UART_LCR, UART_LCR_CONF_MODE_B);
        uart->dll = serial_read_reg(uart, UART_DLL);
        uart->dlh = serial_read_reg(uart, UART_DLM);
        serial_write_reg(uart, UART_LCR, lcr);
        uart->sysc = serial_read_reg(uart, UART_OMAP_SYSC);
        uart->scr = serial_read_reg(uart, UART_OMAP_SCR);
        uart->wer = serial_read_reg(uart, UART_OMAP_WER);
 -      serial_write_reg(uart, UART_LCR, 0x80);
 +      serial_write_reg(uart, UART_LCR, UART_LCR_CONF_MODE_A);
        uart->mcr = serial_read_reg(uart, UART_MCR);
        serial_write_reg(uart, UART_LCR, lcr);
  
@@@ -243,35 -247,32 +243,35 @@@ static void omap_uart_restore_context(s
        uart->context_valid = 0;
  
        if (uart->errata & UART_ERRATA_i202_MDR1_ACCESS)
 -              omap_uart_mdr1_errataset(uart, 0x07, 0xA0);
 +              omap_uart_mdr1_errataset(uart, UART_OMAP_MDR1_DISABLE, 0xA0);
        else
 -              serial_write_reg(uart, UART_OMAP_MDR1, 0x7);
 -      serial_write_reg(uart, UART_LCR, 0xBF); /* Config B mode */
 +              serial_write_reg(uart, UART_OMAP_MDR1, UART_OMAP_MDR1_DISABLE);
 +
 +      serial_write_reg(uart, UART_LCR, UART_LCR_CONF_MODE_B);
        efr = serial_read_reg(uart, UART_EFR);
        serial_write_reg(uart, UART_EFR, UART_EFR_ECB);
        serial_write_reg(uart, UART_LCR, 0x0); /* Operational mode */
        serial_write_reg(uart, UART_IER, 0x0);
 -      serial_write_reg(uart, UART_LCR, 0xBF); /* Config B mode */
 +      serial_write_reg(uart, UART_LCR, UART_LCR_CONF_MODE_B);
        serial_write_reg(uart, UART_DLL, uart->dll);
        serial_write_reg(uart, UART_DLM, uart->dlh);
        serial_write_reg(uart, UART_LCR, 0x0); /* Operational mode */
        serial_write_reg(uart, UART_IER, uart->ier);
 -      serial_write_reg(uart, UART_LCR, 0x80);
 +      serial_write_reg(uart, UART_LCR, UART_LCR_CONF_MODE_A);
        serial_write_reg(uart, UART_MCR, uart->mcr);
 -      serial_write_reg(uart, UART_LCR, 0xBF); /* Config B mode */
 +      serial_write_reg(uart, UART_LCR, UART_LCR_CONF_MODE_B);
        serial_write_reg(uart, UART_EFR, efr);
        serial_write_reg(uart, UART_LCR, UART_LCR_WLEN8);
        serial_write_reg(uart, UART_OMAP_SCR, uart->scr);
        serial_write_reg(uart, UART_OMAP_WER, uart->wer);
        serial_write_reg(uart, UART_OMAP_SYSC, uart->sysc);
 +
        if (uart->errata & UART_ERRATA_i202_MDR1_ACCESS)
 -              omap_uart_mdr1_errataset(uart, 0x00, 0xA1);
 +              omap_uart_mdr1_errataset(uart, UART_OMAP_MDR1_16X_MODE, 0xA1);
        else
                /* UART 16x mode */
 -              serial_write_reg(uart, UART_OMAP_MDR1, 0x00);
 +              serial_write_reg(uart, UART_OMAP_MDR1,
 +                              UART_OMAP_MDR1_16X_MODE);
  }
  #else
  static inline void omap_uart_save_context(struct omap_uart_state *uart) {}
@@@ -491,7 -492,6 +491,7 @@@ static void omap_uart_idle_init(struct 
                u32 wk_mask = 0;
                u32 padconf = 0;
  
 +              /* XXX These PRM accesses do not belong here */
                uart->wk_en = OMAP34XX_PRM_REGADDR(mod, PM_WKEN1);
                uart->wk_st = OMAP34XX_PRM_REGADDR(mod, PM_WKST1);
                switch (uart->num) {
@@@ -695,16 -695,16 +695,16 @@@ void __init omap_serial_early_init(void
  
  /**
   * omap_serial_init_port() - initialize single serial port
 - * @port: serial port number (0-3)
 + * @bdata: port specific board data pointer
   *
 - * This function initialies serial driver for given @port only.
 + * This function initializes the serial driver for the given port only.
   * Platforms can call this function instead of omap_serial_init()
   * if they don't plan to use all available UARTs as serial ports.
   *
   * Don't mix calls to omap_serial_init_port() and omap_serial_init(),
   * use only one of the two.
   */
 -void __init omap_serial_init_port(int port)
 +void __init omap_serial_init_port(struct omap_board_data *bdata)
  {
        struct omap_uart_state *uart;
        struct omap_hwmod *oh;
        struct omap_uart_port_info omap_up;
  #endif
  
 -      if (WARN_ON(port < 0))
 +      if (WARN_ON(!bdata))
 +              return;
 +      if (WARN_ON(bdata->id < 0))
                return;
 -      if (WARN_ON(port >= num_uarts))
 +      if (WARN_ON(bdata->id >= num_uarts))
                return;
  
        list_for_each_entry(uart, &uart_list, node)
 -              if (port == uart->num)
 +              if (bdata->id == uart->num)
                        break;
  
        oh = uart->oh;
        WARN(IS_ERR(od), "Could not build omap_device for %s: %s.\n",
             name, oh->name);
  
 +      oh->mux = omap_hwmod_mux_init(bdata->pads, bdata->pads_cnt);
 +
        uart->irq = oh->mpu_irqs[0].irq;
        uart->regshift = 2;
        uart->mapbase = oh->slaves[0]->addr->pa_start;
  }
  
  /**
-  * omap_serial_init() - intialize all supported serial ports
+  * omap_serial_init() - initialize all supported serial ports
   *
   * Initializes all available UARTs as serial ports. Platforms
   * can call this function when they want to have default behaviour
  void __init omap_serial_init(void)
  {
        struct omap_uart_state *uart;
 +      struct omap_board_data bdata;
  
 -      list_for_each_entry(uart, &uart_list, node)
 -              omap_serial_init_port(uart->num);
 +      list_for_each_entry(uart, &uart_list, node) {
 +              bdata.id = uart->num;
 +              bdata.flags = 0;
 +              bdata.pads = NULL;
 +              bdata.pads_cnt = 0;
 +              omap_serial_init_port(&bdata);
 +      }
  }
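A board file that needs pad muxing would fill in the pads fields instead of passing NULL; a hypothetical example (the pad names and the choice of UART2 are illustrative, not from this merge):

	static struct omap_device_pad board_uart2_pads[] __initdata = {
		{
			.name	= "uart2_tx.uart2_tx",
			.enable	= OMAP_PIN_OUTPUT | OMAP_MUX_MODE0,
		},
		{
			.name	= "uart2_rx.uart2_rx",
			.enable	= OMAP_PIN_INPUT | OMAP_MUX_MODE0,
		},
	};

	static struct omap_board_data board_uart2_data __initdata = {
		.id		= 1,	/* UART2 */
		.pads		= board_uart2_pads,
		.pads_cnt	= ARRAY_SIZE(board_uart2_pads),
	};

	/* ...then, from the board's init code: */
	omap_serial_init_port(&board_uart2_data);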
@@@ -579,8 -579,7 +579,8 @@@ static int sharpsl_ac_check(void
  static int sharpsl_pm_suspend(struct platform_device *pdev, pm_message_t state)
  {
        sharpsl_pm.flags |= SHARPSL_SUSPENDED;
 -      flush_scheduled_work();
 +      flush_delayed_work_sync(&toggle_charger);
 +      flush_delayed_work_sync(&sharpsl_bat);
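        /* Flushing only the delayed works this driver owns (rather than
         * the old flush_scheduled_work()) avoids waiting on, and possibly
         * deadlocking against, unrelated work items at suspend time. */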
  
        if (sharpsl_pm.charge_mode == CHRG_ON)
                sharpsl_pm.flags |= SHARPSL_DO_OFFLINE_CHRG;
@@@ -869,7 -868,7 +869,7 @@@ static void sharpsl_apm_get_power_statu
  }
  
  #ifdef CONFIG_PM
- static struct platform_suspend_ops sharpsl_pm_ops = {
+ static const struct platform_suspend_ops sharpsl_pm_ops = {
        .prepare        = pxa_pm_prepare,
        .finish         = pxa_pm_finish,
        .enter          = corgi_pxa_pm_enter,
diff --combined arch/arm/mm/flush.c
@@@ -10,7 -10,6 +10,7 @@@
  #include <linux/module.h>
  #include <linux/mm.h>
  #include <linux/pagemap.h>
 +#include <linux/highmem.h>
  
  #include <asm/cacheflush.h>
  #include <asm/cachetype.h>
@@@ -18,7 -17,6 +18,6 @@@
  #include <asm/smp_plat.h>
  #include <asm/system.h>
  #include <asm/tlbflush.h>
- #include <asm/smp_plat.h>
  
  #include "mm.h"
  
@@@ -181,10 -179,10 +180,10 @@@ void __flush_dcache_page(struct address
                        __cpuc_flush_dcache_area(addr, PAGE_SIZE);
                        kunmap_high(page);
                } else if (cache_is_vipt()) {
 -                      pte_t saved_pte;
 -                      addr = kmap_high_l1_vipt(page, &saved_pte);
 +                      /* unmapped pages might still be cached */
 +                      addr = kmap_atomic(page);
                        __cpuc_flush_dcache_area(addr, PAGE_SIZE);
 -                      kunmap_high_l1_vipt(page, saved_pte);
 +                      kunmap_atomic(addr);
                }
        }
  
  #define MXC_GPIO_IRQ_START    MXC_INTERNAL_IRQS
  
  /* these are ordered by size to support multi-SoC kernels */
 -#if defined CONFIG_ARCH_MX2
 +#if defined CONFIG_ARCH_MX53
 +#define MXC_GPIO_IRQS         (32 * 7)
 +#elif defined CONFIG_ARCH_MX2
 +#define MXC_GPIO_IRQS         (32 * 6)
 +#elif defined CONFIG_ARCH_MX50
  #define MXC_GPIO_IRQS         (32 * 6)
  #elif defined CONFIG_ARCH_MX1
  #define MXC_GPIO_IRQS         (32 * 4)
  #elif defined CONFIG_ARCH_MX25
  #define MXC_GPIO_IRQS         (32 * 4)
 -#elif defined CONFIG_ARCH_MX5
 +#elif defined CONFIG_ARCH_MX51
  #define MXC_GPIO_IRQS         (32 * 4)
  #elif defined CONFIG_ARCH_MXC91231
  #define MXC_GPIO_IRQS         (32 * 4)
@@@ -70,7 -66,7 +70,7 @@@ extern int imx_irq_set_priority(unsigne
  
  /* all normal IRQs can be FIQs */
  #define FIQ_START     0
- /* switch betwean IRQ and FIQ */
+ /* switch between IRQ and FIQ */
  extern int mxc_set_irq_fiq(unsigned int irq, unsigned int type);
  
  #endif /* __ASM_ARCH_MXC_IRQS_H__ */
@@@ -23,7 -23,7 +23,7 @@@
   * - add pinmuxing
   * - init_conn_id_bit (CONNID_BIT_VECTOR)
   * - implement default hwmod SMS/SDRC flags?
 - * - remove unused fields
 + * - move Linux-specific data ("non-ROM data") out
   *
   */
  #ifndef __ARCH_ARM_PLAT_OMAP_INCLUDE_MACH_OMAP_HWMOD_H
@@@ -32,9 -32,8 +32,9 @@@
  #include <linux/kernel.h>
  #include <linux/list.h>
  #include <linux/ioport.h>
 -#include <linux/mutex.h>
 +#include <linux/spinlock.h>
  #include <plat/cpu.h>
 +#include <plat/voltage.h>
  
  struct omap_device;
  
@@@ -77,20 -76,6 +77,20 @@@ extern struct omap_hwmod_sysc_fields om
  #define HWMOD_IDLEMODE_FORCE          (1 << 0)
  #define HWMOD_IDLEMODE_NO             (1 << 1)
  #define HWMOD_IDLEMODE_SMART          (1 << 2)
 +/* Slave idle mode flag only */
 +#define HWMOD_IDLEMODE_SMART_WKUP     (1 << 3)
 +
 +/**
 + * struct omap_hwmod_mux_info - hwmod specific mux configuration
 + * @pads:              array of omap_device_pad entries
 + * @nr_pads:           number of omap_device_pad entries
 + *
 + * Note that this is currently built during init as needed.
 + */
 +struct omap_hwmod_mux_info {
 +      int                             nr_pads;
 +      struct omap_device_pad          *pads;
 +};
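The mux entries are built at init time from board-supplied pad data by a helper whose assumed shape is:

	/* Assumed builder, as used from mach-omap2/serial.c above: */
	struct omap_hwmod_mux_info *omap_hwmod_mux_init(struct omap_device_pad *bpads,
							int nr_pads);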
  
  /**
   * struct omap_hwmod_irq_info - MPU IRQs used by the hwmod
@@@ -174,7 -159,7 +174,7 @@@ struct omap_hwmod_omap2_firewall 
   * ADDR_MAP_ON_INIT: Map this address space during omap_hwmod init.
   * ADDR_TYPE_RT: Address space contains module register target data.
   */
 -#define ADDR_MAP_ON_INIT      (1 << 0)
 +#define ADDR_MAP_ON_INIT      (1 << 0)        /* XXX does not belong */
  #define ADDR_TYPE_RT          (1 << 1)
  
  /**
@@@ -215,6 -200,8 +215,6 @@@ struct omap_hwmod_addr_space 
   * @fw: interface firewall data
   * @addr_cnt: ARRAY_SIZE(@addr)
   * @width: OCP data width
 - * @thread_cnt: number of threads
 - * @max_burst_len: maximum burst length in @width sized words (0 if unlimited)
   * @user: initiators using this interface (see OCP_USER_* macros above)
   * @flags: OCP interface flags (see OCPIF_* macros above)
   *
@@@ -234,6 -221,8 +234,6 @@@ struct omap_hwmod_ocp_if 
        }                               fw;
        u8                              addr_cnt;
        u8                              width;
 -      u8                              thread_cnt;
 -      u8                              max_burst_len;
        u8                              user;
        u8                              flags;
  };
  /* Macros for use in struct omap_hwmod_sysconfig */
  
  /* Flags for use in omap_hwmod_sysconfig.idlemodes */
 -#define MASTER_STANDBY_SHIFT  2
 +#define MASTER_STANDBY_SHIFT  4
  #define SLAVE_IDLE_SHIFT      0
  #define SIDLE_FORCE           (HWMOD_IDLEMODE_FORCE << SLAVE_IDLE_SHIFT)
  #define SIDLE_NO              (HWMOD_IDLEMODE_NO << SLAVE_IDLE_SHIFT)
  #define SIDLE_SMART           (HWMOD_IDLEMODE_SMART << SLAVE_IDLE_SHIFT)
 +#define SIDLE_SMART_WKUP      (HWMOD_IDLEMODE_SMART_WKUP << SLAVE_IDLE_SHIFT)
  #define MSTANDBY_FORCE                (HWMOD_IDLEMODE_FORCE << MASTER_STANDBY_SHIFT)
  #define MSTANDBY_NO           (HWMOD_IDLEMODE_NO << MASTER_STANDBY_SHIFT)
  #define MSTANDBY_SMART                (HWMOD_IDLEMODE_SMART << MASTER_STANDBY_SHIFT)
@@@ -351,7 -339,7 +351,7 @@@ struct omap_hwmod_omap2_prcm 
  /**
   * struct omap_hwmod_omap4_prcm - OMAP4-specific PRCM data
   * @clkctrl_reg: PRCM address of the clock control register
-  * @rstctrl_reg: adress of the XXX_RSTCTRL register located in the PRM
+  * @rstctrl_reg: address of the XXX_RSTCTRL register located in the PRM
   * @submodule_wkdep_bit: bit shift of the WKDEP range
   */
  struct omap_hwmod_omap4_prcm {
   * HWMOD_SWSUP_MSTDBY: omap_hwmod code should manually bring module in and out
   *     of standby, rather than relying on module smart-standby
   * HWMOD_INIT_NO_RESET: don't reset this module at boot - important for
 - *     SDRAM controller, etc.
 + *     SDRAM controller, etc. XXX probably belongs outside the main hwmod file
   * HWMOD_INIT_NO_IDLE: don't idle this module at boot - important for SDRAM
 - *     controller, etc.
 + *     controller, etc. XXX probably belongs outside the main hwmod file
   * HWMOD_NO_AUTOIDLE: disable module autoidle (OCP_SYSCONFIG.AUTOIDLE)
   *     when module is enabled, rather than the default, which is to
   *     enable autoidle
   * HWMOD_SET_DEFAULT_CLOCKACT: program CLOCKACTIVITY bits at startup
 - * HWMOD_NO_IDLEST : this module does not have idle status - this is the case
 + * HWMOD_NO_IDLEST: this module does not have idle status - this is the case
   *     only for a few initiator modules on OMAP2 & 3.
   * HWMOD_CONTROL_OPT_CLKS_IN_RESET: Enable all optional clocks during reset.
   *     This is needed for devices like DSS that require optional clocks enabled
   * @name: name of the hwmod_class
   * @sysc: device SYSCONFIG/SYSSTATUS register data
   * @rev: revision of the IP class
 + * @pre_shutdown: ptr to fn to be executed immediately prior to device shutdown
 + * @reset: ptr to fn to be executed in place of the standard hwmod reset fn
   *
   * Represents the class of an OMAP hardware "module" (e.g. timer,
   * smartreflex, gpio, uart...)
 + *
 + * @pre_shutdown is a function that will be run immediately before
 + * hwmod clocks are disabled, etc.  It is intended for use with hwmods
 + * like the MPU watchdog, which cannot be disabled with the standard
 + * omap_hwmod_shutdown().  The function should return 0 upon success,
 + * or some negative error upon failure.  Returning an error will cause
 + * omap_hwmod_shutdown() to abort the device shutdown and return an
 + * error.
 + *
 + * If @reset is defined, then the function it points to will be
 + * executed in place of the standard hwmod _reset() code in
 + * mach-omap2/omap_hwmod.c.  This is needed for IP blocks which have
 + * unusual reset sequences - usually processor IP blocks like the IVA.
   */
  struct omap_hwmod_class {
        const char                              *name;
        struct omap_hwmod_class_sysconfig       *sysc;
        u32                                     rev;
 +      int                                     (*pre_shutdown)(struct omap_hwmod *oh);
 +      int                                     (*reset)(struct omap_hwmod *oh);
  };
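For illustration, a class wiring up the new hooks might look like this hypothetical watchdog class (all names invented):

	static int wdt_pre_shutdown(struct omap_hwmod *oh)
	{
		/* quiesce the watchdog; return 0 on success, -Exxx on failure */
		return my_wdt_quiesce(oh);	/* hypothetical helper */
	}

	static struct omap_hwmod_class wdt_hwmod_class = {
		.name		= "wd_timer",
		.sysc		= &wdt_sysc,	/* hypothetical sysconfig data */
		.pre_shutdown	= wdt_pre_shutdown,
	};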
  
  /**
   * @main_clk: main clock: OMAP clock name
   * @_clk: pointer to the main struct clk (filled in at runtime)
   * @opt_clks: other device clocks that drivers can request (0..*)
 + * @vdd_name: voltage domain name
 + * @voltdm: pointer to voltage domain (filled in at runtime)
   * @masters: ptr to array of OCP ifs that this hwmod can initiate on
   * @slaves: ptr to array of OCP ifs that this hwmod can respond on
   * @dev_attr: arbitrary device attributes that can be passed to the driver
   * @_sysc_cache: internal-use hwmod flags
   * @_mpu_rt_va: cached register target start address (internal use)
   * @_mpu_port_index: cached MPU register target slave ID (internal use)
 - * @msuspendmux_reg_id: CONTROL_MSUSPENDMUX register ID (1-6)
 - * @msuspendmux_shift: CONTROL_MSUSPENDMUX register bit shift
   * @mpu_irqs_cnt: number of @mpu_irqs
   * @sdma_reqs_cnt: number of @sdma_reqs
   * @opt_clks_cnt: number of @opt_clks
   * @response_lat: device OCP response latency (in interface clock cycles)
   * @_int_flags: internal-use hwmod flags
   * @_state: internal-use hwmod state
 + * @_postsetup_state: internal-use state to leave the hwmod in after _setup()
   * @flags: hwmod flags (documented below)
   * @omap_chip: OMAP chips this hwmod is present on
 - * @_mutex: mutex serializing operations on this hwmod
 + * @_lock: spinlock serializing operations on this hwmod
   * @node: list node for hwmod list (internal use)
   *
   * @main_clk refers to this module's "main clock," which for our
@@@ -499,7 -469,6 +499,7 @@@ struct omap_hwmod 
        const char                      *name;
        struct omap_hwmod_class         *class;
        struct omap_device              *od;
 +      struct omap_hwmod_mux_info      *mux;
        struct omap_hwmod_irq_info      *mpu_irqs;
        struct omap_hwmod_dma_info      *sdma_reqs;
        struct omap_hwmod_rst_info      *rst_lines;
        const char                      *main_clk;
        struct clk                      *_clk;
        struct omap_hwmod_opt_clk       *opt_clks;
 +      char                            *vdd_name;
 +      struct voltagedomain            *voltdm;
        struct omap_hwmod_ocp_if        **masters; /* connect to *_IA */
        struct omap_hwmod_ocp_if        **slaves;  /* connect to *_TA */
        void                            *dev_attr;
        u32                             _sysc_cache;
        void __iomem                    *_mpu_rt_va;
 -      struct mutex                    _mutex;
 +      spinlock_t                      _lock;
        struct list_head                node;
        u16                             flags;
        u8                              _mpu_port_index;
 -      u8                              msuspendmux_reg_id;
 -      u8                              msuspendmux_shift;
        u8                              response_lat;
        u8                              mpu_irqs_cnt;
        u8                              sdma_reqs_cnt;
        u8                              hwmods_cnt;
        u8                              _int_flags;
        u8                              _state;
 +      u8                              _postsetup_state;
        const struct omap_chip_id       omap_chip;
  };
  
  int omap_hwmod_init(struct omap_hwmod **ohs);
  struct omap_hwmod *omap_hwmod_lookup(const char *name);
  int omap_hwmod_for_each(int (*fn)(struct omap_hwmod *oh, void *data),
                        void *data);
 -int omap_hwmod_late_init(u8 skip_setup_idle);
 +int omap_hwmod_late_init(void);
  
  int omap_hwmod_enable(struct omap_hwmod *oh);
  int _omap_hwmod_enable(struct omap_hwmod *oh);
@@@ -586,9 -556,6 +586,9 @@@ int omap_hwmod_for_each_by_class(const 
                                           void *user),
                                 void *user);
  
 +int omap_hwmod_set_postsetup_state(struct omap_hwmod *oh, u8 state);
 +u32 omap_hwmod_get_context_loss_count(struct omap_hwmod *oh);
 +
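A typical consumer compares the context-loss count across power transitions and restores state only when the count has moved; a sketch with hypothetical names:

	u32 loss = omap_hwmod_get_context_loss_count(oh);
	if (loss != dev_ctx_loss_count) {	/* hypothetical cached value */
		my_driver_restore_context(dev);	/* hypothetical helper */
		dev_ctx_loss_count = loss;
	}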
  /*
   * Chip variant-specific hwmod init routines - XXX should be converted
   * to use initcalls once the initial boot ordering is straightened out
@@@ -1,5 -1,5 +1,5 @@@
  /*
 - * Copyright 2005-2008 Analog Devices Inc.
 + * Copyright 2005-2010 Analog Devices Inc.
   *
   * Licensed under the ADI BSD license or the GPL-2 (or later)
   */
@@@ -7,6 -7,9 +7,6 @@@
  #ifndef _DEF_BF534_H
  #define _DEF_BF534_H
  
 -/* Include all Core registers and bit definitions */
 -#include <asm/def_LPBlackfin.h>
 -
  /************************************************************************************
  ** System MMR Register Map
  *************************************************************************************/
  #define EBIU_SDSTAT                   0xFFC00A1C      /* SDRAM Status Register                                                */
  
  /* DMA Traffic Control Registers                                                                                                      */
 -#define DMA_TC_PER                    0xFFC00B0C      /* Traffic Control Periods Register                     */
 -#define DMA_TC_CNT                    0xFFC00B10      /* Traffic Control Current Counts Register      */
 -
 -/* Alternate deprecated register names (below) provided for backwards code compatibility */
 -#define DMA_TCPER                     0xFFC00B0C      /* Traffic Control Periods Register                     */
 -#define DMA_TCCNT                     0xFFC00B10      /* Traffic Control Current Counts Register      */
 +#define DMAC_TC_PER                   0xFFC00B0C      /* Traffic Control Periods Register                     */
 +#define DMAC_TC_CNT                   0xFFC00B10      /* Traffic Control Current Counts Register      */
  
  /* DMA Controller (0xFFC00C00 - 0xFFC00FFF)                                                                                                                   */
  #define DMA0_NEXT_DESC_PTR            0xFFC00C00      /* DMA Channel 0 Next Descriptor Pointer Register               */
  #define IWR_ENABLE(x) (1 << ((x)&0x1F))       /* Wakeup Enable Peripheral #x          */
  #define IWR_DISABLE(x)        (0xFFFFFFFF ^ (1 << ((x)&0x1F)))        /* Wakeup Disable Peripheral #x         */
  
 -/* ************** UART CONTROLLER MASKS *************************/
 -/* UARTx_LCR Masks                                                                                            */
 -#define WLS(x)                (((x)-5) & 0x03)        /* Word Length Select   */
 -#define STB                   0x04    /* Stop Bits                    */
 -#define PEN                   0x08    /* Parity Enable                */
 -#define EPS                   0x10    /* Even Parity Select   */
 -#define STP                   0x20    /* Stick Parity                 */
 -#define SB                    0x40    /* Set Break                    */
 -#define DLAB          0x80    /* Divisor Latch Access */
 -
 -/* UARTx_MCR Mask                                                                             */
 -#define LOOP_ENA              0x10    /* Loopback Mode Enable         */
 -#define LOOP_ENA_P    0x04
 -/* UARTx_LSR Masks                                                                            */
 -#define DR                    0x01    /* Data Ready                           */
 -#define OE                    0x02    /* Overrun Error                        */
 -#define PE                    0x04    /* Parity Error                         */
 -#define FE                    0x08    /* Framing Error                        */
 -#define BI                    0x10    /* Break Interrupt                      */
 -#define THRE          0x20    /* THR Empty                            */
 -#define TEMT          0x40    /* TSR and UART_THR Empty       */
 -
 -/* UARTx_IER Masks                                                                                                                    */
 -#define ERBFI         0x01    /* Enable Receive Buffer Full Interrupt         */
 -#define ETBEI         0x02    /* Enable Transmit Buffer Empty Interrupt       */
 -#define ELSI          0x04    /* Enable RX Status Interrupt                           */
 -
 -/* UARTx_IIR Masks                                                                                                            */
 -#define NINT          0x01    /* Pending Interrupt                                    */
 -#define IIR_TX_READY    0x02  /* UART_THR empty                               */
 -#define IIR_RX_READY    0x04  /* Receive data ready                           */
 -#define IIR_LINE_CHANGE 0x06  /* Receive line status                          */
 -#define IIR_STATUS    0x06
 -
 -/* UARTx_GCTL Masks                                                                                                   */
 -#define UCEN          0x01    /* Enable UARTx Clocks                          */
 -#define IREN          0x02    /* Enable IrDA Mode                                     */
 -#define TPOLC         0x04    /* IrDA TX Polarity Change                      */
 -#define RPOLC         0x08    /* IrDA RX Polarity Change                      */
 -#define FPE                   0x10    /* Force Parity Error On Transmit       */
 -#define FFE                   0x20    /* Force Framing Error On Transmit      */
 -
  /*  ****************  GENERAL PURPOSE TIMER MASKS  **********************/
  /* TIMER_ENABLE Masks                                                                                                 */
  #define TIMEN0                        0x0001  /* Enable Timer 0                                       */
  #define EMU_RUN                       0x0200  /* Emulation Behavior Select                    */
  #define ERR_TYP                       0xC000  /* Error Type                                                   */
  
 -/* ******************   GPIO PORTS F, G, H MASKS  ***********************/
 -/*  General Purpose IO (0xFFC00700 - 0xFFC007FF)  Masks                               */
 -/* Port F Masks                                                                                                               */
 -#define PF0           0x0001
 -#define PF1           0x0002
 -#define PF2           0x0004
 -#define PF3           0x0008
 -#define PF4           0x0010
 -#define PF5           0x0020
 -#define PF6           0x0040
 -#define PF7           0x0080
 -#define PF8           0x0100
 -#define PF9           0x0200
 -#define PF10  0x0400
 -#define PF11  0x0800
 -#define PF12  0x1000
 -#define PF13  0x2000
 -#define PF14  0x4000
 -#define PF15  0x8000
 -
 -/* Port G Masks                                                                                                                       */
 -#define PG0           0x0001
 -#define PG1           0x0002
 -#define PG2           0x0004
 -#define PG3           0x0008
 -#define PG4           0x0010
 -#define PG5           0x0020
 -#define PG6           0x0040
 -#define PG7           0x0080
 -#define PG8           0x0100
 -#define PG9           0x0200
 -#define PG10  0x0400
 -#define PG11  0x0800
 -#define PG12  0x1000
 -#define PG13  0x2000
 -#define PG14  0x4000
 -#define PG15  0x8000
 -
 -/* Port H Masks                                                                                                                       */
 -#define PH0           0x0001
 -#define PH1           0x0002
 -#define PH2           0x0004
 -#define PH3           0x0008
 -#define PH4           0x0010
 -#define PH5           0x0020
 -#define PH6           0x0040
 -#define PH7           0x0080
 -#define PH8           0x0100
 -#define PH9           0x0200
 -#define PH10  0x0400
 -#define PH11  0x0800
 -#define PH12  0x1000
 -#define PH13  0x2000
 -#define PH14  0x4000
 -#define PH15  0x8000
 -
  /* *********************  ASYNCHRONOUS MEMORY CONTROLLER MASKS  *************************/
  /* EBIU_AMGCTL Masks                                                                                                                                  */
  #define AMCKEN                        0x0001  /* Enable CLKOUT                                                                        */
  #define       SADD_LEN        0x0002  /* Slave Address Length                                                 */
  #define       STDVAL          0x0004  /* Slave Transmit Data Valid                                    */
  #define       NAK                     0x0008  /* NAK/ACK* Generated At Conclusion Of Transfer */
- #define       GEN                     0x0010  /* General Call Adrress Matching Enabled                */
+ #define       GEN                     0x0010  /* General Call Address Matching Enabled                */
  
  /* TWI_SLAVE_STAT Masks                                                                                                                       */
  #define       SDIR            0x0001  /* Slave Transfer Direction (Transmit/Receive*) */
@@@ -23,6 -23,9 +23,6 @@@
  
  void bfin_pm_suspend_standby_enter(void)
  {
 -      unsigned long flags;
 -
 -      flags = hard_local_irq_save();
        bfin_pm_standby_setup();
  
  #ifdef CONFIG_PM_BFIN_SLEEP_DEEPER
@@@ -52,6 -55,8 +52,6 @@@
  #else
        bfin_write_SIC_IWR(IWR_DISABLE_ALL);
  #endif
 -
 -      hard_local_irq_restore(flags);
  }
  
  int bf53x_suspend_l1_mem(unsigned char *memptr)
@@@ -122,6 -127,7 +122,6 @@@ static void flushinv_all_dcache(void
  
  int bfin_pm_suspend_mem_enter(void)
  {
 -      unsigned long flags;
        int wakeup, ret;
  
        unsigned char *memptr = kmalloc(L1_CODE_LENGTH + L1_DATA_A_LENGTH
        wakeup |= GPWE;
  #endif
  
        ret = blackfin_dma_suspend();
  
        if (ret) {
 -              hard_local_irq_restore(flags);
                kfree(memptr);
                return ret;
        }
        bfin_gpio_pm_hibernate_restore();
        blackfin_dma_resume();
  
 -      hard_local_irq_restore(flags);
        kfree(memptr);
  
        return 0;
@@@ -223,7 -233,7 +223,7 @@@ static int bfin_pm_enter(suspend_state_
        return 0;
  }
  
- struct platform_suspend_ops bfin_pm_ops = {
+ static const struct platform_suspend_ops bfin_pm_ops = {
        .enter = bfin_pm_enter,
        .valid  = bfin_pm_valid,
  };
@@@ -141,9 -141,10 +141,9 @@@ extern __u32                      cpu_caps_set[NCAPINTS]
  #ifdef CONFIG_SMP
  DECLARE_PER_CPU_SHARED_ALIGNED(struct cpuinfo_x86, cpu_info);
  #define cpu_data(cpu)         per_cpu(cpu_info, cpu)
 -#define current_cpu_data      __get_cpu_var(cpu_info)
  #else
 +#define cpu_info              boot_cpu_data
  #define cpu_data(cpu)         boot_cpu_data
 -#define current_cpu_data      boot_cpu_data
  #endif
  
  extern const struct seq_operations cpuinfo_op;
@@@ -901,7 -902,7 +901,7 @@@ extern unsigned long thread_saved_pc(st
  /*
   * The below -8 is to reserve 8 bytes on top of the ring0 stack.
   * This is necessary to guarantee that the entire "struct pt_regs"
-  * is accessable even if the CPU haven't stored the SS/ESP registers
+  * is accessible even if the CPU hasn't stored the SS/ESP registers
   * on the stack (interrupt gate does not save these registers
   * when switching to the same priv ring).
   * Therefore beware: accessing the ss/esp fields of the
@@@ -126,7 -126,7 +126,7 @@@ ENTRY(startup_32
        movsl
        movl pa(boot_params) + NEW_CL_POINTER,%esi
        andl %esi,%esi
-       jz 1f                   # No comand line
+       jz 1f                   # No command line
        movl $pa(boot_command_line),%edi
        movl $(COMMAND_LINE_SIZE/4),%ecx
        rep
        movl %eax, pa(olpc_ofw_pgd)
  #endif
  
 -#ifdef CONFIG_PARAVIRT
 -      /* This is can only trip for a broken bootloader... */
 -      cmpw $0x207, pa(boot_params + BP_version)
 -      jb default_entry
 -
 -      /* Paravirt-compatible boot parameters.  Look to see what architecture
 -              we're booting under. */
 -      movl pa(boot_params + BP_hardware_subarch), %eax
 -      cmpl $num_subarch_entries, %eax
 -      jae bad_subarch
 -
 -      movl pa(subarch_entries)(,%eax,4), %eax
 -      subl $__PAGE_OFFSET, %eax
 -      jmp *%eax
 -
 -bad_subarch:
 -WEAK(lguest_entry)
 -WEAK(xen_entry)
 -      /* Unknown implementation; there's really
 -         nothing we can do at this point. */
 -      ud2a
 -
 -      __INITDATA
 -
 -subarch_entries:
 -      .long default_entry             /* normal x86/PC */
 -      .long lguest_entry              /* lguest hypervisor */
 -      .long xen_entry                 /* Xen hypervisor */
 -      .long default_entry             /* Moorestown MID */
 -num_subarch_entries = (. - subarch_entries) / 4
 -.previous
 -#endif /* CONFIG_PARAVIRT */
 -
  /*
   * Initialize page tables.  This creates a PDE and a set of page
   * tables, which are located immediately beyond __brk_base.  The variable
   *
   * Note that the stack is not yet set up!
   */
 -default_entry:
  #ifdef CONFIG_X86_PAE
  
        /*
@@@ -227,42 -261,7 +227,42 @@@ page_pde_offset = (__PAGE_OFFSET >> 20)
        movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,%eax
        movl %eax,pa(initial_page_table+0xffc)
  #endif
 -      jmp 3f
 +
 +#ifdef CONFIG_PARAVIRT
 +      /* This can only trip for a broken bootloader... */
 +      cmpw $0x207, pa(boot_params + BP_version)
 +      jb default_entry
 +
 +      /* Paravirt-compatible boot parameters.  Look to see what architecture
 +              we're booting under. */
 +      movl pa(boot_params + BP_hardware_subarch), %eax
 +      cmpl $num_subarch_entries, %eax
 +      jae bad_subarch
 +
 +      movl pa(subarch_entries)(,%eax,4), %eax
 +      subl $__PAGE_OFFSET, %eax
 +      jmp *%eax
 +
 +bad_subarch:
 +WEAK(lguest_entry)
 +WEAK(xen_entry)
 +      /* Unknown implementation; there's really
 +         nothing we can do at this point. */
 +      ud2a
 +
 +      __INITDATA
 +
 +subarch_entries:
 +      .long default_entry             /* normal x86/PC */
 +      .long lguest_entry              /* lguest hypervisor */
 +      .long xen_entry                 /* Xen hypervisor */
 +      .long default_entry             /* Moorestown MID */
 +num_subarch_entries = (. - subarch_entries) / 4
 +.previous
 +#else
 +      jmp default_entry
 +#endif /* CONFIG_PARAVIRT */
 +
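
A conceptual C analogue of the subarch dispatch above (illustrative only; the real code runs in assembly before paging is enabled, so it goes through pa() for physical addresses, and the entry symbols are assembly labels rather than C functions):

	#include <linux/kernel.h>
	#include <asm/bootparam.h>

	typedef void (*subarch_entry_t)(void);

	static const subarch_entry_t subarch_entries[] = {
		default_entry,	/* normal x86/PC */
		lguest_entry,	/* lguest hypervisor */
		xen_entry,	/* Xen hypervisor */
		default_entry,	/* Moorestown MID */
	};

	static void dispatch_subarch(const struct boot_params *bp)
	{
		/* hardware_subarch only exists from boot protocol 2.07 on. */
		if (bp->hdr.version < 0x207 ||
		    bp->hdr.hardware_subarch >= ARRAY_SIZE(subarch_entries))
			default_entry();
		else
			subarch_entries[bp->hdr.hardware_subarch]();
	}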
  /*
   * Non-boot CPU entry point; entered from trampoline.S
   * We can't lgdt here, because lgdt itself uses a data segment, but
@@@ -283,7 -282,7 +283,7 @@@ ENTRY(startup_32_smp
        movl %eax,%fs
        movl %eax,%gs
  #endif /* CONFIG_SMP */
 -3:
 +default_entry:
  
  /*
   *    New page tables may be in 4Mbyte page mode and may
        subl $0x80000001, %eax
        cmpl $(0x8000ffff-0x80000001), %eax
        ja 6f
 +
 +      /* Clear bogus XD_DISABLE bits */
 +      call verify_cpu
 +
        mov $0x80000001, %eax
        cpuid
        /* Execute Disable bit supported? */
@@@ -616,8 -611,6 +616,8 @@@ ignore_int
  #endif
        iret
  
 +#include "verify_cpu.S"
 +
        __REFDATA
  .align 4
  ENTRY(initial_code)
  __PAGE_ALIGNED_BSS
        .align PAGE_SIZE_asm
  #ifdef CONFIG_X86_PAE
 -ENTRY(initial_pg_pmd)
 +initial_pg_pmd:
        .fill 1024*KPMDS,4,0
  #else
  ENTRY(initial_page_table)
        .fill 1024,4,0
  #endif
 -ENTRY(initial_pg_fixmap)
 +initial_pg_fixmap:
        .fill 1024,4,0
  ENTRY(empty_zero_page)
        .fill 4096,1,0
index 65df603,0000000..25bfdbb
mode 100644,000000..100644
--- /dev/null
@@@ -1,319 -1,0 +1,319 @@@
 +/*
 + * early_printk_mrst.c - early consoles for Intel MID platforms
 + *
 + * Copyright (c) 2008-2010, Intel Corporation
 + *
 + * This program is free software; you can redistribute it and/or
 + * modify it under the terms of the GNU General Public License
 + * as published by the Free Software Foundation; version 2
 + * of the License.
 + */
 +
 +/*
 + * This file implements two early consoles named mrst and hsu.
 + * mrst is based on the Maxim3110 spi-uart device, which exists in both
 + * Moorestown and Medfield platforms, while hsu is based on a High
 + * Speed UART device that exists only in the Medfield platform.
 + */
 +
 +#include <linux/serial_reg.h>
 +#include <linux/serial_mfd.h>
 +#include <linux/kmsg_dump.h>
 +#include <linux/console.h>
 +#include <linux/kernel.h>
 +#include <linux/delay.h>
 +#include <linux/init.h>
 +#include <linux/io.h>
 +
 +#include <asm/fixmap.h>
 +#include <asm/pgtable.h>
 +#include <asm/mrst.h>
 +
 +#define MRST_SPI_TIMEOUT              0x200000
 +#define MRST_REGBASE_SPI0             0xff128000
 +#define MRST_REGBASE_SPI1             0xff128400
 +#define MRST_CLK_SPI0_REG             0xff11d86c
 +
 +/* Bit fields in CTRLR0 */
 +#define SPI_DFS_OFFSET                        0
 +
 +#define SPI_FRF_OFFSET                        4
 +#define SPI_FRF_SPI                   0x0
 +#define SPI_FRF_SSP                   0x1
 +#define SPI_FRF_MICROWIRE             0x2
 +#define SPI_FRF_RESV                  0x3
 +
 +#define SPI_MODE_OFFSET                       6
 +#define SPI_SCPH_OFFSET                       6
 +#define SPI_SCOL_OFFSET                       7
 +#define SPI_TMOD_OFFSET                       8
 +#define       SPI_TMOD_TR                     0x0             /* xmit & recv */
 +#define SPI_TMOD_TO                   0x1             /* xmit only */
 +#define SPI_TMOD_RO                   0x2             /* recv only */
 +#define SPI_TMOD_EPROMREAD            0x3             /* eeprom read mode */
 +
 +#define SPI_SLVOE_OFFSET              10
 +#define SPI_SRL_OFFSET                        11
 +#define SPI_CFS_OFFSET                        12
 +
 +/* Bit fields in SR, 7 bits */
 +#define SR_MASK                               0x7f            /* cover 7 bits */
 +#define SR_BUSY                               (1 << 0)
 +#define SR_TF_NOT_FULL                        (1 << 1)
 +#define SR_TF_EMPT                    (1 << 2)
 +#define SR_RF_NOT_EMPT                        (1 << 3)
 +#define SR_RF_FULL                    (1 << 4)
 +#define SR_TX_ERR                     (1 << 5)
 +#define SR_DCOL                               (1 << 6)
 +
 +struct dw_spi_reg {
 +      u32     ctrl0;
 +      u32     ctrl1;
 +      u32     ssienr;
 +      u32     mwcr;
 +      u32     ser;
 +      u32     baudr;
 +      u32     txfltr;
 +      u32     rxfltr;
 +      u32     txflr;
 +      u32     rxflr;
 +      u32     sr;
 +      u32     imr;
 +      u32     isr;
 +      u32     risr;
 +      u32     txoicr;
 +      u32     rxoicr;
 +      u32     rxuicr;
 +      u32     msticr;
 +      u32     icr;
 +      u32     dmacr;
 +      u32     dmatdlr;
 +      u32     dmardlr;
 +      u32     idr;
 +      u32     version;
 +
 +      /* Currently operates as 32 bits, though only the low 16 bits matter */
 +      u32     dr;
 +} __packed;
 +
 +#define dw_readl(dw, name)            __raw_readl(&(dw)->name)
 +#define dw_writel(dw, name, val)      __raw_writel((val), &(dw)->name)
 +
 +/* Use the SPI0 registers for mrst by default; switch to SPI1 if Penwell is detected */
 +static unsigned long mrst_spi_paddr = MRST_REGBASE_SPI0;
 +
 +static u32 *pclk_spi0;
- /* Always contains an accessable address, start with 0 */
++/* Always contains an accessible address, start with 0 */
 +static struct dw_spi_reg *pspi;
 +
 +static struct kmsg_dumper dw_dumper;
 +static int dumper_registered;
 +
 +static void dw_kmsg_dump(struct kmsg_dumper *dumper,
 +                      enum kmsg_dump_reason reason,
 +                      const char *s1, unsigned long l1,
 +                      const char *s2, unsigned long l2)
 +{
 +      int i;
 +
 +      /* By the time we get here, re-init the HW to be safe */
 +      mrst_early_console_init();
 +
 +      for (i = 0; i < l1; i++)
 +              early_mrst_console.write(&early_mrst_console, s1 + i, 1);
 +      for (i = 0; i < l2; i++)
 +              early_mrst_console.write(&early_mrst_console, s2 + i, 1);
 +}
 +
 +/* Set the baud rate to 115200, 8n1, IRQ disabled */
 +static void max3110_write_config(void)
 +{
 +      u16 config;
 +
 +      config = 0xc001;
 +      dw_writel(pspi, dr, config);
 +}
 +
 +/* Translate char to an eligible word and send it to the max3110 */
 +static void max3110_write_data(char c)
 +{
 +      u16 data;
 +
 +      data = 0x8000 | c;
 +      dw_writel(pspi, dr, data);
 +}
 +
 +void mrst_early_console_init(void)
 +{
 +      u32 ctrlr0 = 0;
 +      u32 spi0_cdiv;
 +      u32 freq; /* Frequency info only needs to be read once */
 +
 +      /* Base clk is 100 MHz, the actual clk = 100M / (clk_divider + 1) */
 +      pclk_spi0 = (void *)set_fixmap_offset_nocache(FIX_EARLYCON_MEM_BASE,
 +                                                      MRST_CLK_SPI0_REG);
 +      spi0_cdiv = ((*pclk_spi0) & 0xe00) >> 9;
 +      freq = 100000000 / (spi0_cdiv + 1);
 +
 +      if (mrst_identify_cpu() == MRST_CPU_CHIP_PENWELL)
 +              mrst_spi_paddr = MRST_REGBASE_SPI1;
 +
 +      pspi = (void *)set_fixmap_offset_nocache(FIX_EARLYCON_MEM_BASE,
 +                                              mrst_spi_paddr);
 +
 +      /* Disable SPI controller */
 +      dw_writel(pspi, ssienr, 0);
 +
 +      /* Set control param, 8 bits, transmit only mode */
 +      ctrlr0 = dw_readl(pspi, ctrl0);
 +
 +      ctrlr0 &= 0xfcc0;
 +      ctrlr0 |= 0xf | (SPI_FRF_SPI << SPI_FRF_OFFSET)
 +                    | (SPI_TMOD_TO << SPI_TMOD_OFFSET);
 +      dw_writel(pspi, ctrl0, ctrlr0);
 +
 +      /*
 +       * Change the spi0 clk to support 115200 bps; use 100000 when
 +       * calculating the clk divisor so the clock runs a little slower
 +       * than the real baud rate.
 +       */
 +      dw_writel(pspi, baudr, freq/100000);
 +
 +      /* Disable all INT for early phase */
 +      dw_writel(pspi, imr, 0x0);
 +
 +      /* Set the cs to spi-uart */
 +      dw_writel(pspi, ser, 0x2);
 +
 +      /* Enable the HW, the last step for HW init */
 +      dw_writel(pspi, ssienr, 0x1);
 +
 +      /* Set the default configuration */
 +      max3110_write_config();
 +
 +      /* Register the kmsg dumper */
 +      if (!dumper_registered) {
 +              dw_dumper.dump = dw_kmsg_dump;
 +              kmsg_dump_register(&dw_dumper);
 +              dumper_registered = 1;
 +      }
 +}
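
To make the "little slower" comment above concrete: with clk_divider == 0 the base clock is 100 MHz, baudr = 100000000 / 100000 = 1000, and the bit clock comes out at 100 MHz / 1000 = 100 kHz, just under the nominal 115.2 kHz. The same arithmetic factored into a hypothetical helper (illustrative only):

	static unsigned int mrst_spi_bit_clock(unsigned int clk_divider)
	{
		unsigned int ssi_clk = 100000000 / (clk_divider + 1);
		unsigned int baudr = ssi_clk / 100000;

		return ssi_clk / baudr;	/* 100000, i.e. 100 kHz, when clk_divider == 0 */
	}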
 +
 +/* Slave select should be handled in the read/write function */
 +static void early_mrst_spi_putc(char c)
 +{
 +      unsigned int timeout;
 +      u32 sr;
 +
 +      timeout = MRST_SPI_TIMEOUT;
 +      /* Early putc needs to make sure the TX FIFO is not full */
 +      while (--timeout) {
 +              sr = dw_readl(pspi, sr);
 +              if (!(sr & SR_TF_NOT_FULL))
 +                      cpu_relax();
 +              else
 +                      break;
 +      }
 +
 +      if (!timeout)
 +              pr_warning("MRST earlycon: timed out\n");
 +      else
 +              max3110_write_data(c);
 +}
 +
 +/* Early SPI only uses polling mode */
 +static void early_mrst_spi_write(struct console *con, const char *str, unsigned n)
 +{
 +      int i;
 +
 +      for (i = 0; i < n && *str; i++) {
 +              if (*str == '\n')
 +                      early_mrst_spi_putc('\r');
 +              early_mrst_spi_putc(*str);
 +              str++;
 +      }
 +}
 +
 +struct console early_mrst_console = {
 +      .name =         "earlymrst",
 +      .write =        early_mrst_spi_write,
 +      .flags =        CON_PRINTBUFFER,
 +      .index =        -1,
 +};
 +
 +/*
 + * The following is the early console based on the Medfield HSU
 + * (High Speed UART) device.
 + */
 +#define HSU_PORT2_PADDR               0xffa28180
 +
 +static void __iomem *phsu;
 +
 +void hsu_early_console_init(void)
 +{
 +      u8 lcr;
 +
 +      phsu = (void *)set_fixmap_offset_nocache(FIX_EARLYCON_MEM_BASE,
 +                                                      HSU_PORT2_PADDR);
 +
 +      /* Disable FIFO */
 +      writeb(0x0, phsu + UART_FCR);
 +
 +      /* Set to default 115200 bps, 8n1 */
 +      lcr = readb(phsu + UART_LCR);
 +      writeb((0x80 | lcr), phsu + UART_LCR);
 +      writeb(0x18, phsu + UART_DLL);
 +      writeb(lcr,  phsu + UART_LCR);
 +      writel(0x3600, phsu + UART_MUL*4);
 +
 +      writeb(0x8, phsu + UART_MCR);
 +      writeb(0x7, phsu + UART_FCR);
 +      writeb(0x3, phsu + UART_LCR);
 +
 +      /* Clear IRQ status */
 +      readb(phsu + UART_LSR);
 +      readb(phsu + UART_RX);
 +      readb(phsu + UART_IIR);
 +      readb(phsu + UART_MSR);
 +
 +      /* Enable FIFO */
 +      writeb(0x7, phsu + UART_FCR);
 +}
 +
 +#define BOTH_EMPTY (UART_LSR_TEMT | UART_LSR_THRE)
 +
 +static void early_hsu_putc(char ch)
 +{
 +      unsigned int timeout = 10000; /* 10ms */
 +      u8 status;
 +
 +      while (--timeout) {
 +              status = readb(phsu + UART_LSR);
 +              if (status & BOTH_EMPTY)
 +                      break;
 +              udelay(1);
 +      }
 +
 +      /* Only write the char when there was no timeout */
 +      if (timeout)
 +              writeb(ch, phsu + UART_TX);
 +}
 +
 +static void early_hsu_write(struct console *con, const char *str, unsigned n)
 +{
 +      int i;
 +
 +      for (i = 0; i < n && *str; i++) {
 +              if (*str == '\n')
 +                      early_hsu_putc('\r');
 +              early_hsu_putc(*str);
 +              str++;
 +      }
 +}
 +
 +struct console early_hsu_console = {
 +      .name =         "earlyhsu",
 +      .write =        early_hsu_write,
 +      .flags =        CON_PRINTBUFFER,
 +      .index =        -1,
 +};
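
Neither console registers itself; a boot-time hook is expected to call the matching init function and then register the struct console. A hedged sketch of such a hook (the function and the option names are assumptions, not defined in this file):

	#include <linux/console.h>
	#include <linux/string.h>

	static int __init setup_mid_earlycon(char *opt)
	{
		if (!strncmp(opt, "mrst", 4)) {
			mrst_early_console_init();
			register_console(&early_mrst_console);
		} else if (!strncmp(opt, "hsu", 3)) {
			hsu_early_console_init();
			register_console(&early_hsu_console);
		}
		return 0;
	}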
@@@ -2240,7 -2240,7 +2240,7 @@@ int ata_dev_configure(struct ata_devic
                        if (id[ATA_ID_CFA_KEY_MGMT] & 1)
                                ata_dev_printk(dev, KERN_WARNING,
                                               "supports DRM functions and may "
-                                              "not be fully accessable.\n");
+                                              "not be fully accessible.\n");
                        snprintf(revbuf, 7, "CFA");
                } else {
                        snprintf(revbuf, 7, "ATA-%d", ata_id_major_version(id));
                        if (ata_id_has_tpm(id))
                                ata_dev_printk(dev, KERN_WARNING,
                                               "supports DRM functions and may "
-                                              "not be fully accessable.\n");
+                                              "not be fully accessible.\n");
                }
  
                dev->n_sectors = ata_id_n_sectors(id);
@@@ -4807,6 -4807,9 +4807,6 @@@ static void ata_verify_xfer(struct ata_
  {
        struct ata_device *dev = qc->dev;
  
 -      if (ata_tag_internal(qc->tag))
 -              return;
 -
        if (ata_is_nodata(qc->tf.protocol))
                return;
  
@@@ -4855,23 -4858,14 +4855,23 @@@ void ata_qc_complete(struct ata_queued_
                if (unlikely(qc->err_mask))
                        qc->flags |= ATA_QCFLAG_FAILED;
  
 -              if (unlikely(qc->flags & ATA_QCFLAG_FAILED)) {
 -                      /* always fill result TF for failed qc */
 +              /*
 +               * Finish internal commands without any further processing
 +               * and always with the result TF filled.
 +               */
 +              if (unlikely(ata_tag_internal(qc->tag))) {
                        fill_result_tf(qc);
 +                      __ata_qc_complete(qc);
 +                      return;
 +              }
  
 -                      if (!ata_tag_internal(qc->tag))
 -                              ata_qc_schedule_eh(qc);
 -                      else
 -                              __ata_qc_complete(qc);
 +              /*
 +               * Non-internal qc has failed.  Fill the result TF and
 +               * summon EH.
 +               */
 +              if (unlikely(qc->flags & ATA_QCFLAG_FAILED)) {
 +                      fill_result_tf(qc);
 +                      ata_qc_schedule_eh(qc);
                        return;
                }
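
Flattened out, the completion order after this change reads as follows (a sketch of the logic in this hunk, not new code):

	if (ata_tag_internal(qc->tag)) {		/* 1: internal command */
		fill_result_tf(qc);
		__ata_qc_complete(qc);			/* complete directly, no EH */
	} else if (qc->flags & ATA_QCFLAG_FAILED) {	/* 2: failed qc */
		fill_result_tf(qc);
		ata_qc_schedule_eh(qc);			/* hand off to EH */
	} else {
		/* 3: normal completion path */
	}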
  
@@@ -6128,7 -6122,7 +6128,7 @@@ static void ata_port_detach(struct ata_
        /* it better be dead now */
        WARN_ON(!(ap->pflags & ATA_PFLAG_UNLOADED));
  
 -      cancel_rearming_delayed_work(&ap->hotplug_task);
 +      cancel_delayed_work_sync(&ap->hotplug_task);
  
   skip_eh:
        if (ap->pmp_link) {
diff --combined drivers/base/bus.c
@@@ -20,6 -20,7 +20,6 @@@
  #include "power/power.h"
  
  #define to_bus_attr(_attr) container_of(_attr, struct bus_attribute, attr)
 -#define to_bus(obj) container_of(obj, struct bus_type_private, subsys.kobj)
  
  /*
   * sysfs bindings for drivers
@@@ -95,11 -96,11 +95,11 @@@ static ssize_t bus_attr_show(struct kob
                             char *buf)
  {
        struct bus_attribute *bus_attr = to_bus_attr(attr);
 -      struct bus_type_private *bus_priv = to_bus(kobj);
 +      struct subsys_private *subsys_priv = to_subsys_private(kobj);
        ssize_t ret = 0;
  
        if (bus_attr->show)
 -              ret = bus_attr->show(bus_priv->bus, buf);
 +              ret = bus_attr->show(subsys_priv->bus, buf);
        return ret;
  }
  
@@@ -107,11 -108,11 +107,11 @@@ static ssize_t bus_attr_store(struct ko
                              const char *buf, size_t count)
  {
        struct bus_attribute *bus_attr = to_bus_attr(attr);
 -      struct bus_type_private *bus_priv = to_bus(kobj);
 +      struct subsys_private *subsys_priv = to_subsys_private(kobj);
        ssize_t ret = 0;
  
        if (bus_attr->store)
 -              ret = bus_attr->store(bus_priv->bus, buf, count);
 +              ret = bus_attr->store(subsys_priv->bus, buf, count);
        return ret;
  }
  
@@@ -857,9 -858,9 +857,9 @@@ static BUS_ATTR(uevent, S_IWUSR, NULL, 
  int bus_register(struct bus_type *bus)
  {
        int retval;
 -      struct bus_type_private *priv;
 +      struct subsys_private *priv;
  
 -      priv = kzalloc(sizeof(struct bus_type_private), GFP_KERNEL);
 +      priv = kzalloc(sizeof(struct subsys_private), GFP_KERNEL);
        if (!priv)
                return -ENOMEM;
  
@@@ -975,7 -976,7 +975,7 @@@ struct klist *bus_get_device_klist(stru
  EXPORT_SYMBOL_GPL(bus_get_device_klist);
  
  /*
-  * Yes, this forcably breaks the klist abstraction temporarily.  It
+  * Yes, this forcibly breaks the klist abstraction temporarily.  It
   * just wants to sort the klist, not change reference counts and
   * take/drop locks rapidly in the process.  It does all this while
   * holding the lock for the list, so objects can't otherwise be
@@@ -8,7 -8,7 +8,7 @@@
   *
   *
   * The driver model core calls device_pm_add() when a device is registered.
-  * This will intialize the embedded device_pm_info object in the device
+  * This will initialize the embedded device_pm_info object in the device
   * and add it to the list of power-controlled devices. sysfs entries for
   * controlling device power management will also be added.
   *
@@@ -26,7 -26,6 +26,7 @@@
  #include <linux/interrupt.h>
  #include <linux/sched.h>
  #include <linux/async.h>
 +#include <linux/suspend.h>
  
  #include "../base.h"
  #include "power.h"
   */
  
  LIST_HEAD(dpm_list);
 +LIST_HEAD(dpm_prepared_list);
 +LIST_HEAD(dpm_suspended_list);
 +LIST_HEAD(dpm_noirq_list);
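
The per-device status tristate is gone; which list a device sits on now encodes its phase, moving dpm_list -> dpm_prepared_list -> dpm_suspended_list -> dpm_noirq_list through suspend and back on resume. The hunks below all repeat one walk pattern; a sketch:

	while (!list_empty(&dpm_prepared_list)) {
		struct device *dev = to_device(dpm_prepared_list.prev);

		get_device(dev);		/* pin dev across the unlock */
		mutex_unlock(&dpm_list_mtx);
		/* ... run the phase callback without holding the list lock ... */
		mutex_lock(&dpm_list_mtx);
		if (!list_empty(&dev->power.entry))
			list_move(&dev->power.entry, &dpm_suspended_list);
		put_device(dev);
	}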
  
  static DEFINE_MUTEX(dpm_list_mtx);
  static pm_message_t pm_transition;
  
 -/*
 - * Set once the preparation of devices for a PM transition has started, reset
 - * before starting to resume devices.  Protected by dpm_list_mtx.
 - */
 -static bool transition_started;
 -
  static int async_error;
  
  /**
@@@ -57,7 -59,7 +57,7 @@@
   */
  void device_pm_init(struct device *dev)
  {
 -      dev->power.status = DPM_ON;
 +      dev->power.in_suspend = false;
        init_completion(&dev->power.completion);
        complete_all(&dev->power.completion);
        dev->power.wakeup = NULL;
@@@ -88,11 -90,22 +88,11 @@@ void device_pm_unlock(void
  void device_pm_add(struct device *dev)
  {
        pr_debug("PM: Adding info for %s:%s\n",
 -               dev->bus ? dev->bus->name : "No Bus",
 -               kobject_name(&dev->kobj));
 +               dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
        mutex_lock(&dpm_list_mtx);
 -      if (dev->parent) {
 -              if (dev->parent->power.status >= DPM_SUSPENDING)
 -                      dev_warn(dev, "parent %s should not be sleeping\n",
 -                               dev_name(dev->parent));
 -      } else if (transition_started) {
 -              /*
 -               * We refuse to register parentless devices while a PM
 -               * transition is in progress in order to avoid leaving them
 -               * unhandled down the road
 -               */
 -              dev_WARN(dev, "Parentless device registered during a PM transaction\n");
 -      }
 -
 +      if (dev->parent && dev->parent->power.in_suspend)
 +              dev_warn(dev, "parent %s should not be sleeping\n",
 +                      dev_name(dev->parent));
        list_add_tail(&dev->power.entry, &dpm_list);
        mutex_unlock(&dpm_list_mtx);
  }
  void device_pm_remove(struct device *dev)
  {
        pr_debug("PM: Removing info for %s:%s\n",
 -               dev->bus ? dev->bus->name : "No Bus",
 -               kobject_name(&dev->kobj));
 +               dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
        complete_all(&dev->power.completion);
        mutex_lock(&dpm_list_mtx);
        list_del_init(&dev->power.entry);
  void device_pm_move_before(struct device *deva, struct device *devb)
  {
        pr_debug("PM: Moving %s:%s before %s:%s\n",
 -               deva->bus ? deva->bus->name : "No Bus",
 -               kobject_name(&deva->kobj),
 -               devb->bus ? devb->bus->name : "No Bus",
 -               kobject_name(&devb->kobj));
 +               deva->bus ? deva->bus->name : "No Bus", dev_name(deva),
 +               devb->bus ? devb->bus->name : "No Bus", dev_name(devb));
        /* Delete deva from dpm_list and reinsert before devb. */
        list_move_tail(&deva->power.entry, &devb->power.entry);
  }
  void device_pm_move_after(struct device *deva, struct device *devb)
  {
        pr_debug("PM: Moving %s:%s after %s:%s\n",
 -               deva->bus ? deva->bus->name : "No Bus",
 -               kobject_name(&deva->kobj),
 -               devb->bus ? devb->bus->name : "No Bus",
 -               kobject_name(&devb->kobj));
 +               deva->bus ? deva->bus->name : "No Bus", dev_name(deva),
 +               devb->bus ? devb->bus->name : "No Bus", dev_name(devb));
        /* Delete deva from dpm_list and reinsert after devb. */
        list_move(&deva->power.entry, &devb->power.entry);
  }
  void device_pm_move_last(struct device *dev)
  {
        pr_debug("PM: Moving %s:%s to end of list\n",
 -               dev->bus ? dev->bus->name : "No Bus",
 -               kobject_name(&dev->kobj));
 +               dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
        list_move_tail(&dev->power.entry, &dpm_list);
  }
  
@@@ -284,7 -303,7 +284,7 @@@ static int pm_noirq_op(struct device *d
                        pm_message_t state)
  {
        int error = 0;
 -      ktime_t calltime, delta, rettime;
 +      ktime_t calltime = ktime_set(0, 0), delta, rettime;
  
        if (initcall_debug) {
                pr_info("calling  %s+ @ %i, parent: %s\n",
@@@ -386,7 -405,7 +386,7 @@@ static void pm_dev_err(struct device *d
                        int error)
  {
        printk(KERN_ERR "PM: Device %s failed to %s%s: error %d\n",
 -              kobject_name(&dev->kobj), pm_verb(state.event), info, error);
 +              dev_name(dev), pm_verb(state.event), info, error);
  }
  
  static void dpm_show_time(ktime_t starttime, pm_message_t state, char *info)
@@@ -456,24 -475,33 +456,24 @@@ End
   */
  void dpm_resume_noirq(pm_message_t state)
  {
 -      struct list_head list;
        ktime_t starttime = ktime_get();
  
 -      INIT_LIST_HEAD(&list);
        mutex_lock(&dpm_list_mtx);
 -      transition_started = false;
 -      while (!list_empty(&dpm_list)) {
 -              struct device *dev = to_device(dpm_list.next);
 +      while (!list_empty(&dpm_noirq_list)) {
 +              struct device *dev = to_device(dpm_noirq_list.next);
 +              int error;
  
                get_device(dev);
 -              if (dev->power.status > DPM_OFF) {
 -                      int error;
 -
 -                      dev->power.status = DPM_OFF;
 -                      mutex_unlock(&dpm_list_mtx);
 +              list_move_tail(&dev->power.entry, &dpm_suspended_list);
 +              mutex_unlock(&dpm_list_mtx);
  
 -                      error = device_resume_noirq(dev, state);
 +              error = device_resume_noirq(dev, state);
 +              if (error)
 +                      pm_dev_err(dev, state, " early", error);
  
 -                      mutex_lock(&dpm_list_mtx);
 -                      if (error)
 -                              pm_dev_err(dev, state, " early", error);
 -              }
 -              if (!list_empty(&dev->power.entry))
 -                      list_move_tail(&dev->power.entry, &list);
 +              mutex_lock(&dpm_list_mtx);
                put_device(dev);
        }
 -      list_splice(&list, &dpm_list);
        mutex_unlock(&dpm_list_mtx);
        dpm_show_time(starttime, state, "early");
        resume_device_irqs();
@@@ -516,7 -544,7 +516,7 @@@ static int device_resume(struct device 
        dpm_wait(dev->parent, async);
        device_lock(dev);
  
 -      dev->power.status = DPM_RESUMING;
 +      dev->power.in_suspend = false;
  
        if (dev->bus) {
                if (dev->bus->pm) {
@@@ -582,14 -610,19 +582,14 @@@ static bool is_async(struct device *dev
   */
  static void dpm_resume(pm_message_t state)
  {
        struct device *dev;
        ktime_t starttime = ktime_get();
  
 -      INIT_LIST_HEAD(&list);
        mutex_lock(&dpm_list_mtx);
        pm_transition = state;
        async_error = 0;
  
 -      list_for_each_entry(dev, &dpm_list, power.entry) {
 -              if (dev->power.status < DPM_OFF)
 -                      continue;
 -
 +      list_for_each_entry(dev, &dpm_suspended_list, power.entry) {
                INIT_COMPLETION(dev->power.completion);
                if (is_async(dev)) {
                        get_device(dev);
                }
        }
  
 -      while (!list_empty(&dpm_list)) {
 -              dev = to_device(dpm_list.next);
 +      while (!list_empty(&dpm_suspended_list)) {
 +              dev = to_device(dpm_suspended_list.next);
                get_device(dev);
 -              if (dev->power.status >= DPM_OFF && !is_async(dev)) {
 +              if (!is_async(dev)) {
                        int error;
  
                        mutex_unlock(&dpm_list_mtx);
  
                        error = device_resume(dev, state, false);
 -
 -                      mutex_lock(&dpm_list_mtx);
                        if (error)
                                pm_dev_err(dev, state, "", error);
 -              } else if (dev->power.status == DPM_SUSPENDING) {
 -                      /* Allow new children of the device to be registered */
 -                      dev->power.status = DPM_RESUMING;
 +
 +                      mutex_lock(&dpm_list_mtx);
                }
                if (!list_empty(&dev->power.entry))
 -                      list_move_tail(&dev->power.entry, &list);
 +                      list_move_tail(&dev->power.entry, &dpm_prepared_list);
                put_device(dev);
        }
 -      list_splice(&list, &dpm_list);
        mutex_unlock(&dpm_list_mtx);
        async_synchronize_full();
        dpm_show_time(starttime, state, NULL);
@@@ -660,18 -697,22 +660,18 @@@ static void dpm_complete(pm_message_t s
  
        INIT_LIST_HEAD(&list);
        mutex_lock(&dpm_list_mtx);
 -      transition_started = false;
 -      while (!list_empty(&dpm_list)) {
 -              struct device *dev = to_device(dpm_list.prev);
 +      while (!list_empty(&dpm_prepared_list)) {
 +              struct device *dev = to_device(dpm_prepared_list.prev);
  
                get_device(dev);
 -              if (dev->power.status > DPM_ON) {
 -                      dev->power.status = DPM_ON;
 -                      mutex_unlock(&dpm_list_mtx);
 +              dev->power.in_suspend = false;
 +              list_move(&dev->power.entry, &list);
 +              mutex_unlock(&dpm_list_mtx);
  
 -                      device_complete(dev, state);
 -                      pm_runtime_put_sync(dev);
 +              device_complete(dev, state);
 +              pm_runtime_put_sync(dev);
  
 -                      mutex_lock(&dpm_list_mtx);
 -              }
 -              if (!list_empty(&dev->power.entry))
 -                      list_move(&dev->power.entry, &list);
 +              mutex_lock(&dpm_list_mtx);
                put_device(dev);
        }
        list_splice(&list, &dpm_list);
@@@ -761,13 -802,15 +761,13 @@@ End
   */
  int dpm_suspend_noirq(pm_message_t state)
  {
        ktime_t starttime = ktime_get();
        int error = 0;
  
 -      INIT_LIST_HEAD(&list);
        suspend_device_irqs();
        mutex_lock(&dpm_list_mtx);
 -      while (!list_empty(&dpm_list)) {
 -              struct device *dev = to_device(dpm_list.prev);
 +      while (!list_empty(&dpm_suspended_list)) {
 +              struct device *dev = to_device(dpm_suspended_list.prev);
  
                get_device(dev);
                mutex_unlock(&dpm_list_mtx);
                        put_device(dev);
                        break;
                }
 -              dev->power.status = DPM_OFF_IRQ;
                if (!list_empty(&dev->power.entry))
 -                      list_move(&dev->power.entry, &list);
 +                      list_move(&dev->power.entry, &dpm_noirq_list);
                put_device(dev);
        }
 -      list_splice_tail(&list, &dpm_list);
        mutex_unlock(&dpm_list_mtx);
        if (error)
                dpm_resume_noirq(resume_event(state));
@@@ -831,11 -876,6 +831,11 @@@ static int __device_suspend(struct devi
        if (async_error)
                goto End;
  
 +      if (pm_wakeup_pending()) {
 +              async_error = -EBUSY;
 +              goto End;
 +      }
 +
        if (dev->class) {
                if (dev->class->pm) {
                        pm_dev_dbg(dev, state, "class ");
                }
        }
  
 -      if (!error)
 -              dev->power.status = DPM_OFF;
 -
   End:
        device_unlock(dev);
        complete_all(&dev->power.completion);
@@@ -908,14 -951,16 +908,14 @@@ static int device_suspend(struct devic
   */
  static int dpm_suspend(pm_message_t state)
  {
        ktime_t starttime = ktime_get();
        int error = 0;
  
        mutex_lock(&dpm_list_mtx);
        pm_transition = state;
        async_error = 0;
 -      while (!list_empty(&dpm_list)) {
 -              struct device *dev = to_device(dpm_list.prev);
 +      while (!list_empty(&dpm_prepared_list)) {
 +              struct device *dev = to_device(dpm_prepared_list.prev);
  
                get_device(dev);
                mutex_unlock(&dpm_list_mtx);
                        break;
                }
                if (!list_empty(&dev->power.entry))
 -                      list_move(&dev->power.entry, &list);
 +                      list_move(&dev->power.entry, &dpm_suspended_list);
                put_device(dev);
                if (async_error)
                        break;
        }
 -      list_splice(&list, dpm_list.prev);
        mutex_unlock(&dpm_list_mtx);
        async_synchronize_full();
        if (!error)
@@@ -992,20 -1038,22 +992,20 @@@ static int device_prepare(struct devic
   */
  static int dpm_prepare(pm_message_t state)
  {
 -      struct list_head list;
        int error = 0;
  
 -      INIT_LIST_HEAD(&list);
        mutex_lock(&dpm_list_mtx);
 -      transition_started = true;
        while (!list_empty(&dpm_list)) {
                struct device *dev = to_device(dpm_list.next);
  
                get_device(dev);
                mutex_unlock(&dpm_list_mtx);
  
                pm_runtime_get_noresume(dev);
 -              if (pm_runtime_barrier(dev) && device_may_wakeup(dev)) {
 -                      /* Wake-up requested during system sleep transition. */
 +              if (pm_runtime_barrier(dev) && device_may_wakeup(dev))
 +                      pm_wakeup_event(dev, 0);
 +
 +              if (pm_wakeup_pending()) {
                        pm_runtime_put_sync(dev);
                        error = -EBUSY;
                } else {
  
                mutex_lock(&dpm_list_mtx);
                if (error) {
 -                      dev->power.status = DPM_ON;
                        if (error == -EAGAIN) {
                                put_device(dev);
                                error = 0;
                                continue;
                        }
 -                      printk(KERN_ERR "PM: Failed to prepare device %s "
 -                              "for power transition: error %d\n",
 -                              kobject_name(&dev->kobj), error);
 +                      printk(KERN_INFO "PM: Device %s not prepared "
 +                              "for power transition: code %d\n",
 +                              dev_name(dev), error);
                        put_device(dev);
                        break;
                }
 -              dev->power.status = DPM_SUSPENDING;
 +              dev->power.in_suspend = true;
                if (!list_empty(&dev->power.entry))
 -                      list_move_tail(&dev->power.entry, &list);
 +                      list_move_tail(&dev->power.entry, &dpm_prepared_list);
                put_device(dev);
        }
 -      list_splice(&list, &dpm_list);
        mutex_unlock(&dpm_list_mtx);
        return error;
  }
diff --combined drivers/edac/edac_core.h
  #define EDAC_PCI "PCI"
  #define EDAC_DEBUG "DEBUG"
  
 +extern const char *edac_mem_types[];
 +
  #ifdef CONFIG_EDAC_DEBUG
  extern int edac_debug_level;
 -extern const char *edac_mem_types[];
  
  #define edac_debug_printk(level, fmt, arg...)                           \
        do {                                                            \
@@@ -259,7 -258,7 +259,7 @@@ enum scrub_type 
   *                    for single channel are 64 bits, for dual channel 128
   *                    bits.
   *
-  * Single-Ranked stick:       A Single-ranked stick has 1 chip-select row of memmory.
+  * Single-Ranked stick:       A Single-ranked stick has 1 chip-select row of memory.
   *                    Motherboards commonly drive two chip-select pins to
   *                    a memory stick. A single-ranked stick, will occupy
   *                    only one of those rows. The other will be unused.
@@@ -387,7 -386,7 +387,7 @@@ struct mem_ctl_info 
           representation and converts it to the closest matching
           bandwidth in bytes/sec.
         */
 -      int (*get_sdram_scrub_rate) (struct mem_ctl_info * mci, u32 * bw);
 +      int (*get_sdram_scrub_rate) (struct mem_ctl_info * mci);
  
  
        /* pointer to edac checking routine */
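
With the out-parameter dropped, the callback presumably returns the bandwidth directly, or a negative errno on failure. A hedged sketch of a caller under that assumption (the function is hypothetical):

	static int show_scrub_rate(struct mem_ctl_info *mci, char *buf)
	{
		int bw = mci->get_sdram_scrub_rate(mci);

		if (bw < 0)
			return bw;			/* negative errno */
		return sprintf(buf, "%d\n", bw);	/* bytes/sec */
	}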
@@@ -1927,7 -1927,7 +1927,7 @@@ init_ltime(struct nvbios *bios, uint16_
         * offset      (8  bit): opcode
         * offset + 1  (16 bit): time
         *
-        * Sleep for "time" miliseconds.
+        * Sleep for "time" milliseconds.
         */
  
        unsigned time = ROM16(bios->data[offset + 1]);
        if (!iexec->execute)
                return 3;
  
-       BIOSLOG(bios, "0x%04X: Sleeping for 0x%04X miliseconds\n",
+       BIOSLOG(bios, "0x%04X: Sleeping for 0x%04X milliseconds\n",
                offset, time);
  
        msleep(time);
@@@ -6053,17 -6053,52 +6053,17 @@@ static struct dcb_entry *new_dcb_entry(
        return entry;
  }
  
 -static void fabricate_vga_output(struct dcb_table *dcb, int i2c, int heads)
 +static void fabricate_dcb_output(struct dcb_table *dcb, int type, int i2c,
 +                               int heads, int or)
  {
        struct dcb_entry *entry = new_dcb_entry(dcb);
  
 -      entry->type = 0;
 +      entry->type = type;
        entry->i2c_index = i2c;
        entry->heads = heads;
 -      entry->location = DCB_LOC_ON_CHIP;
 -      entry->or = 1;
 -}
 -
 -static void fabricate_dvi_i_output(struct dcb_table *dcb, bool twoHeads)
 -{
 -      struct dcb_entry *entry = new_dcb_entry(dcb);
 -
 -      entry->type = 2;
 -      entry->i2c_index = LEGACY_I2C_PANEL;
 -      entry->heads = twoHeads ? 3 : 1;
 -      entry->location = !DCB_LOC_ON_CHIP;     /* ie OFF CHIP */
 -      entry->or = 1;  /* means |0x10 gets set on CRE_LCD__INDEX */
 -      entry->duallink_possible = false; /* SiI164 and co. are single link */
 -
 -#if 0
 -      /*
 -       * For dvi-a either crtc probably works, but my card appears to only
 -       * support dvi-d.  "nvidia" still attempts to program it for dvi-a,
 -       * doing the full fp output setup (program 0x6808.. fp dimension regs,
 -       * setting 0x680848 to 0x10000111 to enable, maybe setting 0x680880);
 -       * the monitor picks up the mode res ok and lights up, but no pixel
 -       * data appears, so the board manufacturer probably connected up the
 -       * sync lines, but missed the video traces / components
 -       *
 -       * with this introduction, dvi-a left as an exercise for the reader.
 -       */
 -      fabricate_vga_output(dcb, LEGACY_I2C_PANEL, entry->heads);
 -#endif
 -}
 -
 -static void fabricate_tv_output(struct dcb_table *dcb, bool twoHeads)
 -{
 -      struct dcb_entry *entry = new_dcb_entry(dcb);
 -
 -      entry->type = 1;
 -      entry->i2c_index = LEGACY_I2C_TV;
 -      entry->heads = twoHeads ? 3 : 1;
 -      entry->location = !DCB_LOC_ON_CHIP;     /* ie OFF CHIP */
 +      if (type != OUTPUT_ANALOG)
 +              entry->location = !DCB_LOC_ON_CHIP; /* ie OFF CHIP */
 +      entry->or = or;
  }
  
  static bool
@@@ -6330,36 -6365,8 +6330,36 @@@ apply_dcb_encoder_quirks(struct drm_dev
        return true;
  }
  
 +static void
 +fabricate_dcb_encoder_table(struct drm_device *dev, struct nvbios *bios)
 +{
 +      struct dcb_table *dcb = &bios->dcb;
 +      int all_heads = (nv_two_heads(dev) ? 3 : 1);
 +
 +#ifdef __powerpc__
 +      /* Apple iMac G4 NV17 */
 +      if (of_machine_is_compatible("PowerMac4,5")) {
 +              fabricate_dcb_output(dcb, OUTPUT_TMDS, 0, all_heads, 1);
 +              fabricate_dcb_output(dcb, OUTPUT_ANALOG, 1, all_heads, 2);
 +              return;
 +      }
 +#endif
 +
 +      /* Make up some sane defaults */
 +      fabricate_dcb_output(dcb, OUTPUT_ANALOG, LEGACY_I2C_CRT, 1, 1);
 +
 +      if (nv04_tv_identify(dev, bios->legacy.i2c_indices.tv) >= 0)
 +              fabricate_dcb_output(dcb, OUTPUT_TV, LEGACY_I2C_TV,
 +                                   all_heads, 0);
 +
 +      else if (bios->tmds.output0_script_ptr ||
 +               bios->tmds.output1_script_ptr)
 +              fabricate_dcb_output(dcb, OUTPUT_TMDS, LEGACY_I2C_PANEL,
 +                                   all_heads, 1);
 +}
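
Reading the new call sites against the removed helper bodies gives the mapping (a summary sketch, not new behaviour):

	fabricate_dcb_output(dcb, OUTPUT_ANALOG, LEGACY_I2C_CRT, 1, 1);
						/* was fabricate_vga_output() */
	fabricate_dcb_output(dcb, OUTPUT_TV, LEGACY_I2C_TV, all_heads, 0);
						/* was fabricate_tv_output() */
	fabricate_dcb_output(dcb, OUTPUT_TMDS, LEGACY_I2C_PANEL, all_heads, 1);
						/* was fabricate_dvi_i_output() */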
 +
  static int
 -parse_dcb_table(struct drm_device *dev, struct nvbios *bios, bool twoHeads)
 +parse_dcb_table(struct drm_device *dev, struct nvbios *bios)
  {
        struct drm_nouveau_private *dev_priv = dev->dev_private;
        struct dcb_table *dcb = &bios->dcb;
  
        /* this situation likely means a really old card, pre DCB */
        if (dcbptr == 0x0) {
 -              NV_INFO(dev, "Assuming a CRT output exists\n");
 -              fabricate_vga_output(dcb, LEGACY_I2C_CRT, 1);
 -
 -              if (nv04_tv_identify(dev, bios->legacy.i2c_indices.tv) >= 0)
 -                      fabricate_tv_output(dcb, twoHeads);
 -
 +              fabricate_dcb_encoder_table(dev, bios);
                return 0;
        }
  
                 */
                NV_TRACEWARN(dev, "No useful information in BIOS output table; "
                                  "adding all possible outputs\n");
 -              fabricate_vga_output(dcb, LEGACY_I2C_CRT, 1);
 -
 -              /*
 -               * Attempt to detect TV before DVI because the test
 -               * for the former is more accurate and it rules the
 -               * latter out.
 -               */
 -              if (nv04_tv_identify(dev,
 -                                   bios->legacy.i2c_indices.tv) >= 0)
 -                      fabricate_tv_output(dcb, twoHeads);
 -
 -              else if (bios->tmds.output0_script_ptr ||
 -                       bios->tmds.output1_script_ptr)
 -                      fabricate_dvi_i_output(dcb, twoHeads);
 -
 +              fabricate_dcb_encoder_table(dev, bios);
                return 0;
        }
  
@@@ -6833,7 -6859,7 +6833,7 @@@ nouveau_bios_init(struct drm_device *de
        if (ret)
                return ret;
  
 -      ret = parse_dcb_table(dev, bios, nv_two_heads(dev));
 +      ret = parse_dcb_table(dev, bios);
        if (ret)
                return ret;
  
  #define ATOM_PPLL1            0
  #define ATOM_PPLL2            1
  #define ATOM_DCPLL            2
 +#define ATOM_PPLL0            2
 +#define ATOM_EXT_PLL1         8
 +#define ATOM_EXT_PLL2         9
 +#define ATOM_EXT_CLOCK        10
  #define ATOM_PPLL_INVALID     0xFF
  
 +#define ENCODER_REFCLK_SRC_P1PLL       0       
 +#define ENCODER_REFCLK_SRC_P2PLL       1
 +#define ENCODER_REFCLK_SRC_DCPLL       2
 +#define ENCODER_REFCLK_SRC_EXTCLK      3
 +#define ENCODER_REFCLK_SRC_INVALID     0xFF
 +
  #define ATOM_SCALER1          0
  #define ATOM_SCALER2          1
  
@@@ -202,9 -192,6 +202,9 @@@ typedef struct _ATOM_COMMON_TABLE_HEADE
                                    /*Image can't be updated, while Driver needs to carry the new table! */
  }ATOM_COMMON_TABLE_HEADER;
  
 +/****************************************************************************/        
 +// Structure stores the ROM header.
 +/****************************************************************************/        
  typedef struct _ATOM_ROM_HEADER
  {
    ATOM_COMMON_TABLE_HEADER            sHeader;
        #define USHORT  void*
  #endif
  
 +/****************************************************************************/        
 +// Structures used in Command.mtb 
 +/****************************************************************************/        
  typedef struct _ATOM_MASTER_LIST_OF_COMMAND_TABLES{
    USHORT ASIC_Init;                              //Function Table, used by various SW components,latest version 1.1
    USHORT GetDisplaySurfaceSize;                  //Atomic Table,  Used by Bios when enabling HW ICON
  #define SetUniphyInstance                        ASIC_StaticPwrMgtStatusChange
  #define HPDInterruptService                      ReadHWAssistedI2CStatus
  #define EnableVGA_Access                         GetSCLKOverMCLKRatio
 +#define GetDispObjectInfo                        EnableYUV 
  
  typedef struct _ATOM_MASTER_COMMAND_TABLE
  {
@@@ -374,24 -357,6 +374,24 @@@ typedef struct _ATOM_COMMON_ROM_COMMAND
  /****************************************************************************/        
  #define COMPUTE_MEMORY_PLL_PARAM        1
  #define COMPUTE_ENGINE_PLL_PARAM        2
 +#define ADJUST_MC_SETTING_PARAM         3
 +
 +/****************************************************************************/        
 +// Structures used by AdjustMemoryControllerTable
 +/****************************************************************************/        
 +typedef struct _ATOM_ADJUST_MEMORY_CLOCK_FREQ
 +{
 +#if ATOM_BIG_ENDIAN
 +  ULONG ulPointerReturnFlag:1;      // BYTE_3[7]=1 - Return the pointer to the right Data Block; BYTE_3[7]=0 - Program the right Data Block 
 +  ULONG ulMemoryModuleNumber:7;     // BYTE_3[6:0]
 +  ULONG ulClockFreq:24;
 +#else
 +  ULONG ulClockFreq:24;
 +  ULONG ulMemoryModuleNumber:7;     // BYTE_3[6:0]
 +  ULONG ulPointerReturnFlag:1;      // BYTE_3[7]=1 - Return the pointer to the right Data Block; BYTE_3[7]=0 - Program the right Data Block 
 +#endif
 +}ATOM_ADJUST_MEMORY_CLOCK_FREQ;
 +#define POINTER_RETURN_FLAG             0x80
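
The mirrored #if ATOM_BIG_ENDIAN branches keep ulClockFreq in the low 24 bits of the ULONG on either endianness. A sketch of the same layout packed by hand without bitfields (masks read off the struct above; the helper itself is hypothetical):

	#define ADJ_MCLK_FREQ_MASK	0x00ffffffUL	/* ulClockFreq:24 */
	#define ADJ_MCLK_MODULE_SHIFT	24		/* ulMemoryModuleNumber:7 */
	#define ADJ_MCLK_PTR_FLAG	0x80000000UL	/* ulPointerReturnFlag:1 */

	static unsigned long pack_adjust_mclk(unsigned long freq,
					      unsigned int module, int want_ptr)
	{
		return (freq & ADJ_MCLK_FREQ_MASK) |
		       ((unsigned long)(module & 0x7f) << ADJ_MCLK_MODULE_SHIFT) |
		       (want_ptr ? ADJ_MCLK_PTR_FLAG : 0);
	}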
  
  typedef struct _COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS
  {
@@@ -475,26 -440,6 +475,26 @@@ typedef struct _COMPUTE_MEMORY_ENGINE_P
  #endif
  }COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS_V4;
  
 +typedef struct _COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS_V5
 +{
 +  union
 +  {
 +    ATOM_COMPUTE_CLOCK_FREQ  ulClock;         //Input Parameter
 +    ATOM_S_MPLL_FB_DIVIDER   ulFbDiv;         //Output Parameter
 +  };
 +  UCHAR   ucRefDiv;                           //Output Parameter      
 +  UCHAR   ucPostDiv;                          //Output Parameter      
 +  union
 +  {
 +    UCHAR   ucCntlFlag;                       //Output Flags
 +    UCHAR   ucInputFlag;                      //Input Flags. ucInputFlag[0] - Strobe(1)/Performance(0) mode
 +  };
 +  UCHAR   ucReserved;                       
 +}COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS_V5;
 +
 +// ucInputFlag
 +#define ATOM_PLL_INPUT_FLAG_PLL_STROBE_MODE_EN  1   // 1-StrobeMode, 0-PerformanceMode
 +
  typedef struct _DYNAMICE_MEMORY_SETTINGS_PARAMETER
  {
    ATOM_COMPUTE_CLOCK_FREQ ulClock;
@@@ -638,7 -583,6 +638,7 @@@ typedef struct _DIG_ENCODER_CONTROL_PAR
  #define ATOM_ENCODER_CONFIG_DPLINKRATE_MASK                           0x01
  #define ATOM_ENCODER_CONFIG_DPLINKRATE_1_62GHZ                0x00
  #define ATOM_ENCODER_CONFIG_DPLINKRATE_2_70GHZ                0x01
 +#define ATOM_ENCODER_CONFIG_DPLINKRATE_5_40GHZ                0x02
  #define ATOM_ENCODER_CONFIG_LINK_SEL_MASK                               0x04
  #define ATOM_ENCODER_CONFIG_LINKA                                                               0x00
  #define ATOM_ENCODER_CONFIG_LINKB                                                               0x04
  #define ATOM_ENCODER_MODE_TV                                                                                  13
  #define ATOM_ENCODER_MODE_CV                                                                                  14
  #define ATOM_ENCODER_MODE_CRT                                                                                 15
 +#define ATOM_ENCODER_MODE_DVO                                                                                 16
 +#define ATOM_ENCODER_MODE_DP_SST                  ATOM_ENCODER_MODE_DP    // For DP1.2
 +#define ATOM_ENCODER_MODE_DP_MST                  5                       // For DP1.2
  
  typedef struct _ATOM_DIG_ENCODER_CONFIG_V2
  {
@@@ -720,7 -661,6 +720,7 @@@ typedef struct _DIG_ENCODER_CONTROL_PAR
  #define ATOM_ENCODER_CMD_DP_LINK_TRAINING_START       0x08
  #define ATOM_ENCODER_CMD_DP_LINK_TRAINING_PATTERN1    0x09
  #define ATOM_ENCODER_CMD_DP_LINK_TRAINING_PATTERN2    0x0a
 +#define ATOM_ENCODER_CMD_DP_LINK_TRAINING_PATTERN3    0x13
  #define ATOM_ENCODER_CMD_DP_LINK_TRAINING_COMPLETE    0x0b
  #define ATOM_ENCODER_CMD_DP_VIDEO_OFF                 0x0c
  #define ATOM_ENCODER_CMD_DP_VIDEO_ON                  0x0d
  #define ATOM_ENCODER_STATUS_LINK_TRAINING_COMPLETE    0x10
  #define ATOM_ENCODER_STATUS_LINK_TRAINING_INCOMPLETE  0x00
  
 +//ucTableFormatRevision=1
 +//ucTableContentRevision=3
  // The following function's ENABLE sub-function is used by the driver when TMDS/HDMI/LVDS is in use; the DISABLE sub-function is used by the driver as well
  typedef struct _ATOM_DIG_ENCODER_CONFIG_V3
  {
  #if ATOM_BIG_ENDIAN
      UCHAR ucReserved1:1;
 -    UCHAR ucDigSel:3;             // =0: DIGA/B/C/D/E/F
 +    UCHAR ucDigSel:3;             // =0/1/2/3/4/5: DIG0/1/2/3/4/5 (In register spec also referred to as DIGA/B/C/D/E/F)
      UCHAR ucReserved:3;
      UCHAR ucDPLinkRate:1;         // =0: 1.62Ghz, =1: 2.7Ghz
  #else
      UCHAR ucDPLinkRate:1;         // =0: 1.62Ghz, =1: 2.7Ghz
      UCHAR ucReserved:3;
 -    UCHAR ucDigSel:3;             // =0: DIGA/B/C/D/E/F
 +    UCHAR ucDigSel:3;             // =0/1/2/3/4/5: DIG0/1/2/3/4/5 (In register spec also referred to as DIGA/B/C/D/E/F)
      UCHAR ucReserved1:1;
  #endif
  }ATOM_DIG_ENCODER_CONFIG_V3;
  
 +#define ATOM_ENCODER_CONFIG_V3_DPLINKRATE_MASK                                0x03
 +#define ATOM_ENCODER_CONFIG_V3_DPLINKRATE_1_62GHZ               0x00
 +#define ATOM_ENCODER_CONFIG_V3_DPLINKRATE_2_70GHZ               0x01
  #define ATOM_ENCODER_CONFIG_V3_ENCODER_SEL                                      0x70
 -
 +#define ATOM_ENCODER_CONFIG_V3_DIG0_ENCODER                                     0x00
 +#define ATOM_ENCODER_CONFIG_V3_DIG1_ENCODER                                     0x10
 +#define ATOM_ENCODER_CONFIG_V3_DIG2_ENCODER                                     0x20
 +#define ATOM_ENCODER_CONFIG_V3_DIG3_ENCODER                                     0x30
 +#define ATOM_ENCODER_CONFIG_V3_DIG4_ENCODER                                     0x40
 +#define ATOM_ENCODER_CONFIG_V3_DIG5_ENCODER                                     0x50
  
  typedef struct _DIG_ENCODER_CONTROL_PARAMETERS_V3
  {
    UCHAR ucReserved;
  }DIG_ENCODER_CONTROL_PARAMETERS_V3;
  
 +//ucTableFormatRevision=1
 +//ucTableContentRevision=4
 +// start from NI           
 +// The following function's ENABLE sub-function is used by the driver when TMDS/HDMI/LVDS is in use; the DISABLE sub-function is used by the driver as well
 +typedef struct _ATOM_DIG_ENCODER_CONFIG_V4
 +{
 +#if ATOM_BIG_ENDIAN
 +    UCHAR ucReserved1:1;
 +    UCHAR ucDigSel:3;             // =0/1/2/3/4/5: DIG0/1/2/3/4/5 (In register spec also referred to as DIGA/B/C/D/E/F)
 +    UCHAR ucReserved:2;
 +    UCHAR ucDPLinkRate:2;         // =0: 1.62Ghz, =1: 2.7Ghz, 2=5.4Ghz    <= Changed compared to the previous version
 +#else
 +    UCHAR ucDPLinkRate:2;         // =0: 1.62Ghz, =1: 2.7Ghz, 2=5.4Ghz    <= Changed compared to the previous version
 +    UCHAR ucReserved:2;
 +    UCHAR ucDigSel:3;             // =0/1/2/3/4/5: DIG0/1/2/3/4/5 (In register spec also referred to as DIGA/B/C/D/E/F)
 +    UCHAR ucReserved1:1;
 +#endif
 +}ATOM_DIG_ENCODER_CONFIG_V4;
 +
 +#define ATOM_ENCODER_CONFIG_V4_DPLINKRATE_MASK                                0x03
 +#define ATOM_ENCODER_CONFIG_V4_DPLINKRATE_1_62GHZ               0x00
 +#define ATOM_ENCODER_CONFIG_V4_DPLINKRATE_2_70GHZ               0x01
 +#define ATOM_ENCODER_CONFIG_V4_DPLINKRATE_5_40GHZ               0x02
 +#define ATOM_ENCODER_CONFIG_V4_ENCODER_SEL                                      0x70
 +#define ATOM_ENCODER_CONFIG_V4_DIG0_ENCODER                                     0x00
 +#define ATOM_ENCODER_CONFIG_V4_DIG1_ENCODER                                     0x10
 +#define ATOM_ENCODER_CONFIG_V4_DIG2_ENCODER                                     0x20
 +#define ATOM_ENCODER_CONFIG_V4_DIG3_ENCODER                                     0x30
 +#define ATOM_ENCODER_CONFIG_V4_DIG4_ENCODER                                     0x40
 +#define ATOM_ENCODER_CONFIG_V4_DIG5_ENCODER                                     0x50
 +
 +typedef struct _DIG_ENCODER_CONTROL_PARAMETERS_V4
 +{
 +  USHORT usPixelClock;      // in 10KHz; for BIOS convenience
 +  union{
 +  ATOM_DIG_ENCODER_CONFIG_V4 acConfig;
 +  UCHAR ucConfig;
 +  };
 +  UCHAR ucAction;                              
 +  UCHAR ucEncoderMode;
 +                            // =0: DP   encoder      
 +                            // =1: LVDS encoder          
 +                            // =2: DVI  encoder  
 +                            // =3: HDMI encoder
 +                            // =4: SDVO encoder
 +                            // =5: DP audio
 +  UCHAR ucLaneNum;          // how many lanes to enable
 +  UCHAR ucBitPerColor;      // only valid for DP mode when ucAction = ATOM_ENCODER_CMD_SETUP
 +  UCHAR ucHPD_ID;           // HPD ID (1-6). =0 means to skip HPD programming. New compared to the previous version
 +}DIG_ENCODER_CONTROL_PARAMETERS_V4;
  
  // define ucBitPerColor: 
  #define PANEL_BPC_UNDEFINE                               0x00
@@@ -1013,7 -893,6 +1013,7 @@@ typedef struct _ATOM_DIG_TRANSMITTER_CO
  #endif
  }ATOM_DIG_TRANSMITTER_CONFIG_V3;
  
 +
  typedef struct _DIG_TRANSMITTER_CONTROL_PARAMETERS_V3
  {
        union
  #define ATOM_TRANSMITTER_CONFIG_V3_TRANSMITTER2               0x40    //CD
  #define ATOM_TRANSMITTER_CONFIG_V3_TRANSMITTER3               0x80    //EF
  
 +
 +/****************************************************************************/        
 +// Structures used by UNIPHYTransmitterControlTable V1.4
 +// ASIC Families: NI
 +// ucTableFormatRevision=1
 +// ucTableContentRevision=4
 +/****************************************************************************/        
 +typedef struct _ATOM_DP_VS_MODE_V4
 +{
 +  UCHAR ucLaneSel;
 +      union
 +      {  
 +        UCHAR ucLaneSet;
 +        struct {
 +#if ATOM_BIG_ENDIAN
 +                UCHAR ucPOST_CURSOR2:2;         //Bit[7:6] Post Cursor2 Level      <= New in V4
 +                UCHAR ucPRE_EMPHASIS:3;         //Bit[5:3] Pre-emphasis Level
 +                UCHAR ucVOLTAGE_SWING:3;        //Bit[2:0] Voltage Swing Level
 +#else
 +                UCHAR ucVOLTAGE_SWING:3;        //Bit[2:0] Voltage Swing Level
 +                UCHAR ucPRE_EMPHASIS:3;         //Bit[5:3] Pre-emphasis Level
 +                UCHAR ucPOST_CURSOR2:2;         //Bit[7:6] Post Cursor2 Level      <= New in V4
 +#endif
 +              };
 +      }; 
 +}ATOM_DP_VS_MODE_V4;
 + 
 +typedef struct _ATOM_DIG_TRANSMITTER_CONFIG_V4
 +{
 +#if ATOM_BIG_ENDIAN
 +  UCHAR ucTransmitterSel:2;         //bit7:6: =0 Dig Transmitter 1 ( Uniphy AB )
 +                                    //        =1 Dig Transmitter 2 ( Uniphy CD )
 +                                    //        =2 Dig Transmitter 3 ( Uniphy EF )
 +  UCHAR ucRefClkSource:2;           //bit5:4: PPLL1 =0, PPLL2=1, DCPLL=2, EXT_CLK=3   <= New
 +  UCHAR ucEncoderSel:1;             //bit3=0: Data/Clk path source from DIGA/C/E. =1: Data/clk path source from DIGB/D/F
 +  UCHAR ucLinkSel:1;                //bit2=0: Uniphy LINKA or C or E when fDualLinkConnector=0. when fDualLinkConnector=1, it means master link of dual link is A or C or E
 +                                    //    =1: Uniphy LINKB or D or F when fDualLinkConnector=0. when fDualLinkConnector=1, it means master link of dual link is B or D or F
 +  UCHAR fCoherentMode:1;            //bit1=1: Coherent Mode ( for DVI/HDMI mode )
 +  UCHAR fDualLinkConnector:1;       //bit0=1: Dual Link DVI connector
 +#else
 +  UCHAR fDualLinkConnector:1;       //bit0=1: Dual Link DVI connector
 +  UCHAR fCoherentMode:1;            //bit1=1: Coherent Mode ( for DVI/HDMI mode )
 +  UCHAR ucLinkSel:1;                //bit2=0: Uniphy LINKA or C or E when fDualLinkConnector=0. when fDualLinkConnector=1, it means master link of dual link is A or C or E
 +                                    //    =1: Uniphy LINKB or D or F when fDualLinkConnector=0. when fDualLinkConnector=1, it means master link of dual link is B or D or F
 +  UCHAR ucEncoderSel:1;             //bit3=0: Data/Clk path source from DIGA/C/E. =1: Data/clk path source from DIGB/D/F
 +  UCHAR ucRefClkSource:2;           //bit5:4: PPLL1 =0, PPLL2=1, DCPLL=2, EXT_CLK=3   <= New
 +  UCHAR ucTransmitterSel:2;         //bit7:6: =0 Dig Transmitter 1 ( Uniphy AB )
 +                                    //        =1 Dig Transmitter 2 ( Uniphy CD )
 +                                    //        =2 Dig Transmitter 3 ( Uniphy EF )
 +#endif
 +}ATOM_DIG_TRANSMITTER_CONFIG_V4;
 +
 +typedef struct _DIG_TRANSMITTER_CONTROL_PARAMETERS_V4
 +{
 +  union
 +  {
 +    USHORT usPixelClock;              // in 10KHz; for BIOS convenience
 +    USHORT usInitInfo;                        // when initializing uniphy, the lower 8 bits hold the connector type defined in objectid.h
 +    ATOM_DP_VS_MODE_V4 asMode; // DP Voltage swing mode     Redefined compared to the previous version
 +  };
 +  union
 +  {
 +  ATOM_DIG_TRANSMITTER_CONFIG_V4 acConfig;
 +  UCHAR ucConfig;
 +  };
 +  UCHAR ucAction;                                 // define as ATOM_TRANSMITER_ACTION_XXX                             
 +  UCHAR ucLaneNum;
 +  UCHAR ucReserved[3];
 +}DIG_TRANSMITTER_CONTROL_PARAMETERS_V4;
 +
 +//ucConfig 
 +//Bit0
 +#define ATOM_TRANSMITTER_CONFIG_V4_DUAL_LINK_CONNECTOR                        0x01
 +//Bit1
 +#define ATOM_TRANSMITTER_CONFIG_V4_COHERENT                                     0x02
 +//Bit2
 +#define ATOM_TRANSMITTER_CONFIG_V4_LINK_SEL_MASK                      0x04
 +#define ATOM_TRANSMITTER_CONFIG_V4_LINKA                                  0x00                        
 +#define ATOM_TRANSMITTER_CONFIG_V4_LINKB                                          0x04
 +// Bit3
 +#define ATOM_TRANSMITTER_CONFIG_V4_ENCODER_SEL_MASK           0x08
 +#define ATOM_TRANSMITTER_CONFIG_V4_DIG1_ENCODER                         0x00                           
 +#define ATOM_TRANSMITTER_CONFIG_V4_DIG2_ENCODER                         0x08                          
 +// Bit5:4
 +#define ATOM_TRANSMITTER_CONFIG_V4_REFCLK_SEL_MASK            0x30
 +#define ATOM_TRANSMITTER_CONFIG_V4_P1PLL                              0x00
 +#define ATOM_TRANSMITTER_CONFIG_V4_P2PLL                              0x10
 +#define ATOM_TRANSMITTER_CONFIG_V4_DCPLL                              0x20   // New in _V4
 +#define ATOM_TRANSMITTER_CONFIG_V4_REFCLK_SRC_EXT           0x30   // Changed compared to V3
 +// Bit7:6
 +#define ATOM_TRANSMITTER_CONFIG_V4_TRANSMITTER_SEL_MASK     0xC0
 +#define ATOM_TRANSMITTER_CONFIG_V4_TRANSMITTER1               0x00    //AB
 +#define ATOM_TRANSMITTER_CONFIG_V4_TRANSMITTER2               0x40    //CD
 +#define ATOM_TRANSMITTER_CONFIG_V4_TRANSMITTER3               0x80    //EF
 +
 +
 +/****************************************************************************/        
 +// Structures used by ExternalEncoderControlTable V1.3
 +// ASIC Families: Evergreen, Llano, NI
 +// ucTableFormatRevision=1
 +// ucTableContentRevision=3
 +/****************************************************************************/        
 +
 +typedef struct _EXTERNAL_ENCODER_CONTROL_PARAMETERS_V3
 +{
 +  union{
 +  USHORT usPixelClock;      // pixel clock in 10Khz, valid when ucAction=SETUP/ENABLE_OUTPUT 
 +  USHORT usConnectorId;     // connector id, valid when ucAction = INIT
 +  };
 +  UCHAR  ucConfig;          // indicates which encoder, and the DP link rate when ucAction = SETUP/ENABLE_OUTPUT
 +  UCHAR  ucAction;          // EXTERNAL_ENCODER_ACTION_V3_xxx
 +  UCHAR  ucEncoderMode;     // encoder mode, only used when ucAction = SETUP/ENABLE_OUTPUT
 +  UCHAR  ucLaneNum;         // lane number, only used when ucAction = SETUP/ENABLE_OUTPUT
 +  UCHAR  ucBitPerColor;     // output bits per color, only valid when ucAction = SETUP/ENABLE_OUTPUT and ucEncoderMode = DP
 +  UCHAR  ucReserved;        
 +}EXTERNAL_ENCODER_CONTROL_PARAMETERS_V3;
 +
 +// ucAction
 +#define EXTERNAL_ENCODER_ACTION_V3_DISABLE_OUTPUT         0x00
 +#define EXTERNAL_ENCODER_ACTION_V3_ENABLE_OUTPUT          0x01
 +#define EXTERNAL_ENCODER_ACTION_V3_ENCODER_INIT           0x07
 +#define EXTERNAL_ENCODER_ACTION_V3_ENCODER_SETUP          0x0f
 +#define EXTERNAL_ENCODER_ACTION_V3_ENCODER_BLANKING_OFF   0x10
 +#define EXTERNAL_ENCODER_ACTION_V3_ENCODER_BLANKING       0x11
 +#define EXTERNAL_ENCODER_ACTION_V3_DACLOAD_DETECTION      0x12
 +
 +// ucConfig
 +#define EXTERNAL_ENCODER_CONFIG_V3_DPLINKRATE_MASK        0x03
 +#define EXTERNAL_ENCODER_CONFIG_V3_DPLINKRATE_1_62GHZ     0x00
 +#define EXTERNAL_ENCODER_CONFIG_V3_DPLINKRATE_2_70GHZ     0x01
 +#define EXTERNAL_ENCODER_CONFIG_V3_DPLINKRATE_5_40GHZ     0x02
 +#define EXTERNAL_ENCODER_CONFIG_V3_ENCODER_SEL_MASK       0x70
 +#define EXTERNAL_ENCODER_CONFIG_V3_ENCODER1               0x00
 +#define EXTERNAL_ENCODER_CONFIG_V3_ENCODER2               0x10
 +#define EXTERNAL_ENCODER_CONFIG_V3_ENCODER3               0x20
 +
 +typedef struct _EXTERNAL_ENCODER_CONTROL_PS_ALLOCATION_V3
 +{
 +  EXTERNAL_ENCODER_CONTROL_PARAMETERS_V3 sExtEncoder;
 +  ULONG ulReserved[2];
 +}EXTERNAL_ENCODER_CONTROL_PS_ALLOCATION_V3;
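 +
 +// Editor's illustrative sketch (not part of the original header): decoding the
 +// DP link rate field of EXTERNAL_ENCODER_CONTROL_PARAMETERS_V3.ucConfig with
 +// the masks above; the helper name is hypothetical.
 +static UCHAR ext_enc_cfg_v3_link_rate(UCHAR ucConfig)
 +{
 +      /* 0x00 -> 1.62 Gbps, 0x01 -> 2.70 Gbps, 0x02 -> 5.40 Gbps per lane */
 +      return ucConfig & EXTERNAL_ENCODER_CONFIG_V3_DPLINKRATE_MASK;
 +}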
 +
 +
  /****************************************************************************/        
  // Structures used by DAC1OutputControlTable
  //                    DAC2OutputControlTable
@@@ -1406,7 -1142,6 +1406,7 @@@ typedef struct _PIXEL_CLOCK_PARAMETERS_
  #define PIXEL_CLOCK_V4_MISC_SS_ENABLE               0x10
  #define PIXEL_CLOCK_V4_MISC_COHERENT_MODE           0x20
  
 +
  typedef struct _PIXEL_CLOCK_PARAMETERS_V3
  {
   USHORT usPixelClock;                // in 10kHz unit; for BIOS convenience = (RefClk*FB_Div)/(Ref_Div*Post_Div)
@@@ -1467,55 -1202,6 +1467,55 @@@ typedef struct _PIXEL_CLOCK_PARAMETERS_
  #define PIXEL_CLOCK_V5_MISC_HDMI_32BPP              0x08
  #define PIXEL_CLOCK_V5_MISC_REF_DIV_SRC             0x10
  
 +typedef struct _CRTC_PIXEL_CLOCK_FREQ
 +{
 +#if ATOM_BIG_ENDIAN
 +  ULONG  ucCRTC:8;            // ATOM_CRTC1~6, indicates the CRTC controller to
 +                              // drive the pixel clock. Not used for the DCPLL case.
 +  ULONG  ulPixelClock:24;     // target pixel clock used to drive the CRTC timing.
 +                              // 0 means disable PPLL/DCPLL. Expanded to 24 bits compared to the previous version.
 +#else
 +  ULONG  ulPixelClock:24;     // target pixel clock used to drive the CRTC timing.
 +                              // 0 means disable PPLL/DCPLL. Expanded to 24 bits compared to the previous version.
 +  ULONG  ucCRTC:8;            // ATOM_CRTC1~6, indicates the CRTC controller to
 +                              // drive the pixel clock. Not used for the DCPLL case.
 +#endif
 +}CRTC_PIXEL_CLOCK_FREQ;
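 +
 +// Editor's illustrative sketch (not part of the original header): ulPixelClock
 +// is 24 bits in 10 kHz units, so 154 MHz is programmed as 15400. Assumes
 +// ATOM_CRTC1 is 0 as defined earlier in this header; the helper is hypothetical.
 +static CRTC_PIXEL_CLOCK_FREQ crtc1_pclk_154mhz(void)
 +{
 +      CRTC_PIXEL_CLOCK_FREQ f = {0};
 +      f.ulPixelClock = 15400;   /* 154.00 MHz in 10 kHz units */
 +      f.ucCRTC = 0;             /* ATOM_CRTC1 */
 +      return f;
 +}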
 +
 +typedef struct _PIXEL_CLOCK_PARAMETERS_V6
 +{
 +  union{
 +    CRTC_PIXEL_CLOCK_FREQ ulCrtcPclkFreq;    // pixel clock and CRTC id frequency 
 +    ULONG ulDispEngClkFreq;                  // dispclk frequency
 +  };
 +  USHORT usFbDiv;            // feedback divider integer part. 
 +  UCHAR  ucPostDiv;          // post divider. 
 +  UCHAR  ucRefDiv;           // Reference divider
 +  UCHAR  ucPpll;             // ATOM_PPLL1/ATOM_PPLL2/ATOM_DCPLL
 +  UCHAR  ucTransmitterID;    // ASIC encoder id defined in objectId.h, 
 +                             // indicate which graphic encoder will be used. 
 +  UCHAR  ucEncoderMode;      // Encoder mode
 +  UCHAR  ucMiscInfo;         // bit[0]= Force program PPLL
 +                             // bit[1]= 1 when VGA timing is used
 +                             // bit[3:2]= HDMI panel bit depth (see PIXEL_CLOCK_V6_MISC_HDMI_*BPP below)
 +                             // bit[4]= RefClock source for PPLL:
 +                             //         =0: XTALIN (default mode)
 +                             //         =1: a pre-defined external clock source, chosen
 +                             //             by VBIOS depending on the feature required
 +                             // bit[7:5]: reserved
 +  ULONG  ulFbDivDecFrac;     // 20 bit feedback divider decimal fraction part, range from 1~999999 ( 0.000001 to 0.999999 )
 +
 +}PIXEL_CLOCK_PARAMETERS_V6;
 +
 +#define PIXEL_CLOCK_V6_MISC_FORCE_PROG_PPLL         0x01
 +#define PIXEL_CLOCK_V6_MISC_VGA_MODE                0x02
 +#define PIXEL_CLOCK_V6_MISC_HDMI_BPP_MASK           0x0c
 +#define PIXEL_CLOCK_V6_MISC_HDMI_24BPP              0x00
 +#define PIXEL_CLOCK_V6_MISC_HDMI_36BPP              0x04
 +#define PIXEL_CLOCK_V6_MISC_HDMI_30BPP              0x08
 +#define PIXEL_CLOCK_V6_MISC_HDMI_48BPP              0x0c
 +#define PIXEL_CLOCK_V6_MISC_REF_DIV_SRC             0x10
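 +
 +// Editor's illustrative sketch (not part of the original header): decoding the
 +// HDMI bit depth field of PIXEL_CLOCK_PARAMETERS_V6.ucMiscInfo with the masks
 +// above; the helper name is hypothetical.
 +static int pclk_v6_hdmi_bpp(UCHAR ucMiscInfo)
 +{
 +      switch (ucMiscInfo & PIXEL_CLOCK_V6_MISC_HDMI_BPP_MASK) {
 +      case PIXEL_CLOCK_V6_MISC_HDMI_24BPP: return 24;
 +      case PIXEL_CLOCK_V6_MISC_HDMI_36BPP: return 36;
 +      case PIXEL_CLOCK_V6_MISC_HDMI_30BPP: return 30;
 +      default:                             return 48;  /* PIXEL_CLOCK_V6_MISC_HDMI_48BPP */
 +      }
 +}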
 +
  typedef struct _GET_DISP_PLL_STATUS_INPUT_PARAMETERS_V2
  {
    PIXEL_CLOCK_PARAMETERS_V3 sDispClkInput;
@@@ -1555,11 -1241,10 +1555,11 @@@ typedef struct _ADJUST_DISPLAY_PLL_PARA
  typedef struct _ADJUST_DISPLAY_PLL_INPUT_PARAMETERS_V3
  {
        USHORT usPixelClock;                    // target pixel clock
 -      UCHAR ucTransmitterID;                  // transmitter id defined in objectid.h
 +      UCHAR ucTransmitterID;                  // GPU transmitter id defined in objectid.h
        UCHAR ucEncodeMode;                     // encoder mode: CRT, LVDS, DP, TMDS or HDMI
    UCHAR ucDispPllConfig;                 // display pll configure parameter defined as following DISPPLL_CONFIG_XXXX
 -      UCHAR ucReserved[3];
 +  UCHAR ucExtTransmitterID;               // external encoder id.
 +      UCHAR ucReserved[2];
  }ADJUST_DISPLAY_PLL_INPUT_PARAMETERS_V3;
  
  // usDispPllConfig v1.2 for RoadRunner
@@@ -1629,7 -1314,7 +1629,7 @@@ typedef struct _GET_ENGINE_CLOCK_PARAME
  typedef struct _READ_EDID_FROM_HW_I2C_DATA_PARAMETERS
  {
    USHORT    usPrescale;         //Ratio between Engine clock and I2C clock
-   USHORT    usVRAMAddress;      //Adress in Frame Buffer where to pace raw EDID
+   USHORT    usVRAMAddress;      //Address in Frame Buffer where to place raw EDID
    USHORT    usStatus;           //When used as output: lower byte EDID checksum, high byte hardware status
                                  //When used as input:  lower byte is 'bytes to read'; currently limited to 128 bytes or 1 byte
    UCHAR     ucSlaveAddr;        //Read from which slave
@@@ -1673,7 -1358,6 +1673,7 @@@ typedef struct _SET_UP_HW_I2C_DATA_PARA
  /**************************************************************************/
  #define SPEED_FAN_CONTROL_PS_ALLOCATION   WRITE_ONE_BYTE_HW_I2C_DATA_PARAMETERS
  
 +
  /****************************************************************************/        
  // Structures used by PowerConnectorDetectionTable
  /****************************************************************************/        
@@@ -1754,31 -1438,6 +1754,31 @@@ typedef struct _ENABLE_SPREAD_SPECTRUM_
  #define ATOM_PPLL_SS_AMOUNT_V2_NFRAC_MASK     0x0F00
  #define ATOM_PPLL_SS_AMOUNT_V2_NFRAC_SHIFT    8
  
 +// Used by DCE5.0
 +typedef struct _ENABLE_SPREAD_SPECTRUM_ON_PPLL_V3
 +{
 +  USHORT  usSpreadSpectrumAmountFrac;   // SS_AMOUNT_DSFRAC New in DCE5.0
 +  UCHAR   ucSpreadSpectrumType;         // Bit[0]: 0-Down Spread, 1-Center Spread.
 +                                        // Bit[1]: 1-Ext. 0-Int.
 +                                        // Bit[3:2]: =0 P1PLL =1 P2PLL =2 DCPLL
 +                                        // Bits[7:4] reserved
 +  UCHAR   ucEnable;                     // ATOM_ENABLE or ATOM_DISABLE
 +  USHORT  usSpreadSpectrumAmount;       // Includes SS_AMOUNT_FBDIV[7:0] and SS_AMOUNT_NFRAC_SLIP[11:8]
 +  USHORT  usSpreadSpectrumStep;         // SS_STEP_SIZE_DSFRAC
 +}ENABLE_SPREAD_SPECTRUM_ON_PPLL_V3;
 +
 +#define ATOM_PPLL_SS_TYPE_V3_DOWN_SPREAD      0x00
 +#define ATOM_PPLL_SS_TYPE_V3_CENTRE_SPREAD    0x01
 +#define ATOM_PPLL_SS_TYPE_V3_EXT_SPREAD       0x02
 +#define ATOM_PPLL_SS_TYPE_V3_PPLL_SEL_MASK    0x0c
 +#define ATOM_PPLL_SS_TYPE_V3_P1PLL            0x00
 +#define ATOM_PPLL_SS_TYPE_V3_P2PLL            0x04
 +#define ATOM_PPLL_SS_TYPE_V3_DCPLL            0x08
 +#define ATOM_PPLL_SS_AMOUNT_V3_FBDIV_MASK     0x00FF
 +#define ATOM_PPLL_SS_AMOUNT_V3_FBDIV_SHIFT    0
 +#define ATOM_PPLL_SS_AMOUNT_V3_NFRAC_MASK     0x0F00
 +#define ATOM_PPLL_SS_AMOUNT_V3_NFRAC_SHIFT    8
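 +
 +// Editor's illustrative sketch (not part of the original header): unpacking
 +// usSpreadSpectrumAmount with the V3 masks above; the helper is hypothetical.
 +static void atom_ss_v3_unpack(USHORT usAmount, UCHAR *fbdiv, UCHAR *nfrac_slip)
 +{
 +      *fbdiv      = (usAmount & ATOM_PPLL_SS_AMOUNT_V3_FBDIV_MASK) >> ATOM_PPLL_SS_AMOUNT_V3_FBDIV_SHIFT;
 +      *nfrac_slip = (usAmount & ATOM_PPLL_SS_AMOUNT_V3_NFRAC_MASK) >> ATOM_PPLL_SS_AMOUNT_V3_NFRAC_SHIFT;
 +}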
 +
  #define ENABLE_SPREAD_SPECTRUM_ON_PPLL_PS_ALLOCATION  ENABLE_SPREAD_SPECTRUM_ON_PPLL
  
  /**************************************************************************/
@@@ -2047,7 -1706,7 +2047,7 @@@ typedef struct _ATOM_MASTER_LIST_OF_DAT
    USHORT        StandardVESA_Timing;      // Only used by Bios
    USHORT        FirmwareInfo;             // Shared by various SW components,latest version 1.4
    USHORT        DAC_Info;                 // Will be obsolete from R600
 -  USHORT        LVDS_Info;                // Shared by various SW components,latest version 1.1 
 +  USHORT        LCD_Info;                 // Shared by various SW components, latest version 1.3; was called LVDS_Info
    USHORT        TMDS_Info;                // Will be obsolete from R600
    USHORT        AnalogTV_Info;            // Shared by various SW components,latest version 1.1 
    USHORT        SupportedDevicesInfo;     // Will be obsolete from R600
        USHORT                          PowerSourceInfo;                                        // Shared by various SW components, latest version 1.1
  }ATOM_MASTER_LIST_OF_DATA_TABLES;
  
 +// For backward compatibility
 +#define LVDS_Info                LCD_Info
 +
  typedef struct _ATOM_MASTER_DATA_TABLE
  { 
    ATOM_COMMON_TABLE_HEADER sHeader;  
    ATOM_MASTER_LIST_OF_DATA_TABLES   ListOfDataTables;
  }ATOM_MASTER_DATA_TABLE;
  
 +
  /****************************************************************************/        
  // Structure used in MultimediaCapabilityInfoTable
  /****************************************************************************/        
@@@ -2121,7 -1776,6 +2121,7 @@@ typedef struct _ATOM_MULTIMEDIA_CONFIG_
    UCHAR                    ucVideoInput4Info;// Video Input 4 Type (1:0) F/B setting (2) physical connector ID (5:3) reserved (7:6)
  }ATOM_MULTIMEDIA_CONFIG_INFO;
  
 +
  /****************************************************************************/        
  // Structures used in FirmwareInfoTable
  /****************************************************************************/        
@@@ -2377,47 -2031,8 +2377,47 @@@ typedef struct _ATOM_FIRMWARE_INFO_V2_
    UCHAR                           ucReserved4[3];
  }ATOM_FIRMWARE_INFO_V2_1;
  
 +// The structure below is to be used from NI onward
 +// ucTableFormatRevision=2
 +// ucTableContentRevision=2
 +typedef struct _ATOM_FIRMWARE_INFO_V2_2
 +{
 +  ATOM_COMMON_TABLE_HEADER        sHeader; 
 +  ULONG                           ulFirmwareRevision;
 +  ULONG                           ulDefaultEngineClock;       //In 10Khz unit
 +  ULONG                           ulDefaultMemoryClock;       //In 10Khz unit
 +  ULONG                           ulReserved[2];
 +  ULONG                           ulReserved1;                //Was ulMaxEngineClockPLL_Output; //In 10Khz unit*
 +  ULONG                           ulReserved2;                //Was ulMaxMemoryClockPLL_Output; //In 10Khz unit*
 +  ULONG                           ulMaxPixelClockPLL_Output;  //In 10Khz unit
 +  ULONG                           ulBinaryAlteredInfo;        //Was ulASICMaxEngineClock  ?
 +  ULONG                           ulDefaultDispEngineClkFreq; //In 10Khz unit. This is the frequency before DCDTO, corresponding to usBootUpVDDCVoltage.          
 +  UCHAR                           ucReserved3;                //Was ucASICMaxTemperature;
 +  UCHAR                           ucMinAllowedBL_Level;
 +  USHORT                          usBootUpVDDCVoltage;        //In mV unit
 +  USHORT                          usLcdMinPixelClockPLL_Output; // In MHz unit
 +  USHORT                          usLcdMaxPixelClockPLL_Output; // In MHz unit
 +  ULONG                           ulReserved4;                //Was ulAsicMaximumVoltage
 +  ULONG                           ulMinPixelClockPLL_Output;  //In 10Khz unit
 +  ULONG                           ulReserved5;                //Was usMinEngineClockPLL_Input and usMaxEngineClockPLL_Input
 +  ULONG                           ulReserved6;                //Was usMinEngineClockPLL_Output and usMinMemoryClockPLL_Input
 +  ULONG                           ulReserved7;                //Was usMaxMemoryClockPLL_Input and usMinMemoryClockPLL_Output
 +  USHORT                          usReserved11;               //Was usMaxPixelClock;  //In 10Khz unit, Max.  Pclk used only for DAC
 +  USHORT                          usMinPixelClockPLL_Input;   //In 10Khz unit
 +  USHORT                          usMaxPixelClockPLL_Input;   //In 10Khz unit
 +  USHORT                          usBootUpVDDCIVoltage;       //In unit of mv; Was usMinPixelClockPLL_Output;
 +  ATOM_FIRMWARE_CAPABILITY_ACCESS usFirmwareCapability;
 +  USHORT                          usCoreReferenceClock;       //In 10Khz unit 
 +  USHORT                          usMemoryReferenceClock;     //In 10Khz unit 
 +  USHORT                          usUniphyDPModeExtClkFreq;   //In 10kHz unit; if 0, in DP mode the Uniphy input clock comes from the internal PPLL, otherwise from the external spread clock
 +  UCHAR                           ucMemoryModule_ID;          //Indicate what is the board design
 +  UCHAR                           ucReserved9[3];
 +  USHORT                          usBootUpMVDDCVoltage;       //In unit of mv; Was usMinPixelClockPLL_Output;
 +  USHORT                          usReserved12;
 +  ULONG                           ulReserved10[3];            // New added comparing to previous version
 +}ATOM_FIRMWARE_INFO_V2_2;
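 +
 +// Editor's illustrative sketch (not part of the original header): clock fields
 +// in this table are in 10 kHz units, so converting to kHz is a multiply by 10;
 +// the helper name is hypothetical.
 +static ULONG fw_v2_2_default_sclk_khz(const ATOM_FIRMWARE_INFO_V2_2 *fw)
 +{
 +      return fw->ulDefaultEngineClock * 10;   /* 10 kHz units -> kHz */
 +}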
  
 -#define ATOM_FIRMWARE_INFO_LAST  ATOM_FIRMWARE_INFO_V2_1
 +#define ATOM_FIRMWARE_INFO_LAST  ATOM_FIRMWARE_INFO_V2_2
  
  /****************************************************************************/        
  // Structures used in IntegratedSystemInfoTable
@@@ -2597,7 -2212,7 +2597,7 @@@ ulDockingPinCFGInfo: [15:0]-Bus/Device/
  ucDockingPinBit:     which bit in this register to read the pin status;
  ucDockingPinPolarity:Polarity of the pin when docked;
  
 -ulCPUCapInfo:        [7:0]=1:Griffin;[7:0]=2:Greyhound;[7:0]=3:K8, other bits reserved for now and must be 0x0
 +ulCPUCapInfo:        [7:0]=1:Griffin;[7:0]=2:Greyhound;[7:0]=3:K8, [7:0]=4:Pharaoh, other bits reserved for now and must be 0x0
  
  usNumberOfCyclesInPeriod:Indicate how many cycles when PWM duty is 100%.
  
@@@ -2635,14 -2250,6 +2635,14 @@@ usMinUpStreamHTLinkWidth:    Asymmetri
  usMinDownStreamHTLinkWidth:  same as above.
  */
  
 +// ATOM_INTEGRATED_SYSTEM_INFO::ulCPUCapInfo  - CPU type definition 
 +#define    INTEGRATED_SYSTEM_INFO__UNKNOWN_CPU             0
 +#define    INTEGRATED_SYSTEM_INFO__AMD_CPU__GRIFFIN        1
 +#define    INTEGRATED_SYSTEM_INFO__AMD_CPU__GREYHOUND      2
 +#define    INTEGRATED_SYSTEM_INFO__AMD_CPU__K8             3
 +#define    INTEGRATED_SYSTEM_INFO__AMD_CPU__PHARAOH        4
 +
 +#define    INTEGRATED_SYSTEM_INFO__AMD_CPU__MAX_CODE       INTEGRATED_SYSTEM_INFO__AMD_CPU__PHARAOH    // this define reflects the max defined CPU code
  
  #define SYSTEM_CONFIG_POWEREXPRESS_ENABLE                 0x00000001
  #define SYSTEM_CONFIG_RUN_AT_OVERDRIVE_ENGINE             0x00000002
@@@ -3171,88 -2778,8 +3171,88 @@@ typedef struct _ATOM_LVDS_INFO_V1
  #define PANEL_RANDOM_DITHER   0x80
  #define PANEL_RANDOM_DITHER_MASK   0x80
  
 +#define ATOM_LVDS_INFO_LAST  ATOM_LVDS_INFO_V12   // no need to change this 
 +
 +/****************************************************************************/        
 +// Structures used by LCD_InfoTable V1.3    Note: previous version was called ATOM_LVDS_INFO_V12
 +// ASIC Families:  NI
 +// ucTableFormatRevision=1
 +// ucTableContentRevision=3
 +/****************************************************************************/        
 +typedef struct _ATOM_LCD_INFO_V13
 +{
 +  ATOM_COMMON_TABLE_HEADER sHeader;  
 +  ATOM_DTD_FORMAT     sLCDTiming;
 +  USHORT              usExtInfoTableOffset;
 +  USHORT              usSupportedRefreshRate;     //Refer to panel info table in ATOMBIOS extension Spec.
 +  ULONG               ulReserved0;
 +  UCHAR               ucLCD_Misc;                // Reorganized in V13
 +                                                 // Bit0: {=0:single, =1:dual},
 +                                                 // Bit1: {=0:LDI format for RGB888, =1 FPDI format for RGB888}  // was {=0:666RGB, =1:888RGB},
 +                                                 // Bit3:2: {Grey level}
 +                                                 // Bit6:4 Color Bit Depth definition (see below definition in EDID V1.4 @BYTE 14h) 
 +                                                 // Bit7   Reserved.  was for ATOM_PANEL_MISC_API_ENABLED, still need it?  
 +  UCHAR               ucPanelDefaultRefreshRate;
 +  UCHAR               ucPanelIdentification;
 +  UCHAR               ucSS_Id;
 +  USHORT              usLCDVenderID;
 +  USHORT              usLCDProductID;
 +  UCHAR               ucLCDPanel_SpecialHandlingCap;  // Reorganized in V13
 +                                                 // Bit0: Once DAL sees this CAP is set, it will read EDID from LCD on its own
 +                                                 // Bit1: See LCDPANEL_CAP_DRR_SUPPORTED
 +                                                 // Bit2: a quick reference whether an embedded panel (LCD1) is LVDS (0) or eDP (1)
 +                                                 // Bit7-3: Reserved
 +  UCHAR               ucPanelInfoSize;                 // start from ATOM_DTD_FORMAT to end of panel info, including ExtInfoTable
 +  USHORT              usBacklightPWM;            //  Backlight PWM in Hz. New in _V13
 +
 +  UCHAR               ucPowerSequenceDIGONtoDE_in4Ms;
 +  UCHAR               ucPowerSequenceDEtoVARY_BL_in4Ms;
 +  UCHAR               ucPowerSequenceDEtoDIGON_in4Ms;
 +  UCHAR               ucPowerSequenceVARY_BLtoDE_in4Ms;
 +
 +  UCHAR               ucOffDelay_in4Ms;
 +  UCHAR               ucPowerSequenceVARY_BLtoBLON_in4Ms;
 +  UCHAR               ucPowerSequenceBLONtoVARY_BL_in4Ms;
 +  UCHAR               ucReserved1;
 +
 +  ULONG               ulReserved[4];
 +}ATOM_LCD_INFO_V13;  
 +
 +#define ATOM_LCD_INFO_LAST  ATOM_LCD_INFO_V13    
 +
 +//Definitions for ucLCD_Misc
 +#define ATOM_PANEL_MISC_V13_DUAL                   0x00000001
 +#define ATOM_PANEL_MISC_V13_FPDI                   0x00000002
 +#define ATOM_PANEL_MISC_V13_GREY_LEVEL             0x0000000C
 +#define ATOM_PANEL_MISC_V13_GREY_LEVEL_SHIFT       2
 +#define ATOM_PANEL_MISC_V13_COLOR_BIT_DEPTH_MASK   0x70
 +#define ATOM_PANEL_MISC_V13_6BIT_PER_COLOR         0x10
 +#define ATOM_PANEL_MISC_V13_8BIT_PER_COLOR         0x20
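 +
 +// Editor's illustrative sketch (not part of the original header): extracting
 +// the grey-level field of ucLCD_Misc with the V13 mask/shift above; the helper
 +// name is hypothetical.
 +static UCHAR lcd_v13_grey_level(UCHAR ucLCD_Misc)
 +{
 +      return (ucLCD_Misc & ATOM_PANEL_MISC_V13_GREY_LEVEL) >> ATOM_PANEL_MISC_V13_GREY_LEVEL_SHIFT;
 +}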
 +
 +//Color Bit Depth definition in EDID V1.4 @BYTE 14h
 +//Bit 6  5  4
 +//    0  0  0  -  Color bit depth is undefined
 +//    0  0  1  -   6 Bits per Primary Color
 +//    0  1  0  -   8 Bits per Primary Color
 +//    0  1  1  -  10 Bits per Primary Color
 +//    1  0  0  -  12 Bits per Primary Color
 +//    1  0  1  -  14 Bits per Primary Color
 +//    1  1  0  -  16 Bits per Primary Color
 +//    1  1  1  -  Reserved
 + 
 +//Definitions for ucLCDPanel_SpecialHandlingCap:
 +
 +//Once DAL sees this CAP is set, it will read EDID from LCD on its own instead of using sLCDTiming in ATOM_LVDS_INFO_V12.
 +//Other entries in ATOM_LVDS_INFO_V12 are still valid/useful to DAL.
 +#define       LCDPANEL_CAP_V13_READ_EDID              0x1        // = LCDPANEL_CAP_READ_EDID, no change compared to previous version
 +
 +//If a design supports DRR (dynamic refresh rate) on internal panels (LVDS or eDP), this cap is set in ucLCDPanel_SpecialHandlingCap together
 +//with multiple supported refresh rates@usSupportedRefreshRate. This cap should not be set when only slow refresh rate is supported (static
 +//refresh rate switch by SW). This is only valid from ATOM_LVDS_INFO_V12.
 +#define       LCDPANEL_CAP_V13_DRR_SUPPORTED          0x2        // = LCDPANEL_CAP_DRR_SUPPORTED, no change compared to previous version
  
 -#define ATOM_LVDS_INFO_LAST  ATOM_LVDS_INFO_V12
 +//Use this cap bit for a quick reference whether an embedded panel (LCD1) is LVDS or eDP.
 +#define       LCDPANEL_CAP_V13_eDP                    0x4        // = LCDPANEL_CAP_eDP, no change compared to previous version
  
  typedef struct  _ATOM_PATCH_RECORD_MODE
  {
@@@ -3417,9 -2944,9 +3417,9 @@@ typedef struct _ATOM_DPCD_INF
  #define MAX_DTD_MODE_IN_VRAM            6
  #define ATOM_DTD_MODE_SUPPORT_TBL_SIZE  (MAX_DTD_MODE_IN_VRAM*28)    //28= (SIZEOF ATOM_DTD_FORMAT) 
  #define ATOM_STD_MODE_SUPPORT_TBL_SIZE  32*8                         //32 is a predefined number,8= (SIZEOF ATOM_STD_FORMAT)
 -#define DFP_ENCODER_TYPE_OFFSET                                       0x80
 -#define DP_ENCODER_LANE_NUM_OFFSET                    0x84
 -#define DP_ENCODER_LINK_RATE_OFFSET                   0x88
 +//20 bytes for Encoder Type and DPCD in STD EDID area
 +#define DFP_ENCODER_TYPE_OFFSET         (ATOM_EDID_RAW_DATASIZE + ATOM_DTD_MODE_SUPPORT_TBL_SIZE + ATOM_STD_MODE_SUPPORT_TBL_SIZE - 20)    
 +#define ATOM_DP_DPCD_OFFSET             (DFP_ENCODER_TYPE_OFFSET + 4 )        
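 +// Editor's worked example (not part of the original header), assuming
 +// ATOM_EDID_RAW_DATASIZE is 256 as defined earlier in this header:
 +//   DFP_ENCODER_TYPE_OFFSET = 256 + 6*28 + 32*8 - 20 = 660
 +//   ATOM_DP_DPCD_OFFSET     = 660 + 4                = 664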
  
  #define ATOM_HWICON1_SURFACE_ADDR       0
  #define ATOM_HWICON2_SURFACE_ADDR       (ATOM_HWICON1_SURFACE_ADDR + ATOM_HWICON_SURFACE_SIZE)
  #define ATOM_DFP5_DTD_MODE_TBL_ADDR     (ATOM_DFP5_EDID_ADDR + ATOM_EDID_RAW_DATASIZE)
  #define ATOM_DFP5_STD_MODE_TBL_ADDR     (ATOM_DFP5_DTD_MODE_TBL_ADDR + ATOM_DTD_MODE_SUPPORT_TBL_SIZE)
  
 -#define ATOM_DP_TRAINING_TBL_ADDR                             (ATOM_DFP5_STD_MODE_TBL_ADDR+ATOM_STD_MODE_SUPPORT_TBL_SIZE)       
 +#define ATOM_DP_TRAINING_TBL_ADDR       (ATOM_DFP5_STD_MODE_TBL_ADDR + ATOM_STD_MODE_SUPPORT_TBL_SIZE)
  
 -#define ATOM_STACK_STORAGE_START        (ATOM_DP_TRAINING_TBL_ADDR+256)       
 -#define ATOM_STACK_STORAGE_END          ATOM_STACK_STORAGE_START+512        
 +#define ATOM_STACK_STORAGE_START        (ATOM_DP_TRAINING_TBL_ADDR + 1024)       
 +#define ATOM_STACK_STORAGE_END          (ATOM_STACK_STORAGE_START + 512)
  
  //The size below is in Kb!
  #define ATOM_VRAM_RESERVE_SIZE         ((((ATOM_STACK_STORAGE_END - ATOM_HWICON1_SURFACE_ADDR)>>10)+4)&0xFFFC)
     
 +#define ATOM_VRAM_RESERVE_V2_SIZE      32
 +
  #define       ATOM_VRAM_OPERATION_FLAGS_MASK         0xC0000000L
  #define ATOM_VRAM_OPERATION_FLAGS_SHIFT        30
  #define       ATOM_VRAM_BLOCK_NEEDS_NO_RESERVATION   0x1
@@@ -3681,15 -3206,6 +3681,15 @@@ typedef struct  _ATOM_DISPLAY_OBJECT_PA
   USHORT    usGraphicObjIds[1];                             //1st Encoder Obj sourced from GPU to last Graphic Obj destined to connector.
  }ATOM_DISPLAY_OBJECT_PATH;
  
 +typedef struct  _ATOM_DISPLAY_EXTERNAL_OBJECT_PATH
 +{
 +  USHORT    usDeviceTag;                                   //supported device 
 +  USHORT    usSize;                                        //the size of ATOM_DISPLAY_OBJECT_PATH
 +  USHORT    usConnObjectId;                                //Connector Object ID 
 +  USHORT    usGPUObjectId;                                 //GPU ID 
 +  USHORT    usGraphicObjIds[2];                            //usGraphicObjIds[0]= GPU internal encoder, usGraphicObjIds[1]= external encoder 
 +}ATOM_DISPLAY_EXTERNAL_OBJECT_PATH;
 +
  typedef struct _ATOM_DISPLAY_OBJECT_PATH_TABLE
  {
    UCHAR                           ucNumOfDispPath;
@@@ -3745,47 -3261,6 +3745,47 @@@ typedef struct _ATOM_SRC_DST_TABLE_FOR_
  #define EXT_AUXDDC_LUTINDEX_7                   7
  #define MAX_NUMBER_OF_EXT_AUXDDC_LUT_ENTRIES   (EXT_AUXDDC_LUTINDEX_7+1)
  
 +//ucChannelMapping are defined as following
 +//for DP connector, eDP, DP to VGA/LVDS 
 +//Bit[1:0]: Defines which pin connects to DP connector DP_Lane0, =0: source from GPU pin TX0, =1: from GPU pin TX1, =2: from GPU pin TX2, =3: from GPU pin TX3
 +//Bit[3:2]: Defines which pin connects to DP connector DP_Lane1, =0: source from GPU pin TX0, =1: from GPU pin TX1, =2: from GPU pin TX2, =3: from GPU pin TX3
 +//Bit[5:4]: Defines which pin connects to DP connector DP_Lane2, =0: source from GPU pin TX0, =1: from GPU pin TX1, =2: from GPU pin TX2, =3: from GPU pin TX3
 +//Bit[7:6]: Defines which pin connects to DP connector DP_Lane3, =0: source from GPU pin TX0, =1: from GPU pin TX1, =2: from GPU pin TX2, =3: from GPU pin TX3
 +typedef struct _ATOM_DP_CONN_CHANNEL_MAPPING
 +{
 +#if ATOM_BIG_ENDIAN
 +  UCHAR ucDP_Lane3_Source:2;
 +  UCHAR ucDP_Lane2_Source:2;
 +  UCHAR ucDP_Lane1_Source:2;
 +  UCHAR ucDP_Lane0_Source:2;
 +#else
 +  UCHAR ucDP_Lane0_Source:2;
 +  UCHAR ucDP_Lane1_Source:2;
 +  UCHAR ucDP_Lane2_Source:2;
 +  UCHAR ucDP_Lane3_Source:2;
 +#endif
 +}ATOM_DP_CONN_CHANNEL_MAPPING;
 +
 +//for DVI/HDMI, in dual link case, both links have to have same mapping. 
 +//Bit[1:0]: Defines which pin connects to DVI connector data Lane2, =0: source from GPU pin TX0, =1: from GPU pin TX1, =2: from GPU pin TX2, =3: from GPU pin TX3
 +//Bit[3:2]: Defines which pin connects to DVI connector data Lane1, =0: source from GPU pin TX0, =1: from GPU pin TX1, =2: from GPU pin TX2, =3: from GPU pin TX3
 +//Bit[5:4]: Defines which pin connects to DVI connector data Lane0, =0: source from GPU pin TX0, =1: from GPU pin TX1, =2: from GPU pin TX2, =3: from GPU pin TX3
 +//Bit[7:6]: Defines which pin connects to DVI connector clock lane, =0: source from GPU pin TX0, =1: from GPU pin TX1, =2: from GPU pin TX2, =3: from GPU pin TX3
 +typedef struct _ATOM_DVI_CONN_CHANNEL_MAPPING
 +{
 +#if ATOM_BIG_ENDIAN
 +  UCHAR ucDVI_CLK_Source:2;
 +  UCHAR ucDVI_DATA0_Source:2;
 +  UCHAR ucDVI_DATA1_Source:2;
 +  UCHAR ucDVI_DATA2_Source:2;
 +#else
 +  UCHAR ucDVI_DATA2_Source:2;
 +  UCHAR ucDVI_DATA1_Source:2;
 +  UCHAR ucDVI_DATA0_Source:2;
 +  UCHAR ucDVI_CLK_Source:2;
 +#endif
 +}ATOM_DVI_CONN_CHANNEL_MAPPING;
 +
  typedef struct _EXT_DISPLAY_PATH
  {
    USHORT  usDeviceTag;                    //A bit vector to show what devices are supported 
    UCHAR   ucExtAUXDDCLutIndex;            //An index into external AUX/DDC channel LUT
    UCHAR   ucExtHPDPINLutIndex;            //An index into external HPD pin LUT
    USHORT  usExtEncoderObjId;              //external encoder object id
 -  USHORT  usReserved[3]; 
 +  union{
 +    UCHAR   ucChannelMapping;                  // if ucChannelMapping=0, the default one-to-one mapping is used
 +    ATOM_DP_CONN_CHANNEL_MAPPING asDPMapping;
 +    ATOM_DVI_CONN_CHANNEL_MAPPING asDVIMapping;
 +  };
 +  UCHAR   ucReserved;
 +  USHORT  usReserved[2]; 
  }EXT_DISPLAY_PATH;
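 +
 +// Editor's illustrative sketch (not part of the original header): with a
 +// non-zero ucChannelMapping, the union above selects the per-connector lane
 +// mapping; the #if ATOM_BIG_ENDIAN bitfield ordering keeps the byte layout
 +// identical on both endiannesses. The helper name is hypothetical.
 +static UCHAR dp_lane0_tx_pin(const EXT_DISPLAY_PATH *path)
 +{
 +      return path->asDPMapping.ucDP_Lane0_Source;   /* 0..3 -> GPU pin TX0..TX3 */
 +}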
     
  #define NUMBER_OF_UCHAR_FOR_GUID          16
@@@ -3812,8 -3281,7 +3812,8 @@@ typedef  struct _ATOM_EXTERNAL_DISPLAY_
    UCHAR                    ucGuid [NUMBER_OF_UCHAR_FOR_GUID];     // a GUID is a 16 byte long string
    EXT_DISPLAY_PATH         sPath[MAX_NUMBER_OF_EXT_DISPLAY_PATH]; // total of fixed 7 entries.
   UCHAR                    ucChecksum;                            // a simple checksum; the sum of the whole structure equals 0x0.
 -  UCHAR                    Reserved [7];                          // for potential expansion
 +  UCHAR                    uc3DStereoPinId;                       // used for eDP panel
 +  UCHAR                    Reserved [6];                          // for potential expansion
  }ATOM_EXTERNAL_DISPLAY_CONNECTION_INFO;
  
  //Related definitions, all records are different but they have a common header
@@@ -3843,11 -3311,10 +3843,11 @@@ typedef struct _ATOM_COMMON_RECORD_HEAD
  #define ATOM_CONNECTOR_AUXDDC_LUT_RECORD_TYPE          17 //This is for the case when connectors are not known to object table
  #define ATOM_OBJECT_LINK_RECORD_TYPE                   18 //Once this record is present under one object, it indicates the object is linked to another object described by the record
  #define ATOM_CONNECTOR_REMOTE_CAP_RECORD_TYPE          19
 +#define ATOM_ENCODER_CAP_RECORD_TYPE                   20
  
  
  //Must be updated when new record type is added,equal to that record definition!
 -#define ATOM_MAX_OBJECT_RECORD_NUMBER             ATOM_CONNECTOR_REMOTE_CAP_RECORD_TYPE
 +#define ATOM_MAX_OBJECT_RECORD_NUMBER             ATOM_ENCODER_CAP_RECORD_TYPE
  
  typedef struct  _ATOM_I2C_RECORD
  {
@@@ -3974,26 -3441,6 +3974,26 @@@ typedef struct  _ATOM_ENCODER_DVO_CF_RE
    UCHAR                       ucPadding[2];
  }ATOM_ENCODER_DVO_CF_RECORD;
  
 +// Bit maps for ATOM_ENCODER_CAP_RECORD.usEncoderCap
 +#define ATOM_ENCODER_CAP_RECORD_HBR2     0x01         // DP1.2 HBR2 is supported by this path
 +
 +typedef struct  _ATOM_ENCODER_CAP_RECORD
 +{
 +  ATOM_COMMON_RECORD_HEADER   sheader;
 +  union {
 +    USHORT                    usEncoderCap;         
 +    struct {
 +#if ATOM_BIG_ENDIAN
 +      USHORT                  usReserved:15;        // Bits 1-15 may be defined for other capabilities in the future
 +      USHORT                  usHBR2Cap:1;          // Bit0 is for DP1.2 HBR2 capability.
 +#else
 +      USHORT                  usHBR2Cap:1;          // Bit0 is for DP1.2 HBR2 capability.
 +      USHORT                  usReserved:15;        // Bits 1-15 may be defined for other capabilities in the future
 +#endif
 +    };
 +  }; 
 +}ATOM_ENCODER_CAP_RECORD;                             
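 +
 +// Editor's illustrative sketch (not part of the original header): testing the
 +// HBR2 bit through the union above; the helper name is hypothetical.
 +static int encoder_path_supports_hbr2(const ATOM_ENCODER_CAP_RECORD *rec)
 +{
 +      return (rec->usEncoderCap & ATOM_ENCODER_CAP_RECORD_HBR2) != 0;
 +}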
 +
  // value for ATOM_CONNECTOR_CF_RECORD.ucConnectedDvoBundle
  #define ATOM_CONNECTOR_CF_RECORD_CONNECTED_UPPER12BITBUNDLEA   1
  #define ATOM_CONNECTOR_CF_RECORD_CONNECTED_LOWER12BITBUNDLEB   2
@@@ -4133,11 -3580,6 +4133,11 @@@ typedef struct _ATOM_VOLTAGE_CONTRO
  #define       VOLTAGE_CONTROL_ID_DAC                                                          0x02                                                                    //I2C control, used for R5xx/R6xx MVDDC,MVDDQ or VDDCI
  #define       VOLTAGE_CONTROL_ID_VT116xM                                              0x03                                                                    //I2C control, used for R6xx Core Voltage
  #define VOLTAGE_CONTROL_ID_DS4402                                                     0x04                                                                    
 +#define VOLTAGE_CONTROL_ID_UP6266                                     0x05
 +#define VOLTAGE_CONTROL_ID_SCORPIO                                    0x06
 +#define VOLTAGE_CONTROL_ID_VT1556M                                    0x07
 +#define VOLTAGE_CONTROL_ID_CHL822x                                    0x08
 +#define VOLTAGE_CONTROL_ID_VT1586M                                    0x09
  
  typedef struct  _ATOM_VOLTAGE_OBJECT
  {
@@@ -4228,157 -3670,66 +4228,157 @@@ typedef struct _ATOM_POWER_SOURCE_INF
  #define POWER_SENSOR_GPIO                                                             0x01
  #define POWER_SENSOR_I2C                                                              0x02
  
 +typedef struct _ATOM_CLK_VOLT_CAPABILITY
 +{
 +  ULONG      ulVoltageIndex;                      // The Voltage Index indicated by FUSE, same voltage index shared with SCLK DPM fuse table        
 +  ULONG      ulMaximumSupportedCLK;               // Maximum clock supported with specified voltage index, unit in 10kHz
 +}ATOM_CLK_VOLT_CAPABILITY;
 +
 +typedef struct _ATOM_AVAILABLE_SCLK_LIST
 +{
 +  ULONG      ulSupportedSCLK;               // Maximum clock supported with specified voltage index,  unit in 10kHz
 +  USHORT     usVoltageIndex;                // The Voltage Index indicated by FUSE for specified SCLK  
 +  USHORT     usVoltageID;                   // The Voltage ID indicated by FUSE for specified SCLK 
 +}ATOM_AVAILABLE_SCLK_LIST;
 +
 +// ATOM_INTEGRATED_SYSTEM_INFO_V6 ulSystemConfig cap definition
 +#define ATOM_IGP_INFO_V6_SYSTEM_CONFIG__PCIE_POWER_GATING_ENABLE             1       // refer to ulSystemConfig bit[0]
 +
 +// This IntegratedSystemInfoTable is used for Llano/Ontario APUs
  typedef struct _ATOM_INTEGRATED_SYSTEM_INFO_V6
  {
    ATOM_COMMON_TABLE_HEADER   sHeader;
    ULONG  ulBootUpEngineClock;
    ULONG  ulDentistVCOFreq;          
    ULONG  ulBootUpUMAClock;          
 -  ULONG  ulReserved1[8];            
 +  ATOM_CLK_VOLT_CAPABILITY   sDISPCLK_Voltage[4];            
    ULONG  ulBootUpReqDisplayVector;
    ULONG  ulOtherDisplayMisc;
    ULONG  ulGPUCapInfo;
 -  ULONG  ulReserved2[3];            
 +  ULONG  ulSB_MMIO_Base_Addr;
 +  USHORT usRequestedPWMFreqInHz;
 +  UCHAR  ucHtcTmpLmt;   
 +  UCHAR  ucHtcHystLmt;
 +  ULONG  ulMinEngineClock;           
    ULONG  ulSystemConfig;            
    ULONG  ulCPUCapInfo;              
 -  USHORT usMaxNBVoltage;  
 -  USHORT usMinNBVoltage;  
 -  USHORT usBootUpNBVoltage;         
 -  USHORT usExtDispConnInfoOffset;  
 -  UCHAR  ucHtcTmpLmt;   
 -  UCHAR  ucTjOffset;    
 +  USHORT usNBP0Voltage;               
 +  USHORT usNBP1Voltage;
 +  USHORT usBootUpNBVoltage;                       
 +  USHORT usExtDispConnInfoOffset;
 +  USHORT usPanelRefreshRateRange;     
    UCHAR  ucMemoryType;  
    UCHAR  ucUMAChannelNumber;
    ULONG  ulCSR_M3_ARB_CNTL_DEFAULT[10];  
    ULONG  ulCSR_M3_ARB_CNTL_UVD[10]; 
    ULONG  ulCSR_M3_ARB_CNTL_FS3D[10];
 -  ULONG  ulReserved3[42]; 
 +  ATOM_AVAILABLE_SCLK_LIST   sAvail_SCLK[5];
 +  ULONG  ulGMCRestoreResetTime;
 +  ULONG  ulMinimumNClk;
 +  ULONG  ulIdleNClk;
 +  ULONG  ulDDR_DLL_PowerUpTime;
 +  ULONG  ulDDR_PLL_PowerUpTime;
 +  USHORT usPCIEClkSSPercentage;
 +  USHORT usPCIEClkSSType;
 +  USHORT usLvdsSSPercentage;
 +  USHORT usLvdsSSpreadRateIn10Hz;
 +  USHORT usHDMISSPercentage;
 +  USHORT usHDMISSpreadRateIn10Hz;
 +  USHORT usDVISSPercentage;
 +  USHORT usDVISSpreadRateIn10Hz;
 +  ULONG  ulReserved3[21]; 
    ATOM_EXTERNAL_DISPLAY_CONNECTION_INFO sExtDispConnInfo;   
  }ATOM_INTEGRATED_SYSTEM_INFO_V6;   
  
 +// ulGPUCapInfo
 +#define INTEGRATED_SYSTEM_INFO_V6_GPUCAPINFO__TMDSHDMI_COHERENT_SINGLEPLL_MODE       0x01
 +#define INTEGRATED_SYSTEM_INFO_V6_GPUCAPINFO__DISABLE_AUX_HW_MODE_DETECTION          0x08
 +
 +// ulOtherDisplayMisc
 +#define INTEGRATED_SYSTEM_INFO__GET_EDID_CALLBACK_FUNC_SUPPORT                       0x01
 +
 +
  /**********************************************************************************************************************
 -// ATOM_INTEGRATED_SYSTEM_INFO_V6 Description
 -//ulBootUpEngineClock:              VBIOS bootup Engine clock frequency, in 10kHz unit. 
 -//ulDentistVCOFreq:                 Dentist VCO clock in 10kHz unit. 
 -//ulBootUpUMAClock:                 System memory boot up clock frequency in 10Khz unit. 
 -//ulReserved1[8]                    Reserved by now, must be 0x0. 
 -//ulBootUpReqDisplayVector            VBIOS boot up display IDs
 -//                                  ATOM_DEVICE_CRT1_SUPPORT                  0x0001
 -//                                  ATOM_DEVICE_CRT2_SUPPORT                  0x0010
 -//                                  ATOM_DEVICE_DFP1_SUPPORT                  0x0008 
 -//                                  ATOM_DEVICE_DFP6_SUPPORT                  0x0040 
 -//                                  ATOM_DEVICE_DFP2_SUPPORT                  0x0080       
 -//                                  ATOM_DEVICE_DFP3_SUPPORT                  0x0200       
 -//                                  ATOM_DEVICE_DFP4_SUPPORT                  0x0400        
 -//                                  ATOM_DEVICE_DFP5_SUPPORT                  0x0800
 -//                                  ATOM_DEVICE_LCD1_SUPPORT                  0x0002
 -//ulOtherDisplayMisc                  Other display related flags, not defined yet. 
 -//ulGPUCapInfo                      TBD
 -//ulReserved2[3]                    must be 0x0 for the reserved.
 -//ulSystemConfig                    TBD
 -//ulCPUCapInfo                      TBD
 -//usMaxNBVoltage                    High NB voltage in unit of mv, calculated using current VDDNB (D24F2xDC) and VDDNB offset fuse. 
 -//usMinNBVoltage                    Low NB voltage in unit of mv, calculated using current VDDNB (D24F2xDC) and VDDNB offset fuse.
 -//usBootUpNBVoltage                 Boot up NB voltage in unit of mv.
 -//ucHtcTmpLmt                       Bit [22:16] of D24F3x64 Thermal Control (HTC) Register.
 -//ucTjOffset                        Bit [28:22] of D24F3xE4 Thermtrip Status Register,may not be needed.
 -//ucMemoryType                      [3:0]=1:DDR1;=2:DDR2;=3:DDR3.[7:4] is reserved.
 -//ucUMAChannelNumber                  System memory channel numbers. 
 -//usExtDispConnectionInfoOffset     ATOM_EXTERNAL_DISPLAY_CONNECTION_INFO offset relative to beginning of this table. 
 -//ulCSR_M3_ARB_CNTL_DEFAULT[10]     Arrays with values for CSR M3 arbiter for default
 -//ulCSR_M3_ARB_CNTL_UVD[10]         Arrays with values for CSR M3 arbiter for UVD playback.
 -//ulCSR_M3_ARB_CNTL_FS3D[10]        Arrays with values for CSR M3 arbiter for Full Screen 3D applications.
 +  ATOM_INTEGRATED_SYSTEM_INFO_V6 Description
 +ulBootUpEngineClock:              VBIOS bootup Engine clock frequency, in 10kHz unit; if it equals 0, VBIOS uses a pre-defined bootup engine clock
 +ulDentistVCOFreq:                 Dentist VCO clock in 10kHz unit. 
 +ulBootUpUMAClock:                 System memory boot up clock frequency in 10Khz unit. 
 +sDISPCLK_Voltage:                 Report Display clock voltage requirement.
 + 
 +ulBootUpReqDisplayVector:         VBIOS boot up display IDs, following are supported devices in Llano/Ontario projects:
 +                                  ATOM_DEVICE_CRT1_SUPPORT                  0x0001
 +                                  ATOM_DEVICE_CRT2_SUPPORT                  0x0010
 +                                  ATOM_DEVICE_DFP1_SUPPORT                  0x0008 
 +                                  ATOM_DEVICE_DFP6_SUPPORT                  0x0040 
 +                                  ATOM_DEVICE_DFP2_SUPPORT                  0x0080       
 +                                  ATOM_DEVICE_DFP3_SUPPORT                  0x0200       
 +                                  ATOM_DEVICE_DFP4_SUPPORT                  0x0400        
 +                                  ATOM_DEVICE_DFP5_SUPPORT                  0x0800
 +                                  ATOM_DEVICE_LCD1_SUPPORT                  0x0002
 +ulOtherDisplayMisc:               Other display related flags, not defined yet.
 +ulGPUCapInfo:                     bit[0]=0: TMDS/HDMI Coherent Mode uses cascade PLL mode.
 +                                        =1: TMDS/HDMI Coherent Mode uses single PLL mode.
 +                                  bit[3]=0: Enable HW AUX mode detection logic
 +                                        =1: Disable HW AUX mode detection logic
 +ulSB_MMIO_Base_Addr:              Physical Base address to SB MMIO space. Driver needs to initialize it for SMU usage.
 +
 +usRequestedPWMFreqInHz:           When set to 0x0 by SBIOS: the LCD BackLight is not controlled by GPU (SW).
 +                                  Any attempt to change BL using the VBIOS function or to enable VariBri from the PP table is not effective, since ATOM_BIOS_INFO_BL_CONTROLLED_BY_GPU==0;
 +
 +                                  When set to a non-zero frequency, the BackLight is controlled by GPU (SW) in one of the two ways below:
 +                                  1. SW uses the GPU BL PWM output to control the BL; in this case, this non-zero frequency determines what frequency the GPU should use.
 +                                  VBIOS will set up the proper PWM frequency and ATOM_BIOS_INFO_BL_CONTROLLED_BY_GPU==1; as a result,
 +                                  changing BL using the VBIOS function is functional in both driver and non-driver present environments,
 +                                  and enabling VariBri under the driver environment from the PP table is optional.
 +
 +                                  2. SW uses other means to control BL (like DPCD); this non-zero frequency serves only as a flag indicating
 +                                  that BL control from GPU is expected.
 +                                  VBIOS will NOT set up the PWM frequency but will make ATOM_BIOS_INFO_BL_CONTROLLED_BY_GPU==1.
 +                                  Changing BL using the VBIOS function could be functional in both driver and non-driver present environments,
 +                                  but it is platform dependent, and enabling VariBri under the driver environment from the PP table is optional.
 +
 +ucHtcTmpLmt:                      Refer to D18F3x64 bit[22:16], HtcTmpLmt. 
 +                                  Threshold on value to enter HTC_active state.
 +ucHtcHystLmt:                     Refer to D18F3x64 bit[27:24], HtcHystLmt.
 +                                  The threshold-off value to exit the HTC_active state is the threshold-on value minus ucHtcHystLmt.
 +ulMinEngineClock:                 Minimum SCLK allowed in 10kHz unit. This is calculated based on WRCK Fuse settings.
 +ulSystemConfig:                   Bit[0]=0: PCIE Power Gating Disabled 
 +                                        =1: PCIE Power Gating Enabled
 +                                  Bit[1]=0: DDR-DLL shut-down feature disabled.
 +                                         1: DDR-DLL shut-down feature enabled.
 +                                  Bit[2]=0: DDR-PLL Power down feature disabled.
 +                                         1: DDR-PLL Power down feature enabled.                                 
 +ulCPUCapInfo:                     TBD
 +usNBP0Voltage:                    VID for voltage on NB P0 State
 +usNBP1Voltage:                    VID for voltage on NB P1 State  
 +usBootUpNBVoltage:                Voltage index of GNB voltage configured by SBIOS, which is sufficient to support the VBIOS DISPCLK requirement.
 +usExtDispConnInfoOffset:          Offset to sExtDispConnInfo inside the structure
 +usPanelRefreshRateRange:          Bit vector for the LCD supported refresh rate range. If DRR is requested by the platform, at least two bits need to be set
 +                                  to indicate a range.
 +                                  SUPPORTED_LCD_REFRESHRATE_30Hz          0x0004
 +                                  SUPPORTED_LCD_REFRESHRATE_40Hz          0x0008
 +                                  SUPPORTED_LCD_REFRESHRATE_50Hz          0x0010
 +                                  SUPPORTED_LCD_REFRESHRATE_60Hz          0x0020
 +ucMemoryType:                     [3:0]=1:DDR1;=2:DDR2;=3:DDR3.[7:4] is reserved.
 +ucUMAChannelNumber:               System memory channel numbers.
 +ulCSR_M3_ARB_CNTL_DEFAULT[10]:    Arrays with values for CSR M3 arbiter for default
 +ulCSR_M3_ARB_CNTL_UVD[10]:        Arrays with values for CSR M3 arbiter for UVD playback.
 +ulCSR_M3_ARB_CNTL_FS3D[10]:       Arrays with values for CSR M3 arbiter for Full Screen 3D applications.
 +sAvail_SCLK[5]:                   Arrays providing the available list of SCLK and corresponding voltage, ordered from low to high
 +ulGMCRestoreResetTime:            GMC power restore and GMC reset time to calculate data reconnection latency. Unit in ns.
 +ulMinimumNClk:                    Minimum NCLK speed among all NB-Pstates to calculate data reconnection latency. Unit in 10kHz.
 +ulIdleNClk:                       NCLK speed while memory runs in self-refresh state. Unit in 10kHz.
 +ulDDR_DLL_PowerUpTime:            DDR PHY DLL power up time. Unit in ns.
 +ulDDR_PLL_PowerUpTime:            DDR PHY PLL power up time. Unit in ns.
 +usPCIEClkSSPercentage:            PCIE Clock Spread Spectrum Percentage in unit 0.01%; 100 means 1%.
 +usPCIEClkSSType:                  PCIE Clock Spread Spectrum Type. 0 for Down spread (default); 1 for Center spread.
 +usLvdsSSPercentage:               LVDS panel (not including eDP) Spread Spectrum Percentage in unit of 0.01%; =0: use VBIOS default setting.
 +usLvdsSSpreadRateIn10Hz:          LVDS panel (not including eDP) Spread Spectrum frequency in unit of 10Hz; =0: use VBIOS default setting.
 +usHDMISSPercentage:               HDMI Spread Spectrum Percentage in unit 0.01%; 100 means 1%; =0: use VBIOS default setting.
 +usHDMISSpreadRateIn10Hz:          HDMI Spread Spectrum frequency in unit of 10Hz; =0: use VBIOS default setting.
 +usDVISSPercentage:                DVI Spread Spectrum Percentage in unit 0.01%; 100 means 1%; =0: use VBIOS default setting.
 +usDVISSpreadRateIn10Hz:           DVI Spread Spectrum frequency in unit of 10Hz; =0: use VBIOS default setting.
  **********************************************************************************************************************/
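 +// Editor's illustrative sketch (not part of the original header): scanning the
 +// sAvail_SCLK list described above, which is ordered from low to high; treating
 +// a zero ulSupportedSCLK entry as unused is an assumption.
 +static ULONG max_avail_sclk_10khz(const ATOM_INTEGRATED_SYSTEM_INFO_V6 *info)
 +{
 +      ULONG max = 0;
 +      int i;
 +      for (i = 0; i < 5; i++)
 +              if (info->sAvail_SCLK[i].ulSupportedSCLK > max)
 +                      max = info->sAvail_SCLK[i].ulSupportedSCLK;
 +      return max;
 +}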
  
  /**************************************************************************/
@@@ -4439,7 -3790,6 +4439,7 @@@ typedef struct _ATOM_ASIC_SS_ASSIGNMEN
  #define ASIC_INTERNAL_SS_ON_LVDS    6
  #define ASIC_INTERNAL_SS_ON_DP      7
  #define ASIC_INTERNAL_SS_ON_DCPLL   8
 +#define ASIC_EXTERNAL_SS_ON_DP_CLOCK 9
  
  typedef struct _ATOM_ASIC_SS_ASSIGNMENT_V2
  {
@@@ -4553,7 -3903,6 +4553,7 @@@ typedef struct _ATOM_ASIC_INTERNAL_SS_I
  #define ATOM_S0_SYSTEM_POWER_STATE_VALUE_AC     1
  #define ATOM_S0_SYSTEM_POWER_STATE_VALUE_DC     2
  #define ATOM_S0_SYSTEM_POWER_STATE_VALUE_LITEAC 3
 +#define ATOM_S0_SYSTEM_POWER_STATE_VALUE_LIT2AC 4
  
  //Byte aligned definition for BIOS usage
  #define ATOM_S0_CRT1_MONOb0             0x01
@@@ -5180,8 -4529,7 +5180,8 @@@ typedef struct _ATOM_INIT_REG_BLOCK
  #define INDEX_ACCESS_RANGE_BEGIN          (VALUE_DWORD + 1)
  #define INDEX_ACCESS_RANGE_END                    (INDEX_ACCESS_RANGE_BEGIN + 1)
  #define VALUE_INDEX_ACCESS_SINGLE         (INDEX_ACCESS_RANGE_END + 1)
 -
 +//#define ACCESS_MCIODEBUGIND            0x40       //defined in BIOS code
 +#define ACCESS_PLACEHOLDER             0x80
  
  typedef struct _ATOM_MC_INIT_PARAM_TABLE
  { 
  #define _32Mx32             0x33
  #define _64Mx8              0x41
  #define _64Mx16             0x42
 +#define _64Mx32             0x43
 +#define _128Mx8             0x51
 +#define _128Mx16            0x52
 +#define _256Mx8             0x61
  
  #define SAMSUNG             0x1
  #define INFINEON            0x2
  #define QIMONDA             INFINEON
  #define PROMOS              MOSEL
  #define KRETON              INFINEON
 +#define ELIXIR              NANYA
  
  /////////////Support for GDDR5 MC uCode to reside in upper 64K of ROM/////////////
  
 -#define UCODE_ROM_START_ADDRESS               0x1c000
 +#define UCODE_ROM_START_ADDRESS               0x1b800
  #define       UCODE_SIGNATURE                 0x4375434d // 'MCuC' - MC uCode
  
  //uCode block header for reference
@@@ -5560,34 -4903,7 +5560,34 @@@ typedef struct _ATOM_VRAM_MODULE_V
    ATOM_MEMORY_TIMING_FORMAT_V2  asMemTiming[5];//Memory Timing block sort from lower clock to higher clock
  }ATOM_VRAM_MODULE_V6;
  
 -
 +typedef struct _ATOM_VRAM_MODULE_V7
 +{
 +// Design Specific Values
 +  ULONG   ulChannelMapCfg;                  // mmMC_SHARED_CHREMAP
 +  USHORT  usModuleSize;                     // Size of ATOM_VRAM_MODULE_V7
 +  USHORT  usPrivateReserved;                // MC_ARB_RAMCFG (includes NOOFBANK,NOOFRANKS,NOOFROWS,NOOFCOLS)
 +  USHORT  usReserved;
 +  UCHAR   ucExtMemoryID;                    // Current memory module ID
 +  UCHAR   ucMemoryType;                     // MEM_TYPE_DDR2/DDR3/GDDR3/GDDR5
 +  UCHAR   ucChannelNum;                     // Number of mem. channels supported in this module
 +  UCHAR   ucChannelWidth;                   // CHANNEL_16BIT/CHANNEL_32BIT/CHANNEL_64BIT
 +  UCHAR   ucDensity;                        // _8Mx32, _16Mx32, _16Mx16, _32Mx16
 +  UCHAR   ucReserve;                        // Former container for Mx_FLAGS like DBI_AC_MODE_ENABLE_ASIC for GDDR4. Not used now.
 +  UCHAR   ucMisc;                           // RANK_OF_THISMEMORY etc.
 +  UCHAR   ucVREFI;                          // Not used.
 +  UCHAR   ucNPL_RT;                         // Round trip delay (MC_SEQ_CAS_TIMING [28:24]:TCL=CL+NPL_RT-2). Always 2.
 +  UCHAR   ucPreamble;                       // [7:4] Write Preamble, [3:0] Read Preamble
 +  UCHAR   ucMemorySize;                     // Total memory size in unit of 16MB for CONFIG_MEMSIZE - bit[23:0] zeros
 +  UCHAR   ucReserved[3];
 +// Memory Module specific values
 +  USHORT  usEMRS2Value;                     // EMRS2/MR2 Value.
 +  USHORT  usEMRS3Value;                     // EMRS3/MR3 Value.
 +  UCHAR   ucMemoryVenderID;                 // [7:4] Revision, [3:0] Vendor code
 +  UCHAR   ucRefreshRateFactor;              // [1:0]=RefreshFactor (00=8ms, 01=16ms, 10=32ms, 11=64ms)
 +  UCHAR   ucFIFODepth;                      // FIFO depth can be detected during vendor detection; hardcoded here per memory
 +  UCHAR   ucCDR_Bandwidth;                  // [3:0]=Read CDR bandwidth, [7:4]=Write CDR bandwidth
 +  char    strMemPNString[20];               // part number string, terminated with '0'.
 +}ATOM_VRAM_MODULE_V7;
  
  typedef struct _ATOM_VRAM_INFO_V2
  {
@@@ -5626,20 -4942,6 +5626,20 @@@ typedef struct _ATOM_VRAM_INFO_V
                                                                                                                                                                                                                                                                                                                 //     ATOM_INIT_REG_BLOCK                              aMemAdjust;
  }ATOM_VRAM_INFO_V4;
  
 +typedef struct _ATOM_VRAM_INFO_HEADER_V2_1
 +{
 +  ATOM_COMMON_TABLE_HEADER   sHeader;
 +  USHORT                     usMemAdjustTblOffset;                           // offset of ATOM_INIT_REG_BLOCK structure for memory vendor specific MC adjust settings
 +  USHORT                     usMemClkPatchTblOffset;                         // offset of ATOM_INIT_REG_BLOCK structure for memory clock specific MC settings
 +  USHORT                     usReserved[4];
 +  UCHAR                      ucNumOfVRAMModule;                              // number of VRAM modules
 +  UCHAR                      ucMemoryClkPatchTblVer;                         // version of memory AC timing register list
 +  UCHAR                      ucVramModuleVer;                                // indicates ATOM_VRAM_MODULE version
 +  UCHAR                      ucReserved;
 +  ATOM_VRAM_MODULE_V7        aVramInfo[ATOM_MAX_NUMBER_OF_VRAM_MODULE];      // sized for allocation; real number of blocks is in ucNumOfVRAMModule
 +}ATOM_VRAM_INFO_HEADER_V2_1;
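 +
 +// Editor's illustrative sketch (not part of the original header): walking the
 +// module list by each entry's usModuleSize rather than sizeof(), since modules
 +// may be packed back to back; this mirrors how earlier table revisions are
 +// parsed and is an assumption for V2.1. The helper name is hypothetical.
 +static ATOM_VRAM_MODULE_V7 *vram_v2_1_module(ATOM_VRAM_INFO_HEADER_V2_1 *hdr, int idx)
 +{
 +      UCHAR *p = (UCHAR *)&hdr->aVramInfo[0];
 +      int i;
 +      for (i = 0; i < idx && i < hdr->ucNumOfVRAMModule; i++)
 +              p += ((ATOM_VRAM_MODULE_V7 *)p)->usModuleSize;
 +      return (ATOM_VRAM_MODULE_V7 *)p;
 +}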
 +
 +
  typedef struct _ATOM_VRAM_GPIO_DETECTION_INFO
  {
    ATOM_COMMON_TABLE_HEADER   sHeader;
@@@ -5880,16 -5182,6 +5880,16 @@@ typedef struct _ASIC_TRANSMITTER_INF
        UCHAR  ucReserved;
  }ASIC_TRANSMITTER_INFO;
  
 +#define ASIC_TRANSMITTER_INFO_CONFIG__DVO_SDR_MODE          0x01
 +#define ASIC_TRANSMITTER_INFO_CONFIG__COHERENT_MODE         0x02
 +#define ASIC_TRANSMITTER_INFO_CONFIG__ENCODEROBJ_ID_MASK    0xc4
 +#define ASIC_TRANSMITTER_INFO_CONFIG__ENCODER_A             0x00
 +#define ASIC_TRANSMITTER_INFO_CONFIG__ENCODER_B             0x04
 +#define ASIC_TRANSMITTER_INFO_CONFIG__ENCODER_C             0x40
 +#define ASIC_TRANSMITTER_INFO_CONFIG__ENCODER_D             0x44
 +#define ASIC_TRANSMITTER_INFO_CONFIG__ENCODER_E             0x80
 +#define ASIC_TRANSMITTER_INFO_CONFIG__ENCODER_F             0x84
 +
  typedef struct _ASIC_ENCODER_INFO
  {
        UCHAR ucEncoderID;
@@@ -5992,28 -5284,6 +5992,28 @@@ typedef struct _DP_ENCODER_SERVICE_PARA
  /* /obsolete */
  #define DP_ENCODER_SERVICE_PS_ALLOCATION                              WRITE_ONE_BYTE_HW_I2C_DATA_PARAMETERS
  
 +
 +typedef struct _DP_ENCODER_SERVICE_PARAMETERS_V2
 +{
 +  USHORT usExtEncoderObjId;   // External Encoder Object Id, output parameter only, used when ucAction = DP_SERVICE_V2_ACTION_DET_EXT_CONNECTION
 +  UCHAR  ucAuxId;
 +  UCHAR  ucAction;
 +  UCHAR  ucSinkType;          // Input and output parameter.
 +  UCHAR  ucHPDId;             // Input parameter, used when ucAction = DP_SERVICE_V2_ACTION_DET_EXT_CONNECTION
 +  UCHAR  ucReserved[2];
 +}DP_ENCODER_SERVICE_PARAMETERS_V2;
 +
 +typedef struct _DP_ENCODER_SERVICE_PS_ALLOCATION_V2
 +{
 +  DP_ENCODER_SERVICE_PARAMETERS_V2 asDPServiceParam;
 +  PROCESS_AUX_CHANNEL_TRANSACTION_PARAMETERS_V2 asAuxParam;
 +}DP_ENCODER_SERVICE_PS_ALLOCATION_V2;
 +
 +// ucAction
 +#define DP_SERVICE_V2_ACTION_GET_SINK_TYPE        0x01
 +#define DP_SERVICE_V2_ACTION_DET_LCD_CONNECTION   0x02
 +
 +
  // DP_TRAINING_TABLE
  #define DPCD_SET_LINKRATE_LANENUM_PATTERN1_TBL_ADDR                           ATOM_DP_TRAINING_TBL_ADDR               
  #define DPCD_SET_SS_CNTL_TBL_ADDR                                                                                                     (ATOM_DP_TRAINING_TBL_ADDR + 8 )
@@@ -6069,7 -5339,6 +6069,7 @@@ typedef struct _SET_HWBLOCK_INSTANCE_PA
  #define SELECT_DCIO_IMPCAL            4
  #define SELECT_DCIO_DIG               6
  #define SELECT_CRTC_PIXEL_RATE        7
 +#define SELECT_VGA_BLK                8
  
  /****************************************************************************/        
  //Portion VI: Definitions for vbios MC scratch registers that driver used
@@@ -6475,17 -5744,7 +6475,17 @@@ typedef struct _ATOM_PPLIB_THERMALCONTR
  #define ATOM_PP_THERMALCONTROLLER_ADT7473   9
  #define ATOM_PP_THERMALCONTROLLER_EXTERNAL_GPIO     11
  #define ATOM_PP_THERMALCONTROLLER_EVERGREEN 12
 +#define ATOM_PP_THERMALCONTROLLER_EMC2103   13  /* 0x0D */ // Only fan control will be implemented, do NOT show this in PPGen.
 +#define ATOM_PP_THERMALCONTROLLER_SUMO      14  /* 0x0E */ // Sumo type, used internally
 +#define ATOM_PP_THERMALCONTROLLER_NISLANDS  15
 +
 +// Thermal controller 'combo type' to use an external controller for Fan control and an internal controller for thermal.
 +// We probably should reserve the bit 0x80 for this use.
 +// To keep the number of these types low we should also use the same code for all ASICs (i.e. do not distinguish RV6xx and RV7xx Internal here).
 +// The driver can pick the correct internal controller based on the ASIC.
 +
  #define ATOM_PP_THERMALCONTROLLER_ADT7473_WITH_INTERNAL   0x89    // ADT7473 Fan Control + Internal Thermal Controller
 +#define ATOM_PP_THERMALCONTROLLER_EMC2103_WITH_INTERNAL   0x8D    // EMC2103 Fan Control + Internal Thermal Controller
  
  typedef struct _ATOM_PPLIB_STATE
  {
@@@ -6582,29 -5841,6 +6582,29 @@@ typedef struct _ATOM_PPLIB_POWERPLAYTAB
      USHORT                     usExtendendedHeaderOffset;
  } ATOM_PPLIB_POWERPLAYTABLE3, *LPATOM_PPLIB_POWERPLAYTABLE3;
  
 +typedef struct _ATOM_PPLIB_POWERPLAYTABLE4
 +{
 +    ATOM_PPLIB_POWERPLAYTABLE3 basicTable3;
 +    ULONG                      ulGoldenPPID;                    // PPGen use only     
 +    ULONG                      ulGoldenRevision;                // PPGen use only
 +    USHORT                     usVddcDependencyOnSCLKOffset;
 +    USHORT                     usVddciDependencyOnMCLKOffset;
 +    USHORT                     usVddcDependencyOnMCLKOffset;
 +    USHORT                     usMaxClockVoltageOnDCOffset;
 +    USHORT                     usReserved[2];  
 +} ATOM_PPLIB_POWERPLAYTABLE4, *LPATOM_PPLIB_POWERPLAYTABLE4;
 +
 +typedef struct _ATOM_PPLIB_POWERPLAYTABLE5
 +{
 +    ATOM_PPLIB_POWERPLAYTABLE4 basicTable4;
 +    ULONG                      ulTDPLimit;
 +    ULONG                      ulNearTDPLimit;
 +    ULONG                      ulSQRampingThreshold;
 +    USHORT                     usCACLeakageTableOffset;         // Points to ATOM_PPLIB_CAC_Leakage_Table
 +    ULONG                      ulCACLeakage;                    // TBD, this parameter is still under discussion.  Change to ulReserved if not needed.
 +    ULONG                      ulReserved;
 +} ATOM_PPLIB_POWERPLAYTABLE5, *LPATOM_PPLIB_POWERPLAYTABLE5;
 +
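Each POWERPLAYTABLE revision embeds its predecessor as its first member, so a pointer to a newer table can still be read through the older layouts. A sketch, assuming the caller has already validated the table revision (the table pointer is illustrative):

    ATOM_PPLIB_POWERPLAYTABLE5 *t5 = (ATOM_PPLIB_POWERPLAYTABLE5 *)table;
    /* V3 fields stay reachable through the nested members */
    USHORT hdr_off = t5->basicTable4.basicTable3.usExtendendedHeaderOffset;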
  //// ATOM_PPLIB_NONCLOCK_INFO::usClassification
  #define ATOM_PPLIB_CLASSIFICATION_UI_MASK          0x0007
  #define ATOM_PPLIB_CLASSIFICATION_UI_SHIFT         0
  #define ATOM_PPLIB_CLASSIFICATION_HDSTATE                0x4000
  #define ATOM_PPLIB_CLASSIFICATION_SDSTATE                0x8000
  
 +//// ATOM_PPLIB_NONCLOCK_INFO::usClassification2
 +#define ATOM_PPLIB_CLASSIFICATION2_LIMITEDPOWERSOURCE_2     0x0001
 +#define ATOM_PPLIB_CLASSIFICATION2_ULV                      0x0002
 +
  //// ATOM_PPLIB_NONCLOCK_INFO::ulCapsAndSettings
  #define ATOM_PPLIB_SINGLE_DISPLAY_ONLY           0x00000001
  #define ATOM_PPLIB_SUPPORTS_VIDEO_PLAYBACK         0x00000002
  #define ATOM_PPLIB_M3ARB_MASK                       0x00060000
  #define ATOM_PPLIB_M3ARB_SHIFT                      17
  
 +#define ATOM_PPLIB_ENABLE_DRR                       0x00080000
 +
 +// remaining 16 bits are reserved
 +typedef struct _ATOM_PPLIB_THERMAL_STATE
 +{
 +    UCHAR   ucMinTemperature;
 +    UCHAR   ucMaxTemperature;
 +    UCHAR   ucThermalAction;
 +}ATOM_PPLIB_THERMAL_STATE, *LPATOM_PPLIB_THERMAL_STATE;
 +
  // Contained in an array starting at the offset
  // in ATOM_PPLIB_POWERPLAYTABLE::usNonClockInfoArrayOffset.
  // referenced from ATOM_PPLIB_STATE_INFO::ucNonClockStateIndex
 +#define ATOM_PPLIB_NONCLOCKINFO_VER1      12
 +#define ATOM_PPLIB_NONCLOCKINFO_VER2      24
  typedef struct _ATOM_PPLIB_NONCLOCK_INFO
  {
        USHORT usClassification;
        UCHAR  ucMaxTemperature;
        ULONG  ulCapsAndSettings;
        UCHAR  ucRequiredPower;
 -      UCHAR  ucUnused1[3];
 +      USHORT usClassification2;
 +      ULONG  ulVCLK;
 +      ULONG  ulDCLK;
 +      UCHAR  ucUnused[5];
  } ATOM_PPLIB_NONCLOCK_INFO;
  
  // Contained in an array starting at the offset
  // in ATOM_PPLIB_POWERPLAYTABLE::usClockInfoArrayOffset.
  // referenced from ATOM_PPLIB_STATE::ucClockStateIndices
 -#define ATOM_PPLIB_NONCLOCKINFO_VER1      12
 -#define ATOM_PPLIB_NONCLOCKINFO_VER2      24
 -
  typedef struct _ATOM_PPLIB_R600_CLOCK_INFO
  {
        USHORT usEngineClockLow;
@@@ -6765,93 -5985,6 +6765,93 @@@ typedef struct _ATOM_PPLIB_RS780_CLOCK_
  #define ATOM_PPLIB_RS780_HTLINKFREQ_LOW        1 
  #define ATOM_PPLIB_RS780_HTLINKFREQ_HIGH       2 
  
 +typedef struct _ATOM_PPLIB_SUMO_CLOCK_INFO{
 +      USHORT usEngineClockLow;  //clock frequency & 0xFFFF. The unit is 10 kHz.
 +      UCHAR  ucEngineClockHigh; //clock frequency >> 16.
 +      UCHAR  vddcIndex;         //2-bit vddc index;
 +      UCHAR  leakage;          //please use 8-bit absolute value, not the 6-bit % value 
 +      //please initialize to 0
 +      UCHAR  rsv;
 +      //please initialize to 0
 +      USHORT rsv1;
 +      //please initialize to 0s
 +      ULONG rsv2[2];
 +}ATOM_PPLIB_SUMO_CLOCK_INFO;
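usEngineClockLow/ucEngineClockHigh split a 24-bit value, in 10 kHz units, across two fields. A sketch of reassembling it (assuming the little-endian table layout used throughout ATOM):

    static u32 sumo_sclk_10khz(const ATOM_PPLIB_SUMO_CLOCK_INFO *ci)
    {
            /* low 16 bits | high 8 bits -> engine clock in 10 kHz units */
            return le16_to_cpu(ci->usEngineClockLow) |
                   ((u32)ci->ucEngineClockHigh << 16);
    }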
 +
 +
 +
 +typedef struct _ATOM_PPLIB_STATE_V2
 +{
 +      //number of valid dpm levels in this state; the driver uses it to calculate the whole
 +      //size of the state: sizeof(ATOM_PPLIB_STATE_V2) + (ucNumDPMLevels - 1) * sizeof(UCHAR)
 +      UCHAR ucNumDPMLevels;
 +      
 +      //an index to the array of nonClockInfos
 +      UCHAR nonClockInfoIndex;
 +      /**
 +      * Driver will read the first ucNumDPMLevels in this array
 +      */
 +      UCHAR clockInfoIndex[1];
 +} ATOM_PPLIB_STATE_V2;
 +
 +typedef struct StateArray{
 +    //how many states we have 
 +    UCHAR ucNumEntries;
 +    
 +    ATOM_PPLIB_STATE_V2 states[1];
 +}StateArray;
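Because ATOM_PPLIB_STATE_V2 ends in a variable-length clockInfoIndex[] array, StateArray entries are not uniformly sized; the size formula from the struct comment must be applied per entry. A sketch of stepping to the next entry (helper name illustrative):

    static ATOM_PPLIB_STATE_V2 *pplib_next_state(ATOM_PPLIB_STATE_V2 *s)
    {
            /* sizeof() already counts clockInfoIndex[0], hence the - 1 */
            size_t sz = sizeof(ATOM_PPLIB_STATE_V2) +
                        (s->ucNumDPMLevels - 1) * sizeof(UCHAR);
            return (ATOM_PPLIB_STATE_V2 *)((u8 *)s + sz);
    }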
 +
 +
 +typedef struct ClockInfoArray{
 +    //how many clock levels we have
 +    UCHAR ucNumEntries;
 +    
 +    //sizeof(ATOM_PPLIB_SUMO_CLOCK_INFO)
 +    UCHAR ucEntrySize;
 +    
 +    //this is for Sumo
 +    ATOM_PPLIB_SUMO_CLOCK_INFO clockInfo[1];
 +}ClockInfoArray;
 +
 +typedef struct NonClockInfoArray{
 +
 +    //how many non-clock levels we have. normally should be same as number of states
 +    UCHAR ucNumEntries;
 +    //sizeof(ATOM_PPLIB_NONCLOCK_INFO)
 +    UCHAR ucEntrySize;
 +    
 +    ATOM_PPLIB_NONCLOCK_INFO nonClockInfo[1];
 +}NonClockInfoArray;
 +
 +typedef struct _ATOM_PPLIB_Clock_Voltage_Dependency_Record
 +{
 +    USHORT usClockLow;
 +    UCHAR  ucClockHigh;
 +    USHORT usVoltage;
 +}ATOM_PPLIB_Clock_Voltage_Dependency_Record;
 +
 +typedef struct _ATOM_PPLIB_Clock_Voltage_Dependency_Table
 +{
 +    UCHAR ucNumEntries;                                                // Number of entries.
 +    ATOM_PPLIB_Clock_Voltage_Dependency_Record entries[1];             // Dynamically allocated entries.
 +}ATOM_PPLIB_Clock_Voltage_Dependency_Table;
 +
 +typedef struct _ATOM_PPLIB_Clock_Voltage_Limit_Record
 +{
 +    USHORT usSclkLow;
 +    UCHAR  ucSclkHigh;
 +    USHORT usMclkLow;
 +    UCHAR  ucMclkHigh;
 +    USHORT usVddc;
 +    USHORT usVddci;
 +}ATOM_PPLIB_Clock_Voltage_Limit_Record;
 +
 +typedef struct _ATOM_PPLIB_Clock_Voltage_Limit_Table
 +{
 +    UCHAR ucNumEntries;                                                // Number of entries.
 +    ATOM_PPLIB_Clock_Voltage_Limit_Record entries[1];                  // Dynamically allocated entries.
 +}ATOM_PPLIB_Clock_Voltage_Limit_Table;
 +
  /**************************************************************************/
  
  
@@@ -46,7 -46,6 +46,6 @@@
  #include <linux/timer.h>
  #include <linux/io.h>
  #include <linux/kfifo.h>
- #include <linux/mutex.h>
  
  #include <asm/byteorder.h>
  
@@@ -760,6 -759,7 +759,6 @@@ int c4iw_flush_rq(struct t4_wq *wq, str
  int c4iw_flush_sq(struct t4_wq *wq, struct t4_cq *cq, int count);
  int c4iw_ev_handler(struct c4iw_dev *rnicp, u32 qid);
  u16 c4iw_rqes_posted(struct c4iw_qp *qhp);
 -int c4iw_post_zb_read(struct c4iw_qp *qhp);
  int c4iw_post_terminate(struct c4iw_qp *qhp, struct t4_cqe *err_cqe);
  u32 c4iw_get_cqid(struct c4iw_rdev *rdev, struct c4iw_dev_ucontext *uctx);
  void c4iw_put_cqid(struct c4iw_rdev *rdev, u32 qid,
@@@ -71,9 -71,6 +71,9 @@@ static void qib_7322_mini_pcs_reset(str
  
  static u32 ahb_mod(struct qib_devdata *, int, int, int, u32, u32);
  static void ibsd_wr_allchans(struct qib_pportdata *, int, unsigned, unsigned);
 +static void serdes_7322_los_enable(struct qib_pportdata *, int);
 +static int serdes_7322_init_old(struct qib_pportdata *);
 +static int serdes_7322_init_new(struct qib_pportdata *);
  
  #define BMASK(msb, lsb) (((1 << ((msb) + 1 - (lsb))) - 1) << (lsb))
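BMASK builds a contiguous mask covering bits lsb through msb inclusive; worked by hand:

    BMASK(14, 11) == ((1 << 4) - 1) << 11 == 0x7800
    BMASK(5, 3)   == ((1 << 3) - 1) << 3  == 0x0038

which is why the serdes writes below always pair a value shifted into place with a BMASK over the same field.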
  
@@@ -114,21 -111,6 +114,21 @@@ static ushort qib_singleport
  module_param_named(singleport, qib_singleport, ushort, S_IRUGO);
  MODULE_PARM_DESC(singleport, "Use only IB port 1; more per-port buffer space");
  
 +/*
 + * Receive header queue sizes
 + */
 +static unsigned qib_rcvhdrcnt;
 +module_param_named(rcvhdrcnt, qib_rcvhdrcnt, uint, S_IRUGO);
 +MODULE_PARM_DESC(rcvhdrcnt, "receive header count");
 +
 +static unsigned qib_rcvhdrsize;
 +module_param_named(rcvhdrsize, qib_rcvhdrsize, uint, S_IRUGO);
 +MODULE_PARM_DESC(rcvhdrsize, "receive header size in 32-bit words");
 +
 +static unsigned qib_rcvhdrentsize;
 +module_param_named(rcvhdrentsize, qib_rcvhdrentsize, uint, S_IRUGO);
 +MODULE_PARM_DESC(rcvhdrentsize, "receive header entry size in 32-bit words");
 +
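All three knobs are ordinary module parameters, so they can be set at load time; assuming the usual ib_qib module name, something like "modprobe ib_qib rcvhdrcnt=2048 rcvhdrsize=32 rcvhdrentsize=16" (values illustrative). Leaving a parameter at its default of 0 keeps the driver-chosen value, as the fallback logic later in this patch shows.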
  #define MAX_ATTEN_LEN 64 /* plenty for any real system */
  /* for read back, default index is ~5m copper cable */
  static char txselect_list[MAX_ATTEN_LEN] = "10";
@@@ -332,7 -314,7 +332,7 @@@ MODULE_PARM_DESC(txselect, 
  #define krp_serdesctrl KREG_IBPORT_IDX(IBSerdesCtrl)
  
  /*
-  * Per-context kernel registers.  Acess only with qib_read_kreg_ctxt()
+  * Per-context kernel registers.  Access only with qib_read_kreg_ctxt()
   * or qib_write_kreg_ctxt()
   */
  #define krc_rcvhdraddr KREG_IDX(RcvHdrAddr0)
@@@ -562,7 -544,6 +562,7 @@@ static void write_tx_serdes_param(struc
  
  #define TXDDS_TABLE_SZ 16 /* number of entries per speed in onchip table */
  #define TXDDS_EXTRA_SZ 13 /* number of extra tx settings entries */
 +#define TXDDS_MFG_SZ 2    /* number of mfg tx settings entries */
  #define SERDES_CHANS 4 /* yes, it's obvious, but one less magic number */
  
  #define H1_FORCE_VAL 8
@@@ -623,7 -604,6 +623,7 @@@ struct qib_chippport_specific 
        u8 ibmalfusesnap;
        struct qib_qsfp_data qsfp_data;
        char epmsgbuf[192]; /* for port error interrupt msg buffer */
 +      u8 bounced;
  };
  
  static struct {
@@@ -1697,8 -1677,6 +1697,8 @@@ static void handle_serdes_issues(struc
            (ibcst & SYM_MASK(IBCStatusA_0, LinkSpeedQDR))) {
                force_h1(ppd);
                ppd->cpspec->qdr_reforce = 1;
 +              if (!ppd->dd->cspec->r1)
 +                      serdes_7322_los_enable(ppd, 0);
        } else if (ppd->cpspec->qdr_reforce &&
                (ibcst & SYM_MASK(IBCStatusA_0, LinkSpeedQDR)) &&
                 (ibclt == IB_7322_LT_STATE_CFGENH ||
              ibclt <= IB_7322_LT_STATE_SLEEPQUIET)))
                adj_tx_serdes(ppd);
  
 -      if (!ppd->cpspec->qdr_dfe_on && ibclt != IB_7322_LT_STATE_LINKUP &&
 -          ibclt <= IB_7322_LT_STATE_SLEEPQUIET) {
 -              ppd->cpspec->qdr_dfe_on = 1;
 -              ppd->cpspec->qdr_dfe_time = 0;
 -              /* On link down, reenable QDR adaptation */
 -              qib_write_kreg_port(ppd, krp_static_adapt_dis(2),
 -                      ppd->dd->cspec->r1 ?
 -                                  QDR_STATIC_ADAPT_DOWN_R1 :
 -                                  QDR_STATIC_ADAPT_DOWN);
 +      if (ibclt != IB_7322_LT_STATE_LINKUP) {
 +              u8 ltstate = qib_7322_phys_portstate(ibcst);
 +              u8 pibclt = (u8)SYM_FIELD(ppd->lastibcstat, IBCStatusA_0,
 +                                        LinkTrainingState);
 +              if (!ppd->dd->cspec->r1 &&
 +                  pibclt == IB_7322_LT_STATE_LINKUP &&
 +                  ltstate != IB_PHYSPORTSTATE_LINK_ERR_RECOVER &&
 +                  ltstate != IB_PHYSPORTSTATE_RECOVERY_RETRAIN &&
 +                  ltstate != IB_PHYSPORTSTATE_RECOVERY_WAITRMT &&
 +                  ltstate != IB_PHYSPORTSTATE_RECOVERY_IDLE)
 +                      /* If the link went down (but not into recovery),
 +                       * turn LOS back on */
 +                      serdes_7322_los_enable(ppd, 1);
 +              if (!ppd->cpspec->qdr_dfe_on &&
 +                  ibclt <= IB_7322_LT_STATE_SLEEPQUIET) {
 +                      ppd->cpspec->qdr_dfe_on = 1;
 +                      ppd->cpspec->qdr_dfe_time = 0;
 +                      /* On link down, reenable QDR adaptation */
 +                      qib_write_kreg_port(ppd, krp_static_adapt_dis(2),
 +                                          ppd->dd->cspec->r1 ?
 +                                          QDR_STATIC_ADAPT_DOWN_R1 :
 +                                          QDR_STATIC_ADAPT_DOWN);
 +                      printk(KERN_INFO QIB_DRV_NAME
 +                              " IB%u:%u re-enabled QDR adaptation "
 +                              "ibclt %x\n", ppd->dd->unit, ppd->port, ibclt);
 +              }
        }
  }
  
 +static int qib_7322_set_ib_cfg(struct qib_pportdata *, int, u32);
 +
  /*
   * This is per-pport error handling.
   * will likely get its own MSIx interrupt (one for each port,
@@@ -1881,23 -1840,7 +1881,23 @@@ static noinline void handle_7322_p_erro
                    IB_PHYSPORTSTATE_DISABLED)
                        qib_set_ib_7322_lstate(ppd, 0,
                               QLOGIC_IB_IBCC_LINKINITCMD_DISABLE);
 -              else
 +              else {
 +                      u32 lstate;
 +                      /*
 +                       * We need the current logical link state before
 +                       * lflags are set in handle_e_ibstatuschanged.
 +                       */
 +                      lstate = qib_7322_iblink_state(ibcs);
 +
 +                      if (IS_QMH(dd) && !ppd->cpspec->bounced &&
 +                          ltstate == IB_PHYSPORTSTATE_LINKUP &&
 +                          (lstate >= IB_PORT_INIT &&
 +                              lstate <= IB_PORT_ACTIVE)) {
 +                              ppd->cpspec->bounced = 1;
 +                              qib_7322_set_ib_cfg(ppd, QIB_IB_CFG_LSTATE,
 +                                      IB_LINKCMD_DOWN | IB_LINKINITCMD_POLL);
 +                      }
 +
                        /*
                         * Since going into a recovery state causes the link
                         * state to go down and since recovery is transitory,
                            ltstate != IB_PHYSPORTSTATE_RECOVERY_WAITRMT &&
                            ltstate != IB_PHYSPORTSTATE_RECOVERY_IDLE)
                                qib_handle_e_ibstatuschanged(ppd, ibcs);
 +              }
        }
        if (*msg && iserr)
                qib_dev_porterr(dd, ppd->port, "%s error\n", msg);
@@@ -2843,6 -2785,7 +2843,6 @@@ static irqreturn_t qib_7322intr(int irq
                                ctxtrbits &= ~rmask;
                                if (dd->rcd[i]) {
                                        qib_kreceive(dd->rcd[i], NULL, &npkts);
 -                                      adjust_rcv_timeout(dd->rcd[i], npkts);
                                }
                        }
                        rmask <<= 1;
@@@ -2892,6 -2835,7 +2892,6 @@@ static irqreturn_t qib_7322pintr(int ir
                       (1ULL << QIB_I_RCVURG_LSB)) << rcd->ctxt);
  
        qib_kreceive(rcd, NULL, &npkts);
 -      adjust_rcv_timeout(rcd, npkts);
  
        return IRQ_HANDLED;
  }
@@@ -3213,10 -3157,6 +3213,10 @@@ static unsigned qib_7322_boardname(stru
        case BOARD_QME7342:
                n = "InfiniPath_QME7342";
                break;
 +      case 8:
 +              n = "InfiniPath_QME7362";
 +              dd->flags |= QIB_HAS_QSFP;
 +              break;
        case 15:
                n = "InfiniPath_QLE7342_TEST";
                dd->flags |= QIB_HAS_QSFP;
@@@ -3535,6 -3475,11 +3535,6 @@@ static void qib_7322_config_ctxts(struc
        nchipctxts = qib_read_kreg32(dd, kr_contextcnt);
        dd->cspec->numctxts = nchipctxts;
        if (qib_n_krcv_queues > 1 && dd->num_pports) {
 -              /*
 -               * Set the mask for which bits from the QPN are used
 -               * to select a context number.
 -               */
 -              dd->qpn_mask = 0x3f;
                dd->first_user_ctxt = NUM_IB_PORTS +
                        (qib_n_krcv_queues - 1) * dd->num_pports;
                if (dd->first_user_ctxt > nchipctxts)
  
        /* kr_rcvegrcnt changes based on the number of contexts enabled */
        dd->cspec->rcvegrcnt = qib_read_kreg32(dd, kr_rcvegrcnt);
 -      dd->rcvhdrcnt = max(dd->cspec->rcvegrcnt,
 -                              dd->num_pports > 1 ? 1024U : 2048U);
 +      if (qib_rcvhdrcnt)
 +              dd->rcvhdrcnt = max(dd->cspec->rcvegrcnt, qib_rcvhdrcnt);
 +      else
 +              dd->rcvhdrcnt = max(dd->cspec->rcvegrcnt,
 +                                  dd->num_pports > 1 ? 1024U : 2048U);
  }
  
  static int qib_7322_get_ib_cfg(struct qib_pportdata *ppd, int which)
@@@ -4060,14 -4002,8 +4060,14 @@@ static int qib_7322_set_ib_table(struc
  }
  
  static void qib_update_7322_usrhead(struct qib_ctxtdata *rcd, u64 hd,
 -                                  u32 updegr, u32 egrhd)
 +                                  u32 updegr, u32 egrhd, u32 npkts)
  {
 +      /*
 +       * Need to write timeout register before updating rcvhdrhead to ensure
 +       * that the timer is enabled on reception of a packet.
 +       */
 +      if (hd >> IBA7322_HDRHEAD_PKTINT_SHIFT)
 +              adjust_rcv_timeout(rcd, npkts);
        qib_write_ureg(rcd->dd, ur_rcvhdrhead, hd, rcd->ctxt);
        qib_write_ureg(rcd->dd, ur_rcvhdrhead, hd, rcd->ctxt);
        if (updegr)
@@@ -5586,7 -5522,7 +5586,7 @@@ static void qsfp_7322_event(struct work
                u64 now = get_jiffies_64();
                if (time_after64(now, pwrup))
                        break;
 -              msleep(1);
 +              msleep(20);
        }
        ret = qib_refresh_qsfp_cache(ppd, &qd->cache);
        /*
@@@ -5643,7 -5579,6 +5643,7 @@@ static void set_no_qsfp_atten(struct qi
        u32 pidx, unit, port, deflt, h1;
        unsigned long val;
        int any = 0, seth1;
 +      int txdds_size;
  
        str = txselect_list;
  
        for (pidx = 0; pidx < dd->num_pports; ++pidx)
                dd->pport[pidx].cpspec->no_eep = deflt;
  
 +      txdds_size = TXDDS_TABLE_SZ + TXDDS_EXTRA_SZ;
 +      if (IS_QME(dd) || IS_QMH(dd))
 +              txdds_size += TXDDS_MFG_SZ;
 +
        while (*nxt && nxt[1]) {
                str = ++nxt;
                unit = simple_strtoul(str, &nxt, 0);
                                ;
                        continue;
                }
 -              if (val >= TXDDS_TABLE_SZ + TXDDS_EXTRA_SZ)
 +              if (val >= txdds_size)
                        continue;
                seth1 = 0;
                h1 = 0; /* gcc thinks it might be used uninitted */
@@@ -5730,11 -5661,10 +5730,11 @@@ static int setup_txselect(const char *s
                return -ENOSPC;
        }
        val = simple_strtoul(str, &n, 0);
 -      if (n == str || val >= (TXDDS_TABLE_SZ + TXDDS_EXTRA_SZ)) {
 +      if (n == str || val >= (TXDDS_TABLE_SZ + TXDDS_EXTRA_SZ +
 +                              TXDDS_MFG_SZ)) {
                printk(KERN_INFO QIB_DRV_NAME
                       "txselect_values must start with a number < %d\n",
 -                      TXDDS_TABLE_SZ + TXDDS_EXTRA_SZ);
 +                      TXDDS_TABLE_SZ + TXDDS_EXTRA_SZ + TXDDS_MFG_SZ);
                return -EINVAL;
        }
        strcpy(txselect_list, str);
@@@ -5880,8 -5810,7 +5880,8 @@@ static void write_7322_initregs(struct 
                unsigned n, regno;
                unsigned long flags;
  
 -              if (!dd->qpn_mask || !dd->pport[pidx].link_speed_supported)
 +              if (dd->n_krcv_queues < 2 ||
 +                      !dd->pport[pidx].link_speed_supported)
                        continue;
  
                ppd = &dd->pport[pidx];
@@@ -6168,10 -6097,8 +6168,10 @@@ static int qib_init_7322_variables(stru
                ppd++;
        }
  
 -      dd->rcvhdrentsize = QIB_RCVHDR_ENTSIZE;
 -      dd->rcvhdrsize = QIB_DFLT_RCVHDRSIZE;
 +      dd->rcvhdrentsize = qib_rcvhdrentsize ?
 +              qib_rcvhdrentsize : QIB_RCVHDR_ENTSIZE;
 +      dd->rcvhdrsize = qib_rcvhdrsize ?
 +              qib_rcvhdrsize : QIB_DFLT_RCVHDRSIZE;
        dd->rhf_offset = dd->rcvhdrentsize - sizeof(u64) / sizeof(u32);
  
        /* we always allocate at least 2048 bytes for eager buffers */
@@@ -6568,7 -6495,7 +6568,7 @@@ static void qib_7322_txchk_change(struc
                /* make sure we see an updated copy next time around */
                sendctrl_7322_mod(dd->pport, QIB_SENDCTRL_AVAIL_BLIP);
                sleeps++;
 -              msleep(1);
 +              msleep(20);
        }
  
        switch (which) {
@@@ -7066,12 -6993,6 +7066,12 @@@ static const struct txdds_ent txdds_ext
        {  0, 1,  0, 12 },      /* QMH7342 backplane settings */
  };
  
 +static const struct txdds_ent txdds_extra_mfg[TXDDS_MFG_SZ] = {
 +      /* amp, pre, main, post */
 +      { 0, 0, 0, 0 },         /* QME7342 mfg settings */
 +      { 0, 0, 0, 6 },         /* QME7342 P2 mfg settings */
 +};
 +
  static const struct txdds_ent *get_atten_table(const struct txdds_ent *txdds,
                                               unsigned atten)
  {
@@@ -7145,16 -7066,6 +7145,16 @@@ static void find_best_ent(struct qib_pp
                *sdr_dds = &txdds_extra_sdr[idx];
                *ddr_dds = &txdds_extra_ddr[idx];
                *qdr_dds = &txdds_extra_qdr[idx];
 +      } else if ((IS_QME(ppd->dd) || IS_QMH(ppd->dd)) &&
 +                 ppd->cpspec->no_eep < (TXDDS_TABLE_SZ + TXDDS_EXTRA_SZ +
 +                                        TXDDS_MFG_SZ)) {
 +              idx = ppd->cpspec->no_eep - (TXDDS_TABLE_SZ + TXDDS_EXTRA_SZ);
 +              printk(KERN_INFO QIB_DRV_NAME
 +                      " IB%u:%u use idx %u into txdds_mfg\n",
 +                      ppd->dd->unit, ppd->port, idx);
 +              *sdr_dds = &txdds_extra_mfg[idx];
 +              *ddr_dds = &txdds_extra_mfg[idx];
 +              *qdr_dds = &txdds_extra_mfg[idx];
        } else {
                /* this shouldn't happen, it's range checked */
                *sdr_dds = txdds_sdr + qib_long_atten;
@@@ -7299,30 -7210,9 +7299,30 @@@ static void ibsd_wr_allchans(struct qib
        }
  }
  
 +static void serdes_7322_los_enable(struct qib_pportdata *ppd, int enable)
 +{
 +      u64 data = qib_read_kreg_port(ppd, krp_serdesctrl);
 +      printk(KERN_INFO QIB_DRV_NAME " IB%u:%u Turning LOS %s\n",
 +              ppd->dd->unit, ppd->port, (enable ? "on" : "off"));
 +      if (enable)
 +              data |= SYM_MASK(IBSerdesCtrl_0, RXLOSEN);
 +      else
 +              data &= ~SYM_MASK(IBSerdesCtrl_0, RXLOSEN);
 +      qib_write_kreg_port(ppd, krp_serdesctrl, data);
 +}
 +
  static int serdes_7322_init(struct qib_pportdata *ppd)
  {
 -      u64 data;
 +      int ret = 0;
 +      if (ppd->dd->cspec->r1)
 +              ret = serdes_7322_init_old(ppd);
 +      else
 +              ret = serdes_7322_init_new(ppd);
 +      return ret;
 +}
 +
 +static int serdes_7322_init_old(struct qib_pportdata *ppd)
 +{
        u32 le_val;
  
        /*
        ibsd_wr_allchans(ppd, 20, (2 << 10), BMASK(12, 10)); /* DDR */
        ibsd_wr_allchans(ppd, 20, (4 << 13), BMASK(15, 13)); /* SDR */
  
 -      data = qib_read_kreg_port(ppd, krp_serdesctrl);
 -      /* Turn off IB latency mode */
 -      data &= ~SYM_MASK(IBSerdesCtrl_0, IB_LAT_MODE);
 -      qib_write_kreg_port(ppd, krp_serdesctrl, data |
 -              SYM_MASK(IBSerdesCtrl_0, RXLOSEN));
 +      serdes_7322_los_enable(ppd, 1);
  
        /* rxbistena; set 0 to avoid effects of it switch later */
        ibsd_wr_allchans(ppd, 9, 0 << 15, 1 << 15);
        return 0;
  }
  
 +static int serdes_7322_init_new(struct qib_pportdata *ppd)
 +{
 +      u64 tstart;
 +      u32 le_val, rxcaldone;
 +      int chan, chan_done = (1 << SERDES_CHANS) - 1;
 +
 +      /*
 +       * Initialize the Tx DDS tables.  Also done every QSFP event,
 +       * for adapters with QSFP
 +       */
 +      init_txdds_table(ppd, 0);
 +
 +      /* Clear cmode-override, may be set from older driver */
 +      ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 10, 0 << 14, 1 << 14);
 +
 +      /* ensure no tx overrides from earlier driver loads */
 +      qib_write_kreg_port(ppd, krp_tx_deemph_override,
 +              SYM_MASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0,
 +              reset_tx_deemphasis_override));
 +
 +      /* START OF LSI SUGGESTED SERDES BRINGUP */
 +      /* Reset - Calibration Setup */
 +      /*       Stop DFE adaptation */
 +      ibsd_wr_allchans(ppd, 1, 0, BMASK(9, 1));
 +      /*       Disable LE1 */
 +      ibsd_wr_allchans(ppd, 13, 0, BMASK(5, 5));
 +      /*       Disable autoadapt for LE1 */
 +      ibsd_wr_allchans(ppd, 1, 0, BMASK(15, 15));
 +      /*       Disable LE2 */
 +      ibsd_wr_allchans(ppd, 13, 0, BMASK(6, 6));
 +      /*       Disable VGA */
 +      ibsd_wr_allchans(ppd, 5, 0, BMASK(0, 0));
 +      /*       Disable AFE Offset Cancel */
 +      ibsd_wr_allchans(ppd, 12, 0, BMASK(12, 12));
 +      /*       Disable Timing Loop */
 +      ibsd_wr_allchans(ppd, 2, 0, BMASK(3, 3));
 +      /*       Disable Frequency Loop */
 +      ibsd_wr_allchans(ppd, 2, 0, BMASK(4, 4));
 +      /*       Disable Baseline Wander Correction */
 +      ibsd_wr_allchans(ppd, 13, 0, BMASK(13, 13));
 +      /*       Disable RX Calibration */
 +      ibsd_wr_allchans(ppd, 4, 0, BMASK(10, 10));
 +      /*       Disable RX Offset Calibration */
 +      ibsd_wr_allchans(ppd, 12, 0, BMASK(4, 4));
 +      /*       Select BB CDR */
 +      ibsd_wr_allchans(ppd, 2, (1 << 15), BMASK(15, 15));
 +      /*       CDR Step Size */
 +      ibsd_wr_allchans(ppd, 5, 0, BMASK(9, 8));
 +      /*       Enable phase Calibration */
 +      ibsd_wr_allchans(ppd, 12, (1 << 5), BMASK(5, 5));
 +      /*       DFE Bandwidth [2:14-12] */
 +      ibsd_wr_allchans(ppd, 2, (4 << 12), BMASK(14, 12));
 +      /*       DFE Config (4 taps only) */
 +      ibsd_wr_allchans(ppd, 16, 0, BMASK(1, 0));
 +      /*       Gain Loop Bandwidth */
 +      if (!ppd->dd->cspec->r1) {
 +              ibsd_wr_allchans(ppd, 12, 1 << 12, BMASK(12, 12));
 +              ibsd_wr_allchans(ppd, 12, 2 << 8, BMASK(11, 8));
 +      } else {
 +              ibsd_wr_allchans(ppd, 19, (3 << 11), BMASK(13, 11));
 +      }
 +      /*       Baseline Wander Correction Gain [13:4-0] (leave as default) */
 +      /*       Baseline Wander Correction Gain [3:7-5] (leave as default) */
 +      /*       Data Rate Select [5:7-6] (leave as default) */
 +      /*       RX Parallel Word Width [3:10-8] (leave as default) */
 +
 +      /* RX RESET */
 +      /*       Single- or Multi-channel reset */
 +      /*       RX Analog reset */
 +      /*       RX Digital reset */
 +      ibsd_wr_allchans(ppd, 0, 0, BMASK(15, 13));
 +      msleep(20);
 +      /*       RX Analog reset */
 +      ibsd_wr_allchans(ppd, 0, (1 << 14), BMASK(14, 14));
 +      msleep(20);
 +      /*       RX Digital reset */
 +      ibsd_wr_allchans(ppd, 0, (1 << 13), BMASK(13, 13));
 +      msleep(20);
 +
 +      /* setup LoS params; these are subsystem, so chan == 5 */
 +      /* LoS filter threshold_count on, ch 0-3, set to 8 */
 +      ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 5, 8 << 11, BMASK(14, 11));
 +      ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 7, 8 << 4, BMASK(7, 4));
 +      ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 8, 8 << 11, BMASK(14, 11));
 +      ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 10, 8 << 4, BMASK(7, 4));
 +
 +      /* LoS filter threshold_count off, ch 0-3, set to 4 */
 +      ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 6, 4 << 0, BMASK(3, 0));
 +      ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 7, 4 << 8, BMASK(11, 8));
 +      ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 9, 4 << 0, BMASK(3, 0));
 +      ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 10, 4 << 8, BMASK(11, 8));
 +
 +      /* LoS filter select enabled */
 +      ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 9, 1 << 15, 1 << 15);
 +
 +      /* LoS target data:  SDR=4, DDR=2, QDR=1 */
 +      ibsd_wr_allchans(ppd, 14, (1 << 3), BMASK(5, 3)); /* QDR */
 +      ibsd_wr_allchans(ppd, 20, (2 << 10), BMASK(12, 10)); /* DDR */
 +      ibsd_wr_allchans(ppd, 20, (4 << 13), BMASK(15, 13)); /* SDR */
 +
 +      /* Turn on LOS on initial SERDES init */
 +      serdes_7322_los_enable(ppd, 1);
 +      /* FLoop LOS gate: PPM filter  enabled */
 +      ibsd_wr_allchans(ppd, 38, 0 << 10, 1 << 10);
 +
 +      /* RX LATCH CALIBRATION */
 +      /*       Enable Eyefinder Phase Calibration latch */
 +      ibsd_wr_allchans(ppd, 15, 1, BMASK(0, 0));
 +      /*       Enable RX Offset Calibration latch */
 +      ibsd_wr_allchans(ppd, 12, (1 << 4), BMASK(4, 4));
 +      msleep(20);
 +      /*       Start Calibration */
 +      ibsd_wr_allchans(ppd, 4, (1 << 10), BMASK(10, 10));
 +      tstart = get_jiffies_64();
 +      while (chan_done &&
 +             !time_after64(get_jiffies_64(),
 +                           tstart + msecs_to_jiffies(500))) {
 +              msleep(20);
 +              for (chan = 0; chan < SERDES_CHANS; ++chan) {
 +                      rxcaldone = ahb_mod(ppd->dd, IBSD(ppd->hw_pidx),
 +                                          (chan + (chan >> 1)),
 +                                          25, 0, 0);
 +                      if ((~rxcaldone & (u32)BMASK(9, 9)) == 0 &&
 +                          (~chan_done & (1 << chan)) == 0)
 +                              chan_done &= ~(1 << chan);
 +              }
 +      }
 +      if (chan_done) {
 +              printk(KERN_INFO QIB_DRV_NAME
 +                       " Serdes %d calibration not done after .5 sec: 0x%x\n",
 +                       IBSD(ppd->hw_pidx), chan_done);
 +      } else {
 +              for (chan = 0; chan < SERDES_CHANS; ++chan) {
 +                      rxcaldone = ahb_mod(ppd->dd, IBSD(ppd->hw_pidx),
 +                                          (chan + (chan >> 1)),
 +                                          25, 0, 0);
 +                      if ((~rxcaldone & (u32)BMASK(10, 10)) == 0)
 +                              printk(KERN_INFO QIB_DRV_NAME
 +                                       " Serdes %d chan %d calibration "
 +                                       "failed\n", IBSD(ppd->hw_pidx), chan);
 +              }
 +      }
 +
 +      /*       Turn off Calibration */
 +      ibsd_wr_allchans(ppd, 4, 0, BMASK(10, 10));
 +      msleep(20);
 +
 +      /* BRING RX UP */
 +      /*       Set LE2 value (May be overridden in qsfp_7322_event) */
 +      le_val = IS_QME(ppd->dd) ? LE2_QME : LE2_DEFAULT;
 +      ibsd_wr_allchans(ppd, 13, (le_val << 7), BMASK(9, 7));
 +      /*       Set LE2 Loop bandwidth */
 +      ibsd_wr_allchans(ppd, 3, (7 << 5), BMASK(7, 5));
 +      /*       Enable LE2 */
 +      ibsd_wr_allchans(ppd, 13, (1 << 6), BMASK(6, 6));
 +      msleep(20);
 +      /*       Enable H0 only */
 +      ibsd_wr_allchans(ppd, 1, 1, BMASK(9, 1));
 +      /* gain hi stop 32 (22) (6:1) lo stop 7 (10:7) target 22 (13) (15:11) */
 +      le_val = (ppd->dd->cspec->r1 || IS_QME(ppd->dd)) ? 0xb6c0 : 0x6bac;
 +      ibsd_wr_allchans(ppd, 21, le_val, 0xfffe);
 +      /*       Enable VGA */
 +      ibsd_wr_allchans(ppd, 5, 0, BMASK(0, 0));
 +      msleep(20);
 +      /*       Set Frequency Loop Bandwidth */
 +      ibsd_wr_allchans(ppd, 2, (7 << 5), BMASK(8, 5));
 +      /*       Enable Frequency Loop */
 +      ibsd_wr_allchans(ppd, 2, (1 << 4), BMASK(4, 4));
 +      /*       Set Timing Loop Bandwidth */
 +      ibsd_wr_allchans(ppd, 2, 0, BMASK(11, 9));
 +      /*       Enable Timing Loop */
 +      ibsd_wr_allchans(ppd, 2, (1 << 3), BMASK(3, 3));
 +      msleep(50);
 +      /*       Enable DFE
 +       *       Set receive adaptation mode.  SDR and DDR adaptation are
 +       *       always on, and QDR is initially enabled; later disabled.
 +       */
 +      qib_write_kreg_port(ppd, krp_static_adapt_dis(0), 0ULL);
 +      qib_write_kreg_port(ppd, krp_static_adapt_dis(1), 0ULL);
 +      qib_write_kreg_port(ppd, krp_static_adapt_dis(2),
 +                          ppd->dd->cspec->r1 ?
 +                          QDR_STATIC_ADAPT_DOWN_R1 : QDR_STATIC_ADAPT_DOWN);
 +      ppd->cpspec->qdr_dfe_on = 1;
 +      /*       Disable LE1  */
 +      ibsd_wr_allchans(ppd, 13, (0 << 5), (1 << 5));
 +      /*       Disable auto adapt for LE1 */
 +      ibsd_wr_allchans(ppd, 1, (0 << 15), BMASK(15, 15));
 +      msleep(20);
 +      /*       Enable AFE Offset Cancel */
 +      ibsd_wr_allchans(ppd, 12, (1 << 12), BMASK(12, 12));
 +      /*       Enable Baseline Wander Correction */
 +      ibsd_wr_allchans(ppd, 12, (1 << 13), BMASK(13, 13));
 +      /* Termination: rxtermctrl_r2d addr 11 bits [12:11] = 1 */
 +      ibsd_wr_allchans(ppd, 11, (1 << 11), BMASK(12, 11));
 +      /* VGA output common mode */
 +      ibsd_wr_allchans(ppd, 12, (3 << 2), BMASK(3, 2));
 +
 +      return 0;
 +}
 +
  /* start adjust QMH serdes parameters */
  
  static void set_man_code(struct qib_pportdata *ppd, int chan, int code)
@@@ -214,6 -214,7 +214,6 @@@ config SERIO_AMS_DELT
        tristate "Amstrad Delta (E3) mailboard support"
        depends on MACH_AMS_DELTA
        default y
 -      select AMS_DELTA_FIQ
        ---help---
          Say Y here if you have an E3 and want to use its mailboard,
          or any standard AT keyboard connected to the mailboard port.
@@@ -229,7 -230,7 +229,7 @@@ config SERIO_PS2MUL
        tristate "TQC PS/2 multiplexer"
        help
          Say Y here if you have the PS/2 line multiplexer like the one
-         present on TQC boads.
+         present on TQC boards.
  
          To compile this driver as a module, choose M here: the
          module will be called ps2mult.
@@@ -610,7 -610,7 +610,7 @@@ config TOUCHSCREEN_USB_ZYTRONI
  
  config TOUCHSCREEN_USB_ETT_TC45USB
        default y
-       bool "ET&T USB series TC4UM/TC5UH touchscreen controler support" if EMBEDDED
+       bool "ET&T USB series TC4UM/TC5UH touchscreen controller support" if EMBEDDED
        depends on TOUCHSCREEN_USB_COMPOSITE
  
  config TOUCHSCREEN_USB_NEXIO
@@@ -659,17 -659,17 +659,17 @@@ config TOUCHSCREEN_PCA
          To compile this driver as a module, choose M here: the
          module will be called pcap_ts.
  
 -config TOUCHSCREEN_TPS6507X
 -      tristate "TPS6507x based touchscreens"
 +config TOUCHSCREEN_ST1232
 +      tristate "Sitronix ST1232 touchscreen controllers"
        depends on I2C
        help
 -        Say Y here if you have a TPS6507x based touchscreen
 -        controller.
 +        Say Y here if you want to support Sitronix ST1232
 +        touchscreen controller.
  
          If unsure, say N.
  
          To compile this driver as a module, choose M here: the
 -        module will be called tps6507x_ts.
 +        module will be called st1232_ts.
  
  config TOUCHSCREEN_STMPE
        tristate "STMicroelectronics STMPE touchscreens"
          To compile this driver as a module, choose M here: the
          module will be called stmpe-ts.
  
 +config TOUCHSCREEN_TPS6507X
 +      tristate "TPS6507x based touchscreens"
 +      depends on I2C
 +      help
 +        Say Y here if you have a TPS6507x based touchscreen
 +        controller.
 +
 +        If unsure, say N.
 +
 +        To compile this driver as a module, choose M here: the
 +        module will be called tps6507x_ts.
 +
  endif
@@@ -24,7 -24,7 +24,7 @@@
  #include <linux/slab.h>
  #include <linux/kfifo.h>
  #include <media/cx25840.h>
 -#include <media/ir-core.h>
 +#include <media/rc-core.h>
  
  #include "cx25840-core.h"
  
@@@ -261,7 -261,7 +261,7 @@@ static u16 ns_to_pulse_width_count(u32 
        u32 rem;
  
        /*
-        * The 2 lsb's of the pulse width timer count are not accessable, hence
+        * The 2 lsb's of the pulse width timer count are not accessible, hence
         * the (1 << 2)
         */
        n = ((u64) ns) * CX25840_IR_REFCLK_FREQ / 1000000; /* millicycles */
@@@ -1286,7 -1286,7 +1286,7 @@@ static int omap_vout_release(struct fil
        videobuf_mmap_free(q);
  
        /* Even if apply changes fails we should continue
-          freeing allocated memeory */
+          freeing allocated memory */
        if (vout->streaming) {
                u32 mask = 0;
  
@@@ -2230,6 -2230,7 +2230,6 @@@ static int __init omap_vout_setup_video
  
        strlcpy(vfd->name, VOUT_NAME, sizeof(vfd->name));
  
 -      /* need to register for a VID_HARDWARE_* ID in videodev.h */
        vfd->fops = &omap_vout_fops;
        vfd->v4l2_dev = &vout->vid_dev->v4l2_dev;
        mutex_init(&vout->lock);
@@@ -40,12 -40,12 +40,12 @@@ MODULE_AUTHOR("Steven Toth <stoth@kerne
  MODULE_LICENSE("GPL");
  
  /*
 -  1 Basic
 -  2
 -  4 i2c
 -  8 api
 - 16 cmd
 - 32 bus
 + *  1 Basic
 + *  2
 + *  4 i2c
 + *  8 api
 + * 16 cmd
 + * 32 bus
   */
  
  unsigned int saa_debug;
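The levels above are bit flags and combine: assuming the module exposes saa_debug as the usual "debug" parameter, "modprobe saa7164 debug=12" would enable i2c (4) plus api (8) tracing.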
@@@ -82,8 -82,7 +82,8 @@@ MODULE_PARM_DESC(crc_checking, "enable 
  
  unsigned int guard_checking = 1;
  module_param(guard_checking, int, 0644);
 -MODULE_PARM_DESC(guard_checking, "enable dma sanity checking for buffer overruns");
 +MODULE_PARM_DESC(guard_checking,
 +      "enable dma sanity checking for buffer overruns");
  
  static unsigned int saa7164_devcount;
  
@@@ -124,9 -123,7 +124,9 @@@ static void saa7164_pack_verifier(struc
                if ((*(p + i + 0) != 0x00) || (*(p + i + 1) != 0x00) ||
                        (*(p + i + 2) != 0x01) || (*(p + i + 3) != 0xBA)) {
                        printk(KERN_ERR "No pack at 0x%x\n", i);
 -//                    saa7164_dumphex16FF(buf->port->dev, (p + i), 32);
 +#if 0
 +                      saa7164_dumphex16FF(buf->port->dev, (p + i), 32);
 +#endif
                }
        }
  }
@@@ -202,16 -199,19 +202,16 @@@ static void saa7164_histogram_reset(str
        strcpy(hg->name, name);
  
        /* First 30ms x 1ms */
 -      for (i = 0; i < 30; i++) {
 +      for (i = 0; i < 30; i++)
                hg->counter1[0 + i].val = i;
 -      }
  
        /* 30 - 200ms x 10ms  */
 -      for (i = 0; i < 18; i++) {
 +      for (i = 0; i < 18; i++)
                hg->counter1[30 + i].val = 30 + (i * 10);
 -      }
  
        /* 200 - 2000ms x 100ms  */
 -      for (i = 0; i < 15; i++) {
 +      for (i = 0; i < 15; i++)
                hg->counter1[48 + i].val = 200 + (i * 200);
 -      }
  
        /* Catch all massive value (2secs) */
        hg->counter1[55].val = 2000;
@@@ -315,9 -315,7 +315,9 @@@ static void saa7164_work_enchandler_hel
                                        (*(p + buf->actual_size + 0x13) != 0xff)) {
                                                printk(KERN_ERR "%s() buf %p guard buffer breach\n",
                                                        __func__, buf);
 -//                                            saa7164_dumphex16FF(dev, (p + buf->actual_size) - 32 , 64);
 +#if 0
 +                                              saa7164_dumphex16FF(dev, (p + buf->actual_size) - 32 , 64);
 +#endif
                                }
                        }
  
@@@ -655,8 -653,8 +655,8 @@@ static irqreturn_t saa7164_irq(int irq
                goto out;
        }
  
-       /* Check that the hardware is accessable. If the status bytes are
-        * 0xFF then the device is not accessable, the the IRQ belongs
+       /* Check that the hardware is accessible. If the status bytes are
+        * 0xFF then the device is not accessible, the IRQ belongs
         * to another driver.
         * 4 x u32 interrupt registers.
         */
@@@ -963,7 -961,9 +963,7 @@@ static int saa7164_port_init(struct saa
  
                /* We need a deferred interrupt handler for cmd handling */
                INIT_WORK(&port->workenc, saa7164_work_enchandler);
 -      }
 -      else
 -      if ((portnr == SAA7164_PORT_VBI1) || (portnr == SAA7164_PORT_VBI2)) {
 +      } else if ((portnr == SAA7164_PORT_VBI1) || (portnr == SAA7164_PORT_VBI2)) {
                port->type = SAA7164_MPEG_VBI;
  
                /* We need a deferred interrupt handler for cmd handling */
@@@ -1001,7 -1001,7 +1001,7 @@@ static int saa7164_dev_setup(struct saa
        atomic_inc(&dev->refcount);
        dev->nr = saa7164_devcount++;
  
 -      sprintf(dev->name, "saa7164[%d]", dev->nr);
 +      snprintf(dev->name, sizeof(dev->name), "saa7164[%d]", dev->nr);
  
        mutex_lock(&devlist);
        list_add_tail(&dev->devlist, &saa7164_devlist);
@@@ -1169,7 -1169,7 +1169,7 @@@ static int saa7164_proc_open(struct ino
        return single_open(filp, saa7164_proc_show, NULL);
  }
  
 -static struct file_operations saa7164_proc_fops = {
 +static const struct file_operations saa7164_proc_fops = {
        .open           = saa7164_proc_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
  #include <linux/pci.h>
  #include <linux/gpio.h>
  #include <linux/interrupt.h>
- #include <linux/pci.h>
  #include <linux/platform_device.h>
  #include <linux/videodev2.h>
  #include <media/v4l2-device.h>
  #include <media/v4l2-ioctl.h>
  #include <media/v4l2-chip-ident.h>
  #include <media/videobuf-dma-sg.h>
- #include <linux/device.h>
  #include <linux/delay.h>
  #include <linux/dma-mapping.h>
  #include <linux/pm_qos_params.h>
@@@ -1161,6 -1159,16 +1159,6 @@@ out
        return ret;
  }
  
 -#ifdef CONFIG_VIDEO_V4L1_COMPAT
 -static int viacam_vidiocgmbuf(struct file *filp, void *priv,
 -              struct video_mbuf *mbuf)
 -{
 -      struct via_camera *cam = priv;
 -
 -      return videobuf_cgmbuf(&cam->vb_queue, mbuf, 6);
 -}
 -#endif
 -
  /* G/S_PARM */
  
  static int viacam_g_parm(struct file *filp, void *priv,
@@@ -1241,6 -1249,9 +1239,6 @@@ static const struct v4l2_ioctl_ops viac
        .vidioc_s_parm          = viacam_s_parm,
        .vidioc_enum_framesizes = viacam_enum_framesizes,
        .vidioc_enum_frameintervals = viacam_enum_frameintervals,
 -#ifdef CONFIG_VIDEO_V4L1_COMPAT
 -      .vidiocgmbuf            = viacam_vidiocgmbuf,
 -#endif
  };
  
  /*----------------------------------------------------------------------------*/
diff --combined drivers/mmc/host/Kconfig
@@@ -83,7 -83,7 +83,7 @@@ config MMC_RICOH_MM
  
  config MMC_SDHCI_OF
        tristate "SDHCI support on OpenFirmware platforms"
 -      depends on MMC_SDHCI && PPC_OF
 +      depends on MMC_SDHCI && OF
        help
          This selects the OF support for Secure Digital Host Controller
          Interfaces.
@@@ -93,7 -93,6 +93,7 @@@
  config MMC_SDHCI_OF_ESDHC
        bool "SDHCI OF support for the Freescale eSDHC controller"
        depends on MMC_SDHCI_OF
 +      depends on PPC_OF
        select MMC_SDHCI_BIG_ENDIAN_32BIT_BYTE_SWAPPER
        help
          This selects the Freescale eSDHC controller support.
  config MMC_SDHCI_OF_HLWD
        bool "SDHCI OF support for the Nintendo Wii SDHCI controllers"
        depends on MMC_SDHCI_OF
 +      depends on PPC_OF
        select MMC_SDHCI_BIG_ENDIAN_32BIT_BYTE_SWAPPER
        help
          This selects the Secure Digital Host Controller Interface (SDHCI)
@@@ -142,27 -140,6 +142,27 @@@ config MMC_SDHCI_ESDHC_IM
  
          If unsure, say N.
  
 +config MMC_SDHCI_DOVE
 +      bool "SDHCI support on Marvell's Dove SoC"
 +      depends on ARCH_DOVE
 +      depends on MMC_SDHCI_PLTFM
 +      select MMC_SDHCI_IO_ACCESSORS
 +      help
 +        This selects the Secure Digital Host Controller Interface in
 +        Marvell's Dove SoC.
 +
 +        If unsure, say N.
 +
 +config MMC_SDHCI_TEGRA
 +      tristate "SDHCI platform support for the Tegra SD/MMC Controller"
 +      depends on MMC_SDHCI_PLTFM && ARCH_TEGRA
 +      select MMC_SDHCI_IO_ACCESSORS
 +      help
 +        This selects the Tegra SD/MMC controller. If you have a Tegra
 +        platform with SD or MMC devices, say Y or M here.
 +
 +        If unsure, say N.
 +
  config MMC_SDHCI_S3C
        tristate "SDHCI support on Samsung S3C SoC"
        depends on MMC_SDHCI && PLAT_SAMSUNG
@@@ -481,27 -458,11 +481,27 @@@ config SDH_BFIN_MISSING_CMD_PULLUP_WORK
        help
          If you say yes here SD-Cards may work on the EZkit.
  
 +config MMC_DW
 +      tristate "Synopsys DesignWare Memory Card Interface"
 +      depends on ARM
 +      help
 +        This selects support for the Synopsys DesignWare Mobile Storage IP
 +        block; it provides host support for SD and MMC interfaces, in both
 +        PIO and external DMA modes.
 +
 +config MMC_DW_IDMAC
 +      bool "Internal DMAC interface"
 +      depends on MMC_DW
 +      help
 +        This selects support for the internal DMAC block within the Synopsys
 +        Designware Mobile Storage IP block. This disables the external DMA
 +        interface.
 +
  config MMC_SH_MMCIF
        tristate "SuperH Internal MMCIF support"
        depends on MMC_BLOCK && (SUPERH || ARCH_SHMOBILE)
        help
-         This selects the MMC Host Interface controler (MMCIF).
+         This selects the MMC Host Interface controller (MMCIF).
  
          This driver supports MMCIF in sh7724/sh7757/sh7372.
  
@@@ -13,8 -13,6 +13,8 @@@
  
  #ifndef BNX2X_H
  #define BNX2X_H
 +#include <linux/netdevice.h>
 +#include <linux/types.h>
  
  /* compilation time flags */
  
   * (you will need to reboot afterwards) */
  /* #define BNX2X_STOP_ON_ERROR */
  
 -#define DRV_MODULE_VERSION      "1.60.01-0"
 -#define DRV_MODULE_RELDATE      "2010/11/12"
 +#define DRV_MODULE_VERSION      "1.62.00-3"
 +#define DRV_MODULE_RELDATE      "2010/12/21"
  #define BNX2X_BC_VER            0x040200
  
  #define BNX2X_MULTI_QUEUE
  
  #define BNX2X_NEW_NAPI
  
 -
 +#if defined(CONFIG_DCB)
 +#define BCM_DCB
 +#endif
  #if defined(CONFIG_CNIC) || defined(CONFIG_CNIC_MODULE)
  #define BCM_CNIC 1
  #include "../cnic_if.h"
@@@ -52,7 -48,6 +52,7 @@@
  #include "bnx2x_fw_defs.h"
  #include "bnx2x_hsi.h"
  #include "bnx2x_link.h"
 +#include "bnx2x_dcb.h"
  #include "bnx2x_stats.h"
  
  /* error/debug prints */
@@@ -204,25 -199,10 +204,25 @@@ void bnx2x_panic_dump(struct bnx2x *bp)
  /* EQ completions */
  #define HC_SP_INDEX_EQ_CONS                   7
  
 +/* FCoE L2 connection completions */
 +#define HC_SP_INDEX_ETH_FCOE_TX_CQ_CONS               6
 +#define HC_SP_INDEX_ETH_FCOE_RX_CQ_CONS               4
  /* iSCSI L2 */
  #define HC_SP_INDEX_ETH_ISCSI_CQ_CONS         5
  #define HC_SP_INDEX_ETH_ISCSI_RX_CQ_CONS      1
  
 +/* Special clients parameters */
 +
 +/* SB indices */
 +/* FCoE L2 */
 +#define BNX2X_FCOE_L2_RX_INDEX \
 +      (&bp->def_status_blk->sp_sb.\
 +      index_values[HC_SP_INDEX_ETH_FCOE_RX_CQ_CONS])
 +
 +#define BNX2X_FCOE_L2_TX_INDEX \
 +      (&bp->def_status_blk->sp_sb.\
 +      index_values[HC_SP_INDEX_ETH_FCOE_TX_CQ_CONS])
 +
  /**
   *  CIDs and CLIDs:
   *  CLIDs below is a CLID for func 0, then the CLID for other
  #define BNX2X_ISCSI_ETH_CL_ID         17
  #define BNX2X_ISCSI_ETH_CID           17
  
 +/* FCoE L2 */
 +#define BNX2X_FCOE_ETH_CL_ID          18
 +#define BNX2X_FCOE_ETH_CID            18
 +
  /** Additional rings budgeting */
  #ifdef BCM_CNIC
  #define CNIC_CONTEXT_USE              1
 +#define FCOE_CONTEXT_USE              1
  #else
  #define CNIC_CONTEXT_USE              0
 +#define FCOE_CONTEXT_USE              0
  #endif /* BCM_CNIC */
 +#define NONE_ETH_CONTEXT_USE  (FCOE_CONTEXT_USE)
  
  #define AEU_IN_ATTN_BITS_PXPPCICLOCKCLIENT_PARITY_ERROR \
        AEU_INPUTS_ATTN_BITS_PXPPCICLOCKCLIENT_PARITY_ERROR
@@@ -428,17 -401,6 +428,17 @@@ struct bnx2x_fastpath 
  };
  
  #define bnx2x_fp(bp, nr, var)         (bp->fp[nr].var)
 +#ifdef BCM_CNIC
 +/* FCoE L2 `fastpath' is right after the eth entries */
 +#define FCOE_IDX                      BNX2X_NUM_ETH_QUEUES(bp)
 +#define bnx2x_fcoe_fp(bp)             (&bp->fp[FCOE_IDX])
 +#define bnx2x_fcoe(bp, var)           (bnx2x_fcoe_fp(bp)->var)
 +#define IS_FCOE_FP(fp)                        (fp->index == FCOE_IDX)
 +#define IS_FCOE_IDX(idx)              ((idx) == FCOE_IDX)
 +#else
 +#define IS_FCOE_FP(fp)                false
 +#define IS_FCOE_IDX(idx)      false
 +#endif
  
  
  /* MC hsi */
@@@ -636,7 -598,6 +636,7 @@@ struct bnx2x_common 
  
  #define CHIP_METAL(bp)                        (bp->common.chip_id & 0x00000ff0)
  #define CHIP_BOND_ID(bp)              (bp->common.chip_id & 0x0000000f)
 +#define CHIP_PARITY_ENABLED(bp)       (CHIP_IS_E1(bp) || CHIP_IS_E1H(bp))
  
        int                     flash_size;
  #define NVRAM_1MB_SIZE                        0x20000 /* 1M bit in bytes */
@@@ -708,14 -669,8 +708,14 @@@ struct bnx2x_port 
  enum {
        CAM_ETH_LINE = 0,
        CAM_ISCSI_ETH_LINE,
 -      CAM_MAX_PF_LINE = CAM_ISCSI_ETH_LINE
 +      CAM_FIP_ETH_LINE,
 +      CAM_FIP_MCAST_LINE,
 +      CAM_MAX_PF_LINE = CAM_FIP_MCAST_LINE
  };
 +/* number of MACs per function in NIG memory - used for SI mode */
 +#define NIG_LLH_FUNC_MEM_SIZE         16
 +/* number of entries in NIG_REG_LLHX_FUNC_MEM */
 +#define NIG_LLH_FUNC_MEM_MAX_OFFSET   8
  
  #define BNX2X_VF_ID_INVALID   0xFF
  
   */
  #define L2_FP_COUNT(cid_cnt)  ((cid_cnt) - CNIC_CONTEXT_USE)
  
 +/*
 + * The number of FP-SB allocated by the driver == max number of regular L2
 + * queues + 1 for the CNIC which also consumes an FP-SB
 + */
 +#define FP_SB_COUNT(cid_cnt)  ((cid_cnt) - FCOE_CONTEXT_USE)
 +#define NUM_IGU_SB_REQUIRED(cid_cnt) \
 +                              (FP_SB_COUNT(cid_cnt) - NONE_ETH_CONTEXT_USE)
 +
  union cdu_context {
        struct eth_context eth;
        char pad[1024];
  
  #ifdef BCM_CNIC
  #define CNIC_ISCSI_CID_MAX    256
 -#define CNIC_CID_MAX          (CNIC_ISCSI_CID_MAX)
 +#define CNIC_FCOE_CID_MAX     2048
 +#define CNIC_CID_MAX          (CNIC_ISCSI_CID_MAX + CNIC_FCOE_CID_MAX)
  #define CNIC_ILT_LINES                DIV_ROUND_UP(CNIC_CID_MAX, ILT_PAGE_CIDS)
  #endif
  
@@@ -824,8 -770,6 +824,8 @@@ struct bnx2x_slowpath 
  
        u32                             wb_comp;
        u32                             wb_data[4];
 +      /* pfc configuration for DCBX ramrod */
 +      struct flow_control_configuration pfc_config;
  };
  
  #define bnx2x_sp(bp, var)             (&bp->slowpath->var)
@@@ -974,10 -918,6 +974,10 @@@ struct bnx2x 
  #define DISABLE_MSI_FLAG              0x200
  #define BP_NOMCP(bp)                  (bp->flags & NO_MCP_FLAG)
  #define MF_FUNC_DIS                   0x1000
 +#define FCOE_MACS_SET                 0x2000
 +#define NO_FCOE_FLAG                  0x4000
 +
 +#define NO_FCOE(bp)           ((bp)->flags & NO_FCOE_FLAG)
  
        int                     pf_num; /* absolute PF number */
        int                     pfid;   /* per-path PF number */
        u16                     mf_ov;
        u8                      mf_mode;
  #define IS_MF(bp)             (bp->mf_mode != 0)
 +#define IS_MF_SI(bp)          (bp->mf_mode == MULTI_FUNCTION_SI)
 +#define IS_MF_SD(bp)          (bp->mf_mode == MULTI_FUNCTION_SD)
  
        u8                      wol;
  
  #define BNX2X_ACCEPT_ALL_UNICAST      0x0004
  #define BNX2X_ACCEPT_ALL_MULTICAST    0x0008
  #define BNX2X_ACCEPT_BROADCAST                0x0010
 +#define BNX2X_ACCEPT_UNMATCHED_UCAST  0x0020
  #define BNX2X_PROMISCUOUS_MODE                0x10000
  
        u32                     rx_mode;
        u16                     cnic_kwq_pending;
        u16                     cnic_spq_pending;
        struct mutex            cnic_mutex;
 -      u8                      iscsi_mac[6];
 +      u8                      iscsi_mac[ETH_ALEN];
 +      u8                      fip_mac[ETH_ALEN];
  #endif
  
        int                     dmae_ready;
  
        char                    fw_ver[32];
        const struct firmware   *firmware;
 +      /* LLDP params */
 +      struct bnx2x_config_lldp_params         lldp_config_params;
 +
 +      /* DCB support on/off */
 +      u16 dcb_state;
 +#define BNX2X_DCB_STATE_OFF                   0
 +#define BNX2X_DCB_STATE_ON                    1
 +
 +      /* DCBX engine mode */
 +      int dcbx_enabled;
 +#define BNX2X_DCBX_ENABLED_OFF                        0
 +#define BNX2X_DCBX_ENABLED_ON_NEG_OFF         1
 +#define BNX2X_DCBX_ENABLED_ON_NEG_ON          2
 +#define BNX2X_DCBX_ENABLED_INVALID            (-1)
 +
 +      bool dcbx_mode_uset;
 +
 +      struct bnx2x_config_dcbx_params         dcbx_config_params;
 +
 +      struct bnx2x_dcbx_port_params           dcbx_port_params;
 +      int                                     dcb_version;
 +
 +      /* DCBX Negotiation results */
 +      struct dcbx_features                    dcbx_local_feat;
 +      u32                                     dcbx_error;
  };
  
  /**
  #define RSS_IPV6_TCP_CAP      0x0008
  
  #define BNX2X_NUM_QUEUES(bp)  (bp->num_queues)
 +#define BNX2X_NUM_ETH_QUEUES(bp) (BNX2X_NUM_QUEUES(bp) - NONE_ETH_CONTEXT_USE)
 +
 +/* ethtool statistics are displayed for all regular ethernet queues and the
 + * fcoe L2 queue if not disabled
 + */
 +#define BNX2X_NUM_STAT_QUEUES(bp) (NO_FCOE(bp) ? BNX2X_NUM_ETH_QUEUES(bp) : \
 +                         (BNX2X_NUM_ETH_QUEUES(bp) + FCOE_CONTEXT_USE))
 +
  #define is_multi(bp)          (BNX2X_NUM_QUEUES(bp) > 1)
  
  #define BNX2X_MAX_QUEUES(bp)  (bp->igu_sb_cnt - CNIC_CONTEXT_USE)
 -#define is_eth_multi(bp)      (BNX2X_NUM_ETH_QUEUES(bp) > 1)
  
  #define RSS_IPV4_CAP_MASK                                             \
        TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV4_CAPABILITY
@@@ -1344,7 -1248,6 +1344,7 @@@ struct bnx2x_client_ramrod_params 
        u16 cl_id;
        u32 cid;
        u8 poll;
 +#define CLIENT_IS_FCOE                        0x01
  #define CLIENT_IS_LEADING_RSS         0x02
        u8 flags;
  };
@@@ -1377,54 -1280,11 +1377,54 @@@ struct bnx2x_func_init_params 
        u16             spq_prod;       /* valid iff FUNC_FLG_SPQ */
  };
  
 +#define for_each_eth_queue(bp, var) \
 +                      for (var = 0; var < BNX2X_NUM_ETH_QUEUES(bp); var++)
 +
 +#define for_each_nondefault_eth_queue(bp, var) \
 +                      for (var = 1; var < BNX2X_NUM_ETH_QUEUES(bp); var++)
 +
 +#define for_each_napi_queue(bp, var) \
 +      for (var = 0; \
 +              var < BNX2X_NUM_ETH_QUEUES(bp) + FCOE_CONTEXT_USE; var++) \
 +              if (skip_queue(bp, var))        \
 +                      continue;               \
 +              else
 +
  #define for_each_queue(bp, var) \
 -                      for (var = 0; var < BNX2X_NUM_QUEUES(bp); var++)
 +      for (var = 0; var < BNX2X_NUM_QUEUES(bp); var++) \
 +              if (skip_queue(bp, var))        \
 +                      continue;               \
 +              else
 +
 +#define for_each_rx_queue(bp, var) \
 +      for (var = 0; var < BNX2X_NUM_QUEUES(bp); var++) \
 +              if (skip_rx_queue(bp, var))     \
 +                      continue;               \
 +              else
 +
 +#define for_each_tx_queue(bp, var) \
 +      for (var = 0; var < BNX2X_NUM_QUEUES(bp); var++) \
 +              if (skip_tx_queue(bp, var))     \
 +                      continue;               \
 +              else
 +
  #define for_each_nondefault_queue(bp, var) \
 -                      for (var = 1; var < BNX2X_NUM_QUEUES(bp); var++)
 +      for (var = 1; var < BNX2X_NUM_QUEUES(bp); var++) \
 +              if (skip_queue(bp, var))        \
 +                      continue;               \
 +              else
 +
 +/* skip rx queue
-  * if FCOE l2 support is diabled and this is the fcoe L2 queue
++ * if FCOE l2 support is disabled and this is the fcoe L2 queue
 + */
 +#define skip_rx_queue(bp, idx)        (NO_FCOE(bp) && IS_FCOE_IDX(idx))
  
 +/* skip tx queue
-  * if FCOE l2 support is diabled and this is the fcoe L2 queue
++ * if FCOE l2 support is disabled and this is the fcoe L2 queue
 + */
 +#define skip_tx_queue(bp, idx)        (NO_FCOE(bp) && IS_FCOE_IDX(idx))
 +
 +#define skip_queue(bp, idx)   (NO_FCOE(bp) && IS_FCOE_IDX(idx))
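The trailing if/continue/else inside each iterator makes the caller's loop body the else-branch, so skipped (FCoE) indices silently fall through. A sketch of the expansion (the loop body is hypothetical):

    for_each_rx_queue(bp, i)
            bnx2x_do_rx_work(bp, i);    /* hypothetical body */

    /* expands roughly to:
     *   for (i = 0; i < BNX2X_NUM_QUEUES(bp); i++)
     *           if (skip_rx_queue(bp, i))
     *                   continue;
     *           else
     *                   bnx2x_do_rx_work(bp, i);
     */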
  
  #define WAIT_RAMROD_POLL      0x01
  #define WAIT_RAMROD_COMMON    0x02
@@@ -1469,7 -1329,7 +1469,7 @@@ static inline u32 reg_poll(struct bnx2
  
  #define BNX2X_ILT_ZALLOC(x, y, size) \
        do { \
 -              x = pci_alloc_consistent(bp->pdev, size, y); \
 +              x = dma_alloc_coherent(&bp->pdev->dev, size, y, GFP_KERNEL); \
                if (x) \
                        memset(x, 0, size); \
        } while (0)
  #define BNX2X_ILT_FREE(x, y, size) \
        do { \
                if (x) { \
 -                      pci_free_consistent(bp->pdev, size, x, y); \
 +                      dma_free_coherent(&bp->pdev->dev, size, x, y); \
                        x = NULL; \
                        y = 0; \
                } \
        MAC_CONFIGURATION_ENTRY_ACTION_TYPE) == \
        (T_ETH_MAC_COMMAND_INVALIDATE))
  
 -#define CAM_INVALIDATE(x) \
 -      (x.target_table_entry.flags = TSTORM_CAM_TARGET_TABLE_ENTRY_ACTION_TYPE)
 -
 -
  /* Number of u32 elements in MC hash array */
  #define MC_HASH_SIZE                  8
  #define MC_HASH_OFFSET(bp, i)         (BAR_TSTRORM_INTMEM + \
@@@ -55,7 -55,6 +55,7 @@@
  #include "bnx2x_init.h"
  #include "bnx2x_init_ops.h"
  #include "bnx2x_cmn.h"
 +#include "bnx2x_dcb.h"
  
  #include <linux/firmware.h>
  #include "bnx2x_fw_file_hdr.h"
@@@ -122,10 -121,6 +122,10 @@@ MODULE_PARM_DESC(debug, " Default debu
  
  static struct workqueue_struct *bnx2x_wq;
  
 +#ifdef BCM_CNIC
 +static u8 ALL_ENODE_MACS[] = {0x01, 0x10, 0x18, 0x01, 0x00, 0x01};
 +#endif
 +
  enum bnx2x_board_type {
        BCM57710 = 0,
        BCM57711 = 1,
@@@ -926,7 -921,7 +926,7 @@@ void bnx2x_panic_dump(struct bnx2x *bp
               sp_sb_data.p_func.vf_valid);
  
  
 -      for_each_queue(bp, i) {
 +      for_each_eth_queue(bp, i) {
                struct bnx2x_fastpath *fp = &bp->fp[i];
                int loop;
                struct hc_status_block_data_e2 sb_data_e2;
  
                /* host sb data */
  
 +#ifdef BCM_CNIC
 +              if (IS_FCOE_FP(fp))
 +                      continue;
 +#endif
                BNX2X_ERR("     run indexes (");
                for (j = 0; j < HC_SB_MAX_SM; j++)
                        pr_cont("0x%x%s",
  #ifdef BNX2X_STOP_ON_ERROR
        /* Rings */
        /* Rx */
 -      for_each_queue(bp, i) {
 +      for_each_rx_queue(bp, i) {
                struct bnx2x_fastpath *fp = &bp->fp[i];
  
                start = RX_BD(le16_to_cpu(*fp->rx_cons_sb) - 10);
        }
  
        /* Tx */
 -      for_each_queue(bp, i) {
 +      for_each_tx_queue(bp, i) {
                struct bnx2x_fastpath *fp = &bp->fp[i];
  
                start = TX_BD(le16_to_cpu(*fp->tx_cons_sb) - 10);
@@@ -1307,7 -1298,7 +1307,7 @@@ void bnx2x_int_disable_sync(struct bnx2
  #ifdef BCM_CNIC
                offset++;
  #endif
 -              for_each_queue(bp, i)
 +              for_each_eth_queue(bp, i)
                        synchronize_irq(bp->msix_table[i + offset].vector);
        } else
                synchronize_irq(bp->pdev->irq);
@@@ -1429,7 -1420,7 +1429,7 @@@ irqreturn_t bnx2x_interrupt(int irq, vo
                return IRQ_HANDLED;
  #endif
  
 -      for_each_queue(bp, i) {
 +      for_each_eth_queue(bp, i) {
                struct bnx2x_fastpath *fp = &bp->fp[i];
  
                mask = 0x2 << (fp->index + CNIC_CONTEXT_USE);
@@@ -2035,28 -2026,13 +2035,28 @@@ static int bnx2x_get_cmng_fns_mode(stru
  
  static void bnx2x_read_mf_cfg(struct bnx2x *bp)
  {
 -      int vn;
 +      int vn, n = (CHIP_MODE_IS_4_PORT(bp) ? 2 : 1);
  
        if (BP_NOMCP(bp))
                return; /* what should be the default value in this case */
  
 +      /* For 2 port configuration the absolute function number formula
 +       * is:
 +       *      abs_func = 2 * vn + BP_PORT + BP_PATH
 +       *
 +       *      and there are 4 functions per port
 +       *
 +       * For 4 port configuration it is
 +       *      abs_func = 4 * vn + 2 * BP_PORT + BP_PATH
 +       *
 +       *      and there are 2 functions per port
 +       */
        for (vn = VN_0; vn < E1HVN_MAX; vn++) {
 -              int /*abs*/func = 2*vn + BP_PORT(bp);
 +              int /*abs*/func = n * (2 * vn + BP_PORT(bp)) + BP_PATH(bp);
 +
 +              if (func >= E1H_FUNC_MAX)
 +                      break;
 +
                bp->mf_config[vn] =
                        MF_CFG_RD(bp, func_mf_config[func].config);
        }
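The factor n folds the two formulas from the comment into a single expression: n is 1 on 2-port parts and 2 on 4-port parts. A standalone sketch of the arithmetic (the function name is illustrative):

#include <stdio.h>

/* abs_func = n * (2 * vn + port) + path; with n = 1 this is the
 * 2-port formula, with n = 2 the 4-port one */
static int abs_func(int vn, int port, int path, int four_port)
{
	int n = four_port ? 2 : 1;

	return n * (2 * vn + port) + path;
}

int main(void)
{
	printf("%d\n", abs_func(1, 1, 0, 0));	/* 2-port: 2*1 + 1 + 0 = 3 */
	printf("%d\n", abs_func(1, 1, 0, 1));	/* 4-port: 4*1 + 2*1 + 0 = 6 */
	return 0;
}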
@@@ -2262,15 -2238,6 +2262,15 @@@ u32 bnx2x_fw_command(struct bnx2x *bp, 
        return rc;
  }
  
 +static u8 stat_counter_valid(struct bnx2x *bp, struct bnx2x_fastpath *fp)
 +{
 +#ifdef BCM_CNIC
 +      if (IS_FCOE_FP(fp) && IS_MF(bp))
 +              return false;
 +#endif
 +      return true;
 +}
 +
  /* must be called under rtnl_lock */
  static void bnx2x_rxq_set_mac_filters(struct bnx2x *bp, u16 cl_id, u32 filters)
  {
        u8 accp_all_ucast = 0, accp_all_bcast = 0, accp_all_mcast = 0;
        u8 unmatched_unicast = 0;
  
 +      if (filters & BNX2X_ACCEPT_UNMATCHED_UCAST)
 +              unmatched_unicast = 1;
 +
        if (filters & BNX2X_PROMISCUOUS_MODE) {
                /* promiscuous - accept all, drop none */
                drop_all_ucast = drop_all_bcast = drop_all_mcast = 0;
                accp_all_ucast = accp_all_bcast = accp_all_mcast = 1;
 +              if (IS_MF_SI(bp)) {
 +                      /*
 +                       * in SI mode, promiscuous mode is defined to
 +                       * accept only unmatched packets
 +                       */
 +                      unmatched_unicast = 1;
 +                      accp_all_ucast = 0;
 +              }
        }
        if (filters & BNX2X_ACCEPT_UNICAST) {
                /* accept matched ucast */
        if (filters & BNX2X_ACCEPT_MULTICAST) {
                /* accept matched mcast */
                drop_all_mcast = 0;
 +              if (IS_MF_SI(bp))
 +                      /* since mcast addresses won't arrive with ovlan,
 +                       * fw needs to accept all of them in
 +                       * switch-independent mode */
 +                      accp_all_mcast = 1;
        }
        if (filters & BNX2X_ACCEPT_ALL_UNICAST) {
                /* accept all mcast */
@@@ -2421,7 -2372,7 +2421,7 @@@ static inline u16 bnx2x_get_cl_flags(st
        /* calculate queue flags */
        flags |= QUEUE_FLG_CACHE_ALIGN;
        flags |= QUEUE_FLG_HC;
 -      flags |= IS_MF(bp) ? QUEUE_FLG_OV : 0;
 +      flags |= IS_MF_SD(bp) ? QUEUE_FLG_OV : 0;
  
        flags |= QUEUE_FLG_VLAN;
        DP(NETIF_MSG_IFUP, "vlan removal enabled\n");
        if (!fp->disable_tpa)
                flags |= QUEUE_FLG_TPA;
  
 -      flags |= QUEUE_FLG_STATS;
 +      flags = stat_counter_valid(bp, fp) ?
 +                      (flags | QUEUE_FLG_STATS) : (flags & ~QUEUE_FLG_STATS);
  
        return flags;
  }
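Note the set-or-clear form above: QUEUE_FLG_STATS may already be present in flags, so the statistics bit is explicitly set or masked off depending on stat_counter_valid(). A tiny sketch of the idiom (names and bit positions are illustrative):

#include <stdio.h>
#include <stdint.h>

#define FLG_STATS	(1u << 3)	/* illustrative bit, not the real value */

static uint32_t update_stats_flag(uint32_t flags, int valid)
{
	/* set the bit when valid, clear it otherwise */
	return valid ? (flags | FLG_STATS) : (flags & ~FLG_STATS);
}

int main(void)
{
	printf("0x%x\n", update_stats_flag(0x1, 1));	/* 0x9 */
	printf("0x%x\n", update_stats_flag(0x9, 0));	/* 0x1 */
	return 0;
}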
@@@ -2490,10 -2440,7 +2490,10 @@@ static void bnx2x_pf_rx_cl_prep(struct 
        rxq_init->cache_line_log = BNX2X_RX_ALIGN_SHIFT;
        rxq_init->fw_sb_id = fp->fw_sb_id;
  
 -      rxq_init->sb_cq_index = U_SB_ETH_RX_CQ_INDEX;
 +      if (IS_FCOE_FP(fp))
 +              rxq_init->sb_cq_index = HC_SP_INDEX_ETH_FCOE_RX_CQ_CONS;
 +      else
 +              rxq_init->sb_cq_index = U_SB_ETH_RX_CQ_INDEX;
  
        rxq_init->cid = HW_CID(bp, fp->cid);
  
@@@ -2513,12 -2460,6 +2513,12 @@@ static void bnx2x_pf_tx_cl_prep(struct 
        txq_init->sb_cq_index = C_SB_ETH_TX_CQ_INDEX;
        txq_init->traffic_type = LLFC_TRAFFIC_TYPE_NW;
        txq_init->fw_sb_id = fp->fw_sb_id;
 +
 +      if (IS_FCOE_FP(fp)) {
 +              txq_init->sb_cq_index = HC_SP_INDEX_ETH_FCOE_TX_CQ_CONS;
 +              txq_init->traffic_type = LLFC_TRAFFIC_TYPE_FCOE;
 +      }
 +
        txq_init->hc_rate = bp->tx_ticks ? (1000000 / bp->tx_ticks) : 0;
  }
  
@@@ -2632,26 -2573,6 +2632,26 @@@ static void bnx2x_e1h_enable(struct bnx
         */
  }
  
 +/* called due to MCP event (on pmf):
 + *    reread new bandwidth configuration
 + *    configure FW
 + *    notify other functions about the change
 + */
 +static inline void bnx2x_config_mf_bw(struct bnx2x *bp)
 +{
 +      if (bp->link_vars.link_up) {
 +              bnx2x_cmng_fns_init(bp, true, CMNG_FNS_MINMAX);
 +              bnx2x_link_sync_notify(bp);
 +      }
 +      storm_memset_cmng(bp, &bp->cmng, BP_PORT(bp));
 +}
 +
 +static inline void bnx2x_set_mf_bw(struct bnx2x *bp)
 +{
 +      bnx2x_config_mf_bw(bp);
 +      bnx2x_fw_command(bp, DRV_MSG_CODE_SET_MF_BW_ACK, 0);
 +}
 +
  static void bnx2x_dcc_event(struct bnx2x *bp, u32 dcc_event)
  {
        DP(BNX2X_MSG_MCP, "dcc_event 0x%x\n", dcc_event);
                dcc_event &= ~DRV_STATUS_DCC_DISABLE_ENABLE_PF;
        }
        if (dcc_event & DRV_STATUS_DCC_BANDWIDTH_ALLOCATION) {
 -
 -              bnx2x_cmng_fns_init(bp, true, CMNG_FNS_MINMAX);
 -              bnx2x_link_sync_notify(bp);
 -              storm_memset_cmng(bp, &bp->cmng, BP_PORT(bp));
 +              bnx2x_config_mf_bw(bp);
                dcc_event &= ~DRV_STATUS_DCC_BANDWIDTH_ALLOCATION;
        }
  
@@@ -3098,20 -3022,10 +3098,20 @@@ static inline void bnx2x_attn_int_deass
                        if (val & DRV_STATUS_DCC_EVENT_MASK)
                                bnx2x_dcc_event(bp,
                                            (val & DRV_STATUS_DCC_EVENT_MASK));
 +
 +                      if (val & DRV_STATUS_SET_MF_BW)
 +                              bnx2x_set_mf_bw(bp);
 +
                        bnx2x__link_status_update(bp);
                        if ((bp->port.pmf == 0) && (val & DRV_STATUS_PMF))
                                bnx2x_pmf_update(bp);
  
 +                      if (bp->port.pmf &&
 +                          (val & DRV_STATUS_DCBX_NEGOTIATION_RESULTS) &&
 +                              bp->dcbx_enabled > 0)
 +                              /* start dcbx state machine */
 +                              bnx2x_dcbx_set_params(bp,
 +                                      BNX2X_DCBX_STATE_NEG_RECEIVED);
                } else if (attn & BNX2X_MC_ASSERT_BITS) {
  
                        BNX2X_ERR("MC assert!\n");
  #define LOAD_COUNTER_MASK     (((u32)0x1 << LOAD_COUNTER_BITS) - 1)
  #define RESET_DONE_FLAG_MASK  (~LOAD_COUNTER_MASK)
  #define RESET_DONE_FLAG_SHIFT LOAD_COUNTER_BITS
 -#define CHIP_PARITY_SUPPORTED(bp)   (CHIP_IS_E1(bp) || CHIP_IS_E1H(bp))
  
  /*
   * should be run under rtnl lock
@@@ -3526,7 -3441,7 +3526,7 @@@ static void bnx2x_attn_int_deasserted(s
           try to handle this event */
        bnx2x_acquire_alr(bp);
  
 -      if (bnx2x_chk_parity_attn(bp)) {
 +      if (CHIP_PARITY_ENABLED(bp) && bnx2x_chk_parity_attn(bp)) {
                bp->recovery_state = BNX2X_RECOVERY_INIT;
                bnx2x_set_reset_in_progress(bp);
                schedule_delayed_work(&bp->reset_task, 0);
@@@ -3722,23 -3637,11 +3722,23 @@@ static void bnx2x_eq_int(struct bnx2x *
  #ifdef BCM_CNIC
                        if (!bnx2x_cnic_handle_cfc_del(bp, cid, elem))
                                goto next_spqe;
 +                      if (cid == BNX2X_FCOE_ETH_CID)
 +                              bnx2x_fcoe(bp, state) = BNX2X_FP_STATE_CLOSED;
 +                      else
  #endif
 -                      bnx2x_fp(bp, cid, state) =
 +                              bnx2x_fp(bp, cid, state) =
                                                BNX2X_FP_STATE_CLOSED;
  
                        goto next_spqe;
 +
 +              case EVENT_RING_OPCODE_STOP_TRAFFIC:
 +                      DP(NETIF_MSG_IFUP, "got STOP TRAFFIC\n");
 +                      bnx2x_dcbx_set_params(bp, BNX2X_DCBX_STATE_TX_PAUSED);
 +                      goto next_spqe;
 +              case EVENT_RING_OPCODE_START_TRAFFIC:
 +                      DP(NETIF_MSG_IFUP, "got START TRAFFIC\n");
 +                      bnx2x_dcbx_set_params(bp, BNX2X_DCBX_STATE_TX_RELEASED);
 +                      goto next_spqe;
                }
  
                switch (opcode | bp->state) {
@@@ -3811,13 -3714,7 +3811,13 @@@ static void bnx2x_sp_task(struct work_s
  
        /* SP events: STAT_QUERY and others */
        if (status & BNX2X_DEF_SB_IDX) {
 +#ifdef BCM_CNIC
 +              struct bnx2x_fastpath *fp = bnx2x_fcoe_fp(bp);
  
 +              if ((!NO_FCOE(bp)) &&
 +                      (bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp)))
 +                      napi_schedule(&bnx2x_fcoe(bp, napi));
 +#endif
                /* Handle EQ completions */
                bnx2x_eq_int(bp);
  
@@@ -4200,7 -4097,7 +4200,7 @@@ void bnx2x_update_coalesce(struct bnx2
  {
        int i;
  
 -      for_each_queue(bp, i)
 +      for_each_eth_queue(bp, i)
                bnx2x_update_coalesce_sb(bp, bp->fp[i].fw_sb_id,
                                         bp->rx_ticks, bp->tx_ticks);
  }
@@@ -4248,16 -4145,13 +4248,16 @@@ static void bnx2x_init_ind_table(struc
        for (i = 0; i < TSTORM_INDIRECTION_TABLE_SIZE; i++)
                REG_WR8(bp, BAR_TSTRORM_INTMEM +
                        TSTORM_INDIRECTION_TABLE_OFFSET(func) + i,
 -                      bp->fp->cl_id + (i % bp->num_queues));
 +                      bp->fp->cl_id + (i % (bp->num_queues -
 +                              NONE_ETH_CONTEXT_USE)));
  }
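The modulo now excludes the non-ethernet (FCoE) context, so the RSS indirection entries round-robin over the L2 client IDs only. A userspace sketch of the resulting fill pattern (table size and IDs are illustrative):

#include <stdio.h>

int main(void)
{
	int table_size = 16;			/* illustrative; real size is firmware-defined */
	int num_queues = 5, none_eth = 1;	/* e.g. 4 eth queues + 1 FCoE context */
	int base_cl_id = 16, i;

	for (i = 0; i < table_size; i++)
		printf("entry %2d -> client %d\n",
		       i, base_cl_id + (i % (num_queues - none_eth)));
	return 0;
}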
  
  void bnx2x_set_storm_rx_mode(struct bnx2x *bp)
  {
        int mode = bp->rx_mode;
 +      int port = BP_PORT(bp);
        u16 cl_id;
 +      u32 def_q_filters = 0;
  
        /* All but management unicast packets should pass to the host as well */
        u32 llh_mask =
  
        switch (mode) {
        case BNX2X_RX_MODE_NONE: /* no Rx */
 -              cl_id = BP_L_ID(bp);
 -              bnx2x_rxq_set_mac_filters(bp, cl_id, BNX2X_ACCEPT_NONE);
 +              def_q_filters = BNX2X_ACCEPT_NONE;
 +#ifdef BCM_CNIC
 +              if (!NO_FCOE(bp)) {
 +                      cl_id = bnx2x_fcoe(bp, cl_id);
 +                      bnx2x_rxq_set_mac_filters(bp, cl_id, BNX2X_ACCEPT_NONE);
 +              }
 +#endif
                break;
  
        case BNX2X_RX_MODE_NORMAL:
 -              cl_id = BP_L_ID(bp);
 -              bnx2x_rxq_set_mac_filters(bp, cl_id,
 -                      BNX2X_ACCEPT_UNICAST |
 -                      BNX2X_ACCEPT_BROADCAST |
 -                      BNX2X_ACCEPT_MULTICAST);
 +              def_q_filters |= BNX2X_ACCEPT_UNICAST | BNX2X_ACCEPT_BROADCAST |
 +                              BNX2X_ACCEPT_MULTICAST;
 +#ifdef BCM_CNIC
 +              cl_id = bnx2x_fcoe(bp, cl_id);
 +              bnx2x_rxq_set_mac_filters(bp, cl_id, BNX2X_ACCEPT_UNICAST |
 +                                        BNX2X_ACCEPT_MULTICAST);
 +#endif
                break;
  
        case BNX2X_RX_MODE_ALLMULTI:
 -              cl_id = BP_L_ID(bp);
 -              bnx2x_rxq_set_mac_filters(bp, cl_id,
 -                      BNX2X_ACCEPT_UNICAST |
 -                      BNX2X_ACCEPT_BROADCAST |
 -                      BNX2X_ACCEPT_ALL_MULTICAST);
 +              def_q_filters |= BNX2X_ACCEPT_UNICAST | BNX2X_ACCEPT_BROADCAST |
 +                              BNX2X_ACCEPT_ALL_MULTICAST;
 +#ifdef BCM_CNIC
 +              cl_id = bnx2x_fcoe(bp, cl_id);
 +              bnx2x_rxq_set_mac_filters(bp, cl_id, BNX2X_ACCEPT_UNICAST |
 +                                        BNX2X_ACCEPT_MULTICAST);
 +#endif
                break;
  
        case BNX2X_RX_MODE_PROMISC:
 -              cl_id = BP_L_ID(bp);
 -              bnx2x_rxq_set_mac_filters(bp, cl_id, BNX2X_PROMISCUOUS_MODE);
 -
 +              def_q_filters |= BNX2X_PROMISCUOUS_MODE;
 +#ifdef BCM_CNIC
 +              cl_id = bnx2x_fcoe(bp, cl_id);
 +              bnx2x_rxq_set_mac_filters(bp, cl_id, BNX2X_ACCEPT_UNICAST |
 +                                        BNX2X_ACCEPT_MULTICAST);
 +#endif
                /* pass management unicast packets as well */
                llh_mask |= NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_UNCST;
                break;
                break;
        }
  
 +      cl_id = BP_L_ID(bp);
 +      bnx2x_rxq_set_mac_filters(bp, cl_id, def_q_filters);
 +
        REG_WR(bp,
 -             BP_PORT(bp) ? NIG_REG_LLH1_BRB1_DRV_MASK :
 -                           NIG_REG_LLH0_BRB1_DRV_MASK,
 -             llh_mask);
 +             (port ? NIG_REG_LLH1_BRB1_DRV_MASK :
 +                     NIG_REG_LLH0_BRB1_DRV_MASK), llh_mask);
  
        DP(NETIF_MSG_IFUP, "rx mode %d\n"
                "drop_ucast 0x%x\ndrop_mcast 0x%x\ndrop_bcast 0x%x\n"
 -              "accp_ucast 0x%x\naccp_mcast 0x%x\naccp_bcast 0x%x\n", mode,
 +              "accp_ucast 0x%x\naccp_mcast 0x%x\naccp_bcast 0x%x\n"
 +              "unmatched_ucast 0x%x\n", mode,
                bp->mac_filters.ucast_drop_all,
                bp->mac_filters.mcast_drop_all,
                bp->mac_filters.bcast_drop_all,
                bp->mac_filters.ucast_accept_all,
                bp->mac_filters.mcast_accept_all,
 -              bp->mac_filters.bcast_accept_all
 +              bp->mac_filters.bcast_accept_all,
 +              bp->mac_filters.unmatched_unicast
        );
  
        storm_memset_mac_filters(bp, &bp->mac_filters, BP_FUNC(bp));
@@@ -4354,15 -4232,6 +4354,15 @@@ static void bnx2x_init_internal_common(
                        bp->mf_mode);
        }
  
 +      if (IS_MF_SI(bp))
 +              /*
 +               * In switch independent mode, the TSTORM needs to accept
 +               * packets that failed classification, since approximate match
 +               * mac addresses aren't written to NIG LLH
 +               */
 +              REG_WR8(bp, BAR_TSTRORM_INTMEM +
 +                          TSTORM_ACCEPT_CLASSIFY_FAILED_OFFSET, 2);
 +
        /* Zero this manually as its initialization is
           currently missing in the initTool */
        for (i = 0; i < (USTORM_AGG_DATA_SIZE >> 2); i++)
  static void bnx2x_init_internal_port(struct bnx2x *bp)
  {
        /* port */
 +      bnx2x_dcb_init_intmem_pfc(bp);
  }
  
  static void bnx2x_init_internal(struct bnx2x *bp, u32 load_code)
@@@ -4440,11 -4308,9 +4440,11 @@@ void bnx2x_nic_init(struct bnx2x *bp, u
  {
        int i;
  
 -      for_each_queue(bp, i)
 +      for_each_eth_queue(bp, i)
                bnx2x_init_fp_sb(bp, i);
  #ifdef BCM_CNIC
 +      if (!NO_FCOE(bp))
 +              bnx2x_init_fcoe_fp(bp);
  
        bnx2x_init_sb(bp, bp->cnic_sb_mapping,
                      BNX2X_VF_ID_INVALID, false,
@@@ -4753,7 -4619,7 +4753,7 @@@ static int bnx2x_int_mem_test(struct bn
        return 0; /* OK */
  }
  
 -static void enable_blocks_attention(struct bnx2x *bp)
 +static void bnx2x_enable_blocks_attention(struct bnx2x *bp)
  {
        REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
        if (CHIP_IS_E2(bp))
        REG_WR(bp, CDU_REG_CDU_INT_MASK, 0);
        REG_WR(bp, DMAE_REG_DMAE_INT_MASK, 0);
  /*    REG_WR(bp, MISC_REG_MISC_INT_MASK, 0); */
 -      REG_WR(bp, PBF_REG_PBF_INT_MASK, 0X18);         /* bit 3,4 masked */
 -}
 -
 -static const struct {
 -      u32 addr;
 -      u32 mask;
 -} bnx2x_parity_mask[] = {
 -      {PXP_REG_PXP_PRTY_MASK,         0x3ffffff},
 -      {PXP2_REG_PXP2_PRTY_MASK_0,     0xffffffff},
 -      {PXP2_REG_PXP2_PRTY_MASK_1,     0x7f},
 -      {HC_REG_HC_PRTY_MASK,           0x7},
 -      {MISC_REG_MISC_PRTY_MASK,       0x1},
 -      {QM_REG_QM_PRTY_MASK,           0x0},
 -      {DORQ_REG_DORQ_PRTY_MASK,       0x0},
 -      {GRCBASE_UPB + PB_REG_PB_PRTY_MASK, 0x0},
 -      {GRCBASE_XPB + PB_REG_PB_PRTY_MASK, 0x0},
 -      {SRC_REG_SRC_PRTY_MASK,         0x4}, /* bit 2 */
 -      {CDU_REG_CDU_PRTY_MASK,         0x0},
 -      {CFC_REG_CFC_PRTY_MASK,         0x0},
 -      {DBG_REG_DBG_PRTY_MASK,         0x0},
 -      {DMAE_REG_DMAE_PRTY_MASK,       0x0},
 -      {BRB1_REG_BRB1_PRTY_MASK,       0x0},
 -      {PRS_REG_PRS_PRTY_MASK,         (1<<6)},/* bit 6 */
 -      {TSDM_REG_TSDM_PRTY_MASK,       0x18},  /* bit 3,4 */
 -      {CSDM_REG_CSDM_PRTY_MASK,       0x8},   /* bit 3 */
 -      {USDM_REG_USDM_PRTY_MASK,       0x38},  /* bit 3,4,5 */
 -      {XSDM_REG_XSDM_PRTY_MASK,       0x8},   /* bit 3 */
 -      {TSEM_REG_TSEM_PRTY_MASK_0,     0x0},
 -      {TSEM_REG_TSEM_PRTY_MASK_1,     0x0},
 -      {USEM_REG_USEM_PRTY_MASK_0,     0x0},
 -      {USEM_REG_USEM_PRTY_MASK_1,     0x0},
 -      {CSEM_REG_CSEM_PRTY_MASK_0,     0x0},
 -      {CSEM_REG_CSEM_PRTY_MASK_1,     0x0},
 -      {XSEM_REG_XSEM_PRTY_MASK_0,     0x0},
 -      {XSEM_REG_XSEM_PRTY_MASK_1,     0x0}
 -};
 -
 -static void enable_blocks_parity(struct bnx2x *bp)
 -{
 -      int i;
 -
 -      for (i = 0; i < ARRAY_SIZE(bnx2x_parity_mask); i++)
 -              REG_WR(bp, bnx2x_parity_mask[i].addr,
 -                      bnx2x_parity_mask[i].mask);
 +      REG_WR(bp, PBF_REG_PBF_INT_MASK, 0x18);         /* bit 3,4 masked */
  }
  
 -
  static void bnx2x_reset_common(struct bnx2x *bp)
  {
        /* reset_common */
@@@ -5037,7 -4947,7 +5037,7 @@@ static int bnx2x_init_hw_common(struct 
                memset(&ilt_cli, 0, sizeof(struct ilt_client_info));
                memset(&ilt, 0, sizeof(struct bnx2x_ilt));
  
-               /* initalize dummy TM client */
+               /* initialize dummy TM client */
                ilt_cli.start = 0;
                ilt_cli.end = ILT_NUM_PAGE_ENTRIES - 1;
                ilt_cli.client_num = ILT_CLIENT_TM;
        REG_WR(bp, PRS_REG_NIC_MODE, 1);
  #endif
        if (!CHIP_IS_E1(bp))
 -              REG_WR(bp, PRS_REG_E1HOV_MODE, IS_MF(bp));
 +              REG_WR(bp, PRS_REG_E1HOV_MODE, IS_MF_SD(bp));
  
        if (CHIP_IS_E2(bp)) {
                /* Bit-map indicating which L2 hdrs may appear after the
                   basic Ethernet header */
 -              int has_ovlan = IS_MF(bp);
 +              int has_ovlan = IS_MF_SD(bp);
                REG_WR(bp, PRS_REG_HDRS_AFTER_BASIC, (has_ovlan ? 7 : 6));
                REG_WR(bp, PRS_REG_MUST_HAVE_HDRS, (has_ovlan ? 1 : 0));
        }
        bnx2x_init_block(bp, PBF_BLOCK, COMMON_STAGE);
  
        if (CHIP_IS_E2(bp)) {
 -              int has_ovlan = IS_MF(bp);
 +              int has_ovlan = IS_MF_SD(bp);
                REG_WR(bp, PBF_REG_HDRS_AFTER_BASIC, (has_ovlan ? 7 : 6));
                REG_WR(bp, PBF_REG_MUST_HAVE_HDRS, (has_ovlan ? 1 : 0));
        }
        bnx2x_init_block(bp, NIG_BLOCK, COMMON_STAGE);
        if (!CHIP_IS_E1(bp)) {
                REG_WR(bp, NIG_REG_LLH_MF_MODE, IS_MF(bp));
 -              REG_WR(bp, NIG_REG_LLH_E1HOV_MODE, IS_MF(bp));
 +              REG_WR(bp, NIG_REG_LLH_E1HOV_MODE, IS_MF_SD(bp));
        }
        if (CHIP_IS_E2(bp)) {
                /* Bit-map indicating which L2 hdrs may appear after the
                   basic Ethernet header */
 -              REG_WR(bp, NIG_REG_P0_HDRS_AFTER_BASIC, (IS_MF(bp) ? 7 : 6));
 +              REG_WR(bp, NIG_REG_P0_HDRS_AFTER_BASIC, (IS_MF_SD(bp) ? 7 : 6));
        }
  
        if (CHIP_REV_IS_SLOW(bp))
        /* clear PXP2 attentions */
        REG_RD(bp, PXP2_REG_PXP2_INT_STS_CLR_0);
  
 -      enable_blocks_attention(bp);
 -      if (CHIP_PARITY_SUPPORTED(bp))
 -              enable_blocks_parity(bp);
 +      bnx2x_enable_blocks_attention(bp);
 +      if (CHIP_PARITY_ENABLED(bp))
 +              bnx2x_enable_blocks_parity(bp);
  
        if (!BP_NOMCP(bp)) {
                /* In E2 2-PORT mode, same ext phy is used for the two paths */
@@@ -5460,10 -5370,8 +5460,10 @@@ static int bnx2x_init_hw_port(struct bn
         *  - SF mode: bits 3-7 are masked. only bits 0-2 are in use
         *  - MF mode: bit 3 is masked. bits 0-2 are in use as in SF
         *             bits 4-7 are used for "per vn group attention" */
 -      REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4,
 -             (IS_MF(bp) ? 0xF7 : 0x7));
 +      val = IS_MF(bp) ? 0xF7 : 0x7;
 +      /* Enable DCBX attention for all but E1 */
 +      val |= CHIP_IS_E1(bp) ? 0 : 0x10;
 +      REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, val);
  
        bnx2x_init_block(bp, PXPCS_BLOCK, init_stage);
        bnx2x_init_block(bp, EMAC0_BLOCK, init_stage);
        if (!CHIP_IS_E1(bp)) {
                /* 0x2 disable mf_ov, 0x1 enable */
                REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK_MF + port*4,
 -                     (IS_MF(bp) ? 0x1 : 0x2));
 +                     (IS_MF_SD(bp) ? 0x1 : 0x2));
  
                if (CHIP_IS_E2(bp)) {
                        val = 0;
@@@ -5908,15 -5816,6 +5908,15 @@@ void bnx2x_free_mem(struct bnx2x *bp
        /* fastpath */
        /* Common */
        for_each_queue(bp, i) {
 +#ifdef BCM_CNIC
 +              /* FCoE client uses default status block */
 +              if (IS_FCOE_IDX(i)) {
 +                      union host_hc_status_block *sb =
 +                              &bnx2x_fp(bp, i, status_blk);
 +                      memset(sb, 0, sizeof(union host_hc_status_block));
 +                      bnx2x_fp(bp, i, status_blk_mapping) = 0;
 +              } else {
 +#endif
                /* status blocks */
                if (CHIP_IS_E2(bp))
                        BNX2X_PCI_FREE(bnx2x_fp(bp, i, status_blk.e2_sb),
                        BNX2X_PCI_FREE(bnx2x_fp(bp, i, status_blk.e1x_sb),
                                       bnx2x_fp(bp, i, status_blk_mapping),
                                       sizeof(struct host_hc_status_block_e1x));
 +#ifdef BCM_CNIC
 +              }
 +#endif
        }
        /* Rx */
 -      for_each_queue(bp, i) {
 +      for_each_rx_queue(bp, i) {
  
                /* fastpath rx rings: rx_buf rx_desc rx_comp */
                BNX2X_FREE(bnx2x_fp(bp, i, rx_buf_ring));
                               BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
        }
        /* Tx */
 -      for_each_queue(bp, i) {
 +      for_each_tx_queue(bp, i) {
  
                /* fastpath tx rings: tx_buf tx_desc */
                BNX2X_FREE(bnx2x_fp(bp, i, tx_buf_ring));
@@@ -6035,20 -5931,15 +6035,20 @@@ int bnx2x_alloc_mem(struct bnx2x *bp
                union host_hc_status_block *sb = &bnx2x_fp(bp, i, status_blk);
                bnx2x_fp(bp, i, bp) = bp;
                /* status blocks */
 -              if (CHIP_IS_E2(bp))
 -                      BNX2X_PCI_ALLOC(sb->e2_sb,
 -                              &bnx2x_fp(bp, i, status_blk_mapping),
 -                              sizeof(struct host_hc_status_block_e2));
 -              else
 -                      BNX2X_PCI_ALLOC(sb->e1x_sb,
 -                              &bnx2x_fp(bp, i, status_blk_mapping),
 -                              sizeof(struct host_hc_status_block_e1x));
 -
 +#ifdef BCM_CNIC
 +              if (!IS_FCOE_IDX(i)) {
 +#endif
 +                      if (CHIP_IS_E2(bp))
 +                              BNX2X_PCI_ALLOC(sb->e2_sb,
 +                                  &bnx2x_fp(bp, i, status_blk_mapping),
 +                                  sizeof(struct host_hc_status_block_e2));
 +                      else
 +                              BNX2X_PCI_ALLOC(sb->e1x_sb,
 +                                  &bnx2x_fp(bp, i, status_blk_mapping),
 +                                  sizeof(struct host_hc_status_block_e1x));
 +#ifdef BCM_CNIC
 +              }
 +#endif
                set_sb_shortcuts(bp, i);
        }
        /* Rx */
@@@ -6164,7 -6055,7 +6164,7 @@@ static int bnx2x_func_stop(struct bnx2
   * @param cam_offset offset in a CAM to use
   * @param is_bcast is the set MAC a broadcast address (for E1 only)
   */
 -static void bnx2x_set_mac_addr_gen(struct bnx2x *bp, int set, u8 *mac,
 +static void bnx2x_set_mac_addr_gen(struct bnx2x *bp, int set, const u8 *mac,
                                   u32 cl_bit_vec, u8 cam_offset,
                                   u8 is_bcast)
  {
@@@ -6279,70 -6170,6 +6279,70 @@@ static u8 bnx2x_e1h_cam_offset(struct b
                return BP_VN(bp) * 32  + rel_offset;
  }
  
 +/**
 + *  LLH CAM line allocations: currently only iSCSI and ETH MACs are
 + *  relevant. In addition, the current implementation is tuned for a
 + *  single ETH MAC.
 + *
 + *  When a PF configuration with multiple unicast ETH MACs in
 + *  switch-independent mode is required (NetQ, multiple netdev MACs,
 + *  etc.), consider better utilisation of the 16 per-function MAC
 + *  entries in the LLH memory.
 + */
 +enum {
 +      LLH_CAM_ISCSI_ETH_LINE = 0,
 +      LLH_CAM_ETH_LINE,
 +      LLH_CAM_MAX_PF_LINE = NIG_REG_LLH1_FUNC_MEM_SIZE
 +};
 +
 +static void bnx2x_set_mac_in_nig(struct bnx2x *bp,
 +                        int set,
 +                        unsigned char *dev_addr,
 +                        int index)
 +{
 +      u32 wb_data[2];
 +      u32 mem_offset, ena_offset, mem_index;
 +      /**
 +       * index mapping:
 +       * 0..7  - go to MEM
 +       * 8..15 - go to MEM2
 +       */
 +
 +      if (!IS_MF_SI(bp) || index > LLH_CAM_MAX_PF_LINE)
 +              return;
 +
 +      /* calculate memory start offset according to the mapping
 +       * and index in the memory */
 +      if (index < NIG_LLH_FUNC_MEM_MAX_OFFSET) {
 +              mem_offset = BP_PORT(bp) ? NIG_REG_LLH1_FUNC_MEM :
 +                                         NIG_REG_LLH0_FUNC_MEM;
 +              ena_offset = BP_PORT(bp) ? NIG_REG_LLH1_FUNC_MEM_ENABLE :
 +                                         NIG_REG_LLH0_FUNC_MEM_ENABLE;
 +              mem_index = index;
 +      } else {
 +              mem_offset = BP_PORT(bp) ? NIG_REG_P1_LLH_FUNC_MEM2 :
 +                                         NIG_REG_P0_LLH_FUNC_MEM2;
 +              ena_offset = BP_PORT(bp) ? NIG_REG_P1_LLH_FUNC_MEM2_ENABLE :
 +                                         NIG_REG_P0_LLH_FUNC_MEM2_ENABLE;
 +              mem_index = index - NIG_LLH_FUNC_MEM_MAX_OFFSET;
 +      }
 +
 +      if (set) {
 +              /* LLH_FUNC_MEM is a u64 WB register */
 +              mem_offset += 8*mem_index;
 +
 +              wb_data[0] = ((dev_addr[2] << 24) | (dev_addr[3] << 16) |
 +                            (dev_addr[4] <<  8) |  dev_addr[5]);
 +              wb_data[1] = ((dev_addr[0] <<  8) |  dev_addr[1]);
 +
 +              REG_WR_DMAE(bp, mem_offset, wb_data, 2);
 +      }
 +
 +      /* enable/disable the entry */
 +      REG_WR(bp, ena_offset + 4*mem_index, set);
 +
 +}
 +
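LLH_FUNC_MEM is a 64-bit wide-bus register, so the six MAC bytes are packed into two u32 words: wb_data[0] carries bytes 2..5, wb_data[1] bytes 0..1. A quick userspace check of the packing (the MAC value is illustrative):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint8_t mac[6] = { 0x00, 0x10, 0x18, 0xab, 0xcd, 0xef };
	uint32_t wb[2];

	/* same packing as bnx2x_set_mac_in_nig() above */
	wb[0] = ((uint32_t)mac[2] << 24) | (mac[3] << 16) |
		(mac[4] << 8) | mac[5];
	wb[1] = (mac[0] << 8) | mac[1];

	printf("wb[0]=0x%08x wb[1]=0x%08x\n", wb[0], wb[1]);
	/* expected: wb[0]=0x18abcdef wb[1]=0x00000010 */
	return 0;
}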
  void bnx2x_set_eth_mac(struct bnx2x *bp, int set)
  {
        u8 cam_offset = (CHIP_IS_E1(bp) ? (BP_PORT(bp) ? 32 : 0) :
        bnx2x_set_mac_addr_gen(bp, set, bp->dev->dev_addr,
                               (1 << bp->fp->cl_id), cam_offset, 0);
  
 +      bnx2x_set_mac_in_nig(bp, set, bp->dev->dev_addr, LLH_CAM_ETH_LINE);
 +
        if (CHIP_IS_E1(bp)) {
                /* broadcast MAC */
 -              u8 bcast[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
 +              static const u8 bcast[ETH_ALEN] = {
 +                      0xff, 0xff, 0xff, 0xff, 0xff, 0xff
 +              };
                bnx2x_set_mac_addr_gen(bp, set, bcast, 0, cam_offset + 1, 1);
        }
  }
@@@ -6460,59 -6283,12 +6460,59 @@@ static int bnx2x_set_iscsi_eth_mac_addr
  {
        u8 cam_offset = (CHIP_IS_E1(bp) ? ((BP_PORT(bp) ? 32 : 0) + 2) :
                         bnx2x_e1h_cam_offset(bp, CAM_ISCSI_ETH_LINE));
 -      u32 iscsi_l2_cl_id = BNX2X_ISCSI_ETH_CL_ID;
 +      u32 iscsi_l2_cl_id = BNX2X_ISCSI_ETH_CL_ID +
 +              BP_E1HVN(bp) * NONE_ETH_CONTEXT_USE;
        u32 cl_bit_vec = (1 << iscsi_l2_cl_id);
  
        /* Send a SET_MAC ramrod */
        bnx2x_set_mac_addr_gen(bp, set, bp->iscsi_mac, cl_bit_vec,
                               cam_offset, 0);
 +
 +      bnx2x_set_mac_in_nig(bp, set, bp->iscsi_mac, LLH_CAM_ISCSI_ETH_LINE);
 +
 +      return 0;
 +}
 +
 +/**
 + * Set FCoE L2 MAC(s) at the next entries in the CAM after the
 + * ETH MAC(s). This function will wait until the ramrod
 + * completion returns.
 + *
 + * @param bp driver handle
 + * @param set set or clear the CAM entry
 + *
 + * @return 0 on success, -ENODEV if the ramrod doesn't return.
 + */
 +int bnx2x_set_fip_eth_mac_addr(struct bnx2x *bp, int set)
 +{
 +      u32 cl_bit_vec = (1 << bnx2x_fcoe(bp, cl_id));
 +      /**
 +       * CAM allocation for E1H
 +       * eth unicasts: by func number
 +       * iscsi: by func number
 +       * fip unicast: by func number
 +       * fip multicast: by func number
 +       */
 +      bnx2x_set_mac_addr_gen(bp, set, bp->fip_mac,
 +              cl_bit_vec, bnx2x_e1h_cam_offset(bp, CAM_FIP_ETH_LINE), 0);
 +
 +      return 0;
 +}
 +
 +int bnx2x_set_all_enode_macs(struct bnx2x *bp, int set)
 +{
 +      u32 cl_bit_vec = (1 << bnx2x_fcoe(bp, cl_id));
 +
 +      /**
 +       * CAM allocation for E1H
 +       * eth unicasts: by func number
 +       * iscsi: by func number
 +       * fip unicast: by func number
 +       * fip multicast: by func number
 +       */
 +      bnx2x_set_mac_addr_gen(bp, set, ALL_ENODE_MACS, cl_bit_vec,
 +              bnx2x_e1h_cam_offset(bp, CAM_FIP_MCAST_LINE), 0);
 +
        return 0;
  }
  #endif
@@@ -6530,8 -6306,6 +6530,8 @@@ static void bnx2x_fill_cl_init_data(str
        data->general.statistics_counter_id = params->rxq_params.stat_id;
        data->general.statistics_en_flg =
                (params->rxq_params.flags & QUEUE_FLG_STATS) ? 1 : 0;
 +      data->general.is_fcoe_flg =
 +              (params->ramrod_params.flags & CLIENT_IS_FCOE) ? 1 : 0;
        data->general.activate_flg = activate;
        data->general.sp_client_id = params->rxq_params.spcl_id;
  
        data->fc.safc_group_num = params->txq_params.cos;
        data->fc.safc_group_en_flg =
                (params->txq_params.flags & QUEUE_FLG_COS) ? 1 : 0;
 -      data->fc.traffic_type = LLFC_TRAFFIC_TYPE_NW;
 +      data->fc.traffic_type =
 +              (params->ramrod_params.flags & CLIENT_IS_FCOE) ?
 +              LLFC_TRAFFIC_TYPE_FCOE : LLFC_TRAFFIC_TYPE_NW;
  }
  
  static inline void bnx2x_set_ctx_validation(struct eth_context *cxt, u32 cid)
@@@ -6701,7 -6473,7 +6701,7 @@@ static int __devinit bnx2x_set_int_mode
                bnx2x_enable_msi(bp);
                /* falling through... */
        case INT_MODE_INTx:
 -              bp->num_queues = 1;
 +              bp->num_queues = 1 + NONE_ETH_CONTEXT_USE;
                DP(NETIF_MSG_IFUP, "set number of queues to 1\n");
                break;
        default:
                                          "enable MSI-X (%d), "
                                          "set number of queues to %d\n",
                                   bp->num_queues,
 -                                 1);
 -                      bp->num_queues = 1;
 +                                 1 + NONE_ETH_CONTEXT_USE);
 +                      bp->num_queues = 1 + NONE_ETH_CONTEXT_USE;
  
                        if (!(bp->flags & DISABLE_MSI_FLAG))
                                bnx2x_enable_msi(bp);
@@@ -6846,9 -6618,7 +6846,9 @@@ int bnx2x_setup_client(struct bnx2x *bp
        struct bnx2x_client_init_params params = { {0} };
        int rc;
  
 -      bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID, 0,
 +      /* reset IGU state; skip the FCoE L2 queue */
 +      if (!IS_FCOE_FP(fp))
 +              bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID, 0,
                             IGU_INT_ENABLE, 0);
  
        params.ramrod_params.pstate = &fp->state;
        params.ramrod_params.index = fp->index;
        params.ramrod_params.cid = fp->cid;
  
 +#ifdef BCM_CNIC
 +      if (IS_FCOE_FP(fp))
 +              params.ramrod_params.flags |= CLIENT_IS_FCOE;
 +
 +#endif
 +
        if (is_leading)
                params.ramrod_params.flags |= CLIENT_IS_LEADING_RSS;
  
@@@ -6946,7 -6710,7 +6946,7 @@@ static void bnx2x_reset_func(struct bnx
        REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNC_EN_OFFSET(func), 0);
  
        /* FP SBs */
 -      for_each_queue(bp, i) {
 +      for_each_eth_queue(bp, i) {
                struct bnx2x_fastpath *fp = &bp->fp[i];
                REG_WR8(bp,
                        BAR_CSTRORM_INTMEM +
@@@ -7066,20 -6830,6 +7066,20 @@@ static void bnx2x_reset_chip(struct bnx
        }
  }
  
 +#ifdef BCM_CNIC
 +static inline void bnx2x_del_fcoe_eth_macs(struct bnx2x *bp)
 +{
 +      if (bp->flags & FCOE_MACS_SET) {
 +              if (!IS_MF_SD(bp))
 +                      bnx2x_set_fip_eth_mac_addr(bp, 0);
 +
 +              bnx2x_set_all_enode_macs(bp, 0);
 +
 +              bp->flags &= ~FCOE_MACS_SET;
 +      }
 +}
 +#endif
 +
  void bnx2x_chip_cleanup(struct bnx2x *bp, int unload_mode)
  {
        int port = BP_PORT(bp);
        int i, cnt, rc;
  
        /* Wait until tx fastpath tasks complete */
 -      for_each_queue(bp, i) {
 +      for_each_tx_queue(bp, i) {
                struct bnx2x_fastpath *fp = &bp->fp[i];
  
                cnt = 1000;
        }
  
  #ifdef BCM_CNIC
 -      /* Clear iSCSI L2 MAC */
 -      mutex_lock(&bp->cnic_mutex);
 -      if (bp->cnic_flags & BNX2X_CNIC_FLAG_MAC_SET) {
 -              bnx2x_set_iscsi_eth_mac_addr(bp, 0);
 -              bp->cnic_flags &= ~BNX2X_CNIC_FLAG_MAC_SET;
 -      }
 -      mutex_unlock(&bp->cnic_mutex);
 +      bnx2x_del_fcoe_eth_macs(bp);
  #endif
  
        if (unload_mode == UNLOAD_NORMAL)
@@@ -7980,7 -7736,7 +7980,7 @@@ static void __devinit bnx2x_get_igu_cam
        bp->igu_sb_cnt = 0;
        if (CHIP_INT_MODE_IS_BC(bp)) {
                bp->igu_sb_cnt = min_t(u8, FP_SB_MAX_E1x,
 -                                     bp->l2_cid_count);
 +                                     NUM_IGU_SB_REQUIRED(bp->l2_cid_count));
  
                bp->igu_base_sb = (CHIP_MODE_IS_4_PORT(bp) ? pfid : vn) *
                        FP_SB_MAX_E1x;
                        }
                }
        }
 -      bp->igu_sb_cnt = min_t(u8, bp->igu_sb_cnt, bp->l2_cid_count);
 +      bp->igu_sb_cnt = min_t(u8, bp->igu_sb_cnt,
 +                                 NUM_IGU_SB_REQUIRED(bp->l2_cid_count));
        if (bp->igu_sb_cnt == 0)
                BNX2X_ERR("CAM configuration error\n");
  }
@@@ -8321,8 -8076,9 +8321,8 @@@ static void __devinit bnx2x_set_mac_buf
  static void __devinit bnx2x_get_port_hwinfo(struct bnx2x *bp)
  {
        int port = BP_PORT(bp);
 -      u32 val, val2;
        u32 config;
 -      u32 ext_phy_type, ext_phy_config;;
 +      u32 ext_phy_type, ext_phy_config;
  
        bp->link_params.bp = bp;
        bp->link_params.port = port;
                 (ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_NOT_CONN))
                bp->mdio.prtad =
                        XGXS_EXT_PHY_ADDR(ext_phy_config);
 +}
 +
 +static void __devinit bnx2x_get_mac_hwinfo(struct bnx2x *bp)
 +{
 +      u32 val, val2;
 +      int func = BP_ABS_FUNC(bp);
 +      int port = BP_PORT(bp);
 +
 +      if (BP_NOMCP(bp)) {
 +              BNX2X_ERROR("warning: random MAC workaround active\n");
 +              random_ether_addr(bp->dev->dev_addr);
 +      } else if (IS_MF(bp)) {
 +              val2 = MF_CFG_RD(bp, func_mf_config[func].mac_upper);
 +              val = MF_CFG_RD(bp, func_mf_config[func].mac_lower);
 +              if ((val2 != FUNC_MF_CFG_UPPERMAC_DEFAULT) &&
 +                  (val != FUNC_MF_CFG_LOWERMAC_DEFAULT))
 +                      bnx2x_set_mac_buf(bp->dev->dev_addr, val, val2);
 +
 +#ifdef BCM_CNIC
 +              /* iSCSI NPAR MAC */
 +              if (IS_MF_SI(bp)) {
 +                      u32 cfg = MF_CFG_RD(bp, func_ext_config[func].func_cfg);
 +                      if (cfg & MACP_FUNC_CFG_FLAGS_ISCSI_OFFLOAD) {
 +                              val2 = MF_CFG_RD(bp, func_ext_config[func].
 +                                                   iscsi_mac_addr_upper);
 +                              val = MF_CFG_RD(bp, func_ext_config[func].
 +                                                  iscsi_mac_addr_lower);
 +                              bnx2x_set_mac_buf(bp->iscsi_mac, val, val2);
 +                      }
 +              }
 +#endif
 +      } else {
 +              /* in SF mode, read MACs from the port configuration */
 +              val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_upper);
 +              val = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_lower);
 +              bnx2x_set_mac_buf(bp->dev->dev_addr, val, val2);
 +
 +#ifdef BCM_CNIC
 +              val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].
 +                                  iscsi_mac_upper);
 +              val = SHMEM_RD(bp, dev_info.port_hw_config[port].
 +                                 iscsi_mac_lower);
 +              bnx2x_set_mac_buf(bp->iscsi_mac, val, val2);
 +#endif
 +      }
  
 -      val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_upper);
 -      val = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_lower);
 -      bnx2x_set_mac_buf(bp->dev->dev_addr, val, val2);
        memcpy(bp->link_params.mac_addr, bp->dev->dev_addr, ETH_ALEN);
        memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
  
  #ifdef BCM_CNIC
 -      val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].iscsi_mac_upper);
 -      val = SHMEM_RD(bp, dev_info.port_hw_config[port].iscsi_mac_lower);
 -      bnx2x_set_mac_buf(bp->iscsi_mac, val, val2);
 +      /* Inform the upper layers about the FCoE MAC */
 +      if (!CHIP_IS_E1x(bp)) {
 +              if (IS_MF_SD(bp))
 +                      memcpy(bp->fip_mac, bp->dev->dev_addr,
 +                             sizeof(bp->fip_mac));
 +              else
 +                      memcpy(bp->fip_mac, bp->iscsi_mac,
 +                             sizeof(bp->fip_mac));
 +      }
  #endif
  }
  
  static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp)
  {
 -      int func = BP_ABS_FUNC(bp);
 -      int vn;
 -      u32 val, val2;
 +      int /*abs*/func = BP_ABS_FUNC(bp);
 +      int vn, port;
 +      u32 val = 0;
        int rc = 0;
  
        bnx2x_get_common_hwinfo(bp);
  
                bp->igu_dsb_id = DEF_SB_IGU_ID;
                bp->igu_base_sb = 0;
 -              bp->igu_sb_cnt = min_t(u8, FP_SB_MAX_E1x, bp->l2_cid_count);
 +              bp->igu_sb_cnt = min_t(u8, FP_SB_MAX_E1x,
 +                                     NUM_IGU_SB_REQUIRED(bp->l2_cid_count));
        } else {
                bp->common.int_block = INT_BLOCK_IGU;
                val = REG_RD(bp, IGU_REG_BLOCK_CONFIGURATION);
        bp->mf_ov = 0;
        bp->mf_mode = 0;
        vn = BP_E1HVN(bp);
 +      port = BP_PORT(bp);
 +
        if (!CHIP_IS_E1(bp) && !BP_NOMCP(bp)) {
 +              DP(NETIF_MSG_PROBE,
 +                          "shmem2base 0x%x, size %d, mfcfg offset %d\n",
 +                          bp->common.shmem2_base, SHMEM2_RD(bp, size),
 +                          (u32)offsetof(struct shmem2_region, mf_cfg_addr));
                if (SHMEM2_HAS(bp, mf_cfg_addr))
                        bp->common.mf_cfg_base = SHMEM2_RD(bp, mf_cfg_addr);
                else
                        bp->common.mf_cfg_base = bp->common.shmem_base +
                                offsetof(struct shmem_region, func_mb) +
                                E1H_FUNC_MAX * sizeof(struct drv_func_mb);
 -              bp->mf_config[vn] =
 -                      MF_CFG_RD(bp, func_mf_config[func].config);
 +              /*
 +               * get mf configuration:
 +               * 1. existence of MF configuration
 +               * 2. MAC address must be legal (check only upper bytes)
 +               *    for Switch-Independent mode;
 +               *    OVLAN must be legal for Switch-Dependent mode
 +               * 3. SF_MODE configures specific MF mode
 +               */
 +              if (bp->common.mf_cfg_base != SHMEM_MF_CFG_ADDR_NONE) {
 +                      /* get mf configuration */
 +                      val = SHMEM_RD(bp,
 +                                     dev_info.shared_feature_config.config);
 +                      val &= SHARED_FEAT_CFG_FORCE_SF_MODE_MASK;
 +
 +                      switch (val) {
 +                      case SHARED_FEAT_CFG_FORCE_SF_MODE_SWITCH_INDEPT:
 +                              val = MF_CFG_RD(bp, func_mf_config[func].
 +                                              mac_upper);
 +                              /* check for legal mac (upper bytes) */
 +                              if (val != 0xffff) {
 +                                      bp->mf_mode = MULTI_FUNCTION_SI;
 +                                      bp->mf_config[vn] = MF_CFG_RD(bp,
 +                                                 func_mf_config[func].config);
 +                              } else
 +                                      DP(NETIF_MSG_PROBE, "illegal MAC "
 +                                                          "address for SI\n");
 +                              break;
 +                      case SHARED_FEAT_CFG_FORCE_SF_MODE_MF_ALLOWED:
 +                              /* get OV configuration */
 +                              val = MF_CFG_RD(bp,
 +                                      func_mf_config[FUNC_0].e1hov_tag);
 +                              val &= FUNC_MF_CFG_E1HOV_TAG_MASK;
 +
 +                              if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) {
 +                                      bp->mf_mode = MULTI_FUNCTION_SD;
 +                                      bp->mf_config[vn] = MF_CFG_RD(bp,
 +                                              func_mf_config[func].config);
 +                              } else
 +                                      DP(NETIF_MSG_PROBE, "illegal OV for "
 +                                                          "SD\n");
 +                              break;
 +                      default:
 +                              /* Unknown configuration: reset mf_config */
 +                              bp->mf_config[vn] = 0;
 +                              DP(NETIF_MSG_PROBE, "Unknown MF mode 0x%x\n",
 +                                 val);
 +                      }
 +              }
  
                BNX2X_DEV_INFO("%s function mode\n",
                               IS_MF(bp) ? "multi" : "single");
  
 -              if (IS_MF(bp)) {
 -                      val = (MF_CFG_RD(bp, func_mf_config[func].
 -                                                              e1hov_tag) &
 -                             FUNC_MF_CFG_E1HOV_TAG_MASK);
 +              switch (bp->mf_mode) {
 +              case MULTI_FUNCTION_SD:
 +                      val = MF_CFG_RD(bp, func_mf_config[func].e1hov_tag) &
 +                            FUNC_MF_CFG_E1HOV_TAG_MASK;
                        if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) {
                                bp->mf_ov = val;
 -                              BNX2X_DEV_INFO("MF OV for func %d is %d "
 -                                             "(0x%04x)\n",
 -                                             func, bp->mf_ov, bp->mf_ov);
 +                              BNX2X_DEV_INFO("MF OV for func %d is %d"
 +                                             " (0x%04x)\n", func,
 +                                             bp->mf_ov, bp->mf_ov);
                        } else {
 -                              BNX2X_ERROR("No valid MF OV for func %d,"
 -                                          "  aborting\n", func);
 +                              BNX2X_ERR("No valid MF OV for func %d,"
 +                                        "  aborting\n", func);
                                rc = -EPERM;
                        }
 -              } else {
 -                      if (BP_VN(bp)) {
 -                              BNX2X_ERROR("VN %d in single function mode,"
 -                                          "  aborting\n", BP_E1HVN(bp));
 +                      break;
 +              case MULTI_FUNCTION_SI:
 +                      BNX2X_DEV_INFO("func %d is in MF "
 +                                     "switch-independent mode\n", func);
 +                      break;
 +              default:
 +                      if (vn) {
 +                              BNX2X_ERR("VN %d in single function mode,"
 +                                        "  aborting\n", vn);
                                rc = -EPERM;
                        }
 +                      break;
                }
 +
        }
  
        /* adjust igu_sb_cnt to MF for E1x */
                BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
        }
  
 -      if (IS_MF(bp)) {
 -              val2 = MF_CFG_RD(bp, func_mf_config[func].mac_upper);
 -              val = MF_CFG_RD(bp,  func_mf_config[func].mac_lower);
 -              if ((val2 != FUNC_MF_CFG_UPPERMAC_DEFAULT) &&
 -                  (val != FUNC_MF_CFG_LOWERMAC_DEFAULT)) {
 -                      bp->dev->dev_addr[0] = (u8)(val2 >> 8 & 0xff);
 -                      bp->dev->dev_addr[1] = (u8)(val2 & 0xff);
 -                      bp->dev->dev_addr[2] = (u8)(val >> 24 & 0xff);
 -                      bp->dev->dev_addr[3] = (u8)(val >> 16 & 0xff);
 -                      bp->dev->dev_addr[4] = (u8)(val >> 8  & 0xff);
 -                      bp->dev->dev_addr[5] = (u8)(val & 0xff);
 -                      memcpy(bp->link_params.mac_addr, bp->dev->dev_addr,
 -                             ETH_ALEN);
 -                      memcpy(bp->dev->perm_addr, bp->dev->dev_addr,
 -                             ETH_ALEN);
 -              }
 -
 -              return rc;
 -      }
 -
 -      if (BP_NOMCP(bp)) {
 -              /* only supposed to happen on emulation/FPGA */
 -              BNX2X_ERROR("warning: random MAC workaround active\n");
 -              random_ether_addr(bp->dev->dev_addr);
 -              memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
 -      }
 +      /* Get MAC addresses */
 +      bnx2x_get_mac_hwinfo(bp);
  
        return rc;
  }
@@@ -8706,6 -8382,13 +8706,6 @@@ static int __devinit bnx2x_init_bp(stru
                dev_err(&bp->pdev->dev, "MCP disabled, "
                                        "must load devices in order!\n");
  
 -      /* Set multi queue mode */
 -      if ((multi_mode != ETH_RSS_MODE_DISABLED) &&
 -          ((int_mode == INT_MODE_INTx) || (int_mode == INT_MODE_MSI))) {
 -              dev_err(&bp->pdev->dev, "Multi disabled since int_mode "
 -                                      "requested is not MSI-X\n");
 -              multi_mode = ETH_RSS_MODE_DISABLED;
 -      }
        bp->multi_mode = multi_mode;
        bp->int_mode = int_mode;
  
        bp->timer.data = (unsigned long) bp;
        bp->timer.function = bnx2x_timer;
  
 +      bnx2x_dcbx_set_state(bp, true, BNX2X_DCBX_ENABLED_ON_NEG_ON);
 +      bnx2x_dcbx_init_params(bp);
 +
        return rc;
  }
  
@@@ -8949,7 -8629,6 +8949,7 @@@ static const struct net_device_ops bnx2
        .ndo_open               = bnx2x_open,
        .ndo_stop               = bnx2x_close,
        .ndo_start_xmit         = bnx2x_start_xmit,
 +      .ndo_select_queue       = bnx2x_select_queue,
        .ndo_set_multicast_list = bnx2x_set_rx_mode,
        .ndo_set_mac_address    = bnx2x_change_mac_addr,
        .ndo_validate_addr      = eth_validate_addr,
@@@ -9082,7 -8761,7 +9082,7 @@@ static int __devinit bnx2x_init_dev(str
        dev->netdev_ops = &bnx2x_netdev_ops;
        bnx2x_set_ethtool_ops(dev);
        dev->features |= NETIF_F_SG;
 -      dev->features |= NETIF_F_HW_CSUM;
 +      dev->features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
        if (bp->flags & USING_DAC_FLAG)
                dev->features |= NETIF_F_HIGHDMA;
        dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
        dev->features |= (NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX);
  
        dev->vlan_features |= NETIF_F_SG;
 -      dev->vlan_features |= NETIF_F_HW_CSUM;
 +      dev->vlan_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
        if (bp->flags & USING_DAC_FLAG)
                dev->vlan_features |= NETIF_F_HIGHDMA;
        dev->vlan_features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
        dev->vlan_features |= NETIF_F_TSO6;
  
 +#ifdef BCM_DCB
 +      dev->dcbnl_ops = &bnx2x_dcbnl_ops;
 +#endif
 +
        /* get_port_hwinfo() will set prtad and mmds properly */
        bp->mdio.prtad = MDIO_PRTAD_NONE;
        bp->mdio.mmds = 0;
@@@ -9392,7 -9067,7 +9392,7 @@@ static int __devinit bnx2x_init_one(str
                return -ENODEV;
        }
  
 -      cid_count += CNIC_CONTEXT_USE;
 +      cid_count += NONE_ETH_CONTEXT_USE + CNIC_CONTEXT_USE;
  
        /* dev zeroed in init_etherdev */
        dev = alloc_etherdev_mq(sizeof(*bp), cid_count);
        /* calc qm_cid_count */
        bp->qm_cid_count = bnx2x_set_qm_cid_count(bp, cid_count);
  
 -      rc = register_netdev(dev);
 -      if (rc) {
 -              dev_err(&pdev->dev, "Cannot register net device\n");
 -              goto init_one_exit;
 -      }
 +#ifdef BCM_CNIC
 +      /* disable FCoE L2 queue for E1x */
 +      if (CHIP_IS_E1x(bp))
 +              bp->flags |= NO_FCOE_FLAG;
 +
 +#endif
  
        /* Configure interupt mode: try to enable MSI-X/MSI if
         * needed, set bp->num_queues appropriately.
        /* Add all NAPI objects */
        bnx2x_add_all_napi(bp);
  
 +      rc = register_netdev(dev);
 +      if (rc) {
 +              dev_err(&pdev->dev, "Cannot register net device\n");
 +              goto init_one_exit;
 +      }
 +
 +#ifdef BCM_CNIC
 +      if (!NO_FCOE(bp)) {
 +              /* Add storage MAC address */
 +              rtnl_lock();
 +              dev_addr_add(bp->dev, bp->fip_mac, NETDEV_HW_ADDR_T_SAN);
 +              rtnl_unlock();
 +      }
 +#endif
 +
        bnx2x_get_pcie_width_speed(bp, &pcie_width, &pcie_speed);
  
        netdev_info(dev, "%s (%c%d) PCI-E x%d %s found at mem %lx,"
@@@ -9494,29 -9153,14 +9494,29 @@@ static void __devexit bnx2x_remove_one(
        }
        bp = netdev_priv(dev);
  
 +#ifdef BCM_CNIC
 +      /* Delete storage MAC address */
 +      if (!NO_FCOE(bp)) {
 +              rtnl_lock();
 +              dev_addr_del(bp->dev, bp->fip_mac, NETDEV_HW_ADDR_T_SAN);
 +              rtnl_unlock();
 +      }
 +#endif
 +
        unregister_netdev(dev);
  
        /* Delete all NAPI objects */
        bnx2x_del_all_napi(bp);
  
 +      /* Power on: we can't let PCI layer write to us while we are in D3 */
 +      bnx2x_set_power_state(bp, PCI_D0);
 +
        /* Disable MSI/MSI-X */
        bnx2x_disable_msi(bp);
  
 +      /* Power off */
 +      bnx2x_set_power_state(bp, PCI_D3hot);
 +
        /* Make sure RESET task is not scheduled before continuing */
        cancel_delayed_work_sync(&bp->reset_task);
  
@@@ -9558,7 -9202,7 +9558,7 @@@ static int bnx2x_eeh_nic_unload(struct 
        /* Free SKBs, SGEs, TPA pool and driver internals */
        bnx2x_free_skbs(bp);
  
 -      for_each_queue(bp, i)
 +      for_each_rx_queue(bp, i)
                bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
  
        bnx2x_free_mem(bp);
@@@ -9785,8 -9429,7 +9785,8 @@@ static void bnx2x_cnic_sp_post(struct b
                                break;
                        else
                                atomic_dec(&bp->spq_left);
 -              } else if (type == ISCSI_CONNECTION_TYPE) {
 +              } else if ((type == ISCSI_CONNECTION_TYPE) ||
 +                         (type == FCOE_CONNECTION_TYPE)) {
                        if (bp->cnic_spq_pending >=
                            bp->cnic_eth_dev.max_kwqe_pending)
                                break;
@@@ -9933,9 -9576,6 +9933,9 @@@ static int bnx2x_drv_ctl(struct net_dev
        case DRV_CTL_START_L2_CMD: {
                u32 cli = ctl->data.ring.client_id;
  
 +              /* Clear the FCoE FIP and ALL ENODE MAC addresses first */
 +              bnx2x_del_fcoe_eth_macs(bp);
 +
                /* Set iSCSI MAC address */
                bnx2x_set_iscsi_eth_mac_addr(bp, 1);
  
@@@ -10057,6 -9697,10 +10057,6 @@@ static int bnx2x_unregister_cnic(struc
        struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
  
        mutex_lock(&bp->cnic_mutex);
 -      if (bp->cnic_flags & BNX2X_CNIC_FLAG_MAC_SET) {
 -              bp->cnic_flags &= ~BNX2X_CNIC_FLAG_MAC_SET;
 -              bnx2x_set_iscsi_eth_mac_addr(bp, 0);
 -      }
        cp->drv_state = 0;
        rcu_assign_pointer(bp->cnic_ops, NULL);
        mutex_unlock(&bp->cnic_mutex);
@@@ -10087,9 -9731,7 +10087,9 @@@ struct cnic_eth_dev *bnx2x_cnic_probe(s
        cp->drv_ctl = bnx2x_drv_ctl;
        cp->drv_register_cnic = bnx2x_register_cnic;
        cp->drv_unregister_cnic = bnx2x_unregister_cnic;
 -      cp->iscsi_l2_client_id = BNX2X_ISCSI_ETH_CL_ID;
 +      cp->fcoe_init_cid = BNX2X_FCOE_ETH_CID;
 +      cp->iscsi_l2_client_id = BNX2X_ISCSI_ETH_CL_ID +
 +              BP_E1HVN(bp) * NONE_ETH_CONTEXT_USE;
        cp->iscsi_l2_cid = BNX2X_ISCSI_ETH_CID;
  
        DP(BNX2X_MSG_SP, "page_size %d, tbl_offset %d, tbl_lines %d, "
@@@ -18,8 -18,6 +18,8 @@@
   * WR - Write Clear (write 1 to clear the bit)
   *
   */
 +#ifndef BNX2X_REG_H
 +#define BNX2X_REG_H
  
  #define ATC_ATC_INT_STS_REG_ADDRESS_ERROR                      (0x1<<0)
  #define ATC_ATC_INT_STS_REG_ATC_GPA_MULTIPLE_HITS              (0x1<<2)
@@@ -41,8 -39,6 +41,8 @@@
  #define BRB1_REG_BRB1_PRTY_MASK                                0x60138
  /* [R 4] Parity register #0 read */
  #define BRB1_REG_BRB1_PRTY_STS                                         0x6012c
 +/* [RC 4] Parity register #0 read clear */
 +#define BRB1_REG_BRB1_PRTY_STS_CLR                             0x60130
  /* [RW 10] At address BRB1_IND_FREE_LIST_PRS_CRDT initialize free head. At
   * address BRB1_IND_FREE_LIST_PRS_CRDT+1 initialize free tail. At address
   * BRB1_IND_FREE_LIST_PRS_CRDT+2 initialize parser initial credit. Warning -
  #define CCM_REG_CCM_INT_MASK                                   0xd01e4
  /* [R 11] Interrupt register #0 read */
  #define CCM_REG_CCM_INT_STS                                    0xd01d8
 +/* [RW 27] Parity mask register #0 read/write */
 +#define CCM_REG_CCM_PRTY_MASK                                  0xd01f4
  /* [R 27] Parity register #0 read */
  #define CCM_REG_CCM_PRTY_STS                                   0xd01e8
 +/* [RC 27] Parity register #0 read clear */
 +#define CCM_REG_CCM_PRTY_STS_CLR                               0xd01ec
  /* [RW 3] The size of AG context region 0 in REG-pairs. Designates the MS
     REG-pair number (e.g. if region 0 is 6 REG-pairs; the value should be 5).
     Is used to determine the number of the AG context REG-pairs written back;
  #define CDU_REG_CDU_PRTY_MASK                                  0x10104c
  /* [R 5] Parity register #0 read */
  #define CDU_REG_CDU_PRTY_STS                                   0x101040
 +/* [RC 5] Parity register #0 read clear */
 +#define CDU_REG_CDU_PRTY_STS_CLR                               0x101044
  /* [RC 32] logging of error data in case of a CDU load error:
     {expected_cid[15:0]; xpected_type[2:0]; xpected_region[2:0]; ctive_error;
     ype_error; ctual_active; ctual_compressed_context}; */
  #define CFC_REG_CFC_PRTY_MASK                                  0x104118
  /* [R 4] Parity register #0 read */
  #define CFC_REG_CFC_PRTY_STS                                   0x10410c
 +/* [RC 4] Parity register #0 read clear */
 +#define CFC_REG_CFC_PRTY_STS_CLR                               0x104110
  /* [RW 21] CID cam access (21:1 - Data; alid - 0) */
  #define CFC_REG_CID_CAM                                        0x104800
  #define CFC_REG_CONTROL0                                       0x104028
  #define CSDM_REG_CSDM_PRTY_MASK                                0xc22bc
  /* [R 11] Parity register #0 read */
  #define CSDM_REG_CSDM_PRTY_STS                                         0xc22b0
 +/* [RC 11] Parity register #0 read clear */
 +#define CSDM_REG_CSDM_PRTY_STS_CLR                             0xc22b4
  #define CSDM_REG_ENABLE_IN1                                    0xc2238
  #define CSDM_REG_ENABLE_IN2                                    0xc223c
  #define CSDM_REG_ENABLE_OUT1                                   0xc2240
  /* [R 32] Parity register #0 read */
  #define CSEM_REG_CSEM_PRTY_STS_0                               0x200124
  #define CSEM_REG_CSEM_PRTY_STS_1                               0x200134
 +/* [RC 32] Parity register #0 read clear */
 +#define CSEM_REG_CSEM_PRTY_STS_CLR_0                           0x200128
 +#define CSEM_REG_CSEM_PRTY_STS_CLR_1                           0x200138
  #define CSEM_REG_ENABLE_IN                                     0x2000a4
  #define CSEM_REG_ENABLE_OUT                                    0x2000a8
  /* [RW 32] This address space contains all registers and memories that are
  #define DBG_REG_DBG_PRTY_MASK                                  0xc0a8
  /* [R 1] Parity register #0 read */
  #define DBG_REG_DBG_PRTY_STS                                   0xc09c
 +/* [RC 1] Parity register #0 read clear */
 +#define DBG_REG_DBG_PRTY_STS_CLR                               0xc0a0
  /* [RW 1] When set the DMAE will process the commands as in E1.5. 1.The
   * function that is used is always SRC-PCI; 2.VF_Valid = 0; 3.VFID=0;
   * 4.Completion function=0; 5.Error handling=0 */
  #define DMAE_REG_DMAE_PRTY_MASK                                0x102064
  /* [R 4] Parity register #0 read */
  #define DMAE_REG_DMAE_PRTY_STS                                         0x102058
 +/* [RC 4] Parity register #0 read clear */
 +#define DMAE_REG_DMAE_PRTY_STS_CLR                             0x10205c
  /* [RW 1] Command 0 go. */
  #define DMAE_REG_GO_C0                                                 0x102080
  /* [RW 1] Command 1 go. */
  #define DORQ_REG_DORQ_PRTY_MASK                                0x170190
  /* [R 2] Parity register #0 read */
  #define DORQ_REG_DORQ_PRTY_STS                                         0x170184
 +/* [RC 2] Parity register #0 read clear */
 +#define DORQ_REG_DORQ_PRTY_STS_CLR                             0x170188
  /* [RW 8] The address to write the DPM CID to STORM. */
  #define DORQ_REG_DPM_CID_ADDR                                  0x170044
  /* [RW 5] The DPM mode CID extraction offset. */
  /* [R 1] data available for error memory. If this bit is clear do not read
   * from error_handling_memory. */
  #define IGU_REG_ERROR_HANDLING_DATA_VALID                      0x130130
 +/* [RW 11] Parity mask register #0 read/write */
 +#define IGU_REG_IGU_PRTY_MASK                                  0x1300a8
  /* [R 11] Parity register #0 read */
  #define IGU_REG_IGU_PRTY_STS                                   0x13009c
 +/* [RC 11] Parity register #0 read clear */
 +#define IGU_REG_IGU_PRTY_STS_CLR                               0x1300a0
  /* [R 4] Debug: int_handle_fsm */
  #define IGU_REG_INT_HANDLE_FSM                                         0x130050
  #define IGU_REG_LEADING_EDGE_LATCH                             0x130134
  #define MISC_REG_MISC_PRTY_MASK                                0xa398
  /* [R 1] Parity register #0 read */
  #define MISC_REG_MISC_PRTY_STS                                         0xa38c
 +/* [RC 1] Parity register #0 read clear */
 +#define MISC_REG_MISC_PRTY_STS_CLR                             0xa390
  #define MISC_REG_NIG_WOL_P0                                    0xa270
  #define MISC_REG_NIG_WOL_P1                                    0xa274
  /* [R 1] If set indicate that the pcie_rst_b was asserted without perst
     (~misc_registers_sw_timer_cfg_4.sw_timer_cfg_4[1] ) is set */
  #define MISC_REG_SW_TIMER_RELOAD_VAL_4                                 0xa2fc
  /* [RW 32] the value of the counter for sw timers1-8. there are 8 addresses
-    in this register. addres 0 - timer 1; address 1 - timer 2, ...  address 7 -
+    in this register. address 0 - timer 1; address 1 - timer 2, ...  address 7 -
     timer 8 */
  #define MISC_REG_SW_TIMER_VAL                                  0xa5c0
  /* [RW 1] Set by the MCP to remember if one or more of the drivers is/are
  #define NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_NO_VLAN  (0x1<<4)
  #define NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_UNCST    (0x1<<2)
  #define NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_VLAN     (0x1<<3)
 +#define NIG_LLH0_XCM_MASK_REG_LLH0_XCM_MASK_BCN                        (0x1<<0)
 +#define NIG_LLH1_XCM_MASK_REG_LLH1_XCM_MASK_BCN                        (0x1<<0)
  #define NIG_MASK_INTERRUPT_PORT0_REG_MASK_EMAC0_MISC_MI_INT    (0x1<<0)
  #define NIG_MASK_INTERRUPT_PORT0_REG_MASK_SERDES0_LINK_STATUS  (0x1<<9)
  #define NIG_MASK_INTERRUPT_PORT0_REG_MASK_XGXS0_LINK10G        (0x1<<15)
     ~ppp_enable.ppp_enable = 0 and pause_enable.pause_enable =0 for the same
     port */
  #define NIG_REG_LLFC_ENABLE_0                                  0x16208
 +#define NIG_REG_LLFC_ENABLE_1                                  0x1620c
  /* [RW 16] classes are high-priority for port0 */
  #define NIG_REG_LLFC_HIGH_PRIORITY_CLASSES_0                   0x16058
 +#define NIG_REG_LLFC_HIGH_PRIORITY_CLASSES_1                   0x1605c
  /* [RW 16] classes are low-priority for port0 */
  #define NIG_REG_LLFC_LOW_PRIORITY_CLASSES_0                    0x16060
 +#define NIG_REG_LLFC_LOW_PRIORITY_CLASSES_1                    0x16064
  /* [RW 1] Output enable of message to LLFC BMAC IF for port0 */
  #define NIG_REG_LLFC_OUT_EN_0                                  0x160c8
 +#define NIG_REG_LLFC_OUT_EN_1                                  0x160cc
  #define NIG_REG_LLH0_ACPI_PAT_0_CRC                            0x1015c
  #define NIG_REG_LLH0_ACPI_PAT_6_LEN                            0x10154
  #define NIG_REG_LLH0_BRB1_DRV_MASK                             0x10244
  /* [RW 8] event id for llh0 */
  #define NIG_REG_LLH0_EVENT_ID                                  0x10084
  #define NIG_REG_LLH0_FUNC_EN                                   0x160fc
 +#define NIG_REG_LLH0_FUNC_MEM                                  0x16180
 +#define NIG_REG_LLH0_FUNC_MEM_ENABLE                           0x16140
  #define NIG_REG_LLH0_FUNC_VLAN_ID                              0x16100
  /* [RW 1] Determine the IP version to look for in
     ~nig_registers_llh0_dest_ip_0.llh0_dest_ip_0. 0 - IPv6; 1-IPv4 */
  #define NIG_REG_LLH1_ERROR_MASK                                0x10090
  /* [RW 8] event id for llh1 */
  #define NIG_REG_LLH1_EVENT_ID                                  0x10088
 +#define NIG_REG_LLH1_FUNC_MEM                                  0x161c0
 +#define NIG_REG_LLH1_FUNC_MEM_ENABLE                           0x16160
 +#define NIG_REG_LLH1_FUNC_MEM_SIZE                             16
  /* [RW 8] init credit counter for port1 in LLH */
  #define NIG_REG_LLH1_XCM_INIT_CREDIT                           0x10564
  #define NIG_REG_LLH1_XCM_MASK                                  0x10134
     ~safc_enable.safc_enable = 0 and ppp_enable.ppp_enable =0 for the same
     port */
  #define NIG_REG_PAUSE_ENABLE_0                                         0x160c0
 +#define NIG_REG_PAUSE_ENABLE_1                                         0x160c4
  /* [RW 1] Input enable for RX PBF LP IF */
  #define NIG_REG_PBF_LB_IN_EN                                   0x100b4
  /* [RW 1] Value of this register will be transmitted to port swap when
     ~nig_registers_strap_override.strap_override =1 */
  #define NIG_REG_PORT_SWAP                                      0x10394
 +/* [RW 1] PPP enable for port0. This register may be set to 1 only when
 + * ~safc_enable.safc_enable = 0 and pause_enable.pause_enable = 0 for the
 + * same port */
 +#define NIG_REG_PPP_ENABLE_0                                   0x160b0
 +#define NIG_REG_PPP_ENABLE_1                                   0x160b4
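
Per the comment above, the PPP enable bit may only be set once pause is disabled on the same port. A hedged ordering sketch using only registers defined in this file (disabling SAFC is assumed to have happened elsewhere):

	/* Hedged sketch: clear pause_enable before setting ppp_enable for
	 * the same port; SAFC is assumed to be disabled already. */
	static void bnx2x_ppp_enable_sketch(struct bnx2x *bp, int port)
	{
		u32 ppp = port ? NIG_REG_PPP_ENABLE_1 : NIG_REG_PPP_ENABLE_0;
		u32 pause = port ? NIG_REG_PAUSE_ENABLE_1 :
				   NIG_REG_PAUSE_ENABLE_0;

		REG_WR(bp, pause, 0);	/* pause_enable = 0 */
		REG_WR(bp, ppp, 1);	/* now PPP may be enabled */
	}
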
  /* [RW 1] output enable for RX parser descriptor IF */
  #define NIG_REG_PRS_EOP_OUT_EN                                         0x10104
  /* [RW 1] Input enable for RX parser request IF */
  #define NIG_STATUS_INTERRUPT_PORT0_REG_STATUS_XGXS0_LINK10G    (0x1<<15)
  #define NIG_STATUS_INTERRUPT_PORT0_REG_STATUS_XGXS0_LINK_STATUS  (0xf<<18)
  #define NIG_STATUS_INTERRUPT_PORT0_REG_STATUS_XGXS0_LINK_STATUS_SIZE 18
 +/* [RW 31] The upper bound of the weight of COS0 in the ETS command arbiter. */
 +#define PBF_REG_COS0_UPPER_BOUND                               0x15c05c
 +/* [RW 31] The weight of COS0 in the ETS command arbiter. */
 +#define PBF_REG_COS0_WEIGHT                                    0x15c054
 +/* [RW 31] The upper bound of the weight of COS1 in the ETS command arbiter. */
 +#define PBF_REG_COS1_UPPER_BOUND                               0x15c060
 +/* [RW 31] The weight of COS1 in the ETS command arbiter. */
 +#define PBF_REG_COS1_WEIGHT                                    0x15c058
  /* [RW 1] Disable processing further tasks from port 0 (after ending the
     current task in process). */
  #define PBF_REG_DISABLE_NEW_TASK_PROC_P0                       0x14005c
     current task in process). */
  #define PBF_REG_DISABLE_NEW_TASK_PROC_P4                       0x14006c
  #define PBF_REG_DISABLE_PF                                     0x1402e8
 +/* [RW 1] Indicates that ETS is performed between the COSes in the command
 + * arbiter. If reset, strict priority w/ anti-starvation will be performed
 + * w/o WFQ. */
 +#define PBF_REG_ETS_ENABLED                                    0x15c050
  /* [RW 6] Bit-map indicating which L2 hdrs may appear after the basic
   * Ethernet header. */
  #define PBF_REG_HDRS_AFTER_BASIC                               0x15c0a8
 +/* [RW 1] Indicates which COS is connected to the highest priority in the
 + * command arbiter. */
 +#define PBF_REG_HIGH_PRIORITY_COS_NUM                          0x15c04c
  #define PBF_REG_IF_ENABLE_REG                                  0x140044
  /* [RW 1] Init bit. When set the initial credits are copied to the credit
     registers (except the port credits). Should be set and then reset after
  #define PBF_REG_MAC_LB_ENABLE                                  0x140040
  /* [RW 6] Bit-map indicating which headers must appear in the packet */
  #define PBF_REG_MUST_HAVE_HDRS                                         0x15c0c4
 +/* [RW 16] The number of strict priority arbitration slots between 2 RR
 + * arbitration slots. A value of 0 means no strict priority cycles; i.e. the
 + * strict-priority w/ anti-starvation arbiter is an RR arbiter. */
 +#define PBF_REG_NUM_STRICT_ARB_SLOTS                           0x15c064
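
Taken together, the new PBF registers describe a two-COS ETS command arbiter: per-COS weights and upper bounds, a strict-priority slot count, and a global enable. A hedged configuration sketch (the 2:1 weights and bounds are illustrative values, not taken from the driver):

	/* Illustrative ETS setup: 2:1 WFQ between COS0 and COS1, no
	 * strict-priority slots (pure weighted round-robin). */
	static void bnx2x_ets_sketch(struct bnx2x *bp)
	{
		REG_WR(bp, PBF_REG_COS0_WEIGHT, 2);
		REG_WR(bp, PBF_REG_COS1_WEIGHT, 1);
		REG_WR(bp, PBF_REG_COS0_UPPER_BOUND, 0x3ff);
		REG_WR(bp, PBF_REG_COS1_UPPER_BOUND, 0x3ff);
		REG_WR(bp, PBF_REG_NUM_STRICT_ARB_SLOTS, 0);
		REG_WR(bp, PBF_REG_ETS_ENABLED, 1);
	}
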
  /* [RW 10] Port 0 threshold used by arbiter in 16 byte lines used when pause
     is not supported. */
  #define PBF_REG_P0_ARB_THRSH                                   0x1400e4
  #define PBF_REG_PBF_INT_MASK                                   0x1401d4
  /* [R 5] Interrupt register #0 read */
  #define PBF_REG_PBF_INT_STS                                    0x1401c8
 +/* [RW 20] Parity mask register #0 read/write */
 +#define PBF_REG_PBF_PRTY_MASK                                  0x1401e4
 +/* [RC 20] Parity register #0 read clear */
 +#define PBF_REG_PBF_PRTY_STS_CLR                               0x1401dc
  #define PB_REG_CONTROL                                                 0
  /* [RW 2] Interrupt mask register #0 read/write */
  #define PB_REG_PB_INT_MASK                                     0x28
  #define PB_REG_PB_PRTY_MASK                                    0x38
  /* [R 4] Parity register #0 read */
  #define PB_REG_PB_PRTY_STS                                     0x2c
 +/* [RC 4] Parity register #0 read clear */
 +#define PB_REG_PB_PRTY_STS_CLR                                         0x30
  #define PGLUE_B_PGLUE_B_INT_STS_REG_ADDRESS_ERROR              (0x1<<0)
  #define PGLUE_B_PGLUE_B_INT_STS_REG_CSSNOOP_FIFO_OVERFLOW      (0x1<<8)
  #define PGLUE_B_PGLUE_B_INT_STS_REG_INCORRECT_RCV_BEHAVIOR     (0x1<<1)
  #define PRS_REG_PRS_PRTY_MASK                                  0x401a4
  /* [R 8] Parity register #0 read */
  #define PRS_REG_PRS_PRTY_STS                                   0x40198
 +/* [RC 8] Parity register #0 read clear */
 +#define PRS_REG_PRS_PRTY_STS_CLR                               0x4019c
  /* [RW 8] Context region for pure acknowledge packets. Used in CFC load
     request message */
  #define PRS_REG_PURE_REGIONS                                   0x40024
  /* [R 32] Parity register #0 read */
  #define PXP2_REG_PXP2_PRTY_STS_0                               0x12057c
  #define PXP2_REG_PXP2_PRTY_STS_1                               0x12058c
 +/* [RC 32] Parity register #0 read clear */
 +#define PXP2_REG_PXP2_PRTY_STS_CLR_0                           0x120580
 +#define PXP2_REG_PXP2_PRTY_STS_CLR_1                           0x120590
  /* [R 1] Debug only: The 'almost full' indication from each fifo (gives
     indication about backpressure) */
  #define PXP2_REG_RD_ALMOST_FULL_0                              0x120424
  #define PXP_REG_PXP_PRTY_MASK                                  0x103094
  /* [R 26] Parity register #0 read */
  #define PXP_REG_PXP_PRTY_STS                                   0x103088
 +/* [RC 27] Parity register #0 read clear */
 +#define PXP_REG_PXP_PRTY_STS_CLR                               0x10308c
  /* [RW 4] The activity counter initial increment value sent in the load
     request */
  #define QM_REG_ACTCTRINITVAL_0                                         0x168040
  #define QM_REG_QM_PRTY_MASK                                    0x168454
  /* [R 12] Parity register #0 read */
  #define QM_REG_QM_PRTY_STS                                     0x168448
 +/* [RC 12] Parity register #0 read clear */
 +#define QM_REG_QM_PRTY_STS_CLR                                         0x16844c
  /* [R 32] Current queues in pipeline: Queues from 32 to 63 */
  #define QM_REG_QSTATUS_HIGH                                    0x16802c
  /* [R 32] Current queues in pipeline: Queues from 96 to 127 */
  #define QM_REG_WRRWEIGHTS_9                                    0x168848
  /* [R 6] Keep the fill level of the fifo from write client 1 */
  #define QM_REG_XQM_WRC_FIFOLVL                                         0x168000
 +/* [W 1] reset the parity interrupt */
 +#define SEM_FAST_REG_PARITY_RST                                        0x18840
  #define SRC_REG_COUNTFREE0                                     0x40500
  /* [RW 1] If clear the searcher is compatible with E1 A0 - supporting only
     two ports. If set the searcher supports 8 functions. */
  #define SRC_REG_SRC_PRTY_MASK                                  0x404c8
  /* [R 3] Parity register #0 read */
  #define SRC_REG_SRC_PRTY_STS                                   0x404bc
 +/* [RC 3] Parity register #0 read clear */
 +#define SRC_REG_SRC_PRTY_STS_CLR                               0x404c0
  /* [R 4] Used to read the value of the XX protection CAM occupancy counter. */
  #define TCM_REG_CAM_OCCUP                                      0x5017c
  /* [RW 1] CDU AG read Interface enable. If 0 - the request input is
  #define TCM_REG_TCM_INT_MASK                                   0x501dc
  /* [R 11] Interrupt register #0 read */
  #define TCM_REG_TCM_INT_STS                                    0x501d0
 +/* [RW 27] Parity mask register #0 read/write */
 +#define TCM_REG_TCM_PRTY_MASK                                  0x501ec
  /* [R 27] Parity register #0 read */
  #define TCM_REG_TCM_PRTY_STS                                   0x501e0
 +/* [RC 27] Parity register #0 read clear */
 +#define TCM_REG_TCM_PRTY_STS_CLR                               0x501e4
  /* [RW 3] The size of AG context region 0 in REG-pairs. Designates the MS
     REG-pair number (e.g. if region 0 is 6 REG-pairs; the value should be 5).
     Is used to determine the number of the AG context REG-pairs written back;
  #define TM_REG_TM_INT_MASK                                     0x1640fc
  /* [R 1] Interrupt register #0 read */
  #define TM_REG_TM_INT_STS                                      0x1640f0
 +/* [RW 7] Parity mask register #0 read/write */
 +#define TM_REG_TM_PRTY_MASK                                    0x16410c
 +/* [RC 7] Parity register #0 read clear */
 +#define TM_REG_TM_PRTY_STS_CLR                                         0x164104
  /* [RW 8] The event id for aggregated interrupt 0 */
  #define TSDM_REG_AGG_INT_EVENT_0                               0x42038
  #define TSDM_REG_AGG_INT_EVENT_1                               0x4203c
  #define TSDM_REG_TSDM_PRTY_MASK                                0x422bc
  /* [R 11] Parity register #0 read */
  #define TSDM_REG_TSDM_PRTY_STS                                         0x422b0
 +/* [RC 11] Parity register #0 read clear */
 +#define TSDM_REG_TSDM_PRTY_STS_CLR                             0x422b4
  /* [RW 5] The number of time_slots in the arbitration cycle */
  #define TSEM_REG_ARB_CYCLE_SIZE                                0x180034
  /* [RW 3] The source that is associated with arbitration element 0. Source
  #define TSEM_REG_SLOW_EXT_STORE_EMPTY                          0x1802a0
  /* [RW 8] List of free threads . There is a bit per thread. */
  #define TSEM_REG_THREADS_LIST                                  0x1802e4
 +/* [RC 32] Parity register #0 read clear */
 +#define TSEM_REG_TSEM_PRTY_STS_CLR_0                           0x180118
 +#define TSEM_REG_TSEM_PRTY_STS_CLR_1                           0x180128
  /* [RW 3] The arbitration scheme of time_slot 0 */
  #define TSEM_REG_TS_0_AS                                       0x180038
  /* [RW 3] The arbitration scheme of time_slot 10 */
  #define UCM_REG_UCM_INT_STS                                    0xe01c8
  /* [R 27] Parity register #0 read */
  #define UCM_REG_UCM_PRTY_STS                                   0xe01d8
 +/* [RC 27] Parity register #0 read clear */
 +#define UCM_REG_UCM_PRTY_STS_CLR                               0xe01dc
  /* [RW 2] The size of AG context region 0 in REG-pairs. Designates the MS
     REG-pair number (e.g. if region 0 is 6 REG-pairs; the value should be 5).
     Is used to determine the number of the AG context REG-pairs written back;
  #define USDM_REG_USDM_PRTY_MASK                                0xc42c0
  /* [R 11] Parity register #0 read */
  #define USDM_REG_USDM_PRTY_STS                                         0xc42b4
 +/* [RC 11] Parity register #0 read clear */
 +#define USDM_REG_USDM_PRTY_STS_CLR                             0xc42b8
  /* [RW 5] The number of time_slots in the arbitration cycle */
  #define USEM_REG_ARB_CYCLE_SIZE                                0x300034
  /* [RW 3] The source that is associated with arbitration element 0. Source
  /* [R 32] Parity register #0 read */
  #define USEM_REG_USEM_PRTY_STS_0                               0x300124
  #define USEM_REG_USEM_PRTY_STS_1                               0x300134
 +/* [RC 32] Parity register #0 read clear */
 +#define USEM_REG_USEM_PRTY_STS_CLR_0                           0x300128
 +#define USEM_REG_USEM_PRTY_STS_CLR_1                           0x300138
  /* [W 7] VF or PF ID for reset error bit. Values 0-63 reset error bit for 64
   * VF; values 64-67 reset error for 4 PF; values 68-127 are not valid. */
  #define USEM_REG_VFPF_ERR_NUM                                  0x300380
  #define XSDM_REG_XSDM_PRTY_MASK                                0x1662bc
  /* [R 11] Parity register #0 read */
  #define XSDM_REG_XSDM_PRTY_STS                                         0x1662b0
 +/* [RC 11] Parity register #0 read clear */
 +#define XSDM_REG_XSDM_PRTY_STS_CLR                             0x1662b4
  /* [RW 5] The number of time_slots in the arbitration cycle */
  #define XSEM_REG_ARB_CYCLE_SIZE                                0x280034
  /* [RW 3] The source that is associated with arbitration element 0. Source
  /* [R 32] Parity register #0 read */
  #define XSEM_REG_XSEM_PRTY_STS_0                               0x280124
  #define XSEM_REG_XSEM_PRTY_STS_1                               0x280134
 +/* [RC 32] Parity register #0 read clear */
 +#define XSEM_REG_XSEM_PRTY_STS_CLR_0                           0x280128
 +#define XSEM_REG_XSEM_PRTY_STS_CLR_1                           0x280138
  #define MCPR_NVM_ACCESS_ENABLE_EN                              (1L<<0)
  #define MCPR_NVM_ACCESS_ENABLE_WR_EN                           (1L<<1)
  #define MCPR_NVM_ADDR_NVM_ADDR_VALUE                           (0xffffffL<<0)
  #define EMAC_REG_EMAC_TX_MODE                                  0xbc
  #define EMAC_REG_EMAC_TX_STAT_AC                               0x280
  #define EMAC_REG_EMAC_TX_STAT_AC_COUNT                                 22
 +#define EMAC_REG_RX_PFC_MODE                                   0x320
 +#define EMAC_REG_RX_PFC_MODE_PRIORITIES                                (1L<<2)
 +#define EMAC_REG_RX_PFC_MODE_RX_EN                             (1L<<1)
 +#define EMAC_REG_RX_PFC_MODE_TX_EN                             (1L<<0)
 +#define EMAC_REG_RX_PFC_PARAM                                  0x324
 +#define EMAC_REG_RX_PFC_PARAM_OPCODE_BITSHIFT                  0
 +#define EMAC_REG_RX_PFC_PARAM_PRIORITY_EN_BITSHIFT             16
 +#define EMAC_REG_RX_PFC_STATS_XOFF_RCVD                                0x328
 +#define EMAC_REG_RX_PFC_STATS_XOFF_RCVD_COUNT                  (0xffff<<0)
 +#define EMAC_REG_RX_PFC_STATS_XOFF_SENT                                0x330
 +#define EMAC_REG_RX_PFC_STATS_XOFF_SENT_COUNT                  (0xffff<<0)
 +#define EMAC_REG_RX_PFC_STATS_XON_RCVD                                 0x32c
 +#define EMAC_REG_RX_PFC_STATS_XON_RCVD_COUNT                   (0xffff<<0)
 +#define EMAC_REG_RX_PFC_STATS_XON_SENT                                 0x334
 +#define EMAC_REG_RX_PFC_STATS_XON_SENT_COUNT                   (0xffff<<0)
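
The new EMAC PFC statistics registers hold 16-bit XOFF/XON frame counters. A hedged sketch of harvesting one (emac_base as the per-port EMAC GRC base is an assumption here):

	/* Hedged sketch: read and mask a 16-bit PFC counter. */
	static u32 bnx2x_pfc_xoff_rcvd_sketch(struct bnx2x *bp, u32 emac_base)
	{
		u32 val = REG_RD(bp, emac_base +
				     EMAC_REG_RX_PFC_STATS_XOFF_RCVD);

		return val & EMAC_REG_RX_PFC_STATS_XOFF_RCVD_COUNT;
	}
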
  #define EMAC_RX_MODE_FLOW_EN                                   (1L<<2)
 +#define EMAC_RX_MODE_KEEP_MAC_CONTROL                          (1L<<3)
  #define EMAC_RX_MODE_KEEP_VLAN_TAG                             (1L<<10)
  #define EMAC_RX_MODE_PROMISCUOUS                               (1L<<8)
  #define EMAC_RX_MODE_RESET                                     (1L<<0)
@@@ -6389,4 -6264,3 +6389,4 @@@ static inline u8 calc_crc8(u32 data, u
  }
  
  
 +#endif /* BNX2X_REG_H */
@@@ -840,7 -840,7 +840,7 @@@ static int ad_lacpdu_send(struct port *
        lacpdu_header = (struct lacpdu_header *)skb_put(skb, length);
  
        memcpy(lacpdu_header->hdr.h_dest, lacpdu_mcast_addr, ETH_ALEN);
-       /* Note: source addres is set to be the member's PERMANENT address,
+       /* Note: source address is set to be the member's PERMANENT address,
           because we use it to identify loopback lacpdus in receive. */
        memcpy(lacpdu_header->hdr.h_source, slave->perm_hwaddr, ETH_ALEN);
        lacpdu_header->hdr.h_proto = PKT_TYPE_LACPDU;
@@@ -881,7 -881,7 +881,7 @@@ static int ad_marker_send(struct port *
        marker_header = (struct bond_marker_header *)skb_put(skb, length);
  
        memcpy(marker_header->hdr.h_dest, lacpdu_mcast_addr, ETH_ALEN);
-       /* Note: source addres is set to be the member's PERMANENT address,
+       /* Note: source address is set to be the member's PERMANENT address,
           because we use it to identify loopback MARKERs in receive. */
        memcpy(marker_header->hdr.h_source, slave->perm_hwaddr, ETH_ALEN);
        marker_header->hdr.h_proto = PKT_TYPE_LACPDU;
@@@ -1916,7 -1916,7 +1916,7 @@@ int bond_3ad_bind_slave(struct slave *s
                return -1;
        }
  
-       //check that the slave has not been intialized yet.
+       //check that the slave has not been initialized yet.
        if (SLAVE_AD_INFO(slave).port.slave != slave) {
  
                // port initialization
@@@ -2474,7 -2474,8 +2474,7 @@@ int bond_3ad_lacpdu_recv(struct sk_buf
                goto out;
  
        read_lock(&bond->lock);
 -      slave = bond_get_slave_by_dev((struct bonding *)netdev_priv(dev),
 -                                      orig_dev);
 +      slave = bond_get_slave_by_dev(netdev_priv(dev), orig_dev);
        if (!slave)
                goto out_unlock;
  
@@@ -607,7 -607,7 +607,7 @@@ struct t3_vpd 
   *
   *    Read a 32-bit word from a location in VPD EEPROM using the card's PCI
   *    VPD ROM capability.  A zero is written to the flag bit when the
-  *    addres is written to the control register.  The hardware device will
+  *    address is written to the control register.  The hardware device will
   *    set the flag to 1 when 4 bytes have been read into the data register.
   */
  int t3_seeprom_read(struct adapter *adapter, u32 addr, __le32 *data)
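
The handshake the comment describes maps onto the generic PCI VPD capability; a hedged sketch of the poll loop follows (the cxgb3 body itself is elided in this diff, so this is an illustration, not the driver's code):

	/* Hedged sketch of the VPD flag handshake: write the address with
	 * the flag clear, poll until the device sets it, then read data.
	 * Needs <linux/pci.h> and <linux/delay.h>. */
	static int vpd_read_dword_sketch(struct pci_dev *pdev, int cap,
					 u16 addr, u32 *data)
	{
		int attempts = 10;
		u16 val;

		pci_write_config_word(pdev, cap + PCI_VPD_ADDR,
				      addr & ~PCI_VPD_ADDR_F);
		do {
			udelay(10);
			pci_read_config_word(pdev, cap + PCI_VPD_ADDR, &val);
		} while (!(val & PCI_VPD_ADDR_F) && --attempts);

		if (!(val & PCI_VPD_ADDR_F))
			return -EIO;	/* completion flag never set */

		pci_read_config_dword(pdev, cap + PCI_VPD_DATA, data);
		return 0;
	}
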
@@@ -1562,7 -1562,7 +1562,7 @@@ static void tp_intr_handler(struct adap
                {0}
        };
  
 -      static struct intr_info tp_intr_info_t3c[] = {
 +      static const struct intr_info tp_intr_info_t3c[] = {
                {0x1fffffff, "TP parity error", -1, 1},
                {F_FLMRXFLSTEMPTY, "TP out of Rx pages", -1, 1},
                {F_FLMTXFLSTEMPTY, "TP out of Tx pages", -1, 1},
@@@ -41,7 -41,7 +41,7 @@@ struct e1000_hw
  struct e1000_hw_stats;
  
  /* Enumerated types specific to the e1000 hardware */
- /* Media Access Controlers */
+ /* Media Access Controllers */
  typedef enum {
        e1000_undefined = 0,
        e1000_82542_rev2_0,
@@@ -52,7 -52,6 +52,7 @@@
        e1000_82545,
        e1000_82545_rev_3,
        e1000_82546,
 +      e1000_ce4100,
        e1000_82546_rev_3,
        e1000_82541,
        e1000_82541_rev_2,
@@@ -210,11 -209,9 +210,11 @@@ typedef enum 
  } e1000_1000t_rx_status;
  
  typedef enum {
 -    e1000_phy_m88 = 0,
 -    e1000_phy_igp,
 -    e1000_phy_undefined = 0xFF
 +      e1000_phy_m88 = 0,
 +      e1000_phy_igp,
 +      e1000_phy_8211,
 +      e1000_phy_8201,
 +      e1000_phy_undefined = 0xFF
  } e1000_phy_type;
  
  typedef enum {
@@@ -445,7 -442,6 +445,7 @@@ void e1000_io_write(struct e1000_hw *hw
  #define E1000_DEV_ID_82547EI             0x1019
  #define E1000_DEV_ID_82547EI_MOBILE      0x101A
  #define E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3 0x10B5
 +#define E1000_DEV_ID_INTEL_CE4100_GBE    0x2E6E
  
  #define NODE_ADDRESS_SIZE 6
  #define ETH_LENGTH_OF_ADDRESS 6
@@@ -812,16 -808,6 +812,16 @@@ struct e1000_ffvt_entry 
  #define E1000_CTRL_EXT 0x00018        /* Extended Device Control - RW */
  #define E1000_FLA      0x0001C        /* Flash Access - RW */
  #define E1000_MDIC     0x00020        /* MDI Control - RW */
 +
 +extern void __iomem *ce4100_gbe_mdio_base_virt;
 +#define INTEL_CE_GBE_MDIO_RCOMP_BASE    (ce4100_gbe_mdio_base_virt)
 +#define E1000_MDIO_STS  (INTEL_CE_GBE_MDIO_RCOMP_BASE + 0)
 +#define E1000_MDIO_CMD  (INTEL_CE_GBE_MDIO_RCOMP_BASE + 4)
 +#define E1000_MDIO_DRV  (INTEL_CE_GBE_MDIO_RCOMP_BASE + 8)
 +#define E1000_MDC_CMD   (INTEL_CE_GBE_MDIO_RCOMP_BASE + 0xC)
 +#define E1000_RCOMP_CTL (INTEL_CE_GBE_MDIO_RCOMP_BASE + 0x20)
 +#define E1000_RCOMP_STS (INTEL_CE_GBE_MDIO_RCOMP_BASE + 0x24)
 +
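These CE4100 MDIO registers are offsets into the ce4100_gbe_mdio_base_virt window that the probe path ioremaps from BAR 1 (see the e1000_main.c hunks below). A hedged access sketch; the phy/reg field shifts follow the classic MDIC layout and are an assumption here, as is the use of the INTEL_CE_GBE_MDIC_* bits defined later in this header:

	/* Hedged sketch: CE4100 MDIO write through the remapped window.
	 * Needs <linux/io.h>. */
	static void ce4100_mdio_write_sketch(u32 phy, u32 reg, u16 data)
	{
		writel(INTEL_CE_GBE_MDIC_OP_WRITE | INTEL_CE_GBE_MDIC_GO |
		       (phy << 21) | (reg << 16) | data, E1000_MDIO_CMD);

		/* poll until the GO bit self-clears */
		while (readl(E1000_MDIO_CMD) & INTEL_CE_GBE_MDIC_GO)
			cpu_relax();
	}
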
  #define E1000_SCTL     0x00024        /* SerDes Control - RW */
  #define E1000_FEXTNVM  0x00028        /* Future Extended NVM register */
  #define E1000_FCAL     0x00028        /* Flow Control Address Low - RW */
  #define E1000_IMS      0x000D0        /* Interrupt Mask Set - RW */
  #define E1000_IMC      0x000D8        /* Interrupt Mask Clear - WO */
  #define E1000_IAM      0x000E0        /* Interrupt Acknowledge Auto Mask */
 +
 +/* Auxiliary Control Register. This register is CE4100 specific;
 + * the RMII/RGMII function is switched by this register - RW.
 + * The following are the bit definitions of the Auxiliary Control Register.
 + */
 +#define E1000_CTL_AUX  0x000E0
 +#define E1000_CTL_AUX_END_SEL_SHIFT     10
 +#define E1000_CTL_AUX_ENDIANESS_SHIFT   8
 +#define E1000_CTL_AUX_RGMII_RMII_SHIFT  0
 +
 +/* descriptor and packet transfer use CTL_AUX.ENDIANESS */
 +#define E1000_CTL_AUX_DES_PKT   (0x0 << E1000_CTL_AUX_END_SEL_SHIFT)
 +/* descriptor use CTL_AUX.ENDIANESS, packet use default */
 +#define E1000_CTL_AUX_DES       (0x1 << E1000_CTL_AUX_END_SEL_SHIFT)
 +/* descriptor use default, packet use CTL_AUX.ENDIANESS */
 +#define E1000_CTL_AUX_PKT       (0x2 << E1000_CTL_AUX_END_SEL_SHIFT)
 +/* all use CTL_AUX.ENDIANESS */
 +#define E1000_CTL_AUX_ALL       (0x3 << E1000_CTL_AUX_END_SEL_SHIFT)
 +
 +#define E1000_CTL_AUX_RGMII     (0x0 << E1000_CTL_AUX_RGMII_RMII_SHIFT)
 +#define E1000_CTL_AUX_RMII      (0x1 << E1000_CTL_AUX_RGMII_RMII_SHIFT)
 +
 +/* LW little endian, Byte big endian */
 +#define E1000_CTL_AUX_LWLE_BBE  (0x0 << E1000_CTL_AUX_ENDIANESS_SHIFT)
 +#define E1000_CTL_AUX_LWLE_BLE  (0x1 << E1000_CTL_AUX_ENDIANESS_SHIFT)
 +#define E1000_CTL_AUX_LWBE_BBE  (0x2 << E1000_CTL_AUX_ENDIANESS_SHIFT)
 +#define E1000_CTL_AUX_LWBE_BLE  (0x3 << E1000_CTL_AUX_ENDIANESS_SHIFT)
 +
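E1000_CTL_AUX selects RGMII vs. RMII and the descriptor/packet endianness on CE4100. A hedged sketch of programming it (ew32() is the driver's MMIO write helper; the particular combination is illustrative, not a value the driver uses):

	/* Hedged sketch: RMII mode, CTL_AUX endianness applied to both
	 * descriptors and packets, LW little endian / byte little endian. */
	static void ce4100_aux_setup_sketch(struct e1000_hw *hw)
	{
		ew32(CTL_AUX, E1000_CTL_AUX_RMII |
			      E1000_CTL_AUX_ALL |
			      E1000_CTL_AUX_LWLE_BLE);
	}
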
  #define E1000_RCTL     0x00100        /* RX Control - RW */
  #define E1000_RDTR1    0x02820        /* RX Delay Timer (1) - RW */
  #define E1000_RDBAL1   0x02900        /* RX Descriptor Base Address Low (1) - RW */
   * in more current versions of the 8254x. Despite the difference in location,
   * the registers function in the same manner.
   */
 +#define E1000_82542_CTL_AUX  E1000_CTL_AUX
  #define E1000_82542_CTRL     E1000_CTRL
  #define E1000_82542_CTRL_DUP E1000_CTRL_DUP
  #define E1000_82542_STATUS   E1000_STATUS
@@@ -1614,11 -1571,6 +1614,11 @@@ struct e1000_hw 
  #define E1000_MDIC_INT_EN    0x20000000
  #define E1000_MDIC_ERROR     0x40000000
  
 +#define INTEL_CE_GBE_MDIC_OP_WRITE      0x04000000
 +#define INTEL_CE_GBE_MDIC_OP_READ       0x00000000
 +#define INTEL_CE_GBE_MDIC_GO            0x80000000
 +#define INTEL_CE_GBE_MDIC_READ_ERROR    0x80000000
 +
  #define E1000_KUMCTRLSTA_MASK           0x0000FFFF
  #define E1000_KUMCTRLSTA_OFFSET         0x001F0000
  #define E1000_KUMCTRLSTA_OFFSET_SHIFT   16
@@@ -2919,11 -2871,6 +2919,11 @@@ struct e1000_host_command_info 
  #define M88E1111_I_PHY_ID  0x01410CC0
  #define L1LXT971A_PHY_ID   0x001378E0
  
 +#define RTL8211B_PHY_ID    0x001CC910
 +#define RTL8201N_PHY_ID    0x8200
 +#define RTL_PHY_CTRL_FD    0x0100 /* Full duplex.0=half; 1=full */
 +#define RTL_PHY_CTRL_SPD_100    0x200000 /* Force 100Mb */
 +
  /* Bits...
   * 15-5: page
   * 4-0: register offset
  
  #include "e1000.h"
  #include <net/ip6_checksum.h>
 +#include <linux/io.h>
 +
 +/* Intel Media SOC GbE MDIO physical base address */
 +static unsigned long ce4100_gbe_mdio_base_phy;
 +/* Intel Media SOC GbE MDIO virtual base address */
 +void __iomem *ce4100_gbe_mdio_base_virt;
  
  char e1000_driver_name[] = "e1000";
  static char e1000_driver_string[] = "Intel(R) PRO/1000 Network Driver";
@@@ -85,7 -79,6 +85,7 @@@ static DEFINE_PCI_DEVICE_TABLE(e1000_pc
        INTEL_E1000_ETHERNET_DEVICE(0x108A),
        INTEL_E1000_ETHERNET_DEVICE(0x1099),
        INTEL_E1000_ETHERNET_DEVICE(0x10B5),
 +      INTEL_E1000_ETHERNET_DEVICE(0x2E6E),
        /* required last entry */
        {0,}
  };
@@@ -466,7 -459,6 +466,7 @@@ static void e1000_power_down_phy(struc
                case e1000_82545:
                case e1000_82545_rev_3:
                case e1000_82546:
 +              case e1000_ce4100:
                case e1000_82546_rev_3:
                case e1000_82541:
                case e1000_82541_rev_2:
@@@ -581,7 -573,6 +581,7 @@@ void e1000_reset(struct e1000_adapter *
        case e1000_82545:
        case e1000_82545_rev_3:
        case e1000_82546:
 +      case e1000_ce4100:
        case e1000_82546_rev_3:
                pba = E1000_PBA_48K;
                break;
@@@ -903,7 -894,6 +903,7 @@@ static int __devinit e1000_probe(struc
        static int global_quad_port_a = 0; /* global ksp3 port a indication */
        int i, err, pci_using_dac;
        u16 eeprom_data = 0;
 +      u16 tmp = 0;
        u16 eeprom_apme_mask = E1000_EEPROM_APME;
        int bars, need_ioport;
  
                 */
                dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
                pci_using_dac = 1;
 -      } else if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(32))) {
 -              dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
        } else {
 -              pr_err("No usable DMA config, aborting\n");
 -              goto err_dma;
 +              err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
 +              if (err) {
 +                      pr_err("No usable DMA config, aborting\n");
 +                      goto err_dma;
 +              }
 +              dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
        }
  
        netdev->netdev_ops = &e1000_netdev_ops;
                goto err_sw_init;
  
        err = -EIO;
 +      if (hw->mac_type == e1000_ce4100) {
 +              ce4100_gbe_mdio_base_phy = pci_resource_start(pdev, BAR_1);
 +              ce4100_gbe_mdio_base_virt = ioremap(ce4100_gbe_mdio_base_phy,
 +                                              pci_resource_len(pdev, BAR_1));
 +
 +              if (!ce4100_gbe_mdio_base_virt)
 +                      goto err_mdio_ioremap;
 +      }
  
        if (hw->mac_type >= e1000_82543) {
                netdev->features = NETIF_F_SG |
        adapter->wol = adapter->eeprom_wol;
        device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol);
  
 +      /* Auto detect PHY address */
 +      if (hw->mac_type == e1000_ce4100) {
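 +              /* Probe all 32 MDIO addresses; a PHY_ID2 value of 0 or
 +               * 0xFF means no PHY responded at that address. */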
 +              for (i = 0; i < 32; i++) {
 +                      hw->phy_addr = i;
 +                      e1000_read_phy_reg(hw, PHY_ID2, &tmp);
 +                      if (tmp == 0 || tmp == 0xFF) {
 +                              if (i == 31)
 +                                      goto err_eeprom;
 +                              continue;
 +                      } else
 +                              break;
 +              }
 +      }
 +
        /* reset the hardware with the new settings */
        e1000_reset(adapter);
  
@@@ -1203,8 -1169,6 +1203,8 @@@ err_eeprom
        kfree(adapter->rx_ring);
  err_dma:
  err_sw_init:
 +err_mdio_ioremap:
 +      iounmap(ce4100_gbe_mdio_base_virt);
        iounmap(hw->hw_addr);
  err_ioremap:
        free_netdev(netdev);
@@@ -1443,7 -1407,6 +1443,7 @@@ static bool e1000_check_64k_bound(struc
        /* First rev 82545 and 82546 must not allow any memory
         * write location to cross a 64k boundary due to errata 23 */
        if (hw->mac_type == e1000_82545 ||
 +          hw->mac_type == e1000_ce4100 ||
            hw->mac_type == e1000_82546) {
                return ((begin ^ (end - 1)) >> 16) != 0 ? false : true;
        }
@@@ -1466,12 -1429,13 +1466,12 @@@ static int e1000_setup_tx_resources(str
        int size;
  
        size = sizeof(struct e1000_buffer) * txdr->count;
 -      txdr->buffer_info = vmalloc(size);
 +      txdr->buffer_info = vzalloc(size);
        if (!txdr->buffer_info) {
                e_err(probe, "Unable to allocate memory for the Tx descriptor "
                      "ring\n");
                return -ENOMEM;
        }
 -      memset(txdr->buffer_info, 0, size);
  
        /* round up to nearest 4K */
  
@@@ -1661,12 -1625,13 +1661,12 @@@ static int e1000_setup_rx_resources(str
        int size, desc_len;
  
        size = sizeof(struct e1000_buffer) * rxdr->count;
 -      rxdr->buffer_info = vmalloc(size);
 +      rxdr->buffer_info = vzalloc(size);
        if (!rxdr->buffer_info) {
                e_err(probe, "Unable to allocate memory for the Rx descriptor "
                      "ring\n");
                return -ENOMEM;
        }
 -      memset(rxdr->buffer_info, 0, size);
  
        desc_len = sizeof(struct e1000_rx_desc);
  
@@@ -2233,7 -2198,7 +2233,7 @@@ static void e1000_set_rx_mode(struct ne
         * addresses take precedence to avoid disabling unicast filtering
         * when possible.
         *
-        * RAR 0 is used for the station MAC adddress
+        * RAR 0 is used for the station MAC address
         * if there are not 14 addresses, go ahead and clear the filters
         */
        i = 1;
@@@ -2761,7 -2726,7 +2761,7 @@@ static bool e1000_tx_csum(struct e1000_
                break;
        }
  
 -      css = skb_transport_offset(skb);
 +      css = skb_checksum_start_offset(skb);
  
        i = tx_ring->next_to_use;
        buffer_info = &tx_ring->buffer_info[i];
@@@ -52,7 -52,6 +52,7 @@@
                              (ID_LED_DEF1_DEF2))
  
  #define E1000_GCR_L1_ACT_WITHOUT_L0S_RX 0x08000000
 +#define AN_RETRY_COUNT          5 /* Autoneg Retry Count value */
  #define E1000_BASE1000T_STATUS          10
  #define E1000_IDLE_ERROR_COUNT_MASK     0xFF
  #define E1000_RECEIVE_ERROR_COUNTER     21
@@@ -75,11 -74,6 +75,11 @@@ static bool e1000_check_mng_mode_82574(
  static s32 e1000_led_on_82574(struct e1000_hw *hw);
  static void e1000_put_hw_semaphore_82571(struct e1000_hw *hw);
  static void e1000_power_down_phy_copper_82571(struct e1000_hw *hw);
 +static void e1000_put_hw_semaphore_82573(struct e1000_hw *hw);
 +static s32 e1000_get_hw_semaphore_82574(struct e1000_hw *hw);
 +static void e1000_put_hw_semaphore_82574(struct e1000_hw *hw);
 +static s32 e1000_set_d0_lplu_state_82574(struct e1000_hw *hw, bool active);
 +static s32 e1000_set_d3_lplu_state_82574(struct e1000_hw *hw, bool active);
  
  /**
   *  e1000_init_phy_params_82571 - Init PHY func ptrs.
@@@ -113,10 -107,6 +113,10 @@@ static s32 e1000_init_phy_params_82571(
        case e1000_82574:
        case e1000_82583:
                phy->type                = e1000_phy_bm;
 +              phy->ops.acquire = e1000_get_hw_semaphore_82574;
 +              phy->ops.release = e1000_put_hw_semaphore_82574;
 +              phy->ops.set_d0_lplu_state = e1000_set_d0_lplu_state_82574;
 +              phy->ops.set_d3_lplu_state = e1000_set_d3_lplu_state_82574;
                break;
        default:
                return -E1000_ERR_PHY;
  
        /* This can only be done after all function pointers are setup. */
        ret_val = e1000_get_phy_id_82571(hw);
 +      if (ret_val) {
 +              e_dbg("Error getting PHY ID\n");
 +              return ret_val;
 +      }
  
        /* Verify phy id */
        switch (hw->mac.type) {
        case e1000_82571:
        case e1000_82572:
                if (phy->id != IGP01E1000_I_PHY_ID)
 -                      return -E1000_ERR_PHY;
 +                      ret_val = -E1000_ERR_PHY;
                break;
        case e1000_82573:
                if (phy->id != M88E1111_I_PHY_ID)
 -                      return -E1000_ERR_PHY;
 +                      ret_val = -E1000_ERR_PHY;
                break;
        case e1000_82574:
        case e1000_82583:
                if (phy->id != BME1000_E_PHY_ID_R2)
 -                      return -E1000_ERR_PHY;
 +                      ret_val = -E1000_ERR_PHY;
                break;
        default:
 -              return -E1000_ERR_PHY;
 +              ret_val = -E1000_ERR_PHY;
                break;
        }
  
 -      return 0;
 +      if (ret_val)
 +              e_dbg("PHY ID unknown: type = 0x%08x\n", phy->id);
 +
 +      return ret_val;
  }
  
  /**
@@@ -217,17 -200,6 +217,17 @@@ static s32 e1000_init_nvm_params_82571(
                break;
        }
  
 +      /* Function Pointers */
 +      switch (hw->mac.type) {
 +      case e1000_82574:
 +      case e1000_82583:
 +              nvm->ops.acquire = e1000_get_hw_semaphore_82574;
 +              nvm->ops.release = e1000_put_hw_semaphore_82574;
 +              break;
 +      default:
 +              break;
 +      }
 +
        return 0;
  }
  
@@@ -328,7 -300,7 +328,7 @@@ static s32 e1000_init_mac_params_82571(
  
        /*
         * Ensure that the inter-port SWSM.SMBI lock bit is clear before
-        * first NVM or PHY acess. This should be done for single-port
+        * first NVM or PHY access. This should be done for single-port
         * devices, and for one port only on dual-port devices so that
         * for those devices we can still use the SMBI lock to synchronize
         * inter-port accesses to the PHY & NVM.
@@@ -570,146 -542,6 +570,146 @@@ static void e1000_put_hw_semaphore_8257
        swsm &= ~(E1000_SWSM_SMBI | E1000_SWSM_SWESMBI);
        ew32(SWSM, swsm);
  }
 +/**
 + *  e1000_get_hw_semaphore_82573 - Acquire hardware semaphore
 + *  @hw: pointer to the HW structure
 + *
 + *  Acquire the HW semaphore during reset.
 + *
 + **/
 +static s32 e1000_get_hw_semaphore_82573(struct e1000_hw *hw)
 +{
 +      u32 extcnf_ctrl;
 +      s32 ret_val = 0;
 +      s32 i = 0;
 +
 +      extcnf_ctrl = er32(EXTCNF_CTRL);
 +      extcnf_ctrl |= E1000_EXTCNF_CTRL_MDIO_SW_OWNERSHIP;
 +      do {
 +              ew32(EXTCNF_CTRL, extcnf_ctrl);
 +              extcnf_ctrl = er32(EXTCNF_CTRL);
 +
 +              if (extcnf_ctrl & E1000_EXTCNF_CTRL_MDIO_SW_OWNERSHIP)
 +                      break;
 +
 +              extcnf_ctrl |= E1000_EXTCNF_CTRL_MDIO_SW_OWNERSHIP;
 +
 +              msleep(2);
 +              i++;
 +      } while (i < MDIO_OWNERSHIP_TIMEOUT);
 +
 +      if (i == MDIO_OWNERSHIP_TIMEOUT) {
 +              /* Release semaphores */
 +              e1000_put_hw_semaphore_82573(hw);
 +              e_dbg("Driver can't access the PHY\n");
 +              ret_val = -E1000_ERR_PHY;
 +              goto out;
 +      }
 +
 +out:
 +      return ret_val;
 +}
 +
 +/**
 + *  e1000_put_hw_semaphore_82573 - Release hardware semaphore
 + *  @hw: pointer to the HW structure
 + *
 + *  Release hardware semaphore used during reset.
 + *
 + **/
 +static void e1000_put_hw_semaphore_82573(struct e1000_hw *hw)
 +{
 +      u32 extcnf_ctrl;
 +
 +      extcnf_ctrl = er32(EXTCNF_CTRL);
 +      extcnf_ctrl &= ~E1000_EXTCNF_CTRL_MDIO_SW_OWNERSHIP;
 +      ew32(EXTCNF_CTRL, extcnf_ctrl);
 +}
 +
 +static DEFINE_MUTEX(swflag_mutex);
 +
 +/**
 + *  e1000_get_hw_semaphore_82574 - Acquire hardware semaphore
 + *  @hw: pointer to the HW structure
 + *
 + *  Acquire the HW semaphore to access the PHY or NVM.
 + *
 + **/
 +static s32 e1000_get_hw_semaphore_82574(struct e1000_hw *hw)
 +{
 +      s32 ret_val;
 +
 +      mutex_lock(&swflag_mutex);
 +      ret_val = e1000_get_hw_semaphore_82573(hw);
 +      if (ret_val)
 +              mutex_unlock(&swflag_mutex);
 +      return ret_val;
 +}
 +
 +/**
 + *  e1000_put_hw_semaphore_82574 - Release hardware semaphore
 + *  @hw: pointer to the HW structure
 + *
 + *  Release hardware semaphore used to access the PHY or NVM
 + *
 + **/
 +static void e1000_put_hw_semaphore_82574(struct e1000_hw *hw)
 +{
 +      e1000_put_hw_semaphore_82573(hw);
 +      mutex_unlock(&swflag_mutex);
 +}
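
The 82574 pair above puts a mutex around the 82573 EXTCNF_CTRL ownership bit and is wired into the phy.ops/nvm.ops acquire/release hooks, so every guarded access must be strictly bracketed. A hedged usage sketch (e1000e_read_phy_reg_mdic() stands in for any access needing the semaphore; treat that call as an assumption):

	/* Hedged sketch: acquire/release must bracket the guarded access. */
	static s32 e1000_read_phy_locked_sketch(struct e1000_hw *hw,
						u32 offset, u16 *data)
	{
		s32 ret_val = e1000_get_hw_semaphore_82574(hw);

		if (ret_val)
			return ret_val;
		ret_val = e1000e_read_phy_reg_mdic(hw, offset, data);
		e1000_put_hw_semaphore_82574(hw);
		return ret_val;
	}
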
 +
 +/**
 + *  e1000_set_d0_lplu_state_82574 - Set Low Power Linkup D0 state
 + *  @hw: pointer to the HW structure
 + *  @active: true to enable LPLU, false to disable
 + *
 + *  Sets the LPLU D0 state according to the active flag.
 + *  LPLU will not be activated unless the
 + *  device autonegotiation advertisement meets standards of
 + *  either 10 or 10/100 or 10/100/1000 at all duplexes.
 + *  This is a function pointer entry point only called by
 + *  PHY setup routines.
 + **/
 +static s32 e1000_set_d0_lplu_state_82574(struct e1000_hw *hw, bool active)
 +{
 +      u16 data = er32(POEMB);
 +
 +      if (active)
 +              data |= E1000_PHY_CTRL_D0A_LPLU;
 +      else
 +              data &= ~E1000_PHY_CTRL_D0A_LPLU;
 +
 +      ew32(POEMB, data);
 +      return 0;
 +}
 +
 +/**
 + *  e1000_set_d3_lplu_state_82574 - Sets low power link up state for D3
 + *  @hw: pointer to the HW structure
 + *  @active: boolean used to enable/disable lplu
 + *
 + *  The low power link up (lplu) state is set to the power management level D3
 + *  when active is true; otherwise LPLU for D3 is cleared. LPLU
 + *  is used during Dx states where power conservation is most important.
 + *  During driver activity, SmartSpeed should be enabled so performance is
 + *  maintained.
 + **/
 +static s32 e1000_set_d3_lplu_state_82574(struct e1000_hw *hw, bool active)
 +{
 +      u16 data = er32(POEMB);
 +
 +      if (!active) {
 +              data &= ~E1000_PHY_CTRL_NOND0A_LPLU;
 +      } else if ((hw->phy.autoneg_advertised == E1000_ALL_SPEED_DUPLEX) ||
 +                 (hw->phy.autoneg_advertised == E1000_ALL_NOT_GIG) ||
 +                 (hw->phy.autoneg_advertised == E1000_ALL_10_SPEED)) {
 +              data |= E1000_PHY_CTRL_NOND0A_LPLU;
 +      }
 +
 +      ew32(POEMB, data);
 +      return 0;
 +}
  
  /**
   *  e1000_acquire_nvm_82571 - Request for access to the EEPROM
@@@ -730,6 -562,8 +730,6 @@@ static s32 e1000_acquire_nvm_82571(stru
  
        switch (hw->mac.type) {
        case e1000_82573:
 -      case e1000_82574:
 -      case e1000_82583:
                break;
        default:
                ret_val = e1000e_acquire_nvm(hw);
@@@ -1019,8 -853,9 +1019,8 @@@ static s32 e1000_set_d0_lplu_state_8257
   **/
  static s32 e1000_reset_hw_82571(struct e1000_hw *hw)
  {
 -      u32 ctrl, extcnf_ctrl, ctrl_ext, icr;
 +      u32 ctrl, ctrl_ext;
        s32 ret_val;
 -      u16 i = 0;
  
        /*
         * Prevent the PCI-E bus from sticking if there is no TLP connection
         */
        switch (hw->mac.type) {
        case e1000_82573:
 +              ret_val = e1000_get_hw_semaphore_82573(hw);
 +              break;
        case e1000_82574:
        case e1000_82583:
 -              extcnf_ctrl = er32(EXTCNF_CTRL);
 -              extcnf_ctrl |= E1000_EXTCNF_CTRL_MDIO_SW_OWNERSHIP;
 -
 -              do {
 -                      ew32(EXTCNF_CTRL, extcnf_ctrl);
 -                      extcnf_ctrl = er32(EXTCNF_CTRL);
 -
 -                      if (extcnf_ctrl & E1000_EXTCNF_CTRL_MDIO_SW_OWNERSHIP)
 -                              break;
 -
 -                      extcnf_ctrl |= E1000_EXTCNF_CTRL_MDIO_SW_OWNERSHIP;
 -
 -                      msleep(2);
 -                      i++;
 -              } while (i < MDIO_OWNERSHIP_TIMEOUT);
 +              ret_val = e1000_get_hw_semaphore_82574(hw);
                break;
        default:
                break;
        }
 +      if (ret_val)
 +              e_dbg("Cannot acquire MDIO ownership\n");
  
        ctrl = er32(CTRL);
  
        e_dbg("Issuing a global reset to MAC\n");
        ew32(CTRL, ctrl | E1000_CTRL_RST);
  
 +      /* Must release MDIO ownership and mutex after MAC reset. */
 +      switch (hw->mac.type) {
 +      case e1000_82574:
 +      case e1000_82583:
 +              e1000_put_hw_semaphore_82574(hw);
 +              break;
 +      default:
 +              break;
 +      }
 +
        if (hw->nvm.type == e1000_nvm_flash_hw) {
                udelay(10);
                ctrl_ext = er32(CTRL_EXT);
  
        /* Clear any pending interrupt events. */
        ew32(IMC, 0xffffffff);
 -      icr = er32(ICR);
 +      er32(ICR);
  
        if (hw->mac.type == e1000_82571) {
                /* Install any alternate MAC address into RAR0 */
@@@ -1567,8 -1402,6 +1567,8 @@@ static s32 e1000_check_for_serdes_link_
        u32 rxcw;
        u32 ctrl;
        u32 status;
 +      u32 txcw;
 +      u32 i;
        s32 ret_val = 0;
  
        ctrl = er32(CTRL);
                                    e1000_serdes_link_autoneg_progress;
                                mac->serdes_has_link = false;
                                e_dbg("AN_UP     -> AN_PROG\n");
 +                      } else {
 +                              mac->serdes_has_link = true;
                        }
 -              break;
 +                      break;
  
                case e1000_serdes_link_forced_up:
                        /*
                         * auto-negotiation in the TXCW register and disable
                         * forced link in the Device Control register in an
                         * attempt to auto-negotiate with our link partner.
 +                       * If the partner code word is null, stop forcing
 +                       * and restart auto negotiation.
                         */
 -                      if (rxcw & E1000_RXCW_C) {
 +                      if ((rxcw & E1000_RXCW_C) || !(rxcw & E1000_RXCW_CW))  {
                                /* Enable autoneg, and unforce link up */
                                ew32(TXCW, mac->txcw);
                                ew32(CTRL, (ctrl & ~E1000_CTRL_SLU));
                                    e1000_serdes_link_autoneg_progress;
                                mac->serdes_has_link = false;
                                e_dbg("FORCED_UP -> AN_PROG\n");
 +                      } else {
 +                              mac->serdes_has_link = true;
                        }
                        break;
  
                        ew32(CTRL, (ctrl & ~E1000_CTRL_SLU));
                        mac->serdes_link_state =
                            e1000_serdes_link_autoneg_progress;
 +                      mac->serdes_has_link = false;
                        e_dbg("DOWN      -> AN_PROG\n");
                        break;
                }
                        e_dbg("ANYSTATE  -> DOWN\n");
                } else {
                        /*
 -                       * We have sync, and can tolerate one invalid (IV)
 -                       * codeword before declaring link down, so reread
 -                       * to look again.
 +                       * Check several times; if Sync and Config
 +                       * are both consistently 1, simply ignore
 +                       * the Invalid bit and restart Autoneg
                         */
 -                      udelay(10);
 -                      rxcw = er32(RXCW);
 -                      if (rxcw & E1000_RXCW_IV) {
 -                              mac->serdes_link_state = e1000_serdes_link_down;
 +                      for (i = 0; i < AN_RETRY_COUNT; i++) {
 +                              udelay(10);
 +                              rxcw = er32(RXCW);
 +                              if ((rxcw & E1000_RXCW_IV) &&
 +                                  !((rxcw & E1000_RXCW_SYNCH) &&
 +                                    (rxcw & E1000_RXCW_C))) {
 +                                      mac->serdes_has_link = false;
 +                                      mac->serdes_link_state =
 +                                          e1000_serdes_link_down;
 +                                      e_dbg("ANYSTATE  -> DOWN\n");
 +                                      break;
 +                              }
 +                      }
 +
 +                      if (i == AN_RETRY_COUNT) {
 +                              txcw = er32(TXCW);
 +                              txcw |= E1000_TXCW_ANE;
 +                              ew32(TXCW, txcw);
 +                              mac->serdes_link_state =
 +                                  e1000_serdes_link_autoneg_progress;
                                mac->serdes_has_link = false;
 -                              e_dbg("ANYSTATE  -> DOWN\n");
 +                              e_dbg("ANYSTATE  -> AN_PROG\n");
                        }
                }
        }
@@@ -2087,7 -1897,7 +2087,7 @@@ struct e1000_info e1000_82574_info = 
                                  | FLAG_HAS_AMT
                                  | FLAG_HAS_CTRLEXT_ON_LOAD,
        .flags2                   = FLAG2_CHECK_PHY_HANG,
 -      .pba                    = 36,
 +      .pba                    = 32,
        .max_hw_frame_size      = DEFAULT_JUMBO,
        .get_variants           = e1000_get_variants_82571,
        .mac_ops                = &e82571_mac_ops,
@@@ -2104,7 -1914,7 +2104,7 @@@ struct e1000_info e1000_82583_info = 
                                  | FLAG_HAS_SMART_POWER_DOWN
                                  | FLAG_HAS_AMT
                                  | FLAG_HAS_CTRLEXT_ON_LOAD,
 -      .pba                    = 36,
 +      .pba                    = 32,
        .max_hw_frame_size      = ETH_FRAME_LEN + ETH_FCS_LEN,
        .get_variants           = e1000_get_variants_82571,
        .mac_ops                = &e82571_mac_ops,
@@@ -321,7 -321,7 +321,7 @@@ static s32 e1000_init_phy_params_pchlan
        }
  
        /*
-        * Reset the PHY before any acccess to it.  Doing so, ensures that
+        * Reset the PHY before any access to it.  Doing so ensures that
         * the PHY is in a known good state before we read/write PHY registers.
         * The generic reset is sufficient here, because we haven't determined
         * the PHY type yet.
        }
  
        phy->id = e1000_phy_unknown;
 -      ret_val = e1000e_get_phy_id(hw);
 -      if (ret_val)
 -              goto out;
 -      if ((phy->id == 0) || (phy->id == PHY_REVISION_MASK)) {
 +      switch (hw->mac.type) {
 +      default:
 +              ret_val = e1000e_get_phy_id(hw);
 +              if (ret_val)
 +                      goto out;
 +              if ((phy->id != 0) && (phy->id != PHY_REVISION_MASK))
 +                      break;
 +              /* fall-through */
 +      case e1000_pch2lan:
                /*
 -               * In case the PHY needs to be in mdio slow mode (eg. 82577),
 +               * In case the PHY needs to be in mdio slow mode,
                 * set slow mode and try to get the PHY id again.
                 */
                ret_val = e1000_set_mdio_slow_mode_hv(hw);
                ret_val = e1000e_get_phy_id(hw);
                if (ret_val)
                        goto out;
 +              break;
        }
        phy->type = e1000e_get_phy_type_from_id(phy->id);
  
@@@ -1395,6 -1389,22 +1395,6 @@@ void e1000_copy_rx_addrs_to_phy_ich8lan
        }
  }
  
 -static u32 e1000_calc_rx_da_crc(u8 mac[])
 -{
 -      u32 poly = 0xEDB88320;  /* Polynomial for 802.3 CRC calculation */
 -      u32 i, j, mask, crc;
 -
 -      crc = 0xffffffff;
 -      for (i = 0; i < 6; i++) {
 -              crc = crc ^ mac[i];
 -              for (j = 8; j > 0; j--) {
 -                      mask = (crc & 1) * (-1);
 -                      crc = (crc >> 1) ^ (poly & mask);
 -              }
 -      }
 -      return ~crc;
 -}
 -
  /**
   *  e1000_lv_jumbo_workaround_ich8lan - required for jumbo frame operation
   *  with 82579 PHY
@@@ -1437,7 -1447,8 +1437,7 @@@ s32 e1000_lv_jumbo_workaround_ich8lan(s
                        mac_addr[4] = (addr_high & 0xFF);
                        mac_addr[5] = ((addr_high >> 8) & 0xFF);
  
 -                      ew32(PCH_RAICC(i),
 -                                      e1000_calc_rx_da_crc(mac_addr));
 +                      ew32(PCH_RAICC(i), ~ether_crc_le(ETH_ALEN, mac_addr));
                }
  
                /* Write Rx addresses to the PHY */
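
The open-coded CRC helper removed above is replaced by the kernel's ether_crc_le(); the value written to PCH_RAICC is its bit-inverted result. A hedged equivalence sketch:

	/* ~ether_crc_le() over the 6-byte address reproduces the removed
	 * e1000_calc_rx_da_crc(): reflected 802.3 CRC-32, polynomial
	 * 0xEDB88320, with a final inversion. Needs <linux/crc32.h>. */
	static u32 rx_da_crc_sketch(const u8 *mac_addr)
	{
		return ~ether_crc_le(ETH_ALEN, mac_addr);
	}
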
@@@ -2292,10 -2303,11 +2292,10 @@@ static s32 e1000_read_flash_data_ich8la
                 */
                if (ret_val == 0) {
                        flash_data = er32flash(ICH_FLASH_FDATA0);
 -                      if (size == 1) {
 +                      if (size == 1)
                                *data = (u8)(flash_data & 0x000000FF);
 -                      } else if (size == 2) {
 +                      else if (size == 2)
                                *data = (u16)(flash_data & 0x0000FFFF);
 -                      }
                        break;
                } else {
                        /*
@@@ -2960,7 -2972,7 +2960,7 @@@ static s32 e1000_reset_hw_ich8lan(struc
  {
        struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
        u16 reg;
 -      u32 ctrl, icr, kab;
 +      u32 ctrl, kab;
        s32 ret_val;
  
        /*
                ew32(CRC_OFFSET, 0x65656565);
  
        ew32(IMC, 0xffffffff);
 -      icr = er32(ICR);
 +      er32(ICR);
  
        kab = er32(KABGTXD);
        kab |= E1000_KABGTXD_BGSQLBIAS;
@@@ -3101,7 -3113,7 +3101,7 @@@ static s32 e1000_init_hw_ich8lan(struc
         * Reset the phy after disabling host wakeup to reset the Rx buffer.
         */
        if (hw->phy.type == e1000_phy_82578) {
 -              hw->phy.ops.read_reg(hw, BM_WUC, &i);
 +              e1e_rphy(hw, BM_WUC, &i);
                ret_val = e1000_phy_hw_reset_ich8lan(hw);
                if (ret_val)
                        return ret_val;
@@@ -3259,8 -3271,9 +3259,8 @@@ static s32 e1000_setup_link_ich8lan(str
            (hw->phy.type == e1000_phy_82577)) {
                ew32(FCRTV_PCH, hw->fc.refresh_time);
  
 -              ret_val = hw->phy.ops.write_reg(hw,
 -                                           PHY_REG(BM_PORT_CTRL_PAGE, 27),
 -                                           hw->fc.pause_time);
 +              ret_val = e1e_wphy(hw, PHY_REG(BM_PORT_CTRL_PAGE, 27),
 +                                 hw->fc.pause_time);
                if (ret_val)
                        return ret_val;
        }
@@@ -3324,7 -3337,8 +3324,7 @@@ static s32 e1000_setup_copper_link_ich8
                        return ret_val;
                break;
        case e1000_phy_ife:
 -              ret_val = hw->phy.ops.read_reg(hw, IFE_PHY_MDIX_CONTROL,
 -                                             &reg_data);
 +              ret_val = e1e_rphy(hw, IFE_PHY_MDIX_CONTROL, &reg_data);
                if (ret_val)
                        return ret_val;
  
                        reg_data |= IFE_PMC_AUTO_MDIX;
                        break;
                }
 -              ret_val = hw->phy.ops.write_reg(hw, IFE_PHY_MDIX_CONTROL,
 -                                              reg_data);
 +              ret_val = e1e_wphy(hw, IFE_PHY_MDIX_CONTROL, reg_data);
                if (ret_val)
                        return ret_val;
                break;
@@@ -3576,7 -3591,7 +3576,7 @@@ void e1000e_disable_gig_wol_ich8lan(str
        ew32(PHY_CTRL, phy_ctrl);
  
        if (hw->mac.type >= e1000_pchlan) {
 -              e1000_oem_bits_config_ich8lan(hw, true);
 +              e1000_oem_bits_config_ich8lan(hw, false);
                ret_val = hw->phy.ops.acquire(hw);
                if (ret_val)
                        return;
@@@ -3626,8 -3641,7 +3626,8 @@@ static s32 e1000_led_off_ich8lan(struc
  {
        if (hw->phy.type == e1000_phy_ife)
                return e1e_wphy(hw, IFE_PHY_SPECIAL_CONTROL_LED,
 -                             (IFE_PSCL_PROBE_MODE | IFE_PSCL_PROBE_LEDS_OFF));
 +                              (IFE_PSCL_PROBE_MODE |
 +                               IFE_PSCL_PROBE_LEDS_OFF));
  
        ew32(LEDCTL, hw->mac.ledctl_mode1);
        return 0;
   **/
  static s32 e1000_setup_led_pchlan(struct e1000_hw *hw)
  {
 -      return hw->phy.ops.write_reg(hw, HV_LED_CONFIG,
 -                                      (u16)hw->mac.ledctl_mode1);
 +      return e1e_wphy(hw, HV_LED_CONFIG, (u16)hw->mac.ledctl_mode1);
  }
  
  /**
   **/
  static s32 e1000_cleanup_led_pchlan(struct e1000_hw *hw)
  {
 -      return hw->phy.ops.write_reg(hw, HV_LED_CONFIG,
 -                                      (u16)hw->mac.ledctl_default);
 +      return e1e_wphy(hw, HV_LED_CONFIG, (u16)hw->mac.ledctl_default);
  }
  
  /**
@@@ -3683,7 -3699,7 +3683,7 @@@ static s32 e1000_led_on_pchlan(struct e
                }
        }
  
 -      return hw->phy.ops.write_reg(hw, HV_LED_CONFIG, data);
 +      return e1e_wphy(hw, HV_LED_CONFIG, data);
  }
  
  /**
@@@ -3714,7 -3730,7 +3714,7 @@@ static s32 e1000_led_off_pchlan(struct 
                }
        }
  
 -      return hw->phy.ops.write_reg(hw, HV_LED_CONFIG, data);
 +      return e1e_wphy(hw, HV_LED_CONFIG, data);
  }
  
  /**
@@@ -3823,20 -3839,20 +3823,20 @@@ static void e1000_clear_hw_cntrs_ich8la
        if ((hw->phy.type == e1000_phy_82578) ||
            (hw->phy.type == e1000_phy_82579) ||
            (hw->phy.type == e1000_phy_82577)) {
 -              hw->phy.ops.read_reg(hw, HV_SCC_UPPER, &phy_data);
 -              hw->phy.ops.read_reg(hw, HV_SCC_LOWER, &phy_data);
 -              hw->phy.ops.read_reg(hw, HV_ECOL_UPPER, &phy_data);
 -              hw->phy.ops.read_reg(hw, HV_ECOL_LOWER, &phy_data);
 -              hw->phy.ops.read_reg(hw, HV_MCC_UPPER, &phy_data);
 -              hw->phy.ops.read_reg(hw, HV_MCC_LOWER, &phy_data);
 -              hw->phy.ops.read_reg(hw, HV_LATECOL_UPPER, &phy_data);
 -              hw->phy.ops.read_reg(hw, HV_LATECOL_LOWER, &phy_data);
 -              hw->phy.ops.read_reg(hw, HV_COLC_UPPER, &phy_data);
 -              hw->phy.ops.read_reg(hw, HV_COLC_LOWER, &phy_data);
 -              hw->phy.ops.read_reg(hw, HV_DC_UPPER, &phy_data);
 -              hw->phy.ops.read_reg(hw, HV_DC_LOWER, &phy_data);
 -              hw->phy.ops.read_reg(hw, HV_TNCRS_UPPER, &phy_data);
 -              hw->phy.ops.read_reg(hw, HV_TNCRS_LOWER, &phy_data);
 +              e1e_rphy(hw, HV_SCC_UPPER, &phy_data);
 +              e1e_rphy(hw, HV_SCC_LOWER, &phy_data);
 +              e1e_rphy(hw, HV_ECOL_UPPER, &phy_data);
 +              e1e_rphy(hw, HV_ECOL_LOWER, &phy_data);
 +              e1e_rphy(hw, HV_MCC_UPPER, &phy_data);
 +              e1e_rphy(hw, HV_MCC_LOWER, &phy_data);
 +              e1e_rphy(hw, HV_LATECOL_UPPER, &phy_data);
 +              e1e_rphy(hw, HV_LATECOL_LOWER, &phy_data);
 +              e1e_rphy(hw, HV_COLC_UPPER, &phy_data);
 +              e1e_rphy(hw, HV_COLC_LOWER, &phy_data);
 +              e1e_rphy(hw, HV_DC_UPPER, &phy_data);
 +              e1e_rphy(hw, HV_DC_LOWER, &phy_data);
 +              e1e_rphy(hw, HV_TNCRS_UPPER, &phy_data);
 +              e1e_rphy(hw, HV_TNCRS_LOWER, &phy_data);
        }
  }
  
diff --combined drivers/net/e1000e/phy.c
@@@ -42,20 -42,20 +42,20 @@@ static s32 e1000_access_phy_debug_regs_
                                            u16 *data, bool read);
  
  /* Cable length tables */
 -static const u16 e1000_m88_cable_length_table[] =
 -      { 0, 50, 80, 110, 140, 140, E1000_CABLE_LENGTH_UNDEFINED };
 +static const u16 e1000_m88_cable_length_table[] = {
 +      0, 50, 80, 110, 140, 140, E1000_CABLE_LENGTH_UNDEFINED };
  #define M88E1000_CABLE_LENGTH_TABLE_SIZE \
                ARRAY_SIZE(e1000_m88_cable_length_table)
  
 -static const u16 e1000_igp_2_cable_length_table[] =
 -      { 0, 0, 0, 0, 0, 0, 0, 0, 3, 5, 8, 11, 13, 16, 18, 21, 0, 0, 0, 3,
 -        6, 10, 13, 16, 19, 23, 26, 29, 32, 35, 38, 41, 6, 10, 14, 18, 22,
 -        26, 30, 33, 37, 41, 44, 48, 51, 54, 58, 61, 21, 26, 31, 35, 40,
 -        44, 49, 53, 57, 61, 65, 68, 72, 75, 79, 82, 40, 45, 51, 56, 61,
 -        66, 70, 75, 79, 83, 87, 91, 94, 98, 101, 104, 60, 66, 72, 77, 82,
 -        87, 92, 96, 100, 104, 108, 111, 114, 117, 119, 121, 83, 89, 95,
 -        100, 105, 109, 113, 116, 119, 122, 124, 104, 109, 114, 118, 121,
 -        124};
 +static const u16 e1000_igp_2_cable_length_table[] = {
 +      0, 0, 0, 0, 0, 0, 0, 0, 3, 5, 8, 11, 13, 16, 18, 21, 0, 0, 0, 3,
 +      6, 10, 13, 16, 19, 23, 26, 29, 32, 35, 38, 41, 6, 10, 14, 18, 22,
 +      26, 30, 33, 37, 41, 44, 48, 51, 54, 58, 61, 21, 26, 31, 35, 40,
 +      44, 49, 53, 57, 61, 65, 68, 72, 75, 79, 82, 40, 45, 51, 56, 61,
 +      66, 70, 75, 79, 83, 87, 91, 94, 98, 101, 104, 60, 66, 72, 77, 82,
 +      87, 92, 96, 100, 104, 108, 111, 114, 117, 119, 121, 83, 89, 95,
 +      100, 105, 109, 113, 116, 119, 122, 124, 104, 109, 114, 118, 121,
 +      124};
  #define IGP02E1000_CABLE_LENGTH_TABLE_SIZE \
                ARRAY_SIZE(e1000_igp_2_cable_length_table)
  
@@@ -226,13 -226,6 +226,13 @@@ s32 e1000e_read_phy_reg_mdic(struct e10
        }
        *data = (u16) mdic;
  
 +      /*
 +       * Allow some time after each MDIC transaction to avoid
 +       * reading duplicate data in the next MDIC transaction.
 +       */
 +      if (hw->mac.type == e1000_pch2lan)
 +              udelay(100);
 +
        return 0;
  }
  
@@@ -286,13 -279,6 +286,13 @@@ s32 e1000e_write_phy_reg_mdic(struct e1
                return -E1000_ERR_PHY;
        }
  
 +      /*
 +       * Allow some time after each MDIC transaction to avoid
 +       * reading duplicate data in the next MDIC transaction.
 +       */
 +      if (hw->mac.type == e1000_pch2lan)
 +              udelay(100);
 +
        return 0;
  }
  
@@@ -637,11 -623,12 +637,11 @@@ s32 e1000e_write_kmrn_reg_locked(struc
   **/
  s32 e1000_copper_link_setup_82577(struct e1000_hw *hw)
  {
 -      struct e1000_phy_info *phy = &hw->phy;
        s32 ret_val;
        u16 phy_data;
  
        /* Enable CRS on TX. This must be set for half-duplex operation. */
 -      ret_val = phy->ops.read_reg(hw, I82577_CFG_REG, &phy_data);
 +      ret_val = e1e_rphy(hw, I82577_CFG_REG, &phy_data);
        if (ret_val)
                goto out;
  
        /* Enable downshift */
        phy_data |= I82577_CFG_ENABLE_DOWNSHIFT;
  
 -      ret_val = phy->ops.write_reg(hw, I82577_CFG_REG, phy_data);
 +      ret_val = e1e_wphy(hw, I82577_CFG_REG, phy_data);
  
  out:
        return ret_val;
@@@ -773,14 -760,16 +773,14 @@@ s32 e1000e_copper_link_setup_m88(struc
        }
  
        if (phy->type == e1000_phy_82578) {
 -              ret_val = phy->ops.read_reg(hw, M88E1000_EXT_PHY_SPEC_CTRL,
 -                                          &phy_data);
 +              ret_val = e1e_rphy(hw, M88E1000_EXT_PHY_SPEC_CTRL, &phy_data);
                if (ret_val)
                        return ret_val;
  
                /* 82578 PHY - set the downshift count to 1x. */
                phy_data |= I82578_EPSCR_DOWNSHIFT_ENABLE;
                phy_data &= ~I82578_EPSCR_DOWNSHIFT_COUNTER_MASK;
 -              ret_val = phy->ops.write_reg(hw, M88E1000_EXT_PHY_SPEC_CTRL,
 -                                           phy_data);
 +              ret_val = e1e_wphy(hw, M88E1000_EXT_PHY_SPEC_CTRL, phy_data);
                if (ret_val)
                        return ret_val;
        }
@@@ -1054,8 -1043,9 +1054,8 @@@ static s32 e1000_phy_setup_autoneg(stru
  
        e_dbg("Auto-Neg Advertising %x\n", mii_autoneg_adv_reg);
  
 -      if (phy->autoneg_mask & ADVERTISE_1000_FULL) {
 +      if (phy->autoneg_mask & ADVERTISE_1000_FULL)
                ret_val = e1e_wphy(hw, PHY_1000T_CTRL, mii_1000t_ctrl_reg);
 -      }
  
        return ret_val;
  }
@@@ -1316,8 -1306,9 +1316,8 @@@ s32 e1000e_phy_force_speed_duplex_m88(s
                                 * We didn't get link.
                                 * Reset the DSP and cross our fingers.
                                 */
 -                              ret_val = e1e_wphy(hw,
 -                                              M88E1000_PHY_PAGE_SELECT,
 -                                              0x001d);
 +                              ret_val = e1e_wphy(hw, M88E1000_PHY_PAGE_SELECT,
 +                                                 0x001d);
                                if (ret_val)
                                        return ret_val;
                                ret_val = e1000e_phy_reset_dsp(hw);
@@@ -1849,12 -1840,11 +1849,12 @@@ s32 e1000e_get_cable_length_igp_2(struc
        u16 phy_data, i, agc_value = 0;
        u16 cur_agc_index, max_agc_index = 0;
        u16 min_agc_index = IGP02E1000_CABLE_LENGTH_TABLE_SIZE - 1;
 -      u16 agc_reg_array[IGP02E1000_PHY_CHANNEL_NUM] =
 -                                                       {IGP02E1000_PHY_AGC_A,
 -                                                        IGP02E1000_PHY_AGC_B,
 -                                                        IGP02E1000_PHY_AGC_C,
 -                                                        IGP02E1000_PHY_AGC_D};
 +      static const u16 agc_reg_array[IGP02E1000_PHY_CHANNEL_NUM] = {
 +             IGP02E1000_PHY_AGC_A,
 +             IGP02E1000_PHY_AGC_B,
 +             IGP02E1000_PHY_AGC_C,
 +             IGP02E1000_PHY_AGC_D
 +      };
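
Adding static const here (the eepro tables further down get the same treatment) moves the register list into read-only data instead of rebuilding it on the stack on every call. A minimal sketch of the difference; the array contents are placeholders, not the real IGP02E1000_PHY_AGC_* addresses:

#include <stdio.h>

/* The const array is a local: it is materialized on every call. */
static int sum_stack(void)
{
	const int regs[4] = { 0x11, 0x12, 0x13, 0x14 };

	return regs[0] + regs[1] + regs[2] + regs[3];
}

/* static const: emitted once into .rodata, nothing rebuilt per call. */
static int sum_rodata(void)
{
	static const int regs[4] = { 0x11, 0x12, 0x13, 0x14 };

	return regs[0] + regs[1] + regs[2] + regs[3];
}

int main(void)
{
	printf("%d %d\n", sum_stack(), sum_rodata());
	return 0;
}
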
  
        /* Read the AGC registers for all channels */
        for (i = 0; i < IGP02E1000_PHY_CHANNEL_NUM; i++) {
@@@ -2986,7 -2976,7 +2986,7 @@@ s32 e1000_write_phy_reg_hv_locked(struc
  }
  
  /**
-  *  e1000_get_phy_addr_for_hv_page - Get PHY adrress based on page
+  *  e1000_get_phy_addr_for_hv_page - Get PHY address based on page
   *  @page: page to be accessed
   **/
  static u32 e1000_get_phy_addr_for_hv_page(u32 page)
@@@ -3067,12 -3057,12 +3067,12 @@@ s32 e1000_link_stall_workaround_hv(stru
                goto out;
  
        /* Do not apply workaround if in PHY loopback bit 14 set */
 -      hw->phy.ops.read_reg(hw, PHY_CONTROL, &data);
 +      e1e_rphy(hw, PHY_CONTROL, &data);
        if (data & PHY_CONTROL_LB)
                goto out;
  
        /* check if link is up and at 1Gbps */
 -      ret_val = hw->phy.ops.read_reg(hw, BM_CS_STATUS, &data);
 +      ret_val = e1e_rphy(hw, BM_CS_STATUS, &data);
        if (ret_val)
                goto out;
  
        mdelay(200);
  
        /* flush the packets in the fifo buffer */
 -      ret_val = hw->phy.ops.write_reg(hw, HV_MUX_DATA_CTRL,
 -                                      HV_MUX_DATA_CTRL_GEN_TO_MAC |
 -                                      HV_MUX_DATA_CTRL_FORCE_SPEED);
 +      ret_val = e1e_wphy(hw, HV_MUX_DATA_CTRL, HV_MUX_DATA_CTRL_GEN_TO_MAC |
 +                         HV_MUX_DATA_CTRL_FORCE_SPEED);
        if (ret_val)
                goto out;
  
 -      ret_val = hw->phy.ops.write_reg(hw, HV_MUX_DATA_CTRL,
 -                                      HV_MUX_DATA_CTRL_GEN_TO_MAC);
 +      ret_val = e1e_wphy(hw, HV_MUX_DATA_CTRL, HV_MUX_DATA_CTRL_GEN_TO_MAC);
  
  out:
        return ret_val;
@@@ -3113,7 -3105,7 +3113,7 @@@ s32 e1000_check_polarity_82577(struct e
        s32 ret_val;
        u16 data;
  
 -      ret_val = phy->ops.read_reg(hw, I82577_PHY_STATUS_2, &data);
 +      ret_val = e1e_rphy(hw, I82577_PHY_STATUS_2, &data);
  
        if (!ret_val)
                phy->cable_polarity = (data & I82577_PHY_STATUS2_REV_POLARITY)
@@@ -3136,13 -3128,13 +3136,13 @@@ s32 e1000_phy_force_speed_duplex_82577(
        u16 phy_data;
        bool link;
  
 -      ret_val = phy->ops.read_reg(hw, PHY_CONTROL, &phy_data);
 +      ret_val = e1e_rphy(hw, PHY_CONTROL, &phy_data);
        if (ret_val)
                goto out;
  
        e1000e_phy_force_speed_duplex_setup(hw, &phy_data);
  
 -      ret_val = phy->ops.write_reg(hw, PHY_CONTROL, phy_data);
 +      ret_val = e1e_wphy(hw, PHY_CONTROL, phy_data);
        if (ret_val)
                goto out;
  
@@@ -3206,7 -3198,7 +3206,7 @@@ s32 e1000_get_phy_info_82577(struct e10
        if (ret_val)
                goto out;
  
 -      ret_val = phy->ops.read_reg(hw, I82577_PHY_STATUS_2, &data);
 +      ret_val = e1e_rphy(hw, I82577_PHY_STATUS_2, &data);
        if (ret_val)
                goto out;
  
                if (ret_val)
                        goto out;
  
 -              ret_val = phy->ops.read_reg(hw, PHY_1000T_STATUS, &data);
 +              ret_val = e1e_rphy(hw, PHY_1000T_STATUS, &data);
                if (ret_val)
                        goto out;
  
@@@ -3252,7 -3244,7 +3252,7 @@@ s32 e1000_get_cable_length_82577(struc
        s32 ret_val;
        u16 phy_data, length;
  
 -      ret_val = phy->ops.read_reg(hw, I82577_PHY_DIAG_STATUS, &phy_data);
 +      ret_val = e1e_rphy(hw, I82577_PHY_DIAG_STATUS, &phy_data);
        if (ret_val)
                goto out;
  
diff --combined drivers/net/eepro.c
@@@ -302,7 -302,7 +302,7 @@@ struct eepro_local 
  #define ee_id_eepro10p0 0x10   /* ID for eepro/10+ */
  #define ee_id_eepro10p1 0x31
  
 -#define TX_TIMEOUT 40
 +#define TX_TIMEOUT ((4*HZ)/10)
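
Network-device watchdog timeouts are measured in jiffies, so the old literal 40 meant 400 ms only when HZ=100; ((4*HZ)/10) keeps the intended 400 ms for any HZ. A small check of the arithmetic (common HZ values assumed; msecs_to_jiffies(400) would be the more modern spelling):

#include <assert.h>

/* ((4*HZ)/10) is 400 ms worth of jiffies for any HZ; the literal 40
 * was only correct for HZ == 100. */
static long timeout_jiffies(long hz)
{
	return (4 * hz) / 10;
}

int main(void)
{
	assert(timeout_jiffies(100)  == 40);	/* the old hard-coded value */
	assert(timeout_jiffies(250)  == 100);
	assert(timeout_jiffies(1000) == 400);
	return 0;
}
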
  
  /* Index to functions, as function prototypes. */
  
@@@ -891,13 -891,12 +891,13 @@@ err
     there is non-reboot way to recover if something goes wrong.
     */
  
 -static char irqrmap[] = {-1,-1,0,1,-1,2,-1,-1,-1,0,3,4,-1,-1,-1,-1};
 -static char irqrmap2[] = {-1,-1,4,0,1,2,-1,3,-1,4,5,6,7,-1,-1,-1};
 +static const char irqrmap[] = {-1,-1,0,1,-1,2,-1,-1,-1,0,3,4,-1,-1,-1,-1};
 +static const char irqrmap2[] = {-1,-1,4,0,1,2,-1,3,-1,4,5,6,7,-1,-1,-1};
  static int    eepro_grab_irq(struct net_device *dev)
  {
 -      int irqlist[] = { 3, 4, 5, 7, 9, 10, 11, 12, 0 };
 -      int *irqp = irqlist, temp_reg, ioaddr = dev->base_addr;
 +      static const int irqlist[] = { 3, 4, 5, 7, 9, 10, 11, 12, 0 };
 +      const int *irqp = irqlist;
 +      int temp_reg, ioaddr = dev->base_addr;
  
        eepro_sw2bank1(ioaddr); /* be CAREFUL, BANK 1 now */
  
@@@ -1761,7 -1760,7 +1761,7 @@@ module_param_array(io, int, NULL, 0)
  module_param_array(irq, int, NULL, 0);
  module_param_array(mem, int, NULL, 0);
  module_param(autodetect, int, 0);
- MODULE_PARM_DESC(io, "EtherExpress Pro/10 I/O base addres(es)");
+ MODULE_PARM_DESC(io, "EtherExpress Pro/10 I/O base address(es)");
  MODULE_PARM_DESC(irq, "EtherExpress Pro/10 IRQ number(s)");
  MODULE_PARM_DESC(mem, "EtherExpress Pro/10 Rx buffer size(es) in kB (3-29)");
  MODULE_PARM_DESC(autodetect, "EtherExpress Pro/10 force board(s) detection (0-1)");
@@@ -56,6 -56,9 +56,6 @@@ static s32 ixgbe_setup_mac_link_82599(s
                                 ixgbe_link_speed speed,
                                 bool autoneg,
                                 bool autoneg_wait_to_complete);
 -static s32 ixgbe_get_copper_link_capabilities_82599(struct ixgbe_hw *hw,
 -                                             ixgbe_link_speed *speed,
 -                                             bool *autoneg);
  static s32 ixgbe_setup_copper_link_82599(struct ixgbe_hw *hw,
                                           ixgbe_link_speed speed,
                                           bool autoneg,
@@@ -65,9 -68,9 +65,9 @@@ static s32 ixgbe_verify_fw_version_8259
  static void ixgbe_init_mac_link_ops_82599(struct ixgbe_hw *hw)
  {
        struct ixgbe_mac_info *mac = &hw->mac;
 -      if (hw->phy.multispeed_fiber) {
 -              /* Set up dual speed SFP+ support */
 -              mac->ops.setup_link = &ixgbe_setup_mac_link_multispeed_fiber;
 +
 +      /* enable the laser control functions for SFP+ fiber */
 +      if (mac->ops.get_media_type(hw) == ixgbe_media_type_fiber) {
                mac->ops.disable_tx_laser =
                                       &ixgbe_disable_tx_laser_multispeed_fiber;
                mac->ops.enable_tx_laser =
                mac->ops.disable_tx_laser = NULL;
                mac->ops.enable_tx_laser = NULL;
                mac->ops.flap_tx_laser = NULL;
 +      }
 +
 +      if (hw->phy.multispeed_fiber) {
 +              /* Set up dual speed SFP+ support */
 +              mac->ops.setup_link = &ixgbe_setup_mac_link_multispeed_fiber;
 +      } else {
                if ((mac->ops.get_media_type(hw) ==
                     ixgbe_media_type_backplane) &&
                    (hw->phy.smart_speed == ixgbe_smart_speed_auto ||
@@@ -96,8 -93,6 +96,8 @@@
  static s32 ixgbe_setup_sfp_modules_82599(struct ixgbe_hw *hw)
  {
        s32 ret_val = 0;
 +      u32 reg_anlp1 = 0;
 +      u32 i = 0;
        u16 list_offset, data_offset, data_value;
  
        if (hw->phy.sfp_type != ixgbe_sfp_type_unknown) {
                        IXGBE_WRITE_FLUSH(hw);
                        hw->eeprom.ops.read(hw, ++data_offset, &data_value);
                }
 -              /* Now restart DSP by setting Restart_AN */
 -              IXGBE_WRITE_REG(hw, IXGBE_AUTOC,
 -                  (IXGBE_READ_REG(hw, IXGBE_AUTOC) | IXGBE_AUTOC_AN_RESTART));
  
                /* Release the semaphore */
                ixgbe_release_swfw_sync(hw, IXGBE_GSSR_MAC_CSR_SM);
                /* Delay obtaining semaphore again to allow FW access */
                msleep(hw->eeprom.semaphore_delay);
 +
 +              /* Now restart DSP by setting Restart_AN and clearing LMS */
 +              IXGBE_WRITE_REG(hw, IXGBE_AUTOC, ((IXGBE_READ_REG(hw,
 +                              IXGBE_AUTOC) & ~IXGBE_AUTOC_LMS_MASK) |
 +                              IXGBE_AUTOC_AN_RESTART));
 +
 +              /* Wait for AN to leave state 0 */
 +              for (i = 0; i < 10; i++) {
 +                      msleep(4);
 +                      reg_anlp1 = IXGBE_READ_REG(hw, IXGBE_ANLP1);
 +                      if (reg_anlp1 & IXGBE_ANLP1_AN_STATE_MASK)
 +                              break;
 +              }
 +              if (!(reg_anlp1 & IXGBE_ANLP1_AN_STATE_MASK)) {
 +                      hw_dbg(hw, "sfp module setup not complete\n");
 +                      ret_val = IXGBE_ERR_SFP_SETUP_NOT_COMPLETE;
 +                      goto setup_sfp_out;
 +              }
 +
 +              /* Restart DSP by setting Restart_AN and returning to SFI mode */
 +              IXGBE_WRITE_REG(hw, IXGBE_AUTOC, (IXGBE_READ_REG(hw,
 +                              IXGBE_AUTOC) | IXGBE_AUTOC_LMS_10G_SERIAL |
 +                              IXGBE_AUTOC_AN_RESTART));
        }
  
  setup_sfp_out:
@@@ -199,7 -174,7 +199,7 @@@ static s32 ixgbe_init_phy_ops_82599(str
        if (mac->ops.get_media_type(hw) == ixgbe_media_type_copper) {
                mac->ops.setup_link = &ixgbe_setup_copper_link_82599;
                mac->ops.get_link_capabilities =
 -                                &ixgbe_get_copper_link_capabilities_82599;
 +                      &ixgbe_get_copper_link_capabilities_generic;
        }
  
        /* Set necessary function pointers based on phy type */
                phy->ops.get_firmware_version =
                             &ixgbe_get_phy_firmware_version_tnx;
                break;
 +      case ixgbe_phy_aq:
 +              phy->ops.get_firmware_version =
 +                      &ixgbe_get_phy_firmware_version_generic;
 +              break;
        default:
                break;
        }
@@@ -319,6 -290,37 +319,6 @@@ out
  }
  
  /**
 - *  ixgbe_get_copper_link_capabilities_82599 - Determines link capabilities
 - *  @hw: pointer to hardware structure
 - *  @speed: pointer to link speed
 - *  @autoneg: boolean auto-negotiation value
 - *
 - *  Determines the link capabilities by reading the AUTOC register.
 - **/
 -static s32 ixgbe_get_copper_link_capabilities_82599(struct ixgbe_hw *hw,
 -                                                    ixgbe_link_speed *speed,
 -                                                    bool *autoneg)
 -{
 -      s32 status = IXGBE_ERR_LINK_SETUP;
 -      u16 speed_ability;
 -
 -      *speed = 0;
 -      *autoneg = true;
 -
 -      status = hw->phy.ops.read_reg(hw, MDIO_SPEED, MDIO_MMD_PMAPMD,
 -                                    &speed_ability);
 -
 -      if (status == 0) {
 -              if (speed_ability & MDIO_SPEED_10G)
 -                  *speed |= IXGBE_LINK_SPEED_10GB_FULL;
 -              if (speed_ability & MDIO_PMA_SPEED_1000)
 -                  *speed |= IXGBE_LINK_SPEED_1GB_FULL;
 -      }
 -
 -      return status;
 -}
 -
 -/**
   *  ixgbe_get_media_type_82599 - Get media type
   *  @hw: pointer to hardware structure
   *
@@@ -330,8 -332,7 +330,8 @@@ static enum ixgbe_media_type ixgbe_get_
  
        /* Detect if there is a copper PHY attached. */
        if (hw->phy.type == ixgbe_phy_cu_unknown ||
 -          hw->phy.type == ixgbe_phy_tn) {
 +          hw->phy.type == ixgbe_phy_tn ||
 +          hw->phy.type == ixgbe_phy_aq) {
                media_type = ixgbe_media_type_copper;
                goto out;
        }
        case IXGBE_DEV_ID_82599_KX4_MEZZ:
        case IXGBE_DEV_ID_82599_COMBO_BACKPLANE:
        case IXGBE_DEV_ID_82599_KR:
 +      case IXGBE_DEV_ID_82599_BACKPLANE_FCOE:
        case IXGBE_DEV_ID_82599_XAUI_LOM:
                /* Default device ID is mezzanine card KX/KX4 */
                media_type = ixgbe_media_type_backplane;
                break;
        case IXGBE_DEV_ID_82599_SFP:
 +      case IXGBE_DEV_ID_82599_SFP_FCOE:
        case IXGBE_DEV_ID_82599_SFP_EM:
                media_type = ixgbe_media_type_fiber;
                break;
@@@ -1003,7 -1002,7 +1003,7 @@@ s32 ixgbe_reinit_fdir_tables_82599(stru
                udelay(10);
        }
        if (i >= IXGBE_FDIRCMD_CMD_POLL) {
 -              hw_dbg(hw ,"Flow Director previous command isn't complete, "
 +              hw_dbg(hw, "Flow Director previous command isn't complete, "
                       "aborting table re-initialization.\n");
                return IXGBE_ERR_FDIR_REINIT_FAILED;
        }
@@@ -1079,7 -1078,7 +1079,7 @@@ s32 ixgbe_init_fdir_signature_82599(str
  
        /*
         * The defaults in the HW for RX PB 1-7 are not zero and so should be
-        * intialized to zero for non DCB mode otherwise actual total RX PB
+        * initialized to zero for non DCB mode otherwise actual total RX PB
         * would be bigger than programmed and filter space would run into
         * the PB 0 region.
         */
        /* Move the flexible bytes to use the ethertype - shift 6 words */
        fdirctrl |= (0x6 << IXGBE_FDIRCTRL_FLEX_SHIFT);
  
 -      fdirctrl |= IXGBE_FDIRCTRL_REPORT_STATUS;
  
        /* Prime the keys for hashing */
 -      IXGBE_WRITE_REG(hw, IXGBE_FDIRHKEY,
 -                      htonl(IXGBE_ATR_BUCKET_HASH_KEY));
 -      IXGBE_WRITE_REG(hw, IXGBE_FDIRSKEY,
 -                      htonl(IXGBE_ATR_SIGNATURE_HASH_KEY));
 +      IXGBE_WRITE_REG(hw, IXGBE_FDIRHKEY, IXGBE_ATR_BUCKET_HASH_KEY);
 +      IXGBE_WRITE_REG(hw, IXGBE_FDIRSKEY, IXGBE_ATR_SIGNATURE_HASH_KEY);
  
        /*
         * Poll init-done after we write the register.  Estimated times:
@@@ -1167,7 -1169,7 +1167,7 @@@ s32 ixgbe_init_fdir_perfect_82599(struc
  
        /*
         * The defaults in the HW for RX PB 1-7 are not zero and so should be
-        * intialized to zero for non DCB mode otherwise actual total RX PB
+        * initialized to zero for non DCB mode otherwise actual total RX PB
         * would be bigger than programmed and filter space would run into
         * the PB 0 region.
         */
        fdirctrl |= (0x6 << IXGBE_FDIRCTRL_FLEX_SHIFT);
  
        /* Prime the keys for hashing */
 -      IXGBE_WRITE_REG(hw, IXGBE_FDIRHKEY,
 -                      htonl(IXGBE_ATR_BUCKET_HASH_KEY));
 -      IXGBE_WRITE_REG(hw, IXGBE_FDIRSKEY,
 -                      htonl(IXGBE_ATR_SIGNATURE_HASH_KEY));
 +      IXGBE_WRITE_REG(hw, IXGBE_FDIRHKEY, IXGBE_ATR_BUCKET_HASH_KEY);
 +      IXGBE_WRITE_REG(hw, IXGBE_FDIRSKEY, IXGBE_ATR_SIGNATURE_HASH_KEY);
  
        /*
         * Poll init-done after we write the register.  Estimated times:
   *  @stream: input bitstream to compute the hash on
   *  @key: 32-bit hash key
   **/
 -static u16 ixgbe_atr_compute_hash_82599(struct ixgbe_atr_input *atr_input,
 -                                        u32 key)
 +static u32 ixgbe_atr_compute_hash_82599(union ixgbe_atr_input *atr_input,
 +                                      u32 key)
  {
        /*
         * The algorithm is as follows:
         *    To simplify for programming, the algorithm is implemented
         *    in software this way:
         *
 -       *    Key[31:0], Stream[335:0]
 +       *    key[31:0], hi_hash_dword[31:0], lo_hash_dword[31:0], hash[15:0]
         *
 -       *    tmp_key[11 * 32 - 1:0] = 11{Key[31:0] = key concatenated 11 times
 -       *    int_key[350:0] = tmp_key[351:1]
 -       *    int_stream[365:0] = Stream[14:0] | Stream[335:0] | Stream[335:321]
 +       *    for (i = 0; i < 352; i += 32)
 +       *        hi_hash_dword[31:0] ^= Stream[(i+31):i];
         *
 -       *    hash[15:0] = 0;
 -       *    for (i = 0; i < 351; i++) {
 -       *        if (int_key[i])
 -       *            hash ^= int_stream[(i + 15):i];
 +       *    lo_hash_dword[15:0]  ^= Stream[15:0];
 +       *    lo_hash_dword[15:0]  ^= hi_hash_dword[31:16];
 +       *    lo_hash_dword[31:16] ^= hi_hash_dword[15:0];
 +       *
 +       *    hi_hash_dword[31:0]  ^= Stream[351:320];
 +       *
 +       *    if (key[0])
 +       *        hash[15:0] ^= Stream[15:0];
 +       *
 +       *    for (i = 0; i < 16; i++) {
 +       *        if (key[i])
 +       *            hash[15:0] ^= lo_hash_dword[(i+15):i];
 +       *        if (key[i + 16])
 +       *            hash[15:0] ^= hi_hash_dword[(i+15):i];
         *    }
 +       *
         */
 +      __be32 common_hash_dword = 0;
 +      u32 hi_hash_dword, lo_hash_dword, flow_vm_vlan;
 +      u32 hash_result = 0;
 +      u8 i;
  
 -      union {
 -              u64    fill[6];
 -              u32    key[11];
 -              u8     key_stream[44];
 -      } tmp_key;
 +      /* record the flow_vm_vlan bits as they are a key part of the hash */
 +      flow_vm_vlan = ntohl(atr_input->dword_stream[0]);
  
 -      u8   *stream = (u8 *)atr_input;
 -      u8   int_key[44];      /* upper-most bit unused */
 -      u8   hash_str[46];     /* upper-most 2 bits unused */
 -      u16  hash_result = 0;
 -      int  i, j, k, h;
 +      /* generate common hash dword */
 +      for (i = 10; i; i -= 2)
 +              common_hash_dword ^= atr_input->dword_stream[i] ^
 +                                   atr_input->dword_stream[i - 1];
  
 -      /*
 -       * Initialize the fill member to prevent warnings
 -       * on some compilers
 -       */
 -       tmp_key.fill[0] = 0;
 +      hi_hash_dword = ntohl(common_hash_dword);
  
 -      /* First load the temporary key stream */
 -      for (i = 0; i < 6; i++) {
 -              u64 fillkey = ((u64)key << 32) | key;
 -              tmp_key.fill[i] = fillkey;
 -      }
 +      /* low dword is word swapped version of common */
 +      lo_hash_dword = (hi_hash_dword >> 16) | (hi_hash_dword << 16);
  
 -      /*
 -       * Set the interim key for the hashing.  Bit 352 is unused, so we must
 -       * shift and compensate when building the key.
 -       */
 +      /* apply flow ID/VM pool/VLAN ID bits to hash words */
 +      hi_hash_dword ^= flow_vm_vlan ^ (flow_vm_vlan >> 16);
  
 -      int_key[0] = tmp_key.key_stream[0] >> 1;
 -      for (i = 1, j = 0; i < 44; i++) {
 -              unsigned int this_key = tmp_key.key_stream[j] << 7;
 -              j++;
 -              int_key[i] = (u8)(this_key | (tmp_key.key_stream[j] >> 1));
 -      }
 +      /* Process bits 0 and 16 */
 +      if (key & 0x0001) hash_result ^= lo_hash_dword;
 +      if (key & 0x00010000) hash_result ^= hi_hash_dword;
  
        /*
 -       * Set the interim bit string for the hashing.  Bits 368 and 367 are
 -       * unused, so shift and compensate when building the string.
 +       * apply flow ID/VM pool/VLAN ID bits to lo hash dword; we had to
 +       * delay this because bit 0 of the stream should not be processed,
 +       * so we do not add the VLAN until after bit 0 has been processed
         */
 -      hash_str[0] = (stream[40] & 0x7f) >> 1;
 -      for (i = 1, j = 40; i < 46; i++) {
 -              unsigned int this_str = stream[j] << 7;
 -              j++;
 -              if (j > 41)
 -                      j = 0;
 -              hash_str[i] = (u8)(this_str | (stream[j] >> 1));
 -      }
 -
 -      /*
 -       * Now compute the hash.  i is the index into hash_str, j is into our
 -       * key stream, k is counting the number of bits, and h interates within
 -       * each byte.
 -       */
 -      for (i = 45, j = 43, k = 0; k < 351 && i >= 2 && j >= 0; i--, j--) {
 -              for (h = 0; h < 8 && k < 351; h++, k++) {
 -                      if (int_key[j] & (1 << h)) {
 -                              /*
 -                               * Key bit is set, XOR in the current 16-bit
 -                               * string.  Example of processing:
 -                               *    h = 0,
 -                               *      tmp = (hash_str[i - 2] & 0 << 16) |
 -                               *            (hash_str[i - 1] & 0xff << 8) |
 -                               *            (hash_str[i] & 0xff >> 0)
 -                               *      So tmp = hash_str[15 + k:k], since the
 -                               *      i + 2 clause rolls off the 16-bit value
 -                               *    h = 7,
 -                               *      tmp = (hash_str[i - 2] & 0x7f << 9) |
 -                               *            (hash_str[i - 1] & 0xff << 1) |
 -                               *            (hash_str[i] & 0x80 >> 7)
 -                               */
 -                              int tmp = (hash_str[i] >> h);
 -                              tmp |= (hash_str[i - 1] << (8 - h));
 -                              tmp |= (int)(hash_str[i - 2] & ((1 << h) - 1))
 -                                           << (16 - h);
 -                              hash_result ^= (u16)tmp;
 -                      }
 -              }
 -      }
 -
 -      return hash_result;
 -}
 -
 -/**
 - *  ixgbe_atr_set_vlan_id_82599 - Sets the VLAN id in the ATR input stream
 - *  @input: input stream to modify
 - *  @vlan: the VLAN id to load
 - **/
 -s32 ixgbe_atr_set_vlan_id_82599(struct ixgbe_atr_input *input, u16 vlan)
 -{
 -      input->byte_stream[IXGBE_ATR_VLAN_OFFSET + 1] = vlan >> 8;
 -      input->byte_stream[IXGBE_ATR_VLAN_OFFSET] = vlan & 0xff;
 -
 -      return 0;
 -}
 -
 -/**
 - *  ixgbe_atr_set_src_ipv4_82599 - Sets the source IPv4 address
 - *  @input: input stream to modify
 - *  @src_addr: the IP address to load
 - **/
 -s32 ixgbe_atr_set_src_ipv4_82599(struct ixgbe_atr_input *input, u32 src_addr)
 -{
 -      input->byte_stream[IXGBE_ATR_SRC_IPV4_OFFSET + 3] = src_addr >> 24;
 -      input->byte_stream[IXGBE_ATR_SRC_IPV4_OFFSET + 2] =
 -                                                     (src_addr >> 16) & 0xff;
 -      input->byte_stream[IXGBE_ATR_SRC_IPV4_OFFSET + 1] =
 -                                                      (src_addr >> 8) & 0xff;
 -      input->byte_stream[IXGBE_ATR_SRC_IPV4_OFFSET] = src_addr & 0xff;
 -
 -      return 0;
 -}
 -
 -/**
 - *  ixgbe_atr_set_dst_ipv4_82599 - Sets the destination IPv4 address
 - *  @input: input stream to modify
 - *  @dst_addr: the IP address to load
 - **/
 -s32 ixgbe_atr_set_dst_ipv4_82599(struct ixgbe_atr_input *input, u32 dst_addr)
 -{
 -      input->byte_stream[IXGBE_ATR_DST_IPV4_OFFSET + 3] = dst_addr >> 24;
 -      input->byte_stream[IXGBE_ATR_DST_IPV4_OFFSET + 2] =
 -                                                     (dst_addr >> 16) & 0xff;
 -      input->byte_stream[IXGBE_ATR_DST_IPV4_OFFSET + 1] =
 -                                                      (dst_addr >> 8) & 0xff;
 -      input->byte_stream[IXGBE_ATR_DST_IPV4_OFFSET] = dst_addr & 0xff;
 -
 -      return 0;
 -}
 -
 -/**
 - *  ixgbe_atr_set_src_port_82599 - Sets the source port
 - *  @input: input stream to modify
 - *  @src_port: the source port to load
 - **/
 -s32 ixgbe_atr_set_src_port_82599(struct ixgbe_atr_input *input, u16 src_port)
 -{
 -      input->byte_stream[IXGBE_ATR_SRC_PORT_OFFSET + 1] = src_port >> 8;
 -      input->byte_stream[IXGBE_ATR_SRC_PORT_OFFSET] = src_port & 0xff;
 -
 -      return 0;
 -}
 -
 -/**
 - *  ixgbe_atr_set_dst_port_82599 - Sets the destination port
 - *  @input: input stream to modify
 - *  @dst_port: the destination port to load
 - **/
 -s32 ixgbe_atr_set_dst_port_82599(struct ixgbe_atr_input *input, u16 dst_port)
 -{
 -      input->byte_stream[IXGBE_ATR_DST_PORT_OFFSET + 1] = dst_port >> 8;
 -      input->byte_stream[IXGBE_ATR_DST_PORT_OFFSET] = dst_port & 0xff;
 -
 -      return 0;
 -}
 -
 -/**
 - *  ixgbe_atr_set_flex_byte_82599 - Sets the flexible bytes
 - *  @input: input stream to modify
 - *  @flex_bytes: the flexible bytes to load
 - **/
 -s32 ixgbe_atr_set_flex_byte_82599(struct ixgbe_atr_input *input, u16 flex_byte)
 -{
 -      input->byte_stream[IXGBE_ATR_FLEX_BYTE_OFFSET + 1] = flex_byte >> 8;
 -      input->byte_stream[IXGBE_ATR_FLEX_BYTE_OFFSET] = flex_byte & 0xff;
 -
 -      return 0;
 -}
 -
 -/**
 - *  ixgbe_atr_set_l4type_82599 - Sets the layer 4 packet type
 - *  @input: input stream to modify
 - *  @l4type: the layer 4 type value to load
 - **/
 -s32 ixgbe_atr_set_l4type_82599(struct ixgbe_atr_input *input, u8 l4type)
 -{
 -      input->byte_stream[IXGBE_ATR_L4TYPE_OFFSET] = l4type;
 -
 -      return 0;
 -}
 -
 -/**
 - *  ixgbe_atr_get_vlan_id_82599 - Gets the VLAN id from the ATR input stream
 - *  @input: input stream to search
 - *  @vlan: the VLAN id to load
 - **/
 -static s32 ixgbe_atr_get_vlan_id_82599(struct ixgbe_atr_input *input, u16 *vlan)
 -{
 -      *vlan = input->byte_stream[IXGBE_ATR_VLAN_OFFSET];
 -      *vlan |= input->byte_stream[IXGBE_ATR_VLAN_OFFSET + 1] << 8;
 +      lo_hash_dword ^= flow_vm_vlan ^ (flow_vm_vlan << 16);
  
 -      return 0;
 -}
  
 -/**
 - *  ixgbe_atr_get_src_ipv4_82599 - Gets the source IPv4 address
 - *  @input: input stream to search
 - *  @src_addr: the IP address to load
 - **/
 -static s32 ixgbe_atr_get_src_ipv4_82599(struct ixgbe_atr_input *input,
 -                                        u32 *src_addr)
 -{
 -      *src_addr = input->byte_stream[IXGBE_ATR_SRC_IPV4_OFFSET];
 -      *src_addr |= input->byte_stream[IXGBE_ATR_SRC_IPV4_OFFSET + 1] << 8;
 -      *src_addr |= input->byte_stream[IXGBE_ATR_SRC_IPV4_OFFSET + 2] << 16;
 -      *src_addr |= input->byte_stream[IXGBE_ATR_SRC_IPV4_OFFSET + 3] << 24;
 +      /* process the remaining 30 bits of the key, 2 bits at a time */
 +      for (i = 15; i; i--) {
 +              if (key & (0x0001 << i)) hash_result ^= lo_hash_dword >> i;
 +              if (key & (0x00010000 << i)) hash_result ^= hi_hash_dword >> i;
 +      }
  
 -      return 0;
 +      return hash_result & IXGBE_ATR_HASH_MASK;
  }
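
The rewritten hash replaces the old bit-serial 351-step loop with a dword fold: dwords 1-10 of the tuple are XOR-folded into hi_hash_dword, a word-swapped copy becomes lo_hash_dword, and each set key bit XORs a shifted copy of those two words into the result; the flow/VM-pool/VLAN dword is folded into the low word only after key bit 0 has been handled, matching the comment about stream bit 0. A self-contained rendering of the same fold, with the byte-order conversions dropped and arbitrary sample values:

#include <stdint.h>
#include <stdio.h>

#define ATR_HASH_MASK 0x7fff

/* Userspace rendering of the folded hash; stream[0] plays the role of
 * the flow/VM-pool/VLAN dword, stream[1..10] the rest of the tuple.
 * Inputs are taken in host order for brevity. */
static uint32_t atr_hash(const uint32_t stream[11], uint32_t key)
{
	uint32_t hi = 0, lo, flow = stream[0], hash = 0;
	int i;

	for (i = 10; i; i -= 2)			/* fold dwords 10..1 */
		hi ^= stream[i] ^ stream[i - 1];

	lo = (hi >> 16) | (hi << 16);		/* word-swapped copy */
	hi ^= flow ^ (flow >> 16);

	if (key & 0x0001)			/* bits 0 and 16 first... */
		hash ^= lo;
	if (key & 0x00010000)
		hash ^= hi;

	lo ^= flow ^ (flow << 16);		/* ...then fold in the VLAN dword */

	for (i = 15; i; i--) {			/* remaining 30 key bits */
		if (key & (0x0001u << i))
			hash ^= lo >> i;
		if (key & (0x00010000u << i))
			hash ^= hi >> i;
	}
	return hash & ATR_HASH_MASK;
}

int main(void)
{
	const uint32_t stream[11] = { 0x00000064, 0xc0a80001, 0, 0, 0,
				      0xc0a80002, 0, 0, 0, 0x1f900050, 0 };

	printf("hash = 0x%04x\n", atr_hash(stream, 0x1d2f3c45));
	return 0;
}
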
  
 -/**
 - *  ixgbe_atr_get_dst_ipv4_82599 - Gets the destination IPv4 address
 - *  @input: input stream to search
 - *  @dst_addr: the IP address to load
 - **/
 -static s32 ixgbe_atr_get_dst_ipv4_82599(struct ixgbe_atr_input *input,
 -                                        u32 *dst_addr)
 -{
 -      *dst_addr = input->byte_stream[IXGBE_ATR_DST_IPV4_OFFSET];
 -      *dst_addr |= input->byte_stream[IXGBE_ATR_DST_IPV4_OFFSET + 1] << 8;
 -      *dst_addr |= input->byte_stream[IXGBE_ATR_DST_IPV4_OFFSET + 2] << 16;
 -      *dst_addr |= input->byte_stream[IXGBE_ATR_DST_IPV4_OFFSET + 3] << 24;
 -
 -      return 0;
 -}
 +/*
 + * These defines allow us to quickly generate all of the necessary instructions
 + * in the function below by simply invoking IXGBE_COMPUTE_SIG_HASH_ITERATION
 + * for values 0 through 15.
 + */
 +#define IXGBE_ATR_COMMON_HASH_KEY \
 +              (IXGBE_ATR_BUCKET_HASH_KEY & IXGBE_ATR_SIGNATURE_HASH_KEY)
 +#define IXGBE_COMPUTE_SIG_HASH_ITERATION(_n) \
 +do { \
 +      u32 n = (_n); \
 +      if (IXGBE_ATR_COMMON_HASH_KEY & (0x01 << n)) \
 +              common_hash ^= lo_hash_dword >> n; \
 +      else if (IXGBE_ATR_BUCKET_HASH_KEY & (0x01 << n)) \
 +              bucket_hash ^= lo_hash_dword >> n; \
 +      else if (IXGBE_ATR_SIGNATURE_HASH_KEY & (0x01 << n)) \
 +              sig_hash ^= lo_hash_dword << (16 - n); \
 +      if (IXGBE_ATR_COMMON_HASH_KEY & (0x01 << (n + 16))) \
 +              common_hash ^= hi_hash_dword >> n; \
 +      else if (IXGBE_ATR_BUCKET_HASH_KEY & (0x01 << (n + 16))) \
 +              bucket_hash ^= hi_hash_dword >> n; \
 +      else if (IXGBE_ATR_SIGNATURE_HASH_KEY & (0x01 << (n + 16))) \
 +              sig_hash ^= hi_hash_dword << (16 - n); \
 +} while (0)
  
  /**
 - *  ixgbe_atr_get_src_ipv6_82599 - Gets the source IPv6 address
 - *  @input: input stream to search
 - *  @src_addr_1: the first 4 bytes of the IP address to load
 - *  @src_addr_2: the second 4 bytes of the IP address to load
 - *  @src_addr_3: the third 4 bytes of the IP address to load
 - *  @src_addr_4: the fourth 4 bytes of the IP address to load
 - **/
 -static s32 ixgbe_atr_get_src_ipv6_82599(struct ixgbe_atr_input *input,
 -                                        u32 *src_addr_1, u32 *src_addr_2,
 -                                        u32 *src_addr_3, u32 *src_addr_4)
 -{
 -      *src_addr_1 = input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 12];
 -      *src_addr_1 = input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 13] << 8;
 -      *src_addr_1 = input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 14] << 16;
 -      *src_addr_1 = input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 15] << 24;
 -
 -      *src_addr_2 = input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 8];
 -      *src_addr_2 = input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 9] << 8;
 -      *src_addr_2 = input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 10] << 16;
 -      *src_addr_2 = input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 11] << 24;
 -
 -      *src_addr_3 = input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 4];
 -      *src_addr_3 = input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 5] << 8;
 -      *src_addr_3 = input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 6] << 16;
 -      *src_addr_3 = input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 7] << 24;
 -
 -      *src_addr_4 = input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET];
 -      *src_addr_4 = input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 1] << 8;
 -      *src_addr_4 = input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 2] << 16;
 -      *src_addr_4 = input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 3] << 24;
 -
 -      return 0;
 -}
 -
 -/**
 - *  ixgbe_atr_get_src_port_82599 - Gets the source port
 - *  @input: input stream to modify
 - *  @src_port: the source port to load
 + *  ixgbe_atr_compute_sig_hash_82599 - Compute the signature hash
 + *  @input: unique input dword
 + *  @common: compressed common input dword
   *
 - *  Even though the input is given in big-endian, the FDIRPORT registers
 - *  expect the ports to be programmed in little-endian.  Hence the need to swap
 - *  endianness when retrieving the data.  This can be confusing since the
 - *  internal hash engine expects it to be big-endian.
 + *  This function is almost identical to the function above but contains
 + *  several optimizations, such as unrolling all of the loops, letting the
 + *  compiler work out all of the conditional ifs since the keys are static
 + *  defines, and computing two hashes at once since the hashed dword stream
 + *  will be the same for both keys.
   **/
 -static s32 ixgbe_atr_get_src_port_82599(struct ixgbe_atr_input *input,
 -                                        u16 *src_port)
 +static u32 ixgbe_atr_compute_sig_hash_82599(union ixgbe_atr_hash_dword input,
 +                                          union ixgbe_atr_hash_dword common)
  {
 -      *src_port = input->byte_stream[IXGBE_ATR_SRC_PORT_OFFSET] << 8;
 -      *src_port |= input->byte_stream[IXGBE_ATR_SRC_PORT_OFFSET + 1];
 +      u32 hi_hash_dword, lo_hash_dword, flow_vm_vlan;
 +      u32 sig_hash = 0, bucket_hash = 0, common_hash = 0;
  
 -      return 0;
 -}
 +      /* record the flow_vm_vlan bits as they are a key part of the hash */
 +      flow_vm_vlan = ntohl(input.dword);
  
 -/**
 - *  ixgbe_atr_get_dst_port_82599 - Gets the destination port
 - *  @input: input stream to modify
 - *  @dst_port: the destination port to load
 - *
 - *  Even though the input is given in big-endian, the FDIRPORT registers
 - *  expect the ports to be programmed in little-endian.  Hence the need to swap
 - *  endianness when retrieving the data.  This can be confusing since the
 - *  internal hash engine expects it to be big-endian.
 - **/
 -static s32 ixgbe_atr_get_dst_port_82599(struct ixgbe_atr_input *input,
 -                                        u16 *dst_port)
 -{
 -      *dst_port = input->byte_stream[IXGBE_ATR_DST_PORT_OFFSET] << 8;
 -      *dst_port |= input->byte_stream[IXGBE_ATR_DST_PORT_OFFSET + 1];
 +      /* generate common hash dword */
 +      hi_hash_dword = ntohl(common.dword);
  
 -      return 0;
 -}
 +      /* low dword is word swapped version of common */
 +      lo_hash_dword = (hi_hash_dword >> 16) | (hi_hash_dword << 16);
  
 -/**
 - *  ixgbe_atr_get_flex_byte_82599 - Gets the flexible bytes
 - *  @input: input stream to modify
 - *  @flex_bytes: the flexible bytes to load
 - **/
 -static s32 ixgbe_atr_get_flex_byte_82599(struct ixgbe_atr_input *input,
 -                                         u16 *flex_byte)
 -{
 -      *flex_byte = input->byte_stream[IXGBE_ATR_FLEX_BYTE_OFFSET];
 -      *flex_byte |= input->byte_stream[IXGBE_ATR_FLEX_BYTE_OFFSET + 1] << 8;
 +      /* apply flow ID/VM pool/VLAN ID bits to hash words */
 +      hi_hash_dword ^= flow_vm_vlan ^ (flow_vm_vlan >> 16);
  
 -      return 0;
 -}
 +      /* Process bits 0 and 16 */
 +      IXGBE_COMPUTE_SIG_HASH_ITERATION(0);
  
 -/**
 - *  ixgbe_atr_get_l4type_82599 - Gets the layer 4 packet type
 - *  @input: input stream to modify
 - *  @l4type: the layer 4 type value to load
 - **/
 -static s32 ixgbe_atr_get_l4type_82599(struct ixgbe_atr_input *input,
 -                                      u8 *l4type)
 -{
 -      *l4type = input->byte_stream[IXGBE_ATR_L4TYPE_OFFSET];
 +      /*
 +       * apply flow ID/VM pool/VLAN ID bits to lo hash dword; we had to
 +       * delay this because bit 0 of the stream should not be processed,
 +       * so we do not add the VLAN until after bit 0 has been processed
 +       */
 +      lo_hash_dword ^= flow_vm_vlan ^ (flow_vm_vlan << 16);
 +
 +      /* Process the remaining 30 bits of the key */
 +      IXGBE_COMPUTE_SIG_HASH_ITERATION(1);
 +      IXGBE_COMPUTE_SIG_HASH_ITERATION(2);
 +      IXGBE_COMPUTE_SIG_HASH_ITERATION(3);
 +      IXGBE_COMPUTE_SIG_HASH_ITERATION(4);
 +      IXGBE_COMPUTE_SIG_HASH_ITERATION(5);
 +      IXGBE_COMPUTE_SIG_HASH_ITERATION(6);
 +      IXGBE_COMPUTE_SIG_HASH_ITERATION(7);
 +      IXGBE_COMPUTE_SIG_HASH_ITERATION(8);
 +      IXGBE_COMPUTE_SIG_HASH_ITERATION(9);
 +      IXGBE_COMPUTE_SIG_HASH_ITERATION(10);
 +      IXGBE_COMPUTE_SIG_HASH_ITERATION(11);
 +      IXGBE_COMPUTE_SIG_HASH_ITERATION(12);
 +      IXGBE_COMPUTE_SIG_HASH_ITERATION(13);
 +      IXGBE_COMPUTE_SIG_HASH_ITERATION(14);
 +      IXGBE_COMPUTE_SIG_HASH_ITERATION(15);
 +
 +      /* combine common_hash result with signature and bucket hashes */
 +      bucket_hash ^= common_hash;
 +      bucket_hash &= IXGBE_ATR_HASH_MASK;
  
 -      return 0;
 +      sig_hash ^= common_hash << 16;
 +      sig_hash &= IXGBE_ATR_HASH_MASK << 16;
 +
 +      /* return completed signature hash */
 +      return sig_hash ^ bucket_hash;
  }
  
  /**
   *  ixgbe_atr_add_signature_filter_82599 - Adds a signature hash filter
   *  @hw: pointer to hardware structure
 - *  @stream: input bitstream
 + *  @input: unique input dword
 + *  @common: compressed common input dword
   *  @queue: queue index to direct traffic to
   **/
  s32 ixgbe_fdir_add_signature_filter_82599(struct ixgbe_hw *hw,
 -                                          struct ixgbe_atr_input *input,
 +                                          union ixgbe_atr_hash_dword input,
 +                                          union ixgbe_atr_hash_dword common,
                                            u8 queue)
  {
        u64  fdirhashcmd;
 -      u64  fdircmd;
 -      u32  fdirhash;
 -      u16  bucket_hash, sig_hash;
 -      u8   l4type;
 -
 -      bucket_hash = ixgbe_atr_compute_hash_82599(input,
 -                                                 IXGBE_ATR_BUCKET_HASH_KEY);
 -
 -      /* bucket_hash is only 15 bits */
 -      bucket_hash &= IXGBE_ATR_HASH_MASK;
 -
 -      sig_hash = ixgbe_atr_compute_hash_82599(input,
 -                                              IXGBE_ATR_SIGNATURE_HASH_KEY);
 -
 -      /* Get the l4type in order to program FDIRCMD properly */
 -      /* lowest 2 bits are FDIRCMD.L4TYPE, third lowest bit is FDIRCMD.IPV6 */
 -      ixgbe_atr_get_l4type_82599(input, &l4type);
 +      u32  fdircmd;
  
        /*
 -       * The lower 32-bits of fdirhashcmd is for FDIRHASH, the upper 32-bits
 -       * is for FDIRCMD.  Then do a 64-bit register write from FDIRHASH.
 +       * Get the flow_type in order to program FDIRCMD properly:
 +       * lowest 2 bits are FDIRCMD.L4TYPE, third lowest bit is FDIRCMD.IPV6
         */
 -      fdirhash = sig_hash << IXGBE_FDIRHASH_SIG_SW_INDEX_SHIFT | bucket_hash;
 -
 -      fdircmd = (IXGBE_FDIRCMD_CMD_ADD_FLOW | IXGBE_FDIRCMD_FILTER_UPDATE |
 -                 IXGBE_FDIRCMD_LAST | IXGBE_FDIRCMD_QUEUE_EN);
 -
 -      switch (l4type & IXGBE_ATR_L4TYPE_MASK) {
 -      case IXGBE_ATR_L4TYPE_TCP:
 -              fdircmd |= IXGBE_FDIRCMD_L4TYPE_TCP;
 -              break;
 -      case IXGBE_ATR_L4TYPE_UDP:
 -              fdircmd |= IXGBE_FDIRCMD_L4TYPE_UDP;
 -              break;
 -      case IXGBE_ATR_L4TYPE_SCTP:
 -              fdircmd |= IXGBE_FDIRCMD_L4TYPE_SCTP;
 +      switch (input.formatted.flow_type) {
 +      case IXGBE_ATR_FLOW_TYPE_TCPV4:
 +      case IXGBE_ATR_FLOW_TYPE_UDPV4:
 +      case IXGBE_ATR_FLOW_TYPE_SCTPV4:
 +      case IXGBE_ATR_FLOW_TYPE_TCPV6:
 +      case IXGBE_ATR_FLOW_TYPE_UDPV6:
 +      case IXGBE_ATR_FLOW_TYPE_SCTPV6:
                break;
        default:
 -              hw_dbg(hw, "Error on l4type input\n");
 +              hw_dbg(hw, " Error on flow type input\n");
                return IXGBE_ERR_CONFIG;
        }
  
 -      if (l4type & IXGBE_ATR_L4TYPE_IPV6_MASK)
 -              fdircmd |= IXGBE_FDIRCMD_IPV6;
 +      /* configure FDIRCMD register */
 +      fdircmd = IXGBE_FDIRCMD_CMD_ADD_FLOW | IXGBE_FDIRCMD_FILTER_UPDATE |
 +                IXGBE_FDIRCMD_LAST | IXGBE_FDIRCMD_QUEUE_EN;
 +      fdircmd |= input.formatted.flow_type << IXGBE_FDIRCMD_FLOW_TYPE_SHIFT;
 +      fdircmd |= (u32)queue << IXGBE_FDIRCMD_RX_QUEUE_SHIFT;
  
 -      fdircmd |= ((u64)queue << IXGBE_FDIRCMD_RX_QUEUE_SHIFT);
 -      fdirhashcmd = ((fdircmd << 32) | fdirhash);
 +      /*
 +       * The lower 32-bits of fdirhashcmd is for FDIRHASH, the upper 32-bits
 +       * is for FDIRCMD.  Then do a 64-bit register write from FDIRHASH.
 +       */
 +      fdirhashcmd = (u64)fdircmd << 32;
 +      fdirhashcmd |= ixgbe_atr_compute_sig_hash_82599(input, common);
  
        IXGBE_WRITE_REG64(hw, IXGBE_FDIRHASH, fdirhashcmd);
  
 +      hw_dbg(hw, "Tx Queue=%x hash=%x\n", queue, (u32)fdirhashcmd);
 +
        return 0;
  }
  
  /**
 + *  ixgbe_get_fdirtcpm_82599 - generate the FDIRTCPM value from atr_input_masks
 + *  @input_masks: port masks to be bit swapped
 + *
 + *  The source and destination port masks for flow director are bit swapped
 + *  in that bit 15 affects bit 0, bit 14 affects bit 1, bit 13 affects
 + *  bit 2, and so on.  To generate a correctly swapped value we need to bit
 + *  swap the mask, and that is what this function accomplishes.
 + **/
 +static u32 ixgbe_get_fdirtcpm_82599(struct ixgbe_atr_input_masks *input_masks)
 +{
 +      u32 mask = ntohs(input_masks->dst_port_mask);
 +      mask <<= IXGBE_FDIRTCPM_DPORTM_SHIFT;
 +      mask |= ntohs(input_masks->src_port_mask);
 +      mask = ((mask & 0x55555555) << 1) | ((mask & 0xAAAAAAAA) >> 1);
 +      mask = ((mask & 0x33333333) << 2) | ((mask & 0xCCCCCCCC) >> 2);
 +      mask = ((mask & 0x0F0F0F0F) << 4) | ((mask & 0xF0F0F0F0) >> 4);
 +      return ((mask & 0x00FF00FF) << 8) | ((mask & 0xFF00FF00) >> 8);
 +}
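
FDIRTCPM wants each 16-bit port mask bit-reversed, so the helper packs the destination mask over the source mask and runs the classic swap-adjacent-bits/pairs/nibbles/bytes shuffle; note there is no final 16-bit swap, so each half is reversed in place. A self-contained check with arbitrary sample masks:

#include <assert.h>
#include <stdint.h>

/* Same shuffle as ixgbe_get_fdirtcpm_82599(): reverse the bits of each
 * 16-bit half of a 32-bit word, leaving the halves where they are. */
static uint32_t bitrev_halves(uint32_t mask)
{
	mask = ((mask & 0x55555555) << 1) | ((mask & 0xAAAAAAAA) >> 1);
	mask = ((mask & 0x33333333) << 2) | ((mask & 0xCCCCCCCC) >> 2);
	mask = ((mask & 0x0F0F0F0F) << 4) | ((mask & 0xF0F0F0F0) >> 4);
	return ((mask & 0x00FF00FF) << 8) | ((mask & 0xFF00FF00) >> 8);
}

int main(void)
{
	/* dst mask 0xFF00 in the high half, src mask 0x8000 in the low */
	uint32_t packed = (0xFF00u << 16) | 0x8000u;

	/* per half: bit 15 -> bit 0, bits 15..8 -> bits 0..7 */
	assert(bitrev_halves(packed) == ((0x00FFu << 16) | 0x0001u));
	return 0;
}
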
 +
 +/*
 + * These two macros are meant to address the fact that we have registers
 + * that are either all or in part big-endian.  As a result, on big-endian
 + * systems we will end up byte swapping the value to little-endian before
 + * it is byte swapped again and written to the hardware in the original
 + * big-endian format.
 + */
 +#define IXGBE_STORE_AS_BE32(_value) \
 +      (((u32)(_value) >> 24) | (((u32)(_value) & 0x00FF0000) >> 8) | \
 +       (((u32)(_value) & 0x0000FF00) << 8) | ((u32)(_value) << 24))
 +
 +#define IXGBE_WRITE_REG_BE32(a, reg, value) \
 +      IXGBE_WRITE_REG((a), (reg), IXGBE_STORE_AS_BE32(ntohl(value)))
 +
 +#define IXGBE_STORE_AS_BE16(_value) \
 +      (((u16)(_value) >> 8) | ((u16)(_value) << 8))
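
These macros implement the double-swap described in the comment: ntohl() brings the big-endian value to host order, IXGBE_STORE_AS_BE32() swaps unconditionally, and the writel()-style register write swaps once more on big-endian hosts, so the wire byte order reaches the register unchanged either way. A portable sketch of the little-endian case (load_be32() stands in for ntohl()):

#include <assert.h>
#include <stdint.h>
#include <string.h>

#define STORE_AS_BE32(v) \
	(((uint32_t)(v) >> 24) | (((uint32_t)(v) & 0x00FF0000) >> 8) | \
	 (((uint32_t)(v) & 0x0000FF00) << 8) | ((uint32_t)(v) << 24))

/* Portable ntohl() stand-in: assemble the host-order value from
 * big-endian bytes regardless of host endianness. */
static uint32_t load_be32(const uint8_t b[4])
{
	return ((uint32_t)b[0] << 24) | ((uint32_t)b[1] << 16) |
	       ((uint32_t)b[2] << 8)  |  (uint32_t)b[3];
}

int main(void)
{
	const uint8_t wire[4] = { 0xc0, 0xa8, 0x00, 0x01 };	/* 192.168.0.1 */
	uint32_t host = load_be32(wire);			/* ntohl() step */
	uint32_t reg  = STORE_AS_BE32(host);			/* macro step  */

	/* The register value, emitted as little-endian bytes (what a
	 * writel()-style accessor does), is the original wire order. */
	uint8_t out[4] = { reg & 0xff, (reg >> 8) & 0xff,
			   (reg >> 16) & 0xff, reg >> 24 };

	assert(memcmp(out, wire, 4) == 0);
	return 0;
}
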
 +
 +/**
   *  ixgbe_fdir_add_perfect_filter_82599 - Adds a perfect filter
   *  @hw: pointer to hardware structure
   *  @input: input bitstream
   *  hardware writes must be protected from one another.
   **/
  s32 ixgbe_fdir_add_perfect_filter_82599(struct ixgbe_hw *hw,
 -                                      struct ixgbe_atr_input *input,
 +                                      union ixgbe_atr_input *input,
                                        struct ixgbe_atr_input_masks *input_masks,
                                        u16 soft_id, u8 queue)
  {
 -      u32 fdircmd = 0;
        u32 fdirhash;
 -      u32 src_ipv4 = 0, dst_ipv4 = 0;
 -      u32 src_ipv6_1, src_ipv6_2, src_ipv6_3, src_ipv6_4;
 -      u16 src_port, dst_port, vlan_id, flex_bytes;
 -      u16 bucket_hash;
 -      u8  l4type;
 -      u8  fdirm = 0;
 -
 -      /* Get our input values */
 -      ixgbe_atr_get_l4type_82599(input, &l4type);
 +      u32 fdircmd;
 +      u32 fdirport, fdirtcpm;
 +      u32 fdirvlan;
 +      /* start with VLAN, flex bytes, VM pool, and IPv6 destination masked */
 +      u32 fdirm = IXGBE_FDIRM_VLANID | IXGBE_FDIRM_VLANP | IXGBE_FDIRM_FLEX |
 +                  IXGBE_FDIRM_POOL | IXGBE_FDIRM_DIPv6;
  
        /*
 -       * Check l4type formatting, and bail out before we touch the hardware
 +       * Check flow_type formatting, and bail out before we touch the hardware
         * if there's a configuration issue
         */
 -      switch (l4type & IXGBE_ATR_L4TYPE_MASK) {
 -      case IXGBE_ATR_L4TYPE_TCP:
 -              fdircmd |= IXGBE_FDIRCMD_L4TYPE_TCP;
 -              break;
 -      case IXGBE_ATR_L4TYPE_UDP:
 -              fdircmd |= IXGBE_FDIRCMD_L4TYPE_UDP;
 -              break;
 -      case IXGBE_ATR_L4TYPE_SCTP:
 -              fdircmd |= IXGBE_FDIRCMD_L4TYPE_SCTP;
 +      switch (input->formatted.flow_type) {
 +      case IXGBE_ATR_FLOW_TYPE_IPV4:
 +              /* use the L4 protocol mask for raw IPv4/IPv6 traffic */
 +              fdirm |= IXGBE_FDIRM_L4P;
 +      case IXGBE_ATR_FLOW_TYPE_SCTPV4:
 +              if (input_masks->dst_port_mask || input_masks->src_port_mask) {
 +                      hw_dbg(hw, " Error on src/dst port mask\n");
 +                      return IXGBE_ERR_CONFIG;
 +              }
 +      case IXGBE_ATR_FLOW_TYPE_TCPV4:
 +      case IXGBE_ATR_FLOW_TYPE_UDPV4:
                break;
        default:
 -              hw_dbg(hw, "Error on l4type input\n");
 +              hw_dbg(hw, " Error on flow type input\n");
                return IXGBE_ERR_CONFIG;
        }
  
 -      bucket_hash = ixgbe_atr_compute_hash_82599(input,
 -                                                 IXGBE_ATR_BUCKET_HASH_KEY);
 -
 -      /* bucket_hash is only 15 bits */
 -      bucket_hash &= IXGBE_ATR_HASH_MASK;
 -
 -      ixgbe_atr_get_vlan_id_82599(input, &vlan_id);
 -      ixgbe_atr_get_src_port_82599(input, &src_port);
 -      ixgbe_atr_get_dst_port_82599(input, &dst_port);
 -      ixgbe_atr_get_flex_byte_82599(input, &flex_bytes);
 -
 -      fdirhash = soft_id << IXGBE_FDIRHASH_SIG_SW_INDEX_SHIFT | bucket_hash;
 -
 -      /* Now figure out if we're IPv4 or IPv6 */
 -      if (l4type & IXGBE_ATR_L4TYPE_IPV6_MASK) {
 -              /* IPv6 */
 -              ixgbe_atr_get_src_ipv6_82599(input, &src_ipv6_1, &src_ipv6_2,
 -                                           &src_ipv6_3, &src_ipv6_4);
 -
 -              IXGBE_WRITE_REG(hw, IXGBE_FDIRSIPv6(0), src_ipv6_1);
 -              IXGBE_WRITE_REG(hw, IXGBE_FDIRSIPv6(1), src_ipv6_2);
 -              IXGBE_WRITE_REG(hw, IXGBE_FDIRSIPv6(2), src_ipv6_3);
 -              /* The last 4 bytes is the same register as IPv4 */
 -              IXGBE_WRITE_REG(hw, IXGBE_FDIRIPSA, src_ipv6_4);
 -
 -              fdircmd |= IXGBE_FDIRCMD_IPV6;
 -              fdircmd |= IXGBE_FDIRCMD_IPv6DMATCH;
 -      } else {
 -              /* IPv4 */
 -              ixgbe_atr_get_src_ipv4_82599(input, &src_ipv4);
 -              IXGBE_WRITE_REG(hw, IXGBE_FDIRIPSA, src_ipv4);
 -      }
 -
 -      ixgbe_atr_get_dst_ipv4_82599(input, &dst_ipv4);
 -      IXGBE_WRITE_REG(hw, IXGBE_FDIRIPDA, dst_ipv4);
 -
 -      IXGBE_WRITE_REG(hw, IXGBE_FDIRVLAN, (vlan_id |
 -                                  (flex_bytes << IXGBE_FDIRVLAN_FLEX_SHIFT)));
 -      IXGBE_WRITE_REG(hw, IXGBE_FDIRPORT, (src_port |
 -                    (dst_port << IXGBE_FDIRPORT_DESTINATION_SHIFT)));
 -
        /*
 -       * Program the relevant mask registers.  L4type cannot be
 -       * masked out in this implementation.
 +       * Program the relevant mask registers.  If src/dst_port or src/dst_addr
 +       * are zero, then assume a full mask for that field.  Also assume that
 +       * a VLAN of 0 is unspecified, so mask that out as well.  L4type
 +       * cannot be masked out in this implementation.
         *
         * This also assumes IPv4 only.  IPv6 masking isn't supported at this
         * point in time.
         */
 -      IXGBE_WRITE_REG(hw, IXGBE_FDIRSIP4M, input_masks->src_ip_mask);
 -      IXGBE_WRITE_REG(hw, IXGBE_FDIRDIP4M, input_masks->dst_ip_mask);
 -
 -      switch (l4type & IXGBE_ATR_L4TYPE_MASK) {
 -      case IXGBE_ATR_L4TYPE_TCP:
 -              IXGBE_WRITE_REG(hw, IXGBE_FDIRTCPM, input_masks->src_port_mask);
 -              IXGBE_WRITE_REG(hw, IXGBE_FDIRTCPM,
 -                              (IXGBE_READ_REG(hw, IXGBE_FDIRTCPM) |
 -                               (input_masks->dst_port_mask << 16)));
 +
 +      /* Program FDIRM */
 +      switch (ntohs(input_masks->vlan_id_mask) & 0xEFFF) {
 +      case 0xEFFF:
 +              /* Unmask VLAN ID - bit 0 and fall through to unmask prio */
 +              fdirm &= ~IXGBE_FDIRM_VLANID;
 +      case 0xE000:
 +              /* Unmask VLAN prio - bit 1 */
 +              fdirm &= ~IXGBE_FDIRM_VLANP;
                break;
 -      case IXGBE_ATR_L4TYPE_UDP:
 -              IXGBE_WRITE_REG(hw, IXGBE_FDIRUDPM, input_masks->src_port_mask);
 -              IXGBE_WRITE_REG(hw, IXGBE_FDIRUDPM,
 -                              (IXGBE_READ_REG(hw, IXGBE_FDIRUDPM) |
 -                               (input_masks->src_port_mask << 16)));
 +      case 0x0FFF:
 +              /* Unmask VLAN ID - bit 0 */
 +              fdirm &= ~IXGBE_FDIRM_VLANID;
                break;
 -      default:
 -              /* this already would have failed above */
 +      case 0x0000:
 +              /* do nothing, vlans already masked */
                break;
 +      default:
 +              hw_dbg(hw, " Error on VLAN mask\n");
 +              return IXGBE_ERR_CONFIG;
        }
  
 -      /* Program the last mask register, FDIRM */
 -      if (input_masks->vlan_id_mask)
 -              /* Mask both VLAN and VLANP - bits 0 and 1 */
 -              fdirm |= 0x3;
 -
 -      if (input_masks->data_mask)
 -              /* Flex bytes need masking, so mask the whole thing - bit 4 */
 -              fdirm |= 0x10;
 +      if (input_masks->flex_mask & 0xFFFF) {
 +              if ((input_masks->flex_mask & 0xFFFF) != 0xFFFF) {
 +                      hw_dbg(hw, " Error on flexible byte mask\n");
 +                      return IXGBE_ERR_CONFIG;
 +              }
 +              /* Unmask Flex Bytes - bit 4 */
 +              fdirm &= ~IXGBE_FDIRM_FLEX;
 +      }
  
        /* Now mask VM pool and destination IPv6 - bits 5 and 2 */
 -      fdirm |= 0x24;
 -
        IXGBE_WRITE_REG(hw, IXGBE_FDIRM, fdirm);
  
 -      fdircmd |= IXGBE_FDIRCMD_CMD_ADD_FLOW;
 -      fdircmd |= IXGBE_FDIRCMD_FILTER_UPDATE;
 -      fdircmd |= IXGBE_FDIRCMD_LAST;
 -      fdircmd |= IXGBE_FDIRCMD_QUEUE_EN;
 -      fdircmd |= queue << IXGBE_FDIRCMD_RX_QUEUE_SHIFT;
 +      /* store the TCP/UDP port masks, bit reversed from port layout */
 +      fdirtcpm = ixgbe_get_fdirtcpm_82599(input_masks);
 +
 +      /* write both the same so that UDP and TCP use the same mask */
 +      IXGBE_WRITE_REG(hw, IXGBE_FDIRTCPM, ~fdirtcpm);
 +      IXGBE_WRITE_REG(hw, IXGBE_FDIRUDPM, ~fdirtcpm);
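
Two conventions meet in the writes above. The bitwise NOT reflects register polarity: input_masks uses the usual netmask convention (1 = compare this bit), while the FDIRTCPM/FDIRUDPM registers are assumed to treat a set bit as "ignore". The "bit reversed" wording refers to reversing the packed 32-bit word (dst_port in the upper half, src_port in the lower); a sketch of the classic mask-and-swap reversal that ixgbe_get_fdirtcpm_82599() is assumed to perform internally:

	/* Illustrative 32-bit bit reversal via divide-and-conquer swaps. */
	static u32 bitrev32_sketch(u32 x)
	{
		x = ((x & 0x55555555) << 1) | ((x & 0xAAAAAAAA) >> 1);
		x = ((x & 0x33333333) << 2) | ((x & 0xCCCCCCCC) >> 2);
		x = ((x & 0x0F0F0F0F) << 4) | ((x & 0xF0F0F0F0) >> 4);
		return ((x & 0x00FF00FF) << 8) | ((x & 0xFF00FF00) >> 8);
	}
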
 +
 +      /* store source and destination IP masks (big-endian) */
 +      IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRSIP4M,
 +                           ~input_masks->src_ip_mask[0]);
 +      IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRDIP4M,
 +                           ~input_masks->dst_ip_mask[0]);
 +
 +      /* Apply masks to input data */
 +      input->formatted.vlan_id &= input_masks->vlan_id_mask;
 +      input->formatted.flex_bytes &= input_masks->flex_mask;
 +      input->formatted.src_port &= input_masks->src_port_mask;
 +      input->formatted.dst_port &= input_masks->dst_port_mask;
 +      input->formatted.src_ip[0] &= input_masks->src_ip_mask[0];
 +      input->formatted.dst_ip[0] &= input_masks->dst_ip_mask[0];
 +
 +      /* record vlan (little-endian) and flex_bytes (big-endian) */
 +      fdirvlan =
 +              IXGBE_STORE_AS_BE16(ntohs(input->formatted.flex_bytes));
 +      fdirvlan <<= IXGBE_FDIRVLAN_FLEX_SHIFT;
 +      fdirvlan |= ntohs(input->formatted.vlan_id);
 +      IXGBE_WRITE_REG(hw, IXGBE_FDIRVLAN, fdirvlan);
 +
 +      /* record source and destination port (little-endian) */
 +      fdirport = ntohs(input->formatted.dst_port);
 +      fdirport <<= IXGBE_FDIRPORT_DESTINATION_SHIFT;
 +      fdirport |= ntohs(input->formatted.src_port);
 +      IXGBE_WRITE_REG(hw, IXGBE_FDIRPORT, fdirport);
 +
 +      /* record the first 32 bits of the destination address (big-endian) */
 +      IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRIPDA, input->formatted.dst_ip[0]);
 +
 +      /* record the source address (big-endian) */
 +      IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRIPSA, input->formatted.src_ip[0]);
 +
 +      /* configure FDIRCMD register */
 +      fdircmd = IXGBE_FDIRCMD_CMD_ADD_FLOW | IXGBE_FDIRCMD_FILTER_UPDATE |
 +                IXGBE_FDIRCMD_LAST | IXGBE_FDIRCMD_QUEUE_EN;
 +      fdircmd |= input->formatted.flow_type << IXGBE_FDIRCMD_FLOW_TYPE_SHIFT;
 +      fdircmd |= (u32)queue << IXGBE_FDIRCMD_RX_QUEUE_SHIFT;
 +
 +      /* we only want the bucket hash so drop the upper 16 bits */
 +      fdirhash = ixgbe_atr_compute_hash_82599(input,
 +                                              IXGBE_ATR_BUCKET_HASH_KEY);
 +      fdirhash |= soft_id << IXGBE_FDIRHASH_SIG_SW_INDEX_SHIFT;
  
        IXGBE_WRITE_REG(hw, IXGBE_FDIRHASH, fdirhash);
        IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD, fdircmd);
  
        return 0;
  }
 +
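
A hypothetical caller of the function above — assuming a signature along the lines of s32 (*)(struct ixgbe_hw *hw, union ixgbe_atr_input *input, struct ixgbe_atr_input_masks *masks, u16 soft_id, u8 queue); the field names are taken from the code above, but the exact widths and the call itself are assumptions:

	/* Exact-match a TCP 4-tuple; VLAN and flex bytes are not matched. */
	struct ixgbe_atr_input_masks masks = {
		.src_ip_mask   = { htonl(0xFFFFFFFF) },	/* compare whole src IP */
		.dst_ip_mask   = { htonl(0xFFFFFFFF) },	/* compare whole dst IP */
		.src_port_mask = htons(0xFFFF),
		.dst_port_mask = htons(0xFFFF),
		.vlan_id_mask  = 0,		/* VLAN fully masked out */
		.flex_mask     = 0,		/* flex bytes fully masked out */
	};

	/* err = ixgbe_fdir_add_perfect_filter_82599(hw, &input, &masks,
	 *					      soft_id, rx_queue);
	 */
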
  /**
   *  ixgbe_read_analog_reg8_82599 - Reads 8 bit Omer analog register
   *  @hw: pointer to hardware structure
@@@ -1764,7 -1924,6 +1764,7 @@@ static u32 ixgbe_get_supported_physical
        hw->phy.ops.identify(hw);
  
        if (hw->phy.type == ixgbe_phy_tn ||
 +          hw->phy.type == ixgbe_phy_aq ||
            hw->phy.type == ixgbe_phy_cu_unknown) {
                hw->phy.ops.read_reg(hw, MDIO_PMA_EXTABLE, MDIO_MMD_PMAPMD,
                                     &ext_ability);
@@@ -1966,6 -2125,51 +1966,6 @@@ fw_version_out
        return status;
  }
  
 -/**
 - *  ixgbe_get_wwn_prefix_82599 - Get alternative WWNN/WWPN prefix from
 - *  the EEPROM
 - *  @hw: pointer to hardware structure
 - *  @wwnn_prefix: the alternative WWNN prefix
 - *  @wwpn_prefix: the alternative WWPN prefix
 - *
 - *  This function will read the EEPROM from the alternative SAN MAC address
 - *  block to check the support for the alternative WWNN/WWPN prefix support.
 - **/
 -static s32 ixgbe_get_wwn_prefix_82599(struct ixgbe_hw *hw, u16 *wwnn_prefix,
 -                                      u16 *wwpn_prefix)
 -{
 -      u16 offset, caps;
 -      u16 alt_san_mac_blk_offset;
 -
 -      /* clear output first */
 -      *wwnn_prefix = 0xFFFF;
 -      *wwpn_prefix = 0xFFFF;
 -
 -      /* check if alternative SAN MAC is supported */
 -      hw->eeprom.ops.read(hw, IXGBE_ALT_SAN_MAC_ADDR_BLK_PTR,
 -                          &alt_san_mac_blk_offset);
 -
 -      if ((alt_san_mac_blk_offset == 0) ||
 -          (alt_san_mac_blk_offset == 0xFFFF))
 -              goto wwn_prefix_out;
 -
 -      /* check capability in alternative san mac address block */
 -      offset = alt_san_mac_blk_offset + IXGBE_ALT_SAN_MAC_ADDR_CAPS_OFFSET;
 -      hw->eeprom.ops.read(hw, offset, &caps);
 -      if (!(caps & IXGBE_ALT_SAN_MAC_ADDR_CAPS_ALTWWN))
 -              goto wwn_prefix_out;
 -
 -      /* get the corresponding prefix for WWNN/WWPN */
 -      offset = alt_san_mac_blk_offset + IXGBE_ALT_SAN_MAC_ADDR_WWNN_OFFSET;
 -      hw->eeprom.ops.read(hw, offset, wwnn_prefix);
 -
 -      offset = alt_san_mac_blk_offset + IXGBE_ALT_SAN_MAC_ADDR_WWPN_OFFSET;
 -      hw->eeprom.ops.read(hw, offset, wwpn_prefix);
 -
 -wwn_prefix_out:
 -      return 0;
 -}
 -
  static struct ixgbe_mac_operations mac_ops_82599 = {
        .init_hw                = &ixgbe_init_hw_generic,
        .reset_hw               = &ixgbe_reset_hw_82599,
        .get_mac_addr           = &ixgbe_get_mac_addr_generic,
        .get_san_mac_addr       = &ixgbe_get_san_mac_addr_generic,
        .get_device_caps        = &ixgbe_get_device_caps_82599,
 -      .get_wwn_prefix         = &ixgbe_get_wwn_prefix_82599,
 +      .get_wwn_prefix         = &ixgbe_get_wwn_prefix_generic,
        .stop_adapter           = &ixgbe_stop_adapter_generic,
        .get_bus_info           = &ixgbe_get_bus_info_generic,
        .set_lan_id             = &ixgbe_set_lan_id_multi_port_pcie,
        .fc_enable              = &ixgbe_fc_enable_generic,
        .init_uta_tables        = &ixgbe_init_uta_tables_generic,
        .setup_sfp              = &ixgbe_setup_sfp_modules_82599,
 +      .set_mac_anti_spoofing  = &ixgbe_set_mac_anti_spoofing,
 +      .set_vlan_anti_spoofing = &ixgbe_set_vlan_anti_spoofing,
  };
  
  static struct ixgbe_eeprom_operations eeprom_ops_82599 = {
        .init_params            = &ixgbe_init_eeprom_params_generic,
        .read                   = &ixgbe_read_eerd_generic,
        .write                  = &ixgbe_write_eeprom_generic,
 +      .calc_checksum          = &ixgbe_calc_eeprom_checksum_generic,
        .validate_checksum      = &ixgbe_validate_eeprom_checksum_generic,
        .update_checksum        = &ixgbe_update_eeprom_checksum_generic,
  };
@@@ -2039,5 -2240,5 +2039,5 @@@ struct ixgbe_info ixgbe_82599_info = 
        .mac_ops                = &mac_ops_82599,
        .eeprom_ops             = &eeprom_ops_82599,
        .phy_ops                = &phy_ops_82599,
 -      .mbx_ops                = &mbx_ops_82599,
 +      .mbx_ops                = &mbx_ops_generic,
  };
@@@ -238,7 -238,7 +238,7 @@@ static int temac_dma_bd_init(struct net
                goto out;
        }
        /* allocate the tx and rx ring buffer descriptors. */
-       /* returns a virtual addres and a physical address. */
+       /* returns a virtual address and a physical address. */
        lp->tx_bd_v = dma_alloc_coherent(ndev->dev.parent,
                                         sizeof(*lp->tx_bd_v) * TX_BD_NUM,
                                         &lp->tx_bd_p, GFP_KERNEL);
@@@ -692,7 -692,7 +692,7 @@@ static int temac_start_xmit(struct sk_b
  
        cur_p->app0 = 0;
        if (skb->ip_summed == CHECKSUM_PARTIAL) {
 -              unsigned int csum_start_off = skb_transport_offset(skb);
 +              unsigned int csum_start_off = skb_checksum_start_offset(skb);
                unsigned int csum_index_off = csum_start_off + skb->csum_offset;
  
                cur_p->app0 |= 1; /* TX Checksum Enabled */
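
This hunk (and the tun.c one further down) swaps open-coded checksum-offset arithmetic for a helper; the helper is assumed to be exactly equivalent to the expression it replaces:

	/* Assumed definition, matching the old open-coded form. */
	static inline int skb_checksum_start_offset(const struct sk_buff *skb)
	{
		return skb->csum_start - skb_headroom(skb);
	}
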
@@@ -952,7 -952,7 +952,7 @@@ static const struct attribute_group tem
        .attrs = temac_device_attrs,
  };
  
 -static int __init
 +static int __devinit
  temac_of_probe(struct platform_device *op, const struct of_device_id *match)
  {
        struct device_node *np;
diff --combined drivers/net/tehuti.c
@@@ -12,7 -12,7 +12,7 @@@
  /*
   * RX HW/SW interaction overview
   * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-  * There are 2 types of RX communication channels betwean driver and NIC.
+  * There are 2 types of RX communication channels between driver and NIC.
   * 1) RX Free Fifo - RXF - holds descriptors of empty buffers to accept incoming
   * traffic. This Fifo is filled by SW and is read by HW. Each descriptor holds
   * info about buffer's location, size and ID. An ID field is used to identify a
@@@ -324,7 -324,7 +324,7 @@@ static int bdx_fw_load(struct bdx_priv 
        ENTER;
        master = READ_REG(priv, regINIT_SEMAPHORE);
        if (!READ_REG(priv, regINIT_STATUS) && master) {
 -              rc = request_firmware(&fw, "tehuti/firmware.bin", &priv->pdev->dev);
 +              rc = request_firmware(&fw, "tehuti/bdx.bin", &priv->pdev->dev);
                if (rc)
                        goto out;
                bdx_tx_push_desc_safe(priv, (char *)fw->data, fw->size);
@@@ -821,7 -821,7 +821,7 @@@ static void bdx_setmulti(struct net_dev
                }
  
                /* use PMF to accept first MAC_MCST_NUM (15) addresses */
-               /* TBD: sort addreses and write them in ascending order
+               /* TBD: sort addresses and write them in ascending order
                 * into RX_MAC_MCST regs. we skip this phase now and accept ALL
                * multicast frames through IMF */
               /* accept the rest of addresses through IMF */
@@@ -1346,7 -1346,7 +1346,7 @@@ static void print_rxfd(struct rxf_desc 
  /*
   * TX HW/SW interaction overview
   * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-  * There are 2 types of TX communication channels betwean driver and NIC.
+  * There are 2 types of TX communication channels between driver and NIC.
   * 1) TX Free Fifo - TXF - holds ack descriptors for sent packets
   * 2) TX Data Fifo - TXD - holds descriptors of full buffers.
   *
@@@ -2510,4 -2510,4 +2510,4 @@@ module_exit(bdx_module_exit)
  MODULE_LICENSE("GPL");
  MODULE_AUTHOR(DRIVER_AUTHOR);
  MODULE_DESCRIPTION(BDX_DRV_DESC);
 -MODULE_FIRMWARE("tehuti/firmware.bin");
 +MODULE_FIRMWARE("tehuti/bdx.bin");
diff --combined drivers/net/tun.c
@@@ -757,7 -757,7 +757,7 @@@ static __inline__ ssize_t tun_put_user(
  
                if (skb->ip_summed == CHECKSUM_PARTIAL) {
                        gso.flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
 -                      gso.csum_start = skb->csum_start - skb_headroom(skb);
 +                      gso.csum_start = skb_checksum_start_offset(skb);
                        gso.csum_offset = skb->csum_offset;
                } /* else everything is zero */
  
@@@ -1309,7 -1309,7 +1309,7 @@@ static long __tun_chr_ioctl(struct fil
                break;
  
        case SIOCGIFHWADDR:
-               /* Get hw addres */
+               /* Get hw address */
                memcpy(ifr.ifr_hwaddr.sa_data, tun->dev->dev_addr, ETH_ALEN);
                ifr.ifr_hwaddr.sa_family = tun->dev->type;
                if (copy_to_user(argp, &ifr, ifreq_len))
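
For context, SIOCGIFHWADDR is the same request user space can issue against any net device over an ordinary socket; a minimal sketch (the device name "tun0" is an assumption):

	#include <stdio.h>
	#include <string.h>
	#include <sys/ioctl.h>
	#include <sys/socket.h>
	#include <net/if.h>
	#include <unistd.h>

	int main(void)
	{
		struct ifreq ifr;
		int fd = socket(AF_INET, SOCK_DGRAM, 0);

		memset(&ifr, 0, sizeof(ifr));
		strncpy(ifr.ifr_name, "tun0", IFNAMSIZ - 1); /* assumed name */
		if (fd >= 0 && ioctl(fd, SIOCGIFHWADDR, &ifr) == 0) {
			unsigned char *mac =
				(unsigned char *)ifr.ifr_hwaddr.sa_data;
			printf("%02x:%02x:%02x:%02x:%02x:%02x\n",
			       mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
		}
		if (fd >= 0)
			close(fd);
		return 0;
	}
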
@@@ -1695,7 -1695,7 +1695,7 @@@ struct vxge_hw_device_stats_sw_err 
   * struct vxge_hw_device_stats - Contains HW per-device statistics,
   * including hw.
   * @devh: HW device handle.
-  * @dma_addr: DMA addres of the %hw_info. Given to device to fill-in the stats.
+  * @dma_addr: DMA address of the %hw_info. Given to device to fill-in the stats.
   * @hw_info_dmah: DMA handle used to map hw statistics onto the device memory
   *                space.
   * @hw_info_dma_acch: One more DMA handle used subsequently to free the
@@@ -1904,6 -1904,34 +1904,6 @@@ enum vxge_hw_ring_tcode 
        VXGE_HW_RING_T_CODE_MULTI_ERR                   = 0xF
  };
  
 -/**
 - * enum enum vxge_hw_ring_hash_type - RTH hash types
 - * @VXGE_HW_RING_HASH_TYPE_NONE: No Hash
 - * @VXGE_HW_RING_HASH_TYPE_TCP_IPV4: TCP IPv4
 - * @VXGE_HW_RING_HASH_TYPE_UDP_IPV4: UDP IPv4
 - * @VXGE_HW_RING_HASH_TYPE_IPV4: IPv4
 - * @VXGE_HW_RING_HASH_TYPE_TCP_IPV6: TCP IPv6
 - * @VXGE_HW_RING_HASH_TYPE_UDP_IPV6: UDP IPv6
 - * @VXGE_HW_RING_HASH_TYPE_IPV6: IPv6
 - * @VXGE_HW_RING_HASH_TYPE_TCP_IPV6_EX: TCP IPv6 extension
 - * @VXGE_HW_RING_HASH_TYPE_UDP_IPV6_EX: UDP IPv6 extension
 - * @VXGE_HW_RING_HASH_TYPE_IPV6_EX: IPv6 extension
 - *
 - * RTH hash types
 - */
 -enum vxge_hw_ring_hash_type {
 -      VXGE_HW_RING_HASH_TYPE_NONE                     = 0x0,
 -      VXGE_HW_RING_HASH_TYPE_TCP_IPV4         = 0x1,
 -      VXGE_HW_RING_HASH_TYPE_UDP_IPV4         = 0x2,
 -      VXGE_HW_RING_HASH_TYPE_IPV4                     = 0x3,
 -      VXGE_HW_RING_HASH_TYPE_TCP_IPV6         = 0x4,
 -      VXGE_HW_RING_HASH_TYPE_UDP_IPV6         = 0x5,
 -      VXGE_HW_RING_HASH_TYPE_IPV6                     = 0x6,
 -      VXGE_HW_RING_HASH_TYPE_TCP_IPV6_EX      = 0x7,
 -      VXGE_HW_RING_HASH_TYPE_UDP_IPV6_EX      = 0x8,
 -      VXGE_HW_RING_HASH_TYPE_IPV6_EX          = 0x9
 -};
 -
  enum vxge_hw_status vxge_hw_ring_rxd_reserve(
        struct __vxge_hw_ring *ring_handle,
        void **rxdh);
@@@ -2081,6 -2109,10 +2081,6 @@@ struct __vxge_hw_ring_rxd_priv 
  #endif
  };
  
 -/* ========================= FIFO PRIVATE API ============================= */
 -
 -struct vxge_hw_fifo_attr;
 -
  struct vxge_hw_mempool_cbs {
        void (*item_func_alloc)(
                        struct vxge_hw_mempool *mempoolh,
@@@ -2154,27 -2186,27 +2154,27 @@@ enum vxge_hw_vpath_mac_addr_add_mode 
  enum vxge_hw_status
  vxge_hw_vpath_mac_addr_add(
        struct __vxge_hw_vpath_handle *vpath_handle,
 -      u8 (macaddr)[ETH_ALEN],
 -      u8 (macaddr_mask)[ETH_ALEN],
 +      u8 *macaddr,
 +      u8 *macaddr_mask,
        enum vxge_hw_vpath_mac_addr_add_mode duplicate_mode);
  
  enum vxge_hw_status
  vxge_hw_vpath_mac_addr_get(
        struct __vxge_hw_vpath_handle *vpath_handle,
 -      u8 (macaddr)[ETH_ALEN],
 -      u8 (macaddr_mask)[ETH_ALEN]);
 +      u8 *macaddr,
 +      u8 *macaddr_mask);
  
  enum vxge_hw_status
  vxge_hw_vpath_mac_addr_get_next(
        struct __vxge_hw_vpath_handle *vpath_handle,
 -      u8 (macaddr)[ETH_ALEN],
 -      u8 (macaddr_mask)[ETH_ALEN]);
 +      u8 *macaddr,
 +      u8 *macaddr_mask);
  
  enum vxge_hw_status
  vxge_hw_vpath_mac_addr_delete(
        struct __vxge_hw_vpath_handle *vpath_handle,
 -      u8 (macaddr)[ETH_ALEN],
 -      u8 (macaddr_mask)[ETH_ALEN]);
 +      u8 *macaddr,
 +      u8 *macaddr_mask);
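
The prototype change from u8 (macaddr)[ETH_ALEN] to u8 *macaddr is behavior-neutral: C adjusts an array-typed parameter to a pointer, so the old spelling never enforced the length anyway. A standalone illustration:

	#include <stdio.h>

	/* The array bound in a parameter declaration is cosmetic only. */
	static void takes_mac(unsigned char mac[6])
	{
		printf("inside callee: %zu\n", sizeof(mac)); /* pointer size */
	}

	int main(void)
	{
		unsigned char mac[6] = { 0 };

		printf("real array:    %zu\n", sizeof(mac)); /* 6 */
		takes_mac(mac);
		return 0;
	}
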
  
  enum vxge_hw_status
  vxge_hw_vpath_vid_add(
@@@ -2281,7 -2313,6 +2281,7 @@@ vxge_hw_channel_dtr_free(struct __vxge_
  
  int
  vxge_hw_channel_dtr_count(struct __vxge_hw_channel *channel);
 +
  void
  vxge_hw_vpath_tti_ci_set(struct __vxge_hw_device *hldev, u32 vp_id);
  
diff --combined drivers/net/wan/dscc4.c
@@@ -125,7 -125,7 +125,7 @@@ static u32 dscc4_pci_config_store[16]
  /* Module parameters */
  
  MODULE_AUTHOR("Maintainer: Francois Romieu <romieu@cogenit.fr>");
- MODULE_DESCRIPTION("Siemens PEB20534 PCI Controler");
+ MODULE_DESCRIPTION("Siemens PEB20534 PCI Controller");
  MODULE_LICENSE("GPL");
  module_param(debug, int, 0);
  MODULE_PARM_DESC(debug,"Enable/disable extra messages");
@@@ -1358,7 -1358,7 +1358,7 @@@ static int dscc4_ioctl(struct net_devic
        return ret;
  }
  
 -static int dscc4_match(struct thingie *p, int value)
 +static int dscc4_match(const struct thingie *p, int value)
  {
        int i;
  
@@@ -1403,7 -1403,7 +1403,7 @@@ done
  static int dscc4_encoding_setting(struct dscc4_dev_priv *dpriv,
                                  struct net_device *dev)
  {
 -      struct thingie encoding[] = {
 +      static const struct thingie encoding[] = {
                { ENCODING_NRZ,         0x00000000 },
                { ENCODING_NRZI,        0x00200000 },
                { ENCODING_FM_MARK,     0x00400000 },
@@@ -1442,7 -1442,7 +1442,7 @@@ static int dscc4_loopback_setting(struc
  static int dscc4_crc_setting(struct dscc4_dev_priv *dpriv,
                             struct net_device *dev)
  {
 -      struct thingie crc[] = {
 +      static const struct thingie crc[] = {
                { PARITY_CRC16_PR0_CCITT,       0x00000010 },
                { PARITY_CRC16_PR1_CCITT,       0x00000000 },
                { PARITY_CRC32_PR0_CCITT,       0x00000011 },
@@@ -92,6 -92,54 +92,6 @@@ MODULE_PARM_DESC(barkers
                 "signal; values are appended to a list--setting one value "
                 "as zero cleans the existing list and starts a new one.");
  
 -static
 -struct i2400m_work *__i2400m_work_setup(
 -      struct i2400m *i2400m, void (*fn)(struct work_struct *),
 -      gfp_t gfp_flags, const void *pl, size_t pl_size)
 -{
 -      struct i2400m_work *iw;
 -
 -      iw = kzalloc(sizeof(*iw) + pl_size, gfp_flags);
 -      if (iw == NULL)
 -              return NULL;
 -      iw->i2400m = i2400m_get(i2400m);
 -      iw->pl_size = pl_size;
 -      memcpy(iw->pl, pl, pl_size);
 -      INIT_WORK(&iw->ws, fn);
 -      return iw;
 -}
 -
 -
 -/*
 - * Schedule i2400m's specific work on the system's queue.
 - *
 - * Used for a few cases where we really need it; otherwise, identical
 - * to i2400m_queue_work().
 - *
 - * Returns < 0 errno code on error, 1 if ok.
 - *
 - * If it returns zero, something really bad happened, as it means the
 - * works struct was already queued, but we have just allocated it, so
 - * it should not happen.
 - */
 -static int i2400m_schedule_work(struct i2400m *i2400m,
 -                       void (*fn)(struct work_struct *), gfp_t gfp_flags,
 -                       const void *pl, size_t pl_size)
 -{
 -      int result;
 -      struct i2400m_work *iw;
 -
 -      result = -ENOMEM;
 -      iw = __i2400m_work_setup(i2400m, fn, gfp_flags, pl, pl_size);
 -      if (iw != NULL) {
 -              result = schedule_work(&iw->ws);
 -              if (WARN_ON(result == 0))
 -                      result = -ENXIO;
 -      }
 -      return result;
 -}
 -
 -
  /*
   * WiMAX stack operation: relay a message from user space
   *
@@@ -232,7 -280,7 +232,7 @@@ int i2400m_check_mac_addr(struct i2400
                        result);
                goto error;
        }
-       /* Extract MAC addresss */
+       /* Extract MAC address */
        ddi = (void *) skb->data;
        BUILD_BUG_ON(ETH_ALEN != sizeof(ddi->mac_address));
        d_printf(2, dev, "GET DEVICE INFO: mac addr %pM\n",
@@@ -600,11 -648,17 +600,11 @@@ EXPORT_SYMBOL_GPL(i2400m_post_reset)
  static
  void __i2400m_dev_reset_handle(struct work_struct *ws)
  {
 -      int result;
 -      struct i2400m_work *iw = container_of(ws, struct i2400m_work, ws);
 -      const char *reason;
 -      struct i2400m *i2400m = iw->i2400m;
 +      struct i2400m *i2400m = container_of(ws, struct i2400m, reset_ws);
 +      const char *reason = i2400m->reset_reason;
        struct device *dev = i2400m_dev(i2400m);
        struct i2400m_reset_ctx *ctx = i2400m->reset_ctx;
 -
 -      if (WARN_ON(iw->pl_size != sizeof(reason)))
 -              reason = "SW BUG: reason n/a";
 -      else
 -              memcpy(&reason, iw->pl, sizeof(reason));
 +      int result;
  
        d_fnstart(3, dev, "(ws %p i2400m %p reason %s)\n", ws, i2400m, reason);
  
                }
        }
  out:
 -      i2400m_put(i2400m);
 -      kfree(iw);
        d_fnend(3, dev, "(ws %p i2400m %p reason %s) = void\n",
                ws, i2400m, reason);
  }
   */
  int i2400m_dev_reset_handle(struct i2400m *i2400m, const char *reason)
  {
 -      return i2400m_schedule_work(i2400m, __i2400m_dev_reset_handle,
 -                                  GFP_ATOMIC, &reason, sizeof(reason));
 +      i2400m->reset_reason = reason;
 +      return schedule_work(&i2400m->reset_ws);
  }
  EXPORT_SYMBOL_GPL(i2400m_dev_reset_handle);
  
  static
  void __i2400m_error_recovery(struct work_struct *ws)
  {
 -      struct i2400m_work *iw = container_of(ws, struct i2400m_work, ws);
 -      struct i2400m *i2400m = iw->i2400m;
 +      struct i2400m *i2400m = container_of(ws, struct i2400m, recovery_ws);
  
        i2400m_reset(i2400m, I2400M_RT_BUS);
 -
 -      i2400m_put(i2400m);
 -      kfree(iw);
 -      return;
  }
  
  /*
   */
  void i2400m_error_recovery(struct i2400m *i2400m)
  {
 -      struct device *dev = i2400m_dev(i2400m);
 -
 -      if (atomic_add_return(1, &i2400m->error_recovery) == 1) {
 -              if (i2400m_schedule_work(i2400m, __i2400m_error_recovery,
 -                      GFP_ATOMIC, NULL, 0) < 0) {
 -                      dev_err(dev, "run out of memory for "
 -                              "scheduling an error recovery ?\n");
 -                      atomic_dec(&i2400m->error_recovery);
 -              }
 -      } else
 +      if (atomic_add_return(1, &i2400m->error_recovery) == 1)
 +              schedule_work(&i2400m->recovery_ws);
 +      else
                atomic_dec(&i2400m->error_recovery);
 -      return;
  }
  EXPORT_SYMBOL_GPL(i2400m_error_recovery);
  
@@@ -817,10 -886,6 +817,10 @@@ void i2400m_init(struct i2400m *i2400m
  
        mutex_init(&i2400m->init_mutex);
        /* wake_tx_ws is initialized in i2400m_tx_setup() */
 +
 +      INIT_WORK(&i2400m->reset_ws, __i2400m_dev_reset_handle);
 +      INIT_WORK(&i2400m->recovery_ws, __i2400m_error_recovery);
 +
        atomic_set(&i2400m->bus_reset_retries, 0);
  
        i2400m->alive = 0;
@@@ -975,9 -1040,6 +975,9 @@@ void i2400m_release(struct i2400m *i240
  
        i2400m_dev_stop(i2400m);
  
 +      cancel_work_sync(&i2400m->reset_ws);
 +      cancel_work_sync(&i2400m->recovery_ws);
 +
        i2400m_debugfs_rm(i2400m);
        sysfs_remove_group(&i2400m->wimax_dev.net_dev->dev.kobj,
                           &i2400m_dev_attr_group);
@@@ -1021,6 -1083,8 +1021,6 @@@ module_init(i2400m_driver_init)
  static
  void __exit i2400m_driver_exit(void)
  {
 -      /* for scheds i2400m_dev_reset_handle() */
 -      flush_scheduled_work();
        i2400m_barker_db_exit();
  }
  module_exit(i2400m_driver_exit);
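
The i2400m changes in this file all follow one refactor: instead of kmalloc'ing a carrier struct (work_struct plus payload) per event, the work_structs now live inside struct i2400m and each handler recovers the device with container_of(). A minimal sketch of the pattern, with hypothetical "foo" names:

	#include <linux/kernel.h>
	#include <linux/workqueue.h>

	struct foo {
		struct work_struct reset_ws;
		const char *reset_reason;  /* payload lives in the object now */
	};

	static void foo_reset_handler(struct work_struct *ws)
	{
		struct foo *foo = container_of(ws, struct foo, reset_ws);

		pr_info("resetting: %s\n", foo->reset_reason);
	}

	/* init:     INIT_WORK(&foo->reset_ws, foo_reset_handler);
	 * event:    foo->reset_reason = "bus error";
	 *           schedule_work(&foo->reset_ws);    (atomic-safe, no alloc)
	 * teardown: cancel_work_sync(&foo->reset_ws); (no flush_scheduled_work)
	 */
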
@@@ -186,7 -186,7 +186,7 @@@ enum 
   * struct i2400m_poke_table - Hardware poke table for the Intel 2400m
   *
   * This structure will be used to create a device specific poke table
-  * to put the device in a consistant state at boot time.
+  * to put the device in a consistent state at boot time.
   *
   * @address: The device address to poke
   *
@@@ -632,11 -632,6 +632,11 @@@ struct i2400m 
        struct work_struct wake_tx_ws;
        struct sk_buff *wake_tx_skb;
  
 +      struct work_struct reset_ws;
 +      const char *reset_reason;
 +
 +      struct work_struct recovery_ws;
 +
        struct dentry *debugfs_dentry;
        const char *fw_name;            /* name of the current firmware image */
        unsigned long fw_version;       /* version of the firmware interface */
@@@ -703,7 -698,7 +703,7 @@@ enum i2400m_bm_cmd_flags 
   * @I2400M_BRI_MAC_REINIT: We need to reinitialize the boot
   *     rom after reading the MAC address. This is quite a dirty hack,
   *     if you ask me -- the device requires the bootrom to be
-  *     intialized after reading the MAC address.
+  *     initialized after reading the MAC address.
   */
  enum i2400m_bri {
        I2400M_BRI_SOFT       = 1 << 1,
@@@ -901,6 -896,20 +901,6 @@@ struct device *i2400m_dev(struct i2400
        return i2400m->wimax_dev.net_dev->dev.parent;
  }
  
 -/*
 - * Helper for scheduling simple work functions
 - *
 - * This struct can get any kind of payload attached (normally in the
 - * form of a struct where you pack the stuff you want to pass to the
 - * _work function).
 - */
 -struct i2400m_work {
 -      struct work_struct ws;
 -      struct i2400m *i2400m;
 -      size_t pl_size;
 -      u8 pl[0];
 -};
 -
  extern int i2400m_msg_check_status(const struct i2400m_l3l4_hdr *,
                                   char *, size_t);
  extern int i2400m_msg_size_check(struct i2400m *,
  #define       AR5K_DCU_GBL_IFS_MISC_LFSR_SLICE        0x00000007      /* LFSR Slice Select */
  #define       AR5K_DCU_GBL_IFS_MISC_TURBO_MODE        0x00000008      /* Turbo mode */
  #define       AR5K_DCU_GBL_IFS_MISC_SIFS_DUR_USEC     0x000003f0      /* SIFS Duration mask */
 +#define       AR5K_DCU_GBL_IFS_MISC_SIFS_DUR_USEC_S   4
  #define       AR5K_DCU_GBL_IFS_MISC_USEC_DUR          0x000ffc00      /* USEC Duration mask */
  #define       AR5K_DCU_GBL_IFS_MISC_USEC_DUR_S        10
  #define       AR5K_DCU_GBL_IFS_MISC_DCU_ARB_DELAY     0x00300000      /* DCU Arbiter delay mask */
  /*
   * EEPROM command register
   */
- #define AR5K_EEPROM_CMD               0x6008                  /* Register Addres */
+ #define AR5K_EEPROM_CMD               0x6008                  /* Register Address */
  #define AR5K_EEPROM_CMD_READ  0x00000001      /* EEPROM read */
  #define AR5K_EEPROM_CMD_WRITE 0x00000002      /* EEPROM write */
  #define AR5K_EEPROM_CMD_RESET 0x00000004      /* EEPROM reset */
  /*
   * EEPROM config register
   */
- #define AR5K_EEPROM_CFG                       0x6010                  /* Register Addres */
+ #define AR5K_EEPROM_CFG                       0x6010                  /* Register Address */
  #define AR5K_EEPROM_CFG_SIZE          0x00000003              /* Size determination override */
  #define AR5K_EEPROM_CFG_SIZE_AUTO     0
  #define AR5K_EEPROM_CFG_SIZE_4KBIT    1
   * Second station id register (Upper 16 bits of MAC address + PCU settings)
   */
  #define AR5K_STA_ID1                  0x8004                  /* Register Address */
- #define       AR5K_STA_ID1_ADDR_U16           0x0000ffff      /* Upper 16 bits of MAC addres */
+ #define       AR5K_STA_ID1_ADDR_U16           0x0000ffff      /* Upper 16 bits of MAC address */
  #define AR5K_STA_ID1_AP                       0x00010000      /* Set AP mode */
  #define AR5K_STA_ID1_ADHOC            0x00020000      /* Set Ad-Hoc mode */
  #define AR5K_STA_ID1_PWR_SV           0x00040000      /* Power save reporting */
  #define AR5K_IFS1_EIFS                0x03fff000
  #define AR5K_IFS1_EIFS_S      12
  #define AR5K_IFS1_CS_EN               0x04000000
 -
 +#define AR5K_IFS1_CS_EN_S     26
  
  /*
   * CFP duration register
  
  #define AR5K_PHY_SCAL                 0x9878
  #define AR5K_PHY_SCAL_32MHZ           0x0000000e
 +#define       AR5K_PHY_SCAL_32MHZ_5311        0x00000008
  #define       AR5K_PHY_SCAL_32MHZ_2417        0x0000000a
  #define       AR5K_PHY_SCAL_32MHZ_HB63        0x00000032
  
  #define       AR5K_PHY_FRAME_CTL              (ah->ah_version == AR5K_AR5210 ? \
                                        AR5K_PHY_FRAME_CTL_5210 : AR5K_PHY_FRAME_CTL_5211)
  /*---[5111+]---*/
 +#define       AR5K_PHY_FRAME_CTL_WIN_LEN      0x00000003      /* Force window length (?) */
 +#define       AR5K_PHY_FRAME_CTL_WIN_LEN_S    0
  #define       AR5K_PHY_FRAME_CTL_TX_CLIP      0x00000038      /* Mask for tx clip (?) */
  #define       AR5K_PHY_FRAME_CTL_TX_CLIP_S    3
  #define       AR5K_PHY_FRAME_CTL_PREP_CHINFO  0x00010000      /* Prepend chan info */
   */
  #define AR5K_PHY_PDADC_TXPOWER_BASE   0xa280
  #define       AR5K_PHY_PDADC_TXPOWER(_n)      (AR5K_PHY_PDADC_TXPOWER_BASE + ((_n) << 2))
 +
 +/*
 + * Platform registers for WiSoC
 + */
 +#define AR5K_AR5312_RESET             0xbc003020
 +#define AR5K_AR5312_RESET_BB0_COLD    0x00000004
 +#define AR5K_AR5312_RESET_BB1_COLD    0x00000200
 +#define AR5K_AR5312_RESET_WMAC0               0x00002000
 +#define AR5K_AR5312_RESET_BB0_WARM    0x00004000
 +#define AR5K_AR5312_RESET_WMAC1               0x00020000
 +#define AR5K_AR5312_RESET_BB1_WARM    0x00040000
 +
 +#define AR5K_AR5312_ENABLE            0xbc003080
 +#define AR5K_AR5312_ENABLE_WLAN0    0x00000001
 +#define AR5K_AR5312_ENABLE_WLAN1    0x00000008
 +
 +#define AR5K_AR2315_RESET             0xb1000004
 +#define AR5K_AR2315_RESET_WMAC                0x00000001
 +#define AR5K_AR2315_RESET_BB_WARM     0x00000002
 +
 +#define AR5K_AR2315_AHB_ARB_CTL               0xb1000008
 +#define AR5K_AR2315_AHB_ARB_CTL_WLAN  0x00000002
 +
 +#define AR5K_AR2315_BYTESWAP  0xb100000c
 +#define AR5K_AR2315_BYTESWAP_WMAC     0x00000002
@@@ -90,6 -90,170 +90,6 @@@ MODULE_ALIAS("iwl4965")
  static int iwlagn_ant_coupling;
  static bool iwlagn_bt_ch_announce = 1;
  
 -/**
 - * iwlagn_commit_rxon - commit staging_rxon to hardware
 - *
 - * The RXON command in staging_rxon is committed to the hardware and
 - * the active_rxon structure is updated with the new data.  This
 - * function correctly transitions out of the RXON_ASSOC_MSK state if
 - * a HW tune is required based on the RXON structure changes.
 - */
 -int iwlagn_commit_rxon(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
 -{
 -      /* cast away the const for active_rxon in this function */
 -      struct iwl_rxon_cmd *active_rxon = (void *)&ctx->active;
 -      int ret;
 -      bool new_assoc =
 -              !!(ctx->staging.filter_flags & RXON_FILTER_ASSOC_MSK);
 -      bool old_assoc = !!(ctx->active.filter_flags & RXON_FILTER_ASSOC_MSK);
 -
 -      if (!iwl_is_alive(priv))
 -              return -EBUSY;
 -
 -      if (!ctx->is_active)
 -              return 0;
 -
 -      /* always get timestamp with Rx frame */
 -      ctx->staging.flags |= RXON_FLG_TSF2HOST_MSK;
 -
 -      ret = iwl_check_rxon_cmd(priv, ctx);
 -      if (ret) {
 -              IWL_ERR(priv, "Invalid RXON configuration.  Not committing.\n");
 -              return -EINVAL;
 -      }
 -
 -      /*
 -       * receive commit_rxon request
 -       * abort any previous channel switch if still in process
 -       */
 -      if (priv->switch_rxon.switch_in_progress &&
 -          (priv->switch_rxon.channel != ctx->staging.channel)) {
 -              IWL_DEBUG_11H(priv, "abort channel switch on %d\n",
 -                    le16_to_cpu(priv->switch_rxon.channel));
 -              iwl_chswitch_done(priv, false);
 -      }
 -
 -      /* If we don't need to send a full RXON, we can use
 -       * iwl_rxon_assoc_cmd which is used to reconfigure filter
 -       * and other flags for the current radio configuration. */
 -      if (!iwl_full_rxon_required(priv, ctx)) {
 -              ret = iwl_send_rxon_assoc(priv, ctx);
 -              if (ret) {
 -                      IWL_ERR(priv, "Error setting RXON_ASSOC (%d)\n", ret);
 -                      return ret;
 -              }
 -
 -              memcpy(active_rxon, &ctx->staging, sizeof(*active_rxon));
 -              iwl_print_rx_config_cmd(priv, ctx);
 -              return 0;
 -      }
 -
 -      /* If we are currently associated and the new config requires
 -       * an RXON_ASSOC and the new config wants the associated mask enabled,
 -       * we must clear the associated from the active configuration
 -       * before we apply the new config */
 -      if (iwl_is_associated_ctx(ctx) && new_assoc) {
 -              IWL_DEBUG_INFO(priv, "Toggling associated bit on current RXON\n");
 -              active_rxon->filter_flags &= ~RXON_FILTER_ASSOC_MSK;
 -
 -              ret = iwl_send_cmd_pdu(priv, ctx->rxon_cmd,
 -                                     sizeof(struct iwl_rxon_cmd),
 -                                     active_rxon);
 -
 -              /* If the mask clearing failed then we set
 -               * active_rxon back to what it was previously */
 -              if (ret) {
 -                      active_rxon->filter_flags |= RXON_FILTER_ASSOC_MSK;
 -                      IWL_ERR(priv, "Error clearing ASSOC_MSK (%d)\n", ret);
 -                      return ret;
 -              }
 -              iwl_clear_ucode_stations(priv, ctx);
 -              iwl_restore_stations(priv, ctx);
 -              ret = iwl_restore_default_wep_keys(priv, ctx);
 -              if (ret) {
 -                      IWL_ERR(priv, "Failed to restore WEP keys (%d)\n", ret);
 -                      return ret;
 -              }
 -      }
 -
 -      IWL_DEBUG_INFO(priv, "Sending RXON\n"
 -                     "* with%s RXON_FILTER_ASSOC_MSK\n"
 -                     "* channel = %d\n"
 -                     "* bssid = %pM\n",
 -                     (new_assoc ? "" : "out"),
 -                     le16_to_cpu(ctx->staging.channel),
 -                     ctx->staging.bssid_addr);
 -
 -      iwl_set_rxon_hwcrypto(priv, ctx, !priv->cfg->mod_params->sw_crypto);
 -
 -      if (!old_assoc) {
 -              /*
 -               * First of all, before setting associated, we need to
 -               * send RXON timing so the device knows about the DTIM
 -               * period and other timing values
 -               */
 -              ret = iwl_send_rxon_timing(priv, ctx);
 -              if (ret) {
 -                      IWL_ERR(priv, "Error setting RXON timing!\n");
 -                      return ret;
 -              }
 -      }
 -
 -      if (priv->cfg->ops->hcmd->set_pan_params) {
 -              ret = priv->cfg->ops->hcmd->set_pan_params(priv);
 -              if (ret)
 -                      return ret;
 -      }
 -
 -      /* Apply the new configuration
 -       * RXON unassoc clears the station table in uCode so restoration of
 -       * stations is needed after it (the RXON command) completes
 -       */
 -      if (!new_assoc) {
 -              ret = iwl_send_cmd_pdu(priv, ctx->rxon_cmd,
 -                            sizeof(struct iwl_rxon_cmd), &ctx->staging);
 -              if (ret) {
 -                      IWL_ERR(priv, "Error setting new RXON (%d)\n", ret);
 -                      return ret;
 -              }
 -              IWL_DEBUG_INFO(priv, "Return from !new_assoc RXON.\n");
 -              memcpy(active_rxon, &ctx->staging, sizeof(*active_rxon));
 -              iwl_clear_ucode_stations(priv, ctx);
 -              iwl_restore_stations(priv, ctx);
 -              ret = iwl_restore_default_wep_keys(priv, ctx);
 -              if (ret) {
 -                      IWL_ERR(priv, "Failed to restore WEP keys (%d)\n", ret);
 -                      return ret;
 -              }
 -      }
 -      if (new_assoc) {
 -              priv->start_calib = 0;
 -              /* Apply the new configuration
 -               * RXON assoc doesn't clear the station table in uCode,
 -               */
 -              ret = iwl_send_cmd_pdu(priv, ctx->rxon_cmd,
 -                            sizeof(struct iwl_rxon_cmd), &ctx->staging);
 -              if (ret) {
 -                      IWL_ERR(priv, "Error setting new RXON (%d)\n", ret);
 -                      return ret;
 -              }
 -              memcpy(active_rxon, &ctx->staging, sizeof(*active_rxon));
 -      }
 -      iwl_print_rx_config_cmd(priv, ctx);
 -
 -      iwl_init_sensitivity(priv);
 -
 -      /* If we issue a new RXON command which required a tune then we must
 -       * send a new TXPOWER command or we won't be able to Tx any frames */
 -      ret = iwl_set_tx_power(priv, priv->tx_power_user_lmt, true);
 -      if (ret) {
 -              IWL_ERR(priv, "Error sending TX power (%d)\n", ret);
 -              return ret;
 -      }
 -
 -      return 0;
 -}
 -
  void iwl_update_chain_flags(struct iwl_priv *priv)
  {
        struct iwl_rxon_context *ctx;
        if (priv->cfg->ops->hcmd->set_rxon_chain) {
                for_each_context(priv, ctx) {
                        priv->cfg->ops->hcmd->set_rxon_chain(priv, ctx);
 -                      iwlcore_commit_rxon(priv, ctx);
 +                      if (ctx->active.rx_chain != ctx->staging.rx_chain)
 +                              iwlcore_commit_rxon(priv, ctx);
                }
        }
  }
@@@ -248,8 -411,7 +248,8 @@@ static unsigned int iwl_hw_get_beacon_c
  
        return sizeof(*tx_beacon_cmd) + frame_size;
  }
 -static int iwl_send_beacon_cmd(struct iwl_priv *priv)
 +
 +int iwlagn_send_beacon_cmd(struct iwl_priv *priv)
  {
        struct iwl_frame *frame;
        unsigned int frame_size;
@@@ -499,7 -661,7 +499,7 @@@ static void iwl_bg_beacon_update(struc
  
        priv->beacon_skb = beacon;
  
 -      iwl_send_beacon_cmd(priv);
 +      iwlagn_send_beacon_cmd(priv);
   out:
        mutex_unlock(&priv->mutex);
  }
@@@ -1154,7 -1316,7 +1154,7 @@@ static void iwl_irq_tasklet_legacy(stru
        }
  
        /* Re-enable all interrupts */
-       /* only Re-enable if diabled by irq */
+       /* only Re-enable if disabled by irq */
        if (test_bit(STATUS_INT_ENABLED, &priv->status))
                iwl_enable_interrupts(priv);
  
@@@ -1368,7 -1530,7 +1368,7 @@@ static void iwl_irq_tasklet(struct iwl_
        }
  
        /* Re-enable all interrupts */
-       /* only Re-enable if diabled by irq */
+       /* only Re-enable if disabled by irq */
        if (test_bit(STATUS_INT_ENABLED, &priv->status))
                iwl_enable_interrupts(priv);
  }
@@@ -2502,7 -2664,7 +2502,7 @@@ int iwl_dump_nic_event_log(struct iwl_p
                return pos;
        }
  
 -      /* enable/disable bt channel announcement */
 +      /* enable/disable bt channel inhibition */
        priv->bt_ch_announce = iwlagn_bt_ch_announce;
  
  #ifdef CONFIG_IWLWIFI_DEBUG
@@@ -2654,8 -2816,13 +2654,8 @@@ static void iwl_alive_start(struct iwl_
        /* After the ALIVE response, we can send host commands to the uCode */
        set_bit(STATUS_ALIVE, &priv->status);
  
 -      if (priv->cfg->ops->lib->recover_from_tx_stall) {
 -              /* Enable timer to monitor the driver queues */
 -              mod_timer(&priv->monitor_recover,
 -                      jiffies +
 -                      msecs_to_jiffies(
 -                        priv->cfg->base_params->monitor_recover_period));
 -      }
 +      /* Enable watchdog to monitor the driver tx queues */
 +      iwl_setup_watchdog(priv);
  
        if (iwl_is_rfkill(priv))
                return;
  
        iwl_reset_run_time_calib(priv);
  
 +      set_bit(STATUS_READY, &priv->status);
 +
        /* Configure the adapter for unassociated operation */
        iwlcore_commit_rxon(priv, ctx);
  
        iwl_leds_init(priv);
  
        IWL_DEBUG_INFO(priv, "ALIVE processing complete.\n");
 -      set_bit(STATUS_READY, &priv->status);
        wake_up_interruptible(&priv->wait_command_queue);
  
        iwl_power_update_mode(priv, true);
@@@ -2750,7 -2916,8 +2750,7 @@@ static void __iwl_down(struct iwl_priv 
  
        /* Stop TX queues watchdog. We need to have STATUS_EXIT_PENDING bit set
         * to prevent the timer from being rearmed */
 -      if (priv->cfg->ops->lib->recover_from_tx_stall)
 -              del_timer_sync(&priv->monitor_recover);
 +      del_timer_sync(&priv->watchdog);
  
        iwl_clear_ucode_stations(priv, NULL);
        iwl_dealloc_bcast_stations(priv);
                                STATUS_EXIT_PENDING;
  
        /* device going down, Stop using ICT table */
 -      iwl_disable_ict(priv);
 +      if (priv->cfg->ops->lib->isr_ops.disable)
 +              priv->cfg->ops->lib->isr_ops.disable(priv);
  
        iwlagn_txq_ctx_stop(priv);
        iwlagn_rxq_stop(priv);
@@@ -3035,8 -3201,7 +3035,8 @@@ static void iwl_bg_alive_start(struct w
                return;
  
        /* enable dram interrupt */
 -      iwl_reset_ict(priv);
 +      if (priv->cfg->ops->lib->isr_ops.reset)
 +              priv->cfg->ops->lib->isr_ops.reset(priv);
  
        mutex_lock(&priv->mutex);
        iwl_alive_start(priv);
@@@ -3144,6 -3309,92 +3144,6 @@@ static void iwl_bg_rx_replenish(struct 
        mutex_unlock(&priv->mutex);
  }
  
 -#define IWL_DELAY_NEXT_SCAN (HZ*2)
 -
 -void iwl_post_associate(struct iwl_priv *priv, struct ieee80211_vif *vif)
 -{
 -      struct iwl_rxon_context *ctx;
 -      struct ieee80211_conf *conf = NULL;
 -      int ret = 0;
 -
 -      if (!vif || !priv->is_open)
 -              return;
 -
 -      ctx = iwl_rxon_ctx_from_vif(vif);
 -
 -      if (vif->type == NL80211_IFTYPE_AP) {
 -              IWL_ERR(priv, "%s Should not be called in AP mode\n", __func__);
 -              return;
 -      }
 -
 -      if (test_bit(STATUS_EXIT_PENDING, &priv->status))
 -              return;
 -
 -      iwl_scan_cancel_timeout(priv, 200);
 -
 -      conf = ieee80211_get_hw_conf(priv->hw);
 -
 -      ctx->staging.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
 -      iwlcore_commit_rxon(priv, ctx);
 -
 -      ret = iwl_send_rxon_timing(priv, ctx);
 -      if (ret)
 -              IWL_WARN(priv, "RXON timing - "
 -                          "Attempting to continue.\n");
 -
 -      ctx->staging.filter_flags |= RXON_FILTER_ASSOC_MSK;
 -
 -      iwl_set_rxon_ht(priv, &priv->current_ht_config);
 -
 -      if (priv->cfg->ops->hcmd->set_rxon_chain)
 -              priv->cfg->ops->hcmd->set_rxon_chain(priv, ctx);
 -
 -      ctx->staging.assoc_id = cpu_to_le16(vif->bss_conf.aid);
 -
 -      IWL_DEBUG_ASSOC(priv, "assoc id %d beacon interval %d\n",
 -                      vif->bss_conf.aid, vif->bss_conf.beacon_int);
 -
 -      if (vif->bss_conf.use_short_preamble)
 -              ctx->staging.flags |= RXON_FLG_SHORT_PREAMBLE_MSK;
 -      else
 -              ctx->staging.flags &= ~RXON_FLG_SHORT_PREAMBLE_MSK;
 -
 -      if (ctx->staging.flags & RXON_FLG_BAND_24G_MSK) {
 -              if (vif->bss_conf.use_short_slot)
 -                      ctx->staging.flags |= RXON_FLG_SHORT_SLOT_MSK;
 -              else
 -                      ctx->staging.flags &= ~RXON_FLG_SHORT_SLOT_MSK;
 -      }
 -
 -      iwlcore_commit_rxon(priv, ctx);
 -
 -      IWL_DEBUG_ASSOC(priv, "Associated as %d to: %pM\n",
 -                      vif->bss_conf.aid, ctx->active.bssid_addr);
 -
 -      switch (vif->type) {
 -      case NL80211_IFTYPE_STATION:
 -              break;
 -      case NL80211_IFTYPE_ADHOC:
 -              iwl_send_beacon_cmd(priv);
 -              break;
 -      default:
 -              IWL_ERR(priv, "%s Should not be called in %d mode\n",
 -                        __func__, vif->type);
 -              break;
 -      }
 -
 -      /* the chain noise calibration will enabled PM upon completion
 -       * If chain noise has already been run, then we need to enable
 -       * power management here */
 -      if (priv->chain_noise_data.state == IWL_CHAIN_NOISE_DONE)
 -              iwl_power_update_mode(priv, false);
 -
 -      /* Enable Rx differential gain and sensitivity calibrations */
 -      iwl_chain_noise_reset(priv);
 -      priv->start_calib = 1;
 -
 -}
 -
  /*****************************************************************************
   *
   * mac80211 entry point functions
@@@ -3169,8 -3420,7 +3169,8 @@@ static int iwl_mac_setup_register(struc
        hw->flags = IEEE80211_HW_SIGNAL_DBM |
                    IEEE80211_HW_AMPDU_AGGREGATION |
                    IEEE80211_HW_NEED_DTIM_PERIOD |
 -                  IEEE80211_HW_SPECTRUM_MGMT;
 +                  IEEE80211_HW_SPECTRUM_MGMT |
 +                  IEEE80211_HW_REPORTS_TX_ACK_STATUS;
  
        if (!priv->cfg->base_params->broken_powersave)
                hw->flags |= IEEE80211_HW_SUPPORTS_PS |
  }
  
  
 -static int iwl_mac_start(struct ieee80211_hw *hw)
 +int iwlagn_mac_start(struct ieee80211_hw *hw)
  {
        struct iwl_priv *priv = hw->priv;
        int ret;
@@@ -3265,7 -3515,7 +3265,7 @@@ out
        return 0;
  }
  
 -static void iwl_mac_stop(struct ieee80211_hw *hw)
 +void iwlagn_mac_stop(struct ieee80211_hw *hw)
  {
        struct iwl_priv *priv = hw->priv;
  
  
        flush_workqueue(priv->workqueue);
  
 -      /* enable interrupts again in order to receive rfkill changes */
 +      /* User space software may expect to receive rfkill changes
 +       * even if the interface is down */
        iwl_write32(priv, CSR_INT, 0xFFFFFFFF);
 -      iwl_enable_interrupts(priv);
 +      iwl_enable_rfkill_int(priv);
  
        IWL_DEBUG_MAC80211(priv, "leave\n");
  }
  
 -static int iwl_mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
 +int iwlagn_mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
  {
        struct iwl_priv *priv = hw->priv;
  
        return NETDEV_TX_OK;
  }
  
 -void iwl_config_ap(struct iwl_priv *priv, struct ieee80211_vif *vif)
 -{
 -      struct iwl_rxon_context *ctx = iwl_rxon_ctx_from_vif(vif);
 -      int ret = 0;
 -
 -      lockdep_assert_held(&priv->mutex);
 -
 -      if (test_bit(STATUS_EXIT_PENDING, &priv->status))
 -              return;
 -
 -      /* The following should be done only at AP bring up */
 -      if (!iwl_is_associated_ctx(ctx)) {
 -
 -              /* RXON - unassoc (to set timing command) */
 -              ctx->staging.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
 -              iwlcore_commit_rxon(priv, ctx);
 -
 -              /* RXON Timing */
 -              ret = iwl_send_rxon_timing(priv, ctx);
 -              if (ret)
 -                      IWL_WARN(priv, "RXON timing failed - "
 -                                      "Attempting to continue.\n");
 -
 -              /* AP has all antennas */
 -              priv->chain_noise_data.active_chains =
 -                      priv->hw_params.valid_rx_ant;
 -              iwl_set_rxon_ht(priv, &priv->current_ht_config);
 -              if (priv->cfg->ops->hcmd->set_rxon_chain)
 -                      priv->cfg->ops->hcmd->set_rxon_chain(priv, ctx);
 -
 -              ctx->staging.assoc_id = 0;
 -
 -              if (vif->bss_conf.use_short_preamble)
 -                      ctx->staging.flags |=
 -                              RXON_FLG_SHORT_PREAMBLE_MSK;
 -              else
 -                      ctx->staging.flags &=
 -                              ~RXON_FLG_SHORT_PREAMBLE_MSK;
 -
 -              if (ctx->staging.flags & RXON_FLG_BAND_24G_MSK) {
 -                      if (vif->bss_conf.use_short_slot)
 -                              ctx->staging.flags |=
 -                                      RXON_FLG_SHORT_SLOT_MSK;
 -                      else
 -                              ctx->staging.flags &=
 -                                      ~RXON_FLG_SHORT_SLOT_MSK;
 -              }
 -              /* need to send beacon cmd before committing assoc RXON! */
 -              iwl_send_beacon_cmd(priv);
 -              /* restore RXON assoc */
 -              ctx->staging.filter_flags |= RXON_FILTER_ASSOC_MSK;
 -              iwlcore_commit_rxon(priv, ctx);
 -      }
 -      iwl_send_beacon_cmd(priv);
 -
 -      /* FIXME - we need to add code here to detect a totally new
 -       * configuration, reset the AP, unassoc, rxon timing, assoc,
 -       * clear sta table, add BCAST sta... */
 -}
 -
 -static void iwl_mac_update_tkip_key(struct ieee80211_hw *hw,
 -                                  struct ieee80211_vif *vif,
 -                                  struct ieee80211_key_conf *keyconf,
 -                                  struct ieee80211_sta *sta,
 -                                  u32 iv32, u16 *phase1key)
 +void iwlagn_mac_update_tkip_key(struct ieee80211_hw *hw,
 +                              struct ieee80211_vif *vif,
 +                              struct ieee80211_key_conf *keyconf,
 +                              struct ieee80211_sta *sta,
 +                              u32 iv32, u16 *phase1key)
  {
 -
        struct iwl_priv *priv = hw->priv;
        struct iwl_vif_priv *vif_priv = (void *)vif->drv_priv;
  
        IWL_DEBUG_MAC80211(priv, "leave\n");
  }
  
 -static int iwl_mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
 -                         struct ieee80211_vif *vif,
 -                         struct ieee80211_sta *sta,
 -                         struct ieee80211_key_conf *key)
 +int iwlagn_mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
 +                     struct ieee80211_vif *vif, struct ieee80211_sta *sta,
 +                     struct ieee80211_key_conf *key)
  {
        struct iwl_priv *priv = hw->priv;
        struct iwl_vif_priv *vif_priv = (void *)vif->drv_priv;
        return ret;
  }
  
 -static int iwl_mac_ampdu_action(struct ieee80211_hw *hw,
 -                              struct ieee80211_vif *vif,
 -                              enum ieee80211_ampdu_mlme_action action,
 -                              struct ieee80211_sta *sta, u16 tid, u16 *ssn)
 +int iwlagn_mac_ampdu_action(struct ieee80211_hw *hw,
 +                          struct ieee80211_vif *vif,
 +                          enum ieee80211_ampdu_mlme_action action,
 +                          struct ieee80211_sta *sta, u16 tid, u16 *ssn)
  {
        struct iwl_priv *priv = hw->priv;
        int ret = -EINVAL;
        return ret;
  }
  
 -static void iwl_mac_sta_notify(struct ieee80211_hw *hw,
 -                             struct ieee80211_vif *vif,
 -                             enum sta_notify_cmd cmd,
 -                             struct ieee80211_sta *sta)
 -{
 -      struct iwl_priv *priv = hw->priv;
 -      struct iwl_station_priv *sta_priv = (void *)sta->drv_priv;
 -      int sta_id;
 -
 -      switch (cmd) {
 -      case STA_NOTIFY_SLEEP:
 -              WARN_ON(!sta_priv->client);
 -              sta_priv->asleep = true;
 -              if (atomic_read(&sta_priv->pending_frames) > 0)
 -                      ieee80211_sta_block_awake(hw, sta, true);
 -              break;
 -      case STA_NOTIFY_AWAKE:
 -              WARN_ON(!sta_priv->client);
 -              if (!sta_priv->asleep)
 -                      break;
 -              sta_priv->asleep = false;
 -              sta_id = iwl_sta_id(sta);
 -              if (sta_id != IWL_INVALID_STATION)
 -                      iwl_sta_modify_ps_wake(priv, sta_id);
 -              break;
 -      default:
 -              break;
 -      }
 -}
 -
 -static int iwlagn_mac_sta_add(struct ieee80211_hw *hw,
 -                            struct ieee80211_vif *vif,
 -                            struct ieee80211_sta *sta)
 +int iwlagn_mac_sta_add(struct ieee80211_hw *hw,
 +                     struct ieee80211_vif *vif,
 +                     struct ieee80211_sta *sta)
  {
        struct iwl_priv *priv = hw->priv;
        struct iwl_station_priv *sta_priv = (void *)sta->drv_priv;
        return 0;
  }
  
 -static void iwl_mac_channel_switch(struct ieee80211_hw *hw,
 -                                 struct ieee80211_channel_switch *ch_switch)
 +void iwlagn_mac_channel_switch(struct ieee80211_hw *hw,
 +                             struct ieee80211_channel_switch *ch_switch)
  {
        struct iwl_priv *priv = hw->priv;
        const struct iwl_channel_info *ch_info;
@@@ -3615,10 -3956,10 +3615,10 @@@ out_exit
        IWL_DEBUG_MAC80211(priv, "leave\n");
  }
  
 -static void iwlagn_configure_filter(struct ieee80211_hw *hw,
 -                                  unsigned int changed_flags,
 -                                  unsigned int *total_flags,
 -                                  u64 multicast)
 +void iwlagn_configure_filter(struct ieee80211_hw *hw,
 +                           unsigned int changed_flags,
 +                           unsigned int *total_flags,
 +                           u64 multicast)
  {
        struct iwl_priv *priv = hw->priv;
        __le32 filter_or = 0, filter_nand = 0;
                        changed_flags, *total_flags);
  
        CHK(FIF_OTHER_BSS | FIF_PROMISC_IN_BSS, RXON_FILTER_PROMISC_MSK);
 -      CHK(FIF_CONTROL, RXON_FILTER_CTL2HOST_MSK);
 +      /* Setting _just_ RXON_FILTER_CTL2HOST_MSK causes FH errors */
 +      CHK(FIF_CONTROL, RXON_FILTER_CTL2HOST_MSK | RXON_FILTER_PROMISC_MSK);
        CHK(FIF_BCN_PRBRESP_PROMISC, RXON_FILTER_BCON_AWARE_MSK);
  
  #undef CHK
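
The CHK() invocations above rely on a macro defined just before this hunk (not shown here); it is assumed to expand roughly as below, translating each mac80211 filter flag into RXON filter bits to set or clear:

	#define CHK(test, flag)					\
		do {						\
			if (*total_flags & (test))		\
				filter_or |= (flag);		\
			else					\
				filter_nand |= (flag);		\
		} while (0)
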
        for_each_context(priv, ctx) {
                ctx->staging.filter_flags &= ~filter_nand;
                ctx->staging.filter_flags |= filter_or;
 -              iwlcore_commit_rxon(priv, ctx);
 +
 +              /*
 +               * Don't commit directly while the hardware may be mid-scan;
 +               * the staged filter flags will be committed eventually anyway.
 +               */
        }
  
        mutex_unlock(&priv->mutex);
                        FIF_BCN_PRBRESP_PROMISC | FIF_CONTROL;
  }
  
 -static void iwl_mac_flush(struct ieee80211_hw *hw, bool drop)
 +void iwlagn_mac_flush(struct ieee80211_hw *hw, bool drop)
  {
        struct iwl_priv *priv = hw->priv;
  
@@@ -3738,9 -4074,12 +3738,9 @@@ static void iwl_setup_deferred_work(str
        priv->ucode_trace.data = (unsigned long)priv;
        priv->ucode_trace.function = iwl_bg_ucode_trace;
  
 -      if (priv->cfg->ops->lib->recover_from_tx_stall) {
 -              init_timer(&priv->monitor_recover);
 -              priv->monitor_recover.data = (unsigned long)priv;
 -              priv->monitor_recover.function =
 -                      priv->cfg->ops->lib->recover_from_tx_stall;
 -      }
 +      init_timer(&priv->watchdog);
 +      priv->watchdog.data = (unsigned long)priv;
 +      priv->watchdog.function = iwl_bg_watchdog;
  
        if (!priv->cfg->base_params->use_isr_legacy)
                tasklet_init(&priv->irq_tasklet, (void (*)(unsigned long))
@@@ -3833,13 -4172,13 +3833,13 @@@ static int iwl_init_drv(struct iwl_pri
                priv->bt_on_thresh = BT_ON_THRESHOLD_DEF;
                priv->bt_duration = BT_DURATION_LIMIT_DEF;
                priv->dynamic_frag_thresh = BT_FRAG_THRESHOLD_DEF;
 -              priv->dynamic_agg_thresh = BT_AGG_THRESHOLD_DEF;
        }
  
        /* Set the tx_power_user_lmt to the lowest power level
         * this value will get overwritten by channel max power avg
         * from eeprom */
        priv->tx_power_user_lmt = IWLAGN_TX_POWER_TARGET_POWER_MIN;
 +      priv->tx_power_next = IWLAGN_TX_POWER_TARGET_POWER_MIN;
  
        ret = iwl_init_channel_map(priv);
        if (ret) {
@@@ -3870,30 -4209,28 +3870,30 @@@ static void iwl_uninit_drv(struct iwl_p
        kfree(priv->scan_cmd);
  }
  
 -static struct ieee80211_ops iwl_hw_ops = {
 -      .tx = iwl_mac_tx,
 -      .start = iwl_mac_start,
 -      .stop = iwl_mac_stop,
 +#ifdef CONFIG_IWL5000
 +struct ieee80211_ops iwlagn_hw_ops = {
 +      .tx = iwlagn_mac_tx,
 +      .start = iwlagn_mac_start,
 +      .stop = iwlagn_mac_stop,
        .add_interface = iwl_mac_add_interface,
        .remove_interface = iwl_mac_remove_interface,
 -      .config = iwl_mac_config,
 +      .change_interface = iwl_mac_change_interface,
 +      .config = iwlagn_mac_config,
        .configure_filter = iwlagn_configure_filter,
 -      .set_key = iwl_mac_set_key,
 -      .update_tkip_key = iwl_mac_update_tkip_key,
 +      .set_key = iwlagn_mac_set_key,
 +      .update_tkip_key = iwlagn_mac_update_tkip_key,
        .conf_tx = iwl_mac_conf_tx,
 -      .reset_tsf = iwl_mac_reset_tsf,
 -      .bss_info_changed = iwl_bss_info_changed,
 -      .ampdu_action = iwl_mac_ampdu_action,
 +      .bss_info_changed = iwlagn_bss_info_changed,
 +      .ampdu_action = iwlagn_mac_ampdu_action,
        .hw_scan = iwl_mac_hw_scan,
 -      .sta_notify = iwl_mac_sta_notify,
 +      .sta_notify = iwlagn_mac_sta_notify,
        .sta_add = iwlagn_mac_sta_add,
        .sta_remove = iwl_mac_sta_remove,
 -      .channel_switch = iwl_mac_channel_switch,
 -      .flush = iwl_mac_flush,
 +      .channel_switch = iwlagn_mac_channel_switch,
 +      .flush = iwlagn_mac_flush,
        .tx_last_beacon = iwl_mac_tx_last_beacon,
  };
 +#endif
  
  static void iwl_hw_detect(struct iwl_priv *priv)
  {
@@@ -3961,15 -4298,10 +3961,15 @@@ static int iwl_pci_probe(struct pci_de
        if (cfg->mod_params->disable_hw_scan) {
                dev_printk(KERN_DEBUG, &(pdev->dev),
                        "sw scan support is deprecated\n");
 -              iwl_hw_ops.hw_scan = NULL;
 +#ifdef CONFIG_IWL5000
 +              iwlagn_hw_ops.hw_scan = NULL;
 +#endif
 +#ifdef CONFIG_IWL4965
 +              iwl4965_hw_ops.hw_scan = NULL;
 +#endif
        }
  
 -      hw = iwl_alloc_all(cfg, &iwl_hw_ops);
 +      hw = iwl_alloc_all(cfg);
        if (!hw) {
                err = -ENOMEM;
                goto out;
                BIT(NL80211_IFTYPE_ADHOC);
        priv->contexts[IWL_RXON_CTX_BSS].interface_modes =
                BIT(NL80211_IFTYPE_STATION);
 +      priv->contexts[IWL_RXON_CTX_BSS].ap_devtype = RXON_DEV_TYPE_AP;
        priv->contexts[IWL_RXON_CTX_BSS].ibss_devtype = RXON_DEV_TYPE_IBSS;
        priv->contexts[IWL_RXON_CTX_BSS].station_devtype = RXON_DEV_TYPE_ESS;
        priv->contexts[IWL_RXON_CTX_BSS].unused_devtype = RXON_DEV_TYPE_ESS;
                (iwlagn_ant_coupling > IWL_BT_ANTENNA_COUPLING_THRESHOLD) ?
                true : false;
  
 -      /* enable/disable bt channel announcement */
 +      /* enable/disable bt channel inhibition */
        priv->bt_ch_announce = iwlagn_bt_ch_announce;
 +      IWL_DEBUG_INFO(priv, "BT channel inhibition is %s\n",
 +                     (priv->bt_ch_announce) ? "On" : "Off");
  
        if (iwl_alloc_traffic_mem(priv))
                IWL_ERR(priv, "Not enough memory to generate traffic log\n");
        if (err)
                goto out_free_eeprom;
  
 +      err = iwl_eeprom_check_sku(priv);
 +      if (err)
 +              goto out_free_eeprom;
 +
        /* extract MAC Address */
        iwl_eeprom_get_mac(priv, priv->addresses[0].addr);
        IWL_DEBUG_INFO(priv, "MAC address: %pM\n", priv->addresses[0].addr);
  
        pci_enable_msi(priv->pci_dev);
  
 -      iwl_alloc_isr_ict(priv);
 -      err = request_irq(priv->pci_dev->irq, priv->cfg->ops->lib->isr,
 +      if (priv->cfg->ops->lib->isr_ops.alloc)
 +              priv->cfg->ops->lib->isr_ops.alloc(priv);
 +
 +      err = request_irq(priv->pci_dev->irq, priv->cfg->ops->lib->isr_ops.isr,
                          IRQF_SHARED, DRV_NAME, priv);
        if (err) {
                IWL_ERR(priv, "Error allocating IRQ %d\n", priv->pci_dev->irq);
         * 8. Enable interrupts and read RFKILL state
         *********************************************/
  
 -      /* enable interrupts if needed: hw bug w/a */
 +      /* enable rfkill interrupt: hw bug w/a */
        pci_read_config_word(priv->pci_dev, PCI_COMMAND, &pci_cmd);
        if (pci_cmd & PCI_COMMAND_INTX_DISABLE) {
                pci_cmd &= ~PCI_COMMAND_INTX_DISABLE;
                pci_write_config_word(priv->pci_dev, PCI_COMMAND, pci_cmd);
        }
  
 -      iwl_enable_interrupts(priv);
 +      iwl_enable_rfkill_int(priv);
  
        /* If platform's RF_KILL switch is NOT set to KILL */
        if (iwl_read32(priv, CSR_GP_CNTRL) & CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW)
        destroy_workqueue(priv->workqueue);
        priv->workqueue = NULL;
        free_irq(priv->pci_dev->irq, priv);
 -      iwl_free_isr_ict(priv);
 +      if (priv->cfg->ops->lib->isr_ops.free)
 +              priv->cfg->ops->lib->isr_ops.free(priv);
   out_disable_msi:
        pci_disable_msi(priv->pci_dev);
        iwl_uninit_drv(priv);
@@@ -4321,8 -4643,7 +4321,8 @@@ static void __devexit iwl_pci_remove(st
  
        iwl_uninit_drv(priv);
  
 -      iwl_free_isr_ict(priv);
 +      if (priv->cfg->ops->lib->isr_ops.free)
 +              priv->cfg->ops->lib->isr_ops.free(priv);
  
        dev_kfree_skb(priv->beacon_skb);
  
@@@ -4413,32 -4734,51 +4413,32 @@@ static DEFINE_PCI_DEVICE_TABLE(iwl_hw_c
        {IWL_PCI_DEVICE(0x4239, 0x1311, iwl6000i_2agn_cfg)},
        {IWL_PCI_DEVICE(0x4239, 0x1316, iwl6000i_2abg_cfg)},
  
 -/* 6x00 Series Gen2a */
 -      {IWL_PCI_DEVICE(0x0082, 0x1201, iwl6000g2a_2agn_cfg)},
 -      {IWL_PCI_DEVICE(0x0085, 0x1211, iwl6000g2a_2agn_cfg)},
 -      {IWL_PCI_DEVICE(0x0082, 0x1221, iwl6000g2a_2agn_cfg)},
 -      {IWL_PCI_DEVICE(0x0082, 0x1206, iwl6000g2a_2abg_cfg)},
 -      {IWL_PCI_DEVICE(0x0085, 0x1216, iwl6000g2a_2abg_cfg)},
 -      {IWL_PCI_DEVICE(0x0082, 0x1226, iwl6000g2a_2abg_cfg)},
 -      {IWL_PCI_DEVICE(0x0082, 0x1207, iwl6000g2a_2bg_cfg)},
 -      {IWL_PCI_DEVICE(0x0082, 0x1301, iwl6000g2a_2agn_cfg)},
 -      {IWL_PCI_DEVICE(0x0082, 0x1306, iwl6000g2a_2abg_cfg)},
 -      {IWL_PCI_DEVICE(0x0082, 0x1307, iwl6000g2a_2bg_cfg)},
 -      {IWL_PCI_DEVICE(0x0082, 0x1321, iwl6000g2a_2agn_cfg)},
 -      {IWL_PCI_DEVICE(0x0082, 0x1326, iwl6000g2a_2abg_cfg)},
 -      {IWL_PCI_DEVICE(0x0085, 0x1311, iwl6000g2a_2agn_cfg)},
 -      {IWL_PCI_DEVICE(0x0085, 0x1316, iwl6000g2a_2abg_cfg)},
 -
 -/* 6x00 Series Gen2b */
 -      {IWL_PCI_DEVICE(0x008F, 0x5105, iwl6000g2b_bgn_cfg)},
 -      {IWL_PCI_DEVICE(0x0090, 0x5115, iwl6000g2b_bgn_cfg)},
 -      {IWL_PCI_DEVICE(0x008F, 0x5125, iwl6000g2b_bgn_cfg)},
 -      {IWL_PCI_DEVICE(0x008F, 0x5107, iwl6000g2b_bg_cfg)},
 -      {IWL_PCI_DEVICE(0x008F, 0x5201, iwl6000g2b_2agn_cfg)},
 -      {IWL_PCI_DEVICE(0x0090, 0x5211, iwl6000g2b_2agn_cfg)},
 -      {IWL_PCI_DEVICE(0x008F, 0x5221, iwl6000g2b_2agn_cfg)},
 -      {IWL_PCI_DEVICE(0x008F, 0x5206, iwl6000g2b_2abg_cfg)},
 -      {IWL_PCI_DEVICE(0x0090, 0x5216, iwl6000g2b_2abg_cfg)},
 -      {IWL_PCI_DEVICE(0x008F, 0x5226, iwl6000g2b_2abg_cfg)},
 -      {IWL_PCI_DEVICE(0x008F, 0x5207, iwl6000g2b_2bg_cfg)},
 -      {IWL_PCI_DEVICE(0x008A, 0x5301, iwl6000g2b_bgn_cfg)},
 -      {IWL_PCI_DEVICE(0x008A, 0x5305, iwl6000g2b_bgn_cfg)},
 -      {IWL_PCI_DEVICE(0x008A, 0x5307, iwl6000g2b_bg_cfg)},
 -      {IWL_PCI_DEVICE(0x008A, 0x5321, iwl6000g2b_bgn_cfg)},
 -      {IWL_PCI_DEVICE(0x008A, 0x5325, iwl6000g2b_bgn_cfg)},
 -      {IWL_PCI_DEVICE(0x008B, 0x5311, iwl6000g2b_bgn_cfg)},
 -      {IWL_PCI_DEVICE(0x008B, 0x5315, iwl6000g2b_bgn_cfg)},
 -      {IWL_PCI_DEVICE(0x0090, 0x5211, iwl6000g2b_2agn_cfg)},
 -      {IWL_PCI_DEVICE(0x0090, 0x5215, iwl6000g2b_2bgn_cfg)},
 -      {IWL_PCI_DEVICE(0x0090, 0x5216, iwl6000g2b_2abg_cfg)},
 -      {IWL_PCI_DEVICE(0x0091, 0x5201, iwl6000g2b_2agn_cfg)},
 -      {IWL_PCI_DEVICE(0x0091, 0x5205, iwl6000g2b_2bgn_cfg)},
 -      {IWL_PCI_DEVICE(0x0091, 0x5206, iwl6000g2b_2abg_cfg)},
 -      {IWL_PCI_DEVICE(0x0091, 0x5207, iwl6000g2b_2bg_cfg)},
 -      {IWL_PCI_DEVICE(0x0091, 0x5221, iwl6000g2b_2agn_cfg)},
 -      {IWL_PCI_DEVICE(0x0091, 0x5225, iwl6000g2b_2bgn_cfg)},
 -      {IWL_PCI_DEVICE(0x0091, 0x5226, iwl6000g2b_2abg_cfg)},
 +/* 6x05 Series */
 +      {IWL_PCI_DEVICE(0x0082, 0x1301, iwl6005_2agn_cfg)},
 +      {IWL_PCI_DEVICE(0x0082, 0x1306, iwl6005_2abg_cfg)},
 +      {IWL_PCI_DEVICE(0x0082, 0x1307, iwl6005_2bg_cfg)},
 +      {IWL_PCI_DEVICE(0x0082, 0x1321, iwl6005_2agn_cfg)},
 +      {IWL_PCI_DEVICE(0x0082, 0x1326, iwl6005_2abg_cfg)},
 +      {IWL_PCI_DEVICE(0x0085, 0x1311, iwl6005_2agn_cfg)},
 +      {IWL_PCI_DEVICE(0x0085, 0x1316, iwl6005_2abg_cfg)},
 +
 +/* 6x30 Series */
 +      {IWL_PCI_DEVICE(0x008A, 0x5305, iwl1030_bgn_cfg)},
 +      {IWL_PCI_DEVICE(0x008A, 0x5307, iwl1030_bg_cfg)},
 +      {IWL_PCI_DEVICE(0x008A, 0x5325, iwl1030_bgn_cfg)},
 +      {IWL_PCI_DEVICE(0x008A, 0x5327, iwl1030_bg_cfg)},
 +      {IWL_PCI_DEVICE(0x008B, 0x5315, iwl1030_bgn_cfg)},
 +      {IWL_PCI_DEVICE(0x008B, 0x5317, iwl1030_bg_cfg)},
 +      {IWL_PCI_DEVICE(0x0090, 0x5211, iwl6030_2agn_cfg)},
 +      {IWL_PCI_DEVICE(0x0090, 0x5215, iwl6030_2bgn_cfg)},
 +      {IWL_PCI_DEVICE(0x0090, 0x5216, iwl6030_2abg_cfg)},
 +      {IWL_PCI_DEVICE(0x0091, 0x5201, iwl6030_2agn_cfg)},
 +      {IWL_PCI_DEVICE(0x0091, 0x5205, iwl6030_2bgn_cfg)},
 +      {IWL_PCI_DEVICE(0x0091, 0x5206, iwl6030_2abg_cfg)},
 +      {IWL_PCI_DEVICE(0x0091, 0x5207, iwl6030_2bg_cfg)},
 +      {IWL_PCI_DEVICE(0x0091, 0x5221, iwl6030_2agn_cfg)},
 +      {IWL_PCI_DEVICE(0x0091, 0x5225, iwl6030_2bgn_cfg)},
 +      {IWL_PCI_DEVICE(0x0091, 0x5226, iwl6030_2abg_cfg)},
  
  /* 6x50 WiFi/WiMax Series */
        {IWL_PCI_DEVICE(0x0087, 0x1301, iwl6050_2agn_cfg)},
        {IWL_PCI_DEVICE(0x0089, 0x1311, iwl6050_2agn_cfg)},
        {IWL_PCI_DEVICE(0x0089, 0x1316, iwl6050_2abg_cfg)},
  
 -/* 6x50 WiFi/WiMax Series Gen2 */
 -      {IWL_PCI_DEVICE(0x0885, 0x1305, iwl6050g2_bgn_cfg)},
 -      {IWL_PCI_DEVICE(0x0885, 0x1306, iwl6050g2_bgn_cfg)},
 -      {IWL_PCI_DEVICE(0x0885, 0x1325, iwl6050g2_bgn_cfg)},
 -      {IWL_PCI_DEVICE(0x0885, 0x1326, iwl6050g2_bgn_cfg)},
 -      {IWL_PCI_DEVICE(0x0886, 0x1315, iwl6050g2_bgn_cfg)},
 -      {IWL_PCI_DEVICE(0x0886, 0x1316, iwl6050g2_bgn_cfg)},
 +/* 6150 WiFi/WiMax Series */
 +      {IWL_PCI_DEVICE(0x0885, 0x1305, iwl6150_bgn_cfg)},
 +      {IWL_PCI_DEVICE(0x0885, 0x1306, iwl6150_bgn_cfg)},
 +      {IWL_PCI_DEVICE(0x0885, 0x1325, iwl6150_bgn_cfg)},
 +      {IWL_PCI_DEVICE(0x0885, 0x1326, iwl6150_bgn_cfg)},
 +      {IWL_PCI_DEVICE(0x0886, 0x1315, iwl6150_bgn_cfg)},
 +      {IWL_PCI_DEVICE(0x0886, 0x1316, iwl6150_bgn_cfg)},
  
  /* 1000 Series WiFi */
        {IWL_PCI_DEVICE(0x0083, 0x1205, iwl1000_bgn_cfg)},
  
  /* 100 Series WiFi */
        {IWL_PCI_DEVICE(0x08AE, 0x1005, iwl100_bgn_cfg)},
 +      {IWL_PCI_DEVICE(0x08AE, 0x1007, iwl100_bg_cfg)},
        {IWL_PCI_DEVICE(0x08AF, 0x1015, iwl100_bgn_cfg)},
 +      {IWL_PCI_DEVICE(0x08AF, 0x1017, iwl100_bg_cfg)},
        {IWL_PCI_DEVICE(0x08AE, 0x1025, iwl100_bgn_cfg)},
 -      {IWL_PCI_DEVICE(0x08AE, 0x1007, iwl100_bg_cfg)},
 -      {IWL_PCI_DEVICE(0x08AE, 0x1017, iwl100_bg_cfg)},
 +      {IWL_PCI_DEVICE(0x08AE, 0x1027, iwl100_bg_cfg)},
  
  /* 130 Series WiFi */
        {IWL_PCI_DEVICE(0x0896, 0x5005, iwl130_bgn_cfg)},
@@@ -4497,7 -4836,10 +4497,7 @@@ static struct pci_driver iwl_driver = 
        .id_table = iwl_hw_card_ids,
        .probe = iwl_pci_probe,
        .remove = __devexit_p(iwl_pci_remove),
 -#ifdef CONFIG_PM
 -      .suspend = iwl_pci_suspend,
 -      .resume = iwl_pci_resume,
 -#endif
 +      .driver.pm = IWL_PM_OPS,
  };
  
  static int __init iwl_init(void)
@@@ -4583,6 -4925,6 +4583,6 @@@ module_param_named(antenna_coupling, iw
  MODULE_PARM_DESC(antenna_coupling,
                 "specify antenna coupling in dB (defualt: 0 dB)");
  
 -module_param_named(bt_ch_announce, iwlagn_bt_ch_announce, bool, S_IRUGO);
 -MODULE_PARM_DESC(bt_ch_announce,
 -               "Enable BT channel announcement mode (default: enable)");
 +module_param_named(bt_ch_inhibition, iwlagn_bt_ch_announce, bool, S_IRUGO);
 +MODULE_PARM_DESC(bt_ch_inhibition,
 +               "Disable BT channel inhibition (default: enable)");
index a08b4e5,0000000..bb1a742
mode 100644,000000..100644
--- /dev/null
@@@ -1,662 -1,0 +1,662 @@@
 +/******************************************************************************
 + *
 + * GPL LICENSE SUMMARY
 + *
 + * Copyright(c) 2008 - 2010 Intel Corporation. All rights reserved.
 + *
 + * This program is free software; you can redistribute it and/or modify
 + * it under the terms of version 2 of the GNU General Public License as
 + * published by the Free Software Foundation.
 + *
 + * This program is distributed in the hope that it will be useful, but
 + * WITHOUT ANY WARRANTY; without even the implied warranty of
 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 + * General Public License for more details.
 + *
 + * You should have received a copy of the GNU General Public License
 + * along with this program; if not, write to the Free Software
 + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
 + * USA
 + *
 + * The full GNU General Public License is included in this distribution
 + * in the file called LICENSE.GPL.
 + *
 + * Contact Information:
 + *  Intel Linux Wireless <ilw@linux.intel.com>
 + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 + *****************************************************************************/
 +
 +#include <linux/kernel.h>
 +#include <net/mac80211.h>
 +
 +#include "iwl-dev.h"
 +#include "iwl-core.h"
 +#include "iwl-helpers.h"
 +#include "iwl-legacy.h"
 +
 +static void iwl_update_qos(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
 +{
 +      if (test_bit(STATUS_EXIT_PENDING, &priv->status))
 +              return;
 +
 +      if (!ctx->is_active)
 +              return;
 +
 +      ctx->qos_data.def_qos_parm.qos_flags = 0;
 +
 +      if (ctx->qos_data.qos_active)
 +              ctx->qos_data.def_qos_parm.qos_flags |=
 +                      QOS_PARAM_FLG_UPDATE_EDCA_MSK;
 +
 +      if (ctx->ht.enabled)
 +              ctx->qos_data.def_qos_parm.qos_flags |= QOS_PARAM_FLG_TGN_MSK;
 +
 +      IWL_DEBUG_QOS(priv, "send QoS cmd with Qos active=%d FLAGS=0x%X\n",
 +                    ctx->qos_data.qos_active,
 +                    ctx->qos_data.def_qos_parm.qos_flags);
 +
 +      iwl_send_cmd_pdu_async(priv, ctx->qos_cmd,
 +                             sizeof(struct iwl_qosparam_cmd),
 +                             &ctx->qos_data.def_qos_parm, NULL);
 +}
 +
 +/**
 + * iwl_legacy_mac_config - mac80211 config callback
 + */
 +int iwl_legacy_mac_config(struct ieee80211_hw *hw, u32 changed)
 +{
 +      struct iwl_priv *priv = hw->priv;
 +      const struct iwl_channel_info *ch_info;
 +      struct ieee80211_conf *conf = &hw->conf;
 +      struct ieee80211_channel *channel = conf->channel;
 +      struct iwl_ht_config *ht_conf = &priv->current_ht_config;
 +      struct iwl_rxon_context *ctx;
 +      unsigned long flags = 0;
 +      int ret = 0;
 +      u16 ch;
 +      int scan_active = 0;
 +      bool ht_changed[NUM_IWL_RXON_CTX] = {};
 +
 +      if (WARN_ON(!priv->cfg->ops->legacy))
 +              return -EOPNOTSUPP;
 +
 +      mutex_lock(&priv->mutex);
 +
 +      IWL_DEBUG_MAC80211(priv, "enter to channel %d changed 0x%X\n",
 +                                      channel->hw_value, changed);
 +
 +      if (unlikely(!priv->cfg->mod_params->disable_hw_scan &&
 +                      test_bit(STATUS_SCANNING, &priv->status))) {
 +              scan_active = 1;
 +              IWL_DEBUG_MAC80211(priv, "leave - scanning\n");
 +      }
 +
 +      if (changed & (IEEE80211_CONF_CHANGE_SMPS |
 +                     IEEE80211_CONF_CHANGE_CHANNEL)) {
  +              /* mac80211 uses static SM PS for non-HT, which is what we want */
 +              priv->current_ht_config.smps = conf->smps_mode;
 +
 +              /*
 +               * Recalculate chain counts.
 +               *
 +               * If monitor mode is enabled then mac80211 will
 +               * set up the SM PS mode to OFF if an HT channel is
 +               * configured.
 +               */
 +              if (priv->cfg->ops->hcmd->set_rxon_chain)
 +                      for_each_context(priv, ctx)
 +                              priv->cfg->ops->hcmd->set_rxon_chain(priv, ctx);
 +      }
 +
 +      /* during scanning mac80211 will delay channel setting until
  +       * the scan finishes, with changed = 0
 +       */
 +      if (!changed || (changed & IEEE80211_CONF_CHANGE_CHANNEL)) {
 +              if (scan_active)
 +                      goto set_ch_out;
 +
 +              ch = channel->hw_value;
 +              ch_info = iwl_get_channel_info(priv, channel->band, ch);
 +              if (!is_channel_valid(ch_info)) {
 +                      IWL_DEBUG_MAC80211(priv, "leave - invalid channel\n");
 +                      ret = -EINVAL;
 +                      goto set_ch_out;
 +              }
 +
 +              spin_lock_irqsave(&priv->lock, flags);
 +
 +              for_each_context(priv, ctx) {
 +                      /* Configure HT40 channels */
 +                      if (ctx->ht.enabled != conf_is_ht(conf)) {
 +                              ctx->ht.enabled = conf_is_ht(conf);
 +                              ht_changed[ctx->ctxid] = true;
 +                      }
 +                      if (ctx->ht.enabled) {
 +                              if (conf_is_ht40_minus(conf)) {
 +                                      ctx->ht.extension_chan_offset =
 +                                              IEEE80211_HT_PARAM_CHA_SEC_BELOW;
 +                                      ctx->ht.is_40mhz = true;
 +                              } else if (conf_is_ht40_plus(conf)) {
 +                                      ctx->ht.extension_chan_offset =
 +                                              IEEE80211_HT_PARAM_CHA_SEC_ABOVE;
 +                                      ctx->ht.is_40mhz = true;
 +                              } else {
 +                                      ctx->ht.extension_chan_offset =
 +                                              IEEE80211_HT_PARAM_CHA_SEC_NONE;
 +                                      ctx->ht.is_40mhz = false;
 +                              }
 +                      } else
 +                              ctx->ht.is_40mhz = false;
 +
 +                      /*
 +                       * Default to no protection. Protection mode will
 +                       * later be set from BSS config in iwl_ht_conf
 +                       */
 +                      ctx->ht.protection = IEEE80211_HT_OP_MODE_PROTECTION_NONE;
 +
 +                      /* if we are switching from ht to 2.4 clear flags
 +                       * from any ht related info since 2.4 does not
 +                       * support ht */
 +                      if ((le16_to_cpu(ctx->staging.channel) != ch))
 +                              ctx->staging.flags = 0;
 +
 +                      iwl_set_rxon_channel(priv, channel, ctx);
 +                      iwl_set_rxon_ht(priv, ht_conf);
 +
 +                      iwl_set_flags_for_band(priv, ctx, channel->band,
 +                                             ctx->vif);
 +              }
 +
 +              spin_unlock_irqrestore(&priv->lock, flags);
 +
 +              if (priv->cfg->ops->legacy->update_bcast_stations)
 +                      ret = priv->cfg->ops->legacy->update_bcast_stations(priv);
 +
 + set_ch_out:
 +              /* The list of supported rates and rate mask can be different
 +               * for each band; since the band may have changed, reset
 +               * the rate mask to what mac80211 lists */
 +              iwl_set_rate(priv);
 +      }
 +
 +      if (changed & (IEEE80211_CONF_CHANGE_PS |
 +                      IEEE80211_CONF_CHANGE_IDLE)) {
 +              ret = iwl_power_update_mode(priv, false);
 +              if (ret)
 +                      IWL_DEBUG_MAC80211(priv, "Error setting sleep level\n");
 +      }
 +
 +      if (changed & IEEE80211_CONF_CHANGE_POWER) {
 +              IWL_DEBUG_MAC80211(priv, "TX Power old=%d new=%d\n",
 +                      priv->tx_power_user_lmt, conf->power_level);
 +
 +              iwl_set_tx_power(priv, conf->power_level, false);
 +      }
 +
 +      if (!iwl_is_ready(priv)) {
 +              IWL_DEBUG_MAC80211(priv, "leave - not ready\n");
 +              goto out;
 +      }
 +
 +      if (scan_active)
 +              goto out;
 +
 +      for_each_context(priv, ctx) {
 +              if (memcmp(&ctx->active, &ctx->staging, sizeof(ctx->staging)))
 +                      iwlcore_commit_rxon(priv, ctx);
 +              else
 +                      IWL_DEBUG_INFO(priv,
 +                              "Not re-sending same RXON configuration.\n");
 +              if (ht_changed[ctx->ctxid])
 +                      iwl_update_qos(priv, ctx);
 +      }
 +
 +out:
 +      IWL_DEBUG_MAC80211(priv, "leave\n");
 +      mutex_unlock(&priv->mutex);
 +      return ret;
 +}
 +EXPORT_SYMBOL(iwl_legacy_mac_config);
 +
 +void iwl_legacy_mac_reset_tsf(struct ieee80211_hw *hw)
 +{
 +      struct iwl_priv *priv = hw->priv;
 +      unsigned long flags;
 +      /* IBSS can only be the IWL_RXON_CTX_BSS context */
 +      struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
 +
 +      if (WARN_ON(!priv->cfg->ops->legacy))
 +              return;
 +
 +      mutex_lock(&priv->mutex);
 +      IWL_DEBUG_MAC80211(priv, "enter\n");
 +
 +      spin_lock_irqsave(&priv->lock, flags);
 +      memset(&priv->current_ht_config, 0, sizeof(struct iwl_ht_config));
 +      spin_unlock_irqrestore(&priv->lock, flags);
 +
 +      spin_lock_irqsave(&priv->lock, flags);
 +
  +      /* new association: get rid of the ibss beacon skb */
 +      if (priv->beacon_skb)
 +              dev_kfree_skb(priv->beacon_skb);
 +
 +      priv->beacon_skb = NULL;
 +
 +      priv->timestamp = 0;
 +
 +      spin_unlock_irqrestore(&priv->lock, flags);
 +
 +      iwl_scan_cancel_timeout(priv, 100);
 +      if (!iwl_is_ready_rf(priv)) {
 +              IWL_DEBUG_MAC80211(priv, "leave - not ready\n");
 +              mutex_unlock(&priv->mutex);
 +              return;
 +      }
 +
 +      /* we are restarting association process
 +       * clear RXON_FILTER_ASSOC_MSK bit
 +       */
 +      ctx->staging.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
 +      iwlcore_commit_rxon(priv, ctx);
 +
 +      iwl_set_rate(priv);
 +
 +      mutex_unlock(&priv->mutex);
 +
 +      IWL_DEBUG_MAC80211(priv, "leave\n");
 +}
 +EXPORT_SYMBOL(iwl_legacy_mac_reset_tsf);
 +
 +static void iwl_ht_conf(struct iwl_priv *priv,
 +                      struct ieee80211_vif *vif)
 +{
 +      struct iwl_ht_config *ht_conf = &priv->current_ht_config;
 +      struct ieee80211_sta *sta;
 +      struct ieee80211_bss_conf *bss_conf = &vif->bss_conf;
 +      struct iwl_rxon_context *ctx = iwl_rxon_ctx_from_vif(vif);
 +
 +      IWL_DEBUG_ASSOC(priv, "enter:\n");
 +
 +      if (!ctx->ht.enabled)
 +              return;
 +
 +      ctx->ht.protection =
 +              bss_conf->ht_operation_mode & IEEE80211_HT_OP_MODE_PROTECTION;
 +      ctx->ht.non_gf_sta_present =
 +              !!(bss_conf->ht_operation_mode & IEEE80211_HT_OP_MODE_NON_GF_STA_PRSNT);
 +
 +      ht_conf->single_chain_sufficient = false;
 +
 +      switch (vif->type) {
 +      case NL80211_IFTYPE_STATION:
 +              rcu_read_lock();
 +              sta = ieee80211_find_sta(vif, bss_conf->bssid);
 +              if (sta) {
 +                      struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap;
 +                      int maxstreams;
 +
 +                      maxstreams = (ht_cap->mcs.tx_params &
 +                                    IEEE80211_HT_MCS_TX_MAX_STREAMS_MASK)
 +                                      >> IEEE80211_HT_MCS_TX_MAX_STREAMS_SHIFT;
 +                      maxstreams += 1;
 +
 +                      if ((ht_cap->mcs.rx_mask[1] == 0) &&
 +                          (ht_cap->mcs.rx_mask[2] == 0))
 +                              ht_conf->single_chain_sufficient = true;
 +                      if (maxstreams <= 1)
 +                              ht_conf->single_chain_sufficient = true;
 +              } else {
 +                      /*
 +                       * If at all, this can only happen through a race
 +                       * when the AP disconnects us while we're still
 +                       * setting up the connection, in that case mac80211
 +                       * will soon tell us about that.
 +                       */
 +                      ht_conf->single_chain_sufficient = true;
 +              }
 +              rcu_read_unlock();
 +              break;
 +      case NL80211_IFTYPE_ADHOC:
 +              ht_conf->single_chain_sufficient = true;
 +              break;
 +      default:
 +              break;
 +      }
 +
 +      IWL_DEBUG_ASSOC(priv, "leave\n");
 +}
 +
 +static inline void iwl_set_no_assoc(struct iwl_priv *priv,
 +                                  struct ieee80211_vif *vif)
 +{
 +      struct iwl_rxon_context *ctx = iwl_rxon_ctx_from_vif(vif);
 +
 +      iwl_led_disassociate(priv);
 +      /*
 +       * inform the ucode that there is no longer an
 +       * association and that no more packets should be
 +       * sent
 +       */
 +      ctx->staging.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
 +      ctx->staging.assoc_id = 0;
 +      iwlcore_commit_rxon(priv, ctx);
 +}
 +
 +static void iwlcore_beacon_update(struct ieee80211_hw *hw,
 +                                struct ieee80211_vif *vif)
 +{
 +      struct iwl_priv *priv = hw->priv;
 +      unsigned long flags;
 +      __le64 timestamp;
 +      struct sk_buff *skb = ieee80211_beacon_get(hw, vif);
 +
 +      if (!skb)
 +              return;
 +
 +      IWL_DEBUG_MAC80211(priv, "enter\n");
 +
 +      lockdep_assert_held(&priv->mutex);
 +
 +      if (!priv->beacon_ctx) {
 +              IWL_ERR(priv, "update beacon but no beacon context!\n");
 +              dev_kfree_skb(skb);
 +              return;
 +      }
 +
 +      spin_lock_irqsave(&priv->lock, flags);
 +
 +      if (priv->beacon_skb)
 +              dev_kfree_skb(priv->beacon_skb);
 +
 +      priv->beacon_skb = skb;
 +
 +      timestamp = ((struct ieee80211_mgmt *)skb->data)->u.beacon.timestamp;
 +      priv->timestamp = le64_to_cpu(timestamp);
 +
 +      IWL_DEBUG_MAC80211(priv, "leave\n");
 +      spin_unlock_irqrestore(&priv->lock, flags);
 +
 +      if (!iwl_is_ready_rf(priv)) {
 +              IWL_DEBUG_MAC80211(priv, "leave - RF not ready\n");
 +              return;
 +      }
 +
 +      priv->cfg->ops->legacy->post_associate(priv);
 +}
 +
 +void iwl_legacy_mac_bss_info_changed(struct ieee80211_hw *hw,
 +                                   struct ieee80211_vif *vif,
 +                                   struct ieee80211_bss_conf *bss_conf,
 +                                   u32 changes)
 +{
 +      struct iwl_priv *priv = hw->priv;
 +      struct iwl_rxon_context *ctx = iwl_rxon_ctx_from_vif(vif);
 +      int ret;
 +
 +      if (WARN_ON(!priv->cfg->ops->legacy))
 +              return;
 +
 +      IWL_DEBUG_MAC80211(priv, "changes = 0x%X\n", changes);
 +
 +      if (!iwl_is_alive(priv))
 +              return;
 +
 +      mutex_lock(&priv->mutex);
 +
 +      if (changes & BSS_CHANGED_QOS) {
 +              unsigned long flags;
 +
 +              spin_lock_irqsave(&priv->lock, flags);
 +              ctx->qos_data.qos_active = bss_conf->qos;
 +              iwl_update_qos(priv, ctx);
 +              spin_unlock_irqrestore(&priv->lock, flags);
 +      }
 +
 +      if (changes & BSS_CHANGED_BEACON_ENABLED) {
 +              /*
 +               * the add_interface code must make sure we only ever
 +               * have a single interface that could be beaconing at
 +               * any time.
 +               */
 +              if (vif->bss_conf.enable_beacon)
 +                      priv->beacon_ctx = ctx;
 +              else
 +                      priv->beacon_ctx = NULL;
 +      }
 +
 +      if (changes & BSS_CHANGED_BEACON && vif->type == NL80211_IFTYPE_AP) {
 +              dev_kfree_skb(priv->beacon_skb);
 +              priv->beacon_skb = ieee80211_beacon_get(hw, vif);
 +      }
 +
 +      if (changes & BSS_CHANGED_BEACON_INT && vif->type == NL80211_IFTYPE_AP)
 +              iwl_send_rxon_timing(priv, ctx);
 +
 +      if (changes & BSS_CHANGED_BSSID) {
 +              IWL_DEBUG_MAC80211(priv, "BSSID %pM\n", bss_conf->bssid);
 +
 +              /*
 +               * If there is currently a HW scan going on in the
 +               * background then we need to cancel it else the RXON
 +               * below/in post_associate will fail.
 +               */
 +              if (iwl_scan_cancel_timeout(priv, 100)) {
 +                      IWL_WARN(priv, "Aborted scan still in progress after 100ms\n");
 +                      IWL_DEBUG_MAC80211(priv, "leaving - scan abort failed.\n");
 +                      mutex_unlock(&priv->mutex);
 +                      return;
 +              }
 +
 +              /* mac80211 only sets assoc when in STATION mode */
 +              if (vif->type == NL80211_IFTYPE_ADHOC || bss_conf->assoc) {
 +                      memcpy(ctx->staging.bssid_addr,
 +                             bss_conf->bssid, ETH_ALEN);
 +
 +                      /* currently needed in a few places */
 +                      memcpy(priv->bssid, bss_conf->bssid, ETH_ALEN);
 +              } else {
 +                      ctx->staging.filter_flags &=
 +                              ~RXON_FILTER_ASSOC_MSK;
 +              }
 +
 +      }
 +
 +      /*
 +       * This needs to be after setting the BSSID in case
 +       * mac80211 decides to do both changes at once because
 +       * it will invoke post_associate.
 +       */
 +      if (vif->type == NL80211_IFTYPE_ADHOC && changes & BSS_CHANGED_BEACON)
 +              iwlcore_beacon_update(hw, vif);
 +
 +      if (changes & BSS_CHANGED_ERP_PREAMBLE) {
 +              IWL_DEBUG_MAC80211(priv, "ERP_PREAMBLE %d\n",
 +                                 bss_conf->use_short_preamble);
 +              if (bss_conf->use_short_preamble)
 +                      ctx->staging.flags |= RXON_FLG_SHORT_PREAMBLE_MSK;
 +              else
 +                      ctx->staging.flags &= ~RXON_FLG_SHORT_PREAMBLE_MSK;
 +      }
 +
 +      if (changes & BSS_CHANGED_ERP_CTS_PROT) {
 +              IWL_DEBUG_MAC80211(priv, "ERP_CTS %d\n", bss_conf->use_cts_prot);
 +              if (bss_conf->use_cts_prot && (priv->band != IEEE80211_BAND_5GHZ))
 +                      ctx->staging.flags |= RXON_FLG_TGG_PROTECT_MSK;
 +              else
 +                      ctx->staging.flags &= ~RXON_FLG_TGG_PROTECT_MSK;
 +              if (bss_conf->use_cts_prot)
 +                      ctx->staging.flags |= RXON_FLG_SELF_CTS_EN;
 +              else
 +                      ctx->staging.flags &= ~RXON_FLG_SELF_CTS_EN;
 +      }
 +
 +      if (changes & BSS_CHANGED_BASIC_RATES) {
 +              /* XXX use this information
 +               *
 +               * To do that, remove code from iwl_set_rate() and put something
 +               * like this here:
 +               *
 +              if (A-band)
 +                      ctx->staging.ofdm_basic_rates =
 +                              bss_conf->basic_rates;
 +              else
 +                      ctx->staging.ofdm_basic_rates =
 +                              bss_conf->basic_rates >> 4;
 +                      ctx->staging.cck_basic_rates =
 +                              bss_conf->basic_rates & 0xF;
 +               */
 +      }
 +
 +      if (changes & BSS_CHANGED_HT) {
 +              iwl_ht_conf(priv, vif);
 +
 +              if (priv->cfg->ops->hcmd->set_rxon_chain)
 +                      priv->cfg->ops->hcmd->set_rxon_chain(priv, ctx);
 +      }
 +
 +      if (changes & BSS_CHANGED_ASSOC) {
 +              IWL_DEBUG_MAC80211(priv, "ASSOC %d\n", bss_conf->assoc);
 +              if (bss_conf->assoc) {
 +                      priv->timestamp = bss_conf->timestamp;
 +
 +                      iwl_led_associate(priv);
 +
 +                      if (!iwl_is_rfkill(priv))
 +                              priv->cfg->ops->legacy->post_associate(priv);
 +              } else
 +                      iwl_set_no_assoc(priv, vif);
 +      }
 +
 +      if (changes && iwl_is_associated_ctx(ctx) && bss_conf->aid) {
 +              IWL_DEBUG_MAC80211(priv, "Changes (%#x) while associated\n",
 +                                 changes);
 +              ret = iwl_send_rxon_assoc(priv, ctx);
 +              if (!ret) {
 +                      /* Sync active_rxon with latest change. */
 +                      memcpy((void *)&ctx->active,
 +                              &ctx->staging,
 +                              sizeof(struct iwl_rxon_cmd));
 +              }
 +      }
 +
 +      if (changes & BSS_CHANGED_BEACON_ENABLED) {
 +              if (vif->bss_conf.enable_beacon) {
 +                      memcpy(ctx->staging.bssid_addr,
 +                             bss_conf->bssid, ETH_ALEN);
 +                      memcpy(priv->bssid, bss_conf->bssid, ETH_ALEN);
 +                      iwl_led_associate(priv);
 +                      priv->cfg->ops->legacy->config_ap(priv);
 +              } else
 +                      iwl_set_no_assoc(priv, vif);
 +      }
 +
 +      if (changes & BSS_CHANGED_IBSS) {
 +              ret = priv->cfg->ops->legacy->manage_ibss_station(priv, vif,
 +                                                      bss_conf->ibss_joined);
 +              if (ret)
 +                      IWL_ERR(priv, "failed to %s IBSS station %pM\n",
 +                              bss_conf->ibss_joined ? "add" : "remove",
 +                              bss_conf->bssid);
 +      }
 +
 +      mutex_unlock(&priv->mutex);
 +
 +      IWL_DEBUG_MAC80211(priv, "leave\n");
 +}
 +EXPORT_SYMBOL(iwl_legacy_mac_bss_info_changed);
 +
 +irqreturn_t iwl_isr_legacy(int irq, void *data)
 +{
 +      struct iwl_priv *priv = data;
 +      u32 inta, inta_mask;
 +      u32 inta_fh;
 +      unsigned long flags;
 +      if (!priv)
 +              return IRQ_NONE;
 +
 +      spin_lock_irqsave(&priv->lock, flags);
 +
 +      /* Disable (but don't clear!) interrupts here to avoid
 +       *    back-to-back ISRs and sporadic interrupts from our NIC.
 +       * If we have something to service, the tasklet will re-enable ints.
 +       * If we *don't* have something, we'll re-enable before leaving here. */
 +      inta_mask = iwl_read32(priv, CSR_INT_MASK);  /* just for debug */
 +      iwl_write32(priv, CSR_INT_MASK, 0x00000000);
 +
 +      /* Discover which interrupts are active/pending */
 +      inta = iwl_read32(priv, CSR_INT);
 +      inta_fh = iwl_read32(priv, CSR_FH_INT_STATUS);
 +
 +      /* Ignore interrupt if there's nothing in NIC to service.
 +       * This may be due to IRQ shared with another device,
 +       * or due to sporadic interrupts thrown from our NIC. */
 +      if (!inta && !inta_fh) {
 +              IWL_DEBUG_ISR(priv,
 +                      "Ignore interrupt, inta == 0, inta_fh == 0\n");
 +              goto none;
 +      }
 +
 +      if ((inta == 0xFFFFFFFF) || ((inta & 0xFFFFFFF0) == 0xa5a5a5a0)) {
 +              /* Hardware disappeared. It might have already raised
 +               * an interrupt */
 +              IWL_WARN(priv, "HARDWARE GONE?? INTA == 0x%08x\n", inta);
 +              goto unplugged;
 +      }
 +
 +      IWL_DEBUG_ISR(priv, "ISR inta 0x%08x, enabled 0x%08x, fh 0x%08x\n",
 +                    inta, inta_mask, inta_fh);
 +
 +      inta &= ~CSR_INT_BIT_SCD;
 +
 +      /* iwl_irq_tasklet() will service interrupts and re-enable them */
 +      if (likely(inta || inta_fh))
 +              tasklet_schedule(&priv->irq_tasklet);
 +
 +unplugged:
 +      spin_unlock_irqrestore(&priv->lock, flags);
 +      return IRQ_HANDLED;
 +
 +none:
 +      /* re-enable interrupts here since we don't have anything to service. */
-       /* only Re-enable if diabled by irq */
++      /* only Re-enable if disabled by irq */
 +      if (test_bit(STATUS_INT_ENABLED, &priv->status))
 +              iwl_enable_interrupts(priv);
 +      spin_unlock_irqrestore(&priv->lock, flags);
 +      return IRQ_NONE;
 +}
 +EXPORT_SYMBOL(iwl_isr_legacy);
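
The unplugged-hardware test above is worth spelling out: a PCI read from a
device that has gone away returns all-ones, and a dying NIC can also return
the 0xa5a5a5a0 pattern, so both are treated as "hardware gone". A minimal
standalone sketch of that check (plain C, not kernel code; hw_gone() is a
made-up helper name):

	#include <stdbool.h>
	#include <stdint.h>

	/* Mirrors the check in iwl_isr_legacy() above. */
	static bool hw_gone(uint32_t inta)
	{
		/* all-ones: PCI master abort, the device is no longer there */
		if (inta == 0xFFFFFFFF)
			return true;
		/* 0xa5a5a5a0..0xa5a5a5af: bogus pattern from failing hardware */
		if ((inta & 0xFFFFFFF0) == 0xa5a5a5a0)
			return true;
		return false;
	}
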
 +
 +/*
  + *  iwl_legacy_tx_cmd_protection: Set RTS/CTS. Only the 3945 and 4965
  + *  share this function.
 + */
 +void iwl_legacy_tx_cmd_protection(struct iwl_priv *priv,
 +                             struct ieee80211_tx_info *info,
 +                             __le16 fc, __le32 *tx_flags)
 +{
 +      if (info->control.rates[0].flags & IEEE80211_TX_RC_USE_RTS_CTS) {
 +              *tx_flags |= TX_CMD_FLG_RTS_MSK;
 +              *tx_flags &= ~TX_CMD_FLG_CTS_MSK;
 +              *tx_flags |= TX_CMD_FLG_FULL_TXOP_PROT_MSK;
 +
 +              if (!ieee80211_is_mgmt(fc))
 +                      return;
 +
 +              switch (fc & cpu_to_le16(IEEE80211_FCTL_STYPE)) {
 +              case cpu_to_le16(IEEE80211_STYPE_AUTH):
 +              case cpu_to_le16(IEEE80211_STYPE_DEAUTH):
 +              case cpu_to_le16(IEEE80211_STYPE_ASSOC_REQ):
 +              case cpu_to_le16(IEEE80211_STYPE_REASSOC_REQ):
 +                      *tx_flags &= ~TX_CMD_FLG_RTS_MSK;
 +                      *tx_flags |= TX_CMD_FLG_CTS_MSK;
 +                      break;
 +              }
 +      } else if (info->control.rates[0].flags &
 +                 IEEE80211_TX_RC_USE_CTS_PROTECT) {
 +              *tx_flags &= ~TX_CMD_FLG_RTS_MSK;
 +              *tx_flags |= TX_CMD_FLG_CTS_MSK;
 +              *tx_flags |= TX_CMD_FLG_FULL_TXOP_PROT_MSK;
 +      }
 +}
 +EXPORT_SYMBOL(iwl_legacy_tx_cmd_protection);
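
The protection rules above reduce to a small truth table: frames sent with
RTS/CTS get full-TXOP RTS protection, except the short auth/deauth and
(re)assoc request management frames, which fall back to CTS-to-self; plain
CTS-protect requests always use CTS-to-self. A standalone sketch under the
assumption that the mac80211 flags and frame types are reduced to booleans
(pick_protection() is an invented name, not a driver API):

	#include <stdbool.h>

	enum prot { PROT_NONE, PROT_RTS, PROT_CTS_TO_SELF };

	static enum prot pick_protection(bool use_rts_cts, bool use_cts_prot,
					 bool is_auth_or_assoc_mgmt)
	{
		if (use_rts_cts) {
			/* short management frames: CTS-to-self, as in the
			 * switch statement above */
			if (is_auth_or_assoc_mgmt)
				return PROT_CTS_TO_SELF;
			return PROT_RTS;	/* full-TXOP RTS protection */
		}
		if (use_cts_prot)
			return PROT_CTS_TO_SELF;
		return PROT_NONE;
	}
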
@@@ -107,7 -107,7 +107,7 @@@ static int iwl_process_add_sta_resp(str
        /*
         * XXX: The MAC address in the command buffer is often changed from
         * the original sent to the device. That is, the MAC address
-        * written to the command buffer often is not the same MAC adress
+        * written to the command buffer often is not the same MAC address
         * read from the command buffer when the command returns. This
         * issue has not yet been resolved and this debugging is left to
         * observe the problem.
@@@ -400,8 -400,7 +400,8 @@@ static void iwl_sta_ucode_deactivate(st
  }
  
  static int iwl_send_remove_station(struct iwl_priv *priv,
 -                                 const u8 *addr, int sta_id)
 +                                 const u8 *addr, int sta_id,
 +                                 bool temporary)
  {
        struct iwl_rx_packet *pkt;
        int ret;
        if (!ret) {
                switch (pkt->u.rem_sta.status) {
                case REM_STA_SUCCESS_MSK:
 -                      spin_lock_irqsave(&priv->sta_lock, flags_spin);
 -                      iwl_sta_ucode_deactivate(priv, sta_id);
 -                      spin_unlock_irqrestore(&priv->sta_lock, flags_spin);
 +                      if (!temporary) {
 +                              spin_lock_irqsave(&priv->sta_lock, flags_spin);
 +                              iwl_sta_ucode_deactivate(priv, sta_id);
 +                              spin_unlock_irqrestore(&priv->sta_lock, flags_spin);
 +                      }
                        IWL_DEBUG_ASSOC(priv, "REPLY_REMOVE_STA PASSED\n");
                        break;
                default:
@@@ -508,7 -505,7 +508,7 @@@ int iwl_remove_station(struct iwl_priv 
  
        spin_unlock_irqrestore(&priv->sta_lock, flags);
  
 -      return iwl_send_remove_station(priv, addr, sta_id);
 +      return iwl_send_remove_station(priv, addr, sta_id, false);
  out_err:
        spin_unlock_irqrestore(&priv->sta_lock, flags);
        return -EINVAL;
@@@ -627,49 -624,6 +627,49 @@@ void iwl_restore_stations(struct iwl_pr
  }
  EXPORT_SYMBOL(iwl_restore_stations);
  
 +void iwl_reprogram_ap_sta(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
 +{
 +      unsigned long flags;
 +      int sta_id = ctx->ap_sta_id;
 +      int ret;
 +      struct iwl_addsta_cmd sta_cmd;
 +      struct iwl_link_quality_cmd lq;
 +      bool active;
 +
 +      spin_lock_irqsave(&priv->sta_lock, flags);
 +      if (!(priv->stations[sta_id].used & IWL_STA_DRIVER_ACTIVE)) {
 +              spin_unlock_irqrestore(&priv->sta_lock, flags);
 +              return;
 +      }
 +
 +      memcpy(&sta_cmd, &priv->stations[sta_id].sta, sizeof(sta_cmd));
 +      sta_cmd.mode = 0;
 +      memcpy(&lq, priv->stations[sta_id].lq, sizeof(lq));
 +
 +      active = priv->stations[sta_id].used & IWL_STA_UCODE_ACTIVE;
 +      priv->stations[sta_id].used &= ~IWL_STA_DRIVER_ACTIVE;
 +      spin_unlock_irqrestore(&priv->sta_lock, flags);
 +
 +      if (active) {
 +              ret = iwl_send_remove_station(
 +                      priv, priv->stations[sta_id].sta.sta.addr,
 +                      sta_id, true);
 +              if (ret)
 +                      IWL_ERR(priv, "failed to remove STA %pM (%d)\n",
 +                              priv->stations[sta_id].sta.sta.addr, ret);
 +      }
 +      spin_lock_irqsave(&priv->sta_lock, flags);
 +      priv->stations[sta_id].used |= IWL_STA_DRIVER_ACTIVE;
 +      spin_unlock_irqrestore(&priv->sta_lock, flags);
 +
 +      ret = iwl_send_add_sta(priv, &sta_cmd, CMD_SYNC);
 +      if (ret)
 +              IWL_ERR(priv, "failed to re-add STA %pM (%d)\n",
 +                      priv->stations[sta_id].sta.sta.addr, ret);
 +      iwl_send_lq_cmd(priv, ctx, &lq, CMD_SYNC, true);
 +}
 +EXPORT_SYMBOL(iwl_reprogram_ap_sta);
 +
  int iwl_get_free_ucode_key_index(struct iwl_priv *priv)
  {
        int i;
@@@ -782,14 -736,6 +782,14 @@@ int iwl_send_lq_cmd(struct iwl_priv *pr
        if (WARN_ON(lq->sta_id == IWL_INVALID_STATION))
                return -EINVAL;
  
 +
 +      spin_lock_irqsave(&priv->sta_lock, flags_spin);
 +      if (!(priv->stations[lq->sta_id].used & IWL_STA_DRIVER_ACTIVE)) {
 +              spin_unlock_irqrestore(&priv->sta_lock, flags_spin);
 +              return -EINVAL;
 +      }
 +      spin_unlock_irqrestore(&priv->sta_lock, flags_spin);
 +
        iwl_dump_lq_cmd(priv, lq);
        BUG_ON(init && (cmd.flags & CMD_ASYNC));
  
@@@ -104,7 -104,7 +104,7 @@@ int rt2x00mac_tx(struct ieee80211_hw *h
        struct rt2x00_dev *rt2x00dev = hw->priv;
        struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
        enum data_queue_qid qid = skb_get_queue_mapping(skb);
 -      struct data_queue *queue;
 +      struct data_queue *queue = NULL;
  
        /*
         * Mac80211 might be calling this function while we are trying
                goto exit_fail;
  
        if (rt2x00queue_threshold(queue))
 -              ieee80211_stop_queue(rt2x00dev->hw, qid);
 +              rt2x00queue_pause_queue(queue);
  
        return NETDEV_TX_OK;
  
@@@ -268,12 -268,13 +268,12 @@@ int rt2x00mac_add_interface(struct ieee
        else
                rt2x00dev->intf_sta_count++;
  
 -      spin_lock_init(&intf->lock);
        spin_lock_init(&intf->seqlock);
        mutex_init(&intf->beacon_skb_mutex);
        intf->beacon = entry;
  
        /*
-        * The MAC adddress must be configured after the device
+        * The MAC address must be configured after the device
         * has been initialized. Otherwise the device can reset
         * the MAC registers.
         * The BSSID address must only be configured in AP mode,
         * STA interfaces at this time, since this can cause
         * invalid behavior in the device.
         */
 -      memcpy(&intf->mac, vif->addr, ETH_ALEN);
 -      if (vif->type == NL80211_IFTYPE_AP) {
 -              memcpy(&intf->bssid, vif->addr, ETH_ALEN);
 -              rt2x00lib_config_intf(rt2x00dev, intf, vif->type,
 -                                    intf->mac, intf->bssid);
 -      } else {
 -              rt2x00lib_config_intf(rt2x00dev, intf, vif->type,
 -                                    intf->mac, NULL);
 -      }
 +      rt2x00lib_config_intf(rt2x00dev, intf, vif->type,
 +                            vif->addr, NULL);
  
        /*
         * Some filters depend on the current working mode. We can force
@@@ -350,7 -358,7 +350,7 @@@ int rt2x00mac_config(struct ieee80211_h
         * if for any reason the link tuner must be reset, this will be
         * handled by rt2x00lib_config().
         */
 -      rt2x00lib_toggle_rx(rt2x00dev, STATE_RADIO_RX_OFF_LINK);
 +      rt2x00queue_stop_queue(rt2x00dev->rx);
  
        /*
         * When we've just turned on the radio, we want to reprogram
        rt2x00lib_config_antenna(rt2x00dev, rt2x00dev->default_ant);
  
        /* Turn RX back on */
 -      rt2x00lib_toggle_rx(rt2x00dev, STATE_RADIO_RX_ON_LINK);
 +      rt2x00queue_start_queue(rt2x00dev->rx);
  
        return 0;
  }
@@@ -443,7 -451,9 +443,7 @@@ static void rt2x00mac_set_tim_iter(voi
            vif->type != NL80211_IFTYPE_WDS)
                return;
  
 -      spin_lock(&intf->lock);
 -      intf->delayed_flags |= DELAYED_UPDATE_BEACON;
 -      spin_unlock(&intf->lock);
 +      set_bit(DELAYED_UPDATE_BEACON, &intf->delayed_flags);
  }
  
  int rt2x00mac_set_tim(struct ieee80211_hw *hw, struct ieee80211_sta *sta,
@@@ -468,17 -478,17 +468,17 @@@ EXPORT_SYMBOL_GPL(rt2x00mac_set_tim)
  static void memcpy_tkip(struct rt2x00lib_crypto *crypto, u8 *key, u8 key_len)
  {
        if (key_len > NL80211_TKIP_DATA_OFFSET_ENCR_KEY)
 -              memcpy(&crypto->key,
 +              memcpy(crypto->key,
                       &key[NL80211_TKIP_DATA_OFFSET_ENCR_KEY],
                       sizeof(crypto->key));
  
        if (key_len > NL80211_TKIP_DATA_OFFSET_TX_MIC_KEY)
 -              memcpy(&crypto->tx_mic,
 +              memcpy(crypto->tx_mic,
                       &key[NL80211_TKIP_DATA_OFFSET_TX_MIC_KEY],
                       sizeof(crypto->tx_mic));
  
        if (key_len > NL80211_TKIP_DATA_OFFSET_RX_MIC_KEY)
 -              memcpy(&crypto->rx_mic,
 +              memcpy(crypto->rx_mic,
                       &key[NL80211_TKIP_DATA_OFFSET_RX_MIC_KEY],
                       sizeof(crypto->rx_mic));
  }
@@@ -488,6 -498,7 +488,6 @@@ int rt2x00mac_set_key(struct ieee80211_
                      struct ieee80211_key_conf *key)
  {
        struct rt2x00_dev *rt2x00dev = hw->priv;
 -      struct rt2x00_intf *intf = vif_to_intf(vif);
        int (*set_key) (struct rt2x00_dev *rt2x00dev,
                        struct rt2x00lib_crypto *crypto,
                        struct ieee80211_key_conf *key);
        if (rt2x00dev->intf_sta_count)
                crypto.bssidx = 0;
        else
 -              crypto.bssidx = intf->mac[5] & (rt2x00dev->ops->max_ap_intf - 1);
 +              crypto.bssidx = vif->addr[5] & (rt2x00dev->ops->max_ap_intf - 1);
  
        crypto.cipher = rt2x00crypto_key_to_cipher(key);
        if (crypto.cipher == CIPHER_NONE)
        if (crypto.cipher == CIPHER_TKIP)
                memcpy_tkip(&crypto, &key->key[0], key->keylen);
        else
 -              memcpy(&crypto.key, &key->key[0], key->keylen);
 +              memcpy(crypto.key, &key->key[0], key->keylen);
        /*
         * Each BSS has a maximum of 4 shared keys.
         * Shared key index values:
@@@ -609,8 -620,22 +609,8 @@@ void rt2x00mac_bss_info_changed(struct 
        if (!test_bit(DEVICE_STATE_PRESENT, &rt2x00dev->flags))
                return;
  
 -      spin_lock(&intf->lock);
 -
        /*
 -       * conf->bssid can be NULL if coming from the internal
 -       * beacon update routine.
 -       */
 -      if (changes & BSS_CHANGED_BSSID)
 -              memcpy(&intf->bssid, bss_conf->bssid, ETH_ALEN);
 -
 -      spin_unlock(&intf->lock);
 -
 -      /*
 -       * Call rt2x00_config_intf() outside of the spinlock context since
 -       * the call will sleep for USB drivers. By using the ieee80211_if_conf
 -       * values as arguments we make keep access to rt2x00_intf thread safe
 -       * even without the lock.
 +       * Update the BSSID.
         */
        if (changes & BSS_CHANGED_BSSID)
                rt2x00lib_config_intf(rt2x00dev, intf, vif->type, NULL,
@@@ -694,13 -719,3 +694,13 @@@ void rt2x00mac_rfkill_poll(struct ieee8
        wiphy_rfkill_set_hw_state(hw->wiphy, !active);
  }
  EXPORT_SYMBOL_GPL(rt2x00mac_rfkill_poll);
 +
 +void rt2x00mac_flush(struct ieee80211_hw *hw, bool drop)
 +{
 +      struct rt2x00_dev *rt2x00dev = hw->priv;
 +      struct data_queue *queue;
 +
 +      tx_queue_for_each(rt2x00dev, queue)
 +              rt2x00queue_flush_queue(queue, drop);
 +}
 +EXPORT_SYMBOL_GPL(rt2x00mac_flush);
@@@ -256,7 -256,6 +256,7 @@@ struct wl1251_if_operations 
        void (*write)(struct wl1251 *wl, int addr, void *buf, size_t len);
        void (*read_elp)(struct wl1251 *wl, int addr, u32 *val);
        void (*write_elp)(struct wl1251 *wl, int addr, u32 val);
 +      int  (*power)(struct wl1251 *wl, bool enable);
        void (*reset)(struct wl1251 *wl);
        void (*enable_irq)(struct wl1251 *wl);
        void (*disable_irq)(struct wl1251 *wl);
@@@ -419,7 -418,7 +419,7 @@@ void wl1251_disable_interrupts(struct w
  #define WL1251_FW_NAME "wl1251-fw.bin"
  #define WL1251_NVS_NAME "wl1251-nvs.bin"
  
- #define WL1251_POWER_ON_SLEEP 10 /* in miliseconds */
+ #define WL1251_POWER_ON_SLEEP 10 /* in milliseconds */
  
  #define WL1251_PART_DOWN_MEM_START    0x0
  #define WL1251_PART_DOWN_MEM_SIZE     0x16800
index 9cbc3f4,0000000..7bd8e4d
mode 100644,000000..100644
--- /dev/null
@@@ -1,1190 -1,0 +1,1190 @@@
 +/*
 + * This file is part of wl1271
 + *
 + * Copyright (C) 1998-2009 Texas Instruments. All rights reserved.
 + * Copyright (C) 2008-2010 Nokia Corporation
 + *
 + * Contact: Luciano Coelho <luciano.coelho@nokia.com>
 + *
 + * This program is free software; you can redistribute it and/or
 + * modify it under the terms of the GNU General Public License
 + * version 2 as published by the Free Software Foundation.
 + *
 + * This program is distributed in the hope that it will be useful, but
 + * WITHOUT ANY WARRANTY; without even the implied warranty of
 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 + * General Public License for more details.
 + *
 + * You should have received a copy of the GNU General Public License
 + * along with this program; if not, write to the Free Software
 + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
 + * 02110-1301 USA
 + *
 + */
 +
 +#ifndef __ACX_H__
 +#define __ACX_H__
 +
 +#include "wl12xx.h"
 +#include "cmd.h"
 +
 +/*************************************************************************
 +
 +    Host Interrupt Register (WiLink -> Host)
 +
 +**************************************************************************/
 +/* HW Initiated interrupt Watchdog timer expiration */
 +#define WL1271_ACX_INTR_WATCHDOG           BIT(0)
  +/* Init sequence is done (masked interrupt, detection through polling only) */
  +#define WL1271_ACX_INTR_INIT_COMPLETE      BIT(1)
  +/* Event was entered into Event MBOX #A */
  +#define WL1271_ACX_INTR_EVENT_A            BIT(2)
  +/* Event was entered into Event MBOX #B */
  +#define WL1271_ACX_INTR_EVENT_B            BIT(3)
  +/* Command processing completion */
  +#define WL1271_ACX_INTR_CMD_COMPLETE       BIT(4)
 +/* Signaling the host on HW wakeup */
 +#define WL1271_ACX_INTR_HW_AVAILABLE       BIT(5)
 +/* The MISC bit is used for aggregation of RX, TxComplete and TX rate update */
 +#define WL1271_ACX_INTR_DATA               BIT(6)
- /* Trace meassge on MBOX #A */
++/* Trace message on MBOX #A */
 +#define WL1271_ACX_INTR_TRACE_A            BIT(7)
- /* Trace meassge on MBOX #B */
++/* Trace message on MBOX #B */
 +#define WL1271_ACX_INTR_TRACE_B            BIT(8)
 +
 +#define WL1271_ACX_INTR_ALL              0xFFFFFFFF
 +#define WL1271_ACX_ALL_EVENTS_VECTOR       (WL1271_ACX_INTR_WATCHDOG      | \
 +                                          WL1271_ACX_INTR_INIT_COMPLETE | \
 +                                          WL1271_ACX_INTR_EVENT_A       | \
 +                                          WL1271_ACX_INTR_EVENT_B       | \
 +                                          WL1271_ACX_INTR_CMD_COMPLETE  | \
 +                                          WL1271_ACX_INTR_HW_AVAILABLE  | \
 +                                          WL1271_ACX_INTR_DATA)
 +
 +#define WL1271_INTR_MASK                   (WL1271_ACX_INTR_WATCHDOG     | \
 +                                          WL1271_ACX_INTR_EVENT_A      | \
 +                                          WL1271_ACX_INTR_EVENT_B      | \
 +                                          WL1271_ACX_INTR_HW_AVAILABLE | \
 +                                          WL1271_ACX_INTR_DATA)
 +
 +/* Target's information element */
 +struct acx_header {
 +      struct wl1271_cmd_header cmd;
 +
 +      /* acx (or information element) header */
 +      __le16 id;
 +
  +      /* payload length (not including headers) */
 +      __le16 len;
 +} __packed;
 +
 +struct acx_error_counter {
 +      struct acx_header header;
 +
 +      /* The number of PLCP errors since the last time this */
 +      /* information element was interrogated. This field is */
 +      /* automatically cleared when it is interrogated.*/
 +      __le32 PLCP_error;
 +
 +      /* The number of FCS errors since the last time this */
 +      /* information element was interrogated. This field is */
 +      /* automatically cleared when it is interrogated.*/
 +      __le32 FCS_error;
 +
 +      /* The number of MPDUs without PLCP header errors received*/
 +      /* since the last time this information element was interrogated. */
 +      /* This field is automatically cleared when it is interrogated.*/
 +      __le32 valid_frame;
 +
  +      /* the number of missed sequence numbers in the sequentially */
  +      /* ordered seq numbers of received frames */
 +      __le32 seq_num_miss;
 +} __packed;
 +
 +enum wl1271_psm_mode {
 +      /* Active mode */
 +      WL1271_PSM_CAM = 0,
 +
 +      /* Power save mode */
 +      WL1271_PSM_PS = 1,
 +
 +      /* Extreme low power */
 +      WL1271_PSM_ELP = 2,
 +};
 +
 +struct acx_sleep_auth {
 +      struct acx_header header;
 +
 +      /* The sleep level authorization of the device. */
 +      /* 0 - Always active*/
 +      /* 1 - Power down mode: light / fast sleep*/
 +      /* 2 - ELP mode: Deep / Max sleep*/
 +      u8  sleep_auth;
 +      u8  padding[3];
 +} __packed;
 +
 +enum {
 +      HOSTIF_PCI_MASTER_HOST_INDIRECT,
 +      HOSTIF_PCI_MASTER_HOST_DIRECT,
 +      HOSTIF_SLAVE,
 +      HOSTIF_PKT_RING,
 +      HOSTIF_DONTCARE = 0xFF
 +};
 +
 +#define DEFAULT_UCAST_PRIORITY          0
 +#define DEFAULT_RX_Q_PRIORITY           0
 +#define DEFAULT_NUM_STATIONS            1
 +#define DEFAULT_RXQ_PRIORITY            0 /* low 0 .. 15 high  */
 +#define DEFAULT_RXQ_TYPE                0x07    /* All frames, Data/Ctrl/Mgmt */
 +#define TRACE_BUFFER_MAX_SIZE           256
 +
 +#define  DP_RX_PACKET_RING_CHUNK_SIZE 1600
 +#define  DP_TX_PACKET_RING_CHUNK_SIZE 1600
 +#define  DP_RX_PACKET_RING_CHUNK_NUM 2
 +#define  DP_TX_PACKET_RING_CHUNK_NUM 2
 +#define  DP_TX_COMPLETE_TIME_OUT 20
 +
 +#define TX_MSDU_LIFETIME_MIN       0
 +#define TX_MSDU_LIFETIME_MAX       3000
 +#define TX_MSDU_LIFETIME_DEF       512
 +#define RX_MSDU_LIFETIME_MIN       0
 +#define RX_MSDU_LIFETIME_MAX       0xFFFFFFFF
 +#define RX_MSDU_LIFETIME_DEF       512000
 +
 +struct acx_rx_msdu_lifetime {
 +      struct acx_header header;
 +
 +      /*
 +       * The maximum amount of time, in TU, before the
 +       * firmware discards the MSDU.
 +       */
 +      __le32 lifetime;
 +} __packed;
 +
 +/*
 + * RX Config Options Table
 + * Bit                Definition
 + * ===                ==========
 + * 31:14              Reserved
 + * 13         Copy RX Status - when set, write three receive status words
 + *            to top of rx'd MPDUs.
 + *            When cleared, do not write three status words (added rev 1.5)
 + * 12         Reserved
 + * 11         RX Complete upon FCS error - when set, give rx complete
 + *            interrupt for FCS errors, after the rx filtering, e.g. unicast
 + *            frames not to us with FCS error will not generate an interrupt.
 + * 10         SSID Filter Enable - When set, the WiLink discards all beacon,
 + *            probe request, and probe response frames with an SSID that does
 + *            not match the SSID specified by the host in the START/JOIN
 + *            command.
 + *            When clear, the WiLink receives frames with any SSID.
 + * 9          Broadcast Filter Enable - When set, the WiLink discards all
 + *            broadcast frames. When clear, the WiLink receives all received
 + *            broadcast frames.
 + * 8:6                Reserved
 + * 5          BSSID Filter Enable - When set, the WiLink discards any frames
 + *            with a BSSID that does not match the BSSID specified by the
 + *            host.
 + *            When clear, the WiLink receives frames from any BSSID.
 + * 4          MAC Addr Filter - When set, the WiLink discards any frames
 + *            with a destination address that does not match the MAC address
 + *            of the adaptor.
 + *            When clear, the WiLink receives frames destined to any MAC
 + *            address.
 + * 3          Promiscuous - When set, the WiLink receives all valid frames
 + *            (i.e., all frames that pass the FCS check).
 + *            When clear, only frames that pass the other filters specified
 + *            are received.
 + * 2          FCS - When set, the WiLink includes the FCS with the received
 + *            frame.
 + *            When cleared, the FCS is discarded.
 + * 1          PLCP header - When set, write all data from baseband to frame
 + *            buffer including PHY header.
 + * 0          Reserved - Always equal to 0.
 + *
 + * RX Filter Options Table
 + * Bit                Definition
 + * ===                ==========
 + * 31:12              Reserved - Always equal to 0.
 + * 11         Association - When set, the WiLink receives all association
  + *            related frames (association request/response, reassociation
 + *            request/response, and disassociation). When clear, these frames
 + *            are discarded.
 + * 10         Auth/De auth - When set, the WiLink receives all authentication
 + *            and de-authentication frames. When clear, these frames are
 + *            discarded.
 + * 9          Beacon - When set, the WiLink receives all beacon frames.
 + *            When clear, these frames are discarded.
 + * 8          Contention Free - When set, the WiLink receives all contention
 + *            free frames.
 + *            When clear, these frames are discarded.
 + * 7          Control - When set, the WiLink receives all control frames.
 + *            When clear, these frames are discarded.
 + * 6          Data - When set, the WiLink receives all data frames.
 + *            When clear, these frames are discarded.
 + * 5          FCS Error - When set, the WiLink receives frames that have FCS
 + *            errors.
 + *            When clear, these frames are discarded.
 + * 4          Management - When set, the WiLink receives all management
 + *            frames.
 + *            When clear, these frames are discarded.
 + * 3          Probe Request - When set, the WiLink receives all probe request
 + *            frames.
 + *            When clear, these frames are discarded.
 + * 2          Probe Response - When set, the WiLink receives all probe
 + *            response frames.
 + *            When clear, these frames are discarded.
 + * 1          RTS/CTS/ACK - When set, the WiLink receives all RTS, CTS and ACK
 + *            frames.
 + *            When clear, these frames are discarded.
 + * 0          Rsvd Type/Sub Type - When set, the WiLink receives all frames
 + *            that have reserved frame types and sub types as defined by the
 + *            802.11 specification.
 + *            When clear, these frames are discarded.
 + */
 +struct acx_rx_config {
 +      struct acx_header header;
 +
 +      __le32 config_options;
 +      __le32 filter_options;
 +} __packed;
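
Given the two bit tables above, programming acx_rx_config amounts to OR-ing
the wanted bits into config_options and filter_options. A hedged sketch (the
CFG_*/FLT_* names below are invented for illustration; the real driver
defines its own constants for these bit positions):

	#include <stdint.h>

	#define CFG_BSSID_FILTER_EN	(1u << 5)   /* config bit 5: BSSID filter */
	#define CFG_SSID_FILTER_EN	(1u << 10)  /* config bit 10: SSID filter */
	#define FLT_DATA_EN		(1u << 6)   /* filter bit 6: data frames */
	#define FLT_BEACON_EN		(1u << 9)   /* filter bit 9: beacons */

	static void fill_rx_options(uint32_t *config_options,
				    uint32_t *filter_options)
	{
		/* accept only our BSSID/SSID; pass data frames and beacons */
		*config_options = CFG_BSSID_FILTER_EN | CFG_SSID_FILTER_EN;
		*filter_options = FLT_DATA_EN | FLT_BEACON_EN;
	}
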
 +
 +struct acx_packet_detection {
 +      struct acx_header header;
 +
 +      __le32 threshold;
 +} __packed;
 +
 +
 +enum acx_slot_type {
 +      SLOT_TIME_LONG = 0,
 +      SLOT_TIME_SHORT = 1,
 +      DEFAULT_SLOT_TIME = SLOT_TIME_SHORT,
 +      MAX_SLOT_TIMES = 0xFF
 +};
 +
 +#define STATION_WONE_INDEX 0
 +
 +struct acx_slot {
 +      struct acx_header header;
 +
 +      u8 wone_index; /* Reserved */
 +      u8 slot_time;
 +      u8 reserved[6];
 +} __packed;
 +
 +
 +#define ACX_MC_ADDRESS_GROUP_MAX      (8)
 +#define ADDRESS_GROUP_MAX_LEN         (ETH_ALEN * ACX_MC_ADDRESS_GROUP_MAX)
 +
 +struct acx_dot11_grp_addr_tbl {
 +      struct acx_header header;
 +
 +      u8 enabled;
 +      u8 num_groups;
 +      u8 pad[2];
 +      u8 mac_table[ADDRESS_GROUP_MAX_LEN];
 +} __packed;
 +
 +struct acx_rx_timeout {
 +      struct acx_header header;
 +
 +      __le16 ps_poll_timeout;
 +      __le16 upsd_timeout;
 +} __packed;
 +
 +struct acx_rts_threshold {
 +      struct acx_header header;
 +
 +      __le16 threshold;
 +      u8 pad[2];
 +} __packed;
 +
 +struct acx_beacon_filter_option {
 +      struct acx_header header;
 +
 +      u8 enable;
 +
 +      /*
 +       * The number of beacons without the unicast TIM
 +       * bit set that the firmware buffers before
 +       * signaling the host about ready frames.
 +       * When set to 0 and the filter is enabled, beacons
 +       * without the unicast TIM bit set are dropped.
 +       */
 +      u8 max_num_beacons;
 +      u8 pad[2];
 +} __packed;
 +
 +/*
 + * ACXBeaconFilterEntry (not 221)
 + * Byte Offset     Size (Bytes)    Definition
 + * ===========     ============    ==========
 + * 0               1               IE identifier
 + * 1               1               Treatment bit mask
 + *
 + * ACXBeaconFilterEntry (221)
 + * Byte Offset     Size (Bytes)    Definition
 + * ===========     ============    ==========
 + * 0               1               IE identifier
 + * 1               1               Treatment bit mask
 + * 2               3               OUI
 + * 5               1               Type
 + * 6               2               Version
 + *
 + *
 + * Treatment bit mask - The information element handling:
 + * bit 0 - The information element is compared and transferred
 + * in case of change.
 + * bit 1 - The information element is transferred to the host
 + * with each appearance or disappearance.
 + * Note that both bits can be set at the same time.
 + */
 +#define       BEACON_FILTER_TABLE_MAX_IE_NUM                 (32)
 +#define BEACON_FILTER_TABLE_MAX_VENDOR_SPECIFIC_IE_NUM (6)
 +#define BEACON_FILTER_TABLE_IE_ENTRY_SIZE            (2)
 +#define BEACON_FILTER_TABLE_EXTRA_VENDOR_SPECIFIC_IE_SIZE (6)
 +#define BEACON_FILTER_TABLE_MAX_SIZE ((BEACON_FILTER_TABLE_MAX_IE_NUM * \
 +                          BEACON_FILTER_TABLE_IE_ENTRY_SIZE) + \
 +                         (BEACON_FILTER_TABLE_MAX_VENDOR_SPECIFIC_IE_NUM * \
 +                          BEACON_FILTER_TABLE_EXTRA_VENDOR_SPECIFIC_IE_SIZE))
 +
 +struct acx_beacon_filter_ie_table {
 +      struct acx_header header;
 +
 +      u8 num_ie;
 +      u8 pad[3];
 +      u8 table[BEACON_FILTER_TABLE_MAX_SIZE];
 +} __packed;
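 +
 +/*
 + * Illustrative sketch (helper name is not from the reference driver):
 + * appending one non-221 entry to the table above, following the two-byte
 + * layout described in the preceding comment (IE identifier, then
 + * treatment bit mask). Assumes only plain 2-byte entries are packed.
 + */
 +static inline int example_add_beacon_filter_ie(
 +      struct acx_beacon_filter_ie_table *tbl, u8 ie_id, u8 treatment)
 +{
 +      size_t offset = tbl->num_ie * BEACON_FILTER_TABLE_IE_ENTRY_SIZE;
 +
 +      if (tbl->num_ie >= BEACON_FILTER_TABLE_MAX_IE_NUM)
 +              return -ENOSPC;
 +
 +      tbl->table[offset] = ie_id;         /* byte 0: IE identifier */
 +      tbl->table[offset + 1] = treatment; /* byte 1: treatment bit mask */
 +      tbl->num_ie++;
 +      return 0;
 +}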
 +
 +struct acx_conn_monit_params {
 +       struct acx_header header;
 +
 +       __le32 synch_fail_thold; /* number of beacons missed */
 +       __le32 bss_lose_timeout; /* number of TUs since synch fail */
 +} __packed;
 +
 +struct acx_bt_wlan_coex {
 +      struct acx_header header;
 +
 +      u8 enable;
 +      u8 pad[3];
 +} __packed;
 +
 +struct acx_bt_wlan_coex_param {
 +      struct acx_header header;
 +
 +      __le32 params[CONF_SG_PARAMS_MAX];
 +      u8 param_idx;
 +      u8 padding[3];
 +} __packed;
 +
 +struct acx_dco_itrim_params {
 +      struct acx_header header;
 +
 +      u8 enable;
 +      u8 padding[3];
 +      __le32 timeout;
 +} __packed;
 +
 +struct acx_energy_detection {
 +      struct acx_header header;
 +
 +      /* The RX Clear Channel Assessment threshold in the PHY */
 +      __le16 rx_cca_threshold;
 +      u8 tx_energy_detection;
 +      u8 pad;
 +} __packed;
 +
 +struct acx_beacon_broadcast {
 +      struct acx_header header;
 +
 +      __le16 beacon_rx_timeout;
 +      __le16 broadcast_timeout;
 +
 +      /* Enables receiving of broadcast packets in PS mode */
 +      u8 rx_broadcast_in_ps;
 +
 +      /* Consecutive PS Poll failures before updating the host */
 +      u8 ps_poll_threshold;
 +      u8 pad[2];
 +} __packed;
 +
 +struct acx_event_mask {
 +      struct acx_header header;
 +
 +      __le32 event_mask;
 +      __le32 high_event_mask; /* Unused */
 +} __packed;
 +
 +#define CFG_RX_FCS            BIT(2)
 +#define CFG_RX_ALL_GOOD               BIT(3)
 +#define CFG_UNI_FILTER_EN     BIT(4)
 +#define CFG_BSSID_FILTER_EN   BIT(5)
 +#define CFG_MC_FILTER_EN      BIT(6)
 +#define CFG_MC_ADDR0_EN               BIT(7)
 +#define CFG_MC_ADDR1_EN               BIT(8)
 +#define CFG_BC_REJECT_EN      BIT(9)
 +#define CFG_SSID_FILTER_EN    BIT(10)
 +#define CFG_RX_INT_FCS_ERROR  BIT(11)
 +#define CFG_RX_INT_ENCRYPTED  BIT(12)
 +#define CFG_RX_WR_RX_STATUS   BIT(13)
 +#define CFG_RX_FILTER_NULTI   BIT(14)
 +#define CFG_RX_RESERVE                BIT(15)
 +#define CFG_RX_TIMESTAMP_TSF  BIT(16)
 +
 +#define CFG_RX_RSV_EN         BIT(0)
 +#define CFG_RX_RCTS_ACK               BIT(1)
 +#define CFG_RX_PRSP_EN                BIT(2)
 +#define CFG_RX_PREQ_EN                BIT(3)
 +#define CFG_RX_MGMT_EN                BIT(4)
 +#define CFG_RX_FCS_ERROR      BIT(5)
 +#define CFG_RX_DATA_EN                BIT(6)
 +#define CFG_RX_CTL_EN         BIT(7)
 +#define CFG_RX_CF_EN          BIT(8)
 +#define CFG_RX_BCN_EN         BIT(9)
 +#define CFG_RX_AUTH_EN                BIT(10)
 +#define CFG_RX_ASSOC_EN               BIT(11)
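 +
 +/*
 + * Illustrative sketch (not from the reference driver): composing the two
 + * words of struct acx_rx_config above from these CFG_* masks. The values
 + * mirror the WL1271_DEFAULT_RX_CONFIG/WL1271_DEFAULT_RX_FILTER defaults
 + * defined in wl12xx.h.
 + */
 +static inline void example_fill_rx_config(struct acx_rx_config *rx)
 +{
 +      /* unicast, BSSID and multicast filtering enabled */
 +      rx->config_options = cpu_to_le32(CFG_UNI_FILTER_EN |
 +                                       CFG_BSSID_FILTER_EN |
 +                                       CFG_MC_FILTER_EN);
 +      /* pass RTS/CTS/ACK, probe resp, mgmt, data, ctrl, beacon,
 +       * auth and assoc frames; drop the rest */
 +      rx->filter_options = cpu_to_le32(CFG_RX_RCTS_ACK | CFG_RX_PRSP_EN |
 +                                       CFG_RX_MGMT_EN | CFG_RX_DATA_EN |
 +                                       CFG_RX_CTL_EN | CFG_RX_BCN_EN |
 +                                       CFG_RX_AUTH_EN | CFG_RX_ASSOC_EN);
 +}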
 +
 +#define SCAN_PASSIVE          BIT(0)
 +#define SCAN_5GHZ_BAND                BIT(1)
 +#define SCAN_TRIGGERED                BIT(2)
 +#define SCAN_PRIORITY_HIGH    BIT(3)
 +
 +/* When set, disable HW encryption */
 +#define DF_ENCRYPTION_DISABLE      0x01
 +#define DF_SNIFF_MODE_ENABLE       0x80
 +
 +struct acx_feature_config {
 +      struct acx_header header;
 +
 +      __le32 options;
 +      __le32 data_flow_options;
 +} __packed;
 +
 +struct acx_current_tx_power {
 +      struct acx_header header;
 +
 +      u8  current_tx_power;
 +      u8  padding[3];
 +} __packed;
 +
 +struct acx_wake_up_condition {
 +      struct acx_header header;
 +
 +      u8 wake_up_event; /* Only one bit can be set */
 +      u8 listen_interval;
 +      u8 pad[2];
 +} __packed;
 +
 +struct acx_aid {
 +      struct acx_header header;
 +
 +      /*
 +       * To be set when associated with an AP.
 +       */
 +      __le16 aid;
 +      u8 pad[2];
 +} __packed;
 +
 +enum acx_preamble_type {
 +      ACX_PREAMBLE_LONG = 0,
 +      ACX_PREAMBLE_SHORT = 1
 +};
 +
 +struct acx_preamble {
 +      struct acx_header header;
 +
 +      /*
 +       * When set, the WiLink transmits the frames with a short preamble and
 +       * when cleared, the WiLink transmits the frames with a long preamble.
 +       */
 +      u8 preamble;
 +      u8 padding[3];
 +} __packed;
 +
 +enum acx_ctsprotect_type {
 +      CTSPROTECT_DISABLE = 0,
 +      CTSPROTECT_ENABLE = 1
 +};
 +
 +struct acx_ctsprotect {
 +      struct acx_header header;
 +      u8 ctsprotect;
 +      u8 padding[3];
 +} __packed;
 +
 +struct acx_tx_statistics {
 +      __le32 internal_desc_overflow;
 +}  __packed;
 +
 +struct acx_rx_statistics {
 +      __le32 out_of_mem;
 +      __le32 hdr_overflow;
 +      __le32 hw_stuck;
 +      __le32 dropped;
 +      __le32 fcs_err;
 +      __le32 xfr_hint_trig;
 +      __le32 path_reset;
 +      __le32 reset_counter;
 +} __packed;
 +
 +struct acx_dma_statistics {
 +      __le32 rx_requested;
 +      __le32 rx_errors;
 +      __le32 tx_requested;
 +      __le32 tx_errors;
 +}  __packed;
 +
 +struct acx_isr_statistics {
 +      /* host command complete */
 +      __le32 cmd_cmplt;
 +
 +      /* fiqisr() */
 +      __le32 fiqs;
 +
 +      /* (INT_STS_ND & INT_TRIG_RX_HEADER) */
 +      __le32 rx_headers;
 +
 +      /* (INT_STS_ND & INT_TRIG_RX_CMPLT) */
 +      __le32 rx_completes;
 +
 +      /* (INT_STS_ND & INT_TRIG_NO_RX_BUF) */
 +      __le32 rx_mem_overflow;
 +
 +      /* (INT_STS_ND & INT_TRIG_S_RX_RDY) */
 +      __le32 rx_rdys;
 +
 +      /* irqisr() */
 +      __le32 irqs;
 +
 +      /* (INT_STS_ND & INT_TRIG_TX_PROC) */
 +      __le32 tx_procs;
 +
 +      /* (INT_STS_ND & INT_TRIG_DECRYPT_DONE) */
 +      __le32 decrypt_done;
 +
 +      /* (INT_STS_ND & INT_TRIG_DMA0) */
 +      __le32 dma0_done;
 +
 +      /* (INT_STS_ND & INT_TRIG_DMA1) */
 +      __le32 dma1_done;
 +
 +      /* (INT_STS_ND & INT_TRIG_TX_EXC_CMPLT) */
 +      __le32 tx_exch_complete;
 +
 +      /* (INT_STS_ND & INT_TRIG_COMMAND) */
 +      __le32 commands;
 +
 +      /* (INT_STS_ND & INT_TRIG_RX_PROC) */
 +      __le32 rx_procs;
 +
 +      /* (INT_STS_ND & INT_TRIG_PM_802) */
 +      __le32 hw_pm_mode_changes;
 +
 +      /* (INT_STS_ND & INT_TRIG_ACKNOWLEDGE) */
 +      __le32 host_acknowledges;
 +
 +      /* (INT_STS_ND & INT_TRIG_PM_PCI) */
 +      __le32 pci_pm;
 +
 +      /* (INT_STS_ND & INT_TRIG_ACM_WAKEUP) */
 +      __le32 wakeups;
 +
 +      /* (INT_STS_ND & INT_TRIG_LOW_RSSI) */
 +      __le32 low_rssi;
 +} __packed;
 +
 +struct acx_wep_statistics {
 +      /* WEP address keys configured */
 +      __le32 addr_key_count;
 +
 +      /* default keys configured */
 +      __le32 default_key_count;
 +
 +      __le32 reserved;
 +
 +      /* number of times that WEP key not found on lookup */
 +      __le32 key_not_found;
 +
 +      /* number of times that WEP key decryption failed */
 +      __le32 decrypt_fail;
 +
 +      /* WEP packets decrypted */
 +      __le32 packets;
 +
 +      /* WEP decrypt interrupts */
 +      __le32 interrupt;
 +} __packed;
 +
 +#define ACX_MISSED_BEACONS_SPREAD 10
 +
 +struct acx_pwr_statistics {
 +      /* the number of entries into power save mode (both PD & ELP) */
 +      __le32 ps_enter;
 +
 +      /* the number of entries into ELP mode */
 +      __le32 elp_enter;
 +
 +      /* the number of missed-beacon interrupts issued to the host */
 +      __le32 missing_bcns;
 +
 +      /* the number of wake-ups on host access */
 +      __le32 wake_on_host;
 +
 +      /* the number of wake-ups on timer expiry */
 +      __le32 wake_on_timer_exp;
 +
 +      /* the number of packets that were transmitted with PS bit set */
 +      __le32 tx_with_ps;
 +
 +      /* the number of packets that were transmitted with PS bit clear */
 +      __le32 tx_without_ps;
 +
 +      /* the number of received beacons */
 +      __le32 rcvd_beacons;
 +
 +      /* the number of entering into PowerOn (power save off) */
 +      __le32 power_save_off;
 +
 +      /* the number of entries into power save mode */
 +      __le16 enable_ps;
 +
 +      /*
 +       * the number of exits from power save, not including failed PS
 +       * transitions
 +       */
 +      __le16 disable_ps;
 +
 +      /*
 +       * the number of times the TSF counter was adjusted because
 +       * of drift
 +       */
 +      __le32 fix_tsf_ps;
 +
 +      /* Gives statistics about the spread of continuous missed beacons.
 +       * The 16 LSB are dedicated to the PS mode.
 +       * The 16 MSB are dedicated to the Active mode.
 +       * cont_miss_bcns_spread[0] - single missed beacon.
 +       * cont_miss_bcns_spread[1] - two continuous missed beacons.
 +       * cont_miss_bcns_spread[2] - three continuous missed beacons.
 +       * ...
 +       * cont_miss_bcns_spread[9] - ten and more continuous missed beacons.
 +      */
 +      __le32 cont_miss_bcns_spread[ACX_MISSED_BEACONS_SPREAD];
 +
 +      /* the number of beacons in awake mode */
 +      __le32 rcvd_awake_beacons;
 +} __packed;
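 +
 +/*
 + * Illustrative sketch: decoding one cont_miss_bcns_spread counter into
 + * its PS-mode (16 LSB) and Active-mode (16 MSB) halves, as described in
 + * the comment above.
 + */
 +static inline void example_split_miss_bcns(__le32 raw, u16 *ps, u16 *active)
 +{
 +      u32 v = le32_to_cpu(raw);
 +
 +      *ps = v & 0xffff;   /* 16 LSB: PS mode */
 +      *active = v >> 16;  /* 16 MSB: Active mode */
 +}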
 +
 +struct acx_mic_statistics {
 +      __le32 rx_pkts;
 +      __le32 calc_failure;
 +} __packed;
 +
 +struct acx_aes_statistics {
 +      __le32 encrypt_fail;
 +      __le32 decrypt_fail;
 +      __le32 encrypt_packets;
 +      __le32 decrypt_packets;
 +      __le32 encrypt_interrupt;
 +      __le32 decrypt_interrupt;
 +} __packed;
 +
 +struct acx_event_statistics {
 +      __le32 heart_beat;
 +      __le32 calibration;
 +      __le32 rx_mismatch;
 +      __le32 rx_mem_empty;
 +      __le32 rx_pool;
 +      __le32 oom_late;
 +      __le32 phy_transmit_error;
 +      __le32 tx_stuck;
 +} __packed;
 +
 +struct acx_ps_statistics {
 +      __le32 pspoll_timeouts;
 +      __le32 upsd_timeouts;
 +      __le32 upsd_max_sptime;
 +      __le32 upsd_max_apturn;
 +      __le32 pspoll_max_apturn;
 +      __le32 pspoll_utilization;
 +      __le32 upsd_utilization;
 +} __packed;
 +
 +struct acx_rxpipe_statistics {
 +      __le32 rx_prep_beacon_drop;
 +      __le32 descr_host_int_trig_rx_data;
 +      __le32 beacon_buffer_thres_host_int_trig_rx_data;
 +      __le32 missed_beacon_host_int_trig_rx_data;
 +      __le32 tx_xfr_host_int_trig_rx_data;
 +} __packed;
 +
 +struct acx_statistics {
 +      struct acx_header header;
 +
 +      struct acx_tx_statistics tx;
 +      struct acx_rx_statistics rx;
 +      struct acx_dma_statistics dma;
 +      struct acx_isr_statistics isr;
 +      struct acx_wep_statistics wep;
 +      struct acx_pwr_statistics pwr;
 +      struct acx_aes_statistics aes;
 +      struct acx_mic_statistics mic;
 +      struct acx_event_statistics event;
 +      struct acx_ps_statistics ps;
 +      struct acx_rxpipe_statistics rxpipe;
 +} __packed;
 +
 +struct acx_rate_class {
 +      __le32 enabled_rates;
 +      u8 short_retry_limit;
 +      u8 long_retry_limit;
 +      u8 aflags;
 +      u8 reserved;
 +};
 +
 +#define ACX_TX_BASIC_RATE      0
 +#define ACX_TX_AP_FULL_RATE    1
 +#define ACX_TX_RATE_POLICY_CNT 2
 +struct acx_rate_policy {
 +      struct acx_header header;
 +
 +      __le32 rate_class_cnt;
 +      struct acx_rate_class rate_class[CONF_TX_MAX_RATE_CLASSES];
 +} __packed;
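 +
 +/*
 + * Illustrative sketch (helper name is hypothetical, and it assumes
 + * CONF_TX_MAX_RATE_CLASSES covers both indices): filling the two policy
 + * slots defined above.
 + */
 +static inline void example_init_rate_policy(struct acx_rate_policy *p,
 +                                          u32 basic_rates, u32 full_rates)
 +{
 +      p->rate_class_cnt = cpu_to_le32(ACX_TX_RATE_POLICY_CNT);
 +      p->rate_class[ACX_TX_BASIC_RATE].enabled_rates =
 +              cpu_to_le32(basic_rates);
 +      p->rate_class[ACX_TX_AP_FULL_RATE].enabled_rates =
 +              cpu_to_le32(full_rates);
 +}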
 +
 +struct acx_ac_cfg {
 +      struct acx_header header;
 +      u8 ac;
 +      u8 cw_min;
 +      __le16 cw_max;
 +      u8 aifsn;
 +      u8 reserved;
 +      __le16 tx_op_limit;
 +} __packed;
 +
 +struct acx_tid_config {
 +      struct acx_header header;
 +      u8 queue_id;
 +      u8 channel_type;
 +      u8 tsid;
 +      u8 ps_scheme;
 +      u8 ack_policy;
 +      u8 padding[3];
 +      __le32 apsd_conf[2];
 +} __packed;
 +
 +struct acx_frag_threshold {
 +      struct acx_header header;
 +      __le16 frag_threshold;
 +      u8 padding[2];
 +} __packed;
 +
 +struct acx_tx_config_options {
 +      struct acx_header header;
 +      __le16 tx_compl_timeout;     /* msec */
 +      __le16 tx_compl_threshold;   /* number of packets */
 +} __packed;
 +
 +#define ACX_RX_MEM_BLOCKS     70
 +#define ACX_TX_MIN_MEM_BLOCKS 40
 +#define ACX_TX_DESCRIPTORS    32
 +#define ACX_NUM_SSID_PROFILES 1
 +
 +struct wl1271_acx_config_memory {
 +      struct acx_header header;
 +
 +      u8 rx_mem_block_num;
 +      u8 tx_min_mem_block_num;
 +      u8 num_stations;
 +      u8 num_ssid_profiles;
 +      __le32 total_tx_descriptors;
 +} __packed;
 +
 +struct wl1271_acx_mem_map {
 +      struct acx_header header;
 +
 +      __le32 code_start;
 +      __le32 code_end;
 +
 +      __le32 wep_defkey_start;
 +      __le32 wep_defkey_end;
 +
 +      __le32 sta_table_start;
 +      __le32 sta_table_end;
 +
 +      __le32 packet_template_start;
 +      __le32 packet_template_end;
 +
 +      /* Address of the TX result interface (control block) */
 +      __le32 tx_result;
 +      __le32 tx_result_queue_start;
 +
 +      __le32 queue_memory_start;
 +      __le32 queue_memory_end;
 +
 +      __le32 packet_memory_pool_start;
 +      __le32 packet_memory_pool_end;
 +
 +      __le32 debug_buffer1_start;
 +      __le32 debug_buffer1_end;
 +
 +      __le32 debug_buffer2_start;
 +      __le32 debug_buffer2_end;
 +
 +      /* Number of blocks FW allocated for TX packets */
 +      __le32 num_tx_mem_blocks;
 +
 +      /* Number of blocks FW allocated for RX packets */
 +      __le32 num_rx_mem_blocks;
 +
 +      /* the following 4 fields are valid in SLAVE mode only */
 +      u8 *tx_cbuf;
 +      u8 *rx_cbuf;
 +      __le32 rx_ctrl;
 +      __le32 tx_ctrl;
 +} __packed;
 +
 +struct wl1271_acx_rx_config_opt {
 +      struct acx_header header;
 +
 +      __le16 mblk_threshold;
 +      __le16 threshold;
 +      __le16 timeout;
 +      u8 queue_type;
 +      u8 reserved;
 +} __packed;
 +
 +
 +struct wl1271_acx_bet_enable {
 +      struct acx_header header;
 +
 +      u8 enable;
 +      u8 max_consecutive;
 +      u8 padding[2];
 +} __packed;
 +
 +#define ACX_IPV4_VERSION 4
 +#define ACX_IPV6_VERSION 6
 +#define ACX_IPV4_ADDR_SIZE 4
 +
 +/* bitmap of enabled arp_filter features */
 +#define ACX_ARP_FILTER_ARP_FILTERING  BIT(0)
 +#define ACX_ARP_FILTER_AUTO_ARP               BIT(1)
 +
 +struct wl1271_acx_arp_filter {
 +      struct acx_header header;
 +      u8 version;         /* ACX_IPV4_VERSION, ACX_IPV6_VERSION */
 +      u8 enable;          /* bitmap of enabled ARP filtering features */
 +      u8 padding[2];
 +      u8 address[16];     /* The configured device IP address - all ARP
 +                             requests directed to this IP address will pass
 +                             through. For IPv4, the first four bytes are
 +                             used. */
 +} __packed;
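 +
 +/*
 + * Illustrative sketch (helper name is hypothetical): enabling IPv4 ARP
 + * filtering. Only the first ACX_IPV4_ADDR_SIZE bytes of the 16-byte
 + * address field are used, as the field comment above notes; the real
 + * entry point is wl1271_acx_arp_ip_filter(), declared at the end of
 + * this header.
 + */
 +static inline void example_fill_arp_filter(struct wl1271_acx_arp_filter *arp,
 +                                         __be32 ipv4)
 +{
 +      arp->version = ACX_IPV4_VERSION;
 +      arp->enable = ACX_ARP_FILTER_ARP_FILTERING;
 +      memset(arp->address, 0, sizeof(arp->address));
 +      memcpy(arp->address, &ipv4, ACX_IPV4_ADDR_SIZE);
 +}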
 +
 +struct wl1271_acx_pm_config {
 +      struct acx_header header;
 +
 +      __le32 host_clk_settling_time;
 +      u8 host_fast_wakeup_support;
 +      u8 padding[3];
 +} __packed;
 +
 +struct wl1271_acx_keep_alive_mode {
 +      struct acx_header header;
 +
 +      u8 enabled;
 +      u8 padding[3];
 +} __packed;
 +
 +enum {
 +      ACX_KEEP_ALIVE_NO_TX = 0,
 +      ACX_KEEP_ALIVE_PERIOD_ONLY
 +};
 +
 +enum {
 +      ACX_KEEP_ALIVE_TPL_INVALID = 0,
 +      ACX_KEEP_ALIVE_TPL_VALID
 +};
 +
 +struct wl1271_acx_keep_alive_config {
 +      struct acx_header header;
 +
 +      __le32 period;
 +      u8 index;
 +      u8 tpl_validation;
 +      u8 trigger;
 +      u8 padding;
 +} __packed;
 +
 +enum {
 +      WL1271_ACX_TRIG_TYPE_LEVEL = 0,
 +      WL1271_ACX_TRIG_TYPE_EDGE,
 +};
 +
 +enum {
 +      WL1271_ACX_TRIG_DIR_LOW = 0,
 +      WL1271_ACX_TRIG_DIR_HIGH,
 +      WL1271_ACX_TRIG_DIR_BIDIR,
 +};
 +
 +enum {
 +      WL1271_ACX_TRIG_ENABLE = 1,
 +      WL1271_ACX_TRIG_DISABLE,
 +};
 +
 +enum {
 +      WL1271_ACX_TRIG_METRIC_RSSI_BEACON = 0,
 +      WL1271_ACX_TRIG_METRIC_RSSI_DATA,
 +      WL1271_ACX_TRIG_METRIC_SNR_BEACON,
 +      WL1271_ACX_TRIG_METRIC_SNR_DATA,
 +};
 +
 +enum {
 +      WL1271_ACX_TRIG_IDX_RSSI = 0,
 +      WL1271_ACX_TRIG_COUNT = 8,
 +};
 +
 +struct wl1271_acx_rssi_snr_trigger {
 +      struct acx_header header;
 +
 +      __le16 threshold;
 +      __le16 pacing; /* 0 - 60000 ms */
 +      u8 metric;
 +      u8 type;
 +      u8 dir;
 +      u8 hysteresis;
 +      u8 index;
 +      u8 enable;
 +      u8 padding[2];
 +};
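 +
 +/*
 + * Illustrative sketch (helper name is hypothetical): arming a
 + * bidirectional beacon-RSSI edge trigger with the enums above. The
 + * driver-facing entry point is wl1271_acx_rssi_snr_trigger(), declared
 + * at the end of this header.
 + */
 +static inline void example_fill_rssi_trigger(
 +      struct wl1271_acx_rssi_snr_trigger *trig, s16 thold, u8 hyst)
 +{
 +      trig->threshold = cpu_to_le16(thold);
 +      trig->pacing = cpu_to_le16(0); /* report every crossing (0 ms) */
 +      trig->metric = WL1271_ACX_TRIG_METRIC_RSSI_BEACON;
 +      trig->type = WL1271_ACX_TRIG_TYPE_EDGE;
 +      trig->dir = WL1271_ACX_TRIG_DIR_BIDIR;
 +      trig->hysteresis = hyst;
 +      trig->index = WL1271_ACX_TRIG_IDX_RSSI;
 +      trig->enable = WL1271_ACX_TRIG_ENABLE;
 +}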
 +
 +struct wl1271_acx_rssi_snr_avg_weights {
 +      struct acx_header header;
 +
 +      u8 rssi_beacon;
 +      u8 rssi_data;
 +      u8 snr_beacon;
 +      u8 snr_data;
 +};
 +
 +/*
 + * ACX_PEER_HT_CAP
 + * Configure HT capabilities - declare the capabilities of the peer
 + * we are connected to.
 + */
 +struct wl1271_acx_ht_capabilities {
 +      struct acx_header header;
 +
 +      /*
 +       * bit 0 - Allow HT Operation
 +       * bit 1 - Allow Greenfield format in TX
 +       * bit 2 - Allow Short GI in TX
 +       * bit 3 - Allow L-SIG TXOP Protection in TX
 +       * bit 4 - Allow HT Control fields in TX.
 +       *         Note: the driver still leaves space for HT control in
 +       *         packets regardless of the value of this field. The FW is
 +       *         responsible for dropping the HT field from any frame when
 +       *         this bit is set to 0.
 +       * bit 5 - Allow RD initiation in TXOP. The FW is allowed to initiate
 +       *         RD.
 +       *         Exact policy setting for this feature is TBD.
 +       *         Note, this bit can only be set to 1 if bit 3 is set to 1.
 +       */
 +      __le32 ht_capabilites;
 +
 +      /*
 +       * Indicates to which peer these capabilities apply.
 +       * For infrastructure, use ff:ff:ff:ff:ff:ff, which indicates the
 +       * setting is relevant for all peers. A specific peer address is
 +       * only valid for IBSS/DLS operation.
 +       */
 +      u8 mac_address[ETH_ALEN];
 +
 +      /*
 +       * This is the maximum A-MPDU length supported by the AP. The FW may not
 +       * exceed this length when sending A-MPDUs
 +       */
 +      u8 ampdu_max_length;
 +
 +      /* This is the minimal spacing required when sending A-MPDUs to the AP */
 +      u8 ampdu_min_spacing;
 +} __packed;
 +
 +/* HT Capabilities FW Bit Mask Mapping */
 +#define WL1271_ACX_FW_CAP_HT_OPERATION                 BIT(0)
 +#define WL1271_ACX_FW_CAP_GREENFIELD_FRAME_FORMAT      BIT(1)
 +#define WL1271_ACX_FW_CAP_SHORT_GI_FOR_20MHZ_PACKETS   BIT(2)
 +#define WL1271_ACX_FW_CAP_LSIG_TXOP_PROTECTION         BIT(3)
 +#define WL1271_ACX_FW_CAP_HT_CONTROL_FIELDS            BIT(4)
 +#define WL1271_ACX_FW_CAP_RD_INITIATION                BIT(5)
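 +
 +/*
 + * Illustrative sketch (the mapping shown is an assumption; see
 + * wl1271_acx_set_ht_capabilities() for the real translation): deriving
 + * the FW bit mask above from a mac80211 ieee80211_sta_ht_cap.
 + */
 +static inline __le32 example_ht_caps_to_fw(struct ieee80211_sta_ht_cap *cap,
 +                                         bool allow_ht_operation)
 +{
 +      u32 fw_caps = 0;
 +
 +      if (allow_ht_operation)
 +              fw_caps |= WL1271_ACX_FW_CAP_HT_OPERATION;
 +      if (cap->cap & IEEE80211_HT_CAP_GRN_FLD)
 +              fw_caps |= WL1271_ACX_FW_CAP_GREENFIELD_FRAME_FORMAT;
 +      if (cap->cap & IEEE80211_HT_CAP_SGI_20)
 +              fw_caps |= WL1271_ACX_FW_CAP_SHORT_GI_FOR_20MHZ_PACKETS;
 +
 +      return cpu_to_le32(fw_caps);
 +}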
 +
 +
 +/*
 + * ACX_HT_BSS_OPERATION
 + * Configure HT capabilities - AP rules for behavior in the BSS.
 + */
 +struct wl1271_acx_ht_information {
 +      struct acx_header header;
 +
 +      /* Values: 0 - RIFS not allowed, 1 - RIFS allowed */
 +      u8 rifs_mode;
 +
 +      /* Values: 0 - 3 like in spec */
 +      u8 ht_protection;
 +
 +      /* Values: 0 - GF protection not required, 1 - GF protection required */
 +      u8 gf_protection;
 +
 +      /* Values: 0 - TX Burst Limit not required, 1 - TX Burst Limit required */
 +      u8 ht_tx_burst_limit;
 +
 +      /*
 +       * Values: 0 - Dual CTS protection not required,
 +       *         1 - Dual CTS Protection required
 +       * Note: When this value is set to 1, the FW protects all TXOPs with
 +       * RTS frames and does not use CTS-to-self, regardless of the value
 +       * of the ACX_CTS_PROTECTION information element
 +       */
 +      u8 dual_cts_protection;
 +
 +      u8 padding[3];
 +} __packed;
 +
 +struct wl1271_acx_fw_tsf_information {
 +      struct acx_header header;
 +
 +      __le32 current_tsf_high;
 +      __le32 current_tsf_low;
 +      __le32 last_bttt_high;
 +      __le32 last_tbtt_low;
 +      u8 last_dtim_count;
 +      u8 padding[3];
 +} __packed;
 +
 +enum {
 +      ACX_WAKE_UP_CONDITIONS      = 0x0002,
 +      ACX_MEM_CFG                 = 0x0003,
 +      ACX_SLOT                    = 0x0004,
 +      ACX_AC_CFG                  = 0x0007,
 +      ACX_MEM_MAP                 = 0x0008,
 +      ACX_AID                     = 0x000A,
 +      /* ACX_FW_REV is missing in the ref driver, but seems to work */
 +      ACX_FW_REV                  = 0x000D,
 +      ACX_MEDIUM_USAGE            = 0x000F,
 +      ACX_RX_CFG                  = 0x0010,
 +      ACX_TX_QUEUE_CFG            = 0x0011, /* FIXME: only used by wl1251 */
 +      ACX_STATISTICS              = 0x0013, /* Debug API */
 +      ACX_PWR_CONSUMPTION_STATISTICS = 0x0014,
 +      ACX_FEATURE_CFG             = 0x0015,
 +      ACX_TID_CFG                 = 0x001A,
 +      ACX_PS_RX_STREAMING         = 0x001B,
 +      ACX_BEACON_FILTER_OPT       = 0x001F,
 +      ACX_NOISE_HIST              = 0x0021,
 +      ACX_HDK_VERSION             = 0x0022, /* ??? */
 +      ACX_PD_THRESHOLD            = 0x0023,
 +      ACX_TX_CONFIG_OPT           = 0x0024,
 +      ACX_CCA_THRESHOLD           = 0x0025,
 +      ACX_EVENT_MBOX_MASK         = 0x0026,
 +      ACX_CONN_MONIT_PARAMS       = 0x002D,
 +      ACX_CONS_TX_FAILURE         = 0x002F,
 +      ACX_BCN_DTIM_OPTIONS        = 0x0031,
 +      ACX_SG_ENABLE               = 0x0032,
 +      ACX_SG_CFG                  = 0x0033,
 +      ACX_BEACON_FILTER_TABLE     = 0x0038,
 +      ACX_ARP_IP_FILTER           = 0x0039,
 +      ACX_ROAMING_STATISTICS_TBL  = 0x003B,
 +      ACX_RATE_POLICY             = 0x003D,
 +      ACX_CTS_PROTECTION          = 0x003E,
 +      ACX_SLEEP_AUTH              = 0x003F,
 +      ACX_PREAMBLE_TYPE           = 0x0040,
 +      ACX_ERROR_CNT               = 0x0041,
 +      ACX_IBSS_FILTER             = 0x0044,
 +      ACX_SERVICE_PERIOD_TIMEOUT  = 0x0045,
 +      ACX_TSF_INFO                = 0x0046,
 +      ACX_CONFIG_PS_WMM           = 0x0049,
 +      ACX_ENABLE_RX_DATA_FILTER   = 0x004A,
 +      ACX_SET_RX_DATA_FILTER      = 0x004B,
 +      ACX_GET_DATA_FILTER_STATISTICS = 0x004C,
 +      ACX_RX_CONFIG_OPT           = 0x004E,
 +      ACX_FRAG_CFG                = 0x004F,
 +      ACX_BET_ENABLE              = 0x0050,
 +      ACX_RSSI_SNR_TRIGGER        = 0x0051,
 +      ACX_RSSI_SNR_WEIGHTS        = 0x0052,
 +      ACX_KEEP_ALIVE_MODE         = 0x0053,
 +      ACX_SET_KEEP_ALIVE_CONFIG   = 0x0054,
 +      ACX_BA_SESSION_RESPONDER_POLICY = 0x0055,
 +      ACX_BA_SESSION_INITIATOR_POLICY = 0x0056,
 +      ACX_PEER_HT_CAP             = 0x0057,
 +      ACX_HT_BSS_OPERATION        = 0x0058,
 +      ACX_COEX_ACTIVITY           = 0x0059,
 +      ACX_SET_DCO_ITRIM_PARAMS    = 0x0061,
 +      DOT11_RX_MSDU_LIFE_TIME     = 0x1004,
 +      DOT11_CUR_TX_PWR            = 0x100D,
 +      DOT11_RX_DOT11_MODE         = 0x1012,
 +      DOT11_RTS_THRESHOLD         = 0x1013,
 +      DOT11_GROUP_ADDRESS_TBL     = 0x1014,
 +      ACX_PM_CONFIG               = 0x1016,
 +
 +      MAX_DOT11_IE = DOT11_GROUP_ADDRESS_TBL,
 +
 +      MAX_IE = 0xFFFF
 +};
 +
 +
 +int wl1271_acx_wake_up_conditions(struct wl1271 *wl);
 +int wl1271_acx_sleep_auth(struct wl1271 *wl, u8 sleep_auth);
 +int wl1271_acx_tx_power(struct wl1271 *wl, int power);
 +int wl1271_acx_feature_cfg(struct wl1271 *wl);
 +int wl1271_acx_mem_map(struct wl1271 *wl,
 +                     struct acx_header *mem_map, size_t len);
 +int wl1271_acx_rx_msdu_life_time(struct wl1271 *wl);
 +int wl1271_acx_rx_config(struct wl1271 *wl, u32 config, u32 filter);
 +int wl1271_acx_pd_threshold(struct wl1271 *wl);
 +int wl1271_acx_slot(struct wl1271 *wl, enum acx_slot_type slot_time);
 +int wl1271_acx_group_address_tbl(struct wl1271 *wl, bool enable,
 +                               void *mc_list, u32 mc_list_len);
 +int wl1271_acx_service_period_timeout(struct wl1271 *wl);
 +int wl1271_acx_rts_threshold(struct wl1271 *wl, u16 rts_threshold);
 +int wl1271_acx_dco_itrim_params(struct wl1271 *wl);
 +int wl1271_acx_beacon_filter_opt(struct wl1271 *wl, bool enable_filter);
 +int wl1271_acx_beacon_filter_table(struct wl1271 *wl);
 +int wl1271_acx_conn_monit_params(struct wl1271 *wl, bool enable);
 +int wl1271_acx_sg_enable(struct wl1271 *wl, bool enable);
 +int wl1271_acx_sg_cfg(struct wl1271 *wl);
 +int wl1271_acx_cca_threshold(struct wl1271 *wl);
 +int wl1271_acx_bcn_dtim_options(struct wl1271 *wl);
 +int wl1271_acx_aid(struct wl1271 *wl, u16 aid);
 +int wl1271_acx_event_mbox_mask(struct wl1271 *wl, u32 event_mask);
 +int wl1271_acx_set_preamble(struct wl1271 *wl, enum acx_preamble_type preamble);
 +int wl1271_acx_cts_protect(struct wl1271 *wl,
 +                         enum acx_ctsprotect_type ctsprotect);
 +int wl1271_acx_statistics(struct wl1271 *wl, struct acx_statistics *stats);
 +int wl1271_acx_rate_policies(struct wl1271 *wl);
 +int wl1271_acx_ac_cfg(struct wl1271 *wl, u8 ac, u8 cw_min, u16 cw_max,
 +                    u8 aifsn, u16 txop);
 +int wl1271_acx_tid_cfg(struct wl1271 *wl, u8 queue_id, u8 channel_type,
 +                     u8 tsid, u8 ps_scheme, u8 ack_policy,
 +                     u32 apsd_conf0, u32 apsd_conf1);
 +int wl1271_acx_frag_threshold(struct wl1271 *wl, u16 frag_threshold);
 +int wl1271_acx_tx_config_options(struct wl1271 *wl);
 +int wl1271_acx_mem_cfg(struct wl1271 *wl);
 +int wl1271_acx_init_mem_config(struct wl1271 *wl);
 +int wl1271_acx_init_rx_interrupt(struct wl1271 *wl);
 +int wl1271_acx_smart_reflex(struct wl1271 *wl);
 +int wl1271_acx_bet_enable(struct wl1271 *wl, bool enable);
 +int wl1271_acx_arp_ip_filter(struct wl1271 *wl, u8 enable, __be32 address);
 +int wl1271_acx_pm_config(struct wl1271 *wl);
 +int wl1271_acx_keep_alive_mode(struct wl1271 *wl, bool enable);
 +int wl1271_acx_keep_alive_config(struct wl1271 *wl, u8 index, u8 tpl_valid);
 +int wl1271_acx_rssi_snr_trigger(struct wl1271 *wl, bool enable,
 +                              s16 thold, u8 hyst);
 +int wl1271_acx_rssi_snr_avg_weights(struct wl1271 *wl);
 +int wl1271_acx_set_ht_capabilities(struct wl1271 *wl,
 +                                  struct ieee80211_sta_ht_cap *ht_cap,
 +                                  bool allow_ht_operation);
 +int wl1271_acx_set_ht_information(struct wl1271 *wl,
 +                                 u16 ht_operation_mode);
 +int wl1271_acx_tsf_info(struct wl1271 *wl, u64 *mactime);
 +
 +#endif /* __WL1271_ACX_H__ */
index ce3d31f,0000000..9050dd9
mode 100644,000000..100644
--- /dev/null
@@@ -1,426 -1,0 +1,426 @@@
 +/*
 + * This file is part of wl1271
 + *
 + * Copyright (C) 1998-2009 Texas Instruments. All rights reserved.
 + * Copyright (C) 2008-2009 Nokia Corporation
 + *
 + * Contact: Luciano Coelho <luciano.coelho@nokia.com>
 + *
 + * This program is free software; you can redistribute it and/or
 + * modify it under the terms of the GNU General Public License
 + * version 2 as published by the Free Software Foundation.
 + *
 + * This program is distributed in the hope that it will be useful, but
 + * WITHOUT ANY WARRANTY; without even the implied warranty of
 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 + * General Public License for more details.
 + *
 + * You should have received a copy of the GNU General Public License
 + * along with this program; if not, write to the Free Software
 + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
 + * 02110-1301 USA
 + *
 + */
 +
 +#ifndef __WL12XX_H__
 +#define __WL12XX_H__
 +
 +#include <linux/mutex.h>
 +#include <linux/completion.h>
 +#include <linux/spinlock.h>
 +#include <linux/list.h>
 +#include <linux/bitops.h>
 +#include <net/mac80211.h>
 +
 +#include "conf.h"
 +#include "ini.h"
 +
 +#define DRIVER_NAME "wl1271"
 +#define DRIVER_PREFIX DRIVER_NAME ": "
 +
 +enum {
 +      DEBUG_NONE      = 0,
 +      DEBUG_IRQ       = BIT(0),
 +      DEBUG_SPI       = BIT(1),
 +      DEBUG_BOOT      = BIT(2),
 +      DEBUG_MAILBOX   = BIT(3),
 +      DEBUG_TESTMODE  = BIT(4),
 +      DEBUG_EVENT     = BIT(5),
 +      DEBUG_TX        = BIT(6),
 +      DEBUG_RX        = BIT(7),
 +      DEBUG_SCAN      = BIT(8),
 +      DEBUG_CRYPT     = BIT(9),
 +      DEBUG_PSM       = BIT(10),
 +      DEBUG_MAC80211  = BIT(11),
 +      DEBUG_CMD       = BIT(12),
 +      DEBUG_ACX       = BIT(13),
 +      DEBUG_SDIO      = BIT(14),
 +      DEBUG_FILTERS   = BIT(15),
 +      DEBUG_ADHOC     = BIT(16),
 +      DEBUG_ALL       = ~0,
 +};
 +
 +extern u32 wl12xx_debug_level;
 +
 +#define DEBUG_DUMP_LIMIT 1024
 +
 +#define wl1271_error(fmt, arg...) \
 +      pr_err(DRIVER_PREFIX "ERROR " fmt "\n", ##arg)
 +
 +#define wl1271_warning(fmt, arg...) \
 +      pr_warning(DRIVER_PREFIX "WARNING " fmt "\n", ##arg)
 +
 +#define wl1271_notice(fmt, arg...) \
 +      pr_info(DRIVER_PREFIX fmt "\n", ##arg)
 +
 +#define wl1271_info(fmt, arg...) \
 +      pr_info(DRIVER_PREFIX fmt "\n", ##arg)
 +
 +#define wl1271_debug(level, fmt, arg...) \
 +      do { \
 +              if (level & wl12xx_debug_level) \
 +                      pr_debug(DRIVER_PREFIX fmt "\n", ##arg); \
 +      } while (0)
 +
 +/* TODO: use pr_debug_hex_dump when it becomes available */
 +#define wl1271_dump(level, prefix, buf, len)  \
 +      do { \
 +              if (level & wl12xx_debug_level) \
 +                      print_hex_dump(KERN_DEBUG, DRIVER_PREFIX prefix, \
 +                                     DUMP_PREFIX_OFFSET, 16, 1,       \
 +                                     buf,                             \
 +                                     min_t(size_t, len, DEBUG_DUMP_LIMIT), \
 +                                     0);                              \
 +      } while (0)
 +
 +#define wl1271_dump_ascii(level, prefix, buf, len)    \
 +      do { \
 +              if (level & wl12xx_debug_level) \
 +                      print_hex_dump(KERN_DEBUG, DRIVER_PREFIX prefix, \
 +                                     DUMP_PREFIX_OFFSET, 16, 1,       \
 +                                     buf,                             \
 +                                     min_t(size_t, len, DEBUG_DUMP_LIMIT), \
 +                                     true);                           \
 +      } while (0)
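 +
 +/*
 + * Usage sketch: call sites pick a DEBUG_* class and the message is only
 + * emitted when that class is set in wl12xx_debug_level, e.g.
 + *
 + *   wl1271_debug(DEBUG_CMD, "command id 0x%x", id);
 + *   wl1271_dump(DEBUG_SPI, "spi rx: ", buf, len);
 + */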
 +
 +#define WL1271_DEFAULT_RX_CONFIG (CFG_UNI_FILTER_EN | \
 +                                CFG_BSSID_FILTER_EN | \
 +                                CFG_MC_FILTER_EN)
 +
 +#define WL1271_DEFAULT_RX_FILTER (CFG_RX_RCTS_ACK | CFG_RX_PRSP_EN |  \
 +                                CFG_RX_MGMT_EN | CFG_RX_DATA_EN |   \
 +                                CFG_RX_CTL_EN | CFG_RX_BCN_EN |     \
 +                                CFG_RX_AUTH_EN | CFG_RX_ASSOC_EN)
 +
 +#define WL1271_FW_NAME "wl1271-fw.bin"
 +#define WL1271_NVS_NAME "wl1271-nvs.bin"
 +
 +#define WL1271_TX_SECURITY_LO16(s) ((u16)((s) & 0xffff))
 +#define WL1271_TX_SECURITY_HI32(s) ((u32)(((s) >> 16) & 0xffffffff))
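 +
 +/*
 + * Illustrative sketch: splitting the 48-bit TX security sequence number
 + * (tx_security_seq in struct wl1271 below) into the 16/32 bit halves the
 + * firmware expects, using the two macros above.
 + */
 +static inline void example_split_tx_seq(s64 seq, u16 *lo, u32 *hi)
 +{
 +      *lo = WL1271_TX_SECURITY_LO16(seq);
 +      *hi = WL1271_TX_SECURITY_HI32(seq);
 +}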
 +
 +#define WL1271_CIPHER_SUITE_GEM 0x00147201
 +
 +#define WL1271_BUSY_WORD_CNT 1
 +#define WL1271_BUSY_WORD_LEN (WL1271_BUSY_WORD_CNT * sizeof(u32))
 +
 +#define WL1271_ELP_HW_STATE_ASLEEP 0
 +#define WL1271_ELP_HW_STATE_IRQ    1
 +
 +#define WL1271_DEFAULT_BEACON_INT  100
 +#define WL1271_DEFAULT_DTIM_PERIOD 1
 +
 +#define ACX_TX_DESCRIPTORS         32
 +
 +#define WL1271_AGGR_BUFFER_SIZE (4 * PAGE_SIZE)
 +
 +enum wl1271_state {
 +      WL1271_STATE_OFF,
 +      WL1271_STATE_ON,
 +      WL1271_STATE_PLT,
 +};
 +
 +enum wl1271_partition_type {
 +      PART_DOWN,
 +      PART_WORK,
 +      PART_DRPW,
 +
 +      PART_TABLE_LEN
 +};
 +
 +struct wl1271_partition {
 +      u32 size;
 +      u32 start;
 +};
 +
 +struct wl1271_partition_set {
 +      struct wl1271_partition mem;
 +      struct wl1271_partition reg;
 +      struct wl1271_partition mem2;
 +      struct wl1271_partition mem3;
 +};
 +
 +struct wl1271;
 +
 +/* FIXME: I'm not sure about this structure name */
 +struct wl1271_chip {
 +      u32 id;
 +      char fw_ver[21];
 +};
 +
 +struct wl1271_stats {
 +      struct acx_statistics *fw_stats;
 +      unsigned long fw_stats_update;
 +
 +      unsigned int retry_count;
 +      unsigned int excessive_retries;
 +};
 +
 +#define NUM_TX_QUEUES              4
 +#define NUM_RX_PKT_DESC            8
 +
 +/* FW status registers */
 +struct wl1271_fw_status {
 +      __le32 intr;
 +      u8  fw_rx_counter;
 +      u8  drv_rx_counter;
 +      u8  reserved;
 +      u8  tx_results_counter;
 +      __le32 rx_pkt_descs[NUM_RX_PKT_DESC];
 +      __le32 tx_released_blks[NUM_TX_QUEUES];
 +      __le32 fw_localtime;
 +      __le32 padding[2];
 +} __packed;
 +
 +struct wl1271_rx_mem_pool_addr {
 +      u32 addr;
 +      u32 addr_extra;
 +};
 +
 +struct wl1271_scan {
 +      struct cfg80211_scan_request *req;
 +      bool *scanned_ch;
 +      bool failed;
 +      u8 state;
 +      u8 ssid[IW_ESSID_MAX_SIZE+1];
 +      size_t ssid_len;
 +};
 +
 +struct wl1271_if_operations {
 +      void (*read)(struct wl1271 *wl, int addr, void *buf, size_t len,
 +                   bool fixed);
 +      void (*write)(struct wl1271 *wl, int addr, void *buf, size_t len,
 +                   bool fixed);
 +      void (*reset)(struct wl1271 *wl);
 +      void (*init)(struct wl1271 *wl);
 +      int (*power)(struct wl1271 *wl, bool enable);
 +      struct device* (*dev)(struct wl1271 *wl);
 +      void (*enable_irq)(struct wl1271 *wl);
 +      void (*disable_irq)(struct wl1271 *wl);
 +};
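 +
 +/*
 + * Illustrative sketch (all names are hypothetical): a bus glue layer
 + * (e.g. SDIO or SPI) fills one of these operation tables and hands it
 + * to the core, along the lines of:
 + *
 + *   static struct wl1271_if_operations example_bus_ops = {
 + *           .read        = example_raw_read,
 + *           .write       = example_raw_write,
 + *           .reset       = example_reset,
 + *           .init        = example_init,
 + *           .power       = example_power,
 + *           .dev         = example_dev,
 + *           .enable_irq  = example_enable_irq,
 + *           .disable_irq = example_disable_irq,
 + *   };
 + */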
 +
 +struct wl1271 {
 +      struct platform_device *plat_dev;
 +      struct ieee80211_hw *hw;
 +      bool mac80211_registered;
 +
 +      void *if_priv;
 +
 +      struct wl1271_if_operations *if_ops;
 +
 +      void (*set_power)(bool enable);
 +      int irq;
 +      int ref_clock;
 +
 +      spinlock_t wl_lock;
 +
 +      enum wl1271_state state;
 +      struct mutex mutex;
 +
 +#define WL1271_FLAG_STA_RATES_CHANGED  (0)
 +#define WL1271_FLAG_STA_ASSOCIATED     (1)
 +#define WL1271_FLAG_JOINED             (2)
 +#define WL1271_FLAG_GPIO_POWER         (3)
 +#define WL1271_FLAG_TX_QUEUE_STOPPED   (4)
 +#define WL1271_FLAG_IN_ELP             (5)
 +#define WL1271_FLAG_PSM                (6)
 +#define WL1271_FLAG_PSM_REQUESTED      (7)
 +#define WL1271_FLAG_IRQ_PENDING        (8)
 +#define WL1271_FLAG_IRQ_RUNNING        (9)
 +#define WL1271_FLAG_IDLE              (10)
 +#define WL1271_FLAG_IDLE_REQUESTED    (11)
 +#define WL1271_FLAG_PSPOLL_FAILURE    (12)
 +#define WL1271_FLAG_STA_STATE_SENT    (13)
 +#define WL1271_FLAG_FW_TX_BUSY        (14)
 +      unsigned long flags;
 +
 +      struct wl1271_partition_set part;
 +
 +      struct wl1271_chip chip;
 +
 +      int cmd_box_addr;
 +      int event_box_addr;
 +
 +      u8 *fw;
 +      size_t fw_len;
 +      struct wl1271_nvs_file *nvs;
 +      size_t nvs_len;
 +
 +      s8 hw_pg_ver;
 +
 +      u8 bssid[ETH_ALEN];
 +      u8 mac_addr[ETH_ALEN];
 +      u8 bss_type;
 +      u8 set_bss_type;
 +      u8 ssid[IW_ESSID_MAX_SIZE + 1];
 +      u8 ssid_len;
 +      int channel;
 +
 +      struct wl1271_acx_mem_map *target_mem_map;
 +
 +      /* Accounting for allocated / available TX blocks on HW */
 +      u32 tx_blocks_freed[NUM_TX_QUEUES];
 +      u32 tx_blocks_available;
 +      u32 tx_results_count;
 +
 +      /* Transmitted TX packets counter for chipset interface */
 +      u32 tx_packets_count;
 +
 +      /* Time-offset between host and chipset clocks */
 +      s64 time_offset;
 +
 +      /* Session counter for the chipset */
 +      int session_counter;
 +
 +      /* Frames scheduled for transmission, not handled yet */
 +      struct sk_buff_head tx_queue[NUM_TX_QUEUES];
 +      int tx_queue_count;
 +
 +      struct work_struct tx_work;
 +
 +      /* Pending TX frames */
 +      unsigned long tx_frames_map[BITS_TO_LONGS(ACX_TX_DESCRIPTORS)];
 +      struct sk_buff *tx_frames[ACX_TX_DESCRIPTORS];
 +      int tx_frames_cnt;
 +
 +      /* Security sequence number counters */
 +      u8 tx_security_last_seq;
 +      s64 tx_security_seq;
 +
 +      /* FW Rx counter */
 +      u32 rx_counter;
 +
 +      /* Rx memory pool address */
 +      struct wl1271_rx_mem_pool_addr rx_mem_pool_addr;
 +
 +      /* Intermediate buffer, used for packet aggregation */
 +      u8 *aggr_buf;
 +
 +      /* The target interrupt mask */
 +      struct work_struct irq_work;
 +
 +      /* Hardware recovery work */
 +      struct work_struct recovery_work;
 +
 +      /* The mbox event mask */
 +      u32 event_mask;
 +
 +      /* Mailbox pointers */
 +      u32 mbox_ptr[2];
 +
 +      /* Are we currently scanning */
 +      struct wl1271_scan scan;
 +      struct delayed_work scan_complete_work;
 +
 +      /* probe-req template for the current AP */
 +      struct sk_buff *probereq;
 +
 +      /* Our association ID */
 +      u16 aid;
 +
 +      /*
 +       * currently configured rate set:
 +       *      bits  0-15 - 802.11abg rates
 +       *      bits 16-23 - 802.11n   MCS index mask
 +       * only 1 stream is supported, thus only 8 bits for the MCS rates (0-7).
 +       */
 +      u32 sta_rate_set;
 +      u32 basic_rate_set;
 +      u32 basic_rate;
 +      u32 rate_set;
 +
 +      /* The current band */
 +      enum ieee80211_band band;
 +
 +      /* Beaconing interval (needed for ad-hoc) */
 +      u32 beacon_int;
 +
 +      /* Default key (for WEP) */
 +      u32 default_key;
 +
 +      unsigned int filters;
 +      unsigned int rx_config;
 +      unsigned int rx_filter;
 +
 +      struct completion *elp_compl;
 +      struct delayed_work elp_work;
 +      struct delayed_work pspoll_work;
 +
 +      /* counter for ps-poll delivery failures */
 +      int ps_poll_failures;
 +
 +      /* retry counter for PSM entries */
 +      u8 psm_entry_retry;
 +
 +      /* in dBm */
 +      int power_level;
 +
 +      int rssi_thold;
 +      int last_rssi_event;
 +
 +      struct wl1271_stats stats;
 +      struct dentry *rootdir;
 +
 +      __le32 buffer_32;
 +      u32 buffer_cmd;
 +      u32 buffer_busyword[WL1271_BUSY_WORD_CNT];
 +
 +      struct wl1271_fw_status *fw_status;
 +      struct wl1271_tx_hw_res_if *tx_res_if;
 +
 +      struct ieee80211_vif *vif;
 +
 +      /* Current chipset configuration */
 +      struct conf_drv_settings conf;
 +
 +      bool sg_enabled;
 +
 +      bool enable_11a;
 +
 +      struct list_head list;
 +
 +      /* Most recently reported noise in dBm */
 +      s8 noise;
 +};
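 +
 +/*
 + * Illustrative sketch: the WL1271_FLAG_* values above are bit numbers
 + * in wl->flags and are accessed with the atomic bitops, e.g.:
 + */
 +static inline bool example_sta_associated(struct wl1271 *wl)
 +{
 +      return test_bit(WL1271_FLAG_STA_ASSOCIATED, &wl->flags);
 +}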
 +
 +int wl1271_plt_start(struct wl1271 *wl);
 +int wl1271_plt_stop(struct wl1271 *wl);
 +
 +#define JOIN_TIMEOUT 5000 /* 5000 milliseconds to join */
 +
 +#define SESSION_COUNTER_MAX 7 /* maximum value for the session counter */
 +
 +#define WL1271_DEFAULT_POWER_LEVEL 0
 +
 +#define WL1271_TX_QUEUE_LOW_WATERMARK  10
 +#define WL1271_TX_QUEUE_HIGH_WATERMARK 25
 +
 +/* WL1271 needs a 200ms sleep after power on, and a 20ms sleep before power
 +   on in case it has been shut down shortly before */
- #define WL1271_PRE_POWER_ON_SLEEP 20 /* in miliseconds */
- #define WL1271_POWER_ON_SLEEP 200 /* in miliseconds */
++#define WL1271_PRE_POWER_ON_SLEEP 20 /* in milliseconds */
++#define WL1271_POWER_ON_SLEEP 200 /* in milliseconds */
 +
 +/* Macros to handle wl1271.sta_rate_set */
 +#define HW_BG_RATES_MASK      0xffff
 +#define HW_HT_RATES_OFFSET    16
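 +
 +/*
 + * Illustrative sketch: splitting wl->sta_rate_set with the two macros
 + * above (bits 0-15: 802.11abg rates, bits 16-23: MCS 0-7 mask).
 + */
 +static inline void example_split_rate_set(u32 rate_set, u16 *bg, u8 *mcs)
 +{
 +      *bg = rate_set & HW_BG_RATES_MASK;
 +      *mcs = rate_set >> HW_HT_RATES_OFFSET;
 +}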
 +
 +#endif
@@@ -37,9 -37,6 +37,9 @@@
  #include <linux/workqueue.h>
  #include <linux/debugfs.h>
  #include <linux/slab.h>
 +#include <linux/input.h>
 +#include <linux/input/sparse-keymap.h>
 +#include <linux/dmi.h>
  
  #include <acpi/acpi_drivers.h>
  
@@@ -51,7 -48,6 +51,7 @@@ MODULE_LICENSE("GPL")
  #define ACER_ERR KERN_ERR ACER_LOGPREFIX
  #define ACER_NOTICE KERN_NOTICE ACER_LOGPREFIX
  #define ACER_INFO KERN_INFO ACER_LOGPREFIX
 +#define ACER_WARNING KERN_WARNING ACER_LOGPREFIX
  
  /*
   * Magic Number
  #define AMW0_GUID2            "431F16ED-0C2B-444C-B267-27DEB140CF9C"
  #define WMID_GUID1            "6AF4F258-B401-42fd-BE91-3D4AC2D7C0D3"
  #define WMID_GUID2            "95764E09-FB56-4e83-B31A-37761F60994A"
 +#define WMID_GUID3            "61EF69EA-865C-4BC3-A502-A0DEBA0CB531"
 +
 +/*
 + * Acer ACPI event GUIDs
 + */
 +#define ACERWMID_EVENT_GUID "676AA15E-6A47-4D9F-A2CC-1E6D18D14026"
  
  MODULE_ALIAS("wmi:67C3371D-95A3-4C37-BB61-DD47B491DAAB");
  MODULE_ALIAS("wmi:6AF4F258-B401-42fd-BE91-3D4AC2D7C0D3");
 +MODULE_ALIAS("wmi:676AA15E-6A47-4D9F-A2CC-1E6D18D14026");
 +
 +enum acer_wmi_event_ids {
 +      WMID_HOTKEY_EVENT = 0x1,
 +};
 +
 +static const struct key_entry acer_wmi_keymap[] = {
 +      {KE_KEY, 0x01, {KEY_WLAN} },     /* WiFi */
 +      {KE_KEY, 0x12, {KEY_BLUETOOTH} },       /* BT */
 +      {KE_KEY, 0x21, {KEY_PROG1} },    /* Backup */
 +      {KE_KEY, 0x22, {KEY_PROG2} },    /* Arcade */
 +      {KE_KEY, 0x23, {KEY_PROG3} },    /* P_Key */
 +      {KE_KEY, 0x24, {KEY_PROG4} },    /* Social Networking Key */
 +      {KE_KEY, 0x64, {KEY_SWITCHVIDEOMODE} }, /* Display Switch */
 +      {KE_KEY, 0x82, {KEY_F22} },      /* Touch Pad On/Off */
 +      {KE_END, 0}
 +};
 +
 +static struct input_dev *acer_wmi_input_dev;
 +
 +struct event_return_value {
 +      u8 function;
 +      u8 key_num;
 +      u16 device_state;
 +      u32 reserved;
 +} __attribute__((packed));
 +
 +/*
 + * GUID3 Get Device Status device flags
 + */
 +#define ACER_WMID3_GDS_WIRELESS               (1<<0)  /* WiFi */
 +#define ACER_WMID3_GDS_THREEG         (1<<6)  /* 3G */
 +#define ACER_WMID3_GDS_BLUETOOTH      (1<<11) /* BT */
 +
 +struct lm_input_params {
 +      u8 function_num;        /* Function Number */
 +      u16 commun_devices;     /* Communication type devices default status */
 +      u16 devices;            /* Other type devices default status */
 +      u8 lm_status;           /* Launch Manager Status */
 +      u16 reserved;
 +} __attribute__((packed));
 +
 +struct lm_return_value {
 +      u8 error_code;          /* Error Code */
 +      u8 ec_return_value;     /* EC Return Value */
 +      u16 reserved;
 +} __attribute__((packed));
 +
 +struct wmid3_gds_input_param {        /* Get Device Status input parameter */
 +      u8 function_num;        /* Function Number */
 +      u8 hotkey_number;       /* Hotkey Number */
 +      u16 devices;            /* Get Device */
 +} __attribute__((packed));
 +
 +struct wmid3_gds_return_value {       /* Get Device Status return value*/
 +      u8 error_code;          /* Error Code */
 +      u8 ec_return_value;     /* EC Return Value */
 +      u16 devices;            /* Current Device Status */
 +      u32 reserved;
 +} __attribute__((packed));
 +
 +struct hotkey_function_type_aa {
 +      u8 type;
 +      u8 length;
 +      u16 handle;
 +      u16 commun_func_bitmap;
 +} __attribute__((packed));
  
  /*
   * Interface capability flags
@@@ -193,19 -116,15 +193,19 @@@ static int mailled = -1
  static int brightness = -1;
  static int threeg = -1;
  static int force_series;
 +static bool ec_raw_mode;
 +static bool has_type_aa;
  
  module_param(mailled, int, 0444);
  module_param(brightness, int, 0444);
  module_param(threeg, int, 0444);
  module_param(force_series, int, 0444);
 +module_param(ec_raw_mode, bool, 0444);
  MODULE_PARM_DESC(mailled, "Set initial state of Mail LED");
  MODULE_PARM_DESC(brightness, "Set initial LCD backlight brightness");
  MODULE_PARM_DESC(threeg, "Set initial state of 3G hardware");
  MODULE_PARM_DESC(force_series, "Force a different laptop series");
 +MODULE_PARM_DESC(ec_raw_mode, "Enable EC raw mode");
  
  struct acer_data {
        int mailled;
@@@ -221,7 -140,6 +221,7 @@@ struct acer_debug 
  
  static struct rfkill *wireless_rfkill;
  static struct rfkill *bluetooth_rfkill;
 +static struct rfkill *threeg_rfkill;
  
  /* Each low-level interface must define at least some of the following */
  struct wmi_interface {
@@@ -835,28 -753,6 +835,28 @@@ static acpi_status WMID_set_u32(u32 val
        return WMI_execute_u32(method_id, (u32)value, NULL);
  }
  
 +static void type_aa_dmi_decode(const struct dmi_header *header, void *dummy)
 +{
 +      struct hotkey_function_type_aa *type_aa;
 +
 +      /* We are looking for OEM-specific Type AAh */
 +      if (header->type != 0xAA)
 +              return;
 +
 +      has_type_aa = true;
 +      type_aa = (struct hotkey_function_type_aa *) header;
 +
 +      printk(ACER_INFO "Function bitmap for Communication Button: 0x%x\n",
 +              type_aa->commun_func_bitmap);
 +
 +      if (type_aa->commun_func_bitmap & ACER_WMID3_GDS_WIRELESS)
 +              interface->capability |= ACER_CAP_WIRELESS;
 +      if (type_aa->commun_func_bitmap & ACER_WMID3_GDS_THREEG)
 +              interface->capability |= ACER_CAP_THREEG;
 +      if (type_aa->commun_func_bitmap & ACER_WMID3_GDS_BLUETOOTH)
 +              interface->capability |= ACER_CAP_BLUETOOTH;
 +}
 +
  static acpi_status WMID_set_capabilities(void)
  {
        struct acpi_buffer out = {ACPI_ALLOCATE_BUFFER, NULL};
                return AE_ERROR;
        }
  
 -      /* Not sure on the meaning of the relevant bits yet to detect these */
 -      interface->capability |= ACER_CAP_WIRELESS;
 -      interface->capability |= ACER_CAP_THREEG;
 +      dmi_walk(type_aa_dmi_decode, NULL);
 +      if (!has_type_aa) {
 +              interface->capability |= ACER_CAP_WIRELESS;
 +              interface->capability |= ACER_CAP_THREEG;
 +              if (devices & 0x10)
 +                      interface->capability |= ACER_CAP_BLUETOOTH;
 +      }
  
        /* WMID always provides brightness methods */
        interface->capability |= ACER_CAP_BRIGHTNESS;
  
 -      if (devices & 0x10)
 -              interface->capability |= ACER_CAP_BLUETOOTH;
 -
        if (!(devices & 0x20))
                max_brightness = 0x9;
  
@@@ -966,8 -861,7 +966,8 @@@ static void __init acer_commandline_ini
         * capability isn't available on the given interface
         */
        set_u32(mailled, ACER_CAP_MAILLED);
 -      set_u32(threeg, ACER_CAP_THREEG);
 +      if (!has_type_aa)
 +              set_u32(threeg, ACER_CAP_THREEG);
        set_u32(brightness, ACER_CAP_BRIGHTNESS);
  }
  
@@@ -1021,7 -915,7 +1021,7 @@@ static int update_bl_status(struct back
        return 0;
  }
  
- static struct backlight_ops acer_bl_ops = {
+ static const struct backlight_ops acer_bl_ops = {
        .get_brightness = read_brightness,
        .update_status = update_bl_status,
  };
@@@ -1054,79 -948,6 +1054,79 @@@ static void acer_backlight_exit(void
        backlight_device_unregister(acer_backlight_device);
  }
  
 +static acpi_status wmid3_get_device_status(u32 *value, u16 device)
 +{
 +      struct wmid3_gds_return_value return_value;
 +      acpi_status status;
 +      union acpi_object *obj;
 +      struct wmid3_gds_input_param params = {
 +              .function_num = 0x1,
 +              .hotkey_number = 0x01,
 +              .devices = device,
 +      };
 +      struct acpi_buffer input = {
 +              sizeof(struct wmid3_gds_input_param),
 +              &params
 +      };
 +      struct acpi_buffer output = { ACPI_ALLOCATE_BUFFER, NULL };
 +
 +      status = wmi_evaluate_method(WMID_GUID3, 0, 0x2, &input, &output);
 +      if (ACPI_FAILURE(status))
 +              return status;
 +
 +      obj = output.pointer;
 +
 +      if (!obj)
 +              return AE_ERROR;
 +      else if (obj->type != ACPI_TYPE_BUFFER) {
 +              kfree(obj);
 +              return AE_ERROR;
 +      }
 +      if (obj->buffer.length != 8) {
 +              printk(ACER_WARNING "Unknown buffer length %d\n",
 +                      obj->buffer.length);
 +              kfree(obj);
 +              return AE_ERROR;
 +      }
 +
 +      return_value = *((struct wmid3_gds_return_value *)obj->buffer.pointer);
 +      kfree(obj);
 +
 +      if (return_value.error_code || return_value.ec_return_value)
 +              printk(ACER_WARNING "Get Device Status failed: "
 +                      "0x%x - 0x%x\n", return_value.error_code,
 +                      return_value.ec_return_value);
 +      else
 +              *value = !!(return_value.devices & device);
 +
 +      return status;
 +}
 +
 +static acpi_status get_device_status(u32 *value, u32 cap)
 +{
 +      if (wmi_has_guid(WMID_GUID3)) {
 +              u16 device;
 +
 +              switch (cap) {
 +              case ACER_CAP_WIRELESS:
 +                      device = ACER_WMID3_GDS_WIRELESS;
 +                      break;
 +              case ACER_CAP_BLUETOOTH:
 +                      device = ACER_WMID3_GDS_BLUETOOTH;
 +                      break;
 +              case ACER_CAP_THREEG:
 +                      device = ACER_WMID3_GDS_THREEG;
 +                      break;
 +              default:
 +                      return AE_ERROR;
 +              }
 +              return wmid3_get_device_status(value, device);
 +
 +      } else {
 +              return get_u32(value, cap);
 +      }
 +}
 +
  /*
   * Rfkill devices
   */
@@@ -1147,13 -968,6 +1147,13 @@@ static void acer_rfkill_update(struct w
                        rfkill_set_sw_state(bluetooth_rfkill, !state);
        }
  
 +      if (has_cap(ACER_CAP_THREEG) && wmi_has_guid(WMID_GUID3)) {
 +              status = wmid3_get_device_status(&state,
 +                              ACER_WMID3_GDS_THREEG);
 +              if (ACPI_SUCCESS(status))
 +                      rfkill_set_sw_state(threeg_rfkill, !state);
 +      }
 +
        schedule_delayed_work(&acer_rfkill_work, round_jiffies_relative(HZ));
  }
  
@@@ -1177,8 -991,6 +1177,8 @@@ static struct rfkill *acer_rfkill_regis
  {
        int err;
        struct rfkill *rfkill_dev;
 +      u32 state;
 +      acpi_status status;
  
        rfkill_dev = rfkill_alloc(name, dev, type,
                                  &acer_rfkill_ops,
        if (!rfkill_dev)
                return ERR_PTR(-ENOMEM);
  
 +      status = get_device_status(&state, cap);
 +      if (ACPI_SUCCESS(status))
 +              rfkill_init_sw_state(rfkill_dev, !state);
 +
        err = rfkill_register(rfkill_dev);
        if (err) {
                rfkill_destroy(rfkill_dev);
@@@ -1216,19 -1024,6 +1216,19 @@@ static int acer_rfkill_init(struct devi
                }
        }
  
 +      if (has_cap(ACER_CAP_THREEG)) {
 +              threeg_rfkill = acer_rfkill_register(dev,
 +                      RFKILL_TYPE_WWAN, "acer-threeg",
 +                      ACER_CAP_THREEG);
 +              if (IS_ERR(threeg_rfkill)) {
 +                      rfkill_unregister(wireless_rfkill);
 +                      rfkill_destroy(wireless_rfkill);
 +                      rfkill_unregister(bluetooth_rfkill);
 +                      rfkill_destroy(bluetooth_rfkill);
 +                      return PTR_ERR(threeg_rfkill);
 +              }
 +      }
 +
        schedule_delayed_work(&acer_rfkill_work, round_jiffies_relative(HZ));
  
        return 0;
@@@ -1245,11 -1040,6 +1245,11 @@@ static void acer_rfkill_exit(void
                rfkill_unregister(bluetooth_rfkill);
                rfkill_destroy(bluetooth_rfkill);
        }
 +
 +      if (has_cap(ACER_CAP_THREEG)) {
 +              rfkill_unregister(threeg_rfkill);
 +              rfkill_destroy(threeg_rfkill);
 +      }
        return;
  }
  
@@@ -1260,12 -1050,7 +1260,12 @@@ static ssize_t show_bool_threeg(struct 
        struct device_attribute *attr, char *buf)
  {
        u32 result;
 -      acpi_status status = get_u32(&result, ACER_CAP_THREEG);
 +      acpi_status status;
 +      if (wmi_has_guid(WMID_GUID3))
 +              status = wmid3_get_device_status(&result,
 +                              ACER_WMID3_GDS_THREEG);
 +      else
 +              status = get_u32(&result, ACER_CAP_THREEG);
        if (ACPI_SUCCESS(status))
                return sprintf(buf, "%u\n", result);
        return sprintf(buf, "Read error\n");
@@@ -1300,178 -1085,6 +1300,178 @@@ static ssize_t show_interface(struct de
  
  static DEVICE_ATTR(interface, S_IRUGO, show_interface, NULL);
  
 +static void acer_wmi_notify(u32 value, void *context)
 +{
 +      struct acpi_buffer response = { ACPI_ALLOCATE_BUFFER, NULL };
 +      union acpi_object *obj;
 +      struct event_return_value return_value;
 +      acpi_status status;
 +
 +      status = wmi_get_event_data(value, &response);
 +      if (status != AE_OK) {
 +              printk(ACER_WARNING "bad event status 0x%x\n", status);
 +              return;
 +      }
 +
 +      obj = (union acpi_object *)response.pointer;
 +
 +      if (!obj)
 +              return;
 +      if (obj->type != ACPI_TYPE_BUFFER) {
 +              printk(ACER_WARNING "Unknown response received %d\n",
 +                      obj->type);
 +              kfree(obj);
 +              return;
 +      }
 +      if (obj->buffer.length != 8) {
 +              printk(ACER_WARNING "Unknown buffer length %d\n",
 +                      obj->buffer.length);
 +              kfree(obj);
 +              return;
 +      }
 +
 +      return_value = *((struct event_return_value *)obj->buffer.pointer);
 +      kfree(obj);
 +
 +      switch (return_value.function) {
 +      case WMID_HOTKEY_EVENT:
 +              if (!sparse_keymap_report_event(acer_wmi_input_dev,
 +                              return_value.key_num, 1, true))
 +                      printk(ACER_WARNING "Unknown key number - 0x%x\n",
 +                              return_value.key_num);
 +              break;
 +      default:
 +              printk(ACER_WARNING "Unknown function number - %d - %d\n",
 +                      return_value.function, return_value.key_num);
 +              break;
 +      }
 +}
 +
 +static acpi_status
 +wmid3_set_lm_mode(struct lm_input_params *params,
 +                struct lm_return_value *return_value)
 +{
 +      acpi_status status;
 +      union acpi_object *obj;
 +
 +      struct acpi_buffer input = { sizeof(struct lm_input_params), params };
 +      struct acpi_buffer output = { ACPI_ALLOCATE_BUFFER, NULL };
 +
 +      status = wmi_evaluate_method(WMID_GUID3, 0, 0x1, &input, &output);
 +      if (ACPI_FAILURE(status))
 +              return status;
 +
 +      obj = output.pointer;
 +
 +      if (!obj)
 +              return AE_ERROR;
 +      else if (obj->type != ACPI_TYPE_BUFFER) {
 +              kfree(obj);
 +              return AE_ERROR;
 +      }
 +      if (obj->buffer.length != 4) {
 +              printk(ACER_WARNING "Unknown buffer length %d\n",
 +                     obj->buffer.length);
 +              kfree(obj);
 +              return AE_ERROR;
 +      }
 +
 +      *return_value = *((struct lm_return_value *)obj->buffer.pointer);
 +      kfree(obj);
 +
 +      return status;
 +}
 +
 +static int acer_wmi_enable_ec_raw(void)
 +{
 +      struct lm_return_value return_value;
 +      acpi_status status;
 +      struct lm_input_params params = {
 +              .function_num = 0x1,
 +              .commun_devices = 0xFFFF,
 +              .devices = 0xFFFF,
 +              .lm_status = 0x00,            /* Launch Manager Inactive */
 +      };
 +
 +      status = wmid3_set_lm_mode(&params, &return_value);
 +
 +      if (return_value.error_code || return_value.ec_return_value)
 +              printk(ACER_WARNING "Enabling EC raw mode failed: "
 +                     "0x%x - 0x%x\n", return_value.error_code,
 +                     return_value.ec_return_value);
 +      else
 +              printk(ACER_INFO "Enabled EC raw mode\n");
 +
 +      return status;
 +}
 +
 +static int acer_wmi_enable_lm(void)
 +{
 +      struct lm_return_value return_value;
 +      acpi_status status;
 +      struct lm_input_params params = {
 +              .function_num = 0x1,
 +              .commun_devices = 0xFFFF,
 +              .devices = 0xFFFF,
 +              .lm_status = 0x01,            /* Launch Manager Active */
 +      };
 +
 +      status = wmid3_set_lm_mode(&params, &return_value);
 +
 +      if (return_value.error_code || return_value.ec_return_value)
 +              printk(ACER_WARNING "Enabling Launch Manager failed: "
 +                     "0x%x - 0x%x\n", return_value.error_code,
 +                     return_value.ec_return_value);
 +
 +      return status;
 +}
 +
 +static int __init acer_wmi_input_setup(void)
 +{
 +      acpi_status status;
 +      int err;
 +
 +      acer_wmi_input_dev = input_allocate_device();
 +      if (!acer_wmi_input_dev)
 +              return -ENOMEM;
 +
 +      acer_wmi_input_dev->name = "Acer WMI hotkeys";
 +      acer_wmi_input_dev->phys = "wmi/input0";
 +      acer_wmi_input_dev->id.bustype = BUS_HOST;
 +
 +      err = sparse_keymap_setup(acer_wmi_input_dev, acer_wmi_keymap, NULL);
 +      if (err)
 +              goto err_free_dev;
 +
 +      status = wmi_install_notify_handler(ACERWMID_EVENT_GUID,
 +                                              acer_wmi_notify, NULL);
 +      if (ACPI_FAILURE(status)) {
 +              err = -EIO;
 +              goto err_free_keymap;
 +      }
 +
 +      err = input_register_device(acer_wmi_input_dev);
 +      if (err)
 +              goto err_uninstall_notifier;
 +
 +      return 0;
 +
 +err_uninstall_notifier:
 +      wmi_remove_notify_handler(ACERWMID_EVENT_GUID);
 +err_free_keymap:
 +      sparse_keymap_free(acer_wmi_input_dev);
 +err_free_dev:
 +      input_free_device(acer_wmi_input_dev);
 +      return err;
 +}
 +
 +static void acer_wmi_input_destroy(void)
 +{
 +      wmi_remove_notify_handler(ACERWMID_EVENT_GUID);
 +      sparse_keymap_free(acer_wmi_input_dev);
 +      input_unregister_device(acer_wmi_input_dev);
 +}
 +
  /*
   * debugfs functions
   */
@@@ -1714,26 -1327,6 +1714,26 @@@ static int __init acer_wmi_init(void
                       "generic video driver\n");
        }
  
 +      if (wmi_has_guid(WMID_GUID3)) {
 +              if (ec_raw_mode) {
 +                      if (ACPI_FAILURE(acer_wmi_enable_ec_raw())) {
 +                              printk(ACER_ERR "Cannot enable EC raw mode\n");
 +                              return -ENODEV;
 +                      }
 +              } else if (ACPI_FAILURE(acer_wmi_enable_lm())) {
 +                      printk(ACER_ERR "Cannot enable Launch Manager mode\n");
 +                      return -ENODEV;
 +              }
 +      } else if (ec_raw_mode) {
 +              printk(ACER_INFO "No WMID EC raw mode enable method\n");
 +      }
 +
 +      if (wmi_has_guid(ACERWMID_EVENT_GUID)) {
 +              err = acer_wmi_input_setup();
 +              if (err)
 +                      return err;
 +      }
 +
        err = platform_driver_register(&acer_platform_driver);
        if (err) {
                printk(ACER_ERR "Unable to register platform driver.\n");
@@@ -1775,17 -1368,11 +1775,17 @@@ error_device_add
  error_device_alloc:
        platform_driver_unregister(&acer_platform_driver);
  error_platform_register:
 +      if (wmi_has_guid(ACERWMID_EVENT_GUID))
 +              acer_wmi_input_destroy();
 +
        return err;
  }
  
  static void __exit acer_wmi_exit(void)
  {
 +      if (wmi_has_guid(ACERWMID_EVENT_GUID))
 +              acer_wmi_input_destroy();
 +
        remove_sysfs(acer_platform_device);
        remove_debugfs();
        platform_device_unregister(acer_platform_device);
@@@ -529,15 -529,6 +529,15 @@@ static void tpd_led_set(struct led_clas
        queue_work(eeepc->led_workqueue, &eeepc->tpd_led_work);
  }
  
 +static enum led_brightness tpd_led_get(struct led_classdev *led_cdev)
 +{
 +      struct eeepc_laptop *eeepc;
 +
 +      eeepc = container_of(led_cdev, struct eeepc_laptop, tpd_led);
 +
 +      return get_acpi(eeepc, CM_ASL_TPD);
 +}
 +
  static int eeepc_led_init(struct eeepc_laptop *eeepc)
  {
        int rv;
  
        eeepc->tpd_led.name = "eeepc::touchpad";
        eeepc->tpd_led.brightness_set = tpd_led_set;
 +      if (get_acpi(eeepc, CM_ASL_TPD) >= 0) /* if method is available */
 +              eeepc->tpd_led.brightness_get = tpd_led_get;
        eeepc->tpd_led.max_brightness = 1;
  
        rv = led_classdev_register(&eeepc->platform_device->dev,
@@@ -1126,7 -1115,7 +1126,7 @@@ static int update_bl_status(struct back
        return set_brightness(bd, bd->props.brightness);
  }
  
- static struct backlight_ops eeepcbl_ops = {
+ static const struct backlight_ops eeepcbl_ops = {
        .get_brightness = read_brightness,
        .update_status = update_bl_status,
  };
@@@ -437,7 -437,7 +437,7 @@@ static int bl_update_status(struct back
        return ret;
  }
  
- static struct backlight_ops fujitsubl_ops = {
+ static const struct backlight_ops fujitsubl_ops = {
        .get_brightness = bl_get_brightness,
        .update_status = bl_update_status,
  };
@@@ -1240,7 -1240,7 +1240,7 @@@ MODULE_ALIAS("dmi:*:svnFUJITSUSIEMENS:*
  MODULE_ALIAS("dmi:*:svnFUJITSUSIEMENS:*:pvr:rvnFUJITSU:rnFJNB1E6:*:cvrS6420:*");
  MODULE_ALIAS("dmi:*:svnFUJITSU:*:pvr:rvnFUJITSU:rnFJNB19C:*:cvrS7020:*");
  
 -static struct pnp_device_id pnp_ids[] = {
 +static struct pnp_device_id pnp_ids[] __used = {
        {.id = "FUJ02bf"},
        {.id = "FUJ02B1"},
        {.id = "FUJ02E3"},
@@@ -235,7 -235,6 +235,7 @@@ static int sony_laptop_input_index[] = 
        57,     /* 70 SONYPI_EVENT_VOLUME_DEC_PRESSED */
        -1,     /* 71 SONYPI_EVENT_BRIGHTNESS_PRESSED */
        58,     /* 72 SONYPI_EVENT_MEDIA_PRESSED */
 +      59,     /* 73 SONYPI_EVENT_VENDOR_PRESSED */
  };
  
  static int sony_laptop_input_keycode_map[] = {
        KEY_VOLUMEUP,   /* 56 SONYPI_EVENT_VOLUME_INC_PRESSED */
        KEY_VOLUMEDOWN, /* 57 SONYPI_EVENT_VOLUME_DEC_PRESSED */
        KEY_MEDIA,      /* 58 SONYPI_EVENT_MEDIA_PRESSED */
 +      KEY_VENDOR,     /* 59 SONYPI_EVENT_VENDOR_PRESSED */
  };
  
  /* release buttons after a short delay if pressed */
@@@ -858,7 -856,7 +858,7 @@@ static int sony_backlight_get_brightnes
  }
  
  static struct backlight_device *sony_backlight_device;
- static struct backlight_ops sony_backlight_ops = {
+ static const struct backlight_ops sony_backlight_ops = {
        .update_status = sony_backlight_update_status,
        .get_brightness = sony_backlight_get_brightness,
  };
@@@ -896,18 -894,10 +896,18 @@@ static struct sony_nc_event sony_100_ev
        { 0x0A, SONYPI_EVENT_FNKEY_RELEASED },
        { 0x8C, SONYPI_EVENT_FNKEY_F12 },
        { 0x0C, SONYPI_EVENT_FNKEY_RELEASED },
 +      { 0x9d, SONYPI_EVENT_ZOOM_PRESSED },
 +      { 0x1d, SONYPI_EVENT_ANYBUTTON_RELEASED },
        { 0x9f, SONYPI_EVENT_CD_EJECT_PRESSED },
        { 0x1f, SONYPI_EVENT_ANYBUTTON_RELEASED },
        { 0xa1, SONYPI_EVENT_MEDIA_PRESSED },
        { 0x21, SONYPI_EVENT_ANYBUTTON_RELEASED },
 +      { 0xa4, SONYPI_EVENT_CD_EJECT_PRESSED },
 +      { 0x24, SONYPI_EVENT_ANYBUTTON_RELEASED },
 +      { 0xa5, SONYPI_EVENT_VENDOR_PRESSED },
 +      { 0x25, SONYPI_EVENT_ANYBUTTON_RELEASED },
 +      { 0xa6, SONYPI_EVENT_HELP_PRESSED },
 +      { 0x26, SONYPI_EVENT_ANYBUTTON_RELEASED },
        { 0, 0 },
  };
  
@@@ -1141,7 -1131,7 +1141,7 @@@ static int sony_nc_setup_rfkill(struct 
        return err;
  }
  
 -static void sony_nc_rfkill_update()
 +static void sony_nc_rfkill_update(void)
  {
        enum sony_nc_rfkill i;
        int result;
@@@ -589,7 -589,6 +589,7 @@@ static int acpi_evalf(acpi_handle handl
                default:
                        printk(TPACPI_ERR "acpi_evalf() called "
                               "with invalid format character '%c'\n", c);
 +                      va_end(ap);
                        return 0;
                }
        }
@@@ -6110,7 -6109,7 +6110,7 @@@ static void tpacpi_brightness_notify_ch
                               BACKLIGHT_UPDATE_HOTKEY);
  }
  
- static struct backlight_ops ibm_backlight_data = {
+ static const struct backlight_ops ibm_backlight_data = {
        .get_brightness = brightness_get,
        .update_status  = brightness_update_status,
  };
@@@ -6346,7 -6345,7 +6346,7 @@@ static int __init brightness_init(struc
                        "as change notification\n");
        tpacpi_hotkey_driver_mask_set(hotkey_driver_mask
                                | TP_ACPI_HKEY_BRGHTUP_MASK
 -                              | TP_ACPI_HKEY_BRGHTDWN_MASK);;
 +                              | TP_ACPI_HKEY_BRGHTDWN_MASK);
        return 0;
  }
  
@@@ -7194,7 -7193,7 +7194,7 @@@ static struct ibm_struct volume_driver_
   *            TPACPI_FAN_WR_ACPI_FANS (X31/X40/X41)
   *
   *    FIRMWARE BUG: on some models, EC 0x2f might not be initialized at
-  *    boot. Apparently the EC does not intialize it, so unless ACPI DSDT
+  *    boot. Apparently the EC does not initialize it, so unless ACPI DSDT
   *    does so, its initial value is meaningless (0x07).
   *
   *    For firmware bugs, refer to:
diff --combined drivers/s390/net/lcs.c
@@@ -26,7 -26,6 +26,7 @@@
  #define KMSG_COMPONENT                "lcs"
  #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
  
 +#include <linux/kernel_stat.h>
  #include <linux/module.h>
  #include <linux/if.h>
  #include <linux/netdevice.h>
@@@ -841,7 -840,7 +841,7 @@@ lcs_notify_lancmd_waiters(struct lcs_ca
  }
  
  /**
-  * Emit buffer of a lan comand.
+  * Emit buffer of a LAN command.
   */
  static void
  lcs_lancmd_timeout(unsigned long data)
@@@ -1189,8 -1188,7 +1189,8 @@@ lcs_remove_mc_addresses(struct lcs_car
        spin_lock_irqsave(&card->ipm_lock, flags);
        list_for_each(l, &card->ipm_list) {
                ipm = list_entry(l, struct lcs_ipm_list, list);
 -              for (im4 = in4_dev->mc_list; im4 != NULL; im4 = im4->next) {
 +              for (im4 = rcu_dereference(in4_dev->mc_list);
 +                   im4 != NULL; im4 = rcu_dereference(im4->next_rcu)) {
                        lcs_get_mac_for_ipm(im4->multiaddr, buf, card->dev);
                        if ( (ipm->ipm.ip_addr == im4->multiaddr) &&
                             (memcmp(buf, &ipm->ipm.mac_addr,
@@@ -1235,8 -1233,7 +1235,8 @@@ lcs_set_mc_addresses(struct lcs_card *c
        unsigned long flags;
  
        LCS_DBF_TEXT(4, trace, "setmclst");
 -      for (im4 = in4_dev->mc_list; im4; im4 = im4->next) {
 +      for (im4 = rcu_dereference(in4_dev->mc_list); im4 != NULL;
 +           im4 = rcu_dereference(im4->next_rcu)) {
                lcs_get_mac_for_ipm(im4->multiaddr, buf, card->dev);
                ipm = lcs_check_addr_entry(card, im4, buf);
                if (ipm != NULL)
@@@ -1272,10 -1269,10 +1272,10 @@@ lcs_register_mc_addresses(void *data
        in4_dev = in_dev_get(card->dev);
        if (in4_dev == NULL)
                goto out;
 -      read_lock(&in4_dev->mc_list_lock);
 +      rcu_read_lock();
        lcs_remove_mc_addresses(card,in4_dev);
        lcs_set_mc_addresses(card, in4_dev);
 -      read_unlock(&in4_dev->mc_list_lock);
 +      rcu_read_unlock();
        in_dev_put(in4_dev);
  
        netif_carrier_off(card->dev);
@@@ -1399,7 -1396,6 +1399,7 @@@ lcs_irq(struct ccw_device *cdev, unsign
        int rc, index;
        int cstat, dstat;
  
 +      kstat_cpu(smp_processor_id()).irqs[IOINT_LCS]++;
        if (lcs_check_irb_error(cdev, irb))
                return;
  
@@@ -288,7 -288,7 +288,7 @@@ void zfcp_cfdc_adapter_access_changed(s
                    (status & ZFCP_STATUS_COMMON_ACCESS_BOXED))
                        zfcp_erp_port_reopen(port,
                                             ZFCP_STATUS_COMMON_ERP_FAILED,
 -                                           "cfaac_1", NULL);
 +                                           "cfaac_1");
        }
        read_unlock_irqrestore(&adapter->port_list_lock, flags);
  
                    (status & ZFCP_STATUS_COMMON_ACCESS_BOXED))
                        zfcp_erp_lun_reopen(sdev,
                                            ZFCP_STATUS_COMMON_ERP_FAILED,
 -                                          "cfaac_2", NULL);
 +                                          "cfaac_2");
        }
  }
  
@@@ -317,7 -317,7 +317,7 @@@ static void zfcp_act_eval_err(struct zf
  
  /**
   * zfcp_cfdc_port_denied - Process "access denied" for port
-  * @port: The port where the acces has been denied
+  * @port: The port where the access has been denied
   * @qual: The FSF status qualifier for the access denied FSF status
   */
  void zfcp_cfdc_port_denied(struct zfcp_port *port,
@@@ -426,7 -426,7 +426,7 @@@ int zfcp_cfdc_open_lun_eval(struct scsi
                        zfcp_scsi_dev_lun(sdev),
                        (unsigned long long)zfcp_sdev->port->wwpn);
                zfcp_erp_set_lun_status(sdev, ZFCP_STATUS_COMMON_ERP_FAILED);
 -              zfcp_erp_lun_shutdown(sdev, 0, "fsouh_6", NULL);
 +              zfcp_erp_lun_shutdown(sdev, 0, "fsouh_6");
                return -EACCES;
        }
  
                        zfcp_scsi_dev_lun(sdev),
                        (unsigned long long)zfcp_sdev->port->wwpn);
                zfcp_erp_set_lun_status(sdev, ZFCP_STATUS_COMMON_ERP_FAILED);
 -              zfcp_erp_lun_shutdown(sdev, 0, "fsosh_8", NULL);
 +              zfcp_erp_lun_shutdown(sdev, 0, "fsosh_8");
                return -EACCES;
        }
  
   * General Public License for more details.
   */
  
 +#include "bfad_drv.h"
  #include "bfa_modules.h"
 -#include "bfa_cb_ioim.h"
  
  BFA_TRC_FILE(HAL, FCPIM);
  BFA_MODULE(fcpim);
  
 -
 -#define bfa_fcpim_add_iostats(__l, __r, __stats)      \
 -      (__l->__stats += __r->__stats)
 -
 -
  /*
   *  BFA ITNIM Related definitions
   */
@@@ -32,12 -37,12 +32,12 @@@ static void bfa_itnim_update_del_itn_st
  #define bfa_fcpim_additn(__itnim)                                     \
        list_add_tail(&(__itnim)->qe, &(__itnim)->fcpim->itnim_q)
  #define bfa_fcpim_delitn(__itnim)     do {                            \
 -      bfa_assert(bfa_q_is_on_q(&(__itnim)->fcpim->itnim_q, __itnim));      \
 +      WARN_ON(!bfa_q_is_on_q(&(__itnim)->fcpim->itnim_q, __itnim));   \
        bfa_itnim_update_del_itn_stats(__itnim);      \
        list_del(&(__itnim)->qe);      \
 -      bfa_assert(list_empty(&(__itnim)->io_q));      \
 -      bfa_assert(list_empty(&(__itnim)->io_cleanup_q));      \
 -      bfa_assert(list_empty(&(__itnim)->pending_q));      \
 +      WARN_ON(!list_empty(&(__itnim)->io_q));                         \
 +      WARN_ON(!list_empty(&(__itnim)->io_cleanup_q));                 \
 +      WARN_ON(!list_empty(&(__itnim)->pending_q));                    \
  } while (0)
  
  #define bfa_itnim_online_cb(__itnim) do {                             \
  } while (0)
  
  /*
 - *  bfa_itnim_sm BFA itnim state machine
 + *  itnim state machine event
   */
 -
 -
  enum bfa_itnim_event {
        BFA_ITNIM_SM_CREATE = 1,        /*  itnim is created */
        BFA_ITNIM_SM_ONLINE = 2,        /*  itnim is online */
        if ((__fcpim)->profile_start)                                   \
                (__fcpim)->profile_start(__ioim);                       \
  } while (0)
 -/*
 - *  hal_ioim_sm
 - */
  
  /*
   * IO state machine events
@@@ -211,7 -221,8 +211,7 @@@ static void     bfa_itnim_sm_deleting_q
   * forward declaration for BFA IOIM functions
   */
  static bfa_boolean_t  bfa_ioim_send_ioreq(struct bfa_ioim_s *ioim);
 -static bfa_boolean_t  bfa_ioim_sge_setup(struct bfa_ioim_s *ioim);
 -static void           bfa_ioim_sgpg_setup(struct bfa_ioim_s *ioim);
 +static bfa_boolean_t  bfa_ioim_sgpg_alloc(struct bfa_ioim_s *ioim);
  static bfa_boolean_t  bfa_ioim_send_abort(struct bfa_ioim_s *ioim);
  static void           bfa_ioim_notify_cleanup(struct bfa_ioim_s *ioim);
  static void __bfa_cb_ioim_good_comp(void *cbarg, bfa_boolean_t complete);
@@@ -221,6 -232,7 +221,6 @@@ static void __bfa_cb_ioim_failed(void *
  static void __bfa_cb_ioim_pathtov(void *cbarg, bfa_boolean_t complete);
  static bfa_boolean_t    bfa_ioim_is_abortable(struct bfa_ioim_s *ioim);
  
 -
  /*
   * forward declaration of BFA IO state machine
   */
@@@ -248,13 -260,14 +248,13 @@@ static void     bfa_ioim_sm_resfree(str
                                        enum bfa_ioim_event event);
  static void   bfa_ioim_sm_cmnd_retry(struct bfa_ioim_s *ioim,
                                        enum bfa_ioim_event event);
 -
  /*
   * forward declaration for BFA TSKIM functions
   */
  static void     __bfa_cb_tskim_done(void *cbarg, bfa_boolean_t complete);
  static void     __bfa_cb_tskim_failed(void *cbarg, bfa_boolean_t complete);
  static bfa_boolean_t bfa_tskim_match_scope(struct bfa_tskim_s *tskim,
 -                                      lun_t lun);
 +                                      struct scsi_lun lun);
  static void     bfa_tskim_gather_ios(struct bfa_tskim_s *tskim);
  static void     bfa_tskim_cleanp_comp(void *tskim_cbarg);
  static void     bfa_tskim_cleanup_ios(struct bfa_tskim_s *tskim);
@@@ -262,6 -275,7 +262,6 @@@ static bfa_boolean_t bfa_tskim_send(str
  static bfa_boolean_t bfa_tskim_send_abort(struct bfa_tskim_s *tskim);
  static void     bfa_tskim_iocdisable_ios(struct bfa_tskim_s *tskim);
  
 -
  /*
   * forward declaration of BFA TSKIM state machine
   */
@@@ -279,12 -293,13 +279,12 @@@ static void     bfa_tskim_sm_cleanup_qf
                                        enum bfa_tskim_event event);
  static void     bfa_tskim_sm_hcb(struct bfa_tskim_s *tskim,
                                        enum bfa_tskim_event event);
 -
  /*
 - *  hal_fcpim_mod BFA FCP Initiator Mode module
 + *  BFA FCP Initiator Mode module
   */
  
  /*
 - *    Compute and return memory needed by FCP(im) module.
 + * Compute and return memory needed by FCP(im) module.
   */
  static void
  bfa_fcpim_meminfo(struct bfa_iocfc_cfg_s *cfg, u32 *km_len,
@@@ -342,6 -357,10 +342,6 @@@ bfa_fcpim_attach(struct bfa_s *bfa, voi
  static void
  bfa_fcpim_detach(struct bfa_s *bfa)
  {
 -      struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(bfa);
 -
 -      bfa_ioim_detach(fcpim);
 -      bfa_tskim_detach(fcpim);
  }
  
  static void
@@@ -368,6 -387,56 +368,6 @@@ bfa_fcpim_iocdisable(struct bfa_s *bfa
  }
  
  void
 -bfa_fcpim_add_stats(struct bfa_itnim_iostats_s *lstats,
 -              struct bfa_itnim_iostats_s *rstats)
 -{
 -      bfa_fcpim_add_iostats(lstats, rstats, total_ios);
 -      bfa_fcpim_add_iostats(lstats, rstats, qresumes);
 -      bfa_fcpim_add_iostats(lstats, rstats, no_iotags);
 -      bfa_fcpim_add_iostats(lstats, rstats, io_aborts);
 -      bfa_fcpim_add_iostats(lstats, rstats, no_tskims);
 -      bfa_fcpim_add_iostats(lstats, rstats, iocomp_ok);
 -      bfa_fcpim_add_iostats(lstats, rstats, iocomp_underrun);
 -      bfa_fcpim_add_iostats(lstats, rstats, iocomp_overrun);
 -      bfa_fcpim_add_iostats(lstats, rstats, iocomp_aborted);
 -      bfa_fcpim_add_iostats(lstats, rstats, iocomp_timedout);
 -      bfa_fcpim_add_iostats(lstats, rstats, iocom_nexus_abort);
 -      bfa_fcpim_add_iostats(lstats, rstats, iocom_proto_err);
 -      bfa_fcpim_add_iostats(lstats, rstats, iocom_dif_err);
 -      bfa_fcpim_add_iostats(lstats, rstats, iocom_sqer_needed);
 -      bfa_fcpim_add_iostats(lstats, rstats, iocom_res_free);
 -      bfa_fcpim_add_iostats(lstats, rstats, iocom_hostabrts);
 -      bfa_fcpim_add_iostats(lstats, rstats, iocom_utags);
 -      bfa_fcpim_add_iostats(lstats, rstats, io_cleanups);
 -      bfa_fcpim_add_iostats(lstats, rstats, io_tmaborts);
 -      bfa_fcpim_add_iostats(lstats, rstats, onlines);
 -      bfa_fcpim_add_iostats(lstats, rstats, offlines);
 -      bfa_fcpim_add_iostats(lstats, rstats, creates);
 -      bfa_fcpim_add_iostats(lstats, rstats, deletes);
 -      bfa_fcpim_add_iostats(lstats, rstats, create_comps);
 -      bfa_fcpim_add_iostats(lstats, rstats, delete_comps);
 -      bfa_fcpim_add_iostats(lstats, rstats, sler_events);
 -      bfa_fcpim_add_iostats(lstats, rstats, fw_create);
 -      bfa_fcpim_add_iostats(lstats, rstats, fw_delete);
 -      bfa_fcpim_add_iostats(lstats, rstats, ioc_disabled);
 -      bfa_fcpim_add_iostats(lstats, rstats, cleanup_comps);
 -      bfa_fcpim_add_iostats(lstats, rstats, tm_cmnds);
 -      bfa_fcpim_add_iostats(lstats, rstats, tm_fw_rsps);
 -      bfa_fcpim_add_iostats(lstats, rstats, tm_success);
 -      bfa_fcpim_add_iostats(lstats, rstats, tm_failures);
 -      bfa_fcpim_add_iostats(lstats, rstats, tm_io_comps);
 -      bfa_fcpim_add_iostats(lstats, rstats, tm_qresumes);
 -      bfa_fcpim_add_iostats(lstats, rstats, tm_iocdowns);
 -      bfa_fcpim_add_iostats(lstats, rstats, tm_cleanups);
 -      bfa_fcpim_add_iostats(lstats, rstats, tm_cleanup_comps);
 -      bfa_fcpim_add_iostats(lstats, rstats, io_comps);
 -      bfa_fcpim_add_iostats(lstats, rstats, input_reqs);
 -      bfa_fcpim_add_iostats(lstats, rstats, output_reqs);
 -      bfa_fcpim_add_iostats(lstats, rstats, rd_throughput);
 -      bfa_fcpim_add_iostats(lstats, rstats, wr_throughput);
 -}
 -
 -void
  bfa_fcpim_path_tov_set(struct bfa_s *bfa, u16 path_tov)
  {
        struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(bfa);
@@@ -385,6 -454,128 +385,6 @@@ bfa_fcpim_path_tov_get(struct bfa_s *bf
        return fcpim->path_tov / 1000;
  }
  
 -bfa_status_t
 -bfa_fcpim_port_iostats(struct bfa_s *bfa, struct bfa_itnim_iostats_s *stats,
 -      u8 lp_tag)
 -{
 -      struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(bfa);
 -      struct list_head *qe, *qen;
 -      struct bfa_itnim_s *itnim;
 -
 -      /* accumulate IO stats from itnim */
 -      memset(stats, 0, sizeof(struct bfa_itnim_iostats_s));
 -      list_for_each_safe(qe, qen, &fcpim->itnim_q) {
 -              itnim = (struct bfa_itnim_s *) qe;
 -              if (itnim->rport->rport_info.lp_tag != lp_tag)
 -                      continue;
 -              bfa_fcpim_add_stats(stats, &(itnim->stats));
 -      }
 -      return BFA_STATUS_OK;
 -}
 -bfa_status_t
 -bfa_fcpim_get_modstats(struct bfa_s *bfa, struct bfa_itnim_iostats_s *modstats)
 -{
 -      struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(bfa);
 -      struct list_head *qe, *qen;
 -      struct bfa_itnim_s *itnim;
 -
 -      /* accumulate IO stats from itnim */
 -      memset(modstats, 0, sizeof(struct bfa_itnim_iostats_s));
 -      list_for_each_safe(qe, qen, &fcpim->itnim_q) {
 -              itnim = (struct bfa_itnim_s *) qe;
 -              bfa_fcpim_add_stats(modstats, &(itnim->stats));
 -      }
 -      return BFA_STATUS_OK;
 -}
 -
 -bfa_status_t
 -bfa_fcpim_get_del_itn_stats(struct bfa_s *bfa,
 -       struct bfa_fcpim_del_itn_stats_s *modstats)
 -{
 -      struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(bfa);
 -
 -      *modstats = fcpim->del_itn_stats;
 -
 -      return BFA_STATUS_OK;
 -}
 -
 -
 -bfa_status_t
 -bfa_fcpim_profile_on(struct bfa_s *bfa, u32 time)
 -{
 -      struct bfa_itnim_s *itnim;
 -      struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(bfa);
 -      struct list_head *qe, *qen;
 -
 -      /* accumulate IO stats from itnim */
 -      list_for_each_safe(qe, qen, &fcpim->itnim_q) {
 -              itnim = (struct bfa_itnim_s *) qe;
 -              bfa_itnim_clear_stats(itnim);
 -      }
 -      fcpim->io_profile = BFA_TRUE;
 -      fcpim->io_profile_start_time = time;
 -      fcpim->profile_comp = bfa_ioim_profile_comp;
 -      fcpim->profile_start = bfa_ioim_profile_start;
 -
 -      return BFA_STATUS_OK;
 -}
 -bfa_status_t
 -bfa_fcpim_profile_off(struct bfa_s *bfa)
 -{
 -      struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(bfa);
 -      fcpim->io_profile = BFA_FALSE;
 -      fcpim->io_profile_start_time = 0;
 -      fcpim->profile_comp = NULL;
 -      fcpim->profile_start = NULL;
 -      return BFA_STATUS_OK;
 -}
 -
 -bfa_status_t
 -bfa_fcpim_port_clear_iostats(struct bfa_s *bfa, u8 lp_tag)
 -{
 -      struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(bfa);
 -      struct list_head *qe, *qen;
 -      struct bfa_itnim_s *itnim;
 -
 -      /* clear IO stats from all active itnims */
 -      list_for_each_safe(qe, qen, &fcpim->itnim_q) {
 -              itnim = (struct bfa_itnim_s *) qe;
 -              if (itnim->rport->rport_info.lp_tag != lp_tag)
 -                      continue;
 -              bfa_itnim_clear_stats(itnim);
 -      }
 -      return BFA_STATUS_OK;
 -
 -}
 -
 -bfa_status_t
 -bfa_fcpim_clr_modstats(struct bfa_s *bfa)
 -{
 -      struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(bfa);
 -      struct list_head *qe, *qen;
 -      struct bfa_itnim_s *itnim;
 -
 -      /* clear IO stats from all active itnims */
 -      list_for_each_safe(qe, qen, &fcpim->itnim_q) {
 -              itnim = (struct bfa_itnim_s *) qe;
 -              bfa_itnim_clear_stats(itnim);
 -      }
 -      memset(&fcpim->del_itn_stats, 0,
 -              sizeof(struct bfa_fcpim_del_itn_stats_s));
 -
 -      return BFA_STATUS_OK;
 -}
 -
 -void
 -bfa_fcpim_qdepth_set(struct bfa_s *bfa, u16 q_depth)
 -{
 -      struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(bfa);
 -
 -      bfa_assert(q_depth <= BFA_IOCFC_QDEPTH_MAX);
 -
 -      fcpim->q_depth = q_depth;
 -}
 -
  u16
  bfa_fcpim_qdepth_get(struct bfa_s *bfa)
  {
        return fcpim->q_depth;
  }
  
 -void
 -bfa_fcpim_update_ioredirect(struct bfa_s *bfa)
 -{
 -      bfa_boolean_t ioredirect;
 -
 -      /*
 -       * IO redirection is turned off when QoS is enabled and vice versa
 -       */
 -      ioredirect = bfa_fcport_is_qos_enabled(bfa) ? BFA_FALSE : BFA_TRUE;
 -}
 -
 -void
 -bfa_fcpim_set_ioredirect(struct bfa_s *bfa, bfa_boolean_t state)
 -{
 -      struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(bfa);
 -      fcpim->ioredirect = state;
 -}
 -
 -
 -
  /*
   *  BFA ITNIM module state machine functions
   */
  
  /*
 - *    Beginning/unallocated state - no events expected.
 + * Beginning/unallocated state - no events expected.
   */
  static void
  bfa_itnim_sm_uninit(struct bfa_itnim_s *itnim, enum bfa_itnim_event event)
  }
  
  /*
 - *    Beginning state, only online event expected.
 + * Beginning state, only online event expected.
   */
  static void
  bfa_itnim_sm_created(struct bfa_itnim_s *itnim, enum bfa_itnim_event event)
@@@ -522,7 -733,7 +522,7 @@@ bfa_itnim_sm_fwcreate_qfull(struct bfa_
  }
  
  /*
 - *    Waiting for itnim create response from firmware, a delete is pending.
 + * Waiting for itnim create response from firmware, a delete is pending.
   */
  static void
  bfa_itnim_sm_delete_pending(struct bfa_itnim_s *itnim,
  }
  
  /*
 - *    Online state - normal parking state.
 + * Online state - normal parking state.
   */
  static void
  bfa_itnim_sm_online(struct bfa_itnim_s *itnim, enum bfa_itnim_event event)
  }
  
  /*
 - *    Second level error recovery need.
 + * Second level error recovery needed.
   */
  static void
  bfa_itnim_sm_sler(struct bfa_itnim_s *itnim, enum bfa_itnim_event event)
  }
  
  /*
 - *    Going offline. Waiting for active IO cleanup.
 + * Going offline. Waiting for active IO cleanup.
   */
  static void
  bfa_itnim_sm_cleanup_offline(struct bfa_itnim_s *itnim,
  }
  
  /*
 - *    Deleting itnim. Waiting for active IO cleanup.
 + * Deleting itnim. Waiting for active IO cleanup.
   */
  static void
  bfa_itnim_sm_cleanup_delete(struct bfa_itnim_s *itnim,
@@@ -745,7 -956,7 +745,7 @@@ bfa_itnim_sm_fwdelete_qfull(struct bfa_
  }
  
  /*
 - *    Offline state.
 + * Offline state.
   */
  static void
  bfa_itnim_sm_offline(struct bfa_itnim_s *itnim, enum bfa_itnim_event event)
        }
  }
  
 -/*
 - *    IOC h/w failed state.
 - */
  static void
  bfa_itnim_sm_iocdisable(struct bfa_itnim_s *itnim,
                                enum bfa_itnim_event event)
  }
  
  /*
 - *    Itnim is deleted, waiting for firmware response to delete.
 + * Itnim is deleted, waiting for firmware response to delete.
   */
  static void
  bfa_itnim_sm_deleting(struct bfa_itnim_s *itnim, enum bfa_itnim_event event)
@@@ -855,7 -1069,7 +855,7 @@@ bfa_itnim_sm_deleting_qfull(struct bfa_
  }
  
  /*
 - *    Initiate cleanup of all IOs on an IOC failure.
 + * Initiate cleanup of all IOs on an IOC failure.
   */
  static void
  bfa_itnim_iocdisable_cleanup(struct bfa_itnim_s *itnim)
  }
  
  /*
 - *    IO cleanup completion
 + * IO cleanup completion
   */
  static void
  bfa_itnim_cleanp_comp(void *itnim_cbarg)
  }
  
  /*
 - *    Initiate cleanup of all IOs.
 + * Initiate cleanup of all IOs.
   */
  static void
  bfa_itnim_cleanup(struct bfa_itnim_s *itnim)
@@@ -973,6 -1187,9 +973,6 @@@ bfa_itnim_qresume(void *cbarg
        bfa_sm_send_event(itnim, BFA_ITNIM_SM_QRESUME);
  }
  
 -
 -
 -
  /*
   *  bfa_itnim_public
   */
@@@ -1184,7 -1401,7 +1184,7 @@@ bfa_itnim_iotov_start(struct bfa_itnim_
        if (itnim->fcpim->path_tov > 0) {
  
                itnim->iotov_active = BFA_TRUE;
 -              bfa_assert(bfa_itnim_hold_io(itnim));
 +              WARN_ON(!bfa_itnim_hold_io(itnim));
                bfa_timer_start(itnim->bfa, &itnim->timer,
                        bfa_itnim_iotov, itnim, itnim->fcpim->path_tov);
        }
@@@ -1240,12 -1457,14 +1240,12 @@@ bfa_itnim_update_del_itn_stats(struct b
        fcpim->del_itn_stats.del_tm_iocdowns += itnim->stats.tm_iocdowns;
  }
  
 -
 -
  /*
 - *  bfa_itnim_public
 + * bfa_itnim_public
   */
  
  /*
 - *    Itnim interrupt processing.
 + * Itnim interrupt processing.
   */
  void
  bfa_itnim_isr(struct bfa_s *bfa, struct bfi_msg_s *m)
        case BFI_ITNIM_I2H_CREATE_RSP:
                itnim = BFA_ITNIM_FROM_TAG(fcpim,
                                                msg.create_rsp->bfa_handle);
 -              bfa_assert(msg.create_rsp->status == BFA_STATUS_OK);
 +              WARN_ON(msg.create_rsp->status != BFA_STATUS_OK);
                bfa_stats(itnim, create_comps);
                bfa_sm_send_event(itnim, BFA_ITNIM_SM_FWRSP);
                break;
        case BFI_ITNIM_I2H_DELETE_RSP:
                itnim = BFA_ITNIM_FROM_TAG(fcpim,
                                                msg.delete_rsp->bfa_handle);
 -              bfa_assert(msg.delete_rsp->status == BFA_STATUS_OK);
 +              WARN_ON(msg.delete_rsp->status != BFA_STATUS_OK);
                bfa_stats(itnim, delete_comps);
                bfa_sm_send_event(itnim, BFA_ITNIM_SM_FWRSP);
                break;
  
        default:
                bfa_trc(bfa, m->mhdr.msg_id);
 -              bfa_assert(0);
 +              WARN_ON(1);
        }
  }
  
 -
 -
  /*
 - *  bfa_itnim_api
 + * bfa_itnim_api
   */
  
  struct bfa_itnim_s *
@@@ -1299,7 -1520,7 +1299,7 @@@ bfa_itnim_create(struct bfa_s *bfa, str
        struct bfa_itnim_s *itnim;
  
        itnim = BFA_ITNIM_FROM_TAG(fcpim, rport->rport_tag);
 -      bfa_assert(itnim->rport == rport);
 +      WARN_ON(itnim->rport != rport);
  
        itnim->ditn = ditn;
  
@@@ -1347,6 -1568,31 +1347,6 @@@ bfa_itnim_hold_io(struct bfa_itnim_s *i
                 bfa_sm_cmp_state(itnim, bfa_itnim_sm_iocdisable));
  }
  
 -bfa_status_t
 -bfa_itnim_get_ioprofile(struct bfa_itnim_s *itnim,
 -              struct bfa_itnim_ioprofile_s *ioprofile)
 -{
 -      struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(itnim->bfa);
 -      if (!fcpim->io_profile)
 -              return BFA_STATUS_IOPROFILE_OFF;
 -
 -      itnim->ioprofile.index = BFA_IOBUCKET_MAX;
 -      itnim->ioprofile.io_profile_start_time =
 -              bfa_io_profile_start_time(itnim->bfa);
 -      itnim->ioprofile.clock_res_mul = bfa_io_lat_clock_res_mul;
 -      itnim->ioprofile.clock_res_div = bfa_io_lat_clock_res_div;
 -      *ioprofile = itnim->ioprofile;
 -
 -      return BFA_STATUS_OK;
 -}
 -
 -void
 -bfa_itnim_get_stats(struct bfa_itnim_s *itnim,
 -      struct bfa_itnim_iostats_s *stats)
 -{
 -      *stats = itnim->stats;
 -}
 -
  void
  bfa_itnim_clear_stats(struct bfa_itnim_s *itnim)
  {
   */
  
  /*
 - *    IO is not started (unallocated).
 + * IO is not started (unallocated).
   */
  static void
  bfa_ioim_sm_uninit(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
  {
 -      bfa_trc_fp(ioim->bfa, ioim->iotag);
 -      bfa_trc_fp(ioim->bfa, event);
 -
        switch (event) {
        case BFA_IOIM_SM_START:
                if (!bfa_itnim_is_online(ioim->itnim)) {
                }
  
                if (ioim->nsges > BFI_SGE_INLINE) {
 -                      if (!bfa_ioim_sge_setup(ioim)) {
 +                      if (!bfa_ioim_sgpg_alloc(ioim)) {
                                bfa_sm_set_state(ioim, bfa_ioim_sm_sgalloc);
                                return;
                        }
                 * requests immediately.
                 */
                bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
 -              bfa_assert(bfa_q_is_on_q(&ioim->itnim->pending_q, ioim));
 +              WARN_ON(!bfa_q_is_on_q(&ioim->itnim->pending_q, ioim));
                bfa_cb_queue(ioim->bfa, &ioim->hcb_qe,
                                __bfa_cb_ioim_abort, ioim);
                break;
  }
  
  /*
 - *    IO is waiting for SG pages.
 + * IO is waiting for SG pages.
   */
  static void
  bfa_ioim_sm_sgalloc(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
  }
  
  /*
 - *    IO is active.
 + * IO is active.
   */
  static void
  bfa_ioim_sm_active(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
  {
 -      bfa_trc_fp(ioim->bfa, ioim->iotag);
 -      bfa_trc_fp(ioim->bfa, event);
 -
        switch (event) {
        case BFA_IOIM_SM_COMP_GOOD:
                bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
                break;
  
        case BFA_IOIM_SM_SQRETRY:
 -              if (bfa_ioim_get_iotag(ioim) != BFA_TRUE) {
 -                      /* max retry completed free IO */
 +              if (bfa_ioim_maxretry_reached(ioim)) {
 +                      /* max retry reached, free IO */
                        bfa_sm_set_state(ioim, bfa_ioim_sm_hcb_free);
                        bfa_ioim_move_to_comp_q(ioim);
                        bfa_cb_queue(ioim->bfa, &ioim->hcb_qe,
  }
  
  /*
 -*     IO is retried with new tag.
 -*/
 + * IO is retried with new tag.
 + */
  static void
  bfa_ioim_sm_cmnd_retry(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
  {
        switch (event) {
        case BFA_IOIM_SM_FREE:
                /* abts and rrq done. Now retry the IO with new tag */
 +              bfa_ioim_update_iotag(ioim);
                if (!bfa_ioim_send_ioreq(ioim)) {
                        bfa_sm_set_state(ioim, bfa_ioim_sm_qfull);
                        break;
  }
  
  /*
 - *    IO is being aborted, waiting for completion from firmware.
 + * IO is being aborted, waiting for completion from firmware.
   */
  static void
  bfa_ioim_sm_abort(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
                break;
  
        case BFA_IOIM_SM_CLEANUP:
 -              bfa_assert(ioim->iosp->abort_explicit == BFA_TRUE);
 +              WARN_ON(ioim->iosp->abort_explicit != BFA_TRUE);
                ioim->iosp->abort_explicit = BFA_FALSE;
  
                if (bfa_ioim_send_abort(ioim))
@@@ -1727,7 -1981,7 +1727,7 @@@ bfa_ioim_sm_cleanup(struct bfa_ioim_s *
  }
  
  /*
 - *    IO is waiting for room in request CQ
 + * IO is waiting for room in request CQ
   */
  static void
  bfa_ioim_sm_qfull(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
  }
  
  /*
 - *    Active IO is being aborted, waiting for room in request CQ.
 + * Active IO is being aborted, waiting for room in request CQ.
   */
  static void
  bfa_ioim_sm_abort_qfull(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
                break;
  
        case BFA_IOIM_SM_CLEANUP:
 -              bfa_assert(ioim->iosp->abort_explicit == BFA_TRUE);
 +              WARN_ON(ioim->iosp->abort_explicit != BFA_TRUE);
                ioim->iosp->abort_explicit = BFA_FALSE;
                bfa_sm_set_state(ioim, bfa_ioim_sm_cleanup_qfull);
                break;
  }
  
  /*
 - *    Active IO is being cleaned up, waiting for room in request CQ.
 + * Active IO is being cleaned up, waiting for room in request CQ.
   */
  static void
  bfa_ioim_sm_cleanup_qfull(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
  
        case BFA_IOIM_SM_ABORT:
                /*
-                * IO is alraedy being cleaned up implicitly
+                * IO is already being cleaned up implicitly
                 */
                ioim->io_cbfn = __bfa_cb_ioim_abort;
                break;
  static void
  bfa_ioim_sm_hcb(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
  {
 -      bfa_trc_fp(ioim->bfa, ioim->iotag);
 -      bfa_trc_fp(ioim->bfa, event);
 -
        switch (event) {
        case BFA_IOIM_SM_HCB:
                bfa_sm_set_state(ioim, bfa_ioim_sm_uninit);
@@@ -1956,6 -2213,11 +1956,6 @@@ bfa_ioim_sm_resfree(struct bfa_ioim_s *
  }
  
  
 -
 -/*
 - *  hal_ioim_private
 - */
 -
  static void
  __bfa_cb_ioim_good_comp(void *cbarg, bfa_boolean_t complete)
  {
@@@ -2061,7 -2323,7 +2061,7 @@@ bfa_ioim_sgpg_alloced(void *cbarg
  
        ioim->nsgpgs = BFA_SGPG_NPAGE(ioim->nsges);
        list_splice_tail_init(&ioim->iosp->sgpg_wqe.sgpg_q, &ioim->sgpg_q);
 -      bfa_ioim_sgpg_setup(ioim);
 +      ioim->sgpg = bfa_q_first(&ioim->sgpg_q);
        bfa_sm_send_event(ioim, BFA_IOIM_SM_SGALLOCED);
  }
  
@@@ -2073,16 -2335,13 +2073,16 @@@ bfa_ioim_send_ioreq(struct bfa_ioim_s *
  {
        struct bfa_itnim_s *itnim = ioim->itnim;
        struct bfi_ioim_req_s *m;
 -      static struct fcp_cmnd_s cmnd_z0 = { 0 };
 -      struct bfi_sge_s      *sge;
 +      static struct fcp_cmnd_s cmnd_z0 = { { { 0 } } };
 +      struct bfi_sge_s *sge, *sgpge;
        u32     pgdlen = 0;
        u32     fcp_dl;
        u64 addr;
        struct scatterlist *sg;
 +      struct bfa_sgpg_s *sgpg;
        struct scsi_cmnd *cmnd = (struct scsi_cmnd *) ioim->dio;
 +      u32 i, sge_id, pgcumsz;
 +      enum dma_data_direction dmadir;
  
        /*
         * check for room in queue to send request now
         */
        m->io_tag = cpu_to_be16(ioim->iotag);
        m->rport_hdl = ioim->itnim->rport->fw_handle;
 -      m->io_timeout = bfa_cb_ioim_get_timeout(ioim->dio);
 +      m->io_timeout = 0;
  
 -      /*
 -       * build inline IO SG element here
 -       */
        sge = &m->sges[0];
 -      if (ioim->nsges) {
 -              sg = (struct scatterlist *)scsi_sglist(cmnd);
 -              addr = bfa_os_sgaddr(sg_dma_address(sg));
 -              sge->sga = *(union bfi_addr_u *) &addr;
 -              pgdlen = sg_dma_len(sg);
 -              sge->sg_len = pgdlen;
 -              sge->flags = (ioim->nsges > BFI_SGE_INLINE) ?
 +      sgpg = ioim->sgpg;
 +      sge_id = 0;
 +      sgpge = NULL;
 +      pgcumsz = 0;
 +      scsi_for_each_sg(cmnd, sg, ioim->nsges, i) {
 +              if (i == 0) {
 +                      /* build inline IO SG element */
 +                      addr = bfa_sgaddr_le(sg_dma_address(sg));
 +                      sge->sga = *(union bfi_addr_u *) &addr;
 +                      pgdlen = sg_dma_len(sg);
 +                      sge->sg_len = pgdlen;
 +                      sge->flags = (ioim->nsges > BFI_SGE_INLINE) ?
                                        BFI_SGE_DATA_CPL : BFI_SGE_DATA_LAST;
 -              bfa_sge_to_be(sge);
 -              sge++;
 +                      bfa_sge_to_be(sge);
 +                      sge++;
 +              } else {
 +                      if (sge_id == 0)
 +                              sgpge = sgpg->sgpg->sges;
 +
 +                      addr = bfa_sgaddr_le(sg_dma_address(sg));
 +                      sgpge->sga = *(union bfi_addr_u *) &addr;
 +                      sgpge->sg_len = sg_dma_len(sg);
 +                      pgcumsz += sgpge->sg_len;
 +
 +                      /* set flags */
 +                      if (i < (ioim->nsges - 1) &&
 +                                      sge_id < (BFI_SGPG_DATA_SGES - 1))
 +                              sgpge->flags = BFI_SGE_DATA;
 +                      else if (i < (ioim->nsges - 1))
 +                              sgpge->flags = BFI_SGE_DATA_CPL;
 +                      else
 +                              sgpge->flags = BFI_SGE_DATA_LAST;
 +
 +                      bfa_sge_to_le(sgpge);
 +
 +                      sgpge++;
 +                      if (i == (ioim->nsges - 1)) {
 +                              sgpge->flags = BFI_SGE_PGDLEN;
 +                              sgpge->sga.a32.addr_lo = 0;
 +                              sgpge->sga.a32.addr_hi = 0;
 +                              sgpge->sg_len = pgcumsz;
 +                              bfa_sge_to_le(sgpge);
 +                      } else if (++sge_id == BFI_SGPG_DATA_SGES) {
 +                              sgpg = (struct bfa_sgpg_s *) bfa_q_next(sgpg);
 +                              sgpge->flags = BFI_SGE_LINK;
 +                              sgpge->sga = sgpg->sgpg_pa;
 +                              sgpge->sg_len = pgcumsz;
 +                              bfa_sge_to_le(sgpge);
 +                              sge_id = 0;
 +                              pgcumsz = 0;
 +                      }
 +              }
        }
  
        if (ioim->nsges > BFI_SGE_INLINE) {
         * set up I/O command parameters
         */
        m->cmnd = cmnd_z0;
 -      m->cmnd.lun = bfa_cb_ioim_get_lun(ioim->dio);
 -      m->cmnd.iodir = bfa_cb_ioim_get_iodir(ioim->dio);
 -      m->cmnd.cdb = *(scsi_cdb_t *)bfa_cb_ioim_get_cdb(ioim->dio);
 -      fcp_dl = bfa_cb_ioim_get_size(ioim->dio);
 +      int_to_scsilun(cmnd->device->lun, &m->cmnd.lun);
 +      dmadir = cmnd->sc_data_direction;
 +      if (dmadir == DMA_TO_DEVICE)
 +              m->cmnd.iodir = FCP_IODIR_WRITE;
 +      else if (dmadir == DMA_FROM_DEVICE)
 +              m->cmnd.iodir = FCP_IODIR_READ;
 +      else
 +              m->cmnd.iodir = FCP_IODIR_NONE;
 +
 +      m->cmnd.cdb = *(struct scsi_cdb_s *) cmnd->cmnd;
 +      fcp_dl = scsi_bufflen(cmnd);
        m->cmnd.fcp_dl = cpu_to_be32(fcp_dl);
  
        /*
                bfi_h2i_set(m->mh, BFI_MC_IOIM_IO, 0, bfa_lpuid(ioim->bfa));
        }
        if (itnim->seq_rec ||
 -          (bfa_cb_ioim_get_size(ioim->dio) & (sizeof(u32) - 1)))
 +          (scsi_bufflen(cmnd) & (sizeof(u32) - 1)))
                bfi_h2i_set(m->mh, BFI_MC_IOIM_IO, 0, bfa_lpuid(ioim->bfa));
  
 -#ifdef IOIM_ADVANCED
 -      m->cmnd.crn = bfa_cb_ioim_get_crn(ioim->dio);
 -      m->cmnd.priority = bfa_cb_ioim_get_priority(ioim->dio);
 -      m->cmnd.taskattr = bfa_cb_ioim_get_taskattr(ioim->dio);
 -
 -      /*
 -       * Handle large CDB (>16 bytes).
 -       */
 -      m->cmnd.addl_cdb_len = (bfa_cb_ioim_get_cdblen(ioim->dio) -
 -                                      FCP_CMND_CDB_LEN) / sizeof(u32);
 -      if (m->cmnd.addl_cdb_len) {
 -              memcpy(&m->cmnd.cdb + 1, (scsi_cdb_t *)
 -                              bfa_cb_ioim_get_cdb(ioim->dio) + 1,
 -                              m->cmnd.addl_cdb_len * sizeof(u32));
 -              fcp_cmnd_fcpdl(&m->cmnd) =
 -                              cpu_to_be32(bfa_cb_ioim_get_size(ioim->dio));
 -      }
 -#endif
 -
        /*
         * queue I/O message to firmware
         */
   * at queuing time.
   */
  static bfa_boolean_t
 -bfa_ioim_sge_setup(struct bfa_ioim_s *ioim)
 +bfa_ioim_sgpg_alloc(struct bfa_ioim_s *ioim)
  {
        u16     nsgpgs;
  
 -      bfa_assert(ioim->nsges > BFI_SGE_INLINE);
 +      WARN_ON(ioim->nsges <= BFI_SGE_INLINE);
  
        /*
         * allocate SG pages needed
        }
  
        ioim->nsgpgs = nsgpgs;
 -      bfa_ioim_sgpg_setup(ioim);
 +      ioim->sgpg = bfa_q_first(&ioim->sgpg_q);
  
        return BFA_TRUE;
  }
  
 -static void
 -bfa_ioim_sgpg_setup(struct bfa_ioim_s *ioim)
 -{
 -      int             sgeid, nsges, i;
 -      struct bfi_sge_s      *sge;
 -      struct bfa_sgpg_s *sgpg;
 -      u32     pgcumsz;
 -      u64        addr;
 -      struct scatterlist *sg;
 -      struct scsi_cmnd *cmnd = (struct scsi_cmnd *) ioim->dio;
 -
 -      sgeid = BFI_SGE_INLINE;
 -      ioim->sgpg = sgpg = bfa_q_first(&ioim->sgpg_q);
 -
 -      sg = scsi_sglist(cmnd);
 -      sg = sg_next(sg);
 -
 -      do {
 -              sge = sgpg->sgpg->sges;
 -              nsges = ioim->nsges - sgeid;
 -              if (nsges > BFI_SGPG_DATA_SGES)
 -                      nsges = BFI_SGPG_DATA_SGES;
 -
 -              pgcumsz = 0;
 -              for (i = 0; i < nsges; i++, sge++, sgeid++, sg = sg_next(sg)) {
 -                      addr = bfa_os_sgaddr(sg_dma_address(sg));
 -                      sge->sga = *(union bfi_addr_u *) &addr;
 -                      sge->sg_len = sg_dma_len(sg);
 -                      pgcumsz += sge->sg_len;
 -
 -                      /*
 -                       * set flags
 -                       */
 -                      if (i < (nsges - 1))
 -                              sge->flags = BFI_SGE_DATA;
 -                      else if (sgeid < (ioim->nsges - 1))
 -                              sge->flags = BFI_SGE_DATA_CPL;
 -                      else
 -                              sge->flags = BFI_SGE_DATA_LAST;
 -
 -                      bfa_sge_to_le(sge);
 -              }
 -
 -              sgpg = (struct bfa_sgpg_s *) bfa_q_next(sgpg);
 -
 -              /*
 -               * set the link element of each page
 -               */
 -              if (sgeid == ioim->nsges) {
 -                      sge->flags = BFI_SGE_PGDLEN;
 -                      sge->sga.a32.addr_lo = 0;
 -                      sge->sga.a32.addr_hi = 0;
 -              } else {
 -                      sge->flags = BFI_SGE_LINK;
 -                      sge->sga = sgpg->sgpg_pa;
 -              }
 -              sge->sg_len = pgcumsz;
 -
 -              bfa_sge_to_le(sge);
 -      } while (sgeid < ioim->nsges);
 -}
 -
  /*
   * Send I/O abort request to firmware.
   */
@@@ -2311,7 -2605,7 +2311,7 @@@ bfa_ioim_notify_cleanup(struct bfa_ioim
                }
                bfa_itnim_iodone(ioim->itnim);
        } else
 -              bfa_tskim_iodone(ioim->iosp->tskim);
 +              bfa_wc_down(&ioim->iosp->tskim->wc);
  }
  
  static bfa_boolean_t
@@@ -2329,6 -2623,9 +2329,6 @@@ bfa_ioim_is_abortable(struct bfa_ioim_
        return BFA_TRUE;
  }
  
 -/*
 - *    or after the link comes back.
 - */
  void
  bfa_ioim_delayed_comp(struct bfa_ioim_s *ioim, bfa_boolean_t iotov)
  {
  }
  
  
 -
 -/*
 - *  hal_ioim_friend
 - */
 -
  /*
   * Memory allocation and initialization.
   */
@@@ -2420,6 -2722,14 +2420,6 @@@ bfa_ioim_attach(struct bfa_fcpim_mod_s 
        }
  }
  
 -/*
 - * Driver detach time call.
 - */
 -void
 -bfa_ioim_detach(struct bfa_fcpim_mod_s *fcpim)
 -{
 -}
 -
  void
  bfa_ioim_isr(struct bfa_s *bfa, struct bfi_msg_s *m)
  {
        iotag = be16_to_cpu(rsp->io_tag);
  
        ioim = BFA_IOIM_FROM_TAG(fcpim, iotag);
 -      bfa_assert(ioim->iotag == iotag);
 +      WARN_ON(ioim->iotag != iotag);
  
        bfa_trc(ioim->bfa, ioim->iotag);
        bfa_trc(ioim->bfa, rsp->io_status);
  
        case BFI_IOIM_STS_PROTO_ERR:
                bfa_stats(ioim->itnim, iocom_proto_err);
 -              bfa_assert(rsp->reuse_io_tag);
 +              WARN_ON(!rsp->reuse_io_tag);
                evt = BFA_IOIM_SM_COMP;
                break;
  
        case BFI_IOIM_STS_SQER_NEEDED:
                bfa_stats(ioim->itnim, iocom_sqer_needed);
 -              bfa_assert(rsp->reuse_io_tag == 0);
 +              WARN_ON(rsp->reuse_io_tag != 0);
                evt = BFA_IOIM_SM_SQRETRY;
                break;
  
                break;
  
        default:
 -              bfa_assert(0);
 +              WARN_ON(1);
        }
  
        bfa_sm_send_event(ioim, evt);
@@@ -2515,12 -2825,39 +2515,12 @@@ bfa_ioim_good_comp_isr(struct bfa_s *bf
        iotag = be16_to_cpu(rsp->io_tag);
  
        ioim = BFA_IOIM_FROM_TAG(fcpim, iotag);
 -      bfa_assert(ioim->iotag == iotag);
 +      WARN_ON(BFA_IOIM_TAG_2_ID(ioim->iotag) != iotag);
  
 -      bfa_trc_fp(ioim->bfa, ioim->iotag);
        bfa_ioim_cb_profile_comp(fcpim, ioim);
        bfa_sm_send_event(ioim, BFA_IOIM_SM_COMP_GOOD);
  }
  
 -void
 -bfa_ioim_profile_start(struct bfa_ioim_s *ioim)
 -{
 -      ioim->start_time = jiffies;
 -}
 -
 -void
 -bfa_ioim_profile_comp(struct bfa_ioim_s *ioim)
 -{
 -      u32 fcp_dl = bfa_cb_ioim_get_size(ioim->dio);
 -      u32 index = bfa_ioim_get_index(fcp_dl);
 -      u64 end_time = jiffies;
 -      struct bfa_itnim_latency_s *io_lat =
 -                      &(ioim->itnim->ioprofile.io_latency);
 -      u32 val = (u32)(end_time - ioim->start_time);
 -
 -      bfa_itnim_ioprofile_update(ioim->itnim, index);
 -
 -      io_lat->count[index]++;
 -      io_lat->min[index] = (io_lat->min[index] < val) ?
 -              io_lat->min[index] : val;
 -      io_lat->max[index] = (io_lat->max[index] > val) ?
 -              io_lat->max[index] : val;
 -      io_lat->avg[index] += val;
 -}
  /*
   * Called by itnim to clean up IO while going offline.
   */
@@@ -2566,6 -2903,11 +2566,6 @@@ bfa_ioim_tov(struct bfa_ioim_s *ioim
  }
  
  
 -
 -/*
 - *  hal_ioim_api
 - */
 -
  /*
   * Allocate IOIM resource for initiator mode I/O request.
   */
@@@ -2594,6 -2936,7 +2594,6 @@@ bfa_ioim_alloc(struct bfa_s *bfa, struc
        fcpim->ios_active++;
  
        list_add_tail(&ioim->qe, &itnim->io_q);
 -      bfa_trc_fp(ioim->bfa, ioim->iotag);
  
        return ioim;
  }
@@@ -2603,13 -2946,18 +2603,13 @@@ bfa_ioim_free(struct bfa_ioim_s *ioim
  {
        struct bfa_fcpim_mod_s *fcpim = ioim->fcpim;
  
 -      bfa_trc_fp(ioim->bfa, ioim->iotag);
 -      bfa_assert_fp(bfa_sm_cmp_state(ioim, bfa_ioim_sm_uninit));
 -
 -      bfa_assert_fp(list_empty(&ioim->sgpg_q) ||
 -                      (ioim->nsges > BFI_SGE_INLINE));
 -
        if (ioim->nsgpgs > 0)
                bfa_sgpg_mfree(ioim->bfa, &ioim->sgpg_q, ioim->nsgpgs);
  
        bfa_stats(ioim->itnim, io_comps);
        fcpim->ios_active--;
  
 +      ioim->iotag &= BFA_IOIM_IOTAG_MASK;
        list_del(&ioim->qe);
        list_add_tail(&ioim->qe, &fcpim->ioim_free_q);
  }
  void
  bfa_ioim_start(struct bfa_ioim_s *ioim)
  {
 -      bfa_trc_fp(ioim->bfa, ioim->iotag);
 -
        bfa_ioim_cb_profile_start(ioim->fcpim, ioim);
  
        /*
         * Obtain the queue over which this request has to be issued
         */
        ioim->reqq = bfa_fcpim_ioredirect_enabled(ioim->bfa) ?
 -                      bfa_cb_ioim_get_reqq(ioim->dio) :
 -                      bfa_itnim_get_reqq(ioim);
 +                      BFA_FALSE : bfa_itnim_get_reqq(ioim);
  
        bfa_sm_send_event(ioim, BFA_IOIM_SM_START);
  }
@@@ -2646,12 -2997,13 +2646,12 @@@ bfa_ioim_abort(struct bfa_ioim_s *ioim
        return BFA_STATUS_OK;
  }
  
 -
  /*
   *  BFA TSKIM state machine functions
   */
  
  /*
 - *    Task management command beginning state.
 + * Task management command beginning state.
   */
  static void
  bfa_tskim_sm_uninit(struct bfa_tskim_s *tskim, enum bfa_tskim_event event)
  }
  
  /*
 - * brief
 - *    TM command is active, awaiting completion from firmware to
 - *    cleanup IO requests in TM scope.
 + * TM command is active, awaiting completion from firmware to
 + * clean up IO requests in TM scope.
   */
  static void
  bfa_tskim_sm_active(struct bfa_tskim_s *tskim, enum bfa_tskim_event event)
  }
  
  /*
 - *    An active TM is being cleaned up since ITN is offline. Awaiting cleanup
 - *    completion event from firmware.
 + * An active TM is being cleaned up since ITN is offline. Awaiting cleanup
 + * completion event from firmware.
   */
  static void
  bfa_tskim_sm_cleanup(struct bfa_tskim_s *tskim, enum bfa_tskim_event event)
@@@ -2785,7 -3138,7 +2785,7 @@@ bfa_tskim_sm_iocleanup(struct bfa_tskim
  }
  
  /*
 - *    Task management command is waiting for room in request CQ
 + * Task management command is waiting for room in request CQ
   */
  static void
  bfa_tskim_sm_qfull(struct bfa_tskim_s *tskim, enum bfa_tskim_event event)
  }
  
  /*
 - *    Task management command is active, awaiting for room in request CQ
 - *    to send clean up request.
 + * Task management command is active, awaiting room in request CQ
 + * to send the cleanup request.
   */
  static void
  bfa_tskim_sm_cleanup_qfull(struct bfa_tskim_s *tskim,
        case BFA_TSKIM_SM_DONE:
                bfa_reqq_wcancel(&tskim->reqq_wait);
                /*
 -               *
                 * Fall through !!!
                 */
 -
        case BFA_TSKIM_SM_QRESUME:
                bfa_sm_set_state(tskim, bfa_tskim_sm_cleanup);
                bfa_tskim_send_abort(tskim);
  }
  
  /*
 - *    BFA callback is pending
 + * BFA callback is pending
   */
  static void
  bfa_tskim_sm_hcb(struct bfa_tskim_s *tskim, enum bfa_tskim_event event)
        }
  }
  
 -
 -
 -/*
 - *  hal_tskim_private
 - */
 -
  static void
  __bfa_cb_tskim_done(void *cbarg, bfa_boolean_t complete)
  {
@@@ -2907,8 -3268,8 +2907,8 @@@ __bfa_cb_tskim_failed(void *cbarg, bfa_
                                BFI_TSKIM_STS_FAILED);
  }
  
 -static        bfa_boolean_t
 -bfa_tskim_match_scope(struct bfa_tskim_s *tskim, lun_t lun)
 +static bfa_boolean_t
 +bfa_tskim_match_scope(struct bfa_tskim_s *tskim, struct scsi_lun lun)
  {
        switch (tskim->tm_cmnd) {
        case FCP_TM_TARGET_RESET:
        case FCP_TM_CLEAR_TASK_SET:
        case FCP_TM_LUN_RESET:
        case FCP_TM_CLEAR_ACA:
 -              return (tskim->lun == lun);
 +              return !memcmp(&tskim->lun, &lun, sizeof(lun));
  
        default:
 -              bfa_assert(0);
 +              WARN_ON(1);
        }
  
        return BFA_FALSE;
  }
  
  /*
 - *    Gather affected IO requests and task management commands.
 + * Gather affected IO requests and task management commands.
   */
  static void
  bfa_tskim_gather_ios(struct bfa_tskim_s *tskim)
  {
        struct bfa_itnim_s *itnim = tskim->itnim;
        struct bfa_ioim_s *ioim;
 -      struct list_head        *qe, *qen;
 +      struct list_head *qe, *qen;
 +      struct scsi_cmnd *cmnd;
 +      struct scsi_lun scsilun;
  
        INIT_LIST_HEAD(&tskim->io_q);
  
         */
        list_for_each_safe(qe, qen, &itnim->io_q) {
                ioim = (struct bfa_ioim_s *) qe;
 -              if (bfa_tskim_match_scope
 -                      (tskim, bfa_cb_ioim_get_lun(ioim->dio))) {
 +              cmnd = (struct scsi_cmnd *) ioim->dio;
 +              int_to_scsilun(cmnd->device->lun, &scsilun);
 +              if (bfa_tskim_match_scope(tskim, scsilun)) {
                        list_del(&ioim->qe);
                        list_add_tail(&ioim->qe, &tskim->io_q);
                }
         */
        list_for_each_safe(qe, qen, &itnim->pending_q) {
                ioim = (struct bfa_ioim_s *) qe;
 -              if (bfa_tskim_match_scope
 -                      (tskim, bfa_cb_ioim_get_lun(ioim->dio))) {
 +              cmnd = (struct scsi_cmnd *) ioim->dio;
 +              int_to_scsilun(cmnd->device->lun, &scsilun);
 +              if (bfa_tskim_match_scope(tskim, scsilun)) {
                        list_del(&ioim->qe);
                        list_add_tail(&ioim->qe, &ioim->fcpim->ioim_comp_q);
                        bfa_ioim_tov(ioim);
  }
  
  /*
 - *    IO cleanup completion
 + * IO cleanup completion
   */
  static void
  bfa_tskim_cleanp_comp(void *tskim_cbarg)
  }
  
  /*
 - *    Gather affected IO requests and task management commands.
 + * Clean up the IO requests gathered within the TM scope.
   */
  static void
  bfa_tskim_cleanup_ios(struct bfa_tskim_s *tskim)
  }
  
  /*
 - *    Send task management request to firmware.
 + * Send task management request to firmware.
   */
  static bfa_boolean_t
  bfa_tskim_send(struct bfa_tskim_s *tskim)
  }
  
  /*
 - *    Send abort request to cleanup an active TM to firmware.
 + * Send an abort request to firmware to clean up an active TM.
   */
  static bfa_boolean_t
  bfa_tskim_send_abort(struct bfa_tskim_s *tskim)
  }
  
  /*
 - *    Call to resume task management cmnd waiting for room in request queue.
 + * Call to resume task management cmnd waiting for room in request queue.
   */
  static void
  bfa_tskim_qresume(void *cbarg)
@@@ -3094,6 -3451,12 +3094,6 @@@ bfa_tskim_iocdisable_ios(struct bfa_tsk
        }
  }
  
 -
 -
 -/*
 - *  hal_tskim_friend
 - */
 -
  /*
   * Notification on completions from related ioim.
   */
@@@ -3126,7 -3489,7 +3126,7 @@@ bfa_tskim_cleanup(struct bfa_tskim_s *t
  }
  
  /*
 - *    Memory allocation and initialization.
 + * Memory allocation and initialization.
   */
  void
  bfa_tskim_attach(struct bfa_fcpim_mod_s *fcpim, struct bfa_meminfo_s *minfo)
  }
  
  void
 -bfa_tskim_detach(struct bfa_fcpim_mod_s *fcpim)
 -{
 -      /*
 -      * @todo
 -      */
 -}
 -
 -void
  bfa_tskim_isr(struct bfa_s *bfa, struct bfi_msg_s *m)
  {
        struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(bfa);
        u16     tsk_tag = be16_to_cpu(rsp->tsk_tag);
  
        tskim = BFA_TSKIM_FROM_TAG(fcpim, tsk_tag);
 -      bfa_assert(tskim->tsk_tag == tsk_tag);
 +      WARN_ON(tskim->tsk_tag != tsk_tag);
  
        tskim->tsk_status = rsp->tsk_status;
  
  }
  
  
 -
 -/*
 - *  hal_tskim_api
 - */
 -
 -
  struct bfa_tskim_s *
  bfa_tskim_alloc(struct bfa_s *bfa, struct bfad_tskim_s *dtsk)
  {
  void
  bfa_tskim_free(struct bfa_tskim_s *tskim)
  {
 -      bfa_assert(bfa_q_is_on_q_func(&tskim->itnim->tsk_q, &tskim->qe));
 +      WARN_ON(!bfa_q_is_on_q_func(&tskim->itnim->tsk_q, &tskim->qe));
        list_del(&tskim->qe);
        list_add_tail(&tskim->qe, &tskim->fcpim->tskim_free_q);
  }
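
Every bfa_assert()-to-WARN_ON() conversion in this series flips the condition: bfa_assert(cond) trapped when cond was false, whereas WARN_ON(cond) warns when cond is true. A userspace sketch of the two equivalent forms (my_assert/my_warn_on are illustrative stand-ins, not kernel macros):

    #include <stdio.h>

    /* Stand-ins for illustration: my_assert() fires on a false condition,
     * my_warn_on() fires on a true one. */
    #define my_assert(cond)  do { if (!(cond)) printf("assert: %s\n", #cond); } while (0)
    #define my_warn_on(cond) do { if (cond)    printf("warn:   %s\n", #cond); } while (0)

    int main(void)
    {
            void *ptr = NULL;

            my_assert(ptr != NULL);   /* old style: fires because ptr is NULL */
            my_warn_on(ptr == NULL);  /* converted: same event, negated check */
            return 0;
    }
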
  
  /*
 - *    Start a task management command.
 + * Start a task management command.
   *
   * @param[in] tskim   BFA task management command instance
   * @param[in] itnim   i-t nexus for the task management command
   * @return None.
   */
  void
 -bfa_tskim_start(struct bfa_tskim_s *tskim, struct bfa_itnim_s *itnim, lun_t lun,
 +bfa_tskim_start(struct bfa_tskim_s *tskim, struct bfa_itnim_s *itnim,
 +                      struct scsi_lun lun,
                        enum fcp_tm_cmnd tm_cmnd, u8 tsecs)
  {
        tskim->itnim    = itnim;
   * General Public License for more details.
   */
  
 +#include "bfad_drv.h"
  #include "bfa_fcs.h"
  #include "bfa_fcbuild.h"
  #include "bfa_fc.h"
 -#include "bfad_drv.h"
  
  BFA_TRC_FILE(FCS, PORT);
  
@@@ -159,7 -159,7 +159,7 @@@ bfa_fcs_lport_sm_online
                        bfa_sm_set_state(port, bfa_fcs_lport_sm_deleting);
                        list_for_each_safe(qe, qen, &port->rport_q) {
                                rport = (struct bfa_fcs_rport_s *) qe;
 -                              bfa_fcs_rport_delete(rport);
 +                              bfa_sm_send_event(rport, RPSM_EVENT_DELETE);
                        }
                }
                break;
@@@ -197,7 -197,7 +197,7 @@@ bfa_fcs_lport_sm_offline
                        bfa_sm_set_state(port, bfa_fcs_lport_sm_deleting);
                        list_for_each_safe(qe, qen, &port->rport_q) {
                                rport = (struct bfa_fcs_rport_s *) qe;
 -                              bfa_fcs_rport_delete(rport);
 +                              bfa_sm_send_event(rport, RPSM_EVENT_DELETE);
                        }
                }
                break;
@@@ -309,7 -309,6 +309,7 @@@ bfa_fcs_lport_plogi(struct bfa_fcs_lpor
                        return;
                }
                port->pid  = rx_fchs->d_id;
 +              bfa_lps_set_n2n_pid(port->fabric->lps, rx_fchs->d_id);
        }
  
        /*
                        (memcmp((void *)&bfa_fcs_lport_get_pwwn(port),
                        (void *)&plogi->port_name, sizeof(wwn_t)) < 0)) {
                        port->pid  = rx_fchs->d_id;
 +                      bfa_lps_set_n2n_pid(port->fabric->lps, rx_fchs->d_id);
                        rport->pid = rx_fchs->s_id;
                }
                bfa_fcs_rport_plogi(rport, rx_fchs, plogi);
                 * This is a different device with the same pid. Old device
                 * disappeared. Send implicit LOGO to old device.
                 */
 -              bfa_assert(rport->pwwn != plogi->port_name);
 -              bfa_fcs_rport_logo_imp(rport);
 +              WARN_ON(rport->pwwn == plogi->port_name);
 +              bfa_sm_send_event(rport, RPSM_EVENT_LOGO_IMP);
  
                /*
                 * Inbound PLOGI from a new device (with old PID).
        /*
         * PLOGI crossing each other.
         */
 -      bfa_assert(rport->pwwn == WWN_NULL);
 +      WARN_ON(rport->pwwn != WWN_NULL);
        bfa_fcs_rport_plogi(rport, rx_fchs, plogi);
  }
  
@@@ -493,7 -491,7 +493,7 @@@ bfa_fcs_lport_online_actions(struct bfa
        __port_action[port->fabric->fab_type].online(port);
  
        wwn2str(lpwwn_buf, bfa_fcs_lport_get_pwwn(port));
 -      BFA_LOG(KERN_INFO, bfad, log_level,
 +      BFA_LOG(KERN_INFO, bfad, bfa_log_level,
                "Logical port online: WWN = %s Role = %s\n",
                lpwwn_buf, "Initiator");
  
@@@ -513,38 -511,37 +513,38 @@@ bfa_fcs_lport_offline_actions(struct bf
        __port_action[port->fabric->fab_type].offline(port);
  
        wwn2str(lpwwn_buf, bfa_fcs_lport_get_pwwn(port));
 -      if (bfa_fcs_fabric_is_online(port->fabric) == BFA_TRUE)
 -              BFA_LOG(KERN_ERR, bfad, log_level,
 +      if (bfa_sm_cmp_state(port->fabric,
 +                      bfa_fcs_fabric_sm_online) == BFA_TRUE)
 +              BFA_LOG(KERN_ERR, bfad, bfa_log_level,
                "Logical port lost fabric connectivity: WWN = %s Role = %s\n",
                lpwwn_buf, "Initiator");
        else
 -              BFA_LOG(KERN_INFO, bfad, log_level,
 +              BFA_LOG(KERN_INFO, bfad, bfa_log_level,
                "Logical port taken offline: WWN = %s Role = %s\n",
                lpwwn_buf, "Initiator");
  
        list_for_each_safe(qe, qen, &port->rport_q) {
                rport = (struct bfa_fcs_rport_s *) qe;
 -              bfa_fcs_rport_offline(rport);
 +              bfa_sm_send_event(rport, RPSM_EVENT_LOGO_IMP);
        }
  }
  
  static void
  bfa_fcs_lport_unknown_init(struct bfa_fcs_lport_s *port)
  {
 -      bfa_assert(0);
 +      WARN_ON(1);
  }
  
  static void
  bfa_fcs_lport_unknown_online(struct bfa_fcs_lport_s *port)
  {
 -      bfa_assert(0);
 +      WARN_ON(1);
  }
  
  static void
  bfa_fcs_lport_unknown_offline(struct bfa_fcs_lport_s *port)
  {
 -      bfa_assert(0);
 +      WARN_ON(1);
  }
  
  static void
@@@ -576,7 -573,7 +576,7 @@@ bfa_fcs_lport_deleted(struct bfa_fcs_lp
        char    lpwwn_buf[BFA_STRING_32];
  
        wwn2str(lpwwn_buf, bfa_fcs_lport_get_pwwn(port));
 -      BFA_LOG(KERN_INFO, bfad, log_level,
 +      BFA_LOG(KERN_INFO, bfad, bfa_log_level,
                "Logical port deleted: WWN = %s Role = %s\n",
                lpwwn_buf, "Initiator");
  
                                port->vport ? port->vport->vport_drv : NULL);
                bfa_fcs_vport_delete_comp(port->vport);
        } else {
 -               bfa_fcs_fabric_port_delete_comp(port->fabric);
 +              bfa_wc_down(&port->fabric->wc);
        }
  }
  
  
 -
 -/*
 - *  fcs_lport_api BFA FCS port API
 - */
 -/*
 - *   Module initialization
 - */
 -void
 -bfa_fcs_lport_modinit(struct bfa_fcs_s *fcs)
 -{
 -
 -}
 -
 -/*
 - *   Module cleanup
 - */
 -void
 -bfa_fcs_lport_modexit(struct bfa_fcs_s *fcs)
 -{
 -      bfa_fcs_modexit_comp(fcs);
 -}
 -
  /*
   * Unsolicited frame receive handling.
   */
@@@ -604,7 -623,6 +604,7 @@@ bfa_fcs_lport_uf_recv(struct bfa_fcs_lp
        struct fc_els_cmd_s *els_cmd = (struct fc_els_cmd_s *) (fchs + 1);
  
        bfa_stats(lport, uf_recvs);
 +      bfa_trc(lport->fcs, fchs->type);
  
        if (!bfa_fcs_lport_is_online(lport)) {
                bfa_stats(lport, uf_recv_drops);
         * Only handles ELS frames for now.
         */
        if (fchs->type != FC_TYPE_ELS) {
 -              bfa_trc(lport->fcs, fchs->type);
 -              bfa_assert(0);
 +              bfa_trc(lport->fcs, fchs->s_id);
 +              bfa_trc(lport->fcs, fchs->d_id);
 +              /* ignore type FC_TYPE_FC_FSS */
 +              if (fchs->type != FC_TYPE_FC_FSS)
 +                      bfa_sm_fault(lport->fcs, fchs->type);
                return;
        }
  
@@@ -777,7 -792,7 +777,7 @@@ bfa_fcs_lport_del_rport
        struct bfa_fcs_lport_s *port,
        struct bfa_fcs_rport_s *rport)
  {
 -      bfa_assert(bfa_q_is_on_q(&port->rport_q, rport));
 +      WARN_ON(!bfa_q_is_on_q(&port->rport_q, rport));
        list_del(&rport->qe);
        port->num_rports--;
  
@@@ -835,8 -850,8 +835,8 @@@ bfa_fcs_lport_attach(struct bfa_fcs_lpo
        lport->fcs = fcs;
        lport->fabric = bfa_fcs_vf_lookup(fcs, vf_id);
        lport->vport = vport;
 -      lport->lp_tag = (vport) ? bfa_lps_get_tag(vport->lps) :
 -                                bfa_lps_get_tag(lport->fabric->lps);
 +      lport->lp_tag = (vport) ? vport->lps->lp_tag :
 +                                lport->fabric->lps->lp_tag;
  
        INIT_LIST_HEAD(&lport->rport_q);
        lport->num_rports = 0;
@@@ -863,7 -878,7 +863,7 @@@ bfa_fcs_lport_init(struct bfa_fcs_lport
                                        vport ? vport->vport_drv : NULL);
  
        wwn2str(lpwwn_buf, bfa_fcs_lport_get_pwwn(lport));
 -      BFA_LOG(KERN_INFO, bfad, log_level,
 +      BFA_LOG(KERN_INFO, bfad, bfa_log_level,
                "New logical port created: WWN = %s Role = %s\n",
                lpwwn_buf, "Initiator");
  
@@@ -888,12 -903,10 +888,12 @@@ bfa_fcs_lport_get_attr
        port_attr->port_cfg = port->port_cfg;
  
        if (port->fabric) {
 -              port_attr->port_type = bfa_fcs_fabric_port_type(port->fabric);
 -              port_attr->loopback = bfa_fcs_fabric_is_loopback(port->fabric);
 +              port_attr->port_type = port->fabric->oper_type;
 +              port_attr->loopback = bfa_sm_cmp_state(port->fabric,
 +                              bfa_fcs_fabric_sm_loopback);
                port_attr->authfail =
 -                      bfa_fcs_fabric_is_auth_failed(port->fabric);
 +                      bfa_sm_cmp_state(port->fabric,
 +                              bfa_fcs_fabric_sm_auth_failed);
                port_attr->fabric_name  = bfa_fcs_lport_get_fabric_name(port);
                memcpy(port_attr->fabric_ip_addr,
                        bfa_fcs_lport_get_fabric_ipaddr(port),
                if (port->vport != NULL) {
                        port_attr->port_type = BFA_PORT_TYPE_VPORT;
                        port_attr->fpma_mac =
 -                              bfa_lps_get_lp_mac(port->vport->lps);
 +                              port->vport->lps->lp_mac;
                } else {
                        port_attr->fpma_mac =
 -                              bfa_lps_get_lp_mac(port->fabric->lps);
 +                              port->fabric->lps->lp_mac;
                }
        } else {
                port_attr->port_type = BFA_PORT_TYPE_UNKNOWN;
@@@ -985,7 -998,6 +985,7 @@@ bfa_fcs_lport_n2n_online(struct bfa_fcs
            ((void *)&pcfg->pwwn, (void *)&n2n_port->rem_port_wwn,
             sizeof(wwn_t)) > 0) {
                port->pid = N2N_LOCAL_PID;
 +              bfa_lps_set_n2n_pid(port->fabric->lps, N2N_LOCAL_PID);
                /*
                 * First, check if we know the device by pwwn.
                 */
                        bfa_trc(port->fcs, rport->pid);
                        bfa_trc(port->fcs, rport->pwwn);
                        rport->pid = N2N_REMOTE_PID;
 -                      bfa_fcs_rport_online(rport);
 +                      bfa_sm_send_event(rport, RPSM_EVENT_PLOGI_SEND);
                        return;
                }
  
                 */
                if (port->num_rports > 0) {
                        rport = bfa_fcs_lport_get_rport_by_pid(port, 0);
 -                      bfa_assert(rport != NULL);
 +                      WARN_ON(rport == NULL);
                        if (rport) {
                                bfa_trc(port->fcs, rport->pwwn);
 -                              bfa_fcs_rport_delete(rport);
 +                              bfa_sm_send_event(rport, RPSM_EVENT_DELETE);
                        }
                }
                bfa_fcs_rport_create(port, N2N_REMOTE_PID);
@@@ -1557,7 -1569,6 +1557,7 @@@ bfa_fcs_lport_fdmi_build_rhba_pyld(stru
        struct fdmi_attr_s *attr;
        u8        *curr_ptr;
        u16        len, count;
 +      u16     templen;
  
        /*
         * get hba attributes
         */
        attr = (struct fdmi_attr_s *) curr_ptr;
        attr->type = cpu_to_be16(FDMI_HBA_ATTRIB_NODENAME);
 -      attr->len = sizeof(wwn_t);
 -      memcpy(attr->value, &bfa_fcs_lport_get_nwwn(port), attr->len);
 -      curr_ptr += sizeof(attr->type) + sizeof(attr->len) + attr->len;
 -      len += attr->len;
 +      templen = sizeof(wwn_t);
 +      memcpy(attr->value, &bfa_fcs_lport_get_nwwn(port), templen);
 +      curr_ptr += sizeof(attr->type) + sizeof(templen) + templen;
 +      len += templen;
        count++;
 -      attr->len = cpu_to_be16(attr->len + sizeof(attr->type) +
 -                           sizeof(attr->len));
 +      attr->len = cpu_to_be16(templen + sizeof(attr->type) +
 +                           sizeof(templen));
  
        /*
         * Manufacturer
         */
        attr = (struct fdmi_attr_s *) curr_ptr;
        attr->type = cpu_to_be16(FDMI_HBA_ATTRIB_MANUFACTURER);
 -      attr->len = (u16) strlen(fcs_hba_attr->manufacturer);
 -      memcpy(attr->value, fcs_hba_attr->manufacturer, attr->len);
 -      attr->len = fc_roundup(attr->len, sizeof(u32));
 -      curr_ptr += sizeof(attr->type) + sizeof(attr->len) + attr->len;
 -      len += attr->len;
 +      templen = (u16) strlen(fcs_hba_attr->manufacturer);
 +      memcpy(attr->value, fcs_hba_attr->manufacturer, templen);
 +      templen = fc_roundup(templen, sizeof(u32));
 +      curr_ptr += sizeof(attr->type) + sizeof(templen) + templen;
 +      len += templen;
        count++;
 -      attr->len = cpu_to_be16(attr->len + sizeof(attr->type) +
 -                           sizeof(attr->len));
 +      attr->len = cpu_to_be16(templen + sizeof(attr->type) +
 +                           sizeof(templen));
  
        /*
         * Serial Number
         */
        attr = (struct fdmi_attr_s *) curr_ptr;
        attr->type = cpu_to_be16(FDMI_HBA_ATTRIB_SERIALNUM);
 -      attr->len = (u16) strlen(fcs_hba_attr->serial_num);
 -      memcpy(attr->value, fcs_hba_attr->serial_num, attr->len);
 -      attr->len = fc_roundup(attr->len, sizeof(u32));
 -      curr_ptr += sizeof(attr->type) + sizeof(attr->len) + attr->len;
 -      len += attr->len;
 +      templen = (u16) strlen(fcs_hba_attr->serial_num);
 +      memcpy(attr->value, fcs_hba_attr->serial_num, templen);
 +      templen = fc_roundup(templen, sizeof(u32));
 +      curr_ptr += sizeof(attr->type) + sizeof(templen) + templen;
 +      len += templen;
        count++;
 -      attr->len = cpu_to_be16(attr->len + sizeof(attr->type) +
 -                           sizeof(attr->len));
 +      attr->len = cpu_to_be16(templen + sizeof(attr->type) +
 +                           sizeof(templen));
  
        /*
         * Model
         */
        attr = (struct fdmi_attr_s *) curr_ptr;
        attr->type = cpu_to_be16(FDMI_HBA_ATTRIB_MODEL);
 -      attr->len = (u16) strlen(fcs_hba_attr->model);
 -      memcpy(attr->value, fcs_hba_attr->model, attr->len);
 -      attr->len = fc_roundup(attr->len, sizeof(u32));
 -      curr_ptr += sizeof(attr->type) + sizeof(attr->len) + attr->len;
 -      len += attr->len;
 +      templen = (u16) strlen(fcs_hba_attr->model);
 +      memcpy(attr->value, fcs_hba_attr->model, templen);
 +      templen = fc_roundup(templen, sizeof(u32));
 +      curr_ptr += sizeof(attr->type) + sizeof(templen) + templen;
 +      len += templen;
        count++;
 -      attr->len = cpu_to_be16(attr->len + sizeof(attr->type) +
 -                           sizeof(attr->len));
 +      attr->len = cpu_to_be16(templen + sizeof(attr->type) +
 +                           sizeof(templen));
  
        /*
         * Model Desc
         */
        attr = (struct fdmi_attr_s *) curr_ptr;
        attr->type = cpu_to_be16(FDMI_HBA_ATTRIB_MODEL_DESC);
 -      attr->len = (u16) strlen(fcs_hba_attr->model_desc);
 -      memcpy(attr->value, fcs_hba_attr->model_desc, attr->len);
 -      attr->len = fc_roundup(attr->len, sizeof(u32));
 -      curr_ptr += sizeof(attr->type) + sizeof(attr->len) + attr->len;
 -      len += attr->len;
 +      templen = (u16) strlen(fcs_hba_attr->model_desc);
 +      memcpy(attr->value, fcs_hba_attr->model_desc, templen);
 +      templen = fc_roundup(templen, sizeof(u32));
 +      curr_ptr += sizeof(attr->type) + sizeof(templen) + templen;
 +      len += templen;
        count++;
 -      attr->len = cpu_to_be16(attr->len + sizeof(attr->type) +
 -                           sizeof(attr->len));
 +      attr->len = cpu_to_be16(templen + sizeof(attr->type) +
 +                           sizeof(templen));
  
        /*
         * H/W Version
        if (fcs_hba_attr->hw_version[0] != '\0') {
                attr = (struct fdmi_attr_s *) curr_ptr;
                attr->type = cpu_to_be16(FDMI_HBA_ATTRIB_HW_VERSION);
 -              attr->len = (u16) strlen(fcs_hba_attr->hw_version);
 -              memcpy(attr->value, fcs_hba_attr->hw_version, attr->len);
 -              attr->len = fc_roundup(attr->len, sizeof(u32));
 -              curr_ptr += sizeof(attr->type) + sizeof(attr->len) + attr->len;
 -              len += attr->len;
 +              templen = (u16) strlen(fcs_hba_attr->hw_version);
 +              memcpy(attr->value, fcs_hba_attr->hw_version, templen);
 +              templen = fc_roundup(templen, sizeof(u32));
 +              curr_ptr += sizeof(attr->type) + sizeof(templen) + templen;
 +              len += templen;
                count++;
 -              attr->len = cpu_to_be16(attr->len + sizeof(attr->type) +
 -                                       sizeof(attr->len));
 +              attr->len = cpu_to_be16(templen + sizeof(attr->type) +
 +                                       sizeof(templen));
        }
  
        /*
         */
        attr = (struct fdmi_attr_s *) curr_ptr;
        attr->type = cpu_to_be16(FDMI_HBA_ATTRIB_DRIVER_VERSION);
 -      attr->len = (u16) strlen(fcs_hba_attr->driver_version);
 -      memcpy(attr->value, fcs_hba_attr->driver_version, attr->len);
 -      attr->len = fc_roundup(attr->len, sizeof(u32));
 -      curr_ptr += sizeof(attr->type) + sizeof(attr->len) + attr->len;
 -      len += attr->len;;
 +      templen = (u16) strlen(fcs_hba_attr->driver_version);
 +      memcpy(attr->value, fcs_hba_attr->driver_version, templen);
 +      templen = fc_roundup(templen, sizeof(u32));
 +      curr_ptr += sizeof(attr->type) + sizeof(templen) + templen;
 +      len += templen;
        count++;
 -      attr->len = cpu_to_be16(attr->len + sizeof(attr->type) +
 -                           sizeof(attr->len));
 +      attr->len = cpu_to_be16(templen + sizeof(attr->type) +
 +                           sizeof(templen));
  
        /*
         * Option Rom Version
        if (fcs_hba_attr->option_rom_ver[0] != '\0') {
                attr = (struct fdmi_attr_s *) curr_ptr;
                attr->type = cpu_to_be16(FDMI_HBA_ATTRIB_ROM_VERSION);
 -              attr->len = (u16) strlen(fcs_hba_attr->option_rom_ver);
 -              memcpy(attr->value, fcs_hba_attr->option_rom_ver, attr->len);
 -              attr->len = fc_roundup(attr->len, sizeof(u32));
 -              curr_ptr += sizeof(attr->type) + sizeof(attr->len) + attr->len;
 -              len += attr->len;
 +              templen = (u16) strlen(fcs_hba_attr->option_rom_ver);
 +              memcpy(attr->value, fcs_hba_attr->option_rom_ver, templen);
 +              templen = fc_roundup(templen, sizeof(u32));
 +              curr_ptr += sizeof(attr->type) + sizeof(templen) + templen;
 +              len += templen;
                count++;
 -              attr->len = cpu_to_be16(attr->len + sizeof(attr->type) +
 -                                       sizeof(attr->len));
 +              attr->len = cpu_to_be16(templen + sizeof(attr->type) +
 +                                       sizeof(templen));
        }
  
        /*
         */
        attr = (struct fdmi_attr_s *) curr_ptr;
        attr->type = cpu_to_be16(FDMI_HBA_ATTRIB_FW_VERSION);
 -      attr->len = (u16) strlen(fcs_hba_attr->driver_version);
 -      memcpy(attr->value, fcs_hba_attr->driver_version, attr->len);
 -      attr->len = fc_roundup(attr->len, sizeof(u32));
 -      curr_ptr += sizeof(attr->type) + sizeof(attr->len) + attr->len;
 -      len += attr->len;
 +      templen = (u16) strlen(fcs_hba_attr->driver_version);
 +      memcpy(attr->value, fcs_hba_attr->driver_version, templen);
 +      templen = fc_roundup(templen, sizeof(u32));
 +      curr_ptr += sizeof(attr->type) + sizeof(templen) + templen;
 +      len += templen;
        count++;
 -      attr->len = cpu_to_be16(attr->len + sizeof(attr->type) +
 -                           sizeof(attr->len));
 +      attr->len = cpu_to_be16(templen + sizeof(attr->type) +
 +                           sizeof(templen));
  
        /*
         * OS Name
        if (fcs_hba_attr->os_name[0] != '\0') {
                attr = (struct fdmi_attr_s *) curr_ptr;
                attr->type = cpu_to_be16(FDMI_HBA_ATTRIB_OS_NAME);
 -              attr->len = (u16) strlen(fcs_hba_attr->os_name);
 -              memcpy(attr->value, fcs_hba_attr->os_name, attr->len);
 -              attr->len = fc_roundup(attr->len, sizeof(u32));
 -              curr_ptr += sizeof(attr->type) + sizeof(attr->len) + attr->len;
 -              len += attr->len;
 +              templen = (u16) strlen(fcs_hba_attr->os_name);
 +              memcpy(attr->value, fcs_hba_attr->os_name, templen);
 +              templen = fc_roundup(templen, sizeof(u32));
 +              curr_ptr += sizeof(attr->type) + sizeof(templen) + templen;
 +              len += templen;
                count++;
 -              attr->len = cpu_to_be16(attr->len + sizeof(attr->type) +
 -                                      sizeof(attr->len));
 +              attr->len = cpu_to_be16(templen + sizeof(attr->type) +
 +                                      sizeof(templen));
        }
  
        /*
         */
        attr = (struct fdmi_attr_s *) curr_ptr;
        attr->type = cpu_to_be16(FDMI_HBA_ATTRIB_MAX_CT);
 -      attr->len = sizeof(fcs_hba_attr->max_ct_pyld);
 -      memcpy(attr->value, &fcs_hba_attr->max_ct_pyld, attr->len);
 -      len += attr->len;
 +      templen = sizeof(fcs_hba_attr->max_ct_pyld);
 +      memcpy(attr->value, &fcs_hba_attr->max_ct_pyld, templen);
 +      len += templen;
        count++;
 -      attr->len = cpu_to_be16(attr->len + sizeof(attr->type) +
 -                           sizeof(attr->len));
 +      attr->len = cpu_to_be16(templen + sizeof(attr->type) +
 +                           sizeof(templen));
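
Each attribute above is the same type/length/value append, and the templen rework keeps the running length in a host-order scratch variable so attr->len is written once, already byte-swapped; previously attr->len itself served as the scratch and was finally overwritten with its own cpu_to_be16() result. A hypothetical userspace helper capturing the pattern (fdmi_append_attr is not part of the driver):

    #include <stdint.h>
    #include <string.h>
    #include <arpa/inet.h>          /* htons() */

    struct fdmi_attr {              /* mirrors the on-wire TLV layout */
            uint16_t type;
            uint16_t len;           /* big-endian: header plus padded value */
            uint8_t  value[];
    };

    #define ROUNDUP4(x)  (((x) + 3u) & ~3u)

    /* Append one attribute at 'curr' and return the advanced cursor. */
    static uint8_t *fdmi_append_attr(uint8_t *curr, uint16_t type,
                                     const void *val, uint16_t vallen)
    {
            struct fdmi_attr *attr = (struct fdmi_attr *)curr;
            uint16_t templen = ROUNDUP4(vallen);    /* pad value to u32 */

            attr->type = htons(type);
            memset(attr->value, 0, templen);        /* zero the pad bytes */
            memcpy(attr->value, val, vallen);
            attr->len = htons(templen + sizeof(attr->type) + sizeof(attr->len));
            return curr + sizeof(attr->type) + sizeof(attr->len) + templen;
    }

    int main(void)
    {
            uint8_t buf[64], *p = buf;

            p = fdmi_append_attr(p, 1, "model-x", 7);
            return (int)(p - buf) == 4 + 8 ? 0 : 1; /* 4-byte header + padded value */
    }
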
  
        /*
         * Update size of payload
@@@ -1834,7 -1845,6 +1834,7 @@@ bfa_fcs_lport_fdmi_build_portattr_block
        u8        *curr_ptr;
        u16        len;
        u8      count = 0;
 +      u16     templen;
  
        /*
         * get port attributes
         */
        attr = (struct fdmi_attr_s *) curr_ptr;
        attr->type = cpu_to_be16(FDMI_PORT_ATTRIB_FC4_TYPES);
 -      attr->len = sizeof(fcs_port_attr.supp_fc4_types);
 -      memcpy(attr->value, fcs_port_attr.supp_fc4_types, attr->len);
 -      curr_ptr += sizeof(attr->type) + sizeof(attr->len) + attr->len;
 -      len += attr->len;
 +      templen = sizeof(fcs_port_attr.supp_fc4_types);
 +      memcpy(attr->value, fcs_port_attr.supp_fc4_types, templen);
 +      curr_ptr += sizeof(attr->type) + sizeof(templen) + templen;
 +      len += templen;
        ++count;
        attr->len =
 -              cpu_to_be16(attr->len + sizeof(attr->type) +
 -                           sizeof(attr->len));
 +              cpu_to_be16(templen + sizeof(attr->type) +
 +                           sizeof(templen));
  
        /*
         * Supported Speed
         */
        attr = (struct fdmi_attr_s *) curr_ptr;
        attr->type = cpu_to_be16(FDMI_PORT_ATTRIB_SUPP_SPEED);
 -      attr->len = sizeof(fcs_port_attr.supp_speed);
 -      memcpy(attr->value, &fcs_port_attr.supp_speed, attr->len);
 -      curr_ptr += sizeof(attr->type) + sizeof(attr->len) + attr->len;
 -      len += attr->len;
 +      templen = sizeof(fcs_port_attr.supp_speed);
 +      memcpy(attr->value, &fcs_port_attr.supp_speed, templen);
 +      curr_ptr += sizeof(attr->type) + sizeof(templen) + templen;
 +      len += templen;
        ++count;
        attr->len =
 -              cpu_to_be16(attr->len + sizeof(attr->type) +
 -                           sizeof(attr->len));
 +              cpu_to_be16(templen + sizeof(attr->type) +
 +                           sizeof(templen));
  
        /*
         * current Port Speed
         */
        attr = (struct fdmi_attr_s *) curr_ptr;
        attr->type = cpu_to_be16(FDMI_PORT_ATTRIB_PORT_SPEED);
 -      attr->len = sizeof(fcs_port_attr.curr_speed);
 -      memcpy(attr->value, &fcs_port_attr.curr_speed, attr->len);
 -      curr_ptr += sizeof(attr->type) + sizeof(attr->len) + attr->len;
 -      len += attr->len;
 +      templen = sizeof(fcs_port_attr.curr_speed);
 +      memcpy(attr->value, &fcs_port_attr.curr_speed, templen);
 +      curr_ptr += sizeof(attr->type) + sizeof(templen) + templen;
 +      len += templen;
        ++count;
 -      attr->len = cpu_to_be16(attr->len + sizeof(attr->type) +
 -                           sizeof(attr->len));
 +      attr->len = cpu_to_be16(templen + sizeof(attr->type) +
 +                           sizeof(templen));
  
        /*
         * max frame size
         */
        attr = (struct fdmi_attr_s *) curr_ptr;
        attr->type = cpu_to_be16(FDMI_PORT_ATTRIB_FRAME_SIZE);
 -      attr->len = sizeof(fcs_port_attr.max_frm_size);
 -      memcpy(attr->value, &fcs_port_attr.max_frm_size, attr->len);
 -      curr_ptr += sizeof(attr->type) + sizeof(attr->len) + attr->len;
 -      len += attr->len;
 +      templen = sizeof(fcs_port_attr.max_frm_size);
 +      memcpy(attr->value, &fcs_port_attr.max_frm_size, templen);
 +      curr_ptr += sizeof(attr->type) + sizeof(templen) + templen;
 +      len += templen;
        ++count;
 -      attr->len = cpu_to_be16(attr->len + sizeof(attr->type) +
 -                           sizeof(attr->len));
 +      attr->len = cpu_to_be16(templen + sizeof(attr->type) +
 +                           sizeof(templen));
  
        /*
         * OS Device Name
        if (fcs_port_attr.os_device_name[0] != '\0') {
                attr = (struct fdmi_attr_s *) curr_ptr;
                attr->type = cpu_to_be16(FDMI_PORT_ATTRIB_DEV_NAME);
 -              attr->len = (u16) strlen(fcs_port_attr.os_device_name);
 -              memcpy(attr->value, fcs_port_attr.os_device_name, attr->len);
 -              attr->len = fc_roundup(attr->len, sizeof(u32));
 -              curr_ptr += sizeof(attr->type) + sizeof(attr->len) + attr->len;
 -              len += attr->len;
 +              templen = (u16) strlen(fcs_port_attr.os_device_name);
 +              memcpy(attr->value, fcs_port_attr.os_device_name, templen);
 +              templen = fc_roundup(templen, sizeof(u32));
 +              curr_ptr += sizeof(attr->type) + sizeof(templen) + templen;
 +              len += templen;
                ++count;
 -              attr->len = cpu_to_be16(attr->len + sizeof(attr->type) +
 -                                      sizeof(attr->len));
 +              attr->len = cpu_to_be16(templen + sizeof(attr->type) +
 +                                      sizeof(templen));
        }
        /*
         * Host Name
        if (fcs_port_attr.host_name[0] != '\0') {
                attr = (struct fdmi_attr_s *) curr_ptr;
                attr->type = cpu_to_be16(FDMI_PORT_ATTRIB_HOST_NAME);
 -              attr->len = (u16) strlen(fcs_port_attr.host_name);
 -              memcpy(attr->value, fcs_port_attr.host_name, attr->len);
 -              attr->len = fc_roundup(attr->len, sizeof(u32));
 -              curr_ptr += sizeof(attr->type) + sizeof(attr->len) + attr->len;
 -              len += attr->len;
 +              templen = (u16) strlen(fcs_port_attr.host_name);
 +              memcpy(attr->value, fcs_port_attr.host_name, templen);
 +              templen = fc_roundup(templen, sizeof(u32));
 +              curr_ptr += sizeof(attr->type) + sizeof(templen) + templen;
 +              len += templen;
                ++count;
 -              attr->len = cpu_to_be16(attr->len + sizeof(attr->type) +
 -                              sizeof(attr->len));
 +              attr->len = cpu_to_be16(templen + sizeof(attr->type) +
 +                              sizeof(templen));
        }
  
        /*
@@@ -2093,7 -2103,7 +2093,7 @@@ bfa_fcs_lport_fdmi_timeout(void *arg
        bfa_sm_send_event(fdmi, FDMISM_EVENT_TIMEOUT);
  }
  
 -void
 +static void
  bfa_fcs_fdmi_get_hbaattr(struct bfa_fcs_lport_fdmi_s *fdmi,
                         struct bfa_fcs_fdmi_hba_attr_s *hba_attr)
  {
        hba_attr->max_ct_pyld = cpu_to_be32(FC_MAX_PDUSZ);
  }
  
 -void
 +static void
  bfa_fcs_fdmi_get_portattr(struct bfa_fcs_lport_fdmi_s *fdmi,
                          struct bfa_fcs_fdmi_port_attr_s *port_attr)
  {
@@@ -2550,7 -2560,7 +2550,7 @@@ bfa_fcs_lport_ms_send_gmal(void *ms_cba
  
        len = fc_gmal_req_build(&fchs, bfa_fcxp_get_reqbuf(fcxp),
                             bfa_fcs_lport_get_fcid(port),
 -                               bfa_lps_get_peer_nwwn(port->fabric->lps));
 +                               port->fabric->lps->pr_nwwn);
  
        bfa_fcxp_send(fcxp, NULL, port->fabric->vf_id, port->lp_tag, BFA_FALSE,
                          FC_CLASS_3, len, &fchs,
@@@ -2750,7 -2760,7 +2750,7 @@@ bfa_fcs_lport_ms_send_gfn(void *ms_cbar
  
        len = fc_gfn_req_build(&fchs, bfa_fcxp_get_reqbuf(fcxp),
                             bfa_fcs_lport_get_fcid(port),
 -                               bfa_lps_get_peer_nwwn(port->fabric->lps));
 +                               port->fabric->lps->pr_nwwn);
  
        bfa_fcxp_send(fcxp, NULL, port->fabric->vf_id, port->lp_tag, BFA_FALSE,
                          FC_CLASS_3, len, &fchs,
@@@ -2826,7 -2836,7 +2826,7 @@@ bfa_fcs_lport_ms_send_plogi(void *ms_cb
        ms->fcxp = fcxp;
  
        len = fc_plogi_build(&fchs, bfa_fcxp_get_reqbuf(fcxp),
 -                           bfa_os_hton3b(FC_MGMT_SERVER),
 +                           bfa_hton3b(FC_MGMT_SERVER),
                             bfa_fcs_lport_get_fcid(port), 0,
                             port->port_cfg.pwwn, port->port_cfg.nwwn,
                                 bfa_fcport_get_maxfrsize(port->fcs->bfa));
@@@ -3583,7 -3593,7 +3583,7 @@@ fcxp = fcxp_alloced ? fcxp_alloced : bf
        ns->fcxp = fcxp;
  
        len = fc_plogi_build(&fchs, bfa_fcxp_get_reqbuf(fcxp),
 -                           bfa_os_hton3b(FC_NAME_SERVER),
 +                           bfa_hton3b(FC_NAME_SERVER),
                             bfa_fcs_lport_get_fcid(port), 0,
                             port->port_cfg.pwwn, port->port_cfg.nwwn,
                                 bfa_fcport_get_maxfrsize(port->fcs->bfa));
@@@ -4140,7 -4150,7 +4140,7 @@@ bfa_fcs_lport_ns_query(struct bfa_fcs_l
        bfa_sm_send_event(ns, NSSM_EVENT_NS_QUERY);
  }
  
 -void
 +static void
  bfa_fcs_lport_ns_boot_target_disc(bfa_fcs_lport_t *port)
  {
  
  
        for (ii = 0 ; ii < nwwns; ++ii) {
                rport = bfa_fcs_rport_create_by_wwn(port, wwns[ii]);
 -              bfa_assert(rport);
 +              WARN_ON(!rport);
        }
  }
  
@@@ -4342,8 -4352,8 +4342,8 @@@ bfa_fcs_lport_scn_send_scr(void *scn_cb
        /* Handle VU registrations for Base port only */
        if ((!port->vport) && bfa_ioc_get_fcmode(&port->fcs->bfa->ioc)) {
                len = fc_scr_build(&fchs, bfa_fcxp_get_reqbuf(fcxp),
 -                              bfa_lps_is_brcd_fabric(port->fabric->lps),
 -                                                      port->pid, 0);
 +                              port->fabric->lps->brcd_switch,
 +                              port->pid, 0);
        } else {
            len = fc_scr_build(&fchs, bfa_fcxp_get_reqbuf(fcxp),
                                    BFA_FALSE,
@@@ -4616,7 -4626,7 +4616,7 @@@ bfa_fcs_lport_scn_process_rscn(struct b
  
  
                default:
 -                      bfa_assert(0);
 +                      WARN_ON(1);
                        nsquery = BFA_TRUE;
                }
        }
@@@ -4662,7 -4672,7 +4662,7 @@@ bfa_fcs_lport_get_rport(struct bfa_fcs_
  
        while ((qe != qh) && (i < nrports)) {
                rport = (struct bfa_fcs_rport_s *) qe;
 -              if (bfa_os_ntoh3b(rport->pid) > 0xFFF000) {
 +              if (bfa_ntoh3b(rport->pid) > 0xFFF000) {
                        qe = bfa_q_next(qe);
                        bfa_trc(fcs, (u32) rport->pwwn);
                        bfa_trc(fcs, rport->pid);
@@@ -4710,7 -4720,7 +4710,7 @@@ bfa_fcs_lport_get_rports(struct bfa_fcs
  
        while ((qe != qh) && (i < *nrports)) {
                rport = (struct bfa_fcs_rport_s *) qe;
 -              if (bfa_os_ntoh3b(rport->pid) > 0xFFF000) {
 +              if (bfa_ntoh3b(rport->pid) > 0xFFF000) {
                        qe = bfa_q_next(qe);
                        bfa_trc(fcs, (u32) rport->pwwn);
                        bfa_trc(fcs, rport->pid);
@@@ -4761,7 -4771,7 +4761,7 @@@ bfa_fcs_lport_get_rport_max_speed(bfa_f
  
        while (qe != qh) {
                rport = (struct bfa_fcs_rport_s *) qe;
 -              if ((bfa_os_ntoh3b(rport->pid) > 0xFFF000) ||
 +              if ((bfa_ntoh3b(rport->pid) > 0xFFF000) ||
                        (bfa_fcs_rport_get_state(rport) ==
                          BFA_RPORT_OFFLINE)) {
                        qe = bfa_q_next(qe);
@@@ -4797,7 -4807,7 +4797,7 @@@ bfa_fcs_lookup_port(struct bfa_fcs_s *f
        struct bfa_fcs_vport_s *vport;
        bfa_fcs_vf_t   *vf;
  
 -      bfa_assert(fcs != NULL);
 +      WARN_ON(fcs == NULL);
  
        vf = bfa_fcs_vf_lookup(fcs, vf_id);
        if (vf == NULL) {
@@@ -4843,7 -4853,7 +4843,7 @@@ bfa_fcs_lport_get_info(struct bfa_fcs_l
                port_info->max_vports_supp =
                        bfa_lps_get_max_vport(port->fcs->bfa);
                port_info->num_vports_inuse =
 -                      bfa_fcs_fabric_vport_count(port->fabric);
 +                      port->fabric->num_vports;
                port_info->max_rports_supp = BFA_FCS_MAX_RPORTS_SUPP;
                port_info->num_rports_inuse = port->num_rports;
        } else {
@@@ -4987,8 -4997,7 +4987,8 @@@ bfa_fcs_vport_sm_created(struct bfa_fcs
  
        switch (event) {
        case BFA_FCS_VPORT_SM_START:
 -              if (bfa_fcs_fabric_is_online(__vport_fabric(vport))
 +              if (bfa_sm_cmp_state(__vport_fabric(vport),
 +                                      bfa_fcs_fabric_sm_online)
                    && bfa_fcs_fabric_npiv_capable(__vport_fabric(vport))) {
                        bfa_sm_set_state(vport, bfa_fcs_vport_sm_fdisc);
                        bfa_fcs_vport_do_fdisc(vport);
@@@ -5071,13 -5080,13 +5071,13 @@@ bfa_fcs_vport_sm_fdisc(struct bfa_fcs_v
        switch (event) {
        case BFA_FCS_VPORT_SM_DELETE:
                bfa_sm_set_state(vport, bfa_fcs_vport_sm_cleanup);
 -              bfa_lps_discard(vport->lps);
 +              bfa_sm_send_event(vport->lps, BFA_LPS_SM_OFFLINE);
                bfa_fcs_lport_delete(&vport->lport);
                break;
  
        case BFA_FCS_VPORT_SM_OFFLINE:
                bfa_sm_set_state(vport, bfa_fcs_vport_sm_offline);
 -              bfa_lps_discard(vport->lps);
 +              bfa_sm_send_event(vport->lps, BFA_LPS_SM_OFFLINE);
                break;
  
        case BFA_FCS_VPORT_SM_RSP_OK:
@@@ -5157,7 -5166,7 +5157,7 @@@ bfa_fcs_vport_sm_online(struct bfa_fcs_
  
        case BFA_FCS_VPORT_SM_OFFLINE:
                bfa_sm_set_state(vport, bfa_fcs_vport_sm_offline);
 -              bfa_lps_discard(vport->lps);
 +              bfa_sm_send_event(vport->lps, BFA_LPS_SM_OFFLINE);
                bfa_fcs_lport_offline(&vport->lport);
                break;
  
@@@ -5257,7 -5266,7 +5257,7 @@@ bfa_fcs_vport_sm_logo(struct bfa_fcs_vp
  
        switch (event) {
        case BFA_FCS_VPORT_SM_OFFLINE:
 -              bfa_lps_discard(vport->lps);
 +              bfa_sm_send_event(vport->lps, BFA_LPS_SM_OFFLINE);
                /*
                 * !!! fall through !!!
                 */
@@@ -5296,14 -5305,14 +5296,14 @@@ bfa_fcs_vport_do_fdisc(struct bfa_fcs_v
  static void
  bfa_fcs_vport_fdisc_rejected(struct bfa_fcs_vport_s *vport)
  {
 -      u8              lsrjt_rsn = bfa_lps_get_lsrjt_rsn(vport->lps);
 -      u8              lsrjt_expl = bfa_lps_get_lsrjt_expl(vport->lps);
 +      u8              lsrjt_rsn = vport->lps->lsrjt_rsn;
 +      u8              lsrjt_expl = vport->lps->lsrjt_expl;
  
        bfa_trc(__vport_fcs(vport), lsrjt_rsn);
        bfa_trc(__vport_fcs(vport), lsrjt_expl);
  
        /* For certain reason codes, we don't want to retry. */
 -      switch (bfa_lps_get_lsrjt_expl(vport->lps)) {
 +      switch (vport->lps->lsrjt_expl) {
        case FC_LS_RJT_EXP_INV_PORT_NAME: /* by brocade */
        case FC_LS_RJT_EXP_INVALID_NPORT_ID: /* by Cisco */
                if (vport->fdisc_retries < BFA_FCS_VPORT_MAX_RETRIES)
@@@ -5467,7 -5476,7 +5467,7 @@@ bfa_fcs_vport_create(struct bfa_fcs_vpo
        if (bfa_fcs_vport_lookup(fcs, vf_id, vport_cfg->pwwn) != NULL)
                return BFA_STATUS_VPORT_EXISTS;
  
 -      if (bfa_fcs_fabric_vport_count(&fcs->fabric) ==
 +      if (fcs->fabric.num_vports ==
                        bfa_lps_get_max_vport(fcs->bfa))
                return BFA_STATUS_VPORT_MAX;
  
@@@ -5609,6 -5618,33 +5609,6 @@@ bfa_fcs_vport_get_attr(struct bfa_fcs_v
        attr->vport_state = bfa_sm_to_state(vport_sm_table, vport->sm);
  }
  
 -/*
 - *    Use this function to get vport's statistics.
 - *
 - *    param[in]       vport   pointer to bfa_fcs_vport_t.
 - *    param[out]      stats   pointer to return vport statistics in
 - *
 - *    return None
 - */
 -void
 -bfa_fcs_vport_get_stats(struct bfa_fcs_vport_s *vport,
 -                      struct bfa_vport_stats_s *stats)
 -{
 -      *stats = vport->vport_stats;
 -}
 -
 -/*
 - *    Use this function to clear vport's statistics.
 - *
 - *    param[in]       vport   pointer to bfa_fcs_vport_t.
 - *
 - *    return None
 - */
 -void
 -bfa_fcs_vport_clr_stats(struct bfa_fcs_vport_s *vport)
 -{
 -      memset(&vport->vport_stats, 0, sizeof(struct bfa_vport_stats_s));
 -}
  
  /*
   *    Lookup a virtual port. Excludes base port from lookup.
@@@ -5646,9 -5682,9 +5646,9 @@@ bfa_cb_lps_fdisc_comp(void *bfad, void 
        switch (status) {
        case BFA_STATUS_OK:
                /*
-                * Initialiaze the V-Port fields
+                * Initialize the V-Port fields
                 */
 -              __vport_fcid(vport) = bfa_lps_get_pid(vport->lps);
 +              __vport_fcid(vport) = vport->lps->lp_pid;
                vport->vport_stats.fdisc_accepts++;
                bfa_sm_send_event(vport, BFA_FCS_VPORT_SM_RSP_OK);
                break;
                break;
  
        case BFA_STATUS_EPROTOCOL:
 -              switch (bfa_lps_get_extstatus(vport->lps)) {
 +              switch (vport->lps->ext_status) {
                case BFA_EPROTO_BAD_ACCEPT:
                        vport->vport_stats.fdisc_acc_bad++;
                        break;
@@@ -57,9 -57,6 +57,9 @@@ struct kmem_cache *scsi_pkt_cachep
  #define FC_SRB_READ           (1 << 1)
  #define FC_SRB_WRITE          (1 << 0)
  
 +/* constant added to e_d_tov timeout to get rec_tov value */
 +#define REC_TOV_CONST         1
 +
  /*
   * The SCp.ptr should be tested and set under the scsi_pkt_queue lock
   */
@@@ -99,7 -96,7 +99,7 @@@ static void fc_fcp_resp(struct fc_fcp_p
  static void fc_fcp_complete_locked(struct fc_fcp_pkt *);
  static void fc_tm_done(struct fc_seq *, struct fc_frame *, void *);
  static void fc_fcp_error(struct fc_fcp_pkt *, struct fc_frame *);
 -static void fc_fcp_recovery(struct fc_fcp_pkt *);
 +static void fc_fcp_recovery(struct fc_fcp_pkt *, u8 code);
  static void fc_fcp_timeout(unsigned long);
  static void fc_fcp_rec(struct fc_fcp_pkt *);
  static void fc_fcp_rec_error(struct fc_fcp_pkt *, struct fc_frame *);
@@@ -123,13 -120,14 +123,13 @@@ static void fc_fcp_srr_error(struct fc_
  #define FC_DATA_UNDRUN                7
  #define FC_ERROR              8
  #define FC_HRD_ERROR          9
 -#define FC_CMD_RECOVERY               10
 +#define FC_CRC_ERROR          10
 +#define FC_TIMED_OUT          11
  
  /*
   * Error recovery timeout values.
   */
 -#define FC_SCSI_ER_TIMEOUT    (10 * HZ)
  #define FC_SCSI_TM_TOV                (10 * HZ)
 -#define FC_SCSI_REC_TOV               (2 * HZ)
  #define FC_HOST_RESET_TIMEOUT (30 * HZ)
  #define FC_CAN_QUEUE_PERIOD   (60 * HZ)
  
@@@ -440,7 -438,6 +440,7 @@@ static void fc_fcp_recv_data(struct fc_
        void *buf;
        struct scatterlist *sg;
        u32 nents;
 +      u8 host_bcode = FC_COMPLETE;
  
        fh = fc_frame_header_get(fp);
        offset = ntohl(fh->fh_parm_offset);
        buf = fc_frame_payload_get(fp, 0);
  
        /*
 -       * if this I/O is ddped then clear it
 -       * and initiate recovery since data
 -       * frames are expected to be placed
 -       * directly in that case.
 +       * if this I/O is ddped then clear it and initiate recovery since data
 +       * frames are expected to be placed directly in that case.
 +       *
 +       * Indicate error to scsi-ml because something went wrong with the
 +       * ddp handling to get us here.
         */
        if (fsp->xfer_ddp != FC_XID_UNKNOWN) {
                fc_fcp_ddp_done(fsp);
 +              FC_FCP_DBG(fsp, "DDP I/O in fc_fcp_recv_data set ERROR\n");
 +              host_bcode = FC_ERROR;
                goto err;
        }
        if (offset + len > fsp->data_len) {
                        goto crc_err;
                FC_FCP_DBG(fsp, "data received past end. len %zx offset %zx "
                           "data_len %x\n", len, offset, fsp->data_len);
 +
 +              /* Data is corrupted indicate scsi-ml should retry */
 +              host_bcode = FC_DATA_OVRRUN;
                goto err;
        }
        if (offset != fsp->xfer_len)
@@@ -507,10 -498,8 +507,10 @@@ crc_err
                         * If so, we need to retry the entire operation.
                         * Otherwise, ignore it.
                         */
 -                      if (fsp->state & FC_SRB_DISCONTIG)
 +                      if (fsp->state & FC_SRB_DISCONTIG) {
 +                              host_bcode = FC_CRC_ERROR;
                                goto err;
 +                      }
                        return;
                }
        }
                fc_fcp_complete_locked(fsp);
        return;
  err:
 -      fc_fcp_recovery(fsp);
 +      fc_fcp_recovery(fsp, host_bcode);
  }
  
  /**
@@@ -973,13 -962,7 +973,13 @@@ static void fc_fcp_complete_locked(stru
                }
                lport->tt.exch_done(seq);
        }
 -      fc_io_compl(fsp);
 +      /*
 +       * Some resets driven by SCSI are not I/Os and do not have
 +       * SCSI commands associated with the requests. We should not
 +       * call I/O completion if we do not have a SCSI command.
 +       */
 +      if (fsp->cmd)
 +              fc_io_compl(fsp);
  }
  
  /**
@@@ -1090,21 -1073,6 +1090,21 @@@ static int fc_fcp_pkt_send(struct fc_lp
  }
  
  /**
 + * get_fsp_rec_tov() - Helper function to get REC_TOV
 + * @fsp: the FCP packet
 + */
 +static inline unsigned int get_fsp_rec_tov(struct fc_fcp_pkt *fsp)
 +{
 +      struct fc_rport *rport;
 +      struct fc_rport_libfc_priv *rpriv;
 +
 +      rport = fsp->rport;
 +      rpriv = rport->dd_data;
 +
 +      return rpriv->e_d_tov + REC_TOV_CONST;
 +}
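
The fixed 2-second FC_SCSI_REC_TOV is thus replaced by a per-rport value derived from the negotiated E_D_TOV plus a one-unit REC_TOV_CONST margin. A trivial userspace sketch of the derivation (structure and field names are stand-ins; the resulting value's units must match whatever fc_fcp_timer_set() expects):

    #include <stdio.h>

    #define REC_TOV_CONST 1                 /* margin on top of e_d_tov, as above */

    struct rport_priv {                     /* stand-in for fc_rport_libfc_priv */
            unsigned int e_d_tov;           /* negotiated error-detect timeout */
    };

    static unsigned int get_rec_tov(const struct rport_priv *rpriv)
    {
            return rpriv->e_d_tov + REC_TOV_CONST;
    }

    int main(void)
    {
            struct rport_priv rp = { .e_d_tov = 2 };        /* example value only */

            printf("rec_tov = %u\n", get_rec_tov(&rp));     /* prints 3 */
            return 0;
    }
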
 +
 +/**
   * fc_fcp_cmd_send() - Send a FCP command
   * @lport: The local port to send the command on
   * @fsp:   The FCP packet the command is on
@@@ -1121,7 -1089,6 +1121,7 @@@ static int fc_fcp_cmd_send(struct fc_lp
        struct fc_rport_libfc_priv *rpriv;
        const size_t len = sizeof(fsp->cdb_cmd);
        int rc = 0;
 +      unsigned int rec_tov;
  
        if (fc_fcp_lock_pkt(fsp))
                return 0;
        fsp->seq_ptr = seq;
        fc_fcp_pkt_hold(fsp);   /* hold for fc_fcp_pkt_destroy */
  
 +      rec_tov = get_fsp_rec_tov(fsp);
 +
        setup_timer(&fsp->timer, fc_fcp_timeout, (unsigned long)fsp);
 -      fc_fcp_timer_set(fsp,
 -                       (fsp->tgt_flags & FC_RP_FLAGS_REC_SUPPORTED) ?
 -                       FC_SCSI_REC_TOV : FC_SCSI_ER_TIMEOUT);
 +
 +      if (rpriv->flags & FC_RP_FLAGS_REC_SUPPORTED)
 +              fc_fcp_timer_set(fsp, rec_tov);
 +
  unlock:
        fc_fcp_unlock_pkt(fsp);
        return rc;
@@@ -1233,16 -1197,13 +1233,16 @@@ static void fc_lun_reset_send(unsigned 
  {
        struct fc_fcp_pkt *fsp = (struct fc_fcp_pkt *)data;
        struct fc_lport *lport = fsp->lp;
 +      unsigned int rec_tov;
 +
        if (lport->tt.fcp_cmd_send(lport, fsp, fc_tm_done)) {
                if (fsp->recov_retry++ >= FC_MAX_RECOV_RETRY)
                        return;
                if (fc_fcp_lock_pkt(fsp))
                        return;
 +              rec_tov = get_fsp_rec_tov(fsp);
                setup_timer(&fsp->timer, fc_lun_reset_send, (unsigned long)fsp);
 -              fc_fcp_timer_set(fsp, FC_SCSI_REC_TOV);
 +              fc_fcp_timer_set(fsp, rec_tov);
                fc_fcp_unlock_pkt(fsp);
        }
  }
  /**
   * fc_lun_reset() - Send a LUN RESET command to a device
   *                and wait for the reply
-  * @lport: The local port to sent the comand on
+  * @lport: The local port to send the command on
   * @fsp:   The FCP packet that identifies the LUN to be reset
   * @id:          The SCSI command ID
   * @lun:   The LUN ID to be reset
@@@ -1321,27 -1282,27 +1321,27 @@@ static void fc_tm_done(struct fc_seq *s
                 *
                 * scsi-eh will escalate for when either happens.
                 */
 -              return;
 +              goto out;
        }
  
        if (fc_fcp_lock_pkt(fsp))
 -              return;
 +              goto out;
  
        /*
         * raced with eh timeout handler.
         */
 -      if (!fsp->seq_ptr || !fsp->wait_for_comp) {
 -              spin_unlock_bh(&fsp->scsi_pkt_lock);
 -              return;
 -      }
 +      if (!fsp->seq_ptr || !fsp->wait_for_comp)
 +              goto out_unlock;
  
        fh = fc_frame_header_get(fp);
        if (fh->fh_type != FC_TYPE_BLS)
                fc_fcp_resp(fsp, fp);
        fsp->seq_ptr = NULL;
        fsp->lp->tt.exch_done(seq);
 -      fc_frame_free(fp);
 +out_unlock:
        fc_fcp_unlock_pkt(fsp);
 +out:
 +      fc_frame_free(fp);
  }
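
The rework of fc_tm_done() above is the usual single-exit cleanup idiom: early returns become gotos so the frame is freed on every path, including the ones that previously returned without freeing it. A generic sketch of the idiom (try_lock/unlock are hypothetical stubs, not libfc calls):

    #include <stdio.h>
    #include <stdlib.h>

    static int locked;
    static int try_lock(void) { locked = 1; return 0; }     /* stub */
    static void unlock(void)  { locked = 0; }               /* stub */

    static int process(void)
    {
            int ret = -1;
            char *buf = malloc(64);

            if (!buf)
                    goto out;               /* nothing else held yet */
            if (try_lock() != 0)
                    goto out_free;          /* release only what we own */

            ret = 0;                        /* ... the actual work ... */
            unlock();
    out_free:
            free(buf);                      /* runs on every path past the alloc */
    out:
            return ret;
    }

    int main(void)
    {
            return process();
    }
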
  
  /**
@@@ -1380,10 -1341,13 +1380,10 @@@ static void fc_fcp_timeout(unsigned lon
  
        if (rpriv->flags & FC_RP_FLAGS_REC_SUPPORTED)
                fc_fcp_rec(fsp);
        else if (fsp->state & FC_SRB_RCV_STATUS)
                fc_fcp_complete_locked(fsp);
        else
 -              fc_fcp_recovery(fsp);
 +              fc_fcp_recovery(fsp, FC_TIMED_OUT);
        fsp->state &= ~FC_SRB_FCP_PROCESSING_TMO;
  unlock:
        fc_fcp_unlock_pkt(fsp);
@@@ -1409,7 -1373,6 +1409,7 @@@ static void fc_fcp_rec(struct fc_fcp_pk
                fc_fcp_complete_locked(fsp);
                return;
        }
 +
        fp = fc_fcp_frame_alloc(lport, sizeof(struct fc_els_rec));
        if (!fp)
                goto retry;
                       FC_FCTL_REQ, 0);
        if (lport->tt.elsct_send(lport, rport->port_id, fp, ELS_REC,
                                 fc_fcp_rec_resp, fsp,
 -                               jiffies_to_msecs(FC_SCSI_REC_TOV))) {
 +                               2 * lport->r_a_tov)) {
                fc_fcp_pkt_hold(fsp);           /* hold while REC outstanding */
                return;
        }
  retry:
        if (fsp->recov_retry++ < FC_MAX_RECOV_RETRY)
 -              fc_fcp_timer_set(fsp, FC_SCSI_REC_TOV);
 +              fc_fcp_timer_set(fsp, get_fsp_rec_tov(fsp));
        else
 -              fc_fcp_recovery(fsp);
 +              fc_fcp_recovery(fsp, FC_TIMED_OUT);
  }
  
  /**
@@@ -1482,6 -1445,7 +1482,6 @@@ static void fc_fcp_rec_resp(struct fc_s
                         * making progress.
                         */
                        rpriv->flags &= ~FC_RP_FLAGS_REC_SUPPORTED;
 -                      fc_fcp_timer_set(fsp, FC_SCSI_ER_TIMEOUT);
                        break;
                case ELS_RJT_LOGIC:
                case ELS_RJT_UNAB:
                                fc_fcp_retry_cmd(fsp);
                                break;
                        }
 -                      fc_fcp_recovery(fsp);
 +                      fc_fcp_recovery(fsp, FC_ERROR);
                        break;
                }
        } else if (opcode == ELS_LS_ACC) {
                        }
                        fc_fcp_srr(fsp, r_ctl, offset);
                } else if (e_stat & ESB_ST_SEQ_INIT) {
 -
 +                      unsigned int rec_tov = get_fsp_rec_tov(fsp);
                        /*
                         * The remote port has the initiative, so just
                         * keep waiting for it to complete.
                         */
 -                      fc_fcp_timer_set(fsp, FC_SCSI_REC_TOV);
 +                      fc_fcp_timer_set(fsp, rec_tov);
                } else {
  
                        /*
@@@ -1611,7 -1575,7 +1611,7 @@@ static void fc_fcp_rec_error(struct fc_
                if (fsp->recov_retry++ < FC_MAX_RECOV_RETRY)
                        fc_fcp_rec(fsp);
                else
 -                      fc_fcp_recovery(fsp);
 +                      fc_fcp_recovery(fsp, FC_ERROR);
                break;
        }
        fc_fcp_unlock_pkt(fsp);
@@@ -1623,9 -1587,9 +1623,9 @@@ out
   * fc_fcp_recovery() - Handler for fcp_pkt recovery
   * @fsp: The FCP pkt that needs to be aborted
   */
 -static void fc_fcp_recovery(struct fc_fcp_pkt *fsp)
 +static void fc_fcp_recovery(struct fc_fcp_pkt *fsp, u8 code)
  {
 -      fsp->status_code = FC_CMD_RECOVERY;
 +      fsp->status_code = code;
        fsp->cdb_status = 0;
        fsp->io_status = 0;
        /*
@@@ -1652,7 -1616,6 +1652,7 @@@ static void fc_fcp_srr(struct fc_fcp_pk
        struct fcp_srr *srr;
        struct fc_frame *fp;
        u8 cdb_op;
 +      unsigned int rec_tov;
  
        rport = fsp->rport;
        rpriv = rport->dd_data;
                       rpriv->local_port->port_id, FC_TYPE_FCP,
                       FC_FCTL_REQ, 0);
  
 +      rec_tov = get_fsp_rec_tov(fsp);
        seq = lport->tt.exch_seq_send(lport, fp, fc_fcp_srr_resp, NULL,
 -                                    fsp, jiffies_to_msecs(FC_SCSI_REC_TOV));
 +                                    fsp, jiffies_to_msecs(rec_tov));
        if (!seq)
                goto retry;
  
@@@ -1703,7 -1665,6 +1703,7 @@@ static void fc_fcp_srr_resp(struct fc_s
  {
        struct fc_fcp_pkt *fsp = arg;
        struct fc_frame_header *fh;
 +      unsigned int rec_tov;
  
        if (IS_ERR(fp)) {
                fc_fcp_srr_error(fsp, fp);
        switch (fc_frame_payload_op(fp)) {
        case ELS_LS_ACC:
                fsp->recov_retry = 0;
 -              fc_fcp_timer_set(fsp, FC_SCSI_REC_TOV);
 +              rec_tov = get_fsp_rec_tov(fsp);
 +              fc_fcp_timer_set(fsp, rec_tov);
                break;
        case ELS_LS_RJT:
        default:
 -              fc_fcp_recovery(fsp);
 +              fc_fcp_recovery(fsp, FC_ERROR);
                break;
        }
        fc_fcp_unlock_pkt(fsp);
@@@ -1761,7 -1721,7 +1761,7 @@@ static void fc_fcp_srr_error(struct fc_
                if (fsp->recov_retry++ < FC_MAX_RECOV_RETRY)
                        fc_fcp_rec(fsp);
                else
 -                      fc_fcp_recovery(fsp);
 +                      fc_fcp_recovery(fsp, FC_TIMED_OUT);
                break;
        case -FC_EX_CLOSED:                     /* e.g., link failure */
                /* fall through */
@@@ -1860,17 -1820,19 +1860,17 @@@ static int fc_queuecommand_lck(struct s
        if (sc_cmd->sc_data_direction == DMA_FROM_DEVICE) {
                fsp->req_flags = FC_SRB_READ;
                stats->InputRequests++;
 -              stats->InputMegabytes = fsp->data_len;
 +              stats->InputBytes += fsp->data_len;
        } else if (sc_cmd->sc_data_direction == DMA_TO_DEVICE) {
                fsp->req_flags = FC_SRB_WRITE;
                stats->OutputRequests++;
 -              stats->OutputMegabytes = fsp->data_len;
 +              stats->OutputBytes += fsp->data_len;
        } else {
                fsp->req_flags = 0;
                stats->ControlRequests++;
        }
        put_cpu();
  
 -      fsp->tgt_flags = rpriv->flags;
 -
        init_timer(&fsp->timer);
        fsp->timer.data = (unsigned long)fsp;
  
@@@ -1984,29 -1946,18 +1984,29 @@@ static void fc_io_compl(struct fc_fcp_p
                break;
        case FC_CMD_ABORTED:
                FC_FCP_DBG(fsp, "Returning DID_ERROR to scsi-ml "
 -                         "due to FC_CMD_ABORTED\n");
 +                        "due to FC_CMD_ABORTED\n");
                sc_cmd->result = (DID_ERROR << 16) | fsp->io_status;
                break;
 -      case FC_CMD_RECOVERY:
 -              sc_cmd->result = (DID_BUS_BUSY << 16) | fsp->io_status;
 -              break;
        case FC_CMD_RESET:
 +              FC_FCP_DBG(fsp, "Returning DID_RESET to scsi-ml "
 +                         "due to FC_CMD_RESET\n");
                sc_cmd->result = (DID_RESET << 16);
                break;
        case FC_HRD_ERROR:
 +              FC_FCP_DBG(fsp, "Returning DID_NO_CONNECT to scsi-ml "
 +                         "due to FC_HRD_ERROR\n");
                sc_cmd->result = (DID_NO_CONNECT << 16);
                break;
 +      case FC_CRC_ERROR:
 +              FC_FCP_DBG(fsp, "Returning DID_PARITY to scsi-ml "
 +                         "due to FC_CRC_ERROR\n");
 +              sc_cmd->result = (DID_PARITY << 16);
 +              break;
 +      case FC_TIMED_OUT:
 +              FC_FCP_DBG(fsp, "Returning DID_BUS_BUSY to scsi-ml "
 +                         "due to FC_TIMED_OUT\n");
 +              sc_cmd->result = (DID_BUS_BUSY << 16) | fsp->io_status;
 +              break;
        default:
                FC_FCP_DBG(fsp, "Returning DID_ERROR to scsi-ml "
                           "due to unknown error\n");
@@@ -2053,7 -2004,7 +2053,7 @@@ int fc_eh_abort(struct scsi_cmnd *sc_cm
        fsp = CMD_SP(sc_cmd);
        if (!fsp) {
                /* command completed while scsi eh was setting up */
 -              spin_unlock_irqrestore(lport->host->host_lock, flags);
 +              spin_unlock_irqrestore(&si->scsi_queue_lock, flags);
                return SUCCESS;
        }
        /* grab a ref so the fsp and sc_cmd cannot be released from under us */
  #define LPFC_MIN_DEVLOSS_TMO 1
  #define LPFC_MAX_DEVLOSS_TMO 255
  
 -#define LPFC_MAX_LINK_SPEED 8
 -#define LPFC_LINK_SPEED_BITMAP 0x00000117
 -#define LPFC_LINK_SPEED_STRING "0, 1, 2, 4, 8"
 -
  /**
   * lpfc_jedec_to_ascii - Hex to ASCII converter according to JEDEC rules
   * @incr: integer to convert.
@@@ -459,7 -463,7 +459,7 @@@ lpfc_link_state_show(struct device *dev
                if (phba->sli.sli_flag & LPFC_MENLO_MAINT)
                        len += snprintf(buf + len, PAGE_SIZE-len,
                                        "   Menlo Maint Mode\n");
 -              else if (phba->fc_topology == TOPOLOGY_LOOP) {
 +              else if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) {
                        if (vport->fc_flag & FC_PUBLIC_LOOP)
                                len += snprintf(buf + len, PAGE_SIZE-len,
                                                "   Public Loop\n");
@@@ -1335,7 -1339,7 +1335,7 @@@ lpfc_##attr##_show(struct device *dev, 
  }
  
  /**
-  * lpfc_param_init - Intializes a cfg attribute
+  * lpfc_param_init - Initializes a cfg attribute
   *
   * Description:
   * Macro that given an attr e.g. hba_queue_depth expands
@@@ -1977,13 -1981,6 +1977,13 @@@ lpfc_param_show(enable_npiv)
  lpfc_param_init(enable_npiv, 1, 0, 1);
  static DEVICE_ATTR(lpfc_enable_npiv, S_IRUGO, lpfc_enable_npiv_show, NULL);
  
 +int lpfc_enable_rrq;
 +module_param(lpfc_enable_rrq, int, 0);
 +MODULE_PARM_DESC(lpfc_enable_rrq, "Enable RRQ functionality");
 +lpfc_param_show(enable_rrq);
 +lpfc_param_init(enable_rrq, 0, 0, 1);
 +static DEVICE_ATTR(lpfc_enable_rrq, S_IRUGO, lpfc_enable_rrq_show, NULL);
 +
  /*
  # lpfc_suppress_link_up:  Bring link up at initialization
  #            0x0  = bring link up (issue MBX_INIT_LINK)
@@@ -2840,8 -2837,14 +2840,8 @@@ static struct bin_attribute sysfs_drvr_
  /*
  # lpfc_link_speed: Link speed selection for initializing the Fibre Channel
  # connection.
 -#       0  = auto select (default)
 -#       1  = 1 Gigabaud
 -#       2  = 2 Gigabaud
 -#       4  = 4 Gigabaud
 -#       8  = 8 Gigabaud
 -# Value range is [0,8]. Default value is 0.
 +# Value range is [0,16]. Default value is 0.
  */
 -
  /**
   * lpfc_link_speed_set - Set the adapters link speed
   * @phba: lpfc_hba pointer.
@@@ -2866,7 -2869,7 +2866,7 @@@ lpfc_link_speed_store(struct device *de
        struct Scsi_Host  *shost = class_to_shost(dev);
        struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
        struct lpfc_hba   *phba = vport->phba;
 -      int val = 0;
 +      int val = LPFC_USER_LINK_SPEED_AUTO;
        int nolip = 0;
        const char *val_buf = buf;
        int err;
        if (sscanf(val_buf, "%i", &val) != 1)
                return -EINVAL;
  
 -      if (((val == LINK_SPEED_1G) && !(phba->lmt & LMT_1Gb)) ||
 -              ((val == LINK_SPEED_2G) && !(phba->lmt & LMT_2Gb)) ||
 -              ((val == LINK_SPEED_4G) && !(phba->lmt & LMT_4Gb)) ||
 -              ((val == LINK_SPEED_8G) && !(phba->lmt & LMT_8Gb)) ||
 -              ((val == LINK_SPEED_10G) && !(phba->lmt & LMT_10Gb)))
 +      if (((val == LPFC_USER_LINK_SPEED_1G) && !(phba->lmt & LMT_1Gb)) ||
 +          ((val == LPFC_USER_LINK_SPEED_2G) && !(phba->lmt & LMT_2Gb)) ||
 +          ((val == LPFC_USER_LINK_SPEED_4G) && !(phba->lmt & LMT_4Gb)) ||
 +          ((val == LPFC_USER_LINK_SPEED_8G) && !(phba->lmt & LMT_8Gb)) ||
 +          ((val == LPFC_USER_LINK_SPEED_10G) && !(phba->lmt & LMT_10Gb)) ||
 +          ((val == LPFC_USER_LINK_SPEED_16G) && !(phba->lmt & LMT_16Gb))) {
 +              lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
 +                              "2879 lpfc_link_speed attribute cannot be set "
 +                              "to %d. Speed is not supported by this port.\n",
 +                              val);
                return -EINVAL;
 -
 -      if ((val >= 0 && val <= 8)
 -              && (LPFC_LINK_SPEED_BITMAP & (1 << val))) {
 +      }
 +      if ((val >= 0) && (val <= LPFC_USER_LINK_SPEED_MAX) &&
 +          (LPFC_USER_LINK_SPEED_BITMAP & (1 << val))) {
                prev_val = phba->cfg_link_speed;
                phba->cfg_link_speed = val;
                if (nolip)
                } else
                        return strlen(buf);
        }
 -
        lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
 -              "%d:0469 lpfc_link_speed attribute cannot be set to %d, "
 -              "allowed range is [0, 8]\n",
 -              phba->brd_no, val);
 +              "0469 lpfc_link_speed attribute cannot be set to %d, "
 +              "allowed values are ["LPFC_LINK_SPEED_STRING"]\n", val);
        return -EINVAL;
  }
  
@@@ -2938,8 -2938,8 +2938,8 @@@ lpfc_param_show(link_speed
  static int
  lpfc_link_speed_init(struct lpfc_hba *phba, int val)
  {
 -      if ((val >= 0 && val <= LPFC_MAX_LINK_SPEED)
 -              && (LPFC_LINK_SPEED_BITMAP & (1 << val))) {
 +      if ((val >= 0) && (val <= LPFC_USER_LINK_SPEED_MAX) &&
 +          (LPFC_USER_LINK_SPEED_BITMAP & (1 << val))) {
                phba->cfg_link_speed = val;
                return 0;
        }
                        "0405 lpfc_link_speed attribute cannot "
                        "be set to %d, allowed values are "
                        "["LPFC_LINK_SPEED_STRING"]\n", val);
 -      phba->cfg_link_speed = 0;
 +      phba->cfg_link_speed = LPFC_USER_LINK_SPEED_AUTO;
        return -EINVAL;
  }
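
Both the store and init paths now share one validity test: the value must
lie in [0, LPFC_USER_LINK_SPEED_MAX] and its bit must be set in a bitmap of
supported speeds, so the sparse set {0, 1, 2, 4, 8, 16} needs no chain of
comparisons. A self-contained sketch of the check; the bitmap constant here
is illustrative, not quoted from the driver:

#include <stdbool.h>
#include <stdio.h>

#define USER_LINK_SPEED_MAX	16
/* bits 0, 1, 2, 4, 8 and 16 set: auto, 1, 2, 4, 8, 16 Gb */
#define USER_LINK_SPEED_BITMAP	0x00010117u

static bool link_speed_valid(int val)
{
	return val >= 0 && val <= USER_LINK_SPEED_MAX &&
	       (USER_LINK_SPEED_BITMAP & (1u << val));
}

int main(void)
{
	int v;

	for (v = 0; v <= USER_LINK_SPEED_MAX; v++)
		if (link_speed_valid(v))
			printf("%d ", v);	/* prints: 0 1 2 4 8 16 */
	printf("\n");
	return 0;
}
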
  
  static DEVICE_ATTR(lpfc_link_speed, S_IRUGO | S_IWUSR,
 -              lpfc_link_speed_show, lpfc_link_speed_store);
 +                 lpfc_link_speed_show, lpfc_link_speed_store);
  
  /*
  # lpfc_aer_support: Support PCIe device Advanced Error Reporting (AER)
@@@ -3305,12 -3305,12 +3305,12 @@@ LPFC_ATTR_R(fcp_eq_count, LPFC_FP_EQN_D
  LPFC_ATTR_R(enable_hba_reset, 1, 0, 1, "Enable HBA resets from the driver.");
  
  /*
 -# lpfc_enable_hba_heartbeat: Enable HBA heartbeat timer..
 +# lpfc_enable_hba_heartbeat: Disable the HBA heartbeat timer by default.
  #       0  = HBA Heartbeat disabled (default)
  #       1  = HBA Heartbeat enabled
  # Value range is [0,1]. Default value is 0.
  */
 -LPFC_ATTR_R(enable_hba_heartbeat, 1, 0, 1, "Enable HBA Heartbeat.");
 +LPFC_ATTR_R(enable_hba_heartbeat, 0, 0, 1, "Enable HBA Heartbeat.");
  
  /*
  # lpfc_enable_bg: Enable BlockGuard (Emulex's Implementation of T10-DIF)
@@@ -3401,7 -3401,6 +3401,7 @@@ struct device_attribute *lpfc_hba_attrs
        &dev_attr_lpfc_fdmi_on,
        &dev_attr_lpfc_max_luns,
        &dev_attr_lpfc_enable_npiv,
 +      &dev_attr_lpfc_enable_rrq,
        &dev_attr_nport_evt_cnt,
        &dev_attr_board_mode,
        &dev_attr_max_vpi,
@@@ -3799,7 -3798,8 +3799,7 @@@ sysfs_mbox_read(struct file *filp, stru
                        }
                        break;
                case MBX_READ_SPARM64:
 -              case MBX_READ_LA:
 -              case MBX_READ_LA64:
 +              case MBX_READ_TOPOLOGY:
                case MBX_REG_LOGIN:
                case MBX_REG_LOGIN64:
                case MBX_CONFIG_PORT:
@@@ -3989,7 -3989,7 +3989,7 @@@ lpfc_get_host_port_type(struct Scsi_Hos
        if (vport->port_type == LPFC_NPIV_PORT) {
                fc_host_port_type(shost) = FC_PORTTYPE_NPIV;
        } else if (lpfc_is_link_up(phba)) {
 -              if (phba->fc_topology == TOPOLOGY_LOOP) {
 +              if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) {
                        if (vport->fc_flag & FC_PUBLIC_LOOP)
                                fc_host_port_type(shost) = FC_PORTTYPE_NLPORT;
                        else
@@@ -4058,26 -4058,23 +4058,26 @@@ lpfc_get_host_speed(struct Scsi_Host *s
  
        if (lpfc_is_link_up(phba)) {
                switch(phba->fc_linkspeed) {
 -                      case LA_1GHZ_LINK:
 -                              fc_host_speed(shost) = FC_PORTSPEED_1GBIT;
 +              case LPFC_LINK_SPEED_1GHZ:
 +                      fc_host_speed(shost) = FC_PORTSPEED_1GBIT;
                        break;
 -                      case LA_2GHZ_LINK:
 -                              fc_host_speed(shost) = FC_PORTSPEED_2GBIT;
 +              case LPFC_LINK_SPEED_2GHZ:
 +                      fc_host_speed(shost) = FC_PORTSPEED_2GBIT;
                        break;
 -                      case LA_4GHZ_LINK:
 -                              fc_host_speed(shost) = FC_PORTSPEED_4GBIT;
 +              case LPFC_LINK_SPEED_4GHZ:
 +                      fc_host_speed(shost) = FC_PORTSPEED_4GBIT;
                        break;
 -                      case LA_8GHZ_LINK:
 -                              fc_host_speed(shost) = FC_PORTSPEED_8GBIT;
 +              case LPFC_LINK_SPEED_8GHZ:
 +                      fc_host_speed(shost) = FC_PORTSPEED_8GBIT;
                        break;
 -                      case LA_10GHZ_LINK:
 -                              fc_host_speed(shost) = FC_PORTSPEED_10GBIT;
 +              case LPFC_LINK_SPEED_10GHZ:
 +                      fc_host_speed(shost) = FC_PORTSPEED_10GBIT;
                        break;
 -                      default:
 -                              fc_host_speed(shost) = FC_PORTSPEED_UNKNOWN;
 +              case LPFC_LINK_SPEED_16GHZ:
 +                      fc_host_speed(shost) = FC_PORTSPEED_16GBIT;
 +                      break;
 +              default:
 +                      fc_host_speed(shost) = FC_PORTSPEED_UNKNOWN;
                        break;
                }
        } else
@@@ -4100,7 -4097,7 +4100,7 @@@ lpfc_get_host_fabric_name (struct Scsi_
        spin_lock_irq(shost->host_lock);
  
        if ((vport->fc_flag & FC_FABRIC) ||
 -          ((phba->fc_topology == TOPOLOGY_LOOP) &&
 +          ((phba->fc_topology == LPFC_TOPOLOGY_LOOP) &&
             (vport->fc_flag & FC_PUBLIC_LOOP)))
                node_name = wwn_to_u64(phba->fc_fabparam.nodeName.u.wwn);
        else
@@@ -4211,11 -4208,11 +4211,11 @@@ lpfc_get_stats(struct Scsi_Host *shost
        hs->invalid_crc_count -= lso->invalid_crc_count;
        hs->error_frames -= lso->error_frames;
  
 -      if (phba->hba_flag & HBA_FCOE_SUPPORT) {
 +      if (phba->hba_flag & HBA_FCOE_MODE) {
                hs->lip_count = -1;
                hs->nos_count = (phba->link_events >> 1);
                hs->nos_count -= lso->link_events;
 -      } else if (phba->fc_topology == TOPOLOGY_LOOP) {
 +      } else if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) {
                hs->lip_count = (phba->fc_eventTag >> 1);
                hs->lip_count -= lso->link_events;
                hs->nos_count = -1;
@@@ -4306,7 -4303,7 +4306,7 @@@ lpfc_reset_stats(struct Scsi_Host *shos
        lso->invalid_tx_word_count = pmb->un.varRdLnk.invalidXmitWord;
        lso->invalid_crc_count = pmb->un.varRdLnk.crcCnt;
        lso->error_frames = pmb->un.varRdLnk.crcCnt;
 -      if (phba->hba_flag & HBA_FCOE_SUPPORT)
 +      if (phba->hba_flag & HBA_FCOE_MODE)
                lso->link_events = (phba->link_events >> 1);
        else
                lso->link_events = (phba->fc_eventTag >> 1);
@@@ -4618,7 -4615,6 +4618,7 @@@ lpfc_get_cfgparam(struct lpfc_hba *phba
        lpfc_link_speed_init(phba, lpfc_link_speed);
        lpfc_poll_tmo_init(phba, lpfc_poll_tmo);
        lpfc_enable_npiv_init(phba, lpfc_enable_npiv);
 +      lpfc_enable_rrq_init(phba, lpfc_enable_rrq);
        lpfc_use_msi_init(phba, lpfc_use_msi);
        lpfc_fcp_imax_init(phba, lpfc_fcp_imax);
        lpfc_fcp_wq_count_init(phba, lpfc_fcp_wq_count);
@@@ -607,8 -607,6 +607,8 @@@ lpfc_work_done(struct lpfc_hba *phba
  
        /* Process SLI4 events */
        if (phba->pci_dev_grp == LPFC_PCI_DEV_OC) {
 +              if (phba->hba_flag & HBA_RRQ_ACTIVE)
 +                      lpfc_handle_rrq_active(phba);
                if (phba->hba_flag & FCP_XRI_ABORT_EVENT)
                        lpfc_sli4_fcp_xri_abort_event_proc(phba);
                if (phba->hba_flag & ELS_XRI_ABORT_EVENT)
@@@ -968,7 -966,6 +968,7 @@@ lpfc_linkup(struct lpfc_hba *phba
        struct lpfc_vport **vports;
        int i;
  
 +      lpfc_cleanup_wt_rrqs(phba);
        phba->link_state = LPFC_LINK_UP;
  
        /* Unblock fabric iocbs if they are blocked */
@@@ -1067,7 -1064,7 +1067,7 @@@ lpfc_mbx_cmpl_local_config_link(struct 
  
        mempool_free(pmb, phba->mbox_mem_pool);
  
 -      if (phba->fc_topology == TOPOLOGY_LOOP &&
 +      if (phba->fc_topology == LPFC_TOPOLOGY_LOOP &&
            vport->fc_flag & FC_PUBLIC_LOOP &&
            !(vport->fc_flag & FC_LBIT)) {
                        /* Need to wait for FAN - use discovery timer
        /* Start discovery by sending a FLOGI. port_state is identically
         * LPFC_FLOGI while waiting for FLOGI cmpl
         */
 -      if (vport->port_state != LPFC_FLOGI) {
 +      if (vport->port_state != LPFC_FLOGI)
                lpfc_initial_flogi(vport);
 -      }
        return;
  
  out:
@@@ -1133,7 -1131,7 +1133,7 @@@ lpfc_mbx_cmpl_reg_fcfi(struct lpfc_hba 
        if (vport->port_state != LPFC_FLOGI) {
                phba->hba_flag |= FCF_RR_INPROG;
                spin_unlock_irq(&phba->hbalock);
 -              lpfc_initial_flogi(vport);
 +              lpfc_issue_init_vfi(vport);
                goto out;
        }
        spin_unlock_irq(&phba->hbalock);
@@@ -1355,7 -1353,7 +1355,7 @@@ lpfc_register_fcf(struct lpfc_hba *phba
                if (phba->pport->port_state != LPFC_FLOGI) {
                        phba->hba_flag |= FCF_RR_INPROG;
                        spin_unlock_irq(&phba->hbalock);
 -                      lpfc_initial_flogi(phba->pport);
 +                      lpfc_issue_init_vfi(phba->pport);
                        return;
                }
                spin_unlock_irq(&phba->hbalock);
@@@ -2333,7 -2331,7 +2333,7 @@@ lpfc_mbx_cmpl_fcf_rr_read_fcf_rec(struc
                                phba->fcf.current_rec.fcf_indx, fcf_index);
                /* Wait 500 ms before retrying FLOGI to current FCF */
                msleep(500);
 -              lpfc_initial_flogi(phba->pport);
 +              lpfc_issue_init_vfi(phba->pport);
                goto out;
        }
  
  }
  
  /**
 + * lpfc_init_vfi_cmpl - Completion handler for init_vfi mbox command.
 + * @phba: pointer to lpfc hba data structure.
 + * @mboxq: pointer to mailbox data structure.
 + *
 + * This function handles completion of the init_vfi mailbox command.
 + */
 +void
 +lpfc_init_vfi_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
 +{
 +      struct lpfc_vport *vport = mboxq->vport;
 +
 +      if (mboxq->u.mb.mbxStatus && (mboxq->u.mb.mbxStatus != 0x4002)) {
 +              lpfc_printf_vlog(vport, KERN_ERR,
 +                              LOG_MBOX,
 +                              "2891 Init VFI mailbox failed 0x%x\n",
 +                              mboxq->u.mb.mbxStatus);
 +              mempool_free(mboxq, phba->mbox_mem_pool);
 +              lpfc_vport_set_state(vport, FC_VPORT_FAILED);
 +              return;
 +      }
 +      lpfc_initial_flogi(vport);
 +      mempool_free(mboxq, phba->mbox_mem_pool);
 +      return;
 +}
 +
 +/**
 + * lpfc_issue_init_vfi - Issue init_vfi mailbox command.
 + * @vport: pointer to lpfc_vport data structure.
 + *
 + * This function issues an init_vfi mailbox command to initialize the VFI and
 + * VPI for the physical port.
 + */
 +void
 +lpfc_issue_init_vfi(struct lpfc_vport *vport)
 +{
 +      LPFC_MBOXQ_t *mboxq;
 +      int rc;
 +      struct lpfc_hba *phba = vport->phba;
 +
 +      mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
 +      if (!mboxq) {
 +              lpfc_printf_vlog(vport, KERN_ERR,
 +                      LOG_MBOX, "2892 Failed to allocate "
 +                      "init_vfi mailbox\n");
 +              return;
 +      }
 +      lpfc_init_vfi(mboxq, vport);
 +      mboxq->mbox_cmpl = lpfc_init_vfi_cmpl;
 +      rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
 +      if (rc == MBX_NOT_FINISHED) {
 +              lpfc_printf_vlog(vport, KERN_ERR,
 +                      LOG_MBOX, "2893 Failed to issue init_vfi mailbox\n");
 +              mempool_free(mboxq, vport->phba->mbox_mem_pool);
 +      }
 +}
 +
 +/**
   * lpfc_init_vpi_cmpl - Completion handler for init_vpi mbox command.
   * @phba: pointer to lpfc hba data structure.
   * @mboxq: pointer to mailbox data structure.
@@@ -2587,7 -2528,7 +2587,7 @@@ lpfc_start_fdiscs(struct lpfc_hba *phba
                                                     FC_VPORT_FAILED);
                                continue;
                        }
 -                      if (phba->fc_topology == TOPOLOGY_LOOP) {
 +                      if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) {
                                lpfc_vport_set_state(vports[i],
                                                     FC_VPORT_LINKDOWN);
                                continue;
@@@ -2623,7 -2564,7 +2623,7 @@@ lpfc_mbx_cmpl_reg_vfi(struct lpfc_hba *
                         "2018 REG_VFI mbxStatus error x%x "
                         "HBA state x%x\n",
                         mboxq->u.mb.mbxStatus, vport->port_state);
 -              if (phba->fc_topology == TOPOLOGY_LOOP) {
 +              if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) {
                        /* FLOGI failed, use loop map to make discovery list */
                        lpfc_disc_list_loopmap(vport);
                        /* Start discovery */
        spin_unlock_irq(shost->host_lock);
  
        if (vport->port_state == LPFC_FABRIC_CFG_LINK) {
 -              lpfc_start_fdiscs(phba);
 -              lpfc_do_scr_ns_plogi(phba, vport);
 +              /* For private loop just start discovery and we are done. */
 +              if ((phba->fc_topology == LPFC_TOPOLOGY_LOOP) &&
 +                  (phba->alpa_map[0] == 0) &&
 +                  !(vport->fc_flag & FC_PUBLIC_LOOP)) {
 +                      /* Use loop map to make discovery list */
 +                      lpfc_disc_list_loopmap(vport);
 +                      /* Start discovery */
 +                      lpfc_disc_start(vport);
 +              } else {
 +                      lpfc_start_fdiscs(phba);
 +                      lpfc_do_scr_ns_plogi(phba, vport);
 +              }
        }
  
  fail_free_mem:
@@@ -2713,7 -2644,7 +2713,7 @@@ out
  }
  
  static void
 -lpfc_mbx_process_link_up(struct lpfc_hba *phba, READ_LA_VAR *la)
 +lpfc_mbx_process_link_up(struct lpfc_hba *phba, struct lpfc_mbx_read_top *la)
  {
        struct lpfc_vport *vport = phba->pport;
        LPFC_MBOXQ_t *sparam_mbox, *cfglink_mbox = NULL;
        struct fcf_record *fcf_record;
  
        spin_lock_irq(&phba->hbalock);
 -      switch (la->UlnkSpeed) {
 -      case LA_1GHZ_LINK:
 -              phba->fc_linkspeed = LA_1GHZ_LINK;
 -              break;
 -      case LA_2GHZ_LINK:
 -              phba->fc_linkspeed = LA_2GHZ_LINK;
 -              break;
 -      case LA_4GHZ_LINK:
 -              phba->fc_linkspeed = LA_4GHZ_LINK;
 -              break;
 -      case LA_8GHZ_LINK:
 -              phba->fc_linkspeed = LA_8GHZ_LINK;
 -              break;
 -      case LA_10GHZ_LINK:
 -              phba->fc_linkspeed = LA_10GHZ_LINK;
 +      switch (bf_get(lpfc_mbx_read_top_link_spd, la)) {
 +      case LPFC_LINK_SPEED_1GHZ:
 +      case LPFC_LINK_SPEED_2GHZ:
 +      case LPFC_LINK_SPEED_4GHZ:
 +      case LPFC_LINK_SPEED_8GHZ:
 +      case LPFC_LINK_SPEED_10GHZ:
 +      case LPFC_LINK_SPEED_16GHZ:
 +              phba->fc_linkspeed = bf_get(lpfc_mbx_read_top_link_spd, la);
                break;
        default:
 -              phba->fc_linkspeed = LA_UNKNW_LINK;
 +              phba->fc_linkspeed = LPFC_LINK_SPEED_UNKNOWN;
                break;
        }
  
 -      phba->fc_topology = la->topology;
 +      phba->fc_topology = bf_get(lpfc_mbx_read_top_topology, la);
        phba->link_flag &= ~LS_NPIV_FAB_SUPPORTED;
  
 -      if (phba->fc_topology == TOPOLOGY_LOOP) {
 +      if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) {
                phba->sli3_options &= ~LPFC_SLI3_NPIV_ENABLED;
  
                /* if npiv is enabled and this adapter supports npiv log
                                "1309 Link Up Event npiv not supported in loop "
                                "topology\n");
                                /* Get Loop Map information */
 -              if (la->il)
 +              if (bf_get(lpfc_mbx_read_top_il, la))
                        vport->fc_flag |= FC_LBIT;
  
 -              vport->fc_myDID = la->granted_AL_PA;
 -              i = la->un.lilpBde64.tus.f.bdeSize;
 +              vport->fc_myDID = bf_get(lpfc_mbx_read_top_alpa_granted, la);
 +              i = la->lilpBde64.tus.f.bdeSize;
  
                if (i == 0) {
                        phba->alpa_map[0] = 0;
                goto out;
        }
  
 -      if (!(phba->hba_flag & HBA_FCOE_SUPPORT)) {
 +      if (!(phba->hba_flag & HBA_FCOE_MODE)) {
                cfglink_mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
                if (!cfglink_mbox)
                        goto out;
                        if (unlikely(!fcf_record)) {
                                lpfc_printf_log(phba, KERN_ERR,
                                        LOG_MBOX | LOG_SLI,
-                                       "2554 Could not allocate memmory for "
+                                       "2554 Could not allocate memory for "
                                        "fcf record\n");
                                rc = -ENODEV;
                                goto out;
@@@ -2936,17 -2874,17 +2936,17 @@@ lpfc_mbx_issue_link_down(struct lpfc_hb
  
  
  /*
 - * This routine handles processing a READ_LA mailbox
 + * This routine handles processing a READ_TOPOLOGY mailbox
   * command upon completion. It is setup in the LPFC_MBOXQ
   * as the completion routine when the command is
   * handed off to the SLI layer.
   */
  void
 -lpfc_mbx_cmpl_read_la(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
 +lpfc_mbx_cmpl_read_topology(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
  {
        struct lpfc_vport *vport = pmb->vport;
        struct Scsi_Host  *shost = lpfc_shost_from_vport(vport);
 -      READ_LA_VAR *la;
 +      struct lpfc_mbx_read_top *la;
        MAILBOX_t *mb = &pmb->u.mb;
        struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) (pmb->context1);
  
                                mb->mbxStatus, vport->port_state);
                lpfc_mbx_issue_link_down(phba);
                phba->link_state = LPFC_HBA_ERROR;
 -              goto lpfc_mbx_cmpl_read_la_free_mbuf;
 +              goto lpfc_mbx_cmpl_read_topology_free_mbuf;
        }
  
 -      la = (READ_LA_VAR *) &pmb->u.mb.un.varReadLA;
 +      la = (struct lpfc_mbx_read_top *) &pmb->u.mb.un.varReadTop;
  
        memcpy(&phba->alpa_map[0], mp->virt, 128);
  
        spin_lock_irq(shost->host_lock);
 -      if (la->pb)
 +      if (bf_get(lpfc_mbx_read_top_pb, la))
                vport->fc_flag |= FC_BYPASSED_MODE;
        else
                vport->fc_flag &= ~FC_BYPASSED_MODE;
        if ((phba->fc_eventTag  < la->eventTag) ||
            (phba->fc_eventTag == la->eventTag)) {
                phba->fc_stat.LinkMultiEvent++;
 -              if (la->attType == AT_LINK_UP)
 +              if (bf_get(lpfc_mbx_read_top_att_type, la) == LPFC_ATT_LINK_UP)
                        if (phba->fc_eventTag != 0)
                                lpfc_linkdown(phba);
        }
  
        phba->fc_eventTag = la->eventTag;
        spin_lock_irq(&phba->hbalock);
 -      if (la->mm)
 +      if (bf_get(lpfc_mbx_read_top_mm, la))
                phba->sli.sli_flag |= LPFC_MENLO_MAINT;
        else
                phba->sli.sli_flag &= ~LPFC_MENLO_MAINT;
        spin_unlock_irq(&phba->hbalock);
  
        phba->link_events++;
 -      if (la->attType == AT_LINK_UP && (!la->mm)) {
 +      if ((bf_get(lpfc_mbx_read_top_att_type, la) == LPFC_ATT_LINK_UP) &&
 +          (!bf_get(lpfc_mbx_read_top_mm, la))) {
                phba->fc_stat.LinkUp++;
                if (phba->link_flag & LS_LOOPBACK_MODE) {
                        lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
                                        "1306 Link Up Event in loop back mode "
                                        "x%x received Data: x%x x%x x%x x%x\n",
                                        la->eventTag, phba->fc_eventTag,
 -                                      la->granted_AL_PA, la->UlnkSpeed,
 +                                      bf_get(lpfc_mbx_read_top_alpa_granted,
 +                                             la),
 +                                      bf_get(lpfc_mbx_read_top_link_spd, la),
                                        phba->alpa_map[0]);
                } else {
                        lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
                                        "1303 Link Up Event x%x received "
                                        "Data: x%x x%x x%x x%x x%x x%x %d\n",
                                        la->eventTag, phba->fc_eventTag,
 -                                      la->granted_AL_PA, la->UlnkSpeed,
 +                                      bf_get(lpfc_mbx_read_top_alpa_granted,
 +                                             la),
 +                                      bf_get(lpfc_mbx_read_top_link_spd, la),
                                        phba->alpa_map[0],
 -                                      la->mm, la->fa,
 +                                      bf_get(lpfc_mbx_read_top_mm, la),
 +                                      bf_get(lpfc_mbx_read_top_fa, la),
                                        phba->wait_4_mlo_maint_flg);
                }
                lpfc_mbx_process_link_up(phba, la);
 -      } else if (la->attType == AT_LINK_DOWN) {
 +      } else if (bf_get(lpfc_mbx_read_top_att_type, la) ==
 +                 LPFC_ATT_LINK_DOWN) {
                phba->fc_stat.LinkDown++;
                if (phba->link_flag & LS_LOOPBACK_MODE) {
                        lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
                                "Data: x%x x%x x%x x%x x%x\n",
                                la->eventTag, phba->fc_eventTag,
                                phba->pport->port_state, vport->fc_flag,
 -                              la->mm, la->fa);
 +                              bf_get(lpfc_mbx_read_top_mm, la),
 +                              bf_get(lpfc_mbx_read_top_fa, la));
                }
                lpfc_mbx_issue_link_down(phba);
        }
 -      if (la->mm && la->attType == AT_LINK_UP) {
 +      if ((bf_get(lpfc_mbx_read_top_mm, la)) &&
 +          (bf_get(lpfc_mbx_read_top_att_type, la) == LPFC_ATT_LINK_UP)) {
                if (phba->link_state != LPFC_LINK_DOWN) {
                        phba->fc_stat.LinkDown++;
                        lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
                }
        }
  
 -      if (la->fa) {
 -              if (la->mm)
 +      if (bf_get(lpfc_mbx_read_top_fa, la)) {
 +              if (bf_get(lpfc_mbx_read_top_mm, la))
                        lpfc_issue_clear_la(phba, vport);
                lpfc_printf_log(phba, KERN_INFO, LOG_LINK_EVENT,
 -                              "1311 fa %d\n", la->fa);
 +                              "1311 fa %d\n",
 +                              bf_get(lpfc_mbx_read_top_fa, la));
        }
  
 -lpfc_mbx_cmpl_read_la_free_mbuf:
 +lpfc_mbx_cmpl_read_topology_free_mbuf:
        lpfc_mbuf_free(phba, mp->virt, mp->phys);
        kfree(mp);
        mempool_free(pmb, phba->mbox_mem_pool);
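
Throughout this completion routine, direct bitfield members (la->pb,
la->mm, la->fa) give way to bf_get() accessors, which decode each field
with an explicit shift and mask on a 32-bit word instead of relying on the
compiler's bitfield layout. A standalone sketch of that accessor style;
the macro names and field positions are invented, while the real lpfc
macros are generated from *_SHIFT/*_MASK/*_WORD definitions:

#include <stdint.h>
#include <stdio.h>

#define FIELD_GET(w, shift, mask)	(((w) >> (shift)) & (mask))
#define FIELD_SET(w, shift, mask, v) \
	((w) = ((w) & ~((uint32_t)(mask) << (shift))) | \
	       (((uint32_t)(v) & (mask)) << (shift)))

#define ATT_TYPE_SHIFT	8		/* invented field positions */
#define ATT_TYPE_MASK	0xff
#define LINK_SPD_SHIFT	16
#define LINK_SPD_MASK	0xff

int main(void)
{
	uint32_t word0 = 0;

	FIELD_SET(word0, ATT_TYPE_SHIFT, ATT_TYPE_MASK, 0x01);
	FIELD_SET(word0, LINK_SPD_SHIFT, LINK_SPD_MASK, 0x08);
	/* prints word0=0x00080100 att=0x01 spd=0x08 */
	printf("word0=0x%08x att=0x%02x spd=0x%02x\n",
	       (unsigned int)word0,
	       (unsigned int)FIELD_GET(word0, ATT_TYPE_SHIFT, ATT_TYPE_MASK),
	       (unsigned int)FIELD_GET(word0, LINK_SPD_SHIFT, LINK_SPD_MASK));
	return 0;
}
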
@@@ -3102,8 -3030,8 +3102,8 @@@ lpfc_mbx_cmpl_reg_login(struct lpfc_hb
        if (ndlp->nlp_flag & NLP_REG_LOGIN_SEND)
                ndlp->nlp_flag &= ~NLP_REG_LOGIN_SEND;
  
 -      if (ndlp->nlp_flag &  NLP_IGNR_REG_CMPL ||
 -              ndlp->nlp_state != NLP_STE_REG_LOGIN_ISSUE) {
 +      if (ndlp->nlp_flag & NLP_IGNR_REG_CMPL ||
 +          ndlp->nlp_state != NLP_STE_REG_LOGIN_ISSUE) {
                /* We rcvd a rscn after issuing this
                 * mbox reg login, we may have cycled
                 * back through the state and be
                spin_lock_irq(shost->host_lock);
                ndlp->nlp_flag &= ~NLP_IGNR_REG_CMPL;
                spin_unlock_irq(shost->host_lock);
 -              if (phba->sli_rev == LPFC_SLI_REV4)
 -                      lpfc_sli4_free_rpi(phba,
 -                              pmb->u.mb.un.varRegLogin.rpi);
 -
        } else
                /* Good status, call state machine */
                lpfc_disc_state_machine(vport, ndlp, pmb,
@@@ -3160,7 -3092,6 +3160,7 @@@ lpfc_mbx_cmpl_unreg_vpi(struct lpfc_hb
        spin_unlock_irq(shost->host_lock);
        vport->unreg_vpi_cmpl = VPORT_OK;
        mempool_free(pmb, phba->mbox_mem_pool);
 +      lpfc_cleanup_vports_rrqs(vport);
        /*
         * This shost reference might have been taken at the beginning of
         * lpfc_vport_delete()
@@@ -3402,7 -3333,7 +3402,7 @@@ lpfc_mbx_cmpl_fabric_reg_login(struct l
                kfree(mp);
                mempool_free(pmb, phba->mbox_mem_pool);
  
 -              if (phba->fc_topology == TOPOLOGY_LOOP) {
 +              if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) {
                        /* FLOGI failed, use loop map to make discovery list */
                        lpfc_disc_list_loopmap(vport);
  
        }
  
        ndlp->nlp_rpi = mb->un.varWords[0];
 -      ndlp->nlp_flag |= NLP_RPI_VALID;
 +      ndlp->nlp_flag |= NLP_RPI_REGISTERED;
        ndlp->nlp_type |= NLP_FABRIC;
        lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
  
@@@ -3482,7 -3413,7 +3482,7 @@@ out
                /* If no other thread is using the ndlp, free it */
                lpfc_nlp_not_used(ndlp);
  
 -              if (phba->fc_topology == TOPOLOGY_LOOP) {
 +              if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) {
                        /*
                         * RegLogin failed, use loop map to make discovery
                         * list
        }
  
        ndlp->nlp_rpi = mb->un.varWords[0];
 -      ndlp->nlp_flag |= NLP_RPI_VALID;
 +      ndlp->nlp_flag |= NLP_RPI_REGISTERED;
        ndlp->nlp_type |= NLP_FABRIC;
        lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
  
@@@ -3831,8 -3762,6 +3831,8 @@@ lpfc_initialize_node(struct lpfc_vport 
        NLP_INT_NODE_ACT(ndlp);
        atomic_set(&ndlp->cmd_pending, 0);
        ndlp->cmd_qdepth = vport->cfg_tgt_queue_depth;
 +      if (vport->phba->sli_rev == LPFC_SLI_REV4)
 +              ndlp->nlp_rpi = lpfc_sli4_alloc_rpi(vport->phba);
  }
  
  struct lpfc_nodelist *
@@@ -4046,7 -3975,7 +4046,7 @@@ lpfc_no_rpi(struct lpfc_hba *phba, stru
         * by firmware with a no rpi error.
         */
        psli = &phba->sli;
 -      if (ndlp->nlp_flag & NLP_RPI_VALID) {
 +      if (ndlp->nlp_flag & NLP_RPI_REGISTERED) {
                /* Now process each ring */
                for (i = 0; i < psli->num_rings; i++) {
                        pring = &psli->ring[i];
@@@ -4094,7 -4023,7 +4094,7 @@@ lpfc_unreg_rpi(struct lpfc_vport *vport
        LPFC_MBOXQ_t    *mbox;
        int rc;
  
 -      if (ndlp->nlp_flag & NLP_RPI_VALID) {
 +      if (ndlp->nlp_flag & NLP_RPI_REGISTERED) {
                mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
                if (mbox) {
                        lpfc_unreg_login(phba, vport->vpi, ndlp->nlp_rpi, mbox);
                }
                lpfc_no_rpi(phba, ndlp);
  
 -              ndlp->nlp_rpi = 0;
 -              ndlp->nlp_flag &= ~NLP_RPI_VALID;
 +              if (phba->sli_rev != LPFC_SLI_REV4)
 +                      ndlp->nlp_rpi = 0;
 +              ndlp->nlp_flag &= ~NLP_RPI_REGISTERED;
                ndlp->nlp_flag &= ~NLP_NPR_ADISC;
                return 1;
        }
@@@ -4131,16 -4059,11 +4131,16 @@@ lpfc_unreg_hba_rpis(struct lpfc_hba *ph
        int i;
  
        vports = lpfc_create_vport_work_array(phba);
 +      if (!vports) {
 +              lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
 +                      "2884 Vport array allocation failed\n");
 +              return;
 +      }
        for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
                shost = lpfc_shost_from_vport(vports[i]);
                spin_lock_irq(shost->host_lock);
                list_for_each_entry(ndlp, &vports[i]->fc_nodes, nlp_listp) {
 -                      if (ndlp->nlp_flag & NLP_RPI_VALID) {
 +                      if (ndlp->nlp_flag & NLP_RPI_REGISTERED) {
                                /* The mempool_alloc might sleep */
                                spin_unlock_irq(shost->host_lock);
                                lpfc_unreg_rpi(vports[i], ndlp);
@@@ -4269,6 -4192,9 +4269,6 @@@ lpfc_cleanup_node(struct lpfc_vport *vp
                                kfree(mp);
                        }
                        list_del(&mb->list);
 -                      if (phba->sli_rev == LPFC_SLI_REV4)
 -                              lpfc_sli4_free_rpi(phba,
 -                                       mb->u.mb.un.varRegLogin.rpi);
                        mempool_free(mb, phba->mbox_mem_pool);
                        /* We shall not invoke the lpfc_nlp_put to decrement
                         * the ndlp reference count as we are in the process
@@@ -4310,15 -4236,15 +4310,15 @@@ lpfc_nlp_remove(struct lpfc_vport *vpor
  
        lpfc_cancel_retry_delay_tmo(vport, ndlp);
        if ((ndlp->nlp_flag & NLP_DEFER_RM) &&
 -              !(ndlp->nlp_flag & NLP_REG_LOGIN_SEND) &&
 -          !(ndlp->nlp_flag & NLP_RPI_VALID)) {
 +          !(ndlp->nlp_flag & NLP_REG_LOGIN_SEND) &&
 +          !(ndlp->nlp_flag & NLP_RPI_REGISTERED)) {
                /* For this case we need to cleanup the default rpi
                 * allocated by the firmware.
                 */
                if ((mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL))
                        != NULL) {
                        rc = lpfc_reg_rpi(phba, vport->vpi, ndlp->nlp_DID,
 -                          (uint8_t *) &vport->fc_sparam, mbox, 0);
 +                          (uint8_t *) &vport->fc_sparam, mbox, ndlp->nlp_rpi);
                        if (rc) {
                                mempool_free(mbox, phba->mbox_mem_pool);
                        }
@@@ -4510,7 -4436,7 +4510,7 @@@ lpfc_disc_list_loopmap(struct lpfc_vpor
        if (!lpfc_is_link_up(phba))
                return;
  
 -      if (phba->fc_topology != TOPOLOGY_LOOP)
 +      if (phba->fc_topology != LPFC_TOPOLOGY_LOOP)
                return;
  
        /* Check for loop map present or not */
@@@ -4862,10 -4788,7 +4862,10 @@@ lpfc_disc_timeout_handler(struct lpfc_v
                        }
                }
                if (vport->port_state != LPFC_FLOGI) {
 -                      lpfc_initial_flogi(vport);
 +                      if (phba->sli_rev <= LPFC_SLI_REV3)
 +                              lpfc_initial_flogi(vport);
 +                      else
 +                              lpfc_issue_init_vfi(vport);
                        return;
                }
                break;
@@@ -5056,7 -4979,7 +5056,7 @@@ lpfc_mbx_cmpl_fdmi_reg_login(struct lpf
        pmb->context2 = NULL;
  
        ndlp->nlp_rpi = mb->un.varWords[0];
 -      ndlp->nlp_flag |= NLP_RPI_VALID;
 +      ndlp->nlp_flag |= NLP_RPI_REGISTERED;
        ndlp->nlp_type |= NLP_FABRIC;
        lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
  
@@@ -5180,8 -5103,6 +5180,8 @@@ lpfc_nlp_release(struct kref *kref
        spin_lock_irqsave(&phba->ndlp_lock, flags);
        NLP_CLR_NODE_ACT(ndlp);
        spin_unlock_irqrestore(&phba->ndlp_lock, flags);
 +      if (phba->sli_rev == LPFC_SLI_REV4)
 +              lpfc_sli4_free_rpi(phba, ndlp->nlp_rpi);
  
        /* free ndlp memory for final ndlp release */
        if (NLP_CHK_FREE_REQ(ndlp)) {
@@@ -5333,10 -5254,6 +5333,10 @@@ lpfc_fcf_inuse(struct lpfc_hba *phba
  
        vports = lpfc_create_vport_work_array(phba);
  
 +      /* If driver cannot allocate memory, indicate fcf is in use */
 +      if (!vports)
 +              return 1;
 +
        for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
                shost = lpfc_shost_from_vport(vports[i]);
                spin_lock_irq(shost->host_lock);
                                        "logged in\n",
                                        ndlp->nlp_rpi, ndlp->nlp_DID,
                                        ndlp->nlp_flag);
 -                              if (ndlp->nlp_flag & NLP_RPI_VALID)
 +                              if (ndlp->nlp_flag & NLP_RPI_REGISTERED)
                                        ret = 1;
                        }
                }
@@@ -5633,7 -5550,7 +5633,7 @@@ lpfc_unregister_unused_fcf(struct lpfc_
         * registered, do nothing.
         */
        spin_lock_irq(&phba->hbalock);
 -      if (!(phba->hba_flag & HBA_FCOE_SUPPORT) ||
 +      if (!(phba->hba_flag & HBA_FCOE_MODE) ||
            !(phba->fcf.fcf_flag & FCF_REGISTERED) ||
            !(phba->hba_flag & HBA_FIP_SUPPORT) ||
            (phba->fcf.fcf_flag & FCF_DISCOVERY) ||
@@@ -446,25 -446,23 +446,25 @@@ lpfc_config_port_post(struct lpfc_hba *
        /* Get the default values for Model Name and Description */
        lpfc_get_hba_model_desc(phba, phba->ModelName, phba->ModelDesc);
  
 -      if ((phba->cfg_link_speed > LINK_SPEED_10G)
 -          || ((phba->cfg_link_speed == LINK_SPEED_1G)
 +      if ((phba->cfg_link_speed > LPFC_USER_LINK_SPEED_16G)
 +          || ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_1G)
                && !(phba->lmt & LMT_1Gb))
 -          || ((phba->cfg_link_speed == LINK_SPEED_2G)
 +          || ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_2G)
                && !(phba->lmt & LMT_2Gb))
 -          || ((phba->cfg_link_speed == LINK_SPEED_4G)
 +          || ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_4G)
                && !(phba->lmt & LMT_4Gb))
 -          || ((phba->cfg_link_speed == LINK_SPEED_8G)
 +          || ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_8G)
                && !(phba->lmt & LMT_8Gb))
 -          || ((phba->cfg_link_speed == LINK_SPEED_10G)
 -              && !(phba->lmt & LMT_10Gb))) {
 +          || ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_10G)
 +              && !(phba->lmt & LMT_10Gb))
 +          || ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_16G)
 +              && !(phba->lmt & LMT_16Gb))) {
                /* Reset link speed to auto */
                lpfc_printf_log(phba, KERN_WARNING, LOG_LINK_EVENT,
                        "1302 Invalid speed for this board: "
                        "Reset link speed to auto: x%x\n",
                        phba->cfg_link_speed);
 -                      phba->cfg_link_speed = LINK_SPEED_AUTO;
 +                      phba->cfg_link_speed = LPFC_USER_LINK_SPEED_AUTO;
        }
  
        phba->link_state = LPFC_LINK_DOWN;
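
The chain of conditions above asks the same question once per speed: is
the requested value backed by the matching capability bit in the adapter's
link-media mask (phba->lmt)? The check reads naturally as a table lookup;
a sketch with illustrative bit values (the real LMT_* constants live in
the driver's hardware header):

#include <stddef.h>
#include <stdio.h>

#define LMT_1Gb		0x004	/* illustrative capability bits */
#define LMT_2Gb		0x010
#define LMT_4Gb		0x040
#define LMT_8Gb		0x080
#define LMT_16Gb	0x200

#define SPEED_AUTO	0

static const struct { int speed; unsigned int lmt_bit; } speed_caps[] = {
	{ 1, LMT_1Gb }, { 2, LMT_2Gb }, { 4, LMT_4Gb },
	{ 8, LMT_8Gb }, { 16, LMT_16Gb },
};

/* Return the requested speed if the adapter supports it, else auto. */
static int validate_speed(int requested, unsigned int lmt)
{
	size_t i;

	if (requested == SPEED_AUTO)
		return SPEED_AUTO;
	for (i = 0; i < sizeof(speed_caps) / sizeof(speed_caps[0]); i++)
		if (speed_caps[i].speed == requested)
			return (lmt & speed_caps[i].lmt_bit) ?
			       requested : SPEED_AUTO;
	return SPEED_AUTO;	/* unknown value: fall back to auto */
}

int main(void)
{
	printf("%d\n", validate_speed(16, LMT_1Gb | LMT_2Gb));	/* 0 (auto) */
	printf("%d\n", validate_speed(8, LMT_8Gb));		/* 8 */
	return 0;
}
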
@@@ -650,23 -648,22 +650,23 @@@ lpfc_hba_init_link(struct lpfc_hba *phb
        mb = &pmb->u.mb;
        pmb->vport = vport;
  
 -      lpfc_init_link(phba, pmb, phba->cfg_topology,
 -              phba->cfg_link_speed);
 +      lpfc_init_link(phba, pmb, phba->cfg_topology, phba->cfg_link_speed);
        pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
        lpfc_set_loopback_flag(phba);
        rc = lpfc_sli_issue_mbox(phba, pmb, flag);
 -      if (rc != MBX_SUCCESS) {
 +      if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) {
                lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
                        "0498 Adapter failed to init, mbxCmd x%x "
                        "INIT_LINK, mbxStatus x%x\n",
                        mb->mbxCommand, mb->mbxStatus);
 -              /* Clear all interrupt enable conditions */
 -              writel(0, phba->HCregaddr);
 -              readl(phba->HCregaddr); /* flush */
 -              /* Clear all pending interrupts */
 -              writel(0xffffffff, phba->HAregaddr);
 -              readl(phba->HAregaddr); /* flush */
 +              if (phba->sli_rev <= LPFC_SLI_REV3) {
 +                      /* Clear all interrupt enable conditions */
 +                      writel(0, phba->HCregaddr);
 +                      readl(phba->HCregaddr); /* flush */
 +                      /* Clear all pending interrupts */
 +                      writel(0xffffffff, phba->HAregaddr);
 +                      readl(phba->HAregaddr); /* flush */
 +              }
                phba->link_state = LPFC_HBA_ERROR;
                if (rc != MBX_BUSY || flag == MBX_POLL)
                        mempool_free(pmb, phba->mbox_mem_pool);
@@@ -930,35 -927,6 +930,35 @@@ lpfc_hb_timeout(unsigned long ptr
  }
  
  /**
 + * lpfc_rrq_timeout - The RRQ-timer timeout handler
 + * @ptr: unsigned long holds the pointer to lpfc hba data structure.
 + *
 + * This is the RRQ-timer timeout handler registered to the lpfc driver. When
 + * this timer fires, an RRQ timeout event shall be posted to the lpfc driver
 + * work-port-events bitmap and the worker thread is notified. This timeout
 + * event will be used by the worker thread to invoke the actual timeout
 + * handler routine, lpfc_rrq_handler. Any periodic operations will
 + * be performed in the timeout handler and the RRQ timeout event bit shall
 + * be cleared by the worker thread once it has consumed the event.
 + **/
 +static void
 +lpfc_rrq_timeout(unsigned long ptr)
 +{
 +      struct lpfc_hba *phba;
 +      uint32_t tmo_posted;
 +      unsigned long iflag;
 +
 +      phba = (struct lpfc_hba *)ptr;
 +      spin_lock_irqsave(&phba->pport->work_port_lock, iflag);
 +      tmo_posted = phba->hba_flag & HBA_RRQ_ACTIVE;
 +      if (!tmo_posted)
 +              phba->hba_flag |= HBA_RRQ_ACTIVE;
 +      spin_unlock_irqrestore(&phba->pport->work_port_lock, iflag);
 +      if (!tmo_posted)
 +              lpfc_worker_wake_up(phba);
 +}
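
The handler sets HBA_RRQ_ACTIVE under the work-port lock and wakes the
worker only when the bit was not already set, so a burst of timer fires
collapses into a single wakeup. A userspace sketch of the same
test-and-set-then-signal pattern:

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t wake = PTHREAD_COND_INITIALIZER;
static bool event_posted;
static int wakeups;

static void timeout_handler(void)
{
	bool already;

	pthread_mutex_lock(&lock);
	already = event_posted;		/* test-and-set under the lock */
	event_posted = true;
	pthread_mutex_unlock(&lock);
	if (!already) {			/* signal only on the 0 -> 1 edge */
		wakeups++;
		pthread_cond_signal(&wake);
	}
}

int main(void)
{
	timeout_handler();
	timeout_handler();	/* bit already set: no second signal */
	printf("wakeups=%d\n", wakeups);	/* prints wakeups=1 */
	return 0;
}
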
 +
 +/**
   * lpfc_hb_mbox_cmpl - The lpfc heart-beat mailbox command callback function
   * @phba: pointer to lpfc hba data structure.
   * @pmboxq: pointer to the driver internal queue element for mailbox command.
@@@ -1406,8 -1374,6 +1406,8 @@@ lpfc_handle_eratt_s4(struct lpfc_hba *p
        struct lpfc_vport *vport = phba->pport;
        uint32_t event_data;
        struct Scsi_Host *shost;
 +      uint32_t if_type;
 +      struct lpfc_register portstat_reg;
  
        /* If the pci channel is offline, ignore possible errors, since
         * we cannot communicate with the pci card anyway.
        /* For now, the actual action for SLI4 device handling is not
         * specified yet, just treated it as adaptor hardware failure
         */
 -      lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
 -                      "0143 SLI4 Adapter Hardware Error Data: x%x x%x\n",
 -                      phba->work_status[0], phba->work_status[1]);
 -
        event_data = FC_REG_DUMP_EVENT;
        shost = lpfc_shost_from_vport(vport);
        fc_host_post_vendor_event(shost, fc_get_event_number(),
                                  sizeof(event_data), (char *) &event_data,
                                  SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_EMULEX);
  
 -      lpfc_sli4_offline_eratt(phba);
 +      if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
 +      switch (if_type) {
 +      case LPFC_SLI_INTF_IF_TYPE_0:
 +              lpfc_sli4_offline_eratt(phba);
 +              break;
 +      case LPFC_SLI_INTF_IF_TYPE_2:
 +              portstat_reg.word0 =
 +                      readl(phba->sli4_hba.u.if_type2.STATUSregaddr);
 +
 +              if (bf_get(lpfc_sliport_status_oti, &portstat_reg)) {
 +                      /* TODO: Register for Overtemp async events. */
 +                      lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
 +                              "2889 Port Overtemperature event, "
 +                              "taking port offline\n");
 +                      spin_lock_irq(&phba->hbalock);
 +                      phba->over_temp_state = HBA_OVER_TEMP;
 +                      spin_unlock_irq(&phba->hbalock);
 +                      lpfc_sli4_offline_eratt(phba);
 +                      return;
 +              }
 +              if (bf_get(lpfc_sliport_status_rn, &portstat_reg)) {
 +                      /*
 +                       * TODO: Attempt port recovery via a port reset.
 +                       * When fully implemented, the driver should
 +                       * attempt to recover the port here and return.
 +                       * For now, log an error and take the port offline.
 +                       */
 +                      lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
 +                                      "2887 Port Error: Attempting "
 +                                      "Port Recovery\n");
 +              }
 +              lpfc_sli4_offline_eratt(phba);
 +              break;
 +      case LPFC_SLI_INTF_IF_TYPE_1:
 +      default:
 +              break;
 +      }
  }
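
The IF_TYPE_2 branch reads the port status register once into a local copy
and then decodes the individual error bits (overtemp, reset-needed) from
that snapshot rather than re-reading the register per test. A sketch of
snapshot-then-decode, with made-up bit positions standing in for the
lpfc_sliport_status_* fields:

#include <stdint.h>
#include <stdio.h>

#define STATUS_OTI	(1u << 3)	/* overtemp indicator (made up) */
#define STATUS_RN	(1u << 5)	/* reset needed (made up) */

static uint32_t read_status_reg(void)
{
	return STATUS_RN;	/* stands in for readl(STATUSregaddr) */
}

int main(void)
{
	uint32_t snap = read_status_reg();	/* one read, many tests */

	if (snap & STATUS_OTI) {
		printf("overtemp: take the port offline\n");
		return 0;
	}
	if (snap & STATUS_RN)
		printf("port error: attempt recovery, then offline\n");
	return 0;
}
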
  
  /**
@@@ -1525,8 -1459,8 +1525,8 @@@ lpfc_handle_latt(struct lpfc_hba *phba
        lpfc_els_flush_all_cmd(phba);
  
        psli->slistat.link_event++;
 -      lpfc_read_la(phba, pmb, mp);
 -      pmb->mbox_cmpl = lpfc_mbx_cmpl_read_la;
 +      lpfc_read_topology(phba, pmb, mp);
 +      pmb->mbox_cmpl = lpfc_mbx_cmpl_read_topology;
        pmb->vport = vport;
        /* Block ELS IOCBs until we have processed this mbox command */
        phba->sli.ring[LPFC_ELS_RING].flag |= LPFC_STOP_IOCB_EVENT;
@@@ -1919,14 -1853,6 +1919,14 @@@ lpfc_get_hba_model_desc(struct lpfc_hb
                m = (typeof(m)){"LPVe12002", "PCIe Shared I/O",
                                "Fibre Channel Adapter"};
                break;
 +      case PCI_DEVICE_ID_LANCER_FC:
 +              oneConnect = 1;
 +              m = (typeof(m)){"Undefined", "PCIe", "Fibre Channel Adapter"};
 +              break;
 +      case PCI_DEVICE_ID_LANCER_FCOE:
 +              oneConnect = 1;
 +              m = (typeof(m)){"Undefined", "PCIe", "FCoE"};
 +              break;
        default:
                m = (typeof(m)){"Unknown", "", ""};
                break;
@@@ -3017,6 -2943,63 +3017,6 @@@ lpfc_sli4_fcf_redisc_wait_tmo(unsigned 
  }
  
  /**
 - * lpfc_sli4_fw_cfg_check - Read the firmware config and verify FCoE support
 - * @phba: pointer to lpfc hba data structure.
 - *
 - * This function uses the QUERY_FW_CFG mailbox command to determine if the
 - * firmware loaded supports FCoE. A return of zero indicates that the mailbox
 - * was successful and the firmware supports FCoE. Any other return indicates
 - * a error. It is assumed that this function will be called before interrupts
 - * are enabled.
 - **/
 -static int
 -lpfc_sli4_fw_cfg_check(struct lpfc_hba *phba)
 -{
 -      int rc = 0;
 -      LPFC_MBOXQ_t *mboxq;
 -      struct lpfc_mbx_query_fw_cfg *query_fw_cfg;
 -      uint32_t length;
 -      uint32_t shdr_status, shdr_add_status;
 -
 -      mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
 -      if (!mboxq) {
 -              lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
 -                              "2621 Failed to allocate mbox for "
 -                              "query firmware config cmd\n");
 -              return -ENOMEM;
 -      }
 -      query_fw_cfg = &mboxq->u.mqe.un.query_fw_cfg;
 -      length = (sizeof(struct lpfc_mbx_query_fw_cfg) -
 -                sizeof(struct lpfc_sli4_cfg_mhdr));
 -      lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
 -                       LPFC_MBOX_OPCODE_QUERY_FW_CFG,
 -                       length, LPFC_SLI4_MBX_EMBED);
 -      rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
 -      /* The IOCTL status is embedded in the mailbox subheader. */
 -      shdr_status = bf_get(lpfc_mbox_hdr_status,
 -                           &query_fw_cfg->header.cfg_shdr.response);
 -      shdr_add_status = bf_get(lpfc_mbox_hdr_add_status,
 -                               &query_fw_cfg->header.cfg_shdr.response);
 -      if (shdr_status || shdr_add_status || rc != MBX_SUCCESS) {
 -              lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
 -                              "2622 Query Firmware Config failed "
 -                              "mbx status x%x, status x%x add_status x%x\n",
 -                              rc, shdr_status, shdr_add_status);
 -              return -EINVAL;
 -      }
 -      if (!bf_get(lpfc_function_mode_fcoe_i, query_fw_cfg)) {
 -              lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
 -                              "2623 FCoE Function not supported by firmware. "
 -                              "Function mode = %08x\n",
 -                              query_fw_cfg->function_mode);
 -              return -EINVAL;
 -      }
 -      if (rc != MBX_TIMEOUT)
 -              mempool_free(mboxq, phba->mbox_mem_pool);
 -      return 0;
 -}
 -
 -/**
   * lpfc_sli4_parse_latt_fault - Parse sli4 link-attention link fault code
   * @phba: pointer to lpfc hba data structure.
   * @acqe_link: pointer to the async link completion queue entry.
@@@ -3068,20 -3051,20 +3068,20 @@@ lpfc_sli4_parse_latt_type(struct lpfc_h
        switch (bf_get(lpfc_acqe_link_status, acqe_link)) {
        case LPFC_ASYNC_LINK_STATUS_DOWN:
        case LPFC_ASYNC_LINK_STATUS_LOGICAL_DOWN:
 -              att_type = AT_LINK_DOWN;
 +              att_type = LPFC_ATT_LINK_DOWN;
                break;
        case LPFC_ASYNC_LINK_STATUS_UP:
                /* Ignore physical link up events - wait for logical link up */
 -              att_type = AT_RESERVED;
 +              att_type = LPFC_ATT_RESERVED;
                break;
        case LPFC_ASYNC_LINK_STATUS_LOGICAL_UP:
 -              att_type = AT_LINK_UP;
 +              att_type = LPFC_ATT_LINK_UP;
                break;
        default:
                lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
                                "0399 Invalid link attention type: x%x\n",
                                bf_get(lpfc_acqe_link_status, acqe_link));
 -              att_type = AT_RESERVED;
 +              att_type = LPFC_ATT_RESERVED;
                break;
        }
        return att_type;
@@@ -3105,32 -3088,36 +3105,32 @@@ lpfc_sli4_parse_latt_link_speed(struct 
  
        switch (bf_get(lpfc_acqe_link_speed, acqe_link)) {
        case LPFC_ASYNC_LINK_SPEED_ZERO:
 -              link_speed = LA_UNKNW_LINK;
 -              break;
        case LPFC_ASYNC_LINK_SPEED_10MBPS:
 -              link_speed = LA_UNKNW_LINK;
 -              break;
        case LPFC_ASYNC_LINK_SPEED_100MBPS:
 -              link_speed = LA_UNKNW_LINK;
 +              link_speed = LPFC_LINK_SPEED_UNKNOWN;
                break;
        case LPFC_ASYNC_LINK_SPEED_1GBPS:
 -              link_speed = LA_1GHZ_LINK;
 +              link_speed = LPFC_LINK_SPEED_1GHZ;
                break;
        case LPFC_ASYNC_LINK_SPEED_10GBPS:
 -              link_speed = LA_10GHZ_LINK;
 +              link_speed = LPFC_LINK_SPEED_10GHZ;
                break;
        default:
                lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
                                "0483 Invalid link-attention link speed: x%x\n",
                                bf_get(lpfc_acqe_link_speed, acqe_link));
 -              link_speed = LA_UNKNW_LINK;
 +              link_speed = LPFC_LINK_SPEED_UNKNOWN;
                break;
        }
        return link_speed;
  }
  
  /**
 - * lpfc_sli4_async_link_evt - Process the asynchronous link event
 + * lpfc_sli4_async_link_evt - Process the asynchronous FCoE link event
   * @phba: pointer to lpfc hba data structure.
   * @acqe_link: pointer to the async link completion queue entry.
   *
 - * This routine is to handle the SLI4 asynchronous link event.
 + * This routine is to handle the SLI4 asynchronous FCoE link event.
   **/
  static void
  lpfc_sli4_async_link_evt(struct lpfc_hba *phba,
        struct lpfc_dmabuf *mp;
        LPFC_MBOXQ_t *pmb;
        MAILBOX_t *mb;
 -      READ_LA_VAR *la;
 +      struct lpfc_mbx_read_top *la;
        uint8_t att_type;
 +      int rc;
  
        att_type = lpfc_sli4_parse_latt_type(phba, acqe_link);
 -      if (att_type != AT_LINK_DOWN && att_type != AT_LINK_UP)
 +      if (att_type != LPFC_ATT_LINK_DOWN && att_type != LPFC_ATT_LINK_UP)
                return;
        phba->fcoe_eventtag = acqe_link->event_tag;
        pmb = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
        /* Update link event statistics */
        phba->sli.slistat.link_event++;
  
 -      /* Create pseudo lpfc_handle_latt mailbox command from link ACQE */
 -      lpfc_read_la(phba, pmb, mp);
 +      /* Create lpfc_handle_latt mailbox command from link ACQE */
 +      lpfc_read_topology(phba, pmb, mp);
 +      pmb->mbox_cmpl = lpfc_mbx_cmpl_read_topology;
        pmb->vport = phba->pport;
  
 +      /* Keep the link status for extra SLI4 state machine reference */
 +      phba->sli4_hba.link_state.speed =
 +                              bf_get(lpfc_acqe_link_speed, acqe_link);
 +      phba->sli4_hba.link_state.duplex =
 +                              bf_get(lpfc_acqe_link_duplex, acqe_link);
 +      phba->sli4_hba.link_state.status =
 +                              bf_get(lpfc_acqe_link_status, acqe_link);
 +      phba->sli4_hba.link_state.type =
 +                              bf_get(lpfc_acqe_link_type, acqe_link);
 +      phba->sli4_hba.link_state.number =
 +                              bf_get(lpfc_acqe_link_number, acqe_link);
 +      phba->sli4_hba.link_state.fault =
 +                              bf_get(lpfc_acqe_link_fault, acqe_link);
 +      phba->sli4_hba.link_state.logical_speed =
 +                      bf_get(lpfc_acqe_logical_link_speed, acqe_link);
 +      lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
 +                      "2900 Async FCoE Link event - Speed:%dGBit duplex:x%x "
 +                      "LA Type:x%x Port Type:%d Port Number:%d Logical "
 +                      "speed:%dMbps Fault:%d\n",
 +                      phba->sli4_hba.link_state.speed,
 +                      phba->sli4_hba.link_state.duplex,
 +                      phba->sli4_hba.link_state.status,
 +                      phba->sli4_hba.link_state.type,
 +                      phba->sli4_hba.link_state.number,
 +                      phba->sli4_hba.link_state.logical_speed * 10,
 +                      phba->sli4_hba.link_state.fault);
 +      /*
 +       * For FC Mode: issue the READ_TOPOLOGY mailbox command to fetch
 +       * topology info. Note: Optional for non FC-AL ports.
 +       */
 +      if (!(phba->hba_flag & HBA_FCOE_MODE)) {
 +              rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
 +              if (rc == MBX_NOT_FINISHED)
 +                      goto out_free_dmabuf;
 +              return;
 +      }
 +      /*
 +       * For FCoE Mode: fill in all the topology information we need and call
 +       * the READ_TOPOLOGY completion routine to continue without actually
 +       * sending the READ_TOPOLOGY mailbox command to the port.
 +       */
        /* Parse and translate status field */
        mb = &pmb->u.mb;
        mb->mbxStatus = lpfc_sli4_parse_latt_fault(phba, acqe_link);
  
        /* Parse and translate link attention fields */
 -      la = (READ_LA_VAR *) &pmb->u.mb.un.varReadLA;
 +      la = (struct lpfc_mbx_read_top *) &pmb->u.mb.un.varReadTop;
        la->eventTag = acqe_link->event_tag;
 -      la->attType = att_type;
 -      la->UlnkSpeed = lpfc_sli4_parse_latt_link_speed(phba, acqe_link);
 +      bf_set(lpfc_mbx_read_top_att_type, la, att_type);
 +      bf_set(lpfc_mbx_read_top_link_spd, la,
 +             lpfc_sli4_parse_latt_link_speed(phba, acqe_link));
  
        /* Fake the the following irrelvant fields */
 -      la->topology = TOPOLOGY_PT_PT;
 -      la->granted_AL_PA = 0;
 -      la->il = 0;
 -      la->pb = 0;
 -      la->fa = 0;
 -      la->mm = 0;
 +      bf_set(lpfc_mbx_read_top_topology, la, LPFC_TOPOLOGY_PT_PT);
 +      bf_set(lpfc_mbx_read_top_alpa_granted, la, 0);
 +      bf_set(lpfc_mbx_read_top_il, la, 0);
 +      bf_set(lpfc_mbx_read_top_pb, la, 0);
 +      bf_set(lpfc_mbx_read_top_fa, la, 0);
 +      bf_set(lpfc_mbx_read_top_mm, la, 0);
 +
 +      /* Invoke the lpfc_handle_latt mailbox command callback function */
 +      lpfc_mbx_cmpl_read_topology(phba, pmb);
  
 +      return;
 +
 +out_free_dmabuf:
 +      kfree(mp);
 +out_free_pmb:
 +      mempool_free(pmb, phba->mbox_mem_pool);
 +}
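
The out_free_dmabuf/out_free_pmb labels above form the kernel's standard
goto cleanup ladder: labels are ordered so that a failure at step N falls
through and releases exactly the resources acquired before step N. The
shape in miniature:

#include <stdio.h>
#include <stdlib.h>

static int setup(int fail_late)
{
	char *pmb, *mp;

	pmb = malloc(32);		/* step 1 */
	if (!pmb)
		goto out;
	mp = malloc(64);		/* step 2 */
	if (!mp)
		goto out_free_pmb;
	if (fail_late)			/* some later step failed */
		goto out_free_mp;
	free(mp);
	free(pmb);
	return 0;

out_free_mp:
	free(mp);
out_free_pmb:
	free(pmb);
out:
	return -1;
}

int main(void)
{
	printf("%d\n", setup(1));	/* -1, with both buffers freed */
	return 0;
}
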
 +
 +/**
 + * lpfc_sli4_async_fc_evt - Process the asynchronous FC link event
 + * @phba: pointer to lpfc hba data structure.
 + * @acqe_fc: pointer to the async fc completion queue entry.
 + *
 + * This routine is to handle the SLI4 asynchronous FC event. It will simply log
 + * that the event was received and then issue a read_topology mailbox command so
 + * that the rest of the driver will treat it the same as SLI3.
 + **/
 +static void
 +lpfc_sli4_async_fc_evt(struct lpfc_hba *phba, struct lpfc_acqe_fc_la *acqe_fc)
 +{
 +      struct lpfc_dmabuf *mp;
 +      LPFC_MBOXQ_t *pmb;
 +      int rc;
 +
 +      if (bf_get(lpfc_trailer_type, acqe_fc) !=
 +          LPFC_FC_LA_EVENT_TYPE_FC_LINK) {
 +              lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
 +                              "2895 Non FC link event detected (%d)\n",
 +                              bf_get(lpfc_trailer_type, acqe_fc));
 +              return;
 +      }
        /* Keep the link status for extra SLI4 state machine reference */
        phba->sli4_hba.link_state.speed =
 -                              bf_get(lpfc_acqe_link_speed, acqe_link);
 -      phba->sli4_hba.link_state.duplex =
 -                              bf_get(lpfc_acqe_link_duplex, acqe_link);
 +                              bf_get(lpfc_acqe_fc_la_speed, acqe_fc);
 +      phba->sli4_hba.link_state.duplex = LPFC_ASYNC_LINK_DUPLEX_FULL;
 +      phba->sli4_hba.link_state.topology =
 +                              bf_get(lpfc_acqe_fc_la_topology, acqe_fc);
        phba->sli4_hba.link_state.status =
 -                              bf_get(lpfc_acqe_link_status, acqe_link);
 -      phba->sli4_hba.link_state.physical =
 -                              bf_get(lpfc_acqe_link_physical, acqe_link);
 +                              bf_get(lpfc_acqe_fc_la_att_type, acqe_fc);
 +      phba->sli4_hba.link_state.type =
 +                              bf_get(lpfc_acqe_fc_la_port_type, acqe_fc);
 +      phba->sli4_hba.link_state.number =
 +                              bf_get(lpfc_acqe_fc_la_port_number, acqe_fc);
        phba->sli4_hba.link_state.fault =
 -                              bf_get(lpfc_acqe_link_fault, acqe_link);
 +                              bf_get(lpfc_acqe_link_fault, acqe_fc);
        phba->sli4_hba.link_state.logical_speed =
 -                              bf_get(lpfc_acqe_qos_link_speed, acqe_link);
 +                              bf_get(lpfc_acqe_fc_la_llink_spd, acqe_fc);
 +      lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
 +                      "2896 Async FC event - Speed:%dGBaud Topology:x%x "
 +                      "LA Type:x%x Port Type:%d Port Number:%d Logical speed:"
 +                      "%dMbps Fault:%d\n",
 +                      phba->sli4_hba.link_state.speed,
 +                      phba->sli4_hba.link_state.topology,
 +                      phba->sli4_hba.link_state.status,
 +                      phba->sli4_hba.link_state.type,
 +                      phba->sli4_hba.link_state.number,
 +                      phba->sli4_hba.link_state.logical_speed * 10,
 +                      phba->sli4_hba.link_state.fault);
 +      pmb = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
 +      if (!pmb) {
 +              lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
 +                              "2897 The mboxq allocation failed\n");
 +              return;
 +      }
 +      mp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
 +      if (!mp) {
 +              lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
 +                              "2898 The lpfc_dmabuf allocation failed\n");
 +              goto out_free_pmb;
 +      }
 +      mp->virt = lpfc_mbuf_alloc(phba, 0, &mp->phys);
 +      if (!mp->virt) {
 +              lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
 +                              "2899 The mbuf allocation failed\n");
 +              goto out_free_dmabuf;
 +      }
  
 -      /* Invoke the lpfc_handle_latt mailbox command callback function */
 -      lpfc_mbx_cmpl_read_la(phba, pmb);
 +      /* Cleanup any outstanding ELS commands */
 +      lpfc_els_flush_all_cmd(phba);
 +
 +      /* Block ELS IOCBs until we have done process link event */
 +      phba->sli.ring[LPFC_ELS_RING].flag |= LPFC_STOP_IOCB_EVENT;
 +
 +      /* Update link event statistics */
 +      phba->sli.slistat.link_event++;
 +
 +      /* Create lpfc_handle_latt mailbox command from link ACQE */
 +      lpfc_read_topology(phba, pmb, mp);
 +      pmb->mbox_cmpl = lpfc_mbx_cmpl_read_topology;
 +      pmb->vport = phba->pport;
  
 +      rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
 +      if (rc == MBX_NOT_FINISHED)
 +              goto out_free_dmabuf;
        return;
  
  out_free_dmabuf:
@@@ -3346,24 -3209,6 +3346,24 @@@ out_free_pmb
  }
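  
  /*
   * Aside: the bf_get()/bf_set() calls used throughout these handlers are
   * the lpfc bitfield accessors defined in lpfc_hw4.h.  A minimal sketch
   * of the shift-and-mask idiom such accessors typically expand to (the
   * names below are hypothetical, not the real lpfc macros):
   *
   *	#define EX_FIELD_SHIFT	8
   *	#define EX_FIELD_MASK	0x000000ff
   *
   *	val = ((acqe->word0) >> EX_FIELD_SHIFT) & EX_FIELD_MASK;
   */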
  
  /**
 + * lpfc_sli4_async_sli_evt - Process the asynchronous SLI link event
 + * @phba: pointer to lpfc hba data structure.
 + * @acqe_sli: pointer to the async SLI completion queue entry.
 + *
 + * This routine is to handle the SLI4 asynchronous SLI events.
 + **/
 +static void
 +lpfc_sli4_async_sli_evt(struct lpfc_hba *phba, struct lpfc_acqe_sli *acqe_sli)
 +{
 +      lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
 +                      "2901 Async SLI event - Event Data1:x%08x Event Data2:"
 +                      "x%08x SLI Event Type:%d\n",
 +                      acqe_sli->event_data1, acqe_sli->event_data2,
 +                      bf_get(lpfc_trailer_type, acqe_sli));
 +}
 +
 +/**
   * lpfc_sli4_perform_vport_cvl - Perform clear virtual link on a vport
   * @vport: pointer to vport data structure.
   *
@@@ -3402,12 -3247,10 +3402,12 @@@ lpfc_sli4_perform_vport_cvl(struct lpfc
                if (!ndlp)
                        return 0;
        }
 -      if (phba->pport->port_state < LPFC_FLOGI)
 +      if ((phba->pport->port_state < LPFC_FLOGI) &&
 +              (phba->pport->port_state != LPFC_VPORT_FAILED))
                return NULL;
        /* If virtual link is not yet instantiated ignore CVL */
 -      if ((vport != phba->pport) && (vport->port_state < LPFC_FDISC))
 +      if ((vport != phba->pport) && (vport->port_state < LPFC_FDISC)
 +              && (vport->port_state != LPFC_VPORT_FAILED))
                return NULL;
        shost = lpfc_shost_from_vport(vport);
        if (!shost)
@@@ -3442,17 -3285,17 +3442,17 @@@ lpfc_sli4_perform_all_vport_cvl(struct 
  }
  
  /**
 - * lpfc_sli4_async_fcoe_evt - Process the asynchronous fcoe event
 + * lpfc_sli4_async_fip_evt - Process the asynchronous FCoE FIP event
   * @phba: pointer to lpfc hba data structure.
   * @acqe_fip: pointer to the async FCoE FIP completion queue entry.
   *
   * This routine is to handle the SLI4 asynchronous FCoE FIP event.
   **/
  static void
 -lpfc_sli4_async_fcoe_evt(struct lpfc_hba *phba,
 -                       struct lpfc_acqe_fcoe *acqe_fcoe)
 +lpfc_sli4_async_fip_evt(struct lpfc_hba *phba,
 +                      struct lpfc_acqe_fip *acqe_fip)
  {
 -      uint8_t event_type = bf_get(lpfc_acqe_fcoe_event_type, acqe_fcoe);
 +      uint8_t event_type = bf_get(lpfc_trailer_type, acqe_fip);
        int rc;
        struct lpfc_vport *vport;
        struct lpfc_nodelist *ndlp;
        struct lpfc_vport **vports;
        int i;
  
 -      phba->fc_eventTag = acqe_fcoe->event_tag;
 -      phba->fcoe_eventtag = acqe_fcoe->event_tag;
 +      phba->fc_eventTag = acqe_fip->event_tag;
 +      phba->fcoe_eventtag = acqe_fip->event_tag;
        switch (event_type) {
 -      case LPFC_FCOE_EVENT_TYPE_NEW_FCF:
 -      case LPFC_FCOE_EVENT_TYPE_FCF_PARAM_MOD:
 -              if (event_type == LPFC_FCOE_EVENT_TYPE_NEW_FCF)
 +      case LPFC_FIP_EVENT_TYPE_NEW_FCF:
 +      case LPFC_FIP_EVENT_TYPE_FCF_PARAM_MOD:
 +              if (event_type == LPFC_FIP_EVENT_TYPE_NEW_FCF)
                        lpfc_printf_log(phba, KERN_ERR, LOG_FIP |
                                        LOG_DISCOVERY,
                                        "2546 New FCF event, evt_tag:x%x, "
                                        "index:x%x\n",
 -                                      acqe_fcoe->event_tag,
 -                                      acqe_fcoe->index);
 +                                      acqe_fip->event_tag,
 +                                      acqe_fip->index);
                else
                        lpfc_printf_log(phba, KERN_WARNING, LOG_FIP |
                                        LOG_DISCOVERY,
                                        "2788 FCF param modified event, "
                                        "evt_tag:x%x, index:x%x\n",
 -                                      acqe_fcoe->event_tag,
 -                                      acqe_fcoe->index);
 +                                      acqe_fip->event_tag,
 +                                      acqe_fip->index);
                if (phba->fcf.fcf_flag & FCF_DISCOVERY) {
                        /*
                         * During period of FCF discovery, read the FCF
                                        LOG_DISCOVERY,
                                        "2779 Read FCF (x%x) for updating "
                                        "roundrobin FCF failover bmask\n",
 -                                      acqe_fcoe->index);
 -                      rc = lpfc_sli4_read_fcf_rec(phba, acqe_fcoe->index);
 +                                      acqe_fip->index);
 +                      rc = lpfc_sli4_read_fcf_rec(phba, acqe_fip->index);
                }
  
                /* If the FCF discovery is in progress, do nothing. */
                lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY,
                                "2770 Start FCF table scan per async FCF "
                                "event, evt_tag:x%x, index:x%x\n",
 -                              acqe_fcoe->event_tag, acqe_fcoe->index);
 +                              acqe_fip->event_tag, acqe_fip->index);
                rc = lpfc_sli4_fcf_scan_read_fcf_rec(phba,
                                                     LPFC_FCOE_FCF_GET_FIRST);
                if (rc)
                                        "command failed (x%x)\n", rc);
                break;
  
 -      case LPFC_FCOE_EVENT_TYPE_FCF_TABLE_FULL:
 +      case LPFC_FIP_EVENT_TYPE_FCF_TABLE_FULL:
                lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
                        "2548 FCF Table full count 0x%x tag 0x%x\n",
 -                      bf_get(lpfc_acqe_fcoe_fcf_count, acqe_fcoe),
 -                      acqe_fcoe->event_tag);
 +                      bf_get(lpfc_acqe_fip_fcf_count, acqe_fip),
 +                      acqe_fip->event_tag);
                break;
  
 -      case LPFC_FCOE_EVENT_TYPE_FCF_DEAD:
 +      case LPFC_FIP_EVENT_TYPE_FCF_DEAD:
                lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_DISCOVERY,
                        "2549 FCF (x%x) disconnected from network, "
 -                      "tag:x%x\n", acqe_fcoe->index, acqe_fcoe->event_tag);
 +                      "tag:x%x\n", acqe_fip->index, acqe_fip->event_tag);
                /*
                 * If we are in the middle of FCF failover process, clear
                 * the corresponding FCF bit in the roundrobin bitmap.
                if (phba->fcf.fcf_flag & FCF_DISCOVERY) {
                        spin_unlock_irq(&phba->hbalock);
                        /* Update FLOGI FCF failover eligible FCF bmask */
 -                      lpfc_sli4_fcf_rr_index_clear(phba, acqe_fcoe->index);
 +                      lpfc_sli4_fcf_rr_index_clear(phba, acqe_fip->index);
                        break;
                }
                spin_unlock_irq(&phba->hbalock);
  
                /* If the event is not for currently used fcf do nothing */
 -              if (phba->fcf.current_rec.fcf_indx != acqe_fcoe->index)
 +              if (phba->fcf.current_rec.fcf_indx != acqe_fip->index)
                        break;
  
                /*
                lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY,
                                "2771 Start FCF fast failover process due to "
                                "FCF DEAD event: evt_tag:x%x, fcf_index:x%x "
 -                              "\n", acqe_fcoe->event_tag, acqe_fcoe->index);
 +                              "\n", acqe_fip->event_tag, acqe_fip->index);
                rc = lpfc_sli4_redisc_fcf_table(phba);
                if (rc) {
                        lpfc_printf_log(phba, KERN_ERR, LOG_FIP |
                        lpfc_sli4_perform_all_vport_cvl(phba);
                }
                break;
 -      case LPFC_FCOE_EVENT_TYPE_CVL:
 +      case LPFC_FIP_EVENT_TYPE_CVL:
                lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_DISCOVERY,
                        "2718 Clear Virtual Link Received for VPI 0x%x"
 -                      " tag 0x%x\n", acqe_fcoe->index, acqe_fcoe->event_tag);
 +                      " tag 0x%x\n", acqe_fip->index, acqe_fip->event_tag);
                vport = lpfc_find_vport_by_vpid(phba,
 -                              acqe_fcoe->index - phba->vpi_base);
 +                              acqe_fip->index - phba->vpi_base);
                ndlp = lpfc_sli4_perform_vport_cvl(vport);
                if (!ndlp)
                        break;
                        lpfc_printf_log(phba, KERN_INFO, LOG_FIP |
                                        LOG_DISCOVERY,
                                        "2773 Start FCF failover per CVL, "
 -                                      "evt_tag:x%x\n", acqe_fcoe->event_tag);
 +                                      "evt_tag:x%x\n", acqe_fip->event_tag);
                        rc = lpfc_sli4_redisc_fcf_table(phba);
                        if (rc) {
                                lpfc_printf_log(phba, KERN_ERR, LOG_FIP |
        default:
                lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
                        "0288 Unknown FCoE event type 0x%x event tag "
 -                      "0x%x\n", event_type, acqe_fcoe->event_tag);
 +                      "0x%x\n", event_type, acqe_fip->event_tag);
                break;
        }
  }
@@@ -3756,7 -3599,8 +3756,7 @@@ void lpfc_sli4_async_event_proc(struct 
                                                 &cq_event->cqe.acqe_link);
                        break;
                case LPFC_TRAILER_CODE_FCOE:
 -                      lpfc_sli4_async_fcoe_evt(phba,
 -                                               &cq_event->cqe.acqe_fcoe);
 +                      lpfc_sli4_async_fip_evt(phba, &cq_event->cqe.acqe_fip);
                        break;
                case LPFC_TRAILER_CODE_DCBX:
                        lpfc_sli4_async_dcbx_evt(phba,
                        lpfc_sli4_async_grp5_evt(phba,
                                                 &cq_event->cqe.acqe_grp5);
                        break;
 +              case LPFC_TRAILER_CODE_FC:
 +                      lpfc_sli4_async_fc_evt(phba, &cq_event->cqe.acqe_fc);
 +                      break;
 +              case LPFC_TRAILER_CODE_SLI:
 +                      lpfc_sli4_async_sli_evt(phba, &cq_event->cqe.acqe_sli);
 +                      break;
                default:
                        lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
                                        "1804 Invalid asynchrous event code: "
@@@ -4110,7 -3948,7 +4110,7 @@@ lpfc_sli4_driver_resource_setup(struct 
        int rc, i, hbq_count, buf_size, dma_buf_size, max_buf_size;
        uint8_t pn_page[LPFC_MAX_SUPPORTED_PAGES] = {0};
        struct lpfc_mqe *mqe;
 -      int longs;
 +      int longs, sli_family;
  
        /* Before proceeding, wait for POST done and device ready */
        rc = lpfc_sli4_post_status_check(phba);
        init_timer(&phba->hb_tmofunc);
        phba->hb_tmofunc.function = lpfc_hb_timeout;
        phba->hb_tmofunc.data = (unsigned long)phba;
 +      init_timer(&phba->rrq_tmr);
 +      phba->rrq_tmr.function = lpfc_rrq_timeout;
 +      phba->rrq_tmr.data = (unsigned long)phba;
  
        psli = &phba->sli;
        /* MBOX heartbeat timer */
         */
        buf_size = (sizeof(struct fcp_cmnd) + sizeof(struct fcp_rsp) +
                    ((phba->cfg_sg_seg_cnt + 2) * sizeof(struct sli4_sge)));
 -      /* Feature Level 1 hardware is limited to 2 pages */
 -      if ((bf_get(lpfc_sli_intf_featurelevel1, &phba->sli4_hba.sli_intf) ==
 -           LPFC_SLI_INTF_FEATURELEVEL1_1))
 -              max_buf_size = LPFC_SLI4_FL1_MAX_BUF_SIZE;
 -      else
 -              max_buf_size = LPFC_SLI4_MAX_BUF_SIZE;
 +
 +      sli_family = bf_get(lpfc_sli_intf_sli_family, &phba->sli4_hba.sli_intf);
 +      max_buf_size = LPFC_SLI4_MAX_BUF_SIZE;
 +      switch (sli_family) {
 +      case LPFC_SLI_INTF_FAMILY_BE2:
 +      case LPFC_SLI_INTF_FAMILY_BE3:
 +              /* There is a single hint for BE - 2 pages per BPL. */
 +              if (bf_get(lpfc_sli_intf_sli_hint1, &phba->sli4_hba.sli_intf) ==
 +                  LPFC_SLI_INTF_SLI_HINT1_1)
 +                      max_buf_size = LPFC_SLI4_FL1_MAX_BUF_SIZE;
 +              break;
 +      case LPFC_SLI_INTF_FAMILY_LNCR_A0:
 +      case LPFC_SLI_INTF_FAMILY_LNCR_B0:
 +      default:
 +              break;
 +      }
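 +
 +      /* Grow the candidate DMA buffer size in powers of two until it
 +       * covers buf_size or reaches the cap selected above.
 +       */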
        for (dma_buf_size = LPFC_SLI4_MIN_BUF_SIZE;
             dma_buf_size < max_buf_size && buf_size > dma_buf_size;
             dma_buf_size = dma_buf_size << 1)
        if (rc)
                return -ENOMEM;
  
 +      /* IF Type 2 ports get initialized now. */
 +      if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) ==
 +          LPFC_SLI_INTF_IF_TYPE_2) {
 +              rc = lpfc_pci_function_reset(phba);
 +              if (unlikely(rc))
 +                      return -ENODEV;
 +      }
 +
        /* Create the bootstrap mailbox command */
        rc = lpfc_create_bootstrap_mbox(phba);
        if (unlikely(rc))
        if (unlikely(rc))
                goto out_free_bsmbx;
  
 -      rc = lpfc_sli4_fw_cfg_check(phba);
 -      if (unlikely(rc))
 -              goto out_free_bsmbx;
 -
        /* Set up the hba's configuration parameters. */
        rc = lpfc_sli4_read_config(phba);
        if (unlikely(rc))
                goto out_free_bsmbx;
  
 -      /* Perform a function reset */
 -      rc = lpfc_pci_function_reset(phba);
 -      if (unlikely(rc))
 -              goto out_free_bsmbx;
 +      /* IF Type 0 ports get initialized now. */
 +      if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) ==
 +          LPFC_SLI_INTF_IF_TYPE_0) {
 +              rc = lpfc_pci_function_reset(phba);
 +              if (unlikely(rc))
 +                      goto out_free_bsmbx;
 +      }
  
        mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool,
                                                       GFP_KERNEL);
@@@ -5372,183 -5190,97 +5372,183 @@@ lpfc_sli_pci_mem_unset(struct lpfc_hba 
  int
  lpfc_sli4_post_status_check(struct lpfc_hba *phba)
  {
 -      struct lpfc_register sta_reg, uerrlo_reg, uerrhi_reg;
 -      int i, port_error = -ENODEV;
 +      struct lpfc_register portsmphr_reg, uerrlo_reg, uerrhi_reg;
 +      struct lpfc_register reg_data;
 +      int i, port_error = 0;
 +      uint32_t if_type;
  
 -      if (!phba->sli4_hba.STAregaddr)
 +      if (!phba->sli4_hba.PSMPHRregaddr)
                return -ENODEV;
  
        /* Wait up to 30 seconds for the SLI Port POST done and ready */
        for (i = 0; i < 3000; i++) {
 -              sta_reg.word0 = readl(phba->sli4_hba.STAregaddr);
 -              /* Encounter fatal POST error, break out */
 -              if (bf_get(lpfc_hst_state_perr, &sta_reg)) {
 +              portsmphr_reg.word0 = readl(phba->sli4_hba.PSMPHRregaddr);
 +              if (bf_get(lpfc_port_smphr_perr, &portsmphr_reg)) {
 +                      /* Port has a fatal POST error, break out */
                        port_error = -ENODEV;
                        break;
                }
 -              if (LPFC_POST_STAGE_ARMFW_READY ==
 -                  bf_get(lpfc_hst_state_port_status, &sta_reg)) {
 -                      port_error = 0;
 +              if (LPFC_POST_STAGE_PORT_READY ==
 +                  bf_get(lpfc_port_smphr_port_status, &portsmphr_reg))
                        break;
                msleep(10);
        }
  
 -      if (port_error)
 +      /*
 +       * If there was a port error during POST, then don't proceed with
 +       * other register reads as the data may not be valid.  Just exit.
 +       */
 +      if (port_error) {
                lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
 -                      "1408 Failure HBA POST Status: sta_reg=0x%x, "
 -                      "perr=x%x, sfi=x%x, nip=x%x, ipc=x%x, xrom=x%x, "
 -                      "dl=x%x, pstatus=x%x\n", sta_reg.word0,
 -                      bf_get(lpfc_hst_state_perr, &sta_reg),
 -                      bf_get(lpfc_hst_state_sfi, &sta_reg),
 -                      bf_get(lpfc_hst_state_nip, &sta_reg),
 -                      bf_get(lpfc_hst_state_ipc, &sta_reg),
 -                      bf_get(lpfc_hst_state_xrom, &sta_reg),
 -                      bf_get(lpfc_hst_state_dl, &sta_reg),
 -                      bf_get(lpfc_hst_state_port_status, &sta_reg));
 -
 -      /* Log device information */
 -      phba->sli4_hba.sli_intf.word0 = readl(phba->sli4_hba.SLIINTFregaddr);
 -      if (bf_get(lpfc_sli_intf_valid,
 -                 &phba->sli4_hba.sli_intf) == LPFC_SLI_INTF_VALID) {
 +                      "1408 Port Failed POST - portsmphr=0x%x, "
 +                      "perr=x%x, sfi=x%x, nip=x%x, ipc=x%x, scr1=x%x, "
 +                      "scr2=x%x, hscratch=x%x, pstatus=x%x\n",
 +                      portsmphr_reg.word0,
 +                      bf_get(lpfc_port_smphr_perr, &portsmphr_reg),
 +                      bf_get(lpfc_port_smphr_sfi, &portsmphr_reg),
 +                      bf_get(lpfc_port_smphr_nip, &portsmphr_reg),
 +                      bf_get(lpfc_port_smphr_ipc, &portsmphr_reg),
 +                      bf_get(lpfc_port_smphr_scr1, &portsmphr_reg),
 +                      bf_get(lpfc_port_smphr_scr2, &portsmphr_reg),
 +                      bf_get(lpfc_port_smphr_host_scratch, &portsmphr_reg),
 +                      bf_get(lpfc_port_smphr_port_status, &portsmphr_reg));
 +      } else {
                lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
 -                              "2534 Device Info: ChipType=0x%x, SliRev=0x%x, "
 -                              "FeatureL1=0x%x, FeatureL2=0x%x\n",
 +                              "2534 Device Info: SLIFamily=0x%x, "
 +                              "SLIRev=0x%x, IFType=0x%x, SLIHint_1=0x%x, "
 +                              "SLIHint_2=0x%x, FT=0x%x\n",
                                bf_get(lpfc_sli_intf_sli_family,
                                       &phba->sli4_hba.sli_intf),
                                bf_get(lpfc_sli_intf_slirev,
                                       &phba->sli4_hba.sli_intf),
 -                              bf_get(lpfc_sli_intf_featurelevel1,
 +                              bf_get(lpfc_sli_intf_if_type,
                                       &phba->sli4_hba.sli_intf),
 -                              bf_get(lpfc_sli_intf_featurelevel2,
 +                              bf_get(lpfc_sli_intf_sli_hint1,
 +                                     &phba->sli4_hba.sli_intf),
 +                              bf_get(lpfc_sli_intf_sli_hint2,
 +                                     &phba->sli4_hba.sli_intf),
 +                              bf_get(lpfc_sli_intf_func_type,
                                       &phba->sli4_hba.sli_intf));
 +              /*
 +               * Check for other Port errors during the initialization
 +               * process.  Fail the load if the port did not come up
 +               * correctly.
 +               */
 +              if_type = bf_get(lpfc_sli_intf_if_type,
 +                               &phba->sli4_hba.sli_intf);
 +              switch (if_type) {
 +              case LPFC_SLI_INTF_IF_TYPE_0:
 +                      phba->sli4_hba.ue_mask_lo =
 +                            readl(phba->sli4_hba.u.if_type0.UEMASKLOregaddr);
 +                      phba->sli4_hba.ue_mask_hi =
 +                            readl(phba->sli4_hba.u.if_type0.UEMASKHIregaddr);
 +                      uerrlo_reg.word0 =
 +                            readl(phba->sli4_hba.u.if_type0.UERRLOregaddr);
 +                      uerrhi_reg.word0 =
 +                              readl(phba->sli4_hba.u.if_type0.UERRHIregaddr);
 +                      if ((~phba->sli4_hba.ue_mask_lo & uerrlo_reg.word0) ||
 +                          (~phba->sli4_hba.ue_mask_hi & uerrhi_reg.word0)) {
 +                              lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
 +                                              "1422 Unrecoverable Error "
 +                                              "Detected during POST "
 +                                              "uerr_lo_reg=0x%x, "
 +                                              "uerr_hi_reg=0x%x, "
 +                                              "ue_mask_lo_reg=0x%x, "
 +                                              "ue_mask_hi_reg=0x%x\n",
 +                                              uerrlo_reg.word0,
 +                                              uerrhi_reg.word0,
 +                                              phba->sli4_hba.ue_mask_lo,
 +                                              phba->sli4_hba.ue_mask_hi);
 +                              port_error = -ENODEV;
 +                      }
 +                      break;
 +              case LPFC_SLI_INTF_IF_TYPE_2:
 +                      /* Final checks.  The port status should be clean. */
 +                      reg_data.word0 =
 +                              readl(phba->sli4_hba.u.if_type2.STATUSregaddr);
 +                      if (bf_get(lpfc_sliport_status_err, &reg_data)) {
 +                              phba->work_status[0] =
 +                                      readl(phba->sli4_hba.u.if_type2.
 +                                            ERR1regaddr);
 +                              phba->work_status[1] =
 +                                      readl(phba->sli4_hba.u.if_type2.
 +                                            ERR2regaddr);
 +                              lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
 +                                      "2888 Port Error Detected "
 +                                      "during POST: "
 +                                      "port status reg 0x%x, "
 +                                      "port_smphr reg 0x%x, "
 +                                      "error 1=0x%x, error 2=0x%x\n",
 +                                      reg_data.word0,
 +                                      portsmphr_reg.word0,
 +                                      phba->work_status[0],
 +                                      phba->work_status[1]);
 +                              port_error = -ENODEV;
 +                      }
 +                      break;
 +              case LPFC_SLI_INTF_IF_TYPE_1:
 +              default:
 +                      break;
 +              }
        }
 -      phba->sli4_hba.ue_mask_lo = readl(phba->sli4_hba.UEMASKLOregaddr);
 -      phba->sli4_hba.ue_mask_hi = readl(phba->sli4_hba.UEMASKHIregaddr);
 -      /* With uncoverable error, log the error message and return error */
 -      uerrlo_reg.word0 = readl(phba->sli4_hba.UERRLOregaddr);
 -      uerrhi_reg.word0 = readl(phba->sli4_hba.UERRHIregaddr);
 -      if ((~phba->sli4_hba.ue_mask_lo & uerrlo_reg.word0) ||
 -          (~phba->sli4_hba.ue_mask_hi & uerrhi_reg.word0)) {
 -              lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
 -                              "1422 HBA Unrecoverable error: "
 -                              "uerr_lo_reg=0x%x, uerr_hi_reg=0x%x, "
 -                              "ue_mask_lo_reg=0x%x, ue_mask_hi_reg=0x%x\n",
 -                              uerrlo_reg.word0, uerrhi_reg.word0,
 -                              phba->sli4_hba.ue_mask_lo,
 -                              phba->sli4_hba.ue_mask_hi);
 -              return -ENODEV;
 -      }
 -
        return port_error;
  }
  
  /**
   * lpfc_sli4_bar0_register_memmap - Set up SLI4 BAR0 register memory map.
   * @phba: pointer to lpfc hba data structure.
 + * @if_type:  The SLI4 interface type getting configured.
   *
   * This routine is invoked to set up SLI4 BAR0 PCI config space register
   * memory map.
   **/
  static void
 -lpfc_sli4_bar0_register_memmap(struct lpfc_hba *phba)
 -{
 -      phba->sli4_hba.UERRLOregaddr = phba->sli4_hba.conf_regs_memmap_p +
 -                                      LPFC_UERR_STATUS_LO;
 -      phba->sli4_hba.UERRHIregaddr = phba->sli4_hba.conf_regs_memmap_p +
 -                                      LPFC_UERR_STATUS_HI;
 -      phba->sli4_hba.UEMASKLOregaddr = phba->sli4_hba.conf_regs_memmap_p +
 -                                      LPFC_UE_MASK_LO;
 -      phba->sli4_hba.UEMASKHIregaddr = phba->sli4_hba.conf_regs_memmap_p +
 -                                      LPFC_UE_MASK_HI;
 -      phba->sli4_hba.SLIINTFregaddr = phba->sli4_hba.conf_regs_memmap_p +
 -                                      LPFC_SLI_INTF;
 +lpfc_sli4_bar0_register_memmap(struct lpfc_hba *phba, uint32_t if_type)
 +{
 +      switch (if_type) {
 +      case LPFC_SLI_INTF_IF_TYPE_0:
 +              phba->sli4_hba.u.if_type0.UERRLOregaddr =
 +                      phba->sli4_hba.conf_regs_memmap_p + LPFC_UERR_STATUS_LO;
 +              phba->sli4_hba.u.if_type0.UERRHIregaddr =
 +                      phba->sli4_hba.conf_regs_memmap_p + LPFC_UERR_STATUS_HI;
 +              phba->sli4_hba.u.if_type0.UEMASKLOregaddr =
 +                      phba->sli4_hba.conf_regs_memmap_p + LPFC_UE_MASK_LO;
 +              phba->sli4_hba.u.if_type0.UEMASKHIregaddr =
 +                      phba->sli4_hba.conf_regs_memmap_p + LPFC_UE_MASK_HI;
 +              phba->sli4_hba.SLIINTFregaddr =
 +                      phba->sli4_hba.conf_regs_memmap_p + LPFC_SLI_INTF;
 +              break;
 +      case LPFC_SLI_INTF_IF_TYPE_2:
 +              phba->sli4_hba.u.if_type2.ERR1regaddr =
 +                      phba->sli4_hba.conf_regs_memmap_p + LPFC_SLIPORT_ERR_1;
 +              phba->sli4_hba.u.if_type2.ERR2regaddr =
 +                      phba->sli4_hba.conf_regs_memmap_p + LPFC_SLIPORT_ERR_2;
 +              phba->sli4_hba.u.if_type2.CTRLregaddr =
 +                      phba->sli4_hba.conf_regs_memmap_p + LPFC_SLIPORT_CNTRL;
 +              phba->sli4_hba.u.if_type2.STATUSregaddr =
 +                      phba->sli4_hba.conf_regs_memmap_p + LPFC_SLIPORT_STATUS;
 +              phba->sli4_hba.SLIINTFregaddr =
 +                      phba->sli4_hba.conf_regs_memmap_p + LPFC_SLI_INTF;
 +              phba->sli4_hba.PSMPHRregaddr =
 +                   phba->sli4_hba.conf_regs_memmap_p + LPFC_SLIPORT_IF2_SMPHR;
 +              phba->sli4_hba.RQDBregaddr =
 +                      phba->sli4_hba.conf_regs_memmap_p + LPFC_RQ_DOORBELL;
 +              phba->sli4_hba.WQDBregaddr =
 +                      phba->sli4_hba.conf_regs_memmap_p + LPFC_WQ_DOORBELL;
 +              phba->sli4_hba.EQCQDBregaddr =
 +                      phba->sli4_hba.conf_regs_memmap_p + LPFC_EQCQ_DOORBELL;
 +              phba->sli4_hba.MQDBregaddr =
 +                      phba->sli4_hba.conf_regs_memmap_p + LPFC_MQ_DOORBELL;
 +              phba->sli4_hba.BMBXregaddr =
 +                      phba->sli4_hba.conf_regs_memmap_p + LPFC_BMBX;
 +              break;
 +      case LPFC_SLI_INTF_IF_TYPE_1:
 +      default:
 +              dev_printk(KERN_ERR, &phba->pcidev->dev,
 +                         "FATAL - unsupported SLI4 interface type - %d\n",
 +                         if_type);
 +              break;
 +      }
  }
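  
  /*
   * Note on the u.if_type0/u.if_type2 union used above: keeping both
   * register layouts in a union inside the sli4_hba structure lets one
   * structure describe whichever register set the detected if_type
   * dictates, so the rest of the driver stays layout-agnostic.
   */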
  
  /**
  static void
  lpfc_sli4_bar1_register_memmap(struct lpfc_hba *phba)
  {
 -
 -      phba->sli4_hba.STAregaddr = phba->sli4_hba.ctrl_regs_memmap_p +
 -                                  LPFC_HST_STATE;
 +      phba->sli4_hba.PSMPHRregaddr = phba->sli4_hba.ctrl_regs_memmap_p +
 +              LPFC_SLIPORT_IF0_SMPHR;
        phba->sli4_hba.ISRregaddr = phba->sli4_hba.ctrl_regs_memmap_p +
 -                                  LPFC_HST_ISR0;
 +              LPFC_HST_ISR0;
        phba->sli4_hba.IMRregaddr = phba->sli4_hba.ctrl_regs_memmap_p +
 -                                  LPFC_HST_IMR0;
 +              LPFC_HST_IMR0;
        phba->sli4_hba.ISCRregaddr = phba->sli4_hba.ctrl_regs_memmap_p +
 -                                   LPFC_HST_ISCR0;
 -      return;
 +              LPFC_HST_ISCR0;
  }
  
  /**
@@@ -5808,12 -5542,11 +5808,12 @@@ lpfc_sli4_read_config(struct lpfc_hba *
  }
  
  /**
 - * lpfc_dev_endian_order_setup - Notify the port of the host's endian order.
 + * lpfc_setup_endian_order - Write endian order to an SLI4 if_type 0 port.
   * @phba: pointer to lpfc hba data structure.
   *
 - * This routine is invoked to setup the host-side endian order to the
 - * HBA consistent with the SLI-4 interface spec.
 + * This routine is invoked to setup the port-side endian order when
 + * the port if_type is 0.  This routine has no function for other
 + * if_types.
   *
   * Return codes
   *    0 - successful
@@@ -5824,44 -5557,34 +5824,44 @@@ static in
  lpfc_setup_endian_order(struct lpfc_hba *phba)
  {
        LPFC_MBOXQ_t *mboxq;
 -      uint32_t rc = 0;
 +      uint32_t if_type, rc = 0;
        uint32_t endian_mb_data[2] = {HOST_ENDIAN_LOW_WORD0,
                                      HOST_ENDIAN_HIGH_WORD1};
  
 -      mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
 -      if (!mboxq) {
 -              lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
 -                              "0492 Unable to allocate memory for issuing "
 -                              "SLI_CONFIG_SPECIAL mailbox command\n");
 -              return -ENOMEM;
 -      }
 +      if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
 +      switch (if_type) {
 +      case LPFC_SLI_INTF_IF_TYPE_0:
 +              mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool,
 +                                                     GFP_KERNEL);
 +              if (!mboxq) {
 +                      lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
 +                                      "0492 Unable to allocate memory for "
 +                                      "issuing SLI_CONFIG_SPECIAL mailbox "
 +                                      "command\n");
 +                      return -ENOMEM;
 +              }
  
 -      /*
 -       * The SLI4_CONFIG_SPECIAL mailbox command requires the first two
 -       * words to contain special data values and no other data.
 -       */
 -      memset(mboxq, 0, sizeof(LPFC_MBOXQ_t));
 -      memcpy(&mboxq->u.mqe, &endian_mb_data, sizeof(endian_mb_data));
 -      rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
 -      if (rc != MBX_SUCCESS) {
 -              lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
 -                              "0493 SLI_CONFIG_SPECIAL mailbox failed with "
 -                              "status x%x\n",
 -                              rc);
 -              rc = -EIO;
 +              /*
 +               * The SLI4_CONFIG_SPECIAL mailbox command requires the first
 +               * two words to contain special data values and no other data.
 +               */
 +              memset(mboxq, 0, sizeof(LPFC_MBOXQ_t));
 +              memcpy(&mboxq->u.mqe, &endian_mb_data, sizeof(endian_mb_data));
 +              rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
 +              if (rc != MBX_SUCCESS) {
 +                      lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
 +                                      "0493 SLI_CONFIG_SPECIAL mailbox "
 +                                      "failed with status x%x\n",
 +                                      rc);
 +                      rc = -EIO;
 +              }
 +              mempool_free(mboxq, phba->mbox_mem_pool);
 +              break;
 +      case LPFC_SLI_INTF_IF_TYPE_2:
 +      case LPFC_SLI_INTF_IF_TYPE_1:
 +      default:
 +              break;
        }
 -
 -      mempool_free(mboxq, phba->mbox_mem_pool);
        return rc;
  }
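  
  /*
   * Why this works (a sketch inferred from the code above, not a quote
   * from the SLI-4 spec): HOST_ENDIAN_LOW_WORD0/HOST_ENDIAN_HIGH_WORD1
   * are fixed, known constants, so the port can compare the byte order
   * in which they arrive against the order it expects and deduce the
   * host's endianness from the difference.
   */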
  
  lpfc_pci_function_reset(struct lpfc_hba *phba)
  {
        LPFC_MBOXQ_t *mboxq;
 -      uint32_t rc = 0;
 +      uint32_t rc = 0, if_type;
        uint32_t shdr_status, shdr_add_status;
 +      uint32_t rdy_chk, num_resets = 0, reset_again = 0;
        union lpfc_sli4_cfg_shdr *shdr;
 +      struct lpfc_register reg_data;
  
 -      mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
 -      if (!mboxq) {
 -              lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
 -                              "0494 Unable to allocate memory for issuing "
 -                              "SLI_FUNCTION_RESET mailbox command\n");
 -              return -ENOMEM;
 -      }
 +      if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
 +      switch (if_type) {
 +      case LPFC_SLI_INTF_IF_TYPE_0:
 +              mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool,
 +                                                     GFP_KERNEL);
 +              if (!mboxq) {
 +                      lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
 +                                      "0494 Unable to allocate memory for "
 +                                      "issuing SLI_FUNCTION_RESET mailbox "
 +                                      "command\n");
 +                      return -ENOMEM;
 +              }
  
 -      /* Set up PCI function reset SLI4_CONFIG mailbox-ioctl command */
 -      lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
 -                       LPFC_MBOX_OPCODE_FUNCTION_RESET, 0,
 -                       LPFC_SLI4_MBX_EMBED);
 -      rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
 -      shdr = (union lpfc_sli4_cfg_shdr *)
 -              &mboxq->u.mqe.un.sli4_config.header.cfg_shdr;
 -      shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
 -      shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
 -      if (rc != MBX_TIMEOUT)
 -              mempool_free(mboxq, phba->mbox_mem_pool);
 -      if (shdr_status || shdr_add_status || rc) {
 -              lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
 -                              "0495 SLI_FUNCTION_RESET mailbox failed with "
 -                              "status x%x add_status x%x, mbx status x%x\n",
 -                              shdr_status, shdr_add_status, rc);
 -              rc = -ENXIO;
 +              /* Setup PCI function reset mailbox-ioctl command */
 +              lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
 +                               LPFC_MBOX_OPCODE_FUNCTION_RESET, 0,
 +                               LPFC_SLI4_MBX_EMBED);
 +              rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
 +              shdr = (union lpfc_sli4_cfg_shdr *)
 +                      &mboxq->u.mqe.un.sli4_config.header.cfg_shdr;
 +              shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
 +              shdr_add_status = bf_get(lpfc_mbox_hdr_add_status,
 +                                       &shdr->response);
 +              if (rc != MBX_TIMEOUT)
 +                      mempool_free(mboxq, phba->mbox_mem_pool);
 +              if (shdr_status || shdr_add_status || rc) {
 +                      lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
 +                                      "0495 SLI_FUNCTION_RESET mailbox "
 +                                      "failed with status x%x add_status x%x,"
 +                                      " mbx status x%x\n",
 +                                      shdr_status, shdr_add_status, rc);
 +                      rc = -ENXIO;
 +              }
 +              break;
 +      case LPFC_SLI_INTF_IF_TYPE_2:
 +              for (num_resets = 0;
 +                   num_resets < MAX_IF_TYPE_2_RESETS;
 +                   num_resets++) {
 +                      reg_data.word0 = 0;
 +                      bf_set(lpfc_sliport_ctrl_end, &reg_data,
 +                             LPFC_SLIPORT_LITTLE_ENDIAN);
 +                      bf_set(lpfc_sliport_ctrl_ip, &reg_data,
 +                             LPFC_SLIPORT_INIT_PORT);
 +                      writel(reg_data.word0, phba->sli4_hba.u.if_type2.
 +                             CTRLregaddr);
 +
 +                      /*
 +                       * Poll the Port Status Register and wait for RDY for
 +                       * up to 10 seconds.  If the port doesn't respond, treat
 +                       * it as an error.  If the port responds with RN, start
 +                       * the loop again.
 +                       */
 +                      for (rdy_chk = 0; rdy_chk < 1000; rdy_chk++) {
 +                              reg_data.word0 =
 +                                      readl(phba->sli4_hba.u.if_type2.
 +                                            STATUSregaddr);
 +                              if (bf_get(lpfc_sliport_status_rdy, &reg_data))
 +                                      break;
 +                              if (bf_get(lpfc_sliport_status_rn, &reg_data)) {
 +                                      reset_again++;
 +                                      break;
 +                              }
 +                              msleep(10);
 +                      }
 +
 +                      /*
 +                       * If the port responds to the init request with
 +                       * reset needed, delay for a bit and restart the loop.
 +                       */
 +                      if (reset_again) {
 +                              msleep(10);
 +                              reset_again = 0;
 +                              continue;
 +                      }
 +
 +                      /* Detect any port errors. */
 +                      reg_data.word0 = readl(phba->sli4_hba.u.if_type2.
 +                                             STATUSregaddr);
 +                      if ((bf_get(lpfc_sliport_status_err, &reg_data)) ||
 +                          (rdy_chk >= 1000)) {
 +                              phba->work_status[0] = readl(
 +                                      phba->sli4_hba.u.if_type2.ERR1regaddr);
 +                              phba->work_status[1] = readl(
 +                                      phba->sli4_hba.u.if_type2.ERR2regaddr);
 +                              lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
 +                                      "2890 Port Error Detected "
 +                                      "during Port Reset: "
 +                                      "port status reg 0x%x, "
 +                                      "error 1=0x%x, error 2=0x%x\n",
 +                                      reg_data.word0,
 +                                      phba->work_status[0],
 +                                      phba->work_status[1]);
 +                              rc = -ENODEV;
 +                      }
 +
 +                      /*
 +                       * Terminate the outer loop provided the Port indicated
 +                       * ready within 10 seconds.
 +                       */
 +                      if (rdy_chk < 1000)
 +                              break;
 +              }
 +              break;
 +      case LPFC_SLI_INTF_IF_TYPE_1:
 +      default:
 +              break;
        }
 +
 +      /* Catch the not-ready port failure after a port reset. */
 +      if (num_resets >= MAX_IF_TYPE_2_RESETS)
 +              rc = -ENODEV;
 +
        return rc;
  }
  
@@@ -6901,7 -6536,6 +6901,7 @@@ lpfc_sli4_pci_mem_setup(struct lpfc_hb
        struct pci_dev *pdev;
        unsigned long bar0map_len, bar1map_len, bar2map_len;
        int error = -ENODEV;
 +      uint32_t if_type;
  
        /* Obtain PCI device reference */
        if (!phba->pcidev)
                }
        }
  
 -      /* Get the bus address of SLI4 device Bar0, Bar1, and Bar2 and the
 -       * number of bytes required by each mapping. They are actually
 -       * mapping to the PCI BAR regions 0 or 1, 2, and 4 by the SLI4 device.
 +      /*
 +       * The BARs and register set definitions and offset locations are
 +       * dependent on the if_type.
 +       */
 +      if (pci_read_config_dword(pdev, LPFC_SLI_INTF,
 +                                &phba->sli4_hba.sli_intf.word0)) {
 +              return error;
 +      }
 +
 +      /* There is no SLI3 failback for SLI4 devices. */
 +      if (bf_get(lpfc_sli_intf_valid, &phba->sli4_hba.sli_intf) !=
 +          LPFC_SLI_INTF_VALID) {
 +              lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
 +                              "2894 SLI_INTF reg contents invalid "
 +                              "sli_intf reg 0x%x\n",
 +                              phba->sli4_hba.sli_intf.word0);
 +              return error;
 +      }
 +
 +      if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
 +      /*
 +       * Get the bus address of SLI4 device Bar regions and the
 +       * number of bytes required by each mapping. The mapping of the
 +       * particular PCI BARs regions is dependent on the type of
 +       * SLI4 device.
         */
        if (pci_resource_start(pdev, 0)) {
                phba->pci_bar0_map = pci_resource_start(pdev, 0);
                bar0map_len = pci_resource_len(pdev, 0);
 +
 +              /*
 +               * Map SLI4 PCI Config Space Register base to a kernel virtual
 +               * addr
 +               */
 +              phba->sli4_hba.conf_regs_memmap_p =
 +                      ioremap(phba->pci_bar0_map, bar0map_len);
 +              if (!phba->sli4_hba.conf_regs_memmap_p) {
 +                      dev_printk(KERN_ERR, &pdev->dev,
 +                                 "ioremap failed for SLI4 PCI config "
 +                                 "registers.\n");
 +                      goto out;
 +              }
 +              /* Set up BAR0 PCI config space register memory map */
 +              lpfc_sli4_bar0_register_memmap(phba, if_type);
        } else {
                phba->pci_bar0_map = pci_resource_start(pdev, 1);
                bar0map_len = pci_resource_len(pdev, 1);
 -      }
 -      phba->pci_bar1_map = pci_resource_start(pdev, 2);
 -      bar1map_len = pci_resource_len(pdev, 2);
 -
 -      phba->pci_bar2_map = pci_resource_start(pdev, 4);
 -      bar2map_len = pci_resource_len(pdev, 4);
 -
 -      /* Map SLI4 PCI Config Space Register base to a kernel virtual addr */
 -      phba->sli4_hba.conf_regs_memmap_p =
 +              if (if_type == LPFC_SLI_INTF_IF_TYPE_2) {
 +                      dev_printk(KERN_ERR, &pdev->dev,
 +                         "FATAL - No BAR0 mapping for SLI4, if_type 2\n");
 +                      goto out;
 +              }
 +              phba->sli4_hba.conf_regs_memmap_p =
                                ioremap(phba->pci_bar0_map, bar0map_len);
 -      if (!phba->sli4_hba.conf_regs_memmap_p) {
 -              dev_printk(KERN_ERR, &pdev->dev,
 -                         "ioremap failed for SLI4 PCI config registers.\n");
 -              goto out;
 +              if (!phba->sli4_hba.conf_regs_memmap_p) {
 +                      dev_printk(KERN_ERR, &pdev->dev,
 +                              "ioremap failed for SLI4 PCI config "
 +                              "registers.\n");
 +                      goto out;
 +              }
 +              lpfc_sli4_bar0_register_memmap(phba, if_type);
        }
  
 -      /* Map SLI4 HBA Control Register base to a kernel virtual address. */
 -      phba->sli4_hba.ctrl_regs_memmap_p =
 +      if (pci_resource_start(pdev, 2)) {
 +              /*
 +               * Map SLI4 if type 0 HBA Control Register base to a kernel
 +               * virtual address and setup the registers.
 +               */
 +              phba->pci_bar1_map = pci_resource_start(pdev, 2);
 +              bar1map_len = pci_resource_len(pdev, 2);
 +              phba->sli4_hba.ctrl_regs_memmap_p =
                                ioremap(phba->pci_bar1_map, bar1map_len);
 -      if (!phba->sli4_hba.ctrl_regs_memmap_p) {
 -              dev_printk(KERN_ERR, &pdev->dev,
 +              if (!phba->sli4_hba.ctrl_regs_memmap_p) {
 +                      dev_printk(KERN_ERR, &pdev->dev,
                           "ioremap failed for SLI4 HBA control registers.\n");
 -              goto out_iounmap_conf;
 +                      goto out_iounmap_conf;
 +              }
 +              lpfc_sli4_bar1_register_memmap(phba);
        }
  
 -      /* Map SLI4 HBA Doorbell Register base to a kernel virtual address. */
 -      phba->sli4_hba.drbl_regs_memmap_p =
 +      if (pci_resource_start(pdev, 4)) {
 +              /*
 +               * Map SLI4 if type 0 HBA Doorbell Register base to a kernel
 +               * virtual address and setup the registers.
 +               */
 +              phba->pci_bar2_map = pci_resource_start(pdev, 4);
 +              bar2map_len = pci_resource_len(pdev, 4);
 +              phba->sli4_hba.drbl_regs_memmap_p =
                                ioremap(phba->pci_bar2_map, bar2map_len);
 -      if (!phba->sli4_hba.drbl_regs_memmap_p) {
 -              dev_printk(KERN_ERR, &pdev->dev,
 +              if (!phba->sli4_hba.drbl_regs_memmap_p) {
 +                      dev_printk(KERN_ERR, &pdev->dev,
                           "ioremap failed for SLI4 HBA doorbell registers.\n");
 -              goto out_iounmap_ctrl;
 +                      goto out_iounmap_ctrl;
 +              }
 +              error = lpfc_sli4_bar2_register_memmap(phba, LPFC_VF0);
 +              if (error)
 +                      goto out_iounmap_all;
        }
  
 -      /* Set up BAR0 PCI config space register memory map */
 -      lpfc_sli4_bar0_register_memmap(phba);
 -
 -      /* Set up BAR1 register memory map */
 -      lpfc_sli4_bar1_register_memmap(phba);
 -
 -      /* Set up BAR2 register memory map */
 -      error = lpfc_sli4_bar2_register_memmap(phba, LPFC_VF0);
 -      if (error)
 -              goto out_iounmap_all;
 -
        return 0;
  
  out_iounmap_all:
@@@ -8071,7 -7661,7 +8071,7 @@@ lpfc_pci_remove_one_s3(struct pci_dev *
         * the HBA.
         */
  
-       /* HBA interrupt will be diabled after this call */
+       /* HBA interrupt will be disabled after this call */
        lpfc_sli_hba_down(phba);
        /* Stop kthread signal shall trigger work_done one more time */
        kthread_stop(phba->worker_thread);
@@@ -8559,8 -8149,6 +8559,8 @@@ lpfc_pci_probe_one_s4(struct pci_dev *p
                goto out_unset_driver_resource_s4;
        }
  
 +      INIT_LIST_HEAD(&phba->active_rrq_list);
 +
        /* Set up common device driver resources */
        error = lpfc_setup_driver_resource_phase2(phba);
        if (error) {
                                "0451 Configure interrupt mode (%d) "
                                "failed active interrupt test.\n",
                                intr_mode);
 -              /* Unset the preivous SLI-4 HBA setup */
 +              /* Unset the previous SLI-4 HBA setup. */
 +              /*
 +               * TODO:  Is this operation compatible with IF TYPE 2
 +               * devices?  All port state is deleted and cleared.
 +               */
                lpfc_sli4_unset_hba(phba);
                /* Try next level of interrupt mode */
                cfg_mode = --intr_mode;
@@@ -9406,10 -8990,6 +9406,10 @@@ static struct pci_device_id lpfc_id_tab
                PCI_ANY_ID, PCI_ANY_ID, },
        {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_BALIUS,
                PCI_ANY_ID, PCI_ANY_ID, },
 +      {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LANCER_FC,
 +              PCI_ANY_ID, PCI_ANY_ID, },
 +      {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LANCER_FCOE,
 +              PCI_ANY_ID, PCI_ANY_ID, },
        { 0 }
  };
  
@@@ -513,344 -513,8 +513,344 @@@ __lpfc_get_active_sglq(struct lpfc_hba 
  }
  
  /**
 + * __lpfc_set_rrq_active - set RRQ active bit in the ndlp's xri_bitmap.
 + * @phba: Pointer to HBA context object.
 + * @ndlp: nodelist pointer for this target.
 + * @xritag: xri used in this exchange.
 + * @rxid: Remote Exchange ID.
 + * @send_rrq: Flag used to determine if we should send rrq els cmd.
 + *
 + * This function is called with hbalock held.
 + * The active bit is set in the ndlp's active rrq xri_bitmap; an rrq
 + * struct is then allocated and added to the active_rrq_list.
 + *
 + * returns  0 when an rrq slot was reserved for this xri
 + *         < 0 when rrq memory could not be allocated or a parameter
 + *             was invalid.
 + **/
 +static int
 +__lpfc_set_rrq_active(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
 +              uint16_t xritag, uint16_t rxid, uint16_t send_rrq)
 +{
 +      uint16_t adj_xri;
 +      struct lpfc_node_rrq *rrq;
 +      int empty;
 +
 +      /*
 +       * Set the active bit even if no memory is available.
 +       */
 +      adj_xri = xritag - phba->sli4_hba.max_cfg_param.xri_base;
 +      if (!ndlp)
 +              return -EINVAL;
 +      if (test_and_set_bit(adj_xri, ndlp->active_rrqs.xri_bitmap))
 +              return -EINVAL;
 +      rrq = mempool_alloc(phba->rrq_pool, GFP_KERNEL);
 +      if (rrq) {
 +              rrq->send_rrq = send_rrq;
 +              rrq->xritag = xritag;
 +              rrq->rrq_stop_time = jiffies + HZ * (phba->fc_ratov + 1);
 +              rrq->ndlp = ndlp;
 +              rrq->nlp_DID = ndlp->nlp_DID;
 +              rrq->vport = ndlp->vport;
 +              rrq->rxid = rxid;
 +              empty = list_empty(&phba->active_rrq_list);
 +              if (phba->cfg_enable_rrq && send_rrq)
 +                      /*
 +                       * We need the xri before we can add this to the
 +                       * phba active rrq list.
 +                       */
 +                      rrq->send_rrq = send_rrq;
 +              else
 +                      rrq->send_rrq = 0;
 +              list_add_tail(&rrq->list, &phba->active_rrq_list);
 +              if (!(phba->hba_flag & HBA_RRQ_ACTIVE)) {
 +                      phba->hba_flag |= HBA_RRQ_ACTIVE;
 +                      if (empty)
 +                              lpfc_worker_wake_up(phba);
 +              }
 +              return 0;
 +      }
 +      return -ENOMEM;
 +}
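 +
 +/*
 + * A hypothetical call sequence for the routine above (sketch only; the
 + * real call sites live elsewhere in the driver): when an exchange using
 + * @xritag terminates abnormally, the owning path calls
 + * lpfc_set_rrq_active(phba, ndlp, xritag, rxid, 1), and the worker
 + * thread later drains phba->active_rrq_list via lpfc_handle_rrq_active()
 + * once the RATOV window has expired.
 + */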
 +
 +/**
 + * __lpfc_clr_rrq_active - Clears RRQ active bit in xri_bitmap.
 + * @phba: Pointer to HBA context object.
 + * @xritag: xri used in this exchange.
 + * @rrq: The RRQ to be cleared.
 + *
 + * This function is called with hbalock held. It clears the RRQ active
 + * bit for @xritag in the node's xri_bitmap and frees the rrq back to
 + * the rrq mempool.
 + **/
 +static void
 +__lpfc_clr_rrq_active(struct lpfc_hba *phba,
 +                      uint16_t xritag,
 +                      struct lpfc_node_rrq *rrq)
 +{
 +      uint16_t adj_xri;
 +      struct lpfc_nodelist *ndlp;
 +
 +      ndlp = lpfc_findnode_did(rrq->vport, rrq->nlp_DID);
 +
 +      /* The target DID could have been swapped (cable swap), so prefer
 +       * the ndlp from lpfc_findnode_did() when it is available.
 +       */
 +      if (!ndlp)
 +              ndlp = rrq->ndlp;
 +
 +      adj_xri = xritag - phba->sli4_hba.max_cfg_param.xri_base;
 +      if (test_and_clear_bit(adj_xri, ndlp->active_rrqs.xri_bitmap)) {
 +              rrq->send_rrq = 0;
 +              rrq->xritag = 0;
 +              rrq->rrq_stop_time = 0;
 +      }
 +      mempool_free(rrq, phba->rrq_pool);
 +}
 +
 +/**
 + * lpfc_handle_rrq_active - Checks if an RRQ has waited RATOV.
 + * @phba: Pointer to HBA context object.
 + *
 + * This function takes the hbalock. For each rrq on the list it checks
 + * whether stop_time (ratov from when the rrq was set active) has been
 + * reached; if it has and the send_rrq flag is set, lpfc_send_rrq is
 + * called. If the send_rrq flag is not set, the routine that clears the
 + * rrq and frees its resources is called instead.
 + * Before leaving, the timer is re-armed for the next rrq that is going
 + * to expire.
 + *
 + **/
 +void
 +lpfc_handle_rrq_active(struct lpfc_hba *phba)
 +{
 +      struct lpfc_node_rrq *rrq;
 +      struct lpfc_node_rrq *nextrrq;
 +      unsigned long next_time;
 +      unsigned long iflags;
 +
 +      spin_lock_irqsave(&phba->hbalock, iflags);
 +      phba->hba_flag &= ~HBA_RRQ_ACTIVE;
 +      next_time = jiffies + HZ * (phba->fc_ratov + 1);
 +      list_for_each_entry_safe(rrq, nextrrq,
 +                      &phba->active_rrq_list, list) {
 +              if (time_after(jiffies, rrq->rrq_stop_time)) {
 +                      list_del(&rrq->list);
 +                      if (!rrq->send_rrq) {
 +                              /* This call will free the rrq. */
 +                              __lpfc_clr_rrq_active(phba, rrq->xritag, rrq);
 +                      } else {
 +                              /* If we send the rrq then the completion
 +                               * handler will clear the bit in the
 +                               * xribitmap.
 +                               */
 +                              spin_unlock_irqrestore(&phba->hbalock, iflags);
 +                              if (lpfc_send_rrq(phba, rrq))
 +                                      lpfc_clr_rrq_active(phba, rrq->xritag,
 +                                                          rrq);
 +                              spin_lock_irqsave(&phba->hbalock, iflags);
 +                      }
 +              } else if (time_before(rrq->rrq_stop_time, next_time))
 +                      next_time = rrq->rrq_stop_time;
 +      }
 +      spin_unlock_irqrestore(&phba->hbalock, iflags);
 +      if (!list_empty(&phba->active_rrq_list))
 +              mod_timer(&phba->rrq_tmr, next_time);
 +}
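 +
 +/*
 + * Timer wiring, for context (based on the init code earlier in this
 + * patch): lpfc_sli4_driver_resource_setup() points phba->rrq_tmr at
 + * lpfc_rrq_timeout; when that timer fires it presumably wakes the
 + * worker thread, which re-enters this routine, and the mod_timer()
 + * call above re-arms the timer for the next rrq still due to expire.
 + */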
 +
 +/**
 + * lpfc_get_active_rrq - Get the active RRQ for this exchange.
 + * @vport: Pointer to vport context object.
 + * @xri: The xri used in the exchange.
 + * @did: The target's DID for this exchange.
 + *
 + * returns NULL = rrq not found in the phba->active_rrq_list.
 + *         rrq = rrq for this xri and target.
 + **/
 +struct lpfc_node_rrq *
 +lpfc_get_active_rrq(struct lpfc_vport *vport, uint16_t xri, uint32_t did)
 +{
 +      struct lpfc_hba *phba = vport->phba;
 +      struct lpfc_node_rrq *rrq;
 +      struct lpfc_node_rrq *nextrrq;
 +      unsigned long iflags;
 +
 +      if (phba->sli_rev != LPFC_SLI_REV4)
 +              return NULL;
 +      spin_lock_irqsave(&phba->hbalock, iflags);
 +      list_for_each_entry_safe(rrq, nextrrq, &phba->active_rrq_list, list) {
 +              if (rrq->vport == vport && rrq->xritag == xri &&
 +                              rrq->nlp_DID == did) {
 +                      list_del(&rrq->list);
 +                      spin_unlock_irqrestore(&phba->hbalock, iflags);
 +                      return rrq;
 +              }
 +      }
 +      spin_unlock_irqrestore(&phba->hbalock, iflags);
 +      return NULL;
 +}
 +
 +/**
 + * lpfc_cleanup_vports_rrqs - Remove and clear the active RRQ for this vport.
 + * @vport: Pointer to vport context object.
 + *
 + * Remove all active RRQs for this vport from the phba->active_rrq_list and
 + * clear the rrq.
 + **/
 +void
 +lpfc_cleanup_vports_rrqs(struct lpfc_vport *vport)
 +{
 +      struct lpfc_hba *phba = vport->phba;
 +      struct lpfc_node_rrq *rrq;
 +      struct lpfc_node_rrq *nextrrq;
 +      unsigned long iflags;
 +
 +      if (phba->sli_rev != LPFC_SLI_REV4)
 +              return;
 +      spin_lock_irqsave(&phba->hbalock, iflags);
 +      list_for_each_entry_safe(rrq, nextrrq, &phba->active_rrq_list, list) {
 +              if (rrq->vport == vport) {
 +                      list_del(&rrq->list);
 +                      __lpfc_clr_rrq_active(phba, rrq->xritag, rrq);
 +              }
 +      }
 +      spin_unlock_irqrestore(&phba->hbalock, iflags);
 +}
 +
 +/**
 + * lpfc_cleanup_wt_rrqs - Remove all rrqs from the active list.
 + * @phba: Pointer to HBA context object.
 + *
 + * Remove all rrqs from the phba->active_rrq_list and free them by
 + * calling __lpfc_clr_rrq_active.
 + *
 + **/
 +void
 +lpfc_cleanup_wt_rrqs(struct lpfc_hba *phba)
 +{
 +      struct lpfc_node_rrq *rrq;
 +      struct lpfc_node_rrq *nextrrq;
 +      unsigned long next_time;
 +      unsigned long iflags;
 +
 +      if (phba->sli_rev != LPFC_SLI_REV4)
 +              return;
 +      spin_lock_irqsave(&phba->hbalock, iflags);
 +      phba->hba_flag &= ~HBA_RRQ_ACTIVE;
 +      next_time = jiffies + HZ * (phba->fc_ratov * 2);
 +      list_for_each_entry_safe(rrq, nextrrq, &phba->active_rrq_list, list) {
 +              list_del(&rrq->list);
 +              __lpfc_clr_rrq_active(phba, rrq->xritag, rrq);
 +      }
 +      spin_unlock_irqrestore(&phba->hbalock, iflags);
 +      if (!list_empty(&phba->active_rrq_list))
 +              mod_timer(&phba->rrq_tmr, next_time);
 +}
 +
 +/**
 + * __lpfc_test_rrq_active - Test RRQ bit in xri_bitmap.
 + * @phba: Pointer to HBA context object.
 + * @ndlp: Targets nodelist pointer for this exchange.
 + * @xritag: the xri in the bitmap to test.
 + *
 + * This function is called with hbalock held. This function
 + * returns 0 = rrq not active for this xri
 + *         1 = rrq is valid for this xri.
 + **/
 +static int
 +__lpfc_test_rrq_active(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
 +                      uint16_t  xritag)
 +{
 +      uint16_t adj_xri;
 +
 +      adj_xri = xritag - phba->sli4_hba.max_cfg_param.xri_base;
 +      if (!ndlp)
 +              return 0;
 +      if (test_bit(adj_xri, ndlp->active_rrqs.xri_bitmap))
 +              return 1;
 +      else
 +              return 0;
 +}
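
The test above first rebases the absolute xritag onto the per-node bitmap, then
uses the generic bitops. A small sketch of the same indexing scheme, with
hypothetical names standing in for the lpfc structures:

#include <linux/bitops.h>
#include <linux/types.h>

#define MAX_XRI 1024

struct node {
        DECLARE_BITMAP(xri_bitmap, MAX_XRI);    /* one bit per exchange */
};

/* Rebase the absolute tag onto the bitmap, then test the bit. */
static int xri_active(struct node *n, u16 xritag, u16 xri_base)
{
        u16 adj = xritag - xri_base;

        if (!n)
                return 0;
        return test_bit(adj, n->xri_bitmap) ? 1 : 0;
}
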
 +
 +/**
 + * lpfc_set_rrq_active - set RRQ active bit in xri_bitmap.
 + * @phba: Pointer to HBA context object.
 + * @ndlp: nodelist pointer for this target.
 + * @xritag: xri used in this exchange.
 + * @rxid: Remote Exchange ID.
 + * @send_rrq: Flag used to determine if we should send the RRQ ELS command.
 + *
 + * This function takes the hbalock.
 + * The active bit is always set in the active rrq xri_bitmap even
 + * if there is no slot available for the other rrq information.
 + *
 + * returns 0 = rrq activated for this xri
 + *         < 0 No memory or invalid ndlp.
 + **/
 +int
 +lpfc_set_rrq_active(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
 +                      uint16_t xritag, uint16_t rxid, uint16_t send_rrq)
 +{
 +      int ret;
 +      unsigned long iflags;
 +
 +      spin_lock_irqsave(&phba->hbalock, iflags);
 +      ret = __lpfc_set_rrq_active(phba, ndlp, xritag, rxid, send_rrq);
 +      spin_unlock_irqrestore(&phba->hbalock, iflags);
 +      return ret;
 +}
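
This wrapper illustrates the driver's naming convention: the double-underscore
variant assumes the hbalock is already held, and the plain-named function
acquires and releases it around the call. A generic sketch of that convention;
do_op/__do_op are placeholders, not lpfc functions:

#include <linux/spinlock.h>

static DEFINE_SPINLOCK(hw_lock);

/* __do_op: caller already holds hw_lock. */
static int __do_op(int arg)
{
        /* ... manipulate state protected by hw_lock ... */
        return 0;
}

/* do_op: locking wrapper for callers that do not hold hw_lock. */
static int do_op(int arg)
{
        unsigned long flags;
        int ret;

        spin_lock_irqsave(&hw_lock, flags);
        ret = __do_op(arg);
        spin_unlock_irqrestore(&hw_lock, flags);
        return ret;
}
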
 +
 +/**
 + * lpfc_clr_rrq_active - Clears RRQ active bit in xri_bitmap.
 + * @phba: Pointer to HBA context object.
 + * @xritag: xri used in this exchange.
 + * @rrq: The RRQ to be cleared.
 + *
 + * This function takes the hbalock.
 + **/
 +void
 +lpfc_clr_rrq_active(struct lpfc_hba *phba,
 +                      uint16_t xritag,
 +                      struct lpfc_node_rrq *rrq)
 +{
 +      unsigned long iflags;
 +
 +      spin_lock_irqsave(&phba->hbalock, iflags);
 +      __lpfc_clr_rrq_active(phba, xritag, rrq);
 +      spin_unlock_irqrestore(&phba->hbalock, iflags);
 +      return;
 +}
 +
 +/**
 + * lpfc_test_rrq_active - Test RRQ bit in xri_bitmap.
 + * @phba: Pointer to HBA context object.
 + * @ndlp: Targets nodelist pointer for this exchange.
 + * @xritag: the xri in the bitmap to test.
 + *
 + * This function takes the hbalock.
 + * returns 0 = rrq not active for this xri
 + *         1 = rrq is valid for this xri.
 + **/
 +int
 +lpfc_test_rrq_active(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
 +                      uint16_t  xritag)
 +{
 +      int ret;
 +      unsigned long iflags;
 +
 +      spin_lock_irqsave(&phba->hbalock, iflags);
 +      ret = __lpfc_test_rrq_active(phba, ndlp, xritag);
 +      spin_unlock_irqrestore(&phba->hbalock, iflags);
 +      return ret;
 +}
 +
 +/**
   * __lpfc_sli_get_sglq - Allocates an iocb object from sgl pool
   * @phba: Pointer to HBA context object.
 + * @piocb: Pointer to the iocbq.
   *
   * This function is called with hbalock held. This function
   * gets a new driver sglq object from the sglq list. If the list
   * is not empty then it returns the allocated sglq object else it
   * returns NULL.
   **/
  static struct lpfc_sglq *
 -__lpfc_sli_get_sglq(struct lpfc_hba *phba)
 +__lpfc_sli_get_sglq(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq)
  {
        struct list_head *lpfc_sgl_list = &phba->sli4_hba.lpfc_sgl_list;
        struct lpfc_sglq *sglq = NULL;
 +      struct lpfc_sglq *start_sglq = NULL;
        uint16_t adj_xri;
 +      struct lpfc_scsi_buf *lpfc_cmd;
 +      struct lpfc_nodelist *ndlp;
 +      int found = 0;
 +
 +      if (piocbq->iocb_flag & LPFC_IO_FCP) {
 +              lpfc_cmd = (struct lpfc_scsi_buf *) piocbq->context1;
 +              ndlp = lpfc_cmd->rdata->pnode;
 +      } else if ((piocbq->iocb.ulpCommand == CMD_GEN_REQUEST64_CR) &&
 +                      !(piocbq->iocb_flag & LPFC_IO_LIBDFC))
 +              ndlp = piocbq->context_un.ndlp;
 +      else
 +              ndlp = piocbq->context1;
 +
        list_remove_head(lpfc_sgl_list, sglq, struct lpfc_sglq, list);
 -      if (!sglq)
 -              return NULL;
 -      adj_xri = sglq->sli4_xritag - phba->sli4_hba.max_cfg_param.xri_base;
 -      phba->sli4_hba.lpfc_sglq_active_list[adj_xri] = sglq;
 -      sglq->state = SGL_ALLOCATED;
 +      start_sglq = sglq;
 +      while (!found) {
 +              if (!sglq)
 +                      return NULL;
 +              adj_xri = sglq->sli4_xritag -
 +                              phba->sli4_hba.max_cfg_param.xri_base;
 +              if (__lpfc_test_rrq_active(phba, ndlp, sglq->sli4_xritag)) {
 +                      /* This xri has an rrq outstanding for this DID.
 +                       * put it back in the list and get another xri.
 +                       */
 +                      list_add_tail(&sglq->list, lpfc_sgl_list);
 +                      sglq = NULL;
 +                      list_remove_head(lpfc_sgl_list, sglq,
 +                                              struct lpfc_sglq, list);
 +                      if (sglq == start_sglq) {
 +                              sglq = NULL;
 +                              break;
 +                      } else
 +                              continue;
 +              }
 +              sglq->ndlp = ndlp;
 +              found = 1;
 +              phba->sli4_hba.lpfc_sglq_active_list[adj_xri] = sglq;
 +              sglq->state = SGL_ALLOCATED;
 +      }
        return sglq;
  }
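
The search above is a rotate-until-start scan: entries whose xri still has an
RRQ outstanding go back on the tail, and the walk gives up once the first
popped entry comes around again. A simplified sketch of the same scan, assuming
a generic pool and a placeholder busy flag in place of the RRQ test:

#include <linux/list.h>

struct entry {
        struct list_head list;
        int busy;                       /* stand-in for the RRQ-active test */
};

/* Pop entries until one is usable; requeue busy ones on the tail.
 * Stop when the first entry we saw comes back around to the head. */
static struct entry *get_usable(struct list_head *pool)
{
        struct entry *e, *start;

        start = list_first_entry_or_null(pool, struct entry, list);
        e = start;
        while (e) {
                list_del(&e->list);
                if (!e->busy)
                        return e;               /* found one */
                list_add_tail(&e->list, pool);  /* skip it for now */
                e = list_first_entry_or_null(pool, struct entry, list);
                if (e == start)
                        return NULL;            /* scanned the whole pool */
        }
        return NULL;                            /* pool was empty */
}
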
  
@@@ -968,7 -598,6 +968,7 @@@ __lpfc_sli_release_iocbq_s4(struct lpfc
                                &phba->sli4_hba.abts_sgl_list_lock, iflag);
                } else {
                        sglq->state = SGL_FREED;
 +                      sglq->ndlp = NULL;
                        list_add(&sglq->list, &phba->sli4_hba.lpfc_sgl_list);
  
                        /* Check if TXQ queue needs to be serviced */
@@@ -2005,6 -1634,7 +2005,6 @@@ lpfc_sli_chk_mbx_command(uint8_t mbxCom
        case MBX_READ_LNK_STAT:
        case MBX_REG_LOGIN:
        case MBX_UNREG_LOGIN:
 -      case MBX_READ_LA:
        case MBX_CLEAR_LA:
        case MBX_DUMP_MEMORY:
        case MBX_DUMP_CONTEXT:
        case MBX_READ_SPARM64:
        case MBX_READ_RPI64:
        case MBX_REG_LOGIN64:
 -      case MBX_READ_LA64:
 +      case MBX_READ_TOPOLOGY:
        case MBX_WRITE_WWN:
        case MBX_SET_DEBUG:
        case MBX_LOAD_EXP_ROM:
@@@ -2116,6 -1746,11 +2116,6 @@@ lpfc_sli_def_mbox_cmpl(struct lpfc_hba 
                kfree(mp);
        }
  
 -      if ((pmb->u.mb.mbxCommand == MBX_UNREG_LOGIN) &&
 -          (phba->sli_rev == LPFC_SLI_REV4) &&
 -          (pmb->u.mb.un.varUnregLogin.rsvd1 == 0x0))
 -              lpfc_sli4_free_rpi(phba, pmb->u.mb.un.varUnregLogin.rpi);
 -
        /*
         * If a REG_LOGIN succeeded  after node is destroyed or node
         * is in re-discovery driver need to cleanup the RPI.
@@@ -3848,6 -3483,12 +3848,6 @@@ lpfc_sli4_brdreset(struct lpfc_hba *phb
        phba->pport->fc_myDID = 0;
        phba->pport->fc_prevDID = 0;
  
 -      /* Turn off parity checking and serr during the physical reset */
 -      pci_read_config_word(phba->pcidev, PCI_COMMAND, &cfg_value);
 -      pci_write_config_word(phba->pcidev, PCI_COMMAND,
 -                            (cfg_value &
 -                            ~(PCI_COMMAND_PARITY | PCI_COMMAND_SERR)));
 -
        spin_lock_irq(&phba->hbalock);
        psli->sli_flag &= ~(LPFC_PROCESS_LA);
        phba->fcf.fcf_flag = 0;
        /* Now physically reset the device */
        lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
                        "0389 Performing PCI function reset!\n");
 +
 +      /* Turn off parity checking and serr during the physical reset */
 +      pci_read_config_word(phba->pcidev, PCI_COMMAND, &cfg_value);
 +      pci_write_config_word(phba->pcidev, PCI_COMMAND, (cfg_value &
 +                            ~(PCI_COMMAND_PARITY | PCI_COMMAND_SERR)));
 +
        /* Perform FCoE PCI function reset */
        lpfc_pci_function_reset(phba);
  
 +      /* Restore PCI cmd register */
 +      pci_write_config_word(phba->pcidev, PCI_COMMAND, cfg_value);
 +
        return 0;
  }
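
The relocated code brackets the reset with a save/mask/restore of the PCI
command register, so parity and SERR reporting stay disabled only for the
duration of the reset. A sketch of that sequence, with reset_hw() standing in
for the device-specific reset:

#include <linux/pci.h>

static void reset_hw(struct pci_dev *pdev)
{
        /* placeholder for the device-specific reset sequence */
}

static void reset_with_parity_masked(struct pci_dev *pdev)
{
        u16 cmd;

        /* Save the command register, then mask parity and SERR checking. */
        pci_read_config_word(pdev, PCI_COMMAND, &cmd);
        pci_write_config_word(pdev, PCI_COMMAND,
                              cmd & ~(PCI_COMMAND_PARITY | PCI_COMMAND_SERR));

        reset_hw(pdev);

        /* Restore the saved value once the reset is done. */
        pci_write_config_word(pdev, PCI_COMMAND, cmd);
}
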
  
@@@ -4685,10 -4317,6 +4685,10 @@@ lpfc_sli4_hba_setup(struct lpfc_hba *ph
        struct lpfc_vport *vport = phba->pport;
        struct lpfc_dmabuf *mp;
  
 +      /*
 +       * TODO: Why does this routine execute these tasks in a different
 +       * order from probe?
 +       */
        /* Perform a PCI function reset to start from clean */
        rc = lpfc_pci_function_reset(phba);
        if (unlikely(rc))
        }
  
        rc = lpfc_sli4_read_rev(phba, mboxq, vpd, &vpd_size);
 -      if (unlikely(rc))
 -              goto out_free_vpd;
 -
 +      if (unlikely(rc)) {
 +              kfree(vpd);
 +              goto out_free_mbox;
 +      }
        mqe = &mboxq->u.mqe;
        phba->sli_rev = bf_get(lpfc_mbx_rd_rev_sli_lvl, &mqe->un.read_rev);
        if (bf_get(lpfc_mbx_rd_rev_fcoe, &mqe->un.read_rev))
 -              phba->hba_flag |= HBA_FCOE_SUPPORT;
 +              phba->hba_flag |= HBA_FCOE_MODE;
 +      else
 +              phba->hba_flag &= ~HBA_FCOE_MODE;
  
        if (bf_get(lpfc_mbx_rd_rev_cee_ver, &mqe->un.read_rev) ==
                LPFC_DCBX_CEE_MODE)
                phba->hba_flag &= ~HBA_FIP_SUPPORT;
  
        if (phba->sli_rev != LPFC_SLI_REV4 ||
 -          !(phba->hba_flag & HBA_FCOE_SUPPORT)) {
 +          !(phba->hba_flag & HBA_FCOE_MODE)) {
                lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
                        "0376 READ_REV Error. SLI Level %d "
                        "FCoE enabled %d\n",
 -                      phba->sli_rev, phba->hba_flag & HBA_FCOE_SUPPORT);
 +                      phba->sli_rev, phba->hba_flag & HBA_FCOE_MODE);
                rc = -EIO;
 -              goto out_free_vpd;
 +              kfree(vpd);
 +              goto out_free_mbox;
        }
        /*
         * Evaluate the read rev and vpd data. Populate the driver
                                "Using defaults.\n", rc);
                rc = 0;
        }
 +      kfree(vpd);
  
        /* Save information as VPD data */
        phba->vpd.rev.biuRev = mqe->un.read_rev.first_hw_rev;
        rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
        if (unlikely(rc)) {
                rc = -EIO;
 -              goto out_free_vpd;
 +              goto out_free_mbox;
        }
  
        /*
        if (rc) {
                phba->link_state = LPFC_HBA_ERROR;
                rc = -ENOMEM;
 -              goto out_free_vpd;
 +              goto out_free_mbox;
        }
  
        mboxq->vport = vport;
                                rc, bf_get(lpfc_mqe_status, mqe));
                phba->link_state = LPFC_HBA_ERROR;
                rc = -EIO;
 -              goto out_free_vpd;
 +              goto out_free_mbox;
        }
  
        if (phba->cfg_soft_wwnn)
                                "0582 Error %d during sgl post operation\n",
                                        rc);
                rc = -ENODEV;
 -              goto out_free_vpd;
 +              goto out_free_mbox;
        }
  
        /* Register SCSI SGL pool to the device */
                /* Some Scsi buffers were moved to the abort scsi list */
                /* A pci function reset will repost them */
                rc = -ENODEV;
 -              goto out_free_vpd;
 +              goto out_free_mbox;
        }
  
        /* Post the rpi header region to the device. */
                                "0393 Error %d during rpi post operation\n",
                                rc);
                rc = -ENODEV;
 -              goto out_free_vpd;
 +              goto out_free_mbox;
        }
  
        /* Set up all the queues to the device */
                }
        }
  
 +      if (!(phba->hba_flag & HBA_FCOE_MODE)) {
 +              /*
 +               * The FC Port needs to register FCFI (index 0)
 +               */
 +              lpfc_reg_fcfi(phba, mboxq);
 +              mboxq->vport = phba->pport;
 +              rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
 +              if (rc == MBX_SUCCESS)
 +                      rc = 0;
 +              else
 +                      goto out_unset_queue;
 +      }
        /*
         * The port is ready, set the host's link state to LINK_DOWN
         * in preparation for link interrupts.
         */
        spin_lock_irq(&phba->hbalock);
        phba->link_state = LPFC_LINK_DOWN;
        spin_unlock_irq(&phba->hbalock);
 -      rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
 -      if (unlikely(rc != MBX_NOT_FINISHED)) {
 -              kfree(vpd);
 -              return 0;
 -      } else
 -              rc = -EIO;
 -
 +      rc = phba->lpfc_hba_init_link(phba, MBX_NOWAIT);
 +out_unset_queue:
        /* Unset all the queues set up in this routine when error out */
        if (rc)
                lpfc_sli4_queue_unset(phba);
  out_stop_timers:
        if (rc)
                lpfc_stop_hba_timers(phba);
 -out_free_vpd:
 -      kfree(vpd);
  out_free_mbox:
        mempool_free(mboxq, phba->mbox_mem_pool);
        return rc;
@@@ -6240,8 -5863,6 +6240,8 @@@ lpfc_sli4_bpl2sgl(struct lpfc_hba *phba
        IOCB_t *icmd;
        int numBdes = 0;
        int i = 0;
 +      uint32_t offset = 0; /* accumulated offset in the sg request list */
 +      int inbound = 0; /* number of sg reply entries inbound from firmware */
  
        if (!piocbq || !sglq)
                return xritag;
                         */
                        bde.tus.w = le32_to_cpu(bpl->tus.w);
                        sgl->sge_len = cpu_to_le32(bde.tus.f.bdeSize);
 +                      /* The offsets in the sgl need to be accumulated
 +                       * separately for the request and reply lists.
 +                       * The request is always first, the reply follows.
 +                       */
 +                      if (piocbq->iocb.ulpCommand == CMD_GEN_REQUEST64_CR) {
 +                              /* add up the reply sg entries */
 +                              if (bpl->tus.f.bdeFlags == BUFF_TYPE_BDE_64I)
 +                                      inbound++;
 +                              /* first inbound? reset the offset */
 +                              if (inbound == 1)
 +                                      offset = 0;
 +                              bf_set(lpfc_sli4_sge_offset, sgl, offset);
 +                              offset += bde.tus.f.bdeSize;
 +                      }
                        bpl++;
                        sgl++;
                }
@@@ -6421,6 -6028,11 +6421,6 @@@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phb
                bf_set(els_req64_vf, &wqe->els_req, 0);
                /* And a VFID for word 12 */
                bf_set(els_req64_vfid, &wqe->els_req, 0);
 -              /*
 -               * Set ct field to 3, indicates that the context_tag field
 -               * contains the FCFI and remote N_Port_ID is
 -               * in word 5.
 -               */
                ct = ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l);
                bf_set(wqe_ctxt_tag, &wqe->els_req.wqe_com,
                       iocbq->iocb.ulpContext);
                bf_set(wqe_ebde_cnt, &wqe->fcp_icmd.wqe_com, 0);
        break;
        case CMD_GEN_REQUEST64_CR:
 +              /* For this command calculate the xmit length of the
 +               * request bde.
 +               */
 +              xmit_len = 0;
 +              numBdes = iocbq->iocb.un.genreq64.bdl.bdeSize /
 +                      sizeof(struct ulp_bde64);
 +              for (i = 0; i < numBdes; i++) {
 +                      if (bpl[i].tus.f.bdeFlags != BUFF_TYPE_BDE_64)
 +                              break;
 +                      bde.tus.w = le32_to_cpu(bpl[i].tus.w);
 +                      xmit_len += bde.tus.f.bdeSize;
 +              }
                /* word3 iocb=IO_TAG wqe=request_payload_len */
                wqe->gen_req.request_payload_len = xmit_len;
                /* word4 iocb=parameter wqe=relative_offset memcpy */
@@@ -6720,7 -6320,7 +6720,7 @@@ __lpfc_sli_issue_iocb_s4(struct lpfc_hb
                                        return IOCB_BUSY;
                                }
                        } else {
 -                      sglq = __lpfc_sli_get_sglq(phba);
 +                      sglq = __lpfc_sli_get_sglq(phba, piocb);
                                if (!sglq) {
                                        if (!(flag & SLI_IOCB_RET_IOCB)) {
                                                __lpfc_sli_ringtx_put(phba,
@@@ -8433,66 -8033,29 +8433,66 @@@ static in
  lpfc_sli4_eratt_read(struct lpfc_hba *phba)
  {
        uint32_t uerr_sta_hi, uerr_sta_lo;
 +      uint32_t if_type, portsmphr;
 +      struct lpfc_register portstat_reg;
  
 -      /* For now, use the SLI4 device internal unrecoverable error
 +      /*
 +       * For now, use the SLI4 device internal unrecoverable error
         * registers for error attention. This can be changed later.
         */
 -      uerr_sta_lo = readl(phba->sli4_hba.UERRLOregaddr);
 -      uerr_sta_hi = readl(phba->sli4_hba.UERRHIregaddr);
 -      if ((~phba->sli4_hba.ue_mask_lo & uerr_sta_lo) ||
 -          (~phba->sli4_hba.ue_mask_hi & uerr_sta_hi)) {
 +      if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
 +      switch (if_type) {
 +      case LPFC_SLI_INTF_IF_TYPE_0:
 +              uerr_sta_lo = readl(phba->sli4_hba.u.if_type0.UERRLOregaddr);
 +              uerr_sta_hi = readl(phba->sli4_hba.u.if_type0.UERRHIregaddr);
 +              if ((~phba->sli4_hba.ue_mask_lo & uerr_sta_lo) ||
 +                  (~phba->sli4_hba.ue_mask_hi & uerr_sta_hi)) {
 +                      lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
 +                                      "1423 HBA Unrecoverable error: "
 +                                      "uerr_lo_reg=0x%x, uerr_hi_reg=0x%x, "
 +                                      "ue_mask_lo_reg=0x%x, "
 +                                      "ue_mask_hi_reg=0x%x\n",
 +                                      uerr_sta_lo, uerr_sta_hi,
 +                                      phba->sli4_hba.ue_mask_lo,
 +                                      phba->sli4_hba.ue_mask_hi);
 +                      phba->work_status[0] = uerr_sta_lo;
 +                      phba->work_status[1] = uerr_sta_hi;
 +                      phba->work_ha |= HA_ERATT;
 +                      phba->hba_flag |= HBA_ERATT_HANDLED;
 +                      return 1;
 +              }
 +              break;
 +      case LPFC_SLI_INTF_IF_TYPE_2:
 +              portstat_reg.word0 =
 +                      readl(phba->sli4_hba.u.if_type2.STATUSregaddr);
 +              portsmphr = readl(phba->sli4_hba.PSMPHRregaddr);
 +              if (bf_get(lpfc_sliport_status_err, &portstat_reg)) {
 +                      phba->work_status[0] =
 +                              readl(phba->sli4_hba.u.if_type2.ERR1regaddr);
 +                      phba->work_status[1] =
 +                              readl(phba->sli4_hba.u.if_type2.ERR2regaddr);
 +                      lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
 +                                      "2885 Port Error Detected: "
 +                                      "port status reg 0x%x, "
 +                                      "port smphr reg 0x%x, "
 +                                      "error 1=0x%x, error 2=0x%x\n",
 +                                      portstat_reg.word0,
 +                                      portsmphr,
 +                                      phba->work_status[0],
 +                                      phba->work_status[1]);
 +                      phba->work_ha |= HA_ERATT;
 +                      phba->hba_flag |= HBA_ERATT_HANDLED;
 +                      return 1;
 +              }
 +              break;
 +      case LPFC_SLI_INTF_IF_TYPE_1:
 +      default:
                lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
 -                              "1423 HBA Unrecoverable error: "
 -                              "uerr_lo_reg=0x%x, uerr_hi_reg=0x%x, "
 -                              "ue_mask_lo_reg=0x%x, ue_mask_hi_reg=0x%x\n",
 -                              uerr_sta_lo, uerr_sta_hi,
 -                              phba->sli4_hba.ue_mask_lo,
 -                              phba->sli4_hba.ue_mask_hi);
 -              phba->work_status[0] = uerr_sta_lo;
 -              phba->work_status[1] = uerr_sta_hi;
 -              /* Set the driver HA work bitmap */
 -              phba->work_ha |= HA_ERATT;
 -              /* Indicate polling handles this ERATT */
 -              phba->hba_flag |= HBA_ERATT_HANDLED;
 +                              "2886 HBA Error Attention on unsupported "
 +                              "if type %d.", if_type);
                return 1;
        }
 +
        return 0;
  }
  
@@@ -8547,7 -8110,7 +8547,7 @@@ lpfc_sli_check_eratt(struct lpfc_hba *p
                ha_copy = lpfc_sli_eratt_read(phba);
                break;
        case LPFC_SLI_REV4:
 -              /* Read devcie Uncoverable Error (UERR) registers */
 +              /* Read device Unrecoverable Error (UERR) registers */
                ha_copy = lpfc_sli4_eratt_read(phba);
                break;
        default:
@@@ -10172,7 -9735,7 +10172,7 @@@ lpfc_sli4_intr_handler(int irq, void *d
   * lpfc_sli4_queue_free - free a queue structure and associated memory
   * @queue: The queue structure to free.
   *
-  * This function frees a queue structure and the DMAable memeory used for
+  * This function frees a queue structure and the DMAable memory used for
   * the host resident queue. This function must be called after destroying the
   * queue on the HBA.
   **/
@@@ -10592,20 -10155,16 +10592,20 @@@ lpfc_mq_create(struct lpfc_hba *phba, s
                         length, LPFC_SLI4_MBX_EMBED);
  
        mq_create_ext = &mbox->u.mqe.un.mq_create_ext;
 -      bf_set(lpfc_mbx_mq_create_ext_num_pages, &mq_create_ext->u.request,
 -                  mq->page_count);
 -      bf_set(lpfc_mbx_mq_create_ext_async_evt_link, &mq_create_ext->u.request,
 -             1);
 -      bf_set(lpfc_mbx_mq_create_ext_async_evt_fcfste,
 +      bf_set(lpfc_mbx_mq_create_ext_num_pages,
 +             &mq_create_ext->u.request, mq->page_count);
 +      bf_set(lpfc_mbx_mq_create_ext_async_evt_link,
 +             &mq_create_ext->u.request, 1);
 +      bf_set(lpfc_mbx_mq_create_ext_async_evt_fip,
               &mq_create_ext->u.request, 1);
        bf_set(lpfc_mbx_mq_create_ext_async_evt_group5,
               &mq_create_ext->u.request, 1);
 -      bf_set(lpfc_mq_context_cq_id, &mq_create_ext->u.request.context,
 -             cq->queue_id);
 +      bf_set(lpfc_mbx_mq_create_ext_async_evt_fc,
 +             &mq_create_ext->u.request, 1);
 +      bf_set(lpfc_mbx_mq_create_ext_async_evt_sli,
 +             &mq_create_ext->u.request, 1);
 +      bf_set(lpfc_mq_context_cq_id,
 +             &mq_create_ext->u.request.context, cq->queue_id);
        bf_set(lpfc_mq_context_valid, &mq_create_ext->u.request.context, 1);
        switch (mq->entry_count) {
        default:
@@@ -11578,8 -11137,7 +11578,8 @@@ lpfc_sli4_post_scsi_sgl_block(struct lp
  static int
  lpfc_fc_frame_check(struct lpfc_hba *phba, struct fc_frame_header *fc_hdr)
  {
 -      char *rctl_names[] = FC_RCTL_NAMES_INIT;
 +      /* Make rctl_names static to save stack space */
 +      static char *rctl_names[] = FC_RCTL_NAMES_INIT;
        char *type_names[] = FC_TYPE_NAMES_INIT;
        struct fc_vft_header *fc_vft_hdr;
  
@@@ -11980,10 -11538,6 +11980,10 @@@ lpfc_sli4_seq_abort_acc(struct lpfc_hb
                                "SID:x%x\n", oxid, sid);
                return;
        }
 +      if (rxid >= phba->sli4_hba.max_cfg_param.xri_base
 +              && rxid <= (phba->sli4_hba.max_cfg_param.max_xri
 +              + phba->sli4_hba.max_cfg_param.xri_base))
 +              lpfc_set_rrq_active(phba, ndlp, rxid, oxid, 0);
  
        /* Allocate buffer for acc iocb */
        ctiocb = lpfc_sli_get_iocbq(phba);
        icmd->ulpLe = 1;
        icmd->ulpClass = CLASS3;
        icmd->ulpContext = ndlp->nlp_rpi;
 +      ctiocb->context1 = ndlp;
  
        ctiocb->iocb_cmpl = NULL;
        ctiocb->vport = phba->pport;
@@@ -12576,37 -12129,42 +12576,37 @@@ lpfc_sli4_resume_rpi(struct lpfc_nodeli
  
  /**
   * lpfc_sli4_init_vpi - Initialize a vpi with the port
 - * @phba: pointer to lpfc hba data structure.
 - * @vpi: vpi value to activate with the port.
 + * @vport: Pointer to the vport for which the vpi is being initialized
   *
 - * This routine is invoked to activate a vpi with the
 - * port when the host intends to use vports with a
 - * nonzero vpi.
 + * This routine is invoked to activate a vpi with the port.
   *
   * Returns:
   *    0 success
   *    -Evalue otherwise
   **/
  int
 -lpfc_sli4_init_vpi(struct lpfc_hba *phba, uint16_t vpi)
 +lpfc_sli4_init_vpi(struct lpfc_vport *vport)
  {
        LPFC_MBOXQ_t *mboxq;
        int rc = 0;
        int retval = MBX_SUCCESS;
        uint32_t mbox_tmo;
 -
 -      if (vpi == 0)
 -              return -EINVAL;
 +      struct lpfc_hba *phba = vport->phba;
        mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
        if (!mboxq)
                return -ENOMEM;
 -      lpfc_init_vpi(phba, mboxq, vpi);
 +      lpfc_init_vpi(phba, mboxq, vport->vpi);
        mbox_tmo = lpfc_mbox_tmo_val(phba, MBX_INIT_VPI);
        rc = lpfc_sli_issue_mbox_wait(phba, mboxq, mbox_tmo);
        if (rc != MBX_SUCCESS) {
 -              lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
 +              lpfc_printf_vlog(vport, KERN_ERR, LOG_SLI,
                                "2022 INIT VPI Mailbox failed "
                                "status %d, mbxStatus x%x\n", rc,
                                bf_get(lpfc_mqe_status, &mboxq->u.mqe));
                retval = -EIO;
        }
        if (rc != MBX_TIMEOUT)
 -              mempool_free(mboxq, phba->mbox_mem_pool);
 +              mempool_free(mboxq, vport->phba->mbox_mem_pool);
  
        return retval;
  }
@@@ -13296,7 -12854,6 +13296,7 @@@ lpfc_cleanup_pending_mbox(struct lpfc_v
        struct lpfc_nodelist *act_mbx_ndlp = NULL;
        struct Scsi_Host  *shost = lpfc_shost_from_vport(vport);
        LIST_HEAD(mbox_cmd_list);
 +      uint8_t restart_loop;
  
        /* Clean up internally queued mailbox commands with the vport */
        spin_lock_irq(&phba->hbalock);
                        mb->mbox_flag |= LPFC_MBX_IMED_UNREG;
                }
        }
 +      /* Cleanup any mailbox completions which are not yet processed */
 +      do {
 +              restart_loop = 0;
 +              list_for_each_entry(mb, &phba->sli.mboxq_cmpl, list) {
 +                      /*
 +                       * If this mailbox is already processed or it is
 +                       * for another vport, ignore it.
 +                       */
 +                      if ((mb->vport != vport) ||
 +                              (mb->mbox_flag & LPFC_MBX_IMED_UNREG))
 +                              continue;
 +
 +                      if ((mb->u.mb.mbxCommand != MBX_REG_LOGIN64) &&
 +                              (mb->u.mb.mbxCommand != MBX_REG_VPI))
 +                              continue;
 +
 +                      mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
 +                      if (mb->u.mb.mbxCommand == MBX_REG_LOGIN64) {
 +                              ndlp = (struct lpfc_nodelist *)mb->context2;
 +                              /* Unregister the RPI when mailbox complete */
 +                              mb->mbox_flag |= LPFC_MBX_IMED_UNREG;
 +                              restart_loop = 1;
 +                              spin_unlock_irq(&phba->hbalock);
 +                              spin_lock(shost->host_lock);
 +                              ndlp->nlp_flag &= ~NLP_IGNR_REG_CMPL;
 +                              spin_unlock(shost->host_lock);
 +                              spin_lock_irq(&phba->hbalock);
 +                              break;
 +                      }
 +              }
 +      } while (restart_loop);
 +
        spin_unlock_irq(&phba->hbalock);
  
        /* Release the cleaned-up mailbox commands */
        while (!list_empty(&mbox_cmd_list)) {
                list_remove_head(&mbox_cmd_list, mb, LPFC_MBOXQ_t, list);
                if (mb->u.mb.mbxCommand == MBX_REG_LOGIN64) {
 -                      if (phba->sli_rev == LPFC_SLI_REV4)
 -                              __lpfc_sli4_free_rpi(phba,
 -                                              mb->u.mb.un.varRegLogin.rpi);
                        mp = (struct lpfc_dmabuf *) (mb->context1);
                        if (mp) {
                                __lpfc_mbuf_free(phba, mp->virt, mp->phys);
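
The restart_loop scan added above follows a standard pattern: when an entry
needs work that cannot be done under the lock, drop the lock, do the work,
retake the lock, and rescan from the top because the list may have changed in
the meantime. A generic sketch, assuming entries are not freed while the flag
is being serviced:

#include <linux/list.h>
#include <linux/spinlock.h>

struct job {
        struct list_head list;
        int needs_slow_work;
};

static LIST_HEAD(jobs);
static DEFINE_SPINLOCK(jobs_lock);

static void slow_work(struct job *j)
{
        /* work that cannot run under jobs_lock */
}

static void process_jobs(void)
{
        struct job *j;
        int restart;

        spin_lock_irq(&jobs_lock);
        do {
                restart = 0;
                list_for_each_entry(j, &jobs, list) {
                        if (!j->needs_slow_work)
                                continue;
                        j->needs_slow_work = 0;
                        /* Drop the lock for the slow part; the list may
                         * change meanwhile, so rescan from the top. */
                        spin_unlock_irq(&jobs_lock);
                        slow_work(j);
                        spin_lock_irq(&jobs_lock);
                        restart = 1;
                        break;
                }
        } while (restart);
        spin_unlock_irq(&jobs_lock);
}
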
@@@ -13420,13 -12948,12 +13420,13 @@@ lpfc_drain_txq(struct lpfc_hba *phba
        while (pring->txq_cnt) {
                spin_lock_irqsave(&phba->hbalock, iflags);
  
 -              sglq = __lpfc_sli_get_sglq(phba);
 +              piocbq = lpfc_sli_ringtx_get(phba, pring);
 +              sglq = __lpfc_sli_get_sglq(phba, piocbq);
                if (!sglq) {
 +                      __lpfc_sli_ringtx_put(phba, pring, piocbq);
                        spin_unlock_irqrestore(&phba->hbalock, iflags);
                        break;
                } else {
 -                      piocbq = lpfc_sli_ringtx_get(phba, pring);
                        if (!piocbq) {
                                /* The txq_cnt is out of sync. This should
                                 * never happen
@@@ -864,13 -864,15 +864,15 @@@ int scsi_sysfs_add_sdev(struct scsi_dev
  
        error = device_add(&sdev->sdev_gendev);
        if (error) {
-               printk(KERN_INFO "error 1\n");
+               sdev_printk(KERN_INFO, sdev,
+                               "failed to add device: %d\n", error);
                return error;
        }
        device_enable_async_suspend(&sdev->sdev_dev);
        error = device_add(&sdev->sdev_dev);
        if (error) {
-               printk(KERN_INFO "error 2\n");
+               sdev_printk(KERN_INFO, sdev,
+                               "failed to add class device: %d\n", error);
                device_del(&sdev->sdev_gendev);
                return error;
        }
@@@ -993,14 -995,16 +995,14 @@@ static int __remove_child (struct devic
   */
  void scsi_remove_target(struct device *dev)
  {
 -      struct device *rdev;
 -
        if (scsi_is_target_device(dev)) {
                __scsi_remove_target(to_scsi_target(dev));
                return;
        }
  
 -      rdev = get_device(dev);
 +      get_device(dev);
        device_for_each_child(dev, NULL, __remove_child);
 -      put_device(rdev);
 +      put_device(dev);
  }
  EXPORT_SYMBOL(scsi_remove_target);
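
The simplification keeps the essential pattern: pin the parent with
get_device() so it cannot be released while its children are iterated, then
drop the reference. Sketched with a placeholder child-removal callback:

#include <linux/device.h>

static int remove_child(struct device *child, void *data)
{
        /* detach one child; details elided */
        return 0;
}

static void remove_all_children(struct device *dev)
{
        get_device(dev);        /* pin dev across the iteration */
        device_for_each_child(dev, NULL, remove_child);
        put_device(dev);        /* drop the reference; dev may go away now */
}
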
  
@@@ -27,6 -27,7 +27,6 @@@
  #include <asm/uaccess.h>
  #include <linux/ctype.h>
  #include <linux/reboot.h>
 -#include <linux/gpio.h>
  #include <asm/tsc.h>
  #include <asm/olpc.h>
  
@@@ -48,7 -49,7 +48,7 @@@ struct dcon_platform_data 
        int (*init)(void);
        void (*bus_stabilize_wiggle)(void);
        void (*set_dconload)(int);
 -      int (*read_status)(void);
 +      u8 (*read_status)(void);
  };
  
  static struct dcon_platform_data *pdata;
@@@ -614,7 -615,7 +614,7 @@@ static struct device_attribute dcon_dev
        __ATTR(resumeline, 0644, dcon_resumeline_show, dcon_resumeline_store),
  };
  
- static struct backlight_ops dcon_bl_ops = {
+ static const struct backlight_ops dcon_bl_ops = {
        .get_brightness = dconbl_get,
        .update_status = dconbl_set
  };
diff --combined drivers/telephony/ixj.c
@@@ -284,11 -284,12 +284,11 @@@ static int samplerate = 100
  
  module_param(ixjdebug, int, 0);
  
 -static struct pci_device_id ixj_pci_tbl[] __devinitdata = {
 +static DEFINE_PCI_DEVICE_TABLE(ixj_pci_tbl) = {
        { PCI_VENDOR_ID_QUICKNET, PCI_DEVICE_ID_QUICKNET_XJ,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
        { }
  };
 -
  MODULE_DEVICE_TABLE(pci, ixj_pci_tbl);
  
  /************************************************************************
@@@ -6580,7 -6581,8 +6580,8 @@@ static long do_ixj_ioctl(struct file *f
        case IXJCTL_SET_FILTER:
                if (copy_from_user(&jf, argp, sizeof(jf))) 
                        retval = -EFAULT;
-               retval = ixj_init_filter(j, &jf);
+               else
+                       retval = ixj_init_filter(j, &jf);
                break;
        case IXJCTL_SET_FILTER_RAW:
                if (copy_from_user(&jfr, argp, sizeof(jfr))) 
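
The fix above matters because a failed copy_from_user() must not be followed
by use of the half-filled buffer; the added else makes the -EFAULT stick. The
corrected shape of such an ioctl branch, sketched with hypothetical names:

#include <linux/errno.h>
#include <linux/uaccess.h>

struct filter_args {
        int coeff[8];
};

static long apply_filter(struct filter_args *f)
{
        return 0;       /* placeholder for the real ixj_init_filter() work */
}

static long set_filter(void __user *argp)
{
        struct filter_args jf;

        if (copy_from_user(&jf, argp, sizeof(jf)))
                return -EFAULT;         /* never act on a half-filled jf */
        return apply_filter(&jf);
}
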
@@@ -1191,17 -1191,13 +1191,17 @@@ static irqreturn_t imx_udc_ctrl_irq(in
        return IRQ_HANDLED;
  }
  
 +#ifndef MX1_INT_USBD0
 +#define MX1_INT_USBD0 MX1_USBD_INT0
 +#endif
 +
  static irqreturn_t imx_udc_bulk_irq(int irq, void *dev)
  {
        struct imx_udc_struct *imx_usb = dev;
 -      struct imx_ep_struct *imx_ep = &imx_usb->imx_ep[irq - USBD_INT0];
 +      struct imx_ep_struct *imx_ep = &imx_usb->imx_ep[irq - MX1_INT_USBD0];
        int intr = __raw_readl(imx_usb->base + USB_EP_INTR(EP_NO(imx_ep)));
  
 -      dump_ep_intr(__func__, irq - USBD_INT0, intr, imx_usb->dev);
 +      dump_ep_intr(__func__, irq - MX1_INT_USBD0, intr, imx_usb->dev);
  
        if (!imx_usb->driver) {
                __raw_writel(intr, imx_usb->base + USB_EP_INTR(EP_NO(imx_ep)));
@@@ -1320,7 -1316,7 +1320,7 @@@ static struct imx_udc_struct controlle
  };
  
  /*******************************************************************************
-  * USB gadged driver functions
+  * USB gadget driver functions
   *******************************************************************************
   */
  int usb_gadget_probe_driver(struct usb_gadget_driver *driver,
@@@ -2225,7 -2225,6 +2225,7 @@@ static void handle_setup_packet(struct 
        u16     wValue = le16_to_cpu(setup->wValue);
        u16     wIndex = le16_to_cpu(setup->wIndex);
        u16     wLength = le16_to_cpu(setup->wLength);
 +      u32     portsc1;
  
        dev_vdbg(&dev->pdev->dev, "---> %s()\n", __func__);
  
                                        dev->dev_status &= ~(1 << wValue);
                                }
                                break;
 +                      case USB_DEVICE_TEST_MODE:
 +                              dev_dbg(&dev->pdev->dev, "SETUP: TEST MODE\n");
 +                              if ((wIndex & 0xff) ||
 +                                      (dev->gadget.speed != USB_SPEED_HIGH))
 +                                      ep0_stall(dev);
 +
 +                              switch (wIndex >> 8) {
 +                              case TEST_J:
 +                              case TEST_K:
 +                              case TEST_SE0_NAK:
 +                              case TEST_PACKET:
 +                              case TEST_FORCE_EN:
 +                                      if (prime_status_phase(dev, EP_DIR_IN))
 +                                              ep0_stall(dev);
 +                                      portsc1 = readl(&dev->op_regs->portsc1);
 +                                      portsc1 |= (wIndex & 0xf00) << 8;
 +                                      writel(portsc1, &dev->op_regs->portsc1);
 +                                      goto end;
 +                              default:
 +                                      rc = -EOPNOTSUPP;
 +                              }
 +                              break;
                        default:
                                rc = -EOPNOTSUPP;
                                break;
@@@ -3086,7 -3063,7 +3086,7 @@@ static void langwell_udc_remove(struct 
  
        kfree(dev->ep);
  
-       /* diable IRQ handler */
+       /* disable IRQ handler */
        if (dev->got_irq)
                free_irq(pdev->irq, dev);
  
@@@ -3406,7 -3383,7 +3406,7 @@@ static int langwell_udc_suspend(struct 
        /* disable interrupt and set controller to stop state */
        langwell_udc_stop(dev);
  
-       /* diable IRQ handler */
+       /* disable IRQ handler */
        if (dev->got_irq)
                free_irq(pdev->irq, dev);
        dev->got_irq = 0;
@@@ -1136,16 -1136,13 +1136,16 @@@ struct usb_request *musb_alloc_request(
        struct musb_request     *request = NULL;
  
        request = kzalloc(sizeof *request, gfp_flags);
 -      if (request) {
 -              INIT_LIST_HEAD(&request->request.list);
 -              request->request.dma = DMA_ADDR_INVALID;
 -              request->epnum = musb_ep->current_epnum;
 -              request->ep = musb_ep;
 +      if (!request) {
 +              DBG(4, "not enough memory\n");
 +              return NULL;
        }
  
 +      INIT_LIST_HEAD(&request->request.list);
 +      request->request.dma = DMA_ADDR_INVALID;
 +      request->epnum = musb_ep->current_epnum;
 +      request->ep = musb_ep;
 +
        return &request->request;
  }
  
@@@ -1684,7 -1681,7 +1684,7 @@@ static inline void __init musb_g_init_e
        struct musb_hw_ep       *hw_ep;
        unsigned                count = 0;
  
-       /* intialize endpoint list just once */
+       /* initialize endpoint list just once */
        INIT_LIST_HEAD(&(musb->g.ep_list));
  
        for (epnum = 0, hw_ep = musb->endpoints;
@@@ -1765,7 -1762,7 +1765,7 @@@ void musb_gadget_cleanup(struct musb *m
   *
   * -EINVAL something went wrong (not driver)
   * -EBUSY another gadget is already using the controller
-  * -ENOMEM no memeory to perform the operation
+  * -ENOMEM no memory to perform the operation
   *
   * @param driver the gadget driver
   * @param bind the driver's bind function
@@@ -2221,7 -2221,7 +2221,7 @@@ static int aty_bl_get_brightness(struc
        return bd->props.brightness;
  }
  
- static struct backlight_ops aty_bl_data = {
+ static const struct backlight_ops aty_bl_data = {
        .get_brightness = aty_bl_get_brightness,
        .update_status  = aty_bl_update_status,
  };
@@@ -2969,8 -2969,10 +2969,8 @@@ static int __devinit atyfb_setup_sparc(
  {
        struct atyfb_par *par = info->par;
        struct device_node *dp;
 -      char prop[128];
 -      phandle node;
 -      int len, i, j, ret;
        u32 mem, chip_id;
 +      int i, j, ret;
  
        /*
         * Map memory-mapped registers.
                aty_st_le32(MEM_CNTL, mem, par);
        }
  
 -      /*
 -       * If this is the console device, we will set default video
 -       * settings to what the PROM left us with.
 -       */
 -      node = prom_getchild(prom_root_node);
 -      node = prom_searchsiblings(node, "aliases");
 -      if (node) {
 -              len = prom_getproperty(node, "screen", prop, sizeof(prop));
 -              if (len > 0) {
 -                      prop[len] = '\0';
 -                      node = prom_finddevice(prop);
 -              } else
 -                      node = 0;
 -      }
 -
        dp = pci_device_to_OF_node(pdev);
 -      if (node == dp->phandle) {
 +      if (dp == of_console_device) {
                struct fb_var_screeninfo *var = &default_var;
                unsigned int N, P, Q, M, T, R;
                u32 v_total, h_total;
                u8 pll_regs[16];
                u8 clock_cntl;
  
 -              crtc.vxres = prom_getintdefault(node, "width", 1024);
 -              crtc.vyres = prom_getintdefault(node, "height", 768);
 -              var->bits_per_pixel = prom_getintdefault(node, "depth", 8);
 +              crtc.vxres = of_getintprop_default(dp, "width", 1024);
 +              crtc.vyres = of_getintprop_default(dp, "height", 768);
 +              var->bits_per_pixel = of_getintprop_default(dp, "depth", 8);
                var->xoffset = var->yoffset = 0;
                crtc.h_tot_disp = aty_ld_le32(CRTC_H_TOTAL_DISP, par);
                crtc.h_sync_strt_wid = aty_ld_le32(CRTC_H_SYNC_STRT_WID, par);
diff --combined fs/anon_inodes.c
@@@ -64,9 -64,9 +64,9 @@@ static const struct address_space_opera
  };
  
  /**
-  * anon_inode_getfd - creates a new file instance by hooking it up to an
-  *                    anonymous inode, and a dentry that describe the "class"
-  *                    of the file
+  * anon_inode_getfile - creates a new file instance by hooking it up to an
+  *                      anonymous inode, and a dentry that describe the "class"
+  *                      of the file
   *
   * @name:    [in]    name of the "class" of the new file
   * @fops:    [in]    file operations for the new file
@@@ -102,7 -102,7 +102,7 @@@ struct file *anon_inode_getfile(const c
        this.name = name;
        this.len = strlen(name);
        this.hash = 0;
 -      path.dentry = d_alloc(anon_inode_mnt->mnt_sb->s_root, &this);
 +      path.dentry = d_alloc_pseudo(anon_inode_mnt->mnt_sb, &this);
        if (!path.dentry)
                goto err_module;
  
         */
        ihold(anon_inode_inode);
  
 -      path.dentry->d_op = &anon_inodefs_dentry_operations;
 +      d_set_d_op(path.dentry, &anon_inodefs_dentry_operations);
        d_instantiate(path.dentry, anon_inode_inode);
  
        error = -ENFILE;
@@@ -232,7 -232,7 +232,7 @@@ static int __init anon_inode_init(void
        return 0;
  
  err_mntput:
 -      mntput(anon_inode_mnt);
 +      mntput_long(anon_inode_mnt);
  err_unregister_filesystem:
        unregister_filesystem(&anon_inode_fs_type);
  err_exit:
diff --combined fs/coda/inode.c
@@@ -45,7 -45,7 +45,7 @@@ static struct kmem_cache * coda_inode_c
  static struct inode *coda_alloc_inode(struct super_block *sb)
  {
        struct coda_inode_info *ei;
-       ei = (struct coda_inode_info *)kmem_cache_alloc(coda_inode_cachep, GFP_KERNEL);
+       ei = kmem_cache_alloc(coda_inode_cachep, GFP_KERNEL);
        if (!ei)
                return NULL;
        memset(&ei->c_fid, 0, sizeof(struct CodaFid));
        return &ei->vfs_inode;
  }
  
 -static void coda_destroy_inode(struct inode *inode)
 +static void coda_i_callback(struct rcu_head *head)
  {
 +      struct inode *inode = container_of(head, struct inode, i_rcu);
 +      INIT_LIST_HEAD(&inode->i_dentry);
        kmem_cache_free(coda_inode_cachep, ITOC(inode));
  }
  
 +static void coda_destroy_inode(struct inode *inode)
 +{
 +      call_rcu(&inode->i_rcu, coda_i_callback);
 +}
 +
  static void init_once(void *foo)
  {
        struct coda_inode_info *ei = (struct coda_inode_info *) foo;
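
The conversion defers the cache free through an RCU grace period, so lock-free
readers that still hold a pointer to the inode cannot race with its release.
The same pattern with a hypothetical object type:

#include <linux/kernel.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>

struct obj {
        int data;
        struct rcu_head rcu;
};

static void obj_free_rcu(struct rcu_head *head)
{
        struct obj *o = container_of(head, struct obj, rcu);

        kfree(o);       /* runs only after all pre-existing readers finish */
}

static void obj_destroy(struct obj *o)
{
        call_rcu(&o->rcu, obj_free_rcu);        /* defer the actual free */
}
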
diff --combined fs/ext4/ext4.h
@@@ -62,8 -62,8 +62,8 @@@
  #define EXT4_ERROR_INODE_BLOCK(inode, block, fmt, a...)                       \
        ext4_error_inode((inode), __func__, __LINE__, (block), (fmt), ## a)
  
 -#define EXT4_ERROR_FILE(file, fmt, a...)      \
 -      ext4_error_file(__func__, __LINE__, (file), (fmt), ## a)
 +#define EXT4_ERROR_FILE(file, block, fmt, a...)                               \
 +      ext4_error_file((file), __func__, __LINE__, (block), (fmt), ## a)
  
  /* data type for block offset of block group */
  typedef int ext4_grpblk_t;
@@@ -561,7 -561,23 +561,7 @@@ struct ext4_new_group_data 
  #define EXT4_IOC32_SETVERSION_OLD     FS_IOC32_SETVERSION
  #endif
  
- /* Max physical block we can addres w/o extents */
 -
 -/*
 - *  Mount options
 - */
 -struct ext4_mount_options {
 -      unsigned long s_mount_opt;
 -      uid_t s_resuid;
 -      gid_t s_resgid;
 -      unsigned long s_commit_interval;
 -      u32 s_min_batch_time, s_max_batch_time;
 -#ifdef CONFIG_QUOTA
 -      int s_jquota_fmt;
 -      char *s_qf_names[MAXQUOTAS];
 -#endif
 -};
 -
+ /* Max physical block we can address w/o extents */
  #define EXT4_MAX_BLOCK_FILE_PHYS      0xFFFFFFFF
  
  /*
@@@ -693,8 -709,6 +693,8 @@@ do {                                                                              
        if (EXT4_FITS_IN_INODE(raw_inode, EXT4_I(inode), xtime ## _extra))     \
                ext4_decode_extra_time(&(inode)->xtime,                        \
                                       raw_inode->xtime ## _extra);            \
 +      else                                                                   \
 +              (inode)->xtime.tv_nsec = 0;                                    \
  } while (0)
  
  #define EXT4_EINODE_GET_XTIME(xtime, einode, raw_inode)                              \
@@@ -705,8 -719,6 +705,8 @@@ do {                                                                              
        if (EXT4_FITS_IN_INODE(raw_inode, einode, xtime ## _extra))            \
                ext4_decode_extra_time(&(einode)->xtime,                       \
                                       raw_inode->xtime ## _extra);            \
 +      else                                                                   \
 +              (einode)->xtime.tv_nsec = 0;                                   \
  } while (0)
  
  #define i_disk_version osd1.linux1.l_i_version
  
  /*
   * storage for cached extent
 + * If ec_len == 0, then the cache is invalid.
 + * If ec_start == 0, then the cache represents a gap (null mapping)
   */
  struct ext4_ext_cache {
        ext4_fsblk_t    ec_start;
        ext4_lblk_t     ec_block;
        __u32           ec_len; /* must be 32bit to return holes */
 -      __u32           ec_type;
  };
  
  /*
@@@ -763,12 -774,10 +763,12 @@@ struct ext4_inode_info 
         * near to their parent directory's inode.
         */
        ext4_group_t    i_block_group;
 +      ext4_lblk_t     i_dir_start_lookup;
 +#if (BITS_PER_LONG < 64)
        unsigned long   i_state_flags;          /* Dynamic state flags */
 +#endif
        unsigned long   i_flags;
  
 -      ext4_lblk_t             i_dir_start_lookup;
  #ifdef CONFIG_EXT4_FS_XATTR
        /*
         * Extended attributes can be read independently of the main file
         */
        struct rw_semaphore i_data_sem;
        struct inode vfs_inode;
 -      struct jbd2_inode jinode;
 +      struct jbd2_inode *jinode;
  
        struct ext4_ext_cache i_cached_extent;
        /*
        unsigned int i_reserved_data_blocks;
        unsigned int i_reserved_meta_blocks;
        unsigned int i_allocated_meta_blocks;
 -      unsigned short i_delalloc_reserved_flag;
 -      sector_t i_da_metadata_calc_last_lblock;
 +      ext4_lblk_t i_da_metadata_calc_last_lblock;
        int i_da_metadata_calc_len;
  
        /* on-disk additional length */
        __u16 i_extra_isize;
  
 -      spinlock_t i_block_reservation_lock;
  #ifdef CONFIG_QUOTA
        /* quota space reservation, managed internally by quota code */
        qsize_t i_reserved_quota;
        /* completed IOs that might need unwritten extents handling */
        struct list_head i_completed_io_list;
        spinlock_t i_completed_io_lock;
 +      atomic_t i_ioend_count; /* Number of outstanding io_end structs */
        /* current io_end structure for async DIO write*/
        ext4_io_end_t *cur_aio_dio;
 -      atomic_t i_ioend_count; /* Number of outstanding io_end structs */
 +
 +      spinlock_t i_block_reservation_lock;
  
        /*
         * Transactions that contain inode's metadata needed to complete
  #define EXT4_MOUNT_DISCARD            0x40000000 /* Issue DISCARD requests */
  #define EXT4_MOUNT_INIT_INODE_TABLE   0x80000000 /* Initialize uninitialized itables */
  
 -#define clear_opt(o, opt)             o &= ~EXT4_MOUNT_##opt
 -#define set_opt(o, opt)                       o |= EXT4_MOUNT_##opt
 +#define clear_opt(sb, opt)            EXT4_SB(sb)->s_mount_opt &= \
 +                                              ~EXT4_MOUNT_##opt
 +#define set_opt(sb, opt)              EXT4_SB(sb)->s_mount_opt |= \
 +                                              EXT4_MOUNT_##opt
  #define test_opt(sb, opt)             (EXT4_SB(sb)->s_mount_opt & \
                                         EXT4_MOUNT_##opt)
  
 +#define clear_opt2(sb, opt)           EXT4_SB(sb)->s_mount_opt2 &= \
 +                                              ~EXT4_MOUNT2_##opt
 +#define set_opt2(sb, opt)             EXT4_SB(sb)->s_mount_opt2 |= \
 +                                              EXT4_MOUNT2_##opt
 +#define test_opt2(sb, opt)            (EXT4_SB(sb)->s_mount_opt2 & \
 +                                       EXT4_MOUNT2_##opt)
 +
  #define ext4_set_bit                  ext2_set_bit
  #define ext4_set_bit_atomic           ext2_set_bit_atomic
  #define ext4_clear_bit                        ext2_clear_bit
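
The reworked option macros take the superblock directly instead of a bare
flags word. Hypothetical call sites, using EXT4_MOUNT_DISCARD (one of the
option bits defined above) for illustration:

/* Somewhere in mount-option parsing (sb is a struct super_block *): */
set_opt(sb, DISCARD);   /* EXT4_SB(sb)->s_mount_opt |= EXT4_MOUNT_DISCARD */

if (test_opt(sb, DISCARD))
        clear_opt(sb, DISCARD);

/* The *2 variants operate on the new s_mount_opt2 word the same way. */
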
@@@ -1087,7 -1087,6 +1087,7 @@@ struct ext4_sb_info 
        struct ext4_super_block *s_es;  /* Pointer to the super block in the buffer */
        struct buffer_head **s_group_desc;
        unsigned int s_mount_opt;
 +      unsigned int s_mount_opt2;
        unsigned int s_mount_flags;
        ext4_fsblk_t s_sb_block;
        uid_t s_resuid;
@@@ -1238,39 -1237,24 +1238,39 @@@ enum 
        EXT4_STATE_EXT_MIGRATE,         /* Inode is migrating */
        EXT4_STATE_DIO_UNWRITTEN,       /* need convert on dio done*/
        EXT4_STATE_NEWENTRY,            /* File just added to dir */
 +      EXT4_STATE_DELALLOC_RESERVED,   /* blks already reserved for delalloc */
  };
  
 -#define EXT4_INODE_BIT_FNS(name, field)                                       \
 +#define EXT4_INODE_BIT_FNS(name, field, offset)                               \
  static inline int ext4_test_inode_##name(struct inode *inode, int bit)        \
  {                                                                     \
 -      return test_bit(bit, &EXT4_I(inode)->i_##field);                \
 +      return test_bit(bit + (offset), &EXT4_I(inode)->i_##field);     \
  }                                                                     \
  static inline void ext4_set_inode_##name(struct inode *inode, int bit)        \
  {                                                                     \
 -      set_bit(bit, &EXT4_I(inode)->i_##field);                        \
 +      set_bit(bit + (offset), &EXT4_I(inode)->i_##field);             \
  }                                                                     \
  static inline void ext4_clear_inode_##name(struct inode *inode, int bit) \
  {                                                                     \
 -      clear_bit(bit, &EXT4_I(inode)->i_##field);                      \
 +      clear_bit(bit + (offset), &EXT4_I(inode)->i_##field);           \
  }
  
 -EXT4_INODE_BIT_FNS(flag, flags)
 -EXT4_INODE_BIT_FNS(state, state_flags)
 +EXT4_INODE_BIT_FNS(flag, flags, 0)
 +#if (BITS_PER_LONG < 64)
 +EXT4_INODE_BIT_FNS(state, state_flags, 0)
 +
 +static inline void ext4_clear_state_flags(struct ext4_inode_info *ei)
 +{
 +      (ei)->i_state_flags = 0;
 +}
 +#else
 +EXT4_INODE_BIT_FNS(state, flags, 32)
 +
 +static inline void ext4_clear_state_flags(struct ext4_inode_info *ei)
 +{
 +      /* We depend on the fact that callers will set i_flags */
 +}
 +#endif
  #else
  /* Assume that user mode programs are passing in an ext4fs superblock, not
   * a kernel struct super_block.  This will allow us to call the feature-test
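
For reference, each EXT4_INODE_BIT_FNS(name, field, offset) invocation
generates a test/set/clear triple over the named field; on 64-bit builds the
state bits share the i_flags word starting at bit 32, so the
EXT4_INODE_BIT_FNS(state, flags, 32) line above expands to roughly:

static inline int ext4_test_inode_state(struct inode *inode, int bit)
{
        return test_bit(bit + 32, &EXT4_I(inode)->i_flags);
}
static inline void ext4_set_inode_state(struct inode *inode, int bit)
{
        set_bit(bit + 32, &EXT4_I(inode)->i_flags);
}
static inline void ext4_clear_inode_state(struct inode *inode, int bit)
{
        clear_bit(bit + 32, &EXT4_I(inode)->i_flags);
}
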
@@@ -1658,12 -1642,10 +1658,12 @@@ extern unsigned ext4_init_block_bitmap(
  
  /* dir.c */
  extern int __ext4_check_dir_entry(const char *, unsigned int, struct inode *,
 +                                struct file *,
                                  struct ext4_dir_entry_2 *,
                                  struct buffer_head *, unsigned int);
 -#define ext4_check_dir_entry(dir, de, bh, offset) \
 -      __ext4_check_dir_entry(__func__, __LINE__, (dir), (de), (bh), (offset))
 +#define ext4_check_dir_entry(dir, filp, de, bh, offset)                       \
 +      unlikely(__ext4_check_dir_entry(__func__, __LINE__, (dir), (filp), \
 +                                      (de), (bh), (offset)))
  extern int ext4_htree_store_dirent(struct file *dir_file, __u32 hash,
                                    __u32 minor_hash,
                                    struct ext4_dir_entry_2 *dirent);
@@@ -1671,7 -1653,6 +1671,7 @@@ extern void ext4_htree_free_dir_info(st
  
  /* fsync.c */
  extern int ext4_sync_file(struct file *, int);
 +extern int ext4_flush_completed_IO(struct inode *);
  
  /* hash.c */
  extern int ext4fs_dirhash(const char *name, int len, struct
@@@ -1771,8 -1752,8 +1771,8 @@@ extern void ext4_error_inode(struct ino
                             ext4_fsblk_t, const char *, ...)
        __attribute__ ((format (printf, 5, 6)));
  extern void ext4_error_file(struct file *, const char *, unsigned int,
 -                          const char *, ...)
 -      __attribute__ ((format (printf, 4, 5)));
 +                          ext4_fsblk_t, const char *, ...)
 +      __attribute__ ((format (printf, 5, 6)));
  extern void __ext4_std_error(struct super_block *, const char *,
                             unsigned int, int);
  extern void __ext4_abort(struct super_block *, const char *, unsigned int,
diff --combined fs/ext4/extents.c
@@@ -117,33 -117,11 +117,33 @@@ static ext4_fsblk_t ext4_ext_find_goal(
                struct ext4_extent *ex;
                depth = path->p_depth;
  
 -              /* try to predict block placement */
 +              /*
 +               * Try to predict block placement assuming that we are
 +               * filling in a file which will eventually be
 +               * non-sparse --- i.e., in the case of libbfd writing
 +               * an ELF object's sections out-of-order but in a way
 +               * that eventually results in a contiguous object or
 +               * executable file, or some database extending a table
 +               * space file.  However, this is actually somewhat
 +               * non-ideal if we are writing a sparse file such as
 +               * qemu or KVM writing a raw image file that is going
 +               * to stay fairly sparse, since it will end up
 +               * fragmenting the file system's free space.  Maybe we
 +               * should have some heuristics or some way to allow
 +               * userspace to pass a hint to the file system,
 +               * especially if the latter case turns out to be
 +               * common.
 +               */
                ex = path[depth].p_ext;
 -              if (ex)
 -                      return (ext4_ext_pblock(ex) +
 -                              (block - le32_to_cpu(ex->ee_block)));
 +              if (ex) {
 +                      ext4_fsblk_t ext_pblk = ext4_ext_pblock(ex);
 +                      ext4_lblk_t ext_block = le32_to_cpu(ex->ee_block);
 +
 +                      if (block > ext_block)
 +                              return ext_pblk + (block - ext_block);
 +                      else
 +                              return ext_pblk - (ext_block - block);
 +              }
  
                /* it looks like index is empty;
                 * try to find starting block from index itself */
@@@ -266,7 -244,7 +266,7 @@@ static inline int ext4_ext_space_root_i
   * to allocate @blocks
   * Worse case is one block per extent
   */
 -int ext4_ext_calc_metadata_amount(struct inode *inode, sector_t lblock)
 +int ext4_ext_calc_metadata_amount(struct inode *inode, ext4_lblk_t lblock)
  {
        struct ext4_inode_info *ei = EXT4_I(inode);
        int idxs, num = 0;
@@@ -1894,10 -1872,12 +1894,10 @@@ static int ext4_ext_walk_space(struct i
                        cbex.ec_block = start;
                        cbex.ec_len = end - start;
                        cbex.ec_start = 0;
 -                      cbex.ec_type = EXT4_EXT_CACHE_GAP;
                } else {
                        cbex.ec_block = le32_to_cpu(ex->ee_block);
                        cbex.ec_len = ext4_ext_get_actual_len(ex);
                        cbex.ec_start = ext4_ext_pblock(ex);
 -                      cbex.ec_type = EXT4_EXT_CACHE_EXTENT;
                }
  
                if (unlikely(cbex.ec_len == 0)) {
  
  static void
  ext4_ext_put_in_cache(struct inode *inode, ext4_lblk_t block,
 -                      __u32 len, ext4_fsblk_t start, int type)
 +                      __u32 len, ext4_fsblk_t start)
  {
        struct ext4_ext_cache *cex;
        BUG_ON(len == 0);
        spin_lock(&EXT4_I(inode)->i_block_reservation_lock);
        cex = &EXT4_I(inode)->i_cached_extent;
 -      cex->ec_type = type;
        cex->ec_block = block;
        cex->ec_len = len;
        cex->ec_start = start;
@@@ -1995,18 -1976,15 +1995,18 @@@ ext4_ext_put_gap_in_cache(struct inode 
        }
  
        ext_debug(" -> %u:%lu\n", lblock, len);
 -      ext4_ext_put_in_cache(inode, lblock, len, 0, EXT4_EXT_CACHE_GAP);
 +      ext4_ext_put_in_cache(inode, lblock, len, 0);
  }
  
 +/*
 + * Return 0 if cache is invalid; 1 if the cache is valid
 + */
  static int
  ext4_ext_in_cache(struct inode *inode, ext4_lblk_t block,
                        struct ext4_extent *ex)
  {
        struct ext4_ext_cache *cex;
 -      int ret = EXT4_EXT_CACHE_NO;
 +      int ret = 0;
  
        /*
         * We borrow i_block_reservation_lock to protect i_cached_extent
        cex = &EXT4_I(inode)->i_cached_extent;
  
        /* has cache valid data? */
 -      if (cex->ec_type == EXT4_EXT_CACHE_NO)
 +      if (cex->ec_len == 0)
                goto errout;
  
 -      BUG_ON(cex->ec_type != EXT4_EXT_CACHE_GAP &&
 -                      cex->ec_type != EXT4_EXT_CACHE_EXTENT);
        if (in_range(block, cex->ec_block, cex->ec_len)) {
                ex->ee_block = cpu_to_le32(cex->ec_block);
                ext4_ext_store_pblock(ex, cex->ec_start);
                ext_debug("%u cached by %u:%u:%llu\n",
                                block,
                                cex->ec_block, cex->ec_len, cex->ec_start);
 -              ret = cex->ec_type;
 +              ret = 1;
        }
  errout:
        spin_unlock(&EXT4_I(inode)->i_block_reservation_lock);
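
With ec_type removed, the cache encodes its state in the remaining fields: ec_len == 0 means nothing is cached, and a valid entry with a zero start block is a gap rather than a mapped extent. That is the convention the later hunks rely on (the !newex.ee_start_lo && !newex.ee_start_hi test in ext4_ext_map_blocks and the ec_start == 0 test in the fiemap callback). A small stand-alone sketch of the encoding, with simplified stand-in types:

#include <stdio.h>

struct ext_cache {
	unsigned int ec_block;
	unsigned int ec_len;		/* 0 => cache holds nothing */
	unsigned long long ec_start;	/* 0 => cached gap, else extent */
};

static const char *cache_state(const struct ext_cache *cex)
{
	if (cex->ec_len == 0)
		return "invalid";
	return cex->ec_start ? "extent" : "gap";
}

int main(void)
{
	struct ext_cache none = { 0, 0, 0 };
	struct ext_cache gap  = { 10, 4, 0 };
	struct ext_cache ext  = { 10, 4, 777 };

	printf("%s %s %s\n", cache_state(&none), cache_state(&gap),
	       cache_state(&ext));	/* invalid gap extent */
	return 0;
}
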
@@@ -2845,14 -2825,14 +2845,14 @@@ fix_extent_len
   * to an uninitialized extent.
   *
   * Writing to an uninitialized extent may result in splitting the uninitialized
-  * extent into multiple /intialized unintialized extents (up to three)
+  * extent into multiple initialized/uninitialized extents (up to three)
   * There are three possibilities:
   *   a> There is no split required: Entire extent should be uninitialized
   *   b> Splits in two extents: Write is happening at either end of the extent
   * c> Splits in three extents: Someone is writing in the middle of the extent
   *
   * One or more index blocks may be needed if the extent tree grows after
-  * the unintialized extent split. To prevent ENOSPC occur at the IO
+  * the uninitialized extent split. To prevent ENOSPC from occurring when
   * the IO completes, we need to split the uninitialized extent before DIO
   * submits the IO. The uninitialized extent in question will be split into
   * at most three uninitialized extents. After IO completes, the part
@@@ -3102,7 -3082,7 +3102,7 @@@ static void unmap_underlying_metadata_b
   * Handle EOFBLOCKS_FL flag, clearing it if necessary
   */
  static int check_eofblocks_fl(handle_t *handle, struct inode *inode,
 -                            struct ext4_map_blocks *map,
 +                            ext4_lblk_t lblk,
                              struct ext4_ext_path *path,
                              unsigned int len)
  {
         * this turns out to be false, we can bail out from this
         * function immediately.
         */
 -      if (map->m_lblk + len < le32_to_cpu(last_ex->ee_block) +
 +      if (lblk + len < le32_to_cpu(last_ex->ee_block) +
            ext4_ext_get_actual_len(last_ex))
                return 0;
        /*
@@@ -3188,8 -3168,8 +3188,8 @@@ ext4_ext_handle_uninitialized_extents(h
                                                        path);
                if (ret >= 0) {
                        ext4_update_inode_fsync_trans(handle, inode, 1);
 -                      err = check_eofblocks_fl(handle, inode, map, path,
 -                                               map->m_len);
 +                      err = check_eofblocks_fl(handle, inode, map->m_lblk,
 +                                               path, map->m_len);
                } else
                        err = ret;
                goto out2;
        ret = ext4_ext_convert_to_initialized(handle, inode, map, path);
        if (ret >= 0) {
                ext4_update_inode_fsync_trans(handle, inode, 1);
 -              err = check_eofblocks_fl(handle, inode, map, path, map->m_len);
 +              err = check_eofblocks_fl(handle, inode, map->m_lblk, path,
 +                                       map->m_len);
                if (err < 0)
                        goto out2;
        }
@@@ -3297,7 -3276,7 +3297,7 @@@ int ext4_ext_map_blocks(handle_t *handl
        struct ext4_extent_header *eh;
        struct ext4_extent newex, *ex;
        ext4_fsblk_t newblock;
 -      int err = 0, depth, ret, cache_type;
 +      int err = 0, depth, ret;
        unsigned int allocated = 0;
        struct ext4_allocation_request ar;
        ext4_io_end_t *io = EXT4_I(inode)->cur_aio_dio;
                  map->m_lblk, map->m_len, inode->i_ino);
  
        /* check in cache */
 -      cache_type = ext4_ext_in_cache(inode, map->m_lblk, &newex);
 -      if (cache_type) {
 -              if (cache_type == EXT4_EXT_CACHE_GAP) {
 +      if (ext4_ext_in_cache(inode, map->m_lblk, &newex)) {
 +              if (!newex.ee_start_lo && !newex.ee_start_hi) {
                        if ((flags & EXT4_GET_BLOCKS_CREATE) == 0) {
                                /*
                                 * block isn't allocated yet and
                                goto out2;
                        }
                        /* we should allocate requested block */
 -              } else if (cache_type == EXT4_EXT_CACHE_EXTENT) {
 +              } else {
                        /* block is already allocated */
                        newblock = map->m_lblk
                                   - le32_to_cpu(newex.ee_block)
                        allocated = ext4_ext_get_actual_len(&newex) -
                                (map->m_lblk - le32_to_cpu(newex.ee_block));
                        goto out;
 -              } else {
 -                      BUG();
                }
        }
  
                        /* Do not put uninitialized extent in the cache */
                        if (!ext4_ext_is_uninitialized(ex)) {
                                ext4_ext_put_in_cache(inode, ee_block,
 -                                                      ee_len, ee_start,
 -                                                      EXT4_EXT_CACHE_EXTENT);
 +                                                      ee_len, ee_start);
                                goto out;
                        }
                        ret = ext4_ext_handle_uninitialized_extents(handle,
                        map->m_flags |= EXT4_MAP_UNINIT;
        }
  
 -      err = check_eofblocks_fl(handle, inode, map, path, ar.len);
 +      err = check_eofblocks_fl(handle, inode, map->m_lblk, path, ar.len);
        if (err)
                goto out2;
  
         * when it is _not_ an uninitialized extent.
         */
        if ((flags & EXT4_GET_BLOCKS_UNINIT_EXT) == 0) {
 -              ext4_ext_put_in_cache(inode, map->m_lblk, allocated, newblock,
 -                                              EXT4_EXT_CACHE_EXTENT);
 +              ext4_ext_put_in_cache(inode, map->m_lblk, allocated, newblock);
                ext4_update_inode_fsync_trans(handle, inode, 1);
        } else
                ext4_update_inode_fsync_trans(handle, inode, 0);
@@@ -3535,12 -3519,6 +3535,12 @@@ void ext4_ext_truncate(struct inode *in
        int err = 0;
  
        /*
 +       * finish any pending end_io work so we won't run the risk of
 +       * converting any truncated blocks to initialized later
 +       */
 +      ext4_flush_completed_IO(inode);
 +
 +      /*
        * most likely, the first extent we free will be the last one in the block
         */
        err = ext4_writepage_trans_blocks(inode);
@@@ -3789,7 -3767,7 +3789,7 @@@ static int ext4_ext_fiemap_cb(struct in
  
        logical =  (__u64)newex->ec_block << blksize_bits;
  
 -      if (newex->ec_type == EXT4_EXT_CACHE_GAP) {
 +      if (newex->ec_start == 0) {
                pgoff_t offset;
                struct page *page;
                struct buffer_head *bh = NULL;
diff --combined fs/ext4/inode.c
@@@ -39,9 -39,7 +39,9 @@@
  #include <linux/bio.h>
  #include <linux/workqueue.h>
  #include <linux/kernel.h>
 +#include <linux/printk.h>
  #include <linux/slab.h>
 +#include <linux/ratelimit.h>
  
  #include "ext4_jbd2.h"
  #include "xattr.h"
@@@ -56,17 -54,10 +56,17 @@@ static inline int ext4_begin_ordered_tr
                                              loff_t new_size)
  {
        trace_ext4_begin_ordered_truncate(inode, new_size);
 -      return jbd2_journal_begin_ordered_truncate(
 -                                      EXT4_SB(inode->i_sb)->s_journal,
 -                                      &EXT4_I(inode)->jinode,
 -                                      new_size);
 +      /*
 +       * If jinode is zero, then we never opened the file for
 +       * writing, so there's no need to call
 +       * jbd2_journal_begin_ordered_truncate() since there are no
 +       * outstanding writes we need to flush.
 +       */
 +      if (!EXT4_I(inode)->jinode)
 +              return 0;
 +      return jbd2_journal_begin_ordered_truncate(EXT4_JOURNAL(inode),
 +                                                 EXT4_I(inode)->jinode,
 +                                                 new_size);
  }
  
  static void ext4_invalidatepage(struct page *page, unsigned long offset);
@@@ -561,7 -552,7 +561,7 @@@ static ext4_fsblk_t ext4_find_goal(stru
  }
  
  /**
 - *    ext4_blks_to_allocate: Look up the block map and count the number
 + *    ext4_blks_to_allocate - Look up the block map and count the number
   *    of direct blocks that need to be allocated for the given branch.
   *
   *    @branch: chain of indirect blocks
@@@ -600,19 -591,13 +600,19 @@@ static int ext4_blks_to_allocate(Indire
  
  /**
   *    ext4_alloc_blocks: multiple allocate blocks needed for a branch
 + *    @handle: handle for this transaction
 + *    @inode: inode which needs allocated blocks
 + *    @iblock: the logical block to start allocating at
 + *    @goal: preferred physical block of allocation
   *    @indirect_blks: the number of blocks to allocate for indirect
   *                    blocks
 - *
 + *    @blks: number of desired blocks
   *    @new_blocks: on return it will store the new block numbers for
   *    the indirect blocks (if needed) and the first direct block,
 - *    @blks:  on return it will store the total number of allocated
 - *            direct blocks
 + *    @err: on return it will store the error code
 + *
 + *    This function will return the number of blocks allocated as
 + *    requested by the passed-in parameters.
   */
  static int ext4_alloc_blocks(handle_t *handle, struct inode *inode,
                             ext4_lblk_t iblock, ext4_fsblk_t goal,
@@@ -726,11 -711,9 +726,11 @@@ failed_out
  
  /**
   *    ext4_alloc_branch - allocate and set up a chain of blocks.
 + *    @handle: handle for this transaction
   *    @inode: owner
   *    @indirect_blks: number of allocated indirect blocks
   *    @blks: number of allocated direct blocks
 + *    @goal: preferred place for allocation
   *    @offsets: offsets (in the blocks) to store the pointers to next.
   *    @branch: place to store the chain in.
   *
@@@ -843,7 -826,6 +843,7 @@@ failed
  
  /**
   * ext4_splice_branch - splice the allocated branch onto inode.
 + * @handle: handle for this transaction
   * @inode: owner
   * @block: (logical) number of block we are adding
   * @chain: chain of indirect blocks (with a missing link - see
@@@ -1099,7 -1081,7 +1099,7 @@@ static int ext4_indirect_calc_metadata_
   * Calculate the number of metadata blocks we need to reserve
   * to allocate a block located at @lblock
   */
 -static int ext4_calc_metadata_amount(struct inode *inode, sector_t lblock)
 +static int ext4_calc_metadata_amount(struct inode *inode, ext4_lblk_t lblock)
  {
        if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
                return ext4_ext_calc_metadata_amount(inode, lblock);
@@@ -1338,7 -1320,7 +1338,7 @@@ int ext4_map_blocks(handle_t *handle, s
         * avoid double accounting
         */
        if (flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE)
 -              EXT4_I(inode)->i_delalloc_reserved_flag = 1;
 +              ext4_set_inode_state(inode, EXT4_STATE_DELALLOC_RESERVED);
        /*
         * We need to check for EXT4 here because migrate
         * could have changed the inode type in between
                        ext4_da_update_reserve_space(inode, retval, 1);
        }
        if (flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE)
 -              EXT4_I(inode)->i_delalloc_reserved_flag = 0;
 +              ext4_clear_inode_state(inode, EXT4_STATE_DELALLOC_RESERVED);
  
        up_write((&EXT4_I(inode)->i_data_sem));
        if (retval > 0 && map->m_flags & EXT4_MAP_MAPPED) {
@@@ -1896,7 -1878,7 +1896,7 @@@ static int ext4_journalled_write_end(st
  /*
   * Reserve a single block located at lblock
   */
 -static int ext4_da_reserve_space(struct inode *inode, sector_t lblock)
 +static int ext4_da_reserve_space(struct inode *inode, ext4_lblk_t lblock)
  {
        int retries = 0;
        struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
@@@ -2257,7 -2239,7 +2257,7 @@@ static void mpage_da_map_and_submit(str
         * affects functions in many different parts of the allocation
         * call path.  This flag exists primarily because we don't
         * want to change *many* call functions, so ext4_map_blocks()
 -       * will set the magic i_delalloc_reserved_flag once the
 +       * will set the EXT4_STATE_DELALLOC_RESERVED flag once the
         * inode's allocation semaphore is taken.
         *
         * If the blocks in questions were delalloc blocks, set
@@@ -3380,7 -3362,7 +3380,7 @@@ int ext4_alloc_da_blocks(struct inode *
         * doing I/O at all.
         *
         * We could call write_cache_pages(), and then redirty all of
-        * the pages by calling redirty_page_for_writeback() but that
+        * the pages by calling redirty_page_for_writepage() but that
         * would be ugly in the extreme.  So instead we would need to
         * replicate parts of the code in the above functions,
          * simplifying them because we wouldn't actually intend to
@@@ -3738,7 -3720,8 +3738,7 @@@ static int ext4_set_bh_endio(struct buf
  retry:
        io_end = ext4_init_io_end(inode, GFP_ATOMIC);
        if (!io_end) {
 -              if (printk_ratelimit())
 -                      printk(KERN_WARNING "%s: allocation fail\n", __func__);
 +              pr_warn_ratelimited("%s: allocation fail\n", __func__);
                schedule();
                goto retry;
        }
   * preallocated extents, and those writes extend the file, there is no need to
   * fall back to buffered IO.
   *
-  * For holes, we fallocate those blocks, mark them as unintialized
+  * For holes, we fallocate those blocks, mark them as uninitialized.
   * If those blocks were preallocated, we make sure they are split, but
-  * still keep the range to write as unintialized.
+  * still keep the range to write as uninitialized.
   *
   * The unwritten extents will be converted to written when DIO is completed.
   * For async direct IO, since the IO may still be pending when we return, we
@@@ -4062,7 -4045,7 +4062,7 @@@ int ext4_block_truncate_page(handle_t *
        if (ext4_should_journal_data(inode)) {
                err = ext4_handle_dirty_metadata(handle, inode, bh);
        } else {
 -              if (ext4_should_order_data(inode))
 +              if (ext4_should_order_data(inode) && EXT4_I(inode)->jinode)
                        err = ext4_jbd2_file_inode(handle, inode);
                mark_buffer_dirty(bh);
        }
@@@ -4186,7 -4169,6 +4186,7 @@@ static int ext4_clear_blocks(handle_t *
  {
        __le32 *p;
        int     flags = EXT4_FREE_BLOCKS_FORGET | EXT4_FREE_BLOCKS_VALIDATED;
 +      int     err;
  
        if (S_ISDIR(inode->i_mode) || S_ISLNK(inode->i_mode))
                flags |= EXT4_FREE_BLOCKS_METADATA;
        if (try_to_extend_transaction(handle, inode)) {
                if (bh) {
                        BUFFER_TRACE(bh, "call ext4_handle_dirty_metadata");
 -                      ext4_handle_dirty_metadata(handle, inode, bh);
 +                      err = ext4_handle_dirty_metadata(handle, inode, bh);
 +                      if (unlikely(err)) {
 +                              ext4_std_error(inode->i_sb, err);
 +                              return 1;
 +                      }
 +              }
 +              err = ext4_mark_inode_dirty(handle, inode);
 +              if (unlikely(err)) {
 +                      ext4_std_error(inode->i_sb, err);
 +                      return 1;
 +              }
 +              err = ext4_truncate_restart_trans(handle, inode,
 +                                                blocks_for_truncate(inode));
 +              if (unlikely(err)) {
 +                      ext4_std_error(inode->i_sb, err);
 +                      return 1;
                }
 -              ext4_mark_inode_dirty(handle, inode);
 -              ext4_truncate_restart_trans(handle, inode,
 -                                          blocks_for_truncate(inode));
                if (bh) {
                        BUFFER_TRACE(bh, "retaking write access");
                        ext4_journal_get_write_access(handle, bh);
@@@ -4379,7 -4349,6 +4379,7 @@@ static void ext4_free_branches(handle_
                                        (__le32 *) bh->b_data,
                                        (__le32 *) bh->b_data + addr_per_block,
                                        depth);
 +                      brelse(bh);
  
                        /*
                         * Everything below this pointer has been
@@@ -4890,7 -4859,7 +4890,7 @@@ struct inode *ext4_iget(struct super_bl
        }
        inode->i_nlink = le16_to_cpu(raw_inode->i_links_count);
  
 -      ei->i_state_flags = 0;
 +      ext4_clear_state_flags(ei);     /* Only relevant on 32-bit archs */
        ei->i_dir_start_lookup = 0;
        ei->i_dtime = le32_to_cpu(raw_inode->i_dtime);
        /* We now have enough fields to check if the inode was active or not.
@@@ -5149,7 -5118,7 +5149,7 @@@ static int ext4_do_update_inode(handle_
        if (ext4_inode_blocks_set(handle, raw_inode, ei))
                goto out_brelse;
        raw_inode->i_dtime = cpu_to_le32(ei->i_dtime);
 -      raw_inode->i_flags = cpu_to_le32(ei->i_flags);
 +      raw_inode->i_flags = cpu_to_le32(ei->i_flags & 0xFFFFFFFF);
        if (EXT4_SB(inode->i_sb)->s_es->s_creator_os !=
            cpu_to_le32(EXT4_OS_HURD))
                raw_inode->i_file_acl_high =
diff --combined fs/jbd2/transaction.c
@@@ -251,7 -251,7 +251,7 @@@ repeat
         * the committing transaction.  Really, we only need to give it
         * committing_transaction->t_outstanding_credits plus "enough" for
         * the log control blocks.
-        * Also, this test is inconsitent with the matching one in
+        * Also, this test is inconsistent with the matching one in
         * jbd2_journal_extend().
         */
        if (__jbd2_log_space_left(journal) < jbd_space_needed(journal)) {
@@@ -340,7 -340,9 +340,7 @@@ handle_t *jbd2__journal_start(journal_
                jbd2_free_handle(handle);
                current->journal_info = NULL;
                handle = ERR_PTR(err);
 -              goto out;
        }
 -out:
        return handle;
  }
  EXPORT_SYMBOL(jbd2__journal_start);
@@@ -587,7 -589,7 +587,7 @@@ do_get_write_access(handle_t *handle, s
        transaction = handle->h_transaction;
        journal = transaction->t_journal;
  
 -      jbd_debug(5, "buffer_head %p, force_copy %d\n", jh, force_copy);
 +      jbd_debug(5, "journal_head %p, force_copy %d\n", jh, force_copy);
  
        JBUFFER_TRACE(jh, "entry");
  repeat:
@@@ -772,7 -774,7 +772,7 @@@ done
                J_EXPECT_JH(jh, buffer_uptodate(jh2bh(jh)),
                            "Possible IO failure.\n");
                page = jh2bh(jh)->b_page;
 -              offset = ((unsigned long) jh2bh(jh)->b_data) & ~PAGE_MASK;
 +              offset = offset_in_page(jh2bh(jh)->b_data);
                source = kmap_atomic(page, KM_USER0);
                /* Fire data frozen trigger just before we copy the data */
                jbd2_buffer_frozen_trigger(jh, source + offset,
@@@ -834,11 -834,8 +834,11 @@@ xfsaild_wakeup
        struct xfs_ail          *ailp,
        xfs_lsn_t               threshold_lsn)
  {
 -      ailp->xa_target = threshold_lsn;
 -      wake_up_process(ailp->xa_task);
 +      /* only ever move the target forwards */
 +      if (XFS_LSN_CMP(threshold_lsn, ailp->xa_target) > 0) {
 +              ailp->xa_target = threshold_lsn;
 +              wake_up_process(ailp->xa_task);
 +      }
  }
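
The guard added above makes the AIL push target monotonic: a racing caller holding an older, smaller LSN no longer drags the target backwards, it simply does nothing. A userspace sketch of the pattern; plain integers stand in for xfs_lsn_t and XFS_LSN_CMP, and aild_wakeup is a hypothetical stand-in:

#include <stdio.h>

typedef long long lsn_t;	/* stand-in for xfs_lsn_t */

static lsn_t xa_target;		/* stand-in for ailp->xa_target */

/* Only ever move the target forwards; a stale, smaller threshold
 * becomes a no-op instead of regressing the push target. */
static int aild_wakeup(lsn_t threshold_lsn)
{
	if (threshold_lsn > xa_target) {	/* models XFS_LSN_CMP(...) > 0 */
		xa_target = threshold_lsn;
		return 1;			/* would wake_up_process() here */
	}
	return 0;
}

int main(void)
{
	int a = aild_wakeup(100);
	int b = aild_wakeup(50);	/* stale caller: ignored */
	int c = aild_wakeup(150);

	printf("%d %d %d\n", a, b, c);	/* prints: 1 0 1 */
	return 0;
}
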
  
  STATIC int
@@@ -850,17 -847,8 +850,17 @@@ xfsaild
        long            tout = 0; /* milliseconds */
  
        while (!kthread_should_stop()) {
 -              schedule_timeout_interruptible(tout ?
 -                              msecs_to_jiffies(tout) : MAX_SCHEDULE_TIMEOUT);
 +              /*
 +               * For short sleeps indicating congestion, don't allow us to
 +               * get woken early. Otherwise all we do is bang on the AIL lock
 +               * without making progress.
 +               */
 +              if (tout && tout <= 20)
 +                      __set_current_state(TASK_KILLABLE);
 +              else
 +                      __set_current_state(TASK_INTERRUPTIBLE);
 +              schedule_timeout(tout ?
 +                               msecs_to_jiffies(tout) : MAX_SCHEDULE_TIMEOUT);
  
                /* swsusp */
                try_to_freeze();
@@@ -947,7 -935,7 +947,7 @@@ out_reclaim
   * Slab object creation initialisation for the XFS inode.
   * This covers only the idempotent fields in the XFS inode;
   * all other fields need to be initialised on allocation
-  * from the slab. This avoids the need to repeatedly intialise
+  * from the slab. This avoids the need to repeatedly initialise
   * fields in the xfs inode that are left in the initialised state
   * when freeing the inode.
   */
@@@ -1130,8 -1118,6 +1130,8 @@@ xfs_fs_evict_inode
         */
        ASSERT(!rwsem_is_locked(&ip->i_iolock.mr_lock));
        mrlock_init(&ip->i_iolock, MRLOCK_BARRIER, "xfsio", ip->i_ino);
 +      lockdep_set_class_and_name(&ip->i_iolock.mr_lock,
 +                      &xfs_iolock_reclaimable, "xfs_iolock_reclaimable");
  
        xfs_inactive(ip);
  }
diff --combined include/linux/suspend.h
@@@ -122,7 -122,7 +122,7 @@@ struct platform_suspend_ops 
   * suspend_set_ops - set platform dependent suspend operations
   * @ops: The new suspend operations to set.
   */
- extern void suspend_set_ops(struct platform_suspend_ops *ops);
+ extern void suspend_set_ops(const struct platform_suspend_ops *ops);
  extern int suspend_valid_only_mem(suspend_state_t state);
  
  /**
@@@ -147,7 -147,7 +147,7 @@@ extern int pm_suspend(suspend_state_t s
  #else /* !CONFIG_SUSPEND */
  #define suspend_valid_only_mem        NULL
  
- static inline void suspend_set_ops(struct platform_suspend_ops *ops) {}
+ static inline void suspend_set_ops(const struct platform_suspend_ops *ops) {}
  static inline int pm_suspend(suspend_state_t state) { return -ENOSYS; }
  #endif /* !CONFIG_SUSPEND */
  
@@@ -245,7 -245,7 +245,7 @@@ extern void swsusp_set_page_free(struc
  extern void swsusp_unset_page_free(struct page *);
  extern unsigned long get_safe_page(gfp_t gfp_mask);
  
- extern void hibernation_set_ops(struct platform_hibernation_ops *ops);
+ extern void hibernation_set_ops(const struct platform_hibernation_ops *ops);
  extern int hibernate(void);
  extern bool system_entering_hibernation(void);
  #else /* CONFIG_HIBERNATION */
@@@ -253,7 -253,7 +253,7 @@@ static inline int swsusp_page_is_forbid
  static inline void swsusp_set_page_free(struct page *p) {}
  static inline void swsusp_unset_page_free(struct page *p) {}
  
- static inline void hibernation_set_ops(struct platform_hibernation_ops *ops) {}
+ static inline void hibernation_set_ops(const struct platform_hibernation_ops *ops) {}
  static inline int hibernate(void) { return -ENOSYS; }
  static inline bool system_entering_hibernation(void) { return false; }
  #endif /* CONFIG_HIBERNATION */
@@@ -292,7 -292,7 +292,7 @@@ extern int unregister_pm_notifier(struc
  /* drivers/base/power/wakeup.c */
  extern bool events_check_enabled;
  
 -extern bool pm_check_wakeup_events(void);
 +extern bool pm_wakeup_pending(void);
  extern bool pm_get_wakeup_count(unsigned int *count);
  extern bool pm_save_wakeup_count(unsigned int count);
  #else /* !CONFIG_PM_SLEEP */
@@@ -309,7 -309,7 +309,7 @@@ static inline int unregister_pm_notifie
  
  #define pm_notifier(fn, pri)  do { (void)(fn); } while (0)
  
 -static inline bool pm_check_wakeup_events(void) { return true; }
 +static inline bool pm_wakeup_pending(void) { return false; }
  #endif /* !CONFIG_PM_SLEEP */
  
  extern struct mutex pm_mutex;
diff --combined init/Kconfig
@@@ -130,16 -130,13 +130,16 @@@ config HAVE_KERNEL_BZIP
  config HAVE_KERNEL_LZMA
        bool
  
 +config HAVE_KERNEL_XZ
 +      bool
 +
  config HAVE_KERNEL_LZO
        bool
  
  choice
        prompt "Kernel compression mode"
        default KERNEL_GZIP
 -      depends on HAVE_KERNEL_GZIP || HAVE_KERNEL_BZIP2 || HAVE_KERNEL_LZMA || HAVE_KERNEL_LZO
 +      depends on HAVE_KERNEL_GZIP || HAVE_KERNEL_BZIP2 || HAVE_KERNEL_LZMA || HAVE_KERNEL_XZ || HAVE_KERNEL_LZO
        help
          The linux kernel is a kind of self-extracting executable.
          Several compression algorithms are available, which differ
@@@ -184,21 -181,6 +184,21 @@@ config KERNEL_LZM
          two. Compression is slowest.  The kernel size is about 33%
          smaller with LZMA in comparison to gzip.
  
 +config KERNEL_XZ
 +      bool "XZ"
 +      depends on HAVE_KERNEL_XZ
 +      help
 +        XZ uses the LZMA2 algorithm and instruction set specific
 +        XZ uses the LZMA2 algorithm and instruction-set-specific
 +        BCJ filters, which can improve the compression ratio of executable
 +        comparison to gzip. On architectures for which there is a BCJ
 +        filter (i386, x86_64, ARM, IA-64, PowerPC, and SPARC), XZ
 +        will create a few percent smaller kernel than plain LZMA.
 +
 +        The speed is about the same as with LZMA: The decompression
 +        speed of XZ is better than that of bzip2 but worse than gzip
 +        and LZO. Compression is slow.
 +
  config KERNEL_LZO
        bool "LZO"
        depends on HAVE_KERNEL_LZO
@@@ -411,6 -393,7 +411,6 @@@ config PREEMPT_RC
  
  config RCU_TRACE
        bool "Enable tracing for RCU"
 -      depends on TREE_RCU || TREE_PREEMPT_RCU
        help
          This option provides tracing in RCU which presents stats
          in debugfs for debugging the RCU implementation.
@@@ -476,60 -459,6 +476,60 @@@ config TREE_RCU_TRAC
          TREE_PREEMPT_RCU implementations, permitting Makefile to
          trivially select kernel/rcutree_trace.c.
  
 +config RCU_BOOST
 +      bool "Enable RCU priority boosting"
 +      depends on RT_MUTEXES && TINY_PREEMPT_RCU
 +      default n
 +      help
 +        This option boosts the priority of preempted RCU readers that
 +        block the current preemptible RCU grace period for too long.
 +        This option also prevents heavy loads from blocking RCU
 +        callback invocation for all flavors of RCU.
 +
 +        Say Y here if you are working with real-time apps or heavy loads.
 +        Say N here if you are unsure.
 +
 +config RCU_BOOST_PRIO
 +      int "Real-time priority to boost RCU readers to"
 +      range 1 99
 +      depends on RCU_BOOST
 +      default 1
 +      help
 +        This option specifies the real-time priority to which preempted
 +        RCU readers are to be boosted.  If you are working with CPU-bound
 +        real-time applications, you should specify a priority higher than
 +        the highest-priority CPU-bound application.
 +
 +        Specify the real-time priority, or take the default if unsure.
 +
 +config RCU_BOOST_DELAY
 +      int "Milliseconds to delay boosting after RCU grace-period start"
 +      range 0 3000
 +      depends on RCU_BOOST
 +      default 500
 +      help
 +        This option specifies the time to wait after the beginning of
 +        a given grace period before priority-boosting preempted RCU
 +        readers blocking that grace period.  Note that any RCU reader
 +        blocking an expedited RCU grace period is boosted immediately.
 +
 +        Accept the default if unsure.
 +
 +config SRCU_SYNCHRONIZE_DELAY
 +      int "Microseconds to delay before waiting for readers"
 +      range 0 20
 +      default 10
 +      help
 +        This option controls how long SRCU delays before entering its
 +        loop waiting on SRCU readers.  The purpose of this loop is
 +        to avoid the unconditional context-switch penalty that would
 +        otherwise be incurred if there were an active SRCU reader,
 +        in a manner similar to adaptive locking schemes.  This should
 +        be set to be a bit longer than the common-case SRCU read-side
 +        critical-section overhead.
 +
 +        Accept the default if unsure.
 +
  endmenu # "RCU Subsystem"
  
  config IKCONFIG
@@@ -691,7 -620,7 +691,7 @@@ config CGROUP_MEM_RES_CTLR_SWAP_ENABLE
        help
          Memory Resource Controller Swap Extension comes with its price in
          a bigger memory consumption. General purpose distribution kernels
-         which want to enable the feautre but keep it disabled by default
+         which want to enable the feature but keep it disabled by default
          and let the user enable it by swapaccount boot command line
          parameter should have this option unselected.
          Those who want to have the feature enabled by default should
@@@ -812,19 -741,6 +812,19 @@@ config NET_N
  
  endif # NAMESPACES
  
 +config SCHED_AUTOGROUP
 +      bool "Automatic process group scheduling"
 +      select EVENTFD
 +      select CGROUPS
 +      select CGROUP_SCHED
 +      select FAIR_GROUP_SCHED
 +      help
 +        This option optimizes the scheduler for common desktop workloads by
 +        automatically creating and populating task groups.  This separation
 +        of workloads isolates aggressive CPU burners (like build jobs) from
 +        desktop applications.  Task group autogeneration is currently based
 +        upon task session.
 +
  config MM_OWNER
        bool
  
diff --combined kernel/hrtimer.c
@@@ -497,7 -497,7 +497,7 @@@ static inline int hrtimer_is_hres_enabl
   */
  static inline int hrtimer_hres_active(void)
  {
 -      return __get_cpu_var(hrtimer_bases).hres_active;
 +      return __this_cpu_read(hrtimer_bases.hres_active);
  }
  
  /*
@@@ -516,13 -516,10 +516,13 @@@ hrtimer_force_reprogram(struct hrtimer_
  
        for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++, base++) {
                struct hrtimer *timer;
 +              struct timerqueue_node *next;
  
 -              if (!base->first)
 +              next = timerqueue_getnext(&base->active);
 +              if (!next)
                        continue;
 -              timer = rb_entry(base->first, struct hrtimer, node);
 +              timer = container_of(next, struct hrtimer, node);
 +
                expires = ktime_sub(hrtimer_get_expires(timer), base->offset);
                /*
                 * clock_was_set() has changed base->offset so the
@@@ -843,17 -840,48 +843,17 @@@ EXPORT_SYMBOL_GPL(hrtimer_forward)
  static int enqueue_hrtimer(struct hrtimer *timer,
                           struct hrtimer_clock_base *base)
  {
 -      struct rb_node **link = &base->active.rb_node;
 -      struct rb_node *parent = NULL;
 -      struct hrtimer *entry;
 -      int leftmost = 1;
 -
        debug_activate(timer);
  
 -      /*
 -       * Find the right place in the rbtree:
 -       */
 -      while (*link) {
 -              parent = *link;
 -              entry = rb_entry(parent, struct hrtimer, node);
 -              /*
 -               * We dont care about collisions. Nodes with
 -               * the same expiry time stay together.
 -               */
 -              if (hrtimer_get_expires_tv64(timer) <
 -                              hrtimer_get_expires_tv64(entry)) {
 -                      link = &(*link)->rb_left;
 -              } else {
 -                      link = &(*link)->rb_right;
 -                      leftmost = 0;
 -              }
 -      }
 -
 -      /*
 -       * Insert the timer to the rbtree and check whether it
 -       * replaces the first pending timer
 -       */
 -      if (leftmost)
 -              base->first = &timer->node;
 +      timerqueue_add(&base->active, &timer->node);
  
 -      rb_link_node(&timer->node, parent, link);
 -      rb_insert_color(&timer->node, &base->active);
        /*
         * HRTIMER_STATE_ENQUEUED is or'ed to the current state to preserve the
         * state of a possibly running callback.
         */
        timer->state |= HRTIMER_STATE_ENQUEUED;
  
 -      return leftmost;
 +      return (&timer->node == base->active.next);
  }
  
  /*
@@@ -873,7 -901,12 +873,7 @@@ static void __remove_hrtimer(struct hrt
        if (!(timer->state & HRTIMER_STATE_ENQUEUED))
                goto out;
  
 -      /*
 -       * Remove the timer from the rbtree and replace the first
 -       * entry pointer if necessary.
 -       */
 -      if (base->first == &timer->node) {
 -              base->first = rb_next(&timer->node);
 +      if (&timer->node == timerqueue_getnext(&base->active)) {
  #ifdef CONFIG_HIGH_RES_TIMERS
                /* Reprogram the clock event device, if enabled */
                if (reprogram && hrtimer_hres_active()) {
                }
  #endif
        }
 -      rb_erase(&timer->node, &base->active);
 +      timerqueue_del(&base->active, &timer->node);
  out:
        timer->state = newstate;
  }
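
The hrtimer hunks in this file all follow from one change: the open-coded rbtree plus cached base->first pointer is replaced by the timerqueue helpers (timerqueue_add, timerqueue_getnext, timerqueue_del), which keep the earliest-expiring node cached internally. A tiny userspace model of that contract, using a sorted list instead of an rbtree; the tq_* names are illustrative stand-ins, not the kernel API:

#include <stdio.h>

struct tq_node { unsigned long long expires; struct tq_node *next; };
struct tq_head { struct tq_node *first; };	/* cached earliest element */

/* Insert in expiry order; equal expiries stay FIFO, and the head pointer
 * always names the next timer to fire, so that lookup is O(1). */
static void tq_add(struct tq_head *h, struct tq_node *n)
{
	struct tq_node **p = &h->first;

	while (*p && (*p)->expires <= n->expires)
		p = &(*p)->next;
	n->next = *p;
	*p = n;
}

static struct tq_node *tq_getnext(struct tq_head *h) { return h->first; }

int main(void)
{
	struct tq_head h = { 0 };
	struct tq_node a = { 30, 0 }, b = { 10, 0 }, c = { 20, 0 };

	tq_add(&h, &a);
	tq_add(&h, &b);
	/* mirrors enqueue_hrtimer()'s new return value: did we become first? */
	printf("b is leftmost: %d\n", tq_getnext(&h) == &b);	/* 1 */
	tq_add(&h, &c);
	printf("next expiry: %llu\n", tq_getnext(&h)->expires);	/* 10 */
	return 0;
}
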
@@@ -1095,13 -1128,11 +1095,13 @@@ ktime_t hrtimer_get_next_event(void
        if (!hrtimer_hres_active()) {
                for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++, base++) {
                        struct hrtimer *timer;
 +                      struct timerqueue_node *next;
  
 -                      if (!base->first)
 +                      next = timerqueue_getnext(&base->active);
 +                      if (!next)
                                continue;
  
 -                      timer = rb_entry(base->first, struct hrtimer, node);
 +                      timer = container_of(next, struct hrtimer, node);
                        delta.tv64 = hrtimer_get_expires_tv64(timer);
                        delta = ktime_sub(delta, base->get_time());
                        if (delta.tv64 < mindelta.tv64)
@@@ -1131,7 -1162,6 +1131,7 @@@ static void __hrtimer_init(struct hrtim
  
        timer->base = &cpu_base->clock_base[clock_id];
        hrtimer_init_timer_hres(timer);
 +      timerqueue_init(&timer->node);
  
  #ifdef CONFIG_TIMER_STATS
        timer->start_site = NULL;
@@@ -1248,14 -1278,14 +1248,14 @@@ retry
  
        for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++) {
                ktime_t basenow;
 -              struct rb_node *node;
 +              struct timerqueue_node *node;
  
                basenow = ktime_add(now, base->offset);
  
 -              while ((node = base->first)) {
 +              while ((node = timerqueue_getnext(&base->active))) {
                        struct hrtimer *timer;
  
 -                      timer = rb_entry(node, struct hrtimer, node);
 +                      timer = container_of(node, struct hrtimer, node);
  
                        /*
                         * The immediate goal for using the softexpires is
@@@ -1411,7 -1441,7 +1411,7 @@@ void hrtimer_run_pending(void
   */
  void hrtimer_run_queues(void)
  {
 -      struct rb_node *node;
 +      struct timerqueue_node *node;
        struct hrtimer_cpu_base *cpu_base = &__get_cpu_var(hrtimer_bases);
        struct hrtimer_clock_base *base;
        int index, gettime = 1;
  
        for (index = 0; index < HRTIMER_MAX_CLOCK_BASES; index++) {
                base = &cpu_base->clock_base[index];
 -
 -              if (!base->first)
 +              if (!timerqueue_getnext(&base->active))
                        continue;
  
                if (gettime) {
  
                raw_spin_lock(&cpu_base->lock);
  
 -              while ((node = base->first)) {
 +              while ((node = timerqueue_getnext(&base->active))) {
                        struct hrtimer *timer;
  
 -                      timer = rb_entry(node, struct hrtimer, node);
 +                      timer = container_of(node, struct hrtimer, node);
                        if (base->softirq_time.tv64 <=
                                        hrtimer_get_expires_tv64(timer))
                                break;
@@@ -1599,10 -1630,8 +1599,10 @@@ static void __cpuinit init_hrtimers_cpu
  
        raw_spin_lock_init(&cpu_base->lock);
  
 -      for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++)
 +      for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++) {
                cpu_base->clock_base[i].cpu_base = cpu_base;
 +              timerqueue_init_head(&cpu_base->clock_base[i].active);
 +      }
  
        hrtimer_init_hres(cpu_base);
  }
@@@ -1613,10 -1642,10 +1613,10 @@@ static void migrate_hrtimer_list(struc
                                struct hrtimer_clock_base *new_base)
  {
        struct hrtimer *timer;
 -      struct rb_node *node;
 +      struct timerqueue_node *node;
  
 -      while ((node = rb_first(&old_base->active))) {
 -              timer = rb_entry(node, struct hrtimer, node);
 +      while ((node = timerqueue_getnext(&old_base->active))) {
 +              timer = container_of(node, struct hrtimer, node);
                BUG_ON(hrtimer_callback_running(timer));
                debug_deactivate(timer);
  
@@@ -1745,7 -1774,7 +1745,7 @@@ schedule_hrtimeout_range_clock(ktime_t 
        }
  
        /*
-        * A NULL parameter means "inifinte"
+        * A NULL parameter means "infinite"
         */
        if (!expires) {
                schedule();
diff --combined kernel/perf_event.c
@@@ -13,7 -13,6 +13,7 @@@
  #include <linux/mm.h>
  #include <linux/cpu.h>
  #include <linux/smp.h>
 +#include <linux/idr.h>
  #include <linux/file.h>
  #include <linux/poll.h>
  #include <linux/slab.h>
@@@ -22,9 -21,7 +22,9 @@@
  #include <linux/dcache.h>
  #include <linux/percpu.h>
  #include <linux/ptrace.h>
 +#include <linux/reboot.h>
  #include <linux/vmstat.h>
 +#include <linux/device.h>
  #include <linux/vmalloc.h>
  #include <linux/hardirq.h>
  #include <linux/rculist.h>
  
  #include <asm/irq_regs.h>
  
 +enum event_type_t {
 +      EVENT_FLEXIBLE = 0x1,
 +      EVENT_PINNED = 0x2,
 +      EVENT_ALL = EVENT_FLEXIBLE | EVENT_PINNED,
 +};
 +
  atomic_t perf_task_events __read_mostly;
  static atomic_t nr_mmap_events __read_mostly;
  static atomic_t nr_comm_events __read_mostly;
@@@ -71,12 -62,6 +71,12 @@@ int sysctl_perf_event_sample_rate __rea
  
  static atomic64_t perf_event_id;
  
 +static void cpu_ctx_sched_out(struct perf_cpu_context *cpuctx,
 +                            enum event_type_t event_type);
 +
 +static void cpu_ctx_sched_in(struct perf_cpu_context *cpuctx,
 +                           enum event_type_t event_type);
 +
  void __weak perf_event_print_debug(void)      { }
  
  extern __weak const char *perf_pmu_name(void)
        return "pmu";
  }
  
 +static inline u64 perf_clock(void)
 +{
 +      return local_clock();
 +}
 +
  void perf_pmu_disable(struct pmu *pmu)
  {
        int *count = this_cpu_ptr(pmu->pmu_disable_count);
@@@ -153,28 -133,6 +153,28 @@@ static void unclone_ctx(struct perf_eve
        }
  }
  
 +static u32 perf_event_pid(struct perf_event *event, struct task_struct *p)
 +{
 +      /*
 +       * only top level events have the pid namespace they were created in
 +       */
 +      if (event->parent)
 +              event = event->parent;
 +
 +      return task_tgid_nr_ns(p, event->ns);
 +}
 +
 +static u32 perf_event_tid(struct perf_event *event, struct task_struct *p)
 +{
 +      /*
 +       * only top level events have the pid namespace they were created in
 +       */
 +      if (event->parent)
 +              event = event->parent;
 +
 +      return task_pid_nr_ns(p, event->ns);
 +}
 +
  /*
   * If we inherit events we want to return the parent event id
   * to userspace.
@@@ -257,6 -215,11 +257,6 @@@ static void perf_unpin_context(struct p
        put_ctx(ctx);
  }
  
 -static inline u64 perf_clock(void)
 -{
 -      return local_clock();
 -}
 -
  /*
   * Update the record of the current time in a context.
   */
@@@ -268,12 -231,6 +268,12 @@@ static void update_context_time(struct 
        ctx->timestamp = now;
  }
  
 +static u64 perf_event_time(struct perf_event *event)
 +{
 +      struct perf_event_context *ctx = event->ctx;
 +      return ctx ? ctx->time : 0;
 +}
 +
  /*
   * Update the total_time_enabled and total_time_running fields for an event.
   */
@@@ -287,7 -244,7 +287,7 @@@ static void update_event_times(struct p
                return;
  
        if (ctx->is_active)
 -              run_end = ctx->time;
 +              run_end = perf_event_time(event);
        else
                run_end = event->tstamp_stopped;
  
        if (event->state == PERF_EVENT_STATE_INACTIVE)
                run_end = event->tstamp_stopped;
        else
 -              run_end = ctx->time;
 +              run_end = perf_event_time(event);
  
        event->total_time_running = run_end - event->tstamp_running;
  }
@@@ -355,84 -312,9 +355,84 @@@ list_add_event(struct perf_event *event
                ctx->nr_stat++;
  }
  
 +/*
 + * Called at perf_event creation and when events are attached/detached from a
 + * group.
 + */
 +static void perf_event__read_size(struct perf_event *event)
 +{
 +      int entry = sizeof(u64); /* value */
 +      int size = 0;
 +      int nr = 1;
 +
 +      if (event->attr.read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
 +              size += sizeof(u64);
 +
 +      if (event->attr.read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
 +              size += sizeof(u64);
 +
 +      if (event->attr.read_format & PERF_FORMAT_ID)
 +              entry += sizeof(u64);
 +
 +      if (event->attr.read_format & PERF_FORMAT_GROUP) {
 +              nr += event->group_leader->nr_siblings;
 +              size += sizeof(u64);
 +      }
 +
 +      size += entry * nr;
 +      event->read_size = size;
 +}
 +
 +static void perf_event__header_size(struct perf_event *event)
 +{
 +      struct perf_sample_data *data;
 +      u64 sample_type = event->attr.sample_type;
 +      u16 size = 0;
 +
 +      perf_event__read_size(event);
 +
 +      if (sample_type & PERF_SAMPLE_IP)
 +              size += sizeof(data->ip);
 +
 +      if (sample_type & PERF_SAMPLE_ADDR)
 +              size += sizeof(data->addr);
 +
 +      if (sample_type & PERF_SAMPLE_PERIOD)
 +              size += sizeof(data->period);
 +
 +      if (sample_type & PERF_SAMPLE_READ)
 +              size += event->read_size;
 +
 +      event->header_size = size;
 +}
 +
 +static void perf_event__id_header_size(struct perf_event *event)
 +{
 +      struct perf_sample_data *data;
 +      u64 sample_type = event->attr.sample_type;
 +      u16 size = 0;
 +
 +      if (sample_type & PERF_SAMPLE_TID)
 +              size += sizeof(data->tid_entry);
 +
 +      if (sample_type & PERF_SAMPLE_TIME)
 +              size += sizeof(data->time);
 +
 +      if (sample_type & PERF_SAMPLE_ID)
 +              size += sizeof(data->id);
 +
 +      if (sample_type & PERF_SAMPLE_STREAM_ID)
 +              size += sizeof(data->stream_id);
 +
 +      if (sample_type & PERF_SAMPLE_CPU)
 +              size += sizeof(data->cpu_entry);
 +
 +      event->id_header_size = size;
 +}
 +
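
The helpers above compute and cache read_size, header_size and id_header_size once, at attach/detach time, instead of rederiving them on every read or sample (the old per-call perf_event_read_size() is deleted further down). A stand-alone re-derivation of the read_size arithmetic; the PERF_FORMAT_* bit values are assumed to match the perf ABI headers of this era:

#include <stdio.h>
#include <stdint.h>

#define PERF_FORMAT_TOTAL_TIME_ENABLED	(1U << 0)
#define PERF_FORMAT_TOTAL_TIME_RUNNING	(1U << 1)
#define PERF_FORMAT_ID			(1U << 2)
#define PERF_FORMAT_GROUP		(1U << 3)

/* Mirrors perf_event__read_size() above as a standalone calculation. */
static int read_size(uint64_t read_format, int nr_siblings)
{
	int entry = sizeof(uint64_t);	/* value */
	int size = 0;
	int nr = 1;

	if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
		size += sizeof(uint64_t);
	if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
		size += sizeof(uint64_t);
	if (read_format & PERF_FORMAT_ID)
		entry += sizeof(uint64_t);
	if (read_format & PERF_FORMAT_GROUP) {
		nr += nr_siblings;
		size += sizeof(uint64_t);	/* the nr field itself */
	}
	return size + entry * nr;
}

int main(void)
{
	uint64_t fmt = PERF_FORMAT_TOTAL_TIME_ENABLED | PERF_FORMAT_ID |
		       PERF_FORMAT_GROUP;

	/* leader + 2 siblings: 8+8 header bytes + 3 entries of 16 = 64 */
	printf("%d\n", read_size(fmt, 2));
	return 0;
}
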
  static void perf_group_attach(struct perf_event *event)
  {
 -      struct perf_event *group_leader = event->group_leader;
 +      struct perf_event *group_leader = event->group_leader, *pos;
  
        /*
         * We can have double attach due to group movement in perf_event_open.
  
        list_add_tail(&event->group_entry, &group_leader->sibling_list);
        group_leader->nr_siblings++;
 +
 +      perf_event__header_size(group_leader);
 +
 +      list_for_each_entry(pos, &group_leader->sibling_list, group_entry)
 +              perf_event__header_size(pos);
  }
  
  /*
@@@ -514,7 -391,7 +514,7 @@@ static void perf_group_detach(struct pe
        if (event->group_leader != event) {
                list_del_init(&event->group_entry);
                event->group_leader->nr_siblings--;
 -              return;
 +              goto out;
        }
  
        if (!list_empty(&event->group_entry))
                /* Inherit group flags from the previous leader */
                sibling->group_flags = event->group_flags;
        }
 +
 +out:
 +      perf_event__header_size(event->group_leader);
 +
 +      list_for_each_entry(tmp, &event->group_leader->sibling_list, group_entry)
 +              perf_event__header_size(tmp);
  }
  
  static inline int
@@@ -552,7 -423,6 +552,7 @@@ event_sched_out(struct perf_event *even
                  struct perf_cpu_context *cpuctx,
                  struct perf_event_context *ctx)
  {
 +      u64 tstamp = perf_event_time(event);
        u64 delta;
        /*
         * An event which could not be activated because of
            && !event_filter_match(event)) {
                delta = ctx->time - event->tstamp_stopped;
                event->tstamp_running += delta;
 -              event->tstamp_stopped = ctx->time;
 +              event->tstamp_stopped = tstamp;
        }
  
        if (event->state != PERF_EVENT_STATE_ACTIVE)
                event->pending_disable = 0;
                event->state = PERF_EVENT_STATE_OFF;
        }
 -      event->tstamp_stopped = ctx->time;
 +      event->tstamp_stopped = tstamp;
        event->pmu->del(event, 0);
        event->oncpu = -1;
  
@@@ -787,8 -657,6 +787,8 @@@ event_sched_in(struct perf_event *event
                 struct perf_cpu_context *cpuctx,
                 struct perf_event_context *ctx)
  {
 +      u64 tstamp = perf_event_time(event);
 +
        if (event->state <= PERF_EVENT_STATE_OFF)
                return 0;
  
                return -EAGAIN;
        }
  
 -      event->tstamp_running += ctx->time - event->tstamp_stopped;
 +      event->tstamp_running += tstamp - event->tstamp_stopped;
  
 -      event->shadow_ctx_time = ctx->time - ctx->timestamp;
 +      event->shadow_ctx_time = tstamp - ctx->timestamp;
  
        if (!is_software_event(event))
                cpuctx->active_oncpu++;
@@@ -919,13 -787,11 +919,13 @@@ static int group_can_go_on(struct perf_
  static void add_event_to_ctx(struct perf_event *event,
                               struct perf_event_context *ctx)
  {
 +      u64 tstamp = perf_event_time(event);
 +
        list_add_event(event, ctx);
        perf_group_attach(event);
 -      event->tstamp_enabled = ctx->time;
 -      event->tstamp_running = ctx->time;
 -      event->tstamp_stopped = ctx->time;
 +      event->tstamp_enabled = tstamp;
 +      event->tstamp_running = tstamp;
 +      event->tstamp_stopped = tstamp;
  }
  
  /*
@@@ -960,7 -826,7 +960,7 @@@ static void __perf_install_in_context(v
  
        add_event_to_ctx(event, ctx);
  
 -      if (event->cpu != -1 && event->cpu != smp_processor_id())
 +      if (!event_filter_match(event))
                goto unlock;
  
        /*
@@@ -1065,13 -931,14 +1065,13 @@@ static void __perf_event_mark_enabled(s
                                        struct perf_event_context *ctx)
  {
        struct perf_event *sub;
 +      u64 tstamp = perf_event_time(event);
  
        event->state = PERF_EVENT_STATE_INACTIVE;
 -      event->tstamp_enabled = ctx->time - event->total_time_enabled;
 +      event->tstamp_enabled = tstamp - event->total_time_enabled;
        list_for_each_entry(sub, &event->sibling_list, group_entry) {
 -              if (sub->state >= PERF_EVENT_STATE_INACTIVE) {
 -                      sub->tstamp_enabled =
 -                              ctx->time - sub->total_time_enabled;
 -              }
 +              if (sub->state >= PERF_EVENT_STATE_INACTIVE)
 +                      sub->tstamp_enabled = tstamp - sub->total_time_enabled;
        }
  }
  
@@@ -1104,7 -971,7 +1104,7 @@@ static void __perf_event_enable(void *i
                goto unlock;
        __perf_event_mark_enabled(event, ctx);
  
 -      if (event->cpu != -1 && event->cpu != smp_processor_id())
 +      if (!event_filter_match(event))
                goto unlock;
  
        /*
@@@ -1206,7 -1073,7 +1206,7 @@@ static int perf_event_refresh(struct pe
        /*
         * not supported on inherited events
         */
 -      if (event->attr.inherit)
 +      if (event->attr.inherit || !is_sampling_event(event))
                return -EINVAL;
  
        atomic_add(refresh, &event->event_limit);
        return 0;
  }
  
 -enum event_type_t {
 -      EVENT_FLEXIBLE = 0x1,
 -      EVENT_PINNED = 0x2,
 -      EVENT_ALL = EVENT_FLEXIBLE | EVENT_PINNED,
 -};
 -
  static void ctx_sched_out(struct perf_event_context *ctx,
                          struct perf_cpu_context *cpuctx,
                          enum event_type_t event_type)
@@@ -1451,7 -1324,7 +1451,7 @@@ ctx_pinned_sched_in(struct perf_event_c
        list_for_each_entry(event, &ctx->pinned_groups, group_entry) {
                if (event->state <= PERF_EVENT_STATE_OFF)
                        continue;
 -              if (event->cpu != -1 && event->cpu != smp_processor_id())
 +              if (!event_filter_match(event))
                        continue;
  
                if (group_can_go_on(event, cpuctx, 1))
@@@ -1483,7 -1356,7 +1483,7 @@@ ctx_flexible_sched_in(struct perf_event
                 * Listen to the 'cpu' scheduling filter constraint
                 * of events:
                 */
 -              if (event->cpu != -1 && event->cpu != smp_processor_id())
 +              if (!event_filter_match(event))
                        continue;
  
                if (group_can_go_on(event, cpuctx, can_add_hw)) {
@@@ -1710,7 -1583,7 +1710,7 @@@ static void perf_ctx_adjust_freq(struc
                if (event->state != PERF_EVENT_STATE_ACTIVE)
                        continue;
  
 -              if (event->cpu != -1 && event->cpu != smp_processor_id())
 +              if (!event_filter_match(event))
                        continue;
  
                hwc = &event->hw;
@@@ -2416,6 -2289,31 +2416,6 @@@ static int perf_release(struct inode *i
        return perf_event_release_kernel(event);
  }
  
 -static int perf_event_read_size(struct perf_event *event)
 -{
 -      int entry = sizeof(u64); /* value */
 -      int size = 0;
 -      int nr = 1;
 -
 -      if (event->attr.read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
 -              size += sizeof(u64);
 -
 -      if (event->attr.read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
 -              size += sizeof(u64);
 -
 -      if (event->attr.read_format & PERF_FORMAT_ID)
 -              entry += sizeof(u64);
 -
 -      if (event->attr.read_format & PERF_FORMAT_GROUP) {
 -              nr += event->group_leader->nr_siblings;
 -              size += sizeof(u64);
 -      }
 -
 -      size += entry * nr;
 -
 -      return size;
 -}
 -
  u64 perf_event_read_value(struct perf_event *event, u64 *enabled, u64 *running)
  {
        struct perf_event *child;
@@@ -2530,7 -2428,7 +2530,7 @@@ perf_read_hw(struct perf_event *event, 
        if (event->state == PERF_EVENT_STATE_ERROR)
                return 0;
  
 -      if (count < perf_event_read_size(event))
 +      if (count < event->read_size)
                return -ENOSPC;
  
        WARN_ON_ONCE(event->ctx->parent_ctx);
@@@ -2616,7 -2514,7 +2616,7 @@@ static int perf_event_period(struct per
        int ret = 0;
        u64 value;
  
 -      if (!event->attr.sample_period)
 +      if (!is_sampling_event(event))
                return -EINVAL;
  
        if (copy_from_user(&value, arg, sizeof(value)))
@@@ -3407,73 -3305,6 +3407,73 @@@ __always_inline void perf_output_copy(s
        } while (len);
  }
  
 +static void __perf_event_header__init_id(struct perf_event_header *header,
 +                                       struct perf_sample_data *data,
 +                                       struct perf_event *event)
 +{
 +      u64 sample_type = event->attr.sample_type;
 +
 +      data->type = sample_type;
 +      header->size += event->id_header_size;
 +
 +      if (sample_type & PERF_SAMPLE_TID) {
 +              /* namespace issues */
 +              data->tid_entry.pid = perf_event_pid(event, current);
 +              data->tid_entry.tid = perf_event_tid(event, current);
 +      }
 +
 +      if (sample_type & PERF_SAMPLE_TIME)
 +              data->time = perf_clock();
 +
 +      if (sample_type & PERF_SAMPLE_ID)
 +              data->id = primary_event_id(event);
 +
 +      if (sample_type & PERF_SAMPLE_STREAM_ID)
 +              data->stream_id = event->id;
 +
 +      if (sample_type & PERF_SAMPLE_CPU) {
 +              data->cpu_entry.cpu      = raw_smp_processor_id();
 +              data->cpu_entry.reserved = 0;
 +      }
 +}
 +
 +static void perf_event_header__init_id(struct perf_event_header *header,
 +                                     struct perf_sample_data *data,
 +                                     struct perf_event *event)
 +{
 +      if (event->attr.sample_id_all)
 +              __perf_event_header__init_id(header, data, event);
 +}
 +
 +static void __perf_event__output_id_sample(struct perf_output_handle *handle,
 +                                         struct perf_sample_data *data)
 +{
 +      u64 sample_type = data->type;
 +
 +      if (sample_type & PERF_SAMPLE_TID)
 +              perf_output_put(handle, data->tid_entry);
 +
 +      if (sample_type & PERF_SAMPLE_TIME)
 +              perf_output_put(handle, data->time);
 +
 +      if (sample_type & PERF_SAMPLE_ID)
 +              perf_output_put(handle, data->id);
 +
 +      if (sample_type & PERF_SAMPLE_STREAM_ID)
 +              perf_output_put(handle, data->stream_id);
 +
 +      if (sample_type & PERF_SAMPLE_CPU)
 +              perf_output_put(handle, data->cpu_entry);
 +}
 +
 +static void perf_event__output_id_sample(struct perf_event *event,
 +                                       struct perf_output_handle *handle,
 +                                       struct perf_sample_data *sample)
 +{
 +      if (event->attr.sample_id_all)
 +              __perf_event__output_id_sample(handle, sample);
 +}
 +
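
perf_event__id_header_size(), __perf_event_header__init_id() and __perf_event__output_id_sample() must agree on one fixed field order (TID, TIME, ID, STREAM_ID, CPU), otherwise the sample_id trailer appended to non-sample records would not match its precomputed size and consumers would mis-parse the ring buffer. A userspace sketch of that append-in-declared-order contract; emit_trailer is a hypothetical stand-in, and the SAMPLE_* bit values are assumed from the perf ABI:

#include <stdio.h>
#include <stdint.h>
#include <string.h>

#define SAMPLE_TID	(1U << 1)
#define SAMPLE_TIME	(1U << 2)
#define SAMPLE_ID	(1U << 6)

/* Append one u64 per selected field, in the same fixed order the
 * sizing pass counted them in. */
static size_t emit_trailer(uint64_t type, uint8_t *buf,
			   uint64_t tid, uint64_t time, uint64_t id)
{
	size_t off = 0;

	if (type & SAMPLE_TID)	{ memcpy(buf + off, &tid,  8); off += 8; }
	if (type & SAMPLE_TIME)	{ memcpy(buf + off, &time, 8); off += 8; }
	if (type & SAMPLE_ID)	{ memcpy(buf + off, &id,   8); off += 8; }
	return off;	/* must equal the precomputed id_header_size */
}

int main(void)
{
	uint8_t buf[24];
	size_t n = emit_trailer(SAMPLE_TID | SAMPLE_TIME, buf, 1234, 99, 7);

	printf("trailer bytes: %zu\n", n);	/* 16 */
	return 0;
}
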
  int perf_output_begin(struct perf_output_handle *handle,
                      struct perf_event *event, unsigned int size,
                      int nmi, int sample)
        struct perf_buffer *buffer;
        unsigned long tail, offset, head;
        int have_lost;
 +      struct perf_sample_data sample_data;
        struct {
                struct perf_event_header header;
                u64                      id;
                goto out;
  
        have_lost = local_read(&buffer->lost);
 -      if (have_lost)
 -              size += sizeof(lost_event);
 +      if (have_lost) {
 +              lost_event.header.size = sizeof(lost_event);
 +              perf_event_header__init_id(&lost_event.header, &sample_data,
 +                                         event);
 +              size += lost_event.header.size;
 +      }
  
        perf_output_get_handle(handle);
  
        if (have_lost) {
                lost_event.header.type = PERF_RECORD_LOST;
                lost_event.header.misc = 0;
 -              lost_event.header.size = sizeof(lost_event);
                lost_event.id          = event->id;
                lost_event.lost        = local_xchg(&buffer->lost, 0);
  
                perf_output_put(handle, lost_event);
 +              perf_event__output_id_sample(event, handle, &sample_data);
        }
  
        return 0;
@@@ -3581,6 -3407,28 +3581,6 @@@ void perf_output_end(struct perf_output
        rcu_read_unlock();
  }
  
 -static u32 perf_event_pid(struct perf_event *event, struct task_struct *p)
 -{
 -      /*
 -       * only top level events have the pid namespace they were created in
 -       */
 -      if (event->parent)
 -              event = event->parent;
 -
 -      return task_tgid_nr_ns(p, event->ns);
 -}
 -
 -static u32 perf_event_tid(struct perf_event *event, struct task_struct *p)
 -{
 -      /*
 -       * only top level events have the pid namespace they were created in
 -       */
 -      if (event->parent)
 -              event = event->parent;
 -
 -      return task_pid_nr_ns(p, event->ns);
 -}
 -
  static void perf_output_read_one(struct perf_output_handle *handle,
                                 struct perf_event *event,
                                 u64 enabled, u64 running)
@@@ -3755,16 -3603,61 +3755,16 @@@ void perf_prepare_sample(struct perf_ev
  {
        u64 sample_type = event->attr.sample_type;
  
 -      data->type = sample_type;
 -
        header->type = PERF_RECORD_SAMPLE;
 -      header->size = sizeof(*header);
 +      header->size = sizeof(*header) + event->header_size;
  
        header->misc = 0;
        header->misc |= perf_misc_flags(regs);
  
 -      if (sample_type & PERF_SAMPLE_IP) {
 -              data->ip = perf_instruction_pointer(regs);
 -
 -              header->size += sizeof(data->ip);
 -      }
 -
 -      if (sample_type & PERF_SAMPLE_TID) {
 -              /* namespace issues */
 -              data->tid_entry.pid = perf_event_pid(event, current);
 -              data->tid_entry.tid = perf_event_tid(event, current);
 -
 -              header->size += sizeof(data->tid_entry);
 -      }
 -
 -      if (sample_type & PERF_SAMPLE_TIME) {
 -              data->time = perf_clock();
 -
 -              header->size += sizeof(data->time);
 -      }
 -
 -      if (sample_type & PERF_SAMPLE_ADDR)
 -              header->size += sizeof(data->addr);
 -
 -      if (sample_type & PERF_SAMPLE_ID) {
 -              data->id = primary_event_id(event);
 -
 -              header->size += sizeof(data->id);
 -      }
 -
 -      if (sample_type & PERF_SAMPLE_STREAM_ID) {
 -              data->stream_id = event->id;
 -
 -              header->size += sizeof(data->stream_id);
 -      }
 -
 -      if (sample_type & PERF_SAMPLE_CPU) {
 -              data->cpu_entry.cpu             = raw_smp_processor_id();
 -              data->cpu_entry.reserved        = 0;
 -
 -              header->size += sizeof(data->cpu_entry);
 -      }
 +      __perf_event_header__init_id(header, data, event);
  
 -      if (sample_type & PERF_SAMPLE_PERIOD)
 -              header->size += sizeof(data->period);
 -
 -      if (sample_type & PERF_SAMPLE_READ)
 -              header->size += perf_event_read_size(event);
 +      if (sample_type & PERF_SAMPLE_IP)
 +              data->ip = perf_instruction_pointer(regs);
  
        if (sample_type & PERF_SAMPLE_CALLCHAIN) {
                int size = 1;
@@@ -3829,26 -3722,23 +3829,26 @@@ perf_event_read_event(struct perf_even
                        struct task_struct *task)
  {
        struct perf_output_handle handle;
 +      struct perf_sample_data sample;
        struct perf_read_event read_event = {
                .header = {
                        .type = PERF_RECORD_READ,
                        .misc = 0,
 -                      .size = sizeof(read_event) + perf_event_read_size(event),
 +                      .size = sizeof(read_event) + event->read_size,
                },
                .pid = perf_event_pid(event, task),
                .tid = perf_event_tid(event, task),
        };
        int ret;
  
 +      perf_event_header__init_id(&read_event.header, &sample, event);
        ret = perf_output_begin(&handle, event, read_event.header.size, 0, 0);
        if (ret)
                return;
  
        perf_output_put(&handle, read_event);
        perf_output_read(&handle, event);
 +      perf_event__output_id_sample(event, &handle, &sample);
  
        perf_output_end(&handle);
  }
@@@ -3878,16 -3768,14 +3878,16 @@@ static void perf_event_task_output(stru
                                     struct perf_task_event *task_event)
  {
        struct perf_output_handle handle;
 +      struct perf_sample_data sample;
        struct task_struct *task = task_event->task;
 -      int size, ret;
 +      int ret, size = task_event->event_id.header.size;
  
 -      size  = task_event->event_id.header.size;
 -      ret = perf_output_begin(&handle, event, size, 0, 0);
 +      perf_event_header__init_id(&task_event->event_id.header, &sample, event);
  
 +      ret = perf_output_begin(&handle, event,
 +                              task_event->event_id.header.size, 0, 0);
        if (ret)
 -              return;
 +              goto out;
  
        task_event->event_id.pid = perf_event_pid(event, task);
        task_event->event_id.ppid = perf_event_pid(event, current);
  
        perf_output_put(&handle, task_event->event_id);
  
 +      perf_event__output_id_sample(event, &handle, &sample);
 +
        perf_output_end(&handle);
 +out:
 +      task_event->event_id.header.size = size;
  }
  
  static int perf_event_task_match(struct perf_event *event)
        if (event->state < PERF_EVENT_STATE_INACTIVE)
                return 0;
  
 -      if (event->cpu != -1 && event->cpu != smp_processor_id())
 +      if (!event_filter_match(event))
                return 0;
  
        if (event->attr.comm || event->attr.mmap ||
@@@ -4016,16 -3900,11 +4016,16 @@@ static void perf_event_comm_output(stru
                                     struct perf_comm_event *comm_event)
  {
        struct perf_output_handle handle;
 +      struct perf_sample_data sample;
        int size = comm_event->event_id.header.size;
 -      int ret = perf_output_begin(&handle, event, size, 0, 0);
 +      int ret;
 +
 +      perf_event_header__init_id(&comm_event->event_id.header, &sample, event);
 +      ret = perf_output_begin(&handle, event,
 +                              comm_event->event_id.header.size, 0, 0);
  
        if (ret)
 -              return;
 +              goto out;
  
        comm_event->event_id.pid = perf_event_pid(event, comm_event->task);
        comm_event->event_id.tid = perf_event_tid(event, comm_event->task);
        perf_output_put(&handle, comm_event->event_id);
        perf_output_copy(&handle, comm_event->comm,
                                   comm_event->comm_size);
 +
 +      perf_event__output_id_sample(event, &handle, &sample);
 +
        perf_output_end(&handle);
 +out:
 +      comm_event->event_id.header.size = size;
  }
  
  static int perf_event_comm_match(struct perf_event *event)
        if (event->state < PERF_EVENT_STATE_INACTIVE)
                return 0;
  
 -      if (event->cpu != -1 && event->cpu != smp_processor_id())
 +      if (!event_filter_match(event))
                return 0;
  
        if (event->attr.comm)
@@@ -4083,6 -3957,7 +4083,6 @@@ static void perf_event_comm_event(struc
        comm_event->comm_size = size;
  
        comm_event->event_id.header.size = sizeof(comm_event->event_id) + size;
 -
        rcu_read_lock();
        list_for_each_entry_rcu(pmu, &pmus, entry) {
                cpuctx = get_cpu_ptr(pmu->pmu_cpu_context);
@@@ -4163,15 -4038,11 +4163,15 @@@ static void perf_event_mmap_output(stru
                                     struct perf_mmap_event *mmap_event)
  {
        struct perf_output_handle handle;
 +      struct perf_sample_data sample;
        int size = mmap_event->event_id.header.size;
 -      int ret = perf_output_begin(&handle, event, size, 0, 0);
 +      int ret;
  
 +      perf_event_header__init_id(&mmap_event->event_id.header, &sample, event);
 +      ret = perf_output_begin(&handle, event,
 +                              mmap_event->event_id.header.size, 0, 0);
        if (ret)
 -              return;
 +              goto out;
  
        mmap_event->event_id.pid = perf_event_pid(event, current);
        mmap_event->event_id.tid = perf_event_tid(event, current);
        perf_output_put(&handle, mmap_event->event_id);
        perf_output_copy(&handle, mmap_event->file_name,
                                   mmap_event->file_size);
 +
 +      perf_event__output_id_sample(event, &handle, &sample);
 +
        perf_output_end(&handle);
 +out:
 +      mmap_event->event_id.header.size = size;
  }
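
Note why the task, comm and mmap output paths now save event_id.header.size up front and restore it at out:: the event_id record is shared by every event the rcu iteration hands it to, while perf_event_header__init_id() grows header.size by a per-event amount (each listener's attr decides which ID fields it wants). Without the restore, one listener's trailer size would leak into the next iteration. The invariant, as a small sketch with hypothetical names:

    /* One shared record, resized per listener and then put back. */
    struct shared_record { unsigned short size; /* ... payload ... */ };

    static void output_to_one_listener(struct shared_record *rec,
                                       unsigned short id_size)
    {
            unsigned short saved = rec->size;   /* shared across listeners */

            rec->size += id_size;               /* this listener's trailer */
            /* ... reserve rec->size bytes, write record + trailer ... */

            rec->size = saved;                  /* clean slate for the next */
    }
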
  
  static int perf_event_mmap_match(struct perf_event *event,
        if (event->state < PERF_EVENT_STATE_INACTIVE)
                return 0;
  
 -      if (event->cpu != -1 && event->cpu != smp_processor_id())
 +      if (!event_filter_match(event))
                return 0;
  
        if ((!executable && event->attr.mmap_data) ||
@@@ -4339,7 -4205,6 +4339,7 @@@ void perf_event_mmap(struct vm_area_str
  static void perf_log_throttle(struct perf_event *event, int enable)
  {
        struct perf_output_handle handle;
 +      struct perf_sample_data sample;
        int ret;
  
        struct {
        if (enable)
                throttle_event.header.type = PERF_RECORD_UNTHROTTLE;
  
 -      ret = perf_output_begin(&handle, event, sizeof(throttle_event), 1, 0);
 +      perf_event_header__init_id(&throttle_event.header, &sample, event);
 +
 +      ret = perf_output_begin(&handle, event,
 +                              throttle_event.header.size, 1, 0);
        if (ret)
                return;
  
        perf_output_put(&handle, throttle_event);
 +      perf_event__output_id_sample(event, &handle, &sample);
        perf_output_end(&handle);
  }
  
@@@ -4385,13 -4246,6 +4385,13 @@@ static int __perf_event_overflow(struc
        struct hw_perf_event *hwc = &event->hw;
        int ret = 0;
  
 +      /*
 +       * Non-sampling counters might still use the PMI to fold short
 +       * hardware counters, ignore those.
 +       */
 +      if (unlikely(!is_sampling_event(event)))
 +              return 0;
 +
        if (!throttle) {
                hwc->interrupts++;
        } else {
@@@ -4537,7 -4391,7 +4537,7 @@@ static void perf_swevent_event(struct p
        if (!regs)
                return;
  
 -      if (!hwc->sample_period)
 +      if (!is_sampling_event(event))
                return;
  
        if (nr == 1 && hwc->sample_period == 1 && !event->attr.freq)
@@@ -4664,7 -4518,7 +4664,7 @@@ int perf_swevent_get_recursion_context(
  }
  EXPORT_SYMBOL_GPL(perf_swevent_get_recursion_context);
  
- void inline perf_swevent_put_recursion_context(int rctx)
+ inline void perf_swevent_put_recursion_context(int rctx)
  {
        struct swevent_htable *swhash = &__get_cpu_var(swevent_htable);
  
@@@ -4700,7 -4554,7 +4700,7 @@@ static int perf_swevent_add(struct perf
        struct hw_perf_event *hwc = &event->hw;
        struct hlist_head *head;
  
 -      if (hwc->sample_period) {
 +      if (is_sampling_event(event)) {
                hwc->last_period = hwc->sample_period;
                perf_swevent_set_period(event);
        }
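
Several call sites in this file replace the open-coded hwc->sample_period test with is_sampling_event(). The helper is introduced elsewhere in this merge and is essentially a readability wrapper, roughly:

    /* Approximate shape of the new predicate (include/linux/perf_event.h). */
    static inline bool is_sampling_event(struct perf_event *event)
    {
            return event->attr.sample_period != 0;
    }

Centralizing the test also keeps the sampling-versus-counting distinction in one place should the condition ever grow.
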
@@@ -4957,6 -4811,15 +4957,6 @@@ static int perf_tp_event_init(struct pe
        if (event->attr.type != PERF_TYPE_TRACEPOINT)
                return -ENOENT;
  
 -      /*
 -       * Raw tracepoint data is a severe data leak, only allow root to
 -       * have these.
 -       */
 -      if ((event->attr.sample_type & PERF_SAMPLE_RAW) &&
 -                      perf_paranoid_tracepoint_raw() &&
 -                      !capable(CAP_SYS_ADMIN))
 -              return -EPERM;
 -
        err = perf_trace_init(event);
        if (err)
                return err;
@@@ -4979,7 -4842,7 +4979,7 @@@ static struct pmu perf_tracepoint = 
  
  static inline void perf_tp_register(void)
  {
 -      perf_pmu_register(&perf_tracepoint);
 +      perf_pmu_register(&perf_tracepoint, "tracepoint", PERF_TYPE_TRACEPOINT);
  }
  
  static int perf_event_set_filter(struct perf_event *event, void __user *arg)
@@@ -5069,33 -4932,31 +5069,33 @@@ static enum hrtimer_restart perf_sweven
  static void perf_swevent_start_hrtimer(struct perf_event *event)
  {
        struct hw_perf_event *hwc = &event->hw;
 +      s64 period;
 +
 +      if (!is_sampling_event(event))
 +              return;
  
        hrtimer_init(&hwc->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
        hwc->hrtimer.function = perf_swevent_hrtimer;
 -      if (hwc->sample_period) {
 -              s64 period = local64_read(&hwc->period_left);
  
 -              if (period) {
 -                      if (period < 0)
 -                              period = 10000;
 +      period = local64_read(&hwc->period_left);
 +      if (period) {
 +              if (period < 0)
 +                      period = 10000;
  
 -                      local64_set(&hwc->period_left, 0);
 -              } else {
 -                      period = max_t(u64, 10000, hwc->sample_period);
 -              }
 -              __hrtimer_start_range_ns(&hwc->hrtimer,
 +              local64_set(&hwc->period_left, 0);
 +      } else {
 +              period = max_t(u64, 10000, hwc->sample_period);
 +      }
 +      __hrtimer_start_range_ns(&hwc->hrtimer,
                                ns_to_ktime(period), 0,
                                HRTIMER_MODE_REL_PINNED, 0);
 -      }
  }
  
  static void perf_swevent_cancel_hrtimer(struct perf_event *event)
  {
        struct hw_perf_event *hwc = &event->hw;
  
 -      if (hwc->sample_period) {
 +      if (is_sampling_event(event)) {
                ktime_t remaining = hrtimer_get_remaining(&hwc->hrtimer);
                local64_set(&hwc->period_left, ktime_to_ns(remaining));
  
@@@ -5323,61 -5184,8 +5323,61 @@@ static void free_pmu_context(struct pm
  out:
        mutex_unlock(&pmus_lock);
  }
 +static struct idr pmu_idr;
 +
 +static ssize_t
 +type_show(struct device *dev, struct device_attribute *attr, char *page)
 +{
 +      struct pmu *pmu = dev_get_drvdata(dev);
 +
 +      return snprintf(page, PAGE_SIZE-1, "%d\n", pmu->type);
 +}
 +
 +static struct device_attribute pmu_dev_attrs[] = {
 +       __ATTR_RO(type),
 +       __ATTR_NULL,
 +};
 +
 +static int pmu_bus_running;
 +static struct bus_type pmu_bus = {
 +      .name           = "event_source",
 +      .dev_attrs      = pmu_dev_attrs,
 +};
 +
 +static void pmu_dev_release(struct device *dev)
 +{
 +      kfree(dev);
 +}
 +
 +static int pmu_dev_alloc(struct pmu *pmu)
 +{
 +      int ret = -ENOMEM;
 +
 +      pmu->dev = kzalloc(sizeof(struct device), GFP_KERNEL);
 +      if (!pmu->dev)
 +              goto out;
 +
 +      device_initialize(pmu->dev);
 +      ret = dev_set_name(pmu->dev, "%s", pmu->name);
 +      if (ret)
 +              goto free_dev;
 +
 +      dev_set_drvdata(pmu->dev, pmu);
 +      pmu->dev->bus = &pmu_bus;
 +      pmu->dev->release = pmu_dev_release;
 +      ret = device_add(pmu->dev);
 +      if (ret)
 +              goto free_dev;
 +
 +out:
 +      return ret;
 +
 +free_dev:
 +      put_device(pmu->dev);
 +      goto out;
 +}
  
 -int perf_pmu_register(struct pmu *pmu)
 +int perf_pmu_register(struct pmu *pmu, char *name, int type)
  {
        int cpu, ret;
  
        if (!pmu->pmu_disable_count)
                goto unlock;
  
 +      pmu->type = -1;
 +      if (!name)
 +              goto skip_type;
 +      pmu->name = name;
 +
 +      if (type < 0) {
 +              int err = idr_pre_get(&pmu_idr, GFP_KERNEL);
 +              if (!err)
 +                      goto free_pdc;
 +
 +              err = idr_get_new_above(&pmu_idr, pmu, PERF_TYPE_MAX, &type);
 +              if (err) {
 +                      ret = err;
 +                      goto free_pdc;
 +              }
 +      }
 +      pmu->type = type;
 +
 +      if (pmu_bus_running) {
 +              ret = pmu_dev_alloc(pmu);
 +              if (ret)
 +                      goto free_idr;
 +      }
 +
 +skip_type:
        pmu->pmu_cpu_context = find_pmu_context(pmu->task_ctx_nr);
        if (pmu->pmu_cpu_context)
                goto got_cpu_context;
  
        pmu->pmu_cpu_context = alloc_percpu(struct perf_cpu_context);
        if (!pmu->pmu_cpu_context)
 -              goto free_pdc;
 +              goto free_dev;
  
        for_each_possible_cpu(cpu) {
                struct perf_cpu_context *cpuctx;
@@@ -5462,14 -5245,6 +5462,14 @@@ unlock
  
        return ret;
  
 +free_dev:
 +      device_del(pmu->dev);
 +      put_device(pmu->dev);
 +
 +free_idr:
 +      if (pmu->type >= PERF_TYPE_MAX)
 +              idr_remove(&pmu_idr, pmu->type);
 +
  free_pdc:
        free_percpu(pmu->pmu_disable_count);
        goto unlock;
@@@ -5489,10 -5264,6 +5489,10 @@@ void perf_pmu_unregister(struct pmu *pm
        synchronize_rcu();
  
        free_percpu(pmu->pmu_disable_count);
 +      if (pmu->type >= PERF_TYPE_MAX)
 +              idr_remove(&pmu_idr, pmu->type);
 +      device_del(pmu->dev);
 +      put_device(pmu->dev);
        free_pmu_context(pmu);
  }
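
perf_pmu_register() and perf_pmu_unregister() above use the two-step idr API of this era: idr_pre_get() preloads backing memory, then idr_get_new_above() hands out an id strictly above PERF_TYPE_MAX, so dynamically allocated types can never collide with the fixed perf_type_id values. The same pattern in isolation, as a sketch (error paths trimmed; names other than the idr calls are invented for illustration):

    #include <linux/idr.h>
    #include <linux/gfp.h>
    #include <linux/errno.h>

    static DEFINE_IDR(my_idr);

    /* Allocate the lowest free id >= first_free and map it to ptr. */
    static int alloc_dynamic_id(void *ptr, int first_free)
    {
            int id;

            if (!idr_pre_get(&my_idr, GFP_KERNEL)) /* preload memory */
                    return -ENOMEM;

            if (idr_get_new_above(&my_idr, ptr, first_free, &id))
                    return -ENOSPC;

            return id;  /* pair with idr_remove(&my_idr, id) on teardown */
    }

perf_init_event() completes the picture: idr_find() under rcu_read_lock() turns an attr.type back into the owning pmu without walking the whole pmus list.
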
  
@@@ -5502,13 -5273,6 +5502,13 @@@ struct pmu *perf_init_event(struct perf
        int idx;
  
        idx = srcu_read_lock(&pmus_srcu);
 +
 +      rcu_read_lock();
 +      pmu = idr_find(&pmu_idr, event->attr.type);
 +      rcu_read_unlock();
 +      if (pmu)
 +              goto unlock;
 +
        list_for_each_entry_rcu(pmu, &pmus, entry) {
                int ret = pmu->event_init(event);
                if (!ret)
@@@ -5974,12 -5738,6 +5974,12 @@@ SYSCALL_DEFINE5(perf_event_open
        mutex_unlock(&current->perf_event_mutex);
  
        /*
 +       * Precalculate sample_data sizes
 +       */
 +      perf_event__header_size(event);
 +      perf_event__id_header_size(event);
 +
 +      /*
         * Drop the reference on the group_event after placing the
         * new event on the sibling_list. This ensures destruction
         * of the group leader will find the pointer to itself in
@@@ -6332,12 -6090,6 +6332,12 @@@ inherit_event(struct perf_event *parent
        child_event->overflow_handler = parent_event->overflow_handler;
  
        /*
 +       * Precalculate sample_data sizes
 +       */
 +      perf_event__header_size(child_event);
 +      perf_event__id_header_size(child_event);
 +
 +      /*
         * Link it up in the child's context:
         */
        raw_spin_lock_irqsave(&child_ctx->lock, flags);
@@@ -6568,7 -6320,7 +6568,7 @@@ static void __cpuinit perf_event_init_c
        mutex_unlock(&swhash->hlist_mutex);
  }
  
 -#ifdef CONFIG_HOTPLUG_CPU
 +#if defined CONFIG_HOTPLUG_CPU || defined CONFIG_KEXEC
  static void perf_pmu_rotate_stop(struct pmu *pmu)
  {
        struct perf_cpu_context *cpuctx = this_cpu_ptr(pmu->pmu_cpu_context);
@@@ -6622,26 -6374,6 +6622,26 @@@ static void perf_event_exit_cpu(int cpu
  static inline void perf_event_exit_cpu(int cpu) { }
  #endif
  
 +static int
 +perf_reboot(struct notifier_block *notifier, unsigned long val, void *v)
 +{
 +      int cpu;
 +
 +      for_each_online_cpu(cpu)
 +              perf_event_exit_cpu(cpu);
 +
 +      return NOTIFY_OK;
 +}
 +
 +/*
 + * Run the perf reboot notifier at the very last possible moment so that
 + * the generic watchdog code runs as long as possible.
 + */
 +static struct notifier_block perf_reboot_notifier = {
 +      .notifier_call = perf_reboot,
 +      .priority = INT_MIN,
 +};
 +
  static int __cpuinit
  perf_cpu_notify(struct notifier_block *self, unsigned long action, void *hcpu)
  {
@@@ -6670,45 -6402,14 +6670,45 @@@ void __init perf_event_init(void
  {
        int ret;
  
 +      idr_init(&pmu_idr);
 +
        perf_event_init_all_cpus();
        init_srcu_struct(&pmus_srcu);
 -      perf_pmu_register(&perf_swevent);
 -      perf_pmu_register(&perf_cpu_clock);
 -      perf_pmu_register(&perf_task_clock);
 +      perf_pmu_register(&perf_swevent, "software", PERF_TYPE_SOFTWARE);
 +      perf_pmu_register(&perf_cpu_clock, NULL, -1);
 +      perf_pmu_register(&perf_task_clock, NULL, -1);
        perf_tp_register();
        perf_cpu_notifier(perf_cpu_notify);
 +      register_reboot_notifier(&perf_reboot_notifier);
  
        ret = init_hw_breakpoint();
        WARN(ret, "hw_breakpoint initialization failed with: %d", ret);
  }
 +
 +static int __init perf_event_sysfs_init(void)
 +{
 +      struct pmu *pmu;
 +      int ret;
 +
 +      mutex_lock(&pmus_lock);
 +
 +      ret = bus_register(&pmu_bus);
 +      if (ret)
 +              goto unlock;
 +
 +      list_for_each_entry(pmu, &pmus, entry) {
 +              if (!pmu->name || pmu->type < 0)
 +                      continue;
 +
 +              ret = pmu_dev_alloc(pmu);
 +              WARN(ret, "Failed to register pmu: %s, reason %d\n", pmu->name, ret);
 +      }
 +      pmu_bus_running = 1;
 +      ret = 0;
 +
 +unlock:
 +      mutex_unlock(&pmus_lock);
 +
 +      return ret;
 +}
 +device_initcall(perf_event_sysfs_init);
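
With pmu_dev_alloc() in place each named PMU shows up on the new event_source bus, so userspace can discover dynamic type ids from /sys/bus/event_source/devices/<name>/type instead of hard-coding perf_type_id values. A minimal reader, as plain userspace C (the path layout follows the bus and attribute added above):

    #include <stdio.h>

    /* Read a PMU's type id; e.g. pmu_type("software") should yield 1. */
    static int pmu_type(const char *name)
    {
            char path[256];
            int type = -1;
            FILE *f;

            snprintf(path, sizeof(path),
                     "/sys/bus/event_source/devices/%s/type", name);
            f = fopen(path, "r");
            if (!f)
                    return -1;
            if (fscanf(f, "%d", &type) != 1)
                    type = -1;
            fclose(f);
            return type;
    }

The value can be fed straight into perf_event_attr.type when opening an event on that PMU.
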
diff --combined kernel/power/hibernate.c
@@@ -51,18 -51,18 +51,18 @@@ enum 
  
  static int hibernation_mode = HIBERNATION_SHUTDOWN;
  
- static struct platform_hibernation_ops *hibernation_ops;
+ static const struct platform_hibernation_ops *hibernation_ops;
  
  /**
   * hibernation_set_ops - set the global hibernate operations
   * @ops: the hibernation operations to use in subsequent hibernation transitions
   */
  
- void hibernation_set_ops(struct platform_hibernation_ops *ops)
+ void hibernation_set_ops(const struct platform_hibernation_ops *ops)
  {
        if (ops && !(ops->begin && ops->end &&  ops->pre_snapshot
            && ops->prepare && ops->finish && ops->enter && ops->pre_restore
 -          && ops->restore_cleanup)) {
 +          && ops->restore_cleanup && ops->leave)) {
                WARN_ON(1);
                return;
        }
@@@ -278,7 -278,7 +278,7 @@@ static int create_image(int platform_mo
                goto Enable_irqs;
        }
  
 -      if (hibernation_test(TEST_CORE) || !pm_check_wakeup_events())
 +      if (hibernation_test(TEST_CORE) || pm_wakeup_pending())
                goto Power_up;
  
        in_suspend = 1;
@@@ -516,7 -516,7 +516,7 @@@ int hibernation_platform_enter(void
  
        local_irq_disable();
        sysdev_suspend(PMSG_HIBERNATE);
 -      if (!pm_check_wakeup_events()) {
 +      if (pm_wakeup_pending()) {
                error = -EAGAIN;
                goto Power_up;
        }
@@@ -647,7 -647,6 +647,7 @@@ int hibernate(void
                swsusp_free();
                if (!error)
                        power_down();
 +              in_suspend = 0;
                pm_restore_gfp_mask();
        } else {
                pr_debug("PM: Image restored successfully.\n");
diff --combined kernel/power/suspend.c
@@@ -22,7 -22,6 +22,7 @@@
  #include <linux/mm.h>
  #include <linux/slab.h>
  #include <linux/suspend.h>
 +#include <trace/events/power.h>
  
  #include "power.h"
  
@@@ -31,13 -30,13 +31,13 @@@ const char *const pm_states[PM_SUSPEND_
        [PM_SUSPEND_MEM]        = "mem",
  };
  
- static struct platform_suspend_ops *suspend_ops;
+ static const struct platform_suspend_ops *suspend_ops;
  
  /**
   *    suspend_set_ops - Set the global suspend method table.
   *    @ops:   Pointer to ops structure.
   */
- void suspend_set_ops(struct platform_suspend_ops *ops)
+ void suspend_set_ops(const struct platform_suspend_ops *ops)
  {
        mutex_lock(&pm_mutex);
        suspend_ops = ops;
@@@ -164,7 -163,7 +164,7 @@@ static int suspend_enter(suspend_state_
  
        error = sysdev_suspend(PMSG_SUSPEND);
        if (!error) {
 -              if (!suspend_test(TEST_CORE) && pm_check_wakeup_events()) {
 +              if (!(suspend_test(TEST_CORE) || pm_wakeup_pending())) {
                        error = suspend_ops->enter(state);
                        events_check_enabled = false;
                }
@@@ -202,7 -201,6 +202,7 @@@ int suspend_devices_and_enter(suspend_s
        if (!suspend_ops)
                return -ENOSYS;
  
 +      trace_machine_suspend(state);
        if (suspend_ops->begin) {
                error = suspend_ops->begin(state);
                if (error)
   Close:
        if (suspend_ops->end)
                suspend_ops->end();
 +      trace_machine_suspend(PWR_EVENT_EXIT);
        return error;
  
   Recover_platform:
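
Both power hunks constify the platform ops tables (platform_hibernation_ops, platform_suspend_ops), which lets platform code keep its callback tables in rodata. On the driver side the change looks roughly like this; my_suspend_enter and my_pm_init are invented example names:

    #include <linux/suspend.h>
    #include <linux/init.h>

    static int my_suspend_enter(suspend_state_t state)
    {
            /* platform-specific low-power entry would go here */
            return 0;
    }

    /* The table can now live in rodata, since the setter takes const. */
    static const struct platform_suspend_ops my_suspend_ops = {
            .valid = suspend_valid_only_mem,
            .enter = my_suspend_enter,
    };

    static int __init my_pm_init(void)
    {
            suspend_set_ops(&my_suspend_ops);
            return 0;
    }
    core_initcall(my_pm_init);
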
diff --combined kernel/sched.c
  
  #include <asm/tlb.h>
  #include <asm/irq_regs.h>
 +#include <asm/mutex.h>
  
  #include "sched_cpupri.h"
  #include "workqueue_sched.h"
 +#include "sched_autogroup.h"
  
  #define CREATE_TRACE_POINTS
  #include <trace/events/sched.h>
@@@ -255,8 -253,6 +255,8 @@@ struct task_group 
        /* runqueue "owned" by this group on each cpu */
        struct cfs_rq **cfs_rq;
        unsigned long shares;
 +
 +      atomic_t load_weight;
  #endif
  
  #ifdef CONFIG_RT_GROUP_SCHED
        struct task_group *parent;
        struct list_head siblings;
        struct list_head children;
 -};
  
 -#define root_task_group init_task_group
 +#ifdef CONFIG_SCHED_AUTOGROUP
 +      struct autogroup *autogroup;
 +#endif
 +};
  
 -/* task_group_lock serializes add/remove of task groups and also changes to
 - * a task group's cpu shares.
 - */
 +/* task_group_lock serializes the addition/removal of task groups */
  static DEFINE_SPINLOCK(task_group_lock);
  
  #ifdef CONFIG_FAIR_GROUP_SCHED
  
 -#ifdef CONFIG_SMP
 -static int root_task_group_empty(void)
 -{
 -      return list_empty(&root_task_group.children);
 -}
 -#endif
 -
 -# define INIT_TASK_GROUP_LOAD NICE_0_LOAD
 +# define ROOT_TASK_GROUP_LOAD NICE_0_LOAD
  
  /*
   * A weight of 0 or 1 can cause arithmetics problems.
  #define MIN_SHARES    2
  #define MAX_SHARES    (1UL << 18)
  
 -static int init_task_group_load = INIT_TASK_GROUP_LOAD;
 +static int root_task_group_load = ROOT_TASK_GROUP_LOAD;
  #endif
  
  /* Default task group.
   *    Every task in system belong to this group at bootup.
   */
 -struct task_group init_task_group;
 +struct task_group root_task_group;
  
  #endif        /* CONFIG_CGROUP_SCHED */
  
@@@ -339,7 -342,6 +339,7 @@@ struct cfs_rq 
         * leaf_cfs_rq_list ties together list of leaf cfs_rq's in a cpu. This
         * list is used during load balance.
         */
 +      int on_list;
        struct list_head leaf_cfs_rq_list;
        struct task_group *tg;  /* group that "owns" this runqueue */
  
        unsigned long h_load;
  
        /*
 -       * this cpu's part of tg->shares
 +       * Maintaining per-cpu shares distribution for group scheduling
 +       *
 +       * load_stamp is the last time we updated the load average
 +       * load_last is the last time we updated the load average and saw load
 +       * load_unacc_exec_time is currently unaccounted execution time
         */
 -      unsigned long shares;
 +      u64 load_avg;
 +      u64 load_period;
 +      u64 load_stamp, load_last, load_unacc_exec_time;
  
 -      /*
 -       * load.weight at the time we set shares
 -       */
 -      unsigned long rq_weight;
 +      unsigned long load_contribution;
  #endif
  #endif
  };
@@@ -606,14 -605,11 +606,14 @@@ static inline int cpu_of(struct rq *rq
   */
  static inline struct task_group *task_group(struct task_struct *p)
  {
 +      struct task_group *tg;
        struct cgroup_subsys_state *css;
  
        css = task_subsys_state_check(p, cpu_cgroup_subsys_id,
                        lockdep_is_held(&task_rq(p)->lock));
 -      return container_of(css, struct task_group, css);
 +      tg = container_of(css, struct task_group, css);
 +
 +      return autogroup_task_group(p, tg);
  }
  
  /* Change a task's cfs_rq and parent entity if it moves across CPUs/groups */
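
task_group() now funnels its answer through autogroup_task_group(), the hook for the per-session autogroup feature this merge pulls in: when autogrouping is enabled and the cgroup walk would place a task in root_task_group, the task is redirected to its session's autogroup instead. The real helper lives in the new kernel/sched_autogroup.{c,h}; a rough sketch of its effect:

    /* Sketch only; see kernel/sched_autogroup.c for the real logic. */
    static inline struct task_group *
    autogroup_task_group(struct task_struct *p, struct task_group *tg)
    {
            if (sysctl_sched_autogroup_enabled && tg == &root_task_group)
                    return p->signal->autogroup->tg; /* per-session group */

            return tg;
    }

This is what makes the root group's tasks compete as sessions rather than as one flat pool.
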
@@@ -741,7 -737,7 +741,7 @@@ sched_feat_write(struct file *filp, con
        buf[cnt] = 0;
        cmp = strstrip(buf);
  
 -      if (strncmp(buf, "NO_", 3) == 0) {
 +      if (strncmp(cmp, "NO_", 3) == 0) {
                neg = 1;
                cmp += 3;
        }
@@@ -797,6 -793,20 +797,6 @@@ late_initcall(sched_init_debug)
  const_debug unsigned int sysctl_sched_nr_migrate = 32;
  
  /*
 - * ratelimit for updating the group shares.
 - * default: 0.25ms
 - */
 -unsigned int sysctl_sched_shares_ratelimit = 250000;
 -unsigned int normalized_sysctl_sched_shares_ratelimit = 250000;
 -
 -/*
 - * Inject some fuzzyness into changing the per-cpu group shares
 - * this avoids remote rq-locks at the expense of fairness.
 - * default: 4
 - */
 -unsigned int sysctl_sched_shares_thresh = 4;
 -
 -/*
   * period over which we average the RT time consumption, measured
   * in ms.
   *
@@@ -1345,12 -1355,6 +1345,12 @@@ static inline void update_load_sub(stru
        lw->inv_weight = 0;
  }
  
 +static inline void update_load_set(struct load_weight *lw, unsigned long w)
 +{
 +      lw->weight = w;
 +      lw->inv_weight = 0;
 +}
 +
  /*
   * To aid in avoiding the subversion of "niceness" due to uneven distribution
   * of tasks with abnormal "nice" values across CPUs the contribution that
@@@ -1539,6 -1543,101 +1539,6 @@@ static unsigned long cpu_avg_load_per_t
  
  #ifdef CONFIG_FAIR_GROUP_SCHED
  
 -static __read_mostly unsigned long __percpu *update_shares_data;
 -
 -static void __set_se_shares(struct sched_entity *se, unsigned long shares);
 -
 -/*
 - * Calculate and set the cpu's group shares.
 - */
 -static void update_group_shares_cpu(struct task_group *tg, int cpu,
 -                                  unsigned long sd_shares,
 -                                  unsigned long sd_rq_weight,
 -                                  unsigned long *usd_rq_weight)
 -{
 -      unsigned long shares, rq_weight;
 -      int boost = 0;
 -
 -      rq_weight = usd_rq_weight[cpu];
 -      if (!rq_weight) {
 -              boost = 1;
 -              rq_weight = NICE_0_LOAD;
 -      }
 -
 -      /*
 -       *             \Sum_j shares_j * rq_weight_i
 -       * shares_i =  -----------------------------
 -       *                  \Sum_j rq_weight_j
 -       */
 -      shares = (sd_shares * rq_weight) / sd_rq_weight;
 -      shares = clamp_t(unsigned long, shares, MIN_SHARES, MAX_SHARES);
 -
 -      if (abs(shares - tg->se[cpu]->load.weight) >
 -                      sysctl_sched_shares_thresh) {
 -              struct rq *rq = cpu_rq(cpu);
 -              unsigned long flags;
 -
 -              raw_spin_lock_irqsave(&rq->lock, flags);
 -              tg->cfs_rq[cpu]->rq_weight = boost ? 0 : rq_weight;
 -              tg->cfs_rq[cpu]->shares = boost ? 0 : shares;
 -              __set_se_shares(tg->se[cpu], shares);
 -              raw_spin_unlock_irqrestore(&rq->lock, flags);
 -      }
 -}
 -
 -/*
 - * Re-compute the task group their per cpu shares over the given domain.
 - * This needs to be done in a bottom-up fashion because the rq weight of a
 - * parent group depends on the shares of its child groups.
 - */
 -static int tg_shares_up(struct task_group *tg, void *data)
 -{
 -      unsigned long weight, rq_weight = 0, sum_weight = 0, shares = 0;
 -      unsigned long *usd_rq_weight;
 -      struct sched_domain *sd = data;
 -      unsigned long flags;
 -      int i;
 -
 -      if (!tg->se[0])
 -              return 0;
 -
 -      local_irq_save(flags);
 -      usd_rq_weight = per_cpu_ptr(update_shares_data, smp_processor_id());
 -
 -      for_each_cpu(i, sched_domain_span(sd)) {
 -              weight = tg->cfs_rq[i]->load.weight;
 -              usd_rq_weight[i] = weight;
 -
 -              rq_weight += weight;
 -              /*
 -               * If there are currently no tasks on the cpu pretend there
 -               * is one of average load so that when a new task gets to
 -               * run here it will not get delayed by group starvation.
 -               */
 -              if (!weight)
 -                      weight = NICE_0_LOAD;
 -
 -              sum_weight += weight;
 -              shares += tg->cfs_rq[i]->shares;
 -      }
 -
 -      if (!rq_weight)
 -              rq_weight = sum_weight;
 -
 -      if ((!shares && rq_weight) || shares > tg->shares)
 -              shares = tg->shares;
 -
 -      if (!sd->parent || !(sd->parent->flags & SD_LOAD_BALANCE))
 -              shares = tg->shares;
 -
 -      for_each_cpu(i, sched_domain_span(sd))
 -              update_group_shares_cpu(tg, i, shares, rq_weight, usd_rq_weight);
 -
 -      local_irq_restore(flags);
 -
 -      return 0;
 -}
 -
  /*
   * Compute the cpu's hierarchical load factor for each task group.
   * This needs to be done in a top-down fashion because the load of a child
@@@ -1553,7 -1652,7 +1553,7 @@@ static int tg_load_down(struct task_gro
                load = cpu_rq(cpu)->load.weight;
        } else {
                load = tg->parent->cfs_rq[cpu]->h_load;
 -              load *= tg->cfs_rq[cpu]->shares;
 +              load *= tg->se[cpu]->load.weight;
                load /= tg->parent->cfs_rq[cpu]->load.weight + 1;
        }
  
        return 0;
  }
  
 -static void update_shares(struct sched_domain *sd)
 -{
 -      s64 elapsed;
 -      u64 now;
 -
 -      if (root_task_group_empty())
 -              return;
 -
 -      now = local_clock();
 -      elapsed = now - sd->last_update;
 -
 -      if (elapsed >= (s64)(u64)sysctl_sched_shares_ratelimit) {
 -              sd->last_update = now;
 -              walk_tg_tree(tg_nop, tg_shares_up, sd);
 -      }
 -}
 -
  static void update_h_load(long cpu)
  {
        walk_tg_tree(tg_load_down, tg_nop, (void *)cpu);
  }
  
 -#else
 -
 -static inline void update_shares(struct sched_domain *sd)
 -{
 -}
 -
  #endif
  
  #ifdef CONFIG_PREEMPT
@@@ -1688,6 -1810,15 +1688,6 @@@ static void double_rq_unlock(struct rq 
  
  #endif
  
 -#ifdef CONFIG_FAIR_GROUP_SCHED
 -static void cfs_rq_set_shares(struct cfs_rq *cfs_rq, unsigned long shares)
 -{
 -#ifdef CONFIG_SMP
 -      cfs_rq->shares = shares;
 -#endif
 -}
 -#endif
 -
  static void calc_load_account_idle(struct rq *this_rq);
  static void update_sysctl(void);
  static int get_update_sysctl_factor(void);
@@@ -1932,7 -2063,6 +1932,7 @@@ static void update_rq_clock_task(struc
  #include "sched_idletask.c"
  #include "sched_fair.c"
  #include "sched_rt.c"
 +#include "sched_autogroup.c"
  #include "sched_stoptask.c"
  #ifdef CONFIG_SCHED_DEBUG
  # include "sched_debug.c"
@@@ -2125,8 -2255,10 +2125,8 @@@ static int migration_cpu_stop(void *dat
   * The task's runqueue lock must be held.
   * Returns true if you have to wait for migration thread.
   */
 -static bool migrate_task(struct task_struct *p, int dest_cpu)
 +static bool migrate_task(struct task_struct *p, struct rq *rq)
  {
 -      struct rq *rq = task_rq(p);
 -
        /*
         * If the task is not on a runqueue (and not running), then
         * the next wake-up will properly place the task.
@@@ -2306,15 -2438,18 +2306,15 @@@ static int select_fallback_rq(int cpu, 
                return dest_cpu;
  
        /* No more Mr. Nice Guy. */
 -      if (unlikely(dest_cpu >= nr_cpu_ids)) {
 -              dest_cpu = cpuset_cpus_allowed_fallback(p);
 -              /*
 -               * Don't tell them about moving exiting tasks or
 -               * kernel threads (both mm NULL), since they never
 -               * leave kernel.
 -               */
 -              if (p->mm && printk_ratelimit()) {
 -                      printk(KERN_INFO "process %d (%s) no "
 -                             "longer affine to cpu%d\n",
 -                             task_pid_nr(p), p->comm, cpu);
 -              }
 +      dest_cpu = cpuset_cpus_allowed_fallback(p);
 +      /*
 +       * Don't tell them about moving exiting tasks or
 +       * kernel threads (both mm NULL), since they never
 +       * leave kernel.
 +       */
 +      if (p->mm && printk_ratelimit()) {
 +              printk(KERN_INFO "process %d (%s) no longer affine to cpu%d\n",
 +                              task_pid_nr(p), p->comm, cpu);
        }
  
        return dest_cpu;
@@@ -2505,7 -2640,7 +2505,7 @@@ out
   * try_to_wake_up_local - try to wake up a local task with rq lock held
   * @p: the thread to be awakened
   *
-  * Put @p on the run-queue if it's not alredy there.  The caller must
+  * Put @p on the run-queue if it's not already there.  The caller must
   * ensure that this_rq() is locked, @p is bound to this_rq() and not
   * the current task.  this_rq() stays locked over invocation.
   */
@@@ -2650,9 -2785,7 +2650,9 @@@ void sched_fork(struct task_struct *p, 
        /* Want to start with kernel preemption disabled. */
        task_thread_info(p)->preempt_count = 1;
  #endif
 +#ifdef CONFIG_SMP
        plist_node_init(&p->pushable_tasks, MAX_PRIO);
 +#endif
  
        put_cpu();
  }
@@@ -3416,7 -3549,7 +3416,7 @@@ void sched_exec(void
         * select_task_rq() can race against ->cpus_allowed
         */
        if (cpumask_test_cpu(dest_cpu, &p->cpus_allowed) &&
 -          likely(cpu_active(dest_cpu)) && migrate_task(p, dest_cpu)) {
 +          likely(cpu_active(dest_cpu)) && migrate_task(p, rq)) {
                struct migration_arg arg = { p, dest_cpu };
  
                task_rq_unlock(rq, &flags);
@@@ -4081,7 -4214,7 +4081,7 @@@ int mutex_spin_on_owner(struct mutex *l
                if (task_thread_info(rq->curr) != owner || need_resched())
                        return 0;
  
 -              cpu_relax();
 +              arch_mutex_cpu_relax();
        }
  
        return 1;
@@@ -4393,7 -4526,7 +4393,7 @@@ EXPORT_SYMBOL(wait_for_completion_inter
   * This waits for either a completion of a specific task to be signaled or for a
   * specified timeout to expire. It is interruptible. The timeout is in jiffies.
   */
 -unsigned long __sched
 +long __sched
  wait_for_completion_interruptible_timeout(struct completion *x,
                                          unsigned long timeout)
  {
@@@ -4426,7 -4559,7 +4426,7 @@@ EXPORT_SYMBOL(wait_for_completion_killa
   * signaled or for a specified timeout to expire. It can be
   * interrupted by a kill signal. The timeout is in jiffies.
   */
 -unsigned long __sched
 +long __sched
  wait_for_completion_killable_timeout(struct completion *x,
                                     unsigned long timeout)
  {
@@@ -4768,7 -4901,7 +4768,7 @@@ static bool check_same_owner(struct tas
  }
  
  static int __sched_setscheduler(struct task_struct *p, int policy,
 -                              struct sched_param *param, bool user)
 +                              const struct sched_param *param, bool user)
  {
        int retval, oldprio, oldpolicy = -1, on_rq, running;
        unsigned long flags;
@@@ -4923,7 -5056,7 +4923,7 @@@ recheck
   * NOTE that the task may be already dead.
   */
  int sched_setscheduler(struct task_struct *p, int policy,
 -                     struct sched_param *param)
 +                     const struct sched_param *param)
  {
        return __sched_setscheduler(p, policy, param, true);
  }
@@@ -4941,7 -5074,7 +4941,7 @@@ EXPORT_SYMBOL_GPL(sched_setscheduler)
   * but our caller might not have that capability.
   */
  int sched_setscheduler_nocheck(struct task_struct *p, int policy,
 -                             struct sched_param *param)
 +                             const struct sched_param *param)
  {
        return __sched_setscheduler(p, policy, param, false);
  }
@@@ -5457,7 -5590,7 +5457,7 @@@ void sched_show_task(struct task_struc
        unsigned state;
  
        state = p->state ? __ffs(p->state) + 1 : 0;
 -      printk(KERN_INFO "%-13.13s %c", p->comm,
 +      printk(KERN_INFO "%-15.15s %c", p->comm,
                state < sizeof(stat_nam) - 1 ? stat_nam[state] : '?');
  #if BITS_PER_LONG == 32
        if (state == TASK_RUNNING)
@@@ -5621,6 -5754,7 +5621,6 @@@ static void update_sysctl(void
        SET_SYSCTL(sched_min_granularity);
        SET_SYSCTL(sched_latency);
        SET_SYSCTL(sched_wakeup_granularity);
 -      SET_SYSCTL(sched_shares_ratelimit);
  #undef SET_SYSCTL
  }
  
@@@ -5696,7 -5830,7 +5696,7 @@@ again
                goto out;
  
        dest_cpu = cpumask_any_and(cpu_active_mask, new_mask);
 -      if (migrate_task(p, dest_cpu)) {
 +      if (migrate_task(p, rq)) {
                struct migration_arg arg = { p, dest_cpu };
                /* Need help from migration thread: drop lock and wait. */
                task_rq_unlock(rq, &flags);
@@@ -5778,20 -5912,29 +5778,20 @@@ static int migration_cpu_stop(void *dat
  }
  
  #ifdef CONFIG_HOTPLUG_CPU
 +
  /*
 - * Figure out where task on dead CPU should go, use force if necessary.
 + * Ensures that the idle task is using init_mm right before its cpu goes
 + * offline.
   */
 -void move_task_off_dead_cpu(int dead_cpu, struct task_struct *p)
 +void idle_task_exit(void)
  {
 -      struct rq *rq = cpu_rq(dead_cpu);
 -      int needs_cpu, uninitialized_var(dest_cpu);
 -      unsigned long flags;
 +      struct mm_struct *mm = current->active_mm;
  
 -      local_irq_save(flags);
 +      BUG_ON(cpu_online(smp_processor_id()));
  
 -      raw_spin_lock(&rq->lock);
 -      needs_cpu = (task_cpu(p) == dead_cpu) && (p->state != TASK_WAKING);
 -      if (needs_cpu)
 -              dest_cpu = select_fallback_rq(dead_cpu, p);
 -      raw_spin_unlock(&rq->lock);
 -      /*
 -       * It can only fail if we race with set_cpus_allowed(),
 -       * in the racer should migrate the task anyway.
 -       */
 -      if (needs_cpu)
 -              __migrate_task(p, dead_cpu, dest_cpu);
 -      local_irq_restore(flags);
 +      if (mm != &init_mm)
 +              switch_mm(mm, &init_mm, current);
 +      mmdrop(mm);
  }
  
  /*
  static void migrate_nr_uninterruptible(struct rq *rq_src)
  {
        struct rq *rq_dest = cpu_rq(cpumask_any(cpu_active_mask));
 -      unsigned long flags;
  
 -      local_irq_save(flags);
 -      double_rq_lock(rq_src, rq_dest);
        rq_dest->nr_uninterruptible += rq_src->nr_uninterruptible;
        rq_src->nr_uninterruptible = 0;
  }
  
  /*
 - * Schedules idle task to be the next runnable task on current CPU.
 - * It does so by boosting its priority to highest possible.
 - * Used by CPU offline code.
 + * remove the tasks which were accounted by rq from calc_load_tasks.
   */
 -void sched_idle_next(void)
 +static void calc_global_load_remove(struct rq *rq)
  {
 -      int this_cpu = smp_processor_id();
 -      struct rq *rq = cpu_rq(this_cpu);
 -      struct task_struct *p = rq->idle;
 -      unsigned long flags;
 -
 -      /* cpu has to be offline */
 -      BUG_ON(cpu_online(this_cpu));
 -
 -      /*
 -       * Strictly not necessary since rest of the CPUs are stopped by now
 -       * and interrupts disabled on the current cpu.
 -       */
 -      raw_spin_lock_irqsave(&rq->lock, flags);
 -
 -      __setscheduler(rq, p, SCHED_FIFO, MAX_RT_PRIO-1);
 -
 -      activate_task(rq, p, 0);
 -
 -      raw_spin_unlock_irqrestore(&rq->lock, flags);
 +      atomic_long_sub(rq->calc_load_active, &calc_load_tasks);
 +      rq->calc_load_active = 0;
  }
  
  /*
 - * Ensures that the idle task is using init_mm right before its cpu goes
 - * offline.
 + * Migrate all tasks from the rq, sleeping tasks will be migrated by
 + * try_to_wake_up()->select_task_rq().
 + *
 + * Called with rq->lock held even though we're in stop_machine() and
 + * there's no concurrency possible, we hold the required locks anyway
 + * because of lock validation efforts.
   */
 -void idle_task_exit(void)
 -{
 -      struct mm_struct *mm = current->active_mm;
 -
 -      BUG_ON(cpu_online(smp_processor_id()));
 -
 -      if (mm != &init_mm)
 -              switch_mm(mm, &init_mm, current);
 -      mmdrop(mm);
 -}
 -
 -/* called under rq->lock with disabled interrupts */
 -static void migrate_dead(unsigned int dead_cpu, struct task_struct *p)
 +static void migrate_tasks(unsigned int dead_cpu)
  {
        struct rq *rq = cpu_rq(dead_cpu);
 -
 -      /* Must be exiting, otherwise would be on tasklist. */
 -      BUG_ON(!p->exit_state);
 -
 -      /* Cannot have done final schedule yet: would have vanished. */
 -      BUG_ON(p->state == TASK_DEAD);
 -
 -      get_task_struct(p);
 +      struct task_struct *next, *stop = rq->stop;
 +      int dest_cpu;
  
        /*
 -       * Drop lock around migration; if someone else moves it,
 -       * that's OK. No task can be added to this CPU, so iteration is
 -       * fine.
 +       * Fudge the rq selection such that the below task selection loop
 +       * doesn't get stuck on the currently eligible stop task.
 +       *
 +       * We're currently inside stop_machine() and the rq is either stuck
 +       * in the stop_machine_cpu_stop() loop, or we're executing this code,
 +       * either way we should never end up calling schedule() until we're
 +       * done here.
         */
 -      raw_spin_unlock_irq(&rq->lock);
 -      move_task_off_dead_cpu(dead_cpu, p);
 -      raw_spin_lock_irq(&rq->lock);
 -
 -      put_task_struct(p);
 -}
 -
 -/* release_task() removes task from tasklist, so we won't find dead tasks. */
 -static void migrate_dead_tasks(unsigned int dead_cpu)
 -{
 -      struct rq *rq = cpu_rq(dead_cpu);
 -      struct task_struct *next;
 +      rq->stop = NULL;
  
        for ( ; ; ) {
 -              if (!rq->nr_running)
 +              /*
 +               * There's this thread running, bail when that's the only
 +               * remaining thread.
 +               */
 +              if (rq->nr_running == 1)
                        break;
 +
                next = pick_next_task(rq);
 -              if (!next)
 -                      break;
 +              BUG_ON(!next);
                next->sched_class->put_prev_task(rq, next);
 -              migrate_dead(dead_cpu, next);
  
 +              /* Find suitable destination for @next, with force if needed. */
 +              dest_cpu = select_fallback_rq(dead_cpu, next);
 +              raw_spin_unlock(&rq->lock);
 +
 +              __migrate_task(next, dead_cpu, dest_cpu);
 +
 +              raw_spin_lock(&rq->lock);
        }
 -}
  
 -/*
 - * remove the tasks which were accounted by rq from calc_load_tasks.
 - */
 -static void calc_global_load_remove(struct rq *rq)
 -{
 -      atomic_long_sub(rq->calc_load_active, &calc_load_tasks);
 -      rq->calc_load_active = 0;
 +      rq->stop = stop;
  }
 +
  #endif /* CONFIG_HOTPLUG_CPU */
  
  #if defined(CONFIG_SCHED_DEBUG) && defined(CONFIG_SYSCTL)
@@@ -6076,13 -6278,15 +6076,13 @@@ migration_call(struct notifier_block *n
        unsigned long flags;
        struct rq *rq = cpu_rq(cpu);
  
 -      switch (action) {
 +      switch (action & ~CPU_TASKS_FROZEN) {
  
        case CPU_UP_PREPARE:
 -      case CPU_UP_PREPARE_FROZEN:
                rq->calc_load_update = calc_load_update;
                break;
  
        case CPU_ONLINE:
 -      case CPU_ONLINE_FROZEN:
                /* Update our root-domain */
                raw_spin_lock_irqsave(&rq->lock, flags);
                if (rq->rd) {
                break;
  
  #ifdef CONFIG_HOTPLUG_CPU
 -      case CPU_DEAD:
 -      case CPU_DEAD_FROZEN:
 -              migrate_live_tasks(cpu);
 -              /* Idle task back to normal (off runqueue, low prio) */
 -              raw_spin_lock_irq(&rq->lock);
 -              deactivate_task(rq, rq->idle, 0);
 -              __setscheduler(rq, rq->idle, SCHED_NORMAL, 0);
 -              rq->idle->sched_class = &idle_sched_class;
 -              migrate_dead_tasks(cpu);
 -              raw_spin_unlock_irq(&rq->lock);
 -              migrate_nr_uninterruptible(rq);
 -              BUG_ON(rq->nr_running != 0);
 -              calc_global_load_remove(rq);
 -              break;
 -
        case CPU_DYING:
 -      case CPU_DYING_FROZEN:
                /* Update our root-domain */
                raw_spin_lock_irqsave(&rq->lock, flags);
                if (rq->rd) {
                        BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span));
                        set_rq_offline(rq);
                }
 +              migrate_tasks(cpu);
 +              BUG_ON(rq->nr_running != 1); /* the migration thread */
                raw_spin_unlock_irqrestore(&rq->lock, flags);
 +
 +              migrate_nr_uninterruptible(rq);
 +              calc_global_load_remove(rq);
                break;
  #endif
        }
@@@ -7837,16 -8052,18 +7837,16 @@@ static void init_rt_rq(struct rt_rq *rt
  
  #ifdef CONFIG_FAIR_GROUP_SCHED
  static void init_tg_cfs_entry(struct task_group *tg, struct cfs_rq *cfs_rq,
 -                              struct sched_entity *se, int cpu, int add,
 +                              struct sched_entity *se, int cpu,
                                struct sched_entity *parent)
  {
        struct rq *rq = cpu_rq(cpu);
        tg->cfs_rq[cpu] = cfs_rq;
        init_cfs_rq(cfs_rq, rq);
        cfs_rq->tg = tg;
 -      if (add)
 -              list_add(&cfs_rq->leaf_cfs_rq_list, &rq->leaf_cfs_rq_list);
  
        tg->se[cpu] = se;
 -      /* se could be NULL for init_task_group */
 +      /* se could be NULL for root_task_group */
        if (!se)
                return;
  
                se->cfs_rq = parent->my_q;
  
        se->my_q = cfs_rq;
 -      se->load.weight = tg->shares;
 -      se->load.inv_weight = 0;
 +      update_load_set(&se->load, 0);
        se->parent = parent;
  }
  #endif
  
  #ifdef CONFIG_RT_GROUP_SCHED
  static void init_tg_rt_entry(struct task_group *tg, struct rt_rq *rt_rq,
 -              struct sched_rt_entity *rt_se, int cpu, int add,
 +              struct sched_rt_entity *rt_se, int cpu,
                struct sched_rt_entity *parent)
  {
        struct rq *rq = cpu_rq(cpu);
        init_rt_rq(rt_rq, rq);
        rt_rq->tg = tg;
        rt_rq->rt_runtime = tg->rt_bandwidth.rt_runtime;
 -      if (add)
 -              list_add(&rt_rq->leaf_rt_rq_list, &rq->leaf_rt_rq_list);
  
        tg->rt_se[cpu] = rt_se;
        if (!rt_se)
@@@ -7906,18 -8126,18 +7906,18 @@@ void __init sched_init(void
                ptr = (unsigned long)kzalloc(alloc_size, GFP_NOWAIT);
  
  #ifdef CONFIG_FAIR_GROUP_SCHED
 -              init_task_group.se = (struct sched_entity **)ptr;
 +              root_task_group.se = (struct sched_entity **)ptr;
                ptr += nr_cpu_ids * sizeof(void **);
  
 -              init_task_group.cfs_rq = (struct cfs_rq **)ptr;
 +              root_task_group.cfs_rq = (struct cfs_rq **)ptr;
                ptr += nr_cpu_ids * sizeof(void **);
  
  #endif /* CONFIG_FAIR_GROUP_SCHED */
  #ifdef CONFIG_RT_GROUP_SCHED
 -              init_task_group.rt_se = (struct sched_rt_entity **)ptr;
 +              root_task_group.rt_se = (struct sched_rt_entity **)ptr;
                ptr += nr_cpu_ids * sizeof(void **);
  
 -              init_task_group.rt_rq = (struct rt_rq **)ptr;
 +              root_task_group.rt_rq = (struct rt_rq **)ptr;
                ptr += nr_cpu_ids * sizeof(void **);
  
  #endif /* CONFIG_RT_GROUP_SCHED */
                        global_rt_period(), global_rt_runtime());
  
  #ifdef CONFIG_RT_GROUP_SCHED
 -      init_rt_bandwidth(&init_task_group.rt_bandwidth,
 +      init_rt_bandwidth(&root_task_group.rt_bandwidth,
                        global_rt_period(), global_rt_runtime());
  #endif /* CONFIG_RT_GROUP_SCHED */
  
  #ifdef CONFIG_CGROUP_SCHED
 -      list_add(&init_task_group.list, &task_groups);
 -      INIT_LIST_HEAD(&init_task_group.children);
 -
 +      list_add(&root_task_group.list, &task_groups);
 +      INIT_LIST_HEAD(&root_task_group.children);
 +      autogroup_init(&init_task);
  #endif /* CONFIG_CGROUP_SCHED */
  
 -#if defined CONFIG_FAIR_GROUP_SCHED && defined CONFIG_SMP
 -      update_shares_data = __alloc_percpu(nr_cpu_ids * sizeof(unsigned long),
 -                                          __alignof__(unsigned long));
 -#endif
        for_each_possible_cpu(i) {
                struct rq *rq;
  
                init_cfs_rq(&rq->cfs, rq);
                init_rt_rq(&rq->rt, rq);
  #ifdef CONFIG_FAIR_GROUP_SCHED
 -              init_task_group.shares = init_task_group_load;
 +              root_task_group.shares = root_task_group_load;
                INIT_LIST_HEAD(&rq->leaf_cfs_rq_list);
 -#ifdef CONFIG_CGROUP_SCHED
                /*
 -               * How much cpu bandwidth does init_task_group get?
 +               * How much cpu bandwidth does root_task_group get?
                 *
                 * In case of task-groups formed thr' the cgroup filesystem, it
                 * gets 100% of the cpu resources in the system. This overall
                 * system cpu resource is divided among the tasks of
 -               * init_task_group and its child task-groups in a fair manner,
 +               * root_task_group and its child task-groups in a fair manner,
                 * based on each entity's (task or task-group's) weight
                 * (se->load.weight).
                 *
 -               * In other words, if init_task_group has 10 tasks of weight
 +               * In other words, if root_task_group has 10 tasks of weight
                 * 1024 and two child groups A0 and A1 (of weight 1024 each),
                 * then A0's share of the cpu resource is:
                 *
                 *      A0's bandwidth = 1024 / (10*1024 + 1024 + 1024) = 8.33%
                 *
 -               * We achieve this by letting init_task_group's tasks sit
 -               * directly in rq->cfs (i.e init_task_group->se[] = NULL).
 +               * We achieve this by letting root_task_group's tasks sit
 +               * directly in rq->cfs (i.e root_task_group->se[] = NULL).
                 */
 -              init_tg_cfs_entry(&init_task_group, &rq->cfs, NULL, i, 1, NULL);
 -#endif
 +              init_tg_cfs_entry(&root_task_group, &rq->cfs, NULL, i, NULL);
  #endif /* CONFIG_FAIR_GROUP_SCHED */
  
                rq->rt.rt_runtime = def_rt_bandwidth.rt_runtime;
  #ifdef CONFIG_RT_GROUP_SCHED
                INIT_LIST_HEAD(&rq->leaf_rt_rq_list);
 -#ifdef CONFIG_CGROUP_SCHED
 -              init_tg_rt_entry(&init_task_group, &rq->rt, NULL, i, 1, NULL);
 -#endif
 +              init_tg_rt_entry(&root_task_group, &rq->rt, NULL, i, NULL);
  #endif
  
                for (j = 0; j < CPU_LOAD_IDX_MAX; j++)
                zalloc_cpumask_var(&cpu_isolated_map, GFP_NOWAIT);
  #endif /* SMP */
  
 -      perf_event_init();
 -
        scheduler_running = 1;
  }
  
@@@ -8258,7 -8488,7 +8258,7 @@@ int alloc_fair_sched_group(struct task_
                if (!se)
                        goto err_free_rq;
  
 -              init_tg_cfs_entry(tg, cfs_rq, se, i, 0, parent->se[i]);
 +              init_tg_cfs_entry(tg, cfs_rq, se, i, parent->se[i]);
        }
  
        return 1;
        return 0;
  }
  
 -static inline void register_fair_sched_group(struct task_group *tg, int cpu)
 -{
 -      list_add_rcu(&tg->cfs_rq[cpu]->leaf_cfs_rq_list,
 -                      &cpu_rq(cpu)->leaf_cfs_rq_list);
 -}
 -
  static inline void unregister_fair_sched_group(struct task_group *tg, int cpu)
  {
 -      list_del_rcu(&tg->cfs_rq[cpu]->leaf_cfs_rq_list);
 +      struct rq *rq = cpu_rq(cpu);
 +      unsigned long flags;
 +
 +      /*
 +       * Only empty task groups can be destroyed; so we can speculatively
 +       * check on_list without danger of it being re-added.
 +       */
 +      if (!tg->cfs_rq[cpu]->on_list)
 +              return;
 +
 +      raw_spin_lock_irqsave(&rq->lock, flags);
 +      list_del_leaf_cfs_rq(tg->cfs_rq[cpu]);
 +      raw_spin_unlock_irqrestore(&rq->lock, flags);
  }
 #else /* !CONFIG_FAIR_GROUP_SCHED */
  static inline void free_fair_sched_group(struct task_group *tg)
@@@ -8296,6 -8520,10 +8296,6 @@@ int alloc_fair_sched_group(struct task_
        return 1;
  }
  
 -static inline void register_fair_sched_group(struct task_group *tg, int cpu)
 -{
 -}
 -
  static inline void unregister_fair_sched_group(struct task_group *tg, int cpu)
  {
  }
@@@ -8350,7 -8578,7 +8350,7 @@@ int alloc_rt_sched_group(struct task_gr
                if (!rt_se)
                        goto err_free_rq;
  
 -              init_tg_rt_entry(tg, rt_rq, rt_se, i, 0, parent->rt_se[i]);
 +              init_tg_rt_entry(tg, rt_rq, rt_se, i, parent->rt_se[i]);
        }
  
        return 1;
@@@ -8360,6 -8588,17 +8360,6 @@@ err_free_rq
  err:
        return 0;
  }
 -
 -static inline void register_rt_sched_group(struct task_group *tg, int cpu)
 -{
 -      list_add_rcu(&tg->rt_rq[cpu]->leaf_rt_rq_list,
 -                      &cpu_rq(cpu)->leaf_rt_rq_list);
 -}
 -
 -static inline void unregister_rt_sched_group(struct task_group *tg, int cpu)
 -{
 -      list_del_rcu(&tg->rt_rq[cpu]->leaf_rt_rq_list);
 -}
  #else /* !CONFIG_RT_GROUP_SCHED */
  static inline void free_rt_sched_group(struct task_group *tg)
  {
@@@ -8370,6 -8609,14 +8370,6 @@@ int alloc_rt_sched_group(struct task_gr
  {
        return 1;
  }
 -
 -static inline void register_rt_sched_group(struct task_group *tg, int cpu)
 -{
 -}
 -
 -static inline void unregister_rt_sched_group(struct task_group *tg, int cpu)
 -{
 -}
  #endif /* CONFIG_RT_GROUP_SCHED */
  
  #ifdef CONFIG_CGROUP_SCHED
@@@ -8377,7 -8624,6 +8377,7 @@@ static void free_sched_group(struct tas
  {
        free_fair_sched_group(tg);
        free_rt_sched_group(tg);
 +      autogroup_free(tg);
        kfree(tg);
  }
  
@@@ -8386,6 -8632,7 +8386,6 @@@ struct task_group *sched_create_group(s
  {
        struct task_group *tg;
        unsigned long flags;
 -      int i;
  
        tg = kzalloc(sizeof(*tg), GFP_KERNEL);
        if (!tg)
                goto err;
  
        spin_lock_irqsave(&task_group_lock, flags);
 -      for_each_possible_cpu(i) {
 -              register_fair_sched_group(tg, i);
 -              register_rt_sched_group(tg, i);
 -      }
        list_add_rcu(&tg->list, &task_groups);
  
        WARN_ON(!parent); /* root should already exist */
@@@ -8427,11 -8678,11 +8427,11 @@@ void sched_destroy_group(struct task_gr
        unsigned long flags;
        int i;
  
 -      spin_lock_irqsave(&task_group_lock, flags);
 -      for_each_possible_cpu(i) {
 +      /* end participation in shares distribution */
 +      for_each_possible_cpu(i)
                unregister_fair_sched_group(tg, i);
 -              unregister_rt_sched_group(tg, i);
 -      }
 +
 +      spin_lock_irqsave(&task_group_lock, flags);
        list_del_rcu(&tg->list);
        list_del_rcu(&tg->siblings);
        spin_unlock_irqrestore(&task_group_lock, flags);
@@@ -8478,6 -8729,33 +8478,6 @@@ void sched_move_task(struct task_struc
  #endif /* CONFIG_CGROUP_SCHED */
  
  #ifdef CONFIG_FAIR_GROUP_SCHED
 -static void __set_se_shares(struct sched_entity *se, unsigned long shares)
 -{
 -      struct cfs_rq *cfs_rq = se->cfs_rq;
 -      int on_rq;
 -
 -      on_rq = se->on_rq;
 -      if (on_rq)
 -              dequeue_entity(cfs_rq, se, 0);
 -
 -      se->load.weight = shares;
 -      se->load.inv_weight = 0;
 -
 -      if (on_rq)
 -              enqueue_entity(cfs_rq, se, 0);
 -}
 -
 -static void set_se_shares(struct sched_entity *se, unsigned long shares)
 -{
 -      struct cfs_rq *cfs_rq = se->cfs_rq;
 -      struct rq *rq = cfs_rq->rq;
 -      unsigned long flags;
 -
 -      raw_spin_lock_irqsave(&rq->lock, flags);
 -      __set_se_shares(se, shares);
 -      raw_spin_unlock_irqrestore(&rq->lock, flags);
 -}
 -
  static DEFINE_MUTEX(shares_mutex);
  
  int sched_group_set_shares(struct task_group *tg, unsigned long shares)
        if (tg->shares == shares)
                goto done;
  
 -      spin_lock_irqsave(&task_group_lock, flags);
 -      for_each_possible_cpu(i)
 -              unregister_fair_sched_group(tg, i);
 -      list_del_rcu(&tg->siblings);
 -      spin_unlock_irqrestore(&task_group_lock, flags);
 -
 -      /* wait for any ongoing reference to this group to finish */
 -      synchronize_sched();
 -
 -      /*
 -       * Now we are free to modify the group's share on each cpu
 -       * w/o tripping rebalance_share or load_balance_fair.
 -       */
        tg->shares = shares;
        for_each_possible_cpu(i) {
 -              /*
 -               * force a rebalance
 -               */
 -              cfs_rq_set_shares(tg->cfs_rq[i], 0);
 -              set_se_shares(tg->se[i], shares);
 +              struct rq *rq = cpu_rq(i);
 +              struct sched_entity *se;
 +
 +              se = tg->se[i];
 +              /* Propagate contribution to hierarchy */
 +              raw_spin_lock_irqsave(&rq->lock, flags);
 +              for_each_sched_entity(se)
 +                      update_cfs_shares(group_cfs_rq(se), 0);
 +              raw_spin_unlock_irqrestore(&rq->lock, flags);
        }
  
 -      /*
 -       * Enable load balance activity on this group, by inserting it back on
 -       * each cpu's rq->leaf_cfs_rq_list.
 -       */
 -      spin_lock_irqsave(&task_group_lock, flags);
 -      for_each_possible_cpu(i)
 -              register_fair_sched_group(tg, i);
 -      list_add_rcu(&tg->siblings, &tg->parent->children);
 -      spin_unlock_irqrestore(&task_group_lock, flags);
  done:
        mutex_unlock(&shares_mutex);
        return 0;
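
The rewritten sched_group_set_shares() above no longer unhooks and re-registers
the group around the update; it takes each cpu's rq->lock and lets
update_cfs_shares() propagate the new weight up the entity hierarchy. For
context, the usual caller is the cpu cgroup's shares attribute; a minimal
sketch of that entry point (names assumed from the 2.6.37-era cpu controller,
not part of this diff):

    /* Sketch: cgroup write handler funnelling into sched_group_set_shares().
     * cgroup_tg() maps a cgroup to its task_group; treat as illustrative.
     */
    static int cpu_shares_write_u64(struct cgroup *cgrp, struct cftype *cftype,
                                    u64 shareval)
    {
            return sched_group_set_shares(cgroup_tg(cgrp),
                                          (unsigned long)shareval);
    }
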
@@@ -8811,7 -9107,7 +8811,7 @@@ cpu_cgroup_create(struct cgroup_subsys 
  
        if (!cgrp->parent) {
                /* This is early initialization for the top cgroup */
 -              return &init_task_group.css;
 +              return &root_task_group.css;
        }
  
        parent = cgroup_tg(cgrp->parent);
@@@ -9238,3 -9534,72 +9238,3 @@@ struct cgroup_subsys cpuacct_subsys = 
  };
  #endif        /* CONFIG_CGROUP_CPUACCT */
  
 -#ifndef CONFIG_SMP
 -
 -void synchronize_sched_expedited(void)
 -{
 -      barrier();
 -}
 -EXPORT_SYMBOL_GPL(synchronize_sched_expedited);
 -
 -#else /* #ifndef CONFIG_SMP */
 -
 -static atomic_t synchronize_sched_expedited_count = ATOMIC_INIT(0);
 -
 -static int synchronize_sched_expedited_cpu_stop(void *data)
 -{
 -      /*
 -       * There must be a full memory barrier on each affected CPU
 -       * between the time that try_stop_cpus() is called and the
 -       * time that it returns.
 -       *
 -       * In the current initial implementation of cpu_stop, the
 -       * above condition is already met when the control reaches
 -       * this point and the following smp_mb() is not strictly
 -       * necessary.  Do smp_mb() anyway for documentation and
 -       * robustness against future implementation changes.
 -       */
 -      smp_mb(); /* See above comment block. */
 -      return 0;
 -}
 -
 -/*
 - * Wait for an rcu-sched grace period to elapse, but use "big hammer"
 - * approach to force grace period to end quickly.  This consumes
 - * significant time on all CPUs, and is thus not recommended for
 - * any sort of common-case code.
 - *
 - * Note that it is illegal to call this function while holding any
 - * lock that is acquired by a CPU-hotplug notifier.  Failing to
 - * observe this restriction will result in deadlock.
 - */
 -void synchronize_sched_expedited(void)
 -{
 -      int snap, trycount = 0;
 -
 -      smp_mb();  /* ensure prior mod happens before capturing snap. */
 -      snap = atomic_read(&synchronize_sched_expedited_count) + 1;
 -      get_online_cpus();
 -      while (try_stop_cpus(cpu_online_mask,
 -                           synchronize_sched_expedited_cpu_stop,
 -                           NULL) == -EAGAIN) {
 -              put_online_cpus();
 -              if (trycount++ < 10)
 -                      udelay(trycount * num_online_cpus());
 -              else {
 -                      synchronize_sched();
 -                      return;
 -              }
 -              if (atomic_read(&synchronize_sched_expedited_count) - snap > 0) {
 -                      smp_mb(); /* ensure test happens before caller kfree */
 -                      return;
 -              }
 -              get_online_cpus();
 -      }
 -      atomic_inc(&synchronize_sched_expedited_count);
 -      smp_mb__after_atomic_inc(); /* ensure post-GP actions seen after GP. */
 -      put_online_cpus();
 -}
 -EXPORT_SYMBOL_GPL(synchronize_sched_expedited);
 -
 -#endif /* #else #ifndef CONFIG_SMP */
diff --combined kernel/sysctl_binary.c
@@@ -136,6 -136,7 +136,6 @@@ static const struct bin_table bin_kern_
        { CTL_INT,      KERN_IA64_UNALIGNED,            "ignore-unaligned-usertrap" },
        { CTL_INT,      KERN_COMPAT_LOG,                "compat-log" },
        { CTL_INT,      KERN_MAX_LOCK_DEPTH,            "max_lock_depth" },
 -      { CTL_INT,      KERN_NMI_WATCHDOG,              "nmi_watchdog" },
        { CTL_INT,      KERN_PANIC_ON_NMI,              "panic_on_unrecovered_nmi" },
        {}
  };
@@@ -1192,7 -1193,7 +1192,7 @@@ static ssize_t bin_dn_node_address(stru
  
                buf[result] = '\0';
  
-               /* Convert the decnet addresss to binary */
+               /* Convert the decnet address to binary */
                result = -EIO;
                nodep = strchr(buf, '.') + 1;
                if (!nodep)
@@@ -152,7 -152,6 +152,7 @@@ clocks_calc_mult_shift(u32 *mult, u32 *
         */
        for (sft = 32; sft > 0; sft--) {
                tmp = (u64) to << sft;
 +              tmp += from / 2;
                do_div(tmp, from);
                if ((tmp >> sftacc) == 0)
                        break;
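
The added "tmp += from / 2;" makes the mult computation round to nearest
rather than truncate. A standalone userspace demo of the effect (the 24 MHz
clock and the shift value are assumptions for illustration):

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
            uint64_t from = 24000000, to = 1000000000;  /* 24 MHz -> ns */
            unsigned int sft = 10;
            uint64_t truncated = (to << sft) / from;
            uint64_t rounded   = ((to << sft) + from / 2) / from;

            /* prints: mult truncated=42666 rounded=42667 */
            printf("mult truncated=%llu rounded=%llu\n",
                   (unsigned long long)truncated, (unsigned long long)rounded);
            return 0;
    }
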
@@@ -679,7 -678,7 +679,7 @@@ EXPORT_SYMBOL_GPL(__clocksource_updatef
  int __clocksource_register_scale(struct clocksource *cs, u32 scale, u32 freq)
  {
  
-       /* Intialize mult/shift and max_idle_ns */
+       /* Initialize mult/shift and max_idle_ns */
        __clocksource_updatefreq_scale(cs, scale, freq);
  
        /* Add clocksource to the clocksource list */
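
A hedged sketch of the driver-side use of the scaled registration path (the
counter, its base address and its rating are assumptions, not from this diff):

    /* Hypothetical free-running 32.768 kHz counter, registered in Hz. */
    static void __iomem *my32k_base;            /* assumed ioremapped MMIO */

    static cycle_t my32k_read(struct clocksource *cs)
    {
            return (cycle_t)readl(my32k_base);
    }

    static struct clocksource my32k_cs = {
            .name   = "my32k",
            .rating = 200,
            .read   = my32k_read,
            .mask   = CLOCKSOURCE_MASK(32),
            .flags  = CLOCK_SOURCE_IS_CONTINUOUS,
    };

    static int __init my32k_init(void)
    {
            /* mult/shift and max_idle_ns derived from scale=1, freq=32768 */
            return __clocksource_register_scale(&my32k_cs, 1, 32768);
    }
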
diff --combined lib/nlattr.c
@@@ -15,7 -15,7 +15,7 @@@
  #include <linux/types.h>
  #include <net/netlink.h>
  
 -static u16 nla_attr_minlen[NLA_TYPE_MAX+1] __read_mostly = {
 +static const u16 nla_attr_minlen[NLA_TYPE_MAX+1] = {
        [NLA_U8]        = sizeof(u8),
        [NLA_U16]       = sizeof(u16),
        [NLA_U32]       = sizeof(u32),
@@@ -23,7 -23,7 +23,7 @@@
        [NLA_NESTED]    = NLA_HDRLEN,
  };
  
 -static int validate_nla(struct nlattr *nla, int maxtype,
 +static int validate_nla(const struct nlattr *nla, int maxtype,
                        const struct nla_policy *policy)
  {
        const struct nla_policy *pt;
   *
   * Returns 0 on success or a negative error code.
   */
 -int nla_validate(struct nlattr *head, int len, int maxtype,
 +int nla_validate(const struct nlattr *head, int len, int maxtype,
                 const struct nla_policy *policy)
  {
 -      struct nlattr *nla;
 +      const struct nlattr *nla;
        int rem, err;
  
        nla_for_each_attr(nla, head, len, rem) {
@@@ -167,16 -167,16 +167,16 @@@ nla_policy_len(const struct nla_policy 
   * @policy: validation policy
   *
   * Parses a stream of attributes and stores a pointer to each attribute in
-  * the tb array accessable via the attribute type. Attributes with a type
+  * the tb array accessible via the attribute type. Attributes with a type
   * exceeding maxtype will be silently ignored for backwards compatibility
   * reasons. policy may be set to NULL if no validation is required.
   *
   * Returns 0 on success or a negative error code.
   */
 -int nla_parse(struct nlattr *tb[], int maxtype, struct nlattr *head, int len,
 -            const struct nla_policy *policy)
 +int nla_parse(struct nlattr **tb, int maxtype, const struct nlattr *head,
 +            int len, const struct nla_policy *policy)
  {
 -      struct nlattr *nla;
 +      const struct nlattr *nla;
        int rem, err;
  
        memset(tb, 0, sizeof(struct nlattr *) * (maxtype + 1));
                                        goto errout;
                        }
  
 -                      tb[type] = nla;
 +                      tb[type] = (struct nlattr *)nla;
                }
        }
  
@@@ -212,14 -212,14 +212,14 @@@ errout
   *
   * Returns the first attribute in the stream matching the specified type.
   */
 -struct nlattr *nla_find(struct nlattr *head, int len, int attrtype)
 +struct nlattr *nla_find(const struct nlattr *head, int len, int attrtype)
  {
 -      struct nlattr *nla;
 +      const struct nlattr *nla;
        int rem;
  
        nla_for_each_attr(nla, head, len, rem)
                if (nla_type(nla) == attrtype)
 -                      return nla;
 +                      return (struct nlattr *)nla;
  
        return NULL;
  }
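
With the constification above, callers can hand nla_parse() attribute data
straight from a const message buffer. A minimal sketch (the MY_ATTR_*
attribute set and the policy are assumptions for illustration):

    enum { MY_ATTR_UNSPEC, MY_ATTR_PORT, MY_ATTR_NAME, __MY_ATTR_MAX };
    #define MY_ATTR_MAX (__MY_ATTR_MAX - 1)

    static const struct nla_policy my_policy[MY_ATTR_MAX + 1] = {
            [MY_ATTR_PORT] = { .type = NLA_U16 },
            [MY_ATTR_NAME] = { .type = NLA_NUL_STRING, .len = 15 },
    };

    static int my_parse(const struct nlmsghdr *nlh)
    {
            struct nlattr *tb[MY_ATTR_MAX + 1];
            int err;

            err = nla_parse(tb, MY_ATTR_MAX, nlmsg_attrdata(nlh, 0),
                            nlmsg_attrlen(nlh, 0), my_policy);
            if (err < 0)
                    return err;
            if (tb[MY_ATTR_PORT])
                    pr_info("port=%u\n", nla_get_u16(tb[MY_ATTR_PORT]));
            return 0;
    }
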
diff --combined mm/page-writeback.c
@@@ -404,7 -404,7 +404,7 @@@ unsigned long determine_dirtyable_memor
   * - vm.dirty_background_ratio  or  vm.dirty_background_bytes
   * - vm.dirty_ratio             or  vm.dirty_bytes
   * The dirty limits will be lifted by 1/4 for PF_LESS_THROTTLE (ie. nfsd) and
-  * runtime tasks.
+  * real-time tasks.
   */
  void global_dirty_limits(unsigned long *pbackground, unsigned long *pdirty)
  {
@@@ -563,7 -563,7 +563,7 @@@ static void balance_dirty_pages(struct 
                                break;          /* We've done our duty */
                }
                trace_wbc_balance_dirty_wait(&wbc, bdi);
 -              __set_current_state(TASK_INTERRUPTIBLE);
 +              __set_current_state(TASK_UNINTERRUPTIBLE);
                io_schedule_timeout(pause);
  
                /*
diff --combined mm/percpu.c
@@@ -258,7 -258,7 +258,7 @@@ static void __maybe_unused pcpu_next_po
  
  /*
   * (Un)populated page region iterators.  Iterate over (un)populated
-  * page regions betwen @start and @end in @chunk.  @rs and @re should
+  * page regions between @start and @end in @chunk.  @rs and @re should
   * be integer variables and will be set to start and end page index of
   * the current region.
   */
@@@ -293,8 -293,12 +293,8 @@@ static void *pcpu_mem_alloc(size_t size
  
        if (size <= PAGE_SIZE)
                return kzalloc(size, GFP_KERNEL);
 -      else {
 -              void *ptr = vmalloc(size);
 -              if (ptr)
 -                      memset(ptr, 0, size);
 -              return ptr;
 -      }
 +      else
 +              return vzalloc(size);
  }
  
  /**
@@@ -1264,7 -1268,7 +1264,7 @@@ int __init pcpu_setup_first_chunk(cons
  
        /* we're done parsing the input, undefine BUG macro and dump config */
  #undef PCPU_SETUP_BUG_ON
 -      pcpu_dump_alloc_info(KERN_INFO, ai);
 +      pcpu_dump_alloc_info(KERN_DEBUG, ai);
  
        pcpu_nr_groups = ai->nr_groups;
        pcpu_group_offsets = group_offsets;
diff --combined net/Kconfig
@@@ -214,18 -214,12 +214,18 @@@ source "net/ieee802154/Kconfig
  source "net/sched/Kconfig"
  source "net/dcb/Kconfig"
  source "net/dns_resolver/Kconfig"
 +source "net/batman-adv/Kconfig"
  
  config RPS
        boolean
        depends on SMP && SYSFS && USE_GENERIC_SMP_HELPERS
        default y
  
 +config XPS
 +      boolean
 +      depends on SMP && SYSFS && USE_GENERIC_SMP_HELPERS
 +      default y
 +
  menu "Network testing"
  
  config NET_PKTGEN
@@@ -253,7 -247,9 +253,9 @@@ config NET_TCPPROB
        what was just said, you don't need it: say N.
  
        Documentation on how to use TCP connection probing can be found
-       at http://linux-net.osdl.org/index.php/TcpProbe
+       at:
+       
+         http://www.linuxfoundation.org/collaborate/workgroups/networking/tcpprobe
  
        To compile this code as a module, choose M here: the
        module will be called tcp_probe.
diff --combined net/core/dev.c
@@@ -743,31 -743,34 +743,31 @@@ struct net_device *dev_get_by_index(str
  EXPORT_SYMBOL(dev_get_by_index);
  
  /**
 - *    dev_getbyhwaddr - find a device by its hardware address
 + *    dev_getbyhwaddr_rcu - find a device by its hardware address
   *    @net: the applicable net namespace
   *    @type: media type of device
   *    @ha: hardware address
   *
   *    Search for an interface by MAC address. Returns NULL if the device
 - *    is not found or a pointer to the device. The caller must hold the
 - *    rtnl semaphore. The returned device has not had its ref count increased
 + *    is not found or a pointer to the device. The caller must hold RCU.
 + *    The returned device has not had its ref count increased
   *    and the caller must therefore be careful about locking
   *
 - *    BUGS:
 - *    If the API was consistent this would be __dev_get_by_hwaddr
   */
  
 -struct net_device *dev_getbyhwaddr(struct net *net, unsigned short type, char *ha)
 +struct net_device *dev_getbyhwaddr_rcu(struct net *net, unsigned short type,
 +                                     const char *ha)
  {
        struct net_device *dev;
  
 -      ASSERT_RTNL();
 -
 -      for_each_netdev(net, dev)
 +      for_each_netdev_rcu(net, dev)
                if (dev->type == type &&
                    !memcmp(dev->dev_addr, ha, dev->addr_len))
                        return dev;
  
        return NULL;
  }
 -EXPORT_SYMBOL(dev_getbyhwaddr);
 +EXPORT_SYMBOL(dev_getbyhwaddr_rcu);
  
  struct net_device *__dev_getfirstbyhwtype(struct net *net, unsigned short type)
  {
@@@ -1222,90 -1225,52 +1222,90 @@@ int dev_open(struct net_device *dev
  }
  EXPORT_SYMBOL(dev_open);
  
 -static int __dev_close(struct net_device *dev)
 +static int __dev_close_many(struct list_head *head)
  {
 -      const struct net_device_ops *ops = dev->netdev_ops;
 +      struct net_device *dev;
  
        ASSERT_RTNL();
        might_sleep();
  
 -      /*
 -       *      Tell people we are going down, so that they can
 -       *      prepare to death, when device is still operating.
 -       */
 -      call_netdevice_notifiers(NETDEV_GOING_DOWN, dev);
 +      list_for_each_entry(dev, head, unreg_list) {
 +              /*
 +               *      Tell people we are going down, so that they can
 +               *      prepare for its death while the device is still operating.
 +               */
 +              call_netdevice_notifiers(NETDEV_GOING_DOWN, dev);
 +
 +              clear_bit(__LINK_STATE_START, &dev->state);
  
 -      clear_bit(__LINK_STATE_START, &dev->state);
 +              /* Synchronize to scheduled poll. We cannot touch the poll list;
 +               * it can even be on a different cpu. So just clear netif_running().
 +               *
 +               * dev->stop() will invoke napi_disable() on all of its
 +               * napi_struct instances on this device.
 +               */
 +              smp_mb__after_clear_bit(); /* Commit netif_running(). */
 +      }
  
 -      /* Synchronize to scheduled poll. We cannot touch poll list,
 -       * it can be even on different cpu. So just clear netif_running().
 -       *
 -       * dev->stop() will invoke napi_disable() on all of it's
 -       * napi_struct instances on this device.
 -       */
 -      smp_mb__after_clear_bit(); /* Commit netif_running(). */
 +      dev_deactivate_many(head);
  
 -      dev_deactivate(dev);
 +      list_for_each_entry(dev, head, unreg_list) {
 +              const struct net_device_ops *ops = dev->netdev_ops;
  
 -      /*
 -       *      Call the device specific close. This cannot fail.
 -       *      Only if device is UP
 -       *
 -       *      We allow it to be called even after a DETACH hot-plug
 -       *      event.
 -       */
 -      if (ops->ndo_stop)
 -              ops->ndo_stop(dev);
 +              /*
 +               *      Call the device specific close. This cannot fail.
 +               *      Only if device is UP
 +               *
 +               *      We allow it to be called even after a DETACH hot-plug
 +               *      event.
 +               */
 +              if (ops->ndo_stop)
 +                      ops->ndo_stop(dev);
  
 -      /*
 -       *      Device is now down.
 -       */
 +              /*
 +               *      Device is now down.
 +               */
  
 -      dev->flags &= ~IFF_UP;
 +              dev->flags &= ~IFF_UP;
 +
 +              /*
 +               *      Shutdown NET_DMA
 +               */
 +              net_dmaengine_put();
 +      }
 +
 +      return 0;
 +}
 +
 +static int __dev_close(struct net_device *dev)
 +{
 +      LIST_HEAD(single);
 +
 +      list_add(&dev->unreg_list, &single);
 +      return __dev_close_many(&single);
 +}
 +
 +int dev_close_many(struct list_head *head)
 +{
 +      struct net_device *dev, *tmp;
 +      LIST_HEAD(tmp_list);
 +
 +      list_for_each_entry_safe(dev, tmp, head, unreg_list)
 +              if (!(dev->flags & IFF_UP))
 +                      list_move(&dev->unreg_list, &tmp_list);
 +
 +      __dev_close_many(head);
  
        /*
 -       *      Shutdown NET_DMA
 +       * Tell people we are down
         */
 -      net_dmaengine_put();
 +      list_for_each_entry(dev, head, unreg_list) {
 +              rtmsg_ifinfo(RTM_NEWLINK, dev, IFF_UP|IFF_RUNNING);
 +              call_netdevice_notifiers(NETDEV_DOWN, dev);
 +      }
  
 +      /* rollback_registered_many needs the complete original list */
 +      list_splice(&tmp_list, head);
        return 0;
  }
  
   */
  int dev_close(struct net_device *dev)
  {
 -      if (!(dev->flags & IFF_UP))
 -              return 0;
 -
 -      __dev_close(dev);
 +      LIST_HEAD(single);
  
 -      /*
 -       * Tell people we are down
 -       */
 -      rtmsg_ifinfo(RTM_NEWLINK, dev, IFF_UP|IFF_RUNNING);
 -      call_netdevice_notifiers(NETDEV_DOWN, dev);
 +      list_add(&dev->unreg_list, &single);
 +      dev_close_many(&single);
  
        return 0;
  }
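
dev_close() is itself now just the single-device case of dev_close_many(); a
sketch of batching several devices into one RTNL-held sweep (the helper is
hypothetical; rollback_registered_many() below is the in-tree user):

    static void close_pair(struct net_device *dev1, struct net_device *dev2)
    {
            LIST_HEAD(close_list);

            ASSERT_RTNL();
            list_add(&dev1->unreg_list, &close_list);
            list_add(&dev2->unreg_list, &close_list);
            /* one NETDEV_GOING_DOWN/NETDEV_DOWN pass over the whole list */
            dev_close_many(&close_list);
    }
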
@@@ -1528,14 -1499,6 +1528,14 @@@ int dev_forward_skb(struct net_device *
  }
  EXPORT_SYMBOL_GPL(dev_forward_skb);
  
 +static inline int deliver_skb(struct sk_buff *skb,
 +                            struct packet_type *pt_prev,
 +                            struct net_device *orig_dev)
 +{
 +      atomic_inc(&skb->users);
 +      return pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
 +}
 +
  /*
   *    Support routine. Sends outgoing frames to any network
   *    taps currently in use.
  static void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev)
  {
        struct packet_type *ptype;
 -
 -#ifdef CONFIG_NET_CLS_ACT
 -      if (!(skb->tstamp.tv64 && (G_TC_FROM(skb->tc_verd) & AT_INGRESS)))
 -              net_timestamp_set(skb);
 -#else
 -      net_timestamp_set(skb);
 -#endif
 +      struct sk_buff *skb2 = NULL;
 +      struct packet_type *pt_prev = NULL;
  
        rcu_read_lock();
        list_for_each_entry_rcu(ptype, &ptype_all, list) {
                if ((ptype->dev == dev || !ptype->dev) &&
                    (ptype->af_packet_priv == NULL ||
                     (struct sock *)ptype->af_packet_priv != skb->sk)) {
 -                      struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);
 +                      if (pt_prev) {
 +                              deliver_skb(skb2, pt_prev, skb->dev);
 +                              pt_prev = ptype;
 +                              continue;
 +                      }
 +
 +                      skb2 = skb_clone(skb, GFP_ATOMIC);
                        if (!skb2)
                                break;
  
 +                      net_timestamp_set(skb2);
 +
                        /* skb->nh should be correctly
                           set by the sender, so that the second statement is
                           just protection against buggy protocols.
  
                        skb2->transport_header = skb2->network_header;
                        skb2->pkt_type = PACKET_OUTGOING;
 -                      ptype->func(skb2, skb->dev, ptype, skb->dev);
 +                      pt_prev = ptype;
                }
        }
 +      if (pt_prev)
 +              pt_prev->func(skb2, skb->dev, pt_prev, skb->dev);
        rcu_read_unlock();
  }
  
   */
  int netif_set_real_num_tx_queues(struct net_device *dev, unsigned int txq)
  {
 +      int rc;
 +
        if (txq < 1 || txq > dev->num_tx_queues)
                return -EINVAL;
  
        if (dev->reg_state == NETREG_REGISTERED) {
                ASSERT_RTNL();
  
 +              rc = netdev_queue_update_kobjects(dev, dev->real_num_tx_queues,
 +                                                txq);
 +              if (rc)
 +                      return rc;
 +
                if (txq < dev->real_num_tx_queues)
                        qdisc_reset_all_tx_gt(dev, txq);
        }
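
Since the kobject update can fail, netif_set_real_num_tx_queues() now returns
an error that callers must check. A hedged driver-side sketch (the channel
helper is hypothetical):

    static int my_set_tx_channels(struct net_device *dev, unsigned int count)
    {
            int err;

            /* may fail once the device is registered, so propagate it */
            err = netif_set_real_num_tx_queues(dev, count);
            if (err)
                    return err;
            return 0;
    }
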
@@@ -1732,6 -1683,33 +1732,6 @@@ void netif_device_attach(struct net_dev
  }
  EXPORT_SYMBOL(netif_device_attach);
  
 -static bool can_checksum_protocol(unsigned long features, __be16 protocol)
 -{
 -      return ((features & NETIF_F_NO_CSUM) ||
 -              ((features & NETIF_F_V4_CSUM) &&
 -               protocol == htons(ETH_P_IP)) ||
 -              ((features & NETIF_F_V6_CSUM) &&
 -               protocol == htons(ETH_P_IPV6)) ||
 -              ((features & NETIF_F_FCOE_CRC) &&
 -               protocol == htons(ETH_P_FCOE)));
 -}
 -
 -static bool dev_can_checksum(struct net_device *dev, struct sk_buff *skb)
 -{
 -      __be16 protocol = skb->protocol;
 -      int features = dev->features;
 -
 -      if (vlan_tx_tag_present(skb)) {
 -              features &= dev->vlan_features;
 -      } else if (protocol == htons(ETH_P_8021Q)) {
 -              struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data;
 -              protocol = veh->h_vlan_encapsulated_proto;
 -              features &= dev->vlan_features;
 -      }
 -
 -      return can_checksum_protocol(features, protocol);
 -}
 -
  /**
   * skb_dev_set -- assign a new device to a buffer
   * @skb: buffer for the new device
@@@ -1779,7 -1757,7 +1779,7 @@@ int skb_checksum_help(struct sk_buff *s
                goto out_set_summed;
        }
  
 -      offset = skb->csum_start - skb_headroom(skb);
 +      offset = skb_checksum_start_offset(skb);
        BUG_ON(offset >= skb_headlen(skb));
        csum = skb_checksum(skb, offset, skb->len - offset, 0);
  
@@@ -1816,18 -1794,16 +1816,18 @@@ struct sk_buff *skb_gso_segment(struct 
        struct sk_buff *segs = ERR_PTR(-EPROTONOSUPPORT);
        struct packet_type *ptype;
        __be16 type = skb->protocol;
 +      int vlan_depth = ETH_HLEN;
        int err;
  
 -      if (type == htons(ETH_P_8021Q)) {
 -              struct vlan_ethhdr *veh;
 +      while (type == htons(ETH_P_8021Q)) {
 +              struct vlan_hdr *vh;
  
 -              if (unlikely(!pskb_may_pull(skb, VLAN_ETH_HLEN)))
 +              if (unlikely(!pskb_may_pull(skb, vlan_depth + VLAN_HLEN)))
                        return ERR_PTR(-EINVAL);
  
 -              veh = (struct vlan_ethhdr *)skb->data;
 -              type = veh->h_vlan_encapsulated_proto;
 +              vh = (struct vlan_hdr *)(skb->data + vlan_depth);
 +              type = vh->h_vlan_encapsulated_proto;
 +              vlan_depth += VLAN_HLEN;
        }
  
        skb_reset_mac_header(skb);
                if (dev && dev->ethtool_ops && dev->ethtool_ops->get_drvinfo)
                        dev->ethtool_ops->get_drvinfo(dev, &info);
  
 -              WARN(1, "%s: caps=(0x%lx, 0x%lx) len=%d data_len=%d "
 -                      "ip_summed=%d",
 +              WARN(1, "%s: caps=(0x%lx, 0x%lx) len=%d data_len=%d ip_summed=%d\n",
                     info.driver, dev ? dev->features : 0L,
                     skb->sk ? skb->sk->sk_route_caps : 0L,
                     skb->len, skb->data_len, skb->ip_summed);
@@@ -1944,14 -1921,16 +1944,14 @@@ static void dev_gso_skb_destructor(stru
  /**
   *    dev_gso_segment - Perform emulated hardware segmentation on skb.
   *    @skb: buffer to segment
 + *    @features: device features as applicable to this skb
   *
   *    This function segments the given skb and stores the list of segments
   *    in skb->next.
   */
 -static int dev_gso_segment(struct sk_buff *skb)
 +static int dev_gso_segment(struct sk_buff *skb, int features)
  {
 -      struct net_device *dev = skb->dev;
        struct sk_buff *segs;
 -      int features = dev->features & ~(illegal_highdma(dev, skb) ?
 -                                       NETIF_F_SG : 0);
  
        segs = skb_gso_segment(skb, features);
  
@@@ -1988,53 -1967,6 +1988,53 @@@ static inline void skb_orphan_try(struc
        }
  }
  
 +static bool can_checksum_protocol(unsigned long features, __be16 protocol)
 +{
 +      return ((features & NETIF_F_GEN_CSUM) ||
 +              ((features & NETIF_F_V4_CSUM) &&
 +               protocol == htons(ETH_P_IP)) ||
 +              ((features & NETIF_F_V6_CSUM) &&
 +               protocol == htons(ETH_P_IPV6)) ||
 +              ((features & NETIF_F_FCOE_CRC) &&
 +               protocol == htons(ETH_P_FCOE)));
 +}
 +
 +static int harmonize_features(struct sk_buff *skb, __be16 protocol, int features)
 +{
 +      if (!can_checksum_protocol(features, protocol)) {
 +              features &= ~NETIF_F_ALL_CSUM;
 +              features &= ~NETIF_F_SG;
 +      } else if (illegal_highdma(skb->dev, skb)) {
 +              features &= ~NETIF_F_SG;
 +      }
 +
 +      return features;
 +}
 +
 +int netif_skb_features(struct sk_buff *skb)
 +{
 +      __be16 protocol = skb->protocol;
 +      int features = skb->dev->features;
 +
 +      if (protocol == htons(ETH_P_8021Q)) {
 +              struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data;
 +              protocol = veh->h_vlan_encapsulated_proto;
 +      } else if (!vlan_tx_tag_present(skb)) {
 +              return harmonize_features(skb, protocol, features);
 +      }
 +
 +      features &= skb->dev->vlan_features;
 +
 +      if (protocol != htons(ETH_P_8021Q)) {
 +              return harmonize_features(skb, protocol, features);
 +      } else {
 +              features &= NETIF_F_SG | NETIF_F_HIGHDMA | NETIF_F_FRAGLIST |
 +                              NETIF_F_GEN_CSUM;
 +              return harmonize_features(skb, protocol, features);
 +      }
 +}
 +EXPORT_SYMBOL(netif_skb_features);
 +
  /*
   * Returns true if either:
   *    1. skb has frag_list and the device doesn't support FRAGLIST, or
   *    2. skb is fragmented and the device does not support SG, or if
   *       at least one of fragments is in highmem and device does not
   *       support DMA from it.
   */
  static inline int skb_needs_linearize(struct sk_buff *skb,
 -                                    struct net_device *dev)
 +                                    int features)
  {
 -      int features = dev->features;
 -
 -      if (skb->protocol == htons(ETH_P_8021Q) || vlan_tx_tag_present(skb))
 -              features &= dev->vlan_features;
 -
        return skb_is_nonlinear(skb) &&
 -             ((skb_has_frag_list(skb) && !(features & NETIF_F_FRAGLIST)) ||
 -              (skb_shinfo(skb)->nr_frags && (!(features & NETIF_F_SG) ||
 -                                            illegal_highdma(dev, skb))));
 +                      ((skb_has_frag_list(skb) &&
 +                              !(features & NETIF_F_FRAGLIST)) ||
 +                      (skb_shinfo(skb)->nr_frags &&
 +                              !(features & NETIF_F_SG)));
  }
  
  int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
        int rc = NETDEV_TX_OK;
  
        if (likely(!skb->next)) {
 -              if (!list_empty(&ptype_all))
 -                      dev_queue_xmit_nit(skb, dev);
 +              int features;
  
                /*
                 * If device doesn't need skb->dst, release it right now while
                if (dev->priv_flags & IFF_XMIT_DST_RELEASE)
                        skb_dst_drop(skb);
  
 +              if (!list_empty(&ptype_all))
 +                      dev_queue_xmit_nit(skb, dev);
 +
                skb_orphan_try(skb);
  
 +              features = netif_skb_features(skb);
 +
                if (vlan_tx_tag_present(skb) &&
 -                  !(dev->features & NETIF_F_HW_VLAN_TX)) {
 +                  !(features & NETIF_F_HW_VLAN_TX)) {
                        skb = __vlan_put_tag(skb, vlan_tx_tag_get(skb));
                        if (unlikely(!skb))
                                goto out;
                        skb->vlan_tci = 0;
                }
  
 -              if (netif_needs_gso(dev, skb)) {
 -                      if (unlikely(dev_gso_segment(skb)))
 +              if (netif_needs_gso(skb, features)) {
 +                      if (unlikely(dev_gso_segment(skb, features)))
                                goto out_kfree_skb;
                        if (skb->next)
                                goto gso;
                } else {
 -                      if (skb_needs_linearize(skb, dev) &&
 +                      if (skb_needs_linearize(skb, features) &&
                            __skb_linearize(skb))
                                goto out_kfree_skb;
  
                         * checksumming here.
                         */
                        if (skb->ip_summed == CHECKSUM_PARTIAL) {
 -                              skb_set_transport_header(skb, skb->csum_start -
 -                                            skb_headroom(skb));
 -                              if (!dev_can_checksum(dev, skb) &&
 +                              skb_set_transport_header(skb,
 +                                      skb_checksum_start_offset(skb));
 +                              if (!(features & NETIF_F_ALL_CSUM) &&
                                     skb_checksum_help(skb))
                                        goto out_kfree_skb;
                        }
  
  static u32 hashrnd __read_mostly;
  
 -u16 skb_tx_hash(const struct net_device *dev, const struct sk_buff *skb)
 +/*
 + * Returns a Tx hash based on the given packet descriptor and the number of
 + * Tx queues to be used as a distribution range.
 + */
 +u16 __skb_tx_hash(const struct net_device *dev, const struct sk_buff *skb,
 +                unsigned int num_tx_queues)
  {
        u32 hash;
  
        if (skb_rx_queue_recorded(skb)) {
                hash = skb_get_rx_queue(skb);
 -              while (unlikely(hash >= dev->real_num_tx_queues))
 -                      hash -= dev->real_num_tx_queues;
 +              while (unlikely(hash >= num_tx_queues))
 +                      hash -= num_tx_queues;
                return hash;
        }
  
                hash = (__force u16) skb->protocol ^ skb->rxhash;
        hash = jhash_1word(hash, hashrnd);
  
 -      return (u16) (((u64) hash * dev->real_num_tx_queues) >> 32);
 +      return (u16) (((u64) hash * num_tx_queues) >> 32);
  }
 -EXPORT_SYMBOL(skb_tx_hash);
 +EXPORT_SYMBOL(__skb_tx_hash);
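
The final multiply-and-shift maps a 32-bit hash uniformly onto
[0, num_tx_queues) without a divide. A standalone userspace demo (values
assumed):

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
            uint32_t hash = 0x9e3779b9;         /* arbitrary example hash */
            unsigned int num_tx_queues = 8;
            uint16_t queue =
                    (uint16_t)(((uint64_t)hash * num_tx_queues) >> 32);

            printf("queue=%u\n", queue);        /* always < num_tx_queues */
            return 0;
    }
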
  
  static inline u16 dev_cap_txqueue(struct net_device *dev, u16 queue_index)
  {
        return queue_index;
  }
  
 +static inline int get_xps_queue(struct net_device *dev, struct sk_buff *skb)
 +{
 +#ifdef CONFIG_XPS
 +      struct xps_dev_maps *dev_maps;
 +      struct xps_map *map;
 +      int queue_index = -1;
 +
 +      rcu_read_lock();
 +      dev_maps = rcu_dereference(dev->xps_maps);
 +      if (dev_maps) {
 +              map = rcu_dereference(
 +                  dev_maps->cpu_map[raw_smp_processor_id()]);
 +              if (map) {
 +                      if (map->len == 1)
 +                              queue_index = map->queues[0];
 +                      else {
 +                              u32 hash;
 +                              if (skb->sk && skb->sk->sk_hash)
 +                                      hash = skb->sk->sk_hash;
 +                              else
 +                                      hash = (__force u16) skb->protocol ^
 +                                          skb->rxhash;
 +                              hash = jhash_1word(hash, hashrnd);
 +                              queue_index = map->queues[
 +                                  ((u64)hash * map->len) >> 32];
 +                      }
 +                      if (unlikely(queue_index >= dev->real_num_tx_queues))
 +                              queue_index = -1;
 +              }
 +      }
 +      rcu_read_unlock();
 +
 +      return queue_index;
 +#else
 +      return -1;
 +#endif
 +}
 +
  static struct netdev_queue *dev_pick_tx(struct net_device *dev,
                                        struct sk_buff *skb)
  {
        int queue_index;
        const struct net_device_ops *ops = dev->netdev_ops;
  
 -      if (ops->ndo_select_queue) {
 +      if (dev->real_num_tx_queues == 1)
 +              queue_index = 0;
 +      else if (ops->ndo_select_queue) {
                queue_index = ops->ndo_select_queue(dev, skb);
                queue_index = dev_cap_txqueue(dev, queue_index);
        } else {
                struct sock *sk = skb->sk;
                queue_index = sk_tx_queue_get(sk);
 -              if (queue_index < 0 || queue_index >= dev->real_num_tx_queues) {
  
 -                      queue_index = 0;
 -                      if (dev->real_num_tx_queues > 1)
 +              if (queue_index < 0 || skb->ooo_okay ||
 +                  queue_index >= dev->real_num_tx_queues) {
 +                      int old_index = queue_index;
 +
 +                      queue_index = get_xps_queue(dev, skb);
 +                      if (queue_index < 0)
                                queue_index = skb_tx_hash(dev, skb);
  
 -                      if (sk) {
 -                              struct dst_entry *dst = rcu_dereference_check(sk->sk_dst_cache, 1);
 +                      if (queue_index != old_index && sk) {
 +                              struct dst_entry *dst =
 +                                  rcu_dereference_check(sk->sk_dst_cache, 1);
  
                                if (dst && skb_dst(skb) == dst)
                                        sk_tx_queue_set(sk, queue_index);
@@@ -2297,10 -2180,7 +2297,10 @@@ static inline int __dev_xmit_skb(struc
                 */
                if (!(dev->priv_flags & IFF_XMIT_DST_RELEASE))
                        skb_dst_force(skb);
 -              __qdisc_update_bstats(q, skb->len);
 +
 +              qdisc_skb_cb(skb)->pkt_len = skb->len;
 +              qdisc_bstats_update(q, skb);
 +
                if (sch_direct_xmit(skb, q, dev, txq, root_lock)) {
                        if (unlikely(contended)) {
                                spin_unlock(&q->busylock);
@@@ -2832,6 -2712,14 +2832,6 @@@ static void net_tx_action(struct softir
        }
  }
  
 -static inline int deliver_skb(struct sk_buff *skb,
 -                            struct packet_type *pt_prev,
 -                            struct net_device *orig_dev)
 -{
 -      atomic_inc(&skb->users);
 -      return pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
 -}
 -
  #if (defined(CONFIG_BRIDGE) || defined(CONFIG_BRIDGE_MODULE)) && \
      (defined(CONFIG_ATM_LANE) || defined(CONFIG_ATM_LANE_MODULE))
  /* This hook is defined here for ATM LANE */
@@@ -4999,12 -4887,10 +4999,12 @@@ static void rollback_registered_many(st
                }
  
                BUG_ON(dev->reg_state != NETREG_REGISTERED);
 +      }
  
 -              /* If device is running, close it first. */
 -              dev_close(dev);
 +      /* If device is running, close it first. */
 +      dev_close_many(head);
  
 +      list_for_each_entry(dev, head, unreg_list) {
                /* And unlink it from device chain. */
                unlist_netdevice(dev);
  
@@@ -5081,13 -4967,10 +5081,13 @@@ unsigned long netdev_fix_features(unsig
        }
  
        if (features & NETIF_F_UFO) {
 -              if (!(features & NETIF_F_GEN_CSUM)) {
 +              /* maybe split UFO into V4 and V6? */
 +              if (!((features & NETIF_F_GEN_CSUM) ||
 +                  (features & (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))
 +                          == (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))) {
                        if (name)
                                printk(KERN_ERR "%s: Dropping NETIF_F_UFO "
 -                                     "since no NETIF_F_HW_CSUM feature.\n",
 +                                     "since no checksum offload features.\n",
                                       name);
                        features &= ~NETIF_F_UFO;
                }
@@@ -5131,9 -5014,9 +5131,9 @@@ void netif_stacked_transfer_operstate(c
  }
  EXPORT_SYMBOL(netif_stacked_transfer_operstate);
  
 +#ifdef CONFIG_RPS
  static int netif_alloc_rx_queues(struct net_device *dev)
  {
 -#ifdef CONFIG_RPS
        unsigned int i, count = dev->num_rx_queues;
        struct netdev_rx_queue *rx;
  
        }
        dev->_rx = rx;
  
 -      /*
 -       * Set a pointer to first element in the array which holds the
 -       * reference count.
 -       */
        for (i = 0; i < count; i++)
 -              rx[i].first = rx;
 -#endif
 +              rx[i].dev = dev;
        return 0;
  }
 +#endif
 +
 +static void netdev_init_one_queue(struct net_device *dev,
 +                                struct netdev_queue *queue, void *_unused)
 +{
 +      /* Initialize queue lock */
 +      spin_lock_init(&queue->_xmit_lock);
 +      netdev_set_xmit_lockdep_class(&queue->_xmit_lock, dev->type);
 +      queue->xmit_lock_owner = -1;
 +      netdev_queue_numa_node_write(queue, NUMA_NO_NODE);
 +      queue->dev = dev;
 +}
  
  static int netif_alloc_netdev_queues(struct net_device *dev)
  {
                return -ENOMEM;
        }
        dev->_tx = tx;
 -      return 0;
 -}
 -
 -static void netdev_init_one_queue(struct net_device *dev,
 -                                struct netdev_queue *queue,
 -                                void *_unused)
 -{
 -      queue->dev = dev;
 -
 -      /* Initialize queue lock */
 -      spin_lock_init(&queue->_xmit_lock);
 -      netdev_set_xmit_lockdep_class(&queue->_xmit_lock, dev->type);
 -      queue->xmit_lock_owner = -1;
 -}
  
 -static void netdev_init_queues(struct net_device *dev)
 -{
        netdev_for_each_tx_queue(dev, netdev_init_one_queue, NULL);
        spin_lock_init(&dev->tx_global_lock);
 +
 +      return 0;
  }
  
  /**
@@@ -5220,6 -5110,16 +5220,6 @@@ int register_netdevice(struct net_devic
  
        dev->iflink = -1;
  
 -      ret = netif_alloc_rx_queues(dev);
 -      if (ret)
 -              goto out;
 -
 -      ret = netif_alloc_netdev_queues(dev);
 -      if (ret)
 -              goto out;
 -
 -      netdev_init_queues(dev);
 -
        /* Init, if this function is available */
        if (dev->netdev_ops->ndo_init) {
                ret = dev->netdev_ops->ndo_init(dev);
@@@ -5620,20 -5520,18 +5620,20 @@@ struct netdev_queue *dev_ingress_queue_
  }
  
  /**
 - *    alloc_netdev_mq - allocate network device
 + *    alloc_netdev_mqs - allocate network device
   *    @sizeof_priv:   size of private data to allocate space for
   *    @name:          device name format string
   *    @setup:         callback to initialize device
 - *    @queue_count:   the number of subqueues to allocate
 + *    @txqs:          the number of TX subqueues to allocate
 + *    @rxqs:          the number of RX subqueues to allocate
   *
   *    Allocates a struct net_device with private data area for driver use
   *    and performs basic initialization.  Also allocates subqueue structs
 - *    for each queue on the device at the end of the netdevice.
 + *    for each queue on the device.
   */
 -struct net_device *alloc_netdev_mq(int sizeof_priv, const char *name,
 -              void (*setup)(struct net_device *), unsigned int queue_count)
 +struct net_device *alloc_netdev_mqs(int sizeof_priv, const char *name,
 +              void (*setup)(struct net_device *),
 +              unsigned int txqs, unsigned int rxqs)
  {
        struct net_device *dev;
        size_t alloc_size;
  
        BUG_ON(strlen(name) >= sizeof(dev->name));
  
 -      if (queue_count < 1) {
 +      if (txqs < 1) {
                pr_err("alloc_netdev: Unable to allocate device "
                       "with zero queues.\n");
                return NULL;
        }
  
 +#ifdef CONFIG_RPS
 +      if (rxqs < 1) {
 +              pr_err("alloc_netdev: Unable to allocate device "
 +                     "with zero RX queues.\n");
 +              return NULL;
 +      }
 +#endif
 +
        alloc_size = sizeof(struct net_device);
        if (sizeof_priv) {
                /* ensure 32-byte alignment of private area */
  
        dev_net_set(dev, &init_net);
  
 -      dev->num_tx_queues = queue_count;
 -      dev->real_num_tx_queues = queue_count;
 +      dev->num_tx_queues = txqs;
 +      dev->real_num_tx_queues = txqs;
 +      if (netif_alloc_netdev_queues(dev))
 +              goto free_pcpu;
  
  #ifdef CONFIG_RPS
 -      dev->num_rx_queues = queue_count;
 -      dev->real_num_rx_queues = queue_count;
 +      dev->num_rx_queues = rxqs;
 +      dev->real_num_rx_queues = rxqs;
 +      if (netif_alloc_rx_queues(dev))
 +              goto free_pcpu;
  #endif
  
        dev->gso_max_size = GSO_MAX_SIZE;
  
  free_pcpu:
        free_percpu(dev->pcpu_refcnt);
 +      kfree(dev->_tx);
 +#ifdef CONFIG_RPS
 +      kfree(dev->_rx);
 +#endif
 +
  free_p:
        kfree(p);
        return NULL;
  }
 -EXPORT_SYMBOL(alloc_netdev_mq);
 +EXPORT_SYMBOL(alloc_netdev_mqs);
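
A sketch of the new allocator from a driver's probe path (struct my_priv and
my_setup are assumptions for illustration; the old alloc_netdev_mq() callers
correspond to the txqs == rxqs case):

    static struct net_device *my_probe_netdev(void)
    {
            /* 4 TX and 4 RX queues, sized independently from now on */
            return alloc_netdev_mqs(sizeof(struct my_priv), "myeth%d",
                                    my_setup, 4, 4);
    }
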
  
  /**
   *    free_netdev - free network device
@@@ -5737,9 -5618,6 +5737,9 @@@ void free_netdev(struct net_device *dev
        release_net(dev_net(dev));
  
        kfree(dev->_tx);
 +#ifdef CONFIG_RPS
 +      kfree(dev->_rx);
 +#endif
  
        kfree(rcu_dereference_raw(dev->ingress_queue));
  
@@@ -6218,7 -6096,7 +6218,7 @@@ static void __net_exit default_device_e
  static void __net_exit default_device_exit_batch(struct list_head *net_list)
  {
        /* At exit all network devices must be removed from a network
-        * namespace.  Do this in the reverse order of registeration.
+        * namespace.  Do this in the reverse order of registration.
         * Do this across as many network namespaces as possible to
         * improve batching efficiency.
         */
diff --combined net/decnet/dn_dev.c
@@@ -267,7 -267,7 +267,7 @@@ static int dn_forwarding_proc(ctl_tabl
        if (table->extra1 == NULL)
                return -EINVAL;
  
 -      dn_db = dev->dn_ptr;
 +      dn_db = rcu_dereference_raw(dev->dn_ptr);
        old = dn_db->parms.forwarding;
  
        err = proc_dointvec(table, write, buffer, lenp, ppos);
@@@ -332,19 -332,14 +332,19 @@@ static struct dn_ifaddr *dn_dev_alloc_i
        return ifa;
  }
  
 -static __inline__ void dn_dev_free_ifa(struct dn_ifaddr *ifa)
 +static void dn_dev_free_ifa_rcu(struct rcu_head *head)
  {
 -      kfree(ifa);
 +      kfree(container_of(head, struct dn_ifaddr, rcu));
  }
  
 -static void dn_dev_del_ifa(struct dn_dev *dn_db, struct dn_ifaddr **ifap, int destroy)
 +static void dn_dev_free_ifa(struct dn_ifaddr *ifa)
  {
 -      struct dn_ifaddr *ifa1 = *ifap;
 +      call_rcu(&ifa->rcu, dn_dev_free_ifa_rcu);
 +}
 +
 +static void dn_dev_del_ifa(struct dn_dev *dn_db, struct dn_ifaddr __rcu **ifap, int destroy)
 +{
 +      struct dn_ifaddr *ifa1 = rtnl_dereference(*ifap);
        unsigned char mac_addr[6];
        struct net_device *dev = dn_db->dev;
  
@@@ -378,9 -373,7 +378,9 @@@ static int dn_dev_insert_ifa(struct dn_
        ASSERT_RTNL();
  
        /* Check for duplicates */
 -      for(ifa1 = dn_db->ifa_list; ifa1; ifa1 = ifa1->ifa_next) {
 +      for (ifa1 = rtnl_dereference(dn_db->ifa_list);
 +           ifa1 != NULL;
 +           ifa1 = rtnl_dereference(ifa1->ifa_next)) {
                if (ifa1->ifa_local == ifa->ifa_local)
                        return -EEXIST;
        }
        }
  
        ifa->ifa_next = dn_db->ifa_list;
 -      dn_db->ifa_list = ifa;
 +      rcu_assign_pointer(dn_db->ifa_list, ifa);
  
        dn_ifaddr_notify(RTM_NEWADDR, ifa);
        blocking_notifier_call_chain(&dnaddr_chain, NETDEV_UP, ifa);
  
  static int dn_dev_set_ifa(struct net_device *dev, struct dn_ifaddr *ifa)
  {
 -      struct dn_dev *dn_db = dev->dn_ptr;
 +      struct dn_dev *dn_db = rtnl_dereference(dev->dn_ptr);
        int rv;
  
        if (dn_db == NULL) {
@@@ -432,8 -425,7 +432,8 @@@ int dn_dev_ioctl(unsigned int cmd, voi
        struct sockaddr_dn *sdn = (struct sockaddr_dn *)&ifr->ifr_addr;
        struct dn_dev *dn_db;
        struct net_device *dev;
 -      struct dn_ifaddr *ifa = NULL, **ifap = NULL;
 +      struct dn_ifaddr *ifa = NULL;
 +      struct dn_ifaddr __rcu **ifap = NULL;
        int ret = 0;
  
        if (copy_from_user(ifr, arg, DN_IFREQ_SIZE))
                goto done;
        }
  
 -      if ((dn_db = dev->dn_ptr) != NULL) {
 -              for (ifap = &dn_db->ifa_list; (ifa=*ifap) != NULL; ifap = &ifa->ifa_next)
 +      if ((dn_db = rtnl_dereference(dev->dn_ptr)) != NULL) {
 +              for (ifap = &dn_db->ifa_list;
 +                   (ifa = rtnl_dereference(*ifap)) != NULL;
 +                   ifap = &ifa->ifa_next)
                        if (strcmp(ifr->ifr_name, ifa->ifa_label) == 0)
                                break;
        }
@@@ -568,7 -558,7 +568,7 @@@ static struct dn_dev *dn_dev_by_index(i
  
        dev = __dev_get_by_index(&init_net, ifindex);
        if (dev)
 -              dn_dev = dev->dn_ptr;
 +              dn_dev = rtnl_dereference(dev->dn_ptr);
  
        return dn_dev;
  }
@@@ -586,8 -576,7 +586,8 @@@ static int dn_nl_deladdr(struct sk_buf
        struct nlattr *tb[IFA_MAX+1];
        struct dn_dev *dn_db;
        struct ifaddrmsg *ifm;
 -      struct dn_ifaddr *ifa, **ifap;
 +      struct dn_ifaddr *ifa;
 +      struct dn_ifaddr __rcu **ifap;
        int err = -EINVAL;
  
        if (!net_eq(net, &init_net))
                goto errout;
  
        err = -EADDRNOTAVAIL;
 -      for (ifap = &dn_db->ifa_list; (ifa = *ifap); ifap = &ifa->ifa_next) {
 +      for (ifap = &dn_db->ifa_list;
 +           (ifa = rtnl_dereference(*ifap)) != NULL;
 +           ifap = &ifa->ifa_next) {
                if (tb[IFA_LOCAL] &&
                    nla_memcmp(tb[IFA_LOCAL], &ifa->ifa_local, 2))
                        continue;
@@@ -645,7 -632,7 +645,7 @@@ static int dn_nl_newaddr(struct sk_buf
        if ((dev = __dev_get_by_index(&init_net, ifm->ifa_index)) == NULL)
                return -ENODEV;
  
 -      if ((dn_db = dev->dn_ptr) == NULL) {
 +      if ((dn_db = rtnl_dereference(dev->dn_ptr)) == NULL) {
                dn_db = dn_dev_create(dev, &err);
                if (!dn_db)
                        return err;
@@@ -761,11 -748,11 +761,11 @@@ static int dn_nl_dump_ifaddr(struct sk_
                        skip_naddr = 0;
                }
  
 -              if ((dn_db = dev->dn_ptr) == NULL)
 +              if ((dn_db = rtnl_dereference(dev->dn_ptr)) == NULL)
                        goto cont;
  
 -              for (ifa = dn_db->ifa_list, dn_idx = 0; ifa;
 -                   ifa = ifa->ifa_next, dn_idx++) {
 +              for (ifa = rtnl_dereference(dn_db->ifa_list), dn_idx = 0; ifa;
 +                   ifa = rtnl_dereference(ifa->ifa_next), dn_idx++) {
                        if (dn_idx < skip_naddr)
                                continue;
  
@@@ -786,22 -773,21 +786,22 @@@ done
  
  static int dn_dev_get_first(struct net_device *dev, __le16 *addr)
  {
 -      struct dn_dev *dn_db = (struct dn_dev *)dev->dn_ptr;
 +      struct dn_dev *dn_db;
        struct dn_ifaddr *ifa;
        int rv = -ENODEV;
  
 +      rcu_read_lock();
 +      dn_db = rcu_dereference(dev->dn_ptr);
        if (dn_db == NULL)
                goto out;
  
 -      rtnl_lock();
 -      ifa = dn_db->ifa_list;
 +      ifa = rcu_dereference(dn_db->ifa_list);
        if (ifa != NULL) {
                *addr = ifa->ifa_local;
                rv = 0;
        }
 -      rtnl_unlock();
  out:
 +      rcu_read_unlock();
        return rv;
  }
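
The conversions in this file follow the usual RCU split: rtnl_dereference()
on the update side where RTNL is held, rcu_dereference() inside an RCU read
section on the lockless side, and rcu_assign_pointer() to publish. A generic
sketch of the pattern applied to dev->dn_ptr (function names are
illustrative):

    /* Reader: the pointer is only stable inside the read-side section. */
    static void my_reader(struct net_device *dev)
    {
            struct dn_dev *dn_db;

            rcu_read_lock();
            dn_db = rcu_dereference(dev->dn_ptr);
            if (dn_db)
                    use_dn_db(dn_db);           /* assumed consumer */
            rcu_read_unlock();
    }

    /* Updater: RTNL serializes writers; publish with rcu_assign_pointer(). */
    static void my_updater(struct net_device *dev, struct dn_dev *new_db)
    {
            ASSERT_RTNL();
            rcu_assign_pointer(dev->dn_ptr, new_db);
    }
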
  
@@@ -837,7 -823,7 +837,7 @@@ static void dn_send_endnode_hello(struc
        struct endnode_hello_message *msg;
        struct sk_buff *skb = NULL;
        __le16 *pktlen;
 -      struct dn_dev *dn_db = (struct dn_dev *)dev->dn_ptr;
 +      struct dn_dev *dn_db = rcu_dereference_raw(dev->dn_ptr);
  
        if ((skb = dn_alloc_skb(NULL, sizeof(*msg), GFP_ATOMIC)) == NULL)
                return;
@@@ -903,7 -889,7 +903,7 @@@ static int dn_am_i_a_router(struct dn_n
  static void dn_send_router_hello(struct net_device *dev, struct dn_ifaddr *ifa)
  {
        int n;
 -      struct dn_dev *dn_db = dev->dn_ptr;
 +      struct dn_dev *dn_db = rcu_dereference_raw(dev->dn_ptr);
        struct dn_neigh *dn = (struct dn_neigh *)dn_db->router;
        struct sk_buff *skb;
        size_t size;
  
  static void dn_send_brd_hello(struct net_device *dev, struct dn_ifaddr *ifa)
  {
 -      struct dn_dev *dn_db = (struct dn_dev *)dev->dn_ptr;
 +      struct dn_dev *dn_db = rcu_dereference_raw(dev->dn_ptr);
  
        if (dn_db->parms.forwarding == 0)
                dn_send_endnode_hello(dev, ifa);
@@@ -1012,7 -998,7 +1012,7 @@@ static void dn_send_ptp_hello(struct ne
  
  static int dn_eth_up(struct net_device *dev)
  {
 -      struct dn_dev *dn_db = dev->dn_ptr;
 +      struct dn_dev *dn_db = rcu_dereference_raw(dev->dn_ptr);
  
        if (dn_db->parms.forwarding == 0)
                dev_mc_add(dev, dn_rt_all_end_mcast);
  
  static void dn_eth_down(struct net_device *dev)
  {
 -      struct dn_dev *dn_db = dev->dn_ptr;
 +      struct dn_dev *dn_db = rcu_dereference_raw(dev->dn_ptr);
  
        if (dn_db->parms.forwarding == 0)
                dev_mc_del(dev, dn_rt_all_end_mcast);
@@@ -1039,16 -1025,12 +1039,16 @@@ static void dn_dev_set_timer(struct net
  static void dn_dev_timer_func(unsigned long arg)
  {
        struct net_device *dev = (struct net_device *)arg;
 -      struct dn_dev *dn_db = dev->dn_ptr;
 +      struct dn_dev *dn_db;
        struct dn_ifaddr *ifa;
  
 +      rcu_read_lock();
 +      dn_db = rcu_dereference(dev->dn_ptr);
        if (dn_db->t3 <= dn_db->parms.t2) {
                if (dn_db->parms.timer3) {
 -                      for(ifa = dn_db->ifa_list; ifa; ifa = ifa->ifa_next) {
 +                      for (ifa = rcu_dereference(dn_db->ifa_list);
 +                           ifa;
 +                           ifa = rcu_dereference(ifa->ifa_next)) {
                                if (!(ifa->ifa_flags & IFA_F_SECONDARY))
                                        dn_db->parms.timer3(dev, ifa);
                        }
        } else {
                dn_db->t3 -= dn_db->parms.t2;
        }
 -
 +      rcu_read_unlock();
        dn_dev_set_timer(dev);
  }
  
  static void dn_dev_set_timer(struct net_device *dev)
  {
 -      struct dn_dev *dn_db = dev->dn_ptr;
 +      struct dn_dev *dn_db = rcu_dereference_raw(dev->dn_ptr);
  
        if (dn_db->parms.t2 > dn_db->parms.t3)
                dn_db->parms.t2 = dn_db->parms.t3;
@@@ -1095,8 -1077,8 +1095,8 @@@ static struct dn_dev *dn_dev_create(str
                return NULL;
  
        memcpy(&dn_db->parms, p, sizeof(struct dn_dev_parms));
 -      smp_wmb();
 -      dev->dn_ptr = dn_db;
 +
 +      rcu_assign_pointer(dev->dn_ptr, dn_db);
        dn_db->dev = dev;
        init_timer(&dn_db->timer);
  
  
        dn_db->neigh_parms = neigh_parms_alloc(dev, &dn_neigh_table);
        if (!dn_db->neigh_parms) {
 -              dev->dn_ptr = NULL;
 +              rcu_assign_pointer(dev->dn_ptr, NULL);
                kfree(dn_db);
                return NULL;
        }
  /*
   * This processes a device up event. We only start up
   * the loopback device & ethernet devices with correct
-  * MAC addreses automatically. Others must be started
+  * MAC addresses automatically. Others must be started
   * specifically.
   *
   * FIXME: How should we configure the loopback address ? If we could dispense
@@@ -1143,7 -1125,7 +1143,7 @@@ void dn_dev_up(struct net_device *dev
        struct dn_ifaddr *ifa;
        __le16 addr = decnet_address;
        int maybe_default = 0;
 -      struct dn_dev *dn_db = (struct dn_dev *)dev->dn_ptr;
 +      struct dn_dev *dn_db = rtnl_dereference(dev->dn_ptr);
  
        if ((dev->type != ARPHRD_ETHER) && (dev->type != ARPHRD_LOOPBACK))
                return;
  
  static void dn_dev_delete(struct net_device *dev)
  {
 -      struct dn_dev *dn_db = dev->dn_ptr;
 +      struct dn_dev *dn_db = rtnl_dereference(dev->dn_ptr);
  
        if (dn_db == NULL)
                return;
  
  void dn_dev_down(struct net_device *dev)
  {
 -      struct dn_dev *dn_db = dev->dn_ptr;
 +      struct dn_dev *dn_db = rtnl_dereference(dev->dn_ptr);
        struct dn_ifaddr *ifa;
  
        if (dn_db == NULL)
                return;
  
 -      while((ifa = dn_db->ifa_list) != NULL) {
 +      while ((ifa = rtnl_dereference(dn_db->ifa_list)) != NULL) {
                dn_dev_del_ifa(dn_db, &dn_db->ifa_list, 0);
                dn_dev_free_ifa(ifa);
        }
@@@ -1288,7 -1270,7 +1288,7 @@@ static inline int is_dn_dev(struct net_
  }
  
  static void *dn_dev_seq_start(struct seq_file *seq, loff_t *pos)
 -      __acquires(rcu)
 +      __acquires(RCU)
  {
        int i;
        struct net_device *dev;
@@@ -1331,7 -1313,7 +1331,7 @@@ static void *dn_dev_seq_next(struct seq
  }
  
  static void dn_dev_seq_stop(struct seq_file *seq, void *v)
 -      __releases(rcu)
 +      __releases(RCU)
  {
        rcu_read_unlock();
  }
@@@ -1358,7 -1340,7 +1358,7 @@@ static int dn_dev_seq_show(struct seq_f
                struct net_device *dev = v;
                char peer_buf[DN_ASCBUF_LEN];
                char router_buf[DN_ASCBUF_LEN];
 -              struct dn_dev *dn_db = dev->dn_ptr;
 +              struct dn_dev *dn_db = rcu_dereference(dev->dn_ptr);
  
                seq_printf(seq, "%-8s %1s     %04u %04u   %04lu %04lu"
                                "   %04hu    %03d %02x    %-10s %-7s %-7s\n",
diff --combined net/ipv4/tcp_output.c
@@@ -55,7 -55,7 +55,7 @@@ int sysctl_tcp_workaround_signed_window
  int sysctl_tcp_tso_win_divisor __read_mostly = 3;
  
  int sysctl_tcp_mtu_probing __read_mostly = 0;
 -int sysctl_tcp_base_mss __read_mostly = 512;
 +int sysctl_tcp_base_mss __read_mostly = TCP_BASE_MSS;
  
  /* By default, RFC2861 behavior.  */
  int sysctl_tcp_slow_start_after_idle __read_mostly = 1;
@@@ -119,13 -119,9 +119,13 @@@ static __u16 tcp_advertise_mss(struct s
        struct dst_entry *dst = __sk_dst_get(sk);
        int mss = tp->advmss;
  
 -      if (dst && dst_metric(dst, RTAX_ADVMSS) < mss) {
 -              mss = dst_metric(dst, RTAX_ADVMSS);
 -              tp->advmss = mss;
 +      if (dst) {
 +              unsigned int metric = dst_metric_advmss(dst);
 +
 +              if (metric < mss) {
 +                      mss = metric;
 +                      tp->advmss = mss;
 +              }
        }
  
        return (__u16)mss;
@@@ -228,15 -224,10 +228,15 @@@ void tcp_select_initial_window(int __sp
                }
        }
  
 -      /* Set initial window to value enough for senders, following RFC5681. */
 +      /* Set initial window to a value large enough for senders starting with
 +       * initial congestion window of TCP_DEFAULT_INIT_RCVWND. Place
 +       * a limit on the initial window when mss is larger than 1460.
 +       */
        if (mss > (1 << *rcv_wscale)) {
 -              int init_cwnd = rfc3390_bytes_to_packets(mss);
 -
 +              int init_cwnd = TCP_DEFAULT_INIT_RCVWND;
 +              if (mss > 1460)
 +                      init_cwnd = max_t(u32,
 +                                        (1460 * TCP_DEFAULT_INIT_RCVWND) / mss, 2);
                /* when initializing use the value from init_rcv_wnd
                 * rather than the default from above
                 */
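
Worked through with TCP_DEFAULT_INIT_RCVWND defined as 10 segments in this
series: mss = 1460 keeps init_cwnd at 10 (a 14600-byte initial window), while
a jumbo mss of 9000 gives max_t(u32, 1460 * 10 / 9000, 2) = max(1, 2) = 2
segments, so the byte budget stays roughly constant instead of scaling with
mss.
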
@@@ -833,11 -824,8 +833,11 @@@ static int tcp_transmit_skb(struct soc
                                                           &md5);
        tcp_header_size = tcp_options_size + sizeof(struct tcphdr);
  
 -      if (tcp_packets_in_flight(tp) == 0)
 +      if (tcp_packets_in_flight(tp) == 0) {
                tcp_ca_event(sk, CA_EVENT_TX_START);
 +              skb->ooo_okay = 1;
 +      } else
 +              skb->ooo_okay = 0;
  
        skb_push(skb, tcp_header_size);
        skb_reset_transport_header(skb);
@@@ -1350,7 -1338,7 +1350,7 @@@ static inline unsigned int tcp_cwnd_tes
        return 0;
  }
  
- /* Intialize TSO state of a skb.
+ /* Initialize TSO state of a skb.
   * This must be invoked the first time we consider transmitting
   * SKB onto the wire.
   */
@@@ -2431,7 -2419,7 +2431,7 @@@ struct sk_buff *tcp_make_synack(struct 
  
        skb_dst_set(skb, dst_clone(dst));
  
 -      mss = dst_metric(dst, RTAX_ADVMSS);
 +      mss = dst_metric_advmss(dst);
        if (tp->rx_opt.user_mss && tp->rx_opt.user_mss < mss)
                mss = tp->rx_opt.user_mss;
  
@@@ -2565,7 -2553,7 +2565,7 @@@ static void tcp_connect_init(struct soc
  
        if (!tp->window_clamp)
                tp->window_clamp = dst_metric(dst, RTAX_WINDOW);
 -      tp->advmss = dst_metric(dst, RTAX_ADVMSS);
 +      tp->advmss = dst_metric_advmss(dst);
        if (tp->rx_opt.user_mss && tp->rx_opt.user_mss < tp->advmss)
                tp->advmss = tp->rx_opt.user_mss;
  
@@@ -2608,7 -2596,6 +2608,7 @@@ int tcp_connect(struct sock *sk
  {
        struct tcp_sock *tp = tcp_sk(sk);
        struct sk_buff *buff;
 +      int err;
  
        tcp_connect_init(sk);
  
        sk->sk_wmem_queued += buff->truesize;
        sk_mem_charge(sk, buff->truesize);
        tp->packets_out += tcp_skb_pcount(buff);
 -      tcp_transmit_skb(sk, buff, 1, sk->sk_allocation);
 +      err = tcp_transmit_skb(sk, buff, 1, sk->sk_allocation);
 +      if (err == -ECONNREFUSED)
 +              return err;
  
        /* We change tp->snd_nxt after the tcp_transmit_skb() call
         * in order to make this packet get counted in tcpOutSegs.
diff --combined net/ipv6/af_inet6.c
@@@ -300,7 -300,7 +300,7 @@@ int inet6_bind(struct socket *sock, str
                        goto out;
                }
  
-               /* Reproduce AF_INET checks to make the bindings consitant */
+               /* Reproduce AF_INET checks to make the bindings consistent */
                v4addr = addr->sin6_addr.s6_addr32[3];
                chk_addr_ret = inet_addr_type(net, v4addr);
                if (!sysctl_ip_nonlocal_bind &&
@@@ -810,7 -810,7 +810,7 @@@ static struct sk_buff *ipv6_gso_segment
        }
        rcu_read_unlock();
  
 -      if (unlikely(IS_ERR(segs)))
 +      if (IS_ERR(segs))
                goto out;
  
        for (skb = segs; skb; skb = skb->next) {
diff --combined scripts/mod/modpost.c
@@@ -790,7 -790,6 +790,7 @@@ static const char *section_white_list[
  {
        ".comment*",
        ".debug*",
 +      ".zdebug*",             /* Compressed debug sections. */
        ".GCC-command-line",    /* mn10300 */
        ".mdebug*",        /* alpha, score, mips etc. */
        ".pdr",            /* alpha, score, mips etc. */
@@@ -1442,7 -1441,7 +1442,7 @@@ static unsigned int *reloc_location(str
        int section = shndx2secindex(sechdr->sh_info);
  
        return (void *)elf->hdr + sechdrs[section].sh_offset +
 -              r->r_offset - sechdrs[section].sh_addr;
 +              r->r_offset;
  }
  
  static int addend_386_rel(struct elf_info *elf, Elf_Shdr *sechdr, Elf_Rela *r)
@@@ -1615,7 -1614,7 +1615,7 @@@ static void section_rel(const char *mod
   * A module includes a number of sections that are discarded
   * either when loaded or when used as built-in.
   * For loaded modules all functions marked __init and all data
-  * marked __initdata will be discarded when the module has been intialized.
+  * marked __initdata will be discarded when the module has been initialized.
  * Likewise, for modules used built-in, the sections marked __exit are
  * discarded because __exit marked functions are supposed to be called
  * only when a module is unloaded, which never happens for built-in modules.
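
The discard rules this comment describes are what make the usual module boilerplate safe; a minimal sketch (module name, strings, and license tag invented for illustration):

    #include <linux/init.h>
    #include <linux/module.h>

    static char banner[] __initdata = "hello: loaded\n";

    static int __init hello_init(void)
    {
            /* Both this function and banner are freed after loading. */
            pr_info("%s", banner);
            return 0;
    }

    static void __exit hello_exit(void)
    {
            /* Discarded entirely when the module is built in. */
            pr_info("hello: unloaded\n");
    }

    module_init(hello_init);
    module_exit(hello_exit);
    MODULE_LICENSE("GPL");
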
diff --combined security/apparmor/include/match.h
@@@ -15,7 -15,6 +15,7 @@@
  #ifndef __AA_MATCH_H
  #define __AA_MATCH_H
  
 +#include <linux/kref.h>
  #include <linux/workqueue.h>
  
  #define DFA_NOMATCH                   0
@@@ -28,7 -27,7 +28,7 @@@
   * The format used for transition tables is based on the GNU flex table
   * file format (--tables-file option; see Table File Format in the flex
   * info pages and the flex sources for documentation). The magic number
-  * used in the header is 0x1B5E783D insted of 0xF13C57B1 though, because
+  * used in the header is 0x1B5E783D instead of 0xF13C57B1 though, because
   * the YY_ID_CHK (check) and YY_ID_DEF (default) tables are used
   * slightly differently (see the apparmor-parser package).
   */
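
Concretely, a consumer of these tables can distinguish the two formats by the header magic alone. The check below is an illustrative sketch, not AppArmor's actual parser; only the two constants come from the comment above:

    #include <stdint.h>

    #define AA_DFA_MAGIC    0x1B5E783D      /* AppArmor variant */
    #define FLEX_MAGIC      0xF13C57B1      /* stock flex tables */

    /* Return 0 for an AppArmor table, -1 for anything else
     * (including an unmodified flex table). */
    static int check_table_magic(uint32_t magic)
    {
            return magic == AA_DFA_MAGIC ? 0 : -1;
    }
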
diff --combined sound/soc/codecs/max98088.c
@@@ -40,6 -40,7 +40,6 @@@ struct max98088_cdata 
  };
  
  struct max98088_priv {
 -       u8 reg_cache[M98088_REG_CNT];
         enum max98088_type devtype;
         void *control_data;
         struct max98088_pdata *pdata;
@@@ -1587,7 -1588,7 +1587,7 @@@ static int max98088_dai2_set_fmt(struc
  
  static void max98088_sync_cache(struct snd_soc_codec *codec)
  {
 -       struct max98088_priv *max98088 = snd_soc_codec_get_drvdata(codec);
 +       u16 *reg_cache = codec->reg_cache;
         int i;
  
        if (!codec->cache_sync)
                return;

         /* write back cached values if they're writeable and
          * different from the hardware default.
          */
 -       for (i = 1; i < ARRAY_SIZE(max98088->reg_cache); i++) {
 +       for (i = 1; i < codec->driver->reg_cache_size; i++) {
                 if (!max98088_access[i].writable)
                         continue;
  
 -               if (max98088->reg_cache[i] == max98088_reg[i])
 +               if (reg_cache[i] == max98088_reg[i])
                         continue;
  
 -               snd_soc_write(codec, i, max98088->reg_cache[i]);
 +               snd_soc_write(codec, i, reg_cache[i]);
         }
  
         codec->cache_sync = 0;
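
The loop above is an instance of a common resume-time pattern: skip registers that are read-only or still at their hardware default, and write the rest back in order. A driver-agnostic sketch of the same shape (all types and names here are hypothetical):

    struct reg_desc {
            int writable;
    };

    static void sync_cache(const struct reg_desc *access,
                           const unsigned short *defaults,
                           const unsigned short *cache, int count,
                           void (*write_reg)(int reg, unsigned short val))
    {
            int i;

            /* Register 0 is skipped, matching the loop above. */
            for (i = 1; i < count; i++) {
                    if (!access[i].writable)
                            continue;
                    if (cache[i] == defaults[i])
                            continue;       /* already at power-on value */
                    write_reg(i, cache[i]);
            }
    }
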
@@@ -1950,6 -1951,7 +1950,6 @@@ static int max98088_probe(struct snd_so
         int ret = 0;
  
         codec->cache_sync = 1;
 -       memcpy(codec->reg_cache, max98088_reg, sizeof(max98088_reg));
  
         ret = snd_soc_codec_set_cache_io(codec, 8, 8, SND_SOC_I2C);
         if (ret != 0) {
                 return ret;
         }
  
-        /* initalize private data */
+        /* initialize private data */
  
         max98088->sysclk = (unsigned)-1;
         max98088->eq_textcnt = 0;