Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/jmorris/security-testing-2.6
author     Linus Torvalds <torvalds@linux-foundation.org>
           Thu, 3 Mar 2011 23:48:01 +0000 (15:48 -0800)
committer  Linus Torvalds <torvalds@linux-foundation.org>
           Thu, 3 Mar 2011 23:48:01 +0000 (15:48 -0800)
* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/jmorris/security-testing-2.6:
  DNS: Fix a NULL pointer deref when trying to read an error key [CVE-2011-1076]

110 files changed:
Documentation/networking/00-INDEX
MAINTAINERS
arch/alpha/Kconfig
arch/alpha/kernel/irq.c
arch/alpha/kernel/irq_alpha.c
arch/alpha/kernel/irq_i8259.c
arch/alpha/kernel/irq_impl.h
arch/alpha/kernel/irq_pyxis.c
arch/alpha/kernel/irq_srm.c
arch/alpha/kernel/sys_alcor.c
arch/alpha/kernel/sys_cabriolet.c
arch/alpha/kernel/sys_dp264.c
arch/alpha/kernel/sys_eb64p.c
arch/alpha/kernel/sys_eiger.c
arch/alpha/kernel/sys_jensen.c
arch/alpha/kernel/sys_marvel.c
arch/alpha/kernel/sys_mikasa.c
arch/alpha/kernel/sys_noritake.c
arch/alpha/kernel/sys_rawhide.c
arch/alpha/kernel/sys_rx164.c
arch/alpha/kernel/sys_sable.c
arch/alpha/kernel/sys_takara.c
arch/alpha/kernel/sys_titan.c
arch/alpha/kernel/sys_wildfire.c
arch/powerpc/include/asm/machdep.h
arch/powerpc/kernel/machine_kexec.c
arch/powerpc/kernel/process.c
arch/powerpc/mm/tlb_hash64.c
arch/x86/include/asm/msr-index.h
arch/x86/kernel/cpu/cpufreq/p4-clockmod.c
arch/x86/kernel/cpu/cpufreq/powernow-k8.c
arch/x86/platform/olpc/olpc_dt.c
block/blk-core.c
block/blk-flush.c
block/blk-lib.c
block/blk-throttle.c
block/cfq-iosched.c
block/elevator.c
drivers/acpi/acpica/aclocal.h
drivers/acpi/acpica/evgpe.c
drivers/acpi/acpica/evxfgpe.c
drivers/acpi/debugfs.c
drivers/block/loop.c
drivers/cpufreq/cpufreq.c
drivers/gpu/drm/i915/i915_dma.c
drivers/idle/intel_idle.c
drivers/isdn/hardware/eicon/istream.c
drivers/mfd/asic3.c
drivers/mfd/davinci_voicecodec.c
drivers/mfd/tps6586x.c
drivers/mfd/ucb1x00-ts.c
drivers/mfd/wm8994-core.c
drivers/net/bnx2x/bnx2x.h
drivers/net/bnx2x/bnx2x_cmn.c
drivers/net/bnx2x/bnx2x_cmn.h
drivers/net/bnx2x/bnx2x_ethtool.c
drivers/net/bnx2x/bnx2x_init.h
drivers/net/bnx2x/bnx2x_main.c
drivers/net/bnx2x/bnx2x_stats.c
drivers/net/can/softing/softing_main.c
drivers/net/cnic.c
drivers/net/davinci_emac.c
drivers/net/dnet.c
drivers/net/e1000/e1000_osdep.h
drivers/net/e1000e/netdev.c
drivers/net/fec.c
drivers/net/igbvf/vf.c
drivers/net/macb.c
drivers/net/pcmcia/fmvj18x_cs.c
drivers/net/r8169.c
drivers/net/skge.c
drivers/net/wireless/ath/ath9k/hif_usb.c
drivers/net/wireless/ath/ath9k/mac.c
drivers/net/wireless/ath/carl9170/usb.c
drivers/net/wireless/iwlwifi/iwl-5000.c
drivers/net/wireless/p54/p54usb.c
drivers/net/wireless/rndis_wlan.c
drivers/of/pdt.c
drivers/scsi/scsi_lib.c
drivers/scsi/scsi_transport_fc.c
drivers/tty/serial/serial_cs.c
drivers/usb/gadget/f_phonet.c
drivers/usb/host/ehci-xilinx-of.c
fs/exofs/namei.c
fs/ext2/namei.c
fs/hfs/dir.c
fs/minix/namei.c
fs/nilfs2/namei.c
fs/nilfs2/segment.c
fs/proc/proc_devtree.c
fs/reiserfs/namei.c
fs/sysv/namei.c
fs/udf/namei.c
fs/ufs/namei.c
fs/xfs/linux-2.6/xfs_ioctl.c
include/keys/rxrpc-type.h
include/linux/blkdev.h
include/linux/blktrace_api.h
include/linux/mfd/wm8994/core.h
include/trace/events/block.h
kernel/trace/blktrace.c
lib/nlattr.c
net/core/dev_addr_lists.c
net/dcb/dcbnl.c
net/dccp/input.c
net/ipv6/route.c
net/netfilter/ipvs/ip_vs_ctl.c
net/netfilter/nf_log.c
net/netlink/af_netlink.c
net/rxrpc/ar-input.c

diff --git a/Documentation/networking/00-INDEX b/Documentation/networking/00-INDEX
index fe5c099..4edd78d 100644
@@ -40,8 +40,6 @@ decnet.txt
        - info on using the DECnet networking layer in Linux.
 depca.txt
        - the Digital DEPCA/EtherWORKS DE1?? and DE2?? LANCE Ethernet driver
-dgrs.txt
-       - the Digi International RightSwitch SE-X Ethernet driver
 dmfe.txt
        - info on the Davicom DM9102(A)/DM9132/DM9801 fast ethernet driver.
 e100.txt
@@ -50,8 +48,6 @@ e1000.txt
        - info on Intel's E1000 line of gigabit ethernet boards
 eql.txt
        - serial IP load balancing
-ethertap.txt
-       - the Ethertap user space packet reception and transmission driver
 ewrk3.txt
        - the Digital EtherWORKS 3 DE203/4/5 Ethernet driver
 filter.txt
@@ -104,8 +100,6 @@ tuntap.txt
        - TUN/TAP device driver, allowing user space Rx/Tx of packets.
 vortex.txt
        - info on using 3Com Vortex (3c590, 3c592, 3c595, 3c597) Ethernet cards.
-wavelan.txt
-       - AT&T GIS (nee NCR) WaveLAN card: An Ethernet-like radio transceiver
 x25.txt
        - general info on X.25 development.
 x25-iface.txt
diff --git a/MAINTAINERS b/MAINTAINERS
index 8afba63..6d18148 100644
@@ -1467,6 +1467,7 @@ F:        include/net/bluetooth/
 
 BONDING DRIVER
 M:     Jay Vosburgh <fubar@us.ibm.com>
+M:     Andy Gospodarek <andy@greyhouse.net>
 L:     netdev@vger.kernel.org
 W:     http://sourceforge.net/projects/bonding/
 S:     Supported
@@ -2033,7 +2034,7 @@ F:        Documentation/scsi/dc395x.txt
 F:     drivers/scsi/dc395x.*
 
 DCCP PROTOCOL
-M:     Arnaldo Carvalho de Melo <acme@ghostprotocols.net>
+M:     Gerrit Renker <gerrit@erg.abdn.ac.uk>
 L:     dccp@vger.kernel.org
 W:     http://www.linuxfoundation.org/collaborate/workgroups/networking/dccp
 S:     Maintained
diff --git a/arch/alpha/Kconfig b/arch/alpha/Kconfig
index 47f63d4..cc31bec 100644
@@ -11,6 +11,7 @@ config ALPHA
        select HAVE_GENERIC_HARDIRQS
        select GENERIC_IRQ_PROBE
        select AUTO_IRQ_AFFINITY if SMP
+       select GENERIC_HARDIRQS_NO_DEPRECATED
        help
          The Alpha is a 64-bit general-purpose processor designed and
          marketed by the Digital Equipment Corporation of blessed memory,
diff --git a/arch/alpha/kernel/irq.c b/arch/alpha/kernel/irq.c
index 9ab234f..a19d600 100644
@@ -44,11 +44,16 @@ static char irq_user_affinity[NR_IRQS];
 
 int irq_select_affinity(unsigned int irq)
 {
-       struct irq_desc *desc = irq_to_desc[irq];
+       struct irq_data *data = irq_get_irq_data(irq);
+       struct irq_chip *chip;
        static int last_cpu;
        int cpu = last_cpu + 1;
 
-       if (!desc || !get_irq_desc_chip(desc)->set_affinity || irq_user_affinity[irq])
+       if (!data)
+               return 1;
+       chip = irq_data_get_irq_chip(data);
+
+       if (!chip->irq_set_affinity || irq_user_affinity[irq])
                return 1;
 
        while (!cpu_possible(cpu) ||
@@ -56,8 +61,8 @@ int irq_select_affinity(unsigned int irq)
                cpu = (cpu < (NR_CPUS-1) ? cpu + 1 : 0);
        last_cpu = cpu;
 
-       cpumask_copy(desc->affinity, cpumask_of(cpu));
-       get_irq_desc_chip(desc)->set_affinity(irq, cpumask_of(cpu));
+       cpumask_copy(data->affinity, cpumask_of(cpu));
+       chip->irq_set_affinity(data, cpumask_of(cpu), false);
        return 0;
 }
 #endif /* CONFIG_SMP */
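Editorial note: the alpha diffs that follow all apply the same genirq conversion seen above — irq_chip callbacks move from the old unsigned-int-irq signatures (.unmask/.mask/.mask_ack/.set_affinity) to the irq_data-based ones (.irq_unmask/.irq_mask/.irq_mask_ack/.irq_set_affinity), and direct writes to irq_to_desc(i)->status become irq_set_status_flags(). A minimal sketch of that pattern, using a hypothetical controller that is not part of the patch:

#include <linux/irq.h>

/* Hypothetical cached mask for an imaginary interrupt controller. */
static unsigned long example_cached_mask;

static void example_update_hw(unsigned long mask)
{
	/* Write the mask to the (imaginary) controller registers here. */
}

/* New-style callbacks receive struct irq_data and read d->irq. */
static void example_irq_mask(struct irq_data *d)
{
	example_update_hw(example_cached_mask |= 1UL << (d->irq - 16));
}

static void example_irq_unmask(struct irq_data *d)
{
	example_update_hw(example_cached_mask &= ~(1UL << (d->irq - 16)));
}

static struct irq_chip example_irq_type = {
	.name		= "EXAMPLE",
	.irq_unmask	= example_irq_unmask,
	.irq_mask	= example_irq_mask,
	.irq_mask_ack	= example_irq_mask,
};

static void __init example_init_irq(void)
{
	long i;

	for (i = 16; i < 32; ++i) {
		/* Replaces the old irq_to_desc(i)->status |= IRQ_LEVEL. */
		set_irq_chip_and_handler(i, &example_irq_type, handle_level_irq);
		irq_set_status_flags(i, IRQ_LEVEL);
	}
}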
diff --git a/arch/alpha/kernel/irq_alpha.c b/arch/alpha/kernel/irq_alpha.c
index 2d0679b..411ca11 100644
@@ -228,14 +228,9 @@ struct irqaction timer_irqaction = {
 void __init
 init_rtc_irq(void)
 {
-       struct irq_desc *desc = irq_to_desc(RTC_IRQ);
-
-       if (desc) {
-               desc->status |= IRQ_DISABLED;
-               set_irq_chip_and_handler_name(RTC_IRQ, &no_irq_chip,
-                       handle_simple_irq, "RTC");
-               setup_irq(RTC_IRQ, &timer_irqaction);
-       }
+       set_irq_chip_and_handler_name(RTC_IRQ, &no_irq_chip,
+                                     handle_simple_irq, "RTC");
+       setup_irq(RTC_IRQ, &timer_irqaction);
 }
 
 /* Dummy irqactions.  */
diff --git a/arch/alpha/kernel/irq_i8259.c b/arch/alpha/kernel/irq_i8259.c
index 956ea0e..c7cc981 100644
@@ -33,10 +33,10 @@ i8259_update_irq_hw(unsigned int irq, unsigned long mask)
 }
 
 inline void
-i8259a_enable_irq(unsigned int irq)
+i8259a_enable_irq(struct irq_data *d)
 {
        spin_lock(&i8259_irq_lock);
-       i8259_update_irq_hw(irq, cached_irq_mask &= ~(1 << irq));
+       i8259_update_irq_hw(d->irq, cached_irq_mask &= ~(1 << d->irq));
        spin_unlock(&i8259_irq_lock);
 }
 
@@ -47,16 +47,18 @@ __i8259a_disable_irq(unsigned int irq)
 }
 
 void
-i8259a_disable_irq(unsigned int irq)
+i8259a_disable_irq(struct irq_data *d)
 {
        spin_lock(&i8259_irq_lock);
-       __i8259a_disable_irq(irq);
+       __i8259a_disable_irq(d->irq);
        spin_unlock(&i8259_irq_lock);
 }
 
 void
-i8259a_mask_and_ack_irq(unsigned int irq)
+i8259a_mask_and_ack_irq(struct irq_data *d)
 {
+       unsigned int irq = d->irq;
+
        spin_lock(&i8259_irq_lock);
        __i8259a_disable_irq(irq);
 
@@ -71,9 +73,9 @@ i8259a_mask_and_ack_irq(unsigned int irq)
 
 struct irq_chip i8259a_irq_type = {
        .name           = "XT-PIC",
-       .unmask         = i8259a_enable_irq,
-       .mask           = i8259a_disable_irq,
-       .mask_ack       = i8259a_mask_and_ack_irq,
+       .irq_unmask     = i8259a_enable_irq,
+       .irq_mask       = i8259a_disable_irq,
+       .irq_mask_ack   = i8259a_mask_and_ack_irq,
 };
 
 void __init
diff --git a/arch/alpha/kernel/irq_impl.h b/arch/alpha/kernel/irq_impl.h
index b63ccd7..d507a23 100644
@@ -31,11 +31,9 @@ extern void init_rtc_irq(void);
 
 extern void common_init_isa_dma(void);
 
-extern void i8259a_enable_irq(unsigned int);
-extern void i8259a_disable_irq(unsigned int);
-extern void i8259a_mask_and_ack_irq(unsigned int);
-extern unsigned int i8259a_startup_irq(unsigned int);
-extern void i8259a_end_irq(unsigned int);
+extern void i8259a_enable_irq(struct irq_data *d);
+extern void i8259a_disable_irq(struct irq_data *d);
+extern void i8259a_mask_and_ack_irq(struct irq_data *d);
 extern struct irq_chip i8259a_irq_type;
 extern void init_i8259a_irqs(void);
 
diff --git a/arch/alpha/kernel/irq_pyxis.c b/arch/alpha/kernel/irq_pyxis.c
index 2863458..b30227f 100644
@@ -29,21 +29,21 @@ pyxis_update_irq_hw(unsigned long mask)
 }
 
 static inline void
-pyxis_enable_irq(unsigned int irq)
+pyxis_enable_irq(struct irq_data *d)
 {
-       pyxis_update_irq_hw(cached_irq_mask |= 1UL << (irq - 16));
+       pyxis_update_irq_hw(cached_irq_mask |= 1UL << (d->irq - 16));
 }
 
 static void
-pyxis_disable_irq(unsigned int irq)
+pyxis_disable_irq(struct irq_data *d)
 {
-       pyxis_update_irq_hw(cached_irq_mask &= ~(1UL << (irq - 16)));
+       pyxis_update_irq_hw(cached_irq_mask &= ~(1UL << (d->irq - 16)));
 }
 
 static void
-pyxis_mask_and_ack_irq(unsigned int irq)
+pyxis_mask_and_ack_irq(struct irq_data *d)
 {
-       unsigned long bit = 1UL << (irq - 16);
+       unsigned long bit = 1UL << (d->irq - 16);
        unsigned long mask = cached_irq_mask &= ~bit;
 
        /* Disable the interrupt.  */
@@ -58,9 +58,9 @@ pyxis_mask_and_ack_irq(unsigned int irq)
 
 static struct irq_chip pyxis_irq_type = {
        .name           = "PYXIS",
-       .mask_ack       = pyxis_mask_and_ack_irq,
-       .mask           = pyxis_disable_irq,
-       .unmask         = pyxis_enable_irq,
+       .irq_mask_ack   = pyxis_mask_and_ack_irq,
+       .irq_mask       = pyxis_disable_irq,
+       .irq_unmask     = pyxis_enable_irq,
 };
 
 void 
@@ -103,7 +103,7 @@ init_pyxis_irqs(unsigned long ignore_mask)
                if ((ignore_mask >> i) & 1)
                        continue;
                set_irq_chip_and_handler(i, &pyxis_irq_type, handle_level_irq);
-               irq_to_desc(i)->status |= IRQ_LEVEL;
+               irq_set_status_flags(i, IRQ_LEVEL);
        }
 
        setup_irq(16+7, &isa_cascade_irqaction);
diff --git a/arch/alpha/kernel/irq_srm.c b/arch/alpha/kernel/irq_srm.c
index 0e57e82..82a47bb 100644
 DEFINE_SPINLOCK(srm_irq_lock);
 
 static inline void
-srm_enable_irq(unsigned int irq)
+srm_enable_irq(struct irq_data *d)
 {
        spin_lock(&srm_irq_lock);
-       cserve_ena(irq - 16);
+       cserve_ena(d->irq - 16);
        spin_unlock(&srm_irq_lock);
 }
 
 static void
-srm_disable_irq(unsigned int irq)
+srm_disable_irq(struct irq_data *d)
 {
        spin_lock(&srm_irq_lock);
-       cserve_dis(irq - 16);
+       cserve_dis(d->irq - 16);
        spin_unlock(&srm_irq_lock);
 }
 
 /* Handle interrupts from the SRM, assuming no additional weirdness.  */
 static struct irq_chip srm_irq_type = {
        .name           = "SRM",
-       .unmask         = srm_enable_irq,
-       .mask           = srm_disable_irq,
-       .mask_ack       = srm_disable_irq,
+       .irq_unmask     = srm_enable_irq,
+       .irq_mask       = srm_disable_irq,
+       .irq_mask_ack   = srm_disable_irq,
 };
 
 void __init
@@ -52,7 +52,7 @@ init_srm_irqs(long max, unsigned long ignore_mask)
                if (i < 64 && ((ignore_mask >> i) & 1))
                        continue;
                set_irq_chip_and_handler(i, &srm_irq_type, handle_level_irq);
-               irq_to_desc(i)->status |= IRQ_LEVEL;
+               irq_set_status_flags(i, IRQ_LEVEL);
        }
 }
 
diff --git a/arch/alpha/kernel/sys_alcor.c b/arch/alpha/kernel/sys_alcor.c
index 7bef617..88d95e8 100644
@@ -44,31 +44,31 @@ alcor_update_irq_hw(unsigned long mask)
 }
 
 static inline void
-alcor_enable_irq(unsigned int irq)
+alcor_enable_irq(struct irq_data *d)
 {
-       alcor_update_irq_hw(cached_irq_mask |= 1UL << (irq - 16));
+       alcor_update_irq_hw(cached_irq_mask |= 1UL << (d->irq - 16));
 }
 
 static void
-alcor_disable_irq(unsigned int irq)
+alcor_disable_irq(struct irq_data *d)
 {
-       alcor_update_irq_hw(cached_irq_mask &= ~(1UL << (irq - 16)));
+       alcor_update_irq_hw(cached_irq_mask &= ~(1UL << (d->irq - 16)));
 }
 
 static void
-alcor_mask_and_ack_irq(unsigned int irq)
+alcor_mask_and_ack_irq(struct irq_data *d)
 {
-       alcor_disable_irq(irq);
+       alcor_disable_irq(d);
 
        /* On ALCOR/XLT, need to dismiss interrupt via GRU. */
-       *(vuip)GRU_INT_CLEAR = 1 << (irq - 16); mb();
+       *(vuip)GRU_INT_CLEAR = 1 << (d->irq - 16); mb();
        *(vuip)GRU_INT_CLEAR = 0; mb();
 }
 
 static void
-alcor_isa_mask_and_ack_irq(unsigned int irq)
+alcor_isa_mask_and_ack_irq(struct irq_data *d)
 {
-       i8259a_mask_and_ack_irq(irq);
+       i8259a_mask_and_ack_irq(d);
 
        /* On ALCOR/XLT, need to dismiss interrupt via GRU. */
        *(vuip)GRU_INT_CLEAR = 0x80000000; mb();
@@ -77,9 +77,9 @@ alcor_isa_mask_and_ack_irq(unsigned int irq)
 
 static struct irq_chip alcor_irq_type = {
        .name           = "ALCOR",
-       .unmask         = alcor_enable_irq,
-       .mask           = alcor_disable_irq,
-       .mask_ack       = alcor_mask_and_ack_irq,
+       .irq_unmask     = alcor_enable_irq,
+       .irq_mask       = alcor_disable_irq,
+       .irq_mask_ack   = alcor_mask_and_ack_irq,
 };
 
 static void
@@ -126,9 +126,9 @@ alcor_init_irq(void)
                if (i >= 16+20 && i <= 16+30)
                        continue;
                set_irq_chip_and_handler(i, &alcor_irq_type, handle_level_irq);
-               irq_to_desc(i)->status |= IRQ_LEVEL;
+               irq_set_status_flags(i, IRQ_LEVEL);
        }
-       i8259a_irq_type.ack = alcor_isa_mask_and_ack_irq;
+       i8259a_irq_type.irq_ack = alcor_isa_mask_and_ack_irq;
 
        init_i8259a_irqs();
        common_init_isa_dma();
diff --git a/arch/alpha/kernel/sys_cabriolet.c b/arch/alpha/kernel/sys_cabriolet.c
index b0c9164..57eb630 100644
@@ -46,22 +46,22 @@ cabriolet_update_irq_hw(unsigned int irq, unsigned long mask)
 }
 
 static inline void
-cabriolet_enable_irq(unsigned int irq)
+cabriolet_enable_irq(struct irq_data *d)
 {
-       cabriolet_update_irq_hw(irq, cached_irq_mask &= ~(1UL << irq));
+       cabriolet_update_irq_hw(d->irq, cached_irq_mask &= ~(1UL << d->irq));
 }
 
 static void
-cabriolet_disable_irq(unsigned int irq)
+cabriolet_disable_irq(struct irq_data *d)
 {
-       cabriolet_update_irq_hw(irq, cached_irq_mask |= 1UL << irq);
+       cabriolet_update_irq_hw(d->irq, cached_irq_mask |= 1UL << d->irq);
 }
 
 static struct irq_chip cabriolet_irq_type = {
        .name           = "CABRIOLET",
-       .unmask         = cabriolet_enable_irq,
-       .mask           = cabriolet_disable_irq,
-       .mask_ack       = cabriolet_disable_irq,
+       .irq_unmask     = cabriolet_enable_irq,
+       .irq_mask       = cabriolet_disable_irq,
+       .irq_mask_ack   = cabriolet_disable_irq,
 };
 
 static void 
@@ -107,7 +107,7 @@ common_init_irq(void (*srm_dev_int)(unsigned long v))
                for (i = 16; i < 35; ++i) {
                        set_irq_chip_and_handler(i, &cabriolet_irq_type,
                                handle_level_irq);
-                       irq_to_desc(i)->status |= IRQ_LEVEL;
+                       irq_set_status_flags(i, IRQ_LEVEL);
                }
        }
 
diff --git a/arch/alpha/kernel/sys_dp264.c b/arch/alpha/kernel/sys_dp264.c
index edad5f7..481df4e 100644
@@ -98,37 +98,37 @@ tsunami_update_irq_hw(unsigned long mask)
 }
 
 static void
-dp264_enable_irq(unsigned int irq)
+dp264_enable_irq(struct irq_data *d)
 {
        spin_lock(&dp264_irq_lock);
-       cached_irq_mask |= 1UL << irq;
+       cached_irq_mask |= 1UL << d->irq;
        tsunami_update_irq_hw(cached_irq_mask);
        spin_unlock(&dp264_irq_lock);
 }
 
 static void
-dp264_disable_irq(unsigned int irq)
+dp264_disable_irq(struct irq_data *d)
 {
        spin_lock(&dp264_irq_lock);
-       cached_irq_mask &= ~(1UL << irq);
+       cached_irq_mask &= ~(1UL << d->irq);
        tsunami_update_irq_hw(cached_irq_mask);
        spin_unlock(&dp264_irq_lock);
 }
 
 static void
-clipper_enable_irq(unsigned int irq)
+clipper_enable_irq(struct irq_data *d)
 {
        spin_lock(&dp264_irq_lock);
-       cached_irq_mask |= 1UL << (irq - 16);
+       cached_irq_mask |= 1UL << (d->irq - 16);
        tsunami_update_irq_hw(cached_irq_mask);
        spin_unlock(&dp264_irq_lock);
 }
 
 static void
-clipper_disable_irq(unsigned int irq)
+clipper_disable_irq(struct irq_data *d)
 {
        spin_lock(&dp264_irq_lock);
-       cached_irq_mask &= ~(1UL << (irq - 16));
+       cached_irq_mask &= ~(1UL << (d->irq - 16));
        tsunami_update_irq_hw(cached_irq_mask);
        spin_unlock(&dp264_irq_lock);
 }
@@ -149,10 +149,11 @@ cpu_set_irq_affinity(unsigned int irq, cpumask_t affinity)
 }
 
 static int
-dp264_set_affinity(unsigned int irq, const struct cpumask *affinity)
-{ 
+dp264_set_affinity(struct irq_data *d, const struct cpumask *affinity,
+                  bool force)
+{
        spin_lock(&dp264_irq_lock);
-       cpu_set_irq_affinity(irq, *affinity);
+       cpu_set_irq_affinity(d->irq, *affinity);
        tsunami_update_irq_hw(cached_irq_mask);
        spin_unlock(&dp264_irq_lock);
 
@@ -160,10 +161,11 @@ dp264_set_affinity(unsigned int irq, const struct cpumask *affinity)
 }
 
 static int
-clipper_set_affinity(unsigned int irq, const struct cpumask *affinity)
-{ 
+clipper_set_affinity(struct irq_data *d, const struct cpumask *affinity,
+                    bool force)
+{
        spin_lock(&dp264_irq_lock);
-       cpu_set_irq_affinity(irq - 16, *affinity);
+       cpu_set_irq_affinity(d->irq - 16, *affinity);
        tsunami_update_irq_hw(cached_irq_mask);
        spin_unlock(&dp264_irq_lock);
 
@@ -171,19 +173,19 @@ clipper_set_affinity(unsigned int irq, const struct cpumask *affinity)
 }
 
 static struct irq_chip dp264_irq_type = {
-       .name           = "DP264",
-       .unmask         = dp264_enable_irq,
-       .mask           = dp264_disable_irq,
-       .mask_ack       = dp264_disable_irq,
-       .set_affinity   = dp264_set_affinity,
+       .name                   = "DP264",
+       .irq_unmask             = dp264_enable_irq,
+       .irq_mask               = dp264_disable_irq,
+       .irq_mask_ack           = dp264_disable_irq,
+       .irq_set_affinity       = dp264_set_affinity,
 };
 
 static struct irq_chip clipper_irq_type = {
-       .name           = "CLIPPER",
-       .unmask         = clipper_enable_irq,
-       .mask           = clipper_disable_irq,
-       .mask_ack       = clipper_disable_irq,
-       .set_affinity   = clipper_set_affinity,
+       .name                   = "CLIPPER",
+       .irq_unmask             = clipper_enable_irq,
+       .irq_mask               = clipper_disable_irq,
+       .irq_mask_ack           = clipper_disable_irq,
+       .irq_set_affinity       = clipper_set_affinity,
 };
 
 static void
@@ -268,8 +270,8 @@ init_tsunami_irqs(struct irq_chip * ops, int imin, int imax)
 {
        long i;
        for (i = imin; i <= imax; ++i) {
-               irq_to_desc(i)->status |= IRQ_LEVEL;
                set_irq_chip_and_handler(i, ops, handle_level_irq);
+               irq_set_status_flags(i, IRQ_LEVEL);
        }
 }
 
diff --git a/arch/alpha/kernel/sys_eb64p.c b/arch/alpha/kernel/sys_eb64p.c
index ae5f29d..402e908 100644
@@ -44,22 +44,22 @@ eb64p_update_irq_hw(unsigned int irq, unsigned long mask)
 }
 
 static inline void
-eb64p_enable_irq(unsigned int irq)
+eb64p_enable_irq(struct irq_data *d)
 {
-       eb64p_update_irq_hw(irq, cached_irq_mask &= ~(1 << irq));
+       eb64p_update_irq_hw(d->irq, cached_irq_mask &= ~(1 << d->irq));
 }
 
 static void
-eb64p_disable_irq(unsigned int irq)
+eb64p_disable_irq(struct irq_data *d)
 {
-       eb64p_update_irq_hw(irq, cached_irq_mask |= 1 << irq);
+       eb64p_update_irq_hw(d->irq, cached_irq_mask |= 1 << d->irq);
 }
 
 static struct irq_chip eb64p_irq_type = {
        .name           = "EB64P",
-       .unmask         = eb64p_enable_irq,
-       .mask           = eb64p_disable_irq,
-       .mask_ack       = eb64p_disable_irq,
+       .irq_unmask     = eb64p_enable_irq,
+       .irq_mask       = eb64p_disable_irq,
+       .irq_mask_ack   = eb64p_disable_irq,
 };
 
 static void 
@@ -118,9 +118,9 @@ eb64p_init_irq(void)
        init_i8259a_irqs();
 
        for (i = 16; i < 32; ++i) {
-               irq_to_desc(i)->status |= IRQ_LEVEL;
                set_irq_chip_and_handler(i, &eb64p_irq_type, handle_level_irq);
-       }               
+               irq_set_status_flags(i, IRQ_LEVEL);
+       }
 
        common_init_isa_dma();
        setup_irq(16+5, &isa_cascade_irqaction);
diff --git a/arch/alpha/kernel/sys_eiger.c b/arch/alpha/kernel/sys_eiger.c
index 1121bc5..0b44a54 100644
@@ -51,16 +51,18 @@ eiger_update_irq_hw(unsigned long irq, unsigned long mask)
 }
 
 static inline void
-eiger_enable_irq(unsigned int irq)
+eiger_enable_irq(struct irq_data *d)
 {
+       unsigned int irq = d->irq;
        unsigned long mask;
        mask = (cached_irq_mask[irq >= 64] &= ~(1UL << (irq & 63)));
        eiger_update_irq_hw(irq, mask);
 }
 
 static void
-eiger_disable_irq(unsigned int irq)
+eiger_disable_irq(struct irq_data *d)
 {
+       unsigned int irq = d->irq;
        unsigned long mask;
        mask = (cached_irq_mask[irq >= 64] |= 1UL << (irq & 63));
        eiger_update_irq_hw(irq, mask);
@@ -68,9 +70,9 @@ eiger_disable_irq(unsigned int irq)
 
 static struct irq_chip eiger_irq_type = {
        .name           = "EIGER",
-       .unmask         = eiger_enable_irq,
-       .mask           = eiger_disable_irq,
-       .mask_ack       = eiger_disable_irq,
+       .irq_unmask     = eiger_enable_irq,
+       .irq_mask       = eiger_disable_irq,
+       .irq_mask_ack   = eiger_disable_irq,
 };
 
 static void
@@ -136,8 +138,8 @@ eiger_init_irq(void)
        init_i8259a_irqs();
 
        for (i = 16; i < 128; ++i) {
-               irq_to_desc(i)->status |= IRQ_LEVEL;
                set_irq_chip_and_handler(i, &eiger_irq_type, handle_level_irq);
+               irq_set_status_flags(i, IRQ_LEVEL);
        }
 }
 
diff --git a/arch/alpha/kernel/sys_jensen.c b/arch/alpha/kernel/sys_jensen.c
index 34f55e0..00341b7 100644
  */
 
 static void
-jensen_local_enable(unsigned int irq)
+jensen_local_enable(struct irq_data *d)
 {
        /* the parport is really hw IRQ 1, silly Jensen.  */
-       if (irq == 7)
-               i8259a_enable_irq(1);
+       if (d->irq == 7)
+               i8259a_enable_irq(d);
 }
 
 static void
-jensen_local_disable(unsigned int irq)
+jensen_local_disable(struct irq_data *d)
 {
        /* the parport is really hw IRQ 1, silly Jensen.  */
-       if (irq == 7)
-               i8259a_disable_irq(1);
+       if (d->irq == 7)
+               i8259a_disable_irq(d);
 }
 
 static void
-jensen_local_mask_ack(unsigned int irq)
+jensen_local_mask_ack(struct irq_data *d)
 {
        /* the parport is really hw IRQ 1, silly Jensen.  */
-       if (irq == 7)
-               i8259a_mask_and_ack_irq(1);
+       if (d->irq == 7)
+               i8259a_mask_and_ack_irq(d);
 }
 
 static struct irq_chip jensen_local_irq_type = {
        .name           = "LOCAL",
-       .unmask         = jensen_local_enable,
-       .mask           = jensen_local_disable,
-       .mask_ack       = jensen_local_mask_ack,
+       .irq_unmask     = jensen_local_enable,
+       .irq_mask       = jensen_local_disable,
+       .irq_mask_ack   = jensen_local_mask_ack,
 };
 
 static void 
diff --git a/arch/alpha/kernel/sys_marvel.c b/arch/alpha/kernel/sys_marvel.c
index 2bfc9f1..e619107 100644
@@ -104,9 +104,10 @@ io7_get_irq_ctl(unsigned int irq, struct io7 **pio7)
 }
 
 static void
-io7_enable_irq(unsigned int irq)
+io7_enable_irq(struct irq_data *d)
 {
        volatile unsigned long *ctl;
+       unsigned int irq = d->irq;
        struct io7 *io7;
 
        ctl = io7_get_irq_ctl(irq, &io7);
@@ -115,7 +116,7 @@ io7_enable_irq(unsigned int irq)
                       __func__, irq);
                return;
        }
-               
+
        spin_lock(&io7->irq_lock);
        *ctl |= 1UL << 24;
        mb();
@@ -124,9 +125,10 @@ io7_enable_irq(unsigned int irq)
 }
 
 static void
-io7_disable_irq(unsigned int irq)
+io7_disable_irq(struct irq_data *d)
 {
        volatile unsigned long *ctl;
+       unsigned int irq = d->irq;
        struct io7 *io7;
 
        ctl = io7_get_irq_ctl(irq, &io7);
@@ -135,7 +137,7 @@ io7_disable_irq(unsigned int irq)
                       __func__, irq);
                return;
        }
-               
+
        spin_lock(&io7->irq_lock);
        *ctl &= ~(1UL << 24);
        mb();
@@ -144,35 +146,29 @@ io7_disable_irq(unsigned int irq)
 }
 
 static void
-marvel_irq_noop(unsigned int irq) 
-{ 
-       return; 
-}
-
-static unsigned int
-marvel_irq_noop_return(unsigned int irq) 
-{ 
-       return 0; 
+marvel_irq_noop(struct irq_data *d)
+{
+       return;
 }
 
 static struct irq_chip marvel_legacy_irq_type = {
        .name           = "LEGACY",
-       .mask           = marvel_irq_noop,
-       .unmask         = marvel_irq_noop,
+       .irq_mask       = marvel_irq_noop,
+       .irq_unmask     = marvel_irq_noop,
 };
 
 static struct irq_chip io7_lsi_irq_type = {
        .name           = "LSI",
-       .unmask         = io7_enable_irq,
-       .mask           = io7_disable_irq,
-       .mask_ack       = io7_disable_irq,
+       .irq_unmask     = io7_enable_irq,
+       .irq_mask       = io7_disable_irq,
+       .irq_mask_ack   = io7_disable_irq,
 };
 
 static struct irq_chip io7_msi_irq_type = {
        .name           = "MSI",
-       .unmask         = io7_enable_irq,
-       .mask           = io7_disable_irq,
-       .ack            = marvel_irq_noop,
+       .irq_unmask     = io7_enable_irq,
+       .irq_mask       = io7_disable_irq,
+       .irq_ack        = marvel_irq_noop,
 };
 
 static void
@@ -280,8 +276,8 @@ init_io7_irqs(struct io7 *io7,
 
        /* Set up the lsi irqs.  */
        for (i = 0; i < 128; ++i) {
-               irq_to_desc(base + i)->status |= IRQ_LEVEL;
                set_irq_chip_and_handler(base + i, lsi_ops, handle_level_irq);
+               irq_set_status_flags(i, IRQ_LEVEL);
        }
 
        /* Disable the implemented irqs in hardware.  */
@@ -294,8 +290,8 @@ init_io7_irqs(struct io7 *io7,
 
        /* Set up the msi irqs.  */
        for (i = 128; i < (128 + 512); ++i) {
-               irq_to_desc(base + i)->status |= IRQ_LEVEL;
                set_irq_chip_and_handler(base + i, msi_ops, handle_level_irq);
+               irq_set_status_flags(i, IRQ_LEVEL);
        }
 
        for (i = 0; i < 16; ++i)
diff --git a/arch/alpha/kernel/sys_mikasa.c b/arch/alpha/kernel/sys_mikasa.c
index bcc1639..cf7f43d 100644
@@ -43,22 +43,22 @@ mikasa_update_irq_hw(int mask)
 }
 
 static inline void
-mikasa_enable_irq(unsigned int irq)
+mikasa_enable_irq(struct irq_data *d)
 {
-       mikasa_update_irq_hw(cached_irq_mask |= 1 << (irq - 16));
+       mikasa_update_irq_hw(cached_irq_mask |= 1 << (d->irq - 16));
 }
 
 static void
-mikasa_disable_irq(unsigned int irq)
+mikasa_disable_irq(struct irq_data *d)
 {
-       mikasa_update_irq_hw(cached_irq_mask &= ~(1 << (irq - 16)));
+       mikasa_update_irq_hw(cached_irq_mask &= ~(1 << (d->irq - 16)));
 }
 
 static struct irq_chip mikasa_irq_type = {
        .name           = "MIKASA",
-       .unmask         = mikasa_enable_irq,
-       .mask           = mikasa_disable_irq,
-       .mask_ack       = mikasa_disable_irq,
+       .irq_unmask     = mikasa_enable_irq,
+       .irq_mask       = mikasa_disable_irq,
+       .irq_mask_ack   = mikasa_disable_irq,
 };
 
 static void 
@@ -98,8 +98,8 @@ mikasa_init_irq(void)
        mikasa_update_irq_hw(0);
 
        for (i = 16; i < 32; ++i) {
-               irq_to_desc(i)->status |= IRQ_LEVEL;
                set_irq_chip_and_handler(i, &mikasa_irq_type, handle_level_irq);
+               irq_set_status_flags(i, IRQ_LEVEL);
        }
 
        init_i8259a_irqs();
diff --git a/arch/alpha/kernel/sys_noritake.c b/arch/alpha/kernel/sys_noritake.c
index e88f4ae..92bc188 100644
@@ -48,22 +48,22 @@ noritake_update_irq_hw(int irq, int mask)
 }
 
 static void
-noritake_enable_irq(unsigned int irq)
+noritake_enable_irq(struct irq_data *d)
 {
-       noritake_update_irq_hw(irq, cached_irq_mask |= 1 << (irq - 16));
+       noritake_update_irq_hw(d->irq, cached_irq_mask |= 1 << (d->irq - 16));
 }
 
 static void
-noritake_disable_irq(unsigned int irq)
+noritake_disable_irq(struct irq_data *d)
 {
-       noritake_update_irq_hw(irq, cached_irq_mask &= ~(1 << (irq - 16)));
+       noritake_update_irq_hw(d->irq, cached_irq_mask &= ~(1 << (d->irq - 16)));
 }
 
 static struct irq_chip noritake_irq_type = {
        .name           = "NORITAKE",
-       .unmask         = noritake_enable_irq,
-       .mask           = noritake_disable_irq,
-       .mask_ack       = noritake_disable_irq,
+       .irq_unmask     = noritake_enable_irq,
+       .irq_mask       = noritake_disable_irq,
+       .irq_mask_ack   = noritake_disable_irq,
 };
 
 static void 
@@ -127,8 +127,8 @@ noritake_init_irq(void)
        outw(0, 0x54c);
 
        for (i = 16; i < 48; ++i) {
-               irq_to_desc(i)->status |= IRQ_LEVEL;
                set_irq_chip_and_handler(i, &noritake_irq_type, handle_level_irq);
+               irq_set_status_flags(i, IRQ_LEVEL);
        }
 
        init_i8259a_irqs();
diff --git a/arch/alpha/kernel/sys_rawhide.c b/arch/alpha/kernel/sys_rawhide.c
index 6a51364..936d414 100644
@@ -56,9 +56,10 @@ rawhide_update_irq_hw(int hose, int mask)
   (((h) < MCPCIA_MAX_HOSES) && (cached_irq_masks[(h)] != 0))
 
 static inline void 
-rawhide_enable_irq(unsigned int irq)
+rawhide_enable_irq(struct irq_data *d)
 {
        unsigned int mask, hose;
+       unsigned int irq = d->irq;
 
        irq -= 16;
        hose = irq / 24;
@@ -76,9 +77,10 @@ rawhide_enable_irq(unsigned int irq)
 }
 
 static void 
-rawhide_disable_irq(unsigned int irq)
+rawhide_disable_irq(struct irq_data *d)
 {
        unsigned int mask, hose;
+       unsigned int irq = d->irq;
 
        irq -= 16;
        hose = irq / 24;
@@ -96,9 +98,10 @@ rawhide_disable_irq(unsigned int irq)
 }
 
 static void
-rawhide_mask_and_ack_irq(unsigned int irq)
+rawhide_mask_and_ack_irq(struct irq_data *d)
 {
        unsigned int mask, mask1, hose;
+       unsigned int irq = d->irq;
 
        irq -= 16;
        hose = irq / 24;
@@ -123,9 +126,9 @@ rawhide_mask_and_ack_irq(unsigned int irq)
 
 static struct irq_chip rawhide_irq_type = {
        .name           = "RAWHIDE",
-       .unmask         = rawhide_enable_irq,
-       .mask           = rawhide_disable_irq,
-       .mask_ack       = rawhide_mask_and_ack_irq,
+       .irq_unmask     = rawhide_enable_irq,
+       .irq_mask       = rawhide_disable_irq,
+       .irq_mask_ack   = rawhide_mask_and_ack_irq,
 };
 
 static void 
@@ -177,8 +180,8 @@ rawhide_init_irq(void)
        }
 
        for (i = 16; i < 128; ++i) {
-               irq_to_desc(i)->status |= IRQ_LEVEL;
                set_irq_chip_and_handler(i, &rawhide_irq_type, handle_level_irq);
+               irq_set_status_flags(i, IRQ_LEVEL);
        }
 
        init_i8259a_irqs();
diff --git a/arch/alpha/kernel/sys_rx164.c b/arch/alpha/kernel/sys_rx164.c
index 89e7e37..cea22a6 100644
@@ -47,22 +47,22 @@ rx164_update_irq_hw(unsigned long mask)
 }
 
 static inline void
-rx164_enable_irq(unsigned int irq)
+rx164_enable_irq(struct irq_data *d)
 {
-       rx164_update_irq_hw(cached_irq_mask |= 1UL << (irq - 16));
+       rx164_update_irq_hw(cached_irq_mask |= 1UL << (d->irq - 16));
 }
 
 static void
-rx164_disable_irq(unsigned int irq)
+rx164_disable_irq(struct irq_data *d)
 {
-       rx164_update_irq_hw(cached_irq_mask &= ~(1UL << (irq - 16)));
+       rx164_update_irq_hw(cached_irq_mask &= ~(1UL << (d->irq - 16)));
 }
 
 static struct irq_chip rx164_irq_type = {
        .name           = "RX164",
-       .unmask         = rx164_enable_irq,
-       .mask           = rx164_disable_irq,
-       .mask_ack       = rx164_disable_irq,
+       .irq_unmask     = rx164_enable_irq,
+       .irq_mask       = rx164_disable_irq,
+       .irq_mask_ack   = rx164_disable_irq,
 };
 
 static void 
@@ -99,8 +99,8 @@ rx164_init_irq(void)
 
        rx164_update_irq_hw(0);
        for (i = 16; i < 40; ++i) {
-               irq_to_desc(i)->status |= IRQ_LEVEL;
                set_irq_chip_and_handler(i, &rx164_irq_type, handle_level_irq);
+               irq_set_status_flags(i, IRQ_LEVEL);
        }
 
        init_i8259a_irqs();
diff --git a/arch/alpha/kernel/sys_sable.c b/arch/alpha/kernel/sys_sable.c
index 5c4423d..a349538 100644
@@ -443,11 +443,11 @@ lynx_swizzle(struct pci_dev *dev, u8 *pinp)
 /* GENERIC irq routines */
 
 static inline void
-sable_lynx_enable_irq(unsigned int irq)
+sable_lynx_enable_irq(struct irq_data *d)
 {
        unsigned long bit, mask;
 
-       bit = sable_lynx_irq_swizzle->irq_to_mask[irq];
+       bit = sable_lynx_irq_swizzle->irq_to_mask[d->irq];
        spin_lock(&sable_lynx_irq_lock);
        mask = sable_lynx_irq_swizzle->shadow_mask &= ~(1UL << bit);
        sable_lynx_irq_swizzle->update_irq_hw(bit, mask);
@@ -459,11 +459,11 @@ sable_lynx_enable_irq(unsigned int irq)
 }
 
 static void
-sable_lynx_disable_irq(unsigned int irq)
+sable_lynx_disable_irq(struct irq_data *d)
 {
        unsigned long bit, mask;
 
-       bit = sable_lynx_irq_swizzle->irq_to_mask[irq];
+       bit = sable_lynx_irq_swizzle->irq_to_mask[d->irq];
        spin_lock(&sable_lynx_irq_lock);
        mask = sable_lynx_irq_swizzle->shadow_mask |= 1UL << bit;
        sable_lynx_irq_swizzle->update_irq_hw(bit, mask);
@@ -475,11 +475,11 @@ sable_lynx_disable_irq(unsigned int irq)
 }
 
 static void
-sable_lynx_mask_and_ack_irq(unsigned int irq)
+sable_lynx_mask_and_ack_irq(struct irq_data *d)
 {
        unsigned long bit, mask;
 
-       bit = sable_lynx_irq_swizzle->irq_to_mask[irq];
+       bit = sable_lynx_irq_swizzle->irq_to_mask[d->irq];
        spin_lock(&sable_lynx_irq_lock);
        mask = sable_lynx_irq_swizzle->shadow_mask |= 1UL << bit;
        sable_lynx_irq_swizzle->update_irq_hw(bit, mask);
@@ -489,9 +489,9 @@ sable_lynx_mask_and_ack_irq(unsigned int irq)
 
 static struct irq_chip sable_lynx_irq_type = {
        .name           = "SABLE/LYNX",
-       .unmask         = sable_lynx_enable_irq,
-       .mask           = sable_lynx_disable_irq,
-       .mask_ack       = sable_lynx_mask_and_ack_irq,
+       .irq_unmask     = sable_lynx_enable_irq,
+       .irq_mask       = sable_lynx_disable_irq,
+       .irq_mask_ack   = sable_lynx_mask_and_ack_irq,
 };
 
 static void 
@@ -518,9 +518,9 @@ sable_lynx_init_irq(int nr_of_irqs)
        long i;
 
        for (i = 0; i < nr_of_irqs; ++i) {
-               irq_to_desc(i)->status |= IRQ_LEVEL;
                set_irq_chip_and_handler(i, &sable_lynx_irq_type,
                        handle_level_irq);
+               irq_set_status_flags(i, IRQ_LEVEL);
        }
 
        common_init_isa_dma();
diff --git a/arch/alpha/kernel/sys_takara.c b/arch/alpha/kernel/sys_takara.c
index f8a1e8a..42a5331 100644
@@ -45,16 +45,18 @@ takara_update_irq_hw(unsigned long irq, unsigned long mask)
 }
 
 static inline void
-takara_enable_irq(unsigned int irq)
+takara_enable_irq(struct irq_data *d)
 {
+       unsigned int irq = d->irq;
        unsigned long mask;
        mask = (cached_irq_mask[irq >= 64] &= ~(1UL << (irq & 63)));
        takara_update_irq_hw(irq, mask);
 }
 
 static void
-takara_disable_irq(unsigned int irq)
+takara_disable_irq(struct irq_data *d)
 {
+       unsigned int irq = d->irq;
        unsigned long mask;
        mask = (cached_irq_mask[irq >= 64] |= 1UL << (irq & 63));
        takara_update_irq_hw(irq, mask);
@@ -62,9 +64,9 @@ takara_disable_irq(unsigned int irq)
 
 static struct irq_chip takara_irq_type = {
        .name           = "TAKARA",
-       .unmask         = takara_enable_irq,
-       .mask           = takara_disable_irq,
-       .mask_ack       = takara_disable_irq,
+       .irq_unmask     = takara_enable_irq,
+       .irq_mask       = takara_disable_irq,
+       .irq_mask_ack   = takara_disable_irq,
 };
 
 static void
@@ -136,8 +138,8 @@ takara_init_irq(void)
                takara_update_irq_hw(i, -1);
 
        for (i = 16; i < 128; ++i) {
-               irq_to_desc(i)->status |= IRQ_LEVEL;
                set_irq_chip_and_handler(i, &takara_irq_type, handle_level_irq);
+               irq_set_status_flags(i, IRQ_LEVEL);
        }
 
        common_init_isa_dma();
diff --git a/arch/alpha/kernel/sys_titan.c b/arch/alpha/kernel/sys_titan.c
index e02494b..f6c108a 100644
@@ -112,8 +112,9 @@ titan_update_irq_hw(unsigned long mask)
 }
 
 static inline void
-titan_enable_irq(unsigned int irq)
+titan_enable_irq(struct irq_data *d)
 {
+       unsigned int irq = d->irq;
        spin_lock(&titan_irq_lock);
        titan_cached_irq_mask |= 1UL << (irq - 16);
        titan_update_irq_hw(titan_cached_irq_mask);
@@ -121,8 +122,9 @@ titan_enable_irq(unsigned int irq)
 }
 
 static inline void
-titan_disable_irq(unsigned int irq)
+titan_disable_irq(struct irq_data *d)
 {
+       unsigned int irq = d->irq;
        spin_lock(&titan_irq_lock);
        titan_cached_irq_mask &= ~(1UL << (irq - 16));
        titan_update_irq_hw(titan_cached_irq_mask);
@@ -144,7 +146,8 @@ titan_cpu_set_irq_affinity(unsigned int irq, cpumask_t affinity)
 }
 
 static int
-titan_set_irq_affinity(unsigned int irq, const struct cpumask *affinity)
+titan_set_irq_affinity(struct irq_data *d, const struct cpumask *affinity,
+                      bool force)
 { 
        spin_lock(&titan_irq_lock);
        titan_cpu_set_irq_affinity(irq - 16, *affinity);
@@ -175,17 +178,17 @@ init_titan_irqs(struct irq_chip * ops, int imin, int imax)
 {
        long i;
        for (i = imin; i <= imax; ++i) {
-               irq_to_desc(i)->status |= IRQ_LEVEL;
                set_irq_chip_and_handler(i, ops, handle_level_irq);
+               irq_set_status_flags(i, IRQ_LEVEL);
        }
 }
 
 static struct irq_chip titan_irq_type = {
-       .name           = "TITAN",
-       .unmask         = titan_enable_irq,
-       .mask           = titan_disable_irq,
-       .mask_ack       = titan_disable_irq,
-       .set_affinity   = titan_set_irq_affinity,
+       .name                   = "TITAN",
+       .irq_unmask             = titan_enable_irq,
+       .irq_mask               = titan_disable_irq,
+       .irq_mask_ack           = titan_disable_irq,
+       .irq_set_affinity       = titan_set_irq_affinity,
 };
 
 static irqreturn_t
diff --git a/arch/alpha/kernel/sys_wildfire.c b/arch/alpha/kernel/sys_wildfire.c
index eec5259..ca60a38 100644
@@ -104,10 +104,12 @@ wildfire_init_irq_hw(void)
 }
 
 static void
-wildfire_enable_irq(unsigned int irq)
+wildfire_enable_irq(struct irq_data *d)
 {
+       unsigned int irq = d->irq;
+
        if (irq < 16)
-               i8259a_enable_irq(irq);
+               i8259a_enable_irq(d);
 
        spin_lock(&wildfire_irq_lock);
        set_bit(irq, &cached_irq_mask);
@@ -116,10 +118,12 @@ wildfire_enable_irq(unsigned int irq)
 }
 
 static void
-wildfire_disable_irq(unsigned int irq)
+wildfire_disable_irq(struct irq_data *d)
 {
+       unsigned int irq = d->irq;
+
        if (irq < 16)
-               i8259a_disable_irq(irq);
+               i8259a_disable_irq(d);
 
        spin_lock(&wildfire_irq_lock);
        clear_bit(irq, &cached_irq_mask);
@@ -128,10 +132,12 @@ wildfire_disable_irq(unsigned int irq)
 }
 
 static void
-wildfire_mask_and_ack_irq(unsigned int irq)
+wildfire_mask_and_ack_irq(struct irq_data *d)
 {
+       unsigned int irq = d->irq;
+
        if (irq < 16)
-               i8259a_mask_and_ack_irq(irq);
+               i8259a_mask_and_ack_irq(d);
 
        spin_lock(&wildfire_irq_lock);
        clear_bit(irq, &cached_irq_mask);
@@ -141,9 +147,9 @@ wildfire_mask_and_ack_irq(unsigned int irq)
 
 static struct irq_chip wildfire_irq_type = {
        .name           = "WILDFIRE",
-       .unmask         = wildfire_enable_irq,
-       .mask           = wildfire_disable_irq,
-       .mask_ack       = wildfire_mask_and_ack_irq,
+       .irq_unmask     = wildfire_enable_irq,
+       .irq_mask       = wildfire_disable_irq,
+       .irq_mask_ack   = wildfire_mask_and_ack_irq,
 };
 
 static void __init
@@ -177,21 +183,21 @@ wildfire_init_irq_per_pca(int qbbno, int pcano)
        for (i = 0; i < 16; ++i) {
                if (i == 2)
                        continue;
-               irq_to_desc(i+irq_bias)->status |= IRQ_LEVEL;
                set_irq_chip_and_handler(i+irq_bias, &wildfire_irq_type,
                        handle_level_irq);
+               irq_set_status_flags(i + irq_bias, IRQ_LEVEL);
        }
 
-       irq_to_desc(36+irq_bias)->status |= IRQ_LEVEL;
        set_irq_chip_and_handler(36+irq_bias, &wildfire_irq_type,
                handle_level_irq);
+       irq_set_status_flags(36 + irq_bias, IRQ_LEVEL);
        for (i = 40; i < 64; ++i) {
-               irq_to_desc(i+irq_bias)->status |= IRQ_LEVEL;
                set_irq_chip_and_handler(i+irq_bias, &wildfire_irq_type,
                        handle_level_irq);
+               irq_set_status_flags(i + irq_bias, IRQ_LEVEL);
        }
 
-       setup_irq(32+irq_bias, &isa_enable);    
+       setup_irq(32+irq_bias, &isa_enable);
 }
 
 static void __init
diff --git a/arch/powerpc/include/asm/machdep.h b/arch/powerpc/include/asm/machdep.h
index 991d599..fe56a23 100644
@@ -240,6 +240,12 @@ struct machdep_calls {
         * claims to support kexec.
         */
        int (*machine_kexec_prepare)(struct kimage *image);
+
+       /* Called to perform the _real_ kexec.
+        * Do NOT allocate memory or fail here. We are past the point of
+        * no return.
+        */
+       void (*machine_kexec)(struct kimage *image);
 #endif /* CONFIG_KEXEC */
 
 #ifdef CONFIG_SUSPEND
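For illustration only (not taken from this patch): a board port could use the new machine_kexec hook to quiesce its own hardware and then reuse the generic implementation, which the caller in machine_kexec.c (next diff) otherwise falls back to. The example_* names here are hypothetical.

#include <linux/kexec.h>
#include <asm/machdep.h>
#include <asm/kexec.h>

static void example_machine_kexec(struct kimage *image)
{
	/*
	 * Quiesce board-specific hardware here.  Per the machdep.h comment
	 * above, this must not allocate memory or fail.
	 */
	default_machine_kexec(image);
}

/* Wired up in the board's struct machdep_calls initializer, e.g.:
 *	.machine_kexec	= example_machine_kexec,
 */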
diff --git a/arch/powerpc/kernel/machine_kexec.c b/arch/powerpc/kernel/machine_kexec.c
index 49a170a..a5f8672 100644
@@ -87,7 +87,10 @@ void machine_kexec(struct kimage *image)
 
        save_ftrace_enabled = __ftrace_enabled_save();
 
-       default_machine_kexec(image);
+       if (ppc_md.machine_kexec)
+               ppc_md.machine_kexec(image);
+       else
+               default_machine_kexec(image);
 
        __ftrace_enabled_restore(save_ftrace_enabled);
 
diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
index 7a1d5cb..8303a6c 100644
@@ -353,6 +353,7 @@ static void switch_booke_debug_regs(struct thread_struct *new_thread)
                        prime_debug_regs(new_thread);
 }
 #else  /* !CONFIG_PPC_ADV_DEBUG_REGS */
+#ifndef CONFIG_HAVE_HW_BREAKPOINT
 static void set_debug_reg_defaults(struct thread_struct *thread)
 {
        if (thread->dabr) {
@@ -360,6 +361,7 @@ static void set_debug_reg_defaults(struct thread_struct *thread)
                set_dabr(0);
        }
 }
+#endif /* !CONFIG_HAVE_HW_BREAKPOINT */
 #endif /* CONFIG_PPC_ADV_DEBUG_REGS */
 
 int set_dabr(unsigned long dabr)
@@ -670,11 +672,11 @@ void flush_thread(void)
 {
        discard_lazy_cpu_state();
 
-#ifdef CONFIG_HAVE_HW_BREAKPOINTS
+#ifdef CONFIG_HAVE_HW_BREAKPOINT
        flush_ptrace_hw_breakpoint(current);
-#else /* CONFIG_HAVE_HW_BREAKPOINTS */
+#else /* CONFIG_HAVE_HW_BREAKPOINT */
        set_debug_reg_defaults(&current->thread);
-#endif /* CONFIG_HAVE_HW_BREAKPOINTS */
+#endif /* CONFIG_HAVE_HW_BREAKPOINT */
 }
 
 void
diff --git a/arch/powerpc/mm/tlb_hash64.c b/arch/powerpc/mm/tlb_hash64.c
index 1ec0657..c14d09f 100644
@@ -38,13 +38,11 @@ DEFINE_PER_CPU(struct ppc64_tlb_batch, ppc64_tlb_batch);
  * neesd to be flushed. This function will either perform the flush
  * immediately or will batch it up if the current CPU has an active
  * batch on it.
- *
- * Must be called from within some kind of spinlock/non-preempt region...
  */
 void hpte_need_flush(struct mm_struct *mm, unsigned long addr,
                     pte_t *ptep, unsigned long pte, int huge)
 {
-       struct ppc64_tlb_batch *batch = &__get_cpu_var(ppc64_tlb_batch);
+       struct ppc64_tlb_batch *batch = &get_cpu_var(ppc64_tlb_batch);
        unsigned long vsid, vaddr;
        unsigned int psize;
        int ssize;
@@ -99,6 +97,7 @@ void hpte_need_flush(struct mm_struct *mm, unsigned long addr,
         */
        if (!batch->active) {
                flush_hash_page(vaddr, rpte, psize, ssize, 0);
+               put_cpu_var(ppc64_tlb_batch);
                return;
        }
 
@@ -127,6 +126,7 @@ void hpte_need_flush(struct mm_struct *mm, unsigned long addr,
        batch->index = ++i;
        if (i >= PPC64_TLB_BATCH_NR)
                __flush_tlb_pending(batch);
+       put_cpu_var(ppc64_tlb_batch);
 }
 
 /*
diff --git a/arch/x86/include/asm/msr-index.h b/arch/x86/include/asm/msr-index.h
index 4d0dfa0..43a18c7 100644
 #define MSR_IA32_PERFCTR1              0x000000c2
 #define MSR_FSB_FREQ                   0x000000cd
 
+#define MSR_NHM_SNB_PKG_CST_CFG_CTL    0x000000e2
+#define NHM_C3_AUTO_DEMOTE             (1UL << 25)
+#define NHM_C1_AUTO_DEMOTE             (1UL << 26)
+#define ATM_LNC_C6_AUTO_DEMOTE         (1UL << 25)
+
 #define MSR_MTRRcap                    0x000000fe
 #define MSR_IA32_BBL_CR_CTL            0x00000119
 
diff --git a/arch/x86/kernel/cpu/cpufreq/p4-clockmod.c b/arch/x86/kernel/cpu/cpufreq/p4-clockmod.c
index bd1cac7..52c9364 100644
@@ -158,9 +158,9 @@ static unsigned int cpufreq_p4_get_frequency(struct cpuinfo_x86 *c)
 {
        if (c->x86 == 0x06) {
                if (cpu_has(c, X86_FEATURE_EST))
-                       printk(KERN_WARNING PFX "Warning: EST-capable CPU "
-                              "detected. The acpi-cpufreq module offers "
-                              "voltage scaling in addition of frequency "
+                       printk_once(KERN_WARNING PFX "Warning: EST-capable "
+                              "CPU detected. The acpi-cpufreq module offers "
+                              "voltage scaling in addition to frequency "
                               "scaling. You should use that instead of "
                               "p4-clockmod, if possible.\n");
                switch (c->x86_model) {
diff --git a/arch/x86/kernel/cpu/cpufreq/powernow-k8.c b/arch/x86/kernel/cpu/cpufreq/powernow-k8.c
index 35c7e65..c567dec 100644
@@ -1537,6 +1537,7 @@ static struct notifier_block cpb_nb = {
 static int __cpuinit powernowk8_init(void)
 {
        unsigned int i, supported_cpus = 0, cpu;
+       int rv;
 
        for_each_online_cpu(i) {
                int rc;
@@ -1555,14 +1556,14 @@ static int __cpuinit powernowk8_init(void)
 
                cpb_capable = true;
 
-               register_cpu_notifier(&cpb_nb);
-
                msrs = msrs_alloc();
                if (!msrs) {
                        printk(KERN_ERR "%s: Error allocating msrs!\n", __func__);
                        return -ENOMEM;
                }
 
+               register_cpu_notifier(&cpb_nb);
+
                rdmsr_on_cpus(cpu_online_mask, MSR_K7_HWCR, msrs);
 
                for_each_cpu(cpu, cpu_online_mask) {
@@ -1574,7 +1575,13 @@ static int __cpuinit powernowk8_init(void)
                        (cpb_enabled ? "on" : "off"));
        }
 
-       return cpufreq_register_driver(&cpufreq_amd64_driver);
+       rv = cpufreq_register_driver(&cpufreq_amd64_driver);
+       if (rv < 0 && boot_cpu_has(X86_FEATURE_CPB)) {
+               unregister_cpu_notifier(&cpb_nb);
+               msrs_free(msrs);
+               msrs = NULL;
+       }
+       return rv;
 }
 
 /* driver entry point for term */
diff --git a/arch/x86/platform/olpc/olpc_dt.c b/arch/x86/platform/olpc/olpc_dt.c
index dab8746..044bda5 100644
@@ -140,8 +140,7 @@ void * __init prom_early_alloc(unsigned long size)
                 * wasted bootmem) and hand off chunks of it to callers.
                 */
                res = alloc_bootmem(chunk_size);
-               if (!res)
-                       return NULL;
+               BUG_ON(!res);
                prom_early_allocated += chunk_size;
                memset(res, 0, chunk_size);
                free_mem = chunk_size;
diff --git a/block/blk-core.c b/block/blk-core.c
index 2f4002f..518dd42 100644
@@ -352,7 +352,7 @@ void blk_start_queue(struct request_queue *q)
        WARN_ON(!irqs_disabled());
 
        queue_flag_clear(QUEUE_FLAG_STOPPED, q);
-       __blk_run_queue(q);
+       __blk_run_queue(q, false);
 }
 EXPORT_SYMBOL(blk_start_queue);
 
@@ -403,13 +403,14 @@ EXPORT_SYMBOL(blk_sync_queue);
 /**
  * __blk_run_queue - run a single device queue
  * @q: The queue to run
+ * @force_kblockd: Don't run @q->request_fn directly.  Use kblockd.
  *
  * Description:
  *    See @blk_run_queue. This variant must be called with the queue lock
  *    held and interrupts disabled.
  *
  */
-void __blk_run_queue(struct request_queue *q)
+void __blk_run_queue(struct request_queue *q, bool force_kblockd)
 {
        blk_remove_plug(q);
 
@@ -423,7 +424,7 @@ void __blk_run_queue(struct request_queue *q)
         * Only recurse once to avoid overrunning the stack, let the unplug
         * handling reinvoke the handler shortly if we already got there.
         */
-       if (!queue_flag_test_and_set(QUEUE_FLAG_REENTER, q)) {
+       if (!force_kblockd && !queue_flag_test_and_set(QUEUE_FLAG_REENTER, q)) {
                q->request_fn(q);
                queue_flag_clear(QUEUE_FLAG_REENTER, q);
        } else {
@@ -446,7 +447,7 @@ void blk_run_queue(struct request_queue *q)
        unsigned long flags;
 
        spin_lock_irqsave(q->queue_lock, flags);
-       __blk_run_queue(q);
+       __blk_run_queue(q, false);
        spin_unlock_irqrestore(q->queue_lock, flags);
 }
 EXPORT_SYMBOL(blk_run_queue);
@@ -1053,7 +1054,7 @@ void blk_insert_request(struct request_queue *q, struct request *rq,
 
        drive_stat_acct(rq, 1);
        __elv_add_request(q, rq, where, 0);
-       __blk_run_queue(q);
+       __blk_run_queue(q, false);
        spin_unlock_irqrestore(q->queue_lock, flags);
 }
 EXPORT_SYMBOL(blk_insert_request);
@@ -2610,13 +2611,6 @@ int kblockd_schedule_work(struct request_queue *q, struct work_struct *work)
 }
 EXPORT_SYMBOL(kblockd_schedule_work);
 
-int kblockd_schedule_delayed_work(struct request_queue *q,
-                       struct delayed_work *dwork, unsigned long delay)
-{
-       return queue_delayed_work(kblockd_workqueue, dwork, delay);
-}
-EXPORT_SYMBOL(kblockd_schedule_delayed_work);
-
 int __init blk_dev_init(void)
 {
        BUILD_BUG_ON(__REQ_NR_BITS > 8 *
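For reference (illustrative, not part of the patch): every caller of __blk_run_queue() now passes an explicit force_kblockd flag. false keeps the old behaviour of possibly calling ->request_fn directly; true always defers to kblockd, which the flush path in the next diff (blk-flush.c) relies on to avoid re-entering the driver from a completion context. The example_* helpers below are hypothetical.

#include <linux/blkdev.h>

/* Normal context: take the queue lock; ->request_fn may be run directly. */
static void example_kick_queue(struct request_queue *q)
{
	unsigned long flags;

	spin_lock_irqsave(q->queue_lock, flags);
	__blk_run_queue(q, false);
	spin_unlock_irqrestore(q->queue_lock, flags);
}

/* Request completion path, queue lock already held: always use kblockd. */
static void example_completion_kick(struct request_queue *q)
{
	__blk_run_queue(q, true);
}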
diff --git a/block/blk-flush.c b/block/blk-flush.c
index 54b123d..b27d020 100644
@@ -66,10 +66,12 @@ static void blk_flush_complete_seq_end_io(struct request_queue *q,
 
        /*
         * Moving a request silently to empty queue_head may stall the
-        * queue.  Kick the queue in those cases.
+        * queue.  Kick the queue in those cases.  This function is called
+        * from request completion path and calling directly into
+        * request_fn may confuse the driver.  Always use kblockd.
         */
        if (was_empty && next_rq)
-               __blk_run_queue(q);
+               __blk_run_queue(q, true);
 }
 
 static void pre_flush_end_io(struct request *rq, int error)
@@ -130,7 +132,7 @@ static struct request *queue_next_fseq(struct request_queue *q)
                BUG();
        }
 
-       elv_insert(q, rq, ELEVATOR_INSERT_FRONT);
+       elv_insert(q, rq, ELEVATOR_INSERT_REQUEUE);
        return rq;
 }
 
diff --git a/block/blk-lib.c b/block/blk-lib.c
index 1a320d2..eec78be 100644
@@ -132,7 +132,7 @@ static void bio_batch_end_io(struct bio *bio, int err)
 }
 
 /**
- * blkdev_issue_zeroout generate number of zero filed write bios
+ * blkdev_issue_zeroout - generate number of zero filed write bios
  * @bdev:      blockdev to issue
  * @sector:    start sector
  * @nr_sects:  number of sectors to write
diff --git a/block/blk-throttle.c b/block/blk-throttle.c
index a89043a..e36cc10 100644
@@ -20,6 +20,11 @@ static int throtl_quantum = 32;
 /* Throttling is performed over 100ms slice and after that slice is renewed */
 static unsigned long throtl_slice = HZ/10;     /* 100 ms */
 
+/* A workqueue to queue throttle related work */
+static struct workqueue_struct *kthrotld_workqueue;
+static void throtl_schedule_delayed_work(struct throtl_data *td,
+                               unsigned long delay);
+
 struct throtl_rb_root {
        struct rb_root rb;
        struct rb_node *left;
@@ -345,10 +350,9 @@ static void throtl_schedule_next_dispatch(struct throtl_data *td)
        update_min_dispatch_time(st);
 
        if (time_before_eq(st->min_disptime, jiffies))
-               throtl_schedule_delayed_work(td->queue, 0);
+               throtl_schedule_delayed_work(td, 0);
        else
-               throtl_schedule_delayed_work(td->queue,
-                               (st->min_disptime - jiffies));
+               throtl_schedule_delayed_work(td, (st->min_disptime - jiffies));
 }
 
 static inline void
@@ -815,10 +819,10 @@ void blk_throtl_work(struct work_struct *work)
 }
 
 /* Call with queue lock held */
-void throtl_schedule_delayed_work(struct request_queue *q, unsigned long delay)
+static void
+throtl_schedule_delayed_work(struct throtl_data *td, unsigned long delay)
 {
 
-       struct throtl_data *td = q->td;
        struct delayed_work *dwork = &td->throtl_work;
 
        if (total_nr_queued(td) > 0) {
@@ -827,12 +831,11 @@ void throtl_schedule_delayed_work(struct request_queue *q, unsigned long delay)
                 * Cancel that and schedule a new one.
                 */
                __cancel_delayed_work(dwork);
-               kblockd_schedule_delayed_work(q, dwork, delay);
+               queue_delayed_work(kthrotld_workqueue, dwork, delay);
                throtl_log(td, "schedule work. delay=%lu jiffies=%lu",
                                delay, jiffies);
        }
 }
-EXPORT_SYMBOL(throtl_schedule_delayed_work);
 
 static void
 throtl_destroy_tg(struct throtl_data *td, struct throtl_grp *tg)
@@ -920,7 +923,7 @@ static void throtl_update_blkio_group_read_bps(void *key,
        smp_mb__after_atomic_inc();
 
        /* Schedule a work now to process the limit change */
-       throtl_schedule_delayed_work(td->queue, 0);
+       throtl_schedule_delayed_work(td, 0);
 }
 
 static void throtl_update_blkio_group_write_bps(void *key,
@@ -934,7 +937,7 @@ static void throtl_update_blkio_group_write_bps(void *key,
        smp_mb__before_atomic_inc();
        atomic_inc(&td->limits_changed);
        smp_mb__after_atomic_inc();
-       throtl_schedule_delayed_work(td->queue, 0);
+       throtl_schedule_delayed_work(td, 0);
 }
 
 static void throtl_update_blkio_group_read_iops(void *key,
@@ -948,7 +951,7 @@ static void throtl_update_blkio_group_read_iops(void *key,
        smp_mb__before_atomic_inc();
        atomic_inc(&td->limits_changed);
        smp_mb__after_atomic_inc();
-       throtl_schedule_delayed_work(td->queue, 0);
+       throtl_schedule_delayed_work(td, 0);
 }
 
 static void throtl_update_blkio_group_write_iops(void *key,
@@ -962,7 +965,7 @@ static void throtl_update_blkio_group_write_iops(void *key,
        smp_mb__before_atomic_inc();
        atomic_inc(&td->limits_changed);
        smp_mb__after_atomic_inc();
-       throtl_schedule_delayed_work(td->queue, 0);
+       throtl_schedule_delayed_work(td, 0);
 }
 
 void throtl_shutdown_timer_wq(struct request_queue *q)
@@ -1135,6 +1138,10 @@ void blk_throtl_exit(struct request_queue *q)
 
 static int __init throtl_init(void)
 {
+       kthrotld_workqueue = alloc_workqueue("kthrotld", WQ_MEM_RECLAIM, 0);
+       if (!kthrotld_workqueue)
+               panic("Failed to create kthrotld\n");
+
        blkio_policy_register(&blkio_policy_throtl);
        return 0;
 }
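The blk-throttle change above drops kblockd_schedule_delayed_work() in favour of a dedicated kthrotld workqueue. A generic sketch of that pattern, with hypothetical example_* names (only the workqueue API calls match what the patch uses):

#include <linux/workqueue.h>
#include <linux/jiffies.h>

static struct workqueue_struct *example_wq;
static struct delayed_work example_dwork;

static void example_work_fn(struct work_struct *work)
{
	/* Deferred processing goes here. */
}

static int __init example_init(void)
{
	example_wq = alloc_workqueue("example_wq", WQ_MEM_RECLAIM, 0);
	if (!example_wq)
		return -ENOMEM;

	INIT_DELAYED_WORK(&example_dwork, example_work_fn);

	/* Re-arm with a fresh delay, as throtl_schedule_delayed_work() does. */
	__cancel_delayed_work(&example_dwork);
	queue_delayed_work(example_wq, &example_dwork, HZ / 10);
	return 0;
}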
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index 7be4c79..ea83a4f 100644
@@ -3355,7 +3355,7 @@ cfq_rq_enqueued(struct cfq_data *cfqd, struct cfq_queue *cfqq,
                            cfqd->busy_queues > 1) {
                                cfq_del_timer(cfqd, cfqq);
                                cfq_clear_cfqq_wait_request(cfqq);
-                               __blk_run_queue(cfqd->queue);
+                               __blk_run_queue(cfqd->queue, false);
                        } else {
                                cfq_blkiocg_update_idle_time_stats(
                                                &cfqq->cfqg->blkg);
@@ -3370,7 +3370,7 @@ cfq_rq_enqueued(struct cfq_data *cfqd, struct cfq_queue *cfqq,
                 * this new queue is RT and the current one is BE
                 */
                cfq_preempt_queue(cfqd, cfqq);
-               __blk_run_queue(cfqd->queue);
+               __blk_run_queue(cfqd->queue, false);
        }
 }
 
@@ -3731,7 +3731,7 @@ static void cfq_kick_queue(struct work_struct *work)
        struct request_queue *q = cfqd->queue;
 
        spin_lock_irq(q->queue_lock);
-       __blk_run_queue(cfqd->queue);
+       __blk_run_queue(cfqd->queue, false);
        spin_unlock_irq(q->queue_lock);
 }
 
diff --git a/block/elevator.c b/block/elevator.c
index 2569512..236e93c 100644
@@ -602,7 +602,7 @@ void elv_quiesce_start(struct request_queue *q)
         */
        elv_drain_elevator(q);
        while (q->rq.elvpriv) {
-               __blk_run_queue(q);
+               __blk_run_queue(q, false);
                spin_unlock_irq(q->queue_lock);
                msleep(10);
                spin_lock_irq(q->queue_lock);
@@ -651,7 +651,7 @@ void elv_insert(struct request_queue *q, struct request *rq, int where)
                 *   with anything.  There's no point in delaying queue
                 *   processing.
                 */
-               __blk_run_queue(q);
+               __blk_run_queue(q, false);
                break;
 
        case ELEVATOR_INSERT_SORT:
diff --git a/drivers/acpi/acpica/aclocal.h b/drivers/acpi/acpica/aclocal.h
index 54784bb..edc2586 100644
@@ -416,10 +416,15 @@ struct acpi_gpe_handler_info {
        u8 originally_enabled;  /* True if GPE was originally enabled */
 };
 
+struct acpi_gpe_notify_object {
+       struct acpi_namespace_node *node;
+       struct acpi_gpe_notify_object *next;
+};
+
 union acpi_gpe_dispatch_info {
        struct acpi_namespace_node *method_node;        /* Method node for this GPE level */
        struct acpi_gpe_handler_info *handler;  /* Installed GPE handler */
-       struct acpi_namespace_node *device_node;        /* Parent _PRW device for implicit notify */
+       struct acpi_gpe_notify_object device;   /* List of _PRW devices for implicit notify */
 };
 
 /*
index 14988a8..f472521 100644 (file)
@@ -457,6 +457,7 @@ static void ACPI_SYSTEM_XFACE acpi_ev_asynch_execute_gpe_method(void *context)
        acpi_status status;
        struct acpi_gpe_event_info *local_gpe_event_info;
        struct acpi_evaluate_info *info;
+       struct acpi_gpe_notify_object *notify_object;
 
        ACPI_FUNCTION_TRACE(ev_asynch_execute_gpe_method);
 
@@ -508,10 +509,18 @@ static void ACPI_SYSTEM_XFACE acpi_ev_asynch_execute_gpe_method(void *context)
                 * from this thread -- because handlers may in turn run other
                 * control methods.
                 */
-               status =
-                   acpi_ev_queue_notify_request(local_gpe_event_info->dispatch.
-                                                device_node,
-                                                ACPI_NOTIFY_DEVICE_WAKE);
+               status = acpi_ev_queue_notify_request(
+                               local_gpe_event_info->dispatch.device.node,
+                               ACPI_NOTIFY_DEVICE_WAKE);
+
+               notify_object = local_gpe_event_info->dispatch.device.next;
+               while (ACPI_SUCCESS(status) && notify_object) {
+                       status = acpi_ev_queue_notify_request(
+                                       notify_object->node,
+                                       ACPI_NOTIFY_DEVICE_WAKE);
+                       notify_object = notify_object->next;
+               }
+
                break;
 
        case ACPI_GPE_DISPATCH_METHOD:
index 3b20a34..52aaff3 100644 (file)
@@ -198,7 +198,9 @@ acpi_setup_gpe_for_wake(acpi_handle wake_device,
        acpi_status status = AE_BAD_PARAMETER;
        struct acpi_gpe_event_info *gpe_event_info;
        struct acpi_namespace_node *device_node;
+       struct acpi_gpe_notify_object *notify_object;
        acpi_cpu_flags flags;
+       u8 gpe_dispatch_mask;
 
        ACPI_FUNCTION_TRACE(acpi_setup_gpe_for_wake);
 
@@ -221,27 +223,49 @@ acpi_setup_gpe_for_wake(acpi_handle wake_device,
                goto unlock_and_exit;
        }
 
+       if (wake_device == ACPI_ROOT_OBJECT) {
+               goto out;
+       }
+
        /*
         * If there is no method or handler for this GPE, then the
         * wake_device will be notified whenever this GPE fires (aka
         * "implicit notify") Note: The GPE is assumed to be
         * level-triggered (for windows compatibility).
         */
-       if (((gpe_event_info->flags & ACPI_GPE_DISPATCH_MASK) ==
-             ACPI_GPE_DISPATCH_NONE) && (wake_device != ACPI_ROOT_OBJECT)) {
+       gpe_dispatch_mask = gpe_event_info->flags & ACPI_GPE_DISPATCH_MASK;
+       if (gpe_dispatch_mask != ACPI_GPE_DISPATCH_NONE
+           && gpe_dispatch_mask != ACPI_GPE_DISPATCH_NOTIFY) {
+               goto out;
+       }
 
-               /* Validate wake_device is of type Device */
+       /* Validate wake_device is of type Device */
 
-               device_node = ACPI_CAST_PTR(struct acpi_namespace_node,
-                                           wake_device);
-               if (device_node->type != ACPI_TYPE_DEVICE) {
-                       goto unlock_and_exit;
-               }
+       device_node = ACPI_CAST_PTR(struct acpi_namespace_node, wake_device);
+       if (device_node->type != ACPI_TYPE_DEVICE) {
+               goto unlock_and_exit;
+       }
+
+       if (gpe_dispatch_mask == ACPI_GPE_DISPATCH_NONE) {
                gpe_event_info->flags = (ACPI_GPE_DISPATCH_NOTIFY |
                                         ACPI_GPE_LEVEL_TRIGGERED);
-               gpe_event_info->dispatch.device_node = device_node;
+               gpe_event_info->dispatch.device.node = device_node;
+               gpe_event_info->dispatch.device.next = NULL;
+       } else {
+               /* There are multiple devices to notify implicitly. */
+
+               notify_object = ACPI_ALLOCATE_ZEROED(sizeof(*notify_object));
+               if (!notify_object) {
+                       status = AE_NO_MEMORY;
+                       goto unlock_and_exit;
+               }
+
+               notify_object->node = device_node;
+               notify_object->next = gpe_event_info->dispatch.device.next;
+               gpe_event_info->dispatch.device.next = notify_object;
        }
 
+ out:
        gpe_event_info->flags |= ACPI_GPE_CAN_WAKE;
        status = AE_OK;
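
The two ACPICA hunks above replace the single implicit-notify device node with a small singly-linked list headed in the dispatch union: extra wake devices are prepended in acpi_setup_gpe_for_wake(), and the GPE handler walks the whole chain. A standalone plain-C sketch of that list handling follows; the names are hypothetical and this is not ACPICA code.

#include <stdio.h>
#include <stdlib.h>

struct notify_object {
	const char *node;              /* stands in for an ACPI namespace node */
	struct notify_object *next;
};

struct dispatch_device {
	const char *node;              /* first wake device, stored inline */
	struct notify_object *next;    /* additional devices, prepended    */
};

static int add_wake_device(struct dispatch_device *d, const char *node)
{
	struct notify_object *obj = calloc(1, sizeof(*obj));

	if (!obj)
		return -1;
	obj->node = node;
	obj->next = d->next;           /* prepend, like the ACPICA change */
	d->next = obj;
	return 0;
}

static void notify_all(const struct dispatch_device *d)
{
	const struct notify_object *obj;

	printf("notify %s\n", d->node);
	for (obj = d->next; obj; obj = obj->next)
		printf("notify %s\n", obj->node);
}

int main(void)
{
	struct dispatch_device d = { .node = "DEV0", .next = NULL };

	add_wake_device(&d, "DEV1");
	add_wake_device(&d, "DEV2");
	notify_all(&d);                /* DEV0, DEV2, DEV1 */
	return 0;
}
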
 
index 5df67f1..384f7ab 100644 (file)
@@ -26,7 +26,9 @@ static ssize_t cm_write(struct file *file, const char __user * user_buf,
                        size_t count, loff_t *ppos)
 {
        static char *buf;
-       static int uncopied_bytes;
+       static u32 max_size;
+       static u32 uncopied_bytes;
+
        struct acpi_table_header table;
        acpi_status status;
 
@@ -37,19 +39,24 @@ static ssize_t cm_write(struct file *file, const char __user * user_buf,
                if (copy_from_user(&table, user_buf,
                                   sizeof(struct acpi_table_header)))
                        return -EFAULT;
-               uncopied_bytes = table.length;
-               buf = kzalloc(uncopied_bytes, GFP_KERNEL);
+               uncopied_bytes = max_size = table.length;
+               buf = kzalloc(max_size, GFP_KERNEL);
                if (!buf)
                        return -ENOMEM;
        }
 
-       if (uncopied_bytes < count) {
-               kfree(buf);
+       if (buf == NULL)
+               return -EINVAL;
+
+       if ((*ppos > max_size) ||
+           (*ppos + count > max_size) ||
+           (*ppos + count < count) ||
+           (count > uncopied_bytes))
                return -EINVAL;
-       }
 
        if (copy_from_user(buf + (*ppos), user_buf, count)) {
                kfree(buf);
+               buf = NULL;
                return -EFAULT;
        }
 
@@ -59,6 +66,7 @@ static ssize_t cm_write(struct file *file, const char __user * user_buf,
        if (!uncopied_bytes) {
                status = acpi_install_method(buf);
                kfree(buf);
+               buf = NULL;
                if (ACPI_FAILURE(status))
                        return -EINVAL;
                add_taint(TAINT_OVERRIDDEN_ACPI_TABLE);
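
The rewritten cm_write() above validates every chunk before copying: the file offset must stay inside the table, offset plus count must neither exceed the table size nor wrap around, and a chunk may not be larger than the bytes still expected. A standalone plain-C sketch of exactly those checks (hypothetical names, not the driver code):

#include <stdint.h>
#include <stdbool.h>
#include <stdio.h>

static bool chunk_ok(uint32_t pos, uint32_t count,
		     uint32_t max_size, uint32_t uncopied)
{
	if (pos > max_size)
		return false;           /* starts past the end of the table  */
	if (pos + count > max_size)
		return false;           /* runs past the end of the table    */
	if (pos + count < count)
		return false;           /* unsigned wrap-around in pos+count */
	if (count > uncopied)
		return false;           /* more data than is still expected  */
	return true;
}

int main(void)
{
	/* a valid 36-byte header chunk vs. an offset chosen to wrap pos+count */
	printf("%d %d\n",
	       chunk_ok(0, 36, 100, 100),
	       chunk_ok(0xFFFFFFF0u, 0x20, 0xFFFFFFFFu, 0xFFFFFFFFu));
	return 0;
}
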
index 49e6a54..dbf31ec 100644 (file)
@@ -78,7 +78,6 @@
 
 #include <asm/uaccess.h>
 
-static DEFINE_MUTEX(loop_mutex);
 static LIST_HEAD(loop_devices);
 static DEFINE_MUTEX(loop_devices_mutex);
 
@@ -1501,11 +1500,9 @@ static int lo_open(struct block_device *bdev, fmode_t mode)
 {
        struct loop_device *lo = bdev->bd_disk->private_data;
 
-       mutex_lock(&loop_mutex);
        mutex_lock(&lo->lo_ctl_mutex);
        lo->lo_refcnt++;
        mutex_unlock(&lo->lo_ctl_mutex);
-       mutex_unlock(&loop_mutex);
 
        return 0;
 }
@@ -1515,7 +1512,6 @@ static int lo_release(struct gendisk *disk, fmode_t mode)
        struct loop_device *lo = disk->private_data;
        int err;
 
-       mutex_lock(&loop_mutex);
        mutex_lock(&lo->lo_ctl_mutex);
 
        if (--lo->lo_refcnt)
@@ -1540,7 +1536,6 @@ static int lo_release(struct gendisk *disk, fmode_t mode)
 out:
        mutex_unlock(&lo->lo_ctl_mutex);
 out_unlocked:
-       mutex_unlock(&loop_mutex);
        return 0;
 }
 
index 1109f68..5cb4d09 100644 (file)
@@ -1919,8 +1919,10 @@ int cpufreq_register_driver(struct cpufreq_driver *driver_data)
 
        ret = sysdev_driver_register(&cpu_sysdev_class,
                                        &cpufreq_sysdev_driver);
+       if (ret)
+               goto err_null_driver;
 
-       if ((!ret) && !(cpufreq_driver->flags & CPUFREQ_STICKY)) {
+       if (!(cpufreq_driver->flags & CPUFREQ_STICKY)) {
                int i;
                ret = -ENODEV;
 
@@ -1935,21 +1937,22 @@ int cpufreq_register_driver(struct cpufreq_driver *driver_data)
                if (ret) {
                        dprintk("no CPU initialized for driver %s\n",
                                                        driver_data->name);
-                       sysdev_driver_unregister(&cpu_sysdev_class,
-                                               &cpufreq_sysdev_driver);
-
-                       spin_lock_irqsave(&cpufreq_driver_lock, flags);
-                       cpufreq_driver = NULL;
-                       spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
+                       goto err_sysdev_unreg;
                }
        }
 
-       if (!ret) {
-               register_hotcpu_notifier(&cpufreq_cpu_notifier);
-               dprintk("driver %s up and running\n", driver_data->name);
-               cpufreq_debug_enable_ratelimit();
-       }
+       register_hotcpu_notifier(&cpufreq_cpu_notifier);
+       dprintk("driver %s up and running\n", driver_data->name);
+       cpufreq_debug_enable_ratelimit();
 
+       return 0;
+err_sysdev_unreg:
+       sysdev_driver_unregister(&cpu_sysdev_class,
+                       &cpufreq_sysdev_driver);
+err_null_driver:
+       spin_lock_irqsave(&cpufreq_driver_lock, flags);
+       cpufreq_driver = NULL;
+       spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
        return ret;
 }
 EXPORT_SYMBOL_GPL(cpufreq_register_driver);
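
The hunk above converts cpufreq_register_driver() to the usual goto-based unwinding shape: each failure jumps to a label that undoes only the steps already completed, in reverse order. A minimal standalone sketch of that shape, with stand-in helpers (all names hypothetical):

#include <stdio.h>

static int register_sysdev(void)    { return 0; } /* like sysdev_driver_register()   */
static void unregister_sysdev(void) { }           /* like sysdev_driver_unregister() */
static int probe_cpus(void)         { return 0; } /* like the per-CPU init loop      */
static void clear_driver(void)      { }           /* like "cpufreq_driver = NULL"    */

static int register_example(void)
{
	int ret;

	ret = register_sysdev();
	if (ret)
		goto err_null_driver;

	ret = probe_cpus();
	if (ret)
		goto err_sysdev_unreg;

	return 0;                       /* success: nothing to unwind */

err_sysdev_unreg:
	unregister_sysdev();            /* undo completed steps in reverse order */
err_null_driver:
	clear_driver();
	return ret;
}

int main(void)
{
	printf("register_example() -> %d\n", register_example());
	return 0;
}
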
index 17bd766..e33d9be 100644 (file)
@@ -1895,6 +1895,17 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
        if (IS_GEN2(dev))
                dma_set_coherent_mask(&dev->pdev->dev, DMA_BIT_MASK(30));
 
+       /* 965GM sometimes incorrectly writes to hardware status page (HWS)
+        * using 32bit addressing, overwriting memory if HWS is located
+        * above 4GB.
+        *
+        * The documentation also mentions an issue with undefined
+        * behaviour if any general state is accessed within a page above 4GB,
+        * which also needs to be handled carefully.
+        */
+       if (IS_BROADWATER(dev) || IS_CRESTLINE(dev))
+               dma_set_coherent_mask(&dev->pdev->dev, DMA_BIT_MASK(32));
+
        mmio_bar = IS_GEN2(dev) ? 1 : 0;
        dev_priv->regs = pci_iomap(dev->pdev, mmio_bar, 0);
        if (!dev_priv->regs) {
index 1fa091e..4a5c4a4 100644 (file)
@@ -62,6 +62,7 @@
 #include <linux/notifier.h>
 #include <linux/cpu.h>
 #include <asm/mwait.h>
+#include <asm/msr.h>
 
 #define INTEL_IDLE_VERSION "0.4"
 #define PREFIX "intel_idle: "
@@ -85,6 +86,12 @@ static int intel_idle(struct cpuidle_device *dev, struct cpuidle_state *state);
 static struct cpuidle_state *cpuidle_state_table;
 
 /*
+ * Hardware C-state auto-demotion may not always be optimal.
+ * Indicate which enable bits to clear here.
+ */
+static unsigned long long auto_demotion_disable_flags;
+
+/*
  * Set this flag for states where the HW flushes the TLB for us
  * and so we don't need cross-calls to keep it consistent.
  * If this flag is set, SW flushes the TLB, so even if the
@@ -281,6 +288,15 @@ static struct notifier_block setup_broadcast_notifier = {
        .notifier_call = setup_broadcast_cpuhp_notify,
 };
 
+static void auto_demotion_disable(void *dummy)
+{
+       unsigned long long msr_bits;
+
+       rdmsrl(MSR_NHM_SNB_PKG_CST_CFG_CTL, msr_bits);
+       msr_bits &= ~auto_demotion_disable_flags;
+       wrmsrl(MSR_NHM_SNB_PKG_CST_CFG_CTL, msr_bits);
+}
+
 /*
  * intel_idle_probe()
  */
@@ -324,11 +340,17 @@ static int intel_idle_probe(void)
        case 0x25:      /* Westmere */
        case 0x2C:      /* Westmere */
                cpuidle_state_table = nehalem_cstates;
+               auto_demotion_disable_flags =
+                       (NHM_C1_AUTO_DEMOTE | NHM_C3_AUTO_DEMOTE);
                break;
 
        case 0x1C:      /* 28 - Atom Processor */
+               cpuidle_state_table = atom_cstates;
+               break;
+
        case 0x26:      /* 38 - Lincroft Atom Processor */
                cpuidle_state_table = atom_cstates;
+               auto_demotion_disable_flags = ATM_LNC_C6_AUTO_DEMOTE;
                break;
 
        case 0x2A:      /* SNB */
@@ -436,6 +458,8 @@ static int intel_idle_cpuidle_devices_init(void)
                        return -EIO;
                }
        }
+       if (auto_demotion_disable_flags)
+               smp_call_function(auto_demotion_disable, NULL, 1);
 
        return 0;
 }
index 18f8798..7bd5baa 100644 (file)
@@ -62,7 +62,7 @@ void diva_xdi_provide_istream_info (ADAPTER* a,
   stream interface.
   If synchronous service was requested, then function
   does return amount of data written to stream.
-  'final' does indicate that pice of data to be written is
+  'final' does indicate that piece of data to be written is
   final part of frame (necessary only by structured datatransfer)
  return  0 if zero length packet was written
   return -1 if stream is full
index 6a1f940..c45e630 100644 (file)
@@ -143,9 +143,9 @@ static void asic3_irq_demux(unsigned int irq, struct irq_desc *desc)
        unsigned long flags;
        struct asic3 *asic;
 
-       desc->chip->ack(irq);
+       desc->irq_data.chip->irq_ack(&desc->irq_data);
 
-       asic = desc->handler_data;
+       asic = get_irq_data(irq);
 
        for (iter = 0 ; iter < MAX_ASIC_ISR_LOOPS; iter++) {
                u32 status;
index 33c923d..fdd8a1b 100644 (file)
@@ -118,12 +118,12 @@ static int __init davinci_vc_probe(struct platform_device *pdev)
 
        /* Voice codec interface client */
        cell = &davinci_vc->cells[DAVINCI_VC_VCIF_CELL];
-       cell->name = "davinci_vcif";
+       cell->name = "davinci-vcif";
        cell->driver_data = davinci_vc;
 
        /* Voice codec CQ93VC client */
        cell = &davinci_vc->cells[DAVINCI_VC_CQ93VC_CELL];
-       cell->name = "cq93vc";
+       cell->name = "cq93vc-codec";
        cell->driver_data = davinci_vc;
 
        ret = mfd_add_devices(&pdev->dev, pdev->id, davinci_vc->cells,
index 627cf57..e9018d1 100644 (file)
@@ -150,12 +150,12 @@ static inline int __tps6586x_write(struct i2c_client *client,
 static inline int __tps6586x_writes(struct i2c_client *client, int reg,
                                  int len, uint8_t *val)
 {
-       int ret;
+       int ret, i;
 
-       ret = i2c_smbus_write_i2c_block_data(client, reg, len, val);
-       if (ret < 0) {
-               dev_err(&client->dev, "failed writings to 0x%02x\n", reg);
-               return ret;
+       for (i = 0; i < len; i++) {
+               ret = __tps6586x_write(client, reg + i, *(val + i));
+               if (ret < 0)
+                       return ret;
        }
 
        return 0;
index 000cb41..92b85e2 100644 (file)
@@ -385,12 +385,18 @@ static int ucb1x00_ts_add(struct ucb1x00_dev *dev)
        idev->close      = ucb1x00_ts_close;
 
        __set_bit(EV_ABS, idev->evbit);
-       __set_bit(ABS_X, idev->absbit);
-       __set_bit(ABS_Y, idev->absbit);
-       __set_bit(ABS_PRESSURE, idev->absbit);
 
        input_set_drvdata(idev, ts);
 
+       ucb1x00_adc_enable(ts->ucb);
+       ts->x_res = ucb1x00_ts_read_xres(ts);
+       ts->y_res = ucb1x00_ts_read_yres(ts);
+       ucb1x00_adc_disable(ts->ucb);
+
+       input_set_abs_params(idev, ABS_X, 0, ts->x_res, 0, 0);
+       input_set_abs_params(idev, ABS_Y, 0, ts->y_res, 0, 0);
+       input_set_abs_params(idev, ABS_PRESSURE, 0, 0, 0, 0);
+
        err = input_register_device(idev);
        if (err)
                goto fail;
index 41233c7..f4016a0 100644 (file)
@@ -246,6 +246,16 @@ static int wm8994_suspend(struct device *dev)
        struct wm8994 *wm8994 = dev_get_drvdata(dev);
        int ret;
 
+       /* Don't actually go through with the suspend if the CODEC is
+        * still active (e.g. for audio passthrough from the CP). */
+       ret = wm8994_reg_read(wm8994, WM8994_POWER_MANAGEMENT_1);
+       if (ret < 0) {
+               dev_err(dev, "Failed to read power status: %d\n", ret);
+       } else if (ret & WM8994_VMID_SEL_MASK) {
+               dev_dbg(dev, "CODEC still active, ignoring suspend\n");
+               return 0;
+       }
+
        /* GPIO configuration state is saved here since we may be configuring
         * the GPIO alternate functions even if we're not using the gpiolib
         * driver for them.
@@ -261,6 +271,8 @@ static int wm8994_suspend(struct device *dev)
        if (ret < 0)
                dev_err(dev, "Failed to save LDO registers: %d\n", ret);
 
+       wm8994->suspended = true;
+
        ret = regulator_bulk_disable(wm8994->num_supplies,
                                     wm8994->supplies);
        if (ret != 0) {
@@ -276,6 +288,10 @@ static int wm8994_resume(struct device *dev)
        struct wm8994 *wm8994 = dev_get_drvdata(dev);
        int ret;
 
+       /* We may have lied to the PM core about suspending */
+       if (!wm8994->suspended)
+               return 0;
+
        ret = regulator_bulk_enable(wm8994->num_supplies,
                                    wm8994->supplies);
        if (ret != 0) {
@@ -298,6 +314,8 @@ static int wm8994_resume(struct device *dev)
        if (ret < 0)
                dev_err(dev, "Failed to restore GPIO registers: %d\n", ret);
 
+       wm8994->suspended = false;
+
        return 0;
 }
 #endif
index 653c624..7897d11 100644 (file)
@@ -22,7 +22,7 @@
  * (you will need to reboot afterwards) */
 /* #define BNX2X_STOP_ON_ERROR */
 
-#define DRV_MODULE_VERSION      "1.62.00-5"
+#define DRV_MODULE_VERSION      "1.62.00-6"
 #define DRV_MODULE_RELDATE      "2011/01/30"
 #define BNX2X_BC_VER            0x040200
 
@@ -1613,19 +1613,23 @@ static inline u32 reg_poll(struct bnx2x *bp, u32 reg, u32 expected, int ms,
 #define BNX2X_BTR                      4
 #define MAX_SPQ_PENDING                        8
 
-
-/* CMNG constants
-   derived from lab experiments, and not from system spec calculations !!! */
-#define DEF_MIN_RATE                   100
+/* CMNG constants, as derived from system spec calculations */
+/* default MIN rate in case VNIC min rate is configured to zero - 100Mbps */
+#define DEF_MIN_RATE                                   100
 /* resolution of the rate shaping timer - 100 usec */
-#define RS_PERIODIC_TIMEOUT_USEC       100
-/* resolution of fairness algorithm in usecs -
-   coefficient for calculating the actual t fair */
-#define T_FAIR_COEF                    10000000
+#define RS_PERIODIC_TIMEOUT_USEC                       100
 /* number of bytes in single QM arbitration cycle -
-   coefficient for calculating the fairness timer */
-#define QM_ARB_BYTES                   40000
-#define FAIR_MEM                       2
+ * coefficient for calculating the fairness timer */
+#define QM_ARB_BYTES                                   160000
+/* resolution of Min algorithm 1:100 */
+#define MIN_RES                                                100
+/* how many bytes above threshold for the minimal credit of Min algorithm */
+#define MIN_ABOVE_THRESH                               32768
+/* Fairness algorithm integration time coefficient -
+ * for calculating the actual Tfair */
+#define T_FAIR_COEF    ((MIN_ABOVE_THRESH +  QM_ARB_BYTES) * 8 * MIN_RES)
+/* Memory of the fairness algorithm: 2 cycles */
+#define FAIR_MEM                                       2
 
 
 #define ATTN_NIG_FOR_FUNC              (1L << 8)
index 710ce5d..9379812 100644 (file)
@@ -259,10 +259,44 @@ static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
 #endif
 }
 
+/* Timestamp option length allowed for TPA aggregation:
+ *
+ *             nop nop kind length echo val
+ */
+#define TPA_TSTAMP_OPT_LEN     12
+/**
+ * Calculate the approximate value of the MSS for this
+ * aggregation using its first packet.
+ *
+ * @param bp
+ * @param parsing_flags Parsing flags from the START CQE
+ * @param len_on_bd Total length of the first packet for the
+ *                  aggregation.
+ */
+static inline u16 bnx2x_set_lro_mss(struct bnx2x *bp, u16 parsing_flags,
+                                   u16 len_on_bd)
+{
+       /* TPA aggregation won't have IP options or TCP options
+        * other than timestamp.
+        */
+       u16 hdrs_len = ETH_HLEN + sizeof(struct iphdr) + sizeof(struct tcphdr);
+
+
+       /* Check if there was a TCP timestamp; if there is, it will
+        * always be 12 bytes long: nop nop kind length echo val.
+        *
+        * Otherwise FW would close the aggregation.
+        */
+       if (parsing_flags & PARSING_FLAGS_TIME_STAMP_EXIST_FLAG)
+               hdrs_len += TPA_TSTAMP_OPT_LEN;
+
+       return len_on_bd - hdrs_len;
+}
+
 static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
                               struct sk_buff *skb,
                               struct eth_fast_path_rx_cqe *fp_cqe,
-                              u16 cqe_idx)
+                              u16 cqe_idx, u16 parsing_flags)
 {
        struct sw_rx_page *rx_pg, old_rx_pg;
        u16 len_on_bd = le16_to_cpu(fp_cqe->len_on_bd);
@@ -275,8 +309,8 @@ static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
 
        /* This is needed in order to enable forwarding support */
        if (frag_size)
-               skb_shinfo(skb)->gso_size = min((u32)SGE_PAGE_SIZE,
-                                              max(frag_size, (u32)len_on_bd));
+               skb_shinfo(skb)->gso_size = bnx2x_set_lro_mss(bp, parsing_flags,
+                                                             len_on_bd);
 
 #ifdef BNX2X_STOP_ON_ERROR
        if (pages > min_t(u32, 8, MAX_SKB_FRAGS)*SGE_PAGE_SIZE*PAGES_PER_SGE) {
@@ -344,6 +378,8 @@ static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
        if (likely(new_skb)) {
                /* fix ip xsum and give it to the stack */
                /* (no need to map the new skb) */
+               u16 parsing_flags =
+                       le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags);
 
                prefetch(skb);
                prefetch(((char *)(skb)) + L1_CACHE_BYTES);
@@ -373,9 +409,9 @@ static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
                }
 
                if (!bnx2x_fill_frag_skb(bp, fp, skb,
-                                        &cqe->fast_path_cqe, cqe_idx)) {
-                       if ((le16_to_cpu(cqe->fast_path_cqe.
-                           pars_flags.flags) & PARSING_FLAGS_VLAN))
+                                        &cqe->fast_path_cqe, cqe_idx,
+                                        parsing_flags)) {
+                       if (parsing_flags & PARSING_FLAGS_VLAN)
                                __vlan_hwaccel_put_tag(skb,
                                                 le16_to_cpu(cqe->fast_path_cqe.
                                                             vlan_tag));
@@ -703,19 +739,20 @@ u16 bnx2x_get_mf_speed(struct bnx2x *bp)
 {
        u16 line_speed = bp->link_vars.line_speed;
        if (IS_MF(bp)) {
-               u16 maxCfg = (bp->mf_config[BP_VN(bp)] &
-                                               FUNC_MF_CFG_MAX_BW_MASK) >>
-                                               FUNC_MF_CFG_MAX_BW_SHIFT;
-               /* Calculate the current MAX line speed limit for the DCC
-                * capable devices
+               u16 maxCfg = bnx2x_extract_max_cfg(bp,
+                                                  bp->mf_config[BP_VN(bp)]);
+
+               /* Calculate the current MAX line speed limit for the MF
+                * devices
                 */
-               if (IS_MF_SD(bp)) {
+               if (IS_MF_SI(bp))
+                       line_speed = (line_speed * maxCfg) / 100;
+               else { /* SD mode */
                        u16 vn_max_rate = maxCfg * 100;
 
                        if (vn_max_rate < line_speed)
                                line_speed = vn_max_rate;
-               } else /* IS_MF_SI(bp)) */
-                       line_speed = (line_speed * maxCfg) / 100;
+               }
        }
 
        return line_speed;
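
The new bnx2x_set_lro_mss() above approximates the MSS as the first packet's length-on-BD minus the Ethernet, IP and TCP header lengths, adding 12 more header bytes when the TCP timestamp option is present. A standalone plain-C version of that arithmetic (header sizes hard-coded here for illustration only):

#include <stdint.h>
#include <stdio.h>

#define ETH_HLEN_EX		14	/* Ethernet header                  */
#define IP_HDR_LEN		20	/* struct iphdr, no options         */
#define TCP_HDR_LEN		20	/* struct tcphdr, no options        */
#define TPA_TSTAMP_OPT_LEN	12	/* nop nop kind length echo val     */

static uint16_t lro_mss(uint16_t len_on_bd, int has_timestamp)
{
	uint16_t hdrs_len = ETH_HLEN_EX + IP_HDR_LEN + TCP_HDR_LEN;

	if (has_timestamp)
		hdrs_len += TPA_TSTAMP_OPT_LEN;

	return len_on_bd - hdrs_len;
}

int main(void)
{
	/* 1514-byte first frame with timestamps: 1514 - 54 - 12 = 1448 */
	printf("%u\n", lro_mss(1514, 1));
	return 0;
}
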
index 03eb4d6..326ba44 100644 (file)
@@ -1044,4 +1044,24 @@ static inline void storm_memset_cmng(struct bnx2x *bp,
 void bnx2x_acquire_phy_lock(struct bnx2x *bp);
 void bnx2x_release_phy_lock(struct bnx2x *bp);
 
+/**
+ * Extracts MAX BW part from MF configuration.
+ *
+ * @param bp
+ * @param mf_cfg
+ *
+ * @return u16
+ */
+static inline u16 bnx2x_extract_max_cfg(struct bnx2x *bp, u32 mf_cfg)
+{
+       u16 max_cfg = (mf_cfg & FUNC_MF_CFG_MAX_BW_MASK) >>
+                             FUNC_MF_CFG_MAX_BW_SHIFT;
+       if (!max_cfg) {
+               BNX2X_ERR("Illegal configuration detected for Max BW - "
+                         "using 100 instead\n");
+               max_cfg = 100;
+       }
+       return max_cfg;
+}
+
 #endif /* BNX2X_CMN_H */
index 5b44a8b..ef29199 100644 (file)
@@ -238,7 +238,7 @@ static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
        speed |= (cmd->speed_hi << 16);
 
        if (IS_MF_SI(bp)) {
-               u32 param = 0;
+               u32 param = 0, part;
                u32 line_speed = bp->link_vars.line_speed;
 
                /* use 10G if no link detected */
@@ -251,9 +251,11 @@ static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
                                       REQ_BC_VER_4_SET_MF_BW);
                        return -EINVAL;
                }
-               if (line_speed < speed) {
-                       BNX2X_DEV_INFO("New speed should be less or equal "
-                                      "to actual line speed\n");
+               part = (speed * 100) / line_speed;
+               if (line_speed < speed || !part) {
+                       BNX2X_DEV_INFO("Speed setting should be in a range "
+                                      "from 1%% to 100%% "
+                                      "of actual line speed\n");
                        return -EINVAL;
                }
                /* load old values */
@@ -263,8 +265,7 @@ static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
                param &= FUNC_MF_CFG_MIN_BW_MASK;
 
                /* set new MAX value */
-               param |= (((speed * 100) / line_speed)
-                                << FUNC_MF_CFG_MAX_BW_SHIFT)
+               param |= (part << FUNC_MF_CFG_MAX_BW_SHIFT)
                                  & FUNC_MF_CFG_MAX_BW_MASK;
 
                bnx2x_fw_command(bp, DRV_MSG_CODE_SET_MF_BW, param);
@@ -1781,9 +1782,7 @@ static int bnx2x_test_nvram(struct bnx2x *bp)
                { 0x100, 0x350 }, /* manuf_info */
                { 0x450,  0xf0 }, /* feature_info */
                { 0x640,  0x64 }, /* upgrade_key_info */
-               { 0x6a4,  0x64 },
                { 0x708,  0x70 }, /* manuf_key_info */
-               { 0x778,  0x70 },
                {     0,     0 }
        };
        __be32 buf[0x350 / 4];
@@ -1933,11 +1932,11 @@ static void bnx2x_self_test(struct net_device *dev,
                buf[4] = 1;
                etest->flags |= ETH_TEST_FL_FAILED;
        }
-       if (bp->port.pmf)
-               if (bnx2x_link_test(bp, is_serdes) != 0) {
-                       buf[5] = 1;
-                       etest->flags |= ETH_TEST_FL_FAILED;
-               }
+
+       if (bnx2x_link_test(bp, is_serdes) != 0) {
+               buf[5] = 1;
+               etest->flags |= ETH_TEST_FL_FAILED;
+       }
 
 #ifdef BNX2X_EXTRA_DEBUG
        bnx2x_panic_dump(bp);
index 5a268e9..fa6dbe3 100644 (file)
@@ -241,7 +241,7 @@ static const struct {
        /* Block IGU, MISC, PXP and PXP2 parity errors as long as we don't
         * want to handle "system kill" flow at the moment.
         */
-       BLOCK_PRTY_INFO(PXP, 0x3ffffff, 0x3ffffff, 0x3ffffff, 0x3ffffff),
+       BLOCK_PRTY_INFO(PXP, 0x7ffffff, 0x3ffffff, 0x3ffffff, 0x7ffffff),
        BLOCK_PRTY_INFO_0(PXP2, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff),
        BLOCK_PRTY_INFO_1(PXP2, 0x7ff, 0x7f, 0x7f, 0x7ff),
        BLOCK_PRTY_INFO(HC, 0x7, 0x7, 0x7, 0),
index d584d32..032ae18 100644 (file)
@@ -1974,13 +1974,22 @@ static void bnx2x_init_vn_minmax(struct bnx2x *bp, int vn)
                vn_max_rate = 0;
 
        } else {
+               u32 maxCfg = bnx2x_extract_max_cfg(bp, vn_cfg);
+
                vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
                                FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
-               /* If min rate is zero - set it to 1 */
+               /* If fairness is enabled (not all min rates are zero) and
+                  the current min rate is zero, set it to 1.
+                  This is a requirement of the algorithm. */
                if (bp->vn_weight_sum && (vn_min_rate == 0))
                        vn_min_rate = DEF_MIN_RATE;
-               vn_max_rate = ((vn_cfg & FUNC_MF_CFG_MAX_BW_MASK) >>
-                               FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
+
+               if (IS_MF_SI(bp))
+                       /* maxCfg is a percentage of the link speed */
+                       vn_max_rate = (bp->link_vars.line_speed * maxCfg) / 100;
+               else
+                       /* maxCfg is absolute in 100Mb units */
+                       vn_max_rate = maxCfg * 100;
        }
 
        DP(NETIF_MSG_IFUP,
@@ -2006,7 +2015,8 @@ static void bnx2x_init_vn_minmax(struct bnx2x *bp, int vn)
                m_fair_vn.vn_credit_delta =
                        max_t(u32, (vn_min_rate * (T_FAIR_COEF /
                                                   (8 * bp->vn_weight_sum))),
-                             (bp->cmng.fair_vars.fair_threshold * 2));
+                             (bp->cmng.fair_vars.fair_threshold +
+                                                       MIN_ABOVE_THRESH));
                DP(NETIF_MSG_IFUP, "m_fair_vn.vn_credit_delta %d\n",
                   m_fair_vn.vn_credit_delta);
        }
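
The bnx2x_init_vn_minmax() hunk above distinguishes the two multi-function modes: in SI mode maxCfg is a percentage of the current link speed, while in SD mode it is an absolute value in 100 Mbps units. A small standalone sketch of that selection (hypothetical names, plain C):

#include <stdint.h>
#include <stdio.h>

static uint32_t vn_max_rate(int is_mf_si, uint32_t line_speed_mbps,
			    uint32_t max_cfg)
{
	if (is_mf_si)
		return line_speed_mbps * max_cfg / 100;	/* percentage of link  */
	return max_cfg * 100;				/* 100 Mbps units (SD) */
}

int main(void)
{
	/* 30% of a 10G link vs. 30 units of 100 Mbps -- both 3000 Mbps here */
	printf("%u %u\n", vn_max_rate(1, 10000, 30), vn_max_rate(0, 10000, 30));
	return 0;
}
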
index bda60d5..3445ded 100644 (file)
@@ -1239,14 +1239,14 @@ void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event)
        if (unlikely(bp->panic))
                return;
 
+       bnx2x_stats_stm[bp->stats_state][event].action(bp);
+
        /* Protect a state change flow */
        spin_lock_bh(&bp->stats_lock);
        state = bp->stats_state;
        bp->stats_state = bnx2x_stats_stm[state][event].next_state;
        spin_unlock_bh(&bp->stats_lock);
 
-       bnx2x_stats_stm[state][event].action(bp);
-
        if ((event != STATS_EVENT_UPDATE) || netif_msg_timer(bp))
                DP(BNX2X_MSG_STATS, "state %d -> event %d -> state %d\n",
                   state, event, bp->stats_state);
index 5157e15..aeea9f9 100644 (file)
@@ -633,6 +633,7 @@ static const struct net_device_ops softing_netdev_ops = {
 };
 
 static const struct can_bittiming_const softing_btr_const = {
+       .name = "softing",
        .tseg1_min = 1,
        .tseg1_max = 16,
        .tseg2_min = 1,
index 7ff170c..302be4a 100644 (file)
@@ -2760,6 +2760,8 @@ static u32 cnic_service_bnx2_queues(struct cnic_dev *dev)
        u32 status_idx = (u16) *cp->kcq1.status_idx_ptr;
        int kcqe_cnt;
 
+       /* status block index must be read before reading other fields */
+       rmb();
        cp->kwq_con_idx = *cp->kwq_con_idx_ptr;
 
        while ((kcqe_cnt = cnic_get_kcqes(dev, &cp->kcq1))) {
@@ -2770,6 +2772,8 @@ static u32 cnic_service_bnx2_queues(struct cnic_dev *dev)
                barrier();
                if (status_idx != *cp->kcq1.status_idx_ptr) {
                        status_idx = (u16) *cp->kcq1.status_idx_ptr;
+                       /* status block index must be read first */
+                       rmb();
                        cp->kwq_con_idx = *cp->kwq_con_idx_ptr;
                } else
                        break;
@@ -2888,6 +2892,8 @@ static u32 cnic_service_bnx2x_kcq(struct cnic_dev *dev, struct kcq_info *info)
        u32 last_status = *info->status_idx_ptr;
        int kcqe_cnt;
 
+       /* status block index must be read before reading the KCQ */
+       rmb();
        while ((kcqe_cnt = cnic_get_kcqes(dev, info))) {
 
                service_kcqes(dev, kcqe_cnt);
@@ -2898,6 +2904,8 @@ static u32 cnic_service_bnx2x_kcq(struct cnic_dev *dev, struct kcq_info *info)
                        break;
 
                last_status = *info->status_idx_ptr;
+               /* status block index must be read before reading the KCQ */
+               rmb();
        }
        return last_status;
 }
@@ -2906,26 +2914,35 @@ static void cnic_service_bnx2x_bh(unsigned long data)
 {
        struct cnic_dev *dev = (struct cnic_dev *) data;
        struct cnic_local *cp = dev->cnic_priv;
-       u32 status_idx;
+       u32 status_idx, new_status_idx;
 
        if (unlikely(!test_bit(CNIC_F_CNIC_UP, &dev->flags)))
                return;
 
-       status_idx = cnic_service_bnx2x_kcq(dev, &cp->kcq1);
+       while (1) {
+               status_idx = cnic_service_bnx2x_kcq(dev, &cp->kcq1);
 
-       CNIC_WR16(dev, cp->kcq1.io_addr, cp->kcq1.sw_prod_idx + MAX_KCQ_IDX);
+               CNIC_WR16(dev, cp->kcq1.io_addr,
+                         cp->kcq1.sw_prod_idx + MAX_KCQ_IDX);
 
-       if (BNX2X_CHIP_IS_E2(cp->chip_id)) {
-               status_idx = cnic_service_bnx2x_kcq(dev, &cp->kcq2);
+               if (!BNX2X_CHIP_IS_E2(cp->chip_id)) {
+                       cnic_ack_bnx2x_int(dev, cp->bnx2x_igu_sb_id, USTORM_ID,
+                                          status_idx, IGU_INT_ENABLE, 1);
+                       break;
+               }
+
+               new_status_idx = cnic_service_bnx2x_kcq(dev, &cp->kcq2);
+
+               if (new_status_idx != status_idx)
+                       continue;
 
                CNIC_WR16(dev, cp->kcq2.io_addr, cp->kcq2.sw_prod_idx +
                          MAX_KCQ_IDX);
 
                cnic_ack_igu_sb(dev, cp->bnx2x_igu_sb_id, IGU_SEG_ACCESS_DEF,
                                status_idx, IGU_INT_ENABLE, 1);
-       } else {
-               cnic_ack_bnx2x_int(dev, cp->bnx2x_igu_sb_id, USTORM_ID,
-                                  status_idx, IGU_INT_ENABLE, 1);
+
+               break;
        }
 }
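
The rmb() calls added above enforce one rule: the hardware-written status block index must be read before any of the fields it guards, so the CPU cannot reorder the dependent loads ahead of it. Below is not the kernel barrier API but an equivalent expression of the same ordering using C11 acquire/release atomics, as a standalone sketch with hypothetical names.

#include <stdatomic.h>
#include <stdint.h>

struct status_block {
	_Atomic uint16_t status_idx;	/* written last by the producer side */
	uint16_t kwq_con_idx;		/* field guarded by status_idx       */
};

/* Load the index with acquire semantics first, so the following read of
 * kwq_con_idx cannot be reordered before it. */
static uint16_t consume(struct status_block *sb, uint16_t *kwq_out)
{
	uint16_t idx = atomic_load_explicit(&sb->status_idx,
					    memory_order_acquire);

	*kwq_out = sb->kwq_con_idx;
	return idx;
}

int main(void)
{
	struct status_block sb = { .kwq_con_idx = 7 };
	uint16_t kwq;

	atomic_store_explicit(&sb.status_idx, 1, memory_order_release);
	return (consume(&sb, &kwq) == 1 && kwq == 7) ? 0 : 1;
}
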
 
index 2a628d1..7018bfe 100644 (file)
@@ -1008,7 +1008,7 @@ static void emac_rx_handler(void *token, int len, int status)
        int                     ret;
 
        /* free and bail if we are shutting down */
-       if (unlikely(!netif_running(ndev))) {
+       if (unlikely(!netif_running(ndev) || !netif_carrier_ok(ndev))) {
                dev_kfree_skb_any(skb);
                return;
        }
index 9d8a20b..8318ea0 100644 (file)
@@ -337,8 +337,6 @@ static int dnet_mii_init(struct dnet *bp)
        for (i = 0; i < PHY_MAX_ADDR; i++)
                bp->mii_bus->irq[i] = PHY_POLL;
 
-       platform_set_drvdata(bp->dev, bp->mii_bus);
-
        if (mdiobus_register(bp->mii_bus)) {
                err = -ENXIO;
                goto err_out_free_mdio_irq;
@@ -863,6 +861,7 @@ static int __devinit dnet_probe(struct platform_device *pdev)
        bp = netdev_priv(dev);
        bp->dev = dev;
 
+       platform_set_drvdata(pdev, dev);
        SET_NETDEV_DEV(dev, &pdev->dev);
 
        spin_lock_init(&bp->lock);
index 55c1711..33e7c45 100644 (file)
@@ -42,7 +42,8 @@
 #define GBE_CONFIG_RAM_BASE \
        ((unsigned int)(CONFIG_RAM_BASE + GBE_CONFIG_OFFSET))
 
-#define GBE_CONFIG_BASE_VIRT    phys_to_virt(GBE_CONFIG_RAM_BASE)
+#define GBE_CONFIG_BASE_VIRT \
+       ((void __iomem *)phys_to_virt(GBE_CONFIG_RAM_BASE))
 
 #define GBE_CONFIG_FLASH_WRITE(base, offset, count, data) \
        (iowrite16_rep(base + offset, data, count))
index 3fa110d..2e50228 100644 (file)
@@ -5967,7 +5967,8 @@ static int __devinit e1000_probe(struct pci_dev *pdev,
                /* APME bit in EEPROM is mapped to WUC.APME */
                eeprom_data = er32(WUC);
                eeprom_apme_mask = E1000_WUC_APME;
-               if (eeprom_data & E1000_WUC_PHY_WAKE)
+               if ((hw->mac.type > e1000_ich10lan) &&
+                   (eeprom_data & E1000_WUC_PHY_WAKE))
                        adapter->flags2 |= FLAG2_HAS_PHY_WAKEUP;
        } else if (adapter->flags & FLAG_APME_IN_CTRL3) {
                if (adapter->flags & FLAG_APME_CHECK_PORT_B &&
index 2a71373..cd0282d 100644 (file)
@@ -74,7 +74,8 @@ static struct platform_device_id fec_devtype[] = {
        }, {
                .name = "imx28-fec",
                .driver_data = FEC_QUIRK_ENET_MAC | FEC_QUIRK_SWAP_FRAME,
-       }
+       },
+       { }
 };
 
 static unsigned char macaddr[ETH_ALEN];
index 74486a8..af3822f 100644 (file)
@@ -220,7 +220,7 @@ static u32 e1000_hash_mc_addr_vf(struct e1000_hw *hw, u8 *mc_addr)
  *  The parameter rar_count will usually be hw->mac.rar_entry_count
  *  unless there are workarounds that change this.
  **/
-void e1000_update_mc_addr_list_vf(struct e1000_hw *hw,
+static void e1000_update_mc_addr_list_vf(struct e1000_hw *hw,
                                   u8 *mc_addr_list, u32 mc_addr_count,
                                   u32 rar_used_count, u32 rar_count)
 {
index f69e73e..79ccb54 100644 (file)
@@ -260,7 +260,7 @@ static int macb_mii_init(struct macb *bp)
        for (i = 0; i < PHY_MAX_ADDR; i++)
                bp->mii_bus->irq[i] = PHY_POLL;
 
-       platform_set_drvdata(bp->dev, bp->mii_bus);
+       dev_set_drvdata(&bp->dev->dev, bp->mii_bus);
 
        if (mdiobus_register(bp->mii_bus))
                goto err_out_free_mdio_irq;
index 9226cda..530ab5a 100644 (file)
@@ -691,6 +691,7 @@ static struct pcmcia_device_id fmvj18x_ids[] = {
        PCMCIA_PFC_DEVICE_MANF_CARD(0, 0x0105, 0x0e0a),
        PCMCIA_PFC_DEVICE_MANF_CARD(0, 0x0032, 0x0e01),
        PCMCIA_PFC_DEVICE_MANF_CARD(0, 0x0032, 0x0a05),
+       PCMCIA_PFC_DEVICE_MANF_CARD(0, 0x0032, 0x0b05),
        PCMCIA_PFC_DEVICE_MANF_CARD(0, 0x0032, 0x1101),
        PCMCIA_DEVICE_NULL,
 };
index ef2133b..7ffdb80 100644 (file)
@@ -25,6 +25,7 @@
 #include <linux/dma-mapping.h>
 #include <linux/pm_runtime.h>
 #include <linux/firmware.h>
+#include <linux/pci-aspm.h>
 
 #include <asm/system.h>
 #include <asm/io.h>
@@ -3020,6 +3021,11 @@ rtl8169_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
        mii->reg_num_mask = 0x1f;
        mii->supports_gmii = !!(cfg->features & RTL_FEATURE_GMII);
 
+       /* Disable ASPM completely, as it causes random "device stopped working"
+        * problems as well as full system hangs for some PCIe device users. */
+       pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1 |
+                                    PCIE_LINK_STATE_CLKPM);
+
        /* enable device (incl. PCI PM wakeup and hotplug setup) */
        rc = pci_enable_device(pdev);
        if (rc < 0) {
index 42daf98..35b28f4 100644 (file)
@@ -3856,9 +3856,6 @@ static struct net_device *skge_devinit(struct skge_hw *hw, int port,
        memcpy_fromio(dev->dev_addr, hw->regs + B2_MAC_1 + port*8, ETH_ALEN);
        memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
 
-       /* device is off until link detection */
-       netif_carrier_off(dev);
-
        return dev;
 }
 
index 5ab3084..07b1633 100644 (file)
@@ -219,8 +219,9 @@ static int __hif_usb_tx(struct hif_device_usb *hif_dev)
        struct tx_buf *tx_buf = NULL;
        struct sk_buff *nskb = NULL;
        int ret = 0, i;
-       u16 *hdr, tx_skb_cnt = 0;
+       u16 tx_skb_cnt = 0;
        u8 *buf;
+       __le16 *hdr;
 
        if (hif_dev->tx.tx_skb_cnt == 0)
                return 0;
@@ -245,9 +246,9 @@ static int __hif_usb_tx(struct hif_device_usb *hif_dev)
 
                buf = tx_buf->buf;
                buf += tx_buf->offset;
-               hdr = (u16 *)buf;
-               *hdr++ = nskb->len;
-               *hdr++ = ATH_USB_TX_STREAM_MODE_TAG;
+               hdr = (__le16 *)buf;
+               *hdr++ = cpu_to_le16(nskb->len);
+               *hdr++ = cpu_to_le16(ATH_USB_TX_STREAM_MODE_TAG);
                buf += 4;
                memcpy(buf, nskb->data, nskb->len);
                tx_buf->len = nskb->len + 4;
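
The hif_usb hunk above switches the stream-mode header to __le16 and cpu_to_le16() so the two 16-bit words reach the wire little-endian regardless of host byte order. A standalone plain-C illustration of the same idea, writing the header byte by byte (the tag value here is a placeholder, not the driver constant):

#include <stddef.h>
#include <stdint.h>

static void put_le16(uint8_t *p, uint16_t v)
{
	p[0] = v & 0xff;		/* low byte first: little-endian      */
	p[1] = v >> 8;
}

static size_t build_stream_header(uint8_t *buf, uint16_t skb_len,
				  uint16_t stream_tag)
{
	put_le16(buf, skb_len);		/* like cpu_to_le16(nskb->len)        */
	put_le16(buf + 2, stream_tag);	/* like the stream-mode tag constant  */
	return 4;			/* 4-byte header, as in the driver    */
}

int main(void)
{
	uint8_t hdr[4];

	build_stream_header(hdr, 1500, 0x1234);		/* placeholder tag */
	return !(hdr[0] == 0xdc && hdr[1] == 0x05);	/* 1500 == 0x05dc  */
}
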
index 180170d..2915b11 100644 (file)
@@ -885,7 +885,7 @@ void ath9k_hw_set_interrupts(struct ath_hw *ah, enum ath9k_int ints)
        struct ath_common *common = ath9k_hw_common(ah);
 
        if (!(ints & ATH9K_INT_GLOBAL))
-               ath9k_hw_enable_interrupts(ah);
+               ath9k_hw_disable_interrupts(ah);
 
        ath_dbg(common, ATH_DBG_INTERRUPT, "0x%x => 0x%x\n", omask, ints);
 
@@ -963,7 +963,8 @@ void ath9k_hw_set_interrupts(struct ath_hw *ah, enum ath9k_int ints)
                        REG_CLR_BIT(ah, AR_IMR_S5, AR_IMR_S5_TIM_TIMER);
        }
 
-       ath9k_hw_enable_interrupts(ah);
+       if (ints & ATH9K_INT_GLOBAL)
+               ath9k_hw_enable_interrupts(ah);
 
        return;
 }
index 537732e..f82c400 100644 (file)
@@ -118,6 +118,8 @@ static struct usb_device_id carl9170_usb_ids[] = {
        { USB_DEVICE(0x057c, 0x8402) },
        /* Qwest/Actiontec 802AIN Wireless N USB Network Adapter */
        { USB_DEVICE(0x1668, 0x1200) },
+       /* Airlive X.USB a/b/g/n */
+       { USB_DEVICE(0x1b75, 0x9170) },
 
        /* terminate */
        {}
index 79ab0a6..537fb8c 100644 (file)
@@ -51,7 +51,7 @@
 #include "iwl-agn-debugfs.h"
 
 /* Highest firmware API version supported */
-#define IWL5000_UCODE_API_MAX 2
+#define IWL5000_UCODE_API_MAX 5
 #define IWL5150_UCODE_API_MAX 2
 
 /* Lowest firmware API version supported */
index 21713a7..9b344a9 100644 (file)
@@ -98,6 +98,7 @@ static struct usb_device_id p54u_table[] __devinitdata = {
        {USB_DEVICE(0x1413, 0x5400)},   /* Telsey 802.11g USB2.0 Adapter */
        {USB_DEVICE(0x1435, 0x0427)},   /* Inventel UR054G */
        {USB_DEVICE(0x1668, 0x1050)},   /* Actiontec 802UIG-1 */
+       {USB_DEVICE(0x1740, 0x1000)},   /* Senao NUB-350 */
        {USB_DEVICE(0x2001, 0x3704)},   /* DLink DWL-G122 rev A2 */
        {USB_DEVICE(0x2001, 0x3705)},   /* D-Link DWL-G120 rev C1 */
        {USB_DEVICE(0x413c, 0x5513)},   /* Dell WLA3310 USB Wireless Adapter */
index 848cc2c..518542b 100644 (file)
@@ -2597,6 +2597,9 @@ static int rndis_set_power_mgmt(struct wiphy *wiphy, struct net_device *dev,
        __le32 mode;
        int ret;
 
+       if (priv->device_type != RNDIS_BCM4320B)
+               return -ENOTSUPP;
+
        netdev_dbg(usbdev->net, "%s(): %s, %d\n", __func__,
                                enabled ? "enabled" : "disabled",
                                timeout);
index 28295d0..4d87b5d 100644 (file)
@@ -36,19 +36,55 @@ unsigned int of_pdt_unique_id __initdata;
        (p)->unique_id = of_pdt_unique_id++; \
 } while (0)
 
-static inline const char *of_pdt_node_name(struct device_node *dp)
+static char * __init of_pdt_build_full_name(struct device_node *dp)
 {
-       return dp->path_component_name;
+       int len, ourlen, plen;
+       char *n;
+
+       dp->path_component_name = build_path_component(dp);
+
+       plen = strlen(dp->parent->full_name);
+       ourlen = strlen(dp->path_component_name);
+       len = ourlen + plen + 2;
+
+       n = prom_early_alloc(len);
+       strcpy(n, dp->parent->full_name);
+       if (!of_node_is_root(dp->parent)) {
+               strcpy(n + plen, "/");
+               plen++;
+       }
+       strcpy(n + plen, dp->path_component_name);
+
+       return n;
 }
 
-#else
+#else /* CONFIG_SPARC */
 
 static inline void of_pdt_incr_unique_id(void *p) { }
 static inline void irq_trans_init(struct device_node *dp) { }
 
-static inline const char *of_pdt_node_name(struct device_node *dp)
+static char * __init of_pdt_build_full_name(struct device_node *dp)
 {
-       return dp->name;
+       static int failsafe_id = 0; /* for generating unique names on failure */
+       char *buf;
+       int len;
+
+       if (of_pdt_prom_ops->pkg2path(dp->phandle, NULL, 0, &len))
+               goto failsafe;
+
+       buf = prom_early_alloc(len + 1);
+       if (of_pdt_prom_ops->pkg2path(dp->phandle, buf, len, &len))
+               goto failsafe;
+       return buf;
+
+ failsafe:
+       buf = prom_early_alloc(strlen(dp->parent->full_name) +
+                              strlen(dp->name) + 16);
+       sprintf(buf, "%s/%s@unknown%i",
+               of_node_is_root(dp->parent) ? "" : dp->parent->full_name,
+               dp->name, failsafe_id++);
+       pr_err("%s: pkg2path failed; assigning %s\n", __func__, buf);
+       return buf;
 }
 
 #endif /* !CONFIG_SPARC */
@@ -132,47 +168,6 @@ static char * __init of_pdt_get_one_property(phandle node, const char *name)
        return buf;
 }
 
-static char * __init of_pdt_try_pkg2path(phandle node)
-{
-       char *res, *buf = NULL;
-       int len;
-
-       if (!of_pdt_prom_ops->pkg2path)
-               return NULL;
-
-       if (of_pdt_prom_ops->pkg2path(node, buf, 0, &len))
-               return NULL;
-       buf = prom_early_alloc(len + 1);
-       if (of_pdt_prom_ops->pkg2path(node, buf, len, &len)) {
-               pr_err("%s: package-to-path failed\n", __func__);
-               return NULL;
-       }
-
-       res = strrchr(buf, '/');
-       if (!res) {
-               pr_err("%s: couldn't find / in %s\n", __func__, buf);
-               return NULL;
-       }
-       return res+1;
-}
-
-/*
- * When fetching the node's name, first try using package-to-path; if
- * that fails (either because the arch hasn't supplied a PROM callback,
- * or some other random failure), fall back to just looking at the node's
- * 'name' property.
- */
-static char * __init of_pdt_build_name(phandle node)
-{
-       char *buf;
-
-       buf = of_pdt_try_pkg2path(node);
-       if (!buf)
-               buf = of_pdt_get_one_property(node, "name");
-
-       return buf;
-}
-
 static struct device_node * __init of_pdt_create_node(phandle node,
                                                    struct device_node *parent)
 {
@@ -187,7 +182,7 @@ static struct device_node * __init of_pdt_create_node(phandle node,
 
        kref_init(&dp->kref);
 
-       dp->name = of_pdt_build_name(node);
+       dp->name = of_pdt_get_one_property(node, "name");
        dp->type = of_pdt_get_one_property(node, "device_type");
        dp->phandle = node;
 
@@ -198,26 +193,6 @@ static struct device_node * __init of_pdt_create_node(phandle node,
        return dp;
 }
 
-static char * __init of_pdt_build_full_name(struct device_node *dp)
-{
-       int len, ourlen, plen;
-       char *n;
-
-       plen = strlen(dp->parent->full_name);
-       ourlen = strlen(of_pdt_node_name(dp));
-       len = ourlen + plen + 2;
-
-       n = prom_early_alloc(len);
-       strcpy(n, dp->parent->full_name);
-       if (!of_node_is_root(dp->parent)) {
-               strcpy(n + plen, "/");
-               plen++;
-       }
-       strcpy(n + plen, of_pdt_node_name(dp));
-
-       return n;
-}
-
 static struct device_node * __init of_pdt_build_tree(struct device_node *parent,
                                                   phandle node,
                                                   struct device_node ***nextp)
@@ -240,9 +215,6 @@ static struct device_node * __init of_pdt_build_tree(struct device_node *parent,
                *(*nextp) = dp;
                *nextp = &dp->allnext;
 
-#if defined(CONFIG_SPARC)
-               dp->path_component_name = build_path_component(dp);
-#endif
                dp->full_name = of_pdt_build_full_name(dp);
 
                dp->child = of_pdt_build_tree(dp,
index 9045c52..fb2bb35 100644 (file)
@@ -443,7 +443,7 @@ static void scsi_run_queue(struct request_queue *q)
                                        &sdev->request_queue->queue_flags);
                if (flagset)
                        queue_flag_set(QUEUE_FLAG_REENTER, sdev->request_queue);
-               __blk_run_queue(sdev->request_queue);
+               __blk_run_queue(sdev->request_queue, false);
                if (flagset)
                        queue_flag_clear(QUEUE_FLAG_REENTER, sdev->request_queue);
                spin_unlock(sdev->request_queue->queue_lock);
index 998c01b..5c3ccfc 100644 (file)
@@ -3829,7 +3829,7 @@ fc_bsg_goose_queue(struct fc_rport *rport)
                  !test_bit(QUEUE_FLAG_REENTER, &rport->rqst_q->queue_flags);
        if (flagset)
                queue_flag_set(QUEUE_FLAG_REENTER, rport->rqst_q);
-       __blk_run_queue(rport->rqst_q);
+       __blk_run_queue(rport->rqst_q, false);
        if (flagset)
                queue_flag_clear(QUEUE_FLAG_REENTER, rport->rqst_q);
        spin_unlock_irqrestore(rport->rqst_q->queue_lock, flags);
index 93760b2..1ef4df9 100644 (file)
@@ -712,6 +712,7 @@ static struct pcmcia_device_id serial_ids[] = {
        PCMCIA_PFC_DEVICE_PROD_ID12(1, "Xircom", "CreditCard Ethernet+Modem II", 0x2e3ee845, 0xeca401bf),
        PCMCIA_PFC_DEVICE_MANF_CARD(1, 0x0032, 0x0e01),
        PCMCIA_PFC_DEVICE_MANF_CARD(1, 0x0032, 0x0a05),
+       PCMCIA_PFC_DEVICE_MANF_CARD(1, 0x0032, 0x0b05),
        PCMCIA_PFC_DEVICE_MANF_CARD(1, 0x0032, 0x1101),
        PCMCIA_MFC_DEVICE_MANF_CARD(0, 0x0104, 0x0070),
        PCMCIA_MFC_DEVICE_MANF_CARD(1, 0x0101, 0x0562),
index 3c6e1a0..5e14950 100644 (file)
@@ -346,14 +346,19 @@ static void pn_rx_complete(struct usb_ep *ep, struct usb_request *req)
 
                if (unlikely(!skb))
                        break;
-               skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page, 0,
-                               req->actual);
-               page = NULL;
 
-               if (req->actual < req->length) { /* Last fragment */
+               if (skb->len == 0) { /* First fragment */
                        skb->protocol = htons(ETH_P_PHONET);
                        skb_reset_mac_header(skb);
-                       pskb_pull(skb, 1);
+                       /* Can't use pskb_pull() on page in IRQ */
+                       memcpy(skb_put(skb, 1), page_address(page), 1);
+               }
+
+               skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page,
+                               skb->len == 0, req->actual);
+               page = NULL;
+
+               if (req->actual < req->length) { /* Last fragment */
                        skb->dev = dev;
                        dev->stats.rx_packets++;
                        dev->stats.rx_bytes += skb->len;
index e8f4f36..a6f21b8 100644 (file)
@@ -29,6 +29,7 @@
 
 #include <linux/of.h>
 #include <linux/of_platform.h>
+#include <linux/of_address.h>
 
 /**
  * ehci_xilinx_of_setup - Initialize the device for ehci_reset()
index 264e95d..4d70db1 100644 (file)
@@ -272,7 +272,6 @@ static int exofs_rename(struct inode *old_dir, struct dentry *old_dentry,
                new_de = exofs_find_entry(new_dir, new_dentry, &new_page);
                if (!new_de)
                        goto out_dir;
-               inode_inc_link_count(old_inode);
                err = exofs_set_link(new_dir, new_de, new_page, old_inode);
                new_inode->i_ctime = CURRENT_TIME;
                if (dir_de)
@@ -286,12 +285,9 @@ static int exofs_rename(struct inode *old_dir, struct dentry *old_dentry,
                        if (new_dir->i_nlink >= EXOFS_LINK_MAX)
                                goto out_dir;
                }
-               inode_inc_link_count(old_inode);
                err = exofs_add_link(new_dentry, old_inode);
-               if (err) {
-                       inode_dec_link_count(old_inode);
+               if (err)
                        goto out_dir;
-               }
                if (dir_de)
                        inode_inc_link_count(new_dir);
        }
@@ -299,7 +295,7 @@ static int exofs_rename(struct inode *old_dir, struct dentry *old_dentry,
        old_inode->i_ctime = CURRENT_TIME;
 
        exofs_delete_entry(old_de, old_page);
-       inode_dec_link_count(old_inode);
+       mark_inode_dirty(old_inode);
 
        if (dir_de) {
                err = exofs_set_link(old_inode, dir_de, dir_page, new_dir);
index 2e1d834..adb9185 100644 (file)
@@ -344,7 +344,6 @@ static int ext2_rename (struct inode * old_dir, struct dentry * old_dentry,
                new_de = ext2_find_entry (new_dir, &new_dentry->d_name, &new_page);
                if (!new_de)
                        goto out_dir;
-               inode_inc_link_count(old_inode);
                ext2_set_link(new_dir, new_de, new_page, old_inode, 1);
                new_inode->i_ctime = CURRENT_TIME_SEC;
                if (dir_de)
@@ -356,12 +355,9 @@ static int ext2_rename (struct inode * old_dir, struct dentry * old_dentry,
                        if (new_dir->i_nlink >= EXT2_LINK_MAX)
                                goto out_dir;
                }
-               inode_inc_link_count(old_inode);
                err = ext2_add_link(new_dentry, old_inode);
-               if (err) {
-                       inode_dec_link_count(old_inode);
+               if (err)
                        goto out_dir;
-               }
                if (dir_de)
                        inode_inc_link_count(new_dir);
        }
@@ -369,12 +365,11 @@ static int ext2_rename (struct inode * old_dir, struct dentry * old_dentry,
        /*
         * Like most other Unix systems, set the ctime for inodes on a
         * rename.
-        * inode_dec_link_count() will mark the inode dirty.
         */
        old_inode->i_ctime = CURRENT_TIME_SEC;
+       mark_inode_dirty(old_inode);
 
        ext2_delete_entry (old_de, old_page);
-       inode_dec_link_count(old_inode);
 
        if (dir_de) {
                if (old_dir != new_dir)
index afa66aa..b4d70b1 100644 (file)
@@ -238,46 +238,22 @@ static int hfs_mkdir(struct inode *dir, struct dentry *dentry, int mode)
 }
 
 /*
- * hfs_unlink()
+ * hfs_remove()
  *
- * This is the unlink() entry in the inode_operations structure for
- * regular HFS directories.  The purpose is to delete an existing
- * file, given the inode for the parent directory and the name
- * (and its length) of the existing file.
- */
-static int hfs_unlink(struct inode *dir, struct dentry *dentry)
-{
-       struct inode *inode;
-       int res;
-
-       inode = dentry->d_inode;
-       res = hfs_cat_delete(inode->i_ino, dir, &dentry->d_name);
-       if (res)
-               return res;
-
-       drop_nlink(inode);
-       hfs_delete_inode(inode);
-       inode->i_ctime = CURRENT_TIME_SEC;
-       mark_inode_dirty(inode);
-
-       return res;
-}
-
-/*
- * hfs_rmdir()
+ * This serves as both unlink() and rmdir() in the inode_operations
+ * structure for regular HFS directories.  The purpose is to delete
+ * an existing child, given the inode for the parent directory and
+ * the name (and its length) of the existing child.
  *
- * This is the rmdir() entry in the inode_operations structure for
- * regular HFS directories.  The purpose is to delete an existing
- * directory, given the inode for the parent directory and the name
- * (and its length) of the existing directory.
+ * HFS does not have hardlinks, so both rmdir and unlink set the
+ * link count to 0.  The only difference is the emptiness check.
  */
-static int hfs_rmdir(struct inode *dir, struct dentry *dentry)
+static int hfs_remove(struct inode *dir, struct dentry *dentry)
 {
-       struct inode *inode;
+       struct inode *inode = dentry->d_inode;
        int res;
 
-       inode = dentry->d_inode;
-       if (inode->i_size != 2)
+       if (S_ISDIR(inode->i_mode) && inode->i_size != 2)
                return -ENOTEMPTY;
        res = hfs_cat_delete(inode->i_ino, dir, &dentry->d_name);
        if (res)
@@ -307,7 +283,7 @@ static int hfs_rename(struct inode *old_dir, struct dentry *old_dentry,
 
        /* Unlink destination if it already exists */
        if (new_dentry->d_inode) {
-               res = hfs_unlink(new_dir, new_dentry);
+               res = hfs_remove(new_dir, new_dentry);
                if (res)
                        return res;
        }
@@ -332,9 +308,9 @@ const struct file_operations hfs_dir_operations = {
 const struct inode_operations hfs_dir_inode_operations = {
        .create         = hfs_create,
        .lookup         = hfs_lookup,
-       .unlink         = hfs_unlink,
+       .unlink         = hfs_remove,
        .mkdir          = hfs_mkdir,
-       .rmdir          = hfs_rmdir,
+       .rmdir          = hfs_remove,
        .rename         = hfs_rename,
        .setattr        = hfs_inode_setattr,
 };
index ce7337d..6e6777f 100644 (file)
@@ -213,7 +213,6 @@ static int minix_rename(struct inode * old_dir, struct dentry *old_dentry,
                new_de = minix_find_entry(new_dentry, &new_page);
                if (!new_de)
                        goto out_dir;
-               inode_inc_link_count(old_inode);
                minix_set_link(new_de, new_page, old_inode);
                new_inode->i_ctime = CURRENT_TIME_SEC;
                if (dir_de)
@@ -225,18 +224,15 @@ static int minix_rename(struct inode * old_dir, struct dentry *old_dentry,
                        if (new_dir->i_nlink >= info->s_link_max)
                                goto out_dir;
                }
-               inode_inc_link_count(old_inode);
                err = minix_add_link(new_dentry, old_inode);
-               if (err) {
-                       inode_dec_link_count(old_inode);
+               if (err)
                        goto out_dir;
-               }
                if (dir_de)
                        inode_inc_link_count(new_dir);
        }
 
        minix_delete_entry(old_de, old_page);
-       inode_dec_link_count(old_inode);
+       mark_inode_dirty(old_inode);
 
        if (dir_de) {
                minix_set_link(dir_de, dir_page, new_dir);
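
The minix hunks (and the matching nilfs2, sysv and ufs hunks further down) all drop the same pattern: a temporary inode_inc_link_count()/inode_dec_link_count() pair bracketing the insertion of the new directory entry. Rename moves a name rather than adding one, so the renamed inode's link count is the same before and after; once the old entry is deleted, all that remains is to mark the inode dirty so the updated ctime is written back. A rough sketch of the resulting flow (placeholder helpers, error paths trimmed):

/* Sketch only: the net effect of the simplified rename tail.
 * One name is added and one removed, so i_nlink never changes.
 * example_add_link()/example_delete_entry() stand in for the per-fs helpers. */
static int example_rename_tail(struct dentry *old_dentry,
                               struct dentry *new_dentry)
{
        struct inode *old_inode = old_dentry->d_inode;
        int err;

        err = example_add_link(new_dentry, old_inode);  /* new name, no nlink bump */
        if (err)
                return err;

        example_delete_entry(old_dentry);               /* old name, no nlink drop */

        old_inode->i_ctime = CURRENT_TIME_SEC;
        mark_inode_dirty(old_inode);                    /* persist the new ctime */
        return 0;
}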
index 9803427..161791d 100644 (file)
@@ -397,7 +397,6 @@ static int nilfs_rename(struct inode *old_dir, struct dentry *old_dentry,
                new_de = nilfs_find_entry(new_dir, &new_dentry->d_name, &new_page);
                if (!new_de)
                        goto out_dir;
-               inc_nlink(old_inode);
                nilfs_set_link(new_dir, new_de, new_page, old_inode);
                nilfs_mark_inode_dirty(new_dir);
                new_inode->i_ctime = CURRENT_TIME;
@@ -411,13 +410,9 @@ static int nilfs_rename(struct inode *old_dir, struct dentry *old_dentry,
                        if (new_dir->i_nlink >= NILFS_LINK_MAX)
                                goto out_dir;
                }
-               inc_nlink(old_inode);
                err = nilfs_add_link(new_dentry, old_inode);
-               if (err) {
-                       drop_nlink(old_inode);
-                       nilfs_mark_inode_dirty(old_inode);
+               if (err)
                        goto out_dir;
-               }
                if (dir_de) {
                        inc_nlink(new_dir);
                        nilfs_mark_inode_dirty(new_dir);
@@ -431,7 +426,6 @@ static int nilfs_rename(struct inode *old_dir, struct dentry *old_dentry,
        old_inode->i_ctime = CURRENT_TIME;
 
        nilfs_delete_entry(old_de, old_page);
-       drop_nlink(old_inode);
 
        if (dir_de) {
                nilfs_set_link(old_inode, dir_de, dir_page, new_dir);
index 55ebae5..2de9f63 100644 (file)
@@ -430,7 +430,8 @@ static void nilfs_segctor_begin_finfo(struct nilfs_sc_info *sci,
        nilfs_segctor_map_segsum_entry(
                sci, &sci->sc_binfo_ptr, sizeof(struct nilfs_finfo));
 
-       if (inode->i_sb && !test_bit(NILFS_SC_HAVE_DELTA, &sci->sc_flags))
+       if (NILFS_I(inode)->i_root &&
+           !test_bit(NILFS_SC_HAVE_DELTA, &sci->sc_flags))
                set_bit(NILFS_SC_HAVE_DELTA, &sci->sc_flags);
        /* skip finfo */
 }
index d9396a4..927cbd1 100644 (file)
@@ -233,7 +233,7 @@ void __init proc_device_tree_init(void)
                return;
        root = of_find_node_by_path("/");
        if (root == NULL) {
-               printk(KERN_ERR "/proc/device-tree: can't find root\n");
+               pr_debug("/proc/device-tree: can't find root\n");
                return;
        }
        proc_device_tree_add_node(root, proc_device_tree);
index ba5f51e..68fdf45 100644 (file)
@@ -771,7 +771,7 @@ static int reiserfs_mkdir(struct inode *dir, struct dentry *dentry, int mode)
                                        EMPTY_DIR_SIZE_V1 : EMPTY_DIR_SIZE,
                                        dentry, inode, &security);
        if (retval) {
-               dir->i_nlink--;
+               DEC_DIR_INODE_NLINK(dir)
                goto out_failed;
        }
 
index b427b12..e474fbc 100644 (file)
@@ -245,7 +245,6 @@ static int sysv_rename(struct inode * old_dir, struct dentry * old_dentry,
                new_de = sysv_find_entry(new_dentry, &new_page);
                if (!new_de)
                        goto out_dir;
-               inode_inc_link_count(old_inode);
                sysv_set_link(new_de, new_page, old_inode);
                new_inode->i_ctime = CURRENT_TIME_SEC;
                if (dir_de)
@@ -257,18 +256,15 @@ static int sysv_rename(struct inode * old_dir, struct dentry * old_dentry,
                        if (new_dir->i_nlink >= SYSV_SB(new_dir->i_sb)->s_link_max)
                                goto out_dir;
                }
-               inode_inc_link_count(old_inode);
                err = sysv_add_link(new_dentry, old_inode);
-               if (err) {
-                       inode_dec_link_count(old_inode);
+               if (err)
                        goto out_dir;
-               }
                if (dir_de)
                        inode_inc_link_count(new_dir);
        }
 
        sysv_delete_entry(old_de, old_page);
-       inode_dec_link_count(old_inode);
+       mark_inode_dirty(old_inode);
 
        if (dir_de) {
                sysv_set_link(dir_de, dir_page, new_dir);
index 2be0f9e..b7c338d 100644 (file)
@@ -32,6 +32,8 @@
 #include <linux/crc-itu-t.h>
 #include <linux/exportfs.h>
 
+enum { UDF_MAX_LINKS = 0xffff };
+
 static inline int udf_match(int len1, const unsigned char *name1, int len2,
                            const unsigned char *name2)
 {
@@ -650,7 +652,7 @@ static int udf_mkdir(struct inode *dir, struct dentry *dentry, int mode)
        struct udf_inode_info *iinfo;
 
        err = -EMLINK;
-       if (dir->i_nlink >= (256 << sizeof(dir->i_nlink)) - 1)
+       if (dir->i_nlink >= UDF_MAX_LINKS)
                goto out;
 
        err = -EIO;
@@ -1034,9 +1036,8 @@ static int udf_link(struct dentry *old_dentry, struct inode *dir,
        struct fileIdentDesc cfi, *fi;
        int err;
 
-       if (inode->i_nlink >= (256 << sizeof(inode->i_nlink)) - 1) {
+       if (inode->i_nlink >= UDF_MAX_LINKS)
                return -EMLINK;
-       }
 
        fi = udf_add_entry(dir, dentry, &fibh, &cfi, &err);
        if (!fi) {
@@ -1131,9 +1132,7 @@ static int udf_rename(struct inode *old_dir, struct dentry *old_dentry,
                        goto end_rename;
 
                retval = -EMLINK;
-               if (!new_inode &&
-                       new_dir->i_nlink >=
-                               (256 << sizeof(new_dir->i_nlink)) - 1)
+               if (!new_inode && new_dir->i_nlink >= UDF_MAX_LINKS)
                        goto end_rename;
        }
        if (!nfi) {
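
UDF keeps the link count in a 16-bit on-disk field, so 0xffff is the real ceiling. The expression being replaced never computed that: sizeof() yields bytes, not bits, so with a 32-bit i_nlink the old test allowed 4095 links, and with a 16-bit field it would have allowed only 1023. The arithmetic, spelled out for those two cases:

/* Old, open-coded limit -- sizeof() counts bytes, not bits:
 *   (256 << sizeof(u32)) - 1  ==  (256 << 4) - 1  ==  4095
 *   (256 << sizeof(u16)) - 1  ==  (256 << 2) - 1  ==  1023
 * Intended limit for a 16-bit on-disk link count:
 *   (1 << 16) - 1             ==  0xffff  ==  UDF_MAX_LINKS
 */

Hoisting the value into a named enum also keeps the mkdir, link and rename checks in agreement, as the three hunks above show.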
index 12f39b9..d6f6815 100644 (file)
@@ -306,7 +306,6 @@ static int ufs_rename(struct inode *old_dir, struct dentry *old_dentry,
                new_de = ufs_find_entry(new_dir, &new_dentry->d_name, &new_page);
                if (!new_de)
                        goto out_dir;
-               inode_inc_link_count(old_inode);
                ufs_set_link(new_dir, new_de, new_page, old_inode);
                new_inode->i_ctime = CURRENT_TIME_SEC;
                if (dir_de)
@@ -318,12 +317,9 @@ static int ufs_rename(struct inode *old_dir, struct dentry *old_dentry,
                        if (new_dir->i_nlink >= UFS_LINK_MAX)
                                goto out_dir;
                }
-               inode_inc_link_count(old_inode);
                err = ufs_add_link(new_dentry, old_inode);
-               if (err) {
-                       inode_dec_link_count(old_inode);
+               if (err)
                        goto out_dir;
-               }
                if (dir_de)
                        inode_inc_link_count(new_dir);
        }
@@ -331,12 +327,11 @@ static int ufs_rename(struct inode *old_dir, struct dentry *old_dentry,
        /*
         * Like most other Unix systems, set the ctime for inodes on a
         * rename.
-        * inode_dec_link_count() will mark the inode dirty.
         */
        old_inode->i_ctime = CURRENT_TIME_SEC;
 
        ufs_delete_entry(old_dir, old_de, old_page);
-       inode_dec_link_count(old_inode);
+       mark_inode_dirty(old_inode);
 
        if (dir_de) {
                ufs_set_link(old_inode, dir_de, dir_page, new_dir);
index f5e2a19..0ca0e3c 100644 (file)
@@ -695,14 +695,19 @@ xfs_ioc_fsgeometry_v1(
        xfs_mount_t             *mp,
        void                    __user *arg)
 {
-       xfs_fsop_geom_v1_t      fsgeo;
+       xfs_fsop_geom_t         fsgeo;
        int                     error;
 
-       error = xfs_fs_geometry(mp, (xfs_fsop_geom_t *)&fsgeo, 3);
+       error = xfs_fs_geometry(mp, &fsgeo, 3);
        if (error)
                return -error;
 
-       if (copy_to_user(arg, &fsgeo, sizeof(fsgeo)))
+       /*
+        * Caller should have passed an argument of type
+        * xfs_fsop_geom_v1_t.  This is a proper subset of the
+        * xfs_fsop_geom_t that xfs_fs_geometry() fills in.
+        */
+       if (copy_to_user(arg, &fsgeo, sizeof(xfs_fsop_geom_v1_t)))
                return -XFS_ERROR(EFAULT);
        return 0;
 }
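
Instead of casting the smaller v1 structure to the larger type, which risked letting xfs_fs_geometry() write past it on the stack, the fixed code fills a full xfs_fsop_geom_t and copies only the v1-sized prefix out to user space. That is only correct because the v1 layout is a leading subset of the current structure, as the added comment states. The idiom in isolation (the struct names below are made up for illustration):

/* Illustration only: exposing an older, shorter ABI by copying a prefix.
 * struct geom_v1 must be a strict prefix of struct geom for this to work. */
struct geom_v1 { __u32 blocksize; __u32 rtextsize; };
struct geom    { __u32 blocksize; __u32 rtextsize; __u32 logsectsize; };

static int copy_geom_v1(const struct geom *full, void __user *arg)
{
        /* Fill "full" completely, then hand userspace only the v1 fields. */
        if (copy_to_user(arg, full, sizeof(struct geom_v1)))
                return -EFAULT;
        return 0;
}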
index 5cb86c3..fc48754 100644 (file)
@@ -99,7 +99,6 @@ struct rxrpc_key_token {
  * structure of raw payloads passed to add_key() or instantiate key
  */
 struct rxrpc_key_data_v1 {
-       u32             kif_version;            /* 1 */
        u16             security_index;
        u16             ticket_length;
        u32             expiry;                 /* time_t */
index 4d18ff3..d5063e1 100644 (file)
@@ -699,7 +699,7 @@ extern void blk_start_queue(struct request_queue *q);
 extern void blk_stop_queue(struct request_queue *q);
 extern void blk_sync_queue(struct request_queue *q);
 extern void __blk_stop_queue(struct request_queue *q);
-extern void __blk_run_queue(struct request_queue *);
+extern void __blk_run_queue(struct request_queue *q, bool force_kblockd);
 extern void blk_run_queue(struct request_queue *);
 extern int blk_rq_map_user(struct request_queue *, struct request *,
                           struct rq_map_data *, void __user *, unsigned long,
@@ -1088,7 +1088,6 @@ static inline void put_dev_sector(Sector p)
 
 struct work_struct;
 int kblockd_schedule_work(struct request_queue *q, struct work_struct *work);
-int kblockd_schedule_delayed_work(struct request_queue *q, struct delayed_work *dwork, unsigned long delay);
 
 #ifdef CONFIG_BLK_CGROUP
 /*
@@ -1136,7 +1135,6 @@ static inline uint64_t rq_io_start_time_ns(struct request *req)
 extern int blk_throtl_init(struct request_queue *q);
 extern void blk_throtl_exit(struct request_queue *q);
 extern int blk_throtl_bio(struct request_queue *q, struct bio **bio);
-extern void throtl_schedule_delayed_work(struct request_queue *q, unsigned long delay);
 extern void throtl_shutdown_timer_wq(struct request_queue *q);
 #else /* CONFIG_BLK_DEV_THROTTLING */
 static inline int blk_throtl_bio(struct request_queue *q, struct bio **bio)
@@ -1146,7 +1144,6 @@ static inline int blk_throtl_bio(struct request_queue *q, struct bio **bio)
 
 static inline int blk_throtl_init(struct request_queue *q) { return 0; }
 static inline int blk_throtl_exit(struct request_queue *q) { return 0; }
-static inline void throtl_schedule_delayed_work(struct request_queue *q, unsigned long delay) {}
 static inline void throtl_shutdown_timer_wq(struct request_queue *q) {}
 #endif /* CONFIG_BLK_DEV_THROTTLING */
 
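__blk_run_queue() grows a second argument here; judging from the new prototype, the bool selects between invoking the driver's request_fn in the current context and deferring the queue run to the kblockd workqueue, which matters for callers on completion or otherwise atomic paths. A hedged caller-side sketch of adapting to the new signature (the helper and its policy are illustrative, not from the patch):

/* Hedged sketch: the second argument is assumed to mean "defer the run
 * to kblockd" rather than calling ->request_fn() in this context. */
static void example_kick_queue(struct request_queue *q, bool from_completion)
{
        unsigned long flags;

        spin_lock_irqsave(q->queue_lock, flags);
        /* On a completion/atomic path, avoid recursing into the driver. */
        __blk_run_queue(q, from_completion);
        spin_unlock_irqrestore(q->queue_lock, flags);
}
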
index 3395cf7..b22fb0d 100644 (file)
@@ -245,7 +245,6 @@ static inline int blk_cmd_buf_len(struct request *rq)
 
 extern void blk_dump_cmd(char *buf, struct request *rq);
 extern void blk_fill_rwbs(char *rwbs, u32 rw, int bytes);
-extern void blk_fill_rwbs_rq(char *rwbs, struct request *rq);
 
 #endif /* CONFIG_EVENT_TRACING && CONFIG_BLOCK */
 
index 3fd3684..ef4f0b6 100644 (file)
@@ -71,6 +71,7 @@ struct wm8994 {
        u16 irq_masks_cache[WM8994_NUM_IRQ_REGS];
 
        /* Used over suspend/resume */
+       bool suspended;
        u16 ldo_regs[WM8994_NUM_LDO_REGS];
        u16 gpio_regs[WM8994_NUM_GPIO_REGS];
 
index aba421d..78f18ad 100644 (file)
@@ -31,7 +31,7 @@ DECLARE_EVENT_CLASS(block_rq_with_error,
                                        0 : blk_rq_sectors(rq);
                __entry->errors    = rq->errors;
 
-               blk_fill_rwbs_rq(__entry->rwbs, rq);
+               blk_fill_rwbs(__entry->rwbs, rq->cmd_flags, blk_rq_bytes(rq));
                blk_dump_cmd(__get_str(cmd), rq);
        ),
 
@@ -118,7 +118,7 @@ DECLARE_EVENT_CLASS(block_rq,
                __entry->bytes     = (rq->cmd_type == REQ_TYPE_BLOCK_PC) ?
                                        blk_rq_bytes(rq) : 0;
 
-               blk_fill_rwbs_rq(__entry->rwbs, rq);
+               blk_fill_rwbs(__entry->rwbs, rq->cmd_flags, blk_rq_bytes(rq));
                blk_dump_cmd(__get_str(cmd), rq);
                memcpy(__entry->comm, current->comm, TASK_COMM_LEN);
        ),
@@ -563,7 +563,7 @@ TRACE_EVENT(block_rq_remap,
                __entry->nr_sector      = blk_rq_sectors(rq);
                __entry->old_dev        = dev;
                __entry->old_sector     = from;
-               blk_fill_rwbs_rq(__entry->rwbs, rq);
+               blk_fill_rwbs(__entry->rwbs, rq->cmd_flags, blk_rq_bytes(rq));
        ),
 
        TP_printk("%d,%d %s %llu + %u <- (%d,%d) %llu",
index d95721f..cbafed7 100644 (file)
@@ -1827,21 +1827,5 @@ void blk_fill_rwbs(char *rwbs, u32 rw, int bytes)
        rwbs[i] = '\0';
 }
 
-void blk_fill_rwbs_rq(char *rwbs, struct request *rq)
-{
-       int rw = rq->cmd_flags & 0x03;
-       int bytes;
-
-       if (rq->cmd_flags & REQ_DISCARD)
-               rw |= REQ_DISCARD;
-
-       if (rq->cmd_flags & REQ_SECURE)
-               rw |= REQ_SECURE;
-
-       bytes = blk_rq_bytes(rq);
-
-       blk_fill_rwbs(rwbs, rw, bytes);
-}
-
 #endif /* CONFIG_EVENT_TRACING */
 
index 5021cbc..ac09f22 100644 (file)
@@ -148,7 +148,7 @@ nla_policy_len(const struct nla_policy *p, int n)
 {
        int i, len = 0;
 
-       for (i = 0; i < n; i++) {
+       for (i = 0; i < n; i++, p++) {
                if (p->len)
                        len += nla_total_size(p->len);
                else if (nla_attr_minlen[p->type])
index 508f9c1..133fd22 100644 (file)
@@ -144,7 +144,7 @@ void __hw_addr_del_multiple(struct netdev_hw_addr_list *to_list,
 
        list_for_each_entry(ha, &from_list->list, list) {
                type = addr_type ? addr_type : ha->type;
-               __hw_addr_del(to_list, ha->addr, addr_len, addr_type);
+               __hw_addr_del(to_list, ha->addr, addr_len, type);
        }
 }
 EXPORT_SYMBOL(__hw_addr_del_multiple);
index d5074a5..c44348a 100644 (file)
@@ -1193,7 +1193,7 @@ static int dcbnl_ieee_set(struct net_device *netdev, struct nlattr **tb,
                        goto err;
        }
 
-       if (ieee[DCB_ATTR_IEEE_PFC] && ops->ieee_setets) {
+       if (ieee[DCB_ATTR_IEEE_PFC] && ops->ieee_setpfc) {
                struct ieee_pfc *pfc = nla_data(ieee[DCB_ATTR_IEEE_PFC]);
                err = ops->ieee_setpfc(netdev, pfc);
                if (err)
index 8cde009..4222e7a 100644 (file)
@@ -614,6 +614,9 @@ int dccp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
                /* Caller (dccp_v4_do_rcv) will send Reset */
                dcb->dccpd_reset_code = DCCP_RESET_CODE_NO_CONNECTION;
                return 1;
+       } else if (sk->sk_state == DCCP_CLOSED) {
+               dcb->dccpd_reset_code = DCCP_RESET_CODE_NO_CONNECTION;
+               return 1;
        }
 
        if (sk->sk_state != DCCP_REQUESTING && sk->sk_state != DCCP_RESPOND) {
@@ -668,10 +671,6 @@ int dccp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
        }
 
        switch (sk->sk_state) {
-       case DCCP_CLOSED:
-               dcb->dccpd_reset_code = DCCP_RESET_CODE_NO_CONNECTION;
-               return 1;
-
        case DCCP_REQUESTING:
                queued = dccp_rcv_request_sent_state_process(sk, skb, dh, len);
                if (queued >= 0)
index a998db6..904312e 100644 (file)
@@ -2557,14 +2557,16 @@ static
 int ipv6_sysctl_rtcache_flush(ctl_table *ctl, int write,
                              void __user *buffer, size_t *lenp, loff_t *ppos)
 {
-       struct net *net = current->nsproxy->net_ns;
-       int delay = net->ipv6.sysctl.flush_delay;
-       if (write) {
-               proc_dointvec(ctl, write, buffer, lenp, ppos);
-               fib6_run_gc(delay <= 0 ? ~0UL : (unsigned long)delay, net);
-               return 0;
-       } else
+       struct net *net;
+       int delay;
+       if (!write)
                return -EINVAL;
+
+       net = (struct net *)ctl->extra1;
+       delay = net->ipv6.sysctl.flush_delay;
+       proc_dointvec(ctl, write, buffer, lenp, ppos);
+       fib6_run_gc(delay <= 0 ? ~0UL : (unsigned long)delay, net);
+       return 0;
 }
 
 ctl_table ipv6_route_table_template[] = {
@@ -2651,6 +2653,7 @@ struct ctl_table * __net_init ipv6_route_sysctl_init(struct net *net)
 
        if (table) {
                table[0].data = &net->ipv6.sysctl.flush_delay;
+               table[0].extra1 = net;
                table[1].data = &net->ipv6.ip6_dst_ops.gc_thresh;
                table[2].data = &net->ipv6.sysctl.ip6_rt_max_size;
                table[3].data = &net->ipv6.sysctl.ip6_rt_gc_min_interval;
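
The handler no longer guesses the namespace from current->nsproxy; each per-namespace copy of the table now carries its own struct net in extra1, set when the table is instantiated, so a write always flushes the routes of the namespace that owns the file. The pattern reduced to its two halves (the example_* names are illustrative):

/* Handler side: recover the owning namespace from the table entry. */
static int example_flush_delay(ctl_table *ctl, int write, void __user *buffer,
                               size_t *lenp, loff_t *ppos)
{
        struct net *net = (struct net *)ctl->extra1;    /* set at init time */

        if (!write)
                return -EINVAL;

        proc_dointvec(ctl, write, buffer, lenp, ppos);
        /* ...then act on *net, as the hunk above does via fib6_run_gc(). */
        return 0;
}

/* Init side: wire each namespace's copy of the table to that namespace. */
static void example_table_init(struct net *net, ctl_table *table)
{
        table[0].data   = &net->ipv6.sysctl.flush_delay;
        table[0].extra1 = net;
}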
index 22f7ad5..ba98e13 100644 (file)
@@ -808,9 +808,9 @@ __ip_vs_update_dest(struct ip_vs_service *svc, struct ip_vs_dest *dest,
        dest->u_threshold = udest->u_threshold;
        dest->l_threshold = udest->l_threshold;
 
-       spin_lock(&dest->dst_lock);
+       spin_lock_bh(&dest->dst_lock);
        ip_vs_dst_reset(dest);
-       spin_unlock(&dest->dst_lock);
+       spin_unlock_bh(&dest->dst_lock);
 
        if (add)
                ip_vs_new_estimator(&dest->stats);
index b07393e..9181699 100644 (file)
@@ -85,6 +85,8 @@ EXPORT_SYMBOL(nf_log_unregister);
 
 int nf_log_bind_pf(u_int8_t pf, const struct nf_logger *logger)
 {
+       if (pf >= ARRAY_SIZE(nf_loggers))
+               return -EINVAL;
        mutex_lock(&nf_log_mutex);
        if (__find_logger(pf, logger->name) == NULL) {
                mutex_unlock(&nf_log_mutex);
@@ -98,6 +100,8 @@ EXPORT_SYMBOL(nf_log_bind_pf);
 
 void nf_log_unbind_pf(u_int8_t pf)
 {
+       if (pf >= ARRAY_SIZE(nf_loggers))
+               return;
        mutex_lock(&nf_log_mutex);
        rcu_assign_pointer(nf_loggers[pf], NULL);
        mutex_unlock(&nf_log_mutex);
index 478181d..1f92459 100644 (file)
@@ -1407,7 +1407,7 @@ static int netlink_recvmsg(struct kiocb *kiocb, struct socket *sock,
        int noblock = flags&MSG_DONTWAIT;
        size_t copied;
        struct sk_buff *skb, *data_skb;
-       int err;
+       int err, ret;
 
        if (flags&MSG_OOB)
                return -EOPNOTSUPP;
@@ -1470,8 +1470,13 @@ static int netlink_recvmsg(struct kiocb *kiocb, struct socket *sock,
 
        skb_free_datagram(sk, skb);
 
-       if (nlk->cb && atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf / 2)
-               netlink_dump(sk);
+       if (nlk->cb && atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf / 2) {
+               ret = netlink_dump(sk);
+               if (ret) {
+                       sk->sk_err = ret;
+                       sk->sk_error_report(sk);
+               }
+       }
 
        scm_recv(sock, msg, siocb->scm, flags);
 out:
@@ -1736,6 +1741,7 @@ int netlink_dump_start(struct sock *ssk, struct sk_buff *skb,
        struct netlink_callback *cb;
        struct sock *sk;
        struct netlink_sock *nlk;
+       int ret;
 
        cb = kzalloc(sizeof(*cb), GFP_KERNEL);
        if (cb == NULL)
@@ -1764,9 +1770,13 @@ int netlink_dump_start(struct sock *ssk, struct sk_buff *skb,
        nlk->cb = cb;
        mutex_unlock(nlk->cb_mutex);
 
-       netlink_dump(sk);
+       ret = netlink_dump(sk);
+
        sock_put(sk);
 
+       if (ret)
+               return ret;
+
        /* We successfully started a dump, by returning -EINTR we
         * signal not to send ACK even if it was requested.
         */
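
With this change netlink_dump_start() reports a failure of the first netlink_dump() pass to its caller instead of swallowing it, while keeping -EINTR as the established "dump started, suppress the ACK" convention, and netlink_recvmsg() now raises later dump failures on the socket via sk_error_report(). A caller is therefore expected to return the value unchanged; a hedged sketch (example_dump/example_done are placeholders):

/* Hedged sketch of a dump-request handler under the new return rules. */
static int example_dump(struct sk_buff *skb, struct netlink_callback *cb);
static int example_done(struct netlink_callback *cb);

static int example_getroute(struct sock *nlsk, struct sk_buff *skb,
                            struct nlmsghdr *nlh)
{
        if (nlh->nlmsg_flags & NLM_F_DUMP)
                /* -EINTR means "dump started, do not ACK"; any other
                 * negative value is a real error from the first pass. */
                return netlink_dump_start(nlsk, skb, nlh,
                                          example_dump, example_done);

        return -EOPNOTSUPP;
}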
index 8931500..1a2b063 100644 (file)
@@ -423,6 +423,7 @@ void rxrpc_fast_process_packet(struct rxrpc_call *call, struct sk_buff *skb)
                        goto protocol_error;
                }
 
+       case RXRPC_PACKET_TYPE_ACKALL:
        case RXRPC_PACKET_TYPE_ACK:
                /* ACK processing is done in process context */
                read_lock_bh(&call->state_lock);