Merge branch 'for-next' into for-linus
author    Jiri Kosina <jkosina@suse.cz>    Mon, 7 Dec 2009 17:36:35 +0000 (18:36 +0100)
committer Jiri Kosina <jkosina@suse.cz>    Mon, 7 Dec 2009 17:36:35 +0000 (18:36 +0100)
Conflicts:

kernel/irq/chip.c

48 files changed:
Documentation/vm/page-types.c
arch/arm/mach-s3c6410/Kconfig
arch/arm/plat-omap/dma.c
arch/x86/kernel/amd_iommu.c
arch/x86/kernel/cpu/perf_event.c
arch/x86/kernel/kprobes.c
arch/x86/mm/kmmio.c
drivers/ata/sata_fsl.c
drivers/gpu/drm/radeon/atombios.h
drivers/gpu/drm/radeon/r600.c
drivers/gpu/drm/radeon/radeon_ttm.c
drivers/gpu/drm/radeon/rv770.c
drivers/isdn/hardware/eicon/maintidi.c
drivers/isdn/hisax/hfc_usb.c
drivers/isdn/i4l/isdn_ppp.c
drivers/media/video/s2255drv.c
drivers/net/benet/be_cmds.h
drivers/net/benet/be_main.c
drivers/net/cxgb3/sge.c
drivers/net/s2io.c
drivers/net/smsc911x.c
drivers/net/wireless/b43/main.c
drivers/net/wireless/ipw2x00/ipw2100.c
drivers/net/wireless/ipw2x00/ipw2200.c
drivers/net/wireless/ipw2x00/libipw_module.c
drivers/net/wireless/wavelan_cs.c
drivers/platform/x86/thinkpad_acpi.c
drivers/rtc/rtc-v3020.c
drivers/scsi/pmcraid.c
drivers/usb/host/ehci-hcd.c
drivers/video/atmel_lcdfb.c
drivers/video/gbefb.c
fs/btrfs/extent_map.c
fs/cifs/inode.c
fs/compat_ioctl.c
fs/ext3/inode.c
fs/ext4/inode.c
fs/ocfs2/refcounttree.c
include/net/sctp/structs.h
kernel/irq/spurious.c
kernel/perf_event.c
lib/Kconfig.debug
lib/swiotlb.c
net/sctp/sm_sideeffect.c
net/sctp/sm_statefuns.c
samples/Kconfig
security/selinux/ss/services.c
sound/pci/hda/patch_realtek.c

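diff --combined Documentation/vm/page-types.c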
@@@ -218,7 -218,7 +218,7 @@@ static void fatal(const char *x, ...
        exit(EXIT_FAILURE);
  }
  
 -int checked_open(const char *pathname, int flags)
 +static int checked_open(const char *pathname, int flags)
  {
        int fd = open(pathname, flags);
  
@@@ -301,7 -301,7 +301,7 @@@ static char *page_flag_name(uint64_t fl
                present = (flags >> i) & 1;
                if (!page_flag_names[i]) {
                        if (present)
-                               fatal("unkown flag bit %d\n", i);
+                               fatal("unknown flag bit %d\n", i);
                        continue;
                }
                buf[j++] = present ? page_flag_names[i][0] : '_';
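diff --combined arch/arm/mach-s3c6410/Kconfig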
@@@ -1,5 -1,3 +1,3 @@@
- # arch/arm/mach-s3c6410/Kconfig
- #
  # Copyright 2008 Openmoko, Inc.
  # Copyright 2008 Simtec Electronics
  #
@@@ -77,7 -75,6 +75,7 @@@ config SMDK6410_WM1190_EV
        depends on MACH_SMDK6410
        select REGULATOR
        select REGULATOR_WM8350
 +      select S3C24XX_GPIO_EXTRA64
        select MFD_WM8350_I2C
        select MFD_WM8350_CONFIG_MODE_0
        select MFD_WM8350_CONFIG_MODE_3
diff --combined arch/arm/plat-omap/dma.c
@@@ -691,16 -691,13 +691,16 @@@ static inline void disable_lnk(int lch
  static inline void omap2_enable_irq_lch(int lch)
  {
        u32 val;
 +      unsigned long flags;
  
        if (!cpu_class_is_omap2())
                return;
  
 +      spin_lock_irqsave(&dma_chan_lock, flags);
        val = dma_read(IRQENABLE_L0);
        val |= 1 << lch;
        dma_write(val, IRQENABLE_L0);
 +      spin_unlock_irqrestore(&dma_chan_lock, flags);
  }
  
  int omap_request_dma(int dev_id, const char *dev_name,
@@@ -802,13 -799,10 +802,13 @@@ void omap_free_dma(int lch
  
        if (cpu_class_is_omap2()) {
                u32 val;
 +
 +              spin_lock_irqsave(&dma_chan_lock, flags);
                /* Disable interrupts */
                val = dma_read(IRQENABLE_L0);
                val &= ~(1 << lch);
                dma_write(val, IRQENABLE_L0);
 +              spin_unlock_irqrestore(&dma_chan_lock, flags);
  
                /* Clear the CSR register and IRQ status register */
                dma_write(OMAP2_DMA_CSR_CLEAR_MASK, CSR(lch));
@@@ -1114,14 -1108,6 +1114,14 @@@ int omap_dma_running(void
  {
        int lch;
  
 +      /*
 +       * On OMAP1510, internal LCD controller will start the transfer
 +       * when it gets enabled, so assume DMA running if LCD enabled.
 +       */
 +      if (cpu_is_omap1510())
 +              if (omap_readw(0xfffec000 + 0x00) & (1 << 0))
 +                      return 1;
 +
        /* Check if LCD DMA is running */
        if (cpu_is_omap16xx())
                if (omap_readw(OMAP1610_DMA_LCD_CCR) & OMAP_DMA_CCR_EN)
@@@ -1246,7 -1232,7 +1246,7 @@@ static void create_dma_lch_chain(int lc
   *                                          OMAP_DMA_DYNAMIC_CHAIN
   * @params - Channel parameters
   *
-  * @return - Succes : 0
+  * @return - Success : 0
   *         Failure: -EINVAL/-ENOMEM
   */
  int omap_request_dma_chain(int dev_id, const char *dev_name,
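diff --combined arch/x86/kernel/amd_iommu.c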
@@@ -1,5 -1,5 +1,5 @@@
  /*
 - * Copyright (C) 2007-2008 Advanced Micro Devices, Inc.
 + * Copyright (C) 2007-2009 Advanced Micro Devices, Inc.
   * Author: Joerg Roedel <joerg.roedel@amd.com>
   *         Leo Duran <leo.duran@amd.com>
   *
@@@ -28,7 -28,6 +28,7 @@@
  #include <asm/proto.h>
  #include <asm/iommu.h>
  #include <asm/gart.h>
 +#include <asm/amd_iommu_proto.h>
  #include <asm/amd_iommu_types.h>
  #include <asm/amd_iommu.h>
  
@@@ -57,115 -56,20 +57,115 @@@ struct iommu_cmd 
        u32 data[4];
  };
  
 -static int dma_ops_unity_map(struct dma_ops_domain *dma_dom,
 -                           struct unity_map_entry *e);
 -static struct dma_ops_domain *find_protection_domain(u16 devid);
 -static u64 *alloc_pte(struct protection_domain *domain,
 -                    unsigned long address, int end_lvl,
 -                    u64 **pte_page, gfp_t gfp);
 -static void dma_ops_reserve_addresses(struct dma_ops_domain *dom,
 -                                    unsigned long start_page,
 -                                    unsigned int pages);
  static void reset_iommu_command_buffer(struct amd_iommu *iommu);
 -static u64 *fetch_pte(struct protection_domain *domain,
 -                    unsigned long address, int map_size);
  static void update_domain(struct protection_domain *domain);
  
 +/****************************************************************************
 + *
 + * Helper functions
 + *
 + ****************************************************************************/
 +
 +static inline u16 get_device_id(struct device *dev)
 +{
 +      struct pci_dev *pdev = to_pci_dev(dev);
 +
 +      return calc_devid(pdev->bus->number, pdev->devfn);
 +}
 +
 +static struct iommu_dev_data *get_dev_data(struct device *dev)
 +{
 +      return dev->archdata.iommu;
 +}
 +
 +/*
 + * In this function the list of preallocated protection domains is traversed to
 + * find the domain for a specific device
 + */
 +static struct dma_ops_domain *find_protection_domain(u16 devid)
 +{
 +      struct dma_ops_domain *entry, *ret = NULL;
 +      unsigned long flags;
 +      u16 alias = amd_iommu_alias_table[devid];
 +
 +      if (list_empty(&iommu_pd_list))
 +              return NULL;
 +
 +      spin_lock_irqsave(&iommu_pd_list_lock, flags);
 +
 +      list_for_each_entry(entry, &iommu_pd_list, list) {
 +              if (entry->target_dev == devid ||
 +                  entry->target_dev == alias) {
 +                      ret = entry;
 +                      break;
 +              }
 +      }
 +
 +      spin_unlock_irqrestore(&iommu_pd_list_lock, flags);
 +
 +      return ret;
 +}
 +
 +/*
 + * This function checks if the driver got a valid device from the caller to
 + * avoid dereferencing invalid pointers.
 + */
 +static bool check_device(struct device *dev)
 +{
 +      u16 devid;
 +
 +      if (!dev || !dev->dma_mask)
 +              return false;
 +
 +      /* No device or no PCI device */
 +      if (!dev || dev->bus != &pci_bus_type)
 +              return false;
 +
 +      devid = get_device_id(dev);
 +
 +      /* Out of our scope? */
 +      if (devid > amd_iommu_last_bdf)
 +              return false;
 +
 +      if (amd_iommu_rlookup_table[devid] == NULL)
 +              return false;
 +
 +      return true;
 +}
 +
 +static int iommu_init_device(struct device *dev)
 +{
 +      struct iommu_dev_data *dev_data;
 +      struct pci_dev *pdev;
 +      u16 devid, alias;
 +
 +      if (dev->archdata.iommu)
 +              return 0;
 +
 +      dev_data = kzalloc(sizeof(*dev_data), GFP_KERNEL);
 +      if (!dev_data)
 +              return -ENOMEM;
 +
 +      dev_data->dev = dev;
 +
 +      devid = get_device_id(dev);
 +      alias = amd_iommu_alias_table[devid];
 +      pdev = pci_get_bus_and_slot(PCI_BUS(alias), alias & 0xff);
 +      if (pdev)
 +              dev_data->alias = &pdev->dev;
 +
 +      atomic_set(&dev_data->bind, 0);
 +
 +      dev->archdata.iommu = dev_data;
 +
 +
 +      return 0;
 +}
 +
 +static void iommu_uninit_device(struct device *dev)
 +{
 +      kfree(dev->archdata.iommu);
 +}
  #ifdef CONFIG_AMD_IOMMU_STATS
  
  /*
@@@ -186,6 -90,7 +186,6 @@@ DECLARE_STATS_COUNTER(alloced_io_mem)
  DECLARE_STATS_COUNTER(total_map_requests);
  
  static struct dentry *stats_dir;
 -static struct dentry *de_isolate;
  static struct dentry *de_fflush;
  
  static void amd_iommu_stats_add(struct __iommu_counter *cnt)
@@@ -203,6 -108,9 +203,6 @@@ static void amd_iommu_stats_init(void
        if (stats_dir == NULL)
                return;
  
 -      de_isolate = debugfs_create_bool("isolation", 0444, stats_dir,
 -                                       (u32 *)&amd_iommu_isolate);
 -
        de_fflush  = debugfs_create_bool("fullflush", 0444, stats_dir,
                                         (u32 *)&amd_iommu_unmap_flush);
  
  
  #endif
  
 -/* returns !0 if the IOMMU is caching non-present entries in its TLB */
 -static int iommu_has_npcache(struct amd_iommu *iommu)
 -{
 -      return iommu->cap & (1UL << IOMMU_CAP_NPCACHE);
 -}
 -
  /****************************************************************************
   *
   * Interrupt handling functions
@@@ -285,7 -199,6 +285,7 @@@ static void iommu_print_event(struct am
                break;
        case EVENT_TYPE_ILL_CMD:
                printk("ILLEGAL_COMMAND_ERROR address=0x%016llx]\n", address);
 +              iommu->reset_in_progress = true;
                reset_iommu_command_buffer(iommu);
                dump_command(address);
                break;
@@@ -408,8 -321,11 +408,8 @@@ static void __iommu_wait_for_completion
        status &= ~MMIO_STATUS_COM_WAIT_INT_MASK;
        writel(status, iommu->mmio_base + MMIO_STATUS_OFFSET);
  
 -      if (unlikely(i == EXIT_LOOP_COUNT)) {
 -              spin_unlock(&iommu->lock);
 -              reset_iommu_command_buffer(iommu);
 -              spin_lock(&iommu->lock);
 -      }
 +      if (unlikely(i == EXIT_LOOP_COUNT))
 +              iommu->reset_in_progress = true;
  }
  
  /*
@@@ -456,46 -372,26 +456,46 @@@ static int iommu_completion_wait(struc
  out:
        spin_unlock_irqrestore(&iommu->lock, flags);
  
 +      if (iommu->reset_in_progress)
 +              reset_iommu_command_buffer(iommu);
 +
        return 0;
  }
  
 +static void iommu_flush_complete(struct protection_domain *domain)
 +{
 +      int i;
 +
 +      for (i = 0; i < amd_iommus_present; ++i) {
 +              if (!domain->dev_iommu[i])
 +                      continue;
 +
 +              /*
 +               * Devices of this domain are behind this IOMMU
 +               * We need to wait for completion of all commands.
 +               */
 +              iommu_completion_wait(amd_iommus[i]);
 +      }
 +}
 +
  /*
   * Command send function for invalidating a device table entry
   */
 -static int iommu_queue_inv_dev_entry(struct amd_iommu *iommu, u16 devid)
 +static int iommu_flush_device(struct device *dev)
  {
 +      struct amd_iommu *iommu;
        struct iommu_cmd cmd;
 -      int ret;
 +      u16 devid;
  
 -      BUG_ON(iommu == NULL);
 +      devid = get_device_id(dev);
 +      iommu = amd_iommu_rlookup_table[devid];
  
 +      /* Build command */
        memset(&cmd, 0, sizeof(cmd));
        CMD_SET_TYPE(&cmd, CMD_INV_DEV_ENTRY);
        cmd.data[0] = devid;
  
 -      ret = iommu_queue_command(iommu, &cmd);
 -
 -      return ret;
 +      return iommu_queue_command(iommu, &cmd);
  }
  
  static void __iommu_build_inv_iommu_pages(struct iommu_cmd *cmd, u64 address,
@@@ -534,11 -430,11 +534,11 @@@ static int iommu_queue_inv_iommu_pages(
   * It invalidates a single PTE if the range to flush is within a single
   * page. Otherwise it flushes the whole TLB of the IOMMU.
   */
 -static int iommu_flush_pages(struct amd_iommu *iommu, u16 domid,
 -              u64 address, size_t size)
 +static void __iommu_flush_pages(struct protection_domain *domain,
 +                              u64 address, size_t size, int pde)
  {
 -      int s = 0;
 -      unsigned pages = iommu_num_pages(address, size, PAGE_SIZE);
 +      int s = 0, i;
 +      unsigned long pages = iommu_num_pages(address, size, PAGE_SIZE);
  
        address &= PAGE_MASK;
  
                s = 1;
        }
  
 -      iommu_queue_inv_iommu_pages(iommu, address, domid, 0, s);
  
 -      return 0;
 +      for (i = 0; i < amd_iommus_present; ++i) {
 +              if (!domain->dev_iommu[i])
 +                      continue;
 +
 +              /*
 +               * Devices of this domain are behind this IOMMU
 +               * We need a TLB flush
 +               */
 +              iommu_queue_inv_iommu_pages(amd_iommus[i], address,
 +                                          domain->id, pde, s);
 +      }
 +
 +      return;
  }
  
 -/* Flush the whole IO/TLB for a given protection domain */
 -static void iommu_flush_tlb(struct amd_iommu *iommu, u16 domid)
 +static void iommu_flush_pages(struct protection_domain *domain,
 +                           u64 address, size_t size)
  {
 -      u64 address = CMD_INV_IOMMU_ALL_PAGES_ADDRESS;
 -
 -      INC_STATS_COUNTER(domain_flush_single);
 +      __iommu_flush_pages(domain, address, size, 0);
 +}
  
 -      iommu_queue_inv_iommu_pages(iommu, address, domid, 0, 1);
 +/* Flush the whole IO/TLB for a given protection domain */
 +static void iommu_flush_tlb(struct protection_domain *domain)
 +{
 +      __iommu_flush_pages(domain, 0, CMD_INV_IOMMU_ALL_PAGES_ADDRESS, 0);
  }
  
  /* Flush the whole IO/TLB for a given protection domain - including PDE */
 -static void iommu_flush_tlb_pde(struct amd_iommu *iommu, u16 domid)
 +static void iommu_flush_tlb_pde(struct protection_domain *domain)
  {
 -       u64 address = CMD_INV_IOMMU_ALL_PAGES_ADDRESS;
 -
 -       INC_STATS_COUNTER(domain_flush_single);
 -
 -       iommu_queue_inv_iommu_pages(iommu, address, domid, 1, 1);
 +      __iommu_flush_pages(domain, 0, CMD_INV_IOMMU_ALL_PAGES_ADDRESS, 1);
  }
  
 +
  /*
 - * This function flushes one domain on one IOMMU
 + * This function flushes the DTEs for all devices in domain
   */
 -static void flush_domain_on_iommu(struct amd_iommu *iommu, u16 domid)
 +static void iommu_flush_domain_devices(struct protection_domain *domain)
  {
 -      struct iommu_cmd cmd;
 +      struct iommu_dev_data *dev_data;
        unsigned long flags;
  
 -      __iommu_build_inv_iommu_pages(&cmd, CMD_INV_IOMMU_ALL_PAGES_ADDRESS,
 -                                    domid, 1, 1);
 +      spin_lock_irqsave(&domain->lock, flags);
  
 -      spin_lock_irqsave(&iommu->lock, flags);
 -      __iommu_queue_command(iommu, &cmd);
 -      __iommu_completion_wait(iommu);
 -      __iommu_wait_for_completion(iommu);
 -      spin_unlock_irqrestore(&iommu->lock, flags);
 +      list_for_each_entry(dev_data, &domain->dev_list, list)
 +              iommu_flush_device(dev_data->dev);
 +
 +      spin_unlock_irqrestore(&domain->lock, flags);
  }
  
 -static void flush_all_domains_on_iommu(struct amd_iommu *iommu)
 +static void iommu_flush_all_domain_devices(void)
  {
 -      int i;
 +      struct protection_domain *domain;
 +      unsigned long flags;
  
 -      for (i = 1; i < MAX_DOMAIN_ID; ++i) {
 -              if (!test_bit(i, amd_iommu_pd_alloc_bitmap))
 -                      continue;
 -              flush_domain_on_iommu(iommu, i);
 +      spin_lock_irqsave(&amd_iommu_pd_lock, flags);
 +
 +      list_for_each_entry(domain, &amd_iommu_pd_list, list) {
 +              iommu_flush_domain_devices(domain);
 +              iommu_flush_complete(domain);
        }
  
 +      spin_unlock_irqrestore(&amd_iommu_pd_lock, flags);
 +}
 +
 +void amd_iommu_flush_all_devices(void)
 +{
 +      iommu_flush_all_domain_devices();
  }
  
  /*
 - * This function is used to flush the IO/TLB for a given protection domain
 - * on every IOMMU in the system
 + * This function uses heavy locking and may disable irqs for some time. But
 + * this is no issue because it is only called during resume.
   */
 -static void iommu_flush_domain(u16 domid)
 +void amd_iommu_flush_all_domains(void)
  {
 -      struct amd_iommu *iommu;
 +      struct protection_domain *domain;
 +      unsigned long flags;
  
 -      INC_STATS_COUNTER(domain_flush_all);
 +      spin_lock_irqsave(&amd_iommu_pd_lock, flags);
  
 -      for_each_iommu(iommu)
 -              flush_domain_on_iommu(iommu, domid);
 +      list_for_each_entry(domain, &amd_iommu_pd_list, list) {
 +              spin_lock(&domain->lock);
 +              iommu_flush_tlb_pde(domain);
 +              iommu_flush_complete(domain);
 +              spin_unlock(&domain->lock);
 +      }
 +
 +      spin_unlock_irqrestore(&amd_iommu_pd_lock, flags);
  }
  
 -void amd_iommu_flush_all_domains(void)
 +static void reset_iommu_command_buffer(struct amd_iommu *iommu)
  {
 -      struct amd_iommu *iommu;
 +      pr_err("AMD-Vi: Resetting IOMMU command buffer\n");
  
 -      for_each_iommu(iommu)
 -              flush_all_domains_on_iommu(iommu);
 +      if (iommu->reset_in_progress)
 +              panic("AMD-Vi: ILLEGAL_COMMAND_ERROR while resetting command buffer\n");
 +
 +      amd_iommu_reset_cmd_buffer(iommu);
 +      amd_iommu_flush_all_devices();
 +      amd_iommu_flush_all_domains();
 +
 +      iommu->reset_in_progress = false;
  }
  
 -static void flush_all_devices_for_iommu(struct amd_iommu *iommu)
 +/****************************************************************************
 + *
 + * The functions below are used to create the page table mappings for
 + * unity mapped regions.
 + *
 + ****************************************************************************/
 +
 +/*
 + * This function is used to add another level to an IO page table. Adding
 + * another level increases the size of the address space by 9 bits to a size up
 + * to 64 bits.
 + */
 +static bool increase_address_space(struct protection_domain *domain,
 +                                 gfp_t gfp)
  {
 -      int i;
 +      u64 *pte;
  
 -      for (i = 0; i <= amd_iommu_last_bdf; ++i) {
 -              if (iommu != amd_iommu_rlookup_table[i])
 -                      continue;
 +      if (domain->mode == PAGE_MODE_6_LEVEL)
 +              /* address space already 64 bit large */
 +              return false;
  
 -              iommu_queue_inv_dev_entry(iommu, i);
 -              iommu_completion_wait(iommu);
 -      }
 +      pte = (void *)get_zeroed_page(gfp);
 +      if (!pte)
 +              return false;
 +
 +      *pte             = PM_LEVEL_PDE(domain->mode,
 +                                      virt_to_phys(domain->pt_root));
 +      domain->pt_root  = pte;
 +      domain->mode    += 1;
 +      domain->updated  = true;
 +
 +      return true;
  }
  
 -static void flush_devices_by_domain(struct protection_domain *domain)
 +static u64 *alloc_pte(struct protection_domain *domain,
 +                    unsigned long address,
 +                    int end_lvl,
 +                    u64 **pte_page,
 +                    gfp_t gfp)
  {
 -      struct amd_iommu *iommu;
 -      int i;
 +      u64 *pte, *page;
 +      int level;
  
 -      for (i = 0; i <= amd_iommu_last_bdf; ++i) {
 -              if ((domain == NULL && amd_iommu_pd_table[i] == NULL) ||
 -                  (amd_iommu_pd_table[i] != domain))
 -                      continue;
 +      while (address > PM_LEVEL_SIZE(domain->mode))
 +              increase_address_space(domain, gfp);
  
 -              iommu = amd_iommu_rlookup_table[i];
 -              if (!iommu)
 -                      continue;
 +      level =  domain->mode - 1;
 +      pte   = &domain->pt_root[PM_LEVEL_INDEX(level, address)];
 +
 +      while (level > end_lvl) {
 +              if (!IOMMU_PTE_PRESENT(*pte)) {
 +                      page = (u64 *)get_zeroed_page(gfp);
 +                      if (!page)
 +                              return NULL;
 +                      *pte = PM_LEVEL_PDE(level, virt_to_phys(page));
 +              }
 +
 +              level -= 1;
  
 -              iommu_queue_inv_dev_entry(iommu, i);
 -              iommu_completion_wait(iommu);
 +              pte = IOMMU_PTE_PAGE(*pte);
 +
 +              if (pte_page && level == end_lvl)
 +                      *pte_page = pte;
 +
 +              pte = &pte[PM_LEVEL_INDEX(level, address)];
        }
 +
 +      return pte;
  }
  
 -static void reset_iommu_command_buffer(struct amd_iommu *iommu)
 +/*
 + * This function checks if there is a PTE for a given dma address. If
 + * there is one, it returns the pointer to it.
 + */
 +static u64 *fetch_pte(struct protection_domain *domain,
 +                    unsigned long address, int map_size)
  {
 -      pr_err("AMD-Vi: Resetting IOMMU command buffer\n");
 +      int level;
 +      u64 *pte;
  
 -      if (iommu->reset_in_progress)
 -              panic("AMD-Vi: ILLEGAL_COMMAND_ERROR while resetting command buffer\n");
 +      level =  domain->mode - 1;
 +      pte   = &domain->pt_root[PM_LEVEL_INDEX(level, address)];
  
 -      iommu->reset_in_progress = true;
 +      while (level > map_size) {
 +              if (!IOMMU_PTE_PRESENT(*pte))
 +                      return NULL;
  
 -      amd_iommu_reset_cmd_buffer(iommu);
 -      flush_all_devices_for_iommu(iommu);
 -      flush_all_domains_on_iommu(iommu);
 +              level -= 1;
  
 -      iommu->reset_in_progress = false;
 -}
 +              pte = IOMMU_PTE_PAGE(*pte);
 +              pte = &pte[PM_LEVEL_INDEX(level, address)];
  
 -void amd_iommu_flush_all_devices(void)
 -{
 -      flush_devices_by_domain(NULL);
 -}
 +              if ((PM_PTE_LEVEL(*pte) == 0) && level != map_size) {
 +                      pte = NULL;
 +                      break;
 +              }
 +      }
  
 -/****************************************************************************
 - *
 - * The functions below are used the create the page table mappings for
 - * unity mapped regions.
 - *
 - ****************************************************************************/
 +      return pte;
 +}
  
  /*
   * Generic mapping functions. It maps a physical address into a DMA
@@@ -828,6 -654,28 +828,6 @@@ static int iommu_for_unity_map(struct a
  }
  
  /*
 - * Init the unity mappings for a specific IOMMU in the system
 - *
 - * Basically iterates over all unity mapping entries and applies them to
 - * the default domain DMA of that IOMMU if necessary.
 - */
 -static int iommu_init_unity_mappings(struct amd_iommu *iommu)
 -{
 -      struct unity_map_entry *entry;
 -      int ret;
 -
 -      list_for_each_entry(entry, &amd_iommu_unity_map, list) {
 -              if (!iommu_for_unity_map(iommu, entry))
 -                      continue;
 -              ret = dma_ops_unity_map(iommu->default_dom, entry);
 -              if (ret)
 -                      return ret;
 -      }
 -
 -      return 0;
 -}
 -
 -/*
   * This function actually applies the mapping to the page table of the
   * dma_ops domain.
   */
@@@ -856,28 -704,6 +856,28 @@@ static int dma_ops_unity_map(struct dma
  }
  
  /*
 + * Init the unity mappings for a specific IOMMU in the system
 + *
 + * Basically iterates over all unity mapping entries and applies them to
 + * the default domain DMA of that IOMMU if necessary.
 + */
 +static int iommu_init_unity_mappings(struct amd_iommu *iommu)
 +{
 +      struct unity_map_entry *entry;
 +      int ret;
 +
 +      list_for_each_entry(entry, &amd_iommu_unity_map, list) {
 +              if (!iommu_for_unity_map(iommu, entry))
 +                      continue;
 +              ret = dma_ops_unity_map(iommu->default_dom, entry);
 +              if (ret)
 +                      return ret;
 +      }
 +
 +      return 0;
 +}
 +
 +/*
   * Inits the unity mappings required for a specific device
   */
  static int init_unity_mappings_for_device(struct dma_ops_domain *dma_dom,
   */
  
  /*
 - * This function checks if there is a PTE for a given dma address. If
 - * there is one, it returns the pointer to it.
 + * Used to reserve address ranges in the aperture (e.g. for exclusion
 + * ranges.
   */
 -static u64 *fetch_pte(struct protection_domain *domain,
 -                    unsigned long address, int map_size)
 +static void dma_ops_reserve_addresses(struct dma_ops_domain *dom,
 +                                    unsigned long start_page,
 +                                    unsigned int pages)
  {
 -      int level;
 -      u64 *pte;
 -
 -      level =  domain->mode - 1;
 -      pte   = &domain->pt_root[PM_LEVEL_INDEX(level, address)];
 -
 -      while (level > map_size) {
 -              if (!IOMMU_PTE_PRESENT(*pte))
 -                      return NULL;
 -
 -              level -= 1;
 +      unsigned int i, last_page = dom->aperture_size >> PAGE_SHIFT;
  
 -              pte = IOMMU_PTE_PAGE(*pte);
 -              pte = &pte[PM_LEVEL_INDEX(level, address)];
 +      if (start_page + pages > last_page)
 +              pages = last_page - start_page;
  
 -              if ((PM_PTE_LEVEL(*pte) == 0) && level != map_size) {
 -                      pte = NULL;
 -                      break;
 -              }
 +      for (i = start_page; i < start_page + pages; ++i) {
 +              int index = i / APERTURE_RANGE_PAGES;
 +              int page  = i % APERTURE_RANGE_PAGES;
 +              __set_bit(page, dom->aperture[index]->bitmap);
        }
 -
 -      return pte;
  }
  
  /*
   * aperture in case of dma_ops domain allocation or address allocation
   * failure.
   */
 -static int alloc_new_range(struct amd_iommu *iommu,
 -                         struct dma_ops_domain *dma_dom,
 +static int alloc_new_range(struct dma_ops_domain *dma_dom,
                           bool populate, gfp_t gfp)
  {
        int index = dma_dom->aperture_size >> APERTURE_RANGE_SHIFT;
 +      struct amd_iommu *iommu;
        int i;
  
  #ifdef CONFIG_IOMMU_STRESS
        dma_dom->aperture_size += APERTURE_RANGE_SIZE;
  
        /* Intialize the exclusion range if necessary */
 -      if (iommu->exclusion_start &&
 -          iommu->exclusion_start >= dma_dom->aperture[index]->offset &&
 -          iommu->exclusion_start < dma_dom->aperture_size) {
 -              unsigned long startpage = iommu->exclusion_start >> PAGE_SHIFT;
 -              int pages = iommu_num_pages(iommu->exclusion_start,
 -                                          iommu->exclusion_length,
 -                                          PAGE_SIZE);
 -              dma_ops_reserve_addresses(dma_dom, startpage, pages);
 +      for_each_iommu(iommu) {
 +              if (iommu->exclusion_start &&
 +                  iommu->exclusion_start >= dma_dom->aperture[index]->offset
 +                  && iommu->exclusion_start < dma_dom->aperture_size) {
 +                      unsigned long startpage;
 +                      int pages = iommu_num_pages(iommu->exclusion_start,
 +                                                  iommu->exclusion_length,
 +                                                  PAGE_SIZE);
 +                      startpage = iommu->exclusion_start >> PAGE_SHIFT;
 +                      dma_ops_reserve_addresses(dma_dom, startpage, pages);
 +              }
        }
  
        /*
@@@ -1094,7 -928,7 +1094,7 @@@ static unsigned long dma_ops_alloc_addr
        }
  
        if (unlikely(address == -1))
 -              address = bad_dma_address;
 +              address = DMA_ERROR_CODE;
  
        WARN_ON((address + (PAGE_SIZE*pages)) > dom->aperture_size);
  
@@@ -1139,31 -973,6 +1139,31 @@@ static void dma_ops_free_addresses(stru
   *
   ****************************************************************************/
  
 +/*
 + * This function adds a protection domain to the global protection domain list
 + */
 +static void add_domain_to_list(struct protection_domain *domain)
 +{
 +      unsigned long flags;
 +
 +      spin_lock_irqsave(&amd_iommu_pd_lock, flags);
 +      list_add(&domain->list, &amd_iommu_pd_list);
 +      spin_unlock_irqrestore(&amd_iommu_pd_lock, flags);
 +}
 +
 +/*
 + * This function removes a protection domain from the global
 + * protection domain list
 + */
 +static void del_domain_from_list(struct protection_domain *domain)
 +{
 +      unsigned long flags;
 +
 +      spin_lock_irqsave(&amd_iommu_pd_lock, flags);
 +      list_del(&domain->list);
 +      spin_unlock_irqrestore(&amd_iommu_pd_lock, flags);
 +}
 +
  static u16 domain_id_alloc(void)
  {
        unsigned long flags;
@@@ -1191,6 -1000,26 +1191,6 @@@ static void domain_id_free(int id
        write_unlock_irqrestore(&amd_iommu_devtable_lock, flags);
  }
  
 -/*
 - * Used to reserve address ranges in the aperture (e.g. for exclusion
 - * ranges.
 - */
 -static void dma_ops_reserve_addresses(struct dma_ops_domain *dom,
 -                                    unsigned long start_page,
 -                                    unsigned int pages)
 -{
 -      unsigned int i, last_page = dom->aperture_size >> PAGE_SHIFT;
 -
 -      if (start_page + pages > last_page)
 -              pages = last_page - start_page;
 -
 -      for (i = start_page; i < start_page + pages; ++i) {
 -              int index = i / APERTURE_RANGE_PAGES;
 -              int page  = i % APERTURE_RANGE_PAGES;
 -              __set_bit(page, dom->aperture[index]->bitmap);
 -      }
 -}
 -
  static void free_pagetable(struct protection_domain *domain)
  {
        int i, j;
@@@ -1232,8 -1061,6 +1232,8 @@@ static void dma_ops_domain_free(struct 
        if (!dom)
                return;
  
 +      del_domain_from_list(&dom->domain);
 +
        free_pagetable(&dom->domain);
  
        for (i = 0; i < APERTURE_MAX_RANGES; ++i) {
   * It also intializes the page table and the address allocator data
   * structures required for the dma_ops interface
   */
 -static struct dma_ops_domain *dma_ops_domain_alloc(struct amd_iommu *iommu)
 +static struct dma_ops_domain *dma_ops_domain_alloc(void)
  {
        struct dma_ops_domain *dma_dom;
  
        dma_dom->domain.id = domain_id_alloc();
        if (dma_dom->domain.id == 0)
                goto free_dma_dom;
 +      INIT_LIST_HEAD(&dma_dom->domain.dev_list);
        dma_dom->domain.mode = PAGE_MODE_2_LEVEL;
        dma_dom->domain.pt_root = (void *)get_zeroed_page(GFP_KERNEL);
        dma_dom->domain.flags = PD_DMA_OPS_MASK;
        dma_dom->need_flush = false;
        dma_dom->target_dev = 0xffff;
  
 -      if (alloc_new_range(iommu, dma_dom, true, GFP_KERNEL))
 +      add_domain_to_list(&dma_dom->domain);
 +
 +      if (alloc_new_range(dma_dom, true, GFP_KERNEL))
                goto free_dma_dom;
  
        /*
@@@ -1305,6 -1129,22 +1305,6 @@@ static bool dma_ops_domain(struct prote
        return domain->flags & PD_DMA_OPS_MASK;
  }
  
 -/*
 - * Find out the protection domain structure for a given PCI device. This
 - * will give us the pointer to the page table root for example.
 - */
 -static struct protection_domain *domain_for_device(u16 devid)
 -{
 -      struct protection_domain *dom;
 -      unsigned long flags;
 -
 -      read_lock_irqsave(&amd_iommu_devtable_lock, flags);
 -      dom = amd_iommu_pd_table[devid];
 -      read_unlock_irqrestore(&amd_iommu_devtable_lock, flags);
 -
 -      return dom;
 -}
 -
  static void set_dte_entry(u16 devid, struct protection_domain *domain)
  {
        u64 pte_root = virt_to_phys(domain->pt_root);
        amd_iommu_dev_table[devid].data[2] = domain->id;
        amd_iommu_dev_table[devid].data[1] = upper_32_bits(pte_root);
        amd_iommu_dev_table[devid].data[0] = lower_32_bits(pte_root);
 +}
 +
 +static void clear_dte_entry(u16 devid)
 +{
 +      /* remove entry from the device table seen by the hardware */
 +      amd_iommu_dev_table[devid].data[0] = IOMMU_PTE_P | IOMMU_PTE_TV;
 +      amd_iommu_dev_table[devid].data[1] = 0;
 +      amd_iommu_dev_table[devid].data[2] = 0;
 +
 +      amd_iommu_apply_erratum_63(devid);
 +}
 +
 +static void do_attach(struct device *dev, struct protection_domain *domain)
 +{
 +      struct iommu_dev_data *dev_data;
 +      struct amd_iommu *iommu;
 +      u16 devid;
  
 -      amd_iommu_pd_table[devid] = domain;
 +      devid    = get_device_id(dev);
 +      iommu    = amd_iommu_rlookup_table[devid];
 +      dev_data = get_dev_data(dev);
 +
 +      /* Update data structures */
 +      dev_data->domain = domain;
 +      list_add(&dev_data->list, &domain->dev_list);
 +      set_dte_entry(devid, domain);
 +
 +      /* Do reference counting */
 +      domain->dev_iommu[iommu->index] += 1;
 +      domain->dev_cnt                 += 1;
 +
 +      /* Flush the DTE entry */
 +      iommu_flush_device(dev);
 +}
 +
 +static void do_detach(struct device *dev)
 +{
 +      struct iommu_dev_data *dev_data;
 +      struct amd_iommu *iommu;
 +      u16 devid;
 +
 +      devid    = get_device_id(dev);
 +      iommu    = amd_iommu_rlookup_table[devid];
 +      dev_data = get_dev_data(dev);
 +
 +      /* decrease reference counters */
 +      dev_data->domain->dev_iommu[iommu->index] -= 1;
 +      dev_data->domain->dev_cnt                 -= 1;
 +
 +      /* Update data structures */
 +      dev_data->domain = NULL;
 +      list_del(&dev_data->list);
 +      clear_dte_entry(devid);
 +
 +      /* Flush the DTE entry */
 +      iommu_flush_device(dev);
  }
  
  /*
   * If a device is not yet associated with a domain, this function does
   * assigns it visible for the hardware
   */
 -static void __attach_device(struct amd_iommu *iommu,
 -                          struct protection_domain *domain,
 -                          u16 devid)
 +static int __attach_device(struct device *dev,
 +                         struct protection_domain *domain)
  {
 +      struct iommu_dev_data *dev_data, *alias_data;
 +
 +      dev_data   = get_dev_data(dev);
 +      alias_data = get_dev_data(dev_data->alias);
 +
 +      if (!alias_data)
 +              return -EINVAL;
 +
        /* lock domain */
        spin_lock(&domain->lock);
  
 -      /* update DTE entry */
 -      set_dte_entry(devid, domain);
 +      /* Some sanity checks */
 +      if (alias_data->domain != NULL &&
 +          alias_data->domain != domain)
 +              return -EBUSY;
 +
 +      if (dev_data->domain != NULL &&
 +          dev_data->domain != domain)
 +              return -EBUSY;
 +
 +      /* Do real assignment */
 +      if (dev_data->alias != dev) {
 +              alias_data = get_dev_data(dev_data->alias);
 +              if (alias_data->domain == NULL)
 +                      do_attach(dev_data->alias, domain);
 +
 +              atomic_inc(&alias_data->bind);
 +      }
 +
 +      if (dev_data->domain == NULL)
 +              do_attach(dev, domain);
  
 -      domain->dev_cnt += 1;
 +      atomic_inc(&dev_data->bind);
  
        /* ready */
        spin_unlock(&domain->lock);
 +
 +      return 0;
  }
  
  /*
   * If a device is not yet associated with a domain, this function does
   * assigns it visible for the hardware
   */
 -static void attach_device(struct amd_iommu *iommu,
 -                        struct protection_domain *domain,
 -                        u16 devid)
 +static int attach_device(struct device *dev,
 +                       struct protection_domain *domain)
  {
        unsigned long flags;
 +      int ret;
  
        write_lock_irqsave(&amd_iommu_devtable_lock, flags);
 -      __attach_device(iommu, domain, devid);
 +      ret = __attach_device(dev, domain);
        write_unlock_irqrestore(&amd_iommu_devtable_lock, flags);
  
        /*
         * left the caches in the IOMMU dirty. So we have to flush
         * here to evict all dirty stuff.
         */
 -      iommu_queue_inv_dev_entry(iommu, devid);
 -      iommu_flush_tlb_pde(iommu, domain->id);
 +      iommu_flush_tlb_pde(domain);
 +
 +      return ret;
  }
  
  /*
   * Removes a device from a protection domain (unlocked)
   */
 -static void __detach_device(struct protection_domain *domain, u16 devid)
 +static void __detach_device(struct device *dev)
  {
 +      struct iommu_dev_data *dev_data = get_dev_data(dev);
 +      struct iommu_dev_data *alias_data;
 +      unsigned long flags;
  
 -      /* lock domain */
 -      spin_lock(&domain->lock);
 -
 -      /* remove domain from the lookup table */
 -      amd_iommu_pd_table[devid] = NULL;
 +      BUG_ON(!dev_data->domain);
  
 -      /* remove entry from the device table seen by the hardware */
 -      amd_iommu_dev_table[devid].data[0] = IOMMU_PTE_P | IOMMU_PTE_TV;
 -      amd_iommu_dev_table[devid].data[1] = 0;
 -      amd_iommu_dev_table[devid].data[2] = 0;
 +      spin_lock_irqsave(&dev_data->domain->lock, flags);
  
 -      amd_iommu_apply_erratum_63(devid);
 +      if (dev_data->alias != dev) {
 +              alias_data = get_dev_data(dev_data->alias);
 +              if (atomic_dec_and_test(&alias_data->bind))
 +                      do_detach(dev_data->alias);
 +      }
  
 -      /* decrease reference counter */
 -      domain->dev_cnt -= 1;
 +      if (atomic_dec_and_test(&dev_data->bind))
 +              do_detach(dev);
  
 -      /* ready */
 -      spin_unlock(&domain->lock);
 +      spin_unlock_irqrestore(&dev_data->domain->lock, flags);
  
        /*
         * If we run in passthrough mode the device must be assigned to the
         * passthrough domain if it is detached from any other domain
         */
 -      if (iommu_pass_through) {
 -              struct amd_iommu *iommu = amd_iommu_rlookup_table[devid];
 -              __attach_device(iommu, pt_domain, devid);
 -      }
 +      if (iommu_pass_through && dev_data->domain == NULL)
 +              __attach_device(dev, pt_domain);
  }
  
  /*
   * Removes a device from a protection domain (with devtable_lock held)
   */
 -static void detach_device(struct protection_domain *domain, u16 devid)
 +static void detach_device(struct device *dev)
  {
        unsigned long flags;
  
        /* lock device table */
        write_lock_irqsave(&amd_iommu_devtable_lock, flags);
 -      __detach_device(domain, devid);
 +      __detach_device(dev);
        write_unlock_irqrestore(&amd_iommu_devtable_lock, flags);
  }
  
 +/*
 + * Find out the protection domain structure for a given PCI device. This
 + * will give us the pointer to the page table root for example.
 + */
 +static struct protection_domain *domain_for_device(struct device *dev)
 +{
 +      struct protection_domain *dom;
 +      struct iommu_dev_data *dev_data, *alias_data;
 +      unsigned long flags;
 +      u16 devid, alias;
 +
 +      devid      = get_device_id(dev);
 +      alias      = amd_iommu_alias_table[devid];
 +      dev_data   = get_dev_data(dev);
 +      alias_data = get_dev_data(dev_data->alias);
 +      if (!alias_data)
 +              return NULL;
 +
 +      read_lock_irqsave(&amd_iommu_devtable_lock, flags);
 +      dom = dev_data->domain;
 +      if (dom == NULL &&
 +          alias_data->domain != NULL) {
 +              __attach_device(dev, alias_data->domain);
 +              dom = alias_data->domain;
 +      }
 +
 +      read_unlock_irqrestore(&amd_iommu_devtable_lock, flags);
 +
 +      return dom;
 +}
 +
  static int device_change_notifier(struct notifier_block *nb,
                                  unsigned long action, void *data)
  {
        struct device *dev = data;
 -      struct pci_dev *pdev = to_pci_dev(dev);
 -      u16 devid = calc_devid(pdev->bus->number, pdev->devfn);
 +      u16 devid;
        struct protection_domain *domain;
        struct dma_ops_domain *dma_domain;
        struct amd_iommu *iommu;
        unsigned long flags;
  
 -      if (devid > amd_iommu_last_bdf)
 -              goto out;
 -
 -      devid = amd_iommu_alias_table[devid];
 -
 -      iommu = amd_iommu_rlookup_table[devid];
 -      if (iommu == NULL)
 -              goto out;
 -
 -      domain = domain_for_device(devid);
 +      if (!check_device(dev))
 +              return 0;
  
 -      if (domain && !dma_ops_domain(domain))
 -              WARN_ONCE(1, "AMD IOMMU WARNING: device %s already bound "
 -                        "to a non-dma-ops domain\n", dev_name(dev));
 +      devid  = get_device_id(dev);
 +      iommu  = amd_iommu_rlookup_table[devid];
  
        switch (action) {
        case BUS_NOTIFY_UNBOUND_DRIVER:
 +
 +              domain = domain_for_device(dev);
 +
                if (!domain)
                        goto out;
                if (iommu_pass_through)
                        break;
 -              detach_device(domain, devid);
 +              detach_device(dev);
                break;
        case BUS_NOTIFY_ADD_DEVICE:
 +
 +              iommu_init_device(dev);
 +
 +              domain = domain_for_device(dev);
 +
                /* allocate a protection domain if a device is added */
                dma_domain = find_protection_domain(devid);
                if (dma_domain)
                        goto out;
 -              dma_domain = dma_ops_domain_alloc(iommu);
 +              dma_domain = dma_ops_domain_alloc();
                if (!dma_domain)
                        goto out;
                dma_domain->target_dev = devid;
                spin_unlock_irqrestore(&iommu_pd_list_lock, flags);
  
                break;
 +      case BUS_NOTIFY_DEL_DEVICE:
 +
 +              iommu_uninit_device(dev);
 +
        default:
                goto out;
        }
  
 -      iommu_queue_inv_dev_entry(iommu, devid);
 +      iommu_flush_device(dev);
        iommu_completion_wait(iommu);
  
  out:
@@@ -1594,46 -1322,106 +1594,46 @@@ static struct notifier_block device_nb 
   *****************************************************************************/
  
  /*
 - * This function checks if the driver got a valid device from the caller to
 - * avoid dereferencing invalid pointers.
 - */
 -static bool check_device(struct device *dev)
 -{
 -      if (!dev || !dev->dma_mask)
 -              return false;
 -
 -      return true;
 -}
 -
 -/*
 - * In this function the list of preallocated protection domains is traversed to
 - * find the domain for a specific device
 - */
 -static struct dma_ops_domain *find_protection_domain(u16 devid)
 -{
 -      struct dma_ops_domain *entry, *ret = NULL;
 -      unsigned long flags;
 -
 -      if (list_empty(&iommu_pd_list))
 -              return NULL;
 -
 -      spin_lock_irqsave(&iommu_pd_list_lock, flags);
 -
 -      list_for_each_entry(entry, &iommu_pd_list, list) {
 -              if (entry->target_dev == devid) {
 -                      ret = entry;
 -                      break;
 -              }
 -      }
 -
 -      spin_unlock_irqrestore(&iommu_pd_list_lock, flags);
 -
 -      return ret;
 -}
 -
 -/*
   * In the dma_ops path we only have the struct device. This function
   * finds the corresponding IOMMU, the protection domain and the
   * requestor id for a given device.
   * If the device is not yet associated with a domain this is also done
   * in this function.
   */
 -static int get_device_resources(struct device *dev,
 -                              struct amd_iommu **iommu,
 -                              struct protection_domain **domain,
 -                              u16 *bdf)
 +static struct protection_domain *get_domain(struct device *dev)
  {
 +      struct protection_domain *domain;
        struct dma_ops_domain *dma_dom;
 -      struct pci_dev *pcidev;
 -      u16 _bdf;
 -
 -      *iommu = NULL;
 -      *domain = NULL;
 -      *bdf = 0xffff;
 -
 -      if (dev->bus != &pci_bus_type)
 -              return 0;
 -
 -      pcidev = to_pci_dev(dev);
 -      _bdf = calc_devid(pcidev->bus->number, pcidev->devfn);
 +      u16 devid = get_device_id(dev);
  
 -      /* device not translated by any IOMMU in the system? */
 -      if (_bdf > amd_iommu_last_bdf)
 -              return 0;
 +      if (!check_device(dev))
 +              return ERR_PTR(-EINVAL);
  
 -      *bdf = amd_iommu_alias_table[_bdf];
 +      domain = domain_for_device(dev);
 +      if (domain != NULL && !dma_ops_domain(domain))
 +              return ERR_PTR(-EBUSY);
  
 -      *iommu = amd_iommu_rlookup_table[*bdf];
 -      if (*iommu == NULL)
 -              return 0;
 -      *domain = domain_for_device(*bdf);
 -      if (*domain == NULL) {
 -              dma_dom = find_protection_domain(*bdf);
 -              if (!dma_dom)
 -                      dma_dom = (*iommu)->default_dom;
 -              *domain = &dma_dom->domain;
 -              attach_device(*iommu, *domain, *bdf);
 -              DUMP_printk("Using protection domain %d for device %s\n",
 -                          (*domain)->id, dev_name(dev));
 -      }
 +      if (domain != NULL)
 +              return domain;
  
 -      if (domain_for_device(_bdf) == NULL)
 -              attach_device(*iommu, *domain, _bdf);
 +      /* Device not bound yet - bind it */
 +      dma_dom = find_protection_domain(devid);
 +      if (!dma_dom)
 +              dma_dom = amd_iommu_rlookup_table[devid]->default_dom;
 +      attach_device(dev, &dma_dom->domain);
 +      DUMP_printk("Using protection domain %d for device %s\n",
 +                  dma_dom->domain.id, dev_name(dev));
  
 -      return 1;
 +      return &dma_dom->domain;
  }
  
  static void update_device_table(struct protection_domain *domain)
  {
 -      unsigned long flags;
 -      int i;
 +      struct iommu_dev_data *dev_data;
  
 -      for (i = 0; i <= amd_iommu_last_bdf; ++i) {
 -              if (amd_iommu_pd_table[i] != domain)
 -                      continue;
 -              write_lock_irqsave(&amd_iommu_devtable_lock, flags);
 -              set_dte_entry(i, domain);
 -              write_unlock_irqrestore(&amd_iommu_devtable_lock, flags);
 +      list_for_each_entry(dev_data, &domain->dev_list, list) {
 +              u16 devid = get_device_id(dev_data->dev);
 +              set_dte_entry(devid, domain);
        }
  }
  
@@@ -1643,13 -1431,76 +1643,13 @@@ static void update_domain(struct protec
                return;
  
        update_device_table(domain);
 -      flush_devices_by_domain(domain);
 -      iommu_flush_domain(domain->id);
 +      iommu_flush_domain_devices(domain);
 +      iommu_flush_tlb_pde(domain);
  
        domain->updated = false;
  }
  
  /*
 - * This function is used to add another level to an IO page table. Adding
 - * another level increases the size of the address space by 9 bits to a size up
 - * to 64 bits.
 - */
 -static bool increase_address_space(struct protection_domain *domain,
 -                                 gfp_t gfp)
 -{
 -      u64 *pte;
 -
 -      if (domain->mode == PAGE_MODE_6_LEVEL)
 -              /* address space already 64 bit large */
 -              return false;
 -
 -      pte = (void *)get_zeroed_page(gfp);
 -      if (!pte)
 -              return false;
 -
 -      *pte             = PM_LEVEL_PDE(domain->mode,
 -                                      virt_to_phys(domain->pt_root));
 -      domain->pt_root  = pte;
 -      domain->mode    += 1;
 -      domain->updated  = true;
 -
 -      return true;
 -}
 -
 -static u64 *alloc_pte(struct protection_domain *domain,
 -                    unsigned long address,
 -                    int end_lvl,
 -                    u64 **pte_page,
 -                    gfp_t gfp)
 -{
 -      u64 *pte, *page;
 -      int level;
 -
 -      while (address > PM_LEVEL_SIZE(domain->mode))
 -              increase_address_space(domain, gfp);
 -
 -      level =  domain->mode - 1;
 -      pte   = &domain->pt_root[PM_LEVEL_INDEX(level, address)];
 -
 -      while (level > end_lvl) {
 -              if (!IOMMU_PTE_PRESENT(*pte)) {
 -                      page = (u64 *)get_zeroed_page(gfp);
 -                      if (!page)
 -                              return NULL;
 -                      *pte = PM_LEVEL_PDE(level, virt_to_phys(page));
 -              }
 -
 -              level -= 1;
 -
 -              pte = IOMMU_PTE_PAGE(*pte);
 -
 -              if (pte_page && level == end_lvl)
 -                      *pte_page = pte;
 -
 -              pte = &pte[PM_LEVEL_INDEX(level, address)];
 -      }
 -
 -      return pte;
 -}
 -
 -/*
   * This function fetches the PTE for a given address in the aperture
   */
  static u64* dma_ops_get_pte(struct dma_ops_domain *dom,
   * This is the generic map function. It maps one 4kb page at paddr to
   * the given address in the DMA address space for the domain.
   */
 -static dma_addr_t dma_ops_domain_map(struct amd_iommu *iommu,
 -                                   struct dma_ops_domain *dom,
 +static dma_addr_t dma_ops_domain_map(struct dma_ops_domain *dom,
                                     unsigned long address,
                                     phys_addr_t paddr,
                                     int direction)
  
        pte  = dma_ops_get_pte(dom, address);
        if (!pte)
 -              return bad_dma_address;
 +              return DMA_ERROR_CODE;
  
        __pte = paddr | IOMMU_PTE_P | IOMMU_PTE_FC;
  
  /*
   * The generic unmapping function for on page in the DMA address space.
   */
 -static void dma_ops_domain_unmap(struct amd_iommu *iommu,
 -                               struct dma_ops_domain *dom,
 +static void dma_ops_domain_unmap(struct dma_ops_domain *dom,
                                 unsigned long address)
  {
        struct aperture_range *aperture;
   * Must be called with the domain lock held.
   */
  static dma_addr_t __map_single(struct device *dev,
 -                             struct amd_iommu *iommu,
                               struct dma_ops_domain *dma_dom,
                               phys_addr_t paddr,
                               size_t size,
  retry:
        address = dma_ops_alloc_addresses(dev, dma_dom, pages, align_mask,
                                          dma_mask);
 -      if (unlikely(address == bad_dma_address)) {
 +      if (unlikely(address == DMA_ERROR_CODE)) {
                /*
                 * setting next_address here will let the address
                 * allocator only scan the new allocated range in the
                 */
                dma_dom->next_address = dma_dom->aperture_size;
  
 -              if (alloc_new_range(iommu, dma_dom, false, GFP_ATOMIC))
 +              if (alloc_new_range(dma_dom, false, GFP_ATOMIC))
                        goto out;
  
                /*
-                * aperture was sucessfully enlarged by 128 MB, try
+                * aperture was successfully enlarged by 128 MB, try
                 * allocation again
                 */
                goto retry;
  
        start = address;
        for (i = 0; i < pages; ++i) {
 -              ret = dma_ops_domain_map(iommu, dma_dom, start, paddr, dir);
 -              if (ret == bad_dma_address)
 +              ret = dma_ops_domain_map(dma_dom, start, paddr, dir);
 +              if (ret == DMA_ERROR_CODE)
                        goto out_unmap;
  
                paddr += PAGE_SIZE;
        ADD_STATS_COUNTER(alloced_io_mem, size);
  
        if (unlikely(dma_dom->need_flush && !amd_iommu_unmap_flush)) {
 -              iommu_flush_tlb(iommu, dma_dom->domain.id);
 +              iommu_flush_tlb(&dma_dom->domain);
                dma_dom->need_flush = false;
 -      } else if (unlikely(iommu_has_npcache(iommu)))
 -              iommu_flush_pages(iommu, dma_dom->domain.id, address, size);
 +      } else if (unlikely(amd_iommu_np_cache))
 +              iommu_flush_pages(&dma_dom->domain, address, size);
  
  out:
        return address;
@@@ -1815,19 -1669,20 +1815,19 @@@ out_unmap
  
        for (--i; i >= 0; --i) {
                start -= PAGE_SIZE;
 -              dma_ops_domain_unmap(iommu, dma_dom, start);
 +              dma_ops_domain_unmap(dma_dom, start);
        }
  
        dma_ops_free_addresses(dma_dom, address, pages);
  
 -      return bad_dma_address;
 +      return DMA_ERROR_CODE;
  }
  
  /*
   * Does the reverse of the __map_single function. Must be called with
   * the domain lock held too
   */
 -static void __unmap_single(struct amd_iommu *iommu,
 -                         struct dma_ops_domain *dma_dom,
 +static void __unmap_single(struct dma_ops_domain *dma_dom,
                           dma_addr_t dma_addr,
                           size_t size,
                           int dir)
        dma_addr_t i, start;
        unsigned int pages;
  
 -      if ((dma_addr == bad_dma_address) ||
 +      if ((dma_addr == DMA_ERROR_CODE) ||
            (dma_addr + size > dma_dom->aperture_size))
                return;
  
        start = dma_addr;
  
        for (i = 0; i < pages; ++i) {
 -              dma_ops_domain_unmap(iommu, dma_dom, start);
 +              dma_ops_domain_unmap(dma_dom, start);
                start += PAGE_SIZE;
        }
  
        dma_ops_free_addresses(dma_dom, dma_addr, pages);
  
        if (amd_iommu_unmap_flush || dma_dom->need_flush) {
 -              iommu_flush_pages(iommu, dma_dom->domain.id, dma_addr, size);
 +              iommu_flush_pages(&dma_dom->domain, dma_addr, size);
                dma_dom->need_flush = false;
        }
  }
@@@ -1867,29 -1722,36 +1867,29 @@@ static dma_addr_t map_page(struct devic
                           struct dma_attrs *attrs)
  {
        unsigned long flags;
 -      struct amd_iommu *iommu;
        struct protection_domain *domain;
 -      u16 devid;
        dma_addr_t addr;
        u64 dma_mask;
        phys_addr_t paddr = page_to_phys(page) + offset;
  
        INC_STATS_COUNTER(cnt_map_single);
  
 -      if (!check_device(dev))
 -              return bad_dma_address;
 -
 -      dma_mask = *dev->dma_mask;
 -
 -      get_device_resources(dev, &iommu, &domain, &devid);
 -
 -      if (iommu == NULL || domain == NULL)
 -              /* device not handled by any AMD IOMMU */
 +      domain = get_domain(dev);
 +      if (PTR_ERR(domain) == -EINVAL)
                return (dma_addr_t)paddr;
 +      else if (IS_ERR(domain))
 +              return DMA_ERROR_CODE;
  
 -      if (!dma_ops_domain(domain))
 -              return bad_dma_address;
 +      dma_mask = *dev->dma_mask;
  
        spin_lock_irqsave(&domain->lock, flags);
 -      addr = __map_single(dev, iommu, domain->priv, paddr, size, dir, false,
 +
 +      addr = __map_single(dev, domain->priv, paddr, size, dir, false,
                            dma_mask);
 -      if (addr == bad_dma_address)
 +      if (addr == DMA_ERROR_CODE)
                goto out;
  
 -      iommu_completion_wait(iommu);
 +      iommu_flush_complete(domain);
  
  out:
        spin_unlock_irqrestore(&domain->lock, flags);
@@@ -1904,19 -1766,25 +1904,19 @@@ static void unmap_page(struct device *d
                       enum dma_data_direction dir, struct dma_attrs *attrs)
  {
        unsigned long flags;
 -      struct amd_iommu *iommu;
        struct protection_domain *domain;
  
        INC_STATS_COUNTER(cnt_unmap_single);
  
 -      if (!check_device(dev) ||
 -          !get_device_resources(dev, &iommu, &domain, &devid))
 -              /* device not handled by any AMD IOMMU */
 -              return;
 -
 -      if (!dma_ops_domain(domain))
 +      domain = get_domain(dev);
 +      if (IS_ERR(domain))
                return;
  
        spin_lock_irqsave(&domain->lock, flags);
  
 -      __unmap_single(iommu, domain->priv, dma_addr, size, dir);
 +      __unmap_single(domain->priv, dma_addr, size, dir);
  
 -      iommu_completion_wait(iommu);
 +      iommu_flush_complete(domain);
  
        spin_unlock_irqrestore(&domain->lock, flags);
  }
@@@ -1948,7 -1816,9 +1948,7 @@@ static int map_sg(struct device *dev, s
                  struct dma_attrs *attrs)
  {
        unsigned long flags;
 -      struct amd_iommu *iommu;
        struct protection_domain *domain;
 -      u16 devid;
        int i;
        struct scatterlist *s;
        phys_addr_t paddr;
  
        INC_STATS_COUNTER(cnt_map_sg);
  
 -      if (!check_device(dev))
 +      domain = get_domain(dev);
 +      if (PTR_ERR(domain) == -EINVAL)
 +              return map_sg_no_iommu(dev, sglist, nelems, dir);
 +      else if (IS_ERR(domain))
                return 0;
  
        dma_mask = *dev->dma_mask;
  
 -      get_device_resources(dev, &iommu, &domain, &devid);
 -
 -      if (!iommu || !domain)
 -              return map_sg_no_iommu(dev, sglist, nelems, dir);
 -
 -      if (!dma_ops_domain(domain))
 -              return 0;
 -
        spin_lock_irqsave(&domain->lock, flags);
  
        for_each_sg(sglist, s, nelems, i) {
                paddr = sg_phys(s);
  
 -              s->dma_address = __map_single(dev, iommu, domain->priv,
 +              s->dma_address = __map_single(dev, domain->priv,
                                              paddr, s->length, dir, false,
                                              dma_mask);
  
                        goto unmap;
        }
  
 -      iommu_completion_wait(iommu);
 +      iommu_flush_complete(domain);
  
  out:
        spin_unlock_irqrestore(&domain->lock, flags);
  unmap:
        for_each_sg(sglist, s, mapped_elems, i) {
                if (s->dma_address)
 -                      __unmap_single(iommu, domain->priv, s->dma_address,
 +                      __unmap_single(domain->priv, s->dma_address,
                                       s->dma_length, dir);
                s->dma_address = s->dma_length = 0;
        }
@@@ -2009,25 -1884,30 +2009,25 @@@ static void unmap_sg(struct device *dev
                     struct dma_attrs *attrs)
  {
        unsigned long flags;
 -      struct amd_iommu *iommu;
        struct protection_domain *domain;
        struct scatterlist *s;
 -      u16 devid;
        int i;
  
        INC_STATS_COUNTER(cnt_unmap_sg);
  
 -      if (!check_device(dev) ||
 -          !get_device_resources(dev, &iommu, &domain, &devid))
 -              return;
 -
 -      if (!dma_ops_domain(domain))
 +      domain = get_domain(dev);
 +      if (IS_ERR(domain))
                return;
  
        spin_lock_irqsave(&domain->lock, flags);
  
        for_each_sg(sglist, s, nelems, i) {
 -              __unmap_single(iommu, domain->priv, s->dma_address,
 +              __unmap_single(domain->priv, s->dma_address,
                               s->dma_length, dir);
                s->dma_address = s->dma_length = 0;
        }
  
 -      iommu_completion_wait(iommu);
 +      iommu_flush_complete(domain);
  
        spin_unlock_irqrestore(&domain->lock, flags);
  }
@@@ -2040,44 -1920,49 +2040,44 @@@ static void *alloc_coherent(struct devi
  {
        unsigned long flags;
        void *virt_addr;
 -      struct amd_iommu *iommu;
        struct protection_domain *domain;
 -      u16 devid;
        phys_addr_t paddr;
        u64 dma_mask = dev->coherent_dma_mask;
  
        INC_STATS_COUNTER(cnt_alloc_coherent);
  
 -      if (!check_device(dev))
 +      domain = get_domain(dev);
 +      if (PTR_ERR(domain) == -EINVAL) {
 +              virt_addr = (void *)__get_free_pages(flag, get_order(size));
 +              *dma_addr = __pa(virt_addr);
 +              return virt_addr;
 +      } else if (IS_ERR(domain))
                return NULL;
  
 -      if (!get_device_resources(dev, &iommu, &domain, &devid))
 -              flag &= ~(__GFP_DMA | __GFP_HIGHMEM | __GFP_DMA32);
 +      dma_mask  = dev->coherent_dma_mask;
 +      flag     &= ~(__GFP_DMA | __GFP_HIGHMEM | __GFP_DMA32);
 +      flag     |= __GFP_ZERO;
  
 -      flag |= __GFP_ZERO;
        virt_addr = (void *)__get_free_pages(flag, get_order(size));
        if (!virt_addr)
                return NULL;
  
        paddr = virt_to_phys(virt_addr);
  
 -      if (!iommu || !domain) {
 -              *dma_addr = (dma_addr_t)paddr;
 -              return virt_addr;
 -      }
 -
 -      if (!dma_ops_domain(domain))
 -              goto out_free;
 -
        if (!dma_mask)
                dma_mask = *dev->dma_mask;
  
        spin_lock_irqsave(&domain->lock, flags);
  
 -      *dma_addr = __map_single(dev, iommu, domain->priv, paddr,
 +      *dma_addr = __map_single(dev, domain->priv, paddr,
                                 size, DMA_BIDIRECTIONAL, true, dma_mask);
  
 -      if (*dma_addr == bad_dma_address) {
 +      if (*dma_addr == DMA_ERROR_CODE) {
                spin_unlock_irqrestore(&domain->lock, flags);
                goto out_free;
        }
  
 -      iommu_completion_wait(iommu);
 +      iommu_flush_complete(domain);
  
        spin_unlock_irqrestore(&domain->lock, flags);
  
@@@ -2097,19 -1982,28 +2097,19 @@@ static void free_coherent(struct devic
                          void *virt_addr, dma_addr_t dma_addr)
  {
        unsigned long flags;
 -      struct amd_iommu *iommu;
        struct protection_domain *domain;
  
        INC_STATS_COUNTER(cnt_free_coherent);
  
 -      if (!check_device(dev))
 -              return;
 -
 -      get_device_resources(dev, &iommu, &domain, &devid);
 -
 -      if (!iommu || !domain)
 -              goto free_mem;
 -
 -      if (!dma_ops_domain(domain))
 +      domain = get_domain(dev);
 +      if (IS_ERR(domain))
                goto free_mem;
  
        spin_lock_irqsave(&domain->lock, flags);
  
 -      __unmap_single(iommu, domain->priv, dma_addr, size, DMA_BIDIRECTIONAL);
 +      __unmap_single(domain->priv, dma_addr, size, DMA_BIDIRECTIONAL);
  
 -      iommu_completion_wait(iommu);
 +      iommu_flush_complete(domain);
  
        spin_unlock_irqrestore(&domain->lock, flags);
  
@@@ -2123,7 -2017,22 +2123,7 @@@ free_mem
   */
  static int amd_iommu_dma_supported(struct device *dev, u64 mask)
  {
 -      u16 bdf;
 -      struct pci_dev *pcidev;
 -
 -      /* No device or no PCI device */
 -      if (!dev || dev->bus != &pci_bus_type)
 -              return 0;
 -
 -      pcidev = to_pci_dev(dev);
 -
 -      bdf = calc_devid(pcidev->bus->number, pcidev->devfn);
 -
 -      /* Out of our scope? */
 -      if (bdf > amd_iommu_last_bdf)
 -              return 0;
 -
 -      return 1;
 +      return check_device(dev);
  }
  
  /*
@@@ -2137,30 -2046,25 +2137,30 @@@ static void prealloc_protection_domains
  {
        struct pci_dev *dev = NULL;
        struct dma_ops_domain *dma_dom;
        u16 devid;
  
        while ((dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev)) != NULL) {
 -              devid = calc_devid(dev->bus->number, dev->devfn);
 -              if (devid > amd_iommu_last_bdf)
 -                      continue;
 -              devid = amd_iommu_alias_table[devid];
 -              if (domain_for_device(devid))
 +
 +              /* Do we handle this device? */
 +              if (!check_device(&dev->dev))
                        continue;
 -              iommu = amd_iommu_rlookup_table[devid];
 -              if (!iommu)
 +
 +              iommu_init_device(&dev->dev);
 +
 +              /* Is there already any domain for it? */
 +              if (domain_for_device(&dev->dev))
                        continue;
 -              dma_dom = dma_ops_domain_alloc(iommu);
 +
 +              devid = get_device_id(&dev->dev);
 +
 +              dma_dom = dma_ops_domain_alloc();
                if (!dma_dom)
                        continue;
                init_unity_mappings_for_device(dma_dom, devid);
                dma_dom->target_dev = devid;
  
 +              attach_device(&dev->dev, &dma_dom->domain);
 +
                list_add_tail(&dma_dom->list, &iommu_pd_list);
        }
  }
@@@ -2189,7 -2093,7 +2189,7 @@@ int __init amd_iommu_init_dma_ops(void
         * protection domain will be assigned to the default one.
         */
        for_each_iommu(iommu) {
 -              iommu->default_dom = dma_ops_domain_alloc(iommu);
 +              iommu->default_dom = dma_ops_domain_alloc();
                if (iommu->default_dom == NULL)
                        return -ENOMEM;
                iommu->default_dom->domain.flags |= PD_DEFAULT_MASK;
        }
  
        /*
 -       * If device isolation is enabled, pre-allocate the protection
 -       * domains for each device.
 +       * Pre-allocate the protection domains for each device.
         */
 -      if (amd_iommu_isolate)
 -              prealloc_protection_domains();
 +      prealloc_protection_domains();
  
        iommu_detected = 1;
 -      force_iommu = 1;
 -      bad_dma_address = 0;
 +      swiotlb = 0;
  #ifdef CONFIG_GART_IOMMU
        gart_iommu_aperture_disabled = 1;
        gart_iommu_aperture = 0;
@@@ -2243,17 -2150,14 +2243,17 @@@ free_domains
  
  static void cleanup_domain(struct protection_domain *domain)
  {
 +      struct iommu_dev_data *dev_data, *next;
        unsigned long flags;
  
        write_lock_irqsave(&amd_iommu_devtable_lock, flags);
  
 -      for (devid = 0; devid <= amd_iommu_last_bdf; ++devid)
 -              if (amd_iommu_pd_table[devid] == domain)
 -                      __detach_device(domain, devid);
 +      list_for_each_entry_safe(dev_data, next, &domain->dev_list, list) {
 +              struct device *dev = dev_data->dev;
 +
 +              do_detach(dev);
 +              atomic_set(&dev_data->bind, 0);
 +      }
  
        write_unlock_irqrestore(&amd_iommu_devtable_lock, flags);
  }
@@@ -2263,8 -2167,6 +2263,8 @@@ static void protection_domain_free(stru
        if (!domain)
                return;
  
 +      del_domain_from_list(domain);
 +
        if (domain->id)
                domain_id_free(domain->id);
  
@@@ -2283,9 -2185,6 +2283,9 @@@ static struct protection_domain *protec
        domain->id = domain_id_alloc();
        if (!domain->id)
                goto out_err;
 +      INIT_LIST_HEAD(&domain->dev_list);
 +
 +      add_domain_to_list(domain);
  
        return domain;
  
@@@ -2342,23 -2241,26 +2342,23 @@@ static void amd_iommu_domain_destroy(st
  static void amd_iommu_detach_device(struct iommu_domain *dom,
                                    struct device *dev)
  {
 -      struct protection_domain *domain = dom->priv;
 +      struct iommu_dev_data *dev_data = dev->archdata.iommu;
        struct amd_iommu *iommu;
 -      struct pci_dev *pdev;
        u16 devid;
  
 -      if (dev->bus != &pci_bus_type)
 +      if (!check_device(dev))
                return;
  
 -      pdev = to_pci_dev(dev);
 -
 -      devid = calc_devid(pdev->bus->number, pdev->devfn);
 +      devid = get_device_id(dev);
  
 -      if (devid > 0)
 -              detach_device(domain, devid);
 +      if (dev_data->domain != NULL)
 +              detach_device(dev);
  
        iommu = amd_iommu_rlookup_table[devid];
        if (!iommu)
                return;
  
 -      iommu_queue_inv_dev_entry(iommu, devid);
 +      iommu_flush_device(dev);
        iommu_completion_wait(iommu);
  }
  
@@@ -2366,30 -2268,35 +2366,30 @@@ static int amd_iommu_attach_device(stru
                                   struct device *dev)
  {
        struct protection_domain *domain = dom->priv;
 -      struct protection_domain *old_domain;
 +      struct iommu_dev_data *dev_data;
        struct amd_iommu *iommu;
 -      struct pci_dev *pdev;
 +      int ret;
        u16 devid;
  
 -      if (dev->bus != &pci_bus_type)
 +      if (!check_device(dev))
                return -EINVAL;
  
 -      pdev = to_pci_dev(dev);
 -
 -      devid = calc_devid(pdev->bus->number, pdev->devfn);
 +      dev_data = dev->archdata.iommu;
  
 -      if (devid >= amd_iommu_last_bdf ||
 -                      devid != amd_iommu_alias_table[devid])
 -              return -EINVAL;
 +      devid = get_device_id(dev);
  
        iommu = amd_iommu_rlookup_table[devid];
        if (!iommu)
                return -EINVAL;
  
 -      old_domain = domain_for_device(devid);
 -      if (old_domain)
 -              detach_device(old_domain, devid);
 +      if (dev_data->domain)
 +              detach_device(dev);
  
 -      attach_device(iommu, domain, devid);
 +      ret = attach_device(dev, domain);
  
        iommu_completion_wait(iommu);
  
 -      return 0;
 +      return ret;
  }
  
  static int amd_iommu_map_range(struct iommu_domain *dom,
@@@ -2435,7 -2342,7 +2435,7 @@@ static void amd_iommu_unmap_range(struc
                iova  += PAGE_SIZE;
        }
  
 -      iommu_flush_domain(domain->id);
 +      iommu_flush_tlb_pde(domain);
  }
  
  static phys_addr_t amd_iommu_iova_to_phys(struct iommu_domain *dom,
@@@ -2486,11 -2393,10 +2486,11 @@@ static struct iommu_ops amd_iommu_ops 
  
  int __init amd_iommu_init_passthrough(void)
  {
 +      struct amd_iommu *iommu;
        struct pci_dev *dev = NULL;
 -      u16 devid, devid2;
 +      u16 devid;
  
-       /* allocate passthroug domain */
+       /* allocate passthrough domain */
        pt_domain = protection_domain_alloc();
        if (!pt_domain)
                return -ENOMEM;
        pt_domain->mode |= PAGE_MODE_NONE;
  
        while ((dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev)) != NULL) {
 -              struct amd_iommu *iommu;
  
 -              devid = calc_devid(dev->bus->number, dev->devfn);
 -              if (devid > amd_iommu_last_bdf)
 +              if (!check_device(&dev->dev))
                        continue;
  
 -              devid2 = amd_iommu_alias_table[devid];
 +              devid = get_device_id(&dev->dev);
  
 -              iommu = amd_iommu_rlookup_table[devid2];
 +              iommu = amd_iommu_rlookup_table[devid];
                if (!iommu)
                        continue;
  
 -              __attach_device(iommu, pt_domain, devid);
 -              __attach_device(iommu, pt_domain, devid2);
 +              attach_device(&dev->dev, pt_domain);
        }
  
        pr_info("AMD-Vi: Initialized for Passthrough Mode\n");
diff --combined arch/x86/kernel/cpu/perf_event.c
@@@ -77,18 -77,6 +77,18 @@@ struct cpu_hw_events 
        struct debug_store      *ds;
  };
  
 +struct event_constraint {
 +      unsigned long   idxmsk[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
 +      int             code;
 +};
 +
 +#define EVENT_CONSTRAINT(c, m) { .code = (c), .idxmsk[0] = (m) }
 +#define EVENT_CONSTRAINT_END  { .code = 0, .idxmsk[0] = 0 }
 +
 +#define for_each_event_constraint(e, c) \
 +      for ((e) = (c); (e)->idxmsk[0]; (e)++)
 +
 +
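A short usage sketch may help readers map the macros above onto the allocator changes further down; allowed_counters() is a hypothetical helper written only for illustration, not something this patch adds:

	/*
	 * Return the bitmask of generic counters an event code may run on,
	 * or ~0UL when the code has no constraint entry.  Bit i set means
	 * counter i is allowed, which is exactly how intel_get_event_idx()
	 * walks idxmsk below.
	 */
	static unsigned long allowed_counters(const struct event_constraint *table,
					      int code)
	{
		const struct event_constraint *c;

		for_each_event_constraint(c, table) {
			if (c->code == code)
				return c->idxmsk[0];
		}

		return ~0UL;
	}

With the tables added below, allowed_counters(intel_p6_event_constraints, 0x12) would return 0x2, i.e. the MUL event may only be scheduled on counter 1.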
  /*
   * struct x86_pmu - generic x86 pmu
   */
@@@ -114,8 -102,6 +114,8 @@@ struct x86_pmu 
        u64             intel_ctrl;
        void            (*enable_bts)(u64 config);
        void            (*disable_bts)(void);
 +      int             (*get_event_idx)(struct cpu_hw_events *cpuc,
 +                                       struct hw_perf_event *hwc);
  };
  
  static struct x86_pmu x86_pmu __read_mostly;
@@@ -124,8 -110,6 +124,8 @@@ static DEFINE_PER_CPU(struct cpu_hw_eve
        .enabled = 1,
  };
  
 +static const struct event_constraint *event_constraints;
 +
  /*
   * Not sure about some of these
   */
@@@ -171,16 -155,6 +171,16 @@@ static u64 p6_pmu_raw_event(u64 hw_even
        return hw_event & P6_EVNTSEL_MASK;
  }
  
 +static const struct event_constraint intel_p6_event_constraints[] =
 +{
 +      EVENT_CONSTRAINT(0xc1, 0x1),    /* FLOPS */
 +      EVENT_CONSTRAINT(0x10, 0x1),    /* FP_COMP_OPS_EXE */
 +      EVENT_CONSTRAINT(0x11, 0x1),    /* FP_ASSIST */
 +      EVENT_CONSTRAINT(0x12, 0x2),    /* MUL */
 +      EVENT_CONSTRAINT(0x13, 0x2),    /* DIV */
 +      EVENT_CONSTRAINT(0x14, 0x1),    /* CYCLES_DIV_BUSY */
 +      EVENT_CONSTRAINT_END
 +};
  
  /*
   * Intel PerfMon v3. Used on Core2 and later.
@@@ -196,35 -170,6 +196,35 @@@ static const u64 intel_perfmon_event_ma
    [PERF_COUNT_HW_BUS_CYCLES]          = 0x013c,
  };
  
 +static const struct event_constraint intel_core_event_constraints[] =
 +{
 +      EVENT_CONSTRAINT(0x10, 0x1),    /* FP_COMP_OPS_EXE */
 +      EVENT_CONSTRAINT(0x11, 0x2),    /* FP_ASSIST */
 +      EVENT_CONSTRAINT(0x12, 0x2),    /* MUL */
 +      EVENT_CONSTRAINT(0x13, 0x2),    /* DIV */
 +      EVENT_CONSTRAINT(0x14, 0x1),    /* CYCLES_DIV_BUSY */
 +      EVENT_CONSTRAINT(0x18, 0x1),    /* IDLE_DURING_DIV */
 +      EVENT_CONSTRAINT(0x19, 0x2),    /* DELAYED_BYPASS */
 +      EVENT_CONSTRAINT(0xa1, 0x1),    /* RS_UOPS_DISPATCH_CYCLES */
 +      EVENT_CONSTRAINT(0xcb, 0x1),    /* MEM_LOAD_RETIRED */
 +      EVENT_CONSTRAINT_END
 +};
 +
 +static const struct event_constraint intel_nehalem_event_constraints[] =
 +{
 +      EVENT_CONSTRAINT(0x40, 0x3),    /* L1D_CACHE_LD */
 +      EVENT_CONSTRAINT(0x41, 0x3),    /* L1D_CACHE_ST */
 +      EVENT_CONSTRAINT(0x42, 0x3),    /* L1D_CACHE_LOCK */
 +      EVENT_CONSTRAINT(0x43, 0x3),    /* L1D_ALL_REF */
 +      EVENT_CONSTRAINT(0x4e, 0x3),    /* L1D_PREFETCH */
 +      EVENT_CONSTRAINT(0x4c, 0x3),    /* LOAD_HIT_PRE */
 +      EVENT_CONSTRAINT(0x51, 0x3),    /* L1D */
 +      EVENT_CONSTRAINT(0x52, 0x3),    /* L1D_CACHE_PREFETCH_LOCK_FB_HIT */
 +      EVENT_CONSTRAINT(0x53, 0x3),    /* L1D_CACHE_LOCK_FB_HIT */
 +      EVENT_CONSTRAINT(0xc5, 0x3),    /* CACHE_LOCK_CYCLES */
 +      EVENT_CONSTRAINT_END
 +};
 +
  static u64 intel_pmu_event_map(int hw_event)
  {
        return intel_perfmon_event_map[hw_event];
@@@ -245,7 -190,7 +245,7 @@@ static u64 __read_mostly hw_cache_event
                                [PERF_COUNT_HW_CACHE_OP_MAX]
                                [PERF_COUNT_HW_CACHE_RESULT_MAX];
  
 -static const u64 nehalem_hw_cache_event_ids
 +static __initconst u64 nehalem_hw_cache_event_ids
                                [PERF_COUNT_HW_CACHE_MAX]
                                [PERF_COUNT_HW_CACHE_OP_MAX]
                                [PERF_COUNT_HW_CACHE_RESULT_MAX] =
   },
  };
  
 -static const u64 core2_hw_cache_event_ids
 +static __initconst u64 core2_hw_cache_event_ids
                                [PERF_COUNT_HW_CACHE_MAX]
                                [PERF_COUNT_HW_CACHE_OP_MAX]
                                [PERF_COUNT_HW_CACHE_RESULT_MAX] =
   },
  };
  
 -static const u64 atom_hw_cache_event_ids
 +static __initconst u64 atom_hw_cache_event_ids
                                [PERF_COUNT_HW_CACHE_MAX]
                                [PERF_COUNT_HW_CACHE_OP_MAX]
                                [PERF_COUNT_HW_CACHE_RESULT_MAX] =
@@@ -524,7 -469,7 +524,7 @@@ static u64 intel_pmu_raw_event(u64 hw_e
  #define CORE_EVNTSEL_UNIT_MASK                0x0000FF00ULL
  #define CORE_EVNTSEL_EDGE_MASK                0x00040000ULL
  #define CORE_EVNTSEL_INV_MASK         0x00800000ULL
 -#define CORE_EVNTSEL_REG_MASK 0xFF000000ULL
 +#define CORE_EVNTSEL_REG_MASK         0xFF000000ULL
  
  #define CORE_EVNTSEL_MASK             \
        (CORE_EVNTSEL_EVENT_MASK |      \
        return hw_event & CORE_EVNTSEL_MASK;
  }
  
 -static const u64 amd_hw_cache_event_ids
 +static __initconst u64 amd_hw_cache_event_ids
                                [PERF_COUNT_HW_CACHE_MAX]
                                [PERF_COUNT_HW_CACHE_OP_MAX]
                                [PERF_COUNT_HW_CACHE_RESULT_MAX] =
@@@ -987,8 -932,6 +987,8 @@@ static int __hw_perf_event_init(struct 
         */
        hwc->config = ARCH_PERFMON_EVENTSEL_INT;
  
 +      hwc->idx = -1;
 +
        /*
         * Count user and OS events unless requested not to.
         */
@@@ -1286,7 -1229,7 +1286,7 @@@ x86_perf_event_set_period(struct perf_e
                return 0;
  
        /*
-        * If we are way outside a reasoable range then just skip forward:
+        * If we are way outside a reasonable range then just skip forward:
         */
        if (unlikely(left <= -period)) {
                left = period;
@@@ -1391,7 -1334,8 +1391,7 @@@ static void amd_pmu_enable_event(struc
                x86_pmu_enable_event(hwc, idx);
  }
  
 -static int
 -fixed_mode_idx(struct perf_event *event, struct hw_perf_event *hwc)
 +static int fixed_mode_idx(struct hw_perf_event *hwc)
  {
        unsigned int hw_event;
  
        if (!x86_pmu.num_events_fixed)
                return -1;
  
 +      /*
 +       * fixed counters do not take all possible filters
 +       */
 +      if (hwc->config & ARCH_PERFMON_EVENT_FILTER_MASK)
 +              return -1;
 +
        if (unlikely(hw_event == x86_pmu.event_map(PERF_COUNT_HW_INSTRUCTIONS)))
                return X86_PMC_IDX_FIXED_INSTRUCTIONS;
        if (unlikely(hw_event == x86_pmu.event_map(PERF_COUNT_HW_CPU_CYCLES)))
  }
  
  /*
 - * Find a PMC slot for the freshly enabled / scheduled in event:
 + * generic counter allocator: get next free counter
   */
 -static int x86_pmu_enable(struct perf_event *event)
 +static int
 +gen_get_event_idx(struct cpu_hw_events *cpuc, struct hw_perf_event *hwc)
 +{
 +      int idx;
 +
 +      idx = find_first_zero_bit(cpuc->used_mask, x86_pmu.num_events);
 +      return idx == x86_pmu.num_events ? -1 : idx;
 +}
 +
 +/*
 + * intel-specific counter allocator: check event constraints
 + */
 +static int
 +intel_get_event_idx(struct cpu_hw_events *cpuc, struct hw_perf_event *hwc)
 +{
 +      const struct event_constraint *event_constraint;
 +      int i, code;
 +
 +      if (!event_constraints)
 +              goto skip;
 +
 +      code = hwc->config & CORE_EVNTSEL_EVENT_MASK;
 +
 +      for_each_event_constraint(event_constraint, event_constraints) {
 +              if (code == event_constraint->code) {
 +                      for_each_bit(i, event_constraint->idxmsk, X86_PMC_IDX_MAX) {
 +                              if (!test_and_set_bit(i, cpuc->used_mask))
 +                                      return i;
 +                      }
 +                      return -1;
 +              }
 +      }
 +skip:
 +      return gen_get_event_idx(cpuc, hwc);
 +}
 +
 +static int
 +x86_schedule_event(struct cpu_hw_events *cpuc, struct hw_perf_event *hwc)
  {
 -      struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
 -      struct hw_perf_event *hwc = &event->hw;
        int idx;
  
 -      idx = fixed_mode_idx(event, hwc);
 +      idx = fixed_mode_idx(hwc);
        if (idx == X86_PMC_IDX_FIXED_BTS) {
                /* BTS is already occupied. */
                if (test_and_set_bit(idx, cpuc->used_mask))
                        return -EAGAIN;
  
                hwc->config_base        = 0;
 -              hwc->event_base = 0;
 +              hwc->event_base         = 0;
                hwc->idx                = idx;
        } else if (idx >= 0) {
                /*
        } else {
                idx = hwc->idx;
                /* Try to get the previous generic event again */
 -              if (test_and_set_bit(idx, cpuc->used_mask)) {
 +              if (idx == -1 || test_and_set_bit(idx, cpuc->used_mask)) {
  try_generic:
 -                      idx = find_first_zero_bit(cpuc->used_mask,
 -                                                x86_pmu.num_events);
 -                      if (idx == x86_pmu.num_events)
 +                      idx = x86_pmu.get_event_idx(cpuc, hwc);
 +                      if (idx == -1)
                                return -EAGAIN;
  
                        set_bit(idx, cpuc->used_mask);
                        hwc->idx = idx;
                }
 -              hwc->config_base  = x86_pmu.eventsel;
 -              hwc->event_base = x86_pmu.perfctr;
 +              hwc->config_base = x86_pmu.eventsel;
 +              hwc->event_base  = x86_pmu.perfctr;
        }
  
 +      return idx;
 +}
 +
 +/*
 + * Find a PMC slot for the freshly enabled / scheduled in event:
 + */
 +static int x86_pmu_enable(struct perf_event *event)
 +{
 +      struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
 +      struct hw_perf_event *hwc = &event->hw;
 +      int idx;
 +
 +      idx = x86_schedule_event(cpuc, hwc);
 +      if (idx < 0)
 +              return idx;
 +
        perf_events_lapic_init();
  
        x86_pmu.disable(hwc, idx);
@@@ -1964,7 -1852,7 +1964,7 @@@ static __read_mostly struct notifier_bl
        .priority               = 1
  };
  
 -static struct x86_pmu p6_pmu = {
 +static __initconst struct x86_pmu p6_pmu = {
        .name                   = "p6",
        .handle_irq             = p6_pmu_handle_irq,
        .disable_all            = p6_pmu_disable_all,
         */
        .event_bits             = 32,
        .event_mask             = (1ULL << 32) - 1,
 +      .get_event_idx          = intel_get_event_idx,
  };
  
 -static struct x86_pmu intel_pmu = {
 +static __initconst struct x86_pmu intel_pmu = {
        .name                   = "Intel",
        .handle_irq             = intel_pmu_handle_irq,
        .disable_all            = intel_pmu_disable_all,
        .max_period             = (1ULL << 31) - 1,
        .enable_bts             = intel_pmu_enable_bts,
        .disable_bts            = intel_pmu_disable_bts,
 +      .get_event_idx          = intel_get_event_idx,
  };
  
 -static struct x86_pmu amd_pmu = {
 +static __initconst struct x86_pmu amd_pmu = {
        .name                   = "AMD",
        .handle_irq             = amd_pmu_handle_irq,
        .disable_all            = amd_pmu_disable_all,
        .apic                   = 1,
        /* use highest bit to detect overflow */
        .max_period             = (1ULL << 47) - 1,
 +      .get_event_idx          = gen_get_event_idx,
  };
  
 -static int p6_pmu_init(void)
 +static __init int p6_pmu_init(void)
  {
        switch (boot_cpu_data.x86_model) {
        case 1:
        case 7:
        case 8:
        case 11: /* Pentium III */
 +              event_constraints = intel_p6_event_constraints;
                break;
        case 9:
        case 13:
                /* Pentium M */
 +              event_constraints = intel_p6_event_constraints;
                break;
        default:
                pr_cont("unsupported p6 CPU model %d ",
        return 0;
  }
  
 -static int intel_pmu_init(void)
 +static __init int intel_pmu_init(void)
  {
        union cpuid10_edx edx;
        union cpuid10_eax eax;
                       sizeof(hw_cache_event_ids));
  
                pr_cont("Core2 events, ");
 +              event_constraints = intel_core_event_constraints;
                break;
        default:
        case 26:
                memcpy(hw_cache_event_ids, nehalem_hw_cache_event_ids,
                       sizeof(hw_cache_event_ids));
  
 +              event_constraints = intel_nehalem_event_constraints;
                pr_cont("Nehalem/Corei7 events, ");
                break;
        case 28:
        return 0;
  }
  
 -static int amd_pmu_init(void)
 +static __init int amd_pmu_init(void)
  {
        /* Performance-monitoring supported from K7 and later: */
        if (boot_cpu_data.x86 < 6)
@@@ -2224,47 -2105,11 +2224,47 @@@ static const struct pmu pmu = 
        .unthrottle     = x86_pmu_unthrottle,
  };
  
 +static int
 +validate_event(struct cpu_hw_events *cpuc, struct perf_event *event)
 +{
 +      struct hw_perf_event fake_event = event->hw;
 +
 +      if (event->pmu && event->pmu != &pmu)
 +              return 0;
 +
 +      return x86_schedule_event(cpuc, &fake_event) >= 0;
 +}
 +
 +static int validate_group(struct perf_event *event)
 +{
 +      struct perf_event *sibling, *leader = event->group_leader;
 +      struct cpu_hw_events fake_pmu;
 +
 +      memset(&fake_pmu, 0, sizeof(fake_pmu));
 +
 +      if (!validate_event(&fake_pmu, leader))
 +              return -ENOSPC;
 +
 +      list_for_each_entry(sibling, &leader->sibling_list, group_entry) {
 +              if (!validate_event(&fake_pmu, sibling))
 +                      return -ENOSPC;
 +      }
 +
 +      if (!validate_event(&fake_pmu, event))
 +              return -ENOSPC;
 +
 +      return 0;
 +}
 +
  const struct pmu *hw_perf_event_init(struct perf_event *event)
  {
        int err;
  
        err = __hw_perf_event_init(event);
 +      if (!err) {
 +              if (event->group_leader != event)
 +                      err = validate_group(event);
 +      }
        if (err) {
                if (event->destroy)
                        event->destroy(event);
diff --combined arch/x86/kernel/kprobes.c
  #include <linux/preempt.h>
  #include <linux/module.h>
  #include <linux/kdebug.h>
 +#include <linux/kallsyms.h>
  
  #include <asm/cacheflush.h>
  #include <asm/desc.h>
  #include <asm/pgtable.h>
  #include <asm/uaccess.h>
  #include <asm/alternative.h>
 +#include <asm/insn.h>
 +#include <asm/debugreg.h>
  
  void jprobe_return_end(void);
  
  DEFINE_PER_CPU(struct kprobe *, current_kprobe) = NULL;
  DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk);
  
 -#ifdef CONFIG_X86_64
 -#define stack_addr(regs) ((unsigned long *)regs->sp)
 -#else
 -/*
 - * "&regs->sp" looks wrong, but it's correct for x86_32.  x86_32 CPUs
 - * don't save the ss and esp registers if the CPU is already in kernel
 - * mode when it traps.  So for kprobes, regs->sp and regs->ss are not
 - * the [nonexistent] saved stack pointer and ss register, but rather
 - * the top 8 bytes of the pre-int3 stack.  So &regs->sp happens to
 - * point to the top of the pre-int3 stack.
 - */
 -#define stack_addr(regs) ((unsigned long *)&regs->sp)
 -#endif
 +#define stack_addr(regs) ((unsigned long *)kernel_stack_pointer(regs))
  
  #define W(row, b0, b1, b2, b3, b4, b5, b6, b7, b8, b9, ba, bb, bc, bd, be, bf)\
        (((b0##UL << 0x0)|(b1##UL << 0x1)|(b2##UL << 0x2)|(b3##UL << 0x3) |   \
@@@ -97,6 -106,50 +97,6 @@@ static const u32 twobyte_is_boostable[2
        /*      -----------------------------------------------         */
        /*      0  1  2  3  4  5  6  7  8  9  a  b  c  d  e  f          */
  };
 -static const u32 onebyte_has_modrm[256 / 32] = {
 -      /*      0  1  2  3  4  5  6  7  8  9  a  b  c  d  e  f          */
 -      /*      -----------------------------------------------         */
 -      W(0x00, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0) | /* 00 */
 -      W(0x10, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0) , /* 10 */
 -      W(0x20, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0) | /* 20 */
 -      W(0x30, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0) , /* 30 */
 -      W(0x40, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) | /* 40 */
 -      W(0x50, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) , /* 50 */
 -      W(0x60, 0, 0, 1, 1, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0) | /* 60 */
 -      W(0x70, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) , /* 70 */
 -      W(0x80, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) | /* 80 */
 -      W(0x90, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) , /* 90 */
 -      W(0xa0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) | /* a0 */
 -      W(0xb0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) , /* b0 */
 -      W(0xc0, 1, 1, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0) | /* c0 */
 -      W(0xd0, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1) , /* d0 */
 -      W(0xe0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) | /* e0 */
 -      W(0xf0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1)   /* f0 */
 -      /*      -----------------------------------------------         */
 -      /*      0  1  2  3  4  5  6  7  8  9  a  b  c  d  e  f          */
 -};
 -static const u32 twobyte_has_modrm[256 / 32] = {
 -      /*      0  1  2  3  4  5  6  7  8  9  a  b  c  d  e  f          */
 -      /*      -----------------------------------------------         */
 -      W(0x00, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1) | /* 0f */
 -      W(0x10, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0) , /* 1f */
 -      W(0x20, 1, 1, 1, 1, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1) | /* 2f */
 -      W(0x30, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) , /* 3f */
 -      W(0x40, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) | /* 4f */
 -      W(0x50, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) , /* 5f */
 -      W(0x60, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) | /* 6f */
 -      W(0x70, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 1, 1, 1, 1) , /* 7f */
 -      W(0x80, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) | /* 8f */
 -      W(0x90, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) , /* 9f */
 -      W(0xa0, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1) | /* af */
 -      W(0xb0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1) , /* bf */
 -      W(0xc0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0) | /* cf */
 -      W(0xd0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) , /* df */
 -      W(0xe0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) | /* ef */
 -      W(0xf0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0)   /* ff */
 -      /*      -----------------------------------------------         */
 -      /*      0  1  2  3  4  5  6  7  8  9  a  b  c  d  e  f          */
 -};
  #undef W
  
  struct kretprobe_blackpoint kretprobe_blacklist[] = {
@@@ -191,75 -244,6 +191,75 @@@ retry
        }
  }
  
 +/* Recover the probed instruction at addr for further analysis. */
 +static int recover_probed_instruction(kprobe_opcode_t *buf, unsigned long addr)
 +{
 +      struct kprobe *kp;
 +      kp = get_kprobe((void *)addr);
 +      if (!kp)
 +              return -EINVAL;
 +
 +      /*
 +       *  Basically, kp->ainsn.insn holds the original instruction.
 +       *  However, a RIP-relative instruction cannot be single-stepped
 +       *  at a different place, so fix_riprel() tweaks its displacement.
 +       *  In that case, we can't recover the original instruction from
 +       *  kp->ainsn.insn.
 +       *
 +       *  On the other hand, kp->opcode has a copy of the first byte of
 +       *  the probed instruction, which is overwritten by int3. And since
 +       *  the instruction at kp->addr is not modified by kprobes except
 +       *  for the first byte, we can recover the original instruction
 +       *  from it and kp->opcode.
 +       */
 +      memcpy(buf, kp->addr, MAX_INSN_SIZE * sizeof(kprobe_opcode_t));
 +      buf[0] = kp->opcode;
 +      return 0;
 +}
 +
 +/* Dummy buffers for kallsyms_lookup */
 +static char __dummy_buf[KSYM_NAME_LEN];
 +
 +/* Check if paddr is at an instruction boundary */
 +static int __kprobes can_probe(unsigned long paddr)
 +{
 +      int ret;
 +      unsigned long addr, offset = 0;
 +      struct insn insn;
 +      kprobe_opcode_t buf[MAX_INSN_SIZE];
 +
 +      if (!kallsyms_lookup(paddr, NULL, &offset, NULL, __dummy_buf))
 +              return 0;
 +
 +      /* Decode instructions */
 +      addr = paddr - offset;
 +      while (addr < paddr) {
 +              kernel_insn_init(&insn, (void *)addr);
 +              insn_get_opcode(&insn);
 +
 +              /*
 +               * Check if the instruction has been modified by another
 +               * kprobe, in which case we replace the breakpoint by the
 +               * original instruction in our buffer.
 +               */
 +              if (insn.opcode.bytes[0] == BREAKPOINT_INSTRUCTION) {
 +                      ret = recover_probed_instruction(buf, addr);
 +                      if (ret)
 +                              /*
 +                               * Another debugging subsystem might insert
 +                               * this breakpoint. In that case, we can't
 +                               * recover it.
 +                               */
 +                              return 0;
 +                      kernel_insn_init(&insn, buf);
 +              }
 +              insn_get_length(&insn);
 +              addr += insn.length;
 +      }
 +
 +      return (addr == paddr);
 +}
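A concrete trace of the loop above, with hypothetical instruction lengths chosen only to illustrate the boundary check:

	/*
	 * Suppose kallsyms_lookup() says paddr is at offset 6 into its
	 * function, and decoding from the function start yields instruction
	 * lengths 5, 3, 2, ...  The walk visits offsets 0, 5, 8, ... and
	 * exits with addr at offset 8, so addr != paddr and can_probe()
	 * returns 0: offset 6 is in the middle of the second instruction.
	 * A probe at offset 5 would instead end the walk with addr == paddr
	 * and be accepted.
	 */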
 +
  /*
   * Returns non-zero if opcode modifies the interrupt flag.
   */
@@@ -293,30 -277,68 +293,30 @@@ static int __kprobes is_IF_modifier(kpr
  static void __kprobes fix_riprel(struct kprobe *p)
  {
  #ifdef CONFIG_X86_64
 -      u8 *insn = p->ainsn.insn;
 -      s64 disp;
 -      int need_modrm;
 -
 -      /* Skip legacy instruction prefixes.  */
 -      while (1) {
 -              switch (*insn) {
 -              case 0x66:
 -              case 0x67:
 -              case 0x2e:
 -              case 0x3e:
 -              case 0x26:
 -              case 0x64:
 -              case 0x65:
 -              case 0x36:
 -              case 0xf0:
 -              case 0xf3:
 -              case 0xf2:
 -                      ++insn;
 -                      continue;
 -              }
 -              break;
 -      }
 +      struct insn insn;
 +      kernel_insn_init(&insn, p->ainsn.insn);
  
 -      /* Skip REX instruction prefix.  */
 -      if (is_REX_prefix(insn))
 -              ++insn;
 -
 -      if (*insn == 0x0f) {
 -              /* Two-byte opcode.  */
 -              ++insn;
 -              need_modrm = test_bit(*insn,
 -                                    (unsigned long *)twobyte_has_modrm);
 -      } else
 -              /* One-byte opcode.  */
 -              need_modrm = test_bit(*insn,
 -                                    (unsigned long *)onebyte_has_modrm);
 -
 -      if (need_modrm) {
 -              u8 modrm = *++insn;
 -              if ((modrm & 0xc7) == 0x05) {
 -                      /* %rip+disp32 addressing mode */
 -                      /* Displacement follows ModRM byte.  */
 -                      ++insn;
 -                      /*
 -                       * The copied instruction uses the %rip-relative
 -                       * addressing mode.  Adjust the displacement for the
 -                       * difference between the original location of this
 -                       * instruction and the location of the copy that will
 -                       * actually be run.  The tricky bit here is making sure
 -                       * that the sign extension happens correctly in this
 -                       * calculation, since we need a signed 32-bit result to
 -                       * be sign-extended to 64 bits when it's added to the
 -                       * %rip value and yield the same 64-bit result that the
 -                       * sign-extension of the original signed 32-bit
 -                       * displacement would have given.
 -                       */
 -                      disp = (u8 *) p->addr + *((s32 *) insn) -
 -                             (u8 *) p->ainsn.insn;
 -                      BUG_ON((s64) (s32) disp != disp); /* Sanity check.  */
 -                      *(s32 *)insn = (s32) disp;
 -              }
 +      if (insn_rip_relative(&insn)) {
 +              s64 newdisp;
 +              u8 *disp;
 +              insn_get_displacement(&insn);
 +              /*
 +               * The copied instruction uses the %rip-relative addressing
 +               * mode.  Adjust the displacement for the difference between
 +               * the original location of this instruction and the location
 +               * of the copy that will actually be run.  The tricky bit here
 +               * is making sure that the sign extension happens correctly in
 +               * this calculation, since we need a signed 32-bit result to
 +               * be sign-extended to 64 bits when it's added to the %rip
 +               * value and yield the same 64-bit result that the sign-
 +               * extension of the original signed 32-bit displacement would
 +               * have given.
 +               */
 +              newdisp = (u8 *) p->addr + (s64) insn.displacement.value -
 +                        (u8 *) p->ainsn.insn;
 +              BUG_ON((s64) (s32) newdisp != newdisp); /* Sanity check.  */
 +              disp = (u8 *) p->ainsn.insn + insn_offset_displacement(&insn);
 +              *(s32 *) disp = (s32) newdisp;
        }
  #endif
  }
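To make the displacement arithmetic above concrete, a worked example with made-up addresses (nothing here is taken from a real trace):

	/*
	 * The probed instruction sits at p->addr = 0xffffffff81001000 with a
	 * 32-bit displacement of 0x50; its copy lives in the insn slot at
	 * p->ainsn.insn = 0xffffffffa0002000.  A %rip-relative operand means
	 * "address of the next instruction + disp", and the instruction
	 * length is the same for original and copy, so it cancels out:
	 *
	 *   newdisp = p->addr + disp - p->ainsn.insn
	 *           = 0xffffffff81001000 + 0x50 - 0xffffffffa0002000
	 *           = -0x1f000fb0                 (fits in s32, BUG_ON passes)
	 *
	 *   copy target = p->ainsn.insn + insn_len + newdisp
	 *               = p->addr + insn_len + 0x50, the same address the
	 *                 original instruction referenced.
	 */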
@@@ -337,8 -359,6 +337,8 @@@ static void __kprobes arch_copy_kprobe(
  
  int __kprobes arch_prepare_kprobe(struct kprobe *p)
  {
 +      if (!can_probe((unsigned long)p->addr))
 +              return -EILSEQ;
        /* insn: must be on special executable page on x86. */
        p->ainsn.insn = get_insn_slot();
        if (!p->ainsn.insn)
@@@ -452,6 -472,17 +452,6 @@@ static int __kprobes reenter_kprobe(str
  {
        switch (kcb->kprobe_status) {
        case KPROBE_HIT_SSDONE:
 -#ifdef CONFIG_X86_64
 -              /* TODO: Provide re-entrancy from post_kprobes_handler() and
 -               * avoid exception stack corruption while single-stepping on
 -               * the instruction of the new probe.
 -               */
 -              arch_disarm_kprobe(p);
 -              regs->ip = (unsigned long)p->addr;
 -              reset_current_kprobe();
 -              preempt_enable_no_resched();
 -              break;
 -#endif
        case KPROBE_HIT_ACTIVE:
                save_previous_kprobe(kcb);
                set_current_kprobe(p, regs, kcb);
                kcb->kprobe_status = KPROBE_REENTER;
                break;
        case KPROBE_HIT_SS:
 -              if (p == kprobe_running()) {
 -                      regs->flags &= ~X86_EFLAGS_TF;
 -                      regs->flags |= kcb->kprobe_saved_flags;
 -                      return 0;
 -              } else {
 -                      /* A probe has been hit in the codepath leading up
 -                       * to, or just after, single-stepping of a probed
 -                       * instruction. This entire codepath should strictly
 -                       * reside in .kprobes.text section. Raise a warning
 -                       * to highlight this peculiar case.
 -                       */
 -              }
 +              /* A probe has been hit in the codepath leading up to, or just
 +               * after, single-stepping of a probed instruction. This entire
 +               * codepath should strictly reside in .kprobes.text section.
 +               * Raise a BUG or we'll continue in an endless reentering loop
 +               * and eventually a stack overflow.
 +               */
 +              printk(KERN_WARNING "Unrecoverable kprobe detected at %p.\n",
 +                     p->addr);
 +              dump_kprobe(p);
 +              BUG();
        default:
                /* impossible cases */
                WARN_ON(1);
  
  /*
   * Interrupts are disabled on entry as trap3 is an interrupt gate and they
-  * remain disabled thorough out this function.
+  * remain disabled throughout this function.
   */
  static int __kprobes kprobe_handler(struct pt_regs *regs)
  {
@@@ -818,7 -851,7 +818,7 @@@ no_change
  
  /*
   * Interrupts are disabled on entry as trap1 is an interrupt gate and they
-  * remain disabled thoroughout this function.
+  * remain disabled throughout this function.
   */
  static int __kprobes post_kprobe_handler(struct pt_regs *regs)
  {
@@@ -934,14 -967,8 +934,14 @@@ int __kprobes kprobe_exceptions_notify(
                        ret = NOTIFY_STOP;
                break;
        case DIE_DEBUG:
 -              if (post_kprobe_handler(args->regs))
 +              if (post_kprobe_handler(args->regs)) {
 +                      /*
 +                       * Reset the BS bit in dr6 (pointed to by args->err) to
 +                       * denote completion of processing
 +                       */
 +                      (*(unsigned long *)ERR_PTR(args->err)) &= ~DR_STEP;
                        ret = NOTIFY_STOP;
 +              }
                break;
        case DIE_GPF:
                /*
diff --combined arch/x86/mm/kmmio.c
@@@ -203,7 -203,7 +203,7 @@@ static void disarm_kmmio_fault_page(str
   */
  /*
   * Interrupts are disabled on entry as trap3 is an interrupt gate
-  * and they remain disabled thorough out this function.
+  * and they remain disabled throughout this function.
   */
  int kmmio_handler(struct pt_regs *regs, unsigned long addr)
  {
@@@ -302,7 -302,7 +302,7 @@@ no_kmmio
  
  /*
   * Interrupts are disabled on entry as trap1 is an interrupt gate
-  * and they remain disabled thorough out this function.
+  * and they remain disabled throughout this function.
   * This must always get called as the pair to kmmio_handler().
   */
  static int post_kmmio_handler(unsigned long condition, struct pt_regs *regs)
@@@ -540,14 -540,8 +540,14 @@@ kmmio_die_notifier(struct notifier_bloc
        struct die_args *arg = args;
  
        if (val == DIE_DEBUG && (arg->err & DR_STEP))
 -              if (post_kmmio_handler(arg->err, arg->regs) == 1)
 +              if (post_kmmio_handler(arg->err, arg->regs) == 1) {
 +                      /*
 +                       * Reset the BS bit in dr6 (pointed to by arg->err) to
 +                       * denote completion of processing
 +                       */
 +                      (*(unsigned long *)ERR_PTR(arg->err)) &= ~DR_STEP;
                        return NOTIFY_STOP;
 +              }
  
        return NOTIFY_DONE;
  }
diff --combined drivers/ata/sata_fsl.c
@@@ -43,9 -43,9 +43,9 @@@ enum 
        /*
         * SATA-FSL host controller supports a max. of (15+1) direct PRDEs, and
         * chained indirect PRDEs upto a max count of 63.
-        * We are allocating an array of 63 PRDEs contigiously, but PRDE#15 will
+        * We are allocating an array of 63 PRDEs contiguously, but PRDE#15 will
         * be setup as an indirect descriptor, pointing to it's next
-        * (contigious) PRDE. Though chained indirect PRDE arrays are
+        * (contiguous) PRDE. Though chained indirect PRDE arrays are
         * supported,it will be more efficient to use a direct PRDT and
         * a single chain/link to indirect PRDE array/PRDT.
         */
@@@ -314,7 -314,7 +314,7 @@@ static unsigned int sata_fsl_fill_sg(st
        u32 ttl_dwords = 0;
  
        /*
-        * NOTE : direct & indirect prdt's are contigiously allocated
+        * NOTE : direct & indirect prdt's are contiguously allocated
         */
        struct prde *prd = (struct prde *)&((struct command_desc *)
                                            cmd_desc)->prdt;
@@@ -707,17 -707,34 +707,17 @@@ static unsigned int sata_fsl_dev_classi
        return ata_dev_classify(&tf);
  }
  
 -static int sata_fsl_prereset(struct ata_link *link, unsigned long deadline)
 -{
 -      /* FIXME: Never skip softreset, sata_fsl_softreset() is
 -       * combination of soft and hard resets.  sata_fsl_softreset()
 -       * needs to be splitted into soft and hard resets.
 -       */
 -      return 0;
 -}
 -
 -static int sata_fsl_softreset(struct ata_link *link, unsigned int *class,
 +static int sata_fsl_hardreset(struct ata_link *link, unsigned int *class,
                                        unsigned long deadline)
  {
        struct ata_port *ap = link->ap;
 -      struct sata_fsl_port_priv *pp = ap->private_data;
        struct sata_fsl_host_priv *host_priv = ap->host->private_data;
        void __iomem *hcr_base = host_priv->hcr_base;
 -      int pmp = sata_srst_pmp(link);
        u32 temp;
        int i = 0;
        unsigned long start_jiffies;
  
 -      DPRINTK("in xx_softreset\n");
 -
 -      if (pmp != SATA_PMP_CTRL_PORT)
 -              goto issue_srst;
 +      DPRINTK("in xx_hardreset\n");
  
  try_offline_again:
        /*
  
        if (temp & ONLINE) {
                ata_port_printk(ap, KERN_ERR,
 -                              "Softreset failed, not off-lined %d\n", i);
 +                              "Hardreset failed, not off-lined %d\n", i);
  
                /*
                 * Try to offline controller at least twice
                        goto try_offline_again;
        }
  
 -      DPRINTK("softreset, controller off-lined\n");
 +      DPRINTK("hardreset, controller off-lined\n");
        VPRINTK("HStatus = 0x%x\n", ioread32(hcr_base + HSTATUS));
        VPRINTK("HControl = 0x%x\n", ioread32(hcr_base + HCONTROL));
  
  
        if (!(temp & ONLINE)) {
                ata_port_printk(ap, KERN_ERR,
 -                              "Softreset failed, not on-lined\n");
 +                              "Hardreset failed, not on-lined\n");
                goto err;
        }
  
 -      DPRINTK("softreset, controller off-lined & on-lined\n");
 +      DPRINTK("hardreset, controller off-lined & on-lined\n");
        VPRINTK("HStatus = 0x%x\n", ioread32(hcr_base + HSTATUS));
        VPRINTK("HControl = 0x%x\n", ioread32(hcr_base + HCONTROL));
  
                                "No Device OR PHYRDY change,Hstatus = 0x%x\n",
                                ioread32(hcr_base + HSTATUS));
                *class = ATA_DEV_NONE;
 -              goto out;
 +              return 0;
        }
  
        /*
        if ((temp & 0xFF) != 0x18) {
                ata_port_printk(ap, KERN_WARNING, "No Signature Update\n");
                *class = ATA_DEV_NONE;
 -              goto out;
 +              goto do_followup_srst;
        } else {
                ata_port_printk(ap, KERN_INFO,
                                "Signature Update detected @ %d msecs\n",
                                jiffies_to_msecs(jiffies - start_jiffies));
 +              *class = sata_fsl_dev_classify(ap);
 +              return 0;
 +      }
 +
 +do_followup_srst:
 +      /*
 +       * request libATA to perform follow-up softreset
 +       */
 +      return -EAGAIN;
 +
 +err:
 +      return -EIO;
 +}
 +
 +static int sata_fsl_softreset(struct ata_link *link, unsigned int *class,
 +                                      unsigned long deadline)
 +{
 +      struct ata_port *ap = link->ap;
 +      struct sata_fsl_port_priv *pp = ap->private_data;
 +      struct sata_fsl_host_priv *host_priv = ap->host->private_data;
 +      void __iomem *hcr_base = host_priv->hcr_base;
 +      int pmp = sata_srst_pmp(link);
 +      u32 temp;
 +      struct ata_taskfile tf;
 +      u8 *cfis;
 +      u32 Serror;
 +
 +      DPRINTK("in xx_softreset\n");
 +
 +      if (ata_link_offline(link)) {
 +              DPRINTK("PHY reports no device\n");
 +              *class = ATA_DEV_NONE;
 +              return 0;
        }
  
        /*
         * reached here, we can send a command to the target device
         */
  
 -issue_srst:
        DPRINTK("Sending SRST/device reset\n");
  
        ata_tf_init(link->device, &tf);
                ioread32(CA + hcr_base), ioread32(CC + hcr_base));
  
        iowrite32(0xFFFF, CC + hcr_base);
 +      if (pmp != SATA_PMP_CTRL_PORT)
 +              iowrite32(pmp, CQPMP + hcr_base);
        iowrite32(1, CQ + hcr_base);
  
        temp = ata_wait_register(CQ + hcr_base, 0x1, 0x1, 1, 5000);
                VPRINTK("cereg = 0x%x\n", ioread32(hcr_base + CE));
        }
  
 -out:
        return 0;
  
  err:
@@@ -1004,6 -988,18 +1004,6 @@@ static void sata_fsl_error_intr(struct 
                ehi->err_mask |= AC_ERR_ATA_BUS;
                ehi->action |= ATA_EH_SOFTRESET;
  
 -              /*
 -               * Ignore serror in case of fatal errors as we always want
 -               * to do a soft-reset of the FSL SATA controller. Analyzing
 -               * serror may cause libata to schedule a hard-reset action,
 -               * and hard-reset currently does not do controller
 -               * offline/online, causing command timeouts and leads to an
 -               * un-recoverable state, hence make libATA ignore
 -               * autopsy in case of fatal errors.
 -               */
 -
 -              ehi->flags |= ATA_EHI_NO_AUTOPSY;
 -
                freeze = 1;
        }
  
@@@ -1271,8 -1267,8 +1271,8 @@@ static struct ata_port_operations sata_
  
        .freeze = sata_fsl_freeze,
        .thaw = sata_fsl_thaw,
 -      .prereset = sata_fsl_prereset,
        .softreset = sata_fsl_softreset,
 +      .hardreset = sata_fsl_hardreset,
        .pmp_softreset = sata_fsl_softreset,
        .error_handler = sata_fsl_error_handler,
        .post_internal_cmd = sata_fsl_post_internal_cmd,
diff --combined drivers/gpu/drm/radeon/atombios.h
@@@ -1141,7 -1141,7 +1141,7 @@@ typedef struct _LVDS_ENCODER_CONTROL_PA
  /* ucTableFormatRevision=1,ucTableContentRevision=2 */
  typedef struct _LVDS_ENCODER_CONTROL_PARAMETERS_V2 {
        USHORT usPixelClock;    /*  in 10KHz; for bios convenient */
-       UCHAR ucMisc;           /*  see PANEL_ENCODER_MISC_xx defintions below */
+       UCHAR ucMisc;           /*  see PANEL_ENCODER_MISC_xx definitions below */
        UCHAR ucAction;         /*  0: turn off encoder */
        /*  1: setup and turn on encoder */
        UCHAR ucTruncate;       /*  bit0=0: Disable truncate */
@@@ -1424,7 -1424,7 +1424,7 @@@ typedef struct _ATOM_MULTIMEDIA_CONFIG_
  /*  Structures used in FirmwareInfoTable */
  /****************************************************************************/
  
- /*  usBIOSCapability Defintion: */
+ /*  usBIOSCapability Definition: */
  /*  Bit 0 = 0: Bios image is not Posted, =1:Bios image is Posted; */
  /*  Bit 1 = 0: Dual CRTC is not supported, =1: Dual CRTC is supported; */
  /*  Bit 2 = 0: Extended Desktop is not supported, =1: Extended Desktop is supported; */
@@@ -2314,7 -2314,7 +2314,7 @@@ typedef struct _ATOM_SPREAD_SPECTRUM_AS
        UCHAR ucSS_Step;
        UCHAR ucSS_Delay;
        UCHAR ucSS_Id;
 -      UCHAR ucRecommandedRef_Div;
 +      UCHAR ucRecommendedRef_Div;
        UCHAR ucSS_Range;       /* it was reserved for V11 */
  } ATOM_SPREAD_SPECTRUM_ASSIGNMENT;
  
@@@ -2386,7 -2386,7 +2386,7 @@@ typedef struct _ATOM_ANALOG_TV_INFO_V1_
  } ATOM_ANALOG_TV_INFO_V1_2;
  
  /**************************************************************************/
- /*  VRAM usage and their defintions */
+ /*  VRAM usage and their definitions */
  
  /*  One chunk of VRAM used by Bios are for HWICON surfaces,EDID data. */
  /*  Current Mode timing and Dail Timing and/or STD timing data EACH device. They can be broken down as below. */
@@@ -3046,7 -3046,7 +3046,7 @@@ typedef struct _ATOM_ASIC_INTERNAL_SS_I
  #define ATOM_S0_SYSTEM_POWER_STATE_VALUE_DC     2
  #define ATOM_S0_SYSTEM_POWER_STATE_VALUE_LITEAC 3
  
- /* Byte aligned defintion for BIOS usage */
+ /* Byte aligned definition for BIOS usage */
  #define ATOM_S0_CRT1_MONOb0             0x01
  #define ATOM_S0_CRT1_COLORb0            0x02
  #define ATOM_S0_CRT1_MASKb0             (ATOM_S0_CRT1_MONOb0+ATOM_S0_CRT1_COLORb0)
  #define ATOM_S2_DISPLAY_ROTATION_DEGREE_SHIFT 30
  #define ATOM_S2_DISPLAY_ROTATION_ANGLE_MASK   0xC0000000L
  
- /* Byte aligned defintion for BIOS usage */
+ /* Byte aligned definition for BIOS usage */
  #define ATOM_S2_TV1_STANDARD_MASKb0     0x0F
  #define ATOM_S2_CURRENT_BL_LEVEL_MASKb1 0xFF
  #define ATOM_S2_CRT1_DPMS_STATEb2       0x01
  #define ATOM_S3_ALLOW_FAST_PWR_SWITCH   0x40000000L
  #define ATOM_S3_RQST_GPU_USE_MIN_PWR    0x80000000L
  
- /* Byte aligned defintion for BIOS usage */
+ /* Byte aligned definition for BIOS usage */
  #define ATOM_S3_CRT1_ACTIVEb0           0x01
  #define ATOM_S3_LCD1_ACTIVEb0           0x02
  #define ATOM_S3_TV1_ACTIVEb0            0x04
  #define ATOM_S4_LCD1_REFRESH_MASK       0x0000FF00L
  #define ATOM_S4_LCD1_REFRESH_SHIFT      8
  
- /* Byte aligned defintion for BIOS usage */
+ /* Byte aligned definition for BIOS usage */
  #define ATOM_S4_LCD1_PANEL_ID_MASKb0    0x0FF
  #define ATOM_S4_LCD1_REFRESH_MASKb1             ATOM_S4_LCD1_PANEL_ID_MASKb0
  #define ATOM_S4_VRAM_INFO_MASKb2        ATOM_S4_LCD1_PANEL_ID_MASKb0
  #define ATOM_S6_VRI_BRIGHTNESS_CHANGE       0x40000000L
  #define ATOM_S6_CONFIG_DISPLAY_CHANGE_MASK  0x80000000L
  
- /* Byte aligned defintion for BIOS usage */
+ /* Byte aligned definition for BIOS usage */
  #define ATOM_S6_DEVICE_CHANGEb0         0x01
  #define ATOM_S6_SCALER_CHANGEb0         0x02
  #define ATOM_S6_LID_CHANGEb0            0x04
diff --combined drivers/gpu/drm/radeon/r600.c
@@@ -339,10 -339,11 +339,10 @@@ int r600_mc_init(struct radeon_device *
  {
        fixed20_12 a;
        u32 tmp;
 -      int chansize;
 +      int chansize, numchan;
        int r;
  
        /* Get VRAM informations */
 -      rdev->mc.vram_width = 128;
        rdev->mc.vram_is_ddr = true;
        tmp = RREG32(RAMCFG);
        if (tmp & CHANSIZE_OVERRIDE) {
        } else {
                chansize = 32;
        }
 -      if (rdev->family == CHIP_R600) {
 -              rdev->mc.vram_width = 8 * chansize;
 -      } else if (rdev->family == CHIP_RV670) {
 -              rdev->mc.vram_width = 4 * chansize;
 -      } else if ((rdev->family == CHIP_RV610) ||
 -                      (rdev->family == CHIP_RV620)) {
 -              rdev->mc.vram_width = chansize;
 -      } else if ((rdev->family == CHIP_RV630) ||
 -                      (rdev->family == CHIP_RV635)) {
 -              rdev->mc.vram_width = 2 * chansize;
 +      tmp = RREG32(CHMAP);
 +      switch ((tmp & NOOFCHAN_MASK) >> NOOFCHAN_SHIFT) {
 +      case 0:
 +      default:
 +              numchan = 1;
 +              break;
 +      case 1:
 +              numchan = 2;
 +              break;
 +      case 2:
 +              numchan = 4;
 +              break;
 +      case 3:
 +              numchan = 8;
 +              break;
        }
 +      rdev->mc.vram_width = numchan * chansize;
        /* Could aper size report 0 ? */
        rdev->mc.aper_base = drm_get_resource_start(rdev->ddev, 0);
        rdev->mc.aper_size = drm_get_resource_len(rdev->ddev, 0);
                 * AGP so that GPU can catch out of VRAM/AGP access
                 */
                if (rdev->mc.gtt_location > rdev->mc.mc_vram_size) {
-                       /* Enought place before */
+                       /* Enough place before */
                        rdev->mc.vram_location = rdev->mc.gtt_location -
                                                        rdev->mc.mc_vram_size;
                } else if (tmp > rdev->mc.mc_vram_size) {
-                       /* Enought place after */
+                       /* Enough place after */
                        rdev->mc.vram_location = rdev->mc.gtt_location +
                                                        rdev->mc.gtt_size;
                } else {
                        rdev->mc.gtt_location = rdev->mc.mc_vram_size;
                }
        } else {
 -              if (rdev->family == CHIP_RS780 || rdev->family == CHIP_RS880) {
 -                      rdev->mc.vram_location = (RREG32(MC_VM_FB_LOCATION) &
 -                                                              0xFFFF) << 24;
 -                      rdev->mc.gtt_size = radeon_gart_size * 1024 * 1024;
 -                      tmp = rdev->mc.vram_location + rdev->mc.mc_vram_size;
 -                      if ((0xFFFFFFFFUL - tmp) >= rdev->mc.gtt_size) {
 -                              /* Enough place after vram */
 -                              rdev->mc.gtt_location = tmp;
 -                      } else if (rdev->mc.vram_location >= rdev->mc.gtt_size) {
 -                              /* Enough place before vram */
 +              rdev->mc.gtt_size = radeon_gart_size * 1024 * 1024;
 +              rdev->mc.vram_location = (RREG32(MC_VM_FB_LOCATION) &
 +                                                      0xFFFF) << 24;
 +              tmp = rdev->mc.vram_location + rdev->mc.mc_vram_size;
 +              if ((0xFFFFFFFFUL - tmp) >= rdev->mc.gtt_size) {
 +                      /* Enough place after vram */
 +                      rdev->mc.gtt_location = tmp;
 +              } else if (rdev->mc.vram_location >= rdev->mc.gtt_size) {
 +                      /* Enough place before vram */
 +                      rdev->mc.gtt_location = 0;
 +              } else {
 +                      /* Not enough place after or before shrink
 +                       * gart size
 +                       */
 +                      if (rdev->mc.vram_location > (0xFFFFFFFFUL - tmp)) {
                                rdev->mc.gtt_location = 0;
 +                              rdev->mc.gtt_size = rdev->mc.vram_location;
                        } else {
 -                              /* Not enough place after or before shrink
 -                               * gart size
 -                               */
 -                              if (rdev->mc.vram_location > (0xFFFFFFFFUL - tmp)) {
 -                                      rdev->mc.gtt_location = 0;
 -                                      rdev->mc.gtt_size = rdev->mc.vram_location;
 -                              } else {
 -                                      rdev->mc.gtt_location = tmp;
 -                                      rdev->mc.gtt_size = 0xFFFFFFFFUL - tmp;
 -                              }
 +                              rdev->mc.gtt_location = tmp;
 +                              rdev->mc.gtt_size = 0xFFFFFFFFUL - tmp;
                        }
 -                      rdev->mc.gtt_location = rdev->mc.mc_vram_size;
 -              } else {
 -                      rdev->mc.vram_location = 0x00000000UL;
 -                      rdev->mc.gtt_location = rdev->mc.mc_vram_size;
 -                      rdev->mc.gtt_size = radeon_gart_size * 1024 * 1024;
                }
 +              rdev->mc.gtt_location = rdev->mc.mc_vram_size;
        }
        rdev->mc.vram_start = rdev->mc.vram_location;
        rdev->mc.vram_end = rdev->mc.vram_location + rdev->mc.mc_vram_size - 1;
@@@ -858,8 -859,7 +858,8 @@@ void r600_gpu_init(struct radeon_devic
            ((rdev->family) == CHIP_RV630) ||
            ((rdev->family) == CHIP_RV610) ||
            ((rdev->family) == CHIP_RV620) ||
 -          ((rdev->family) == CHIP_RS780)) {
 +          ((rdev->family) == CHIP_RS780) ||
 +          ((rdev->family) == CHIP_RS880)) {
                WREG32(DB_DEBUG, PREZ_MUST_WAIT_FOR_POSTZ_DONE);
        } else {
                WREG32(DB_DEBUG, 0);
        tmp = RREG32(SQ_MS_FIFO_SIZES);
        if (((rdev->family) == CHIP_RV610) ||
            ((rdev->family) == CHIP_RV620) ||
 -          ((rdev->family) == CHIP_RS780)) {
 +          ((rdev->family) == CHIP_RS780) ||
 +          ((rdev->family) == CHIP_RS880)) {
                tmp = (CACHE_FIFO_SIZE(0xa) |
                       FETCH_FIFO_HIWATER(0xa) |
                       DONE_FIFO_HIWATER(0xe0) |
                                            NUM_ES_STACK_ENTRIES(0));
        } else if (((rdev->family) == CHIP_RV610) ||
                   ((rdev->family) == CHIP_RV620) ||
 -                 ((rdev->family) == CHIP_RS780)) {
 +                 ((rdev->family) == CHIP_RS780) ||
 +                 ((rdev->family) == CHIP_RS880)) {
                /* no vertex cache */
                sq_config &= ~VC_ENABLE;
  
  
        if (((rdev->family) == CHIP_RV610) ||
            ((rdev->family) == CHIP_RV620) ||
 -          ((rdev->family) == CHIP_RS780)) {
 +          ((rdev->family) == CHIP_RS780) ||
 +          ((rdev->family) == CHIP_RS880)) {
                WREG32(VGT_CACHE_INVALIDATION, CACHE_INVALIDATION(TC_ONLY));
        } else {
                WREG32(VGT_CACHE_INVALIDATION, CACHE_INVALIDATION(VC_AND_TC));
        tmp = rdev->config.r600.max_pipes * 16;
        switch (rdev->family) {
        case CHIP_RV610:
 -      case CHIP_RS780:
        case CHIP_RV620:
 +      case CHIP_RS780:
 +      case CHIP_RS880:
                tmp += 32;
                break;
        case CHIP_RV670:
  
        switch (rdev->family) {
        case CHIP_RV610:
 -      case CHIP_RS780:
        case CHIP_RV620:
 +      case CHIP_RS780:
 +      case CHIP_RS880:
                tmp = TC_L2_SIZE(8);
                break;
        case CHIP_RV630:
@@@ -1272,17 -1267,19 +1272,17 @@@ int r600_cp_resume(struct radeon_devic
  
        /* Set ring buffer size */
        rb_bufsz = drm_order(rdev->cp.ring_size / 8);
 +      tmp = RB_NO_UPDATE | (drm_order(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz;
  #ifdef __BIG_ENDIAN
 -      WREG32(CP_RB_CNTL, BUF_SWAP_32BIT | RB_NO_UPDATE |
 -              (drm_order(4096/8) << 8) | rb_bufsz);
 -#else
 -      WREG32(CP_RB_CNTL, RB_NO_UPDATE | (drm_order(4096/8) << 8) | rb_bufsz);
 +      tmp |= BUF_SWAP_32BIT;
  #endif
 +      WREG32(CP_RB_CNTL, tmp);
        WREG32(CP_SEM_WAIT_TIMER, 0x4);
  
        /* Set the write pointer delay */
        WREG32(CP_RB_WPTR_DELAY, 0);
  
        /* Initialize the ring buffer's read and write pointers */
 -      tmp = RREG32(CP_RB_CNTL);
        WREG32(CP_RB_CNTL, tmp | RB_RPTR_WR_ENA);
        WREG32(CP_RB_RPTR_WR, 0);
        WREG32(CP_RB_WPTR, 0);
@@@ -1403,7 -1400,7 +1403,7 @@@ int r600_wb_enable(struct radeon_devic
        int r;
  
        if (rdev->wb.wb_obj == NULL) {
 -              r = radeon_object_create(rdev, NULL, 4096, true,
 +              r = radeon_object_create(rdev, NULL, RADEON_GPU_PAGE_SIZE, true,
                                RADEON_GEM_DOMAIN_GTT, false, &rdev->wb.wb_obj);
                if (r) {
                        dev_warn(rdev->dev, "failed to create WB buffer (%d).\n", r);
@@@ -1453,8 -1450,8 +1453,8 @@@ int r600_copy_blit(struct radeon_devic
                   uint64_t src_offset, uint64_t dst_offset,
                   unsigned num_pages, struct radeon_fence *fence)
  {
 -      r600_blit_prepare_copy(rdev, num_pages * 4096);
 -      r600_kms_blit_copy(rdev, src_offset, dst_offset, num_pages * 4096);
 +      r600_blit_prepare_copy(rdev, num_pages * RADEON_GPU_PAGE_SIZE);
 +      r600_kms_blit_copy(rdev, src_offset, dst_offset, num_pages * RADEON_GPU_PAGE_SIZE);
        r600_blit_done_copy(rdev, fence);
        return 0;
  }
@@@ -1635,13 -1632,10 +1635,13 @@@ int r600_init(struct radeon_device *rde
        r600_scratch_init(rdev);
        /* Initialize surface registers */
        radeon_surface_init(rdev);
 +      /* Initialize clocks */
        radeon_get_clock_info(rdev->ddev);
        r = radeon_clocks_init(rdev);
        if (r)
                return r;
 +      /* Initialize power management */
 +      radeon_pm_init(rdev);
        /* Fence driver */
        r = radeon_fence_driver_init(rdev);
        if (r)
@@@ -295,12 -295,6 +295,12 @@@ static int radeon_move_vram_ram(struct 
        if (unlikely(r)) {
                return r;
        }
 +
 +      r = ttm_tt_set_placement_caching(bo->ttm, tmp_mem.placement);
 +      if (unlikely(r)) {
 +              goto out_cleanup;
 +      }
 +
        r = ttm_tt_bind(bo->ttm, &tmp_mem);
        if (unlikely(r)) {
                goto out_cleanup;
@@@ -378,7 -372,7 +378,7 @@@ static int radeon_bo_move(struct ttm_bu
             new_mem->mem_type == TTM_PL_SYSTEM) ||
            (old_mem->mem_type == TTM_PL_SYSTEM &&
             new_mem->mem_type == TTM_PL_TT)) {
-               /* bind is enought */
+               /* bind is enough */
                radeon_move_null(bo, new_mem);
                return 0;
        }
@@@ -529,11 -529,11 +529,11 @@@ static void rv770_gpu_init(struct radeo
        if (rdev->family == CHIP_RV770)
                gb_tiling_config |= BANK_TILING(1);
        else
 -              gb_tiling_config |= BANK_TILING((mc_arb_ramcfg & NOOFBANK_SHIFT) >> NOOFBANK_MASK);
 +              gb_tiling_config |= BANK_TILING((mc_arb_ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT);
  
        gb_tiling_config |= GROUP_SIZE(0);
  
 -      if (((mc_arb_ramcfg & NOOFROWS_MASK) & NOOFROWS_SHIFT) > 3) {
 +      if (((mc_arb_ramcfg & NOOFROWS_MASK) >> NOOFROWS_SHIFT) > 3) {
                gb_tiling_config |= ROW_TILING(3);
                gb_tiling_config |= SAMPLE_SPLIT(3);
        } else {
  
        /* set HW defaults for 3D engine */
        WREG32(CP_QUEUE_THRESHOLDS, (ROQ_IB1_START(0x16) |
 -                                              ROQ_IB2_START(0x2b)));
 +                                   ROQ_IB2_START(0x2b)));
  
        WREG32(CP_MEQ_THRESHOLDS, STQ_SPLIT(0x30));
  
        WREG32(TA_CNTL_AUX, (DISABLE_CUBE_ANISO |
 -                                      SYNC_GRADIENT |
 -                                      SYNC_WALKER |
 -                                      SYNC_ALIGNER));
 +                           SYNC_GRADIENT |
 +                           SYNC_WALKER |
 +                           SYNC_ALIGNER));
  
        sx_debug_1 = RREG32(SX_DEBUG_1);
        sx_debug_1 |= ENABLE_NEW_SMX_ADDRESS;
        WREG32(SMX_DC_CTL0, smx_dc_ctl0);
  
        WREG32(SMX_EVENT_CTL, (ES_FLUSH_CTL(4) |
 -                                        GS_FLUSH_CTL(4) |
 -                                        ACK_FLUSH_CTL(3) |
 -                                        SYNC_FLUSH_CTL));
 +                             GS_FLUSH_CTL(4) |
 +                             ACK_FLUSH_CTL(3) |
 +                             SYNC_FLUSH_CTL));
  
        if (rdev->family == CHIP_RV770)
                WREG32(DB_DEBUG3, DB_CLK_OFF_DELAY(0x1f));
        }
  
        WREG32(SX_EXPORT_BUFFER_SIZES, (COLOR_BUFFER_SIZE((rdev->config.rv770.sx_max_export_size / 4) - 1) |
 -                                                 POSITION_BUFFER_SIZE((rdev->config.rv770.sx_max_export_pos_size / 4) - 1) |
 -                                                 SMX_BUFFER_SIZE((rdev->config.rv770.sx_max_export_smx_size / 4) - 1)));
 +                                      POSITION_BUFFER_SIZE((rdev->config.rv770.sx_max_export_pos_size / 4) - 1) |
 +                                      SMX_BUFFER_SIZE((rdev->config.rv770.sx_max_export_smx_size / 4) - 1)));
  
        WREG32(PA_SC_FIFO_SIZE, (SC_PRIM_FIFO_SIZE(rdev->config.rv770.sc_prim_fifo_size) |
 -                                               SC_HIZ_TILE_FIFO_SIZE(rdev->config.rv770.sc_hiz_tile_fifo_size) |
 -                                               SC_EARLYZ_TILE_FIFO_SIZE(rdev->config.rv770.sc_earlyz_tile_fifo_fize)));
 +                               SC_HIZ_TILE_FIFO_SIZE(rdev->config.rv770.sc_hiz_tile_fifo_size) |
 +                               SC_EARLYZ_TILE_FIFO_SIZE(rdev->config.rv770.sc_earlyz_tile_fifo_fize)));
  
        WREG32(PA_SC_MULTI_CHIP_CNTL, 0);
  
@@@ -774,36 -774,14 +774,36 @@@ int rv770_mc_init(struct radeon_device 
  {
        fixed20_12 a;
        u32 tmp;
 +      int chansize, numchan;
        int r;
  
        /* Get VRAM information */
 -      /* FIXME: Don't know how to determine vram width, need to check
 -       * vram_width usage
 -       */
 -      rdev->mc.vram_width = 128;
        rdev->mc.vram_is_ddr = true;
 +      tmp = RREG32(MC_ARB_RAMCFG);
 +      if (tmp & CHANSIZE_OVERRIDE) {
 +              chansize = 16;
 +      } else if (tmp & CHANSIZE_MASK) {
 +              chansize = 64;
 +      } else {
 +              chansize = 32;
 +      }
 +      tmp = RREG32(MC_SHARED_CHMAP);
 +      switch ((tmp & NOOFCHAN_MASK) >> NOOFCHAN_SHIFT) {
 +      case 0:
 +      default:
 +              numchan = 1;
 +              break;
 +      case 1:
 +              numchan = 2;
 +              break;
 +      case 2:
 +              numchan = 4;
 +              break;
 +      case 3:
 +              numchan = 8;
 +              break;
 +      }
 +      rdev->mc.vram_width = numchan * chansize;
        /* Could aper size report 0 ? */
        rdev->mc.aper_base = drm_get_resource_start(rdev->ddev, 0);
        rdev->mc.aper_size = drm_get_resource_len(rdev->ddev, 0);
                 * AGP so that GPU can catch out of VRAM/AGP access
                 */
                if (rdev->mc.gtt_location > rdev->mc.mc_vram_size) {
-                       /* Enought place before */
+                       /* Enough place before */
                        rdev->mc.vram_location = rdev->mc.gtt_location -
                                                        rdev->mc.mc_vram_size;
                } else if (tmp > rdev->mc.mc_vram_size) {
-                       /* Enought place after */
+                       /* Enough place after */
                        rdev->mc.vram_location = rdev->mc.gtt_location +
                                                        rdev->mc.gtt_size;
                } else {
@@@ -983,13 -961,10 +983,13 @@@ int rv770_init(struct radeon_device *rd
        r600_scratch_init(rdev);
        /* Initialize surface registers */
        radeon_surface_init(rdev);
 +      /* Initialize clocks */
        radeon_get_clock_info(rdev->ddev);
        r = radeon_clocks_init(rdev);
        if (r)
                return r;
 +      /* Initialize power management */
 +      radeon_pm_init(rdev);
        /* Fence driver */
        r = radeon_fence_driver_init(rdev);
        if (r)
@@@ -385,7 -385,7 +385,7 @@@ static int SuperTraceMessageInput (void
                    }
                    break;
                  default:
-                   diva_mnt_internal_dprintf (0, DLI_ERR, "Unknon IDI Ind (DMA mode): %02x", Ind);
+                   diva_mnt_internal_dprintf (0, DLI_ERR, "Unknown IDI Ind (DMA mode): %02x", Ind);
                }
                p += (this_ind_length+1);
                total_length -= (4 + this_ind_length);
              }
              break;
            default:
-             diva_mnt_internal_dprintf (0, DLI_ERR, "Unknon IDI Ind: %02x", Ind);
+             diva_mnt_internal_dprintf (0, DLI_ERR, "Unknown IDI Ind: %02x", Ind);
          }
        }
      }
@@@ -959,9 -959,8 +959,9 @@@ static int process_idi_event (diva_stra
        }
        if (!strncmp("State\\Layer2 No1", path, pVar->path_length)) {
                char* tmp = &pLib->lines[0].pInterface->Layer2[0];
 -    dword l2_state;
 -    diva_strace_read_uint (pVar, &l2_state);
 +              dword l2_state;
 +              if (diva_strace_read_uint(pVar, &l2_state))
 +                      return -1;
  
                switch (l2_state) {
                        case 0:
@@@ -817,8 -817,8 +817,8 @@@ collect_rx_frame(usb_fifo * fifo, __u8 
        }
        /* we have a complete hdlc packet */
        if (finish) {
 -              if ((!fifo->skbuff->data[fifo->skbuff->len - 1])
 -                  && (fifo->skbuff->len > 3)) {
 +              if (fifo->skbuff->len > 3 &&
 +                              !fifo->skbuff->data[fifo->skbuff->len - 1]) {
  
                        if (fifon == HFCUSB_D_RX) {
                                DBG(HFCUSB_DBG_DCHANNEL,
@@@ -1086,7 -1086,7 +1086,7 @@@ hfc_usb_l2l1(struct hisax_if *my_hisax_
                        break;
                default:
                        DBG(HFCUSB_DBG_STATES,
-                              "HFC_USB: hfc_usb_d_l2l1: unkown state : %#x", pr);
+                              "HFC_USB: hfc_usb_d_l2l1: unknown state : %#x", pr);
                        break;
        }
  }
@@@ -836,7 -836,7 +836,7 @@@ isdn_ppp_write(int min, struct file *fi
                        unsigned short hl;
                        struct sk_buff *skb;
                        /*
-                        * we need to reserve enought space in front of
+                        * we need to reserve enough space in front of
                         * sk_buff. old call to dev_alloc_skb only reserved
                         * 16 bytes, now we are looking at what the driver wants
                         */
@@@ -1326,7 -1326,7 +1326,7 @@@ isdn_ppp_xmit(struct sk_buff *skb, stru
                struct sk_buff *new_skb;
                unsigned short hl;
                /*
-                * we need to reserve enought space in front of
+                * we need to reserve enough space in front of
                 * sk_buff. old call to dev_alloc_skb only reserved
                 * 16 bytes, now we are looking at what the driver wants.
                 */
@@@ -1535,8 -1535,10 +1535,8 @@@ static int isdn_ppp_mp_bundle_array_ini
        int sz = ISDN_MAX_CHANNELS*sizeof(ippp_bundle);
        if( (isdn_ppp_bundle_arr = kzalloc(sz, GFP_KERNEL)) == NULL )
                return -ENOMEM;
 -      for (i = 0; i < ISDN_MAX_CHANNELS; i++) {
 +      for( i = 0; i < ISDN_MAX_CHANNELS; i++ )
                spin_lock_init(&isdn_ppp_bundle_arr[i].lock);
 -              skb_queue_head_init(&isdn_ppp_bundle_arr[i].frags);
 -      }
        return 0;
  }
  
@@@ -1569,7 -1571,7 +1569,7 @@@ static int isdn_ppp_mp_init( isdn_net_l
                if ((lp->netdev->pb = isdn_ppp_mp_bundle_alloc()) == NULL)
                        return -ENOMEM;
                lp->next = lp->last = lp;       /* nobody else in a queue */
 -              skb_queue_head_init(&lp->netdev->pb->frags);
 +              lp->netdev->pb->frags = NULL;
                lp->netdev->pb->frames = 0;
                lp->netdev->pb->seq = UINT_MAX;
        }
  
  static u32 isdn_ppp_mp_get_seq( int short_seq, 
                                        struct sk_buff * skb, u32 last_seq );
 -static void isdn_ppp_mp_discard(ippp_bundle *mp, struct sk_buff *from,
 -                              struct sk_buff *to);
 -static void isdn_ppp_mp_reassembly(isdn_net_dev *net_dev, isdn_net_local *lp,
 -                                 struct sk_buff *from, struct sk_buff *to,
 -                                 u32 lastseq);
 -static void isdn_ppp_mp_free_skb(ippp_bundle *mp, struct sk_buff *skb);
 +static struct sk_buff * isdn_ppp_mp_discard( ippp_bundle * mp,
 +                      struct sk_buff * from, struct sk_buff * to );
 +static void isdn_ppp_mp_reassembly( isdn_net_dev * net_dev, isdn_net_local * lp,
 +                              struct sk_buff * from, struct sk_buff * to );
 +static void isdn_ppp_mp_free_skb( ippp_bundle * mp, struct sk_buff * skb );
  static void isdn_ppp_mp_print_recv_pkt( int slot, struct sk_buff * skb );
  
  static void isdn_ppp_mp_receive(isdn_net_dev * net_dev, isdn_net_local * lp, 
 -                              struct sk_buff *skb)
 +                                                      struct sk_buff *skb)
  {
 -      struct sk_buff *newfrag, *frag, *start, *nextf;
 -      u32 newseq, minseq, thisseq;
 -      isdn_mppp_stats *stats;
        struct ippp_struct *is;
 +      isdn_net_local * lpq;
 +      ippp_bundle * mp;
 +      isdn_mppp_stats * stats;
 +      struct sk_buff * newfrag, * frag, * start, *nextf;
 +      u32 newseq, minseq, thisseq;
        unsigned long flags;
        int slot;
  
        spin_lock_irqsave(&net_dev->pb->lock, flags);
 -      mp = net_dev->pb;
 -      stats = &mp->stats;
 +      mp = net_dev->pb;
 +        stats = &mp->stats;
        slot = lp->ppp_slot;
        if (slot < 0 || slot >= ISDN_MAX_CHANNELS) {
                printk(KERN_ERR "%s: lp->ppp_slot(%d)\n",
                return;
        }
        is = ippp_table[slot];
 -      if (++mp->frames > stats->max_queue_len)
 +      if( ++mp->frames > stats->max_queue_len )
                stats->max_queue_len = mp->frames;
 -
 +      
        if (is->debug & 0x8)
                isdn_ppp_mp_print_recv_pkt(lp->ppp_slot, skb);
  
 -      newseq = isdn_ppp_mp_get_seq(is->mpppcfg & SC_IN_SHORT_SEQ,
 -                                   skb, is->last_link_seqno);
 +      newseq = isdn_ppp_mp_get_seq(is->mpppcfg & SC_IN_SHORT_SEQ, 
 +                                              skb, is->last_link_seqno);
 +
  
        /* if this packet seq # is less than last already processed one,
         * toss it right away, but check for sequence start case first 
         */
 -      if (mp->seq > MP_LONGSEQ_MAX && (newseq & MP_LONGSEQ_MAXBIT)) {
 +      if( mp->seq > MP_LONGSEQ_MAX && (newseq & MP_LONGSEQ_MAXBIT) ) {
                mp->seq = newseq;       /* the first packet: required for
                                         * rfc1990 non-compliant clients --
                                         * prevents constant packet toss */
                spin_unlock_irqrestore(&mp->lock, flags);
                return;
        }
 -
 +      
        /* find the minimum received sequence number over all links */
        is->last_link_seqno = minseq = newseq;
        for (lpq = net_dev->queue;;) {
                                         * packets */
        newfrag = skb;
  
 -      /* Insert new fragment into the proper sequence slot.  */
 -      skb_queue_walk(&mp->frags, frag) {
 -              if (MP_SEQ(frag) == newseq) {
 -                      isdn_ppp_mp_free_skb(mp, newfrag);
 -                      newfrag = NULL;
 -                      break;
 -              }
 -              if (MP_LT(newseq, MP_SEQ(frag))) {
 -                      __skb_queue_before(&mp->frags, frag, newfrag);
 -                      newfrag = NULL;
 -                      break;
 -              }
 -      }
 -      if (newfrag)
 -              __skb_queue_tail(&mp->frags, newfrag);
 +      /* if this new fragment is before the first one, then enqueue it now. */
 +      if ((frag = mp->frags) == NULL || MP_LT(newseq, MP_SEQ(frag))) {
 +              newfrag->next = frag;
 +              mp->frags = frag = newfrag;
 +              newfrag = NULL;
 +      }
  
 -      frag = skb_peek(&mp->frags);
 -      start = ((MP_FLAGS(frag) & MP_BEGIN_FRAG) &&
 -               (MP_SEQ(frag) == mp->seq)) ? frag : NULL;
 -      if (!start)
 -              goto check_overflow;
 +      start = MP_FLAGS(frag) & MP_BEGIN_FRAG &&
 +                              MP_SEQ(frag) == mp->seq ? frag : NULL;
  
 -      /* main fragment traversing loop
 +      /* 
 +       * main fragment traversing loop
         *
         * try to accomplish several tasks:
 +       * - insert new fragment into the proper sequence slot (once that's done
 +       *   newfrag will be set to NULL)
         * - reassemble any complete fragment sequence (non-null 'start'
-        *   indicates there is a continguous sequence present)
+        *   indicates there is a contiguous sequence present)
         * - discard any incomplete sequences that are below minseq -- due
         *   to the fact that sender always increment sequence number, if there
         *   is an incomplete sequence below minseq, no new fragments would
         *   come to complete such sequence and it should be discarded
         *
         * loop completes when we accomplished the following tasks:
 +       * - new fragment is inserted in the proper sequence ('newfrag' is 
 +       *   set to NULL)
         * - we hit a gap in the sequence, so no reassembly/processing is 
         *   possible ('start' would be set to NULL)
         *
         * algorithm for this code is derived from code in the book
         * 'PPP Design And Debugging' by James Carlson (Addison-Wesley)
         */
 -      skb_queue_walk_safe(&mp->frags, frag, nextf) {
 -              thisseq = MP_SEQ(frag);
 -
 -              /* check for misplaced start */
 -              if (start != frag && (MP_FLAGS(frag) & MP_BEGIN_FRAG)) {
 -                      printk(KERN_WARNING"isdn_mppp(seq %d): new "
 -                             "BEGIN flag with no prior END", thisseq);
 -                      stats->seqerrs++;
 -                      stats->frame_drops++;
 -                      isdn_ppp_mp_discard(mp, start, frag);
 -                      start = frag;
 -              } else if (MP_LE(thisseq, minseq)) {            
 -                      if (MP_FLAGS(frag) & MP_BEGIN_FRAG)
 +      while (start != NULL || newfrag != NULL) {
 +
 +              thisseq = MP_SEQ(frag);
 +              nextf = frag->next;
 +
 +              /* drop any duplicate fragments */
 +              if (newfrag != NULL && thisseq == newseq) {
 +                              isdn_ppp_mp_free_skb(mp, newfrag);
 +                              newfrag = NULL;
 +              }
 +
 +              /* insert new fragment before next element if possible. */
 +              if (newfrag != NULL && (nextf == NULL || 
 +                                              MP_LT(newseq, MP_SEQ(nextf)))) {
 +                              newfrag->next = nextf;
 +                              frag->next = nextf = newfrag;
 +                              newfrag = NULL;
 +              }
 +
 +              if (start != NULL) {
 +                      /* check for misplaced start */
 +                              if (start != frag && (MP_FLAGS(frag) & MP_BEGIN_FRAG)) {
 +                              printk(KERN_WARNING"isdn_mppp(seq %d): new "
 +                                    "BEGIN flag with no prior END", thisseq);
 +                              stats->seqerrs++;
 +                              stats->frame_drops++;
 +                              start = isdn_ppp_mp_discard(mp, start,frag);
 +                              nextf = frag->next;
 +                              }
 +              } else if (MP_LE(thisseq, minseq)) {            
 +                              if (MP_FLAGS(frag) & MP_BEGIN_FRAG)
                                start = frag;
 -                      else {
 +                              else {
                                if (MP_FLAGS(frag) & MP_END_FRAG)
 -                                      stats->frame_drops++;
 -                              __skb_unlink(skb, &mp->frags);
 +                                      stats->frame_drops++;
 +                              if( mp->frags == frag )
 +                                      mp->frags = nextf;      
                                isdn_ppp_mp_free_skb(mp, frag);
 +                              frag = nextf;
                                continue;
 -                      }
 +                              }
                }
 -
 -              /* if we have end fragment, then we have full reassembly
 -               * sequence -- reassemble and process packet now
 +              
 +              /* if start is non-null and we have end fragment, then
 +               * we have full reassembly sequence -- reassemble 
 +               * and process packet now
                 */
 -              if (MP_FLAGS(frag) & MP_END_FRAG) {
 -                      minseq = mp->seq = (thisseq+1) & MP_LONGSEQ_MASK;
 -                      /* Reassemble the packet then dispatch it */
 -                      isdn_ppp_mp_reassembly(net_dev, lp, start, frag, thisseq);
 +              if (start != NULL && (MP_FLAGS(frag) & MP_END_FRAG)) {
 +                              minseq = mp->seq = (thisseq+1) & MP_LONGSEQ_MASK;
 +                              /* Reassemble the packet then dispatch it */
 +                      isdn_ppp_mp_reassembly(net_dev, lp, start, nextf);
 +      
 +                              start = NULL;
 +                              frag = NULL;
  
 -                      start = NULL;
 -                      frag = NULL;
 -              }
 +                              mp->frags = nextf;
 +              }
  
                /* check if need to update start pointer: if we just
                 * reassembled the packet and sequence is contiguous
                 * below low watermark and set start to the next frag or
                 * clear start ptr.
                 */ 
 -              if (nextf != (struct sk_buff *)&mp->frags && 
 +              if (nextf != NULL && 
                    ((thisseq+1) & MP_LONGSEQ_MASK) == MP_SEQ(nextf)) {
 -                      /* if we just reassembled and the next one is here, 
 -                       * then start another reassembly.
 -                       */
 -                      if (frag == NULL) {
 +                              /* if we just reassembled and the next one is here, 
 +                       * then start another reassembly. */
 +
 +                              if (frag == NULL) {
                                if (MP_FLAGS(nextf) & MP_BEGIN_FRAG)
 -                                      start = nextf;
 -                              else {
 -                                      printk(KERN_WARNING"isdn_mppp(seq %d):"
 -                                             " END flag with no following "
 -                                             "BEGIN", thisseq);
 +                                      start = nextf;
 +                              else
 +                              {
 +                                      printk(KERN_WARNING"isdn_mppp(seq %d):"
 +                                              " END flag with no following "
 +                                              "BEGIN", thisseq);
                                        stats->seqerrs++;
                                }
                        }
 -              } else {
 -                      if (nextf != (struct sk_buff *)&mp->frags &&
 -                          frag != NULL &&
 -                          MP_LT(thisseq, minseq)) {
 +
 +              } else {
 +                      if ( nextf != NULL && frag != NULL &&
 +                                              MP_LT(thisseq, minseq)) {
                                /* we've got a break in the sequence
                                 * and we not at the end yet
                                 * and we did not just reassembled
                                 * discard all the frames below low watermark 
                                 * and start over */
                                stats->frame_drops++;
 -                              isdn_ppp_mp_discard(mp, start, nextf);
 +                              mp->frags = isdn_ppp_mp_discard(mp,start,nextf);
                        }
                        /* break in the sequence, no reassembly */
 -                      start = NULL;
 -              }
 -              if (!start)
 -                      break;
 -      }
 -
 -check_overflow:
 +                              start = NULL;
 +              }
 +                              
 +              frag = nextf;
 +      }       /* while -- main loop */
 +      
 +      if (mp->frags == NULL)
 +              mp->frags = frag;
 +              
        /* rather straightforward way to deal with (not very) possible 
 -       * queue overflow
 -       */
 +       * queue overflow */
        if (mp->frames > MP_MAX_QUEUE_LEN) {
                stats->overflows++;
 -              skb_queue_walk_safe(&mp->frags, frag, nextf) {
 -                      if (mp->frames <= MP_MAX_QUEUE_LEN)
 -                              break;
 -                      __skb_unlink(frag, &mp->frags);
 -                      isdn_ppp_mp_free_skb(mp, frag);
 +              while (mp->frames > MP_MAX_QUEUE_LEN) {
 +                      frag = mp->frags->next;
 +                      isdn_ppp_mp_free_skb(mp, mp->frags);
 +                      mp->frags = frag;
                }
        }
        spin_unlock_irqrestore(&mp->lock, flags);
  }
  
 -static void isdn_ppp_mp_cleanup(isdn_net_local *lp)
 +static void isdn_ppp_mp_cleanup( isdn_net_local * lp )
  {
 -      struct sk_buff *skb, *tmp;
 -
 -      skb_queue_walk_safe(&lp->netdev->pb->frags, skb, tmp) {
 -              __skb_unlink(skb, &lp->netdev->pb->frags);
 -              isdn_ppp_mp_free_skb(lp->netdev->pb, skb);
 -      }
 +      struct sk_buff * frag = lp->netdev->pb->frags;
 +      struct sk_buff * nextfrag;
 +      while( frag ) {
 +              nextfrag = frag->next;
 +              isdn_ppp_mp_free_skb(lp->netdev->pb, frag);
 +              frag = nextfrag;
 +      }
 +      lp->netdev->pb->frags = NULL;
  }
  
  static u32 isdn_ppp_mp_get_seq( int short_seq, 
        return seq;
  }
  
 -static void isdn_ppp_mp_discard(ippp_bundle *mp, struct sk_buff *from,
 -                              struct sk_buff *to)
 +struct sk_buff * isdn_ppp_mp_discard( ippp_bundle * mp,
 +                      struct sk_buff * from, struct sk_buff * to )
  {
 -      if (from) {
 -              struct sk_buff *skb, *tmp;
 -              int freeing = 0;
 -
 -              skb_queue_walk_safe(&mp->frags, skb, tmp) {
 -                      if (skb == to)
 -                              break;
 -                      if (skb == from)
 -                              freeing = 1;
 -                      if (!freeing)
 -                              continue;
 -                      __skb_unlink(skb, &mp->frags);
 -                      isdn_ppp_mp_free_skb(mp, skb);
 +      if( from )
 +              while (from != to) {
 +                      struct sk_buff * next = from->next;
 +                      isdn_ppp_mp_free_skb(mp, from);
 +                      from = next;
                }
 -      }
 -}
 -
 -static unsigned int calc_tot_len(struct sk_buff_head *queue,
 -                               struct sk_buff *from, struct sk_buff *to)
 -{
 -      unsigned int tot_len = 0;
 -      struct sk_buff *skb;
 -      int found_start = 0;
 -
 -      skb_queue_walk(queue, skb) {
 -              if (skb == from)
 -                      found_start = 1;
 -              if (!found_start)
 -                      continue;
 -              tot_len += skb->len - MP_HEADER_LEN;
 -              if (skb == to)
 -                      break;
 -      }
 -      return tot_len;
 +      return from;
  }
  
 -/* Reassemble packet using fragments in the reassembly queue from
 - * 'from' until 'to', inclusive.
 - */
 -static void isdn_ppp_mp_reassembly(isdn_net_dev *net_dev, isdn_net_local *lp,
 -                                 struct sk_buff *from, struct sk_buff *to,
 -                                 u32 lastseq)
 +void isdn_ppp_mp_reassembly( isdn_net_dev * net_dev, isdn_net_local * lp,
 +                              struct sk_buff * from, struct sk_buff * to )
  {
 -      ippp_bundle *mp = net_dev->pb;
 -      unsigned int tot_len;
 -      struct sk_buff *skb;
 +      ippp_bundle * mp = net_dev->pb;
        int proto;
 +      struct sk_buff * skb;
 +      unsigned int tot_len;
  
        if (lp->ppp_slot < 0 || lp->ppp_slot >= ISDN_MAX_CHANNELS) {
                printk(KERN_ERR "%s: lp->ppp_slot(%d) out of range\n",
                        __func__, lp->ppp_slot);
                return;
        }
 -
 -      tot_len = calc_tot_len(&mp->frags, from, to);
 -
 -      if (MP_FLAGS(from) == (MP_BEGIN_FRAG | MP_END_FRAG)) {
 -              if (ippp_table[lp->ppp_slot]->debug & 0x40)
 +      if( MP_FLAGS(from) == (MP_BEGIN_FRAG | MP_END_FRAG) ) {
 +              if( ippp_table[lp->ppp_slot]->debug & 0x40 )
                        printk(KERN_DEBUG "isdn_mppp: reassembly: frame %d, "
 -                             "len %d\n", MP_SEQ(from), from->len);
 +                                      "len %d\n", MP_SEQ(from), from->len );
                skb = from;
                skb_pull(skb, MP_HEADER_LEN);
 -              __skb_unlink(skb, &mp->frags);
                mp->frames--;   
        } else {
 -              struct sk_buff *walk, *tmp;
 -              int found_start = 0;
 +              struct sk_buff * frag;
 +              int n;
  
 -              if (ippp_table[lp->ppp_slot]->debug & 0x40)
 -                      printk(KERN_DEBUG"isdn_mppp: reassembling frames %d "
 -                             "to %d, len %d\n", MP_SEQ(from), lastseq,
 -                             tot_len);
 +              for(tot_len=n=0, frag=from; frag != to; frag=frag->next, n++)
 +                      tot_len += frag->len - MP_HEADER_LEN;
  
 -              skb = dev_alloc_skb(tot_len);
 -              if (!skb)
 +              if( ippp_table[lp->ppp_slot]->debug & 0x40 )
 +                      printk(KERN_DEBUG"isdn_mppp: reassembling frames %d "
 +                              "to %d, len %d\n", MP_SEQ(from), 
 +                              (MP_SEQ(from)+n-1) & MP_LONGSEQ_MASK, tot_len );
 +              if( (skb = dev_alloc_skb(tot_len)) == NULL ) {
                        printk(KERN_ERR "isdn_mppp: cannot allocate sk buff "
 -                             "of size %d\n", tot_len);
 -
 -              found_start = 0;
 -              skb_queue_walk_safe(&mp->frags, walk, tmp) {
 -                      if (walk == from)
 -                              found_start = 1;
 -                      if (!found_start)
 -                              continue;
 +                                      "of size %d\n", tot_len);
 +                      isdn_ppp_mp_discard(mp, from, to);
 +                      return;
 +              }
  
 -                      if (skb) {
 -                              unsigned int len = walk->len - MP_HEADER_LEN;
 -                              skb_copy_from_linear_data_offset(walk, MP_HEADER_LEN,
 -                                                               skb_put(skb, len),
 -                                                               len);
 -                      }
 -                      __skb_unlink(walk, &mp->frags);
 -                      isdn_ppp_mp_free_skb(mp, walk);
 +              while( from != to ) {
 +                      unsigned int len = from->len - MP_HEADER_LEN;
  
 -                      if (walk == to)
 -                              break;
 +                      skb_copy_from_linear_data_offset(from, MP_HEADER_LEN,
 +                                                       skb_put(skb,len),
 +                                                       len);
 +                      frag = from->next;
 +                      isdn_ppp_mp_free_skb(mp, from);
 +                      from = frag; 
                }
        }
 -      if (!skb)
 -              return;
 -
        proto = isdn_ppp_strip_proto(skb);
        isdn_ppp_push_higher(net_dev, lp, skb, proto);
  }
  
 -static void isdn_ppp_mp_free_skb(ippp_bundle *mp, struct sk_buff *skb)
 +static void isdn_ppp_mp_free_skb(ippp_bundle * mp, struct sk_buff * skb)
  {
        dev_kfree_skb(skb);
        mp->frames--;
@@@ -598,6 -598,11 +598,6 @@@ static int s2255_got_frame(struct s2255
        buf = list_entry(dma_q->active.next,
                         struct s2255_buffer, vb.queue);
  
 -      if (!waitqueue_active(&buf->vb.done)) {
 -              /* no one active */
 -              rc = -1;
 -              goto unlock;
 -      }
        list_del(&buf->vb.queue);
        do_gettimeofday(&buf->vb.ts);
        dprintk(100, "[%p/%d] wakeup\n", buf, buf->vb.i);
@@@ -1980,7 -1985,7 +1980,7 @@@ static int save_frame(struct s2255_dev 
                                        wake_up(&dev->fw_data->wait_fw);
                                        break;
                                default:
-                                       printk(KERN_INFO "s2255 unknwn resp\n");
+                                       printk(KERN_INFO "s2255 unknown resp\n");
                                }
                        default:
                                pdata++;
@@@ -68,7 -68,7 +68,7 @@@ enum 
  #define CQE_STATUS_COMPL_MASK         0xFFFF
  #define CQE_STATUS_COMPL_SHIFT                0       /* bits 0 - 15 */
  #define CQE_STATUS_EXTD_MASK          0xFFFF
 -#define CQE_STATUS_EXTD_SHIFT         0       /* bits 0 - 15 */
 +#define CQE_STATUS_EXTD_SHIFT         16      /* bits 16 - 31 */
  
  struct be_mcc_compl {
        u32 status;             /* dword 0 */
@@@ -435,7 -435,7 +435,7 @@@ enum be_if_flags 
   * filtering capabilities. */
  struct be_cmd_req_if_create {
        struct be_cmd_req_hdr hdr;
-       u32 version;            /* ignore currntly */
+       u32 version;            /* ignore currently */
        u32 capability_flags;
        u32 enable_flags;
        u8 mac_addr[ETH_ALEN];
@@@ -1610,21 -1610,11 +1610,21 @@@ static int be_open(struct net_device *n
  
        status = be_cmd_link_status_query(adapter, &link_up);
        if (status)
 -              return status;
 +              goto ret_sts;
        be_link_status_update(adapter, link_up);
  
 +      status = be_vid_config(adapter);
 +      if (status)
 +              goto ret_sts;
 +
 +      status = be_cmd_set_flow_control(adapter,
 +                                      adapter->tx_fc, adapter->rx_fc);
 +      if (status)
 +              goto ret_sts;
 +
        schedule_delayed_work(&adapter->work, msecs_to_jiffies(100));
 -      return 0;
 +ret_sts:
 +      return status;
  }
  
  static int be_setup(struct be_adapter *adapter)
        if (status != 0)
                goto rx_qs_destroy;
  
 -      status = be_vid_config(adapter);
 -      if (status != 0)
 -              goto mccqs_destroy;
 -
 -      status = be_cmd_set_flow_control(adapter, true, true);
 -      if (status != 0)
 -              goto mccqs_destroy;
        return 0;
  
 -mccqs_destroy:
 -      be_mcc_queues_destroy(adapter);
  rx_qs_destroy:
        be_rx_queues_destroy(adapter);
  tx_qs_destroy:
@@@ -1877,7 -1876,7 +1877,7 @@@ int be_load_fw(struct be_adapter *adapt
                goto fw_exit;
        }
  
-       dev_info(&adapter->pdev->dev, "Firmware flashed succesfully\n");
+       dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");
  
  fw_exit:
        release_firmware(fw);
@@@ -1910,10 -1909,6 +1910,10 @@@ static void be_netdev_init(struct net_d
  
        adapter->rx_csum = true;
  
 +      /* Default settings for Rx and Tx flow control */
 +      adapter->rx_fc = true;
 +      adapter->tx_fc = true;
 +
        netif_set_gso_max_size(netdev, 65535);
  
        BE_SET_NETDEV_OPS(netdev, &be_netdev_ops);
@@@ -2176,7 -2171,6 +2176,7 @@@ static int be_suspend(struct pci_dev *p
                be_close(netdev);
                rtnl_unlock();
        }
 +      be_cmd_get_flow_control(adapter, &adapter->tx_fc, &adapter->rx_fc);
        be_clear(adapter);
  
        pci_save_state(pdev);
diff --combined drivers/net/cxgb3/sge.c
@@@ -879,7 -879,7 +879,7 @@@ recycle
        pci_dma_sync_single_for_cpu(adap->pdev, dma_addr, len,
                                    PCI_DMA_FROMDEVICE);
        (*sd->pg_chunk.p_cnt)--;
 -      if (!*sd->pg_chunk.p_cnt)
 +      if (!*sd->pg_chunk.p_cnt && sd->pg_chunk.page != fl->pg_chunk.page)
                pci_unmap_page(adap->pdev,
                               sd->pg_chunk.mapping,
                               fl->alloc_size,
@@@ -1285,7 -1285,7 +1285,7 @@@ netdev_tx_t t3_eth_xmit(struct sk_buff 
  
        /*
         * We do not use Tx completion interrupts to free DMAd Tx packets.
-        * This is good for performamce but means that we rely on new Tx
+        * This is good for performance but means that we rely on new Tx
         * packets arriving to run the destructors of completed packets,
         * which open up space in their sockets' send queues.  Sometimes
         * we do not get such new packets causing Tx to stall.  A single
@@@ -2088,7 -2088,7 +2088,7 @@@ static void lro_add_page(struct adapte
                                    PCI_DMA_FROMDEVICE);
  
        (*sd->pg_chunk.p_cnt)--;
 -      if (!*sd->pg_chunk.p_cnt)
 +      if (!*sd->pg_chunk.p_cnt && sd->pg_chunk.page != fl->pg_chunk.page)
                pci_unmap_page(adap->pdev,
                               sd->pg_chunk.mapping,
                               fl->alloc_size,
diff --combined drivers/net/s2io.c
@@@ -3238,7 -3238,7 +3238,7 @@@ static u64 s2io_mdio_read(u32 mmd_type
  
  /**
   *  s2io_chk_xpak_counter - Function to check the status of the xpak counters
-  *  @counter      : couter value to be updated
+  *  @counter      : counter value to be updated
   *  @flag         : flag to indicate the status
   *  @type         : counter type
   *  Description:
@@@ -3494,7 -3494,6 +3494,7 @@@ static void s2io_reset(struct s2io_nic 
  
                /* Restore the PCI state saved during initialization. */
                pci_restore_state(sp->pdev);
 +              pci_save_state(sp->pdev);
                pci_read_config_word(sp->pdev, 0x2, &val16);
                if (check_pci_device_id(val16) != (u16)PCI_ANY_ID)
                        break;
diff --combined drivers/net/smsc911x.c
@@@ -816,7 -816,7 +816,7 @@@ static int smsc911x_mii_probe(struct ne
        SMSC_TRACE(HW, "Passed Loop Back Test");
  #endif                                /* USE_PHY_WORK_AROUND */
  
-       SMSC_TRACE(HW, "phy initialised succesfully");
+       SMSC_TRACE(HW, "phy initialised successfully");
        return 0;
  }
  
@@@ -986,7 -986,7 +986,7 @@@ static int smsc911x_poll(struct napi_st
        struct net_device *dev = pdata->dev;
        int npackets = 0;
  
 -      while (likely(netif_running(dev)) && (npackets < budget)) {
 +      while (npackets < budget) {
                unsigned int pktlength;
                unsigned int pktwords;
                struct sk_buff *skb;
@@@ -2955,7 -2955,7 +2955,7 @@@ static void do_periodic_work(struct b43
  /* Periodic work locking policy:
   *    The whole periodic work handler is protected by
   *    wl->mutex. If another lock is needed somewhere in the
-  *    pwork callchain, it's aquired in-place, where it's needed.
+  *    pwork callchain, it's acquired in-place, where it's needed.
   */
  static void b43_periodic_work_handler(struct work_struct *work)
  {
@@@ -4521,8 -4521,9 +4521,8 @@@ static int b43_op_beacon_set_tim(struc
  {
        struct b43_wl *wl = hw_to_b43_wl(hw);
  
 -      mutex_lock(&wl->mutex);
 +      /* FIXME: add locking */
        b43_update_templates(wl);
 -      mutex_unlock(&wl->mutex);
  
        return 0;
  }
@@@ -551,7 -551,7 +551,7 @@@ static int ipw2100_get_ordinal(struct i
                /* get number of entries */
                field_count = *(((u16 *) & field_info) + 1);
  
-               /* abort if no enought memory */
+               /* abort if not enough memory */
                total_length = field_len * field_count;
                if (total_length > *len) {
                        *len = total_length;
@@@ -3044,7 -3044,7 +3044,7 @@@ static void ipw2100_tx_send_data(struc
                             IPW_MAX_BDS)) {
                        /* TODO: Support merging buffers if more than
                         * IPW_MAX_BDS are used */
-                       IPW_DEBUG_INFO("%s: Maximum BD theshold exceeded.  "
+                       IPW_DEBUG_INFO("%s: Maximum BD threshold exceeded.  "
                                       "Increase fragmentation level.\n",
                                       priv->net_dev->name);
                }
@@@ -6029,7 -6029,7 +6029,7 @@@ static struct net_device *ipw2100_alloc
        struct ipw2100_priv *priv;
        struct net_device *dev;
  
 -      dev = alloc_ieee80211(sizeof(struct ipw2100_priv), 0);
 +      dev = alloc_ieee80211(sizeof(struct ipw2100_priv));
        if (!dev)
                return NULL;
        priv = libipw_priv(dev);
@@@ -6325,8 -6325,10 +6325,8 @@@ static int ipw2100_pci_init_one(struct 
  
        fail:
        if (dev) {
 -              if (registered) {
 -                      unregister_ieee80211(priv->ieee);
 +              if (registered)
                        unregister_netdev(dev);
 -              }
  
                ipw2100_hw_stop_adapter(priv);
  
                sysfs_remove_group(&pci_dev->dev.kobj,
                                   &ipw2100_attribute_group);
  
 -              free_ieee80211(dev, 0);
 +              free_ieee80211(dev);
                pci_set_drvdata(pci_dev, NULL);
        }
  
@@@ -6383,6 -6385,7 +6383,6 @@@ static void __devexit ipw2100_pci_remov
                /* Unregister the device first - this results in close()
                 * being called if the device is open.  If we free storage
                 * first, then close() will crash. */
 -              unregister_ieee80211(priv->ieee);
                unregister_netdev(dev);
  
                /* ipw2100_down will ensure that there is no more pending work
                if (dev->base_addr)
                        iounmap((void __iomem *)dev->base_addr);
  
 -              free_ieee80211(dev, 0);
 +              free_ieee80211(dev);
        }
  
        pci_release_regions(pci_dev);
@@@ -6820,7 -6823,7 +6820,7 @@@ static int ipw2100_wx_get_range(struct 
        range->max_qual.updated = 7;    /* Updated all three */
  
        range->avg_qual.qual = 70;      /* > 8% missed beacons is 'bad' */
-       /* TODO: Find real 'good' to 'bad' threshol value for RSSI */
+       /* TODO: Find real 'good' to 'bad' threshold value for RSSI */
        range->avg_qual.level = 20 + IPW2100_RSSI_TO_DBM;
        range->avg_qual.noise = 0;
        range->avg_qual.updated = 7;    /* Updated all three */
@@@ -104,6 -104,25 +104,6 @@@ static int antenna = CFG_SYS_ANTENNA_BO
  static int rtap_iface = 0;     /* def: 0 -- do not create rtap interface */
  #endif
  
 -static struct ieee80211_rate ipw2200_rates[] = {
 -      { .bitrate = 10 },
 -      { .bitrate = 20, .flags = IEEE80211_RATE_SHORT_PREAMBLE },
 -      { .bitrate = 55, .flags = IEEE80211_RATE_SHORT_PREAMBLE },
 -      { .bitrate = 110, .flags = IEEE80211_RATE_SHORT_PREAMBLE },
 -      { .bitrate = 60 },
 -      { .bitrate = 90 },
 -      { .bitrate = 120 },
 -      { .bitrate = 180 },
 -      { .bitrate = 240 },
 -      { .bitrate = 360 },
 -      { .bitrate = 480 },
 -      { .bitrate = 540 }
 -};
 -
 -#define ipw2200_a_rates               (ipw2200_rates + 4)
 -#define ipw2200_num_a_rates   8
 -#define ipw2200_bg_rates      (ipw2200_rates + 0)
 -#define ipw2200_num_bg_rates  12
  
  #ifdef CONFIG_IPW2200_QOS
  static int qos_enable = 0;
@@@ -768,7 -787,7 +768,7 @@@ static int ipw_get_ordinal(struct ipw_p
                /* get number of entries */
                field_count = *(((u16 *) & field_info) + 1);
  
-               /* abort if not enought memory */
+               /* abort if not enough memory */
                total_len = field_len * field_count;
                if (total_len > *len) {
                        *len = total_len;
@@@ -7732,7 -7751,7 +7732,7 @@@ static void ipw_rebuild_decrypted_skb(s
        case SEC_LEVEL_0:
                break;
        default:
-               printk(KERN_ERR "Unknow security level %d\n",
+               printk(KERN_ERR "Unknown security level %d\n",
                       priv->ieee->sec.level);
                break;
        }
@@@ -8655,24 -8674,6 +8655,24 @@@ static int ipw_sw_reset(struct ipw_pri
   *
   */
  
 +static int ipw_wx_get_name(struct net_device *dev,
 +                         struct iw_request_info *info,
 +                         union iwreq_data *wrqu, char *extra)
 +{
 +      struct ipw_priv *priv = libipw_priv(dev);
 +      mutex_lock(&priv->mutex);
 +      if (priv->status & STATUS_RF_KILL_MASK)
 +              strcpy(wrqu->name, "radio off");
 +      else if (!(priv->status & STATUS_ASSOCIATED))
 +              strcpy(wrqu->name, "unassociated");
 +      else
 +              snprintf(wrqu->name, IFNAMSIZ, "IEEE 802.11%c",
 +                       ipw_modes[priv->assoc_request.ieee_mode]);
 +      IPW_DEBUG_WX("Name: %s\n", wrqu->name);
 +      mutex_unlock(&priv->mutex);
 +      return 0;
 +}
 +
  static int ipw_set_channel(struct ipw_priv *priv, u8 channel)
  {
        if (channel == 0) {
@@@ -8916,7 -8917,7 +8916,7 @@@ static int ipw_wx_get_range(struct net_
        range->max_qual.updated = 7;    /* Updated all three */
  
        range->avg_qual.qual = 70;
-       /* TODO: Find real 'good' to 'bad' threshol value for RSSI */
+       /* TODO: Find real 'good' to 'bad' threshold value for RSSI */
        range->avg_qual.level = 0;      /* FIXME to real average level */
        range->avg_qual.noise = 0;
        range->avg_qual.updated = 7;    /* Updated all three */
@@@ -9972,7 -9973,7 +9972,7 @@@ static int ipw_wx_sw_reset(struct net_d
  /* Rebase the WE IOCTLs to zero for the handler array */
  #define IW_IOCTL(x) [(x)-SIOCSIWCOMMIT]
  static iw_handler ipw_wx_handlers[] = {
 -      IW_IOCTL(SIOCGIWNAME) = (iw_handler) cfg80211_wext_giwname,
 +      IW_IOCTL(SIOCGIWNAME) = ipw_wx_get_name,
        IW_IOCTL(SIOCSIWFREQ) = ipw_wx_set_freq,
        IW_IOCTL(SIOCGIWFREQ) = ipw_wx_get_freq,
        IW_IOCTL(SIOCSIWMODE) = ipw_wx_set_mode,
@@@ -10289,7 -10290,7 +10289,7 @@@ static int ipw_tx_skb(struct ipw_priv *
                case SEC_LEVEL_0:
                        break;
                default:
-                       printk(KERN_ERR "Unknow security level %d\n",
+                       printk(KERN_ERR "Unknown security level %d\n",
                               priv->ieee->sec.level);
                        break;
                }
@@@ -11416,16 -11417,100 +11416,16 @@@ static void ipw_bg_down(struct work_str
  /* Called by register_netdev() */
  static int ipw_net_init(struct net_device *dev)
  {
 -      int i, rc = 0;
        struct ipw_priv *priv = libipw_priv(dev);
        mutex_lock(&priv->mutex);
  
        if (ipw_up(priv)) {
 -              rc = -EIO;
 -              goto out;
 -      }
 -
 -      memcpy(wdev->wiphy->perm_addr, priv->mac_addr, ETH_ALEN);
 -
 -      /* fill-out priv->ieee->bg_band */
 -      if (geo->bg_channels) {
 -              struct ieee80211_supported_band *bg_band = &priv->ieee->bg_band;
 -
 -              bg_band->band = IEEE80211_BAND_2GHZ;
 -              bg_band->n_channels = geo->bg_channels;
 -              bg_band->channels =
 -                      kzalloc(geo->bg_channels *
 -                              sizeof(struct ieee80211_channel), GFP_KERNEL);
 -              /* translate geo->bg to bg_band.channels */
 -              for (i = 0; i < geo->bg_channels; i++) {
 -                      bg_band->channels[i].band = IEEE80211_BAND_2GHZ;
 -                      bg_band->channels[i].center_freq = geo->bg[i].freq;
 -                      bg_band->channels[i].hw_value = geo->bg[i].channel;
 -                      bg_band->channels[i].max_power = geo->bg[i].max_power;
 -                      if (geo->bg[i].flags & LIBIPW_CH_PASSIVE_ONLY)
 -                              bg_band->channels[i].flags |=
 -                                      IEEE80211_CHAN_PASSIVE_SCAN;
 -                      if (geo->bg[i].flags & LIBIPW_CH_NO_IBSS)
 -                              bg_band->channels[i].flags |=
 -                                      IEEE80211_CHAN_NO_IBSS;
 -                      if (geo->bg[i].flags & LIBIPW_CH_RADAR_DETECT)
 -                              bg_band->channels[i].flags |=
 -                                      IEEE80211_CHAN_RADAR;
 -                      /* No equivalent for LIBIPW_CH_80211H_RULES,
 -                         LIBIPW_CH_UNIFORM_SPREADING, or
 -                         LIBIPW_CH_B_ONLY... */
 -              }
 -              /* point at bitrate info */
 -              bg_band->bitrates = ipw2200_bg_rates;
 -              bg_band->n_bitrates = ipw2200_num_bg_rates;
 -
 -              wdev->wiphy->bands[IEEE80211_BAND_2GHZ] = bg_band;
 -      }
 -
 -      /* fill-out priv->ieee->a_band */
 -      if (geo->a_channels) {
 -              struct ieee80211_supported_band *a_band = &priv->ieee->a_band;
 -
 -              a_band->band = IEEE80211_BAND_5GHZ;
 -              a_band->n_channels = geo->a_channels;
 -              a_band->channels =
 -                      kzalloc(geo->a_channels *
 -                              sizeof(struct ieee80211_channel), GFP_KERNEL);
 -              /* translate geo->bg to a_band.channels */
 -              for (i = 0; i < geo->a_channels; i++) {
 -                      a_band->channels[i].band = IEEE80211_BAND_2GHZ;
 -                      a_band->channels[i].center_freq = geo->a[i].freq;
 -                      a_band->channels[i].hw_value = geo->a[i].channel;
 -                      a_band->channels[i].max_power = geo->a[i].max_power;
 -                      if (geo->a[i].flags & LIBIPW_CH_PASSIVE_ONLY)
 -                              a_band->channels[i].flags |=
 -                                      IEEE80211_CHAN_PASSIVE_SCAN;
 -                      if (geo->a[i].flags & LIBIPW_CH_NO_IBSS)
 -                              a_band->channels[i].flags |=
 -                                      IEEE80211_CHAN_NO_IBSS;
 -                      if (geo->a[i].flags & LIBIPW_CH_RADAR_DETECT)
 -                              a_band->channels[i].flags |=
 -                                      IEEE80211_CHAN_RADAR;
 -                      /* No equivalent for LIBIPW_CH_80211H_RULES,
 -                         LIBIPW_CH_UNIFORM_SPREADING, or
 -                         LIBIPW_CH_B_ONLY... */
 -              }
 -              /* point at bitrate info */
 -              a_band->bitrates = ipw2200_a_rates;
 -              a_band->n_bitrates = ipw2200_num_a_rates;
 -
 -              wdev->wiphy->bands[IEEE80211_BAND_5GHZ] = a_band;
 -      }
 -
 -      set_wiphy_dev(wdev->wiphy, &priv->pci_dev->dev);
 -
 -      /* With that information in place, we can now register the wiphy... */
 -      if (wiphy_register(wdev->wiphy)) {
 -              rc = -EIO;
 -              goto out;
 +              mutex_unlock(&priv->mutex);
 +              return -EIO;
        }
  
 -out:
        mutex_unlock(&priv->mutex);
 -      return rc;
 +      return 0;
  }
  
  /* PCI driver stuff */
@@@ -11556,7 -11641,7 +11556,7 @@@ static int ipw_prom_alloc(struct ipw_pr
        if (priv->prom_net_dev)
                return -EPERM;
  
 -      priv->prom_net_dev = alloc_ieee80211(sizeof(struct ipw_prom_priv), 1);
 +      priv->prom_net_dev = alloc_ieee80211(sizeof(struct ipw_prom_priv));
        if (priv->prom_net_dev == NULL)
                return -ENOMEM;
  
  
        rc = register_netdev(priv->prom_net_dev);
        if (rc) {
 -              free_ieee80211(priv->prom_net_dev, 1);
 +              free_ieee80211(priv->prom_net_dev);
                priv->prom_net_dev = NULL;
                return rc;
        }
@@@ -11589,7 -11674,7 +11589,7 @@@ static void ipw_prom_free(struct ipw_pr
                return;
  
        unregister_netdev(priv->prom_net_dev);
 -      free_ieee80211(priv->prom_net_dev, 1);
 +      free_ieee80211(priv->prom_net_dev);
  
        priv->prom_net_dev = NULL;
  }
@@@ -11617,7 -11702,7 +11617,7 @@@ static int __devinit ipw_pci_probe(stru
        struct ipw_priv *priv;
        int i;
  
 -      net_dev = alloc_ieee80211(sizeof(struct ipw_priv), 0);
 +      net_dev = alloc_ieee80211(sizeof(struct ipw_priv));
        if (net_dev == NULL) {
                err = -ENOMEM;
                goto out;
                if (err) {
                        IPW_ERROR("Failed to register promiscuous network "
                                  "device (error %d).\n", err);
 -                      unregister_ieee80211(priv->ieee);
                        unregister_netdev(priv->net_dev);
                        goto out_remove_sysfs;
                }
        pci_disable_device(pdev);
        pci_set_drvdata(pdev, NULL);
        out_free_ieee80211:
 -      free_ieee80211(priv->net_dev, 0);
 +      free_ieee80211(priv->net_dev);
        out:
        return err;
  }
@@@ -11787,6 -11873,7 +11787,6 @@@ static void __devexit ipw_pci_remove(st
  
        mutex_unlock(&priv->mutex);
  
 -      unregister_ieee80211(priv->ieee);
        unregister_netdev(priv->net_dev);
  
        if (priv->rxq) {
        pci_release_regions(pdev);
        pci_disable_device(pdev);
        pci_set_drvdata(pdev, NULL);
 -      free_ieee80211(priv->net_dev, 0);
 +      free_ieee80211(priv->net_dev);
        free_firmware();
  }
  
@@@ -62,6 -62,9 +62,6 @@@ MODULE_DESCRIPTION(DRV_DESCRIPTION)
  MODULE_AUTHOR(DRV_COPYRIGHT);
  MODULE_LICENSE("GPL");
  
 -struct cfg80211_ops libipw_config_ops = { };
 -void *libipw_wiphy_privid = &libipw_wiphy_privid;
 -
  static int libipw_networks_allocate(struct libipw_device *ieee)
  {
        if (ieee->networks)
@@@ -140,7 -143,7 +140,7 @@@ int libipw_change_mtu(struct net_devic
  }
  EXPORT_SYMBOL(libipw_change_mtu);
  
 -struct net_device *alloc_ieee80211(int sizeof_priv, int monitor)
 +struct net_device *alloc_ieee80211(int sizeof_priv)
  {
        struct libipw_device *ieee;
        struct net_device *dev;
  
        ieee->dev = dev;
  
        err = libipw_networks_allocate(ieee);
        if (err) {
                LIBIPW_ERROR("Unable to allocate beacon storage: %d\n", err);
 -              goto failed_free_wiphy;
 +              goto failed_free_netdev;
        }
        libipw_networks_initialize(ieee);
  
        ieee->host_decrypt = 1;
        ieee->host_mc_decrypt = 1;
  
-       /* Host fragementation in Open mode. Default is enabled.
+       /* Host fragmentation in Open mode. Default is enabled.
         * Note: host fragmentation is always enabled if host encryption
         * is enabled. For cards that can do hardware encryption, they must do
         * hardware fragmentation as well. So we don't need a variable
  
        return dev;
  
 -failed_free_wiphy:
 -      if (!monitor)
 -              wiphy_free(ieee->wdev.wiphy);
  failed_free_netdev:
        free_netdev(dev);
  failed:
        return NULL;
  }
  
 -void free_ieee80211(struct net_device *dev, int monitor)
 +void free_ieee80211(struct net_device *dev)
  {
        struct libipw_device *ieee = netdev_priv(dev);
  
        lib80211_crypt_info_free(&ieee->crypt_info);
  
        libipw_networks_free(ieee);
        free_netdev(dev);
  }
  
 -void unregister_ieee80211(struct libipw_device *ieee)
 -{
 -      wiphy_unregister(ieee->wdev.wiphy);
 -      kfree(ieee->a_band.channels);
 -      kfree(ieee->bg_band.channels);
 -}
 -
  #ifdef CONFIG_LIBIPW_DEBUG
  
  static int debug = 0;
@@@ -294,3 -333,4 +294,3 @@@ module_init(libipw_init)
  
  EXPORT_SYMBOL(alloc_ieee80211);
  EXPORT_SYMBOL(free_ieee80211);
 -EXPORT_SYMBOL(unregister_ieee80211);
@@@ -3656,7 -3656,10 +3656,7 @@@ wv_pcmcia_reset(struct net_device *    dev
  
    i = pcmcia_access_configuration_register(link, &reg);
    if (i != 0)
 -    {
 -      cs_error(link, AccessConfigurationRegister, i);
        return FALSE;
 -    }
        
  #ifdef DEBUG_CONFIG_INFO
    printk(KERN_DEBUG "%s: wavelan_pcmcia_reset(): Config reg is 0x%x\n",
    reg.Value = reg.Value | COR_SW_RESET;
    i = pcmcia_access_configuration_register(link, &reg);
    if (i != 0)
 -    {
 -      cs_error(link, AccessConfigurationRegister, i);
        return FALSE;
 -    }
        
    reg.Action = CS_WRITE;
    reg.Value = COR_LEVEL_IRQ | COR_CONFIG;
    i = pcmcia_access_configuration_register(link, &reg);
    if (i != 0)
 -    {
 -      cs_error(link, AccessConfigurationRegister, i);
        return FALSE;
 -    }
  
  #ifdef DEBUG_CONFIG_TRACE
    printk(KERN_DEBUG "%s: <-wv_pcmcia_reset()\n", dev->name);
@@@ -3848,7 -3857,10 +3848,7 @@@ wv_pcmcia_config(struct pcmcia_device 
      {
        i = pcmcia_request_io(link, &link->io);
        if (i != 0)
 -      {
 -        cs_error(link, RequestIO, i);
          break;
 -      }
  
        /*
         * Now allocate an interrupt line.  Note that this does not
         */
        i = pcmcia_request_irq(link, &link->irq);
        if (i != 0)
 -      {
 -        cs_error(link, RequestIRQ, i);
          break;
 -      }
  
        /*
         * This actually configures the PCMCIA socket -- setting up
        link->conf.ConfigIndex = 1;
        i = pcmcia_request_configuration(link, &link->conf);
        if (i != 0)
 -      {
 -        cs_error(link, RequestConfiguration, i);
          break;
 -      }
  
        /*
         * Allocate a small memory window.  Note that the struct pcmcia_device
        req.Attributes = WIN_DATA_WIDTH_8|WIN_MEMORY_TYPE_AM|WIN_ENABLE;
        req.Base = req.Size = 0;
        req.AccessSpeed = mem_speed;
 -      i = pcmcia_request_window(&link, &req, &link->win);
 +      i = pcmcia_request_window(link, &req, &link->win);
        if (i != 0)
 -      {
 -        cs_error(link, RequestWindow, i);
          break;
 -      }
  
        lp->mem = ioremap(req.Base, req.Size);
        dev->mem_start = (u_long)lp->mem;
        dev->mem_end = dev->mem_start + req.Size;
  
        mem.CardOffset = 0; mem.Page = 0;
 -      i = pcmcia_map_mem_page(link->win, &mem);
 +      i = pcmcia_map_mem_page(link, link->win, &mem);
        if (i != 0)
 -      {
 -        cs_error(link, MapMemPage, i);
          break;
 -      }
  
        /* Feed device with this info... */
        dev->irq = link->irq.AssignedIRQ;
             lp->mem, dev->irq, (u_int) dev->base_addr);
  #endif
  
 -      SET_NETDEV_DEV(dev, &handle_to_dev(link));
 +      SET_NETDEV_DEV(dev, &link->dev);
        i = register_netdev(dev);
        if(i != 0)
        {
@@@ -3987,7 -4011,7 +3987,7 @@@ wavelan_interrupt(int           irq
  #endif
  
    /* Prevent reentrancy. We need to do that because we may have
-    * multiple interrupt handler running concurently.
 +    * multiple interrupt handlers running concurrently.
      * It is safe because interrupts are disabled before acquiring
     * the spinlock. */
    spin_lock(&lp->spinlock);
@@@ -4438,7 -4462,8 +4438,7 @@@ wavelan_probe(struct pcmcia_device *p_d
    p_dev->io.IOAddrLines = 3;
  
    /* Interrupt setup */
 -  p_dev->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING | IRQ_HANDLE_PRESENT;
 -  p_dev->irq.IRQInfo1 = IRQ_LEVEL_ID;
 +  p_dev->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING;
    p_dev->irq.Handler = wavelan_interrupt;
  
    /* General socket configuration */
    if (!dev)
        return -ENOMEM;
  
 -  p_dev->priv = p_dev->irq.Instance = dev;
 +  p_dev->priv = dev;
  
    lp = netdev_priv(dev);
  
@@@ -1680,48 -1680,36 +1680,48 @@@ static void tpacpi_remove_driver_attrib
                          | (__bv1) << 8 | (__bv2) }
  
  #define TPV_Q_X(__v, __bid1, __bid2, __bv1, __bv2,    \
 -              __eid1, __eid2, __ev1, __ev2)           \
 +              __eid, __ev1, __ev2)                    \
        { .vendor       = (__v),                        \
          .bios         = TPID(__bid1, __bid2),         \
 -        .ec           = TPID(__eid1, __eid2),         \
 +        .ec           = __eid,                        \
          .quirks       = (__ev1) << 24 | (__ev2) << 16 \
                          | (__bv1) << 8 | (__bv2) }
  
  #define TPV_QI0(__id1, __id2, __bv1, __bv2) \
        TPV_Q(PCI_VENDOR_ID_IBM, __id1, __id2, __bv1, __bv2)
  
 +/* Outdated IBM BIOSes often lack the EC id string */
  #define TPV_QI1(__id1, __id2, __bv1, __bv2, __ev1, __ev2) \
        TPV_Q_X(PCI_VENDOR_ID_IBM, __id1, __id2,        \
 -              __bv1, __bv2, __id1, __id2, __ev1, __ev2)
 +              __bv1, __bv2, TPID(__id1, __id2),       \
 +              __ev1, __ev2),                          \
 +      TPV_Q_X(PCI_VENDOR_ID_IBM, __id1, __id2,        \
 +              __bv1, __bv2, TPACPI_MATCH_UNKNOWN,     \
 +              __ev1, __ev2)
  
 +/* Outdated IBM BIOSes often lack the EC id string */
  #define TPV_QI2(__bid1, __bid2, __bv1, __bv2,         \
                __eid1, __eid2, __ev1, __ev2)           \
        TPV_Q_X(PCI_VENDOR_ID_IBM, __bid1, __bid2,      \
 -              __bv1, __bv2, __eid1, __eid2, __ev1, __ev2)
 +              __bv1, __bv2, TPID(__eid1, __eid2),     \
 +              __ev1, __ev2),                          \
 +      TPV_Q_X(PCI_VENDOR_ID_IBM, __bid1, __bid2,      \
 +              __bv1, __bv2, TPACPI_MATCH_UNKNOWN,     \
 +              __ev1, __ev2)
  
  #define TPV_QL0(__id1, __id2, __bv1, __bv2) \
        TPV_Q(PCI_VENDOR_ID_LENOVO, __id1, __id2, __bv1, __bv2)
  
  #define TPV_QL1(__id1, __id2, __bv1, __bv2, __ev1, __ev2) \
        TPV_Q_X(PCI_VENDOR_ID_LENOVO, __id1, __id2,     \
 -              __bv1, __bv2, __id1, __id2, __ev1, __ev2)
 +              __bv1, __bv2, TPID(__id1, __id2),       \
 +              __ev1, __ev2)
  
  #define TPV_QL2(__bid1, __bid2, __bv1, __bv2,         \
                __eid1, __eid2, __ev1, __ev2)           \
        TPV_Q_X(PCI_VENDOR_ID_LENOVO, __bid1, __bid2,   \
 -              __bv1, __bv2, __eid1, __eid2, __ev1, __ev2)
 +              __bv1, __bv2, TPID(__eid1, __eid2),     \
 +              __ev1, __ev2)
  
  static const struct tpacpi_quirk tpacpi_bios_version_qtable[] __initconst = {
        /*  Numeric models ------------------ */
@@@ -6325,7 -6313,7 +6325,7 @@@ static int brightness_write(char *buf
         * Doing it this way makes the syscall restartable in case of EINTR
         */
        rc = brightness_set(level);
 -      return (rc == -EINTR)? ERESTARTSYS : rc;
 +      return (rc == -EINTR)? -ERESTARTSYS : rc;
  }
  
  static struct ibm_struct brightness_driver_data = {
@@@ -6545,7 -6533,7 +6545,7 @@@ static struct ibm_struct volume_driver_
   *    The speeds are stored on handles
   *    (FANA:FAN9), (FANC:FANB), (FANE:FAND).
   *
-  *    There are three default speed sets, acessible as handles:
+  *    There are three default speed sets, accessible as handles:
   *    FS1L,FS1M,FS1H; FS2L,FS2M,FS2H; FS3L,FS3M,FS3H
   *
   *    ACPI DSDT switches which set is in use depending on various
diff --combined drivers/rtc/rtc-v3020.c
@@@ -96,7 -96,7 +96,7 @@@ static void v3020_mmio_write_bit(struc
  
  static unsigned char v3020_mmio_read_bit(struct v3020 *chip)
  {
 -      return readl(chip->ioaddress) & (1 << chip->leftshift);
 +      return !!(readl(chip->ioaddress) & (1 << chip->leftshift));
  }
  
  static struct v3020_chip_ops v3020_mmio_ops = {
@@@ -335,7 -335,7 +335,7 @@@ static int rtc_probe(struct platform_de
                goto err_io;
        }
  
-       /* Make sure frequency measurment mode, test modes, and lock
+       /* Make sure frequency measurement mode, test modes, and lock
         * are all disabled */
        v3020_set_reg(chip, V3020_STATUS_0, 0x0);
  
diff --combined drivers/scsi/pmcraid.c
@@@ -1071,7 -1071,7 +1071,7 @@@ static struct pmcraid_cmd *pmcraid_init
  
        ioarcb->data_transfer_length = cpu_to_le32(rcb_size);
  
 -      ioadl[0].flags |= cpu_to_le32(IOADL_FLAGS_READ_LAST);
 +      ioadl[0].flags |= IOADL_FLAGS_READ_LAST;
        ioadl[0].data_len = cpu_to_le32(rcb_size);
        ioadl[0].address = cpu_to_le32(dma);
  
@@@ -2251,7 -2251,7 +2251,7 @@@ static void pmcraid_request_sense(struc
  
        ioadl->address = cpu_to_le64(cmd->sense_buffer_dma);
        ioadl->data_len = cpu_to_le32(SCSI_SENSE_BUFFERSIZE);
 -      ioadl->flags = cpu_to_le32(IOADL_FLAGS_LAST_DESC);
 +      ioadl->flags = IOADL_FLAGS_LAST_DESC;
  
        /* request sense might be called as part of error response processing
         * which runs in tasklets context. It is possible that mid-layer might
@@@ -3017,7 -3017,7 +3017,7 @@@ static int pmcraid_build_ioadl
                ioadl[i].flags = 0;
        }
        /* setup last descriptor */
 -      ioadl[i - 1].flags = cpu_to_le32(IOADL_FLAGS_LAST_DESC);
 +      ioadl[i - 1].flags = IOADL_FLAGS_LAST_DESC;
  
        return 0;
  }
@@@ -3342,7 -3342,7 +3342,7 @@@ static int pmcraid_chr_fasync(int fd, s
   * @direction : data transfer direction
   *
   * Return value
-  *  0 on sucess, non-zero error code on failure
+  *  0 on success, non-zero error code on failure
   */
  static int pmcraid_build_passthrough_ioadls(
        struct pmcraid_cmd *cmd,
        }
  
        /* setup the last descriptor */
 -      ioadl[i - 1].flags = cpu_to_le32(IOADL_FLAGS_LAST_DESC);
 +      ioadl[i - 1].flags = IOADL_FLAGS_LAST_DESC;
  
        return 0;
  }
   * @direction: data transfer direction
   *
   * Return value
-  *  0 on sucess, non-zero error code on failure
+  *  0 on success, non-zero error code on failure
   */
  static void pmcraid_release_passthrough_ioadls(
        struct pmcraid_cmd *cmd,
   * @arg: pointer to pmcraid_passthrough_buffer user buffer
   *
   * Return value
-  *  0 on sucess, non-zero error code on failure
+  *  0 on success, non-zero error code on failure
   */
  static long pmcraid_ioctl_passthrough(
        struct pmcraid_instance *pinstance,
@@@ -5314,7 -5314,7 +5314,7 @@@ static void pmcraid_querycfg(struct pmc
                cpu_to_le32(sizeof(struct pmcraid_config_table));
  
        ioadl = &(ioarcb->add_data.u.ioadl[0]);
 -      ioadl->flags = cpu_to_le32(IOADL_FLAGS_LAST_DESC);
 +      ioadl->flags = IOADL_FLAGS_LAST_DESC;
        ioadl->address = cpu_to_le64(pinstance->cfg_table_bus_addr);
        ioadl->data_len = cpu_to_le32(sizeof(struct pmcraid_config_table));
  
@@@ -28,7 -28,6 +28,7 @@@
  #include <linux/errno.h>
  #include <linux/init.h>
  #include <linux/timer.h>
 +#include <linux/ktime.h>
  #include <linux/list.h>
  #include <linux/interrupt.h>
  #include <linux/usb.h>
@@@ -210,7 -209,7 +210,7 @@@ static int handshake_on_error_set_halt(
        if (error) {
                ehci_halt(ehci);
                ehci_to_hcd(ehci)->state = HC_STATE_HALT;
-               ehci_err(ehci, "force halt; handhake %p %08x %08x -> %d\n",
+               ehci_err(ehci, "force halt; handshake %p %08x %08x -> %d\n",
                        ptr, mask, done, error);
        }
  
@@@ -677,7 -676,6 +677,7 @@@ static int ehci_run (struct usb_hcd *hc
        ehci_readl(ehci, &ehci->regs->command); /* unblock posted writes */
        msleep(5);
        up_write(&ehci_cf_port_reset_rwsem);
 +      ehci->last_periodic_enable = ktime_get_real();
  
        temp = HC_VERSION(ehci_readl(ehci, &ehci->caps->hc_capbase));
        ehci_info (ehci,
@@@ -484,7 -484,6 +484,7 @@@ static int atmel_lcdfb_set_par(struct f
        unsigned long value;
        unsigned long clk_value_khz;
        unsigned long bits_per_line;
 +      unsigned long pix_factor = 2;
  
        might_sleep();
  
        /* Now, the LCDC core... */
  
        /* Set pixel clock */
 +      if (cpu_is_at91sam9g45() && !cpu_is_at91sam9g45es())
 +              pix_factor = 1;
 +
        clk_value_khz = clk_get_rate(sinfo->lcdc_clk) / 1000;
  
        value = DIV_ROUND_UP(clk_value_khz, PICOS2KHZ(info->var.pixclock));
  
 -      if (value < 2) {
 +      if (value < pix_factor) {
                dev_notice(info->device, "Bypassing pixel clock divider\n");
                lcdc_writel(sinfo, ATMEL_LCDC_LCDCON1, ATMEL_LCDC_BYPASS);
        } else {
 -              value = (value / 2) - 1;
 +              value = (value / pix_factor) - 1;
                dev_dbg(info->device, "  * programming CLKVAL = 0x%08lx\n",
                                value);
                lcdc_writel(sinfo, ATMEL_LCDC_LCDCON1,
                                value << ATMEL_LCDC_CLKVAL_OFFSET);
 -              info->var.pixclock = KHZ2PICOS(clk_value_khz / (2 * (value + 1)));
 +              info->var.pixclock =
 +                      KHZ2PICOS(clk_value_khz / (pix_factor * (value + 1)));
                dev_dbg(info->device, "  updated pixclk:     %lu KHz\n",
                                        PICOS2KHZ(info->var.pixclock));
        }
@@@ -964,7 -959,7 +964,7 @@@ static int __init atmel_lcdfb_probe(str
        if (sinfo->atmel_lcdfb_power_control)
                sinfo->atmel_lcdfb_power_control(1);
  
-       dev_info(dev, "fb%d: Atmel LCDC at 0x%08lx (mapped at %p), irq %lu\n",
+       dev_info(dev, "fb%d: Atmel LCDC at 0x%08lx (mapped at %p), irq %d\n",
                       info->node, info->fix.mmio_start, sinfo->mmio, sinfo->irq_base);
  
        return 0;
diff --combined drivers/video/gbefb.c
@@@ -701,7 -701,7 +701,7 @@@ static int gbefb_set_par(struct fb_inf
           blocks of 512x128, 256x128 or 128x128 pixels, respectively for 8bit,
           16bit and 32 bit modes (64 kB). They cover the screen with partial
           tiles on the right and/or bottom of the screen if needed.
-          For exemple in 640x480 8 bit mode the mapping is:
+          For example in 640x480 8 bit mode the mapping is:
  
           <-------- 640 ----->
           <---- 512 ----><128|384 offscreen>
@@@ -1147,7 -1147,7 +1147,7 @@@ static int __init gbefb_probe(struct pl
        gbefb_setup(options);
  #endif
  
 -      if (!request_region(GBE_BASE, sizeof(struct sgi_gbe), "GBE")) {
 +      if (!request_mem_region(GBE_BASE, sizeof(struct sgi_gbe), "GBE")) {
                printk(KERN_ERR "gbefb: couldn't reserve mmio region\n");
                ret = -EBUSY;
                goto out_release_framebuffer;
diff --combined fs/btrfs/extent_map.c
@@@ -208,7 -208,7 +208,7 @@@ int unpin_extent_cache(struct extent_ma
        write_lock(&tree->lock);
        em = lookup_extent_mapping(tree, start, len);
  
 -      WARN_ON(em->start != start || !em);
 +      WARN_ON(!em || em->start != start);
  
        if (!em)
                goto out;
@@@ -256,7 -256,7 +256,7 @@@ out
   * Insert @em into @tree or perform a simple forward/backward merge with
   * existing mappings.  The extent_map struct passed in will be inserted
   * into the tree directly, with an additional reference taken, or a
-  * reference dropped if the merge attempt was sucessfull.
 +  * reference dropped if the merge attempt was successful.
   */
  int add_extent_mapping(struct extent_map_tree *tree,
                       struct extent_map *em)
diff --combined fs/cifs/inode.c
@@@ -512,10 -512,13 +512,10 @@@ int cifs_get_inode_info(struct inode **
                                        cifs_sb->local_nls,
                                        cifs_sb->mnt_cifs_flags &
                                                CIFS_MOUNT_MAP_SPECIAL_CHR);
 -                      if (rc1) {
 +                      if (rc1 || !fattr.cf_uniqueid) {
                                cFYI(1, ("GetSrvInodeNum rc %d", rc1));
                                fattr.cf_uniqueid = iunique(sb, ROOT_I);
 -                              /* disable serverino if call not supported */
 -                              if (rc1 == -EINVAL)
 -                                      cifs_sb->mnt_cifs_flags &=
 -                                                      ~CIFS_MOUNT_SERVER_INUM;
 +                              cifs_autodisable_serverino(cifs_sb);
                        }
                } else {
                        fattr.cf_uniqueid = iunique(sb, ROOT_I);
@@@ -914,8 -917,8 +914,8 @@@ undo_setattr
  /*
   * If dentry->d_inode is null (usually meaning the cached dentry
   * is a negative dentry) then we would attempt a standard SMB delete, but
-  * if that fails we can not attempt the fall back mechanisms on EACESS
-  * but will return the EACESS to the caller.  Note that the VFS does not call
 +  * if that fails we cannot attempt the fallback mechanisms on EACCES
 +  * but will return EACCES to the caller. Note that the VFS does not call
   * unlink on negative dentries currently.
   */
  int cifs_unlink(struct inode *dir, struct dentry *dentry)
diff --combined fs/compat_ioctl.c
@@@ -1800,7 -1800,7 +1800,7 @@@ struct space_resv_32 
  /* just account for different alignment */
  static int compat_ioctl_preallocate(struct file *file, unsigned long arg)
  {
 -      struct space_resv_32    __user *p32 = (void __user *)arg;
 +      struct space_resv_32    __user *p32 = compat_ptr(arg);
        struct space_resv       __user *p = compat_alloc_user_space(sizeof(*p));
  
        if (copy_in_user(&p->l_type,    &p32->l_type,   sizeof(s16)) ||
@@@ -2655,7 -2655,7 +2655,7 @@@ COMPATIBLE_IOCTL(TIOCSLTC
  #endif
  #ifdef TIOCSTART
  /*
-  * For these two we have defintions in ioctls.h and/or termios.h on
+  * For these two we have definitions in ioctls.h and/or termios.h on
   * some architectures but no actual implementation.  Some applications
   * like bash call them if they are defined in the headers, so we provide
   * entries here to avoid syslog message spew.
@@@ -2802,7 -2802,7 +2802,7 @@@ asmlinkage long compat_sys_ioctl(unsign
  #else
        case FS_IOC_RESVSP:
        case FS_IOC_RESVSP64:
 -              error = ioctl_preallocate(filp, (void __user *)arg);
 +              error = ioctl_preallocate(filp, compat_ptr(arg));
                goto out_fput;
  #endif
  
diff --combined fs/ext3/inode.c
@@@ -699,9 -699,8 +699,9 @@@ static int ext3_splice_branch(handle_t 
        int err = 0;
        struct ext3_block_alloc_info *block_i;
        ext3_fsblk_t current_block;
 +      struct ext3_inode_info *ei = EXT3_I(inode);
  
 -      block_i = EXT3_I(inode)->i_block_alloc_info;
 +      block_i = ei->i_block_alloc_info;
        /*
         * If we're splicing into a [td]indirect block (as opposed to the
         * inode) then we need to get write access to the [td]indirect block
  
        inode->i_ctime = CURRENT_TIME_SEC;
        ext3_mark_inode_dirty(handle, inode);
 +      /* ext3_mark_inode_dirty already updated i_sync_tid */
 +      atomic_set(&ei->i_datasync_tid, handle->h_transaction->t_tid);
  
        /* had we spliced it onto indirect block? */
        if (where->bh) {
@@@ -1738,7 -1735,6 +1738,7 @@@ static ssize_t ext3_direct_IO(int rw, s
        ssize_t ret;
        int orphan = 0;
        size_t count = iov_length(iov, nr_segs);
 +      int retries = 0;
  
        if (rw == WRITE) {
                loff_t final_size = offset + count;
                }
        }
  
 +retry:
        ret = blockdev_direct_IO(rw, iocb, inode, inode->i_sb->s_bdev, iov,
                                 offset, nr_segs,
                                 ext3_get_block, NULL);
 +      if (ret == -ENOSPC && ext3_should_retry_alloc(inode->i_sb, &retries))
 +              goto retry;
  
        if (orphan) {
                int err;
@@@ -2033,7 -2026,7 +2033,7 @@@ static Indirect *ext3_find_shared(struc
        int k, err;
  
        *top = 0;
-       /* Make k index the deepest non-null offest + 1 */
+       /* Make k index the deepest non-null offset + 1 */
        for (k = depth; k > 1 && !offsets[k-1]; k--)
                ;
        partial = ext3_get_branch(inode, k, offsets, chain, &err);
@@@ -2757,8 -2750,6 +2757,8 @@@ struct inode *ext3_iget(struct super_bl
        struct ext3_inode_info *ei;
        struct buffer_head *bh;
        struct inode *inode;
 +      journal_t *journal = EXT3_SB(sb)->s_journal;
 +      transaction_t *transaction;
        long ret;
        int block;
  
                ei->i_data[block] = raw_inode->i_block[block];
        INIT_LIST_HEAD(&ei->i_orphan);
  
 +      /*
 +       * Set transaction id's of transactions that have to be committed
 +       * to finish f[data]sync. We set them to currently running transaction
 +       * as we cannot be sure that the inode or some of its metadata isn't
 +       * part of the transaction - the inode could have been reclaimed and
 +       * now it is reread from disk.
 +       */
 +      if (journal) {
 +              tid_t tid;
 +
 +              spin_lock(&journal->j_state_lock);
 +              if (journal->j_running_transaction)
 +                      transaction = journal->j_running_transaction;
 +              else
 +                      transaction = journal->j_committing_transaction;
 +              if (transaction)
 +                      tid = transaction->t_tid;
 +              else
 +                      tid = journal->j_commit_sequence;
 +              spin_unlock(&journal->j_state_lock);
 +              atomic_set(&ei->i_sync_tid, tid);
 +              atomic_set(&ei->i_datasync_tid, tid);
 +      }
 +
        if (inode->i_ino >= EXT3_FIRST_INO(inode->i_sb) + 1 &&
            EXT3_INODE_SIZE(inode->i_sb) > EXT3_GOOD_OLD_INODE_SIZE) {
                /*
@@@ -3044,7 -3011,6 +3044,7 @@@ again
                err = rc;
        ei->i_state &= ~EXT3_STATE_NEW;
  
 +      atomic_set(&ei->i_sync_tid, handle->h_transaction->t_tid);
  out_brelse:
        brelse (bh);
        ext3_std_error(inode->i_sb, err);
diff --combined fs/ext4/inode.c
@@@ -193,7 -193,7 +193,7 @@@ static int try_to_extend_transaction(ha
   * so before we call here everything must be consistently dirtied against
   * this transaction.
   */
 - int ext4_truncate_restart_trans(handle_t *handle, struct inode *inode,
 +int ext4_truncate_restart_trans(handle_t *handle, struct inode *inode,
                                 int nblocks)
  {
        int ret;
        up_write(&EXT4_I(inode)->i_data_sem);
        ret = ext4_journal_restart(handle, blocks_for_truncate(inode));
        down_write(&EXT4_I(inode)->i_data_sem);
 +      ext4_discard_preallocations(inode);
  
        return ret;
  }
@@@ -2933,7 -2932,7 +2933,7 @@@ retry
                ret = write_cache_pages(mapping, wbc, __mpage_da_writepage,
                                        &mpd);
                /*
-                * If we have a contigous extent of pages and we
+                * If we have a contiguous extent of pages and we
                 * haven't done the I/O yet, map the blocks and submit
                 * them for I/O.
                 */
@@@ -3446,6 -3445,8 +3446,6 @@@ out
        return ret;
  }
  
 -/* Maximum number of blocks we map for direct IO at once. */
 -
  static int ext4_get_block_dio_write(struct inode *inode, sector_t iblock,
                   struct buffer_head *bh_result, int create)
  {
@@@ -3653,14 -3654,13 +3653,14 @@@ static void ext4_end_io_dio(struct kioc
          ext4_io_end_t *io_end = iocb->private;
        struct workqueue_struct *wq;
  
 +      /* if not async direct IO or dio with 0 bytes write, just return */
 +      if (!io_end || !size)
 +              return;
 +
        ext_debug("ext4_end_io_dio(): io_end 0x%p"
                  "for inode %lu, iocb 0x%p, offset %llu, size %llu\n",
                  iocb->private, io_end->inode->i_ino, iocb, offset,
                  size);
 -      /* if not async direct IO or dio with 0 bytes write, just return */
 -      if (!io_end || !size)
 -              return;
  
        /* if not aio dio with unwritten extents, just free io and return */
        if (io_end->flag != DIO_AIO_UNWRITTEN){
@@@ -3771,19 -3771,13 +3771,19 @@@ static ssize_t ext4_ext_direct_IO(int r
                if (ret != -EIOCBQUEUED && ret <= 0 && iocb->private) {
                        ext4_free_io_end(iocb->private);
                        iocb->private = NULL;
 -              } else if (ret > 0)
 +              } else if (ret > 0 && (EXT4_I(inode)->i_state &
 +                                     EXT4_STATE_DIO_UNWRITTEN)) {
 +                      int err;
                        /*
                        * for the non-AIO case, since the IO is already
                        * completed, we could do the conversion right here
                         */
 -                      ret = ext4_convert_unwritten_extents(inode,
 -                                                              offset, ret);
 +                      err = ext4_convert_unwritten_extents(inode,
 +                                                           offset, ret);
 +                      if (err < 0)
 +                              ret = err;
 +                      EXT4_I(inode)->i_state &= ~EXT4_STATE_DIO_UNWRITTEN;
 +              }
                return ret;
        }
  
@@@ -4064,7 -4058,7 +4064,7 @@@ static Indirect *ext4_find_shared(struc
        int k, err;
  
        *top = 0;
-       /* Make k index the deepest non-null offest + 1 */
+       /* Make k index the deepest non-null offset + 1 */
        for (k = depth; k > 1 && !offsets[k-1]; k--)
                ;
        partial = ext4_get_branch(inode, k, offsets, chain, &err);
@@@ -5376,7 -5370,7 +5376,7 @@@ static int ext4_index_trans_blocks(stru
   * worst case, the index blocks spread over different block groups
   *
   * If data blocks are discontiguous, they may spread over
-  * different block groups too. If they are contiugous, with flexbg,
 +  * different block groups too. If they are contiguous, with flexbg,
   * they could still cross a block group boundary.
   *
   * Also account for superblock, inode, quota and xattr blocks
@@@ -5452,7 -5446,7 +5452,7 @@@ int ext4_writepage_trans_blocks(struct 
   * Calculate the journal credits for a chunk of data modification.
   *
   * This is called from DIO, fallocate or whoever is calling
-  * ext4_get_blocks() to map/allocate a chunk of contigous disk blocks.
+  * ext4_get_blocks() to map/allocate a chunk of contiguous disk blocks.
   *
   * journal buffers for data blocks are not included here, as DIO
   * and fallocate do not need to journal data buffers.
diff --combined fs/ocfs2/refcounttree.c
@@@ -2431,7 -2431,7 +2431,7 @@@ out
   * we are going to touch and whether we need to create new blocks.
   *
   * Normally the refcount blocks that store these refcounts should be
-  * continguous also, so that we can get the number easily.
+  * contiguous also, so that we can get the number easily.
   * As for meta_ac, we will at most split 2 refcount records and add
   * 2 more refcount blocks, so just check it in a rough way.
   *
@@@ -3743,9 -3743,6 +3743,9 @@@ static int ocfs2_attach_refcount_tree(s
                goto out;
        }
  
 +      if (oi->ip_dyn_features & OCFS2_INLINE_DATA_FL)
 +              goto attach_xattr;
 +
        ocfs2_init_dinode_extent_tree(&di_et, INODE_CACHE(inode), di_bh);
  
        size = i_size_read(inode);
                cpos += num_clusters;
        }
  
 +attach_xattr:
        if (oi->ip_dyn_features & OCFS2_HAS_XATTR_FL) {
                ret = ocfs2_xattr_attach_refcount_tree(inode, di_bh,
                                                       &ref_tree->rf_ci,
        return ret;
  }
  
 +static int ocfs2_duplicate_inline_data(struct inode *s_inode,
 +                                     struct buffer_head *s_bh,
 +                                     struct inode *t_inode,
 +                                     struct buffer_head *t_bh)
 +{
 +      int ret;
 +      handle_t *handle;
 +      struct ocfs2_super *osb = OCFS2_SB(s_inode->i_sb);
 +      struct ocfs2_dinode *s_di = (struct ocfs2_dinode *)s_bh->b_data;
 +      struct ocfs2_dinode *t_di = (struct ocfs2_dinode *)t_bh->b_data;
 +
 +      BUG_ON(!(OCFS2_I(s_inode)->ip_dyn_features & OCFS2_INLINE_DATA_FL));
 +
 +      handle = ocfs2_start_trans(osb, OCFS2_INODE_UPDATE_CREDITS);
 +      if (IS_ERR(handle)) {
 +              ret = PTR_ERR(handle);
 +              mlog_errno(ret);
 +              goto out;
 +      }
 +
 +      ret = ocfs2_journal_access_di(handle, INODE_CACHE(t_inode), t_bh,
 +                                    OCFS2_JOURNAL_ACCESS_WRITE);
 +      if (ret) {
 +              mlog_errno(ret);
 +              goto out_commit;
 +      }
 +
 +      t_di->id2.i_data.id_count = s_di->id2.i_data.id_count;
 +      memcpy(t_di->id2.i_data.id_data, s_di->id2.i_data.id_data,
 +             le16_to_cpu(s_di->id2.i_data.id_count));
 +      spin_lock(&OCFS2_I(t_inode)->ip_lock);
 +      OCFS2_I(t_inode)->ip_dyn_features |= OCFS2_INLINE_DATA_FL;
 +      t_di->i_dyn_features = cpu_to_le16(OCFS2_I(t_inode)->ip_dyn_features);
 +      spin_unlock(&OCFS2_I(t_inode)->ip_lock);
 +
 +      ocfs2_journal_dirty(handle, t_bh);
 +
 +out_commit:
 +      ocfs2_commit_trans(osb, handle);
 +out:
 +      return ret;
 +}
 +
  static int ocfs2_duplicate_extent_list(struct inode *s_inode,
                                struct inode *t_inode,
                                struct buffer_head *t_bh,
@@@ -4044,14 -3997,6 +4044,14 @@@ static int ocfs2_create_reflink_node(st
                goto out;
        }
  
 +      if (OCFS2_I(s_inode)->ip_dyn_features & OCFS2_INLINE_DATA_FL) {
 +              ret = ocfs2_duplicate_inline_data(s_inode, s_bh,
 +                                                t_inode, t_bh);
 +              if (ret)
 +                      mlog_errno(ret);
 +              goto out;
 +      }
 +
        ret = ocfs2_lock_refcount_tree(osb, le64_to_cpu(di->i_refcount_loc),
                                       1, &ref_tree, &ref_root_bh);
        if (ret) {
                goto out_unlock_refcount;
        }
  
 -      ret = ocfs2_complete_reflink(s_inode, s_bh, t_inode, t_bh, preserve);
 -      if (ret)
 -              mlog_errno(ret);
 -
  out_unlock_refcount:
        ocfs2_unlock_refcount_tree(osb, ref_tree, 1);
        brelse(ref_root_bh);
@@@ -4119,17 -4068,9 +4119,17 @@@ static int __ocfs2_reflink(struct dentr
                ret = ocfs2_reflink_xattrs(inode, old_bh,
                                           new_inode, new_bh,
                                           preserve);
 -              if (ret)
 +              if (ret) {
                        mlog_errno(ret);
 +                      goto inode_unlock;
 +              }
        }
 +
 +      ret = ocfs2_complete_reflink(inode, old_bh,
 +                                   new_inode, new_bh, preserve);
 +      if (ret)
 +              mlog_errno(ret);
 +
  inode_unlock:
        ocfs2_inode_unlock(new_inode, 1);
        brelse(new_bh);
@@@ -893,6 -893,7 +893,6 @@@ struct sctp_transport 
         */
        /* RTO         : The current retransmission timeout value.  */
        unsigned long rto;
 -      unsigned long last_rto;
  
        __u32 rtt;              /* This is the most recent RTT.  */
  
        int init_sent_count;
  
        /* state       : The current state of this destination,
-        *             : i.e. SCTP_ACTIVE, SCTP_INACTIVE, SCTP_UNKOWN.
+        *             : i.e. SCTP_ACTIVE, SCTP_INACTIVE, SCTP_UNKNOWN.
         */
        int state;
  
@@@ -1979,7 -1980,7 +1979,7 @@@ void sctp_assoc_set_primary(struct sctp
  void sctp_assoc_del_nonprimary_peers(struct sctp_association *,
                                    struct sctp_transport *);
  int sctp_assoc_set_bind_addr_from_ep(struct sctp_association *,
 -                                   gfp_t);
 +                                   sctp_scope_t, gfp_t);
  int sctp_assoc_set_bind_addr_from_cookie(struct sctp_association *,
                                         struct sctp_cookie*,
                                         gfp_t gfp);
diff --combined kernel/irq/spurious.c
@@@ -104,7 -104,7 +104,7 @@@ static int misrouted_irq(int irq
        return ok;
  }
  
 -static void poll_all_shared_irqs(void)
 +static void poll_spurious_irqs(unsigned long dummy)
  {
        struct irq_desc *desc;
        int i;
                if (!(status & IRQ_SPURIOUS_DISABLED))
                        continue;
  
 +              local_irq_disable();
                try_one_irq(i, desc);
 +              local_irq_enable();
        }
 -}
 -
 -static void poll_spurious_irqs(unsigned long dummy)
 -{
 -      poll_all_shared_irqs();
  
        mod_timer(&poll_spurious_irq_timer,
                  jiffies + POLL_SPURIOUS_IRQ_INTERVAL);
  }
  
 -#ifdef CONFIG_DEBUG_SHIRQ
 -void debug_poll_all_shared_irqs(void)
 -{
 -      poll_all_shared_irqs();
 -}
 -#endif
 -
  /*
   * If 99,900 of the previous 100,000 interrupts have not been handled
   * then assume that the IRQ is stuck in some manner. Drop a diagnostic
@@@ -220,7 -230,7 +220,7 @@@ void note_interrupt(unsigned int irq, s
                /*
                 * If we are seeing only the odd spurious IRQ caused by
                 * bus asynchronicity then don't eventually trigger an error,
-                * otherwise the couter becomes a doomsday timer for otherwise
+                * otherwise the counter becomes a doomsday timer for otherwise
                 * working systems
                 */
                if (time_after(jiffies, desc->last_unhandled + HZ/10))
diff --combined kernel/perf_event.c
@@@ -28,8 -28,6 +28,8 @@@
  #include <linux/anon_inodes.h>
  #include <linux/kernel_stat.h>
  #include <linux/perf_event.h>
 +#include <linux/ftrace_event.h>
 +#include <linux/hw_breakpoint.h>
  
  #include <asm/irq_regs.h>
  
@@@ -246,49 -244,6 +246,49 @@@ static void perf_unpin_context(struct p
        put_ctx(ctx);
  }
  
 +static inline u64 perf_clock(void)
 +{
 +      return cpu_clock(smp_processor_id());
 +}
 +
 +/*
 + * Update the record of the current time in a context.
 + */
 +static void update_context_time(struct perf_event_context *ctx)
 +{
 +      u64 now = perf_clock();
 +
 +      ctx->time += now - ctx->timestamp;
 +      ctx->timestamp = now;
 +}
 +
 +/*
 + * Update the total_time_enabled and total_time_running fields for an event.
 + */
 +static void update_event_times(struct perf_event *event)
 +{
 +      struct perf_event_context *ctx = event->ctx;
 +      u64 run_end;
 +
 +      if (event->state < PERF_EVENT_STATE_INACTIVE ||
 +          event->group_leader->state < PERF_EVENT_STATE_INACTIVE)
 +              return;
 +
 +      if (ctx->is_active)
 +              run_end = ctx->time;
 +      else
 +              run_end = event->tstamp_stopped;
 +
 +      event->total_time_enabled = run_end - event->tstamp_enabled;
 +
 +      if (event->state == PERF_EVENT_STATE_INACTIVE)
 +              run_end = event->tstamp_stopped;
 +      else
 +              run_end = ctx->time;
 +
 +      event->total_time_running = run_end - event->tstamp_running;
 +}
 +
  /*
   * Add an event to the lists for its context.
   * Must be called with ctx->mutex and ctx->lock held.
@@@ -337,18 -292,6 +337,18 @@@ list_del_event(struct perf_event *event
        if (event->group_leader != event)
                event->group_leader->nr_siblings--;
  
 +      update_event_times(event);
 +
 +      /*
 +       * If event was in error state, then keep it
 +       * that way, otherwise bogus counts will be
 +       * returned on read(). The only way to get out
 +       * of error state is by explicit re-enabling
 +       * of the event
 +       */
 +      if (event->state > PERF_EVENT_STATE_OFF)
 +              event->state = PERF_EVENT_STATE_OFF;
 +
        /*
         * If this was a group event with sibling events then
         * upgrade the siblings to singleton events by adding them
@@@ -476,7 -419,7 +476,7 @@@ static void perf_event_remove_from_cont
        if (!task) {
                /*
                 * Per cpu events are removed via an smp call and
-                * the removal is always sucessful.
+                * the removal is always successful.
                 */
                smp_call_function_single(event->cpu,
                                         __perf_event_remove_from_context,
@@@ -502,11 -445,50 +502,11 @@@ retry
         * can remove the event safely, if the call above did not
         * succeed.
         */
 -      if (!list_empty(&event->group_entry)) {
 +      if (!list_empty(&event->group_entry))
                list_del_event(event, ctx);
        spin_unlock_irq(&ctx->lock);
  }
  
 -static inline u64 perf_clock(void)
 -{
 -      return cpu_clock(smp_processor_id());
 -}
 -
 -/*
 - * Update the record of the current time in a context.
 - */
 -static void update_context_time(struct perf_event_context *ctx)
 -{
 -      u64 now = perf_clock();
 -
 -      ctx->time += now - ctx->timestamp;
 -      ctx->timestamp = now;
 -}
 -
 -/*
 - * Update the total_time_enabled and total_time_running fields for a event.
 - */
 -static void update_event_times(struct perf_event *event)
 -{
 -      struct perf_event_context *ctx = event->ctx;
 -      u64 run_end;
 -
 -      if (event->state < PERF_EVENT_STATE_INACTIVE ||
 -          event->group_leader->state < PERF_EVENT_STATE_INACTIVE)
 -              return;
 -
 -      event->total_time_enabled = ctx->time - event->tstamp_enabled;
 -
 -      if (event->state == PERF_EVENT_STATE_INACTIVE)
 -              run_end = event->tstamp_stopped;
 -      else
 -              run_end = ctx->time;
 -
 -      event->total_time_running = run_end - event->tstamp_running;
 -}
 -
  /*
   * Update total_time_enabled and total_time_running for all events in a group.
   */
@@@ -845,7 -827,7 +845,7 @@@ perf_install_in_context(struct perf_eve
        if (!task) {
                /*
                 * Per cpu events are installed via an smp call and
-                * the install is always sucessful.
+                * the install is always successful.
                 */
                smp_call_function_single(cpu, __perf_install_in_context,
                                         event, 1);
@@@ -1049,10 -1031,10 +1049,10 @@@ void __perf_event_sched_out(struct perf
        update_context_time(ctx);
  
        perf_disable();
 -      if (ctx->nr_active)
 +      if (ctx->nr_active) {
                list_for_each_entry(event, &ctx->group_list, group_entry)
                        group_sched_out(event, cpuctx, ctx);
 -
 +      }
        perf_enable();
   out:
        spin_unlock(&ctx->lock);
@@@ -1077,6 -1059,8 +1077,6 @@@ static int context_equiv(struct perf_ev
                && !ctx1->pin_count && !ctx2->pin_count;
  }
  
 -static void __perf_event_read(void *event);
 -
  static void __perf_event_sync_stat(struct perf_event *event,
                                     struct perf_event *next_event)
  {
         */
        switch (event->state) {
        case PERF_EVENT_STATE_ACTIVE:
 -              __perf_event_read(event);
 -              break;
 +              event->pmu->read(event);
 +              /* fall-through */
  
        case PERF_EVENT_STATE_INACTIVE:
                update_event_times(event);
@@@ -1134,8 -1118,6 +1134,8 @@@ static void perf_event_sync_stat(struc
        if (!ctx->nr_stat)
                return;
  
 +      update_context_time(ctx);
 +
        event = list_first_entry(&ctx->event_list,
                                   struct perf_event, event_entry);
  
@@@ -1179,6 -1161,8 +1179,6 @@@ void perf_event_task_sched_out(struct t
        if (likely(!ctx || !cpuctx->task_ctx))
                return;
  
 -      update_context_time(ctx);
 -
        rcu_read_lock();
        parent = rcu_dereference(ctx->parent_ctx);
        next_ctx = next->perf_event_ctxp;
@@@ -1531,6 -1515,7 +1531,6 @@@ static void __perf_event_read(void *inf
        struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
        struct perf_event *event = info;
        struct perf_event_context *ctx = event->ctx;
 -      unsigned long flags;
  
        /*
         * If this is a task context, we need to check whether it is
        if (ctx->task && cpuctx->task_ctx != ctx)
                return;
  
 -      local_irq_save(flags);
 -      if (ctx->is_active)
 -              update_context_time(ctx);
 -      event->pmu->read(event);
 +      spin_lock(&ctx->lock);
 +      update_context_time(ctx);
        update_event_times(event);
 -      local_irq_restore(flags);
 +      spin_unlock(&ctx->lock);
 +
 +      event->pmu->read(event);
  }
  
  static u64 perf_event_read(struct perf_event *event)
                smp_call_function_single(event->oncpu,
                                         __perf_event_read, event, 1);
        } else if (event->state == PERF_EVENT_STATE_INACTIVE) {
 +              struct perf_event_context *ctx = event->ctx;
 +              unsigned long flags;
 +
 +              spin_lock_irqsave(&ctx->lock, flags);
 +              update_context_time(ctx);
                update_event_times(event);
 +              spin_unlock_irqrestore(&ctx->lock, flags);
        }
  
        return atomic64_read(&event->count);
@@@ -1679,8 -1658,6 +1679,8 @@@ static struct perf_event_context *find_
        return ERR_PTR(err);
  }
  
 +static void perf_event_free_filter(struct perf_event *event);
 +
  static void free_event_rcu(struct rcu_head *head)
  {
        struct perf_event *event;
        event = container_of(head, struct perf_event, rcu_head);
        if (event->ns)
                put_pid_ns(event->ns);
 +      perf_event_free_filter(event);
        kfree(event);
  }
  
@@@ -1720,10 -1696,16 +1720,10 @@@ static void free_event(struct perf_even
        call_rcu(&event->rcu_head, free_event_rcu);
  }
  
 -/*
 - * Called when the last reference to the file is gone.
 - */
 -static int perf_release(struct inode *inode, struct file *file)
 +int perf_event_release_kernel(struct perf_event *event)
  {
 -      struct perf_event *event = file->private_data;
        struct perf_event_context *ctx = event->ctx;
  
 -      file->private_data = NULL;
 -
        WARN_ON_ONCE(ctx->parent_ctx);
        mutex_lock(&ctx->mutex);
        perf_event_remove_from_context(event);
  
        return 0;
  }
 +EXPORT_SYMBOL_GPL(perf_event_release_kernel);
 +
 +/*
 + * Called when the last reference to the file is gone.
 + */
 +static int perf_release(struct inode *inode, struct file *file)
 +{
 +      struct perf_event *event = file->private_data;
 +
 +      file->private_data = NULL;
 +
 +      return perf_event_release_kernel(event);
 +}
  
  static int perf_event_read_size(struct perf_event *event)
  {
        return size;
  }
  
 -static u64 perf_event_read_value(struct perf_event *event)
 +u64 perf_event_read_value(struct perf_event *event, u64 *enabled, u64 *running)
  {
        struct perf_event *child;
        u64 total = 0;
  
 +      *enabled = 0;
 +      *running = 0;
 +
 +      mutex_lock(&event->child_mutex);
        total += perf_event_read(event);
 -      list_for_each_entry(child, &event->child_list, child_list)
 +      *enabled += event->total_time_enabled +
 +                      atomic64_read(&event->child_total_time_enabled);
 +      *running += event->total_time_running +
 +                      atomic64_read(&event->child_total_time_running);
 +
 +      list_for_each_entry(child, &event->child_list, child_list) {
                total += perf_event_read(child);
 +              *enabled += child->total_time_enabled;
 +              *running += child->total_time_running;
 +      }
 +      mutex_unlock(&event->child_mutex);
  
        return total;
  }
 -
 -static int perf_event_read_entry(struct perf_event *event,
 -                                 u64 read_format, char __user *buf)
 -{
 -      int n = 0, count = 0;
 -      u64 values[2];
 -
 -      values[n++] = perf_event_read_value(event);
 -      if (read_format & PERF_FORMAT_ID)
 -              values[n++] = primary_event_id(event);
 -
 -      count = n * sizeof(u64);
 -
 -      if (copy_to_user(buf, values, count))
 -              return -EFAULT;
 -
 -      return count;
 -}
 +EXPORT_SYMBOL_GPL(perf_event_read_value);
  
  static int perf_event_read_group(struct perf_event *event,
                                   u64 read_format, char __user *buf)
  {
        struct perf_event *leader = event->group_leader, *sub;
 -      int n = 0, size = 0, err = -EFAULT;
 -      u64 values[3];
 +      int n = 0, size = 0, ret = -EFAULT;
 +      struct perf_event_context *ctx = leader->ctx;
 +      u64 values[5];
 +      u64 count, enabled, running;
 +
 +      mutex_lock(&ctx->mutex);
 +      count = perf_event_read_value(leader, &enabled, &running);
  
        values[n++] = 1 + leader->nr_siblings;
 -      if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
 -              values[n++] = leader->total_time_enabled +
 -                      atomic64_read(&leader->child_total_time_enabled);
 -      }
 -      if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
 -              values[n++] = leader->total_time_running +
 -                      atomic64_read(&leader->child_total_time_running);
 -      }
 +      if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
 +              values[n++] = enabled;
 +      if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
 +              values[n++] = running;
 +      values[n++] = count;
 +      if (read_format & PERF_FORMAT_ID)
 +              values[n++] = primary_event_id(leader);
  
        size = n * sizeof(u64);
  
        if (copy_to_user(buf, values, size))
 -              return -EFAULT;
 -
 -      err = perf_event_read_entry(leader, read_format, buf + size);
 -      if (err < 0)
 -              return err;
 +              goto unlock;
  
 -      size += err;
 +      ret = size;
  
        list_for_each_entry(sub, &leader->sibling_list, group_entry) {
 -              err = perf_event_read_entry(sub, read_format,
 -                              buf + size);
 -              if (err < 0)
 -                      return err;
 +              n = 0;
 +
 +              values[n++] = perf_event_read_value(sub, &enabled, &running);
 +              if (read_format & PERF_FORMAT_ID)
 +                      values[n++] = primary_event_id(sub);
 +
 +              size = n * sizeof(u64);
  
 -              size += err;
 +              if (copy_to_user(buf + ret, values, size)) {
 +                      ret = -EFAULT;
 +                      goto unlock;
 +              }
 +
 +              ret += size;
        }
 +unlock:
 +      mutex_unlock(&ctx->mutex);
  
 -      return size;
 +      return ret;
  }
  
  static int perf_event_read_one(struct perf_event *event,
                                 u64 read_format, char __user *buf)
  {
 +      u64 enabled, running;
        u64 values[4];
        int n = 0;
  
 -      values[n++] = perf_event_read_value(event);
 -      if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
 -              values[n++] = event->total_time_enabled +
 -                      atomic64_read(&event->child_total_time_enabled);
 -      }
 -      if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
 -              values[n++] = event->total_time_running +
 -                      atomic64_read(&event->child_total_time_running);
 -      }
 +      values[n++] = perf_event_read_value(event, &enabled, &running);
 +      if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
 +              values[n++] = enabled;
 +      if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
 +              values[n++] = running;
        if (read_format & PERF_FORMAT_ID)
                values[n++] = primary_event_id(event);
  
@@@ -1895,10 -1861,12 +1895,10 @@@ perf_read_hw(struct perf_event *event, 
                return -ENOSPC;
  
        WARN_ON_ONCE(event->ctx->parent_ctx);
 -      mutex_lock(&event->child_mutex);
        if (read_format & PERF_FORMAT_GROUP)
                ret = perf_event_read_group(event, read_format, buf);
        else
                ret = perf_event_read_one(event, read_format, buf);
 -      mutex_unlock(&event->child_mutex);
  
        return ret;
  }
@@@ -2006,8 -1974,7 +2006,8 @@@ unlock
        return ret;
  }
  
 -int perf_event_set_output(struct perf_event *event, int output_fd);
 +static int perf_event_set_output(struct perf_event *event, int output_fd);
 +static int perf_event_set_filter(struct perf_event *event, void __user *arg);
  
  static long perf_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
  {
        case PERF_EVENT_IOC_SET_OUTPUT:
                return perf_event_set_output(event, arg);
  
 +      case PERF_EVENT_IOC_SET_FILTER:
 +              return perf_event_set_filter(event, (void __user *)arg);
 +
        default:
                return -ENOTTY;
        }
@@@ -2210,7 -2174,6 +2210,7 @@@ static void perf_mmap_data_free(struct 
        perf_mmap_free_page((unsigned long)data->user_page);
        for (i = 0; i < data->nr_pages; i++)
                perf_mmap_free_page((unsigned long)data->data_pages[i]);
 +      kfree(data);
  }
  
  #else
@@@ -2251,7 -2214,6 +2251,7 @@@ static void perf_mmap_data_free_work(st
                perf_mmap_unmark_page(base + (i * PAGE_SIZE));
  
        vfree(base);
 +      kfree(data);
  }
  
  static void perf_mmap_data_free(struct perf_mmap_data *data)
@@@ -2345,7 -2307,7 +2345,7 @@@ perf_mmap_data_init(struct perf_event *
        }
  
        if (!data->watermark)
 -              data->watermark = max_t(long, PAGE_SIZE, max_size / 2);
 +              data->watermark = max_size / 2;
  
  
        rcu_assign_pointer(event->data, data);
@@@ -2357,6 -2319,7 +2357,6 @@@ static void perf_mmap_data_free_rcu(str
  
        data = container_of(rcu_head, struct perf_mmap_data, rcu_head);
        perf_mmap_data_free(data);
 -      kfree(data);
  }
  
  static void perf_mmap_data_release(struct perf_event *event)
@@@ -2703,21 -2666,20 +2703,21 @@@ static void perf_output_wakeup(struct p
  static void perf_output_lock(struct perf_output_handle *handle)
  {
        struct perf_mmap_data *data = handle->data;
 -      int cpu;
 +      int cur, cpu = get_cpu();
  
        handle->locked = 0;
  
 -      local_irq_save(handle->flags);
 -      cpu = smp_processor_id();
 -
 -      if (in_nmi() && atomic_read(&data->lock) == cpu)
 -              return;
 +      for (;;) {
 +              cur = atomic_cmpxchg(&data->lock, -1, cpu);
 +              if (cur == -1) {
 +                      handle->locked = 1;
 +                      break;
 +              }
 +              if (cur == cpu)
 +                      break;
  
 -      while (atomic_cmpxchg(&data->lock, -1, cpu) != -1)
                cpu_relax();
 -
 -      handle->locked = 1;
 +      }
  }
  
  static void perf_output_unlock(struct perf_output_handle *handle)
@@@ -2763,7 -2725,7 +2763,7 @@@ again
        if (atomic_xchg(&data->wakeup, 0))
                perf_output_wakeup(handle);
  out:
 -      local_irq_restore(handle->flags);
 +      put_cpu();
  }
  
  void perf_output_copy(struct perf_output_handle *handle,
@@@ -3274,10 -3236,15 +3274,10 @@@ static void perf_event_task_ctx(struct 
  {
        struct perf_event *event;
  
 -      if (system_state != SYSTEM_RUNNING || list_empty(&ctx->event_list))
 -              return;
 -
 -      rcu_read_lock();
        list_for_each_entry_rcu(event, &ctx->event_list, event_entry) {
                if (perf_event_task_match(event))
                        perf_event_task_output(event, task_event);
        }
 -      rcu_read_unlock();
  }
  
  static void perf_event_task_event(struct perf_task_event *task_event)
        struct perf_cpu_context *cpuctx;
        struct perf_event_context *ctx = task_event->task_ctx;
  
 +      rcu_read_lock();
        cpuctx = &get_cpu_var(perf_cpu_context);
        perf_event_task_ctx(&cpuctx->ctx, task_event);
        put_cpu_var(perf_cpu_context);
  
 -      rcu_read_lock();
        if (!ctx)
                ctx = rcu_dereference(task_event->task->perf_event_ctxp);
        if (ctx)
@@@ -3381,10 -3348,15 +3381,10 @@@ static void perf_event_comm_ctx(struct 
  {
        struct perf_event *event;
  
 -      if (system_state != SYSTEM_RUNNING || list_empty(&ctx->event_list))
 -              return;
 -
 -      rcu_read_lock();
        list_for_each_entry_rcu(event, &ctx->event_list, event_entry) {
                if (perf_event_comm_match(event))
                        perf_event_comm_output(event, comm_event);
        }
 -      rcu_read_unlock();
  }
  
  static void perf_event_comm_event(struct perf_comm_event *comm_event)
        char comm[TASK_COMM_LEN];
  
        memset(comm, 0, sizeof(comm));
 -      strncpy(comm, comm_event->task->comm, sizeof(comm));
 +      strlcpy(comm, comm_event->task->comm, sizeof(comm));
        size = ALIGN(strlen(comm)+1, sizeof(u64));
  
        comm_event->comm = comm;
  
        comm_event->event_id.header.size = sizeof(comm_event->event_id) + size;
  
 +      rcu_read_lock();
        cpuctx = &get_cpu_var(perf_cpu_context);
        perf_event_comm_ctx(&cpuctx->ctx, comm_event);
        put_cpu_var(perf_cpu_context);
  
 -      rcu_read_lock();
        /*
         * doesn't really matter which of the child contexts the
         * events ends up in.
@@@ -3500,10 -3472,15 +3500,10 @@@ static void perf_event_mmap_ctx(struct 
  {
        struct perf_event *event;
  
 -      if (system_state != SYSTEM_RUNNING || list_empty(&ctx->event_list))
 -              return;
 -
 -      rcu_read_lock();
        list_for_each_entry_rcu(event, &ctx->event_list, event_entry) {
                if (perf_event_mmap_match(event, mmap_event))
                        perf_event_mmap_output(event, mmap_event);
        }
 -      rcu_read_unlock();
  }
  
  static void perf_event_mmap_event(struct perf_mmap_event *mmap_event)
@@@ -3559,11 -3536,11 +3559,11 @@@ got_name
  
        mmap_event->event_id.header.size = sizeof(mmap_event->event_id) + size;
  
 +      rcu_read_lock();
        cpuctx = &get_cpu_var(perf_cpu_context);
        perf_event_mmap_ctx(&cpuctx->ctx, mmap_event);
        put_cpu_var(perf_cpu_context);
  
 -      rcu_read_lock();
        /*
         * doesn't really matter which of the child contexts the
         * events ends up in.
@@@ -3702,11 -3679,7 +3702,11 @@@ static int __perf_event_overflow(struc
                        perf_event_disable(event);
        }
  
 -      perf_event_output(event, nmi, data, regs);
 +      if (event->overflow_handler)
 +              event->overflow_handler(event, nmi, data, regs);
 +      else
 +              perf_event_output(event, nmi, data, regs);
 +
        return ret;
  }
  
@@@ -3751,16 -3724,16 +3751,16 @@@ again
        return nr;
  }
  
 -static void perf_swevent_overflow(struct perf_event *event,
 +static void perf_swevent_overflow(struct perf_event *event, u64 overflow,
                                    int nmi, struct perf_sample_data *data,
                                    struct pt_regs *regs)
  {
        struct hw_perf_event *hwc = &event->hw;
        int throttle = 0;
 -      u64 overflow;
  
        data->period = event->hw.last_period;
 -      overflow = perf_swevent_set_period(event);
 +      if (!overflow)
 +              overflow = perf_swevent_set_period(event);
  
        if (hwc->interrupts == MAX_INTERRUPTS)
                return;
@@@ -3793,19 -3766,14 +3793,19 @@@ static void perf_swevent_add(struct per
  
        atomic64_add(nr, &event->count);
  
 +      if (!regs)
 +              return;
 +
        if (!hwc->sample_period)
                return;
  
 -      if (!regs)
 +      if (nr == 1 && hwc->sample_period == 1 && !event->attr.freq)
 +              return perf_swevent_overflow(event, 1, nmi, data, regs);
 +
 +      if (atomic64_add_negative(nr, &hwc->period_left))
                return;
  
 -      if (!atomic64_add_negative(nr, &hwc->period_left))
 -              perf_swevent_overflow(event, nmi, data, regs);
 +      perf_swevent_overflow(event, 0, nmi, data, regs);
  }
  
  static int perf_swevent_is_counting(struct perf_event *event)
        return 1;
  }
  
 +static int perf_tp_event_match(struct perf_event *event,
 +                              struct perf_sample_data *data);
 +
 +static int perf_exclude_event(struct perf_event *event,
 +                            struct pt_regs *regs)
 +{
 +      if (regs) {
 +              if (event->attr.exclude_user && user_mode(regs))
 +                      return 1;
 +
 +              if (event->attr.exclude_kernel && !user_mode(regs))
 +                      return 1;
 +      }
 +
 +      return 0;
 +}
 +
  static int perf_swevent_match(struct perf_event *event,
                                enum perf_type_id type,
 -                              u32 event_id, struct pt_regs *regs)
 +                              u32 event_id,
 +                              struct perf_sample_data *data,
 +                              struct pt_regs *regs)
  {
        if (!perf_swevent_is_counting(event))
                return 0;
  
        if (event->attr.type != type)
                return 0;
 +
        if (event->attr.config != event_id)
                return 0;
  
 -      if (regs) {
 -              if (event->attr.exclude_user && user_mode(regs))
 -                      return 0;
 +      if (perf_exclude_event(event, regs))
 +              return 0;
  
 -              if (event->attr.exclude_kernel && !user_mode(regs))
 -                      return 0;
 -      }
 +      if (event->attr.type == PERF_TYPE_TRACEPOINT &&
 +          !perf_tp_event_match(event, data))
 +              return 0;
  
        return 1;
  }
@@@ -3888,59 -3837,49 +3888,59 @@@ static void perf_swevent_ctx_event(stru
  {
        struct perf_event *event;
  
 -      if (system_state != SYSTEM_RUNNING || list_empty(&ctx->event_list))
 -              return;
 -
 -      rcu_read_lock();
        list_for_each_entry_rcu(event, &ctx->event_list, event_entry) {
 -              if (perf_swevent_match(event, type, event_id, regs))
 +              if (perf_swevent_match(event, type, event_id, data, regs))
                        perf_swevent_add(event, nr, nmi, data, regs);
        }
 -      rcu_read_unlock();
  }
  
 -static int *perf_swevent_recursion_context(struct perf_cpu_context *cpuctx)
 +int perf_swevent_get_recursion_context(void)
  {
 +      struct perf_cpu_context *cpuctx = &get_cpu_var(perf_cpu_context);
 +      int rctx;
 +
        if (in_nmi())
 -              return &cpuctx->recursion[3];
 +              rctx = 3;
 +      else if (in_irq())
 +              rctx = 2;
 +      else if (in_softirq())
 +              rctx = 1;
 +      else
 +              rctx = 0;
 +
 +      if (cpuctx->recursion[rctx]) {
 +              put_cpu_var(perf_cpu_context);
 +              return -1;
 +      }
  
 -      if (in_irq())
 -              return &cpuctx->recursion[2];
 +      cpuctx->recursion[rctx]++;
 +      barrier();
  
 -      if (in_softirq())
 -              return &cpuctx->recursion[1];
 +      return rctx;
 +}
 +EXPORT_SYMBOL_GPL(perf_swevent_get_recursion_context);
  
 -      return &cpuctx->recursion[0];
 +void perf_swevent_put_recursion_context(int rctx)
 +{
 +      struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
 +      barrier();
 +      cpuctx->recursion[rctx]--;
 +      put_cpu_var(perf_cpu_context);
  }
 +EXPORT_SYMBOL_GPL(perf_swevent_put_recursion_context);
  
  static void do_perf_sw_event(enum perf_type_id type, u32 event_id,
                                    u64 nr, int nmi,
                                    struct perf_sample_data *data,
                                    struct pt_regs *regs)
  {
 -      struct perf_cpu_context *cpuctx = &get_cpu_var(perf_cpu_context);
 -      int *recursion = perf_swevent_recursion_context(cpuctx);
 +      struct perf_cpu_context *cpuctx;
        struct perf_event_context *ctx;
  
 -      if (*recursion)
 -              goto out;
 -
 -      (*recursion)++;
 -      barrier();
 -
 +      cpuctx = &__get_cpu_var(perf_cpu_context);
 +      rcu_read_lock();
        perf_swevent_ctx_event(&cpuctx->ctx, type, event_id,
                                 nr, nmi, data, regs);
 -      rcu_read_lock();
        /*
         * doesn't really matter which of the child contexts the
         * events ends up in.
        if (ctx)
                perf_swevent_ctx_event(ctx, type, event_id, nr, nmi, data, regs);
        rcu_read_unlock();
 -
 -      barrier();
 -      (*recursion)--;
 -
 -out:
 -      put_cpu_var(perf_cpu_context);
  }
  
  void __perf_sw_event(u32 event_id, u64 nr, int nmi,
                            struct pt_regs *regs, u64 addr)
  {
 -      struct perf_sample_data data = {
 -              .addr = addr,
 -      };
 +      struct perf_sample_data data;
 +      int rctx;
  
 -      do_perf_sw_event(PERF_TYPE_SOFTWARE, event_id, nr, nmi,
 -                              &data, regs);
 +      rctx = perf_swevent_get_recursion_context();
 +      if (rctx < 0)
 +              return;
 +
 +      data.addr = addr;
 +      data.raw  = NULL;
 +
 +      do_perf_sw_event(PERF_TYPE_SOFTWARE, event_id, nr, nmi, &data, regs);
 +
 +      perf_swevent_put_recursion_context(rctx);
  }
  
  static void perf_swevent_read(struct perf_event *event)
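
The recursion guard above replaces the open-coded pointer-into-cpuctx scheme with an explicit get/put pair: the current context level (task, softirq, irq, NMI) selects one of four per-CPU counters, a second event arriving at the same level is dropped, and the returned index is handed back on the put side. A small userspace sketch of the same idea, with hypothetical names and thread-local counters standing in for the per-CPU array (not from the patch):

#include <stdio.h>

enum ctx_level { CTX_TASK, CTX_SOFTIRQ, CTX_IRQ, CTX_NMI, CTX_LEVELS };

static _Thread_local int recursion[CTX_LEVELS];

static int get_recursion_context(enum ctx_level level)
{
        if (recursion[level])
                return -1;              /* already handling an event at this level */
        recursion[level]++;
        return (int)level;              /* the "rctx" cookie handed back to put */
}

static void put_recursion_context(int rctx)
{
        recursion[rctx]--;
}

int main(void)
{
        int rctx = get_recursion_context(CTX_TASK);

        if (rctx >= 0) {
                /* a nested attempt at the same level is rejected */
                printf("nested: %d\n", get_recursion_context(CTX_TASK));
                put_recursion_context(rctx);
        }
        return 0;
}

Exporting the pair lets callers take the guard once around an entire event, which is why do_perf_sw_event() no longer does it internally and the tracepoint path notes it is "already protected against recursion".
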
@@@ -4011,7 -3949,6 +4011,7 @@@ static enum hrtimer_restart perf_sweven
        event->pmu->read(event);
  
        data.addr = 0;
 +      data.period = event->hw.last_period;
        regs = get_irq_regs();
        /*
         * In case we exclude kernel IPs or are somehow not in interrupt
@@@ -4171,7 -4108,6 +4171,7 @@@ static const struct pmu perf_ops_task_c
  };
  
  #ifdef CONFIG_EVENT_PROFILE
 +
  void perf_tp_event(int event_id, u64 addr, u64 count, void *record,
                          int entry_size)
  {
        if (!regs)
                regs = task_pt_regs(current);
  
 +      /* Trace events already protected against recursion */
        do_perf_sw_event(PERF_TYPE_TRACEPOINT, event_id, count, 1,
                                &data, regs);
  }
  EXPORT_SYMBOL_GPL(perf_tp_event);
  
 -extern int ftrace_profile_enable(int);
 -extern void ftrace_profile_disable(int);
 +static int perf_tp_event_match(struct perf_event *event,
 +                              struct perf_sample_data *data)
 +{
 +      void *record = data->raw->data;
 +
 +      if (likely(!event->filter) || filter_match_preds(event->filter, record))
 +              return 1;
 +      return 0;
 +}
  
  static void tp_perf_event_destroy(struct perf_event *event)
  {
@@@ -4229,99 -4157,11 +4229,99 @@@ static const struct pmu *tp_perf_event_
  
        return &perf_ops_generic;
  }
 +
 +static int perf_event_set_filter(struct perf_event *event, void __user *arg)
 +{
 +      char *filter_str;
 +      int ret;
 +
 +      if (event->attr.type != PERF_TYPE_TRACEPOINT)
 +              return -EINVAL;
 +
 +      filter_str = strndup_user(arg, PAGE_SIZE);
 +      if (IS_ERR(filter_str))
 +              return PTR_ERR(filter_str);
 +
 +      ret = ftrace_profile_set_filter(event, event->attr.config, filter_str);
 +
 +      kfree(filter_str);
 +      return ret;
 +}
 +
 +static void perf_event_free_filter(struct perf_event *event)
 +{
 +      ftrace_profile_free_filter(event);
 +}
 +
  #else
 +
 +static int perf_tp_event_match(struct perf_event *event,
 +                              struct perf_sample_data *data)
 +{
 +      return 1;
 +}
 +
  static const struct pmu *tp_perf_event_init(struct perf_event *event)
  {
        return NULL;
  }
 +
 +static int perf_event_set_filter(struct perf_event *event, void __user *arg)
 +{
 +      return -ENOENT;
 +}
 +
 +static void perf_event_free_filter(struct perf_event *event)
 +{
 +}
 +
 +#endif /* CONFIG_EVENT_PROFILE */
 +
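
With PERF_EVENT_IOC_SET_FILTER wired into the ioctl switch and ftrace_profile_set_filter() behind it, userspace can attach an ftrace-style filter string to a tracepoint event; non-tracepoint events get -EINVAL and kernels without CONFIG_EVENT_PROFILE return -ENOENT. A hypothetical userspace sketch of the call sequence — the tracepoint id and the filter expression are placeholders, and the installed headers are assumed to already carry the new ioctl definition:

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/syscall.h>
#include <linux/perf_event.h>

static int sys_perf_event_open(struct perf_event_attr *attr, pid_t pid,
                               int cpu, int group_fd, unsigned long flags)
{
        return syscall(__NR_perf_event_open, attr, pid, cpu, group_fd, flags);
}

int main(void)
{
        struct perf_event_attr attr;
        int fd;

        memset(&attr, 0, sizeof(attr));
        attr.size = sizeof(attr);
        attr.type = PERF_TYPE_TRACEPOINT;
        attr.config = 0;                /* placeholder: tracepoint id from debugfs */
        attr.sample_period = 1;

        fd = sys_perf_event_open(&attr, 0 /* current task */, -1, -1, 0);
        if (fd < 0) {
                perror("perf_event_open");
                return 1;
        }

        /* Only valid on PERF_TYPE_TRACEPOINT events. */
        if (ioctl(fd, PERF_EVENT_IOC_SET_FILTER, "common_pid != 0") < 0)
                perror("PERF_EVENT_IOC_SET_FILTER");

        close(fd);
        return 0;
}

On the kernel side the string is duplicated with strndup_user() (capped at PAGE_SIZE), handed to ftrace_profile_set_filter(), and freed again; perf_tp_event_match() then consults event->filter via filter_match_preds() on every sample.
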
 +#ifdef CONFIG_HAVE_HW_BREAKPOINT
 +static void bp_perf_event_destroy(struct perf_event *event)
 +{
 +      release_bp_slot(event);
 +}
 +
 +static const struct pmu *bp_perf_event_init(struct perf_event *bp)
 +{
 +      int err;
 +      /*
 +       * The breakpoint is already filled if we haven't created the counter
 +       * through perf syscall
 +       * FIXME: manage to get triggered to NULL if it comes from syscalls
 +       */
 +      if (!bp->callback)
 +              err = register_perf_hw_breakpoint(bp);
 +      else
 +              err = __register_perf_hw_breakpoint(bp);
 +      if (err)
 +              return ERR_PTR(err);
 +
 +      bp->destroy = bp_perf_event_destroy;
 +
 +      return &perf_ops_bp;
 +}
 +
 +void perf_bp_event(struct perf_event *bp, void *data)
 +{
 +      struct perf_sample_data sample;
 +      struct pt_regs *regs = data;
 +
 +      sample.addr = bp->attr.bp_addr;
 +
 +      if (!perf_exclude_event(bp, regs))
 +              perf_swevent_add(bp, 1, 1, &sample, regs);
 +}
 +#else
 +static const struct pmu *bp_perf_event_init(struct perf_event *bp)
 +{
 +      return NULL;
 +}
 +
 +void perf_bp_event(struct perf_event *bp, void *regs)
 +{
 +}
  #endif
  
  atomic_t perf_swevent_enabled[PERF_COUNT_SW_MAX];
@@@ -4368,8 -4208,6 +4368,8 @@@ static const struct pmu *sw_perf_event_
        case PERF_COUNT_SW_PAGE_FAULTS_MAJ:
        case PERF_COUNT_SW_CONTEXT_SWITCHES:
        case PERF_COUNT_SW_CPU_MIGRATIONS:
 +      case PERF_COUNT_SW_ALIGNMENT_FAULTS:
 +      case PERF_COUNT_SW_EMULATION_FAULTS:
                if (!event->parent) {
                        atomic_inc(&perf_swevent_enabled[event_id]);
                        event->destroy = sw_perf_event_destroy;
@@@ -4390,7 -4228,6 +4390,7 @@@ perf_event_alloc(struct perf_event_att
                   struct perf_event_context *ctx,
                   struct perf_event *group_leader,
                   struct perf_event *parent_event,
 +                 perf_callback_t callback,
                   gfp_t gfpflags)
  {
        const struct pmu *pmu;
  
        event->state            = PERF_EVENT_STATE_INACTIVE;
  
 +      if (!callback && parent_event)
 +              callback = parent_event->callback;
 +      
 +      event->callback = callback;
 +
        if (attr->disabled)
                event->state = PERF_EVENT_STATE_OFF;
  
                pmu = tp_perf_event_init(event);
                break;
  
 +      case PERF_TYPE_BREAKPOINT:
 +              pmu = bp_perf_event_init(event);
 +              break;
 +
 +
        default:
                break;
        }
@@@ -4589,7 -4416,7 +4589,7 @@@ err_size
        goto out;
  }
  
 -int perf_event_set_output(struct perf_event *event, int output_fd)
 +static int perf_event_set_output(struct perf_event *event, int output_fd)
  {
        struct perf_event *output_event = NULL;
        struct file *output_file = NULL;
@@@ -4719,7 -4546,7 +4719,7 @@@ SYSCALL_DEFINE5(perf_event_open
        }
  
        event = perf_event_alloc(&attr, cpu, ctx, group_leader,
 -                                   NULL, GFP_KERNEL);
 +                                   NULL, NULL, GFP_KERNEL);
        err = PTR_ERR(event);
        if (IS_ERR(event))
                goto err_put_context;
@@@ -4767,60 -4594,6 +4767,60 @@@ err_put_context
        return err;
  }
  
 +/**
 + * perf_event_create_kernel_counter
 + *
 + * @attr: attributes of the counter to create
 + * @cpu: cpu in which the counter is bound
 + * @pid: task to profile
 + */
 +struct perf_event *
 +perf_event_create_kernel_counter(struct perf_event_attr *attr, int cpu,
 +                               pid_t pid, perf_callback_t callback)
 +{
 +      struct perf_event *event;
 +      struct perf_event_context *ctx;
 +      int err;
 +
 +      /*
 +       * Get the target context (task or percpu):
 +       */
 +
 +      ctx = find_get_context(pid, cpu);
 +      if (IS_ERR(ctx)) {
 +              err = PTR_ERR(ctx);
 +              goto err_exit;
 +      }
 +
 +      event = perf_event_alloc(attr, cpu, ctx, NULL,
 +                                   NULL, callback, GFP_KERNEL);
 +      if (IS_ERR(event)) {
 +              err = PTR_ERR(event);
 +              goto err_put_context;
 +      }
 +
 +      event->filp = NULL;
 +      WARN_ON_ONCE(ctx->parent_ctx);
 +      mutex_lock(&ctx->mutex);
 +      perf_install_in_context(ctx, event, cpu);
 +      ++ctx->generation;
 +      mutex_unlock(&ctx->mutex);
 +
 +      event->owner = current;
 +      get_task_struct(current);
 +      mutex_lock(&current->perf_event_mutex);
 +      list_add_tail(&event->owner_entry, &current->perf_event_list);
 +      mutex_unlock(&current->perf_event_mutex);
 +
 +      return event;
 +
 + err_put_context:
 +      put_ctx(ctx);
 + err_exit:
 +      return ERR_PTR(err);
 +}
 +EXPORT_SYMBOL_GPL(perf_event_create_kernel_counter);
 +
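
perf_event_create_kernel_counter() gives kernel code a way to set up a counter without going through the syscall, bound either to a cpu or to a task and carrying an optional callback that perf_event_alloc() now stores (the hardware-breakpoint support added elsewhere in this merge is presumably the main consumer). A hedged, kernel-style usage sketch; the names, the sampling parameters and the two-argument callback shape are assumptions, not something this hunk spells out:

/* Hypothetical in-kernel caller, for illustration only. */
#include <linux/perf_event.h>
#include <linux/err.h>

/* assumed perf_callback_t shape; the typedef is not shown in this hunk */
static void my_counter_fired(struct perf_event *event, void *data)
{
        /* passed through perf_event_alloc() and stored as event->callback */
}

static struct perf_event *my_counter;

static int my_counter_setup(pid_t pid)
{
        struct perf_event_attr attr = {
                .type          = PERF_TYPE_SOFTWARE,
                .size          = sizeof(attr),
                .config        = PERF_COUNT_SW_CPU_CLOCK,
                .sample_period = 1000000,
        };

        /* cpu == -1: bind to the task context of @pid rather than to a cpu */
        my_counter = perf_event_create_kernel_counter(&attr, -1, pid,
                                                      my_counter_fired);
        if (IS_ERR(my_counter))
                return PTR_ERR(my_counter);
        return 0;
}

As the function body shows, the event is installed directly into the looked-up context and added to the caller's perf_event_list, so teardown stays the caller's responsibility.
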
  /*
   * inherit a event from parent task to child task:
   */
@@@ -4846,7 -4619,7 +4846,7 @@@ inherit_event(struct perf_event *parent
        child_event = perf_event_alloc(&parent_event->attr,
                                           parent_event->cpu, child_ctx,
                                           group_leader, parent_event,
 -                                         GFP_KERNEL);
 +                                         NULL, GFP_KERNEL);
        if (IS_ERR(child_event))
                return child_event;
        get_ctx(child_ctx);
        if (parent_event->attr.freq)
                child_event->hw.sample_period = parent_event->hw.sample_period;
  
 +      child_event->overflow_handler = parent_event->overflow_handler;
 +
        /*
         * Link it up in the child's context:
         */
@@@ -4955,6 -4726,7 +4955,6 @@@ __perf_event_exit_task(struct perf_even
  {
        struct perf_event *parent_event;
  
 -      update_event_times(child_event);
        perf_event_remove_from_context(child_event);
  
        parent_event = child_event->parent;
@@@ -5006,7 -4778,6 +5006,7 @@@ void perf_event_exit_task(struct task_s
         * the events from it.
         */
        unclone_ctx(child_ctx);
 +      update_context_time(child_ctx);
        spin_unlock_irqrestore(&child_ctx->lock, flags);
  
        /*
diff --combined lib/Kconfig.debug
@@@ -105,7 -105,7 +105,7 @@@ config DEBUG_SECTION_MISMATC
        bool "Enable full Section mismatch analysis"
        depends on UNDEFINED
        # This option is on purpose disabled for now.
-       # It will be enabled when we are down to a resonable number
+       # It will be enabled when we are down to a reasonable number
        # of section mismatch warnings (< 10 for an allyesconfig build)
        help
          The section mismatch analysis checks if there are illegal
@@@ -750,7 -750,7 +750,7 @@@ config RCU_TORTURE_TEST_RUNNABL
  config RCU_CPU_STALL_DETECTOR
        bool "Check for stalled CPUs delaying RCU grace periods"
        depends on TREE_RCU || TREE_PREEMPT_RCU
 -      default n
 +      default y
        help
          This option causes RCU to printk information on which
          CPUs are delaying the current grace period, but only when
diff --combined lib/swiotlb.c
@@@ -97,8 -97,6 +97,8 @@@ static phys_addr_t *io_tlb_orig_addr
   */
  static DEFINE_SPINLOCK(io_tlb_lock);
  
 +static int late_alloc;
 +
  static int __init
  setup_io_tlb_npages(char *str)
  {
                ++str;
        if (!strcmp(str, "force"))
                swiotlb_force = 1;
 +
        return 1;
  }
  __setup("swiotlb=", setup_io_tlb_npages);
@@@ -124,9 -121,8 +124,9 @@@ static dma_addr_t swiotlb_virt_to_bus(s
        return phys_to_dma(hwdev, virt_to_phys(address));
  }
  
 -static void swiotlb_print_info(unsigned long bytes)
 +void swiotlb_print_info(void)
  {
 +      unsigned long bytes = io_tlb_nslabs << IO_TLB_SHIFT;
        phys_addr_t pstart, pend;
  
        pstart = virt_to_phys(io_tlb_start);
   * structures for the software IO TLB used to implement the DMA API.
   */
  void __init
 -swiotlb_init_with_default_size(size_t default_size)
 +swiotlb_init_with_default_size(size_t default_size, int verbose)
  {
        unsigned long i, bytes;
  
        io_tlb_overflow_buffer = alloc_bootmem_low(io_tlb_overflow);
        if (!io_tlb_overflow_buffer)
                panic("Cannot allocate SWIOTLB overflow buffer!\n");
 -
 -      swiotlb_print_info(bytes);
 +      if (verbose)
 +              swiotlb_print_info();
  }
  
  void __init
 -swiotlb_init(void)
 +swiotlb_init(int verbose)
  {
 -      swiotlb_init_with_default_size(64 * (1<<20));   /* default to 64MB */
 +      swiotlb_init_with_default_size(64 * (1<<20), verbose);  /* default to 64MB */
  }
  
  /*
@@@ -264,9 -260,7 +264,9 @@@ swiotlb_late_init_with_default_size(siz
        if (!io_tlb_overflow_buffer)
                goto cleanup4;
  
 -      swiotlb_print_info(bytes);
 +      swiotlb_print_info();
 +
 +      late_alloc = 1;
  
        return 0;
  
@@@ -287,32 -281,6 +287,32 @@@ cleanup1
        return -ENOMEM;
  }
  
 +void __init swiotlb_free(void)
 +{
 +      if (!io_tlb_overflow_buffer)
 +              return;
 +
 +      if (late_alloc) {
 +              free_pages((unsigned long)io_tlb_overflow_buffer,
 +                         get_order(io_tlb_overflow));
 +              free_pages((unsigned long)io_tlb_orig_addr,
 +                         get_order(io_tlb_nslabs * sizeof(phys_addr_t)));
 +              free_pages((unsigned long)io_tlb_list, get_order(io_tlb_nslabs *
 +                                                               sizeof(int)));
 +              free_pages((unsigned long)io_tlb_start,
 +                         get_order(io_tlb_nslabs << IO_TLB_SHIFT));
 +      } else {
 +              free_bootmem_late(__pa(io_tlb_overflow_buffer),
 +                                io_tlb_overflow);
 +              free_bootmem_late(__pa(io_tlb_orig_addr),
 +                                io_tlb_nslabs * sizeof(phys_addr_t));
 +              free_bootmem_late(__pa(io_tlb_list),
 +                                io_tlb_nslabs * sizeof(int));
 +              free_bootmem_late(__pa(io_tlb_start),
 +                                io_tlb_nslabs << IO_TLB_SHIFT);
 +      }
 +}
 +
  static int is_swiotlb_buffer(phys_addr_t paddr)
  {
        return paddr >= virt_to_phys(io_tlb_start) &&
@@@ -485,7 -453,7 +485,7 @@@ do_unmap_single(struct device *hwdev, c
  
        /*
         * Return the buffer to the free list by setting the corresponding
-        * entries to indicate the number of contigous entries available.
+        * entries to indicate the number of contiguous entries available.
         * While returning the entries to the free list, we merge the entries
         * with slots below and above the pool being returned.
         */
diff --combined net/sctp/sm_sideeffect.c
@@@ -480,6 -480,7 +480,6 @@@ static void sctp_do_8_2_transport_strik
         * that indicates that we have an outstanding HB.
         */
        if (!is_hb || transport->hb_sent) {
 -              transport->last_rto = transport->rto;
                transport->rto = min((transport->rto * 2), transport->asoc->rto_max);
        }
  }
@@@ -718,7 -719,7 +718,7 @@@ static void sctp_cmd_new_state(sctp_cmd
  
        if (sctp_style(sk, TCP)) {
                /* Change the sk->sk_state of a TCP-style socket that has
-                * sucessfully completed a connect() call.
+                * successfully completed a connect() call.
                 */
                if (sctp_state(asoc, ESTABLISHED) && sctp_sstate(sk, CLOSED))
                        sk->sk_state = SCTP_SS_ESTABLISHED;
diff --combined net/sctp/sm_statefuns.c
@@@ -384,11 -384,6 +384,11 @@@ sctp_disposition_t sctp_sf_do_5_1B_init
        if (!new_asoc)
                goto nomem;
  
 +      if (sctp_assoc_set_bind_addr_from_ep(new_asoc,
 +                                           sctp_scope(sctp_source(chunk)),
 +                                           GFP_ATOMIC) < 0)
 +              goto nomem_init;
 +
        /* The call, sctp_process_init(), can fail on memory allocation.  */
        if (!sctp_process_init(new_asoc, chunk->chunk_hdr->type,
                               sctp_source(chunk),
                len = ntohs(err_chunk->chunk_hdr->length) -
                        sizeof(sctp_chunkhdr_t);
  
 -      if (sctp_assoc_set_bind_addr_from_ep(new_asoc, GFP_ATOMIC) < 0)
 -              goto nomem_init;
 -
        repl = sctp_make_init_ack(new_asoc, chunk, GFP_ATOMIC, len);
        if (!repl)
                goto nomem_init;
@@@ -1454,10 -1452,6 +1454,10 @@@ static sctp_disposition_t sctp_sf_do_un
        if (!new_asoc)
                goto nomem;
  
 +      if (sctp_assoc_set_bind_addr_from_ep(new_asoc,
 +                              sctp_scope(sctp_source(chunk)), GFP_ATOMIC) < 0)
 +              goto nomem;
 +
        /* In the outbound INIT ACK the endpoint MUST copy its current
         * Verification Tag and Peers Verification tag into a reserved
         * place (local tie-tag and per tie-tag) within the state cookie.
                        sizeof(sctp_chunkhdr_t);
        }
  
 -      if (sctp_assoc_set_bind_addr_from_ep(new_asoc, GFP_ATOMIC) < 0)
 -              goto nomem;
 -
        repl = sctp_make_init_ack(new_asoc, chunk, GFP_ATOMIC, len);
        if (!repl)
                goto nomem;
@@@ -3572,7 -3569,7 +3572,7 @@@ sctp_disposition_t sctp_sf_do_asconf(co
         * To do this properly, we'll set the destination address of the chunk
         * and at the transmit time, will try look up the transport to use.
         * Since ASCONFs may be bundled, the correct transport may not be
-        * created untill we process the entire packet, thus this workaround.
+        * created until we process the entire packet, thus this workaround.
         */
        asconf_ack->dest = chunk->source;
        sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(asconf_ack));
diff --combined samples/Kconfig
@@@ -1,5 -1,3 +1,3 @@@
- # samples/Kconfig
  menuconfig SAMPLES
        bool "Sample kernel code"
        help
@@@ -40,11 -38,4 +38,10 @@@ config SAMPLE_KRETPROBE
        default m
        depends on SAMPLE_KPROBES && KRETPROBES
  
 +config SAMPLE_HW_BREAKPOINT
 +      tristate "Build kernel hardware breakpoint examples -- loadable module only"
 +      depends on HAVE_HW_BREAKPOINT && m
 +      help
 +        This builds kernel hardware breakpoint example modules.
 +
  endif # SAMPLES
  #include "audit.h"
  
  extern void selnl_notify_policyload(u32 seqno);
 -unsigned int policydb_loaded_version;
  
  int selinux_policycap_netpeer;
  int selinux_policycap_openperm;
  
 -/*
 - * This is declared in avc.c
 - */
 -extern const struct selinux_class_perm selinux_class_perm;
 -
  static DEFINE_RWLOCK(policy_rwlock);
  
  static struct sidtab sidtab;
@@@ -92,165 -98,6 +92,165 @@@ static int context_struct_compute_av(st
                                     u16 tclass,
                                     u32 requested,
                                     struct av_decision *avd);
 +
 +struct selinux_mapping {
 +      u16 value; /* policy value */
 +      unsigned num_perms;
 +      u32 perms[sizeof(u32) * 8];
 +};
 +
 +static struct selinux_mapping *current_mapping;
 +static u16 current_mapping_size;
 +
 +static int selinux_set_mapping(struct policydb *pol,
 +                             struct security_class_mapping *map,
 +                             struct selinux_mapping **out_map_p,
 +                             u16 *out_map_size)
 +{
 +      struct selinux_mapping *out_map = NULL;
 +      size_t size = sizeof(struct selinux_mapping);
 +      u16 i, j;
 +      unsigned k;
 +      bool print_unknown_handle = false;
 +
 +      /* Find number of classes in the input mapping */
 +      if (!map)
 +              return -EINVAL;
 +      i = 0;
 +      while (map[i].name)
 +              i++;
 +
 +      /* Allocate space for the class records, plus one for class zero */
 +      out_map = kcalloc(++i, size, GFP_ATOMIC);
 +      if (!out_map)
 +              return -ENOMEM;
 +
 +      /* Store the raw class and permission values */
 +      j = 0;
 +      while (map[j].name) {
 +              struct security_class_mapping *p_in = map + (j++);
 +              struct selinux_mapping *p_out = out_map + j;
 +
 +              /* An empty class string skips ahead */
 +              if (!strcmp(p_in->name, "")) {
 +                      p_out->num_perms = 0;
 +                      continue;
 +              }
 +
 +              p_out->value = string_to_security_class(pol, p_in->name);
 +              if (!p_out->value) {
 +                      printk(KERN_INFO
 +                             "SELinux:  Class %s not defined in policy.\n",
 +                             p_in->name);
 +                      if (pol->reject_unknown)
 +                              goto err;
 +                      p_out->num_perms = 0;
 +                      print_unknown_handle = true;
 +                      continue;
 +              }
 +
 +              k = 0;
 +              while (p_in->perms && p_in->perms[k]) {
 +                      /* An empty permission string skips ahead */
 +                      if (!*p_in->perms[k]) {
 +                              k++;
 +                              continue;
 +                      }
 +                      p_out->perms[k] = string_to_av_perm(pol, p_out->value,
 +                                                          p_in->perms[k]);
 +                      if (!p_out->perms[k]) {
 +                              printk(KERN_INFO
 +                                     "SELinux:  Permission %s in class %s not defined in policy.\n",
 +                                     p_in->perms[k], p_in->name);
 +                              if (pol->reject_unknown)
 +                                      goto err;
 +                              print_unknown_handle = true;
 +                      }
 +
 +                      k++;
 +              }
 +              p_out->num_perms = k;
 +      }
 +
 +      if (print_unknown_handle)
 +              printk(KERN_INFO "SELinux: the above unknown classes and permissions will be %s\n",
 +                     pol->allow_unknown ? "allowed" : "denied");
 +
 +      *out_map_p = out_map;
 +      *out_map_size = i;
 +      return 0;
 +err:
 +      kfree(out_map);
 +      return -EINVAL;
 +}
 +
 +/*
 + * Get real, policy values from mapped values
 + */
 +
 +static u16 unmap_class(u16 tclass)
 +{
 +      if (tclass < current_mapping_size)
 +              return current_mapping[tclass].value;
 +
 +      return tclass;
 +}
 +
 +static u32 unmap_perm(u16 tclass, u32 tperm)
 +{
 +      if (tclass < current_mapping_size) {
 +              unsigned i;
 +              u32 kperm = 0;
 +
 +              for (i = 0; i < current_mapping[tclass].num_perms; i++)
 +                      if (tperm & (1<<i)) {
 +                              kperm |= current_mapping[tclass].perms[i];
 +                              tperm &= ~(1<<i);
 +                      }
 +              return kperm;
 +      }
 +
 +      return tperm;
 +}
 +
 +static void map_decision(u16 tclass, struct av_decision *avd,
 +                       int allow_unknown)
 +{
 +      if (tclass < current_mapping_size) {
 +              unsigned i, n = current_mapping[tclass].num_perms;
 +              u32 result;
 +
 +              for (i = 0, result = 0; i < n; i++) {
 +                      if (avd->allowed & current_mapping[tclass].perms[i])
 +                              result |= 1<<i;
 +                      if (allow_unknown && !current_mapping[tclass].perms[i])
 +                              result |= 1<<i;
 +              }
 +              avd->allowed = result;
 +
 +              for (i = 0, result = 0; i < n; i++)
 +                      if (avd->auditallow & current_mapping[tclass].perms[i])
 +                              result |= 1<<i;
 +              avd->auditallow = result;
 +
 +              for (i = 0, result = 0; i < n; i++) {
 +                      if (avd->auditdeny & current_mapping[tclass].perms[i])
 +                              result |= 1<<i;
 +                      if (!allow_unknown && !current_mapping[tclass].perms[i])
 +                              result |= 1<<i;
 +              }
 +              /*
 +               * In case the kernel has a bug and requests a permission
 +               * between num_perms and the maximum permission number, we
 +               * should audit that denial
 +               */
 +              for (; i < (sizeof(u32)*8); i++)
 +                      result |= 1<<i;
 +              avd->auditdeny = result;
 +      }
 +}
 +
 +
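
The mapping layer introduced above decouples the kernel's class and permission numbers from whatever values the loaded policy uses: selinux_set_mapping() builds a per-class translation table, unmap_class()/unmap_perm() convert kernel values to policy values before the lookup, and map_decision() converts the resulting access vectors back, padding unknown bits according to allow_unknown. A compact userspace sketch of the forward permission translation, with a made-up table (nothing here comes from a real policy):

#include <stdint.h>
#include <stdio.h>

struct mapping {
        unsigned num_perms;
        uint32_t perms[32];             /* policy value for each kernel bit */
};

static uint32_t unmap_perm(const struct mapping *m, uint32_t tperm)
{
        uint32_t kperm = 0;
        unsigned i;

        for (i = 0; i < m->num_perms; i++)
                if (tperm & (1u << i))
                        kperm |= m->perms[i];
        return kperm;
}

int main(void)
{
        /* hypothetical class with three mapped permissions */
        struct mapping m = { .num_perms = 3, .perms = { 0x4, 0x1, 0x20 } };

        /* kernel bits 0 and 2 translate to policy bits 0x4 | 0x20 */
        printf("0x%x\n", (unsigned)unmap_perm(&m, 0x5));
        return 0;
}

map_decision() is the inverse walk: for each mapped bit it tests whether the policy permission appears in avd->allowed/auditallow/auditdeny, and any bit beyond num_perms is force-set in auditdeny so that a buggy request for an unmapped permission still gets audited.
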
  /*
   * Return the boolean value of a constraint expression
   * when it is applied to the specified source and target
@@@ -620,9 -467,21 +620,9 @@@ static int context_struct_compute_av(st
        struct class_datum *tclass_datum;
        struct ebitmap *sattr, *tattr;
        struct ebitmap_node *snode, *tnode;
        unsigned int i, j;
  
        /*
 -       * Remap extended Netlink classes for old policy versions.
 -       * Do this here rather than socket_type_to_security_class()
 -       * in case a newer policy version is loaded, allowing sockets
 -       * to remain in the correct class.
 -       */
 -      if (policydb_loaded_version < POLICYDB_VERSION_NLCLASS)
 -              if (tclass >= SECCLASS_NETLINK_ROUTE_SOCKET &&
 -                  tclass <= SECCLASS_NETLINK_DNRT_SOCKET)
 -                      tclass = SECCLASS_NETLINK_SOCKET;
 -
 -      /*
         * Initialize the access vectors to the default values.
         */
        avd->allowed = 0;
        avd->seqno = latest_granting;
        avd->flags = 0;
  
 -      /*
 -       * Check for all the invalid cases.
 -       * - tclass 0
 -       * - tclass > policy and > kernel
 -       * - tclass > policy but is a userspace class
 -       * - tclass > policy but we do not allow unknowns
 -       */
 -      if (unlikely(!tclass))
 -              goto inval_class;
 -      if (unlikely(tclass > policydb.p_classes.nprim))
 -              if (tclass > kdefs->cts_len ||
 -                  !kdefs->class_to_string[tclass] ||
 -                  !policydb.allow_unknown)
 -                      goto inval_class;
 -
 -      /*
 -       * Kernel class and we allow unknown so pad the allow decision
 -       * the pad will be all 1 for unknown classes.
 -       */
 -      if (tclass <= kdefs->cts_len && policydb.allow_unknown)
 -              avd->allowed = policydb.undefined_perms[tclass - 1];
 -
 -      /*
 -       * Not in policy. Since decision is completed (all 1 or all 0) return.
 -       */
 -      if (unlikely(tclass > policydb.p_classes.nprim))
 -              return 0;
 +      if (unlikely(!tclass || tclass > policydb.p_classes.nprim)) {
 +              if (printk_ratelimit())
 +                      printk(KERN_WARNING "SELinux:  Invalid class %hu\n", tclass);
 +              return -EINVAL;
 +      }
  
        tclass_datum = policydb.class_val_to_struct[tclass - 1];
  
         * role is changing, then check the (current_role, new_role)
         * pair.
         */
 -      if (tclass == SECCLASS_PROCESS &&
 -          (avd->allowed & (PROCESS__TRANSITION | PROCESS__DYNTRANSITION)) &&
 +      if (tclass == policydb.process_class &&
 +          (avd->allowed & policydb.process_trans_perms) &&
            scontext->role != tcontext->role) {
                for (ra = policydb.role_allow; ra; ra = ra->next) {
                        if (scontext->role == ra->role &&
                                break;
                }
                if (!ra)
 -                      avd->allowed &= ~(PROCESS__TRANSITION |
 -                                        PROCESS__DYNTRANSITION);
 +                      avd->allowed &= ~policydb.process_trans_perms;
        }
  
        /*
                                 tclass, requested, avd);
  
        return 0;
 -
 -inval_class:
 -      if (!tclass || tclass > kdefs->cts_len ||
 -          !kdefs->class_to_string[tclass]) {
 -              if (printk_ratelimit())
 -                      printk(KERN_ERR "SELinux: %s:  unrecognized class %d\n",
 -                             __func__, tclass);
 -              return -EINVAL;
 -      }
 -
 -      /*
 -       * Known to the kernel, but not to the policy.
 -       * Handle as a denial (allowed is 0).
 -       */
 -      return 0;
  }
  
  static int security_validtrans_handle_fail(struct context *ocontext,
@@@ -739,14 -636,13 +739,14 @@@ out
  }
  
  int security_validate_transition(u32 oldsid, u32 newsid, u32 tasksid,
 -                               u16 tclass)
 +                               u16 orig_tclass)
  {
        struct context *ocontext;
        struct context *ncontext;
        struct context *tcontext;
        struct class_datum *tclass_datum;
        struct constraint_node *constraint;
 +      u16 tclass;
        int rc = 0;
  
        if (!ss_initialized)
  
        read_lock(&policy_rwlock);
  
 -      /*
 -       * Remap extended Netlink classes for old policy versions.
 -       * Do this here rather than socket_type_to_security_class()
 -       * in case a newer policy version is loaded, allowing sockets
 -       * to remain in the correct class.
 -       */
 -      if (policydb_loaded_version < POLICYDB_VERSION_NLCLASS)
 -              if (tclass >= SECCLASS_NETLINK_ROUTE_SOCKET &&
 -                  tclass <= SECCLASS_NETLINK_DNRT_SOCKET)
 -                      tclass = SECCLASS_NETLINK_SOCKET;
 +      tclass = unmap_class(orig_tclass);
  
        if (!tclass || tclass > policydb.p_classes.nprim) {
                printk(KERN_ERR "SELinux: %s:  unrecognized class %d\n",
@@@ -836,7 -741,7 +836,7 @@@ int security_bounded_transition(u32 old
                goto out;
        }
  
-       /* type/domain unchaned */
+       /* type/domain unchanged */
        if (old_context->type == new_context->type) {
                rc = 0;
                goto out;
@@@ -887,38 -792,6 +887,38 @@@ out
  }
  
  
 +static int security_compute_av_core(u32 ssid,
 +                                  u32 tsid,
 +                                  u16 tclass,
 +                                  u32 requested,
 +                                  struct av_decision *avd)
 +{
 +      struct context *scontext = NULL, *tcontext = NULL;
 +      int rc = 0;
 +
 +      scontext = sidtab_search(&sidtab, ssid);
 +      if (!scontext) {
 +              printk(KERN_ERR "SELinux: %s:  unrecognized SID %d\n",
 +                     __func__, ssid);
 +              return -EINVAL;
 +      }
 +      tcontext = sidtab_search(&sidtab, tsid);
 +      if (!tcontext) {
 +              printk(KERN_ERR "SELinux: %s:  unrecognized SID %d\n",
 +                     __func__, tsid);
 +              return -EINVAL;
 +      }
 +
 +      rc = context_struct_compute_av(scontext, tcontext, tclass,
 +                                     requested, avd);
 +
 +      /* permissive domain? */
 +      if (ebitmap_get_bit(&policydb.permissive_map, scontext->type))
 +              avd->flags |= AVD_FLAGS_PERMISSIVE;
 +
 +      return rc;
 +}
 +
  /**
   * security_compute_av - Compute access vector decisions.
   * @ssid: source security identifier
   */
  int security_compute_av(u32 ssid,
                        u32 tsid,
 -                      u16 tclass,
 -                      u32 requested,
 +                      u16 orig_tclass,
 +                      u32 orig_requested,
                        struct av_decision *avd)
  {
 -      struct context *scontext = NULL, *tcontext = NULL;
 -      int rc = 0;
 +      u16 tclass;
 +      u32 requested;
 +      int rc;
 +
 +      read_lock(&policy_rwlock);
 +
 +      if (!ss_initialized)
 +              goto allow;
 +
 +      requested = unmap_perm(orig_tclass, orig_requested);
 +      tclass = unmap_class(orig_tclass);
 +      if (unlikely(orig_tclass && !tclass)) {
 +              if (policydb.allow_unknown)
 +                      goto allow;
 +              rc = -EINVAL;
 +              goto out;
 +      }
 +      rc = security_compute_av_core(ssid, tsid, tclass, requested, avd);
 +      map_decision(orig_tclass, avd, policydb.allow_unknown);
 +out:
 +      read_unlock(&policy_rwlock);
 +      return rc;
 +allow:
 +      avd->allowed = 0xffffffff;
 +      avd->auditallow = 0;
 +      avd->auditdeny = 0xffffffff;
 +      avd->seqno = latest_granting;
 +      avd->flags = 0;
 +      rc = 0;
 +      goto out;
 +}
 +
 +int security_compute_av_user(u32 ssid,
 +                           u32 tsid,
 +                           u16 tclass,
 +                           u32 requested,
 +                           struct av_decision *avd)
 +{
 +      int rc;
  
        if (!ss_initialized) {
                avd->allowed = 0xffffffff;
        }
  
        read_lock(&policy_rwlock);
 -
 -      scontext = sidtab_search(&sidtab, ssid);
 -      if (!scontext) {
 -              printk(KERN_ERR "SELinux: %s:  unrecognized SID %d\n",
 -                     __func__, ssid);
 -              rc = -EINVAL;
 -              goto out;
 -      }
 -      tcontext = sidtab_search(&sidtab, tsid);
 -      if (!tcontext) {
 -              printk(KERN_ERR "SELinux: %s:  unrecognized SID %d\n",
 -                     __func__, tsid);
 -              rc = -EINVAL;
 -              goto out;
 -      }
 -
 -      rc = context_struct_compute_av(scontext, tcontext, tclass,
 -                                     requested, avd);
 -
 -      /* permissive domain? */
 -      if (ebitmap_get_bit(&policydb.permissive_map, scontext->type))
 -          avd->flags |= AVD_FLAGS_PERMISSIVE;
 -out:
 +      rc = security_compute_av_core(ssid, tsid, tclass, requested, avd);
        read_unlock(&policy_rwlock);
        return rc;
  }
  
  static int security_compute_sid(u32 ssid,
                                u32 tsid,
 -                              u16 tclass,
 +                              u16 orig_tclass,
                                u32 specified,
 -                              u32 *out_sid)
 +                              u32 *out_sid,
 +                              bool kern)
  {
        struct context *scontext = NULL, *tcontext = NULL, newcontext;
        struct role_trans *roletr = NULL;
        struct avtab_key avkey;
        struct avtab_datum *avdatum;
        struct avtab_node *node;
 +      u16 tclass;
        int rc = 0;
  
        if (!ss_initialized) {
 -              switch (tclass) {
 -              case SECCLASS_PROCESS:
 +              switch (orig_tclass) {
 +              case SECCLASS_PROCESS: /* kernel value */
                        *out_sid = ssid;
                        break;
                default:
  
        read_lock(&policy_rwlock);
  
 +      if (kern)
 +              tclass = unmap_class(orig_tclass);
 +      else
 +              tclass = orig_tclass;
 +
        scontext = sidtab_search(&sidtab, ssid);
        if (!scontext) {
                printk(KERN_ERR "SELinux: %s:  unrecognized SID %d\n",
        }
  
        /* Set the role and type to default values. */
 -      switch (tclass) {
 -      case SECCLASS_PROCESS:
 +      if (tclass == policydb.process_class) {
                /* Use the current role and type of process. */
                newcontext.role = scontext->role;
                newcontext.type = scontext->type;
 -              break;
 -      default:
 +      } else {
                /* Use the well-defined object role. */
                newcontext.role = OBJECT_R_VAL;
                /* Use the type of the related object. */
        }
  
        /* Check for class-specific changes. */
 -      switch (tclass) {
 -      case SECCLASS_PROCESS:
 +      if  (tclass == policydb.process_class) {
                if (specified & AVTAB_TRANSITION) {
                        /* Look for a role transition rule. */
                        for (roletr = policydb.role_tr; roletr;
                                }
                        }
                }
 -              break;
 -      default:
 -              break;
        }
  
        /* Set the MLS attributes.
@@@ -1501,17 -1358,7 +1501,17 @@@ int security_transition_sid(u32 ssid
                            u16 tclass,
                            u32 *out_sid)
  {
 -      return security_compute_sid(ssid, tsid, tclass, AVTAB_TRANSITION, out_sid);
 +      return security_compute_sid(ssid, tsid, tclass, AVTAB_TRANSITION,
 +                                  out_sid, true);
 +}
 +
 +int security_transition_sid_user(u32 ssid,
 +                               u32 tsid,
 +                               u16 tclass,
 +                               u32 *out_sid)
 +{
 +      return security_compute_sid(ssid, tsid, tclass, AVTAB_TRANSITION,
 +                                  out_sid, false);
  }
  
  /**
@@@ -1532,8 -1379,7 +1532,8 @@@ int security_member_sid(u32 ssid
                        u16 tclass,
                        u32 *out_sid)
  {
 -      return security_compute_sid(ssid, tsid, tclass, AVTAB_MEMBER, out_sid);
 +      return security_compute_sid(ssid, tsid, tclass, AVTAB_MEMBER, out_sid,
 +                                  false);
  }
  
  /**
@@@ -1554,8 -1400,144 +1554,8 @@@ int security_change_sid(u32 ssid
                        u16 tclass,
                        u32 *out_sid)
  {
 -      return security_compute_sid(ssid, tsid, tclass, AVTAB_CHANGE, out_sid);
 -}
 -
 -/*
 - * Verify that each kernel class that is defined in the
 - * policy is correct
 - */
 -static int validate_classes(struct policydb *p)
 -{
 -      int i, j;
 -      struct class_datum *cladatum;
 -      struct perm_datum *perdatum;
 -      u32 nprim, tmp, common_pts_len, perm_val, pol_val;
 -      u16 class_val;
 -      const struct selinux_class_perm *kdefs = &selinux_class_perm;
 -      const char *def_class, *def_perm, *pol_class;
 -      struct symtab *perms;
 -      bool print_unknown_handle = 0;
 -
 -      if (p->allow_unknown) {
 -              u32 num_classes = kdefs->cts_len;
 -              p->undefined_perms = kcalloc(num_classes, sizeof(u32), GFP_KERNEL);
 -              if (!p->undefined_perms)
 -                      return -ENOMEM;
 -      }
 -
 -      for (i = 1; i < kdefs->cts_len; i++) {
 -              def_class = kdefs->class_to_string[i];
 -              if (!def_class)
 -                      continue;
 -              if (i > p->p_classes.nprim) {
 -                      printk(KERN_INFO
 -                             "SELinux:  class %s not defined in policy\n",
 -                             def_class);
 -                      if (p->reject_unknown)
 -                              return -EINVAL;
 -                      if (p->allow_unknown)
 -                              p->undefined_perms[i-1] = ~0U;
 -                      print_unknown_handle = 1;
 -                      continue;
 -              }
 -              pol_class = p->p_class_val_to_name[i-1];
 -              if (strcmp(pol_class, def_class)) {
 -                      printk(KERN_ERR
 -                             "SELinux:  class %d is incorrect, found %s but should be %s\n",
 -                             i, pol_class, def_class);
 -                      return -EINVAL;
 -              }
 -      }
 -      for (i = 0; i < kdefs->av_pts_len; i++) {
 -              class_val = kdefs->av_perm_to_string[i].tclass;
 -              perm_val = kdefs->av_perm_to_string[i].value;
 -              def_perm = kdefs->av_perm_to_string[i].name;
 -              if (class_val > p->p_classes.nprim)
 -                      continue;
 -              pol_class = p->p_class_val_to_name[class_val-1];
 -              cladatum = hashtab_search(p->p_classes.table, pol_class);
 -              BUG_ON(!cladatum);
 -              perms = &cladatum->permissions;
 -              nprim = 1 << (perms->nprim - 1);
 -              if (perm_val > nprim) {
 -                      printk(KERN_INFO
 -                             "SELinux:  permission %s in class %s not defined in policy\n",
 -                             def_perm, pol_class);
 -                      if (p->reject_unknown)
 -                              return -EINVAL;
 -                      if (p->allow_unknown)
 -                              p->undefined_perms[class_val-1] |= perm_val;
 -                      print_unknown_handle = 1;
 -                      continue;
 -              }
 -              perdatum = hashtab_search(perms->table, def_perm);
 -              if (perdatum == NULL) {
 -                      printk(KERN_ERR
 -                             "SELinux:  permission %s in class %s not found in policy, bad policy\n",
 -                             def_perm, pol_class);
 -                      return -EINVAL;
 -              }
 -              pol_val = 1 << (perdatum->value - 1);
 -              if (pol_val != perm_val) {
 -                      printk(KERN_ERR
 -                             "SELinux:  permission %s in class %s has incorrect value\n",
 -                             def_perm, pol_class);
 -                      return -EINVAL;
 -              }
 -      }
 -      for (i = 0; i < kdefs->av_inherit_len; i++) {
 -              class_val = kdefs->av_inherit[i].tclass;
 -              if (class_val > p->p_classes.nprim)
 -                      continue;
 -              pol_class = p->p_class_val_to_name[class_val-1];
 -              cladatum = hashtab_search(p->p_classes.table, pol_class);
 -              BUG_ON(!cladatum);
 -              if (!cladatum->comdatum) {
 -                      printk(KERN_ERR
 -                             "SELinux:  class %s should have an inherits clause but does not\n",
 -                             pol_class);
 -                      return -EINVAL;
 -              }
 -              tmp = kdefs->av_inherit[i].common_base;
 -              common_pts_len = 0;
 -              while (!(tmp & 0x01)) {
 -                      common_pts_len++;
 -                      tmp >>= 1;
 -              }
 -              perms = &cladatum->comdatum->permissions;
 -              for (j = 0; j < common_pts_len; j++) {
 -                      def_perm = kdefs->av_inherit[i].common_pts[j];
 -                      if (j >= perms->nprim) {
 -                              printk(KERN_INFO
 -                                     "SELinux:  permission %s in class %s not defined in policy\n",
 -                                     def_perm, pol_class);
 -                              if (p->reject_unknown)
 -                                      return -EINVAL;
 -                              if (p->allow_unknown)
 -                                      p->undefined_perms[class_val-1] |= (1 << j);
 -                              print_unknown_handle = 1;
 -                              continue;
 -                      }
 -                      perdatum = hashtab_search(perms->table, def_perm);
 -                      if (perdatum == NULL) {
 -                              printk(KERN_ERR
 -                                     "SELinux:  permission %s in class %s not found in policy, bad policy\n",
 -                                     def_perm, pol_class);
 -                              return -EINVAL;
 -                      }
 -                      if (perdatum->value != j + 1) {
 -                              printk(KERN_ERR
 -                                     "SELinux:  permission %s in class %s has incorrect value\n",
 -                                     def_perm, pol_class);
 -                              return -EINVAL;
 -                      }
 -              }
 -      }
 -      if (print_unknown_handle)
 -              printk(KERN_INFO "SELinux: the above unknown classes and permissions will be %s\n",
 -                      (security_get_allow_unknown() ? "allowed" : "denied"));
 -      return 0;
 +      return security_compute_sid(ssid, tsid, tclass, AVTAB_CHANGE, out_sid,
 +                                  false);
  }
  
  /* Clone the SID into the new SID table. */
@@@ -1728,10 -1710,8 +1728,10 @@@ int security_load_policy(void *data, si
  {
        struct policydb oldpolicydb, newpolicydb;
        struct sidtab oldsidtab, newsidtab;
 +      struct selinux_mapping *oldmap, *map = NULL;
        struct convert_context_args args;
        u32 seqno;
 +      u16 map_size;
        int rc = 0;
        struct policy_file file = { data, len }, *fp = &file;
  
                        avtab_cache_destroy();
                        return -EINVAL;
                }
 -              if (policydb_load_isids(&policydb, &sidtab)) {
 +              if (selinux_set_mapping(&policydb, secclass_map,
 +                                      &current_mapping,
 +                                      &current_mapping_size)) {
                        policydb_destroy(&policydb);
                        avtab_cache_destroy();
                        return -EINVAL;
                }
 -              /* Verify that the kernel defined classes are correct. */
 -              if (validate_classes(&policydb)) {
 -                      printk(KERN_ERR
 -                             "SELinux:  the definition of a class is incorrect\n");
 -                      sidtab_destroy(&sidtab);
 +              if (policydb_load_isids(&policydb, &sidtab)) {
                        policydb_destroy(&policydb);
                        avtab_cache_destroy();
                        return -EINVAL;
                }
                security_load_policycaps();
 -              policydb_loaded_version = policydb.policyvers;
                ss_initialized = 1;
                seqno = ++latest_granting;
                selinux_complete_init();
                return -ENOMEM;
        }
  
 -      /* Verify that the kernel defined classes are correct. */
 -      if (validate_classes(&newpolicydb)) {
 -              printk(KERN_ERR
 -                     "SELinux:  the definition of a class is incorrect\n");
 -              rc = -EINVAL;
 +      if (selinux_set_mapping(&newpolicydb, secclass_map,
 +                              &map, &map_size))
                goto err;
 -      }
  
        rc = security_preserve_bools(&newpolicydb);
        if (rc) {
        memcpy(&policydb, &newpolicydb, sizeof policydb);
        sidtab_set(&sidtab, &newsidtab);
        security_load_policycaps();
 +      oldmap = current_mapping;
 +      current_mapping = map;
 +      current_mapping_size = map_size;
        seqno = ++latest_granting;
 -      policydb_loaded_version = policydb.policyvers;
        write_unlock_irq(&policy_rwlock);
  
        /* Free the old policydb and SID table. */
        policydb_destroy(&oldpolicydb);
        sidtab_destroy(&oldsidtab);
 +      kfree(oldmap);
  
        avc_ss_reset(seqno);
        selnl_notify_policyload(seqno);
        return 0;
  
  err:
 +      kfree(map);
        sidtab_destroy(&newsidtab);
        policydb_destroy(&newpolicydb);
        return rc;
@@@ -2108,7 -2091,7 +2108,7 @@@ out_unlock
        }
        for (i = 0, j = 0; i < mynel; i++) {
                rc = avc_has_perm_noaudit(fromsid, mysids[i],
 -                                        SECCLASS_PROCESS,
 +                                        SECCLASS_PROCESS, /* kernel value */
                                          PROCESS__TRANSITION, AVC_STRICT,
                                          NULL);
                if (!rc)
   */
  int security_genfs_sid(const char *fstype,
                       char *path,
 -                     u16 sclass,
 +                     u16 orig_sclass,
                       u32 *sid)
  {
        int len;
 +      u16 sclass;
        struct genfs *genfs;
        struct ocontext *c;
        int rc = 0, cmp = 0;
  
        read_lock(&policy_rwlock);
  
 +      sclass = unmap_class(orig_sclass);
 +
        for (genfs = policydb.genfs; genfs; genfs = genfs->next) {
                cmp = strcmp(fstype, genfs->fstype);
                if (cmp <= 0)
@@@ -4684,9 -4684,9 +4684,9 @@@ static int alc880_parse_auto_config(str
                        spec->multiout.dig_out_nid = dig_nid;
                else {
                        spec->multiout.slave_dig_outs = spec->slave_dig_outs;
 -                      spec->slave_dig_outs[i - 1] = dig_nid;
 -                      if (i == ARRAY_SIZE(spec->slave_dig_outs) - 1)
 +                      if (i >= ARRAY_SIZE(spec->slave_dig_outs) - 1)
                                break;
 +                      spec->slave_dig_outs[i - 1] = dig_nid;
                }
        }
        if (spec->autocfg.dig_in_pin)
@@@ -6249,7 -6249,7 +6249,7 @@@ static struct snd_pci_quirk alc260_cfg_
        SND_PCI_QUIRK(0x1025, 0x008f, "Acer", ALC260_ACER),
        SND_PCI_QUIRK(0x1509, 0x4540, "Favorit 100XS", ALC260_FAVORIT100),
        SND_PCI_QUIRK(0x103c, 0x2808, "HP d5700", ALC260_HP_3013),
 -      SND_PCI_QUIRK(0x103c, 0x280a, "HP d5750", ALC260_HP_3013),
 +      SND_PCI_QUIRK(0x103c, 0x280a, "HP d5750", ALC260_AUTO), /* no quirk */
        SND_PCI_QUIRK(0x103c, 0x3010, "HP", ALC260_HP_3013),
        SND_PCI_QUIRK(0x103c, 0x3011, "HP", ALC260_HP_3013),
        SND_PCI_QUIRK(0x103c, 0x3012, "HP", ALC260_HP_DC7600),
@@@ -6619,7 -6619,7 +6619,7 @@@ static struct hda_input_mux alc889A_mb3
                /* Front Mic (0x01) unused */
                { "Line", 0x2 },
                /* Line 2 (0x03) unused */
-               /* CD (0x04) unsused? */
+               /* CD (0x04) unused? */
        },
  };
  
@@@ -8911,11 -8911,10 +8911,11 @@@ static struct snd_pci_quirk alc882_ssid
        SND_PCI_QUIRK(0x106b, 0x3800, "MacbookPro 4,1", ALC885_MBP3),
        SND_PCI_QUIRK(0x106b, 0x3e00, "iMac 24 Aluminum", ALC885_IMAC24),
        SND_PCI_QUIRK(0x106b, 0x3f00, "Macbook 5,1", ALC885_MB5),
 -      /* FIXME: HP jack sense seems not working for MBP 5,1, so apparently
 -       * no perfect solution yet
 +      /* FIXME: HP jack sense seems not working for MBP 5,1 or 5,2,
 +       * so apparently no perfect solution yet
         */
        SND_PCI_QUIRK(0x106b, 0x4000, "MacbookPro 5,1", ALC885_MB5),
 +      SND_PCI_QUIRK(0x106b, 0x4600, "MacbookPro 5,2", ALC885_MB5),
        {} /* terminator */
  };
  
@@@ -9814,9 -9813,9 +9814,9 @@@ static int alc882_parse_auto_config(str
                        spec->multiout.dig_out_nid = dig_nid;
                else {
                        spec->multiout.slave_dig_outs = spec->slave_dig_outs;
 -                      spec->slave_dig_outs[i - 1] = dig_nid;
 -                      if (i == ARRAY_SIZE(spec->slave_dig_outs) - 1)
 +                      if (i >= ARRAY_SIZE(spec->slave_dig_outs) - 1)
                                break;
 +                      spec->slave_dig_outs[i - 1] = dig_nid;
                }
        }
        if (spec->autocfg.dig_in_pin)
@@@ -11461,8 -11460,6 +11461,8 @@@ static struct snd_pci_quirk alc262_cfg_
        SND_PCI_QUIRK(0x104d, 0x820f, "Sony ASSAMD", ALC262_SONY_ASSAMD),
        SND_PCI_QUIRK(0x104d, 0x9016, "Sony VAIO", ALC262_AUTO), /* dig-only */
        SND_PCI_QUIRK(0x104d, 0x9025, "Sony VAIO Z21MN", ALC262_TOSHIBA_S06),
 +      SND_PCI_QUIRK(0x104d, 0x9035, "Sony VAIO VGN-FW170J", ALC262_AUTO),
 +      SND_PCI_QUIRK(0x104d, 0x9047, "Sony VAIO Type G", ALC262_AUTO),
        SND_PCI_QUIRK_MASK(0x104d, 0xff00, 0x9000, "Sony VAIO",
                           ALC262_SONY_ASSAMD),
        SND_PCI_QUIRK(0x1179, 0x0001, "Toshiba dynabook SS RX1",