- Update to 3.3-rc2.
author     Jeff Mahoney <jeffm@suse.com>    Tue, 7 Feb 2012 17:12:40 +0000 (12:12 -0500)
committer  Jeff Mahoney <jeffm@suse.com>    Tue, 7 Feb 2012 17:12:40 +0000 (12:12 -0500)
  - Eliminated 5 patches.
  - Xen is disabled.

suse-commit: d017ea13f0d761c4f5a21c4d833f69272bbb13cf

111 files changed:
Documentation/kernel-parameters.txt
Documentation/sysctl/kernel.txt
Makefile
arch/ia64/Kconfig
arch/powerpc/kernel/legacy_serial.c
arch/powerpc/kernel/prom_init.c
arch/powerpc/kernel/setup_64.c
arch/powerpc/platforms/chrp/setup.c
arch/powerpc/platforms/pseries/setup.c
arch/powerpc/xmon/xmon.c
arch/s390/Kconfig
arch/s390/Makefile
arch/x86/Kconfig
arch/x86/Makefile
arch/x86/kernel/acpi/boot.c
arch/x86/kernel/apm_32.c
arch/x86/kernel/cpu/perf_event.c
arch/x86/kernel/dumpstack_64.c
arch/x86/kernel/e820.c
arch/x86/kernel/entry_32.S
arch/x86/kernel/entry_64.S
arch/x86/kernel/head_64.S
arch/x86/kernel/hpet.c
arch/x86/kernel/setup.c
arch/x86/kernel/vmlinux.lds.S
arch/x86/kvm/cpuid.c
arch/x86/kvm/svm.c
arch/x86/kvm/x86.c
arch/x86/mm/init.c
block/partitions/efi.c
drivers/acpi/Kconfig
drivers/acpi/acpica/tbfadt.c
drivers/acpi/acpica/tbinstal.c
drivers/acpi/acpica/tbutils.c
drivers/acpi/ec_sys.c
drivers/acpi/osl.c
drivers/ata/ata_piix.c
drivers/char/Kconfig
drivers/char/lp.c
drivers/cpufreq/cpufreq_ondemand.c
drivers/gpu/drm/nouveau/nouveau_acpi.c
drivers/hid/hid-core.c
drivers/hid/hid-ids.h
drivers/input/mouse/synaptics.c
drivers/input/mouse/synaptics.h
drivers/input/touchscreen/Kconfig
drivers/input/touchscreen/Makefile
drivers/md/dm-mpath.c
drivers/md/dm-table.c
drivers/md/dm.c
drivers/misc/Kconfig
drivers/net/ethernet/dec/tulip/tulip_core.c
drivers/net/ethernet/ibm/ehea/ehea_main.c
drivers/net/wireless/b43/main.c
drivers/scsi/device_handler/scsi_dh.c
drivers/scsi/scsi_error.c
drivers/scsi/scsi_lib.c
drivers/scsi/scsi_netlink.c
drivers/scsi/scsi_scan.c
drivers/scsi/sd.c
drivers/tty/n_tty.c
drivers/tty/serial/8250/8250.c
drivers/video/Kconfig
drivers/video/Makefile
drivers/video/omap2/dss/venc.c
fs/Kconfig
fs/Makefile
fs/ext4/ext4.h
fs/ext4/ialloc.c
fs/ext4/inode.c
fs/ext4/namei.c
fs/ext4/super.c
fs/namei.c
fs/nfs/dir.c
fs/nfs/inode.c
fs/super.c
include/acpi/acpiosxf.h
include/linux/acpi.h
include/linux/blkdev.h
include/linux/device.h
include/linux/fb.h
include/linux/fs.h
include/linux/kernel.h
include/linux/mm.h
include/linux/module.h
include/linux/nfs_fs.h
include/scsi/scsi_device.h
init/Kconfig
init/main.c
kernel/Kconfig.preempt
kernel/Makefile
kernel/ksysfs.c
kernel/module.c
kernel/panic.c
kernel/printk.c
kernel/sysctl.c
kernel/sysctl_binary.c
lib/Kconfig.debug
mm/page_alloc.c
mm/truncate.c
net/bridge/br_if.c
net/netfilter/Kconfig
net/netfilter/Makefile
net/netfilter/nf_conntrack_ftp.c
net/sunrpc/sched.c
scripts/Makefile.build
scripts/kconfig/Makefile
security/apparmor/apparmorfs.c
security/apparmor/lsm.c
tools/perf/builtin-timechart.c
virt/kvm/ioapic.c

Simple merge
Simple merge
diff --cc Makefile
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
@@@ -1361,23 -1349,7 +1349,22 @@@ static int __init dmi_ignore_irq0_timer
        }
        return 0;
  }
- #endif
  
 +static int __init force_acpi_rsdt(const struct dmi_system_id *d)
 +{
 +      if (!acpi_force) {
 +              printk(KERN_NOTICE "%s detected: force use of acpi=rsdt\n",
 +                     d->ident);
 +              acpi_rsdt_forced = 1;
 +      } else {
 +              printk(KERN_NOTICE
 +                     "Warning: acpi=force overrules DMI blacklist: "
 +                     "acpi=rsdt\n");
 +      }
 +      return 0;
 +}
 +
  /*
   * If your system is blacklisted here, but you find that acpi=force
   * works for you, please contact linux-acpi@vger.kernel.org
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
@@@ -1217,41 -1217,7 +1222,41 @@@ ENTRY(call_softirq
        CFI_ENDPROC
  END(call_softirq)
  
 +#ifdef CONFIG_STACK_UNWIND
 +ENTRY(arch_unwind_init_running)
 +      CFI_STARTPROC
 +      movq    %r15, R15(%rdi)
 +      movq    %r14, R14(%rdi)
 +      xchgq   %rsi, %rdx
 +      movq    %r13, R13(%rdi)
 +      movq    %r12, R12(%rdi)
 +      xorl    %eax, %eax
 +      movq    %rbp, RBP(%rdi)
 +      movq    %rbx, RBX(%rdi)
 +      movq    (%rsp), %r9
 +      xchgq   %rdx, %rcx
 +      movq    %rax, R11(%rdi)
 +      movq    %rax, R10(%rdi)
 +      movq    %rax, R9(%rdi)
 +      movq    %rax, R8(%rdi)
 +      movq    %rax, RAX(%rdi)
 +      movq    %rax, RCX(%rdi)
 +      movq    %rax, RDX(%rdi)
 +      movq    %rax, RSI(%rdi)
 +      movq    %rax, RDI(%rdi)
 +      movq    %rax, ORIG_RAX(%rdi)
 +      movq    %r9, RIP(%rdi)
 +      leaq    8(%rsp), %r9
 +      movq    $__KERNEL_CS, CS(%rdi)
 +      movq    %rax, EFLAGS(%rdi)
 +      movq    %r9, RSP(%rdi)
 +      movq    $__KERNEL_DS, SS(%rdi)
 +      jmpq    *%rcx
 +      CFI_ENDPROC
 +END(arch_unwind_init_running)
 +#endif
 +
- #ifdef CONFIG_PARAVIRT_XEN
+ #ifdef CONFIG_XEN
  zeroentry xen_hypervisor_callback xen_do_hypervisor_callback
  
  /*
@@@ -1519,11 -1496,162 +1535,162 @@@ END(error_exit
  ENTRY(nmi)
        INTR_FRAME
        PARAVIRT_ADJUST_EXCEPTION_FRAME
-       pushq_cfi $-1
+       /*
+        * We allow breakpoints in NMIs. If a breakpoint occurs, then
+        * the iretq it performs will take us out of NMI context.
+        * This means that we can have nested NMIs where the next
+        * NMI is using the top of the stack of the previous NMI. We
+        * can't let it execute because the nested NMI will corrupt the
+        * stack of the previous NMI. NMI handlers are not re-entrant
+        * anyway.
+        *
+        * To handle this case we do the following:
+        *  Check a special location on the stack that contains
+        *  a variable that is set when NMIs are executing.
+        *  The interrupted task's stack is also checked to see if it
+        *  is an NMI stack.
+        *  If the variable is not set and the stack is not the NMI
+        *  stack then:
+        *    o Set the special variable on the stack
+        *    o Copy the interrupt frame into a "saved" location on the stack
+        *    o Copy the interrupt frame into a "copy" location on the stack
+        *    o Continue processing the NMI
+        *  If the variable is set or the previous stack is the NMI stack:
+        *    o Modify the "copy" location to jump to the repeat_nmi
+        *    o return back to the first NMI
+        *
+        * Now on exit of the first NMI, we first clear the stack variable.
+        * The NMI stack will tell any nested NMIs at that point that it is
+        * nested. Then we pop the stack normally with iret, and if there was
+        * a nested NMI that updated the copy interrupt stack frame, a
+        * jump will be made to the repeat_nmi code that will handle the second
+        * NMI.
+        */
+       /* Use %rdx as our temp variable throughout */
+       pushq_cfi %rdx
+       /*
+        * Check the special variable on the stack to see if NMIs are
+        * executing.
+        */
+       cmp $1, -8(%rsp)
+       je nested_nmi
+       /*
+        * Now test if the previous stack was an NMI stack.
+        * We need the double check. We check the NMI stack to satisfy the
+        * race when the first NMI clears the variable before returning.
+        * We check the variable because the first NMI could be in a
+        * breakpoint routine using a breakpoint stack.
+        */
+       lea 6*8(%rsp), %rdx
+       test_in_nmi rdx, 4*8(%rsp), nested_nmi, first_nmi
+ nested_nmi:
+       /*
+        * Do nothing if we interrupted the fixup in repeat_nmi.
+        * It's about to repeat the NMI handler, so we are fine
+        * with ignoring this one.
+        */
+       movq $repeat_nmi, %rdx
+       cmpq 8(%rsp), %rdx
+       ja 1f
+       movq $end_repeat_nmi, %rdx
+       cmpq 8(%rsp), %rdx
+       ja nested_nmi_out
+ 1:
+       /* Set up the interrupted NMIs stack to jump to repeat_nmi */
+       leaq -6*8(%rsp), %rdx
+       movq %rdx, %rsp
+       CFI_ADJUST_CFA_OFFSET 6*8
+       pushq_cfi $__KERNEL_DS
+       pushq_cfi %rdx
+       pushfq_cfi
+       pushq_cfi $__KERNEL_CS
+       pushq_cfi $repeat_nmi
+       /* Put stack back */
+       addq $(11*8), %rsp
+       CFI_ADJUST_CFA_OFFSET -11*8
+ nested_nmi_out:
+       popq_cfi %rdx
+       /* No need to check faults here */
+       INTERRUPT_RETURN
+ first_nmi:
+       /*
+        * Because nested NMIs will use the pushed location that we
+        * stored in rdx, we must keep that space available.
+        * Here's what our stack frame will look like:
+        * +-------------------------+
+        * | original SS             |
+        * | original Return RSP     |
+        * | original RFLAGS         |
+        * | original CS             |
+        * | original RIP            |
+        * +-------------------------+
+        * | temp storage for rdx    |
+        * +-------------------------+
+        * | NMI executing variable  |
+        * +-------------------------+
+        * | Saved SS                |
+        * | Saved Return RSP        |
+        * | Saved RFLAGS            |
+        * | Saved CS                |
+        * | Saved RIP               |
+        * +-------------------------+
+        * | copied SS               |
+        * | copied Return RSP       |
+        * | copied RFLAGS           |
+        * | copied CS               |
+        * | copied RIP              |
+        * +-------------------------+
+        * | pt_regs                 |
+        * +-------------------------+
+        *
+        * The saved RIP is used to fix up the copied RIP that a nested
+        * NMI may zero out. The original stack frame and the temp storage
+        * are also used by nested NMIs and cannot be trusted on exit.
+        */
+       /* Set the NMI executing variable on the stack. */
+       pushq_cfi $1
+       /* Copy the stack frame to the Saved frame */
+       .rept 5
+       pushq_cfi 6*8(%rsp)
+       .endr
+       /* Make another copy, this one may be modified by nested NMIs */
+       .rept 5
+       pushq_cfi 4*8(%rsp)
+       .endr
+       /* Do not pop rdx, nested NMIs will corrupt it */
+       movq 11*8(%rsp), %rdx
+       /*
+        * Everything below this point can be preempted by a nested
+        * NMI if the first NMI took an exception. Repeated NMIs
+        * caused by an exception and nested NMI will start here, and
+        * can still be preempted by another NMI.
+        */
+ restart_nmi:
+       pushq_cfi $-1           /* ORIG_RAX: no syscall to restart */
        subq $ORIG_RAX-R15, %rsp
        CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
+       /*
+        * Use save_paranoid to handle SWAPGS, but no need to use paranoid_exit
+        * as we should not be calling schedule in NMI context,
+        * even with normal interrupts enabled. An NMI should not be
+        * setting NEED_RESCHED or anything that normal interrupts and
+        * exceptions might do.
+        */
        call save_paranoid
 -      DEFAULT_FRAME 0
 +      DEFAULT_FRAME -1
        /* paranoidentry do_nmi, 0; without TRACE_IRQS_OFF */
        movq %rsp,%rdi
        movq $-1,%rsi
Simple merge
Simple merge
@@@ -411,20 -412,12 +412,20 @@@ static void __init reserve_initrd(void
                 */
                initrd_start = ramdisk_image + PAGE_OFFSET;
                initrd_end = initrd_start + ramdisk_size;
 -              return;
 +      } else {
 +              relocate_initrd();
-               memblock_x86_free_range(ramdisk_image, ramdisk_end);
++              memblock_free(ramdisk_image, ramdisk_end - ramdisk_image);
        }
 -
 -      relocate_initrd();
 -
 -      memblock_free(ramdisk_image, ramdisk_end - ramdisk_image);
 +#ifdef CONFIG_ACPI_INITRD_TABLE_OVERRIDE
 +      acpi_initrd_offset = acpi_initrd_table_override((void *)initrd_start,
 +                                                      (void *)initrd_end);
 +      if (!acpi_initrd_offset)
 +              return;
 +      printk(KERN_INFO "Found acpi tables of size: %lu at 0x%lx\n",
 +             acpi_initrd_offset, initrd_start);
 +      initrd_start += acpi_initrd_offset;
 +      return;
 +#endif
  }
  #else
  static void __init reserve_initrd(void)
Simple merge
index 0000000,89b02bf..c6c18aa
mode 000000,100644..100644
--- /dev/null
@@@ -1,0 -1,670 +1,670 @@@
+ /*
+  * Kernel-based Virtual Machine driver for Linux
+  * cpuid support routines
+  *
+  * derived from arch/x86/kvm/x86.c
+  *
+  * Copyright 2011 Red Hat, Inc. and/or its affiliates.
+  * Copyright IBM Corporation, 2008
+  *
+  * This work is licensed under the terms of the GNU GPL, version 2.  See
+  * the COPYING file in the top-level directory.
+  *
+  */
+ #include <linux/kvm_host.h>
+ #include <linux/module.h>
+ #include <linux/vmalloc.h>
+ #include <linux/uaccess.h>
+ #include <asm/user.h>
+ #include <asm/xsave.h>
+ #include "cpuid.h"
+ #include "lapic.h"
+ #include "mmu.h"
+ #include "trace.h"
+ void kvm_update_cpuid(struct kvm_vcpu *vcpu)
+ {
+       struct kvm_cpuid_entry2 *best;
+       struct kvm_lapic *apic = vcpu->arch.apic;
+       best = kvm_find_cpuid_entry(vcpu, 1, 0);
+       if (!best)
+               return;
+       /* Update OSXSAVE bit */
+       if (cpu_has_xsave && best->function == 0x1) {
+               best->ecx &= ~(bit(X86_FEATURE_OSXSAVE));
+               if (kvm_read_cr4_bits(vcpu, X86_CR4_OSXSAVE))
+                       best->ecx |= bit(X86_FEATURE_OSXSAVE);
+       }
+       if (apic) {
+               if (best->ecx & bit(X86_FEATURE_TSC_DEADLINE_TIMER))
+                       apic->lapic_timer.timer_mode_mask = 3 << 17;
+               else
+                       apic->lapic_timer.timer_mode_mask = 1 << 17;
+       }
+       kvm_pmu_cpuid_update(vcpu);
+ }
+ static int is_efer_nx(void)
+ {
+       unsigned long long efer = 0;
+       rdmsrl_safe(MSR_EFER, &efer);
+       return efer & EFER_NX;
+ }
+ static void cpuid_fix_nx_cap(struct kvm_vcpu *vcpu)
+ {
+       int i;
+       struct kvm_cpuid_entry2 *e, *entry;
+       entry = NULL;
+       for (i = 0; i < vcpu->arch.cpuid_nent; ++i) {
+               e = &vcpu->arch.cpuid_entries[i];
+               if (e->function == 0x80000001) {
+                       entry = e;
+                       break;
+               }
+       }
+       if (entry && (entry->edx & (1 << 20)) && !is_efer_nx()) {
+               entry->edx &= ~(1 << 20);
+               printk(KERN_INFO "kvm: guest NX capability removed\n");
+       }
+ }
+ /* when an old userspace process fills a new kernel module */
+ int kvm_vcpu_ioctl_set_cpuid(struct kvm_vcpu *vcpu,
+                            struct kvm_cpuid *cpuid,
+                            struct kvm_cpuid_entry __user *entries)
+ {
+       int r, i;
+       struct kvm_cpuid_entry *cpuid_entries;
+       r = -E2BIG;
+       if (cpuid->nent > KVM_MAX_CPUID_ENTRIES)
+               goto out;
+       r = -ENOMEM;
+       cpuid_entries = vmalloc(sizeof(struct kvm_cpuid_entry) * cpuid->nent);
+       if (!cpuid_entries)
+               goto out;
+       r = -EFAULT;
+       if (copy_from_user(cpuid_entries, entries,
+                          cpuid->nent * sizeof(struct kvm_cpuid_entry)))
+               goto out_free;
+       for (i = 0; i < cpuid->nent; i++) {
+               vcpu->arch.cpuid_entries[i].function = cpuid_entries[i].function;
+               vcpu->arch.cpuid_entries[i].eax = cpuid_entries[i].eax;
+               vcpu->arch.cpuid_entries[i].ebx = cpuid_entries[i].ebx;
+               vcpu->arch.cpuid_entries[i].ecx = cpuid_entries[i].ecx;
+               vcpu->arch.cpuid_entries[i].edx = cpuid_entries[i].edx;
+               vcpu->arch.cpuid_entries[i].index = 0;
+               vcpu->arch.cpuid_entries[i].flags = 0;
+               vcpu->arch.cpuid_entries[i].padding[0] = 0;
+               vcpu->arch.cpuid_entries[i].padding[1] = 0;
+               vcpu->arch.cpuid_entries[i].padding[2] = 0;
+       }
+       vcpu->arch.cpuid_nent = cpuid->nent;
+       cpuid_fix_nx_cap(vcpu);
+       r = 0;
+       kvm_apic_set_version(vcpu);
+       kvm_x86_ops->cpuid_update(vcpu);
+       kvm_update_cpuid(vcpu);
+ out_free:
+       vfree(cpuid_entries);
+ out:
+       return r;
+ }
+ int kvm_vcpu_ioctl_set_cpuid2(struct kvm_vcpu *vcpu,
+                             struct kvm_cpuid2 *cpuid,
+                             struct kvm_cpuid_entry2 __user *entries)
+ {
+       int r;
+       r = -E2BIG;
+       if (cpuid->nent > KVM_MAX_CPUID_ENTRIES)
+               goto out;
+       r = -EFAULT;
+       if (copy_from_user(&vcpu->arch.cpuid_entries, entries,
+                          cpuid->nent * sizeof(struct kvm_cpuid_entry2)))
+               goto out;
+       vcpu->arch.cpuid_nent = cpuid->nent;
+       kvm_apic_set_version(vcpu);
+       kvm_x86_ops->cpuid_update(vcpu);
+       kvm_update_cpuid(vcpu);
+       return 0;
+ out:
+       return r;
+ }
+ int kvm_vcpu_ioctl_get_cpuid2(struct kvm_vcpu *vcpu,
+                             struct kvm_cpuid2 *cpuid,
+                             struct kvm_cpuid_entry2 __user *entries)
+ {
+       int r;
+       r = -E2BIG;
+       if (cpuid->nent < vcpu->arch.cpuid_nent)
+               goto out;
+       r = -EFAULT;
+       if (copy_to_user(entries, &vcpu->arch.cpuid_entries,
+                        vcpu->arch.cpuid_nent * sizeof(struct kvm_cpuid_entry2)))
+               goto out;
+       return 0;
+ out:
+       cpuid->nent = vcpu->arch.cpuid_nent;
+       return r;
+ }
+ static void cpuid_mask(u32 *word, int wordnum)
+ {
+       *word &= boot_cpu_data.x86_capability[wordnum];
+ }
+ static void do_cpuid_1_ent(struct kvm_cpuid_entry2 *entry, u32 function,
+                          u32 index)
+ {
+       entry->function = function;
+       entry->index = index;
+       cpuid_count(entry->function, entry->index,
+                   &entry->eax, &entry->ebx, &entry->ecx, &entry->edx);
+       entry->flags = 0;
+ }
+ static bool supported_xcr0_bit(unsigned bit)
+ {
+       u64 mask = ((u64)1 << bit);
+       return mask & (XSTATE_FP | XSTATE_SSE | XSTATE_YMM) & host_xcr0;
+ }
+ #define F(x) bit(X86_FEATURE_##x)
+ static int do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function,
+                        u32 index, int *nent, int maxnent)
+ {
+       int r;
+       unsigned f_nx = is_efer_nx() ? F(NX) : 0;
+ #ifdef CONFIG_X86_64
+       unsigned f_gbpages = (kvm_x86_ops->get_lpage_level() == PT_PDPE_LEVEL)
+                               ? F(GBPAGES) : 0;
+       unsigned f_lm = F(LM);
+ #else
+       unsigned f_gbpages = 0;
+       unsigned f_lm = 0;
+ #endif
+       unsigned f_rdtscp = kvm_x86_ops->rdtscp_supported() ? F(RDTSCP) : 0;
+       /* cpuid 1.edx */
+       const u32 kvm_supported_word0_x86_features =
+               F(FPU) | F(VME) | F(DE) | F(PSE) |
+               F(TSC) | F(MSR) | F(PAE) | F(MCE) |
+               F(CX8) | F(APIC) | 0 /* Reserved */ | F(SEP) |
+               F(MTRR) | F(PGE) | F(MCA) | F(CMOV) |
+               F(PAT) | F(PSE36) | 0 /* PSN */ | F(CLFLSH) |
+               0 /* Reserved, DS, ACPI */ | F(MMX) |
+               F(FXSR) | F(XMM) | F(XMM2) | F(SELFSNOOP) |
+               0 /* HTT, TM, Reserved, PBE */;
+       /* cpuid 0x80000001.edx */
+       const u32 kvm_supported_word1_x86_features =
+               F(FPU) | F(VME) | F(DE) | F(PSE) |
+               F(TSC) | F(MSR) | F(PAE) | F(MCE) |
+               F(CX8) | F(APIC) | 0 /* Reserved */ | F(SYSCALL) |
+               F(MTRR) | F(PGE) | F(MCA) | F(CMOV) |
+               F(PAT) | F(PSE36) | 0 /* Reserved */ |
+               f_nx | 0 /* Reserved */ | F(MMXEXT) | F(MMX) |
+               F(FXSR) | F(FXSR_OPT) | f_gbpages | f_rdtscp |
+               0 /* Reserved */ | f_lm | F(3DNOWEXT) | F(3DNOW);
+       /* cpuid 1.ecx */
+       const u32 kvm_supported_word4_x86_features =
 -              F(XMM3) | F(PCLMULQDQ) | 0 /* DTES64, MONITOR */ |
++              F(XMM3) | F(PCLMULQDQ) | 0 /* DTES64 */ | F(MWAIT) |
+               0 /* DS-CPL, VMX, SMX, EST */ |
+               0 /* TM2 */ | F(SSSE3) | 0 /* CNXT-ID */ | 0 /* Reserved */ |
+               F(FMA) | F(CX16) | 0 /* xTPR Update, PDCM */ |
+               0 /* Reserved, DCA */ | F(XMM4_1) |
+               F(XMM4_2) | F(X2APIC) | F(MOVBE) | F(POPCNT) |
+               0 /* Reserved*/ | F(AES) | F(XSAVE) | 0 /* OSXSAVE */ | F(AVX) |
+               F(F16C) | F(RDRAND);
+       /* cpuid 0x80000001.ecx */
+       const u32 kvm_supported_word6_x86_features =
+               F(LAHF_LM) | F(CMP_LEGACY) | 0 /*SVM*/ | 0 /* ExtApicSpace */ |
+               F(CR8_LEGACY) | F(ABM) | F(SSE4A) | F(MISALIGNSSE) |
+               F(3DNOWPREFETCH) | 0 /* OSVW */ | 0 /* IBS */ | F(XOP) |
+               0 /* SKINIT, WDT, LWP */ | F(FMA4) | F(TBM);
+       /* cpuid 0xC0000001.edx */
+       const u32 kvm_supported_word5_x86_features =
+               F(XSTORE) | F(XSTORE_EN) | F(XCRYPT) | F(XCRYPT_EN) |
+               F(ACE2) | F(ACE2_EN) | F(PHE) | F(PHE_EN) |
+               F(PMM) | F(PMM_EN);
+       /* cpuid 7.0.ebx */
+       const u32 kvm_supported_word9_x86_features =
+               F(FSGSBASE) | F(BMI1) | F(AVX2) | F(SMEP) | F(BMI2) | F(ERMS);
+       /* all calls to cpuid_count() should be made on the same cpu */
+       get_cpu();
+       r = -E2BIG;
+       if (*nent >= maxnent)
+               goto out;
+       do_cpuid_1_ent(entry, function, index);
+       ++*nent;
+       switch (function) {
+       case 0:
+               entry->eax = min(entry->eax, (u32)0xd);
+               break;
+       case 1:
+               entry->edx &= kvm_supported_word0_x86_features;
+               cpuid_mask(&entry->edx, 0);
+               entry->ecx &= kvm_supported_word4_x86_features;
+               cpuid_mask(&entry->ecx, 4);
+               /* we support x2apic emulation even if host does not support
+                * it since we emulate x2apic in software */
+               entry->ecx |= F(X2APIC);
+               break;
+       /* function 2 entries are STATEFUL. That is, repeated cpuid commands
+        * may return different values. This forces us to get_cpu() before
+        * issuing the first command, and also to emulate this annoying behavior
+        * in kvm_emulate_cpuid() using KVM_CPUID_FLAG_STATE_READ_NEXT */
+       case 2: {
+               int t, times = entry->eax & 0xff;
+               entry->flags |= KVM_CPUID_FLAG_STATEFUL_FUNC;
+               entry->flags |= KVM_CPUID_FLAG_STATE_READ_NEXT;
+               for (t = 1; t < times; ++t) {
+                       if (*nent >= maxnent)
+                               goto out;
+                       do_cpuid_1_ent(&entry[t], function, 0);
+                       entry[t].flags |= KVM_CPUID_FLAG_STATEFUL_FUNC;
+                       ++*nent;
+               }
+               break;
+       }
+       /* function 4 has additional index. */
+       case 4: {
+               int i, cache_type;
+               entry->flags |= KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
+               /* read more entries until cache_type is zero */
+               for (i = 1; ; ++i) {
+                       if (*nent >= maxnent)
+                               goto out;
+                       cache_type = entry[i - 1].eax & 0x1f;
+                       if (!cache_type)
+                               break;
+                       do_cpuid_1_ent(&entry[i], function, i);
+                       entry[i].flags |=
+                              KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
+                       ++*nent;
+               }
+               break;
+       }
+       case 7: {
+               entry->flags |= KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
+               /* Mask ebx against host capability word 9 */
+               if (index == 0) {
+                       entry->ebx &= kvm_supported_word9_x86_features;
+                       cpuid_mask(&entry->ebx, 9);
+               } else
+                       entry->ebx = 0;
+               entry->eax = 0;
+               entry->ecx = 0;
+               entry->edx = 0;
+               break;
+       }
+       case 9:
+               break;
+       case 0xa: { /* Architectural Performance Monitoring */
+               struct x86_pmu_capability cap;
+               union cpuid10_eax eax;
+               union cpuid10_edx edx;
+               perf_get_x86_pmu_capability(&cap);
+               /*
+                * Only support guest architectural pmu on a host
+                * with architectural pmu.
+                */
+               if (!cap.version)
+                       memset(&cap, 0, sizeof(cap));
+               eax.split.version_id = min(cap.version, 2);
+               eax.split.num_counters = cap.num_counters_gp;
+               eax.split.bit_width = cap.bit_width_gp;
+               eax.split.mask_length = cap.events_mask_len;
+               edx.split.num_counters_fixed = cap.num_counters_fixed;
+               edx.split.bit_width_fixed = cap.bit_width_fixed;
+               edx.split.reserved = 0;
+               entry->eax = eax.full;
+               entry->ebx = cap.events_mask;
+               entry->ecx = 0;
+               entry->edx = edx.full;
+               break;
+       }
+       /* function 0xb has additional index. */
+       case 0xb: {
+               int i, level_type;
+               entry->flags |= KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
+               /* read more entries until level_type is zero */
+               for (i = 1; ; ++i) {
+                       if (*nent >= maxnent)
+                               goto out;
+                       level_type = entry[i - 1].ecx & 0xff00;
+                       if (!level_type)
+                               break;
+                       do_cpuid_1_ent(&entry[i], function, i);
+                       entry[i].flags |=
+                              KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
+                       ++*nent;
+               }
+               break;
+       }
+       case 0xd: {
+               int idx, i;
+               entry->flags |= KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
+               for (idx = 1, i = 1; idx < 64; ++idx) {
+                       if (*nent >= maxnent)
+                               goto out;
+                       do_cpuid_1_ent(&entry[i], function, idx);
+                       if (entry[i].eax == 0 || !supported_xcr0_bit(idx))
+                               continue;
+                       entry[i].flags |=
+                              KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
+                       ++*nent;
+                       ++i;
+               }
+               break;
+       }
+       case KVM_CPUID_SIGNATURE: {
+               char signature[12] = "KVMKVMKVM\0\0";
+               u32 *sigptr = (u32 *)signature;
+               entry->eax = 0;
+               entry->ebx = sigptr[0];
+               entry->ecx = sigptr[1];
+               entry->edx = sigptr[2];
+               break;
+       }
+       case KVM_CPUID_FEATURES:
+               entry->eax = (1 << KVM_FEATURE_CLOCKSOURCE) |
+                            (1 << KVM_FEATURE_NOP_IO_DELAY) |
+                            (1 << KVM_FEATURE_CLOCKSOURCE2) |
+                            (1 << KVM_FEATURE_ASYNC_PF) |
+                            (1 << KVM_FEATURE_CLOCKSOURCE_STABLE_BIT);
+               if (sched_info_on())
+                       entry->eax |= (1 << KVM_FEATURE_STEAL_TIME);
+               entry->ebx = 0;
+               entry->ecx = 0;
+               entry->edx = 0;
+               break;
+       case 0x80000000:
+               entry->eax = min(entry->eax, 0x8000001a);
+               break;
+       case 0x80000001:
+               entry->edx &= kvm_supported_word1_x86_features;
+               cpuid_mask(&entry->edx, 1);
+               entry->ecx &= kvm_supported_word6_x86_features;
+               cpuid_mask(&entry->ecx, 6);
+               break;
+       case 0x80000008: {
+               unsigned g_phys_as = (entry->eax >> 16) & 0xff;
+               unsigned virt_as = max((entry->eax >> 8) & 0xff, 48U);
+               unsigned phys_as = entry->eax & 0xff;
+               if (!g_phys_as)
+                       g_phys_as = phys_as;
+               entry->eax = g_phys_as | (virt_as << 8);
+               entry->ebx = entry->edx = 0;
+               break;
+       }
+       case 0x80000019:
+               entry->ecx = entry->edx = 0;
+               break;
+       case 0x8000001a:
+               break;
+       case 0x8000001d:
+               break;
+       /*Add support for Centaur's CPUID instruction*/
+       case 0xC0000000:
+               /*Just support up to 0xC0000004 now*/
+               entry->eax = min(entry->eax, 0xC0000004);
+               break;
+       case 0xC0000001:
+               entry->edx &= kvm_supported_word5_x86_features;
+               cpuid_mask(&entry->edx, 5);
+               break;
+       case 3: /* Processor serial number */
+       case 5: /* MONITOR/MWAIT */
+       case 6: /* Thermal management */
+       case 0x80000007: /* Advanced power management */
+       case 0xC0000002:
+       case 0xC0000003:
+       case 0xC0000004:
+       default:
+               entry->eax = entry->ebx = entry->ecx = entry->edx = 0;
+               break;
+       }
+       kvm_x86_ops->set_supported_cpuid(function, entry);
+       r = 0;
+ out:
+       put_cpu();
+       return r;
+ }
+ #undef F
+ struct kvm_cpuid_param {
+       u32 func;
+       u32 idx;
+       bool has_leaf_count;
+       bool (*qualifier)(struct kvm_cpuid_param *param);
+ };
+ static bool is_centaur_cpu(struct kvm_cpuid_param *param)
+ {
+       return boot_cpu_data.x86_vendor == X86_VENDOR_CENTAUR;
+ }
+ int kvm_dev_ioctl_get_supported_cpuid(struct kvm_cpuid2 *cpuid,
+                                     struct kvm_cpuid_entry2 __user *entries)
+ {
+       struct kvm_cpuid_entry2 *cpuid_entries;
+       int limit, nent = 0, r = -E2BIG, i;
+       u32 func;
+       static struct kvm_cpuid_param param[] = {
+               { .func = 0, .has_leaf_count = true },
+               { .func = 0x80000000, .has_leaf_count = true },
+               { .func = 0xC0000000, .qualifier = is_centaur_cpu, .has_leaf_count = true },
+               { .func = KVM_CPUID_SIGNATURE },
+               { .func = KVM_CPUID_FEATURES },
+       };
+       if (cpuid->nent < 1)
+               goto out;
+       if (cpuid->nent > KVM_MAX_CPUID_ENTRIES)
+               cpuid->nent = KVM_MAX_CPUID_ENTRIES;
+       r = -ENOMEM;
+       cpuid_entries = vmalloc(sizeof(struct kvm_cpuid_entry2) * cpuid->nent);
+       if (!cpuid_entries)
+               goto out;
+       r = 0;
+       for (i = 0; i < ARRAY_SIZE(param); i++) {
+               struct kvm_cpuid_param *ent = &param[i];
+               if (ent->qualifier && !ent->qualifier(ent))
+                       continue;
+               r = do_cpuid_ent(&cpuid_entries[nent], ent->func, ent->idx,
+                               &nent, cpuid->nent);
+               if (r)
+                       goto out_free;
+               if (!ent->has_leaf_count)
+                       continue;
+               limit = cpuid_entries[nent - 1].eax;
+               for (func = ent->func + 1; func <= limit && nent < cpuid->nent && r == 0; ++func)
+                       r = do_cpuid_ent(&cpuid_entries[nent], func, ent->idx,
+                                    &nent, cpuid->nent);
+               if (r)
+                       goto out_free;
+       }
+       r = -EFAULT;
+       if (copy_to_user(entries, cpuid_entries,
+                        nent * sizeof(struct kvm_cpuid_entry2)))
+               goto out_free;
+       cpuid->nent = nent;
+       r = 0;
+ out_free:
+       vfree(cpuid_entries);
+ out:
+       return r;
+ }
+ static int move_to_next_stateful_cpuid_entry(struct kvm_vcpu *vcpu, int i)
+ {
+       struct kvm_cpuid_entry2 *e = &vcpu->arch.cpuid_entries[i];
+       int j, nent = vcpu->arch.cpuid_nent;
+       e->flags &= ~KVM_CPUID_FLAG_STATE_READ_NEXT;
+       /* when no next entry is found, the current entry[i] is reselected */
+       for (j = i + 1; ; j = (j + 1) % nent) {
+               struct kvm_cpuid_entry2 *ej = &vcpu->arch.cpuid_entries[j];
+               if (ej->function == e->function) {
+                       ej->flags |= KVM_CPUID_FLAG_STATE_READ_NEXT;
+                       return j;
+               }
+       }
+       return 0; /* silence gcc, even though control never reaches here */
+ }
+ /* find an entry with matching function, matching index (if needed), and that
+  * should be read next (if it's stateful) */
+ static int is_matching_cpuid_entry(struct kvm_cpuid_entry2 *e,
+       u32 function, u32 index)
+ {
+       if (e->function != function)
+               return 0;
+       if ((e->flags & KVM_CPUID_FLAG_SIGNIFCANT_INDEX) && e->index != index)
+               return 0;
+       if ((e->flags & KVM_CPUID_FLAG_STATEFUL_FUNC) &&
+           !(e->flags & KVM_CPUID_FLAG_STATE_READ_NEXT))
+               return 0;
+       return 1;
+ }
+ struct kvm_cpuid_entry2 *kvm_find_cpuid_entry(struct kvm_vcpu *vcpu,
+                                             u32 function, u32 index)
+ {
+       int i;
+       struct kvm_cpuid_entry2 *best = NULL;
+       for (i = 0; i < vcpu->arch.cpuid_nent; ++i) {
+               struct kvm_cpuid_entry2 *e;
+               e = &vcpu->arch.cpuid_entries[i];
+               if (is_matching_cpuid_entry(e, function, index)) {
+                       if (e->flags & KVM_CPUID_FLAG_STATEFUL_FUNC)
+                               move_to_next_stateful_cpuid_entry(vcpu, i);
+                       best = e;
+                       break;
+               }
+       }
+       return best;
+ }
+ EXPORT_SYMBOL_GPL(kvm_find_cpuid_entry);
+ int cpuid_maxphyaddr(struct kvm_vcpu *vcpu)
+ {
+       struct kvm_cpuid_entry2 *best;
+       best = kvm_find_cpuid_entry(vcpu, 0x80000000, 0);
+       if (!best || best->eax < 0x80000008)
+               goto not_found;
+       best = kvm_find_cpuid_entry(vcpu, 0x80000008, 0);
+       if (best)
+               return best->eax & 0xff;
+ not_found:
+       return 36;
+ }
+ /*
+  * If no match is found, check whether we exceed the vCPU's limit
+  * and return the content of the highest valid _standard_ leaf instead.
+  * This is to satisfy the CPUID specification.
+  */
+ static struct kvm_cpuid_entry2* check_cpuid_limit(struct kvm_vcpu *vcpu,
+                                                   u32 function, u32 index)
+ {
+       struct kvm_cpuid_entry2 *maxlevel;
+       maxlevel = kvm_find_cpuid_entry(vcpu, function & 0x80000000, 0);
+       if (!maxlevel || maxlevel->eax >= function)
+               return NULL;
+       if (function & 0x80000000) {
+               maxlevel = kvm_find_cpuid_entry(vcpu, 0, 0);
+               if (!maxlevel)
+                       return NULL;
+       }
+       return kvm_find_cpuid_entry(vcpu, maxlevel->eax, index);
+ }
+ void kvm_emulate_cpuid(struct kvm_vcpu *vcpu)
+ {
+       u32 function, index;
+       struct kvm_cpuid_entry2 *best;
+       function = kvm_register_read(vcpu, VCPU_REGS_RAX);
+       index = kvm_register_read(vcpu, VCPU_REGS_RCX);
+       kvm_register_write(vcpu, VCPU_REGS_RAX, 0);
+       kvm_register_write(vcpu, VCPU_REGS_RBX, 0);
+       kvm_register_write(vcpu, VCPU_REGS_RCX, 0);
+       kvm_register_write(vcpu, VCPU_REGS_RDX, 0);
+       best = kvm_find_cpuid_entry(vcpu, function, index);
+       if (!best)
+               best = check_cpuid_limit(vcpu, function, index);
+       if (best) {
+               kvm_register_write(vcpu, VCPU_REGS_RAX, best->eax);
+               kvm_register_write(vcpu, VCPU_REGS_RBX, best->ebx);
+               kvm_register_write(vcpu, VCPU_REGS_RCX, best->ecx);
+               kvm_register_write(vcpu, VCPU_REGS_RDX, best->edx);
+       }
+       kvm_x86_ops->skip_emulated_instruction(vcpu);
+       trace_kvm_cpuid(function,
+                       kvm_register_read(vcpu, VCPU_REGS_RAX),
+                       kvm_register_read(vcpu, VCPU_REGS_RBX),
+                       kvm_register_read(vcpu, VCPU_REGS_RCX),
+                       kvm_register_read(vcpu, VCPU_REGS_RDX));
+ }
+ EXPORT_SYMBOL_GPL(kvm_emulate_cpuid);
Simple merge
Simple merge
Simple merge
index 0000000,6296b40..e4998a3
mode 000000,100644..100644
--- /dev/null
@@@ -1,0 -1,675 +1,684 @@@
+ /************************************************************
+  * EFI GUID Partition Table handling
+  *
+  * http://www.uefi.org/specs/
+  * http://www.intel.com/technology/efi/
+  *
+  * efi.[ch] by Matt Domsch <Matt_Domsch@dell.com>
+  *   Copyright 2000,2001,2002,2004 Dell Inc.
+  *
+  *  This program is free software; you can redistribute it and/or modify
+  *  it under the terms of the GNU General Public License as published by
+  *  the Free Software Foundation; either version 2 of the License, or
+  *  (at your option) any later version.
+  *
+  *  This program is distributed in the hope that it will be useful,
+  *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+  *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+  *  GNU General Public License for more details.
+  *
+  *  You should have received a copy of the GNU General Public License
+  *  along with this program; if not, write to the Free Software
+  *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+  *
+  *
+  * TODO:
+  *
+  * Changelog:
+  * Mon Nov 09 2004 Matt Domsch <Matt_Domsch@dell.com>
+  * - test for valid PMBR and valid PGPT before ever reading
+  *   AGPT, allow override with 'gpt' kernel command line option.
+  * - check for first/last_usable_lba outside of size of disk
+  *
+  * Tue  Mar 26 2002 Matt Domsch <Matt_Domsch@dell.com>
+  * - Ported to 2.5.7-pre1 and 2.5.7-dj2
+  * - Applied patch to avoid fault in alternate header handling
+  * - cleaned up find_valid_gpt
+  * - On-disk structure and copy in memory is *always* LE now - 
+  *   swab fields as needed
+  * - remove print_gpt_header()
+  * - only use first max_p partition entries, to keep the kernel minor number
+  *   and partition numbers tied.
+  *
+  * Mon  Feb 04 2002 Matt Domsch <Matt_Domsch@dell.com>
+  * - Removed __PRIPTR_PREFIX - not being used
+  *
+  * Mon  Jan 14 2002 Matt Domsch <Matt_Domsch@dell.com>
+  * - Ported to 2.5.2-pre11 + library crc32 patch Linus applied
+  *
+  * Thu Dec 6 2001 Matt Domsch <Matt_Domsch@dell.com>
+  * - Added compare_gpts().
+  * - moved le_efi_guid_to_cpus() back into this file.  GPT is the only
+  *   thing that keeps EFI GUIDs on disk.
+  * - Changed gpt structure names and members to be simpler and more Linux-like.
+  * 
+  * Wed Oct 17 2001 Matt Domsch <Matt_Domsch@dell.com>
+  * - Removed CONFIG_DEVFS_VOLUMES_UUID code entirely per Martin Wilck
+  *
+  * Wed Oct 10 2001 Matt Domsch <Matt_Domsch@dell.com>
+  * - Changed function comments to DocBook style per Andreas Dilger suggestion.
+  *
+  * Mon Oct 08 2001 Matt Domsch <Matt_Domsch@dell.com>
+  * - Change read_lba() to use the page cache per Al Viro's work.
+  * - print u64s properly on all architectures
+  * - fixed debug_printk(), now Dprintk()
+  *
+  * Mon Oct 01 2001 Matt Domsch <Matt_Domsch@dell.com>
+  * - Style cleanups
+  * - made most functions static
+  * - Endianness addition
+  * - remove test for second alternate header, as it's not per spec,
+  *   and is unnecessary.  There's now a method to read/write the last
+  *   sector of an odd-sized disk from user space.  No tools have ever
+  *   been released which used this code, so it's effectively dead.
+  * - Per Asit Mallick of Intel, added a test for a valid PMBR.
+  * - Added kernel command line option 'gpt' to override valid PMBR test.
+  *
+  * Wed Jun  6 2001 Martin Wilck <Martin.Wilck@Fujitsu-Siemens.com>
+  * - added devfs volume UUID support (/dev/volumes/uuids) for
+  *   mounting file systems by the partition GUID. 
+  *
+  * Tue Dec  5 2000 Matt Domsch <Matt_Domsch@dell.com>
+  * - Moved crc32() to linux/lib, added efi_crc32().
+  *
+  * Thu Nov 30 2000 Matt Domsch <Matt_Domsch@dell.com>
+  * - Replaced Intel's CRC32 function with an equivalent
+  *   non-license-restricted version.
+  *
+  * Wed Oct 25 2000 Matt Domsch <Matt_Domsch@dell.com>
+  * - Fixed the last_lba() call to return the proper last block
+  *
+  * Thu Oct 12 2000 Matt Domsch <Matt_Domsch@dell.com>
+  * - Thanks to Andries Brouwer for his debugging assistance.
+  * - Code works, detects all the partitions.
+  *
+  ************************************************************/
+ #include <linux/crc32.h>
+ #include <linux/ctype.h>
+ #include <linux/math64.h>
+ #include <linux/slab.h>
+ #include "check.h"
+ #include "efi.h"
+ /* This allows a kernel command line option 'gpt' to override
+  * the test for invalid PMBR.  Not __initdata because reloading
+  * the partition tables happens after init too.
+  */
+ static int force_gpt;
+ static int __init
+ force_gpt_fn(char *str)
+ {
+       force_gpt = 1;
+       return 1;
+ }
+ __setup("gpt", force_gpt_fn);
+ /**
+  * efi_crc32() - EFI version of crc32 function
+  * @buf: buffer to calculate crc32 of
+  * @len - length of buf
+  *
+  * Description: Returns EFI-style CRC32 value for @buf
+  * 
+  * This function uses the little endian Ethernet polynomial
+  * but seeds the function with ~0, and xor's with ~0 at the end.
+  * Note, the EFI Specification, v1.02, has a reference to
+  * Dr. Dobbs Journal, May 1994 (actually it's in May 1992).
+  */
+ static inline u32
+ efi_crc32(const void *buf, unsigned long len)
+ {
+       return (crc32(~0L, buf, len) ^ ~0L);
+ }
+ /**
+  * last_lba(): return number of last logical block of device
+  * @bdev: block device
+  * 
+  * Description: Returns last LBA value on success, 0 on error.
+  * This is stored (by sd and ide-geometry) in
+  *  the part[0] entry for this disk, and is the number of
+  *  physical sectors available on the disk.
+  */
+ static u64 last_lba(struct block_device *bdev)
+ {
+       if (!bdev || !bdev->bd_inode)
+               return 0;
+       return div_u64(bdev->bd_inode->i_size,
+                      bdev_logical_block_size(bdev)) - 1ULL;
+ }
+ static inline int
+ pmbr_part_valid(struct partition *part)
+ {
+         if (part->sys_ind == EFI_PMBR_OSTYPE_EFI_GPT &&
+             le32_to_cpu(part->start_sect) == 1UL)
+                 return 1;
+         return 0;
+ }
+ /**
+  * is_pmbr_valid(): test Protective MBR for validity
+  * @mbr: pointer to a legacy mbr structure
+  *
+  * Description: Returns 1 if PMBR is valid, 0 otherwise.
+  * Validity depends on two things:
+  *  1) MSDOS signature is in the last two bytes of the MBR
+  *  2) One partition of type 0xEE is found
+  */
+ static int
+ is_pmbr_valid(legacy_mbr *mbr)
+ {
+       int i;
+       if (!mbr || le16_to_cpu(mbr->signature) != MSDOS_MBR_SIGNATURE)
+                 return 0;
+       for (i = 0; i < 4; i++)
+               if (pmbr_part_valid(&mbr->partition_record[i]))
+                         return 1;
+       return 0;
+ }
+ /**
+  * read_lba(): Read bytes from disk, starting at given LBA
+  * @state
+  * @lba
+  * @buffer
+  * @count
+  *
+  * Description: Reads @count bytes from @state->bdev into @buffer.
+  * Returns number of bytes read on success, 0 on error.
+  */
+ static size_t read_lba(struct parsed_partitions *state,
+                      u64 lba, u8 *buffer, size_t count)
+ {
+       size_t totalreadcount = 0;
+       struct block_device *bdev = state->bdev;
+       sector_t n = lba * (bdev_logical_block_size(bdev) / 512);
+       if (!buffer || lba > last_lba(bdev))
+                 return 0;
+       while (count) {
+               int copied = 512;
+               Sector sect;
+               unsigned char *data = read_part_sector(state, n++, &sect);
+               if (!data)
+                       break;
+               if (copied > count)
+                       copied = count;
+               memcpy(buffer, data, copied);
+               put_dev_sector(sect);
+               buffer += copied;
+               totalreadcount +=copied;
+               count -= copied;
+       }
+       return totalreadcount;
+ }
+ /**
+  * alloc_read_gpt_entries(): reads partition entries from disk
+  * @state
+  * @gpt - GPT header
+  * 
+  * Description: Returns ptes on success,  NULL on error.
+  * Allocates space for PTEs based on information found in @gpt.
+  * Notes: remember to free pte when you're done!
+  */
+ static gpt_entry *alloc_read_gpt_entries(struct parsed_partitions *state,
+                                        gpt_header *gpt)
+ {
+       size_t count;
+       gpt_entry *pte;
+       if (!gpt)
+               return NULL;
+       count = le32_to_cpu(gpt->num_partition_entries) *
+                 le32_to_cpu(gpt->sizeof_partition_entry);
+       if (!count)
+               return NULL;
+       pte = kzalloc(count, GFP_KERNEL);
+       if (!pte)
+               return NULL;
+       if (read_lba(state, le64_to_cpu(gpt->partition_entry_lba),
+                      (u8 *) pte,
+                    count) < count) {
+               kfree(pte);
+                 pte=NULL;
+               return NULL;
+       }
+       return pte;
+ }
+ /**
+  * alloc_read_gpt_header(): Allocates GPT header, reads into it from disk
+  * @state
+  * @lba is the Logical Block Address of the partition table
+  * 
+  * Description: returns GPT header on success, NULL on error.   Allocates
+  * and fills a GPT header starting at @lba from @state->bdev.
+  * Note: remember to free gpt when finished with it.
+  */
+ static gpt_header *alloc_read_gpt_header(struct parsed_partitions *state,
+                                        u64 lba)
+ {
+       gpt_header *gpt;
+       unsigned ssz = bdev_logical_block_size(state->bdev);
+       gpt = kzalloc(ssz, GFP_KERNEL);
+       if (!gpt)
+               return NULL;
+       if (read_lba(state, lba, (u8 *) gpt, ssz) < ssz) {
+               kfree(gpt);
+                 gpt=NULL;
+               return NULL;
+       }
+       return gpt;
+ }
+ /**
+  * is_gpt_valid() - tests one GPT header and PTEs for validity
+  * @state
+  * @lba is the logical block address of the GPT header to test
+  * @gpt is a GPT header ptr, filled on return.
+  * @ptes is a PTEs ptr, filled on return.
+  *
+  * Description: returns 1 if valid,  0 on error.
+  * If valid, returns pointers to newly allocated GPT header and PTEs.
+  */
+ static int is_gpt_valid(struct parsed_partitions *state, u64 lba,
+                       gpt_header **gpt, gpt_entry **ptes)
+ {
+       u32 crc, origcrc;
+       u64 lastlba;
+       if (!ptes)
+               return 0;
+       if (!(*gpt = alloc_read_gpt_header(state, lba)))
+               return 0;
+       /* Check the GUID Partition Table signature */
+       if (le64_to_cpu((*gpt)->signature) != GPT_HEADER_SIGNATURE) {
+               pr_debug("GUID Partition Table Header signature is wrong:"
+                        "%lld != %lld\n",
+                        (unsigned long long)le64_to_cpu((*gpt)->signature),
+                        (unsigned long long)GPT_HEADER_SIGNATURE);
+               goto fail;
+       }
+       /* Check the GUID Partition Table header size */
+       if (le32_to_cpu((*gpt)->header_size) >
+                       bdev_logical_block_size(state->bdev)) {
+               pr_debug("GUID Partition Table Header size is wrong: %u > %u\n",
+                       le32_to_cpu((*gpt)->header_size),
+                       bdev_logical_block_size(state->bdev));
+               goto fail;
+       }
+       /* Check the GUID Partition Table CRC */
+       origcrc = le32_to_cpu((*gpt)->header_crc32);
+       (*gpt)->header_crc32 = 0;
+       crc = efi_crc32((const unsigned char *) (*gpt), le32_to_cpu((*gpt)->header_size));
+       if (crc != origcrc) {
+               pr_debug("GUID Partition Table Header CRC is wrong: %x != %x\n",
+                        crc, origcrc);
+               goto fail;
+       }
+       (*gpt)->header_crc32 = cpu_to_le32(origcrc);
+       /* Check that the my_lba entry points to the LBA that contains
+        * the GUID Partition Table */
+       if (le64_to_cpu((*gpt)->my_lba) != lba) {
+               pr_debug("GPT my_lba incorrect: %lld != %lld\n",
+                        (unsigned long long)le64_to_cpu((*gpt)->my_lba),
+                        (unsigned long long)lba);
+               goto fail;
+       }
+       /* Check the first_usable_lba and last_usable_lba are
+        * within the disk.
+        */
+       lastlba = last_lba(state->bdev);
+       if (le64_to_cpu((*gpt)->first_usable_lba) > lastlba) {
+               pr_debug("GPT: first_usable_lba incorrect: %lld > %lld\n",
+                        (unsigned long long)le64_to_cpu((*gpt)->first_usable_lba),
+                        (unsigned long long)lastlba);
+               goto fail;
+       }
+       if (le64_to_cpu((*gpt)->last_usable_lba) > lastlba) {
+               pr_debug("GPT: last_usable_lba incorrect: %lld > %lld\n",
+                        (unsigned long long)le64_to_cpu((*gpt)->last_usable_lba),
+                        (unsigned long long)lastlba);
+               goto fail;
+       }
+       /* Check that sizeof_partition_entry has the correct value */
+       if (le32_to_cpu((*gpt)->sizeof_partition_entry) != sizeof(gpt_entry)) {
+               pr_debug("GUID Partition Entry Size check failed.\n");
+               goto fail;
+       }
+       if (!(*ptes = alloc_read_gpt_entries(state, *gpt)))
+               goto fail;
+       /* Check the GUID Partition Entry Array CRC */
+       crc = efi_crc32((const unsigned char *) (*ptes),
+                       le32_to_cpu((*gpt)->num_partition_entries) *
+                       le32_to_cpu((*gpt)->sizeof_partition_entry));
+       if (crc != le32_to_cpu((*gpt)->partition_entry_array_crc32)) {
+               pr_debug("GUID Partition Entry Array CRC check failed.\n");
+               goto fail_ptes;
+       }
+       /* We're done, all's well */
+       return 1;
+  fail_ptes:
+       kfree(*ptes);
+       *ptes = NULL;
+  fail:
+       kfree(*gpt);
+       *gpt = NULL;
+       return 0;
+ }
+ /**
+  * is_pte_valid() - tests one PTE for validity
+  * @pte is the pte to check
+  * @lastlba is last lba of the disk
+  *
+  * Description: returns 1 if valid,  0 on error.
+  */
+ static inline int
+ is_pte_valid(const gpt_entry *pte, const u64 lastlba)
+ {
+       if ((!efi_guidcmp(pte->partition_type_guid, NULL_GUID)) ||
+           le64_to_cpu(pte->starting_lba) > lastlba         ||
+           le64_to_cpu(pte->ending_lba)   > lastlba)
+               return 0;
+       return 1;
+ }
+ /**
+  * compare_gpts() - Compare primary and alternate GPT headers
+  * @pgpt is the primary GPT header
+  * @agpt is the alternate GPT header
+  * @lastlba is the last LBA number
+  * Description: Returns nothing.  Sanity checks pgpt and agpt fields
+  * and prints warnings on discrepancies.
+  * 
+  */
+ static void
+ compare_gpts(gpt_header *pgpt, gpt_header *agpt, u64 lastlba)
+ {
+       int error_found = 0;
+       if (!pgpt || !agpt)
+               return;
+       if (le64_to_cpu(pgpt->my_lba) != le64_to_cpu(agpt->alternate_lba)) {
+               printk(KERN_WARNING
+                      "GPT:Primary header LBA != Alt. header alternate_lba\n");
+               printk(KERN_WARNING "GPT:%lld != %lld\n",
+                      (unsigned long long)le64_to_cpu(pgpt->my_lba),
+                        (unsigned long long)le64_to_cpu(agpt->alternate_lba));
+               error_found++;
+       }
+       if (le64_to_cpu(pgpt->alternate_lba) != le64_to_cpu(agpt->my_lba)) {
+               printk(KERN_WARNING
+                      "GPT:Primary header alternate_lba != Alt. header my_lba\n");
+               printk(KERN_WARNING "GPT:%lld != %lld\n",
+                      (unsigned long long)le64_to_cpu(pgpt->alternate_lba),
+                        (unsigned long long)le64_to_cpu(agpt->my_lba));
+               error_found++;
+       }
+       if (le64_to_cpu(pgpt->first_usable_lba) !=
+             le64_to_cpu(agpt->first_usable_lba)) {
+               printk(KERN_WARNING "GPT:first_usable_lbas don't match.\n");
+               printk(KERN_WARNING "GPT:%lld != %lld\n",
+                      (unsigned long long)le64_to_cpu(pgpt->first_usable_lba),
+                        (unsigned long long)le64_to_cpu(agpt->first_usable_lba));
+               error_found++;
+       }
+       if (le64_to_cpu(pgpt->last_usable_lba) !=
+             le64_to_cpu(agpt->last_usable_lba)) {
+               printk(KERN_WARNING "GPT:last_usable_lbas don't match.\n");
+               printk(KERN_WARNING "GPT:%lld != %lld\n",
+                      (unsigned long long)le64_to_cpu(pgpt->last_usable_lba),
+                        (unsigned long long)le64_to_cpu(agpt->last_usable_lba));
+               error_found++;
+       }
+       if (efi_guidcmp(pgpt->disk_guid, agpt->disk_guid)) {
+               printk(KERN_WARNING "GPT:disk_guids don't match.\n");
+               error_found++;
+       }
+       if (le32_to_cpu(pgpt->num_partition_entries) !=
+             le32_to_cpu(agpt->num_partition_entries)) {
+               printk(KERN_WARNING "GPT:num_partition_entries don't match: "
+                      "0x%x != 0x%x\n",
+                      le32_to_cpu(pgpt->num_partition_entries),
+                      le32_to_cpu(agpt->num_partition_entries));
+               error_found++;
+       }
+       if (le32_to_cpu(pgpt->sizeof_partition_entry) !=
+             le32_to_cpu(agpt->sizeof_partition_entry)) {
+               printk(KERN_WARNING
+                      "GPT:sizeof_partition_entry values don't match: "
+                      "0x%x != 0x%x\n",
+                        le32_to_cpu(pgpt->sizeof_partition_entry),
+                      le32_to_cpu(agpt->sizeof_partition_entry));
+               error_found++;
+       }
+       if (le32_to_cpu(pgpt->partition_entry_array_crc32) !=
+             le32_to_cpu(agpt->partition_entry_array_crc32)) {
+               printk(KERN_WARNING
+                      "GPT:partition_entry_array_crc32 values don't match: "
+                      "0x%x != 0x%x\n",
+                        le32_to_cpu(pgpt->partition_entry_array_crc32),
+                      le32_to_cpu(agpt->partition_entry_array_crc32));
+               error_found++;
+       }
+       if (le64_to_cpu(pgpt->alternate_lba) != lastlba) {
+               printk(KERN_WARNING
+                      "GPT:Primary header thinks Alt. header is not at the end of the disk.\n");
+               printk(KERN_WARNING "GPT:%lld != %lld\n",
+                       (unsigned long long)le64_to_cpu(pgpt->alternate_lba),
+                       (unsigned long long)lastlba);
+               error_found++;
+       }
+       if (le64_to_cpu(agpt->my_lba) != lastlba) {
+               printk(KERN_WARNING
+                      "GPT:Alternate GPT header not at the end of the disk.\n");
+               printk(KERN_WARNING "GPT:%lld != %lld\n",
+                       (unsigned long long)le64_to_cpu(agpt->my_lba),
+                       (unsigned long long)lastlba);
+               error_found++;
+       }
+       if (error_found)
+               printk(KERN_WARNING
+                      "GPT: Use GNU Parted to correct GPT errors.\n");
+       return;
+ }
+ /**
+  * find_valid_gpt() - Search disk for valid GPT headers and PTEs
+  * @state
+  * @gpt is a GPT header ptr, filled on return.
+  * @ptes is a PTEs ptr, filled on return.
+  * Description: Returns 1 if valid, 0 on error.
+  * If valid, returns pointers to newly allocated GPT header and PTEs.
+  * Validity depends on PMBR being valid (or being overridden by the
+  * 'gpt' kernel command line option) and finding either the Primary
+  * GPT header and PTEs valid, or the Alternate GPT header and PTEs
+  * valid.  If the Primary GPT header is not valid, the Alternate GPT header
+  * is not checked unless the 'gpt' kernel command line option is passed.
+  * This protects against devices which misreport their size, and forces
+  * the user to decide to use the Alternate GPT.
+  */
+ static int find_valid_gpt(struct parsed_partitions *state, gpt_header **gpt,
+                         gpt_entry **ptes)
+ {
+       int good_pgpt = 0, good_agpt = 0, good_pmbr = 0;
+       gpt_header *pgpt = NULL, *agpt = NULL;
+       gpt_entry *pptes = NULL, *aptes = NULL;
+       legacy_mbr *legacymbr;
+       u64 lastlba;
+       if (!ptes)
+               return 0;
+       lastlba = last_lba(state->bdev);
+         if (!force_gpt) {
+                 /* This will be added to the EFI Spec. per Intel after v1.02. */
+                 legacymbr = kzalloc(sizeof (*legacymbr), GFP_KERNEL);
+                 if (legacymbr) {
+                         read_lba(state, 0, (u8 *) legacymbr,
+                                sizeof (*legacymbr));
+                         good_pmbr = is_pmbr_valid(legacymbr);
+                         kfree(legacymbr);
+                 }
+                 if (!good_pmbr)
+                         goto fail;
+         }
+       good_pgpt = is_gpt_valid(state, GPT_PRIMARY_PARTITION_TABLE_LBA,
+                                &pgpt, &pptes);
+         if (good_pgpt)
+               good_agpt = is_gpt_valid(state,
+                                        le64_to_cpu(pgpt->alternate_lba),
+                                        &agpt, &aptes);
+         if (!good_agpt && force_gpt)
+                 good_agpt = is_gpt_valid(state, lastlba, &agpt, &aptes);
+         /* The obviously unsuccessful case */
+         if (!good_pgpt && !good_agpt)
+                 goto fail;
+         compare_gpts(pgpt, agpt, lastlba);
+         /* The good cases */
+         if (good_pgpt) {
+                 *gpt  = pgpt;
+                 *ptes = pptes;
+                 kfree(agpt);
+                 kfree(aptes);
+                 if (!good_agpt) {
+                         printk(KERN_WARNING
+                                "Alternate GPT is invalid, "
+                                "using primary GPT.\n");
+                 }
+                 return 1;
+         }
+         else if (good_agpt) {
+                 *gpt  = agpt;
+                 *ptes = aptes;
+                 kfree(pgpt);
+                 kfree(pptes);
+                 printk(KERN_WARNING 
+                        "Primary GPT is invalid, using alternate GPT.\n");
+                 return 1;
+         }
+  fail:
+         kfree(pgpt);
+         kfree(agpt);
+         kfree(pptes);
+         kfree(aptes);
+         *gpt = NULL;
+         *ptes = NULL;
+         return 0;
+ }
+ /**
+  * efi_partition(struct parsed_partitions *state)
+  * @state: disk parsed partitions
+  *
+  * Description: called from check.c, if the disk contains GPT
+  * partitions, sets up partition entries in the kernel.
+  *
+  * If the first block on the disk is a legacy MBR,
+  * it will get handled by msdos_partition().
+  * If it's a Protective MBR, we'll handle it here.
+  *
+  * We do not create a Linux partition for GPT, but
+  * only for the actual data partitions.
+  * Returns:
+  * -1 if unable to read the partition table
+  *  0 if this isn't our partition table
+  *  1 if successful
+  *
+  */
+ int efi_partition(struct parsed_partitions *state)
+ {
+       gpt_header *gpt = NULL;
+       gpt_entry *ptes = NULL;
+       u32 i;
+       unsigned ssz = bdev_logical_block_size(state->bdev) / 512;
+       u8 unparsed_guid[37];
+       if (!find_valid_gpt(state, &gpt, &ptes) || !gpt || !ptes) {
+               kfree(gpt);
+               kfree(ptes);
+               return 0;
+       }
+       pr_debug("GUID Partition Table is valid!  Yea!\n");
+       for (i = 0; i < le32_to_cpu(gpt->num_partition_entries) && i < state->limit-1; i++) {
+               struct partition_meta_info *info;
+               unsigned label_count = 0;
+               unsigned label_max;
+               u64 start = le64_to_cpu(ptes[i].starting_lba);
+               u64 size = le64_to_cpu(ptes[i].ending_lba) -
+                          le64_to_cpu(ptes[i].starting_lba) + 1ULL;
+               if (!is_pte_valid(&ptes[i], last_lba(state->bdev)))
+                       continue;
+               put_partition(state, i+1, start * ssz, size * ssz);
+               /* If this is a RAID volume, tell md */
+               if (!efi_guidcmp(ptes[i].partition_type_guid,
+                                PARTITION_LINUX_RAID_GUID))
+                       state->parts[i + 1].flags = ADDPART_FLAG_RAID;
+               info = &state->parts[i + 1].info;
+               /* Instead of doing a manual swap to big endian, reuse the
+                * common ASCII hex format as the interim.
+                */
+               efi_guid_unparse(&ptes[i].unique_partition_guid, unparsed_guid);
+               part_pack_uuid(unparsed_guid, info->uuid);
+               /* Naively convert UTF16-LE to 7 bits. */
+               label_max = min(sizeof(info->volname) - 1,
+                               sizeof(ptes[i].partition_name));
+               info->volname[label_max] = 0;
+               while (label_count < label_max) {
+                       u8 c = ptes[i].partition_name[label_count] & 0xff;
+                       if (c && !isprint(c))
+                               c = '!';
+                       info->volname[label_count] = c;
+                       label_count++;
+               }
+               state->parts[i + 1].has_info = true;
+       }
+       kfree(ptes);
+       kfree(gpt);
+       strlcat(state->pp_buf, "\n", PAGE_SIZE);
+       return 1;
+ }
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
@@@ -501,109 -531,6 +532,107 @@@ acpi_os_predefined_override(const struc
        return AE_OK;
  }
  
 +#ifdef CONFIG_ACPI_INITRD_TABLE_OVERRIDE
 +#include <asm/e820.h>
 +
 +#define ACPI_OVERRIDE_TABLES 10
 +
 +static unsigned long acpi_table_override_offset[ACPI_OVERRIDE_TABLES];
 +static u64 acpi_tables_inram;
 +
 +unsigned long __initdata acpi_initrd_offset;
 +
 +/* Copied from acpica/tbutils.c:acpi_tb_checksum() */
 +u8 __init acpi_table_checksum(u8 *buffer, u32 length)
 +{
 +      u8 sum = 0;
 +      u8 *end = buffer + length;
 +
 +      while (buffer < end)
 +              sum = (u8) (sum + *(buffer++));
 +      return sum;
 +}
 +
 +/* All but ACPI_SIG_RSDP and ACPI_SIG_FACS: */
 +#define MAX_ACPI_SIGNATURE 35
 +static const char *table_sigs[MAX_ACPI_SIGNATURE] = {
 +      ACPI_SIG_BERT, ACPI_SIG_CPEP, ACPI_SIG_ECDT, ACPI_SIG_EINJ,
 +      ACPI_SIG_ERST, ACPI_SIG_HEST, ACPI_SIG_MADT, ACPI_SIG_MSCT,
 +      ACPI_SIG_SBST, ACPI_SIG_SLIT, ACPI_SIG_SRAT, ACPI_SIG_ASF,
 +      ACPI_SIG_BOOT, ACPI_SIG_DBGP, ACPI_SIG_DMAR, ACPI_SIG_HPET,
 +      ACPI_SIG_IBFT, ACPI_SIG_IVRS, ACPI_SIG_MCFG, ACPI_SIG_MCHI,
 +      ACPI_SIG_SLIC, ACPI_SIG_SPCR, ACPI_SIG_SPMI, ACPI_SIG_TCPA,
 +      ACPI_SIG_UEFI, ACPI_SIG_WAET, ACPI_SIG_WDAT, ACPI_SIG_WDDT,
 +      ACPI_SIG_WDRT, ACPI_SIG_DSDT, ACPI_SIG_FADT, ACPI_SIG_PSDT,
 +      ACPI_SIG_RSDT, ACPI_SIG_XSDT, ACPI_SIG_SSDT };
 +
 +int __init acpi_initrd_table_override(void *start_addr, void *end_addr)
 +{
 +      int table_nr, sig;
 +      unsigned long offset = 0, max_len = end_addr - start_addr;
 +      char *p;
 +
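 +      /*
 +       * The override area is a plain concatenation of ACPI tables:
 +       * walk it header by header and stop at the first unrecognized
 +       * signature or after ACPI_OVERRIDE_TABLES tables.
 +       */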
 +      for (table_nr = 0; table_nr < ACPI_OVERRIDE_TABLES; table_nr++) {
 +              struct acpi_table_header *table;
 +              if (max_len < offset + sizeof(struct acpi_table_header)) {
 +                      WARN_ON(1);
 +                      return 0;
 +              }
 +              table = start_addr + offset;
 +
 +              for (sig = 0; sig < MAX_ACPI_SIGNATURE; sig++)
 +                      if (!memcmp(table->signature, table_sigs[sig], 4))
 +                              break;
 +
 +              if (sig >= MAX_ACPI_SIGNATURE)
 +                      break;
 +
 +              if (max_len < offset + table->length) {
 +                      WARN_ON(1);
 +                      return 0;
 +              }
 +
 +              if (acpi_table_checksum(start_addr + offset, table->length)) {
 +                      WARN(1, "%4.4s has invalid checksum\n",
 +                           table->signature);
 +                      continue;
 +              }
 +              printk(KERN_INFO "%4.4s ACPI table found in initrd"
 +                     " - size: %d\n", table->signature, table->length);
 +
 +              offset += table->length;
 +              acpi_table_override_offset[table_nr] = offset;
 +      }
 +      if (!offset)
 +              return 0;
 +
 +      acpi_tables_inram =
 +              memblock_find_in_range(0, max_low_pfn_mapped << PAGE_SHIFT,
 +                                     offset, PAGE_SIZE);
-       if (acpi_tables_inram == MEMBLOCK_ERROR)
++      if (!acpi_tables_inram)
 +              panic("Cannot find place for ACPI override tables\n");
 +
 +      /*
 +       * Calling e820_add_region alone does not work: the tables become
 +       * invalid later because the memory gets reused.
 +       * Reserving the range in memblock works as expected and the tables
 +       * won't get modified, but it is not enough on its own because
 +       * ioremap (used by acpi_os_map_memory) will later complain that
 +       * the pages to be mapped are not marked "reserved".
 +       * Using both the memblock reservation and e820_add_region works.
 +       */
-       memblock_x86_reserve_range(acpi_tables_inram,
-                                  acpi_tables_inram + offset,
-                                  "ACPI TABLE OVERRIDE");
++      memblock_reserve(acpi_tables_inram, offset);
 +      e820_add_region(acpi_tables_inram, offset, E820_ACPI);
 +      update_e820();
 +
 +      p = early_ioremap(acpi_tables_inram, offset);
 +      memcpy(p, start_addr, offset);
 +      early_iounmap(p, offset);
 +      return offset;
 +}
 +
 +#endif
 +
  acpi_status
  acpi_os_table_override(struct acpi_table_header * existing_table,
                       struct acpi_table_header ** new_table)
Simple merge
Simple merge
Simple merge
Simple merge
@@@ -1768,8 -1787,7 +1787,9 @@@ static const struct hid_device_id hid_i
        { HID_USB_DEVICE(USB_VENDOR_ID_DELORME, USB_DEVICE_ID_DELORME_EARTHMATE) },
        { HID_USB_DEVICE(USB_VENDOR_ID_DELORME, USB_DEVICE_ID_DELORME_EM_LT20) },
        { HID_USB_DEVICE(USB_VENDOR_ID_DREAM_CHEEKY, 0x0004) },
+       { HID_USB_DEVICE(USB_VENDOR_ID_DREAM_CHEEKY, 0x000a) },
 +      { HID_USB_DEVICE(USB_VENDOR_ID_ELO, USB_DEVICE_ID_ELO_4000U) },
 +      { HID_USB_DEVICE(USB_VENDOR_ID_ELO, USB_DEVICE_ID_ELO_4500U) },
        { HID_USB_DEVICE(USB_VENDOR_ID_ESSENTIAL_REALITY, USB_DEVICE_ID_ESSENTIAL_REALITY_P5) },
        { HID_USB_DEVICE(USB_VENDOR_ID_ETT, USB_DEVICE_ID_TC5UH) },
        { HID_USB_DEVICE(USB_VENDOR_ID_ETT, USB_DEVICE_ID_TC4UM) },
Simple merge
@@@ -1310,11 -1225,58 +1330,59 @@@ static void set_input_params(struct inp
        }
  }
  
+ static ssize_t synaptics_show_disable_gesture(struct psmouse *psmouse,
+                                             void *data, char *buf)
+ {
+       struct synaptics_data *priv = psmouse->private;
+       return sprintf(buf, "%c\n", priv->disable_gesture ? '1' : '0');
+ }
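+ /*
+  * Writing 1 to the disable_gesture attribute sets SYN_BIT_DISABLE_GESTURE
+  * in the touchpad mode byte; writing 0 clears it again.
+  */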
+ static ssize_t synaptics_set_disable_gesture(struct psmouse *psmouse,
+                                            void *data, const char *buf,
+                                            size_t len)
+ {
+       struct synaptics_data *priv = psmouse->private;
+       unsigned int value;
+       int err;
+       err = kstrtouint(buf, 10, &value);
+       if (err)
+               return err;
+       if (value > 1)
+               return -EINVAL;
+       if (value == priv->disable_gesture)
+               return len;
+       priv->disable_gesture = value;
+       if (value)
+               priv->mode |= SYN_BIT_DISABLE_GESTURE;
+       else
+               priv->mode &= ~SYN_BIT_DISABLE_GESTURE;
+       if (synaptics_mode_cmd(psmouse, priv->mode))
+               return -EIO;
+       return len;
+ }
+ PSMOUSE_DEFINE_ATTR(disable_gesture, S_IWUSR | S_IRUGO, NULL,
+                   synaptics_show_disable_gesture,
+                   synaptics_set_disable_gesture);
  static void synaptics_disconnect(struct psmouse *psmouse)
  {
+       struct synaptics_data *priv = psmouse->private;
+       if (!priv->absolute_mode && SYN_ID_DISGEST_SUPPORTED(priv->identity))
+               device_remove_file(&psmouse->ps2dev.serio->dev,
+                                  &psmouse_attr_disable_gesture.dattr);
 +      synaptics_free_led(psmouse);
        synaptics_reset(psmouse);
-       kfree(psmouse->private);
+       kfree(priv);
        psmouse->private = NULL;
  }
  
Simple merge
Simple merge
@@@ -23,7 -24,7 +24,8 @@@ obj-$(CONFIG_TOUCHSCREEN_HAMPSHIRE)   += 
  obj-$(CONFIG_TOUCHSCREEN_GUNZE)               += gunze.o
  obj-$(CONFIG_TOUCHSCREEN_EETI)                += eeti_ts.o
  obj-$(CONFIG_TOUCHSCREEN_ELO)         += elo.o
 +obj-$(CONFIG_TOUCHSCREEN_ELOUSB)      += elousb.o
+ obj-$(CONFIG_TOUCHSCREEN_EGALAX)      += egalax_ts.o
  obj-$(CONFIG_TOUCHSCREEN_FUJITSU)     += fujitsu_ts.o
  obj-$(CONFIG_TOUCHSCREEN_INEXIO)      += inexio.o
  obj-$(CONFIG_TOUCHSCREEN_INTEL_MID)   += intel-mid-touch.o
Simple merge
Simple merge
diff --cc drivers/md/dm.c
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
index 0000000,9f50c4e..3c17db4
mode 000000,100644..100644
--- /dev/null
@@@ -1,0 -1,3357 +1,3363 @@@
+ /*
+  *  Driver for 8250/16550-type serial ports
+  *
+  *  Based on drivers/char/serial.c, by Linus Torvalds, Theodore Ts'o.
+  *
+  *  Copyright (C) 2001 Russell King.
+  *
+  * This program is free software; you can redistribute it and/or modify
+  * it under the terms of the GNU General Public License as published by
+  * the Free Software Foundation; either version 2 of the License, or
+  * (at your option) any later version.
+  *
+  * A note about mapbase / membase
+  *
+  *  mapbase is the physical address of the IO port.
+  *  membase is an 'ioremapped' cookie.
+  */
+ #if defined(CONFIG_SERIAL_8250_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ)
+ #define SUPPORT_SYSRQ
+ #endif
+ #include <linux/module.h>
+ #include <linux/moduleparam.h>
+ #include <linux/ioport.h>
+ #include <linux/init.h>
+ #include <linux/console.h>
+ #include <linux/sysrq.h>
+ #include <linux/delay.h>
+ #include <linux/platform_device.h>
+ #include <linux/tty.h>
+ #include <linux/ratelimit.h>
+ #include <linux/tty_flip.h>
+ #include <linux/serial_reg.h>
+ #include <linux/serial_core.h>
+ #include <linux/serial.h>
+ #include <linux/serial_8250.h>
+ #include <linux/nmi.h>
+ #include <linux/mutex.h>
+ #include <linux/slab.h>
+ #include <asm/io.h>
+ #include <asm/irq.h>
+ #include "8250.h"
+ #ifdef CONFIG_SPARC
+ #include "suncore.h"
+ #endif
+ /*
+  * Configuration:
+  *   share_irqs - whether we pass IRQF_SHARED to request_irq().  This option
+  *                is unsafe when used on edge-triggered interrupts.
+  */
+ static unsigned int share_irqs = SERIAL8250_SHARE_IRQS;
+ static unsigned int nr_uarts = CONFIG_SERIAL_8250_RUNTIME_UARTS;
+ static struct uart_driver serial8250_reg;
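+ /*
+  * Translate a uart_port back into its ttySn index, relative to the
+  * driver's minor number base.
+  */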
+ static int serial_index(struct uart_port *port)
+ {
+       return (serial8250_reg.minor - 64) + port->line;
+ }
+ static unsigned int skip_txen_test; /* force skip of txen test at init time */
+ /*
+  * Debugging.
+  */
+ #if 0
+ #define DEBUG_AUTOCONF(fmt...)        printk(fmt)
+ #else
+ #define DEBUG_AUTOCONF(fmt...)        do { } while (0)
+ #endif
+ #if 0
+ #define DEBUG_INTR(fmt...)    printk(fmt)
+ #else
+ #define DEBUG_INTR(fmt...)    do { } while (0)
+ #endif
+ #define PASS_LIMIT    512
+ #define BOTH_EMPTY    (UART_LSR_TEMT | UART_LSR_THRE)
+ /*
+  * We default to IRQ0 for the "no irq" hack.   Some
+  * machine types want others as well - they're free
+  * to redefine this in their header file.
+  */
+ #define is_real_interrupt(irq)        ((irq) != 0)
+ #ifdef CONFIG_SERIAL_8250_DETECT_IRQ
+ #define CONFIG_SERIAL_DETECT_IRQ 1
+ #endif
+ #ifdef CONFIG_SERIAL_8250_MANY_PORTS
+ #define CONFIG_SERIAL_MANY_PORTS 1
+ #endif
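++/*
++ * Fallback stub for the Ctrl-O sysrq hook: it returns 0, so received
++ * characters are never swallowed as sysrq triggers here.
++ */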
++#define arch_8250_sysrq_via_ctrl_o(a,b) 0
++
+ /*
+  * HUB6 is always on.  This will be removed once the header
+  * files have been cleaned.
+  */
+ #define CONFIG_HUB6 1
+ #include <asm/serial.h>
+ /*
+  * SERIAL_PORT_DFNS tells us about built-in ports that have no
+  * standard enumeration mechanism.   Platforms that can find all
+  * serial ports via mechanisms like ACPI or PCI need not supply it.
+  */
+ #ifndef SERIAL_PORT_DFNS
+ #define SERIAL_PORT_DFNS
+ #endif
+ static const struct old_serial_port old_serial_port[] = {
+       SERIAL_PORT_DFNS /* defined in asm/serial.h */
+ };
+ #define UART_NR       CONFIG_SERIAL_8250_NR_UARTS
+ #ifdef CONFIG_SERIAL_8250_RSA
+ #define PORT_RSA_MAX 4
+ static unsigned long probe_rsa[PORT_RSA_MAX];
+ static unsigned int probe_rsa_count;
+ #endif /* CONFIG_SERIAL_8250_RSA  */
+ struct irq_info {
+       struct                  hlist_node node;
+       int                     irq;
+       spinlock_t              lock;   /* Protects list not the hash */
+       struct list_head        *head;
+ };
+ #define NR_IRQ_HASH           32      /* Can be adjusted later */
+ static struct hlist_head irq_lists[NR_IRQ_HASH];
+ static DEFINE_MUTEX(hash_mutex);      /* Used to walk the hash */
+ /*
+  * Here we define the default xmit fifo size used for each type of UART.
+  */
+ static const struct serial8250_config uart_config[] = {
+       [PORT_UNKNOWN] = {
+               .name           = "unknown",
+               .fifo_size      = 1,
+               .tx_loadsz      = 1,
+       },
+       [PORT_8250] = {
+               .name           = "8250",
+               .fifo_size      = 1,
+               .tx_loadsz      = 1,
+       },
+       [PORT_16450] = {
+               .name           = "16450",
+               .fifo_size      = 1,
+               .tx_loadsz      = 1,
+       },
+       [PORT_16550] = {
+               .name           = "16550",
+               .fifo_size      = 1,
+               .tx_loadsz      = 1,
+       },
+       [PORT_16550A] = {
+               .name           = "16550A",
+               .fifo_size      = 16,
+               .tx_loadsz      = 16,
+               .fcr            = UART_FCR_ENABLE_FIFO | UART_FCR_R_TRIG_10,
+               .flags          = UART_CAP_FIFO,
+       },
+       [PORT_CIRRUS] = {
+               .name           = "Cirrus",
+               .fifo_size      = 1,
+               .tx_loadsz      = 1,
+       },
+       [PORT_16650] = {
+               .name           = "ST16650",
+               .fifo_size      = 1,
+               .tx_loadsz      = 1,
+               .flags          = UART_CAP_FIFO | UART_CAP_EFR | UART_CAP_SLEEP,
+       },
+       [PORT_16650V2] = {
+               .name           = "ST16650V2",
+               .fifo_size      = 32,
+               .tx_loadsz      = 16,
+               .fcr            = UART_FCR_ENABLE_FIFO | UART_FCR_R_TRIG_01 |
+                                 UART_FCR_T_TRIG_00,
+               .flags          = UART_CAP_FIFO | UART_CAP_EFR | UART_CAP_SLEEP,
+       },
+       [PORT_16750] = {
+               .name           = "TI16750",
+               .fifo_size      = 64,
+               .tx_loadsz      = 64,
+               .fcr            = UART_FCR_ENABLE_FIFO | UART_FCR_R_TRIG_10 |
+                                 UART_FCR7_64BYTE,
+               .flags          = UART_CAP_FIFO | UART_CAP_SLEEP | UART_CAP_AFE,
+       },
+       [PORT_STARTECH] = {
+               .name           = "Startech",
+               .fifo_size      = 1,
+               .tx_loadsz      = 1,
+       },
+       [PORT_16C950] = {
+               .name           = "16C950/954",
+               .fifo_size      = 128,
+               .tx_loadsz      = 128,
+               .fcr            = UART_FCR_ENABLE_FIFO | UART_FCR_R_TRIG_10,
+               /* UART_CAP_EFR breaks billionon CF bluetooth card. */
+               .flags          = UART_CAP_FIFO | UART_CAP_SLEEP,
+       },
+       [PORT_16654] = {
+               .name           = "ST16654",
+               .fifo_size      = 64,
+               .tx_loadsz      = 32,
+               .fcr            = UART_FCR_ENABLE_FIFO | UART_FCR_R_TRIG_01 |
+                                 UART_FCR_T_TRIG_10,
+               .flags          = UART_CAP_FIFO | UART_CAP_EFR | UART_CAP_SLEEP,
+       },
+       [PORT_16850] = {
+               .name           = "XR16850",
+               .fifo_size      = 128,
+               .tx_loadsz      = 128,
+               .fcr            = UART_FCR_ENABLE_FIFO | UART_FCR_R_TRIG_10,
+               .flags          = UART_CAP_FIFO | UART_CAP_EFR | UART_CAP_SLEEP,
+       },
+       [PORT_RSA] = {
+               .name           = "RSA",
+               .fifo_size      = 2048,
+               .tx_loadsz      = 2048,
+               .fcr            = UART_FCR_ENABLE_FIFO | UART_FCR_R_TRIG_11,
+               .flags          = UART_CAP_FIFO,
+       },
+       [PORT_NS16550A] = {
+               .name           = "NS16550A",
+               .fifo_size      = 16,
+               .tx_loadsz      = 16,
+               .fcr            = UART_FCR_ENABLE_FIFO | UART_FCR_R_TRIG_10,
+               .flags          = UART_CAP_FIFO | UART_NATSEMI,
+       },
+       [PORT_XSCALE] = {
+               .name           = "XScale",
+               .fifo_size      = 32,
+               .tx_loadsz      = 32,
+               .fcr            = UART_FCR_ENABLE_FIFO | UART_FCR_R_TRIG_10,
+               .flags          = UART_CAP_FIFO | UART_CAP_UUE | UART_CAP_RTOIE,
+       },
+       [PORT_RM9000] = {
+               .name           = "RM9000",
+               .fifo_size      = 16,
+               .tx_loadsz      = 16,
+               .fcr            = UART_FCR_ENABLE_FIFO | UART_FCR_R_TRIG_10,
+               .flags          = UART_CAP_FIFO,
+       },
+       [PORT_OCTEON] = {
+               .name           = "OCTEON",
+               .fifo_size      = 64,
+               .tx_loadsz      = 64,
+               .fcr            = UART_FCR_ENABLE_FIFO | UART_FCR_R_TRIG_10,
+               .flags          = UART_CAP_FIFO,
+       },
+       [PORT_AR7] = {
+               .name           = "AR7",
+               .fifo_size      = 16,
+               .tx_loadsz      = 16,
+               .fcr            = UART_FCR_ENABLE_FIFO | UART_FCR_R_TRIG_00,
+               .flags          = UART_CAP_FIFO | UART_CAP_AFE,
+       },
+       [PORT_U6_16550A] = {
+               .name           = "U6_16550A",
+               .fifo_size      = 64,
+               .tx_loadsz      = 64,
+               .fcr            = UART_FCR_ENABLE_FIFO | UART_FCR_R_TRIG_10,
+               .flags          = UART_CAP_FIFO | UART_CAP_AFE,
+       },
+       [PORT_TEGRA] = {
+               .name           = "Tegra",
+               .fifo_size      = 32,
+               .tx_loadsz      = 8,
+               .fcr            = UART_FCR_ENABLE_FIFO | UART_FCR_R_TRIG_01 |
+                                 UART_FCR_T_TRIG_01,
+               .flags          = UART_CAP_FIFO | UART_CAP_RTOIE,
+       },
+       [PORT_XR17D15X] = {
+               .name           = "XR17D15X",
+               .fifo_size      = 64,
+               .tx_loadsz      = 64,
+               .fcr            = UART_FCR_ENABLE_FIFO | UART_FCR_R_TRIG_10,
+               .flags          = UART_CAP_FIFO | UART_CAP_AFE | UART_CAP_EFR,
+       },
+ };
+ #if defined(CONFIG_MIPS_ALCHEMY)
+ /* Au1x00 UART hardware has a weird register layout */
+ static const u8 au_io_in_map[] = {
+       [UART_RX]  = 0,
+       [UART_IER] = 2,
+       [UART_IIR] = 3,
+       [UART_LCR] = 5,
+       [UART_MCR] = 6,
+       [UART_LSR] = 7,
+       [UART_MSR] = 8,
+ };
+ static const u8 au_io_out_map[] = {
+       [UART_TX]  = 1,
+       [UART_IER] = 2,
+       [UART_FCR] = 4,
+       [UART_LCR] = 5,
+       [UART_MCR] = 6,
+ };
+ /* sane hardware needs no mapping */
+ static inline int map_8250_in_reg(struct uart_port *p, int offset)
+ {
+       if (p->iotype != UPIO_AU)
+               return offset;
+       return au_io_in_map[offset];
+ }
+ static inline int map_8250_out_reg(struct uart_port *p, int offset)
+ {
+       if (p->iotype != UPIO_AU)
+               return offset;
+       return au_io_out_map[offset];
+ }
+ #elif defined(CONFIG_SERIAL_8250_RM9K)
+ static const u8
+       regmap_in[8] = {
+               [UART_RX]       = 0x00,
+               [UART_IER]      = 0x0c,
+               [UART_IIR]      = 0x14,
+               [UART_LCR]      = 0x1c,
+               [UART_MCR]      = 0x20,
+               [UART_LSR]      = 0x24,
+               [UART_MSR]      = 0x28,
+               [UART_SCR]      = 0x2c
+       },
+       regmap_out[8] = {
+               [UART_TX]       = 0x04,
+               [UART_IER]      = 0x0c,
+               [UART_FCR]      = 0x18,
+               [UART_LCR]      = 0x1c,
+               [UART_MCR]      = 0x20,
+               [UART_LSR]      = 0x24,
+               [UART_MSR]      = 0x28,
+               [UART_SCR]      = 0x2c
+       };
+ static inline int map_8250_in_reg(struct uart_port *p, int offset)
+ {
+       if (p->iotype != UPIO_RM9000)
+               return offset;
+       return regmap_in[offset];
+ }
+ static inline int map_8250_out_reg(struct uart_port *p, int offset)
+ {
+       if (p->iotype != UPIO_RM9000)
+               return offset;
+       return regmap_out[offset];
+ }
+ #else
+ /* sane hardware needs no mapping */
+ #define map_8250_in_reg(up, offset) (offset)
+ #define map_8250_out_reg(up, offset) (offset)
+ #endif
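+ /*
+  * HUB6 cards multiplex their UARTs behind a single pair of I/O ports:
+  * the card/register selector is written to the base port and the data
+  * is then transferred through base + 1.
+  */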
+ static unsigned int hub6_serial_in(struct uart_port *p, int offset)
+ {
+       offset = map_8250_in_reg(p, offset) << p->regshift;
+       outb(p->hub6 - 1 + offset, p->iobase);
+       return inb(p->iobase + 1);
+ }
+ static void hub6_serial_out(struct uart_port *p, int offset, int value)
+ {
+       offset = map_8250_out_reg(p, offset) << p->regshift;
+       outb(p->hub6 - 1 + offset, p->iobase);
+       outb(value, p->iobase + 1);
+ }
+ static unsigned int mem_serial_in(struct uart_port *p, int offset)
+ {
+       offset = map_8250_in_reg(p, offset) << p->regshift;
+       return readb(p->membase + offset);
+ }
+ static void mem_serial_out(struct uart_port *p, int offset, int value)
+ {
+       offset = map_8250_out_reg(p, offset) << p->regshift;
+       writeb(value, p->membase + offset);
+ }
+ static void mem32_serial_out(struct uart_port *p, int offset, int value)
+ {
+       offset = map_8250_out_reg(p, offset) << p->regshift;
+       writel(value, p->membase + offset);
+ }
+ static unsigned int mem32_serial_in(struct uart_port *p, int offset)
+ {
+       offset = map_8250_in_reg(p, offset) << p->regshift;
+       return readl(p->membase + offset);
+ }
+ static unsigned int au_serial_in(struct uart_port *p, int offset)
+ {
+       offset = map_8250_in_reg(p, offset) << p->regshift;
+       return __raw_readl(p->membase + offset);
+ }
+ static void au_serial_out(struct uart_port *p, int offset, int value)
+ {
+       offset = map_8250_out_reg(p, offset) << p->regshift;
+       __raw_writel(value, p->membase + offset);
+ }
+ static unsigned int io_serial_in(struct uart_port *p, int offset)
+ {
+       offset = map_8250_in_reg(p, offset) << p->regshift;
+       return inb(p->iobase + offset);
+ }
+ static void io_serial_out(struct uart_port *p, int offset, int value)
+ {
+       offset = map_8250_out_reg(p, offset) << p->regshift;
+       outb(value, p->iobase + offset);
+ }
+ static int serial8250_default_handle_irq(struct uart_port *port);
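+ /*
+  * Select the register accessors matching the port's iotype and
+  * remember which iotype they were installed for.
+  */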
+ static void set_io_from_upio(struct uart_port *p)
+ {
+       struct uart_8250_port *up =
+               container_of(p, struct uart_8250_port, port);
+       switch (p->iotype) {
+       case UPIO_HUB6:
+               p->serial_in = hub6_serial_in;
+               p->serial_out = hub6_serial_out;
+               break;
+       case UPIO_MEM:
+               p->serial_in = mem_serial_in;
+               p->serial_out = mem_serial_out;
+               break;
+       case UPIO_RM9000:
+       case UPIO_MEM32:
+               p->serial_in = mem32_serial_in;
+               p->serial_out = mem32_serial_out;
+               break;
+       case UPIO_AU:
+               p->serial_in = au_serial_in;
+               p->serial_out = au_serial_out;
+               break;
+       default:
+               p->serial_in = io_serial_in;
+               p->serial_out = io_serial_out;
+               break;
+       }
+       /* Remember loaded iotype */
+       up->cur_iotype = p->iotype;
+       p->handle_irq = serial8250_default_handle_irq;
+ }
+ static void
+ serial_out_sync(struct uart_8250_port *up, int offset, int value)
+ {
+       struct uart_port *p = &up->port;
+       switch (p->iotype) {
+       case UPIO_MEM:
+       case UPIO_MEM32:
+       case UPIO_AU:
+               p->serial_out(p, offset, value);
+               p->serial_in(p, UART_LCR);      /* safe, no side-effects */
+               break;
+       default:
+               p->serial_out(p, offset, value);
+       }
+ }
+ #define serial_in(up, offset)         \
+       (up->port.serial_in(&(up)->port, (offset)))
+ #define serial_out(up, offset, value) \
+       (up->port.serial_out(&(up)->port, (offset), (value)))
+ /*
+  * We used to support using pause I/O for certain machines.  We
+  * haven't supported this for a while, but just in case it's badly
+  * needed for certain old 386 machines, I've left these #define's
+  * in....
+  */
+ #define serial_inp(up, offset)                serial_in(up, offset)
+ #define serial_outp(up, offset, value)        serial_out(up, offset, value)
+ /* Uart divisor latch read */
+ static inline int _serial_dl_read(struct uart_8250_port *up)
+ {
+       return serial_inp(up, UART_DLL) | serial_inp(up, UART_DLM) << 8;
+ }
+ /* Uart divisor latch write */
+ static inline void _serial_dl_write(struct uart_8250_port *up, int value)
+ {
+       serial_outp(up, UART_DLL, value & 0xff);
+       serial_outp(up, UART_DLM, value >> 8 & 0xff);
+ }
+ #if defined(CONFIG_MIPS_ALCHEMY)
+ /* Au1x00 hasn't got a standard divisor latch */
+ static int serial_dl_read(struct uart_8250_port *up)
+ {
+       if (up->port.iotype == UPIO_AU)
+               return __raw_readl(up->port.membase + 0x28);
+       else
+               return _serial_dl_read(up);
+ }
+ static void serial_dl_write(struct uart_8250_port *up, int value)
+ {
+       if (up->port.iotype == UPIO_AU)
+               __raw_writel(value, up->port.membase + 0x28);
+       else
+               _serial_dl_write(up, value);
+ }
+ #elif defined(CONFIG_SERIAL_8250_RM9K)
+ static int serial_dl_read(struct uart_8250_port *up)
+ {
+       return  (up->port.iotype == UPIO_RM9000) ?
+               (((__raw_readl(up->port.membase + 0x10) << 8) |
+               (__raw_readl(up->port.membase + 0x08) & 0xff)) & 0xffff) :
+               _serial_dl_read(up);
+ }
+ static void serial_dl_write(struct uart_8250_port *up, int value)
+ {
+       if (up->port.iotype == UPIO_RM9000) {
+               __raw_writel(value, up->port.membase + 0x08);
+               __raw_writel(value >> 8, up->port.membase + 0x10);
+       } else {
+               _serial_dl_write(up, value);
+       }
+ }
+ #else
+ #define serial_dl_read(up) _serial_dl_read(up)
+ #define serial_dl_write(up, value) _serial_dl_write(up, value)
+ #endif
+ /*
+  * For the 16C950
+  */
+ static void serial_icr_write(struct uart_8250_port *up, int offset, int value)
+ {
+       serial_out(up, UART_SCR, offset);
+       serial_out(up, UART_ICR, value);
+ }
+ static unsigned int serial_icr_read(struct uart_8250_port *up, int offset)
+ {
+       unsigned int value;
+       serial_icr_write(up, UART_ACR, up->acr | UART_ACR_ICRRD);
+       serial_out(up, UART_SCR, offset);
+       value = serial_in(up, UART_ICR);
+       serial_icr_write(up, UART_ACR, up->acr);
+       return value;
+ }
+ /*
+  * FIFO support.
+  */
+ static void serial8250_clear_fifos(struct uart_8250_port *p)
+ {
+       if (p->capabilities & UART_CAP_FIFO) {
+               serial_outp(p, UART_FCR, UART_FCR_ENABLE_FIFO);
+               serial_outp(p, UART_FCR, UART_FCR_ENABLE_FIFO |
+                              UART_FCR_CLEAR_RCVR | UART_FCR_CLEAR_XMIT);
+               serial_outp(p, UART_FCR, 0);
+       }
+ }
+ /*
+  * IER sleep support.  UARTs which have EFRs need the "extended
+  * capability" bit enabled.  Note that on XR16C850s, we need to
+  * reset LCR to write to IER.
+  */
+ static void serial8250_set_sleep(struct uart_8250_port *p, int sleep)
+ {
+       if (p->capabilities & UART_CAP_SLEEP) {
+               if (p->capabilities & UART_CAP_EFR) {
+                       serial_outp(p, UART_LCR, UART_LCR_CONF_MODE_B);
+                       serial_outp(p, UART_EFR, UART_EFR_ECB);
+                       serial_outp(p, UART_LCR, 0);
+               }
+               serial_outp(p, UART_IER, sleep ? UART_IERX_SLEEP : 0);
+               if (p->capabilities & UART_CAP_EFR) {
+                       serial_outp(p, UART_LCR, UART_LCR_CONF_MODE_B);
+                       serial_outp(p, UART_EFR, 0);
+                       serial_outp(p, UART_LCR, 0);
+               }
+       }
+ }
+ #ifdef CONFIG_SERIAL_8250_RSA
+ /*
+  * Attempts to turn on the RSA FIFO.  Returns zero on failure.
+  * We set the port uart clock rate if we succeed.
+  */
+ static int __enable_rsa(struct uart_8250_port *up)
+ {
+       unsigned char mode;
+       int result;
+       mode = serial_inp(up, UART_RSA_MSR);
+       result = mode & UART_RSA_MSR_FIFO;
+       if (!result) {
+               serial_outp(up, UART_RSA_MSR, mode | UART_RSA_MSR_FIFO);
+               mode = serial_inp(up, UART_RSA_MSR);
+               result = mode & UART_RSA_MSR_FIFO;
+       }
+       if (result)
+               up->port.uartclk = SERIAL_RSA_BAUD_BASE * 16;
+       return result;
+ }
+ static void enable_rsa(struct uart_8250_port *up)
+ {
+       if (up->port.type == PORT_RSA) {
+               if (up->port.uartclk != SERIAL_RSA_BAUD_BASE * 16) {
+                       spin_lock_irq(&up->port.lock);
+                       __enable_rsa(up);
+                       spin_unlock_irq(&up->port.lock);
+               }
+               if (up->port.uartclk == SERIAL_RSA_BAUD_BASE * 16)
+                       serial_outp(up, UART_RSA_FRR, 0);
+       }
+ }
+ /*
+  * Attempts to turn off the RSA FIFO.  Returns zero on failure.
+  * It is unknown why interrupts were disabled in here.  However,
+  * the caller is expected to preserve this behaviour by grabbing
+  * the spinlock before calling this function.
+  */
+ static void disable_rsa(struct uart_8250_port *up)
+ {
+       unsigned char mode;
+       int result;
+       if (up->port.type == PORT_RSA &&
+           up->port.uartclk == SERIAL_RSA_BAUD_BASE * 16) {
+               spin_lock_irq(&up->port.lock);
+               mode = serial_inp(up, UART_RSA_MSR);
+               result = !(mode & UART_RSA_MSR_FIFO);
+               if (!result) {
+                       serial_outp(up, UART_RSA_MSR, mode & ~UART_RSA_MSR_FIFO);
+                       mode = serial_inp(up, UART_RSA_MSR);
+                       result = !(mode & UART_RSA_MSR_FIFO);
+               }
+               if (result)
+                       up->port.uartclk = SERIAL_RSA_BAUD_BASE_LO * 16;
+               spin_unlock_irq(&up->port.lock);
+       }
+ }
+ #endif /* CONFIG_SERIAL_8250_RSA */
+ /*
+  * This is a quickie test to see how big the FIFO is.
+  * It doesn't work all the time, more's the pity.
+  */
+ static int size_fifo(struct uart_8250_port *up)
+ {
+       unsigned char old_fcr, old_mcr, old_lcr;
+       unsigned short old_dl;
+       int count;
+       old_lcr = serial_inp(up, UART_LCR);
+       serial_outp(up, UART_LCR, 0);
+       old_fcr = serial_inp(up, UART_FCR);
+       old_mcr = serial_inp(up, UART_MCR);
+       serial_outp(up, UART_FCR, UART_FCR_ENABLE_FIFO |
+                   UART_FCR_CLEAR_RCVR | UART_FCR_CLEAR_XMIT);
+       serial_outp(up, UART_MCR, UART_MCR_LOOP);
+       serial_outp(up, UART_LCR, UART_LCR_CONF_MODE_A);
+       old_dl = serial_dl_read(up);
+       serial_dl_write(up, 0x0001);
+       serial_outp(up, UART_LCR, 0x03);
+       for (count = 0; count < 256; count++)
+               serial_outp(up, UART_TX, count);
+       mdelay(20);/* FIXME - schedule_timeout */
+       for (count = 0; (serial_inp(up, UART_LSR) & UART_LSR_DR) &&
+            (count < 256); count++)
+               serial_inp(up, UART_RX);
+       serial_outp(up, UART_FCR, old_fcr);
+       serial_outp(up, UART_MCR, old_mcr);
+       serial_outp(up, UART_LCR, UART_LCR_CONF_MODE_A);
+       serial_dl_write(up, old_dl);
+       serial_outp(up, UART_LCR, old_lcr);
+       return count;
+ }
+ /*
+  * Read UART ID using the divisor method - set DLL and DLM to zero
+  * and the revision will be in DLL and device type in DLM.  We
+  * preserve the device state across this.
+  */
+ static unsigned int autoconfig_read_divisor_id(struct uart_8250_port *p)
+ {
+       unsigned char old_dll, old_dlm, old_lcr;
+       unsigned int id;
+       old_lcr = serial_inp(p, UART_LCR);
+       serial_outp(p, UART_LCR, UART_LCR_CONF_MODE_A);
+       old_dll = serial_inp(p, UART_DLL);
+       old_dlm = serial_inp(p, UART_DLM);
+       serial_outp(p, UART_DLL, 0);
+       serial_outp(p, UART_DLM, 0);
+       id = serial_inp(p, UART_DLL) | serial_inp(p, UART_DLM) << 8;
+       serial_outp(p, UART_DLL, old_dll);
+       serial_outp(p, UART_DLM, old_dlm);
+       serial_outp(p, UART_LCR, old_lcr);
+       return id;
+ }
+ /*
+  * This is a helper routine to autodetect StarTech/Exar/Oxsemi UART's.
+  * When this function is called we know it is at least a StarTech
+  * 16650 V2, but it might be one of several StarTech UARTs, or one of
+  * its clones.  (We treat the broken original StarTech 16650 V1 as a
+  * 16550, and why not?  Startech doesn't seem to even acknowledge its
+  * existence.)
+  *
+  * What evil have men's minds wrought...
+  */
+ static void autoconfig_has_efr(struct uart_8250_port *up)
+ {
+       unsigned int id1, id2, id3, rev;
+       /*
+        * Everything with an EFR has SLEEP
+        */
+       up->capabilities |= UART_CAP_EFR | UART_CAP_SLEEP;
+       /*
+        * First we check to see if it's an Oxford Semiconductor UART.
+        *
+        * We have to do this here because some non-National
+        * Semiconductor clone chips lock up if you try writing to the
+        * LSR register (which serial_icr_read does).
+        */
+       /*
+        * Check for Oxford Semiconductor 16C950.
+        *
+        * EFR [4] must be set else this test fails.
+        *
+        * This shouldn't be necessary, but Mike Hudson (Exoray@isys.ca)
+        * claims that it's needed for 952 dual UART's (which are not
+        * recommended for new designs).
+        */
+       up->acr = 0;
+       serial_out(up, UART_LCR, UART_LCR_CONF_MODE_B);
+       serial_out(up, UART_EFR, UART_EFR_ECB);
+       serial_out(up, UART_LCR, 0x00);
+       id1 = serial_icr_read(up, UART_ID1);
+       id2 = serial_icr_read(up, UART_ID2);
+       id3 = serial_icr_read(up, UART_ID3);
+       rev = serial_icr_read(up, UART_REV);
+       DEBUG_AUTOCONF("950id=%02x:%02x:%02x:%02x ", id1, id2, id3, rev);
+       if (id1 == 0x16 && id2 == 0xC9 &&
+           (id3 == 0x50 || id3 == 0x52 || id3 == 0x54)) {
+               up->port.type = PORT_16C950;
+               /*
+                * Enable the workaround for the Oxford Semiconductor 952
+                * rev B chip, which seriously miscalculates baud rates
+                * when DLL is 0.
+                */
+               if (id3 == 0x52 && rev == 0x01)
+                       up->bugs |= UART_BUG_QUOT;
+               return;
+       }
+       /*
+        * We check for a XR16C850 by setting DLL and DLM to 0, and then
+        * reading back DLL and DLM.  The chip type depends on the DLM
+        * value read back:
+        *  0x10 - XR16C850 and the DLL contains the chip revision.
+        *  0x12 - XR16C2850.
+        *  0x14 - XR16C854.
+        */
+       id1 = autoconfig_read_divisor_id(up);
+       DEBUG_AUTOCONF("850id=%04x ", id1);
+       id2 = id1 >> 8;
+       if (id2 == 0x10 || id2 == 0x12 || id2 == 0x14) {
+               up->port.type = PORT_16850;
+               return;
+       }
+       /*
+        * It wasn't an XR16C850.
+        *
+        * We distinguish between the '654 and the '650 by counting
+        * how many bytes are in the FIFO.  I'm using this for now,
+        * since that's the technique that was sent to me in the
+        * serial driver update, but I'm not convinced this works.
+        * I've had problems doing this in the past.  -TYT
+        */
+       if (size_fifo(up) == 64)
+               up->port.type = PORT_16654;
+       else
+               up->port.type = PORT_16650V2;
+ }
+ /*
+  * We detected a chip without a FIFO.  Only two fall into
+  * this category - the original 8250 and the 16450.  The
+  * 16450 has a scratch register (accessible with LCR=0)
+  */
+ static void autoconfig_8250(struct uart_8250_port *up)
+ {
+       unsigned char scratch, status1, status2;
+       up->port.type = PORT_8250;
+       scratch = serial_in(up, UART_SCR);
+       serial_outp(up, UART_SCR, 0xa5);
+       status1 = serial_in(up, UART_SCR);
+       serial_outp(up, UART_SCR, 0x5a);
+       status2 = serial_in(up, UART_SCR);
+       serial_outp(up, UART_SCR, scratch);
+       if (status1 == 0xa5 && status2 == 0x5a)
+               up->port.type = PORT_16450;
+ }
+ static int broken_efr(struct uart_8250_port *up)
+ {
+       /*
+        * Exar ST16C2550 "A2" devices incorrectly detect as
+        * having an EFR, and report an ID of 0x0201.  See
+        * http://linux.derkeiler.com/Mailing-Lists/Kernel/2004-11/4812.html 
+        */
+       if (autoconfig_read_divisor_id(up) == 0x0201 && size_fifo(up) == 16)
+               return 1;
+       return 0;
+ }
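+ /*
+  * Switch a National Semiconductor SuperIO UART into high-speed mode by
+  * programming the EXCR2 prescaler (baud_base becomes 921600).
+  * Returns 1 if the mode was changed, 0 if it was already enabled.
+  */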
+ static inline int ns16550a_goto_highspeed(struct uart_8250_port *up)
+ {
+       unsigned char status;
+       status = serial_in(up, 0x04); /* EXCR2 */
+ #define PRESL(x) ((x) & 0x30)
+       if (PRESL(status) == 0x10) {
+               /* already in high speed mode */
+               return 0;
+       } else {
+               status &= ~0xB0; /* Disable LOCK, mask out PRESL[01] */
+               status |= 0x10;  /* 1.625 divisor for baud_base --> 921600 */
+               serial_outp(up, 0x04, status);
+       }
+       return 1;
+ }
+ /*
+  * We know that the chip has FIFOs.  Does it have an EFR?  The
+  * EFR is located in the same register position as the IIR and
+  * we know the top two bits of the IIR are currently set.  The
+  * EFR should contain zero.  Try to read the EFR.
+  */
+ static void autoconfig_16550a(struct uart_8250_port *up)
+ {
+       unsigned char status1, status2;
+       unsigned int iersave;
+       up->port.type = PORT_16550A;
+       up->capabilities |= UART_CAP_FIFO;
+       /*
+        * Check for presence of the EFR when DLAB is set.
+        * Only ST16C650V1 UARTs pass this test.
+        */
+       serial_outp(up, UART_LCR, UART_LCR_CONF_MODE_A);
+       if (serial_in(up, UART_EFR) == 0) {
+               serial_outp(up, UART_EFR, 0xA8);
+               if (serial_in(up, UART_EFR) != 0) {
+                       DEBUG_AUTOCONF("EFRv1 ");
+                       up->port.type = PORT_16650;
+                       up->capabilities |= UART_CAP_EFR | UART_CAP_SLEEP;
+               } else {
+                       DEBUG_AUTOCONF("Motorola 8xxx DUART ");
+               }
+               serial_outp(up, UART_EFR, 0);
+               return;
+       }
+       /*
+        * Maybe it requires 0xbf to be written to the LCR.
+        * (other ST16C650V2 UARTs, TI16C752A, etc)
+        */
+       serial_outp(up, UART_LCR, UART_LCR_CONF_MODE_B);
+       if (serial_in(up, UART_EFR) == 0 && !broken_efr(up)) {
+               DEBUG_AUTOCONF("EFRv2 ");
+               autoconfig_has_efr(up);
+               return;
+       }
+       /*
+        * Check for a National Semiconductor SuperIO chip.
+        * Attempt to switch to bank 2, read the value of the LOOP bit
+        * from EXCR1. Switch back to bank 0, change it in MCR. Then
+        * switch back to bank 2, read it from EXCR1 again and check
+        * it's changed. If so, set baud_base in EXCR2 to 921600. -- dwmw2
+        */
+       serial_outp(up, UART_LCR, 0);
+       status1 = serial_in(up, UART_MCR);
+       serial_outp(up, UART_LCR, 0xE0);
+       status2 = serial_in(up, 0x02); /* EXCR1 */
+       if (!((status2 ^ status1) & UART_MCR_LOOP)) {
+               serial_outp(up, UART_LCR, 0);
+               serial_outp(up, UART_MCR, status1 ^ UART_MCR_LOOP);
+               serial_outp(up, UART_LCR, 0xE0);
+               status2 = serial_in(up, 0x02); /* EXCR1 */
+               serial_outp(up, UART_LCR, 0);
+               serial_outp(up, UART_MCR, status1);
+               if ((status2 ^ status1) & UART_MCR_LOOP) {
+                       unsigned short quot;
+                       serial_outp(up, UART_LCR, 0xE0);
+                       quot = serial_dl_read(up);
+                       quot <<= 3;
+                       if (ns16550a_goto_highspeed(up))
+                               serial_dl_write(up, quot);
+                       serial_outp(up, UART_LCR, 0);
+                       up->port.uartclk = 921600*16;
+                       up->port.type = PORT_NS16550A;
+                       up->capabilities |= UART_NATSEMI;
+                       return;
+               }
+       }
+       /*
+        * No EFR.  Try to detect a TI16750, which only sets bit 5 of
+        * the IIR when 64 byte FIFO mode is enabled when DLAB is set.
+        * Try setting it with and without DLAB set.  Cheap clones
+        * set bit 5 without DLAB set.
+        */
+       serial_outp(up, UART_LCR, 0);
+       serial_outp(up, UART_FCR, UART_FCR_ENABLE_FIFO | UART_FCR7_64BYTE);
+       status1 = serial_in(up, UART_IIR) >> 5;
+       serial_outp(up, UART_FCR, UART_FCR_ENABLE_FIFO);
+       serial_outp(up, UART_LCR, UART_LCR_CONF_MODE_A);
+       serial_outp(up, UART_FCR, UART_FCR_ENABLE_FIFO | UART_FCR7_64BYTE);
+       status2 = serial_in(up, UART_IIR) >> 5;
+       serial_outp(up, UART_FCR, UART_FCR_ENABLE_FIFO);
+       serial_outp(up, UART_LCR, 0);
+       DEBUG_AUTOCONF("iir1=%d iir2=%d ", status1, status2);
+       if (status1 == 6 && status2 == 7) {
+               up->port.type = PORT_16750;
+               up->capabilities |= UART_CAP_AFE | UART_CAP_SLEEP;
+               return;
+       }
+       /*
+        * Try writing and reading the UART_IER_UUE bit (b6).
+        * If it works, this is probably one of the Xscale platform's
+        * internal UARTs.
+        * We're going to explicitly set the UUE bit to 0 before
+        * trying to write and read a 1 just to make sure it's not
+        * already a 1 and maybe locked there before we even start.
+        */
+       iersave = serial_in(up, UART_IER);
+       serial_outp(up, UART_IER, iersave & ~UART_IER_UUE);
+       if (!(serial_in(up, UART_IER) & UART_IER_UUE)) {
+               /*
+                * OK it's in a known zero state, try writing and reading
+                * without disturbing the current state of the other bits.
+                */
+               serial_outp(up, UART_IER, iersave | UART_IER_UUE);
+               if (serial_in(up, UART_IER) & UART_IER_UUE) {
+                       /*
+                        * It's an Xscale.
+                        * We'll leave the UART_IER_UUE bit set to 1 (enabled).
+                        */
+                       DEBUG_AUTOCONF("Xscale ");
+                       up->port.type = PORT_XSCALE;
+                       up->capabilities |= UART_CAP_UUE | UART_CAP_RTOIE;
+                       return;
+               }
+       } else {
+               /*
+                * If we got here we couldn't force the IER_UUE bit to 0.
+                * Log it and continue.
+                */
+               DEBUG_AUTOCONF("Couldn't force IER_UUE to 0 ");
+       }
+       serial_outp(up, UART_IER, iersave);
+       /*
+        * Exar uarts have EFR in a weird location
+        */
+       if (up->port.flags & UPF_EXAR_EFR) {
+               up->port.type = PORT_XR17D15X;
+               up->capabilities |= UART_CAP_AFE | UART_CAP_EFR;
+       }
+       /*
+        * We distinguish between 16550A and U6 16550A by counting
+        * how many bytes are in the FIFO.
+        */
+       if (up->port.type == PORT_16550A && size_fifo(up) == 64) {
+               up->port.type = PORT_U6_16550A;
+               up->capabilities |= UART_CAP_AFE;
+       }
+ }
+ /*
+  * This routine is called by rs_init() to initialize a specific serial
+  * port.  It determines what type of UART chip this serial port is
+  * using: 8250, 16450, 16550, 16550A.  The important question is
+  * whether or not this UART is a 16550A or not, since this will
+  * determine whether or not we can use its FIFO features or not.
+  */
+ static void autoconfig(struct uart_8250_port *up, unsigned int probeflags)
+ {
+       unsigned char status1, scratch, scratch2, scratch3;
+       unsigned char save_lcr, save_mcr;
+       unsigned long flags;
+       if (!up->port.iobase && !up->port.mapbase && !up->port.membase)
+               return;
+       DEBUG_AUTOCONF("ttyS%d: autoconf (0x%04lx, 0x%p): ",
+                      serial_index(&up->port), up->port.iobase, up->port.membase);
+       /*
+        * We really do need global IRQs disabled here - we're going to
+        * be frobbing the chips IRQ enable register to see if it exists.
+        */
+       spin_lock_irqsave(&up->port.lock, flags);
+       up->capabilities = 0;
+       up->bugs = 0;
+       if (!(up->port.flags & UPF_BUGGY_UART)) {
+               /*
+                * Do a simple existence test first; if we fail this,
+                * there's no point trying anything else.
+                *
+                * 0x80 is used as a nonsense port to protect against
+                * false positives due to ISA bus float.  The
+                * assumption is that 0x80 is a non-existent port;
+                * which should be safe since include/asm/io.h also
+                * makes this assumption.
+                *
+                * Note: this is safe as long as MCR bit 4 is clear
+                * and the device is in "PC" mode.
+                */
+               scratch = serial_inp(up, UART_IER);
+               serial_outp(up, UART_IER, 0);
+ #ifdef __i386__
+               outb(0xff, 0x080);
+ #endif
+               /*
+                * Mask out IER[7:4] bits for the test as some UARTs (e.g. TL
+                * 16C754B) only allow them to be modified if an EFR bit is set.
+                */
+               scratch2 = serial_inp(up, UART_IER) & 0x0f;
+               serial_outp(up, UART_IER, 0x0F);
+ #ifdef __i386__
+               outb(0, 0x080);
+ #endif
+               scratch3 = serial_inp(up, UART_IER) & 0x0f;
+               serial_outp(up, UART_IER, scratch);
+               if (scratch2 != 0 || scratch3 != 0x0F) {
+                       /*
+                        * We failed; there's nothing here
+                        */
+                       DEBUG_AUTOCONF("IER test failed (%02x, %02x) ",
+                                      scratch2, scratch3);
+                       goto out;
+               }
+       }
+       save_mcr = serial_in(up, UART_MCR);
+       save_lcr = serial_in(up, UART_LCR);
+       /*
+        * Check to see if a UART is really there.  Certain broken
+        * internal modems based on the Rockwell chipset fail this
+        * test, because they apparently don't implement the loopback
+        * test mode.  So this test is skipped on the COM 1 through
+        * COM 4 ports.  This *should* be safe, since no board
+        * manufacturer would be stupid enough to design a board
+        * that conflicts with COM 1-4 --- we hope!
+        */
+       if (!(up->port.flags & UPF_SKIP_TEST)) {
+               serial_outp(up, UART_MCR, UART_MCR_LOOP | 0x0A);
+               status1 = serial_inp(up, UART_MSR) & 0xF0;
+               serial_outp(up, UART_MCR, save_mcr);
+               if (status1 != 0x90) {
+                       DEBUG_AUTOCONF("LOOP test failed (%02x) ",
+                                      status1);
+                       goto out;
+               }
+       }
+       /*
+        * We're pretty sure there's a port here.  Let's find out what
+        * type of port it is.  The IIR top two bits allow us to find
+        * out if it's 8250 or 16450, 16550, 16550A or later.  This
+        * determines what we test for next.
+        *
+        * We also initialise the EFR (if any) to zero for later.  The
+        * EFR occupies the same register location as the FCR and IIR.
+        */
+       serial_outp(up, UART_LCR, UART_LCR_CONF_MODE_B);
+       serial_outp(up, UART_EFR, 0);
+       serial_outp(up, UART_LCR, 0);
+       serial_outp(up, UART_FCR, UART_FCR_ENABLE_FIFO);
+       scratch = serial_in(up, UART_IIR) >> 6;
+       DEBUG_AUTOCONF("iir=%d ", scratch);
+       switch (scratch) {
+       case 0:
+               autoconfig_8250(up);
+               break;
+       case 1:
+               up->port.type = PORT_UNKNOWN;
+               break;
+       case 2:
+               up->port.type = PORT_16550;
+               break;
+       case 3:
+               autoconfig_16550a(up);
+               break;
+       }
+ #ifdef CONFIG_SERIAL_8250_RSA
+       /*
+        * Only probe for RSA ports if we got the region.
+        */
+       if (up->port.type == PORT_16550A && probeflags & PROBE_RSA) {
+               int i;
+               for (i = 0 ; i < probe_rsa_count; ++i) {
+                       if (probe_rsa[i] == up->port.iobase &&
+                           __enable_rsa(up)) {
+                               up->port.type = PORT_RSA;
+                               break;
+                       }
+               }
+       }
+ #endif
+       serial_outp(up, UART_LCR, save_lcr);
+       if (up->capabilities != uart_config[up->port.type].flags) {
+               printk(KERN_WARNING
+                      "ttyS%d: detected caps %08x should be %08x\n",
+                      serial_index(&up->port), up->capabilities,
+                      uart_config[up->port.type].flags);
+       }
+       up->port.fifosize = uart_config[up->port.type].fifo_size;
+       up->capabilities = uart_config[up->port.type].flags;
+       up->tx_loadsz = uart_config[up->port.type].tx_loadsz;
+       if (up->port.type == PORT_UNKNOWN)
+               goto out;
+       /*
+        * Reset the UART.
+        */
+ #ifdef CONFIG_SERIAL_8250_RSA
+       if (up->port.type == PORT_RSA)
+               serial_outp(up, UART_RSA_FRR, 0);
+ #endif
+       serial_outp(up, UART_MCR, save_mcr);
+       serial8250_clear_fifos(up);
+       serial_in(up, UART_RX);
+       if (up->capabilities & UART_CAP_UUE)
+               serial_outp(up, UART_IER, UART_IER_UUE);
+       else
+               serial_outp(up, UART_IER, 0);
+  out:
+       spin_unlock_irqrestore(&up->port.lock, flags);
+       DEBUG_AUTOCONF("type=%s\n", uart_config[up->port.type].name);
+ }
+ static void autoconfig_irq(struct uart_8250_port *up)
+ {
+       unsigned char save_mcr, save_ier;
+       unsigned char save_ICP = 0;
+       unsigned int ICP = 0;
+       unsigned long irqs;
+       int irq;
+       if (up->port.flags & UPF_FOURPORT) {
+               ICP = (up->port.iobase & 0xfe0) | 0x1f;
+               save_ICP = inb_p(ICP);
+               outb_p(0x80, ICP);
+               (void) inb_p(ICP);
+       }
+       /* forget possible initially masked and pending IRQ */
+       probe_irq_off(probe_irq_on());
+       save_mcr = serial_inp(up, UART_MCR);
+       save_ier = serial_inp(up, UART_IER);
+       serial_outp(up, UART_MCR, UART_MCR_OUT1 | UART_MCR_OUT2);
+       irqs = probe_irq_on();
+       serial_outp(up, UART_MCR, 0);
+       udelay(10);
+       if (up->port.flags & UPF_FOURPORT) {
+               serial_outp(up, UART_MCR,
+                           UART_MCR_DTR | UART_MCR_RTS);
+       } else {
+               serial_outp(up, UART_MCR,
+                           UART_MCR_DTR | UART_MCR_RTS | UART_MCR_OUT2);
+       }
+       serial_outp(up, UART_IER, 0x0f);        /* enable all intrs */
+       (void)serial_inp(up, UART_LSR);
+       (void)serial_inp(up, UART_RX);
+       (void)serial_inp(up, UART_IIR);
+       (void)serial_inp(up, UART_MSR);
+       serial_outp(up, UART_TX, 0xFF);
+       udelay(20);
+       irq = probe_irq_off(irqs);
+       serial_outp(up, UART_MCR, save_mcr);
+       serial_outp(up, UART_IER, save_ier);
+       if (up->port.flags & UPF_FOURPORT)
+               outb_p(save_ICP, ICP);
+       up->port.irq = (irq > 0) ? irq : 0;
+ }
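+ /* Disable the THRE interrupt if it is currently enabled. */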
+ static inline void __stop_tx(struct uart_8250_port *p)
+ {
+       if (p->ier & UART_IER_THRI) {
+               p->ier &= ~UART_IER_THRI;
+               serial_out(p, UART_IER, p->ier);
+       }
+ }
+ static void serial8250_stop_tx(struct uart_port *port)
+ {
+       struct uart_8250_port *up =
+               container_of(port, struct uart_8250_port, port);
+       __stop_tx(up);
+       /*
+        * We really want to stop the transmitter from sending.
+        */
+       if (up->port.type == PORT_16C950) {
+               up->acr |= UART_ACR_TXDIS;
+               serial_icr_write(up, UART_ACR, up->acr);
+       }
+ }
+ static void serial8250_start_tx(struct uart_port *port)
+ {
+       struct uart_8250_port *up =
+               container_of(port, struct uart_8250_port, port);
+       if (!(up->ier & UART_IER_THRI)) {
+               up->ier |= UART_IER_THRI;
+               serial_out(up, UART_IER, up->ier);
+               if (up->bugs & UART_BUG_TXEN) {
+                       unsigned char lsr;
+                       lsr = serial_in(up, UART_LSR);
+                       up->lsr_saved_flags |= lsr & LSR_SAVE_FLAGS;
+                       if ((up->port.type == PORT_RM9000) ?
+                               (lsr & UART_LSR_THRE) :
+                               (lsr & UART_LSR_TEMT))
+                               serial8250_tx_chars(up);
+               }
+       }
+       /*
+        * Re-enable the transmitter if we disabled it.
+        */
+       if (up->port.type == PORT_16C950 && up->acr & UART_ACR_TXDIS) {
+               up->acr &= ~UART_ACR_TXDIS;
+               serial_icr_write(up, UART_ACR, up->acr);
+       }
+ }
+ static void serial8250_stop_rx(struct uart_port *port)
+ {
+       struct uart_8250_port *up =
+               container_of(port, struct uart_8250_port, port);
+       up->ier &= ~UART_IER_RLSI;
+       up->port.read_status_mask &= ~UART_LSR_DR;
+       serial_out(up, UART_IER, up->ier);
+ }
+ static void serial8250_enable_ms(struct uart_port *port)
+ {
+       struct uart_8250_port *up =
+               container_of(port, struct uart_8250_port, port);
+       /* no MSR capabilities */
+       if (up->bugs & UART_BUG_NOMSR)
+               return;
+       up->ier |= UART_IER_MSI;
+       serial_out(up, UART_IER, up->ier);
+ }
+ /*
+  * Clear the Tegra rx fifo after a break
+  *
+  * FIXME: This needs to become a port specific callback once we have a
+  * framework for this
+  */
+ static void clear_rx_fifo(struct uart_8250_port *up)
+ {
+       unsigned int status, tmout = 10000;
+       do {
+               status = serial_in(up, UART_LSR);
+               if (status & (UART_LSR_FIFOE | UART_LSR_BRK_ERROR_BITS))
+                       status = serial_in(up, UART_RX);
+               else
+                       break;
+               if (--tmout == 0)
+                       break;
+               udelay(1);
+       } while (1);
+ }
+ /*
+  * serial8250_rx_chars: processes received characters according to the
+  * passed-in LSR value, and returns the remaining LSR bits not handled
+  * by this Rx routine.
+  */
+ unsigned char
+ serial8250_rx_chars(struct uart_8250_port *up, unsigned char lsr)
+ {
+       struct tty_struct *tty = up->port.state->port.tty;
+       unsigned char ch;
+       int max_count = 256;
+       char flag;
+       do {
+               if (likely(lsr & UART_LSR_DR))
++              {
+                       ch = serial_inp(up, UART_RX);
++                      if (arch_8250_sysrq_via_ctrl_o(ch, &up->port))
++                              goto ignore_char;
++              }
+               else
+                       /*
+                        * Intel 82571 has a Serial Over LAN device that will
+                        * set UART_LSR_BI without setting UART_LSR_DR when
+                        * it receives a break. To avoid reading from the
+                        * receive buffer without UART_LSR_DR bit set, we
+                        * just force the read character to be 0
+                        */
+                       ch = 0;
+               flag = TTY_NORMAL;
+               up->port.icount.rx++;
+               lsr |= up->lsr_saved_flags;
+               up->lsr_saved_flags = 0;
+               if (unlikely(lsr & UART_LSR_BRK_ERROR_BITS)) {
+                       /*
+                        * For statistics only
+                        */
+                       if (lsr & UART_LSR_BI) {
+                               lsr &= ~(UART_LSR_FE | UART_LSR_PE);
+                               up->port.icount.brk++;
+                               /*
+                                * If tegra port then clear the rx fifo to
+                                * accept another break/character.
+                                */
+                               if (up->port.type == PORT_TEGRA)
+                                       clear_rx_fifo(up);
+                               /*
+                                * We do the SysRQ and SAK checking
+                                * here because otherwise the break
+                                * may get masked by ignore_status_mask
+                                * or read_status_mask.
+                                */
+                               if (uart_handle_break(&up->port))
+                                       goto ignore_char;
+                       } else if (lsr & UART_LSR_PE)
+                               up->port.icount.parity++;
+                       else if (lsr & UART_LSR_FE)
+                               up->port.icount.frame++;
+                       if (lsr & UART_LSR_OE)
+                               up->port.icount.overrun++;
+                       /*
+                        * Mask off conditions which should be ignored.
+                        */
+                       lsr &= up->port.read_status_mask;
+                       if (lsr & UART_LSR_BI) {
+                               DEBUG_INTR("handling break....");
+                               flag = TTY_BREAK;
+                       } else if (lsr & UART_LSR_PE)
+                               flag = TTY_PARITY;
+                       else if (lsr & UART_LSR_FE)
+                               flag = TTY_FRAME;
+               }
+               if (uart_handle_sysrq_char(&up->port, ch))
+                       goto ignore_char;
+               uart_insert_char(&up->port, lsr, UART_LSR_OE, ch, flag);
+ ignore_char:
+               lsr = serial_inp(up, UART_LSR);
+       } while ((lsr & (UART_LSR_DR | UART_LSR_BI)) && (max_count-- > 0));
+       spin_unlock(&up->port.lock);
+       tty_flip_buffer_push(tty);
+       spin_lock(&up->port.lock);
+       return lsr;
+ }
+ EXPORT_SYMBOL_GPL(serial8250_rx_chars);
+ void serial8250_tx_chars(struct uart_8250_port *up)
+ {
+       struct circ_buf *xmit = &up->port.state->xmit;
+       int count;
+       if (up->port.x_char) {
+               serial_outp(up, UART_TX, up->port.x_char);
+               up->port.icount.tx++;
+               up->port.x_char = 0;
+               return;
+       }
+       if (uart_tx_stopped(&up->port)) {
+               serial8250_stop_tx(&up->port);
+               return;
+       }
+       if (uart_circ_empty(xmit)) {
+               __stop_tx(up);
+               return;
+       }
+       count = up->tx_loadsz;
+       do {
+               serial_out(up, UART_TX, xmit->buf[xmit->tail]);
+               xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1);
+               up->port.icount.tx++;
+               if (uart_circ_empty(xmit))
+                       break;
+       } while (--count > 0);
+       if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
+               uart_write_wakeup(&up->port);
+       DEBUG_INTR("THRE...");
+       if (uart_circ_empty(xmit))
+               __stop_tx(up);
+ }
+ EXPORT_SYMBOL_GPL(serial8250_tx_chars);
+ unsigned int serial8250_modem_status(struct uart_8250_port *up)
+ {
+       unsigned int status = serial_in(up, UART_MSR);
+       status |= up->msr_saved_flags;
+       up->msr_saved_flags = 0;
+       if (status & UART_MSR_ANY_DELTA && up->ier & UART_IER_MSI &&
+           up->port.state != NULL) {
+               if (status & UART_MSR_TERI)
+                       up->port.icount.rng++;
+               if (status & UART_MSR_DDSR)
+                       up->port.icount.dsr++;
+               if (status & UART_MSR_DDCD)
+                       uart_handle_dcd_change(&up->port, status & UART_MSR_DCD);
+               if (status & UART_MSR_DCTS)
+                       uart_handle_cts_change(&up->port, status & UART_MSR_CTS);
+               wake_up_interruptible(&up->port.state->port.delta_msr_wait);
+       }
+       return status;
+ }
+ EXPORT_SYMBOL_GPL(serial8250_modem_status);
+ /*
+  * This handles the interrupt from one port.
+  */
+ int serial8250_handle_irq(struct uart_port *port, unsigned int iir)
+ {
+       unsigned char status;
+       unsigned long flags;
+       struct uart_8250_port *up =
+               container_of(port, struct uart_8250_port, port);
+       if (iir & UART_IIR_NO_INT)
+               return 0;
+       spin_lock_irqsave(&up->port.lock, flags);
+       status = serial_inp(up, UART_LSR);
+       DEBUG_INTR("status = %x...", status);
+       if (status & (UART_LSR_DR | UART_LSR_BI))
+               status = serial8250_rx_chars(up, status);
+       serial8250_modem_status(up);
+       if (status & UART_LSR_THRE)
+               serial8250_tx_chars(up);
+       spin_unlock_irqrestore(&up->port.lock, flags);
+       return 1;
+ }
+ EXPORT_SYMBOL_GPL(serial8250_handle_irq);
+ static int serial8250_default_handle_irq(struct uart_port *port)
+ {
+       struct uart_8250_port *up =
+               container_of(port, struct uart_8250_port, port);
+       unsigned int iir = serial_in(up, UART_IIR);
+       return serial8250_handle_irq(port, iir);
+ }
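serial8250_handle_irq() is exported so that 8250-compatible drivers can keep their own interrupt quirks while reusing the generic LSR/TX/modem-status handling; serial8250_default_handle_irq() above is the minimal caller. A hedged sketch of a chip-specific wrapper (myport_handle_irq and the quirk handling are hypothetical, not part of this file); such a handler would be plugged in through uart_port.handle_irq or plat_serial8250_port.handle_irq:

static int myport_handle_irq(struct uart_port *port)
{
	unsigned int iir = port->serial_in(port, UART_IIR);

	/*
	 * Chip-specific IIR values would be decoded and handled here,
	 * before deferring everything else to the generic code.
	 */
	return serial8250_handle_irq(port, iir);
}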
+ /*
+  * This is the serial driver's interrupt routine.
+  *
+  * Arjan thinks the old way was overly complex, so it got simplified.
+  * Alan disagrees, saying that we need the complexity to handle the weird
+  * nature of ISA shared interrupts.  (This is a special exception.)
+  *
+  * In order to handle ISA shared interrupts properly, we need to check
+  * that all ports have been serviced, and therefore the ISA interrupt
+  * line has been de-asserted.
+  *
+  * This means we need to loop through all ports, checking that they
+  * don't have an interrupt pending.
+  */
+ static irqreturn_t serial8250_interrupt(int irq, void *dev_id)
+ {
+       struct irq_info *i = dev_id;
+       struct list_head *l, *end = NULL;
+       int pass_counter = 0, handled = 0;
+       DEBUG_INTR("serial8250_interrupt(%d)...", irq);
+       spin_lock(&i->lock);
+       l = i->head;
+       do {
+               struct uart_8250_port *up;
+               struct uart_port *port;
+               bool skip;
+               up = list_entry(l, struct uart_8250_port, list);
+               port = &up->port;
+               skip = pass_counter && up->port.flags & UPF_IIR_ONCE;
+               if (!skip && port->handle_irq(port)) {
+                       handled = 1;
+                       end = NULL;
+               } else if (end == NULL)
+                       end = l;
+               l = l->next;
+               if (l == i->head && pass_counter++ > PASS_LIMIT) {
+                       /* If we hit this, we're dead. */
+                       printk_ratelimited(KERN_ERR
+                               "serial8250: too much work for irq%d\n", irq);
+                       break;
+               }
+       } while (l != end);
+       spin_unlock(&i->lock);
+       DEBUG_INTR("end.\n");
+       return IRQ_RETVAL(handled);
+ }
+ /*
+  * To support ISA shared interrupts, we need to have one interrupt
+  * handler that ensures that the IRQ line has been deasserted
+  * before returning.  Failing to do this will result in the IRQ
+  * line being stuck active, and, since ISA irqs are edge triggered,
+  * no more IRQs will be seen.
+  */
+ static void serial_do_unlink(struct irq_info *i, struct uart_8250_port *up)
+ {
+       spin_lock_irq(&i->lock);
+       if (!list_empty(i->head)) {
+               if (i->head == &up->list)
+                       i->head = i->head->next;
+               list_del(&up->list);
+       } else {
+               BUG_ON(i->head != &up->list);
+               i->head = NULL;
+       }
+       spin_unlock_irq(&i->lock);
+       /* List empty so throw away the hash node */
+       if (i->head == NULL) {
+               hlist_del(&i->node);
+               kfree(i);
+       }
+ }
+ static int serial_link_irq_chain(struct uart_8250_port *up)
+ {
+       struct hlist_head *h;
+       struct hlist_node *n;
+       struct irq_info *i;
+       int ret, irq_flags = up->port.flags & UPF_SHARE_IRQ ? IRQF_SHARED : 0;
+       mutex_lock(&hash_mutex);
+       h = &irq_lists[up->port.irq % NR_IRQ_HASH];
+       hlist_for_each(n, h) {
+               i = hlist_entry(n, struct irq_info, node);
+               if (i->irq == up->port.irq)
+                       break;
+       }
+       if (n == NULL) {
+               i = kzalloc(sizeof(struct irq_info), GFP_KERNEL);
+               if (i == NULL) {
+                       mutex_unlock(&hash_mutex);
+                       return -ENOMEM;
+               }
+               spin_lock_init(&i->lock);
+               i->irq = up->port.irq;
+               hlist_add_head(&i->node, h);
+       }
+       mutex_unlock(&hash_mutex);
+       spin_lock_irq(&i->lock);
+       if (i->head) {
+               list_add(&up->list, i->head);
+               spin_unlock_irq(&i->lock);
+               ret = 0;
+       } else {
+               INIT_LIST_HEAD(&up->list);
+               i->head = &up->list;
+               spin_unlock_irq(&i->lock);
+               irq_flags |= up->port.irqflags;
+               ret = request_irq(up->port.irq, serial8250_interrupt,
+                                 irq_flags, "serial", i);
+               if (ret < 0)
+                       serial_do_unlink(i, up);
+       }
+       return ret;
+ }
+ static void serial_unlink_irq_chain(struct uart_8250_port *up)
+ {
+       struct irq_info *i;
+       struct hlist_node *n;
+       struct hlist_head *h;
+       mutex_lock(&hash_mutex);
+       h = &irq_lists[up->port.irq % NR_IRQ_HASH];
+       hlist_for_each(n, h) {
+               i = hlist_entry(n, struct irq_info, node);
+               if (i->irq == up->port.irq)
+                       break;
+       }
+       BUG_ON(n == NULL);
+       BUG_ON(i->head == NULL);
+       if (list_empty(i->head))
+               free_irq(up->port.irq, i);
+       serial_do_unlink(i, up);
+       mutex_unlock(&hash_mutex);
+ }
+ /*
+  * This function is used to handle ports that do not have an
+  * interrupt.  This doesn't work very well for 16450s, but gives
+  * barely passable results for a 16550A (although at the expense
+  * of much CPU overhead).
+  */
+ static void serial8250_timeout(unsigned long data)
+ {
+       struct uart_8250_port *up = (struct uart_8250_port *)data;
+       up->port.handle_irq(&up->port);
+       mod_timer(&up->timer, jiffies + uart_poll_timeout(&up->port));
+ }
+ static void serial8250_backup_timeout(unsigned long data)
+ {
+       struct uart_8250_port *up = (struct uart_8250_port *)data;
+       unsigned int iir, ier = 0, lsr;
+       unsigned long flags;
+       spin_lock_irqsave(&up->port.lock, flags);
+       /*
+        * Must disable interrupts or else we risk racing with the interrupt
+        * based handler.
+        */
+       if (is_real_interrupt(up->port.irq)) {
+               ier = serial_in(up, UART_IER);
+               serial_out(up, UART_IER, 0);
+       }
+       iir = serial_in(up, UART_IIR);
+       /*
+        * This should be a safe test for anyone who doesn't trust the
+        * IIR bits on their UART, but it's specifically designed for
+        * the "Diva" UART used on the management processor on many HP
+        * ia64 and parisc boxes.
+        */
+       lsr = serial_in(up, UART_LSR);
+       up->lsr_saved_flags |= lsr & LSR_SAVE_FLAGS;
+       if ((iir & UART_IIR_NO_INT) && (up->ier & UART_IER_THRI) &&
+           (!uart_circ_empty(&up->port.state->xmit) || up->port.x_char) &&
+           (lsr & UART_LSR_THRE)) {
+               iir &= ~(UART_IIR_ID | UART_IIR_NO_INT);
+               iir |= UART_IIR_THRI;
+       }
+       if (!(iir & UART_IIR_NO_INT))
+               serial8250_tx_chars(up);
+       if (is_real_interrupt(up->port.irq))
+               serial_out(up, UART_IER, ier);
+       spin_unlock_irqrestore(&up->port.lock, flags);
+       /* Standard timer interval plus 0.2s to keep the port running */
+       mod_timer(&up->timer,
+               jiffies + uart_poll_timeout(&up->port) + HZ / 5);
+ }
+ static unsigned int serial8250_tx_empty(struct uart_port *port)
+ {
+       struct uart_8250_port *up =
+               container_of(port, struct uart_8250_port, port);
+       unsigned long flags;
+       unsigned int lsr;
+       spin_lock_irqsave(&up->port.lock, flags);
+       lsr = serial_in(up, UART_LSR);
+       up->lsr_saved_flags |= lsr & LSR_SAVE_FLAGS;
+       spin_unlock_irqrestore(&up->port.lock, flags);
+       return (lsr & BOTH_EMPTY) == BOTH_EMPTY ? TIOCSER_TEMT : 0;
+ }
+ static unsigned int serial8250_get_mctrl(struct uart_port *port)
+ {
+       struct uart_8250_port *up =
+               container_of(port, struct uart_8250_port, port);
+       unsigned int status;
+       unsigned int ret;
+       status = serial8250_modem_status(up);
+       ret = 0;
+       if (status & UART_MSR_DCD)
+               ret |= TIOCM_CAR;
+       if (status & UART_MSR_RI)
+               ret |= TIOCM_RNG;
+       if (status & UART_MSR_DSR)
+               ret |= TIOCM_DSR;
+       if (status & UART_MSR_CTS)
+               ret |= TIOCM_CTS;
+       return ret;
+ }
+ static void serial8250_set_mctrl(struct uart_port *port, unsigned int mctrl)
+ {
+       struct uart_8250_port *up =
+               container_of(port, struct uart_8250_port, port);
+       unsigned char mcr = 0;
+       if (mctrl & TIOCM_RTS)
+               mcr |= UART_MCR_RTS;
+       if (mctrl & TIOCM_DTR)
+               mcr |= UART_MCR_DTR;
+       if (mctrl & TIOCM_OUT1)
+               mcr |= UART_MCR_OUT1;
+       if (mctrl & TIOCM_OUT2)
+               mcr |= UART_MCR_OUT2;
+       if (mctrl & TIOCM_LOOP)
+               mcr |= UART_MCR_LOOP;
+       mcr = (mcr & up->mcr_mask) | up->mcr_force | up->mcr;
+       serial_out(up, UART_MCR, mcr);
+ }
+ static void serial8250_break_ctl(struct uart_port *port, int break_state)
+ {
+       struct uart_8250_port *up =
+               container_of(port, struct uart_8250_port, port);
+       unsigned long flags;
+       spin_lock_irqsave(&up->port.lock, flags);
+       if (break_state == -1)
+               up->lcr |= UART_LCR_SBC;
+       else
+               up->lcr &= ~UART_LCR_SBC;
+       serial_out(up, UART_LCR, up->lcr);
+       spin_unlock_irqrestore(&up->port.lock, flags);
+ }
+ /*
+  *    Wait for transmitter & holding register to empty
+  */
+ static void wait_for_xmitr(struct uart_8250_port *up, int bits)
+ {
+       unsigned int status, tmout = 10000;
+       /* Wait up to 10ms for the character(s) to be sent. */
+       for (;;) {
+               status = serial_in(up, UART_LSR);
+               up->lsr_saved_flags |= status & LSR_SAVE_FLAGS;
+               if ((status & bits) == bits)
+                       break;
+               if (--tmout == 0)
+                       break;
+               udelay(1);
+       }
+       /* Wait up to 1s for flow control if necessary */
+       if (up->port.flags & UPF_CONS_FLOW) {
+               unsigned int tmout;
+               for (tmout = 1000000; tmout; tmout--) {
+                       unsigned int msr = serial_in(up, UART_MSR);
+                       up->msr_saved_flags |= msr & MSR_SAVE_FLAGS;
+                       if (msr & UART_MSR_CTS)
+                               break;
+                       udelay(1);
+                       touch_nmi_watchdog();
+               }
+       }
+ }
+ #ifdef CONFIG_CONSOLE_POLL
+ /*
+  * Console polling routines for writing and reading from the uart while
+  * in an interrupt or debug context.
+  */
+ static int serial8250_get_poll_char(struct uart_port *port)
+ {
+       struct uart_8250_port *up =
+               container_of(port, struct uart_8250_port, port);
+       unsigned char lsr = serial_inp(up, UART_LSR);
+       if (!(lsr & UART_LSR_DR))
+               return NO_POLL_CHAR;
+       return serial_inp(up, UART_RX);
+ }
+ static void serial8250_put_poll_char(struct uart_port *port,
+                        unsigned char c)
+ {
+       unsigned int ier;
+       struct uart_8250_port *up =
+               container_of(port, struct uart_8250_port, port);
+       /*
+        *      First save the IER then disable the interrupts
+        */
+       ier = serial_in(up, UART_IER);
+       if (up->capabilities & UART_CAP_UUE)
+               serial_out(up, UART_IER, UART_IER_UUE);
+       else
+               serial_out(up, UART_IER, 0);
+       wait_for_xmitr(up, BOTH_EMPTY);
+       /*
+        *      Send the character out.
+        *      If a LF, also do CR...
+        */
+       serial_out(up, UART_TX, c);
+       if (c == 10) {
+               wait_for_xmitr(up, BOTH_EMPTY);
+               serial_out(up, UART_TX, 13);
+       }
+       /*
+        *      Finally, wait for transmitter to become empty
+        *      and restore the IER
+        */
+       wait_for_xmitr(up, BOTH_EMPTY);
+       serial_out(up, UART_IER, ier);
+ }
+ #endif /* CONFIG_CONSOLE_POLL */
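For context: these CONFIG_CONSOLE_POLL hooks are what a polled debugger front end such as kgdboc relies on to drive the UART while the kernel is stopped in the debugger; booting with something like console=ttyS0,115200 kgdboc=ttyS0,115200 is the usual way to exercise them (the exact command line here is illustrative, not taken from this patch).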
+ static int serial8250_startup(struct uart_port *port)
+ {
+       struct uart_8250_port *up =
+               container_of(port, struct uart_8250_port, port);
+       unsigned long flags;
+       unsigned char lsr, iir;
+       int retval;
+       up->port.fifosize = uart_config[up->port.type].fifo_size;
+       up->tx_loadsz = uart_config[up->port.type].tx_loadsz;
+       up->capabilities = uart_config[up->port.type].flags;
+       up->mcr = 0;
+       if (up->port.iotype != up->cur_iotype)
+               set_io_from_upio(port);
+       if (up->port.type == PORT_16C950) {
+               /* Wake up and initialize UART */
+               up->acr = 0;
+               serial_outp(up, UART_LCR, UART_LCR_CONF_MODE_B);
+               serial_outp(up, UART_EFR, UART_EFR_ECB);
+               serial_outp(up, UART_IER, 0);
+               serial_outp(up, UART_LCR, 0);
+               serial_icr_write(up, UART_CSR, 0); /* Reset the UART */
+               serial_outp(up, UART_LCR, UART_LCR_CONF_MODE_B);
+               serial_outp(up, UART_EFR, UART_EFR_ECB);
+               serial_outp(up, UART_LCR, 0);
+       }
+ #ifdef CONFIG_SERIAL_8250_RSA
+       /*
+        * If this is an RSA port, see if we can kick it up to the
+        * higher speed clock.
+        */
+       enable_rsa(up);
+ #endif
+       /*
+        * Clear the FIFO buffers and disable them.
+        * (they will be reenabled in set_termios())
+        */
+       serial8250_clear_fifos(up);
+       /*
+        * Clear the interrupt registers.
+        */
+       (void) serial_inp(up, UART_LSR);
+       (void) serial_inp(up, UART_RX);
+       (void) serial_inp(up, UART_IIR);
+       (void) serial_inp(up, UART_MSR);
+       /*
+        * At this point, there's no way the LSR could still be 0xff;
+        * if it is, then bail out, because there's likely no UART
+        * here.
+        */
+       if (!(up->port.flags & UPF_BUGGY_UART) &&
+           (serial_inp(up, UART_LSR) == 0xff)) {
+               printk_ratelimited(KERN_INFO "ttyS%d: LSR safety check engaged!\n",
+                                  serial_index(&up->port));
+               return -ENODEV;
+       }
+       /*
+        * For an XR16C850, we need to set the trigger levels
+        */
+       if (up->port.type == PORT_16850) {
+               unsigned char fctr;
+               serial_outp(up, UART_LCR, UART_LCR_CONF_MODE_B);
+               fctr = serial_inp(up, UART_FCTR) & ~(UART_FCTR_RX|UART_FCTR_TX);
+               serial_outp(up, UART_FCTR, fctr | UART_FCTR_TRGD | UART_FCTR_RX);
+               serial_outp(up, UART_TRG, UART_TRG_96);
+               serial_outp(up, UART_FCTR, fctr | UART_FCTR_TRGD | UART_FCTR_TX);
+               serial_outp(up, UART_TRG, UART_TRG_96);
+               serial_outp(up, UART_LCR, 0);
+       }
+       if (is_real_interrupt(up->port.irq)) {
+               unsigned char iir1;
+               /*
+                * Test for UARTs that do not reassert THRE when the
+                * transmitter is idle and the interrupt has already
+                * been cleared.  Real 16550s should always reassert
+                * this interrupt whenever the transmitter is idle and
+                * the interrupt is enabled.  Delays are necessary to
+                * allow register changes to become visible.
+                */
+               spin_lock_irqsave(&up->port.lock, flags);
+               if (up->port.irqflags & IRQF_SHARED)
+                       disable_irq_nosync(up->port.irq);
+               wait_for_xmitr(up, UART_LSR_THRE);
+               serial_out_sync(up, UART_IER, UART_IER_THRI);
+               udelay(1); /* allow THRE to set */
+               iir1 = serial_in(up, UART_IIR);
+               serial_out(up, UART_IER, 0);
+               serial_out_sync(up, UART_IER, UART_IER_THRI);
+               udelay(1); /* allow a working UART time to re-assert THRE */
+               iir = serial_in(up, UART_IIR);
+               serial_out(up, UART_IER, 0);
+               if (up->port.irqflags & IRQF_SHARED)
+                       enable_irq(up->port.irq);
+               spin_unlock_irqrestore(&up->port.lock, flags);
+               /*
+                * If the interrupt is not reasserted, setup a timer to
+                * kick the UART on a regular basis.
+                */
+               if (!(iir1 & UART_IIR_NO_INT) && (iir & UART_IIR_NO_INT)) {
+                       up->bugs |= UART_BUG_THRE;
+                       pr_debug("ttyS%d - using backup timer\n",
+                                serial_index(port));
+               }
+       }
+       /*
+        * The above check will only give an accurate result the first time
+        * the port is opened, so this value needs to be preserved.
+        */
+       if (up->bugs & UART_BUG_THRE) {
+               up->timer.function = serial8250_backup_timeout;
+               up->timer.data = (unsigned long)up;
+               mod_timer(&up->timer, jiffies +
+                       uart_poll_timeout(port) + HZ / 5);
+       }
+       /*
+        * If the "interrupt" for this port doesn't correspond with any
+        * hardware interrupt, we use a timer-based system.  The original
+        * driver used to do this with IRQ0.
+        */
+       if (!is_real_interrupt(up->port.irq)) {
+               up->timer.data = (unsigned long)up;
+               mod_timer(&up->timer, jiffies + uart_poll_timeout(port));
+       } else {
+               retval = serial_link_irq_chain(up);
+               if (retval)
+                       return retval;
+       }
+       /*
+        * Now, initialize the UART
+        */
+       serial_outp(up, UART_LCR, UART_LCR_WLEN8);
+       spin_lock_irqsave(&up->port.lock, flags);
+       if (up->port.flags & UPF_FOURPORT) {
+               if (!is_real_interrupt(up->port.irq))
+                       up->port.mctrl |= TIOCM_OUT1;
+       } else
+               /*
+                * Most PC uarts need OUT2 raised to enable interrupts.
+                */
+               if (is_real_interrupt(up->port.irq))
+                       up->port.mctrl |= TIOCM_OUT2;
+       serial8250_set_mctrl(&up->port, up->port.mctrl);
+       /* Serial over LAN (SoL) hack:
+          Intel 8257x Gigabit ethernet chips have a
+          16550 emulation, to be used for Serial Over LAN.
+          Those chips take a longer time than a normal
+          serial device to signal that transmit data has
+          been queued. Because of that, the test below generally
+          fails. One solution would be to delay the reading of
+          iir. However, this is not reliable, since the timeout
+          is variable. So, let's just not test whether we receive
+          a TX irq. This way, we'll never enable UART_BUG_TXEN.
+        */
+       if (skip_txen_test || up->port.flags & UPF_NO_TXEN_TEST)
+               goto dont_test_tx_en;
+       /*
+        * Do a quick test to see if we receive an
+        * interrupt when we enable the TX irq.
+        */
+       serial_outp(up, UART_IER, UART_IER_THRI);
+       lsr = serial_in(up, UART_LSR);
+       iir = serial_in(up, UART_IIR);
+       serial_outp(up, UART_IER, 0);
+       if (lsr & UART_LSR_TEMT && iir & UART_IIR_NO_INT) {
+               if (!(up->bugs & UART_BUG_TXEN)) {
+                       up->bugs |= UART_BUG_TXEN;
+                       pr_debug("ttyS%d - enabling bad tx status workarounds\n",
+                                serial_index(port));
+               }
+       } else {
+               up->bugs &= ~UART_BUG_TXEN;
+       }
+ dont_test_tx_en:
+       spin_unlock_irqrestore(&up->port.lock, flags);
+       /*
+        * Clear the interrupt registers again for luck, and clear the
+        * saved flags to avoid getting false values from polling
+        * routines or the previous session.
+        */
+       serial_inp(up, UART_LSR);
+       serial_inp(up, UART_RX);
+       serial_inp(up, UART_IIR);
+       serial_inp(up, UART_MSR);
+       up->lsr_saved_flags = 0;
+       up->msr_saved_flags = 0;
+       /*
+        * Finally, enable interrupts.  Note: Modem status interrupts
+        * are set via set_termios(), which will be occurring imminently
+        * anyway, so we don't enable them here.
+        */
+       up->ier = UART_IER_RLSI | UART_IER_RDI;
+       serial_outp(up, UART_IER, up->ier);
+       if (up->port.flags & UPF_FOURPORT) {
+               unsigned int icp;
+               /*
+                * Enable interrupts on the AST Fourport board
+                */
+               icp = (up->port.iobase & 0xfe0) | 0x01f;
+               outb_p(0x80, icp);
+               (void) inb_p(icp);
+       }
+       return 0;
+ }
+ static void serial8250_shutdown(struct uart_port *port)
+ {
+       struct uart_8250_port *up =
+               container_of(port, struct uart_8250_port, port);
+       unsigned long flags;
+       /*
+        * Disable interrupts from this port
+        */
+       up->ier = 0;
+       serial_outp(up, UART_IER, 0);
+       spin_lock_irqsave(&up->port.lock, flags);
+       if (up->port.flags & UPF_FOURPORT) {
+               /* reset interrupts on the AST Fourport board */
+               inb((up->port.iobase & 0xfe0) | 0x1f);
+               up->port.mctrl |= TIOCM_OUT1;
+       } else
+               up->port.mctrl &= ~TIOCM_OUT2;
+       serial8250_set_mctrl(&up->port, up->port.mctrl);
+       spin_unlock_irqrestore(&up->port.lock, flags);
+       /*
+        * Disable break condition and FIFOs
+        */
+       serial_out(up, UART_LCR, serial_inp(up, UART_LCR) & ~UART_LCR_SBC);
+       serial8250_clear_fifos(up);
+ #ifdef CONFIG_SERIAL_8250_RSA
+       /*
+        * Reset the RSA board back to 115kbps compat mode.
+        */
+       disable_rsa(up);
+ #endif
+       /*
+        * Read data port to reset things, and then unlink from
+        * the IRQ chain.
+        */
+       (void) serial_in(up, UART_RX);
+       del_timer_sync(&up->timer);
+       up->timer.function = serial8250_timeout;
+       if (is_real_interrupt(up->port.irq))
+               serial_unlink_irq_chain(up);
+ }
+ static unsigned int serial8250_get_divisor(struct uart_port *port, unsigned int baud)
+ {
+       unsigned int quot;
+       /*
+        * Handle magic divisors for baud rates above baud_base on
+        * SMSC SuperIO chips.
+        */
+       if ((port->flags & UPF_MAGIC_MULTIPLIER) &&
+           baud == (port->uartclk/4))
+               quot = 0x8001;
+       else if ((port->flags & UPF_MAGIC_MULTIPLIER) &&
+                baud == (port->uartclk/8))
+               quot = 0x8002;
+       else
+               quot = uart_get_divisor(port, baud);
+       return quot;
+ }
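A worked example of the magic-divisor path above, assuming a SMSC SuperIO-style UART with UPF_MAGIC_MULTIPLIER set and uartclk = 7372800 (so the normal ceiling of uartclk/16 is 460800 baud): asking for uartclk/4 = 1843200 baud yields quot = 0x8001, asking for uartclk/8 = 921600 baud yields quot = 0x8002, and every other rate falls through to uart_get_divisor().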
+ void
+ serial8250_do_set_termios(struct uart_port *port, struct ktermios *termios,
+                         struct ktermios *old)
+ {
+       struct uart_8250_port *up =
+               container_of(port, struct uart_8250_port, port);
+       unsigned char cval, fcr = 0;
+       unsigned long flags;
+       unsigned int baud, quot;
+       switch (termios->c_cflag & CSIZE) {
+       case CS5:
+               cval = UART_LCR_WLEN5;
+               break;
+       case CS6:
+               cval = UART_LCR_WLEN6;
+               break;
+       case CS7:
+               cval = UART_LCR_WLEN7;
+               break;
+       default:
+       case CS8:
+               cval = UART_LCR_WLEN8;
+               break;
+       }
+       if (termios->c_cflag & CSTOPB)
+               cval |= UART_LCR_STOP;
+       if (termios->c_cflag & PARENB)
+               cval |= UART_LCR_PARITY;
+       if (!(termios->c_cflag & PARODD))
+               cval |= UART_LCR_EPAR;
+ #ifdef CMSPAR
+       if (termios->c_cflag & CMSPAR)
+               cval |= UART_LCR_SPAR;
+ #endif
+       /*
+        * Ask the core to calculate the divisor for us.
+        */
+       baud = uart_get_baud_rate(port, termios, old,
+                                 port->uartclk / 16 / 0xffff,
+                                 port->uartclk / 16);
+       quot = serial8250_get_divisor(port, baud);
+       /*
+        * Oxford Semi 952 rev B workaround
+        */
+       if (up->bugs & UART_BUG_QUOT && (quot & 0xff) == 0)
+               quot++;
+       if (up->capabilities & UART_CAP_FIFO && up->port.fifosize > 1) {
+               if (baud < 2400)
+                       fcr = UART_FCR_ENABLE_FIFO | UART_FCR_TRIGGER_1;
+               else
+                       fcr = uart_config[up->port.type].fcr;
+       }
+       /*
+        * MCR-based auto flow control.  When AFE is enabled, RTS will be
+        * deasserted when the receive FIFO contains more characters than
+        * the trigger, or the MCR RTS bit is cleared.  In the case where
+        * the remote UART is not using CTS auto flow control, we must
+        * have sufficient FIFO entries for the latency of the remote
+        * UART to respond.  IOW, at least 32 bytes of FIFO.
+        */
+       if (up->capabilities & UART_CAP_AFE && up->port.fifosize >= 32) {
+               up->mcr &= ~UART_MCR_AFE;
+               if (termios->c_cflag & CRTSCTS)
+                       up->mcr |= UART_MCR_AFE;
+       }
+       /*
+        * Ok, we're now changing the port state.  Do it with
+        * interrupts disabled.
+        */
+       spin_lock_irqsave(&up->port.lock, flags);
+       /*
+        * Update the per-port timeout.
+        */
+       uart_update_timeout(port, termios->c_cflag, baud);
+       up->port.read_status_mask = UART_LSR_OE | UART_LSR_THRE | UART_LSR_DR;
+       if (termios->c_iflag & INPCK)
+               up->port.read_status_mask |= UART_LSR_FE | UART_LSR_PE;
+       if (termios->c_iflag & (BRKINT | PARMRK))
+               up->port.read_status_mask |= UART_LSR_BI;
+       /*
+        * Characters to ignore
+        */
+       up->port.ignore_status_mask = 0;
+       if (termios->c_iflag & IGNPAR)
+               up->port.ignore_status_mask |= UART_LSR_PE | UART_LSR_FE;
+       if (termios->c_iflag & IGNBRK) {
+               up->port.ignore_status_mask |= UART_LSR_BI;
+               /*
+                * If we're ignoring parity and break indicators,
+                * ignore overruns too (for real raw support).
+                */
+               if (termios->c_iflag & IGNPAR)
+                       up->port.ignore_status_mask |= UART_LSR_OE;
+       }
+       /*
+        * ignore all characters if CREAD is not set
+        */
+       if ((termios->c_cflag & CREAD) == 0)
+               up->port.ignore_status_mask |= UART_LSR_DR;
+       /*
+        * CTS flow control flag and modem status interrupts
+        */
+       up->ier &= ~UART_IER_MSI;
+       if (!(up->bugs & UART_BUG_NOMSR) &&
+                       UART_ENABLE_MS(&up->port, termios->c_cflag))
+               up->ier |= UART_IER_MSI;
+       if (up->capabilities & UART_CAP_UUE)
+               up->ier |= UART_IER_UUE;
+       if (up->capabilities & UART_CAP_RTOIE)
+               up->ier |= UART_IER_RTOIE;
+       serial_out(up, UART_IER, up->ier);
+       if (up->capabilities & UART_CAP_EFR) {
+               unsigned char efr = 0;
+               /*
+                * TI16C752/Startech hardware flow control.  FIXME:
+                * - TI16C752 requires control thresholds to be set.
+                * - UART_MCR_RTS is ineffective if auto-RTS mode is enabled.
+                */
+               if (termios->c_cflag & CRTSCTS)
+                       efr |= UART_EFR_CTS;
+               serial_outp(up, UART_LCR, UART_LCR_CONF_MODE_B);
+               if (up->port.flags & UPF_EXAR_EFR)
+                       serial_outp(up, UART_XR_EFR, efr);
+               else
+                       serial_outp(up, UART_EFR, efr);
+       }
+ #ifdef CONFIG_ARCH_OMAP
+       /* Workaround to enable 115200 baud on OMAP1510 internal ports */
+       if (cpu_is_omap1510() && is_omap_port(up)) {
+               if (baud == 115200) {
+                       quot = 1;
+                       serial_out(up, UART_OMAP_OSC_12M_SEL, 1);
+               } else
+                       serial_out(up, UART_OMAP_OSC_12M_SEL, 0);
+       }
+ #endif
+       if (up->capabilities & UART_NATSEMI) {
+               /* Switch to bank 2 not bank 1, to avoid resetting EXCR2 */
+               serial_outp(up, UART_LCR, 0xe0);
+       } else {
+               serial_outp(up, UART_LCR, cval | UART_LCR_DLAB);/* set DLAB */
+       }
+       serial_dl_write(up, quot);
+       /*
+        * LCR DLAB must be set to enable 64-byte FIFO mode. If the FCR
+        * is written without DLAB set, this mode will be disabled.
+        */
+       if (up->port.type == PORT_16750)
+               serial_outp(up, UART_FCR, fcr);
+       serial_outp(up, UART_LCR, cval);                /* reset DLAB */
+       up->lcr = cval;                                 /* Save LCR */
+       if (up->port.type != PORT_16750) {
+               if (fcr & UART_FCR_ENABLE_FIFO) {
+                       /* emulated UARTs (Lucent Venus 167x) need two steps */
+                       serial_outp(up, UART_FCR, UART_FCR_ENABLE_FIFO);
+               }
+               serial_outp(up, UART_FCR, fcr);         /* set fcr */
+       }
+       serial8250_set_mctrl(&up->port, up->port.mctrl);
+       spin_unlock_irqrestore(&up->port.lock, flags);
+       /* Don't rewrite B0 */
+       if (tty_termios_baud_rate(termios))
+               tty_termios_encode_baud_rate(termios, baud, baud);
+ }
+ EXPORT_SYMBOL(serial8250_do_set_termios);
+ static void
+ serial8250_set_termios(struct uart_port *port, struct ktermios *termios,
+                      struct ktermios *old)
+ {
+       if (port->set_termios)
+               port->set_termios(port, termios, old);
+       else
+               serial8250_do_set_termios(port, termios, old);
+ }
+ static void
+ serial8250_set_ldisc(struct uart_port *port, int new)
+ {
+       if (new == N_PPS) {
+               port->flags |= UPF_HARDPPS_CD;
+               serial8250_enable_ms(port);
+       } else
+               port->flags &= ~UPF_HARDPPS_CD;
+ }
+ void serial8250_do_pm(struct uart_port *port, unsigned int state,
+                     unsigned int oldstate)
+ {
+       struct uart_8250_port *p =
+               container_of(port, struct uart_8250_port, port);
+       serial8250_set_sleep(p, state != 0);
+ }
+ EXPORT_SYMBOL(serial8250_do_pm);
+ static void
+ serial8250_pm(struct uart_port *port, unsigned int state,
+             unsigned int oldstate)
+ {
+       if (port->pm)
+               port->pm(port, state, oldstate);
+       else
+               serial8250_do_pm(port, state, oldstate);
+ }
+ static unsigned int serial8250_port_size(struct uart_8250_port *pt)
+ {
+       if (pt->port.iotype == UPIO_AU)
+               return 0x1000;
+ #ifdef CONFIG_ARCH_OMAP
+       if (is_omap_port(pt))
+               return 0x16 << pt->port.regshift;
+ #endif
+       return 8 << pt->port.regshift;
+ }
+ /*
+  * Resource handling.
+  */
+ static int serial8250_request_std_resource(struct uart_8250_port *up)
+ {
+       unsigned int size = serial8250_port_size(up);
+       int ret = 0;
+       switch (up->port.iotype) {
+       case UPIO_AU:
+       case UPIO_TSI:
+       case UPIO_MEM32:
+       case UPIO_MEM:
+               if (!up->port.mapbase)
+                       break;
+               if (!request_mem_region(up->port.mapbase, size, "serial")) {
+                       ret = -EBUSY;
+                       break;
+               }
+               if (up->port.flags & UPF_IOREMAP) {
+                       up->port.membase = ioremap_nocache(up->port.mapbase,
+                                                                       size);
+                       if (!up->port.membase) {
+                               release_mem_region(up->port.mapbase, size);
+                               ret = -ENOMEM;
+                       }
+               }
+               break;
+       case UPIO_HUB6:
+       case UPIO_PORT:
+               if (!request_region(up->port.iobase, size, "serial"))
+                       ret = -EBUSY;
+               break;
+       }
+       return ret;
+ }
+ static void serial8250_release_std_resource(struct uart_8250_port *up)
+ {
+       unsigned int size = serial8250_port_size(up);
+       switch (up->port.iotype) {
+       case UPIO_AU:
+       case UPIO_TSI:
+       case UPIO_MEM32:
+       case UPIO_MEM:
+               if (!up->port.mapbase)
+                       break;
+               if (up->port.flags & UPF_IOREMAP) {
+                       iounmap(up->port.membase);
+                       up->port.membase = NULL;
+               }
+               release_mem_region(up->port.mapbase, size);
+               break;
+       case UPIO_HUB6:
+       case UPIO_PORT:
+               release_region(up->port.iobase, size);
+               break;
+       }
+ }
+ static int serial8250_request_rsa_resource(struct uart_8250_port *up)
+ {
+       unsigned long start = UART_RSA_BASE << up->port.regshift;
+       unsigned int size = 8 << up->port.regshift;
+       int ret = -EINVAL;
+       switch (up->port.iotype) {
+       case UPIO_HUB6:
+       case UPIO_PORT:
+               start += up->port.iobase;
+               if (request_region(start, size, "serial-rsa"))
+                       ret = 0;
+               else
+                       ret = -EBUSY;
+               break;
+       }
+       return ret;
+ }
+ static void serial8250_release_rsa_resource(struct uart_8250_port *up)
+ {
+       unsigned long offset = UART_RSA_BASE << up->port.regshift;
+       unsigned int size = 8 << up->port.regshift;
+       switch (up->port.iotype) {
+       case UPIO_HUB6:
+       case UPIO_PORT:
+               release_region(up->port.iobase + offset, size);
+               break;
+       }
+ }
+ static void serial8250_release_port(struct uart_port *port)
+ {
+       struct uart_8250_port *up =
+               container_of(port, struct uart_8250_port, port);
+       serial8250_release_std_resource(up);
+       if (up->port.type == PORT_RSA)
+               serial8250_release_rsa_resource(up);
+ }
+ static int serial8250_request_port(struct uart_port *port)
+ {
+       struct uart_8250_port *up =
+               container_of(port, struct uart_8250_port, port);
+       int ret = 0;
+       ret = serial8250_request_std_resource(up);
+       if (ret == 0 && up->port.type == PORT_RSA) {
+               ret = serial8250_request_rsa_resource(up);
+               if (ret < 0)
+                       serial8250_release_std_resource(up);
+       }
+       return ret;
+ }
+ static void serial8250_config_port(struct uart_port *port, int flags)
+ {
+       struct uart_8250_port *up =
+               container_of(port, struct uart_8250_port, port);
+       int probeflags = PROBE_ANY;
+       int ret;
+       /*
+        * Find the region that we can probe for.  This in turn
+        * tells us whether we can probe for the type of port.
+        */
+       ret = serial8250_request_std_resource(up);
+       if (ret < 0)
+               return;
+       ret = serial8250_request_rsa_resource(up);
+       if (ret < 0)
+               probeflags &= ~PROBE_RSA;
+       if (up->port.iotype != up->cur_iotype)
+               set_io_from_upio(port);
+       if (flags & UART_CONFIG_TYPE)
+               autoconfig(up, probeflags);
+       /* if access method is AU, it is a 16550 with a quirk */
+       if (up->port.type == PORT_16550A && up->port.iotype == UPIO_AU)
+               up->bugs |= UART_BUG_NOMSR;
+       if (up->port.type != PORT_UNKNOWN && flags & UART_CONFIG_IRQ)
+               autoconfig_irq(up);
+       if (up->port.type != PORT_RSA && probeflags & PROBE_RSA)
+               serial8250_release_rsa_resource(up);
+       if (up->port.type == PORT_UNKNOWN)
+               serial8250_release_std_resource(up);
+ }
+ static int
+ serial8250_verify_port(struct uart_port *port, struct serial_struct *ser)
+ {
+       if (ser->irq >= nr_irqs || ser->irq < 0 ||
+           ser->baud_base < 9600 || ser->type < PORT_UNKNOWN ||
+           ser->type >= ARRAY_SIZE(uart_config) || ser->type == PORT_CIRRUS ||
+           ser->type == PORT_STARTECH)
+               return -EINVAL;
+       return 0;
+ }
+ static const char *
+ serial8250_type(struct uart_port *port)
+ {
+       int type = port->type;
+       if (type >= ARRAY_SIZE(uart_config))
+               type = 0;
+       return uart_config[type].name;
+ }
+ static struct uart_ops serial8250_pops = {
+       .tx_empty       = serial8250_tx_empty,
+       .set_mctrl      = serial8250_set_mctrl,
+       .get_mctrl      = serial8250_get_mctrl,
+       .stop_tx        = serial8250_stop_tx,
+       .start_tx       = serial8250_start_tx,
+       .stop_rx        = serial8250_stop_rx,
+       .enable_ms      = serial8250_enable_ms,
+       .break_ctl      = serial8250_break_ctl,
+       .startup        = serial8250_startup,
+       .shutdown       = serial8250_shutdown,
+       .set_termios    = serial8250_set_termios,
+       .set_ldisc      = serial8250_set_ldisc,
+       .pm             = serial8250_pm,
+       .type           = serial8250_type,
+       .release_port   = serial8250_release_port,
+       .request_port   = serial8250_request_port,
+       .config_port    = serial8250_config_port,
+       .verify_port    = serial8250_verify_port,
+ #ifdef CONFIG_CONSOLE_POLL
+       .poll_get_char = serial8250_get_poll_char,
+       .poll_put_char = serial8250_put_poll_char,
+ #endif
+ };
+ static struct uart_8250_port serial8250_ports[UART_NR];
+ static void (*serial8250_isa_config)(int port, struct uart_port *up,
+       unsigned short *capabilities);
+ void serial8250_set_isa_configurator(
+       void (*v)(int port, struct uart_port *up, unsigned short *capabilities))
+ {
+       serial8250_isa_config = v;
+ }
+ EXPORT_SYMBOL(serial8250_set_isa_configurator);
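A hedged sketch of what an ISA configurator hook might look like (board_isa_config is hypothetical); it is invoked once per legacy port from serial8250_isa_init_ports() below, before the port is registered:

static void board_isa_config(int port, struct uart_port *up,
			     unsigned short *capabilities)
{
	/*
	 * For example, treat the first legacy port as known-good and
	 * skip the autoconfig loopback test on it.
	 */
	if (port == 0)
		up->flags |= UPF_SKIP_TEST;
}

	/* somewhere in board/arch setup code */
	serial8250_set_isa_configurator(board_isa_config);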
+ static void __init serial8250_isa_init_ports(void)
+ {
+       struct uart_8250_port *up;
+       static int first = 1;
+       int i, irqflag = 0;
+       if (!first)
+               return;
+       first = 0;
+       for (i = 0; i < nr_uarts; i++) {
+               struct uart_8250_port *up = &serial8250_ports[i];
+               up->port.line = i;
+               spin_lock_init(&up->port.lock);
+               init_timer(&up->timer);
+               up->timer.function = serial8250_timeout;
+               /*
+                * ALPHA_KLUDGE_MCR needs to be killed.
+                */
+               up->mcr_mask = ~ALPHA_KLUDGE_MCR;
+               up->mcr_force = ALPHA_KLUDGE_MCR;
+               up->port.ops = &serial8250_pops;
+       }
+       if (share_irqs)
+               irqflag = IRQF_SHARED;
+       for (i = 0, up = serial8250_ports;
+            i < ARRAY_SIZE(old_serial_port) && i < nr_uarts;
+            i++, up++) {
+               up->port.iobase   = old_serial_port[i].port;
+               up->port.irq      = irq_canonicalize(old_serial_port[i].irq);
+               up->port.irqflags = old_serial_port[i].irqflags;
+               up->port.uartclk  = old_serial_port[i].baud_base * 16;
+               up->port.flags    = old_serial_port[i].flags;
+               up->port.hub6     = old_serial_port[i].hub6;
+               up->port.membase  = old_serial_port[i].iomem_base;
+               up->port.iotype   = old_serial_port[i].io_type;
+               up->port.regshift = old_serial_port[i].iomem_reg_shift;
+               set_io_from_upio(&up->port);
+               up->port.irqflags |= irqflag;
+               if (serial8250_isa_config != NULL)
+                       serial8250_isa_config(i, &up->port, &up->capabilities);
+       }
+ }
+ static void
+ serial8250_init_fixed_type_port(struct uart_8250_port *up, unsigned int type)
+ {
+       up->port.type = type;
+       up->port.fifosize = uart_config[type].fifo_size;
+       up->capabilities = uart_config[type].flags;
+       up->tx_loadsz = uart_config[type].tx_loadsz;
+ }
+ static void __init
+ serial8250_register_ports(struct uart_driver *drv, struct device *dev)
+ {
+       int i;
+       for (i = 0; i < nr_uarts; i++) {
+               struct uart_8250_port *up = &serial8250_ports[i];
+               up->cur_iotype = 0xFF;
+       }
+       serial8250_isa_init_ports();
+       for (i = 0; i < nr_uarts; i++) {
+               struct uart_8250_port *up = &serial8250_ports[i];
+               up->port.dev = dev;
+               if (up->port.flags & UPF_FIXED_TYPE)
+                       serial8250_init_fixed_type_port(up, up->port.type);
+               uart_add_one_port(drv, &up->port);
+       }
+ }
+ #ifdef CONFIG_SERIAL_8250_CONSOLE
+ static void serial8250_console_putchar(struct uart_port *port, int ch)
+ {
+       struct uart_8250_port *up =
+               container_of(port, struct uart_8250_port, port);
+       wait_for_xmitr(up, UART_LSR_THRE);
+       serial_out(up, UART_TX, ch);
+ }
+ /*
+  *    Print a string to the serial port trying not to disturb
+  *    any possible real use of the port...
+  *
+  *    The console_lock must be held when we get here.
+  */
+ static void
+ serial8250_console_write(struct console *co, const char *s, unsigned int count)
+ {
+       struct uart_8250_port *up = &serial8250_ports[co->index];
+       unsigned long flags;
+       unsigned int ier;
+       int locked = 1;
+       touch_nmi_watchdog();
+       local_irq_save(flags);
+       if (up->port.sysrq) {
+               /* serial8250_handle_irq() already took the lock */
+               locked = 0;
+       } else if (oops_in_progress) {
+               locked = spin_trylock(&up->port.lock);
+       } else
+               spin_lock(&up->port.lock);
+       /*
+        *      First save the IER then disable the interrupts
+        */
+       ier = serial_in(up, UART_IER);
+       if (up->capabilities & UART_CAP_UUE)
+               serial_out(up, UART_IER, UART_IER_UUE);
+       else
+               serial_out(up, UART_IER, 0);
+       uart_console_write(&up->port, s, count, serial8250_console_putchar);
+       /*
+        *      Finally, wait for transmitter to become empty
+        *      and restore the IER
+        */
+       wait_for_xmitr(up, BOTH_EMPTY);
+       serial_out(up, UART_IER, ier);
+       /*
+        *      The receive handling will happen properly because the
+        *      receive ready bit will still be set; it is not cleared
+        *      on read.  However, modem status handling will not, so we
+        *      must call it ourselves if we have saved anything in the
+        *      saved flags while processing with interrupts off.
+        */
+       if (up->msr_saved_flags)
+               serial8250_modem_status(up);
+       if (locked)
+               spin_unlock(&up->port.lock);
+       local_irq_restore(flags);
+ }
+ static int __init serial8250_console_setup(struct console *co, char *options)
+ {
+       struct uart_port *port;
+       int baud = 9600;
+       int bits = 8;
+       int parity = 'n';
+       int flow = 'n';
+       /*
+        * Check whether an invalid uart number has been specified, and
+        * if so, search for the first available port that does have
+        * console support.
+        */
+       if (co->index >= nr_uarts)
+               co->index = 0;
+       port = &serial8250_ports[co->index].port;
+       if (!port->iobase && !port->membase)
+               return -ENODEV;
+       if (options)
+               uart_parse_options(options, &baud, &parity, &bits, &flow);
+       return uart_set_options(port, co, baud, parity, bits, flow);
+ }
+ static int serial8250_console_early_setup(void)
+ {
+       return serial8250_find_port_for_earlycon();
+ }
+ static struct console serial8250_console = {
+       .name           = "ttyS",
+       .write          = serial8250_console_write,
+       .device         = uart_console_device,
+       .setup          = serial8250_console_setup,
+       .early_setup    = serial8250_console_early_setup,
+       .flags          = CON_PRINTBUFFER | CON_ANYTIME,
+       .index          = -1,
+       .data           = &serial8250_reg,
+ };
+ static int __init serial8250_console_init(void)
+ {
+       if (nr_uarts > UART_NR)
+               nr_uarts = UART_NR;
+       serial8250_isa_init_ports();
+       register_console(&serial8250_console);
+       return 0;
+ }
+ console_initcall(serial8250_console_init);
+ int serial8250_find_port(struct uart_port *p)
+ {
+       int line;
+       struct uart_port *port;
+       for (line = 0; line < nr_uarts; line++) {
+               port = &serial8250_ports[line].port;
+               if (uart_match_port(p, port))
+                       return line;
+       }
+       return -ENODEV;
+ }
+ #define SERIAL8250_CONSOLE    &serial8250_console
+ #else
+ #define SERIAL8250_CONSOLE    NULL
+ #endif
+ static struct uart_driver serial8250_reg = {
+       .owner                  = THIS_MODULE,
+       .driver_name            = "serial",
+       .dev_name               = "ttyS",
+       .major                  = TTY_MAJOR,
+       .minor                  = 64,
+       .cons                   = SERIAL8250_CONSOLE,
+ };
+ /*
+  * early_serial_setup - early registration for 8250 ports
+  *
+  * Set up an 8250 port structure prior to console initialisation.  Using
+  * it after console initialisation will cause undefined behaviour.
+  */
+ int __init early_serial_setup(struct uart_port *port)
+ {
+       struct uart_port *p;
+       if (port->line >= ARRAY_SIZE(serial8250_ports))
+               return -ENODEV;
+       serial8250_isa_init_ports();
+       p = &serial8250_ports[port->line].port;
+       p->iobase       = port->iobase;
+       p->membase      = port->membase;
+       p->irq          = port->irq;
+       p->irqflags     = port->irqflags;
+       p->uartclk      = port->uartclk;
+       p->fifosize     = port->fifosize;
+       p->regshift     = port->regshift;
+       p->iotype       = port->iotype;
+       p->flags        = port->flags;
+       p->mapbase      = port->mapbase;
+       p->private_data = port->private_data;
+       p->type         = port->type;
+       p->line         = port->line;
+       set_io_from_upio(p);
+       if (port->serial_in)
+               p->serial_in = port->serial_in;
+       if (port->serial_out)
+               p->serial_out = port->serial_out;
+       if (port->handle_irq)
+               p->handle_irq = port->handle_irq;
+       else
+               p->handle_irq = serial8250_default_handle_irq;
+       return 0;
+ }
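A hedged sketch of how arch setup code typically calls this, assuming a standard PC-style COM1 at 0x3f8/IRQ4 (board_early_uart and the values are illustrative, not part of this patch):

static int __init board_early_uart(void)
{
	struct uart_port port;

	memset(&port, 0, sizeof(port));
	port.line     = 0;
	port.iobase   = 0x3f8;
	port.irq      = 4;
	port.uartclk  = 1843200;
	port.iotype   = UPIO_PORT;
	port.flags    = UPF_BOOT_AUTOCONF | UPF_SKIP_TEST;

	return early_serial_setup(&port);
}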
+ /**
+  *    serial8250_suspend_port - suspend one serial port
+  *    @line:  serial line number
+  *
+  *    Suspend one serial port.
+  */
+ void serial8250_suspend_port(int line)
+ {
+       uart_suspend_port(&serial8250_reg, &serial8250_ports[line].port);
+ }
+ /**
+  *    serial8250_resume_port - resume one serial port
+  *    @line:  serial line number
+  *
+  *    Resume one serial port.
+  */
+ void serial8250_resume_port(int line)
+ {
+       struct uart_8250_port *up = &serial8250_ports[line];
+       if (up->capabilities & UART_NATSEMI) {
+               /* Ensure it's still in high speed mode */
+               serial_outp(up, UART_LCR, 0xE0);
+               ns16550a_goto_highspeed(up);
+               serial_outp(up, UART_LCR, 0);
+               up->port.uartclk = 921600*16;
+       }
+       uart_resume_port(&serial8250_reg, &up->port);
+ }
+ /*
+  * Register a set of serial devices attached to a platform device.  The
+  * list is terminated with a zero flags entry, which means we expect
+  * all entries to have at least UPF_BOOT_AUTOCONF set.
+  */
+ static int __devinit serial8250_probe(struct platform_device *dev)
+ {
+       struct plat_serial8250_port *p = dev->dev.platform_data;
+       struct uart_port port;
+       int ret, i, irqflag = 0;
+       memset(&port, 0, sizeof(struct uart_port));
+       if (share_irqs)
+               irqflag = IRQF_SHARED;
+       for (i = 0; p && p->flags != 0; p++, i++) {
+               port.iobase             = p->iobase;
+               port.membase            = p->membase;
+               port.irq                = p->irq;
+               port.irqflags           = p->irqflags;
+               port.uartclk            = p->uartclk;
+               port.regshift           = p->regshift;
+               port.iotype             = p->iotype;
+               port.flags              = p->flags;
+               port.mapbase            = p->mapbase;
+               port.hub6               = p->hub6;
+               port.private_data       = p->private_data;
+               port.type               = p->type;
+               port.serial_in          = p->serial_in;
+               port.serial_out         = p->serial_out;
+               port.handle_irq         = p->handle_irq;
+               port.set_termios        = p->set_termios;
+               port.pm                 = p->pm;
+               port.dev                = &dev->dev;
+               port.irqflags           |= irqflag;
+               ret = serial8250_register_port(&port);
+               if (ret < 0) {
+                       dev_err(&dev->dev, "unable to register port at index %d "
+                               "(IO%lx MEM%llx IRQ%d): %d\n", i,
+                               p->iobase, (unsigned long long)p->mapbase,
+                               p->irq, ret);
+               }
+       }
+       return 0;
+ }
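As an illustration of the platform data serial8250_probe() walks, a hypothetical board file could provide something like the sketch below; the zero-flags terminator and UPF_BOOT_AUTOCONF follow the comment above, while the addresses, IRQ and clock are made-up values.

#include <linux/platform_device.h>
#include <linux/serial_8250.h>

/* Example platform data: one legacy port, terminated by a zero-flags entry. */
static struct plat_serial8250_port board_uart_data[] = {
        {
                .iobase  = 0x3f8,
                .irq     = 4,
                .uartclk = 1843200,
                .iotype  = UPIO_PORT,
                .flags   = UPF_BOOT_AUTOCONF | UPF_SKIP_TEST,
        },
        { },    /* zero flags: end of list */
};

static struct platform_device board_uart_device = {
        .name = "serial8250",
        .id   = PLAT8250_DEV_PLATFORM,
        .dev  = {
                .platform_data = board_uart_data,
        },
};
/* Registered from board init with platform_device_register(&board_uart_device). */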
+ /*
+  * Remove serial ports registered against a platform device.
+  */
+ static int __devexit serial8250_remove(struct platform_device *dev)
+ {
+       int i;
+       for (i = 0; i < nr_uarts; i++) {
+               struct uart_8250_port *up = &serial8250_ports[i];
+               if (up->port.dev == &dev->dev)
+                       serial8250_unregister_port(i);
+       }
+       return 0;
+ }
+ static int serial8250_suspend(struct platform_device *dev, pm_message_t state)
+ {
+       int i;
+       for (i = 0; i < UART_NR; i++) {
+               struct uart_8250_port *up = &serial8250_ports[i];
+               if (up->port.type != PORT_UNKNOWN && up->port.dev == &dev->dev)
+                       uart_suspend_port(&serial8250_reg, &up->port);
+       }
+       return 0;
+ }
+ static int serial8250_resume(struct platform_device *dev)
+ {
+       int i;
+       for (i = 0; i < UART_NR; i++) {
+               struct uart_8250_port *up = &serial8250_ports[i];
+               if (up->port.type != PORT_UNKNOWN && up->port.dev == &dev->dev)
+                       serial8250_resume_port(i);
+       }
+       return 0;
+ }
+ static struct platform_driver serial8250_isa_driver = {
+       .probe          = serial8250_probe,
+       .remove         = __devexit_p(serial8250_remove),
+       .suspend        = serial8250_suspend,
+       .resume         = serial8250_resume,
+       .driver         = {
+               .name   = "serial8250",
+               .owner  = THIS_MODULE,
+       },
+ };
+ /*
+  * This "device" covers _all_ ISA 8250-compatible serial devices listed
+  * in the table in include/asm/serial.h
+  */
+ static struct platform_device *serial8250_isa_devs;
+ /*
+  * serial8250_register_port and serial8250_unregister_port allows for
+  * 16x50 serial ports to be configured at run-time, to support PCMCIA
+  * modems and PCI multiport cards.
+  */
+ static DEFINE_MUTEX(serial_mutex);
+ static struct uart_8250_port *serial8250_find_match_or_unused(struct uart_port *port)
+ {
+       int i;
+       /*
+        * First, find a port entry which matches.
+        */
+       for (i = 0; i < nr_uarts; i++)
+               if (uart_match_port(&serial8250_ports[i].port, port))
+                       return &serial8250_ports[i];
+       /*
+        * We didn't find a matching entry, so look for the first
+        * free entry.  We look for one which hasn't been previously
+        * used (indicated by zero iobase).
+        */
+       for (i = 0; i < nr_uarts; i++)
+               if (serial8250_ports[i].port.type == PORT_UNKNOWN &&
+                   serial8250_ports[i].port.iobase == 0)
+                       return &serial8250_ports[i];
+       /*
+        * That also failed.  Last resort is to find any entry which
+        * doesn't have a real port associated with it.
+        */
+       for (i = 0; i < nr_uarts; i++)
+               if (serial8250_ports[i].port.type == PORT_UNKNOWN)
+                       return &serial8250_ports[i];
+       return NULL;
+ }
+ /**
+  *    serial8250_register_port - register a serial port
+  *    @port: serial port template
+  *
+  *    Configure the serial port specified by the request. If the
+  *    port exists and is in use, it is hung up and unregistered
+  *    first.
+  *
+  *    The port is then probed and, if necessary, the IRQ is autodetected.
+  *    If this fails an error is returned.
+  *
+  *    On success the port is ready to use and the line number is returned.
+  */
+ int serial8250_register_port(struct uart_port *port)
+ {
+       struct uart_8250_port *uart;
+       int ret = -ENOSPC;
+       if (port->uartclk == 0)
+               return -EINVAL;
+       mutex_lock(&serial_mutex);
+       uart = serial8250_find_match_or_unused(port);
+       if (uart) {
+               uart_remove_one_port(&serial8250_reg, &uart->port);
+               uart->port.iobase       = port->iobase;
+               uart->port.membase      = port->membase;
+               uart->port.irq          = port->irq;
+               uart->port.irqflags     = port->irqflags;
+               uart->port.uartclk      = port->uartclk;
+               uart->port.fifosize     = port->fifosize;
+               uart->port.regshift     = port->regshift;
+               uart->port.iotype       = port->iotype;
+               uart->port.flags        = port->flags | UPF_BOOT_AUTOCONF;
+               uart->port.mapbase      = port->mapbase;
+               uart->port.private_data = port->private_data;
+               if (port->dev)
+                       uart->port.dev = port->dev;
+               if (port->flags & UPF_FIXED_TYPE)
+                       serial8250_init_fixed_type_port(uart, port->type);
+               set_io_from_upio(&uart->port);
+               /* Possibly override default I/O functions.  */
+               if (port->serial_in)
+                       uart->port.serial_in = port->serial_in;
+               if (port->serial_out)
+                       uart->port.serial_out = port->serial_out;
+               if (port->handle_irq)
+                       uart->port.handle_irq = port->handle_irq;
+               /*  Possibly override set_termios call */
+               if (port->set_termios)
+                       uart->port.set_termios = port->set_termios;
+               if (port->pm)
+                       uart->port.pm = port->pm;
+               if (serial8250_isa_config != NULL)
+                       serial8250_isa_config(0, &uart->port,
+                                       &uart->capabilities);
+               ret = uart_add_one_port(&serial8250_reg, &uart->port);
+               if (ret == 0)
+                       ret = uart->port.line;
+       }
+       mutex_unlock(&serial_mutex);
+       return ret;
+ }
+ EXPORT_SYMBOL(serial8250_register_port);
+ /**
+  *    serial8250_unregister_port - remove a 16x50 serial port at runtime
+  *    @line: serial line number
+  *
+  *    Remove one serial port.  This may not be called from interrupt
+  *    context.  We hand the port back to our control.
+  */
+ void serial8250_unregister_port(int line)
+ {
+       struct uart_8250_port *uart = &serial8250_ports[line];
+       mutex_lock(&serial_mutex);
+       uart_remove_one_port(&serial8250_reg, &uart->port);
+       if (serial8250_isa_devs) {
+               uart->port.flags &= ~UPF_BOOT_AUTOCONF;
+               uart->port.type = PORT_UNKNOWN;
+               uart->port.dev = &serial8250_isa_devs->dev;
+               uart->capabilities = uart_config[uart->port.type].flags;
+               uart_add_one_port(&serial8250_reg, &uart->port);
+       } else {
+               uart->port.dev = NULL;
+       }
+       mutex_unlock(&serial_mutex);
+ }
+ EXPORT_SYMBOL(serial8250_unregister_port);
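A minimal sketch of the runtime add/remove pairing these exports are intended for (PCMCIA modems, PCI multiport cards); struct my_card and its attach/detach callbacks are invented for illustration, only the serial8250_{register,unregister}_port() calls are real.

#include <linux/serial_8250.h>

struct my_card {
        struct uart_port port;  /* filled in from bus resources elsewhere */
        int line;               /* line number handed back on registration */
};

static int my_card_attach(struct my_card *card)
{
        card->port.flags |= UPF_SHARE_IRQ;
        card->line = serial8250_register_port(&card->port);
        return card->line < 0 ? card->line : 0;
}

static void my_card_detach(struct my_card *card)
{
        if (card->line >= 0)
                serial8250_unregister_port(card->line);
}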
+ static int __init serial8250_init(void)
+ {
+       int ret;
+       if (nr_uarts > UART_NR)
+               nr_uarts = UART_NR;
+       printk(KERN_INFO "Serial: 8250/16550 driver, "
+               "%d ports, IRQ sharing %sabled\n", nr_uarts,
+               share_irqs ? "en" : "dis");
+ #ifdef CONFIG_SPARC
+       ret = sunserial_register_minors(&serial8250_reg, UART_NR);
+ #else
+       serial8250_reg.nr = UART_NR;
+       ret = uart_register_driver(&serial8250_reg);
+ #endif
+       if (ret)
+               goto out;
+       serial8250_isa_devs = platform_device_alloc("serial8250",
+                                                   PLAT8250_DEV_LEGACY);
+       if (!serial8250_isa_devs) {
+               ret = -ENOMEM;
+               goto unreg_uart_drv;
+       }
+       ret = platform_device_add(serial8250_isa_devs);
+       if (ret)
+               goto put_dev;
+       serial8250_register_ports(&serial8250_reg, &serial8250_isa_devs->dev);
+       ret = platform_driver_register(&serial8250_isa_driver);
+       if (ret == 0)
+               goto out;
+       platform_device_del(serial8250_isa_devs);
+ put_dev:
+       platform_device_put(serial8250_isa_devs);
+ unreg_uart_drv:
+ #ifdef CONFIG_SPARC
+       sunserial_unregister_minors(&serial8250_reg, UART_NR);
+ #else
+       uart_unregister_driver(&serial8250_reg);
+ #endif
+ out:
+       return ret;
+ }
+ static void __exit serial8250_exit(void)
+ {
+       struct platform_device *isa_dev = serial8250_isa_devs;
+       /*
+        * This tells serial8250_unregister_port() not to re-register
+        * the ports (thereby leaving serial8250_isa_driver permanently
+        * in use).
+        */
+       serial8250_isa_devs = NULL;
+       platform_driver_unregister(&serial8250_isa_driver);
+       platform_device_unregister(isa_dev);
+ #ifdef CONFIG_SPARC
+       sunserial_unregister_minors(&serial8250_reg, UART_NR);
+ #else
+       uart_unregister_driver(&serial8250_reg);
+ #endif
+ }
+ module_init(serial8250_init);
+ module_exit(serial8250_exit);
+ EXPORT_SYMBOL(serial8250_suspend_port);
+ EXPORT_SYMBOL(serial8250_resume_port);
+ MODULE_LICENSE("GPL");
+ MODULE_DESCRIPTION("Generic 8250/16x50 serial driver");
+ module_param(share_irqs, uint, 0644);
+ MODULE_PARM_DESC(share_irqs, "Share IRQs with other non-8250/16x50 devices"
+       " (unsafe)");
+ module_param(nr_uarts, uint, 0644);
+ MODULE_PARM_DESC(nr_uarts, "Maximum number of UARTs supported. (1-" __MODULE_STRING(CONFIG_SERIAL_8250_NR_UARTS) ")");
+ module_param(skip_txen_test, uint, 0644);
+ MODULE_PARM_DESC(skip_txen_test, "Skip checking for the TXEN bug at init time");
+ #ifdef CONFIG_SERIAL_8250_RSA
+ module_param_array(probe_rsa, ulong, &probe_rsa_count, 0444);
+ MODULE_PARM_DESC(probe_rsa, "Probe I/O ports for RSA");
+ #endif
+ MODULE_ALIAS_CHARDEV_MAJOR(TTY_MAJOR);
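When the driver is built in, the same parameters can also be given on the kernel command line with the module-name prefix, e.g. 8250.nr_uarts=8 8250.share_irqs=1 (the prefix is assumed from the module name here); for a modular build they can be passed as modprobe options or, where the 0644 permissions above allow it, changed later through /sys/module/8250/parameters/.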
Simple merge
@@@ -13,8 -13,7 +13,8 @@@ fb-objs                           := $(
  
  obj-$(CONFIG_VT)                += console/
  obj-$(CONFIG_LOGO)              += logo/
- obj-y                           += backlight/ display/
+ obj-y                           += backlight/
 +obj-$(CONFIG_BOOTSPLASH)        += bootsplash/
  
  obj-$(CONFIG_FB_CFB_FILLRECT)  += cfbfillrect.o
  obj-$(CONFIG_FB_CFB_COPYAREA)  += cfbcopyarea.o
Simple merge
diff --cc fs/Kconfig
Simple merge
diff --cc fs/Makefile
Simple merge
diff --cc fs/ext4/ext4.h
Simple merge
Simple merge
diff --cc fs/ext4/inode.c
Simple merge
diff --cc fs/ext4/namei.c
Simple merge
diff --cc fs/ext4/super.c
Simple merge
diff --cc fs/namei.c
@@@ -2024,10 -1977,10 +2025,10 @@@ void unlock_rename(struct dentry *p1, s
        }
  }
  
- int vfs_create(struct inode *dir, struct dentry *dentry, int mode,
+ int vfs_create(struct inode *dir, struct dentry *dentry, umode_t mode,
                struct nameidata *nd)
  {
 -      int error = may_create(dir, dentry);
 +      int error = may_create(dir, dentry, 0);
  
        if (error)
                return error;
@@@ -2225,8 -2178,8 +2226,8 @@@ static struct file *do_last(struct name
  
        /* Negative dentry, just create the file */
        if (!dentry->d_inode) {
-               int mode = op->mode;
+               umode_t mode = op->mode;
 -              if (!IS_POSIXACL(dir->d_inode))
 +              if (!IS_ACL(dir->d_inode))
                        mode &= ~current_umask();
                /*
                 * This write is needed to ensure that a
@@@ -2492,9 -2445,9 +2493,9 @@@ struct dentry *user_path_create(int dfd
  }
  EXPORT_SYMBOL(user_path_create);
  
- int vfs_mknod(struct inode *dir, struct dentry *dentry, int mode, dev_t dev)
+ int vfs_mknod(struct inode *dir, struct dentry *dentry, umode_t mode, dev_t dev)
  {
 -      int error = may_create(dir, dentry);
 +      int error = may_create(dir, dentry, 0);
  
        if (error)
                return error;
@@@ -2589,9 -2542,9 +2590,9 @@@ SYSCALL_DEFINE3(mknod, const char __use
        return sys_mknodat(AT_FDCWD, filename, mode, dev);
  }
  
- int vfs_mkdir(struct inode *dir, struct dentry *dentry, int mode)
+ int vfs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
  {
 -      int error = may_create(dir, dentry);
 +      int error = may_create(dir, dentry, 1);
  
        if (error)
                return error;
diff --cc fs/nfs/dir.c
Simple merge
diff --cc fs/nfs/inode.c
Simple merge
diff --cc fs/super.c
@@@ -714,17 -723,20 +718,20 @@@ static int __do_remount_sb(struct super
        /* If we are remounting RDONLY and current sb is read/write,
           make sure there are no rw files opened */
        if (remount_ro) {
-               if (rflags & REMOUNT_FORCE)
 -              if (force) {
++              if (rflags & REMOUNT_FORCE) {
                        mark_files_ro(sb);
-               else if (!fs_may_remount_ro(sb))
-                       return -EBUSY;
+               } else {
+                       retval = sb_prepare_remount_readonly(sb);
+                       if (retval)
+                               return retval;
+               }
        }
  
        if (sb->s_op->remount_fs) {
                retval = sb->s_op->remount_fs(sb, &flags, data);
                if (retval) {
 -                      if (!force)
 +                      if (!(rflags & REMOUNT_FORCE))
-                               return retval;
+                               goto cancel_readonly;
                        /* If forced remount, go ahead despite any errors */
                        WARN(1, "forced remount of a %s fs returned %i\n",
                             sb->s_type->name, retval);
        if (remount_ro && sb->s_bdev)
                invalidate_bdev(sb->s_bdev);
        return 0;
+ cancel_readonly:
+       sb->s_readonly_remount = 0;
+       return retval;
  }
  
 +/**
 + *    do_remount_sb - asks filesystem to change mount options.
 + *    @sb:    superblock in question
 + *    @flags: numeric part of options
 + *    @data:  the rest of options
 + *    @force: whether or not to force the change
 + *
 + *    Alters the mount options of a mounted file system.
 + */
 +int do_remount_sb(struct super_block *sb, int flags, void *data, int force)
 +{
 +      return __do_remount_sb(sb, flags, data,
 +                      REMOUNT_SHRINK_DCACHE|(force? REMOUNT_FORCE : 0));
 +}
 +
  static void do_emergency_remount(struct work_struct *work)
  {
        struct super_block *sb, *p = NULL;
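To make the new split concrete, here is a sketch of how a forced read-only remount would funnel through the do_remount_sb() wrapper added above (force = 1 becomes REMOUNT_FORCE inside __do_remount_sb()); it mirrors what the emergency-remount path does but is an assumption, not code from this hunk.

/* Sketch only, assuming fs/super.c context: force a mounted filesystem
 * read-only via the wrapper above.  The caller is assumed to already
 * hold a reference on the superblock. */
static void force_readonly(struct super_block *sb)
{
        do_remount_sb(sb, sb->s_flags | MS_RDONLY, NULL, 1);
}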
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
@@@ -341,10 -342,9 +342,11 @@@ extern int panic_timeout
  extern int panic_on_oops;
  extern int panic_on_unrecovered_nmi;
  extern int panic_on_io_nmi;
 +extern int unsupported;
+ extern int sysctl_panic_on_stackoverflow;
  extern const char *print_tainted(void);
  extern void add_taint(unsigned flag);
 +extern void add_nonfatal_taint(unsigned flag);
  extern int test_taint(unsigned flag);
  extern unsigned long get_taint(void);
  extern int root_mountflags;
Simple merge
Simple merge
Simple merge
Simple merge
diff --cc init/Kconfig
@@@ -741,8 -727,7 +766,7 @@@ config CGROUP_PER
  
  menuconfig CGROUP_SCHED
        bool "Group CPU scheduler"
-       depends on EXPERIMENTAL
 -      default n
 +      default !KERNEL_DESKTOP
        help
          This feature lets CPU scheduler recognize task groups and control CPU
          bandwidth allocation to such task groups. It uses cgroups to group
diff --cc init/main.c
@@@ -476,8 -469,8 +470,9 @@@ asmlinkage void __init start_kernel(voi
         * Need to run as early as possible, to initialize the
         * lockdep hash:
         */
 +      unwind_init();
        lockdep_init();
+       smp_setup_processor_id();
        debug_objects_early_init();
  
        /*
Simple merge
diff --cc kernel/Makefile
@@@ -52,9 -53,6 +53,7 @@@ obj-$(CONFIG_PROVE_LOCKING) += spinlock
  obj-$(CONFIG_UID16) += uid16.o
  obj-$(CONFIG_MODULES) += module.o
  obj-$(CONFIG_KALLSYMS) += kallsyms.o
 +obj-$(CONFIG_STACK_UNWIND) += unwind.o
- obj-$(CONFIG_PM) += power/
- obj-$(CONFIG_FREEZER) += power/
  obj-$(CONFIG_BSD_PROCESS_ACCT) += acct.o
  obj-$(CONFIG_KEXEC) += kexec.o
  obj-$(CONFIG_BACKTRACE_SELF_TEST) += backtracetest.o
diff --cc kernel/ksysfs.c
Simple merge
diff --cc kernel/module.c
@@@ -989,6 -942,26 +980,32 @@@ static inline int module_unload_init(st
  }
  #endif /* CONFIG_MODULE_UNLOAD */
  
+ static size_t module_flags_taint(struct module *mod, char *buf)
+ {
+       size_t l = 0;
+       if (mod->taints & (1 << TAINT_PROPRIETARY_MODULE))
+               buf[l++] = 'P';
+       if (mod->taints & (1 << TAINT_OOT_MODULE))
+               buf[l++] = 'O';
+       if (mod->taints & (1 << TAINT_FORCED_MODULE))
+               buf[l++] = 'F';
+       if (mod->taints & (1 << TAINT_CRAP))
+               buf[l++] = 'C';
++#ifdef CONFIG_ENTERPRISE_SUPPORT
++      if (mod->taints & (1 << TAINT_NO_SUPPORT))
++              buf[l++] = 'N';
++      if (mod->taints & (1 << TAINT_EXTERNAL_SUPPORT))
++              buf[l++] = 'X';
++#endif
+       /*
+        * TAINT_FORCED_RMMOD: could be added.
+        * TAINT_UNSAFE_SMP, TAINT_MACHINE_CHECK, TAINT_BAD_PAGE don't
+        * apply to modules.
+        */
+       return l;
+ }
  static ssize_t show_initstate(struct module_attribute *mattr,
                              struct module_kobject *mk, char *buffer)
  {
@@@ -1024,48 -995,50 +1039,80 @@@ static ssize_t store_uevent(struct modu
        return count;
  }
  
- struct module_attribute module_uevent = {
-       .attr = { .name = "uevent", .mode = 0200 },
-       .store = store_uevent,
- };
+ struct module_attribute module_uevent =
+       __ATTR(uevent, 0200, NULL, store_uevent);
+ static ssize_t show_coresize(struct module_attribute *mattr,
+                            struct module_kobject *mk, char *buffer)
+ {
+       return sprintf(buffer, "%u\n", mk->mod->core_size);
+ }
+ static struct module_attribute modinfo_coresize =
+       __ATTR(coresize, 0444, show_coresize, NULL);
+ static ssize_t show_initsize(struct module_attribute *mattr,
+                            struct module_kobject *mk, char *buffer)
+ {
+       return sprintf(buffer, "%u\n", mk->mod->init_size);
+ }
+ static struct module_attribute modinfo_initsize =
+       __ATTR(initsize, 0444, show_initsize, NULL);
+ static ssize_t show_taint(struct module_attribute *mattr,
+                         struct module_kobject *mk, char *buffer)
+ {
+       size_t l;
+       l = module_flags_taint(mk->mod, buffer);
+       buffer[l++] = '\n';
+       return l;
+ }
+ static struct module_attribute modinfo_taint =
+       __ATTR(taint, 0444, show_taint, NULL);
  
 +#ifdef CONFIG_ENTERPRISE_SUPPORT
 +static void setup_modinfo_supported(struct module *mod, const char *s)
 +{
 +      if (!s) {
 +              mod->taints |= (1 << TAINT_NO_SUPPORT);
 +              return;
 +      }
 +
 +      if (strcmp(s, "external") == 0)
 +              mod->taints |= (1 << TAINT_EXTERNAL_SUPPORT);
 +      else if (strcmp(s, "yes"))
 +              mod->taints |= (1 << TAINT_NO_SUPPORT);
 +}
 +
 +static ssize_t show_modinfo_supported(struct module_attribute *mattr,
 +                                    struct module_kobject *mk, char *buffer)
 +{
 +      return sprintf(buffer, "%s\n", supported_printable(mk->mod->taints));
 +}
 +
 +static struct module_attribute modinfo_supported = {
 +      .attr = { .name = "supported", .mode = 0444 },
 +      .show = show_modinfo_supported,
 +      .setup = setup_modinfo_supported,
 +};
 +#endif
 +
  static struct module_attribute *modinfo_attrs[] = {
+       &module_uevent,
        &modinfo_version,
        &modinfo_srcversion,
-       &initstate,
-       &module_uevent,
+       &modinfo_initstate,
+       &modinfo_coresize,
+       &modinfo_initsize,
+       &modinfo_taint,
 +#ifdef CONFIG_ENTERPRISE_SUPPORT
 +      &modinfo_supported,
 +#endif
  #ifdef CONFIG_MODULE_UNLOAD
-       &refcnt,
+       &modinfo_refcnt,
  #endif
        NULL,
  };
@@@ -3001,11 -2931,7 +3036,10 @@@ static struct module *load_module(void 
        if (err < 0)
                goto unlink;
  
 +      /* Initialize unwind table */
 +      add_unwind_table(mod, &info);
 +
-       /* Get rid of temporary copy and strmap. */
-       kfree(info.strmap);
+       /* Get rid of temporary copy. */
        free_copy(&info);
  
        /* Done! */
diff --cc kernel/panic.c
Simple merge
diff --cc kernel/printk.c
Simple merge
diff --cc kernel/sysctl.c
Simple merge
Simple merge
Simple merge
diff --cc mm/page_alloc.c
Simple merge
diff --cc mm/truncate.c
@@@ -292,14 -292,7 +292,14 @@@ void truncate_inode_pages_range(struct 
                mem_cgroup_uncharge_end();
                index++;
        }
-       cleancache_invalidate_inode(mapping);
+       cleancache_flush_inode(mapping);
 +      /*
 +       * Cycle the tree_lock to make sure all __delete_from_page_cache()
 +       * calls run from page reclaim have finished as well (this handles the
 +       * case when page reclaim took the last page from our range).
 +       */
 +      spin_lock_irq(&mapping->tree_lock);
 +      spin_unlock_irq(&mapping->tree_lock);
  }
  EXPORT_SYMBOL(truncate_inode_pages_range);
  
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge