KVM: fix kvmclock regression due to missing clock update
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 7d28805..0556e05 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -6,7 +6,7 @@
  * Copyright (C) 2006 Qumranet, Inc.
  * Copyright (C) 2008 Qumranet, Inc.
  * Copyright IBM Corporation, 2008
- * Copyright 2010 Red Hat, Inc. and/or its affilates.
+ * Copyright 2010 Red Hat, Inc. and/or its affiliates.
  *
  * Authors:
  *   Avi Kivity   <avi@qumranet.com>
@@ -43,6 +43,7 @@
 #include <linux/slab.h>
 #include <linux/perf_event.h>
 #include <linux/uaccess.h>
+#include <linux/hash.h>
 #include <trace/events/kvm.h>
 
 #define CREATE_TRACE_POINTS
@@ -73,7 +74,7 @@
 #define CR8_RESERVED_BITS (~(unsigned long)X86_CR8_TPR)
 
 #define KVM_MAX_MCE_BANKS 32
-#define KVM_MCE_CAP_SUPPORTED MCG_CTL_P
+#define KVM_MCE_CAP_SUPPORTED (MCG_CTL_P | MCG_SER_P)
 
 /* EFER defaults:
  * - enable syscall per default because it's emulated by KVM
@@ -155,9 +156,11 @@ struct kvm_stats_debugfs_item debugfs_entries[] = {
 
 u64 __read_mostly host_xcr0;
 
-static inline u32 bit(int bitno)
+static inline void kvm_async_pf_hash_reset(struct kvm_vcpu *vcpu)
 {
-       return 1 << (bitno & 31);
+       int i;
+       for (i = 0; i < roundup_pow_of_two(ASYNC_PF_PER_VCPU); i++)
+               vcpu->arch.apf.gfns[i] = ~0;
 }
 
 static void kvm_on_user_return(struct user_return_notifier *urn)
@@ -331,23 +334,28 @@ void kvm_requeue_exception(struct kvm_vcpu *vcpu, unsigned nr)
 }
 EXPORT_SYMBOL_GPL(kvm_requeue_exception);
 
-void kvm_inject_page_fault(struct kvm_vcpu *vcpu)
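+/*
+ * Complete an instruction that can fail with #GP: on error inject
+ * #GP(0) into the guest, otherwise skip past the emulated instruction.
+ */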
+void kvm_complete_insn_gp(struct kvm_vcpu *vcpu, int err)
 {
-       unsigned error_code = vcpu->arch.fault.error_code;
+       if (err)
+               kvm_inject_gp(vcpu, 0);
+       else
+               kvm_x86_ops->skip_emulated_instruction(vcpu);
+}
+EXPORT_SYMBOL_GPL(kvm_complete_insn_gp);
 
+void kvm_inject_page_fault(struct kvm_vcpu *vcpu, struct x86_exception *fault)
+{
        ++vcpu->stat.pf_guest;
-       vcpu->arch.cr2 = vcpu->arch.fault.address;
-       kvm_queue_exception_e(vcpu, PF_VECTOR, error_code);
+       vcpu->arch.cr2 = fault->address;
+       kvm_queue_exception_e(vcpu, PF_VECTOR, fault->error_code);
 }
 
-void kvm_propagate_fault(struct kvm_vcpu *vcpu)
+void kvm_propagate_fault(struct kvm_vcpu *vcpu, struct x86_exception *fault)
 {
-       if (mmu_is_nested(vcpu) && !vcpu->arch.fault.nested)
-               vcpu->arch.nested_mmu.inject_page_fault(vcpu);
+       if (mmu_is_nested(vcpu) && !fault->nested_page_fault)
+               vcpu->arch.nested_mmu.inject_page_fault(vcpu, fault);
        else
-               vcpu->arch.mmu.inject_page_fault(vcpu);
-
-       vcpu->arch.fault.nested = false;
+               vcpu->arch.mmu.inject_page_fault(vcpu, fault);
 }
 
 void kvm_inject_nmi(struct kvm_vcpu *vcpu)
@@ -465,8 +473,8 @@ static bool pdptrs_changed(struct kvm_vcpu *vcpu)
                      (unsigned long *)&vcpu->arch.regs_avail))
                return true;
 
-       gfn = (vcpu->arch.cr3 & ~31u) >> PAGE_SHIFT;
-       offset = (vcpu->arch.cr3 & ~31u) & (PAGE_SIZE - 1);
+       gfn = (kvm_read_cr3(vcpu) & ~31u) >> PAGE_SHIFT;
+       offset = (kvm_read_cr3(vcpu) & ~31u) & (PAGE_SIZE - 1);
        r = kvm_read_nested_guest_page(vcpu, gfn, pdpte, offset, sizeof(pdpte),
                                       PFERR_USER_MASK | PFERR_WRITE_MASK);
        if (r < 0)
@@ -511,12 +519,15 @@ int kvm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
                } else
 #endif
                if (is_pae(vcpu) && !load_pdptrs(vcpu, vcpu->arch.walk_mmu,
-                                                vcpu->arch.cr3))
+                                                kvm_read_cr3(vcpu)))
                        return 1;
        }
 
        kvm_x86_ops->set_cr0(vcpu, cr0);
 
+       if ((cr0 ^ old_cr0) & X86_CR0_PG)
+               kvm_clear_async_pf_completion_queue(vcpu);
+
        if ((cr0 ^ old_cr0) & update_bits)
                kvm_mmu_reset_context(vcpu);
        return 0;
@@ -600,7 +611,8 @@ int kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
                        return 1;
        } else if (is_paging(vcpu) && (cr4 & X86_CR4_PAE)
                   && ((cr4 ^ old_cr4) & pdptr_bits)
-                  && !load_pdptrs(vcpu, vcpu->arch.walk_mmu, vcpu->arch.cr3))
+                  && !load_pdptrs(vcpu, vcpu->arch.walk_mmu,
+                                  kvm_read_cr3(vcpu)))
                return 1;
 
        if (cr4 & X86_CR4_VMXE)
@@ -620,7 +632,7 @@ EXPORT_SYMBOL_GPL(kvm_set_cr4);
 
 int kvm_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3)
 {
-       if (cr3 == vcpu->arch.cr3 && !pdptrs_changed(vcpu)) {
+       if (cr3 == kvm_read_cr3(vcpu) && !pdptrs_changed(vcpu)) {
                kvm_mmu_sync_roots(vcpu);
                kvm_mmu_flush_tlb(vcpu);
                return 0;
@@ -655,12 +667,13 @@ int kvm_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3)
        if (unlikely(!gfn_to_memslot(vcpu->kvm, cr3 >> PAGE_SHIFT)))
                return 1;
        vcpu->arch.cr3 = cr3;
+       __set_bit(VCPU_EXREG_CR3, (ulong *)&vcpu->arch.regs_avail);
        vcpu->arch.mmu.new_cr3(vcpu);
        return 0;
 }
 EXPORT_SYMBOL_GPL(kvm_set_cr3);
 
-int __kvm_set_cr8(struct kvm_vcpu *vcpu, unsigned long cr8)
+int kvm_set_cr8(struct kvm_vcpu *vcpu, unsigned long cr8)
 {
        if (cr8 & CR8_RESERVED_BITS)
                return 1;
@@ -670,12 +683,6 @@ int __kvm_set_cr8(struct kvm_vcpu *vcpu, unsigned long cr8)
                vcpu->arch.cr8 = cr8;
        return 0;
 }
-
-void kvm_set_cr8(struct kvm_vcpu *vcpu, unsigned long cr8)
-{
-       if (__kvm_set_cr8(vcpu, cr8))
-               kvm_inject_gp(vcpu, 0);
-}
 EXPORT_SYMBOL_GPL(kvm_set_cr8);
 
 unsigned long kvm_get_cr8(struct kvm_vcpu *vcpu)
@@ -780,12 +787,12 @@ EXPORT_SYMBOL_GPL(kvm_get_dr);
  * kvm-specific. Those are put in the beginning of the list.
  */
 
-#define KVM_SAVE_MSRS_BEGIN    7
+#define KVM_SAVE_MSRS_BEGIN    8
 static u32 msrs_to_save[] = {
        MSR_KVM_SYSTEM_TIME, MSR_KVM_WALL_CLOCK,
        MSR_KVM_SYSTEM_TIME_NEW, MSR_KVM_WALL_CLOCK_NEW,
        HV_X64_MSR_GUEST_OS_ID, HV_X64_MSR_HYPERCALL,
-       HV_X64_MSR_APIC_ASSIST_PAGE,
+       HV_X64_MSR_APIC_ASSIST_PAGE, MSR_KVM_ASYNC_PF_EN,
        MSR_IA32_SYSENTER_CS, MSR_IA32_SYSENTER_ESP, MSR_IA32_SYSENTER_EIP,
        MSR_STAR,
 #ifdef CONFIG_X86_64
@@ -835,7 +842,6 @@ static int set_efer(struct kvm_vcpu *vcpu, u64 efer)
        kvm_x86_ops->set_efer(vcpu, efer);
 
        vcpu->arch.mmu.base_role.nxe = (efer & EFER_NX) && !tdp_enabled;
-       kvm_mmu_reset_context(vcpu);
 
        /* Update reserved bits */
        if ((efer ^ old_efer) & EFER_NX)
@@ -892,7 +898,7 @@ static void kvm_write_wall_clock(struct kvm *kvm, gpa_t wall_clock)
 
        /*
         * The guest calculates current wall clock time by adding
-        * system time (updated by kvm_write_guest_time below) to the
+        * system time (updated by kvm_guest_time_update below) to the
         * wall clock specified here.  guest system time equals host
         * system time for us, thus we must fill in host boot time here.
         */
@@ -920,31 +926,35 @@ static uint32_t div_frac(uint32_t dividend, uint32_t divisor)
        return quotient;
 }
 
-static void kvm_set_time_scale(uint32_t tsc_khz, struct pvclock_vcpu_time_info *hv_clock)
+static void kvm_get_time_scale(uint32_t scaled_khz, uint32_t base_khz,
+                              s8 *pshift, u32 *pmultiplier)
 {
-       uint64_t nsecs = 1000000000LL;
+       uint64_t scaled64;
        int32_t  shift = 0;
        uint64_t tps64;
        uint32_t tps32;
 
-       tps64 = tsc_khz * 1000LL;
-       while (tps64 > nsecs*2) {
+       tps64 = base_khz * 1000LL;
+       scaled64 = scaled_khz * 1000LL;
+       while (tps64 > scaled64*2 || tps64 & 0xffffffff00000000ULL) {
                tps64 >>= 1;
                shift--;
        }
 
        tps32 = (uint32_t)tps64;
-       while (tps32 <= (uint32_t)nsecs) {
-               tps32 <<= 1;
+       while (tps32 <= scaled64 || scaled64 & 0xffffffff00000000ULL) {
+               if (scaled64 & 0xffffffff00000000ULL || tps32 & 0x80000000)
+                       scaled64 >>= 1;
+               else
+                       tps32 <<= 1;
                shift++;
        }
 
-       hv_clock->tsc_shift = shift;
-       hv_clock->tsc_to_system_mul = div_frac(nsecs, tps32);
+       *pshift = shift;
+       *pmultiplier = div_frac(scaled64, tps32);
 
-       pr_debug("%s: tsc_khz %u, tsc_shift %d, tsc_mul %u\n",
-                __func__, tsc_khz, hv_clock->tsc_shift,
-                hv_clock->tsc_to_system_mul);
+       pr_debug("%s: base_khz %u => %u, shift %d, mul %u\n",
+                __func__, base_khz, scaled_khz, shift, *pmultiplier);
 }
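
The (tsc_shift, tsc_to_system_mul) pair produced above is applied the
same way the guest's pvclock code applies it: shift the cycle delta
first, then keep the upper 32 bits of the product with the multiplier.
A minimal sketch, assuming 128-bit arithmetic is available (the
in-tree pvclock_scale_delta() gets the same result from 32x32->64
multiplies):

    static inline u64 scale_delta(u64 delta, u32 mul_frac, s8 shift)
    {
            if (shift < 0)
                    delta >>= -shift;       /* negative shift divides */
            else
                    delta <<= shift;        /* positive shift multiplies */
            /* upper 32 bits of the 96-bit product */
            return (u64)(((unsigned __int128)delta * mul_frac) >> 32);
    }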
 
 static inline u64 get_kernel_ns(void)
@@ -958,6 +968,7 @@ static inline u64 get_kernel_ns(void)
 }
 
 static DEFINE_PER_CPU(unsigned long, cpu_tsc_khz);
+unsigned long max_tsc_khz;
 
 static inline int kvm_tsc_changes_freq(void)
 {
@@ -976,11 +987,29 @@ static inline u64 nsec_to_cycles(u64 nsec)
        if (kvm_tsc_changes_freq())
                printk_once(KERN_WARNING
                 "kvm: unreliable cycle conversion on adjustable rate TSC\n");
-       ret = nsec * __get_cpu_var(cpu_tsc_khz);
+       ret = nsec * __this_cpu_read(cpu_tsc_khz);
        do_div(ret, USEC_PER_SEC);
        return ret;
 }
 
+static void kvm_arch_set_tsc_khz(struct kvm *kvm, u32 this_tsc_khz)
+{
+       /* Compute a scale to convert nanoseconds in TSC cycles */
+       kvm_get_time_scale(this_tsc_khz, NSEC_PER_SEC / 1000,
+                          &kvm->arch.virtual_tsc_shift,
+                          &kvm->arch.virtual_tsc_mult);
+       kvm->arch.virtual_tsc_khz = this_tsc_khz;
+}
+
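+/*
+ * In effect this evaluates
+ *   guest_tsc(now) = last_tsc_write
+ *       + (kernel_ns - last_tsc_nsec) * virtual_tsc_khz * 1000 / NSEC_PER_SEC,
+ * with the ns-to-cycles conversion pre-folded into virtual_tsc_mult
+ * and virtual_tsc_shift by kvm_arch_set_tsc_khz() above.
+ */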
+static u64 compute_guest_tsc(struct kvm_vcpu *vcpu, s64 kernel_ns)
+{
+       u64 tsc = pvclock_scale_delta(kernel_ns - vcpu->arch.last_tsc_nsec,
+                                     vcpu->kvm->arch.virtual_tsc_mult,
+                                     vcpu->kvm->arch.virtual_tsc_shift);
+       tsc += vcpu->arch.last_tsc_write;
+       return tsc;
+}
+
 void kvm_write_tsc(struct kvm_vcpu *vcpu, u64 data)
 {
        struct kvm *kvm = vcpu->kvm;
@@ -1025,10 +1054,12 @@ void kvm_write_tsc(struct kvm_vcpu *vcpu, u64 data)
 
        /* Reset of TSC must disable overshoot protection below */
        vcpu->arch.hv_clock.tsc_timestamp = 0;
+       vcpu->arch.last_tsc_write = data;
+       vcpu->arch.last_tsc_nsec = ns;
 }
 EXPORT_SYMBOL_GPL(kvm_write_tsc);
 
-static int kvm_write_guest_time(struct kvm_vcpu *v)
+static int kvm_guest_time_update(struct kvm_vcpu *v)
 {
        unsigned long flags;
        struct kvm_vcpu_arch *vcpu = &v->arch;
@@ -1037,22 +1068,42 @@ static int kvm_write_guest_time(struct kvm_vcpu *v)
        s64 kernel_ns, max_kernel_ns;
        u64 tsc_timestamp;
 
-       if ((!vcpu->time_page))
-               return 0;
-
        /* Keep irq disabled to prevent changes to the clock */
        local_irq_save(flags);
        kvm_get_msr(v, MSR_IA32_TSC, &tsc_timestamp);
        kernel_ns = get_kernel_ns();
-       this_tsc_khz = __get_cpu_var(cpu_tsc_khz);
-       local_irq_restore(flags);
+       this_tsc_khz = __this_cpu_read(cpu_tsc_khz);
 
        if (unlikely(this_tsc_khz == 0)) {
-               kvm_make_request(KVM_REQ_KVMCLOCK_UPDATE, v);
+               local_irq_restore(flags);
+               kvm_make_request(KVM_REQ_CLOCK_UPDATE, v);
                return 1;
        }
 
        /*
+        * We may have to catch up the TSC to match elapsed wall clock
+        * time for two reasons, even if kvmclock is used.
+        *   1) CPU could have been running below the maximum TSC rate
+        *   2) Broken TSC compensation resets the base at each VCPU
+        *      entry to avoid unknown leaps of TSC even when running
+        *      again on the same CPU.  This may cause apparent elapsed
+        *      time to disappear, and the guest to stand still or run
+        *      very slowly.
+        */
+       if (vcpu->tsc_catchup) {
+               u64 tsc = compute_guest_tsc(v, kernel_ns);
+               if (tsc > tsc_timestamp) {
+                       kvm_x86_ops->adjust_tsc_offset(v, tsc - tsc_timestamp);
+                       tsc_timestamp = tsc;
+               }
+       }
+
+       local_irq_restore(flags);
+
+       if (!vcpu->time_page)
+               return 0;
+
+       /*
         * Time as measured by the TSC may go backwards when resetting the base
         * tsc_timestamp.  The reason for this is that the TSC resolution is
         * higher than the resolution of the other clock scales.  Thus, many
@@ -1084,7 +1135,9 @@ static int kvm_write_guest_time(struct kvm_vcpu *v)
        }
 
        if (unlikely(vcpu->hw_tsc_khz != this_tsc_khz)) {
-               kvm_set_time_scale(this_tsc_khz, &vcpu->hv_clock);
+               kvm_get_time_scale(NSEC_PER_SEC / 1000, this_tsc_khz,
+                                  &vcpu->hv_clock.tsc_shift,
+                                  &vcpu->hv_clock.tsc_to_system_mul);
                vcpu->hw_tsc_khz = this_tsc_khz;
        }
 
@@ -1116,16 +1169,6 @@ static int kvm_write_guest_time(struct kvm_vcpu *v)
        return 0;
 }
 
-static int kvm_request_guest_time_update(struct kvm_vcpu *v)
-{
-       struct kvm_vcpu_arch *vcpu = &v->arch;
-
-       if (!vcpu->time_page)
-               return 0;
-       kvm_make_request(KVM_REQ_KVMCLOCK_UPDATE, v);
-       return 1;
-}
-
 static bool msr_mtrr_valid(unsigned msr)
 {
        switch (msr) {
@@ -1386,6 +1429,30 @@ static int set_msr_hyperv(struct kvm_vcpu *vcpu, u32 msr, u64 data)
        return 0;
 }
 
+static int kvm_pv_enable_async_pf(struct kvm_vcpu *vcpu, u64 data)
+{
+       gpa_t gpa = data & ~0x3f;
+
+       /* Bits 2:5 are reserved, should be zero */
+       if (data & 0x3c)
+               return 1;
+
+       vcpu->arch.apf.msr_val = data;
+
+       if (!(data & KVM_ASYNC_PF_ENABLED)) {
+               kvm_clear_async_pf_completion_queue(vcpu);
+               kvm_async_pf_hash_reset(vcpu);
+               return 0;
+       }
+
+       if (kvm_gfn_to_hva_cache_init(vcpu->kvm, &vcpu->arch.apf.data, gpa))
+               return 1;
+
+       vcpu->arch.apf.send_user_only = !(data & KVM_ASYNC_PF_SEND_ALWAYS);
+       kvm_async_pf_wakeup_all(vcpu);
+       return 0;
+}
+
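
A guest opts in by writing the physical address of a 64-byte-aligned
per-cpu area, together with KVM_ASYNC_PF_ENABLED (and optionally
KVM_ASYNC_PF_SEND_ALWAYS), into the MSR.  Roughly, following the Linux
guest side (illustrative sketch, not part of this patch):

    static DEFINE_PER_CPU(struct kvm_vcpu_pv_apf_data, apf_reason)
            __aligned(64);

    u64 pa = __pa(&__get_cpu_var(apf_reason));
    wrmsrl(MSR_KVM_ASYNC_PF_EN, pa | KVM_ASYNC_PF_ENABLED);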
 int kvm_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data)
 {
        switch (msr) {
@@ -1449,6 +1516,7 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data)
                }
 
                vcpu->arch.time = data;
+               kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu);
 
                /* we verify if the enable bit is set... */
                if (!(data & 1))
@@ -1464,10 +1532,12 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data)
                        kvm_release_page_clean(vcpu->arch.time_page);
                        vcpu->arch.time_page = NULL;
                }
-
-               kvm_request_guest_time_update(vcpu);
                break;
        }
+       case MSR_KVM_ASYNC_PF_EN:
+               if (kvm_pv_enable_async_pf(vcpu, data))
+                       return 1;
+               break;
        case MSR_IA32_MCG_CTL:
        case MSR_IA32_MCG_STATUS:
        case MSR_IA32_MC0_CTL ... MSR_IA32_MC0_CTL + 4 * KVM_MAX_MCE_BANKS - 1:
@@ -1744,6 +1814,9 @@ int kvm_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
        case MSR_KVM_SYSTEM_TIME_NEW:
                data = vcpu->arch.time;
                break;
+       case MSR_KVM_ASYNC_PF_EN:
+               data = vcpu->arch.apf.msr_val;
+               break;
        case MSR_IA32_P5_MC_ADDR:
        case MSR_IA32_P5_MC_TYPE:
        case MSR_IA32_MCG_CAP:
@@ -1873,6 +1946,7 @@ int kvm_dev_ioctl_check_extension(long ext)
        case KVM_CAP_NOP_IO_DELAY:
        case KVM_CAP_MP_STATE:
        case KVM_CAP_SYNC_MMU:
+       case KVM_CAP_USER_NMI:
        case KVM_CAP_REINJECT_CONTROL:
        case KVM_CAP_IRQ_INJECT_STATUS:
        case KVM_CAP_ASSIGN_DEV_IRQ:
@@ -1891,6 +1965,7 @@ int kvm_dev_ioctl_check_extension(long ext)
        case KVM_CAP_DEBUGREGS:
        case KVM_CAP_X86_ROBUST_SINGLESTEP:
        case KVM_CAP_XSAVE:
+       case KVM_CAP_ASYNC_PF:
                r = 1;
                break;
        case KVM_CAP_COALESCED_MMIO:
@@ -2022,9 +2097,13 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
                                native_read_tsc() - vcpu->arch.last_host_tsc;
                if (tsc_delta < 0)
                        mark_tsc_unstable("KVM discovered backwards TSC");
-               if (check_tsc_unstable())
+               if (check_tsc_unstable()) {
                        kvm_x86_ops->adjust_tsc_offset(vcpu, -tsc_delta);
-               kvm_migrate_timers(vcpu);
+                       vcpu->arch.tsc_catchup = 1;
+               }
+               kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu);
+               if (vcpu->cpu != cpu)
+                       kvm_migrate_timers(vcpu);
                vcpu->cpu = cpu;
        }
 }
@@ -2150,6 +2229,11 @@ out:
        return r;
 }
 
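+/*
+ * Mask a guest cpuid word by the matching host capability word, so the
+ * guest only sees feature bits the host kernel itself has enabled.
+ */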
+static void cpuid_mask(u32 *word, int wordnum)
+{
+       *word &= boot_cpu_data.x86_capability[wordnum];
+}
+
 static void do_cpuid_1_ent(struct kvm_cpuid_entry2 *entry, u32 function,
                           u32 index)
 {
@@ -2204,13 +2288,14 @@ static void do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function,
                0 /* Reserved */ | F(CX16) | 0 /* xTPR Update, PDCM */ |
                0 /* Reserved, DCA */ | F(XMM4_1) |
                F(XMM4_2) | F(X2APIC) | F(MOVBE) | F(POPCNT) |
-               0 /* Reserved, AES */ | F(XSAVE) | 0 /* OSXSAVE */ | F(AVX);
+               0 /* Reserved */ | F(AES) | F(XSAVE) | 0 /* OSXSAVE */ | F(AVX) |
+               F(F16C);
        /* cpuid 0x80000001.ecx */
        const u32 kvm_supported_word6_x86_features =
                F(LAHF_LM) | F(CMP_LEGACY) | 0 /*SVM*/ | 0 /* ExtApicSpace */ |
                F(CR8_LEGACY) | F(ABM) | F(SSE4A) | F(MISALIGNSSE) |
-               F(3DNOWPREFETCH) | 0 /* OSVW */ | 0 /* IBS */ | F(SSE5) |
-               0 /* SKINIT */ | 0 /* WDT */;
+               F(3DNOWPREFETCH) | 0 /* OSVW */ | 0 /* IBS */ | F(XOP) |
+               0 /* SKINIT, WDT, LWP */ | F(FMA4) | F(TBM);
 
        /* all calls to cpuid_count() should be made on the same cpu */
        get_cpu();
@@ -2223,7 +2308,9 @@ static void do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function,
                break;
        case 1:
                entry->edx &= kvm_supported_word0_x86_features;
+               cpuid_mask(&entry->edx, 0);
                entry->ecx &= kvm_supported_word4_x86_features;
+               cpuid_mask(&entry->ecx, 4);
                /* we support x2apic emulation even if host does not support
                 * it since we emulate x2apic in software */
                entry->ecx |= F(X2APIC);
@@ -2314,7 +2401,9 @@ static void do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function,
                break;
        case 0x80000001:
                entry->edx &= kvm_supported_word1_x86_features;
+               cpuid_mask(&entry->edx, 1);
                entry->ecx &= kvm_supported_word6_x86_features;
+               cpuid_mask(&entry->ecx, 6);
                break;
        }
 
@@ -2519,6 +2608,7 @@ static void kvm_vcpu_ioctl_x86_get_vcpu_events(struct kvm_vcpu *vcpu,
                !kvm_exception_is_soft(vcpu->arch.exception.nr);
        events->exception.nr = vcpu->arch.exception.nr;
        events->exception.has_error_code = vcpu->arch.exception.has_error_code;
+       events->exception.pad = 0;
        events->exception.error_code = vcpu->arch.exception.error_code;
 
        events->interrupt.injected =
@@ -2532,12 +2622,14 @@ static void kvm_vcpu_ioctl_x86_get_vcpu_events(struct kvm_vcpu *vcpu,
        events->nmi.injected = vcpu->arch.nmi_injected;
        events->nmi.pending = vcpu->arch.nmi_pending;
        events->nmi.masked = kvm_x86_ops->get_nmi_mask(vcpu);
+       events->nmi.pad = 0;
 
        events->sipi_vector = vcpu->arch.sipi_vector;
 
        events->flags = (KVM_VCPUEVENT_VALID_NMI_PENDING
                         | KVM_VCPUEVENT_VALID_SIPI_VECTOR
                         | KVM_VCPUEVENT_VALID_SHADOW);
+       memset(&events->reserved, 0, sizeof(events->reserved));
 }
 
 static int kvm_vcpu_ioctl_x86_set_vcpu_events(struct kvm_vcpu *vcpu,
@@ -2582,6 +2674,7 @@ static void kvm_vcpu_ioctl_x86_get_debugregs(struct kvm_vcpu *vcpu,
        dbgregs->dr6 = vcpu->arch.dr6;
        dbgregs->dr7 = vcpu->arch.dr7;
        dbgregs->flags = 0;
+       memset(&dbgregs->reserved, 0, sizeof(dbgregs->reserved));
 }
 
 static int kvm_vcpu_ioctl_x86_set_debugregs(struct kvm_vcpu *vcpu,
@@ -3065,6 +3158,7 @@ static int kvm_vm_ioctl_get_pit2(struct kvm *kvm, struct kvm_pit_state2 *ps)
                sizeof(ps->channels));
        ps->flags = kvm->arch.vpit->pit_state.flags;
        mutex_unlock(&kvm->arch.vpit->pit_state.lock);
+       memset(&ps->reserved, 0, sizeof(ps->reserved));
        return r;
 }
 
@@ -3128,24 +3222,18 @@ int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
                struct kvm_memslots *slots, *old_slots;
                unsigned long *dirty_bitmap;
 
-               spin_lock(&kvm->mmu_lock);
-               kvm_mmu_slot_remove_write_access(kvm, log->slot);
-               spin_unlock(&kvm->mmu_lock);
-
-               r = -ENOMEM;
-               dirty_bitmap = vmalloc(n);
-               if (!dirty_bitmap)
-                       goto out;
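+               /*
+                * dirty_bitmap_head holds two preallocated bitmaps back to
+                * back; pick whichever half is not currently live and zero
+                * it, instead of vmalloc'ing a fresh one on every call.
+                */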
+               dirty_bitmap = memslot->dirty_bitmap_head;
+               if (memslot->dirty_bitmap == dirty_bitmap)
+                       dirty_bitmap += n / sizeof(long);
                memset(dirty_bitmap, 0, n);
 
                r = -ENOMEM;
                slots = kzalloc(sizeof(struct kvm_memslots), GFP_KERNEL);
-               if (!slots) {
-                       vfree(dirty_bitmap);
+               if (!slots)
                        goto out;
-               }
                memcpy(slots, kvm->memslots, sizeof(struct kvm_memslots));
                slots->memslots[log->slot].dirty_bitmap = dirty_bitmap;
+               slots->generation++;
 
                old_slots = kvm->memslots;
                rcu_assign_pointer(kvm->memslots, slots);
@@ -3153,12 +3241,13 @@ int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
                dirty_bitmap = old_slots->memslots[log->slot].dirty_bitmap;
                kfree(old_slots);
 
+               spin_lock(&kvm->mmu_lock);
+               kvm_mmu_slot_remove_write_access(kvm, log->slot);
+               spin_unlock(&kvm->mmu_lock);
+
                r = -EFAULT;
-               if (copy_to_user(log->dirty_bitmap, dirty_bitmap, n)) {
-                       vfree(dirty_bitmap);
+               if (copy_to_user(log->dirty_bitmap, dirty_bitmap, n))
                        goto out;
-               }
-               vfree(dirty_bitmap);
        } else {
                r = -EFAULT;
                if (clear_user(log->dirty_bitmap, n))
@@ -3225,8 +3314,10 @@ long kvm_arch_vm_ioctl(struct file *filp,
                if (vpic) {
                        r = kvm_ioapic_init(kvm);
                        if (r) {
+                               mutex_lock(&kvm->slots_lock);
                                kvm_io_bus_unregister_dev(kvm, KVM_PIO_BUS,
                                                          &vpic->dev);
+                               mutex_unlock(&kvm->slots_lock);
                                kfree(vpic);
                                goto create_irqchip_unlock;
                        }
@@ -3237,10 +3328,12 @@ long kvm_arch_vm_ioctl(struct file *filp,
                smp_wmb();
                r = kvm_setup_default_irq_routing(kvm);
                if (r) {
+                       mutex_lock(&kvm->slots_lock);
                        mutex_lock(&kvm->irq_lock);
                        kvm_ioapic_destroy(kvm);
                        kvm_destroy_pic(kvm);
                        mutex_unlock(&kvm->irq_lock);
+                       mutex_unlock(&kvm->slots_lock);
                }
        create_irqchip_unlock:
                mutex_unlock(&kvm->lock);
@@ -3429,8 +3522,10 @@ long kvm_arch_vm_ioctl(struct file *filp,
                        goto out;
 
                r = 0;
+               local_irq_disable();
                now_ns = get_kernel_ns();
                delta = user_ns.clock - now_ns;
+               local_irq_enable();
                kvm->arch.kvmclock_offset = delta;
                break;
        }
@@ -3438,9 +3533,12 @@ long kvm_arch_vm_ioctl(struct file *filp,
                struct kvm_clock_data user_ns;
                u64 now_ns;
 
+               local_irq_disable();
                now_ns = get_kernel_ns();
                user_ns.clock = kvm->arch.kvmclock_offset + now_ns;
+               local_irq_enable();
                user_ns.flags = 0;
+               memset(&user_ns.pad, 0, sizeof(user_ns.pad));
 
                r = -EFAULT;
                if (copy_to_user(argp, &user_ns, sizeof(user_ns)))
@@ -3511,63 +3609,63 @@ static gpa_t translate_gpa(struct kvm_vcpu *vcpu, gpa_t gpa, u32 access)
 static gpa_t translate_nested_gpa(struct kvm_vcpu *vcpu, gpa_t gpa, u32 access)
 {
        gpa_t t_gpa;
-       u32 error;
+       struct x86_exception exception;
 
        BUG_ON(!mmu_is_nested(vcpu));
 
        /* NPT walks are always user-walks */
        access |= PFERR_USER_MASK;
-       t_gpa  = vcpu->arch.mmu.gva_to_gpa(vcpu, gpa, access, &error);
-       if (t_gpa == UNMAPPED_GVA)
-               vcpu->arch.fault.nested = true;
+       t_gpa  = vcpu->arch.mmu.gva_to_gpa(vcpu, gpa, access, &exception);
 
        return t_gpa;
 }
 
-gpa_t kvm_mmu_gva_to_gpa_read(struct kvm_vcpu *vcpu, gva_t gva, u32 *error)
+gpa_t kvm_mmu_gva_to_gpa_read(struct kvm_vcpu *vcpu, gva_t gva,
+                             struct x86_exception *exception)
 {
        u32 access = (kvm_x86_ops->get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0;
-       return vcpu->arch.walk_mmu->gva_to_gpa(vcpu, gva, access, error);
+       return vcpu->arch.walk_mmu->gva_to_gpa(vcpu, gva, access, exception);
 }
 
- gpa_t kvm_mmu_gva_to_gpa_fetch(struct kvm_vcpu *vcpu, gva_t gva, u32 *error)
+gpa_t kvm_mmu_gva_to_gpa_fetch(struct kvm_vcpu *vcpu, gva_t gva,
+                               struct x86_exception *exception)
 {
        u32 access = (kvm_x86_ops->get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0;
        access |= PFERR_FETCH_MASK;
-       return vcpu->arch.walk_mmu->gva_to_gpa(vcpu, gva, access, error);
+       return vcpu->arch.walk_mmu->gva_to_gpa(vcpu, gva, access, exception);
 }
 
-gpa_t kvm_mmu_gva_to_gpa_write(struct kvm_vcpu *vcpu, gva_t gva, u32 *error)
+gpa_t kvm_mmu_gva_to_gpa_write(struct kvm_vcpu *vcpu, gva_t gva,
+                              struct x86_exception *exception)
 {
        u32 access = (kvm_x86_ops->get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0;
        access |= PFERR_WRITE_MASK;
-       return vcpu->arch.walk_mmu->gva_to_gpa(vcpu, gva, access, error);
+       return vcpu->arch.walk_mmu->gva_to_gpa(vcpu, gva, access, exception);
 }
 
 /* uses this to access any guest's mapped memory without checking CPL */
-gpa_t kvm_mmu_gva_to_gpa_system(struct kvm_vcpu *vcpu, gva_t gva, u32 *error)
+gpa_t kvm_mmu_gva_to_gpa_system(struct kvm_vcpu *vcpu, gva_t gva,
+                               struct x86_exception *exception)
 {
-       return vcpu->arch.walk_mmu->gva_to_gpa(vcpu, gva, 0, error);
+       return vcpu->arch.walk_mmu->gva_to_gpa(vcpu, gva, 0, exception);
 }
 
 static int kvm_read_guest_virt_helper(gva_t addr, void *val, unsigned int bytes,
                                      struct kvm_vcpu *vcpu, u32 access,
-                                     u32 *error)
+                                     struct x86_exception *exception)
 {
        void *data = val;
        int r = X86EMUL_CONTINUE;
 
        while (bytes) {
                gpa_t gpa = vcpu->arch.walk_mmu->gva_to_gpa(vcpu, addr, access,
-                                                           error);
+                                                           exception);
                unsigned offset = addr & (PAGE_SIZE-1);
                unsigned toread = min(bytes, (unsigned)PAGE_SIZE - offset);
                int ret;
 
-               if (gpa == UNMAPPED_GVA) {
-                       r = X86EMUL_PROPAGATE_FAULT;
-                       goto out;
-               }
+               if (gpa == UNMAPPED_GVA)
+                       return X86EMUL_PROPAGATE_FAULT;
                ret = kvm_read_guest(vcpu->kvm, gpa, data, toread);
                if (ret < 0) {
                        r = X86EMUL_IO_NEEDED;
@@ -3584,31 +3682,35 @@ out:
 
 /* used for instruction fetching */
 static int kvm_fetch_guest_virt(gva_t addr, void *val, unsigned int bytes,
-                               struct kvm_vcpu *vcpu, u32 *error)
+                               struct kvm_vcpu *vcpu,
+                               struct x86_exception *exception)
 {
        u32 access = (kvm_x86_ops->get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0;
        return kvm_read_guest_virt_helper(addr, val, bytes, vcpu,
-                                         access | PFERR_FETCH_MASK, error);
+                                         access | PFERR_FETCH_MASK,
+                                         exception);
 }
 
 static int kvm_read_guest_virt(gva_t addr, void *val, unsigned int bytes,
-                              struct kvm_vcpu *vcpu, u32 *error)
+                              struct kvm_vcpu *vcpu,
+                              struct x86_exception *exception)
 {
        u32 access = (kvm_x86_ops->get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0;
        return kvm_read_guest_virt_helper(addr, val, bytes, vcpu, access,
-                                         error);
+                                         exception);
 }
 
 static int kvm_read_guest_virt_system(gva_t addr, void *val, unsigned int bytes,
-                              struct kvm_vcpu *vcpu, u32 *error)
+                                     struct kvm_vcpu *vcpu,
+                                     struct x86_exception *exception)
 {
-       return kvm_read_guest_virt_helper(addr, val, bytes, vcpu, 0, error);
+       return kvm_read_guest_virt_helper(addr, val, bytes, vcpu, 0, exception);
 }
 
 static int kvm_write_guest_virt_system(gva_t addr, void *val,
                                       unsigned int bytes,
                                       struct kvm_vcpu *vcpu,
-                                      u32 *error)
+                                      struct x86_exception *exception)
 {
        void *data = val;
        int r = X86EMUL_CONTINUE;
@@ -3616,15 +3718,13 @@ static int kvm_write_guest_virt_system(gva_t addr, void *val,
        while (bytes) {
                gpa_t gpa =  vcpu->arch.walk_mmu->gva_to_gpa(vcpu, addr,
                                                             PFERR_WRITE_MASK,
-                                                            error);
+                                                            exception);
                unsigned offset = addr & (PAGE_SIZE-1);
                unsigned towrite = min(bytes, (unsigned)PAGE_SIZE - offset);
                int ret;
 
-               if (gpa == UNMAPPED_GVA) {
-                       r = X86EMUL_PROPAGATE_FAULT;
-                       goto out;
-               }
+               if (gpa == UNMAPPED_GVA)
+                       return X86EMUL_PROPAGATE_FAULT;
                ret = kvm_write_guest(vcpu->kvm, gpa, data, towrite);
                if (ret < 0) {
                        r = X86EMUL_IO_NEEDED;
@@ -3642,7 +3742,7 @@ out:
 static int emulator_read_emulated(unsigned long addr,
                                  void *val,
                                  unsigned int bytes,
-                                 unsigned int *error_code,
+                                 struct x86_exception *exception,
                                  struct kvm_vcpu *vcpu)
 {
        gpa_t                 gpa;
@@ -3655,7 +3755,7 @@ static int emulator_read_emulated(unsigned long addr,
                return X86EMUL_CONTINUE;
        }
 
-       gpa = kvm_mmu_gva_to_gpa_read(vcpu, addr, error_code);
+       gpa = kvm_mmu_gva_to_gpa_read(vcpu, addr, exception);
 
        if (gpa == UNMAPPED_GVA)
                return X86EMUL_PROPAGATE_FAULT;
@@ -3664,8 +3764,8 @@ static int emulator_read_emulated(unsigned long addr,
        if ((gpa & PAGE_MASK) == APIC_DEFAULT_PHYS_BASE)
                goto mmio;
 
-       if (kvm_read_guest_virt(addr, val, bytes, vcpu, NULL)
-                               == X86EMUL_CONTINUE)
+       if (kvm_read_guest_virt(addr, val, bytes, vcpu, exception)
+           == X86EMUL_CONTINUE)
                return X86EMUL_CONTINUE;
 
 mmio:
@@ -3689,7 +3789,7 @@ mmio:
 }
 
 int emulator_write_phys(struct kvm_vcpu *vcpu, gpa_t gpa,
-                         const void *val, int bytes)
+                       const void *val, int bytes)
 {
        int ret;
 
@@ -3703,12 +3803,12 @@ int emulator_write_phys(struct kvm_vcpu *vcpu, gpa_t gpa,
 static int emulator_write_emulated_onepage(unsigned long addr,
                                           const void *val,
                                           unsigned int bytes,
-                                          unsigned int *error_code,
+                                          struct x86_exception *exception,
                                           struct kvm_vcpu *vcpu)
 {
        gpa_t                 gpa;
 
-       gpa = kvm_mmu_gva_to_gpa_write(vcpu, addr, error_code);
+       gpa = kvm_mmu_gva_to_gpa_write(vcpu, addr, exception);
 
        if (gpa == UNMAPPED_GVA)
                return X86EMUL_PROPAGATE_FAULT;
@@ -3741,7 +3841,7 @@ mmio:
 int emulator_write_emulated(unsigned long addr,
                            const void *val,
                            unsigned int bytes,
-                           unsigned int *error_code,
+                           struct x86_exception *exception,
                            struct kvm_vcpu *vcpu)
 {
        /* Crossing a page boundary? */
@@ -3749,7 +3849,7 @@ int emulator_write_emulated(unsigned long addr,
                int rc, now;
 
                now = -addr & ~PAGE_MASK;
-               rc = emulator_write_emulated_onepage(addr, val, now, error_code,
+               rc = emulator_write_emulated_onepage(addr, val, now, exception,
                                                     vcpu);
                if (rc != X86EMUL_CONTINUE)
                        return rc;
@@ -3757,7 +3857,7 @@ int emulator_write_emulated(unsigned long addr,
                val += now;
                bytes -= now;
        }
-       return emulator_write_emulated_onepage(addr, val, bytes, error_code,
+       return emulator_write_emulated_onepage(addr, val, bytes, exception,
                                               vcpu);
 }
 
@@ -3775,7 +3875,7 @@ static int emulator_cmpxchg_emulated(unsigned long addr,
                                     const void *old,
                                     const void *new,
                                     unsigned int bytes,
-                                    unsigned int *error_code,
+                                    struct x86_exception *exception,
                                     struct kvm_vcpu *vcpu)
 {
        gpa_t gpa;
@@ -3833,7 +3933,7 @@ static int emulator_cmpxchg_emulated(unsigned long addr,
 emul_write:
        printk_once(KERN_WARNING "kvm: emulating exchange as write\n");
 
-       return emulator_write_emulated(addr, new, bytes, error_code, vcpu);
+       return emulator_write_emulated(addr, new, bytes, exception, vcpu);
 }
 
 static int kernel_pio(struct kvm_vcpu *vcpu, void *pd)
@@ -3858,7 +3958,7 @@ static int emulator_pio_in_emulated(int size, unsigned short port, void *val,
        if (vcpu->arch.pio.count)
                goto data_avail;
 
-       trace_kvm_pio(0, port, size, 1);
+       trace_kvm_pio(0, port, size, count);
 
        vcpu->arch.pio.port = port;
        vcpu->arch.pio.in = 1;
@@ -3886,7 +3986,7 @@ static int emulator_pio_out_emulated(int size, unsigned short port,
                              const void *val, unsigned int count,
                              struct kvm_vcpu *vcpu)
 {
-       trace_kvm_pio(1, port, size, 1);
+       trace_kvm_pio(1, port, size, count);
 
        vcpu->arch.pio.port = port;
        vcpu->arch.pio.in = 0;
@@ -3927,11 +4027,15 @@ int kvm_emulate_wbinvd(struct kvm_vcpu *vcpu)
                return X86EMUL_CONTINUE;
 
        if (kvm_x86_ops->has_wbinvd_exit()) {
+               int cpu = get_cpu();
+
+               cpumask_set_cpu(cpu, vcpu->arch.wbinvd_dirty_mask);
                smp_call_function_many(vcpu->arch.wbinvd_dirty_mask,
                                wbinvd_ipi, NULL, 1);
+               put_cpu();
                cpumask_clear(vcpu->arch.wbinvd_dirty_mask);
-       }
-       wbinvd();
+       } else
+               wbinvd();
        return X86EMUL_CONTINUE;
 }
 EXPORT_SYMBOL_GPL(kvm_emulate_wbinvd);
@@ -3971,7 +4075,7 @@ static unsigned long emulator_get_cr(int cr, struct kvm_vcpu *vcpu)
                value = vcpu->arch.cr2;
                break;
        case 3:
-               value = vcpu->arch.cr3;
+               value = kvm_read_cr3(vcpu);
                break;
        case 4:
                value = kvm_read_cr4(vcpu);
@@ -4005,7 +4109,7 @@ static int emulator_set_cr(int cr, unsigned long val, struct kvm_vcpu *vcpu)
                res = kvm_set_cr4(vcpu, mk_cr_64(kvm_read_cr4(vcpu), val));
                break;
        case 8:
-               res = __kvm_set_cr8(vcpu, val & 0xfUL);
+               res = kvm_set_cr8(vcpu, val);
                break;
        default:
                vcpu_printf(vcpu, "%s: unexpected cr %u\n", __func__, cr);
@@ -4158,12 +4262,13 @@ static void toggle_interruptibility(struct kvm_vcpu *vcpu, u32 mask)
 static void inject_emulated_exception(struct kvm_vcpu *vcpu)
 {
        struct x86_emulate_ctxt *ctxt = &vcpu->arch.emulate_ctxt;
-       if (ctxt->exception == PF_VECTOR)
-               kvm_propagate_fault(vcpu);
-       else if (ctxt->error_code_valid)
-               kvm_queue_exception_e(vcpu, ctxt->exception, ctxt->error_code);
+       if (ctxt->exception.vector == PF_VECTOR)
+               kvm_propagate_fault(vcpu, &ctxt->exception);
+       else if (ctxt->exception.error_code_valid)
+               kvm_queue_exception_e(vcpu, ctxt->exception.vector,
+                                     ctxt->exception.error_code);
        else
-               kvm_queue_exception(vcpu, ctxt->exception);
+               kvm_queue_exception(vcpu, ctxt->exception.vector);
 }
 
 static void init_emulate_ctxt(struct kvm_vcpu *vcpu)
@@ -4188,7 +4293,7 @@ static void init_emulate_ctxt(struct kvm_vcpu *vcpu)
        memcpy(c->regs, vcpu->arch.regs, sizeof c->regs);
 }
 
-int kvm_inject_realmode_interrupt(struct kvm_vcpu *vcpu, int irq)
+int kvm_inject_realmode_interrupt(struct kvm_vcpu *vcpu, int irq, int inc_eip)
 {
        struct decode_cache *c = &vcpu->arch.emulate_ctxt.decode;
        int ret;
@@ -4197,7 +4302,8 @@ int kvm_inject_realmode_interrupt(struct kvm_vcpu *vcpu, int irq)
 
        vcpu->arch.emulate_ctxt.decode.op_bytes = 2;
        vcpu->arch.emulate_ctxt.decode.ad_bytes = 2;
-       vcpu->arch.emulate_ctxt.decode.eip = vcpu->arch.emulate_ctxt.eip;
+       vcpu->arch.emulate_ctxt.decode.eip = vcpu->arch.emulate_ctxt.eip +
+                                                                inc_eip;
        ret = emulate_int_real(&vcpu->arch.emulate_ctxt, &emulate_ops, irq);
 
        if (ret != X86EMUL_CONTINUE)
@@ -4219,13 +4325,19 @@ EXPORT_SYMBOL_GPL(kvm_inject_realmode_interrupt);
 
 static int handle_emulation_failure(struct kvm_vcpu *vcpu)
 {
+       int r = EMULATE_DONE;
+
        ++vcpu->stat.insn_emulation_fail;
        trace_kvm_emulate_insn_failed(vcpu);
-       vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
-       vcpu->run->internal.suberror = KVM_INTERNAL_ERROR_EMULATION;
-       vcpu->run->internal.ndata = 0;
+       if (!is_guest_mode(vcpu)) {
+               vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+               vcpu->run->internal.suberror = KVM_INTERNAL_ERROR_EMULATION;
+               vcpu->run->internal.ndata = 0;
+               r = EMULATE_FAIL;
+       }
        kvm_queue_exception(vcpu, UD_VECTOR);
-       return EMULATE_FAIL;
+
+       return r;
 }
 
 static bool reexecute_instruction(struct kvm_vcpu *vcpu, gva_t gva)
@@ -4254,10 +4366,11 @@ static bool reexecute_instruction(struct kvm_vcpu *vcpu, gva_t gva)
        return false;
 }
 
-int emulate_instruction(struct kvm_vcpu *vcpu,
-                       unsigned long cr2,
-                       u16 error_code,
-                       int emulation_type)
+int x86_emulate_instruction(struct kvm_vcpu *vcpu,
+                           unsigned long cr2,
+                           int emulation_type,
+                           void *insn,
+                           int insn_len)
 {
        int r;
        struct decode_cache *c = &vcpu->arch.emulate_ctxt.decode;
@@ -4275,10 +4388,10 @@ int emulate_instruction(struct kvm_vcpu *vcpu,
        if (!(emulation_type & EMULTYPE_NO_DECODE)) {
                init_emulate_ctxt(vcpu);
                vcpu->arch.emulate_ctxt.interruptibility = 0;
-               vcpu->arch.emulate_ctxt.exception = -1;
+               vcpu->arch.emulate_ctxt.have_exception = false;
                vcpu->arch.emulate_ctxt.perm_ok = false;
 
-               r = x86_decode_insn(&vcpu->arch.emulate_ctxt);
+               r = x86_decode_insn(&vcpu->arch.emulate_ctxt, insn, insn_len);
                if (r == X86EMUL_PROPAGATE_FAULT)
                        goto done;
 
@@ -4341,7 +4454,7 @@ restart:
        }
 
 done:
-       if (vcpu->arch.emulate_ctxt.exception >= 0) {
+       if (vcpu->arch.emulate_ctxt.have_exception) {
                inject_emulated_exception(vcpu);
                r = EMULATE_DONE;
        } else if (vcpu->arch.pio.count) {
@@ -4365,7 +4478,7 @@ done:
 
        return r;
 }
-EXPORT_SYMBOL_GPL(emulate_instruction);
+EXPORT_SYMBOL_GPL(x86_emulate_instruction);
 
 int kvm_fast_pio_out(struct kvm_vcpu *vcpu, int size, unsigned short port)
 {
@@ -4379,7 +4492,7 @@ EXPORT_SYMBOL_GPL(kvm_fast_pio_out);
 
 static void tsc_bad(void *info)
 {
-       __get_cpu_var(cpu_tsc_khz) = 0;
+       __this_cpu_write(cpu_tsc_khz, 0);
 }
 
 static void tsc_khz_changed(void *data)
@@ -4393,7 +4506,7 @@ static void tsc_khz_changed(void *data)
                khz = cpufreq_quick_get(raw_smp_processor_id());
        if (!khz)
                khz = tsc_khz;
-       __get_cpu_var(cpu_tsc_khz) = khz;
+       __this_cpu_write(cpu_tsc_khz, khz);
 }
 
 static int kvmclock_cpufreq_notifier(struct notifier_block *nb, unsigned long val,
@@ -4455,8 +4568,7 @@ static int kvmclock_cpufreq_notifier(struct notifier_block *nb, unsigned long va
                kvm_for_each_vcpu(i, vcpu, kvm) {
                        if (vcpu->cpu != freq->cpu)
                                continue;
-                       if (!kvm_request_guest_time_update(vcpu))
-                               continue;
+                       kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu);
                        if (vcpu->cpu != smp_processor_id())
                                send_ipi = 1;
                }
@@ -4511,11 +4623,22 @@ static void kvm_timer_init(void)
 {
        int cpu;
 
+       max_tsc_khz = tsc_khz;
        register_hotcpu_notifier(&kvmclock_cpu_notifier_block);
        if (!boot_cpu_has(X86_FEATURE_CONSTANT_TSC)) {
+#ifdef CONFIG_CPU_FREQ
+               struct cpufreq_policy policy;
+               memset(&policy, 0, sizeof(policy));
+               cpu = get_cpu();
+               cpufreq_get_policy(&policy, cpu);
+               if (policy.cpuinfo.max_freq)
+                       max_tsc_khz = policy.cpuinfo.max_freq;
+               put_cpu();
+#endif
                cpufreq_register_notifier(&kvmclock_cpufreq_notifier_block,
                                          CPUFREQ_TRANSITION_NOTIFIER);
        }
+       pr_debug("kvm: max_tsc_khz = %lu\n", max_tsc_khz);
        for_each_online_cpu(cpu)
                smp_call_function_single(cpu, tsc_khz_changed, NULL, 1);
 }
@@ -4595,7 +4718,6 @@ int kvm_arch_init(void *opaque)
 
        kvm_x86_ops = ops;
        kvm_mmu_set_nonpresent_ptes(0ull, 0ull);
-       kvm_mmu_set_base_ptes(PT_PRESENT_MASK);
        kvm_mmu_set_mask_ptes(PT_USER_MASK, PT_ACCESSED_MASK,
                        PT_DIRTY_MASK, PT64_NX_MASK, 0);
 
@@ -5035,8 +5157,8 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
                        kvm_mmu_unload(vcpu);
                if (kvm_check_request(KVM_REQ_MIGRATE_TIMER, vcpu))
                        __kvm_migrate_timers(vcpu);
-               if (kvm_check_request(KVM_REQ_KVMCLOCK_UPDATE, vcpu)) {
-                       r = kvm_write_guest_time(vcpu);
+               if (kvm_check_request(KVM_REQ_CLOCK_UPDATE, vcpu)) {
+                       r = kvm_guest_time_update(vcpu);
                        if (unlikely(r))
                                goto out;
                }
@@ -5058,6 +5180,12 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
                        vcpu->fpu_active = 0;
                        kvm_x86_ops->fpu_deactivate(vcpu);
                }
+               if (kvm_check_request(KVM_REQ_APF_HALT, vcpu)) {
+                       /* Page is swapped out. Do synthetic halt */
+                       vcpu->arch.apf.halted = true;
+                       r = 1;
+                       goto out;
+               }
        }
 
        r = kvm_mmu_reload(vcpu);
@@ -5186,7 +5314,8 @@ static int __vcpu_run(struct kvm_vcpu *vcpu)
 
        r = 1;
        while (r > 0) {
-               if (vcpu->arch.mp_state == KVM_MP_STATE_RUNNABLE)
+               if (vcpu->arch.mp_state == KVM_MP_STATE_RUNNABLE &&
+                   !vcpu->arch.apf.halted)
                        r = vcpu_enter_guest(vcpu);
                else {
                        srcu_read_unlock(&kvm->srcu, vcpu->srcu_idx);
@@ -5199,6 +5328,7 @@ static int __vcpu_run(struct kvm_vcpu *vcpu)
                                        vcpu->arch.mp_state =
                                                KVM_MP_STATE_RUNNABLE;
                                case KVM_MP_STATE_RUNNABLE:
+                                       vcpu->arch.apf.halted = false;
                                        break;
                                case KVM_MP_STATE_SIPI_RECEIVED:
                                default:
@@ -5220,6 +5350,9 @@ static int __vcpu_run(struct kvm_vcpu *vcpu)
                        vcpu->run->exit_reason = KVM_EXIT_INTR;
                        ++vcpu->stat.request_irq_exits;
                }
+
+               kvm_check_async_pf_completion(vcpu);
+
                if (signal_pending(current)) {
                        r = -EINTR;
                        vcpu->run->exit_reason = KVM_EXIT_INTR;
@@ -5244,6 +5377,9 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
        int r;
        sigset_t sigsaved;
 
+       if (!tsk_used_math(current) && init_fpu(current))
+               return -ENOMEM;
+
        if (vcpu->sigset_active)
                sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);
 
@@ -5255,8 +5391,12 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
        }
 
        /* re-sync apic's tpr */
-       if (!irqchip_in_kernel(vcpu->kvm))
-               kvm_set_cr8(vcpu, kvm_run->cr8);
+       if (!irqchip_in_kernel(vcpu->kvm)) {
+               if (kvm_set_cr8(vcpu, kvm_run->cr8) != 0) {
+                       r = -EINVAL;
+                       goto out;
+               }
+       }
 
        if (vcpu->arch.pio.count || vcpu->mmio_needed) {
                if (vcpu->mmio_needed) {
@@ -5265,7 +5405,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
                        vcpu->mmio_needed = 0;
                }
                vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
-               r = emulate_instruction(vcpu, 0, 0, EMULTYPE_NO_DECODE);
+               r = emulate_instruction(vcpu, EMULTYPE_NO_DECODE);
                srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
                if (r != EMULATE_DONE) {
                        r = 0;
@@ -5378,7 +5518,7 @@ int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
 
        sregs->cr0 = kvm_read_cr0(vcpu);
        sregs->cr2 = vcpu->arch.cr2;
-       sregs->cr3 = vcpu->arch.cr3;
+       sregs->cr3 = kvm_read_cr3(vcpu);
        sregs->cr4 = kvm_read_cr4(vcpu);
        sregs->cr8 = kvm_get_cr8(vcpu);
        sregs->efer = vcpu->arch.efer;
@@ -5446,8 +5586,9 @@ int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
        kvm_x86_ops->set_gdt(vcpu, &dt);
 
        vcpu->arch.cr2 = sregs->cr2;
-       mmu_reset_needed |= vcpu->arch.cr3 != sregs->cr3;
+       mmu_reset_needed |= kvm_read_cr3(vcpu) != sregs->cr3;
        vcpu->arch.cr3 = sregs->cr3;
+       __set_bit(VCPU_EXREG_CR3, (ulong *)&vcpu->arch.regs_avail);
 
        kvm_set_cr8(vcpu, sregs->cr8);
 
@@ -5461,8 +5602,10 @@ int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
 
        mmu_reset_needed |= kvm_read_cr4(vcpu) != sregs->cr4;
        kvm_x86_ops->set_cr4(vcpu, sregs->cr4);
+       if (sregs->cr4 & X86_CR4_OSXSAVE)
+               update_cpuid(vcpu);
        if (!is_long_mode(vcpu) && is_pae(vcpu)) {
-               load_pdptrs(vcpu, vcpu->arch.walk_mmu, vcpu->arch.cr3);
+               load_pdptrs(vcpu, vcpu->arch.walk_mmu, kvm_read_cr3(vcpu));
                mmu_reset_needed = 1;
        }
 
@@ -5713,6 +5856,8 @@ free_vcpu:
 
 void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
 {
+       vcpu->arch.apf.msr_val = 0;
+
        vcpu_load(vcpu);
        kvm_mmu_unload(vcpu);
        vcpu_put(vcpu);
@@ -5732,6 +5877,11 @@ int kvm_arch_vcpu_reset(struct kvm_vcpu *vcpu)
        vcpu->arch.dr7 = DR7_FIXED_1;
 
        kvm_make_request(KVM_REQ_EVENT, vcpu);
+       vcpu->arch.apf.msr_val = 0;
+
+       kvm_clear_async_pf_completion_queue(vcpu);
+       kvm_async_pf_hash_reset(vcpu);
+       vcpu->arch.apf.halted = false;
 
        return kvm_x86_ops->vcpu_reset(vcpu);
 }
@@ -5746,7 +5896,7 @@ int kvm_arch_hardware_enable(void *garbage)
        list_for_each_entry(kvm, &vm_list, vm_list)
                kvm_for_each_vcpu(i, vcpu, kvm)
                        if (vcpu->cpu == smp_processor_id())
-                               kvm_request_guest_time_update(vcpu);
+                               kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu);
        return kvm_x86_ops->hardware_enable(garbage);
 }
 
@@ -5797,6 +5947,9 @@ int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
        }
        vcpu->arch.pio_data = page_address(page);
 
+       if (!kvm->arch.virtual_tsc_khz)
+               kvm_arch_set_tsc_khz(kvm, max_tsc_khz);
+
        r = kvm_mmu_create(vcpu);
        if (r < 0)
                goto fail_free_pio_data;
@@ -5818,6 +5971,8 @@ int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
        if (!zalloc_cpumask_var(&vcpu->arch.wbinvd_dirty_mask, GFP_KERNEL))
                goto fail_free_mce_banks;
 
+       kvm_async_pf_hash_reset(vcpu);
+
        return 0;
 fail_free_mce_banks:
        kfree(vcpu->arch.mce_banks);
@@ -5843,13 +5998,8 @@ void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
        free_page((unsigned long)vcpu->arch.pio_data);
 }
 
-struct  kvm *kvm_arch_create_vm(void)
+int kvm_arch_init_vm(struct kvm *kvm)
 {
-       struct kvm *kvm = kzalloc(sizeof(struct kvm), GFP_KERNEL);
-
-       if (!kvm)
-               return ERR_PTR(-ENOMEM);
-
        INIT_LIST_HEAD(&kvm->arch.active_mmu_pages);
        INIT_LIST_HEAD(&kvm->arch.assigned_dev_head);
 
@@ -5858,7 +6008,7 @@ struct  kvm *kvm_arch_create_vm(void)
 
        spin_lock_init(&kvm->arch.tsc_write_lock);
 
-       return kvm;
+       return 0;
 }
 
 static void kvm_unload_vcpu_mmu(struct kvm_vcpu *vcpu)
@@ -5876,8 +6026,10 @@ static void kvm_free_vcpus(struct kvm *kvm)
        /*
         * Unpin any mmu pages first.
         */
-       kvm_for_each_vcpu(i, vcpu, kvm)
+       kvm_for_each_vcpu(i, vcpu, kvm) {
+               kvm_clear_async_pf_completion_queue(vcpu);
                kvm_unload_vcpu_mmu(vcpu);
+       }
        kvm_for_each_vcpu(i, vcpu, kvm)
                kvm_arch_vcpu_free(vcpu);
 
@@ -5901,13 +6053,10 @@ void kvm_arch_destroy_vm(struct kvm *kvm)
        kfree(kvm->arch.vpic);
        kfree(kvm->arch.vioapic);
        kvm_free_vcpus(kvm);
-       kvm_free_physmem(kvm);
        if (kvm->arch.apic_access_page)
                put_page(kvm->arch.apic_access_page);
        if (kvm->arch.ept_identity_pagetable)
                put_page(kvm->arch.ept_identity_pagetable);
-       cleanup_srcu_struct(&kvm->srcu);
-       kfree(kvm);
 }
 
 int kvm_arch_prepare_memory_region(struct kvm *kvm,
@@ -5988,7 +6137,9 @@ void kvm_arch_flush_shadow(struct kvm *kvm)
 
 int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
 {
-       return vcpu->arch.mp_state == KVM_MP_STATE_RUNNABLE
+       return (vcpu->arch.mp_state == KVM_MP_STATE_RUNNABLE &&
+               !vcpu->arch.apf.halted)
+               || !list_empty_careful(&vcpu->async_pf.done)
                || vcpu->arch.mp_state == KVM_MP_STATE_SIPI_RECEIVED
                || vcpu->arch.nmi_pending ||
                (kvm_arch_interrupt_allowed(vcpu) &&
@@ -6047,6 +6198,147 @@ void kvm_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags)
 }
 EXPORT_SYMBOL_GPL(kvm_set_rflags);
 
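+/*
+ * An async page the guest was waiting on has arrived: retry the fault
+ * once through the MMU so the mapping gets instantiated, but only if
+ * the vcpu is still in the same paging mode (and, with shadow paging,
+ * still on the same cr3) as when the fault was queued.
+ */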
+void kvm_arch_async_page_ready(struct kvm_vcpu *vcpu, struct kvm_async_pf *work)
+{
+       int r;
+
+       if ((vcpu->arch.mmu.direct_map != work->arch.direct_map) ||
+             is_error_page(work->page))
+               return;
+
+       r = kvm_mmu_reload(vcpu);
+       if (unlikely(r))
+               return;
+
+       if (!vcpu->arch.mmu.direct_map &&
+             work->arch.cr3 != vcpu->arch.mmu.get_cr3(vcpu))
+               return;
+
+       vcpu->arch.mmu.page_fault(vcpu, work->gva, 0, true);
+}
+
+static inline u32 kvm_async_pf_hash_fn(gfn_t gfn)
+{
+       return hash_32(gfn & 0xffffffff, order_base_2(ASYNC_PF_PER_VCPU));
+}
+
+static inline u32 kvm_async_pf_next_probe(u32 key)
+{
+       return (key + 1) & (roundup_pow_of_two(ASYNC_PF_PER_VCPU) - 1);
+}
+
+static void kvm_add_async_pf_gfn(struct kvm_vcpu *vcpu, gfn_t gfn)
+{
+       u32 key = kvm_async_pf_hash_fn(gfn);
+
+       while (vcpu->arch.apf.gfns[key] != ~0)
+               key = kvm_async_pf_next_probe(key);
+
+       vcpu->arch.apf.gfns[key] = gfn;
+}
+
+static u32 kvm_async_pf_gfn_slot(struct kvm_vcpu *vcpu, gfn_t gfn)
+{
+       int i;
+       u32 key = kvm_async_pf_hash_fn(gfn);
+
+       for (i = 0; i < roundup_pow_of_two(ASYNC_PF_PER_VCPU) &&
+                    (vcpu->arch.apf.gfns[key] != gfn &&
+                     vcpu->arch.apf.gfns[key] != ~0); i++)
+               key = kvm_async_pf_next_probe(key);
+
+       return key;
+}
+
+bool kvm_find_async_pf_gfn(struct kvm_vcpu *vcpu, gfn_t gfn)
+{
+       return vcpu->arch.apf.gfns[kvm_async_pf_gfn_slot(vcpu, gfn)] == gfn;
+}
+
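+/*
+ * Deletion from this open-addressed table shifts later entries back
+ * instead of leaving tombstones.  Hypothetical 4-slot example with
+ * hash(a) = hash(b) = 1, hash(c) = 2: inserting a, b, c yields
+ * [_, a, b, c]; deleting a moves b to slot 1 (its home slot), then c
+ * to slot 2, leaving [_, b, c, _] with both survivors still reachable
+ * by probing from their hash.
+ */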
+static void kvm_del_async_pf_gfn(struct kvm_vcpu *vcpu, gfn_t gfn)
+{
+       u32 i, j, k;
+
+       i = j = kvm_async_pf_gfn_slot(vcpu, gfn);
+       while (true) {
+               vcpu->arch.apf.gfns[i] = ~0;
+               do {
+                       j = kvm_async_pf_next_probe(j);
+                       if (vcpu->arch.apf.gfns[j] == ~0)
+                               return;
+                       k = kvm_async_pf_hash_fn(vcpu->arch.apf.gfns[j]);
+                       /*
+                        * k lies cyclically in ]i,j]
+                        * |    i.k.j |
+                        * |....j i.k.| or  |.k..j i...|
+                        */
+               } while ((i <= j) ? (i < k && k <= j) : (i < k || k <= j));
+               vcpu->arch.apf.gfns[i] = vcpu->arch.apf.gfns[j];
+               i = j;
+       }
+}
+
+static int apf_put_user(struct kvm_vcpu *vcpu, u32 val)
+{
+       return kvm_write_guest_cached(vcpu->kvm, &vcpu->arch.apf.data, &val,
+                                     sizeof(val));
+}
+
+void kvm_arch_async_page_not_present(struct kvm_vcpu *vcpu,
+                                    struct kvm_async_pf *work)
+{
+       struct x86_exception fault;
+
+       trace_kvm_async_pf_not_present(work->arch.token, work->gva);
+       kvm_add_async_pf_gfn(vcpu, work->arch.gfn);
+
+       if (!(vcpu->arch.apf.msr_val & KVM_ASYNC_PF_ENABLED) ||
+           (vcpu->arch.apf.send_user_only &&
+            kvm_x86_ops->get_cpl(vcpu) == 0))
+               kvm_make_request(KVM_REQ_APF_HALT, vcpu);
+       else if (!apf_put_user(vcpu, KVM_PV_REASON_PAGE_NOT_PRESENT)) {
+               fault.vector = PF_VECTOR;
+               fault.error_code_valid = true;
+               fault.error_code = 0;
+               fault.nested_page_fault = false;
+               fault.address = work->arch.token;
+               kvm_inject_page_fault(vcpu, &fault);
+       }
+}
+
+void kvm_arch_async_page_present(struct kvm_vcpu *vcpu,
+                                struct kvm_async_pf *work)
+{
+       struct x86_exception fault;
+
+       trace_kvm_async_pf_ready(work->arch.token, work->gva);
+       if (is_error_page(work->page))
+               work->arch.token = ~0; /* broadcast wakeup */
+       else
+               kvm_del_async_pf_gfn(vcpu, work->arch.gfn);
+
+       if ((vcpu->arch.apf.msr_val & KVM_ASYNC_PF_ENABLED) &&
+           !apf_put_user(vcpu, KVM_PV_REASON_PAGE_READY)) {
+               fault.vector = PF_VECTOR;
+               fault.error_code_valid = true;
+               fault.error_code = 0;
+               fault.nested_page_fault = false;
+               fault.address = work->arch.token;
+               kvm_inject_page_fault(vcpu, &fault);
+       }
+       vcpu->arch.apf.halted = false;
+}
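
On the guest side, the two apf_put_user() writes above are consumed by
the page-fault handler, which distinguishes a synthetic "wait" fault
from a "ready" wakeup by the reason word; roughly (names follow the
Linux guest implementation and are assumptions here, not part of this
patch):

    u32 reason = apf_reason.reason;   /* area registered via MSR_KVM_ASYNC_PF_EN */
    apf_reason.reason = 0;

    switch (reason) {
    case KVM_PV_REASON_PAGE_NOT_PRESENT:
            kvm_async_pf_task_wait(read_cr2());   /* token arrives in CR2 */
            break;
    case KVM_PV_REASON_PAGE_READY:
            kvm_async_pf_task_wake(read_cr2());
            break;
    default:
            break;  /* reason == 0: an ordinary page fault */
    }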
+
+bool kvm_arch_can_inject_async_page_present(struct kvm_vcpu *vcpu)
+{
+       if (!(vcpu->arch.apf.msr_val & KVM_ASYNC_PF_ENABLED))
+               return true;
+       else
+               return !kvm_event_needs_reinjection(vcpu) &&
+                       kvm_x86_ops->interrupt_allowed(vcpu);
+}
+
 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_exit);
 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_inj_virq);
 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_page_fault);