KVM: fix kvmclock regression due to missing clock update
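This is the cumulative per-file diff to arch/x86/kvm/x86.c (from the linux-flexiantxendom0-natty tree), so it folds several independent changes in alongside the clock-update fix itself: conversion of direct vcpu->arch.cr3 reads to the kvm_read_cr3() accessor, the switch from __get_cpu_var() to __this_cpu_read()/__this_cpu_write(), the x86_emulate_instruction() interface rework, an FPU-allocation guard in the vcpu run ioctl, and the kvmclock fix proper in kvm_arch_vcpu_load(); notes follow the relevant hunks.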
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 1d54cb7..0556e05 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -473,8 +473,8 @@ static bool pdptrs_changed(struct kvm_vcpu *vcpu)
                      (unsigned long *)&vcpu->arch.regs_avail))
                return true;
 
-       gfn = (vcpu->arch.cr3 & ~31u) >> PAGE_SHIFT;
-       offset = (vcpu->arch.cr3 & ~31u) & (PAGE_SIZE - 1);
+       gfn = (kvm_read_cr3(vcpu) & ~31u) >> PAGE_SHIFT;
+       offset = (kvm_read_cr3(vcpu) & ~31u) & (PAGE_SIZE - 1);
        r = kvm_read_nested_guest_page(vcpu, gfn, pdpte, offset, sizeof(pdpte),
                                       PFERR_USER_MASK | PFERR_WRITE_MASK);
        if (r < 0)
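Several hunks below make the same substitution: direct reads of vcpu->arch.cr3 become kvm_read_cr3(). The accessor lives in kvm_cache_regs.h and fetches CR3 from hardware only when the cached copy is stale, roughly as follows (a sketch of the era's helper; the decache_cr3 callback name is from memory and may differ):

    static inline unsigned long kvm_read_cr3(struct kvm_vcpu *vcpu)
    {
            /* Refill the cache if CR3 was last written by the guest
             * and only the VMCS/VMCB holds the current value. */
            if (!test_bit(VCPU_EXREG_CR3, (ulong *)&vcpu->arch.regs_avail))
                    kvm_x86_ops->decache_cr3(vcpu);
            return vcpu->arch.cr3;
    }

The payoff is on VMX: CR3 no longer has to be read back from the VMCS on every exit; it is fetched on demand, with VCPU_EXREG_CR3 in regs_avail tracking whether vcpu->arch.cr3 is current.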
@@ -519,7 +519,7 @@ int kvm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
                } else
 #endif
                if (is_pae(vcpu) && !load_pdptrs(vcpu, vcpu->arch.walk_mmu,
-                                                vcpu->arch.cr3))
+                                                kvm_read_cr3(vcpu)))
                        return 1;
        }
 
@@ -611,7 +611,8 @@ int kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
                        return 1;
        } else if (is_paging(vcpu) && (cr4 & X86_CR4_PAE)
                   && ((cr4 ^ old_cr4) & pdptr_bits)
-                  && !load_pdptrs(vcpu, vcpu->arch.walk_mmu, vcpu->arch.cr3))
+                  && !load_pdptrs(vcpu, vcpu->arch.walk_mmu,
+                                  kvm_read_cr3(vcpu)))
                return 1;
 
        if (cr4 & X86_CR4_VMXE)
@@ -631,7 +632,7 @@ EXPORT_SYMBOL_GPL(kvm_set_cr4);
 
 int kvm_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3)
 {
-       if (cr3 == vcpu->arch.cr3 && !pdptrs_changed(vcpu)) {
+       if (cr3 == kvm_read_cr3(vcpu) && !pdptrs_changed(vcpu)) {
                kvm_mmu_sync_roots(vcpu);
                kvm_mmu_flush_tlb(vcpu);
                return 0;
@@ -666,6 +667,7 @@ int kvm_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3)
        if (unlikely(!gfn_to_memslot(vcpu->kvm, cr3 >> PAGE_SHIFT)))
                return 1;
        vcpu->arch.cr3 = cr3;
+       __set_bit(VCPU_EXREG_CR3, (ulong *)&vcpu->arch.regs_avail);
        vcpu->arch.mmu.new_cr3(vcpu);
        return 0;
 }
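The __set_bit() added above is the write half of that caching protocol: once KVM itself stores a new value into vcpu->arch.cr3, VCPU_EXREG_CR3 must be marked available so a later kvm_read_cr3() trusts the cache instead of pulling a stale CR3 out of hardware. The same two-line pattern reappears in kvm_arch_vcpu_ioctl_set_sregs() below; a hypothetical helper (not in the patch) that keeps the pair together would look like:

    /* hypothetical convenience wrapper, for illustration only */
    static inline void kvm_write_cr3_cached(struct kvm_vcpu *vcpu,
                                            unsigned long cr3)
    {
            vcpu->arch.cr3 = cr3;
            __set_bit(VCPU_EXREG_CR3, (ulong *)&vcpu->arch.regs_avail);
    }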
@@ -985,7 +987,7 @@ static inline u64 nsec_to_cycles(u64 nsec)
        if (kvm_tsc_changes_freq())
                printk_once(KERN_WARNING
                 "kvm: unreliable cycle conversion on adjustable rate TSC\n");
-       ret = nsec * __get_cpu_var(cpu_tsc_khz);
+       ret = nsec * __this_cpu_read(cpu_tsc_khz);
        do_div(ret, USEC_PER_SEC);
        return ret;
 }
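This hunk and two more below (kvm_guest_time_update(), tsc_bad()/tsc_khz_changed()) convert per-CPU accesses from __get_cpu_var() to __this_cpu_read()/__this_cpu_write(). The this_cpu operations are preferred because, on x86, they compile to a single %gs-prefixed instruction instead of first computing this CPU's slot address and then dereferencing it. The pattern in isolation (demo_khz is a made-up variable for illustration):

    DEFINE_PER_CPU(unsigned long, demo_khz);

    /* old: take the address of this CPU's slot, then load/store */
    unsigned long old_read(void)      { return __get_cpu_var(demo_khz); }
    void old_write(unsigned long v)   { __get_cpu_var(demo_khz) = v; }

    /* new: one segment-prefixed per-CPU operation */
    unsigned long new_read(void)      { return __this_cpu_read(demo_khz); }
    void new_write(unsigned long v)   { __this_cpu_write(demo_khz, v); }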
@@ -1070,7 +1072,7 @@ static int kvm_guest_time_update(struct kvm_vcpu *v)
        local_irq_save(flags);
        kvm_get_msr(v, MSR_IA32_TSC, &tsc_timestamp);
        kernel_ns = get_kernel_ns();
-       this_tsc_khz = __get_cpu_var(cpu_tsc_khz);
+       this_tsc_khz = __this_cpu_read(cpu_tsc_khz);
 
        if (unlikely(this_tsc_khz == 0)) {
                local_irq_restore(flags);
@@ -2098,8 +2100,8 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
                if (check_tsc_unstable()) {
                        kvm_x86_ops->adjust_tsc_offset(vcpu, -tsc_delta);
                        vcpu->arch.tsc_catchup = 1;
-                       kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu);
                }
+               kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu);
                if (vcpu->cpu != cpu)
                        kvm_migrate_timers(vcpu);
                vcpu->cpu = cpu;
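This hunk is the regression fix the commit subject names. kvm_arch_vcpu_load() runs each time a vCPU is scheduled onto a physical CPU; with the request inside the check_tsc_unstable() branch, a vCPU migrating between CPUs with a stable TSC never refreshed its kvmclock parameters for the new CPU, so guests using kvm-clock could read stale per-vCPU time. Hoisting kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu) out of the branch restores an update on every cross-CPU load. The request is consumed on the next entry, roughly (sketch of the era's vcpu_enter_guest()):

    if (kvm_check_request(KVM_REQ_CLOCK_UPDATE, vcpu)) {
            /* recompute the pvclock parameters from this CPU's
             * cpu_tsc_khz, read via __this_cpu_read() above */
            r = kvm_guest_time_update(vcpu);
            if (unlikely(r))
                    goto out;
    }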
@@ -4073,7 +4075,7 @@ static unsigned long emulator_get_cr(int cr, struct kvm_vcpu *vcpu)
                value = vcpu->arch.cr2;
                break;
        case 3:
-               value = vcpu->arch.cr3;
+               value = kvm_read_cr3(vcpu);
                break;
        case 4:
                value = kvm_read_cr4(vcpu);
@@ -4291,7 +4293,7 @@ static void init_emulate_ctxt(struct kvm_vcpu *vcpu)
        memcpy(c->regs, vcpu->arch.regs, sizeof c->regs);
 }
 
-int kvm_inject_realmode_interrupt(struct kvm_vcpu *vcpu, int irq)
+int kvm_inject_realmode_interrupt(struct kvm_vcpu *vcpu, int irq, int inc_eip)
 {
        struct decode_cache *c = &vcpu->arch.emulate_ctxt.decode;
        int ret;
@@ -4300,7 +4302,8 @@ int kvm_inject_realmode_interrupt(struct kvm_vcpu *vcpu, int irq)
 
        vcpu->arch.emulate_ctxt.decode.op_bytes = 2;
        vcpu->arch.emulate_ctxt.decode.ad_bytes = 2;
-       vcpu->arch.emulate_ctxt.decode.eip = vcpu->arch.emulate_ctxt.eip;
+       vcpu->arch.emulate_ctxt.decode.eip = vcpu->arch.emulate_ctxt.eip +
+                                                                inc_eip;
        ret = emulate_int_real(&vcpu->arch.emulate_ctxt, &emulate_ops, irq);
 
        if (ret != X86EMUL_CONTINUE)
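kvm_inject_realmode_interrupt() grows an inc_eip argument: when a software interrupt is (re)injected in real mode, the return EIP pushed by the emulated interrupt must point past the INT instruction, so callers pass the instruction length to add to emulate_ctxt.eip before emulate_int_real() builds the frame. A sketch of the intended VMX call site (reconstructed; the field names are from memory of that era's vmx.c and may differ):

    /* vmx_inject_irq(), vm86 real-mode path */
    if (vmx->rmode.vm86_active) {
            int inc_eip = 0;

            if (vcpu->arch.interrupt.soft)
                    /* skip the INT instruction that caused the exit */
                    inc_eip = vcpu->arch.event_exit_inst_len;
            if (kvm_inject_realmode_interrupt(vcpu, irq, inc_eip) != EMULATE_DONE)
                    kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu);
            return;
    }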
@@ -4363,10 +4366,11 @@ static bool reexecute_instruction(struct kvm_vcpu *vcpu, gva_t gva)
        return false;
 }
 
-int emulate_instruction(struct kvm_vcpu *vcpu,
-                       unsigned long cr2,
-                       u16 error_code,
-                       int emulation_type)
+int x86_emulate_instruction(struct kvm_vcpu *vcpu,
+                           unsigned long cr2,
+                           int emulation_type,
+                           void *insn,
+                           int insn_len)
 {
        int r;
        struct decode_cache *c = &vcpu->arch.emulate_ctxt.decode;
@@ -4387,7 +4391,7 @@ int emulate_instruction(struct kvm_vcpu *vcpu,
                vcpu->arch.emulate_ctxt.have_exception = false;
                vcpu->arch.emulate_ctxt.perm_ok = false;
 
-               r = x86_decode_insn(&vcpu->arch.emulate_ctxt);
+               r = x86_decode_insn(&vcpu->arch.emulate_ctxt, insn, insn_len);
                if (r == X86EMUL_PROPAGATE_FAULT)
                        goto done;
 
@@ -4474,7 +4478,7 @@ done:
 
        return r;
 }
-EXPORT_SYMBOL_GPL(emulate_instruction);
+EXPORT_SYMBOL_GPL(x86_emulate_instruction);
 
 int kvm_fast_pio_out(struct kvm_vcpu *vcpu, int size, unsigned short port)
 {
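Renaming emulate_instruction() to x86_emulate_instruction() makes room for two new parameters: insn and insn_len let a caller hand the decoder pre-fetched instruction bytes (SVM, for instance, can copy them straight from the VMCB on a decode assist) instead of forcing x86_decode_insn() to fetch from guest memory, and the unused error_code parameter is dropped. Existing call sites keep the short spelling through a thin wrapper, which is why the kvm_arch_vcpu_ioctl_run() hunk below calls emulate_instruction(vcpu, EMULTYPE_NO_DECODE) with just two arguments; upstream the wrapper is a static inline in kvm_host.h along these lines:

    static inline int emulate_instruction(struct kvm_vcpu *vcpu,
                                          int emulation_type)
    {
            /* no fault address, no pre-fetched bytes */
            return x86_emulate_instruction(vcpu, 0, emulation_type, NULL, 0);
    }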
@@ -4488,7 +4492,7 @@ EXPORT_SYMBOL_GPL(kvm_fast_pio_out);
 
 static void tsc_bad(void *info)
 {
-       __get_cpu_var(cpu_tsc_khz) = 0;
+       __this_cpu_write(cpu_tsc_khz, 0);
 }
 
 static void tsc_khz_changed(void *data)
@@ -4502,7 +4506,7 @@ static void tsc_khz_changed(void *data)
                khz = cpufreq_quick_get(raw_smp_processor_id());
        if (!khz)
                khz = tsc_khz;
-       __get_cpu_var(cpu_tsc_khz) = khz;
+       __this_cpu_write(cpu_tsc_khz, khz);
 }
 
 static int kvmclock_cpufreq_notifier(struct notifier_block *nb, unsigned long val,
@@ -5373,6 +5377,9 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
        int r;
        sigset_t sigsaved;
 
+       if (!tsk_used_math(current) && init_fpu(current))
+               return -ENOMEM;
+
        if (vcpu->sigset_active)
                sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);
 
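The added guard makes kvm_arch_vcpu_ioctl_run() allocate the calling task's FPU state up front: task FPU areas are allocated lazily on first use, tsk_used_math() is false for a task that has never executed an FP instruction, and init_fpu() performs the allocation (failing with -ENOMEM when memory is tight). Without it, the run path could reach the guest/host FPU switch in kvm_load_guest_fpu() with no host state area to save into.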
@@ -5398,7 +5405,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
                        vcpu->mmio_needed = 0;
                }
                vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
-               r = emulate_instruction(vcpu, 0, 0, EMULTYPE_NO_DECODE);
+               r = emulate_instruction(vcpu, EMULTYPE_NO_DECODE);
                srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
                if (r != EMULATE_DONE) {
                        r = 0;
@@ -5511,7 +5518,7 @@ int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
 
        sregs->cr0 = kvm_read_cr0(vcpu);
        sregs->cr2 = vcpu->arch.cr2;
-       sregs->cr3 = vcpu->arch.cr3;
+       sregs->cr3 = kvm_read_cr3(vcpu);
        sregs->cr4 = kvm_read_cr4(vcpu);
        sregs->cr8 = kvm_get_cr8(vcpu);
        sregs->efer = vcpu->arch.efer;
@@ -5579,8 +5586,9 @@ int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
        kvm_x86_ops->set_gdt(vcpu, &dt);
 
        vcpu->arch.cr2 = sregs->cr2;
-       mmu_reset_needed |= vcpu->arch.cr3 != sregs->cr3;
+       mmu_reset_needed |= kvm_read_cr3(vcpu) != sregs->cr3;
        vcpu->arch.cr3 = sregs->cr3;
+       __set_bit(VCPU_EXREG_CR3, (ulong *)&vcpu->arch.regs_avail);
 
        kvm_set_cr8(vcpu, sregs->cr8);
 
@@ -5597,7 +5605,7 @@ int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
        if (sregs->cr4 & X86_CR4_OSXSAVE)
                update_cpuid(vcpu);
        if (!is_long_mode(vcpu) && is_pae(vcpu)) {
-               load_pdptrs(vcpu, vcpu->arch.walk_mmu, vcpu->arch.cr3);
+               load_pdptrs(vcpu, vcpu->arch.walk_mmu, kvm_read_cr3(vcpu));
                mmu_reset_needed = 1;
        }