- Update to 3.4-rc7.
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 1457be3..7e0f8e1 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -1582,6 +1582,7 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data)
        case MSR_VM_HSAVE_PA:
        case MSR_AMD64_PATCH_LOADER:
+       case MSR_NHM_SNB_PKG_CST_CFG_CTL: /* 0xe2 */
                break;
        case 0x200 ... 0x2ff:
                return set_msr_mtrr(vcpu, msr, data);
        case MSR_IA32_APICBASE:
@@ -1904,6 +1905,7 @@ int kvm_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
        case MSR_K8_INT_PENDING_MSG:
        case MSR_AMD64_NB_CFG:
        case MSR_FAM10H_MMIO_CONF_BASE:
+       case MSR_NHM_SNB_PKG_CST_CFG_CTL: /* 0xe2 */
                data = 0;
                break;
        case MSR_P6_PERFCTR0:
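
The two hunks above make MSR 0xe2 (MSR_NHM_SNB_PKG_CST_CFG_CTL, the Nehalem/Sandy Bridge package C-state configuration control register) a benign MSR for guests: writes are silently accepted in kvm_set_msr_common() and reads in kvm_get_msr_common() return 0, instead of injecting #GP when a guest power/idle driver probes the package C-state limit. A minimal way to verify this from inside a Linux guest, assuming root and the msr module (/dev/cpu/0/msr) are available; this test program is illustrative only and not part of the patch:

/* Reads MSR 0xe2 through the guest's msr driver; with the patch the read
 * succeeds and returns 0, without it the injected #GP surfaces as -EIO. */
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
        uint64_t val;
        int fd = open("/dev/cpu/0/msr", O_RDONLY);

        if (fd < 0) {
                perror("open /dev/cpu/0/msr");
                return 1;
        }
        /* The msr driver reads the MSR whose index equals the pread offset. */
        if (pread(fd, &val, sizeof(val), 0xe2) != sizeof(val)) {
                perror("rdmsr 0xe2");
                close(fd);
                return 1;
        }
        printf("MSR 0xe2 = 0x%llx\n", (unsigned long long)val);
        close(fd);
        return 0;
}

Before the patch, the guest's msr driver catches the injected #GP and the pread() fails with EIO; afterwards it reports the zero value supplied by kvm_get_msr_common().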
@@ -6364,7 +6366,7 @@ void kvm_arch_commit_memory_region(struct kvm *kvm,
        if (!user_alloc && !old.user_alloc && old.rmap && !npages) {
                int ret;
 
-               ret = vm_munmap(current->mm, old.userspace_addr,
+               ret = vm_munmap(old.userspace_addr,
                                old.npages * PAGE_SIZE);
                if (ret < 0)
                        printk(KERN_WARNING
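
The hunk above follows the 3.4 mm API: vm_munmap() takes only an address and a length and always operates on current->mm, so the call site no longer passes an mm pointer. For reference, a trimmed sketch of the upstream 3.4 helper (paraphrased here for context, not part of this patch):

/* Sketch of the 3.4 vm_munmap() helper: it takes mmap_sem on current->mm
 * itself, which is why callers such as this one drop the mm argument. */
int vm_munmap(unsigned long start, size_t len)
{
        int ret;
        struct mm_struct *mm = current->mm;

        down_write(&mm->mmap_sem);
        ret = do_munmap(mm, start, len);
        up_write(&mm->mmap_sem);
        return ret;
}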
@@ -6581,6 +6583,7 @@ void kvm_arch_async_page_present(struct kvm_vcpu *vcpu,
                kvm_inject_page_fault(vcpu, &fault);
        }
        vcpu->arch.apf.halted = false;
+       vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
 }
 
 bool kvm_arch_can_inject_async_page_present(struct kvm_vcpu *vcpu)
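
The last hunk marks the vCPU runnable again once an async page fault is resolved; without it, a vCPU that halted while waiting for the page could be left in a non-runnable mp_state and never re-enter the guest. A paraphrased sketch of the run-loop condition this interacts with (the helper name here is made up for illustration; in x86.c the check is open-coded in __vcpu_run()):

/* Hypothetical helper mirroring the open-coded __vcpu_run() test: the guest
 * is only re-entered once mp_state is RUNNABLE and no async-page-fault halt
 * is pending, which is exactly what the added assignment restores. */
static bool vcpu_can_run(struct kvm_vcpu *vcpu)
{
        return vcpu->arch.mp_state == KVM_MP_STATE_RUNNABLE &&
               !vcpu->arch.apf.halted;
}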