#include <linux/debugfs.h>
#include <linux/highmem.h>
#include <linux/file.h>
-#include <linux/sysdev.h>
+#include <linux/syscore_ops.h>
#include <linux/cpu.h>
#include <linux/sched.h>
#include <linux/cpumask.h>
#include <linux/srcu.h>
#include <linux/hugetlb.h>
#include <linux/slab.h>
+#include <linux/sort.h>
+#include <linux/bsearch.h>
#include <asm/processor.h>
#include <asm/io.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>
-#include <asm-generic/bitops/le.h>
#include "coalesced_mmio.h"
#include "async_pf.h"
* kvm->lock --> kvm->slots_lock --> kvm->irq_lock
*/
-DEFINE_SPINLOCK(kvm_lock);
+DEFINE_RAW_SPINLOCK(kvm_lock);
LIST_HEAD(vm_list);
static cpumask_var_t cpus_hardware_enabled;
static long kvm_vcpu_ioctl(struct file *file, unsigned int ioctl,
unsigned long arg);
+#ifdef CONFIG_COMPAT
+static long kvm_vcpu_compat_ioctl(struct file *file, unsigned int ioctl,
+ unsigned long arg);
+#endif
static int hardware_enable_all(void);
static void hardware_disable_all(void);
static struct page *hwpoison_page;
static pfn_t hwpoison_pfn;
-static struct page *fault_page;
-static pfn_t fault_pfn;
+struct page *fault_page;
+pfn_t fault_pfn;
inline int kvm_is_mmio_pfn(pfn_t pfn)
{
if (pfn_valid(pfn)) {
- struct page *head;
+ int reserved;
struct page *tail = pfn_to_page(pfn);
- head = compound_head(tail);
+ struct page *head = compound_trans_head(tail);
+ reserved = PageReserved(head);
if (head != tail) {
- smp_rmb();
/*
- * head may be a dangling pointer.
- * __split_huge_page_refcount clears PageTail
- * before overwriting first_page, so if
- * PageTail is still there it means the head
- * pointer isn't dangling.
+ * "head" is not a dangling pointer
+ * (compound_trans_head takes care of that)
+ * but the hugepage may have been split
+ * from under us (and we may not hold a
+ * reference count on the head page so it can
+ * be reused before we run PageReferenced), so
+ * we have to check PageTail before returning
+ * what we just read.
*/
- if (PageTail(tail)) {
- /*
- * the "head" is not a dangling
- * pointer but the hugepage may have
- * been splitted from under us (and we
- * may not hold a reference count on
- * the head page so it can be reused
- * before we run PageReferenced), so
- * we've to recheck PageTail before
- * returning what we just read.
- */
- int reserved = PageReserved(head);
- smp_rmb();
- if (PageTail(tail))
- return reserved;
- }
+ smp_rmb();
+ if (PageTail(tail))
+ return reserved;
}
return PageReserved(tail);
}
int cpu;
mutex_lock(&vcpu->mutex);
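+ /*
+ * Track which task runs this VCPU so the directed-yield code in
+ * kvm_vcpu_on_spin() can yield_to() it.  vcpu->pid is read under
+ * RCU, hence the synchronize_rcu() below before the old pid is put.
+ */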
+ if (unlikely(vcpu->pid != current->pids[PIDTYPE_PID].pid)) {
+ /* The thread running this VCPU changed. */
+ struct pid *oldpid = vcpu->pid;
+ struct pid *newpid = get_task_pid(current, PIDTYPE_PID);
+ rcu_assign_pointer(vcpu->pid, newpid);
+ synchronize_rcu();
+ put_pid(oldpid);
+ }
cpu = get_cpu();
preempt_notifier_register(&vcpu->preempt_notifier);
kvm_arch_vcpu_load(vcpu, cpu);
zalloc_cpumask_var(&cpus, GFP_ATOMIC);
- raw_spin_lock(&kvm->requests_lock);
- me = smp_processor_id();
+ me = get_cpu();
kvm_for_each_vcpu(i, vcpu, kvm) {
- if (kvm_make_check_request(req, vcpu))
- continue;
+ kvm_make_request(req, vcpu);
cpu = vcpu->cpu;
- if (cpus != NULL && cpu != -1 && cpu != me)
+
+ /* Set ->requests bit before we read ->mode */
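+ /*
+ * (Assumed pairing: the guest-entry path sets ->mode to
+ * IN_GUEST_MODE, issues a barrier, and then rechecks ->requests,
+ * so at least one side observes the other's write and the IPI is
+ * only skipped when the target vcpu will notice the request
+ * itself.)
+ */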
+ smp_mb();
+
+ if (cpus != NULL && cpu != -1 && cpu != me &&
+ kvm_vcpu_exiting_guest_mode(vcpu) != OUTSIDE_GUEST_MODE)
cpumask_set_cpu(cpu, cpus);
}
if (unlikely(cpus == NULL))
smp_call_function_many(cpus, ack_flush, NULL, 1);
else
called = false;
- raw_spin_unlock(&kvm->requests_lock);
+ put_cpu();
free_cpumask_var(cpus);
return called;
}
vcpu->cpu = -1;
vcpu->kvm = kvm;
vcpu->vcpu_id = id;
+ vcpu->pid = NULL;
init_waitqueue_head(&vcpu->wq);
kvm_async_pf_vcpu_init(vcpu);
void kvm_vcpu_uninit(struct kvm_vcpu *vcpu)
{
+ put_pid(vcpu->pid);
kvm_arch_vcpu_uninit(vcpu);
free_page((unsigned long)vcpu->run);
}
return young;
}
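+/*
+ * Like clear_flush_young, but only reports whether the page has been
+ * accessed through a spte; the accessed bit is left untouched, so no
+ * TLB flush is needed.
+ */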
+static int kvm_mmu_notifier_test_young(struct mmu_notifier *mn,
+ struct mm_struct *mm,
+ unsigned long address)
+{
+ struct kvm *kvm = mmu_notifier_to_kvm(mn);
+ int young, idx;
+
+ idx = srcu_read_lock(&kvm->srcu);
+ spin_lock(&kvm->mmu_lock);
+ young = kvm_test_age_hva(kvm, address);
+ spin_unlock(&kvm->mmu_lock);
+ srcu_read_unlock(&kvm->srcu, idx);
+
+ return young;
+}
+
static void kvm_mmu_notifier_release(struct mmu_notifier *mn,
struct mm_struct *mm)
{
.invalidate_range_start = kvm_mmu_notifier_invalidate_range_start,
.invalidate_range_end = kvm_mmu_notifier_invalidate_range_end,
.clear_flush_young = kvm_mmu_notifier_clear_flush_young,
+ .test_young = kvm_mmu_notifier_test_young,
.change_pte = kvm_mmu_notifier_change_pte,
.release = kvm_mmu_notifier_release,
};
goto out_err;
}
- r = kvm_init_mmu_notifier(kvm);
- if (r)
- goto out_err;
-
+ spin_lock_init(&kvm->mmu_lock);
kvm->mm = current->mm;
atomic_inc(&kvm->mm->mm_count);
- spin_lock_init(&kvm->mmu_lock);
- raw_spin_lock_init(&kvm->requests_lock);
kvm_eventfd_init(kvm);
mutex_init(&kvm->lock);
mutex_init(&kvm->irq_lock);
mutex_init(&kvm->slots_lock);
atomic_set(&kvm->users_count, 1);
- spin_lock(&kvm_lock);
+
+ r = kvm_init_mmu_notifier(kvm);
+ if (r)
+ goto out_err;
+
+ raw_spin_lock(&kvm_lock);
list_add(&kvm->vm_list, &vm_list);
- spin_unlock(&kvm_lock);
+ raw_spin_unlock(&kvm_lock);
return kvm;
struct mm_struct *mm = kvm->mm;
kvm_arch_sync_events(kvm);
- spin_lock(&kvm_lock);
+ raw_spin_lock(&kvm_lock);
list_del(&kvm->vm_list);
- spin_unlock(&kvm_lock);
+ raw_spin_unlock(&kvm_lock);
kvm_free_irq_routing(kvm);
for (i = 0; i < KVM_NR_BUSES; i++)
kvm_io_bus_destroy(kvm->buses[i]);
return 0;
}
+#ifndef CONFIG_S390
/*
* Allocation size is twice as large as the actual dirty bitmap size.
* This makes it possible to do double buffering: see x86's
memslot->dirty_bitmap_head = memslot->dirty_bitmap;
return 0;
}
+#endif /* !CONFIG_S390 */
/*
* Allocate some memory and give it an address in the guest physical address
struct kvm_userspace_memory_region *mem,
int user_alloc)
{
- int r, flush_shadow = 0;
+ int r;
gfn_t base_gfn;
unsigned long npages;
unsigned long i;
goto out;
if (mem->guest_phys_addr & (PAGE_SIZE - 1))
goto out;
- if (user_alloc && (mem->userspace_addr & (PAGE_SIZE - 1)))
+ /* We can read the guest memory with __xxx_user() later on. */
+ if (user_alloc &&
+ ((mem->userspace_addr & (PAGE_SIZE - 1)) ||
+ !access_ok(VERIFY_WRITE,
+ (void __user *)(unsigned long)mem->userspace_addr,
+ mem->memory_size)))
goto out;
if (mem->slot >= KVM_MEMORY_SLOTS + KVM_PRIVATE_MEM_SLOTS)
goto out;
if (kvm_create_dirty_bitmap(&new) < 0)
goto out_free;
/* destroy any largepage mappings for dirty tracking */
- if (old.npages)
- flush_shadow = 1;
}
#else /* not defined CONFIG_S390 */
new.user_alloc = user_alloc;
kvm_arch_commit_memory_region(kvm, mem, old, user_alloc);
+ /*
+ * If the new memory slot is created, we need to clear all
+ * mmio sptes.
+ */
+ if (npages && old.base_gfn != mem->guest_phys_addr >> PAGE_SHIFT)
+ kvm_arch_flush_shadow(kvm);
+
kvm_free_physmem_slot(&old, &new);
kfree(old_memslots);
- if (flush_shadow)
- kvm_arch_flush_shadow(kvm);
-
return 0;
out_free:
}
EXPORT_SYMBOL_GPL(is_fault_pfn);
+int is_noslot_pfn(pfn_t pfn)
+{
+ return pfn == bad_pfn;
+}
+EXPORT_SYMBOL_GPL(is_noslot_pfn);
+
+int is_invalid_pfn(pfn_t pfn)
+{
+ return pfn == hwpoison_pfn || pfn == fault_pfn;
+}
+EXPORT_SYMBOL_GPL(is_invalid_pfn);
+
static inline unsigned long bad_hva(void)
{
return PAGE_OFFSET;
return size;
}
-int memslot_id(struct kvm *kvm, gfn_t gfn)
-{
- int i;
- struct kvm_memslots *slots = kvm_memslots(kvm);
- struct kvm_memory_slot *memslot = NULL;
-
- for (i = 0; i < slots->nmemslots; ++i) {
- memslot = &slots->memslots[i];
-
- if (gfn >= memslot->base_gfn
- && gfn < memslot->base_gfn + memslot->npages)
- break;
- }
-
- return memslot - slots->memslots;
-}
-
static unsigned long gfn_to_hva_many(struct kvm_memory_slot *slot, gfn_t gfn,
gfn_t *nr_pages)
{
return fault_pfn;
}
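+/*
+ * Fault a single page in without sleeping for I/O: FOLL_NOWAIT makes
+ * __get_user_pages() start any needed I/O and return immediately, and
+ * FOLL_HWPOISON turns a poisoned page into -EHWPOISON instead of a
+ * SIGBUS.  Used by the async branch of hva_to_pfn() below.
+ */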
+int get_user_page_nowait(struct task_struct *tsk, struct mm_struct *mm,
+ unsigned long start, int write, struct page **page)
+{
+ int flags = FOLL_TOUCH | FOLL_NOWAIT | FOLL_HWPOISON | FOLL_GET;
+
+ if (write)
+ flags |= FOLL_WRITE;
+
+ return __get_user_pages(tsk, mm, start, 1, flags, page, NULL, NULL);
+}
+
+static inline int check_user_page_hwpoison(unsigned long addr)
+{
+ int rc, flags = FOLL_TOUCH | FOLL_HWPOISON | FOLL_WRITE;
+
+ rc = __get_user_pages(current, current->mm, addr, 1,
+ flags, NULL, NULL, NULL);
+ return rc == -EHWPOISON;
+}
+
static pfn_t hva_to_pfn(struct kvm *kvm, unsigned long addr, bool atomic,
bool *async, bool write_fault, bool *writable)
{
if (writable)
*writable = write_fault;
- npages = get_user_pages_fast(addr, 1, write_fault, page);
+ if (async) {
+ down_read(&current->mm->mmap_sem);
+ npages = get_user_page_nowait(current, current->mm,
+ addr, write_fault, page);
+ up_read(&current->mm->mmap_sem);
+ } else
+ npages = get_user_pages_fast(addr, 1, write_fault,
+ page);
/* map read fault as writable if possible */
if (unlikely(!write_fault) && npages == 1) {
return get_fault_pfn();
down_read(&current->mm->mmap_sem);
- if (is_hwpoison_address(addr)) {
+ if (npages == -EHWPOISON ||
+ (!async && check_user_page_hwpoison(addr))) {
+ up_read(&current->mm->mmap_sem);
get_page(hwpoison_page);
return page_to_pfn(hwpoison_page);
addr = gfn_to_hva(kvm, gfn);
if (kvm_is_error_hva(addr))
return -EFAULT;
- r = copy_from_user(data, (void __user *)addr + offset, len);
+ r = __copy_from_user(data, (void __user *)addr + offset, len);
if (r)
return -EFAULT;
return 0;
addr = gfn_to_hva(kvm, gfn);
if (kvm_is_error_hva(addr))
return -EFAULT;
- r = copy_to_user((void __user *)addr + offset, data, len);
+ r = __copy_to_user((void __user *)addr + offset, data, len);
if (r)
return -EFAULT;
mark_page_dirty(kvm, gfn);
if (kvm_is_error_hva(ghc->hva))
return -EFAULT;
- r = copy_to_user((void __user *)ghc->hva, data, len);
+ r = __copy_to_user((void __user *)ghc->hva, data, len);
if (r)
return -EFAULT;
mark_page_dirty_in_slot(kvm, ghc->memslot, ghc->gpa >> PAGE_SHIFT);
}
EXPORT_SYMBOL_GPL(kvm_write_guest_cached);
+int kvm_read_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
+ void *data, unsigned long len)
+{
+ struct kvm_memslots *slots = kvm_memslots(kvm);
+ int r;
+
+ if (slots->generation != ghc->generation)
+ kvm_gfn_to_hva_cache_init(kvm, ghc, ghc->gpa);
+
+ if (kvm_is_error_hva(ghc->hva))
+ return -EFAULT;
+
+ r = __copy_from_user(data, (void __user *)ghc->hva, len);
+ if (r)
+ return -EFAULT;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(kvm_read_guest_cached);
+
int kvm_clear_guest_page(struct kvm *kvm, gfn_t gfn, int offset, int len)
{
return kvm_write_guest_page(kvm, gfn, (const void *) empty_zero_page,
if (memslot && memslot->dirty_bitmap) {
unsigned long rel_gfn = gfn - memslot->base_gfn;
- generic___set_le_bit(rel_gfn, memslot->dirty_bitmap);
+ __set_bit_le(rel_gfn, memslot->dirty_bitmap);
}
}
}
EXPORT_SYMBOL_GPL(kvm_resched);
-void kvm_vcpu_on_spin(struct kvm_vcpu *vcpu)
+void kvm_vcpu_on_spin(struct kvm_vcpu *me)
{
- ktime_t expires;
- DEFINE_WAIT(wait);
-
- prepare_to_wait(&vcpu->wq, &wait, TASK_INTERRUPTIBLE);
-
- /* Sleep for 100 us, and hope lock-holder got scheduled */
- expires = ktime_add_ns(ktime_get(), 100000UL);
- schedule_hrtimeout(&expires, HRTIMER_MODE_ABS);
+ struct kvm *kvm = me->kvm;
+ struct kvm_vcpu *vcpu;
+ int last_boosted_vcpu = me->kvm->last_boosted_vcpu;
+ int yielded = 0;
+ int pass;
+ int i;
- finish_wait(&vcpu->wq, &wait);
+ /*
+ * We boost the priority of a VCPU that is runnable but not
+ * currently running, because it got preempted by something
+ * else and called schedule in __vcpu_run. Hopefully that
+ * VCPU is holding the lock that we need and will release it.
+ * We approximate round-robin by starting at the last boosted VCPU.
+ */
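+ /*
+ * Two passes give a wrap-around scan: the first starts at the
+ * previously boosted vcpu and runs to the end of the array, the
+ * second covers the vcpus before it.
+ */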
+ for (pass = 0; pass < 2 && !yielded; pass++) {
+ kvm_for_each_vcpu(i, vcpu, kvm) {
+ struct task_struct *task = NULL;
+ struct pid *pid;
+ if (!pass && i < last_boosted_vcpu) {
+ i = last_boosted_vcpu;
+ continue;
+ } else if (pass && i > last_boosted_vcpu)
+ break;
+ if (vcpu == me)
+ continue;
+ if (waitqueue_active(&vcpu->wq))
+ continue;
+ rcu_read_lock();
+ pid = rcu_dereference(vcpu->pid);
+ if (pid)
+ task = get_pid_task(vcpu->pid, PIDTYPE_PID);
+ rcu_read_unlock();
+ if (!task)
+ continue;
+ if (task->flags & PF_VCPU) {
+ put_task_struct(task);
+ continue;
+ }
+ if (yield_to(task, 1)) {
+ put_task_struct(task);
+ kvm->last_boosted_vcpu = i;
+ yielded = 1;
+ break;
+ }
+ put_task_struct(task);
+ }
+ }
}
EXPORT_SYMBOL_GPL(kvm_vcpu_on_spin);
static struct file_operations kvm_vcpu_fops = {
.release = kvm_vcpu_release,
.unlocked_ioctl = kvm_vcpu_ioctl,
- .compat_ioctl = kvm_vcpu_ioctl,
+#ifdef CONFIG_COMPAT
+ .compat_ioctl = kvm_vcpu_compat_ioctl,
+#endif
.mmap = kvm_vcpu_mmap,
.llseek = noop_llseek,
};
r = kvm_arch_vcpu_setup(vcpu);
if (r)
- return r;
+ goto vcpu_destroy;
mutex_lock(&kvm->lock);
+ if (!kvm_vcpu_compatible(vcpu)) {
+ r = -EINVAL;
+ goto unlock_vcpu_destroy;
+ }
if (atomic_read(&kvm->online_vcpus) == KVM_MAX_VCPUS) {
r = -EINVAL;
- goto vcpu_destroy;
+ goto unlock_vcpu_destroy;
}
kvm_for_each_vcpu(r, v, kvm)
if (v->vcpu_id == id) {
r = -EEXIST;
- goto vcpu_destroy;
+ goto unlock_vcpu_destroy;
}
BUG_ON(kvm->vcpus[atomic_read(&kvm->online_vcpus)]);
r = create_vcpu_fd(vcpu);
if (r < 0) {
kvm_put_kvm(kvm);
- goto vcpu_destroy;
+ goto unlock_vcpu_destroy;
}
kvm->vcpus[atomic_read(&kvm->online_vcpus)] = vcpu;
mutex_unlock(&kvm->lock);
return r;
-vcpu_destroy:
+unlock_vcpu_destroy:
mutex_unlock(&kvm->lock);
+vcpu_destroy:
kvm_arch_vcpu_destroy(vcpu);
return r;
}
return r;
}
+#ifdef CONFIG_COMPAT
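+/*
+ * Only KVM_SET_SIGNAL_MASK gets explicit translation here, because
+ * compat_sigset_t differs in layout from the native sigset_t; every
+ * other ioctl simply falls through to kvm_vcpu_ioctl().
+ */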
+static long kvm_vcpu_compat_ioctl(struct file *filp,
+ unsigned int ioctl, unsigned long arg)
+{
+ struct kvm_vcpu *vcpu = filp->private_data;
+ void __user *argp = compat_ptr(arg);
+ int r;
+
+ if (vcpu->kvm->mm != current->mm)
+ return -EIO;
+
+ switch (ioctl) {
+ case KVM_SET_SIGNAL_MASK: {
+ struct kvm_signal_mask __user *sigmask_arg = argp;
+ struct kvm_signal_mask kvm_sigmask;
+ compat_sigset_t csigset;
+ sigset_t sigset;
+
+ if (argp) {
+ r = -EFAULT;
+ if (copy_from_user(&kvm_sigmask, argp,
+ sizeof kvm_sigmask))
+ goto out;
+ r = -EINVAL;
+ if (kvm_sigmask.len != sizeof csigset)
+ goto out;
+ r = -EFAULT;
+ if (copy_from_user(&csigset, sigmask_arg->sigset,
+ sizeof csigset))
+ goto out;
+ }
+ sigset_from_compat(&sigset, &csigset);
+ r = kvm_vcpu_ioctl_set_sigmask(vcpu, &sigset);
+ break;
+ }
+ default:
+ r = kvm_vcpu_ioctl(filp, ioctl, arg);
+ }
+
+out:
+ return r;
+}
+#endif
+
static long kvm_vm_ioctl(struct file *filp,
unsigned int ioctl, unsigned long arg)
{
static void hardware_enable(void *junk)
{
- spin_lock(&kvm_lock);
+ raw_spin_lock(&kvm_lock);
hardware_enable_nolock(junk);
- spin_unlock(&kvm_lock);
+ raw_spin_unlock(&kvm_lock);
}
static void hardware_disable_nolock(void *junk)
static void hardware_disable(void *junk)
{
- spin_lock(&kvm_lock);
+ raw_spin_lock(&kvm_lock);
hardware_disable_nolock(junk);
- spin_unlock(&kvm_lock);
+ raw_spin_unlock(&kvm_lock);
}
static void hardware_disable_all_nolock(void)
static void hardware_disable_all(void)
{
- spin_lock(&kvm_lock);
+ raw_spin_lock(&kvm_lock);
hardware_disable_all_nolock();
- spin_unlock(&kvm_lock);
+ raw_spin_unlock(&kvm_lock);
}
static int hardware_enable_all(void)
{
int r = 0;
- spin_lock(&kvm_lock);
+ raw_spin_lock(&kvm_lock);
kvm_usage_count++;
if (kvm_usage_count == 1) {
}
}
- spin_unlock(&kvm_lock);
+ raw_spin_unlock(&kvm_lock);
return r;
}
int i;
for (i = 0; i < bus->dev_count; i++) {
- struct kvm_io_device *pos = bus->devs[i];
+ struct kvm_io_device *pos = bus->range[i].dev;
kvm_iodevice_destructor(pos);
}
kfree(bus);
}
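+/*
+ * Bus devices are kept in an array of kvm_io_range entries sorted by
+ * this comparator.  A lookup key compares equal to any registered
+ * range that fully covers it (key addr >= range addr and key end <=
+ * range end), so bsearch() lands on some matching entry.
+ */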
+int kvm_io_bus_sort_cmp(const void *p1, const void *p2)
+{
+ const struct kvm_io_range *r1 = p1;
+ const struct kvm_io_range *r2 = p2;
+
+ if (r1->addr < r2->addr)
+ return -1;
+ if (r1->addr + r1->len > r2->addr + r2->len)
+ return 1;
+ return 0;
+}
+
+int kvm_io_bus_insert_dev(struct kvm_io_bus *bus, struct kvm_io_device *dev,
+ gpa_t addr, int len)
+{
+ if (bus->dev_count == NR_IOBUS_DEVS)
+ return -ENOSPC;
+
+ bus->range[bus->dev_count++] = (struct kvm_io_range) {
+ .addr = addr,
+ .len = len,
+ .dev = dev,
+ };
+
+ sort(bus->range, bus->dev_count, sizeof(struct kvm_io_range),
+ kvm_io_bus_sort_cmp, NULL);
+
+ return 0;
+}
+
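+/*
+ * bsearch() may land anywhere inside a run of matching ranges, so walk
+ * back to the first one; kvm_io_bus_write()/read() then walk forward
+ * while the key still compares equal.
+ */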
+int kvm_io_bus_get_first_dev(struct kvm_io_bus *bus,
+ gpa_t addr, int len)
+{
+ struct kvm_io_range *range, key;
+ int off;
+
+ key = (struct kvm_io_range) {
+ .addr = addr,
+ .len = len,
+ };
+
+ range = bsearch(&key, bus->range, bus->dev_count,
+ sizeof(struct kvm_io_range), kvm_io_bus_sort_cmp);
+ if (range == NULL)
+ return -ENOENT;
+
+ off = range - bus->range;
+
+ while (off > 0 && kvm_io_bus_sort_cmp(&key, &bus->range[off-1]) == 0)
+ off--;
+
+ return off;
+}
+
/* kvm_io_bus_write - called under kvm->slots_lock */
int kvm_io_bus_write(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr,
int len, const void *val)
{
- int i;
+ int idx;
struct kvm_io_bus *bus;
+ struct kvm_io_range range;
+
+ range = (struct kvm_io_range) {
+ .addr = addr,
+ .len = len,
+ };
bus = srcu_dereference(kvm->buses[bus_idx], &kvm->srcu);
- for (i = 0; i < bus->dev_count; i++)
- if (!kvm_iodevice_write(bus->devs[i], addr, len, val))
+ idx = kvm_io_bus_get_first_dev(bus, addr, len);
+ if (idx < 0)
+ return -EOPNOTSUPP;
+
+ while (idx < bus->dev_count &&
+ kvm_io_bus_sort_cmp(&range, &bus->range[idx]) == 0) {
+ if (!kvm_iodevice_write(bus->range[idx].dev, addr, len, val))
return 0;
+ idx++;
+ }
+
return -EOPNOTSUPP;
}
int kvm_io_bus_read(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr,
int len, void *val)
{
- int i;
+ int idx;
struct kvm_io_bus *bus;
+ struct kvm_io_range range;
+
+ range = (struct kvm_io_range) {
+ .addr = addr,
+ .len = len,
+ };
bus = srcu_dereference(kvm->buses[bus_idx], &kvm->srcu);
- for (i = 0; i < bus->dev_count; i++)
- if (!kvm_iodevice_read(bus->devs[i], addr, len, val))
+ idx = kvm_io_bus_get_first_dev(bus, addr, len);
+ if (idx < 0)
+ return -EOPNOTSUPP;
+
+ while (idx < bus->dev_count &&
+ kvm_io_bus_sort_cmp(&range, &bus->range[idx]) == 0) {
+ if (!kvm_iodevice_read(bus->range[idx].dev, addr, len, val))
return 0;
+ idx++;
+ }
+
return -EOPNOTSUPP;
}
/* Caller must hold slots_lock. */
-int kvm_io_bus_register_dev(struct kvm *kvm, enum kvm_bus bus_idx,
- struct kvm_io_device *dev)
+int kvm_io_bus_register_dev(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr,
+ int len, struct kvm_io_device *dev)
{
struct kvm_io_bus *new_bus, *bus;
if (!new_bus)
return -ENOMEM;
memcpy(new_bus, bus, sizeof(struct kvm_io_bus));
- new_bus->devs[new_bus->dev_count++] = dev;
+ kvm_io_bus_insert_dev(new_bus, dev, addr, len);
rcu_assign_pointer(kvm->buses[bus_idx], new_bus);
synchronize_srcu_expedited(&kvm->srcu);
kfree(bus);
r = -ENOENT;
for (i = 0; i < new_bus->dev_count; i++)
- if (new_bus->devs[i] == dev) {
+ if (new_bus->range[i].dev == dev) {
r = 0;
- new_bus->devs[i] = new_bus->devs[--new_bus->dev_count];
+ new_bus->dev_count--;
+ new_bus->range[i] = new_bus->range[new_bus->dev_count];
+ sort(new_bus->range, new_bus->dev_count,
+ sizeof(struct kvm_io_range),
+ kvm_io_bus_sort_cmp, NULL);
break;
}
struct kvm *kvm;
*val = 0;
- spin_lock(&kvm_lock);
+ raw_spin_lock(&kvm_lock);
list_for_each_entry(kvm, &vm_list, vm_list)
*val += *(u32 *)((void *)kvm + offset);
- spin_unlock(&kvm_lock);
+ raw_spin_unlock(&kvm_lock);
return 0;
}
int i;
*val = 0;
- spin_lock(&kvm_lock);
+ raw_spin_lock(&kvm_lock);
list_for_each_entry(kvm, &vm_list, vm_list)
kvm_for_each_vcpu(i, vcpu, kvm)
*val += *(u32 *)((void *)vcpu + offset);
- spin_unlock(&kvm_lock);
+ raw_spin_unlock(&kvm_lock);
return 0;
}
debugfs_remove(kvm_debugfs_dir);
}
-static int kvm_suspend(struct sys_device *dev, pm_message_t state)
+static int kvm_suspend(void)
{
if (kvm_usage_count)
hardware_disable_nolock(NULL);
return 0;
}
-static int kvm_resume(struct sys_device *dev)
+static void kvm_resume(void)
{
if (kvm_usage_count) {
- WARN_ON(spin_is_locked(&kvm_lock));
+ WARN_ON(raw_spin_is_locked(&kvm_lock));
hardware_enable_nolock(NULL);
}
- return 0;
}
-static struct sysdev_class kvm_sysdev_class = {
- .name = "kvm",
+static struct syscore_ops kvm_syscore_ops = {
.suspend = kvm_suspend,
.resume = kvm_resume,
};
-static struct sys_device kvm_sysdev = {
- .id = 0,
- .cls = &kvm_sysdev_class,
-};
-
struct page *bad_page;
pfn_t bad_pfn;
goto out_free_2;
register_reboot_notifier(&kvm_reboot_notifier);
- r = sysdev_class_register(&kvm_sysdev_class);
- if (r)
- goto out_free_3;
-
- r = sysdev_register(&kvm_sysdev);
- if (r)
- goto out_free_4;
-
/* A kmem cache lets us meet the alignment requirements of fx_save. */
if (!vcpu_align)
vcpu_align = __alignof__(struct kvm_vcpu);
0, NULL);
if (!kvm_vcpu_cache) {
r = -ENOMEM;
- goto out_free_5;
+ goto out_free_3;
}
r = kvm_async_pf_init();
goto out_unreg;
}
+ register_syscore_ops(&kvm_syscore_ops);
+
kvm_preempt_ops.sched_in = kvm_sched_in;
kvm_preempt_ops.sched_out = kvm_sched_out;
kvm_async_pf_deinit();
out_free:
kmem_cache_destroy(kvm_vcpu_cache);
-out_free_5:
- sysdev_unregister(&kvm_sysdev);
-out_free_4:
- sysdev_class_unregister(&kvm_sysdev_class);
out_free_3:
unregister_reboot_notifier(&kvm_reboot_notifier);
unregister_cpu_notifier(&kvm_cpu_notifier);
misc_deregister(&kvm_dev);
kmem_cache_destroy(kvm_vcpu_cache);
kvm_async_pf_deinit();
- sysdev_unregister(&kvm_sysdev);
- sysdev_class_unregister(&kvm_sysdev_class);
+ unregister_syscore_ops(&kvm_syscore_ops);
unregister_reboot_notifier(&kvm_reboot_notifier);
unregister_cpu_notifier(&kvm_cpu_notifier);
on_each_cpu(hardware_disable_nolock, NULL, 1);