2 * s390host.c -- hosting zSeries kernel virtual machines
4 * Copyright IBM Corp. 2008,2009
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License (version 2 only)
8 * as published by the Free Software Foundation.
10 * Author(s): Carsten Otte <cotte@de.ibm.com>
11 * Christian Borntraeger <borntraeger@de.ibm.com>
12 * Heiko Carstens <heiko.carstens@de.ibm.com>
13 * Christian Ehrhardt <ehrhardt@de.ibm.com>
16 #include <linux/compiler.h>
17 #include <linux/err.h>
19 #include <linux/hrtimer.h>
20 #include <linux/init.h>
21 #include <linux/kvm.h>
22 #include <linux/kvm_host.h>
23 #include <linux/module.h>
24 #include <linux/slab.h>
25 #include <linux/timer.h>
26 #include <asm/asm-offsets.h>
27 #include <asm/lowcore.h>
28 #include <asm/pgtable.h>
30 #include <asm/switch_to.h>
/* Expands to the byte offset of a per-vcpu stat counter plus the
 * KVM_STAT_VCPU tag, as required by struct kvm_stats_debugfs_item. */
34 #define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU
/* Per-vcpu statistics exported under debugfs (kvm/); each entry maps a
 * human-readable name to a counter in struct kvm_vcpu.stat via VCPU_STAT.
 * NOTE(review): the NULL terminator of this array is not visible in this
 * view — it presumably follows the last entry. */
36 struct kvm_stats_debugfs_item debugfs_entries[] = {
37 { "userspace_handled", VCPU_STAT(exit_userspace) },
38 { "exit_null", VCPU_STAT(exit_null) },
39 { "exit_validity", VCPU_STAT(exit_validity) },
40 { "exit_stop_request", VCPU_STAT(exit_stop_request) },
41 { "exit_external_request", VCPU_STAT(exit_external_request) },
42 { "exit_external_interrupt", VCPU_STAT(exit_external_interrupt) },
43 { "exit_instruction", VCPU_STAT(exit_instruction) },
44 { "exit_program_interruption", VCPU_STAT(exit_program_interruption) },
45 { "exit_instr_and_program_int", VCPU_STAT(exit_instr_and_program) },
46 { "instruction_lctlg", VCPU_STAT(instruction_lctlg) },
47 { "instruction_lctl", VCPU_STAT(instruction_lctl) },
48 { "deliver_emergency_signal", VCPU_STAT(deliver_emergency_signal) },
49 { "deliver_external_call", VCPU_STAT(deliver_external_call) },
50 { "deliver_service_signal", VCPU_STAT(deliver_service_signal) },
51 { "deliver_virtio_interrupt", VCPU_STAT(deliver_virtio_interrupt) },
52 { "deliver_stop_signal", VCPU_STAT(deliver_stop_signal) },
53 { "deliver_prefix_signal", VCPU_STAT(deliver_prefix_signal) },
54 { "deliver_restart_signal", VCPU_STAT(deliver_restart_signal) },
55 { "deliver_program_interruption", VCPU_STAT(deliver_program_int) },
56 { "exit_wait_state", VCPU_STAT(exit_wait_state) },
57 { "instruction_stidp", VCPU_STAT(instruction_stidp) },
58 { "instruction_spx", VCPU_STAT(instruction_spx) },
59 { "instruction_stpx", VCPU_STAT(instruction_stpx) },
60 { "instruction_stap", VCPU_STAT(instruction_stap) },
61 { "instruction_storage_key", VCPU_STAT(instruction_storage_key) },
62 { "instruction_stsch", VCPU_STAT(instruction_stsch) },
63 { "instruction_chsc", VCPU_STAT(instruction_chsc) },
64 { "instruction_stsi", VCPU_STAT(instruction_stsi) },
65 { "instruction_stfl", VCPU_STAT(instruction_stfl) },
66 { "instruction_tprot", VCPU_STAT(instruction_tprot) },
67 { "instruction_sigp_sense", VCPU_STAT(instruction_sigp_sense) },
68 { "instruction_sigp_sense_running", VCPU_STAT(instruction_sigp_sense_running) },
69 { "instruction_sigp_external_call", VCPU_STAT(instruction_sigp_external_call) },
70 { "instruction_sigp_emergency", VCPU_STAT(instruction_sigp_emergency) },
71 { "instruction_sigp_stop", VCPU_STAT(instruction_sigp_stop) },
72 { "instruction_sigp_set_arch", VCPU_STAT(instruction_sigp_arch) },
73 { "instruction_sigp_set_prefix", VCPU_STAT(instruction_sigp_prefix) },
74 { "instruction_sigp_restart", VCPU_STAT(instruction_sigp_restart) },
75 { "diagnose_10", VCPU_STAT(diagnose_10) },
76 { "diagnose_44", VCPU_STAT(diagnose_44) },
/* Page-sized bitmap of STFLE facility bits exposed to guests; allocated in
 * kvm_s390_init() and freed in kvm_s390_exit(). */
80 static unsigned long long *facilities;
82 /* Section: not file related */
/* Hardware enable hook — trivial on s390, see comment below. */
83 int kvm_arch_hardware_enable(void *garbage)
85 /* every s390 is virtualization enabled ;-) */
/* NOTE(review): the following arch hooks appear to be empty/trivial stubs;
 * their bodies are elided in this view — confirm against the full file. */
89 void kvm_arch_hardware_disable(void *garbage)
93 int kvm_arch_hardware_setup(void)
98 void kvm_arch_hardware_unsetup(void)
102 void kvm_arch_check_processor_compat(void *rtn)
106 int kvm_arch_init(void *opaque)
111 void kvm_arch_exit(void)
115 /* Section: device related */
/* ioctl on the /dev/kvm device node: only KVM_S390_ENABLE_SIE is handled,
 * which switches the calling process's mm for SIE use. Other ioctls
 * presumably fall through to an error return not visible in this view. */
116 long kvm_arch_dev_ioctl(struct file *filp,
117 unsigned int ioctl, unsigned long arg)
119 if (ioctl == KVM_S390_ENABLE_SIE)
120 return s390_enable_sie();
/* Report which optional KVM capabilities this architecture supports.
 * The visible cases advertise S390 PSW handling, gmap, MMU sync, register
 * sync, and (if configured) user-controlled VMs; the switch framing and
 * return values are elided in this view. */
124 int kvm_dev_ioctl_check_extension(long ext)
129 case KVM_CAP_S390_PSW:
130 case KVM_CAP_S390_GMAP:
131 case KVM_CAP_SYNC_MMU:
132 #ifdef CONFIG_KVM_S390_UCONTROL
133 case KVM_CAP_S390_UCONTROL:
135 case KVM_CAP_SYNC_REGS:
144 /* Section: vm related */
146 * Get (and clear) the dirty memory log for a memory slot.
/* NOTE(review): body elided in this view; presumably unimplemented on s390
 * at this point — confirm against the full file. */
148 int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
149 struct kvm_dirty_log *log)
/* VM-level ioctl dispatcher. The visible case handles KVM_S390_INTERRUPT:
 * copy a struct kvm_s390_interrupt from userspace and inject it as a
 * floating (VM-wide) interrupt. Error-path returns are elided here. */
154 long kvm_arch_vm_ioctl(struct file *filp,
155 unsigned int ioctl, unsigned long arg)
157 struct kvm *kvm = filp->private_data;
158 void __user *argp = (void __user *)arg;
162 case KVM_S390_INTERRUPT: {
163 struct kvm_s390_interrupt s390int;
166 if (copy_from_user(&s390int, argp, sizeof(s390int)))
168 r = kvm_s390_inject_vm(kvm, &s390int);
/* Create the arch-specific part of a VM: validate the (optional) ucontrol
 * type flag, enable SIE for the mm, allocate the SCA page, register a
 * per-VM debug log, init the floating-interrupt state, and set up the
 * guest address-space gmap (NULL for user-controlled VMs).
 * The error-handling/unwind labels near the end are partially elided. */
178 int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
184 #ifdef CONFIG_KVM_S390_UCONTROL
185 if (type & ~KVM_VM_S390_UCONTROL)
187 if ((type & KVM_VM_S390_UCONTROL) && (!capable(CAP_SYS_ADMIN)))
194 rc = s390_enable_sie();
200 kvm->arch.sca = (struct sca_block *) get_zeroed_page(GFP_KERNEL);
204 sprintf(debug_name, "kvm-%u", current->pid);
206 kvm->arch.dbf = debug_register(debug_name, 8, 2, 8 * sizeof(long));
210 spin_lock_init(&kvm->arch.float_int.lock);
211 INIT_LIST_HEAD(&kvm->arch.float_int.list);
213 debug_register_view(kvm->arch.dbf, &debug_sprintf_view);
214 VM_EVENT(kvm, 3, "%s", "vm created");
216 if (type & KVM_VM_S390_UCONTROL) {
217 kvm->arch.gmap = NULL;
219 kvm->arch.gmap = gmap_alloc(current->mm);
/* Unwind path: release debug log and SCA page on failure. */
225 debug_unregister(kvm->arch.dbf);
227 free_page((unsigned long)(kvm->arch.sca));
/* Tear down one vcpu: for non-ucontrol VMs clear its bit in the SCA
 * machine-check notification mask and detach its SIE block from the SCA
 * entry (only if the entry still points at this vcpu); for ucontrol VMs
 * free the per-vcpu gmap. Finally free the SIE block page and let common
 * code uninit the vcpu. */
232 void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
234 VCPU_EVENT(vcpu, 3, "%s", "free cpu");
235 if (!kvm_is_ucontrol(vcpu->kvm)) {
/* SCA mcn is a big-endian bitmap: bit for cpu id is 63 - id. */
236 clear_bit(63 - vcpu->vcpu_id,
237 (unsigned long *) &vcpu->kvm->arch.sca->mcn);
238 if (vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sda ==
239 (__u64) vcpu->arch.sie_block)
240 vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sda = 0;
244 if (kvm_is_ucontrol(vcpu->kvm))
245 gmap_free(vcpu->arch.gmap);
247 free_page((unsigned long)(vcpu->arch.sie_block));
248 kvm_vcpu_uninit(vcpu);
/* Destroy every vcpu of a VM, then NULL out the vcpu array and reset the
 * online count under kvm->lock so no stale pointers remain. */
252 static void kvm_free_vcpus(struct kvm *kvm)
255 struct kvm_vcpu *vcpu;
257 kvm_for_each_vcpu(i, vcpu, kvm)
258 kvm_arch_vcpu_destroy(vcpu);
260 mutex_lock(&kvm->lock);
261 for (i = 0; i < atomic_read(&kvm->online_vcpus); i++)
262 kvm->vcpus[i] = NULL;
264 atomic_set(&kvm->online_vcpus, 0);
265 mutex_unlock(&kvm->lock);
/* Arch hook for syncing events at VM teardown; body elided in this view. */
268 void kvm_arch_sync_events(struct kvm *kvm)
/* Release VM-wide arch resources: SCA page, debug log, and — for
 * kernel-controlled VMs only — the shared gmap. The kvm_free_vcpus()
 * call presumably precedes these lines but is elided in this view. */
272 void kvm_arch_destroy_vm(struct kvm *kvm)
275 free_page((unsigned long)(kvm->arch.sca));
276 debug_unregister(kvm->arch.dbf);
277 if (!kvm_is_ucontrol(kvm))
278 gmap_free(kvm->arch.gmap);
281 /* Section: vcpu related */
/* Per-vcpu arch init: ucontrol vcpus get their own gmap, kernel-controlled
 * vcpus share the VM's gmap. Also advertises which registers are synced
 * via the kvm_run area (list truncated in this view). */
282 int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
284 if (kvm_is_ucontrol(vcpu->kvm)) {
285 vcpu->arch.gmap = gmap_alloc(current->mm);
286 if (!vcpu->arch.gmap)
291 vcpu->arch.gmap = vcpu->kvm->arch.gmap;
292 vcpu->run->kvm_valid_regs = KVM_SYNC_PREFIX |
/* Arch hook for vcpu uninit; body elided in this view. */
299 void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
/* Called when the vcpu is scheduled onto a host cpu: save host FP and
 * access registers, load the guest's (masking the FPC to valid bits),
 * enable the guest address space and mark the vcpu RUNNING. */
304 void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
306 save_fp_regs(&vcpu->arch.host_fpregs);
307 save_access_regs(vcpu->arch.host_acrs);
308 vcpu->arch.guest_fpregs.fpc &= FPC_VALID_MASK;
309 restore_fp_regs(&vcpu->arch.guest_fpregs);
310 restore_access_regs(vcpu->run->s.regs.acrs);
311 gmap_enable(vcpu->arch.gmap);
312 atomic_set_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
/* Mirror of kvm_arch_vcpu_load: clear RUNNING, disable the guest address
 * space, save guest FP/access registers and restore the host's. */
315 void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
317 atomic_clear_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
318 gmap_disable(vcpu->arch.gmap);
319 save_fp_regs(&vcpu->arch.guest_fpregs);
320 save_access_regs(vcpu->run->s.regs.acrs);
321 restore_fp_regs(&vcpu->arch.host_fpregs);
322 restore_access_regs(vcpu->arch.host_acrs);
/* Reset a vcpu to architected initial-cpu-reset state (without dropping
 * to ESA mode): zero PSW, prefix, cpu timer, clock comparator and TOD
 * programmable register; set the architected control-register defaults
 * (cr0/cr14); clear the guest FPC (and load it into the hw fpc so the
 * host register matches); reset the breaking-event address. */
325 static void kvm_s390_vcpu_initial_reset(struct kvm_vcpu *vcpu)
327 /* this equals initial cpu reset in pop, but we don't switch to ESA */
328 vcpu->arch.sie_block->gpsw.mask = 0UL;
329 vcpu->arch.sie_block->gpsw.addr = 0UL;
330 kvm_s390_set_prefix(vcpu, 0);
331 vcpu->arch.sie_block->cputm = 0UL;
332 vcpu->arch.sie_block->ckc = 0UL;
333 vcpu->arch.sie_block->todpr = 0;
334 memset(vcpu->arch.sie_block->gcr, 0, 16 * sizeof(__u64));
335 vcpu->arch.sie_block->gcr[0] = 0xE0UL;
336 vcpu->arch.sie_block->gcr[14] = 0xC2000000UL;
337 vcpu->arch.guest_fpregs.fpc = 0;
338 asm volatile("lfpc %0" : : "Q" (vcpu->arch.guest_fpregs.fpc));
339 vcpu->arch.sie_block->gbea = 1;
/* One-time SIE control block setup for a new vcpu: cpuflags (z/Arch mode
 * plus flags truncated from this view), execution controls ecb/eca, the
 * facility-list pointer, the clock-comparator hrtimer + wakeup tasklet,
 * and a cpu id whose version byte is forced to 0xff. */
342 int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
344 atomic_set(&vcpu->arch.sie_block->cpuflags, CPUSTAT_ZARCH |
347 vcpu->arch.sie_block->ecb = 6;
348 vcpu->arch.sie_block->eca = 0xC1002001U;
/* fac is a 31-bit field; truncating the kernel pointer is intentional
 * here since the facilities page lives in DMA (low) memory. */
349 vcpu->arch.sie_block->fac = (int) (long) facilities;
350 hrtimer_init(&vcpu->arch.ckc_timer, CLOCK_REALTIME, HRTIMER_MODE_ABS);
351 tasklet_init(&vcpu->arch.tasklet, kvm_s390_tasklet,
352 (unsigned long) vcpu);
353 vcpu->arch.ckc_timer.function = kvm_s390_idle_wakeup;
354 get_cpu_id(&vcpu->arch.cpu_id);
355 vcpu->arch.cpu_id.version = 0xff;
/* Allocate and wire up a new vcpu: bounds-check the id, kzalloc the vcpu,
 * allocate a zeroed page for the SIE control block, hook the vcpu into
 * the VM's SCA (non-ucontrol only) and into the floating-interrupt
 * structures, then hand off to common kvm_vcpu_init. Several error paths
 * and the final return are elided in this view. */
359 struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
362 struct kvm_vcpu *vcpu;
365 if (id >= KVM_MAX_VCPUS)
370 vcpu = kzalloc(sizeof(struct kvm_vcpu), GFP_KERNEL);
374 vcpu->arch.sie_block = (struct kvm_s390_sie_block *)
375 get_zeroed_page(GFP_KERNEL);
377 if (!vcpu->arch.sie_block)
380 vcpu->arch.sie_block->icpua = id;
381 if (!kvm_is_ucontrol(kvm)) {
382 if (!kvm->arch.sca) {
/* Publish the SIE block address in the SCA entry only if unset. */
386 if (!kvm->arch.sca->cpu[id].sda)
387 kvm->arch.sca->cpu[id].sda =
388 (__u64) vcpu->arch.sie_block;
389 vcpu->arch.sie_block->scaoh =
390 (__u32)(((__u64)kvm->arch.sca) >> 32);
391 vcpu->arch.sie_block->scaol = (__u32)(__u64)kvm->arch.sca;
392 set_bit(63 - id, (unsigned long *) &kvm->arch.sca->mcn);
395 spin_lock_init(&vcpu->arch.local_int.lock);
396 INIT_LIST_HEAD(&vcpu->arch.local_int.list);
397 vcpu->arch.local_int.float_int = &kvm->arch.float_int;
398 spin_lock(&kvm->arch.float_int.lock);
399 kvm->arch.float_int.local_int[id] = &vcpu->arch.local_int;
400 init_waitqueue_head(&vcpu->arch.local_int.wq);
401 vcpu->arch.local_int.cpuflags = &vcpu->arch.sie_block->cpuflags;
402 spin_unlock(&kvm->arch.float_int.lock);
404 rc = kvm_vcpu_init(vcpu, kvm, id);
406 goto out_free_sie_block;
407 VM_EVENT(kvm, 3, "create cpu %d at %p, sie block at %p", id, vcpu,
408 vcpu->arch.sie_block);
/* out_free_sie_block unwind: release the SIE control block page. */
412 free_page((unsigned long)(vcpu->arch.sie_block));
/* Required by common KVM code but never invoked on s390 (see comment). */
419 int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
421 /* kvm common code refers to this, but never calls it */
/* KVM_S390_INITIAL_RESET ioctl backend: delegate to the reset helper. */
426 static int kvm_arch_vcpu_ioctl_initial_reset(struct kvm_vcpu *vcpu)
428 kvm_s390_vcpu_initial_reset(vcpu);
432 int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
434 memcpy(&vcpu->run->s.regs.gprs, ®s->gprs, sizeof(regs->gprs));
438 int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
440 memcpy(®s->gprs, &vcpu->run->s.regs.gprs, sizeof(regs->gprs));
/* KVM_SET_SREGS: install access and control registers; access registers
 * are also loaded into hardware immediately. */
444 int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
445 struct kvm_sregs *sregs)
447 memcpy(&vcpu->run->s.regs.acrs, &sregs->acrs, sizeof(sregs->acrs));
448 memcpy(&vcpu->arch.sie_block->gcr, &sregs->crs, sizeof(sregs->crs));
449 restore_access_regs(vcpu->run->s.regs.acrs);
/* KVM_GET_SREGS: read access and control registers back to userspace. */
453 int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
454 struct kvm_sregs *sregs)
456 memcpy(&sregs->acrs, &vcpu->run->s.regs.acrs, sizeof(sregs->acrs));
457 memcpy(&sregs->crs, &vcpu->arch.sie_block->gcr, sizeof(sregs->crs));
/* KVM_SET_FPU: install FP registers; the FPC is masked to valid bits and
 * the guest FP state is loaded into hardware immediately. */
461 int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
463 memcpy(&vcpu->arch.guest_fpregs.fprs, &fpu->fprs, sizeof(fpu->fprs))
464 vcpu->arch.guest_fpregs.fpc = fpu->fpc & FPC_VALID_MASK;
465 restore_fp_regs(&vcpu->arch.guest_fpregs);
/* KVM_GET_FPU: read FP registers and FPC back to userspace. */
469 int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
471 memcpy(&fpu->fprs, &vcpu->arch.guest_fpregs.fprs, sizeof(fpu->fprs));
472 fpu->fpc = vcpu->arch.guest_fpregs.fpc;
/* KVM_S390_SET_INITIAL_PSW backend: only legal while the vcpu is STOPPED;
 * writes the PSW into the sync area (the rejection return value for a
 * running vcpu is elided in this view). */
476 static int kvm_arch_vcpu_ioctl_set_initial_psw(struct kvm_vcpu *vcpu, psw_t psw)
480 if (!(atomic_read(&vcpu->arch.sie_block->cpuflags) & CPUSTAT_STOPPED))
483 vcpu->run->psw_mask = psw.mask;
484 vcpu->run->psw_addr = psw.addr;
/* The following four ioctl backends are unimplemented on s390 and simply
 * reject the request with -EINVAL. */
489 int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
490 struct kvm_translation *tr)
492 return -EINVAL; /* not implemented yet */
495 int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
496 struct kvm_guest_debug *dbg)
498 return -EINVAL; /* not implemented yet */
501 int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
502 struct kvm_mp_state *mp_state)
504 return -EINVAL; /* not implemented yet */
507 int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
508 struct kvm_mp_state *mp_state)
510 return -EINVAL; /* not implemented yet */
/* One SIE entry/exit cycle: stage gprs 14/15 into the SIE block, handle a
 * pending machine check, deliver pending interrupts (kernel-controlled
 * VMs only), run sie64a, translate a SIE fault either into a ucontrol
 * exit or an addressing program interrupt, then copy gg14/gg15 back.
 * Several lines (e.g. s390_handle_mcck call, return) are elided here. */
513 static int __vcpu_run(struct kvm_vcpu *vcpu)
/* 16 bytes = gprs[14] and gprs[15] together. */
517 memcpy(&vcpu->arch.sie_block->gg14, &vcpu->run->s.regs.gprs[14], 16);
522 if (test_thread_flag(TIF_MCCK_PENDING))
525 if (!kvm_is_ucontrol(vcpu->kvm))
526 kvm_s390_deliver_pending_interrupts(vcpu);
528 vcpu->arch.sie_block->icptcode = 0;
532 VCPU_EVENT(vcpu, 6, "entering sie flags %x",
533 atomic_read(&vcpu->arch.sie_block->cpuflags));
534 rc = sie64a(vcpu->arch.sie_block, vcpu->run->s.regs.gprs);
/* sie64a failed: for ucontrol VMs report to userspace, otherwise inject
 * an addressing exception into the guest. */
536 if (kvm_is_ucontrol(vcpu->kvm)) {
537 rc = SIE_INTERCEPT_UCONTROL;
539 VCPU_EVENT(vcpu, 3, "%s", "fault in sie instruction");
540 kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
544 VCPU_EVENT(vcpu, 6, "exit sie icptcode %d",
545 vcpu->arch.sie_block->icptcode);
550 memcpy(&vcpu->run->s.regs.gprs[14], &vcpu->arch.sie_block->gg14, 16);
/* KVM_RUN backend: install the guest signal mask, un-stop the vcpu,
 * validate the entry exit_reason, sync dirty registers from kvm_run
 * (PSW, prefix, control registers), loop on __vcpu_run + intercept
 * handling until a signal or a handler result, then translate the result
 * into a kvm_run exit record and sync registers back out. Parts of the
 * switch framing, loop, and returns are elided in this view. */
554 int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
560 if (vcpu->sigset_active)
561 sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);
563 atomic_clear_mask(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags);
565 BUG_ON(vcpu->kvm->arch.float_int.local_int[vcpu->vcpu_id] == NULL);
567 switch (kvm_run->exit_reason) {
568 case KVM_EXIT_S390_SIEIC:
569 case KVM_EXIT_UNKNOWN:
571 case KVM_EXIT_S390_RESET:
572 case KVM_EXIT_S390_UCONTROL:
/* Sync in: PSW always; prefix and crs only when marked dirty. */
578 vcpu->arch.sie_block->gpsw.mask = kvm_run->psw_mask;
579 vcpu->arch.sie_block->gpsw.addr = kvm_run->psw_addr;
580 if (kvm_run->kvm_dirty_regs & KVM_SYNC_PREFIX) {
581 kvm_run->kvm_dirty_regs &= ~KVM_SYNC_PREFIX;
582 kvm_s390_set_prefix(vcpu, kvm_run->s.regs.prefix);
584 if (kvm_run->kvm_dirty_regs & KVM_SYNC_CRS) {
585 kvm_run->kvm_dirty_regs &= ~KVM_SYNC_CRS;
586 memcpy(&vcpu->arch.sie_block->gcr, &kvm_run->s.regs.crs, 128);
587 kvm_s390_set_prefix(vcpu, kvm_run->s.regs.prefix);
/* Main run loop: enter SIE, then let the intercept handler decide
 * whether to loop again (rc == 0) or bail out. */
593 rc = __vcpu_run(vcpu);
596 if (kvm_is_ucontrol(vcpu->kvm))
599 rc = kvm_handle_sie_intercept(vcpu);
600 } while (!signal_pending(current) && !rc);
602 if (rc == SIE_INTERCEPT_RERUNVCPU)
605 if (signal_pending(current) && !rc) {
606 kvm_run->exit_reason = KVM_EXIT_INTR;
610 #ifdef CONFIG_KVM_S390_UCONTROL
/* ucontrol fault: report the faulting guest address to userspace as a
 * segment-translation (0x10) program code. */
611 if (rc == SIE_INTERCEPT_UCONTROL) {
612 kvm_run->exit_reason = KVM_EXIT_S390_UCONTROL;
613 kvm_run->s390_ucontrol.trans_exc_code =
614 current->thread.gmap_addr;
615 kvm_run->s390_ucontrol.pgm_code = 0x10;
620 if (rc == -EOPNOTSUPP) {
621 /* intercept cannot be handled in-kernel, prepare kvm-run */
622 kvm_run->exit_reason = KVM_EXIT_S390_SIEIC;
623 kvm_run->s390_sieic.icptcode = vcpu->arch.sie_block->icptcode;
624 kvm_run->s390_sieic.ipa = vcpu->arch.sie_block->ipa;
625 kvm_run->s390_sieic.ipb = vcpu->arch.sie_block->ipb;
629 if (rc == -EREMOTE) {
630 /* intercept was handled, but userspace support is needed
631 * kvm_run has been prepared by the handler */
/* Sync out: PSW, prefix and control registers back to kvm_run. */
635 kvm_run->psw_mask = vcpu->arch.sie_block->gpsw.mask;
636 kvm_run->psw_addr = vcpu->arch.sie_block->gpsw.addr;
637 kvm_run->s.regs.prefix = vcpu->arch.sie_block->prefix;
638 memcpy(&kvm_run->s.regs.crs, &vcpu->arch.sie_block->gcr, 128);
640 if (vcpu->sigset_active)
641 sigprocmask(SIG_SETMASK, &sigsaved, NULL);
643 vcpu->stat.exit_userspace++;
/* Copy n bytes to the guest: prefixed copies go through the vcpu's prefix
 * translation, otherwise the absolute variant is used. */
647 static int __guestcopy(struct kvm_vcpu *vcpu, u64 guestdest, void *from,
648 unsigned long n, int prefix)
651 return copy_to_guest(vcpu, guestdest, from, n);
653 return copy_to_guest_absolute(vcpu, guestdest, from, n);
657 * store status at address
658 * we use have two special cases:
659 * KVM_S390_STORE_STATUS_NOADDR: -> 0x1200 on 64 bit
660 * KVM_S390_STORE_STATUS_PREFIXED: -> prefix
/* Store-status: write the vcpu's architected save area (FP regs, GPRs,
 * PSW, prefix, FPC, TOD pr, cpu timer, clock comparator, access and
 * control registers) at addr. Byte 163 records the architecture mode
 * (1 = z/Arch) for the two special addresses. Error returns between the
 * __guestcopy calls are elided in this view. */
662 int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr)
664 unsigned char archmode = 1;
667 if (addr == KVM_S390_STORE_STATUS_NOADDR) {
668 if (copy_to_guest_absolute(vcpu, 163ul, &archmode, 1))
670 addr = SAVE_AREA_BASE;
672 } else if (addr == KVM_S390_STORE_STATUS_PREFIXED) {
673 if (copy_to_guest(vcpu, 163ul, &archmode, 1))
675 addr = SAVE_AREA_BASE;
680 if (__guestcopy(vcpu, addr + offsetof(struct save_area, fp_regs),
681 vcpu->arch.guest_fpregs.fprs, 128, prefix))
684 if (__guestcopy(vcpu, addr + offsetof(struct save_area, gp_regs),
685 vcpu->run->s.regs.gprs, 128, prefix))
688 if (__guestcopy(vcpu, addr + offsetof(struct save_area, psw),
689 &vcpu->arch.sie_block->gpsw, 16, prefix))
692 if (__guestcopy(vcpu, addr + offsetof(struct save_area, pref_reg),
693 &vcpu->arch.sie_block->prefix, 4, prefix))
696 if (__guestcopy(vcpu,
697 addr + offsetof(struct save_area, fp_ctrl_reg),
698 &vcpu->arch.guest_fpregs.fpc, 4, prefix))
701 if (__guestcopy(vcpu, addr + offsetof(struct save_area, tod_reg),
702 &vcpu->arch.sie_block->todpr, 4, prefix))
705 if (__guestcopy(vcpu, addr + offsetof(struct save_area, timer),
706 &vcpu->arch.sie_block->cputm, 8, prefix))
709 if (__guestcopy(vcpu, addr + offsetof(struct save_area, clk_cmp),
710 &vcpu->arch.sie_block->ckc, 8, prefix))
713 if (__guestcopy(vcpu, addr + offsetof(struct save_area, acc_regs),
714 &vcpu->run->s.regs.acrs, 64, prefix))
717 if (__guestcopy(vcpu,
718 addr + offsetof(struct save_area, ctrl_regs),
719 &vcpu->arch.sie_block->gcr, 128, prefix))
/* vcpu-level ioctl dispatcher: interrupt injection, store-status, initial
 * PSW/reset, and (ucontrol VMs only) UCAS map/unmap and gmap fault
 * resolution. Error returns between the cases are elided in this view. */
724 long kvm_arch_vcpu_ioctl(struct file *filp,
725 unsigned int ioctl, unsigned long arg)
727 struct kvm_vcpu *vcpu = filp->private_data;
728 void __user *argp = (void __user *)arg;
732 case KVM_S390_INTERRUPT: {
733 struct kvm_s390_interrupt s390int;
736 if (copy_from_user(&s390int, argp, sizeof(s390int)))
738 r = kvm_s390_inject_vcpu(vcpu, &s390int);
741 case KVM_S390_STORE_STATUS:
742 r = kvm_s390_vcpu_store_status(vcpu, arg);
744 case KVM_S390_SET_INITIAL_PSW: {
748 if (copy_from_user(&psw, argp, sizeof(psw)))
750 r = kvm_arch_vcpu_ioctl_set_initial_psw(vcpu, psw);
753 case KVM_S390_INITIAL_RESET:
754 r = kvm_arch_vcpu_ioctl_initial_reset(vcpu);
756 #ifdef CONFIG_KVM_S390_UCONTROL
757 case KVM_S390_UCAS_MAP: {
758 struct kvm_s390_ucas_mapping ucasmap;
760 if (copy_from_user(&ucasmap, argp, sizeof(ucasmap))) {
/* UCAS ioctls are only meaningful on user-controlled VMs. */
765 if (!kvm_is_ucontrol(vcpu->kvm)) {
770 r = gmap_map_segment(vcpu->arch.gmap, ucasmap.user_addr,
771 ucasmap.vcpu_addr, ucasmap.length);
774 case KVM_S390_UCAS_UNMAP: {
775 struct kvm_s390_ucas_mapping ucasmap;
777 if (copy_from_user(&ucasmap, argp, sizeof(ucasmap))) {
782 if (!kvm_is_ucontrol(vcpu->kvm)) {
787 r = gmap_unmap_segment(vcpu->arch.gmap, ucasmap.vcpu_addr,
792 case KVM_S390_VCPU_FAULT: {
793 r = gmap_fault(arg, vcpu->arch.gmap);
794 if (!IS_ERR_VALUE(r))
/* mmap fault handler for the vcpu fd: for ucontrol VMs, page offset
 * KVM_S390_SIE_PAGE_OFFSET maps the SIE control block into userspace;
 * everything else gets SIGBUS. (The success return is elided here.) */
804 int kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
806 #ifdef CONFIG_KVM_S390_UCONTROL
807 if ((vmf->pgoff == KVM_S390_SIE_PAGE_OFFSET)
808 && (kvm_is_ucontrol(vcpu->kvm))) {
809 vmf->page = virt_to_page(vcpu->arch.sie_block);
814 return VM_FAULT_SIGBUS;
/* Arch memslot alloc/free hooks — presumably no-ops on s390; bodies are
 * elided in this view. */
817 void kvm_arch_free_memslot(struct kvm_memory_slot *free,
818 struct kvm_memory_slot *dont)
822 int kvm_arch_create_memslot(struct kvm_memory_slot *slot, unsigned long npages)
827 /* Section: memory related */
/* Validate a proposed memory region: s390 allows exactly one slot, which
 * must start at guest physical zero and be page-aligned in both userspace
 * address and size. (Rejection return values are elided in this view.) */
828 int kvm_arch_prepare_memory_region(struct kvm *kvm,
829 struct kvm_memory_slot *memslot,
830 struct kvm_memory_slot old,
831 struct kvm_userspace_memory_region *mem,
834 /* A few sanity checks. We can have exactly one memory slot which has
835 to start at guest virtual zero and which has to be located at a
836 page boundary in userland and which has to end at a page boundary.
837 The memory in userland is ok to be fragmented into various different
838 vmas. It is okay to mmap() and munmap() stuff in this slot after
839 doing this call at any time */
844 if (mem->guest_phys_addr)
/* 0xffffful masks check 1 MB (segment) alignment. */
847 if (mem->userspace_addr & 0xffffful)
850 if (mem->memory_size & 0xffffful)
/* Commit an accepted memory region by mapping it into the VM's gmap; a
 * failure here can only be logged since the commit path cannot fail. */
859 void kvm_arch_commit_memory_region(struct kvm *kvm,
860 struct kvm_userspace_memory_region *mem,
861 struct kvm_memory_slot old,
867 rc = gmap_map_segment(kvm->arch.gmap, mem->userspace_addr,
868 mem->guest_phys_addr, mem->memory_size);
870 printk(KERN_WARNING "kvm-s390: failed to commit memory region\n");
/* Shadow-MMU flush hook; body elided in this view. */
874 void kvm_arch_flush_shadow(struct kvm *kvm)
/* Module init: register with common KVM, then build the guest-visible
 * facility list — a zeroed DMA page (guests may ask for up to 256
 * doublewords) seeded from the host STFLE list and masked down to the
 * facilities known to work under KVM. Error handling between steps is
 * elided in this view. */
878 static int __init kvm_s390_init(void)
881 ret = kvm_init(NULL, sizeof(struct kvm_vcpu), 0, THIS_MODULE);
886 * guests can ask for up to 255+1 double words, we need a full page
887 * to hold the maximum amount of facilities. On the other hand, we
888 * only set facilities that are known to work in KVM.
890 facilities = (unsigned long long *) get_zeroed_page(GFP_KERNEL|GFP_DMA);
/* Copy the first 16 bytes (facility bits 0-127) from the lowcore. */
895 memcpy(facilities, S390_lowcore.stfle_fac_list, 16);
896 facilities[0] &= 0xff00fff3f47c0000ULL;
897 facilities[1] &= 0x201c000000000000ULL;
/* Module exit: free the facility-list page (common kvm_exit() call is
 * presumably nearby but elided in this view). */
901 static void __exit kvm_s390_exit(void)
903 free_page((unsigned long) facilities);
907 module_init(kvm_s390_init);
908 module_exit(kvm_s390_exit);