/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * Copyright 2010-2011 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
 */
#include <linux/types.h>
#include <linux/string.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/hugetlb.h>
#include <linux/module.h>

#include <asm/tlbflush.h>
#include <asm/kvm_ppc.h>
#include <asm/kvm_book3s.h>
#include <asm/mmu-hash64.h>
#include <asm/hvcall.h>
#include <asm/synch.h>
#include <asm/ppc-opcode.h>
/*
 * Since this file is built in even if KVM is a module, we need
 * a local copy of this function for the case where kvm_main.c is
 * modular.
 */
static struct kvm_memory_slot *builtin_gfn_to_memslot(struct kvm *kvm,
						gfn_t gfn)
{
	struct kvm_memslots *slots;
	struct kvm_memory_slot *memslot;

	slots = kvm_memslots(kvm);
	kvm_for_each_memslot(memslot, slots)
		if (gfn >= memslot->base_gfn &&
		    gfn < memslot->base_gfn + memslot->npages)
			return memslot;
	return NULL;
}
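
/*
 * These handlers can be called in real mode, where vmalloc mappings
 * are not usable; real_vmalloc_addr() below walks the kernel page
 * tables by hand to convert a vmalloc address into its linear-mapping
 * equivalent before it is dereferenced.
 */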

/* Translate address of a vmalloc'd thing to a linear map address */
static void *real_vmalloc_addr(void *x)
{
	unsigned long addr = (unsigned long) x;
	pte_t *p;

	p = find_linux_pte(swapper_pg_dir, addr);
	if (!p || !pte_present(*p))
		return NULL;
	/* assume we don't have huge pages in vmalloc space... */
	addr = (pte_pfn(*p) << PAGE_SHIFT) | (addr & ~PAGE_MASK);
	return __va(addr);
}
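
/*
 * H_ENTER hcall: insert a translation into the hashed page table.
 * pteh and ptel are the guest's proposed first and second doublewords
 * of the HPTE; we validate them, substitute the real page address for
 * the guest physical address, and find a free slot in the chosen PTE
 * group to install the entry in.
 */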
long kvmppc_h_enter(struct kvm_vcpu *vcpu, unsigned long flags,
		    long pte_index, unsigned long pteh, unsigned long ptel)
{
	struct kvm *kvm = vcpu->kvm;
	unsigned long i, pa, gpa, gfn, psize;
	unsigned long slot_fn;
	unsigned long *hpte;
	struct revmap_entry *rev;
	unsigned long g_ptel = ptel;
	struct kvm_memory_slot *memslot;
	unsigned long *physp, pte_size;
	bool realmode = vcpu->arch.vcore->vcore_state == VCORE_RUNNING;

	psize = hpte_page_size(pteh, ptel);
	if (!psize)
		return H_PARAMETER;

	/* Find the memslot (if any) for this address */
	gpa = (ptel & HPTE_R_RPN) & ~(psize - 1);
	gfn = gpa >> PAGE_SHIFT;
	memslot = builtin_gfn_to_memslot(kvm, gfn);
	if (!(memslot && !(memslot->flags & KVM_MEMSLOT_INVALID)))
		return H_PARAMETER;
	slot_fn = gfn - memslot->base_gfn;

	physp = kvm->arch.slot_phys[memslot->id];
	if (!physp)
		return H_PARAMETER;
	physp += slot_fn;
	if (realmode)
		physp = real_vmalloc_addr(physp);
	pa = *physp;
	if (!pa)
		return H_PARAMETER;
	pa &= PAGE_MASK;

	pte_size = kvm->arch.ram_psize;
	if (pte_size < psize)
		return H_PARAMETER;
	if (pa && pte_size > psize)
		pa |= gpa & (pte_size - 1);
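
	/*
	 * Merge the real page address into ptel in place of the
	 * guest-supplied physical address before validating the
	 * caching attributes.
	 */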
	ptel &= ~(HPTE_R_PP0 - psize);
	ptel |= pa;

	/* Check WIMG */
	if ((ptel & HPTE_R_WIMG) != HPTE_R_M &&
	    (ptel & HPTE_R_WIMG) != (HPTE_R_W | HPTE_R_I | HPTE_R_M))
		return H_PARAMETER;
	pteh &= ~0x60UL;
	pteh |= HPTE_V_VALID;
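
	/*
	 * Find a slot in the chosen PTE group.  Without H_EXACT, the low
	 * 3 bits of pte_index only select the PTEG and any of its eight
	 * slots may be used; with H_EXACT, the specified slot itself
	 * must be free.
	 */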
	if (pte_index >= HPT_NPTE)
		return H_PARAMETER;
	if (likely((flags & H_EXACT) == 0)) {
		pte_index &= ~7UL;
		hpte = (unsigned long *)(kvm->arch.hpt_virt + (pte_index << 4));
		for (i = 0; i < 8; ++i) {
			if ((*hpte & HPTE_V_VALID) == 0 &&
			    try_lock_hpte(hpte, HPTE_V_HVLOCK | HPTE_V_VALID))
				break;
			hpte += 2;
		}
		if (i == 8) {
			/*
			 * Since try_lock_hpte doesn't retry (not even stdcx.
			 * failures), it could be that there is a free slot
			 * but we transiently failed to lock it.  Try again,
			 * actually locking each slot and checking it.
			 */
			hpte -= 16;
			for (i = 0; i < 8; ++i) {
				while (!try_lock_hpte(hpte, HPTE_V_HVLOCK))
					cpu_relax();
				if ((*hpte & HPTE_V_VALID) == 0)
					break;
				*hpte &= ~HPTE_V_HVLOCK;
				hpte += 2;
			}
			if (i == 8)
				return H_PTEG_FULL;
		}
		pte_index += i;
	} else {
		hpte = (unsigned long *)(kvm->arch.hpt_virt + (pte_index << 4));
		if (!try_lock_hpte(hpte, HPTE_V_HVLOCK | HPTE_V_VALID)) {
			/* Lock the slot and check again */
			while (!try_lock_hpte(hpte, HPTE_V_HVLOCK))
				cpu_relax();
			if (*hpte & HPTE_V_VALID) {
				*hpte &= ~HPTE_V_HVLOCK;
				return H_PTEG_FULL;
			}
		}
	}
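
	/*
	 * At this point the chosen slot is locked (HPTE_V_HVLOCK set).
	 * Record the guest's view of ptel, then publish the entry:
	 * write the second dword, order it with eieio, then store the
	 * first dword, which makes the entry valid and, since pteh has
	 * HPTE_V_HVLOCK clear, drops the lock as well.
	 */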
	/* Save away the guest's idea of the second HPTE dword */
	rev = real_vmalloc_addr(&kvm->arch.revmap[pte_index]);
	if (rev)
		rev->guest_rpte = g_ptel;
	hpte[1] = ptel;
	eieio();
	hpte[0] = pteh;
	asm volatile("ptesync" : : : "memory");
	vcpu->arch.gpr[4] = pte_index;
	return H_SUCCESS;
}
EXPORT_SYMBOL_GPL(kvmppc_h_enter);
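
/*
 * The tlbie (global TLB invalidate) instruction may only be issued by
 * one thread at a time, so a small lwarx/stwcx. spinlock,
 * kvm->arch.tlbie_lock, serializes it.  LOCK_TOKEN is this CPU's paca
 * lock token, used as the lock owner value.
 */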

#define LOCK_TOKEN	(*(u32 *)(&get_paca()->lock_token))

static inline int try_lock_tlbie(unsigned int *lock)
{
	unsigned int tmp, old;
	unsigned int token = LOCK_TOKEN;

	asm volatile("1:lwarx	%1,0,%2\n"
		     "	cmpwi	cr0,%1,0\n"
		     "	bne	2f\n"
		     "	stwcx.	%3,0,%2\n"
		     "	bne-	1b\n"
		     "	isync\n"
		     "2:"
		     : "=&r" (tmp), "=&r" (old)
		     : "r" (lock), "r" (token)
		     : "cc", "memory");
	return old == 0;
}
long kvmppc_h_remove(struct kvm_vcpu *vcpu, unsigned long flags,
		     unsigned long pte_index, unsigned long avpn,
		     unsigned long va)
{
	struct kvm *kvm = vcpu->kvm;
	unsigned long *hpte;
	unsigned long v, r, rb;

	if (pte_index >= HPT_NPTE)
		return H_PARAMETER;
	hpte = (unsigned long *)(kvm->arch.hpt_virt + (pte_index << 4));
	while (!try_lock_hpte(hpte, HPTE_V_HVLOCK))
		cpu_relax();
	if ((hpte[0] & HPTE_V_VALID) == 0 ||
	    ((flags & H_AVPN) && (hpte[0] & ~0x7fUL) != avpn) ||
	    ((flags & H_ANDCOND) && (hpte[0] & avpn) != 0)) {
		hpte[0] &= ~HPTE_V_HVLOCK;
		return H_NOT_FOUND;
	}
	if (atomic_read(&kvm->online_vcpus) == 1)
		flags |= H_LOCAL;
	vcpu->arch.gpr[4] = v = hpte[0] & ~HPTE_V_HVLOCK;
	vcpu->arch.gpr[5] = r = hpte[1];
	rb = compute_tlbie_rb(v, r, pte_index);
	hpte[0] = 0;
	if (!(flags & H_LOCAL)) {
		while (!try_lock_tlbie(&kvm->arch.tlbie_lock))
			cpu_relax();
		asm volatile("ptesync" : : : "memory");
		asm volatile(PPC_TLBIE(%1,%0)"; eieio; tlbsync"
			     : : "r" (rb), "r" (kvm->arch.lpid));
		asm volatile("ptesync" : : : "memory");
		kvm->arch.tlbie_lock = 0;
	} else {
		asm volatile("ptesync" : : : "memory");
		asm volatile("tlbiel %0" : : "r" (rb));
		asm volatile("ptesync" : : : "memory");
	}
	return H_SUCCESS;
}
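
/*
 * H_BULK_REMOVE hcall: process up to four remove requests, passed in
 * GPR4-GPR11 as (flags | pte_index, avpn) pairs, and batch the TLB
 * invalidations at the end.  A completion code is written back into
 * the top byte of each request word.
 */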
long kvmppc_h_bulk_remove(struct kvm_vcpu *vcpu)
{
	struct kvm *kvm = vcpu->kvm;
	unsigned long *args = &vcpu->arch.gpr[4];
	unsigned long *hp, tlbrb[4];
	long int i, found;
	long int n_inval = 0;
	unsigned long flags, req, pte_index;
	long int local = 0;
	long int ret = H_SUCCESS;

	if (atomic_read(&kvm->online_vcpus) == 1)
		local = 1;
	for (i = 0; i < 4; ++i) {
		pte_index = args[i * 2];
		flags = pte_index >> 56;
		pte_index &= ((1ul << 56) - 1);
		req = flags >> 6;
		flags &= 3;
		if (req == 3)		/* end of request list */
			break;
		if (req != 1 || flags == 3 ||
		    pte_index >= HPT_NPTE) {
			/* parameter error */
			args[i * 2] = ((0xa0 | flags) << 56) + pte_index;
			ret = H_PARAMETER;
			break;
		}
		hp = (unsigned long *)(kvm->arch.hpt_virt + (pte_index << 4));
		while (!try_lock_hpte(hp, HPTE_V_HVLOCK))
			cpu_relax();
		found = 0;
		if (hp[0] & HPTE_V_VALID) {
			switch (flags & 3) {
			case 0:		/* absolute */
				found = 1;
				break;
			case 1:		/* andcond */
				if (!(hp[0] & args[i * 2 + 1]))
					found = 1;
				break;
			case 2:		/* AVPN */
				if ((hp[0] & ~0x7fUL) == args[i * 2 + 1])
					found = 1;
				break;
			}
		}
		if (!found) {
			hp[0] &= ~HPTE_V_HVLOCK;
			args[i * 2] = ((0x90 | flags) << 56) + pte_index;
			continue;
		}
		/* insert R and C bits from PTE */
		flags |= (hp[1] >> 5) & 0x0c;
		args[i * 2] = ((0x80 | flags) << 56) + pte_index;
		tlbrb[n_inval++] = compute_tlbie_rb(hp[0], hp[1], pte_index);
		hp[0] = 0;
	}
	if (n_inval == 0)
		return ret;

	if (!local) {
		while (!try_lock_tlbie(&kvm->arch.tlbie_lock))
			cpu_relax();
		asm volatile("ptesync" : : : "memory");
		for (i = 0; i < n_inval; ++i)
			asm volatile(PPC_TLBIE(%1,%0)
				     : : "r" (tlbrb[i]), "r" (kvm->arch.lpid));
		asm volatile("eieio; tlbsync; ptesync" : : : "memory");
		kvm->arch.tlbie_lock = 0;
	} else {
		asm volatile("ptesync" : : : "memory");
		for (i = 0; i < n_inval; ++i)
			asm volatile("tlbiel %0" : : "r" (tlbrb[i]));
		asm volatile("ptesync" : : : "memory");
	}
	return ret;
}
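
/*
 * H_PROTECT hcall: change the protection bits (pp0/pp, key, N) of an
 * existing HPTE.  The entry is invalidated while the stale TLB entry
 * is flushed, then rewritten with the updated second dword.
 */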
long kvmppc_h_protect(struct kvm_vcpu *vcpu, unsigned long flags,
		      unsigned long pte_index, unsigned long avpn,
		      unsigned long va)
{
	struct kvm *kvm = vcpu->kvm;
	unsigned long *hpte;
	struct revmap_entry *rev;
	unsigned long v, r, rb, mask, bits;

	if (pte_index >= HPT_NPTE)
		return H_PARAMETER;
	hpte = (unsigned long *)(kvm->arch.hpt_virt + (pte_index << 4));
	while (!try_lock_hpte(hpte, HPTE_V_HVLOCK))
		cpu_relax();
	if ((hpte[0] & HPTE_V_VALID) == 0 ||
	    ((flags & H_AVPN) && (hpte[0] & ~0x7fUL) != avpn)) {
		hpte[0] &= ~HPTE_V_HVLOCK;
		return H_NOT_FOUND;
	}
	if (atomic_read(&kvm->online_vcpus) == 1)
		flags |= H_LOCAL;
	v = hpte[0];
	bits = (flags << 55) & HPTE_R_PP0;
	bits |= (flags << 48) & HPTE_R_KEY_HI;
	bits |= flags & (HPTE_R_PP | HPTE_R_N | HPTE_R_KEY_LO);

	/* Update guest view of 2nd HPTE dword */
	mask = HPTE_R_PP0 | HPTE_R_PP | HPTE_R_N |
		HPTE_R_KEY_HI | HPTE_R_KEY_LO;
	rev = real_vmalloc_addr(&kvm->arch.revmap[pte_index]);
	if (rev) {
		r = (rev->guest_rpte & ~mask) | bits;
		rev->guest_rpte = r;
	}
	r = (hpte[1] & ~mask) | bits;

	/* Update HPTE */
	rb = compute_tlbie_rb(v, r, pte_index);
	hpte[0] = v & ~HPTE_V_VALID;
	if (!(flags & H_LOCAL)) {
		while (!try_lock_tlbie(&kvm->arch.tlbie_lock))
			cpu_relax();
		asm volatile("ptesync" : : : "memory");
		asm volatile(PPC_TLBIE(%1,%0)"; eieio; tlbsync"
			     : : "r" (rb), "r" (kvm->arch.lpid));
		asm volatile("ptesync" : : : "memory");
		kvm->arch.tlbie_lock = 0;
	} else {
		asm volatile("ptesync" : : : "memory");
		asm volatile("tlbiel %0" : : "r" (rb));
		asm volatile("ptesync" : : : "memory");
	}
	hpte[1] = r;
	eieio();
	hpte[0] = v & ~HPTE_V_HVLOCK;
	asm volatile("ptesync" : : : "memory");
	return H_SUCCESS;
}
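
/*
 * H_READ hcall: return the contents of one HPTE (or a group of four,
 * with H_READ_4) in GPR4 onwards.  With H_R_XLATE, the guest's
 * original view of the second doubleword is returned from the reverse
 * map rather than the real-address version in the HPT.
 */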
long kvmppc_h_read(struct kvm_vcpu *vcpu, unsigned long flags,
		   unsigned long pte_index)
{
	struct kvm *kvm = vcpu->kvm;
	unsigned long *hpte, r;
	int i, n = 1;
	struct revmap_entry *rev = NULL;

	if (pte_index >= HPT_NPTE)
		return H_PARAMETER;
	if (flags & H_READ_4) {
		pte_index &= ~3;
		n = 4;
	}
	if (flags & H_R_XLATE)
		rev = real_vmalloc_addr(&kvm->arch.revmap[pte_index]);
	for (i = 0; i < n; ++i, ++pte_index) {
		hpte = (unsigned long *)(kvm->arch.hpt_virt + (pte_index << 4));
		r = hpte[1];
		if (hpte[0] & HPTE_V_VALID) {
			if (rev)
				r = rev[i].guest_rpte;
			else
				r = hpte[1] | HPTE_R_RPN;
		}
		vcpu->arch.gpr[4 + i * 2] = hpte[0];
		vcpu->arch.gpr[5 + i * 2] = r;
	}
	return H_SUCCESS;
}