/*
 * pSeries hashtable management.
 *
 * SMP scalability work:
 *    Copyright (C) 2001 Anton Blanchard <anton@au.ibm.com>, IBM
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/spinlock.h>
#include <linux/bitops.h>
#include <linux/threads.h>
#include <linux/smp.h>

#include <asm/abs_addr.h>
#include <asm/machdep.h>
#include <asm/mmu.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/tlbflush.h>
#include <asm/tlb.h>
#include <asm/cputable.h>

#define HPTE_LOCK_BIT 3
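
/*
 * Each HPTE carries its own lock in its first doubleword: bit 3 is a
 * software-use bit (sitting between the large-page and bolted bits,
 * assuming the Hpte_dword0 layout in <asm/mmu.h>). Locking individual
 * entries rather than the whole hash table is what lets these routines
 * scale on SMP.
 */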
static inline void pSeries_lock_hpte(HPTE *hptep)
{
	unsigned long *word = &hptep->dw0.dword0;

	while (1) {
		if (!test_and_set_bit(HPTE_LOCK_BIT, word))
			break;
		while (test_bit(HPTE_LOCK_BIT, word))
			cpu_relax();
	}
}

static inline void pSeries_unlock_hpte(HPTE *hptep)
{
	unsigned long *word = &hptep->dw0.dword0;

	asm volatile("lwsync":::"memory");
	clear_bit(HPTE_LOCK_BIT, word);
}
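
/*
 * The architecture only allows one tlbie to be in flight at a time
 * system-wide, so every global invalidation below serializes on this
 * lock.
 */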
static spinlock_t pSeries_tlbie_lock = SPIN_LOCK_UNLOCKED;
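
/*
 * Insert an HPTE into the given group. The second doubleword is
 * written first; the eieio then guarantees the hardware walker cannot
 * see the valid bit before the rest of the entry. Writing the first
 * doubleword last both validates the entry and releases the per-HPTE
 * lock.
 */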
long pSeries_hpte_insert(unsigned long hpte_group, unsigned long va,
			 unsigned long prpn, int secondary,
			 unsigned long hpteflags, int bolted, int large)
{
	unsigned long arpn = physRpn_to_absRpn(prpn);
	HPTE *hptep = htab_data.htab + hpte_group;
	Hpte_dword0 dw0;
	HPTE lhpte;
	int i;

	for (i = 0; i < HPTES_PER_GROUP; i++) {
		dw0 = hptep->dw0.dw0;

		if (!dw0.v) {
			/* retry with lock held */
			pSeries_lock_hpte(hptep);
			dw0 = hptep->dw0.dw0;
			if (!dw0.v)
				break;
			pSeries_unlock_hpte(hptep);
		}

		hptep++;
	}

	if (i == HPTES_PER_GROUP)
		return -1;

	lhpte.dw1.dword1 = 0;
	lhpte.dw1.dw1.rpn = arpn;
	lhpte.dw1.flags.flags = hpteflags;

	lhpte.dw0.dword0 = 0;
	lhpte.dw0.dw0.avpn = va >> 23;
	lhpte.dw0.dw0.h = secondary;
	lhpte.dw0.dw0.bolted = bolted;
	lhpte.dw0.dw0.v = 1;

	if (large) {
		lhpte.dw0.dw0.l = 1;
		lhpte.dw0.dw0.avpn &= ~0x1UL;
	}

	hptep->dw1.dword1 = lhpte.dw1.dword1;

	/* Guarantee the second dword is visible before the valid bit */
	__asm__ __volatile__ ("eieio" : : : "memory");

	/*
	 * Now set the first dword including the valid bit
	 * NOTE: this also unlocks the hpte
	 */
	hptep->dw0.dword0 = lhpte.dw0.dword0;

	__asm__ __volatile__ ("ptesync" : : : "memory");

	return i | (secondary << 3);
}
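
/*
 * Evict a non-bolted entry so a caller whose group is full can retry
 * its insert. Starting from a pseudo-random slot (low bits of the
 * timebase) spreads the evictions across the group.
 */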
static long pSeries_hpte_remove(unsigned long hpte_group)
{
	HPTE *hptep;
	Hpte_dword0 dw0;
	int i;
	int slot_offset;

	/* pick a random entry to start at */
	slot_offset = mftb() & 0x7;

	for (i = 0; i < HPTES_PER_GROUP; i++) {
		hptep = htab_data.htab + hpte_group + slot_offset;
		dw0 = hptep->dw0.dw0;

		if (dw0.v && !dw0.bolted) {
			/* retry with lock held */
			pSeries_lock_hpte(hptep);
			dw0 = hptep->dw0.dw0;
			if (dw0.v && !dw0.bolted)
				break;
			pSeries_unlock_hpte(hptep);
		}

		slot_offset++;
		slot_offset &= 0x7;
	}

	if (i == HPTES_PER_GROUP)
		return -1;

	/* Invalidate the hpte. NOTE: this also unlocks it */
	hptep->dw0.dword0 = 0;

	return i;
}
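
/*
 * Atomically fold the new pp bits into the low bits of the second
 * doubleword with a ldarx/stdcx. loop, retrying if the reservation is
 * lost to a concurrent update of the doubleword.
 */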
static inline void set_pp_bit(unsigned long pp, HPTE *addr)
{
	unsigned long old;
	unsigned long *p = &addr->dw1.dword1;

	__asm__ __volatile__(
	"1:	ldarx	%0,0,%3\n"
	"	rldimi	%0,%2,0,61\n"
	"	stdcx.	%0,0,%3\n"
	"	bne	1b"
	: "=&r" (old), "=m" (*p)
	: "r" (pp), "r" (p), "m" (*p)
	: "cc");
}

/*
 * Only works on small pages. Yes, it's ugly to have to check each slot
 * in the group, but we only use this during bootup.
 */
static long pSeries_hpte_find(unsigned long vpn)
{
	HPTE *hptep;
	Hpte_dword0 dw0;
	unsigned long hash, i, j;
	long slot;

	hash = hpt_hash(vpn, 0);

	for (j = 0; j < 2; j++) {
		slot = (hash & htab_data.htab_hash_mask) * HPTES_PER_GROUP;
		for (i = 0; i < HPTES_PER_GROUP; i++) {
			hptep = htab_data.htab + slot;
			dw0 = hptep->dw0.dw0;

			if ((dw0.avpn == (vpn >> 11)) && dw0.v &&
			    (dw0.h == j)) {
				/* HPTE matches */
				if (j)
					slot = -slot;
				return slot;
			}
			++slot;
		}
		hash = ~hash;
	}

	return -1;
}
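
/*
 * Update the protection bits of the HPTE at the given slot. Returns
 * -1 if the entry no longer matches the va (it may have been evicted),
 * 0 otherwise; the stale translation is flushed from the TLB in
 * either case.
 */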
static long pSeries_hpte_updatepp(unsigned long slot, unsigned long newpp,
				  unsigned long va, int large, int local)
{
	HPTE *hptep = htab_data.htab + slot;
	Hpte_dword0 dw0;
	unsigned long avpn = va >> 23;
	unsigned long flags;
	int ret = 0;

	if (large)
		avpn &= ~0x1UL;

	pSeries_lock_hpte(hptep);

	dw0 = hptep->dw0.dw0;

	/* Even if we miss, we need to invalidate the TLB */
	if ((dw0.avpn != avpn) || !dw0.v) {
		pSeries_unlock_hpte(hptep);
		ret = -1;
	} else {
		set_pp_bit(newpp, hptep);
		pSeries_unlock_hpte(hptep);
	}

	/* Ensure it is out of the tlb too */
	if ((cur_cpu_spec->cpu_features & CPU_FTR_TLBIEL) && !large && local) {
		_tlbiel(va);
	} else {
		spin_lock_irqsave(&pSeries_tlbie_lock, flags);
		_tlbie(va, large);
		spin_unlock_irqrestore(&pSeries_tlbie_lock, flags);
	}

	return ret;
}

/*
 * Update the page protection bits. Intended to be used to create
 * guard pages for kernel data structures on pages which are bolted
 * in the HPT. Assumes pages being operated on will not be stolen.
 * Does not work on large pages.
 *
 * No need to lock here because we should be the only user.
 */
static void pSeries_hpte_updateboltedpp(unsigned long newpp, unsigned long ea)
{
	unsigned long vsid, va, vpn, flags;
	long slot;
	HPTE *hptep;

	vsid = get_kernel_vsid(ea);
	va = (vsid << 28) | (ea & 0x0fffffff);
	vpn = va >> PAGE_SHIFT;

	slot = pSeries_hpte_find(vpn);
	if (slot == -1)
		panic("could not find page to bolt\n");
	hptep = htab_data.htab + slot;

	set_pp_bit(newpp, hptep);

	/* Ensure it is out of the tlb too */
	spin_lock_irqsave(&pSeries_tlbie_lock, flags);
	_tlbie(va, 0);
	spin_unlock_irqrestore(&pSeries_tlbie_lock, flags);
}
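
/*
 * Invalidate one HPTE and flush the translation from the TLB, using
 * the cheaper local tlbiel when the CPU supports it and the mapping
 * was only ever used on this CPU.
 */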
static void pSeries_hpte_invalidate(unsigned long slot, unsigned long va,
				    int large, int local)
{
	HPTE *hptep = htab_data.htab + slot;
	Hpte_dword0 dw0;
	unsigned long avpn = va >> 23;
	unsigned long flags;

	if (large)
		avpn &= ~0x1UL;

	pSeries_lock_hpte(hptep);

	dw0 = hptep->dw0.dw0;

	/* Even if we miss, we need to invalidate the TLB */
	if ((dw0.avpn != avpn) || !dw0.v) {
		pSeries_unlock_hpte(hptep);
	} else {
		/* Invalidate the hpte. NOTE: this also unlocks it */
		hptep->dw0.dword0 = 0;
	}

	/* Invalidate the tlb */
	if ((cur_cpu_spec->cpu_features & CPU_FTR_TLBIEL) && !large && local) {
		_tlbiel(va);
	} else {
		spin_lock_irqsave(&pSeries_tlbie_lock, flags);
		_tlbie(va, large);
		spin_unlock_irqrestore(&pSeries_tlbie_lock, flags);
	}
}
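
/*
 * Batched flush: clear the HPTE for every address queued in this
 * CPU's ppc64_tlb_batch, then issue the TLB invalidations in a single
 * ptesync-bracketed sequence instead of one tlbie per page.
 */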
static void pSeries_flush_hash_range(unsigned long context,
				     unsigned long number, int local)
{
	unsigned long vsid, vpn, va, hash, secondary, slot, flags, avpn;
	int i, j;
	HPTE *hptep;
	Hpte_dword0 dw0;
	struct ppc64_tlb_batch *batch = &ppc64_tlb_batch[smp_processor_id()];

	/* XXX fix for large ptes */
	unsigned long large = 0;

	j = 0;
	for (i = 0; i < number; i++) {
		if ((batch->addr[i] >= USER_START) &&
		    (batch->addr[i] <= USER_END))
			vsid = get_vsid(context, batch->addr[i]);
		else
			vsid = get_kernel_vsid(batch->addr[i]);

		va = (vsid << 28) | (batch->addr[i] & 0x0fffffff);
		batch->vaddr[j] = va;
		if (large)
			vpn = va >> LARGE_PAGE_SHIFT;
		else
			vpn = va >> PAGE_SHIFT;
		hash = hpt_hash(vpn, large);
		secondary = (pte_val(batch->pte[i]) & _PAGE_SECONDARY) >> 15;
		if (secondary)
			hash = ~hash;
		slot = (hash & htab_data.htab_hash_mask) * HPTES_PER_GROUP;
		slot += (pte_val(batch->pte[i]) & _PAGE_GROUP_IX) >> 12;

		hptep = htab_data.htab + slot;

		avpn = va >> 23;
		if (large)
			avpn &= ~0x1UL;

		pSeries_lock_hpte(hptep);

		dw0 = hptep->dw0.dw0;

		/* Even if we miss, we need to invalidate the TLB */
		if ((dw0.avpn != avpn) || !dw0.v) {
			pSeries_unlock_hpte(hptep);
		} else {
			/* Invalidate the hpte. NOTE: this also unlocks it */
			hptep->dw0.dword0 = 0;
		}

		j++;
	}

	if ((cur_cpu_spec->cpu_features & CPU_FTR_TLBIEL) && !large && local) {
		asm volatile("ptesync":::"memory");

		for (i = 0; i < j; i++) {
			__asm__ __volatile__(
				"clrldi	%0,%0,16\n"
				"tlbiel	%0"
				: : "r" (batch->vaddr[i]) : "memory" );
		}

		asm volatile("ptesync":::"memory");
	} else {
		/* XXX double check that it is safe to take this late */
		spin_lock_irqsave(&pSeries_tlbie_lock, flags);

		asm volatile("ptesync":::"memory");

		for (i = 0; i < j; i++) {
			__asm__ __volatile__(
				"clrldi	%0,%0,16\n"
				"tlbie	%0"
				: : "r" (batch->vaddr[i]) : "memory" );
		}

		asm volatile("eieio; tlbsync; ptesync":::"memory");

		spin_unlock_irqrestore(&pSeries_tlbie_lock, flags);
	}
}
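
/*
 * Wire the bare-metal HPTE operations into the machine descriptor so
 * the generic hash table code calls them.
 */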
void hpte_init_pSeries(void)
{
	struct device_node *root;
	char *model;

	ppc_md.hpte_invalidate	   = pSeries_hpte_invalidate;
	ppc_md.hpte_updatepp	   = pSeries_hpte_updatepp;
	ppc_md.hpte_updateboltedpp = pSeries_hpte_updateboltedpp;
	ppc_md.hpte_insert	   = pSeries_hpte_insert;
	ppc_md.hpte_remove	   = pSeries_hpte_remove;

	/* Disable TLB batching on nighthawk */
	root = find_path_device("/");
	if (root) {
		model = get_property(root, "model", NULL);
		if (model && strcmp(model, "CHRP IBM,9076-N81"))
			ppc_md.flush_hash_range = pSeries_flush_hash_range;
	}
}