1 /*
2  *  linux/mm/memory.c
3  *
4  *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
5  */
6
7 /*
8  * demand-loading started 01.12.91 - seems it is high on the list of
9  * things wanted, and it should be easy to implement. - Linus
10  */
11
12 /*
13  * Ok, demand-loading was easy, shared pages a little bit trickier. Shared
14  * pages started 02.12.91, seems to work. - Linus.
15  *
16  * Tested sharing by executing about 30 /bin/sh: under the old kernel it
17  * would have taken more than the 6M I have free, but it worked well as
18  * far as I could see.
19  *
20  * Also corrected some "invalidate()"s - I wasn't doing enough of them.
21  */
22
23 /*
24  * Real VM (paging to/from disk) started 18.12.91. Much more work and
25  * thought has to go into this. Oh, well..
26  * 19.12.91  -  works, somewhat. Sometimes I get faults, don't know why.
27  *              Found it. Everything seems to work now.
28  * 20.12.91  -  Ok, making the swap-device changeable like the root.
29  */
30
31 /*
32  * 05.04.94  -  Multi-page memory management added for v1.1.
33  *              Idea by Alex Bligh (alex@cconcepts.co.uk)
34  *
35  * 16.07.99  -  Support of BIGMEM added by Gerhard Wichert, Siemens AG
36  *              (Gerhard.Wichert@pdb.siemens.de)
37  */
38
39 #include <linux/kernel_stat.h>
40 #include <linux/mm.h>
41 #include <linux/hugetlb.h>
42 #include <linux/mman.h>
43 #include <linux/swap.h>
44 #include <linux/highmem.h>
45 #include <linux/pagemap.h>
46 #include <linux/vcache.h>
47 #include <linux/rmap-locking.h>
48
49 #include <asm/pgalloc.h>
50 #include <asm/rmap.h>
51 #include <asm/uaccess.h>
52 #include <asm/tlb.h>
53 #include <asm/tlbflush.h>
54 #include <asm/pgtable.h>
55
56 #include <linux/swapops.h>
57
58 #ifndef CONFIG_DISCONTIGMEM
59 /* use the per-pgdat data instead for discontigmem - mbligh */
60 unsigned long max_mapnr;
61 struct page *mem_map;
62 #endif
63
64 unsigned long num_physpages;
65 void * high_memory;
66 struct page *highmem_start_page;
67
68 /*
69  * We special-case the C-O-W ZERO_PAGE, because it's such
70  * a common occurrence (no need to read the page to know
71  * that it's zero - better for the cache and memory subsystem).
72  */
73 static inline void copy_cow_page(struct page * from, struct page * to, unsigned long address)
74 {
75         if (from == ZERO_PAGE(address)) {
76                 clear_user_highpage(to, address);
77                 return;
78         }
79         copy_user_highpage(to, from, address);
80 }
81
82 /*
83  * Note: this doesn't free the actual pages themselves. That
84  * has been handled earlier when unmapping all the memory regions.
85  */
86 static inline void free_one_pmd(struct mmu_gather *tlb, pmd_t * dir)
87 {
88         struct page *page;
89
90         if (pmd_none(*dir))
91                 return;
92         if (pmd_bad(*dir)) {
93                 pmd_ERROR(*dir);
94                 pmd_clear(dir);
95                 return;
96         }
97         page = pmd_page(*dir);
98         pmd_clear(dir);
99         pgtable_remove_rmap(page);
100         pte_free_tlb(tlb, page);
101 }
102
103 static inline void free_one_pgd(struct mmu_gather *tlb, pgd_t * dir)
104 {
105         int j;
106         pmd_t * pmd;
107
108         if (pgd_none(*dir))
109                 return;
110         if (pgd_bad(*dir)) {
111                 pgd_ERROR(*dir);
112                 pgd_clear(dir);
113                 return;
114         }
115         pmd = pmd_offset(dir, 0);
116         pgd_clear(dir);
117         for (j = 0; j < PTRS_PER_PMD ; j++)
118                 free_one_pmd(tlb, pmd+j);
119         pmd_free_tlb(tlb, pmd);
120 }
121
122 /*
123  * This function clears all user-level page tables of a process - this
124  * is needed by execve(), so that old pages aren't in the way.
125  *
126  * Must be called with pagetable lock held.
127  */
128 void clear_page_tables(struct mmu_gather *tlb, unsigned long first, int nr)
129 {
130         pgd_t * page_dir = tlb->mm->pgd;
131
132         page_dir += first;
133         do {
134                 free_one_pgd(tlb, page_dir);
135                 page_dir++;
136         } while (--nr);
137 }
138
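/*
 * Allocate a user page-table page for this pmd if none is present, and
 * return a kmapped pointer to the pte for the given address.  The caller
 * holds mm->page_table_lock; it is dropped around the allocation, so the
 * pmd is re-checked after the lock is retaken.
 */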
139 pte_t * pte_alloc_map(struct mm_struct *mm, pmd_t *pmd, unsigned long address)
140 {
141         if (!pmd_present(*pmd)) {
142                 struct page *new;
143
144                 spin_unlock(&mm->page_table_lock);
145                 new = pte_alloc_one(mm, address);
146                 spin_lock(&mm->page_table_lock);
147                 if (!new)
148                         return NULL;
149
150                 /*
151                  * Because we dropped the lock, we should re-check the
152                  * entry, as somebody else could have populated it..
153                  */
154                 if (pmd_present(*pmd)) {
155                         pte_free(new);
156                         goto out;
157                 }
158                 pgtable_add_rmap(new, mm, address);
159                 pmd_populate(mm, pmd, new);
160         }
161 out:
162         return pte_offset_map(pmd, address);
163 }
164
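/*
 * Kernel-pagetable counterpart of pte_alloc_map(): allocate a pte page
 * for a kernel pmd entry, with the same lock-drop and re-check sequence.
 */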
165 pte_t * pte_alloc_kernel(struct mm_struct *mm, pmd_t *pmd, unsigned long address)
166 {
167         if (!pmd_present(*pmd)) {
168                 pte_t *new;
169
170                 spin_unlock(&mm->page_table_lock);
171                 new = pte_alloc_one_kernel(mm, address);
172                 spin_lock(&mm->page_table_lock);
173                 if (!new)
174                         return NULL;
175
176                 /*
177                  * Because we dropped the lock, we should re-check the
178                  * entry, as somebody else could have populated it..
179                  */
180                 if (pmd_present(*pmd)) {
181                         pte_free_kernel(new);
182                         goto out;
183                 }
184                 pgtable_add_rmap(virt_to_page(new), mm, address);
185                 pmd_populate_kernel(mm, pmd, new);
186         }
187 out:
188         return pte_offset_kernel(pmd, address);
189 }
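/*
 * Byte offset of a pte/pmd pointer within its table page; used by
 * copy_page_range() below to notice when a copy loop reaches the end
 * of a table.
 */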
190 #define PTE_TABLE_MASK  ((PTRS_PER_PTE-1) * sizeof(pte_t))
191 #define PMD_TABLE_MASK  ((PTRS_PER_PMD-1) * sizeof(pmd_t))
192
193 /*
194  * copy one vm_area from one task to the other. Assumes that the page
195  * tables already present in the new task have been cleared over the
196  * whole range covered by this vma.
197  *
198  * 08Jan98 Merged into one routine from several inline routines to reduce
199  *         variable count and make things faster. -jj
200  *
201  * dst->page_table_lock is held on entry and exit,
202  * but may be dropped within pmd_alloc() and pte_alloc_map().
203  */
204 int copy_page_range(struct mm_struct *dst, struct mm_struct *src,
205                         struct vm_area_struct *vma)
206 {
207         pgd_t * src_pgd, * dst_pgd;
208         unsigned long address = vma->vm_start;
209         unsigned long end = vma->vm_end;
210         unsigned long cow;
211         struct pte_chain *pte_chain = NULL;
212
213         if (is_vm_hugetlb_page(vma))
214                 return copy_hugetlb_page_range(dst, src, vma);
215
216         pte_chain = pte_chain_alloc(GFP_ATOMIC);
217         if (!pte_chain) {
218                 spin_unlock(&dst->page_table_lock);
219                 pte_chain = pte_chain_alloc(GFP_KERNEL);
220                 spin_lock(&dst->page_table_lock);
221                 if (!pte_chain)
222                         goto nomem;
223         }
224         
225         cow = (vma->vm_flags & (VM_SHARED | VM_MAYWRITE)) == VM_MAYWRITE;
226         src_pgd = pgd_offset(src, address)-1;
227         dst_pgd = pgd_offset(dst, address)-1;
228
229         for (;;) {
230                 pmd_t * src_pmd, * dst_pmd;
231
232                 src_pgd++; dst_pgd++;
233                 
234                 /* copy_pmd_range */
235                 
236                 if (pgd_none(*src_pgd))
237                         goto skip_copy_pmd_range;
238                 if (pgd_bad(*src_pgd)) {
239                         pgd_ERROR(*src_pgd);
240                         pgd_clear(src_pgd);
241 skip_copy_pmd_range:    address = (address + PGDIR_SIZE) & PGDIR_MASK;
242                         if (!address || (address >= end))
243                                 goto out;
244                         continue;
245                 }
246
247                 src_pmd = pmd_offset(src_pgd, address);
248                 dst_pmd = pmd_alloc(dst, dst_pgd, address);
249                 if (!dst_pmd)
250                         goto nomem;
251
252                 do {
253                         pte_t * src_pte, * dst_pte;
254                 
255                         /* copy_pte_range */
256                 
257                         if (pmd_none(*src_pmd))
258                                 goto skip_copy_pte_range;
259                         if (pmd_bad(*src_pmd)) {
260                                 pmd_ERROR(*src_pmd);
261                                 pmd_clear(src_pmd);
262 skip_copy_pte_range:
263                                 address = (address + PMD_SIZE) & PMD_MASK;
264                                 if (address >= end)
265                                         goto out;
266                                 goto cont_copy_pmd_range;
267                         }
268
269                         dst_pte = pte_alloc_map(dst, dst_pmd, address);
270                         if (!dst_pte)
271                                 goto nomem;
272                         spin_lock(&src->page_table_lock);       
273                         src_pte = pte_offset_map_nested(src_pmd, address);
274                         do {
275                                 pte_t pte = *src_pte;
276                                 struct page *page;
277                                 unsigned long pfn;
278
279                                 /* copy_one_pte */
280
281                                 if (pte_none(pte))
282                                         goto cont_copy_pte_range_noset;
283                                 /* pte contains position in swap, so copy. */
284                                 if (!pte_present(pte)) {
285                                         if (!pte_file(pte))
286                                                 swap_duplicate(pte_to_swp_entry(pte));
287                                         set_pte(dst_pte, pte);
288                                         goto cont_copy_pte_range_noset;
289                                 }
290                                 pfn = pte_pfn(pte);
291                                 /* the pte points outside of valid memory, the
292                                  * mapping is assumed to be good, meaningful
293                                  * and not mapped via rmap - duplicate the
294                                  * mapping as is.
295                                  */
296                                 page = NULL;
297                                 if (pfn_valid(pfn)) 
298                                         page = pfn_to_page(pfn); 
299
300                                 if (!page || PageReserved(page)) {
301                                         set_pte(dst_pte, pte);
302                                         goto cont_copy_pte_range_noset;
303                                 }
304
305                                 /*
306                                  * If it's a COW mapping, write protect it both
307                                  * in the parent and the child
308                                  */
309                                 if (cow) {
310                                         ptep_set_wrprotect(src_pte);
311                                         pte = *src_pte;
312                                 }
313
314                                 /*
315                                  * If it's a shared mapping, mark it clean in
316                                  * the child
317                                  */
318                                 if (vma->vm_flags & VM_SHARED)
319                                         pte = pte_mkclean(pte);
320                                 pte = pte_mkold(pte);
321                                 get_page(page);
322                                 dst->rss++;
323
324                                 set_pte(dst_pte, pte);
325                                 pte_chain = page_add_rmap(page, dst_pte,
326                                                         pte_chain);
327                                 if (pte_chain)
328                                         goto cont_copy_pte_range_noset;
329                                 pte_chain = pte_chain_alloc(GFP_ATOMIC);
330                                 if (pte_chain)
331                                         goto cont_copy_pte_range_noset;
332
333                                 /*
334                                  * pte_chain allocation failed, and we need to
335                                  * run page reclaim.
336                                  */
337                                 pte_unmap_nested(src_pte);
338                                 pte_unmap(dst_pte);
339                                 spin_unlock(&src->page_table_lock);     
340                                 spin_unlock(&dst->page_table_lock);     
341                                 pte_chain = pte_chain_alloc(GFP_KERNEL);
342                                 spin_lock(&dst->page_table_lock);       
343                                 if (!pte_chain)
344                                         goto nomem;
345                                 spin_lock(&src->page_table_lock);
346                                 dst_pte = pte_offset_map(dst_pmd, address);
347                                 src_pte = pte_offset_map_nested(src_pmd,
348                                                                 address);
349 cont_copy_pte_range_noset:
350                                 address += PAGE_SIZE;
351                                 if (address >= end) {
352                                         pte_unmap_nested(src_pte);
353                                         pte_unmap(dst_pte);
354                                         goto out_unlock;
355                                 }
356                                 src_pte++;
357                                 dst_pte++;
358                         } while ((unsigned long)src_pte & PTE_TABLE_MASK);
359                         pte_unmap_nested(src_pte-1);
360                         pte_unmap(dst_pte-1);
361                         spin_unlock(&src->page_table_lock);
362                 
363 cont_copy_pmd_range:
364                         src_pmd++;
365                         dst_pmd++;
366                 } while ((unsigned long)src_pmd & PMD_TABLE_MASK);
367         }
368 out_unlock:
369         spin_unlock(&src->page_table_lock);
370 out:
371         pte_chain_free(pte_chain);
372         return 0;
373 nomem:
374         pte_chain_free(pte_chain);
375         return -ENOMEM;
376 }
377
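/*
 * Clear the ptes covering one pte page's worth of the given range.
 * Present pages are queued on the mmu_gather for freeing after their
 * dirty/accessed state and reverse mappings are updated; swap entries
 * are dropped with free_swap_and_cache(), and file ptes are just cleared.
 */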
378 static void
379 zap_pte_range(struct mmu_gather *tlb, pmd_t * pmd,
380                 unsigned long address, unsigned long size)
381 {
382         unsigned long offset;
383         pte_t *ptep;
384
385         if (pmd_none(*pmd))
386                 return;
387         if (pmd_bad(*pmd)) {
388                 pmd_ERROR(*pmd);
389                 pmd_clear(pmd);
390                 return;
391         }
392         ptep = pte_offset_map(pmd, address);
393         offset = address & ~PMD_MASK;
394         if (offset + size > PMD_SIZE)
395                 size = PMD_SIZE - offset;
396         size &= PAGE_MASK;
397         for (offset=0; offset < size; ptep++, offset += PAGE_SIZE) {
398                 pte_t pte = *ptep;
399                 if (pte_none(pte))
400                         continue;
401                 if (pte_present(pte)) {
402                         unsigned long pfn = pte_pfn(pte);
403
404                         pte = ptep_get_and_clear(ptep);
405                         tlb_remove_tlb_entry(tlb, ptep, address+offset);
406                         if (pfn_valid(pfn)) {
407                                 struct page *page = pfn_to_page(pfn);
408                                 if (!PageReserved(page)) {
409                                         if (pte_dirty(pte))
410                                                 set_page_dirty(page);
411                                         if (page->mapping && pte_young(pte) &&
412                                                         !PageSwapCache(page))
413                                                 mark_page_accessed(page);
414                                         tlb->freed++;
415                                         page_remove_rmap(page, ptep);
416                                         tlb_remove_page(tlb, page);
417                                 }
418                         }
419                 } else {
420                         if (!pte_file(pte))
421                                 free_swap_and_cache(pte_to_swp_entry(pte));
422                         pte_clear(ptep);
423                 }
424         }
425         pte_unmap(ptep-1);
426 }
427
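/*
 * Walk the pmds under one pgd entry and zap each pte range they cover,
 * clamping the end of the range to the pgd boundary.
 */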
428 static void
429 zap_pmd_range(struct mmu_gather *tlb, pgd_t * dir,
430                 unsigned long address, unsigned long size)
431 {
432         pmd_t * pmd;
433         unsigned long end;
434
435         if (pgd_none(*dir))
436                 return;
437         if (pgd_bad(*dir)) {
438                 pgd_ERROR(*dir);
439                 pgd_clear(dir);
440                 return;
441         }
442         pmd = pmd_offset(dir, address);
443         end = address + size;
444         if (end > ((address + PGDIR_SIZE) & PGDIR_MASK))
445                 end = ((address + PGDIR_SIZE) & PGDIR_MASK);
446         do {
447                 zap_pte_range(tlb, pmd, address, end - address);
448                 address = (address + PMD_SIZE) & PMD_MASK; 
449                 pmd++;
450         } while (address < end);
451 }
452
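/*
 * Unmap a user address range within a single vma.  Hugetlb vmas are
 * handed to unmap_hugepage_range(); everything else is torn down pgd by
 * pgd under the caller's mmu_gather.
 */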
453 void unmap_page_range(struct mmu_gather *tlb, struct vm_area_struct *vma,
454                         unsigned long address, unsigned long end)
455 {
456         pgd_t * dir;
457
458         if (is_vm_hugetlb_page(vma)) {
459                 unmap_hugepage_range(vma, address, end);
460                 return;
461         }
462
463         BUG_ON(address >= end);
464
465         dir = pgd_offset(vma->vm_mm, address);
466         tlb_start_vma(tlb, vma);
467         do {
468                 zap_pmd_range(tlb, dir, address, end - address);
469                 address = (address + PGDIR_SIZE) & PGDIR_MASK;
470                 dir++;
471         } while (address && (address < end));
472         tlb_end_vma(tlb, vma);
473 }
474
475 /* Dispose of an entire struct mmu_gather per rescheduling point */
476 #if defined(CONFIG_SMP) && defined(CONFIG_PREEMPT)
477 #define ZAP_BLOCK_SIZE  (FREE_PTE_NR * PAGE_SIZE)
478 #endif
479
480 /* For UP, 256 pages at a time gives nice low latency */
481 #if !defined(CONFIG_SMP) && defined(CONFIG_PREEMPT)
482 #define ZAP_BLOCK_SIZE  (256 * PAGE_SIZE)
483 #endif
484
485 /* No preempt: go for the best straight-line efficiency */
486 #if !defined(CONFIG_PREEMPT)
487 #define ZAP_BLOCK_SIZE  (~(0UL))
488 #endif
489
490 /**
491  * unmap_vmas - unmap a range of memory covered by a list of vma's
492  * @tlbp: address of the caller's struct mmu_gather
493  * @mm: the controlling mm_struct
494  * @vma: the starting vma
495  * @start_addr: virtual address at which to start unmapping
496  * @end_addr: virtual address at which to end unmapping
497  * @nr_accounted: Place number of unmapped pages in vm-accountable vma's here
498  *
499  * Returns the number of vma's which were covered by the unmapping.
500  *
501  * Unmap all pages in the vma list.  Called under page_table_lock.
502  *
503  * We aim to not hold page_table_lock for too long (for scheduling latency
504  * reasons).  So zap pages in ZAP_BLOCK_SIZE bytecounts.  This means we need to
505  * return the ending mmu_gather to the caller.
506  *
507  * Only addresses between `start' and `end' will be unmapped.
508  *
509  * The VMA list must be sorted in ascending virtual address order.
510  *
511  * unmap_vmas() assumes that the caller will flush the whole unmapped address
512  * range after unmap_vmas() returns.  So the only responsibility here is to
513  * ensure that any thus-far unmapped pages are flushed before unmap_vmas()
514  * drops the lock and schedules.
515  */
516 int unmap_vmas(struct mmu_gather **tlbp, struct mm_struct *mm,
517                 struct vm_area_struct *vma, unsigned long start_addr,
518                 unsigned long end_addr, unsigned long *nr_accounted)
519 {
520         unsigned long zap_bytes = ZAP_BLOCK_SIZE;
521         unsigned long tlb_start;        /* For tlb_finish_mmu */
522         int tlb_start_valid = 0;
523         int ret = 0;
524
525         if (vma) {      /* debug.  killme. */
526                 if (end_addr <= vma->vm_start)
527                         printk("%s: end_addr(0x%08lx) <= vm_start(0x%08lx)\n",
528                                 __FUNCTION__, end_addr, vma->vm_start);
529                 if (start_addr >= vma->vm_end)
530                         printk("%s: start_addr(0x%08lx) >= vm_end(0x%08lx)\n",
531                                 __FUNCTION__, start_addr, vma->vm_end);
532         }
533
534         for ( ; vma && vma->vm_start < end_addr; vma = vma->vm_next) {
535                 unsigned long start;
536                 unsigned long end;
537
538                 start = max(vma->vm_start, start_addr);
539                 if (start >= vma->vm_end)
540                         continue;
541                 end = min(vma->vm_end, end_addr);
542                 if (end <= vma->vm_start)
543                         continue;
544
545                 if (vma->vm_flags & VM_ACCOUNT)
546                         *nr_accounted += (end - start) >> PAGE_SHIFT;
547
548                 ret++;
549                 while (start != end) {
550                         unsigned long block;
551
552                         if (is_vm_hugetlb_page(vma))
553                                 block = end - start;
554                         else
555                                 block = min(zap_bytes, end - start);
556
557                         if (!tlb_start_valid) {
558                                 tlb_start = start;
559                                 tlb_start_valid = 1;
560                         }
561
562                         unmap_page_range(*tlbp, vma, start, start + block);
563                         start += block;
564                         zap_bytes -= block;
565                         if ((long)zap_bytes > 0)
566                                 continue;
567                         if (need_resched()) {
568                                 tlb_finish_mmu(*tlbp, tlb_start, start);
569                                 cond_resched_lock(&mm->page_table_lock);
570                                 *tlbp = tlb_gather_mmu(mm, 0);
571                                 tlb_start_valid = 0;
572                         }
573                         zap_bytes = ZAP_BLOCK_SIZE;
574                 }
575                 if (vma->vm_next && vma->vm_next->vm_start < vma->vm_end)
576                         printk("%s: VMA list is not sorted correctly!\n",
577                                 __FUNCTION__);          
578         }
579         return ret;
580 }
581
582 /**
583  * zap_page_range - remove user pages in a given range
584  * @vma: vm_area_struct holding the applicable pages
585  * @address: starting address of pages to zap
586  * @size: number of bytes to zap
587  */
588 void zap_page_range(struct vm_area_struct *vma,
589                         unsigned long address, unsigned long size)
590 {
591         struct mm_struct *mm = vma->vm_mm;
592         struct mmu_gather *tlb;
593         unsigned long end = address + size;
594         unsigned long nr_accounted = 0;
595
596         might_sleep();
597
598         if (is_vm_hugetlb_page(vma)) {
599                 zap_hugepage_range(vma, address, size);
600                 return;
601         }
602
603         lru_add_drain();
604         spin_lock(&mm->page_table_lock);
605         tlb = tlb_gather_mmu(mm, 0);
606         unmap_vmas(&tlb, mm, vma, address, end, &nr_accounted);
607         tlb_finish_mmu(tlb, address, end);
608         spin_unlock(&mm->page_table_lock);
609 }
610
611 /*
612  * Do a quick page-table lookup for a single page.
613  * mm->page_table_lock must be held.
614  */
615 struct page *
616 follow_page(struct mm_struct *mm, unsigned long address, int write) 
617 {
618         pgd_t *pgd;
619         pmd_t *pmd;
620         pte_t *ptep, pte;
621         unsigned long pfn;
622         struct vm_area_struct *vma;
623
624         vma = hugepage_vma(mm, address);
625         if (vma)
626                 return follow_huge_addr(mm, vma, address, write);
627
628         pgd = pgd_offset(mm, address);
629         if (pgd_none(*pgd) || pgd_bad(*pgd))
630                 goto out;
631
632         pmd = pmd_offset(pgd, address);
633         if (pmd_none(*pmd))
634                 goto out;
635         if (pmd_huge(*pmd))
636                 return follow_huge_pmd(mm, address, pmd, write);
637         if (pmd_bad(*pmd))
638                 goto out;
639
640         ptep = pte_offset_map(pmd, address);
641         if (!ptep)
642                 goto out;
643
644         pte = *ptep;
645         pte_unmap(ptep);
646         if (pte_present(pte)) {
647                 if (!write || (pte_write(pte) && pte_dirty(pte))) {
648                         pfn = pte_pfn(pte);
649                         if (pfn_valid(pfn))
650                                 return pfn_to_page(pfn);
651                 }
652         }
653
654 out:
655         return NULL;
656 }
657
658 /* 
659  * Given a physical address, is there a useful struct page pointing to
660  * it?  This may become more complex in the future if we start dealing
661  * with IO-aperture pages for direct-IO.
662  */
663
664 static inline struct page *get_page_map(struct page *page)
665 {
666         if (!pfn_valid(page_to_pfn(page)))
667                 return NULL;
668         return page;
669 }
670
671
672 int get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
673                 unsigned long start, int len, int write, int force,
674                 struct page **pages, struct vm_area_struct **vmas)
675 {
676         int i;
677         unsigned int flags;
678
679         /* 
680          * Require read or write permissions.
681          * If 'force' is set, we only require the "MAY" flags.
682          */
683         flags = write ? (VM_WRITE | VM_MAYWRITE) : (VM_READ | VM_MAYREAD);
684         flags &= force ? (VM_MAYREAD | VM_MAYWRITE) : (VM_READ | VM_WRITE);
685         i = 0;
686
687         do {
688                 struct vm_area_struct * vma;
689
690                 vma = find_extend_vma(mm, start);
691
692 #ifdef FIXADDR_USER_START
693                 if (!vma &&
694                     start >= FIXADDR_USER_START && start < FIXADDR_USER_END) {
695                         static struct vm_area_struct fixmap_vma = {
696                                 /* Catch users - if there are any valid
697                                    ones, we can make this be "&init_mm" or
698                                    something.  */
699                                 .vm_mm = NULL,
700                                 .vm_start = FIXADDR_USER_START,
701                                 .vm_end = FIXADDR_USER_END,
702                                 .vm_page_prot = PAGE_READONLY,
703                                 .vm_flags = VM_READ | VM_EXEC,
704                         };
705                         unsigned long pg = start & PAGE_MASK;
706                         pgd_t *pgd;
707                         pmd_t *pmd;
708                         pte_t *pte;
709                         if (write) /* user fixmap pages are read-only */
710                                 return i ? : -EFAULT;
711                         pgd = pgd_offset_k(pg);
712                         if (!pgd)
713                                 return i ? : -EFAULT;
714                         pmd = pmd_offset(pgd, pg);
715                         if (!pmd)
716                                 return i ? : -EFAULT;
717                         pte = pte_offset_kernel(pmd, pg);
718                         if (!pte || !pte_present(*pte))
719                                 return i ? : -EFAULT;
720                         if (pages) {
721                                 pages[i] = pte_page(*pte);
722                                 get_page(pages[i]);
723                         }
724                         if (vmas)
725                                 vmas[i] = &fixmap_vma;
726                         i++;
727                         start += PAGE_SIZE;
728                         len--;
729                         continue;
730                 }
731 #endif
732
733                 if (!vma || (pages && (vma->vm_flags & VM_IO))
734                                 || !(flags & vma->vm_flags))
735                         return i ? : -EFAULT;
736
737                 if (is_vm_hugetlb_page(vma)) {
738                         i = follow_hugetlb_page(mm, vma, pages, vmas,
739                                                 &start, &len, i);
740                         continue;
741                 }
742                 spin_lock(&mm->page_table_lock);
743                 do {
744                         struct page *map;
745                         while (!(map = follow_page(mm, start, write))) {
746                                 spin_unlock(&mm->page_table_lock);
747                                 switch (handle_mm_fault(mm,vma,start,write)) {
748                                 case VM_FAULT_MINOR:
749                                         tsk->min_flt++;
750                                         break;
751                                 case VM_FAULT_MAJOR:
752                                         tsk->maj_flt++;
753                                         break;
754                                 case VM_FAULT_SIGBUS:
755                                         return i ? i : -EFAULT;
756                                 case VM_FAULT_OOM:
757                                         return i ? i : -ENOMEM;
758                                 default:
759                                         BUG();
760                                 }
761                                 spin_lock(&mm->page_table_lock);
762                         }
763                         if (pages) {
764                                 pages[i] = get_page_map(map);
765                                 if (!pages[i]) {
766                                         spin_unlock(&mm->page_table_lock);
767                                         while (i--)
768                                                 page_cache_release(pages[i]);
769                                         i = -EFAULT;
770                                         goto out;
771                                 }
772                                 flush_dcache_page(pages[i]);
773                                 if (!PageReserved(pages[i]))
774                                         page_cache_get(pages[i]);
775                         }
776                         if (vmas)
777                                 vmas[i] = vma;
778                         i++;
779                         start += PAGE_SIZE;
780                         len--;
781                 } while(len && start < vma->vm_end);
782                 spin_unlock(&mm->page_table_lock);
783         } while(len);
784 out:
785         return i;
786 }
787
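/*
 * Fill one pte page's worth of the range with read-only mappings of
 * ZERO_PAGE; the ptes are expected to be empty on entry.
 */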
788 static void zeromap_pte_range(pte_t * pte, unsigned long address,
789                                      unsigned long size, pgprot_t prot)
790 {
791         unsigned long end;
792
793         address &= ~PMD_MASK;
794         end = address + size;
795         if (end > PMD_SIZE)
796                 end = PMD_SIZE;
797         do {
798                 pte_t zero_pte = pte_wrprotect(mk_pte(ZERO_PAGE(address), prot));
799                 BUG_ON(!pte_none(*pte));
800                 set_pte(pte, zero_pte);
801                 address += PAGE_SIZE;
802                 pte++;
803         } while (address && (address < end));
804 }
805
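/*
 * Allocate pte pages as needed and zero-map each pmd-sized chunk of the
 * range, stopping at the pgd boundary.
 */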
806 static inline int zeromap_pmd_range(struct mm_struct *mm, pmd_t * pmd, unsigned long address,
807                                     unsigned long size, pgprot_t prot)
808 {
809         unsigned long end;
810
811         address &= ~PGDIR_MASK;
812         end = address + size;
813         if (end > PGDIR_SIZE)
814                 end = PGDIR_SIZE;
815         do {
816                 pte_t * pte = pte_alloc_map(mm, pmd, address);
817                 if (!pte)
818                         return -ENOMEM;
819                 zeromap_pte_range(pte, address, end - address, prot);
820                 pte_unmap(pte);
821                 address = (address + PMD_SIZE) & PMD_MASK;
822                 pmd++;
823         } while (address && (address < end));
824         return 0;
825 }
826
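/*
 * Map an address range of a vma to the zero page with the given
 * protection, allocating page tables as needed.  Runs under
 * mm->page_table_lock and flushes the TLB for the range when done.
 */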
827 int zeromap_page_range(struct vm_area_struct *vma, unsigned long address, unsigned long size, pgprot_t prot)
828 {
829         int error = 0;
830         pgd_t * dir;
831         unsigned long beg = address;
832         unsigned long end = address + size;
833         struct mm_struct *mm = vma->vm_mm;
834
835         dir = pgd_offset(mm, address);
836         flush_cache_range(vma, beg, end);
837         if (address >= end)
838                 BUG();
839
840         spin_lock(&mm->page_table_lock);
841         do {
842                 pmd_t *pmd = pmd_alloc(mm, dir, address);
843                 error = -ENOMEM;
844                 if (!pmd)
845                         break;
846                 error = zeromap_pmd_range(mm, pmd, address, end - address, prot);
847                 if (error)
848                         break;
849                 address = (address + PGDIR_SIZE) & PGDIR_MASK;
850                 dir++;
851         } while (address && (address < end));
852         flush_tlb_range(vma, beg, end);
853         spin_unlock(&mm->page_table_lock);
854         return error;
855 }
856
857 /*
858  * maps a range of physical memory into the requested pages. the old
859  * mappings are removed. any references to nonexistent pages results
860  * in null mappings (currently treated as "copy-on-access")
861  */
862 static inline void remap_pte_range(pte_t * pte, unsigned long address, unsigned long size,
863         unsigned long phys_addr, pgprot_t prot)
864 {
865         unsigned long end;
866         unsigned long pfn;
867
868         address &= ~PMD_MASK;
869         end = address + size;
870         if (end > PMD_SIZE)
871                 end = PMD_SIZE;
872         pfn = phys_addr >> PAGE_SHIFT;
873         do {
874                 BUG_ON(!pte_none(*pte));
875                 if (!pfn_valid(pfn) || PageReserved(pfn_to_page(pfn)))
876                         set_pte(pte, pfn_pte(pfn, prot));
877                 address += PAGE_SIZE;
878                 pfn++;
879                 pte++;
880         } while (address && (address < end));
881 }
882
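/*
 * Remap one pgd's worth of the range: allocate pte pages and hand each
 * pmd-sized chunk to remap_pte_range() with the matching physical offset.
 */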
883 static inline int remap_pmd_range(struct mm_struct *mm, pmd_t * pmd, unsigned long address, unsigned long size,
884         unsigned long phys_addr, pgprot_t prot)
885 {
886         unsigned long base, end;
887
888         base = address & PGDIR_MASK;
889         address &= ~PGDIR_MASK;
890         end = address + size;
891         if (end > PGDIR_SIZE)
892                 end = PGDIR_SIZE;
893         phys_addr -= address;
894         do {
895                 pte_t * pte = pte_alloc_map(mm, pmd, base + address);
896                 if (!pte)
897                         return -ENOMEM;
898                 remap_pte_range(pte, base + address, end - address, address + phys_addr, prot);
899                 pte_unmap(pte);
900                 address = (address + PMD_SIZE) & PMD_MASK;
901                 pmd++;
902         } while (address && (address < end));
903         return 0;
904 }
905
906 /*  Note: this is only safe if the mm semaphore is held when called. */
907 int remap_page_range(struct vm_area_struct *vma, unsigned long from, unsigned long phys_addr, unsigned long size, pgprot_t prot)
908 {
909         int error = 0;
910         pgd_t * dir;
911         unsigned long beg = from;
912         unsigned long end = from + size;
913         struct mm_struct *mm = vma->vm_mm;
914
915         phys_addr -= from;
916         dir = pgd_offset(mm, from);
917         flush_cache_range(vma, beg, end);
918         if (from >= end)
919                 BUG();
920
921         spin_lock(&mm->page_table_lock);
922         do {
923                 pmd_t *pmd = pmd_alloc(mm, dir, from);
924                 error = -ENOMEM;
925                 if (!pmd)
926                         break;
927                 error = remap_pmd_range(mm, pmd, from, end - from, phys_addr + from, prot);
928                 if (error)
929                         break;
930                 from = (from + PGDIR_SIZE) & PGDIR_MASK;
931                 dir++;
932         } while (from && (from < end));
933         flush_tlb_range(vma, beg, end);
934         spin_unlock(&mm->page_table_lock);
935         return error;
936 }
937
938 /*
939  * Establish a new mapping:
940  *  - flush the old one
941  *  - update the page tables
942  *  - inform the TLB about the new one
943  *
944  * We hold the mm semaphore for reading and vma->vm_mm->page_table_lock
945  */
946 static inline void establish_pte(struct vm_area_struct * vma, unsigned long address, pte_t *page_table, pte_t entry)
947 {
948         set_pte(page_table, entry);
949         flush_tlb_page(vma, address);
950         update_mmu_cache(vma, address, entry);
951 }
952
953 /*
954  * We hold the mm semaphore for reading and vma->vm_mm->page_table_lock
955  */
956 static inline void break_cow(struct vm_area_struct * vma, struct page * new_page, unsigned long address, 
957                 pte_t *page_table)
958 {
959         invalidate_vcache(address, vma->vm_mm, new_page);
960         flush_cache_page(vma, address);
961         establish_pte(vma, address, page_table, pte_mkwrite(pte_mkdirty(mk_pte(new_page, vma->vm_page_prot))));
962 }
963
964 /*
965  * This routine handles present pages, when users try to write
966  * to a shared page. It is done by copying the page to a new address
967  * and decrementing the shared-page counter for the old page.
968  *
969  * Goto-purists beware: the only reason for goto's here is that it results
970  * in better assembly code.. The "default" path will see no jumps at all.
971  *
972  * Note that this routine assumes that the protection checks have been
973  * done by the caller (the low-level page fault routine in most cases).
974  * Thus we can safely just mark it writable once we've done any necessary
975  * COW.
976  *
977  * We also mark the page dirty at this point even though the page will
978  * change only once the write actually happens. This avoids a few races,
979  * and potentially makes it more efficient.
980  *
981  * We hold the mm semaphore and the page_table_lock on entry and exit
982  * with the page_table_lock released.
983  */
984 static int do_wp_page(struct mm_struct *mm, struct vm_area_struct * vma,
985         unsigned long address, pte_t *page_table, pmd_t *pmd, pte_t pte)
986 {
987         struct page *old_page, *new_page;
988         unsigned long pfn = pte_pfn(pte);
989         struct pte_chain *pte_chain = NULL;
990         int ret;
991
992         if (unlikely(!pfn_valid(pfn))) {
993                 /*
994                  * This should really halt the system so it can be debugged or
995                  * at least the kernel stops what it's doing before it corrupts
996                  * data, but for the moment just pretend this is OOM.
997                  */
998                 pte_unmap(page_table);
999                 printk(KERN_ERR "do_wp_page: bogus page at address %08lx\n",
1000                                 address);
1001                 goto oom;
1002         }
1003         old_page = pfn_to_page(pfn);
1004
1005         if (!TestSetPageLocked(old_page)) {
1006                 int reuse = can_share_swap_page(old_page);
1007                 unlock_page(old_page);
1008                 if (reuse) {
1009                         flush_cache_page(vma, address);
1010                         establish_pte(vma, address, page_table,
1011                                 pte_mkyoung(pte_mkdirty(pte_mkwrite(pte))));
1012                         pte_unmap(page_table);
1013                         ret = VM_FAULT_MINOR;
1014                         goto out;
1015                 }
1016         }
1017         pte_unmap(page_table);
1018
1019         /*
1020          * Ok, we need to copy. Oh, well..
1021          */
1022         page_cache_get(old_page);
1023         spin_unlock(&mm->page_table_lock);
1024
1025         pte_chain = pte_chain_alloc(GFP_KERNEL);
1026         if (!pte_chain)
1027                 goto no_mem;
1028         new_page = alloc_page(GFP_HIGHUSER);
1029         if (!new_page)
1030                 goto no_mem;
1031         copy_cow_page(old_page,new_page,address);
1032
1033         /*
1034          * Re-check the pte - we dropped the lock
1035          */
1036         spin_lock(&mm->page_table_lock);
1037         page_table = pte_offset_map(pmd, address);
1038         if (pte_same(*page_table, pte)) {
1039                 if (PageReserved(old_page))
1040                         ++mm->rss;
1041                 page_remove_rmap(old_page, page_table);
1042                 break_cow(vma, new_page, address, page_table);
1043                 pte_chain = page_add_rmap(new_page, page_table, pte_chain);
1044                 lru_cache_add_active(new_page);
1045
1046                 /* Free the old page.. */
1047                 new_page = old_page;
1048         }
1049         pte_unmap(page_table);
1050         page_cache_release(new_page);
1051         page_cache_release(old_page);
1052         ret = VM_FAULT_MINOR;
1053         goto out;
1054
1055 no_mem:
1056         page_cache_release(old_page);
1057 oom:
1058         ret = VM_FAULT_OOM;
1059 out:
1060         spin_unlock(&mm->page_table_lock);
1061         pte_chain_free(pte_chain);
1062         return ret;
1063 }
1064
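/*
 * Helper for vmtruncate(): zap whatever part of each mapping on the
 * i_mmap list lies beyond the new end-of-file page offset.
 */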
1065 static void vmtruncate_list(struct list_head *head, unsigned long pgoff)
1066 {
1067         unsigned long start, end, len, diff;
1068         struct vm_area_struct *vma;
1069         struct list_head *curr;
1070
1071         list_for_each(curr, head) {
1072                 vma = list_entry(curr, struct vm_area_struct, shared);
1073                 start = vma->vm_start;
1074                 end = vma->vm_end;
1075                 len = end - start;
1076
1077                 /* mapping wholly truncated? */
1078                 if (vma->vm_pgoff >= pgoff) {
1079                         zap_page_range(vma, start, len);
1080                         continue;
1081                 }
1082
1083                 /* mapping wholly unaffected? */
1084                 len = len >> PAGE_SHIFT;
1085                 diff = pgoff - vma->vm_pgoff;
1086                 if (diff >= len)
1087                         continue;
1088
1089                 /* Ok, partially affected.. */
1090                 start += diff << PAGE_SHIFT;
1091                 len = (len - diff) << PAGE_SHIFT;
1092                 zap_page_range(vma, start, len);
1093         }
1094 }
1095
1096 /*
1097  * Handle all mappings that got truncated by a "truncate()"
1098  * system call.
1099  *
1100  * NOTE! We have to be ready to update the memory sharing
1101  * between the file and the memory map for a potential last
1102  * incomplete page.  Ugly, but necessary.
1103  */
1104 int vmtruncate(struct inode * inode, loff_t offset)
1105 {
1106         unsigned long pgoff;
1107         struct address_space *mapping = inode->i_mapping;
1108         unsigned long limit;
1109
1110         if (inode->i_size < offset)
1111                 goto do_expand;
1112         inode->i_size = offset;
1113         pgoff = (offset + PAGE_SIZE - 1) >> PAGE_SHIFT;
1114         down(&mapping->i_shared_sem);
1115         if (unlikely(!list_empty(&mapping->i_mmap)))
1116                 vmtruncate_list(&mapping->i_mmap, pgoff);
1117         if (unlikely(!list_empty(&mapping->i_mmap_shared)))
1118                 vmtruncate_list(&mapping->i_mmap_shared, pgoff);
1119         up(&mapping->i_shared_sem);
1120         truncate_inode_pages(mapping, offset);
1121         goto out_truncate;
1122
1123 do_expand:
1124         limit = current->rlim[RLIMIT_FSIZE].rlim_cur;
1125         if (limit != RLIM_INFINITY && offset > limit)
1126                 goto out_sig;
1127         if (offset > inode->i_sb->s_maxbytes)
1128                 goto out;
1129         inode->i_size = offset;
1130
1131 out_truncate:
1132         if (inode->i_op && inode->i_op->truncate)
1133                 inode->i_op->truncate(inode);
1134         return 0;
1135 out_sig:
1136         send_sig(SIGXFSZ, current, 0);
1137 out:
1138         return -EFBIG;
1139 }
1140
1141 /* 
1142  * Primitive swap readahead code. We simply read an aligned block of
1143  * (1 << page_cluster) entries in the swap area. This method is chosen
1144  * because it doesn't cost us any seek time.  We also make sure to queue
1145  * the 'original' request together with the readahead ones...  
1146  */
1147 void swapin_readahead(swp_entry_t entry)
1148 {
1149         int i, num;
1150         struct page *new_page;
1151         unsigned long offset;
1152
1153         /*
1154          * Get the number of handles we should do readahead io to.
1155          */
1156         num = valid_swaphandles(entry, &offset);
1157         for (i = 0; i < num; offset++, i++) {
1158                 /* Ok, do the async read-ahead now */
1159                 new_page = read_swap_cache_async(swp_entry(swp_type(entry),
1160                                                 offset));
1161                 if (!new_page)
1162                         break;
1163                 page_cache_release(new_page);
1164         }
1165         lru_add_drain();        /* Push any new pages onto the LRU now */
1166 }
1167
1168 /*
1169  * We hold the mm semaphore and the page_table_lock on entry and
1170  * should release the pagetable lock on exit..
1171  */
1172 static int do_swap_page(struct mm_struct * mm,
1173         struct vm_area_struct * vma, unsigned long address,
1174         pte_t *page_table, pmd_t *pmd, pte_t orig_pte, int write_access)
1175 {
1176         struct page *page;
1177         swp_entry_t entry = pte_to_swp_entry(orig_pte);
1178         pte_t pte;
1179         int ret = VM_FAULT_MINOR;
1180         struct pte_chain *pte_chain = NULL;
1181
1182         pte_unmap(page_table);
1183         spin_unlock(&mm->page_table_lock);
1184         page = lookup_swap_cache(entry);
1185         if (!page) {
1186                 swapin_readahead(entry);
1187                 page = read_swap_cache_async(entry);
1188                 if (!page) {
1189                         /*
1190                          * Back out if somebody else faulted in this pte while
1191                          * we released the page table lock.
1192                          */
1193                         spin_lock(&mm->page_table_lock);
1194                         page_table = pte_offset_map(pmd, address);
1195                         if (pte_same(*page_table, orig_pte))
1196                                 ret = VM_FAULT_OOM;
1197                         else
1198                                 ret = VM_FAULT_MINOR;
1199                         pte_unmap(page_table);
1200                         spin_unlock(&mm->page_table_lock);
1201                         goto out;
1202                 }
1203
1204                 /* Had to read the page from swap area: Major fault */
1205                 ret = VM_FAULT_MAJOR;
1206                 inc_page_state(pgmajfault);
1207         }
1208
1209         mark_page_accessed(page);
1210         pte_chain = pte_chain_alloc(GFP_KERNEL);
1211         if (!pte_chain) {
1212                 ret = VM_FAULT_OOM;
1213                 goto out;
1214         }
1215         lock_page(page);
1216
1217         /*
1218          * Back out if somebody else faulted in this pte while we
1219          * released the page table lock.
1220          */
1221         spin_lock(&mm->page_table_lock);
1222         page_table = pte_offset_map(pmd, address);
1223         if (!pte_same(*page_table, orig_pte)) {
1224                 pte_unmap(page_table);
1225                 spin_unlock(&mm->page_table_lock);
1226                 unlock_page(page);
1227                 page_cache_release(page);
1228                 ret = VM_FAULT_MINOR;
1229                 goto out;
1230         }
1231
1232         /* The page isn't present yet, go ahead with the fault. */
1233                 
1234         swap_free(entry);
1235         if (vm_swap_full())
1236                 remove_exclusive_swap_page(page);
1237
1238         mm->rss++;
1239         pte = mk_pte(page, vma->vm_page_prot);
1240         if (write_access && can_share_swap_page(page))
1241                 pte = pte_mkdirty(pte_mkwrite(pte));
1242         unlock_page(page);
1243
1244         flush_icache_page(vma, page);
1245         set_pte(page_table, pte);
1246         pte_chain = page_add_rmap(page, page_table, pte_chain);
1247
1248         /* No need to invalidate - it was non-present before */
1249         update_mmu_cache(vma, address, pte);
1250         pte_unmap(page_table);
1251         spin_unlock(&mm->page_table_lock);
1252 out:
1253         pte_chain_free(pte_chain);
1254         return ret;
1255 }
1256
1257 /*
1258  * We are called with the MM semaphore and page_table_lock
1259  * spinlock held to protect against concurrent faults in
1260  * multithreaded programs. 
1261  */
1262 static int
1263 do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
1264                 pte_t *page_table, pmd_t *pmd, int write_access,
1265                 unsigned long addr)
1266 {
1267         pte_t entry;
1268         struct page * page = ZERO_PAGE(addr);
1269         struct pte_chain *pte_chain;
1270         int ret;
1271
1272         pte_chain = pte_chain_alloc(GFP_ATOMIC);
1273         if (!pte_chain) {
1274                 pte_unmap(page_table);
1275                 spin_unlock(&mm->page_table_lock);
1276                 pte_chain = pte_chain_alloc(GFP_KERNEL);
1277                 if (!pte_chain)
1278                         goto no_mem;
1279                 spin_lock(&mm->page_table_lock);
1280                 page_table = pte_offset_map(pmd, addr);
1281         }
1282                 
1283         /* Read-only mapping of ZERO_PAGE. */
1284         entry = pte_wrprotect(mk_pte(ZERO_PAGE(addr), vma->vm_page_prot));
1285
1286         /* ..except if it's a write access */
1287         if (write_access) {
1288                 /* Allocate our own private page. */
1289                 pte_unmap(page_table);
1290                 spin_unlock(&mm->page_table_lock);
1291
1292                 page = alloc_page(GFP_HIGHUSER);
1293                 if (!page)
1294                         goto no_mem;
1295                 clear_user_highpage(page, addr);
1296
1297                 spin_lock(&mm->page_table_lock);
1298                 page_table = pte_offset_map(pmd, addr);
1299
1300                 if (!pte_none(*page_table)) {
1301                         pte_unmap(page_table);
1302                         page_cache_release(page);
1303                         spin_unlock(&mm->page_table_lock);
1304                         ret = VM_FAULT_MINOR;
1305                         goto out;
1306                 }
1307                 mm->rss++;
1308                 entry = pte_mkwrite(pte_mkdirty(mk_pte(page, vma->vm_page_prot)));
1309                 lru_cache_add_active(page);
1310                 mark_page_accessed(page);
1311         }
1312
1313         set_pte(page_table, entry);
1314         /* ignores ZERO_PAGE */
1315         pte_chain = page_add_rmap(page, page_table, pte_chain);
1316         pte_unmap(page_table);
1317
1318         /* No need to invalidate - it was non-present before */
1319         update_mmu_cache(vma, addr, entry);
1320         spin_unlock(&mm->page_table_lock);
1321         ret = VM_FAULT_MINOR;
1322         goto out;
1323
1324 no_mem:
1325         ret = VM_FAULT_OOM;
1326 out:
1327         pte_chain_free(pte_chain);
1328         return ret;
1329 }
1330
1331 /*
1332  * do_no_page() tries to create a new page mapping. It aggressively
1333  * tries to share with existing pages, but makes a separate copy if
1334  * the "write_access" parameter is true in order to avoid the next
1335  * page fault.
1336  *
1337  * As this is called only for pages that do not currently exist, we
1338  * do not need to flush old virtual caches or the TLB.
1339  *
1340  * This is called with the MM semaphore held and the page table
1341  * spinlock held. Exit with the spinlock released.
1342  */
1343 static int
1344 do_no_page(struct mm_struct *mm, struct vm_area_struct *vma,
1345         unsigned long address, int write_access, pte_t *page_table, pmd_t *pmd)
1346 {
1347         struct page * new_page;
1348         pte_t entry;
1349         struct pte_chain *pte_chain;
1350         int ret;
1351
1352         if (!vma->vm_ops || !vma->vm_ops->nopage)
1353                 return do_anonymous_page(mm, vma, page_table,
1354                                         pmd, write_access, address);
1355         pte_unmap(page_table);
1356         spin_unlock(&mm->page_table_lock);
1357
1358         new_page = vma->vm_ops->nopage(vma, address & PAGE_MASK, 0);
1359
1360         /* no page was available -- either SIGBUS or OOM */
1361         if (new_page == NOPAGE_SIGBUS)
1362                 return VM_FAULT_SIGBUS;
1363         if (new_page == NOPAGE_OOM)
1364                 return VM_FAULT_OOM;
1365
1366         pte_chain = pte_chain_alloc(GFP_KERNEL);
1367         if (!pte_chain)
1368                 goto oom;
1369
1370         /*
1371          * Should we do an early C-O-W break?
1372          */
1373         if (write_access && !(vma->vm_flags & VM_SHARED)) {
1374                 struct page * page = alloc_page(GFP_HIGHUSER);
1375                 if (!page) {
1376                         page_cache_release(new_page);
1377                         goto oom;
1378                 }
1379                 copy_user_highpage(page, new_page, address);
1380                 page_cache_release(new_page);
1381                 lru_cache_add_active(page);
1382                 new_page = page;
1383         }
1384
1385         spin_lock(&mm->page_table_lock);
1386         page_table = pte_offset_map(pmd, address);
1387
1388         /*
1389          * This silly early PAGE_DIRTY setting removes a race
1390          * due to the bad i386 page protection. But it's valid
1391          * for other architectures too.
1392          *
1393          * Note that if write_access is true, we either now have
1394          * an exclusive copy of the page, or this is a shared mapping,
1395          * so we can make it writable and dirty to avoid having to
1396          * handle that later.
1397          */
1398         /* Only go through if we didn't race with anybody else... */
1399         if (pte_none(*page_table)) {
1400                 ++mm->rss;
1401                 flush_icache_page(vma, new_page);
1402                 entry = mk_pte(new_page, vma->vm_page_prot);
1403                 if (write_access)
1404                         entry = pte_mkwrite(pte_mkdirty(entry));
1405                 set_pte(page_table, entry);
1406                 pte_chain = page_add_rmap(new_page, page_table, pte_chain);
1407                 pte_unmap(page_table);
1408         } else {
1409                 /* One of our sibling threads was faster, back out. */
1410                 pte_unmap(page_table);
1411                 page_cache_release(new_page);
1412                 spin_unlock(&mm->page_table_lock);
1413                 ret = VM_FAULT_MINOR;
1414                 goto out;
1415         }
1416
1417         /* no need to invalidate: a not-present page shouldn't be cached */
1418         update_mmu_cache(vma, address, entry);
1419         spin_unlock(&mm->page_table_lock);
1420         ret = VM_FAULT_MAJOR;
1421         goto out;
1422 oom:
1423         ret = VM_FAULT_OOM;
1424 out:
1425         pte_chain_free(pte_chain);
1426         return ret;
1427 }
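
     /*
      * Illustrative sketch, not part of the original file: the kind of
      * ->nopage handler that do_no_page() above expects.  A real handler
      * (filemap_nopage, for instance) looks the page up in the page cache;
      * this hypothetical one hands back a page it already holds.  The
      * handler must return the page with a reference held, or NOPAGE_SIGBUS
      * or NOPAGE_OOM on failure.  struct example_buf and example_nopage are
      * made-up names.
      */
     #if 0   /* example only, never compiled */
     struct example_buf {                       /* hypothetical driver state */
             unsigned long nr_pages;
             struct page **pages;               /* pages backing the mapping */
     };

     static struct page *example_nopage(struct vm_area_struct *vma,
                                        unsigned long address, int unused)
     {
             struct example_buf *buf = vma->vm_private_data;
             unsigned long pgoff;

             pgoff = ((address - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
             if (pgoff >= buf->nr_pages)
                     return NOPAGE_SIGBUS;      /* fault beyond the backing object */

             get_page(buf->pages[pgoff]);       /* do_no_page() consumes this reference */
             return buf->pages[pgoff];
     }
     #endif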
1428
1429 /*
1430  * Fault on a previously existing named mapping. Repopulate the pte
1431  * from the encoded file_pte if possible. This enables swappable
1432  * nonlinear vmas.
1433  */
1434 static int do_file_page(struct mm_struct * mm, struct vm_area_struct * vma,
1435         unsigned long address, int write_access, pte_t *pte, pmd_t *pmd)
1436 {
1437         unsigned long pgoff;
1438         int err;
1439
1440         BUG_ON(!vma->vm_ops || !vma->vm_ops->nopage);
1441         /*
1442          * Fall back to the linear mapping if the fs does not support
1443          * ->populate, or if this is a write fault on a private mapping:
1444          */
1445         if (!vma->vm_ops || !vma->vm_ops->populate || 
1446                         (write_access && !(vma->vm_flags & VM_SHARED))) {
1447                 pte_clear(pte);
1448                 return do_no_page(mm, vma, address, write_access, pte, pmd);
1449         }
1450
1451         pgoff = pte_to_pgoff(*pte);
1452
1453         pte_unmap(pte);
1454         spin_unlock(&mm->page_table_lock);
1455
1456         err = vma->vm_ops->populate(vma, address & PAGE_MASK, PAGE_SIZE, vma->vm_page_prot, pgoff, 0);
1457         if (err == -ENOMEM)
1458                 return VM_FAULT_OOM;
1459         if (err)
1460                 return VM_FAULT_SIGBUS;
1461         return VM_FAULT_MAJOR;
1462 }
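
     /*
      * Illustrative sketch, not part of the original file: nonlinear vmas
      * are set up from user space with the remap_file_pages() system call,
      * which installs the file ptes that do_file_page() above later decodes.
      * The offsets below are made up; prot must be 0 for remap_file_pages().
      */
     #if 0   /* user-space example only, never compiled */
     #define _GNU_SOURCE
     #include <sys/mman.h>

     static void example_nonlinear(int fd, long page_size)
     {
             /* An ordinary linear, shared window over the start of the file. */
             char *win = mmap(NULL, 4 * page_size, PROT_READ | PROT_WRITE,
                              MAP_SHARED, fd, 0);
             if (win == MAP_FAILED)
                     return;

             /* Rewire the first window page to file page 100, in place. */
             remap_file_pages(win, page_size, 0, 100, 0);
     }
     #endif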
1463
1464 /*
1465  * These routines also need to handle stuff like marking pages dirty
1466  * and/or accessed for architectures that don't do it in hardware (most
1467  * RISC architectures).  The early dirtying is also good on the i386.
1468  *
1469  * There is also a hook called "update_mmu_cache()" that architectures
1470  * with external mmu caches can use to update those (i.e. the Sparc or
1471  * PowerPC hashed page tables that act as extended TLBs).
1472  *
1473  * Note the "page_table_lock". It is to protect against kswapd removing
1474  * pages from under us. Note that kswapd only ever _removes_ pages, never
1475  * adds them. As such, once we have noticed that the page is not present,
1476  * we can drop the lock early.
1477  *
1478  * The adding of pages is protected by the MM semaphore (which we hold),
1479  * so we don't need to worry about a page suddenly being added into
1480  * our VM.
1481  *
1482  * We enter with the pagetable spinlock held; we are supposed to
1483  * release it when done.
1484  */
1485 static inline int handle_pte_fault(struct mm_struct *mm,
1486         struct vm_area_struct * vma, unsigned long address,
1487         int write_access, pte_t *pte, pmd_t *pmd)
1488 {
1489         pte_t entry;
1490
1491         entry = *pte;
1492         if (!pte_present(entry)) {
1493                 /*
1494                  * If it truly wasn't present, we know that kswapd
1495                  * and the PTE updates will not touch it later. So
1496                  * drop the lock.
1497                  */
1498                 if (pte_none(entry))
1499                         return do_no_page(mm, vma, address, write_access, pte, pmd);
1500                 if (pte_file(entry))
1501                         return do_file_page(mm, vma, address, write_access, pte, pmd);
1502                 return do_swap_page(mm, vma, address, pte, pmd, entry, write_access);
1503         }
1504
1505         if (write_access) {
1506                 if (!pte_write(entry))
1507                         return do_wp_page(mm, vma, address, pte, pmd, entry);
1508
1509                 entry = pte_mkdirty(entry);
1510         }
1511         entry = pte_mkyoung(entry);
1512         establish_pte(vma, address, pte, entry);
1513         pte_unmap(pte);
1514         spin_unlock(&mm->page_table_lock);
1515         return VM_FAULT_MINOR;
1516 }
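
     /*
      * Aside, not part of the original file: on i386 the update_mmu_cache()
      * hook mentioned above is a no-op, roughly
      *
      *      #define update_mmu_cache(vma, address, pte)    do { } while (0)
      *
      * while architectures with software-managed TLBs or hashed page tables
      * (sparc64, ppc64 and friends) use it to preload the new translation.
      */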
1517
1518 /*
1519  * By the time we get here, we already hold the mm semaphore
1520  */
1521 int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct * vma,
1522         unsigned long address, int write_access)
1523 {
1524         pgd_t *pgd;
1525         pmd_t *pmd;
1526
1527         __set_current_state(TASK_RUNNING);
1528         pgd = pgd_offset(mm, address);
1529
1530         inc_page_state(pgfault);
1531
1532         if (is_vm_hugetlb_page(vma))
1533                 return VM_FAULT_SIGBUS; /* hugetlb areas are prefaulted; a fault here means the mapping was truncated. */
1534
1535         /*
1536          * We need the page table lock to synchronize with kswapd
1537          * and the SMP-safe atomic PTE updates.
1538          */
1539         spin_lock(&mm->page_table_lock);
1540         pmd = pmd_alloc(mm, pgd, address);
1541
1542         if (pmd) {
1543                 pte_t * pte = pte_alloc_map(mm, pmd, address);
1544                 if (pte)
1545                         return handle_pte_fault(mm, vma, address, write_access, pte, pmd);
1546         }
1547         spin_unlock(&mm->page_table_lock);
1548         return VM_FAULT_OOM;
1549 }
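
     /*
      * Illustrative sketch, not part of the original file: roughly how an
      * architecture's page fault handler reaches handle_mm_fault() above
      * while holding the mm semaphore, as the comment requires.  Access
      * checks, stack expansion and the error paths of a real do_page_fault()
      * are omitted, and example_arch_fault is a made-up name.
      */
     #if 0   /* example only, never compiled */
     static void example_arch_fault(unsigned long address, int write)
     {
             struct mm_struct *mm = current->mm;
             struct vm_area_struct *vma;

             down_read(&mm->mmap_sem);          /* the "mm semaphore" */
             vma = find_vma(mm, address);
             if (vma && vma->vm_start <= address) {
                     switch (handle_mm_fault(mm, vma, address, write)) {
                     case VM_FAULT_MINOR:
                     case VM_FAULT_MAJOR:
                             break;             /* fault serviced */
                     case VM_FAULT_SIGBUS:
                             /* deliver SIGBUS to current here */
                             break;
                     default:
                             /* VM_FAULT_OOM: out-of-memory handling here */
                             break;
                     }
             }
             up_read(&mm->mmap_sem);
     }
     #endif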
1550
1551 /*
1552  * Allocate page middle directory.
1553  *
1554  * We've already handled the fast-path in-line, and we own the
1555  * page table lock.
1556  *
1557  * On a two-level page table, this ends up actually being entirely
1558  * optimized away.
1559  */
1560 pmd_t *__pmd_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address)
1561 {
1562         pmd_t *new;
1563
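             /* pmd_alloc_one() may sleep, so drop the page table lock across it. */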
1564         spin_unlock(&mm->page_table_lock);
1565         new = pmd_alloc_one(mm, address);
1566         spin_lock(&mm->page_table_lock);
1567         if (!new)
1568                 return NULL;
1569
1570         /*
1571          * Because we dropped the lock, we should re-check the
1572          * entry, as somebody else could have populated it..
1573          */
1574         if (pgd_present(*pgd)) {
1575                 pmd_free(new);
1576                 goto out;
1577         }
1578         pgd_populate(mm, pgd, new);
1579 out:
1580         return pmd_offset(pgd, address);
1581 }
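
     /*
      * Aside, not part of the original file: the in-line fast path referred
      * to above lives in a header and looks roughly like the sketch below,
      * so __pmd_alloc() only runs when the pgd entry is still empty.
      */
     #if 0   /* example only, never compiled */
     static inline pmd_t *pmd_alloc(struct mm_struct *mm, pgd_t *pgd,
                                    unsigned long address)
     {
             if (pgd_none(*pgd))
                     return __pmd_alloc(mm, pgd, address);
             return pmd_offset(pgd, address);
     }
     #endif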
1582
1583 int make_pages_present(unsigned long addr, unsigned long end)
1584 {
1585         int ret, len, write;
1586         struct vm_area_struct * vma;
1587
1588         vma = find_vma(current->mm, addr);
1589         write = (vma->vm_flags & VM_WRITE) != 0;
1590         if (addr >= end)
1591                 BUG();
1592         if (end > vma->vm_end)
1593                 BUG();
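             /*
              * Pages spanned: round end up and addr down to page boundaries.
              * E.g. with 4K pages, addr 0x1000 and end 0x3001 cover pages
              * 1, 2 and 3, so len is 3.
              */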
1594         len = (end+PAGE_SIZE-1)/PAGE_SIZE-addr/PAGE_SIZE;
1595         ret = get_user_pages(current, current->mm, addr,
1596                         len, write, 0, NULL, NULL);
1597         return ret == len ? 0 : -1;
1598 }
1599
1600 /* 
1601  * Map a vmalloc()-space virtual address to the physical page.
1602  */
1603 struct page * vmalloc_to_page(void * vmalloc_addr)
1604 {
1605         unsigned long addr = (unsigned long) vmalloc_addr;
1606         struct page *page = NULL;
1607         pgd_t *pgd = pgd_offset_k(addr);
1608         pmd_t *pmd;
1609         pte_t *ptep, pte;
1610   
1611         if (!pgd_none(*pgd)) {
1612                 pmd = pmd_offset(pgd, addr);
1613                 if (!pmd_none(*pmd)) {
1614                         preempt_disable();
1615                         ptep = pte_offset_map(pmd, addr);
1616                         pte = *ptep;
1617                         if (pte_present(pte))
1618                                 page = pte_page(pte);
1619                         pte_unmap(ptep);
1620                         preempt_enable();
1621                 }
1622         }
1623         return page;
1624 }
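
     /*
      * Illustrative sketch, not part of the original file: a typical caller
      * of vmalloc_to_page() is a driver that maps a vmalloc()ed buffer into
      * user space page by page from its ->nopage handler.  The name
      * example_vmalloc_nopage is made up, and the buffer is assumed to sit
      * in vm_private_data.
      */
     #if 0   /* example only, never compiled */
     static struct page *example_vmalloc_nopage(struct vm_area_struct *vma,
                                                unsigned long address, int unused)
     {
             char *buf = vma->vm_private_data;  /* vmalloc()ed elsewhere */
             unsigned long offset = address - vma->vm_start;
             struct page *page;

             page = vmalloc_to_page(buf + offset);
             if (!page)
                     return NOPAGE_SIGBUS;
             get_page(page);                    /* reference for the new mapping */
             return page;
     }
     #endif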