[linux-flexiantxendom0-3.2.10.git] / mm / memory.c
1 /*
2  *  linux/mm/memory.c
3  *
4  *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
5  */
6
7 /*
8  * demand-loading started 01.12.91 - seems it is high on the list of
9  * things wanted, and it should be easy to implement. - Linus
10  */
11
12 /*
13  * Ok, demand-loading was easy, shared pages a little bit tricker. Shared
14  * pages started 02.12.91, seems to work. - Linus.
15  *
16  * Tested sharing by executing about 30 /bin/sh: under the old kernel it
17  * would have taken more than the 6M I have free, but it worked well as
18  * far as I could see.
19  *
20  * Also corrected some "invalidate()"s - I wasn't doing enough of them.
21  */
22
23 /*
24  * Real VM (paging to/from disk) started 18.12.91. Much more work and
25  * thought has to go into this. Oh, well..
26  * 19.12.91  -  works, somewhat. Sometimes I get faults, don't know why.
27  *              Found it. Everything seems to work now.
28  * 20.12.91  -  Ok, making the swap-device changeable like the root.
29  */
30
31 /*
32  * 05.04.94  -  Multi-page memory management added for v1.1.
33  *              Idea by Alex Bligh (alex@cconcepts.co.uk)
34  *
35  * 16.07.99  -  Support of BIGMEM added by Gerhard Wichert, Siemens AG
36  *              (Gerhard.Wichert@pdb.siemens.de)
37  */
38
39 #include <linux/kernel_stat.h>
40 #include <linux/mm.h>
41 #include <linux/hugetlb.h>
42 #include <linux/mman.h>
43 #include <linux/swap.h>
44 #include <linux/highmem.h>
45 #include <linux/pagemap.h>
46 #include <linux/rmap-locking.h>
47 #include <linux/module.h>
48 #include <linux/init.h>
49
50 #include <asm/pgalloc.h>
51 #include <asm/rmap.h>
52 #include <asm/uaccess.h>
53 #include <asm/tlb.h>
54 #include <asm/tlbflush.h>
55 #include <asm/pgtable.h>
56
57 #include <linux/swapops.h>
58 #include <linux/elf.h>
59
60 #ifndef CONFIG_DISCONTIGMEM
61 /* use the per-pgdat data instead for discontigmem - mbligh */
62 unsigned long max_mapnr;
63 struct page *mem_map;
64
65 EXPORT_SYMBOL(max_mapnr);
66 EXPORT_SYMBOL(mem_map);
67 #endif
68
69 unsigned long num_physpages;
70 void * high_memory;
71 struct page *highmem_start_page;
72
73 EXPORT_SYMBOL(num_physpages);
74 EXPORT_SYMBOL(highmem_start_page);
75 EXPORT_SYMBOL(high_memory);
76
77 /*
78  * We special-case the C-O-W ZERO_PAGE, because it's such
79  * a common occurrence (no need to read the page to know
80  * that it's zero - better for the cache and memory subsystem).
81  */
82 static inline void copy_cow_page(struct page * from, struct page * to, unsigned long address)
83 {
84         if (from == ZERO_PAGE(address)) {
85                 clear_user_highpage(to, address);
86                 return;
87         }
88         copy_user_highpage(to, from, address);
89 }
90
91 /*
92  * Note: this doesn't free the actual pages themselves. That
93  * has been handled earlier when unmapping all the memory regions.
94  */
95 static inline void free_one_pmd(struct mmu_gather *tlb, pmd_t * dir)
96 {
97         struct page *page;
98
99         if (pmd_none(*dir))
100                 return;
101         if (pmd_bad(*dir)) {
102                 pmd_ERROR(*dir);
103                 pmd_clear(dir);
104                 return;
105         }
106         page = pmd_page(*dir);
107         pmd_clear(dir);
108         pgtable_remove_rmap(page);
109         pte_free_tlb(tlb, page);
110 }
111
112 static inline void free_one_pgd(struct mmu_gather *tlb, pgd_t * dir)
113 {
114         int j;
115         pmd_t * pmd;
116
117         if (pgd_none(*dir))
118                 return;
119         if (pgd_bad(*dir)) {
120                 pgd_ERROR(*dir);
121                 pgd_clear(dir);
122                 return;
123         }
124         pmd = pmd_offset(dir, 0);
125         pgd_clear(dir);
126         for (j = 0; j < PTRS_PER_PMD ; j++)
127                 free_one_pmd(tlb, pmd+j);
128         pmd_free_tlb(tlb, pmd);
129 }
130
131 /*
132  * This function clears all user-level page tables of a process - this
133  * is needed by execve(), so that old pages aren't in the way.
134  *
135  * Must be called with pagetable lock held.
136  */
137 void clear_page_tables(struct mmu_gather *tlb, unsigned long first, int nr)
138 {
139         pgd_t * page_dir = tlb->mm->pgd;
140
141         page_dir += first;
142         do {
143                 free_one_pgd(tlb, page_dir);
144                 page_dir++;
145         } while (--nr);
146 }
147
148 pte_t * pte_alloc_map(struct mm_struct *mm, pmd_t *pmd, unsigned long address)
149 {
150         if (!pmd_present(*pmd)) {
151                 struct page *new;
152
153                 spin_unlock(&mm->page_table_lock);
154                 new = pte_alloc_one(mm, address);
155                 spin_lock(&mm->page_table_lock);
156                 if (!new)
157                         return NULL;
158
159                 /*
160                  * Because we dropped the lock, we should re-check the
161                  * entry, as somebody else could have populated it..
162                  */
163                 if (pmd_present(*pmd)) {
164                         pte_free(new);
165                         goto out;
166                 }
167                 pgtable_add_rmap(new, mm, address);
168                 pmd_populate(mm, pmd, new);
169         }
170 out:
171         return pte_offset_map(pmd, address);
172 }
173
174 pte_t * pte_alloc_kernel(struct mm_struct *mm, pmd_t *pmd, unsigned long address)
175 {
176         if (!pmd_present(*pmd)) {
177                 pte_t *new;
178
179                 spin_unlock(&mm->page_table_lock);
180                 new = pte_alloc_one_kernel(mm, address);
181                 spin_lock(&mm->page_table_lock);
182                 if (!new)
183                         return NULL;
184
185                 /*
186                  * Because we dropped the lock, we should re-check the
187                  * entry, as somebody else could have populated it..
188                  */
189                 if (pmd_present(*pmd)) {
190                         pte_free_kernel(new);
191                         goto out;
192                 }
193                 pgtable_add_rmap(virt_to_page(new), mm, address);
194                 pmd_populate_kernel(mm, pmd, new);
195         }
196 out:
197         return pte_offset_kernel(pmd, address);
198 }
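/*
 * A minimal usage sketch for the two allocators above, assuming a
 * caller that wants to establish a single kernel mapping (much as the
 * vmalloc mapping code does).  The function name and the one-page
 * scope are illustrative assumptions, not code from this file.
 */
#if 0	/* example sketch only */
static int example_map_kernel_page(unsigned long addr, struct page *page,
				   pgprot_t prot)
{
	pgd_t *pgd = pgd_offset_k(addr);	/* kernel page directory */
	pmd_t *pmd;
	pte_t *pte;
	int err = -ENOMEM;

	spin_lock(&init_mm.page_table_lock);
	pmd = pmd_alloc(&init_mm, pgd, addr);
	if (pmd) {
		/* may drop and retake the lock internally, see above */
		pte = pte_alloc_kernel(&init_mm, pmd, addr);
		if (pte) {
			set_pte(pte, mk_pte(page, prot));
			err = 0;
		}
	}
	spin_unlock(&init_mm.page_table_lock);
	return err;
}
#endif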
199 #define PTE_TABLE_MASK  ((PTRS_PER_PTE-1) * sizeof(pte_t))
200 #define PMD_TABLE_MASK  ((PTRS_PER_PMD-1) * sizeof(pmd_t))
201
202 /*
203  * Copy one vm_area from one task to the other. Assumes that any page
204  * tables already present in the new task have been cleared in the
205  * whole range covered by this vma.
206  *
207  * 08Jan98 Merged into one routine from several inline routines to reduce
208  *         variable count and make things faster. -jj
209  *
210  * dst->page_table_lock is held on entry and exit,
211  * but may be dropped within pmd_alloc() and pte_alloc_map().
212  */
213 int copy_page_range(struct mm_struct *dst, struct mm_struct *src,
214                         struct vm_area_struct *vma)
215 {
216         pgd_t * src_pgd, * dst_pgd;
217         unsigned long address = vma->vm_start;
218         unsigned long end = vma->vm_end;
219         unsigned long cow;
220         struct pte_chain *pte_chain = NULL;
221
222         if (is_vm_hugetlb_page(vma))
223                 return copy_hugetlb_page_range(dst, src, vma);
224
225         pte_chain = pte_chain_alloc(GFP_ATOMIC);
226         if (!pte_chain) {
227                 spin_unlock(&dst->page_table_lock);
228                 pte_chain = pte_chain_alloc(GFP_KERNEL);
229                 spin_lock(&dst->page_table_lock);
230                 if (!pte_chain)
231                         goto nomem;
232         }
233         
234         cow = (vma->vm_flags & (VM_SHARED | VM_MAYWRITE)) == VM_MAYWRITE;
235         src_pgd = pgd_offset(src, address)-1;
236         dst_pgd = pgd_offset(dst, address)-1;
237
238         for (;;) {
239                 pmd_t * src_pmd, * dst_pmd;
240
241                 src_pgd++; dst_pgd++;
242                 
243                 /* copy_pmd_range */
244                 
245                 if (pgd_none(*src_pgd))
246                         goto skip_copy_pmd_range;
247                 if (pgd_bad(*src_pgd)) {
248                         pgd_ERROR(*src_pgd);
249                         pgd_clear(src_pgd);
250 skip_copy_pmd_range:    address = (address + PGDIR_SIZE) & PGDIR_MASK;
251                         if (!address || (address >= end))
252                                 goto out;
253                         continue;
254                 }
255
256                 src_pmd = pmd_offset(src_pgd, address);
257                 dst_pmd = pmd_alloc(dst, dst_pgd, address);
258                 if (!dst_pmd)
259                         goto nomem;
260
261                 do {
262                         pte_t * src_pte, * dst_pte;
263                 
264                         /* copy_pte_range */
265                 
266                         if (pmd_none(*src_pmd))
267                                 goto skip_copy_pte_range;
268                         if (pmd_bad(*src_pmd)) {
269                                 pmd_ERROR(*src_pmd);
270                                 pmd_clear(src_pmd);
271 skip_copy_pte_range:
272                                 address = (address + PMD_SIZE) & PMD_MASK;
273                                 if (address >= end)
274                                         goto out;
275                                 goto cont_copy_pmd_range;
276                         }
277
278                         dst_pte = pte_alloc_map(dst, dst_pmd, address);
279                         if (!dst_pte)
280                                 goto nomem;
281                         spin_lock(&src->page_table_lock);       
282                         src_pte = pte_offset_map_nested(src_pmd, address);
283                         do {
284                                 pte_t pte = *src_pte;
285                                 struct page *page;
286                                 unsigned long pfn;
287
288                                 /* copy_one_pte */
289
290                                 if (pte_none(pte))
291                                         goto cont_copy_pte_range_noset;
292                                 /* pte contains position in swap, so copy. */
293                                 if (!pte_present(pte)) {
294                                         if (!pte_file(pte))
295                                                 swap_duplicate(pte_to_swp_entry(pte));
296                                         set_pte(dst_pte, pte);
297                                         goto cont_copy_pte_range_noset;
298                                 }
299                                 pfn = pte_pfn(pte);
300                                 /* the pte points outside of valid memory, the
301                                  * mapping is assumed to be good, meaningful
302                                  * and not mapped via rmap - duplicate the
303                                  * mapping as is.
304                                  */
305                                 page = NULL;
306                                 if (pfn_valid(pfn)) 
307                                         page = pfn_to_page(pfn); 
308
309                                 if (!page || PageReserved(page)) {
310                                         set_pte(dst_pte, pte);
311                                         goto cont_copy_pte_range_noset;
312                                 }
313
314                                 /*
315                                  * If it's a COW mapping, write protect it both
316                                  * in the parent and the child
317                                  */
318                                 if (cow) {
319                                         ptep_set_wrprotect(src_pte);
320                                         pte = *src_pte;
321                                 }
322
323                                 /*
324                                  * If it's a shared mapping, mark it clean in
325                                  * the child
326                                  */
327                                 if (vma->vm_flags & VM_SHARED)
328                                         pte = pte_mkclean(pte);
329                                 pte = pte_mkold(pte);
330                                 get_page(page);
331                                 dst->rss++;
332
333                                 set_pte(dst_pte, pte);
334                                 pte_chain = page_add_rmap(page, dst_pte,
335                                                         pte_chain);
336                                 if (pte_chain)
337                                         goto cont_copy_pte_range_noset;
338                                 pte_chain = pte_chain_alloc(GFP_ATOMIC);
339                                 if (pte_chain)
340                                         goto cont_copy_pte_range_noset;
341
342                                 /*
343                                  * pte_chain allocation failed, and we need to
344                                  * run page reclaim.
345                                  */
346                                 pte_unmap_nested(src_pte);
347                                 pte_unmap(dst_pte);
348                                 spin_unlock(&src->page_table_lock);     
349                                 spin_unlock(&dst->page_table_lock);     
350                                 pte_chain = pte_chain_alloc(GFP_KERNEL);
351                                 spin_lock(&dst->page_table_lock);       
352                                 if (!pte_chain)
353                                         goto nomem;
354                                 spin_lock(&src->page_table_lock);
355                                 dst_pte = pte_offset_map(dst_pmd, address);
356                                 src_pte = pte_offset_map_nested(src_pmd,
357                                                                 address);
358 cont_copy_pte_range_noset:
359                                 address += PAGE_SIZE;
360                                 if (address >= end) {
361                                         pte_unmap_nested(src_pte);
362                                         pte_unmap(dst_pte);
363                                         goto out_unlock;
364                                 }
365                                 src_pte++;
366                                 dst_pte++;
367                         } while ((unsigned long)src_pte & PTE_TABLE_MASK);
368                         pte_unmap_nested(src_pte-1);
369                         pte_unmap(dst_pte-1);
370                         spin_unlock(&src->page_table_lock);
371                 
372 cont_copy_pmd_range:
373                         src_pmd++;
374                         dst_pmd++;
375                 } while ((unsigned long)src_pmd & PMD_TABLE_MASK);
376         }
377 out_unlock:
378         spin_unlock(&src->page_table_lock);
379 out:
380         pte_chain_free(pte_chain);
381         return 0;
382 nomem:
383         pte_chain_free(pte_chain);
384         return -ENOMEM;
385 }
386
387 static void
388 zap_pte_range(struct mmu_gather *tlb, pmd_t * pmd,
389                 unsigned long address, unsigned long size)
390 {
391         unsigned long offset;
392         pte_t *ptep;
393
394         if (pmd_none(*pmd))
395                 return;
396         if (pmd_bad(*pmd)) {
397                 pmd_ERROR(*pmd);
398                 pmd_clear(pmd);
399                 return;
400         }
401         ptep = pte_offset_map(pmd, address);
402         offset = address & ~PMD_MASK;
403         if (offset + size > PMD_SIZE)
404                 size = PMD_SIZE - offset;
405         size &= PAGE_MASK;
406         for (offset=0; offset < size; ptep++, offset += PAGE_SIZE) {
407                 pte_t pte = *ptep;
408                 if (pte_none(pte))
409                         continue;
410                 if (pte_present(pte)) {
411                         unsigned long pfn = pte_pfn(pte);
412
413                         pte = ptep_get_and_clear(ptep);
414                         tlb_remove_tlb_entry(tlb, ptep, address+offset);
415                         if (pfn_valid(pfn)) {
416                                 struct page *page = pfn_to_page(pfn);
417                                 if (!PageReserved(page)) {
418                                         if (pte_dirty(pte))
419                                                 set_page_dirty(page);
420                                         if (page->mapping && pte_young(pte) &&
421                                                         !PageSwapCache(page))
422                                                 mark_page_accessed(page);
423                                         tlb->freed++;
424                                         page_remove_rmap(page, ptep);
425                                         tlb_remove_page(tlb, page);
426                                 }
427                         }
428                 } else {
429                         if (!pte_file(pte))
430                                 free_swap_and_cache(pte_to_swp_entry(pte));
431                         pte_clear(ptep);
432                 }
433         }
434         pte_unmap(ptep-1);
435 }
436
437 static void
438 zap_pmd_range(struct mmu_gather *tlb, pgd_t * dir,
439                 unsigned long address, unsigned long size)
440 {
441         pmd_t * pmd;
442         unsigned long end;
443
444         if (pgd_none(*dir))
445                 return;
446         if (pgd_bad(*dir)) {
447                 pgd_ERROR(*dir);
448                 pgd_clear(dir);
449                 return;
450         }
451         pmd = pmd_offset(dir, address);
452         end = address + size;
453         if (end > ((address + PGDIR_SIZE) & PGDIR_MASK))
454                 end = ((address + PGDIR_SIZE) & PGDIR_MASK);
455         do {
456                 zap_pte_range(tlb, pmd, address, end - address);
457                 address = (address + PMD_SIZE) & PMD_MASK; 
458                 pmd++;
459         } while (address < end);
460 }
461
462 void unmap_page_range(struct mmu_gather *tlb, struct vm_area_struct *vma,
463                         unsigned long address, unsigned long end)
464 {
465         pgd_t * dir;
466
467         if (is_vm_hugetlb_page(vma)) {
468                 unmap_hugepage_range(vma, address, end);
469                 return;
470         }
471
472         BUG_ON(address >= end);
473
474         dir = pgd_offset(vma->vm_mm, address);
475         tlb_start_vma(tlb, vma);
476         do {
477                 zap_pmd_range(tlb, dir, address, end - address);
478                 address = (address + PGDIR_SIZE) & PGDIR_MASK;
479                 dir++;
480         } while (address && (address < end));
481         tlb_end_vma(tlb, vma);
482 }
483
484 /* Dispose of an entire struct mmu_gather per rescheduling point */
485 #if defined(CONFIG_SMP) && defined(CONFIG_PREEMPT)
486 #define ZAP_BLOCK_SIZE  (FREE_PTE_NR * PAGE_SIZE)
487 #endif
488
489 /* For UP, 256 pages at a time gives nice low latency */
490 #if !defined(CONFIG_SMP) && defined(CONFIG_PREEMPT)
491 #define ZAP_BLOCK_SIZE  (256 * PAGE_SIZE)
492 #endif
493
494 /* No preempt: go for the best straight-line efficiency */
495 #if !defined(CONFIG_PREEMPT)
496 #define ZAP_BLOCK_SIZE  (~(0UL))
497 #endif
498
499 /**
500  * unmap_vmas - unmap a range of memory covered by a list of vma's
501  * @tlbp: address of the caller's struct mmu_gather
502  * @mm: the controlling mm_struct
503  * @vma: the starting vma
504  * @start_addr: virtual address at which to start unmapping
505  * @end_addr: virtual address at which to end unmapping
506  * @nr_accounted: Place number of unmapped pages in vm-accountable vma's here
507  *
508  * Returns the number of vma's which were covered by the unmapping.
509  *
510  * Unmap all pages in the vma list.  Called under page_table_lock.
511  *
512  * We aim to not hold page_table_lock for too long (for scheduling latency
513  * reasons).  So zap pages in ZAP_BLOCK_SIZE bytecounts.  This means we need to
514  * return the ending mmu_gather to the caller.
515  *
516  * Only addresses between `start' and `end' will be unmapped.
517  *
518  * The VMA list must be sorted in ascending virtual address order.
519  *
520  * unmap_vmas() assumes that the caller will flush the whole unmapped address
521  * range after unmap_vmas() returns.  So the only responsibility here is to
522  * ensure that any thus-far unmapped pages are flushed before unmap_vmas()
523  * drops the lock and schedules.
524  */
525 int unmap_vmas(struct mmu_gather **tlbp, struct mm_struct *mm,
526                 struct vm_area_struct *vma, unsigned long start_addr,
527                 unsigned long end_addr, unsigned long *nr_accounted)
528 {
529         unsigned long zap_bytes = ZAP_BLOCK_SIZE;
530         unsigned long tlb_start = 0;    /* For tlb_finish_mmu */
531         int tlb_start_valid = 0;
532         int ret = 0;
533
534         if (vma) {      /* debug.  killme. */
535                 if (end_addr <= vma->vm_start)
536                         printk("%s: end_addr(0x%08lx) <= vm_start(0x%08lx)\n",
537                                 __FUNCTION__, end_addr, vma->vm_start);
538                 if (start_addr >= vma->vm_end)
539                         printk("%s: start_addr(0x%08lx) >= vm_end(0x%08lx)\n",
540                                 __FUNCTION__, start_addr, vma->vm_end);
541         }
542
543         for ( ; vma && vma->vm_start < end_addr; vma = vma->vm_next) {
544                 unsigned long start;
545                 unsigned long end;
546
547                 start = max(vma->vm_start, start_addr);
548                 if (start >= vma->vm_end)
549                         continue;
550                 end = min(vma->vm_end, end_addr);
551                 if (end <= vma->vm_start)
552                         continue;
553
554                 if (vma->vm_flags & VM_ACCOUNT)
555                         *nr_accounted += (end - start) >> PAGE_SHIFT;
556
557                 ret++;
558                 while (start != end) {
559                         unsigned long block;
560
561                         if (is_vm_hugetlb_page(vma))
562                                 block = end - start;
563                         else
564                                 block = min(zap_bytes, end - start);
565
566                         if (!tlb_start_valid) {
567                                 tlb_start = start;
568                                 tlb_start_valid = 1;
569                         }
570
571                         unmap_page_range(*tlbp, vma, start, start + block);
572                         start += block;
573                         zap_bytes -= block;
574                         if ((long)zap_bytes > 0)
575                                 continue;
576                         if (need_resched()) {
577                                 int fullmm = tlb_is_full_mm(*tlbp);
578                                 tlb_finish_mmu(*tlbp, tlb_start, start);
579                                 cond_resched_lock(&mm->page_table_lock);
580                                 *tlbp = tlb_gather_mmu(mm, fullmm);
581                                 tlb_start_valid = 0;
582                         }
583                         zap_bytes = ZAP_BLOCK_SIZE;
584                 }
585                 if (vma->vm_next && vma->vm_next->vm_start < vma->vm_end)
586                         printk("%s: VMA list is not sorted correctly!\n",
587                                 __FUNCTION__);          
588         }
589         return ret;
590 }
591
592 /**
593  * zap_page_range - remove user pages in a given range
594  * @vma: vm_area_struct holding the applicable pages
595  * @address: starting address of pages to zap
596  * @size: number of bytes to zap
597  */
598 void zap_page_range(struct vm_area_struct *vma,
599                         unsigned long address, unsigned long size)
600 {
601         struct mm_struct *mm = vma->vm_mm;
602         struct mmu_gather *tlb;
603         unsigned long end = address + size;
604         unsigned long nr_accounted = 0;
605
606         might_sleep();
607
608         if (is_vm_hugetlb_page(vma)) {
609                 zap_hugepage_range(vma, address, size);
610                 return;
611         }
612
613         lru_add_drain();
614         spin_lock(&mm->page_table_lock);
615         tlb = tlb_gather_mmu(mm, 0);
616         unmap_vmas(&tlb, mm, vma, address, end, &nr_accounted);
617         tlb_finish_mmu(tlb, address, end);
618         spin_unlock(&mm->page_table_lock);
619 }
620
621 /*
622  * Do a quick page-table lookup for a single page.
623  * mm->page_table_lock must be held.
624  */
625 struct page *
626 follow_page(struct mm_struct *mm, unsigned long address, int write) 
627 {
628         pgd_t *pgd;
629         pmd_t *pmd;
630         pte_t *ptep, pte;
631         unsigned long pfn;
632         struct vm_area_struct *vma;
633
634         vma = hugepage_vma(mm, address);
635         if (vma)
636                 return follow_huge_addr(mm, vma, address, write);
637
638         pgd = pgd_offset(mm, address);
639         if (pgd_none(*pgd) || pgd_bad(*pgd))
640                 goto out;
641
642         pmd = pmd_offset(pgd, address);
643         if (pmd_none(*pmd))
644                 goto out;
645         if (pmd_huge(*pmd))
646                 return follow_huge_pmd(mm, address, pmd, write);
647         if (pmd_bad(*pmd))
648                 goto out;
649
650         ptep = pte_offset_map(pmd, address);
651         if (!ptep)
652                 goto out;
653
654         pte = *ptep;
655         pte_unmap(ptep);
656         if (pte_present(pte)) {
657                 if (write && !pte_write(pte))
658                         goto out;
659                 if (write && !pte_dirty(pte)) {
660                         struct page *page = pte_page(pte);
661                         if (!PageDirty(page))
662                                 set_page_dirty(page);
663                 }
664                 pfn = pte_pfn(pte);
665                 if (pfn_valid(pfn)) {
666                         struct page *page = pfn_to_page(pfn);
667                         
668                         mark_page_accessed(page);
669                         return page;
670                 }
671         }
672
673 out:
674         return NULL;
675 }
676
677 /* 
678  * Given a physical address, is there a useful struct page pointing to
679  * it?  This may become more complex in the future if we start dealing
680  * with IO-aperture pages for direct-IO.
681  */
682
683 static inline struct page *get_page_map(struct page *page)
684 {
685         if (!pfn_valid(page_to_pfn(page)))
686                 return NULL;
687         return page;
688 }
689
690
691 int get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
692                 unsigned long start, int len, int write, int force,
693                 struct page **pages, struct vm_area_struct **vmas)
694 {
695         int i;
696         unsigned int flags;
697
698         /* 
699          * Require read or write permissions.
700          * If 'force' is set, we only require the "MAY" flags.
701          */
702         flags = write ? (VM_WRITE | VM_MAYWRITE) : (VM_READ | VM_MAYREAD);
703         flags &= force ? (VM_MAYREAD | VM_MAYWRITE) : (VM_READ | VM_WRITE);
704         i = 0;
705
706         do {
707                 struct vm_area_struct * vma;
708
709                 vma = find_extend_vma(mm, start);
710                 if (!vma && in_gate_area(tsk, start)) {
711                         unsigned long pg = start & PAGE_MASK;
712                         struct vm_area_struct *gate_vma = get_gate_vma(tsk);
713                         pgd_t *pgd;
714                         pmd_t *pmd;
715                         pte_t *pte;
716                         if (write) /* user gate pages are read-only */
717                                 return i ? : -EFAULT;
718                         pgd = pgd_offset_k(pg);
719                         if (!pgd)
720                                 return i ? : -EFAULT;
721                         pmd = pmd_offset(pgd, pg);
722                         if (!pmd)
723                                 return i ? : -EFAULT;
724                         pte = pte_offset_kernel(pmd, pg);
725                         if (!pte || !pte_present(*pte))
726                                 return i ? : -EFAULT;
727                         if (pages) {
728                                 pages[i] = pte_page(*pte);
729                                 get_page(pages[i]);
730                         }
731                         if (vmas)
732                                 vmas[i] = gate_vma;
733                         i++;
734                         start += PAGE_SIZE;
735                         len--;
736                         continue;
737                 }
738
739                 if (!vma || (pages && (vma->vm_flags & VM_IO))
740                                 || !(flags & vma->vm_flags))
741                         return i ? : -EFAULT;
742
743                 if (is_vm_hugetlb_page(vma)) {
744                         i = follow_hugetlb_page(mm, vma, pages, vmas,
745                                                 &start, &len, i);
746                         continue;
747                 }
748                 spin_lock(&mm->page_table_lock);
749                 do {
750                         struct page *map;
751                         int lookup_write = write;
752                         while (!(map = follow_page(mm, start, lookup_write))) {
753                                 spin_unlock(&mm->page_table_lock);
754                                 switch (handle_mm_fault(mm,vma,start,write)) {
755                                 case VM_FAULT_MINOR:
756                                         tsk->min_flt++;
757                                         break;
758                                 case VM_FAULT_MAJOR:
759                                         tsk->maj_flt++;
760                                         break;
761                                 case VM_FAULT_SIGBUS:
762                                         return i ? i : -EFAULT;
763                                 case VM_FAULT_OOM:
764                                         return i ? i : -ENOMEM;
765                                 default:
766                                         BUG();
767                                 }
768                                 /*
769                                  * Now that we have performed a write fault
770                                  * and surely no longer have a shared page we
771                                  * shouldn't write, we shouldn't ignore an
772                                  * unwritable page in the page table if
773                                  * we are forcing write access.
774                                  */
775                                 lookup_write = write && !force;
776                                 spin_lock(&mm->page_table_lock);
777                         }
778                         if (pages) {
779                                 pages[i] = get_page_map(map);
780                                 if (!pages[i]) {
781                                         spin_unlock(&mm->page_table_lock);
782                                         while (i--)
783                                                 page_cache_release(pages[i]);
784                                         i = -EFAULT;
785                                         goto out;
786                                 }
787                                 flush_dcache_page(pages[i]);
788                                 if (!PageReserved(pages[i]))
789                                         page_cache_get(pages[i]);
790                         }
791                         if (vmas)
792                                 vmas[i] = vma;
793                         i++;
794                         start += PAGE_SIZE;
795                         len--;
796                 } while(len && start < vma->vm_end);
797                 spin_unlock(&mm->page_table_lock);
798         } while(len);
799 out:
800         return i;
801 }
802
803 EXPORT_SYMBOL(get_user_pages);
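/*
 * A minimal sketch of a typical get_user_pages() caller, assuming a
 * driver that pins a user buffer for I/O.  The helper name, user_addr
 * and npages are illustrative; only the locking rule (hold mmap_sem
 * for reading across the call) and the page_cache_release() pairing
 * reflect the interface above.
 */
#if 0	/* example sketch only */
static int example_pin_user_buffer(unsigned long user_addr, int npages,
				   struct page **pages)
{
	int got;

	down_read(&current->mm->mmap_sem);
	got = get_user_pages(current, current->mm, user_addr & PAGE_MASK,
			     npages, 1 /* write */, 0 /* force */,
			     pages, NULL);
	up_read(&current->mm->mmap_sem);

	/* the caller later drops each pinned page with page_cache_release() */
	return got;
}
#endif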
804
805 static void zeromap_pte_range(pte_t * pte, unsigned long address,
806                                      unsigned long size, pgprot_t prot)
807 {
808         unsigned long end;
809
810         address &= ~PMD_MASK;
811         end = address + size;
812         if (end > PMD_SIZE)
813                 end = PMD_SIZE;
814         do {
815                 pte_t zero_pte = pte_wrprotect(mk_pte(ZERO_PAGE(address), prot));
816                 BUG_ON(!pte_none(*pte));
817                 set_pte(pte, zero_pte);
818                 address += PAGE_SIZE;
819                 pte++;
820         } while (address && (address < end));
821 }
822
823 static inline int zeromap_pmd_range(struct mm_struct *mm, pmd_t * pmd, unsigned long address,
824                                     unsigned long size, pgprot_t prot)
825 {
826         unsigned long base, end;
827
828         base = address & PGDIR_MASK;
829         address &= ~PGDIR_MASK;
830         end = address + size;
831         if (end > PGDIR_SIZE)
832                 end = PGDIR_SIZE;
833         do {
834                 pte_t * pte = pte_alloc_map(mm, pmd, base + address);
835                 if (!pte)
836                         return -ENOMEM;
837                 zeromap_pte_range(pte, base + address, end - address, prot);
838                 pte_unmap(pte);
839                 address = (address + PMD_SIZE) & PMD_MASK;
840                 pmd++;
841         } while (address && (address < end));
842         return 0;
843 }
844
845 int zeromap_page_range(struct vm_area_struct *vma, unsigned long address, unsigned long size, pgprot_t prot)
846 {
847         int error = 0;
848         pgd_t * dir;
849         unsigned long beg = address;
850         unsigned long end = address + size;
851         struct mm_struct *mm = vma->vm_mm;
852
853         dir = pgd_offset(mm, address);
854         flush_cache_range(vma, beg, end);
855         if (address >= end)
856                 BUG();
857
858         spin_lock(&mm->page_table_lock);
859         do {
860                 pmd_t *pmd = pmd_alloc(mm, dir, address);
861                 error = -ENOMEM;
862                 if (!pmd)
863                         break;
864                 error = zeromap_pmd_range(mm, pmd, address, end - address, prot);
865                 if (error)
866                         break;
867                 address = (address + PGDIR_SIZE) & PGDIR_MASK;
868                 dir++;
869         } while (address && (address < end));
870         /*
871          * Why flush? zeromap_pte_range has a BUG_ON for !pte_none()
872          */
873         flush_tlb_range(vma, beg, end);
874         spin_unlock(&mm->page_table_lock);
875         return error;
876 }
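/*
 * A minimal sketch of how a character driver might use
 * zeromap_page_range() for a private, all-zero mapping (a
 * /dev/zero-style ->mmap()).  The function name is assumed for
 * illustration; a real driver would give shared mappings a proper
 * backing object instead of rejecting them.
 */
#if 0	/* example sketch only */
static int example_mmap_zero(struct file *file, struct vm_area_struct *vma)
{
	if (vma->vm_flags & VM_SHARED)
		return -EINVAL;
	return zeromap_page_range(vma, vma->vm_start,
				  vma->vm_end - vma->vm_start,
				  vma->vm_page_prot);
}
#endif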
877
878 /*
879  * Maps a range of physical memory into the requested pages. The old
880  * mappings are removed. Any references to nonexistent pages result
881  * in null mappings (currently treated as "copy-on-access").
882  */
883 static inline void remap_pte_range(pte_t * pte, unsigned long address, unsigned long size,
884         unsigned long phys_addr, pgprot_t prot)
885 {
886         unsigned long end;
887         unsigned long pfn;
888
889         address &= ~PMD_MASK;
890         end = address + size;
891         if (end > PMD_SIZE)
892                 end = PMD_SIZE;
893         pfn = phys_addr >> PAGE_SHIFT;
894         do {
895                 BUG_ON(!pte_none(*pte));
896                 if (!pfn_valid(pfn) || PageReserved(pfn_to_page(pfn)))
897                         set_pte(pte, pfn_pte(pfn, prot));
898                 address += PAGE_SIZE;
899                 pfn++;
900                 pte++;
901         } while (address && (address < end));
902 }
903
904 static inline int remap_pmd_range(struct mm_struct *mm, pmd_t * pmd, unsigned long address, unsigned long size,
905         unsigned long phys_addr, pgprot_t prot)
906 {
907         unsigned long base, end;
908
909         base = address & PGDIR_MASK;
910         address &= ~PGDIR_MASK;
911         end = address + size;
912         if (end > PGDIR_SIZE)
913                 end = PGDIR_SIZE;
914         phys_addr -= address;
915         do {
916                 pte_t * pte = pte_alloc_map(mm, pmd, base + address);
917                 if (!pte)
918                         return -ENOMEM;
919                 remap_pte_range(pte, base + address, end - address, address + phys_addr, prot);
920                 pte_unmap(pte);
921                 address = (address + PMD_SIZE) & PMD_MASK;
922                 pmd++;
923         } while (address && (address < end));
924         return 0;
925 }
926
927 /*  Note: this is only safe if the mm semaphore is held when called. */
928 int remap_page_range(struct vm_area_struct *vma, unsigned long from, unsigned long phys_addr, unsigned long size, pgprot_t prot)
929 {
930         int error = 0;
931         pgd_t * dir;
932         unsigned long beg = from;
933         unsigned long end = from + size;
934         struct mm_struct *mm = vma->vm_mm;
935
936         phys_addr -= from;
937         dir = pgd_offset(mm, from);
938         flush_cache_range(vma, beg, end);
939         if (from >= end)
940                 BUG();
941
942         spin_lock(&mm->page_table_lock);
943         do {
944                 pmd_t *pmd = pmd_alloc(mm, dir, from);
945                 error = -ENOMEM;
946                 if (!pmd)
947                         break;
948                 error = remap_pmd_range(mm, pmd, from, end - from, phys_addr + from, prot);
949                 if (error)
950                         break;
951                 from = (from + PGDIR_SIZE) & PGDIR_MASK;
952                 dir++;
953         } while (from && (from < end));
954         /*
955          * Why flush? remap_pte_range has a BUG_ON for !pte_none()
956          */
957         flush_tlb_range(vma, beg, end);
958         spin_unlock(&mm->page_table_lock);
959         return error;
960 }
961
962 EXPORT_SYMBOL(remap_page_range);
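/*
 * A minimal sketch of the classic remap_page_range() caller: a
 * driver's ->mmap() method mapping a physical device aperture into a
 * user vma.  EXAMPLE_PHYS_BASE and the function name are assumptions
 * for illustration; the mm semaphore is already held when ->mmap() is
 * invoked, which satisfies the note above.
 */
#if 0	/* example sketch only */
static int example_mmap(struct file *file, struct vm_area_struct *vma)
{
	unsigned long size = vma->vm_end - vma->vm_start;
	unsigned long off = vma->vm_pgoff << PAGE_SHIFT;

	if (remap_page_range(vma, vma->vm_start, EXAMPLE_PHYS_BASE + off,
			     size, vma->vm_page_prot))
		return -EAGAIN;
	return 0;
}
#endif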
963
964 /*
965  * Do pte_mkwrite, but only if the vma says VM_WRITE.  We do this when
966  * servicing faults for write access.  In the normal case, we always want
967  * pte_mkwrite.  But get_user_pages can cause write faults for mappings
968  * that do not have writing enabled, when used by access_process_vm.
969  */
970 static inline pte_t maybe_mkwrite(pte_t pte, struct vm_area_struct *vma)
971 {
972         if (likely(vma->vm_flags & VM_WRITE))
973                 pte = pte_mkwrite(pte);
974         return pte;
975 }
976
977 /*
978  * We hold the mm semaphore for reading and vma->vm_mm->page_table_lock
979  */
980 static inline void break_cow(struct vm_area_struct * vma, struct page * new_page, unsigned long address, 
981                 pte_t *page_table)
982 {
983         pte_t entry;
984
985         flush_cache_page(vma, address);
986         entry = maybe_mkwrite(pte_mkdirty(mk_pte(new_page, vma->vm_page_prot)),
987                               vma);
988         ptep_establish(vma, address, page_table, entry);
989         update_mmu_cache(vma, address, entry);
990 }
991
992 /*
993  * This routine handles present pages, when users try to write
994  * to a shared page. It is done by copying the page to a new address
995  * and decrementing the shared-page counter for the old page.
996  *
997  * Goto-purists beware: the only reason for goto's here is that it results
998  * in better assembly code.. The "default" path will see no jumps at all.
999  *
1000  * Note that this routine assumes that the protection checks have been
1001  * done by the caller (the low-level page fault routine in most cases).
1002  * Thus we can safely just mark it writable once we've done any necessary
1003  * COW.
1004  *
1005  * We also mark the page dirty at this point even though the page will
1006  * change only once the write actually happens. This avoids a few races,
1007  * and potentially makes it more efficient.
1008  *
1009  * We hold the mm semaphore and the page_table_lock on entry and exit
1010  * with the page_table_lock released.
1011  */
1012 static int do_wp_page(struct mm_struct *mm, struct vm_area_struct * vma,
1013         unsigned long address, pte_t *page_table, pmd_t *pmd, pte_t pte)
1014 {
1015         struct page *old_page, *new_page;
1016         unsigned long pfn = pte_pfn(pte);
1017         struct pte_chain *pte_chain;
1018         pte_t entry;
1019
1020         if (unlikely(!pfn_valid(pfn))) {
1021                 /*
1022                  * This should really halt the system so it can be debugged or
1023                  * at least the kernel stops what it's doing before it corrupts
1024                  * data, but for the moment just pretend this is OOM.
1025                  */
1026                 pte_unmap(page_table);
1027                 printk(KERN_ERR "do_wp_page: bogus page at address %08lx\n",
1028                                 address);
1029                 spin_unlock(&mm->page_table_lock);
1030                 return VM_FAULT_OOM;
1031         }
1032         old_page = pfn_to_page(pfn);
1033
1034         if (!TestSetPageLocked(old_page)) {
1035                 int reuse = can_share_swap_page(old_page);
1036                 unlock_page(old_page);
1037                 if (reuse) {
1038                         flush_cache_page(vma, address);
1039                         entry = maybe_mkwrite(pte_mkyoung(pte_mkdirty(pte)),
1040                                               vma);
1041                         ptep_establish(vma, address, page_table, entry);
1042                         update_mmu_cache(vma, address, entry);
1043                         pte_unmap(page_table);
1044                         spin_unlock(&mm->page_table_lock);
1045                         return VM_FAULT_MINOR;
1046                 }
1047         }
1048         pte_unmap(page_table);
1049
1050         /*
1051          * Ok, we need to copy. Oh, well..
1052          */
1053         page_cache_get(old_page);
1054         spin_unlock(&mm->page_table_lock);
1055
1056         pte_chain = pte_chain_alloc(GFP_KERNEL);
1057         if (!pte_chain)
1058                 goto no_pte_chain;
1059         new_page = alloc_page(GFP_HIGHUSER);
1060         if (!new_page)
1061                 goto no_new_page;
1062         copy_cow_page(old_page,new_page,address);
1063
1064         /*
1065          * Re-check the pte - we dropped the lock
1066          */
1067         spin_lock(&mm->page_table_lock);
1068         page_table = pte_offset_map(pmd, address);
1069         if (pte_same(*page_table, pte)) {
1070                 if (PageReserved(old_page))
1071                         ++mm->rss;
1072                 page_remove_rmap(old_page, page_table);
1073                 break_cow(vma, new_page, address, page_table);
1074                 pte_chain = page_add_rmap(new_page, page_table, pte_chain);
1075                 lru_cache_add_active(new_page);
1076
1077                 /* Free the old page.. */
1078                 new_page = old_page;
1079         }
1080         pte_unmap(page_table);
1081         page_cache_release(new_page);
1082         page_cache_release(old_page);
1083         spin_unlock(&mm->page_table_lock);
1084         pte_chain_free(pte_chain);
1085         return VM_FAULT_MINOR;
1086
1087 no_new_page:
1088         pte_chain_free(pte_chain);
1089 no_pte_chain:
1090         page_cache_release(old_page);
1091         return VM_FAULT_OOM;
1092 }
1093
1094 /*
1095  * Helper function for invalidate_mmap_range().
1096  * Both hba and hlen are page numbers in PAGE_SIZE units.
1097  * An hlen of zero blows away the entire portion of the file after hba.
1098  */
1099 static void
1100 invalidate_mmap_range_list(struct list_head *head,
1101                            unsigned long const hba,
1102                            unsigned long const hlen)
1103 {
1104         struct list_head *curr;
1105         unsigned long hea;      /* last page of hole. */
1106         unsigned long vba;
1107         unsigned long vea;      /* last page of corresponding uva hole. */
1108         struct vm_area_struct *vp;
1109         unsigned long zba;
1110         unsigned long zea;
1111
1112         hea = hba + hlen - 1;   /* avoid overflow. */
1113         if (hea < hba)
1114                 hea = ULONG_MAX;
1115         list_for_each(curr, head) {
1116                 vp = list_entry(curr, struct vm_area_struct, shared);
1117                 vba = vp->vm_pgoff;
1118                 vea = vba + ((vp->vm_end - vp->vm_start) >> PAGE_SHIFT) - 1;
1119                 if (hea < vba || vea < hba)
1120                         continue;       /* Mapping disjoint from hole. */
1121                 zba = (hba <= vba) ? vba : hba;
1122                 zea = (vea <= hea) ? vea : hea;
1123                 zap_page_range(vp,
1124                                ((zba - vba) << PAGE_SHIFT) + vp->vm_start,
1125                                (zea - zba + 1) << PAGE_SHIFT);
1126         }
1127 }
1128
1129 /**
1130  * invalidate_mmap_range - invalidate the portion of all mmaps
1131  * in the specified address_space corresponding to the specified
1132  * page range in the underlying file.
1133  * @mapping: the address space containing mmaps to be invalidated.
1134  * @holebegin: byte in first page to invalidate, relative to the start of
1135  * the underlying file.  This will be rounded down to a PAGE_SIZE
1136  * boundary.  Note that this is different from vmtruncate(), which
1137  * must keep the partial page.  In contrast, we must get rid of
1138  * partial pages.
1139  * @holelen: size of prospective hole in bytes.  This will be rounded
1140  * up to a PAGE_SIZE boundary.  A holelen of zero truncates to the
1141  * end of the file.
1142  */
1143 void invalidate_mmap_range(struct address_space *mapping,
1144                       loff_t const holebegin, loff_t const holelen)
1145 {
1146         unsigned long hba = holebegin >> PAGE_SHIFT;
1147         unsigned long hlen = (holelen + PAGE_SIZE - 1) >> PAGE_SHIFT;
1148
1149         /* Check for overflow. */
1150         if (sizeof(holelen) > sizeof(hlen)) {
1151                 long long holeend =
1152                         (holebegin + holelen + PAGE_SIZE - 1) >> PAGE_SHIFT;
1153
1154                 if (holeend & ~(long long)ULONG_MAX)
1155                         hlen = ULONG_MAX - hba + 1;
1156         }
1157         down(&mapping->i_shared_sem);
1158         /* Protect against page fault */
1159         atomic_inc(&mapping->truncate_count);
1160         if (unlikely(!list_empty(&mapping->i_mmap)))
1161                 invalidate_mmap_range_list(&mapping->i_mmap, hba, hlen);
1162         if (unlikely(!list_empty(&mapping->i_mmap_shared)))
1163                 invalidate_mmap_range_list(&mapping->i_mmap_shared, hba, hlen);
1164         up(&mapping->i_shared_sem);
1165 }
1166 EXPORT_SYMBOL_GPL(invalidate_mmap_range);
1167
1168 /*
1169  * Handle all mappings that got truncated by a "truncate()"
1170  * system call.
1171  *
1172  * NOTE! We have to be ready to update the memory sharing
1173  * between the file and the memory map for a potential last
1174  * incomplete page.  Ugly, but necessary.
1175  */
1176 int vmtruncate(struct inode * inode, loff_t offset)
1177 {
1178         struct address_space *mapping = inode->i_mapping;
1179         unsigned long limit;
1180
1181         if (inode->i_size < offset)
1182                 goto do_expand;
1183         i_size_write(inode, offset);
1184         invalidate_mmap_range(mapping, offset + PAGE_SIZE - 1, 0);
1185         truncate_inode_pages(mapping, offset);
1186         goto out_truncate;
1187
1188 do_expand:
1189         limit = current->rlim[RLIMIT_FSIZE].rlim_cur;
1190         if (limit != RLIM_INFINITY && offset > limit)
1191                 goto out_sig;
1192         if (offset > inode->i_sb->s_maxbytes)
1193                 goto out;
1194         i_size_write(inode, offset);
1195
1196 out_truncate:
1197         if (inode->i_op && inode->i_op->truncate)
1198                 inode->i_op->truncate(inode);
1199         return 0;
1200 out_sig:
1201         send_sig(SIGXFSZ, current, 0);
1202 out:
1203         return -EFBIG;
1204 }
1205
1206 EXPORT_SYMBOL(vmtruncate);
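/*
 * A minimal sketch of where vmtruncate() is normally reached from: the
 * ATTR_SIZE branch of attribute handling, which lets vmtruncate() drop
 * now-stale pages and mappings before the filesystem's own ->truncate()
 * runs.  The helper name is an illustrative assumption, not a copy of
 * the generic setattr code.
 */
#if 0	/* example sketch only */
static int example_setattr_size(struct inode *inode, struct iattr *attr)
{
	int error = 0;

	if (attr->ia_valid & ATTR_SIZE)
		error = vmtruncate(inode, attr->ia_size);
	return error;
}
#endif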
1207
1208 /* 
1209  * Primitive swap readahead code. We simply read an aligned block of
1210  * (1 << page_cluster) entries in the swap area. This method is chosen
1211  * because it doesn't cost us any seek time.  We also make sure to queue
1212  * the 'original' request together with the readahead ones...  
1213  */
1214 void swapin_readahead(swp_entry_t entry)
1215 {
1216         int i, num;
1217         struct page *new_page;
1218         unsigned long offset;
1219
1220         /*
1221          * Get the number of swap entries we should read ahead.
1222          */
1223         num = valid_swaphandles(entry, &offset);
1224         for (i = 0; i < num; offset++, i++) {
1225                 /* Ok, do the async read-ahead now */
1226                 new_page = read_swap_cache_async(swp_entry(swp_type(entry),
1227                                                 offset));
1228                 if (!new_page)
1229                         break;
1230                 page_cache_release(new_page);
1231         }
1232         lru_add_drain();        /* Push any new pages onto the LRU now */
1233 }
1234
1235 /*
1236  * We hold the mm semaphore and the page_table_lock on entry and
1237  * should release the pagetable lock on exit..
1238  */
1239 static int do_swap_page(struct mm_struct * mm,
1240         struct vm_area_struct * vma, unsigned long address,
1241         pte_t *page_table, pmd_t *pmd, pte_t orig_pte, int write_access)
1242 {
1243         struct page *page;
1244         swp_entry_t entry = pte_to_swp_entry(orig_pte);
1245         pte_t pte;
1246         int ret = VM_FAULT_MINOR;
1247         struct pte_chain *pte_chain = NULL;
1248
1249         pte_unmap(page_table);
1250         spin_unlock(&mm->page_table_lock);
1251         page = lookup_swap_cache(entry);
1252         if (!page) {
1253                 swapin_readahead(entry);
1254                 page = read_swap_cache_async(entry);
1255                 if (!page) {
1256                         /*
1257                          * Back out if somebody else faulted in this pte while
1258                          * we released the page table lock.
1259                          */
1260                         spin_lock(&mm->page_table_lock);
1261                         page_table = pte_offset_map(pmd, address);
1262                         if (pte_same(*page_table, orig_pte))
1263                                 ret = VM_FAULT_OOM;
1264                         else
1265                                 ret = VM_FAULT_MINOR;
1266                         pte_unmap(page_table);
1267                         spin_unlock(&mm->page_table_lock);
1268                         goto out;
1269                 }
1270
1271                 /* Had to read the page from swap area: Major fault */
1272                 ret = VM_FAULT_MAJOR;
1273                 inc_page_state(pgmajfault);
1274         }
1275
1276         mark_page_accessed(page);
1277         pte_chain = pte_chain_alloc(GFP_KERNEL);
1278         if (!pte_chain) {
1279                 ret = VM_FAULT_OOM;
1280                 goto out;
1281         }
1282         lock_page(page);
1283
1284         /*
1285          * Back out if somebody else faulted in this pte while we
1286          * released the page table lock.
1287          */
1288         spin_lock(&mm->page_table_lock);
1289         page_table = pte_offset_map(pmd, address);
1290         if (!pte_same(*page_table, orig_pte)) {
1291                 pte_unmap(page_table);
1292                 spin_unlock(&mm->page_table_lock);
1293                 unlock_page(page);
1294                 page_cache_release(page);
1295                 ret = VM_FAULT_MINOR;
1296                 goto out;
1297         }
1298
1299         /* The page isn't present yet, go ahead with the fault. */
1300                 
1301         swap_free(entry);
1302         if (vm_swap_full())
1303                 remove_exclusive_swap_page(page);
1304
1305         mm->rss++;
1306         pte = mk_pte(page, vma->vm_page_prot);
1307         if (write_access && can_share_swap_page(page))
1308                 pte = maybe_mkwrite(pte_mkdirty(pte), vma);
1309         unlock_page(page);
1310
1311         flush_icache_page(vma, page);
1312         set_pte(page_table, pte);
1313         pte_chain = page_add_rmap(page, page_table, pte_chain);
1314
1315         /* No need to invalidate - it was non-present before */
1316         update_mmu_cache(vma, address, pte);
1317         pte_unmap(page_table);
1318         spin_unlock(&mm->page_table_lock);
1319 out:
1320         pte_chain_free(pte_chain);
1321         return ret;
1322 }
1323
1324 /*
1325  * We are called with the MM semaphore and page_table_lock
1326  * spinlock held to protect against concurrent faults in
1327  * multithreaded programs. 
1328  */
1329 static int
1330 do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
1331                 pte_t *page_table, pmd_t *pmd, int write_access,
1332                 unsigned long addr)
1333 {
1334         pte_t entry;
1335         struct page * page = ZERO_PAGE(addr);
1336         struct pte_chain *pte_chain;
1337         int ret;
1338
1339         pte_chain = pte_chain_alloc(GFP_ATOMIC);
1340         if (!pte_chain) {
1341                 pte_unmap(page_table);
1342                 spin_unlock(&mm->page_table_lock);
1343                 pte_chain = pte_chain_alloc(GFP_KERNEL);
1344                 if (!pte_chain)
1345                         goto no_mem;
1346                 spin_lock(&mm->page_table_lock);
1347                 page_table = pte_offset_map(pmd, addr);
1348         }
1349                 
1350         /* Read-only mapping of ZERO_PAGE. */
1351         entry = pte_wrprotect(mk_pte(ZERO_PAGE(addr), vma->vm_page_prot));
1352
1353         /* ..except if it's a write access */
1354         if (write_access) {
1355                 /* Allocate our own private page. */
1356                 pte_unmap(page_table);
1357                 spin_unlock(&mm->page_table_lock);
1358
1359                 page = alloc_page(GFP_HIGHUSER);
1360                 if (!page)
1361                         goto no_mem;
1362                 clear_user_highpage(page, addr);
1363
1364                 spin_lock(&mm->page_table_lock);
1365                 page_table = pte_offset_map(pmd, addr);
1366
1367                 if (!pte_none(*page_table)) {
1368                         pte_unmap(page_table);
1369                         page_cache_release(page);
1370                         spin_unlock(&mm->page_table_lock);
1371                         ret = VM_FAULT_MINOR;
1372                         goto out;
1373                 }
1374                 mm->rss++;
1375                 entry = maybe_mkwrite(pte_mkdirty(mk_pte(page,
1376                                                          vma->vm_page_prot)),
1377                                       vma);
1378                 lru_cache_add_active(page);
1379                 mark_page_accessed(page);
1380         }
1381
1382         set_pte(page_table, entry);
1383         /* ignores ZERO_PAGE */
1384         pte_chain = page_add_rmap(page, page_table, pte_chain);
1385         pte_unmap(page_table);
1386
1387         /* No need to invalidate - it was non-present before */
1388         update_mmu_cache(vma, addr, entry);
1389         spin_unlock(&mm->page_table_lock);
1390         ret = VM_FAULT_MINOR;
1391         goto out;
1392
1393 no_mem:
1394         ret = VM_FAULT_OOM;
1395 out:
1396         pte_chain_free(pte_chain);
1397         return ret;
1398 }
1399
1400 /*
1401  * do_no_page() tries to create a new page mapping. It aggressively
1402  * tries to share with existing pages, but makes a separate copy if
1403  * the "write_access" parameter is true in order to avoid the next
1404  * page fault.
1405  *
1406  * As this is called only for pages that do not currently exist, we
1407  * do not need to flush old virtual caches or the TLB.
1408  *
1409  * This is called with the MM semaphore held and the page table
1410  * spinlock held. Exit with the spinlock released.
1411  */
1412 static int
1413 do_no_page(struct mm_struct *mm, struct vm_area_struct *vma,
1414         unsigned long address, int write_access, pte_t *page_table, pmd_t *pmd)
1415 {
1416         struct page * new_page;
1417         struct address_space *mapping = NULL;
1418         pte_t entry;
1419         struct pte_chain *pte_chain;
1420         int sequence = 0;
1421         int ret = VM_FAULT_MINOR;
1422
1423         if (!vma->vm_ops || !vma->vm_ops->nopage)
1424                 return do_anonymous_page(mm, vma, page_table,
1425                                         pmd, write_access, address);
1426         pte_unmap(page_table);
1427         spin_unlock(&mm->page_table_lock);
1428
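             /*
              * Record the mapping's truncate_count before calling
              * ->nopage() without the lock; if a truncation races with
              * us, the check further down notices the change and we
              * retry.
              */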
1429         if (vma->vm_file) {
1430                 mapping = vma->vm_file->f_mapping;
1431                 sequence = atomic_read(&mapping->truncate_count);
1432         }
1433         smp_rmb();  /* Prevent CPU from reordering lock-free ->nopage() */
1434 retry:
1435         new_page = vma->vm_ops->nopage(vma, address & PAGE_MASK, &ret);
1436
1437         /* no page was available -- either SIGBUS or OOM */
1438         if (new_page == NOPAGE_SIGBUS)
1439                 return VM_FAULT_SIGBUS;
1440         if (new_page == NOPAGE_OOM)
1441                 return VM_FAULT_OOM;
1442
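             /*
              * Allocate the pte_chain up front, while we may still
              * sleep, rather than under the page_table_lock.
              */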
1443         pte_chain = pte_chain_alloc(GFP_KERNEL);
1444         if (!pte_chain)
1445                 goto oom;
1446
1447         /*
1448          * Should we do an early C-O-W break?
1449          */
1450         if (write_access && !(vma->vm_flags & VM_SHARED)) {
1451                 struct page * page = alloc_page(GFP_HIGHUSER);
1452                 if (!page)
1453                         goto oom;
1454                 copy_user_highpage(page, new_page, address);
1455                 page_cache_release(new_page);
1456                 lru_cache_add_active(page);
1457                 new_page = page;
1458         }
1459
1460         spin_lock(&mm->page_table_lock);
1461         /*
1462          * For a file-backed vma, someone could have truncated or otherwise
1463          * invalidated this page.  If invalidate_mmap_range got called,
1464          * retry getting the page.
1465          */
1466         if (mapping &&
1467               (unlikely(sequence != atomic_read(&mapping->truncate_count)))) {
1468                 sequence = atomic_read(&mapping->truncate_count);
1469                 spin_unlock(&mm->page_table_lock);
1470                 page_cache_release(new_page);
1471                 pte_chain_free(pte_chain);
1472                 goto retry;
1473         }
1474         page_table = pte_offset_map(pmd, address);
1475
1476         /*
1477          * This silly early PAGE_DIRTY setting removes a race
1478          * due to the bad i386 page protection. But it's valid
1479          * for other architectures too.
1480          *
1481          * Note that if write_access is true, we either now have
1482          * an exclusive copy of the page, or this is a shared mapping,
1483          * so we can make it writable and dirty to avoid having to
1484          * handle that later.
1485          */
1486         /* Only go through if we didn't race with anybody else... */
1487         if (pte_none(*page_table)) {
1488                 if (!PageReserved(new_page))
1489                         ++mm->rss;
1490                 flush_icache_page(vma, new_page);
1491                 entry = mk_pte(new_page, vma->vm_page_prot);
1492                 if (write_access)
1493                         entry = maybe_mkwrite(pte_mkdirty(entry), vma);
1494                 set_pte(page_table, entry);
1495                 pte_chain = page_add_rmap(new_page, page_table, pte_chain);
1496                 pte_unmap(page_table);
1497         } else {
1498                 /* One of our sibling threads was faster, back out. */
1499                 pte_unmap(page_table);
1500                 page_cache_release(new_page);
1501                 spin_unlock(&mm->page_table_lock);
1502                 goto out;
1503         }
1504
1505         /* no need to invalidate: a not-present page shouldn't be cached */
1506         update_mmu_cache(vma, address, entry);
1507         spin_unlock(&mm->page_table_lock);
1508         goto out;
1509 oom:
1510         page_cache_release(new_page);
1511         ret = VM_FAULT_OOM;
1512 out:
1513         pte_chain_free(pte_chain);
1514         return ret;
1515 }
1516
1517 /*
1518  * Fault of a previously existing named mapping. Repopulate the pte
1519  * from the encoded file_pte if possible. This enables swappable
1520  * nonlinear vmas.
1521  */
1522 static int do_file_page(struct mm_struct * mm, struct vm_area_struct * vma,
1523         unsigned long address, int write_access, pte_t *pte, pmd_t *pmd)
1524 {
1525         unsigned long pgoff;
1526         int err;
1527
1528         BUG_ON(!vma->vm_ops || !vma->vm_ops->nopage);
1529         /*
1530          * Fall back to the linear fault path if the fs does not support
1531          * ->populate, or for a private write, which needs its own copy:
1532          */
1533         if (!vma->vm_ops || !vma->vm_ops->populate ||
1534                         (write_access && !(vma->vm_flags & VM_SHARED))) {
1535                 pte_clear(pte);
1536                 return do_no_page(mm, vma, address, write_access, pte, pmd);
1537         }
1538
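             /*
              * The pte encodes the file offset of this nonlinear page.
              * Recover it, then drop the pte mapping and the lock
              * before asking the filesystem to repopulate the pte.
              */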
1539         pgoff = pte_to_pgoff(*pte);
1540
1541         pte_unmap(pte);
1542         spin_unlock(&mm->page_table_lock);
1543
1544         err = vma->vm_ops->populate(vma, address & PAGE_MASK, PAGE_SIZE, vma->vm_page_prot, pgoff, 0);
1545         if (err == -ENOMEM)
1546                 return VM_FAULT_OOM;
1547         if (err)
1548                 return VM_FAULT_SIGBUS;
1549         return VM_FAULT_MAJOR;
1550 }
1551
1552 /*
1553  * These routines also need to handle stuff like marking pages dirty
1554  * and/or accessed for architectures that don't do it in hardware (most
1555  * RISC architectures).  The early dirtying is also good on the i386.
1556  *
1557  * There is also a hook called "update_mmu_cache()" that architectures
1558  * with external mmu caches can use to update those (ie the Sparc or
1559  * PowerPC hashed page tables that act as extended TLBs).
1560  *
1561  * Note the "page_table_lock". It is to protect against kswapd removing
1562  * pages from under us. Note that kswapd only ever _removes_ pages, never
1563  * adds them. As such, once we have noticed that the page is not present,
1564  * we can drop the lock early.
1565  *
1566  * The adding of pages is protected by the MM semaphore (which we hold),
1567  * so we don't need to worry about a page suddenly being added into
1568  * our VM.
1569  *
1570  * We enter with the pagetable spinlock held; we are supposed to
1571  * release it when done.
1572  */
1573 static inline int handle_pte_fault(struct mm_struct *mm,
1574         struct vm_area_struct * vma, unsigned long address,
1575         int write_access, pte_t *pte, pmd_t *pmd)
1576 {
1577         pte_t entry;
1578
1579         entry = *pte;
1580         if (!pte_present(entry)) {
1581                 /*
1582                  * If it truly wasn't present, we know that kswapd
1583                  * and the PTE updates will not touch it later. So
1584                  * drop the lock.
1585                  */
1586                 if (pte_none(entry))
1587                         return do_no_page(mm, vma, address, write_access, pte, pmd);
1588                 if (pte_file(entry))
1589                         return do_file_page(mm, vma, address, write_access, pte, pmd);
1590                 return do_swap_page(mm, vma, address, pte, pmd, entry, write_access);
1591         }
1592
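             /*
              * The pte is present.  A write to a non-writable pte is a
              * copy-on-write (or protection) fault; otherwise we just
              * mark the pte dirty and young and write it back.
              */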
1593         if (write_access) {
1594                 if (!pte_write(entry))
1595                         return do_wp_page(mm, vma, address, pte, pmd, entry);
1596
1597                 entry = pte_mkdirty(entry);
1598         }
1599         entry = pte_mkyoung(entry);
1600         ptep_establish(vma, address, pte, entry);
1601         update_mmu_cache(vma, address, entry);
1602         pte_unmap(pte);
1603         spin_unlock(&mm->page_table_lock);
1604         return VM_FAULT_MINOR;
1605 }
1606
1607 /*
1608  * By the time we get here, we already hold the mm semaphore
1609  */
1610 int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct * vma,
1611         unsigned long address, int write_access)
1612 {
1613         pgd_t *pgd;
1614         pmd_t *pmd;
1615
1616         __set_current_state(TASK_RUNNING);
1617         pgd = pgd_offset(mm, address);
1618
1619         inc_page_state(pgfault);
1620
1621         if (is_vm_hugetlb_page(vma))
1622                 return VM_FAULT_SIGBUS; /* hugetlb vmas never demand-fault; only truncation gets us here. */
1623
1624         /*
1625          * We need the page table lock to synchronize with kswapd
1626          * and the SMP-safe atomic PTE updates.
1627          */
1628         spin_lock(&mm->page_table_lock);
1629         pmd = pmd_alloc(mm, pgd, address);
1630
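             /*
              * On success, handle_pte_fault() releases the
              * page_table_lock for us; if either allocation fails we
              * unlock here and report out-of-memory.
              */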
1631         if (pmd) {
1632                 pte_t * pte = pte_alloc_map(mm, pmd, address);
1633                 if (pte)
1634                         return handle_pte_fault(mm, vma, address, write_access, pte, pmd);
1635         }
1636         spin_unlock(&mm->page_table_lock);
1637         return VM_FAULT_OOM;
1638 }
1639
1640 /*
1641  * Allocate page middle directory.
1642  *
1643  * We've already handled the fast-path in-line, and we own the
1644  * page table lock.
1645  *
1646  * On a two-level page table, this ends up actually being entirely
1647  * optimized away.
1648  */
1649 pmd_t *__pmd_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address)
1650 {
1651         pmd_t *new;
1652
1653         spin_unlock(&mm->page_table_lock);
1654         new = pmd_alloc_one(mm, address);
1655         spin_lock(&mm->page_table_lock);
1656         if (!new)
1657                 return NULL;
1658
1659         /*
1660          * Because we dropped the lock, we should re-check the
1661          * entry, as somebody else could have populated it..
1662          */
1663         if (pgd_present(*pgd)) {
1664                 pmd_free(new);
1665                 goto out;
1666         }
1667         pgd_populate(mm, pgd, new);
1668 out:
1669         return pmd_offset(pgd, address);
1670 }
1671
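     /*
      * Fault in every page in the range [addr, end), which must lie
      * within a single vma, by calling get_user_pages(); pages in a
      * writable vma are faulted in for write.  Returns 0 on success.
      */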
1672 int make_pages_present(unsigned long addr, unsigned long end)
1673 {
1674         int ret, len, write;
1675         struct vm_area_struct * vma;
1676
1677         vma = find_vma(current->mm, addr);
1678         write = (vma->vm_flags & VM_WRITE) != 0;
1679         if (addr >= end)
1680                 BUG();
1681         if (end > vma->vm_end)
1682                 BUG();
1683         len = (end+PAGE_SIZE-1)/PAGE_SIZE-addr/PAGE_SIZE;
1684         ret = get_user_pages(current, current->mm, addr,
1685                         len, write, 0, NULL, NULL);
1686         if (ret < 0)
1687                 return ret;
1688         return ret == len ? 0 : -1;
1689 }
1690
1691 /* 
1692  * Map a vmalloc()-space virtual address to the physical page.
1693  */
1694 struct page * vmalloc_to_page(void * vmalloc_addr)
1695 {
1696         unsigned long addr = (unsigned long) vmalloc_addr;
1697         struct page *page = NULL;
1698         pgd_t *pgd = pgd_offset_k(addr);
1699         pmd_t *pmd;
1700         pte_t *ptep, pte;
1701   
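             /*
              * Walk the kernel page tables by hand to find the page
              * backing this vmalloc address; the pte is mapped and
              * sampled with preemption disabled.
              */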
1702         if (!pgd_none(*pgd)) {
1703                 pmd = pmd_offset(pgd, addr);
1704                 if (!pmd_none(*pmd)) {
1705                         preempt_disable();
1706                         ptep = pte_offset_map(pmd, addr);
1707                         pte = *ptep;
1708                         if (pte_present(pte))
1709                                 page = pte_page(pte);
1710                         pte_unmap(ptep);
1711                         preempt_enable();
1712                 }
1713         }
1714         return page;
1715 }
1716
1717 EXPORT_SYMBOL(vmalloc_to_page);
1718
1719 #if !defined(CONFIG_ARCH_GATE_AREA)
1720
1721 #if defined(AT_SYSINFO_EHDR)
1722 struct vm_area_struct gate_vma;
1723
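     /*
      * Synthetic "gate" vma covering the fixmap-mapped page that the
      * kernel advertises to userspace via AT_SYSINFO_EHDR (the
      * vsyscall/vDSO page).  It has no mm and is shared by all tasks.
      */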
1724 static int __init gate_vma_init(void)
1725 {
1726         gate_vma.vm_mm = NULL;
1727         gate_vma.vm_start = FIXADDR_USER_START;
1728         gate_vma.vm_end = FIXADDR_USER_END;
1729         gate_vma.vm_page_prot = PAGE_READONLY;
1730         gate_vma.vm_flags = 0;
1731         return 0;
1732 }
1733 __initcall(gate_vma_init);
1734 #endif
1735
1736 struct vm_area_struct *get_gate_vma(struct task_struct *tsk)
1737 {
1738 #ifdef AT_SYSINFO_EHDR
1739         return &gate_vma;
1740 #else
1741         return NULL;
1742 #endif
1743 }
1744
1745 int in_gate_area(struct task_struct *task, unsigned long addr)
1746 {
1747 #ifdef AT_SYSINFO_EHDR
1748         if ((addr >= FIXADDR_USER_START) && (addr < FIXADDR_USER_END))
1749                 return 1;
1750 #endif
1751         return 0;
1752 }
1753
1754 #endif