/* drivers/gpu/drm/ttm/ttm_page_alloc.c
 * (linux-flexiantxendom0-3.2.10.git, updated to 3.3-final)
 */
/*
 * Copyright (c) Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sub license,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie <airlied@redhat.com>
 *          Jerome Glisse <jglisse@redhat.com>
 *          Pauli Nieminen <suokkos@gmail.com>
 */

/* simple list based uncached page pool
 * - Pool collects recently freed pages for reuse
 * - Use page->lru to keep a free list
 * - doesn't track currently in use pages
 */
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/highmem.h>
#include <linux/mm_types.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/seq_file.h> /* for seq_printf */
#include <linux/slab.h>
#include <linux/dma-mapping.h>

#include <linux/atomic.h>

#include "ttm/ttm_bo_driver.h"
#include "ttm/ttm_page_alloc.h"

#ifdef TTM_HAS_AGP
#include <asm/agp.h>
#endif

#define NUM_PAGES_TO_ALLOC		(PAGE_SIZE/sizeof(struct page *))
#define SMALL_ALLOCATION		16
#define FREE_ALL_PAGES			(~0U)
/* times are in msecs */
#define PAGE_FREE_INTERVAL		1000
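
/*
 * Worked example (illustrative, assuming a typical x86-64 build): with
 * PAGE_SIZE == 4096 and 8-byte pointers, NUM_PAGES_TO_ALLOC is 4096/8 = 512
 * pages, i.e. each alloc/free batch moves up to 2 MiB and the page pointer
 * array used for a batch fits exactly one page.
 */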

/**
 * struct ttm_page_pool - Pool to reuse recently allocated uc/wc pages.
 *
 * @lock: Protects the shared pool from concurrent access. Must be used with
 * irqsave/irqrestore variants because pool allocator may be called from
 * delayed work.
 * @fill_lock: Prevent concurrent calls to fill.
 * @list: Pool of free uc/wc pages for fast reuse.
 * @gfp_flags: Flags to pass for alloc_page.
 * @npages: Number of pages in pool.
 * @name: Pool name, shown in debugfs.
 * @nfrees: Statistics counter of pages freed from this pool.
 * @nrefills: Statistics counter of pool refills.
 */
struct ttm_page_pool {
	spinlock_t		lock;
	bool			fill_lock;
	struct list_head	list;
	gfp_t			gfp_flags;
	unsigned		npages;
	char			*name;
	unsigned long		nfrees;
	unsigned long		nrefills;
};

/**
 * Limits for the pool. They are handled without locks because the only place
 * they may change is the sysfs store functions. They won't have an immediate
 * effect anyway, so forcing serialized access to them is pointless.
 */

struct ttm_pool_opts {
	unsigned	alloc_size;
	unsigned	max_size;
	unsigned	small;
};

#define NUM_POOLS 4

/**
 * struct ttm_pool_manager - Holds memory pools for fast allocation
 *
 * Manager is a read only object for the pool code so it doesn't need locking.
 *
 * @kobj: Kobject that exposes the pool limits through sysfs.
 * @mm_shrink: Shrinker registered with mm so the pools give pages back
 * under memory pressure.
 * @options: Limits for the pools; see struct ttm_pool_opts.
 * @pools: All pool objects in use.
 **/
struct ttm_pool_manager {
	struct kobject		kobj;
	struct shrinker		mm_shrink;
	struct ttm_pool_opts	options;

	union {
		struct ttm_page_pool	pools[NUM_POOLS];
		struct {
			struct ttm_page_pool	wc_pool;
			struct ttm_page_pool	uc_pool;
			struct ttm_page_pool	wc_pool_dma32;
			struct ttm_page_pool	uc_pool_dma32;
		};
	};
};
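
/*
 * Note on the union above: the anonymous struct aliases pools[], so the
 * member order (wc, uc, wc dma32, uc dma32) must match the index computed
 * in ttm_get_pool(), where bit 0 selects uc over wc and bit 1 selects the
 * DMA32 variants.
 */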

static struct attribute ttm_page_pool_max = {
	.name = "pool_max_size",
	.mode = S_IRUGO | S_IWUSR
};
static struct attribute ttm_page_pool_small = {
	.name = "pool_small_allocation",
	.mode = S_IRUGO | S_IWUSR
};
static struct attribute ttm_page_pool_alloc_size = {
	.name = "pool_allocation_size",
	.mode = S_IRUGO | S_IWUSR
};

static struct attribute *ttm_pool_attrs[] = {
	&ttm_page_pool_max,
	&ttm_page_pool_small,
	&ttm_page_pool_alloc_size,
	NULL
};

static void ttm_pool_kobj_release(struct kobject *kobj)
{
	struct ttm_pool_manager *m =
		container_of(kobj, struct ttm_pool_manager, kobj);
	kfree(m);
}

static ssize_t ttm_pool_store(struct kobject *kobj,
		struct attribute *attr, const char *buffer, size_t size)
{
	struct ttm_pool_manager *m =
		container_of(kobj, struct ttm_pool_manager, kobj);
	int chars;
	unsigned val;
	chars = sscanf(buffer, "%u", &val);
	if (chars == 0)
		return size;

	/* Convert kb to number of pages */
	val = val / (PAGE_SIZE >> 10);
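	/*
	 * Example (illustrative): with 4 KiB pages (PAGE_SIZE >> 10) == 4,
	 * so writing "1024" (KiB) through sysfs stores 256 pages here.
	 */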

	if (attr == &ttm_page_pool_max)
		m->options.max_size = val;
	else if (attr == &ttm_page_pool_small)
		m->options.small = val;
	else if (attr == &ttm_page_pool_alloc_size) {
		if (val > NUM_PAGES_TO_ALLOC*8) {
			printk(KERN_ERR TTM_PFX
			       "Setting allocation size to %lu "
			       "is not allowed. Recommended size is "
			       "%lu\n",
			       NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 7),
			       NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 10));
			return size;
		} else if (val > NUM_PAGES_TO_ALLOC) {
			printk(KERN_WARNING TTM_PFX
			       "Setting allocation size to "
			       "larger than %lu is not recommended.\n",
			       NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 10));
		}
		m->options.alloc_size = val;
	}

	return size;
}

static ssize_t ttm_pool_show(struct kobject *kobj,
		struct attribute *attr, char *buffer)
{
	struct ttm_pool_manager *m =
		container_of(kobj, struct ttm_pool_manager, kobj);
	unsigned val = 0;

	if (attr == &ttm_page_pool_max)
		val = m->options.max_size;
	else if (attr == &ttm_page_pool_small)
		val = m->options.small;
	else if (attr == &ttm_page_pool_alloc_size)
		val = m->options.alloc_size;

	val = val * (PAGE_SIZE >> 10);

	return snprintf(buffer, PAGE_SIZE, "%u\n", val);
}

static const struct sysfs_ops ttm_pool_sysfs_ops = {
	.show = &ttm_pool_show,
	.store = &ttm_pool_store,
};

static struct kobj_type ttm_pool_kobj_type = {
	.release = &ttm_pool_kobj_release,
	.sysfs_ops = &ttm_pool_sysfs_ops,
	.default_attrs = ttm_pool_attrs,
};
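
/*
 * Sysfs usage sketch (illustrative; the kobject is added as "pool" under the
 * TTM memory accounting kobject, so the exact path depends on where that
 * parent is registered). All three files take and report sizes in KiB:
 *
 *	# cat .../ttm/pool/pool_max_size
 *	# echo 65536 > .../ttm/pool/pool_max_size   (cap each pool at 64 MiB)
 */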

static struct ttm_pool_manager *_manager;

#ifndef CONFIG_X86
static int set_pages_array_wb(struct page **pages, int addrinarray)
{
#ifdef TTM_HAS_AGP
	int i;

	for (i = 0; i < addrinarray; i++)
		unmap_page_from_agp(pages[i]);
#endif
	return 0;
}

static int set_pages_array_wc(struct page **pages, int addrinarray)
{
#ifdef TTM_HAS_AGP
	int i;

	for (i = 0; i < addrinarray; i++)
		map_page_into_agp(pages[i]);
#endif
	return 0;
}

static int set_pages_array_uc(struct page **pages, int addrinarray)
{
#ifdef TTM_HAS_AGP
	int i;

	for (i = 0; i < addrinarray; i++)
		map_page_into_agp(pages[i]);
#endif
	return 0;
}
#endif

/**
 * Select the right pool for the requested caching state and ttm flags.
 */
static struct ttm_page_pool *ttm_get_pool(int flags,
		enum ttm_caching_state cstate)
{
	int pool_index;

	if (cstate == tt_cached)
		return NULL;

	if (cstate == tt_wc)
		pool_index = 0x0;
	else
		pool_index = 0x1;

	if (flags & TTM_PAGE_FLAG_DMA32)
		pool_index |= 0x2;

	return &_manager->pools[pool_index];
}

/* set memory back to wb and free the pages. */
static void ttm_pages_put(struct page *pages[], unsigned npages)
{
	unsigned i;
	if (set_pages_array_wb(pages, npages))
		printk(KERN_ERR TTM_PFX "Failed to set %d pages to wb!\n",
				npages);
	for (i = 0; i < npages; ++i)
		__free_page(pages[i]);
}

static void ttm_pool_update_free_locked(struct ttm_page_pool *pool,
		unsigned freed_pages)
{
	pool->npages -= freed_pages;
	pool->nfrees += freed_pages;
}

/**
 * Free pages from pool.
 *
 * To prevent hogging the ttm_swap process we only free NUM_PAGES_TO_ALLOC
 * pages at a time.
 *
 * @pool: to free the pages from
 * @nr_free: number of pages to free; if set to FREE_ALL_PAGES all pages in
 * the pool are freed.
 **/
static int ttm_page_pool_free(struct ttm_page_pool *pool, unsigned nr_free)
{
	unsigned long irq_flags;
	struct page *p;
	struct page **pages_to_free;
	unsigned freed_pages = 0,
		 npages_to_free = nr_free;

	if (NUM_PAGES_TO_ALLOC < nr_free)
		npages_to_free = NUM_PAGES_TO_ALLOC;

	pages_to_free = kmalloc(npages_to_free * sizeof(struct page *),
			GFP_KERNEL);
	if (!pages_to_free) {
		printk(KERN_ERR TTM_PFX
		       "Failed to allocate memory for pool free operation.\n");
		return 0;
	}

restart:
	spin_lock_irqsave(&pool->lock, irq_flags);

	list_for_each_entry_reverse(p, &pool->list, lru) {
		if (freed_pages >= npages_to_free)
			break;

		pages_to_free[freed_pages++] = p;
		/* We can only remove NUM_PAGES_TO_ALLOC at a time. */
		if (freed_pages >= NUM_PAGES_TO_ALLOC) {
			/* remove range of pages from the pool */
			__list_del(p->lru.prev, &pool->list);

			ttm_pool_update_free_locked(pool, freed_pages);
			/**
			 * Because changing page caching is costly
			 * we unlock the pool to prevent stalling.
			 */
			spin_unlock_irqrestore(&pool->lock, irq_flags);

			ttm_pages_put(pages_to_free, freed_pages);
			if (likely(nr_free != FREE_ALL_PAGES))
				nr_free -= freed_pages;

			if (NUM_PAGES_TO_ALLOC >= nr_free)
				npages_to_free = nr_free;
			else
				npages_to_free = NUM_PAGES_TO_ALLOC;

			freed_pages = 0;

			/* more pages to free so restart the processing */
			if (nr_free)
				goto restart;

			/* Not allowed to fall through or break because
			 * following context is inside spinlock while we are
			 * outside here.
			 */
			goto out;

		}
	}

	/* remove range of pages from the pool */
	if (freed_pages) {
		__list_del(&p->lru, &pool->list);

		ttm_pool_update_free_locked(pool, freed_pages);
		nr_free -= freed_pages;
	}

	spin_unlock_irqrestore(&pool->lock, irq_flags);

	if (freed_pages)
		ttm_pages_put(pages_to_free, freed_pages);
out:
	kfree(pages_to_free);
	return nr_free;
}
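
/*
 * Note on the contract above (descriptive, not new behavior): the return
 * value is the number of requested pages that were NOT freed, so a shrinker
 * can carry the remainder over to the next pool. Callers that want to empty
 * a pool pass FREE_ALL_PAGES, e.g.:
 *
 *	ttm_page_pool_free(pool, FREE_ALL_PAGES);
 */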

/* Get a good estimate of how many pages are free in the pools */
static int ttm_pool_get_num_unused_pages(void)
{
	unsigned i;
	int total = 0;
	for (i = 0; i < NUM_POOLS; ++i)
		total += _manager->pools[i].npages;

	return total;
}

/**
 * Callback for mm to request pool to reduce the number of pages held.
 */
static int ttm_pool_mm_shrink(struct shrinker *shrink,
			      struct shrink_control *sc)
{
	static atomic_t start_pool = ATOMIC_INIT(0);
	unsigned i;
	unsigned pool_offset = atomic_add_return(1, &start_pool);
	struct ttm_page_pool *pool;
	int shrink_pages = sc->nr_to_scan;

	pool_offset = pool_offset % NUM_POOLS;
	/* select start pool in round robin fashion */
	for (i = 0; i < NUM_POOLS; ++i) {
		unsigned nr_free = shrink_pages;
		if (shrink_pages == 0)
			break;
		pool = &_manager->pools[(i + pool_offset)%NUM_POOLS];
		shrink_pages = ttm_page_pool_free(pool, nr_free);
	}
	/* return estimated number of unused pages in pool */
	return ttm_pool_get_num_unused_pages();
}
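
/*
 * Shrinker semantics in this kernel generation (descriptive): when
 * sc->nr_to_scan is 0 the core only wants the estimate, so the loop above
 * exits immediately; otherwise ttm_page_pool_free() returns the pages still
 * to be freed and that remainder rolls over to the next pool in the round
 * robin.
 */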

static void ttm_pool_mm_shrink_init(struct ttm_pool_manager *manager)
{
	manager->mm_shrink.shrink = &ttm_pool_mm_shrink;
	manager->mm_shrink.seeks = 1;
	register_shrinker(&manager->mm_shrink);
}

static void ttm_pool_mm_shrink_fini(struct ttm_pool_manager *manager)
{
	unregister_shrinker(&manager->mm_shrink);
}

static int ttm_set_pages_caching(struct page **pages,
		enum ttm_caching_state cstate, unsigned cpages)
{
	int r = 0;
	/* Set page caching */
	switch (cstate) {
	case tt_uncached:
		r = set_pages_array_uc(pages, cpages);
		if (r)
			printk(KERN_ERR TTM_PFX
			       "Failed to set %d pages to uc!\n",
			       cpages);
		break;
	case tt_wc:
		r = set_pages_array_wc(pages, cpages);
		if (r)
			printk(KERN_ERR TTM_PFX
			       "Failed to set %d pages to wc!\n",
			       cpages);
		break;
	default:
		break;
	}
	return r;
}

/**
 * Free the pages that failed to change caching state. Any pages that have
 * already changed their caching state should be put back in the pool, but
 * the current implementation simply frees the whole batch.
 */
static void ttm_handle_caching_state_failure(struct list_head *pages,
		int ttm_flags, enum ttm_caching_state cstate,
		struct page **failed_pages, unsigned cpages)
{
	unsigned i;
	/* Failed pages have to be freed */
	for (i = 0; i < cpages; ++i) {
		list_del(&failed_pages[i]->lru);
		__free_page(failed_pages[i]);
	}
}

/**
 * Allocate new pages with correct caching.
 *
 * This function is reentrant if caller updates count depending on number of
 * pages returned in pages array.
 */
static int ttm_alloc_new_pages(struct list_head *pages, gfp_t gfp_flags,
		int ttm_flags, enum ttm_caching_state cstate, unsigned count)
{
	struct page **caching_array;
	struct page *p;
	int r = 0;
	unsigned i, cpages;
	unsigned max_cpages = min(count,
			(unsigned)(PAGE_SIZE/sizeof(struct page *)));

	/* allocate array for page caching change */
	caching_array = kmalloc(max_cpages*sizeof(struct page *), GFP_KERNEL);

	if (!caching_array) {
		printk(KERN_ERR TTM_PFX
		       "Unable to allocate table for new pages.\n");
		return -ENOMEM;
	}

	for (i = 0, cpages = 0; i < count; ++i) {
		p = alloc_page(gfp_flags);

#ifdef CONFIG_XEN
		if (p && (gfp_flags & __GFP_DMA32)) {
			r = xen_limit_pages_to_max_mfn(p, 0, 32);
			if (r) {
				__free_page(p);
				printk(KERN_ERR TTM_PFX
				       "Cannot restrict page (%d).", r);
				p = NULL;
			} else if (gfp_flags & __GFP_ZERO)
				clear_page(page_address(p));
		}
#endif

		if (!p) {
			printk(KERN_ERR TTM_PFX "Unable to get page %u.\n", i);

			/* store already allocated pages in the pool after
			 * setting the caching state */
			if (cpages) {
				r = ttm_set_pages_caching(caching_array,
							  cstate, cpages);
				if (r)
					ttm_handle_caching_state_failure(pages,
						ttm_flags, cstate,
						caching_array, cpages);
			}
			r = -ENOMEM;
			goto out;
		}

#ifdef CONFIG_HIGHMEM
		/* gfp flags of highmem page should never be dma32 so we
		 * should be fine in such case
		 */
		if (!PageHighMem(p))
#endif
		{
			caching_array[cpages++] = p;
			if (cpages == max_cpages) {
				r = ttm_set_pages_caching(caching_array,
						cstate, cpages);
				if (r) {
					ttm_handle_caching_state_failure(pages,
						ttm_flags, cstate,
						caching_array, cpages);
					goto out;
				}
				cpages = 0;
			}
		}

		list_add(&p->lru, pages);
	}

	if (cpages) {
		r = ttm_set_pages_caching(caching_array, cstate, cpages);
		if (r)
			ttm_handle_caching_state_failure(pages,
					ttm_flags, cstate,
					caching_array, cpages);
	}
out:
	kfree(caching_array);

	return r;
}

/**
 * Fill the given pool if there aren't enough pages and the requested number
 * of pages is small.
 */
static void ttm_page_pool_fill_locked(struct ttm_page_pool *pool,
		int ttm_flags, enum ttm_caching_state cstate, unsigned count,
		unsigned long *irq_flags)
{
	struct page *p;
	int r;
	unsigned cpages = 0;
	/**
	 * Only allow one pool fill operation at a time.
	 * If pool doesn't have enough pages for the allocation new pages are
	 * allocated from outside of pool.
	 */
	if (pool->fill_lock)
		return;

	pool->fill_lock = true;

	/* If the allocation request is small and there are not enough
	 * pages in the pool we fill the pool up first. */
	if (count < _manager->options.small
		&& count > pool->npages) {
		struct list_head new_pages;
		unsigned alloc_size = _manager->options.alloc_size;

		/**
		 * Can't change page caching if in irqsave context. We have to
		 * drop the pool->lock.
		 */
		spin_unlock_irqrestore(&pool->lock, *irq_flags);

		INIT_LIST_HEAD(&new_pages);
		r = ttm_alloc_new_pages(&new_pages, pool->gfp_flags, ttm_flags,
				cstate, alloc_size);
		spin_lock_irqsave(&pool->lock, *irq_flags);

		if (!r) {
			list_splice(&new_pages, &pool->list);
			++pool->nrefills;
			pool->npages += alloc_size;
		} else {
			printk(KERN_ERR TTM_PFX
			       "Failed to fill pool (%p).", pool);
			/* If we have any pages left put them to the pool.
			 * Count the new pages, not the whole pool list. */
			list_for_each_entry(p, &new_pages, lru) {
				++cpages;
			}
			list_splice(&new_pages, &pool->list);
			pool->npages += cpages;
		}
	}
	pool->fill_lock = false;
}

/**
 * Cut 'count' number of pages from the pool and put them on the return list.
 *
 * @return count of pages still required to fulfill the request.
 */
static unsigned ttm_page_pool_get_pages(struct ttm_page_pool *pool,
					struct list_head *pages,
					int ttm_flags,
					enum ttm_caching_state cstate,
					unsigned count)
{
	unsigned long irq_flags;
	struct list_head *p;
	unsigned i;

	spin_lock_irqsave(&pool->lock, irq_flags);
	ttm_page_pool_fill_locked(pool, ttm_flags, cstate, count, &irq_flags);

	if (count >= pool->npages) {
		/* take all pages from the pool */
		list_splice_init(&pool->list, pages);
		count -= pool->npages;
		pool->npages = 0;
		goto out;
	}
	/* Find the last page to include for the requested number of pages.
	 * Walk from whichever end of the list is closer to halve the search
	 * distance. */
	if (count <= pool->npages/2) {
		i = 0;
		list_for_each(p, &pool->list) {
			if (++i == count)
				break;
		}
	} else {
		i = pool->npages + 1;
		list_for_each_prev(p, &pool->list) {
			if (--i == count)
				break;
		}
	}
	/* Cut 'count' number of pages from the pool */
	list_cut_position(pages, &pool->list, p);
	pool->npages -= count;
	count = 0;
out:
	spin_unlock_irqrestore(&pool->lock, irq_flags);
	return count;
}
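
/*
 * Worked example of the bidirectional search above (illustrative): with
 * npages == 100 and count == 90, walking backwards visits only 11 nodes
 * (i runs from 100 down to 90) instead of 90 forward steps; with count == 10
 * the forward walk stops after 10 nodes.
 */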

/* Put all pages in the pages list into the correct pool to wait for reuse */
static void ttm_put_pages(struct page **pages, unsigned npages, int flags,
			  enum ttm_caching_state cstate)
{
	unsigned long irq_flags;
	struct ttm_page_pool *pool = ttm_get_pool(flags, cstate);
	unsigned i;

	if (pool == NULL) {
		/* No pool for this memory type so free the pages */
		for (i = 0; i < npages; i++) {
			if (pages[i]) {
				if (page_count(pages[i]) != 1)
					printk(KERN_ERR TTM_PFX
					       "Erroneous page count. "
					       "Leaking pages.\n");
				__free_page(pages[i]);
				pages[i] = NULL;
			}
		}
		return;
	}

	spin_lock_irqsave(&pool->lock, irq_flags);
	for (i = 0; i < npages; i++) {
		if (pages[i]) {
			if (page_count(pages[i]) != 1)
				printk(KERN_ERR TTM_PFX
				       "Erroneous page count. "
				       "Leaking pages.\n");
			list_add_tail(&pages[i]->lru, &pool->list);
			pages[i] = NULL;
			pool->npages++;
		}
	}
	/* Check that we don't go over the pool limit */
	npages = 0;
	if (pool->npages > _manager->options.max_size) {
		npages = pool->npages - _manager->options.max_size;
		/* free at least NUM_PAGES_TO_ALLOC pages
		 * to reduce calls to set_memory_wb */
		if (npages < NUM_PAGES_TO_ALLOC)
			npages = NUM_PAGES_TO_ALLOC;
	}
	spin_unlock_irqrestore(&pool->lock, irq_flags);
	if (npages)
		ttm_page_pool_free(pool, npages);
}

/*
 * On success the pages array will hold npages correctly cached pages.
 */
static int ttm_get_pages(struct page **pages, unsigned npages, int flags,
			 enum ttm_caching_state cstate)
{
	struct ttm_page_pool *pool = ttm_get_pool(flags, cstate);
	struct list_head plist;
	struct page *p = NULL;
	gfp_t gfp_flags = GFP_USER;
	unsigned count;
	int r;

	/* set zero flag for page allocation if required */
	if (flags & TTM_PAGE_FLAG_ZERO_ALLOC)
		gfp_flags |= __GFP_ZERO;

	/* No pool for cached pages */
	if (pool == NULL) {
		if (flags & TTM_PAGE_FLAG_DMA32)
			gfp_flags |= GFP_DMA32;
		else
			gfp_flags |= GFP_HIGHUSER;

		for (r = 0; r < npages; ++r) {
			p = alloc_page(gfp_flags);
			if (!p) {
				printk(KERN_ERR TTM_PFX
				       "Unable to allocate page.");
				return -ENOMEM;
			}

#ifdef CONFIG_XEN
			if (flags & TTM_PAGE_FLAG_DMA32) {
				int rc = xen_limit_pages_to_max_mfn(p, 0, 32);

				if (rc) {
					__free_page(p);
					printk(KERN_ERR TTM_PFX
					       "Unable to restrict page (%d).",
					       rc);
					return rc;
				}
				if (flags & TTM_PAGE_FLAG_ZERO_ALLOC)
					clear_page(page_address(p));
			}
#endif

			pages[r] = p;
		}
		return 0;
	}

	/* combine zero flag to pool flags */
	gfp_flags |= pool->gfp_flags;

	/* First we take pages from the pool */
	INIT_LIST_HEAD(&plist);
	npages = ttm_page_pool_get_pages(pool, &plist, flags, cstate, npages);
	count = 0;
	list_for_each_entry(p, &plist, lru) {
		pages[count++] = p;
	}

	/* clear the pages coming from the pool if requested */
	if (flags & TTM_PAGE_FLAG_ZERO_ALLOC) {
		list_for_each_entry(p, &plist, lru) {
			clear_page(page_address(p));
		}
	}

	/* If the pool didn't have enough pages allocate new ones. */
	if (npages > 0) {
		/* ttm_alloc_new_pages doesn't reference pool so we can run
		 * multiple requests in parallel.
		 **/
		INIT_LIST_HEAD(&plist);
		r = ttm_alloc_new_pages(&plist, gfp_flags, flags, cstate, npages);
		list_for_each_entry(p, &plist, lru) {
			pages[count++] = p;
		}
		if (r) {
			/* If there are any pages in the list put them back to
			 * the pool. */
			printk(KERN_ERR TTM_PFX
			       "Failed to allocate extra pages "
			       "for large request.");
			ttm_put_pages(pages, count, flags, cstate);
			return r;
		}
	}

	return 0;
}

static void ttm_page_pool_init_locked(struct ttm_page_pool *pool, int flags,
		char *name)
{
	spin_lock_init(&pool->lock);
	pool->fill_lock = false;
	INIT_LIST_HEAD(&pool->list);
	pool->npages = pool->nfrees = 0;
	pool->gfp_flags = flags;
	pool->name = name;
}

int ttm_page_alloc_init(struct ttm_mem_global *glob, unsigned max_pages)
{
	int ret;

	WARN_ON(_manager);

	printk(KERN_INFO TTM_PFX "Initializing pool allocator.\n");

	_manager = kzalloc(sizeof(*_manager), GFP_KERNEL);
	if (!_manager)
		return -ENOMEM;

	ttm_page_pool_init_locked(&_manager->wc_pool, GFP_HIGHUSER, "wc");

	ttm_page_pool_init_locked(&_manager->uc_pool, GFP_HIGHUSER, "uc");

	ttm_page_pool_init_locked(&_manager->wc_pool_dma32,
				  GFP_USER | GFP_DMA32, "wc dma");

	ttm_page_pool_init_locked(&_manager->uc_pool_dma32,
				  GFP_USER | GFP_DMA32, "uc dma");

	_manager->options.max_size = max_pages;
	_manager->options.small = SMALL_ALLOCATION;
	_manager->options.alloc_size = NUM_PAGES_TO_ALLOC;

	ret = kobject_init_and_add(&_manager->kobj, &ttm_pool_kobj_type,
				   &glob->kobj, "pool");
	if (unlikely(ret != 0)) {
		kobject_put(&_manager->kobj);
		_manager = NULL;
		return ret;
	}

	ttm_pool_mm_shrink_init(_manager);

	return 0;
}
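
/*
 * Note (descriptive, based on how core TTM of this era wires things up):
 * ttm_page_alloc_init()/ttm_page_alloc_fini() are not called by drivers
 * directly; the global memory accounting code (ttm_memory.c) invokes them
 * once while setting up the ttm_mem_global object.
 */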

void ttm_page_alloc_fini(void)
{
	int i;

	printk(KERN_INFO TTM_PFX "Finalizing pool allocator.\n");
	ttm_pool_mm_shrink_fini(_manager);

	for (i = 0; i < NUM_POOLS; ++i)
		ttm_page_pool_free(&_manager->pools[i], FREE_ALL_PAGES);

	kobject_put(&_manager->kobj);
	_manager = NULL;
}

int ttm_pool_populate(struct ttm_tt *ttm)
{
	struct ttm_mem_global *mem_glob = ttm->glob->mem_glob;
	unsigned i;
	int ret;

	if (ttm->state != tt_unpopulated)
		return 0;

	for (i = 0; i < ttm->num_pages; ++i) {
		ret = ttm_get_pages(&ttm->pages[i], 1,
				    ttm->page_flags,
				    ttm->caching_state);
		if (ret != 0) {
			ttm_pool_unpopulate(ttm);
			return -ENOMEM;
		}

		ret = ttm_mem_global_alloc_page(mem_glob, ttm->pages[i],
						false, false);
		if (unlikely(ret != 0)) {
			ttm_pool_unpopulate(ttm);
			return -ENOMEM;
		}
	}

	if (unlikely(ttm->page_flags & TTM_PAGE_FLAG_SWAPPED)) {
		ret = ttm_tt_swapin(ttm);
		if (unlikely(ret != 0)) {
			ttm_pool_unpopulate(ttm);
			return ret;
		}
	}

	ttm->state = tt_unbound;
	return 0;
}
EXPORT_SYMBOL(ttm_pool_populate);

void ttm_pool_unpopulate(struct ttm_tt *ttm)
{
	unsigned i;

	for (i = 0; i < ttm->num_pages; ++i) {
		if (ttm->pages[i]) {
			ttm_mem_global_free_page(ttm->glob->mem_glob,
						 ttm->pages[i]);
			ttm_put_pages(&ttm->pages[i], 1,
				      ttm->page_flags,
				      ttm->caching_state);
		}
	}
	ttm->state = tt_unpopulated;
}
EXPORT_SYMBOL(ttm_pool_unpopulate);
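
/*
 * Usage sketch (hypothetical driver code, not part of this file): drivers
 * that use the pool allocator typically forward their ttm_tt populate hooks
 * straight to these helpers:
 *
 *	static int mydrv_ttm_tt_populate(struct ttm_tt *ttm)
 *	{
 *		return ttm_pool_populate(ttm);
 *	}
 *
 *	static void mydrv_ttm_tt_unpopulate(struct ttm_tt *ttm)
 *	{
 *		ttm_pool_unpopulate(ttm);
 *	}
 */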

int ttm_page_alloc_debugfs(struct seq_file *m, void *data)
{
	struct ttm_page_pool *p;
	unsigned i;
	char *h[] = {"pool", "refills", "pages freed", "size"};
	if (!_manager) {
		seq_printf(m, "No pool allocator running.\n");
		return 0;
	}
	seq_printf(m, "%6s %12s %13s %8s\n",
			h[0], h[1], h[2], h[3]);
	for (i = 0; i < NUM_POOLS; ++i) {
		p = &_manager->pools[i];

		seq_printf(m, "%6s %12lu %13lu %8u\n",
				p->name, p->nrefills,
				p->nfrees, p->npages);
	}
	return 0;
}
EXPORT_SYMBOL(ttm_page_alloc_debugfs);
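
/*
 * Example debugfs output (illustrative values only):
 *
 *	  pool      refills   pages freed     size
 *	    wc           10          2560      512
 *	    uc            2           128       64
 *	wc dma            0             0        0
 *	uc dma            0             0        0
 */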