drivers/gpu/drm/ttm/ttm_bo_vm.c
/**************************************************************************
 *
 * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
 */

#define pr_fmt(fmt) "[TTM] " fmt

#include <ttm/ttm_module.h>
#include <ttm/ttm_bo_driver.h>
#include <ttm/ttm_placement.h>
#include <linux/mm.h>
#include <linux/rbtree.h>
#include <linux/module.h>
#include <linux/uaccess.h>

#define TTM_BO_VM_NUM_PREFAULT 16

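/*
 * ttm_bo_vm_lookup_rb - Walk the device's address-space red-black tree and
 * return the buffer object whose address-space node fully covers the page
 * range [page_start, page_start + num_pages), or NULL if no object does.
 * Callers hold bdev->vm_lock for reading.
 */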
static struct ttm_buffer_object *ttm_bo_vm_lookup_rb(struct ttm_bo_device *bdev,
                                                     unsigned long page_start,
                                                     unsigned long num_pages)
{
        struct rb_node *cur = bdev->addr_space_rb.rb_node;
        unsigned long cur_offset;
        struct ttm_buffer_object *bo;
        struct ttm_buffer_object *best_bo = NULL;

        while (likely(cur != NULL)) {
                bo = rb_entry(cur, struct ttm_buffer_object, vm_rb);
                cur_offset = bo->vm_node->start;
                if (page_start >= cur_offset) {
                        cur = cur->rb_right;
                        best_bo = bo;
                        if (page_start == cur_offset)
                                break;
                } else
                        cur = cur->rb_left;
        }

        if (unlikely(best_bo == NULL))
                return NULL;

        if (unlikely((best_bo->vm_node->start + best_bo->num_pages) <
                     (page_start + num_pages)))
                return NULL;

        return best_bo;
}

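/*
 * ttm_bo_vm_fault - Page-fault handler for TTM mappings. Reserves the
 * buffer object with a trylock (asking for the fault to be retried on
 * contention, to avoid inverting the mmap_sem / bo_reserve lock order),
 * waits for a pending pipelined move, sets up the page protection for the
 * current placement and then prefaults up to TTM_BO_VM_NUM_PREFAULT pages
 * with vm_insert_mixed().
 */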
static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
        struct ttm_buffer_object *bo = (struct ttm_buffer_object *)
            vma->vm_private_data;
        struct ttm_bo_device *bdev = bo->bdev;
        unsigned long page_offset;
        unsigned long page_last;
        unsigned long pfn;
        struct ttm_tt *ttm = NULL;
        struct page *page;
        int ret;
        int i;
        unsigned long address = (unsigned long)vmf->virtual_address;
        int retval = VM_FAULT_NOPAGE;
        struct ttm_mem_type_manager *man =
                &bdev->man[bo->mem.mem_type];

        /*
         * Work around locking order reversal in fault / nopfn
         * between mmap_sem and bo_reserve: Perform a trylock operation
         * for reserve, and if it fails, retry the fault after scheduling.
         */

        ret = ttm_bo_reserve(bo, true, true, false, 0);
        if (unlikely(ret != 0)) {
                if (ret == -EBUSY)
                        set_need_resched();
                return VM_FAULT_NOPAGE;
        }

        if (bdev->driver->fault_reserve_notify) {
                ret = bdev->driver->fault_reserve_notify(bo);
                switch (ret) {
                case 0:
                        break;
                case -EBUSY:
                        set_need_resched();
                        /* fall through */
                case -ERESTARTSYS:
                        retval = VM_FAULT_NOPAGE;
                        goto out_unlock;
                default:
                        retval = VM_FAULT_SIGBUS;
                        goto out_unlock;
                }
        }

        /*
         * Wait for buffer data in transit, due to a pipelined
         * move.
         */

        spin_lock(&bdev->fence_lock);
        if (test_bit(TTM_BO_PRIV_FLAG_MOVING, &bo->priv_flags)) {
                ret = ttm_bo_wait(bo, false, true, false);
                spin_unlock(&bdev->fence_lock);
                if (unlikely(ret != 0)) {
                        retval = (ret != -ERESTARTSYS) ?
                            VM_FAULT_SIGBUS : VM_FAULT_NOPAGE;
                        goto out_unlock;
                }
        } else
                spin_unlock(&bdev->fence_lock);

        ret = ttm_mem_io_lock(man, true);
        if (unlikely(ret != 0)) {
                retval = VM_FAULT_NOPAGE;
                goto out_unlock;
        }
        ret = ttm_mem_io_reserve_vm(bo);
        if (unlikely(ret != 0)) {
                retval = VM_FAULT_SIGBUS;
                goto out_io_unlock;
        }

        page_offset = ((address - vma->vm_start) >> PAGE_SHIFT) +
            bo->vm_node->start - vma->vm_pgoff;
        page_last = ((vma->vm_end - vma->vm_start) >> PAGE_SHIFT) +
            bo->vm_node->start - vma->vm_pgoff;

        if (unlikely(page_offset >= bo->num_pages)) {
                retval = VM_FAULT_SIGBUS;
                goto out_io_unlock;
        }

        /*
         * Strictly, we're not allowed to modify vma->vm_page_prot here,
         * since the mmap_sem is only held in read mode. However, we
         * modify only the caching bits of vma->vm_page_prot and
         * consider those bits protected by the bo->mutex, as we should
         * be the only writers.
         * There shouldn't really be any readers of these bits except
         * within vm_insert_mixed()? fork?
         *
         * TODO: Add a list of vmas to the bo, and change the
         * vma->vm_page_prot when the object changes caching policy, with
         * the correct locks held.
         */
        if (bo->mem.bus.is_iomem) {
                vma->vm_page_prot = ttm_io_prot(bo->mem.placement,
                                                vma->vm_page_prot);
#if defined(CONFIG_XEN) && defined(_PAGE_IOMAP)
                pgprot_val(vma->vm_page_prot) |= _PAGE_IOMAP;
#endif
        } else {
#if defined(CONFIG_XEN) && defined(_PAGE_IOMAP)
                pgprot_val(vma->vm_page_prot) &= ~_PAGE_IOMAP;
#endif
                ttm = bo->ttm;
                vma->vm_page_prot = (bo->mem.placement & TTM_PL_FLAG_CACHED) ?
                    vm_get_page_prot(vma->vm_flags) :
                    ttm_io_prot(bo->mem.placement, vma->vm_page_prot);

                /* Allocate all pages at once; this is the most common usage. */
                if (ttm->bdev->driver->ttm_tt_populate(ttm)) {
                        retval = VM_FAULT_OOM;
                        goto out_io_unlock;
                }
        }

        /*
         * Speculatively prefault a number of pages. Only error on
         * first page.
         */
        for (i = 0; i < TTM_BO_VM_NUM_PREFAULT; ++i) {
                if (bo->mem.bus.is_iomem)
                        pfn = ((bo->mem.bus.base + bo->mem.bus.offset) >> PAGE_SHIFT) + page_offset;
                else {
                        page = ttm->pages[page_offset];
                        if (unlikely(!page && i == 0)) {
                                retval = VM_FAULT_OOM;
                                goto out_io_unlock;
                        } else if (unlikely(!page)) {
                                break;
                        }
                        pfn = page_to_pfn(page);
                }

                ret = vm_insert_mixed(vma, address, pfn);
                /*
                 * Somebody beat us to this PTE, or we prefaulted into an
                 * already populated PTE, or prefaulting returned an error.
                 */

                if (unlikely((ret == -EBUSY) || (ret != 0 && i > 0)))
                        break;
                else if (unlikely(ret != 0)) {
                        retval =
                            (ret == -ENOMEM) ? VM_FAULT_OOM : VM_FAULT_SIGBUS;
                        goto out_io_unlock;
                }

                address += PAGE_SIZE;
                if (unlikely(++page_offset >= page_last))
                        break;
        }
out_io_unlock:
        ttm_mem_io_unlock(man);
out_unlock:
        ttm_bo_unreserve(bo);
        return retval;
}

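/*
 * ttm_bo_vm_open - Called when a VMA backed by a buffer object is
 * duplicated (e.g. on fork or VMA split); takes an extra reference so the
 * object outlives every mapping that points at it.
 */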
static void ttm_bo_vm_open(struct vm_area_struct *vma)
{
        struct ttm_buffer_object *bo =
            (struct ttm_buffer_object *)vma->vm_private_data;

        (void)ttm_bo_reference(bo);
}

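/*
 * ttm_bo_vm_close - Drop the reference held through vma->vm_private_data
 * when the mapping goes away.
 */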
static void ttm_bo_vm_close(struct vm_area_struct *vma)
{
        struct ttm_buffer_object *bo = (struct ttm_buffer_object *)vma->vm_private_data;

        ttm_bo_unref(&bo);
        vma->vm_private_data = NULL;
}

static const struct vm_operations_struct ttm_bo_vm_ops = {
        .fault = ttm_bo_vm_fault,
        .open = ttm_bo_vm_open,
        .close = ttm_bo_vm_close
};

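/*
 * ttm_bo_mmap - mmap entry point for TTM-based drivers. Looks up the
 * buffer object backing the requested offset range, lets the driver
 * verify access rights via verify_access(), and installs ttm_bo_vm_ops.
 * The reference taken by the lookup is handed over to
 * vma->vm_private_data and released in ttm_bo_vm_close().
 */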
int ttm_bo_mmap(struct file *filp, struct vm_area_struct *vma,
                struct ttm_bo_device *bdev)
{
        struct ttm_bo_driver *driver;
        struct ttm_buffer_object *bo;
        int ret;

        read_lock(&bdev->vm_lock);
        bo = ttm_bo_vm_lookup_rb(bdev, vma->vm_pgoff,
                                 (vma->vm_end - vma->vm_start) >> PAGE_SHIFT);
        if (likely(bo != NULL))
                ttm_bo_reference(bo);
        read_unlock(&bdev->vm_lock);

        if (unlikely(bo == NULL)) {
                pr_err("Could not find buffer object to map\n");
                return -EINVAL;
        }

        driver = bo->bdev->driver;
        if (unlikely(!driver->verify_access)) {
                ret = -EPERM;
                goto out_unref;
        }
        ret = driver->verify_access(bo, filp);
        if (unlikely(ret != 0))
                goto out_unref;

        vma->vm_ops = &ttm_bo_vm_ops;

        /*
         * Note: We're transferring the bo reference to
         * vma->vm_private_data here.
         */

        vma->vm_private_data = bo;
        vma->vm_flags |= VM_RESERVED | VM_IO | VM_MIXEDMAP | VM_DONTEXPAND;
        return 0;
out_unref:
        ttm_bo_unref(&bo);
        return ret;
}
EXPORT_SYMBOL(ttm_bo_mmap);

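/*
 * ttm_fbdev_mmap - Set up a mapping for an already-known buffer object
 * (typically a fbdev framebuffer), bypassing the address-space lookup and
 * access verification. Only offset zero is accepted.
 */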
int ttm_fbdev_mmap(struct vm_area_struct *vma, struct ttm_buffer_object *bo)
{
        if (vma->vm_pgoff != 0)
                return -EACCES;

        vma->vm_ops = &ttm_bo_vm_ops;
        vma->vm_private_data = ttm_bo_reference(bo);
        vma->vm_flags |= VM_RESERVED | VM_IO | VM_MIXEDMAP | VM_DONTEXPAND;
        return 0;
}
EXPORT_SYMBOL(ttm_fbdev_mmap);

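/*
 * ttm_bo_io - read()/write()-style access to a buffer object addressed by
 * the device-file offset *f_pos. Looks up and reserves the object, kmaps
 * the affected page range, and copies up to count bytes (clamped to the
 * end of the object) to or from user space. Returns the number of bytes
 * transferred or a negative error code, advancing *f_pos on success.
 */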
ssize_t ttm_bo_io(struct ttm_bo_device *bdev, struct file *filp,
                  const char __user *wbuf, char __user *rbuf, size_t count,
                  loff_t *f_pos, bool write)
{
        struct ttm_buffer_object *bo;
        struct ttm_bo_driver *driver;
        struct ttm_bo_kmap_obj map;
        unsigned long dev_offset = (*f_pos >> PAGE_SHIFT);
        unsigned long kmap_offset;
        unsigned long kmap_end;
        unsigned long kmap_num;
        size_t io_size;
        unsigned int page_offset;
        char *virtual;
        int ret;
        bool no_wait = false;
        bool dummy;

        read_lock(&bdev->vm_lock);
        bo = ttm_bo_vm_lookup_rb(bdev, dev_offset, 1);
        if (likely(bo != NULL))
                ttm_bo_reference(bo);
        read_unlock(&bdev->vm_lock);

        if (unlikely(bo == NULL))
                return -EFAULT;

        driver = bo->bdev->driver;
        if (unlikely(!driver->verify_access)) {
                ret = -EPERM;
                goto out_unref;
        }

        ret = driver->verify_access(bo, filp);
        if (unlikely(ret != 0))
                goto out_unref;

        kmap_offset = dev_offset - bo->vm_node->start;
        if (unlikely(kmap_offset >= bo->num_pages)) {
                ret = -EFBIG;
                goto out_unref;
        }

        page_offset = *f_pos & ~PAGE_MASK;
        io_size = bo->num_pages - kmap_offset;
        io_size = (io_size << PAGE_SHIFT) - page_offset;
        if (count < io_size)
                io_size = count;

        kmap_end = (*f_pos + count - 1) >> PAGE_SHIFT;
        kmap_num = kmap_end - kmap_offset + 1;

        ret = ttm_bo_reserve(bo, true, no_wait, false, 0);

        switch (ret) {
        case 0:
                break;
        case -EBUSY:
                ret = -EAGAIN;
                goto out_unref;
        default:
                goto out_unref;
        }

        ret = ttm_bo_kmap(bo, kmap_offset, kmap_num, &map);
        if (unlikely(ret != 0)) {
                ttm_bo_unreserve(bo);
                goto out_unref;
        }

        virtual = ttm_kmap_obj_virtual(&map, &dummy);
        virtual += page_offset;

        if (write)
                ret = copy_from_user(virtual, wbuf, io_size);
        else
                ret = copy_to_user(rbuf, virtual, io_size);

        ttm_bo_kunmap(&map);
        ttm_bo_unreserve(bo);
        ttm_bo_unref(&bo);

        if (unlikely(ret != 0))
                return -EFBIG;

        *f_pos += io_size;

        return io_size;
out_unref:
        ttm_bo_unref(&bo);
        return ret;
}

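/*
 * ttm_bo_fbdev_io - Like ttm_bo_io(), but for a buffer object already
 * known to the caller (typically a fbdev framebuffer), so no
 * address-space lookup or verify_access() check is performed.
 */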
ssize_t ttm_bo_fbdev_io(struct ttm_buffer_object *bo, const char __user *wbuf,
                        char __user *rbuf, size_t count, loff_t *f_pos,
                        bool write)
{
        struct ttm_bo_kmap_obj map;
        unsigned long kmap_offset;
        unsigned long kmap_end;
        unsigned long kmap_num;
        size_t io_size;
        unsigned int page_offset;
        char *virtual;
        int ret;
        bool no_wait = false;
        bool dummy;

        kmap_offset = (*f_pos >> PAGE_SHIFT);
        if (unlikely(kmap_offset >= bo->num_pages))
                return -EFBIG;

        page_offset = *f_pos & ~PAGE_MASK;
        io_size = bo->num_pages - kmap_offset;
        io_size = (io_size << PAGE_SHIFT) - page_offset;
        if (count < io_size)
                io_size = count;

        kmap_end = (*f_pos + count - 1) >> PAGE_SHIFT;
        kmap_num = kmap_end - kmap_offset + 1;

        ret = ttm_bo_reserve(bo, true, no_wait, false, 0);

        switch (ret) {
        case 0:
                break;
        case -EBUSY:
                return -EAGAIN;
        default:
                return ret;
        }

        ret = ttm_bo_kmap(bo, kmap_offset, kmap_num, &map);
        if (unlikely(ret != 0)) {
                ttm_bo_unreserve(bo);
                return ret;
        }

        virtual = ttm_kmap_obj_virtual(&map, &dummy);
        virtual += page_offset;

        if (write)
                ret = copy_from_user(virtual, wbuf, io_size);
        else
                ret = copy_to_user(rbuf, virtual, io_size);

        ttm_bo_kunmap(&map);
        ttm_bo_unreserve(bo);
        ttm_bo_unref(&bo);

        if (unlikely(ret != 0))
                return ret;

        *f_pos += io_size;

        return io_size;
}