1 /*
2  * Copyright © 2008 Intel Corporation
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the next
12  * paragraph) shall be included in all copies or substantial portions of the
13  * Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21  * IN THE SOFTWARE.
22  *
23  * Authors:
24  *    Eric Anholt <eric@anholt.net>
25  *
26  */
27
28 #include "drmP.h"
29 #include "drm.h"
30 #include "i915_drm.h"
31 #include "i915_drv.h"
32 #include "i915_trace.h"
33 #include "intel_drv.h"
34 #include <linux/shmem_fs.h>
35 #include <linux/slab.h>
36 #include <linux/swap.h>
37 #include <linux/pci.h>
38
39 static __must_check int i915_gem_object_flush_gpu_write_domain(struct drm_i915_gem_object *obj);
40 static void i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj);
41 static void i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj);
42 static __must_check int i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj,
43                                                           bool write);
44 static __must_check int i915_gem_object_set_cpu_read_domain_range(struct drm_i915_gem_object *obj,
45                                                                   uint64_t offset,
46                                                                   uint64_t size);
47 static void i915_gem_object_set_to_full_cpu_read_domain(struct drm_i915_gem_object *obj);
48 static __must_check int i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj,
49                                                     unsigned alignment,
50                                                     bool map_and_fenceable);
51 static void i915_gem_clear_fence_reg(struct drm_device *dev,
52                                      struct drm_i915_fence_reg *reg);
53 static int i915_gem_phys_pwrite(struct drm_device *dev,
54                                 struct drm_i915_gem_object *obj,
55                                 struct drm_i915_gem_pwrite *args,
56                                 struct drm_file *file);
57 static void i915_gem_free_object_tail(struct drm_i915_gem_object *obj);
58
59 static int i915_gem_inactive_shrink(struct shrinker *shrinker,
60                                     struct shrink_control *sc);
61
62 /* some bookkeeping */
63 static void i915_gem_info_add_obj(struct drm_i915_private *dev_priv,
64                                   size_t size)
65 {
66         dev_priv->mm.object_count++;
67         dev_priv->mm.object_memory += size;
68 }
69
70 static void i915_gem_info_remove_obj(struct drm_i915_private *dev_priv,
71                                      size_t size)
72 {
73         dev_priv->mm.object_count--;
74         dev_priv->mm.object_memory -= size;
75 }
76
77 static int
78 i915_gem_wait_for_error(struct drm_device *dev)
79 {
80         struct drm_i915_private *dev_priv = dev->dev_private;
81         struct completion *x = &dev_priv->error_completion;
82         unsigned long flags;
83         int ret;
84
85         if (!atomic_read(&dev_priv->mm.wedged))
86                 return 0;
87
88         ret = wait_for_completion_interruptible(x);
89         if (ret)
90                 return ret;
91
92         if (atomic_read(&dev_priv->mm.wedged)) {
93                 /* GPU is hung, bump the completion count to account for
94                  * the token we just consumed so that we never hit zero and
95                  * end up waiting upon a subsequent completion event that
96                  * will never happen.
97                  */
98                 spin_lock_irqsave(&x->wait.lock, flags);
99                 x->done++;
100                 spin_unlock_irqrestore(&x->wait.lock, flags);
101         }
102         return 0;
103 }
104
105 int i915_mutex_lock_interruptible(struct drm_device *dev)
106 {
107         int ret;
108
109         ret = i915_gem_wait_for_error(dev);
110         if (ret)
111                 return ret;
112
113         ret = mutex_lock_interruptible(&dev->struct_mutex);
114         if (ret)
115                 return ret;
116
117         WARN_ON(i915_verify_lists(dev));
118         return 0;
119 }
120
121 static inline bool
122 i915_gem_object_is_inactive(struct drm_i915_gem_object *obj)
123 {
124         return obj->gtt_space && !obj->active && obj->pin_count == 0;
125 }
126
127 void i915_gem_do_init(struct drm_device *dev,
128                       unsigned long start,
129                       unsigned long mappable_end,
130                       unsigned long end)
131 {
132         drm_i915_private_t *dev_priv = dev->dev_private;
133
134         drm_mm_init(&dev_priv->mm.gtt_space, start, end - start);
135
136         dev_priv->mm.gtt_start = start;
137         dev_priv->mm.gtt_mappable_end = mappable_end;
138         dev_priv->mm.gtt_end = end;
139         dev_priv->mm.gtt_total = end - start;
140         dev_priv->mm.mappable_gtt_total = min(end, mappable_end) - start;
141
142         /* Take over this portion of the GTT */
143         intel_gtt_clear_range(start / PAGE_SIZE, (end-start) / PAGE_SIZE);
144 }
145
146 int
147 i915_gem_init_ioctl(struct drm_device *dev, void *data,
148                     struct drm_file *file)
149 {
150         struct drm_i915_gem_init *args = data;
151
152         if (args->gtt_start >= args->gtt_end ||
153             (args->gtt_end | args->gtt_start) & (PAGE_SIZE - 1))
154                 return -EINVAL;
155
156         mutex_lock(&dev->struct_mutex);
157         i915_gem_do_init(dev, args->gtt_start, args->gtt_end, args->gtt_end);
158         mutex_unlock(&dev->struct_mutex);
159
160         return 0;
161 }
162
163 int
164 i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
165                             struct drm_file *file)
166 {
167         struct drm_i915_private *dev_priv = dev->dev_private;
168         struct drm_i915_gem_get_aperture *args = data;
169         struct drm_i915_gem_object *obj;
170         size_t pinned;
171
172         if (!(dev->driver->driver_features & DRIVER_GEM))
173                 return -ENODEV;
174
175         pinned = 0;
176         mutex_lock(&dev->struct_mutex);
177         list_for_each_entry(obj, &dev_priv->mm.pinned_list, mm_list)
178                 pinned += obj->gtt_space->size;
179         mutex_unlock(&dev->struct_mutex);
180
181         args->aper_size = dev_priv->mm.gtt_total;
182         args->aper_available_size = args->aper_size - pinned;
183
184         return 0;
185 }
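
/*
 * Illustrative userspace sketch (not part of this file): querying the
 * aperture with DRM_IOCTL_I915_GEM_GET_APERTURE.  Assumes an open DRM fd
 * `fd` and the i915_drm.h UAPI header; error handling is omitted.
 *
 *	struct drm_i915_gem_get_aperture aperture = { 0 };
 *
 *	ioctl(fd, DRM_IOCTL_I915_GEM_GET_APERTURE, &aperture);
 *	// aperture.aper_available_size is the total GTT minus pinned objects
 */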
186
187 static int
188 i915_gem_create(struct drm_file *file,
189                 struct drm_device *dev,
190                 uint64_t size,
191                 uint32_t *handle_p)
192 {
193         struct drm_i915_gem_object *obj;
194         int ret;
195         u32 handle;
196
197         size = roundup(size, PAGE_SIZE);
198         if (size == 0)
199                 return -EINVAL;
200
201         /* Allocate the new object */
202         obj = i915_gem_alloc_object(dev, size);
203         if (obj == NULL)
204                 return -ENOMEM;
205
206         ret = drm_gem_handle_create(file, &obj->base, &handle);
207         if (ret) {
208                 drm_gem_object_release(&obj->base);
209                 i915_gem_info_remove_obj(dev->dev_private, obj->base.size);
210                 kfree(obj);
211                 return ret;
212         }
213
214         /* drop reference from allocate - handle holds it now */
215         drm_gem_object_unreference(&obj->base);
216         trace_i915_gem_object_create(obj);
217
218         *handle_p = handle;
219         return 0;
220 }
221
222 int
223 i915_gem_dumb_create(struct drm_file *file,
224                      struct drm_device *dev,
225                      struct drm_mode_create_dumb *args)
226 {
227         /* have to work out size/pitch and return them */
228         args->pitch = ALIGN(args->width * ((args->bpp + 7) / 8), 64);
229         args->size = args->pitch * args->height;
230         return i915_gem_create(file, dev,
231                                args->size, &args->handle);
232 }
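
/*
 * Worked example for the pitch/size computation above (illustrative): for a
 * 1024x768 dumb buffer at 32 bpp, bytes-per-pixel = (32 + 7) / 8 = 4, so the
 * pitch is ALIGN(1024 * 4, 64) = 4096 bytes (already 64-byte aligned) and
 * the object size is 4096 * 768 = 3 MiB.
 */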
233
234 int i915_gem_dumb_destroy(struct drm_file *file,
235                           struct drm_device *dev,
236                           uint32_t handle)
237 {
238         return drm_gem_handle_delete(file, handle);
239 }
240
241 /**
242  * Creates a new mm object and returns a handle to it.
243  */
244 int
245 i915_gem_create_ioctl(struct drm_device *dev, void *data,
246                       struct drm_file *file)
247 {
248         struct drm_i915_gem_create *args = data;
249         return i915_gem_create(file, dev,
250                                args->size, &args->handle);
251 }
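
/*
 * Illustrative userspace sketch (not part of this file): creating a GEM
 * object with DRM_IOCTL_I915_GEM_CREATE.  Assumes an open DRM fd `fd`;
 * error handling is omitted.  The size is rounded up to a page by
 * i915_gem_create() above.
 *
 *	struct drm_i915_gem_create create = { .size = 4096 };
 *
 *	ioctl(fd, DRM_IOCTL_I915_GEM_CREATE, &create);
 *	// on success, create.handle names the new buffer object
 */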
252
253 static int i915_gem_object_needs_bit17_swizzle(struct drm_i915_gem_object *obj)
254 {
255         drm_i915_private_t *dev_priv = obj->base.dev->dev_private;
256
257         return dev_priv->mm.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_9_10_17 &&
258                 obj->tiling_mode != I915_TILING_NONE;
259 }
260
261 static inline void
262 slow_shmem_copy(struct page *dst_page,
263                 int dst_offset,
264                 struct page *src_page,
265                 int src_offset,
266                 int length)
267 {
268         char *dst_vaddr, *src_vaddr;
269
270         dst_vaddr = kmap(dst_page);
271         src_vaddr = kmap(src_page);
272
273         memcpy(dst_vaddr + dst_offset, src_vaddr + src_offset, length);
274
275         kunmap(src_page);
276         kunmap(dst_page);
277 }
278
279 static inline void
280 slow_shmem_bit17_copy(struct page *gpu_page,
281                       int gpu_offset,
282                       struct page *cpu_page,
283                       int cpu_offset,
284                       int length,
285                       int is_read)
286 {
287         char *gpu_vaddr, *cpu_vaddr;
288
289         /* Use the unswizzled path if this page isn't affected. */
290         if ((page_to_phys(gpu_page) & (1 << 17)) == 0) {
291                 if (is_read)
292                         return slow_shmem_copy(cpu_page, cpu_offset,
293                                                gpu_page, gpu_offset, length);
294                 else
295                         return slow_shmem_copy(gpu_page, gpu_offset,
296                                                cpu_page, cpu_offset, length);
297         }
298
299         gpu_vaddr = kmap(gpu_page);
300         cpu_vaddr = kmap(cpu_page);
301
302         /* Copy the data, XORing A6 with A17 (1). The user already knows he's
303          * XORing with the other bits (A9 for Y, A9 and A10 for X)
304          */
305         while (length > 0) {
306                 int cacheline_end = ALIGN(gpu_offset + 1, 64);
307                 int this_length = min(cacheline_end - gpu_offset, length);
308                 int swizzled_gpu_offset = gpu_offset ^ 64;
309
310                 if (is_read) {
311                         memcpy(cpu_vaddr + cpu_offset,
312                                gpu_vaddr + swizzled_gpu_offset,
313                                this_length);
314                 } else {
315                         memcpy(gpu_vaddr + swizzled_gpu_offset,
316                                cpu_vaddr + cpu_offset,
317                                this_length);
318                 }
319                 cpu_offset += this_length;
320                 gpu_offset += this_length;
321                 length -= this_length;
322         }
323
324         kunmap(cpu_page);
325         kunmap(gpu_page);
326 }
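
/*
 * Worked example for the swizzled copy above (illustrative): if bit 17 of
 * the page's physical address is set and gpu_offset is 0x10, the data is
 * actually fetched from (or stored to) 0x10 ^ 0x40 = 0x50, i.e. bit 6 of
 * the in-page offset is flipped.  The copy is chunked at 64-byte cacheline
 * boundaries: cacheline_end = ALIGN(0x10 + 1, 64) = 0x40, so the first
 * chunk copies at most 0x30 bytes before the swizzled offset is recomputed.
 */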
327
328 /**
329  * This is the fast shmem pread path, which attempts to copy_to_user directly
330  * from the backing pages of the object to the user's address space.  On a
331  * fault, it fails so we can fall back to i915_gem_shmem_pread_slow().
332  */
333 static int
334 i915_gem_shmem_pread_fast(struct drm_device *dev,
335                           struct drm_i915_gem_object *obj,
336                           struct drm_i915_gem_pread *args,
337                           struct drm_file *file)
338 {
339         struct address_space *mapping = obj->base.filp->f_path.dentry->d_inode->i_mapping;
340         ssize_t remain;
341         loff_t offset;
342         char __user *user_data;
343         int page_offset, page_length;
344
345         user_data = (char __user *) (uintptr_t) args->data_ptr;
346         remain = args->size;
347
348         offset = args->offset;
349
350         while (remain > 0) {
351                 struct page *page;
352                 char *vaddr;
353                 int ret;
354
355                 /* Operation in this page
356                  *
357                  * page_offset = offset within page
358                  * page_length = bytes to copy for this page
359                  */
360                 page_offset = offset_in_page(offset);
361                 page_length = remain;
362                 if ((page_offset + remain) > PAGE_SIZE)
363                         page_length = PAGE_SIZE - page_offset;
364
365                 page = shmem_read_mapping_page(mapping, offset >> PAGE_SHIFT);
366                 if (IS_ERR(page))
367                         return PTR_ERR(page);
368
369                 vaddr = kmap_atomic(page);
370                 ret = __copy_to_user_inatomic(user_data,
371                                               vaddr + page_offset,
372                                               page_length);
373                 kunmap_atomic(vaddr);
374
375                 mark_page_accessed(page);
376                 page_cache_release(page);
377                 if (ret)
378                         return -EFAULT;
379
380                 remain -= page_length;
381                 user_data += page_length;
382                 offset += page_length;
383         }
384
385         return 0;
386 }
387
388 /**
389  * This is the fallback shmem pread path, which uses get_user_pages to pin
390  * the destination pages up front, so we can copy out of the object's
391  * backing pages into them while holding the struct_mutex and without
392  * taking page faults.
393  */
394 static int
395 i915_gem_shmem_pread_slow(struct drm_device *dev,
396                           struct drm_i915_gem_object *obj,
397                           struct drm_i915_gem_pread *args,
398                           struct drm_file *file)
399 {
400         struct address_space *mapping = obj->base.filp->f_path.dentry->d_inode->i_mapping;
401         struct mm_struct *mm = current->mm;
402         struct page **user_pages;
403         ssize_t remain;
404         loff_t offset, pinned_pages, i;
405         loff_t first_data_page, last_data_page, num_pages;
406         int shmem_page_offset;
407         int data_page_index, data_page_offset;
408         int page_length;
409         int ret;
410         uint64_t data_ptr = args->data_ptr;
411         int do_bit17_swizzling;
412
413         remain = args->size;
414
415         /* Pin the user pages containing the data.  We can't fault while
416          * holding the struct mutex, yet we want to hold it while
417          * dereferencing the user data.
418          */
419         first_data_page = data_ptr / PAGE_SIZE;
420         last_data_page = (data_ptr + args->size - 1) / PAGE_SIZE;
421         num_pages = last_data_page - first_data_page + 1;
422
423         user_pages = drm_malloc_ab(num_pages, sizeof(struct page *));
424         if (user_pages == NULL)
425                 return -ENOMEM;
426
427         mutex_unlock(&dev->struct_mutex);
428         down_read(&mm->mmap_sem);
429         pinned_pages = get_user_pages(current, mm, (uintptr_t)args->data_ptr,
430                                       num_pages, 1, 0, user_pages, NULL);
431         up_read(&mm->mmap_sem);
432         mutex_lock(&dev->struct_mutex);
433         if (pinned_pages < num_pages) {
434                 ret = -EFAULT;
435                 goto out;
436         }
437
438         ret = i915_gem_object_set_cpu_read_domain_range(obj,
439                                                         args->offset,
440                                                         args->size);
441         if (ret)
442                 goto out;
443
444         do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);
445
446         offset = args->offset;
447
448         while (remain > 0) {
449                 struct page *page;
450
451                 /* Operation in this page
452                  *
453                  * shmem_page_offset = offset within page in shmem file
454                  * data_page_index = page number in get_user_pages return
455  * data_page_offset = offset within the data_page_index page.
456                  * page_length = bytes to copy for this page
457                  */
458                 shmem_page_offset = offset_in_page(offset);
459                 data_page_index = data_ptr / PAGE_SIZE - first_data_page;
460                 data_page_offset = offset_in_page(data_ptr);
461
462                 page_length = remain;
463                 if ((shmem_page_offset + page_length) > PAGE_SIZE)
464                         page_length = PAGE_SIZE - shmem_page_offset;
465                 if ((data_page_offset + page_length) > PAGE_SIZE)
466                         page_length = PAGE_SIZE - data_page_offset;
467
468                 page = shmem_read_mapping_page(mapping, offset >> PAGE_SHIFT);
469                 if (IS_ERR(page)) {
470                         ret = PTR_ERR(page);
471                         goto out;
472                 }
473
474                 if (do_bit17_swizzling) {
475                         slow_shmem_bit17_copy(page,
476                                               shmem_page_offset,
477                                               user_pages[data_page_index],
478                                               data_page_offset,
479                                               page_length,
480                                               1);
481                 } else {
482                         slow_shmem_copy(user_pages[data_page_index],
483                                         data_page_offset,
484                                         page,
485                                         shmem_page_offset,
486                                         page_length);
487                 }
488
489                 mark_page_accessed(page);
490                 page_cache_release(page);
491
492                 remain -= page_length;
493                 data_ptr += page_length;
494                 offset += page_length;
495         }
496
497 out:
498         for (i = 0; i < pinned_pages; i++) {
499                 SetPageDirty(user_pages[i]);
500                 mark_page_accessed(user_pages[i]);
501                 page_cache_release(user_pages[i]);
502         }
503         drm_free_large(user_pages);
504
505         return ret;
506 }
507
508 /**
509  * Reads data from the object referenced by handle.
510  *
511  * On error, the contents of *data are undefined.
512  */
513 int
514 i915_gem_pread_ioctl(struct drm_device *dev, void *data,
515                      struct drm_file *file)
516 {
517         struct drm_i915_gem_pread *args = data;
518         struct drm_i915_gem_object *obj;
519         int ret = 0;
520
521         if (args->size == 0)
522                 return 0;
523
524         if (!access_ok(VERIFY_WRITE,
525                        (char __user *)(uintptr_t)args->data_ptr,
526                        args->size))
527                 return -EFAULT;
528
529         ret = fault_in_pages_writeable((char __user *)(uintptr_t)args->data_ptr,
530                                        args->size);
531         if (ret)
532                 return -EFAULT;
533
534         ret = i915_mutex_lock_interruptible(dev);
535         if (ret)
536                 return ret;
537
538         obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
539         if (&obj->base == NULL) {
540                 ret = -ENOENT;
541                 goto unlock;
542         }
543
544         /* Bounds check source.  */
545         if (args->offset > obj->base.size ||
546             args->size > obj->base.size - args->offset) {
547                 ret = -EINVAL;
548                 goto out;
549         }
550
551         trace_i915_gem_object_pread(obj, args->offset, args->size);
552
553         ret = i915_gem_object_set_cpu_read_domain_range(obj,
554                                                         args->offset,
555                                                         args->size);
556         if (ret)
557                 goto out;
558
559         ret = -EFAULT;
560         if (!i915_gem_object_needs_bit17_swizzle(obj))
561                 ret = i915_gem_shmem_pread_fast(dev, obj, args, file);
562         if (ret == -EFAULT)
563                 ret = i915_gem_shmem_pread_slow(dev, obj, args, file);
564
565 out:
566         drm_gem_object_unreference(&obj->base);
567 unlock:
568         mutex_unlock(&dev->struct_mutex);
569         return ret;
570 }
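
/*
 * Illustrative userspace sketch (not part of this file): reading an object
 * back with DRM_IOCTL_I915_GEM_PREAD.  Assumes an open DRM fd `fd`, a valid
 * object `handle` and a user array `buf`; error handling is omitted.
 *
 *	struct drm_i915_gem_pread pread = {
 *		.handle   = handle,
 *		.offset   = 0,
 *		.size     = sizeof(buf),
 *		.data_ptr = (uintptr_t)buf,
 *	};
 *
 *	ioctl(fd, DRM_IOCTL_I915_GEM_PREAD, &pread);
 */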
571
572 /* This is the fast write path which cannot handle
573  * page faults in the source data
574  */
575
576 static inline int
577 fast_user_write(struct io_mapping *mapping,
578                 loff_t page_base, int page_offset,
579                 char __user *user_data,
580                 int length)
581 {
582         char *vaddr_atomic;
583         unsigned long unwritten;
584
585         vaddr_atomic = io_mapping_map_atomic_wc(mapping, page_base);
586         unwritten = __copy_from_user_inatomic_nocache(vaddr_atomic + page_offset,
587                                                       user_data, length);
588         io_mapping_unmap_atomic(vaddr_atomic);
589         return unwritten;
590 }
591
592 /* Here's the write path which can sleep for
593  * page faults
594  */
595
596 static inline void
597 slow_kernel_write(struct io_mapping *mapping,
598                   loff_t gtt_base, int gtt_offset,
599                   struct page *user_page, int user_offset,
600                   int length)
601 {
602         char __iomem *dst_vaddr;
603         char *src_vaddr;
604
605         dst_vaddr = io_mapping_map_wc(mapping, gtt_base);
606         src_vaddr = kmap(user_page);
607
608         memcpy_toio(dst_vaddr + gtt_offset,
609                     src_vaddr + user_offset,
610                     length);
611
612         kunmap(user_page);
613         io_mapping_unmap(dst_vaddr);
614 }
615
616 /**
617  * This is the fast pwrite path, where we copy the data directly from the
618  * user into the GTT, uncached.
619  */
620 static int
621 i915_gem_gtt_pwrite_fast(struct drm_device *dev,
622                          struct drm_i915_gem_object *obj,
623                          struct drm_i915_gem_pwrite *args,
624                          struct drm_file *file)
625 {
626         drm_i915_private_t *dev_priv = dev->dev_private;
627         ssize_t remain;
628         loff_t offset, page_base;
629         char __user *user_data;
630         int page_offset, page_length;
631
632         user_data = (char __user *) (uintptr_t) args->data_ptr;
633         remain = args->size;
634
635         offset = obj->gtt_offset + args->offset;
636
637         while (remain > 0) {
638                 /* Operation in this page
639                  *
640                  * page_base = page offset within aperture
641                  * page_offset = offset within page
642                  * page_length = bytes to copy for this page
643                  */
644                 page_base = offset & PAGE_MASK;
645                 page_offset = offset_in_page(offset);
646                 page_length = remain;
647                 if ((page_offset + remain) > PAGE_SIZE)
648                         page_length = PAGE_SIZE - page_offset;
649
650                 /* If we get a fault while copying data, then (presumably) our
651                  * source page isn't available.  Return the error and we'll
652                  * retry in the slow path.
653                  */
654                 if (fast_user_write(dev_priv->mm.gtt_mapping, page_base,
655                                     page_offset, user_data, page_length))
656                         return -EFAULT;
657
658                 remain -= page_length;
659                 user_data += page_length;
660                 offset += page_length;
661         }
662
663         return 0;
664 }
665
666 /**
667  * This is the fallback GTT pwrite path, which uses get_user_pages to pin
668  * the memory and maps it using kmap for copying into the GTT.
669  *
670  * This code resulted in x11perf -rgb10text consuming about 10% more CPU
671  * than using i915_gem_gtt_pwrite_fast on a G45 (32-bit).
672  */
673 static int
674 i915_gem_gtt_pwrite_slow(struct drm_device *dev,
675                          struct drm_i915_gem_object *obj,
676                          struct drm_i915_gem_pwrite *args,
677                          struct drm_file *file)
678 {
679         drm_i915_private_t *dev_priv = dev->dev_private;
680         ssize_t remain;
681         loff_t gtt_page_base, offset;
682         loff_t first_data_page, last_data_page, num_pages;
683         loff_t pinned_pages, i;
684         struct page **user_pages;
685         struct mm_struct *mm = current->mm;
686         int gtt_page_offset, data_page_offset, data_page_index, page_length;
687         int ret;
688         uint64_t data_ptr = args->data_ptr;
689
690         remain = args->size;
691
692         /* Pin the user pages containing the data.  We can't fault while
693          * holding the struct mutex, and all of the pwrite implementations
694          * want to hold it while dereferencing the user data.
695          */
696         first_data_page = data_ptr / PAGE_SIZE;
697         last_data_page = (data_ptr + args->size - 1) / PAGE_SIZE;
698         num_pages = last_data_page - first_data_page + 1;
699
700         user_pages = drm_malloc_ab(num_pages, sizeof(struct page *));
701         if (user_pages == NULL)
702                 return -ENOMEM;
703
704         mutex_unlock(&dev->struct_mutex);
705         down_read(&mm->mmap_sem);
706         pinned_pages = get_user_pages(current, mm, (uintptr_t)args->data_ptr,
707                                       num_pages, 0, 0, user_pages, NULL);
708         up_read(&mm->mmap_sem);
709         mutex_lock(&dev->struct_mutex);
710         if (pinned_pages < num_pages) {
711                 ret = -EFAULT;
712                 goto out_unpin_pages;
713         }
714
715         ret = i915_gem_object_set_to_gtt_domain(obj, true);
716         if (ret)
717                 goto out_unpin_pages;
718
719         ret = i915_gem_object_put_fence(obj);
720         if (ret)
721                 goto out_unpin_pages;
722
723         offset = obj->gtt_offset + args->offset;
724
725         while (remain > 0) {
726                 /* Operation in this page
727                  *
728                  * gtt_page_base = page offset within aperture
729                  * gtt_page_offset = offset within page in aperture
730                  * data_page_index = page number in get_user_pages return
731  * data_page_offset = offset within the data_page_index page.
732                  * page_length = bytes to copy for this page
733                  */
734                 gtt_page_base = offset & PAGE_MASK;
735                 gtt_page_offset = offset_in_page(offset);
736                 data_page_index = data_ptr / PAGE_SIZE - first_data_page;
737                 data_page_offset = offset_in_page(data_ptr);
738
739                 page_length = remain;
740                 if ((gtt_page_offset + page_length) > PAGE_SIZE)
741                         page_length = PAGE_SIZE - gtt_page_offset;
742                 if ((data_page_offset + page_length) > PAGE_SIZE)
743                         page_length = PAGE_SIZE - data_page_offset;
744
745                 slow_kernel_write(dev_priv->mm.gtt_mapping,
746                                   gtt_page_base, gtt_page_offset,
747                                   user_pages[data_page_index],
748                                   data_page_offset,
749                                   page_length);
750
751                 remain -= page_length;
752                 offset += page_length;
753                 data_ptr += page_length;
754         }
755
756 out_unpin_pages:
757         for (i = 0; i < pinned_pages; i++)
758                 page_cache_release(user_pages[i]);
759         drm_free_large(user_pages);
760
761         return ret;
762 }
763
764 /**
765  * This is the fast shmem pwrite path, which attempts to directly
766  * copy_from_user into the kmapped pages backing the object.
767  */
768 static int
769 i915_gem_shmem_pwrite_fast(struct drm_device *dev,
770                            struct drm_i915_gem_object *obj,
771                            struct drm_i915_gem_pwrite *args,
772                            struct drm_file *file)
773 {
774         struct address_space *mapping = obj->base.filp->f_path.dentry->d_inode->i_mapping;
775         ssize_t remain;
776         loff_t offset;
777         char __user *user_data;
778         int page_offset, page_length;
779
780         user_data = (char __user *) (uintptr_t) args->data_ptr;
781         remain = args->size;
782
783         offset = args->offset;
784         obj->dirty = 1;
785
786         while (remain > 0) {
787                 struct page *page;
788                 char *vaddr;
789                 int ret;
790
791                 /* Operation in this page
792                  *
793                  * page_offset = offset within page
794                  * page_length = bytes to copy for this page
795                  */
796                 page_offset = offset_in_page(offset);
797                 page_length = remain;
798                 if ((page_offset + remain) > PAGE_SIZE)
799                         page_length = PAGE_SIZE - page_offset;
800
801                 page = shmem_read_mapping_page(mapping, offset >> PAGE_SHIFT);
802                 if (IS_ERR(page))
803                         return PTR_ERR(page);
804
805                 vaddr = kmap_atomic(page);
806                 ret = __copy_from_user_inatomic(vaddr + page_offset,
807                                                 user_data,
808                                                 page_length);
809                 kunmap_atomic(vaddr);
810
811                 set_page_dirty(page);
812                 mark_page_accessed(page);
813                 page_cache_release(page);
814
815                 /* If we get a fault while copying data, then (presumably) our
816                  * source page isn't available.  Return the error and we'll
817                  * retry in the slow path.
818                  */
819                 if (ret)
820                         return -EFAULT;
821
822                 remain -= page_length;
823                 user_data += page_length;
824                 offset += page_length;
825         }
826
827         return 0;
828 }
829
830 /**
831  * This is the fallback shmem pwrite path, which uses get_user_pages to pin
832  * the memory and maps it using kmap for copying.
833  *
834  * This avoids taking mmap_sem for faulting on the user's address while the
835  * struct_mutex is held.
836  */
837 static int
838 i915_gem_shmem_pwrite_slow(struct drm_device *dev,
839                            struct drm_i915_gem_object *obj,
840                            struct drm_i915_gem_pwrite *args,
841                            struct drm_file *file)
842 {
843         struct address_space *mapping = obj->base.filp->f_path.dentry->d_inode->i_mapping;
844         struct mm_struct *mm = current->mm;
845         struct page **user_pages;
846         ssize_t remain;
847         loff_t offset, pinned_pages, i;
848         loff_t first_data_page, last_data_page, num_pages;
849         int shmem_page_offset;
850         int data_page_index,  data_page_offset;
851         int page_length;
852         int ret;
853         uint64_t data_ptr = args->data_ptr;
854         int do_bit17_swizzling;
855
856         remain = args->size;
857
858         /* Pin the user pages containing the data.  We can't fault while
859          * holding the struct mutex, and all of the pwrite implementations
860          * want to hold it while dereferencing the user data.
861          */
862         first_data_page = data_ptr / PAGE_SIZE;
863         last_data_page = (data_ptr + args->size - 1) / PAGE_SIZE;
864         num_pages = last_data_page - first_data_page + 1;
865
866         user_pages = drm_malloc_ab(num_pages, sizeof(struct page *));
867         if (user_pages == NULL)
868                 return -ENOMEM;
869
870         mutex_unlock(&dev->struct_mutex);
871         down_read(&mm->mmap_sem);
872         pinned_pages = get_user_pages(current, mm, (uintptr_t)args->data_ptr,
873                                       num_pages, 0, 0, user_pages, NULL);
874         up_read(&mm->mmap_sem);
875         mutex_lock(&dev->struct_mutex);
876         if (pinned_pages < num_pages) {
877                 ret = -EFAULT;
878                 goto out;
879         }
880
881         ret = i915_gem_object_set_to_cpu_domain(obj, 1);
882         if (ret)
883                 goto out;
884
885         do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);
886
887         offset = args->offset;
888         obj->dirty = 1;
889
890         while (remain > 0) {
891                 struct page *page;
892
893                 /* Operation in this page
894                  *
895                  * shmem_page_offset = offset within page in shmem file
896                  * data_page_index = page number in get_user_pages return
897  * data_page_offset = offset within the data_page_index page.
898                  * page_length = bytes to copy for this page
899                  */
900                 shmem_page_offset = offset_in_page(offset);
901                 data_page_index = data_ptr / PAGE_SIZE - first_data_page;
902                 data_page_offset = offset_in_page(data_ptr);
903
904                 page_length = remain;
905                 if ((shmem_page_offset + page_length) > PAGE_SIZE)
906                         page_length = PAGE_SIZE - shmem_page_offset;
907                 if ((data_page_offset + page_length) > PAGE_SIZE)
908                         page_length = PAGE_SIZE - data_page_offset;
909
910                 page = shmem_read_mapping_page(mapping, offset >> PAGE_SHIFT);
911                 if (IS_ERR(page)) {
912                         ret = PTR_ERR(page);
913                         goto out;
914                 }
915
916                 if (do_bit17_swizzling) {
917                         slow_shmem_bit17_copy(page,
918                                               shmem_page_offset,
919                                               user_pages[data_page_index],
920                                               data_page_offset,
921                                               page_length,
922                                               0);
923                 } else {
924                         slow_shmem_copy(page,
925                                         shmem_page_offset,
926                                         user_pages[data_page_index],
927                                         data_page_offset,
928                                         page_length);
929                 }
930
931                 set_page_dirty(page);
932                 mark_page_accessed(page);
933                 page_cache_release(page);
934
935                 remain -= page_length;
936                 data_ptr += page_length;
937                 offset += page_length;
938         }
939
940 out:
941         for (i = 0; i < pinned_pages; i++)
942                 page_cache_release(user_pages[i]);
943         drm_free_large(user_pages);
944
945         return ret;
946 }
947
948 /**
949  * Writes data to the object referenced by handle.
950  *
951  * On error, the contents of the buffer that were to be modified are undefined.
952  */
953 int
954 i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
955                       struct drm_file *file)
956 {
957         struct drm_i915_gem_pwrite *args = data;
958         struct drm_i915_gem_object *obj;
959         int ret;
960
961         if (args->size == 0)
962                 return 0;
963
964         if (!access_ok(VERIFY_READ,
965                        (char __user *)(uintptr_t)args->data_ptr,
966                        args->size))
967                 return -EFAULT;
968
969         ret = fault_in_pages_readable((char __user *)(uintptr_t)args->data_ptr,
970                                       args->size);
971         if (ret)
972                 return -EFAULT;
973
974         ret = i915_mutex_lock_interruptible(dev);
975         if (ret)
976                 return ret;
977
978         obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
979         if (&obj->base == NULL) {
980                 ret = -ENOENT;
981                 goto unlock;
982         }
983
984         /* Bounds check destination. */
985         if (args->offset > obj->base.size ||
986             args->size > obj->base.size - args->offset) {
987                 ret = -EINVAL;
988                 goto out;
989         }
990
991         trace_i915_gem_object_pwrite(obj, args->offset, args->size);
992
993         /* We can only do the GTT pwrite on untiled buffers, as otherwise
994          * it would end up going through the fenced access, and we'll get
995          * different detiling behavior between reading and writing.
996          * pread/pwrite currently are reading and writing from the CPU
997          * perspective, requiring manual detiling by the client.
998          */
999         if (obj->phys_obj)
1000                 ret = i915_gem_phys_pwrite(dev, obj, args, file);
1001         else if (obj->gtt_space &&
1002                  obj->base.write_domain != I915_GEM_DOMAIN_CPU) {
1003                 ret = i915_gem_object_pin(obj, 0, true);
1004                 if (ret)
1005                         goto out;
1006
1007                 ret = i915_gem_object_set_to_gtt_domain(obj, true);
1008                 if (ret)
1009                         goto out_unpin;
1010
1011                 ret = i915_gem_object_put_fence(obj);
1012                 if (ret)
1013                         goto out_unpin;
1014
1015                 ret = i915_gem_gtt_pwrite_fast(dev, obj, args, file);
1016                 if (ret == -EFAULT)
1017                         ret = i915_gem_gtt_pwrite_slow(dev, obj, args, file);
1018
1019 out_unpin:
1020                 i915_gem_object_unpin(obj);
1021         } else {
1022                 ret = i915_gem_object_set_to_cpu_domain(obj, 1);
1023                 if (ret)
1024                         goto out;
1025
1026                 ret = -EFAULT;
1027                 if (!i915_gem_object_needs_bit17_swizzle(obj))
1028                         ret = i915_gem_shmem_pwrite_fast(dev, obj, args, file);
1029                 if (ret == -EFAULT)
1030                         ret = i915_gem_shmem_pwrite_slow(dev, obj, args, file);
1031         }
1032
1033 out:
1034         drm_gem_object_unreference(&obj->base);
1035 unlock:
1036         mutex_unlock(&dev->struct_mutex);
1037         return ret;
1038 }
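
/*
 * Illustrative userspace sketch (not part of this file): writing data into
 * an object with DRM_IOCTL_I915_GEM_PWRITE.  Assumes an open DRM fd `fd`, a
 * valid object `handle` and a source buffer `buf` of `len` bytes; error
 * handling is omitted.  The kernel picks the phys, GTT or shmem path above
 * depending on how the object is currently bound.
 *
 *	struct drm_i915_gem_pwrite pwrite = {
 *		.handle   = handle,
 *		.offset   = 0,
 *		.size     = len,
 *		.data_ptr = (uintptr_t)buf,
 *	};
 *
 *	ioctl(fd, DRM_IOCTL_I915_GEM_PWRITE, &pwrite);
 */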
1039
1040 /**
1041  * Called when user space prepares to use an object with the CPU, either
1042  * through the mmap ioctl's mapping or a GTT mapping.
1043  */
1044 int
1045 i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
1046                           struct drm_file *file)
1047 {
1048         struct drm_i915_gem_set_domain *args = data;
1049         struct drm_i915_gem_object *obj;
1050         uint32_t read_domains = args->read_domains;
1051         uint32_t write_domain = args->write_domain;
1052         int ret;
1053
1054         if (!(dev->driver->driver_features & DRIVER_GEM))
1055                 return -ENODEV;
1056
1057         /* Only handle setting domains to types used by the CPU. */
1058         if (write_domain & I915_GEM_GPU_DOMAINS)
1059                 return -EINVAL;
1060
1061         if (read_domains & I915_GEM_GPU_DOMAINS)
1062                 return -EINVAL;
1063
1064         /* Having something in the write domain implies it's in the read
1065          * domain, and only that read domain.  Enforce that in the request.
1066          */
1067         if (write_domain != 0 && read_domains != write_domain)
1068                 return -EINVAL;
1069
1070         ret = i915_mutex_lock_interruptible(dev);
1071         if (ret)
1072                 return ret;
1073
1074         obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
1075         if (&obj->base == NULL) {
1076                 ret = -ENOENT;
1077                 goto unlock;
1078         }
1079
1080         if (read_domains & I915_GEM_DOMAIN_GTT) {
1081                 ret = i915_gem_object_set_to_gtt_domain(obj, write_domain != 0);
1082
1083                 /* Silently promote "you're not bound, there was nothing to do"
1084                  * to success, since the client was just asking us to
1085                  * make sure everything was done.
1086                  */
1087                 if (ret == -EINVAL)
1088                         ret = 0;
1089         } else {
1090                 ret = i915_gem_object_set_to_cpu_domain(obj, write_domain != 0);
1091         }
1092
1093         drm_gem_object_unreference(&obj->base);
1094 unlock:
1095         mutex_unlock(&dev->struct_mutex);
1096         return ret;
1097 }
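
/*
 * Illustrative userspace sketch (not part of this file): moving an object
 * into the CPU domain for reading and writing before touching a CPU mmap
 * of it.  Assumes an open DRM fd `fd` and a valid `handle`; error handling
 * is omitted.
 *
 *	struct drm_i915_gem_set_domain sd = {
 *		.handle       = handle,
 *		.read_domains = I915_GEM_DOMAIN_CPU,
 *		.write_domain = I915_GEM_DOMAIN_CPU,
 *	};
 *
 *	ioctl(fd, DRM_IOCTL_I915_GEM_SET_DOMAIN, &sd);
 */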
1098
1099 /**
1100  * Called when user space has done writes to this buffer
1101  */
1102 int
1103 i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
1104                          struct drm_file *file)
1105 {
1106         struct drm_i915_gem_sw_finish *args = data;
1107         struct drm_i915_gem_object *obj;
1108         int ret = 0;
1109
1110         if (!(dev->driver->driver_features & DRIVER_GEM))
1111                 return -ENODEV;
1112
1113         ret = i915_mutex_lock_interruptible(dev);
1114         if (ret)
1115                 return ret;
1116
1117         obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
1118         if (&obj->base == NULL) {
1119                 ret = -ENOENT;
1120                 goto unlock;
1121         }
1122
1123         /* Pinned buffers may be scanout, so flush the cache */
1124         if (obj->pin_count)
1125                 i915_gem_object_flush_cpu_write_domain(obj);
1126
1127         drm_gem_object_unreference(&obj->base);
1128 unlock:
1129         mutex_unlock(&dev->struct_mutex);
1130         return ret;
1131 }
1132
1133 /**
1134  * Maps the contents of an object, returning the address it is mapped
1135  * into.
1136  *
1137  * While the mapping holds a reference on the contents of the object, it doesn't
1138  * imply a ref on the object itself.
1139  */
1140 int
1141 i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
1142                     struct drm_file *file)
1143 {
1144         struct drm_i915_private *dev_priv = dev->dev_private;
1145         struct drm_i915_gem_mmap *args = data;
1146         struct drm_gem_object *obj;
1147         unsigned long addr;
1148
1149         if (!(dev->driver->driver_features & DRIVER_GEM))
1150                 return -ENODEV;
1151
1152         obj = drm_gem_object_lookup(dev, file, args->handle);
1153         if (obj == NULL)
1154                 return -ENOENT;
1155
1156         if (obj->size > dev_priv->mm.gtt_mappable_end) {
1157                 drm_gem_object_unreference_unlocked(obj);
1158                 return -E2BIG;
1159         }
1160
1161         down_write(&current->mm->mmap_sem);
1162         addr = do_mmap(obj->filp, 0, args->size,
1163                        PROT_READ | PROT_WRITE, MAP_SHARED,
1164                        args->offset);
1165         up_write(&current->mm->mmap_sem);
1166         drm_gem_object_unreference_unlocked(obj);
1167         if (IS_ERR((void *)addr))
1168                 return addr;
1169
1170         args->addr_ptr = (uint64_t) addr;
1171
1172         return 0;
1173 }
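
/*
 * Illustrative userspace sketch (not part of this file): getting a CPU
 * mapping of an object's shmem backing store with DRM_IOCTL_I915_GEM_MMAP.
 * Assumes an open DRM fd `fd`, a valid `handle` and the object size
 * `obj_size`; error handling is omitted.
 *
 *	struct drm_i915_gem_mmap mmap_arg = {
 *		.handle = handle,
 *		.offset = 0,
 *		.size   = obj_size,
 *	};
 *
 *	ioctl(fd, DRM_IOCTL_I915_GEM_MMAP, &mmap_arg);
 *	// mmap_arg.addr_ptr now holds the CPU address of the mapping
 */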
1174
1175 #ifdef CONFIG_XEN
1176 int i915_gem_mmap(struct file *filp, struct vm_area_struct *vma)
1177 {
1178         int ret = drm_gem_mmap(filp, vma);
1179
1180         pgprot_val(vma->vm_page_prot) |= _PAGE_IOMAP;
1181
1182         return ret;
1183 }
1184 #endif
1185
1186 /**
1187  * i915_gem_fault - fault a page into the GTT
1188  * @vma: VMA in question
1189  * @vmf: fault info
1190  *
1191  * The fault handler is set up by drm_gem_mmap() when an object is GTT mapped
1192  * from userspace.  The fault handler takes care of binding the object to
1193  * the GTT (if needed), allocating and programming a fence register (again,
1194  * only if needed based on whether the old reg is still valid or the object
1195  * is tiled) and inserting a new PTE into the faulting process.
1196  *
1197  * Note that the faulting process may involve evicting existing objects
1198  * from the GTT and/or fence registers to make room.  So performance may
1199  * suffer if the GTT working set is large or there are few fence registers
1200  * left.
1201  */
1202 int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
1203 {
1204         struct drm_i915_gem_object *obj = to_intel_bo(vma->vm_private_data);
1205         struct drm_device *dev = obj->base.dev;
1206         drm_i915_private_t *dev_priv = dev->dev_private;
1207         pgoff_t page_offset;
1208         unsigned long pfn;
1209         int ret = 0;
1210         bool write = !!(vmf->flags & FAULT_FLAG_WRITE);
1211
1212         /* We don't use vmf->pgoff since that has the fake offset */
1213         page_offset = ((unsigned long)vmf->virtual_address - vma->vm_start) >>
1214                 PAGE_SHIFT;
1215
1216         ret = i915_mutex_lock_interruptible(dev);
1217         if (ret)
1218                 goto out;
1219
1220         trace_i915_gem_object_fault(obj, page_offset, true, write);
1221
1222         /* Now bind it into the GTT if needed */
1223         if (!obj->map_and_fenceable) {
1224                 ret = i915_gem_object_unbind(obj);
1225                 if (ret)
1226                         goto unlock;
1227         }
1228         if (!obj->gtt_space) {
1229                 ret = i915_gem_object_bind_to_gtt(obj, 0, true);
1230                 if (ret)
1231                         goto unlock;
1232
1233                 ret = i915_gem_object_set_to_gtt_domain(obj, write);
1234                 if (ret)
1235                         goto unlock;
1236         }
1237
1238         if (obj->tiling_mode == I915_TILING_NONE)
1239                 ret = i915_gem_object_put_fence(obj);
1240         else
1241                 ret = i915_gem_object_get_fence(obj, NULL);
1242         if (ret)
1243                 goto unlock;
1244
1245         if (i915_gem_object_is_inactive(obj))
1246                 list_move_tail(&obj->mm_list, &dev_priv->mm.inactive_list);
1247
1248         obj->fault_mappable = true;
1249
1250         pfn = ((dev->agp->base + obj->gtt_offset) >> PAGE_SHIFT) +
1251                 page_offset;
1252
1253         /* Finally, remap it using the new GTT offset */
1254         ret = vm_insert_pfn(vma, (unsigned long)vmf->virtual_address, pfn);
1255 unlock:
1256         mutex_unlock(&dev->struct_mutex);
1257 out:
1258         switch (ret) {
1259         case -EIO:
1260         case -EAGAIN:
1261                 /* Give the error handler a chance to run and move the
1262                  * objects off the GPU active list. Next time we service the
1263                  * fault, we should be able to transition the page into the
1264                  * GTT without touching the GPU (and so avoid further
1265  * EIO/EAGAIN). If the GPU is wedged, then there is no issue
1266                  * with coherency, just lost writes.
1267                  */
1268                 set_need_resched();
1269         case 0:
1270         case -ERESTARTSYS:
1271         case -EINTR:
1272                 return VM_FAULT_NOPAGE;
1273         case -ENOMEM:
1274                 return VM_FAULT_OOM;
1275         default:
1276                 return VM_FAULT_SIGBUS;
1277         }
1278 }
1279
1280 /**
1281  * i915_gem_release_mmap - remove physical page mappings
1282  * @obj: obj in question
1283  *
1284  * Preserve the reservation of the mmapping with the DRM core code, but
1285  * relinquish ownership of the pages back to the system.
1286  *
1287  * It is vital that we remove the page mapping if we have mapped a tiled
1288  * object through the GTT and then lose the fence register due to
1289  * resource pressure. Similarly if the object has been moved out of the
1290  * aperture, then pages mapped into userspace must be revoked. Removing the
1291  * mapping will then trigger a page fault on the next user access, allowing
1292  * fixup by i915_gem_fault().
1293  */
1294 void
1295 i915_gem_release_mmap(struct drm_i915_gem_object *obj)
1296 {
1297         if (!obj->fault_mappable)
1298                 return;
1299
1300         if (obj->base.dev->dev_mapping)
1301                 unmap_mapping_range(obj->base.dev->dev_mapping,
1302                                     (loff_t)obj->base.map_list.hash.key<<PAGE_SHIFT,
1303                                     obj->base.size, 1);
1304
1305         obj->fault_mappable = false;
1306 }
1307
1308 static uint32_t
1309 i915_gem_get_gtt_size(struct drm_device *dev, uint32_t size, int tiling_mode)
1310 {
1311         uint32_t gtt_size;
1312
1313         if (INTEL_INFO(dev)->gen >= 4 ||
1314             tiling_mode == I915_TILING_NONE)
1315                 return size;
1316
1317         /* Previous chips need a power-of-two fence region when tiling */
1318         if (INTEL_INFO(dev)->gen == 3)
1319                 gtt_size = 1024*1024;
1320         else
1321                 gtt_size = 512*1024;
1322
1323         while (gtt_size < size)
1324                 gtt_size <<= 1;
1325
1326         return gtt_size;
1327 }
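
/*
 * Worked example for the fence sizing above (illustrative): on gen3 a
 * 1.5 MiB tiled object starts from the 1 MiB minimum and doubles once,
 * giving a 2 MiB fence region, whereas an untiled or gen4+ object simply
 * uses its own size.
 */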
1328
1329 /**
1330  * i915_gem_get_gtt_alignment - return required GTT alignment for an object
1331  * @obj: object to check
1332  *
1333  * Return the required GTT alignment for an object, taking into account
1334  * potential fence register mapping.
1335  */
1336 static uint32_t
1337 i915_gem_get_gtt_alignment(struct drm_device *dev,
1338                            uint32_t size,
1339                            int tiling_mode)
1340 {
1341         /*
1342          * Minimum alignment is 4k (GTT page size), but might be greater
1343          * if a fence register is needed for the object.
1344          */
1345         if (INTEL_INFO(dev)->gen >= 4 ||
1346             tiling_mode == I915_TILING_NONE)
1347                 return 4096;
1348
1349         /*
1350          * Previous chips need to be aligned to the size of the smallest
1351          * fence register that can contain the object.
1352          */
1353         return i915_gem_get_gtt_size(dev, size, tiling_mode);
1354 }
1355
1356 /**
1357  * i915_gem_get_unfenced_gtt_alignment - return required GTT alignment for an
1358  *                                       unfenced object
1359  * @dev: the device
1360  * @size: size of the object
1361  * @tiling_mode: tiling mode of the object
1362  *
1363  * Return the required GTT alignment for an object, only taking into account
1364  * unfenced tiled surface requirements.
1365  */
1366 uint32_t
1367 i915_gem_get_unfenced_gtt_alignment(struct drm_device *dev,
1368                                     uint32_t size,
1369                                     int tiling_mode)
1370 {
1371         /*
1372          * Minimum alignment is 4k (GTT page size) for sane hw.
1373          */
1374         if (INTEL_INFO(dev)->gen >= 4 || IS_G33(dev) ||
1375             tiling_mode == I915_TILING_NONE)
1376                 return 4096;
1377
1378  * Previous hardware however needs to be aligned to a power-of-two
1379  * tile height. The simplest method for determining this is to reuse
1380  * the power-of-two fence size from i915_gem_get_gtt_size().
1381          */
1382         return i915_gem_get_gtt_size(dev, size, tiling_mode);
1383 }
1384
1385 int
1386 i915_gem_mmap_gtt(struct drm_file *file,
1387                   struct drm_device *dev,
1388                   uint32_t handle,
1389                   uint64_t *offset)
1390 {
1391         struct drm_i915_private *dev_priv = dev->dev_private;
1392         struct drm_i915_gem_object *obj;
1393         int ret;
1394
1395         if (!(dev->driver->driver_features & DRIVER_GEM))
1396                 return -ENODEV;
1397
1398         ret = i915_mutex_lock_interruptible(dev);
1399         if (ret)
1400                 return ret;
1401
1402         obj = to_intel_bo(drm_gem_object_lookup(dev, file, handle));
1403         if (&obj->base == NULL) {
1404                 ret = -ENOENT;
1405                 goto unlock;
1406         }
1407
1408         if (obj->base.size > dev_priv->mm.gtt_mappable_end) {
1409                 ret = -E2BIG;
1410                 goto out;
1411         }
1412
1413         if (obj->madv != I915_MADV_WILLNEED) {
1414                 DRM_ERROR("Attempting to mmap a purgeable buffer\n");
1415                 ret = -EINVAL;
1416                 goto out;
1417         }
1418
1419         if (!obj->base.map_list.map) {
1420                 ret = drm_gem_create_mmap_offset(&obj->base);
1421                 if (ret)
1422                         goto out;
1423         }
1424
1425         *offset = (u64)obj->base.map_list.hash.key << PAGE_SHIFT;
1426
1427 out:
1428         drm_gem_object_unreference(&obj->base);
1429 unlock:
1430         mutex_unlock(&dev->struct_mutex);
1431         return ret;
1432 }
1433
1434 /**
1435  * i915_gem_mmap_gtt_ioctl - prepare an object for GTT mmap'ing
1436  * @dev: DRM device
1437  * @data: GTT mapping ioctl data
1438  * @file: GEM object info
1439  *
1440  * Simply returns the fake offset to userspace so it can mmap it.
1441  * The mmap call will end up in drm_gem_mmap(), which will set things
1442  * up so we can get faults in the handler above.
1443  *
1444  * The fault handler will take care of binding the object into the GTT
1445  * (since it may have been evicted to make room for something), allocating
1446  * a fence register, and mapping the appropriate aperture address into
1447  * userspace.
1448  */
1449 int
1450 i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data,
1451                         struct drm_file *file)
1452 {
1453         struct drm_i915_gem_mmap_gtt *args = data;
1454
1455         if (!(dev->driver->driver_features & DRIVER_GEM))
1456                 return -ENODEV;
1457
1458         return i915_gem_mmap_gtt(file, dev, args->handle, &args->offset);
1459 }
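
/*
 * Illustrative userspace sketch (not part of this file): mapping an object
 * through the GTT.  The ioctl only returns the fake offset; the actual
 * mapping is created by mmap()ing the DRM fd at that offset, and the first
 * access then faults into i915_gem_fault() above.  Assumes an open DRM fd
 * `fd`, a valid `handle` and the object size `obj_size`; error handling is
 * omitted.
 *
 *	struct drm_i915_gem_mmap_gtt mmap_gtt = { .handle = handle };
 *	void *ptr;
 *
 *	ioctl(fd, DRM_IOCTL_I915_GEM_MMAP_GTT, &mmap_gtt);
 *	ptr = mmap(NULL, obj_size, PROT_READ | PROT_WRITE, MAP_SHARED,
 *		   fd, mmap_gtt.offset);
 */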
1460
1461
1462 static int
1463 i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj,
1464                               gfp_t gfpmask)
1465 {
1466         int page_count, i;
1467         struct address_space *mapping;
1468         struct inode *inode;
1469         struct page *page;
1470
1471         /* Get the list of pages out of our struct file.  They'll be pinned
1472          * at this point until we release them.
1473          */
1474         page_count = obj->base.size / PAGE_SIZE;
1475         BUG_ON(obj->pages != NULL);
1476         obj->pages = drm_malloc_ab(page_count, sizeof(struct page *));
1477         if (obj->pages == NULL)
1478                 return -ENOMEM;
1479
1480         inode = obj->base.filp->f_path.dentry->d_inode;
1481         mapping = inode->i_mapping;
1482         gfpmask |= mapping_gfp_mask(mapping);
1483
1484         for (i = 0; i < page_count; i++) {
1485                 page = shmem_read_mapping_page_gfp(mapping, i, gfpmask);
1486                 if (IS_ERR(page))
1487                         goto err_pages;
1488
1489                 obj->pages[i] = page;
1490         }
1491
1492         if (i915_gem_object_needs_bit17_swizzle(obj))
1493                 i915_gem_object_do_bit_17_swizzle(obj);
1494
1495         return 0;
1496
1497 err_pages:
1498         while (i--)
1499                 page_cache_release(obj->pages[i]);
1500
1501         drm_free_large(obj->pages);
1502         obj->pages = NULL;
1503         return PTR_ERR(page);
1504 }
1505
1506 static void
1507 i915_gem_object_put_pages_gtt(struct drm_i915_gem_object *obj)
1508 {
1509         int page_count = obj->base.size / PAGE_SIZE;
1510         int i;
1511
1512         BUG_ON(obj->madv == __I915_MADV_PURGED);
1513
1514         if (i915_gem_object_needs_bit17_swizzle(obj))
1515                 i915_gem_object_save_bit_17_swizzle(obj);
1516
1517         if (obj->madv == I915_MADV_DONTNEED)
1518                 obj->dirty = 0;
1519
1520         for (i = 0; i < page_count; i++) {
1521                 if (obj->dirty)
1522                         set_page_dirty(obj->pages[i]);
1523
1524                 if (obj->madv == I915_MADV_WILLNEED)
1525                         mark_page_accessed(obj->pages[i]);
1526
1527                 page_cache_release(obj->pages[i]);
1528         }
1529         obj->dirty = 0;
1530
1531         drm_free_large(obj->pages);
1532         obj->pages = NULL;
1533 }
1534
1535 void
1536 i915_gem_object_move_to_active(struct drm_i915_gem_object *obj,
1537                                struct intel_ring_buffer *ring,
1538                                u32 seqno)
1539 {
1540         struct drm_device *dev = obj->base.dev;
1541         struct drm_i915_private *dev_priv = dev->dev_private;
1542
1543         BUG_ON(ring == NULL);
1544         obj->ring = ring;
1545
1546         /* Add a reference if we're newly entering the active list. */
1547         if (!obj->active) {
1548                 drm_gem_object_reference(&obj->base);
1549                 obj->active = 1;
1550         }
1551
1552         /* Move from whatever list we were on to the tail of execution. */
1553         list_move_tail(&obj->mm_list, &dev_priv->mm.active_list);
1554         list_move_tail(&obj->ring_list, &ring->active_list);
1555
1556         obj->last_rendering_seqno = seqno;
1557         if (obj->fenced_gpu_access) {
1558                 struct drm_i915_fence_reg *reg;
1559
1560                 BUG_ON(obj->fence_reg == I915_FENCE_REG_NONE);
1561
1562                 obj->last_fenced_seqno = seqno;
1563                 obj->last_fenced_ring = ring;
1564
1565                 reg = &dev_priv->fence_regs[obj->fence_reg];
1566                 list_move_tail(&reg->lru_list, &dev_priv->mm.fence_list);
1567         }
1568 }
1569
1570 static void
1571 i915_gem_object_move_off_active(struct drm_i915_gem_object *obj)
1572 {
1573         list_del_init(&obj->ring_list);
1574         obj->last_rendering_seqno = 0;
1575 }
1576
1577 static void
1578 i915_gem_object_move_to_flushing(struct drm_i915_gem_object *obj)
1579 {
1580         struct drm_device *dev = obj->base.dev;
1581         drm_i915_private_t *dev_priv = dev->dev_private;
1582
1583         BUG_ON(!obj->active);
1584         list_move_tail(&obj->mm_list, &dev_priv->mm.flushing_list);
1585
1586         i915_gem_object_move_off_active(obj);
1587 }
1588
1589 static void
1590 i915_gem_object_move_to_inactive(struct drm_i915_gem_object *obj)
1591 {
1592         struct drm_device *dev = obj->base.dev;
1593         struct drm_i915_private *dev_priv = dev->dev_private;
1594
1595         if (obj->pin_count != 0)
1596                 list_move_tail(&obj->mm_list, &dev_priv->mm.pinned_list);
1597         else
1598                 list_move_tail(&obj->mm_list, &dev_priv->mm.inactive_list);
1599
1600         BUG_ON(!list_empty(&obj->gpu_write_list));
1601         BUG_ON(!obj->active);
1602         obj->ring = NULL;
1603
1604         i915_gem_object_move_off_active(obj);
1605         obj->fenced_gpu_access = false;
1606
1607         obj->active = 0;
1608         obj->pending_gpu_write = false;
1609         drm_gem_object_unreference(&obj->base);
1610
1611         WARN_ON(i915_verify_lists(dev));
1612 }
1613
1614 /* Immediately discard the backing storage */
1615 static void
1616 i915_gem_object_truncate(struct drm_i915_gem_object *obj)
1617 {
1618         struct inode *inode;
1619
1620         /* Our goal here is to return as much of the memory back to the
1621          * system as possible, since we may be called from the OOM path.
1622          * To do this we must instruct the shmfs to drop all of its
1623          * backing pages, *now*.
1624          */
1625         inode = obj->base.filp->f_path.dentry->d_inode;
1626         shmem_truncate_range(inode, 0, (loff_t)-1);
1627
1628         obj->madv = __I915_MADV_PURGED;
1629 }
1630
1631 static inline int
1632 i915_gem_object_is_purgeable(struct drm_i915_gem_object *obj)
1633 {
1634         return obj->madv == I915_MADV_DONTNEED;
1635 }
1636
1637 static void
1638 i915_gem_process_flushing_list(struct intel_ring_buffer *ring,
1639                                uint32_t flush_domains)
1640 {
1641         struct drm_i915_gem_object *obj, *next;
1642
1643         list_for_each_entry_safe(obj, next,
1644                                  &ring->gpu_write_list,
1645                                  gpu_write_list) {
1646                 if (obj->base.write_domain & flush_domains) {
1647                         uint32_t old_write_domain = obj->base.write_domain;
1648
1649                         obj->base.write_domain = 0;
1650                         list_del_init(&obj->gpu_write_list);
1651                         i915_gem_object_move_to_active(obj, ring,
1652                                                        i915_gem_next_request_seqno(ring));
1653
1654                         trace_i915_gem_object_change_domain(obj,
1655                                                             obj->base.read_domains,
1656                                                             old_write_domain);
1657                 }
1658         }
1659 }
1660
1661 int
1662 i915_add_request(struct intel_ring_buffer *ring,
1663                  struct drm_file *file,
1664                  struct drm_i915_gem_request *request)
1665 {
1666         drm_i915_private_t *dev_priv = ring->dev->dev_private;
1667         uint32_t seqno;
1668         int was_empty;
1669         int ret;
1670
1671         BUG_ON(request == NULL);
1672
1673         ret = ring->add_request(ring, &seqno);
1674         if (ret)
1675             return ret;
1676
1677         trace_i915_gem_request_add(ring, seqno);
1678
1679         request->seqno = seqno;
1680         request->ring = ring;
1681         request->emitted_jiffies = jiffies;
1682         was_empty = list_empty(&ring->request_list);
1683         list_add_tail(&request->list, &ring->request_list);
1684
1685         if (file) {
1686                 struct drm_i915_file_private *file_priv = file->driver_priv;
1687
1688                 spin_lock(&file_priv->mm.lock);
1689                 request->file_priv = file_priv;
1690                 list_add_tail(&request->client_list,
1691                               &file_priv->mm.request_list);
1692                 spin_unlock(&file_priv->mm.lock);
1693         }
1694
1695         ring->outstanding_lazy_request = false;
1696
1697         if (!dev_priv->mm.suspended) {
1698                 if (i915_enable_hangcheck) {
1699                         mod_timer(&dev_priv->hangcheck_timer,
1700                                   jiffies +
1701                                   msecs_to_jiffies(DRM_I915_HANGCHECK_PERIOD));
1702                 }
1703                 if (was_empty)
1704                         queue_delayed_work(dev_priv->wq,
1705                                            &dev_priv->mm.retire_work, HZ);
1706         }
1707         return 0;
1708 }
1709
1710 static inline void
1711 i915_gem_request_remove_from_client(struct drm_i915_gem_request *request)
1712 {
1713         struct drm_i915_file_private *file_priv = request->file_priv;
1714
1715         if (!file_priv)
1716                 return;
1717
1718         spin_lock(&file_priv->mm.lock);
1719         if (request->file_priv) {
1720                 list_del(&request->client_list);
1721                 request->file_priv = NULL;
1722         }
1723         spin_unlock(&file_priv->mm.lock);
1724 }
1725
1726 static void i915_gem_reset_ring_lists(struct drm_i915_private *dev_priv,
1727                                       struct intel_ring_buffer *ring)
1728 {
1729         while (!list_empty(&ring->request_list)) {
1730                 struct drm_i915_gem_request *request;
1731
1732                 request = list_first_entry(&ring->request_list,
1733                                            struct drm_i915_gem_request,
1734                                            list);
1735
1736                 list_del(&request->list);
1737                 i915_gem_request_remove_from_client(request);
1738                 kfree(request);
1739         }
1740
1741         while (!list_empty(&ring->active_list)) {
1742                 struct drm_i915_gem_object *obj;
1743
1744                 obj = list_first_entry(&ring->active_list,
1745                                        struct drm_i915_gem_object,
1746                                        ring_list);
1747
1748                 obj->base.write_domain = 0;
1749                 list_del_init(&obj->gpu_write_list);
1750                 i915_gem_object_move_to_inactive(obj);
1751         }
1752 }
1753
1754 static void i915_gem_reset_fences(struct drm_device *dev)
1755 {
1756         struct drm_i915_private *dev_priv = dev->dev_private;
1757         int i;
1758
1759         for (i = 0; i < dev_priv->num_fence_regs; i++) {
1760                 struct drm_i915_fence_reg *reg = &dev_priv->fence_regs[i];
1761                 struct drm_i915_gem_object *obj = reg->obj;
1762
1763                 if (!obj)
1764                         continue;
1765
1766                 if (obj->tiling_mode)
1767                         i915_gem_release_mmap(obj);
1768
1769                 reg->obj->fence_reg = I915_FENCE_REG_NONE;
1770                 reg->obj->fenced_gpu_access = false;
1771                 reg->obj->last_fenced_seqno = 0;
1772                 reg->obj->last_fenced_ring = NULL;
1773                 i915_gem_clear_fence_reg(dev, reg);
1774         }
1775 }
1776
1777 void i915_gem_reset(struct drm_device *dev)
1778 {
1779         struct drm_i915_private *dev_priv = dev->dev_private;
1780         struct drm_i915_gem_object *obj;
1781         int i;
1782
1783         for (i = 0; i < I915_NUM_RINGS; i++)
1784                 i915_gem_reset_ring_lists(dev_priv, &dev_priv->ring[i]);
1785
1786         /* Remove anything from the flushing lists. The GPU cache is likely
1787          * to be lost on reset along with the data, so simply move the
1788          * lost bo to the inactive list.
1789          */
1790         while (!list_empty(&dev_priv->mm.flushing_list)) {
1791                 obj = list_first_entry(&dev_priv->mm.flushing_list,
1792                                       struct drm_i915_gem_object,
1793                                       mm_list);
1794
1795                 obj->base.write_domain = 0;
1796                 list_del_init(&obj->gpu_write_list);
1797                 i915_gem_object_move_to_inactive(obj);
1798         }
1799
1800         /* Move everything out of the GPU domains to ensure we do any
1801          * necessary invalidation upon reuse.
1802          */
1803         list_for_each_entry(obj,
1804                             &dev_priv->mm.inactive_list,
1805                             mm_list)
1806         {
1807                 obj->base.read_domains &= ~I915_GEM_GPU_DOMAINS;
1808         }
1809
1810         /* The fence registers are invalidated so clear them out */
1811         i915_gem_reset_fences(dev);
1812 }
1813
1814 /**
1815  * This function clears the request list as sequence numbers are passed.
1816  */
1817 static void
1818 i915_gem_retire_requests_ring(struct intel_ring_buffer *ring)
1819 {
1820         uint32_t seqno;
1821         int i;
1822
1823         if (list_empty(&ring->request_list))
1824                 return;
1825
1826         WARN_ON(i915_verify_lists(ring->dev));
1827
1828         seqno = ring->get_seqno(ring);
1829
1830         for (i = 0; i < ARRAY_SIZE(ring->sync_seqno); i++)
1831                 if (seqno >= ring->sync_seqno[i])
1832                         ring->sync_seqno[i] = 0;
1833
1834         while (!list_empty(&ring->request_list)) {
1835                 struct drm_i915_gem_request *request;
1836
1837                 request = list_first_entry(&ring->request_list,
1838                                            struct drm_i915_gem_request,
1839                                            list);
1840
1841                 if (!i915_seqno_passed(seqno, request->seqno))
1842                         break;
1843
1844                 trace_i915_gem_request_retire(ring, request->seqno);
1845
1846                 list_del(&request->list);
1847                 i915_gem_request_remove_from_client(request);
1848                 kfree(request);
1849         }
1850
1851         /* Move any buffers on the active list that are no longer referenced
1852          * by the ringbuffer to the flushing/inactive lists as appropriate.
1853          */
1854         while (!list_empty(&ring->active_list)) {
1855                 struct drm_i915_gem_object *obj;
1856
1857                 obj = list_first_entry(&ring->active_list,
1858                                       struct drm_i915_gem_object,
1859                                       ring_list);
1860
1861                 if (!i915_seqno_passed(seqno, obj->last_rendering_seqno))
1862                         break;
1863
1864                 if (obj->base.write_domain != 0)
1865                         i915_gem_object_move_to_flushing(obj);
1866                 else
1867                         i915_gem_object_move_to_inactive(obj);
1868         }
1869
1870         if (unlikely(ring->trace_irq_seqno &&
1871                      i915_seqno_passed(seqno, ring->trace_irq_seqno))) {
1872                 ring->irq_put(ring);
1873                 ring->trace_irq_seqno = 0;
1874         }
1875
1876         WARN_ON(i915_verify_lists(ring->dev));
1877 }
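/*
 * Editor's sketch (illustrative only): the retire loop above depends on
 * i915_seqno_passed() being safe against 32-bit seqno wraparound.  The real
 * helper lives in i915_drv.h; a comparison along these lines, treating the
 * difference as signed, gives the intended ordering even across a wrap:
 *
 *	static inline bool seqno_passed(uint32_t seq1, uint32_t seq2)
 *	{
 *		return (int32_t)(seq1 - seq2) >= 0;
 *	}
 *
 * e.g. seq1 = 0x00000002 and seq2 = 0xfffffffe give a small positive signed
 * difference, so a request emitted just before the wrap still retires.
 */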
1878
1879 void
1880 i915_gem_retire_requests(struct drm_device *dev)
1881 {
1882         drm_i915_private_t *dev_priv = dev->dev_private;
1883         int i;
1884
1885         if (!list_empty(&dev_priv->mm.deferred_free_list)) {
1886             struct drm_i915_gem_object *obj, *next;
1887
1888             /* We must be careful that during unbind() we do not
1889              * accidentally infinitely recurse into retire requests.
1890              * Currently:
1891              *   retire -> free -> unbind -> wait -> retire_ring
1892              */
1893             list_for_each_entry_safe(obj, next,
1894                                      &dev_priv->mm.deferred_free_list,
1895                                      mm_list)
1896                     i915_gem_free_object_tail(obj);
1897         }
1898
1899         for (i = 0; i < I915_NUM_RINGS; i++)
1900                 i915_gem_retire_requests_ring(&dev_priv->ring[i]);
1901 }
1902
1903 static void
1904 i915_gem_retire_work_handler(struct work_struct *work)
1905 {
1906         drm_i915_private_t *dev_priv;
1907         struct drm_device *dev;
1908         bool idle;
1909         int i;
1910
1911         dev_priv = container_of(work, drm_i915_private_t,
1912                                 mm.retire_work.work);
1913         dev = dev_priv->dev;
1914
1915         /* Come back later if the device is busy... */
1916         if (!mutex_trylock(&dev->struct_mutex)) {
1917                 queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, HZ);
1918                 return;
1919         }
1920
1921         i915_gem_retire_requests(dev);
1922
1923         /* Send a periodic flush down the ring so we don't hold onto GEM
1924          * objects indefinitely.
1925          */
1926         idle = true;
1927         for (i = 0; i < I915_NUM_RINGS; i++) {
1928                 struct intel_ring_buffer *ring = &dev_priv->ring[i];
1929
1930                 if (!list_empty(&ring->gpu_write_list)) {
1931                         struct drm_i915_gem_request *request;
1932                         int ret;
1933
1934                         ret = i915_gem_flush_ring(ring,
1935                                                   0, I915_GEM_GPU_DOMAINS);
1936                         request = kzalloc(sizeof(*request), GFP_KERNEL);
1937                         if (ret || request == NULL ||
1938                             i915_add_request(ring, NULL, request))
1939                             kfree(request);
1940                 }
1941
1942                 idle &= list_empty(&ring->request_list);
1943         }
1944
1945         if (!dev_priv->mm.suspended && !idle)
1946                 queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, HZ);
1947
1948         mutex_unlock(&dev->struct_mutex);
1949 }
1950
1951 /**
1952  * Waits for a sequence number to be signaled, and cleans up the
1953  * request and object lists appropriately for that event.
1954  */
1955 int
1956 i915_wait_request(struct intel_ring_buffer *ring,
1957                   uint32_t seqno)
1958 {
1959         drm_i915_private_t *dev_priv = ring->dev->dev_private;
1960         u32 ier;
1961         int ret = 0;
1962
1963         BUG_ON(seqno == 0);
1964
1965         if (atomic_read(&dev_priv->mm.wedged)) {
1966                 struct completion *x = &dev_priv->error_completion;
1967                 bool recovery_complete;
1968                 unsigned long flags;
1969
1970                 /* Give the error handler a chance to run. */
1971                 spin_lock_irqsave(&x->wait.lock, flags);
1972                 recovery_complete = x->done > 0;
1973                 spin_unlock_irqrestore(&x->wait.lock, flags);
1974
1975                 return recovery_complete ? -EIO : -EAGAIN;
1976         }
1977
1978         if (seqno == ring->outstanding_lazy_request) {
1979                 struct drm_i915_gem_request *request;
1980
1981                 request = kzalloc(sizeof(*request), GFP_KERNEL);
1982                 if (request == NULL)
1983                         return -ENOMEM;
1984
1985                 ret = i915_add_request(ring, NULL, request);
1986                 if (ret) {
1987                         kfree(request);
1988                         return ret;
1989                 }
1990
1991                 seqno = request->seqno;
1992         }
1993
1994         if (!i915_seqno_passed(ring->get_seqno(ring), seqno)) {
1995                 if (HAS_PCH_SPLIT(ring->dev))
1996                         ier = I915_READ(DEIER) | I915_READ(GTIER);
1997                 else
1998                         ier = I915_READ(IER);
1999                 if (!ier) {
2000                         DRM_ERROR("something (likely vbetool) disabled "
2001                                   "interrupts, re-enabling\n");
2002                         ring->dev->driver->irq_preinstall(ring->dev);
2003                         ring->dev->driver->irq_postinstall(ring->dev);
2004                 }
2005
2006                 trace_i915_gem_request_wait_begin(ring, seqno);
2007
2008                 ring->waiting_seqno = seqno;
2009                 if (ring->irq_get(ring)) {
2010                         if (dev_priv->mm.interruptible)
2011                                 ret = wait_event_interruptible(ring->irq_queue,
2012                                                                i915_seqno_passed(ring->get_seqno(ring), seqno)
2013                                                                || atomic_read(&dev_priv->mm.wedged));
2014                         else
2015                                 wait_event(ring->irq_queue,
2016                                            i915_seqno_passed(ring->get_seqno(ring), seqno)
2017                                            || atomic_read(&dev_priv->mm.wedged));
2018
2019                         ring->irq_put(ring);
2020                 } else if (wait_for_atomic(i915_seqno_passed(ring->get_seqno(ring),
2021                                                              seqno) ||
2022                                            atomic_read(&dev_priv->mm.wedged), 3000))
2023                         ret = -EBUSY;
2024                 ring->waiting_seqno = 0;
2025
2026                 trace_i915_gem_request_wait_end(ring, seqno);
2027         }
2028         if (atomic_read(&dev_priv->mm.wedged))
2029                 ret = -EAGAIN;
2030
2031         if (ret && ret != -ERESTARTSYS)
2032                 DRM_ERROR("%s returns %d (awaiting %d at %d, next %d)\n",
2033                           __func__, ret, seqno, ring->get_seqno(ring),
2034                           dev_priv->next_seqno);
2035
2036         /* Directly dispatch request retiring.  While we have the work queue
2037          * to handle this, the waiter on a request often wants an associated
2038          * buffer to have made it to the inactive list, and we would need
2039          * a separate wait queue to handle that.
2040          */
2041         if (ret == 0)
2042                 i915_gem_retire_requests_ring(ring);
2043
2044         return ret;
2045 }
2046
2047 /**
2048  * Ensures that all rendering to the object has completed and the object is
2049  * safe to unbind from the GTT or access from the CPU.
2050  */
2051 int
2052 i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj)
2053 {
2054         int ret;
2055
2056         /* This function only exists to support waiting for existing rendering,
2057          * not for emitting required flushes.
2058          */
2059         BUG_ON((obj->base.write_domain & I915_GEM_GPU_DOMAINS) != 0);
2060
2061         /* If there is rendering queued on the buffer being evicted, wait for
2062          * it.
2063          */
2064         if (obj->active) {
2065                 ret = i915_wait_request(obj->ring, obj->last_rendering_seqno);
2066                 if (ret)
2067                         return ret;
2068         }
2069
2070         return 0;
2071 }
2072
2073 static void i915_gem_object_finish_gtt(struct drm_i915_gem_object *obj)
2074 {
2075         u32 old_write_domain, old_read_domains;
2076
2077         /* Act as a barrier for all accesses through the GTT */
2078         mb();
2079
2080         /* Force a pagefault for domain tracking on next user access */
2081         i915_gem_release_mmap(obj);
2082
2083         if ((obj->base.read_domains & I915_GEM_DOMAIN_GTT) == 0)
2084                 return;
2085
2086         old_read_domains = obj->base.read_domains;
2087         old_write_domain = obj->base.write_domain;
2088
2089         obj->base.read_domains &= ~I915_GEM_DOMAIN_GTT;
2090         obj->base.write_domain &= ~I915_GEM_DOMAIN_GTT;
2091
2092         trace_i915_gem_object_change_domain(obj,
2093                                             old_read_domains,
2094                                             old_write_domain);
2095 }
2096
2097 /**
2098  * Unbinds an object from the GTT aperture.
2099  */
2100 int
2101 i915_gem_object_unbind(struct drm_i915_gem_object *obj)
2102 {
2103         int ret = 0;
2104
2105         if (obj->gtt_space == NULL)
2106                 return 0;
2107
2108         if (obj->pin_count != 0) {
2109                 DRM_ERROR("Attempting to unbind pinned buffer\n");
2110                 return -EINVAL;
2111         }
2112
2113         ret = i915_gem_object_finish_gpu(obj);
2114         if (ret == -ERESTARTSYS)
2115                 return ret;
2116         /* Continue on if we fail due to EIO, the GPU is hung so we
2117          * should be safe and we need to cleanup or else we might
2118          * cause memory corruption through use-after-free.
2119          */
2120
2121         i915_gem_object_finish_gtt(obj);
2122
2123         /* Move the object to the CPU domain to ensure that
2124          * any possible CPU writes while it's not in the GTT
2125          * are flushed when we go to remap it.
2126          */
2127         if (ret == 0)
2128                 ret = i915_gem_object_set_to_cpu_domain(obj, 1);
2129         if (ret == -ERESTARTSYS)
2130                 return ret;
2131         if (ret) {
2132                 /* In the event of a disaster, abandon all caches and
2133                  * hope for the best.
2134                  */
2135                 i915_gem_clflush_object(obj);
2136                 obj->base.read_domains = obj->base.write_domain = I915_GEM_DOMAIN_CPU;
2137         }
2138
2139         /* release the fence reg _after_ flushing */
2140         ret = i915_gem_object_put_fence(obj);
2141         if (ret == -ERESTARTSYS)
2142                 return ret;
2143
2144         trace_i915_gem_object_unbind(obj);
2145
2146         i915_gem_gtt_unbind_object(obj);
2147         i915_gem_object_put_pages_gtt(obj);
2148
2149         list_del_init(&obj->gtt_list);
2150         list_del_init(&obj->mm_list);
2151         /* Avoid an unnecessary call to unbind on rebind. */
2152         obj->map_and_fenceable = true;
2153
2154         drm_mm_put_block(obj->gtt_space);
2155         obj->gtt_space = NULL;
2156         obj->gtt_offset = 0;
2157
2158         if (i915_gem_object_is_purgeable(obj))
2159                 i915_gem_object_truncate(obj);
2160
2161         return ret;
2162 }
2163
2164 int
2165 i915_gem_flush_ring(struct intel_ring_buffer *ring,
2166                     uint32_t invalidate_domains,
2167                     uint32_t flush_domains)
2168 {
2169         int ret;
2170
2171         if (((invalidate_domains | flush_domains) & I915_GEM_GPU_DOMAINS) == 0)
2172                 return 0;
2173
2174         trace_i915_gem_ring_flush(ring, invalidate_domains, flush_domains);
2175
2176         ret = ring->flush(ring, invalidate_domains, flush_domains);
2177         if (ret)
2178                 return ret;
2179
2180         if (flush_domains & I915_GEM_GPU_DOMAINS)
2181                 i915_gem_process_flushing_list(ring, flush_domains);
2182
2183         return 0;
2184 }
2185
2186 static int i915_ring_idle(struct intel_ring_buffer *ring)
2187 {
2188         int ret;
2189
2190         if (list_empty(&ring->gpu_write_list) && list_empty(&ring->active_list))
2191                 return 0;
2192
2193         if (!list_empty(&ring->gpu_write_list)) {
2194                 ret = i915_gem_flush_ring(ring,
2195                                     I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS);
2196                 if (ret)
2197                         return ret;
2198         }
2199
2200         return i915_wait_request(ring, i915_gem_next_request_seqno(ring));
2201 }
2202
2203 int
2204 i915_gpu_idle(struct drm_device *dev)
2205 {
2206         drm_i915_private_t *dev_priv = dev->dev_private;
2207         int ret, i;
2208
2209         /* Flush everything onto the inactive list. */
2210         for (i = 0; i < I915_NUM_RINGS; i++) {
2211                 ret = i915_ring_idle(&dev_priv->ring[i]);
2212                 if (ret)
2213                         return ret;
2214         }
2215
2216         return 0;
2217 }
2218
2219 static int sandybridge_write_fence_reg(struct drm_i915_gem_object *obj,
2220                                        struct intel_ring_buffer *pipelined)
2221 {
2222         struct drm_device *dev = obj->base.dev;
2223         drm_i915_private_t *dev_priv = dev->dev_private;
2224         u32 size = obj->gtt_space->size;
2225         int regnum = obj->fence_reg;
2226         uint64_t val;
2227
2228         val = (uint64_t)((obj->gtt_offset + size - 4096) &
2229                          0xfffff000) << 32;
2230         val |= obj->gtt_offset & 0xfffff000;
2231         val |= (uint64_t)((obj->stride / 128) - 1) <<
2232                 SANDYBRIDGE_FENCE_PITCH_SHIFT;
2233
2234         if (obj->tiling_mode == I915_TILING_Y)
2235                 val |= 1 << I965_FENCE_TILING_Y_SHIFT;
2236         val |= I965_FENCE_REG_VALID;
2237
2238         if (pipelined) {
2239                 int ret = intel_ring_begin(pipelined, 6);
2240                 if (ret)
2241                         return ret;
2242
2243                 intel_ring_emit(pipelined, MI_NOOP);
2244                 intel_ring_emit(pipelined, MI_LOAD_REGISTER_IMM(2));
2245                 intel_ring_emit(pipelined, FENCE_REG_SANDYBRIDGE_0 + regnum*8);
2246                 intel_ring_emit(pipelined, (u32)val);
2247                 intel_ring_emit(pipelined, FENCE_REG_SANDYBRIDGE_0 + regnum*8 + 4);
2248                 intel_ring_emit(pipelined, (u32)(val >> 32));
2249                 intel_ring_advance(pipelined);
2250         } else
2251                 I915_WRITE64(FENCE_REG_SANDYBRIDGE_0 + regnum * 8, val);
2252
2253         return 0;
2254 }
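/*
 * Editor's note (illustrative arithmetic only): for a hypothetical tiled
 * object at gtt_offset 0x00100000, of size 0x00100000 bytes and with a
 * stride of 4096 bytes, the packing above yields
 *
 *	end   = (0x00100000 + 0x00100000 - 4096) & 0xfffff000 = 0x001ff000
 *	pitch = (4096 / 128) - 1                              = 31
 *
 * with the end address placed in the upper dword, the start address in the
 * lower dword, and the pitch shifted by SANDYBRIDGE_FENCE_PITCH_SHIFT.
 */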
2255
2256 static int i965_write_fence_reg(struct drm_i915_gem_object *obj,
2257                                 struct intel_ring_buffer *pipelined)
2258 {
2259         struct drm_device *dev = obj->base.dev;
2260         drm_i915_private_t *dev_priv = dev->dev_private;
2261         u32 size = obj->gtt_space->size;
2262         int regnum = obj->fence_reg;
2263         uint64_t val;
2264
2265         val = (uint64_t)((obj->gtt_offset + size - 4096) &
2266                     0xfffff000) << 32;
2267         val |= obj->gtt_offset & 0xfffff000;
2268         val |= ((obj->stride / 128) - 1) << I965_FENCE_PITCH_SHIFT;
2269         if (obj->tiling_mode == I915_TILING_Y)
2270                 val |= 1 << I965_FENCE_TILING_Y_SHIFT;
2271         val |= I965_FENCE_REG_VALID;
2272
2273         if (pipelined) {
2274                 int ret = intel_ring_begin(pipelined, 6);
2275                 if (ret)
2276                         return ret;
2277
2278                 intel_ring_emit(pipelined, MI_NOOP);
2279                 intel_ring_emit(pipelined, MI_LOAD_REGISTER_IMM(2));
2280                 intel_ring_emit(pipelined, FENCE_REG_965_0 + regnum*8);
2281                 intel_ring_emit(pipelined, (u32)val);
2282                 intel_ring_emit(pipelined, FENCE_REG_965_0 + regnum*8 + 4);
2283                 intel_ring_emit(pipelined, (u32)(val >> 32));
2284                 intel_ring_advance(pipelined);
2285         } else
2286                 I915_WRITE64(FENCE_REG_965_0 + regnum * 8, val);
2287
2288         return 0;
2289 }
2290
2291 static int i915_write_fence_reg(struct drm_i915_gem_object *obj,
2292                                 struct intel_ring_buffer *pipelined)
2293 {
2294         struct drm_device *dev = obj->base.dev;
2295         drm_i915_private_t *dev_priv = dev->dev_private;
2296         u32 size = obj->gtt_space->size;
2297         u32 fence_reg, val, pitch_val;
2298         int tile_width;
2299
2300         if (WARN((obj->gtt_offset & ~I915_FENCE_START_MASK) ||
2301                  (size & -size) != size ||
2302                  (obj->gtt_offset & (size - 1)),
2303                  "object 0x%08x [fenceable? %d] not 1M or pot-size (0x%08x) aligned\n",
2304                  obj->gtt_offset, obj->map_and_fenceable, size))
2305                 return -EINVAL;
2306
2307         if (obj->tiling_mode == I915_TILING_Y && HAS_128_BYTE_Y_TILING(dev))
2308                 tile_width = 128;
2309         else
2310                 tile_width = 512;
2311
2312         /* Note: the pitch must be a power-of-two number of tile widths */
2313         pitch_val = obj->stride / tile_width;
2314         pitch_val = ffs(pitch_val) - 1;
2315
2316         val = obj->gtt_offset;
2317         if (obj->tiling_mode == I915_TILING_Y)
2318                 val |= 1 << I830_FENCE_TILING_Y_SHIFT;
2319         val |= I915_FENCE_SIZE_BITS(size);
2320         val |= pitch_val << I830_FENCE_PITCH_SHIFT;
2321         val |= I830_FENCE_REG_VALID;
2322
2323         fence_reg = obj->fence_reg;
2324         if (fence_reg < 8)
2325                 fence_reg = FENCE_REG_830_0 + fence_reg * 4;
2326         else
2327                 fence_reg = FENCE_REG_945_8 + (fence_reg - 8) * 4;
2328
2329         if (pipelined) {
2330                 int ret = intel_ring_begin(pipelined, 4);
2331                 if (ret)
2332                         return ret;
2333
2334                 intel_ring_emit(pipelined, MI_NOOP);
2335                 intel_ring_emit(pipelined, MI_LOAD_REGISTER_IMM(1));
2336                 intel_ring_emit(pipelined, fence_reg);
2337                 intel_ring_emit(pipelined, val);
2338                 intel_ring_advance(pipelined);
2339         } else
2340                 I915_WRITE(fence_reg, val);
2341
2342         return 0;
2343 }
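/*
 * Editor's note (illustrative arithmetic only): with X tiling on these parts
 * the tile width is 512 bytes, so for a hypothetical stride of 2048 bytes
 * the code above computes
 *
 *	pitch_val = 2048 / 512 = 4;
 *	pitch_val = ffs(4) - 1 = 2;	 i.e. log2(stride / tile_width)
 *
 * which is the pitch encoded as a power-of-two number of tile widths, as the
 * note above requires, placed in the I830_FENCE_PITCH_SHIFT field.
 */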
2344
2345 static int i830_write_fence_reg(struct drm_i915_gem_object *obj,
2346                                 struct intel_ring_buffer *pipelined)
2347 {
2348         struct drm_device *dev = obj->base.dev;
2349         drm_i915_private_t *dev_priv = dev->dev_private;
2350         u32 size = obj->gtt_space->size;
2351         int regnum = obj->fence_reg;
2352         uint32_t val;
2353         uint32_t pitch_val;
2354
2355         if (WARN((obj->gtt_offset & ~I830_FENCE_START_MASK) ||
2356                  (size & -size) != size ||
2357                  (obj->gtt_offset & (size - 1)),
2358                  "object 0x%08x not 512K or pot-size 0x%08x aligned\n",
2359                  obj->gtt_offset, size))
2360                 return -EINVAL;
2361
2362         pitch_val = obj->stride / 128;
2363         pitch_val = ffs(pitch_val) - 1;
2364
2365         val = obj->gtt_offset;
2366         if (obj->tiling_mode == I915_TILING_Y)
2367                 val |= 1 << I830_FENCE_TILING_Y_SHIFT;
2368         val |= I830_FENCE_SIZE_BITS(size);
2369         val |= pitch_val << I830_FENCE_PITCH_SHIFT;
2370         val |= I830_FENCE_REG_VALID;
2371
2372         if (pipelined) {
2373                 int ret = intel_ring_begin(pipelined, 4);
2374                 if (ret)
2375                         return ret;
2376
2377                 intel_ring_emit(pipelined, MI_NOOP);
2378                 intel_ring_emit(pipelined, MI_LOAD_REGISTER_IMM(1));
2379                 intel_ring_emit(pipelined, FENCE_REG_830_0 + regnum*4);
2380                 intel_ring_emit(pipelined, val);
2381                 intel_ring_advance(pipelined);
2382         } else
2383                 I915_WRITE(FENCE_REG_830_0 + regnum * 4, val);
2384
2385         return 0;
2386 }
2387
2388 static bool ring_passed_seqno(struct intel_ring_buffer *ring, u32 seqno)
2389 {
2390         return i915_seqno_passed(ring->get_seqno(ring), seqno);
2391 }
2392
2393 static int
2394 i915_gem_object_flush_fence(struct drm_i915_gem_object *obj,
2395                             struct intel_ring_buffer *pipelined)
2396 {
2397         int ret;
2398
2399         if (obj->fenced_gpu_access) {
2400                 if (obj->base.write_domain & I915_GEM_GPU_DOMAINS) {
2401                         ret = i915_gem_flush_ring(obj->last_fenced_ring,
2402                                                   0, obj->base.write_domain);
2403                         if (ret)
2404                                 return ret;
2405                 }
2406
2407                 obj->fenced_gpu_access = false;
2408         }
2409
2410         if (obj->last_fenced_seqno && pipelined != obj->last_fenced_ring) {
2411                 if (!ring_passed_seqno(obj->last_fenced_ring,
2412                                        obj->last_fenced_seqno)) {
2413                         ret = i915_wait_request(obj->last_fenced_ring,
2414                                                 obj->last_fenced_seqno);
2415                         if (ret)
2416                                 return ret;
2417                 }
2418
2419                 obj->last_fenced_seqno = 0;
2420                 obj->last_fenced_ring = NULL;
2421         }
2422
2423         /* Ensure that all CPU reads are completed before installing a fence
2424          * and all writes before removing the fence.
2425          */
2426         if (obj->base.read_domains & I915_GEM_DOMAIN_GTT)
2427                 mb();
2428
2429         return 0;
2430 }
2431
2432 int
2433 i915_gem_object_put_fence(struct drm_i915_gem_object *obj)
2434 {
2435         int ret;
2436
2437         if (obj->tiling_mode)
2438                 i915_gem_release_mmap(obj);
2439
2440         ret = i915_gem_object_flush_fence(obj, NULL);
2441         if (ret)
2442                 return ret;
2443
2444         if (obj->fence_reg != I915_FENCE_REG_NONE) {
2445                 struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
2446                 i915_gem_clear_fence_reg(obj->base.dev,
2447                                          &dev_priv->fence_regs[obj->fence_reg]);
2448
2449                 obj->fence_reg = I915_FENCE_REG_NONE;
2450         }
2451
2452         return 0;
2453 }
2454
2455 static struct drm_i915_fence_reg *
2456 i915_find_fence_reg(struct drm_device *dev,
2457                     struct intel_ring_buffer *pipelined)
2458 {
2459         struct drm_i915_private *dev_priv = dev->dev_private;
2460         struct drm_i915_fence_reg *reg, *first, *avail;
2461         int i;
2462
2463         /* First try to find a free reg */
2464         avail = NULL;
2465         for (i = dev_priv->fence_reg_start; i < dev_priv->num_fence_regs; i++) {
2466                 reg = &dev_priv->fence_regs[i];
2467                 if (!reg->obj)
2468                         return reg;
2469
2470                 if (!reg->obj->pin_count)
2471                         avail = reg;
2472         }
2473
2474         if (avail == NULL)
2475                 return NULL;
2476
2477         /* None available, try to steal one or wait for a user to finish */
2478         avail = first = NULL;
2479         list_for_each_entry(reg, &dev_priv->mm.fence_list, lru_list) {
2480                 if (reg->obj->pin_count)
2481                         continue;
2482
2483                 if (first == NULL)
2484                         first = reg;
2485
2486                 if (!pipelined ||
2487                     !reg->obj->last_fenced_ring ||
2488                     reg->obj->last_fenced_ring == pipelined) {
2489                         avail = reg;
2490                         break;
2491                 }
2492         }
2493
2494         if (avail == NULL)
2495                 avail = first;
2496
2497         return avail;
2498 }
2499
2500 /**
2501  * i915_gem_object_get_fence - set up a fence reg for an object
2502  * @obj: object to map through a fence reg
2503  * @pipelined: ring on which to queue the change, or NULL for CPU access
2504  * Whether the wait is interruptible is governed by dev_priv->mm.interruptible.
2505  *
2506  * When mapping objects through the GTT, userspace wants to be able to write
2507  * to them without having to worry about swizzling if the object is tiled.
2508  *
2509  * This function walks the fence regs looking for a free one for @obj,
2510  * stealing one if it can't find any.
2511  *
2512  * It then sets up the reg based on the object's properties: address, pitch
2513  * and tiling format.
2514  */
2515 int
2516 i915_gem_object_get_fence(struct drm_i915_gem_object *obj,
2517                           struct intel_ring_buffer *pipelined)
2518 {
2519         struct drm_device *dev = obj->base.dev;
2520         struct drm_i915_private *dev_priv = dev->dev_private;
2521         struct drm_i915_fence_reg *reg;
2522         int ret;
2523
2524         /* XXX disable pipelining. There are bugs. Shocking. */
2525         pipelined = NULL;
2526
2527         /* Just update our place in the LRU if our fence is getting reused. */
2528         if (obj->fence_reg != I915_FENCE_REG_NONE) {
2529                 reg = &dev_priv->fence_regs[obj->fence_reg];
2530                 list_move_tail(&reg->lru_list, &dev_priv->mm.fence_list);
2531
2532                 if (obj->tiling_changed) {
2533                         ret = i915_gem_object_flush_fence(obj, pipelined);
2534                         if (ret)
2535                                 return ret;
2536
2537                         if (!obj->fenced_gpu_access && !obj->last_fenced_seqno)
2538                                 pipelined = NULL;
2539
2540                         if (pipelined) {
2541                                 reg->setup_seqno =
2542                                         i915_gem_next_request_seqno(pipelined);
2543                                 obj->last_fenced_seqno = reg->setup_seqno;
2544                                 obj->last_fenced_ring = pipelined;
2545                         }
2546
2547                         goto update;
2548                 }
2549
2550                 if (!pipelined) {
2551                         if (reg->setup_seqno) {
2552                                 if (!ring_passed_seqno(obj->last_fenced_ring,
2553                                                        reg->setup_seqno)) {
2554                                         ret = i915_wait_request(obj->last_fenced_ring,
2555                                                                 reg->setup_seqno);
2556                                         if (ret)
2557                                                 return ret;
2558                                 }
2559
2560                                 reg->setup_seqno = 0;
2561                         }
2562                 } else if (obj->last_fenced_ring &&
2563                            obj->last_fenced_ring != pipelined) {
2564                         ret = i915_gem_object_flush_fence(obj, pipelined);
2565                         if (ret)
2566                                 return ret;
2567                 }
2568
2569                 return 0;
2570         }
2571
2572         reg = i915_find_fence_reg(dev, pipelined);
2573         if (reg == NULL)
2574                 return -ENOSPC;
2575
2576         ret = i915_gem_object_flush_fence(obj, pipelined);
2577         if (ret)
2578                 return ret;
2579
2580         if (reg->obj) {
2581                 struct drm_i915_gem_object *old = reg->obj;
2582
2583                 drm_gem_object_reference(&old->base);
2584
2585                 if (old->tiling_mode)
2586                         i915_gem_release_mmap(old);
2587
2588                 ret = i915_gem_object_flush_fence(old, pipelined);
2589                 if (ret) {
2590                         drm_gem_object_unreference(&old->base);
2591                         return ret;
2592                 }
2593
2594                 if (old->last_fenced_seqno == 0 && obj->last_fenced_seqno == 0)
2595                         pipelined = NULL;
2596
2597                 old->fence_reg = I915_FENCE_REG_NONE;
2598                 old->last_fenced_ring = pipelined;
2599                 old->last_fenced_seqno =
2600                         pipelined ? i915_gem_next_request_seqno(pipelined) : 0;
2601
2602                 drm_gem_object_unreference(&old->base);
2603         } else if (obj->last_fenced_seqno == 0)
2604                 pipelined = NULL;
2605
2606         reg->obj = obj;
2607         list_move_tail(&reg->lru_list, &dev_priv->mm.fence_list);
2608         obj->fence_reg = reg - dev_priv->fence_regs;
2609         obj->last_fenced_ring = pipelined;
2610
2611         reg->setup_seqno =
2612                 pipelined ? i915_gem_next_request_seqno(pipelined) : 0;
2613         obj->last_fenced_seqno = reg->setup_seqno;
2614
2615 update:
2616         obj->tiling_changed = false;
2617         switch (INTEL_INFO(dev)->gen) {
2618         case 7:
2619         case 6:
2620                 ret = sandybridge_write_fence_reg(obj, pipelined);
2621                 break;
2622         case 5:
2623         case 4:
2624                 ret = i965_write_fence_reg(obj, pipelined);
2625                 break;
2626         case 3:
2627                 ret = i915_write_fence_reg(obj, pipelined);
2628                 break;
2629         case 2:
2630                 ret = i830_write_fence_reg(obj, pipelined);
2631                 break;
2632         }
2633
2634         return ret;
2635 }
2636
2637 /**
2638  * i915_gem_clear_fence_reg - clear out fence register info
2639  * @obj: object to clear
2640  *
2641  * Zeroes out the fence register itself and clears out the associated
2642  * data structures in dev_priv and obj.
2643  */
2644 static void
2645 i915_gem_clear_fence_reg(struct drm_device *dev,
2646                          struct drm_i915_fence_reg *reg)
2647 {
2648         drm_i915_private_t *dev_priv = dev->dev_private;
2649         uint32_t fence_reg = reg - dev_priv->fence_regs;
2650
2651         switch (INTEL_INFO(dev)->gen) {
2652         case 7:
2653         case 6:
2654                 I915_WRITE64(FENCE_REG_SANDYBRIDGE_0 + fence_reg*8, 0);
2655                 break;
2656         case 5:
2657         case 4:
2658                 I915_WRITE64(FENCE_REG_965_0 + fence_reg*8, 0);
2659                 break;
2660         case 3:
2661                 if (fence_reg >= 8)
2662                         fence_reg = FENCE_REG_945_8 + (fence_reg - 8) * 4;
2663                 else
2664         case 2:
2665                         fence_reg = FENCE_REG_830_0 + fence_reg * 4;
2666
2667                 I915_WRITE(fence_reg, 0);
2668                 break;
2669         }
2670
2671         list_del_init(&reg->lru_list);
2672         reg->obj = NULL;
2673         reg->setup_seqno = 0;
2674 }
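/*
 * Editor's note (clarifying the switch above, no behavioural change): the
 * gen3 branch intentionally nests the "case 2:" label inside its else arm,
 * so gen2 and gen3 fences 0-7 share the FENCE_REG_830_0 + reg * 4 encoding
 * while gen3 fences 8-15 use FENCE_REG_945_8; both paths then fall through
 * to the single 32-bit I915_WRITE(fence_reg, 0).
 */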
2675
2676 /**
2677  * Finds free space in the GTT aperture and binds the object there.
2678  */
2679 static int
2680 i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj,
2681                             unsigned alignment,
2682                             bool map_and_fenceable)
2683 {
2684         struct drm_device *dev = obj->base.dev;
2685         drm_i915_private_t *dev_priv = dev->dev_private;
2686         struct drm_mm_node *free_space;
2687         gfp_t gfpmask = __GFP_NORETRY | __GFP_NOWARN;
2688         u32 size, fence_size, fence_alignment, unfenced_alignment;
2689         bool mappable, fenceable;
2690         int ret;
2691
2692         if (obj->madv != I915_MADV_WILLNEED) {
2693                 DRM_ERROR("Attempting to bind a purgeable object\n");
2694                 return -EINVAL;
2695         }
2696
2697         fence_size = i915_gem_get_gtt_size(dev,
2698                                            obj->base.size,
2699                                            obj->tiling_mode);
2700         fence_alignment = i915_gem_get_gtt_alignment(dev,
2701                                                      obj->base.size,
2702                                                      obj->tiling_mode);
2703         unfenced_alignment =
2704                 i915_gem_get_unfenced_gtt_alignment(dev,
2705                                                     obj->base.size,
2706                                                     obj->tiling_mode);
2707
2708         if (alignment == 0)
2709                 alignment = map_and_fenceable ? fence_alignment :
2710                                                 unfenced_alignment;
2711         if (map_and_fenceable && alignment & (fence_alignment - 1)) {
2712                 DRM_ERROR("Invalid object alignment requested %u\n", alignment);
2713                 return -EINVAL;
2714         }
2715
2716         size = map_and_fenceable ? fence_size : obj->base.size;
2717
2718         /* If the object is bigger than the entire aperture, reject it early
2719          * before evicting everything in a vain attempt to find space.
2720          */
2721         if (obj->base.size >
2722             (map_and_fenceable ? dev_priv->mm.gtt_mappable_end : dev_priv->mm.gtt_total)) {
2723                 DRM_ERROR("Attempting to bind an object larger than the aperture\n");
2724                 return -E2BIG;
2725         }
2726
2727  search_free:
2728         if (map_and_fenceable)
2729                 free_space =
2730                         drm_mm_search_free_in_range(&dev_priv->mm.gtt_space,
2731                                                     size, alignment, 0,
2732                                                     dev_priv->mm.gtt_mappable_end,
2733                                                     0);
2734         else
2735                 free_space = drm_mm_search_free(&dev_priv->mm.gtt_space,
2736                                                 size, alignment, 0);
2737
2738         if (free_space != NULL) {
2739                 if (map_and_fenceable)
2740                         obj->gtt_space =
2741                                 drm_mm_get_block_range_generic(free_space,
2742                                                                size, alignment, 0,
2743                                                                dev_priv->mm.gtt_mappable_end,
2744                                                                0);
2745                 else
2746                         obj->gtt_space =
2747                                 drm_mm_get_block(free_space, size, alignment);
2748         }
2749         if (obj->gtt_space == NULL) {
2750                 /* If the gtt is empty and we're still having trouble
2751                  * fitting our object in, we're out of memory.
2752                  */
2753                 ret = i915_gem_evict_something(dev, size, alignment,
2754                                                map_and_fenceable);
2755                 if (ret)
2756                         return ret;
2757
2758                 goto search_free;
2759         }
2760
2761         ret = i915_gem_object_get_pages_gtt(obj, gfpmask);
2762         if (ret) {
2763                 drm_mm_put_block(obj->gtt_space);
2764                 obj->gtt_space = NULL;
2765
2766                 if (ret == -ENOMEM) {
2767                         /* first try to reclaim some memory by clearing the GTT */
2768                         ret = i915_gem_evict_everything(dev, false);
2769                         if (ret) {
2770                                 /* now try to shrink everyone else */
2771                                 if (gfpmask) {
2772                                         gfpmask = 0;
2773                                         goto search_free;
2774                                 }
2775
2776                                 return -ENOMEM;
2777                         }
2778
2779                         goto search_free;
2780                 }
2781
2782                 return ret;
2783         }
2784
2785         ret = i915_gem_gtt_bind_object(obj);
2786         if (ret) {
2787                 i915_gem_object_put_pages_gtt(obj);
2788                 drm_mm_put_block(obj->gtt_space);
2789                 obj->gtt_space = NULL;
2790
2791                 if (i915_gem_evict_everything(dev, false))
2792                         return ret;
2793
2794                 goto search_free;
2795         }
2796
2797         list_add_tail(&obj->gtt_list, &dev_priv->mm.gtt_list);
2798         list_add_tail(&obj->mm_list, &dev_priv->mm.inactive_list);
2799
2800         /* Assert that the object is not currently in any GPU domain. As it
2801          * wasn't in the GTT, there shouldn't be any way it could have been in
2802          * a GPU cache
2803          */
2804         BUG_ON(obj->base.read_domains & I915_GEM_GPU_DOMAINS);
2805         BUG_ON(obj->base.write_domain & I915_GEM_GPU_DOMAINS);
2806
2807         obj->gtt_offset = obj->gtt_space->start;
2808
2809         fenceable =
2810                 obj->gtt_space->size == fence_size &&
2811                 (obj->gtt_space->start & (fence_alignment - 1)) == 0;
2812
2813         mappable =
2814                 obj->gtt_offset + obj->base.size <= dev_priv->mm.gtt_mappable_end;
2815
2816         obj->map_and_fenceable = mappable && fenceable;
2817
2818         trace_i915_gem_object_bind(obj, map_and_fenceable);
2819         return 0;
2820 }
2821
2822 void
2823 i915_gem_clflush_object(struct drm_i915_gem_object *obj)
2824 {
2825         /* If we don't have a page list set up, then we're not pinned
2826          * to GPU, and we can ignore the cache flush because it'll happen
2827          * again at bind time.
2828          */
2829         if (obj->pages == NULL)
2830                 return;
2831
2832         /* If the GPU is snooping the contents of the CPU cache,
2833          * we do not need to manually clear the CPU cache lines.  However,
2834          * the caches are only snooped when the render cache is
2835          * flushed/invalidated.  As we always have to emit invalidations
2836          * and flushes when moving into and out of the RENDER domain, correct
2837          * snooping behaviour occurs naturally as the result of our domain
2838          * tracking.
2839          */
2840         if (obj->cache_level != I915_CACHE_NONE)
2841                 return;
2842
2843         trace_i915_gem_object_clflush(obj);
2844
2845         drm_clflush_pages(obj->pages, obj->base.size / PAGE_SIZE);
2846 }
2847
2848 /** Flushes any GPU write domain for the object if it's dirty. */
2849 static int
2850 i915_gem_object_flush_gpu_write_domain(struct drm_i915_gem_object *obj)
2851 {
2852         if ((obj->base.write_domain & I915_GEM_GPU_DOMAINS) == 0)
2853                 return 0;
2854
2855         /* Queue the GPU write cache flushing we need. */
2856         return i915_gem_flush_ring(obj->ring, 0, obj->base.write_domain);
2857 }
2858
2859 /** Flushes the GTT write domain for the object if it's dirty. */
2860 static void
2861 i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj)
2862 {
2863         uint32_t old_write_domain;
2864
2865         if (obj->base.write_domain != I915_GEM_DOMAIN_GTT)
2866                 return;
2867
2868         /* No actual flushing is required for the GTT write domain.  Writes
2869          * to it immediately go to main memory as far as we know, so there's
2870          * no chipset flush.  It also doesn't land in render cache.
2871          *
2872          * However, we do have to enforce the order so that all writes through
2873          * the GTT land before any writes to the device, such as updates to
2874          * the GATT itself.
2875          */
2876         wmb();
2877
2878         old_write_domain = obj->base.write_domain;
2879         obj->base.write_domain = 0;
2880
2881         trace_i915_gem_object_change_domain(obj,
2882                                             obj->base.read_domains,
2883                                             old_write_domain);
2884 }
2885
2886 /** Flushes the CPU write domain for the object if it's dirty. */
2887 static void
2888 i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj)
2889 {
2890         uint32_t old_write_domain;
2891
2892         if (obj->base.write_domain != I915_GEM_DOMAIN_CPU)
2893                 return;
2894
2895         i915_gem_clflush_object(obj);
2896         intel_gtt_chipset_flush();
2897         old_write_domain = obj->base.write_domain;
2898         obj->base.write_domain = 0;
2899
2900         trace_i915_gem_object_change_domain(obj,
2901                                             obj->base.read_domains,
2902                                             old_write_domain);
2903 }
2904
2905 /**
2906  * Moves a single object to the GTT read, and possibly write domain.
2907  *
2908  * This function returns when the move is complete, including waiting on
2909  * flushes to occur.
2910  */
2911 int
2912 i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write)
2913 {
2914         uint32_t old_write_domain, old_read_domains;
2915         int ret;
2916
2917         /* Not valid to be called on unbound objects. */
2918         if (obj->gtt_space == NULL)
2919                 return -EINVAL;
2920
2921         if (obj->base.write_domain == I915_GEM_DOMAIN_GTT)
2922                 return 0;
2923
2924         ret = i915_gem_object_flush_gpu_write_domain(obj);
2925         if (ret)
2926                 return ret;
2927
2928         if (obj->pending_gpu_write || write) {
2929                 ret = i915_gem_object_wait_rendering(obj);
2930                 if (ret)
2931                         return ret;
2932         }
2933
2934         i915_gem_object_flush_cpu_write_domain(obj);
2935
2936         old_write_domain = obj->base.write_domain;
2937         old_read_domains = obj->base.read_domains;
2938
2939         /* It should now be out of any other write domains, and we can update
2940          * the domain values for our changes.
2941          */
2942         BUG_ON((obj->base.write_domain & ~I915_GEM_DOMAIN_GTT) != 0);
2943         obj->base.read_domains |= I915_GEM_DOMAIN_GTT;
2944         if (write) {
2945                 obj->base.read_domains = I915_GEM_DOMAIN_GTT;
2946                 obj->base.write_domain = I915_GEM_DOMAIN_GTT;
2947                 obj->dirty = 1;
2948         }
2949
2950         trace_i915_gem_object_change_domain(obj,
2951                                             old_read_domains,
2952                                             old_write_domain);
2953
2954         return 0;
2955 }
2956
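/**
 * Changes the caching mode (cache_level) used for the object's GTT mapping.
 *
 * Pinned objects cannot be switched.  If the object is currently bound,
 * outstanding GPU access is finished first, the GTT write domain is flushed,
 * any fence is released on pre-SandyBridge hardware, and the PTEs are
 * rewritten with the new cache level.  Switching to uncached also moves the
 * tracked read/write domains to the CPU domain, since cache-domain tracking
 * is skipped while the object is LLC cached.
 */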
2957 int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
2958                                     enum i915_cache_level cache_level)
2959 {
2960         int ret;
2961
2962         if (obj->cache_level == cache_level)
2963                 return 0;
2964
2965         if (obj->pin_count) {
2966                 DRM_DEBUG("cannot change the cache level of pinned objects\n");
2967                 return -EBUSY;
2968         }
2969
2970         if (obj->gtt_space) {
2971                 ret = i915_gem_object_finish_gpu(obj);
2972                 if (ret)
2973                         return ret;
2974
2975                 i915_gem_object_finish_gtt(obj);
2976
2977                 /* Before SandyBridge, you could not use tiling or fence
2978                  * registers with snooped memory, so relinquish any fences
2979                  * currently pointing to our region in the aperture.
2980                  */
2981                 if (INTEL_INFO(obj->base.dev)->gen < 6) {
2982                         ret = i915_gem_object_put_fence(obj);
2983                         if (ret)
2984                                 return ret;
2985                 }
2986
2987                 i915_gem_gtt_rebind_object(obj, cache_level);
2988         }
2989
2990         if (cache_level == I915_CACHE_NONE) {
2991                 u32 old_read_domains, old_write_domain;
2992
2993                 /* If we're coming from an LLC-cached state, then we haven't
2994                  * actually been tracking whether the data is in the
2995                  * CPU cache or not, since we only allow one bit set
2996                  * in obj->write_domain and have been skipping the clflushes.
2997                  * Just set it to the CPU cache for now.
2998                  */
2999                 WARN_ON(obj->base.write_domain & ~I915_GEM_DOMAIN_CPU);
3000                 WARN_ON(obj->base.read_domains & ~I915_GEM_DOMAIN_CPU);
3001
3002                 old_read_domains = obj->base.read_domains;
3003                 old_write_domain = obj->base.write_domain;
3004
3005                 obj->base.read_domains = I915_GEM_DOMAIN_CPU;
3006                 obj->base.write_domain = I915_GEM_DOMAIN_CPU;
3007
3008                 trace_i915_gem_object_change_domain(obj,
3009                                                     old_read_domains,
3010                                                     old_write_domain);
3011         }
3012
3013         obj->cache_level = cache_level;
3014         return 0;
3015 }
3016
3017 /*
3018  * Prepare buffer for display plane (scanout, cursors, etc.).
3019  * Can be called from an uninterruptible phase (modesetting) and allows
3020  * any flushes to be pipelined (for pageflips).
3021  *
3022  * For the display plane, we want to be in the GTT but out of any write
3023  * domains. So in many ways this looks like set_to_gtt_domain() apart from the
3024  * ability to pipeline the waits, pinning and any additional subtleties
3025  * that may differentiate the display plane from ordinary buffers.
3026  */
3027 int
3028 i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
3029                                      u32 alignment,
3030                                      struct intel_ring_buffer *pipelined)
3031 {
3032         u32 old_read_domains, old_write_domain;
3033         int ret;
3034
3035         ret = i915_gem_object_flush_gpu_write_domain(obj);
3036         if (ret)
3037                 return ret;
3038
3039         if (pipelined != obj->ring) {
3040                 ret = i915_gem_object_wait_rendering(obj);
3041                 if (ret == -ERESTARTSYS)
3042                         return ret;
3043         }
3044
3045         /* The display engine is not coherent with the LLC cache on gen6.  As
3046          * a result, we make sure that the pinning that is about to occur is
3047          * done with uncached PTEs. This is the lowest common denominator for all
3048          * chipsets.
3049          *
3050          * However for gen6+, we could do better by using the GFDT bit instead
3051          * of uncaching, which would allow us to flush all the LLC-cached data
3052          * with that bit in the PTE to main memory with just one PIPE_CONTROL.
3053          */
3054         ret = i915_gem_object_set_cache_level(obj, I915_CACHE_NONE);
3055         if (ret)
3056                 return ret;
3057
3058         /* As the user may map the buffer once pinned in the display plane
3059          * (e.g. libkms for the bootup splash), we have to ensure that we
3060          * always use map_and_fenceable for all scanout buffers.
3061          */
3062         ret = i915_gem_object_pin(obj, alignment, true);
3063         if (ret)
3064                 return ret;
3065
3066         i915_gem_object_flush_cpu_write_domain(obj);
3067
3068         old_write_domain = obj->base.write_domain;
3069         old_read_domains = obj->base.read_domains;
3070
3071         /* It should now be out of any other write domains, and we can update
3072          * the domain values for our changes.
3073          */
3074         BUG_ON((obj->base.write_domain & ~I915_GEM_DOMAIN_GTT) != 0);
3075         obj->base.read_domains |= I915_GEM_DOMAIN_GTT;
3076
3077         trace_i915_gem_object_change_domain(obj,
3078                                             old_read_domains,
3079                                             old_write_domain);
3080
3081         return 0;
3082 }
3083
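/**
 * Ensures the GPU is completely finished with the object: flushes any
 * pending GPU write domain, clears the GPU read domains so the GPU's caches
 * and TLBs are invalidated on next use, and waits for outstanding rendering.
 */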
3084 int
3085 i915_gem_object_finish_gpu(struct drm_i915_gem_object *obj)
3086 {
3087         int ret;
3088
3089         if ((obj->base.read_domains & I915_GEM_GPU_DOMAINS) == 0)
3090                 return 0;
3091
3092         if (obj->base.write_domain & I915_GEM_GPU_DOMAINS) {
3093                 ret = i915_gem_flush_ring(obj->ring, 0, obj->base.write_domain);
3094                 if (ret)
3095                         return ret;
3096         }
3097
3098         /* Ensure that we invalidate the GPU's caches and TLBs. */
3099         obj->base.read_domains &= ~I915_GEM_GPU_DOMAINS;
3100
3101         return i915_gem_object_wait_rendering(obj);
3102 }
3103
3104 /**
3105  * Moves a single object to the CPU read, and possibly write domain.
3106  *
3107  * This function returns when the move is complete, including waiting on
3108  * flushes to occur.
3109  */
3110 static int
3111 i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write)
3112 {
3113         uint32_t old_write_domain, old_read_domains;
3114         int ret;
3115
3116         if (obj->base.write_domain == I915_GEM_DOMAIN_CPU)
3117                 return 0;
3118
3119         ret = i915_gem_object_flush_gpu_write_domain(obj);
3120         if (ret)
3121                 return ret;
3122
3123         ret = i915_gem_object_wait_rendering(obj);
3124         if (ret)
3125                 return ret;
3126
3127         i915_gem_object_flush_gtt_write_domain(obj);
3128
3129         /* If we have a partially-valid cache of the object in the CPU,
3130          * finish invalidating it and free the per-page flags.
3131          */
3132         i915_gem_object_set_to_full_cpu_read_domain(obj);
3133
3134         old_write_domain = obj->base.write_domain;
3135         old_read_domains = obj->base.read_domains;
3136
3137         /* Flush the CPU cache if it's still invalid. */
3138         if ((obj->base.read_domains & I915_GEM_DOMAIN_CPU) == 0) {
3139                 i915_gem_clflush_object(obj);
3140
3141                 obj->base.read_domains |= I915_GEM_DOMAIN_CPU;
3142         }
3143
3144         /* It should now be out of any other write domains, and we can update
3145          * the domain values for our changes.
3146          */
3147         BUG_ON((obj->base.write_domain & ~I915_GEM_DOMAIN_CPU) != 0);
3148
3149         /* If we're writing through the CPU, then the GPU read domains will
3150          * need to be invalidated at next use.
3151          */
3152         if (write) {
3153                 obj->base.read_domains = I915_GEM_DOMAIN_CPU;
3154                 obj->base.write_domain = I915_GEM_DOMAIN_CPU;
3155         }
3156
3157         trace_i915_gem_object_change_domain(obj,
3158                                             old_read_domains,
3159                                             old_write_domain);
3160
3161         return 0;
3162 }
3163
3164 /**
3165  * Moves the object from a partially CPU read to a full one.
3166  * Moves the object from a partially valid CPU read domain to a fully valid one.
3167  * Note that this only resolves i915_gem_object_set_cpu_read_domain_range(),
3168  * and doesn't handle transitioning from !(read_domains & I915_GEM_DOMAIN_CPU).
3169  */
3170 static void
3171 i915_gem_object_set_to_full_cpu_read_domain(struct drm_i915_gem_object *obj)
3172 {
3173         if (!obj->page_cpu_valid)
3174                 return;
3175
3176         /* If we're partially in the CPU read domain, finish moving it in.
3177          */
3178         if (obj->base.read_domains & I915_GEM_DOMAIN_CPU) {
3179                 int i;
3180
3181                 for (i = 0; i <= (obj->base.size - 1) / PAGE_SIZE; i++) {
3182                         if (obj->page_cpu_valid[i])
3183                                 continue;
3184                         drm_clflush_pages(obj->pages + i, 1);
3185                 }
3186         }
3187
3188         /* Free the page_cpu_valid mappings which are now stale, whether
3189          * or not we've got I915_GEM_DOMAIN_CPU.
3190          */
3191         kfree(obj->page_cpu_valid);
3192         obj->page_cpu_valid = NULL;
3193 }
3194
3195 /**
3196  * Set the CPU read domain on a range of the object.
3197  *
3198  * The object ends up with I915_GEM_DOMAIN_CPU in its read flags although it's
3199  * not entirely valid.  The object's page_cpu_valid member records which
3200  * pages have been flushed, and is respected by
3201  * i915_gem_object_set_to_cpu_domain() if it is later called to obtain a valid
3202  * mapping of the whole object.
3203  *
3204  * This function returns when the move is complete, including waiting on
3205  * flushes to occur.
3206  */
3207 static int
3208 i915_gem_object_set_cpu_read_domain_range(struct drm_i915_gem_object *obj,
3209                                           uint64_t offset, uint64_t size)
3210 {
3211         uint32_t old_read_domains;
3212         int i, ret;
3213
3214         if (offset == 0 && size == obj->base.size)
3215                 return i915_gem_object_set_to_cpu_domain(obj, 0);
3216
3217         ret = i915_gem_object_flush_gpu_write_domain(obj);
3218         if (ret)
3219                 return ret;
3220
3221         ret = i915_gem_object_wait_rendering(obj);
3222         if (ret)
3223                 return ret;
3224
3225         i915_gem_object_flush_gtt_write_domain(obj);
3226
3227         /* If we're already fully in the CPU read domain, we're done. */
3228         if (obj->page_cpu_valid == NULL &&
3229             (obj->base.read_domains & I915_GEM_DOMAIN_CPU) != 0)
3230                 return 0;
3231
3232         /* Otherwise, create/clear the per-page CPU read domain flag if we're
3233          * newly adding I915_GEM_DOMAIN_CPU
3234          * newly adding I915_GEM_DOMAIN_CPU.
3235         if (obj->page_cpu_valid == NULL) {
3236                 obj->page_cpu_valid = kzalloc(obj->base.size / PAGE_SIZE,
3237                                               GFP_KERNEL);
3238                 if (obj->page_cpu_valid == NULL)
3239                         return -ENOMEM;
3240         } else if ((obj->base.read_domains & I915_GEM_DOMAIN_CPU) == 0)
3241                 memset(obj->page_cpu_valid, 0, obj->base.size / PAGE_SIZE);
3242
3243         /* Flush the cache on any pages that are still invalid from the CPU's
3244          * perspective.
3245          */
3246         for (i = offset / PAGE_SIZE; i <= (offset + size - 1) / PAGE_SIZE;
3247              i++) {
3248                 if (obj->page_cpu_valid[i])
3249                         continue;
3250
3251                 drm_clflush_pages(obj->pages + i, 1);
3252
3253                 obj->page_cpu_valid[i] = 1;
3254         }
3255
3256         /* It should now be out of any other write domains, and we can update
3257          * the domain values for our changes.
3258          */
3259         BUG_ON((obj->base.write_domain & ~I915_GEM_DOMAIN_CPU) != 0);
3260
3261         old_read_domains = obj->base.read_domains;
3262         obj->base.read_domains |= I915_GEM_DOMAIN_CPU;
3263
3264         trace_i915_gem_object_change_domain(obj,
3265                                             old_read_domains,
3266                                             obj->base.write_domain);
3267
3268         return 0;
3269 }
3270
3271 /* Throttle our rendering by waiting until the ring has completed our requests
3272  * emitted over 20 msec ago.
3273  *
3274  * Note that if we were to use the current jiffies each time around the loop,
3275  * we wouldn't escape the function with any frames outstanding if the time to
3276  * render a frame was over 20ms.
3277  *
3278  * This should get us reasonable parallelism between CPU and GPU but also
3279  * relatively low latency when blocking on a particular request to finish.
3280  */
3281 static int
3282 i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file)
3283 {
3284         struct drm_i915_private *dev_priv = dev->dev_private;
3285         struct drm_i915_file_private *file_priv = file->driver_priv;
3286         unsigned long recent_enough = jiffies - msecs_to_jiffies(20);
3287         struct drm_i915_gem_request *request;
3288         struct intel_ring_buffer *ring = NULL;
3289         u32 seqno = 0;
3290         int ret;
3291
3292         if (atomic_read(&dev_priv->mm.wedged))
3293                 return -EIO;
3294
3295         spin_lock(&file_priv->mm.lock);
3296         list_for_each_entry(request, &file_priv->mm.request_list, client_list) {
3297                 if (time_after_eq(request->emitted_jiffies, recent_enough))
3298                         break;
3299
3300                 ring = request->ring;
3301                 seqno = request->seqno;
3302         }
3303         spin_unlock(&file_priv->mm.lock);
3304
3305         if (seqno == 0)
3306                 return 0;
3307
3308         ret = 0;
3309         if (!i915_seqno_passed(ring->get_seqno(ring), seqno)) {
3310                 /* And wait for the seqno passing without holding any locks and
3311                  * causing extra latency for others. This is safe as the irq
3312                  * generation is designed to be run atomically and so is
3313                  * lockless.
3314                  */
3315                 if (ring->irq_get(ring)) {
3316                         ret = wait_event_interruptible(ring->irq_queue,
3317                                                        i915_seqno_passed(ring->get_seqno(ring), seqno)
3318                                                        || atomic_read(&dev_priv->mm.wedged));
3319                         ring->irq_put(ring);
3320
3321                         if (ret == 0 && atomic_read(&dev_priv->mm.wedged))
3322                                 ret = -EIO;
3323                 } else if (wait_for_atomic(i915_seqno_passed(ring->get_seqno(ring),
3324                                                              seqno) ||
3325                                     atomic_read(&dev_priv->mm.wedged), 3000)) {
3326                         ret = -EBUSY;
3327                 }
3328         }
3329
3330         if (ret == 0)
3331                 queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, 0);
3332
3333         return ret;
3334 }
3335
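/**
 * Pins the object into the GTT.
 *
 * If the object is already bound at an offset that violates the requested
 * alignment, or without the mappable/fenceable placement asked for, it is
 * unbound and rebound with the new constraints.  While the pin count is
 * non-zero, inactive objects are kept on the pinned list.
 */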
3336 int
3337 i915_gem_object_pin(struct drm_i915_gem_object *obj,
3338                     uint32_t alignment,
3339                     bool map_and_fenceable)
3340 {
3341         struct drm_device *dev = obj->base.dev;
3342         struct drm_i915_private *dev_priv = dev->dev_private;
3343         int ret;
3344
3345         BUG_ON(obj->pin_count == DRM_I915_GEM_OBJECT_MAX_PIN_COUNT);
3346         WARN_ON(i915_verify_lists(dev));
3347
3348         if (obj->gtt_space != NULL) {
3349                 if ((alignment && obj->gtt_offset & (alignment - 1)) ||
3350                     (map_and_fenceable && !obj->map_and_fenceable)) {
3351                         WARN(obj->pin_count,
3352                              "bo is already pinned with incorrect alignment:"
3353                              " offset=%x, req.alignment=%x, req.map_and_fenceable=%d,"
3354                              " obj->map_and_fenceable=%d\n",
3355                              obj->gtt_offset, alignment,
3356                              map_and_fenceable,
3357                              obj->map_and_fenceable);
3358                         ret = i915_gem_object_unbind(obj);
3359                         if (ret)
3360                                 return ret;
3361                 }
3362         }
3363
3364         if (obj->gtt_space == NULL) {
3365                 ret = i915_gem_object_bind_to_gtt(obj, alignment,
3366                                                   map_and_fenceable);
3367                 if (ret)
3368                         return ret;
3369         }
3370
3371         if (obj->pin_count++ == 0) {
3372                 if (!obj->active)
3373                         list_move_tail(&obj->mm_list,
3374                                        &dev_priv->mm.pinned_list);
3375         }
3376         obj->pin_mappable |= map_and_fenceable;
3377
3378         WARN_ON(i915_verify_lists(dev));
3379         return 0;
3380 }
3381
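/**
 * Releases one pin reference; once the last reference is dropped, inactive
 * objects are moved back onto the inactive list.
 */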
3382 void
3383 i915_gem_object_unpin(struct drm_i915_gem_object *obj)
3384 {
3385         struct drm_device *dev = obj->base.dev;
3386         drm_i915_private_t *dev_priv = dev->dev_private;
3387
3388         WARN_ON(i915_verify_lists(dev));
3389         BUG_ON(obj->pin_count == 0);
3390         BUG_ON(obj->gtt_space == NULL);
3391
3392         if (--obj->pin_count == 0) {
3393                 if (!obj->active)
3394                         list_move_tail(&obj->mm_list,
3395                                        &dev_priv->mm.inactive_list);
3396                 obj->pin_mappable = false;
3397         }
3398         WARN_ON(i915_verify_lists(dev));
3399 }
3400
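/**
 * Userspace interface for pinning an object into the GTT on behalf of the
 * caller (historically the X server).  The first pin from a file performs
 * the actual GTT pin (mappable and fenceable); later calls only bump the
 * user pin count.  Purgeable objects and objects pinned by another file are
 * rejected.  The resulting GTT offset is returned in args->offset.
 */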
3401 int
3402 i915_gem_pin_ioctl(struct drm_device *dev, void *data,
3403                    struct drm_file *file)
3404 {
3405         struct drm_i915_gem_pin *args = data;
3406         struct drm_i915_gem_object *obj;
3407         int ret;
3408
3409         ret = i915_mutex_lock_interruptible(dev);
3410         if (ret)
3411                 return ret;
3412
3413         obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
3414         if (&obj->base == NULL) {
3415                 ret = -ENOENT;
3416                 goto unlock;
3417         }
3418
3419         if (obj->madv != I915_MADV_WILLNEED) {
3420                 DRM_ERROR("Attempting to pin a purgeable buffer\n");
3421                 ret = -EINVAL;
3422                 goto out;
3423         }
3424
3425         if (obj->pin_filp != NULL && obj->pin_filp != file) {
3426                 DRM_ERROR("Already pinned in i915_gem_pin_ioctl(): %d\n",
3427                           args->handle);
3428                 ret = -EINVAL;
3429                 goto out;
3430         }
3431
3432         obj->user_pin_count++;
3433         obj->pin_filp = file;
3434         if (obj->user_pin_count == 1) {
3435                 ret = i915_gem_object_pin(obj, args->alignment, true);
3436                 if (ret)
3437                         goto out;
3438         }
3439
3440         /* XXX - flush the CPU caches for pinned objects
3441          * as the X server doesn't manage domains yet
3442          */
3443         i915_gem_object_flush_cpu_write_domain(obj);
3444         args->offset = obj->gtt_offset;
3445 out:
3446         drm_gem_object_unreference(&obj->base);
3447 unlock:
3448         mutex_unlock(&dev->struct_mutex);
3449         return ret;
3450 }
3451
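/**
 * Userspace interface for releasing a pin taken with i915_gem_pin_ioctl().
 * Only the file that pinned the object may unpin it; the GTT pin itself is
 * dropped once the user pin count reaches zero.
 */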
3452 int
3453 i915_gem_unpin_ioctl(struct drm_device *dev, void *data,
3454                      struct drm_file *file)
3455 {
3456         struct drm_i915_gem_pin *args = data;
3457         struct drm_i915_gem_object *obj;
3458         int ret;
3459
3460         ret = i915_mutex_lock_interruptible(dev);
3461         if (ret)
3462                 return ret;
3463
3464         obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
3465         if (&obj->base == NULL) {
3466                 ret = -ENOENT;
3467                 goto unlock;
3468         }
3469
3470         if (obj->pin_filp != file) {
3471                 DRM_ERROR("Not pinned by caller in i915_gem_unpin_ioctl(): %d\n",
3472                           args->handle);
3473                 ret = -EINVAL;
3474                 goto out;
3475         }
3476         obj->user_pin_count--;
3477         if (obj->user_pin_count == 0) {
3478                 obj->pin_filp = NULL;
3479                 i915_gem_object_unpin(obj);
3480         }
3481
3482 out:
3483         drm_gem_object_unreference(&obj->base);
3484 unlock:
3485         mutex_unlock(&dev->struct_mutex);
3486         return ret;
3487 }
3488
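/**
 * Reports whether the object is still in use by the GPU.  As described in
 * the comments below, active objects have their outstanding writes flushed
 * (or a retiring request emitted) so that they eventually become non-busy
 * without further action from userspace.
 */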
3489 int
3490 i915_gem_busy_ioctl(struct drm_device *dev, void *data,
3491                     struct drm_file *file)
3492 {
3493         struct drm_i915_gem_busy *args = data;
3494         struct drm_i915_gem_object *obj;
3495         int ret;
3496
3497         ret = i915_mutex_lock_interruptible(dev);
3498         if (ret)
3499                 return ret;
3500
3501         obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
3502         if (&obj->base == NULL) {
3503                 ret = -ENOENT;
3504                 goto unlock;
3505         }
3506
3507         /* Count all active objects as busy, even if they are currently not used
3508          * by the gpu. Users of this interface expect objects to eventually
3509          * become non-busy without any further action, so emit any
3510          * necessary flushes here.
3511          */
3512         args->busy = obj->active;
3513         if (args->busy) {
3514                 /* Unconditionally flush objects, even when the gpu still uses this
3515                  * object. Userspace calling this function indicates that it wants to
3516                  * use this buffer sooner rather than later, so issuing the required
3517                  * flush earlier is beneficial.
3518                  */
3519                 if (obj->base.write_domain & I915_GEM_GPU_DOMAINS) {
3520                         ret = i915_gem_flush_ring(obj->ring,
3521                                                   0, obj->base.write_domain);
3522                 } else if (obj->ring->outstanding_lazy_request ==
3523                            obj->last_rendering_seqno) {
3524                         struct drm_i915_gem_request *request;
3525
3526                         /* This ring is not being cleared by active usage,
3527                          * so emit a request to do so.
3528                          */
3529                         request = kzalloc(sizeof(*request), GFP_KERNEL);
3530                         if (request) {
3531                                 ret = i915_add_request(obj->ring, NULL, request);
3532                                 if (ret)
3533                                         kfree(request);
3534                         } else
3535                                 ret = -ENOMEM;
3536                 }
3537
3538                 /* Update the active list for the hardware's current position.
3539                  * Otherwise this only updates on a delayed timer or when irqs
3540                  * are actually unmasked, and our working set ends up being
3541                  * larger than required.
3542                  */
3543                 i915_gem_retire_requests_ring(obj->ring);
3544
3545                 args->busy = obj->active;
3546         }
3547
3548         drm_gem_object_unreference(&obj->base);
3549 unlock:
3550         mutex_unlock(&dev->struct_mutex);
3551         return ret;
3552 }
3553
3554 int
3555 i915_gem_throttle_ioctl(struct drm_device *dev, void *data,
3556                         struct drm_file *file_priv)
3557 {
3558         return i915_gem_ring_throttle(dev, file_priv);
3559 }
3560
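/**
 * Marks the object's backing storage as needed (WILLNEED) or discardable
 * (DONTNEED).  Pinned objects cannot be marked; an unbound purgeable object
 * has its backing storage truncated immediately.  Whether the pages are
 * still retained is reported back in args->retained.
 */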
3561 int
3562 i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
3563                        struct drm_file *file_priv)
3564 {
3565         struct drm_i915_gem_madvise *args = data;
3566         struct drm_i915_gem_object *obj;
3567         int ret;
3568
3569         switch (args->madv) {
3570         case I915_MADV_DONTNEED:
3571         case I915_MADV_WILLNEED:
3572             break;
3573         default:
3574             return -EINVAL;
3575         }
3576
3577         ret = i915_mutex_lock_interruptible(dev);
3578         if (ret)
3579                 return ret;
3580
3581         obj = to_intel_bo(drm_gem_object_lookup(dev, file_priv, args->handle));
3582         if (&obj->base == NULL) {
3583                 ret = -ENOENT;
3584                 goto unlock;
3585         }
3586
3587         if (obj->pin_count) {
3588                 ret = -EINVAL;
3589                 goto out;
3590         }
3591
3592         if (obj->madv != __I915_MADV_PURGED)
3593                 obj->madv = args->madv;
3594
3595         /* if the object is no longer bound, discard its backing storage */
3596         if (i915_gem_object_is_purgeable(obj) &&
3597             obj->gtt_space == NULL)
3598                 i915_gem_object_truncate(obj);
3599
3600         args->retained = obj->madv != __I915_MADV_PURGED;
3601
3602 out:
3603         drm_gem_object_unreference(&obj->base);
3604 unlock:
3605         mutex_unlock(&dev->struct_mutex);
3606         return ret;
3607 }
3608
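/**
 * Allocates a new GEM object of the given size, backed by shmem.  Freshly
 * allocated objects start out unbound and in the CPU domain, with LLC
 * caching on GEN6/GEN7 and uncached otherwise (see the comment below).
 */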
3609 struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev,
3610                                                   size_t size)
3611 {
3612         struct drm_i915_private *dev_priv = dev->dev_private;
3613         struct drm_i915_gem_object *obj;
3614         struct address_space *mapping;
3615
3616         obj = kzalloc(sizeof(*obj), GFP_KERNEL);
3617         if (obj == NULL)
3618                 return NULL;
3619
3620         if (drm_gem_object_init(dev, &obj->base, size) != 0) {
3621                 kfree(obj);
3622                 return NULL;
3623         }
3624
3625         mapping = obj->base.filp->f_path.dentry->d_inode->i_mapping;
3626         mapping_set_gfp_mask(mapping, GFP_HIGHUSER | __GFP_RECLAIMABLE);
3627
3628         i915_gem_info_add_obj(dev_priv, size);
3629
3630         obj->base.write_domain = I915_GEM_DOMAIN_CPU;
3631         obj->base.read_domains = I915_GEM_DOMAIN_CPU;
3632
3633         if (IS_GEN6(dev) || IS_GEN7(dev)) {
3634                 /* On Gen6, we can have the GPU use the LLC (the CPU
3635                  * cache) for about a 10% performance improvement
3636                  * compared to uncached.  Graphics requests other than
3637                  * display scanout are coherent with the CPU in
3638                  * accessing this cache.  This means in this mode we
3639                  * don't need to clflush on the CPU side, and on the
3640                  * GPU side we only need to flush internal caches to
3641                  * get data visible to the CPU.
3642                  *
3643                  * However, we maintain the display planes as UC, and so
3644                  * need to rebind when first used as such.
3645                  */
3646                 obj->cache_level = I915_CACHE_LLC;
3647         } else
3648                 obj->cache_level = I915_CACHE_NONE;
3649
3650         obj->base.driver_private = NULL;
3651         obj->fence_reg = I915_FENCE_REG_NONE;
3652         INIT_LIST_HEAD(&obj->mm_list);
3653         INIT_LIST_HEAD(&obj->gtt_list);
3654         INIT_LIST_HEAD(&obj->ring_list);
3655         INIT_LIST_HEAD(&obj->exec_list);
3656         INIT_LIST_HEAD(&obj->gpu_write_list);
3657         obj->madv = I915_MADV_WILLNEED;
3658         /* Avoid an unnecessary call to unbind on the first bind. */
3659         obj->map_and_fenceable = true;
3660
3661         return obj;
3662 }
3663
3664 int i915_gem_init_object(struct drm_gem_object *obj)
3665 {
3666         BUG();
3667
3668         return 0;
3669 }
3670
3671 static void i915_gem_free_object_tail(struct drm_i915_gem_object *obj)
3672 {
3673         struct drm_device *dev = obj->base.dev;
3674         drm_i915_private_t *dev_priv = dev->dev_private;
3675         int ret;
3676
3677         ret = i915_gem_object_unbind(obj);
3678         if (ret == -ERESTARTSYS) {
3679                 list_move(&obj->mm_list,
3680                           &dev_priv->mm.deferred_free_list);
3681                 return;
3682         }
3683
3684         trace_i915_gem_object_destroy(obj);
3685
3686         if (obj->base.map_list.map)
3687                 drm_gem_free_mmap_offset(&obj->base);
3688
3689         drm_gem_object_release(&obj->base);
3690         i915_gem_info_remove_obj(dev_priv, obj->base.size);
3691
3692         kfree(obj->page_cpu_valid);
3693         kfree(obj->bit_17);
3694         kfree(obj);
3695 }
3696
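/**
 * Final unreference callback for a GEM object: drops any remaining pins,
 * detaches the object from its physical backing if necessary, and releases
 * the storage and bookkeeping via i915_gem_free_object_tail().
 */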
3697 void i915_gem_free_object(struct drm_gem_object *gem_obj)
3698 {
3699         struct drm_i915_gem_object *obj = to_intel_bo(gem_obj);
3700         struct drm_device *dev = obj->base.dev;
3701
3702         while (obj->pin_count > 0)
3703                 i915_gem_object_unpin(obj);
3704
3705         if (obj->phys_obj)
3706                 i915_gem_detach_phys_object(dev, obj);
3707
3708         i915_gem_free_object_tail(obj);
3709 }
3710
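/**
 * Quiesces GEM for suspend or teardown: waits for the GPU to go idle,
 * evicts inactive buffers under UMS, resets the fence registers, marks the
 * device suspended and tears down the rings.  The retire work handler is
 * cancelled last, once it should be idle.
 */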
3711 int
3712 i915_gem_idle(struct drm_device *dev)
3713 {
3714         drm_i915_private_t *dev_priv = dev->dev_private;
3715         int ret;
3716
3717         mutex_lock(&dev->struct_mutex);
3718
3719         if (dev_priv->mm.suspended) {
3720                 mutex_unlock(&dev->struct_mutex);
3721                 return 0;
3722         }
3723
3724         ret = i915_gpu_idle(dev);
3725         if (ret) {
3726                 mutex_unlock(&dev->struct_mutex);
3727                 return ret;
3728         }
3729
3730         /* Under UMS, be paranoid and evict. */
3731         if (!drm_core_check_feature(dev, DRIVER_MODESET)) {
3732                 ret = i915_gem_evict_inactive(dev, false);
3733                 if (ret) {
3734                         mutex_unlock(&dev->struct_mutex);
3735                         return ret;
3736                 }
3737         }
3738
3739         i915_gem_reset_fences(dev);
3740
3741         /* Hack!  Don't let anybody do execbuf while we don't control the chip.
3742          * We need to replace this with a semaphore, or something.
3743          * And not confound mm.suspended!
3744          */
3745         dev_priv->mm.suspended = 1;
3746         del_timer_sync(&dev_priv->hangcheck_timer);
3747
3748         i915_kernel_lost_context(dev);
3749         i915_gem_cleanup_ringbuffer(dev);
3750
3751         mutex_unlock(&dev->struct_mutex);
3752
3753         /* Cancel the retire work handler, which should be idle now. */
3754         cancel_delayed_work_sync(&dev_priv->mm.retire_work);
3755
3756         return 0;
3757 }
3758
3759 int
3760 i915_gem_init_ringbuffer(struct drm_device *dev)
3761 {
3762         drm_i915_private_t *dev_priv = dev->dev_private;
3763         int ret;
3764
3765         ret = intel_init_render_ring_buffer(dev);
3766         if (ret)
3767                 return ret;
3768
3769         if (HAS_BSD(dev)) {
3770                 ret = intel_init_bsd_ring_buffer(dev);
3771                 if (ret)
3772                         goto cleanup_render_ring;
3773         }
3774
3775         if (HAS_BLT(dev)) {
3776                 ret = intel_init_blt_ring_buffer(dev);
3777                 if (ret)
3778                         goto cleanup_bsd_ring;
3779         }
3780
3781         dev_priv->next_seqno = 1;
3782
3783         return 0;
3784
3785 cleanup_bsd_ring:
3786         intel_cleanup_ring_buffer(&dev_priv->ring[VCS]);
3787 cleanup_render_ring:
3788         intel_cleanup_ring_buffer(&dev_priv->ring[RCS]);
3789         return ret;
3790 }
3791
3792 void
3793 i915_gem_cleanup_ringbuffer(struct drm_device *dev)
3794 {
3795         drm_i915_private_t *dev_priv = dev->dev_private;
3796         int i;
3797
3798         for (i = 0; i < I915_NUM_RINGS; i++)
3799                 intel_cleanup_ring_buffer(&dev_priv->ring[i]);
3800 }
3801
3802 int
3803 i915_gem_entervt_ioctl(struct drm_device *dev, void *data,
3804                        struct drm_file *file_priv)
3805 {
3806         drm_i915_private_t *dev_priv = dev->dev_private;
3807         int ret, i;
3808
3809         if (drm_core_check_feature(dev, DRIVER_MODESET))
3810                 return 0;
3811
3812         if (atomic_read(&dev_priv->mm.wedged)) {
3813                 DRM_ERROR("Reenabling wedged hardware, good luck\n");
3814                 atomic_set(&dev_priv->mm.wedged, 0);
3815         }
3816
3817         mutex_lock(&dev->struct_mutex);
3818         dev_priv->mm.suspended = 0;
3819
3820         ret = i915_gem_init_ringbuffer(dev);
3821         if (ret != 0) {
3822                 mutex_unlock(&dev->struct_mutex);
3823                 return ret;
3824         }
3825
3826         BUG_ON(!list_empty(&dev_priv->mm.active_list));
3827         BUG_ON(!list_empty(&dev_priv->mm.flushing_list));
3828         BUG_ON(!list_empty(&dev_priv->mm.inactive_list));
3829         for (i = 0; i < I915_NUM_RINGS; i++) {
3830                 BUG_ON(!list_empty(&dev_priv->ring[i].active_list));
3831                 BUG_ON(!list_empty(&dev_priv->ring[i].request_list));
3832         }
3833         mutex_unlock(&dev->struct_mutex);
3834
3835         ret = drm_irq_install(dev);
3836         if (ret)
3837                 goto cleanup_ringbuffer;
3838
3839         return 0;
3840
3841 cleanup_ringbuffer:
3842         mutex_lock(&dev->struct_mutex);
3843         i915_gem_cleanup_ringbuffer(dev);
3844         dev_priv->mm.suspended = 1;
3845         mutex_unlock(&dev->struct_mutex);
3846
3847         return ret;
3848 }
3849
3850 int
3851 i915_gem_leavevt_ioctl(struct drm_device *dev, void *data,
3852                        struct drm_file *file_priv)
3853 {
3854         if (drm_core_check_feature(dev, DRIVER_MODESET))
3855                 return 0;
3856
3857         drm_irq_uninstall(dev);
3858         return i915_gem_idle(dev);
3859 }
3860
3861 void
3862 i915_gem_lastclose(struct drm_device *dev)
3863 {
3864         int ret;
3865
3866         if (drm_core_check_feature(dev, DRIVER_MODESET))
3867                 return;
3868
3869         ret = i915_gem_idle(dev);
3870         if (ret)
3871                 DRM_ERROR("failed to idle hardware: %d\n", ret);
3872 }
3873
3874 static void
3875 init_ring_lists(struct intel_ring_buffer *ring)
3876 {
3877         INIT_LIST_HEAD(&ring->active_list);
3878         INIT_LIST_HEAD(&ring->request_list);
3879         INIT_LIST_HEAD(&ring->gpu_write_list);
3880 }
3881
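/**
 * One-time GEM initialization at driver load: sets up the memory-management
 * lists, the retire work handler, fence register state, tiling swizzle
 * detection and the inactive-list shrinker, along with a couple of
 * chipset-specific workarounds.
 */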
3882 void
3883 i915_gem_load(struct drm_device *dev)
3884 {
3885         int i;
3886         drm_i915_private_t *dev_priv = dev->dev_private;
3887
3888         INIT_LIST_HEAD(&dev_priv->mm.active_list);
3889         INIT_LIST_HEAD(&dev_priv->mm.flushing_list);
3890         INIT_LIST_HEAD(&dev_priv->mm.inactive_list);
3891         INIT_LIST_HEAD(&dev_priv->mm.pinned_list);
3892         INIT_LIST_HEAD(&dev_priv->mm.fence_list);
3893         INIT_LIST_HEAD(&dev_priv->mm.deferred_free_list);
3894         INIT_LIST_HEAD(&dev_priv->mm.gtt_list);
3895         for (i = 0; i < I915_NUM_RINGS; i++)
3896                 init_ring_lists(&dev_priv->ring[i]);
3897         for (i = 0; i < I915_MAX_NUM_FENCES; i++)
3898                 INIT_LIST_HEAD(&dev_priv->fence_regs[i].lru_list);
3899         INIT_DELAYED_WORK(&dev_priv->mm.retire_work,
3900                           i915_gem_retire_work_handler);
3901         init_completion(&dev_priv->error_completion);
3902
3903         /* On GEN3 we really need to make sure the ARB C3 LP bit is set */
3904         if (IS_GEN3(dev)) {
3905                 u32 tmp = I915_READ(MI_ARB_STATE);
3906                 if (!(tmp & MI_ARB_C3_LP_WRITE_ENABLE)) {
3907                         /* arb state is a masked write, so set bit + bit in mask */
3908                         tmp = MI_ARB_C3_LP_WRITE_ENABLE | (MI_ARB_C3_LP_WRITE_ENABLE << MI_ARB_MASK_SHIFT);
3909                         I915_WRITE(MI_ARB_STATE, tmp);
3910                 }
3911         }
3912
3913         dev_priv->relative_constants_mode = I915_EXEC_CONSTANTS_REL_GENERAL;
3914
3915         /* Old X drivers will take 0-2 for front, back, depth buffers */
3916         if (!drm_core_check_feature(dev, DRIVER_MODESET))
3917                 dev_priv->fence_reg_start = 3;
3918
3919         if (INTEL_INFO(dev)->gen >= 4 || IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
3920                 dev_priv->num_fence_regs = 16;
3921         else
3922                 dev_priv->num_fence_regs = 8;
3923
3924         /* Initialize fence registers to zero */
3925         for (i = 0; i < dev_priv->num_fence_regs; i++) {
3926                 i915_gem_clear_fence_reg(dev, &dev_priv->fence_regs[i]);
3927         }
3928
3929         i915_gem_detect_bit_6_swizzle(dev);
3930         init_waitqueue_head(&dev_priv->pending_flip_queue);
3931
3932         dev_priv->mm.interruptible = true;
3933
3934         dev_priv->mm.inactive_shrinker.shrink = i915_gem_inactive_shrink;
3935         dev_priv->mm.inactive_shrinker.seeks = DEFAULT_SEEKS;
3936         register_shrinker(&dev_priv->mm.inactive_shrinker);
3937 }
3938
3939 /*
3940  * Create a physically contiguous memory object for this object,
3941  * e.g. for cursor + overlay regs.
3942  */
3943 static int i915_gem_init_phys_object(struct drm_device *dev,
3944                                      int id, int size, int align)
3945 {
3946         drm_i915_private_t *dev_priv = dev->dev_private;
3947         struct drm_i915_gem_phys_object *phys_obj;
3948         int ret;
3949
3950         if (dev_priv->mm.phys_objs[id - 1] || !size)
3951                 return 0;
3952
3953         phys_obj = kzalloc(sizeof(struct drm_i915_gem_phys_object), GFP_KERNEL);
3954         if (!phys_obj)
3955                 return -ENOMEM;
3956
3957         phys_obj->id = id;
3958
3959         phys_obj->handle = drm_pci_alloc(dev, size, align);
3960         if (!phys_obj->handle) {
3961                 ret = -ENOMEM;
3962                 goto kfree_obj;
3963         }
3964 #ifdef CONFIG_X86
3965         set_memory_wc((unsigned long)phys_obj->handle->vaddr, phys_obj->handle->size / PAGE_SIZE);
3966 #endif
3967
3968         dev_priv->mm.phys_objs[id - 1] = phys_obj;
3969
3970         return 0;
3971 kfree_obj:
3972         kfree(phys_obj);
3973         return ret;
3974 }
3975
3976 static void i915_gem_free_phys_object(struct drm_device *dev, int id)
3977 {
3978         drm_i915_private_t *dev_priv = dev->dev_private;
3979         struct drm_i915_gem_phys_object *phys_obj;
3980
3981         if (!dev_priv->mm.phys_objs[id - 1])
3982                 return;
3983
3984         phys_obj = dev_priv->mm.phys_objs[id - 1];
3985         if (phys_obj->cur_obj) {
3986                 i915_gem_detach_phys_object(dev, phys_obj->cur_obj);
3987         }
3988
3989 #ifdef CONFIG_X86
3990         set_memory_wb((unsigned long)phys_obj->handle->vaddr, phys_obj->handle->size / PAGE_SIZE);
3991 #endif
3992         drm_pci_free(dev, phys_obj->handle);
3993         kfree(phys_obj);
3994         dev_priv->mm.phys_objs[id - 1] = NULL;
3995 }
3996
3997 void i915_gem_free_all_phys_object(struct drm_device *dev)
3998 {
3999         int i;
4000
4001         for (i = I915_GEM_PHYS_CURSOR_0; i <= I915_MAX_PHYS_OBJECT; i++)
4002                 i915_gem_free_phys_object(dev, i);
4003 }
4004
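/**
 * Copies the contents of the physically contiguous buffer back into the
 * object's shmem pages and breaks the object's association with it.
 */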
4005 void i915_gem_detach_phys_object(struct drm_device *dev,
4006                                  struct drm_i915_gem_object *obj)
4007 {
4008         struct address_space *mapping = obj->base.filp->f_path.dentry->d_inode->i_mapping;
4009         char *vaddr;
4010         int i;
4011         int page_count;
4012
4013         if (!obj->phys_obj)
4014                 return;
4015         vaddr = obj->phys_obj->handle->vaddr;
4016
4017         page_count = obj->base.size / PAGE_SIZE;
4018         for (i = 0; i < page_count; i++) {
4019                 struct page *page = shmem_read_mapping_page(mapping, i);
4020                 if (!IS_ERR(page)) {
4021                         char *dst = kmap_atomic(page);
4022                         memcpy(dst, vaddr + i*PAGE_SIZE, PAGE_SIZE);
4023                         kunmap_atomic(dst);
4024
4025                         drm_clflush_pages(&page, 1);
4026
4027                         set_page_dirty(page);
4028                         mark_page_accessed(page);
4029                         page_cache_release(page);
4030                 }
4031         }
4032         intel_gtt_chipset_flush();
4033
4034         obj->phys_obj->cur_obj = NULL;
4035         obj->phys_obj = NULL;
4036 }
4037
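/**
 * Binds the object to a physically contiguous buffer (allocating one on
 * first use) and copies the current shmem-backed contents into it.  Used
 * for objects the hardware must address physically, such as cursors and
 * overlay registers.
 */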
4038 int
4039 i915_gem_attach_phys_object(struct drm_device *dev,
4040                             struct drm_i915_gem_object *obj,
4041                             int id,
4042                             int align)
4043 {
4044         struct address_space *mapping = obj->base.filp->f_path.dentry->d_inode->i_mapping;
4045         drm_i915_private_t *dev_priv = dev->dev_private;
4046         int ret = 0;
4047         int page_count;
4048         int i;
4049
4050         if (id > I915_MAX_PHYS_OBJECT)
4051                 return -EINVAL;
4052
4053         if (obj->phys_obj) {
4054                 if (obj->phys_obj->id == id)
4055                         return 0;
4056                 i915_gem_detach_phys_object(dev, obj);
4057         }
4058
4059         /* create a new object */
4060         if (!dev_priv->mm.phys_objs[id - 1]) {
4061                 ret = i915_gem_init_phys_object(dev, id,
4062                                                 obj->base.size, align);
4063                 if (ret) {
4064                         DRM_ERROR("failed to init phys object %d size: %zu\n",
4065                                   id, obj->base.size);
4066                         return ret;
4067                 }
4068         }
4069
4070         /* bind to the object */
4071         obj->phys_obj = dev_priv->mm.phys_objs[id - 1];
4072         obj->phys_obj->cur_obj = obj;
4073
4074         page_count = obj->base.size / PAGE_SIZE;
4075
4076         for (i = 0; i < page_count; i++) {
4077                 struct page *page;
4078                 char *dst, *src;
4079
4080                 page = shmem_read_mapping_page(mapping, i);
4081                 if (IS_ERR(page))
4082                         return PTR_ERR(page);
4083
4084                 src = kmap_atomic(page);
4085                 dst = obj->phys_obj->handle->vaddr + (i * PAGE_SIZE);
4086                 memcpy(dst, src, PAGE_SIZE);
4087                 kunmap_atomic(src);
4088
4089                 mark_page_accessed(page);
4090                 page_cache_release(page);
4091         }
4092
4093         return 0;
4094 }
4095
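/**
 * pwrite fast path for objects backed by a physical object: user data is
 * copied straight into the contiguous buffer, falling back to a sleeping
 * copy_from_user() (with struct_mutex temporarily dropped) if the atomic
 * non-caching copy cannot complete, and the chipset write buffers are then
 * flushed.
 */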
4096 static int
4097 i915_gem_phys_pwrite(struct drm_device *dev,
4098                      struct drm_i915_gem_object *obj,
4099                      struct drm_i915_gem_pwrite *args,
4100                      struct drm_file *file_priv)
4101 {
4102         void *vaddr = obj->phys_obj->handle->vaddr + args->offset;
4103         char __user *user_data = (char __user *) (uintptr_t) args->data_ptr;
4104
4105         if (__copy_from_user_inatomic_nocache(vaddr, user_data, args->size)) {
4106                 unsigned long unwritten;
4107
4108                 /* The physical object once assigned is fixed for the lifetime
4109                  * of the obj, so we can safely drop the lock and continue
4110                  * to access vaddr.
4111                  */
4112                 mutex_unlock(&dev->struct_mutex);
4113                 unwritten = copy_from_user(vaddr, user_data, args->size);
4114                 mutex_lock(&dev->struct_mutex);
4115                 if (unwritten)
4116                         return -EFAULT;
4117         }
4118
4119         intel_gtt_chipset_flush();
4120         return 0;
4121 }
4122
4123 void i915_gem_release(struct drm_device *dev, struct drm_file *file)
4124 {
4125         struct drm_i915_file_private *file_priv = file->driver_priv;
4126
4127         /* Clean up our request list when the client is going away, so that
4128          * later retire_requests won't dereference our soon-to-be-gone
4129          * file_priv.
4130          */
4131         spin_lock(&file_priv->mm.lock);
4132         while (!list_empty(&file_priv->mm.request_list)) {
4133                 struct drm_i915_gem_request *request;
4134
4135                 request = list_first_entry(&file_priv->mm.request_list,
4136                                            struct drm_i915_gem_request,
4137                                            client_list);
4138                 list_del(&request->client_list);
4139                 request->file_priv = NULL;
4140         }
4141         spin_unlock(&file_priv->mm.lock);
4142 }
4143
4144 static int
4145 i915_gpu_is_active(struct drm_device *dev)
4146 {
4147         drm_i915_private_t *dev_priv = dev->dev_private;
4148         int lists_empty;
4149
4150         lists_empty = list_empty(&dev_priv->mm.flushing_list) &&
4151                       list_empty(&dev_priv->mm.active_list);
4152
4153         return !lists_empty;
4154 }
4155
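/**
 * Shrinker callback: with sc->nr_to_scan == 0 it merely counts the objects
 * on the inactive list; otherwise it retires completed requests, unbinds
 * purgeable inactive objects first and then any remaining inactive objects,
 * waiting for the GPU to idle and rescanning as a last resort.
 */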
4156 static int
4157 i915_gem_inactive_shrink(struct shrinker *shrinker, struct shrink_control *sc)
4158 {
4159         struct drm_i915_private *dev_priv =
4160                 container_of(shrinker,
4161                              struct drm_i915_private,
4162                              mm.inactive_shrinker);
4163         struct drm_device *dev = dev_priv->dev;
4164         struct drm_i915_gem_object *obj, *next;
4165         int nr_to_scan = sc->nr_to_scan;
4166         int cnt;
4167
4168         if (!mutex_trylock(&dev->struct_mutex))
4169                 return 0;
4170
4171         /* "fast-path" to count number of available objects */
4172         if (nr_to_scan == 0) {
4173                 cnt = 0;
4174                 list_for_each_entry(obj,
4175                                     &dev_priv->mm.inactive_list,
4176                                     mm_list)
4177                         cnt++;
4178                 mutex_unlock(&dev->struct_mutex);
4179                 return cnt / 100 * sysctl_vfs_cache_pressure;
4180         }
4181
4182 rescan:
4183         /* first scan for clean buffers */
4184         i915_gem_retire_requests(dev);
4185
4186         list_for_each_entry_safe(obj, next,
4187                                  &dev_priv->mm.inactive_list,
4188                                  mm_list) {
4189                 if (i915_gem_object_is_purgeable(obj)) {
4190                         if (i915_gem_object_unbind(obj) == 0 &&
4191                             --nr_to_scan == 0)
4192                                 break;
4193                 }
4194         }
4195
4196         /* second pass, evict/count anything still on the inactive list */
4197         cnt = 0;
4198         list_for_each_entry_safe(obj, next,
4199                                  &dev_priv->mm.inactive_list,
4200                                  mm_list) {
4201                 if (nr_to_scan &&
4202                     i915_gem_object_unbind(obj) == 0)
4203                         nr_to_scan--;
4204                 else
4205                         cnt++;
4206         }
4207
4208         if (nr_to_scan && i915_gpu_is_active(dev)) {
4209                 /*
4210                  * We are desperate for pages, so as a last resort, wait
4211                  * for the GPU to finish and discard whatever we can.
4212                  * This dramatically reduces the number of OOM-killer
4213                  * events whilst running the GPU aggressively.
4214                  */
4215                 if (i915_gpu_idle(dev) == 0)
4216                         goto rescan;
4217         }
4218         mutex_unlock(&dev->struct_mutex);
4219         return cnt / 100 * sysctl_vfs_cache_pressure;
4220 }