/*
 * Copyright (c) 2011 Samsung Electronics Co., Ltd.
 * Author: Inki Dae <inki.dae@samsung.com>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include <drm/drmP.h>	/* core DRM definitions (struct drm_device, DRM_ERROR, GEM helpers) */

#include <linux/shmem_fs.h>
#include <drm/exynos_drm.h>

#include "exynos_drm_drv.h"
#include "exynos_drm_gem.h"
#include "exynos_drm_buf.h"
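
/*
 * Convert an error code seen while handling a page fault into the
 * VM_FAULT_* code that the fault handler must return to the mm layer.
 */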
static unsigned int convert_to_vm_err_msg(int msg)
		out_msg = VM_FAULT_NOPAGE;
		out_msg = VM_FAULT_OOM;
		out_msg = VM_FAULT_SIGBUS;

static int check_gem_flags(unsigned int flags)
	if (flags & ~(EXYNOS_BO_MASK)) {
		DRM_ERROR("invalid flags.\n");
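
/*
 * Round the requested allocation size up according to the buffer type:
 * contiguous buffers are rounded to SECTION_SIZE or SZ_64K, presumably so
 * they can be mapped with larger MMU entries, while non-contiguous buffers
 * only need PAGE_SIZE alignment.
 */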
static unsigned long roundup_gem_size(unsigned long size, unsigned int flags)
	if (!IS_NONCONTIG_BUFFER(flags)) {
		if (size >= SECTION_SIZE)
			return roundup(size, SECTION_SIZE);
		else if (size >= SZ_64K)
			return roundup(size, SZ_64K);

	return roundup(size, PAGE_SIZE);
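
/*
 * Pin the pages that back the GEM object: the object is backed by a shmem
 * file, so each page is obtained (and referenced) through
 * shmem_read_mapping_page_gfp() and collected into a page array.
 */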
static struct page **exynos_gem_get_pages(struct drm_gem_object *obj,
						gfp_t gfpmask)
	struct address_space *mapping;
	struct page *p, **pages;

	/* This is the shared memory object that backs the GEM resource. */
	inode = obj->filp->f_path.dentry->d_inode;
	mapping = inode->i_mapping;

	npages = obj->size >> PAGE_SHIFT;

	pages = drm_malloc_ab(npages, sizeof(struct page *));
		return ERR_PTR(-ENOMEM);

	gfpmask |= mapping_gfp_mask(mapping);

	for (i = 0; i < npages; i++) {
		p = shmem_read_mapping_page_gfp(mapping, i, gfpmask);

		page_cache_release(pages[i]);
	drm_free_large(pages);
	return ERR_PTR(PTR_ERR(p));
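
/*
 * Release the page references taken by exynos_gem_get_pages(), optionally
 * marking each page dirty and/or accessed first, then free the page array.
 */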
static void exynos_gem_put_pages(struct drm_gem_object *obj,
					struct page **pages,
					bool dirty, bool accessed)
	npages = obj->size >> PAGE_SHIFT;

	for (i = 0; i < npages; i++) {
		if (dirty)
			set_page_dirty(pages[i]);

		if (accessed)
			mark_page_accessed(pages[i]);

		/* Undo the reference we took when populating the table. */
		page_cache_release(pages[i]);
	}

	drm_free_large(pages);
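
/*
 * Resolve the page frame number for the faulting address: non-contiguous
 * buffers take it from the pinned page array, contiguous buffers compute it
 * from the buffer's DMA address, then it is inserted into the user mapping.
 */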
static int exynos_drm_gem_map_pages(struct drm_gem_object *obj,
					struct vm_area_struct *vma,
					unsigned long f_vaddr,
					pgoff_t page_offset)
	struct exynos_drm_gem_obj *exynos_gem_obj = to_exynos_gem_obj(obj);
	struct exynos_drm_gem_buf *buf = exynos_gem_obj->buffer;

	if (exynos_gem_obj->flags & EXYNOS_BO_NONCONTIG) {
		pfn = page_to_pfn(buf->pages[page_offset++]);
	} else
		pfn = (buf->dma_addr >> PAGE_SHIFT) + page_offset;

	return vm_insert_mixed(vma, f_vaddr, pfn);
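
/*
 * Pin all pages backing a non-contiguous GEM object and describe them in a
 * newly allocated sg_table so they can be used by the rest of the driver.
 */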
static int exynos_drm_gem_get_pages(struct drm_gem_object *obj)
	struct exynos_drm_gem_obj *exynos_gem_obj = to_exynos_gem_obj(obj);
	struct exynos_drm_gem_buf *buf = exynos_gem_obj->buffer;
	struct scatterlist *sgl;
	unsigned int npages, i = 0;

		DRM_DEBUG_KMS("already allocated.\n");

	pages = exynos_gem_get_pages(obj, GFP_KERNEL);
		DRM_ERROR("failed to get pages.\n");
		return PTR_ERR(pages);

	npages = obj->size >> PAGE_SHIFT;

	buf->sgt = kzalloc(sizeof(struct sg_table), GFP_KERNEL);
		DRM_ERROR("failed to allocate sg table.\n");

	ret = sg_alloc_table(buf->sgt, npages, GFP_KERNEL);
		DRM_ERROR("failed to initialize sg table.\n");

	/* Add all pages to the sg list. */
		sg_set_page(sgl, pages[i], PAGE_SIZE, 0);
		sg_dma_address(sgl) = page_to_phys(pages[i]);

	/* TODO: add handling for the UNCACHED buffer type here. */

	exynos_gem_put_pages(obj, pages, true, false);

static void exynos_drm_gem_put_pages(struct drm_gem_object *obj)
	struct exynos_drm_gem_obj *exynos_gem_obj = to_exynos_gem_obj(obj);
	struct exynos_drm_gem_buf *buf = exynos_gem_obj->buffer;

	/*
	 * If the buffer type is EXYNOS_BO_NONCONTIG, release all pages
	 * allocated by the GEM fault handler.
	 */
	sg_free_table(buf->sgt);

	exynos_gem_put_pages(obj, buf->pages, true, false);

	/* TODO: add handling for the UNCACHED buffer type here. */

static int exynos_drm_gem_handle_create(struct drm_gem_object *obj,
					struct drm_file *file_priv,
					unsigned int *handle)
	/*
	 * Allocate an id in the idr table where the object is registered;
	 * the returned handle is the id that userspace uses to refer to it.
	 */
	ret = drm_gem_handle_create(file_priv, obj, handle);

	DRM_DEBUG_KMS("gem handle = 0x%x\n", *handle);

	/* Drop the reference from allocation - the handle holds it now. */
	drm_gem_object_unreference_unlocked(obj);

void exynos_drm_gem_destroy(struct exynos_drm_gem_obj *exynos_gem_obj)
	struct drm_gem_object *obj;

	DRM_DEBUG_KMS("%s\n", __FILE__);

	obj = &exynos_gem_obj->base;

	DRM_DEBUG_KMS("handle count = %d\n", atomic_read(&obj->handle_count));

	if ((exynos_gem_obj->flags & EXYNOS_BO_NONCONTIG) &&
			exynos_gem_obj->buffer->pages)
		exynos_drm_gem_put_pages(obj);

	exynos_drm_free_buf(obj->dev, exynos_gem_obj->flags,
				exynos_gem_obj->buffer);

	exynos_drm_fini_buf(obj->dev, exynos_gem_obj->buffer);
	exynos_gem_obj->buffer = NULL;

	if (obj->map_list.map)
		drm_gem_free_mmap_offset(obj);

	/* release the file pointer to the gem object. */
	drm_gem_object_release(obj);

	kfree(exynos_gem_obj);
	exynos_gem_obj = NULL;

static struct exynos_drm_gem_obj *exynos_drm_gem_init(struct drm_device *dev,
						unsigned long size)
	struct exynos_drm_gem_obj *exynos_gem_obj;
	struct drm_gem_object *obj;

	exynos_gem_obj = kzalloc(sizeof(*exynos_gem_obj), GFP_KERNEL);
	if (!exynos_gem_obj) {
		DRM_ERROR("failed to allocate exynos gem object\n");

	exynos_gem_obj->size = size;
	obj = &exynos_gem_obj->base;

	ret = drm_gem_object_init(dev, obj, size);
		DRM_ERROR("failed to initialize gem object\n");
		kfree(exynos_gem_obj);

	DRM_DEBUG_KMS("created file object = 0x%x\n", (unsigned int)obj->filp);

	return exynos_gem_obj;
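
/*
 * Create a GEM object of the requested size: validate the flags, round the
 * size up for the buffer type, and either pin shmem pages
 * (EXYNOS_BO_NONCONTIG) or allocate a physically contiguous buffer.
 */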
struct exynos_drm_gem_obj *exynos_drm_gem_create(struct drm_device *dev,
						unsigned int flags,
						unsigned long size)
	struct exynos_drm_gem_obj *exynos_gem_obj;
	struct exynos_drm_gem_buf *buf;

		DRM_ERROR("invalid size.\n");
		return ERR_PTR(-EINVAL);

	size = roundup_gem_size(size, flags);
	DRM_DEBUG_KMS("%s\n", __FILE__);

	ret = check_gem_flags(flags);

	buf = exynos_drm_init_buf(dev, size);
		return ERR_PTR(-ENOMEM);

	exynos_gem_obj = exynos_drm_gem_init(dev, size);
	if (!exynos_gem_obj) {

	exynos_gem_obj->buffer = buf;

	/* Set the memory type and cache attribute requested by userspace. */
	exynos_gem_obj->flags = flags;

	/*
	 * Allocate all pages of the requested size if userspace asked for
	 * physically non-contiguous memory.
	 */
	if (flags & EXYNOS_BO_NONCONTIG) {
		ret = exynos_drm_gem_get_pages(&exynos_gem_obj->base);
			drm_gem_object_release(&exynos_gem_obj->base);
	} else {
		ret = exynos_drm_alloc_buf(dev, buf, flags);
			drm_gem_object_release(&exynos_gem_obj->base);
	}

	return exynos_gem_obj;

	exynos_drm_fini_buf(dev, buf);

int exynos_drm_gem_create_ioctl(struct drm_device *dev, void *data,
				struct drm_file *file_priv)
	struct drm_exynos_gem_create *args = data;
	struct exynos_drm_gem_obj *exynos_gem_obj;

	DRM_DEBUG_KMS("%s\n", __FILE__);

	exynos_gem_obj = exynos_drm_gem_create(dev, args->flags, args->size);
	if (IS_ERR(exynos_gem_obj))
		return PTR_ERR(exynos_gem_obj);

	ret = exynos_drm_gem_handle_create(&exynos_gem_obj->base, file_priv,
			&args->handle);
		exynos_drm_gem_destroy(exynos_gem_obj);
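
/*
 * Return a pointer to the buffer's DMA address for in-kernel users. The
 * object reference taken by the handle lookup is intentionally kept;
 * exynos_drm_gem_put_dma_addr() drops it again.
 */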
void *exynos_drm_gem_get_dma_addr(struct drm_device *dev,
					unsigned int gem_handle,
					struct drm_file *file_priv)
	struct exynos_drm_gem_obj *exynos_gem_obj;
	struct drm_gem_object *obj;

	obj = drm_gem_object_lookup(dev, file_priv, gem_handle);
		DRM_ERROR("failed to lookup gem object.\n");
		return ERR_PTR(-EINVAL);

	exynos_gem_obj = to_exynos_gem_obj(obj);

	if (exynos_gem_obj->flags & EXYNOS_BO_NONCONTIG) {
		DRM_DEBUG_KMS("NONCONTIG type is not supported.\n");
		drm_gem_object_unreference_unlocked(obj);

		return ERR_PTR(-EINVAL);
	}

	return &exynos_gem_obj->buffer->dma_addr;

void exynos_drm_gem_put_dma_addr(struct drm_device *dev,
					unsigned int gem_handle,
					struct drm_file *file_priv)
	struct exynos_drm_gem_obj *exynos_gem_obj;
	struct drm_gem_object *obj;

	obj = drm_gem_object_lookup(dev, file_priv, gem_handle);
		DRM_ERROR("failed to lookup gem object.\n");

	exynos_gem_obj = to_exynos_gem_obj(obj);

	if (exynos_gem_obj->flags & EXYNOS_BO_NONCONTIG) {
		DRM_DEBUG_KMS("NONCONTIG type is not supported.\n");
		drm_gem_object_unreference_unlocked(obj);
		return;
	}

	drm_gem_object_unreference_unlocked(obj);

	/*
	 * Drop obj->refcount one more time because we already took an extra
	 * reference in exynos_drm_gem_get_dma_addr().
	 */
	drm_gem_object_unreference_unlocked(obj);

int exynos_drm_gem_map_offset_ioctl(struct drm_device *dev, void *data,
				    struct drm_file *file_priv)
	struct drm_exynos_gem_map_off *args = data;

	DRM_DEBUG_KMS("%s\n", __FILE__);

	DRM_DEBUG_KMS("handle = 0x%x, offset = 0x%lx\n",
			args->handle, (unsigned long)args->offset);

	if (!(dev->driver->driver_features & DRIVER_GEM)) {
		DRM_ERROR("does not support GEM.\n");

	return exynos_drm_gem_dumb_map_offset(file_priv, dev, args->handle,
			&args->offset);
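
/*
 * mmap() backend reached through the object's shmem file (see
 * exynos_drm_gem_mmap_ioctl below): non-contiguous buffers are mapped page
 * by page with vm_insert_page(), contiguous buffers in one go with
 * remap_pfn_range(), and the mapping is always non-cacheable.
 */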
static int exynos_drm_gem_mmap_buffer(struct file *filp,
				      struct vm_area_struct *vma)
	struct drm_gem_object *obj = filp->private_data;
	struct exynos_drm_gem_obj *exynos_gem_obj = to_exynos_gem_obj(obj);
	struct exynos_drm_gem_buf *buffer;
	unsigned long pfn, vm_size, usize, uaddr = vma->vm_start;

	DRM_DEBUG_KMS("%s\n", __FILE__);

	vma->vm_flags |= (VM_IO | VM_RESERVED);

	/* A direct mapping is always non-cacheable. */
	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);

	vm_size = usize = vma->vm_end - vma->vm_start;

	/*
	 * The buffer describes physically contiguous memory allocated at
	 * user request or at framebuffer creation.
	 */
	buffer = exynos_gem_obj->buffer;

	/* Check that the user-requested size is valid. */
	if (vm_size > buffer->size)
		return -EINVAL;

	if (exynos_gem_obj->flags & EXYNOS_BO_NONCONTIG) {
		vma->vm_flags |= VM_MIXEDMAP;

			ret = vm_insert_page(vma, uaddr, buffer->pages[i++]);
				DRM_ERROR("failed to remap user space.\n");
	} else {
		/*
		 * Get the page frame number of the physical memory to be
		 * mapped to user space.
		 */
		pfn = ((unsigned long)exynos_gem_obj->buffer->dma_addr) >>
								PAGE_SHIFT;

		DRM_DEBUG_KMS("pfn = 0x%lx\n", pfn);

		if (remap_pfn_range(vma, vma->vm_start, pfn, vm_size,
					vma->vm_page_prot)) {
			DRM_ERROR("failed to remap pfn range.\n");

static const struct file_operations exynos_drm_gem_fops = {
	.mmap = exynos_drm_gem_mmap_buffer,
};

int exynos_drm_gem_mmap_ioctl(struct drm_device *dev, void *data,
			      struct drm_file *file_priv)
	struct drm_exynos_gem_mmap *args = data;
	struct drm_gem_object *obj;

	DRM_DEBUG_KMS("%s\n", __FILE__);

	if (!(dev->driver->driver_features & DRIVER_GEM)) {
		DRM_ERROR("does not support GEM.\n");

	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
		DRM_ERROR("failed to lookup gem object.\n");
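
	/*
	 * Temporarily point the object's shmem file at our own
	 * file_operations so that the vm_mmap() call below ends up in
	 * exynos_drm_gem_mmap_buffer() rather than in the shmem mmap path.
	 */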
	obj->filp->f_op = &exynos_drm_gem_fops;
	obj->filp->private_data = obj;

	addr = vm_mmap(obj->filp, 0, args->size,
			PROT_READ | PROT_WRITE, MAP_SHARED, 0);

	drm_gem_object_unreference_unlocked(obj);

	if (IS_ERR((void *)addr))
		return PTR_ERR((void *)addr);

	args->mapped = addr;

	DRM_DEBUG_KMS("mapped = 0x%lx\n", (unsigned long)args->mapped);

int exynos_drm_gem_init_object(struct drm_gem_object *obj)
	DRM_DEBUG_KMS("%s\n", __FILE__);

void exynos_drm_gem_free_object(struct drm_gem_object *obj)
	DRM_DEBUG_KMS("%s\n", __FILE__);

	exynos_drm_gem_destroy(to_exynos_gem_obj(obj));

int exynos_drm_gem_dumb_create(struct drm_file *file_priv,
			       struct drm_device *dev,
			       struct drm_mode_create_dumb *args)
	struct exynos_drm_gem_obj *exynos_gem_obj;

	DRM_DEBUG_KMS("%s\n", __FILE__);

	/*
	 * Allocate memory to be used for the framebuffer.
	 * - this callback is invoked by userspace via the
	 *   DRM_IOCTL_MODE_CREATE_DUMB ioctl.
	 */
	args->pitch = args->width * args->bpp >> 3;
	args->size = PAGE_ALIGN(args->pitch * args->height);
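	/*
	 * For example, a 1024x600 dumb buffer at 32 bpp gives
	 * pitch = (1024 * 32) >> 3 = 4096 bytes and
	 * size  = PAGE_ALIGN(4096 * 600) = 2457600 bytes (600 pages).
	 */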

	exynos_gem_obj = exynos_drm_gem_create(dev, args->flags, args->size);
	if (IS_ERR(exynos_gem_obj))
		return PTR_ERR(exynos_gem_obj);

	ret = exynos_drm_gem_handle_create(&exynos_gem_obj->base, file_priv,
			&args->handle);
		exynos_drm_gem_destroy(exynos_gem_obj);
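
/*
 * Look up the dumb buffer and report the fake mmap offset (the hash key of
 * its map_list entry shifted by PAGE_SHIFT) that userspace passes to mmap().
 */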
int exynos_drm_gem_dumb_map_offset(struct drm_file *file_priv,
				   struct drm_device *dev, uint32_t handle,
				   uint64_t *offset)
	struct exynos_drm_gem_obj *exynos_gem_obj;
	struct drm_gem_object *obj;

	DRM_DEBUG_KMS("%s\n", __FILE__);

	mutex_lock(&dev->struct_mutex);

	/*
	 * Get the offset of the memory allocated for the DRM framebuffer.
	 * - this callback is invoked by userspace via the
	 *   DRM_IOCTL_MODE_MAP_DUMB ioctl.
	 */

	obj = drm_gem_object_lookup(dev, file_priv, handle);
		DRM_ERROR("failed to lookup gem object.\n");

	exynos_gem_obj = to_exynos_gem_obj(obj);

	if (!exynos_gem_obj->base.map_list.map) {
		ret = drm_gem_create_mmap_offset(&exynos_gem_obj->base);

	*offset = (u64)exynos_gem_obj->base.map_list.hash.key << PAGE_SHIFT;
	DRM_DEBUG_KMS("offset = 0x%lx\n", (unsigned long)*offset);

	drm_gem_object_unreference(obj);

	mutex_unlock(&dev->struct_mutex);

int exynos_drm_gem_dumb_destroy(struct drm_file *file_priv,
				struct drm_device *dev,
				unsigned int handle)
	DRM_DEBUG_KMS("%s\n", __FILE__);

	/*
	 * obj->refcount and obj->handle_count are decreased; if both of them
	 * reach 0, exynos_drm_gem_free_object() is called via the callback
	 * to release the resources.
	 */
	ret = drm_gem_handle_delete(file_priv, handle);
		DRM_ERROR("failed to delete drm_gem_handle.\n");
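
/*
 * Page fault handler for GEM mappings set up through drm_gem_mmap():
 * compute which page of the object faulted and insert the matching page/pfn
 * into the user mapping under dev->struct_mutex.
 */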
int exynos_drm_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
	struct drm_gem_object *obj = vma->vm_private_data;
	struct drm_device *dev = obj->dev;
	unsigned long f_vaddr;

	page_offset = ((unsigned long)vmf->virtual_address -
			vma->vm_start) >> PAGE_SHIFT;
	f_vaddr = (unsigned long)vmf->virtual_address;

	mutex_lock(&dev->struct_mutex);

	ret = exynos_drm_gem_map_pages(obj, vma, f_vaddr, page_offset);
		DRM_ERROR("failed to map pages.\n");

	mutex_unlock(&dev->struct_mutex);

	return convert_to_vm_err_msg(ret);

int exynos_drm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
	DRM_DEBUG_KMS("%s\n", __FILE__);

	/* Set up the vm_area_struct. */
	ret = drm_gem_mmap(filp, vma);
		DRM_ERROR("failed to mmap.\n");
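
	/*
	 * drm_gem_mmap() marks the mapping VM_PFNMAP; this driver inserts
	 * struct pages as well as raw pfns from its fault handler (see
	 * vm_insert_mixed() above), so switch the mapping to VM_MIXEDMAP.
	 */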
	vma->vm_flags &= ~VM_PFNMAP;
	vma->vm_flags |= VM_MIXEDMAP;