1 /******************************************************************************
4 * Device for accessing (in user-space) pages that have been granted by other
7 * Copyright (c) 2006-2007, D G Murray.
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write to the Free Software
16 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19 #include <linux/atomic.h>
20 #include <linux/module.h>
21 #include <linux/kernel.h>
22 #include <linux/init.h>
24 #include <linux/miscdevice.h>
26 #include <linux/slab.h>
27 #include <asm/uaccess.h>
29 #include <xen/gnttab.h>
30 #include <asm/hypervisor.h>
31 #include <xen/balloon.h>
32 #include <xen/evtchn.h>
33 #include <xen/public/gntdev.h>
36 #define DRIVER_AUTHOR "Derek G. Murray <Derek.Murray@cl.cam.ac.uk>"
37 #define DRIVER_DESC "User-space granted page access driver"
39 MODULE_LICENSE("GPL");
40 MODULE_AUTHOR(DRIVER_AUTHOR);
41 MODULE_DESCRIPTION(DRIVER_DESC);
43 #define GNTDEV_NAME "gntdev"
44 MODULE_ALIAS("devname:xen/" GNTDEV_NAME);
46 #define MAX_GRANTS_LIMIT 1024
47 #define DEFAULT_MAX_GRANTS 128
49 /* A slot can be in one of three states:
51 * 0. GNTDEV_SLOT_INVALID:
52 * This slot is not associated with a grant reference, and is therefore free
53 * to be overwritten by a new grant reference.
55 * 1. GNTDEV_SLOT_NOT_YET_MAPPED:
56 * This slot is associated with a grant reference (via the
57 * IOCTL_GNTDEV_MAP_GRANT_REF ioctl), but it has not yet been mmap()-ed.
59 * 2. GNTDEV_SLOT_MAPPED:
60 * This slot is associated with a grant reference, and has been mmap()-ed.
62 typedef enum gntdev_slot_state {
63 GNTDEV_SLOT_INVALID = 0,
64 GNTDEV_SLOT_NOT_YET_MAPPED,
/* NOTE(review): the GNTDEV_SLOT_MAPPED enumerator is not visible in this
 * extract, but it is referenced below (gntdev_mmap/gntdev_clear_pte), so it
 * presumably follows here — confirm against the full file. */
66 } gntdev_slot_state_t;
68 #define GNTDEV_INVALID_HANDLE -1
69 #define GNTDEV_FREE_LIST_INVALID -1
70 /* Each opened instance of gntdev is associated with a list of grants,
71 * represented by an array of elements of the following type,
72 * gntdev_grant_info_t.
74 typedef struct gntdev_grant_info {
/* Current lifecycle state of this slot (see gntdev_slot_state_t above). */
75 gntdev_slot_state_t state;
/* Back-pointer into the free list, valid while the slot is INVALID; lets
 * the contiguous allocator remove this slot from the free list in O(1). */
77 uint32_t free_list_index;
/* Grant handles returned by the hypervisor map operations; used later to
 * unmap. kernel_handle covers the kernel-space mapping, user_handle the
 * user-space one (GNTDEV_INVALID_HANDLE when no user mapping exists). */
81 grant_handle_t kernel_handle;
82 grant_handle_t user_handle;
/* Machine address of the granted frame, recorded for the p2m update. */
83 uint64_t dev_bus_addr;
/* NOTE(review): code elsewhere accesses these fields through a union `u`
 * with members `free_list_index` and `valid.{domid,ref,kernel_handle,...}`;
 * the union wrapper lines are not visible in this extract — confirm. */
86 } gntdev_grant_info_t;
88 /* Private data structure, which is stored in the file pointer for files
89 * associated with this device.
91 typedef struct gntdev_file_private_data {
93 /* Array of grant information. */
94 gntdev_grant_info_t *grants;
/* NOTE(review): a `grants_size` field is referenced throughout the file but
 * its declaration is not visible in this extract — confirm it lives here. */
97 /* Read/write semaphore used to protect the grants array. */
98 struct rw_semaphore grants_sem;
100 /* An array of indices of free slots in the grants array.
101 * N.B. An entry in this list may temporarily have the value
102 * GNTDEV_FREE_LIST_INVALID if the corresponding slot has been removed
103 * from the list by the contiguous allocator, but the list has not yet
104 * been compressed. However, this is not visible across invocations of
/* NOTE(review): the `free_list` pointer declaration itself is not visible
 * in this extract, though it is used below (e.g. in add_grant_reference). */
109 /* The number of free slots in the grants array. */
110 uint32_t free_list_size;
112 /* Read/write semaphore used to protect the free list. */
113 struct rw_semaphore free_list_sem;
115 /* Index of the next slot after the most recent contiguous allocation,
116 * for use in a next-fit allocator.
118 uint32_t next_fit_index;
120 /* Used to map grants into the kernel, before mapping them into user
123 struct page **foreign_pages;
125 } gntdev_file_private_data_t;
127 /* Module lifecycle operations. */
128 static int __init gntdev_init(void);
129 static void __exit gntdev_exit(void);
131 module_init(gntdev_init);
132 module_exit(gntdev_exit);
134 /* File operations. */
135 static int gntdev_open(struct inode *inode, struct file *flip);
136 static int gntdev_release(struct inode *inode, struct file *flip);
137 static int gntdev_mmap(struct file *flip, struct vm_area_struct *vma);
138 static long gntdev_ioctl(struct file *flip,
139 unsigned int cmd, unsigned long arg);
/* File-operations table wired into the misc device below. */
141 static const struct file_operations gntdev_fops = {
142 .owner = THIS_MODULE,
/* NOTE(review): .open and .mmap initializers are not visible in this
 * extract; gntdev_open/gntdev_mmap are presumably hooked up here. */
145 .release = gntdev_release,
147 .unlocked_ioctl = gntdev_ioctl
/* VM operations: .close frees the per-VMA page array; .zap_pte is a Xen
 * hook invoked as each PTE is torn down, so grants can be unmapped. */
151 static void gntdev_vma_close(struct vm_area_struct *vma);
152 static pte_t gntdev_clear_pte(struct vm_area_struct *vma, unsigned long addr,
153 pte_t *ptep, int is_fullmm);
155 static struct vm_operations_struct gntdev_vmops = {
156 .close = gntdev_vma_close,
157 .zap_pte = gntdev_clear_pte
160 /* Memory mapping functions
161 * ------------------------
163 * Every granted page is mapped into both kernel and user space, and the two
164 * following functions return the respective virtual addresses of these pages.
166 * When shadow paging is disabled, the granted page is mapped directly into
167 * user space; when it is enabled, it is mapped into the kernel and remapped
168 * into user space using vm_insert_page() (see gntdev_mmap(), below).
171 /* Returns the virtual address (in user space) of the @page_index'th page
172 * in the given VM area.
174 static inline unsigned long get_user_vaddr (struct vm_area_struct *vma,
/* NOTE(review): the @page_index parameter declaration line is not visible
 * in this extract. */
177 return (unsigned long) vma->vm_start + (page_index << PAGE_SHIFT);
180 /* Returns the virtual address (in kernel space) of the @slot_index'th page
181 * mapped by the gntdev instance that owns the given private data struct.
183 static inline unsigned long get_kernel_vaddr (gntdev_file_private_data_t *priv,
/* Look up the kernel linear-map address of the pre-allocated foreign page
 * backing this slot. */
188 pfn = page_to_pfn(priv->foreign_pages[slot_index]);
189 kaddr = pfn_to_kaddr(pfn);
190 return (unsigned long) kaddr;
193 /* Helper functions. */
195 /* Adds information about a grant reference to the list of grants in the file's
196 * private data structure. Returns non-zero on failure. On success, sets the
197 * value of *offset to the offset that should be mmap()-ed in order to map the
200 static int add_grant_reference(gntdev_file_private_data_t *private_data,
201 struct ioctl_gntdev_grant_ref *op,
/* Pop a free slot index off the tail of the free list; the vacated list
 * entry is poisoned so compress_free_list() can skip it. Caller must hold
 * both grants_sem and free_list_sem for writing (see gntdev_ioctl). */
206 slot_index = private_data->free_list[--private_data->free_list_size];
207 private_data->free_list[private_data->free_list_size]
208 = GNTDEV_FREE_LIST_INVALID;
210 /* Copy the grant information into file's private data. */
211 private_data->grants[slot_index].state = GNTDEV_SLOT_NOT_YET_MAPPED;
212 private_data->grants[slot_index].u.valid.domid = op->domid;
213 private_data->grants[slot_index].u.valid.ref = op->ref;
215 /* The offset is calculated as the index of the chosen entry in the
216 * file's private data's array of grant information. This is then
217 * shifted to give an offset into the virtual "file address space".
219 *offset = slot_index << PAGE_SHIFT;
224 /* Adds the @count grant references to the contiguous range in the slot array
225 * beginning at @first_slot. It is assumed that @first_slot was returned by a
226 * previous invocation of find_contiguous_free_range(), during the same
227 * invocation of the driver.
229 static int add_grant_references(gntdev_file_private_data_t *private_data,
231 struct ioctl_gntdev_grant_ref *ops,
236 for (i = 0; i < count; ++i) {
238 /* First, mark the slot's entry in the free list as invalid. */
239 uint32_t free_list_index =
240 private_data->grants[first_slot+i].u.free_list_index;
241 private_data->free_list[free_list_index] =
242 GNTDEV_FREE_LIST_INVALID;
244 /* Now, update the slot. */
245 private_data->grants[first_slot+i].state =
246 GNTDEV_SLOT_NOT_YET_MAPPED;
247 private_data->grants[first_slot+i].u.valid.domid =
249 private_data->grants[first_slot+i].u.valid.ref = ops[i].ref;
255 /* Scans through the free list for @flip, removing entries that are marked as
256 * GNTDEV_SLOT_INVALID. This will reduce the recorded size of the free list to
257 * the number of valid entries.
259 static void compress_free_list(gntdev_file_private_data_t *private_data)
261 uint32_t i, j = 0, old_size;
263 old_size = private_data->free_list_size;
/* Two-finger compaction: i scans the old list, j tracks the write position
 * for surviving (valid) entries; each moved slot's back-pointer is fixed. */
264 for (i = 0; i < old_size; ++i) {
265 if (private_data->free_list[i] != GNTDEV_FREE_LIST_INVALID) {
269 slot_index = private_data->free_list[i];
270 private_data->free_list[j] = slot_index;
271 private_data->grants[slot_index].u
272 .free_list_index = j;
273 private_data->free_list[i]
274 = GNTDEV_FREE_LIST_INVALID;
/* Invalid entry: shrink the recorded list size by one. */
278 --private_data->free_list_size;
283 /* Searches the grant array in the private data of @flip for a range of
284 * @num_slots contiguous slots in the GNTDEV_SLOT_INVALID state.
286 * Returns the index of the first slot if a range is found, otherwise -ENOMEM.
288 static int find_contiguous_free_range(gntdev_file_private_data_t *private_data,
/* Next-fit search: start where the previous allocation left off, then wrap
 * around to the beginning of the array. */
291 uint32_t i, start_index = private_data->next_fit_index;
292 uint32_t range_start = 0, range_length;
294 /* First search from the start_index to the end of the array. */
296 for (i = start_index; i < private_data->grants_size; ++i) {
297 if (private_data->grants[i].state == GNTDEV_SLOT_INVALID) {
298 if (range_length == 0) {
302 if (range_length == num_slots) {
308 /* Now search from the start of the array to the start_index. */
310 for (i = 0; i < start_index; ++i) {
311 if (private_data->grants[i].state == GNTDEV_SLOT_INVALID) {
312 if (range_length == 0) {
316 if (range_length == num_slots) {
/* Allocates and initialises the per-open-file grant bookkeeping for
 * @max_grants slots: the kernel page pool, the grants array and the free
 * list. All slots start INVALID and on the free list (stored in reverse
 * order, so allocation pops slot 0 first). Returns 0 on success. */
325 static int init_private_data(gntdev_file_private_data_t *priv,
330 /* Allocate space for the kernel-mapping of granted pages. */
331 priv->foreign_pages =
332 alloc_empty_pages_and_pagevec(max_grants);
333 if (!priv->foreign_pages)
336 /* Allocate the grant list and free-list. */
337 priv->grants = kmalloc(max_grants * sizeof(gntdev_grant_info_t),
341 priv->free_list = kmalloc(max_grants * sizeof(int32_t), GFP_KERNEL);
342 if (!priv->free_list)
345 /* Initialise the free-list, which contains all slots at first. */
346 for (i = 0; i < max_grants; ++i) {
347 priv->free_list[max_grants - i - 1] = i;
348 priv->grants[i].state = GNTDEV_SLOT_INVALID;
349 priv->grants[i].u.free_list_index = max_grants - i - 1;
351 priv->grants_size = max_grants;
352 priv->free_list_size = max_grants;
353 priv->next_fit_index = 0;
/* Error path: release the page pool allocated above. */
360 free_empty_pages_and_pagevec(priv->foreign_pages, max_grants);
366 /* Interface functions. */
/* Misc character device; appears as /dev/xen/gntdev (see GNTDEV_NAME). */
368 static struct miscdevice gntdev_miscdev = {
369 .minor = MISC_DYNAMIC_MINOR,
371 .nodename = "xen/" GNTDEV_NAME,
372 .fops = &gntdev_fops,
375 /* Initialises the driver. Called when the module is loaded. */
376 static int __init gntdev_init(void)
/* Refuse to load outside a Xen guest: the driver talks directly to the
 * hypervisor's grant-table interface. */
380 if (!is_running_on_xen()) {
381 pr_err("You must be running Xen to use gntdev\n");
385 err = misc_register(&gntdev_miscdev);
388 pr_err("Could not register gntdev device\n");
395 /* Cleans up and unregisters the driver. Called when the driver is unloaded.
397 static void __exit gntdev_exit(void)
399 misc_deregister(&gntdev_miscdev);
402 /* Called when the device is opened. */
403 static int gntdev_open(struct inode *inode, struct file *flip)
405 gntdev_file_private_data_t *private_data;
/* gntdev mappings are addressed by page offset; seeking is meaningless. */
407 nonseekable_open(inode, flip);
409 /* Allocate space for the per-instance private data. */
410 private_data = kmalloc(sizeof(*private_data), GFP_KERNEL);
414 /* These will be lazily initialised by init_private_data. */
415 private_data->grants = NULL;
416 private_data->free_list = NULL;
417 private_data->foreign_pages = NULL;
419 init_rwsem(&private_data->grants_sem);
420 init_rwsem(&private_data->free_list_sem);
422 flip->private_data = private_data;
430 /* Called when the device is closed.
/* Frees the lazily-allocated per-open-file state (page pool, grants array,
 * free list). Each pointer may still be NULL if no ioctl was ever made. */
432 static int gntdev_release(struct inode *inode, struct file *flip)
434 if (flip->private_data) {
435 gntdev_file_private_data_t *private_data =
436 (gntdev_file_private_data_t *) flip->private_data;
437 if (private_data->foreign_pages)
438 free_empty_pages_and_pagevec
439 (private_data->foreign_pages,
440 private_data->grants_size);
441 if (private_data->grants)
442 kfree(private_data->grants);
443 if (private_data->free_list)
444 kfree(private_data->free_list);
450 /* Called when an attempt is made to mmap() the device. The private data from
451 * @flip contains the list of grant references that can be mapped. The vm_pgoff
452 * field of @vma contains the index into that list that refers to the grant
453 * reference that will be mapped. Only mappings that are a multiple of
454 * PAGE_SIZE are handled.
455 *
455 * Each page is first mapped into the kernel (so a kernel virtual address
455 * exists for it), then into user space: directly via a PTE-targeted grant
455 * map when running on bare page tables, or via vm_insert_page() under
455 * auto-translated (shadow) paging.
456 static int gntdev_mmap (struct file *flip, struct vm_area_struct *vma)
458 struct gnttab_map_grant_ref op;
459 unsigned long slot_index = vma->vm_pgoff;
460 unsigned long kernel_vaddr, user_vaddr;
461 uint32_t size = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
467 gntdev_file_private_data_t *private_data = flip->private_data;
469 if (unlikely(!private_data)) {
470 pr_err("file's private data is NULL\n");
474 /* Test to make sure that the grants array has been initialised. */
475 down_read(&private_data->grants_sem);
476 if (unlikely(!private_data->grants)) {
477 up_read(&private_data->grants_sem);
478 pr_err("attempted to mmap before ioctl\n");
481 up_read(&private_data->grants_sem);
/* The requested range must fit entirely inside the grants array. */
483 if (unlikely((size <= 0) ||
484 (size + slot_index) > private_data->grants_size)) {
485 pr_err("Invalid number of pages or offset"
486 "(num_pages = %d, first_slot = %ld)\n",
/* A private writable mapping would COW the foreign pages; disallow it. */
491 if ((vma->vm_flags & VM_WRITE) && !(vma->vm_flags & VM_SHARED)) {
492 pr_err("writable mappings must be shared\n");
496 /* Slots must be in the NOT_YET_MAPPED state. */
497 down_write(&private_data->grants_sem);
498 for (i = 0; i < size; ++i) {
499 if (private_data->grants[slot_index + i].state !=
500 GNTDEV_SLOT_NOT_YET_MAPPED) {
501 pr_err("Slot (index = %ld) is in the wrong "
502 "state (%d)\n", slot_index + i,
503 private_data->grants[slot_index + i].state);
504 up_write(&private_data->grants_sem);
509 /* Install the hook for unmapping. */
510 vma->vm_ops = &gntdev_vmops;
512 /* The VM area contains pages from another VM. */
513 vma->vm_flags |= VM_FOREIGN;
/* Per-VMA array of page pointers, consumed by the VM_FOREIGN machinery. */
514 vma->vm_private_data = kzalloc(size * sizeof(struct page *),
516 if (vma->vm_private_data == NULL) {
517 pr_err("couldn't allocate mapping structure for VM area\n");
521 /* This flag prevents Bad PTE errors when the memory is unmapped. */
522 vma->vm_flags |= VM_RESERVED;
524 /* This flag prevents this VM area being copied on a fork(). A better
525 * behaviour might be to explicitly carry out the appropriate mappings
526 * on fork(), but I don't know if there's a hook for this.
528 vma->vm_flags |= VM_DONTCOPY;
531 /* This flag ensures that the page tables are not unpinned before the
532 * VM area is unmapped. Therefore Xen still recognises the PTE as
533 * belonging to an L1 pagetable, and the grant unmap operation will
534 * succeed, even if the process does not exit cleanly.
536 vma->vm_mm->context.has_foreign_mappings = 1;
/* Map each requested slot: first into the kernel, then into user space. */
540 for (i = 0; i < size; ++i) {
542 flags = GNTMAP_host_map;
543 if (!(vma->vm_flags & VM_WRITE))
544 flags |= GNTMAP_readonly;
546 kernel_vaddr = get_kernel_vaddr(private_data, slot_index + i);
547 user_vaddr = get_user_vaddr(vma, i);
548 page = private_data->foreign_pages[slot_index + i];
550 gnttab_set_map_op(&op, kernel_vaddr, flags,
551 private_data->grants[slot_index+i]
553 private_data->grants[slot_index+i]
556 /* Carry out the mapping of the grant reference. */
557 ret = HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref,
560 if (op.status != GNTST_okay) {
561 if (op.status != GNTST_eagain)
562 pr_err("Error mapping the grant reference "
563 "into the kernel (%d). domid = %d; ref = %d\n",
565 private_data->grants[slot_index+i]
567 private_data->grants[slot_index+i]
570 /* Propagate eagain instead of trying to fix it up */
575 /* Store a reference to the page that will be mapped into user
578 ((struct page **) vma->vm_private_data)[i] = page;
580 /* Mark mapped page as reserved. */
581 SetPageReserved(page);
583 /* Record the grant handle, for use in the unmap operation. */
584 private_data->grants[slot_index+i].u.valid.kernel_handle =
586 private_data->grants[slot_index+i].u.valid.dev_bus_addr =
589 private_data->grants[slot_index+i].state = GNTDEV_SLOT_MAPPED;
590 private_data->grants[slot_index+i].u.valid.user_handle =
591 GNTDEV_INVALID_HANDLE;
593 /* Now perform the mapping to user space. */
594 if (!xen_feature(XENFEAT_auto_translated_physmap)) {
596 /* NOT USING SHADOW PAGE TABLES. */
597 /* In this case, we map the grant(s) straight into user
601 /* Get the machine address of the PTE for the user
604 if ((ret = create_lookup_pte_addr(vma->vm_mm,
609 pr_err("Error obtaining PTE pointer (%d)\n",
614 /* Configure the map operation. */
616 /* The reference is to be used by host CPUs. */
617 flags = GNTMAP_host_map;
619 /* Specifies a user space mapping. */
620 flags |= GNTMAP_application_map;
622 /* The map request contains the machine address of the
625 flags |= GNTMAP_contains_pte;
627 if (!(vma->vm_flags & VM_WRITE))
628 flags |= GNTMAP_readonly;
630 gnttab_set_map_op(&op, ptep, flags,
631 private_data->grants[slot_index+i]
633 private_data->grants[slot_index+i]
636 /* Carry out the mapping of the grant reference. */
637 ret = HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref,
640 if (op.status != GNTST_okay) {
641 pr_err("Error mapping the grant "
642 "reference into user space (%d). domid "
643 "= %d; ref = %d\n", op.status,
644 private_data->grants[slot_index+i].u
646 private_data->grants[slot_index+i].u
648 /* This should never happen after we've mapped into
649 * the kernel space. */
650 BUG_ON(op.status == GNTST_eagain);
654 /* Record the grant handle, for use in the unmap
657 private_data->grants[slot_index+i].u.
658 valid.user_handle = op.handle;
660 /* Update p2m structure with the new mapping. */
661 set_phys_to_machine(__pa(kernel_vaddr) >> PAGE_SHIFT,
662 FOREIGN_FRAME(private_data->
664 .u.valid.dev_bus_addr
667 /* USING SHADOW PAGE TABLES. */
668 /* In this case, we simply insert the page into the VM
670 ret = vm_insert_page(vma, user_vaddr, page);
676 up_write(&private_data->grants_sem);
680 /* If we have a mapping failure, the unmapping will be taken care of
681 * by do_mmap_pgoff(), which will eventually call gntdev_clear_pte().
682 * All we need to do here is free the vma_private_data.
684 kfree(vma->vm_private_data);
686 /* THIS IS VERY UNPLEASANT: do_mmap_pgoff() will set the vma->vm_file
687 * to NULL on failure. However, we need this in gntdev_clear_pte() to
688 * unmap the grants. Therefore, we smuggle a reference to the file's
689 * private data in the VM area's private data pointer.
691 vma->vm_private_data = private_data;
693 up_write(&private_data->grants_sem);
/* zap_pte hook: called for each PTE being torn down in a gntdev VMA.
 * Unmaps the user-space grant mapping (if any) and then the kernel-space
 * mapping for the corresponding slot, returning the slot to the
 * NOT_YET_MAPPED state. Returns the (cleared) PTE value for the caller. */
698 static pte_t gntdev_clear_pte(struct vm_area_struct *vma, unsigned long addr,
699 pte_t *ptep, int is_fullmm)
703 struct gnttab_unmap_grant_ref op;
704 gntdev_file_private_data_t *private_data;
706 /* THIS IS VERY UNPLEASANT: do_mmap_pgoff() will set the vma->vm_file
707 * to NULL on failure. However, we need this in gntdev_clear_pte() to
708 * unmap the grants. Therefore, we smuggle a reference to the file's
709 * private data in the VM area's private data pointer.
712 private_data = (gntdev_file_private_data_t *)
713 vma->vm_file->private_data;
714 } else if (vma->vm_private_data) {
715 private_data = (gntdev_file_private_data_t *)
716 vma->vm_private_data;
718 private_data = NULL; /* gcc warning */
722 /* Calculate the grant relating to this PTE. */
723 slot_index = vma->vm_pgoff + ((addr - vma->vm_start) >> PAGE_SHIFT);
725 /* Only unmap grants if the slot has been mapped. This could be being
726 * called from a failing mmap().
728 if (private_data->grants[slot_index].state == GNTDEV_SLOT_MAPPED) {
730 /* First, we clear the user space mapping, if it has been made.
732 if (private_data->grants[slot_index].u.valid.user_handle !=
733 GNTDEV_INVALID_HANDLE &&
734 !xen_feature(XENFEAT_auto_translated_physmap)) {
735 /* NOT USING SHADOW PAGE TABLES. */
737 /* Copy the existing value of the PTE for returning. */
740 gnttab_set_unmap_op(&op, ptep_to_machine(ptep),
742 private_data->grants[slot_index]
743 .u.valid.user_handle);
744 ret = HYPERVISOR_grant_table_op(
745 GNTTABOP_unmap_grant_ref, &op, 1);
747 if (op.status != GNTST_okay)
748 pr_warning("User unmap grant status = %d\n",
751 /* USING SHADOW PAGE TABLES. */
752 copy = xen_ptep_get_and_clear_full(vma, addr, ptep, is_fullmm);
755 /* Finally, we unmap the grant from kernel space. */
756 gnttab_set_unmap_op(&op,
757 get_kernel_vaddr(private_data, slot_index),
759 private_data->grants[slot_index].u.valid
761 ret = HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref,
764 if (op.status != GNTST_okay)
765 pr_warning("Kernel unmap grant status = %d\n",
769 /* Return slot to the not-yet-mapped state, so that it may be
770 * mapped again, or removed by a subsequent ioctl.
772 private_data->grants[slot_index].state =
773 GNTDEV_SLOT_NOT_YET_MAPPED;
775 /* Invalidate the physical to machine mapping for this page. */
777 page_to_pfn(private_data->foreign_pages[slot_index]),
/* Slot was never mapped: just clear the PTE normally. */
781 copy = xen_ptep_get_and_clear_full(vma, addr, ptep, is_fullmm);
787 /* "Destructor" for a VM area.
/* Frees the per-VMA page-pointer array allocated in gntdev_mmap(). Note
 * that on a failed mmap() vm_private_data instead holds the smuggled file
 * private data, which must NOT be freed here (handled elsewhere). */
789 static void gntdev_vma_close(struct vm_area_struct *vma) {
790 if (vma->vm_private_data) {
791 kfree(vma->vm_private_data);
795 /* Called when an ioctl is made on the device.
795 *
795 * Supported commands: MAP_GRANT_REF (reserve slots for grant refs and
795 * return the mmap offset), UNMAP_GRANT_REF (return slots to the free
795 * list), GET_OFFSET_FOR_VADDR (reverse-map a user address to its offset),
795 * SET_MAX_GRANTS (size the tables before first use).
797 static long gntdev_ioctl(struct file *flip,
798 unsigned int cmd, unsigned long arg)
801 gntdev_file_private_data_t *private_data =
802 (gntdev_file_private_data_t *) flip->private_data;
804 /* On the first invocation, we will lazily initialise the grant array
807 if (unlikely(!private_data->grants)
808 && likely(cmd != IOCTL_GNTDEV_SET_MAX_GRANTS)) {
809 down_write(&private_data->grants_sem);
/* Re-check under the lock: another thread may have initialised first. */
811 if (unlikely(private_data->grants)) {
812 up_write(&private_data->grants_sem);
813 goto private_data_initialised;
816 /* Just use the default. Setting to a non-default is handled
817 * in the ioctl switch.
819 rc = init_private_data(private_data, DEFAULT_MAX_GRANTS);
821 up_write(&private_data->grants_sem);
824 pr_err("Initialising gntdev private data failed\n");
829 private_data_initialised:
831 case IOCTL_GNTDEV_MAP_GRANT_REF:
833 struct ioctl_gntdev_map_grant_ref op;
834 struct ioctl_gntdev_grant_ref *refs = NULL;
836 if (copy_from_user(&op, (void __user *)arg, sizeof(op)))
838 if (unlikely(op.count <= 0))
/* Multi-grant request: copy the full array of refs from user space. */
841 if (op.count > 1 && op.count <= private_data->grants_size) {
842 struct ioctl_gntdev_grant_ref *u;
844 refs = kmalloc(op.count * sizeof(*refs), GFP_KERNEL);
847 u = ((struct ioctl_gntdev_map_grant_ref *)arg)->refs;
848 if (copy_from_user(refs, (void __user *)u,
849 sizeof(*refs) * op.count)) {
855 down_write(&private_data->grants_sem);
856 down_write(&private_data->free_list_sem);
858 if (unlikely(op.count > private_data->free_list_size)) {
/* Single grant: take any free slot via the free list. */
864 if ((rc = add_grant_reference(private_data, op.refs,
866 pr_err("Adding grant reference failed (%d)\n",
/* Multiple grants: need a contiguous run of free slots so the whole
 * range can be mapped with one mmap() offset. */
871 if ((rc = find_contiguous_free_range(private_data,
873 pr_err("Finding contiguous range failed"
877 op.index = rc << PAGE_SHIFT;
878 if ((rc = add_grant_references(private_data, op.count,
880 pr_err("Adding grant references failed (%d)\n",
884 compress_free_list(private_data);
888 up_write(&private_data->free_list_sem);
889 up_write(&private_data->grants_sem);
/* Return the resulting mmap offset (op.index) to user space. */
893 if (!rc && copy_to_user((void __user *)arg, &op, sizeof(op)))
897 case IOCTL_GNTDEV_UNMAP_GRANT_REF:
899 struct ioctl_gntdev_unmap_grant_ref op;
900 uint32_t i, start_index;
902 if (copy_from_user(&op, (void __user *)arg, sizeof(op)))
905 start_index = op.index >> PAGE_SHIFT;
906 if (start_index + op.count > private_data->grants_size)
909 down_write(&private_data->grants_sem);
911 /* First, check that all pages are in the NOT_YET_MAPPED
914 for (i = 0; i < op.count; ++i) {
916 (private_data->grants[start_index + i].state
917 != GNTDEV_SLOT_NOT_YET_MAPPED)) {
918 if (private_data->grants[start_index + i].state
919 == GNTDEV_SLOT_INVALID) {
920 pr_err("Tried to remove an invalid "
921 "grant at offset 0x%x.",
926 pr_err("Tried to remove a grant which "
927 "is currently mmap()-ed at "
937 down_write(&private_data->free_list_sem);
939 /* Unmap pages and add them to the free list.
941 for (i = 0; i < op.count; ++i) {
942 private_data->grants[start_index+i].state =
944 private_data->grants[start_index+i].u.free_list_index =
945 private_data->free_list_size;
946 private_data->free_list[private_data->free_list_size] =
948 ++private_data->free_list_size;
951 up_write(&private_data->free_list_sem);
953 up_write(&private_data->grants_sem);
956 case IOCTL_GNTDEV_GET_OFFSET_FOR_VADDR:
958 struct ioctl_gntdev_get_offset_for_vaddr op;
959 struct vm_area_struct *vma;
962 if (copy_from_user(&op, (void __user *)arg, sizeof(op)))
965 vaddr = (unsigned long)op.vaddr;
/* Look up the VMA for the address; it must be one of ours. */
967 down_read(&current->mm->mmap_sem);
968 vma = find_vma(current->mm, vaddr);
969 if (!vma || vma->vm_ops != &gntdev_vmops) {
973 if (vma->vm_start != vaddr) {
974 pr_err("The vaddr specified in an "
975 "IOCTL_GNTDEV_GET_OFFSET_FOR_VADDR must be at "
976 "the start of the VM area. vma->vm_start = "
977 "%#lx; vaddr = %#lx\n",
978 vma->vm_start, vaddr);
982 op.offset = vma->vm_pgoff << PAGE_SHIFT;
983 op.count = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
985 up_read(&current->mm->mmap_sem);
986 if (!rc && copy_to_user((void __user *)arg, &op, sizeof(op)))
990 case IOCTL_GNTDEV_SET_MAX_GRANTS:
992 struct ioctl_gntdev_set_max_grants op;
994 if (copy_from_user(&op, (void __user *)arg, sizeof(op)))
996 if (op.count > MAX_GRANTS_LIMIT)
/* Only valid before the tables are (lazily) initialised. */
999 down_write(&private_data->grants_sem);
1000 if (unlikely(private_data->grants))
1003 rc = init_private_data(private_data, op.count);
1004 up_write(&private_data->grants_sem);
1008 return -ENOIOCTLCMD;