1 /******************************************************************************
4 * Granting foreign access to our memory reservation.
6 * Copyright (c) 2005-2006, Christopher Clark
7 * Copyright (c) 2004-2005, K A Fraser
9 * This program is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU General Public License version 2
11 * as published by the Free Software Foundation; or, when distributed
12 * separately from the Linux kernel or incorporated into other
13 * software packages, subject to the following license:
15 * Permission is hereby granted, free of charge, to any person obtaining a copy
16 * of this source file (the "Software"), to deal in the Software without
17 * restriction, including without limitation the rights to use, copy, modify,
18 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
19 * and to permit persons to whom the Software is furnished to do so, subject to
20 * the following conditions:
22 * The above copyright notice and this permission notice shall be included in
23 * all copies or substantial portions of the Software.
25 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
26 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
27 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
28 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
29 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
30 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
34 #include <linux/export.h>
35 #include <linux/slab.h>
36 #include <linux/sched.h>
38 #include <linux/seqlock.h>
39 #include <linux/timer.h>
40 #include <xen/interface/xen.h>
41 #include <xen/gnttab.h>
42 #include <asm/pgtable.h>
43 #include <asm/uaccess.h>
44 #include <asm/cmpxchg.h>
46 #include <xen/interface/memory.h>
47 #include <asm/gnttab_dma.h>
49 #ifdef HAVE_XEN_PLATFORM_COMPAT_H
50 #include <xen/platform-compat.h>
/*
 * Compile-time constants, module-global state, and free-list helper
 * macros for the grant-table driver.
 * NOTE(review): original line numbers embedded in this chunk are not
 * contiguous -- some lines were elided by extraction; comments describe
 * only what is visible.
 */
53 /* External tools reserve first few grant table entries. */
54 #define NR_RESERVED_ENTRIES 8
/* Sentinel value terminating the free-list chain. */
55 #define GNTTAB_LIST_END 0xffffffff
/* Number of grant entries held by one page of the shared table. */
56 #define ENTRIES_PER_GRANT_FRAME (PAGE_SIZE / sizeof(grant_entry_t))
/* Array of pages holding the free-list link (grant_ref_t) per entry. */
58 static grant_ref_t **gnttab_list;
/* Grant-table frames currently mapped. */
59 static unsigned int nr_grant_frames;
/* Hypervisor-reported maximum, sampled once at init. */
60 static unsigned int boot_max_nr_grant_frames;
/* Count of entries on the free list. */
61 static int gnttab_free_count;
/* Head of the free list threaded through gnttab_list. */
62 static grant_ref_t gnttab_free_head;
/* Protects the free list, free count, and the callback list. */
63 static DEFINE_SPINLOCK(gnttab_list_lock);
/* Kernel mapping of the shared grant-table frames. */
65 static struct grant_entry *shared;
/* Callers waiting for free entries (see gnttab_request_free_callback). */
67 static struct gnttab_free_callback *gnttab_free_callback_list;
69 static int gnttab_expand(unsigned int req_entries);
/* grant_ref_t slots per free-list page. */
71 #define RPP (PAGE_SIZE / sizeof(grant_ref_t))
/* Free-list link slot for grant entry number 'entry'. */
72 #define gnttab_entry(entry) (gnttab_list[(entry) / RPP][(entry) % RPP])
/* Free-list pages needed to track 'grant_frames' grant frames. */
74 #define nr_freelist_frames(grant_frames) \
75 (((grant_frames) * ENTRIES_PER_GRANT_FRAME + RPP - 1) / RPP)
/*
 * Pop 'count' grant references off the free list under gnttab_list_lock,
 * expanding the table first if too few are free.
 * NOTE(review): local declarations, loop header and return paths were
 * elided by extraction (line numbers jump 77->83, 92->94, 98->103).
 */
77 static int get_free_entries(int count)
83 spin_lock_irqsave(&gnttab_list_lock, flags);
/* Expand on shortfall; bail out (presumably returning rc) on failure. */
85 if ((gnttab_free_count < count) &&
86 ((rc = gnttab_expand(count - gnttab_free_count)) < 0)) {
87 spin_unlock_irqrestore(&gnttab_list_lock, flags);
/* Detach 'count' entries starting at the current head. */
91 ref = head = gnttab_free_head;
92 gnttab_free_count -= count;
/* Walk forward to the last taken entry, then terminate the taken chain
 * and advance the free head past it. */
94 head = gnttab_entry(head);
95 gnttab_free_head = gnttab_entry(head);
96 gnttab_entry(head) = GNTTAB_LIST_END;
98 spin_unlock_irqrestore(&gnttab_list_lock, flags);
/* Convenience wrapper: allocate exactly one reference. */
103 #define get_free_entry() get_free_entries(1)
/*
 * Run queued free-space callbacks whose requested count is now
 * satisfied; requeue the rest.  Caller holds gnttab_list_lock
 * (presumably -- TODO confirm against elided context).
 */
105 static void do_free_callbacks(void)
107 struct gnttab_free_callback *callback, *next;
/* Detach the whole list, then re-add entries still unsatisfied. */
109 callback = gnttab_free_callback_list;
110 gnttab_free_callback_list = NULL;
112 while (callback != NULL) {
113 next = callback->next;
114 if (gnttab_free_count >= callback->count) {
115 callback->next = NULL;
116 callback->queued = 0;
117 callback->fn(callback->arg);
/* Not enough free entries yet: push back onto the list. */
119 callback->next = gnttab_free_callback_list;
120 gnttab_free_callback_list = callback;
/* Dispatch callbacks only when someone is actually waiting. */
126 static inline void check_free_callbacks(void)
128 if (unlikely(gnttab_free_callback_list))
/*
 * Return a single reference to the head of the free list and poke
 * any waiters.
 */
132 static void put_free_entry(grant_ref_t ref)
135 spin_lock_irqsave(&gnttab_list_lock, flags);
136 gnttab_entry(ref) = gnttab_free_head;
137 gnttab_free_head = ref;
139 check_free_callbacks();
140 spin_unlock_irqrestore(&gnttab_list_lock, flags);
/* NOTE(review): the opening of this comment block was elided. */
144 * Public grant-issuing interface functions
/*
 * Allocate a reference and grant 'domid' access to 'frame'.
 * NOTE(review): the trailing parameter(s) of the signature, the flags
 * write barrier and return statements were elided by extraction.
 */
147 int gnttab_grant_foreign_access(domid_t domid, unsigned long frame,
152 if (unlikely((ref = get_free_entry()) < 0))
155 shared[ref].frame = frame;
156 shared[ref].domid = domid;
/* Caller may only pass access-modifier flags, not state bits. */
158 BUG_ON(flags & (GTF_accept_transfer | GTF_reading | GTF_writing));
159 shared[ref].flags = GTF_permit_access | flags;
163 EXPORT_SYMBOL_GPL(gnttab_grant_foreign_access);
/*
 * Same as above but fills in a caller-supplied reference.
 */
165 void gnttab_grant_foreign_access_ref(grant_ref_t ref, domid_t domid,
166 unsigned long frame, int flags)
168 shared[ref].frame = frame;
169 shared[ref].domid = domid;
171 BUG_ON(flags & (GTF_accept_transfer | GTF_reading | GTF_writing));
172 shared[ref].flags = GTF_permit_access | flags;
174 EXPORT_SYMBOL_GPL(gnttab_grant_foreign_access_ref);
/*
 * Report whether the remote domain still has the grant mapped for
 * reading or writing (non-zero if so).
 */
177 int gnttab_query_foreign_access(grant_ref_t ref)
181 nflags = shared[ref].flags;
183 return (nflags & (GTF_reading|GTF_writing));
185 EXPORT_SYMBOL_GPL(gnttab_query_foreign_access);
/*
 * Bookkeeping for grants that could not be revoked immediately: they
 * are parked on deferred_list and retried from a timer.
 * NOTE(review): struct members beyond 'list' (a ref, page pointer and
 * warn_delay counter are referenced below) were elided by extraction.
 */
210 struct deferred_entry {
211 struct list_head list;
216 static LIST_HEAD(deferred_list);
217 static void gnttab_handle_deferred(unsigned long);
/* One-second retry timer (armed on demand below). */
218 static DEFINE_TIMER(deferred_timer, gnttab_handle_deferred, 0, 0);
/*
 * Timer handler: retry ending foreign access for up to 10 deferred
 * entries per tick; free those that succeed, requeue the rest and
 * re-arm the timer while work remains.
 */
220 static void gnttab_handle_deferred(unsigned long unused)
222 unsigned int nr = 10;
223 struct deferred_entry *first = NULL;
226 spin_lock_irqsave(&gnttab_list_lock, flags);
228 struct deferred_entry *entry
229 = list_first_entry(&deferred_list,
230 struct deferred_entry, list);
/* Drop the lock while retrying the (possibly slow) revoke. */
234 list_del(&entry->list);
235 spin_unlock_irqrestore(&gnttab_list_lock, flags);
236 if (_gnttab_end_foreign_access_ref(entry->ref)) {
237 put_free_entry(entry->ref);
240 "freeing g.e. %#x (pfn %#lx)\n",
241 entry->ref, page_to_pfn(entry->page));
242 __free_page(entry->page);
244 printk(KERN_DEBUG "freeing g.e. %#x\n",
/* Still busy: periodically log that the grant is pending. */
249 if (!--entry->warn_delay)
250 pr_info("g.e. %#x still pending\n",
255 spin_lock_irqsave(&gnttab_list_lock, flags);
257 list_add_tail(&entry->list, &deferred_list);
258 else if (list_empty(&deferred_list))
/* Re-arm while entries remain and no timer is already queued. */
261 if (!list_empty(&deferred_list) && !timer_pending(&deferred_timer)) {
262 deferred_timer.expires = jiffies + HZ;
263 add_timer(&deferred_timer);
265 spin_unlock_irqrestore(&gnttab_list_lock, flags);
/*
 * Queue (ref, page) for deferred revocation; on allocation failure the
 * grant is leaked (logged as "leaking").  GFP_ATOMIC: may be called
 * from non-sleeping context -- TODO confirm against elided callers.
 */
268 static void gnttab_add_deferred(grant_ref_t ref, struct page *page)
270 struct deferred_entry *entry = kmalloc(sizeof(*entry), GFP_ATOMIC);
271 const char *what = KERN_WARNING "leaking";
/* Retry once per second, warn after ~60 attempts. */
278 entry->warn_delay = 60;
279 spin_lock_irqsave(&gnttab_list_lock, flags);
280 list_add_tail(&entry->list, &deferred_list);
281 if (!timer_pending(&deferred_timer)) {
282 deferred_timer.expires = jiffies + HZ;
283 add_timer(&deferred_timer);
285 spin_unlock_irqrestore(&gnttab_list_lock, flags);
286 what = KERN_DEBUG "deferring";
288 printk("%s g.e. %#x (pfn %lx)\n", what,
289 ref, page ? page_to_pfn(page) : -1);
/*
 * End foreign access to 'ref'; if the peer still holds a mapping,
 * defer revocation (and freeing of the backing page) to the timer.
 * NOTE(review): body lines were elided (292->294, 299->301).
 */
292 void gnttab_end_foreign_access(grant_ref_t ref, unsigned long page)
294 if (gnttab_end_foreign_access_ref(ref)) {
299 gnttab_add_deferred(ref, page ? virt_to_page(page) : NULL);
301 EXPORT_SYMBOL_GPL(gnttab_end_foreign_access);
/*
 * Allocate a reference and offer 'pfn' for transfer to 'domid'.
 */
303 int gnttab_grant_foreign_transfer(domid_t domid, unsigned long pfn)
307 if (unlikely((ref = get_free_entry()) < 0))
309 gnttab_grant_foreign_transfer_ref(ref, domid, pfn);
313 EXPORT_SYMBOL_GPL(gnttab_grant_foreign_transfer);
/*
 * Fill a caller-supplied reference with a transfer offer.
 */
315 void gnttab_grant_foreign_transfer_ref(grant_ref_t ref, domid_t domid,
318 shared[ref].frame = pfn;
319 shared[ref].domid = domid;
321 shared[ref].flags = GTF_accept_transfer;
323 EXPORT_SYMBOL_GPL(gnttab_grant_foreign_transfer_ref);
/*
 * Complete a transfer: reclaim the ref if nothing was committed
 * (returning 0), otherwise wait for completion and return the frame.
 * NOTE(review): declarations and return lines elided.
 */
325 unsigned long gnttab_end_foreign_transfer_ref(grant_ref_t ref)
331 * If a transfer is not even yet started, try to reclaim the grant
332 * reference and return failure (== 0).
334 while (!((flags = shared[ref].flags) & GTF_transfer_committed)) {
335 if (sync_cmpxchg(&shared[ref].flags, flags, 0) == flags)
340 /* If a transfer is in progress then wait until it is completed. */
341 while (!(flags & GTF_transfer_completed)) {
342 flags = shared[ref].flags;
346 /* Read the frame number /after/ reading completion status. */
348 frame = shared[ref].frame;
353 EXPORT_SYMBOL_GPL(gnttab_end_foreign_transfer_ref);
/*
 * Wrapper that also releases the reference (the put_free_entry call is
 * presumably on an elided line -- TODO confirm).
 */
355 unsigned long gnttab_end_foreign_transfer(grant_ref_t ref)
357 unsigned long frame = gnttab_end_foreign_transfer_ref(ref);
361 EXPORT_SYMBOL_GPL(gnttab_end_foreign_transfer);
/*
 * Release a single grant reference back to the global free list.
 * NOTE(review): body elided (363->367).
 */
363 void gnttab_free_grant_reference(grant_ref_t ref)
367 EXPORT_SYMBOL_GPL(gnttab_free_grant_reference);
/*
 * Splice a caller-private chain (terminated by GNTTAB_LIST_END) back
 * onto the global free list in one locked operation.
 */
369 void gnttab_free_grant_references(grant_ref_t head)
374 if (head == GNTTAB_LIST_END)
376 spin_lock_irqsave(&gnttab_list_lock, flags);
/* Walk to the tail of the private chain, counting entries. */
378 while (gnttab_entry(ref) != GNTTAB_LIST_END) {
379 ref = gnttab_entry(ref);
/* Link tail to the old free head and publish the new head/count. */
382 gnttab_entry(ref) = gnttab_free_head;
383 gnttab_free_head = head;
384 gnttab_free_count += count;
385 check_free_callbacks();
386 spin_unlock_irqrestore(&gnttab_list_lock, flags);
388 EXPORT_SYMBOL_GPL(gnttab_free_grant_references);
/*
 * Detach 'count' references into a caller-private chain at *head.
 * NOTE(review): error/return handling elided.
 */
390 int gnttab_alloc_grant_references(u16 count, grant_ref_t *head)
392 int h = get_free_entries(count);
401 EXPORT_SYMBOL_GPL(gnttab_alloc_grant_references);
/* True when the private chain has no entries left. */
403 int gnttab_empty_grant_references(const grant_ref_t *private_head)
405 return (*private_head == GNTTAB_LIST_END);
407 EXPORT_SYMBOL_GPL(gnttab_empty_grant_references);
/*
 * Pop one reference off a caller-private chain; the elided branch
 * presumably returns an error when the chain is empty.
 */
409 int gnttab_claim_grant_reference(grant_ref_t *private_head)
411 grant_ref_t g = *private_head;
412 if (unlikely(g == GNTTAB_LIST_END))
414 *private_head = gnttab_entry(g);
417 EXPORT_SYMBOL_GPL(gnttab_claim_grant_reference);
/* Push a reference back onto a caller-private chain. */
419 void gnttab_release_grant_reference(grant_ref_t *private_head,
422 gnttab_entry(release) = *private_head;
423 *private_head = release;
425 EXPORT_SYMBOL_GPL(gnttab_release_grant_reference);
/*
 * Register a callback to fire once 'count' free entries are available;
 * a no-op if it is already queued.
 */
427 void gnttab_request_free_callback(struct gnttab_free_callback *callback,
428 void (*fn)(void *), void *arg, u16 count)
431 spin_lock_irqsave(&gnttab_list_lock, flags);
432 if (callback->queued)
436 callback->count = count;
437 callback->queued = 1;
438 callback->next = gnttab_free_callback_list;
439 gnttab_free_callback_list = callback;
/* May satisfy the request immediately. */
440 check_free_callbacks();
442 spin_unlock_irqrestore(&gnttab_list_lock, flags);
444 EXPORT_SYMBOL_GPL(gnttab_request_free_callback);
/*
 * Unlink a previously-queued callback, if still pending.
 */
446 void gnttab_cancel_free_callback(struct gnttab_free_callback *callback)
448 struct gnttab_free_callback **pcb;
451 spin_lock_irqsave(&gnttab_list_lock, flags);
452 for (pcb = &gnttab_free_callback_list; *pcb; pcb = &(*pcb)->next) {
453 if (*pcb == callback) {
454 *pcb = callback->next;
455 callback->queued = 0;
459 spin_unlock_irqrestore(&gnttab_list_lock, flags);
461 EXPORT_SYMBOL_GPL(gnttab_cancel_free_callback);
/*
 * Extend the free-list bookkeeping for 'more_frames' additional grant
 * frames and thread the new entries onto the free list.  Caller holds
 * gnttab_list_lock (presumably -- called from gnttab_expand under it;
 * TODO confirm against elided lines).
 * GFP_ATOMIC: cannot sleep with the lock held.
 */
463 static int grow_gnttab_list(unsigned int more_frames)
465 unsigned int new_nr_grant_frames, extra_entries, i;
466 unsigned int nr_glist_frames, new_nr_glist_frames;
468 new_nr_grant_frames = nr_grant_frames + more_frames;
469 extra_entries = more_frames * ENTRIES_PER_GRANT_FRAME;
/* Allocate any extra pages needed to hold the new free-list links. */
471 nr_glist_frames = nr_freelist_frames(nr_grant_frames);
472 new_nr_glist_frames = nr_freelist_frames(new_nr_grant_frames);
473 for (i = nr_glist_frames; i < new_nr_glist_frames; i++) {
474 gnttab_list[i] = (grant_ref_t *)__get_free_page(GFP_ATOMIC);
/* Chain the new entries together and onto the free head. */
479 for (i = ENTRIES_PER_GRANT_FRAME * nr_grant_frames;
480 i < ENTRIES_PER_GRANT_FRAME * new_nr_grant_frames - 1; i++)
481 gnttab_entry(i) = i + 1;
483 gnttab_entry(i) = gnttab_free_head;
484 gnttab_free_head = ENTRIES_PER_GRANT_FRAME * nr_grant_frames;
485 gnttab_free_count += extra_entries;
487 nr_grant_frames = new_nr_grant_frames;
489 check_free_callbacks();
/* Error path (label elided): undo page allocations made above. */
494 for ( ; i >= nr_glist_frames; i--)
495 free_page((unsigned long) gnttab_list[i]);
/*
 * Ask the hypervisor how many grant frames it supports; fall back to
 * the legacy limit of 4 when the query op is unavailable.
 */
499 static unsigned int __max_nr_grant_frames(void)
501 struct gnttab_query_size query;
504 query.dom = DOMID_SELF;
506 rc = HYPERVISOR_grant_table_op(GNTTABOP_query_size, &query, 1);
507 if ((rc < 0) || (query.status != GNTST_okay))
508 return 4; /* Legacy max supported number of frames */
510 return query.max_nr_frames;
/*
 * Effective limit: hypervisor maximum clamped to the value sampled at
 * boot (the free list was sized for the boot-time value).
 */
513 static inline unsigned int max_nr_grant_frames(void)
515 unsigned int xen_max = __max_nr_grant_frames();
517 if (xen_max > boot_max_nr_grant_frames)
518 return boot_max_nr_grant_frames;
/*
 * apply_to_page_range() callback: install a machine-frame PTE for one
 * page of the shared grant table.  '*frames' is advanced by an elided
 * line (gnttab_map compensates with 'frames -= nr_gframes' below).
 */
525 static int map_pte_fn(pte_t *pte, struct page *pmd_page,
526 unsigned long addr, void *data)
528 unsigned long **frames = (unsigned long **)data;
530 set_pte_at(&init_mm, addr, pte, pfn_pte_ma((*frames)[0], PAGE_KERNEL));
/* Suspend path only: clear grant-table PTEs. */
535 #ifdef CONFIG_PM_SLEEP
536 static int unmap_pte_fn(pte_t *pte, struct page *pmd_page,
537 unsigned long addr, void *data)
540 set_pte_at(&init_mm, addr, pte, __pte(0));
/*
 * Reserve kernel virtual space large enough for the maximum number of
 * grant frames; mapping happens later in gnttab_map().
 */
545 void *arch_gnttab_alloc_shared(unsigned long *frames)
547 struct vm_struct *area;
548 area = alloc_vm_area(PAGE_SIZE * max_nr_grant_frames(), NULL);
549 BUG_ON(area == NULL);
552 #endif /* CONFIG_X86 */
/*
 * Native (CONFIG_XEN) mapping: ask Xen for the grant-table frame list
 * and map frames [0 .. end_idx] into the 'shared' area.
 * NOTE(review): error handling and kfree of 'frames' elided.
 */
554 static int gnttab_map(unsigned int start_idx, unsigned int end_idx)
556 struct gnttab_setup_table setup;
557 unsigned long *frames;
558 unsigned int nr_gframes = end_idx + 1;
/* GFP_ATOMIC: may be called from gnttab_expand under the list lock --
 * TODO confirm against elided callers. */
561 frames = kmalloc(nr_gframes * sizeof(unsigned long), GFP_ATOMIC);
565 setup.dom = DOMID_SELF;
566 setup.nr_frames = nr_gframes;
567 set_xen_guest_handle(setup.frame_list, frames);
569 rc = HYPERVISOR_grant_table_op(GNTTABOP_setup_table, &setup, 1);
575 BUG_ON(rc || setup.status != GNTST_okay);
/* First call only (elided guard): allocate the VA area. */
578 shared = arch_gnttab_alloc_shared(frames);
581 rc = apply_to_page_range(&init_mm, (unsigned long)shared,
582 PAGE_SIZE * nr_gframes,
583 map_pte_fn, &frames);
585 frames -= nr_gframes; /* adjust after map_pte_fn() */
586 #endif /* CONFIG_X86 */
/* Backend-only support for reclaiming granted pages from DMA use. */
593 #if defined(CONFIG_XEN_BACKEND) || defined(CONFIG_XEN_BACKEND_MODULE)
/* Serializes gnttab_copy_grant_page against __gnttab_dma_map_page. */
595 static DEFINE_SEQLOCK(gnttab_dma_lock);
/*
 * PageForeign destructor: strip the foreign markers and reset the page
 * before it is returned to the allocator.
 */
597 static void gnttab_page_free(struct page *page, unsigned int order)
600 ClearPageForeign(page);
601 gnttab_reset_grant_page(page);
602 ClearPageReserved(page);
607 * Must not be called with IRQs off. This should only be used on the
610 * Copy a foreign granted page to local memory.
/*
 * Replace the granted page behind *pagep with a local copy, using
 * GNTTABOP_unmap_and_replace so the grant can be returned.  Fails
 * (elided error paths) if the page is DMA-mapped or allocation fails.
 * NOTE(review): many lines elided; comments cover visible code only.
 */
612 int gnttab_copy_grant_page(grant_ref_t ref, struct page **pagep)
614 struct gnttab_unmap_and_replace unmap;
617 struct page *new_page;
/* Pin the original page for the duration of the copy. */
626 if (!get_page_unless_zero(page))
630 new_page = alloc_page(GFP_ATOMIC | __GFP_NOWARN);
634 new_addr = page_address(new_page);
635 addr = page_address(page);
636 copy_page(new_addr, addr);
638 pfn = page_to_pfn(page);
639 mfn = pfn_to_mfn(pfn);
640 new_mfn = virt_to_mfn(new_addr);
/* Exclude concurrent DMA mapping while we swap the pages. */
642 write_seqlock_bh(&gnttab_dma_lock);
644 /* Make seq visible before checking page_mapped. */
647 /* Has the page been DMA-mapped? */
648 if (unlikely(page_mapped(page))) {
649 write_sequnlock_bh(&gnttab_dma_lock);
/* Point the old pfn at the new machine frame in the p2m. */
655 if (!xen_feature(XENFEAT_auto_translated_physmap))
656 set_phys_to_machine(pfn, new_mfn);
658 gnttab_set_replace_op(&unmap, (unsigned long)addr,
659 (unsigned long)new_addr, ref);
661 err = HYPERVISOR_grant_table_op(GNTTABOP_unmap_and_replace,
664 BUG_ON(unmap.status != GNTST_okay);
666 write_sequnlock_bh(&gnttab_dma_lock);
/* Update m2p for the new frame and invalidate the copy's old pfn. */
668 if (!xen_feature(XENFEAT_auto_translated_physmap)) {
669 set_phys_to_machine(page_to_pfn(new_page), INVALID_P2M_ENTRY);
671 mmu.ptr = (new_mfn << PAGE_SHIFT) | MMU_MACHPHYS_UPDATE;
673 err = HYPERVISOR_mmu_update(&mmu, 1, NULL, DOMID_SELF);
/* Transfer identity from the old page to the local copy. */
677 new_page->mapping = page->mapping;
678 new_page->index = page->index;
679 set_bit(PG_foreign, &new_page->flags);
680 if (PageReserved(page))
681 SetPageReserved(new_page);
/* Old page becomes foreign-owned with our free destructor. */
684 SetPageForeign(page, gnttab_page_free);
685 page->mapping = NULL;
691 EXPORT_SYMBOL_GPL(gnttab_copy_grant_page);
/*
 * Reset refcount/mapcount so the page can re-enter normal page
 * lifecycle after foreign use.
 */
693 void gnttab_reset_grant_page(struct page *page)
695 init_page_count(page);
696 reset_page_mapcount(page);
698 EXPORT_SYMBOL_GPL(gnttab_reset_grant_page);
701 * Keep track of foreign pages marked as PageForeign so that we don't
702 * return them to the remote domain prematurely.
704 * PageForeign pages are pinned down by increasing their mapcount.
706 * All other pages are simply returned as is.
/*
 * DMA-map hook: pin a PageForeign page by setting its mapcount,
 * retrying under the seqlock to stay coherent with
 * gnttab_copy_grant_page above.
 */
708 void __gnttab_dma_map_page(struct page *page)
712 if (!is_running_on_xen() || !PageForeign(page))
716 seq = read_seqbegin(&gnttab_dma_lock);
/* Already replaced by a local page: nothing to pin. */
718 if (gnttab_dma_local_pfn(page))
721 atomic_set(&page->_mapcount, 0);
723 /* Make _mapcount visible before read_seqretry. */
725 } while (unlikely(read_seqretry(&gnttab_dma_lock, seq)));
728 #endif /* CONFIG_XEN_BACKEND */
/* pte_special support for grant mappings (value computed in init). */
730 #ifdef __HAVE_ARCH_PTE_SPECIAL
732 static unsigned int GNTMAP_pte_special;
/*
 * Pre-process a batch of map ops: tag application-visible host
 * mappings with the pte_special avail bit when supported.
 * NOTE(review): return statements elided; return semantics unclear
 * from visible code.
 */
734 bool gnttab_pre_map_adjust(unsigned int cmd, struct gnttab_map_grant_ref *map,
739 if (unlikely(cmd != GNTTABOP_map_grant_ref))
742 for (i = 0; i < count; ++i, ++map) {
743 if (!(map->flags & GNTMAP_host_map)
744 || !(map->flags & GNTMAP_application_map))
746 if (GNTMAP_pte_special)
747 map->flags |= GNTMAP_pte_special;
/* Without avail-bit support auto-translated guests cannot get here. */
749 BUG_ON(xen_feature(XENFEAT_auto_translated_physmap));
756 EXPORT_SYMBOL(gnttab_pre_map_adjust);
/* Pre-3.4 hypervisors: fix up PTEs after the map op by hand. */
758 #if CONFIG_XEN_COMPAT < 0x030400
759 int gnttab_post_map_adjust(const struct gnttab_map_grant_ref *map, unsigned int count)
764 for (i = 0; i < count && rc == 0; ++i, ++map) {
767 if (!(map->flags & GNTMAP_host_map)
768 || !(map->flags & GNTMAP_application_map))
/* Build a user-visible PTE for the mapped bus address. */
772 pte = __pte_ma((map->dev_bus_addr | _PAGE_PRESENT | _PAGE_USER
773 | _PAGE_ACCESSED | _PAGE_DIRTY | _PAGE_NX
775 & __supported_pte_mask);
777 #error Architecture not yet supported.
779 if (!(map->flags & GNTMAP_readonly))
780 pte = pte_mkwrite(pte);
/* Install via mmu_update when the caller passed a PTE address,
 * otherwise via update_va_mapping on the VA. */
782 if (map->flags & GNTMAP_contains_pte) {
785 u.ptr = map->host_addr;
786 u.val = __pte_val(pte);
787 rc = HYPERVISOR_mmu_update(&u, 1, NULL, DOMID_SELF);
789 rc = HYPERVISOR_update_va_mapping(map->host_addr, pte, 0);
794 EXPORT_SYMBOL(gnttab_post_map_adjust);
797 #endif /* __HAVE_ARCH_PTE_SPECIAL */
/*
 * Native resume: remap all current grant frames (fails -- elided
 * return -- if the hypervisor now supports fewer frames than we use).
 */
799 int gnttab_resume(void)
801 if (max_nr_grant_frames() < nr_grant_frames)
803 return gnttab_map(0, nr_grant_frames - 1);
806 #ifdef CONFIG_PM_SLEEP
807 #include <linux/syscore_ops.h>
/* Clear grant-table PTEs across suspend. */
810 static int gnttab_suspend(void)
812 apply_to_page_range(&init_mm, (unsigned long)shared,
813 PAGE_SIZE * nr_grant_frames,
/* !CONFIG_PM_SLEEP stub. */
818 #define gnttab_suspend NULL
/* syscore resume shim (void return, elided body calls gnttab_resume
 * presumably -- TODO confirm). */
821 static void _gnttab_resume(void)
827 static struct syscore_ops gnttab_syscore_ops = {
828 .resume = _gnttab_resume,
829 .suspend = gnttab_suspend,
/* ---- HVM (!CONFIG_XEN) variants below ---- */
833 #else /* !CONFIG_XEN */
835 #include <platform-pci.h>
/* MMIO base where grant frames are mapped for HVM guests. */
837 static unsigned long resume_frames;
/*
 * HVM mapping: add each grant frame to the physmap at the reserved
 * MMIO region.
 */
839 static int gnttab_map(unsigned int start_idx, unsigned int end_idx)
841 struct xen_add_to_physmap xatp;
842 unsigned int i = end_idx;
844 /* Loop backwards, so that the first hypercall has the largest index,
845 * ensuring that the table will grow only once.
848 xatp.domid = DOMID_SELF;
850 xatp.space = XENMAPSPACE_grant_table;
851 xatp.gpfn = (resume_frames >> PAGE_SHIFT) + i;
852 if (HYPERVISOR_memory_op(XENMEM_add_to_physmap, &xatp))
854 } while (i-- > start_idx);
/*
 * HVM resume: lazily reserve and ioremap MMIO space for the maximum
 * table size, then (re)map the frames currently in use.
 */
859 int gnttab_resume(void)
861 unsigned int max_nr_gframes, nr_gframes;
863 nr_gframes = nr_grant_frames;
864 max_nr_gframes = max_nr_grant_frames();
865 if (max_nr_gframes < nr_gframes)
868 if (!resume_frames) {
869 resume_frames = alloc_xen_mmio(PAGE_SIZE * max_nr_gframes);
870 shared = ioremap(resume_frames, PAGE_SIZE * max_nr_gframes);
871 if (shared == NULL) {
872 pr_warning("error to ioremap gnttab share frames\n");
877 gnttab_map(0, nr_gframes - 1);
882 #endif /* !CONFIG_XEN */
/*
 * Grow the grant table by enough frames to satisfy 'req_entries' more
 * entries; fails (elided return) past max_nr_grant_frames().
 * Called with gnttab_list_lock held by get_free_entries().
 */
884 static int gnttab_expand(unsigned int req_entries)
887 unsigned int cur, extra;
889 cur = nr_grant_frames;
/* Round the entry shortfall up to whole frames. */
890 extra = ((req_entries + (ENTRIES_PER_GRANT_FRAME-1)) /
891 ENTRIES_PER_GRANT_FRAME);
892 if (cur + extra > max_nr_grant_frames())
895 if ((rc = gnttab_map(cur, cur + extra - 1)) == 0)
896 rc = grow_gnttab_list(extra);
/*
 * NOTE(review): the gnttab_init() signature and opening lines
 * (original lines ~900-908) were elided by extraction; what follows is
 * the body of the module init: allocate the free-list pages, map the
 * initial frames, seed the free list past the reserved entries, and
 * register PM hooks.
 */
909 unsigned int max_nr_glist_frames, nr_glist_frames;
910 unsigned int nr_init_grefs;
912 if (!is_running_on_xen())
916 boot_max_nr_grant_frames = __max_nr_grant_frames();
918 /* Determine the maximum number of frames required for the
919 * grant reference free list on the current hypervisor.
921 max_nr_glist_frames = nr_freelist_frames(boot_max_nr_grant_frames);
923 gnttab_list = kmalloc(max_nr_glist_frames * sizeof(grant_ref_t *),
925 if (gnttab_list == NULL)
928 nr_glist_frames = nr_freelist_frames(nr_grant_frames);
929 for (i = 0; i < nr_glist_frames; i++) {
930 gnttab_list[i] = (grant_ref_t *)__get_free_page(GFP_KERNEL);
931 if (gnttab_list[i] == NULL)
/* Map the initial grant frames (both native and HVM paths). */
935 if (gnttab_resume() < 0)
/* Thread all non-reserved entries onto the free list. */
938 nr_init_grefs = nr_grant_frames * ENTRIES_PER_GRANT_FRAME;
940 for (i = NR_RESERVED_ENTRIES; i < nr_init_grefs - 1; i++)
941 gnttab_entry(i) = i + 1;
943 gnttab_entry(nr_init_grefs - 1) = GNTTAB_LIST_END;
944 gnttab_free_count = nr_init_grefs - NR_RESERVED_ENTRIES;
945 gnttab_free_head = NR_RESERVED_ENTRIES;
/* Compute the pte_special avail-bit encoding when supported. */
947 #if defined(CONFIG_XEN) && defined(__HAVE_ARCH_PTE_SPECIAL)
948 if (!xen_feature(XENFEAT_auto_translated_physmap)
949 && xen_feature(XENFEAT_gnttab_map_avail_bits)) {
951 GNTMAP_pte_special = (__pte_val(pte_mkspecial(__pte_ma(0)))
952 >> _PAGE_BIT_UNUSED1) << _GNTMAP_guest_avail0;
954 #error Architecture not yet supported.
959 #if defined(CONFIG_XEN) && defined(CONFIG_PM_SLEEP)
960 if (!is_initial_xendomain())
961 register_syscore_ops(&gnttab_syscore_ops);
/* Error path (label elided): free the pages allocated above. */
967 for (i--; i >= 0; i--)
968 free_page((unsigned long)gnttab_list[i]);
974 core_initcall(gnttab_init);