/*
 * linux/drivers/video/xenfb.c -- Xen para-virtual frame buffer device
 *
 * Copyright (C) 2005-2006 Anthony Liguori <aliguori@us.ibm.com>
 * Copyright (C) 2006 Red Hat, Inc., Markus Armbruster <armbru@redhat.com>
 *
 * Based on linux/drivers/video/q40fb.c
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file COPYING in the main directory of this archive for
 * more details.
 */

/*
 * TODO:
 *
 * Switch to grant tables when they become capable of dealing with the
 * frame buffer.
 */
#include <linux/console.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/fb.h>
#include <linux/module.h>
#include <linux/vmalloc.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/mutex.h>
#include <linux/freezer.h>
#include <linux/kthread.h>
#include <asm/hypervisor.h>
#include <xen/evtchn.h>
#include <xen/interface/io/fbif.h>
#include <xen/interface/io/protocols.h>
#include <xen/xenbus.h>
40 struct list_head link;
41 struct vm_area_struct *vma;
44 struct xenfb_info *info;
49 struct task_struct *kthread;
53 struct fb_info *fb_info;
54 struct timer_list refresh;
56 int x1, y1, x2, y2; /* dirty rectangle,
57 protected by dirty_lock */
58 spinlock_t dirty_lock;
62 struct list_head mappings; /* protected by mm_lock */
65 struct xenfb_page *page;
67 int feature_resize; /* Backend has resize feature */
68 struct xenfb_resize resize;
70 spinlock_t resize_lock;
72 struct xenbus_device *xbdev;
/*
 * There are three locks:
 *    spinlock resize_lock protecting resize_dpy and resize
 *    spinlock dirty_lock protecting the dirty rectangle
 *    mutex mm_lock protecting mappings.
 *
 * How the dirty and mapping locks work together
 *
 * The problem is that dirty rectangle and mappings aren't
 * independent: the dirty rectangle must cover all faulted pages in
 * mappings.  We need to prove that our locking maintains this
 * invariant.
 *
 * There are several kinds of critical regions:
 *
 * 1. Holding only dirty_lock: xenfb_refresh().  May run in
 *    interrupts.  Extends the dirty rectangle.  Trivially preserves
 *    invariant.
 *
 * 2. Holding only mm_lock: xenfb_mmap() and xenfb_vm_close().  Touch
 *    only mappings.  The former creates unfaulted pages.  Preserves
 *    invariant.  The latter removes pages.  Preserves invariant.
 *
 * 3. Holding both locks: xenfb_vm_fault().  Extends the dirty
 *    rectangle and updates mappings consistently.  Preserves
 *    invariant.
 *
 * 4. The ugliest one: xenfb_update_screen().  Clear the dirty
 *    rectangle and update mappings consistently.
 *
 *    We can't simply hold both locks, because zap_page_range() cannot
 *    be called with a spinlock held.
 *
 *    Therefore, we first clear the dirty rectangle with both locks
 *    held.  Then we unlock dirty_lock and update the mappings.
 *    Critical regions that hold only dirty_lock may interfere with
 *    that.  This can only be region 1: xenfb_refresh().  But that
 *    just extends the dirty rectangle, which can't harm the
 *    invariant.
 *
 * But FIXME: the invariant is too weak.  It misses that the fault
 * record in mappings must be consistent with the mapping of pages in
 * the associated address space!  __do_fault() updates the PTE after
 * xenfb_vm_fault() returns, i.e. outside the critical region.  This
 * allows the following race:
 *
 * X writes to some address in the Xen frame buffer
 * Fault - call __do_fault()
 *     call xenfb_vm_fault()
 *         grab mm_lock
 *         map->faults++;
 *         release mm_lock
 *     return back to do_no_page()
 *      (preempted, or SMP)
 * Xen worker thread runs.
 *      grab mm_lock
 *      look at mappings
 *          find this mapping, zaps its pages (but page not in pte yet)
 *          clear map->faults
 *      releases mm_lock
 * (back to X process)
 *      put page in X's pte
 *
 * Oh well, we won't be updating the writes to this page anytime soon.
 */
140 #define MB_ (1024*1024)
141 #define XENFB_DEFAULT_FB_LEN (XENFB_WIDTH * XENFB_HEIGHT * XENFB_DEPTH / 8)
143 enum {KPARAM_MEM, KPARAM_WIDTH, KPARAM_HEIGHT, KPARAM_CNT};
144 static int video[KPARAM_CNT] = {2, XENFB_WIDTH, XENFB_HEIGHT};
145 module_param_array(video, int, NULL, 0);
146 MODULE_PARM_DESC(video,
147 "Size of video memory in MB and width,height in pixels, default = (2,800,600)");
149 static int xenfb_fps = 20;
151 static int xenfb_remove(struct xenbus_device *);
152 static void xenfb_init_shared_page(struct xenfb_info *, struct fb_info *);
153 static int xenfb_connect_backend(struct xenbus_device *, struct xenfb_info *);
154 static void xenfb_disconnect_backend(struct xenfb_info *);
156 static void xenfb_send_event(struct xenfb_info *info,
157 union xenfb_out_event *event)
161 prod = info->page->out_prod;
162 /* caller ensures !xenfb_queue_full() */
163 mb(); /* ensure ring space available */
164 XENFB_OUT_RING_REF(info->page, prod) = *event;
165 wmb(); /* ensure ring contents visible */
166 info->page->out_prod = prod + 1;
168 notify_remote_via_irq(info->irq);
171 static void xenfb_do_update(struct xenfb_info *info,
172 int x, int y, int w, int h)
174 union xenfb_out_event event;
176 memset(&event, 0, sizeof(event));
177 event.type = XENFB_TYPE_UPDATE;
180 event.update.width = w;
181 event.update.height = h;
183 /* caller ensures !xenfb_queue_full() */
184 xenfb_send_event(info, &event);
187 static void xenfb_do_resize(struct xenfb_info *info)
189 union xenfb_out_event event;
191 memset(&event, 0, sizeof(event));
192 event.resize = info->resize;
194 /* caller ensures !xenfb_queue_full() */
195 xenfb_send_event(info, &event);
198 static int xenfb_queue_full(struct xenfb_info *info)
202 prod = info->page->out_prod;
203 cons = info->page->out_cons;
204 return prod - cons == XENFB_OUT_RING_LEN;
207 static void xenfb_update_screen(struct xenfb_info *info)
211 struct xenfb_mapping *map;
213 if (xenfb_queue_full(info))
216 mutex_lock(&info->mm_lock);
218 spin_lock_irqsave(&info->dirty_lock, flags);
225 info->x1 = info->y1 = INT_MAX;
226 info->x2 = info->y2 = 0;
228 spin_unlock_irqrestore(&info->dirty_lock, flags);
229 mutex_unlock(&info->mm_lock);
232 spin_unlock_irqrestore(&info->dirty_lock, flags);
234 list_for_each_entry(map, &info->mappings, link) {
237 zap_page_range(map->vma, map->vma->vm_start,
238 map->vma->vm_end - map->vma->vm_start, NULL);
242 mutex_unlock(&info->mm_lock);
244 if (x2 < x1 || y2 < y1) {
245 pr_warning("xenfb_update_screen bogus rect %d %d %d %d\n",
249 xenfb_do_update(info, x1, y1, x2 - x1, y2 - y1);
252 static void xenfb_handle_resize_dpy(struct xenfb_info *info)
256 spin_lock_irqsave(&info->resize_lock, flags);
257 if (info->resize_dpy) {
258 if (!xenfb_queue_full(info)) {
259 info->resize_dpy = 0;
260 xenfb_do_resize(info);
263 spin_unlock_irqrestore(&info->resize_lock, flags);
266 static int xenfb_thread(void *data)
268 struct xenfb_info *info = data;
270 while (!kthread_should_stop()) {
271 xenfb_handle_resize_dpy(info);
272 xenfb_update_screen(info);
273 wait_event_interruptible(info->wq,
274 kthread_should_stop() || info->dirty);
280 static int xenfb_setcolreg(unsigned regno, unsigned red, unsigned green,
281 unsigned blue, unsigned transp,
282 struct fb_info *info)
286 if (regno > info->cmap.len)
289 red >>= (16 - info->var.red.length);
290 green >>= (16 - info->var.green.length);
291 blue >>= (16 - info->var.blue.length);
293 v = (red << info->var.red.offset) |
294 (green << info->var.green.offset) |
295 (blue << info->var.blue.offset);
297 /* FIXME is this sane? check against xxxfb_setcolreg()! */
298 switch (info->var.bits_per_pixel) {
302 ((u32 *)info->pseudo_palette)[regno] = v;
309 static void xenfb_timer(unsigned long data)
311 struct xenfb_info *info = (struct xenfb_info *)data;
315 static void __xenfb_refresh(struct xenfb_info *info,
316 int x1, int y1, int w, int h)
333 if (timer_pending(&info->refresh))
336 mod_timer(&info->refresh, jiffies + HZ/xenfb_fps);
339 static void xenfb_refresh(struct xenfb_info *info,
340 int x1, int y1, int w, int h)
344 spin_lock_irqsave(&info->dirty_lock, flags);
345 __xenfb_refresh(info, x1, y1, w, h);
346 spin_unlock_irqrestore(&info->dirty_lock, flags);
349 static void xenfb_fillrect(struct fb_info *p, const struct fb_fillrect *rect)
351 struct xenfb_info *info = p->par;
353 cfb_fillrect(p, rect);
354 xenfb_refresh(info, rect->dx, rect->dy, rect->width, rect->height);
357 static void xenfb_imageblit(struct fb_info *p, const struct fb_image *image)
359 struct xenfb_info *info = p->par;
361 cfb_imageblit(p, image);
362 xenfb_refresh(info, image->dx, image->dy, image->width, image->height);
365 static void xenfb_copyarea(struct fb_info *p, const struct fb_copyarea *area)
367 struct xenfb_info *info = p->par;
369 cfb_copyarea(p, area);
370 xenfb_refresh(info, area->dx, area->dy, area->width, area->height);
373 static void xenfb_vm_open(struct vm_area_struct *vma)
375 struct xenfb_mapping *map = vma->vm_private_data;
376 atomic_inc(&map->map_refs);
379 static void xenfb_vm_close(struct vm_area_struct *vma)
381 struct xenfb_mapping *map = vma->vm_private_data;
382 struct xenfb_info *info = map->info;
384 mutex_lock(&info->mm_lock);
385 if (atomic_dec_and_test(&map->map_refs)) {
386 list_del(&map->link);
389 mutex_unlock(&info->mm_lock);
392 static int xenfb_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
394 struct xenfb_mapping *map = vma->vm_private_data;
395 struct xenfb_info *info = map->info;
396 int pgnr = ((long)vmf->virtual_address - vma->vm_start) >> PAGE_SHIFT;
401 if (pgnr >= info->nr_pages)
402 return VM_FAULT_SIGBUS;
404 mutex_lock(&info->mm_lock);
405 spin_lock_irqsave(&info->dirty_lock, flags);
406 page = info->pages[pgnr];
410 y1 = pgnr * PAGE_SIZE / info->fb_info->fix.line_length;
411 y2 = (pgnr * PAGE_SIZE + PAGE_SIZE - 1) / info->fb_info->fix.line_length;
412 if (y2 > info->fb_info->var.yres)
413 y2 = info->fb_info->var.yres;
414 __xenfb_refresh(info, 0, y1, info->fb_info->var.xres, y2 - y1);
415 spin_unlock_irqrestore(&info->dirty_lock, flags);
416 mutex_unlock(&info->mm_lock);
420 return VM_FAULT_MINOR;
423 static struct vm_operations_struct xenfb_vm_ops = {
424 .open = xenfb_vm_open,
425 .close = xenfb_vm_close,
426 .fault = xenfb_vm_fault,
429 static int xenfb_mmap(struct fb_info *fb_info, struct vm_area_struct *vma)
431 struct xenfb_info *info = fb_info->par;
432 struct xenfb_mapping *map;
435 if (!(vma->vm_flags & VM_WRITE))
437 if (!(vma->vm_flags & VM_SHARED))
439 if (vma->vm_pgoff != 0)
442 map_pages = (vma->vm_end - vma->vm_start + PAGE_SIZE-1) >> PAGE_SHIFT;
443 if (map_pages > info->nr_pages)
446 map = kzalloc(sizeof(*map), GFP_KERNEL);
453 atomic_set(&map->map_refs, 1);
455 mutex_lock(&info->mm_lock);
456 list_add(&map->link, &info->mappings);
457 mutex_unlock(&info->mm_lock);
459 vma->vm_ops = &xenfb_vm_ops;
460 vma->vm_flags |= (VM_DONTEXPAND | VM_RESERVED);
461 vma->vm_private_data = map;
467 xenfb_check_var(struct fb_var_screeninfo *var, struct fb_info *info)
469 struct xenfb_info *xenfb_info;
470 int required_mem_len;
472 xenfb_info = info->par;
474 if (!xenfb_info->feature_resize) {
475 if (var->xres == video[KPARAM_WIDTH] &&
476 var->yres == video[KPARAM_HEIGHT] &&
477 var->bits_per_pixel == xenfb_info->page->depth) {
483 /* Can't resize past initial width and height */
484 if (var->xres > video[KPARAM_WIDTH] || var->yres > video[KPARAM_HEIGHT])
487 required_mem_len = var->xres * var->yres * (xenfb_info->page->depth / 8);
488 if (var->bits_per_pixel == xenfb_info->page->depth &&
489 var->xres <= info->fix.line_length / (XENFB_DEPTH / 8) &&
490 required_mem_len <= info->fix.smem_len) {
491 var->xres_virtual = var->xres;
492 var->yres_virtual = var->yres;
498 static int xenfb_set_par(struct fb_info *info)
500 struct xenfb_info *xenfb_info;
503 xenfb_info = info->par;
505 spin_lock_irqsave(&xenfb_info->resize_lock, flags);
506 xenfb_info->resize.type = XENFB_TYPE_RESIZE;
507 xenfb_info->resize.width = info->var.xres;
508 xenfb_info->resize.height = info->var.yres;
509 xenfb_info->resize.stride = info->fix.line_length;
510 xenfb_info->resize.depth = info->var.bits_per_pixel;
511 xenfb_info->resize.offset = 0;
512 xenfb_info->resize_dpy = 1;
513 spin_unlock_irqrestore(&xenfb_info->resize_lock, flags);
517 static struct fb_ops xenfb_fb_ops = {
518 .owner = THIS_MODULE,
519 .fb_setcolreg = xenfb_setcolreg,
520 .fb_fillrect = xenfb_fillrect,
521 .fb_copyarea = xenfb_copyarea,
522 .fb_imageblit = xenfb_imageblit,
523 .fb_mmap = xenfb_mmap,
524 .fb_check_var = xenfb_check_var,
525 .fb_set_par = xenfb_set_par,
528 static irqreturn_t xenfb_event_handler(int rq, void *dev_id)
531 * No in events recognized, simply ignore them all.
532 * If you need to recognize some, see xenbkd's input_handler()
533 * for how to do that.
535 struct xenfb_info *info = dev_id;
536 struct xenfb_page *page = info->page;
538 if (page->in_cons != page->in_prod) {
539 info->page->in_cons = info->page->in_prod;
540 notify_remote_via_irq(info->irq);
/* Translate a vmalloc'ed kernel address to its machine frame number. */
static unsigned long vmalloc_to_mfn(void *address)
{
	return pfn_to_mfn(vmalloc_to_pfn(address));
}
550 static __devinit void
551 xenfb_make_preferred_console(void)
555 if (console_set_on_cmdline)
559 for_each_console(c) {
560 if (!strcmp(c->name, "tty") && c->index == 0)
565 unregister_console(c);
566 c->flags |= CON_CONSDEV;
567 c->flags &= ~CON_PRINTBUFFER; /* don't print again */
572 static int __devinit xenfb_probe(struct xenbus_device *dev,
573 const struct xenbus_device_id *id)
575 struct xenfb_info *info;
576 struct fb_info *fb_info;
581 info = kzalloc(sizeof(*info), GFP_KERNEL);
583 xenbus_dev_fatal(dev, -ENOMEM, "allocating info structure");
587 /* Limit kernel param videoram amount to what is in xenstore */
588 if (xenbus_scanf(XBT_NIL, dev->otherend, "videoram", "%d", &val) == 1) {
589 if (val < video[KPARAM_MEM])
590 video[KPARAM_MEM] = val;
593 /* If requested res does not fit in available memory, use default */
594 fb_size = video[KPARAM_MEM] * MB_;
595 if (video[KPARAM_WIDTH] * video[KPARAM_HEIGHT] * XENFB_DEPTH/8 > fb_size) {
596 video[KPARAM_WIDTH] = XENFB_WIDTH;
597 video[KPARAM_HEIGHT] = XENFB_HEIGHT;
598 fb_size = XENFB_DEFAULT_FB_LEN;
601 dev_set_drvdata(&dev->dev, info);
604 info->x1 = info->y1 = INT_MAX;
605 spin_lock_init(&info->dirty_lock);
606 spin_lock_init(&info->resize_lock);
607 mutex_init(&info->mm_lock);
608 init_waitqueue_head(&info->wq);
609 init_timer(&info->refresh);
610 info->refresh.function = xenfb_timer;
611 info->refresh.data = (unsigned long)info;
612 INIT_LIST_HEAD(&info->mappings);
614 info->fb = vzalloc(fb_size);
615 if (info->fb == NULL)
618 info->nr_pages = (fb_size + PAGE_SIZE - 1) >> PAGE_SHIFT;
620 info->pages = kmalloc(sizeof(struct page *) * info->nr_pages,
622 if (info->pages == NULL)
625 info->mfns = vmalloc(sizeof(unsigned long) * info->nr_pages);
629 /* set up shared page */
630 info->page = (void *)__get_free_page(GFP_KERNEL | __GFP_ZERO);
634 fb_info = framebuffer_alloc(sizeof(u32) * 256, NULL);
635 /* see fishy hackery below */
639 /* FIXME fishy hackery */
640 fb_info->pseudo_palette = fb_info->par;
643 fb_info->screen_base = info->fb;
645 fb_info->fbops = &xenfb_fb_ops;
646 fb_info->var.xres_virtual = fb_info->var.xres = video[KPARAM_WIDTH];
647 fb_info->var.yres_virtual = fb_info->var.yres = video[KPARAM_HEIGHT];
648 fb_info->var.bits_per_pixel = XENFB_DEPTH;
650 fb_info->var.red = (struct fb_bitfield){16, 8, 0};
651 fb_info->var.green = (struct fb_bitfield){8, 8, 0};
652 fb_info->var.blue = (struct fb_bitfield){0, 8, 0};
654 fb_info->var.activate = FB_ACTIVATE_NOW;
655 fb_info->var.height = -1;
656 fb_info->var.width = -1;
657 fb_info->var.vmode = FB_VMODE_NONINTERLACED;
659 fb_info->fix.visual = FB_VISUAL_TRUECOLOR;
660 fb_info->fix.line_length = fb_info->var.xres * (XENFB_DEPTH / 8);
661 fb_info->fix.smem_start = 0;
662 fb_info->fix.smem_len = fb_size;
663 strcpy(fb_info->fix.id, "xen");
664 fb_info->fix.type = FB_TYPE_PACKED_PIXELS;
665 fb_info->fix.accel = FB_ACCEL_NONE;
667 fb_info->flags = FBINFO_FLAG_DEFAULT;
669 ret = fb_alloc_cmap(&fb_info->cmap, 256, 0);
671 framebuffer_release(fb_info);
672 xenbus_dev_fatal(dev, ret, "fb_alloc_cmap");
676 xenfb_init_shared_page(info, fb_info);
678 ret = register_framebuffer(fb_info);
680 fb_dealloc_cmap(&info->fb_info->cmap);
681 framebuffer_release(fb_info);
682 xenbus_dev_fatal(dev, ret, "register_framebuffer");
685 info->fb_info = fb_info;
687 ret = xenfb_connect_backend(dev, info);
691 xenfb_make_preferred_console();
696 xenbus_dev_fatal(dev, ret, "allocating device memory");
702 static int xenfb_resume(struct xenbus_device *dev)
704 struct xenfb_info *info = dev_get_drvdata(&dev->dev);
706 xenfb_disconnect_backend(info);
707 xenfb_init_shared_page(info, info->fb_info);
708 return xenfb_connect_backend(dev, info);
711 static int xenfb_remove(struct xenbus_device *dev)
713 struct xenfb_info *info = dev_get_drvdata(&dev->dev);
715 del_timer(&info->refresh);
717 kthread_stop(info->kthread);
718 xenfb_disconnect_backend(info);
720 unregister_framebuffer(info->fb_info);
721 fb_dealloc_cmap(&info->fb_info->cmap);
722 framebuffer_release(info->fb_info);
724 free_page((unsigned long)info->page);
733 static void xenfb_init_shared_page(struct xenfb_info *info,
734 struct fb_info * fb_info)
737 int epd = PAGE_SIZE / sizeof(info->mfns[0]);
739 for (i = 0; i < info->nr_pages; i++)
740 info->pages[i] = vmalloc_to_page(info->fb + i * PAGE_SIZE);
742 for (i = 0; i < info->nr_pages; i++)
743 info->mfns[i] = vmalloc_to_mfn(info->fb + i * PAGE_SIZE);
745 for (i = 0; i * epd < info->nr_pages; i++)
746 info->page->pd[i] = vmalloc_to_mfn(&info->mfns[i * epd]);
748 info->page->width = fb_info->var.xres;
749 info->page->height = fb_info->var.yres;
750 info->page->depth = fb_info->var.bits_per_pixel;
751 info->page->line_length = fb_info->fix.line_length;
752 info->page->mem_length = fb_info->fix.smem_len;
753 info->page->in_cons = info->page->in_prod = 0;
754 info->page->out_cons = info->page->out_prod = 0;
757 static int xenfb_connect_backend(struct xenbus_device *dev,
758 struct xenfb_info *info)
761 struct xenbus_transaction xbt;
763 irq = bind_listening_port_to_irqhandler(
764 dev->otherend_id, xenfb_event_handler, 0, "xenfb", info);
766 xenbus_dev_fatal(dev, irq,
767 "bind_listening_port_to_irqhandler");
772 ret = xenbus_transaction_start(&xbt);
774 xenbus_dev_fatal(dev, ret, "starting transaction");
777 ret = xenbus_printf(xbt, dev->nodename, "page-ref", "%lu",
778 virt_to_mfn(info->page));
781 ret = xenbus_printf(xbt, dev->nodename, "event-channel", "%u",
782 irq_to_evtchn_port(irq));
785 ret = xenbus_printf(xbt, dev->nodename, "protocol", "%s",
786 XEN_IO_PROTO_ABI_NATIVE);
789 ret = xenbus_printf(xbt, dev->nodename, "feature-update", "1");
792 ret = xenbus_transaction_end(xbt, 0);
796 xenbus_dev_fatal(dev, ret, "completing transaction");
801 xenbus_switch_state(dev, XenbusStateInitialised);
805 xenbus_transaction_end(xbt, 1);
806 xenbus_dev_fatal(dev, ret, "writing xenstore");
808 unbind_from_irqhandler(irq, info);
812 static void xenfb_disconnect_backend(struct xenfb_info *info)
815 unbind_from_irqhandler(info->irq, info);
819 static void xenfb_backend_changed(struct xenbus_device *dev,
820 enum xenbus_state backend_state)
822 struct xenfb_info *info = dev_get_drvdata(&dev->dev);
825 switch (backend_state) {
826 case XenbusStateInitialising:
827 case XenbusStateInitialised:
828 case XenbusStateReconfiguring:
829 case XenbusStateReconfigured:
830 case XenbusStateUnknown:
831 case XenbusStateClosed:
834 case XenbusStateInitWait:
836 xenbus_switch_state(dev, XenbusStateConnected);
839 case XenbusStateConnected:
841 * Work around xenbus race condition: If backend goes
842 * through InitWait to Connected fast enough, we can
843 * get Connected twice here.
845 if (dev->state != XenbusStateConnected)
846 goto InitWait; /* no InitWait seen yet, fudge it */
849 if (xenbus_scanf(XBT_NIL, dev->otherend,
850 "feature-resize", "%d", &val) < 0)
852 info->feature_resize = val;
854 if (xenbus_scanf(XBT_NIL, info->xbdev->otherend,
855 "request-update", "%d", &val) < 0)
858 if (val && !info->kthread) {
859 info->kthread = kthread_run(xenfb_thread, info,
861 if (IS_ERR(info->kthread)) {
862 info->kthread = NULL;
863 xenbus_dev_fatal(dev, PTR_ERR(info->kthread),
869 case XenbusStateClosing:
870 // FIXME is this safe in any dev->state?
871 xenbus_frontend_closed(dev);
876 static const struct xenbus_device_id xenfb_ids[] = {
880 MODULE_ALIAS("xen:vfb");
882 static DEFINE_XENBUS_DRIVER(xenfb, ,
883 .probe = xenfb_probe,
884 .remove = xenfb_remove,
885 .resume = xenfb_resume,
886 .otherend_changed = xenfb_backend_changed,
889 static int __init xenfb_init(void)
891 if (!is_running_on_xen())
894 /* Nothing to do if running in dom0. */
895 if (is_initial_xendomain())
898 return xenbus_register_frontend(&xenfb_driver);
901 static void __exit xenfb_cleanup(void)
903 return xenbus_unregister_driver(&xenfb_driver);
906 module_init(xenfb_init);
907 module_exit(xenfb_cleanup);
909 MODULE_DESCRIPTION("Xen virtual framebuffer device frontend");
910 MODULE_LICENSE("GPL");