2 * linux/drivers/video/xenfb.c -- Xen para-virtual frame buffer device
4 * Copyright (C) 2005-2006 Anthony Liguori <aliguori@us.ibm.com>
5 * Copyright (C) 2006 Red Hat, Inc., Markus Armbruster <armbru@redhat.com>
7 * Based on linux/drivers/video/q40fb.c
9 * This file is subject to the terms and conditions of the GNU General Public
10 * License. See the file COPYING in the main directory of this archive for
17 * Switch to grant tables when they become capable of dealing with the
21 #include <linux/kernel.h>
22 #include <linux/errno.h>
24 #include <linux/module.h>
25 #include <linux/vmalloc.h>
27 #include <linux/mutex.h>
28 #include <linux/freezer.h>
29 #include <asm/hypervisor.h>
30 #include <xen/evtchn.h>
31 #include <xen/interface/io/fbif.h>
32 #include <xen/interface/io/protocols.h>
33 #include <xen/xenbus.h>
34 #include <linux/kthread.h>
/*
 * NOTE(review): this extraction is missing lines — the struct headers,
 * closing braces and several members are absent.  The members below
 * belong to two structures: a per-mmap record (one per userspace
 * mapping of the framebuffer) and the main per-device state.
 */
/* Per-mmap mapping record, one per vma mapping the framebuffer. */
38 struct list_head link;
39 struct vm_area_struct *vma;
/* Back-pointer to the owning device state. */
42 struct xenfb_info *info;
/* Main per-device state. */
47 struct task_struct *kthread;
51 struct fb_info *fb_info;
52 struct timer_list refresh;
54 int x1, y1, x2, y2; /* dirty rectangle,
55 protected by dirty_lock */
56 spinlock_t dirty_lock;
60 struct list_head mappings; /* protected by mm_lock */
/* Shared page used for the event rings and mode description. */
63 struct xenfb_page *page;
65 int update_wanted; /* XENFB_TYPE_UPDATE wanted */
67 struct xenbus_device *xbdev;
71 * How the locks work together
73 * There are two locks: spinlock dirty_lock protecting the dirty
74 * rectangle, and mutex mm_lock protecting mappings.
76 * The problem is that dirty rectangle and mappings aren't
77 * independent: the dirty rectangle must cover all faulted pages in
78 * mappings. We need to prove that our locking maintains this
81 * There are several kinds of critical regions:
83 * 1. Holding only dirty_lock: xenfb_refresh(). May run in
84 * interrupts. Extends the dirty rectangle. Trivially preserves
87 * 2. Holding only mm_lock: xenfb_mmap() and xenfb_vm_close(). Touch
88 * only mappings. The former creates unfaulted pages. Preserves
89 * invariant. The latter removes pages. Preserves invariant.
91 * 3. Holding both locks: xenfb_vm_nopage(). Extends the dirty
92 * rectangle and updates mappings consistently. Preserves
95 * 4. The ugliest one: xenfb_update_screen(). Clear the dirty
96 * rectangle and update mappings consistently.
98 * We can't simply hold both locks, because zap_page_range() cannot
99 * be called with a spinlock held.
101 * Therefore, we first clear the dirty rectangle with both locks
102 * held. Then we unlock dirty_lock and update the mappings.
103 * Critical regions that hold only dirty_lock may interfere with
104 * that. This can only be region 1: xenfb_refresh(). But that
105 * just extends the dirty rectangle, which can't harm the
108 * But FIXME: the invariant is too weak. It misses that the fault
109 * record in mappings must be consistent with the mapping of pages in
110 * the associated address space! do_no_page() updates the PTE after
111 * xenfb_vm_nopage() returns, i.e. outside the critical region. This
112 * allows the following race:
114 * X writes to some address in the Xen frame buffer
115 * Fault - call do_no_page()
116 * call xenfb_vm_nopage()
120 * return back to do_no_page()
121 * (preempted, or SMP)
122 * Xen worker thread runs.
125 * find this mapping, zaps its pages (but page not in pte yet)
128 * (back to X process)
129 * put page in X's pte
131 * Oh well, we won't be updating the writes to this page anytime soon.
/* Maximum refresh rate (frames per second) used by the damage timer. */
134 static int xenfb_fps = 20;
/* Framebuffer size in bytes: width * height * bytes-per-pixel. */
135 static unsigned long xenfb_mem_len = XENFB_WIDTH * XENFB_HEIGHT * XENFB_DEPTH / 8;
/* Forward declarations for the device lifecycle and backend wiring. */
137 static int xenfb_remove(struct xenbus_device *);
138 static void xenfb_init_shared_page(struct xenfb_info *);
139 static int xenfb_connect_backend(struct xenbus_device *, struct xenfb_info *);
140 static void xenfb_disconnect_backend(struct xenfb_info *);
/*
 * Queue one XENFB_TYPE_UPDATE event describing the dirty rectangle
 * (x, y, w, h) on the shared out ring and notify the backend via the
 * event channel.  The caller must have verified !xenfb_queue_full().
 * NOTE(review): several lines (braces, local declaration of prod,
 * event.update.x/y assignments) are missing from this extraction.
 */
142 static void xenfb_do_update(struct xenfb_info *info,
143 int x, int y, int w, int h)
145 union xenfb_out_event event;
148 event.type = XENFB_TYPE_UPDATE;
151 event.update.width = w;
152 event.update.height = h;
154 prod = info->page->out_prod;
155 /* caller ensures !xenfb_queue_full() */
156 mb(); /* ensure ring space available */
157 XENFB_OUT_RING_REF(info->page, prod) = event;
158 wmb(); /* ensure ring contents visible */
159 info->page->out_prod = prod + 1;
161 notify_remote_via_irq(info->irq);
/*
 * Return nonzero when the out event ring is full, i.e. the producer
 * index has run a whole ring length ahead of the consumer index.
 * NOTE(review): the local declarations of prod/cons and braces are
 * missing from this extraction.
 */
164 static int xenfb_queue_full(struct xenfb_info *info)
168 prod = info->page->out_prod;
169 cons = info->page->out_cons;
170 return prod - cons == XENFB_OUT_RING_LEN;
/*
 * Flush the accumulated dirty rectangle to the backend ("region 4" in
 * the locking essay above): snapshot and reset the dirty rectangle
 * under both locks, zap the PTEs of every user mapping so future
 * writes re-fault (and re-dirty), then send the update event.
 * zap_page_range() cannot run under a spinlock, hence dirty_lock is
 * dropped before the mappings are walked.
 * NOTE(review): lines declaring flags/x1..y2, the early returns, and
 * the dirty-rectangle snapshot are missing from this extraction.
 */
173 static void xenfb_update_screen(struct xenfb_info *info)
177 struct xenfb_mapping *map;
/* Backend did not request updates (no "request-update" key): no-op. */
179 if (!info->update_wanted)
/* Ring full: skip this round; the damage stays recorded. */
181 if (xenfb_queue_full(info))
184 mutex_lock(&info->mm_lock);
186 spin_lock_irqsave(&info->dirty_lock, flags);
/* Reset the dirty rectangle to "empty" (x1/y1 at max, x2/y2 at 0). */
191 info->x1 = info->y1 = INT_MAX;
192 info->x2 = info->y2 = 0;
193 spin_unlock_irqrestore(&info->dirty_lock, flags);
/* Unmap every faulted page so the next write faults again. */
195 list_for_each_entry(map, &info->mappings, link) {
198 zap_page_range(map->vma, map->vma->vm_start,
199 map->vma->vm_end - map->vma->vm_start, NULL);
203 mutex_unlock(&info->mm_lock);
205 xenfb_do_update(info, x1, y1, x2 - x1, y2 - y1);
/*
 * Kernel thread: push screen updates to the backend whenever the
 * framebuffer is marked dirty, sleeping in between.
 * NOTE(review): the dirty-flag clearing and the return statement are
 * missing from this extraction.
 */
208 static int xenfb_thread(void *data)
210 struct xenfb_info *info = data;
212 while (!kthread_should_stop()) {
215 xenfb_update_screen(info);
217 wait_event_interruptible(info->wq,
218 kthread_should_stop() || info->dirty);
/*
 * fb_ops.fb_setcolreg: store one truecolor palette entry in the
 * pseudo-palette.  The 16-bit red/green/blue components are shifted
 * down to the bit widths declared in var and packed at their offsets.
 * NOTE(review): "if (regno > info->cmap.len)" looks like an off-by-one
 * (valid indices are 0..len-1, so >= would be expected) — confirm
 * against other fbdev drivers before changing.
 */
224 static int xenfb_setcolreg(unsigned regno, unsigned red, unsigned green,
225 unsigned blue, unsigned transp,
226 struct fb_info *info)
230 if (regno > info->cmap.len)
/* Reduce 16-bit components to the per-channel bit widths. */
233 red >>= (16 - info->var.red.length);
234 green >>= (16 - info->var.green.length);
235 blue >>= (16 - info->var.blue.length);
/* Pack channels at their declared bit offsets. */
237 v = (red << info->var.red.offset) |
238 (green << info->var.green.offset) |
239 (blue << info->var.blue.offset);
241 /* FIXME is this sane? check against xxxfb_setcolreg()! */
242 switch (info->var.bits_per_pixel) {
246 ((u32 *)info->pseudo_palette)[regno] = v;
/*
 * Refresh-timer callback: recover the device state from the timer's
 * data word.  NOTE(review): the body that presumably marks the screen
 * dirty and wakes the update thread is missing from this extraction.
 */
253 static void xenfb_timer(unsigned long data)
255 struct xenfb_info *info = (struct xenfb_info *)data;
/*
 * Extend the dirty rectangle by (x1, y1, w, h) and arm the refresh
 * timer if it is not already pending.  Caller holds dirty_lock
 * ("region 1" in the locking essay above).
 * NOTE(review): the rectangle-extension statements are missing from
 * this extraction; only the timer handling survives.
 */
260 static void __xenfb_refresh(struct xenfb_info *info,
261 int x1, int y1, int w, int h)
/* Timer already armed: the pending refresh will pick this damage up. */
277 if (timer_pending(&info->refresh))
/* Delay by one frame period to rate-limit updates to xenfb_fps. */
280 mod_timer(&info->refresh, jiffies + HZ/xenfb_fps);
/*
 * Locked wrapper around __xenfb_refresh(): record damage from contexts
 * that do not already hold dirty_lock (may be called in interrupt
 * context, hence irqsave).
 */
283 static void xenfb_refresh(struct xenfb_info *info,
284 int x1, int y1, int w, int h)
288 spin_lock_irqsave(&info->dirty_lock, flags);
289 __xenfb_refresh(info, x1, y1, w, h);
290 spin_unlock_irqrestore(&info->dirty_lock, flags);
/* fb_ops.fb_fillrect: draw via the generic cfb helper, then record the
 * affected rectangle as dirty so the backend gets an update. */
293 static void xenfb_fillrect(struct fb_info *p, const struct fb_fillrect *rect)
295 struct xenfb_info *info = p->par;
297 cfb_fillrect(p, rect);
298 xenfb_refresh(info, rect->dx, rect->dy, rect->width, rect->height);
/* fb_ops.fb_imageblit: blit via the generic cfb helper, then record
 * the affected rectangle as dirty. */
301 static void xenfb_imageblit(struct fb_info *p, const struct fb_image *image)
303 struct xenfb_info *info = p->par;
305 cfb_imageblit(p, image);
306 xenfb_refresh(info, image->dx, image->dy, image->width, image->height);
/* fb_ops.fb_copyarea: copy via the generic cfb helper, then record the
 * destination rectangle as dirty. */
309 static void xenfb_copyarea(struct fb_info *p, const struct fb_copyarea *area)
311 struct xenfb_info *info = p->par;
313 cfb_copyarea(p, area);
314 xenfb_refresh(info, area->dx, area->dy, area->width, area->height);
/* vm_operations.open: a vma sharing this mapping record was created
 * (fork/split); take an extra reference on the record. */
317 static void xenfb_vm_open(struct vm_area_struct *vma)
319 struct xenfb_mapping *map = vma->vm_private_data;
320 atomic_inc(&map->map_refs);
/*
 * vm_operations.close: drop a reference on the mapping record; on the
 * last reference unlink it from info->mappings under mm_lock
 * ("region 2" in the locking essay above).
 * NOTE(review): the kfree of the record is presumably on the missing
 * line after list_del — confirm against the full source.
 */
323 static void xenfb_vm_close(struct vm_area_struct *vma)
325 struct xenfb_mapping *map = vma->vm_private_data;
326 struct xenfb_info *info = map->info;
328 mutex_lock(&info->mm_lock);
329 if (atomic_dec_and_test(&map->map_refs)) {
330 list_del(&map->link);
333 mutex_unlock(&info->mm_lock);
/*
 * vm_operations.nopage fault handler ("region 3" in the locking essay
 * above): hand out the framebuffer page backing the faulting address
 * and mark the scanlines covered by that page dirty, holding both
 * locks so the dirty rectangle stays consistent with the mappings.
 * NOTE(review): the declarations of flags/page/y1/y2, the faulted-page
 * accounting and the final return are missing from this extraction.
 */
336 static struct page *xenfb_vm_nopage(struct vm_area_struct *vma,
337 unsigned long vaddr, int *type)
339 struct xenfb_mapping *map = vma->vm_private_data;
340 struct xenfb_info *info = map->info;
341 int pgnr = (vaddr - vma->vm_start) >> PAGE_SHIFT;
/* Fault beyond the framebuffer: signal a bus error. */
346 if (pgnr >= info->nr_pages)
347 return NOPAGE_SIGBUS;
349 mutex_lock(&info->mm_lock);
350 spin_lock_irqsave(&info->dirty_lock, flags);
351 page = info->pages[pgnr];
/* Dirty every scanline this page touches, clamped to yres. */
355 y1 = pgnr * PAGE_SIZE / info->fb_info->fix.line_length;
356 y2 = (pgnr * PAGE_SIZE + PAGE_SIZE - 1) / info->fb_info->fix.line_length;
357 if (y2 > info->fb_info->var.yres)
358 y2 = info->fb_info->var.yres;
359 __xenfb_refresh(info, 0, y1, info->fb_info->var.xres, y2 - y1);
360 spin_unlock_irqrestore(&info->dirty_lock, flags);
361 mutex_unlock(&info->mm_lock);
364 *type = VM_FAULT_MINOR;
/* VM callbacks for userspace mappings of the framebuffer. */
369 static struct vm_operations_struct xenfb_vm_ops = {
370 .open = xenfb_vm_open,
371 .close = xenfb_vm_close,
372 .nopage = xenfb_vm_nopage,
/*
 * fb_ops.fb_mmap: validate the vma (writable, shared, offset 0, not
 * larger than the framebuffer), allocate a mapping record, link it
 * into info->mappings under mm_lock, and install xenfb_vm_ops so pages
 * are populated lazily via the nopage handler.
 * NOTE(review): the error returns after each check, the allocation-
 * failure check, and map->vma/map->info initialisation are missing
 * from this extraction.
 */
375 static int xenfb_mmap(struct fb_info *fb_info, struct vm_area_struct *vma)
377 struct xenfb_info *info = fb_info->par;
378 struct xenfb_mapping *map;
/* Only writable, shared mappings of the whole buffer are supported. */
381 if (!(vma->vm_flags & VM_WRITE))
383 if (!(vma->vm_flags & VM_SHARED))
385 if (vma->vm_pgoff != 0)
388 map_pages = (vma->vm_end - vma->vm_start + PAGE_SIZE-1) >> PAGE_SHIFT;
389 if (map_pages > info->nr_pages)
392 map = kzalloc(sizeof(*map), GFP_KERNEL);
399 atomic_set(&map->map_refs, 1);
401 mutex_lock(&info->mm_lock);
402 list_add(&map->link, &info->mappings);
403 mutex_unlock(&info->mm_lock);
405 vma->vm_ops = &xenfb_vm_ops;
/* Pages are zapped and re-faulted on update; forbid remap tricks. */
406 vma->vm_flags |= (VM_DONTEXPAND | VM_RESERVED);
407 vma->vm_private_data = map;
/* fbdev operations: generic cfb drawing wrapped with dirty tracking,
 * plus the custom mmap that enables lazy faulting. */
412 static struct fb_ops xenfb_fb_ops = {
413 .owner = THIS_MODULE,
414 .fb_setcolreg = xenfb_setcolreg,
415 .fb_fillrect = xenfb_fillrect,
416 .fb_copyarea = xenfb_copyarea,
417 .fb_imageblit = xenfb_imageblit,
418 .fb_mmap = xenfb_mmap,
/*
 * Event-channel interrupt handler: the frontend consumes no in events,
 * so just advance in_cons past everything and re-notify the backend.
 * NOTE(review): first parameter is named "rq" — presumably a typo for
 * "irq"; the IRQ_HANDLED return is missing from this extraction.
 */
421 static irqreturn_t xenfb_event_handler(int rq, void *dev_id)
424 * No in events recognized, simply ignore them all.
425 * If you need to recognize some, see xenkbd's input_handler()
426 * for how to do that.
428 struct xenfb_info *info = dev_id;
429 struct xenfb_page *page = info->page;
431 if (page->in_cons != page->in_prod) {
432 info->page->in_cons = info->page->in_prod;
433 notify_remote_via_irq(info->irq);
/* Translate a vmalloc'd kernel address to its machine frame number. */
438 static unsigned long vmalloc_to_mfn(void *address)
440 return pfn_to_mfn(vmalloc_to_pfn(address));
/*
 * xenbus probe: allocate and initialise the device state, the vmalloc'd
 * framebuffer, the shared page, and the fbdev structures; register the
 * framebuffer, start the update thread, and connect to the backend.
 * NOTE(review): many lines are missing from this extraction (error
 * checks after allocations, goto-cleanup labels and the error_nomem/
 * error exit paths among them).
 */
443 static int __devinit xenfb_probe(struct xenbus_device *dev,
444 const struct xenbus_device_id *id)
446 struct xenfb_info *info;
447 struct fb_info *fb_info;
450 info = kzalloc(sizeof(*info), GFP_KERNEL);
452 xenbus_dev_fatal(dev, -ENOMEM, "allocating info structure");
455 dev->dev.driver_data = info;
/* Start with an empty dirty rectangle (x1/y1 at max). */
458 info->x1 = info->y1 = INT_MAX;
459 spin_lock_init(&info->dirty_lock);
460 mutex_init(&info->mm_lock);
461 init_waitqueue_head(&info->wq);
462 init_timer(&info->refresh);
463 info->refresh.function = xenfb_timer;
464 info->refresh.data = (unsigned long)info;
465 INIT_LIST_HEAD(&info->mappings);
/* Framebuffer memory, zeroed; backed by individually shared pages. */
467 info->fb = vmalloc(xenfb_mem_len);
468 if (info->fb == NULL)
470 memset(info->fb, 0, xenfb_mem_len);
472 info->nr_pages = (xenfb_mem_len + PAGE_SIZE - 1) >> PAGE_SHIFT;
474 info->pages = kmalloc(sizeof(struct page *) * info->nr_pages,
476 if (info->pages == NULL)
479 info->mfns = vmalloc(sizeof(unsigned long) * info->nr_pages);
483 /* set up shared page */
484 info->page = (void *)__get_free_page(GFP_KERNEL | __GFP_ZERO);
488 xenfb_init_shared_page(info);
490 fb_info = framebuffer_alloc(sizeof(u32) * 256, NULL);
491 /* see fishy hackery below */
495 /* FIXME fishy hackery */
496 fb_info->pseudo_palette = fb_info->par;
499 fb_info->screen_base = info->fb;
501 fb_info->fbops = &xenfb_fb_ops;
/* Mode geometry comes from the freshly initialised shared page. */
502 fb_info->var.xres_virtual = fb_info->var.xres = info->page->width;
503 fb_info->var.yres_virtual = fb_info->var.yres = info->page->height;
504 fb_info->var.bits_per_pixel = info->page->depth;
/* Fixed 8:8:8 truecolor layout: {offset, length, msb_right}. */
506 fb_info->var.red = (struct fb_bitfield){16, 8, 0};
507 fb_info->var.green = (struct fb_bitfield){8, 8, 0};
508 fb_info->var.blue = (struct fb_bitfield){0, 8, 0};
510 fb_info->var.activate = FB_ACTIVATE_NOW;
511 fb_info->var.height = -1;
512 fb_info->var.width = -1;
513 fb_info->var.vmode = FB_VMODE_NONINTERLACED;
515 fb_info->fix.visual = FB_VISUAL_TRUECOLOR;
516 fb_info->fix.line_length = info->page->line_length;
517 fb_info->fix.smem_start = 0;
518 fb_info->fix.smem_len = xenfb_mem_len;
519 strcpy(fb_info->fix.id, "xen");
520 fb_info->fix.type = FB_TYPE_PACKED_PIXELS;
521 fb_info->fix.accel = FB_ACCEL_NONE;
523 fb_info->flags = FBINFO_FLAG_DEFAULT;
525 ret = fb_alloc_cmap(&fb_info->cmap, 256, 0);
527 framebuffer_release(fb_info);
528 xenbus_dev_fatal(dev, ret, "fb_alloc_cmap");
532 ret = register_framebuffer(fb_info);
/* NOTE(review): info->fb_info is not assigned until below, so this
 * error path dereferences a NULL fb_info pointer — should likely be
 * &fb_info->cmap.  Confirm against the full source. */
534 fb_dealloc_cmap(&info->fb_info->cmap);
535 framebuffer_release(fb_info);
536 xenbus_dev_fatal(dev, ret, "register_framebuffer");
539 info->fb_info = fb_info;
541 /* FIXME should this be delayed until backend XenbusStateConnected? */
542 info->kthread = kthread_run(xenfb_thread, info, "xenfb thread");
543 if (IS_ERR(info->kthread)) {
544 ret = PTR_ERR(info->kthread);
545 info->kthread = NULL;
/* NOTE(review): copy-pasted message — this failure is kthread_run,
 * not register_framebuffer. */
546 xenbus_dev_fatal(dev, ret, "register_framebuffer");
550 ret = xenfb_connect_backend(dev, info);
/* Shared out-of-memory error path (reached via missing gotos). */
558 xenbus_dev_fatal(dev, ret, "allocating device memory");
/* xenbus resume (after migration/save-restore): tear down the old
 * backend connection, reinitialise the shared page (MFNs may have
 * changed), and reconnect. */
564 static int xenfb_resume(struct xenbus_device *dev)
566 struct xenfb_info *info = dev->dev.driver_data;
568 xenfb_disconnect_backend(info);
569 xenfb_init_shared_page(info);
570 return xenfb_connect_backend(dev, info);
/*
 * xenbus remove: stop the refresh timer and update thread, disconnect
 * from the backend, unregister and release the fbdev objects, and free
 * the shared page.  NOTE(review): the frees of info->fb, info->pages,
 * info->mfns and info itself, plus the return, are missing from this
 * extraction.
 */
573 static int xenfb_remove(struct xenbus_device *dev)
575 struct xenfb_info *info = dev->dev.driver_data;
577 del_timer(&info->refresh);
579 kthread_stop(info->kthread);
580 xenfb_disconnect_backend(info);
582 unregister_framebuffer(info->fb_info);
583 fb_dealloc_cmap(&info->fb_info->cmap);
584 framebuffer_release(info->fb_info);
586 free_page((unsigned long)info->page);
/*
 * Populate the shared page: cache the struct page and MFN of every
 * framebuffer page, publish the MFN list via a one-entry page
 * directory, describe the video mode, and reset both event rings.
 */
595 static void xenfb_init_shared_page(struct xenfb_info *info)
599 for (i = 0; i < info->nr_pages; i++)
600 info->pages[i] = vmalloc_to_page(info->fb + i * PAGE_SIZE);
602 for (i = 0; i < info->nr_pages; i++)
603 info->mfns[i] = vmalloc_to_mfn(info->fb + i * PAGE_SIZE);
/* Page directory: slot 0 points at the MFN array, slot 1 unused. */
605 info->page->pd[0] = vmalloc_to_mfn(info->mfns);
606 info->page->pd[1] = 0;
607 info->page->width = XENFB_WIDTH;
608 info->page->height = XENFB_HEIGHT;
609 info->page->depth = XENFB_DEPTH;
610 info->page->line_length = (info->page->depth / 8) * info->page->width;
611 info->page->mem_length = xenfb_mem_len;
/* Empty rings: producer == consumer. */
612 info->page->in_cons = info->page->in_prod = 0;
613 info->page->out_cons = info->page->out_prod = 0;
/*
 * Connect to the backend: bind an event channel with our interrupt
 * handler, then publish page-ref, event-channel, protocol ABI and
 * feature-update in xenstore inside a transaction, and finally switch
 * to Initialised.  On xenstore failure the transaction is aborted.
 * NOTE(review): per-call error checks, the "again:" retry on -EAGAIN,
 * the info->irq assignment and several returns are missing from this
 * extraction.
 */
616 static int xenfb_connect_backend(struct xenbus_device *dev,
617 struct xenfb_info *info)
620 struct xenbus_transaction xbt;
622 ret = bind_listening_port_to_irqhandler(
623 dev->otherend_id, xenfb_event_handler, 0, "xenfb", info);
625 xenbus_dev_fatal(dev, ret,
626 "bind_listening_port_to_irqhandler");
632 ret = xenbus_transaction_start(&xbt);
634 xenbus_dev_fatal(dev, ret, "starting transaction");
637 ret = xenbus_printf(xbt, dev->nodename, "page-ref", "%lu",
638 virt_to_mfn(info->page));
641 ret = xenbus_printf(xbt, dev->nodename, "event-channel", "%u",
642 irq_to_evtchn_port(info->irq));
645 ret = xenbus_printf(xbt, dev->nodename, "protocol", "%s",
646 XEN_IO_PROTO_ABI_NATIVE);
649 ret = xenbus_printf(xbt, dev->nodename, "feature-update", "1");
652 ret = xenbus_transaction_end(xbt, 0);
656 xenbus_dev_fatal(dev, ret, "completing transaction");
660 xenbus_switch_state(dev, XenbusStateInitialised);
/* Error path: abort the transaction and report. */
664 xenbus_transaction_end(xbt, 1);
665 xenbus_dev_fatal(dev, ret, "writing xenstore");
/* Disconnect from the backend: release the event-channel IRQ.
 * NOTE(review): the guard on info->irq and its reset to -1 appear to
 * be on lines missing from this extraction. */
669 static void xenfb_disconnect_backend(struct xenfb_info *info)
672 unbind_from_irqhandler(info->irq, info);
/*
 * xenbus otherend_changed: react to backend state transitions.
 * InitWait -> we go Connected; Connected -> read "request-update" to
 * learn whether the backend wants explicit update events; Closing ->
 * close the frontend.  NOTE(review): break statements, the InitWait
 * label and the val declaration are missing from this extraction.
 */
676 static void xenfb_backend_changed(struct xenbus_device *dev,
677 enum xenbus_state backend_state)
679 struct xenfb_info *info = dev->dev.driver_data;
682 switch (backend_state) {
683 case XenbusStateInitialising:
684 case XenbusStateInitialised:
685 case XenbusStateUnknown:
686 case XenbusStateClosed:
689 case XenbusStateInitWait:
691 xenbus_switch_state(dev, XenbusStateConnected);
694 case XenbusStateConnected:
696 * Work around xenbus race condition: If backend goes
697 * through InitWait to Connected fast enough, we can
698 * get Connected twice here.
700 if (dev->state != XenbusStateConnected)
701 goto InitWait; /* no InitWait seen yet, fudge it */
703 if (xenbus_scanf(XBT_NIL, info->xbdev->otherend,
704 "request-update", "%d", &val) < 0)
707 info->update_wanted = 1;
710 case XenbusStateClosing:
711 // FIXME is this safe in any dev->state?
712 xenbus_frontend_closed(dev);
/* Device IDs this frontend binds to (the "vfb" entry is on a missing
 * line) and the xenbus driver definition wiring up the callbacks. */
717 static struct xenbus_device_id xenfb_ids[] = {
722 static struct xenbus_driver xenfb = {
724 .owner = THIS_MODULE,
726 .probe = xenfb_probe,
727 .remove = xenfb_remove,
728 .resume = xenfb_resume,
729 .otherend_changed = xenfb_backend_changed,
/* Module init: register the frontend only when running as a Xen
 * domU (dom0 drives real hardware and has no use for it). */
732 static int __init xenfb_init(void)
734 if (!is_running_on_xen())
737 /* Nothing to do if running in dom0. */
738 if (is_initial_xendomain())
741 return xenbus_register_frontend(&xenfb);
/* Module exit: unregister the xenbus driver. */
744 static void __exit xenfb_cleanup(void)
746 return xenbus_unregister_driver(&xenfb);
/* Standard module entry/exit points and license declaration. */
749 module_init(xenfb_init);
750 module_exit(xenfb_cleanup);
752 MODULE_LICENSE("GPL");