1 /******************************************************************************
2 * Client-facing interface for the Xenbus driver. In other words, the
3 * interface between the Xenbus and the device-specific code, be it the
4 * frontend or the backend of that driver.
6 * Copyright (C) 2005 XenSource Ltd
8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License version 2
10 * as published by the Free Software Foundation; or, when distributed
11 * separately from the Linux kernel or incorporated into other
12 * software packages, subject to the following license:
14 * Permission is hereby granted, free of charge, to any person obtaining a copy
15 * of this source file (the "Software"), to deal in the Software without
16 * restriction, including without limitation the rights to use, copy, modify,
17 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
18 * and to permit persons to whom the Software is furnished to do so, subject to
19 * the following conditions:
21 * The above copyright notice and this permission notice shall be included in
22 * all copies or substantial portions of the Software.
24 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
25 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
26 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
27 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
28 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */
33 #include <linux/slab.h>
34 #if defined(CONFIG_XEN) || defined(MODULE)
35 #include <xen/evtchn.h>
36 #include <xen/gnttab.h>
38 #include <linux/types.h>
39 #include <linux/spinlock.h>
40 #include <linux/vmalloc.h>
41 #include <linux/export.h>
42 #include <asm/xen/hypervisor.h>
43 #include <asm/xen/page.h>
44 #include <xen/interface/xen.h>
45 #include <xen/interface/event_channel.h>
46 #include <xen/balloon.h>
47 #include <xen/events.h>
48 #include <xen/grant_table.h>
50 #include <xen/xenbus.h>
53 #if defined(CONFIG_PARAVIRT_XEN)
54 #include "xenbus_probe.h"
56 struct xenbus_map_node {
57 struct list_head next;
59 struct vm_struct *area; /* PV */
60 struct page *page; /* HVM */
62 grant_handle_t handle;
/* Protects xenbus_valloc_pages, the list of currently mapped ring pages. */
static DEFINE_SPINLOCK(xenbus_valloc_lock);
static LIST_HEAD(xenbus_valloc_pages);
68 struct xenbus_ring_ops {
69 int (*map)(struct xenbus_device *dev, grant_ref_t gnt, void **vaddr);
70 int (*unmap)(struct xenbus_device *dev, void *vaddr);
73 static const struct xenbus_ring_ops *ring_ops __read_mostly;
74 #elif defined(HAVE_XEN_PLATFORM_COMPAT_H)
75 #include <xen/platform-compat.h>
78 const char *xenbus_strstate(enum xenbus_state state)
80 static const char *const name[] = {
81 [ XenbusStateUnknown ] = "Unknown",
82 [ XenbusStateInitialising ] = "Initialising",
83 [ XenbusStateInitWait ] = "InitWait",
84 [ XenbusStateInitialised ] = "Initialised",
85 [ XenbusStateConnected ] = "Connected",
86 [ XenbusStateClosing ] = "Closing",
87 [ XenbusStateClosed ] = "Closed",
88 [ XenbusStateReconfiguring ] = "Reconfiguring",
89 [ XenbusStateReconfigured ] = "Reconfigured",
91 return (state < ARRAY_SIZE(name)) ? name[state] : "INVALID";
93 EXPORT_SYMBOL_GPL(xenbus_strstate);
96 * xenbus_watch_path - register a watch
98 * @path: path to watch
99 * @watch: watch to register
100 * @callback: callback to register
102 * Register a @watch on the given path, using the given xenbus_watch structure
103 * for storage, and the given @callback function as the callback. Return 0 on
104 * success, or -errno on error. On success, the given @path will be saved as
105 * @watch->node, and remains the caller's to free. On error, @watch->node will
106 * be NULL, the device will switch to %XenbusStateClosing, and the error will
107 * be saved in the store.
109 int xenbus_watch_path(struct xenbus_device *dev, const char *path,
110 struct xenbus_watch *watch,
111 void (*callback)(struct xenbus_watch *,
112 const char **, unsigned int))
117 watch->callback = callback;
119 err = register_xenbus_watch(watch);
123 watch->callback = NULL;
124 xenbus_dev_fatal(dev, err, "adding watch on %s", path);
129 EXPORT_SYMBOL_GPL(xenbus_watch_path);
132 #if defined(CONFIG_XEN) || defined(MODULE)
133 int xenbus_watch_path2(struct xenbus_device *dev, const char *path,
134 const char *path2, struct xenbus_watch *watch,
135 void (*callback)(struct xenbus_watch *,
136 const char **, unsigned int))
139 char *state = kasprintf(GFP_NOIO | __GFP_HIGH, "%s/%s", path, path2);
141 xenbus_dev_fatal(dev, -ENOMEM, "allocating path for watch");
144 err = xenbus_watch_path(dev, state, watch, callback);
150 EXPORT_SYMBOL_GPL(xenbus_watch_path2);
153 * xenbus_watch_pathfmt - register a watch on a sprintf-formatted path
154 * @dev: xenbus device
155 * @watch: watch to register
156 * @callback: callback to register
157 * @pathfmt: format of path to watch
159 * Register a watch on the given @path, using the given xenbus_watch
160 * structure for storage, and the given @callback function as the callback.
161 * Return 0 on success, or -errno on error. On success, the watched path
162 * (@path/@path2) will be saved as @watch->node, and becomes the caller's to
163 * kfree(). On error, watch->node will be NULL, so the caller has nothing to
164 * free, the device will switch to %XenbusStateClosing, and the error will be
165 * saved in the store.
167 int xenbus_watch_pathfmt(struct xenbus_device *dev,
168 struct xenbus_watch *watch,
169 void (*callback)(struct xenbus_watch *,
170 const char **, unsigned int),
171 const char *pathfmt, ...)
177 va_start(ap, pathfmt);
178 path = kvasprintf(GFP_NOIO | __GFP_HIGH, pathfmt, ap);
182 xenbus_dev_fatal(dev, -ENOMEM, "allocating path for watch");
185 err = xenbus_watch_path(dev, path, watch, callback);
191 EXPORT_SYMBOL_GPL(xenbus_watch_pathfmt);
194 static void xenbus_switch_fatal(struct xenbus_device *, int, int,
198 __xenbus_switch_state(struct xenbus_device *dev,
199 enum xenbus_state state, int depth)
201 /* We check whether the state is currently set to the given value, and
202 if not, then the state is set. We don't want to unconditionally
203 write the given state, because we don't want to fire watches
204 unnecessarily. Furthermore, if the node has gone, we don't write
205 to it, as the device will be tearing down, and we don't want to
206 resurrect that directory.
208 Note that, because of this cached value of our state, this
209 function will not take a caller's Xenstore transaction
210 (something it was trying to in the past) because dev->state
211 would not get reset if the transaction was aborted.
214 struct xenbus_transaction xbt;
218 if (state == dev->state)
224 err = xenbus_transaction_start(&xbt);
226 xenbus_switch_fatal(dev, depth, err, "starting transaction");
230 err = xenbus_scanf(xbt, dev->nodename, "state", "%d", ¤t_state);
234 err = xenbus_printf(xbt, dev->nodename, "state", "%d", state);
236 xenbus_switch_fatal(dev, depth, err, "writing new state");
242 err = xenbus_transaction_end(xbt, abort);
244 if (err == -EAGAIN && !abort)
246 xenbus_switch_fatal(dev, depth, err, "ending transaction");
254 * xenbus_switch_state
255 * @dev: xenbus device
258 * Advertise in the store a change of the given driver to the given new_state.
259 * Return 0 on success, or -errno on error. On error, the device will switch
260 * to XenbusStateClosing, and the error will be saved in the store.
262 int xenbus_switch_state(struct xenbus_device *dev, enum xenbus_state state)
264 return __xenbus_switch_state(dev, state, 0);
266 EXPORT_SYMBOL_GPL(xenbus_switch_state);
268 int xenbus_frontend_closed(struct xenbus_device *dev)
270 xenbus_switch_state(dev, XenbusStateClosed);
271 complete(&dev->down);
274 EXPORT_SYMBOL_GPL(xenbus_frontend_closed);
277 * Return the path to the error node for the given device, or NULL on failure.
278 * If the value returned is non-NULL, then it is the caller's to kfree.
280 static char *error_path(struct xenbus_device *dev)
282 return kasprintf(GFP_KERNEL, "error/%s", dev->nodename);
286 static void _dev_error(struct xenbus_device *dev, int err,
287 const char *fmt, va_list *ap)
289 char *printf_buffer, *path_buffer;
290 struct va_format vaf = { .fmt = fmt, .va = ap };
292 printf_buffer = kasprintf(GFP_KERNEL, "%i %pV", -err, &vaf);
294 dev_err(&dev->dev, "%s\n", printf_buffer);
296 path_buffer = error_path(dev);
297 if (!printf_buffer || !path_buffer
298 || xenbus_write(XBT_NIL, path_buffer, "error", printf_buffer))
300 "xenbus: failed to write error node for %s (%s)\n",
301 dev->nodename, printf_buffer);
303 kfree(printf_buffer);
310 * @dev: xenbus device
311 * @err: error to report
312 * @fmt: error message format
314 * Report the given negative errno into the store, along with the given
317 void xenbus_dev_error(struct xenbus_device *dev, int err, const char *fmt, ...)
322 _dev_error(dev, err, fmt, &ap);
325 EXPORT_SYMBOL_GPL(xenbus_dev_error);
330 * @dev: xenbus device
331 * @err: error to report
332 * @fmt: error message format
334 * Equivalent to xenbus_dev_error(dev, err, fmt, args), followed by
335 * xenbus_switch_state(dev, XenbusStateClosing) to schedule an orderly
336 * closedown of this driver and its peer.
338 void xenbus_dev_fatal(struct xenbus_device *dev, int err, const char *fmt, ...)
343 _dev_error(dev, err, fmt, &ap);
346 xenbus_switch_state(dev, XenbusStateClosing);
348 EXPORT_SYMBOL_GPL(xenbus_dev_fatal);
351 * Equivalent to xenbus_dev_fatal(dev, err, fmt, args), but helps
352 * avoiding recursion within xenbus_switch_state.
354 static void xenbus_switch_fatal(struct xenbus_device *dev, int depth, int err,
355 const char *fmt, ...)
360 _dev_error(dev, err, fmt, &ap);
364 __xenbus_switch_state(dev, XenbusStateClosing, 1);
369 * @dev: xenbus device
370 * @ring_mfn: mfn of ring to grant
372 * Grant access to the given @ring_mfn to the peer of the given device. Return
373 * 0 on success, or -errno on error. On error, the device will switch to
374 * XenbusStateClosing, and the error will be saved in the store.
376 int xenbus_grant_ring(struct xenbus_device *dev, unsigned long ring_mfn)
378 int err = gnttab_grant_foreign_access(dev->otherend_id, ring_mfn, 0);
380 xenbus_dev_fatal(dev, err, "granting access to ring page");
383 EXPORT_SYMBOL_GPL(xenbus_grant_ring);
387 * Allocate an event channel for the given xenbus_device, assigning the newly
388 * created local port to *port. Return 0 on success, or -errno on error. On
389 * error, the device will switch to XenbusStateClosing, and the error will be
390 * saved in the store.
392 int xenbus_alloc_evtchn(struct xenbus_device *dev, int *port)
394 struct evtchn_alloc_unbound alloc_unbound;
397 alloc_unbound.dom = DOMID_SELF;
398 alloc_unbound.remote_dom = dev->otherend_id;
400 err = HYPERVISOR_event_channel_op(EVTCHNOP_alloc_unbound,
403 xenbus_dev_fatal(dev, err, "allocating event channel");
405 *port = alloc_unbound.port;
409 EXPORT_SYMBOL_GPL(xenbus_alloc_evtchn);
#if 0 /* !defined(CONFIG_XEN) && !defined(MODULE) */
/**
 * xenbus_bind_evtchn - bind to an existing interdomain event channel
 * @dev: xenbus device
 * @remote_port: peer's port number
 * @port: filled in with the bound local port
 *
 * Bind to an existing interdomain event channel in another domain. Returns 0
 * on success and stores the local port in *port. On error, returns -errno,
 * switches the device to XenbusStateClosing, and saves the error in XenStore.
 */
int xenbus_bind_evtchn(struct xenbus_device *dev, int remote_port, int *port)
{
	struct evtchn_bind_interdomain bind_interdomain;
	int err;

	bind_interdomain.remote_dom = dev->otherend_id;
	bind_interdomain.remote_port = remote_port;

	err = HYPERVISOR_event_channel_op(EVTCHNOP_bind_interdomain,
					  &bind_interdomain);
	if (err)
		xenbus_dev_fatal(dev, err,
				 "binding to event channel %d from domain %d",
				 remote_port, dev->otherend_id);
	else
		*port = bind_interdomain.local_port;

	return err;
}
EXPORT_SYMBOL_GPL(xenbus_bind_evtchn);
#endif
442 * Free an existing event channel. Returns 0 on success or -errno on error.
444 int xenbus_free_evtchn(struct xenbus_device *dev, int port)
446 struct evtchn_close close;
451 err = HYPERVISOR_event_channel_op(EVTCHNOP_close, &close);
453 xenbus_dev_error(dev, err, "freeing event channel %d", port);
457 EXPORT_SYMBOL_GPL(xenbus_free_evtchn);
460 #if !defined(CONFIG_XEN) && !defined(MODULE)
462 * xenbus_map_ring_valloc
463 * @dev: xenbus device
464 * @gnt_ref: grant reference
465 * @vaddr: pointer to address to be filled out by mapping
467 * Based on Rusty Russell's skeleton driver's map_page.
468 * Map a page of memory into this domain from another domain's grant table.
469 * xenbus_map_ring_valloc allocates a page of virtual address space, maps the
470 * page to that address, and sets *vaddr to that address.
471 * Returns 0 on success, and GNTST_* (see xen/include/interface/grant_table.h)
472 * or -ENOMEM on error. If an error is returned, device will switch to
473 * XenbusStateClosing and the error message will be saved in XenStore.
475 int xenbus_map_ring_valloc(struct xenbus_device *dev, grant_ref_t gnt_ref, void **vaddr)
477 return ring_ops->map(dev, gnt_ref, vaddr);
479 EXPORT_SYMBOL_GPL(xenbus_map_ring_valloc);
481 static int xenbus_map_ring_valloc_pv(struct xenbus_device *dev,
482 grant_ref_t gnt_ref, void **vaddr)
484 struct gnttab_map_grant_ref op = {
485 .flags = GNTMAP_host_map | GNTMAP_contains_pte,
487 .dom = dev->otherend_id,
489 struct xenbus_map_node *node;
490 struct vm_struct *area;
495 node = kzalloc(sizeof(*node), GFP_KERNEL);
499 area = alloc_vm_area(PAGE_SIZE, &pte);
505 op.host_addr = arbitrary_virt_to_machine(pte).maddr;
507 if (HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, &op, 1))
510 if (op.status != GNTST_okay) {
513 xenbus_dev_fatal(dev, op.status,
514 "mapping in shared page %d from domain %d",
515 gnt_ref, dev->otherend_id);
519 node->handle = op.handle;
522 spin_lock(&xenbus_valloc_lock);
523 list_add(&node->next, &xenbus_valloc_pages);
524 spin_unlock(&xenbus_valloc_lock);
530 static int xenbus_map_ring_valloc_hvm(struct xenbus_device *dev,
531 grant_ref_t gnt_ref, void **vaddr)
533 struct xenbus_map_node *node;
539 node = kzalloc(sizeof(*node), GFP_KERNEL);
543 err = alloc_xenballooned_pages(1, &node->page, false /* lowmem */);
547 addr = pfn_to_kaddr(page_to_pfn(node->page));
549 err = xenbus_map_ring(dev, gnt_ref, &node->handle, addr);
553 spin_lock(&xenbus_valloc_lock);
554 list_add(&node->next, &xenbus_valloc_pages);
555 spin_unlock(&xenbus_valloc_lock);
561 free_xenballooned_pages(1, &node->page);
569 * @dev: xenbus device
570 * @gnt_ref: grant reference
571 * @handle: pointer to grant handle to be filled
572 * @vaddr: address to be mapped to
574 * Map a page of memory into this domain from another domain's grant table.
575 * xenbus_map_ring does not allocate the virtual address space (you must do
576 * this yourself!). It only maps in the page to the specified address.
577 * Returns 0 on success, and GNTST_* (see xen/include/interface/grant_table.h)
578 * or -ENOMEM on error. If an error is returned, device will switch to
579 * XenbusStateClosing and the error message will be saved in XenStore.
581 int xenbus_map_ring(struct xenbus_device *dev, grant_ref_t gnt_ref,
582 grant_handle_t *handle, void *vaddr)
584 struct gnttab_map_grant_ref op;
586 gnttab_set_map_op(&op, (phys_addr_t)vaddr, GNTMAP_host_map, gnt_ref,
589 if (HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, &op, 1))
592 if (op.status != GNTST_okay) {
593 xenbus_dev_fatal(dev, op.status,
594 "mapping in shared page %d from domain %d",
595 gnt_ref, dev->otherend_id);
601 EXPORT_SYMBOL_GPL(xenbus_map_ring);
605 * xenbus_unmap_ring_vfree
606 * @dev: xenbus device
607 * @vaddr: addr to unmap
609 * Based on Rusty Russell's skeleton driver's unmap_page.
610 * Unmap a page of memory in this domain that was imported from another domain.
611 * Use xenbus_unmap_ring_vfree if you mapped in your memory with
612 * xenbus_map_ring_valloc (it will free the virtual address space).
613 * Returns 0 on success and returns GNTST_* on error
614 * (see xen/include/interface/grant_table.h).
616 int xenbus_unmap_ring_vfree(struct xenbus_device *dev, void *vaddr)
618 return ring_ops->unmap(dev, vaddr);
620 EXPORT_SYMBOL_GPL(xenbus_unmap_ring_vfree);
622 static int xenbus_unmap_ring_vfree_pv(struct xenbus_device *dev, void *vaddr)
624 struct xenbus_map_node *node;
625 struct gnttab_unmap_grant_ref op = {
626 .host_addr = (unsigned long)vaddr,
630 spin_lock(&xenbus_valloc_lock);
631 list_for_each_entry(node, &xenbus_valloc_pages, next) {
632 if (node->area->addr == vaddr) {
633 list_del(&node->next);
639 spin_unlock(&xenbus_valloc_lock);
642 xenbus_dev_error(dev, -ENOENT,
643 "can't find mapped virtual address %p", vaddr);
644 return GNTST_bad_virt_addr;
647 op.handle = node->handle;
648 op.host_addr = arbitrary_virt_to_machine(
649 lookup_address((unsigned long)vaddr, &level)).maddr;
651 if (HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, &op, 1))
654 if (op.status == GNTST_okay)
655 free_vm_area(node->area);
657 xenbus_dev_error(dev, op.status,
658 "unmapping page at handle %d error %d",
659 node->handle, op.status);
665 static int xenbus_unmap_ring_vfree_hvm(struct xenbus_device *dev, void *vaddr)
668 struct xenbus_map_node *node;
671 spin_lock(&xenbus_valloc_lock);
672 list_for_each_entry(node, &xenbus_valloc_pages, next) {
673 addr = pfn_to_kaddr(page_to_pfn(node->page));
675 list_del(&node->next);
681 spin_unlock(&xenbus_valloc_lock);
684 xenbus_dev_error(dev, -ENOENT,
685 "can't find mapped virtual address %p", vaddr);
686 return GNTST_bad_virt_addr;
689 rv = xenbus_unmap_ring(dev, node->handle, addr);
692 free_xenballooned_pages(1, &node->page);
694 WARN(1, "Leaking %p\n", vaddr);
702 * @dev: xenbus device
703 * @handle: grant handle
704 * @vaddr: addr to unmap
706 * Unmap a page of memory in this domain that was imported from another domain.
707 * Returns 0 on success and returns GNTST_* on error
708 * (see xen/include/interface/grant_table.h).
710 int xenbus_unmap_ring(struct xenbus_device *dev,
711 grant_handle_t handle, void *vaddr)
713 struct gnttab_unmap_grant_ref op;
715 gnttab_set_unmap_op(&op, (phys_addr_t)vaddr, GNTMAP_host_map, handle);
717 if (HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, &op, 1))
720 if (op.status != GNTST_okay)
721 xenbus_dev_error(dev, op.status,
722 "unmapping page at handle %d error %d",
727 EXPORT_SYMBOL_GPL(xenbus_unmap_ring);
732 * xenbus_read_driver_state
733 * @path: path for driver
735 * Return the state of the driver rooted at the given store path, or
736 * XenbusStateUnknown if no state can be read.
738 enum xenbus_state xenbus_read_driver_state(const char *path)
742 if (xenbus_scanf(XBT_NIL, path, "state", "%d", &result) != 1)
743 result = XenbusStateUnknown;
747 EXPORT_SYMBOL_GPL(xenbus_read_driver_state);
749 #if !defined(CONFIG_XEN) && !defined(MODULE)
750 static const struct xenbus_ring_ops ring_ops_pv = {
751 .map = xenbus_map_ring_valloc_pv,
752 .unmap = xenbus_unmap_ring_vfree_pv,
755 static const struct xenbus_ring_ops ring_ops_hvm = {
756 .map = xenbus_map_ring_valloc_hvm,
757 .unmap = xenbus_unmap_ring_vfree_hvm,
760 void __init xenbus_ring_ops_init(void)
763 ring_ops = &ring_ops_pv;
765 ring_ops = &ring_ops_hvm;