1 /******************************************************************************
4 * Linux-specific hypervisor handling.
6 * Copyright (c) 2002-2004, K A Fraser
8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License version 2
10 * as published by the Free Software Foundation; or, when distributed
11 * separately from the Linux kernel or incorporated into other
12 * software packages, subject to the following license:
14 * Permission is hereby granted, free of charge, to any person obtaining a copy
15 * of this source file (the "Software"), to deal in the Software without
16 * restriction, including without limitation the rights to use, copy, modify,
17 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
18 * and to permit persons to whom the Software is furnished to do so, subject to
19 * the following conditions:
21 * The above copyright notice and this permission notice shall be included in
22 * all copies or substantial portions of the Software.
24 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
25 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
26 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
27 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
28 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
29 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
33 #ifndef __HYPERVISOR_H__
34 #define __HYPERVISOR_H__
36 #include <linux/types.h>
37 #include <linux/kernel.h>
38 #include <linux/errno.h>
39 #include <xen/interface/xen.h>
40 #include <xen/interface/sched.h>
41 #include <xen/interface/vcpu.h>
42 #include <asm/percpu.h>
43 #include <asm/ptrace.h>
44 #include <asm/pgtable_types.h>
46 extern shared_info_t *HYPERVISOR_shared_info;
/*
 * Accessors for a CPU's struct vcpu_info.
 *
 * With CONFIG_XEN_VCPU_INFO_PLACEMENT each vcpu_info lives in per-CPU
 * data, so the fields can be reached with fast percpu_read/write/xchg
 * operations; setup_vcpu_info() registers the per-CPU copy with the
 * hypervisor for a given CPU.
 *
 * NOTE(review): the #else/#endif lines separating the two variants
 * appear to have been elided from this copy of the file (original line
 * numbers jump, e.g. 56 -> 58) -- confirm against the original header.
 */
48 #ifdef CONFIG_XEN_VCPU_INFO_PLACEMENT
49 DECLARE_PER_CPU(struct vcpu_info, vcpu_info);
50 #define vcpu_info(cpu) (&per_cpu(vcpu_info, cpu))
51 #define current_vcpu_info() (&__get_cpu_var(vcpu_info))
52 #define vcpu_info_read(fld) percpu_read(vcpu_info.fld)
53 #define vcpu_info_write(fld, val) percpu_write(vcpu_info.fld, val)
54 #define vcpu_info_xchg(fld, val) percpu_xchg(vcpu_info.fld, val)
55 void setup_vcpu_info(unsigned int cpu);
56 void adjust_boot_vcpu_info(void);
/*
 * Fallback variants: read the vcpu_info array straight out of the
 * hypervisor-provided shared-info page.  The SMP form indexes by
 * smp_processor_id(); the UP form always uses entry 0 (presumably the
 * two current_vcpu_info() definitions below sit in elided #ifdef
 * CONFIG_SMP branches -- TODO confirm).
 */
58 #define vcpu_info(cpu) (HYPERVISOR_shared_info->vcpu_info + (cpu))
60 #define current_vcpu_info() vcpu_info(smp_processor_id())
62 #define current_vcpu_info() vcpu_info(0)
64 #define vcpu_info_read(fld) (current_vcpu_info()->fld)
65 #define vcpu_info_write(fld, val) (current_vcpu_info()->fld = (val))
/* No per-CPU placement to set up in this configuration: empty stub. */
66 static inline void setup_vcpu_info(unsigned int cpu) {}
70 extern unsigned long hypervisor_virt_start;
73 /* arch/xen/i386/kernel/setup.c */
74 extern start_info_t *xen_start_info;
75 #ifdef CONFIG_XEN_PRIVILEGED_GUEST
76 #define is_initial_xendomain() (xen_start_info->flags & SIF_INITDOMAIN)
78 #define is_initial_xendomain() 0
81 #define init_hypervisor(c) ((void)(c))
82 #define init_hypervisor_platform() init_hypervisor(&boot_cpu_data)
84 DECLARE_PER_CPU(struct vcpu_runstate_info, runstate);
85 #define vcpu_running(cpu) (per_cpu(runstate.state, cpu) == RUNSTATE_running)
87 /* arch/xen/kernel/evtchn.c */
88 /* Force a proper event-channel callback from Xen. */
89 void force_evtchn_callback(void);
91 /* arch/xen/kernel/process.c */
92 void xen_cpu_idle (void);
94 /* arch/xen/i386/kernel/hypervisor.c */
95 void do_hypervisor_callback(struct pt_regs *regs);
97 /* arch/xen/i386/mm/hypervisor.c */
99 * NB. ptr values should be PHYSICAL, not MACHINE. 'vals' should already
100 * be MACHINE addresses.
103 void xen_pt_switch(pgd_t *);
104 void xen_new_user_pt(pgd_t *); /* x86_64 only */
105 void xen_load_gs(unsigned int selector); /* x86_64 only */
106 void xen_tlb_flush(void);
107 void xen_invlpg(unsigned long ptr);
109 void xen_l1_entry_update(pte_t *ptr, pte_t val);
110 void xen_l2_entry_update(pmd_t *ptr, pmd_t val);
111 void xen_l3_entry_update(pud_t *ptr, pud_t val); /* x86_64/PAE */
112 void xen_l4_entry_update(pgd_t *ptr, pgd_t val); /* x86_64 only */
113 void xen_pgd_pin(pgd_t *);
114 void xen_pgd_unpin(pgd_t *);
116 void xen_init_pgd_pin(void);
117 #ifdef CONFIG_PM_SLEEP
118 void setup_pfn_to_mfn_frame_list(void *(*)(unsigned long, unsigned long,
122 void xen_set_ldt(const void *ptr, unsigned int ents);
125 #include <linux/cpumask.h>
126 void xen_tlb_flush_all(void);
127 void xen_invlpg_all(unsigned long ptr);
128 void xen_tlb_flush_mask(const cpumask_t *mask);
129 void xen_invlpg_mask(const cpumask_t *mask, unsigned long ptr);
131 #define xen_tlb_flush_all xen_tlb_flush
132 #define xen_invlpg_all xen_invlpg
135 /* Returns zero on success else negative errno. */
136 int xen_create_contiguous_region(
137 unsigned long vstart, unsigned int order, unsigned int address_bits);
138 void xen_destroy_contiguous_region(
139 unsigned long vstart, unsigned int order);
140 int early_create_contiguous_region(unsigned long pfn, unsigned int order,
141 unsigned int address_bits);
145 int xen_limit_pages_to_max_mfn(
146 struct page *pages, unsigned int order, unsigned int address_bits);
148 bool __cold hypervisor_oom(void);
150 /* Turn jiffies into Xen system time. */
151 u64 jiffies_to_st(unsigned long jiffies);
153 #ifdef CONFIG_XEN_SCRUB_PAGES
154 void scrub_pages(void *, unsigned int);
156 #define scrub_pages(_p,_n) ((void)0)
159 #if defined(CONFIG_XEN) && !defined(MODULE)
161 DECLARE_PER_CPU(bool, xen_lazy_mmu);
163 void xen_multicall_flush(void);
165 int __must_check xen_multi_update_va_mapping(unsigned long va, pte_t,
166 unsigned long flags);
167 int __must_check xen_multi_mmu_update(mmu_update_t *, unsigned int count,
168 unsigned int *success_count, domid_t);
169 int __must_check xen_multi_mmuext_op(struct mmuext_op *, unsigned int count,
170 unsigned int *success_count, domid_t);
/*
 * Lazy MMU mode: while the per-CPU xen_lazy_mmu flag is set, page-table
 * updates may be batched as multicalls instead of issuing one hypercall
 * each; leaving the mode flushes whatever was queued via
 * xen_multicall_flush().
 *
 * NOTE(review): the brace-only lines of these two inline functions were
 * elided from this copy (original lines 174/176/179/182) -- confirm
 * against the original header.
 */
172 #define __HAVE_ARCH_ENTER_LAZY_MMU_MODE
173 static inline void arch_enter_lazy_mmu_mode(void)
175 percpu_write(xen_lazy_mmu, true);
178 static inline void arch_leave_lazy_mmu_mode(void)
180 percpu_write(xen_lazy_mmu, false);
181 xen_multicall_flush();
/* True only while the current CPU is inside lazy MMU mode. */
184 #define arch_use_lazy_mmu_mode() unlikely(percpu_read(xen_lazy_mmu))
186 #if 0 /* All uses are in places potentially called asynchronously, but
187 * asynchronous code should rather not make use of lazy mode at all.
188 * Therefore, all uses of this function get commented out, proper
189 * detection of asynchronous invocations is added wherever needed,
190 * and this function is disabled to catch any new (improper) uses.
/* Deliberately dead code (#if 0 above) -- kept only so that any new
 * caller of arch_flush_lazy_mmu_mode() fails to build; see the
 * preceding comment for the rationale. */
192 static inline void arch_flush_lazy_mmu_mode(void)
194 if (arch_use_lazy_mmu_mode())
195 xen_multicall_flush();
199 #else /* !CONFIG_XEN || MODULE */
/*
 * Stubs for configurations without multicall support (!CONFIG_XEN or
 * modular code): flushing is a no-op, lazy MMU mode is never active,
 * and the xen_multi_* entry points must never be reached -- they BUG()
 * and evaluate to -ENOSYS.
 */
201 static inline void xen_multicall_flush(void) {}
202 #define arch_use_lazy_mmu_mode() false
203 #define xen_multi_update_va_mapping(...) ({ BUG(); -ENOSYS; })
204 #define xen_multi_mmu_update(...) ({ BUG(); -ENOSYS; })
205 #define xen_multi_mmuext_op(...) ({ BUG(); -ENOSYS; })
207 #endif /* CONFIG_XEN && !MODULE */
211 struct gnttab_map_grant_ref;
212 bool gnttab_pre_map_adjust(unsigned int cmd, struct gnttab_map_grant_ref *,
214 #if CONFIG_XEN_COMPAT < 0x030400
215 int gnttab_post_map_adjust(const struct gnttab_map_grant_ref *, unsigned int);
217 static inline int gnttab_post_map_adjust(const struct gnttab_map_grant_ref *m,
225 #else /* !CONFIG_XEN */
227 #define gnttab_pre_map_adjust(...) false
228 #define gnttab_post_map_adjust(...) ({ BUG(); -ENOSYS; })
230 #endif /* CONFIG_XEN */
232 #if defined(CONFIG_X86_64)
233 #define MULTI_UVMFLAGS_INDEX 2
234 #define MULTI_UVMDOMID_INDEX 3
236 #define MULTI_UVMFLAGS_INDEX 3
237 #define MULTI_UVMDOMID_INDEX 4
241 #define is_running_on_xen() 1
242 extern char hypercall_page[PAGE_SIZE];
244 extern char *hypercall_stubs;
245 #define is_running_on_xen() (!!hypercall_stubs)
248 #include <xen/hypercall.h>
254 int rc = HYPERVISOR_sched_op(SCHEDOP_yield, NULL);
256 #if CONFIG_XEN_COMPAT <= 0x030002
258 rc = HYPERVISOR_sched_op_compat(SCHEDOP_yield, 0);
268 int rc = HYPERVISOR_sched_op(SCHEDOP_block, NULL);
270 #if CONFIG_XEN_COMPAT <= 0x030002
272 rc = HYPERVISOR_sched_op_compat(SCHEDOP_block, 0);
/*
 * Ask Xen to shut this domain down (presumably HYPERVISOR_shutdown --
 * the line carrying the function name was elided from this copy; TODO
 * confirm).  Tries the modern SCHEDOP_shutdown first, then the pre-3.0.3
 * compat form when CONFIG_XEN_COMPAT allows it.  Marked __noreturn: if
 * the hypercalls return at all, only a crash request may fall through.
 */
278 static inline void __noreturn
282 struct sched_shutdown sched_shutdown = {
286 VOID(HYPERVISOR_sched_op(SCHEDOP_shutdown, &sched_shutdown));
287 #if CONFIG_XEN_COMPAT <= 0x030002
288 VOID(HYPERVISOR_sched_op_compat(SCHEDOP_shutdown, reason));
290 /* Don't recurse needlessly. */
291 BUG_ON(reason != SHUTDOWN_crash);
295 static inline int __must_check
297 evtchn_port_t *ports, unsigned int nr_ports, u64 timeout)
300 struct sched_poll sched_poll = {
301 .nr_ports = nr_ports,
302 .timeout = jiffies_to_st(timeout)
304 set_xen_guest_handle(sched_poll.ports, ports);
306 rc = HYPERVISOR_sched_op(SCHEDOP_poll, &sched_poll);
307 #if CONFIG_XEN_COMPAT <= 0x030002
309 rc = HYPERVISOR_sched_op_compat(SCHEDOP_yield, 0);
/*
 * Block the calling VCPU until an event is pending on one of the given
 * event-channel ports (SCHEDOP_poll with no timeout).  On hypervisors
 * older than 3.0.3 the compat fallback can only yield
 * (SCHEDOP_yield), so the caller must be prepared to re-poll.
 * Returns the hypercall result; callers must check it (__must_check).
 */
315 static inline int __must_check
316 HYPERVISOR_poll_no_timeout(
317 evtchn_port_t *ports, unsigned int nr_ports)
320 struct sched_poll sched_poll = {
323 set_xen_guest_handle(sched_poll.ports, ports);
325 rc = HYPERVISOR_sched_op(SCHEDOP_poll, &sched_poll);
326 #if CONFIG_XEN_COMPAT <= 0x030002
328 rc = HYPERVISOR_sched_op_compat(SCHEDOP_yield, 0);
/*
 * Fill one multicall entry with an update_va_mapping request mapping
 * 'va' to 'new_val' with the given UVMF_* flags.  The PTE is split
 * across args according to the architecture: a single 64-bit word on
 * x86_64, low/high halves on 32-bit PAE, and just the low word
 * otherwise.  MULTI_UVMFLAGS_INDEX differs per arch (see the #defines
 * above) because the PTE occupies a different number of argument slots.
 */
337 MULTI_update_va_mapping(
338 multicall_entry_t *mcl, unsigned long va,
339 pte_t new_val, unsigned long flags)
341 mcl->op = __HYPERVISOR_update_va_mapping;
343 #if defined(CONFIG_X86_64)
344 mcl->args[1] = new_val.pte;
345 #elif defined(CONFIG_X86_PAE)
346 mcl->args[1] = new_val.pte_low;
347 mcl->args[2] = new_val.pte_high;
349 mcl->args[1] = new_val.pte_low;
352 mcl->args[MULTI_UVMFLAGS_INDEX] = flags;
/*
 * Fill one multicall entry with an mmu_update request: 'count' update
 * descriptors at 'req', optional *success_count out-parameter, on
 * behalf of domain 'domid'.  Argument layout mirrors the
 * HYPERVISOR_mmu_update hypercall.
 */
356 MULTI_mmu_update(multicall_entry_t *mcl, mmu_update_t *req,
357 unsigned int count, unsigned int *success_count,
360 mcl->op = __HYPERVISOR_mmu_update;
361 mcl->args[0] = (unsigned long)req;
362 mcl->args[1] = count;
363 mcl->args[2] = (unsigned long)success_count;
364 mcl->args[3] = domid;
/*
 * Fill one multicall entry with a grant_table_op request: sub-command
 * 'cmd' applied to 'count' operation structures at 'uop'.
 * NOTE(review): the line storing 'cmd' into args[0] (original line 372)
 * appears to have been elided from this copy -- confirm against the
 * original header.
 */
368 MULTI_grant_table_op(multicall_entry_t *mcl, unsigned int cmd,
369 void *uop, unsigned int count)
371 mcl->op = __HYPERVISOR_grant_table_op;
373 mcl->args[1] = (unsigned long)uop;
374 mcl->args[2] = count;
377 #else /* !defined(CONFIG_XEN) */
379 /* Multicalls not supported for HVM guests. */
380 #define MULTI_update_va_mapping(a,b,c,d) ((void)0)
381 #define MULTI_grant_table_op(a,b,c,d) ((void)0)
385 #define uvm_multi(cpumask) ((unsigned long)cpumask_bits(cpumask) | UVMF_MULTI)
388 /* drivers/staging/ use Windows-style types, including VOID */
392 #endif /* __HYPERVISOR_H__ */