#ifndef _ASM_IA64_SYSTEM_H
#define _ASM_IA64_SYSTEM_H

/*
 * System defines. Note that this is included both from .c and .S
 * files, so it does only defines, not any C code. This is based
 * on information published in the Processor Abstraction Layer
 * and the System Abstraction Layer manual.
 *
 * Copyright (C) 1998-2003 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 * Copyright (C) 1999 Asit Mallick <asit.k.mallick@intel.com>
 * Copyright (C) 1999 Don Dugger <don.dugger@intel.com>
 */
#include <linux/config.h>

#include <asm/kregs.h>
#include <asm/page.h>
#include <asm/pal.h>
#include <asm/percpu.h>
#define KERNEL_START		(PAGE_OFFSET + 68*1024*1024)

/* 0xa000000000000000 - 0xa000000000000000+PERCPU_MAX_SIZE remain unmapped */
#define PERCPU_ADDR		(0xa000000000000000 + PERCPU_PAGE_SIZE)
#define GATE_ADDR		(0xa000000000000000 + 2*PERCPU_PAGE_SIZE)
#ifndef __ASSEMBLY__

#include <linux/kernel.h>
#include <linux/types.h>
struct pci_vector_struct {
	__u16 segment;	/* PCI Segment number */
	__u16 bus;	/* PCI Bus number */
	__u32 pci_id;	/* ACPI split 16 bits device, 16 bits function (see section 6.1.1) */
	__u8 pin;	/* PCI PIN (0 = A, 1 = B, 2 = C, 3 = D) */
	__u32 irq;	/* IRQ assigned */
};
extern struct ia64_boot_param {
	__u64 command_line;		/* physical address of command line arguments */
	__u64 efi_systab;		/* physical address of EFI system table */
	__u64 efi_memmap;		/* physical address of EFI memory map */
	__u64 efi_memmap_size;		/* size of EFI memory map */
	__u64 efi_memdesc_size;		/* size of an EFI memory map descriptor */
	__u32 efi_memdesc_version;	/* memory descriptor version */
	struct {
		__u16 num_cols;	/* number of columns on console output device */
		__u16 num_rows;	/* number of rows on console output device */
		__u16 orig_x;	/* cursor's x position */
		__u16 orig_y;	/* cursor's y position */
	} console_info;
	__u64 fpswa;		/* physical address of the fpswa interface */
	__u64 initrd_start;
	__u64 initrd_size;
} *ia64_boot_param;
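/*
 * Illustrative only, not part of the original header: a minimal sketch of
 * how the EFI memory-map fields above are meant to be consumed.  The
 * descriptors must be walked with the firmware-supplied efi_memdesc_size
 * stride (which may be larger than sizeof(efi_memory_desc_t)), which is why
 * that size is passed separately from efi_memmap_size.  The function name is
 * hypothetical, and the walk assumes the map is reachable via the identity
 * mapping.
 */
#if 0
#include <linux/efi.h>

static void
hypothetical_walk_efi_memmap (void)
{
	void *p   = __va(ia64_boot_param->efi_memmap);
	void *end = p + ia64_boot_param->efi_memmap_size;

	for (; p < end; p += ia64_boot_param->efi_memdesc_size) {
		efi_memory_desc_t *md = p;

		/* inspect md->type, md->phys_addr, md->num_pages, ... */
	}
}
#endif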
static inline void
ia64_insn_group_barrier (void)
{
	__asm__ __volatile__ (";;" ::: "memory");
}
/*
 * Macros to force memory ordering.  In these descriptions, "previous"
 * and "subsequent" refer to program order; "visible" means that all
 * architecturally visible effects of a memory access have occurred
 * (at a minimum, this means the memory has been read or written).
 *
 *   wmb():	Guarantees that all preceding stores to memory-
 *		like regions are visible before any subsequent
 *		stores and that all following stores will be
 *		visible only after all previous stores.
 *   rmb():	Like wmb(), but for reads.
 *   mb():	wmb()/rmb() combo, i.e., all previous memory
 *		accesses are visible before all subsequent
 *		accesses and vice versa.  This is also known as
 *		a "fence."
 *
 * Note: "mb()" and its variants cannot be used as a fence to order
 * accesses to memory mapped I/O registers.  For that, mf.a needs to
 * be used.  However, we don't want to always use mf.a because (a)
 * it's (presumably) much slower than mf and (b) mf.a is supported for
 * sequential memory pages only.
 */
#define mb()	__asm__ __volatile__ ("mf" ::: "memory")
#define rmb()	mb()
#define wmb()	mb()
#define read_barrier_depends()	do { } while(0)
#ifdef CONFIG_SMP
# define smp_mb()	mb()
# define smp_rmb()	rmb()
# define smp_wmb()	wmb()
# define smp_read_barrier_depends()	read_barrier_depends()
#else
# define smp_mb()	barrier()
# define smp_rmb()	barrier()
# define smp_wmb()	barrier()
# define smp_read_barrier_depends()	do { } while(0)
#endif
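/*
 * Illustrative only, not part of the original header: a minimal
 * producer/consumer sketch of the wmb()/rmb() pairing described above.
 * The variables and functions are hypothetical.
 */
#if 0
static int hypothetical_payload;
static int hypothetical_ready;

static void
hypothetical_producer (void)
{
	hypothetical_payload = 42;	/* store the data ... */
	wmb();				/* ... and make it visible ... */
	hypothetical_ready = 1;		/* ... before the flag announcing it */
}

static void
hypothetical_consumer (void)
{
	while (!hypothetical_ready)
		cpu_relax();
	rmb();				/* order the flag read before the data read */
	BUG_ON(hypothetical_payload != 42);
}
#endif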
/*
 * XXX check on these---I suspect what Linus really wants here is
 * acquire vs release semantics but we can't discuss this stuff with
 * Linus just yet.  Grrr...
 */
#define set_mb(var, value)	do { (var) = (value); mb(); } while (0)
#define set_wmb(var, value)	do { (var) = (value); mb(); } while (0)
#define safe_halt()		ia64_pal_halt_light()	/* PAL_HALT_LIGHT */
/*
 * The group barrier in front of the rsm & ssm is necessary to ensure
 * that none of the previous instructions in the same group are
 * affected by the rsm/ssm.
 */
/* For spinlocks etc */

/* clearing psr.i is implicitly serialized (visible by next insn) */
/* setting psr.i requires data serialization */
#define __local_irq_save(x)	__asm__ __volatile__ ("mov %0=psr;;"		\
						      "rsm psr.i;;"		\
						      : "=r" (x) :: "memory")
#define __local_irq_disable()	__asm__ __volatile__ (";; rsm psr.i;;" ::: "memory")
#define __local_irq_restore(x)	__asm__ __volatile__ ("cmp.ne p6,p7=%0,r0;;"	\
						      "(p6) ssm psr.i;"		\
						      "(p7) rsm psr.i;;"	\
						      "(p6) srlz.d"		\
						      :: "r" ((x) & IA64_PSR_I)	\
						      : "p6", "p7", "memory")
#ifdef CONFIG_IA64_DEBUG_IRQ
  extern unsigned long last_cli_ip;
# define __save_ip()		__asm__ ("mov %0=ip" : "=r" (last_cli_ip))

# define local_irq_save(x)					\
do {								\
	unsigned long psr;					\
	__local_irq_save(psr);					\
	if (psr & IA64_PSR_I)					\
		__save_ip();					\
	(x) = psr;						\
} while (0)

# define local_irq_disable()	do { unsigned long x; local_irq_save(x); } while (0)

# define local_irq_restore(x)					\
do {								\
	unsigned long old_psr, psr = (x);			\
	local_save_flags(old_psr);				\
	__local_irq_restore(psr);				\
	if ((old_psr & IA64_PSR_I) && !(psr & IA64_PSR_I))	\
		__save_ip();					\
} while (0)
#else /* !CONFIG_IA64_DEBUG_IRQ */
# define local_irq_save(x)	__local_irq_save(x)
# define local_irq_disable()	__local_irq_disable()
# define local_irq_restore(x)	__local_irq_restore(x)
#endif /* !CONFIG_IA64_DEBUG_IRQ */
#define local_irq_enable()	__asm__ __volatile__ (";; ssm psr.i;; srlz.d" ::: "memory")
#define local_save_flags(flags)	__asm__ __volatile__ ("mov %0=psr" : "=r" (flags) :: "memory")
#define irqs_disabled()				\
({						\
	unsigned long flags;			\
	local_save_flags(flags);		\
	(flags & IA64_PSR_I) == 0;		\
})
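/*
 * Illustrative only, not part of the original header: the usual
 * save/disable/restore pattern for a short critical section.  The function
 * and the counter are hypothetical.
 */
#if 0
static unsigned long hypothetical_event_count;

static void
hypothetical_count_event (void)
{
	unsigned long flags;

	local_irq_save(flags);		/* psr.i cleared; previous psr kept in flags */
	hypothetical_event_count++;	/* no interrupt can preempt this update */
	local_irq_restore(flags);	/* psr.i set again only if it was set before */
}
#endif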
#ifdef __KERNEL__

#define prepare_to_switch()	do { } while(0)
#ifdef CONFIG_IA32_SUPPORT
# define IS_IA32_PROCESS(regs)	(ia64_psr(regs)->is != 0)
#else
# define IS_IA32_PROCESS(regs)	0
static inline void ia32_save_state(struct task_struct *t __attribute__((unused))){}
static inline void ia32_load_state(struct task_struct *t __attribute__((unused))){}
#endif
/*
 * Context switch from one thread to another.  If the two threads have
 * different address spaces, schedule() has already taken care of
 * switching to the new address space by calling switch_mm().
 *
 * Disabling access to the fph partition and the debug-register
 * context switch MUST be done before calling ia64_switch_to() since a
 * newly created thread returns directly to
 * ia64_ret_from_syscall_clear_r8.
 */
extern struct task_struct *ia64_switch_to (void *next_task);

struct task_struct;
extern void ia64_save_extra (struct task_struct *task);
extern void ia64_load_extra (struct task_struct *task);
#ifdef CONFIG_PERFMON
  DECLARE_PER_CPU(unsigned long, pfm_syst_info);
# define PERFMON_IS_SYSWIDE()	(__get_cpu_var(pfm_syst_info) & 0x1)
#else
# define PERFMON_IS_SYSWIDE()	(0)
#endif
#define IA64_HAS_EXTRA_STATE(t)							\
	((t)->thread.flags & (IA64_THREAD_DBG_VALID|IA64_THREAD_PM_VALID)	\
	 || IS_IA32_PROCESS(ia64_task_regs(t)) || PERFMON_IS_SYSWIDE())
#define __switch_to(prev,next,last) do {							 \
	struct task_struct *__fpu_owner = ia64_get_fpu_owner();				 \
	if (IA64_HAS_EXTRA_STATE(prev))								 \
		ia64_save_extra(prev);								 \
	if (IA64_HAS_EXTRA_STATE(next))								 \
		ia64_load_extra(next);								 \
	ia64_psr(ia64_task_regs(next))->dfh =							 \
		!(__fpu_owner == (next) && ((next)->thread.last_fph_cpu == smp_processor_id())); \
	(last) = ia64_switch_to((next));							 \
} while (0)
#ifdef CONFIG_SMP
/*
 * In the SMP case, we save the fph state when context-switching away from a thread that
 * modified fph.  This way, when the thread gets scheduled on another CPU, the CPU can
 * pick up the state from task->thread.fph, avoiding the complication of having to fetch
 * the latest fph state from another CPU.  In other words: eager save, lazy restore.
 */
# define switch_to(prev,next,last) do {						\
	if (ia64_psr(ia64_task_regs(prev))->mfh) {				\
		ia64_psr(ia64_task_regs(prev))->mfh = 0;			\
		(prev)->thread.flags |= IA64_THREAD_FPH_VALID;			\
		__ia64_save_fpu((prev)->thread.fph);				\
		(prev)->thread.last_fph_cpu = smp_processor_id();		\
	}									\
	__switch_to(prev, next, last);						\
} while (0)
#else
# define switch_to(prev,next,last)	__switch_to(prev, next, last)
#endif
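/*
 * Illustrative only, not part of the original header: the "lazy restore"
 * half of the eager-save/lazy-restore policy described above.  __switch_to()
 * sets psr.dfh, so the incoming thread's first touch of f32-f127 faults; a
 * handler along these (simplified, hypothetical) lines then reloads the
 * state that was saved eagerly at switch-out time.  The real handler lives
 * in the architecture's fault-handling code.
 */
#if 0
static void
hypothetical_disabled_fph_fault (struct task_struct *task)
{
	struct ia64_psr *psr = ia64_psr(ia64_task_regs(task));

	psr->dfh = 0;					/* re-enable access to f32-f127 */
	if (task->thread.flags & IA64_THREAD_FPH_VALID) {
		__ia64_load_fpu(task->thread.fph);	/* reload the eagerly saved state */
		task->thread.last_fph_cpu = smp_processor_id();
	}
}
#endif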
/*
 * On IA-64, we don't want to hold the runqueue's lock during the low-level context-switch,
 * because that could cause a deadlock.  Here is an example by Erich Focht:
 *
 * Example:
 * CPU#0:
 * schedule()
 *    -> spin_lock_irq(&rq->lock)
 *    -> context_switch()
 *       -> wrap_mmu_context()
 *          -> read_lock(&tasklist_lock)
 *
 * CPU#1:
 * sys_wait4() or release_task() or forget_original_parent()
 *    -> write_lock(&tasklist_lock)
 *    -> do_notify_parent()
 *       -> wake_up_parent()
 *          -> try_to_wake_up()
 *             -> spin_lock_irq(&parent_rq->lock)
 *
 * If the parent's rq happens to be on CPU#0, we'll wait for the rq->lock
 * of that CPU which will not be released, because there we wait for the
 * tasklist_lock to become available.
 */
#define prepare_arch_switch(rq, next)		\
do {						\
	spin_lock(&(next)->switch_lock);	\
	spin_unlock(&(rq)->lock);		\
} while (0)
#define finish_arch_switch(rq, prev)	spin_unlock_irq(&(prev)->switch_lock)
#define task_running(rq, p)		((rq)->curr == (p) || spin_is_locked(&(p)->switch_lock))
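/*
 * Illustrative only, not part of the original header: a simplified,
 * hypothetical sketch of the order in which the scheduler core is expected
 * to use the hooks above.  rq->lock is dropped before the low-level switch
 * (prepare_arch_switch) precisely so that code reached from context_switch()
 * can take other locks, such as tasklist_lock, without the deadlock shown in
 * the comment; the old task's switch_lock is released afterwards via
 * finish_arch_switch().  The actual scheduler code differs in detail.
 */
#if 0
static void
hypothetical_context_switch_path (struct runqueue *rq, struct task_struct *prev,
				  struct task_struct *next)
{
	struct task_struct *last;

	prepare_arch_switch(rq, next);	/* take next->switch_lock, drop rq->lock */
	switch_to(prev, next, last);	/* low-level register/stack switch */
	finish_arch_switch(rq, last);	/* release switch_lock, re-enable interrupts */
}
#endif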
#endif /* __KERNEL__ */

#endif /* __ASSEMBLY__ */

#endif /* _ASM_IA64_SYSTEM_H */