/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1992 - 1997, 2000-2002 Silicon Graphics, Inc. All rights reserved.
 */
8 #ifndef _ASM_IA64_SN_PDA_H
9 #define _ASM_IA64_SN_PDA_H
11 #include <linux/config.h>
12 #include <linux/cache.h>
13 #include <asm/percpu.h>
14 #include <asm/system.h>
15 #include <asm/processor.h>
17 #include <asm/sn/bte.h>
/*
 * CPU-specific data structure.
 *
 * One of these structures is allocated for each cpu of a NUMA system.
 *
 * This structure provides a convenient way of keeping together
 * all SN per-cpu data structures.
 */
30 #define POLL_ENTRIES 50
38 typedef struct pda_s {
40 /* Having a pointer in the begining of PDA tends to increase
41 * the chance of having this pointer in cache. (Yes something
42 * else gets pushed out). Doing this reduces the number of memory
43 * access to all nodepda variables to be one
45 struct nodepda_s *p_nodepda; /* Pointer to Per node PDA */
46 struct subnodepda_s *p_subnodepda; /* Pointer to CPU subnode PDA */
51 #ifdef CONFIG_IA64_SGI_SN1
52 volatile long *led_address;
54 volatile short *led_address;
57 u8 hb_state; /* supports blinking heartbeat leds */
58 unsigned int hb_count;
60 unsigned int idle_flag;
62 #ifdef CONFIG_IA64_SGI_SN2
63 struct irqpda_s *p_irqpda; /* Pointer to CPU irq data */
65 volatile unsigned long *bedrock_rev_id;
66 volatile unsigned long *pio_write_status_addr;
67 volatile unsigned long *pio_shub_war_cam_addr;
68 volatile unsigned long *mem_write_status_addr;
70 bteinfo_t *cpu_bte_if[BTES_PER_NODE]; /* cpu interface order */
73 sn_poll_entry_t pda_poll_entries[POLL_ENTRIES];
74 int pda_poll_entry_count;
79 #define CACHE_ALIGN(x) (((x) + SMP_CACHE_BYTES-1) & ~(SMP_CACHE_BYTES-1))
/*
 * Per-cpu private data area for each cpu.  The PDA is located immediately after
 * the IA64 cpu_data area.  A full page is allocated for the cpu_data area for each
 * cpu but only a small amount of the page is actually used.  We put the SNIA PDA
 * in the same page as the cpu_data area.  Note that there is a check in the setup
 * code to verify that we don't overflow the page.
 *
 * Seems like we should cache-line align the pda so that any changes in the
 * size of the cpu_data area don't change cache layout.  Should we align to 32, 64,
 * 128 or a 512-byte boundary?  Each has merits.  For now, pick 128 but this should
 * be revisited later.
 */
93 DECLARE_PER_CPU(struct pda_s, pda_percpu);
95 #define pda (&__get_cpu_var(pda_percpu))
97 #define pdacpu(cpu) (&per_cpu(pda_percpu, cpu))
100 #endif /* _ASM_IA64_SN_PDA_H */