 * Copyright (C) 2001 Dave Engebretsen & Todd Inglett IBM Corporation.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
23 #include <linux/string.h>
24 #include <linux/init.h>
/* I/O addresses are converted to EEH "tokens" such that a driver will cause
 * a bad page fault if the address is used directly (i.e. these addresses are
 * never actually mapped).  Translation between IO <-> EEH region is 1 to 1.
 */
/* Token -> usable IO address: clear the 4-bit region field, insert the
 * real IO region id. */
#define IO_TOKEN_TO_ADDR(token) \
	(((unsigned long)(token) & ~(0xfUL << REGION_SHIFT)) | \
	(IO_REGION_ID << REGION_SHIFT))

/* IO address -> token: clear the region field, insert the (unmapped)
 * EEH region id so a direct dereference faults. */
#define IO_ADDR_TO_TOKEN(addr) \
	(((unsigned long)(addr) & ~(0xfUL << REGION_SHIFT)) | \
	(EEH_REGION_ID << REGION_SHIFT))
41 /* Values for eeh_mode bits in device_node */
42 #define EEH_MODE_SUPPORTED (1<<0)
43 #define EEH_MODE_NOCHECK (1<<1)
45 extern void __init eeh_init(void);
46 unsigned long eeh_check_failure(void *token, unsigned long val);
47 void *eeh_ioremap(unsigned long addr, void *vaddr);
48 void __init pci_addr_cache_build(void);
/**
 * eeh_add_device_early
 * eeh_add_device_late
 *
 * Perform eeh initialization for devices added after boot.
 * Call eeh_add_device_early before doing any i/o to the
 * device (including config space i/o).  Call eeh_add_device_late
 * to finish the eeh setup for this device.
 */
void eeh_add_device_early(struct device_node *);
void eeh_add_device_late(struct pci_dev *);
/**
 * eeh_remove_device - undo EEH setup for the indicated pci device
 * @dev: pci device to be removed
 *
 * This routine should be called when a device is removed from a running
 * system (e.g. by hotplug or dlpar).
 */
void eeh_remove_device(struct pci_dev *);
/* Option values for eeh_set_option().  Values 0 and 1 (presumably
 * disable/enable) appear to be defined in a portion of this header not
 * shown here — TODO confirm against the full file. */
#define EEH_RELEASE_LOADSTORE	2
#define EEH_RELEASE_DMA		3
int eeh_set_option(struct pci_dev *dev, int options);
/*
 * EEH_POSSIBLE_ERROR() -- test for possible MMIO failure.
 *
 * Order this macro for performance.
 * If EEH is off for a device and it is a memory BAR, ioremap will
 * map it to the IOREGION.  In this case addr == vaddr and since these
 * should be in registers we compare them first.  Next we check for
 * ff's which indicates a (very) possible failure.
 *
 * If this macro yields TRUE, the caller relays to eeh_check_failure()
 * which does further tests out of line.
 */
/* All-ones of the access width — the signature a frozen PCI slot returns. */
#define EEH_POSSIBLE_IO_ERROR(val, type)	((val) == (type)~0)

/* The vaddr will equal the addr if EEH checking is disabled for
 * this device.  This is because eeh_ioremap() will not have
 * remapped to 0xA0, and thus both vaddr and addr will be 0xE0...
 */
#define EEH_POSSIBLE_ERROR(addr, vaddr, val, type) \
	((vaddr) != (addr) && EEH_POSSIBLE_IO_ERROR(val, type))
100 * MMIO read/write operations with EEH support.
102 static inline u8 eeh_readb(void *addr) {
103 volatile u8 *vaddr = (volatile u8 *)IO_TOKEN_TO_ADDR(addr);
104 u8 val = in_8(vaddr);
105 if (EEH_POSSIBLE_ERROR(addr, vaddr, val, u8))
106 return eeh_check_failure(addr, val);
109 static inline void eeh_writeb(u8 val, void *addr) {
110 volatile u8 *vaddr = (volatile u8 *)IO_TOKEN_TO_ADDR(addr);
114 static inline u16 eeh_readw(void *addr) {
115 volatile u16 *vaddr = (volatile u16 *)IO_TOKEN_TO_ADDR(addr);
116 u16 val = in_le16(vaddr);
117 if (EEH_POSSIBLE_ERROR(addr, vaddr, val, u16))
118 return eeh_check_failure(addr, val);
121 static inline void eeh_writew(u16 val, void *addr) {
122 volatile u16 *vaddr = (volatile u16 *)IO_TOKEN_TO_ADDR(addr);
123 out_le16(vaddr, val);
125 static inline u16 eeh_raw_readw(void *addr) {
126 volatile u16 *vaddr = (volatile u16 *)IO_TOKEN_TO_ADDR(addr);
127 u16 val = in_be16(vaddr);
128 if (EEH_POSSIBLE_ERROR(addr, vaddr, val, u16))
129 return eeh_check_failure(addr, val);
132 static inline void eeh_raw_writew(u16 val, void *addr) {
133 volatile u16 *vaddr = (volatile u16 *)IO_TOKEN_TO_ADDR(addr);
134 out_be16(vaddr, val);
137 static inline u32 eeh_readl(void *addr) {
138 volatile u32 *vaddr = (volatile u32 *)IO_TOKEN_TO_ADDR(addr);
139 u32 val = in_le32(vaddr);
140 if (EEH_POSSIBLE_ERROR(addr, vaddr, val, u32))
141 return eeh_check_failure(addr, val);
144 static inline void eeh_writel(u32 val, void *addr) {
145 volatile u32 *vaddr = (volatile u32 *)IO_TOKEN_TO_ADDR(addr);
146 out_le32(vaddr, val);
148 static inline u32 eeh_raw_readl(void *addr) {
149 volatile u32 *vaddr = (volatile u32 *)IO_TOKEN_TO_ADDR(addr);
150 u32 val = in_be32(vaddr);
151 if (EEH_POSSIBLE_ERROR(addr, vaddr, val, u32))
152 return eeh_check_failure(addr, val);
155 static inline void eeh_raw_writel(u32 val, void *addr) {
156 volatile u32 *vaddr = (volatile u32 *)IO_TOKEN_TO_ADDR(addr);
157 out_be32(vaddr, val);
160 static inline u64 eeh_readq(void *addr) {
161 volatile u64 *vaddr = (volatile u64 *)IO_TOKEN_TO_ADDR(addr);
162 u64 val = in_le64(vaddr);
163 if (EEH_POSSIBLE_ERROR(addr, vaddr, val, u64))
164 return eeh_check_failure(addr, val);
167 static inline void eeh_writeq(u64 val, void *addr) {
168 volatile u64 *vaddr = (volatile u64 *)IO_TOKEN_TO_ADDR(addr);
169 out_le64(vaddr, val);
171 static inline u64 eeh_raw_readq(void *addr) {
172 volatile u64 *vaddr = (volatile u64 *)IO_TOKEN_TO_ADDR(addr);
173 u64 val = in_be64(vaddr);
174 if (EEH_POSSIBLE_ERROR(addr, vaddr, val, u64))
175 return eeh_check_failure(addr, val);
178 static inline void eeh_raw_writeq(u64 val, void *addr) {
179 volatile u64 *vaddr = (volatile u64 *)IO_TOKEN_TO_ADDR(addr);
180 out_be64(vaddr, val);
/* True when v (pointer or address) is aligned to a; a must be a power of 2. */
#define EEH_CHECK_ALIGN(v,a) \
	((((unsigned long)(v)) & ((a) - 1)) == 0)
186 static inline void eeh_memset_io(void *addr, int c, unsigned long n) {
187 void *vaddr = (void *)IO_TOKEN_TO_ADDR(addr);
192 while(n && !EEH_CHECK_ALIGN(vaddr, 4)) {
193 *((volatile u8 *)vaddr) = c;
194 vaddr = (void *)((unsigned long)vaddr + 1);
198 *((volatile u32 *)vaddr) = lc;
199 vaddr = (void *)((unsigned long)vaddr + 4);
203 *((volatile u8 *)vaddr) = c;
204 vaddr = (void *)((unsigned long)vaddr + 1);
207 __asm__ __volatile__ ("sync" : : : "memory");
209 static inline void eeh_memcpy_fromio(void *dest, void *src, unsigned long n) {
210 void *vsrc = (void *)IO_TOKEN_TO_ADDR(src);
211 void *vsrcsave = vsrc, *destsave = dest, *srcsave = src;
212 unsigned long nsave = n;
214 while(n && (!EEH_CHECK_ALIGN(vsrc, 4) || !EEH_CHECK_ALIGN(dest, 4))) {
215 *((u8 *)dest) = *((volatile u8 *)vsrc);
216 __asm__ __volatile__ ("eieio" : : : "memory");
217 vsrc = (void *)((unsigned long)vsrc + 1);
218 dest = (void *)((unsigned long)dest + 1);
222 *((u32 *)dest) = *((volatile u32 *)vsrc);
223 __asm__ __volatile__ ("eieio" : : : "memory");
224 vsrc = (void *)((unsigned long)vsrc + 4);
225 dest = (void *)((unsigned long)dest + 4);
229 *((u8 *)dest) = *((volatile u8 *)vsrc);
230 __asm__ __volatile__ ("eieio" : : : "memory");
231 vsrc = (void *)((unsigned long)vsrc + 1);
232 dest = (void *)((unsigned long)dest + 1);
235 __asm__ __volatile__ ("sync" : : : "memory");
237 /* Look for ffff's here at dest[n]. Assume that at least 4 bytes
238 * were copied. Check all four bytes.
241 (EEH_POSSIBLE_ERROR(srcsave, vsrcsave, (*((u32 *) destsave+nsave-4)),
243 eeh_check_failure(srcsave, (*((u32 *) destsave+nsave-4)));
247 static inline void eeh_memcpy_toio(void *dest, const void *src, unsigned long n) {
248 void *vdest = (void *)IO_TOKEN_TO_ADDR(dest);
250 while(n && (!EEH_CHECK_ALIGN(vdest, 4) || !EEH_CHECK_ALIGN(src, 4))) {
251 *((volatile u8 *)vdest) = *((u8 *)src);
252 src = (void *)((unsigned long)src + 1);
253 vdest = (void *)((unsigned long)vdest + 1);
257 *((volatile u32 *)vdest) = *((volatile u32 *)src);
258 src = (void *)((unsigned long)src + 4);
259 vdest = (void *)((unsigned long)vdest + 4);
263 *((volatile u8 *)vdest) = *((u8 *)src);
264 src = (void *)((unsigned long)src + 1);
265 vdest = (void *)((unsigned long)vdest + 1);
268 __asm__ __volatile__ ("sync" : : : "memory");
#undef EEH_CHECK_ALIGN

/* Ports above the legacy ISA range are always considered valid; within it,
 * a port is valid only if its page's bit is set in io_page_mask. */
#define MAX_ISA_PORT 0x10000
extern unsigned long io_page_mask;
#define _IO_IS_VALID(port) ((port) >= MAX_ISA_PORT || (1 << (port>>PAGE_SHIFT)) & io_page_mask)
277 static inline u8 eeh_inb(unsigned long port) {
279 if (!_IO_IS_VALID(port))
281 val = in_8((u8 *)(port+pci_io_base));
282 if (EEH_POSSIBLE_IO_ERROR(val, u8))
283 return eeh_check_failure((void*)(port), val);
287 static inline void eeh_outb(u8 val, unsigned long port) {
288 if (_IO_IS_VALID(port))
289 out_8((u8 *)(port+pci_io_base), val);
292 static inline u16 eeh_inw(unsigned long port) {
294 if (!_IO_IS_VALID(port))
296 val = in_le16((u16 *)(port+pci_io_base));
297 if (EEH_POSSIBLE_IO_ERROR(val, u16))
298 return eeh_check_failure((void*)(port), val);
302 static inline void eeh_outw(u16 val, unsigned long port) {
303 if (_IO_IS_VALID(port))
304 out_le16((u16 *)(port+pci_io_base), val);
307 static inline u32 eeh_inl(unsigned long port) {
309 if (!_IO_IS_VALID(port))
311 val = in_le32((u32 *)(port+pci_io_base));
312 if (EEH_POSSIBLE_IO_ERROR(val, u32))
313 return eeh_check_failure((void*)(port), val);
317 static inline void eeh_outl(u32 val, unsigned long port) {
318 if (_IO_IS_VALID(port))
319 out_le32((u32 *)(port+pci_io_base), val);
322 /* in-string eeh macros */
323 static inline void eeh_insb(unsigned long port, void * buf, int ns) {
324 _insb((u8 *)(port+pci_io_base), buf, ns);
325 if (EEH_POSSIBLE_IO_ERROR((*(((u8*)buf)+ns-1)), u8))
326 eeh_check_failure((void*)(port), *(u8*)buf);
329 static inline void eeh_insw_ns(unsigned long port, void * buf, int ns) {
330 _insw_ns((u16 *)(port+pci_io_base), buf, ns);
331 if (EEH_POSSIBLE_IO_ERROR((*(((u16*)buf)+ns-1)), u16))
332 eeh_check_failure((void*)(port), *(u16*)buf);
335 static inline void eeh_insl_ns(unsigned long port, void * buf, int nl) {
336 _insl_ns((u32 *)(port+pci_io_base), buf, nl);
337 if (EEH_POSSIBLE_IO_ERROR((*(((u32*)buf)+nl-1)), u32))
338 eeh_check_failure((void*)(port), *(u32*)buf);
341 #endif /* _PPC64_EEH_H */