#ifndef _ASM_X86_IO_H
#define _ASM_X86_IO_H

/*
 * This file contains the definitions for the x86 IO instructions
 * inb/inw/inl/outb/outw/outl and the "string versions" of the same
 * (insb/insw/insl/outsb/outsw/outsl). You can also use "pausing"
 * versions of the single-IO instructions (inb_p/inw_p/..).
 *
 * This file is not meant to be obfuscating: it's just complicated
 * to (a) handle it all in a way that makes gcc able to optimize it
 * as well as possible and (b) trying to avoid writing the same thing
 * over and over again with slight variations and possibly making a
 * mistake somewhere.
 *
 * Thanks to James van Artsdalen for a better timing-fix than
 * the two short jumps: using outb's to a nonexistent port seems
 * to guarantee better timings even on fast machines.
 *
 * On the other hand, I'd like to be sure of a non-existent port:
 * I feel a bit unsafe about using 0x80 (should be safe, though)
 *
 * Bit simplified and optimized by Jan Hubicka
 * Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999.
 *
 * isa_memset_io, isa_memcpy_fromio, isa_memcpy_toio added,
 * isa_read[wl] and isa_write[wl] fixed
 * - Arnaldo Carvalho de Melo <acme@conectiva.com.br>
 */

#define ARCH_HAS_IOREMAP_WC

#include <linux/string.h>
#include <linux/compiler.h>
#include <asm/fixmap.h>

#define build_mmio_read(name, size, type, reg, barrier) \
static inline type name(const volatile void __iomem *addr) \
{ type ret; asm volatile("mov" size " %1,%0":reg (ret) \
:"m" (*(volatile type __force *)addr) barrier); return ret; }

#define build_mmio_write(name, size, type, reg, barrier) \
static inline void name(type val, volatile void __iomem *addr) \
{ asm volatile("mov" size " %0,%1": :reg (val), \
"m" (*(volatile type __force *)addr) barrier); }

build_mmio_read(readb, "b", unsigned char, "=q", :"memory")
build_mmio_read(readw, "w", unsigned short, "=r", :"memory")
build_mmio_read(readl, "l", unsigned int, "=r", :"memory")

build_mmio_read(__readb, "b", unsigned char, "=q", )
build_mmio_read(__readw, "w", unsigned short, "=r", )
build_mmio_read(__readl, "l", unsigned int, "=r", )

build_mmio_write(writeb, "b", unsigned char, "q", :"memory")
build_mmio_write(writew, "w", unsigned short, "r", :"memory")
build_mmio_write(writel, "l", unsigned int, "r", :"memory")

build_mmio_write(__writeb, "b", unsigned char, "q", )
build_mmio_write(__writew, "w", unsigned short, "r", )
build_mmio_write(__writel, "l", unsigned int, "r", )

#define readb_relaxed(a) __readb(a)
#define readw_relaxed(a) __readw(a)
#define readl_relaxed(a) __readl(a)

#define __raw_readb __readb
#define __raw_readw __readw
#define __raw_readl __readl

#define __raw_writeb __writeb
#define __raw_writew __writew
#define __raw_writel __writel

#define mmiowb() barrier()
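
/*
 * Note: x86 keeps MMIO reads and writes ordered with respect to each
 * other, so the _relaxed and __raw_* accessors above simply alias the
 * variants built without the "memory" clobber, and mmiowb() degenerates
 * to a compiler barrier.
 */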

#ifdef CONFIG_X86_64

build_mmio_read(readq, "q", unsigned long, "=r", :"memory")
build_mmio_write(writeq, "q", unsigned long, "r", :"memory")

#define readq_relaxed(a) readq(a)

#define __raw_readq(a) readq(a)
#define __raw_writeq(val, addr) writeq(val, addr)

/* Let people know that we have them */
#define readq readq
#define writeq writeq

#endif /* CONFIG_X86_64 */

/**
 * virt_to_phys - map virtual addresses to physical
 * @address: address to remap
 *
 * The returned physical address is the physical (CPU) mapping for
 * the memory address given. It is only valid to use this function on
 * addresses directly mapped or allocated via kmalloc.
 *
 * This function does not give bus mappings for DMA transfers. In
 * almost all conceivable cases a device driver should not be using
 * this function.
 */
static inline phys_addr_t virt_to_phys(volatile void *address)
{
	return __pa(address);
}

/**
 * phys_to_virt - map physical address to virtual
 * @address: address to remap
 *
 * The returned virtual address is a current CPU mapping for
 * the memory address given. It is only valid to use this function on
 * addresses that have a kernel mapping.
 *
 * This function does not handle bus mappings for DMA transfers. In
 * almost all conceivable cases a device driver should not be using
 * this function.
 */
static inline void *phys_to_virt(phys_addr_t address)
{
	return __va(address);
}
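
/*
 * Illustrative sketch, not part of this header: for a directly-mapped
 * buffer (hypothetical code), the two helpers above invert each other:
 *
 *	void *buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
 *	phys_addr_t pa = virt_to_phys(buf);
 *
 *	BUG_ON(phys_to_virt(pa) != buf);
 */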

/*
 * Change "struct page" to physical address.
 */
#define page_to_pseudophys(page) ((dma_addr_t)page_to_pfn(page) << PAGE_SHIFT)
#define page_to_phys(page) (phys_to_machine(page_to_pseudophys(page)))
#define page_to_bus(page) (phys_to_machine(page_to_pseudophys(page)))

/*
 * ISA I/O bus memory addresses are 1:1 with the physical address.
 * However, we truncate the address to unsigned int to avoid undesirable
 * promotions in legacy drivers.
 */
#define isa_virt_to_bus(_x) ({ \
	unsigned long _va_ = (unsigned long)(_x); \
	_va_ - fix_to_virt(FIX_ISAMAP_BEGIN) < (NR_FIX_ISAMAPS << PAGE_SHIFT) \
		? _va_ - fix_to_virt(FIX_ISAMAP_BEGIN) \
		: ({ BUG(); (unsigned long)virt_to_bus(_va_); }); })
#define isa_bus_to_virt(_x) ((void *)fix_to_virt(FIX_ISAMAP_BEGIN) + (_x))

/*
 * However PCI ones are not necessarily 1:1 and therefore these interfaces
 * are forbidden in portable PCI drivers.
 *
 * Allow them on x86 for legacy drivers, though.
 */
#define virt_to_bus(_x) phys_to_machine(__pa(_x))
#define bus_to_virt(_x) __va(machine_to_phys(_x))
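
/*
 * Illustrative sketch, not part of this header: a legacy driver handing
 * a kmalloc'ed buffer to a bus-mastering device (the device programming
 * step is hypothetical):
 *
 *	void *buf = kmalloc(64, GFP_KERNEL | GFP_DMA);
 *	unsigned long bus_addr = virt_to_bus(buf);
 *
 *	(program the device's DMA engine with bus_addr here)
 */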

/**
 * ioremap - map bus memory into CPU space
 * @offset: bus address of the memory
 * @size: size of the resource to map
 *
 * ioremap performs a platform specific sequence of operations to
 * make bus memory CPU accessible via the readb/readw/readl/writeb/
 * writew/writel functions and the other mmio helpers. The returned
 * address is not guaranteed to be usable directly as a virtual
 * address.
 *
 * If the area you are trying to map is a PCI BAR you should have a
 * look at pci_iomap().
 */
extern void __iomem *ioremap_nocache(resource_size_t offset, unsigned long size);
extern void __iomem *ioremap_cache(resource_size_t offset, unsigned long size);
extern void __iomem *ioremap_prot(resource_size_t offset, unsigned long size,
				unsigned long prot_val);

/*
 * The default ioremap() behavior is non-cached:
 */
static inline void __iomem *ioremap(resource_size_t offset, unsigned long size)
{
	return ioremap_nocache(offset, size);
}

extern void iounmap(volatile void __iomem *addr);

extern void set_iounmap_nonlazy(void);
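
/*
 * Illustrative sketch, not part of this header: mapping a device's MMIO
 * region and accessing a register (the physical address and register
 * offset are hypothetical):
 *
 *	void __iomem *regs = ioremap(0xfebf0000, 0x1000);
 *	u32 status;
 *
 *	if (!regs)
 *		return -ENOMEM;
 *	status = readl(regs + 0x04);
 *	writel(status | 1, regs + 0x04);
 *	iounmap(regs);
 */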

#ifdef __KERNEL__

#include <asm-generic/iomap.h>

#include <linux/vmalloc.h>

/*
 * Convert a virtual cached pointer to an uncached pointer
 */
#define xlate_dev_kmem_ptr(p) p

static inline void
memset_io(volatile void __iomem *addr, unsigned char val, size_t count)
{
	memset((void __force *)addr, val, count);
}

static inline void
memcpy_fromio(void *dst, const volatile void __iomem *src, size_t count)
{
	memcpy(dst, (const void __force *)src, count);
}

static inline void
memcpy_toio(volatile void __iomem *dst, const void *src, size_t count)
{
	memcpy((void __force *)dst, src, count);
}
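
/*
 * Illustrative sketch, not part of this header: clearing a mapped frame
 * buffer and copying an image into it (all names are hypothetical):
 *
 *	void __iomem *fb = ioremap_wc(fb_phys, fb_len);
 *
 *	memset_io(fb, 0, fb_len);
 *	memcpy_toio(fb, image_data, image_len);
 *	iounmap(fb);
 */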

/*
 * This is needed for two cases:
 * 1. Out of order aware processors
 * 2. Accidentally out of order processors (PPro errata #51)
 */
static inline void flush_write_buffers(void)
{
#if defined(CONFIG_X86_OOSTORE) || defined(CONFIG_X86_PPRO_FENCE)
	asm volatile("lock; addl $0,0(%%esp)": : :"memory");
#endif
}

#endif /* __KERNEL__ */

extern void native_io_delay(void);

extern int io_delay_type;
extern void io_delay_init(void);

static inline void slow_down_io(void)
{
	native_io_delay();
#ifdef REALLY_SLOW_IO
	native_io_delay();
	native_io_delay();
	native_io_delay();
#endif
}

#define BUILDIO(bwl, bw, type) \
static inline void out##bwl(unsigned type value, int port) \
{ \
	asm volatile("out" #bwl " %" #bw "0, %w1" \
		     : : "a"(value), "Nd"(port)); \
} \
static inline unsigned type in##bwl(int port) \
{ \
	unsigned type value; \
	asm volatile("in" #bwl " %w1, %" #bw "0" \
		     : "=a"(value) : "Nd"(port)); \
	return value; \
} \
static inline void out##bwl##_p(unsigned type value, int port) \
{ \
	out##bwl(value, port); \
	slow_down_io(); \
} \
static inline unsigned type in##bwl##_p(int port) \
{ \
	unsigned type value = in##bwl(port); \
	slow_down_io(); \
	return value; \
} \
static inline void outs##bwl(int port, const void *addr, unsigned long count) \
{ \
	asm volatile("rep; outs" #bwl \
		     : "+S"(addr), "+c"(count) : "d"(port)); \
} \
static inline void ins##bwl(int port, void *addr, unsigned long count) \
{ \
	asm volatile("rep; ins" #bwl \
		     : "+D"(addr), "+c"(count) : "d"(port)); \
}

BUILDIO(b, b, char)
BUILDIO(w, w, short)
BUILDIO(l, , int)

#if defined(__KERNEL__) && !defined(__ASSEMBLY__)

/* We will be supplying our own /dev/mem implementation */
#define ARCH_HAS_DEV_MEM

#define bvec_to_pseudophys(bv) (page_to_pseudophys((bv)->bv_page) + \
				(unsigned long)(bv)->bv_offset)

#define BIOVEC_PHYS_MERGEABLE(vec1, vec2) \
	(__BIOVEC_PHYS_MERGEABLE(vec1, vec2) \
	 && bvec_to_pseudophys(vec1) + (vec1)->bv_len \
	    == bvec_to_pseudophys(vec2))
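
/*
 * Two bio_vecs are considered mergeable when the generic test passes and
 * the first vector ends exactly where the second one begins in
 * pseudo-physical space, i.e. the pair covers one physically contiguous
 * range.
 */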

extern void *xlate_dev_mem_ptr(unsigned long phys);
extern void unxlate_dev_mem_ptr(unsigned long phys, void *addr);

extern int ioremap_check_change_attr(unsigned long mfn, unsigned long size,
				unsigned long prot_val);
extern void __iomem *ioremap_wc(resource_size_t offset, unsigned long size);

/*
 * early_ioremap() and early_iounmap() are for temporary early boot-time
 * mappings, before the real ioremap() is functional.
 * A boot-time mapping is currently limited to at most 16 pages.
 */
extern void early_ioremap_init(void);
extern void early_ioremap_reset(void);
extern void __iomem *early_ioremap(resource_size_t phys_addr,
				unsigned long size);
extern void __iomem *early_memremap(resource_size_t phys_addr,
				unsigned long size);
extern void __iomem *early_memremap_ro(resource_size_t phys_addr,
				unsigned long size);
extern void early_iounmap(void __iomem *addr, unsigned long size);
extern void fixup_early_ioremap(void);
extern bool is_early_ioremap_ptep(pte_t *ptep);
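
/*
 * Illustrative sketch, not part of this header: peeking at a firmware
 * table during early boot, before the real ioremap() works (the address
 * and length are hypothetical):
 *
 *	void __iomem *p = early_ioremap(0xe0000, 0x100);
 *
 *	(inspect the table through p)
 *	early_iounmap(p, 0x100);
 */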

#define IO_SPACE_LIMIT 0xffff

#endif /* __KERNEL__ && !__ASSEMBLY__ */

#endif /* _ASM_X86_IO_H */