/****************************************************************************
 * Copyright 2002-2005: Level 5 Networks Inc.
 * Copyright 2005-2008: Solarflare Communications Inc,
 *                      9501 Jeronimo Road, Suite 250,
 *                      Irvine, CA 92618, USA
 *
 * Maintained by Solarflare Communications
 *  <linux-xen-drivers@solarflare.com>
 *  <onload-dev@solarflare.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation, incorporated herein by reference.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 ****************************************************************************
 */
/*! \cidoxg_include_ci_compat */

#ifndef __CI_COMPAT_GCC_X86_H__
#define __CI_COMPAT_GCC_X86_H__

/*
** The facts:
**
**   SSE2  lfence, mfence, pause
*/

/*
   Barriers to enforce ordering with respect to:

   normal memory use:  ci_wmb, ci_rmb, ci_mb
   IO bus access use:  ci_wiob, ci_riob, ci_iob
*/

/* Full memory barrier: a locked read-modify-write on the stack serialises
** stores and loads on x86; the "memory" clobber stops GCC from reordering
** memory accesses across the asm. */
#if defined(__x86_64__)
# define ci_x86_mb()  __asm__ __volatile__ ("lock; addl $0,0(%%rsp)":::"memory")
#else
# define ci_x86_mb()  __asm__ __volatile__ ("lock; addl $0,0(%%esp)":::"memory")
#endif

/* ?? measure the impact of latency of sfence on a modern processor before we
   take a decision on how to integrate with respect to writecombining */

/* DJR: I don't think we need to add "memory" here.  It means the asm does
** something to memory that GCC doesn't understand.  But all this does is
** commit changes that GCC thinks have already happened.  NB. GCC will not
** reorder across a __volatile__ __asm__ anyway.
*/
#define ci_gcc_fence()    __asm__ __volatile__ ("")

/* GCC/GAS from 2.96 onwards understands the SSE/SSE2 fence mnemonics;
** older assemblers need the raw opcode bytes. */
#if __GNUC__ >= 3 || (__GNUC__ == 2 && __GNUC_MINOR__ >= 96)
# define ci_x86_sfence()  __asm__ __volatile__ ("sfence")
# define ci_x86_lfence()  __asm__ __volatile__ ("lfence")
# define ci_x86_mfence()  __asm__ __volatile__ ("mfence")
#else
# define ci_x86_sfence()  __asm__ __volatile__ (".byte 0x0F, 0xAE, 0xF8")
# define ci_x86_lfence()  __asm__ __volatile__ (".byte 0x0F, 0xAE, 0xE8")
# define ci_x86_mfence()  __asm__ __volatile__ (".byte 0x0F, 0xAE, 0xF0")
#endif


/* x86 processors to P4 Xeon store in-order unless executing streaming
   extensions or when using writecombining.

   Hence we do not define ci_wmb to use sfence by default.  Requirement is
   that we do not use writecombining to memory and any code which uses SSE
   extensions must call sfence directly.

   We need to track non intel clones which may support out of order store.
*/

/* NOTE(review): the conditional structure below was reconstructed — the
** original #if/#else/#endif lines were missing from this copy of the file.
** Confirm the exact guard macro names (CI_CPU_OOS / CI_CPU_HAS_SSE /
** CI_CPU_HAS_SSE2) against the upstream source. */
#ifdef CI_CPU_OOS
  /* CPU may store out-of-order: need a real store fence. */
# ifdef CI_CPU_HAS_SSE
#  define ci_wmb()   ci_x86_sfence()
# else
#  define ci_wmb()   ci_x86_mb()
# endif
#else
  /* In-order stores: a compiler barrier is sufficient. */
# define ci_wmb()    ci_gcc_fence()
#endif

#ifdef CI_CPU_HAS_SSE2
# define ci_rmb()    ci_x86_lfence()
# define ci_mb()     ci_x86_mfence()
# define ci_riob()   ci_x86_lfence()
# define ci_wiob()   ci_x86_sfence()
# define ci_iob()    ci_x86_mfence()
#else
  /* No SSE2 fences: fall back to the locked-add full barrier (sfence for
  ** write-ordering when plain SSE is available). */
# ifdef CI_CPU_HAS_SSE
#  define ci_wiob()  ci_x86_sfence()
# else
#  define ci_wiob()  ci_x86_mb()
# endif
# define ci_rmb()    ci_x86_mb()
# define ci_mb()     ci_x86_mb()
# define ci_riob()   ci_x86_mb()
# define ci_iob()    ci_x86_mb()
#endif

/* Physical addresses fit in an unsigned long on this platform; the format
** macro matches for use in printf-style calls. */
typedef unsigned long ci_phys_addr_t;
#define ci_phys_addr_fmt  "%lx"

#endif  /* __CI_COMPAT_GCC_X86_H__ */