restricted ia64 patches to ia64 again, they still break builds on
diff --git a/include/asm-ia64/intrinsics.h b/include/asm-ia64/intrinsics.h
index 1940874..976e3c8 100644
--- a/include/asm-ia64/intrinsics.h
+++ b/include/asm-ia64/intrinsics.h
 extern unsigned long __bad_size_for_ia64_fetch_and_add (void);
 extern unsigned long __bad_increment_for_ia64_fetch_and_add (void);
 
-#define IA64_FETCHADD(tmp,v,n,sz,sem)                                          \
+#define IA64_FETCHADD(tmp,v,n,sz)                                              \
 ({                                                                             \
        switch (sz) {                                                           \
              case 4:                                                           \
-               __asm__ __volatile__ ("fetchadd4."sem" %0=[%1],%2"              \
+               __asm__ __volatile__ ("fetchadd4.rel %0=[%1],%2"                \
                                      : "=r"(tmp) : "r"(v), "i"(n) : "memory"); \
                break;                                                          \
                                                                                \
              case 8:                                                           \
-               __asm__ __volatile__ ("fetchadd8."sem" %0=[%1],%2"              \
+               __asm__ __volatile__ ("fetchadd8.rel %0=[%1],%2"                \
                                      : "=r"(tmp) : "r"(v), "i"(n) : "memory"); \
                break;                                                          \
                                                                                \
@@ -35,34 +35,32 @@ extern unsigned long __bad_increment_for_ia64_fetch_and_add (void);
        }                                                                       \
 })
 
-#define ia64_fetchadd(i,v,sem)                                                         \
+#define ia64_fetch_and_add(i,v)                                                                \
 ({                                                                                     \
        __u64 _tmp;                                                                     \
        volatile __typeof__(*(v)) *_v = (v);                                            \
        /* Can't use a switch () here: gcc isn't always smart enough for that... */     \
        if ((i) == -16)                                                                 \
-               IA64_FETCHADD(_tmp, _v, -16, sizeof(*(v)), sem);                        \
+               IA64_FETCHADD(_tmp, _v, -16, sizeof(*(v)));                             \
        else if ((i) == -8)                                                             \
-               IA64_FETCHADD(_tmp, _v, -8, sizeof(*(v)), sem);                         \
+               IA64_FETCHADD(_tmp, _v, -8, sizeof(*(v)));                              \
        else if ((i) == -4)                                                             \
-               IA64_FETCHADD(_tmp, _v, -4, sizeof(*(v)), sem);                         \
+               IA64_FETCHADD(_tmp, _v, -4, sizeof(*(v)));                              \
        else if ((i) == -1)                                                             \
-               IA64_FETCHADD(_tmp, _v, -1, sizeof(*(v)), sem);                         \
+               IA64_FETCHADD(_tmp, _v, -1, sizeof(*(v)));                              \
        else if ((i) == 1)                                                              \
-               IA64_FETCHADD(_tmp, _v, 1, sizeof(*(v)), sem);                          \
+               IA64_FETCHADD(_tmp, _v, 1, sizeof(*(v)));                               \
        else if ((i) == 4)                                                              \
-               IA64_FETCHADD(_tmp, _v, 4, sizeof(*(v)), sem);                          \
+               IA64_FETCHADD(_tmp, _v, 4, sizeof(*(v)));                               \
        else if ((i) == 8)                                                              \
-               IA64_FETCHADD(_tmp, _v, 8, sizeof(*(v)), sem);                          \
+               IA64_FETCHADD(_tmp, _v, 8, sizeof(*(v)));                               \
        else if ((i) == 16)                                                             \
-               IA64_FETCHADD(_tmp, _v, 16, sizeof(*(v)), sem);                         \
+               IA64_FETCHADD(_tmp, _v, 16, sizeof(*(v)));                              \
        else                                                                            \
                _tmp = __bad_increment_for_ia64_fetch_and_add();                        \
-       (__typeof__(*(v))) (_tmp);      /* return old value */                          \
+       (__typeof__(*(v))) (_tmp + (i));        /* return new value */                  \
 })
 
-#define ia64_fetch_and_add(i,v)        (ia64_fetchadd(i, v, "rel") + (i)) /* return new value */
-
 /*
  * This function doesn't exist, so you'll get a linker error if
  * something tries to do an invalid xchg().
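
The hunk above drops the sem argument so that IA64_FETCHADD always uses release ("rel") ordering, and it folds the "+ (i)" adjustment into ia64_fetch_and_add() itself: the fetchadd instruction returns the value the location held before the add, so adding the increment back yields the new value that callers now get. A minimal, portable sketch of that old-value/new-value distinction (this uses the GCC __atomic builtins purely for illustration, not the ia64 asm; the counter name is made up):

#include <stdio.h>

static long counter = 40;

int main(void)
{
	/* fetchadd-style: returns the contents *before* the add */
	long old = __atomic_fetch_add(&counter, 2, __ATOMIC_RELEASE);

	/* ia64_fetch_and_add()-style: new value is old + increment */
	long new_val = old + 2;

	/* prints old=40 new=42 counter=42 */
	printf("old=%ld new=%ld counter=%ld\n", old, new_val, counter);
	return 0;
}

Only increments that encode into fetchadd's immediate field (-16, -8, -4, -1, 1, 4, 8, 16) are legal, which is why the macro chains the if/else tests and falls through to the __bad_increment_for_ia64_fetch_and_add() link error otherwise.
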
@@ -129,7 +127,7 @@ extern long __cmpxchg_called_with_bad_pointer(void);
              case 8: _o_ = (__u64) (long) (old); break;                                \
              default: break;                                                           \
        }                                                                               \
-       __asm__ __volatile__ ("mov ar.ccv=%0;;" :: "rO"(_o_));                          \
+        __asm__ __volatile__ ("mov ar.ccv=%0;;" :: "rO"(_o_));                         \
        switch (size) {                                                                 \
              case 1:                                                                   \
                __asm__ __volatile__ ("cmpxchg1."sem" %0=[%1],%2,ar.ccv"                \
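
The second hunk is a whitespace-only change to the line that loads the expected old value into ar.ccv, but the surrounding context shows the cmpxchg protocol: ar.ccv is primed with the comparand, then cmpxchgN.sem swaps in the new value only if memory still equals ar.ccv, returning the prior contents either way. A portable sketch of that compare-and-swap contract (GCC __sync builtin for illustration only, not the ia64 asm; the word name is made up):

#include <stdio.h>

static unsigned long word = 0;

int main(void)
{
	/* word == expected (0): swap happens, old value 0 comes back */
	unsigned long prev = __sync_val_compare_and_swap(&word, 0UL, 1UL);

	/* word is now 1, not 0: no swap, memory untouched, 1 comes back */
	unsigned long again = __sync_val_compare_and_swap(&word, 0UL, 1UL);

	/* prints prev=0 again=1 word=1 */
	printf("prev=%lu again=%lu word=%lu\n", prev, again, word);
	return 0;
}

Returning the old contents unconditionally lets callers tell success (the returned value equals the comparand) from failure without a separate flag.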