New version, fixes a PowerPC typo.

This patch adds local (per-CPU) variants of the atomic primitives:
cmpxchg_local(), cmpxchg64_local() and xchg_local() have the same
semantics as cmpxchg()/xchg(), but are only atomic with respect to the
local CPU (no lock prefix on x86, no lwsync/isync barriers on powerpc),
which makes them cheaper for data that only the owning CPU modifies.
It also completes the atomic64_* operations (cmpxchg, xchg, add_unless,
inc_not_zero) on x86_64, powerpc and mips, and fills in the missing
atomic_long_* wrappers in asm-generic/atomic.h.
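
As a usage sketch (not part of this patch; the per-CPU event counter
and its increment function below are hypothetical, purely to
illustrate the intended use):

	#include <linux/percpu.h>
	#include <asm/system.h>		/* cmpxchg_local() */

	static DEFINE_PER_CPU(unsigned long, event_count);

	/*
	 * Safe against interrupts on the same CPU without disabling
	 * them, and without the cost of a locked bus cycle: the
	 * update only needs to be atomic w.r.t. the local processor.
	 */
	static void event_count_inc(void)
	{
		unsigned long *p = &get_cpu_var(event_count);
		unsigned long old;

		do {
			old = *p;
		} while (cmpxchg_local(p, old, old + 1) != old);

		put_cpu_var(event_count);
	}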
--- a/include/asm-i386/atomic.h
+++ b/include/asm-i386/atomic.h
@@ -207,8 +207,9 @@ static __inline__ int atomic_sub_return(
return atomic_add_return(-i,v);
}
-#define atomic_cmpxchg(v, old, new) ((int)cmpxchg(&((v)->counter), old, new))
-#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
+#define atomic_cmpxchg(v, old, new) \
+ ((__typeof__((v)->counter))cmpxchg(&((v)->counter), (old), (new)))
+#define atomic_xchg(v, new) (xchg(&((v)->counter), (new)))
/**
* atomic_add_unless - add unless the number is a given value
@@ -221,7 +222,7 @@ #define atomic_xchg(v, new) (xchg(&((v)-
*/
#define atomic_add_unless(v, a, u) \
({ \
- int c, old; \
+ __typeof__((v)->counter) c, old; \
c = atomic_read(v); \
for (;;) { \
if (unlikely(c == (u))) \
--- a/include/asm-i386/system.h
+++ b/include/asm-i386/system.h
@@ -267,6 +267,9 @@ #define __HAVE_ARCH_CMPXCHG 1
#define cmpxchg(ptr,o,n)\
((__typeof__(*(ptr)))__cmpxchg((ptr),(unsigned long)(o),\
(unsigned long)(n),sizeof(*(ptr))))
+#define cmpxchg_local(ptr,o,n)\
+ ((__typeof__(*(ptr)))__cmpxchg_local((ptr),(unsigned long)(o),\
+ (unsigned long)(n),sizeof(*(ptr))))
#endif
static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
@@ -296,6 +299,33 @@ static inline unsigned long __cmpxchg(vo
return old;
}
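+/*
+ * Same as __cmpxchg() above, but without the LOCK prefix: only
+ * atomic with respect to the current CPU, for data that no other
+ * CPU modifies concurrently.
+ */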
+static inline unsigned long __cmpxchg_local(volatile void *ptr,
+ unsigned long old, unsigned long new, int size)
+{
+ unsigned long prev;
+ switch (size) {
+ case 1:
+ __asm__ __volatile__("cmpxchgb %b1,%2"
+ : "=a"(prev)
+ : "q"(new), "m"(*__xg(ptr)), "0"(old)
+ : "memory");
+ return prev;
+ case 2:
+ __asm__ __volatile__("cmpxchgw %w1,%2"
+ : "=a"(prev)
+ : "r"(new), "m"(*__xg(ptr)), "0"(old)
+ : "memory");
+ return prev;
+ case 4:
+ __asm__ __volatile__("cmpxchgl %1,%2"
+ : "=a"(prev)
+ : "r"(new), "m"(*__xg(ptr)), "0"(old)
+ : "memory");
+ return prev;
+ }
+ return old;
+}
+
#ifndef CONFIG_X86_CMPXCHG
/*
 * Building a kernel capable of running on 80386. It may be necessary to
@@ -332,6 +362,17 @@ ({ \
(unsigned long)(n), sizeof(*(ptr))); \
__ret; \
})
+#define cmpxchg_local(ptr,o,n) \
+({ \
+ __typeof__(*(ptr)) __ret; \
+ if (likely(boot_cpu_data.x86 > 3)) \
+ __ret = __cmpxchg_local((ptr), (unsigned long)(o), \
+ (unsigned long)(n), sizeof(*(ptr))); \
+ else \
+ __ret = cmpxchg_386((ptr), (unsigned long)(o), \
+ (unsigned long)(n), sizeof(*(ptr))); \
+ __ret; \
+})
#endif
#ifdef CONFIG_X86_CMPXCHG64
@@ -350,10 +391,26 @@ static inline unsigned long long __cmpxc
return prev;
}
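+/*
+ * cmpxchg8b without the LOCK prefix: only atomic with respect to
+ * the current CPU.
+ */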
+static inline unsigned long long __cmpxchg64_local(volatile void *ptr,
+ unsigned long long old, unsigned long long new)
+{
+ unsigned long long prev;
+ __asm__ __volatile__("cmpxchg8b %3"
+ : "=A"(prev)
+ : "b"((unsigned long)new),
+ "c"((unsigned long)(new >> 32)),
+ "m"(*__xg(ptr)),
+ "0"(old)
+ : "memory");
+ return prev;
+}
+
#define cmpxchg64(ptr,o,n)\
((__typeof__(*(ptr)))__cmpxchg64((ptr),(unsigned long long)(o),\
(unsigned long long)(n)))
-
+#define cmpxchg64_local(ptr,o,n)\
+ ((__typeof__(*(ptr)))__cmpxchg64_local((ptr),(unsigned long long)(o),\
+ (unsigned long long)(n)))
#endif
/*
--- a/include/asm-x86_64/atomic.h
+++ b/include/asm-x86_64/atomic.h
@@ -388,7 +388,12 @@ static __inline__ long atomic64_sub_retu
#define atomic64_inc_return(v) (atomic64_add_return(1,v))
#define atomic64_dec_return(v) (atomic64_sub_return(1,v))
-#define atomic_cmpxchg(v, old, new) ((int)cmpxchg(&((v)->counter), old, new))
+#define atomic64_cmpxchg(v, old, new) \
+ ((__typeof__((v)->counter))cmpxchg(&((v)->counter), old, new))
+#define atomic64_xchg(v, new) (xchg(&((v)->counter), new))
+
+#define atomic_cmpxchg(v, old, new) \
+ ((__typeof__((v)->counter))cmpxchg(&((v)->counter), old, new))
#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
/**
@@ -402,7 +407,7 @@ #define atomic_xchg(v, new) (xchg(&((v)-
*/
#define atomic_add_unless(v, a, u) \
({ \
- int c, old; \
+ __typeof__((v)->counter) c, old; \
c = atomic_read(v); \
for (;;) { \
if (unlikely(c == (u))) \
@@ -416,6 +421,9 @@ ({ \
})
#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
+#define atomic64_add_unless(v, a, u) atomic_add_unless((v), (a), (u))
+#define atomic64_inc_not_zero(v) atomic_inc_not_zero((v))
+
/* These are x86-specific, used by some header files */
#define atomic_clear_mask(mask, addr) \
__asm__ __volatile__(LOCK_PREFIX "andl %0,%1" \
--- a/include/asm-x86_64/system.h
+++ b/include/asm-x86_64/system.h
@@ -208,9 +208,45 @@ static inline unsigned long __cmpxchg(vo
return old;
}
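+/*
+ * Same as __cmpxchg(), but without the LOCK prefix: only atomic
+ * with respect to the current CPU.
+ */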
+static inline unsigned long __cmpxchg_local(volatile void *ptr,
+ unsigned long old, unsigned long new, int size)
+{
+ unsigned long prev;
+ switch (size) {
+ case 1:
+ __asm__ __volatile__("cmpxchgb %b1,%2"
+ : "=a"(prev)
+ : "q"(new), "m"(*__xg(ptr)), "0"(old)
+ : "memory");
+ return prev;
+ case 2:
+ __asm__ __volatile__("cmpxchgw %w1,%2"
+ : "=a"(prev)
+ : "r"(new), "m"(*__xg(ptr)), "0"(old)
+ : "memory");
+ return prev;
+ case 4:
+ __asm__ __volatile__("cmpxchgl %k1,%2"
+ : "=a"(prev)
+ : "r"(new), "m"(*__xg(ptr)), "0"(old)
+ : "memory");
+ return prev;
+ case 8:
+ __asm__ __volatile__("cmpxchgq %1,%2"
+ : "=a"(prev)
+ : "r"(new), "m"(*__xg(ptr)), "0"(old)
+ : "memory");
+ return prev;
+ }
+ return old;
+}
+
#define cmpxchg(ptr,o,n)\
((__typeof__(*(ptr)))__cmpxchg((ptr),(unsigned long)(o),\
(unsigned long)(n),sizeof(*(ptr))))
+#define cmpxchg_local(ptr,o,n)\
+ ((__typeof__(*(ptr)))__cmpxchg_local((ptr),(unsigned long)(o),\
+ (unsigned long)(n),sizeof(*(ptr))))
#ifdef CONFIG_SMP
#define smp_mb() mb()
--- a/include/asm-powerpc/atomic.h
+++ b/include/asm-powerpc/atomic.h
@@ -165,7 +165,8 @@ static __inline__ int atomic_dec_return(
return t;
}
-#define atomic_cmpxchg(v, o, n) ((int)cmpxchg(&((v)->counter), (o), (n)))
+#define atomic_cmpxchg(v, o, n) \
+ ((__typeof__((v)->counter))cmpxchg(&((v)->counter), (o), (n)))
#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
/**
@@ -411,6 +412,44 @@ static __inline__ long atomic64_dec_if_p
return t;
}
+#define atomic64_cmpxchg(v, o, n) \
+ ((__typeof__((v)->counter))cmpxchg(&((v)->counter), (o), (n)))
+#define atomic64_xchg(v, new) (xchg(&((v)->counter), new))
+
+/**
+ * atomic64_add_unless - add unless the number is a given value
+ * @v: pointer of type atomic64_t
+ * @a: the amount to add to v...
+ * @u: ...unless v is equal to u.
+ *
+ * Atomically adds @a to @v, so long as it was not @u.
+ * Returns non-zero if @v was not @u, and zero otherwise.
+ */
+static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
+{
+ long t;
+
+ __asm__ __volatile__ (
+ LWSYNC_ON_SMP
+"1: ldarx %0,0,%1 # atomic_add_unless\n\
+ cmpd 0,%0,%3 \n\
+ beq- 2f \n\
+ add %0,%2,%0 \n"
+ PPC405_ERR77(0,%1)
+" stdcx. %0,0,%1 \n\
+ bne- 1b \n"
+ ISYNC_ON_SMP
+" subf %0,%2,%0 \n\
+2:"
+ : "=&r" (t)
+ : "r" (&v->counter), "r" (a), "r" (u)
+ : "cc", "memory");
+
+ return t != u;
+}
+
+#define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
+
#endif /* __powerpc64__ */
#include <asm-generic/atomic.h>
--- a/include/asm-powerpc/system.h
+++ b/include/asm-powerpc/system.h
@@ -235,6 +235,29 @@ __xchg_u32(volatile void *p, unsigned lo
return prev;
}
+/*
+ * Atomic exchange, local CPU only
+ *
+ * Changes the memory location '*ptr' to be val and returns
+ * the previous value stored there.  Unlike __xchg_u32(), no
+ * memory barriers are used: the exchange is only atomic with
+ * respect to the local processor.
+ */
+static __inline__ unsigned long
+__xchg_u32_local(volatile void *p, unsigned long val)
+{
+ unsigned long prev;
+
+ __asm__ __volatile__(
+"1: lwarx %0,0,%2 \n"
+ PPC405_ERR77(0,%2)
+" stwcx. %3,0,%2 \n\
+ bne- 1b"
+ : "=&r" (prev), "+m" (*(volatile unsigned int *)p)
+ : "r" (p), "r" (val)
+ : "cc", "memory");
+
+ return prev;
+}
+
#ifdef CONFIG_PPC64
static __inline__ unsigned long
__xchg_u64(volatile void *p, unsigned long val)
@@ -254,6 +277,23 @@ __xchg_u64(volatile void *p, unsigned lo
return prev;
}
+
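+/* 64-bit version of __xchg_u32_local(): no memory barriers. */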
+static __inline__ unsigned long
+__xchg_u64_local(volatile void *p, unsigned long val)
+{
+ unsigned long prev;
+
+ __asm__ __volatile__(
+"1: ldarx %0,0,%2 \n"
+ PPC405_ERR77(0,%2)
+" stdcx. %3,0,%2 \n\
+ bne- 1b"
+ : "=&r" (prev), "+m" (*(volatile unsigned long *)p)
+ : "r" (p), "r" (val)
+ : "cc", "memory");
+
+ return prev;
+}
#endif
/*
@@ -277,12 +317,33 @@ #endif
return x;
}
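+/*
+ * Size-dispatching wrapper for the local (barrier-free) exchange,
+ * mirroring __xchg() above.
+ */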
+static __inline__ unsigned long
+__xchg_local(volatile void *ptr, unsigned long x, unsigned int size)
+{
+ switch (size) {
+ case 4:
+ return __xchg_u32_local(ptr, x);
+#ifdef CONFIG_PPC64
+ case 8:
+ return __xchg_u64_local(ptr, x);
+#endif
+ }
+ __xchg_called_with_bad_pointer();
+ return x;
+}
#define xchg(ptr,x) \
({ \
__typeof__(*(ptr)) _x_ = (x); \
(__typeof__(*(ptr))) __xchg((ptr), (unsigned long)_x_, sizeof(*(ptr))); \
})
+#define xchg_local(ptr,x) \
+ ({ \
+ __typeof__(*(ptr)) _x_ = (x); \
+ (__typeof__(*(ptr))) __xchg_local((ptr), \
+ (unsigned long)_x_, sizeof(*(ptr))); \
+ })
+
#define tas(ptr) (xchg((ptr),1))
/*
@@ -314,6 +375,28 @@ __cmpxchg_u32(volatile unsigned int *p,
return prev;
}
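+/*
+ * Same as __cmpxchg_u32(), but without the memory barriers: only
+ * atomic with respect to the local processor.
+ */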
+static __inline__ unsigned long
+__cmpxchg_u32_local(volatile unsigned int *p, unsigned long old,
+ unsigned long new)
+{
+ unsigned int prev;
+
+ __asm__ __volatile__ (
+"1: lwarx %0,0,%2 # __cmpxchg_u32\n\
+ cmpw 0,%0,%3\n\
+ bne- 2f\n"
+ PPC405_ERR77(0,%2)
+" stwcx. %4,0,%2\n\
+ bne- 1b"
+ "\n\
+2:"
+ : "=&r" (prev), "+m" (*p)
+ : "r" (p), "r" (old), "r" (new)
+ : "cc", "memory");
+
+ return prev;
+}
+
#ifdef CONFIG_PPC64
static __inline__ unsigned long
__cmpxchg_u64(volatile unsigned long *p, unsigned long old, unsigned long new)
@@ -336,6 +419,27 @@ __cmpxchg_u64(volatile unsigned long *p,
return prev;
}
+
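+/* 64-bit version of __cmpxchg_u32_local(): no memory barriers. */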
+static __inline__ unsigned long
+__cmpxchg_u64_local(volatile unsigned long *p, unsigned long old,
+ unsigned long new)
+{
+ unsigned long prev;
+
+ __asm__ __volatile__ (
+"1: ldarx %0,0,%2 # __cmpxchg_u64\n\
+ cmpd 0,%0,%3\n\
+ bne- 2f\n\
+ stdcx. %4,0,%2\n\
+ bne- 1b"
+ "\n\
+2:"
+ : "=&r" (prev), "+m" (*p)
+ : "r" (p), "r" (old), "r" (new)
+ : "cc", "memory");
+
+ return prev;
+}
#endif
/* This function doesn't exist, so you'll get a linker error
@@ -358,6 +462,22 @@ #endif
return old;
}
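+/* Local-CPU-only counterpart of __cmpxchg() above. */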
+static __inline__ unsigned long
+__cmpxchg_local(volatile void *ptr, unsigned long old, unsigned long new,
+ unsigned int size)
+{
+ switch (size) {
+ case 4:
+ return __cmpxchg_u32_local(ptr, old, new);
+#ifdef CONFIG_PPC64
+ case 8:
+ return __cmpxchg_u64_local(ptr, old, new);
+#endif
+ }
+ __cmpxchg_called_with_bad_pointer();
+ return old;
+}
+
#define cmpxchg(ptr,o,n) \
({ \
__typeof__(*(ptr)) _o_ = (o); \
@@ -366,6 +486,15 @@ #define cmpxchg(ptr,o,n) \
(unsigned long)_n_, sizeof(*(ptr))); \
})
+
+#define cmpxchg_local(ptr,o,n) \
+ ({ \
+ __typeof__(*(ptr)) _o_ = (o); \
+ __typeof__(*(ptr)) _n_ = (n); \
+ (__typeof__(*(ptr))) __cmpxchg_local((ptr), (unsigned long)_o_, \
+ (unsigned long)_n_, sizeof(*(ptr))); \
+ })
+
#ifdef CONFIG_PPC64
/*
* We handle most unaligned accesses in hardware. On the other hand
--- a/include/asm-arm/atomic.h
+++ b/include/asm-arm/atomic.h
@@ -185,6 +185,7 @@ static inline int atomic_add_unless(atom
c = old;
return c != u;
}
+
#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
#define atomic_add(i, v) (void) atomic_add_return(i, v)
--- a/include/asm-mips/atomic.h
+++ b/include/asm-mips/atomic.h
@@ -292,8 +292,9 @@ static __inline__ int atomic_sub_if_posi
return result;
}
-#define atomic_cmpxchg(v, o, n) ((int)cmpxchg(&((v)->counter), (o), (n)))
-#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
+#define atomic_cmpxchg(v, o, n) \
+ ((__typeof__((v)->counter))cmpxchg(&((v)->counter), (o), (n)))
+#define atomic_xchg(v, new) (xchg(&((v)->counter), (new)))
/**
* atomic_add_unless - add unless the number is a given value
@@ -306,7 +307,7 @@ #define atomic_xchg(v, new) (xchg(&((v)-
*/
#define atomic_add_unless(v, a, u) \
({ \
- int c, old; \
+ __typeof__((v)->counter) c, old; \
c = atomic_read(v); \
while (c != (u) && (old = atomic_cmpxchg((v), c, c + (a))) != c) \
c = old; \
@@ -646,6 +647,29 @@ static __inline__ long atomic64_sub_if_p
return result;
}
+#define atomic64_cmpxchg(v, o, n) \
+ ((__typeof__((v)->counter))cmpxchg(&((v)->counter), (o), (n)))
+#define atomic64_xchg(v, new) (xchg(&((v)->counter), (new)))
+
+/**
+ * atomic64_add_unless - add unless the number is a given value
+ * @v: pointer of type atomic64_t
+ * @a: the amount to add to v...
+ * @u: ...unless v is equal to u.
+ *
+ * Atomically adds @a to @v, so long as it was not @u.
+ * Returns non-zero if @v was not @u, and zero otherwise.
+ */
+#define atomic64_add_unless(v, a, u) \
+({ \
+ __typeof__((v)->counter) c, old; \
+ c = atomic64_read(v); \
+ while (c != (u) && (old = atomic64_cmpxchg((v), c, c + (a))) != c) \
+ c = old; \
+ c != (u); \
+})
+#define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
+
#define atomic64_dec_return(v) atomic64_sub_return(1,(v))
#define atomic64_inc_return(v) atomic64_add_return(1,(v))
--- a/include/asm-generic/atomic.h
+++ b/include/asm-generic/atomic.h
@@ -66,6 +66,79 @@ static inline void atomic_long_sub(long
atomic64_sub(i, v);
}
+static inline int atomic_long_sub_and_test(long i, atomic_long_t *l)
+{
+ atomic64_t *v = (atomic64_t *)l;
+
+ return atomic64_sub_and_test(i, v);
+}
+
+static inline int atomic_long_dec_and_test(atomic_long_t *l)
+{
+ atomic64_t *v = (atomic64_t *)l;
+
+ return atomic64_dec_and_test(v);
+}
+
+static inline int atomic_long_inc_and_test(atomic_long_t *l)
+{
+ atomic64_t *v = (atomic64_t *)l;
+
+ return atomic64_inc_and_test(v);
+}
+
+static inline int atomic_long_add_negative(long i, atomic_long_t *l)
+{
+ atomic64_t *v = (atomic64_t *)l;
+
+ return atomic64_add_negative(i, v);
+}
+
+static inline long atomic_long_add_return(long i, atomic_long_t *l)
+{
+ atomic64_t *v = (atomic64_t *)l;
+
+ return atomic64_add_return(i, v);
+}
+
+static inline long atomic_long_sub_return(long i, atomic_long_t *l)
+{
+ atomic64_t *v = (atomic64_t *)l;
+
+ return atomic64_sub_return(i, v);
+}
+
+static inline long atomic_long_inc_return(atomic_long_t *l)
+{
+ atomic64_t *v = (atomic64_t *)l;
+
+ return atomic64_inc_return(v);
+}
+
+static inline long atomic_long_dec_return(atomic_long_t *l)
+{
+ atomic64_t *v = (atomic64_t *)l;
+
+ return atomic64_dec_return(v);
+}
+
+#if 0
+/*
+ * Disabled for now: atomic64_add_unless()/atomic64_inc_not_zero()
+ * are not yet provided by every 64-bit architecture (powerpc only
+ * gains them with this patch).
+ */
+static inline long atomic_long_add_unless(atomic_long_t *l, long a, long u)
+{
+ atomic64_t *v = (atomic64_t *)l;
+
+ return atomic64_add_unless(v, a, u);
+}
+
+static inline long atomic_long_inc_not_zero(atomic_long_t *l)
+{
+ atomic64_t *v = (atomic64_t *)l;
+
+ return atomic64_inc_not_zero(v);
+}
+#endif /* 0 */
+
#else
typedef atomic_t atomic_long_t;
@@ -113,5 +186,80 @@ static inline void atomic_long_sub(long
atomic_sub(i, v);
}
+static inline int atomic_long_sub_and_test(long i, atomic_long_t *l)
+{
+ atomic_t *v = (atomic_t *)l;
+
+ return atomic_sub_and_test(i, v);
+}
+
+static inline int atomic_long_dec_and_test(atomic_long_t *l)
+{
+ atomic_t *v = (atomic_t *)l;
+
+ return atomic_dec_and_test(v);
+}
+
+static inline int atomic_long_inc_and_test(atomic_long_t *l)
+{
+ atomic_t *v = (atomic_t *)l;
+
+ return atomic_inc_and_test(v);
+}
+
+static inline int atomic_long_add_negative(long i, atomic_long_t *l)
+{
+ atomic_t *v = (atomic_t *)l;
+
+ return atomic_add_negative(i, v);
+}
+
+static inline long atomic_long_add_return(long i, atomic_long_t *l)
+{
+ atomic_t *v = (atomic_t *)l;
+
+ return atomic_add_return(i, v);
+}
+
+static inline long atomic_long_sub_return(long i, atomic_long_t *l)
+{
+ atomic_t *v = (atomic_t *)l;
+
+ return atomic_sub_return(i, v);
+}
+
+static inline long atomic_long_inc_return(atomic_long_t *l)
+{
+ atomic_t *v = (atomic_t *)l;
+
+ return atomic_inc_return(v);
+}
+
+static inline long atomic_long_dec_return(atomic_long_t *l)
+{
+ atomic_t *v = (atomic_t *)l;
+
+ return atomic_dec_return(v);
+}
+
+static inline long atomic_long_add_unless(atomic_long_t *l, long a, long u)
+{
+ atomic_t *v = (atomic_t *)l;
+
+ return atomic_add_unless(v, a, u);
+}
+
+static inline long atomic_long_inc_not_zero(atomic_long_t *l)
+{
+ atomic_t *v = (atomic_t *)l;
+
+ return atomic_inc_not_zero(v);
+}
+
#endif
+
+#define atomic_long_cmpxchg(l, old, new) \
+ ((__typeof__((l)->counter))cmpxchg(&((l)->counter), (old), (new)))
+#define atomic_long_xchg(l, new) (xchg(&((l)->counter), (new)))
+
#endif
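
For the asm-generic side, a similar sketch of the completed
atomic_long_* interface (the reference count and release function are
hypothetical):

	#include <asm-generic/atomic.h>

	extern void cleanup_users(void);	/* hypothetical */

	static atomic_long_t nr_users = ATOMIC_LONG_INIT(0);

	static void user_get(void)
	{
		atomic_long_inc(&nr_users);
	}

	static void user_put(void)
	{
		/*
		 * atomic_long_dec_and_test() maps to atomic_dec_and_test()
		 * or atomic64_dec_and_test() depending on BITS_PER_LONG.
		 */
		if (atomic_long_dec_and_test(&nr_users))
			cleanup_users();
	}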