On Tue, 28 Mar 2006, Chen, Kenneth W wrote:
> Why not make unlock_buffer() use test_and_clear_bit()? Utilize its implied
> full memory fence and throw away the return value? OK, OK, this is obscure.
> Then introduce a clear_bit_memory_fence() API of some sort.
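For illustration, a minimal sketch of that idea, assuming the 2.6.16-era
unlock_buffer() in fs/buffer.c (this is not part of any posted patch, just
the shape the suggestion would take):

	void fastcall unlock_buffer(struct buffer_head *bh)
	{
		/*
		 * Rely on the full memory fence implied by
		 * test_and_clear_bit() and deliberately discard its
		 * return value; the old clear_buffer_locked() +
		 * smp_mb__after_clear_bit() pair becomes unnecessary.
		 */
		(void) test_and_clear_bit(BH_Lock, &bh->b_state);
		wake_up_bit(&bh->b_state, BH_Lock);
	}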
Only for IA64's sake? Better to clean up the bitops as you suggested earlier.
The open-ended acquires there leave a weird feeling.
Something like this? (Builds fine, not tested yet.)
Fix up the bitops for ia64.
Signed-off-by: Christoph Lameter <[email protected]>
Index: linux-2.6.16/include/asm-ia64/bitops.h
===================================================================
--- linux-2.6.16.orig/include/asm-ia64/bitops.h 2006-03-19 21:53:29.000000000 -0800
+++ linux-2.6.16/include/asm-ia64/bitops.h 2006-03-28 16:35:51.000000000 -0800
@@ -38,13 +38,14 @@ set_bit (int nr, volatile void *addr)
volatile __u32 *m;
CMPXCHG_BUGCHECK_DECL
+	/* The volatile load of *m compiles to ld.acq (acquire) on ia64 */
m = (volatile __u32 *) addr + (nr >> 5);
bit = 1 << (nr & 31);
do {
CMPXCHG_BUGCHECK(m);
old = *m;
new = old | bit;
- } while (cmpxchg_acq(m, old, new) != old);
+ } while (cmpxchg_rel(m, old, new) != old);
}
/**
@@ -65,7 +66,9 @@ __set_bit (int nr, volatile void *addr)
/*
- * clear_bit() has "acquire" semantics.
+ * With cmpxchg_rel below, clear_bit() has release semantics, and the
+ * volatile (ld.acq) load gives it acquire semantics, so no barrier is
+ * needed on either side of it.
 */
-#define smp_mb__before_clear_bit()	smp_mb()
+#define smp_mb__before_clear_bit()	do { /* skip */; } while (0)
#define smp_mb__after_clear_bit()	do { /* skip */; } while (0)
/**
@@ -85,13 +86,14 @@ clear_bit (int nr, volatile void *addr)
volatile __u32 *m;
CMPXCHG_BUGCHECK_DECL
+	/* The volatile load of *m compiles to ld.acq (acquire) on ia64 */
m = (volatile __u32 *) addr + (nr >> 5);
mask = ~(1 << (nr & 31));
do {
CMPXCHG_BUGCHECK(m);
old = *m;
new = old & mask;
- } while (cmpxchg_acq(m, old, new) != old);
+ } while (cmpxchg_rel(m, old, new) != old);
}
/**
@@ -121,13 +123,14 @@ change_bit (int nr, volatile void *addr)
volatile __u32 *m;
CMPXCHG_BUGCHECK_DECL
+	/* The volatile load of *m compiles to ld.acq (acquire) on ia64 */
m = (volatile __u32 *) addr + (nr >> 5);
bit = (1 << (nr & 31));
do {
CMPXCHG_BUGCHECK(m);
old = *m;
new = old ^ bit;
- } while (cmpxchg_acq(m, old, new) != old);
+ } while (cmpxchg_rel(m, old, new) != old);
}
/**
@@ -160,13 +163,14 @@ test_and_set_bit (int nr, volatile void
volatile __u32 *m;
CMPXCHG_BUGCHECK_DECL
+	/* The volatile load of *m compiles to ld.acq (acquire) on ia64 */
m = (volatile __u32 *) addr + (nr >> 5);
bit = 1 << (nr & 31);
do {
CMPXCHG_BUGCHECK(m);
old = *m;
new = old | bit;
- } while (cmpxchg_acq(m, old, new) != old);
+ } while (cmpxchg_rel(m, old, new) != old);
return (old & bit) != 0;
}
@@ -205,13 +209,14 @@ test_and_clear_bit (int nr, volatile voi
volatile __u32 *m;
CMPXCHG_BUGCHECK_DECL
+	/* The volatile load of *m compiles to ld.acq (acquire) on ia64 */
m = (volatile __u32 *) addr + (nr >> 5);
mask = ~(1 << (nr & 31));
do {
CMPXCHG_BUGCHECK(m);
old = *m;
new = old & mask;
- } while (cmpxchg_acq(m, old, new) != old);
+ } while (cmpxchg_rel(m, old, new) != old);
return (old & ~mask) != 0;
}
-
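For a sense of the net effect, here is a hypothetical unlock-style caller
(the lock word, bit number, and function name are made up for the example;
clear_bit() and the smp_mb__*_clear_bit() macros are the real APIs):

	static unsigned long my_lock_word;
	#define MY_LOCK_BIT	0

	static void my_unlock(void)
	{
		/*
		 * With the patch applied, the cmpxchg.rel inside
		 * clear_bit() orders all earlier stores before the bit
		 * is cleared, and the volatile (ld.acq) load inside it
		 * keeps later accesses from moving up, so both
		 * smp_mb__before_clear_bit() and
		 * smp_mb__after_clear_bit() are no-ops on ia64.
		 */
		clear_bit(MY_LOCK_BIT, &my_lock_word);
	}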