This patch adds another architecture helper,
arch_mutex_fast_lock_retval(), to allow mutex_lock_interruptible
to have an architecture defined fast path.
Signed-off-by: Nicolas Pitre <[email protected]>
---
Index: linux-2.6/include/asm-generic/mutex.h
===================================================================
--- linux-2.6.orig/include/asm-generic/mutex.h
+++ linux-2.6/include/asm-generic/mutex.h
@@ -26,6 +26,24 @@ do { \
} while (0)
/**
+ * arch_mutex_fast_lock_retval - lock mutex and call function if already locked
+ * @v: pointer of type atomic_t
+ * @contention_fn: function to call if v was already locked
+ *
+ * Atomically locks @v and calls a function if @v was already locked.
+ * When @v == 1 it is unlocked, <= 0 means locked.
+ *
+ * Returns 0 if there was no contention, otherwise the return value of @contention_fn.
+ */
+#define arch_mutex_fast_lock_retval(v, contention_fn) \
+({ \
+ int __retval = 0; \
+ if (unlikely(atomic_xchg(v, 0) != 1)) \
+ __retval = contention_fn(v); \
+ __retval; \
+})
+
+/**
* arch_mutex_fast_unlock - unlock and call function if contended
* @v: pointer of type atomic_t
* @contention_fn: function to call if v was contended
Index: linux-2.6/include/asm-i386/mutex.h
===================================================================
--- linux-2.6.orig/include/asm-i386/mutex.h
+++ linux-2.6/include/asm-i386/mutex.h
@@ -14,6 +14,14 @@
#define arch_mutex_fast_lock(v, contention_fn) \
atomic_dec_call_if_negative(v, contention_fn)
+#define arch_mutex_fast_lock_retval(v, contention_fn) \
+({ \
+ int __retval = 0; \
+ if (unlikely(atomic_dec_return(v) < 0)) \
+ __retval = contention_fn(v); \
+ __retval; \
+})
+
#define arch_mutex_fast_unlock(v, contention_fn) \
atomic_inc_call_if_nonpositive(v, contention_fn)
Index: linux-2.6/kernel/mutex.c
===================================================================
--- linux-2.6.orig/kernel/mutex.c
+++ linux-2.6/kernel/mutex.c
@@ -336,11 +336,24 @@ static inline void __mutex_lock(struct m
__mutex_lock_atomic(lock);
}
+static __sched int FASTCALL(__mutex_lock_interruptible_noinline(atomic_t *lock_count));
+
+static inline int __mutex_lock_interruptible_atomic(struct mutex *lock)
+{
+ return arch_mutex_fast_lock_retval(&lock->count,
+ __mutex_lock_interruptible_noinline);
+}
+
+static fastcall __sched int __mutex_lock_interruptible_noinline(atomic_t *lock_count)
+{
+ struct mutex *lock = container_of(lock_count, struct mutex, count);
+
+ return __mutex_lock_interruptible_nonatomic(lock);
+}
+
static inline int __mutex_lock_interruptible(struct mutex *lock)
{
- if (unlikely(atomic_dec_return(&lock->count) < 0))
- return __mutex_lock_interruptible_nonatomic(lock);
- return 0;
+ return __mutex_lock_interruptible_atomic(lock);
}
static void __sched FASTCALL(__mutex_unlock_noinline(atomic_t *lock_count));
Index: linux-2.6/include/asm-x86_64/mutex.h
===================================================================
--- linux-2.6.orig/include/asm-x86_64/mutex.h
+++ linux-2.6/include/asm-x86_64/mutex.h
@@ -14,6 +14,14 @@
#define arch_mutex_fast_lock(v, contention_fn) \
atomic_dec_call_if_negative(v, contention_fn)
+#define arch_mutex_fast_lock_retval(v, contention_fn) \
+({ \
+ int __retval = 0; \
+ if (unlikely(atomic_dec_return(v) < 0)) \
+ __retval = contention_fn(v); \
+ __retval; \
+})
+
#define arch_mutex_fast_unlock(v, contention_fn) \
atomic_inc_call_if_nonpositive(v, contention_fn)
-
To unsubscribe from this list: send the line "unsubscribe linux-kernel" in
the body of a message to [email protected]
More majordomo info at http://vger.kernel.org/majordomo-info.html
Please read the FAQ at http://www.tux.org/lkml/
[Index of Archives]
[Kernel Newbies]
[Netfilter]
[Bugtraq]
[Photo]
[Stuff]
[Gimp]
[Yosemite News]
[MIPS Linux]
[ARM Linux]
[Linux Security]
[Linux RAID]
[Video 4 Linux]
[Linux for the blind]
[Linux Resources]