[patch 2/2] mutex subsystem: use the per-architecture fast path lock/unlock defines

This switches the core over to the architecture-defined fast path locking
primitives.  It also adds __mutex_lock_interruptible_noinline and
friends.
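
For reference, here is a minimal sketch of the interface the core now expects
from <asm/mutex.h>.  This is not part of the patch; the decrement-based bodies
and the exact helper signatures are assumptions, shown only to illustrate what
an architecture (or a generic fallback) would have to provide:

/*
 * Hypothetical <asm/mutex.h> fallback based on atomic decrement --
 * an illustration only, not taken from this patch.
 */
static inline void
__mutex_fastpath_lock(atomic_t *count, void (*fail_fn)(atomic_t *))
{
	/* 1 -> 0 is the uncontended acquire; anything else takes the slowpath */
	if (unlikely(atomic_dec_return(count) < 0))
		fail_fn(count);
}

static inline int
__mutex_fastpath_lock_retval(atomic_t *count, int (*fail_fn)(atomic_t *))
{
	/* same transition, but the slowpath's return value is propagated */
	if (unlikely(atomic_dec_return(count) < 0))
		return fail_fn(count);
	return 0;
}

static inline void
__mutex_fastpath_unlock(atomic_t *count, void (*fail_fn)(atomic_t *))
{
	/* 0 -> 1 is the uncontended release; waiters leave the count negative */
	if (unlikely(atomic_inc_return(count) <= 0))
		fail_fn(count);
}

/*
 * With a decrement-based fastpath the unlock slowpath still has to reset
 * the count to 1; an xchg-based fastpath would already have done so and
 * would return 0 here.
 */
#define __mutex_slowpath_needs_unlock()	1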

Index: linux-2.6/kernel/mutex.c
===================================================================
--- linux-2.6.orig/kernel/mutex.c
+++ linux-2.6/kernel/mutex.c
@@ -266,13 +266,8 @@ __mutex_lock_interruptible_nonatomic(str
  */
 int fastcall mutex_trylock(struct mutex *lock)
 {
-#ifdef __HAVE_ARCH_CMPXCHG
 	if (atomic_cmpxchg(&lock->count, 1, 0) == 1)
 		return 1;
-#else
-	if (atomic_dec_return(&lock->count) == 0)
-		return 1;
-#endif
 	return 0;
 }
 
@@ -316,13 +311,14 @@ static inline void __mutex_unlock_nonato
 	 * Waiters take care of themselves and stay in flight until
 	 * necessary.
 	 *
-	 * (in the xchg based implementation the fastpath has set the
+	 * (depending on the implementation the fastpath has set the
 	 *  count to 1 already, so we must not set it here, because we
 	 *  dont own the lock anymore. In the debug case we must set
 	 *  the lock inside the spinlock.)
 	 */
-#if !defined(CONFIG_MUTEX_XCHG_ALGORITHM) && !defined(CONFIG_DEBUG_MUTEXES)
-	atomic_set(&lock->count, 1);
+#if !defined(CONFIG_DEBUG_MUTEXES)
+	if (__mutex_slowpath_needs_unlock())
+		atomic_set(&lock->count, 1);
 #endif
 	spin_lock_mutex(&lock->wait_lock);
 #ifdef CONFIG_DEBUG_MUTEXES
@@ -349,21 +345,12 @@ static inline void __mutex_unlock_nonato
 static __sched void FASTCALL(__mutex_lock_noinline(atomic_t *lock_count));
 
 /*
- * Some architectures do not have fast dec_and_test atomic primitives,
- * for them we are providing an atomic_xchg() based mutex implementation,
- * if they enable CONFIG_MUTEX_XCHG_ALGORITHM.
- *
  * The locking fastpath is the 1->0 transition from 'unlocked' into
  * 'locked' state:
  */
 static inline void __mutex_lock_atomic(struct mutex *lock)
 {
-#ifdef CONFIG_MUTEX_XCHG_ALGORITHM
-	if (unlikely(atomic_xchg(&lock->count, 0) != 1))
-		__mutex_lock_noinline(&lock->count);
-#else
-	atomic_dec_call_if_negative(&lock->count, __mutex_lock_noinline);
-#endif
+	__mutex_fastpath_lock(&lock->count, __mutex_lock_noinline);
 }
 
 /*
@@ -383,16 +370,23 @@ static inline void __mutex_lock(struct m
 	__mutex_lock_atomic(lock);
 }
 
+static __sched int FASTCALL(__mutex_lock_interruptible_noinline(atomic_t *lock_count));
+
+static inline int __mutex_lock_interruptible_atomic(struct mutex *lock)
+{
+	return __mutex_fastpath_lock_retval(&lock->count, __mutex_lock_interruptible_noinline);
+}
+
+static int fastcall __sched __mutex_lock_interruptible_noinline(atomic_t *lock_count)
+{
+	struct mutex *lock = container_of(lock_count, struct mutex, count);
+
+	return __mutex_lock_interruptible_nonatomic(lock);
+}
+
 static inline int __mutex_lock_interruptible(struct mutex *lock)
 {
-#ifdef CONFIG_MUTEX_XCHG_ALGORITHM
-	if (unlikely(atomic_xchg(&lock->count, 0) != 1))
-		return __mutex_lock_interruptible_nonatomic(lock);
-#else
-	if (unlikely(atomic_dec_return(&lock->count) < 0))
-		return __mutex_lock_interruptible_nonatomic(lock);
-#endif
-	return 0;
+	return __mutex_lock_interruptible_atomic(lock);
 }
 
 static void __sched FASTCALL(__mutex_unlock_noinline(atomic_t *lock_count));
@@ -403,12 +397,7 @@ static void __sched FASTCALL(__mutex_unl
  */
 static inline void __mutex_unlock_atomic(struct mutex *lock)
 {
-#ifdef CONFIG_MUTEX_XCHG_ALGORITHM
-	if (unlikely(atomic_xchg(&lock->count, 1) != 0))
-		__mutex_unlock_noinline(&lock->count);
-#else
-	atomic_inc_call_if_nonpositive(&lock->count, __mutex_unlock_noinline);
-#endif
+	__mutex_fastpath_unlock(&lock->count, __mutex_unlock_noinline);
 }
 
 static void fastcall __sched __mutex_unlock_noinline(atomic_t *lock_count)
Index: linux-2.6/include/linux/mutex.h
===================================================================
--- linux-2.6.orig/include/linux/mutex.h
+++ linux-2.6/include/linux/mutex.h
@@ -12,6 +12,7 @@
  */
 #include <linux/list.h>
 #include <linux/spinlock_types.h>
+#include <asm/mutex.h>
 
 #include <asm/atomic.h>
 