[patch 3/3] mutex subsystem: move the core to the new atomic helpers

This patch moves the core mutex code over to the atomic helpers 
introduced in the previous patch.  There is no change for i386 and 
x86_64, except that the count is now forced back to the 'unlocked' 
state outside the spinlock (which doesn't matter: another CPU could 
have locked the mutex right away even when the count was set back to 
unlocked inside the spinlock).  This does, however, bring significant 
improvements on ARM, for example.
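
For reference, the helpers used in the diff below are defined in the 
previous patch of this series.  A rough, generic sketch of the 
semantics they are relied upon to provide might look like the 
following -- the names are taken from the diff itself, but the bodies 
here are only illustrative (and ignore the fastcall annotations), not 
the actual arch-optimized implementations:

	static inline void
	atomic_lock_call_if_contended(atomic_t *v, void (*fn)(atomic_t *))
	{
		/*
		 * 1 -> 0 is the uncontended fast path; a negative result
		 * means contention, so punt to the out-of-line slow path.
		 */
		if (atomic_dec_return(v) < 0)
			fn(v);
	}

	static inline void
	atomic_unlock_call_if_contended(atomic_t *v, void (*fn)(atomic_t *))
	{
		/*
		 * 0 -> 1 is the uncontended fast path; a result <= 0
		 * means there are waiters, so punt to the slow path.
		 */
		if (atomic_inc_return(v) <= 0)
			fn(v);
	}

	static inline void atomic_contended_unlock_fixup(atomic_t *v)
	{
		/*
		 * Force the count back to 'unlocked' in the slow path,
		 * in case the arch fast path didn't already do it.
		 */
		atomic_set(v, 1);
	}

The win on ARM presumably comes from being able to implement those 
fast paths with a simple swap of the count value instead of a full 
atomic decrement/increment, with the fixup above patching the count 
back up in the contended unlock case.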

Signed-off-by: Nicolas Pitre <[email protected]>

---

Index: linux-2.6/kernel/mutex.c
===================================================================
--- linux-2.6.orig/kernel/mutex.c
+++ linux-2.6/kernel/mutex.c
@@ -296,14 +296,6 @@ static inline void __mutex_unlock_nonato
 
 	debug_mutex_unlock(lock);
 
-	/*
-	 * Set it back to 'unlocked'. We'll have a waiter in flight
-	 * (if any), and if some other task comes around, let it
-	 * steal the lock. Waiters take care of themselves and stay
-	 * in flight until necessary.
-	 */
-	atomic_set(&lock->count, 1);
-
 	if (!list_empty(&lock->wait_list))
 		mutex_wakeup_waiter(lock __IP__);
 
@@ -329,7 +321,7 @@ static __sched void FASTCALL(__mutex_loc
  */
 static inline void __mutex_lock_atomic(struct mutex *lock)
 {
-	atomic_dec_call_if_negative(&lock->count, __mutex_lock_noinline);
+	atomic_lock_call_if_contended(&lock->count, __mutex_lock_noinline);
 }
 
 static fastcall __sched void __mutex_lock_noinline(atomic_t *lock_count)
@@ -359,13 +351,19 @@ static void __sched FASTCALL(__mutex_unl
  */
 static inline void __mutex_unlock_atomic(struct mutex *lock)
 {
-	atomic_inc_call_if_nonpositive(&lock->count, __mutex_unlock_noinline);
+	atomic_unlock_call_if_contended(&lock->count, __mutex_unlock_noinline);
 }
 
 static fastcall void __sched __mutex_unlock_noinline(atomic_t *lock_count)
 {
 	struct mutex *lock = container_of(lock_count, struct mutex, count);
 
+	/*
+	 * We were called via atomic_unlock_call_if_contended() therefore
+	 * we need to call atomic_contended_unlock_fixup() which will set
+	 * it to unlocked (if it wasn't done already).
+	 */
+	atomic_contended_unlock_fixup(lock_count);
 	__mutex_unlock_nonatomic(lock);
 }
 
@@ -383,6 +381,13 @@ static inline void __mutex_lock(struct m
 
 static inline void __mutex_unlock(struct mutex *lock __IP_DECL__)
 {
+	/*
+	 * Set it back to 'unlocked'. We'll have a waiter in flight
+	 * (if any), and if some other task comes around, let it
+	 * steal the lock. Waiters take care of themselves and stay
+	 * in flight until necessary.
+	 */
+	atomic_set(&lock->count, 1);
 	__mutex_unlock_nonatomic(lock __IP__);
 }
 
