[patch 3/5] mutex subsystem: move the core to the new atomic helpers

This patch moves the core mutex code over to the atomic helpers from the
previous patch.  There is no change for i386 and x86_64, except that the
forced unlock state is now set outside the spinlock (with no adverse
effect).
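
For reference, here is a rough sketch of what a generic fallback for those
helpers could look like, assuming atomic_dec_return()/atomic_inc_return()
based fast paths.  Only the names arch_mutex_fast_lock, arch_mutex_fast_unlock
and arch_mutex_unlock_fixup come from the patch below; the rest is
illustration only and not part of this series (a real <asm/mutex.h> would
also carry the arch's calling convention annotations on the slow path
pointer):

	#include <linux/compiler.h>
	#include <asm/atomic.h>

	static inline void
	arch_mutex_fast_lock(atomic_t *count, void (*fail_fn)(atomic_t *))
	{
		/* 1 -> 0 is the uncontended fast path, anything else is slow */
		if (unlikely(atomic_dec_return(count) < 0))
			fail_fn(count);
	}

	static inline void
	arch_mutex_fast_unlock(atomic_t *count, void (*fail_fn)(atomic_t *))
	{
		/* 0 -> 1 means nobody is waiting, otherwise go wake someone */
		if (unlikely(atomic_inc_return(count) <= 0))
			fail_fn(count);
	}

	static inline void arch_mutex_unlock_fixup(atomic_t *count)
	{
		/*
		 * Force the count back to 'unlocked' if the fast path
		 * above didn't already do it (waiters may have pushed
		 * it negative).
		 */
		if (atomic_read(count) != 1)
			atomic_set(count, 1);
	}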

At this point the patch adding asm-generic/atomic-call-if.h can probably
be dropped from the original mutex patch series.

Signed-off-by: Nicolas Pitre <[email protected]>

---

Index: linux-2.6/kernel/mutex.c
===================================================================
--- linux-2.6.orig/kernel/mutex.c
+++ linux-2.6/kernel/mutex.c
@@ -296,14 +296,6 @@ static inline void __mutex_unlock_nonato
 
 	debug_mutex_unlock(lock);
 
-	/*
-	 * Set it back to 'unlocked'. We'll have a waiter in flight
-	 * (if any), and if some other task comes around, let it
-	 * steal the lock. Waiters take care of themselves and stay
-	 * in flight until necessary.
-	 */
-	atomic_set(&lock->count, 1);
-
 	if (!list_empty(&lock->wait_list))
 		mutex_wakeup_waiter(lock __IP__);
 
@@ -329,7 +321,7 @@ static __sched void FASTCALL(__mutex_loc
  */
 static inline void __mutex_lock_atomic(struct mutex *lock)
 {
-	atomic_dec_call_if_negative(&lock->count, __mutex_lock_noinline);
+	arch_mutex_fast_lock(&lock->count, __mutex_lock_noinline);
 }
 
 static fastcall __sched void __mutex_lock_noinline(atomic_t *lock_count)
@@ -359,13 +351,19 @@ static void __sched FASTCALL(__mutex_unl
  */
 static inline void __mutex_unlock_atomic(struct mutex *lock)
 {
-	atomic_inc_call_if_nonpositive(&lock->count, __mutex_unlock_noinline);
+	arch_mutex_fast_unlock(&lock->count, __mutex_unlock_noinline);
 }
 
 static fastcall void __sched __mutex_unlock_noinline(atomic_t *lock_count)
 {
 	struct mutex *lock = container_of(lock_count, struct mutex, count);
 
+	/*
+	 * We were called via arch_mutex_fast_unlock() therefore
+	 * we need to call arch_mutex_unlock_fixup() which will set
+	 * the mutex to unlocked (but only if it wasn't done already).
+	 */
+	arch_mutex_unlock_fixup(lock_count);
 	__mutex_unlock_nonatomic(lock);
 }
 
@@ -383,6 +381,13 @@ static inline void __mutex_lock(struct m
 
 static inline void __mutex_unlock(struct mutex *lock __IP_DECL__)
 {
+	/*
+	 * Set it back to 'unlocked'. We'll have a waiter in flight
+	 * (if any), and if some other task comes around, let it
+	 * steal the lock. Waiters take care of themselves and stay
+	 * in flight until necessary.
+	 */
+	atomic_set(&lock->count, 1);
 	__mutex_unlock_nonatomic(lock __IP__);
 }
 
Index: linux-2.6/include/linux/mutex.h
===================================================================
--- linux-2.6.orig/include/linux/mutex.h
+++ linux-2.6/include/linux/mutex.h
@@ -14,6 +14,7 @@
 #include <asm/atomic.h>
 #include <linux/list.h>
 #include <linux/spinlock_types.h>
+#include <asm/mutex.h>
 
 /*
  * Simple, straightforward mutexes with strict semantics:

