On Thu, 22 Jun 2006, Thomas Gleixner wrote:
On Thu, 2006-06-22 at 10:23 -0400, Steven Rostedt wrote:
What's nasty ?
The fact that sched_setscheduler can never be called by interrupt context.
So I don't know how you're going to handle the high_res dynamic priority
now.
That's a separate issue. Though you are right.
Why not use my original patch and solve both issues?
I have even updated it to avoid the double traversal. It also removes
one other traversal which shouldn't be needed. (I have not had time
to boot the kernel with it, though, but it does compile...:-)
Esben
tglx
Index: linux-2.6.17-rt1/kernel/rtmutex.c
===================================================================
--- linux-2.6.17-rt1.orig/kernel/rtmutex.c
+++ linux-2.6.17-rt1/kernel/rtmutex.c
@@ -539,7 +539,8 @@ static void wakeup_next_waiter(struct rt
* Must be called with lock->wait_lock held
*/
static void remove_waiter(struct rt_mutex *lock,
- struct rt_mutex_waiter *waiter __IP_DECL__)
+ struct rt_mutex_waiter *waiter,
+ int fix_prio __IP_DECL__)
{
int first = (waiter == rt_mutex_top_waiter(lock));
int boost = 0;
@@ -564,10 +565,12 @@ static void remove_waiter(struct rt_mute
next = rt_mutex_top_waiter(lock);
plist_add(&next->pi_list_entry, &owner->pi_waiters);
}
- __rt_mutex_adjust_prio(owner);
+ if (fix_prio) {
+ __rt_mutex_adjust_prio(owner);
+ }
if (owner->pi_blocked_on) {
- boost = 1;
+ boost = fix_prio;
get_task_struct(owner);
}
spin_unlock_irqrestore(&owner->pi_lock, flags);
@@ -625,6 +628,7 @@ rt_lock_slowlock(struct rt_mutex *lock _
debug_rt_mutex_init_waiter(&waiter);
waiter.task = NULL;
+ waiter.save_state = 1;
spin_lock(&lock->wait_lock);
@@ -687,6 +691,19 @@ rt_lock_slowlock(struct rt_mutex *lock _
state = xchg(&current->state, TASK_UNINTERRUPTIBLE);
if (unlikely(state == TASK_RUNNING))
saved_state = TASK_RUNNING;
+
+ if (unlikely(waiter.task) &&
+ waiter.list_entry.prio != current->prio) {
+ /*
+ * We still do not have the lock, but we are woken up with
+ * a different prio than the one we waited with
+ * originally. We remove the wait entry now and then
+ * reinsert ourselves with the right priority
+ */
+ remove_waiter(lock, &waiter, 0 __IP__);
+ waiter.task = NULL;
+ }
+
}
state = xchg(&current->state, saved_state);
@@ -700,7 +717,7 @@ rt_lock_slowlock(struct rt_mutex *lock _
* can end up with a non-NULL waiter.task:
*/
if (unlikely(waiter.task))
- remove_waiter(lock, &waiter __IP__);
+ remove_waiter(lock, &waiter, 0 __IP__);
/*
* try_to_take_rt_mutex() sets the waiter bit
* unconditionally. We might have to fix that up:
@@ -798,6 +815,7 @@ rt_mutex_slowlock(struct rt_mutex *lock,
debug_rt_mutex_init_waiter(&waiter);
waiter.task = NULL;
+ waiter.save_state = 0;
spin_lock(&lock->wait_lock);
@@ -877,12 +895,24 @@ rt_mutex_slowlock(struct rt_mutex *lock,
current->flags |= saved_flags;
set_current_state(state);
+
+ if (unlikely(waiter.task) &&
+ waiter.list_entry.prio != current->prio) {
+ /*
+ * We still do not have the lock, but we are woken up with
+ * a different prio than the one we waited with
+ * originally. We remove the wait entry now and then
+ * reinsert ourselves with the right priority
+ */
+ remove_waiter(lock, &waiter, 0 __IP__);
+ waiter.task = NULL;
+ }
}
set_current_state(TASK_RUNNING);
if (unlikely(waiter.task))
- remove_waiter(lock, &waiter __IP__);
+ remove_waiter(lock, &waiter, 1 __IP__);
/*
* try_to_take_rt_mutex() sets the waiter bit
Index: linux-2.6.17-rt1/kernel/sched.c
===================================================================
--- linux-2.6.17-rt1.orig/kernel/sched.c
+++ linux-2.6.17-rt1/kernel/sched.c
@@ -57,6 +57,8 @@
#include <asm/unistd.h>
+#include "rtmutex_common.h"
+
/*
* Convert user-nice values [ -20 ... 0 ... 19 ]
* to static priority [ MAX_RT_PRIO..MAX_PRIO-1 ],
@@ -646,7 +648,9 @@ static inline void sched_info_switch(tas
#define sched_info_switch(t, next) do { } while (0)
#endif /* CONFIG_SCHEDSTATS */
+#ifdef CONFIG_SMP
static __cacheline_aligned_in_smp atomic_t rt_overload;
+#endif
static inline void inc_rt_tasks(task_t *p, runqueue_t *rq)
{
@@ -1473,10 +1477,11 @@ static inline int wake_idle(int cpu, tas
static int try_to_wake_up(task_t *p, unsigned int state, int sync, int mutex)
{
int cpu, this_cpu, success = 0;
- runqueue_t *this_rq, *rq;
+ runqueue_t *rq;
unsigned long flags;
long old_state;
#ifdef CONFIG_SMP
+ runqueue_t *this_rq;
unsigned long load, this_load;
struct sched_domain *sd, *this_sd = NULL;
int new_cpu;
@@ -4351,6 +4356,18 @@ int setscheduler(struct task_struct *p,
resched_task(rq->curr);
}
task_rq_unlock(rq, &flags);
+
+ /*
+ * If the process is blocked on rt-mutex, it will now wake up and
+ * reinsert itself into the wait list and boost the owner correctly
+ */
+ if (p->pi_blocked_on) {
+ if (p->pi_blocked_on->save_state)
+ wake_up_process_mutex(p);
+ else
+ wake_up_process(p);
+ }
+
spin_unlock_irqrestore(&p->pi_lock, fp);
return 0;
}
@@ -7086,4 +7103,3 @@ void notrace preempt_enable_no_resched(v
EXPORT_SYMBOL(preempt_enable_no_resched);
#endif
-
Index: linux-2.6.17-rt1/kernel/rtmutex_common.h
===================================================================
--- linux-2.6.17-rt1.orig/kernel/rtmutex_common.h
+++ linux-2.6.17-rt1/kernel/rtmutex_common.h
@@ -49,6 +49,7 @@ struct rt_mutex_waiter {
struct plist_node pi_list_entry;
struct task_struct *task;
struct rt_mutex *lock;
+ int save_state;
#ifdef CONFIG_DEBUG_RT_MUTEXES
unsigned long ip;
pid_t deadlock_task_pid;
-
To unsubscribe from this list: send the line "unsubscribe linux-kernel" in
the body of a message to [email protected]
More majordomo info at http://vger.kernel.org/majordomo-info.html
Please read the FAQ at http://www.tux.org/lkml/
[Index of Archives]
[Kernel Newbies]
[Netfilter]
[Bugtraq]
[Photo]
[Stuff]
[Gimp]
[Yosemite News]
[MIPS Linux]
[ARM Linux]
[Linux Security]
[Linux RAID]
[Video 4 Linux]
[Linux for the blind]
[Linux Resources]