Modify the smp nice code to store load_weight on uniprocessor builds as
well, so that the relative niceness of tasks sharing one CPU can be
assessed. Also do some minor cleanups and uninline set_load_weight().
Signed-off-by: Con Kolivas <[email protected]>
---
 include/linux/sched.h |    4 ++--
 kernel/sched.c        |   24 ++++++------------------
 2 files changed, 8 insertions(+), 20 deletions(-)
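
For reference, the nice-to-weight mapping the hunks below rely on can be
reproduced in userspace. In the sketch that follows, SCALE_PRIO(),
static_prio_timeslice() and RTPRIO_TO_LOAD_WEIGHT() are taken from the
patch; the numeric constants assume the 2.6.16 defaults with HZ=1000,
and the LOAD_WEIGHT()/PRIO_TO_LOAD_WEIGHT() definitions are my reading
of the smp nice series (the weight is the timeslice normalised so a
nice 0 task weighs exactly SCHED_LOAD_SCALE), so treat the numbers as
illustrative rather than definitive:

#include <stdio.h>

/* Assumed 2.6.16 defaults with HZ=1000; illustrative only. */
#define MAX_RT_PRIO		100
#define MAX_PRIO		140
#define MAX_USER_PRIO		40
#define NICE_TO_PRIO(nice)	(MAX_RT_PRIO + (nice) + 20)
#define DEF_TIMESLICE		100	/* ms */
#define MIN_TIMESLICE		5	/* ms */
#define SCHED_LOAD_SCALE	128UL	/* increase resolution of load */

#define max(a, b)		((a) > (b) ? (a) : (b))

/* As in the patch: scale the base timeslice by static priority. */
#define SCALE_PRIO(x, prio) \
	max(x * (MAX_PRIO - prio) / (MAX_USER_PRIO / 2), MIN_TIMESLICE)

static unsigned int static_prio_timeslice(int static_prio)
{
	if (static_prio < NICE_TO_PRIO(0))
		return SCALE_PRIO(DEF_TIMESLICE * 4, static_prio);
	else
		return SCALE_PRIO(DEF_TIMESLICE, static_prio);
}

/* Assumed: weight is the timeslice scaled so nice 0 == 128. */
#define LOAD_WEIGHT(lp)		((lp) * SCHED_LOAD_SCALE / DEF_TIMESLICE)
#define PRIO_TO_LOAD_WEIGHT(prio) LOAD_WEIGHT(static_prio_timeslice(prio))
#define RTPRIO_TO_LOAD_WEIGHT(rp) \
	(PRIO_TO_LOAD_WEIGHT(MAX_RT_PRIO) + LOAD_WEIGHT(rp))

int main(void)
{
	static const int nices[] = { -20, -10, 0, 10, 19 };
	unsigned int i;

	for (i = 0; i < sizeof(nices) / sizeof(nices[0]); i++)
		printf("nice %3d -> load_weight %4lu\n", nices[i],
		       PRIO_TO_LOAD_WEIGHT(NICE_TO_PRIO(nices[i])));
	/* Any realtime task outweighs even a nice -20 task. */
	printf("rt_priority 1 -> load_weight %4lu\n",
	       RTPRIO_TO_LOAD_WEIGHT(1));
	return 0;
}

Under those assumptions this prints 1024 for nice -20, 128 for nice 0
and 6 for nice 19 (the default timeslices in milliseconds, scaled so
that nice 0 equals SCHED_LOAD_SCALE), and the realtime weight comes out
above 1024, so any rt task outweighs any nice level.
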
Index: linux-2.6.16-rc6-mm1/include/linux/sched.h
===================================================================
--- linux-2.6.16-rc6-mm1.orig/include/linux/sched.h 2006-03-13 10:08:36.000000000 +1100
+++ linux-2.6.16-rc6-mm1/include/linux/sched.h 2006-03-13 18:29:37.000000000 +1100
@@ -546,9 +546,9 @@ enum idle_type
/*
* sched-domains (multiprocessor balancing) declarations:
*/
-#ifdef CONFIG_SMP
#define SCHED_LOAD_SCALE 128UL /* increase resolution of load */

+#ifdef CONFIG_SMP
#define SD_LOAD_BALANCE 1 /* Do load balancing on this domain. */
#define SD_BALANCE_NEWIDLE 2 /* Balance when about to become idle */
#define SD_BALANCE_EXEC 4 /* Balance on exec */
@@ -704,8 +704,8 @@ struct task_struct {
#ifdef __ARCH_WANT_UNLOCKED_CTXSW
int oncpu;
#endif
- int load_weight; /* for load balancing purposes */
#endif
+ int load_weight; /* for niceness load balancing purposes */
int prio, static_prio;
struct list_head run_list;
prio_array_t *array;
Index: linux-2.6.16-rc6-mm1/kernel/sched.c
===================================================================
--- linux-2.6.16-rc6-mm1.orig/kernel/sched.c 2006-03-13 10:08:36.000000000 +1100
+++ linux-2.6.16-rc6-mm1/kernel/sched.c 2006-03-13 18:29:37.000000000 +1100
@@ -170,12 +170,12 @@
*/

#define SCALE_PRIO(x, prio) \
- max(x * (MAX_PRIO - prio) / (MAX_USER_PRIO/2), MIN_TIMESLICE)
+ max(x * (MAX_PRIO - prio) / (MAX_USER_PRIO / 2), MIN_TIMESLICE)

static unsigned int static_prio_timeslice(int static_prio)
{
if (static_prio < NICE_TO_PRIO(0))
- return SCALE_PRIO(DEF_TIMESLICE*4, static_prio);
+ return SCALE_PRIO(DEF_TIMESLICE * 4, static_prio);
else
return SCALE_PRIO(DEF_TIMESLICE, static_prio);
}
@@ -217,8 +217,8 @@ struct runqueue {
* remote CPUs use both these fields when doing load calculation.
*/
unsigned long nr_running;
-#ifdef CONFIG_SMP
unsigned long raw_weighted_load;
+#ifdef CONFIG_SMP
unsigned long cpu_load[3];
#endif
unsigned long long nr_switches;
@@ -672,7 +672,6 @@ static int effective_prio(task_t *p)
return prio;
}

-#ifdef CONFIG_SMP
/*
* To aid in avoiding the subversion of "niceness" due to uneven distribution
* of tasks with abnormal "nice" values across CPUs the contribution that
@@ -695,9 +694,10 @@ static int effective_prio(task_t *p)
#define RTPRIO_TO_LOAD_WEIGHT(rp) \
(PRIO_TO_LOAD_WEIGHT(MAX_RT_PRIO) + LOAD_WEIGHT(rp))

-static inline void set_load_weight(task_t *p)
+static void set_load_weight(task_t *p)
{
if (rt_task(p)) {
+#ifdef CONFIG_SMP
if (p == task_rq(p)->migration_thread)
/*
* The migration thread does the actual balancing.
@@ -706,6 +706,7 @@ static inline void set_load_weight(task_
*/
p->load_weight = 0;
else
+#endif
p->load_weight = RTPRIO_TO_LOAD_WEIGHT(p->rt_priority);
} else
p->load_weight = PRIO_TO_LOAD_WEIGHT(p->static_prio);
@@ -720,19 +721,6 @@ static inline void dec_raw_weighted_load
{
rq->raw_weighted_load -= p->load_weight;
}
-#else
-static inline void set_load_weight(task_t *p)
-{
-}
-
-static inline void inc_raw_weighted_load(runqueue_t *rq, const task_t *p)
-{
-}
-
-static inline void dec_raw_weighted_load(runqueue_t *rq, const task_t *p)
-{
-}
-#endif

static inline void inc_nr_running(task_t *p, runqueue_t *rq)
{
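
To see why rq->raw_weighted_load is worth keeping on UP: it is simply
the sum of the load_weights of the runnable tasks, maintained by the
inc/dec_raw_weighted_load() helpers at enqueue/dequeue time, and
dividing a task's weight by that sum gives the share of the single CPU
the task should receive. Below is a standalone sketch of that
bookkeeping; the struct definitions and helper names are stand-ins
invented for illustration (the kernel uses task_t/runqueue_t and the
helpers shown in the patch), and the weights 128 and 6 are the nice 0
and nice 19 values from the sketch above:

#include <stdio.h>

/* Invented stand-ins for the kernel's task_t and runqueue_t. */
struct task { const char *comm; unsigned long load_weight; };
struct runqueue { unsigned long nr_running, raw_weighted_load; };

/* Mirrors inc_nr_running() + inc_raw_weighted_load() in the patch. */
static void enqueue(struct runqueue *rq, const struct task *p)
{
	rq->nr_running++;
	rq->raw_weighted_load += p->load_weight;
}

/* Mirrors the corresponding dec_ helpers. */
static void dequeue(struct runqueue *rq, const struct task *p)
{
	rq->nr_running--;
	rq->raw_weighted_load -= p->load_weight;
}

/* Hypothetical helper: a task's expected CPU share, in permille. */
static unsigned long cpu_share(const struct runqueue *rq,
			       const struct task *p)
{
	if (!rq->raw_weighted_load)
		return 0;
	return p->load_weight * 1000 / rq->raw_weighted_load;
}

int main(void)
{
	struct runqueue rq = { 0, 0 };
	struct task nice0 = { "nice 0", 128 };
	struct task nice19 = { "nice 19", 6 };

	enqueue(&rq, &nice0);
	enqueue(&rq, &nice19);
	printf("%s: %lu/1000  %s: %lu/1000\n",
	       nice0.comm, cpu_share(&rq, &nice0),
	       nice19.comm, cpu_share(&rq, &nice19));

	dequeue(&rq, &nice19);
	printf("%s alone: %lu/1000\n", nice0.comm, cpu_share(&rq, &nice0));
	return 0;
}

On SMP this sum already feeds the load balancer; the point of the patch
is that on UP the weights were previously compiled away, so no such
comparison could be made on a single CPU.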