[PATCH 2/6] sched: make sched_slice() group scheduling savvy

Currently the ideal slice length does not take group scheduling into account.
Change it so that it properly takes all the runnable tasks on this cpu into
account and calculates the weight according to the grouping hierarchy.
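
As an illustration, a standalone userspace sketch (not kernel code; the
period and weights below are made-up values): the slice is now scaled by
the entity/runqueue weight ratio at every level of the hierarchy, the way
the new sched_slice() loop does.

/*
 * Hedged sketch of the hierarchical slice calculation. Walk the
 * hierarchy and scale the period by se_weight / cfs_rq_weight at
 * each level. All numbers are hypothetical.
 */
#include <stdio.h>

int main(void)
{
	unsigned long long slice = 20000000ULL;	/* assumed 20ms period */
	/* { se->load.weight, cfs_rq->load.weight } per level */
	unsigned long level[2][2] = {
		{ 1024, 3072 },	/* task is 1/3 of its group */
		{ 2048, 4096 },	/* group is 1/2 of the rq */
	};

	for (int i = 0; i < 2; i++)
		slice = slice * level[i][0] / level[i][1];

	printf("slice = %llu ns\n", slice);	/* 20ms * 1/3 * 1/2 = ~3.33ms */
	return 0;
}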

Also fixes a bug in vslice which missed a factor of NICE_0_LOAD.
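
To illustrate that fix, another standalone sketch with made-up numbers:
virtual time advances at NICE_0_LOAD/weight times wall speed, so the
virtual slice must be period * NICE_0_LOAD / rq_weight rather than just
period / rq_weight.

#include <stdio.h>

#define NICE_0_LOAD 1024ULL	/* load weight of a nice-0 task */

int main(void)
{
	unsigned long long period = 20000000ULL;	/* assumed 20ms period */
	unsigned long long rq_weight = 2 * NICE_0_LOAD;	/* two nice-0 tasks */

	/* old (buggy): dropped the NICE_0_LOAD factor */
	unsigned long long old = period / rq_weight;
	/* new: scale by NICE_0_LOAD so a nice-0 task sees wall time */
	unsigned long long new = period * NICE_0_LOAD / rq_weight;

	printf("old = %llu ns, new = %llu ns\n", old, new);
	/* old = 9765 ns (off by ~1024x), new = 10000000 ns (10ms) */
	return 0;
}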

Signed-off-by: Peter Zijlstra <[email protected]>
Cc: Srivatsa Vaddagiri <[email protected]>
---
 kernel/sched_fair.c |   42 +++++++++++++++++++++++++++++++-----------
 1 file changed, 31 insertions(+), 11 deletions(-)

Index: linux-2.6/kernel/sched_fair.c
===================================================================
--- linux-2.6.orig/kernel/sched_fair.c
+++ linux-2.6/kernel/sched_fair.c
@@ -331,10 +331,15 @@ static u64 __sched_period(unsigned long 
  */
 static u64 sched_slice(struct cfs_rq *cfs_rq, struct sched_entity *se)
 {
-	u64 slice = __sched_period(cfs_rq->nr_running);
+	unsigned long nr_running = rq_of(cfs_rq)->nr_running;
+	u64 slice = __sched_period(nr_running);
 
-	slice *= se->load.weight;
-	do_div(slice, cfs_rq->load.weight);
+	for_each_sched_entity(se) {
+		cfs_rq = cfs_rq_of(se);
+
+		slice *= se->load.weight;
+		do_div(slice, cfs_rq->load.weight);
+	}
 
 	return slice;
 }
@@ -344,24 +349,39 @@ static u64 sched_slice(struct cfs_rq *cf
  *
  * vs = s/w = p/rw
  */
-static u64 __sched_vslice(unsigned long rq_weight, unsigned long nr_running)
+static u64 __sched_vslice(struct cfs_rq *cfs_rq, struct sched_entity *new)
 {
-	u64 vslice = __sched_period(nr_running);
+	struct sched_entity *se = cfs_rq->curr;
+	unsigned long nr_running = rq_of(cfs_rq)->nr_running;
+	unsigned long weight = 0;
+	u64 vslice;
+
+	if (new) {
+		nr_running++;
+		weight = new->load.weight;
+	}
 
-	do_div(vslice, rq_weight);
+	vslice = __sched_period(nr_running);
+
+	for_each_sched_entity(se) {
+		cfs_rq = cfs_rq_of(se);
+
+		vslice *= NICE_0_LOAD;
+		do_div(vslice, cfs_rq->load.weight + weight);
+		weight = 0;
+	}
 
 	return vslice;
 }
 
-static u64 sched_vslice(struct cfs_rq *cfs_rq)
+static inline u64 sched_vslice(struct cfs_rq *cfs_rq)
 {
-	return __sched_vslice(cfs_rq->load.weight, cfs_rq->nr_running);
+	return __sched_vslice(cfs_rq, NULL);
 }
 
-static u64 sched_vslice_add(struct cfs_rq *cfs_rq, struct sched_entity *se)
+static inline u64 sched_vslice_add(struct cfs_rq *cfs_rq, struct sched_entity *new)
 {
-	return __sched_vslice(cfs_rq->load.weight + se->load.weight,
-			cfs_rq->nr_running + 1);
+	return __sched_vslice(cfs_rq, new);
 }
 
 /*

--
