rpms/kernel/FC-6 linux-2.6-sched-cfs-updates.patch, NONE, 1.1 kernel-2.6.spec, 1.3018, 1.3019 linux-2.6-sched-cfs-v2.6.22.5-v20.5.patch, 1.1, 1.2 linux-2.6-utrace-core.patch, 1.5, 1.6 linux-2.6-utrace-ptrace-compat.patch, 1.6, 1.7

fedora-cvs-commits at redhat.com
Wed Sep 12 21:44:49 UTC 2007


Author: cebbert

Update of /cvs/dist/rpms/kernel/FC-6
In directory cvs.devel.redhat.com:/tmp/cvs-serv32712

Modified Files:
	kernel-2.6.spec linux-2.6-sched-cfs-v2.6.22.5-v20.5.patch 
	linux-2.6-utrace-core.patch 
	linux-2.6-utrace-ptrace-compat.patch 
Added Files:
	linux-2.6-sched-cfs-updates.patch 
Log Message:
* Wed Sep 05 2007 Chuck Ebbert <cebbert at redhat.com>
- CFS scheduler updates
- utrace update (#248532, #267161, #284311)


linux-2.6-sched-cfs-updates.patch:
 b/kernel/sched.c       |    8 ++----
 b/kernel/sched_debug.c |    1 
 b/kernel/sched_fair.c  |    2 -
 kernel/sched.c         |    1 
 kernel/sched_fair.c    |   61 +++++++++++++++++++++++++------------------------
 5 files changed, 37 insertions(+), 36 deletions(-)

--- NEW FILE linux-2.6-sched-cfs-updates.patch ---
Gitweb:     http://git.kernel.org/git/?p=linux/kernel/git/torvalds/linux-2.6.git;a=commit;h=a0dc72601d48b171b4870dfdd0824901a2b2b1a9
Commit:     a0dc72601d48b171b4870dfdd0824901a2b2b1a9
Parent:     7fd0d2dde929ead79901e389e70dbfb3c6c06986
Author:     Ingo Molnar <mingo at elte.hu>
AuthorDate: Wed Sep 5 14:32:49 2007 +0200
Committer:  Ingo Molnar <mingo at elte.hu>
CommitDate: Wed Sep 5 14:32:49 2007 +0200

    sched: fix niced_granularity() shift
    
    fix niced_granularity(): the incorrect shift resulted in
    under-scheduling for CPU-bound negative-nice-level tasks (and this
    in turn caused higher-than-necessary latencies in nice-0 tasks).
    
    Signed-off-by: Ingo Molnar <mingo at elte.hu>
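
A minimal standalone sketch of the arithmetic, assuming the v20.5 constants
WMULT_SHIFT == 32 and NICE_0_SHIFT == 10 (names mirror the kernel's, but this
is illustrative user-space C, not the kernel code):

#include <stdio.h>

#define WMULT_SHIFT	32
#define NICE_0_SHIFT	10
#define NICE_0_LOAD	(1ULL << NICE_0_SHIFT)

int main(void)
{
	/* a CPU-bound negative-nice task with twice the nice-0 weight */
	unsigned long long weight = 2 * NICE_0_LOAD;
	unsigned long long inv_weight = (1ULL << WMULT_SHIFT) / weight;
	unsigned long long granularity = 2000000;	/* 2 ms, in ns */
	unsigned long long tmp = inv_weight * granularity;

	/* old shift: ~976 ns -- almost no granularity, so the heavier
	 * task gets preempted nearly at once (under-scheduled) */
	printf("old: %llu ns\n", tmp >> WMULT_SHIFT);

	/* fixed shift: granularity * NICE_0_LOAD / weight = 1000000 ns */
	printf("new: %llu ns\n", tmp >> (WMULT_SHIFT - NICE_0_SHIFT));
	return 0;
}
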
---
 kernel/sched_fair.c |    2 +-
 1 files changed, 1 insertions(+), 1 deletions(-)

diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index ce39282..810b52d 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -291,7 +291,7 @@ niced_granularity(struct sched_entity *curr, unsigned long granularity)
 	/*
 	 * It will always fit into 'long':
 	 */
-	return (long) (tmp >> WMULT_SHIFT);
+	return (long) (tmp >> (WMULT_SHIFT-NICE_0_SHIFT));
 }
 
 static inline void
Gitweb:     http://git.kernel.org/git/?p=linux/kernel/git/torvalds/linux-2.6.git;a=commit;h=7fd0d2dde929ead79901e389e70dbfb3c6c06986
Commit:     7fd0d2dde929ead79901e389e70dbfb3c6c06986
Parent:     b21010ed6498391c0f359f2a89c907533fe07fec
Author:     Suresh Siddha <suresh.b.siddha at intel.com>
AuthorDate: Wed Sep 5 14:32:48 2007 +0200
Committer:  Ingo Molnar <mingo at elte.hu>
CommitDate: Wed Sep 5 14:32:48 2007 +0200

    sched: fix MC/HT scheduler optimization, without breaking the FUZZ logic.
    
    First fix the check
    	if (*imbalance + SCHED_LOAD_SCALE_FUZZ < busiest_load_per_task)
    with this
    	if (*imbalance < busiest_load_per_task)
    
    The current check is always false for nice-0 tasks, because
    SCHED_LOAD_SCALE_FUZZ is the same as busiest_load_per_task for
    nice-0 tasks.
    
    With the above change alone, imbalance was getting reset to 0 in
    the corner-case condition, making the FUZZ logic fail. Fix it by
    not corrupting the imbalance, and change it only when we find that
    the HT/MC optimization is needed.
    
    Signed-off-by: Suresh Siddha <suresh.b.siddha at intel.com>
    Signed-off-by: Ingo Molnar <mingo at elte.hu>
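
Plugging nice-0 numbers into both forms of the check shows why the old test
could never fire; a small sketch, assuming SCHED_LOAD_SCALE_FUZZ ==
SCHED_LOAD_SCALE == 1024 as in this tree:

#include <stdio.h>

#define SCHED_LOAD_SCALE	1024UL
#define SCHED_LOAD_SCALE_FUZZ	SCHED_LOAD_SCALE

int main(void)
{
	/* an all-nice-0 busiest group: load per task == SCHED_LOAD_SCALE */
	unsigned long busiest_load_per_task = SCHED_LOAD_SCALE;
	unsigned long imbalance;

	for (imbalance = 0; imbalance <= 1024; imbalance += 256) {
		/* old test: imbalance + 1024 < 1024 is false for any
		 * imbalance >= 0, so the small-imbalance path was dead */
		int old_check = imbalance + SCHED_LOAD_SCALE_FUZZ <
				busiest_load_per_task;
		int new_check = imbalance < busiest_load_per_task;

		printf("imbalance=%4lu old=%d new=%d\n",
		       imbalance, old_check, new_check);
	}
	return 0;
}
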
---
 kernel/sched.c |    8 +++-----
 1 files changed, 3 insertions(+), 5 deletions(-)

diff --git a/kernel/sched.c b/kernel/sched.c
index b533d6d..c8759ec 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -2512,7 +2512,7 @@ group_next:
 	 * a think about bumping its value to force at least one task to be
 	 * moved
 	 */
-	if (*imbalance + SCHED_LOAD_SCALE_FUZZ < busiest_load_per_task) {
+	if (*imbalance < busiest_load_per_task) {
 		unsigned long tmp, pwr_now, pwr_move;
 		unsigned int imbn;
 
@@ -2564,10 +2564,8 @@ small_imbalance:
 		pwr_move /= SCHED_LOAD_SCALE;
 
 		/* Move if we gain throughput */
-		if (pwr_move <= pwr_now)
-			goto out_balanced;
-
-		*imbalance = busiest_load_per_task;
+		if (pwr_move > pwr_now)
+			*imbalance = busiest_load_per_task;
 	}
 
 	return busiest;
Gitweb:     http://git.kernel.org/git/?p=linux/kernel/git/torvalds/linux-2.6.git;a=commit;h=a206c07213cf6372289f189c3774c4c3255a7ae1
Commit:     a206c07213cf6372289f189c3774c4c3255a7ae1
Parent:     a0dc72601d48b171b4870dfdd0824901a2b2b1a9
Author:     Ingo Molnar <mingo at elte.hu>
AuthorDate: Wed Sep 5 14:32:49 2007 +0200
Committer:  Ingo Molnar <mingo at elte.hu>
CommitDate: Wed Sep 5 14:32:49 2007 +0200

    sched: debug: fix cfs_rq->wait_runtime accounting
    
    the cfs_rq->wait_runtime debug/statistics counter was not maintained
    properly - fix this.
    
    this also removes some code:
    
       text    data     bss     dec     hex filename
      13420     228    1204   14852    3a04 sched.o.before
      13404     228    1204   14836    39f4 sched.o.after
    
    Signed-off-by: Ingo Molnar <mingo at elte.hu>
    Signed-off-by: Peter Zijlstra <a.p.zijlstra at chello.nl>
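
The invariant being restored is that cfs_rq->wait_runtime equals the sum of
wait_runtime over the queued entities, maintained only at the enqueue/dequeue
choke points. A toy user-space model of that bookkeeping (illustrative names,
not kernel code; it omits the in-place updates __add_wait_runtime() makes
while an entity is queued):

#include <assert.h>
#include <stdio.h>

#define NR 4

static long wait_runtime[NR];	/* per-entity value */
static int  on_rq[NR];
static long rq_wait_runtime;	/* the aggregate debug counter */

static void enqueue(int i)
{
	on_rq[i] = 1;
	rq_wait_runtime += wait_runtime[i];	/* as in __enqueue_entity() */
}

static void dequeue(int i)
{
	on_rq[i] = 0;
	rq_wait_runtime -= wait_runtime[i];	/* as in __dequeue_entity() */
}

static void check(void)
{
	long sum = 0;
	for (int i = 0; i < NR; i++)
		if (on_rq[i])
			sum += wait_runtime[i];
	assert(sum == rq_wait_runtime);	/* the invariant the fix enforces */
}

int main(void)
{
	wait_runtime[0] = 100;
	wait_runtime[1] = -50;
	enqueue(0); check();
	enqueue(1); check();
	dequeue(0); check();
	printf("aggregate = %ld\n", rq_wait_runtime);
	return 0;
}
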
---
 kernel/sched.c      |    1 -
 kernel/sched_fair.c |   10 +++++-----
 2 files changed, 5 insertions(+), 6 deletions(-)

diff --git a/kernel/sched.c b/kernel/sched.c
index c8759ec..97986f1 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -858,7 +858,6 @@ static void dec_nr_running(struct task_struct *p, struct rq *rq)
 
 static void set_load_weight(struct task_struct *p)
 {
-	task_rq(p)->cfs.wait_runtime -= p->se.wait_runtime;
 	p->se.wait_runtime = 0;
 
 	if (task_has_rt_policy(p)) {
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index 810b52d..bac2aff 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -194,6 +194,8 @@ __enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
 	update_load_add(&cfs_rq->load, se->load.weight);
 	cfs_rq->nr_running++;
 	se->on_rq = 1;
+
+	schedstat_add(cfs_rq, wait_runtime, se->wait_runtime);
 }
 
 static inline void
@@ -205,6 +207,8 @@ __dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
 	update_load_sub(&cfs_rq->load, se->load.weight);
 	cfs_rq->nr_running--;
 	se->on_rq = 0;
+
+	schedstat_add(cfs_rq, wait_runtime, -se->wait_runtime);
 }
 
 static inline struct rb_node *first_fair(struct cfs_rq *cfs_rq)
@@ -574,7 +578,6 @@ static void __enqueue_sleeper(struct cfs_rq *cfs_rq, struct sched_entity *se)
 
 	prev_runtime = se->wait_runtime;
 	__add_wait_runtime(cfs_rq, se, delta_fair);
-	schedstat_add(cfs_rq, wait_runtime, se->wait_runtime);
 	delta_fair = se->wait_runtime - prev_runtime;
 
 	/*
@@ -662,7 +665,6 @@ dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int sleep)
 			if (tsk->state & TASK_UNINTERRUPTIBLE)
 				se->block_start = rq_of(cfs_rq)->clock;
 		}
-		cfs_rq->wait_runtime -= se->wait_runtime;
 #endif
 	}
 	__dequeue_entity(cfs_rq, se);
@@ -1121,10 +1123,8 @@ static void task_new_fair(struct rq *rq, struct task_struct *p)
 	 * The statistical average of wait_runtime is about
 	 * -granularity/2, so initialize the task with that:
 	 */
-	if (sysctl_sched_features & SCHED_FEAT_START_DEBIT) {
+	if (sysctl_sched_features & SCHED_FEAT_START_DEBIT)
 		se->wait_runtime = -(sched_granularity(cfs_rq) / 2);
-		schedstat_add(cfs_rq, wait_runtime, se->wait_runtime);
-	}
 
 	__enqueue_entity(cfs_rq, se);
 }
Gitweb:     http://git.kernel.org/git/?p=linux/kernel/git/torvalds/linux-2.6.git;a=commit;h=2491b2b89d4646e02ab51c90ab7012d124924ddc
Commit:     2491b2b89d4646e02ab51c90ab7012d124924ddc
Parent:     a206c07213cf6372289f189c3774c4c3255a7ae1
Author:     Ingo Molnar <mingo at elte.hu>
AuthorDate: Wed Sep 5 14:32:49 2007 +0200
Committer:  Ingo Molnar <mingo at elte.hu>
CommitDate: Wed Sep 5 14:32:49 2007 +0200

    sched: debug: fix sum_exec_runtime clearing
    
    when cleaning sched-stats, also clear prev_sum_exec_runtime, so the
    next delta_exec (sum_exec_runtime - prev_sum_exec_runtime) starts
    from a fresh baseline.
    
    Signed-off-by: Ingo Molnar <mingo at elte.hu>
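
Without this, a stats reset leaves prev_sum_exec_runtime stale, and the next
unsigned delta_exec computation wraps to a huge value. A minimal sketch of
that failure mode, with made-up numbers:

#include <stdio.h>

int main(void)
{
	unsigned long long sum_exec_runtime = 123456789ULL;
	unsigned long long prev_sum_exec_runtime = 123000000ULL;

	/* proc_sched_set_task() clears the accumulated runtime... */
	sum_exec_runtime = 0;

	/* ...but with a stale baseline the subtraction underflows,
	 * so every subsequent stint looks enormously long */
	unsigned long long delta_exec =
		sum_exec_runtime - prev_sum_exec_runtime;

	printf("delta_exec = %llu\n", delta_exec);	/* ~2^64 - 123000000 */
	return 0;
}
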
---
 kernel/sched_debug.c |    1 +
 1 files changed, 1 insertions(+), 0 deletions(-)

diff --git a/kernel/sched_debug.c b/kernel/sched_debug.c
index ab18f45..c3ee38b 100644
--- a/kernel/sched_debug.c
+++ b/kernel/sched_debug.c
@@ -283,4 +283,5 @@ void proc_sched_set_task(struct task_struct *p)
 	p->se.wait_runtime_overruns = p->se.wait_runtime_underruns = 0;
 #endif
 	p->se.sum_exec_runtime = 0;
+	p->se.prev_sum_exec_runtime	= 0;
 }
Gitweb:     http://git.kernel.org/git/?p=linux/kernel/git/torvalds/linux-2.6.git;a=commit;h=7c92e54f6f9601cfa9d8894ee248abcf62ed9a1c
Commit:     7c92e54f6f9601cfa9d8894ee248abcf62ed9a1c
Parent:     cf2ab4696ee42f895eed88c2b6e432fe03dda0db
Author:     Peter Zijlstra <a.p.zijlstra at chello.nl>
AuthorDate: Wed Sep 5 14:32:49 2007 +0200
Committer:  Ingo Molnar <mingo at elte.hu>
CommitDate: Wed Sep 5 14:32:49 2007 +0200

    sched: simplify __check_preempt_curr_fair()
    
    Preparatory patch for fix-ideal-runtime:
    
    simplify __check_preempt_curr_fair(): get rid of the integer return.
    
       text    data     bss     dec     hex filename
      13404     228    1204   14836    39f4 sched.o.before
      13393     228    1204   14825    39e9 sched.o.after
    
    functionality is unchanged.
    
    Signed-off-by: Peter Zijlstra <a.p.zijlstra at chello.nl>
    Signed-off-by: Ingo Molnar <mingo at elte.hu>
---
 kernel/sched_fair.c |    8 +++-----
 1 files changed, 3 insertions(+), 5 deletions(-)

diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index bac2aff..f0dd4be 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -673,7 +673,7 @@ dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int sleep)
 /*
  * Preempt the current task with a newly woken task if needed:
  */
-static int
+static void
 __check_preempt_curr_fair(struct cfs_rq *cfs_rq, struct sched_entity *se,
 			  struct sched_entity *curr, unsigned long granularity)
 {
@@ -686,9 +686,8 @@ __check_preempt_curr_fair(struct cfs_rq *cfs_rq, struct sched_entity *se,
 	 */
 	if (__delta > niced_granularity(curr, granularity)) {
 		resched_task(rq_of(cfs_rq)->curr);
-		return 1;
+		curr->prev_sum_exec_runtime = curr->sum_exec_runtime;
 	}
-	return 0;
 }
 
 static inline void
@@ -764,8 +763,7 @@ static void entity_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr)
 	if (delta_exec > ideal_runtime)
 		gran = 0;
 
-	if (__check_preempt_curr_fair(cfs_rq, next, curr, gran))
-		curr->prev_sum_exec_runtime = curr->sum_exec_runtime;
+	__check_preempt_curr_fair(cfs_rq, next, curr, gran);
 }
 
 /**************************************************
Gitweb:     http://git.kernel.org/git/?p=linux/kernel/git/torvalds/linux-2.6.git;a=commit;h=4a55b45036a677fac43fe81ddf7fdcd007aaaee7
Commit:     4a55b45036a677fac43fe81ddf7fdcd007aaaee7
Parent:     7c92e54f6f9601cfa9d8894ee248abcf62ed9a1c
Author:     Peter Zijlstra <a.p.zijlstra at chello.nl>
AuthorDate: Wed Sep 5 14:32:49 2007 +0200
Committer:  Ingo Molnar <mingo at elte.hu>
CommitDate: Wed Sep 5 14:32:49 2007 +0200

    sched: improve prev_sum_exec_runtime setting
    
    Second preparatory patch for fix-ideal-runtime:
    
    Mark prev_sum_exec_runtime at the beginning of our run, the same spot
    that adds our wait period to wait_runtime. This seems a more natural
    location to do this, and it also reduces the code a bit:
    
       text    data     bss     dec     hex filename
      13397     228    1204   14829    39ed sched.o.before
      13391     228    1204   14823    39e7 sched.o.after
    
    Signed-off-by: Peter Zijlstra <a.p.zijlstra at chello.nl>
    Signed-off-by: Ingo Molnar <mingo at elte.hu>
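
The new placement takes the baseline exactly once per schedule-in, so
delta_exec later measures just the current stint on the CPU. A compact model
of that lifecycle (hypothetical names, not the kernel code):

#include <stdio.h>

struct ent {
	unsigned long long sum_exec_runtime;
	unsigned long long prev_sum_exec_runtime;
};

/* as in set_next_entity() after this patch: mark at schedule-in */
static void schedule_in(struct ent *e)
{
	e->prev_sum_exec_runtime = e->sum_exec_runtime;
}

static void run_for(struct ent *e, unsigned long long ns)
{
	e->sum_exec_runtime += ns;
}

int main(void)
{
	struct ent e = { .sum_exec_runtime = 5000000ULL };

	schedule_in(&e);
	run_for(&e, 1500000ULL);

	/* the stint length the preemption check compares to ideal_runtime */
	printf("delta_exec = %llu ns\n",
	       e.sum_exec_runtime - e.prev_sum_exec_runtime);
	return 0;
}
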
---
 kernel/sched_fair.c |    5 ++---
 1 files changed, 2 insertions(+), 3 deletions(-)

diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index f0dd4be..2d01bbc 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -684,10 +684,8 @@ __check_preempt_curr_fair(struct cfs_rq *cfs_rq, struct sched_entity *se,
 	 * preempt the current task unless the best task has
 	 * a larger than sched_granularity fairness advantage:
 	 */
-	if (__delta > niced_granularity(curr, granularity)) {
+	if (__delta > niced_granularity(curr, granularity))
 		resched_task(rq_of(cfs_rq)->curr);
-		curr->prev_sum_exec_runtime = curr->sum_exec_runtime;
-	}
 }
 
 static inline void
@@ -703,6 +701,7 @@ set_next_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
 	update_stats_wait_end(cfs_rq, se);
 	update_stats_curr_start(cfs_rq, se);
 	set_cfs_rq_curr(cfs_rq, se);
+	se->prev_sum_exec_runtime = se->sum_exec_runtime;
 }
 
 static struct sched_entity *pick_next_entity(struct cfs_rq *cfs_rq)
Gitweb:     http://git.kernel.org/git/?p=linux/kernel/git/torvalds/linux-2.6.git;a=commit;h=1169783085adb9ac969d21103a6885e8435f7ed3
Commit:     1169783085adb9ac969d21103a6885e8435f7ed3
Parent:     4a55b45036a677fac43fe81ddf7fdcd007aaaee7
Author:     Peter Zijlstra <a.p.zijlstra at chello.nl>
AuthorDate: Wed Sep 5 14:32:49 2007 +0200
Committer:  Ingo Molnar <mingo at elte.hu>
CommitDate: Wed Sep 5 14:32:49 2007 +0200

    sched: fix ideal_runtime calculations for reniced tasks
    
    fix ideal_runtime:
    
      - do not scale it using niced_granularity(); it is compared
        against sum_exec_delta, so it's wall-time, not fair-time.
    
      - move the whole check into __check_preempt_curr_fair()
        so that wakeup preemption can also benefit from the new logic.
    
    this also results in code size reduction:
    
       text    data     bss     dec     hex filename
      13391     228    1204   14823    39e7 sched.o.before
      13369     228    1204   14801    39d1 sched.o.after
    
    Signed-off-by: Peter Zijlstra <a.p.zijlstra at chello.nl>
    Signed-off-by: Ingo Molnar <mingo at elte.hu>
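
Worked through with illustrative numbers (a sketch; the sysctl values here
are examples, not necessarily this tree's defaults): a 20 ms latency target
and 4 runnable tasks give each task an unscaled wall-time slice of
max(20 ms / 4, 2 ms) = 5 ms, and once the current stint exceeds that, the
granularity is dropped to 0 so preemption is decided on fair_key alone:

#include <stdio.h>

static unsigned long max_ul(unsigned long a, unsigned long b)
{
	return a > b ? a : b;
}

int main(void)
{
	/* illustrative sysctl values, in nanoseconds */
	unsigned long sysctl_sched_latency = 20000000UL;		/* 20 ms */
	unsigned long sysctl_sched_min_granularity = 2000000UL;	/*  2 ms */
	unsigned long nr_running = 4;

	/* wall-time slice: no niced_granularity() scaling */
	unsigned long ideal_runtime =
		max_ul(sysctl_sched_latency / nr_running,
		       sysctl_sched_min_granularity);

	unsigned long delta_exec = 6000000UL;	/* this stint ran 6 ms */
	unsigned long granularity = 2000000UL;

	if (delta_exec > ideal_runtime)
		granularity = 0;	/* over budget: preempt readily */

	printf("ideal_runtime=%lu ns, granularity=%lu ns\n",
	       ideal_runtime, granularity);
	return 0;
}
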
---
 kernel/sched_fair.c |   38 ++++++++++++++++++++++----------------
 1 files changed, 22 insertions(+), 16 deletions(-)

diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index 2d01bbc..892616b 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -678,11 +678,31 @@ __check_preempt_curr_fair(struct cfs_rq *cfs_rq, struct sched_entity *se,
 			  struct sched_entity *curr, unsigned long granularity)
 {
 	s64 __delta = curr->fair_key - se->fair_key;
+	unsigned long ideal_runtime, delta_exec;
+
+	/*
+	 * ideal_runtime is compared against sum_exec_runtime, which is
+	 * walltime, hence do not scale.
+	 */
+	ideal_runtime = max(sysctl_sched_latency / cfs_rq->nr_running,
+			(unsigned long)sysctl_sched_min_granularity);
+
+	/*
+	 * If we executed more than what the latency constraint suggests,
+	 * reduce the rescheduling granularity. This way the total latency
+	 * of how much a task is not scheduled converges to
+	 * sysctl_sched_latency:
+	 */
+	delta_exec = curr->sum_exec_runtime - curr->prev_sum_exec_runtime;
+	if (delta_exec > ideal_runtime)
+		granularity = 0;
 
 	/*
 	 * Take scheduling granularity into account - do not
 	 * preempt the current task unless the best task has
 	 * a larger than sched_granularity fairness advantage:
+	 *
+	 * scale granularity as key space is in fair_clock.
 	 */
 	if (__delta > niced_granularity(curr, granularity))
 		resched_task(rq_of(cfs_rq)->curr);
@@ -731,7 +751,6 @@ static void put_prev_entity(struct cfs_rq *cfs_rq, struct sched_entity *prev)
 
 static void entity_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr)
 {
-	unsigned long gran, ideal_runtime, delta_exec;
 	struct sched_entity *next;
 
 	/*
@@ -748,21 +767,8 @@ static void entity_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr)
 	if (next == curr)
 		return;
 
-	gran = sched_granularity(cfs_rq);
-	ideal_runtime = niced_granularity(curr,
-		max(sysctl_sched_latency / cfs_rq->nr_running,
-		    (unsigned long)sysctl_sched_min_granularity));
-	/*
-	 * If we executed more than what the latency constraint suggests,
-	 * reduce the rescheduling granularity. This way the total latency
-	 * of how much a task is not scheduled converges to
-	 * sysctl_sched_latency:
-	 */
-	delta_exec = curr->sum_exec_runtime - curr->prev_sum_exec_runtime;
-	if (delta_exec > ideal_runtime)
-		gran = 0;
-
-	__check_preempt_curr_fair(cfs_rq, next, curr, gran);
+	__check_preempt_curr_fair(cfs_rq, next, curr,
+			sched_granularity(cfs_rq));
 }
 
 /**************************************************


Index: kernel-2.6.spec
===================================================================
RCS file: /cvs/dist/rpms/kernel/FC-6/kernel-2.6.spec,v
retrieving revision 1.3018
retrieving revision 1.3019
diff -u -r1.3018 -r1.3019
--- kernel-2.6.spec	5 Sep 2007 16:18:08 -0000	1.3018
+++ kernel-2.6.spec	12 Sep 2007 21:44:47 -0000	1.3019
@@ -612,7 +612,8 @@
 Patch800: linux-2.6-wakeups-hdaps.patch
 Patch801: linux-2.6-wakeups.patch
 Patch900: linux-2.6-sched-cfs-v2.6.22.5-v20.5.patch
-Patch901: linux-2.6-timekeeping-fixes.patch
+Patch901: linux-2.6-sched-cfs-updates.patch
+Patch902: linux-2.6-timekeeping-fixes.patch
 Patch1000: linux-2.6-dmi-based-module-autoloading.patch
 Patch1030: linux-2.6-nfs-nosharecache.patch
 Patch1400: linux-2.6-pcspkr-use-the-global-pit-lock.patch
@@ -1062,6 +1063,8 @@
 
 # Ingo's new scheduler.
 ApplyPatch linux-2.6-sched-cfs-v2.6.22.5-v20.5.patch
+# CFS updates
+ApplyPatch linux-2.6-sched-cfs-updates.patch
 # timekeeping fixes that were in the Fedora CFS patch
 ApplyPatch linux-2.6-timekeeping-fixes.patch
 
@@ -2240,6 +2243,10 @@
 
 %changelog
 * Wed Sep 05 2007 Chuck Ebbert <cebbert at redhat.com>
+- CFS scheduler updates
+- utrace update (#248532, #267161, #284311)
+
+* Wed Sep 05 2007 Chuck Ebbert <cebbert at redhat.com>
 - Update utrace
 
 * Tue Aug 28 2007 Chuck Ebbert <cebbert at redhat.com>

linux-2.6-sched-cfs-v2.6.22.5-v20.5.patch:
 Documentation/kernel-parameters.txt |   43 
 Documentation/sched-design-CFS.txt  |  119 +
 arch/i386/kernel/smpboot.c          |   12 
 arch/i386/kernel/tsc.c              |   14 
 arch/ia64/kernel/setup.c            |    6 
 arch/mips/kernel/smp.c              |   11 
 arch/sparc/kernel/smp.c             |   10 
 arch/sparc64/kernel/smp.c           |   27 
 block/cfq-iosched.c                 |    3 
 drivers/acpi/processor_idle.c       |   32 
 fs/proc/array.c                     |   59 
 fs/proc/base.c                      |   71 
 include/asm-generic/bitops/sched.h  |   21 
 include/linux/cpu.h                 |    2 
 include/linux/hardirq.h             |   13 
 include/linux/sched.h               |  290 ++
 include/linux/topology.h            |   15 
 init/main.c                         |    5 
 kernel/delayacct.c                  |   10 
 kernel/exit.c                       |    5 
 kernel/fork.c                       |    4 
 kernel/posix-cpu-timers.c           |   34 
 kernel/sched.c                      | 3619 +++++++++++++++---------------------
 kernel/sched_debug.c                |  286 ++
 kernel/sched_fair.c                 | 1179 +++++++++++
 kernel/sched_idletask.c             |   71 
 kernel/sched_rt.c                   |  234 ++
 kernel/sched_stats.h                |  237 ++
 kernel/softirq.c                    |    1 
 kernel/sysctl.c                     |   87 
 lib/Kconfig.debug                   |    9 
 31 files changed, 4233 insertions(+), 2296 deletions(-)

Index: linux-2.6-sched-cfs-v2.6.22.5-v20.5.patch
===================================================================
RCS file: /cvs/dist/rpms/kernel/FC-6/linux-2.6-sched-cfs-v2.6.22.5-v20.5.patch,v
retrieving revision 1.1
retrieving revision 1.2
diff -u -r1.1 -r1.2
--- linux-2.6-sched-cfs-v2.6.22.5-v20.5.patch	30 Aug 2007 17:19:57 -0000	1.1
+++ linux-2.6-sched-cfs-v2.6.22.5-v20.5.patch	12 Sep 2007 21:44:47 -0000	1.2
@@ -8910,48 +8910,3 @@
  config SCHEDSTATS
  	bool "Collect scheduler statistics"
  	depends on DEBUG_KERNEL && PROC_FS
-Try to fix MC/HT scheduler optimization breakage again, with out breaking
-the FUZZ logic.
-
-First fix the check
-	if (*imbalance + SCHED_LOAD_SCALE_FUZZ < busiest_load_per_task)
-with this
-	if (*imbalance < busiest_load_per_task)
-
-As the current check is always false for nice 0 tasks (as SCHED_LOAD_SCALE_FUZZ
-is same as busiest_load_per_task for nice 0 tasks).
-
-With the above change, imbalance was getting reset to 0 in the corner case
-condition, making the FUZZ logic fail. Fix it by not corrupting the
-imbalance and change the imbalance, only when it finds that the
-HT/MC optimization is needed.
-
-Signed-off-by: Suresh Siddha <suresh.b.siddha at intel.com>
----
-
-diff --git a/kernel/sched.c b/kernel/sched.c
-index 9fe473a..03e5e8d 100644
---- a/kernel/sched.c
-+++ b/kernel/sched.c
-@@ -2511,7 +2511,7 @@ group_next:
- 	 * a think about bumping its value to force at least one task to be
- 	 * moved
- 	 */
--	if (*imbalance + SCHED_LOAD_SCALE_FUZZ < busiest_load_per_task) {
-+	if (*imbalance < busiest_load_per_task) {
- 		unsigned long tmp, pwr_now, pwr_move;
- 		unsigned int imbn;
- 
-@@ -2563,10 +2563,8 @@ small_imbalance:
- 		pwr_move /= SCHED_LOAD_SCALE;
- 
- 		/* Move if we gain throughput */
--		if (pwr_move <= pwr_now)
--			goto out_balanced;
--
--		*imbalance = busiest_load_per_task;
-+		if (pwr_move > pwr_now)
-+			*imbalance = busiest_load_per_task;
- 	}
- 
- 	return busiest;

linux-2.6-utrace-core.patch:
 Documentation/DocBook/Makefile    |    2 
 Documentation/DocBook/utrace.tmpl |   23 
 Documentation/utrace.txt          |  579 +++++++++
 include/linux/sched.h             |    5 
 include/linux/tracehook.h         |   85 +
 include/linux/utrace.h            |  544 ++++++++
 init/Kconfig                      |   18 
 kernel/Makefile                   |    1 
 kernel/utrace.c                   | 2359 ++++++++++++++++++++++++++++++++++++++
 9 files changed, 3598 insertions(+), 18 deletions(-)

Index: linux-2.6-utrace-core.patch
===================================================================
RCS file: /cvs/dist/rpms/kernel/FC-6/linux-2.6-utrace-core.patch,v
retrieving revision 1.5
retrieving revision 1.6
diff -u -r1.5 -r1.6
--- linux-2.6-utrace-core.patch	5 Sep 2007 16:18:09 -0000	1.5
+++ linux-2.6-utrace-core.patch	12 Sep 2007 21:44:47 -0000	1.6
@@ -27,8 +27,8 @@
  include/linux/utrace.h            |  544 ++++++++
  init/Kconfig                      |   18 
  kernel/Makefile                   |    1 
- kernel/utrace.c                   | 2344 ++++++++++++++++++++++++++++++++++++++
- 9 files changed, 3583 insertions(+), 18 deletions(-)
+ kernel/utrace.c                   | 2359 ++++++++++++++++++++++++++++++++++++++
+ 9 files changed, 3598 insertions(+), 18 deletions(-)
  create kernel/utrace.c
  create Documentation/utrace.txt
  create Documentation/DocBook/utrace.tmpl
@@ -50,7 +50,7 @@
 ===================================================================
 --- /dev/null
 +++ b/kernel/utrace.c
-@@ -0,0 +1,2344 @@
+@@ -0,0 +1,2359 @@
 +/*
 + * utrace infrastructure interface for debugging user processes
 + *
@@ -2008,13 +2008,28 @@
 +	 */
 +	if (signal.signr != 0) {
 +		if (signal.return_ka == NULL) {
-+			ka = &tsk->sighand->action[signal.signr - 1];
++			/*
++			 * utrace_inject_signal recorded this to have us
++			 * use the injected signal's normal sigaction.  We
++			 * have to perform the SA_ONESHOT work now because
++			 * our caller will never touch the real sigaction.
++			 */
++			ka = &tsk->sighand->action[info->si_signo - 1];
++			*return_ka = *ka;
 +			if (ka->sa.sa_flags & SA_ONESHOT)
 +				ka->sa.sa_handler = SIG_DFL;
-+			*return_ka = *ka;
 +		}
 +		else
 +			BUG_ON(signal.return_ka != return_ka);
++
++		/*
++		 * We already processed the SA_ONESHOT work ahead of time.
++		 * Once we return nonzero, our caller will only refer to
++		 * return_ka.  So we must clear the flag to be sure it
++		 * doesn't clear return_ka->sa.sa_handler.
++		 */
++		return_ka->sa.sa_flags &= ~SA_ONESHOT;
++
 +		return signal.signr;
 +	}
 +

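The comment block added above hinges on SA_ONESHOT semantics: a one-shot
handler must revert to SIG_DFL after a single delivery, and once the caller
works only from the copied return_ka, nothing else would perform that reset.
The same semantics are visible from user space through sigaction()'s
SA_RESETHAND flag (the POSIX spelling of SA_ONESHOT); a small self-contained
demo:

#define _POSIX_C_SOURCE 200809L
#include <signal.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

static void handler(int sig)
{
	(void)sig;
	write(1, "caught once\n", 12);
}

int main(void)
{
	struct sigaction sa;

	memset(&sa, 0, sizeof(sa));
	sa.sa_handler = handler;
	sa.sa_flags = SA_RESETHAND;	/* POSIX name for SA_ONESHOT */
	sigaction(SIGUSR1, &sa, NULL);

	raise(SIGUSR1);	/* handler runs once, then reverts to SIG_DFL */

	sigaction(SIGUSR1, NULL, &sa);
	printf("handler after delivery: %s\n",
	       sa.sa_handler == SIG_DFL ? "SIG_DFL" : "still installed");
	return 0;
}
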
linux-2.6-utrace-ptrace-compat.patch:
 arch/i386/kernel/ptrace.c       |   40 
 arch/powerpc/kernel/ptrace.c    |  250 ++++
 arch/powerpc/kernel/signal_32.c |   52 +
 arch/powerpc/lib/sstep.c        |    3 
 arch/x86_64/ia32/ia32entry.S    |    2 
 arch/x86_64/ia32/ptrace32.c     |   56 -
 arch/x86_64/kernel/ptrace.c     |   46 
 fs/proc/base.c                  |   40 
 include/asm-x86_64/ptrace-abi.h |    3 
 include/asm-x86_64/tracehook.h  |    1 
 include/linux/ptrace.h          |  221 +++-
 include/linux/sched.h           |    4 
 init/Kconfig                    |   15 
 kernel/Makefile                 |    3 
 kernel/exit.c                   |   13 
 kernel/fork.c                   |    2 
 kernel/ptrace.c                 | 2053 +++++++++++++++++++++++++++++++++++++---
 kernel/sys_ni.c                 |    4 
 18 files changed, 2634 insertions(+), 174 deletions(-)

Index: linux-2.6-utrace-ptrace-compat.patch
===================================================================
RCS file: /cvs/dist/rpms/kernel/FC-6/linux-2.6-utrace-ptrace-compat.patch,v
retrieving revision 1.6
retrieving revision 1.7
diff -u -r1.6 -r1.7
--- linux-2.6-utrace-ptrace-compat.patch	5 Sep 2007 16:18:09 -0000	1.6
+++ linux-2.6-utrace-ptrace-compat.patch	12 Sep 2007 21:44:47 -0000	1.7
@@ -28,9 +28,9 @@
  kernel/Makefile                 |    3 
  kernel/exit.c                   |   13 
  kernel/fork.c                   |    2 
- kernel/ptrace.c                 | 2052 +++++++++++++++++++++++++++++++++++++---
+ kernel/ptrace.c                 | 2053 +++++++++++++++++++++++++++++++++++++---
  kernel/sys_ni.c                 |    4 
- 18 files changed, 2633 insertions(+), 174 deletions(-)
+ 18 files changed, 2634 insertions(+), 174 deletions(-)
 
 Index: b/fs/proc/base.c
 ===================================================================
@@ -683,7 +683,7 @@
 ===================================================================
 --- a/kernel/ptrace.c
 +++ b/kernel/ptrace.c
-@@ -19,194 +19,2008 @@
+@@ -19,194 +19,2009 @@
  #include <linux/security.h>
  #include <linux/signal.h>
  #include <linux/syscalls.h>
@@ -1131,9 +1131,7 @@
 +			 struct ptrace_state *state)
  {
 -	if (!valid_signal(data))
--		return -EIO;
- 
--	return -ENOSYS;
++
 +	int error;
 +
 +	NO_LOCKS;
@@ -1191,29 +1189,20 @@
 +		spin_unlock_irq(&current->sighand->siglock);
 +	}
 +	return error;
- }
- 
--int ptrace_readdata(struct task_struct *tsk, unsigned long src, char __user *dst, int len)
++}
++
 +
 +/*
 + * This is called when we are exiting.  We must stop all our ptracing.
 + */
 +void
 +ptrace_exit(struct task_struct *tsk)
- {
--	int copied = 0;
++{
 +	struct list_head *pos, *n;
 +	int restart;
- 
--	while (len > 0) {
--		char buf[128];
--		int this_len, retval;
++
 +	NO_LOCKS;
- 
--		this_len = (len > sizeof(buf)) ? sizeof(buf) : len;
--		retval = access_process_vm(tsk, src, buf, this_len, 0);
--		if (!retval) {
--			if (copied)
++
 +	/*
 +	 * Taking the task_lock after PF_EXITING is set ensures that a
 +	 * child in ptrace_traceme will not put itself on our list when
@@ -1226,7 +1215,6 @@
 +	}
 +	task_unlock(tsk);
 +
-+	restart = 0;
 +	do {
 +		struct ptrace_state *state;
 +		int error;
@@ -1235,6 +1223,7 @@
 +
 +		rcu_read_lock();
 +
++		restart = 0;
 +		list_for_each_safe_rcu(pos, n, &tsk->ptracees) {
 +			state = list_entry(pos, struct ptrace_state, entry);
 +			error = utrace_detach(state->task, state->engine);
@@ -1256,7 +1245,7 @@
 +				wait_task_inactive(p);
 +				put_task_struct(p);
 +				restart = 1;
- 				break;
++				goto loop_unlocked;
 +			}
 +			else {
 +				BUG_ON(error != -ESRCH);
@@ -1266,10 +1255,11 @@
 +
 +		rcu_read_unlock();
 +
++	loop_unlocked:
 +		END_CHECK;
 +
 +		cond_resched();
-+	} while (restart > 0);
++	} while (unlikely(restart > 0));
 +
 +	if (likely(restart == 0))
 +		/*
@@ -1291,8 +1281,9 @@
 +		return 0;
 +
 +	if (!valid_signal(signr))
-+		return -EIO;
-+
+ 		return -EIO;
+ 
+-	return -ENOSYS;
 +	if (state->syscall) {
 +		/*
 +		 * This is the traditional ptrace behavior when given
@@ -1320,22 +1311,32 @@
 +	}
 +
 +	return 0;
-+}
-+
+ }
+ 
+-int ptrace_readdata(struct task_struct *tsk, unsigned long src, char __user *dst, int len)
 +int
 +ptrace_regset_access(struct task_struct *target,
 +		     struct utrace_attached_engine *engine,
 +		     const struct utrace_regset_view *view,
 +		     int setno, unsigned long offset, unsigned int size,
 +		     void __user *data, int write)
-+{
+ {
+-	int copied = 0;
 +	const struct utrace_regset *regset = utrace_regset(target, engine,
 +							   view, setno);
 +	int ret;
-+
+ 
+-	while (len > 0) {
+-		char buf[128];
+-		int this_len, retval;
 +	if (unlikely(regset == NULL))
 +		return -EIO;
-+
+ 
+-		this_len = (len > sizeof(buf)) ? sizeof(buf) : len;
+-		retval = access_process_vm(tsk, src, buf, this_len, 0);
+-		if (!retval) {
+-			if (copied)
+-				break;
 +	if (size == (unsigned int) -1)
 +		size = regset->size * regset->n;
 +
@@ -1460,13 +1461,7 @@
 +			else
 +				ret = (*regset->get)(target, regset,
 +						     pos, n, kdata, udata);
- 		}
--		if (copy_to_user(dst, buf, retval))
--			return -EFAULT;
--		copied += retval;
--		src += retval;
--		dst += retval;
--		len -= retval;			
++		}
 +
 +		if (kdata)
 +			kdata += n;
@@ -1514,8 +1509,7 @@
 +	if (request == PTRACE_ATTACH) {
 +		ret = ptrace_attach(child);
 +		goto out_tsk;
- 	}
--	return copied;
++	}
 +
 +	rcu_read_lock();
 +	engine = utrace_attach(child, UTRACE_ATTACH_MATCH_OPS,
@@ -1542,7 +1536,13 @@
 +			if (child->state == TASK_STOPPED)
 +				ret = 0;
 +			unlock_task_sighand(child, &flags);
-+		}
+ 		}
+-		if (copy_to_user(dst, buf, retval))
+-			return -EFAULT;
+-		copied += retval;
+-		src += retval;
+-		dst += retval;
+-		len -= retval;			
 +		if (ret == 0) {
 +			ret = ptrace_update(child, state,
 +					    UTRACE_ACTION_QUIESCE, 0);
@@ -1559,7 +1559,8 @@
 +		}
 +
 +		ret = -ESRCH;  /* Return value for exit_state bail-out.  */
-+	}
+ 	}
+-	return copied;
 +
 +	rcu_read_unlock();
 +

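The ptrace_exit() hunks above move the restart reset inside the retry loop
and jump to a label once the RCU lock is already dropped; with the reset
outside the loop, a clean rescan following a restart would still see
restart == 1 and never terminate. A toy model of the corrected control flow
(plain C, no kernel APIs):

#include <stdio.h>

#define NR 3

int main(void)
{
	int needs_wait[NR] = { 1, 0, 1 };	/* two detaches must retry */
	int restart;

	do {
		/* the fix: clear the flag at the top of every pass;
		 * a single clear before the loop would leave it set
		 * after the first restart, looping forever */
		restart = 0;

		for (int i = 0; i < NR; i++) {
			if (needs_wait[i]) {
				/* drop the lock, wait for the task,
				 * then rescan from the start */
				needs_wait[i] = 0;
				restart = 1;
				break;	/* like the goto loop_unlocked */
			}
		}
		printf("pass complete, restart=%d\n", restart);
	} while (restart);

	return 0;
}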


