From 4211a9a2e94a34df8c02bc39b7ec10678ad5c2ab Mon Sep 17 00:00:00 2001
From: Christoph Lameter
Date: Sun, 10 Dec 2006 02:20:19 -0800
Subject: [PATCH] sched: remove staggering of load balancing

Timer interrupts already are staggered.  We do not need an additional
layer of time staggering for short load balancing actions that take a
reasonably small portion of the time slice.

For load balancing on large sched_domains we will add a serialization
later that avoids concurrent load balance operations and thus has the
same effect as load staggering.

Signed-off-by: Christoph Lameter
Cc: Peter Williams
Cc: Nick Piggin
Cc: Christoph Lameter
Cc: "Siddha, Suresh B"
Cc: "Chen, Kenneth W"
Acked-by: Ingo Molnar
Cc: KAMEZAWA Hiroyuki
Signed-off-by: Andrew Morton
Signed-off-by: Linus Torvalds
---
 kernel/sched.c | 10 ++--------
 1 file changed, 2 insertions(+), 8 deletions(-)

(limited to 'kernel')

diff --git a/kernel/sched.c b/kernel/sched.c
index fdd26fffaa20..b5b350135002 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -2841,16 +2841,10 @@ static void active_load_balance(struct rq *busiest_rq, int busiest_cpu)
  * Balancing parameters are set up in arch_init_sched_domains.
  */
 
-/* Don't have all balancing operations going off at once: */
-static inline unsigned long cpu_offset(int cpu)
-{
-	return jiffies + cpu * HZ / NR_CPUS;
-}
-
 static void
 rebalance_tick(int this_cpu, struct rq *this_rq, enum idle_type idle)
 {
-	unsigned long this_load, interval, j = cpu_offset(this_cpu);
+	unsigned long this_load, interval;
 	struct sched_domain *sd;
 	int i, scale;
 
@@ -2885,7 +2879,7 @@ rebalance_tick(int this_cpu, struct rq *this_rq, enum idle_type idle)
 	if (unlikely(!interval))
 		interval = 1;
 
-	if (j - sd->last_balance >= interval) {
+	if (jiffies - sd->last_balance >= interval) {
 		if (load_balance(this_cpu, this_rq, sd, idle)) {
 			/*
 			 * We've pulled tasks over so either we're no
--
cgit v1.2.3
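
For illustration only, a minimal user-space C sketch (not kernel code) of the balance-due check before and after this patch. The HZ, NR_CPUS, jiffies, last_balance and interval values below are made-up stand-ins chosen so the effect of the removed cpu_offset() stagger is visible; the real scheduler uses the kernel's jiffies counter and the per-domain sd->last_balance and interval fields shown in the diff above.

	/* Illustrative sketch; not kernel code.  jiffies is passed in
	 * explicitly because user space has no global jiffies counter. */
	#include <stdio.h>

	#define HZ	250	/* example tick rate */
	#define NR_CPUS	8	/* example CPU count */

	/* Mirrors the helper removed by the patch: offset each CPU's
	 * notion of "now" so balance checks fire at staggered times. */
	static unsigned long cpu_offset(unsigned long jiffies, int cpu)
	{
		return jiffies + cpu * HZ / NR_CPUS;
	}

	int main(void)
	{
		unsigned long jiffies = 100000;		/* simulated tick counter */
		unsigned long last_balance = 99960;	/* simulated sd->last_balance */
		unsigned long interval = 64;		/* simulated balance interval */

		for (int cpu = 0; cpu < NR_CPUS; cpu++) {
			unsigned long j = cpu_offset(jiffies, cpu);
			int old_due = (j - last_balance >= interval);
			int new_due = (jiffies - last_balance >= interval);

			printf("cpu %d: staggered check due=%d, plain jiffies check due=%d\n",
			       cpu, old_due, new_due);
		}
		return 0;
	}

With these example numbers, the staggered check declares balancing due on higher-numbered CPUs before the plain jiffies check does, which is exactly the per-CPU spreading the patch removes in favor of the already-staggered timer tick.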