From 44dcb04f0ea8eaac3b9c9d3172416efc5a950214 Mon Sep 17 00:00:00 2001
From: Srikar Dronamraju
Date: Tue, 16 Jun 2015 17:26:00 +0530
Subject: sched/numa: Consider 'imbalance_pct' when comparing loads in numa_has_capacity()

This is consistent with all other load-balancing instances, where we
absorb unfairness up to env->imbalance_pct. Absorbing unfairness up to
env->imbalance_pct allows us to pull and retain tasks on their
preferred nodes.

Signed-off-by: Srikar Dronamraju
Signed-off-by: Peter Zijlstra (Intel)
Acked-by: Rik van Riel
Cc: Linus Torvalds
Cc: Mel Gorman
Cc: Peter Zijlstra
Cc: Thomas Gleixner
Link: http://lkml.kernel.org/r/1434455762-30857-3-git-send-email-srikar@linux.vnet.ibm.com
Signed-off-by: Ingo Molnar
---
 kernel/sched/fair.c | 5 +++--
 1 file changed, 3 insertions(+), 2 deletions(-)

diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 43ee84f05d1e..a53a610095e6 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -1415,8 +1415,9 @@ static bool numa_has_capacity(struct task_numa_env *env)
 	 * --------------------- vs ---------------------
 	 * src->compute_capacity    dst->compute_capacity
 	 */
-	if (src->load * dst->compute_capacity >
-	    dst->load * src->compute_capacity)
+	if (src->load * dst->compute_capacity * env->imbalance_pct >
+
+	    dst->load * src->compute_capacity * 100)
 		return true;
 
 	return false;
--
cgit v1.2.3
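
The patched test is the cross-multiplied form of

	src->load / src->compute_capacity  >  dst->load / dst->compute_capacity

with the source side scaled by env->imbalance_pct, so a move is still considered when the source's normalized load falls short of the destination's by up to (1 - 100/imbalance_pct), roughly 11% for the imbalance_pct = 112 that task_numa_migrate() initializes. Below is a minimal, stand-alone user-space sketch of that arithmetic; the pared-down struct numa_stats and the numa_load_check() helper are hypothetical stand-ins for illustration, not the kernel's code:

#include <stdio.h>
#include <stdbool.h>

/* Pared-down, hypothetical stand-in for the scheduler's struct numa_stats. */
struct numa_stats {
	unsigned long load;
	unsigned long compute_capacity;
};

/*
 * Cross-multiplied form of the load/capacity comparison in
 * numa_has_capacity(), with the source side scaled by imbalance_pct
 * so that up to (imbalance_pct - 100)% of unfairness is absorbed.
 */
static bool numa_load_check(const struct numa_stats *src,
			    const struct numa_stats *dst,
			    unsigned int imbalance_pct)
{
	return src->load * dst->compute_capacity * imbalance_pct >
	       dst->load * src->compute_capacity * 100;
}

int main(void)
{
	/* Hypothetical numbers: source is 5% less loaded, equal capacities. */
	struct numa_stats src = { .load =  950, .compute_capacity = 1024 };
	struct numa_stats dst = { .load = 1000, .compute_capacity = 1024 };

	/* Pre-patch behaviour (no slack): 950 > 1000 is false, move rejected. */
	printf("pct=100: %d\n", numa_load_check(&src, &dst, 100));

	/*
	 * With imbalance_pct = 112 (the value task_numa_migrate() sets up):
	 * 950 * 112 = 106400 > 1000 * 100 = 100000, so the move is still
	 * considered and a task can be pulled to its preferred node.
	 */
	printf("pct=112: %d\n", numa_load_check(&src, &dst, 112));

	return 0;
}

The two printf calls show the behavioural difference the patch makes: a task whose source node is slightly less loaded than its preferred destination would previously fail the check, but with the imbalance_pct slack it can still be pulled to, and retained on, its preferred node.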