author     Morten Rasmussen <morten.rasmussen@arm.com>  2015-07-06 15:01:10 +0100
committer  Amit Pundir <amit.pundir@linaro.org>         2016-09-14 14:48:50 +0530
commit     d72801bf86b42594ba6e89b809718d2d401b1660 (patch)
tree       19b1a3fb8b1c7ac69fa47af62b6f636b66b328cb /kernel
parent     91b2b633145344b23bd960802a0dbb76a61afef7 (diff)
sched: Consider spare cpu capacity at task wake-up
find_idlest_group() selects the wake-up target group purely based on group load, which leads to suboptimal choices in low load scenarios. An idle group with reduced capacity (due to RT tasks or a different cpu type) isn't necessarily a better target than a lightly loaded group with higher capacity.

The patch adds spare capacity as an additional group selection parameter. The target group is now selected based on the following criteria:

1. Return the group containing the cpu with the most spare capacity, if such a group exists and the spare capacity is significant. Spare capacity is currently considered significant if the cpu has at least 20% to spare.

2. Return the group with the lowest load, unless it is the local group, in which case NULL is returned and the search is continued at the next (lower) level.

cc: Ingo Molnar <mingo@redhat.com>
cc: Peter Zijlstra <peterz@infradead.org>
Signed-off-by: Morten Rasmussen <morten.rasmussen@arm.com>
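For scale: assuming capacity_margin is 1280 and SCHED_LOAD_SCALE is 1024, as in the accompanying EAS patches, max_spare_capacity starts at 1280 - 1024 = 256 capacity units, so a cpu only qualifies once it has more than 256 units free, i.e. 20% of the margin scale. The stand-alone C sketch below mimics the new selection loop under those assumptions; the struct layout, find_most_spare_group() and the example topology are simplified stand-ins for illustration, not the kernel's own code:

#include <stdio.h>

#define SCHED_LOAD_SCALE 1024UL /* unit capacity scale */
#define CAPACITY_MARGIN  1280UL /* ~20% headroom, assumed from the EAS patches */

/* Hypothetical, simplified stand-ins for the kernel's topology structures. */
struct cpu { unsigned long capacity; unsigned long util; };
struct group { const char *name; struct cpu cpus[2]; int nr_cpus; };

/*
 * Return the group containing the cpu with the most spare capacity,
 * or NULL if no cpu beats the initial threshold of
 * CAPACITY_MARGIN - SCHED_LOAD_SCALE = 256 units.
 */
static struct group *find_most_spare_group(struct group *groups, int nr)
{
	unsigned long max_spare = CAPACITY_MARGIN - SCHED_LOAD_SCALE;
	struct group *spare_group = NULL;

	for (int g = 0; g < nr; g++) {
		for (int i = 0; i < groups[g].nr_cpus; i++) {
			/* mirrors capacity_of(i) - cpu_util(i) in the patch */
			unsigned long spare = groups[g].cpus[i].capacity -
					      groups[g].cpus[i].util;

			if (spare > max_spare) {
				max_spare = spare;
				spare_group = &groups[g];
			}
		}
	}
	return spare_group;
}

int main(void)
{
	/* Busy big-core group vs. a nearly idle little-core group. */
	struct group groups[] = {
		{ "big",    { { 1024, 900 }, { 1024, 850 } }, 2 },
		{ "little", { {  512, 100 }, {  512,  50 } }, 2 },
	};
	struct group *g = find_most_spare_group(groups, 2);

	printf("chosen: %s\n", g ? g->name : "none (fall back to load comparison)");
	return 0;
}

With these numbers neither busy big cpu clears the 256-unit threshold (124 and 174 units spare), while the little cpus have 412 and 462 units to spare, so the little group is returned. If no cpu beat the threshold, the caller would fall back to the load comparison, mirroring criterion 2 above.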
Diffstat (limited to 'kernel')
-rw-r--r--  kernel/sched/fair.c  18
1 file changed, 16 insertions(+), 2 deletions(-)
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 3e8c5a16e79e..60f2e982c6bf 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -4809,9 +4809,10 @@ find_idlest_group(struct sched_domain *sd, struct task_struct *p,
 		  int this_cpu, int sd_flag)
 {
 	struct sched_group *idlest = NULL, *group = sd->groups;
-	struct sched_group *fit_group = NULL;
+	struct sched_group *fit_group = NULL, *spare_group = NULL;
 	unsigned long min_load = ULONG_MAX, this_load = 0;
 	unsigned long fit_capacity = ULONG_MAX;
+	unsigned long max_spare_capacity = capacity_margin - SCHED_LOAD_SCALE;
 	int load_idx = sd->forkexec_idx;
 	int imbalance = 100 + (sd->imbalance_pct-100)/2;
@@ -4819,7 +4820,7 @@ find_idlest_group(struct sched_domain *sd, struct task_struct *p,
 		load_idx = sd->wake_idx;

 	do {
-		unsigned long load, avg_load;
+		unsigned long load, avg_load, spare_capacity;
 		int local_group;
 		int i;
@@ -4851,6 +4852,16 @@ find_idlest_group(struct sched_domain *sd, struct task_struct *p,
 				fit_capacity = capacity_of(i);
 				fit_group = group;
 			}
+
+			/*
+			 * Look for group which has most spare capacity on a
+			 * single cpu.
+			 */
+			spare_capacity = capacity_of(i) - cpu_util(i);
+			if (spare_capacity > max_spare_capacity) {
+				max_spare_capacity = spare_capacity;
+				spare_group = group;
+			}
 		}

 		/* Adjust by relative CPU capacity of the group */
@@ -4867,6 +4878,9 @@ find_idlest_group(struct sched_domain *sd, struct task_struct *p,
 	if (fit_group)
 		return fit_group;

+	if (spare_group)
+		return spare_group;
+
 	if (!idlest || 100*this_load < imbalance*min_load)
 		return NULL;
 	return idlest;
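Taken together, the tail of find_idlest_group() now resolves in this priority order (a paraphrase of the final hunk above, not literal kernel source):

	if (fit_group)          /* group whose cpus can fit the task (existing rule) */
		return fit_group;
	if (spare_group)        /* new: group with most significant spare capacity */
		return spare_group;
	if (!idlest || 100*this_load < imbalance*min_load)
		return NULL;    /* local group is good enough; continue one level down */
	return idlest;          /* otherwise pick the least loaded group */

Note that spare_group is only checked after fit_group, so the existing capacity-fit rule still takes precedence over the new spare-capacity rule.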