Diffstat (limited to 'kernel')
-rw-r--r--	kernel/sched/fair.c	20
1 file changed, 11 insertions, 9 deletions
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 270091323ae8..e77917b23c79 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -5677,9 +5677,9 @@ static int select_idle_sibling(struct task_struct *p, int prev, int target)
 {
 	struct sched_domain *sd;
 	struct sched_group *sg;
-	int best_idle = -1;
-	int best_idle_cstate = -1;
-	int best_idle_capacity = INT_MAX;
+	int best_idle_cpu = -1;
+	int best_idle_cstate = INT_MAX;
+	unsigned long best_idle_capacity = ULONG_MAX;
 
 	if (!sysctl_sched_cstate_aware) {
 		if (idle_cpu(target))
@@ -5706,18 +5706,19 @@ static int select_idle_sibling(struct task_struct *p, int prev, int target)
 
 		if (sysctl_sched_cstate_aware) {
 			for_each_cpu_and(i, tsk_cpus_allowed(p), sched_group_cpus(sg)) {
-				struct rq *rq = cpu_rq(i);
-				int idle_idx = idle_get_state_idx(rq);
+				int idle_idx = idle_get_state_idx(cpu_rq(i));
 				unsigned long new_usage = boosted_task_util(p);
 				unsigned long capacity_orig = capacity_orig_of(i);
+
 				if (new_usage > capacity_orig || !idle_cpu(i))
 					goto next;
 
 				if (i == target && new_usage <= capacity_curr_of(target))
 					return target;
 
-				if (best_idle < 0 || (idle_idx < best_idle_cstate && capacity_orig <= best_idle_capacity)) {
-					best_idle = i;
+				if (idle_idx < best_idle_cstate &&
+				    capacity_orig <= best_idle_capacity) {
+					best_idle_cpu = i;
 					best_idle_cstate = idle_idx;
 					best_idle_capacity = capacity_orig;
 				}
@@ -5736,8 +5737,9 @@ next:
 			sg = sg->next;
 		} while (sg != sd->groups);
 	}
-	if (best_idle > 0)
-		target = best_idle;
+
+	if (best_idle_cpu >= 0)
+		target = best_idle_cpu;
 done:
 	return target;
 }
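
For reference, below is a minimal user-space sketch of the selection rule after this change. The struct cpu_info table, pick_idle_cpu() and the sample capacities are invented for illustration only; just the best_idle_cpu / best_idle_cstate / best_idle_capacity handling mirrors the patched loop. Seeding best_idle_cstate with INT_MAX and best_idle_capacity with ULONG_MAX lets the first fitting idle CPU win the comparison without the old "best_idle < 0" special case, and testing "best_idle_cpu >= 0" (rather than "> 0") means CPU 0 can actually be returned as the best idle CPU.

/* Sketch only: toy model of the patched selection, not kernel code. */
#include <limits.h>
#include <stdio.h>

struct cpu_info {
	int		idle;		/* non-zero if the CPU is idle */
	int		idle_idx;	/* idle-state index, deeper = larger */
	unsigned long	capacity_orig;	/* original capacity of the CPU */
};

/* Hypothetical topology: CPU 0 is idle in the shallowest state. */
static const struct cpu_info cpu[] = {
	{ 1,  0,  430 },	/* CPU 0: little, shallow idle */
	{ 0, -1,  430 },	/* CPU 1: busy */
	{ 1,  2, 1024 },	/* CPU 2: big, deep idle */
	{ 1,  1, 1024 },	/* CPU 3: big, shallower idle */
};

static int pick_idle_cpu(unsigned long task_util)
{
	int best_idle_cpu = -1;
	int best_idle_cstate = INT_MAX;
	unsigned long best_idle_capacity = ULONG_MAX;
	unsigned int i;

	for (i = 0; i < sizeof(cpu) / sizeof(cpu[0]); i++) {
		/* Skip busy CPUs and CPUs the task does not fit on. */
		if (!cpu[i].idle || task_util > cpu[i].capacity_orig)
			continue;

		/*
		 * Prefer a strictly shallower idle state on a CPU whose
		 * original capacity is no larger than the best so far.
		 * The INT_MAX/ULONG_MAX seeds make the first candidate
		 * win without a separate "nothing found yet" test.
		 */
		if (cpu[i].idle_idx < best_idle_cstate &&
		    cpu[i].capacity_orig <= best_idle_capacity) {
			best_idle_cpu = i;
			best_idle_cstate = cpu[i].idle_idx;
			best_idle_capacity = cpu[i].capacity_orig;
		}
	}

	return best_idle_cpu;	/* -1 if no idle CPU fits; CPU 0 is valid */
}

int main(void)
{
	/* A small task lands on CPU 0, which ">= 0" can now report. */
	printf("picked CPU %d\n", pick_idle_cpu(200));
	return 0;
}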