author	Joonwoo Park <joonwoop@codeaurora.org>	2015-06-01 20:29:11 -0700
committer	David Keitel <dkeitel@codeaurora.org>	2016-03-23 20:02:12 -0700
commit	82cf54e7d08bd495f417b3816c798f51d0f5daa3 (patch)
tree	ded085d37cec6d5ad002e5cdff4e24cc5ba22ece /kernel
parent	de52c5fce5637f2c7ca5e1344502f2ffd4f29928 (diff)
sched: iterate search CPUs starting from prev_cpu for optimization
Function best_small_task_cpu() looks for a mostly idle CPU and returns it as the best CPU for a given small task. At present, however, it cannot break out of the CPU search loop as soon as it finds a mostly idle CPU: it keeps iterating because it must prefer the task's previous CPU over any other mostly idle CPU, in order to avoid an unnecessary migration when the previous CPU is itself mostly idle.

Optimize best_small_task_cpu() to iterate over the search CPUs starting from the given task's CPU (task_cpu(p)), so the loop can break as soon as a mostly idle CPU is found. This saves a few hundred nanoseconds of time spent in the function and makes no functional change.

CRs-fixed: 849655
Change-Id: I8c540963487f4102dac4d54e9f98e24a4a92a7b3
Signed-off-by: Joonwoo Park <joonwoop@codeaurora.org>
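The search-order idea is easiest to see in isolation. Below is a minimal userspace sketch, not the kernel code: cpu_is_mostly_idle() and NR_CPUS are stand-ins for the scheduler's mostly_idle_cpu_sync() and the task's allowed-CPU mask, and the modulo wrap-around is a simplification of the patch's cpumask walk. The point it illustrates is that, because the scan begins at the task's previous CPU, a mostly idle previous CPU is returned immediately instead of being handled by a post-loop check.

/*
 * Minimal userspace sketch of the search-order change (illustration only).
 * cpu_is_mostly_idle() and NR_CPUS are stand-ins, not kernel APIs.
 */
#include <stdio.h>

#define NR_CPUS 8

static int cpu_is_mostly_idle(int cpu)
{
	/* Pretend CPUs 2 and 5 are mostly idle. */
	return cpu == 2 || cpu == 5;
}

/*
 * Scan starting at prev_cpu and return the first mostly idle CPU found.
 * Because the scan begins at prev_cpu, a mostly idle previous CPU wins
 * right away, so no post-loop "prefer prev_cpu" bookkeeping (the old
 * mi_cpus mask) is needed.
 */
static int best_cpu_from(int prev_cpu)
{
	int i, cpu;

	for (i = 0; i < NR_CPUS; i++) {
		cpu = (prev_cpu + i) % NR_CPUS;
		if (cpu_is_mostly_idle(cpu))
			return cpu;
	}
	return prev_cpu;	/* no mostly idle CPU: fall back to prev_cpu */
}

int main(void)
{
	printf("prev_cpu=5 -> CPU %d\n", best_cpu_from(5));	/* 5, early exit */
	printf("prev_cpu=0 -> CPU %d\n", best_cpu_from(0));	/* 2, first idle */
	return 0;
}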
Diffstat (limited to 'kernel')
-rw-r--r--	kernel/sched/fair.c	28
1 file changed, 13 insertions(+), 15 deletions(-)
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index d6acdc6725c0..183dc367f025 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -3276,20 +3276,26 @@ static int best_small_task_cpu(struct task_struct *p, int sync)
int min_cstate_cpu = -1;
int min_cstate = INT_MAX;
int cpu_cost, min_cost = INT_MAX;
- int i, cstate, prev_cpu;
+ int i = task_cpu(p), cstate, prev_cpu;
int hmp_capable;
u64 tload, cpu_load, min_load = ULLONG_MAX;
- cpumask_t mi_cpus = CPU_MASK_NONE;
cpumask_t temp;
+ cpumask_t search_cpu;
+ struct rq *rq;
cpumask_and(&temp, &mpc_mask, cpu_possible_mask);
hmp_capable = !cpumask_full(&temp);
- for_each_cpu_and(i, tsk_cpus_allowed(p), cpu_online_mask) {
- struct rq *rq = cpu_rq(i);
+ cpumask_and(&search_cpu, tsk_cpus_allowed(p), cpu_online_mask);
+ if (unlikely(!cpumask_test_cpu(i, &search_cpu)))
+ i = cpumask_first(&search_cpu);
+ do {
+ rq = cpu_rq(i);
prev_cpu = (i == task_cpu(p));
+ cpumask_clear_cpu(i, &search_cpu);
+
trace_sched_cpu_load(rq, idle_cpu(i),
mostly_idle_cpu_sync(i,
cpu_load_sync(i, sync), sync),
@@ -3326,13 +3332,8 @@ static int best_small_task_cpu(struct task_struct *p, int sync)
}
cpu_load = cpu_load_sync(i, sync);
- if (mostly_idle_cpu_sync(i, cpu_load, sync)) {
- if (prev_cpu)
- return task_cpu(p);
-
- cpumask_set_cpu(i, &mi_cpus);
- continue;
- }
+ if (mostly_idle_cpu_sync(i, cpu_load, sync))
+ return i;
tload = scale_load_to_cpu(task_load(p), i);
if (!spill_threshold_crossed(tload, cpu_load, rq)) {
@@ -3342,10 +3343,7 @@ static int best_small_task_cpu(struct task_struct *p, int sync)
best_busy_cpu = i;
}
}
- }
-
- if (!cpumask_empty(&mi_cpus))
- return cpumask_first(&mi_cpus);
+ } while ((i = cpumask_first(&search_cpu)) < nr_cpu_ids);
if (min_cstate_cpu != -1)
return min_cstate_cpu;
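For reference, here is a userspace analogue of the do/while scan added by this patch, using a plain 64-bit mask in place of cpumask_t (illustration only). first_set() stands in for cpumask_first(), which in the kernel returns a value >= nr_cpu_ids once the mask is empty; that is the condition that terminates the new loop. The starting CPU is visited first, each visited CPU is cleared from a private copy of the allowed mask, and the walk then proceeds through the remaining allowed CPUs in ascending order.

/*
 * Userspace analogue of the cpumask do/while scan (illustration only).
 */
#include <stdio.h>
#include <stdint.h>

#define NR_CPUS 8

/* Stand-in for cpumask_first(): NR_CPUS means "mask is empty". */
static int first_set(uint64_t mask)
{
	return mask ? __builtin_ctzll(mask) : NR_CPUS;
}

static void scan_from(int start, uint64_t allowed)
{
	int i = start;

	/* Fall back to the first allowed CPU if start is not allowed. */
	if (!(allowed & (1ULL << i)))
		i = first_set(allowed);

	do {
		printf("visiting CPU %d\n", i);
		allowed &= ~(1ULL << i);	/* cpumask_clear_cpu() analogue */
	} while ((i = first_set(allowed)) < NR_CPUS);
}

int main(void)
{
	/* Allowed CPUs {1,2,4,5,7}, starting at CPU 5: visits 5,1,2,4,7. */
	scan_from(5, 0xb6);
	return 0;
}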