| author | Steve Muckle <smuckle@codeaurora.org> | 2014-03-25 14:27:16 -0700 |
|---|---|---|
| committer | David Keitel <dkeitel@codeaurora.org> | 2016-03-23 19:59:23 -0700 |
| commit | a31debf1c93cc87f60915e06d00889a8a047656e | |
| tree | c7578181144a5dda725d6afd9a0f5b6d2d6be6d5 /kernel | |
| parent | 1e19d2f48fb99fe46897c9bc26c3f44461577089 | |
sched: run idle_balance() on most power-efficient CPU
When a CPU goes idle, it checks whether it can pull any load
from other busy CPUs. However, the CPU going idle may not be
the most power-efficient idle CPU in the system.
This patch causes the CPU going idle to check whether
there is a more power-efficient idle CPU within the same
lowest sched domain. If there is, it runs the load balancer
on behalf of that CPU instead of itself.
Since it is not known at this point what task load would be
pulled, a frequency must be assumed in order to compare
CPU power consumption. The maximum frequency supported by all
CPUs is used for this comparison.
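
The selection logic this adds to idle_balance() can be summarized with the sketch below. It is a simplified extraction of the hunk in this patch: the sysctl gate, locking, and the fallback to the current CPU are omitted, and power_cost_at_freq() is introduced here only as a stub.

```c
/*
 * Simplified sketch of the CPU selection added to idle_balance().
 * Names follow the patch; this helper is extracted for illustration.
 */
static int pick_balance_cpu(struct sched_domain *sd, int this_cpu)
{
	int min_power = INT_MAX;
	int balance_cpu = -1;
	int i, cost;

	for_each_cpu(i, sched_domain_span(sd)) {
		/* consider the CPU going idle itself, plus any idle peer */
		if (i == this_cpu || idle_cpu(i)) {
			/* freq == 0: use the assumed (max common) frequency */
			cost = power_cost_at_freq(i, 0);
			if (cost < min_power) {
				min_power = cost;
				balance_cpu = i;
			}
		}
	}

	/* load balance is then run on behalf of this CPU */
	return balance_cpu;
}
```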
Change-Id: I5eedddc1f7d10df58ecd358f37dba563eeecf4fc
Signed-off-by: Steve Muckle <smuckle@codeaurora.org>
Signed-off-by: Srivatsa Vaddagiri <vatsa@codeaurora.org>
[rameezmustafa@codeaurora.org: Port to msm-3.18]
Signed-off-by: Syed Rameez Mustafa <rameezmustafa@codeaurora.org>
[joonwoop@codeaurora.org: fixed minor conflict around comment.]
Signed-off-by: Joonwoo Park <joonwoop@codeaurora.org>
Diffstat (limited to 'kernel')
| -rw-r--r-- | kernel/sched/fair.c | 57 |
1 file changed, 47 insertions(+), 10 deletions(-)
```diff
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 460e7cfef2d6..2cb70c88d18a 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -3051,6 +3051,11 @@ static inline int power_cost(struct task_struct *p, int cpu)
 	return SCHED_CAPACITY_SCALE;
 }
 
+static unsigned int power_cost_at_freq(int cpu, unsigned int freq)
+{
+	return 1;
+}
+
 static inline int mostly_idle_cpu(int cpu)
 {
 	return 0;
@@ -8025,6 +8030,10 @@ static int idle_balance(struct rq *this_rq)
 	struct sched_domain *sd;
 	int pulled_task = 0;
 	u64 curr_cost = 0;
+	int i, cost;
+	int min_power = INT_MAX;
+	int balance_cpu = -1;
+	struct rq *balance_rq = NULL;
 
 	idle_enter_fair(this_rq);
 
@@ -8045,30 +8054,58 @@ static int idle_balance(struct rq *this_rq)
 		goto out;
 	}
 
+	/*
+	 * If this CPU is not the most power-efficient idle CPU in the
+	 * lowest level domain, run load balance on behalf of that
+	 * most power-efficient idle CPU.
+	 */
+	rcu_read_lock();
+	sd = rcu_dereference_check_sched_domain(this_rq->sd);
+	if (sd && sysctl_sched_enable_power_aware) {
+		for_each_cpu(i, sched_domain_span(sd)) {
+			if (i == this_cpu || idle_cpu(i)) {
+				cost = power_cost_at_freq(i, 0);
+				if (cost < min_power) {
+					min_power = cost;
+					balance_cpu = i;
+				}
+			}
+		}
+		BUG_ON(balance_cpu == -1);
+
+	} else {
+		balance_cpu = this_cpu;
+	}
+	rcu_read_unlock();
+	balance_rq = cpu_rq(balance_cpu);
+
 	raw_spin_unlock(&this_rq->lock);
 
-	update_blocked_averages(this_cpu);
+	update_blocked_averages(balance_cpu);
 	rcu_read_lock();
-	for_each_domain(this_cpu, sd) {
+	for_each_domain(balance_cpu, sd) {
 		int continue_balancing = 1;
 		u64 t0, domain_cost;
 
 		if (!(sd->flags & SD_LOAD_BALANCE))
 			continue;
 
-		if (this_rq->avg_idle < curr_cost + sd->max_newidle_lb_cost) {
+		if (balance_rq->avg_idle < curr_cost + sd->max_newidle_lb_cost) {
 			update_next_balance(sd, 0, &next_balance);
 			break;
 		}
 
 		if (sd->flags & SD_BALANCE_NEWIDLE) {
-			t0 = sched_clock_cpu(this_cpu);
+			t0 = sched_clock_cpu(balance_cpu);
 
-			pulled_task = load_balance(this_cpu, this_rq,
-						   sd, CPU_NEWLY_IDLE,
+			pulled_task = load_balance(balance_cpu, balance_rq,
+						   sd,
+						   (this_cpu == balance_cpu ?
+						    CPU_NEWLY_IDLE :
+						    CPU_IDLE),
 						   &continue_balancing);
 
-			domain_cost = sched_clock_cpu(this_cpu) - t0;
+			domain_cost = sched_clock_cpu(balance_cpu) - t0;
 			if (domain_cost > sd->max_newidle_lb_cost)
 				sd->max_newidle_lb_cost = domain_cost;
 
@@ -8079,9 +8116,9 @@ static int idle_balance(struct rq *this_rq)
 
 		/*
 		 * Stop searching for tasks to pull if there are
-		 * now runnable tasks on this rq.
+		 * now runnable tasks on the balance rq.
 		 */
-		if (pulled_task || this_rq->nr_running > 0)
+		if (pulled_task || balance_rq->nr_running > 0)
 			break;
 	}
 	rcu_read_unlock();
@@ -8108,7 +8145,7 @@ out:
 	if (this_rq->nr_running != this_rq->cfs.h_nr_running)
 		pulled_task = -1;
 
-	if (pulled_task) {
+	if (pulled_task && balance_cpu == this_cpu) {
 		idle_exit_fair(this_rq);
 		this_rq->idle_stamp = 0;
 	}
```
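
Note that in this patch power_cost_at_freq() is only a stub that returns 1 for every CPU, so all candidates tie and the first qualifying CPU in the domain span is chosen; the real per-CPU power data presumably arrives in a later change in the series. Purely as an illustration of the intent described in the commit message, a hypothetical implementation could look like the sketch below. The power table, its entry type, and min_common_max_freq are invented names for this sketch, not actual kernel interfaces.

```c
/*
 * Hypothetical body for power_cost_at_freq(), for illustration only.
 * cpu_pwr_tbl, struct cpu_pwr_entry and min_common_max_freq are
 * invented; real power data would come from platform-specific tables.
 */
struct cpu_pwr_entry {
	unsigned int freq;	/* frequency in kHz; 0 terminates the table */
	unsigned int power;	/* relative power cost at that frequency */
};

extern const struct cpu_pwr_entry *cpu_pwr_tbl[];  /* per-CPU tables */
extern unsigned int min_common_max_freq; /* max freq supported by all CPUs */

static unsigned int power_cost_at_freq(int cpu, unsigned int freq)
{
	const struct cpu_pwr_entry *tbl = cpu_pwr_tbl[cpu];
	int i;

	/* freq == 0 means "use the assumed frequency", per the caller */
	if (freq == 0)
		freq = min_common_max_freq;

	/* return the cost of the lowest entry that satisfies freq */
	for (i = 0; tbl[i].freq != 0; i++)
		if (tbl[i].freq >= freq)
			return tbl[i].power;

	/* freq above the table (assumed non-empty): use the highest entry */
	return tbl[i - 1].power;
}
```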
