diff options
Diffstat (limited to 'kernel')
 kernel/sched/core.c  |  1 +
 kernel/sched/fair.c  | 42 ++++++++++++++++++++++++++++++++----------
 kernel/sched/sched.h |  1 +
 3 files changed, 34 insertions(+), 10 deletions(-)
diff --git a/kernel/sched/core.c b/kernel/sched/core.c index 964094ec9d03..b288947994b7 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -7921,6 +7921,7 @@ void __init sched_init(void) rq->active_balance = 0; rq->next_balance = jiffies; rq->push_cpu = 0; + rq->push_task = NULL; rq->cpu = i; rq->online = 0; rq->idle_stamp = 0; diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c index 3f2e13495620..4235fd175760 100644 --- a/kernel/sched/fair.c +++ b/kernel/sched/fair.c @@ -7810,8 +7810,18 @@ static int active_load_balance_cpu_stop(void *data) int busiest_cpu = cpu_of(busiest_rq); int target_cpu = busiest_rq->push_cpu; struct rq *target_rq = cpu_rq(target_cpu); - struct sched_domain *sd; + struct sched_domain *sd = NULL; struct task_struct *p = NULL; + struct task_struct *push_task; + int push_task_detached = 0; + struct lb_env env = { + .sd = sd, + .dst_cpu = target_cpu, + .dst_rq = target_rq, + .src_cpu = busiest_rq->cpu, + .src_rq = busiest_rq, + .idle = CPU_IDLE, + }; raw_spin_lock_irq(&busiest_rq->lock); @@ -7833,6 +7843,16 @@ static int active_load_balance_cpu_stop(void *data) */ BUG_ON(busiest_rq == target_rq); + push_task = busiest_rq->push_task; + if (push_task) { + if (task_on_rq_queued(push_task) && + task_cpu(push_task) == busiest_cpu) { + detach_task(push_task, &env); + push_task_detached = 1; + } + goto out_unlock; + } + /* Search for an sd spanning us and the target CPU. 
*/ rcu_read_lock(); for_each_domain(target_cpu, sd) { @@ -7842,15 +7862,7 @@ static int active_load_balance_cpu_stop(void *data) } if (likely(sd)) { - struct lb_env env = { - .sd = sd, - .dst_cpu = target_cpu, - .dst_rq = target_rq, - .src_cpu = busiest_rq->cpu, - .src_rq = busiest_rq, - .idle = CPU_IDLE, - }; - + env.sd = sd; schedstat_inc(sd, alb_count); p = detach_one_task(&env); @@ -7862,8 +7874,18 @@ static int active_load_balance_cpu_stop(void *data) rcu_read_unlock(); out_unlock: busiest_rq->active_balance = 0; + + if (push_task) + busiest_rq->push_task = NULL; + raw_spin_unlock(&busiest_rq->lock); + if (push_task) { + if (push_task_detached) + attach_one_task(target_rq, push_task); + put_task_struct(push_task); + } + if (p) attach_one_task(target_rq, p); diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h index 5c27891ebd9a..00d7f187a88c 100644 --- a/kernel/sched/sched.h +++ b/kernel/sched/sched.h @@ -626,6 +626,7 @@ struct rq { /* For active balancing */ int active_balance; int push_cpu; + struct task_struct *push_task; struct cpu_stop_work active_balance_work; /* cpu of this runqueue: */ int cpu; |
