diff options
| author | Pavankumar Kondeti <pkondeti@codeaurora.org> | 2015-12-14 14:50:12 +0530 |
|---|---|---|
| committer | David Keitel <dkeitel@codeaurora.org> | 2016-03-23 21:25:15 -0700 |
| commit | 6003b006be913b7b18092ad4f01d6482920a2924 (patch) | |
| tree | 1de35f7f372b69692e9d60f8efc2b804c6a71076 /kernel | |
| parent | 8cd1d7ef162d0911d18cf971c4a34c07bb994a80 (diff) | |
sched: Provide a facility to restrict RT tasks to lower power cluster
The current CPU selection algorithm for RT tasks looks for the
least loaded CPU in all clusters. Stop the search at the lowest
possible power cluster based on "sched_restrict_cluster_spill"
sysctl tunable.
Change-Id: I34fdaefea56e0d1b7e7178d800f1bb86aa0ec01c
Signed-off-by: Pavankumar Kondeti <pkondeti@codeaurora.org>
Diffstat (limited to 'kernel')
-rw-r--r--  kernel/sched/rt.c    | 8 ++++++--
-rw-r--r--  kernel/sched/sched.h | 1 +
2 files changed, 7 insertions(+), 2 deletions(-)
diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c
index e698cc3438c7..0064682a0024 100644
--- a/kernel/sched/rt.c
+++ b/kernel/sched/rt.c
@@ -1671,6 +1671,7 @@ static int find_lowest_rq_hmp(struct task_struct *task)
 	int prev_cpu = task_cpu(task);
 	u64 cpu_load, min_load = ULLONG_MAX;
 	int i;
+	int restrict_cluster = sysctl_sched_restrict_cluster_spill;
 
 	/* Make sure the mask is initialized first */
 	if (unlikely(!lowest_mask))
@@ -1698,8 +1699,9 @@ static int find_lowest_rq_hmp(struct task_struct *task)
 			if (sched_cpu_high_irqload(i))
 				continue;
 
-			cpu_load = scale_load_to_cpu(
-				cpu_rq(i)->hmp_stats.cumulative_runnable_avg, i);
+			cpu_load = cpu_rq(i)->hmp_stats.cumulative_runnable_avg;
+			if (!restrict_cluster)
+				cpu_load = scale_load_to_cpu(cpu_load, i);
 
 			if (cpu_load < min_load ||
 				(cpu_load == min_load &&
@@ -1709,6 +1711,8 @@ static int find_lowest_rq_hmp(struct task_struct *task)
 				best_cpu = i;
 			}
 		}
+		if (restrict_cluster && best_cpu != -1)
+			break;
 	}
 
 	return best_cpu;
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 0185f664191b..c30c12097d02 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -1044,6 +1044,7 @@ extern unsigned int sched_init_task_load_pelt;
 extern unsigned int sched_init_task_load_windows;
 extern unsigned int sched_heavy_task;
 extern unsigned int up_down_migrate_scale_factor;
+extern unsigned int sysctl_sched_restrict_cluster_spill;
 extern void reset_cpu_hmp_stats(int cpu, int reset_cra);
 extern unsigned int max_task_load(void);
 extern void sched_account_irqtime(int cpu, struct task_struct *curr,
