summary refs log tree commit diff
path: root/kernel
diff options
context:
space:
mode:
Diffstat (limited to 'kernel')
-rw-r--r--  kernel/sched/fair.c | 8
-rw-r--r--  kernel/sysctl.c     | 7
2 files changed, 15 insertions, 0 deletions
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index aef2fbb9dbf5..e57b50ee752a 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -2709,6 +2709,9 @@ unsigned int __read_mostly sysctl_sched_enable_power_aware = 0;
unsigned int __read_mostly sched_small_wakee_task_load;
unsigned int __read_mostly sysctl_sched_small_wakee_task_load_pct = 10;
+unsigned int __read_mostly sched_big_waker_task_load;
+unsigned int __read_mostly sysctl_sched_big_waker_task_load_pct = 25;
+
/*
* CPUs with load greater than the sched_spill_load_threshold are not
* eligible for task placement. When all CPUs in a cluster achieve a
@@ -2839,6 +2842,10 @@ void set_hmp_defaults(void)
sched_small_wakee_task_load =
div64_u64((u64)sysctl_sched_small_wakee_task_load_pct *
(u64)sched_ravg_window, 100);
+
+ sched_big_waker_task_load =
+ div64_u64((u64)sysctl_sched_big_waker_task_load_pct *
+ (u64)sched_ravg_window, 100);
}
u32 sched_get_init_task_load(struct task_struct *p)
@@ -3509,6 +3516,7 @@ static inline bool
wake_to_waker_cluster(struct cpu_select_env *env)
{
return !env->need_idle && !env->reason && env->sync &&
+ task_load(current) > sched_big_waker_task_load &&
task_load(env->p) < sched_small_wakee_task_load;
}
diff --git a/kernel/sysctl.c b/kernel/sysctl.c
index 09d6882acb86..1da3b96368b1 100644
--- a/kernel/sysctl.c
+++ b/kernel/sysctl.c
@@ -449,6 +449,13 @@ static struct ctl_table kern_table[] = {
.mode = 0644,
.proc_handler = sched_hmp_proc_update_handler,
},
+ {
+ .procname = "sched_big_waker_task_load",
+ .data = &sysctl_sched_big_waker_task_load_pct,
+ .maxlen = sizeof(unsigned int),
+ .mode = 0644,
+ .proc_handler = sched_hmp_proc_update_handler,
+ },
#ifdef CONFIG_SCHED_FREQ_INPUT
{
.procname = "sched_new_task_windows",