Diffstat (limited to 'kernel')
 kernel/sched/core.c | 1 +
 kernel/sched/fair.c | 9 +++++++++
 2 files changed, 10 insertions(+), 0 deletions(-)
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 2fd5c5688dd0..631dbb0a7041 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -3056,6 +3056,7 @@ static inline void mark_task_starting(struct task_struct *p)
 
 	wallclock = sched_ktime_clock();
 	p->ravg.mark_start = p->last_wake_ts = wallclock;
+	p->last_cpu_selected_ts = wallclock;
 	p->last_switch_out_ts = 0;
 	update_task_cpu_cycles(p, cpu_of(rq));
 }
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index b9ee3dc4a1a6..859416724e5a 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -2678,6 +2678,9 @@ static unsigned int __read_mostly
 sched_short_sleep_task_threshold = 2000 * NSEC_PER_USEC;
 
 unsigned int __read_mostly sysctl_sched_select_prev_cpu_us = 2000;
+static unsigned int __read_mostly
+sched_long_cpu_selection_threshold = 100 * NSEC_PER_MSEC;
+
 unsigned int __read_mostly sysctl_sched_restrict_cluster_spill;
 
 void update_up_down_migrate(void)
@@ -3455,6 +3458,7 @@ bias_to_prev_cpu(struct cpu_select_env *env, struct cluster_cpu_stats *stats)
 	struct sched_cluster *cluster;
 
 	if (env->boost_type != SCHED_BOOST_NONE || env->reason ||
+	    !task->ravg.mark_start ||
 	    env->need_idle || !sched_short_sleep_task_threshold)
 		return false;
 
@@ -3463,6 +3467,10 @@ bias_to_prev_cpu(struct cpu_select_env *env, struct cluster_cpu_stats *stats)
 	    unlikely(!cpu_active(prev_cpu)))
 		return false;
 
+	if (task->ravg.mark_start - task->last_cpu_selected_ts >=
+			sched_long_cpu_selection_threshold)
+		return false;
+
 	/*
 	 * This function should be used by task wake up path only as it's
 	 * assuming p->last_switch_out_ts as last sleep time.
@@ -3604,6 +3612,7 @@ retry:
 		if (stats.best_capacity_cpu >= 0)
 			target = stats.best_capacity_cpu;
 	}
+	p->last_cpu_selected_ts = sched_ktime_clock();
 out:
 	rcu_read_unlock();
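
Note: the patch gates the previous-CPU wakeup bias on how recently the task last
went through CPU selection. Each pass through the select path stamps
p->last_cpu_selected_ts; once task->ravg.mark_start has drifted 100ms
(sched_long_cpu_selection_threshold) past that stamp, bias_to_prev_cpu() bails
out and the full placement search runs instead. Below is a minimal userspace
sketch of just that staleness test; struct and function names here are
hypothetical stand-ins for the kernel's task_struct/ravg fields, not kernel API.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define NSEC_PER_MSEC 1000000ULL

/* Stand-in for the two per-task timestamps the patch compares. */
struct task_ts {
	uint64_t mark_start;            /* window-start stamp, updated on wakeup */
	uint64_t last_cpu_selected_ts;  /* last time the select path placed the task */
};

static const uint64_t long_selection_threshold = 100 * NSEC_PER_MSEC;

/* Mirrors the new early-outs in bias_to_prev_cpu(): a task that has not
 * started yet, or whose selection data is >= 100ms old, skips the bias. */
static bool selection_data_stale(const struct task_ts *t)
{
	if (!t->mark_start)
		return true;
	return t->mark_start - t->last_cpu_selected_ts >= long_selection_threshold;
}

int main(void)
{
	struct task_ts fresh = { 150 * NSEC_PER_MSEC, 100 * NSEC_PER_MSEC };
	struct task_ts stale = { 250 * NSEC_PER_MSEC, 100 * NSEC_PER_MSEC };

	printf("fresh task stale? %d\n", selection_data_stale(&fresh)); /* 0: keep bias */
	printf("idle task stale?  %d\n", selection_data_stale(&stale)); /* 1: skip bias */
	return 0;
}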