summaryrefslogtreecommitdiff
path: root/kernel
diff options
context:
space:
mode:
Diffstat (limited to 'kernel')
-rw-r--r--	kernel/sched/core.c	14
-rw-r--r--	kernel/sysctl.c	7
2 files changed, 19 insertions(+), 2 deletions(-)
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 1d71f326bc4e..b1d48c53bf7e 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -2058,6 +2058,7 @@ static void ttwu_queue(struct task_struct *p, int cpu)
raw_spin_unlock(&rq->lock);
}
+__read_mostly unsigned int sysctl_sched_wakeup_load_threshold = 110;
/**
* try_to_wake_up - wake up a thread
* @p: the thread to be awakened
@@ -2156,7 +2157,7 @@ stat:
out:
raw_spin_unlock_irqrestore(&p->pi_lock, flags);
- if (src_cpu != cpu && task_notify_on_migrate(p)) {
+ if (task_notify_on_migrate(p)) {
struct migration_notify_data mnd;
mnd.src_cpu = src_cpu;
@@ -2166,7 +2167,16 @@ out:
(u64)(sysctl_sched_ravg_window));
else
mnd.load = 0;
- atomic_notifier_call_chain(&migration_notifier_head,
+	/*
+	 * Call the migration notifier with mnd for foreground task
+	 * migrations, as well as for wakeups whose load is above
+	 * sysctl_sched_wakeup_load_threshold. This prompts cpu-boost
+	 * to raise the CPU frequency on wake-up of a heavyweight
+	 * foreground task.
+	 */
+ if ((src_cpu != cpu) || (mnd.load >
+ sysctl_sched_wakeup_load_threshold))
+ atomic_notifier_call_chain(&migration_notifier_head,
0, (void *)&mnd);
}
return success;
diff --git a/kernel/sysctl.c b/kernel/sysctl.c
index ff8df5e6614e..1314618f07f8 100644
--- a/kernel/sysctl.c
+++ b/kernel/sysctl.c
@@ -299,6 +299,13 @@ static struct ctl_table kern_table[] = {
.mode = 0644,
.proc_handler = proc_dointvec,
},
+ {
+ .procname = "sched_wakeup_load_threshold",
+ .data = &sysctl_sched_wakeup_load_threshold,
+ .maxlen = sizeof(unsigned int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec,
+ },
#ifdef CONFIG_SCHED_DEBUG
{
.procname = "sched_min_granularity_ns",