Diffstat (limited to 'kernel/sched/core.c')
 kernel/sched/core.c | 41 +++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 41 insertions(+), 0 deletions(-)
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 569a2f0f01e4..573dcb62b3b3 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -2854,6 +2854,45 @@ unsigned long long task_sched_runtime(struct task_struct *p)
 	return ns;
 }
 
+#ifdef CONFIG_CPU_FREQ_GOV_SCHED
+static unsigned long sum_capacity_reqs(unsigned long cfs_cap,
+				       struct sched_capacity_reqs *scr)
+{
+	unsigned long total = cfs_cap + scr->rt;
+
+	total = total * capacity_margin;
+	total /= SCHED_CAPACITY_SCALE;
+	total += scr->dl;
+	return total;
+}
+
+static void sched_freq_tick(int cpu)
+{
+	struct sched_capacity_reqs *scr;
+	unsigned long capacity_orig, capacity_curr;
+
+	if (!sched_freq())
+		return;
+
+	capacity_orig = capacity_orig_of(cpu);
+	capacity_curr = capacity_curr_of(cpu);
+	if (capacity_curr == capacity_orig)
+		return;
+
+	/*
+	 * To make room for a task that is building up its "real"
+	 * utilization, and to harm its performance as little as
+	 * possible, request a jump to the max OPP as soon as the
+	 * free-capacity margin (capacity_margin) is impacted.
+	 */
+	scr = &per_cpu(cpu_sched_capacity_reqs, cpu);
+	if (capacity_curr < sum_capacity_reqs(cpu_util(cpu), scr))
+		set_cfs_cpu_capacity(cpu, true, capacity_max);
+}
+#else
+static inline void sched_freq_tick(int cpu) { }
+#endif
+
 /*
  * This function gets called by the timer code, with HZ frequency.
  * We call it with interrupts disabled.
@@ -2880,6 +2919,8 @@ void scheduler_tick(void)
 	trigger_load_balance(rq);
 #endif
 	rq_last_tick_reset(rq);
+
+	sched_freq_tick(cpu);
 }
 
 #ifdef CONFIG_NO_HZ_FULL
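
For reference, the fixed-point arithmetic in sum_capacity_reqs() can be replayed outside the kernel. The sketch below is a standalone illustration, not kernel code: SCHED_CAPACITY_SCALE is 1024 as in the kernel, but the value of capacity_margin (1280, i.e. ~1.25x headroom) and the trimmed struct layout are assumptions made for the example.

/*
 * Standalone sketch of the capacity-request math used by
 * sched_freq_tick() in the patch above. capacity_margin = 1280
 * is an assumed value (~25% headroom over the CFS+RT request);
 * the struct is trimmed to the fields the helper actually reads.
 */
#include <stdio.h>

#define SCHED_CAPACITY_SCALE	1024UL

static unsigned long capacity_margin = 1280;	/* assumed: ~1.25x */

struct sched_capacity_reqs {
	unsigned long rt;	/* capacity requested by RT tasks */
	unsigned long dl;	/* capacity reserved for DL tasks */
};

static unsigned long sum_capacity_reqs(unsigned long cfs_cap,
				       struct sched_capacity_reqs *scr)
{
	unsigned long total = cfs_cap + scr->rt;

	/* Scale CFS+RT by the margin: e.g. 400 * 1280 / 1024 = 500. */
	total = total * capacity_margin;
	total /= SCHED_CAPACITY_SCALE;
	/* DL bandwidth is an explicit reservation; add it unscaled. */
	total += scr->dl;
	return total;
}

int main(void)
{
	struct sched_capacity_reqs scr = { .rt = 100, .dl = 50 };
	unsigned long cfs = 300;	/* stands in for cpu_util(cpu) */
	unsigned long need = sum_capacity_reqs(cfs, &scr);

	/* (300 + 100) * 1280 / 1024 + 50 = 550 */
	printf("required capacity: %lu / %lu\n", need, SCHED_CAPACITY_SCALE);

	/*
	 * sched_freq_tick() compares this figure against
	 * capacity_curr_of(cpu): a current capacity of, say, 512
	 * is below 550, so the tick would request the max OPP via
	 * set_cfs_cpu_capacity(cpu, true, capacity_max).
	 */
	return 0;
}

The margin exists so the CPU ramps up before demand outgrows the current OPP: since a task that is building up its "real" utilization only raises cpu_util() gradually, waiting for the request to exceed capacity_curr outright would leave it underserved for several ticks.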