path: root/kernel/sched/sched.h
author    Viresh Kumar <viresh.kumar@linaro.org>    2017-11-02 15:13:26 +0530
committer Todd Kjos <tkjos@google.com>              2017-11-07 23:57:47 +0000
commit    df147c9e336cfcb4183db1eb9552b0429060cd0d (patch)
tree      8839946a651b94292af9bc26a215cb1d7d4f7dc7 /kernel/sched/sched.h
parent    d194ba5d712f051ff6c025f3484bb72f219764e3 (diff)
cpufreq: Drop schedfreq governor
We all should be using (and improving) the schedutil governor now. Get
rid of the non-upstream governor.

Tested on Hikey.

Change-Id: Ic660756536e5da51952738c3c18b94e31f58cd57
Signed-off-by: Viresh Kumar <viresh.kumar@linaro.org>
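Note: schedutil itself is selected through the standard cpufreq
interfaces. A minimal sketch of the switch-over (config symbols and
sysfs paths as in mainline; verify against your tree):

# Build time: enable schedutil and make it the default governor
CONFIG_CPU_FREQ_GOV_SCHEDUTIL=y
CONFIG_CPU_FREQ_DEFAULT_GOV_SCHEDUTIL=y

# Run time: switch a policy over via sysfs
cat /sys/devices/system/cpu/cpu0/cpufreq/scaling_available_governors
echo schedutil > /sys/devices/system/cpu/cpu0/cpufreq/scaling_governor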
Diffstat (limited to 'kernel/sched/sched.h')
-rw-r--r--  kernel/sched/sched.h  75
1 file changed, 0 insertions(+), 75 deletions(-)
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 028e232103c2..782746140711 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -1629,81 +1629,6 @@ static inline unsigned long cpu_util_freq(int cpu)
#endif
-#ifdef CONFIG_CPU_FREQ_GOV_SCHED
-#define capacity_max SCHED_CAPACITY_SCALE
-extern unsigned int capacity_margin;
-extern struct static_key __sched_freq;
-
-static inline bool sched_freq(void)
-{
- return static_key_false(&__sched_freq);
-}
-
-/*
- * sched_capacity_reqs expects capacity requests to be normalised.
- * All capacities should sum to the range of 0-1024.
- */
-DECLARE_PER_CPU(struct sched_capacity_reqs, cpu_sched_capacity_reqs);
-void update_cpu_capacity_request(int cpu, bool request);
-
-static inline void set_cfs_cpu_capacity(int cpu, bool request,
- unsigned long capacity)
-{
- struct sched_capacity_reqs *scr = &per_cpu(cpu_sched_capacity_reqs, cpu);
-
-#ifdef CONFIG_SCHED_WALT
- if (!walt_disabled && sysctl_sched_use_walt_cpu_util) {
- int rtdl = scr->rt + scr->dl;
- /*
- * WALT tracks the utilization of a CPU considering the load
- * generated by all the scheduling classes.
- * Since the following call to:
- * update_cpu_capacity
- * is already adding the RT and DL utilizations let's remove
- * these contributions from the WALT signal.
- */
- if (capacity > rtdl)
- capacity -= rtdl;
- else
- capacity = 0;
- }
-#endif
- if (scr->cfs != capacity) {
- scr->cfs = capacity;
- update_cpu_capacity_request(cpu, request);
- }
-}
-
-static inline void set_rt_cpu_capacity(int cpu, bool request,
- unsigned long capacity)
-{
- if (per_cpu(cpu_sched_capacity_reqs, cpu).rt != capacity) {
- per_cpu(cpu_sched_capacity_reqs, cpu).rt = capacity;
- update_cpu_capacity_request(cpu, request);
- }
-}
-
-static inline void set_dl_cpu_capacity(int cpu, bool request,
- unsigned long capacity)
-{
- if (per_cpu(cpu_sched_capacity_reqs, cpu).dl != capacity) {
- per_cpu(cpu_sched_capacity_reqs, cpu).dl = capacity;
- update_cpu_capacity_request(cpu, request);
- }
-}
-#else
-static inline bool sched_freq(void) { return false; }
-static inline void set_cfs_cpu_capacity(int cpu, bool request,
- unsigned long capacity)
-{ }
-static inline void set_rt_cpu_capacity(int cpu, bool request,
- unsigned long capacity)
-{ }
-static inline void set_dl_cpu_capacity(int cpu, bool request,
- unsigned long capacity)
-{ }
-#endif
-
static inline void sched_rt_avg_update(struct rq *rq, u64 rt_delta)
{
rq->rt_avg += rt_delta * arch_scale_freq_capacity(NULL, cpu_of(rq));
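Note on the removed interface: each scheduling class posted a
normalised capacity request per CPU (per the comment above, all
requests together fit the 0-1024 range), and the governor acted on
their combined value. A standalone C sketch of that aggregation, for
illustration only; the field names mirror the deleted struct, while
the real kernel object is per-CPU and feeds
update_cpu_capacity_request():

#include <stdio.h>

#define SCHED_CAPACITY_SCALE 1024UL /* capacity_max in the removed code */

/* Illustrative copy of the per-CPU request struct: one request per class. */
struct sched_capacity_reqs {
	unsigned long cfs;
	unsigned long rt;
	unsigned long dl;
};

/* Sum the per-class requests, clamped to the normalised 0-1024 range. */
static unsigned long total_capacity_request(const struct sched_capacity_reqs *scr)
{
	unsigned long total = scr->cfs + scr->rt + scr->dl;

	return total > SCHED_CAPACITY_SCALE ? SCHED_CAPACITY_SCALE : total;
}

int main(void)
{
	/* Example: CFS asks for half capacity, RT for a quarter, DL idle. */
	struct sched_capacity_reqs scr = { .cfs = 512, .rt = 256, .dl = 0 };

	printf("requested capacity: %lu of %lu\n",
	       total_capacity_request(&scr), SCHED_CAPACITY_SCALE);
	return 0;
}

This also shows why the WALT path in the removed set_cfs_cpu_capacity()
subtracts scr->rt + scr->dl before storing scr->cfs: the per-class
requests are later combined, so a WALT signal that already includes RT
and DL load would otherwise be counted twice.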