summary refs log tree commit diff
diff options
context:
space:
mode:
-rw-r--r--  kernel/sched/fair.c  33
1 file changed, 32 insertions(+), 1 deletion(-)
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index c0c9510d871f..5ad30f4362e2 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -4160,6 +4160,8 @@ static inline void hrtick_update(struct rq *rq)
}
#endif
+static inline unsigned long boosted_cpu_util(int cpu);
+
static void update_capacity_of(int cpu)
{
unsigned long req_cap;
@@ -4168,7 +4170,8 @@ static void update_capacity_of(int cpu)
return;
/* Convert scale-invariant capacity to cpu. */
- req_cap = cpu_util(cpu) * SCHED_CAPACITY_SCALE / capacity_orig_of(cpu);
+ req_cap = boosted_cpu_util(cpu);
+ req_cap = req_cap * SCHED_CAPACITY_SCALE / capacity_orig_of(cpu);
set_cfs_cpu_capacity(cpu, true, req_cap);
}
@@ -5131,8 +5134,36 @@ schedtune_margin(unsigned long signal, unsigned long boost)
return margin;
}
+static inline unsigned int
+schedtune_cpu_margin(unsigned long util)
+{
+ unsigned int boost = get_sysctl_sched_cfs_boost();
+
+ if (boost == 0)
+ return 0;
+
+ return schedtune_margin(util, boost);
+}
+
+#else /* CONFIG_SCHED_TUNE */
+
+static inline unsigned int
+schedtune_cpu_margin(unsigned long util)
+{
+ return 0;
+}
+
#endif /* CONFIG_SCHED_TUNE */
+static inline unsigned long
+boosted_cpu_util(int cpu)
+{
+ unsigned long util = cpu_util(cpu);
+ unsigned long margin = schedtune_cpu_margin(util);
+
+ return util + margin;
+}
+
/*
* find_idlest_group finds and returns the least busy CPU group within the
* domain.