summary refs log tree commit diff
path: root/kernel/sched/sched.h
diff options
context:
space:
mode:
Diffstat (limited to 'kernel/sched/sched.h')
-rw-r--r-- kernel/sched/sched.h | 23
1 file changed, 21 insertions, 2 deletions
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 911d3a94690e..3111d74a5a85 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -1611,8 +1611,27 @@ void update_cpu_capacity_request(int cpu, bool request);
static inline void set_cfs_cpu_capacity(int cpu, bool request,
unsigned long capacity)
{
- if (per_cpu(cpu_sched_capacity_reqs, cpu).cfs != capacity) {
- per_cpu(cpu_sched_capacity_reqs, cpu).cfs = capacity;
+ struct sched_capacity_reqs *scr = &per_cpu(cpu_sched_capacity_reqs, cpu);
+
+#ifdef CONFIG_SCHED_WALT
+ if (!walt_disabled && sysctl_sched_use_walt_cpu_util) {
+ int rtdl = scr->rt + scr->dl;
+ /*
+ * WALT tracks the utilization of a CPU considering the load
+ * generated by all the scheduling classes.
+ * Since the following call to:
+ * update_cpu_capacity
+ * is already adding the RT and DL utilizations let's remove
+ * these contributions from the WALT signal.
+ */
+ if (capacity > rtdl)
+ capacity -= rtdl;
+ else
+ capacity = 0;
+ }
+#endif
+ if (scr->cfs != capacity) {
+ scr->cfs = capacity;
update_cpu_capacity_request(cpu, request);
}
}