-rw-r--r--	kernel/sched/fair.c	42
-rw-r--r--	kernel/sched/tune.c	14
-rw-r--r--	kernel/sched/tune.h	1
3 files changed, 55 insertions, 2 deletions
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 2dbe1ff0a90b..f582d58daea5 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -5048,11 +5048,13 @@ static inline unsigned long task_util(struct task_struct *p)
 
 unsigned int capacity_margin = 1280; /* ~20% margin */
 
+static inline unsigned long boosted_task_util(struct task_struct *task);
+
 static inline bool __task_fits(struct task_struct *p, int cpu, int util)
 {
 	unsigned long capacity = capacity_of(cpu);
 
-	util += task_util(p);
+	util += boosted_task_util(p);
 
 	return (capacity * 1024) > (util * capacity_margin);
 }
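
With capacity_margin = 1280, the inequality above requires the combined (and now boosted) utilization to stay below 1024/1280 = 80% of the CPU's capacity, i.e. it keeps roughly 20% of headroom free. A standalone sketch of the same arithmetic, using hypothetical capacity and utilization values in place of capacity_of() and boosted_task_util():

#include <stdbool.h>
#include <stdio.h>

static const unsigned long capacity_margin = 1280;	/* ~20% margin, as above */

/* Same inequality as __task_fits(); all inputs are made-up example values. */
static bool task_fits(unsigned long cpu_util, unsigned long boosted_util,
		      unsigned long capacity)
{
	unsigned long util = cpu_util + boosted_util;

	return (capacity * 1024) > (util * capacity_margin);
}

int main(void)
{
	/* 100 + 250 = 350 exceeds 80% of a 430-capacity little CPU (344). */
	printf("little: %d\n", task_fits(100, 250, 430));
	/* The same load fits a 1024-capacity big CPU (limit ~819). */
	printf("big:    %d\n", task_fits(100, 250, 1024));
	return 0;
}
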
@@ -5133,6 +5135,27 @@ schedtune_cpu_margin(unsigned long util, int cpu)
 	return schedtune_margin(util, boost);
 }
 
+static inline unsigned long
+schedtune_task_margin(struct task_struct *task)
+{
+	unsigned int boost;
+	unsigned long util;
+	unsigned long margin;
+
+#ifdef CONFIG_CGROUP_SCHEDTUNE
+	boost = schedtune_task_boost(task);
+#else
+	boost = get_sysctl_sched_cfs_boost();
+#endif
+	if (boost == 0)
+		return 0;
+
+	util = task_util(task);
+	margin = schedtune_margin(util, boost);
+
+	return margin;
+}
+
 #else /* CONFIG_SCHED_TUNE */
 
 static inline unsigned int
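
schedtune_margin() itself is not part of this diff; in the SchedTune series it implements Signal Proportional Compensation, where the margin is roughly boost% of the headroom left above the current utilization, (SCHED_LOAD_SCALE - util) * boost / 100 (the kernel uses a reciprocal multiply instead of the division). Under that assumption, a userspace sketch of how the margin and boosted_task_util() combine for hypothetical values:

#include <stdio.h>

#define SCHED_LOAD_SCALE	1024UL

/*
 * Approximation of schedtune_margin(): boost% of the headroom above util.
 * The kernel replaces the /100 with a multiply-and-shift for speed.
 */
static unsigned long margin_of(unsigned long util, unsigned int boost)
{
	return (SCHED_LOAD_SCALE - util) * boost / 100;
}

static unsigned long boosted_util_of(unsigned long util, unsigned int boost)
{
	return util + margin_of(util, boost);	/* mirrors boosted_task_util() */
}

int main(void)
{
	/* Hypothetical task with util = 200 out of 1024. */
	printf("boost 10%%: %lu\n", boosted_util_of(200, 10));	/* 200 + 82  = 282 */
	printf("boost 50%%: %lu\n", boosted_util_of(200, 50));	/* 200 + 412 = 612 */
	return 0;
}
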
@@ -5141,6 +5164,12 @@ schedtune_cpu_margin(unsigned long util, int cpu)
 	return 0;
 }
 
+static inline unsigned int
+schedtune_task_margin(struct task_struct *task)
+{
+	return 0;
+}
+
 #endif /* CONFIG_SCHED_TUNE */
 
 static inline unsigned long
@@ -5152,6 +5181,15 @@ boosted_cpu_util(int cpu)
 	return util + margin;
 }
 
+static inline unsigned long
+boosted_task_util(struct task_struct *task)
+{
+	unsigned long util = task_util(task);
+	unsigned long margin = schedtune_task_margin(task);
+
+	return util + margin;
+}
+
 /*
  * find_idlest_group finds and returns the least busy CPU group within the
 * domain.
@@ -5386,7 +5424,7 @@ static int energy_aware_wake_cpu(struct task_struct *p, int target)
 		 * so prev_cpu will receive a negative bias due to the double
 		 * accounting. However, the blocked utilization may be zero.
 		 */
-		int new_util = cpu_util(i) + task_util(p);
+		int new_util = cpu_util(i) + boosted_task_util(p);
 
 		if (new_util > capacity_orig_of(i))
 			continue;
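
The effect in energy_aware_wake_cpu() is that candidate CPUs are now filtered against the boosted estimate, so a boosted task is steered away from CPUs whose original capacity only covers its raw utilization. A small illustration of that filter with made-up per-CPU numbers:

#include <stdio.h>

int main(void)
{
	/* Hypothetical 2 little + 2 big CPUs and their current utilization. */
	unsigned long capacity_orig[] = { 430, 430, 1024, 1024 };
	unsigned long cpu_util[]      = { 150, 300,  100,  400 };
	unsigned long boosted_util    = 300;	/* e.g. task_util 200 + margin 100 */

	for (int i = 0; i < 4; i++) {
		unsigned long new_util = cpu_util[i] + boosted_util;

		/* Same rejection test as the hunk above. */
		if (new_util > capacity_orig[i]) {
			printf("cpu%d: skipped (new_util %lu > %lu)\n",
			       i, new_util, capacity_orig[i]);
			continue;
		}
		printf("cpu%d: candidate (new_util %lu)\n", i, new_util);
	}
	return 0;
}
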
diff --git a/kernel/sched/tune.c b/kernel/sched/tune.c
index 540b945a01ce..87213861bde5 100644
--- a/kernel/sched/tune.c
+++ b/kernel/sched/tune.c
@@ -240,6 +240,20 @@ int schedtune_cpu_boost(int cpu)
 	return bg->boost_max;
 }
 
+int schedtune_task_boost(struct task_struct *p)
+{
+	struct schedtune *st;
+	int task_boost;
+
+	/* Get task boost value */
+	rcu_read_lock();
+	st = task_schedtune(p);
+	task_boost = st->boost;
+	rcu_read_unlock();
+
+	return task_boost;
+}
+
 static u64
 boost_read(struct cgroup_subsys_state *css, struct cftype *cft)
 {
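
The rcu_read_lock()/rcu_read_unlock() pair above is needed because the task's cgroup attachment, and hence the group returned by task_schedtune(), is only stable inside an RCU read-side section; the group's css is freed via RCU once it goes away. A single-threaded userspace model of the data flow (structure layout and group name are hypothetical, and the RCU protection itself is not reproduced):

#include <stdio.h>

/* Simplified stand-ins for the kernel structures used above. */
struct schedtune {
	int boost;			/* per-group boost value, in percent */
};

struct task {
	struct schedtune *st;		/* group the task is currently attached to */
};

/*
 * Model of schedtune_task_boost(): follow the task to its current group and
 * read that group's boost.  In the kernel this walk is done under
 * rcu_read_lock() so the group cannot be freed while boost is being read.
 */
static int task_boost(const struct task *p)
{
	return p->st->boost;
}

int main(void)
{
	struct schedtune top_app = { .boost = 10 };
	struct task t = { .st = &top_app };

	printf("boost = %d%%\n", task_boost(&t));
	return 0;
}
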
diff --git a/kernel/sched/tune.h b/kernel/sched/tune.h
index 561b5171a19b..d756ce7b06e0 100644
--- a/kernel/sched/tune.h
+++ b/kernel/sched/tune.h
@@ -4,6 +4,7 @@
 #ifdef CONFIG_CGROUP_SCHEDTUNE
 
 int schedtune_cpu_boost(int cpu);
+int schedtune_task_boost(struct task_struct *tsk);
 
 void schedtune_enqueue_task(struct task_struct *p, int cpu);
 void schedtune_dequeue_task(struct task_struct *p, int cpu);