-rw-r--r--	kernel/sched/fair.c	12
-rw-r--r--	kernel/sched/rt.c	4
-rw-r--r--	kernel/sched/sched.h	11
3 files changed, 16 insertions, 11 deletions
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 3614bb03cab5..6c7d2e4d5e90 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -2831,7 +2831,7 @@ static void boost_kick_cpus(void)
 	}
 }
 
-static inline int sched_boost(void)
+int sched_boost(void)
 {
 	return boost_refcount > 0;
 }
@@ -3622,11 +3622,6 @@ void check_for_migration(struct rq *rq, struct task_struct *p)
 				&rq->active_balance_work);
 }
 
-static inline int capacity(struct rq *rq)
-{
-	return rq->capacity;
-}
-
 static inline int nr_big_tasks(struct rq *rq)
 {
 	return rq->nr_big_tasks;
@@ -3687,11 +3682,6 @@ static inline int nr_big_tasks(struct rq *rq)
 	return 0;
 }
 
-static inline int capacity(struct rq *rq)
-{
-	return SCHED_LOAD_SCALE;
-}
-
 #endif /* CONFIG_SCHED_HMP */
 
 #ifdef CONFIG_SCHED_HMP
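The fair.c hunks above make two preparatory changes: sched_boost() loses its static inline so the symbol becomes visible outside fair.c, and the two file-local capacity() helpers are removed (they reappear in sched.h below). A minimal sketch of the resulting linkage pattern, assuming boost_refcount is fair.c's file-local boost reference count as in the first hunk:

/*
 * Sketch only, not the kernel source. Dropping "static inline" gives
 * sched_boost() external linkage, so the extern declaration added to
 * sched.h lets rt.c call fair.c's definition.
 */

/* fair.c side: file-local state behind an externally visible accessor */
static int boost_refcount;

int sched_boost(void)
{
	return boost_refcount > 0;
}

/* sched.h side: shared declaration, usable from rt.c and elsewhere */
extern int sched_boost(void);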
diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c
index 280b9a8da5f8..f2f9b92f75cb 100644
--- a/kernel/sched/rt.c
+++ b/kernel/sched/rt.c
@@ -1659,6 +1659,10 @@ static int find_lowest_rq_hmp(struct task_struct *task)
 		cpu_cost = power_cost_at_freq(i, ACCESS_ONCE(rq->min_freq));
 		trace_sched_cpu_load(rq, idle_cpu(i),
 				     mostly_idle_cpu(i), cpu_cost);
+
+		if (sched_boost() && capacity(rq) != max_capacity)
+			continue;
+
 		if (cpu_cost < min_cost) {
 			min_cost = cpu_cost;
 			best_cpu = i;
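The rt.c hunk is the behavioral core of the patch: while a boost is in effect, find_lowest_rq_hmp() skips every CPU whose capacity is below the maximum, so the lowest-power-cost search only ranks max-capacity CPUs. A self-contained sketch of that selection rule follows; the capacities, power costs, and helper names are invented for illustration, and only the skip condition mirrors the patch:

#include <limits.h>
#include <stdio.h>

#define NR_CPUS		4
#define MAX_CAPACITY	1024	/* stand-in for the scheduler's max_capacity */

static int cpu_capacity[NR_CPUS]   = { 512, 512, 1024, 1024 };
static int cpu_power_cost[NR_CPUS] = {  10,  12,   40,   35 };
static int boost_active = 1;		/* stand-in for sched_boost() */

static int find_lowest_cost_cpu(void)
{
	int i, best_cpu = -1, min_cost = INT_MAX;

	for (i = 0; i < NR_CPUS; i++) {
		/* The patch's new rule: under boost, only max-capacity CPUs qualify. */
		if (boost_active && cpu_capacity[i] != MAX_CAPACITY)
			continue;

		if (cpu_power_cost[i] < min_cost) {
			min_cost = cpu_power_cost[i];
			best_cpu = i;
		}
	}
	return best_cpu;
}

int main(void)
{
	/* Boost on: CPUs 0-1 (capacity 512) are skipped, CPU 3 wins on cost. */
	printf("best_cpu = %d\n", find_lowest_cost_cpu());
	return 0;
}

With boost_active = 0 the filter is inert and the cheapest CPU (CPU 0 here) wins, which matches the pre-patch behavior.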
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index fcdf4063ac11..117578626e8f 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -995,6 +995,11 @@ unsigned int max_task_load(void);
 extern void sched_account_irqtime(int cpu, struct task_struct *curr,
 				 u64 delta, u64 wallclock);
 
+static inline int capacity(struct rq *rq)
+{
+	return rq->capacity;
+}
+
 static inline void
 inc_cumulative_runnable_avg(struct rq *rq, struct task_struct *p)
 {
@@ -1026,6 +1031,11 @@ dec_cumulative_runnable_avg(struct rq *rq, struct task_struct *p)
 
 static inline int pct_task_load(struct task_struct *p) { return 0; }
 
+static inline int capacity(struct rq *rq)
+{
+	return SCHED_LOAD_SCALE;
+}
+
 static inline void
 inc_cumulative_runnable_avg(struct rq *rq, struct task_struct *p)
 {
@@ -1120,6 +1130,7 @@ extern void set_hmp_defaults(void);
 extern unsigned int power_cost_at_freq(int cpu, unsigned int freq);
 extern void reset_all_window_stats(u64 window_start, unsigned int window_size);
 extern void boost_kick(int cpu);
+extern int sched_boost(void);
 
 #else /* CONFIG_SCHED_HMP */
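The sched.h hunks centralize the two capacity() flavors that fair.c previously carried, placing the per-runqueue version in the CONFIG_SCHED_HMP section and the fixed-value fallback in the #else branch, alongside the new extern declaration of sched_boost(). A compile-time sketch of that split, with a reduced struct rq and an illustrative SCHED_LOAD_SCALE value:

#include <stdio.h>

#define SCHED_LOAD_SCALE 1024	/* illustrative value for this sketch */

struct rq {
	int capacity;		/* reduced stand-in for the kernel's struct rq */
};

#ifdef CONFIG_SCHED_HMP
/* HMP builds: capacity is tracked per runqueue. */
static inline int capacity(struct rq *rq)
{
	return rq->capacity;
}
#else
/*
 * Non-HMP builds: every CPU reports the same fixed capacity, so a
 * check like "capacity(rq) != max_capacity" cannot single out CPUs.
 */
static inline int capacity(struct rq *rq)
{
	return SCHED_LOAD_SCALE;
}
#endif

int main(void)
{
	struct rq rq = { .capacity = 512 };

	printf("capacity = %d\n", capacity(&rq));	/* 512 or 1024 by config */
	return 0;
}

Compile with -DCONFIG_SCHED_HMP to exercise the per-runqueue variant.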