Diffstat (limited to 'kernel')
 -rw-r--r--  kernel/sched/fair.c   | 23
 -rw-r--r--  kernel/sched/sched.h  | 22
 2 files changed, 21 insertions(+), 24 deletions(-)
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index e1c8ec0458b3..c52655581c4c 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -3634,15 +3634,8 @@ static inline void inc_cfs_rq_hmp_stats(struct cfs_rq *cfs_rq,
static inline void dec_cfs_rq_hmp_stats(struct cfs_rq *cfs_rq,
struct task_struct *p, int change_cra) { }
-static inline void inc_throttled_cfs_rq_hmp_stats(struct hmp_sched_stats *stats,
- struct cfs_rq *cfs_rq)
-{
-}
-
-static inline void dec_throttled_cfs_rq_hmp_stats(struct hmp_sched_stats *stats,
- struct cfs_rq *cfs_rq)
-{
-}
+#define dec_throttled_cfs_rq_hmp_stats(...)
+#define inc_throttled_cfs_rq_hmp_stats(...)
#endif /* CONFIG_SCHED_HMP */
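[Editor's note: for readers new to this idiom, replacing empty inline stubs with empty variadic macros makes the call and all of its arguments disappear at preprocessing time, which is what lets the later hunks drop the #ifdef guards around the call sites. A minimal standalone sketch; CONFIG_FEATURE and update_feature_stats are illustrative names, not from this patch:

#include <stdio.h>

/* Build with -DCONFIG_FEATURE to get the real implementation. */
#ifdef CONFIG_FEATURE
static void update_feature_stats(int cpu, int delta)
{
	printf("cpu %d: delta %d\n", cpu, delta);
}
#else
/*
 * Empty variadic macro: the call and all of its arguments are
 * removed by the preprocessor, so callers need no #ifdef of their
 * own and no unused stub function is left behind.
 */
#define update_feature_stats(...)
#endif

int main(void)
{
	update_feature_stats(0, 42);	/* a no-op unless CONFIG_FEATURE is set */
	return 0;
}
]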
@@ -4670,6 +4663,7 @@ static inline int cfs_rq_throttled(struct cfs_rq *cfs_rq)
return cfs_bandwidth_used() && cfs_rq->throttled;
}
+#ifdef CONFIG_SCHED_HMP
/*
* Check if task is part of a hierarchy where some cfs_rq does not have any
* runtime left.
@@ -4696,6 +4690,7 @@ static int task_will_be_throttled(struct task_struct *p)
return 0;
}
+#endif
/* check whether cfs_rq, or any parent, is throttled */
static inline int throttled_hierarchy(struct cfs_rq *cfs_rq)
@@ -4776,9 +4771,7 @@ static void throttle_cfs_rq(struct cfs_rq *cfs_rq)
if (dequeue)
dequeue_entity(qcfs_rq, se, DEQUEUE_SLEEP);
qcfs_rq->h_nr_running -= task_delta;
-#ifdef CONFIG_SCHED_HMP
dec_throttled_cfs_rq_hmp_stats(&qcfs_rq->hmp_stats, cfs_rq);
-#endif
if (qcfs_rq->load.weight)
dequeue = 0;
@@ -4786,9 +4779,7 @@ static void throttle_cfs_rq(struct cfs_rq *cfs_rq)
if (!se) {
sub_nr_running(rq, task_delta);
-#ifdef CONFIG_SCHED_HMP
dec_throttled_cfs_rq_hmp_stats(&rq->hmp_stats, cfs_rq);
-#endif
}
cfs_rq->throttled = 1;
@@ -4825,7 +4816,7 @@ void unthrottle_cfs_rq(struct cfs_rq *cfs_rq)
struct sched_entity *se;
int enqueue = 1;
long task_delta;
- struct cfs_rq *tcfs_rq = cfs_rq;
+ struct cfs_rq *tcfs_rq __maybe_unused = cfs_rq;
se = cfs_rq->tg->se[cpu_of(rq)];
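[Editor's note: on the __maybe_unused annotation above: with CONFIG_SCHED_HMP disabled, the new stub macros swallow their arguments, leaving tcfs_rq otherwise unreferenced, and the annotation keeps -Wunused-variable quiet in that configuration. A small sketch of the same situation under made-up names; __maybe_unused is spelled out so the sketch compiles outside the kernel tree:

#define __maybe_unused __attribute__((unused))

#ifdef CONFIG_FEATURE
static int total;
#define account_stats(v) (total += (v))
#else
#define account_stats(v)	/* compiles away, argument and all */
#endif

void example(void)
{
	/*
	 * With CONFIG_FEATURE off, account_stats() discards 'value',
	 * so without __maybe_unused the compiler would warn that the
	 * variable is never used.
	 */
	int value __maybe_unused = 10;

	account_stats(value);
}
]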
@@ -4853,9 +4844,7 @@ void unthrottle_cfs_rq(struct cfs_rq *cfs_rq)
if (enqueue)
enqueue_entity(cfs_rq, se, ENQUEUE_WAKEUP);
cfs_rq->h_nr_running += task_delta;
-#ifdef CONFIG_SCHED_HMP
inc_throttled_cfs_rq_hmp_stats(&cfs_rq->hmp_stats, tcfs_rq);
-#endif
if (cfs_rq_throttled(cfs_rq))
break;
@@ -4863,9 +4852,7 @@ void unthrottle_cfs_rq(struct cfs_rq *cfs_rq)
if (!se) {
add_nr_running(rq, task_delta);
-#ifdef CONFIG_SCHED_HMP
inc_throttled_cfs_rq_hmp_stats(&rq->hmp_stats, tcfs_rq);
-#endif
}
/* determine whether we need to wake up potentially idle cpu */
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index c110c4aaf2be..a9d98b7dd10e 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -1055,6 +1055,12 @@ static inline void sched_ttwu_pending(void) { }
#include "stats.h"
#include "auto_group.h"
+enum sched_boost_policy {
+ SCHED_BOOST_NONE,
+ SCHED_BOOST_ON_BIG,
+ SCHED_BOOST_ON_ALL,
+};
+
#ifdef CONFIG_SCHED_HMP
#define WINDOW_STATS_RECENT 0
@@ -1139,12 +1145,6 @@ extern unsigned int update_freq_aggregate_threshold(unsigned int threshold);
extern void update_avg_burst(struct task_struct *p);
extern void update_avg(u64 *avg, u64 sample);
-enum sched_boost_policy {
- SCHED_BOOST_NONE,
- SCHED_BOOST_ON_BIG,
- SCHED_BOOST_ON_ALL,
-};
-
#define NO_BOOST 0
#define FULL_THROTTLE_BOOST 1
#define CONSERVATIVE_BOOST 2
@@ -1496,6 +1496,16 @@ struct hmp_sched_stats;
struct related_thread_group;
struct sched_cluster;
+static inline enum sched_boost_policy sched_boost_policy(void)
+{
+ return SCHED_BOOST_NONE;
+}
+
+static inline bool task_sched_boost(struct task_struct *p)
+{
+ return true;
+}
+
static inline int got_boost_kick(void)
{
return 0;
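[Editor's note: taken together, hoisting enum sched_boost_policy out of the CONFIG_SCHED_HMP block and adding static inline stubs is the usual kernel pattern for config-dependent APIs whose return values callers consume: the type must be visible in both configurations, and the stub is type-checked like the real function while inlining down to a constant. A condensed sketch with illustrative names, not the patch's actual interfaces:

#include <stdbool.h>

/* Visible in both configurations, like the hoisted enum above. */
enum boost_policy {
	BOOST_NONE,
	BOOST_ON_BIG,
	BOOST_ON_ALL,
};

#ifdef CONFIG_FEATURE
/* Real implementation would live in a feature-specific file. */
enum boost_policy current_boost_policy(void);
#else
/*
 * Stub for the disabled configuration: returns the neutral policy
 * and folds to a constant, so callers stay free of #ifdefs.
 */
static inline enum boost_policy current_boost_policy(void)
{
	return BOOST_NONE;
}
#endif

bool should_boost(void)
{
	return current_boost_policy() != BOOST_NONE;
}
]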