diff options
Diffstat (limited to 'kernel/sched/fair.c')
| -rw-r--r-- | kernel/sched/fair.c | 23 |
1 file changed, 5 insertions, 18 deletions
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c index e1c8ec0458b3..c52655581c4c 100644 --- a/kernel/sched/fair.c +++ b/kernel/sched/fair.c @@ -3634,15 +3634,8 @@ static inline void inc_cfs_rq_hmp_stats(struct cfs_rq *cfs_rq, static inline void dec_cfs_rq_hmp_stats(struct cfs_rq *cfs_rq, struct task_struct *p, int change_cra) { } -static inline void inc_throttled_cfs_rq_hmp_stats(struct hmp_sched_stats *stats, - struct cfs_rq *cfs_rq) -{ -} - -static inline void dec_throttled_cfs_rq_hmp_stats(struct hmp_sched_stats *stats, - struct cfs_rq *cfs_rq) -{ -} +#define dec_throttled_cfs_rq_hmp_stats(...) +#define inc_throttled_cfs_rq_hmp_stats(...) #endif /* CONFIG_SCHED_HMP */ @@ -4670,6 +4663,7 @@ static inline int cfs_rq_throttled(struct cfs_rq *cfs_rq) return cfs_bandwidth_used() && cfs_rq->throttled; } +#ifdef CONFIG_SCHED_HMP /* * Check if task is part of a hierarchy where some cfs_rq does not have any * runtime left. @@ -4696,6 +4690,7 @@ static int task_will_be_throttled(struct task_struct *p) return 0; } +#endif /* check whether cfs_rq, or any parent, is throttled */ static inline int throttled_hierarchy(struct cfs_rq *cfs_rq) @@ -4776,9 +4771,7 @@ static void throttle_cfs_rq(struct cfs_rq *cfs_rq) if (dequeue) dequeue_entity(qcfs_rq, se, DEQUEUE_SLEEP); qcfs_rq->h_nr_running -= task_delta; -#ifdef CONFIG_SCHED_HMP dec_throttled_cfs_rq_hmp_stats(&qcfs_rq->hmp_stats, cfs_rq); -#endif if (qcfs_rq->load.weight) dequeue = 0; @@ -4786,9 +4779,7 @@ static void throttle_cfs_rq(struct cfs_rq *cfs_rq) if (!se) { sub_nr_running(rq, task_delta); -#ifdef CONFIG_SCHED_HMP dec_throttled_cfs_rq_hmp_stats(&rq->hmp_stats, cfs_rq); -#endif } cfs_rq->throttled = 1; @@ -4825,7 +4816,7 @@ void unthrottle_cfs_rq(struct cfs_rq *cfs_rq) struct sched_entity *se; int enqueue = 1; long task_delta; - struct cfs_rq *tcfs_rq = cfs_rq; + struct cfs_rq *tcfs_rq __maybe_unused = cfs_rq; se = cfs_rq->tg->se[cpu_of(rq)]; @@ -4853,9 +4844,7 @@ void unthrottle_cfs_rq(struct cfs_rq *cfs_rq) if 
(enqueue) enqueue_entity(cfs_rq, se, ENQUEUE_WAKEUP); cfs_rq->h_nr_running += task_delta; -#ifdef CONFIG_SCHED_HMP inc_throttled_cfs_rq_hmp_stats(&cfs_rq->hmp_stats, tcfs_rq); -#endif if (cfs_rq_throttled(cfs_rq)) break; @@ -4863,9 +4852,7 @@ void unthrottle_cfs_rq(struct cfs_rq *cfs_rq) if (!se) { add_nr_running(rq, task_delta); -#ifdef CONFIG_SCHED_HMP inc_throttled_cfs_rq_hmp_stats(&rq->hmp_stats, tcfs_rq); -#endif } /* determine whether we need to wake up potentially idle cpu */ |
