diff options
 include/linux/sched.h |  2 +-
 kernel/sched/core.c   | 39 +++++++++++++++++++++------------------
 2 files changed, 22 insertions(+), 19 deletions(-)
diff --git a/include/linux/sched.h b/include/linux/sched.h index d3cbea6e2a5c..09974bc791ea 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -2109,7 +2109,7 @@ static inline cputime_t task_gtime(struct task_struct *t) extern void task_cputime_adjusted(struct task_struct *p, cputime_t *ut, cputime_t *st); extern void thread_group_cputime_adjusted(struct task_struct *p, cputime_t *ut, cputime_t *st); -#if defined(CONFIG_SCHED_FREQ_INPUT) || defined(CONFIG_SCHED_HMP) +#if defined(CONFIG_SCHED_FREQ_INPUT) extern int sched_set_window(u64 window_start, unsigned int window_size); extern unsigned long sched_get_busy(int cpu); extern void sched_set_io_is_busy(int val); diff --git a/kernel/sched/core.c b/kernel/sched/core.c index 308e36d4895c..658f0623c78f 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -1703,24 +1703,6 @@ static inline void migrate_sync_cpu(int cpu) sync_cpu = smp_processor_id(); } -unsigned long sched_get_busy(int cpu) -{ - unsigned long flags; - struct rq *rq = cpu_rq(cpu); - - /* - * This function could be called in timer context, and the - * current task may have been executing for a long time. Ensure - * that the window stats are current by doing an update. - */ - raw_spin_lock_irqsave(&rq->lock, flags); - update_task_ravg(rq->curr, rq, TASK_UPDATE, sched_clock(), 0); - raw_spin_unlock_irqrestore(&rq->lock, flags); - - return div64_u64(scale_load_to_cpu(rq->prev_runnable_sum, cpu), - NSEC_PER_USEC); -} - static void reset_all_task_stats(void) { struct task_struct *g, *p; @@ -1847,6 +1829,26 @@ void reset_all_window_stats(u64 window_start, unsigned int window_size) local_irq_restore(flags); } +#ifdef CONFIG_SCHED_FREQ_INPUT + +unsigned long sched_get_busy(int cpu) +{ + unsigned long flags; + struct rq *rq = cpu_rq(cpu); + + /* + * This function could be called in timer context, and the + * current task may have been executing for a long time. Ensure + * that the window stats are current by doing an update. 
+ */ + raw_spin_lock_irqsave(&rq->lock, flags); + update_task_ravg(rq->curr, rq, TASK_UPDATE, sched_clock(), 0); + raw_spin_unlock_irqrestore(&rq->lock, flags); + + return div64_u64(scale_load_to_cpu(rq->prev_runnable_sum, cpu), + NSEC_PER_USEC); +} + void sched_set_io_is_busy(int val) { sched_io_is_busy = val; @@ -1879,6 +1881,7 @@ int sched_set_window(u64 window_start, unsigned int window_size) return 0; } +#endif /* CONFIG_SCHED_FREQ_INPUT */ /* Keep track of max/min capacity possible across CPUs "currently" */ static void update_min_max_capacity(void) |
