 include/trace/events/sched.h | 3 ++-
 kernel/sched/sched.h         | 7 ++++---
 2 files changed, 6 insertions(+), 4 deletions(-)
diff --git a/include/trace/events/sched.h b/include/trace/events/sched.h
index c50310a7fd6d..dffaffab4bc8 100644
--- a/include/trace/events/sched.h
+++ b/include/trace/events/sched.h
@@ -1057,7 +1057,8 @@ TRACE_EVENT(walt_update_history,
         __entry->samples = samples;
         __entry->evt = evt;
         __entry->demand = p->ravg.demand;
-        __entry->walt_avg = (__entry->demand << 10) / walt_ravg_window,
+        __entry->walt_avg = (__entry->demand << 10);
+        do_div(__entry->walt_avg, walt_ravg_window);
         __entry->pelt_avg = p->se.avg.util_avg;
         memcpy(__entry->hist, p->ravg.sum_history,
                 RAVG_HIST_SIZE_MAX * sizeof(u32));
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index b2d8ad59f41f..2f2b959ad244 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -1577,9 +1577,10 @@ static inline unsigned long __cpu_util(int cpu, int delta)
     unsigned long capacity = capacity_orig_of(cpu);
 
 #ifdef CONFIG_SCHED_WALT
-    if (!walt_disabled && sysctl_sched_use_walt_cpu_util)
-        util = (cpu_rq(cpu)->prev_runnable_sum << SCHED_LOAD_SHIFT) /
-            walt_ravg_window;
+    if (!walt_disabled && sysctl_sched_use_walt_cpu_util) {
+        util = cpu_rq(cpu)->prev_runnable_sum << SCHED_LOAD_SHIFT;
+        do_div(util, walt_ravg_window);
+    }
 #endif
     delta += util;
     if (delta < 0)
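Both hunks apply the same fix: dividing a 64-bit value with the plain '/' operator makes the compiler emit a call to a libgcc helper (such as __aeabi_uldivmod on 32-bit ARM), which the kernel does not link against, so u64-by-u32 divisions go through the do_div() macro from <asm/div64.h> instead. do_div(n, base) divides the 64-bit lvalue n in place and evaluates to the 32-bit remainder. The userspace sketch below mirrors those semantics to show the calling convention; the macro body and the sample values are illustrative stand-ins, not the kernel implementation.

#include <stdint.h>
#include <stdio.h>

/*
 * Minimal stand-in for the kernel's do_div() (a sketch, not the real
 * <asm/div64.h> code): divide the 64-bit lvalue n by a 32-bit base in
 * place and evaluate to the remainder. Uses a GCC/Clang statement
 * expression; n and base are evaluated more than once, so avoid
 * arguments with side effects.
 */
#define do_div(n, base) ({                              \
        uint32_t __rem = (uint32_t)((n) % (base));      \
        (n) /= (base);                                  \
        __rem;                                          \
})

int main(void)
{
        /* Hypothetical numbers standing in for prev_runnable_sum
         * shifted by SCHED_LOAD_SHIFT and for walt_ravg_window. */
        uint64_t util = 123456789ULL << 10;
        uint32_t window = 20000000U;

        uint32_t rem = do_div(util, window); /* util becomes the quotient */
        printf("quotient=%llu remainder=%u\n",
               (unsigned long long)util, rem);
        return 0;
}

Because do_div() modifies its first argument in place, the patch splits each former one-expression division into two statements: first the shift is assigned, then do_div() replaces the value with the quotient.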
