Diffstat (limited to 'kernel')
-rw-r--r--	kernel/sched/fair.c	| 32
1 file changed, 29 insertions(+), 3 deletions(-)
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index e078a63adc51..833225a9fe57 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -2700,6 +2700,21 @@ void set_hmp_defaults(void)
 			  (u64)sched_ravg_window, 100);
 }
 
+u32 sched_get_init_task_load(struct task_struct *p)
+{
+	return p->init_load_pct;
+}
+
+int sched_set_init_task_load(struct task_struct *p, int init_load_pct)
+{
+	if (init_load_pct < 0 || init_load_pct > 100)
+		return -EINVAL;
+
+	p->init_load_pct = init_load_pct;
+
+	return 0;
+}
+
 /*
  * 'load' is in reference to "best cpu" at its best frequency.
  * Scale that in reference to a given cpu, accounting for how bad it is
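
The first hunk adds a validated accessor pair around the per-task init_load_pct field: the setter rejects anything outside 0..100 and the getter simply reads the field back. The following is a minimal userspace sketch of the same pattern, not the kernel code itself; the struct task_struct here is a stand-in containing only the field this patch touches.

	#include <errno.h>
	#include <stdio.h>

	/* Stand-in for the kernel's task_struct: only the field this patch adds/uses. */
	struct task_struct {
		unsigned int init_load_pct;	/* initial task load, as a percentage */
	};

	static unsigned int sched_get_init_task_load(struct task_struct *p)
	{
		return p->init_load_pct;
	}

	static int sched_set_init_task_load(struct task_struct *p, int init_load_pct)
	{
		/* Reject anything outside 0..100, mirroring the kernel's range check. */
		if (init_load_pct < 0 || init_load_pct > 100)
			return -EINVAL;

		p->init_load_pct = init_load_pct;
		return 0;
	}

	int main(void)
	{
		struct task_struct t = { .init_load_pct = 0 };

		if (sched_set_init_task_load(&t, 150) == -EINVAL)
			printf("150%% rejected as expected\n");

		sched_set_init_task_load(&t, 90);
		printf("init_load_pct = %u\n", sched_get_init_task_load(&t));
		return 0;
	}
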
@@ -3605,13 +3620,24 @@ static inline int capacity(struct rq *rq)
 void init_new_task_load(struct task_struct *p)
 {
 	int i;
+	u32 init_load_windows = sched_init_task_load_windows;
+	u32 init_load_pelt = sched_init_task_load_pelt;
+	u32 init_load_pct = current->init_load_pct;
 
+	/* Note: child's init_load_pct itself would be 0 */
 	memset(&p->ravg, 0, sizeof(struct ravg));
 
+	if (init_load_pct) {
+		init_load_pelt = div64_u64((u64)init_load_pct *
+				(u64)LOAD_AVG_MAX, 100);
+		init_load_windows = div64_u64((u64)init_load_pct *
+				(u64)sched_ravg_window, 100);
+	}
+
+	p->ravg.demand = init_load_windows;
 	for (i = 0; i < RAVG_HIST_SIZE_MAX; ++i)
-		p->ravg.sum_history[i] = sched_init_task_load_windows;
-	p->se.avg.runnable_avg_sum_scaled = sched_init_task_load_pelt;
-	p->ravg.demand = sched_init_task_load_windows;
+		p->ravg.sum_history[i] = init_load_windows;
+	p->se.avg.runnable_avg_sum_scaled = init_load_pelt;
 }
 
 #else /* CONFIG_SCHED_HMP */
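
The second hunk makes init_new_task_load() scale the parent's init_load_pct (when non-zero) into the two units the HMP scheduler actually seeds: a fraction of one sched_ravg_window for the windowed demand/history, and a fraction of LOAD_AVG_MAX for the scaled PELT sum. The sketch below reproduces that arithmetic standalone; the 10 ms window and the LOAD_AVG_MAX value of 47742 are illustrative assumptions rather than values taken from this diff, and div64_u64() is modelled as plain 64-bit division.

	#include <stdint.h>
	#include <stdio.h>
	#include <inttypes.h>

	/* Illustrative values, not taken from the diff: PELT's maximum
	 * accumulated sum and a 10 ms accounting window in nanoseconds. */
	#define LOAD_AVG_MAX		47742ULL
	#define SCHED_RAVG_WINDOW	10000000ULL

	/* Userspace stand-in for the kernel's div64_u64(). */
	static uint64_t div64_u64(uint64_t dividend, uint64_t divisor)
	{
		return dividend / divisor;
	}

	int main(void)
	{
		uint32_t init_load_pct = 15;	/* e.g. parent set 15% via the new API */

		/* Same scaling as the patched init_new_task_load(): a percentage
		 * of the PELT maximum and of one accounting window. */
		uint64_t init_load_pelt = div64_u64((uint64_t)init_load_pct *
						    LOAD_AVG_MAX, 100);
		uint64_t init_load_windows = div64_u64((uint64_t)init_load_pct *
						       SCHED_RAVG_WINDOW, 100);

		printf("pelt seed:   %" PRIu64 "\n", init_load_pelt);	   /* 7161 */
		printf("window seed: %" PRIu64 " ns\n", init_load_windows); /* 1500000 */
		return 0;
	}
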