summaryrefslogtreecommitdiff
path: root/kernel/sched
diff options
context:
space:
mode:
authorSrivatsa Vaddagiri <vatsa@codeaurora.org>2014-11-04 14:24:19 +0530
committerDavid Keitel <dkeitel@codeaurora.org>2016-03-23 20:00:58 -0700
commit33af11b6f4238f34860e305cf7610021ea8036b4 (patch)
tree0a6200a2b28d5ee6475662b7fdcf95ecc4f4bb84 /kernel/sched
parentbb63ece414d917d843ab3fbb44c230fad035d416 (diff)
sched: Add API to set task's initial task load
Add a per-task attribute, init_load_pct, that is used to initialize newly created children's initial task load. This helps important applications launch their child tasks on cpus with highest capacity. Change-Id: Ie9665fd2aeb15203f95fd7f211c50bebbaa18727 Signed-off-by: Srivatsa Vaddagiri <vatsa@codeaurora.org> [joonwoop@codeaurora.org: fixed conflict int init_new_task_load. se.avg.runnable_avg_sum has deprecated.] Signed-off-by: Joonwoo Park <joonwoop@codeaurora.org>
Diffstat (limited to 'kernel/sched')
-rw-r--r--kernel/sched/fair.c32
1 file changed, 29 insertions, 3 deletions
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index e078a63adc51..833225a9fe57 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -2700,6 +2700,21 @@ void set_hmp_defaults(void)
(u64)sched_ravg_window, 100);
}
+u32 sched_get_init_task_load(struct task_struct *p)
+{
+ return p->init_load_pct;
+}
+
+int sched_set_init_task_load(struct task_struct *p, int init_load_pct)
+{
+ if (init_load_pct < 0 || init_load_pct > 100)
+ return -EINVAL;
+
+ p->init_load_pct = init_load_pct;
+
+ return 0;
+}
+
/*
* 'load' is in reference to "best cpu" at its best frequency.
* Scale that in reference to a given cpu, accounting for how bad it is
@@ -3605,13 +3620,24 @@ static inline int capacity(struct rq *rq)
void init_new_task_load(struct task_struct *p)
{
int i;
+ u32 init_load_windows = sched_init_task_load_windows;
+ u32 init_load_pelt = sched_init_task_load_pelt;
+ u32 init_load_pct = current->init_load_pct;
+ /* Note: child's init_load_pct itself would be 0 */
memset(&p->ravg, 0, sizeof(struct ravg));
+ if (init_load_pct) {
+ init_load_pelt = div64_u64((u64)init_load_pct *
+ (u64)LOAD_AVG_MAX, 100);
+ init_load_windows = div64_u64((u64)init_load_pct *
+ (u64)sched_ravg_window, 100);
+ }
+
+ p->ravg.demand = init_load_windows;
for (i = 0; i < RAVG_HIST_SIZE_MAX; ++i)
- p->ravg.sum_history[i] = sched_init_task_load_windows;
- p->se.avg.runnable_avg_sum_scaled = sched_init_task_load_pelt;
- p->ravg.demand = sched_init_task_load_windows;
+ p->ravg.sum_history[i] = init_load_windows;
+ p->se.avg.runnable_avg_sum_scaled = init_load_pelt;
}
#else /* CONFIG_SCHED_HMP */