summaryrefslogtreecommitdiff
path: root/include/linux
diff options
context:
space:
mode:
authorSyed Rameez Mustafa <rameezmustafa@codeaurora.org>2016-05-09 16:28:07 -0700
committerSyed Rameez Mustafa <rameezmustafa@codeaurora.org>2016-10-17 12:43:53 -0700
commiteb7300e9a89edf0692fa53dbb6cb4214f9130927 (patch)
tree8b618a813045e474a52891cdc00a1230360ae83c /include/linux
parent4f983fd1aa1ae7640ddaff7e4838f9f04f3e263f (diff)
sched: Add per CPU load tracking for each task
Keeping a track of the load footprint of each task on every CPU that it executed on gives the scheduler much more flexibility in terms of the number of frequency guidance policies. These new fields will be used in subsequent patches as we alter the load fixup mechanism upon task migration. We still need to maintain the curr/prev_window sums as they will also be required in subsequent patches as we start to track top tasks based on cumulative load. Also, we need to call init_new_task_load() for the idle task. This is an existing harmless bug as load tracking for the idle task is irrelevant. However, in this patch we are adding pointers to the ravg structure. These pointers have to be initialized even for the idle task. Finally move init_new_task_load() to sched_fork(). This was always the more appropriate place, however, following the introduction of new pointers in the ravg struct, this is necessary to avoid races with functions such as reset_all_task_stats(). Change-Id: Ib584372eb539706da4319973314e54dae04e5934 Signed-off-by: Syed Rameez Mustafa <rameezmustafa@codeaurora.org>
Diffstat (limited to 'include/linux')
-rw-r--r--include/linux/sched.h15
1 file changed, 10 insertions, 5 deletions
diff --git a/include/linux/sched.h b/include/linux/sched.h
index a395d8a9ff73..06acefeffd4c 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -356,7 +356,7 @@ extern int lockdep_tasklist_lock_is_held(void);
extern void sched_init(void);
extern void sched_init_smp(void);
extern asmlinkage void schedule_tail(struct task_struct *prev);
-extern void init_idle(struct task_struct *idle, int cpu);
+extern void init_idle(struct task_struct *idle, int cpu, bool hotplug);
extern void init_idle_bootup_task(struct task_struct *idle);
extern cpumask_var_t cpu_isolated_map;
@@ -1332,11 +1332,15 @@ struct ravg {
* sysctl_sched_ravg_hist_size windows. 'demand' could drive frequency
* demand for tasks.
*
- * 'curr_window' represents task's contribution to cpu busy time
- * statistics (rq->curr_runnable_sum) in current window
+ * 'curr_window_cpu' represents task's contribution to cpu busy time on
+ * various CPUs in the current window
*
- * 'prev_window' represents task's contribution to cpu busy time
- * statistics (rq->prev_runnable_sum) in previous window
+ * 'prev_window_cpu' represents task's contribution to cpu busy time on
+ * various CPUs in the previous window
+ *
+ * 'curr_window' represents the sum of all entries in curr_window_cpu
+ *
+ * 'prev_window' represents the sum of all entries in prev_window_cpu
*
* 'pred_demand' represents task's current predicted cpu busy time
*
@@ -1346,6 +1350,7 @@ struct ravg {
u64 mark_start;
u32 sum, demand;
u32 sum_history[RAVG_HIST_SIZE_MAX];
+ u32 *curr_window_cpu, *prev_window_cpu;
u32 curr_window, prev_window;
u16 active_windows;
u32 pred_demand;