path: root/kernel/sched/sched.h
author		Pavankumar Kondeti <pkondeti@codeaurora.org>	2017-01-09 13:56:33 +0530
committer	Pavankumar Kondeti <pkondeti@codeaurora.org>	2017-02-01 09:16:39 +0530
commit		b559daa261b6b12958259f06ae776f140afa92db (patch)
tree		c3047d27865220f205fd1eddf9106f374a65f4fe /kernel/sched/sched.h
parent		a874c1606fa22a4add4242cc8c5dfca500410f5d (diff)
sched: maintain group busy time counters in runqueue
There is no advantage to tracking busy time counters per related thread
group: what we need is the busy time across all groups, either for a CPU
or for a frequency domain. Hence maintain the group busy time counters in
the runqueue itself. When the CPU window is rolled over, the group busy
counters are rolled over with it, which eliminates the overhead of
maintaining a separate window_start per group. As related thread groups
are now preallocated, this patch saves 40 * nr_cpu_ids * (nr_grp - 1)
bytes of memory (the removed per-group struct group_cpu_time carried five
u64 fields, i.e. 40 bytes, allocated per CPU for every group).

Change-Id: Ieaaccea483b377f54ea1761e6939ee23a78a5e9c
Signed-off-by: Pavankumar Kondeti <pkondeti@codeaurora.org>
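For illustration only, a minimal sketch (not part of this patch) of the
rollover described above: with grp_time embedded in struct rq, a window
rollover can shift the group counters in the same step that shifts the
per-CPU counters, under rq->lock. The helper name below is hypothetical;
the field names are the ones this patch touches.

/*
 * Illustrative sketch, not from this patch: roll the CPU window and the
 * embedded group counters over in one place.
 */
static void rollover_cpu_window(struct rq *rq)
{
	/* Per-CPU counters: the current window becomes the previous one. */
	rq->prev_runnable_sum = rq->curr_runnable_sum;
	rq->nt_prev_runnable_sum = rq->nt_curr_runnable_sum;
	rq->curr_runnable_sum = 0;
	rq->nt_curr_runnable_sum = 0;

	/*
	 * The group counters roll over at the same moment, so no
	 * per-group window_start needs to be maintained.
	 */
	rq->grp_time.prev_runnable_sum = rq->grp_time.curr_runnable_sum;
	rq->grp_time.nt_prev_runnable_sum = rq->grp_time.nt_curr_runnable_sum;
	rq->grp_time.curr_runnable_sum = 0;
	rq->grp_time.nt_curr_runnable_sum = 0;
}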
Diffstat (limited to 'kernel/sched/sched.h')
-rw-r--r--	kernel/sched/sched.h	22
1 file changed, 8 insertions(+), 14 deletions(-)
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index d907eeb297a3..3e2ef7b0df3e 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -366,6 +366,13 @@ struct load_subtractions {
 	u64 new_subs;
 };
 
+struct group_cpu_time {
+	u64 curr_runnable_sum;
+	u64 prev_runnable_sum;
+	u64 nt_curr_runnable_sum;
+	u64 nt_prev_runnable_sum;
+};
+
 struct sched_cluster {
 	raw_spinlock_t load_lock;
 	struct list_head list;
@@ -407,12 +414,6 @@ struct related_thread_group {
 	struct sched_cluster *preferred_cluster;
 	struct rcu_head rcu;
 	u64 last_update;
-	struct group_cpu_time __percpu *cpu_time;	/* one per cluster */
-};
-
-struct migration_sum_data {
-	struct rq *src_rq, *dst_rq;
-	struct group_cpu_time *src_cpu_time, *dst_cpu_time;
 };
 
 extern struct list_head cluster_head;
@@ -776,6 +777,7 @@ struct rq {
 	u64 prev_runnable_sum;
 	u64 nt_curr_runnable_sum;
 	u64 nt_prev_runnable_sum;
+	struct group_cpu_time grp_time;
 	struct load_subtractions load_subs[NUM_TRACKED_WINDOWS];
 	DECLARE_BITMAP_ARRAY(top_tasks_bitmap,
 			NUM_TRACKED_WINDOWS, NUM_LOAD_INDICES);
@@ -1350,14 +1352,6 @@ check_for_freq_change(struct rq *rq, bool check_pred, bool check_groups);
 extern void notify_migration(int src_cpu, int dest_cpu,
 			     bool src_cpu_dead, struct task_struct *p);
 
-struct group_cpu_time {
-	u64 curr_runnable_sum;
-	u64 prev_runnable_sum;
-	u64 nt_curr_runnable_sum;
-	u64 nt_prev_runnable_sum;
-	u64 window_start;
-};
-
 /* Is frequency of two cpus synchronized with each other? */
 static inline int same_freq_domain(int src_cpu, int dst_cpu)
 {
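Likewise, a hedged sketch of the consumer side (not from this patch):
with the counters consolidated in the runqueue, the combined busy time a
CPU or frequency domain would report is simply the per-CPU sum plus the
group sum. The helper name is again hypothetical; only the field names
come from the diff above.

/*
 * Hypothetical helper, not from this patch: combined busy time of the
 * previous window for one CPU, per-CPU plus colocated-group contribution.
 */
static inline u64 total_prev_runnable_sum(struct rq *rq)
{
	return rq->prev_runnable_sum + rq->grp_time.prev_runnable_sum;
}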