| author | Pavankumar Kondeti <pkondeti@codeaurora.org> | 2017-01-07 14:59:26 +0530 |
|---|---|---|
| committer | Pavankumar Kondeti <pkondeti@codeaurora.org> | 2017-01-19 12:30:19 +0530 |
| commit | 6d63f38bf2832a353cab15d9070fe8171b03bd22 (patch) | |
| tree | 74fe960a58e661a04fdab8e1c1edfbe574cecd94 /kernel | |
| parent | ebc5196e3eb88a2f28ef461caacf62d4459477d8 (diff) | |
sched: kill sync_cpu maintenance
We treat the boot CPU as the sync CPU and initialize its window_start
to sched_ktime_clock(). As windows are synchronized across all CPUs,
each secondary CPU's window_start is initialized from the sync CPU's
window_start. A CPU's window_start is never reset, so this
synchronization happens only once for a given CPU. Given this, there
is no need to reassign the sync_cpu role to another CPU when the boot
CPU goes offline. Remove the unnecessary maintenance of sync_cpu and
instead use any online CPU's window_start as the reference.
Change-Id: I169a8e80573c6dbcb1edeab0659c07c17102f4c9
Signed-off-by: Pavankumar Kondeti <pkondeti@codeaurora.org>
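The new set_window_start() logic is easy to see in miniature. Below is a
user-space sketch of the scheme, for illustration only: the first caller
seeds the window from the clock, and every later CPU copies the
never-reset value from an already-initialized peer. The types, the
`now()` helper, and the linear scan are stand-ins, not kernel APIs (the
real code uses struct rq, sched_ktime_clock() and
cpumask_any(cpu_online_mask), and takes the runqueue locks, all omitted
here).

```c
#include <stdint.h>
#include <time.h>

#define NR_CPUS 8

struct rq_sketch {
	uint64_t window_start;	/* 0 means "not yet initialized" */
};

static struct rq_sketch runqueues[NR_CPUS];

/* Stand-in for sched_ktime_clock(): a monotonic timestamp in ns. */
static uint64_t now(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return (uint64_t)ts.tv_sec * 1000000000ull + ts.tv_nsec;
}

static void set_window_start_sketch(int cpu)
{
	/* Set once by whichever CPU gets here first; never handed off. */
	static int sync_cpu_available;
	struct rq_sketch *rq = &runqueues[cpu];

	if (rq->window_start)
		return;			/* window_start is never reset */

	if (!sync_cpu_available) {
		rq->window_start = now();
		sync_cpu_available = 1;
	} else {
		/*
		 * Any initialized CPU is a valid reference, since all
		 * windows stay in sync. Stand-in for picking a runqueue
		 * via cpumask_any(cpu_online_mask).
		 */
		for (int i = 0; i < NR_CPUS; i++) {
			if (runqueues[i].window_start) {
				rq->window_start = runqueues[i].window_start;
				break;
			}
		}
	}
}
```

Because window_start is written once and never reset, any initialized
online CPU is as good a reference as the old dedicated sync_cpu, which
is what lets the hotplug bookkeeping below go away.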
Diffstat (limited to 'kernel')
| -rw-r--r-- | kernel/sched/core.c | 2 |
| -rw-r--r-- | kernel/sched/hmp.c | 18 |
| -rw-r--r-- | kernel/sched/sched.h | 2 |
3 files changed, 6 insertions, 16 deletions
```diff
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 76cbd55e99ac..519aee32e122 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -5905,7 +5905,6 @@ int sched_isolate_cpu(int cpu)
 	smp_call_function_any(&avail_cpus, hrtimer_quiesce_cpu, &cpu, 1);
 	smp_call_function_any(&avail_cpus, timer_quiesce_cpu, &cpu, 1);
 
-	migrate_sync_cpu(cpu, cpumask_first(&avail_cpus));
 	stop_cpus(cpumask_of(cpu), do_isolation_work_cpu_stop, 0);
 
 	calc_load_migrate(rq);
@@ -6286,7 +6285,6 @@ migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu)
 		sched_ttwu_pending();
 		/* Update our root-domain */
 		raw_spin_lock_irqsave(&rq->lock, flags);
-		migrate_sync_cpu(cpu, smp_processor_id());
 
 		if (rq->rd) {
 			BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span));
diff --git a/kernel/sched/hmp.c b/kernel/sched/hmp.c
index 443f16732414..433d19663496 100644
--- a/kernel/sched/hmp.c
+++ b/kernel/sched/hmp.c
@@ -788,8 +788,6 @@ __read_mostly unsigned int sched_ravg_window = MIN_SCHED_RAVG_WINDOW;
 /* Temporarily disable window-stats activity on all cpus */
 unsigned int __read_mostly sched_disable_window_stats;
 
-static unsigned int sync_cpu;
-
 struct related_thread_group *related_thread_groups[MAX_NUM_CGROUP_COLOC_ID];
 static LIST_HEAD(active_related_thread_groups);
 static DEFINE_RWLOCK(related_thread_group_lock);
@@ -3015,18 +3013,20 @@ void mark_task_starting(struct task_struct *p)
 
 void set_window_start(struct rq *rq)
 {
-	int cpu = cpu_of(rq);
-	struct rq *sync_rq = cpu_rq(sync_cpu);
+	static int sync_cpu_available;
 
 	if (rq->window_start || !sched_enable_hmp)
 		return;
 
-	if (cpu == sync_cpu) {
+	if (!sync_cpu_available) {
 		rq->window_start = sched_ktime_clock();
+		sync_cpu_available = 1;
 	} else {
+		struct rq *sync_rq = cpu_rq(cpumask_any(cpu_online_mask));
+
 		raw_spin_unlock(&rq->lock);
 		double_rq_lock(rq, sync_rq);
-		rq->window_start = cpu_rq(sync_cpu)->window_start;
+		rq->window_start = sync_rq->window_start;
 		rq->curr_runnable_sum = rq->prev_runnable_sum = 0;
 		rq->nt_curr_runnable_sum = rq->nt_prev_runnable_sum = 0;
 		raw_spin_unlock(&sync_rq->lock);
@@ -3035,12 +3035,6 @@ void set_window_start(struct rq *rq)
 	rq->curr->ravg.mark_start = rq->window_start;
 }
 
-void migrate_sync_cpu(int cpu, int new_cpu)
-{
-	if (cpu == sync_cpu)
-		sync_cpu = new_cpu;
-}
-
 static void reset_all_task_stats(void)
 {
 	struct task_struct *g, *p;
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index a9d98b7dd10e..d907eeb297a3 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -1123,7 +1123,6 @@ extern void clear_boost_kick(int cpu);
 extern void clear_hmp_request(int cpu);
 extern void mark_task_starting(struct task_struct *p);
 extern void set_window_start(struct rq *rq);
-extern void migrate_sync_cpu(int cpu, int new_cpu);
 extern void update_cluster_topology(void);
 extern void note_task_waking(struct task_struct *p, u64 wallclock);
 extern void set_task_last_switch_out(struct task_struct *p, u64 wallclock);
@@ -1525,7 +1524,6 @@ static inline void clear_boost_kick(int cpu) { }
 static inline void clear_hmp_request(int cpu) { }
 static inline void mark_task_starting(struct task_struct *p) { }
 static inline void set_window_start(struct rq *rq) { }
-static inline void migrate_sync_cpu(int cpu, int new_cpu) {}
 static inline void init_clusters(void) {}
 static inline void update_cluster_topology(void) { }
 static inline void note_task_waking(struct task_struct *p, u64 wallclock) { }
```
