author		Pavankumar Kondeti <pkondeti@codeaurora.org>	2017-01-07 14:59:26 +0530
committer	Pavankumar Kondeti <pkondeti@codeaurora.org>	2017-01-19 12:30:19 +0530
commit		6d63f38bf2832a353cab15d9070fe8171b03bd22 (patch)
tree		74fe960a58e661a04fdab8e1c1edfbe574cecd94 /kernel/sched/hmp.c
parent		ebc5196e3eb88a2f28ef461caacf62d4459477d8 (diff)
sched: kill sync_cpu maintenance
The boot CPU is assumed to be the sync CPU, and its window_start is initialized to sched_ktime_clock(). As windows are synchronized across all CPUs, each secondary CPU's window_start is initialized from the sync CPU's window_start. A CPU's window_start is never reset, so this synchronization happens only once per CPU. Given this, there is no need to reassign the sync_cpu role to another CPU when the boot CPU goes offline. Remove the unnecessary sync_cpu maintenance and use any online CPU's window_start as the reference.

Change-Id: I169a8e80573c6dbcb1edeab0659c07c17102f4c9
Signed-off-by: Pavankumar Kondeti <pkondeti@codeaurora.org>
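An aside on why a single reference value suffices: with a common window_start and a fixed window length (sched_ravg_window in this file), every CPU computes identical window boundaries, so copying window_start once from any online CPU keeps a newly onlined CPU aligned. A minimal sketch of that arithmetic; window_index() is a hypothetical helper, not part of the patch, and 'now'/'window_start' are nanosecond timestamps:

/*
 * Sketch only: given a shared window_start and window length, the
 * window index of any timestamp is the same on every CPU, so per-CPU
 * windows stay aligned after window_start is copied once.
 */
static inline u64 window_index(u64 now, u64 window_start, u32 window_size)
{
	return (now - window_start) / window_size;
}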
Diffstat (limited to 'kernel/sched/hmp.c')
-rw-r--r--	kernel/sched/hmp.c	18
1 file changed, 6 insertions(+), 12 deletions(-)
diff --git a/kernel/sched/hmp.c b/kernel/sched/hmp.c
index 443f16732414..433d19663496 100644
--- a/kernel/sched/hmp.c
+++ b/kernel/sched/hmp.c
@@ -788,8 +788,6 @@ __read_mostly unsigned int sched_ravg_window = MIN_SCHED_RAVG_WINDOW;
 /* Temporarily disable window-stats activity on all cpus */
 unsigned int __read_mostly sched_disable_window_stats;
 
-static unsigned int sync_cpu;
-
 struct related_thread_group *related_thread_groups[MAX_NUM_CGROUP_COLOC_ID];
 static LIST_HEAD(active_related_thread_groups);
 static DEFINE_RWLOCK(related_thread_group_lock);
@@ -3015,18 +3013,20 @@ void mark_task_starting(struct task_struct *p)
 
 void set_window_start(struct rq *rq)
 {
-	int cpu = cpu_of(rq);
-	struct rq *sync_rq = cpu_rq(sync_cpu);
+	static int sync_cpu_available;
 
 	if (rq->window_start || !sched_enable_hmp)
 		return;
 
-	if (cpu == sync_cpu) {
+	if (!sync_cpu_available) {
 		rq->window_start = sched_ktime_clock();
+		sync_cpu_available = 1;
 	} else {
+		struct rq *sync_rq = cpu_rq(cpumask_any(cpu_online_mask));
+
 		raw_spin_unlock(&rq->lock);
 		double_rq_lock(rq, sync_rq);
-		rq->window_start = cpu_rq(sync_cpu)->window_start;
+		rq->window_start = sync_rq->window_start;
 		rq->curr_runnable_sum = rq->prev_runnable_sum = 0;
 		rq->nt_curr_runnable_sum = rq->nt_prev_runnable_sum = 0;
 		raw_spin_unlock(&sync_rq->lock);
@@ -3035,12 +3035,6 @@ void set_window_start(struct rq *rq)
 	rq->curr->ravg.mark_start = rq->window_start;
 }
 
-void migrate_sync_cpu(int cpu, int new_cpu)
-{
-	if (cpu == sync_cpu)
-		sync_cpu = new_cpu;
-}
-
 static void reset_all_task_stats(void)
 {
 	struct task_struct *g, *p;
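
A note on the locking in the new else-branch above: set_window_start() is entered with rq->lock held, but copying window_start from another runqueue requires both runqueue locks, so the patch drops rq->lock and retakes both via double_rq_lock(), which acquires the two locks in a fixed order to avoid deadlock. A hedged sketch of that pattern, isolated from the rest of the function; copy_ws() is a hypothetical helper, not part of the patch:

/*
 * Sketch only: transfer a field from sync_rq to rq while holding both
 * runqueue locks.  double_rq_lock() orders the two acquisitions, so
 * two CPUs running this concurrently cannot deadlock.  On return,
 * rq->lock is still held, matching what the caller expects.
 */
static void copy_ws(struct rq *rq, struct rq *sync_rq)
{
	raw_spin_unlock(&rq->lock);		/* caller entered with rq->lock */
	double_rq_lock(rq, sync_rq);		/* take both, in lock order */
	rq->window_start = sync_rq->window_start;
	raw_spin_unlock(&sync_rq->lock);	/* leave with only rq->lock held */
}

Because any online CPU's window_start is equally valid as a reference, cpumask_any(cpu_online_mask) can pick the reference runqueue arbitrarily, which is what makes the old sync_cpu bookkeeping (and its hotplug migration) unnecessary.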