author     Syed Rameez Mustafa <rameezmustafa@codeaurora.org>  2016-07-28 19:45:56 -0700
committer  Syed Rameez Mustafa <rameezmustafa@codeaurora.org>  2016-08-22 14:06:32 -0700
commit     7663fb1d6e7f417c1127f8010eddf080b4fc6a24 (patch)
tree       703084e45421bb9bef3b96f8a53ad2225ecfa2b0 /kernel
parent     b01a93838d1ff0caf8057f852c437f95e798ccc6 (diff)
sched: Consolidate CONFIG_SCHED_HMP sections in various files

Code sections guarded by either CONFIG_SCHED_HMP or !CONFIG_SCHED_HMP
have become quite fragmented over time. Some of these fragmented
sections are necessary because of code dependencies; others can easily
be consolidated. Do so in order to make kernel upgrades a lot simpler.

Change-Id: I6be476834ce70274aec5a52fd9455b5f0065af87
Signed-off-by: Syed Rameez Mustafa <rameezmustafa@codeaurora.org>
Diffstat (limited to 'kernel')
-rw-r--r--  kernel/sched/core.c  | 113
-rw-r--r--  kernel/sched/debug.c |   2
-rw-r--r--  kernel/sched/fair.c  | 228
3 files changed, 157 insertions, 186 deletions
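
For readers following the consolidation below: the patch collects scattered #ifdef CONFIG_SCHED_HMP / #else fragments into one block per file. A minimal sketch of the resulting layout, using a hypothetical CONFIG_FEATURE_X rather than the real scheduler symbols:

/*
 * Consolidated layout: all real implementations live in one #ifdef
 * block and all no-op stubs in the matching #else, instead of one
 * #ifdef per function scattered through the file. A kernel upgrade
 * then only has to merge around two well-known regions.
 */
#ifdef CONFIG_FEATURE_X
static void feature_a(void) { /* real work when the feature is on */ }
static int feature_b(void) { return 1; }
#else
static inline void feature_a(void) { }
static inline int feature_b(void) { return 0; }
#endif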
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index f217924c10f2..20b42f8d6f67 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -1591,7 +1591,6 @@ static void add_cluster(const struct cpumask *cpus, struct list_head *head)
num_clusters++;
}
-#ifdef CONFIG_SMP
static void update_cluster_topology(void)
{
struct cpumask cpus = *cpu_possible_mask;
@@ -1616,7 +1615,6 @@ static void update_cluster_topology(void)
*/
move_list(&cluster_head, &new_head, false);
}
-#endif
static void init_clusters(void)
{
@@ -1722,32 +1720,6 @@ unsigned int sched_get_static_cluster_pwr_cost(int cpu)
return cpu_rq(cpu)->cluster->static_cluster_pwr_cost;
}
-#else /* CONFIG_SCHED_HMP */
-
-static inline int got_boost_kick(void)
-{
- return 0;
-}
-
-static inline void clear_boost_kick(int cpu) { }
-
-static inline void clear_hmp_request(int cpu) { }
-
-int register_cpu_cycle_counter_cb(struct cpu_cycle_counter_cb *cb)
-{
- return 0;
-}
-
-#ifdef CONFIG_SMP
-static void update_cluster_topology(void) { }
-#endif
-
-#endif /* CONFIG_SCHED_HMP */
-
-#define SCHED_MIN_FREQ 1
-
-#if defined(CONFIG_SCHED_HMP)
-
/*
* sched_window_stats_policy and sched_ravg_hist_size have a 'sysctl' copy
* associated with them. This is required for atomic update of those variables
@@ -4166,9 +4138,50 @@ static inline int update_preferred_cluster(struct related_thread_group *grp,
return 0;
}
-#else /* CONFIG_SCHED_HMP */
+static bool early_detection_notify(struct rq *rq, u64 wallclock)
+{
+ struct task_struct *p;
+ int loop_max = 10;
+
+ if (!sched_boost() || !rq->cfs.h_nr_running)
+ return 0;
+
+ rq->ed_task = NULL;
+ list_for_each_entry(p, &rq->cfs_tasks, se.group_node) {
+ if (!loop_max)
+ break;
+
+ if (wallclock - p->last_wake_ts >= EARLY_DETECTION_DURATION) {
+ rq->ed_task = p;
+ return 1;
+ }
+
+ loop_max--;
+ }
+
+ return 0;
+}
+
+#else /* CONFIG_SCHED_HMP */
static inline void fixup_busy_time(struct task_struct *p, int new_cpu) { }
+static inline void clear_boost_kick(int cpu) { }
+static inline void clear_hmp_request(int cpu) { }
+static inline void mark_task_starting(struct task_struct *p) {}
+static inline void set_window_start(struct rq *rq) {}
+static inline void migrate_sync_cpu(int cpu) {}
+
+static inline int got_boost_kick(void)
+{
+ return 0;
+}
+
+int register_cpu_cycle_counter_cb(struct cpu_cycle_counter_cb *cb)
+{
+ return 0;
+}
+
+static inline void update_cluster_topology(void) { }
static void
update_task_ravg(struct task_struct *p, struct rq *rq,
@@ -4176,11 +4189,10 @@ update_task_ravg(struct task_struct *p, struct rq *rq,
{
}
-static inline void mark_task_starting(struct task_struct *p) {}
-
-static inline void set_window_start(struct rq *rq) {}
-
-static inline void migrate_sync_cpu(int cpu) {}
+static bool early_detection_notify(struct rq *rq, u64 wallclock)
+{
+ return 0;
+}
#endif /* CONFIG_SCHED_HMP */
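
The !CONFIG_SCHED_HMP branch above supplies empty stubs precisely so that call sites need no conditional compilation of their own. A hypothetical call site (not part of this patch) illustrating the effect:

/*
 * Illustrative only: because a stub early_detection_notify() exists
 * when CONFIG_SCHED_HMP is off, the caller needs no #ifdef and the
 * compiler discards the dead branch entirely.
 */
static void check_for_early_detection(struct rq *rq, u64 wallclock)
{
	if (early_detection_notify(rq, wallclock))
		resched_curr(rq);	/* hypothetical response */
}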
@@ -6128,37 +6140,6 @@ unsigned long long task_sched_runtime(struct task_struct *p)
return ns;
}
-#ifdef CONFIG_SCHED_HMP
-static bool early_detection_notify(struct rq *rq, u64 wallclock)
-{
- struct task_struct *p;
- int loop_max = 10;
-
- if (!sched_boost() || !rq->cfs.h_nr_running)
- return 0;
-
- rq->ed_task = NULL;
- list_for_each_entry(p, &rq->cfs_tasks, se.group_node) {
- if (!loop_max)
- break;
-
- if (wallclock - p->last_wake_ts >= EARLY_DETECTION_DURATION) {
- rq->ed_task = p;
- return 1;
- }
-
- loop_max--;
- }
-
- return 0;
-}
-#else /* CONFIG_SCHED_HMP */
-static bool early_detection_notify(struct rq *rq, u64 wallclock)
-{
- return 0;
-}
-#endif /* CONFIG_SCHED_HMP */
-
/*
* This function gets called by the timer code, with HZ frequency.
* We call it with interrupts disabled.
@@ -10865,7 +10846,7 @@ void __init sched_init(void)
rq->avg_irqload = 0;
rq->irqload_ts = 0;
rq->static_cpu_pwr_cost = 0;
- rq->cc.cycles = SCHED_MIN_FREQ;
+ rq->cc.cycles = 1;
rq->cc.time = 1;
rq->cstate = 0;
rq->wakeup_latency = 0;
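
Note the last hunk: with the single-use SCHED_MIN_FREQ macro deleted earlier in this file, sched_init() now seeds rq->cc.cycles and rq->cc.time with the literal 1. The cycle counter presumably feeds a cycles/time frequency estimate, so starting both fields at 1 keeps the first division well-defined; a sketch of that assumption:

/*
 * Assumption, not shown in this diff: the per-rq cycle counter is
 * consumed as a cycles/time ratio. Seeding both fields with 1 yields
 * a harmless initial ratio and avoids a divide-by-zero before the
 * first real sample is recorded.
 */
static inline u64 sketch_cpu_freq(struct rq *rq)
{
	return rq->cc.cycles / rq->cc.time;	/* hypothetical helper */
}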
diff --git a/kernel/sched/debug.c b/kernel/sched/debug.c
index fc9878eee5df..b6dc131f36a6 100644
--- a/kernel/sched/debug.c
+++ b/kernel/sched/debug.c
@@ -327,8 +327,6 @@ do { \
P(cluster->cur_freq);
P(cluster->max_freq);
P(cluster->exec_scale_factor);
-#endif
-#ifdef CONFIG_SCHED_HMP
P(hmp_stats.nr_big_tasks);
SEQ_printf(m, " .%-30s: %llu\n", "hmp_stats.cumulative_runnable_avg",
rq->hmp_stats.cumulative_runnable_avg);
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index a4f3af6fc175..4806ec37035d 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -4177,6 +4177,95 @@ unsigned int cpu_temp(int cpu)
return 0;
}
+void init_new_task_load(struct task_struct *p)
+{
+ int i;
+ u32 init_load_windows = sched_init_task_load_windows;
+ u32 init_load_pct = current->init_load_pct;
+
+ p->init_load_pct = 0;
+ rcu_assign_pointer(p->grp, NULL);
+ INIT_LIST_HEAD(&p->grp_list);
+ memset(&p->ravg, 0, sizeof(struct ravg));
+ p->cpu_cycles = 0;
+
+ if (init_load_pct)
+ init_load_windows = div64_u64((u64)init_load_pct *
+ (u64)sched_ravg_window, 100);
+
+ p->ravg.demand = init_load_windows;
+ p->ravg.pred_demand = 0;
+ for (i = 0; i < RAVG_HIST_SIZE_MAX; ++i)
+ p->ravg.sum_history[i] = init_load_windows;
+}
+
+/* Return task demand in percentage scale */
+unsigned int pct_task_load(struct task_struct *p)
+{
+ unsigned int load;
+
+ load = div64_u64((u64)task_load(p) * 100, (u64)max_task_load());
+
+ return load;
+}
+
+#ifdef CONFIG_CFS_BANDWIDTH
+
+static void init_cfs_rq_hmp_stats(struct cfs_rq *cfs_rq)
+{
+ cfs_rq->hmp_stats.nr_big_tasks = 0;
+ cfs_rq->hmp_stats.cumulative_runnable_avg = 0;
+ cfs_rq->hmp_stats.pred_demands_sum = 0;
+}
+
+static void inc_cfs_rq_hmp_stats(struct cfs_rq *cfs_rq,
+ struct task_struct *p, int change_cra)
+{
+ inc_nr_big_task(&cfs_rq->hmp_stats, p);
+ if (change_cra)
+ inc_cumulative_runnable_avg(&cfs_rq->hmp_stats, p);
+}
+
+static void dec_cfs_rq_hmp_stats(struct cfs_rq *cfs_rq,
+ struct task_struct *p, int change_cra)
+{
+ dec_nr_big_task(&cfs_rq->hmp_stats, p);
+ if (change_cra)
+ dec_cumulative_runnable_avg(&cfs_rq->hmp_stats, p);
+}
+
+static void inc_throttled_cfs_rq_hmp_stats(struct hmp_sched_stats *stats,
+ struct cfs_rq *cfs_rq)
+{
+ stats->nr_big_tasks += cfs_rq->hmp_stats.nr_big_tasks;
+ stats->cumulative_runnable_avg +=
+ cfs_rq->hmp_stats.cumulative_runnable_avg;
+ stats->pred_demands_sum += cfs_rq->hmp_stats.pred_demands_sum;
+}
+
+static void dec_throttled_cfs_rq_hmp_stats(struct hmp_sched_stats *stats,
+ struct cfs_rq *cfs_rq)
+{
+ stats->nr_big_tasks -= cfs_rq->hmp_stats.nr_big_tasks;
+ stats->cumulative_runnable_avg -=
+ cfs_rq->hmp_stats.cumulative_runnable_avg;
+ stats->pred_demands_sum -= cfs_rq->hmp_stats.pred_demands_sum;
+
+ BUG_ON(stats->nr_big_tasks < 0 ||
+ (s64)stats->cumulative_runnable_avg < 0);
+ verify_pred_demands_sum(stats);
+}
+
+#else /* CONFIG_CFS_BANDWIDTH */
+
+static inline void inc_cfs_rq_hmp_stats(struct cfs_rq *cfs_rq,
+ struct task_struct *p, int change_cra) { }
+
+static inline void dec_cfs_rq_hmp_stats(struct cfs_rq *cfs_rq,
+ struct task_struct *p, int change_cra) { }
+
+#endif /* CONFIG_CFS_BANDWIDTH */
+
#else /* CONFIG_SCHED_HMP */
struct cpu_select_env;
@@ -4256,9 +4345,27 @@ static inline struct sched_cluster *rq_cluster(struct rq *rq)
return NULL;
}
-#endif /* CONFIG_SCHED_HMP */
+void init_new_task_load(struct task_struct *p) { }
+
+static inline void init_cfs_rq_hmp_stats(struct cfs_rq *cfs_rq) { }
+static inline void inc_cfs_rq_hmp_stats(struct cfs_rq *cfs_rq,
+ struct task_struct *p, int change_cra) { }
+static inline void dec_cfs_rq_hmp_stats(struct cfs_rq *cfs_rq,
+ struct task_struct *p, int change_cra) { }
+
+static inline void inc_throttled_cfs_rq_hmp_stats(struct hmp_sched_stats *stats,
+ struct cfs_rq *cfs_rq)
+{
+}
+
+static inline void dec_throttled_cfs_rq_hmp_stats(struct hmp_sched_stats *stats,
+ struct cfs_rq *cfs_rq)
+{
+}
+
+#endif /* CONFIG_SCHED_HMP */
#if (SCHED_LOAD_SHIFT - SCHED_LOAD_RESOLUTION) != 10 || SCHED_CAPACITY_SHIFT != 10
#error "load tracking assumes 2^10 as unit"
@@ -4628,130 +4735,15 @@ inc_rq_hmp_stats(struct rq *rq, struct task_struct *p, int change_cra) { }
static inline void
dec_rq_hmp_stats(struct rq *rq, struct task_struct *p, int change_cra) { }
-#endif /* CONFIG_SMP */
-
-#ifdef CONFIG_SCHED_HMP
-
-void init_new_task_load(struct task_struct *p)
-{
- int i;
- u32 init_load_windows = sched_init_task_load_windows;
- u32 init_load_pct = current->init_load_pct;
-
- p->init_load_pct = 0;
- rcu_assign_pointer(p->grp, NULL);
- INIT_LIST_HEAD(&p->grp_list);
- memset(&p->ravg, 0, sizeof(struct ravg));
- p->cpu_cycles = 0;
-
- if (init_load_pct)
- init_load_windows = div64_u64((u64)init_load_pct *
- (u64)sched_ravg_window, 100);
-
- p->ravg.demand = init_load_windows;
- p->ravg.pred_demand = 0;
- for (i = 0; i < RAVG_HIST_SIZE_MAX; ++i)
- p->ravg.sum_history[i] = init_load_windows;
-}
-
-#else /* CONFIG_SCHED_HMP */
-
-void init_new_task_load(struct task_struct *p)
-{
-}
-
-#endif /* CONFIG_SCHED_HMP */
-
-#ifdef CONFIG_SCHED_HMP
-
-/* Return task demand in percentage scale */
-unsigned int pct_task_load(struct task_struct *p)
-{
- unsigned int load;
-
- load = div64_u64((u64)task_load(p) * 100, (u64)max_task_load());
-
- return load;
-}
-
-#ifdef CONFIG_CFS_BANDWIDTH
-
-static void init_cfs_rq_hmp_stats(struct cfs_rq *cfs_rq)
-{
- cfs_rq->hmp_stats.nr_big_tasks = 0;
- cfs_rq->hmp_stats.cumulative_runnable_avg = 0;
- cfs_rq->hmp_stats.pred_demands_sum = 0;
-}
-
-static void inc_cfs_rq_hmp_stats(struct cfs_rq *cfs_rq,
- struct task_struct *p, int change_cra)
-{
- inc_nr_big_task(&cfs_rq->hmp_stats, p);
- if (change_cra)
- inc_cumulative_runnable_avg(&cfs_rq->hmp_stats, p);
-}
-
-static void dec_cfs_rq_hmp_stats(struct cfs_rq *cfs_rq,
- struct task_struct *p, int change_cra)
-{
- dec_nr_big_task(&cfs_rq->hmp_stats, p);
- if (change_cra)
- dec_cumulative_runnable_avg(&cfs_rq->hmp_stats, p);
-}
-
-static void inc_throttled_cfs_rq_hmp_stats(struct hmp_sched_stats *stats,
- struct cfs_rq *cfs_rq)
-{
- stats->nr_big_tasks += cfs_rq->hmp_stats.nr_big_tasks;
- stats->cumulative_runnable_avg +=
- cfs_rq->hmp_stats.cumulative_runnable_avg;
- stats->pred_demands_sum += cfs_rq->hmp_stats.pred_demands_sum;
-}
-
-static void dec_throttled_cfs_rq_hmp_stats(struct hmp_sched_stats *stats,
- struct cfs_rq *cfs_rq)
-{
- stats->nr_big_tasks -= cfs_rq->hmp_stats.nr_big_tasks;
- stats->cumulative_runnable_avg -=
- cfs_rq->hmp_stats.cumulative_runnable_avg;
- stats->pred_demands_sum -= cfs_rq->hmp_stats.pred_demands_sum;
-
- BUG_ON(stats->nr_big_tasks < 0 ||
- (s64)stats->cumulative_runnable_avg < 0);
- verify_pred_demands_sum(stats);
-}
-
-#else /* CONFIG_CFS_BANDWIDTH */
-
static inline void inc_cfs_rq_hmp_stats(struct cfs_rq *cfs_rq,
struct task_struct *p, int change_cra) { }
static inline void dec_cfs_rq_hmp_stats(struct cfs_rq *cfs_rq,
struct task_struct *p, int change_cra) { }
-#endif /* CONFIG_CFS_BANDWIDTH */
-
-#else /* CONFIG_SCHED_HMP */
-
-static inline void init_cfs_rq_hmp_stats(struct cfs_rq *cfs_rq) { }
+void init_new_task_load(struct task_struct *p) { }
-static inline void inc_cfs_rq_hmp_stats(struct cfs_rq *cfs_rq,
- struct task_struct *p, int change_cra) { }
-
-static inline void dec_cfs_rq_hmp_stats(struct cfs_rq *cfs_rq,
- struct task_struct *p, int change_cra) { }
-
-static inline void inc_throttled_cfs_rq_hmp_stats(struct hmp_sched_stats *stats,
- struct cfs_rq *cfs_rq)
-{
-}
-
-static inline void dec_throttled_cfs_rq_hmp_stats(struct hmp_sched_stats *stats,
- struct cfs_rq *cfs_rq)
-{
-}
-
-#endif /* CONFIG_SCHED_HMP */
+#endif /* CONFIG_SMP */
static void enqueue_sleeper(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
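
For context on the inc/dec_throttled_cfs_rq_hmp_stats() pair consolidated above: under CFS bandwidth control a throttled group's tasks stop running but must stay accounted, so the two helpers fold a throttled cfs_rq's HMP statistics into an aggregate and later back out. A schematic of that symmetric transfer with simplified, self-contained types (the real call sites are outside this diff):

/* Simplified sketch of the aggregate-transfer pattern. */
struct hmp_stats_sketch {
	long nr_big_tasks;
	unsigned long long cumulative_runnable_avg;
};

static void fold_in(struct hmp_stats_sketch *agg,
		    const struct hmp_stats_sketch *grp)
{
	agg->nr_big_tasks += grp->nr_big_tasks;
	agg->cumulative_runnable_avg += grp->cumulative_runnable_avg;
}

/*
 * The inverse must mirror fold_in() exactly; this symmetry is what the
 * BUG_ON() in dec_throttled_cfs_rq_hmp_stats() enforces at runtime.
 */
static void fold_out(struct hmp_stats_sketch *agg,
		     const struct hmp_stats_sketch *grp)
{
	agg->nr_big_tasks -= grp->nr_big_tasks;
	agg->cumulative_runnable_avg -= grp->cumulative_runnable_avg;
}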