author    Rohit Gupta <rohgup@codeaurora.org>  2016-12-16 10:51:18 -0800
committer Rohit Gupta <rohgup@codeaurora.org>  2017-01-04 15:55:29 -0800
commit    f43931e8199aa34050240e83b48322c32510422e (patch)
tree      d8ca366ac7fa1bb23d478508adaf33ea69b8ae2a /kernel/sched/hmp.c
parent    e6993466cf9c3fe1c448ed56b3a888e5f80dc452 (diff)
sched: Delete heavy task heuristics in prediction code
Heavy task prediction code needs further tuning to avoid any negative
power impact. Delete the code for now, rather than adding tunables, to
avoid inefficiencies in the scheduler path.

Change-Id: I71e3b37a5c99e24bc5be93cc825d7e171e8ff7ce
Signed-off-by: Rohit Gupta <rohgup@codeaurora.org>
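For context, the heuristic being deleted let a task that consumed the
majority of a window "skip ahead" to a higher prediction bucket. Below is
a minimal standalone sketch of that logic, reconstructed from the removed
lines in the diff; the wrapper name heavy_task_bucket and the values for
NUM_BUSY_BUCKETS and CONSISTENT_THRES are assumptions for illustration,
not taken from the kernel source.

#include <stdint.h>

#define NUM_BUSY_BUCKETS	10	/* assumed bucket count */
#define CONSISTENT_THRES	16	/* assumed consistency threshold */
#define HEAVY_TASK_SKIP		2
#define HEAVY_TASK_SKIP_LIMIT	4

static int min_int(int a, int b) { return a < b ? a : b; }
static int max_int(int a, int b) { return a > b ? a : b; }

/*
 * Pick the prediction bucket for a task that ran for the majority of
 * the window: starting from the first non-empty bucket, skip up to
 * HEAVY_TASK_SKIP buckets, but never past a consistently-hit higher
 * bucket and never past HEAVY_TASK_SKIP_LIMIT.
 */
static int heavy_task_bucket(const uint8_t *buckets, int start, int first,
			     uint64_t cur_freq_runtime,
			     uint64_t major_task_runtime)
{
	int final = first;

	if (first < HEAVY_TASK_SKIP_LIMIT &&
	    cur_freq_runtime >= major_task_runtime) {
		int i, next = NUM_BUSY_BUCKETS, skip_to;

		/* stop at a higher bucket that is consistently hit */
		for (i = start + 1;
		     i <= HEAVY_TASK_SKIP_LIMIT && i < NUM_BUSY_BUCKETS; i++) {
			if (buckets[i] > CONSISTENT_THRES) {
				next = i;
				break;
			}
		}
		skip_to = min_int(next, start + HEAVY_TASK_SKIP);
		skip_to = min_int(HEAVY_TASK_SKIP_LIMIT, skip_to);
		/* don't go below the first non-empty bucket */
		final = max_int(first, skip_to);
	}
	return final;
}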
Diffstat (limited to 'kernel/sched/hmp.c')
-rw-r--r--  kernel/sched/hmp.c | 44
1 file changed, 1 insertion(+), 43 deletions(-)
diff --git a/kernel/sched/hmp.c b/kernel/sched/hmp.c
index ab5587309760..5b74d5758224 100644
--- a/kernel/sched/hmp.c
+++ b/kernel/sched/hmp.c
@@ -774,13 +774,6 @@ __read_mostly unsigned int sched_ravg_window = MIN_SCHED_RAVG_WINDOW;
 /* Temporarily disable window-stats activity on all cpus */
 unsigned int __read_mostly sched_disable_window_stats;
 
-/*
- * Major task runtime. If a task runs for more than sched_major_task_runtime
- * in a window, it's considered to be generating majority of workload
- * for this window. Prediction could be adjusted for such tasks.
- */
-__read_mostly unsigned int sched_major_task_runtime = 10000000;
-
 static unsigned int sync_cpu;
 
 struct related_thread_group *related_thread_groups[MAX_NUM_CGROUP_COLOC_ID];
@@ -1015,9 +1008,6 @@ void set_hmp_defaults(void)
 
 	update_up_down_migrate();
 
-	sched_major_task_runtime =
-		mult_frac(sched_ravg_window, MAJOR_TASK_PCT, 100);
-
 	sched_init_task_load_windows =
 		div64_u64((u64)sysctl_sched_init_task_load_pct *
 			  (u64)sched_ravg_window, 100);
@@ -1961,8 +1951,6 @@ scale_load_to_freq(u64 load, unsigned int src_freq, unsigned int dst_freq)
 	return div64_u64(load * (u64)src_freq, (u64)dst_freq);
 }
 
-#define HEAVY_TASK_SKIP 2
-#define HEAVY_TASK_SKIP_LIMIT 4
 /*
  * get_pred_busy - calculate predicted demand for a task on runqueue
  *
@@ -1990,7 +1978,7 @@ static u32 get_pred_busy(struct rq *rq, struct task_struct *p,
 	u32 *hist = p->ravg.sum_history;
 	u32 dmin, dmax;
 	u64 cur_freq_runtime = 0;
-	int first = NUM_BUSY_BUCKETS, final, skip_to;
+	int first = NUM_BUSY_BUCKETS, final;
 	u32 ret = runtime;
 
 	/* skip prediction for new tasks due to lack of history */
@@ -2010,36 +1998,6 @@ static u32 get_pred_busy(struct rq *rq, struct task_struct *p,
 
 	/* compute the bucket for prediction */
 	final = first;
-	if (first < HEAVY_TASK_SKIP_LIMIT) {
-		/* compute runtime at current CPU frequency */
-		cur_freq_runtime = mult_frac(runtime, max_possible_efficiency,
-					     rq->cluster->efficiency);
-		cur_freq_runtime = scale_load_to_freq(cur_freq_runtime,
-					max_possible_freq, rq->cluster->cur_freq);
-		/*
-		 * if the task runs for majority of the window, try to
-		 * pick higher buckets.
-		 */
-		if (cur_freq_runtime >= sched_major_task_runtime) {
-			int next = NUM_BUSY_BUCKETS;
-			/*
-			 * if there is a higher bucket that's consistently
-			 * hit, don't jump beyond that.
-			 */
-			for (i = start + 1; i <= HEAVY_TASK_SKIP_LIMIT &&
-			     i < NUM_BUSY_BUCKETS; i++) {
-				if (buckets[i] > CONSISTENT_THRES) {
-					next = i;
-					break;
-				}
-			}
-			skip_to = min(next, start + HEAVY_TASK_SKIP);
-			/* don't jump beyond HEAVY_TASK_SKIP_LIMIT */
-			skip_to = min(HEAVY_TASK_SKIP_LIMIT, skip_to);
-			/* don't go below first non-empty bucket, if any */
-			final = max(first, skip_to);
-		}
-	}
 
 	/* determine demand range for the predicted bucket */
 	if (final < 2) {
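As a closing note on the deleted threshold: the removed code compared a
runtime normalized for cluster efficiency and current CPU frequency
against sched_major_task_runtime (10 ms by default, later derived as
MAJOR_TASK_PCT percent of sched_ravg_window; the percentage value is not
shown in this excerpt). A small sketch of that normalization with
hypothetical numbers; all values below are illustrative, not from the
patch.

#include <stdint.h>
#include <stdio.h>

/* load * src / dst, mirroring the kernel's div64_u64-based helper */
static uint64_t scale_load_to_freq(uint64_t load, unsigned int src_freq,
				   unsigned int dst_freq)
{
	return load * src_freq / dst_freq;
}

int main(void)
{
	/* hypothetical values, not taken from the patch */
	uint64_t runtime = 6000000;		/* 6 ms of windowed demand */
	unsigned int max_possible_efficiency = 1024;
	unsigned int efficiency = 768;		/* slower cluster */
	unsigned int max_possible_freq = 2000000;	/* kHz */
	unsigned int cur_freq = 1000000;	/* kHz */

	/* scale for cluster efficiency: 6 ms * 1024/768 = 8 ms */
	uint64_t cur_freq_runtime =
		runtime * max_possible_efficiency / efficiency;
	/* then for current frequency: 8 ms * 2000000/1000000 = 16 ms */
	cur_freq_runtime = scale_load_to_freq(cur_freq_runtime,
					      max_possible_freq, cur_freq);

	/* 16 ms >= the removed 10 ms default => treated as a heavy task */
	printf("%s\n", cur_freq_runtime >= 10000000 ? "heavy" : "not heavy");
	return 0;
}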