author     Linux Build Service Account <lnxbuild@localhost>    2017-01-05 02:09:05 -0800
committer  Gerrit - the friendly Code Review server <code-review@localhost>    2017-01-05 02:09:05 -0800
commit     ed4ef900e9939b517069839827779b1f4a5387b8 (patch)
tree       142d4d94bd8b1a6afe1fc591f5657ab5b61590e7 /kernel/sched/hmp.c
parent     54aaf0328a9d7d5afd8ba7e34670ce4c81858d92 (diff)
parent     f43931e8199aa34050240e83b48322c32510422e (diff)
Merge "sched: Delete heavy task heuristics in prediction code"
Diffstat (limited to 'kernel/sched/hmp.c')
-rw-r--r--  kernel/sched/hmp.c  44
1 file changed, 1 insertion, 43 deletions
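
For readers skimming the hunks below: the removed heuristic treated a task as "major" when its runtime, rescaled for CPU efficiency and current frequency (via mult_frac() and scale_load_to_freq()), reached sched_major_task_runtime, which set_hmp_defaults() derived as MAJOR_TASK_PCT percent of sched_ravg_window. For such tasks get_pred_busy() bumped the predicted busy bucket upward, capped by HEAVY_TASK_SKIP, HEAVY_TASK_SKIP_LIMIT and the next consistently hit bucket. The standalone C sketch below restates only that bucket-skip step, reconstructed from the deleted lines; the constant values, the helper names (heavy_task_bucket, min_i, max_i) and the example in main() are illustrative placeholders, not kernel code.

/*
 * Standalone sketch of the removed "heavy task" bucket-skip heuristic,
 * reconstructed from the deleted lines in the diff below.  Constant values
 * and the example in main() are illustrative placeholders, and the runtime
 * threshold test (cur_freq_runtime >= sched_major_task_runtime) is assumed
 * to have already passed.
 */
#include <stdio.h>

#define NUM_BUSY_BUCKETS        10      /* placeholder bucket count */
#define HEAVY_TASK_SKIP         2
#define HEAVY_TASK_SKIP_LIMIT   4
#define CONSISTENT_THRES        16      /* placeholder consistency threshold */

static int min_i(int a, int b) { return a < b ? a : b; }
static int max_i(int a, int b) { return a > b ? a : b; }

/*
 * start:   bucket the current runtime falls into
 * first:   first non-empty bucket at or above start (the default prediction)
 * buckets: per-bucket hit counts from the task's history
 *
 * Returns the bucket the heuristic would have predicted for a task that ran
 * for the majority of the window.
 */
static int heavy_task_bucket(int start, int first, const unsigned char *buckets)
{
        int i, next = NUM_BUSY_BUCKETS, skip_to;

        if (first >= HEAVY_TASK_SKIP_LIMIT)
                return first;   /* history already points high enough */

        /* if a higher bucket is consistently hit, don't jump beyond it */
        for (i = start + 1; i <= HEAVY_TASK_SKIP_LIMIT && i < NUM_BUSY_BUCKETS; i++) {
                if (buckets[i] > CONSISTENT_THRES) {
                        next = i;
                        break;
                }
        }

        skip_to = min_i(next, start + HEAVY_TASK_SKIP);
        skip_to = min_i(HEAVY_TASK_SKIP_LIMIT, skip_to);  /* hard cap */
        return max_i(first, skip_to);   /* never below first non-empty bucket */
}

int main(void)
{
        unsigned char buckets[NUM_BUSY_BUCKETS] = { 0 };

        /* example: runtime lands in bucket 1, history's first hit is bucket 2 */
        printf("predicted bucket: %d\n", heavy_task_bucket(1, 2, buckets));
        return 0;
}

With all history buckets empty, the example jumps from bucket 1 to bucket 3 (start + HEAVY_TASK_SKIP); that boost is exactly what the merged change deletes, leaving the prediction to follow the bucket history alone.
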
diff --git a/kernel/sched/hmp.c b/kernel/sched/hmp.c
index f96ac736b452..d3547391b937 100644
--- a/kernel/sched/hmp.c
+++ b/kernel/sched/hmp.c
@@ -774,13 +774,6 @@ __read_mostly unsigned int sched_ravg_window = MIN_SCHED_RAVG_WINDOW;
/* Temporarily disable window-stats activity on all cpus */
unsigned int __read_mostly sched_disable_window_stats;
-/*
- * Major task runtime. If a task runs for more than sched_major_task_runtime
- * in a window, it's considered to be generating majority of workload
- * for this window. Prediction could be adjusted for such tasks.
- */
-__read_mostly unsigned int sched_major_task_runtime = 10000000;
-
static unsigned int sync_cpu;
struct related_thread_group *related_thread_groups[MAX_NUM_CGROUP_COLOC_ID];
@@ -1015,9 +1008,6 @@ void set_hmp_defaults(void)
update_up_down_migrate();
- sched_major_task_runtime =
- mult_frac(sched_ravg_window, MAJOR_TASK_PCT, 100);
-
sched_init_task_load_windows =
div64_u64((u64)sysctl_sched_init_task_load_pct *
(u64)sched_ravg_window, 100);
@@ -1961,8 +1951,6 @@ scale_load_to_freq(u64 load, unsigned int src_freq, unsigned int dst_freq)
return div64_u64(load * (u64)src_freq, (u64)dst_freq);
}
-#define HEAVY_TASK_SKIP 2
-#define HEAVY_TASK_SKIP_LIMIT 4
/*
* get_pred_busy - calculate predicted demand for a task on runqueue
*
@@ -1990,7 +1978,7 @@ static u32 get_pred_busy(struct rq *rq, struct task_struct *p,
u32 *hist = p->ravg.sum_history;
u32 dmin, dmax;
u64 cur_freq_runtime = 0;
- int first = NUM_BUSY_BUCKETS, final, skip_to;
+ int first = NUM_BUSY_BUCKETS, final;
u32 ret = runtime;
/* skip prediction for new tasks due to lack of history */
@@ -2010,36 +1998,6 @@ static u32 get_pred_busy(struct rq *rq, struct task_struct *p,
/* compute the bucket for prediction */
final = first;
- if (first < HEAVY_TASK_SKIP_LIMIT) {
- /* compute runtime at current CPU frequency */
- cur_freq_runtime = mult_frac(runtime, max_possible_efficiency,
- rq->cluster->efficiency);
- cur_freq_runtime = scale_load_to_freq(cur_freq_runtime,
- max_possible_freq, rq->cluster->cur_freq);
- /*
- * if the task runs for majority of the window, try to
- * pick higher buckets.
- */
- if (cur_freq_runtime >= sched_major_task_runtime) {
- int next = NUM_BUSY_BUCKETS;
- /*
- * if there is a higher bucket that's consistently
- * hit, don't jump beyond that.
- */
- for (i = start + 1; i <= HEAVY_TASK_SKIP_LIMIT &&
- i < NUM_BUSY_BUCKETS; i++) {
- if (buckets[i] > CONSISTENT_THRES) {
- next = i;
- break;
- }
- }
- skip_to = min(next, start + HEAVY_TASK_SKIP);
- /* don't jump beyond HEAVY_TASK_SKIP_LIMIT */
- skip_to = min(HEAVY_TASK_SKIP_LIMIT, skip_to);
- /* don't go below first non-empty bucket, if any */
- final = max(first, skip_to);
- }
- }
/* determine demand range for the predicted bucket */
if (final < 2) {