diff options
| author | Joonwoo Park <joonwoop@codeaurora.org> | 2016-05-25 12:02:27 -0700 |
|---|---|---|
| committer | Kyle Yan <kyan@codeaurora.org> | 2016-06-03 14:47:39 -0700 |
| commit | eedf0821f656d424859d37d018e699b9e58344c6 (patch) | |
| tree | 7a982bdb2909c458cca1f04cedd5a0eddc2010b5 /kernel | |
| parent | 6b2c4343e7b18d9075b45c387396623665786534 (diff) | |
sched: Remove the sched heavy task frequency guidance feature
This has always been an unused feature given its limitation of adding
phantom load to the system. Since there are no immediate plans to
use this, and given that it adds unnecessary complications to
the new load fixup mechanism, remove this feature for now. It can
be revisited later in light of the new mechanism.
Change-Id: Ie9501a898d0f423338293a8dde6bc56f493f1e75
Signed-off-by: Syed Rameez Mustafa <rameezmustafa@codeaurora.org>
Signed-off-by: Joonwoo Park <joonwoop@codeaurora.org>
Diffstat (limited to 'kernel')
| -rw-r--r-- | kernel/sched/core.c | 44 | ||||
| -rw-r--r-- | kernel/sched/fair.c | 14 | ||||
| -rw-r--r-- | kernel/sched/sched.h | 1 | ||||
| -rw-r--r-- | kernel/sysctl.c | 7 |
4 files changed, 0 insertions, 66 deletions
diff --git a/kernel/sched/core.c b/kernel/sched/core.c index 651fa57a5ca9..b6d48fbb0c60 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -2155,22 +2155,6 @@ static int account_busy_for_cpu_time(struct rq *rq, struct task_struct *p, return SCHED_FREQ_ACCOUNT_WAIT_TIME; } -static inline int -heavy_task_wakeup(struct task_struct *p, struct rq *rq, int event) -{ - u32 task_demand = p->ravg.demand; - - if (!sched_heavy_task || event != TASK_WAKE || - task_demand < sched_heavy_task || exiting_task(p)) - return 0; - - if (p->ravg.mark_start > rq->window_start) - return 0; - - /* has a full window elapsed since task slept? */ - return (rq->window_start - p->ravg.mark_start > sched_ravg_window); -} - static inline bool is_new_task(struct task_struct *p) { return p->ravg.active_windows < sysctl_sched_new_task_windows; @@ -2524,18 +2508,6 @@ static void update_cpu_busy_time(struct task_struct *p, struct rq *rq, if (p_is_curr_task) { /* p is idle task */ BUG_ON(p != rq->idle); - } else if (heavy_task_wakeup(p, rq, event)) { - /* A new window has started. If p is a waking - * heavy task its prev_window contribution is faked - * to be its window-based demand. Note that this can - * introduce phantom load into the system depending - * on the window policy and task behavior. This feature - * can be controlled via the sched_heavy_task - * tunable. 
*/ - p->ravg.prev_window = p->ravg.demand; - *prev_runnable_sum += p->ravg.demand; - if (new_task) - *nt_prev_runnable_sum += p->ravg.demand; } return; @@ -3593,12 +3565,6 @@ done: static inline void fixup_busy_time(struct task_struct *p, int new_cpu) { } -static inline int -heavy_task_wakeup(struct task_struct *p, struct rq *rq, int event) -{ - return 0; -} - #endif /* CONFIG_SCHED_FREQ_INPUT */ #define sched_up_down_migrate_auto_update 1 @@ -4253,12 +4219,6 @@ static inline int update_preferred_cluster(struct related_thread_group *grp, static inline void fixup_busy_time(struct task_struct *p, int new_cpu) { } -static inline int -heavy_task_wakeup(struct task_struct *p, struct rq *rq, int event) -{ - return 0; -} - static struct cpu_cycle update_task_ravg(struct task_struct *p, struct rq *rq, int event, u64 wallclock, u64 irqtime) @@ -5227,7 +5187,6 @@ try_to_wake_up(struct task_struct *p, unsigned int state, int wake_flags) int cpu, src_cpu, success = 0; int notify = 0; struct migration_notify_data mnd; - int heavy_task = 0; #ifdef CONFIG_SMP unsigned int old_load; struct rq *rq; @@ -5302,7 +5261,6 @@ try_to_wake_up(struct task_struct *p, unsigned int state, int wake_flags) old_load = task_load(p); wallclock = sched_ktime_clock(); update_task_ravg(rq->curr, rq, TASK_UPDATE, wallclock, 0); - heavy_task = heavy_task_wakeup(p, rq, TASK_WAKE); update_task_ravg(p, rq, TASK_WAKE, wallclock, 0); raw_spin_unlock(&rq->lock); @@ -5364,8 +5322,6 @@ out: false, check_group); check_for_freq_change(cpu_rq(src_cpu), false, check_group); - } else if (heavy_task) { - check_for_freq_change(cpu_rq(cpu), false, false); } else if (success) { check_for_freq_change(cpu_rq(cpu), true, false); } diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c index 10eee61d906c..6c5d393da122 100644 --- a/kernel/sched/fair.c +++ b/kernel/sched/fair.c @@ -2725,18 +2725,6 @@ unsigned int __read_mostly sched_spill_load; unsigned int __read_mostly sysctl_sched_spill_load_pct = 100; /* - * Tasks with 
demand >= sched_heavy_task will have their - * window-based demand added to the previous window's CPU - * time when they wake up, if they have slept for at least - * one full window. This feature is disabled when the tunable - * is set to 0 (the default). - */ -#ifdef CONFIG_SCHED_FREQ_INPUT -unsigned int __read_mostly sysctl_sched_heavy_task_pct; -unsigned int __read_mostly sched_heavy_task; -#endif - -/* * Tasks whose bandwidth consumption on a cpu is more than * sched_upmigrate are considered "big" tasks. Big tasks will be * considered for "up" migration, i.e migrating to a cpu with better @@ -2818,8 +2806,6 @@ void set_hmp_defaults(void) update_up_down_migrate(); #ifdef CONFIG_SCHED_FREQ_INPUT - sched_heavy_task = - pct_to_real(sysctl_sched_heavy_task_pct); sched_major_task_runtime = mult_frac(sched_ravg_window, MAJOR_TASK_PCT, 100); #endif diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h index df9b972195e5..3d5a89cc6eef 100644 --- a/kernel/sched/sched.h +++ b/kernel/sched/sched.h @@ -1058,7 +1058,6 @@ extern unsigned int sched_upmigrate; extern unsigned int sched_downmigrate; extern unsigned int sched_init_task_load_pelt; extern unsigned int sched_init_task_load_windows; -extern unsigned int sched_heavy_task; extern unsigned int up_down_migrate_scale_factor; extern unsigned int sysctl_sched_restrict_cluster_spill; extern unsigned int sched_pred_alert_load; diff --git a/kernel/sysctl.c b/kernel/sysctl.c index deffbdb0abf5..5f0767a2605c 100644 --- a/kernel/sysctl.c +++ b/kernel/sysctl.c @@ -316,13 +316,6 @@ static struct ctl_table kern_table[] = { .proc_handler = proc_dointvec_minmax, .extra1 = &zero, }, - { - .procname = "sched_heavy_task", - .data = &sysctl_sched_heavy_task_pct, - .maxlen = sizeof(unsigned int), - .mode = 0644, - .proc_handler = sched_hmp_proc_update_handler, - }, #endif #ifdef CONFIG_SCHED_HMP { |
