diff options
| author | Linux Build Service Account <lnxbuild@localhost> | 2016-09-29 11:20:30 -0700 |
|---|---|---|
| committer | Gerrit - the friendly Code Review server <code-review@localhost> | 2016-09-29 11:20:30 -0700 |
| commit | f6d68e27bff31d09b6c5b4ef702d3cbbf82741ad (patch) | |
| tree | 31565cca5faf64cf972545ee7d89f5966d3a0689 /kernel/sched | |
| parent | fbf3c441eb2def704afdedb6d73b3e3013677f9c (diff) | |
| parent | cc60f0790f27186b8b22cf4bac307b53202601b8 (diff) | |
Merge "sched: constrain HMP scheduler tunable range with in better way"
Diffstat (limited to 'kernel/sched')
| -rw-r--r-- | kernel/sched/hmp.c | 26 |
1 file changed, 4 insertions, 22 deletions
diff --git a/kernel/sched/hmp.c b/kernel/sched/hmp.c index 0da63e0d8377..6e1757aa1541 100644 --- a/kernel/sched/hmp.c +++ b/kernel/sched/hmp.c @@ -1502,28 +1502,10 @@ int sched_hmp_proc_update_handler(struct ctl_table *table, int write, if (write && (old_val == *data)) goto done; - /* - * Special handling for sched_freq_aggregate_threshold_pct - * which can be greater than 100. Use 1000 as an upper bound - * value which works for all practical use cases. - */ - if (data == &sysctl_sched_freq_aggregate_threshold_pct) { - if (*data > 1000) { - *data = old_val; - ret = -EINVAL; - goto done; - } - } else if (data != &sysctl_sched_select_prev_cpu_us) { - /* - * all tunables other than sched_select_prev_cpu_us are - * in percentage. - */ - if (sysctl_sched_downmigrate_pct > - sysctl_sched_upmigrate_pct || *data > 100) { - *data = old_val; - ret = -EINVAL; - goto done; - } + if (sysctl_sched_downmigrate_pct > sysctl_sched_upmigrate_pct) { + *data = old_val; + ret = -EINVAL; + goto done; } /* |
